author		Anish Bhatt <anish@chelsio.com>		2014-11-12 20:15:57 -0500
committer	David S. Miller <davem@davemloft.net>	2014-11-13 14:36:22 -0500
commit		d7990b0c34623cd54475a0562c607efbaba4899d (patch)
tree		07df588a531da6bb0cbdac66633f2ce26e50263b /drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
parent		8c847d254146d32c86574a1b16923ff91bb784dd (diff)
cxgb4i/cxgb4 : Refactor macros to conform to uniform standards
Refactored all macros used in cxgb4i as part of the previously started cxgb4 macro name cleanup. This makes them more uniform and avoids namespace collisions. Minor changes were required in other drivers, since some of these macros are shared; the affected drivers are iw_cxgb4, cxgb4(vf) and csiostor.

Signed-off-by: Anish Bhatt <anish@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
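For readers outside the cxgb4 tree, here is a minimal sketch of the naming scheme this cleanup converges on. The field offsets and names below are made up for illustration, not taken from the real hardware layout: _S is a field's bit offset, _M its unshifted mask, _V(x) shifts a value into place, _G(x) extracts it back out, and _F is the _V(1) shorthand for single-bit flags.

/* Illustrative only: the pattern, not the real cxgb4 field offsets. */
#define EXAMPLE_S        8                      /* bit offset of the field */
#define EXAMPLE_M        0xfU                   /* unshifted field mask */
#define EXAMPLE_V(x)     ((x) << EXAMPLE_S)     /* place a value in the field */
#define EXAMPLE_G(x)     (((x) >> EXAMPLE_S) & EXAMPLE_M) /* extract it back */

#define EXAMPLE_FL_S     20                     /* a single-bit flag field... */
#define EXAMPLE_FL_V(x)  ((x) << EXAMPLE_FL_S)
#define EXAMPLE_FL_F     EXAMPLE_FL_V(1U)       /* ...gets an _F shorthand */

Under that scheme a magic-number line such as "opt2 |= 1 << 22" in the old code becomes the self-describing "opt2 |= RX_FC_VALID_F" seen in the diff below.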
Diffstat (limited to 'drivers/scsi/cxgbi/cxgb4i/cxgb4i.c')
 -rw-r--r--  drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 78
 1 file changed, 40 insertions(+), 38 deletions(-)
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index ccacf09c2c16..ed0e16866dc7 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -188,18 +188,18 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
 	unsigned int qid_atid = ((unsigned int)csk->atid) |
				 (((unsigned int)csk->rss_qid) << 14);
 
-	opt0 = KEEP_ALIVE(1) |
-		WND_SCALE(wscale) |
-		MSS_IDX(csk->mss_idx) |
-		L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) |
-		TX_CHAN(csk->tx_chan) |
-		SMAC_SEL(csk->smac_idx) |
-		ULP_MODE(ULP_MODE_ISCSI) |
-		RCV_BUFSIZ(cxgb4i_rcv_win >> 10);
-	opt2 = RX_CHANNEL(0) |
-		RSS_QUEUE_VALID |
-		(1 << 20) |
-		RSS_QUEUE(csk->rss_qid);
+	opt0 = KEEP_ALIVE_F |
+		WND_SCALE_V(wscale) |
+		MSS_IDX_V(csk->mss_idx) |
+		L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
+		TX_CHAN_V(csk->tx_chan) |
+		SMAC_SEL_V(csk->smac_idx) |
+		ULP_MODE_V(ULP_MODE_ISCSI) |
+		RCV_BUFSIZ_V(cxgb4i_rcv_win >> 10);
+	opt2 = RX_CHANNEL_V(0) |
+		RSS_QUEUE_VALID_F |
+		(RX_FC_DISABLE_F) |
+		RSS_QUEUE_V(csk->rss_qid);
 
 	if (is_t4(lldi->adapter_type)) {
 		struct cpl_act_open_req *req =
@@ -216,7 +216,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
 		req->params = cpu_to_be32(cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t));
-		opt2 |= 1 << 22;
+		opt2 |= RX_FC_VALID_F;
 		req->opt2 = cpu_to_be32(opt2);
 
 		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
@@ -236,7 +236,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
 		req->local_ip = csk->saddr.sin_addr.s_addr;
 		req->peer_ip = csk->daddr.sin_addr.s_addr;
 		req->opt0 = cpu_to_be64(opt0);
-		req->params = cpu_to_be64(V_FILTER_TUPLE(
+		req->params = cpu_to_be64(FILTER_TUPLE_V(
					cxgb4_select_ntuple(
						csk->cdev->ports[csk->port_id],
						csk->l2t)));
@@ -271,19 +271,19 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
 	unsigned int qid_atid = ((unsigned int)csk->atid) |
				 (((unsigned int)csk->rss_qid) << 14);
 
-	opt0 = KEEP_ALIVE(1) |
-		WND_SCALE(wscale) |
-		MSS_IDX(csk->mss_idx) |
-		L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) |
-		TX_CHAN(csk->tx_chan) |
-		SMAC_SEL(csk->smac_idx) |
-		ULP_MODE(ULP_MODE_ISCSI) |
-		RCV_BUFSIZ(cxgb4i_rcv_win >> 10);
+	opt0 = KEEP_ALIVE_F |
+		WND_SCALE_V(wscale) |
+		MSS_IDX_V(csk->mss_idx) |
+		L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
+		TX_CHAN_V(csk->tx_chan) |
+		SMAC_SEL_V(csk->smac_idx) |
+		ULP_MODE_V(ULP_MODE_ISCSI) |
+		RCV_BUFSIZ_V(cxgb4i_rcv_win >> 10);
 
-	opt2 = RX_CHANNEL(0) |
-		RSS_QUEUE_VALID |
-		RX_FC_DISABLE |
-		RSS_QUEUE(csk->rss_qid);
+	opt2 = RX_CHANNEL_V(0) |
+		RSS_QUEUE_VALID_F |
+		RX_FC_DISABLE_F |
+		RSS_QUEUE_V(csk->rss_qid);
 
 	if (t4) {
 		struct cpl_act_open_req6 *req =
@@ -304,7 +304,7 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
 
 		req->opt0 = cpu_to_be64(opt0);
 
-		opt2 |= RX_FC_VALID;
+		opt2 |= RX_FC_VALID_F;
 		req->opt2 = cpu_to_be32(opt2);
 
 		req->params = cpu_to_be32(cxgb4_select_ntuple(
@@ -327,10 +327,10 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
				    8);
 		req->opt0 = cpu_to_be64(opt0);
 
-		opt2 |= T5_OPT_2_VALID;
+		opt2 |= T5_OPT_2_VALID_F;
 		req->opt2 = cpu_to_be32(opt2);
 
-		req->params = cpu_to_be64(V_FILTER_TUPLE(cxgb4_select_ntuple(
+		req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t)));
 	}
@@ -451,7 +451,8 @@ static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
 	INIT_TP_WR(req, csk->tid);
 	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
				      csk->tid));
-	req->credit_dack = cpu_to_be32(RX_CREDITS(credits) | RX_FORCE_ACK(1));
+	req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits)
+				       | RX_FORCE_ACK_F);
 	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
 	return credits;
 }
@@ -1440,16 +1441,16 @@ static inline void ulp_mem_io_set_hdr(struct cxgb4_lld_info *lldi,
 
 	INIT_ULPTX_WR(req, wr_len, 0, 0);
 	if (is_t4(lldi->adapter_type))
-		req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) |
-					(ULP_MEMIO_ORDER(1)));
+		req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
+					(ULP_MEMIO_ORDER_F));
 	else
-		req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) |
-					(V_T5_ULP_MEMIO_IMM(1)));
-	req->dlen = htonl(ULP_MEMIO_DATA_LEN(dlen >> 5));
-	req->lock_addr = htonl(ULP_MEMIO_ADDR(pm_addr >> 5));
+		req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
+					(T5_ULP_MEMIO_IMM_F));
+	req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
+	req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
 	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
 
-	idata->cmd_more = htonl(ULPTX_CMD(ULP_TX_SC_IMM));
+	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
 	idata->len = htonl(dlen);
 }
 
@@ -1673,7 +1674,8 @@ static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
 	cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
 	cdev->itp = &cxgb4i_iscsi_transport;
 
-	cdev->pfvf = FW_VIID_PFN_GET(cxgb4_port_viid(lldi->ports[0])) << 8;
+	cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
+			<< FW_VIID_PFN_S;
 	pr_info("cdev 0x%p,%s, pfvf %u.\n",
 		cdev, lldi->ports[0]->name, cdev->pfvf);
 
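A closing note on the _G/_S pairing in the last hunk: the old code extracted the PF number and re-shifted it with a hard-coded "<< 8", while the new code names the offset. A self-contained sketch of the same computation, using hypothetical mask and shift values rather than the real FW_VIID layout:

/* Hypothetical VIID layout for illustration: PFN at bits 10:8. */
#define VIID_PFN_S	8
#define VIID_PFN_M	0x7U
#define VIID_PFN_G(x)	(((x) >> VIID_PFN_S) & VIID_PFN_M)

/* Equivalent of the new pfvf computation: extract the PF number,
 * then put it back at its named bit position.
 */
static unsigned int viid_to_pfvf(unsigned int viid)
{
	return VIID_PFN_G(viid) << VIID_PFN_S;
}

Because the extract and the re-place share one _S constant, a future change to the field's position only needs to touch the header that defines it.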