author    Hariprasad Shenai <hariprasad@chelsio.com>    2015-01-15 22:54:47 -0500
committer David S. Miller <davem@davemloft.net>    2015-01-16 01:07:01 -0500
commit    a56c66e8089ea8474a26af1c7da9b5d7ad3017cb (patch)
tree      ea2fedd13523898d3bb9ffb553b70f2a1a126b68
parent    8cd5a56c2b7c27dc574b465401e4f41a5e363e24 (diff)
iw_cxgb4: Cleanup register defines/MACROS defined in t4.h
Cleanup all the macros defined in t4.h and the affected files.

Signed-off-by: Hariprasad Shenai <hariprasad@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c   38
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c    2
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4.h  100
3 files changed, 70 insertions, 70 deletions
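The change is purely mechanical: each bitfield macro moves from the old prefixed spelling (S_*, M_*, G_*, V_*) to the suffixed spelling (*_S, *_M, *_G, *_V), and the register define A_PCIE_MA_SYNC becomes PCIE_MA_SYNC_A; no values or semantics change. As a rough sketch of the pattern, assuming a hypothetical field named FIELD (not a macro from t4.h):

/* Old style: the operation is a prefix on the field name. */
#define S_FIELD    5                             /* bit offset of the field */
#define M_FIELD    0x1F                          /* mask covering the field width */
#define V_FIELD(x) ((x) << S_FIELD)              /* place a value into the field */
#define G_FIELD(x) (((x) >> S_FIELD) & M_FIELD)  /* pull the field back out of a word */

/* New style: identical semantics, the operation is now a suffix. */
#define FIELD_S    5
#define FIELD_M    0x1F
#define FIELD_V(x) ((x) << FIELD_S)
#define FIELD_G(x) (((x) >> FIELD_S) & FIELD_M)

A header word is still built as cpu_to_be32(FIELD_V(val) | ...) and decoded with FIELD_G(be32_to_cpu(word)); only the macro names are respelled.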
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index e9fd3a029296..39b0da39a08e 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -182,12 +182,12 @@ static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
 	PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
 	     wq, cq, cq->sw_cidx, cq->sw_pidx);
 	memset(&cqe, 0, sizeof(cqe));
-	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
-				 V_CQE_OPCODE(FW_RI_SEND) |
-				 V_CQE_TYPE(0) |
-				 V_CQE_SWCQE(1) |
-				 V_CQE_QPID(wq->sq.qid));
-	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
+	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+				 CQE_OPCODE_V(FW_RI_SEND) |
+				 CQE_TYPE_V(0) |
+				 CQE_SWCQE_V(1) |
+				 CQE_QPID_V(wq->sq.qid));
+	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
 	cq->sw_queue[cq->sw_pidx] = cqe;
 	t4_swcq_produce(cq);
 }
@@ -215,13 +215,13 @@ static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
 	PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
 	     wq, cq, cq->sw_cidx, cq->sw_pidx);
 	memset(&cqe, 0, sizeof(cqe));
-	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
-				 V_CQE_OPCODE(swcqe->opcode) |
-				 V_CQE_TYPE(1) |
-				 V_CQE_SWCQE(1) |
-				 V_CQE_QPID(wq->sq.qid));
+	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+				 CQE_OPCODE_V(swcqe->opcode) |
+				 CQE_TYPE_V(1) |
+				 CQE_SWCQE_V(1) |
+				 CQE_QPID_V(wq->sq.qid));
 	CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
-	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
+	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
 	cq->sw_queue[cq->sw_pidx] = cqe;
 	t4_swcq_produce(cq);
 }
@@ -284,7 +284,7 @@ static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
 		 */
 		PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
 		     __func__, cidx, cq->sw_pidx);
-		swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
+		swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
 		cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
 		t4_swcq_produce(cq);
 		swsqe->flushed = 1;
@@ -301,10 +301,10 @@ static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
 {
 	read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
 	read_cqe->len = htonl(wq->sq.oldest_read->read_len);
-	read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
-				 V_CQE_SWCQE(SW_CQE(hw_cqe)) |
-				 V_CQE_OPCODE(FW_RI_READ_REQ) |
-				 V_CQE_TYPE(1));
+	read_cqe->header = htonl(CQE_QPID_V(CQE_QPID(hw_cqe)) |
+				 CQE_SWCQE_V(SW_CQE(hw_cqe)) |
+				 CQE_OPCODE_V(FW_RI_READ_REQ) |
+				 CQE_TYPE_V(1));
 	read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
 }
 
@@ -400,7 +400,7 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
 		} else {
 			swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
 			*swcqe = *hw_cqe;
-			swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
+			swcqe->header |= cpu_to_be32(CQE_SWCQE_V(1));
 			t4_swcq_produce(&chp->cq);
 		}
 next_cqe:
@@ -576,7 +576,7 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
 		}
 		if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
 			t4_set_wq_in_error(wq);
-			hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
+			hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
 			goto proc_cqe;
 		}
 		goto proc_cqe;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index bb85d479e66e..42238edc95cb 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1776,7 +1776,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 		if (mm5) {
 			mm5->key = uresp.ma_sync_key;
 			mm5->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0)
-				    + A_PCIE_MA_SYNC) & PAGE_MASK;
+				    + PCIE_MA_SYNC_A) & PAGE_MASK;
 			mm5->len = PAGE_SIZE;
 			insert_mmap(ucontext, mm5);
 		}
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 29e764e406e1..871cdcac7be2 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -41,7 +41,7 @@
 #define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
 #define T4_STAG_UNSET 0xffffffff
 #define T4_FW_MAJ 0
-#define A_PCIE_MA_SYNC 0x30b4
+#define PCIE_MA_SYNC_A 0x30b4
 
 struct t4_status_page {
 	__be32 rsvd1;	/* flit 0 - hw owns */
@@ -184,44 +184,44 @@ struct t4_cqe {
 
 /* macros for flit 0 of the cqe */
 
-#define S_CQE_QPID 12
-#define M_CQE_QPID 0xFFFFF
-#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
-#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)
+#define CQE_QPID_S 12
+#define CQE_QPID_M 0xFFFFF
+#define CQE_QPID_G(x) ((((x) >> CQE_QPID_S)) & CQE_QPID_M)
+#define CQE_QPID_V(x) ((x)<<CQE_QPID_S)
 
-#define S_CQE_SWCQE 11
-#define M_CQE_SWCQE 0x1
-#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
-#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)
+#define CQE_SWCQE_S 11
+#define CQE_SWCQE_M 0x1
+#define CQE_SWCQE_G(x) ((((x) >> CQE_SWCQE_S)) & CQE_SWCQE_M)
+#define CQE_SWCQE_V(x) ((x)<<CQE_SWCQE_S)
 
-#define S_CQE_STATUS 5
-#define M_CQE_STATUS 0x1F
-#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
-#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)
+#define CQE_STATUS_S 5
+#define CQE_STATUS_M 0x1F
+#define CQE_STATUS_G(x) ((((x) >> CQE_STATUS_S)) & CQE_STATUS_M)
+#define CQE_STATUS_V(x) ((x)<<CQE_STATUS_S)
 
-#define S_CQE_TYPE 4
-#define M_CQE_TYPE 0x1
-#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
-#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)
+#define CQE_TYPE_S 4
+#define CQE_TYPE_M 0x1
+#define CQE_TYPE_G(x) ((((x) >> CQE_TYPE_S)) & CQE_TYPE_M)
+#define CQE_TYPE_V(x) ((x)<<CQE_TYPE_S)
 
-#define S_CQE_OPCODE 0
-#define M_CQE_OPCODE 0xF
-#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
-#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)
+#define CQE_OPCODE_S 0
+#define CQE_OPCODE_M 0xF
+#define CQE_OPCODE_G(x) ((((x) >> CQE_OPCODE_S)) & CQE_OPCODE_M)
+#define CQE_OPCODE_V(x) ((x)<<CQE_OPCODE_S)
 
-#define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x)->header)))
-#define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x)->header)))
-#define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x)->header)))
+#define SW_CQE(x) (CQE_SWCQE_G(be32_to_cpu((x)->header)))
+#define CQE_QPID(x) (CQE_QPID_G(be32_to_cpu((x)->header)))
+#define CQE_TYPE(x) (CQE_TYPE_G(be32_to_cpu((x)->header)))
 #define SQ_TYPE(x) (CQE_TYPE((x)))
 #define RQ_TYPE(x) (!CQE_TYPE((x)))
-#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x)->header)))
-#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x)->header)))
+#define CQE_STATUS(x) (CQE_STATUS_G(be32_to_cpu((x)->header)))
+#define CQE_OPCODE(x) (CQE_OPCODE_G(be32_to_cpu((x)->header)))
 
 #define CQE_SEND_OPCODE(x)( \
-	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
-	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
-	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
-	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
+	(CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
+	(CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
+	(CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
+	(CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
 
 #define CQE_LEN(x) (be32_to_cpu((x)->len))
 
@@ -237,25 +237,25 @@ struct t4_cqe {
 #define CQE_WRID_LOW(x) (be32_to_cpu((x)->u.gen.wrid_low))
 
 /* macros for flit 3 of the cqe */
-#define S_CQE_GENBIT 63
-#define M_CQE_GENBIT 0x1
-#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
-#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)
+#define CQE_GENBIT_S 63
+#define CQE_GENBIT_M 0x1
+#define CQE_GENBIT_G(x) (((x) >> CQE_GENBIT_S) & CQE_GENBIT_M)
+#define CQE_GENBIT_V(x) ((x)<<CQE_GENBIT_S)
 
-#define S_CQE_OVFBIT 62
-#define M_CQE_OVFBIT 0x1
-#define G_CQE_OVFBIT(x) ((((x) >> S_CQE_OVFBIT)) & M_CQE_OVFBIT)
+#define CQE_OVFBIT_S 62
+#define CQE_OVFBIT_M 0x1
+#define CQE_OVFBIT_G(x) ((((x) >> CQE_OVFBIT_S)) & CQE_OVFBIT_M)
 
-#define S_CQE_IQTYPE 60
-#define M_CQE_IQTYPE 0x3
-#define G_CQE_IQTYPE(x) ((((x) >> S_CQE_IQTYPE)) & M_CQE_IQTYPE)
+#define CQE_IQTYPE_S 60
+#define CQE_IQTYPE_M 0x3
+#define CQE_IQTYPE_G(x) ((((x) >> CQE_IQTYPE_S)) & CQE_IQTYPE_M)
 
-#define M_CQE_TS 0x0fffffffffffffffULL
-#define G_CQE_TS(x) ((x) & M_CQE_TS)
+#define CQE_TS_M 0x0fffffffffffffffULL
+#define CQE_TS_G(x) ((x) & CQE_TS_M)
 
-#define CQE_OVFBIT(x) ((unsigned)G_CQE_OVFBIT(be64_to_cpu((x)->bits_type_ts)))
-#define CQE_GENBIT(x) ((unsigned)G_CQE_GENBIT(be64_to_cpu((x)->bits_type_ts)))
-#define CQE_TS(x) (G_CQE_TS(be64_to_cpu((x)->bits_type_ts)))
+#define CQE_OVFBIT(x) ((unsigned)CQE_OVFBIT_G(be64_to_cpu((x)->bits_type_ts)))
+#define CQE_GENBIT(x) ((unsigned)CQE_GENBIT_G(be64_to_cpu((x)->bits_type_ts)))
+#define CQE_TS(x) (CQE_TS_G(be64_to_cpu((x)->bits_type_ts)))
 
 struct t4_swsqe {
 	u64 wr_id;