author	Harvey Harrison <harvey.harrison@gmail.com>	2008-04-17 00:01:10 -0400
committer	Roland Dreier <rolandd@cisco.com>	2008-04-17 00:01:10 -0400
commit	3371836383d63b627b228875f5ac63023cbf11d2 (patch)
tree	b997894d9774bdc07f7df76ceca48e6a848760c8	/drivers/infiniband/hw/cxgb3/cxio_hal.c
parent	e8e91f6b4dc1179a70b0d21241b769c0ebfaa129 (diff)
IB: Replace remaining __FUNCTION__ occurrences with __func__
__FUNCTION__ is gcc-specific, use __func__ instead.

Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
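For reference (not part of the patch): __func__ is a predefined identifier in standard C99, while __FUNCTION__ is the older gcc-specific spelling of the same thing, so the substitution is purely mechanical. A minimal, self-contained sketch of the idiom follows; the stub function name and plain printf() are illustrative stand-ins for the driver's PDBG() debug macro.

#include <stdio.h>

/* Illustrative only: __func__ (C99) expands to the name of the
 * enclosing function as a string; __FUNCTION__ is the gcc alias
 * this patch replaces. */
static void clear_qp_ctx_stub(void)
{
	/* prints "clear_qp_ctx_stub: alloc_skb failed" */
	printf("%s: alloc_skb failed\n", __func__);
}

int main(void)
{
	clear_qp_ctx_stub();
	return 0;
}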
Diffstat (limited to 'drivers/infiniband/hw/cxgb3/cxio_hal.c')
-rw-r--r--	drivers/infiniband/hw/cxgb3/cxio_hal.c	84
1 file changed, 42 insertions(+), 42 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 03c5ff62889a..66eb7030aea8 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -140,7 +140,7 @@ static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
 	struct t3_modify_qp_wr *wqe;
 	struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
 	if (!skb) {
-		PDBG("%s alloc_skb failed\n", __FUNCTION__);
+		PDBG("%s alloc_skb failed\n", __func__);
 		return -ENOMEM;
 	}
 	wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
@@ -225,7 +225,7 @@ static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
 	}
 out:
 	mutex_unlock(&uctx->lock);
-	PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid);
+	PDBG("%s qpid 0x%x\n", __func__, qpid);
 	return qpid;
 }
 
@@ -237,7 +237,7 @@ static void put_qpid(struct cxio_rdev *rdev_p, u32 qpid,
 	entry = kmalloc(sizeof *entry, GFP_KERNEL);
 	if (!entry)
 		return;
-	PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid);
+	PDBG("%s qpid 0x%x\n", __func__, qpid);
 	entry->qpid = qpid;
 	mutex_lock(&uctx->lock);
 	list_add_tail(&entry->entry, &uctx->qpids);
@@ -300,7 +300,7 @@ int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
 	if (!kernel_domain)
 		wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
 					(wq->qpid << rdev_p->qpshift);
-	PDBG("%s qpid 0x%x doorbell 0x%p udb 0x%llx\n", __FUNCTION__,
+	PDBG("%s qpid 0x%x doorbell 0x%p udb 0x%llx\n", __func__,
 	     wq->qpid, wq->doorbell, (unsigned long long) wq->udb);
 	return 0;
 err4:
@@ -345,7 +345,7 @@ static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq)
 {
 	struct t3_cqe cqe;
 
-	PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __FUNCTION__,
+	PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,
 	     wq, cq, cq->sw_rptr, cq->sw_wptr);
 	memset(&cqe, 0, sizeof(cqe));
 	cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
@@ -363,10 +363,10 @@ void cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
 {
 	u32 ptr;
 
-	PDBG("%s wq %p cq %p\n", __FUNCTION__, wq, cq);
+	PDBG("%s wq %p cq %p\n", __func__, wq, cq);
 
 	/* flush RQ */
-	PDBG("%s rq_rptr %u rq_wptr %u skip count %u\n", __FUNCTION__,
+	PDBG("%s rq_rptr %u rq_wptr %u skip count %u\n", __func__,
 	     wq->rq_rptr, wq->rq_wptr, count);
 	ptr = wq->rq_rptr + count;
 	while (ptr++ != wq->rq_wptr)
@@ -378,7 +378,7 @@ static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
 {
 	struct t3_cqe cqe;
 
-	PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __FUNCTION__,
+	PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,
 	     wq, cq, cq->sw_rptr, cq->sw_wptr);
 	memset(&cqe, 0, sizeof(cqe));
 	cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
@@ -415,11 +415,11 @@ void cxio_flush_hw_cq(struct t3_cq *cq)
 {
 	struct t3_cqe *cqe, *swcqe;
 
-	PDBG("%s cq %p cqid 0x%x\n", __FUNCTION__, cq, cq->cqid);
+	PDBG("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
 	cqe = cxio_next_hw_cqe(cq);
 	while (cqe) {
 		PDBG("%s flushing hwcq rptr 0x%x to swcq wptr 0x%x\n",
-		     __FUNCTION__, cq->rptr, cq->sw_wptr);
+		     __func__, cq->rptr, cq->sw_wptr);
 		swcqe = cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2);
 		*swcqe = *cqe;
 		swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
@@ -461,7 +461,7 @@ void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
 			(*count)++;
 		ptr++;
 	}
-	PDBG("%s cq %p count %d\n", __FUNCTION__, cq, *count);
+	PDBG("%s cq %p count %d\n", __func__, cq, *count);
 }
 
 void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
@@ -470,7 +470,7 @@ void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
 	u32 ptr;
 
 	*count = 0;
-	PDBG("%s count zero %d\n", __FUNCTION__, *count);
+	PDBG("%s count zero %d\n", __func__, *count);
 	ptr = cq->sw_rptr;
 	while (!Q_EMPTY(ptr, cq->sw_wptr)) {
 		cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
@@ -479,7 +479,7 @@ void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
 			(*count)++;
 		ptr++;
 	}
-	PDBG("%s cq %p count %d\n", __FUNCTION__, cq, *count);
+	PDBG("%s cq %p count %d\n", __func__, cq, *count);
 }
 
 static int cxio_hal_init_ctrl_cq(struct cxio_rdev *rdev_p)
@@ -506,12 +506,12 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
 
 	skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
 	if (!skb) {
-		PDBG("%s alloc_skb failed\n", __FUNCTION__);
+		PDBG("%s alloc_skb failed\n", __func__);
 		return -ENOMEM;
 	}
 	err = cxio_hal_init_ctrl_cq(rdev_p);
 	if (err) {
-		PDBG("%s err %d initializing ctrl_cq\n", __FUNCTION__, err);
+		PDBG("%s err %d initializing ctrl_cq\n", __func__, err);
 		goto err;
 	}
 	rdev_p->ctrl_qp.workq = dma_alloc_coherent(
@@ -521,7 +521,7 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
 					&(rdev_p->ctrl_qp.dma_addr),
 					GFP_KERNEL);
 	if (!rdev_p->ctrl_qp.workq) {
-		PDBG("%s dma_alloc_coherent failed\n", __FUNCTION__);
+		PDBG("%s dma_alloc_coherent failed\n", __func__);
 		err = -ENOMEM;
 		goto err;
 	}
@@ -591,25 +591,25 @@ static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
 	addr &= 0x7FFFFFF;
 	nr_wqe = len % 96 ? len / 96 + 1 : len / 96;	/* 96B max per WQE */
 	PDBG("%s wptr 0x%x rptr 0x%x len %d, nr_wqe %d data %p addr 0x%0x\n",
-	     __FUNCTION__, rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, len,
+	     __func__, rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, len,
 	     nr_wqe, data, addr);
 	utx_len = 3;	/* in 32B unit */
 	for (i = 0; i < nr_wqe; i++) {
 		if (Q_FULL(rdev_p->ctrl_qp.rptr, rdev_p->ctrl_qp.wptr,
 			   T3_CTRL_QP_SIZE_LOG2)) {
 			PDBG("%s ctrl_qp full wtpr 0x%0x rptr 0x%0x, "
-			     "wait for more space i %d\n", __FUNCTION__,
+			     "wait for more space i %d\n", __func__,
 			     rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, i);
 			if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
 					!Q_FULL(rdev_p->ctrl_qp.rptr,
 						rdev_p->ctrl_qp.wptr,
 						T3_CTRL_QP_SIZE_LOG2))) {
 				PDBG("%s ctrl_qp workq interrupted\n",
-				     __FUNCTION__);
+				     __func__);
 				return -ERESTARTSYS;
 			}
 			PDBG("%s ctrl_qp wakeup, continue posting work request "
-			     "i %d\n", __FUNCTION__, i);
+			     "i %d\n", __func__, i);
 		}
 		wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
 				(1 << T3_CTRL_QP_SIZE_LOG2)));
@@ -630,7 +630,7 @@ static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
 		if ((i != 0) &&
 		    (i % (((1 << T3_CTRL_QP_SIZE_LOG2)) >> 1) == 0)) {
 			flag = T3_COMPLETION_FLAG;
-			PDBG("%s force completion at i %d\n", __FUNCTION__, i);
+			PDBG("%s force completion at i %d\n", __func__, i);
 		}
 
 		/* build the utx mem command */
@@ -701,7 +701,7 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
 		*stag = (stag_idx << 8) | ((*stag) & 0xFF);
 	}
 	PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
-	     __FUNCTION__, stag_state, type, pdid, stag_idx);
+	     __func__, stag_state, type, pdid, stag_idx);
 
 	if (reset_tpt_entry)
 		cxio_hal_pblpool_free(rdev_p, *pbl_addr, *pbl_size << 3);
@@ -718,7 +718,7 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
 	if (pbl) {
 
 		PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
-		     __FUNCTION__, *pbl_addr, rdev_p->rnic_info.pbl_base,
+		     __func__, *pbl_addr, rdev_p->rnic_info.pbl_base,
 		     *pbl_size);
 		err = cxio_hal_ctrl_qp_write_mem(rdev_p,
 						 (*pbl_addr >> 5),
@@ -814,7 +814,7 @@ int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
 	struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_ATOMIC);
 	if (!skb)
 		return -ENOMEM;
-	PDBG("%s rdev_p %p\n", __FUNCTION__, rdev_p);
+	PDBG("%s rdev_p %p\n", __func__, rdev_p);
 	wqe = (struct t3_rdma_init_wr *) __skb_put(skb, sizeof(*wqe));
 	wqe->wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_INIT));
 	wqe->wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(attr->tid) |
@@ -856,7 +856,7 @@ static int cxio_hal_ev_handler(struct t3cdev *t3cdev_p, struct sk_buff *skb)
 	struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;
 	PDBG("%d: %s cq_id 0x%x cq_ptr 0x%x genbit %0x overflow %0x an %0x"
 	     " se %0x notify %0x cqbranch %0x creditth %0x\n",
-	     cnt, __FUNCTION__, RSPQ_CQID(rsp_msg), RSPQ_CQPTR(rsp_msg),
+	     cnt, __func__, RSPQ_CQID(rsp_msg), RSPQ_CQPTR(rsp_msg),
 	     RSPQ_GENBIT(rsp_msg), RSPQ_OVERFLOW(rsp_msg), RSPQ_AN(rsp_msg),
 	     RSPQ_SE(rsp_msg), RSPQ_NOTIFY(rsp_msg), RSPQ_CQBRANCH(rsp_msg),
 	     RSPQ_CREDIT_THRESH(rsp_msg));
@@ -868,7 +868,7 @@ static int cxio_hal_ev_handler(struct t3cdev *t3cdev_p, struct sk_buff *skb)
 	     CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
 	rdev_p = (struct cxio_rdev *)t3cdev_p->ulp;
 	if (!rdev_p) {
-		PDBG("%s called by t3cdev %p with null ulp\n", __FUNCTION__,
+		PDBG("%s called by t3cdev %p with null ulp\n", __func__,
 		     t3cdev_p);
 		return 0;
 	}
@@ -908,13 +908,13 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
 		strncpy(rdev_p->dev_name, rdev_p->t3cdev_p->name,
 			T3_MAX_DEV_NAME_LEN);
 	} else {
-		PDBG("%s t3cdev_p or dev_name must be set\n", __FUNCTION__);
+		PDBG("%s t3cdev_p or dev_name must be set\n", __func__);
 		return -EINVAL;
 	}
 
 	list_add_tail(&rdev_p->entry, &rdev_list);
 
-	PDBG("%s opening rnic dev %s\n", __FUNCTION__, rdev_p->dev_name);
+	PDBG("%s opening rnic dev %s\n", __func__, rdev_p->dev_name);
 	memset(&rdev_p->ctrl_qp, 0, sizeof(rdev_p->ctrl_qp));
 	if (!rdev_p->t3cdev_p)
 		rdev_p->t3cdev_p = dev2t3cdev(netdev_p);
@@ -923,14 +923,14 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
 				     &(rdev_p->rnic_info));
 	if (err) {
 		printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
-		     __FUNCTION__, rdev_p->t3cdev_p, err);
+		     __func__, rdev_p->t3cdev_p, err);
 		goto err1;
 	}
 	err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_PORTS,
 				    &(rdev_p->port_info));
 	if (err) {
 		printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
-		     __FUNCTION__, rdev_p->t3cdev_p, err);
+		     __func__, rdev_p->t3cdev_p, err);
 		goto err1;
 	}
 
@@ -947,7 +947,7 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
 	rdev_p->qpmask = (65536 >> ilog2(rdev_p->qpnr)) - 1;
 	PDBG("%s rnic %s info: tpt_base 0x%0x tpt_top 0x%0x num stags %d "
 	     "pbl_base 0x%0x pbl_top 0x%0x rqt_base 0x%0x, rqt_top 0x%0x\n",
-	     __FUNCTION__, rdev_p->dev_name, rdev_p->rnic_info.tpt_base,
+	     __func__, rdev_p->dev_name, rdev_p->rnic_info.tpt_base,
 	     rdev_p->rnic_info.tpt_top, cxio_num_stags(rdev_p),
 	     rdev_p->rnic_info.pbl_base,
 	     rdev_p->rnic_info.pbl_top, rdev_p->rnic_info.rqt_base,
@@ -961,7 +961,7 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
 	err = cxio_hal_init_ctrl_qp(rdev_p);
 	if (err) {
 		printk(KERN_ERR "%s error %d initializing ctrl_qp.\n",
-		       __FUNCTION__, err);
+		       __func__, err);
 		goto err1;
 	}
 	err = cxio_hal_init_resource(rdev_p, cxio_num_stags(rdev_p), 0,
@@ -969,19 +969,19 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
 				     T3_MAX_NUM_PD);
 	if (err) {
 		printk(KERN_ERR "%s error %d initializing hal resources.\n",
-		       __FUNCTION__, err);
+		       __func__, err);
 		goto err2;
 	}
 	err = cxio_hal_pblpool_create(rdev_p);
 	if (err) {
 		printk(KERN_ERR "%s error %d initializing pbl mem pool.\n",
-		       __FUNCTION__, err);
+		       __func__, err);
 		goto err3;
 	}
 	err = cxio_hal_rqtpool_create(rdev_p);
 	if (err) {
 		printk(KERN_ERR "%s error %d initializing rqt mem pool.\n",
-		       __FUNCTION__, err);
+		       __func__, err);
 		goto err4;
 	}
 	return 0;
@@ -1043,7 +1043,7 @@ static void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
 			 * Insert this completed cqe into the swcq.
 			 */
 			PDBG("%s moving cqe into swcq sq idx %ld cq idx %ld\n",
-			     __FUNCTION__, Q_PTR2IDX(ptr, wq->sq_size_log2),
+			     __func__, Q_PTR2IDX(ptr, wq->sq_size_log2),
 			     Q_PTR2IDX(cq->sw_wptr, cq->size_log2));
 			sqp->cqe.header |= htonl(V_CQE_SWCQE(1));
 			*(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2))
@@ -1112,7 +1112,7 @@ int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
 
 		PDBG("%s CQE OOO %d qpid 0x%0x genbit %d type %d status 0x%0x"
 		     " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
-		     __FUNCTION__, CQE_OOO(*hw_cqe), CQE_QPID(*hw_cqe),
+		     __func__, CQE_OOO(*hw_cqe), CQE_QPID(*hw_cqe),
 		     CQE_GENBIT(*hw_cqe), CQE_TYPE(*hw_cqe), CQE_STATUS(*hw_cqe),
 		     CQE_OPCODE(*hw_cqe), CQE_LEN(*hw_cqe), CQE_WRID_HI(*hw_cqe),
 		     CQE_WRID_LOW(*hw_cqe));
@@ -1215,7 +1215,7 @@ int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
 		struct t3_swsq *sqp;
 
 		PDBG("%s out of order completion going in swsq at idx %ld\n",
-		     __FUNCTION__,
+		     __func__,
 		     Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2));
 		sqp = wq->sq +
 		      Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2);
@@ -1234,13 +1234,13 @@ proc_cqe:
 	 */
 	if (SQ_TYPE(*hw_cqe)) {
 		wq->sq_rptr = CQE_WRID_SQ_WPTR(*hw_cqe);
-		PDBG("%s completing sq idx %ld\n", __FUNCTION__,
+		PDBG("%s completing sq idx %ld\n", __func__,
 		     Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2));
 		*cookie = (wq->sq +
 			   Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2))->wr_id;
 		wq->sq_rptr++;
 	} else {
-		PDBG("%s completing rq idx %ld\n", __FUNCTION__,
+		PDBG("%s completing rq idx %ld\n", __func__,
 		     Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
 		*cookie = *(wq->rq + Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
 		wq->rq_rptr++;
@@ -1255,11 +1255,11 @@ flush_wq:
 skip_cqe:
 	if (SW_CQE(*hw_cqe)) {
 		PDBG("%s cq %p cqid 0x%x skip sw cqe sw_rptr 0x%x\n",
-		     __FUNCTION__, cq, cq->cqid, cq->sw_rptr);
+		     __func__, cq, cq->cqid, cq->sw_rptr);
 		++cq->sw_rptr;
 	} else {
 		PDBG("%s cq %p cqid 0x%x skip hw cqe rptr 0x%x\n",
-		     __FUNCTION__, cq, cq->cqid, cq->rptr);
+		     __func__, cq, cq->cqid, cq->rptr);
 		++cq->rptr;
 
 		/*