author	Harvey Harrison <harvey.harrison@gmail.com>	2008-04-17 00:01:10 -0400
committer	Roland Dreier <rolandd@cisco.com>	2008-04-17 00:01:10 -0400
commit	3371836383d63b627b228875f5ac63023cbf11d2 (patch)
tree	b997894d9774bdc07f7df76ceca48e6a848760c8 /drivers/infiniband/hw/cxgb3
parent	e8e91f6b4dc1179a70b0d21241b769c0ebfaa129 (diff)
IB: Replace remaining __FUNCTION__ occurrences with __func__
__FUNCTION__ is gcc-specific, use __func__ instead.

Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
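The substitution is purely mechanical: __func__ is the predefined identifier standardized by C99 (section 6.4.2.2), while __FUNCTION__ is an older GCC-specific spelling; both expand to the name of the enclosing function. A minimal standalone illustration (not part of the patch; the file name func_demo.c is hypothetical):

	/* func_demo.c - illustration only, not part of this patch. */
	#include <stdio.h>

	static void report(void)
	{
		/* __func__ expands to "report" under any C99 compiler;
		 * GCC's __FUNCTION__ would expand identically here. */
		printf("%s: entered\n", __func__);
	}

	int main(void)
	{
		report();
		return 0;
	}

Compiled with, e.g., gcc -std=c99 func_demo.c, this prints "report: entered"; replacing __FUNCTION__ with __func__ throughout a driver, as this patch does, changes no generated code.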
Diffstat (limited to 'drivers/infiniband/hw/cxgb3')
-rw-r--r--	drivers/infiniband/hw/cxgb3/cxio_dbg.c		24
-rw-r--r--	drivers/infiniband/hw/cxgb3/cxio_hal.c		84
-rw-r--r--	drivers/infiniband/hw/cxgb3/cxio_resource.c	12
-rw-r--r--	drivers/infiniband/hw/cxgb3/iwch.c		6
-rw-r--r--	drivers/infiniband/hw/cxgb3/iwch_cm.c		166
-rw-r--r--	drivers/infiniband/hw/cxgb3/iwch_cm.h		4
-rw-r--r--	drivers/infiniband/hw/cxgb3/iwch_cq.c		4
-rw-r--r--	drivers/infiniband/hw/cxgb3/iwch_ev.c		12
-rw-r--r--	drivers/infiniband/hw/cxgb3/iwch_mem.c		6
-rw-r--r--	drivers/infiniband/hw/cxgb3/iwch_provider.c	76
-rw-r--r--	drivers/infiniband/hw/cxgb3/iwch_provider.h	4
-rw-r--r--	drivers/infiniband/hw/cxgb3/iwch_qp.c		38
12 files changed, 218 insertions(+), 218 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb3/cxio_dbg.c b/drivers/infiniband/hw/cxgb3/cxio_dbg.c
index 75f7b16a271d..a8d24d53f307 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_dbg.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_dbg.c
@@ -45,16 +45,16 @@ void cxio_dump_tpt(struct cxio_rdev *rdev, u32 stag)
 
 	m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
 	if (!m) {
-		PDBG("%s couldn't allocate memory.\n", __FUNCTION__);
+		PDBG("%s couldn't allocate memory.\n", __func__);
 		return;
 	}
 	m->mem_id = MEM_PMRX;
 	m->addr = (stag>>8) * 32 + rdev->rnic_info.tpt_base;
 	m->len = size;
-	PDBG("%s TPT addr 0x%x len %d\n", __FUNCTION__, m->addr, m->len);
+	PDBG("%s TPT addr 0x%x len %d\n", __func__, m->addr, m->len);
 	rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
 	if (rc) {
-		PDBG("%s toectl returned error %d\n", __FUNCTION__, rc);
+		PDBG("%s toectl returned error %d\n", __func__, rc);
 		kfree(m);
 		return;
 	}
@@ -82,17 +82,17 @@ void cxio_dump_pbl(struct cxio_rdev *rdev, u32 pbl_addr, uint len, u8 shift)
 
 	m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
 	if (!m) {
-		PDBG("%s couldn't allocate memory.\n", __FUNCTION__);
+		PDBG("%s couldn't allocate memory.\n", __func__);
 		return;
 	}
 	m->mem_id = MEM_PMRX;
 	m->addr = pbl_addr;
 	m->len = size;
 	PDBG("%s PBL addr 0x%x len %d depth %d\n",
-		__FUNCTION__, m->addr, m->len, npages);
+		__func__, m->addr, m->len, npages);
 	rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
 	if (rc) {
-		PDBG("%s toectl returned error %d\n", __FUNCTION__, rc);
+		PDBG("%s toectl returned error %d\n", __func__, rc);
 		kfree(m);
 		return;
 	}
@@ -144,16 +144,16 @@ void cxio_dump_rqt(struct cxio_rdev *rdev, u32 hwtid, int nents)
 
 	m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
 	if (!m) {
-		PDBG("%s couldn't allocate memory.\n", __FUNCTION__);
+		PDBG("%s couldn't allocate memory.\n", __func__);
 		return;
 	}
 	m->mem_id = MEM_PMRX;
 	m->addr = ((hwtid)<<10) + rdev->rnic_info.rqt_base;
 	m->len = size;
-	PDBG("%s RQT addr 0x%x len %d\n", __FUNCTION__, m->addr, m->len);
+	PDBG("%s RQT addr 0x%x len %d\n", __func__, m->addr, m->len);
 	rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
 	if (rc) {
-		PDBG("%s toectl returned error %d\n", __FUNCTION__, rc);
+		PDBG("%s toectl returned error %d\n", __func__, rc);
 		kfree(m);
 		return;
 	}
@@ -177,16 +177,16 @@ void cxio_dump_tcb(struct cxio_rdev *rdev, u32 hwtid)
 
 	m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
 	if (!m) {
-		PDBG("%s couldn't allocate memory.\n", __FUNCTION__);
+		PDBG("%s couldn't allocate memory.\n", __func__);
 		return;
 	}
 	m->mem_id = MEM_CM;
 	m->addr = hwtid * size;
 	m->len = size;
-	PDBG("%s TCB %d len %d\n", __FUNCTION__, m->addr, m->len);
+	PDBG("%s TCB %d len %d\n", __func__, m->addr, m->len);
 	rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
 	if (rc) {
-		PDBG("%s toectl returned error %d\n", __FUNCTION__, rc);
+		PDBG("%s toectl returned error %d\n", __func__, rc);
 		kfree(m);
 		return;
 	}
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 03c5ff62889a..66eb7030aea8 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -140,7 +140,7 @@ static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
 	struct t3_modify_qp_wr *wqe;
 	struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
 	if (!skb) {
-		PDBG("%s alloc_skb failed\n", __FUNCTION__);
+		PDBG("%s alloc_skb failed\n", __func__);
 		return -ENOMEM;
 	}
 	wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
@@ -225,7 +225,7 @@ static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
 	}
 out:
 	mutex_unlock(&uctx->lock);
-	PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid);
+	PDBG("%s qpid 0x%x\n", __func__, qpid);
 	return qpid;
 }
 
@@ -237,7 +237,7 @@ static void put_qpid(struct cxio_rdev *rdev_p, u32 qpid,
 	entry = kmalloc(sizeof *entry, GFP_KERNEL);
 	if (!entry)
 		return;
-	PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid);
+	PDBG("%s qpid 0x%x\n", __func__, qpid);
 	entry->qpid = qpid;
 	mutex_lock(&uctx->lock);
 	list_add_tail(&entry->entry, &uctx->qpids);
@@ -300,7 +300,7 @@ int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
 	if (!kernel_domain)
 		wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
 			(wq->qpid << rdev_p->qpshift);
-	PDBG("%s qpid 0x%x doorbell 0x%p udb 0x%llx\n", __FUNCTION__,
+	PDBG("%s qpid 0x%x doorbell 0x%p udb 0x%llx\n", __func__,
 	     wq->qpid, wq->doorbell, (unsigned long long) wq->udb);
 	return 0;
 err4:
@@ -345,7 +345,7 @@ static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq)
 {
 	struct t3_cqe cqe;
 
-	PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __FUNCTION__,
+	PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,
 	     wq, cq, cq->sw_rptr, cq->sw_wptr);
 	memset(&cqe, 0, sizeof(cqe));
 	cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
@@ -363,10 +363,10 @@ void cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
 {
 	u32 ptr;
 
-	PDBG("%s wq %p cq %p\n", __FUNCTION__, wq, cq);
+	PDBG("%s wq %p cq %p\n", __func__, wq, cq);
 
 	/* flush RQ */
-	PDBG("%s rq_rptr %u rq_wptr %u skip count %u\n", __FUNCTION__,
+	PDBG("%s rq_rptr %u rq_wptr %u skip count %u\n", __func__,
 	     wq->rq_rptr, wq->rq_wptr, count);
 	ptr = wq->rq_rptr + count;
 	while (ptr++ != wq->rq_wptr)
@@ -378,7 +378,7 @@ static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
 {
 	struct t3_cqe cqe;
 
-	PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __FUNCTION__,
+	PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,
 	     wq, cq, cq->sw_rptr, cq->sw_wptr);
 	memset(&cqe, 0, sizeof(cqe));
 	cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
@@ -415,11 +415,11 @@ void cxio_flush_hw_cq(struct t3_cq *cq)
 {
 	struct t3_cqe *cqe, *swcqe;
 
-	PDBG("%s cq %p cqid 0x%x\n", __FUNCTION__, cq, cq->cqid);
+	PDBG("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
 	cqe = cxio_next_hw_cqe(cq);
 	while (cqe) {
 		PDBG("%s flushing hwcq rptr 0x%x to swcq wptr 0x%x\n",
-		     __FUNCTION__, cq->rptr, cq->sw_wptr);
+		     __func__, cq->rptr, cq->sw_wptr);
 		swcqe = cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2);
 		*swcqe = *cqe;
 		swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
@@ -461,7 +461,7 @@ void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
 			(*count)++;
 		ptr++;
 	}
-	PDBG("%s cq %p count %d\n", __FUNCTION__, cq, *count);
+	PDBG("%s cq %p count %d\n", __func__, cq, *count);
 }
 
 void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
@@ -470,7 +470,7 @@ void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
 	u32 ptr;
 
 	*count = 0;
-	PDBG("%s count zero %d\n", __FUNCTION__, *count);
+	PDBG("%s count zero %d\n", __func__, *count);
 	ptr = cq->sw_rptr;
 	while (!Q_EMPTY(ptr, cq->sw_wptr)) {
 		cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
@@ -479,7 +479,7 @@ void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
 			(*count)++;
 		ptr++;
 	}
-	PDBG("%s cq %p count %d\n", __FUNCTION__, cq, *count);
+	PDBG("%s cq %p count %d\n", __func__, cq, *count);
 }
 
 static int cxio_hal_init_ctrl_cq(struct cxio_rdev *rdev_p)
@@ -506,12 +506,12 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
 
 	skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
 	if (!skb) {
-		PDBG("%s alloc_skb failed\n", __FUNCTION__);
+		PDBG("%s alloc_skb failed\n", __func__);
 		return -ENOMEM;
 	}
 	err = cxio_hal_init_ctrl_cq(rdev_p);
 	if (err) {
-		PDBG("%s err %d initializing ctrl_cq\n", __FUNCTION__, err);
+		PDBG("%s err %d initializing ctrl_cq\n", __func__, err);
 		goto err;
 	}
 	rdev_p->ctrl_qp.workq = dma_alloc_coherent(
@@ -521,7 +521,7 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
 					&(rdev_p->ctrl_qp.dma_addr),
 					GFP_KERNEL);
 	if (!rdev_p->ctrl_qp.workq) {
-		PDBG("%s dma_alloc_coherent failed\n", __FUNCTION__);
+		PDBG("%s dma_alloc_coherent failed\n", __func__);
 		err = -ENOMEM;
 		goto err;
 	}
@@ -591,25 +591,25 @@ static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
 	addr &= 0x7FFFFFF;
 	nr_wqe = len % 96 ? len / 96 + 1 : len / 96;	/* 96B max per WQE */
 	PDBG("%s wptr 0x%x rptr 0x%x len %d, nr_wqe %d data %p addr 0x%0x\n",
-	     __FUNCTION__, rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, len,
+	     __func__, rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, len,
 	     nr_wqe, data, addr);
 	utx_len = 3;	/* in 32B unit */
 	for (i = 0; i < nr_wqe; i++) {
 		if (Q_FULL(rdev_p->ctrl_qp.rptr, rdev_p->ctrl_qp.wptr,
 			   T3_CTRL_QP_SIZE_LOG2)) {
 			PDBG("%s ctrl_qp full wtpr 0x%0x rptr 0x%0x, "
-			     "wait for more space i %d\n", __FUNCTION__,
+			     "wait for more space i %d\n", __func__,
 			     rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, i);
 			if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
 				     !Q_FULL(rdev_p->ctrl_qp.rptr,
 					     rdev_p->ctrl_qp.wptr,
 					     T3_CTRL_QP_SIZE_LOG2))) {
 				PDBG("%s ctrl_qp workq interrupted\n",
-				     __FUNCTION__);
+				     __func__);
 				return -ERESTARTSYS;
 			}
 			PDBG("%s ctrl_qp wakeup, continue posting work request "
-			     "i %d\n", __FUNCTION__, i);
+			     "i %d\n", __func__, i);
 		}
 		wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
 				(1 << T3_CTRL_QP_SIZE_LOG2)));
@@ -630,7 +630,7 @@ static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
 		if ((i != 0) &&
 		    (i % (((1 << T3_CTRL_QP_SIZE_LOG2)) >> 1) == 0)) {
 			flag = T3_COMPLETION_FLAG;
-			PDBG("%s force completion at i %d\n", __FUNCTION__, i);
+			PDBG("%s force completion at i %d\n", __func__, i);
 		}
 
 		/* build the utx mem command */
@@ -701,7 +701,7 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
 		*stag = (stag_idx << 8) | ((*stag) & 0xFF);
 	}
 	PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
-	     __FUNCTION__, stag_state, type, pdid, stag_idx);
+	     __func__, stag_state, type, pdid, stag_idx);
 
 	if (reset_tpt_entry)
 		cxio_hal_pblpool_free(rdev_p, *pbl_addr, *pbl_size << 3);
@@ -718,7 +718,7 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
 	if (pbl) {
 
 		PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
-		     __FUNCTION__, *pbl_addr, rdev_p->rnic_info.pbl_base,
+		     __func__, *pbl_addr, rdev_p->rnic_info.pbl_base,
 		     *pbl_size);
 		err = cxio_hal_ctrl_qp_write_mem(rdev_p,
 						 (*pbl_addr >> 5),
@@ -814,7 +814,7 @@ int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
 	struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_ATOMIC);
 	if (!skb)
 		return -ENOMEM;
-	PDBG("%s rdev_p %p\n", __FUNCTION__, rdev_p);
+	PDBG("%s rdev_p %p\n", __func__, rdev_p);
 	wqe = (struct t3_rdma_init_wr *) __skb_put(skb, sizeof(*wqe));
 	wqe->wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_INIT));
 	wqe->wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(attr->tid) |
@@ -856,7 +856,7 @@ static int cxio_hal_ev_handler(struct t3cdev *t3cdev_p, struct sk_buff *skb)
 	struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;
 	PDBG("%d: %s cq_id 0x%x cq_ptr 0x%x genbit %0x overflow %0x an %0x"
 	     " se %0x notify %0x cqbranch %0x creditth %0x\n",
-	     cnt, __FUNCTION__, RSPQ_CQID(rsp_msg), RSPQ_CQPTR(rsp_msg),
+	     cnt, __func__, RSPQ_CQID(rsp_msg), RSPQ_CQPTR(rsp_msg),
 	     RSPQ_GENBIT(rsp_msg), RSPQ_OVERFLOW(rsp_msg), RSPQ_AN(rsp_msg),
 	     RSPQ_SE(rsp_msg), RSPQ_NOTIFY(rsp_msg), RSPQ_CQBRANCH(rsp_msg),
 	     RSPQ_CREDIT_THRESH(rsp_msg));
@@ -868,7 +868,7 @@ static int cxio_hal_ev_handler(struct t3cdev *t3cdev_p, struct sk_buff *skb)
 	     CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
 	rdev_p = (struct cxio_rdev *)t3cdev_p->ulp;
 	if (!rdev_p) {
-		PDBG("%s called by t3cdev %p with null ulp\n", __FUNCTION__,
+		PDBG("%s called by t3cdev %p with null ulp\n", __func__,
 		     t3cdev_p);
 		return 0;
 	}
@@ -908,13 +908,13 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
 		strncpy(rdev_p->dev_name, rdev_p->t3cdev_p->name,
 			T3_MAX_DEV_NAME_LEN);
 	} else {
-		PDBG("%s t3cdev_p or dev_name must be set\n", __FUNCTION__);
+		PDBG("%s t3cdev_p or dev_name must be set\n", __func__);
 		return -EINVAL;
 	}
 
 	list_add_tail(&rdev_p->entry, &rdev_list);
 
-	PDBG("%s opening rnic dev %s\n", __FUNCTION__, rdev_p->dev_name);
+	PDBG("%s opening rnic dev %s\n", __func__, rdev_p->dev_name);
 	memset(&rdev_p->ctrl_qp, 0, sizeof(rdev_p->ctrl_qp));
 	if (!rdev_p->t3cdev_p)
 		rdev_p->t3cdev_p = dev2t3cdev(netdev_p);
@@ -923,14 +923,14 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
 					 &(rdev_p->rnic_info));
 	if (err) {
 		printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
-		       __FUNCTION__, rdev_p->t3cdev_p, err);
+		       __func__, rdev_p->t3cdev_p, err);
 		goto err1;
 	}
 	err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_PORTS,
 				    &(rdev_p->port_info));
 	if (err) {
 		printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
-		       __FUNCTION__, rdev_p->t3cdev_p, err);
+		       __func__, rdev_p->t3cdev_p, err);
 		goto err1;
 	}
 
@@ -947,7 +947,7 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
 	rdev_p->qpmask = (65536 >> ilog2(rdev_p->qpnr)) - 1;
 	PDBG("%s rnic %s info: tpt_base 0x%0x tpt_top 0x%0x num stags %d "
 	     "pbl_base 0x%0x pbl_top 0x%0x rqt_base 0x%0x, rqt_top 0x%0x\n",
-	     __FUNCTION__, rdev_p->dev_name, rdev_p->rnic_info.tpt_base,
+	     __func__, rdev_p->dev_name, rdev_p->rnic_info.tpt_base,
 	     rdev_p->rnic_info.tpt_top, cxio_num_stags(rdev_p),
 	     rdev_p->rnic_info.pbl_base,
 	     rdev_p->rnic_info.pbl_top, rdev_p->rnic_info.rqt_base,
@@ -961,7 +961,7 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
 	err = cxio_hal_init_ctrl_qp(rdev_p);
 	if (err) {
 		printk(KERN_ERR "%s error %d initializing ctrl_qp.\n",
-		       __FUNCTION__, err);
+		       __func__, err);
 		goto err1;
 	}
 	err = cxio_hal_init_resource(rdev_p, cxio_num_stags(rdev_p), 0,
@@ -969,19 +969,19 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
 				     T3_MAX_NUM_PD);
 	if (err) {
 		printk(KERN_ERR "%s error %d initializing hal resources.\n",
-		       __FUNCTION__, err);
+		       __func__, err);
 		goto err2;
 	}
 	err = cxio_hal_pblpool_create(rdev_p);
 	if (err) {
 		printk(KERN_ERR "%s error %d initializing pbl mem pool.\n",
-		       __FUNCTION__, err);
+		       __func__, err);
 		goto err3;
 	}
 	err = cxio_hal_rqtpool_create(rdev_p);
 	if (err) {
 		printk(KERN_ERR "%s error %d initializing rqt mem pool.\n",
-		       __FUNCTION__, err);
+		       __func__, err);
 		goto err4;
 	}
 	return 0;
@@ -1043,7 +1043,7 @@ static void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
 		 * Insert this completed cqe into the swcq.
 		 */
 		PDBG("%s moving cqe into swcq sq idx %ld cq idx %ld\n",
-		     __FUNCTION__, Q_PTR2IDX(ptr, wq->sq_size_log2),
+		     __func__, Q_PTR2IDX(ptr, wq->sq_size_log2),
 		     Q_PTR2IDX(cq->sw_wptr, cq->size_log2));
 		sqp->cqe.header |= htonl(V_CQE_SWCQE(1));
 		*(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2))
@@ -1112,7 +1112,7 @@ int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
 
 	PDBG("%s CQE OOO %d qpid 0x%0x genbit %d type %d status 0x%0x"
 	     " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
-	     __FUNCTION__, CQE_OOO(*hw_cqe), CQE_QPID(*hw_cqe),
+	     __func__, CQE_OOO(*hw_cqe), CQE_QPID(*hw_cqe),
 	     CQE_GENBIT(*hw_cqe), CQE_TYPE(*hw_cqe), CQE_STATUS(*hw_cqe),
 	     CQE_OPCODE(*hw_cqe), CQE_LEN(*hw_cqe), CQE_WRID_HI(*hw_cqe),
 	     CQE_WRID_LOW(*hw_cqe));
@@ -1215,7 +1215,7 @@ int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
 		struct t3_swsq *sqp;
 
 		PDBG("%s out of order completion going in swsq at idx %ld\n",
-		     __FUNCTION__,
+		     __func__,
 		     Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2));
 		sqp = wq->sq +
 		      Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2);
@@ -1234,13 +1234,13 @@ proc_cqe:
 	 */
 	if (SQ_TYPE(*hw_cqe)) {
 		wq->sq_rptr = CQE_WRID_SQ_WPTR(*hw_cqe);
-		PDBG("%s completing sq idx %ld\n", __FUNCTION__,
+		PDBG("%s completing sq idx %ld\n", __func__,
 		     Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2));
 		*cookie = (wq->sq +
 			   Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2))->wr_id;
 		wq->sq_rptr++;
 	} else {
-		PDBG("%s completing rq idx %ld\n", __FUNCTION__,
+		PDBG("%s completing rq idx %ld\n", __func__,
 		     Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
 		*cookie = *(wq->rq + Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
 		wq->rq_rptr++;
@@ -1255,11 +1255,11 @@ flush_wq:
 skip_cqe:
 	if (SW_CQE(*hw_cqe)) {
 		PDBG("%s cq %p cqid 0x%x skip sw cqe sw_rptr 0x%x\n",
-		     __FUNCTION__, cq, cq->cqid, cq->sw_rptr);
+		     __func__, cq, cq->cqid, cq->sw_rptr);
 		++cq->sw_rptr;
 	} else {
 		PDBG("%s cq %p cqid 0x%x skip hw cqe rptr 0x%x\n",
-		     __FUNCTION__, cq, cq->cqid, cq->rptr);
+		     __func__, cq, cq->cqid, cq->rptr);
 		++cq->rptr;
 
 		/*
diff --git a/drivers/infiniband/hw/cxgb3/cxio_resource.c b/drivers/infiniband/hw/cxgb3/cxio_resource.c
index d3095ae5bc2e..45ed4f25ef78 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_resource.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_resource.c
@@ -206,13 +206,13 @@ void cxio_hal_put_stag(struct cxio_hal_resource *rscp, u32 stag)
 u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp)
 {
 	u32 qpid = cxio_hal_get_resource(rscp->qpid_fifo);
-	PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid);
+	PDBG("%s qpid 0x%x\n", __func__, qpid);
 	return qpid;
 }
 
 void cxio_hal_put_qpid(struct cxio_hal_resource *rscp, u32 qpid)
 {
-	PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid);
+	PDBG("%s qpid 0x%x\n", __func__, qpid);
 	cxio_hal_put_resource(rscp->qpid_fifo, qpid);
 }
 
@@ -255,13 +255,13 @@ void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp)
 u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size)
 {
 	unsigned long addr = gen_pool_alloc(rdev_p->pbl_pool, size);
-	PDBG("%s addr 0x%x size %d\n", __FUNCTION__, (u32)addr, size);
+	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
 	return (u32)addr;
 }
 
 void cxio_hal_pblpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
 {
-	PDBG("%s addr 0x%x size %d\n", __FUNCTION__, addr, size);
+	PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
 	gen_pool_free(rdev_p->pbl_pool, (unsigned long)addr, size);
 }
 
@@ -292,13 +292,13 @@ void cxio_hal_pblpool_destroy(struct cxio_rdev *rdev_p)
 u32 cxio_hal_rqtpool_alloc(struct cxio_rdev *rdev_p, int size)
 {
 	unsigned long addr = gen_pool_alloc(rdev_p->rqt_pool, size << 6);
-	PDBG("%s addr 0x%x size %d\n", __FUNCTION__, (u32)addr, size << 6);
+	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
 	return (u32)addr;
 }
 
 void cxio_hal_rqtpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
 {
-	PDBG("%s addr 0x%x size %d\n", __FUNCTION__, addr, size << 6);
+	PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
 	gen_pool_free(rdev_p->rqt_pool, (unsigned long)addr, size << 6);
 }
 
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index 0315c9d9fce9..6ba4138c8ec3 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -65,7 +65,7 @@ static DEFINE_MUTEX(dev_mutex);
 
 static void rnic_init(struct iwch_dev *rnicp)
 {
-	PDBG("%s iwch_dev %p\n", __FUNCTION__, rnicp);
+	PDBG("%s iwch_dev %p\n", __func__, rnicp);
 	idr_init(&rnicp->cqidr);
 	idr_init(&rnicp->qpidr);
 	idr_init(&rnicp->mmidr);
@@ -106,7 +106,7 @@ static void open_rnic_dev(struct t3cdev *tdev)
 	struct iwch_dev *rnicp;
 	static int vers_printed;
 
-	PDBG("%s t3cdev %p\n", __FUNCTION__, tdev);
+	PDBG("%s t3cdev %p\n", __func__, tdev);
 	if (!vers_printed++)
 		printk(KERN_INFO MOD "Chelsio T3 RDMA Driver - version %s\n",
 		       DRV_VERSION);
@@ -144,7 +144,7 @@ static void open_rnic_dev(struct t3cdev *tdev)
 static void close_rnic_dev(struct t3cdev *tdev)
 {
 	struct iwch_dev *dev, *tmp;
-	PDBG("%s t3cdev %p\n", __FUNCTION__, tdev);
+	PDBG("%s t3cdev %p\n", __func__, tdev);
 	mutex_lock(&dev_mutex);
 	list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
 		if (dev->rdev.t3cdev_p == tdev) {
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 99f2f2a46bf7..72ca360c3dbc 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -110,9 +110,9 @@ static void connect_reply_upcall(struct iwch_ep *ep, int status);
 
 static void start_ep_timer(struct iwch_ep *ep)
 {
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	if (timer_pending(&ep->timer)) {
-		PDBG("%s stopped / restarted timer ep %p\n", __FUNCTION__, ep);
+		PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
 		del_timer_sync(&ep->timer);
 	} else
 		get_ep(&ep->com);
@@ -124,7 +124,7 @@ static void start_ep_timer(struct iwch_ep *ep)
 
 static void stop_ep_timer(struct iwch_ep *ep)
 {
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	del_timer_sync(&ep->timer);
 	put_ep(&ep->com);
 }
@@ -190,7 +190,7 @@ int iwch_resume_tid(struct iwch_ep *ep)
 
 static void set_emss(struct iwch_ep *ep, u16 opt)
 {
-	PDBG("%s ep %p opt %u\n", __FUNCTION__, ep, opt);
+	PDBG("%s ep %p opt %u\n", __func__, ep, opt);
 	ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40;
 	if (G_TCPOPT_TSTAMP(opt))
 		ep->emss -= 12;
@@ -220,7 +220,7 @@ static void state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
 	unsigned long flags;
 
 	spin_lock_irqsave(&epc->lock, flags);
-	PDBG("%s - %s -> %s\n", __FUNCTION__, states[epc->state], states[new]);
+	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
 	__state_set(epc, new);
 	spin_unlock_irqrestore(&epc->lock, flags);
 	return;
@@ -236,7 +236,7 @@ static void *alloc_ep(int size, gfp_t gfp)
 		spin_lock_init(&epc->lock);
 		init_waitqueue_head(&epc->waitq);
 	}
-	PDBG("%s alloc ep %p\n", __FUNCTION__, epc);
+	PDBG("%s alloc ep %p\n", __func__, epc);
 	return epc;
 }
 
@@ -244,13 +244,13 @@ void __free_ep(struct kref *kref)
 {
 	struct iwch_ep_common *epc;
 	epc = container_of(kref, struct iwch_ep_common, kref);
-	PDBG("%s ep %p state %s\n", __FUNCTION__, epc, states[state_read(epc)]);
+	PDBG("%s ep %p state %s\n", __func__, epc, states[state_read(epc)]);
 	kfree(epc);
 }
 
 static void release_ep_resources(struct iwch_ep *ep)
 {
-	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
+	PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
 	cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
 	dst_release(ep->dst);
 	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
@@ -349,7 +349,7 @@ static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
 
 static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
 {
-	PDBG("%s t3cdev %p\n", __FUNCTION__, dev);
+	PDBG("%s t3cdev %p\n", __func__, dev);
 	kfree_skb(skb);
 }
 
@@ -370,7 +370,7 @@ static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
 {
 	struct cpl_abort_req *req = cplhdr(skb);
 
-	PDBG("%s t3cdev %p\n", __FUNCTION__, dev);
+	PDBG("%s t3cdev %p\n", __func__, dev);
 	req->cmd = CPL_ABORT_NO_RST;
 	cxgb3_ofld_send(dev, skb);
 }
@@ -380,10 +380,10 @@ static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
 	struct cpl_close_con_req *req;
 	struct sk_buff *skb;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	skb = get_skb(NULL, sizeof(*req), gfp);
 	if (!skb) {
-		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __FUNCTION__);
+		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
 		return -ENOMEM;
 	}
 	skb->priority = CPL_PRIORITY_DATA;
@@ -400,11 +400,11 @@ static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
 {
 	struct cpl_abort_req *req;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	skb = get_skb(skb, sizeof(*req), gfp);
 	if (!skb) {
 		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
-		       __FUNCTION__);
+		       __func__);
 		return -ENOMEM;
 	}
 	skb->priority = CPL_PRIORITY_DATA;
@@ -426,12 +426,12 @@ static int send_connect(struct iwch_ep *ep)
 	unsigned int mtu_idx;
 	int wscale;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 
 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
 	if (!skb) {
 		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
-		       __FUNCTION__);
+		       __func__);
 		return -ENOMEM;
 	}
 	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
@@ -470,7 +470,7 @@ static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
 	struct mpa_message *mpa;
 	int len;
 
-	PDBG("%s ep %p pd_len %d\n", __FUNCTION__, ep, ep->plen);
+	PDBG("%s ep %p pd_len %d\n", __func__, ep, ep->plen);
 
 	BUG_ON(skb_cloned(skb));
 
@@ -530,13 +530,13 @@ static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
 	struct mpa_message *mpa;
 	struct sk_buff *skb;
 
-	PDBG("%s ep %p plen %d\n", __FUNCTION__, ep, plen);
+	PDBG("%s ep %p plen %d\n", __func__, ep, plen);
 
 	mpalen = sizeof(*mpa) + plen;
 
 	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
 	if (!skb) {
-		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __FUNCTION__);
+		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
 		return -ENOMEM;
 	}
 	skb_reserve(skb, sizeof(*req));
@@ -580,13 +580,13 @@ static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
 	int len;
 	struct sk_buff *skb;
 
-	PDBG("%s ep %p plen %d\n", __FUNCTION__, ep, plen);
+	PDBG("%s ep %p plen %d\n", __func__, ep, plen);
 
 	mpalen = sizeof(*mpa) + plen;
 
 	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
 	if (!skb) {
-		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __FUNCTION__);
+		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
 		return -ENOMEM;
 	}
 	skb->priority = CPL_PRIORITY_DATA;
@@ -630,7 +630,7 @@ static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	struct cpl_act_establish *req = cplhdr(skb);
 	unsigned int tid = GET_TID(req);
 
-	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, tid);
+	PDBG("%s ep %p tid %d\n", __func__, ep, tid);
 
 	dst_confirm(ep->dst);
 
@@ -663,7 +663,7 @@ static void close_complete_upcall(struct iwch_ep *ep)
 {
 	struct iw_cm_event event;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_CLOSE;
 	if (ep->com.cm_id) {
@@ -680,7 +680,7 @@ static void peer_close_upcall(struct iwch_ep *ep)
 {
 	struct iw_cm_event event;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_DISCONNECT;
 	if (ep->com.cm_id) {
@@ -694,7 +694,7 @@ static void peer_abort_upcall(struct iwch_ep *ep)
 {
 	struct iw_cm_event event;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_CLOSE;
 	event.status = -ECONNRESET;
@@ -712,7 +712,7 @@ static void connect_reply_upcall(struct iwch_ep *ep, int status)
 {
 	struct iw_cm_event event;
 
-	PDBG("%s ep %p status %d\n", __FUNCTION__, ep, status);
+	PDBG("%s ep %p status %d\n", __func__, ep, status);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_CONNECT_REPLY;
 	event.status = status;
@@ -724,7 +724,7 @@ static void connect_reply_upcall(struct iwch_ep *ep, int status)
 		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
 	}
 	if (ep->com.cm_id) {
-		PDBG("%s ep %p tid %d status %d\n", __FUNCTION__, ep,
+		PDBG("%s ep %p tid %d status %d\n", __func__, ep,
 		     ep->hwtid, status);
 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
 	}
@@ -739,7 +739,7 @@ static void connect_request_upcall(struct iwch_ep *ep)
 {
 	struct iw_cm_event event;
 
-	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
+	PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_CONNECT_REQUEST;
 	event.local_addr = ep->com.local_addr;
@@ -759,11 +759,11 @@ static void established_upcall(struct iwch_ep *ep)
 {
 	struct iw_cm_event event;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_ESTABLISHED;
 	if (ep->com.cm_id) {
-		PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
+		PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
 	}
 }
@@ -773,7 +773,7 @@ static int update_rx_credits(struct iwch_ep *ep, u32 credits)
 	struct cpl_rx_data_ack *req;
 	struct sk_buff *skb;
 
-	PDBG("%s ep %p credits %u\n", __FUNCTION__, ep, credits);
+	PDBG("%s ep %p credits %u\n", __func__, ep, credits);
 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
 	if (!skb) {
 		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
@@ -797,7 +797,7 @@ static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
 	enum iwch_qp_attr_mask mask;
 	int err;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 
 	/*
 	 * Stop mpa timer.  If it expired, then the state has
@@ -884,7 +884,7 @@ static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
 	ep->mpa_attr.version = mpa_rev;
 	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
-	     "xmit_marker_enabled=%d, version=%d\n", __FUNCTION__,
+	     "xmit_marker_enabled=%d, version=%d\n", __func__,
 	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
 	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
 
@@ -915,7 +915,7 @@ static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
 	struct mpa_message *mpa;
 	u16 plen;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 
 	/*
 	 * Stop mpa timer.  If it expired, then the state has
@@ -935,7 +935,7 @@ static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
 		return;
 	}
 
-	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
 
 	/*
 	 * Copy the new data into our accumulation buffer.
@@ -950,7 +950,7 @@ static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
 	 */
 	if (ep->mpa_pkt_len < sizeof(*mpa))
 		return;
-	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
 	mpa = (struct mpa_message *) ep->mpa_pkt;
 
 	/*
@@ -1000,7 +1000,7 @@ static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
 	ep->mpa_attr.version = mpa_rev;
 	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
-	     "xmit_marker_enabled=%d, version=%d\n", __FUNCTION__,
+	     "xmit_marker_enabled=%d, version=%d\n", __func__,
 	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
 	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
 
@@ -1017,7 +1017,7 @@ static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	struct cpl_rx_data *hdr = cplhdr(skb);
 	unsigned int dlen = ntohs(hdr->len);
 
-	PDBG("%s ep %p dlen %u\n", __FUNCTION__, ep, dlen);
+	PDBG("%s ep %p dlen %u\n", __func__, ep, dlen);
 
 	skb_pull(skb, sizeof(*hdr));
 	skb_trim(skb, dlen);
@@ -1037,7 +1037,7 @@ static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	default:
 		printk(KERN_ERR MOD "%s Unexpected streaming data."
 		       " ep %p state %d tid %d\n",
-		       __FUNCTION__, ep, state_read(&ep->com), ep->hwtid);
+		       __func__, ep, state_read(&ep->com), ep->hwtid);
 
 		/*
 		 * The ep will timeout and inform the ULP of the failure.
@@ -1063,7 +1063,7 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	struct cpl_wr_ack *hdr = cplhdr(skb);
 	unsigned int credits = ntohs(hdr->credits);
 
-	PDBG("%s ep %p credits %u\n", __FUNCTION__, ep, credits);
+	PDBG("%s ep %p credits %u\n", __func__, ep, credits);
 
 	if (credits == 0)
 		return CPL_RET_BUF_DONE;
@@ -1084,7 +1084,7 @@ static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 {
 	struct iwch_ep *ep = ctx;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 
 	/*
 	 * We get 2 abort replies from the HW.  The first one must
@@ -1115,7 +1115,7 @@ static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	struct iwch_ep *ep = ctx;
 	struct cpl_act_open_rpl *rpl = cplhdr(skb);
 
-	PDBG("%s ep %p status %u errno %d\n", __FUNCTION__, ep, rpl->status,
+	PDBG("%s ep %p status %u errno %d\n", __func__, ep, rpl->status,
 	     status2errno(rpl->status));
 	connect_reply_upcall(ep, status2errno(rpl->status));
 	state_set(&ep->com, DEAD);
@@ -1133,7 +1133,7 @@ static int listen_start(struct iwch_listen_ep *ep)
 	struct sk_buff *skb;
 	struct cpl_pass_open_req *req;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
 	if (!skb) {
 		printk(KERN_ERR MOD "t3c_listen_start failed to alloc skb!\n");
@@ -1162,7 +1162,7 @@ static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	struct iwch_listen_ep *ep = ctx;
 	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
 
-	PDBG("%s ep %p status %d error %d\n", __FUNCTION__, ep,
+	PDBG("%s ep %p status %d error %d\n", __func__, ep,
 	     rpl->status, status2errno(rpl->status));
 	ep->com.rpl_err = status2errno(rpl->status);
 	ep->com.rpl_done = 1;
@@ -1176,10 +1176,10 @@ static int listen_stop(struct iwch_listen_ep *ep)
 	struct sk_buff *skb;
 	struct cpl_close_listserv_req *req;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
 	if (!skb) {
-		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __FUNCTION__);
+		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
 		return -ENOMEM;
 	}
 	req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));
@@ -1197,7 +1197,7 @@ static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
 	struct iwch_listen_ep *ep = ctx;
 	struct cpl_close_listserv_rpl *rpl = cplhdr(skb);
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	ep->com.rpl_err = status2errno(rpl->status);
 	ep->com.rpl_done = 1;
 	wake_up(&ep->com.waitq);
@@ -1211,7 +1211,7 @@ static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
 	u32 opt0h, opt0l, opt2;
 	int wscale;
 
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	BUG_ON(skb_cloned(skb));
 	skb_trim(skb, sizeof(*rpl));
 	skb_get(skb);
@@ -1244,7 +1244,7 @@ static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
 static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
 		      struct sk_buff *skb)
 {
-	PDBG("%s t3cdev %p tid %u peer_ip %x\n", __FUNCTION__, tdev, hwtid,
+	PDBG("%s t3cdev %p tid %u peer_ip %x\n", __func__, tdev, hwtid,
 	     peer_ip);
 	BUG_ON(skb_cloned(skb));
 	skb_trim(skb, sizeof(struct cpl_tid_release));
@@ -1279,11 +1279,11 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	struct rtable *rt;
 	struct iff_mac tim;
 
-	PDBG("%s parent ep %p tid %u\n", __FUNCTION__, parent_ep, hwtid);
+	PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
 
 	if (state_read(&parent_ep->com) != LISTEN) {
 		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
-		       __FUNCTION__);
+		       __func__);
 		goto reject;
 	}
 
@@ -1295,7 +1295,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
 		printk(KERN_ERR
 			"%s bad dst mac %02x %02x %02x %02x %02x %02x\n",
-			__FUNCTION__,
+			__func__,
 			req->dst_mac[0],
 			req->dst_mac[1],
 			req->dst_mac[2],
@@ -1313,21 +1313,21 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 			 req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));
 	if (!rt) {
 		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
-		       __FUNCTION__);
+		       __func__);
 		goto reject;
 	}
 	dst = &rt->u.dst;
 	l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev);
 	if (!l2t) {
 		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
-		       __FUNCTION__);
+		       __func__);
 		dst_release(dst);
 		goto reject;
 	}
 	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
 	if (!child_ep) {
 		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
-		       __FUNCTION__);
+		       __func__);
 		l2t_release(L2DATA(tdev), l2t);
 		dst_release(dst);
 		goto reject;
@@ -1362,7 +1362,7 @@ static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1362 struct iwch_ep *ep = ctx; 1362 struct iwch_ep *ep = ctx;
1363 struct cpl_pass_establish *req = cplhdr(skb); 1363 struct cpl_pass_establish *req = cplhdr(skb);
1364 1364
1365 PDBG("%s ep %p\n", __FUNCTION__, ep); 1365 PDBG("%s ep %p\n", __func__, ep);
1366 ep->snd_seq = ntohl(req->snd_isn); 1366 ep->snd_seq = ntohl(req->snd_isn);
1367 ep->rcv_seq = ntohl(req->rcv_isn); 1367 ep->rcv_seq = ntohl(req->rcv_isn);
1368 1368
@@ -1383,7 +1383,7 @@ static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1383 int disconnect = 1; 1383 int disconnect = 1;
1384 int release = 0; 1384 int release = 0;
1385 1385
1386 PDBG("%s ep %p\n", __FUNCTION__, ep); 1386 PDBG("%s ep %p\n", __func__, ep);
1387 dst_confirm(ep->dst); 1387 dst_confirm(ep->dst);
1388 1388
1389 spin_lock_irqsave(&ep->com.lock, flags); 1389 spin_lock_irqsave(&ep->com.lock, flags);
@@ -1473,7 +1473,7 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1473 int state; 1473 int state;
1474 1474
1475 if (is_neg_adv_abort(req->status)) { 1475 if (is_neg_adv_abort(req->status)) {
1476 PDBG("%s neg_adv_abort ep %p tid %d\n", __FUNCTION__, ep, 1476 PDBG("%s neg_adv_abort ep %p tid %d\n", __func__, ep,
1477 ep->hwtid); 1477 ep->hwtid);
1478 t3_l2t_send_event(ep->com.tdev, ep->l2t); 1478 t3_l2t_send_event(ep->com.tdev, ep->l2t);
1479 return CPL_RET_BUF_DONE; 1479 return CPL_RET_BUF_DONE;
@@ -1489,7 +1489,7 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1489 } 1489 }
1490 1490
1491 state = state_read(&ep->com); 1491 state = state_read(&ep->com);
1492 PDBG("%s ep %p state %u\n", __FUNCTION__, ep, state); 1492 PDBG("%s ep %p state %u\n", __func__, ep, state);
1493 switch (state) { 1493 switch (state) {
1494 case CONNECTING: 1494 case CONNECTING:
1495 break; 1495 break;
@@ -1528,14 +1528,14 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1528 if (ret) 1528 if (ret)
1529 printk(KERN_ERR MOD 1529 printk(KERN_ERR MOD
1530 "%s - qp <- error failed!\n", 1530 "%s - qp <- error failed!\n",
1531 __FUNCTION__); 1531 __func__);
1532 } 1532 }
1533 peer_abort_upcall(ep); 1533 peer_abort_upcall(ep);
1534 break; 1534 break;
1535 case ABORTING: 1535 case ABORTING:
1536 break; 1536 break;
1537 case DEAD: 1537 case DEAD:
1538 PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __FUNCTION__); 1538 PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
1539 return CPL_RET_BUF_DONE; 1539 return CPL_RET_BUF_DONE;
1540 default: 1540 default:
1541 BUG_ON(1); 1541 BUG_ON(1);
@@ -1546,7 +1546,7 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1546 rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL); 1546 rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
1547 if (!rpl_skb) { 1547 if (!rpl_skb) {
1548 printk(KERN_ERR MOD "%s - cannot allocate skb!\n", 1548 printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
1549 __FUNCTION__); 1549 __func__);
1550 dst_release(ep->dst); 1550 dst_release(ep->dst);
1551 l2t_release(L2DATA(ep->com.tdev), ep->l2t); 1551 l2t_release(L2DATA(ep->com.tdev), ep->l2t);
1552 put_ep(&ep->com); 1552 put_ep(&ep->com);
@@ -1573,7 +1573,7 @@ static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1573 unsigned long flags; 1573 unsigned long flags;
1574 int release = 0; 1574 int release = 0;
1575 1575
1576 PDBG("%s ep %p\n", __FUNCTION__, ep); 1576 PDBG("%s ep %p\n", __func__, ep);
1577 BUG_ON(!ep); 1577 BUG_ON(!ep);
1578 1578
1579 /* The cm_id may be null if we failed to connect */ 1579 /* The cm_id may be null if we failed to connect */
@@ -1624,9 +1624,9 @@ static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1624{ 1624{
1625 struct iwch_ep *ep = ctx; 1625 struct iwch_ep *ep = ctx;
1626 1626
1627 PDBG("%s ep %p\n", __FUNCTION__, ep); 1627 PDBG("%s ep %p\n", __func__, ep);
1628 skb_pull(skb, sizeof(struct cpl_rdma_terminate)); 1628 skb_pull(skb, sizeof(struct cpl_rdma_terminate));
1629 PDBG("%s saving %d bytes of term msg\n", __FUNCTION__, skb->len); 1629 PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
1630 skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer, 1630 skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
1631 skb->len); 1631 skb->len);
1632 ep->com.qp->attr.terminate_msg_len = skb->len; 1632 ep->com.qp->attr.terminate_msg_len = skb->len;
@@ -1639,13 +1639,13 @@ static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1639 struct cpl_rdma_ec_status *rep = cplhdr(skb); 1639 struct cpl_rdma_ec_status *rep = cplhdr(skb);
1640 struct iwch_ep *ep = ctx; 1640 struct iwch_ep *ep = ctx;
1641 1641
1642 PDBG("%s ep %p tid %u status %d\n", __FUNCTION__, ep, ep->hwtid, 1642 PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid,
1643 rep->status); 1643 rep->status);
1644 if (rep->status) { 1644 if (rep->status) {
1645 struct iwch_qp_attributes attrs; 1645 struct iwch_qp_attributes attrs;
1646 1646
1647 printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n", 1647 printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n",
1648 __FUNCTION__, ep->hwtid); 1648 __func__, ep->hwtid);
1649 stop_ep_timer(ep); 1649 stop_ep_timer(ep);
1650 attrs.next_state = IWCH_QP_STATE_ERROR; 1650 attrs.next_state = IWCH_QP_STATE_ERROR;
1651 iwch_modify_qp(ep->com.qp->rhp, 1651 iwch_modify_qp(ep->com.qp->rhp,
@@ -1663,7 +1663,7 @@ static void ep_timeout(unsigned long arg)
1663 unsigned long flags; 1663 unsigned long flags;
1664 1664
1665 spin_lock_irqsave(&ep->com.lock, flags); 1665 spin_lock_irqsave(&ep->com.lock, flags);
1666 PDBG("%s ep %p tid %u state %d\n", __FUNCTION__, ep, ep->hwtid, 1666 PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
1667 ep->com.state); 1667 ep->com.state);
1668 switch (ep->com.state) { 1668 switch (ep->com.state) {
1669 case MPA_REQ_SENT: 1669 case MPA_REQ_SENT:
@@ -1693,7 +1693,7 @@ int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
1693{ 1693{
1694 int err; 1694 int err;
1695 struct iwch_ep *ep = to_ep(cm_id); 1695 struct iwch_ep *ep = to_ep(cm_id);
1696 PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid); 1696 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1697 1697
1698 if (state_read(&ep->com) == DEAD) { 1698 if (state_read(&ep->com) == DEAD) {
1699 put_ep(&ep->com); 1699 put_ep(&ep->com);
@@ -1718,7 +1718,7 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1718 struct iwch_dev *h = to_iwch_dev(cm_id->device); 1718 struct iwch_dev *h = to_iwch_dev(cm_id->device);
1719 struct iwch_qp *qp = get_qhp(h, conn_param->qpn); 1719 struct iwch_qp *qp = get_qhp(h, conn_param->qpn);
1720 1720
1721 PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid); 1721 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1722 if (state_read(&ep->com) == DEAD) 1722 if (state_read(&ep->com) == DEAD)
1723 return -ECONNRESET; 1723 return -ECONNRESET;
1724 1724
@@ -1739,7 +1739,7 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1739 ep->com.rpl_err = 0; 1739 ep->com.rpl_err = 0;
1740 ep->ird = conn_param->ird; 1740 ep->ird = conn_param->ird;
1741 ep->ord = conn_param->ord; 1741 ep->ord = conn_param->ord;
1742 PDBG("%s %d ird %d ord %d\n", __FUNCTION__, __LINE__, ep->ird, ep->ord); 1742 PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
1743 1743
1744 get_ep(&ep->com); 1744 get_ep(&ep->com);
1745 1745
@@ -1810,7 +1810,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1810 1810
1811 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 1811 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
1812 if (!ep) { 1812 if (!ep) {
1813 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__); 1813 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
1814 err = -ENOMEM; 1814 err = -ENOMEM;
1815 goto out; 1815 goto out;
1816 } 1816 }
@@ -1827,7 +1827,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1827 ep->com.cm_id = cm_id; 1827 ep->com.cm_id = cm_id;
1828 ep->com.qp = get_qhp(h, conn_param->qpn); 1828 ep->com.qp = get_qhp(h, conn_param->qpn);
1829 BUG_ON(!ep->com.qp); 1829 BUG_ON(!ep->com.qp);
1830 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __FUNCTION__, conn_param->qpn, 1830 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
1831 ep->com.qp, cm_id); 1831 ep->com.qp, cm_id);
1832 1832
1833 /* 1833 /*
@@ -1835,7 +1835,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1835 */ 1835 */
1836 ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep); 1836 ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep);
1837 if (ep->atid == -1) { 1837 if (ep->atid == -1) {
1838 printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __FUNCTION__); 1838 printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
1839 err = -ENOMEM; 1839 err = -ENOMEM;
1840 goto fail2; 1840 goto fail2;
1841 } 1841 }
@@ -1847,7 +1847,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1847 cm_id->local_addr.sin_port, 1847 cm_id->local_addr.sin_port,
1848 cm_id->remote_addr.sin_port, IPTOS_LOWDELAY); 1848 cm_id->remote_addr.sin_port, IPTOS_LOWDELAY);
1849 if (!rt) { 1849 if (!rt) {
1850 printk(KERN_ERR MOD "%s - cannot find route.\n", __FUNCTION__); 1850 printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
1851 err = -EHOSTUNREACH; 1851 err = -EHOSTUNREACH;
1852 goto fail3; 1852 goto fail3;
1853 } 1853 }
@@ -1857,7 +1857,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1857 ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour, 1857 ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour,
1858 ep->dst->neighbour->dev); 1858 ep->dst->neighbour->dev);
1859 if (!ep->l2t) { 1859 if (!ep->l2t) {
1860 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __FUNCTION__); 1860 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
1861 err = -ENOMEM; 1861 err = -ENOMEM;
1862 goto fail4; 1862 goto fail4;
1863 } 1863 }
@@ -1894,11 +1894,11 @@ int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
1894 1894
1895 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 1895 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
1896 if (!ep) { 1896 if (!ep) {
1897 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__); 1897 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
1898 err = -ENOMEM; 1898 err = -ENOMEM;
1899 goto fail1; 1899 goto fail1;
1900 } 1900 }
1901 PDBG("%s ep %p\n", __FUNCTION__, ep); 1901 PDBG("%s ep %p\n", __func__, ep);
1902 ep->com.tdev = h->rdev.t3cdev_p; 1902 ep->com.tdev = h->rdev.t3cdev_p;
1903 cm_id->add_ref(cm_id); 1903 cm_id->add_ref(cm_id);
1904 ep->com.cm_id = cm_id; 1904 ep->com.cm_id = cm_id;
@@ -1910,7 +1910,7 @@ int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
1910 */ 1910 */
1911 ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep); 1911 ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep);
1912 if (ep->stid == -1) { 1912 if (ep->stid == -1) {
 1913 printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __FUNCTION__); 1913 printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
1914 err = -ENOMEM; 1914 err = -ENOMEM;
1915 goto fail2; 1915 goto fail2;
1916 } 1916 }
@@ -1942,7 +1942,7 @@ int iwch_destroy_listen(struct iw_cm_id *cm_id)
1942 int err; 1942 int err;
1943 struct iwch_listen_ep *ep = to_listen_ep(cm_id); 1943 struct iwch_listen_ep *ep = to_listen_ep(cm_id);
1944 1944
1945 PDBG("%s ep %p\n", __FUNCTION__, ep); 1945 PDBG("%s ep %p\n", __func__, ep);
1946 1946
1947 might_sleep(); 1947 might_sleep();
1948 state_set(&ep->com, DEAD); 1948 state_set(&ep->com, DEAD);
@@ -1965,11 +1965,11 @@ int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
1965 1965
1966 spin_lock_irqsave(&ep->com.lock, flags); 1966 spin_lock_irqsave(&ep->com.lock, flags);
1967 1967
1968 PDBG("%s ep %p state %s, abrupt %d\n", __FUNCTION__, ep, 1968 PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
1969 states[ep->com.state], abrupt); 1969 states[ep->com.state], abrupt);
1970 1970
1971 if (ep->com.state == DEAD) { 1971 if (ep->com.state == DEAD) {
1972 PDBG("%s already dead ep %p\n", __FUNCTION__, ep); 1972 PDBG("%s already dead ep %p\n", __func__, ep);
1973 goto out; 1973 goto out;
1974 } 1974 }
1975 1975
@@ -2020,7 +2020,7 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
2020 if (ep->dst != old) 2020 if (ep->dst != old)
2021 return 0; 2021 return 0;
2022 2022
2023 PDBG("%s ep %p redirect to dst %p l2t %p\n", __FUNCTION__, ep, new, 2023 PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
2024 l2t); 2024 l2t);
2025 dst_hold(new); 2025 dst_hold(new);
2026 l2t_release(L2DATA(ep->com.tdev), ep->l2t); 2026 l2t_release(L2DATA(ep->com.tdev), ep->l2t);
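Every hunk above has the same shape: the function name reaches PDBG() as an ordinary const char * argument, never spliced into the format string, so the identifier swap is purely mechanical. A minimal userspace sketch of the pattern, assuming a PDBG stand-in built on fprintf (the driver's real macro lives in its own headers and is presumably compiled out unless debugging is enabled):

/*
 * Illustrative PDBG stand-in, not the driver's definition.
 * Build with: cc -DDEBUG demo.c
 * Note that ##__VA_ARGS__ is itself a gcc extension, though a
 * widely supported one.
 */
#include <stdio.h>

#ifdef DEBUG
#define PDBG(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)
#else
#define PDBG(fmt, ...) do { } while (0)
#endif

static void connect_one(void *ep)
{
        /* __func__ names the enclosing function: "connect_one" */
        PDBG("%s ep %p\n", __func__, ep);
}

int main(void)
{
        int dummy;

        connect_one(&dummy);
        return 0;
}

Because the name arrives as a runtime argument, none of the format strings in this file needed to change.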
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.h b/drivers/infiniband/hw/cxgb3/iwch_cm.h
index 6107e7cd9b57..2bb7fbdb3ff4 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.h
@@ -54,13 +54,13 @@
54#define MPA_FLAGS_MASK 0xE0 54#define MPA_FLAGS_MASK 0xE0
55 55
56#define put_ep(ep) { \ 56#define put_ep(ep) { \
57 PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __FUNCTION__, __LINE__, \ 57 PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__, \
58 ep, atomic_read(&((ep)->kref.refcount))); \ 58 ep, atomic_read(&((ep)->kref.refcount))); \
59 kref_put(&((ep)->kref), __free_ep); \ 59 kref_put(&((ep)->kref), __free_ep); \
60} 60}
61 61
62#define get_ep(ep) { \ 62#define get_ep(ep) { \
63 PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __FUNCTION__, __LINE__, \ 63 PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \
64 ep, atomic_read(&((ep)->kref.refcount))); \ 64 ep, atomic_read(&((ep)->kref.refcount))); \
65 kref_get(&((ep)->kref)); \ 65 kref_get(&((ep)->kref)); \
66} 66}
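These two wrappers have to be macros rather than functions: __func__ and __LINE__ expand at the point of use, so each trace line names the caller that took or dropped the reference. A self-contained userspace analog, assuming a bare int refcount in place of the kernel's struct kref:

/* Userspace analog of the get_ep()/put_ep() tracing wrappers; the
 * struct and refcount handling are simplified stand-ins. */
#include <stdio.h>

struct ep { int refcnt; };

#define get_ep(ep) do { \
        fprintf(stderr, "get_ep (via %s:%u) ep %p refcnt %d\n", \
                __func__, __LINE__, (void *)(ep), (ep)->refcnt); \
        (ep)->refcnt++; \
} while (0)

#define put_ep(ep) do { \
        fprintf(stderr, "put_ep (via %s:%u) ep %p refcnt %d\n", \
                __func__, __LINE__, (void *)(ep), (ep)->refcnt); \
        (ep)->refcnt--; \
} while (0)

int main(void)
{
        struct ep e = { 1 };

        get_ep(&e);     /* trace reads "via main:<line>" */
        put_ep(&e);
        put_ep(&e);     /* count reaches zero; the kernel would free here */
        return 0;
}

Had these been inline functions, every trace would read "via get_ep" or "via put_ep", which is precisely the information the macros exist to preserve.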
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cq.c b/drivers/infiniband/hw/cxgb3/iwch_cq.c
index d7624c170ee7..4ee8ccd0a9e5 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cq.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cq.c
@@ -67,7 +67,7 @@ static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
67 ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, 67 ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
68 &credit); 68 &credit);
69 if (t3a_device(chp->rhp) && credit) { 69 if (t3a_device(chp->rhp) && credit) {
70 PDBG("%s updating %d cq credits on id %d\n", __FUNCTION__, 70 PDBG("%s updating %d cq credits on id %d\n", __func__,
71 credit, chp->cq.cqid); 71 credit, chp->cq.cqid);
72 cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit); 72 cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit);
73 } 73 }
@@ -83,7 +83,7 @@ static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
83 wc->vendor_err = CQE_STATUS(cqe); 83 wc->vendor_err = CQE_STATUS(cqe);
84 84
85 PDBG("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x " 85 PDBG("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x "
86 "lo 0x%x cookie 0x%llx\n", __FUNCTION__, 86 "lo 0x%x cookie 0x%llx\n", __func__,
87 CQE_QPID(cqe), CQE_TYPE(cqe), 87 CQE_QPID(cqe), CQE_TYPE(cqe),
88 CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe), 88 CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
89 CQE_WRID_LOW(cqe), (unsigned long long) cookie); 89 CQE_WRID_LOW(cqe), (unsigned long long) cookie);
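iwch_poll_cq_one() above prints the 64-bit completion cookie only after casting it to unsigned long long. The cast is the point, not noise: a fixed-width 64-bit type may be unsigned long on one architecture and unsigned long long on another, and printf-style formatting requires an exact match. A runnable sketch with uint64_t standing in for the kernel's u64:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t cookie = 0xdeadbeefcafef00dULL;

        /* Without the cast, %llx is formally undefined on platforms
         * where uint64_t is unsigned long (typical LP64 Linux). */
        printf("%s cookie 0x%llx\n", __func__, (unsigned long long)cookie);

        /* The PRIx64 macro from <inttypes.h> is the other common fix. */
        return 0;
}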
diff --git a/drivers/infiniband/hw/cxgb3/iwch_ev.c b/drivers/infiniband/hw/cxgb3/iwch_ev.c
index b40676662a8a..7b67a6771720 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_ev.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_ev.c
@@ -52,7 +52,7 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
52 52
53 if (!qhp) { 53 if (!qhp) {
54 printk(KERN_ERR "%s unaffiliated error 0x%x qpid 0x%x\n", 54 printk(KERN_ERR "%s unaffiliated error 0x%x qpid 0x%x\n",
55 __FUNCTION__, CQE_STATUS(rsp_msg->cqe), 55 __func__, CQE_STATUS(rsp_msg->cqe),
56 CQE_QPID(rsp_msg->cqe)); 56 CQE_QPID(rsp_msg->cqe));
57 spin_unlock(&rnicp->lock); 57 spin_unlock(&rnicp->lock);
58 return; 58 return;
@@ -61,14 +61,14 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
61 if ((qhp->attr.state == IWCH_QP_STATE_ERROR) || 61 if ((qhp->attr.state == IWCH_QP_STATE_ERROR) ||
62 (qhp->attr.state == IWCH_QP_STATE_TERMINATE)) { 62 (qhp->attr.state == IWCH_QP_STATE_TERMINATE)) {
63 PDBG("%s AE received after RTS - " 63 PDBG("%s AE received after RTS - "
64 "qp state %d qpid 0x%x status 0x%x\n", __FUNCTION__, 64 "qp state %d qpid 0x%x status 0x%x\n", __func__,
65 qhp->attr.state, qhp->wq.qpid, CQE_STATUS(rsp_msg->cqe)); 65 qhp->attr.state, qhp->wq.qpid, CQE_STATUS(rsp_msg->cqe));
66 spin_unlock(&rnicp->lock); 66 spin_unlock(&rnicp->lock);
67 return; 67 return;
68 } 68 }
69 69
70 printk(KERN_ERR "%s - AE qpid 0x%x opcode %d status 0x%x " 70 printk(KERN_ERR "%s - AE qpid 0x%x opcode %d status 0x%x "
71 "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __FUNCTION__, 71 "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __func__,
72 CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe), 72 CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
73 CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe), 73 CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
74 CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe)); 74 CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
@@ -132,10 +132,10 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
132 (CQE_STATUS(rsp_msg->cqe) == 0)) { 132 (CQE_STATUS(rsp_msg->cqe) == 0)) {
133 if (SQ_TYPE(rsp_msg->cqe)) { 133 if (SQ_TYPE(rsp_msg->cqe)) {
134 PDBG("%s QPID 0x%x ep %p disconnecting\n", 134 PDBG("%s QPID 0x%x ep %p disconnecting\n",
135 __FUNCTION__, qhp->wq.qpid, qhp->ep); 135 __func__, qhp->wq.qpid, qhp->ep);
136 iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC); 136 iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC);
137 } else { 137 } else {
138 PDBG("%s post REQ_ERR AE QPID 0x%x\n", __FUNCTION__, 138 PDBG("%s post REQ_ERR AE QPID 0x%x\n", __func__,
139 qhp->wq.qpid); 139 qhp->wq.qpid);
140 post_qp_event(rnicp, chp, rsp_msg, 140 post_qp_event(rnicp, chp, rsp_msg,
141 IB_EVENT_QP_REQ_ERR, 0); 141 IB_EVENT_QP_REQ_ERR, 0);
@@ -180,7 +180,7 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
180 case TPT_ERR_INVALIDATE_SHARED_MR: 180 case TPT_ERR_INVALIDATE_SHARED_MR:
181 case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND: 181 case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
182 printk(KERN_ERR "%s - CQE Err qpid 0x%x opcode %d status 0x%x " 182 printk(KERN_ERR "%s - CQE Err qpid 0x%x opcode %d status 0x%x "
183 "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __FUNCTION__, 183 "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __func__,
184 CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe), 184 CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
185 CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe), 185 CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
186 CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe)); 186 CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
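The error reporting above unpacks packed CQE header fields through all-caps accessor macros (CQE_QPID(), CQE_STATUS(), and so on) before printing them alongside __func__. A hypothetical sketch of how such shift-and-mask accessors typically work -- the field positions below are invented for illustration, not the T3 hardware layout:

#include <stdio.h>
#include <stdint.h>

#define FIELD(x, shift, mask)   (((x) >> (shift)) & (mask))
#define CQE_QPID(x)     FIELD((x), 12, 0xFFFFF)  /* invented position */
#define CQE_STATUS(x)   FIELD((x), 5, 0x7F)      /* invented position */

int main(void)
{
        uint32_t header = (42u << 12) | (9u << 5);

        /* Prints: main qpid 0x2a status 0x9 */
        printf("%s qpid 0x%x status 0x%x\n", __func__,
               CQE_QPID(header), CQE_STATUS(header));
        return 0;
}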
diff --git a/drivers/infiniband/hw/cxgb3/iwch_mem.c b/drivers/infiniband/hw/cxgb3/iwch_mem.c
index b8797c66676d..58c3d61bcd14 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_mem.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_mem.c
@@ -62,7 +62,7 @@ int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
62 mmid = stag >> 8; 62 mmid = stag >> 8;
63 mhp->ibmr.rkey = mhp->ibmr.lkey = stag; 63 mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
64 insert_handle(rhp, &rhp->mmidr, mhp, mmid); 64 insert_handle(rhp, &rhp->mmidr, mhp, mmid);
65 PDBG("%s mmid 0x%x mhp %p\n", __FUNCTION__, mmid, mhp); 65 PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
66 return 0; 66 return 0;
67} 67}
68 68
@@ -96,7 +96,7 @@ int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
96 mmid = stag >> 8; 96 mmid = stag >> 8;
97 mhp->ibmr.rkey = mhp->ibmr.lkey = stag; 97 mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
98 insert_handle(rhp, &rhp->mmidr, mhp, mmid); 98 insert_handle(rhp, &rhp->mmidr, mhp, mmid);
99 PDBG("%s mmid 0x%x mhp %p\n", __FUNCTION__, mmid, mhp); 99 PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
100 return 0; 100 return 0;
101} 101}
102 102
@@ -163,7 +163,7 @@ int build_phys_page_list(struct ib_phys_buf *buffer_list,
163 ((u64) j << *shift)); 163 ((u64) j << *shift));
164 164
165 PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n", 165 PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n",
166 __FUNCTION__, (unsigned long long) *iova_start, 166 __func__, (unsigned long long) *iova_start,
167 (unsigned long long) mask, *shift, (unsigned long long) *total_size, 167 (unsigned long long) mask, *shift, (unsigned long long) *total_size,
168 *npages); 168 *npages);
169 169
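build_phys_page_list() above logs the mask, shift, and page count derived for a physical buffer list. The count itself is plain round-to-boundary arithmetic; a sketch with a hypothetical helper (the name and signature are illustrative, not the driver's):

#include <stdio.h>
#include <stdint.h>

/* Pages of size (1 << shift) touched by the range [va, va + len). */
static int npages_for(uint64_t va, uint64_t len, int shift)
{
        uint64_t page = 1ULL << shift;
        uint64_t start = va & ~(page - 1);                  /* round down */
        uint64_t end = (va + len + page - 1) & ~(page - 1); /* round up */

        return (int)((end - start) >> shift);
}

int main(void)
{
        /* An 8KB buffer starting 100 bytes into a 4KB page spans 3 pages. */
        printf("%s npages %d\n", __func__, npages_for(100, 8192, 12));
        return 0;
}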
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index b2ea9210467f..50e1f2a16e0c 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -101,7 +101,7 @@ static int iwch_dealloc_ucontext(struct ib_ucontext *context)
101 struct iwch_ucontext *ucontext = to_iwch_ucontext(context); 101 struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
102 struct iwch_mm_entry *mm, *tmp; 102 struct iwch_mm_entry *mm, *tmp;
103 103
104 PDBG("%s context %p\n", __FUNCTION__, context); 104 PDBG("%s context %p\n", __func__, context);
105 list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry) 105 list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
106 kfree(mm); 106 kfree(mm);
107 cxio_release_ucontext(&rhp->rdev, &ucontext->uctx); 107 cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
@@ -115,7 +115,7 @@ static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev,
115 struct iwch_ucontext *context; 115 struct iwch_ucontext *context;
116 struct iwch_dev *rhp = to_iwch_dev(ibdev); 116 struct iwch_dev *rhp = to_iwch_dev(ibdev);
117 117
118 PDBG("%s ibdev %p\n", __FUNCTION__, ibdev); 118 PDBG("%s ibdev %p\n", __func__, ibdev);
119 context = kzalloc(sizeof(*context), GFP_KERNEL); 119 context = kzalloc(sizeof(*context), GFP_KERNEL);
120 if (!context) 120 if (!context)
121 return ERR_PTR(-ENOMEM); 121 return ERR_PTR(-ENOMEM);
@@ -129,7 +129,7 @@ static int iwch_destroy_cq(struct ib_cq *ib_cq)
129{ 129{
130 struct iwch_cq *chp; 130 struct iwch_cq *chp;
131 131
132 PDBG("%s ib_cq %p\n", __FUNCTION__, ib_cq); 132 PDBG("%s ib_cq %p\n", __func__, ib_cq);
133 chp = to_iwch_cq(ib_cq); 133 chp = to_iwch_cq(ib_cq);
134 134
135 remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid); 135 remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
@@ -151,7 +151,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
151 struct iwch_create_cq_req ureq; 151 struct iwch_create_cq_req ureq;
152 struct iwch_ucontext *ucontext = NULL; 152 struct iwch_ucontext *ucontext = NULL;
153 153
154 PDBG("%s ib_dev %p entries %d\n", __FUNCTION__, ibdev, entries); 154 PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
155 rhp = to_iwch_dev(ibdev); 155 rhp = to_iwch_dev(ibdev);
156 chp = kzalloc(sizeof(*chp), GFP_KERNEL); 156 chp = kzalloc(sizeof(*chp), GFP_KERNEL);
157 if (!chp) 157 if (!chp)
@@ -233,7 +233,7 @@ static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
233 struct t3_cq oldcq, newcq; 233 struct t3_cq oldcq, newcq;
234 int ret; 234 int ret;
235 235
236 PDBG("%s ib_cq %p cqe %d\n", __FUNCTION__, cq, cqe); 236 PDBG("%s ib_cq %p cqe %d\n", __func__, cq, cqe);
237 237
238 /* We don't downsize... */ 238 /* We don't downsize... */
239 if (cqe <= cq->cqe) 239 if (cqe <= cq->cqe)
@@ -281,7 +281,7 @@ static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
281 ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq); 281 ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
282 if (ret) { 282 if (ret) {
283 printk(KERN_ERR MOD "%s - cxio_destroy_cq failed %d\n", 283 printk(KERN_ERR MOD "%s - cxio_destroy_cq failed %d\n",
284 __FUNCTION__, ret); 284 __func__, ret);
285 } 285 }
286 286
287 /* add user hooks here */ 287 /* add user hooks here */
@@ -316,7 +316,7 @@ static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
316 chp->cq.rptr = rptr; 316 chp->cq.rptr = rptr;
317 } else 317 } else
318 spin_lock_irqsave(&chp->lock, flag); 318 spin_lock_irqsave(&chp->lock, flag);
319 PDBG("%s rptr 0x%x\n", __FUNCTION__, chp->cq.rptr); 319 PDBG("%s rptr 0x%x\n", __func__, chp->cq.rptr);
320 err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0); 320 err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
321 spin_unlock_irqrestore(&chp->lock, flag); 321 spin_unlock_irqrestore(&chp->lock, flag);
322 if (err < 0) 322 if (err < 0)
@@ -337,7 +337,7 @@ static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
337 struct iwch_ucontext *ucontext; 337 struct iwch_ucontext *ucontext;
338 u64 addr; 338 u64 addr;
339 339
340 PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __FUNCTION__, vma->vm_pgoff, 340 PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
341 key, len); 341 key, len);
342 342
343 if (vma->vm_start & (PAGE_SIZE-1)) { 343 if (vma->vm_start & (PAGE_SIZE-1)) {
@@ -390,7 +390,7 @@ static int iwch_deallocate_pd(struct ib_pd *pd)
390 390
391 php = to_iwch_pd(pd); 391 php = to_iwch_pd(pd);
392 rhp = php->rhp; 392 rhp = php->rhp;
393 PDBG("%s ibpd %p pdid 0x%x\n", __FUNCTION__, pd, php->pdid); 393 PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
394 cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid); 394 cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
395 kfree(php); 395 kfree(php);
396 return 0; 396 return 0;
@@ -404,7 +404,7 @@ static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
404 u32 pdid; 404 u32 pdid;
405 struct iwch_dev *rhp; 405 struct iwch_dev *rhp;
406 406
407 PDBG("%s ibdev %p\n", __FUNCTION__, ibdev); 407 PDBG("%s ibdev %p\n", __func__, ibdev);
408 rhp = (struct iwch_dev *) ibdev; 408 rhp = (struct iwch_dev *) ibdev;
409 pdid = cxio_hal_get_pdid(rhp->rdev.rscp); 409 pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
410 if (!pdid) 410 if (!pdid)
@@ -422,7 +422,7 @@ static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
422 return ERR_PTR(-EFAULT); 422 return ERR_PTR(-EFAULT);
423 } 423 }
424 } 424 }
425 PDBG("%s pdid 0x%0x ptr 0x%p\n", __FUNCTION__, pdid, php); 425 PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
426 return &php->ibpd; 426 return &php->ibpd;
427} 427}
428 428
@@ -432,7 +432,7 @@ static int iwch_dereg_mr(struct ib_mr *ib_mr)
432 struct iwch_mr *mhp; 432 struct iwch_mr *mhp;
433 u32 mmid; 433 u32 mmid;
434 434
435 PDBG("%s ib_mr %p\n", __FUNCTION__, ib_mr); 435 PDBG("%s ib_mr %p\n", __func__, ib_mr);
436 /* There can be no memory windows */ 436 /* There can be no memory windows */
437 if (atomic_read(&ib_mr->usecnt)) 437 if (atomic_read(&ib_mr->usecnt))
438 return -EINVAL; 438 return -EINVAL;
@@ -447,7 +447,7 @@ static int iwch_dereg_mr(struct ib_mr *ib_mr)
447 kfree((void *) (unsigned long) mhp->kva); 447 kfree((void *) (unsigned long) mhp->kva);
448 if (mhp->umem) 448 if (mhp->umem)
449 ib_umem_release(mhp->umem); 449 ib_umem_release(mhp->umem);
450 PDBG("%s mmid 0x%x ptr %p\n", __FUNCTION__, mmid, mhp); 450 PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
451 kfree(mhp); 451 kfree(mhp);
452 return 0; 452 return 0;
453} 453}
@@ -467,7 +467,7 @@ static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
467 struct iwch_mr *mhp; 467 struct iwch_mr *mhp;
468 int ret; 468 int ret;
469 469
470 PDBG("%s ib_pd %p\n", __FUNCTION__, pd); 470 PDBG("%s ib_pd %p\n", __func__, pd);
471 php = to_iwch_pd(pd); 471 php = to_iwch_pd(pd);
472 rhp = php->rhp; 472 rhp = php->rhp;
473 473
@@ -531,7 +531,7 @@ static int iwch_reregister_phys_mem(struct ib_mr *mr,
531 int npages; 531 int npages;
532 int ret; 532 int ret;
533 533
534 PDBG("%s ib_mr %p ib_pd %p\n", __FUNCTION__, mr, pd); 534 PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);
535 535
536 /* There can be no memory windows */ 536 /* There can be no memory windows */
537 if (atomic_read(&mr->usecnt)) 537 if (atomic_read(&mr->usecnt))
@@ -594,7 +594,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
594 struct iwch_mr *mhp; 594 struct iwch_mr *mhp;
595 struct iwch_reg_user_mr_resp uresp; 595 struct iwch_reg_user_mr_resp uresp;
596 596
597 PDBG("%s ib_pd %p\n", __FUNCTION__, pd); 597 PDBG("%s ib_pd %p\n", __func__, pd);
598 598
599 php = to_iwch_pd(pd); 599 php = to_iwch_pd(pd);
600 rhp = php->rhp; 600 rhp = php->rhp;
@@ -649,7 +649,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
649 if (udata && !t3a_device(rhp)) { 649 if (udata && !t3a_device(rhp)) {
650 uresp.pbl_addr = (mhp->attr.pbl_addr - 650 uresp.pbl_addr = (mhp->attr.pbl_addr -
651 rhp->rdev.rnic_info.pbl_base) >> 3; 651 rhp->rdev.rnic_info.pbl_base) >> 3;
652 PDBG("%s user resp pbl_addr 0x%x\n", __FUNCTION__, 652 PDBG("%s user resp pbl_addr 0x%x\n", __func__,
653 uresp.pbl_addr); 653 uresp.pbl_addr);
654 654
655 if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) { 655 if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
@@ -673,7 +673,7 @@ static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
673 u64 kva; 673 u64 kva;
674 struct ib_mr *ibmr; 674 struct ib_mr *ibmr;
675 675
676 PDBG("%s ib_pd %p\n", __FUNCTION__, pd); 676 PDBG("%s ib_pd %p\n", __func__, pd);
677 677
678 /* 678 /*
679 * T3 only supports 32 bits of size. 679 * T3 only supports 32 bits of size.
@@ -710,7 +710,7 @@ static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
710 mhp->attr.stag = stag; 710 mhp->attr.stag = stag;
711 mmid = (stag) >> 8; 711 mmid = (stag) >> 8;
712 insert_handle(rhp, &rhp->mmidr, mhp, mmid); 712 insert_handle(rhp, &rhp->mmidr, mhp, mmid);
713 PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __FUNCTION__, mmid, mhp, stag); 713 PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
714 return &(mhp->ibmw); 714 return &(mhp->ibmw);
715} 715}
716 716
@@ -726,7 +726,7 @@ static int iwch_dealloc_mw(struct ib_mw *mw)
726 cxio_deallocate_window(&rhp->rdev, mhp->attr.stag); 726 cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
727 remove_handle(rhp, &rhp->mmidr, mmid); 727 remove_handle(rhp, &rhp->mmidr, mmid);
728 kfree(mhp); 728 kfree(mhp);
729 PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __FUNCTION__, mw, mmid, mhp); 729 PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
730 return 0; 730 return 0;
731} 731}
732 732
@@ -754,7 +754,7 @@ static int iwch_destroy_qp(struct ib_qp *ib_qp)
754 cxio_destroy_qp(&rhp->rdev, &qhp->wq, 754 cxio_destroy_qp(&rhp->rdev, &qhp->wq,
755 ucontext ? &ucontext->uctx : &rhp->rdev.uctx); 755 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
756 756
757 PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __FUNCTION__, 757 PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__,
758 ib_qp, qhp->wq.qpid, qhp); 758 ib_qp, qhp->wq.qpid, qhp);
759 kfree(qhp); 759 kfree(qhp);
760 return 0; 760 return 0;
@@ -773,7 +773,7 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
773 int wqsize, sqsize, rqsize; 773 int wqsize, sqsize, rqsize;
774 struct iwch_ucontext *ucontext; 774 struct iwch_ucontext *ucontext;
775 775
776 PDBG("%s ib_pd %p\n", __FUNCTION__, pd); 776 PDBG("%s ib_pd %p\n", __func__, pd);
777 if (attrs->qp_type != IB_QPT_RC) 777 if (attrs->qp_type != IB_QPT_RC)
778 return ERR_PTR(-EINVAL); 778 return ERR_PTR(-EINVAL);
779 php = to_iwch_pd(pd); 779 php = to_iwch_pd(pd);
@@ -805,7 +805,7 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
805 */ 805 */
806 sqsize = roundup_pow_of_two(attrs->cap.max_send_wr); 806 sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
807 wqsize = roundup_pow_of_two(rqsize + sqsize); 807 wqsize = roundup_pow_of_two(rqsize + sqsize);
808 PDBG("%s wqsize %d sqsize %d rqsize %d\n", __FUNCTION__, 808 PDBG("%s wqsize %d sqsize %d rqsize %d\n", __func__,
809 wqsize, sqsize, rqsize); 809 wqsize, sqsize, rqsize);
810 qhp = kzalloc(sizeof(*qhp), GFP_KERNEL); 810 qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
811 if (!qhp) 811 if (!qhp)
@@ -898,7 +898,7 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
898 init_timer(&(qhp->timer)); 898 init_timer(&(qhp->timer));
899 PDBG("%s sq_num_entries %d, rq_num_entries %d " 899 PDBG("%s sq_num_entries %d, rq_num_entries %d "
900 "qpid 0x%0x qhp %p dma_addr 0x%llx size %d\n", 900 "qpid 0x%0x qhp %p dma_addr 0x%llx size %d\n",
901 __FUNCTION__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries, 901 __func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
902 qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr, 902 qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
903 1 << qhp->wq.size_log2); 903 1 << qhp->wq.size_log2);
904 return &qhp->ibqp; 904 return &qhp->ibqp;
@@ -912,7 +912,7 @@ static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
912 enum iwch_qp_attr_mask mask = 0; 912 enum iwch_qp_attr_mask mask = 0;
913 struct iwch_qp_attributes attrs; 913 struct iwch_qp_attributes attrs;
914 914
915 PDBG("%s ib_qp %p\n", __FUNCTION__, ibqp); 915 PDBG("%s ib_qp %p\n", __func__, ibqp);
916 916
917 /* iwarp does not support the RTR state */ 917 /* iwarp does not support the RTR state */
918 if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR)) 918 if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
@@ -945,20 +945,20 @@ static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
945 945
946void iwch_qp_add_ref(struct ib_qp *qp) 946void iwch_qp_add_ref(struct ib_qp *qp)
947{ 947{
948 PDBG("%s ib_qp %p\n", __FUNCTION__, qp); 948 PDBG("%s ib_qp %p\n", __func__, qp);
949 atomic_inc(&(to_iwch_qp(qp)->refcnt)); 949 atomic_inc(&(to_iwch_qp(qp)->refcnt));
950} 950}
951 951
952void iwch_qp_rem_ref(struct ib_qp *qp) 952void iwch_qp_rem_ref(struct ib_qp *qp)
953{ 953{
954 PDBG("%s ib_qp %p\n", __FUNCTION__, qp); 954 PDBG("%s ib_qp %p\n", __func__, qp);
955 if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt))) 955 if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
956 wake_up(&(to_iwch_qp(qp)->wait)); 956 wake_up(&(to_iwch_qp(qp)->wait));
957} 957}
958 958
959static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn) 959static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
960{ 960{
961 PDBG("%s ib_dev %p qpn 0x%x\n", __FUNCTION__, dev, qpn); 961 PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
962 return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn); 962 return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
963} 963}
964 964
@@ -966,7 +966,7 @@ static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
966static int iwch_query_pkey(struct ib_device *ibdev, 966static int iwch_query_pkey(struct ib_device *ibdev,
967 u8 port, u16 index, u16 * pkey) 967 u8 port, u16 index, u16 * pkey)
968{ 968{
969 PDBG("%s ibdev %p\n", __FUNCTION__, ibdev); 969 PDBG("%s ibdev %p\n", __func__, ibdev);
970 *pkey = 0; 970 *pkey = 0;
971 return 0; 971 return 0;
972} 972}
@@ -977,7 +977,7 @@ static int iwch_query_gid(struct ib_device *ibdev, u8 port,
977 struct iwch_dev *dev; 977 struct iwch_dev *dev;
978 978
979 PDBG("%s ibdev %p, port %d, index %d, gid %p\n", 979 PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
980 __FUNCTION__, ibdev, port, index, gid); 980 __func__, ibdev, port, index, gid);
981 dev = to_iwch_dev(ibdev); 981 dev = to_iwch_dev(ibdev);
982 BUG_ON(port == 0 || port > 2); 982 BUG_ON(port == 0 || port > 2);
983 memset(&(gid->raw[0]), 0, sizeof(gid->raw)); 983 memset(&(gid->raw[0]), 0, sizeof(gid->raw));
@@ -990,7 +990,7 @@ static int iwch_query_device(struct ib_device *ibdev,
990{ 990{
991 991
992 struct iwch_dev *dev; 992 struct iwch_dev *dev;
993 PDBG("%s ibdev %p\n", __FUNCTION__, ibdev); 993 PDBG("%s ibdev %p\n", __func__, ibdev);
994 994
995 dev = to_iwch_dev(ibdev); 995 dev = to_iwch_dev(ibdev);
996 memset(props, 0, sizeof *props); 996 memset(props, 0, sizeof *props);
@@ -1017,7 +1017,7 @@ static int iwch_query_device(struct ib_device *ibdev,
1017static int iwch_query_port(struct ib_device *ibdev, 1017static int iwch_query_port(struct ib_device *ibdev,
1018 u8 port, struct ib_port_attr *props) 1018 u8 port, struct ib_port_attr *props)
1019{ 1019{
1020 PDBG("%s ibdev %p\n", __FUNCTION__, ibdev); 1020 PDBG("%s ibdev %p\n", __func__, ibdev);
1021 props->max_mtu = IB_MTU_4096; 1021 props->max_mtu = IB_MTU_4096;
1022 props->lid = 0; 1022 props->lid = 0;
1023 props->lmc = 0; 1023 props->lmc = 0;
@@ -1045,7 +1045,7 @@ static ssize_t show_rev(struct class_device *cdev, char *buf)
1045{ 1045{
1046 struct iwch_dev *dev = container_of(cdev, struct iwch_dev, 1046 struct iwch_dev *dev = container_of(cdev, struct iwch_dev,
1047 ibdev.class_dev); 1047 ibdev.class_dev);
1048 PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev); 1048 PDBG("%s class dev 0x%p\n", __func__, cdev);
1049 return sprintf(buf, "%d\n", dev->rdev.t3cdev_p->type); 1049 return sprintf(buf, "%d\n", dev->rdev.t3cdev_p->type);
1050} 1050}
1051 1051
@@ -1056,7 +1056,7 @@ static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
1056 struct ethtool_drvinfo info; 1056 struct ethtool_drvinfo info;
1057 struct net_device *lldev = dev->rdev.t3cdev_p->lldev; 1057 struct net_device *lldev = dev->rdev.t3cdev_p->lldev;
1058 1058
1059 PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev); 1059 PDBG("%s class dev 0x%p\n", __func__, cdev);
1060 rtnl_lock(); 1060 rtnl_lock();
1061 lldev->ethtool_ops->get_drvinfo(lldev, &info); 1061 lldev->ethtool_ops->get_drvinfo(lldev, &info);
1062 rtnl_unlock(); 1062 rtnl_unlock();
@@ -1070,7 +1070,7 @@ static ssize_t show_hca(struct class_device *cdev, char *buf)
1070 struct ethtool_drvinfo info; 1070 struct ethtool_drvinfo info;
1071 struct net_device *lldev = dev->rdev.t3cdev_p->lldev; 1071 struct net_device *lldev = dev->rdev.t3cdev_p->lldev;
1072 1072
1073 PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev); 1073 PDBG("%s class dev 0x%p\n", __func__, cdev);
1074 rtnl_lock(); 1074 rtnl_lock();
1075 lldev->ethtool_ops->get_drvinfo(lldev, &info); 1075 lldev->ethtool_ops->get_drvinfo(lldev, &info);
1076 rtnl_unlock(); 1076 rtnl_unlock();
@@ -1081,7 +1081,7 @@ static ssize_t show_board(struct class_device *cdev, char *buf)
1081{ 1081{
1082 struct iwch_dev *dev = container_of(cdev, struct iwch_dev, 1082 struct iwch_dev *dev = container_of(cdev, struct iwch_dev,
1083 ibdev.class_dev); 1083 ibdev.class_dev);
1084 PDBG("%s class dev 0x%p\n", __FUNCTION__, dev); 1084 PDBG("%s class dev 0x%p\n", __func__, dev);
1085 return sprintf(buf, "%x.%x\n", dev->rdev.rnic_info.pdev->vendor, 1085 return sprintf(buf, "%x.%x\n", dev->rdev.rnic_info.pdev->vendor,
1086 dev->rdev.rnic_info.pdev->device); 1086 dev->rdev.rnic_info.pdev->device);
1087} 1087}
@@ -1103,7 +1103,7 @@ int iwch_register_device(struct iwch_dev *dev)
1103 int ret; 1103 int ret;
1104 int i; 1104 int i;
1105 1105
1106 PDBG("%s iwch_dev %p\n", __FUNCTION__, dev); 1106 PDBG("%s iwch_dev %p\n", __func__, dev);
1107 strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX); 1107 strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
1108 memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid)); 1108 memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
1109 memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6); 1109 memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
@@ -1207,7 +1207,7 @@ void iwch_unregister_device(struct iwch_dev *dev)
1207{ 1207{
1208 int i; 1208 int i;
1209 1209
1210 PDBG("%s iwch_dev %p\n", __FUNCTION__, dev); 1210 PDBG("%s iwch_dev %p\n", __func__, dev);
1211 for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i) 1211 for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
1212 class_device_remove_file(&dev->ibdev.class_dev, 1212 class_device_remove_file(&dev->ibdev.class_dev,
1213 iwch_class_attributes[i]); 1213 iwch_class_attributes[i]);
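It is worth pinning down what the replacement identifier actually is. C99 (6.4.2.2) specifies that __func__ behaves as if each function body opened with the declaration static const char __func__[] = "function-name"; -- a real array with block scope, not a preprocessor token. That is observable:

#include <stdio.h>

static void show(void)
{
        /* An array, so sizeof sees the name plus its NUL: 5 here. */
        printf("%s: sizeof __func__ = %zu\n", __func__, sizeof __func__);

        /* It is not a string literal, so adjacent-literal pasting such
         * as puts("in " __func__); does not compile. */
}

int main(void)
{
        show();
        return 0;
}

That array-of-char behavior is why every call site in this patch passes the name through a %s conversion instead of concatenating it into the format string.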
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index 48833f3f3bd0..61356f91109d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -213,7 +213,7 @@ static inline struct iwch_mm_entry *remove_mmap(struct iwch_ucontext *ucontext,
213 if (mm->key == key && mm->len == len) { 213 if (mm->key == key && mm->len == len) {
214 list_del_init(&mm->entry); 214 list_del_init(&mm->entry);
215 spin_unlock(&ucontext->mmap_lock); 215 spin_unlock(&ucontext->mmap_lock);
216 PDBG("%s key 0x%x addr 0x%llx len %d\n", __FUNCTION__, 216 PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
217 key, (unsigned long long) mm->addr, mm->len); 217 key, (unsigned long long) mm->addr, mm->len);
218 return mm; 218 return mm;
219 } 219 }
@@ -226,7 +226,7 @@ static inline void insert_mmap(struct iwch_ucontext *ucontext,
226 struct iwch_mm_entry *mm) 226 struct iwch_mm_entry *mm)
227{ 227{
228 spin_lock(&ucontext->mmap_lock); 228 spin_lock(&ucontext->mmap_lock);
229 PDBG("%s key 0x%x addr 0x%llx len %d\n", __FUNCTION__, 229 PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
230 mm->key, (unsigned long long) mm->addr, mm->len); 230 mm->key, (unsigned long long) mm->addr, mm->len);
231 list_add_tail(&mm->entry, &ucontext->mmaps); 231 list_add_tail(&mm->entry, &ucontext->mmaps);
232 spin_unlock(&ucontext->mmap_lock); 232 spin_unlock(&ucontext->mmap_lock);
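The insert_mmap()/remove_mmap() helpers above keep a per-context list of (key, length) -> address mappings, tracing each operation with __func__. A userspace analog using a hand-rolled singly linked list -- the types and the absence of locking are deliberate simplifications, not the driver's implementation:

#include <stdio.h>
#include <stdlib.h>

struct mm_entry {
        unsigned int key, len;
        unsigned long long addr;
        struct mm_entry *next;
};

static struct mm_entry *mmaps;

static void insert_mmap(struct mm_entry *mm)
{
        printf("%s key 0x%x addr 0x%llx len %u\n", __func__,
               mm->key, mm->addr, mm->len);
        mm->next = mmaps;
        mmaps = mm;
}

static struct mm_entry *remove_mmap(unsigned int key, unsigned int len)
{
        struct mm_entry **p, *mm;

        for (p = &mmaps; (mm = *p) != NULL; p = &mm->next)
                if (mm->key == key && mm->len == len) {
                        *p = mm->next;  /* unlink */
                        printf("%s key 0x%x addr 0x%llx len %u\n",
                               __func__, key, mm->addr, mm->len);
                        return mm;
                }
        return NULL;
}

int main(void)
{
        struct mm_entry *mm = malloc(sizeof(*mm));

        if (!mm)
                return 1;
        mm->key = 0x10;
        mm->len = 4096;
        mm->addr = 0xabcd000ULL;
        insert_mmap(mm);
        free(remove_mmap(0x10, 4096));
        return 0;
}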
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index ea2cdd73dd85..bc5d9b0813e5 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -168,30 +168,30 @@ static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
168 168
169 mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8); 169 mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8);
170 if (!mhp) { 170 if (!mhp) {
171 PDBG("%s %d\n", __FUNCTION__, __LINE__); 171 PDBG("%s %d\n", __func__, __LINE__);
172 return -EIO; 172 return -EIO;
173 } 173 }
174 if (!mhp->attr.state) { 174 if (!mhp->attr.state) {
175 PDBG("%s %d\n", __FUNCTION__, __LINE__); 175 PDBG("%s %d\n", __func__, __LINE__);
176 return -EIO; 176 return -EIO;
177 } 177 }
178 if (mhp->attr.zbva) { 178 if (mhp->attr.zbva) {
179 PDBG("%s %d\n", __FUNCTION__, __LINE__); 179 PDBG("%s %d\n", __func__, __LINE__);
180 return -EIO; 180 return -EIO;
181 } 181 }
182 182
183 if (sg_list[i].addr < mhp->attr.va_fbo) { 183 if (sg_list[i].addr < mhp->attr.va_fbo) {
184 PDBG("%s %d\n", __FUNCTION__, __LINE__); 184 PDBG("%s %d\n", __func__, __LINE__);
185 return -EINVAL; 185 return -EINVAL;
186 } 186 }
187 if (sg_list[i].addr + ((u64) sg_list[i].length) < 187 if (sg_list[i].addr + ((u64) sg_list[i].length) <
188 sg_list[i].addr) { 188 sg_list[i].addr) {
189 PDBG("%s %d\n", __FUNCTION__, __LINE__); 189 PDBG("%s %d\n", __func__, __LINE__);
190 return -EINVAL; 190 return -EINVAL;
191 } 191 }
192 if (sg_list[i].addr + ((u64) sg_list[i].length) > 192 if (sg_list[i].addr + ((u64) sg_list[i].length) >
193 mhp->attr.va_fbo + ((u64) mhp->attr.len)) { 193 mhp->attr.va_fbo + ((u64) mhp->attr.len)) {
194 PDBG("%s %d\n", __FUNCTION__, __LINE__); 194 PDBG("%s %d\n", __func__, __LINE__);
195 return -EINVAL; 195 return -EINVAL;
196 } 196 }
197 offset = sg_list[i].addr - mhp->attr.va_fbo; 197 offset = sg_list[i].addr - mhp->attr.va_fbo;
@@ -290,7 +290,7 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
290 qhp->wq.oldest_read = sqp; 290 qhp->wq.oldest_read = sqp;
291 break; 291 break;
292 default: 292 default:
293 PDBG("%s post of type=%d TBD!\n", __FUNCTION__, 293 PDBG("%s post of type=%d TBD!\n", __func__,
294 wr->opcode); 294 wr->opcode);
295 err = -EINVAL; 295 err = -EINVAL;
296 } 296 }
@@ -309,7 +309,7 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
309 Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2), 309 Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
310 0, t3_wr_flit_cnt); 310 0, t3_wr_flit_cnt);
311 PDBG("%s cookie 0x%llx wq idx 0x%x swsq idx %ld opcode %d\n", 311 PDBG("%s cookie 0x%llx wq idx 0x%x swsq idx %ld opcode %d\n",
312 __FUNCTION__, (unsigned long long) wr->wr_id, idx, 312 __func__, (unsigned long long) wr->wr_id, idx,
313 Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2), 313 Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2),
314 sqp->opcode); 314 sqp->opcode);
315 wr = wr->next; 315 wr = wr->next;
@@ -361,7 +361,7 @@ int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
361 Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2), 361 Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
362 0, sizeof(struct t3_receive_wr) >> 3); 362 0, sizeof(struct t3_receive_wr) >> 3);
363 PDBG("%s cookie 0x%llx idx 0x%x rq_wptr 0x%x rw_rptr 0x%x " 363 PDBG("%s cookie 0x%llx idx 0x%x rq_wptr 0x%x rw_rptr 0x%x "
364 "wqe %p \n", __FUNCTION__, (unsigned long long) wr->wr_id, 364 "wqe %p \n", __func__, (unsigned long long) wr->wr_id,
365 idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe); 365 idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);
366 ++(qhp->wq.rq_wptr); 366 ++(qhp->wq.rq_wptr);
367 ++(qhp->wq.wptr); 367 ++(qhp->wq.wptr);
@@ -407,7 +407,7 @@ int iwch_bind_mw(struct ib_qp *qp,
407 return -ENOMEM; 407 return -ENOMEM;
408 } 408 }
409 idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2); 409 idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
410 PDBG("%s: idx 0x%0x, mw 0x%p, mw_bind 0x%p\n", __FUNCTION__, idx, 410 PDBG("%s: idx 0x%0x, mw 0x%p, mw_bind 0x%p\n", __func__, idx,
411 mw, mw_bind); 411 mw, mw_bind);
412 wqe = (union t3_wr *) (qhp->wq.queue + idx); 412 wqe = (union t3_wr *) (qhp->wq.queue + idx);
413 413
@@ -595,10 +595,10 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
595 struct terminate_message *term; 595 struct terminate_message *term;
596 struct sk_buff *skb; 596 struct sk_buff *skb;
597 597
598 PDBG("%s %d\n", __FUNCTION__, __LINE__); 598 PDBG("%s %d\n", __func__, __LINE__);
599 skb = alloc_skb(40, GFP_ATOMIC); 599 skb = alloc_skb(40, GFP_ATOMIC);
600 if (!skb) { 600 if (!skb) {
601 printk(KERN_ERR "%s cannot send TERMINATE!\n", __FUNCTION__); 601 printk(KERN_ERR "%s cannot send TERMINATE!\n", __func__);
602 return -ENOMEM; 602 return -ENOMEM;
603 } 603 }
604 wqe = (union t3_wr *)skb_put(skb, 40); 604 wqe = (union t3_wr *)skb_put(skb, 40);
@@ -629,7 +629,7 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
629 rchp = get_chp(qhp->rhp, qhp->attr.rcq); 629 rchp = get_chp(qhp->rhp, qhp->attr.rcq);
630 schp = get_chp(qhp->rhp, qhp->attr.scq); 630 schp = get_chp(qhp->rhp, qhp->attr.scq);
631 631
632 PDBG("%s qhp %p rchp %p schp %p\n", __FUNCTION__, qhp, rchp, schp); 632 PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
633 /* take a ref on the qhp since we must release the lock */ 633 /* take a ref on the qhp since we must release the lock */
634 atomic_inc(&qhp->refcnt); 634 atomic_inc(&qhp->refcnt);
635 spin_unlock_irqrestore(&qhp->lock, *flag); 635 spin_unlock_irqrestore(&qhp->lock, *flag);
@@ -720,11 +720,11 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
720 init_attr.flags |= capable(CAP_NET_BIND_SERVICE) ? PRIV_QP : 0; 720 init_attr.flags |= capable(CAP_NET_BIND_SERVICE) ? PRIV_QP : 0;
721 init_attr.irs = qhp->ep->rcv_seq; 721 init_attr.irs = qhp->ep->rcv_seq;
722 PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d " 722 PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d "
723 "flags 0x%x qpcaps 0x%x\n", __FUNCTION__, 723 "flags 0x%x qpcaps 0x%x\n", __func__,
724 init_attr.rq_addr, init_attr.rq_size, 724 init_attr.rq_addr, init_attr.rq_size,
725 init_attr.flags, init_attr.qpcaps); 725 init_attr.flags, init_attr.qpcaps);
726 ret = cxio_rdma_init(&rhp->rdev, &init_attr); 726 ret = cxio_rdma_init(&rhp->rdev, &init_attr);
727 PDBG("%s ret %d\n", __FUNCTION__, ret); 727 PDBG("%s ret %d\n", __func__, ret);
728 return ret; 728 return ret;
729} 729}
730 730
@@ -742,7 +742,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
742 int free = 0; 742 int free = 0;
743 struct iwch_ep *ep = NULL; 743 struct iwch_ep *ep = NULL;
744 744
745 PDBG("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __FUNCTION__, 745 PDBG("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __func__,
746 qhp, qhp->wq.qpid, qhp->ep, qhp->attr.state, 746 qhp, qhp->wq.qpid, qhp->ep, qhp->attr.state,
747 (mask & IWCH_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1); 747 (mask & IWCH_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
748 748
@@ -899,14 +899,14 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
899 break; 899 break;
900 default: 900 default:
901 printk(KERN_ERR "%s in a bad state %d\n", 901 printk(KERN_ERR "%s in a bad state %d\n",
902 __FUNCTION__, qhp->attr.state); 902 __func__, qhp->attr.state);
903 ret = -EINVAL; 903 ret = -EINVAL;
904 goto err; 904 goto err;
905 break; 905 break;
906 } 906 }
907 goto out; 907 goto out;
908err: 908err:
909 PDBG("%s disassociating ep %p qpid 0x%x\n", __FUNCTION__, qhp->ep, 909 PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
910 qhp->wq.qpid); 910 qhp->wq.qpid);
911 911
912 /* disassociate the LLP connection */ 912 /* disassociate the LLP connection */
@@ -939,7 +939,7 @@ out:
939 if (free) 939 if (free)
940 put_ep(&ep->com); 940 put_ep(&ep->com);
941 941
942 PDBG("%s exit state %d\n", __FUNCTION__, qhp->attr.state); 942 PDBG("%s exit state %d\n", __func__, qhp->attr.state);
943 return ret; 943 return ret;
944} 944}
945 945
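Finally, for code that must also build with pre-C99 toolchains (not a concern for the kernel, which builds with gcc, but common in portable userspace), the usual arrangement is a compatibility shim keyed on __STDC_VERSION__. An illustrative version:

/* Conventional __func__ fallback for older compilers; illustrative
 * only -- the kernel needs no such shim. */
#include <stdio.h>

#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
/* C99 or later: __func__ is built in, nothing to do. */
#elif defined(__GNUC__)
#define __func__ __FUNCTION__   /* the older gcc spelling */
#else
#define __func__ "<unknown>"    /* no function name available */
#endif

int main(void)
{
        printf("running %s\n", __func__);
        return 0;
}

Strictly speaking, defining a macro named __func__ encroaches on a reserved identifier, but the shim is common in practice and inert on any compiler that never reaches the fallback branches.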