author     Parav Pandit <pandit.parav@gmail.com>    2016-09-28 16:26:26 -0400
committer  Doug Ledford <dledford@redhat.com>       2016-10-06 13:50:04 -0400
commit     e404f945a6101a6f0f2a29aade6dc32015471daa (patch)
tree       c507b2ce938d5faa0902346f3177373d264978fb
parent     b9fe856e545491fd57fc7ca195d0123df0124e7c (diff)
IB/rxe: improved debug prints & code cleanup
1. Debugging qp state transitions and qp errors in loopback and
   multiple-QP tests is difficult without qp numbers in the debug logs.
   This patch adds the qp number to the important debug logs.

2. Instead of having the "rxe:" prefix in some logs and not in others,
   use a uniform module-name prefix via the pr_fmt macro.

3. Code cleanup for various warnings reported by checkpatch: bare
   "unsigned" data types, lines over 80 characters, and unnecessary
   return statements.

Signed-off-by: Parav Pandit <pandit.parav@gmail.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
-rw-r--r--  drivers/infiniband/sw/rxe/rxe.c        34
-rw-r--r--  drivers/infiniband/sw/rxe/rxe.h         5
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_av.c      4
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_comp.c    6
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mmap.c    2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mr.c      2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_net.c    39
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_net.h     3
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_qp.c     31
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_recv.c    3
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_req.c    19
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_resp.c   25
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_sysfs.c  12
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.c  12
14 files changed, 109 insertions(+), 88 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 9a48e7277673..ab6c3c25d7ff 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -358,38 +358,16 @@ static int __init rxe_module_init(void)
 	/* initialize slab caches for managed objects */
 	err = rxe_cache_init();
 	if (err) {
-		pr_err("rxe: unable to init object pools\n");
+		pr_err("unable to init object pools\n");
 		return err;
 	}
 
-	err = rxe_net_ipv4_init();
-	if (err) {
-		pr_err("rxe: unable to init ipv4 tunnel\n");
-		rxe_cache_exit();
-		goto exit;
-	}
-
-	err = rxe_net_ipv6_init();
-	if (err) {
-		pr_err("rxe: unable to init ipv6 tunnel\n");
-		rxe_cache_exit();
-		goto exit;
-	}
-
-	err = register_netdevice_notifier(&rxe_net_notifier);
-	if (err) {
-		pr_err("rxe: Failed to rigister netdev notifier\n");
-		goto exit;
-	}
-
-	pr_info("rxe: loaded\n");
+	err = rxe_net_init();
+	if (err)
+		return err;
 
+	pr_info("loaded\n");
 	return 0;
-
-exit:
-	rxe_release_udp_tunnel(recv_sockets.sk4);
-	rxe_release_udp_tunnel(recv_sockets.sk6);
-	return err;
 }
 
 static void __exit rxe_module_exit(void)
@@ -398,7 +376,7 @@ static void __exit rxe_module_exit(void)
 	rxe_net_exit();
 	rxe_cache_exit();
 
-	pr_info("rxe: unloaded\n");
+	pr_info("unloaded\n");
 }
 
 late_initcall(rxe_module_init);
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index 12c71c549f97..a696af81e4a5 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -34,6 +34,11 @@
 #ifndef RXE_H
 #define RXE_H
 
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/skbuff.h>
 #include <linux/crc32.h>
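The #ifdef/#undef dance above overrides the default pr_fmt() that linux/printk.h installs when none is defined; because the pr_*() helpers expand pr_fmt() at each call site, every log statement in a file that includes rxe.h picks up the prefix automatically, which is what lets the hand-written "rxe: " prefixes below be dropped. A minimal sketch of the mechanism, assuming the module builds as rdma_rxe.ko (KBUILD_MODNAME is injected per object by kbuild):

	/* Sketch only. pr_err(fmt, ...) is essentially
	 * printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__). */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	pr_err("unable to init object pools\n");
	/* expands to:
	 * printk(KERN_ERR "rdma_rxe" ": " "unable to init object pools\n")
	 * and logs: rdma_rxe: unable to init object pools
	 */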
diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c
index 5c9474212d4e..604f6fee96bd 100644
--- a/drivers/infiniband/sw/rxe/rxe_av.c
+++ b/drivers/infiniband/sw/rxe/rxe_av.c
@@ -39,7 +39,7 @@ int rxe_av_chk_attr(struct rxe_dev *rxe, struct ib_ah_attr *attr)
 	struct rxe_port *port;
 
 	if (attr->port_num != 1) {
-		pr_info("rxe: invalid port_num = %d\n", attr->port_num);
+		pr_info("invalid port_num = %d\n", attr->port_num);
 		return -EINVAL;
 	}
 
@@ -47,7 +47,7 @@ int rxe_av_chk_attr(struct rxe_dev *rxe, struct ib_ah_attr *attr)
 
 	if (attr->ah_flags & IB_AH_GRH) {
 		if (attr->grh.sgid_index > port->attr.gid_tbl_len) {
-			pr_info("rxe: invalid sgid index = %d\n",
+			pr_info("invalid sgid index = %d\n",
 				attr->grh.sgid_index);
 			return -EINVAL;
 		}
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 1c59ef2c67aa..6c5e29db88e3 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -567,7 +567,8 @@ int rxe_completer(void *arg)
 	state = COMPST_GET_ACK;
 
 	while (1) {
-		pr_debug("state = %s\n", comp_state_name[state]);
+		pr_debug("qp#%d state = %s\n", qp_num(qp),
+			 comp_state_name[state]);
 		switch (state) {
 		case COMPST_GET_ACK:
 			skb = skb_dequeue(&qp->resp_pkts);
@@ -709,7 +710,8 @@ int rxe_completer(void *arg)
 				qp->comp.rnr_retry--;
 
 				qp->req.need_retry = 1;
-				pr_debug("set rnr nak timer\n");
+				pr_debug("qp#%d set rnr nak timer\n",
+					 qp_num(qp));
 				mod_timer(&qp->rnr_nak_timer,
 					  jiffies + rnrnak_jiffies(aeth_syn(pkt)
 						& ~AETH_TYPE_MASK));
diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c
index 54b3c7c99eff..c572a4c09359 100644
--- a/drivers/infiniband/sw/rxe/rxe_mmap.c
+++ b/drivers/infiniband/sw/rxe/rxe_mmap.c
@@ -126,7 +126,7 @@ found_it:
 
 	ret = remap_vmalloc_range(vma, ip->obj, 0);
 	if (ret) {
-		pr_err("rxe: err %d from remap_vmalloc_range\n", ret);
+		pr_err("err %d from remap_vmalloc_range\n", ret);
 		goto done;
 	}
 
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index f3dab6574504..1869152f1d23 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -39,7 +39,7 @@
  */
 static u8 rxe_get_key(void)
 {
-	static unsigned key = 1;
+	static u32 key = 1;
 
 	key = key << 1;
 
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 549aa84f9f9f..b8258e4f0aea 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -65,7 +65,7 @@ struct rxe_dev *net_to_rxe(struct net_device *ndev)
 	return found;
 }
 
-struct rxe_dev *get_rxe_by_name(const char* name)
+struct rxe_dev *get_rxe_by_name(const char *name)
 {
 	struct rxe_dev *rxe;
 	struct rxe_dev *found = NULL;
@@ -601,8 +601,7 @@ void rxe_port_up(struct rxe_dev *rxe)
 	port->attr.phys_state = IB_PHYS_STATE_LINK_UP;
 
 	rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE);
-	pr_info("rxe: set %s active\n", rxe->ib_dev.name);
-	return;
+	pr_info("set %s active\n", rxe->ib_dev.name);
 }
 
 /* Caller must hold net_info_lock */
@@ -615,8 +614,7 @@ void rxe_port_down(struct rxe_dev *rxe)
 	port->attr.phys_state = IB_PHYS_STATE_LINK_DOWN;
 
 	rxe_port_event(rxe, IB_EVENT_PORT_ERR);
-	pr_info("rxe: set %s down\n", rxe->ib_dev.name);
-	return;
+	pr_info("set %s down\n", rxe->ib_dev.name);
 }
 
 static int rxe_notify(struct notifier_block *not_blk,
@@ -641,7 +639,7 @@ static int rxe_notify(struct notifier_block *not_blk,
 		rxe_port_down(rxe);
 		break;
 	case NETDEV_CHANGEMTU:
-		pr_info("rxe: %s changed mtu to %d\n", ndev->name, ndev->mtu);
+		pr_info("%s changed mtu to %d\n", ndev->name, ndev->mtu);
 		rxe_set_mtu(rxe, ndev->mtu);
 		break;
 	case NETDEV_REBOOT:
@@ -651,7 +649,7 @@ static int rxe_notify(struct notifier_block *not_blk,
 	case NETDEV_CHANGENAME:
 	case NETDEV_FEAT_CHANGE:
 	default:
-		pr_info("rxe: ignoring netdev event = %ld for %s\n",
+		pr_info("ignoring netdev event = %ld for %s\n",
 			event, ndev->name);
 		break;
 	}
@@ -671,7 +669,7 @@ int rxe_net_ipv4_init(void)
 			htons(ROCE_V2_UDP_DPORT), false);
 	if (IS_ERR(recv_sockets.sk4)) {
 		recv_sockets.sk4 = NULL;
-		pr_err("rxe: Failed to create IPv4 UDP tunnel\n");
+		pr_err("Failed to create IPv4 UDP tunnel\n");
 		return -1;
 	}
 
@@ -688,7 +686,7 @@ int rxe_net_ipv6_init(void)
 			htons(ROCE_V2_UDP_DPORT), true);
 	if (IS_ERR(recv_sockets.sk6)) {
 		recv_sockets.sk6 = NULL;
-		pr_err("rxe: Failed to create IPv6 UDP tunnel\n");
+		pr_err("Failed to create IPv6 UDP tunnel\n");
 		return -1;
 	}
 #endif
@@ -701,3 +699,26 @@ void rxe_net_exit(void)
 	rxe_release_udp_tunnel(recv_sockets.sk4);
 	unregister_netdevice_notifier(&rxe_net_notifier);
 }
+
+int rxe_net_init(void)
+{
+	int err;
+
+	recv_sockets.sk6 = NULL;
+
+	err = rxe_net_ipv4_init();
+	if (err)
+		return err;
+	err = rxe_net_ipv6_init();
+	if (err)
+		goto err_out;
+	err = register_netdevice_notifier(&rxe_net_notifier);
+	if (err) {
+		pr_err("Failed to register netdev notifier\n");
+		goto err_out;
+	}
+	return 0;
+err_out:
+	rxe_net_exit();
+	return err;
+}
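Note how rxe_net_init() pre-clears recv_sockets.sk6 so the shared err_out path can call rxe_net_exit() even when IPv6 setup never ran: the unwind only works if releasing an unopened tunnel is a no-op. A sketch of the NULL-tolerant release this relies on (the real helper lives earlier in rxe_net.c; this mirrors its assumed behaviour rather than quoting it):

	void rxe_release_udp_tunnel(struct socket *sk)
	{
		if (sk)	/* tolerate a tunnel that was never created */
			udp_tunnel_sock_release(sk);
	}

The ipv4/ipv6 init helpers also NULL their socket pointer on failure (see the IS_ERR branches above), so the tunnel pointers are never left dangling for the unwind to trip over.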
diff --git a/drivers/infiniband/sw/rxe/rxe_net.h b/drivers/infiniband/sw/rxe/rxe_net.h
index 0daf7f09e5b5..1c06b3bfe1b6 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.h
+++ b/drivers/infiniband/sw/rxe/rxe_net.h
@@ -49,8 +49,7 @@ void rxe_release_udp_tunnel(struct socket *sk);
 
 struct rxe_dev *rxe_net_add(struct net_device *ndev);
 
-int rxe_net_ipv4_init(void);
-int rxe_net_ipv6_init(void);
+int rxe_net_init(void);
 void rxe_net_exit(void);
 
 #endif /* RXE_NET_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 62c37a563df9..b8036cfbce04 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -298,8 +298,8 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
 
 	wqe_size = rcv_wqe_size(qp->rq.max_sge);
 
-	pr_debug("max_wr = %d, max_sge = %d, wqe_size = %d\n",
-		 qp->rq.max_wr, qp->rq.max_sge, wqe_size);
+	pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
+		 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);
 
 	qp->rq.queue = rxe_queue_init(rxe,
 				      &qp->rq.max_wr,
@@ -680,24 +680,27 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
 	if (mask & IB_QP_RETRY_CNT) {
 		qp->attr.retry_cnt = attr->retry_cnt;
 		qp->comp.retry_cnt = attr->retry_cnt;
-		pr_debug("set retry count = %d\n", attr->retry_cnt);
+		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
+			 attr->retry_cnt);
 	}
 
 	if (mask & IB_QP_RNR_RETRY) {
 		qp->attr.rnr_retry = attr->rnr_retry;
 		qp->comp.rnr_retry = attr->rnr_retry;
-		pr_debug("set rnr retry count = %d\n", attr->rnr_retry);
+		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
+			 attr->rnr_retry);
 	}
 
 	if (mask & IB_QP_RQ_PSN) {
 		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
 		qp->resp.psn = qp->attr.rq_psn;
-		pr_debug("set resp psn = 0x%x\n", qp->resp.psn);
+		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
+			 qp->resp.psn);
 	}
 
 	if (mask & IB_QP_MIN_RNR_TIMER) {
 		qp->attr.min_rnr_timer = attr->min_rnr_timer;
-		pr_debug("set min rnr timer = 0x%x\n",
-			 attr->min_rnr_timer);
+		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
+			 attr->min_rnr_timer);
 	}
 
@@ -705,7 +708,7 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
 		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
 		qp->req.psn = qp->attr.sq_psn;
 		qp->comp.psn = qp->attr.sq_psn;
-		pr_debug("set req psn = 0x%x\n", qp->req.psn);
+		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
 	}
 
 	if (mask & IB_QP_PATH_MIG_STATE)
@@ -719,38 +722,38 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
 
 	switch (attr->qp_state) {
 	case IB_QPS_RESET:
-		pr_debug("qp state -> RESET\n");
+		pr_debug("qp#%d state -> RESET\n", qp_num(qp));
 		rxe_qp_reset(qp);
 		break;
 
 	case IB_QPS_INIT:
-		pr_debug("qp state -> INIT\n");
+		pr_debug("qp#%d state -> INIT\n", qp_num(qp));
 		qp->req.state = QP_STATE_INIT;
 		qp->resp.state = QP_STATE_INIT;
 		break;
 
 	case IB_QPS_RTR:
-		pr_debug("qp state -> RTR\n");
+		pr_debug("qp#%d state -> RTR\n", qp_num(qp));
 		qp->resp.state = QP_STATE_READY;
 		break;
 
 	case IB_QPS_RTS:
-		pr_debug("qp state -> RTS\n");
+		pr_debug("qp#%d state -> RTS\n", qp_num(qp));
 		qp->req.state = QP_STATE_READY;
 		break;
 
 	case IB_QPS_SQD:
-		pr_debug("qp state -> SQD\n");
+		pr_debug("qp#%d state -> SQD\n", qp_num(qp));
 		rxe_qp_drain(qp);
 		break;
 
 	case IB_QPS_SQE:
-		pr_warn("qp state -> SQE !!?\n");
+		pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
 		/* Not possible from modify_qp. */
 		break;
 
 	case IB_QPS_ERR:
-		pr_debug("qp state -> ERR\n");
+		pr_debug("qp#%d state -> ERR\n", qp_num(qp));
 		rxe_qp_error(qp);
 		break;
 	}
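All of the transitions above log through pr_debug() (except the SQE warning), so they compile to nothing unless DEBUG is set or CONFIG_DYNAMIC_DEBUG is enabled; in the latter case they can be switched on at runtime with echo 'module rdma_rxe +p' > /sys/kernel/debug/dynamic_debug/control. The qp#%d argument comes from qp_num(); a sketch of what that accessor is assumed to resolve to (the IB-visible queue pair number, which is what makes interleaved logs from loopback or multi-QP tests separable):

	/* Assumed shape of the qp_num() helper used in the new prints;
	 * the real definition lives in rxe_verbs.h. */
	static inline u32 qp_num_sketch(struct rxe_qp *qp)
	{
		return qp->ibqp.qp_num;	/* same number userspace sees in ibv_qp->qp_num */
	}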
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index 144d2f129fcd..46f062842a9a 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -387,7 +387,8 @@ int rxe_rcv(struct sk_buff *skb)
 	pack_icrc = be32_to_cpu(*icrcp);
 
 	calc_icrc = rxe_icrc_hdr(pkt, skb);
-	calc_icrc = crc32_le(calc_icrc, (u8 *)payload_addr(pkt), payload_size(pkt));
+	calc_icrc = crc32_le(calc_icrc, (u8 *)payload_addr(pkt),
+			     payload_size(pkt));
 	calc_icrc = cpu_to_be32(~calc_icrc);
 	if (unlikely(calc_icrc != pack_icrc)) {
 		char saddr[sizeof(struct in6_addr)];
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 13a848a518e8..832846b73ea0 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -38,7 +38,7 @@
 #include "rxe_queue.h"
 
 static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
-		       unsigned opcode);
+		       u32 opcode);
 
 static inline void retry_first_write_send(struct rxe_qp *qp,
 					  struct rxe_send_wqe *wqe,
@@ -121,7 +121,7 @@ void rnr_nak_timer(unsigned long data)
 {
 	struct rxe_qp *qp = (struct rxe_qp *)data;
 
-	pr_debug("rnr nak timer fired\n");
+	pr_debug("qp#%d rnr nak timer fired\n", qp_num(qp));
 	rxe_run_task(&qp->req.task, 1);
 }
 
@@ -187,7 +187,7 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
 	return wqe;
 }
 
-static int next_opcode_rc(struct rxe_qp *qp, unsigned opcode, int fits)
+static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
 {
 	switch (opcode) {
 	case IB_WR_RDMA_WRITE:
@@ -259,7 +259,7 @@ static int next_opcode_rc(struct rxe_qp *qp, unsigned opcode, int fits)
 	return -EINVAL;
 }
 
-static int next_opcode_uc(struct rxe_qp *qp, unsigned opcode, int fits)
+static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
 {
 	switch (opcode) {
 	case IB_WR_RDMA_WRITE:
@@ -311,7 +311,7 @@ static int next_opcode_uc(struct rxe_qp *qp, unsigned opcode, int fits)
 }
 
 static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
-		       unsigned opcode)
+		       u32 opcode)
 {
 	int fits = (wqe->dma.resid <= qp->mtu);
 
@@ -588,7 +588,7 @@ int rxe_requester(void *arg)
 	struct rxe_pkt_info pkt;
 	struct sk_buff *skb;
 	struct rxe_send_wqe *wqe;
-	unsigned mask;
+	enum rxe_hdr_mask mask;
 	int payload;
 	int mtu;
 	int opcode;
@@ -626,7 +626,8 @@ next_wqe:
 		rmr = rxe_pool_get_index(&rxe->mr_pool,
 					 wqe->wr.ex.invalidate_rkey >> 8);
 		if (!rmr) {
-			pr_err("No mr for key %#x\n", wqe->wr.ex.invalidate_rkey);
+			pr_err("No mr for key %#x\n",
+			       wqe->wr.ex.invalidate_rkey);
 			wqe->state = wqe_state_error;
 			wqe->status = IB_WC_MW_BIND_ERR;
 			goto exit;
@@ -702,12 +703,12 @@ next_wqe:
 
 	skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
 	if (unlikely(!skb)) {
-		pr_err("Failed allocating skb\n");
+		pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
 		goto err;
 	}
 
 	if (fill_packet(qp, wqe, &pkt, skb, payload)) {
-		pr_debug("Error during fill packet\n");
+		pr_debug("qp#%d Error during fill packet\n", qp_num(qp));
 		goto err;
 	}
 
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index d544b5e84797..dd3d88adc003 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -749,6 +749,18 @@ static enum resp_states read_reply(struct rxe_qp *qp,
 	return state;
 }
 
+static void build_rdma_network_hdr(union rdma_network_hdr *hdr,
+				   struct rxe_pkt_info *pkt)
+{
+	struct sk_buff *skb = PKT_TO_SKB(pkt);
+
+	memset(hdr, 0, sizeof(*hdr));
+	if (skb->protocol == htons(ETH_P_IP))
+		memcpy(&hdr->roce4grh, ip_hdr(skb), sizeof(hdr->roce4grh));
+	else if (skb->protocol == htons(ETH_P_IPV6))
+		memcpy(&hdr->ibgrh, ipv6_hdr(skb), sizeof(hdr->ibgrh));
+}
+
 /* Executes a new request. A retried request never reach that function (send
  * and writes are discarded, and reads and atomics are retried elsewhere.
  */
@@ -761,13 +773,8 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
 	    qp_type(qp) == IB_QPT_SMI ||
 	    qp_type(qp) == IB_QPT_GSI) {
 		union rdma_network_hdr hdr;
-		struct sk_buff *skb = PKT_TO_SKB(pkt);
 
-		memset(&hdr, 0, sizeof(hdr));
-		if (skb->protocol == htons(ETH_P_IP))
-			memcpy(&hdr.roce4grh, ip_hdr(skb), sizeof(hdr.roce4grh));
-		else if (skb->protocol == htons(ETH_P_IPV6))
-			memcpy(&hdr.ibgrh, ipv6_hdr(skb), sizeof(hdr.ibgrh));
+		build_rdma_network_hdr(&hdr, pkt);
 
 		err = send_data_in(qp, &hdr, sizeof(hdr));
 		if (err)
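The copy matters because, for UD (and SMI/GSI) queue pairs, the verbs contract is that the first 40 bytes of the posted receive buffer carry the network header when one is present; send_data_in() writes the union ahead of the payload to honour that, and the new helper just centralizes building it. A sketch of how a libibverbs consumer would skip past it on the other end (hypothetical helper, standard verbs types):

	#include <infiniband/verbs.h>

	/* If the completion carries a GRH, the payload starts 40 bytes
	 * into the receive buffer (sizeof(struct ibv_grh) == 40). */
	static void *ud_payload(const struct ibv_wc *wc, void *recv_buf)
	{
		if (wc->wc_flags & IBV_WC_GRH)
			return (char *)recv_buf + sizeof(struct ibv_grh);
		return recv_buf;
	}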
@@ -881,7 +888,8 @@ static enum resp_states do_complete(struct rxe_qp *qp,
 			rmr = rxe_pool_get_index(&rxe->mr_pool,
 						 wc->ex.invalidate_rkey >> 8);
 			if (unlikely(!rmr)) {
-				pr_err("Bad rkey %#x invalidation\n", wc->ex.invalidate_rkey);
+				pr_err("Bad rkey %#x invalidation\n",
+				       wc->ex.invalidate_rkey);
 				return RESPST_ERROR;
 			}
 			rmr->state = RXE_MEM_STATE_FREE;
@@ -1208,7 +1216,8 @@ int rxe_responder(void *arg)
 	}
 
 	while (1) {
-		pr_debug("state = %s\n", resp_state_name[state]);
+		pr_debug("qp#%d state = %s\n", qp_num(qp),
+			 resp_state_name[state]);
 		switch (state) {
 		case RESPST_GET_REQ:
 			state = get_req(qp, &pkt);
diff --git a/drivers/infiniband/sw/rxe/rxe_sysfs.c b/drivers/infiniband/sw/rxe/rxe_sysfs.c
index cf8e77800046..d5ed7571128f 100644
--- a/drivers/infiniband/sw/rxe/rxe_sysfs.c
+++ b/drivers/infiniband/sw/rxe/rxe_sysfs.c
@@ -79,7 +79,7 @@ static int rxe_param_set_add(const char *val, const struct kernel_param *kp)
 
 	len = sanitize_arg(val, intf, sizeof(intf));
 	if (!len) {
-		pr_err("rxe: add: invalid interface name\n");
+		pr_err("add: invalid interface name\n");
 		err = -EINVAL;
 		goto err;
 	}
@@ -92,20 +92,20 @@ static int rxe_param_set_add(const char *val, const struct kernel_param *kp)
 	}
 
 	if (net_to_rxe(ndev)) {
-		pr_err("rxe: already configured on %s\n", intf);
+		pr_err("already configured on %s\n", intf);
 		err = -EINVAL;
 		goto err;
 	}
 
 	rxe = rxe_net_add(ndev);
 	if (!rxe) {
-		pr_err("rxe: failed to add %s\n", intf);
+		pr_err("failed to add %s\n", intf);
 		err = -EINVAL;
 		goto err;
 	}
 
 	rxe_set_port_state(ndev);
-	pr_info("rxe: added %s to %s\n", rxe->ib_dev.name, intf);
+	pr_info("added %s to %s\n", rxe->ib_dev.name, intf);
 err:
 	if (ndev)
 		dev_put(ndev);
@@ -120,7 +120,7 @@ static int rxe_param_set_remove(const char *val, const struct kernel_param *kp)
 
 	len = sanitize_arg(val, intf, sizeof(intf));
 	if (!len) {
-		pr_err("rxe: add: invalid interface name\n");
+		pr_err("add: invalid interface name\n");
 		return -EINVAL;
 	}
 
@@ -133,7 +133,7 @@ static int rxe_param_set_remove(const char *val, const struct kernel_param *kp)
 	rxe = get_rxe_by_name(intf);
 
 	if (!rxe) {
-		pr_err("rxe: not configured on %s\n", intf);
+		pr_err("not configured on %s\n", intf);
 		return -EINVAL;
 	}
 
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index a5af6917fc1c..19841c863daf 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -100,10 +100,12 @@ static int rxe_query_port(struct ib_device *dev,
 		rxe->ndev->ethtool_ops->get_settings(rxe->ndev, &cmd);
 		speed = cmd.speed;
 	} else {
-		pr_warn("%s speed is unknown, defaulting to 1000\n", rxe->ndev->name);
+		pr_warn("%s speed is unknown, defaulting to 1000\n",
+			rxe->ndev->name);
 		speed = 1000;
 	}
-	rxe_eth_speed_to_ib_speed(speed, &attr->active_speed, &attr->active_width);
+	rxe_eth_speed_to_ib_speed(speed, &attr->active_speed,
+				  &attr->active_width);
 	mutex_unlock(&rxe->usdev_lock);
 
 	return 0;
@@ -761,7 +763,7 @@ static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
 }
 
 static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
-			 unsigned mask, u32 length)
+			 unsigned int mask, u32 length)
 {
 	int err;
 	struct rxe_sq *sq = &qp->sq;
@@ -1145,8 +1147,8 @@ static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
 	return 0;
 }
 
-static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
-			 unsigned int *sg_offset)
+static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+			 int sg_nents, unsigned int *sg_offset)
 {
 	struct rxe_mem *mr = to_rmr(ibmr);
 	int n;