Diffstat (limited to 'drivers/infiniband/hw')
 drivers/infiniband/hw/amso1100/c2.h          |   1
 drivers/infiniband/hw/amso1100/c2_qp.c       |   3
 drivers/infiniband/hw/cxgb3/cxio_resource.c  |   4
 drivers/infiniband/hw/cxgb4/cm.c             |  80
 drivers/infiniband/hw/cxgb4/device.c         |  34
 drivers/infiniband/hw/cxgb4/id_table.c       |   4
 drivers/infiniband/hw/cxgb4/iw_cxgb4.h       |  14
 drivers/infiniband/hw/cxgb4/mem.c            | 155
 drivers/infiniband/hw/cxgb4/provider.c       |  15
 drivers/infiniband/hw/cxgb4/qp.c             | 117
 drivers/infiniband/hw/cxgb4/t4.h             |  11
 drivers/infiniband/hw/ipath/ipath_file_ops.c |   1
 drivers/infiniband/hw/ipath/ipath_verbs.c    |   2
 drivers/infiniband/hw/mlx4/cm.c              |   4
 drivers/infiniband/hw/mlx4/cq.c              |   2
 drivers/infiniband/hw/mlx4/mad.c             |   2
 drivers/infiniband/hw/nes/nes_hw.c           |   2
 drivers/infiniband/hw/nes/nes_nic.c          |  14
 drivers/infiniband/hw/qib/Kconfig            |   6
 drivers/infiniband/hw/qib/qib_driver.c       |   5
 drivers/infiniband/hw/qib/qib_file_ops.c     |   2
 drivers/infiniband/hw/qib/qib_iba6120.c      |   3
 drivers/infiniband/hw/qib/qib_init.c         |   8
 drivers/infiniband/hw/qib/qib_sd7220.c       |   2
 drivers/infiniband/hw/qib/qib_verbs.c        |   4
 25 files changed, 373 insertions(+), 122 deletions(-)
diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h
index ba7a1208ff9e..d619d735838b 100644
--- a/drivers/infiniband/hw/amso1100/c2.h
+++ b/drivers/infiniband/hw/amso1100/c2.h
@@ -265,7 +265,6 @@ struct c2_pd_table {
 struct c2_qp_table {
     struct idr idr;
     spinlock_t lock;
-    int last;
 };
 
 struct c2_element {
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c
index 0ab826b280b2..86708dee58b1 100644
--- a/drivers/infiniband/hw/amso1100/c2_qp.c
+++ b/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -385,8 +385,7 @@ static int c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp)
     idr_preload(GFP_KERNEL);
     spin_lock_irq(&c2dev->qp_table.lock);
 
-    ret = idr_alloc(&c2dev->qp_table.idr, qp, c2dev->qp_table.last++, 0,
-            GFP_NOWAIT);
+    ret = idr_alloc_cyclic(&c2dev->qp_table.idr, qp, 0, 0, GFP_NOWAIT);
     if (ret >= 0)
         qp->qpn = ret;
 
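Note: both amso1100 hunks above replace an open-coded cyclic allocator (idr_alloc() plus a hand-maintained "last" cursor in c2_qp_table) with idr_alloc_cyclic(), which keeps the cursor inside the IDR itself. A minimal sketch of the pattern (qp_idr, qp_lock, and alloc_qpn are hypothetical names; the IDR calls are the real API):

    #include <linux/idr.h>
    #include <linux/spinlock.h>

    static DEFINE_IDR(qp_idr);
    static DEFINE_SPINLOCK(qp_lock);

    static int alloc_qpn(void *qp)
    {
        int id;

        idr_preload(GFP_KERNEL);        /* preallocate outside the lock */
        spin_lock_irq(&qp_lock);
        /* search starts just past the last allocated id and wraps to 0 */
        id = idr_alloc_cyclic(&qp_idr, qp, 0, 0, GFP_NOWAIT);
        spin_unlock_irq(&qp_lock);
        idr_preload_end();
        return id;      /* >= 0 on success, -ENOSPC or -ENOMEM on failure */
    }

mlx4/cm.c further down gets the identical conversion, dropping its static next_id.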
diff --git a/drivers/infiniband/hw/cxgb3/cxio_resource.c b/drivers/infiniband/hw/cxgb3/cxio_resource.c
index 31f9201b2980..c40088ecf9f3 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_resource.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_resource.c
@@ -62,13 +62,13 @@ static int __cxio_init_resource_fifo(struct kfifo *fifo,
         kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
     if (random) {
         j = 0;
-        random_bytes = random32();
+        random_bytes = prandom_u32();
         for (i = 0; i < RANDOM_SIZE; i++)
             rarray[i] = i + skip_low;
         for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
             if (j >= RANDOM_SIZE) {
                 j = 0;
-                random_bytes = random32();
+                random_bytes = prandom_u32();
             }
             idx = (random_bytes >> (j * 2)) & 0xF;
             kfifo_in(fifo,
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 565bfb161c1a..65c30ea8c1a1 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -511,12 +511,16 @@ static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
 static int send_connect(struct c4iw_ep *ep)
 {
     struct cpl_act_open_req *req;
+    struct cpl_t5_act_open_req *t5_req;
     struct sk_buff *skb;
     u64 opt0;
     u32 opt2;
     unsigned int mtu_idx;
     int wscale;
-    int wrlen = roundup(sizeof *req, 16);
+    int size = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
+           sizeof(struct cpl_act_open_req) :
+           sizeof(struct cpl_t5_act_open_req);
+    int wrlen = roundup(size, 16);
 
     PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);
 
@@ -552,17 +556,36 @@ static int send_connect(struct c4iw_ep *ep)
         opt2 |= WND_SCALE_EN(1);
     t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
 
-    req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
-    INIT_TP_WR(req, 0);
-    OPCODE_TID(req) = cpu_to_be32(
-        MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid)));
-    req->local_port = ep->com.local_addr.sin_port;
-    req->peer_port = ep->com.remote_addr.sin_port;
-    req->local_ip = ep->com.local_addr.sin_addr.s_addr;
-    req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
-    req->opt0 = cpu_to_be64(opt0);
-    req->params = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, ep->l2t));
-    req->opt2 = cpu_to_be32(opt2);
+    if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
+        req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
+        INIT_TP_WR(req, 0);
+        OPCODE_TID(req) = cpu_to_be32(
+                MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+                ((ep->rss_qid << 14) | ep->atid)));
+        req->local_port = ep->com.local_addr.sin_port;
+        req->peer_port = ep->com.remote_addr.sin_port;
+        req->local_ip = ep->com.local_addr.sin_addr.s_addr;
+        req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
+        req->opt0 = cpu_to_be64(opt0);
+        req->params = cpu_to_be32(select_ntuple(ep->com.dev,
+                    ep->dst, ep->l2t));
+        req->opt2 = cpu_to_be32(opt2);
+    } else {
+        t5_req = (struct cpl_t5_act_open_req *) skb_put(skb, wrlen);
+        INIT_TP_WR(t5_req, 0);
+        OPCODE_TID(t5_req) = cpu_to_be32(
+                MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+                ((ep->rss_qid << 14) | ep->atid)));
+        t5_req->local_port = ep->com.local_addr.sin_port;
+        t5_req->peer_port = ep->com.remote_addr.sin_port;
+        t5_req->local_ip = ep->com.local_addr.sin_addr.s_addr;
+        t5_req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
+        t5_req->opt0 = cpu_to_be64(opt0);
+        t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
+                select_ntuple(ep->com.dev, ep->dst, ep->l2t)));
+        t5_req->opt2 = cpu_to_be32(opt2);
+    }
+
     set_bit(ACT_OPEN_REQ, &ep->com.history);
     return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 }
@@ -1575,6 +1598,12 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
 
     neigh = dst_neigh_lookup(ep->dst,
             &ep->com.cm_id->remote_addr.sin_addr.s_addr);
+    if (!neigh) {
+        pr_err("%s - cannot alloc neigh.\n", __func__);
+        err = -ENOMEM;
+        goto fail4;
+    }
+
     /* get a l2t entry */
     if (neigh->dev->flags & IFF_LOOPBACK) {
         PDBG("%s LOOPBACK\n", __func__);
@@ -1670,9 +1699,9 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
     case CPL_ERR_CONN_TIMEDOUT:
         break;
     case CPL_ERR_TCAM_FULL:
+        dev->rdev.stats.tcam_full++;
         if (dev->rdev.lldi.enable_fw_ofld_conn) {
             mutex_lock(&dev->rdev.stats.lock);
-            dev->rdev.stats.tcam_full++;
             mutex_unlock(&dev->rdev.stats.lock);
             send_fw_act_open_req(ep,
                 GET_TID_TID(GET_AOPEN_ATID(
@@ -2869,12 +2898,14 @@ static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
 static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
 {
     u32 l2info;
-    u16 vlantag, len, hdr_len;
+    u16 vlantag, len, hdr_len, eth_hdr_len;
     u8 intf;
     struct cpl_rx_pkt *cpl = cplhdr(skb);
     struct cpl_pass_accept_req *req;
     struct tcp_options_received tmp_opt;
+    struct c4iw_dev *dev;
 
+    dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
     /* Store values from cpl_rx_pkt in temporary location. */
     vlantag = (__force u16) cpl->vlan;
     len = (__force u16) cpl->len;
@@ -2890,7 +2921,7 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
      */
     memset(&tmp_opt, 0, sizeof(tmp_opt));
     tcp_clear_options(&tmp_opt);
-    tcp_parse_options(skb, &tmp_opt, NULL, 0, NULL);
+    tcp_parse_options(skb, &tmp_opt, 0, NULL);
 
     req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
     memset(req, 0, sizeof(*req));
@@ -2898,14 +2929,16 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
                 V_SYN_MAC_IDX(G_RX_MACIDX(
                 (__force int) htonl(l2info))) |
                 F_SYN_XACT_MATCH);
+    eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
+                G_RX_ETHHDR_LEN((__force int) htonl(l2info)) :
+                G_RX_T5_ETHHDR_LEN((__force int) htonl(l2info));
     req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(
                 (__force int) htonl(l2info))) |
                V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(
                 (__force int) htons(hdr_len))) |
                V_IP_HDR_LEN(G_RX_IPHDR_LEN(
                 (__force int) htons(hdr_len))) |
-               V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(
-                (__force int) htonl(l2info))));
+               V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(eth_hdr_len)));
     req->vlan = (__force __be16) vlantag;
     req->len = (__force __be16) len;
     req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
@@ -2993,7 +3026,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
     u16 window;
     struct port_info *pi;
     struct net_device *pdev;
-    u16 rss_qid;
+    u16 rss_qid, eth_hdr_len;
     int step;
     u32 tx_chan;
     struct neighbour *neigh;
@@ -3022,7 +3055,10 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
         goto reject;
     }
 
-    if (G_RX_ETHHDR_LEN(ntohl(cpl->l2info)) == ETH_HLEN) {
+    eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
+              G_RX_ETHHDR_LEN(htonl(cpl->l2info)) :
+              G_RX_T5_ETHHDR_LEN(htonl(cpl->l2info));
+    if (eth_hdr_len == ETH_HLEN) {
         eh = (struct ethhdr *)(req + 1);
         iph = (struct iphdr *)(eh + 1);
     } else {
@@ -3053,6 +3089,12 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
     dst = &rt->dst;
     neigh = dst_neigh_lookup_skb(dst, skb);
 
+    if (!neigh) {
+        pr_err("%s - failed to allocate neigh!\n",
+               __func__);
+        goto free_dst;
+    }
+
     if (neigh->dev->flags & IFF_LOOPBACK) {
         pdev = ip_dev_find(&init_net, iph->daddr);
         e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
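Note: two themes run through the cm.c hunks above. T5 adapters use a larger active-open work request (cpl_t5_act_open_req, whose filter tuple is 64-bit and wrapped in V_FILTER_TUPLE()) and report the Ethernet header length through G_RX_T5_ETHHDR_LEN rather than G_RX_ETHHDR_LEN, hence the is_t4() branches. Independently, dst_neigh_lookup() and dst_neigh_lookup_skb() can return NULL, so the new checks unwind instead of dereferencing a NULL neighbour. The caller-side shape, condensed (daddr and the fail label are placeholders; the real labels are fail4 and free_dst in the two call sites):

    neigh = dst_neigh_lookup(ep->dst, &daddr); /* may allocate internally */
    if (!neigh) {                              /* NULL on failure */
        err = -ENOMEM;
        goto fail;      /* release dst etc. in the unwind path */
    }
    /* ... use neigh, then neigh_release(neigh) when done ... */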
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 80069ad595c1..ae656016e1ae 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -41,10 +41,20 @@
 #define DRV_VERSION "0.1"
 
 MODULE_AUTHOR("Steve Wise");
-MODULE_DESCRIPTION("Chelsio T4 RDMA Driver");
+MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRV_VERSION);
 
+static int allow_db_fc_on_t5;
+module_param(allow_db_fc_on_t5, int, 0644);
+MODULE_PARM_DESC(allow_db_fc_on_t5,
+         "Allow DB Flow Control on T5 (default = 0)");
+
+static int allow_db_coalescing_on_t5;
+module_param(allow_db_coalescing_on_t5, int, 0644);
+MODULE_PARM_DESC(allow_db_coalescing_on_t5,
+         "Allow DB Coalescing on T5 (default = 0)");
+
 struct uld_ctx {
     struct list_head entry;
     struct cxgb4_lld_info lldi;
@@ -614,7 +624,7 @@ static int rdma_supported(const struct cxgb4_lld_info *infop)
 {
     return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
            infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
-           infop->vr->cq.size > 0 && infop->vr->ocq.size > 0;
+           infop->vr->cq.size > 0;
 }
 
 static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
@@ -627,6 +637,22 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
                pci_name(infop->pdev));
         return ERR_PTR(-ENOSYS);
     }
+    if (!ocqp_supported(infop))
+        pr_info("%s: On-Chip Queues not supported on this device.\n",
+            pci_name(infop->pdev));
+
+    if (!is_t4(infop->adapter_type)) {
+        if (!allow_db_fc_on_t5) {
+            db_fc_threshold = 100000;
+            pr_info("DB Flow Control Disabled.\n");
+        }
+
+        if (!allow_db_coalescing_on_t5) {
+            db_coalescing_threshold = -1;
+            pr_info("DB Coalescing Disabled.\n");
+        }
+    }
+
     devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
     if (!devp) {
         printk(KERN_ERR MOD "Cannot allocate ib device\n");
@@ -678,8 +704,8 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
     int i;
 
     if (!vers_printed++)
-        printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n",
-               DRV_VERSION);
+        pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
+            DRV_VERSION);
 
     ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
     if (!ctx) {
diff --git a/drivers/infiniband/hw/cxgb4/id_table.c b/drivers/infiniband/hw/cxgb4/id_table.c
index f95e5df30db2..0161ae6ad629 100644
--- a/drivers/infiniband/hw/cxgb4/id_table.c
+++ b/drivers/infiniband/hw/cxgb4/id_table.c
@@ -54,7 +54,7 @@ u32 c4iw_id_alloc(struct c4iw_id_table *alloc)
 
     if (obj < alloc->max) {
         if (alloc->flags & C4IW_ID_TABLE_F_RANDOM)
-            alloc->last += random32() % RANDOM_SKIP;
+            alloc->last += prandom_u32() % RANDOM_SKIP;
         else
             alloc->last = obj + 1;
         if (alloc->last >= alloc->max)
@@ -88,7 +88,7 @@ int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
     alloc->start = start;
     alloc->flags = flags;
     if (flags & C4IW_ID_TABLE_F_RANDOM)
-        alloc->last = random32() % RANDOM_SKIP;
+        alloc->last = prandom_u32() % RANDOM_SKIP;
     else
         alloc->last = 0;
     alloc->max = num;
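Note: random32() was renamed prandom_u32() tree-wide in this window (the same substitution appears in cxio_resource.c above and mlx4/mad.c below); both read the non-cryptographic pseudo-random generator declared in <linux/random.h>, so the randomized start and skip distance of the id table behave exactly as before:

    alloc->last = prandom_u32() % RANDOM_SKIP; /* random start, same distribution */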
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 7eec5e13fa8c..485183ad34cd 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -162,7 +162,7 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
     return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5));
 }
 
-#define C4IW_WR_TO (10*HZ)
+#define C4IW_WR_TO (30*HZ)
 
 struct c4iw_wr_wait {
     struct completion completion;
@@ -369,7 +369,6 @@ struct c4iw_fr_page_list {
     DEFINE_DMA_UNMAP_ADDR(mapping);
     dma_addr_t dma_addr;
     struct c4iw_dev *dev;
-    int size;
 };
 
 static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list(
@@ -817,6 +816,15 @@ static inline int compute_wscale(int win)
     return wscale;
 }
 
+static inline int ocqp_supported(const struct cxgb4_lld_info *infop)
+{
+#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
+    return infop->vr->ocq.size > 0;
+#else
+    return 0;
+#endif
+}
+
 u32 c4iw_id_alloc(struct c4iw_id_table *alloc);
 void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj);
 int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
@@ -930,6 +938,8 @@ extern struct cxgb4_client t4c_client;
 extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
 extern int c4iw_max_read_depth;
 extern int db_fc_threshold;
+extern int db_coalescing_threshold;
+extern int use_dsgl;
 
 
 #endif
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 903a92d6f91d..4cb8eb24497c 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -30,16 +30,76 @@
  * SOFTWARE.
  */
 
+#include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <rdma/ib_umem.h>
 #include <linux/atomic.h>
 
 #include "iw_cxgb4.h"
 
+int use_dsgl = 1;
+module_param(use_dsgl, int, 0644);
+MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=1)");
+
 #define T4_ULPTX_MIN_IO 32
 #define C4IW_MAX_INLINE_SIZE 96
+#define T4_ULPTX_MAX_DMA 1024
+#define C4IW_INLINE_THRESHOLD 128
 
-static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
-                 void *data)
+static int inline_threshold = C4IW_INLINE_THRESHOLD;
+module_param(inline_threshold, int, 0644);
+MODULE_PARM_DESC(inline_threshold, "inline vs dsgl threshold (default=128)");
+
+static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
+                       u32 len, dma_addr_t data, int wait)
+{
+    struct sk_buff *skb;
+    struct ulp_mem_io *req;
+    struct ulptx_sgl *sgl;
+    u8 wr_len;
+    int ret = 0;
+    struct c4iw_wr_wait wr_wait;
+
+    addr &= 0x7FFFFFF;
+
+    if (wait)
+        c4iw_init_wr_wait(&wr_wait);
+    wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);
+
+    skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+    if (!skb)
+        return -ENOMEM;
+    set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+
+    req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
+    memset(req, 0, wr_len);
+    INIT_ULPTX_WR(req, wr_len, 0, 0);
+    req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
+            (wait ? FW_WR_COMPL(1) : 0));
+    req->wr.wr_lo = wait ? (__force __be64)&wr_wait : 0;
+    req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
+    req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));
+    req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1));
+    req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(len>>5));
+    req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
+    req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr));
+
+    sgl = (struct ulptx_sgl *)(req + 1);
+    sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_DSGL) |
+                    ULPTX_NSGE(1));
+    sgl->len0 = cpu_to_be32(len);
+    sgl->addr0 = cpu_to_be64(data);
+
+    ret = c4iw_ofld_send(rdev, skb);
+    if (ret)
+        return ret;
+    if (wait)
+        ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
+    return ret;
+}
+
+static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
+                  void *data)
 {
     struct sk_buff *skb;
     struct ulp_mem_io *req;
@@ -47,6 +107,12 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
     u8 wr_len, *to_dp, *from_dp;
     int copy_len, num_wqe, i, ret = 0;
     struct c4iw_wr_wait wr_wait;
+    __be32 cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));
+
+    if (is_t4(rdev->lldi.adapter_type))
+        cmd |= cpu_to_be32(ULP_MEMIO_ORDER(1));
+    else
+        cmd |= cpu_to_be32(V_T5_ULP_MEMIO_IMM(1));
 
     addr &= 0x7FFFFFF;
     PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
@@ -77,7 +143,7 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
         req->wr.wr_mid = cpu_to_be32(
                        FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
 
-        req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE) | (1<<23));
+        req->cmd = cmd;
         req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(
                 DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
         req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr),
@@ -107,6 +173,67 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
     return ret;
 }
 
+int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
+{
+    u32 remain = len;
+    u32 dmalen;
+    int ret = 0;
+    dma_addr_t daddr;
+    dma_addr_t save;
+
+    daddr = dma_map_single(&rdev->lldi.pdev->dev, data, len, DMA_TO_DEVICE);
+    if (dma_mapping_error(&rdev->lldi.pdev->dev, daddr))
+        return -1;
+    save = daddr;
+
+    while (remain > inline_threshold) {
+        if (remain < T4_ULPTX_MAX_DMA) {
+            if (remain & ~T4_ULPTX_MIN_IO)
+                dmalen = remain & ~(T4_ULPTX_MIN_IO-1);
+            else
+                dmalen = remain;
+        } else
+            dmalen = T4_ULPTX_MAX_DMA;
+        remain -= dmalen;
+        ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr,
+                         !remain);
+        if (ret)
+            goto out;
+        addr += dmalen >> 5;
+        data += dmalen;
+        daddr += dmalen;
+    }
+    if (remain)
+        ret = _c4iw_write_mem_inline(rdev, addr, remain, data);
+out:
+    dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE);
+    return ret;
+}
+
+/*
+ * write len bytes of data into addr (32B aligned address)
+ * If data is NULL, clear len byte of memory to zero.
+ */
+static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
+                 void *data)
+{
+    if (is_t5(rdev->lldi.adapter_type) && use_dsgl) {
+        if (len > inline_threshold) {
+            if (_c4iw_write_mem_dma(rdev, addr, len, data)) {
+                printk_ratelimited(KERN_WARNING
+                           "%s: dma map"
+                           " failure (non fatal)\n",
+                           pci_name(rdev->lldi.pdev));
+                return _c4iw_write_mem_inline(rdev, addr, len,
+                                  data);
+            } else
+                return 0;
+        } else
+            return _c4iw_write_mem_inline(rdev, addr, len, data);
+    } else
+        return _c4iw_write_mem_inline(rdev, addr, len, data);
+}
+
 /*
  * Build and write a TPT entry.
  * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
@@ -760,19 +887,23 @@ struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
     struct c4iw_fr_page_list *c4pl;
     struct c4iw_dev *dev = to_c4iw_dev(device);
     dma_addr_t dma_addr;
-    int size = sizeof *c4pl + page_list_len * sizeof(u64);
+    int pll_len = roundup(page_list_len * sizeof(u64), 32);
 
-    c4pl = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev, size,
-                  &dma_addr, GFP_KERNEL);
+    c4pl = kmalloc(sizeof(*c4pl), GFP_KERNEL);
     if (!c4pl)
         return ERR_PTR(-ENOMEM);
 
+    c4pl->ibpl.page_list = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev,
+                          pll_len, &dma_addr,
+                          GFP_KERNEL);
+    if (!c4pl->ibpl.page_list) {
+        kfree(c4pl);
+        return ERR_PTR(-ENOMEM);
+    }
     dma_unmap_addr_set(c4pl, mapping, dma_addr);
     c4pl->dma_addr = dma_addr;
     c4pl->dev = dev;
-    c4pl->size = size;
-    c4pl->ibpl.page_list = (u64 *)(c4pl + 1);
-    c4pl->ibpl.max_page_list_len = page_list_len;
+    c4pl->ibpl.max_page_list_len = pll_len;
 
     return &c4pl->ibpl;
 }
@@ -781,8 +912,10 @@ void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
 {
     struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);
 
-    dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev, c4pl->size,
-              c4pl, dma_unmap_addr(c4pl, mapping));
+    dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev,
+              c4pl->ibpl.max_page_list_len,
+              c4pl->ibpl.page_list, dma_unmap_addr(c4pl, mapping));
+    kfree(c4pl);
 }
 
 int c4iw_dereg_mr(struct ib_mr *ib_mr)
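Note: mem.c now has two ways to write adapter memory. The original inline path (renamed _c4iw_write_mem_inline) carries the payload inside the FW_ULPTX_WR itself; the new DMA path for T5 sends a work request whose single ulptx_sgl points at a DMA-mapped buffer (_c4iw_write_mem_dma_aligned, driven by _c4iw_write_mem_dma in T4_ULPTX_MAX_DMA-sized, 32-byte-aligned chunks, waiting for a completion only on the final chunk). The dispatch in write_adapter_mem() condenses to the following illustrative restatement of the function added above, not new code:

    static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr,
                                 u32 len, void *data)
    {
        /* T4, use_dsgl=0, or small writes: inline ULP_TX_MEM_WRITE */
        if (!is_t5(rdev->lldi.adapter_type) || !use_dsgl ||
            len <= inline_threshold)
            return _c4iw_write_mem_inline(rdev, addr, len, data);

        /* large T5 writes: DSGL DMA, inline as a non-fatal fallback */
        if (_c4iw_write_mem_dma(rdev, addr, len, data))
            return _c4iw_write_mem_inline(rdev, addr, len, data);
        return 0;
    }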
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index e084fdc6da7f..7e94c9a656a1 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -162,8 +162,14 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
      */
     if (addr >= rdev->oc_mw_pa)
         vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
-    else
-        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+    else {
+        if (is_t5(rdev->lldi.adapter_type))
+            vma->vm_page_prot =
+                t4_pgprot_wc(vma->vm_page_prot);
+        else
+            vma->vm_page_prot =
+                pgprot_noncached(vma->vm_page_prot);
+    }
     ret = io_remap_pfn_range(vma, vma->vm_start,
                  addr >> PAGE_SHIFT,
                  len, vma->vm_page_prot);
@@ -263,7 +269,7 @@ static int c4iw_query_device(struct ib_device *ibdev,
     dev = to_c4iw_dev(ibdev);
     memset(props, 0, sizeof *props);
     memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
-    props->hw_ver = dev->rdev.lldi.adapter_type;
+    props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type);
     props->fw_ver = dev->rdev.lldi.fw_vers;
     props->device_cap_flags = dev->device_cap_flags;
     props->page_size_cap = T4_PAGESIZE_MASK;
@@ -346,7 +352,8 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
     struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
                          ibdev.dev);
     PDBG("%s dev 0x%p\n", __func__, dev);
-    return sprintf(buf, "%d\n", c4iw_dev->rdev.lldi.adapter_type);
+    return sprintf(buf, "%d\n",
+               CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
 }
 
 static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 17ba4f8bc12d..5b059e2d80cc 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -42,10 +42,21 @@ static int ocqp_support = 1;
 module_param(ocqp_support, int, 0644);
 MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");
 
-int db_fc_threshold = 2000;
+int db_fc_threshold = 1000;
 module_param(db_fc_threshold, int, 0644);
-MODULE_PARM_DESC(db_fc_threshold, "QP count/threshold that triggers automatic "
-         "db flow control mode (default = 2000)");
+MODULE_PARM_DESC(db_fc_threshold,
+         "QP count/threshold that triggers"
+         " automatic db flow control mode (default = 1000)");
+
+int db_coalescing_threshold;
+module_param(db_coalescing_threshold, int, 0644);
+MODULE_PARM_DESC(db_coalescing_threshold,
+         "QP count/threshold that triggers"
+         " disabling db coalescing (default = 0)");
+
+static int max_fr_immd = T4_MAX_FR_IMMD;
+module_param(max_fr_immd, int, 0644);
+MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");
 
 static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
 {
@@ -76,7 +87,7 @@ static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
 
 static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
 {
-    if (!ocqp_support || !t4_ocqp_supported())
+    if (!ocqp_support || !ocqp_supported(&rdev->lldi))
         return -ENOSYS;
     sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
     if (!sq->dma_addr)
@@ -129,7 +140,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
     int wr_len;
     struct c4iw_wr_wait wr_wait;
     struct sk_buff *skb;
-    int ret;
+    int ret = 0;
     int eqsize;
 
     wq->sq.qid = c4iw_get_qpid(rdev, uctx);
@@ -169,25 +180,24 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
     }
 
     if (user) {
-        ret = alloc_oc_sq(rdev, &wq->sq);
-        if (ret)
+        if (alloc_oc_sq(rdev, &wq->sq) && alloc_host_sq(rdev, &wq->sq))
             goto free_hwaddr;
-
-        ret = alloc_host_sq(rdev, &wq->sq);
-        if (ret)
-            goto free_sq;
-    } else
-        ret = alloc_host_sq(rdev, &wq->sq);
-    if (ret)
-        goto free_hwaddr;
+    } else {
+        ret = alloc_host_sq(rdev, &wq->sq);
+        if (ret)
+            goto free_hwaddr;
+    }
+
     memset(wq->sq.queue, 0, wq->sq.memsize);
     dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
 
     wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
                       wq->rq.memsize, &(wq->rq.dma_addr),
                       GFP_KERNEL);
-    if (!wq->rq.queue)
+    if (!wq->rq.queue) {
+        ret = -ENOMEM;
         goto free_sq;
+    }
     PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
          __func__, wq->sq.queue,
          (unsigned long long)virt_to_phys(wq->sq.queue),
@@ -532,7 +542,7 @@ static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
 }
 
 static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
-             struct ib_send_wr *wr, u8 *len16)
+             struct ib_send_wr *wr, u8 *len16, u8 t5dev)
 {
 
     struct fw_ri_immd *imdp;
@@ -554,28 +564,51 @@ static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
     wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
     wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
                     0xffffffff);
-    WARN_ON(pbllen > T4_MAX_FR_IMMD);
-    imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
-    imdp->op = FW_RI_DATA_IMMD;
-    imdp->r1 = 0;
-    imdp->r2 = 0;
-    imdp->immdlen = cpu_to_be32(pbllen);
-    p = (__be64 *)(imdp + 1);
-    rem = pbllen;
-    for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
-        *p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
-        rem -= sizeof *p;
-        if (++p == (__be64 *)&sq->queue[sq->size])
-            p = (__be64 *)sq->queue;
-    }
-    BUG_ON(rem < 0);
-    while (rem) {
-        *p = 0;
-        rem -= sizeof *p;
-        if (++p == (__be64 *)&sq->queue[sq->size])
-            p = (__be64 *)sq->queue;
+
+    if (t5dev && use_dsgl && (pbllen > max_fr_immd)) {
+        struct c4iw_fr_page_list *c4pl =
+            to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
+        struct fw_ri_dsgl *sglp;
+
+        for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
+            wr->wr.fast_reg.page_list->page_list[i] = (__force u64)
+                cpu_to_be64((u64)
+                wr->wr.fast_reg.page_list->page_list[i]);
+        }
+
+        sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
+        sglp->op = FW_RI_DATA_DSGL;
+        sglp->r1 = 0;
+        sglp->nsge = cpu_to_be16(1);
+        sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
+        sglp->len0 = cpu_to_be32(pbllen);
+
+        *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
+    } else {
+        imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
+        imdp->op = FW_RI_DATA_IMMD;
+        imdp->r1 = 0;
+        imdp->r2 = 0;
+        imdp->immdlen = cpu_to_be32(pbllen);
+        p = (__be64 *)(imdp + 1);
+        rem = pbllen;
+        for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
+            *p = cpu_to_be64(
+                (u64)wr->wr.fast_reg.page_list->page_list[i]);
+            rem -= sizeof(*p);
+            if (++p == (__be64 *)&sq->queue[sq->size])
+                p = (__be64 *)sq->queue;
+        }
+        BUG_ON(rem < 0);
+        while (rem) {
+            *p = 0;
+            rem -= sizeof(*p);
+            if (++p == (__be64 *)&sq->queue[sq->size])
+                p = (__be64 *)sq->queue;
+        }
+        *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
+                      + pbllen, 16);
     }
-    *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen, 16);
     return 0;
 }
 
@@ -676,7 +709,10 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
         case IB_WR_FAST_REG_MR:
             fw_opcode = FW_RI_FR_NSMR_WR;
             swsqe->opcode = FW_RI_FAST_REGISTER;
-            err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16);
+            err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16,
+                        is_t5(
+                        qhp->rhp->rdev.lldi.adapter_type) ?
+                        1 : 0);
             break;
         case IB_WR_LOCAL_INV:
             if (wr->send_flags & IB_SEND_FENCE)
@@ -1448,6 +1484,9 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
         rhp->db_state = NORMAL;
         idr_for_each(&rhp->qpidr, enable_qp_db, NULL);
     }
+    if (db_coalescing_threshold >= 0)
+        if (rhp->qpcnt <= db_coalescing_threshold)
+            cxgb4_enable_db_coalescing(rhp->rdev.lldi.ports[0]);
     spin_unlock_irq(&rhp->lock);
     atomic_dec(&qhp->refcnt);
     wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
@@ -1559,11 +1598,15 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
     spin_lock_irq(&rhp->lock);
     if (rhp->db_state != NORMAL)
         t4_disable_wq_db(&qhp->wq);
-    if (++rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
+    rhp->qpcnt++;
+    if (rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
         rhp->rdev.stats.db_state_transitions++;
         rhp->db_state = FLOW_CONTROL;
         idr_for_each(&rhp->qpidr, disable_qp_db, NULL);
     }
+    if (db_coalescing_threshold >= 0)
+        if (rhp->qpcnt > db_coalescing_threshold)
+            cxgb4_disable_db_coalescing(rhp->rdev.lldi.ports[0]);
     ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
     spin_unlock_irq(&rhp->lock);
     if (ret)
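Note: qp.c adds two thresholds next to the existing db_fc_threshold (whose default drops from 2000 to 1000). db_coalescing_threshold disables doorbell coalescing on port 0 once the QP count exceeds it at create time and re-enables it when destroys bring the count back down; a negative value (set in device.c on T5 unless allow_db_coalescing_on_t5 is passed) skips the bookkeeping entirely. Condensed from the two hunks above (illustrative; both run under rhp->lock):

    /* c4iw_create_qp() */
    rhp->qpcnt++;
    if (db_coalescing_threshold >= 0 &&
        rhp->qpcnt > db_coalescing_threshold)
        cxgb4_disable_db_coalescing(rhp->rdev.lldi.ports[0]);

    /* c4iw_destroy_qp() */
    if (db_coalescing_threshold >= 0 &&
        rhp->qpcnt <= db_coalescing_threshold)
        cxgb4_enable_db_coalescing(rhp->rdev.lldi.ports[0]);

max_fr_immd similarly gates the fastreg path: PBLs larger than it go out as a DSGL on T5 (see build_fastreg above) instead of immediate data.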
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 16f26ab29302..ebcb03bd1b72 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -84,7 +84,7 @@ struct t4_status_page {
              sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
 #define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
             sizeof(struct fw_ri_immd)) & ~31UL)
-#define T4_MAX_FR_DEPTH (T4_MAX_FR_IMMD / sizeof(u64))
+#define T4_MAX_FR_DEPTH (1024 / sizeof(u64))
 
 #define T4_RQ_NUM_SLOTS 2
 #define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS)
@@ -280,15 +280,6 @@ static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
 #endif
 }
 
-static inline int t4_ocqp_supported(void)
-{
-#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
-    return 1;
-#else
-    return 0;
-#endif
-}
-
 enum {
     T4_SQ_ONCHIP = (1<<0),
 };
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index aed8afee56da..6d7f453b4d05 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -40,6 +40,7 @@
 #include <linux/slab.h>
 #include <linux/highmem.h>
 #include <linux/io.h>
+#include <linux/aio.h>
 #include <linux/jiffies.h>
 #include <linux/cpu.h>
 #include <asm/pgtable.h>
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 439c35d4a669..ea93870266eb 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -620,7 +620,7 @@ void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
         goto bail;
     }
 
-    opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
+    opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f;
     dev->opstats[opcode].n_bytes += tlen;
     dev->opstats[opcode].n_packets++;
 
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index add98d01476c..d1f5f1dd77b0 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -204,7 +204,6 @@ static struct id_map_entry *
 id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
 {
     int ret;
-    static int next_id;
     struct id_map_entry *ent;
     struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
 
@@ -223,9 +222,8 @@ id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
     idr_preload(GFP_KERNEL);
     spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);
 
-    ret = idr_alloc(&sriov->pv_id_table, ent, next_id, 0, GFP_NOWAIT);
+    ret = idr_alloc_cyclic(&sriov->pv_id_table, ent, 0, 0, GFP_NOWAIT);
     if (ret >= 0) {
-        next_id = max(ret + 1, 0);
         ent->pv_cm_id = (u32)ret;
         sl_id_map_add(ibdev, ent);
         list_add_tail(&ent->list, &sriov->cm_list);
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index ae67df35dd4d..73b3a7132587 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -228,7 +228,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
         vector = dev->eq_table[vector % ibdev->num_comp_vectors];
 
     err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
-                cq->db.dma, &cq->mcq, vector, 0);
+                cq->db.dma, &cq->mcq, vector, 0, 0);
     if (err)
         goto err_dbmap;
 
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 934792c477bc..4d599cedbb0b 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -93,7 +93,7 @@ static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
 __be64 mlx4_ib_gen_node_guid(void)
 {
 #define NODE_GUID_HI    ((u64) (((u64)IB_OPENIB_OUI) << 40))
-    return cpu_to_be64(NODE_GUID_HI | random32());
+    return cpu_to_be64(NODE_GUID_HI | prandom_u32());
 }
 
 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 67647e264611..418004c93feb 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -2948,7 +2948,7 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
             nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
                   nesvnic->netdev->name, vlan_tag);
 
-            __vlan_hwaccel_put_tag(rx_skb, vlan_tag);
+            __vlan_hwaccel_put_tag(rx_skb, htons(ETH_P_8021Q), vlan_tag);
         }
         if (nes_use_lro)
             lro_receive_skb(&nesvnic->lro_mgr, rx_skb, NULL);
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 85cf4d1ac442..49eb5111d2cd 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1599,7 +1599,7 @@ static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev,
 
     /* Enable/Disable VLAN Stripping */
     u32temp = nes_read_indexed(nesdev, NES_IDX_PCIX_DIAG);
-    if (features & NETIF_F_HW_VLAN_RX)
+    if (features & NETIF_F_HW_VLAN_CTAG_RX)
         u32temp &= 0xfdffffff;
     else
         u32temp |= 0x02000000;
@@ -1614,10 +1614,10 @@ static netdev_features_t nes_fix_features(struct net_device *netdev, netdev_feat
      * Since there is no support for separate rx/tx vlan accel
      * enable/disable make sure tx flag is always in same state as rx.
      */
-    if (features & NETIF_F_HW_VLAN_RX)
-        features |= NETIF_F_HW_VLAN_TX;
+    if (features & NETIF_F_HW_VLAN_CTAG_RX)
+        features |= NETIF_F_HW_VLAN_CTAG_TX;
     else
-        features &= ~NETIF_F_HW_VLAN_TX;
+        features &= ~NETIF_F_HW_VLAN_CTAG_TX;
 
     return features;
 }
@@ -1628,7 +1628,7 @@ static int nes_set_features(struct net_device *netdev, netdev_features_t feature
     struct nes_device *nesdev = nesvnic->nesdev;
     u32 changed = netdev->features ^ features;
 
-    if (changed & NETIF_F_HW_VLAN_RX)
+    if (changed & NETIF_F_HW_VLAN_CTAG_RX)
         nes_vlan_mode(netdev, nesdev, features);
 
     return 0;
@@ -1706,11 +1706,11 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
     netdev->dev_addr[4] = (u8)(u64temp>>8);
     netdev->dev_addr[5] = (u8)u64temp;
 
-    netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX;
+    netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
     if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV))
         netdev->hw_features |= NETIF_F_TSO;
 
-    netdev->features = netdev->hw_features | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_TX;
+    netdev->features = netdev->hw_features | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX;
     netdev->hw_features |= NETIF_F_LRO;
 
     nes_debug(NES_DBG_INIT, "nesvnic = %p, reported features = 0x%lX, QPid = %d,"
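Note: the nes changes follow two VLAN API updates from this cycle: __vlan_hwaccel_put_tag() now takes the tag protocol as an explicit argument, and the NETIF_F_HW_VLAN_* feature flags were renamed NETIF_F_HW_VLAN_CTAG_* (customer tag, i.e. 802.1Q) to make room for 802.1ad service-tag offloads. Receive-side sketch with the new signature (rx_skb and vlan_tag as in the nes_hw.c hunk above):

    #include <linux/if_vlan.h>

    /* the 802.1Q ethertype is now passed explicitly */
    __vlan_hwaccel_put_tag(rx_skb, htons(ETH_P_8021Q), vlan_tag);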
diff --git a/drivers/infiniband/hw/qib/Kconfig b/drivers/infiniband/hw/qib/Kconfig
index 8349f9c5064c..1e603a375069 100644
--- a/drivers/infiniband/hw/qib/Kconfig
+++ b/drivers/infiniband/hw/qib/Kconfig
@@ -1,7 +1,7 @@
 config INFINIBAND_QIB
-    tristate "QLogic PCIe HCA support"
+    tristate "Intel PCIe HCA support"
     depends on 64BIT
     ---help---
-    This is a low-level driver for QLogic PCIe QLE InfiniBand host
-    channel adapters. This driver does not support the QLogic
+    This is a low-level driver for Intel PCIe QLE InfiniBand host
+    channel adapters. This driver does not support the Intel
     HyperTransport card (model QHT7140).
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 5423edcab51f..216092477dfc 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2013 Intel Corporation. All rights reserved.
  * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
@@ -63,8 +64,8 @@ MODULE_PARM_DESC(compat_ddr_negotiate,
63 "Attempt pre-IBTA 1.2 DDR speed negotiation"); 64 "Attempt pre-IBTA 1.2 DDR speed negotiation");
64 65
65MODULE_LICENSE("Dual BSD/GPL"); 66MODULE_LICENSE("Dual BSD/GPL");
66MODULE_AUTHOR("QLogic <support@qlogic.com>"); 67MODULE_AUTHOR("Intel <ibsupport@intel.com>");
67MODULE_DESCRIPTION("QLogic IB driver"); 68MODULE_DESCRIPTION("Intel IB driver");
68MODULE_VERSION(QIB_DRIVER_VERSION); 69MODULE_VERSION(QIB_DRIVER_VERSION);
69 70
70/* 71/*
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 4f7aa301b3b1..b56c9428f3c5 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -39,7 +39,7 @@
 #include <linux/vmalloc.h>
 #include <linux/highmem.h>
 #include <linux/io.h>
-#include <linux/uio.h>
+#include <linux/aio.h>
 #include <linux/jiffies.h>
 #include <asm/pgtable.h>
 #include <linux/delay.h>
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index a099ac171e22..0232ae56b1fa 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2013 Intel Corporation. All rights reserved.
  * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
  * All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
@@ -51,7 +52,7 @@ static u32 qib_6120_iblink_state(u64);
 
 /*
  * This file contains all the chip-specific register information and
- * access functions for the QLogic QLogic_IB PCI-Express chip.
+ * access functions for the Intel Intel_IB PCI-Express chip.
  *
  */
 
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 50e33aa0b4e3..173f805790da 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
  * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
@@ -1138,7 +1138,7 @@ void qib_disable_after_error(struct qib_devdata *dd)
 static void qib_remove_one(struct pci_dev *);
 static int qib_init_one(struct pci_dev *, const struct pci_device_id *);
 
-#define DRIVER_LOAD_MSG "QLogic " QIB_DRV_NAME " loaded: "
+#define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: "
 #define PFX QIB_DRV_NAME ": "
 
 static DEFINE_PCI_DEVICE_TABLE(qib_pci_tbl) = {
@@ -1355,7 +1355,7 @@ static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
         dd = qib_init_iba6120_funcs(pdev, ent);
 #else
         qib_early_err(&pdev->dev,
-            "QLogic PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n",
+            "Intel PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n",
             ent->device);
         dd = ERR_PTR(-ENODEV);
 #endif
@@ -1371,7 +1371,7 @@ static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
     default:
         qib_early_err(&pdev->dev,
-            "Failing on unknown QLogic deviceid 0x%x\n",
+            "Failing on unknown Intel deviceid 0x%x\n",
             ent->device);
         ret = -ENODEV;
     }
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index 50a8a0d4fe67..911205d3d5a0 100644
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2013 Intel Corporation. All rights reserved.
  * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index ba51a4715a1d..7c0ab16a2fe2 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
  * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
@@ -2224,7 +2224,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
     ibdev->dma_ops = &qib_dma_mapping_ops;
 
     snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
-         "QLogic Infiniband HCA %s", init_utsname()->nodename);
+         "Intel Infiniband HCA %s", init_utsname()->nodename);
 
     ret = ib_register_device(ibdev, qib_create_port_files);
     if (ret)