author:    Vipul Pandya <vipul@chelsio.com>          2013-03-14 01:08:58 -0400
committer: David S. Miller <davem@davemloft.net>     2013-03-14 11:35:58 -0400
commit:    f079af7a117504b5b307b727858c972261047907 (patch)
tree:      69db19177b801eccd935815057470cff15d3f9e9 /drivers/infiniband/hw/cxgb4
parent:    622c62b52fae7c1367f0fd55442d5e162c052d5f (diff)
RDMA/cxgb4: Add Support for Chelsio T5 adapter
Adds support for Chelsio T5 adapter.
Enables T5's Write Combining feature.
Signed-off-by: Vipul Pandya <vipul@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
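
The patch boils down to two things: a runtime chip-type check on lldi.adapter_type (is_t4()/is_t5()) that selects the T5-specific CPL structures and field layouts, and, on T5, mapping user doorbell pages write-combined instead of uncached in c4iw_mmap(). The stand-alone user-space sketch below only illustrates that dispatch pattern; it is not driver code, and the chip-code encoding here is a simplified stand-in for the real cxgb4 CHELSIO_CHIP_* macros.

/*
 * Minimal sketch of the T4/T5 dispatch this patch introduces.
 * The encoding below is an illustrative stand-in, not the exact
 * cxgb4 CHELSIO_CHIP_* definitions.
 */
#include <stdio.h>

#define CHIP_VERSION(code) ((code) >> 4)   /* stand-in encoding */
#define CHIP_T4 0x4
#define CHIP_T5 0x5

static int is_t4(unsigned int adapter_type)
{
        return CHIP_VERSION(adapter_type) == CHIP_T4;
}

static int is_t5(unsigned int adapter_type)
{
        return CHIP_VERSION(adapter_type) == CHIP_T5;
}

/*
 * Mirrors the c4iw_mmap() decision: on-chip queue memory is always
 * write-combined; other doorbell pages become write-combined only on T5.
 */
static const char *mmap_prot(unsigned int adapter_type, int is_ocqp_addr)
{
        if (is_ocqp_addr || is_t5(adapter_type))
                return "write-combined";
        return "non-cached";
}

int main(void)
{
        unsigned int t4 = CHIP_T4 << 4;    /* stand-in adapter_type values */
        unsigned int t5 = CHIP_T5 << 4;

        printf("T4 doorbell mapping: %s\n", mmap_prot(t4, 0));
        printf("T5 doorbell mapping: %s\n", mmap_prot(t5, 0));
        printf("is_t4(T4)=%d is_t4(T5)=%d\n", is_t4(t4), is_t4(t5));
        return 0;
}

Built with any C99 compiler, it shows T4 doorbell pages staying non-cached while T5 pages become write-combined, which is the behavioural change made by the provider.c hunk below.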
Diffstat (limited to 'drivers/infiniband/hw/cxgb4')
 drivers/infiniband/hw/cxgb4/cm.c       | 64
 drivers/infiniband/hw/cxgb4/device.c   | 13
 drivers/infiniband/hw/cxgb4/iw_cxgb4.h |  9
 drivers/infiniband/hw/cxgb4/provider.c | 15
 drivers/infiniband/hw/cxgb4/qp.c       |  2
 drivers/infiniband/hw/cxgb4/t4.h       |  9
 6 files changed, 77 insertions(+), 35 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 565bfb161c1a..272bf789c53b 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -511,12 +511,16 @@ static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
 static int send_connect(struct c4iw_ep *ep)
 {
        struct cpl_act_open_req *req;
+       struct cpl_t5_act_open_req *t5_req;
        struct sk_buff *skb;
        u64 opt0;
        u32 opt2;
        unsigned int mtu_idx;
        int wscale;
-       int wrlen = roundup(sizeof *req, 16);
+       int size = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
+                  sizeof(struct cpl_act_open_req) :
+                  sizeof(struct cpl_t5_act_open_req);
+       int wrlen = roundup(size, 16);
 
        PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);
 
@@ -552,17 +556,36 @@ static int send_connect(struct c4iw_ep *ep)
                opt2 |= WND_SCALE_EN(1);
        t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
 
-       req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
-       INIT_TP_WR(req, 0);
-       OPCODE_TID(req) = cpu_to_be32(
-               MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid)));
-       req->local_port = ep->com.local_addr.sin_port;
-       req->peer_port = ep->com.remote_addr.sin_port;
-       req->local_ip = ep->com.local_addr.sin_addr.s_addr;
-       req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
-       req->opt0 = cpu_to_be64(opt0);
-       req->params = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, ep->l2t));
-       req->opt2 = cpu_to_be32(opt2);
+       if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
+               req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
+               INIT_TP_WR(req, 0);
+               OPCODE_TID(req) = cpu_to_be32(
+                               MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+                               ((ep->rss_qid << 14) | ep->atid)));
+               req->local_port = ep->com.local_addr.sin_port;
+               req->peer_port = ep->com.remote_addr.sin_port;
+               req->local_ip = ep->com.local_addr.sin_addr.s_addr;
+               req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
+               req->opt0 = cpu_to_be64(opt0);
+               req->params = cpu_to_be32(select_ntuple(ep->com.dev,
+                                       ep->dst, ep->l2t));
+               req->opt2 = cpu_to_be32(opt2);
+       } else {
+               t5_req = (struct cpl_t5_act_open_req *) skb_put(skb, wrlen);
+               INIT_TP_WR(t5_req, 0);
+               OPCODE_TID(t5_req) = cpu_to_be32(
+                               MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+                               ((ep->rss_qid << 14) | ep->atid)));
+               t5_req->local_port = ep->com.local_addr.sin_port;
+               t5_req->peer_port = ep->com.remote_addr.sin_port;
+               t5_req->local_ip = ep->com.local_addr.sin_addr.s_addr;
+               t5_req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
+               t5_req->opt0 = cpu_to_be64(opt0);
+               t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
+                               select_ntuple(ep->com.dev, ep->dst, ep->l2t)));
+               t5_req->opt2 = cpu_to_be32(opt2);
+       }
+
        set_bit(ACT_OPEN_REQ, &ep->com.history);
        return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 }
@@ -2869,12 +2892,14 @@ static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
 static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
 {
        u32 l2info;
-       u16 vlantag, len, hdr_len;
+       u16 vlantag, len, hdr_len, eth_hdr_len;
        u8 intf;
        struct cpl_rx_pkt *cpl = cplhdr(skb);
        struct cpl_pass_accept_req *req;
        struct tcp_options_received tmp_opt;
+       struct c4iw_dev *dev;
 
+       dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
        /* Store values from cpl_rx_pkt in temporary location. */
        vlantag = (__force u16) cpl->vlan;
        len = (__force u16) cpl->len;
@@ -2898,14 +2923,16 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
                         V_SYN_MAC_IDX(G_RX_MACIDX(
                         (__force int) htonl(l2info))) |
                         F_SYN_XACT_MATCH);
+       eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
+                           G_RX_ETHHDR_LEN((__force int) htonl(l2info)) :
+                           G_RX_T5_ETHHDR_LEN((__force int) htonl(l2info));
        req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(
                                        (__force int) htonl(l2info))) |
                                   V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(
                                        (__force int) htons(hdr_len))) |
                                   V_IP_HDR_LEN(G_RX_IPHDR_LEN(
                                        (__force int) htons(hdr_len))) |
-                                  V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(
-                                       (__force int) htonl(l2info))));
+                                  V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(eth_hdr_len)));
        req->vlan = (__force __be16) vlantag;
        req->len = (__force __be16) len;
        req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
@@ -2993,7 +3020,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
        u16 window;
        struct port_info *pi;
        struct net_device *pdev;
-       u16 rss_qid;
+       u16 rss_qid, eth_hdr_len;
        int step;
        u32 tx_chan;
        struct neighbour *neigh;
@@ -3022,7 +3049,10 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
                goto reject;
        }
 
-       if (G_RX_ETHHDR_LEN(ntohl(cpl->l2info)) == ETH_HLEN) {
+       eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
+               G_RX_ETHHDR_LEN(htonl(cpl->l2info)) :
+               G_RX_T5_ETHHDR_LEN(htonl(cpl->l2info));
+       if (eth_hdr_len == ETH_HLEN) {
                eh = (struct ethhdr *)(req + 1);
                iph = (struct iphdr *)(eh + 1);
        } else {
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 80069ad595c1..3487c08828f7 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -41,7 +41,7 @@
 #define DRV_VERSION "0.1"
 
 MODULE_AUTHOR("Steve Wise");
-MODULE_DESCRIPTION("Chelsio T4 RDMA Driver");
+MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRV_VERSION);
 
@@ -614,7 +614,7 @@ static int rdma_supported(const struct cxgb4_lld_info *infop)
 {
        return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
               infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
-              infop->vr->cq.size > 0 && infop->vr->ocq.size > 0;
+              infop->vr->cq.size > 0;
 }
 
 static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
@@ -627,6 +627,11 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
                       pci_name(infop->pdev));
                return ERR_PTR(-ENOSYS);
        }
+       if (!ocqp_supported(infop))
+               pr_info("%s: On-Chip Queues not supported on this device.\n",
+                       pci_name(infop->pdev));
+       if (!is_t4(infop->adapter_type))
+               db_fc_threshold = 100000;
        devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
        if (!devp) {
                printk(KERN_ERR MOD "Cannot allocate ib device\n");
@@ -678,8 +683,8 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
        int i;
 
        if (!vers_printed++)
-               printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n",
-                      DRV_VERSION);
+               pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
+                       DRV_VERSION);
 
        ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
        if (!ctx) {
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 7eec5e13fa8c..34c7e62b8676 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -817,6 +817,15 @@ static inline int compute_wscale(int win)
        return wscale;
 }
 
+static inline int ocqp_supported(const struct cxgb4_lld_info *infop)
+{
+#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
+       return infop->vr->ocq.size > 0;
+#else
+       return 0;
+#endif
+}
+
 u32 c4iw_id_alloc(struct c4iw_id_table *alloc);
 void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj);
 int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index e084fdc6da7f..7e94c9a656a1 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -162,8 +162,14 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
                 */
                if (addr >= rdev->oc_mw_pa)
                        vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
-               else
-                       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+               else {
+                       if (is_t5(rdev->lldi.adapter_type))
+                               vma->vm_page_prot =
+                                       t4_pgprot_wc(vma->vm_page_prot);
+                       else
+                               vma->vm_page_prot =
+                                       pgprot_noncached(vma->vm_page_prot);
+               }
                ret = io_remap_pfn_range(vma, vma->vm_start,
                                         addr >> PAGE_SHIFT,
                                         len, vma->vm_page_prot);
@@ -263,7 +269,7 @@ static int c4iw_query_device(struct ib_device *ibdev,
        dev = to_c4iw_dev(ibdev);
        memset(props, 0, sizeof *props);
        memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
-       props->hw_ver = dev->rdev.lldi.adapter_type;
+       props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type);
        props->fw_ver = dev->rdev.lldi.fw_vers;
        props->device_cap_flags = dev->device_cap_flags;
        props->page_size_cap = T4_PAGESIZE_MASK;
@@ -346,7 +352,8 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
        struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
                                                 ibdev.dev);
        PDBG("%s dev 0x%p\n", __func__, dev);
-       return sprintf(buf, "%d\n", c4iw_dev->rdev.lldi.adapter_type);
+       return sprintf(buf, "%d\n",
+                      CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
 }
 
 static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 17ba4f8bc12d..c46024409c4e 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -76,7 +76,7 @@ static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
 
 static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
 {
-       if (!ocqp_support || !t4_ocqp_supported())
+       if (!ocqp_support || !ocqp_supported(&rdev->lldi))
                return -ENOSYS;
        sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
        if (!sq->dma_addr)
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 16f26ab29302..689edc96155d 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -280,15 +280,6 @@ static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
 #endif
 }
 
-static inline int t4_ocqp_supported(void)
-{
-#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
-       return 1;
-#else
-       return 0;
-#endif
-}
-
 enum {
        T4_SQ_ONCHIP = (1<<0),
 };