author	Grant Likely <grant.likely@secretlab.ca>	2010-05-22 02:36:56 -0400
committer	Grant Likely <grant.likely@secretlab.ca>	2010-05-22 02:36:56 -0400
commit	cf9b59e9d3e008591d1f54830f570982bb307a0d (patch)
tree	113478ce8fd8c832ba726ffdf59b82cb46356476 /drivers/infiniband
parent	44504b2bebf8b5823c59484e73096a7d6574471d (diff)
parent	f4b87dee923342505e1ddba8d34ce9de33e75050 (diff)
Merge remote branch 'origin' into secretlab/next-devicetree
Merging in current state of Linus' tree to deal with merge conflicts and
build failures in vio.c after merge.

Conflicts:
	drivers/i2c/busses/i2c-cpm.c
	drivers/i2c/busses/i2c-mpc.c
	drivers/net/gianfar.c

Also fixed up one line in arch/powerpc/kernel/vio.c to use the correct
node pointer.

Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/Kconfig | 1
-rw-r--r--  drivers/infiniband/Makefile | 1
-rw-r--r--  drivers/infiniband/core/cma.c | 74
-rw-r--r--  drivers/infiniband/core/mad.c | 4
-rw-r--r--  drivers/infiniband/core/ucm.c | 3
-rw-r--r--  drivers/infiniband/core/ucma.c | 4
-rw-r--r--  drivers/infiniband/core/user_mad.c | 12
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 11
-rw-r--r--  drivers/infiniband/hw/amso1100/c2.h | 2
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_alloc.c | 4
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_cq.c | 4
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_mq.h | 2
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_provider.h | 2
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_rnic.c | 12
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.c | 12
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.h | 2
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_wr.h | 4
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.c | 133
-rw-r--r--  drivers/infiniband/hw/cxgb4/Kconfig | 18
-rw-r--r--  drivers/infiniband/hw/cxgb4/Makefile | 5
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c | 2374
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c | 882
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c | 520
-rw-r--r--  drivers/infiniband/hw/cxgb4/ev.c | 193
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 745
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c | 811
-rw-r--r--  drivers/infiniband/hw/cxgb4/provider.c | 518
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 1577
-rw-r--r--  drivers/infiniband/hw/cxgb4/resource.c | 417
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4.h | 550
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4fw_ri_api.h | 829
-rw-r--r--  drivers/infiniband/hw/cxgb4/user.h | 66
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_iba6110.c | 2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_iba6120.c | 4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_iba7220.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c | 8
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 1
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 50
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_allocator.c | 8
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c | 6
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.h | 2
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c | 12
-rw-r--r--  drivers/infiniband/hw/nes/nes_nic.c | 23
-rw-r--r--  drivers/infiniband/hw/nes/nes_utils.c | 10
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c | 2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ethtool.c | 20
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 15
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c | 9
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h | 4
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c | 115
51 files changed, 9820 insertions, 267 deletions
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 975adce5f40c..330d2a423362 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -46,6 +46,7 @@ source "drivers/infiniband/hw/ipath/Kconfig"
46source "drivers/infiniband/hw/ehca/Kconfig" 46source "drivers/infiniband/hw/ehca/Kconfig"
47source "drivers/infiniband/hw/amso1100/Kconfig" 47source "drivers/infiniband/hw/amso1100/Kconfig"
48source "drivers/infiniband/hw/cxgb3/Kconfig" 48source "drivers/infiniband/hw/cxgb3/Kconfig"
49source "drivers/infiniband/hw/cxgb4/Kconfig"
49source "drivers/infiniband/hw/mlx4/Kconfig" 50source "drivers/infiniband/hw/mlx4/Kconfig"
50source "drivers/infiniband/hw/nes/Kconfig" 51source "drivers/infiniband/hw/nes/Kconfig"
51 52
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index ed35e4496241..0c4e589d746e 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_INFINIBAND_IPATH) += hw/ipath/
 obj-$(CONFIG_INFINIBAND_EHCA) += hw/ehca/
 obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/
 obj-$(CONFIG_INFINIBAND_CXGB3) += hw/cxgb3/
+obj-$(CONFIG_INFINIBAND_CXGB4) += hw/cxgb4/
 obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/
 obj-$(CONFIG_INFINIBAND_NES) += hw/nes/
 obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 6d777069d86d..b930b8110a63 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -79,7 +79,6 @@ static DEFINE_IDR(sdp_ps);
 static DEFINE_IDR(tcp_ps);
 static DEFINE_IDR(udp_ps);
 static DEFINE_IDR(ipoib_ps);
-static int next_port;
 
 struct cma_device {
 	struct list_head list;
@@ -1677,13 +1676,13 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
 	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
 		return -EINVAL;
 
-	id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL);
+	id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
+				     GFP_KERNEL);
 	if (!id->route.path_rec) {
 		ret = -ENOMEM;
 		goto err;
 	}
 
-	memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
 	id->route.num_paths = num_paths;
 	return 0;
 err:
@@ -1970,47 +1969,33 @@ err1:
 
 static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
 {
-	struct rdma_bind_list *bind_list;
-	int port, ret, low, high;
-
-	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
-	if (!bind_list)
-		return -ENOMEM;
-
-retry:
-	/* FIXME: add proper port randomization per like inet_csk_get_port */
-	do {
-		ret = idr_get_new_above(ps, bind_list, next_port, &port);
-	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));
-
-	if (ret)
-		goto err1;
+	static unsigned int last_used_port;
+	int low, high, remaining;
+	unsigned int rover;
 
 	inet_get_local_port_range(&low, &high);
-	if (port > high) {
-		if (next_port != low) {
-			idr_remove(ps, port);
-			next_port = low;
-			goto retry;
-		}
-		ret = -EADDRNOTAVAIL;
-		goto err2;
+	remaining = (high - low) + 1;
+	rover = net_random() % remaining + low;
+retry:
+	if (last_used_port != rover &&
+	    !idr_find(ps, (unsigned short) rover)) {
+		int ret = cma_alloc_port(ps, id_priv, rover);
+		/*
+		 * Remember previously used port number in order to avoid
+		 * re-using same port immediately after it is closed.
+		 */
+		if (!ret)
+			last_used_port = rover;
+		if (ret != -EADDRNOTAVAIL)
+			return ret;
 	}
-
-	if (port == high)
-		next_port = low;
-	else
-		next_port = port + 1;
-
-	bind_list->ps = ps;
-	bind_list->port = (unsigned short) port;
-	cma_bind_port(bind_list, id_priv);
-	return 0;
-err2:
-	idr_remove(ps, port);
-err1:
-	kfree(bind_list);
-	return ret;
+	if (--remaining) {
+		rover++;
+		if ((rover < low) || (rover > high))
+			rover = low;
+		goto retry;
+	}
+	return -EADDRNOTAVAIL;
 }
 
 static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
@@ -2995,12 +2980,7 @@ static void cma_remove_one(struct ib_device *device)
 
 static int __init cma_init(void)
 {
-	int ret, low, high, remaining;
-
-	get_random_bytes(&next_port, sizeof next_port);
-	inet_get_local_port_range(&low, &high);
-	remaining = (high - low) + 1;
-	next_port = ((unsigned int) next_port % remaining) + low;
+	int ret;
 
 	cma_wq = create_singlethread_workqueue("rdma_cm");
 	if (!cma_wq)
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 1df1194aeba4..6dc7b77d5d29 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -291,13 +291,11 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	}
 
 	if (mad_reg_req) {
-		reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
+		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
 		if (!reg_req) {
 			ret = ERR_PTR(-ENOMEM);
 			goto error3;
 		}
-		/* Make a copy of the MAD registration request */
-		memcpy(reg_req, mad_reg_req, sizeof *reg_req);
 	}
 
 	/* Now, fill in the various structures */
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 512b1c43460c..46474842cfe9 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -1181,7 +1181,7 @@ static int ib_ucm_open(struct inode *inode, struct file *filp)
 	file->filp = filp;
 	file->device = container_of(inode->i_cdev, struct ib_ucm_device, cdev);
 
-	return 0;
+	return nonseekable_open(inode, filp);
 }
 
 static int ib_ucm_close(struct inode *inode, struct file *filp)
@@ -1229,6 +1229,7 @@ static const struct file_operations ucm_fops = {
 	.release = ib_ucm_close,
 	.write = ib_ucm_write,
 	.poll = ib_ucm_poll,
+	.llseek = no_llseek,
 };
 
 static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 46185084121e..ac7edc24165c 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1220,7 +1220,8 @@ static int ucma_open(struct inode *inode, struct file *filp)
 
 	filp->private_data = file;
 	file->filp = filp;
-	return 0;
+
+	return nonseekable_open(inode, filp);
 }
 
 static int ucma_close(struct inode *inode, struct file *filp)
@@ -1250,6 +1251,7 @@ static const struct file_operations ucma_fops = {
 	.release = ucma_close,
 	.write = ucma_write,
 	.poll = ucma_poll,
+	.llseek = no_llseek,
 };
 
 static struct miscdevice ucma_misc = {
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index e7db054fb1c8..6babb72b39fc 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -781,7 +781,7 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
 {
 	struct ib_umad_port *port;
 	struct ib_umad_file *file;
-	int ret = 0;
+	int ret;
 
 	port = container_of(inode->i_cdev, struct ib_umad_port, cdev);
 	if (port)
@@ -814,6 +814,8 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
 
 	list_add_tail(&file->port_list, &port->file_list);
 
+	ret = nonseekable_open(inode, filp);
+
 out:
 	mutex_unlock(&port->file_mutex);
 	return ret;
@@ -866,7 +868,8 @@ static const struct file_operations umad_fops = {
 	.compat_ioctl = ib_umad_compat_ioctl,
 #endif
 	.open = ib_umad_open,
-	.release = ib_umad_close
+	.release = ib_umad_close,
+	.llseek = no_llseek,
 };
 
 static int ib_umad_sm_open(struct inode *inode, struct file *filp)
@@ -903,7 +906,7 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp)
 
 	filp->private_data = port;
 
-	return 0;
+	return nonseekable_open(inode, filp);
 
 fail:
 	kref_put(&port->umad_dev->ref, ib_umad_release_dev);
@@ -933,7 +936,8 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp)
 static const struct file_operations umad_sm_fops = {
 	.owner = THIS_MODULE,
 	.open = ib_umad_sm_open,
-	.release = ib_umad_sm_close
+	.release = ib_umad_sm_close,
+	.llseek = no_llseek,
 };
 
 static struct ib_client umad_client = {
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index fb3526254426..ec83e9fe387b 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -369,7 +369,8 @@ static const struct file_operations uverbs_event_fops = {
 	.read = ib_uverbs_event_read,
 	.poll = ib_uverbs_event_poll,
 	.release = ib_uverbs_event_close,
-	.fasync = ib_uverbs_event_fasync
+	.fasync = ib_uverbs_event_fasync,
+	.llseek = no_llseek,
 };
 
 void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
@@ -623,7 +624,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
 
 	filp->private_data = file;
 
-	return 0;
+	return nonseekable_open(inode, filp);
 
 err_module:
 	module_put(dev->ib_dev->owner);
@@ -651,7 +652,8 @@ static const struct file_operations uverbs_fops = {
 	.owner = THIS_MODULE,
 	.write = ib_uverbs_write,
 	.open = ib_uverbs_open,
-	.release = ib_uverbs_close
+	.release = ib_uverbs_close,
+	.llseek = no_llseek,
 };
 
 static const struct file_operations uverbs_mmap_fops = {
@@ -659,7 +661,8 @@ static const struct file_operations uverbs_mmap_fops = {
 	.write = ib_uverbs_write,
 	.mmap = ib_uverbs_mmap,
 	.open = ib_uverbs_open,
-	.release = ib_uverbs_close
+	.release = ib_uverbs_close,
+	.llseek = no_llseek,
 };
 
 static struct ib_client uverbs_client = {
diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h
index f7ff66f98361..6ae698e68775 100644
--- a/drivers/infiniband/hw/amso1100/c2.h
+++ b/drivers/infiniband/hw/amso1100/c2.h
@@ -250,7 +250,7 @@ struct c2_array {
 struct sp_chunk {
 	struct sp_chunk *next;
 	dma_addr_t dma_addr;
-	DECLARE_PCI_UNMAP_ADDR(mapping);
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 	u16 head;
 	u16 shared_ptr[0];
 };
diff --git a/drivers/infiniband/hw/amso1100/c2_alloc.c b/drivers/infiniband/hw/amso1100/c2_alloc.c
index d4f5f5d42e90..78d247ec6961 100644
--- a/drivers/infiniband/hw/amso1100/c2_alloc.c
+++ b/drivers/infiniband/hw/amso1100/c2_alloc.c
@@ -49,7 +49,7 @@ static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask,
 		return -ENOMEM;
 
 	new_head->dma_addr = dma_addr;
-	pci_unmap_addr_set(new_head, mapping, new_head->dma_addr);
+	dma_unmap_addr_set(new_head, mapping, new_head->dma_addr);
 
 	new_head->next = NULL;
 	new_head->head = 0;
@@ -81,7 +81,7 @@ void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)
 	while (root) {
 		next = root->next;
 		dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root,
-				  pci_unmap_addr(root, mapping));
+				  dma_unmap_addr(root, mapping));
 		root = next;
 	}
 }
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
index f7b0fc23f413..49e0e8533f74 100644
--- a/drivers/infiniband/hw/amso1100/c2_cq.c
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -257,7 +257,7 @@ int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
 static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
 {
 	dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
-			  mq->msg_pool.host, pci_unmap_addr(mq, mapping));
+			  mq->msg_pool.host, dma_unmap_addr(mq, mapping));
 }
 
 static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
@@ -278,7 +278,7 @@ static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
 		   NULL, /* peer (currently unknown) */
 		   C2_MQ_HOST_TARGET);
 
-	pci_unmap_addr_set(mq, mapping, mq->host_dma);
+	dma_unmap_addr_set(mq, mapping, mq->host_dma);
 
 	return 0;
 }
diff --git a/drivers/infiniband/hw/amso1100/c2_mq.h b/drivers/infiniband/hw/amso1100/c2_mq.h
index acede007b94a..fc1b9a7cec4b 100644
--- a/drivers/infiniband/hw/amso1100/c2_mq.h
+++ b/drivers/infiniband/hw/amso1100/c2_mq.h
@@ -71,7 +71,7 @@ struct c2_mq {
 		u8 __iomem *adapter;
 	} msg_pool;
 	dma_addr_t host_dma;
-	DECLARE_PCI_UNMAP_ADDR(mapping);
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 	u16 hint_count;
 	u16 priv;
 	struct c2_mq_shared __iomem *peer;
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.h b/drivers/infiniband/hw/amso1100/c2_provider.h
index 1076df2ee96a..bf189987711f 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.h
+++ b/drivers/infiniband/hw/amso1100/c2_provider.h
@@ -50,7 +50,7 @@
 
 struct c2_buf_list {
 	void *buf;
-	DECLARE_PCI_UNMAP_ADDR(mapping)
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 };
 
 
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index 78c4bcc6ef60..85cfae4cad71 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -524,7 +524,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
 		err = -ENOMEM;
 		goto bail1;
 	}
-	pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
+	dma_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
 	pr_debug("%s rep_vq va %p dma %llx\n", __func__, q1_pages,
 		 (unsigned long long) c2dev->rep_vq.host_dma);
 	c2_mq_rep_init(&c2dev->rep_vq,
@@ -545,7 +545,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
 		err = -ENOMEM;
 		goto bail2;
 	}
-	pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
+	dma_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
 	pr_debug("%s aeq va %p dma %llx\n", __func__, q2_pages,
 		 (unsigned long long) c2dev->aeq.host_dma);
 	c2_mq_rep_init(&c2dev->aeq,
@@ -596,11 +596,11 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
  bail3:
 	dma_free_coherent(&c2dev->pcidev->dev,
 			  c2dev->aeq.q_size * c2dev->aeq.msg_size,
-			  q2_pages, pci_unmap_addr(&c2dev->aeq, mapping));
+			  q2_pages, dma_unmap_addr(&c2dev->aeq, mapping));
  bail2:
 	dma_free_coherent(&c2dev->pcidev->dev,
 			  c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
-			  q1_pages, pci_unmap_addr(&c2dev->rep_vq, mapping));
+			  q1_pages, dma_unmap_addr(&c2dev->rep_vq, mapping));
  bail1:
 	c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
  bail0:
@@ -637,13 +637,13 @@ void __devexit c2_rnic_term(struct c2_dev *c2dev)
 	dma_free_coherent(&c2dev->pcidev->dev,
 			  c2dev->aeq.q_size * c2dev->aeq.msg_size,
 			  c2dev->aeq.msg_pool.host,
-			  pci_unmap_addr(&c2dev->aeq, mapping));
+			  dma_unmap_addr(&c2dev->aeq, mapping));
 
 	/* Free the verbs reply queue */
 	dma_free_coherent(&c2dev->pcidev->dev,
 			  c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
 			  c2dev->rep_vq.msg_pool.host,
-			  pci_unmap_addr(&c2dev->rep_vq, mapping));
+			  dma_unmap_addr(&c2dev->rep_vq, mapping));
 
 	/* Free the MQ shared pointer pool */
 	c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 35f286f1ad1e..005b7b52bc1e 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -174,7 +174,7 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
 		kfree(cq->sw_queue);
 		return -ENOMEM;
 	}
-	pci_unmap_addr_set(cq, mapping, cq->dma_addr);
+	dma_unmap_addr_set(cq, mapping, cq->dma_addr);
 	memset(cq->queue, 0, size);
 	setup.id = cq->cqid;
 	setup.base_addr = (u64) (cq->dma_addr);
@@ -297,7 +297,7 @@ int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
 		goto err4;
 
 	memset(wq->queue, 0, depth * sizeof(union t3_wr));
-	pci_unmap_addr_set(wq, mapping, wq->dma_addr);
+	dma_unmap_addr_set(wq, mapping, wq->dma_addr);
 	wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
 	if (!kernel_domain)
 		wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
@@ -325,7 +325,7 @@ int cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
 	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
 			  (1UL << (cq->size_log2))
 			  * sizeof(struct t3_cqe), cq->queue,
-			  pci_unmap_addr(cq, mapping));
+			  dma_unmap_addr(cq, mapping));
 	cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
 	return err;
 }
@@ -336,7 +336,7 @@ int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq,
 	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
 			  (1UL << (wq->size_log2))
 			  * sizeof(union t3_wr), wq->queue,
-			  pci_unmap_addr(wq, mapping));
+			  dma_unmap_addr(wq, mapping));
 	kfree(wq->sq);
 	cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2));
 	kfree(wq->rq);
@@ -537,7 +537,7 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
 		err = -ENOMEM;
 		goto err;
 	}
-	pci_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
+	dma_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
 			   rdev_p->ctrl_qp.dma_addr);
 	rdev_p->ctrl_qp.doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
 	memset(rdev_p->ctrl_qp.workq, 0,
@@ -583,7 +583,7 @@ static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p)
 	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
 			  (1UL << T3_CTRL_QP_SIZE_LOG2)
 			  * sizeof(union t3_wr), rdev_p->ctrl_qp.workq,
-			  pci_unmap_addr(&rdev_p->ctrl_qp, mapping));
+			  dma_unmap_addr(&rdev_p->ctrl_qp, mapping));
 	return cxio_hal_clear_qp_ctx(rdev_p, T3_CTRL_QP_ID);
 }
 
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index 073373c2c560..8f0caf7d4482 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -71,7 +71,7 @@ struct cxio_hal_ctrl_qp {
 	wait_queue_head_t waitq;/* wait for RspQ/CQE msg */
 	union t3_wr *workq;	/* the work request queue */
 	dma_addr_t dma_addr;	/* pci bus address of the workq */
-	DECLARE_PCI_UNMAP_ADDR(mapping)
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 	void __iomem *doorbell;
 };
 
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index 15073b2da1c5..e5ddb63e7d23 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -691,7 +691,7 @@ struct t3_swrq {
 struct t3_wq {
 	union t3_wr *queue;		/* DMA accessable memory */
 	dma_addr_t dma_addr;		/* DMA address for HW */
-	DECLARE_PCI_UNMAP_ADDR(mapping)	/* unmap kruft */
+	DEFINE_DMA_UNMAP_ADDR(mapping);	/* unmap kruft */
 	u32 error;			/* 1 once we go to ERROR */
 	u32 qpid;
 	u32 wptr;			/* idx to next available WR slot */
@@ -718,7 +718,7 @@ struct t3_cq {
 	u32 wptr;
 	u32 size_log2;
 	dma_addr_t dma_addr;
-	DECLARE_PCI_UNMAP_ADDR(mapping)
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 	struct t3_cqe *queue;
 	struct t3_cqe *sw_queue;
 	u32 sw_rptr;
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index 63f975f3e30f..8e77dc543dd1 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -47,8 +47,6 @@ MODULE_DESCRIPTION("Chelsio T3 RDMA Driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRV_VERSION);
 
-cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS];
-
 static void open_rnic_dev(struct t3cdev *);
 static void close_rnic_dev(struct t3cdev *);
 static void iwch_event_handler(struct t3cdev *, u32, u32);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 4fef03296276..ebfb117ba68b 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -102,12 +102,9 @@ static unsigned int cong_flavor = 1;
102module_param(cong_flavor, uint, 0644); 102module_param(cong_flavor, uint, 0644);
103MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)"); 103MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");
104 104
105static void process_work(struct work_struct *work);
106static struct workqueue_struct *workq; 105static struct workqueue_struct *workq;
107static DECLARE_WORK(skb_work, process_work);
108 106
109static struct sk_buff_head rxq; 107static struct sk_buff_head rxq;
110static cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS];
111 108
112static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp); 109static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
113static void ep_timeout(unsigned long arg); 110static void ep_timeout(unsigned long arg);
@@ -151,7 +148,7 @@ int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2
151 return -EIO; 148 return -EIO;
152 } 149 }
153 error = l2t_send(tdev, skb, l2e); 150 error = l2t_send(tdev, skb, l2e);
154 if (error) 151 if (error < 0)
155 kfree_skb(skb); 152 kfree_skb(skb);
156 return error; 153 return error;
157} 154}
@@ -167,7 +164,7 @@ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
167 return -EIO; 164 return -EIO;
168 } 165 }
169 error = cxgb3_ofld_send(tdev, skb); 166 error = cxgb3_ofld_send(tdev, skb);
170 if (error) 167 if (error < 0)
171 kfree_skb(skb); 168 kfree_skb(skb);
172 return error; 169 return error;
173} 170}
@@ -302,27 +299,6 @@ static void release_ep_resources(struct iwch_ep *ep)
302 put_ep(&ep->com); 299 put_ep(&ep->com);
303} 300}
304 301
305static void process_work(struct work_struct *work)
306{
307 struct sk_buff *skb = NULL;
308 void *ep;
309 struct t3cdev *tdev;
310 int ret;
311
312 while ((skb = skb_dequeue(&rxq))) {
313 ep = *((void **) (skb->cb));
314 tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
315 ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
316 if (ret & CPL_RET_BUF_DONE)
317 kfree_skb(skb);
318
319 /*
320 * ep was referenced in sched(), and is freed here.
321 */
322 put_ep((struct iwch_ep_common *)ep);
323 }
324}
325
326static int status2errno(int status) 302static int status2errno(int status)
327{ 303{
328 switch (status) { 304 switch (status) {
@@ -2157,7 +2133,49 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
2157 2133
2158/* 2134/*
2159 * All the CM events are handled on a work queue to have a safe context. 2135 * All the CM events are handled on a work queue to have a safe context.
2136 * These are the real handlers that are called from the work queue.
2160 */ 2137 */
2138static const cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS] = {
2139 [CPL_ACT_ESTABLISH] = act_establish,
2140 [CPL_ACT_OPEN_RPL] = act_open_rpl,
2141 [CPL_RX_DATA] = rx_data,
2142 [CPL_TX_DMA_ACK] = tx_ack,
2143 [CPL_ABORT_RPL_RSS] = abort_rpl,
2144 [CPL_ABORT_RPL] = abort_rpl,
2145 [CPL_PASS_OPEN_RPL] = pass_open_rpl,
2146 [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
2147 [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
2148 [CPL_PASS_ESTABLISH] = pass_establish,
2149 [CPL_PEER_CLOSE] = peer_close,
2150 [CPL_ABORT_REQ_RSS] = peer_abort,
2151 [CPL_CLOSE_CON_RPL] = close_con_rpl,
2152 [CPL_RDMA_TERMINATE] = terminate,
2153 [CPL_RDMA_EC_STATUS] = ec_status,
2154};
2155
2156static void process_work(struct work_struct *work)
2157{
2158 struct sk_buff *skb = NULL;
2159 void *ep;
2160 struct t3cdev *tdev;
2161 int ret;
2162
2163 while ((skb = skb_dequeue(&rxq))) {
2164 ep = *((void **) (skb->cb));
2165 tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
2166 ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
2167 if (ret & CPL_RET_BUF_DONE)
2168 kfree_skb(skb);
2169
2170 /*
2171 * ep was referenced in sched(), and is freed here.
2172 */
2173 put_ep((struct iwch_ep_common *)ep);
2174 }
2175}
2176
2177static DECLARE_WORK(skb_work, process_work);
2178
2161static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) 2179static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
2162{ 2180{
2163 struct iwch_ep_common *epc = ctx; 2181 struct iwch_ep_common *epc = ctx;
@@ -2189,6 +2207,29 @@ static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
2189 return CPL_RET_BUF_DONE; 2207 return CPL_RET_BUF_DONE;
2190} 2208}
2191 2209
2210/*
2211 * All upcalls from the T3 Core go to sched() to schedule the
2212 * processing on a work queue.
2213 */
2214cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS] = {
2215 [CPL_ACT_ESTABLISH] = sched,
2216 [CPL_ACT_OPEN_RPL] = sched,
2217 [CPL_RX_DATA] = sched,
2218 [CPL_TX_DMA_ACK] = sched,
2219 [CPL_ABORT_RPL_RSS] = sched,
2220 [CPL_ABORT_RPL] = sched,
2221 [CPL_PASS_OPEN_RPL] = sched,
2222 [CPL_CLOSE_LISTSRV_RPL] = sched,
2223 [CPL_PASS_ACCEPT_REQ] = sched,
2224 [CPL_PASS_ESTABLISH] = sched,
2225 [CPL_PEER_CLOSE] = sched,
2226 [CPL_CLOSE_CON_RPL] = sched,
2227 [CPL_ABORT_REQ_RSS] = sched,
2228 [CPL_RDMA_TERMINATE] = sched,
2229 [CPL_RDMA_EC_STATUS] = sched,
2230 [CPL_SET_TCB_RPL] = set_tcb_rpl,
2231};
2232
2192int __init iwch_cm_init(void) 2233int __init iwch_cm_init(void)
2193{ 2234{
2194 skb_queue_head_init(&rxq); 2235 skb_queue_head_init(&rxq);
@@ -2197,46 +2238,6 @@ int __init iwch_cm_init(void)
2197 if (!workq) 2238 if (!workq)
2198 return -ENOMEM; 2239 return -ENOMEM;
2199 2240
2200 /*
2201 * All upcalls from the T3 Core go to sched() to
2202 * schedule the processing on a work queue.
2203 */
2204 t3c_handlers[CPL_ACT_ESTABLISH] = sched;
2205 t3c_handlers[CPL_ACT_OPEN_RPL] = sched;
2206 t3c_handlers[CPL_RX_DATA] = sched;
2207 t3c_handlers[CPL_TX_DMA_ACK] = sched;
2208 t3c_handlers[CPL_ABORT_RPL_RSS] = sched;
2209 t3c_handlers[CPL_ABORT_RPL] = sched;
2210 t3c_handlers[CPL_PASS_OPEN_RPL] = sched;
2211 t3c_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
2212 t3c_handlers[CPL_PASS_ACCEPT_REQ] = sched;
2213 t3c_handlers[CPL_PASS_ESTABLISH] = sched;
2214 t3c_handlers[CPL_PEER_CLOSE] = sched;
2215 t3c_handlers[CPL_CLOSE_CON_RPL] = sched;
2216 t3c_handlers[CPL_ABORT_REQ_RSS] = sched;
2217 t3c_handlers[CPL_RDMA_TERMINATE] = sched;
2218 t3c_handlers[CPL_RDMA_EC_STATUS] = sched;
2219 t3c_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl;
2220
2221 /*
2222 * These are the real handlers that are called from a
2223 * work queue.
2224 */
2225 work_handlers[CPL_ACT_ESTABLISH] = act_establish;
2226 work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
2227 work_handlers[CPL_RX_DATA] = rx_data;
2228 work_handlers[CPL_TX_DMA_ACK] = tx_ack;
2229 work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
2230 work_handlers[CPL_ABORT_RPL] = abort_rpl;
2231 work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
2232 work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
2233 work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
2234 work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
2235 work_handlers[CPL_PEER_CLOSE] = peer_close;
2236 work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
2237 work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
2238 work_handlers[CPL_RDMA_TERMINATE] = terminate;
2239 work_handlers[CPL_RDMA_EC_STATUS] = ec_status;
2240 return 0; 2241 return 0;
2241} 2242}
2242 2243
diff --git a/drivers/infiniband/hw/cxgb4/Kconfig b/drivers/infiniband/hw/cxgb4/Kconfig
new file mode 100644
index 000000000000..ccb85eaaad75
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/Kconfig
@@ -0,0 +1,18 @@
+config INFINIBAND_CXGB4
+	tristate "Chelsio T4 RDMA Driver"
+	depends on CHELSIO_T4 && INET
+	select GENERIC_ALLOCATOR
+	---help---
+	  This is an iWARP/RDMA driver for the Chelsio T4 1GbE and
+	  10GbE adapters.
+
+	  For general information about Chelsio and our products, visit
+	  our website at <http://www.chelsio.com>.
+
+	  For customer support, please visit our customer support page at
+	  <http://www.chelsio.com/support.htm>.
+
+	  Please send feedback to <linux-bugs@chelsio.com>.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called iw_cxgb4.
diff --git a/drivers/infiniband/hw/cxgb4/Makefile b/drivers/infiniband/hw/cxgb4/Makefile
new file mode 100644
index 000000000000..e31a499f0172
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/Makefile
@@ -0,0 +1,5 @@
+EXTRA_CFLAGS += -Idrivers/net/cxgb4
+
+obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o
+
+iw_cxgb4-y := device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
new file mode 100644
index 000000000000..30ce0a8eca09
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -0,0 +1,2374 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/module.h>
33#include <linux/list.h>
34#include <linux/workqueue.h>
35#include <linux/skbuff.h>
36#include <linux/timer.h>
37#include <linux/notifier.h>
38#include <linux/inetdevice.h>
39#include <linux/ip.h>
40#include <linux/tcp.h>
41
42#include <net/neighbour.h>
43#include <net/netevent.h>
44#include <net/route.h>
45
46#include "iw_cxgb4.h"
47
48static char *states[] = {
49 "idle",
50 "listen",
51 "connecting",
52 "mpa_wait_req",
53 "mpa_req_sent",
54 "mpa_req_rcvd",
55 "mpa_rep_sent",
56 "fpdu_mode",
57 "aborting",
58 "closing",
59 "moribund",
60 "dead",
61 NULL,
62};
63
64int c4iw_max_read_depth = 8;
65module_param(c4iw_max_read_depth, int, 0644);
66MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
67
68static int enable_tcp_timestamps;
69module_param(enable_tcp_timestamps, int, 0644);
70MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");
71
72static int enable_tcp_sack;
73module_param(enable_tcp_sack, int, 0644);
74MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");
75
76static int enable_tcp_window_scaling = 1;
77module_param(enable_tcp_window_scaling, int, 0644);
78MODULE_PARM_DESC(enable_tcp_window_scaling,
79 "Enable tcp window scaling (default=1)");
80
81int c4iw_debug;
82module_param(c4iw_debug, int, 0644);
83MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");
84
85static int peer2peer;
86module_param(peer2peer, int, 0644);
87MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");
88
89static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
90module_param(p2p_type, int, 0644);
91MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
92 "1=RDMA_READ 0=RDMA_WRITE (default 1)");
93
94static int ep_timeout_secs = 60;
95module_param(ep_timeout_secs, int, 0644);
96MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
97 "in seconds (default=60)");
98
99static int mpa_rev = 1;
100module_param(mpa_rev, int, 0644);
101MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
102 "1 is spec compliant. (default=1)");
103
104static int markers_enabled;
105module_param(markers_enabled, int, 0644);
106MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");
107
108static int crc_enabled = 1;
109module_param(crc_enabled, int, 0644);
110MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");
111
112static int rcv_win = 256 * 1024;
113module_param(rcv_win, int, 0644);
114MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");
115
116static int snd_win = 32 * 1024;
117module_param(snd_win, int, 0644);
118MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");
119
120static struct workqueue_struct *workq;
121
122static struct sk_buff_head rxq;
123
124static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
125static void ep_timeout(unsigned long arg);
126static void connect_reply_upcall(struct c4iw_ep *ep, int status);
127
128static LIST_HEAD(timeout_list);
129static spinlock_t timeout_lock;
130
131static void start_ep_timer(struct c4iw_ep *ep)
132{
133 PDBG("%s ep %p\n", __func__, ep);
134 if (timer_pending(&ep->timer)) {
135 PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
136 del_timer_sync(&ep->timer);
137 } else
138 c4iw_get_ep(&ep->com);
139 ep->timer.expires = jiffies + ep_timeout_secs * HZ;
140 ep->timer.data = (unsigned long)ep;
141 ep->timer.function = ep_timeout;
142 add_timer(&ep->timer);
143}
144
145static void stop_ep_timer(struct c4iw_ep *ep)
146{
147 PDBG("%s ep %p\n", __func__, ep);
148 if (!timer_pending(&ep->timer)) {
149 printk(KERN_ERR "%s timer stopped when its not running! "
150 "ep %p state %u\n", __func__, ep, ep->com.state);
151 WARN_ON(1);
152 return;
153 }
154 del_timer_sync(&ep->timer);
155 c4iw_put_ep(&ep->com);
156}
157
158static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
159 struct l2t_entry *l2e)
160{
161 int error = 0;
162
163 if (c4iw_fatal_error(rdev)) {
164 kfree_skb(skb);
165 PDBG("%s - device in error state - dropping\n", __func__);
166 return -EIO;
167 }
168 error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
169 if (error < 0)
170 kfree_skb(skb);
171 return error;
172}
173
174int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
175{
176 int error = 0;
177
178 if (c4iw_fatal_error(rdev)) {
179 kfree_skb(skb);
180 PDBG("%s - device in error state - dropping\n", __func__);
181 return -EIO;
182 }
183 error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
184 if (error < 0)
185 kfree_skb(skb);
186 return error;
187}
188
189static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
190{
191 struct cpl_tid_release *req;
192
193 skb = get_skb(skb, sizeof *req, GFP_KERNEL);
194 if (!skb)
195 return;
196 req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
197 INIT_TP_WR(req, hwtid);
198 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
199 set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
200 c4iw_ofld_send(rdev, skb);
201 return;
202}
203
204static void set_emss(struct c4iw_ep *ep, u16 opt)
205{
206 ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
207 ep->mss = ep->emss;
208 if (GET_TCPOPT_TSTAMP(opt))
209 ep->emss -= 12;
210 if (ep->emss < 128)
211 ep->emss = 128;
212 PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
213 ep->mss, ep->emss);
214}
215
216static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
217{
218 unsigned long flags;
219 enum c4iw_ep_state state;
220
221 spin_lock_irqsave(&epc->lock, flags);
222 state = epc->state;
223 spin_unlock_irqrestore(&epc->lock, flags);
224 return state;
225}
226
227static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
228{
229 epc->state = new;
230}
231
232static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
233{
234 unsigned long flags;
235
236 spin_lock_irqsave(&epc->lock, flags);
237 PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
238 __state_set(epc, new);
239 spin_unlock_irqrestore(&epc->lock, flags);
240 return;
241}
242
243static void *alloc_ep(int size, gfp_t gfp)
244{
245 struct c4iw_ep_common *epc;
246
247 epc = kzalloc(size, gfp);
248 if (epc) {
249 kref_init(&epc->kref);
250 spin_lock_init(&epc->lock);
251 init_waitqueue_head(&epc->waitq);
252 }
253 PDBG("%s alloc ep %p\n", __func__, epc);
254 return epc;
255}
256
257void _c4iw_free_ep(struct kref *kref)
258{
259 struct c4iw_ep *ep;
260
261 ep = container_of(kref, struct c4iw_ep, com.kref);
262 PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
263 if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
264 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
265 dst_release(ep->dst);
266 cxgb4_l2t_release(ep->l2t);
267 }
268 kfree(ep);
269}
270
271static void release_ep_resources(struct c4iw_ep *ep)
272{
273 set_bit(RELEASE_RESOURCES, &ep->com.flags);
274 c4iw_put_ep(&ep->com);
275}
276
277static int status2errno(int status)
278{
279 switch (status) {
280 case CPL_ERR_NONE:
281 return 0;
282 case CPL_ERR_CONN_RESET:
283 return -ECONNRESET;
284 case CPL_ERR_ARP_MISS:
285 return -EHOSTUNREACH;
286 case CPL_ERR_CONN_TIMEDOUT:
287 return -ETIMEDOUT;
288 case CPL_ERR_TCAM_FULL:
289 return -ENOMEM;
290 case CPL_ERR_CONN_EXIST:
291 return -EADDRINUSE;
292 default:
293 return -EIO;
294 }
295}
296
297/*
298 * Try and reuse skbs already allocated...
299 */
300static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
301{
302 if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
303 skb_trim(skb, 0);
304 skb_get(skb);
305 skb_reset_transport_header(skb);
306 } else {
307 skb = alloc_skb(len, gfp);
308 }
309 return skb;
310}
311
312static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
313 __be32 peer_ip, __be16 local_port,
314 __be16 peer_port, u8 tos)
315{
316 struct rtable *rt;
317 struct flowi fl = {
318 .oif = 0,
319 .nl_u = {
320 .ip4_u = {
321 .daddr = peer_ip,
322 .saddr = local_ip,
323 .tos = tos}
324 },
325 .proto = IPPROTO_TCP,
326 .uli_u = {
327 .ports = {
328 .sport = local_port,
329 .dport = peer_port}
330 }
331 };
332
333 if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
334 return NULL;
335 return rt;
336}
337
338static void arp_failure_discard(void *handle, struct sk_buff *skb)
339{
340 PDBG("%s c4iw_dev %p\n", __func__, handle);
341 kfree_skb(skb);
342}
343
344/*
345 * Handle an ARP failure for an active open.
346 */
347static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
348{
349 printk(KERN_ERR MOD "ARP failure duing connect\n");
350 kfree_skb(skb);
351}
352
353/*
354 * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant
355 * and send it along.
356 */
357static void abort_arp_failure(void *handle, struct sk_buff *skb)
358{
359 struct c4iw_rdev *rdev = handle;
360 struct cpl_abort_req *req = cplhdr(skb);
361
362 PDBG("%s rdev %p\n", __func__, rdev);
363 req->cmd = CPL_ABORT_NO_RST;
364 c4iw_ofld_send(rdev, skb);
365}
366
367static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
368{
369 unsigned int flowclen = 80;
370 struct fw_flowc_wr *flowc;
371 int i;
372
373 skb = get_skb(skb, flowclen, GFP_KERNEL);
374 flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
375
376 flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
377 FW_FLOWC_WR_NPARAMS(8));
378 flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
379 16)) | FW_WR_FLOWID(ep->hwtid));
380
381 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
382 flowc->mnemval[0].val = cpu_to_be32(0);
383 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
384 flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
385 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
386 flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
387 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
388 flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
389 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
390 flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
391 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
392 flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
393 flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
394 flowc->mnemval[6].val = cpu_to_be32(snd_win);
395 flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
396 flowc->mnemval[7].val = cpu_to_be32(ep->emss);
397 /* Pad WR to 16 byte boundary */
398 flowc->mnemval[8].mnemonic = 0;
399 flowc->mnemval[8].val = 0;
400 for (i = 0; i < 9; i++) {
401 flowc->mnemval[i].r4[0] = 0;
402 flowc->mnemval[i].r4[1] = 0;
403 flowc->mnemval[i].r4[2] = 0;
404 }
405
406 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
407 c4iw_ofld_send(&ep->com.dev->rdev, skb);
408}
409
410static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
411{
412 struct cpl_close_con_req *req;
413 struct sk_buff *skb;
414 int wrlen = roundup(sizeof *req, 16);
415
416 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
417 skb = get_skb(NULL, wrlen, gfp);
418 if (!skb) {
419 printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
420 return -ENOMEM;
421 }
422 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
423 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
424 req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
425 memset(req, 0, wrlen);
426 INIT_TP_WR(req, ep->hwtid);
427 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
428 ep->hwtid));
429 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
430}
431
432static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
433{
434 struct cpl_abort_req *req;
435 int wrlen = roundup(sizeof *req, 16);
436
437 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
438 skb = get_skb(skb, wrlen, gfp);
439 if (!skb) {
440 printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
441 __func__);
442 return -ENOMEM;
443 }
444 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
445 t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
446 req = (struct cpl_abort_req *) skb_put(skb, wrlen);
447 memset(req, 0, wrlen);
448 INIT_TP_WR(req, ep->hwtid);
449 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
450 req->cmd = CPL_ABORT_SEND_RST;
451 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
452}
453
454static int send_connect(struct c4iw_ep *ep)
455{
456 struct cpl_act_open_req *req;
457 struct sk_buff *skb;
458 u64 opt0;
459 u32 opt2;
460 unsigned int mtu_idx;
461 int wscale;
462 int wrlen = roundup(sizeof *req, 16);
463
464 PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);
465
466 skb = get_skb(NULL, wrlen, GFP_KERNEL);
467 if (!skb) {
468 printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
469 __func__);
470 return -ENOMEM;
471 }
472 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);
473
474 cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
475 wscale = compute_wscale(rcv_win);
476 opt0 = KEEP_ALIVE(1) |
477 WND_SCALE(wscale) |
478 MSS_IDX(mtu_idx) |
479 L2T_IDX(ep->l2t->idx) |
480 TX_CHAN(ep->tx_chan) |
481 SMAC_SEL(ep->smac_idx) |
482 DSCP(ep->tos) |
483 RCV_BUFSIZ(rcv_win>>10);
484 opt2 = RX_CHANNEL(0) |
485 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
486 if (enable_tcp_timestamps)
487 opt2 |= TSTAMPS_EN(1);
488 if (enable_tcp_sack)
489 opt2 |= SACK_EN(1);
490 if (wscale && enable_tcp_window_scaling)
491 opt2 |= WND_SCALE_EN(1);
492 t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
493
494 req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
495 INIT_TP_WR(req, 0);
496 OPCODE_TID(req) = cpu_to_be32(
497 MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid)));
498 req->local_port = ep->com.local_addr.sin_port;
499 req->peer_port = ep->com.remote_addr.sin_port;
500 req->local_ip = ep->com.local_addr.sin_addr.s_addr;
501 req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
502 req->opt0 = cpu_to_be64(opt0);
503 req->params = 0;
504 req->opt2 = cpu_to_be32(opt2);
505 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
506}
507
508static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb)
509{
510 int mpalen, wrlen;
511 struct fw_ofld_tx_data_wr *req;
512 struct mpa_message *mpa;
513
514 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
515
516 BUG_ON(skb_cloned(skb));
517
518 mpalen = sizeof(*mpa) + ep->plen;
519 wrlen = roundup(mpalen + sizeof *req, 16);
520 skb = get_skb(skb, wrlen, GFP_KERNEL);
521 if (!skb) {
522 connect_reply_upcall(ep, -ENOMEM);
523 return;
524 }
525 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
526
527 req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
528 memset(req, 0, wrlen);
529 req->op_to_immdlen = cpu_to_be32(
530 FW_WR_OP(FW_OFLD_TX_DATA_WR) |
531 FW_WR_COMPL(1) |
532 FW_WR_IMMDLEN(mpalen));
533 req->flowid_len16 = cpu_to_be32(
534 FW_WR_FLOWID(ep->hwtid) |
535 FW_WR_LEN16(wrlen >> 4));
536 req->plen = cpu_to_be32(mpalen);
537 req->tunnel_to_proxy = cpu_to_be32(
538 FW_OFLD_TX_DATA_WR_FLUSH(1) |
539 FW_OFLD_TX_DATA_WR_SHOVE(1));
540
541 mpa = (struct mpa_message *)(req + 1);
542 memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
543 mpa->flags = (crc_enabled ? MPA_CRC : 0) |
544 (markers_enabled ? MPA_MARKERS : 0);
545 mpa->private_data_size = htons(ep->plen);
546 mpa->revision = mpa_rev;
547
548 if (ep->plen)
549 memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);
550
551 /*
552 * Reference the mpa skb. This ensures the data area
553 * will remain in memory until the hw acks the tx.
554 * Function fw4_ack() will deref it.
555 */
556 skb_get(skb);
557 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
558 BUG_ON(ep->mpa_skb);
559 ep->mpa_skb = skb;
560 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
561 start_ep_timer(ep);
562 state_set(&ep->com, MPA_REQ_SENT);
563 ep->mpa_attr.initiator = 1;
564 return;
565}
566
567static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
568{
569 int mpalen, wrlen;
570 struct fw_ofld_tx_data_wr *req;
571 struct mpa_message *mpa;
572 struct sk_buff *skb;
573
574 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
575
576 mpalen = sizeof(*mpa) + plen;
577 wrlen = roundup(mpalen + sizeof *req, 16);
578
579 skb = get_skb(NULL, wrlen, GFP_KERNEL);
580 if (!skb) {
581 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
582 return -ENOMEM;
583 }
584 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
585
586 req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
587 memset(req, 0, wrlen);
588 req->op_to_immdlen = cpu_to_be32(
589 FW_WR_OP(FW_OFLD_TX_DATA_WR) |
590 FW_WR_COMPL(1) |
591 FW_WR_IMMDLEN(mpalen));
592 req->flowid_len16 = cpu_to_be32(
593 FW_WR_FLOWID(ep->hwtid) |
594 FW_WR_LEN16(wrlen >> 4));
595 req->plen = cpu_to_be32(mpalen);
596 req->tunnel_to_proxy = cpu_to_be32(
597 FW_OFLD_TX_DATA_WR_FLUSH(1) |
598 FW_OFLD_TX_DATA_WR_SHOVE(1));
599
600 mpa = (struct mpa_message *)(req + 1);
601 memset(mpa, 0, sizeof(*mpa));
602 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
603 mpa->flags = MPA_REJECT;
604 mpa->revision = mpa_rev;
605 mpa->private_data_size = htons(plen);
606 if (plen)
607 memcpy(mpa->private_data, pdata, plen);
608
609 /*
610 * Reference the mpa skb again. This ensures the data area
611 * will remain in memory until the hw acks the tx.
612 * Function fw4_ack() will deref it.
613 */
614 skb_get(skb);
615 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
616 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
617 BUG_ON(ep->mpa_skb);
618 ep->mpa_skb = skb;
619 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
620}
621
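/*
 * send_mpa_reply - send the accepting MPA start reply.  The CRC flag mirrors
 * what was recorded in ep->mpa_attr while processing the peer's request;
 * markers follow the markers_enabled module parameter.  Moves the endpoint
 * to MPA_REP_SENT; the connection enters FPDU_MODE once the accept path in
 * c4iw_accept_cr() completes.
 */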
622static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
623{
624 int mpalen, wrlen;
625 struct fw_ofld_tx_data_wr *req;
626 struct mpa_message *mpa;
627 struct sk_buff *skb;
628
629 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
630
631 mpalen = sizeof(*mpa) + plen;
632 wrlen = roundup(mpalen + sizeof *req, 16);
633
634 skb = get_skb(NULL, wrlen, GFP_KERNEL);
635 if (!skb) {
636 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
637 return -ENOMEM;
638 }
639 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
640
641 req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
642 memset(req, 0, wrlen);
643 req->op_to_immdlen = cpu_to_be32(
644 FW_WR_OP(FW_OFLD_TX_DATA_WR) |
645 FW_WR_COMPL(1) |
646 FW_WR_IMMDLEN(mpalen));
647 req->flowid_len16 = cpu_to_be32(
648 FW_WR_FLOWID(ep->hwtid) |
649 FW_WR_LEN16(wrlen >> 4));
650 req->plen = cpu_to_be32(mpalen);
651 req->tunnel_to_proxy = cpu_to_be32(
652 FW_OFLD_TX_DATA_WR_FLUSH(1) |
653 FW_OFLD_TX_DATA_WR_SHOVE(1));
654
655 mpa = (struct mpa_message *)(req + 1);
656 memset(mpa, 0, sizeof(*mpa));
657 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
658 mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
659 (markers_enabled ? MPA_MARKERS : 0);
660 mpa->revision = mpa_rev;
661 mpa->private_data_size = htons(plen);
662 if (plen)
663 memcpy(mpa->private_data, pdata, plen);
664
665 /*
666 * Reference the mpa skb. This ensures the data area
667 * will remain in memory until the hw acks the tx.
668 * Function fw4_ack() will deref it.
669 */
670 skb_get(skb);
671 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
672 ep->mpa_skb = skb;
673 state_set(&ep->com, MPA_REP_SENT);
674 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
675}
676
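/*
 * act_establish - handle CPL_ACT_ESTABLISH for an active open.  Trades the
 * atid used to initiate the connection for the hardware tid, records the
 * initial send/receive sequence numbers, derives the emss from the TCP
 * options, and kicks off MPA negotiation (send_flowc() + send_mpa_req()).
 */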
677static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
678{
679 struct c4iw_ep *ep;
680 struct cpl_act_establish *req = cplhdr(skb);
681 unsigned int tid = GET_TID(req);
682 unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
683 struct tid_info *t = dev->rdev.lldi.tids;
684
685 ep = lookup_atid(t, atid);
686
687 PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
688 be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
689
690 dst_confirm(ep->dst);
691
692 /* setup the hwtid for this connection */
693 ep->hwtid = tid;
694 cxgb4_insert_tid(t, ep, tid);
695
696 ep->snd_seq = be32_to_cpu(req->snd_isn);
697 ep->rcv_seq = be32_to_cpu(req->rcv_isn);
698
699 set_emss(ep, ntohs(req->tcp_opt));
700
701 /* dealloc the atid */
702 cxgb4_free_atid(t, atid);
703
704 /* start MPA negotiation */
705 send_flowc(ep, NULL);
706 send_mpa_req(ep, skb);
707
708 return 0;
709}
710
711static void close_complete_upcall(struct c4iw_ep *ep)
712{
713 struct iw_cm_event event;
714
715 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
716 memset(&event, 0, sizeof(event));
717 event.event = IW_CM_EVENT_CLOSE;
718 if (ep->com.cm_id) {
719 PDBG("close complete delivered ep %p cm_id %p tid %u\n",
720 ep, ep->com.cm_id, ep->hwtid);
721 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
722 ep->com.cm_id->rem_ref(ep->com.cm_id);
723 ep->com.cm_id = NULL;
724 ep->com.qp = NULL;
725 }
726}
727
728static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
729{
730 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
731 close_complete_upcall(ep);
732 state_set(&ep->com, ABORTING);
733 return send_abort(ep, skb, gfp);
734}
735
736static void peer_close_upcall(struct c4iw_ep *ep)
737{
738 struct iw_cm_event event;
739
740 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
741 memset(&event, 0, sizeof(event));
742 event.event = IW_CM_EVENT_DISCONNECT;
743 if (ep->com.cm_id) {
744 PDBG("peer close delivered ep %p cm_id %p tid %u\n",
745 ep, ep->com.cm_id, ep->hwtid);
746 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
747 }
748}
749
750static void peer_abort_upcall(struct c4iw_ep *ep)
751{
752 struct iw_cm_event event;
753
754 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
755 memset(&event, 0, sizeof(event));
756 event.event = IW_CM_EVENT_CLOSE;
757 event.status = -ECONNRESET;
758 if (ep->com.cm_id) {
759 PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
760 ep->com.cm_id, ep->hwtid);
761 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
762 ep->com.cm_id->rem_ref(ep->com.cm_id);
763 ep->com.cm_id = NULL;
764 ep->com.qp = NULL;
765 }
766}
767
768static void connect_reply_upcall(struct c4iw_ep *ep, int status)
769{
770 struct iw_cm_event event;
771
772 PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
773 memset(&event, 0, sizeof(event));
774 event.event = IW_CM_EVENT_CONNECT_REPLY;
775 event.status = status;
776 event.local_addr = ep->com.local_addr;
777 event.remote_addr = ep->com.remote_addr;
778
779 if ((status == 0) || (status == -ECONNREFUSED)) {
780 event.private_data_len = ep->plen;
781 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
782 }
783 if (ep->com.cm_id) {
784 PDBG("%s ep %p tid %u status %d\n", __func__, ep,
785 ep->hwtid, status);
786 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
787 }
788 if (status < 0) {
789 ep->com.cm_id->rem_ref(ep->com.cm_id);
790 ep->com.cm_id = NULL;
791 ep->com.qp = NULL;
792 }
793}
794
795static void connect_request_upcall(struct c4iw_ep *ep)
796{
797 struct iw_cm_event event;
798
799 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
800 memset(&event, 0, sizeof(event));
801 event.event = IW_CM_EVENT_CONNECT_REQUEST;
802 event.local_addr = ep->com.local_addr;
803 event.remote_addr = ep->com.remote_addr;
804 event.private_data_len = ep->plen;
805 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
806 event.provider_data = ep;
807 if (state_read(&ep->parent_ep->com) != DEAD) {
808 c4iw_get_ep(&ep->com);
809 ep->parent_ep->com.cm_id->event_handler(
810 ep->parent_ep->com.cm_id,
811 &event);
812 }
813 c4iw_put_ep(&ep->parent_ep->com);
814 ep->parent_ep = NULL;
815}
816
817static void established_upcall(struct c4iw_ep *ep)
818{
819 struct iw_cm_event event;
820
821 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
822 memset(&event, 0, sizeof(event));
823 event.event = IW_CM_EVENT_ESTABLISHED;
824 if (ep->com.cm_id) {
825 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
826 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
827 }
828}
829
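/*
 * update_rx_credits - return 'credits' bytes of RX credit to the hardware
 * with a CPL_RX_DATA_ACK, presumably so the advertised receive window can
 * be reopened while MPA negotiation data is consumed by the driver rather
 * than the ULP.  Returns the credits granted, or 0 if the skb allocation
 * fails.
 */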
830static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
831{
832 struct cpl_rx_data_ack *req;
833 struct sk_buff *skb;
834 int wrlen = roundup(sizeof *req, 16);
835
836 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
837 skb = get_skb(NULL, wrlen, GFP_KERNEL);
838 if (!skb) {
839 printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
840 return 0;
841 }
842
843 req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
844 memset(req, 0, wrlen);
845 INIT_TP_WR(req, ep->hwtid);
846 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
847 ep->hwtid));
848 req->credit_dack = cpu_to_be32(credits);
849 set_wr_txq(skb, CPL_PRIORITY_ACK, ep->txq_idx);
850 c4iw_ofld_send(&ep->com.dev->rdev, skb);
851 return credits;
852}
853
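/*
 * process_mpa_reply - accumulate streaming RX data into ep->mpa_pkt until a
 * complete MPA start reply (header plus advertised private data) has
 * arrived, then validate it.  On success the negotiated CRC/marker
 * attributes are recorded, the endpoint enters FPDU_MODE and the QP is
 * moved to RTS; on a validation failure or an explicit MPA_REJECT the
 * connection is aborted.  In both cases connect_reply_upcall() reports the
 * result to the ULP.
 */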
854static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
855{
856 struct mpa_message *mpa;
857 u16 plen;
858 struct c4iw_qp_attributes attrs;
859 enum c4iw_qp_attr_mask mask;
860 int err;
861
862 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
863
864 /*
865 * Stop mpa timer. If it expired, then the state has
866 * changed and we bail since ep_timeout already aborted
867 * the connection.
868 */
869 stop_ep_timer(ep);
870 if (state_read(&ep->com) != MPA_REQ_SENT)
871 return;
872
873 /*
874 * If we get more than the supported amount of private data
875 * then we must fail this connection.
876 */
877 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
878 err = -EINVAL;
879 goto err;
880 }
881
882 /*
 883	 * Copy the new data into our accumulation buffer.
884 */
885 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
886 skb->len);
887 ep->mpa_pkt_len += skb->len;
888
889 /*
 890	 * If we don't even have the mpa message, then bail.
891 */
892 if (ep->mpa_pkt_len < sizeof(*mpa))
893 return;
894 mpa = (struct mpa_message *) ep->mpa_pkt;
895
896 /* Validate MPA header. */
897 if (mpa->revision != mpa_rev) {
898 err = -EPROTO;
899 goto err;
900 }
901 if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
902 err = -EPROTO;
903 goto err;
904 }
905
906 plen = ntohs(mpa->private_data_size);
907
908 /*
909 * Fail if there's too much private data.
910 */
911 if (plen > MPA_MAX_PRIVATE_DATA) {
912 err = -EPROTO;
913 goto err;
914 }
915
916 /*
 917	 * Fail if plen does not account for the amount of data already received.
918 */
919 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
920 err = -EPROTO;
921 goto err;
922 }
923
924 ep->plen = (u8) plen;
925
926 /*
927 * If we don't have all the pdata yet, then bail.
 928	 * We'll continue processing when more data arrives.
929 */
930 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
931 return;
932
933 if (mpa->flags & MPA_REJECT) {
934 err = -ECONNREFUSED;
935 goto err;
936 }
937
938 /*
939 * If we get here we have accumulated the entire mpa
 940	 * start reply message including private data, and
 941	 * the MPA header is valid.
942 */
943 state_set(&ep->com, FPDU_MODE);
944 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
945 ep->mpa_attr.recv_marker_enabled = markers_enabled;
946 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
947 ep->mpa_attr.version = mpa_rev;
948 ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
949 FW_RI_INIT_P2PTYPE_DISABLED;
950 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
951 "xmit_marker_enabled=%d, version=%d\n", __func__,
952 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
953 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
954
955 attrs.mpa_attr = ep->mpa_attr;
956 attrs.max_ird = ep->ird;
957 attrs.max_ord = ep->ord;
958 attrs.llp_stream_handle = ep;
959 attrs.next_state = C4IW_QP_STATE_RTS;
960
961 mask = C4IW_QP_ATTR_NEXT_STATE |
962 C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
963 C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;
964
965 /* bind QP and TID with INIT_WR */
966 err = c4iw_modify_qp(ep->com.qp->rhp,
967 ep->com.qp, mask, &attrs, 1);
968 if (err)
969 goto err;
970 goto out;
971err:
972 abort_connection(ep, skb, GFP_KERNEL);
973out:
974 connect_reply_upcall(ep, err);
975 return;
976}
977
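/*
 * process_mpa_request - passive-side counterpart of process_mpa_reply().
 * Accumulates and validates the peer's MPA start request; once it is
 * complete, records the negotiated attributes, moves to MPA_REQ_RCVD and
 * delivers an IW_CM_EVENT_CONNECT_REQUEST upcall to the listening endpoint
 * so the ULP can accept or reject the connection.
 */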
978static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
979{
980 struct mpa_message *mpa;
981 u16 plen;
982
983 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
984
985 if (state_read(&ep->com) != MPA_REQ_WAIT)
986 return;
987
988 /*
989 * If we get more than the supported amount of private data
990 * then we must fail this connection.
991 */
992 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
993 stop_ep_timer(ep);
994 abort_connection(ep, skb, GFP_KERNEL);
995 return;
996 }
997
998 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
999
1000 /*
1001 * Copy the new data into our accumulation buffer.
1002 */
1003 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
1004 skb->len);
1005 ep->mpa_pkt_len += skb->len;
1006
1007 /*
1008 * If we don't even have the mpa message, then bail.
1009	 * We'll continue processing when more data arrives.
1010 */
1011 if (ep->mpa_pkt_len < sizeof(*mpa))
1012 return;
1013
1014 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
1015 stop_ep_timer(ep);
1016 mpa = (struct mpa_message *) ep->mpa_pkt;
1017
1018 /*
1019 * Validate MPA Header.
1020 */
1021 if (mpa->revision != mpa_rev) {
1022 abort_connection(ep, skb, GFP_KERNEL);
1023 return;
1024 }
1025
1026 if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
1027 abort_connection(ep, skb, GFP_KERNEL);
1028 return;
1029 }
1030
1031 plen = ntohs(mpa->private_data_size);
1032
1033 /*
1034 * Fail if there's too much private data.
1035 */
1036 if (plen > MPA_MAX_PRIVATE_DATA) {
1037 abort_connection(ep, skb, GFP_KERNEL);
1038 return;
1039 }
1040
1041 /*
1042	 * Fail if plen does not account for the amount of data already received.
1043 */
1044 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
1045 abort_connection(ep, skb, GFP_KERNEL);
1046 return;
1047 }
1048 ep->plen = (u8) plen;
1049
1050 /*
1051 * If we don't have all the pdata yet, then bail.
1052 */
1053 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
1054 return;
1055
1056 /*
1057 * If we get here we have accumulated the entire mpa
1058	 * start request message including private data.
1059 */
1060 ep->mpa_attr.initiator = 0;
1061 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
1062 ep->mpa_attr.recv_marker_enabled = markers_enabled;
1063 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
1064 ep->mpa_attr.version = mpa_rev;
1065 ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
1066 FW_RI_INIT_P2PTYPE_DISABLED;
1067 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
1068 "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
1069 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
1070 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
1071 ep->mpa_attr.p2p_type);
1072
1073 state_set(&ep->com, MPA_REQ_RCVD);
1074
1075 /* drive upcall */
1076 connect_request_upcall(ep);
1077 return;
1078}
1079
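/*
 * rx_data - handle CPL_RX_DATA.  The only streaming-mode data we expect is
 * MPA negotiation traffic, so after refunding the RX credits the payload is
 * fed to process_mpa_reply() or process_mpa_request() based on the
 * connection state.  Data in any other state is unexpected and is left for
 * ep_timeout() to clean up.
 */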
1080static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
1081{
1082 struct c4iw_ep *ep;
1083 struct cpl_rx_data *hdr = cplhdr(skb);
1084 unsigned int dlen = ntohs(hdr->len);
1085 unsigned int tid = GET_TID(hdr);
1086 struct tid_info *t = dev->rdev.lldi.tids;
1087
1088 ep = lookup_tid(t, tid);
1089 PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
1090 skb_pull(skb, sizeof(*hdr));
1091 skb_trim(skb, dlen);
1092
1093 ep->rcv_seq += dlen;
1094 BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));
1095
1096 /* update RX credits */
1097 update_rx_credits(ep, dlen);
1098
1099 switch (state_read(&ep->com)) {
1100 case MPA_REQ_SENT:
1101 process_mpa_reply(ep, skb);
1102 break;
1103 case MPA_REQ_WAIT:
1104 process_mpa_request(ep, skb);
1105 break;
1106 case MPA_REP_SENT:
1107 break;
1108 default:
1109 printk(KERN_ERR MOD "%s Unexpected streaming data."
1110 " ep %p state %d tid %u\n",
1111 __func__, ep, state_read(&ep->com), ep->hwtid);
1112
1113 /*
1114 * The ep will timeout and inform the ULP of the failure.
1115 * See ep_timeout().
1116 */
1117 break;
1118 }
1119 return 0;
1120}
1121
1122static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1123{
1124 struct c4iw_ep *ep;
1125 struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
1126 unsigned long flags;
1127 int release = 0;
1128 unsigned int tid = GET_TID(rpl);
1129 struct tid_info *t = dev->rdev.lldi.tids;
1130
1131 ep = lookup_tid(t, tid);
1132 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1133 BUG_ON(!ep);
1134 spin_lock_irqsave(&ep->com.lock, flags);
1135 switch (ep->com.state) {
1136 case ABORTING:
1137 __state_set(&ep->com, DEAD);
1138 release = 1;
1139 break;
1140 default:
1141 printk(KERN_ERR "%s ep %p state %d\n",
1142 __func__, ep, ep->com.state);
1143 break;
1144 }
1145 spin_unlock_irqrestore(&ep->com.lock, flags);
1146
1147 if (release)
1148 release_ep_resources(ep);
1149 return 0;
1150}
1151
1152/*
1153 * Return whether a failed active open has allocated a TID
1154 */
1155static inline int act_open_has_tid(int status)
1156{
1157 return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
1158 status != CPL_ERR_ARP_MISS;
1159}
1160
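/*
 * act_open_rpl - handle CPL_ACT_OPEN_RPL, which reports a failed active
 * open (a successful open arrives as CPL_ACT_ESTABLISH instead).  Negative
 * advice is logged and ignored; otherwise the error is reported through
 * connect_reply_upcall(), any hardware tid that was allocated is removed,
 * and the atid, route, L2T entry and endpoint reference are released.
 */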
1161static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1162{
1163 struct c4iw_ep *ep;
1164 struct cpl_act_open_rpl *rpl = cplhdr(skb);
1165 unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
1166 ntohl(rpl->atid_status)));
1167 struct tid_info *t = dev->rdev.lldi.tids;
1168 int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));
1169
1170 ep = lookup_atid(t, atid);
1171
1172 PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
1173 status, status2errno(status));
1174
1175 if (status == CPL_ERR_RTX_NEG_ADVICE) {
1176 printk(KERN_WARNING MOD "Connection problems for atid %u\n",
1177 atid);
1178 return 0;
1179 }
1180
1181 connect_reply_upcall(ep, status2errno(status));
1182 state_set(&ep->com, DEAD);
1183
1184 if (status && act_open_has_tid(status))
1185 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));
1186
1187 cxgb4_free_atid(t, atid);
1188 dst_release(ep->dst);
1189 cxgb4_l2t_release(ep->l2t);
1190 c4iw_put_ep(&ep->com);
1191
1192 return 0;
1193}
1194
1195static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1196{
1197 struct cpl_pass_open_rpl *rpl = cplhdr(skb);
1198 struct tid_info *t = dev->rdev.lldi.tids;
1199 unsigned int stid = GET_TID(rpl);
1200 struct c4iw_listen_ep *ep = lookup_stid(t, stid);
1201
1202 if (!ep) {
1203 printk(KERN_ERR MOD "stid %d lookup failure!\n", stid);
1204 return 0;
1205 }
1206 PDBG("%s ep %p status %d error %d\n", __func__, ep,
1207 rpl->status, status2errno(rpl->status));
1208 ep->com.rpl_err = status2errno(rpl->status);
1209 ep->com.rpl_done = 1;
1210 wake_up(&ep->com.waitq);
1211
1212 return 0;
1213}
1214
1215static int listen_stop(struct c4iw_listen_ep *ep)
1216{
1217 struct sk_buff *skb;
1218 struct cpl_close_listsvr_req *req;
1219
1220 PDBG("%s ep %p\n", __func__, ep);
1221 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
1222 if (!skb) {
1223 printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
1224 return -ENOMEM;
1225 }
1226 req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req));
1227 INIT_TP_WR(req, 0);
1228 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ,
1229 ep->stid));
1230 req->reply_ctrl = cpu_to_be16(
1231 QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0]));
1232 set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
1233 return c4iw_ofld_send(&ep->com.dev->rdev, skb);
1234}
1235
1236static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1237{
1238 struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
1239 struct tid_info *t = dev->rdev.lldi.tids;
1240 unsigned int stid = GET_TID(rpl);
1241 struct c4iw_listen_ep *ep = lookup_stid(t, stid);
1242
1243 PDBG("%s ep %p\n", __func__, ep);
1244 ep->com.rpl_err = status2errno(rpl->status);
1245 ep->com.rpl_done = 1;
1246 wake_up(&ep->com.waitq);
1247 return 0;
1248}
1249
1250static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
1251 struct cpl_pass_accept_req *req)
1252{
1253 struct cpl_pass_accept_rpl *rpl;
1254 unsigned int mtu_idx;
1255 u64 opt0;
1256 u32 opt2;
1257 int wscale;
1258
1259 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1260 BUG_ON(skb_cloned(skb));
1261 skb_trim(skb, sizeof(*rpl));
1262 skb_get(skb);
1263 cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
1264 wscale = compute_wscale(rcv_win);
1265 opt0 = KEEP_ALIVE(1) |
1266 WND_SCALE(wscale) |
1267 MSS_IDX(mtu_idx) |
1268 L2T_IDX(ep->l2t->idx) |
1269 TX_CHAN(ep->tx_chan) |
1270 SMAC_SEL(ep->smac_idx) |
1271 DSCP(ep->tos) |
1272 RCV_BUFSIZ(rcv_win>>10);
1273 opt2 = RX_CHANNEL(0) |
1274 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
1275
1276 if (enable_tcp_timestamps && req->tcpopt.tstamp)
1277 opt2 |= TSTAMPS_EN(1);
1278 if (enable_tcp_sack && req->tcpopt.sack)
1279 opt2 |= SACK_EN(1);
1280 if (wscale && enable_tcp_window_scaling)
1281 opt2 |= WND_SCALE_EN(1);
1282
1283 rpl = cplhdr(skb);
1284 INIT_TP_WR(rpl, ep->hwtid);
1285 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
1286 ep->hwtid));
1287 rpl->opt0 = cpu_to_be64(opt0);
1288 rpl->opt2 = cpu_to_be32(opt2);
1289 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);
1290 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
1291
1292 return;
1293}
1294
1295static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip,
1296 struct sk_buff *skb)
1297{
1298 PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid,
1299 peer_ip);
1300 BUG_ON(skb_cloned(skb));
1301 skb_trim(skb, sizeof(struct cpl_tid_release));
1302 skb_get(skb);
1303 release_tid(&dev->rdev, hwtid, skb);
1304 return;
1305}
1306
1307static void get_4tuple(struct cpl_pass_accept_req *req,
1308 __be32 *local_ip, __be32 *peer_ip,
1309 __be16 *local_port, __be16 *peer_port)
1310{
1311 int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
1312 int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
1313 struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
1314 struct tcphdr *tcp = (struct tcphdr *)
1315 ((u8 *)(req + 1) + eth_len + ip_len);
1316
1317 PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
1318 ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
1319 ntohs(tcp->dest));
1320
1321 *peer_ip = ip->saddr;
1322 *local_ip = ip->daddr;
1323 *peer_port = tcp->source;
1324 *local_port = tcp->dest;
1325
1326 return;
1327}
1328
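/*
 * pass_accept_req - handle CPL_PASS_ACCEPT_REQ on a listening endpoint.
 * Extracts the 4-tuple from the embedded Ethernet/IP/TCP headers, finds a
 * route and L2T entry (with a special case for loopback destinations),
 * picks the tx channel, txq and RSS queue for the port, and allocates a
 * child endpoint before replying with accept_cr().  Any failure releases
 * the hardware tid via reject_cr().
 */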
1329static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
1330{
1331 struct c4iw_ep *child_ep, *parent_ep;
1332 struct cpl_pass_accept_req *req = cplhdr(skb);
1333 unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
1334 struct tid_info *t = dev->rdev.lldi.tids;
1335 unsigned int hwtid = GET_TID(req);
1336 struct dst_entry *dst;
1337 struct l2t_entry *l2t;
1338 struct rtable *rt;
1339 __be32 local_ip, peer_ip;
1340 __be16 local_port, peer_port;
1341 struct net_device *pdev;
1342 u32 tx_chan, smac_idx;
1343 u16 rss_qid;
1344 u32 mtu;
1345 int step;
1346 int txq_idx;
1347
1348 parent_ep = lookup_stid(t, stid);
1349 PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
1350
1351 get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);
1352
1353 if (state_read(&parent_ep->com) != LISTEN) {
1354 printk(KERN_ERR "%s - listening ep not in LISTEN\n",
1355 __func__);
1356 goto reject;
1357 }
1358
1359 /* Find output route */
1360 rt = find_route(dev, local_ip, peer_ip, local_port, peer_port,
1361 GET_POPEN_TOS(ntohl(req->tos_stid)));
1362 if (!rt) {
1363 printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
1364 __func__);
1365 goto reject;
1366 }
1367 dst = &rt->u.dst;
1368 if (dst->neighbour->dev->flags & IFF_LOOPBACK) {
1369 pdev = ip_dev_find(&init_net, peer_ip);
1370 BUG_ON(!pdev);
1371 l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
1372 pdev, 0);
1373 mtu = pdev->mtu;
1374 tx_chan = cxgb4_port_chan(pdev);
1375 smac_idx = tx_chan << 1;
1376 step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
1377 txq_idx = cxgb4_port_idx(pdev) * step;
1378 step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
1379 rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
1380 dev_put(pdev);
1381 } else {
1382 l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
1383 dst->neighbour->dev, 0);
1384 mtu = dst_mtu(dst);
1385 tx_chan = cxgb4_port_chan(dst->neighbour->dev);
1386 smac_idx = tx_chan << 1;
1387 step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
1388 txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step;
1389 step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
1390 rss_qid = dev->rdev.lldi.rxq_ids[
1391 cxgb4_port_idx(dst->neighbour->dev) * step];
1392 }
1393 if (!l2t) {
1394 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
1395 __func__);
1396 dst_release(dst);
1397 goto reject;
1398 }
1399
1400 child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
1401 if (!child_ep) {
1402 printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
1403 __func__);
1404 cxgb4_l2t_release(l2t);
1405 dst_release(dst);
1406 goto reject;
1407 }
1408 state_set(&child_ep->com, CONNECTING);
1409 child_ep->com.dev = dev;
1410 child_ep->com.cm_id = NULL;
1411 child_ep->com.local_addr.sin_family = PF_INET;
1412 child_ep->com.local_addr.sin_port = local_port;
1413 child_ep->com.local_addr.sin_addr.s_addr = local_ip;
1414 child_ep->com.remote_addr.sin_family = PF_INET;
1415 child_ep->com.remote_addr.sin_port = peer_port;
1416 child_ep->com.remote_addr.sin_addr.s_addr = peer_ip;
1417 c4iw_get_ep(&parent_ep->com);
1418 child_ep->parent_ep = parent_ep;
1419 child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
1420 child_ep->l2t = l2t;
1421 child_ep->dst = dst;
1422 child_ep->hwtid = hwtid;
1423 child_ep->tx_chan = tx_chan;
1424 child_ep->smac_idx = smac_idx;
1425 child_ep->rss_qid = rss_qid;
1426 child_ep->mtu = mtu;
1427 child_ep->txq_idx = txq_idx;
1428
1429 PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
1430 tx_chan, smac_idx, rss_qid);
1431
1432 init_timer(&child_ep->timer);
1433 cxgb4_insert_tid(t, child_ep, hwtid);
1434 accept_cr(child_ep, peer_ip, skb, req);
1435 goto out;
1436reject:
1437 reject_cr(dev, hwtid, peer_ip, skb);
1438out:
1439 return 0;
1440}
1441
1442static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
1443{
1444 struct c4iw_ep *ep;
1445 struct cpl_pass_establish *req = cplhdr(skb);
1446 struct tid_info *t = dev->rdev.lldi.tids;
1447 unsigned int tid = GET_TID(req);
1448
1449 ep = lookup_tid(t, tid);
1450 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1451 ep->snd_seq = be32_to_cpu(req->snd_isn);
1452 ep->rcv_seq = be32_to_cpu(req->rcv_isn);
1453
1454 set_emss(ep, ntohs(req->tcp_opt));
1455
1456 dst_confirm(ep->dst);
1457 state_set(&ep->com, MPA_REQ_WAIT);
1458 start_ep_timer(ep);
1459 send_flowc(ep, skb);
1460
1461 return 0;
1462}
1463
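/*
 * peer_close - handle CPL_PEER_CLOSE (the peer closed its half of the
 * connection).  The state machine is driven under ep->com.lock; upcalls,
 * timer changes, QP transitions and the final disconnect are recorded in
 * flags and performed after the lock is dropped.  In MPA_REQ_RCVD and
 * MPA_REP_SENT the endpoint is marked CLOSING and anyone blocked in
 * connection setup is woken so the accept/reject path sees the error.
 */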
1464static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
1465{
1466 struct cpl_peer_close *hdr = cplhdr(skb);
1467 struct c4iw_ep *ep;
1468 struct c4iw_qp_attributes attrs;
1469 unsigned long flags;
1470 int disconnect = 1;
1471 int release = 0;
1472 int closing = 0;
1473 struct tid_info *t = dev->rdev.lldi.tids;
1474 unsigned int tid = GET_TID(hdr);
1475 int start_timer = 0;
1476 int stop_timer = 0;
1477
1478 ep = lookup_tid(t, tid);
1479 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1480 dst_confirm(ep->dst);
1481
1482 spin_lock_irqsave(&ep->com.lock, flags);
1483 switch (ep->com.state) {
1484 case MPA_REQ_WAIT:
1485 __state_set(&ep->com, CLOSING);
1486 break;
1487 case MPA_REQ_SENT:
1488 __state_set(&ep->com, CLOSING);
1489 connect_reply_upcall(ep, -ECONNRESET);
1490 break;
1491 case MPA_REQ_RCVD:
1492
1493 /*
1494 * We're gonna mark this puppy DEAD, but keep
1495 * the reference on it until the ULP accepts or
1496 * rejects the CR. Also wake up anyone waiting
1497 * in rdma connection migration (see c4iw_accept_cr()).
1498 */
1499 __state_set(&ep->com, CLOSING);
1500 ep->com.rpl_done = 1;
1501 ep->com.rpl_err = -ECONNRESET;
1502 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
1503 wake_up(&ep->com.waitq);
1504 break;
1505 case MPA_REP_SENT:
1506 __state_set(&ep->com, CLOSING);
1507 ep->com.rpl_done = 1;
1508 ep->com.rpl_err = -ECONNRESET;
1509 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
1510 wake_up(&ep->com.waitq);
1511 break;
1512 case FPDU_MODE:
1513 start_timer = 1;
1514 __state_set(&ep->com, CLOSING);
1515 closing = 1;
1516 peer_close_upcall(ep);
1517 break;
1518 case ABORTING:
1519 disconnect = 0;
1520 break;
1521 case CLOSING:
1522 __state_set(&ep->com, MORIBUND);
1523 disconnect = 0;
1524 break;
1525 case MORIBUND:
1526 stop_timer = 1;
1527 if (ep->com.cm_id && ep->com.qp) {
1528 attrs.next_state = C4IW_QP_STATE_IDLE;
1529 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1530 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1531 }
1532 close_complete_upcall(ep);
1533 __state_set(&ep->com, DEAD);
1534 release = 1;
1535 disconnect = 0;
1536 break;
1537 case DEAD:
1538 disconnect = 0;
1539 break;
1540 default:
1541 BUG_ON(1);
1542 }
1543 spin_unlock_irqrestore(&ep->com.lock, flags);
1544 if (closing) {
1545 attrs.next_state = C4IW_QP_STATE_CLOSING;
1546 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1547 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1548 }
1549 if (start_timer)
1550 start_ep_timer(ep);
1551 if (stop_timer)
1552 stop_ep_timer(ep);
1553 if (disconnect)
1554 c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
1555 if (release)
1556 release_ep_resources(ep);
1557 return 0;
1558}
1559
1560/*
1561 * Returns whether an ABORT_REQ_RSS message is a negative advice.
1562 */
1563static int is_neg_adv_abort(unsigned int status)
1564{
1565 return status == CPL_ERR_RTX_NEG_ADVICE ||
1566 status == CPL_ERR_PERSIST_NEG_ADVICE;
1567}
1568
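/*
 * peer_abort - handle CPL_ABORT_REQ_RSS.  Negative-advice aborts are
 * ignored.  Otherwise the endpoint is torn down according to its state
 * (failing any in-progress MPA exchange, moving an active QP to ERROR and
 * issuing the peer_abort upcall) and a CPL_ABORT_RPL with CPL_ABORT_NO_RST
 * is returned to the hardware.
 */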
1569static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
1570{
1571 struct cpl_abort_req_rss *req = cplhdr(skb);
1572 struct c4iw_ep *ep;
1573 struct cpl_abort_rpl *rpl;
1574 struct sk_buff *rpl_skb;
1575 struct c4iw_qp_attributes attrs;
1576 int ret;
1577 int release = 0;
1578 unsigned long flags;
1579 struct tid_info *t = dev->rdev.lldi.tids;
1580 unsigned int tid = GET_TID(req);
1581 int stop_timer = 0;
1582
1583 ep = lookup_tid(t, tid);
1584 if (is_neg_adv_abort(req->status)) {
1585 PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
1586 ep->hwtid);
1587 return 0;
1588 }
1589 spin_lock_irqsave(&ep->com.lock, flags);
1590 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
1591 ep->com.state);
1592 switch (ep->com.state) {
1593 case CONNECTING:
1594 break;
1595 case MPA_REQ_WAIT:
1596 stop_timer = 1;
1597 break;
1598 case MPA_REQ_SENT:
1599 stop_timer = 1;
1600 connect_reply_upcall(ep, -ECONNRESET);
1601 break;
1602 case MPA_REP_SENT:
1603 ep->com.rpl_done = 1;
1604 ep->com.rpl_err = -ECONNRESET;
1605 PDBG("waking up ep %p\n", ep);
1606 wake_up(&ep->com.waitq);
1607 break;
1608 case MPA_REQ_RCVD:
1609
1610 /*
1611 * We're gonna mark this puppy DEAD, but keep
1612 * the reference on it until the ULP accepts or
1613 * rejects the CR. Also wake up anyone waiting
1614 * in rdma connection migration (see c4iw_accept_cr()).
1615 */
1616 ep->com.rpl_done = 1;
1617 ep->com.rpl_err = -ECONNRESET;
1618 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
1619 wake_up(&ep->com.waitq);
1620 break;
1621 case MORIBUND:
1622 case CLOSING:
1623 stop_timer = 1;
1624 /*FALLTHROUGH*/
1625 case FPDU_MODE:
1626 if (ep->com.cm_id && ep->com.qp) {
1627 attrs.next_state = C4IW_QP_STATE_ERROR;
1628 ret = c4iw_modify_qp(ep->com.qp->rhp,
1629 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
1630 &attrs, 1);
1631 if (ret)
1632 printk(KERN_ERR MOD
1633 "%s - qp <- error failed!\n",
1634 __func__);
1635 }
1636 peer_abort_upcall(ep);
1637 break;
1638 case ABORTING:
1639 break;
1640 case DEAD:
1641 PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
1642 spin_unlock_irqrestore(&ep->com.lock, flags);
1643 return 0;
1644 default:
1645 BUG_ON(1);
1646 break;
1647 }
1648 dst_confirm(ep->dst);
1649 if (ep->com.state != ABORTING) {
1650 __state_set(&ep->com, DEAD);
1651 release = 1;
1652 }
1653 spin_unlock_irqrestore(&ep->com.lock, flags);
1654
1655 rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
1656 if (!rpl_skb) {
1657 printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
1658 __func__);
1659 release = 1;
1660 goto out;
1661 }
1662 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
1663 rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
1664 INIT_TP_WR(rpl, ep->hwtid);
1665 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
1666 rpl->cmd = CPL_ABORT_NO_RST;
1667 c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
1668out:
1669 if (stop_timer)
1670 stop_ep_timer(ep);
1671 if (release)
1672 release_ep_resources(ep);
1673 return 0;
1674}
1675
1676static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1677{
1678 struct c4iw_ep *ep;
1679 struct c4iw_qp_attributes attrs;
1680 struct cpl_close_con_rpl *rpl = cplhdr(skb);
1681 unsigned long flags;
1682 int release = 0;
1683 struct tid_info *t = dev->rdev.lldi.tids;
1684 unsigned int tid = GET_TID(rpl);
1685 int stop_timer = 0;
1686
1687 ep = lookup_tid(t, tid);
1688
1689 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1690 BUG_ON(!ep);
1691
1692 /* The cm_id may be null if we failed to connect */
1693 spin_lock_irqsave(&ep->com.lock, flags);
1694 switch (ep->com.state) {
1695 case CLOSING:
1696 __state_set(&ep->com, MORIBUND);
1697 break;
1698 case MORIBUND:
1699 stop_timer = 1;
1700 if ((ep->com.cm_id) && (ep->com.qp)) {
1701 attrs.next_state = C4IW_QP_STATE_IDLE;
1702 c4iw_modify_qp(ep->com.qp->rhp,
1703 ep->com.qp,
1704 C4IW_QP_ATTR_NEXT_STATE,
1705 &attrs, 1);
1706 }
1707 close_complete_upcall(ep);
1708 __state_set(&ep->com, DEAD);
1709 release = 1;
1710 break;
1711 case ABORTING:
1712 case DEAD:
1713 break;
1714 default:
1715 BUG_ON(1);
1716 break;
1717 }
1718 spin_unlock_irqrestore(&ep->com.lock, flags);
1719 if (stop_timer)
1720 stop_ep_timer(ep);
1721 if (release)
1722 release_ep_resources(ep);
1723 return 0;
1724}
1725
1726static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
1727{
1728 struct c4iw_ep *ep;
1729 struct cpl_rdma_terminate *term = cplhdr(skb);
1730 struct tid_info *t = dev->rdev.lldi.tids;
1731 unsigned int tid = GET_TID(term);
1732
1733 ep = lookup_tid(t, tid);
1734
1735 if (state_read(&ep->com) != FPDU_MODE)
1736 return 0;
1737
1738 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1739 skb_pull(skb, sizeof *term);
1740 PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
1741 skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
1742 skb->len);
1743 ep->com.qp->attr.terminate_msg_len = skb->len;
1744 ep->com.qp->attr.is_terminate_local = 0;
1745 return 0;
1746}
1747
1748/*
1749 * Upcall from the adapter indicating data has been transmitted.
1750 * For us it's just the single MPA request or reply.  We can now free
1751 * the skb holding the mpa message.
1752 */
1753static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
1754{
1755 struct c4iw_ep *ep;
1756 struct cpl_fw4_ack *hdr = cplhdr(skb);
1757 u8 credits = hdr->credits;
1758 unsigned int tid = GET_TID(hdr);
1759 struct tid_info *t = dev->rdev.lldi.tids;
1760
1761
1762 ep = lookup_tid(t, tid);
1763 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
1764 if (credits == 0) {
1765 PDBG(KERN_ERR "%s 0 credit ack ep %p tid %u state %u\n",
1766 __func__, ep, ep->hwtid, state_read(&ep->com));
1767 return 0;
1768 }
1769
1770 dst_confirm(ep->dst);
1771 if (ep->mpa_skb) {
1772 PDBG("%s last streaming msg ack ep %p tid %u state %u "
1773 "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
1774 state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
1775 kfree_skb(ep->mpa_skb);
1776 ep->mpa_skb = NULL;
1777 }
1778 return 0;
1779}
1780
1781int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
1782{
1783 int err;
1784 struct c4iw_ep *ep = to_ep(cm_id);
1785 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1786
1787 if (state_read(&ep->com) == DEAD) {
1788 c4iw_put_ep(&ep->com);
1789 return -ECONNRESET;
1790 }
1791 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
1792 if (mpa_rev == 0)
1793 abort_connection(ep, NULL, GFP_KERNEL);
1794 else {
1795 err = send_mpa_reject(ep, pdata, pdata_len);
1796 err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
1797 }
1798 c4iw_put_ep(&ep->com);
1799 return 0;
1800}
1801
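/*
 * c4iw_accept_cr - iw_cm accept entry point.  Validates the requested
 * IRD/ORD against c4iw_max_read_depth, binds the cm_id and QP to the
 * endpoint, moves the QP to RTS with the negotiated MPA attributes and
 * sends the accepting MPA reply.  On success the endpoint enters FPDU_MODE
 * and an ESTABLISHED event is delivered to the ULP.
 */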
1802int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1803{
1804 int err;
1805 struct c4iw_qp_attributes attrs;
1806 enum c4iw_qp_attr_mask mask;
1807 struct c4iw_ep *ep = to_ep(cm_id);
1808 struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
1809 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
1810
1811 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1812 if (state_read(&ep->com) == DEAD) {
1813 err = -ECONNRESET;
1814 goto err;
1815 }
1816
1817 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
1818 BUG_ON(!qp);
1819
1820 if ((conn_param->ord > c4iw_max_read_depth) ||
1821 (conn_param->ird > c4iw_max_read_depth)) {
1822 abort_connection(ep, NULL, GFP_KERNEL);
1823 err = -EINVAL;
1824 goto err;
1825 }
1826
1827 cm_id->add_ref(cm_id);
1828 ep->com.cm_id = cm_id;
1829 ep->com.qp = qp;
1830
1831 ep->ird = conn_param->ird;
1832 ep->ord = conn_param->ord;
1833
1834 if (peer2peer && ep->ird == 0)
1835 ep->ird = 1;
1836
1837 PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
1838
1839 /* bind QP to EP and move to RTS */
1840 attrs.mpa_attr = ep->mpa_attr;
1841 attrs.max_ird = ep->ird;
1842 attrs.max_ord = ep->ord;
1843 attrs.llp_stream_handle = ep;
1844 attrs.next_state = C4IW_QP_STATE_RTS;
1845
1846 /* bind QP and TID with INIT_WR */
1847 mask = C4IW_QP_ATTR_NEXT_STATE |
1848 C4IW_QP_ATTR_LLP_STREAM_HANDLE |
1849 C4IW_QP_ATTR_MPA_ATTR |
1850 C4IW_QP_ATTR_MAX_IRD |
1851 C4IW_QP_ATTR_MAX_ORD;
1852
1853 err = c4iw_modify_qp(ep->com.qp->rhp,
1854 ep->com.qp, mask, &attrs, 1);
1855 if (err)
1856 goto err1;
1857 err = send_mpa_reply(ep, conn_param->private_data,
1858 conn_param->private_data_len);
1859 if (err)
1860 goto err1;
1861
1862 state_set(&ep->com, FPDU_MODE);
1863 established_upcall(ep);
1864 c4iw_put_ep(&ep->com);
1865 return 0;
1866err1:
1867 ep->com.cm_id = NULL;
1868 ep->com.qp = NULL;
1869 cm_id->rem_ref(cm_id);
1870err:
1871 c4iw_put_ep(&ep->com);
1872 return err;
1873}
1874
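/*
 * c4iw_connect - iw_cm connect entry point for an active open.  After
 * validating IRD/ORD, allocates an endpoint and an atid, resolves a route
 * and L2T entry for the destination (again special-casing loopback),
 * selects the tx/rx queues for the chosen port and finally issues the
 * CPL_ACT_OPEN_REQ via send_connect().  The error paths unwind in reverse
 * order of allocation.
 */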
1875int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1876{
1877 int err = 0;
1878 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
1879 struct c4iw_ep *ep;
1880 struct rtable *rt;
1881 struct net_device *pdev;
1882 int step;
1883
1884 if ((conn_param->ord > c4iw_max_read_depth) ||
1885 (conn_param->ird > c4iw_max_read_depth)) {
1886 err = -EINVAL;
1887 goto out;
1888 }
1889 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
1890 if (!ep) {
1891 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
1892 err = -ENOMEM;
1893 goto out;
1894 }
1895 init_timer(&ep->timer);
1896 ep->plen = conn_param->private_data_len;
1897 if (ep->plen)
1898 memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
1899 conn_param->private_data, ep->plen);
1900 ep->ird = conn_param->ird;
1901 ep->ord = conn_param->ord;
1902
1903 if (peer2peer && ep->ord == 0)
1904 ep->ord = 1;
1905
1906 cm_id->add_ref(cm_id);
1907 ep->com.dev = dev;
1908 ep->com.cm_id = cm_id;
1909 ep->com.qp = get_qhp(dev, conn_param->qpn);
1910 BUG_ON(!ep->com.qp);
1911 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
1912 ep->com.qp, cm_id);
1913
1914 /*
1915 * Allocate an active TID to initiate a TCP connection.
1916 */
1917 ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
1918 if (ep->atid == -1) {
1919 printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
1920 err = -ENOMEM;
1921 goto fail2;
1922 }
1923
1924 PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__,
1925 ntohl(cm_id->local_addr.sin_addr.s_addr),
1926 ntohs(cm_id->local_addr.sin_port),
1927 ntohl(cm_id->remote_addr.sin_addr.s_addr),
1928 ntohs(cm_id->remote_addr.sin_port));
1929
1930 /* find a route */
1931 rt = find_route(dev,
1932 cm_id->local_addr.sin_addr.s_addr,
1933 cm_id->remote_addr.sin_addr.s_addr,
1934 cm_id->local_addr.sin_port,
1935 cm_id->remote_addr.sin_port, 0);
1936 if (!rt) {
1937 printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
1938 err = -EHOSTUNREACH;
1939 goto fail3;
1940 }
1941 ep->dst = &rt->u.dst;
1942
1943 /* get a l2t entry */
1944 if (ep->dst->neighbour->dev->flags & IFF_LOOPBACK) {
1945 PDBG("%s LOOPBACK\n", __func__);
1946 pdev = ip_dev_find(&init_net,
1947 cm_id->remote_addr.sin_addr.s_addr);
1948 ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
1949 ep->dst->neighbour,
1950 pdev, 0);
1951 ep->mtu = pdev->mtu;
1952 ep->tx_chan = cxgb4_port_chan(pdev);
1953 ep->smac_idx = ep->tx_chan << 1;
1954 step = ep->com.dev->rdev.lldi.ntxq /
1955 ep->com.dev->rdev.lldi.nchan;
1956 ep->txq_idx = cxgb4_port_idx(pdev) * step;
1957 step = ep->com.dev->rdev.lldi.nrxq /
1958 ep->com.dev->rdev.lldi.nchan;
1959 ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
1960 cxgb4_port_idx(pdev) * step];
1961 dev_put(pdev);
1962 } else {
1963 ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
1964 ep->dst->neighbour,
1965 ep->dst->neighbour->dev, 0);
1966 ep->mtu = dst_mtu(ep->dst);
1967 ep->tx_chan = cxgb4_port_chan(ep->dst->neighbour->dev);
1968 ep->smac_idx = ep->tx_chan << 1;
1969 step = ep->com.dev->rdev.lldi.ntxq /
1970 ep->com.dev->rdev.lldi.nchan;
1971 ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step;
1972 step = ep->com.dev->rdev.lldi.nrxq /
1973 ep->com.dev->rdev.lldi.nchan;
1974 ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
1975 cxgb4_port_idx(ep->dst->neighbour->dev) * step];
1976 }
1977 if (!ep->l2t) {
1978 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
1979 err = -ENOMEM;
1980 goto fail4;
1981 }
1982
1983 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
1984 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
1985 ep->l2t->idx);
1986
1987 state_set(&ep->com, CONNECTING);
1988 ep->tos = 0;
1989 ep->com.local_addr = cm_id->local_addr;
1990 ep->com.remote_addr = cm_id->remote_addr;
1991
1992 /* send connect request to rnic */
1993 err = send_connect(ep);
1994 if (!err)
1995 goto out;
1996
1997 cxgb4_l2t_release(ep->l2t);
1998fail4:
1999 dst_release(ep->dst);
2000fail3:
2001 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
2002fail2:
2003 cm_id->rem_ref(cm_id);
2004 c4iw_put_ep(&ep->com);
2005out:
2006 return err;
2007}
2008
2009int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
2010{
2011 int err = 0;
2012 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2013 struct c4iw_listen_ep *ep;
2014
2015
2016 might_sleep();
2017
2018 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
2019 if (!ep) {
2020 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
2021 err = -ENOMEM;
2022 goto fail1;
2023 }
2024 PDBG("%s ep %p\n", __func__, ep);
2025 cm_id->add_ref(cm_id);
2026 ep->com.cm_id = cm_id;
2027 ep->com.dev = dev;
2028 ep->backlog = backlog;
2029 ep->com.local_addr = cm_id->local_addr;
2030
2031 /*
2032 * Allocate a server TID.
2033 */
2034 ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
2035 if (ep->stid == -1) {
2036 printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
2037 err = -ENOMEM;
2038 goto fail2;
2039 }
2040
2041 state_set(&ep->com, LISTEN);
2042 err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid,
2043 ep->com.local_addr.sin_addr.s_addr,
2044 ep->com.local_addr.sin_port,
2045 ep->com.dev->rdev.lldi.rxq_ids[0]);
2046 if (err)
2047 goto fail3;
2048
2049 /* wait for pass_open_rpl */
2050 wait_event(ep->com.waitq, ep->com.rpl_done);
2051 err = ep->com.rpl_err;
2052 if (!err) {
2053 cm_id->provider_data = ep;
2054 goto out;
2055 }
2056fail3:
2057 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
2058fail2:
2059 cm_id->rem_ref(cm_id);
2060 c4iw_put_ep(&ep->com);
2061fail1:
2062out:
2063 return err;
2064}
2065
2066int c4iw_destroy_listen(struct iw_cm_id *cm_id)
2067{
2068 int err;
2069 struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
2070
2071 PDBG("%s ep %p\n", __func__, ep);
2072
2073 might_sleep();
2074 state_set(&ep->com, DEAD);
2075 ep->com.rpl_done = 0;
2076 ep->com.rpl_err = 0;
2077 err = listen_stop(ep);
2078 if (err)
2079 goto done;
2080 wait_event(ep->com.waitq, ep->com.rpl_done);
2081 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
2082done:
2083 err = ep->com.rpl_err;
2084 cm_id->rem_ref(cm_id);
2085 c4iw_put_ep(&ep->com);
2086 return err;
2087}
2088
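/*
 * c4iw_ep_disconnect - common teardown path used by the ULP and the CPL
 * handlers.  Depending on the current state and the 'abrupt' flag this
 * either starts an orderly half-close (CLOSING plus timer) or aborts the
 * connection outright; a fatal rdev error short-circuits straight to DEAD.
 * Resources are released here only on the fatal path; otherwise the
 * close/abort reply handlers finish the job.
 */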
2089int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
2090{
2091 int ret = 0;
2092 unsigned long flags;
2093 int close = 0;
2094 int fatal = 0;
2095 struct c4iw_rdev *rdev;
2096 int start_timer = 0;
2097 int stop_timer = 0;
2098
2099 spin_lock_irqsave(&ep->com.lock, flags);
2100
2101 PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
2102 states[ep->com.state], abrupt);
2103
2104 rdev = &ep->com.dev->rdev;
2105 if (c4iw_fatal_error(rdev)) {
2106 fatal = 1;
2107 close_complete_upcall(ep);
2108 ep->com.state = DEAD;
2109 }
2110 switch (ep->com.state) {
2111 case MPA_REQ_WAIT:
2112 case MPA_REQ_SENT:
2113 case MPA_REQ_RCVD:
2114 case MPA_REP_SENT:
2115 case FPDU_MODE:
2116 close = 1;
2117 if (abrupt)
2118 ep->com.state = ABORTING;
2119 else {
2120 ep->com.state = CLOSING;
2121 start_timer = 1;
2122 }
2123 set_bit(CLOSE_SENT, &ep->com.flags);
2124 break;
2125 case CLOSING:
2126 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
2127 close = 1;
2128 if (abrupt) {
2129 stop_timer = 1;
2130 ep->com.state = ABORTING;
2131 } else
2132 ep->com.state = MORIBUND;
2133 }
2134 break;
2135 case MORIBUND:
2136 case ABORTING:
2137 case DEAD:
2138 PDBG("%s ignoring disconnect ep %p state %u\n",
2139 __func__, ep, ep->com.state);
2140 break;
2141 default:
2142 BUG();
2143 break;
2144 }
2145
2146 spin_unlock_irqrestore(&ep->com.lock, flags);
2147 if (start_timer)
2148 start_ep_timer(ep);
2149 if (stop_timer)
2150 stop_ep_timer(ep);
2151 if (close) {
2152 if (abrupt)
2153 ret = abort_connection(ep, NULL, gfp);
2154 else
2155 ret = send_halfclose(ep, gfp);
2156 if (ret)
2157 fatal = 1;
2158 }
2159 if (fatal)
2160 release_ep_resources(ep);
2161 return ret;
2162}
2163
2164/*
2165 * These are the real handlers that are called from a
2166 * work queue.
2167 */
2168static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
2169 [CPL_ACT_ESTABLISH] = act_establish,
2170 [CPL_ACT_OPEN_RPL] = act_open_rpl,
2171 [CPL_RX_DATA] = rx_data,
2172 [CPL_ABORT_RPL_RSS] = abort_rpl,
2173 [CPL_ABORT_RPL] = abort_rpl,
2174 [CPL_PASS_OPEN_RPL] = pass_open_rpl,
2175 [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
2176 [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
2177 [CPL_PASS_ESTABLISH] = pass_establish,
2178 [CPL_PEER_CLOSE] = peer_close,
2179 [CPL_ABORT_REQ_RSS] = peer_abort,
2180 [CPL_CLOSE_CON_RPL] = close_con_rpl,
2181 [CPL_RDMA_TERMINATE] = terminate,
2182 [CPL_FW4_ACK] = fw4_ack
2183};
2184
2185static void process_timeout(struct c4iw_ep *ep)
2186{
2187 struct c4iw_qp_attributes attrs;
2188 int abort = 1;
2189
2190 spin_lock_irq(&ep->com.lock);
2191 PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
2192 ep->com.state);
2193 switch (ep->com.state) {
2194 case MPA_REQ_SENT:
2195 __state_set(&ep->com, ABORTING);
2196 connect_reply_upcall(ep, -ETIMEDOUT);
2197 break;
2198 case MPA_REQ_WAIT:
2199 __state_set(&ep->com, ABORTING);
2200 break;
2201 case CLOSING:
2202 case MORIBUND:
2203 if (ep->com.cm_id && ep->com.qp) {
2204 attrs.next_state = C4IW_QP_STATE_ERROR;
2205 c4iw_modify_qp(ep->com.qp->rhp,
2206 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
2207 &attrs, 1);
2208 }
2209 __state_set(&ep->com, ABORTING);
2210 break;
2211 default:
2212 printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n",
2213 __func__, ep, ep->hwtid, ep->com.state);
2214 WARN_ON(1);
2215 abort = 0;
2216 }
2217 spin_unlock_irq(&ep->com.lock);
2218 if (abort)
2219 abort_connection(ep, NULL, GFP_KERNEL);
2220 c4iw_put_ep(&ep->com);
2221}
2222
2223static void process_timedout_eps(void)
2224{
2225 struct c4iw_ep *ep;
2226
2227 spin_lock_irq(&timeout_lock);
2228 while (!list_empty(&timeout_list)) {
2229 struct list_head *tmp;
2230
2231 tmp = timeout_list.next;
2232 list_del(tmp);
2233 spin_unlock_irq(&timeout_lock);
2234 ep = list_entry(tmp, struct c4iw_ep, entry);
2235 process_timeout(ep);
2236 spin_lock_irq(&timeout_lock);
2237 }
2238 spin_unlock_irq(&timeout_lock);
2239}
2240
2241static void process_work(struct work_struct *work)
2242{
2243 struct sk_buff *skb = NULL;
2244 struct c4iw_dev *dev;
2245	struct cpl_act_establish *rpl;
2246 unsigned int opcode;
2247 int ret;
2248
2249 while ((skb = skb_dequeue(&rxq))) {
2250 rpl = cplhdr(skb);
2251 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
2252 opcode = rpl->ot.opcode;
2253
2254 BUG_ON(!work_handlers[opcode]);
2255 ret = work_handlers[opcode](dev, skb);
2256 if (!ret)
2257 kfree_skb(skb);
2258 }
2259 process_timedout_eps();
2260}
2261
2262static DECLARE_WORK(skb_work, process_work);
2263
2264static void ep_timeout(unsigned long arg)
2265{
2266 struct c4iw_ep *ep = (struct c4iw_ep *)arg;
2267
2268 spin_lock(&timeout_lock);
2269 list_add_tail(&ep->entry, &timeout_list);
2270 spin_unlock(&timeout_lock);
2271 queue_work(workq, &skb_work);
2272}
2273
2274/*
2275 * All the CM events are handled on a work queue to have a safe context.
2276 */
2277static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
2278{
2279
2280 /*
2281 * Save dev in the skb->cb area.
2282 */
2283 *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;
2284
2285 /*
2286 * Queue the skb and schedule the worker thread.
2287 */
2288 skb_queue_tail(&rxq, skb);
2289 queue_work(workq, &skb_work);
2290 return 0;
2291}
2292
2293static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2294{
2295 struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
2296
2297 if (rpl->status != CPL_ERR_NONE) {
2298 printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
2299 "for tid %u\n", rpl->status, GET_TID(rpl));
2300 }
2301 return 0;
2302}
2303
2304static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
2305{
2306 struct cpl_fw6_msg *rpl = cplhdr(skb);
2307 struct c4iw_wr_wait *wr_waitp;
2308 int ret;
2309
2310 PDBG("%s type %u\n", __func__, rpl->type);
2311
2312 switch (rpl->type) {
2313 case 1:
2314 ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
2315 wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1];
2316 PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
2317 if (wr_waitp) {
2318 wr_waitp->ret = ret;
2319 wr_waitp->done = 1;
2320 wake_up(&wr_waitp->wait);
2321 }
2322 break;
2323 case 2:
2324 c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
2325 break;
2326 default:
2327 printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
2328 rpl->type);
2329 break;
2330 }
2331 return 0;
2332}
2333
2334/*
2335 * Most upcalls from the T4 Core go to sched() to
2336 * schedule the processing on a work queue.
2337 */
2338c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
2339 [CPL_ACT_ESTABLISH] = sched,
2340 [CPL_ACT_OPEN_RPL] = sched,
2341 [CPL_RX_DATA] = sched,
2342 [CPL_ABORT_RPL_RSS] = sched,
2343 [CPL_ABORT_RPL] = sched,
2344 [CPL_PASS_OPEN_RPL] = sched,
2345 [CPL_CLOSE_LISTSRV_RPL] = sched,
2346 [CPL_PASS_ACCEPT_REQ] = sched,
2347 [CPL_PASS_ESTABLISH] = sched,
2348 [CPL_PEER_CLOSE] = sched,
2349 [CPL_CLOSE_CON_RPL] = sched,
2350 [CPL_ABORT_REQ_RSS] = sched,
2351 [CPL_RDMA_TERMINATE] = sched,
2352 [CPL_FW4_ACK] = sched,
2353 [CPL_SET_TCB_RPL] = set_tcb_rpl,
2354 [CPL_FW6_MSG] = fw6_msg
2355};
2356
2357int __init c4iw_cm_init(void)
2358{
2359 spin_lock_init(&timeout_lock);
2360 skb_queue_head_init(&rxq);
2361
2362 workq = create_singlethread_workqueue("iw_cxgb4");
2363 if (!workq)
2364 return -ENOMEM;
2365
2366 return 0;
2367}
2368
2369void __exit c4iw_cm_term(void)
2370{
2371 WARN_ON(!list_empty(&timeout_list));
2372 flush_workqueue(workq);
2373 destroy_workqueue(workq);
2374}
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
new file mode 100644
index 000000000000..fb1aafcc294f
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -0,0 +1,882 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include "iw_cxgb4.h"
34
35static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
36 struct c4iw_dev_ucontext *uctx)
37{
38 struct fw_ri_res_wr *res_wr;
39 struct fw_ri_res *res;
40 int wr_len;
41 struct c4iw_wr_wait wr_wait;
42 struct sk_buff *skb;
43 int ret;
44
45 wr_len = sizeof *res_wr + sizeof *res;
46 skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
47 if (!skb)
48 return -ENOMEM;
49 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
50
51 res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
52 memset(res_wr, 0, wr_len);
53 res_wr->op_nres = cpu_to_be32(
54 FW_WR_OP(FW_RI_RES_WR) |
55 V_FW_RI_RES_WR_NRES(1) |
56 FW_WR_COMPL(1));
57 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
58 res_wr->cookie = (u64)&wr_wait;
59 res = res_wr->res;
60 res->u.cq.restype = FW_RI_RES_TYPE_CQ;
61 res->u.cq.op = FW_RI_RES_OP_RESET;
62 res->u.cq.iqid = cpu_to_be32(cq->cqid);
63
64 c4iw_init_wr_wait(&wr_wait);
65 ret = c4iw_ofld_send(rdev, skb);
66 if (!ret) {
67 wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
68 if (!wr_wait.done) {
69 printk(KERN_ERR MOD "Device %s not responding!\n",
70 pci_name(rdev->lldi.pdev));
71 rdev->flags = T4_FATAL_ERROR;
72 ret = -EIO;
73 } else
74 ret = wr_wait.ret;
75 }
76
77 kfree(cq->sw_queue);
78 dma_free_coherent(&(rdev->lldi.pdev->dev),
79 cq->memsize, cq->queue,
80 pci_unmap_addr(cq, mapping));
81 c4iw_put_cqid(rdev, cq->cqid, uctx);
82 return ret;
83}
84
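/*
 * create_cq - allocate a CQ in hardware.  Reserves a cqid, allocates the
 * software shadow queue (kernel CQs only) and the DMA-coherent hardware
 * queue, then posts an FW_RI_RES_WR (restype CQ, op WRITE) describing the
 * ingress queue and sleeps on wr_wait until the firmware's FW6 completion
 * arrives or C4IW_WR_TO expires.  For user CQs the GTS doorbell address is
 * recorded in cq->ugts, presumably for later mapping into user space.
 */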
85static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
86 struct c4iw_dev_ucontext *uctx)
87{
88 struct fw_ri_res_wr *res_wr;
89 struct fw_ri_res *res;
90 int wr_len;
91 int user = (uctx != &rdev->uctx);
92 struct c4iw_wr_wait wr_wait;
93 int ret;
94 struct sk_buff *skb;
95
96 cq->cqid = c4iw_get_cqid(rdev, uctx);
97 if (!cq->cqid) {
98 ret = -ENOMEM;
99 goto err1;
100 }
101
102 if (!user) {
103 cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
104 if (!cq->sw_queue) {
105 ret = -ENOMEM;
106 goto err2;
107 }
108 }
109 cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
110 &cq->dma_addr, GFP_KERNEL);
111 if (!cq->queue) {
112 ret = -ENOMEM;
113 goto err3;
114 }
115 pci_unmap_addr_set(cq, mapping, cq->dma_addr);
116 memset(cq->queue, 0, cq->memsize);
117
118 /* build fw_ri_res_wr */
119 wr_len = sizeof *res_wr + sizeof *res;
120
121 skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
122 if (!skb) {
123 ret = -ENOMEM;
124 goto err4;
125 }
126 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
127
128 res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
129 memset(res_wr, 0, wr_len);
130 res_wr->op_nres = cpu_to_be32(
131 FW_WR_OP(FW_RI_RES_WR) |
132 V_FW_RI_RES_WR_NRES(1) |
133 FW_WR_COMPL(1));
134 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
135 res_wr->cookie = (u64)&wr_wait;
136 res = res_wr->res;
137 res->u.cq.restype = FW_RI_RES_TYPE_CQ;
138 res->u.cq.op = FW_RI_RES_OP_WRITE;
139 res->u.cq.iqid = cpu_to_be32(cq->cqid);
140 res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
141 V_FW_RI_RES_WR_IQANUS(0) |
142 V_FW_RI_RES_WR_IQANUD(1) |
143 F_FW_RI_RES_WR_IQANDST |
144 V_FW_RI_RES_WR_IQANDSTINDEX(*rdev->lldi.rxq_ids));
145 res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
146 F_FW_RI_RES_WR_IQDROPRSS |
147 V_FW_RI_RES_WR_IQPCIECH(2) |
148 V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
149 F_FW_RI_RES_WR_IQO |
150 V_FW_RI_RES_WR_IQESIZE(1));
151 res->u.cq.iqsize = cpu_to_be16(cq->size);
152 res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);
153
154 c4iw_init_wr_wait(&wr_wait);
155
156 ret = c4iw_ofld_send(rdev, skb);
157 if (ret)
158 goto err4;
159 PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
160 wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
161 if (!wr_wait.done) {
162 printk(KERN_ERR MOD "Device %s not responding!\n",
163 pci_name(rdev->lldi.pdev));
164 rdev->flags = T4_FATAL_ERROR;
165 ret = -EIO;
166 } else
167 ret = wr_wait.ret;
168 if (ret)
169 goto err4;
170
171 cq->gen = 1;
172 cq->gts = rdev->lldi.gts_reg;
173 cq->rdev = rdev;
174 if (user) {
175 cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
176 (cq->cqid << rdev->cqshift);
177 cq->ugts &= PAGE_MASK;
178 }
179 return 0;
180err4:
181 dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
182 pci_unmap_addr(cq, mapping));
183err3:
184 kfree(cq->sw_queue);
185err2:
186 c4iw_put_cqid(rdev, cq->cqid, uctx);
187err1:
188 return ret;
189}
190
191static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
192{
193 struct t4_cqe cqe;
194
195 PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
196 wq, cq, cq->sw_cidx, cq->sw_pidx);
197 memset(&cqe, 0, sizeof(cqe));
198 cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
199 V_CQE_OPCODE(FW_RI_SEND) |
200 V_CQE_TYPE(0) |
201 V_CQE_SWCQE(1) |
202 V_CQE_QPID(wq->rq.qid));
203 cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
204 cq->sw_queue[cq->sw_pidx] = cqe;
205 t4_swcq_produce(cq);
206}
207
208int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
209{
210 int flushed = 0;
211 int in_use = wq->rq.in_use - count;
212
213 BUG_ON(in_use < 0);
214 PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
215 wq, cq, wq->rq.in_use, count);
216 while (in_use--) {
217 insert_recv_cqe(wq, cq);
218 flushed++;
219 }
220 return flushed;
221}
222
223static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
224 struct t4_swsqe *swcqe)
225{
226 struct t4_cqe cqe;
227
228 PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
229 wq, cq, cq->sw_cidx, cq->sw_pidx);
230 memset(&cqe, 0, sizeof(cqe));
231 cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
232 V_CQE_OPCODE(swcqe->opcode) |
233 V_CQE_TYPE(1) |
234 V_CQE_SWCQE(1) |
235 V_CQE_QPID(wq->sq.qid));
236 CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
237 cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
238 cq->sw_queue[cq->sw_pidx] = cqe;
239 t4_swcq_produce(cq);
240}
241
242int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count)
243{
244 int flushed = 0;
245 struct t4_swsqe *swsqe = &wq->sq.sw_sq[wq->sq.cidx + count];
246 int in_use = wq->sq.in_use - count;
247
248 BUG_ON(in_use < 0);
249 while (in_use--) {
250 swsqe->signaled = 0;
251 insert_sq_cqe(wq, cq, swsqe);
252 swsqe++;
253 if (swsqe == (wq->sq.sw_sq + wq->sq.size))
254 swsqe = wq->sq.sw_sq;
255 flushed++;
256 }
257 return flushed;
258}
259
260/*
261 * Move all CQEs from the HWCQ into the SWCQ.
262 */
263void c4iw_flush_hw_cq(struct t4_cq *cq)
264{
265 struct t4_cqe *cqe = NULL, *swcqe;
266 int ret;
267
268 PDBG("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
269 ret = t4_next_hw_cqe(cq, &cqe);
270 while (!ret) {
271 PDBG("%s flushing hwcq cidx 0x%x swcq pidx 0x%x\n",
272 __func__, cq->cidx, cq->sw_pidx);
273 swcqe = &cq->sw_queue[cq->sw_pidx];
274 *swcqe = *cqe;
275 swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
276 t4_swcq_produce(cq);
277 t4_hwcq_consume(cq);
278 ret = t4_next_hw_cqe(cq, &cqe);
279 }
280}
281
282static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
283{
284 if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
285 return 0;
286
287 if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
288 return 0;
289
290 if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
291 return 0;
292
293 if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
294 return 0;
295 return 1;
296}
297
298void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
299{
300 struct t4_cqe *cqe;
301 u32 ptr;
302
303 *count = 0;
304 ptr = cq->sw_cidx;
305 while (ptr != cq->sw_pidx) {
306 cqe = &cq->sw_queue[ptr];
307 if ((SQ_TYPE(cqe) || ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) &&
308 wq->sq.oldest_read)) &&
309 (CQE_QPID(cqe) == wq->sq.qid))
310 (*count)++;
311 if (++ptr == cq->size)
312 ptr = 0;
313 }
314 PDBG("%s cq %p count %d\n", __func__, cq, *count);
315}
316
317void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
318{
319 struct t4_cqe *cqe;
320 u32 ptr;
321
322 *count = 0;
323 PDBG("%s count zero %d\n", __func__, *count);
324 ptr = cq->sw_cidx;
325 while (ptr != cq->sw_pidx) {
326 cqe = &cq->sw_queue[ptr];
327 if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
328 (CQE_QPID(cqe) == wq->rq.qid) && cqe_completes_wr(cqe, wq))
329 (*count)++;
330 if (++ptr == cq->size)
331 ptr = 0;
332 }
333 PDBG("%s cq %p count %d\n", __func__, cq, *count);
334}
335
336static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
337{
338 struct t4_swsqe *swsqe;
339 u16 ptr = wq->sq.cidx;
340 int count = wq->sq.in_use;
341 int unsignaled = 0;
342
343 swsqe = &wq->sq.sw_sq[ptr];
344 while (count--)
345 if (!swsqe->signaled) {
346 if (++ptr == wq->sq.size)
347 ptr = 0;
348 swsqe = &wq->sq.sw_sq[ptr];
349 unsignaled++;
350 } else if (swsqe->complete) {
351
352 /*
353 * Insert this completed cqe into the swcq.
354 */
355 PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
356 __func__, ptr, cq->sw_pidx);
357 swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
358 cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
359 t4_swcq_produce(cq);
360 swsqe->signaled = 0;
361 wq->sq.in_use -= unsignaled;
362 break;
363 } else
364 break;
365}
366
367static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
368 struct t4_cqe *read_cqe)
369{
370 read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
371 read_cqe->len = cpu_to_be32(wq->sq.oldest_read->read_len);
372 read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
373 V_CQE_SWCQE(SW_CQE(hw_cqe)) |
374 V_CQE_OPCODE(FW_RI_READ_REQ) |
375 V_CQE_TYPE(1));
376}
377
378/*
379 * Advance wq->sq.oldest_read to the next read wr in the SWSQ, or set it to NULL.
380 */
381static void advance_oldest_read(struct t4_wq *wq)
382{
383
384 u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;
385
386 if (rptr == wq->sq.size)
387 rptr = 0;
388 while (rptr != wq->sq.pidx) {
389 wq->sq.oldest_read = &wq->sq.sw_sq[rptr];
390
391 if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
392 return;
393 if (++rptr == wq->sq.size)
394 rptr = 0;
395 }
396 wq->sq.oldest_read = NULL;
397}
398
399/*
400 * poll_cq
401 *
402 * Caller must:
403 * check the validity of the first CQE,
404 * supply the wq associated with the qpid.
405 *
406 * credit: cq credit to return to sge.
407 * cqe_flushed: 1 iff the CQE is flushed.
408 * cqe: copy of the polled CQE.
409 *
410 * return value:
411 * 0 CQE returned ok.
412 * -EAGAIN CQE skipped, try again.
413 * -EOVERFLOW CQ overflow detected.
414 */
415static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
416 u8 *cqe_flushed, u64 *cookie, u32 *credit)
417{
418 int ret = 0;
419 struct t4_cqe *hw_cqe, read_cqe;
420
421 *cqe_flushed = 0;
422 *credit = 0;
423 ret = t4_next_cqe(cq, &hw_cqe);
424 if (ret)
425 return ret;
426
427 PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
428 " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
429 __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
430 CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
431 CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
432 CQE_WRID_LOW(hw_cqe));
433
434 /*
435 * skip CQEs not affiliated with a QP.
436 */
437 if (wq == NULL) {
438 ret = -EAGAIN;
439 goto skip_cqe;
440 }
441
442 /*
443 * Gotta tweak READ completions:
444 * 1) the cqe doesn't contain the sq_wptr from the wr.
445 * 2) opcode not reflected from the wr.
446 * 3) read_len not reflected from the wr.
447 * 4) cq_type is RQ_TYPE not SQ_TYPE.
448 */
449 if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {
450
451 /*
452 * If this is an unsolicited read response, then the read
453 * was generated by the kernel driver as part of peer-2-peer
454 * connection setup. So ignore the completion.
455 */
456 if (!wq->sq.oldest_read) {
457 if (CQE_STATUS(hw_cqe))
458 t4_set_wq_in_error(wq);
459 ret = -EAGAIN;
460 goto skip_cqe;
461 }
462
463 /*
464 * Don't write to the HWCQ, so create a new read req CQE
465 * in local memory.
466 */
467 create_read_req_cqe(wq, hw_cqe, &read_cqe);
468 hw_cqe = &read_cqe;
469 advance_oldest_read(wq);
470 }
471
472 if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
473 *cqe_flushed = t4_wq_in_error(wq);
474 t4_set_wq_in_error(wq);
475 goto proc_cqe;
476 }
477
478 /*
479 * RECV completion.
480 */
481 if (RQ_TYPE(hw_cqe)) {
482
483 /*
484 * HW only validates 4 bits of MSN. So we must validate that
485 * the MSN in the SEND is the next expected MSN. If it's not,
486 * then we complete this with T4_ERR_MSN and mark the wq in
487 * error.
488 */
489
490 if (t4_rq_empty(wq)) {
491 t4_set_wq_in_error(wq);
492 ret = -EAGAIN;
493 goto skip_cqe;
494 }
495 if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
496 t4_set_wq_in_error(wq);
497 hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
498 goto proc_cqe;
499 }
500 goto proc_cqe;
501 }
502
503 /*
504 * If we get here, it's a send completion.
505 *
506 * Handle out of order completion. These get stuffed
507 * in the SW SQ. Then the SW SQ is walked to move any
508 * now in-order completions into the SW CQ. This handles
509 * 2 cases:
510 * 1) reaping unsignaled WRs when the first subsequent
511 * signaled WR is completed.
512 * 2) out of order read completions.
513 */
514 if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
515 struct t4_swsqe *swsqe;
516
517 PDBG("%s out of order completion going in sw_sq at idx %u\n",
518 __func__, CQE_WRID_SQ_IDX(hw_cqe));
519 swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
520 swsqe->cqe = *hw_cqe;
521 swsqe->complete = 1;
522 ret = -EAGAIN;
523 goto flush_wq;
524 }
525
526proc_cqe:
527 *cqe = *hw_cqe;
528
529 /*
530 * Reap the associated WR(s) that are freed up with this
531 * completion.
532 */
533 if (SQ_TYPE(hw_cqe)) {
534 wq->sq.cidx = CQE_WRID_SQ_IDX(hw_cqe);
535 PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
536 *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
537 t4_sq_consume(wq);
538 } else {
539 PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
540 *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
541 BUG_ON(t4_rq_empty(wq));
542 t4_rq_consume(wq);
543 }
544
545flush_wq:
546 /*
547 * Flush any completed cqes that are now in-order.
548 */
549 flush_completed_wrs(wq, cq);
550
551skip_cqe:
552 if (SW_CQE(hw_cqe)) {
553 PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
554 __func__, cq, cq->cqid, cq->sw_cidx);
555 t4_swcq_consume(cq);
556 } else {
557 PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
558 __func__, cq, cq->cqid, cq->cidx);
559 t4_hwcq_consume(cq);
560 }
561 return ret;
562}
563
564/*
565 * Get one cq entry from c4iw and map it to openib.
566 *
567 * Returns:
568 * 0 cqe returned
569 * -ENODATA EMPTY;
570 * -EAGAIN caller must try again
571 * any other -errno fatal error
572 */
573static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
574{
575 struct c4iw_qp *qhp = NULL;
576 struct t4_cqe cqe = {0, 0}, *rd_cqe;
577 struct t4_wq *wq;
578 u32 credit = 0;
579 u8 cqe_flushed;
580 u64 cookie = 0;
581 int ret;
582
583 ret = t4_next_cqe(&chp->cq, &rd_cqe);
584
585 if (ret)
586 return ret;
587
588 qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
589 if (!qhp)
590 wq = NULL;
591 else {
592 spin_lock(&qhp->lock);
593 wq = &(qhp->wq);
594 }
595 ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
596 if (ret)
597 goto out;
598
599 wc->wr_id = cookie;
600 wc->qp = &qhp->ibqp;
601 wc->vendor_err = CQE_STATUS(&cqe);
602 wc->wc_flags = 0;
603
604 PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x "
605 "lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe),
606 CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe),
607 CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), (unsigned long long)cookie);
608
609 if (CQE_TYPE(&cqe) == 0) {
610 if (!CQE_STATUS(&cqe))
611 wc->byte_len = CQE_LEN(&cqe);
612 else
613 wc->byte_len = 0;
614 wc->opcode = IB_WC_RECV;
615 if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
616 CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
617 wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
618 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
619 }
620 } else {
621 switch (CQE_OPCODE(&cqe)) {
622 case FW_RI_RDMA_WRITE:
623 wc->opcode = IB_WC_RDMA_WRITE;
624 break;
625 case FW_RI_READ_REQ:
626 wc->opcode = IB_WC_RDMA_READ;
627 wc->byte_len = CQE_LEN(&cqe);
628 break;
629 case FW_RI_SEND_WITH_INV:
630 case FW_RI_SEND_WITH_SE_INV:
631 wc->opcode = IB_WC_SEND;
632 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
633 break;
634 case FW_RI_SEND:
635 case FW_RI_SEND_WITH_SE:
636 wc->opcode = IB_WC_SEND;
637 break;
638 case FW_RI_BIND_MW:
639 wc->opcode = IB_WC_BIND_MW;
640 break;
641
642 case FW_RI_LOCAL_INV:
643 wc->opcode = IB_WC_LOCAL_INV;
644 break;
645 case FW_RI_FAST_REGISTER:
646 wc->opcode = IB_WC_FAST_REG_MR;
647 break;
648 default:
649 printk(KERN_ERR MOD "Unexpected opcode %d "
650 "in the CQE received for QPID=0x%0x\n",
651 CQE_OPCODE(&cqe), CQE_QPID(&cqe));
652 ret = -EINVAL;
653 goto out;
654 }
655 }
656
657 if (cqe_flushed)
658 wc->status = IB_WC_WR_FLUSH_ERR;
659 else {
660
661 switch (CQE_STATUS(&cqe)) {
662 case T4_ERR_SUCCESS:
663 wc->status = IB_WC_SUCCESS;
664 break;
665 case T4_ERR_STAG:
666 wc->status = IB_WC_LOC_ACCESS_ERR;
667 break;
668 case T4_ERR_PDID:
669 wc->status = IB_WC_LOC_PROT_ERR;
670 break;
671 case T4_ERR_QPID:
672 case T4_ERR_ACCESS:
673 wc->status = IB_WC_LOC_ACCESS_ERR;
674 break;
675 case T4_ERR_WRAP:
676 wc->status = IB_WC_GENERAL_ERR;
677 break;
678 case T4_ERR_BOUND:
679 wc->status = IB_WC_LOC_LEN_ERR;
680 break;
681 case T4_ERR_INVALIDATE_SHARED_MR:
682 case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
683 wc->status = IB_WC_MW_BIND_ERR;
684 break;
685 case T4_ERR_CRC:
686 case T4_ERR_MARKER:
687 case T4_ERR_PDU_LEN_ERR:
688 case T4_ERR_OUT_OF_RQE:
689 case T4_ERR_DDP_VERSION:
690 case T4_ERR_RDMA_VERSION:
691 case T4_ERR_DDP_QUEUE_NUM:
692 case T4_ERR_MSN:
693 case T4_ERR_TBIT:
694 case T4_ERR_MO:
695 case T4_ERR_MSN_RANGE:
696 case T4_ERR_IRD_OVERFLOW:
697 case T4_ERR_OPCODE:
698 wc->status = IB_WC_FATAL_ERR;
699 break;
700 case T4_ERR_SWFLUSH:
701 wc->status = IB_WC_WR_FLUSH_ERR;
702 break;
703 default:
704 printk(KERN_ERR MOD
705 "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
706 CQE_STATUS(&cqe), CQE_QPID(&cqe));
707 ret = -EINVAL;
708 }
709 }
710out:
711 if (wq)
712 spin_unlock(&qhp->lock);
713 return ret;
714}
715
716int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
717{
718 struct c4iw_cq *chp;
719 unsigned long flags;
720 int npolled;
721 int err = 0;
722
723 chp = to_c4iw_cq(ibcq);
724
725 spin_lock_irqsave(&chp->lock, flags);
726 for (npolled = 0; npolled < num_entries; ++npolled) {
727 do {
728 err = c4iw_poll_cq_one(chp, wc + npolled);
729 } while (err == -EAGAIN);
730 if (err)
731 break;
732 }
733 spin_unlock_irqrestore(&chp->lock, flags);
734 return !err || err == -ENODATA ? npolled : err;
735}
736
737int c4iw_destroy_cq(struct ib_cq *ib_cq)
738{
739 struct c4iw_cq *chp;
740 struct c4iw_ucontext *ucontext;
741
742 PDBG("%s ib_cq %p\n", __func__, ib_cq);
743 chp = to_c4iw_cq(ib_cq);
744
745 remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
746 atomic_dec(&chp->refcnt);
747 wait_event(chp->wait, !atomic_read(&chp->refcnt));
748
749 ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
750 : NULL;
751 destroy_cq(&chp->rhp->rdev, &chp->cq,
752 ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
753 kfree(chp);
754 return 0;
755}
756
757struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
758 int vector, struct ib_ucontext *ib_context,
759 struct ib_udata *udata)
760{
761 struct c4iw_dev *rhp;
762 struct c4iw_cq *chp;
763 struct c4iw_create_cq_resp uresp;
764 struct c4iw_ucontext *ucontext = NULL;
765 int ret;
766 size_t memsize;
767 struct c4iw_mm_entry *mm, *mm2;
768
769 PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
770
771 rhp = to_c4iw_dev(ibdev);
772
773 chp = kzalloc(sizeof(*chp), GFP_KERNEL);
774 if (!chp)
775 return ERR_PTR(-ENOMEM);
776
777 if (ib_context)
778 ucontext = to_c4iw_ucontext(ib_context);
779
780 /* account for the status page. */
781 entries++;
782
783 /*
784 * entries must be a multiple of 16 for HW.
785 */
786 entries = roundup(entries, 16);
787 memsize = entries * sizeof *chp->cq.queue;
788
789 /*
790 * memsize must be a multiple of the page size if it's a user cq.
791 */
792 if (ucontext)
793 memsize = roundup(memsize, PAGE_SIZE);
794 chp->cq.size = entries;
795 chp->cq.memsize = memsize;
796
797 ret = create_cq(&rhp->rdev, &chp->cq,
798 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
799 if (ret)
800 goto err1;
801
802 chp->rhp = rhp;
803 chp->cq.size--; /* status page */
804 chp->ibcq.cqe = chp->cq.size;
805 spin_lock_init(&chp->lock);
806 atomic_set(&chp->refcnt, 1);
807 init_waitqueue_head(&chp->wait);
808 ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
809 if (ret)
810 goto err2;
811
812 if (ucontext) {
813 mm = kmalloc(sizeof *mm, GFP_KERNEL);
814 if (!mm)
815 goto err3;
816 mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
817 if (!mm2)
818 goto err4;
819
820 uresp.qid_mask = rhp->rdev.cqmask;
821 uresp.cqid = chp->cq.cqid;
822 uresp.size = chp->cq.size;
823 uresp.memsize = chp->cq.memsize;
824 spin_lock(&ucontext->mmap_lock);
825 uresp.key = ucontext->key;
826 ucontext->key += PAGE_SIZE;
827 uresp.gts_key = ucontext->key;
828 ucontext->key += PAGE_SIZE;
829 spin_unlock(&ucontext->mmap_lock);
830 ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
831 if (ret)
832 goto err5;
833
834 mm->key = uresp.key;
835 mm->addr = virt_to_phys(chp->cq.queue);
836 mm->len = chp->cq.memsize;
837 insert_mmap(ucontext, mm);
838
839 mm2->key = uresp.gts_key;
840 mm2->addr = chp->cq.ugts;
841 mm2->len = PAGE_SIZE;
842 insert_mmap(ucontext, mm2);
843 }
844 PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
845 __func__, chp->cq.cqid, chp, chp->cq.size,
846 chp->cq.memsize,
847 (unsigned long long) chp->cq.dma_addr);
848 return &chp->ibcq;
849err5:
850 kfree(mm2);
851err4:
852 kfree(mm);
853err3:
854 remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
855err2:
856 destroy_cq(&chp->rhp->rdev, &chp->cq,
857 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
858err1:
859 kfree(chp);
860 return ERR_PTR(ret);
861}
862
863int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
864{
865 return -ENOSYS;
866}
867
868int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
869{
870 struct c4iw_cq *chp;
871 int ret;
872 unsigned long flag;
873
874 chp = to_c4iw_cq(ibcq);
875 spin_lock_irqsave(&chp->lock, flag);
876 ret = t4_arm_cq(&chp->cq,
877 (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
878 spin_unlock_irqrestore(&chp->lock, flag);
879 if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
880 ret = 0;
881 return ret;
882}
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
new file mode 100644
index 000000000000..be23b5eab13b
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -0,0 +1,520 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/module.h>
33#include <linux/moduleparam.h>
34#include <linux/debugfs.h>
35
36#include <rdma/ib_verbs.h>
37
38#include "iw_cxgb4.h"
39
40#define DRV_VERSION "0.1"
41
42MODULE_AUTHOR("Steve Wise");
43MODULE_DESCRIPTION("Chelsio T4 RDMA Driver");
44MODULE_LICENSE("Dual BSD/GPL");
45MODULE_VERSION(DRV_VERSION);
46
47static LIST_HEAD(dev_list);
48static DEFINE_MUTEX(dev_mutex);
49
50static struct dentry *c4iw_debugfs_root;
51
52struct debugfs_qp_data {
53 struct c4iw_dev *devp;
54 char *buf;
55 int bufsize;
56 int pos;
57};
58
59static int count_qps(int id, void *p, void *data)
60{
61 struct c4iw_qp *qp = p;
62 int *countp = data;
63
64 if (id != qp->wq.sq.qid)
65 return 0;
66
67 *countp = *countp + 1;
68 return 0;
69}
70
71static int dump_qps(int id, void *p, void *data)
72{
73 struct c4iw_qp *qp = p;
74 struct debugfs_qp_data *qpd = data;
75 int space;
76 int cc;
77
78 if (id != qp->wq.sq.qid)
79 return 0;
80
81 space = qpd->bufsize - qpd->pos - 1;
82 if (space == 0)
83 return 1;
84
85 if (qp->ep)
86 cc = snprintf(qpd->buf + qpd->pos, space, "qp id %u state %u "
87 "ep tid %u state %u %pI4:%u->%pI4:%u\n",
88 qp->wq.sq.qid, (int)qp->attr.state,
89 qp->ep->hwtid, (int)qp->ep->com.state,
90 &qp->ep->com.local_addr.sin_addr.s_addr,
91 ntohs(qp->ep->com.local_addr.sin_port),
92 &qp->ep->com.remote_addr.sin_addr.s_addr,
93 ntohs(qp->ep->com.remote_addr.sin_port));
94 else
95 cc = snprintf(qpd->buf + qpd->pos, space, "qp id %u state %u\n",
96 qp->wq.sq.qid, (int)qp->attr.state);
97 if (cc < space)
98 qpd->pos += cc;
99 return 0;
100}
101
102static int qp_release(struct inode *inode, struct file *file)
103{
104 struct debugfs_qp_data *qpd = file->private_data;
105 if (!qpd) {
106 printk(KERN_INFO "%s null qpd?\n", __func__);
107 return 0;
108 }
109 kfree(qpd->buf);
110 kfree(qpd);
111 return 0;
112}
113
114static int qp_open(struct inode *inode, struct file *file)
115{
116 struct debugfs_qp_data *qpd;
117 int ret = 0;
118 int count = 1;
119
120 qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
121 if (!qpd) {
122 ret = -ENOMEM;
123 goto out;
124 }
125 qpd->devp = inode->i_private;
126 qpd->pos = 0;
127
128 spin_lock_irq(&qpd->devp->lock);
129 idr_for_each(&qpd->devp->qpidr, count_qps, &count);
130 spin_unlock_irq(&qpd->devp->lock);
131
132 qpd->bufsize = count * 128;
133 qpd->buf = kmalloc(qpd->bufsize, GFP_KERNEL);
134 if (!qpd->buf) {
135 ret = -ENOMEM;
136 goto err1;
137 }
138
139 spin_lock_irq(&qpd->devp->lock);
140 idr_for_each(&qpd->devp->qpidr, dump_qps, qpd);
141 spin_unlock_irq(&qpd->devp->lock);
142
143 qpd->buf[qpd->pos++] = 0;
144 file->private_data = qpd;
145 goto out;
146err1:
147 kfree(qpd);
148out:
149 return ret;
150}
151
152static ssize_t qp_read(struct file *file, char __user *buf, size_t count,
153 loff_t *ppos)
154{
155 struct debugfs_qp_data *qpd = file->private_data;
156 loff_t pos = *ppos;
157 loff_t avail = qpd->pos;
158
159 if (pos < 0)
160 return -EINVAL;
161 if (pos >= avail)
162 return 0;
163 if (count > avail - pos)
164 count = avail - pos;
165
166 while (count) {
167 size_t len = 0;
168
169 len = min((int)count, (int)qpd->pos - (int)pos);
170 if (copy_to_user(buf, qpd->buf + pos, len))
171 return -EFAULT;
172 if (len == 0)
173 return -EINVAL;
174
175 buf += len;
176 pos += len;
177 count -= len;
178 }
179 count = pos - *ppos;
180 *ppos = pos;
181 return count;
182}
183
184static const struct file_operations qp_debugfs_fops = {
185 .owner = THIS_MODULE,
186 .open = qp_open,
187 .release = qp_release,
188 .read = qp_read,
189};
190
191static int setup_debugfs(struct c4iw_dev *devp)
192{
193 struct dentry *de;
194
195 if (!devp->debugfs_root)
196 return -1;
197
198 de = debugfs_create_file("qps", S_IWUSR, devp->debugfs_root,
199 (void *)devp, &qp_debugfs_fops);
200 if (de && de->d_inode)
201 de->d_inode->i_size = 4096;
202 return 0;
203}
204
205void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
206 struct c4iw_dev_ucontext *uctx)
207{
208 struct list_head *pos, *nxt;
209 struct c4iw_qid_list *entry;
210
211 mutex_lock(&uctx->lock);
212 list_for_each_safe(pos, nxt, &uctx->qpids) {
213 entry = list_entry(pos, struct c4iw_qid_list, entry);
214 list_del_init(&entry->entry);
215 if (!(entry->qid & rdev->qpmask))
216 c4iw_put_resource(&rdev->resource.qid_fifo, entry->qid,
217 &rdev->resource.qid_fifo_lock);
218 kfree(entry);
219 }
220
221 list_for_each_safe(pos, nxt, &uctx->cqids) {
222 entry = list_entry(pos, struct c4iw_qid_list, entry);
223 list_del_init(&entry->entry);
224 kfree(entry);
225 }
226 mutex_unlock(&uctx->lock);
227}
228
229void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
230 struct c4iw_dev_ucontext *uctx)
231{
232 INIT_LIST_HEAD(&uctx->qpids);
233 INIT_LIST_HEAD(&uctx->cqids);
234 mutex_init(&uctx->lock);
235}
236
237/* Caller takes care of locking if needed */
238static int c4iw_rdev_open(struct c4iw_rdev *rdev)
239{
240 int err;
241
242 c4iw_init_dev_ucontext(rdev, &rdev->uctx);
243
244 /*
245 * qpshift is the number of bits to shift the qpid left in order
246 * to get the correct address of the doorbell for that qp.
247 */
248 rdev->qpshift = PAGE_SHIFT - ilog2(rdev->lldi.udb_density);
249 rdev->qpmask = rdev->lldi.udb_density - 1;
250 rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density);
251 rdev->cqmask = rdev->lldi.ucq_density - 1;
252 PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
253 "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x\n",
254 __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
255 rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
256 rdev->lldi.vr->pbl.start,
257 rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
258 rdev->lldi.vr->rq.size);
259 PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
260 "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
261 (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
262 (void *)pci_resource_start(rdev->lldi.pdev, 2),
263 rdev->lldi.db_reg,
264 rdev->lldi.gts_reg,
265 rdev->qpshift, rdev->qpmask,
266 rdev->cqshift, rdev->cqmask);
267
268 if (c4iw_num_stags(rdev) == 0) {
269 err = -EINVAL;
270 goto err1;
271 }
272
273 err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
274 if (err) {
275 printk(KERN_ERR MOD "error %d initializing resources\n", err);
276 goto err1;
277 }
278 err = c4iw_pblpool_create(rdev);
279 if (err) {
280 printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
281 goto err2;
282 }
283 err = c4iw_rqtpool_create(rdev);
284 if (err) {
285 printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
286 goto err3;
287 }
288 return 0;
289err3:
290 c4iw_pblpool_destroy(rdev);
291err2:
292 c4iw_destroy_resource(&rdev->resource);
293err1:
294 return err;
295}
296
297static void c4iw_rdev_close(struct c4iw_rdev *rdev)
298{
299 c4iw_pblpool_destroy(rdev);
300 c4iw_rqtpool_destroy(rdev);
301 c4iw_destroy_resource(&rdev->resource);
302}
303
304static void c4iw_remove(struct c4iw_dev *dev)
305{
306 PDBG("%s c4iw_dev %p\n", __func__, dev);
307 cancel_delayed_work_sync(&dev->db_drop_task);
308 list_del(&dev->entry);
309 c4iw_unregister_device(dev);
310 c4iw_rdev_close(&dev->rdev);
311 idr_destroy(&dev->cqidr);
312 idr_destroy(&dev->qpidr);
313 idr_destroy(&dev->mmidr);
314 ib_dealloc_device(&dev->ibdev);
315}
316
317static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
318{
319 struct c4iw_dev *devp;
320 int ret;
321
322 devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
323 if (!devp) {
324 printk(KERN_ERR MOD "Cannot allocate ib device\n");
325 return NULL;
326 }
327 devp->rdev.lldi = *infop;
328
329 mutex_lock(&dev_mutex);
330
331 ret = c4iw_rdev_open(&devp->rdev);
332 if (ret) {
333 mutex_unlock(&dev_mutex);
334 printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
335 ib_dealloc_device(&devp->ibdev);
336 return NULL;
337 }
338
339 idr_init(&devp->cqidr);
340 idr_init(&devp->qpidr);
341 idr_init(&devp->mmidr);
342 spin_lock_init(&devp->lock);
343 list_add_tail(&devp->entry, &dev_list);
344 mutex_unlock(&dev_mutex);
345
346 if (c4iw_register_device(devp)) {
347 printk(KERN_ERR MOD "Unable to register device\n");
348 mutex_lock(&dev_mutex);
349 c4iw_remove(devp);
350 mutex_unlock(&dev_mutex);
351 }
352 if (c4iw_debugfs_root) {
353 devp->debugfs_root = debugfs_create_dir(
354 pci_name(devp->rdev.lldi.pdev),
355 c4iw_debugfs_root);
356 setup_debugfs(devp);
357 }
358 return devp;
359}
360
361static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
362{
363 struct c4iw_dev *dev;
364 static int vers_printed;
365 int i;
366
367 if (!vers_printed++)
368 printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n",
369 DRV_VERSION);
370
371 dev = c4iw_alloc(infop);
372 if (!dev)
373 goto out;
374
375 PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
376 __func__, pci_name(dev->rdev.lldi.pdev),
377 dev->rdev.lldi.nchan, dev->rdev.lldi.nrxq,
378 dev->rdev.lldi.ntxq, dev->rdev.lldi.nports);
379
380 for (i = 0; i < dev->rdev.lldi.nrxq; i++)
381 PDBG("rxqid[%u] %u\n", i, dev->rdev.lldi.rxq_ids[i]);
382
383 printk(KERN_INFO MOD "Initialized device %s\n",
384 pci_name(dev->rdev.lldi.pdev));
385out:
386 return dev;
387}
388
389static struct sk_buff *t4_pktgl_to_skb(const struct pkt_gl *gl,
390 unsigned int skb_len,
391 unsigned int pull_len)
392{
393 struct sk_buff *skb;
394 struct skb_shared_info *ssi;
395
396 if (gl->tot_len <= 512) {
397 skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
398 if (unlikely(!skb))
399 goto out;
400 __skb_put(skb, gl->tot_len);
401 skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
402 } else {
403 skb = alloc_skb(skb_len, GFP_ATOMIC);
404 if (unlikely(!skb))
405 goto out;
406 __skb_put(skb, pull_len);
407 skb_copy_to_linear_data(skb, gl->va, pull_len);
408
409 ssi = skb_shinfo(skb);
410 ssi->frags[0].page = gl->frags[0].page;
411 ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
412 ssi->frags[0].size = gl->frags[0].size - pull_len;
413 if (gl->nfrags > 1)
414 memcpy(&ssi->frags[1], &gl->frags[1],
415 (gl->nfrags - 1) * sizeof(skb_frag_t));
416 ssi->nr_frags = gl->nfrags;
417
418 skb->len = gl->tot_len;
419 skb->data_len = skb->len - pull_len;
420 skb->truesize += skb->data_len;
421
422 /* Get a reference for the last page; we don't own it */
423 get_page(gl->frags[gl->nfrags - 1].page);
424 }
425out:
426 return skb;
427}
428
429static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
430 const struct pkt_gl *gl)
431{
432 struct c4iw_dev *dev = handle;
433 struct sk_buff *skb;
434 const struct cpl_act_establish *rpl;
435 unsigned int opcode;
436
437 if (gl == NULL) {
438 /* omit RSS and rsp_ctrl at end of descriptor */
439 unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;
440
441 skb = alloc_skb(256, GFP_ATOMIC);
442 if (!skb)
443 goto nomem;
444 __skb_put(skb, len);
445 skb_copy_to_linear_data(skb, &rsp[1], len);
446 } else if (gl == CXGB4_MSG_AN) {
447 const struct rsp_ctrl *rc = (void *)rsp;
448
449 u32 qid = be32_to_cpu(rc->pldbuflen_qid);
450 c4iw_ev_handler(dev, qid);
451 return 0;
452 } else {
453 skb = t4_pktgl_to_skb(gl, 128, 128);
454 if (unlikely(!skb))
455 goto nomem;
456 }
457
458 rpl = cplhdr(skb);
459 opcode = rpl->ot.opcode;
460
461 if (c4iw_handlers[opcode])
462 c4iw_handlers[opcode](dev, skb);
463 else
464 printk(KERN_INFO "%s no handler opcode 0x%x...\n", __func__,
465 opcode);
466
467 return 0;
468nomem:
469 return -1;
470}
471
472static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
473{
474 PDBG("%s new_state %u\n", __func__, new_state);
475 return 0;
476}
477
478static struct cxgb4_uld_info c4iw_uld_info = {
479 .name = DRV_NAME,
480 .add = c4iw_uld_add,
481 .rx_handler = c4iw_uld_rx_handler,
482 .state_change = c4iw_uld_state_change,
483};
484
485static int __init c4iw_init_module(void)
486{
487 int err;
488
489 err = c4iw_cm_init();
490 if (err)
491 return err;
492
493 c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
494 if (!c4iw_debugfs_root)
495 printk(KERN_WARNING MOD
496 "could not create debugfs entry, continuing\n");
497
498 cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
499
500 return 0;
501}
502
503static void __exit c4iw_exit_module(void)
504{
505 struct c4iw_dev *dev, *tmp;
506
507 cxgb4_unregister_uld(CXGB4_ULD_RDMA);
508
509 mutex_lock(&dev_mutex);
510 list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
511 c4iw_remove(dev);
512 }
513 mutex_unlock(&dev_mutex);
514
515 c4iw_cm_term();
516 debugfs_remove_recursive(c4iw_debugfs_root);
517}
518
519module_init(c4iw_init_module);
520module_exit(c4iw_exit_module);
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
new file mode 100644
index 000000000000..491e76a0327f
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -0,0 +1,193 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/slab.h>
33#include <linux/mman.h>
34#include <net/sock.h>
35
36#include "iw_cxgb4.h"
37
38static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
39 struct c4iw_qp *qhp,
40 struct t4_cqe *err_cqe,
41 enum ib_event_type ib_event)
42{
43 struct ib_event event;
44 struct c4iw_qp_attributes attrs;
45
46 if ((qhp->attr.state == C4IW_QP_STATE_ERROR) ||
47 (qhp->attr.state == C4IW_QP_STATE_TERMINATE)) {
48 PDBG("%s AE received after RTS - "
49 "qp state %d qpid 0x%x status 0x%x\n", __func__,
50 qhp->attr.state, qhp->wq.sq.qid, CQE_STATUS(err_cqe));
51 return;
52 }
53
54 printk(KERN_ERR MOD "AE qpid 0x%x opcode %d status 0x%x "
55 "type %d wrid.hi 0x%x wrid.lo 0x%x\n",
56 CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
57 CQE_STATUS(err_cqe), CQE_TYPE(err_cqe),
58 CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
59
60 if (qhp->attr.state == C4IW_QP_STATE_RTS) {
61 attrs.next_state = C4IW_QP_STATE_TERMINATE;
62 c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE,
63 &attrs, 1);
64 }
65
66 event.event = ib_event;
67 event.device = chp->ibcq.device;
68 if (ib_event == IB_EVENT_CQ_ERR)
69 event.element.cq = &chp->ibcq;
70 else
71 event.element.qp = &qhp->ibqp;
72 if (qhp->ibqp.event_handler)
73 (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
74
75 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
76}
77
78void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
79{
80 struct c4iw_cq *chp;
81 struct c4iw_qp *qhp;
82 u32 cqid;
83
84 spin_lock(&dev->lock);
85 qhp = get_qhp(dev, CQE_QPID(err_cqe));
86 if (!qhp) {
87 printk(KERN_ERR MOD "BAD AE qpid 0x%x opcode %d "
88 "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
89 CQE_QPID(err_cqe),
90 CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
91 CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
92 CQE_WRID_LOW(err_cqe));
93 spin_unlock(&dev->lock);
94 goto out;
95 }
96
97 if (SQ_TYPE(err_cqe))
98 cqid = qhp->attr.scq;
99 else
100 cqid = qhp->attr.rcq;
101 chp = get_chp(dev, cqid);
102 if (!chp) {
103 printk(KERN_ERR MOD "BAD AE cqid 0x%x qpid 0x%x opcode %d "
104 "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
105 cqid, CQE_QPID(err_cqe),
106 CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
107 CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
108 CQE_WRID_LOW(err_cqe));
109 spin_unlock(&dev->lock);
110 goto out;
111 }
112
113 c4iw_qp_add_ref(&qhp->ibqp);
114 atomic_inc(&chp->refcnt);
115 spin_unlock(&dev->lock);
116
117 /* Bad incoming write */
118 if (RQ_TYPE(err_cqe) &&
119 (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE)) {
120 post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_REQ_ERR);
121 goto done;
122 }
123
124 switch (CQE_STATUS(err_cqe)) {
125
126 /* Completion Events */
127 case T4_ERR_SUCCESS:
128 printk(KERN_ERR MOD "AE with status 0!\n");
129 break;
130
131 case T4_ERR_STAG:
132 case T4_ERR_PDID:
133 case T4_ERR_QPID:
134 case T4_ERR_ACCESS:
135 case T4_ERR_WRAP:
136 case T4_ERR_BOUND:
137 case T4_ERR_INVALIDATE_SHARED_MR:
138 case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
139 post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_ACCESS_ERR);
140 break;
141
142 /* Device Fatal Errors */
143 case T4_ERR_ECC:
144 case T4_ERR_ECC_PSTAG:
145 case T4_ERR_INTERNAL_ERR:
146 post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_DEVICE_FATAL);
147 break;
148
149 /* QP Fatal Errors */
150 case T4_ERR_OUT_OF_RQE:
151 case T4_ERR_PBL_ADDR_BOUND:
152 case T4_ERR_CRC:
153 case T4_ERR_MARKER:
154 case T4_ERR_PDU_LEN_ERR:
155 case T4_ERR_DDP_VERSION:
156 case T4_ERR_RDMA_VERSION:
157 case T4_ERR_OPCODE:
158 case T4_ERR_DDP_QUEUE_NUM:
159 case T4_ERR_MSN:
160 case T4_ERR_TBIT:
161 case T4_ERR_MO:
162 case T4_ERR_MSN_GAP:
163 case T4_ERR_MSN_RANGE:
164 case T4_ERR_RQE_ADDR_BOUND:
165 case T4_ERR_IRD_OVERFLOW:
166 post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
167 break;
168
169 default:
170 printk(KERN_ERR MOD "Unknown T4 status 0x%x QPID 0x%x\n",
171 CQE_STATUS(err_cqe), qhp->wq.sq.qid);
172 post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
173 break;
174 }
175done:
176 if (atomic_dec_and_test(&chp->refcnt))
177 wake_up(&chp->wait);
178 c4iw_qp_rem_ref(&qhp->ibqp);
179out:
180 return;
181}
182
183int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
184{
185 struct c4iw_cq *chp;
186
187 chp = get_chp(dev, qid);
188 if (chp)
189 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
190 else
191 PDBG("%s unknown cqid 0x%x\n", __func__, qid);
192 return 0;
193}
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
new file mode 100644
index 000000000000..a6269981e815
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -0,0 +1,745 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 * - Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials
20 * provided with the distribution.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
25 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
26 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
27 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
28 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29 * SOFTWARE.
30 */
31#ifndef __IW_CXGB4_H__
32#define __IW_CXGB4_H__
33
34#include <linux/mutex.h>
35#include <linux/list.h>
36#include <linux/spinlock.h>
37#include <linux/idr.h>
38#include <linux/workqueue.h>
39#include <linux/netdevice.h>
40#include <linux/sched.h>
41#include <linux/pci.h>
42#include <linux/dma-mapping.h>
43#include <linux/inet.h>
44#include <linux/wait.h>
45#include <linux/kref.h>
46#include <linux/timer.h>
47#include <linux/io.h>
48#include <linux/kfifo.h>
49
50#include <asm/byteorder.h>
51
52#include <net/net_namespace.h>
53
54#include <rdma/ib_verbs.h>
55#include <rdma/iw_cm.h>
56
57#include "cxgb4.h"
58#include "cxgb4_uld.h"
59#include "l2t.h"
60#include "user.h"
61
62#define DRV_NAME "iw_cxgb4"
63#define MOD DRV_NAME ":"
64
65extern int c4iw_debug;
66#define PDBG(fmt, args...) \
67do { \
68 if (c4iw_debug) \
69 printk(MOD fmt, ## args); \
70} while (0)
71
72#include "t4.h"
73
74#define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->pbl.start)
75#define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->rq.start)
76
77static inline void *cplhdr(struct sk_buff *skb)
78{
79 return skb->data;
80}
81
82#define C4IW_WR_TO (10*HZ)
83
84struct c4iw_wr_wait {
85 wait_queue_head_t wait;
86 int done;
87 int ret;
88};
89
90static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
91{
92 wr_waitp->ret = 0;
93 wr_waitp->done = 0;
94 init_waitqueue_head(&wr_waitp->wait);
95}
96
97struct c4iw_resource {
98 struct kfifo tpt_fifo;
99 spinlock_t tpt_fifo_lock;
100 struct kfifo qid_fifo;
101 spinlock_t qid_fifo_lock;
102 struct kfifo pdid_fifo;
103 spinlock_t pdid_fifo_lock;
104};
105
106struct c4iw_qid_list {
107 struct list_head entry;
108 u32 qid;
109};
110
111struct c4iw_dev_ucontext {
112 struct list_head qpids;
113 struct list_head cqids;
114 struct mutex lock;
115};
116
117enum c4iw_rdev_flags {
118 T4_FATAL_ERROR = (1<<0),
119};
120
121struct c4iw_rdev {
122 struct c4iw_resource resource;
123 unsigned long qpshift;
124 u32 qpmask;
125 unsigned long cqshift;
126 u32 cqmask;
127 struct c4iw_dev_ucontext uctx;
128 struct gen_pool *pbl_pool;
129 struct gen_pool *rqt_pool;
130 u32 flags;
131 struct cxgb4_lld_info lldi;
132};
133
134static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
135{
136 return rdev->flags & T4_FATAL_ERROR;
137}
138
139static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
140{
141 return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5));
142}
143
144struct c4iw_dev {
145 struct ib_device ibdev;
146 struct c4iw_rdev rdev;
147 u32 device_cap_flags;
148 struct idr cqidr;
149 struct idr qpidr;
150 struct idr mmidr;
151 spinlock_t lock;
152 struct list_head entry;
153 struct delayed_work db_drop_task;
154 struct dentry *debugfs_root;
155};
156
157static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
158{
159 return container_of(ibdev, struct c4iw_dev, ibdev);
160}
161
162static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
163{
164 return container_of(rdev, struct c4iw_dev, rdev);
165}
166
167static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
168{
169 return idr_find(&rhp->cqidr, cqid);
170}
171
172static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
173{
174 return idr_find(&rhp->qpidr, qpid);
175}
176
177static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
178{
179 return idr_find(&rhp->mmidr, mmid);
180}
181
182static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
183 void *handle, u32 id)
184{
185 int ret;
186 int newid;
187
188 do {
189 if (!idr_pre_get(idr, GFP_KERNEL))
190 return -ENOMEM;
191 spin_lock_irq(&rhp->lock);
192 ret = idr_get_new_above(idr, handle, id, &newid);
193 BUG_ON(newid != id);
194 spin_unlock_irq(&rhp->lock);
195 } while (ret == -EAGAIN);
196
197 return ret;
198}
199
200static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
201{
202 spin_lock_irq(&rhp->lock);
203 idr_remove(idr, id);
204 spin_unlock_irq(&rhp->lock);
205}
206
207struct c4iw_pd {
208 struct ib_pd ibpd;
209 u32 pdid;
210 struct c4iw_dev *rhp;
211};
212
213static inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd)
214{
215 return container_of(ibpd, struct c4iw_pd, ibpd);
216}
217
218struct tpt_attributes {
219 u64 len;
220 u64 va_fbo;
221 enum fw_ri_mem_perms perms;
222 u32 stag;
223 u32 pdid;
224 u32 qpid;
225 u32 pbl_addr;
226 u32 pbl_size;
227 u32 state:1;
228 u32 type:2;
229 u32 rsvd:1;
230 u32 remote_invaliate_disable:1;
231 u32 zbva:1;
232 u32 mw_bind_enable:1;
233 u32 page_size:5;
234};
235
236struct c4iw_mr {
237 struct ib_mr ibmr;
238 struct ib_umem *umem;
239 struct c4iw_dev *rhp;
240 u64 kva;
241 struct tpt_attributes attr;
242};
243
244static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
245{
246 return container_of(ibmr, struct c4iw_mr, ibmr);
247}
248
249struct c4iw_mw {
250 struct ib_mw ibmw;
251 struct c4iw_dev *rhp;
252 u64 kva;
253 struct tpt_attributes attr;
254};
255
256static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
257{
258 return container_of(ibmw, struct c4iw_mw, ibmw);
259}
260
261struct c4iw_fr_page_list {
262 struct ib_fast_reg_page_list ibpl;
263 DECLARE_PCI_UNMAP_ADDR(mapping);
264 dma_addr_t dma_addr;
265 struct c4iw_dev *dev;
266 int size;
267};
268
269static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list(
270 struct ib_fast_reg_page_list *ibpl)
271{
272 return container_of(ibpl, struct c4iw_fr_page_list, ibpl);
273}
274
275struct c4iw_cq {
276 struct ib_cq ibcq;
277 struct c4iw_dev *rhp;
278 struct t4_cq cq;
279 spinlock_t lock;
280 atomic_t refcnt;
281 wait_queue_head_t wait;
282};
283
284static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
285{
286 return container_of(ibcq, struct c4iw_cq, ibcq);
287}
288
289struct c4iw_mpa_attributes {
290 u8 initiator;
291 u8 recv_marker_enabled;
292 u8 xmit_marker_enabled;
293 u8 crc_enabled;
294 u8 version;
295 u8 p2p_type;
296};
297
298struct c4iw_qp_attributes {
299 u32 scq;
300 u32 rcq;
301 u32 sq_num_entries;
302 u32 rq_num_entries;
303 u32 sq_max_sges;
304 u32 sq_max_sges_rdma_write;
305 u32 rq_max_sges;
306 u32 state;
307 u8 enable_rdma_read;
308 u8 enable_rdma_write;
309 u8 enable_bind;
310 u8 enable_mmid0_fastreg;
311 u32 max_ord;
312 u32 max_ird;
313 u32 pd;
314 u32 next_state;
315 char terminate_buffer[52];
316 u32 terminate_msg_len;
317 u8 is_terminate_local;
318 struct c4iw_mpa_attributes mpa_attr;
319 struct c4iw_ep *llp_stream_handle;
320};
321
322struct c4iw_qp {
323 struct ib_qp ibqp;
324 struct c4iw_dev *rhp;
325 struct c4iw_ep *ep;
326 struct c4iw_qp_attributes attr;
327 struct t4_wq wq;
328 spinlock_t lock;
329 atomic_t refcnt;
330 wait_queue_head_t wait;
331 struct timer_list timer;
332};
333
334static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
335{
336 return container_of(ibqp, struct c4iw_qp, ibqp);
337}
338
339struct c4iw_ucontext {
340 struct ib_ucontext ibucontext;
341 struct c4iw_dev_ucontext uctx;
342 u32 key;
343 spinlock_t mmap_lock;
344 struct list_head mmaps;
345};
346
347static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
348{
349 return container_of(c, struct c4iw_ucontext, ibucontext);
350}
351
352struct c4iw_mm_entry {
353 struct list_head entry;
354 u64 addr;
355 u32 key;
356 unsigned len;
357};
358
359static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
360 u32 key, unsigned len)
361{
362 struct list_head *pos, *nxt;
363 struct c4iw_mm_entry *mm;
364
365 spin_lock(&ucontext->mmap_lock);
366 list_for_each_safe(pos, nxt, &ucontext->mmaps) {
367
368 mm = list_entry(pos, struct c4iw_mm_entry, entry);
369 if (mm->key == key && mm->len == len) {
370 list_del_init(&mm->entry);
371 spin_unlock(&ucontext->mmap_lock);
372 PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
373 key, (unsigned long long) mm->addr, mm->len);
374 return mm;
375 }
376 }
377 spin_unlock(&ucontext->mmap_lock);
378 return NULL;
379}
380
381static inline void insert_mmap(struct c4iw_ucontext *ucontext,
382 struct c4iw_mm_entry *mm)
383{
384 spin_lock(&ucontext->mmap_lock);
385 PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
386 mm->key, (unsigned long long) mm->addr, mm->len);
387 list_add_tail(&mm->entry, &ucontext->mmaps);
388 spin_unlock(&ucontext->mmap_lock);
389}
390
391enum c4iw_qp_attr_mask {
392 C4IW_QP_ATTR_NEXT_STATE = 1 << 0,
393 C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
394 C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
395 C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
396 C4IW_QP_ATTR_MAX_ORD = 1 << 11,
397 C4IW_QP_ATTR_MAX_IRD = 1 << 12,
398 C4IW_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
399 C4IW_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
400 C4IW_QP_ATTR_MPA_ATTR = 1 << 24,
401 C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
402 C4IW_QP_ATTR_VALID_MODIFY = (C4IW_QP_ATTR_ENABLE_RDMA_READ |
403 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
404 C4IW_QP_ATTR_MAX_ORD |
405 C4IW_QP_ATTR_MAX_IRD |
406 C4IW_QP_ATTR_LLP_STREAM_HANDLE |
407 C4IW_QP_ATTR_STREAM_MSG_BUFFER |
408 C4IW_QP_ATTR_MPA_ATTR |
409 C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE)
410};
411
412int c4iw_modify_qp(struct c4iw_dev *rhp,
413 struct c4iw_qp *qhp,
414 enum c4iw_qp_attr_mask mask,
415 struct c4iw_qp_attributes *attrs,
416 int internal);
417
418enum c4iw_qp_state {
419 C4IW_QP_STATE_IDLE,
420 C4IW_QP_STATE_RTS,
421 C4IW_QP_STATE_ERROR,
422 C4IW_QP_STATE_TERMINATE,
423 C4IW_QP_STATE_CLOSING,
424 C4IW_QP_STATE_TOT
425};
426
427static inline int c4iw_convert_state(enum ib_qp_state ib_state)
428{
429 switch (ib_state) {
430 case IB_QPS_RESET:
431 case IB_QPS_INIT:
432 return C4IW_QP_STATE_IDLE;
433 case IB_QPS_RTS:
434 return C4IW_QP_STATE_RTS;
435 case IB_QPS_SQD:
436 return C4IW_QP_STATE_CLOSING;
437 case IB_QPS_SQE:
438 return C4IW_QP_STATE_TERMINATE;
439 case IB_QPS_ERR:
440 return C4IW_QP_STATE_ERROR;
441 default:
442 return -1;
443 }
444}
445
446static inline u32 c4iw_ib_to_tpt_access(int a)
447{
448 return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
449 (a & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0) |
450 (a & IB_ACCESS_LOCAL_WRITE ? FW_RI_MEM_ACCESS_LOCAL_WRITE : 0) |
451 FW_RI_MEM_ACCESS_LOCAL_READ;
452}
453
454static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
455{
456 return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
457 (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
458}
459
460enum c4iw_mmid_state {
461 C4IW_STAG_STATE_VALID,
462 C4IW_STAG_STATE_INVALID
463};
464
465#define C4IW_NODE_DESC "cxgb4 Chelsio Communications"
466
467#define MPA_KEY_REQ "MPA ID Req Frame"
468#define MPA_KEY_REP "MPA ID Rep Frame"
469
470#define MPA_MAX_PRIVATE_DATA 256
471#define MPA_REJECT 0x20
472#define MPA_CRC 0x40
473#define MPA_MARKERS 0x80
474#define MPA_FLAGS_MASK 0xE0
475
476#define c4iw_put_ep(ep) { \
477 PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__, \
478 ep, atomic_read(&((ep)->kref.refcount))); \
479 WARN_ON(atomic_read(&((ep)->kref.refcount)) < 1); \
480 kref_put(&((ep)->kref), _c4iw_free_ep); \
481}
482
483#define c4iw_get_ep(ep) { \
484 PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \
485 ep, atomic_read(&((ep)->kref.refcount))); \
486 kref_get(&((ep)->kref)); \
487}
488void _c4iw_free_ep(struct kref *kref);
489
490struct mpa_message {
491 u8 key[16];
492 u8 flags;
493 u8 revision;
494 __be16 private_data_size;
495 u8 private_data[0];
496};
497
498struct terminate_message {
499 u8 layer_etype;
500 u8 ecode;
501 __be16 hdrct_rsvd;
502 u8 len_hdrs[0];
503};
504
505#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)
506
507enum c4iw_layers_types {
508 LAYER_RDMAP = 0x00,
509 LAYER_DDP = 0x10,
510 LAYER_MPA = 0x20,
511 RDMAP_LOCAL_CATA = 0x00,
512 RDMAP_REMOTE_PROT = 0x01,
513 RDMAP_REMOTE_OP = 0x02,
514 DDP_LOCAL_CATA = 0x00,
515 DDP_TAGGED_ERR = 0x01,
516 DDP_UNTAGGED_ERR = 0x02,
517 DDP_LLP = 0x03
518};
519
520enum c4iw_rdma_ecodes {
521 RDMAP_INV_STAG = 0x00,
522 RDMAP_BASE_BOUNDS = 0x01,
523 RDMAP_ACC_VIOL = 0x02,
524 RDMAP_STAG_NOT_ASSOC = 0x03,
525 RDMAP_TO_WRAP = 0x04,
526 RDMAP_INV_VERS = 0x05,
527 RDMAP_INV_OPCODE = 0x06,
528 RDMAP_STREAM_CATA = 0x07,
529 RDMAP_GLOBAL_CATA = 0x08,
530 RDMAP_CANT_INV_STAG = 0x09,
531 RDMAP_UNSPECIFIED = 0xff
532};
533
534enum c4iw_ddp_ecodes {
535 DDPT_INV_STAG = 0x00,
536 DDPT_BASE_BOUNDS = 0x01,
537 DDPT_STAG_NOT_ASSOC = 0x02,
538 DDPT_TO_WRAP = 0x03,
539 DDPT_INV_VERS = 0x04,
540 DDPU_INV_QN = 0x01,
541 DDPU_INV_MSN_NOBUF = 0x02,
542 DDPU_INV_MSN_RANGE = 0x03,
543 DDPU_INV_MO = 0x04,
544 DDPU_MSG_TOOBIG = 0x05,
545 DDPU_INV_VERS = 0x06
546};
547
548enum c4iw_mpa_ecodes {
549 MPA_CRC_ERR = 0x02,
550 MPA_MARKER_ERR = 0x03
551};
552
553enum c4iw_ep_state {
554 IDLE = 0,
555 LISTEN,
556 CONNECTING,
557 MPA_REQ_WAIT,
558 MPA_REQ_SENT,
559 MPA_REQ_RCVD,
560 MPA_REP_SENT,
561 FPDU_MODE,
562 ABORTING,
563 CLOSING,
564 MORIBUND,
565 DEAD,
566};
567
568enum c4iw_ep_flags {
569 PEER_ABORT_IN_PROGRESS = 0,
570 ABORT_REQ_IN_PROGRESS = 1,
571 RELEASE_RESOURCES = 2,
572 CLOSE_SENT = 3,
573};
574
575struct c4iw_ep_common {
576 struct iw_cm_id *cm_id;
577 struct c4iw_qp *qp;
578 struct c4iw_dev *dev;
579 enum c4iw_ep_state state;
580 struct kref kref;
581 spinlock_t lock;
582 struct sockaddr_in local_addr;
583 struct sockaddr_in remote_addr;
584 wait_queue_head_t waitq;
585 int rpl_done;
586 int rpl_err;
587 unsigned long flags;
588};
589
590struct c4iw_listen_ep {
591 struct c4iw_ep_common com;
592 unsigned int stid;
593 int backlog;
594};
595
596struct c4iw_ep {
597 struct c4iw_ep_common com;
598 struct c4iw_ep *parent_ep;
599 struct timer_list timer;
600 struct list_head entry;
601 unsigned int atid;
602 u32 hwtid;
603 u32 snd_seq;
604 u32 rcv_seq;
605 struct l2t_entry *l2t;
606 struct dst_entry *dst;
607 struct sk_buff *mpa_skb;
608 struct c4iw_mpa_attributes mpa_attr;
609 u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
610 unsigned int mpa_pkt_len;
611 u32 ird;
612 u32 ord;
613 u32 smac_idx;
614 u32 tx_chan;
615 u32 mtu;
616 u16 mss;
617 u16 emss;
618 u16 plen;
619 u16 rss_qid;
620 u16 txq_idx;
621 u8 tos;
622};
623
624static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
625{
626 return cm_id->provider_data;
627}
628
629static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
630{
631 return cm_id->provider_data;
632}
633
634static inline int compute_wscale(int win)
635{
636 int wscale = 0;
637
638 while (wscale < 14 && (65535<<wscale) < win)
639 wscale++;
640 return wscale;
641}
642
643typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct sk_buff *skb);
644
645int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
646 struct l2t_entry *l2t);
647void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qpid,
648 struct c4iw_dev_ucontext *uctx);
649u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock);
650void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock);
651int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
652int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
653int c4iw_pblpool_create(struct c4iw_rdev *rdev);
654int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
655void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
656void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
657void c4iw_destroy_resource(struct c4iw_resource *rscp);
658int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
659int c4iw_register_device(struct c4iw_dev *dev);
660void c4iw_unregister_device(struct c4iw_dev *dev);
661int __init c4iw_cm_init(void);
662void __exit c4iw_cm_term(void);
663void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
664 struct c4iw_dev_ucontext *uctx);
665void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
666 struct c4iw_dev_ucontext *uctx);
667int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
668int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
669 struct ib_send_wr **bad_wr);
670int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
671 struct ib_recv_wr **bad_wr);
672int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
673 struct ib_mw_bind *mw_bind);
674int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
675int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
676int c4iw_destroy_listen(struct iw_cm_id *cm_id);
677int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
678int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
679void c4iw_qp_add_ref(struct ib_qp *qp);
680void c4iw_qp_rem_ref(struct ib_qp *qp);
681void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list);
682struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(
683 struct ib_device *device,
684 int page_list_len);
685struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth);
686int c4iw_dealloc_mw(struct ib_mw *mw);
687struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd);
688struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
689 u64 length, u64 virt, int acc,
690 struct ib_udata *udata);
691struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
692struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
693 struct ib_phys_buf *buffer_list,
694 int num_phys_buf,
695 int acc,
696 u64 *iova_start);
697int c4iw_reregister_phys_mem(struct ib_mr *mr,
698 int mr_rereg_mask,
699 struct ib_pd *pd,
700 struct ib_phys_buf *buffer_list,
701 int num_phys_buf,
702 int acc, u64 *iova_start);
703int c4iw_dereg_mr(struct ib_mr *ib_mr);
704int c4iw_destroy_cq(struct ib_cq *ib_cq);
705struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
706 int vector,
707 struct ib_ucontext *ib_context,
708 struct ib_udata *udata);
709int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
710int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
711int c4iw_destroy_qp(struct ib_qp *ib_qp);
712struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
713 struct ib_qp_init_attr *attrs,
714 struct ib_udata *udata);
715int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
716 int attr_mask, struct ib_udata *udata);
717struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
718u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
719void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
720u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
721void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
722int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
723void c4iw_flush_hw_cq(struct t4_cq *cq);
724void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
725void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
726int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
727int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
728int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count);
729int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
730u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
731int c4iw_post_zb_read(struct c4iw_qp *qhp);
732int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
733u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
734void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
735 struct c4iw_dev_ucontext *uctx);
736u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
737void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
738 struct c4iw_dev_ucontext *uctx);
739void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);
740
741extern struct cxgb4_client t4c_client;
742extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
743extern int c4iw_max_read_depth;
744
745#endif
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
new file mode 100644
index 000000000000..e54ff6d25691
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -0,0 +1,811 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <rdma/ib_umem.h>
34#include <asm/atomic.h>
35
36#include "iw_cxgb4.h"
37
38#define T4_ULPTX_MIN_IO 32
39#define C4IW_MAX_INLINE_SIZE 96
40
41static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
42 void *data)
43{
44 struct sk_buff *skb;
45 struct ulp_mem_io *req;
46 struct ulptx_idata *sc;
47 u8 wr_len, *to_dp, *from_dp;
48 int copy_len, num_wqe, i, ret = 0;
49 struct c4iw_wr_wait wr_wait;
50
51 addr &= 0x7FFFFFF;
52 PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
53 num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
54 c4iw_init_wr_wait(&wr_wait);
55 for (i = 0; i < num_wqe; i++) {
56
57 copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE :
58 len;
59 wr_len = roundup(sizeof *req + sizeof *sc +
60 roundup(copy_len, T4_ULPTX_MIN_IO), 16);
61
62 skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
63 if (!skb)
64 return -ENOMEM;
65 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
66
67 req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
68 memset(req, 0, wr_len);
69 INIT_ULPTX_WR(req, wr_len, 0, 0);
70
71 if (i == (num_wqe-1)) {
72 req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
73 FW_WR_COMPL(1));
74 req->wr.wr_lo = (__force __be64)&wr_wait;
75 } else
76 req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR));
77 req->wr.wr_mid = cpu_to_be32(
78 FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
79
80 req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE) | (1<<23));
81 req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(
82 DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
83 req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr),
84 16));
85 req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr + i * 3));
86
87 sc = (struct ulptx_idata *)(req + 1);
88 sc->cmd_more = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_IMM));
89 sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));
90
91 to_dp = (u8 *)(sc + 1);
92 from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
93 if (data)
94 memcpy(to_dp, from_dp, copy_len);
95 else
96 memset(to_dp, 0, copy_len);
97 if (copy_len % T4_ULPTX_MIN_IO)
98 memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
99 (copy_len % T4_ULPTX_MIN_IO));
100 ret = c4iw_ofld_send(rdev, skb);
101 if (ret)
102 return ret;
103 len -= C4IW_MAX_INLINE_SIZE;
104 }
105
106 wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
107 if (!wr_wait.done) {
108 printk(KERN_ERR MOD "Device %s not responding!\n",
109 pci_name(rdev->lldi.pdev));
110 rdev->flags = T4_FATAL_ERROR;
111 ret = -EIO;
112 } else
113 ret = wr_wait.ret;
114 return ret;
115}
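
write_adapter_mem() above streams a host buffer into adapter memory as a series of inline ULP_TX writes, each carrying at most C4IW_MAX_INLINE_SIZE (96) bytes padded up to the 32-byte T4_ULPTX_MIN_IO unit. A stand-alone sketch of just that chunking arithmetic; the 200-byte length is an arbitrary example, not a value used by the driver:

#include <stdio.h>

#define T4_ULPTX_MIN_IO      32   /* same constants as in mem.c above */
#define C4IW_MAX_INLINE_SIZE 96

/* Local stand-ins for the kernel's DIV_ROUND_UP()/roundup() helpers. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define ROUND_UP(x, a)     (DIV_ROUND_UP(x, a) * (a))

int main(void)
{
	unsigned int len = 200;   /* arbitrary example payload length */
	unsigned int num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
	unsigned int i, left = len;

	printf("len %u -> %u work requests\n", len, num_wqe);
	for (i = 0; i < num_wqe; i++) {
		unsigned int copy = left > C4IW_MAX_INLINE_SIZE ?
				    C4IW_MAX_INLINE_SIZE : left;
		printf("  wqe %u: copy %u bytes, padded to %u\n",
		       i, copy, ROUND_UP(copy, T4_ULPTX_MIN_IO));
		left -= copy;
	}
	return 0;
}
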
116
117/*
118 * Build and write a TPT entry.
119 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
120 * pbl_size and pbl_addr
121 * OUT: stag index
122 */
123static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
124 u32 *stag, u8 stag_state, u32 pdid,
125 enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
126 int bind_enabled, u32 zbva, u64 to,
127 u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
128{
129 int err;
130 struct fw_ri_tpte tpt;
131 u32 stag_idx;
132 static atomic_t key;
133
134 if (c4iw_fatal_error(rdev))
135 return -EIO;
136
137 stag_state = stag_state > 0;
138 stag_idx = (*stag) >> 8;
139
140 if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
141 stag_idx = c4iw_get_resource(&rdev->resource.tpt_fifo,
142 &rdev->resource.tpt_fifo_lock);
143 if (!stag_idx)
144 return -ENOMEM;
145 *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
146 }
147 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
148 __func__, stag_state, type, pdid, stag_idx);
149
150 /* write TPT entry */
151 if (reset_tpt_entry)
152 memset(&tpt, 0, sizeof(tpt));
153 else {
154 tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
155 V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
156 V_FW_RI_TPTE_STAGSTATE(stag_state) |
157 V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
158 tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
159 (bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
160 V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
161 FW_RI_VA_BASED_TO))|
162 V_FW_RI_TPTE_PS(page_size));
163 tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
164 V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3));
165 tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
166 tpt.va_hi = cpu_to_be32((u32)(to >> 32));
167 tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
168 tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
169 tpt.len_hi = cpu_to_be32((u32)(len >> 32));
170 }
171 err = write_adapter_mem(rdev, stag_idx +
172 (rdev->lldi.vr->stag.start >> 5),
173 sizeof(tpt), &tpt);
174
175 if (reset_tpt_entry)
176 c4iw_put_resource(&rdev->resource.tpt_fifo, stag_idx,
177 &rdev->resource.tpt_fifo_lock);
178 return err;
179}
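
write_tpt_entry() above allocates a TPT index from the tpt_fifo and folds an 8-bit rolling key into the low byte of the stag, so the memory-map id (mmid) used by finish_mem_reg() and the mmidr lookups later in this file is simply stag >> 8. A small stand-alone sketch of that packing, with made-up index and key values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t stag_idx = 0x1234;   /* made-up TPT index */
	uint32_t key = 0xab;          /* made-up 8-bit rolling key */
	uint32_t stag = (stag_idx << 8) | (key & 0xff);
	uint32_t mmid = stag >> 8;    /* what insert_handle()/remove_handle() key on */

	assert(stag == 0x1234ab);
	assert(mmid == stag_idx);
	return 0;
}
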
180
181static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
182 u32 pbl_addr, u32 pbl_size)
183{
184 int err;
185
186	PDBG("%s pbl_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
187 __func__, pbl_addr, rdev->lldi.vr->pbl.start,
188 pbl_size);
189
190 err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
191 return err;
192}
193
194static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
195 u32 pbl_addr)
196{
197 return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
198 pbl_size, pbl_addr);
199}
200
201static int allocate_window(struct c4iw_rdev *rdev, u32 * stag, u32 pdid)
202{
203 *stag = T4_STAG_UNSET;
204 return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
205 0UL, 0, 0, 0, 0);
206}
207
208static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
209{
210 return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
211 0);
212}
213
214static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
215 u32 pbl_size, u32 pbl_addr)
216{
217 *stag = T4_STAG_UNSET;
218 return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
219 0UL, 0, 0, pbl_size, pbl_addr);
220}
221
222static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
223{
224 u32 mmid;
225
226 mhp->attr.state = 1;
227 mhp->attr.stag = stag;
228 mmid = stag >> 8;
229 mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
230 PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
231 return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
232}
233
234static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
235 struct c4iw_mr *mhp, int shift)
236{
237 u32 stag = T4_STAG_UNSET;
238 int ret;
239
240 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
241 FW_RI_STAG_NSMR, mhp->attr.perms,
242 mhp->attr.mw_bind_enable, mhp->attr.zbva,
243 mhp->attr.va_fbo, mhp->attr.len, shift - 12,
244 mhp->attr.pbl_size, mhp->attr.pbl_addr);
245 if (ret)
246 return ret;
247
248 ret = finish_mem_reg(mhp, stag);
249 if (ret)
250 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
251 mhp->attr.pbl_addr);
252 return ret;
253}
254
255static int reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
256 struct c4iw_mr *mhp, int shift, int npages)
257{
258 u32 stag;
259 int ret;
260
261 if (npages > mhp->attr.pbl_size)
262 return -ENOMEM;
263
264 stag = mhp->attr.stag;
265 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
266 FW_RI_STAG_NSMR, mhp->attr.perms,
267 mhp->attr.mw_bind_enable, mhp->attr.zbva,
268 mhp->attr.va_fbo, mhp->attr.len, shift - 12,
269 mhp->attr.pbl_size, mhp->attr.pbl_addr);
270 if (ret)
271 return ret;
272
273 ret = finish_mem_reg(mhp, stag);
274 if (ret)
275 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
276 mhp->attr.pbl_addr);
277
278 return ret;
279}
280
281static int alloc_pbl(struct c4iw_mr *mhp, int npages)
282{
283 mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
284 npages << 3);
285
286 if (!mhp->attr.pbl_addr)
287 return -ENOMEM;
288
289 mhp->attr.pbl_size = npages;
290
291 return 0;
292}
293
294static int build_phys_page_list(struct ib_phys_buf *buffer_list,
295 int num_phys_buf, u64 *iova_start,
296 u64 *total_size, int *npages,
297 int *shift, __be64 **page_list)
298{
299 u64 mask;
300 int i, j, n;
301
302 mask = 0;
303 *total_size = 0;
304 for (i = 0; i < num_phys_buf; ++i) {
305 if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
306 return -EINVAL;
307 if (i != 0 && i != num_phys_buf - 1 &&
308 (buffer_list[i].size & ~PAGE_MASK))
309 return -EINVAL;
310 *total_size += buffer_list[i].size;
311 if (i > 0)
312 mask |= buffer_list[i].addr;
313 else
314 mask |= buffer_list[i].addr & PAGE_MASK;
315 if (i != num_phys_buf - 1)
316 mask |= buffer_list[i].addr + buffer_list[i].size;
317 else
318 mask |= (buffer_list[i].addr + buffer_list[i].size +
319 PAGE_SIZE - 1) & PAGE_MASK;
320 }
321
322 if (*total_size > 0xFFFFFFFFULL)
323 return -ENOMEM;
324
325 /* Find largest page shift we can use to cover buffers */
326 for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
327 if ((1ULL << *shift) & mask)
328 break;
329
330 buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
331 buffer_list[0].addr &= ~0ull << *shift;
332
333 *npages = 0;
334 for (i = 0; i < num_phys_buf; ++i)
335 *npages += (buffer_list[i].size +
336 (1ULL << *shift) - 1) >> *shift;
337
338 if (!*npages)
339 return -EINVAL;
340
341 *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
342 if (!*page_list)
343 return -ENOMEM;
344
345 n = 0;
346 for (i = 0; i < num_phys_buf; ++i)
347 for (j = 0;
348 j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
349 ++j)
350 (*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
351 ((u64) j << *shift));
352
353 PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n",
354 __func__, (unsigned long long)*iova_start,
355 (unsigned long long)mask, *shift, (unsigned long long)*total_size,
356 *npages);
357
358 return 0;
359
360}
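
build_phys_page_list() above ORs together the buffer start and end addresses and then scans for the lowest set bit at or above PAGE_SHIFT; that bit bounds the largest page size that still aligns every buffer. A worked stand-alone example with two hypothetical 256 KB-aligned buffers (addresses invented for illustration, 4 KB PAGE_SHIFT assumed; the mask construction is simplified relative to the kernel loop, which also page-masks the first address and rounds the last end up to a page, but it gives the same answer for aligned buffers):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Two contiguous, 256 KB-aligned buffers (hypothetical addresses). */
	uint64_t addrs[] = { 0x100000, 0x140000 };
	uint64_t sizes[] = { 0x40000,  0x40000 };
	uint64_t mask = 0;
	int shift, i, npages = 0;

	for (i = 0; i < 2; i++)
		mask |= addrs[i] | (addrs[i] + sizes[i]);

	/* Same scan as mem.c: lowest set bit at or above PAGE_SHIFT (12). */
	for (shift = 12; shift < 27; shift++)
		if ((1ULL << shift) & mask)
			break;

	for (i = 0; i < 2; i++)
		npages += (sizes[i] + (1ULL << shift) - 1) >> shift;

	printf("shift %d (%llu KB pages), npages %d\n",
	       shift, (unsigned long long)(1ULL << shift) / 1024, npages);
	return 0;   /* prints: shift 18 (256 KB pages), npages 2 */
}
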
361
362int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask,
363 struct ib_pd *pd, struct ib_phys_buf *buffer_list,
364 int num_phys_buf, int acc, u64 *iova_start)
365{
366
367 struct c4iw_mr mh, *mhp;
368 struct c4iw_pd *php;
369 struct c4iw_dev *rhp;
370 __be64 *page_list = NULL;
371 int shift = 0;
372 u64 total_size;
373 int npages;
374 int ret;
375
376 PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);
377
378 /* There can be no memory windows */
379 if (atomic_read(&mr->usecnt))
380 return -EINVAL;
381
382 mhp = to_c4iw_mr(mr);
383 rhp = mhp->rhp;
384 php = to_c4iw_pd(mr->pd);
385
386 /* make sure we are on the same adapter */
387 if (rhp != php->rhp)
388 return -EINVAL;
389
390 memcpy(&mh, mhp, sizeof *mhp);
391
392 if (mr_rereg_mask & IB_MR_REREG_PD)
393 php = to_c4iw_pd(pd);
394 if (mr_rereg_mask & IB_MR_REREG_ACCESS) {
395 mh.attr.perms = c4iw_ib_to_tpt_access(acc);
396 mh.attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) ==
397 IB_ACCESS_MW_BIND;
398 }
399 if (mr_rereg_mask & IB_MR_REREG_TRANS) {
400 ret = build_phys_page_list(buffer_list, num_phys_buf,
401 iova_start,
402 &total_size, &npages,
403 &shift, &page_list);
404 if (ret)
405 return ret;
406 }
407
408 ret = reregister_mem(rhp, php, &mh, shift, npages);
409 kfree(page_list);
410 if (ret)
411 return ret;
412 if (mr_rereg_mask & IB_MR_REREG_PD)
413 mhp->attr.pdid = php->pdid;
414 if (mr_rereg_mask & IB_MR_REREG_ACCESS)
415 mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
416 if (mr_rereg_mask & IB_MR_REREG_TRANS) {
417 mhp->attr.zbva = 0;
418 mhp->attr.va_fbo = *iova_start;
419 mhp->attr.page_size = shift - 12;
420 mhp->attr.len = (u32) total_size;
421 mhp->attr.pbl_size = npages;
422 }
423
424 return 0;
425}
426
427struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
428 struct ib_phys_buf *buffer_list,
429 int num_phys_buf, int acc, u64 *iova_start)
430{
431 __be64 *page_list;
432 int shift;
433 u64 total_size;
434 int npages;
435 struct c4iw_dev *rhp;
436 struct c4iw_pd *php;
437 struct c4iw_mr *mhp;
438 int ret;
439
440 PDBG("%s ib_pd %p\n", __func__, pd);
441 php = to_c4iw_pd(pd);
442 rhp = php->rhp;
443
444 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
445 if (!mhp)
446 return ERR_PTR(-ENOMEM);
447
448 mhp->rhp = rhp;
449
450 /* First check that we have enough alignment */
451 if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
452 ret = -EINVAL;
453 goto err;
454 }
455
456 if (num_phys_buf > 1 &&
457 ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
458 ret = -EINVAL;
459 goto err;
460 }
461
462 ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
463 &total_size, &npages, &shift,
464 &page_list);
465 if (ret)
466 goto err;
467
468 ret = alloc_pbl(mhp, npages);
469 if (ret) {
470 kfree(page_list);
471 goto err_pbl;
472 }
473
474 ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr,
475 npages);
476 kfree(page_list);
477 if (ret)
478 goto err_pbl;
479
480 mhp->attr.pdid = php->pdid;
481 mhp->attr.zbva = 0;
482
483 mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
484 mhp->attr.va_fbo = *iova_start;
485 mhp->attr.page_size = shift - 12;
486
487 mhp->attr.len = (u32) total_size;
488 mhp->attr.pbl_size = npages;
489 ret = register_mem(rhp, php, mhp, shift);
490 if (ret)
491 goto err_pbl;
492
493 return &mhp->ibmr;
494
495err_pbl:
496 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
497 mhp->attr.pbl_size << 3);
498
499err:
500 kfree(mhp);
501 return ERR_PTR(ret);
502
503}
504
505struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
506{
507 struct c4iw_dev *rhp;
508 struct c4iw_pd *php;
509 struct c4iw_mr *mhp;
510 int ret;
511 u32 stag = T4_STAG_UNSET;
512
513 PDBG("%s ib_pd %p\n", __func__, pd);
514 php = to_c4iw_pd(pd);
515 rhp = php->rhp;
516
517 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
518 if (!mhp)
519 return ERR_PTR(-ENOMEM);
520
521 mhp->rhp = rhp;
522 mhp->attr.pdid = php->pdid;
523 mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
524 mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
525 mhp->attr.zbva = 0;
526 mhp->attr.va_fbo = 0;
527 mhp->attr.page_size = 0;
528 mhp->attr.len = ~0UL;
529 mhp->attr.pbl_size = 0;
530
531 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
532 FW_RI_STAG_NSMR, mhp->attr.perms,
533 mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0);
534 if (ret)
535 goto err1;
536
537 ret = finish_mem_reg(mhp, stag);
538 if (ret)
539 goto err2;
540 return &mhp->ibmr;
541err2:
542 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
543 mhp->attr.pbl_addr);
544err1:
545 kfree(mhp);
546 return ERR_PTR(ret);
547}
548
549struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
550 u64 virt, int acc, struct ib_udata *udata)
551{
552 __be64 *pages;
553 int shift, n, len;
554 int i, j, k;
555 int err = 0;
556 struct ib_umem_chunk *chunk;
557 struct c4iw_dev *rhp;
558 struct c4iw_pd *php;
559 struct c4iw_mr *mhp;
560
561 PDBG("%s ib_pd %p\n", __func__, pd);
562
563 if (length == ~0ULL)
564 return ERR_PTR(-EINVAL);
565
566 if ((length + start) < start)
567 return ERR_PTR(-EINVAL);
568
569 php = to_c4iw_pd(pd);
570 rhp = php->rhp;
571 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
572 if (!mhp)
573 return ERR_PTR(-ENOMEM);
574
575 mhp->rhp = rhp;
576
577 mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
578 if (IS_ERR(mhp->umem)) {
579 err = PTR_ERR(mhp->umem);
580 kfree(mhp);
581 return ERR_PTR(err);
582 }
583
584 shift = ffs(mhp->umem->page_size) - 1;
585
586 n = 0;
587 list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
588 n += chunk->nents;
589
590 err = alloc_pbl(mhp, n);
591 if (err)
592 goto err;
593
594 pages = (__be64 *) __get_free_page(GFP_KERNEL);
595 if (!pages) {
596 err = -ENOMEM;
597 goto err_pbl;
598 }
599
600 i = n = 0;
601
602 list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
603 for (j = 0; j < chunk->nmap; ++j) {
604 len = sg_dma_len(&chunk->page_list[j]) >> shift;
605 for (k = 0; k < len; ++k) {
606 pages[i++] = cpu_to_be64(sg_dma_address(
607 &chunk->page_list[j]) +
608 mhp->umem->page_size * k);
609 if (i == PAGE_SIZE / sizeof *pages) {
610 err = write_pbl(&mhp->rhp->rdev,
611 pages,
612 mhp->attr.pbl_addr + (n << 3), i);
613 if (err)
614 goto pbl_done;
615 n += i;
616 i = 0;
617 }
618 }
619 }
620
621 if (i)
622 err = write_pbl(&mhp->rhp->rdev, pages,
623 mhp->attr.pbl_addr + (n << 3), i);
624
625pbl_done:
626 free_page((unsigned long) pages);
627 if (err)
628 goto err_pbl;
629
630 mhp->attr.pdid = php->pdid;
631 mhp->attr.zbva = 0;
632 mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
633 mhp->attr.va_fbo = virt;
634 mhp->attr.page_size = shift - 12;
635 mhp->attr.len = (u32) length;
636
637 err = register_mem(rhp, php, mhp, shift);
638 if (err)
639 goto err_pbl;
640
641 return &mhp->ibmr;
642
643err_pbl:
644 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
645 mhp->attr.pbl_size << 3);
646
647err:
648 ib_umem_release(mhp->umem);
649 kfree(mhp);
650 return ERR_PTR(err);
651}
652
653struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd)
654{
655 struct c4iw_dev *rhp;
656 struct c4iw_pd *php;
657 struct c4iw_mw *mhp;
658 u32 mmid;
659 u32 stag = 0;
660 int ret;
661
662 php = to_c4iw_pd(pd);
663 rhp = php->rhp;
664 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
665 if (!mhp)
666 return ERR_PTR(-ENOMEM);
667 ret = allocate_window(&rhp->rdev, &stag, php->pdid);
668 if (ret) {
669 kfree(mhp);
670 return ERR_PTR(ret);
671 }
672 mhp->rhp = rhp;
673 mhp->attr.pdid = php->pdid;
674 mhp->attr.type = FW_RI_STAG_MW;
675 mhp->attr.stag = stag;
676 mmid = (stag) >> 8;
677 mhp->ibmw.rkey = stag;
678 if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
679 deallocate_window(&rhp->rdev, mhp->attr.stag);
680 kfree(mhp);
681 return ERR_PTR(-ENOMEM);
682 }
683 PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
684 return &(mhp->ibmw);
685}
686
687int c4iw_dealloc_mw(struct ib_mw *mw)
688{
689 struct c4iw_dev *rhp;
690 struct c4iw_mw *mhp;
691 u32 mmid;
692
693 mhp = to_c4iw_mw(mw);
694 rhp = mhp->rhp;
695 mmid = (mw->rkey) >> 8;
696 deallocate_window(&rhp->rdev, mhp->attr.stag);
697 remove_handle(rhp, &rhp->mmidr, mmid);
698 kfree(mhp);
699 PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
700 return 0;
701}
702
703struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
704{
705 struct c4iw_dev *rhp;
706 struct c4iw_pd *php;
707 struct c4iw_mr *mhp;
708 u32 mmid;
709 u32 stag = 0;
710 int ret = 0;
711
712 php = to_c4iw_pd(pd);
713 rhp = php->rhp;
714 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
715 if (!mhp)
716 goto err;
717
718 mhp->rhp = rhp;
719 ret = alloc_pbl(mhp, pbl_depth);
720 if (ret)
721 goto err1;
722 mhp->attr.pbl_size = pbl_depth;
723 ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
724 mhp->attr.pbl_size, mhp->attr.pbl_addr);
725 if (ret)
726 goto err2;
727 mhp->attr.pdid = php->pdid;
728 mhp->attr.type = FW_RI_STAG_NSMR;
729 mhp->attr.stag = stag;
730 mhp->attr.state = 1;
731 mmid = (stag) >> 8;
732 mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
733 if (insert_handle(rhp, &rhp->mmidr, mhp, mmid))
734 goto err3;
735
736 PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
737 return &(mhp->ibmr);
738err3:
739 dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
740 mhp->attr.pbl_addr);
741err2:
742 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
743 mhp->attr.pbl_size << 3);
744err1:
745 kfree(mhp);
746err:
747 return ERR_PTR(ret);
748}
749
750struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
751 int page_list_len)
752{
753 struct c4iw_fr_page_list *c4pl;
754 struct c4iw_dev *dev = to_c4iw_dev(device);
755 dma_addr_t dma_addr;
756 int size = sizeof *c4pl + page_list_len * sizeof(u64);
757
758 if (page_list_len > T4_MAX_FR_DEPTH)
759 return ERR_PTR(-EINVAL);
760
761 c4pl = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev, size,
762 &dma_addr, GFP_KERNEL);
763 if (!c4pl)
764 return ERR_PTR(-ENOMEM);
765
766 pci_unmap_addr_set(c4pl, mapping, dma_addr);
767 c4pl->dma_addr = dma_addr;
768 c4pl->dev = dev;
769 c4pl->size = size;
770 c4pl->ibpl.page_list = (u64 *)(c4pl + 1);
771 c4pl->ibpl.max_page_list_len = page_list_len;
772
773 return &c4pl->ibpl;
774}
775
776void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
777{
778 struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);
779
780 dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev, c4pl->size,
781 c4pl, pci_unmap_addr(c4pl, mapping));
782}
783
784int c4iw_dereg_mr(struct ib_mr *ib_mr)
785{
786 struct c4iw_dev *rhp;
787 struct c4iw_mr *mhp;
788 u32 mmid;
789
790 PDBG("%s ib_mr %p\n", __func__, ib_mr);
791 /* There can be no memory windows */
792 if (atomic_read(&ib_mr->usecnt))
793 return -EINVAL;
794
795 mhp = to_c4iw_mr(ib_mr);
796 rhp = mhp->rhp;
797 mmid = mhp->attr.stag >> 8;
798 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
799 mhp->attr.pbl_addr);
800 if (mhp->attr.pbl_size)
801 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
802 mhp->attr.pbl_size << 3);
803 remove_handle(rhp, &rhp->mmidr, mmid);
804 if (mhp->kva)
805 kfree((void *) (unsigned long) mhp->kva);
806 if (mhp->umem)
807 ib_umem_release(mhp->umem);
808 PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
809 kfree(mhp);
810 return 0;
811}
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
new file mode 100644
index 000000000000..dfc49020bb9c
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -0,0 +1,518 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/module.h>
33#include <linux/moduleparam.h>
34#include <linux/device.h>
35#include <linux/netdevice.h>
36#include <linux/etherdevice.h>
37#include <linux/delay.h>
38#include <linux/errno.h>
39#include <linux/list.h>
40#include <linux/spinlock.h>
41#include <linux/ethtool.h>
42#include <linux/rtnetlink.h>
43#include <linux/inetdevice.h>
44#include <linux/io.h>
45
46#include <asm/irq.h>
47#include <asm/byteorder.h>
48
49#include <rdma/iw_cm.h>
50#include <rdma/ib_verbs.h>
51#include <rdma/ib_smi.h>
52#include <rdma/ib_umem.h>
53#include <rdma/ib_user_verbs.h>
54
55#include "iw_cxgb4.h"
56
57static int fastreg_support;
58module_param(fastreg_support, int, 0644);
59MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=0)");
60
61static int c4iw_modify_port(struct ib_device *ibdev,
62 u8 port, int port_modify_mask,
63 struct ib_port_modify *props)
64{
65 return -ENOSYS;
66}
67
68static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
69 struct ib_ah_attr *ah_attr)
70{
71 return ERR_PTR(-ENOSYS);
72}
73
74static int c4iw_ah_destroy(struct ib_ah *ah)
75{
76 return -ENOSYS;
77}
78
79static int c4iw_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
80{
81 return -ENOSYS;
82}
83
84static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
85{
86 return -ENOSYS;
87}
88
89static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
90 u8 port_num, struct ib_wc *in_wc,
91 struct ib_grh *in_grh, struct ib_mad *in_mad,
92 struct ib_mad *out_mad)
93{
94 return -ENOSYS;
95}
96
97static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
98{
99 struct c4iw_dev *rhp = to_c4iw_dev(context->device);
100 struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
101 struct c4iw_mm_entry *mm, *tmp;
102
103 PDBG("%s context %p\n", __func__, context);
104 list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
105 kfree(mm);
106 c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
107 kfree(ucontext);
108 return 0;
109}
110
111static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
112 struct ib_udata *udata)
113{
114 struct c4iw_ucontext *context;
115 struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
116
117 PDBG("%s ibdev %p\n", __func__, ibdev);
118 context = kzalloc(sizeof(*context), GFP_KERNEL);
119 if (!context)
120 return ERR_PTR(-ENOMEM);
121 c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
122 INIT_LIST_HEAD(&context->mmaps);
123 spin_lock_init(&context->mmap_lock);
124 return &context->ibucontext;
125}
126
127static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
128{
129 int len = vma->vm_end - vma->vm_start;
130 u32 key = vma->vm_pgoff << PAGE_SHIFT;
131 struct c4iw_rdev *rdev;
132 int ret = 0;
133 struct c4iw_mm_entry *mm;
134 struct c4iw_ucontext *ucontext;
135 u64 addr;
136
137 PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
138 key, len);
139
140 if (vma->vm_start & (PAGE_SIZE-1))
141 return -EINVAL;
142
143 rdev = &(to_c4iw_dev(context->device)->rdev);
144 ucontext = to_c4iw_ucontext(context);
145
146 mm = remove_mmap(ucontext, key, len);
147 if (!mm)
148 return -EINVAL;
149 addr = mm->addr;
150 kfree(mm);
151
152 if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&
153 (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
154 pci_resource_len(rdev->lldi.pdev, 2)))) {
155
156 /*
157 * Map T4 DB register.
158 */
159 if (vma->vm_flags & VM_READ)
160 return -EPERM;
161
162 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
163 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
164 vma->vm_flags &= ~VM_MAYREAD;
165 ret = io_remap_pfn_range(vma, vma->vm_start,
166 addr >> PAGE_SHIFT,
167 len, vma->vm_page_prot);
168 } else {
169
170 /*
171 * Map WQ or CQ contig dma memory...
172 */
173 ret = remap_pfn_range(vma, vma->vm_start,
174 addr >> PAGE_SHIFT,
175 len, vma->vm_page_prot);
176 }
177
178 return ret;
179}
180
181static int c4iw_deallocate_pd(struct ib_pd *pd)
182{
183 struct c4iw_dev *rhp;
184 struct c4iw_pd *php;
185
186 php = to_c4iw_pd(pd);
187 rhp = php->rhp;
188 PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
189 c4iw_put_resource(&rhp->rdev.resource.pdid_fifo, php->pdid,
190 &rhp->rdev.resource.pdid_fifo_lock);
191 kfree(php);
192 return 0;
193}
194
195static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
196 struct ib_ucontext *context,
197 struct ib_udata *udata)
198{
199 struct c4iw_pd *php;
200 u32 pdid;
201 struct c4iw_dev *rhp;
202
203 PDBG("%s ibdev %p\n", __func__, ibdev);
204 rhp = (struct c4iw_dev *) ibdev;
205 pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_fifo,
206 &rhp->rdev.resource.pdid_fifo_lock);
207 if (!pdid)
208 return ERR_PTR(-EINVAL);
209 php = kzalloc(sizeof(*php), GFP_KERNEL);
210 if (!php) {
211 c4iw_put_resource(&rhp->rdev.resource.pdid_fifo, pdid,
212 &rhp->rdev.resource.pdid_fifo_lock);
213 return ERR_PTR(-ENOMEM);
214 }
215 php->pdid = pdid;
216 php->rhp = rhp;
217 if (context) {
218 if (ib_copy_to_udata(udata, &php->pdid, sizeof(u32))) {
219 c4iw_deallocate_pd(&php->ibpd);
220 return ERR_PTR(-EFAULT);
221 }
222 }
223 PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
224 return &php->ibpd;
225}
226
227static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
228 u16 *pkey)
229{
230 PDBG("%s ibdev %p\n", __func__, ibdev);
231 *pkey = 0;
232 return 0;
233}
234
235static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index,
236 union ib_gid *gid)
237{
238 struct c4iw_dev *dev;
239
240 PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
241 __func__, ibdev, port, index, gid);
242 dev = to_c4iw_dev(ibdev);
243 BUG_ON(port == 0);
244 memset(&(gid->raw[0]), 0, sizeof(gid->raw));
245 memcpy(&(gid->raw[0]), dev->rdev.lldi.ports[port-1]->dev_addr, 6);
246 return 0;
247}
248
249static int c4iw_query_device(struct ib_device *ibdev,
250 struct ib_device_attr *props)
251{
252
253 struct c4iw_dev *dev;
254 PDBG("%s ibdev %p\n", __func__, ibdev);
255
256 dev = to_c4iw_dev(ibdev);
257 memset(props, 0, sizeof *props);
258 memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
259 props->hw_ver = dev->rdev.lldi.adapter_type;
260 props->fw_ver = dev->rdev.lldi.fw_vers;
261 props->device_cap_flags = dev->device_cap_flags;
262 props->page_size_cap = T4_PAGESIZE_MASK;
263 props->vendor_id = (u32)dev->rdev.lldi.pdev->vendor;
264 props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device;
265 props->max_mr_size = T4_MAX_MR_SIZE;
266 props->max_qp = T4_MAX_NUM_QP;
267 props->max_qp_wr = T4_MAX_QP_DEPTH;
268 props->max_sge = T4_MAX_RECV_SGE;
269 props->max_sge_rd = 1;
270 props->max_qp_rd_atom = c4iw_max_read_depth;
271 props->max_qp_init_rd_atom = c4iw_max_read_depth;
272 props->max_cq = T4_MAX_NUM_CQ;
273 props->max_cqe = T4_MAX_CQ_DEPTH;
274 props->max_mr = c4iw_num_stags(&dev->rdev);
275 props->max_pd = T4_MAX_NUM_PD;
276 props->local_ca_ack_delay = 0;
277 props->max_fast_reg_page_list_len = T4_MAX_FR_DEPTH;
278
279 return 0;
280}
281
282static int c4iw_query_port(struct ib_device *ibdev, u8 port,
283 struct ib_port_attr *props)
284{
285 struct c4iw_dev *dev;
286 struct net_device *netdev;
287 struct in_device *inetdev;
288
289 PDBG("%s ibdev %p\n", __func__, ibdev);
290
291 dev = to_c4iw_dev(ibdev);
292 netdev = dev->rdev.lldi.ports[port-1];
293
294 memset(props, 0, sizeof(struct ib_port_attr));
295 props->max_mtu = IB_MTU_4096;
296 if (netdev->mtu >= 4096)
297 props->active_mtu = IB_MTU_4096;
298 else if (netdev->mtu >= 2048)
299 props->active_mtu = IB_MTU_2048;
300 else if (netdev->mtu >= 1024)
301 props->active_mtu = IB_MTU_1024;
302 else if (netdev->mtu >= 512)
303 props->active_mtu = IB_MTU_512;
304 else
305 props->active_mtu = IB_MTU_256;
306
307 if (!netif_carrier_ok(netdev))
308 props->state = IB_PORT_DOWN;
309 else {
310 inetdev = in_dev_get(netdev);
311 if (inetdev) {
312 if (inetdev->ifa_list)
313 props->state = IB_PORT_ACTIVE;
314 else
315 props->state = IB_PORT_INIT;
316 in_dev_put(inetdev);
317 } else
318 props->state = IB_PORT_INIT;
319 }
320
321 props->port_cap_flags =
322 IB_PORT_CM_SUP |
323 IB_PORT_SNMP_TUNNEL_SUP |
324 IB_PORT_REINIT_SUP |
325 IB_PORT_DEVICE_MGMT_SUP |
326 IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
327 props->gid_tbl_len = 1;
328 props->pkey_tbl_len = 1;
329 props->active_width = 2;
330 props->active_speed = 2;
331 props->max_msg_sz = -1;
332
333 return 0;
334}
335
336static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
337 char *buf)
338{
339 struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
340 ibdev.dev);
341 PDBG("%s dev 0x%p\n", __func__, dev);
342 return sprintf(buf, "%d\n", c4iw_dev->rdev.lldi.adapter_type);
343}
344
345static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
346 char *buf)
347{
348 struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
349 ibdev.dev);
350 PDBG("%s dev 0x%p\n", __func__, dev);
351
352 return sprintf(buf, "%u.%u.%u.%u\n",
353 FW_HDR_FW_VER_MAJOR_GET(c4iw_dev->rdev.lldi.fw_vers),
354 FW_HDR_FW_VER_MINOR_GET(c4iw_dev->rdev.lldi.fw_vers),
355 FW_HDR_FW_VER_MICRO_GET(c4iw_dev->rdev.lldi.fw_vers),
356 FW_HDR_FW_VER_BUILD_GET(c4iw_dev->rdev.lldi.fw_vers));
357}
358
359static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
360 char *buf)
361{
362 struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
363 ibdev.dev);
364 struct ethtool_drvinfo info;
365 struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0];
366
367 PDBG("%s dev 0x%p\n", __func__, dev);
368 lldev->ethtool_ops->get_drvinfo(lldev, &info);
369 return sprintf(buf, "%s\n", info.driver);
370}
371
372static ssize_t show_board(struct device *dev, struct device_attribute *attr,
373 char *buf)
374{
375 struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
376 ibdev.dev);
377 PDBG("%s dev 0x%p\n", __func__, dev);
378 return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
379 c4iw_dev->rdev.lldi.pdev->device);
380}
381
382static int c4iw_get_mib(struct ib_device *ibdev,
383 union rdma_protocol_stats *stats)
384{
385 return -ENOSYS;
386}
387
388static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
389static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
390static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
391static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
392
393static struct device_attribute *c4iw_class_attributes[] = {
394 &dev_attr_hw_rev,
395 &dev_attr_fw_ver,
396 &dev_attr_hca_type,
397 &dev_attr_board_id,
398};
399
400int c4iw_register_device(struct c4iw_dev *dev)
401{
402 int ret;
403 int i;
404
405 PDBG("%s c4iw_dev %p\n", __func__, dev);
406 BUG_ON(!dev->rdev.lldi.ports[0]);
407 strlcpy(dev->ibdev.name, "cxgb4_%d", IB_DEVICE_NAME_MAX);
408 memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
409 memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
410 dev->ibdev.owner = THIS_MODULE;
411 dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
412 if (fastreg_support)
413 dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
414 dev->ibdev.local_dma_lkey = 0;
415 dev->ibdev.uverbs_cmd_mask =
416 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
417 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
418 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
419 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
420 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
421 (1ull << IB_USER_VERBS_CMD_REG_MR) |
422 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
423 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
424 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
425 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
426 (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
427 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
428 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
429 (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
430 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
431 (1ull << IB_USER_VERBS_CMD_POST_SEND) |
432 (1ull << IB_USER_VERBS_CMD_POST_RECV);
433 dev->ibdev.node_type = RDMA_NODE_RNIC;
434 memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC));
435 dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports;
436 dev->ibdev.num_comp_vectors = 1;
437 dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev);
438 dev->ibdev.query_device = c4iw_query_device;
439 dev->ibdev.query_port = c4iw_query_port;
440 dev->ibdev.modify_port = c4iw_modify_port;
441 dev->ibdev.query_pkey = c4iw_query_pkey;
442 dev->ibdev.query_gid = c4iw_query_gid;
443 dev->ibdev.alloc_ucontext = c4iw_alloc_ucontext;
444 dev->ibdev.dealloc_ucontext = c4iw_dealloc_ucontext;
445 dev->ibdev.mmap = c4iw_mmap;
446 dev->ibdev.alloc_pd = c4iw_allocate_pd;
447 dev->ibdev.dealloc_pd = c4iw_deallocate_pd;
448 dev->ibdev.create_ah = c4iw_ah_create;
449 dev->ibdev.destroy_ah = c4iw_ah_destroy;
450 dev->ibdev.create_qp = c4iw_create_qp;
451 dev->ibdev.modify_qp = c4iw_ib_modify_qp;
452 dev->ibdev.destroy_qp = c4iw_destroy_qp;
453 dev->ibdev.create_cq = c4iw_create_cq;
454 dev->ibdev.destroy_cq = c4iw_destroy_cq;
455 dev->ibdev.resize_cq = c4iw_resize_cq;
456 dev->ibdev.poll_cq = c4iw_poll_cq;
457 dev->ibdev.get_dma_mr = c4iw_get_dma_mr;
458 dev->ibdev.reg_phys_mr = c4iw_register_phys_mem;
459 dev->ibdev.rereg_phys_mr = c4iw_reregister_phys_mem;
460 dev->ibdev.reg_user_mr = c4iw_reg_user_mr;
461 dev->ibdev.dereg_mr = c4iw_dereg_mr;
462 dev->ibdev.alloc_mw = c4iw_alloc_mw;
463 dev->ibdev.bind_mw = c4iw_bind_mw;
464 dev->ibdev.dealloc_mw = c4iw_dealloc_mw;
465 dev->ibdev.alloc_fast_reg_mr = c4iw_alloc_fast_reg_mr;
466 dev->ibdev.alloc_fast_reg_page_list = c4iw_alloc_fastreg_pbl;
467 dev->ibdev.free_fast_reg_page_list = c4iw_free_fastreg_pbl;
468 dev->ibdev.attach_mcast = c4iw_multicast_attach;
469 dev->ibdev.detach_mcast = c4iw_multicast_detach;
470 dev->ibdev.process_mad = c4iw_process_mad;
471 dev->ibdev.req_notify_cq = c4iw_arm_cq;
472 dev->ibdev.post_send = c4iw_post_send;
473 dev->ibdev.post_recv = c4iw_post_receive;
474 dev->ibdev.get_protocol_stats = c4iw_get_mib;
475
476 dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
477 if (!dev->ibdev.iwcm)
478 return -ENOMEM;
479
480 dev->ibdev.iwcm->connect = c4iw_connect;
481 dev->ibdev.iwcm->accept = c4iw_accept_cr;
482 dev->ibdev.iwcm->reject = c4iw_reject_cr;
483 dev->ibdev.iwcm->create_listen = c4iw_create_listen;
484 dev->ibdev.iwcm->destroy_listen = c4iw_destroy_listen;
485 dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
486 dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
487 dev->ibdev.iwcm->get_qp = c4iw_get_qp;
488
489 ret = ib_register_device(&dev->ibdev);
490 if (ret)
491 goto bail1;
492
493 for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i) {
494 ret = device_create_file(&dev->ibdev.dev,
495 c4iw_class_attributes[i]);
496 if (ret)
497 goto bail2;
498 }
499 return 0;
500bail2:
501 ib_unregister_device(&dev->ibdev);
502bail1:
503 kfree(dev->ibdev.iwcm);
504 return ret;
505}
506
507void c4iw_unregister_device(struct c4iw_dev *dev)
508{
509 int i;
510
511 PDBG("%s c4iw_dev %p\n", __func__, dev);
512 for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i)
513 device_remove_file(&dev->ibdev.dev,
514 c4iw_class_attributes[i]);
515 ib_unregister_device(&dev->ibdev);
516 kfree(dev->ibdev.iwcm);
517 return;
518}
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
new file mode 100644
index 000000000000..83a01dc0c4c1
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -0,0 +1,1577 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "iw_cxgb4.h"
33
34static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
35 struct c4iw_dev_ucontext *uctx)
36{
37 /*
38 * uP clears EQ contexts when the connection exits rdma mode,
39 * so no need to post a RESET WR for these EQs.
40 */
41 dma_free_coherent(&(rdev->lldi.pdev->dev),
42 wq->rq.memsize, wq->rq.queue,
43 pci_unmap_addr(&wq->rq, mapping));
44 dma_free_coherent(&(rdev->lldi.pdev->dev),
45 wq->sq.memsize, wq->sq.queue,
46 pci_unmap_addr(&wq->sq, mapping));
47 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
48 kfree(wq->rq.sw_rq);
49 kfree(wq->sq.sw_sq);
50 c4iw_put_qpid(rdev, wq->rq.qid, uctx);
51 c4iw_put_qpid(rdev, wq->sq.qid, uctx);
52 return 0;
53}
54
55static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
56 struct t4_cq *rcq, struct t4_cq *scq,
57 struct c4iw_dev_ucontext *uctx)
58{
59 int user = (uctx != &rdev->uctx);
60 struct fw_ri_res_wr *res_wr;
61 struct fw_ri_res *res;
62 int wr_len;
63 struct c4iw_wr_wait wr_wait;
64 struct sk_buff *skb;
65 int ret;
66 int eqsize;
67
68 wq->sq.qid = c4iw_get_qpid(rdev, uctx);
69 if (!wq->sq.qid)
70 return -ENOMEM;
71
72 wq->rq.qid = c4iw_get_qpid(rdev, uctx);
73 if (!wq->rq.qid)
74 goto err1;
75
76 if (!user) {
77 wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
78 GFP_KERNEL);
79 if (!wq->sq.sw_sq)
80 goto err2;
81
82 wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
83 GFP_KERNEL);
84 if (!wq->rq.sw_rq)
85 goto err3;
86 }
87
88 /*
89 * RQT must be a power of 2.
90 */
91 wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
92 wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
93 if (!wq->rq.rqt_hwaddr)
94 goto err4;
95
96 wq->sq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
97 wq->sq.memsize, &(wq->sq.dma_addr),
98 GFP_KERNEL);
99 if (!wq->sq.queue)
100 goto err5;
101 memset(wq->sq.queue, 0, wq->sq.memsize);
102 pci_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
103
104 wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
105 wq->rq.memsize, &(wq->rq.dma_addr),
106 GFP_KERNEL);
107 if (!wq->rq.queue)
108 goto err6;
109 PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
110 __func__, wq->sq.queue,
111 (unsigned long long)virt_to_phys(wq->sq.queue),
112 wq->rq.queue,
113 (unsigned long long)virt_to_phys(wq->rq.queue));
114 memset(wq->rq.queue, 0, wq->rq.memsize);
115 pci_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
116
117 wq->db = rdev->lldi.db_reg;
118 wq->gts = rdev->lldi.gts_reg;
119 if (user) {
120 wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
121 (wq->sq.qid << rdev->qpshift);
122 wq->sq.udb &= PAGE_MASK;
123 wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
124 (wq->rq.qid << rdev->qpshift);
125 wq->rq.udb &= PAGE_MASK;
126 }
127 wq->rdev = rdev;
128 wq->rq.msn = 1;
129
130 /* build fw_ri_res_wr */
131 wr_len = sizeof *res_wr + 2 * sizeof *res;
132
133 skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
134 if (!skb) {
135 ret = -ENOMEM;
136 goto err7;
137 }
138 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
139
140 res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
141 memset(res_wr, 0, wr_len);
142 res_wr->op_nres = cpu_to_be32(
143 FW_WR_OP(FW_RI_RES_WR) |
144 V_FW_RI_RES_WR_NRES(2) |
145 FW_WR_COMPL(1));
146 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
147 res_wr->cookie = (u64)&wr_wait;
148 res = res_wr->res;
149 res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
150 res->u.sqrq.op = FW_RI_RES_OP_WRITE;
151
152 /*
153 * eqsize is the number of 64B entries plus the status page size.
154 */
155 eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
156
157 res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
158 V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
159 V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */
160 V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */
161 V_FW_RI_RES_WR_IQID(scq->cqid));
162 res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
163 V_FW_RI_RES_WR_DCAEN(0) |
164 V_FW_RI_RES_WR_DCACPU(0) |
165 V_FW_RI_RES_WR_FBMIN(3) |
166 V_FW_RI_RES_WR_FBMAX(3) |
167 V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
168 V_FW_RI_RES_WR_CIDXFTHRESH(0) |
169 V_FW_RI_RES_WR_EQSIZE(eqsize));
170 res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
171 res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
172 res++;
173 res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
174 res->u.sqrq.op = FW_RI_RES_OP_WRITE;
175
176 /*
177 * eqsize is the number of 64B entries plus the status page size.
178 */
179 eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
180 res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
181 V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
182 V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */
183 V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */
184 V_FW_RI_RES_WR_IQID(rcq->cqid));
185 res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
186 V_FW_RI_RES_WR_DCAEN(0) |
187 V_FW_RI_RES_WR_DCACPU(0) |
188 V_FW_RI_RES_WR_FBMIN(3) |
189 V_FW_RI_RES_WR_FBMAX(3) |
190 V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
191 V_FW_RI_RES_WR_CIDXFTHRESH(0) |
192 V_FW_RI_RES_WR_EQSIZE(eqsize));
193 res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
194 res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
195
196 c4iw_init_wr_wait(&wr_wait);
197
198 ret = c4iw_ofld_send(rdev, skb);
199 if (ret)
200 goto err7;
201 wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
202 if (!wr_wait.done) {
203 printk(KERN_ERR MOD "Device %s not responding!\n",
204 pci_name(rdev->lldi.pdev));
205 rdev->flags = T4_FATAL_ERROR;
206 ret = -EIO;
207 } else
208 ret = wr_wait.ret;
209 if (ret)
210 goto err7;
211
212 PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
213 __func__, wq->sq.qid, wq->rq.qid, wq->db,
214 (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);
215
216 return 0;
217err7:
218 dma_free_coherent(&(rdev->lldi.pdev->dev),
219 wq->rq.memsize, wq->rq.queue,
220 pci_unmap_addr(&wq->rq, mapping));
221err6:
222 dma_free_coherent(&(rdev->lldi.pdev->dev),
223 wq->sq.memsize, wq->sq.queue,
224 pci_unmap_addr(&wq->sq, mapping));
225err5:
226 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
227err4:
228 kfree(wq->rq.sw_rq);
229err3:
230 kfree(wq->sq.sw_sq);
231err2:
232 c4iw_put_qpid(rdev, wq->rq.qid, uctx);
233err1:
234 c4iw_put_qpid(rdev, wq->sq.qid, uctx);
235 return -ENOMEM;
236}
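
For user-mode queues, create_qp() above derives each user doorbell (udb) address by offsetting into PCI BAR2 by qid << qpshift and masking the result down to a page boundary so the page can later be handed out through c4iw_mmap(). A stand-alone sketch of that arithmetic with made-up BAR2 base, qpshift and qid values (4 KB pages assumed):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bar2 = 0xd0000000;      /* hypothetical pci_resource_start(pdev, 2) */
	uint64_t qpshift = 7;            /* hypothetical rdev->qpshift */
	uint64_t page_mask = ~0xfffULL;  /* 4 KB pages assumed */
	uint32_t qid = 0x46;             /* hypothetical SQ qid */

	uint64_t udb = bar2 + ((uint64_t)qid << qpshift);
	printf("raw udb 0x%llx, page-aligned udb 0x%llx\n",
	       (unsigned long long)udb,
	       (unsigned long long)(udb & page_mask));
	return 0;   /* prints: raw udb 0xd0002300, page-aligned udb 0xd0002000 */
}
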
237
238static int build_rdma_send(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
239{
240 int i;
241 u32 plen;
242 int size;
243 u8 *datap;
244
245 if (wr->num_sge > T4_MAX_SEND_SGE)
246 return -EINVAL;
247 switch (wr->opcode) {
248 case IB_WR_SEND:
249 if (wr->send_flags & IB_SEND_SOLICITED)
250 wqe->send.sendop_pkd = cpu_to_be32(
251 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
252 else
253 wqe->send.sendop_pkd = cpu_to_be32(
254 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
255 wqe->send.stag_inv = 0;
256 break;
257 case IB_WR_SEND_WITH_INV:
258 if (wr->send_flags & IB_SEND_SOLICITED)
259 wqe->send.sendop_pkd = cpu_to_be32(
260 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
261 else
262 wqe->send.sendop_pkd = cpu_to_be32(
263 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
264 wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
265 break;
266
267 default:
268 return -EINVAL;
269 }
270 plen = 0;
271 if (wr->num_sge) {
272 if (wr->send_flags & IB_SEND_INLINE) {
273 datap = (u8 *)wqe->send.u.immd_src[0].data;
274 for (i = 0; i < wr->num_sge; i++) {
275 if ((plen + wr->sg_list[i].length) >
276 T4_MAX_SEND_INLINE) {
277 return -EMSGSIZE;
278 }
279 plen += wr->sg_list[i].length;
280 memcpy(datap,
281 (void *)(unsigned long)wr->sg_list[i].addr,
282 wr->sg_list[i].length);
283 datap += wr->sg_list[i].length;
284 }
285 wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
286 wqe->send.u.immd_src[0].r1 = 0;
287 wqe->send.u.immd_src[0].r2 = 0;
288 wqe->send.u.immd_src[0].immdlen = cpu_to_be32(plen);
289 size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
290 plen;
291 } else {
292 for (i = 0; i < wr->num_sge; i++) {
293 if ((plen + wr->sg_list[i].length) < plen)
294 return -EMSGSIZE;
295 plen += wr->sg_list[i].length;
296 wqe->send.u.isgl_src[0].sge[i].stag =
297 cpu_to_be32(wr->sg_list[i].lkey);
298 wqe->send.u.isgl_src[0].sge[i].len =
299 cpu_to_be32(wr->sg_list[i].length);
300 wqe->send.u.isgl_src[0].sge[i].to =
301 cpu_to_be64(wr->sg_list[i].addr);
302 }
303 wqe->send.u.isgl_src[0].op = FW_RI_DATA_ISGL;
304 wqe->send.u.isgl_src[0].r1 = 0;
305 wqe->send.u.isgl_src[0].nsge = cpu_to_be16(wr->num_sge);
306 wqe->send.u.isgl_src[0].r2 = 0;
307 size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
308 wr->num_sge * sizeof(struct fw_ri_sge);
309 }
310 } else {
311 wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
312 wqe->send.u.immd_src[0].r1 = 0;
313 wqe->send.u.immd_src[0].r2 = 0;
314 wqe->send.u.immd_src[0].immdlen = 0;
315 size = sizeof wqe->send + sizeof(struct fw_ri_immd);
316 }
317 *len16 = DIV_ROUND_UP(size, 16);
318 wqe->send.plen = cpu_to_be32(plen);
319 return 0;
320}
321
322static int build_rdma_write(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
323{
324 int i;
325 u32 plen;
326 int size;
327 u8 *datap;
328
329 if (wr->num_sge > T4_MAX_WRITE_SGE)
330 return -EINVAL;
331 wqe->write.r2 = 0;
332 wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
333 wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
334 plen = 0;
335 if (wr->num_sge) {
336 if (wr->send_flags & IB_SEND_INLINE) {
337 datap = (u8 *)wqe->write.u.immd_src[0].data;
338 for (i = 0; i < wr->num_sge; i++) {
339 if ((plen + wr->sg_list[i].length) >
340 T4_MAX_WRITE_INLINE) {
341 return -EMSGSIZE;
342 }
343 plen += wr->sg_list[i].length;
344 memcpy(datap,
345 (void *)(unsigned long)wr->sg_list[i].addr,
346 wr->sg_list[i].length);
347 datap += wr->sg_list[i].length;
348 }
349 wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
350 wqe->write.u.immd_src[0].r1 = 0;
351 wqe->write.u.immd_src[0].r2 = 0;
352 wqe->write.u.immd_src[0].immdlen = cpu_to_be32(plen);
353 size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
354 plen;
355 } else {
356 for (i = 0; i < wr->num_sge; i++) {
357 if ((plen + wr->sg_list[i].length) < plen)
358 return -EMSGSIZE;
359 plen += wr->sg_list[i].length;
360 wqe->write.u.isgl_src[0].sge[i].stag =
361 cpu_to_be32(wr->sg_list[i].lkey);
362 wqe->write.u.isgl_src[0].sge[i].len =
363 cpu_to_be32(wr->sg_list[i].length);
364 wqe->write.u.isgl_src[0].sge[i].to =
365 cpu_to_be64(wr->sg_list[i].addr);
366 }
367 wqe->write.u.isgl_src[0].op = FW_RI_DATA_ISGL;
368 wqe->write.u.isgl_src[0].r1 = 0;
369 wqe->write.u.isgl_src[0].nsge =
370 cpu_to_be16(wr->num_sge);
371 wqe->write.u.isgl_src[0].r2 = 0;
372 size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
373 wr->num_sge * sizeof(struct fw_ri_sge);
374 }
375 } else {
376 wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
377 wqe->write.u.immd_src[0].r1 = 0;
378 wqe->write.u.immd_src[0].r2 = 0;
379 wqe->write.u.immd_src[0].immdlen = 0;
380 size = sizeof wqe->write + sizeof(struct fw_ri_immd);
381 }
382 *len16 = DIV_ROUND_UP(size, 16);
383 wqe->write.plen = cpu_to_be32(plen);
384 return 0;
385}
386
387static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
388{
389 if (wr->num_sge > 1)
390 return -EINVAL;
391 if (wr->num_sge) {
392 wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
393 wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
394 >> 32));
395 wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
396 wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
397 wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
398 wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
399 >> 32));
400 wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
401 } else {
402 wqe->read.stag_src = cpu_to_be32(2);
403 wqe->read.to_src_hi = 0;
404 wqe->read.to_src_lo = 0;
405 wqe->read.stag_sink = cpu_to_be32(2);
406 wqe->read.plen = 0;
407 wqe->read.to_sink_hi = 0;
408 wqe->read.to_sink_lo = 0;
409 }
410 wqe->read.r2 = 0;
411 wqe->read.r5 = 0;
412 *len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
413 return 0;
414}
415
416static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
417 struct ib_recv_wr *wr, u8 *len16)
418{
419 int i;
420 int plen = 0;
421
422 for (i = 0; i < wr->num_sge; i++) {
423 if ((plen + wr->sg_list[i].length) < plen)
424 return -EMSGSIZE;
425 plen += wr->sg_list[i].length;
426 wqe->recv.isgl.sge[i].stag =
427 cpu_to_be32(wr->sg_list[i].lkey);
428 wqe->recv.isgl.sge[i].len =
429 cpu_to_be32(wr->sg_list[i].length);
430 wqe->recv.isgl.sge[i].to =
431 cpu_to_be64(wr->sg_list[i].addr);
432 }
433 for (; i < T4_MAX_RECV_SGE; i++) {
434 wqe->recv.isgl.sge[i].stag = 0;
435 wqe->recv.isgl.sge[i].len = 0;
436 wqe->recv.isgl.sge[i].to = 0;
437 }
438 wqe->recv.isgl.op = FW_RI_DATA_ISGL;
439 wqe->recv.isgl.r1 = 0;
440 wqe->recv.isgl.nsge = cpu_to_be16(wr->num_sge);
441 wqe->recv.isgl.r2 = 0;
442 *len16 = DIV_ROUND_UP(sizeof wqe->recv +
443 wr->num_sge * sizeof(struct fw_ri_sge), 16);
444 return 0;
445}
446
447static int build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
448{
449
450 struct fw_ri_immd *imdp;
451 __be64 *p;
452 int i;
453 int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
454
455 if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
456 return -EINVAL;
457
458 wqe->fr.qpbinde_to_dcacpu = 0;
459 wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
460 wqe->fr.addr_type = FW_RI_VA_BASED_TO;
461 wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
462 wqe->fr.len_hi = 0;
463 wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
464 wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
465 wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
466 wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
467 0xffffffff);
468 if (pbllen > T4_MAX_FR_IMMD) {
469 struct c4iw_fr_page_list *c4pl =
470 to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
471 struct fw_ri_dsgl *sglp;
472
473 sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
474 sglp->op = FW_RI_DATA_DSGL;
475 sglp->r1 = 0;
476 sglp->nsge = cpu_to_be16(1);
477 sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
478 sglp->len0 = cpu_to_be32(pbllen);
479
480 *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *sglp, 16);
481 } else {
482 imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
483 imdp->op = FW_RI_DATA_IMMD;
484 imdp->r1 = 0;
485 imdp->r2 = 0;
486 imdp->immdlen = cpu_to_be32(pbllen);
487 p = (__be64 *)(imdp + 1);
488 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++)
489 *p = cpu_to_be64(
490 (u64)wr->wr.fast_reg.page_list->page_list[i]);
491 *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen,
492 16);
493 }
494 return 0;
495}
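For orientation, the page list ("PBL") built here is padded to a 32-byte multiple before the delivery method is chosen. With, say, a 9-page registration (an invented figure): pbllen = roundup(9 * sizeof(u64), 32) = roundup(72, 32) = 96 bytes. Since 96 bytes fits under T4_MAX_FR_IMMD, the PBL travels inline after the WR as a FW_RI_DATA_IMMD chunk; a larger list would instead be described by a single FW_RI_DATA_DSGL entry and fetched by the hardware from c4pl->dma_addr.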
496
497static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
498 u8 *len16)
499{
500 wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
501 wqe->inv.r2 = 0;
502 *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
503 return 0;
504}
505
506void c4iw_qp_add_ref(struct ib_qp *qp)
507{
508 PDBG("%s ib_qp %p\n", __func__, qp);
509 atomic_inc(&(to_c4iw_qp(qp)->refcnt));
510}
511
512void c4iw_qp_rem_ref(struct ib_qp *qp)
513{
514 PDBG("%s ib_qp %p\n", __func__, qp);
515 if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
516 wake_up(&(to_c4iw_qp(qp)->wait));
517}
518
519int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
520 struct ib_send_wr **bad_wr)
521{
522 int err = 0;
523 u8 len16 = 0;
524 enum fw_wr_opcodes fw_opcode = 0;
525 enum fw_ri_wr_flags fw_flags;
526 struct c4iw_qp *qhp;
527 union t4_wr *wqe;
528 u32 num_wrs;
529 struct t4_swsqe *swsqe;
530 unsigned long flag;
531 u16 idx = 0;
532
533 qhp = to_c4iw_qp(ibqp);
534 spin_lock_irqsave(&qhp->lock, flag);
535 if (t4_wq_in_error(&qhp->wq)) {
536 spin_unlock_irqrestore(&qhp->lock, flag);
537 return -EINVAL;
538 }
539 num_wrs = t4_sq_avail(&qhp->wq);
540 if (num_wrs == 0) {
541 spin_unlock_irqrestore(&qhp->lock, flag);
542 return -ENOMEM;
543 }
544 while (wr) {
545 if (num_wrs == 0) {
546 err = -ENOMEM;
547 *bad_wr = wr;
548 break;
549 }
550 wqe = &qhp->wq.sq.queue[qhp->wq.sq.pidx];
551 fw_flags = 0;
552 if (wr->send_flags & IB_SEND_SOLICITED)
553 fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
554 if (wr->send_flags & IB_SEND_SIGNALED)
555 fw_flags |= FW_RI_COMPLETION_FLAG;
556 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
557 switch (wr->opcode) {
558 case IB_WR_SEND_WITH_INV:
559 case IB_WR_SEND:
560 if (wr->send_flags & IB_SEND_FENCE)
561 fw_flags |= FW_RI_READ_FENCE_FLAG;
562 fw_opcode = FW_RI_SEND_WR;
563 if (wr->opcode == IB_WR_SEND)
564 swsqe->opcode = FW_RI_SEND;
565 else
566 swsqe->opcode = FW_RI_SEND_WITH_INV;
567 err = build_rdma_send(wqe, wr, &len16);
568 break;
569 case IB_WR_RDMA_WRITE:
570 fw_opcode = FW_RI_RDMA_WRITE_WR;
571 swsqe->opcode = FW_RI_RDMA_WRITE;
572 err = build_rdma_write(wqe, wr, &len16);
573 break;
574 case IB_WR_RDMA_READ:
575 fw_opcode = FW_RI_RDMA_READ_WR;
576 swsqe->opcode = FW_RI_READ_REQ;
577 fw_flags = 0;
578 err = build_rdma_read(wqe, wr, &len16);
579 if (err)
580 break;
581 swsqe->read_len = wr->sg_list[0].length;
582 if (!qhp->wq.sq.oldest_read)
583 qhp->wq.sq.oldest_read = swsqe;
584 break;
585 case IB_WR_FAST_REG_MR:
586 fw_opcode = FW_RI_FR_NSMR_WR;
587 swsqe->opcode = FW_RI_FAST_REGISTER;
588 err = build_fastreg(wqe, wr, &len16);
589 break;
590 case IB_WR_LOCAL_INV:
591 fw_opcode = FW_RI_INV_LSTAG_WR;
592 swsqe->opcode = FW_RI_LOCAL_INV;
593 err = build_inv_stag(wqe, wr, &len16);
594 break;
595 default:
596 PDBG("%s post of type=%d TBD!\n", __func__,
597 wr->opcode);
598 err = -EINVAL;
599 }
600 if (err) {
601 *bad_wr = wr;
602 break;
603 }
604 swsqe->idx = qhp->wq.sq.pidx;
605 swsqe->complete = 0;
606 swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED);
607 swsqe->wr_id = wr->wr_id;
608
609 init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
610
611 PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
612 __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
613 swsqe->opcode, swsqe->read_len);
614 wr = wr->next;
615 num_wrs--;
616 t4_sq_produce(&qhp->wq);
617 idx++;
618 }
619 if (t4_wq_db_enabled(&qhp->wq))
620 t4_ring_sq_db(&qhp->wq, idx);
621 spin_unlock_irqrestore(&qhp->lock, flag);
622 return err;
623}
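The verbs core dispatches ib_post_send() to the handler above for cxgb4 QPs; consumers never call it directly. A minimal caller-side sketch follows (illustrative only, not part of this patch): it assumes a connected RC QP and a DMA-mapped, registered buffer, and the names example_post_send, buf_dma, buf_len and mr_lkey are invented for the example.

#include <linux/string.h>
#include <rdma/ib_verbs.h>

/* Post one signaled SEND of an already-registered buffer. */
static int example_post_send(struct ib_qp *qp, u64 buf_dma, u32 buf_len,
			     u32 mr_lkey)
{
	struct ib_sge sge = {
		.addr   = buf_dma,
		.length = buf_len,
		.lkey   = mr_lkey,
	};
	struct ib_send_wr wr, *bad_wr;

	memset(&wr, 0, sizeof(wr));
	wr.wr_id      = 0xcafe;			/* echoed back in the CQE */
	wr.sg_list    = &sge;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;	/* request a completion */

	return ib_post_send(qp, &wr, &bad_wr);	/* lands in c4iw_post_send() */
}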
624
625int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
626 struct ib_recv_wr **bad_wr)
627{
628 int err = 0;
629 struct c4iw_qp *qhp;
630 union t4_recv_wr *wqe;
631 u32 num_wrs;
632 u8 len16 = 0;
633 unsigned long flag;
634 u16 idx = 0;
635
636 qhp = to_c4iw_qp(ibqp);
637 spin_lock_irqsave(&qhp->lock, flag);
638 if (t4_wq_in_error(&qhp->wq)) {
639 spin_unlock_irqrestore(&qhp->lock, flag);
640 return -EINVAL;
641 }
642 num_wrs = t4_rq_avail(&qhp->wq);
643 if (num_wrs == 0) {
644 spin_unlock_irqrestore(&qhp->lock, flag);
645 return -ENOMEM;
646 }
647 while (wr) {
648 if (wr->num_sge > T4_MAX_RECV_SGE) {
649 err = -EINVAL;
650 *bad_wr = wr;
651 break;
652 }
653 wqe = &qhp->wq.rq.queue[qhp->wq.rq.pidx];
654 if (num_wrs)
655 err = build_rdma_recv(qhp, wqe, wr, &len16);
656 else
657 err = -ENOMEM;
658 if (err) {
659 *bad_wr = wr;
660 break;
661 }
662
663 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
664
665 wqe->recv.opcode = FW_RI_RECV_WR;
666 wqe->recv.r1 = 0;
667 wqe->recv.wrid = qhp->wq.rq.pidx;
668 wqe->recv.r2[0] = 0;
669 wqe->recv.r2[1] = 0;
670 wqe->recv.r2[2] = 0;
671 wqe->recv.len16 = len16;
672 if (len16 < 5)
673 wqe->flits[8] = 0;
674
675 PDBG("%s cookie 0x%llx pidx %u\n", __func__,
676 (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
677 t4_rq_produce(&qhp->wq);
678 wr = wr->next;
679 num_wrs--;
680 idx++;
681 }
682 if (t4_wq_db_enabled(&qhp->wq))
683 t4_ring_rq_db(&qhp->wq, idx);
684 spin_unlock_irqrestore(&qhp->lock, flag);
685 return err;
686}
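Its receive-side counterpart is reached through ib_post_recv(). A matching sketch, again illustrative with invented names, pre-posts a single receive buffer:

/* Pre-post one receive buffer for the next incoming SEND. */
static int example_post_recv(struct ib_qp *qp, u64 buf_dma, u32 buf_len,
			     u32 mr_lkey)
{
	struct ib_sge sge = {
		.addr   = buf_dma,
		.length = buf_len,
		.lkey   = mr_lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id   = 0xbeef,	/* echoed back in the CQE */
		.sg_list = &sge,
		.num_sge = 1,
	};
	struct ib_recv_wr *bad_wr;

	return ib_post_recv(qp, &wr, &bad_wr);	/* lands in c4iw_post_receive() */
}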
687
688int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
689{
690 return -ENOSYS;
691}
692
693static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
694 u8 *ecode)
695{
696 int status;
697 int tagged;
698 int opcode;
699 int rqtype;
700 int send_inv;
701
702 if (!err_cqe) {
703 *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
704 *ecode = 0;
705 return;
706 }
707
708 status = CQE_STATUS(err_cqe);
709 opcode = CQE_OPCODE(err_cqe);
710 rqtype = RQ_TYPE(err_cqe);
711 send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
712 (opcode == FW_RI_SEND_WITH_SE_INV);
713 tagged = (opcode == FW_RI_RDMA_WRITE) ||
714 (rqtype && (opcode == FW_RI_READ_RESP));
715
716 switch (status) {
717 case T4_ERR_STAG:
718 if (send_inv) {
719 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
720 *ecode = RDMAP_CANT_INV_STAG;
721 } else {
722 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
723 *ecode = RDMAP_INV_STAG;
724 }
725 break;
726 case T4_ERR_PDID:
727 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
728 if ((opcode == FW_RI_SEND_WITH_INV) ||
729 (opcode == FW_RI_SEND_WITH_SE_INV))
730 *ecode = RDMAP_CANT_INV_STAG;
731 else
732 *ecode = RDMAP_STAG_NOT_ASSOC;
733 break;
734 case T4_ERR_QPID:
735 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
736 *ecode = RDMAP_STAG_NOT_ASSOC;
737 break;
738 case T4_ERR_ACCESS:
739 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
740 *ecode = RDMAP_ACC_VIOL;
741 break;
742 case T4_ERR_WRAP:
743 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
744 *ecode = RDMAP_TO_WRAP;
745 break;
746 case T4_ERR_BOUND:
747 if (tagged) {
748 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
749 *ecode = DDPT_BASE_BOUNDS;
750 } else {
751 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
752 *ecode = RDMAP_BASE_BOUNDS;
753 }
754 break;
755 case T4_ERR_INVALIDATE_SHARED_MR:
756 case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
757 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
758 *ecode = RDMAP_CANT_INV_STAG;
759 break;
760 case T4_ERR_ECC:
761 case T4_ERR_ECC_PSTAG:
762 case T4_ERR_INTERNAL_ERR:
763 *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
764 *ecode = 0;
765 break;
766 case T4_ERR_OUT_OF_RQE:
767 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
768 *ecode = DDPU_INV_MSN_NOBUF;
769 break;
770 case T4_ERR_PBL_ADDR_BOUND:
771 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
772 *ecode = DDPT_BASE_BOUNDS;
773 break;
774 case T4_ERR_CRC:
775 *layer_type = LAYER_MPA|DDP_LLP;
776 *ecode = MPA_CRC_ERR;
777 break;
778 case T4_ERR_MARKER:
779 *layer_type = LAYER_MPA|DDP_LLP;
780 *ecode = MPA_MARKER_ERR;
781 break;
782 case T4_ERR_PDU_LEN_ERR:
783 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
784 *ecode = DDPU_MSG_TOOBIG;
785 break;
786 case T4_ERR_DDP_VERSION:
787 if (tagged) {
788 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
789 *ecode = DDPT_INV_VERS;
790 } else {
791 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
792 *ecode = DDPU_INV_VERS;
793 }
794 break;
795 case T4_ERR_RDMA_VERSION:
796 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
797 *ecode = RDMAP_INV_VERS;
798 break;
799 case T4_ERR_OPCODE:
800 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
801 *ecode = RDMAP_INV_OPCODE;
802 break;
803 case T4_ERR_DDP_QUEUE_NUM:
804 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
805 *ecode = DDPU_INV_QN;
806 break;
807 case T4_ERR_MSN:
808 case T4_ERR_MSN_GAP:
809 case T4_ERR_MSN_RANGE:
810 case T4_ERR_IRD_OVERFLOW:
811 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
812 *ecode = DDPU_INV_MSN_RANGE;
813 break;
814 case T4_ERR_TBIT:
815 *layer_type = LAYER_DDP|DDP_LOCAL_CATA;
816 *ecode = 0;
817 break;
818 case T4_ERR_MO:
819 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
820 *ecode = DDPU_INV_MO;
821 break;
822 default:
823 *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
824 *ecode = 0;
825 break;
826 }
827}
828
829int c4iw_post_zb_read(struct c4iw_qp *qhp)
830{
831 union t4_wr *wqe;
832 struct sk_buff *skb;
833 u8 len16;
834
835 PDBG("%s enter\n", __func__);
836 skb = alloc_skb(40, GFP_KERNEL);
837 if (!skb) {
838 printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
839 return -ENOMEM;
840 }
841 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
842
843 wqe = (union t4_wr *)skb_put(skb, sizeof wqe->read);
844 memset(wqe, 0, sizeof wqe->read);
845 wqe->read.r2 = cpu_to_be64(0);
846 wqe->read.stag_sink = cpu_to_be32(1);
847 wqe->read.to_sink_hi = cpu_to_be32(0);
848 wqe->read.to_sink_lo = cpu_to_be32(1);
849 wqe->read.stag_src = cpu_to_be32(1);
850 wqe->read.plen = cpu_to_be32(0);
851 wqe->read.to_src_hi = cpu_to_be32(0);
852 wqe->read.to_src_lo = cpu_to_be32(1);
853 len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
854 init_wr_hdr(wqe, 0, FW_RI_RDMA_READ_WR, FW_RI_COMPLETION_FLAG, len16);
855
856 return c4iw_ofld_send(&qhp->rhp->rdev, skb);
857}
858
859static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
860 gfp_t gfp)
861{
862 struct fw_ri_wr *wqe;
863 struct sk_buff *skb;
864 struct terminate_message *term;
865
866 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
867 qhp->ep->hwtid);
868
869 skb = alloc_skb(sizeof *wqe, gfp);
870 if (!skb)
871 return;
872 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
873
874 wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
875 memset(wqe, 0, sizeof *wqe);
876 wqe->op_compl = cpu_to_be32(FW_WR_OP(FW_RI_INIT_WR));
877 wqe->flowid_len16 = cpu_to_be32(
878 FW_WR_FLOWID(qhp->ep->hwtid) |
879 FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
880
881 wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
882 wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
883 term = (struct terminate_message *)wqe->u.terminate.termmsg;
884 build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
885 c4iw_ofld_send(&qhp->rhp->rdev, skb);
886}
887
888/*
889 * Assumes qhp lock is held.
890 */
891static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
892 struct c4iw_cq *schp, unsigned long *flag)
893{
894 int count;
895 int flushed;
896
897 PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
898 /* take a ref on the qhp since we must release the lock */
899 atomic_inc(&qhp->refcnt);
900 spin_unlock_irqrestore(&qhp->lock, *flag);
901
902	/* locking hierarchy: cq lock first, then qp lock. */
903 spin_lock_irqsave(&rchp->lock, *flag);
904 spin_lock(&qhp->lock);
905 c4iw_flush_hw_cq(&rchp->cq);
906 c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
907 flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
908 spin_unlock(&qhp->lock);
909 spin_unlock_irqrestore(&rchp->lock, *flag);
910 if (flushed)
911 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
912
913	/* locking hierarchy: cq lock first, then qp lock. */
914 spin_lock_irqsave(&schp->lock, *flag);
915 spin_lock(&qhp->lock);
916 c4iw_flush_hw_cq(&schp->cq);
917 c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
918 flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
919 spin_unlock(&qhp->lock);
920 spin_unlock_irqrestore(&schp->lock, *flag);
921 if (flushed)
922 (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
923
924 /* deref */
925 if (atomic_dec_and_test(&qhp->refcnt))
926 wake_up(&qhp->wait);
927
928 spin_lock_irqsave(&qhp->lock, *flag);
929}
930
931static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag)
932{
933 struct c4iw_cq *rchp, *schp;
934
935 rchp = get_chp(qhp->rhp, qhp->attr.rcq);
936 schp = get_chp(qhp->rhp, qhp->attr.scq);
937
938 if (qhp->ibqp.uobject) {
939 t4_set_wq_in_error(&qhp->wq);
940 t4_set_cq_in_error(&rchp->cq);
941 if (schp != rchp)
942 t4_set_cq_in_error(&schp->cq);
943 return;
944 }
945 __flush_qp(qhp, rchp, schp, flag);
946}
947
948static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
949{
950 struct fw_ri_wr *wqe;
951 int ret;
952 struct c4iw_wr_wait wr_wait;
953 struct sk_buff *skb;
954
955 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
956 qhp->ep->hwtid);
957
958 skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
959 if (!skb)
960 return -ENOMEM;
961 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
962
963 wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
964 memset(wqe, 0, sizeof *wqe);
965 wqe->op_compl = cpu_to_be32(
966 FW_WR_OP(FW_RI_INIT_WR) |
967 FW_WR_COMPL(1));
968 wqe->flowid_len16 = cpu_to_be32(
969 FW_WR_FLOWID(qhp->ep->hwtid) |
970 FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
971 wqe->cookie = (u64)&wr_wait;
972
973 wqe->u.fini.type = FW_RI_TYPE_FINI;
974 c4iw_init_wr_wait(&wr_wait);
975 ret = c4iw_ofld_send(&rhp->rdev, skb);
976 if (ret)
977 goto out;
978
979 wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
980 if (!wr_wait.done) {
981 printk(KERN_ERR MOD "Device %s not responding!\n",
982 pci_name(rhp->rdev.lldi.pdev));
983 rhp->rdev.flags = T4_FATAL_ERROR;
984 ret = -EIO;
985 } else {
986 ret = wr_wait.ret;
987 if (ret)
988 printk(KERN_WARNING MOD
989 "%s: Abnormal close qpid %d ret %u\n",
990 pci_name(rhp->rdev.lldi.pdev), qhp->wq.sq.qid,
991 ret);
992 }
993out:
994 PDBG("%s ret %d\n", __func__, ret);
995 return ret;
996}
997
998static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
999{
1000 memset(&init->u, 0, sizeof init->u);
1001 switch (p2p_type) {
1002 case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
1003 init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
1004 init->u.write.stag_sink = cpu_to_be32(1);
1005 init->u.write.to_sink = cpu_to_be64(1);
1006 init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
1007 init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
1008 sizeof(struct fw_ri_immd),
1009 16);
1010 break;
1011 case FW_RI_INIT_P2PTYPE_READ_REQ:
1012 init->u.write.opcode = FW_RI_RDMA_READ_WR;
1013 init->u.read.stag_src = cpu_to_be32(1);
1014 init->u.read.to_src_lo = cpu_to_be32(1);
1015 init->u.read.stag_sink = cpu_to_be32(1);
1016 init->u.read.to_sink_lo = cpu_to_be32(1);
1017 init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
1018 break;
1019 }
1020}
1021
1022static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
1023{
1024 struct fw_ri_wr *wqe;
1025 int ret;
1026 struct c4iw_wr_wait wr_wait;
1027 struct sk_buff *skb;
1028
1029 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
1030 qhp->ep->hwtid);
1031
1032 skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
1033 if (!skb)
1034 return -ENOMEM;
1035 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
1036
1037 wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
1038 memset(wqe, 0, sizeof *wqe);
1039 wqe->op_compl = cpu_to_be32(
1040 FW_WR_OP(FW_RI_INIT_WR) |
1041 FW_WR_COMPL(1));
1042 wqe->flowid_len16 = cpu_to_be32(
1043 FW_WR_FLOWID(qhp->ep->hwtid) |
1044 FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
1045
1046 wqe->cookie = (u64)&wr_wait;
1047
1048 wqe->u.init.type = FW_RI_TYPE_INIT;
1049 wqe->u.init.mpareqbit_p2ptype =
1050 V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
1051 V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
1052 wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
1053 if (qhp->attr.mpa_attr.recv_marker_enabled)
1054 wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
1055 if (qhp->attr.mpa_attr.xmit_marker_enabled)
1056 wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
1057 if (qhp->attr.mpa_attr.crc_enabled)
1058 wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;
1059
1060 wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
1061 FW_RI_QP_RDMA_WRITE_ENABLE |
1062 FW_RI_QP_BIND_ENABLE;
1063 if (!qhp->ibqp.uobject)
1064 wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
1065 FW_RI_QP_STAG0_ENABLE;
1066 wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
1067 wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
1068 wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
1069 wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
1070 wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
1071 wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
1072 wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
1073 wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
1074 wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
1075 wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
1076 wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
1077 wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
1078 wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
1079 rhp->rdev.lldi.vr->rq.start);
1080 if (qhp->attr.mpa_attr.initiator)
1081 build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
1082
1083 c4iw_init_wr_wait(&wr_wait);
1084 ret = c4iw_ofld_send(&rhp->rdev, skb);
1085 if (ret)
1086 goto out;
1087
1088 wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
1089 if (!wr_wait.done) {
1090 printk(KERN_ERR MOD "Device %s not responding!\n",
1091 pci_name(rhp->rdev.lldi.pdev));
1092 rhp->rdev.flags = T4_FATAL_ERROR;
1093 ret = -EIO;
1094 } else
1095 ret = wr_wait.ret;
1096out:
1097 PDBG("%s ret %d\n", __func__, ret);
1098 return ret;
1099}
1100
1101int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1102 enum c4iw_qp_attr_mask mask,
1103 struct c4iw_qp_attributes *attrs,
1104 int internal)
1105{
1106 int ret = 0;
1107 struct c4iw_qp_attributes newattr = qhp->attr;
1108 unsigned long flag;
1109 int disconnect = 0;
1110 int terminate = 0;
1111 int abort = 0;
1112 int free = 0;
1113 struct c4iw_ep *ep = NULL;
1114
1115 PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__,
1116 qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
1117 (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
1118
1119 spin_lock_irqsave(&qhp->lock, flag);
1120
1121 /* Process attr changes if in IDLE */
1122 if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
1123 if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
1124 ret = -EIO;
1125 goto out;
1126 }
1127 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
1128 newattr.enable_rdma_read = attrs->enable_rdma_read;
1129 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
1130 newattr.enable_rdma_write = attrs->enable_rdma_write;
1131 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
1132 newattr.enable_bind = attrs->enable_bind;
1133 if (mask & C4IW_QP_ATTR_MAX_ORD) {
1134 if (attrs->max_ord > c4iw_max_read_depth) {
1135 ret = -EINVAL;
1136 goto out;
1137 }
1138 newattr.max_ord = attrs->max_ord;
1139 }
1140 if (mask & C4IW_QP_ATTR_MAX_IRD) {
1141 if (attrs->max_ird > c4iw_max_read_depth) {
1142 ret = -EINVAL;
1143 goto out;
1144 }
1145 newattr.max_ird = attrs->max_ird;
1146 }
1147 qhp->attr = newattr;
1148 }
1149
1150 if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
1151 goto out;
1152 if (qhp->attr.state == attrs->next_state)
1153 goto out;
1154
1155 switch (qhp->attr.state) {
1156 case C4IW_QP_STATE_IDLE:
1157 switch (attrs->next_state) {
1158 case C4IW_QP_STATE_RTS:
1159 if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
1160 ret = -EINVAL;
1161 goto out;
1162 }
1163 if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
1164 ret = -EINVAL;
1165 goto out;
1166 }
1167 qhp->attr.mpa_attr = attrs->mpa_attr;
1168 qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
1169 qhp->ep = qhp->attr.llp_stream_handle;
1170 qhp->attr.state = C4IW_QP_STATE_RTS;
1171
1172 /*
1173 * Ref the endpoint here and deref when we
1174 * disassociate the endpoint from the QP. This
1175 * happens in CLOSING->IDLE transition or *->ERROR
1176 * transition.
1177 */
1178 c4iw_get_ep(&qhp->ep->com);
1179 spin_unlock_irqrestore(&qhp->lock, flag);
1180 ret = rdma_init(rhp, qhp);
1181 spin_lock_irqsave(&qhp->lock, flag);
1182 if (ret)
1183 goto err;
1184 break;
1185 case C4IW_QP_STATE_ERROR:
1186 qhp->attr.state = C4IW_QP_STATE_ERROR;
1187 flush_qp(qhp, &flag);
1188 break;
1189 default:
1190 ret = -EINVAL;
1191 goto out;
1192 }
1193 break;
1194 case C4IW_QP_STATE_RTS:
1195 switch (attrs->next_state) {
1196 case C4IW_QP_STATE_CLOSING:
1197 BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
1198 qhp->attr.state = C4IW_QP_STATE_CLOSING;
1199 if (!internal) {
1200 abort = 0;
1201 disconnect = 1;
1202 ep = qhp->ep;
1203 c4iw_get_ep(&ep->com);
1204 }
1205 spin_unlock_irqrestore(&qhp->lock, flag);
1206 ret = rdma_fini(rhp, qhp);
1207 spin_lock_irqsave(&qhp->lock, flag);
1208 if (ret) {
1209 ep = qhp->ep;
1210 c4iw_get_ep(&ep->com);
1211 disconnect = abort = 1;
1212 goto err;
1213 }
1214 break;
1215 case C4IW_QP_STATE_TERMINATE:
1216 qhp->attr.state = C4IW_QP_STATE_TERMINATE;
1217 if (qhp->ibqp.uobject)
1218 t4_set_wq_in_error(&qhp->wq);
1219 ep = qhp->ep;
1220 c4iw_get_ep(&ep->com);
1221 terminate = 1;
1222 disconnect = 1;
1223 break;
1224 case C4IW_QP_STATE_ERROR:
1225 qhp->attr.state = C4IW_QP_STATE_ERROR;
1226 if (!internal) {
1227 abort = 1;
1228 disconnect = 1;
1229 ep = qhp->ep;
1230 c4iw_get_ep(&ep->com);
1231 }
1232 goto err;
1233 break;
1234 default:
1235 ret = -EINVAL;
1236 goto out;
1237 }
1238 break;
1239 case C4IW_QP_STATE_CLOSING:
1240 if (!internal) {
1241 ret = -EINVAL;
1242 goto out;
1243 }
1244 switch (attrs->next_state) {
1245 case C4IW_QP_STATE_IDLE:
1246 flush_qp(qhp, &flag);
1247 qhp->attr.state = C4IW_QP_STATE_IDLE;
1248 qhp->attr.llp_stream_handle = NULL;
1249 c4iw_put_ep(&qhp->ep->com);
1250 qhp->ep = NULL;
1251 wake_up(&qhp->wait);
1252 break;
1253 case C4IW_QP_STATE_ERROR:
1254 goto err;
1255 default:
1256 ret = -EINVAL;
1257 goto err;
1258 }
1259 break;
1260 case C4IW_QP_STATE_ERROR:
1261 if (attrs->next_state != C4IW_QP_STATE_IDLE) {
1262 ret = -EINVAL;
1263 goto out;
1264 }
1265 if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
1266 ret = -EINVAL;
1267 goto out;
1268 }
1269 qhp->attr.state = C4IW_QP_STATE_IDLE;
1270 break;
1271 case C4IW_QP_STATE_TERMINATE:
1272 if (!internal) {
1273 ret = -EINVAL;
1274 goto out;
1275 }
1276 goto err;
1277 break;
1278 default:
1279 printk(KERN_ERR "%s in a bad state %d\n",
1280 __func__, qhp->attr.state);
1281 ret = -EINVAL;
1282 goto err;
1283 break;
1284 }
1285 goto out;
1286err:
1287 PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
1288 qhp->wq.sq.qid);
1289
1290 /* disassociate the LLP connection */
1291 qhp->attr.llp_stream_handle = NULL;
1292 ep = qhp->ep;
1293 qhp->ep = NULL;
1294 qhp->attr.state = C4IW_QP_STATE_ERROR;
1295 free = 1;
1296 wake_up(&qhp->wait);
1297 BUG_ON(!ep);
1298 flush_qp(qhp, &flag);
1299out:
1300 spin_unlock_irqrestore(&qhp->lock, flag);
1301
1302 if (terminate)
1303 post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);
1304
1305 /*
1306 * If disconnect is 1, then we need to initiate a disconnect
1307 * on the EP. This can be a normal close (RTS->CLOSING) or
1308 * an abnormal close (RTS/CLOSING->ERROR).
1309 */
1310 if (disconnect) {
1311 c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
1312 GFP_KERNEL);
1313 c4iw_put_ep(&ep->com);
1314 }
1315
1316 /*
1317 * If free is 1, then we've disassociated the EP from the QP
1318 * and we need to dereference the EP.
1319 */
1320 if (free)
1321 c4iw_put_ep(&ep->com);
1322
1323 PDBG("%s exit state %d\n", __func__, qhp->attr.state);
1324 return ret;
1325}
1326
1327int c4iw_destroy_qp(struct ib_qp *ib_qp)
1328{
1329 struct c4iw_dev *rhp;
1330 struct c4iw_qp *qhp;
1331 struct c4iw_qp_attributes attrs;
1332 struct c4iw_ucontext *ucontext;
1333
1334 qhp = to_c4iw_qp(ib_qp);
1335 rhp = qhp->rhp;
1336
1337 attrs.next_state = C4IW_QP_STATE_ERROR;
1338 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
1339 wait_event(qhp->wait, !qhp->ep);
1340
1341 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
1342 remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid);
1343 atomic_dec(&qhp->refcnt);
1344 wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
1345
1346 ucontext = ib_qp->uobject ?
1347 to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
1348 destroy_qp(&rhp->rdev, &qhp->wq,
1349 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
1350
1351 PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
1352 kfree(qhp);
1353 return 0;
1354}
1355
1356struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
1357 struct ib_udata *udata)
1358{
1359 struct c4iw_dev *rhp;
1360 struct c4iw_qp *qhp;
1361 struct c4iw_pd *php;
1362 struct c4iw_cq *schp;
1363 struct c4iw_cq *rchp;
1364 struct c4iw_create_qp_resp uresp;
1365 int sqsize, rqsize;
1366 struct c4iw_ucontext *ucontext;
1367 int ret;
1368 struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4;
1369
1370 PDBG("%s ib_pd %p\n", __func__, pd);
1371
1372 if (attrs->qp_type != IB_QPT_RC)
1373 return ERR_PTR(-EINVAL);
1374
1375 php = to_c4iw_pd(pd);
1376 rhp = php->rhp;
1377 schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
1378 rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
1379 if (!schp || !rchp)
1380 return ERR_PTR(-EINVAL);
1381
1382 if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
1383 return ERR_PTR(-EINVAL);
1384
1385 rqsize = roundup(attrs->cap.max_recv_wr + 1, 16);
1386 if (rqsize > T4_MAX_RQ_SIZE)
1387 return ERR_PTR(-E2BIG);
1388
1389 sqsize = roundup(attrs->cap.max_send_wr + 1, 16);
1390 if (sqsize > T4_MAX_SQ_SIZE)
1391 return ERR_PTR(-E2BIG);
1392
1393 ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
1394
1395
1396 qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
1397 if (!qhp)
1398 return ERR_PTR(-ENOMEM);
1399 qhp->wq.sq.size = sqsize;
1400 qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
1401 qhp->wq.rq.size = rqsize;
1402 qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;
1403
1404 if (ucontext) {
1405 qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
1406 qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
1407 }
1408
1409 PDBG("%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu\n",
1410 __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);
1411
1412 ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
1413 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
1414 if (ret)
1415 goto err1;
1416
1417 attrs->cap.max_recv_wr = rqsize - 1;
1418 attrs->cap.max_send_wr = sqsize - 1;
1419 attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;
1420
1421 qhp->rhp = rhp;
1422 qhp->attr.pd = php->pdid;
1423 qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
1424 qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
1425 qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
1426 qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
1427 qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
1428 qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
1429 qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
1430 qhp->attr.state = C4IW_QP_STATE_IDLE;
1431 qhp->attr.next_state = C4IW_QP_STATE_IDLE;
1432 qhp->attr.enable_rdma_read = 1;
1433 qhp->attr.enable_rdma_write = 1;
1434 qhp->attr.enable_bind = 1;
1435 qhp->attr.max_ord = 1;
1436 qhp->attr.max_ird = 1;
1437 spin_lock_init(&qhp->lock);
1438 init_waitqueue_head(&qhp->wait);
1439 atomic_set(&qhp->refcnt, 1);
1440
1441 ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
1442 if (ret)
1443 goto err2;
1444
1445 ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.rq.qid);
1446 if (ret)
1447 goto err3;
1448
1449 if (udata) {
1450 mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
1451 if (!mm1) {
1452 ret = -ENOMEM;
1453 goto err4;
1454 }
1455 mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
1456 if (!mm2) {
1457 ret = -ENOMEM;
1458 goto err5;
1459 }
1460 mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
1461 if (!mm3) {
1462 ret = -ENOMEM;
1463 goto err6;
1464 }
1465 mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
1466 if (!mm4) {
1467 ret = -ENOMEM;
1468 goto err7;
1469 }
1470
1471 uresp.qid_mask = rhp->rdev.qpmask;
1472 uresp.sqid = qhp->wq.sq.qid;
1473 uresp.sq_size = qhp->wq.sq.size;
1474 uresp.sq_memsize = qhp->wq.sq.memsize;
1475 uresp.rqid = qhp->wq.rq.qid;
1476 uresp.rq_size = qhp->wq.rq.size;
1477 uresp.rq_memsize = qhp->wq.rq.memsize;
1478 spin_lock(&ucontext->mmap_lock);
1479 uresp.sq_key = ucontext->key;
1480 ucontext->key += PAGE_SIZE;
1481 uresp.rq_key = ucontext->key;
1482 ucontext->key += PAGE_SIZE;
1483 uresp.sq_db_gts_key = ucontext->key;
1484 ucontext->key += PAGE_SIZE;
1485 uresp.rq_db_gts_key = ucontext->key;
1486 ucontext->key += PAGE_SIZE;
1487 spin_unlock(&ucontext->mmap_lock);
1488 ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
1489 if (ret)
1490 goto err8;
1491 mm1->key = uresp.sq_key;
1492 mm1->addr = virt_to_phys(qhp->wq.sq.queue);
1493 mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
1494 insert_mmap(ucontext, mm1);
1495 mm2->key = uresp.rq_key;
1496 mm2->addr = virt_to_phys(qhp->wq.rq.queue);
1497 mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
1498 insert_mmap(ucontext, mm2);
1499 mm3->key = uresp.sq_db_gts_key;
1500 mm3->addr = qhp->wq.sq.udb;
1501 mm3->len = PAGE_SIZE;
1502 insert_mmap(ucontext, mm3);
1503 mm4->key = uresp.rq_db_gts_key;
1504 mm4->addr = qhp->wq.rq.udb;
1505 mm4->len = PAGE_SIZE;
1506 insert_mmap(ucontext, mm4);
1507 }
1508 qhp->ibqp.qp_num = qhp->wq.sq.qid;
1509 init_timer(&(qhp->timer));
1510 PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
1511 __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
1512 qhp->wq.sq.qid);
1513 return &qhp->ibqp;
1514err8:
1515 kfree(mm4);
1516err7:
1517 kfree(mm3);
1518err6:
1519 kfree(mm2);
1520err5:
1521 kfree(mm1);
1522err4:
1523 remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid);
1524err3:
1525 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
1526err2:
1527 destroy_qp(&rhp->rdev, &qhp->wq,
1528 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
1529err1:
1530 kfree(qhp);
1531 return ERR_PTR(ret);
1532}
1533
1534int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1535 int attr_mask, struct ib_udata *udata)
1536{
1537 struct c4iw_dev *rhp;
1538 struct c4iw_qp *qhp;
1539 enum c4iw_qp_attr_mask mask = 0;
1540 struct c4iw_qp_attributes attrs;
1541
1542 PDBG("%s ib_qp %p\n", __func__, ibqp);
1543
1544 /* iwarp does not support the RTR state */
1545 if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
1546 attr_mask &= ~IB_QP_STATE;
1547
1548 /* Make sure we still have something left to do */
1549 if (!attr_mask)
1550 return 0;
1551
1552 memset(&attrs, 0, sizeof attrs);
1553 qhp = to_c4iw_qp(ibqp);
1554 rhp = qhp->rhp;
1555
1556 attrs.next_state = c4iw_convert_state(attr->qp_state);
1557 attrs.enable_rdma_read = (attr->qp_access_flags &
1558 IB_ACCESS_REMOTE_READ) ? 1 : 0;
1559 attrs.enable_rdma_write = (attr->qp_access_flags &
1560 IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
1561 attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;
1562
1563
1564 mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
1565 mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
1566 (C4IW_QP_ATTR_ENABLE_RDMA_READ |
1567 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
1568 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;
1569
1570 return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
1571}
1572
1573struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
1574{
1575 PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
1576 return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
1577}
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
new file mode 100644
index 000000000000..fb195d1d9015
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -0,0 +1,417 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32/* Crude resource management */
33#include <linux/kernel.h>
34#include <linux/random.h>
35#include <linux/slab.h>
36#include <linux/kfifo.h>
37#include <linux/spinlock.h>
38#include <linux/errno.h>
39#include <linux/genalloc.h>
40#include "iw_cxgb4.h"
41
42#define RANDOM_SIZE 16
43
44static int __c4iw_init_resource_fifo(struct kfifo *fifo,
45 spinlock_t *fifo_lock,
46 u32 nr, u32 skip_low,
47 u32 skip_high,
48 int random)
49{
50 u32 i, j, entry = 0, idx;
51 u32 random_bytes;
52 u32 rarray[16];
53 spin_lock_init(fifo_lock);
54
55 if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL))
56 return -ENOMEM;
57
58 for (i = 0; i < skip_low + skip_high; i++)
59 kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
60 if (random) {
61 j = 0;
62 random_bytes = random32();
63 for (i = 0; i < RANDOM_SIZE; i++)
64 rarray[i] = i + skip_low;
65 for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
66 if (j >= RANDOM_SIZE) {
67 j = 0;
68 random_bytes = random32();
69 }
70 idx = (random_bytes >> (j * 2)) & 0xF;
71 kfifo_in(fifo,
72 (unsigned char *) &rarray[idx],
73 sizeof(u32));
74 rarray[idx] = i;
75 j++;
76 }
77 for (i = 0; i < RANDOM_SIZE; i++)
78 kfifo_in(fifo,
79 (unsigned char *) &rarray[i],
80 sizeof(u32));
81 } else
82 for (i = skip_low; i < nr - skip_high; i++)
83 kfifo_in(fifo, (unsigned char *) &i, sizeof(u32));
84
85 for (i = 0; i < skip_low + skip_high; i++)
86 if (kfifo_out_locked(fifo, (unsigned char *) &entry,
87 sizeof(u32), fifo_lock))
88 break;
89 return 0;
90}
91
92static int c4iw_init_resource_fifo(struct kfifo *fifo, spinlock_t * fifo_lock,
93 u32 nr, u32 skip_low, u32 skip_high)
94{
95 return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
96 skip_high, 0);
97}
98
99static int c4iw_init_resource_fifo_random(struct kfifo *fifo,
100 spinlock_t *fifo_lock,
101 u32 nr, u32 skip_low, u32 skip_high)
102{
103 return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
104 skip_high, 1);
105}
106
107static int c4iw_init_qid_fifo(struct c4iw_rdev *rdev)
108{
109 u32 i;
110
111 spin_lock_init(&rdev->resource.qid_fifo_lock);
112
113 if (kfifo_alloc(&rdev->resource.qid_fifo, T4_MAX_QIDS * sizeof(u32),
114 GFP_KERNEL))
115 return -ENOMEM;
116
117 for (i = T4_QID_BASE; i < T4_QID_BASE + T4_MAX_QIDS; i++)
118 if (!(i & rdev->qpmask))
119 kfifo_in(&rdev->resource.qid_fifo,
120 (unsigned char *) &i, sizeof(u32));
121 return 0;
122}
123
124/* nr_* must be power of 2 */
125int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
126{
127 int err = 0;
128 err = c4iw_init_resource_fifo_random(&rdev->resource.tpt_fifo,
129 &rdev->resource.tpt_fifo_lock,
130 nr_tpt, 1, 0);
131 if (err)
132 goto tpt_err;
133 err = c4iw_init_qid_fifo(rdev);
134 if (err)
135 goto qid_err;
136 err = c4iw_init_resource_fifo(&rdev->resource.pdid_fifo,
137 &rdev->resource.pdid_fifo_lock,
138 nr_pdid, 1, 0);
139 if (err)
140 goto pdid_err;
141 return 0;
142pdid_err:
143 kfifo_free(&rdev->resource.qid_fifo);
144qid_err:
145 kfifo_free(&rdev->resource.tpt_fifo);
146tpt_err:
147 return -ENOMEM;
148}
149
150/*
151 * returns 0 if no resource available
152 */
153u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock)
154{
155 u32 entry;
156 if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock))
157 return entry;
158 else
159 return 0;
160}
161
162void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock)
163{
164 PDBG("%s entry 0x%x\n", __func__, entry);
165 kfifo_in_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock);
166}
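A sketch of how these two helpers pair up in practice, using the PDID fifo initialised above (the function name is invented and error handling is minimal):

static int example_with_pdid(struct c4iw_rdev *rdev)
{
	u32 pdid;

	pdid = c4iw_get_resource(&rdev->resource.pdid_fifo,
				 &rdev->resource.pdid_fifo_lock);
	if (!pdid)			/* 0 means the fifo is exhausted */
		return -ENOMEM;

	/* ... use pdid for the lifetime of the PD ... */

	c4iw_put_resource(&rdev->resource.pdid_fifo, pdid,
			  &rdev->resource.pdid_fifo_lock);
	return 0;
}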
167
168u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
169{
170 struct c4iw_qid_list *entry;
171 u32 qid;
172 int i;
173
174 mutex_lock(&uctx->lock);
175 if (!list_empty(&uctx->cqids)) {
176 entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,
177 entry);
178 list_del(&entry->entry);
179 qid = entry->qid;
180 kfree(entry);
181 } else {
182 qid = c4iw_get_resource(&rdev->resource.qid_fifo,
183 &rdev->resource.qid_fifo_lock);
184 if (!qid)
185 goto out;
186 for (i = qid+1; i & rdev->qpmask; i++) {
187 entry = kmalloc(sizeof *entry, GFP_KERNEL);
188 if (!entry)
189 goto out;
190 entry->qid = i;
191 list_add_tail(&entry->entry, &uctx->cqids);
192 }
193
194 /*
195 * now put the same ids on the qp list since they all
196 * map to the same db/gts page.
197 */
198 entry = kmalloc(sizeof *entry, GFP_KERNEL);
199 if (!entry)
200 goto out;
201 entry->qid = qid;
202 list_add_tail(&entry->entry, &uctx->qpids);
203 for (i = qid+1; i & rdev->qpmask; i++) {
204 entry = kmalloc(sizeof *entry, GFP_KERNEL);
205 if (!entry)
206 goto out;
207 entry->qid = i;
208 list_add_tail(&entry->entry, &uctx->qpids);
209 }
210 }
211out:
212 mutex_unlock(&uctx->lock);
213 PDBG("%s qid 0x%x\n", __func__, qid);
214 return qid;
215}
216
217void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
218 struct c4iw_dev_ucontext *uctx)
219{
220 struct c4iw_qid_list *entry;
221
222 entry = kmalloc(sizeof *entry, GFP_KERNEL);
223 if (!entry)
224 return;
225 PDBG("%s qid 0x%x\n", __func__, qid);
226 entry->qid = qid;
227 mutex_lock(&uctx->lock);
228 list_add_tail(&entry->entry, &uctx->cqids);
229 mutex_unlock(&uctx->lock);
230}
231
232u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
233{
234 struct c4iw_qid_list *entry;
235 u32 qid;
236 int i;
237
238 mutex_lock(&uctx->lock);
239 if (!list_empty(&uctx->qpids)) {
240 entry = list_entry(uctx->qpids.next, struct c4iw_qid_list,
241 entry);
242 list_del(&entry->entry);
243 qid = entry->qid;
244 kfree(entry);
245 } else {
246 qid = c4iw_get_resource(&rdev->resource.qid_fifo,
247 &rdev->resource.qid_fifo_lock);
248 if (!qid)
249 goto out;
250 for (i = qid+1; i & rdev->qpmask; i++) {
251 entry = kmalloc(sizeof *entry, GFP_KERNEL);
252 if (!entry)
253 goto out;
254 entry->qid = i;
255 list_add_tail(&entry->entry, &uctx->qpids);
256 }
257
258 /*
259 * now put the same ids on the cq list since they all
260 * map to the same db/gts page.
261 */
262 entry = kmalloc(sizeof *entry, GFP_KERNEL);
263 if (!entry)
264 goto out;
265 entry->qid = qid;
266 list_add_tail(&entry->entry, &uctx->cqids);
267 for (i = qid; i & rdev->qpmask; i++) {
268 entry = kmalloc(sizeof *entry, GFP_KERNEL);
269 if (!entry)
270 goto out;
271 entry->qid = i;
272 list_add_tail(&entry->entry, &uctx->cqids);
273 }
274 }
275out:
276 mutex_unlock(&uctx->lock);
277 PDBG("%s qid 0x%x\n", __func__, qid);
278 return qid;
279}
280
281void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
282 struct c4iw_dev_ucontext *uctx)
283{
284 struct c4iw_qid_list *entry;
285
286 entry = kmalloc(sizeof *entry, GFP_KERNEL);
287 if (!entry)
288 return;
289 PDBG("%s qid 0x%x\n", __func__, qid);
290 entry->qid = qid;
291 mutex_lock(&uctx->lock);
292 list_add_tail(&entry->entry, &uctx->qpids);
293 mutex_unlock(&uctx->lock);
294}
295
296void c4iw_destroy_resource(struct c4iw_resource *rscp)
297{
298 kfifo_free(&rscp->tpt_fifo);
299 kfifo_free(&rscp->qid_fifo);
300 kfifo_free(&rscp->pdid_fifo);
301}
302
303/*
304 * PBL Memory Manager. Uses Linux generic allocator.
305 */
306
307#define MIN_PBL_SHIFT 8 /* 256B == min PBL size (32 entries) */
308
309u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
310{
311 unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
312 PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
313 return (u32)addr;
314}
315
316void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
317{
318 PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
319 gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
320}
321
322int c4iw_pblpool_create(struct c4iw_rdev *rdev)
323{
324 unsigned pbl_start, pbl_chunk, pbl_top;
325
326 rdev->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
327 if (!rdev->pbl_pool)
328 return -ENOMEM;
329
330 pbl_start = rdev->lldi.vr->pbl.start;
331 pbl_chunk = rdev->lldi.vr->pbl.size;
332 pbl_top = pbl_start + pbl_chunk;
333
334 while (pbl_start < pbl_top) {
335 pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
336 if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
337 PDBG("%s failed to add PBL chunk (%x/%x)\n",
338 __func__, pbl_start, pbl_chunk);
339 if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
340 printk(KERN_WARNING MOD
341 "Failed to add all PBL chunks (%x/%x)\n",
342 pbl_start,
343 pbl_top - pbl_start);
344 return 0;
345 }
346 pbl_chunk >>= 1;
347 } else {
348 PDBG("%s added PBL chunk (%x/%x)\n",
349 __func__, pbl_start, pbl_chunk);
350 pbl_start += pbl_chunk;
351 }
352 }
353
354 return 0;
355}
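To put the fallback loop in concrete terms (an illustrative trace, not taken from a real adapter): with MIN_PBL_SHIFT = 8 the give-up threshold is 1024 << 8 = 256 KB, so if gen_pool_add() rejects a 4 MB chunk the loop retries with 2 MB, 1 MB, 512 KB and finally 256 KB. Only when a failure occurs at or below that threshold does it warn and return with whatever chunks were added, rather than failing device bring-up outright.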
356
357void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
358{
359 gen_pool_destroy(rdev->pbl_pool);
360}
361
362/*
363 * RQT Memory Manager. Uses Linux generic allocator.
364 */
365
366#define MIN_RQT_SHIFT 10 /* 1KB == min RQT size (16 entries) */
367
368u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
369{
370 unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
371 PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
372 return (u32)addr;
373}
374
375void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
376{
377 PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
378 gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
379}
380
381int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
382{
383 unsigned rqt_start, rqt_chunk, rqt_top;
384
385 rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
386 if (!rdev->rqt_pool)
387 return -ENOMEM;
388
389 rqt_start = rdev->lldi.vr->rq.start;
390 rqt_chunk = rdev->lldi.vr->rq.size;
391 rqt_top = rqt_start + rqt_chunk;
392
393 while (rqt_start < rqt_top) {
394 rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
395 if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
396 PDBG("%s failed to add RQT chunk (%x/%x)\n",
397 __func__, rqt_start, rqt_chunk);
398 if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
399 printk(KERN_WARNING MOD
400 "Failed to add all RQT chunks (%x/%x)\n",
401 rqt_start, rqt_top - rqt_start);
402 return 0;
403 }
404 rqt_chunk >>= 1;
405 } else {
406 PDBG("%s added RQT chunk (%x/%x)\n",
407 __func__, rqt_start, rqt_chunk);
408 rqt_start += rqt_chunk;
409 }
410 }
411 return 0;
412}
413
414void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
415{
416 gen_pool_destroy(rdev->rqt_pool);
417}
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
new file mode 100644
index 000000000000..d0e8af352408
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -0,0 +1,550 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 * - Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials
20 * provided with the distribution.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
25 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
26 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
27 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
28 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29 * SOFTWARE.
30 */
31#ifndef __T4_H__
32#define __T4_H__
33
34#include "t4_hw.h"
35#include "t4_regs.h"
36#include "t4_msg.h"
37#include "t4fw_ri_api.h"
38
39#define T4_QID_BASE 1024
40#define T4_MAX_QIDS 256
41#define T4_MAX_NUM_QP (1<<16)
42#define T4_MAX_NUM_CQ (1<<15)
43#define T4_MAX_NUM_PD (1<<15)
44#define T4_MAX_PBL_SIZE 256
45#define T4_MAX_RQ_SIZE 1024
46#define T4_MAX_SQ_SIZE 1024
47#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE-1)
48#define T4_MAX_CQ_DEPTH 8192
49#define T4_MAX_NUM_STAG (1<<15)
50#define T4_MAX_MR_SIZE (~0ULL - 1)
51#define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
52#define T4_STAG_UNSET 0xffffffff
53#define T4_FW_MAJ 0
54#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
55
56struct t4_status_page {
57 __be32 rsvd1; /* flit 0 - hw owns */
58 __be16 rsvd2;
59 __be16 qid;
60 __be16 cidx;
61 __be16 pidx;
62 u8 qp_err; /* flit 1 - sw owns */
63 u8 db_off;
64};
65
66#define T4_EQ_SIZE 64
67
68#define T4_SQ_NUM_SLOTS 4
69#define T4_SQ_NUM_BYTES (T4_EQ_SIZE * T4_SQ_NUM_SLOTS)
70#define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
71 sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
72#define T4_MAX_SEND_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
73 sizeof(struct fw_ri_immd)))
74#define T4_MAX_WRITE_INLINE ((T4_SQ_NUM_BYTES - \
75 sizeof(struct fw_ri_rdma_write_wr) - \
76 sizeof(struct fw_ri_immd)))
77#define T4_MAX_WRITE_SGE ((T4_SQ_NUM_BYTES - \
78 sizeof(struct fw_ri_rdma_write_wr) - \
79 sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
80#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
81 sizeof(struct fw_ri_immd)))
82#define T4_MAX_FR_DEPTH 255
83
84#define T4_RQ_NUM_SLOTS 2
85#define T4_RQ_NUM_BYTES (T4_EQ_SIZE * T4_RQ_NUM_SLOTS)
86#define T4_MAX_RECV_SGE ((T4_RQ_NUM_BYTES - sizeof(struct fw_ri_recv_wr) - \
87 sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
88
89union t4_wr {
90 struct fw_ri_res_wr res;
91 struct fw_ri_wr ri;
92 struct fw_ri_rdma_write_wr write;
93 struct fw_ri_send_wr send;
94 struct fw_ri_rdma_read_wr read;
95 struct fw_ri_bind_mw_wr bind;
96 struct fw_ri_fr_nsmr_wr fr;
97 struct fw_ri_inv_lstag_wr inv;
98 struct t4_status_page status;
99 __be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
100};
101
102union t4_recv_wr {
103 struct fw_ri_recv_wr recv;
104 struct t4_status_page status;
105 __be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS];
106};
107
108static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
109 enum fw_wr_opcodes opcode, u8 flags, u8 len16)
110{
111 int slots_used;
112
113 wqe->send.opcode = (u8)opcode;
114 wqe->send.flags = flags;
115 wqe->send.wrid = wrid;
116 wqe->send.r1[0] = 0;
117 wqe->send.r1[1] = 0;
118 wqe->send.r1[2] = 0;
119 wqe->send.len16 = len16;
120
121 slots_used = DIV_ROUND_UP(len16*16, T4_EQ_SIZE);
122 while (slots_used < T4_SQ_NUM_SLOTS) {
123 wqe->flits[slots_used * T4_EQ_SIZE / sizeof(__be64)] = 0;
124 slots_used++;
125 }
126}
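As a concrete illustration of the arithmetic (values picked for the example): with T4_EQ_SIZE = 64 and T4_SQ_NUM_SLOTS = 4, each software SQ entry spans four 64-byte hardware slots. A WR with len16 = 3 occupies 48 bytes, so slots_used = DIV_ROUND_UP(48, 64) = 1 and the loop clears the first flit of slots 1, 2 and 3 (flits[8], flits[16] and flits[24]), presumably so the hardware never sees a stale opcode in the unused slots.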
127
128/* CQE/AE status codes */
129#define T4_ERR_SUCCESS 0x0
130#define T4_ERR_STAG 0x1 /* STAG invalid: either the */
131					 /* STAG is off limit, being 0, */
132 /* or STAG_key mismatch */
133#define T4_ERR_PDID 0x2 /* PDID mismatch */
134#define T4_ERR_QPID 0x3 /* QPID mismatch */
135#define T4_ERR_ACCESS 0x4 /* Invalid access right */
136#define T4_ERR_WRAP 0x5 /* Wrap error */
137#define T4_ERR_BOUND			   0x6	/* base and bounds violation */
138#define T4_ERR_INVALIDATE_SHARED_MR 0x7 /* attempt to invalidate a */
139 /* shared memory region */
140#define T4_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8 /* attempt to invalidate a */
141							/* MR with MW bound */
142#define T4_ERR_ECC 0x9 /* ECC error detected */
143#define T4_ERR_ECC_PSTAG 0xA /* ECC error detected when */
144 /* reading PSTAG for a MW */
145 /* Invalidate */
146#define T4_ERR_PBL_ADDR_BOUND 0xB /* pbl addr out of bounds: */
147 /* software error */
148#define T4_ERR_SWFLUSH 0xC /* SW FLUSHED */
149#define T4_ERR_CRC 0x10 /* CRC error */
150#define T4_ERR_MARKER 0x11 /* Marker error */
151#define T4_ERR_PDU_LEN_ERR 0x12 /* invalid PDU length */
152#define T4_ERR_OUT_OF_RQE 0x13 /* out of RQE */
153#define T4_ERR_DDP_VERSION 0x14 /* wrong DDP version */
154#define T4_ERR_RDMA_VERSION 0x15 /* wrong RDMA version */
155#define T4_ERR_OPCODE 0x16 /* invalid rdma opcode */
156#define T4_ERR_DDP_QUEUE_NUM 0x17 /* invalid ddp queue number */
157#define T4_ERR_MSN 0x18 /* MSN error */
158#define T4_ERR_TBIT 0x19 /* tag bit not set correctly */
159#define T4_ERR_MO 0x1A /* MO not 0 for TERMINATE */
160 /* or READ_REQ */
161#define T4_ERR_MSN_GAP 0x1B
162#define T4_ERR_MSN_RANGE 0x1C
163#define T4_ERR_IRD_OVERFLOW 0x1D
164#define T4_ERR_RQE_ADDR_BOUND 0x1E /* RQE addr out of bounds: */
165 /* software error */
166#define T4_ERR_INTERNAL_ERR 0x1F /* internal error (opcode */
167 /* mismatch) */
168/*
169 * CQE defs
170 */
171struct t4_cqe {
172 __be32 header;
173 __be32 len;
174 union {
175 struct {
176 __be32 stag;
177 __be32 msn;
178 } rcqe;
179 struct {
180 u32 nada1;
181 u16 nada2;
182 u16 cidx;
183 } scqe;
184 struct {
185 __be32 wrid_hi;
186 __be32 wrid_low;
187 } gen;
188 } u;
189 __be64 reserved;
190 __be64 bits_type_ts;
191};
192
193/* macros for flit 0 of the cqe */
194
195#define S_CQE_QPID 12
196#define M_CQE_QPID 0xFFFFF
197#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
198#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)
199
200#define S_CQE_SWCQE 11
201#define M_CQE_SWCQE 0x1
202#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
203#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)
204
205#define S_CQE_STATUS 5
206#define M_CQE_STATUS 0x1F
207#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
208#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)
209
210#define S_CQE_TYPE 4
211#define M_CQE_TYPE 0x1
212#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
213#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)
214
215#define S_CQE_OPCODE 0
216#define M_CQE_OPCODE 0xF
217#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
218#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)
219
220#define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x)->header)))
221#define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x)->header)))
222#define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x)->header)))
223#define SQ_TYPE(x) (CQE_TYPE((x)))
224#define RQ_TYPE(x) (!CQE_TYPE((x)))
225#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x)->header)))
226#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x)->header)))
227
228#define CQE_SEND_OPCODE(x)( \
229 (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
230 (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
231 (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
232 (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
233
234#define CQE_LEN(x) (be32_to_cpu((x)->len))
235
236/* used for RQ completion processing */
237#define CQE_WRID_STAG(x) (be32_to_cpu((x)->u.rcqe.stag))
238#define CQE_WRID_MSN(x) (be32_to_cpu((x)->u.rcqe.msn))
239
240/* used for SQ completion processing */
241#define CQE_WRID_SQ_IDX(x) ((x)->u.scqe.cidx)
242
243/* generic accessor macros */
244#define CQE_WRID_HI(x) ((x)->u.gen.wrid_hi)
245#define CQE_WRID_LOW(x) ((x)->u.gen.wrid_low)
246
247/* macros for flit 3 of the cqe */
248#define S_CQE_GENBIT 63
249#define M_CQE_GENBIT 0x1
250#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
251#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)
252
253#define S_CQE_OVFBIT 62
254#define M_CQE_OVFBIT 0x1
255#define G_CQE_OVFBIT(x) ((((x) >> S_CQE_OVFBIT)) & M_CQE_OVFBIT)
256
257#define S_CQE_IQTYPE 60
258#define M_CQE_IQTYPE 0x3
259#define G_CQE_IQTYPE(x) ((((x) >> S_CQE_IQTYPE)) & M_CQE_IQTYPE)
260
261#define M_CQE_TS 0x0fffffffffffffffULL
262#define G_CQE_TS(x) ((x) & M_CQE_TS)
263
264#define CQE_OVFBIT(x) ((unsigned)G_CQE_OVFBIT(be64_to_cpu((x)->bits_type_ts)))
265#define CQE_GENBIT(x) ((unsigned)G_CQE_GENBIT(be64_to_cpu((x)->bits_type_ts)))
266#define CQE_TS(x) (G_CQE_TS(be64_to_cpu((x)->bits_type_ts)))
267
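Putting the flit-0 macros together: the 32-bit header packs the opcode in bits 3:0, the SQ/RQ type in bit 4, the status in bits 9:5, the software-CQE flag in bit 11 and the QP id in bits 31:12. A small decode sketch follows (illustrative; example_decode_cqe is not part of the driver):

static void example_decode_cqe(struct t4_cqe *cqe)
{
	u32 hdr = be32_to_cpu(cqe->header);

	printk(KERN_DEBUG "qpid 0x%x %s opcode 0x%x status 0x%x len %u\n",
	       G_CQE_QPID(hdr), SQ_TYPE(cqe) ? "SQ" : "RQ",
	       G_CQE_OPCODE(hdr), G_CQE_STATUS(hdr), CQE_LEN(cqe));
}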
268struct t4_swsqe {
269 u64 wr_id;
270 struct t4_cqe cqe;
271 int read_len;
272 int opcode;
273 int complete;
274 int signaled;
275 u16 idx;
276};
277
278struct t4_sq {
279 union t4_wr *queue;
280 dma_addr_t dma_addr;
281 DECLARE_PCI_UNMAP_ADDR(mapping);
282 struct t4_swsqe *sw_sq;
283 struct t4_swsqe *oldest_read;
284 u64 udb;
285 size_t memsize;
286 u32 qid;
287 u16 in_use;
288 u16 size;
289 u16 cidx;
290 u16 pidx;
291};
292
293struct t4_swrqe {
294 u64 wr_id;
295};
296
297struct t4_rq {
298 union t4_recv_wr *queue;
299 dma_addr_t dma_addr;
300 DECLARE_PCI_UNMAP_ADDR(mapping);
301 struct t4_swrqe *sw_rq;
302 u64 udb;
303 size_t memsize;
304 u32 qid;
305 u32 msn;
306 u32 rqt_hwaddr;
307 u16 rqt_size;
308 u16 in_use;
309 u16 size;
310 u16 cidx;
311 u16 pidx;
312};
313
314struct t4_wq {
315 struct t4_sq sq;
316 struct t4_rq rq;
317 void __iomem *db;
318 void __iomem *gts;
319 struct c4iw_rdev *rdev;
320};
321
322static inline int t4_rqes_posted(struct t4_wq *wq)
323{
324 return wq->rq.in_use;
325}
326
327static inline int t4_rq_empty(struct t4_wq *wq)
328{
329 return wq->rq.in_use == 0;
330}
331
332static inline int t4_rq_full(struct t4_wq *wq)
333{
334 return wq->rq.in_use == (wq->rq.size - 1);
335}
336
337static inline u32 t4_rq_avail(struct t4_wq *wq)
338{
339 return wq->rq.size - 1 - wq->rq.in_use;
340}
341
342static inline void t4_rq_produce(struct t4_wq *wq)
343{
344 wq->rq.in_use++;
345 if (++wq->rq.pidx == wq->rq.size)
346 wq->rq.pidx = 0;
347}
348
349static inline void t4_rq_consume(struct t4_wq *wq)
350{
351 wq->rq.in_use--;
352 wq->rq.msn++;
353 if (++wq->rq.cidx == wq->rq.size)
354 wq->rq.cidx = 0;
355}
356
357static inline int t4_sq_empty(struct t4_wq *wq)
358{
359 return wq->sq.in_use == 0;
360}
361
362static inline int t4_sq_full(struct t4_wq *wq)
363{
364 return wq->sq.in_use == (wq->sq.size - 1);
365}
366
367static inline u32 t4_sq_avail(struct t4_wq *wq)
368{
369 return wq->sq.size - 1 - wq->sq.in_use;
370}
371
372static inline void t4_sq_produce(struct t4_wq *wq)
373{
374 wq->sq.in_use++;
375 if (++wq->sq.pidx == wq->sq.size)
376 wq->sq.pidx = 0;
377}
378
379static inline void t4_sq_consume(struct t4_wq *wq)
380{
381 wq->sq.in_use--;
382 if (++wq->sq.cidx == wq->sq.size)
383 wq->sq.cidx = 0;
384}
385
386static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc)
387{
388 inc *= T4_SQ_NUM_SLOTS;
389 wmb();
390 writel(QID(wq->sq.qid) | PIDX(inc), wq->db);
391}
392
393static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc)
394{
395 inc *= T4_RQ_NUM_SLOTS;
396 wmb();
397 writel(QID(wq->rq.qid) | PIDX(inc), wq->db);
398}
399
400static inline int t4_wq_in_error(struct t4_wq *wq)
401{
402 return wq->sq.queue[wq->sq.size].status.qp_err;
403}
404
405static inline void t4_set_wq_in_error(struct t4_wq *wq)
406{
407 wq->sq.queue[wq->sq.size].status.qp_err = 1;
408 wq->rq.queue[wq->rq.size].status.qp_err = 1;
409}
410
411static inline void t4_disable_wq_db(struct t4_wq *wq)
412{
413 wq->sq.queue[wq->sq.size].status.db_off = 1;
414 wq->rq.queue[wq->rq.size].status.db_off = 1;
415}
416
417static inline void t4_enable_wq_db(struct t4_wq *wq)
418{
419 wq->sq.queue[wq->sq.size].status.db_off = 0;
420 wq->rq.queue[wq->rq.size].status.db_off = 0;
421}
422
423static inline int t4_wq_db_enabled(struct t4_wq *wq)
424{
425 return !wq->sq.queue[wq->sq.size].status.db_off;
426}
427
428struct t4_cq {
429 struct t4_cqe *queue;
430 dma_addr_t dma_addr;
431 DECLARE_PCI_UNMAP_ADDR(mapping);
432 struct t4_cqe *sw_queue;
433 void __iomem *gts;
434 struct c4iw_rdev *rdev;
435 u64 ugts;
436 size_t memsize;
437 u64 timestamp;
438 u32 cqid;
439 u16 size; /* including status page */
440 u16 cidx;
441 u16 sw_pidx;
442 u16 sw_cidx;
443 u16 sw_in_use;
444 u16 cidx_inc;
445 u8 gen;
446 u8 error;
447};
448
449static inline int t4_arm_cq(struct t4_cq *cq, int se)
450{
451 u32 val;
452 u16 inc;
453
454 do {
455 /*
 456	 * inc must be less than both the max update value -and-
457 * the size of the CQ.
458 */
459 inc = cq->cidx_inc <= CIDXINC_MASK ? cq->cidx_inc :
460 CIDXINC_MASK;
461 inc = inc <= (cq->size - 1) ? inc : (cq->size - 1);
462 if (inc == cq->cidx_inc)
463 val = SEINTARM(se) | CIDXINC(inc) | TIMERREG(6) |
464 INGRESSQID(cq->cqid);
465 else
466 val = SEINTARM(0) | CIDXINC(inc) | TIMERREG(7) |
467 INGRESSQID(cq->cqid);
468 cq->cidx_inc -= inc;
469 writel(val, cq->gts);
470 } while (cq->cidx_inc);
471 return 0;
472}
473
474static inline void t4_swcq_produce(struct t4_cq *cq)
475{
476 cq->sw_in_use++;
477 if (++cq->sw_pidx == cq->size)
478 cq->sw_pidx = 0;
479}
480
481static inline void t4_swcq_consume(struct t4_cq *cq)
482{
483 cq->sw_in_use--;
484 if (++cq->sw_cidx == cq->size)
485 cq->sw_cidx = 0;
486}
487
488static inline void t4_hwcq_consume(struct t4_cq *cq)
489{
490 cq->cidx_inc++;
491 if (++cq->cidx == cq->size) {
492 cq->cidx = 0;
493 cq->gen ^= 1;
494 }
495}
496
497static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
498{
499 return (CQE_GENBIT(cqe) == cq->gen);
500}
501
502static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
503{
504 int ret = 0;
505 u64 bits_type_ts = be64_to_cpu(cq->queue[cq->cidx].bits_type_ts);
506
507 if (G_CQE_GENBIT(bits_type_ts) == cq->gen) {
508 *cqe = &cq->queue[cq->cidx];
509 cq->timestamp = G_CQE_TS(bits_type_ts);
510 } else if (G_CQE_TS(bits_type_ts) > cq->timestamp)
511 ret = -EOVERFLOW;
512 else
513 ret = -ENODATA;
514 if (ret == -EOVERFLOW) {
515 printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
516 cq->error = 1;
517 }
518 return ret;
519}
520
521static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
522{
523 if (cq->sw_in_use)
524 return &cq->sw_queue[cq->sw_cidx];
525 return NULL;
526}
527
528static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
529{
530 int ret = 0;
531
532 if (cq->error)
533 ret = -ENODATA;
534 else if (cq->sw_in_use)
535 *cqe = &cq->sw_queue[cq->sw_cidx];
536 else
537 ret = t4_next_hw_cqe(cq, cqe);
538 return ret;
539}
540
541static inline int t4_cq_in_error(struct t4_cq *cq)
542{
543 return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err;
544}
545
546static inline void t4_set_cq_in_error(struct t4_cq *cq)
547{
548 ((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
549}
550#endif
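
The send-queue helpers above follow a plain producer/consumer ring: check for space, fill the slot at the producer index, advance with t4_sq_produce(), then ring the doorbell. A minimal sketch of that flow is below; post_one_sqe() and build_wqe() are illustrative names, not part of this patch, and the real post path in qp.c additionally handles multi-slot WRs and completion flags.

/* Sketch only: post one send work request using the helpers above. */
static int post_one_sqe(struct t4_wq *wq, u64 wr_id)
{
	union t4_wr *wqe;

	if (t4_sq_full(wq))
		return -ENOMEM;			/* ring full, caller retries */

	wqe = &wq->sq.queue[wq->sq.pidx];	/* slot at the producer index */
	build_wqe(wqe, wr_id);			/* placeholder: format the WR */
	wq->sq.sw_sq[wq->sq.pidx].wr_id = wr_id; /* remembered for completion */

	t4_sq_produce(wq);			/* bump in_use and pidx */
	t4_ring_sq_db(wq, 1);			/* wmb() + doorbell write */
	return 0;
}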
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
new file mode 100644
index 000000000000..fc706bd07fae
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -0,0 +1,829 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 * - Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials
20 * provided with the distribution.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
25 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
26 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
27 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
28 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29 * SOFTWARE.
30 */
31#ifndef _T4FW_RI_API_H_
32#define _T4FW_RI_API_H_
33
34#include "t4fw_api.h"
35
36enum fw_ri_wr_opcode {
37 FW_RI_RDMA_WRITE = 0x0, /* IETF RDMAP v1.0 ... */
38 FW_RI_READ_REQ = 0x1,
39 FW_RI_READ_RESP = 0x2,
40 FW_RI_SEND = 0x3,
41 FW_RI_SEND_WITH_INV = 0x4,
42 FW_RI_SEND_WITH_SE = 0x5,
43 FW_RI_SEND_WITH_SE_INV = 0x6,
44 FW_RI_TERMINATE = 0x7,
45 FW_RI_RDMA_INIT = 0x8, /* CHELSIO RI specific ... */
46 FW_RI_BIND_MW = 0x9,
47 FW_RI_FAST_REGISTER = 0xa,
48 FW_RI_LOCAL_INV = 0xb,
49 FW_RI_QP_MODIFY = 0xc,
50 FW_RI_BYPASS = 0xd,
51 FW_RI_RECEIVE = 0xe,
52
53 FW_RI_SGE_EC_CR_RETURN = 0xf
54};
55
56enum fw_ri_wr_flags {
57 FW_RI_COMPLETION_FLAG = 0x01,
58 FW_RI_NOTIFICATION_FLAG = 0x02,
59 FW_RI_SOLICITED_EVENT_FLAG = 0x04,
60 FW_RI_READ_FENCE_FLAG = 0x08,
61 FW_RI_LOCAL_FENCE_FLAG = 0x10,
62 FW_RI_RDMA_READ_INVALIDATE = 0x20
63};
64
65enum fw_ri_mpa_attrs {
66 FW_RI_MPA_RX_MARKER_ENABLE = 0x01,
67 FW_RI_MPA_TX_MARKER_ENABLE = 0x02,
68 FW_RI_MPA_CRC_ENABLE = 0x04,
69 FW_RI_MPA_IETF_ENABLE = 0x08
70};
71
72enum fw_ri_qp_caps {
73 FW_RI_QP_RDMA_READ_ENABLE = 0x01,
74 FW_RI_QP_RDMA_WRITE_ENABLE = 0x02,
75 FW_RI_QP_BIND_ENABLE = 0x04,
76 FW_RI_QP_FAST_REGISTER_ENABLE = 0x08,
77 FW_RI_QP_STAG0_ENABLE = 0x10
78};
79
80enum fw_ri_addr_type {
81 FW_RI_ZERO_BASED_TO = 0x00,
82 FW_RI_VA_BASED_TO = 0x01
83};
84
85enum fw_ri_mem_perms {
86 FW_RI_MEM_ACCESS_REM_WRITE = 0x01,
87 FW_RI_MEM_ACCESS_REM_READ = 0x02,
88 FW_RI_MEM_ACCESS_REM = 0x03,
89 FW_RI_MEM_ACCESS_LOCAL_WRITE = 0x04,
90 FW_RI_MEM_ACCESS_LOCAL_READ = 0x08,
91 FW_RI_MEM_ACCESS_LOCAL = 0x0C
92};
93
94enum fw_ri_stag_type {
95 FW_RI_STAG_NSMR = 0x00,
96 FW_RI_STAG_SMR = 0x01,
97 FW_RI_STAG_MW = 0x02,
98 FW_RI_STAG_MW_RELAXED = 0x03
99};
100
101enum fw_ri_data_op {
102 FW_RI_DATA_IMMD = 0x81,
103 FW_RI_DATA_DSGL = 0x82,
104 FW_RI_DATA_ISGL = 0x83
105};
106
107enum fw_ri_sgl_depth {
108 FW_RI_SGL_DEPTH_MAX_SQ = 16,
109 FW_RI_SGL_DEPTH_MAX_RQ = 4
110};
111
112struct fw_ri_dsge_pair {
113 __be32 len[2];
114 __be64 addr[2];
115};
116
117struct fw_ri_dsgl {
118 __u8 op;
119 __u8 r1;
120 __be16 nsge;
121 __be32 len0;
122 __be64 addr0;
123#ifndef C99_NOT_SUPPORTED
124 struct fw_ri_dsge_pair sge[0];
125#endif
126};
127
128struct fw_ri_sge {
129 __be32 stag;
130 __be32 len;
131 __be64 to;
132};
133
134struct fw_ri_isgl {
135 __u8 op;
136 __u8 r1;
137 __be16 nsge;
138 __be32 r2;
139#ifndef C99_NOT_SUPPORTED
140 struct fw_ri_sge sge[0];
141#endif
142};
143
144struct fw_ri_immd {
145 __u8 op;
146 __u8 r1;
147 __be16 r2;
148 __be32 immdlen;
149#ifndef C99_NOT_SUPPORTED
150 __u8 data[0];
151#endif
152};
153
154struct fw_ri_tpte {
155 __be32 valid_to_pdid;
156 __be32 locread_to_qpid;
157 __be32 nosnoop_pbladdr;
158 __be32 len_lo;
159 __be32 va_hi;
160 __be32 va_lo_fbo;
161 __be32 dca_mwbcnt_pstag;
162 __be32 len_hi;
163};
164
165#define S_FW_RI_TPTE_VALID 31
166#define M_FW_RI_TPTE_VALID 0x1
167#define V_FW_RI_TPTE_VALID(x) ((x) << S_FW_RI_TPTE_VALID)
168#define G_FW_RI_TPTE_VALID(x) \
169 (((x) >> S_FW_RI_TPTE_VALID) & M_FW_RI_TPTE_VALID)
170#define F_FW_RI_TPTE_VALID V_FW_RI_TPTE_VALID(1U)
171
172#define S_FW_RI_TPTE_STAGKEY 23
173#define M_FW_RI_TPTE_STAGKEY 0xff
174#define V_FW_RI_TPTE_STAGKEY(x) ((x) << S_FW_RI_TPTE_STAGKEY)
175#define G_FW_RI_TPTE_STAGKEY(x) \
176 (((x) >> S_FW_RI_TPTE_STAGKEY) & M_FW_RI_TPTE_STAGKEY)
177
178#define S_FW_RI_TPTE_STAGSTATE 22
179#define M_FW_RI_TPTE_STAGSTATE 0x1
180#define V_FW_RI_TPTE_STAGSTATE(x) ((x) << S_FW_RI_TPTE_STAGSTATE)
181#define G_FW_RI_TPTE_STAGSTATE(x) \
182 (((x) >> S_FW_RI_TPTE_STAGSTATE) & M_FW_RI_TPTE_STAGSTATE)
183#define F_FW_RI_TPTE_STAGSTATE V_FW_RI_TPTE_STAGSTATE(1U)
184
185#define S_FW_RI_TPTE_STAGTYPE 20
186#define M_FW_RI_TPTE_STAGTYPE 0x3
187#define V_FW_RI_TPTE_STAGTYPE(x) ((x) << S_FW_RI_TPTE_STAGTYPE)
188#define G_FW_RI_TPTE_STAGTYPE(x) \
189 (((x) >> S_FW_RI_TPTE_STAGTYPE) & M_FW_RI_TPTE_STAGTYPE)
190
191#define S_FW_RI_TPTE_PDID 0
192#define M_FW_RI_TPTE_PDID 0xfffff
193#define V_FW_RI_TPTE_PDID(x) ((x) << S_FW_RI_TPTE_PDID)
194#define G_FW_RI_TPTE_PDID(x) \
195 (((x) >> S_FW_RI_TPTE_PDID) & M_FW_RI_TPTE_PDID)
196
197#define S_FW_RI_TPTE_PERM 28
198#define M_FW_RI_TPTE_PERM 0xf
199#define V_FW_RI_TPTE_PERM(x) ((x) << S_FW_RI_TPTE_PERM)
200#define G_FW_RI_TPTE_PERM(x) \
201 (((x) >> S_FW_RI_TPTE_PERM) & M_FW_RI_TPTE_PERM)
202
203#define S_FW_RI_TPTE_REMINVDIS 27
204#define M_FW_RI_TPTE_REMINVDIS 0x1
205#define V_FW_RI_TPTE_REMINVDIS(x) ((x) << S_FW_RI_TPTE_REMINVDIS)
206#define G_FW_RI_TPTE_REMINVDIS(x) \
207 (((x) >> S_FW_RI_TPTE_REMINVDIS) & M_FW_RI_TPTE_REMINVDIS)
208#define F_FW_RI_TPTE_REMINVDIS V_FW_RI_TPTE_REMINVDIS(1U)
209
210#define S_FW_RI_TPTE_ADDRTYPE 26
211#define M_FW_RI_TPTE_ADDRTYPE 1
212#define V_FW_RI_TPTE_ADDRTYPE(x) ((x) << S_FW_RI_TPTE_ADDRTYPE)
213#define G_FW_RI_TPTE_ADDRTYPE(x) \
214 (((x) >> S_FW_RI_TPTE_ADDRTYPE) & M_FW_RI_TPTE_ADDRTYPE)
215#define F_FW_RI_TPTE_ADDRTYPE V_FW_RI_TPTE_ADDRTYPE(1U)
216
217#define S_FW_RI_TPTE_MWBINDEN 25
218#define M_FW_RI_TPTE_MWBINDEN 0x1
219#define V_FW_RI_TPTE_MWBINDEN(x) ((x) << S_FW_RI_TPTE_MWBINDEN)
220#define G_FW_RI_TPTE_MWBINDEN(x) \
221 (((x) >> S_FW_RI_TPTE_MWBINDEN) & M_FW_RI_TPTE_MWBINDEN)
222#define F_FW_RI_TPTE_MWBINDEN V_FW_RI_TPTE_MWBINDEN(1U)
223
224#define S_FW_RI_TPTE_PS 20
225#define M_FW_RI_TPTE_PS 0x1f
226#define V_FW_RI_TPTE_PS(x) ((x) << S_FW_RI_TPTE_PS)
227#define G_FW_RI_TPTE_PS(x) \
228 (((x) >> S_FW_RI_TPTE_PS) & M_FW_RI_TPTE_PS)
229
230#define S_FW_RI_TPTE_QPID 0
231#define M_FW_RI_TPTE_QPID 0xfffff
232#define V_FW_RI_TPTE_QPID(x) ((x) << S_FW_RI_TPTE_QPID)
233#define G_FW_RI_TPTE_QPID(x) \
234 (((x) >> S_FW_RI_TPTE_QPID) & M_FW_RI_TPTE_QPID)
235
236#define S_FW_RI_TPTE_NOSNOOP 30
237#define M_FW_RI_TPTE_NOSNOOP 0x1
238#define V_FW_RI_TPTE_NOSNOOP(x) ((x) << S_FW_RI_TPTE_NOSNOOP)
239#define G_FW_RI_TPTE_NOSNOOP(x) \
240 (((x) >> S_FW_RI_TPTE_NOSNOOP) & M_FW_RI_TPTE_NOSNOOP)
241#define F_FW_RI_TPTE_NOSNOOP V_FW_RI_TPTE_NOSNOOP(1U)
242
243#define S_FW_RI_TPTE_PBLADDR 0
244#define M_FW_RI_TPTE_PBLADDR 0x1fffffff
245#define V_FW_RI_TPTE_PBLADDR(x) ((x) << S_FW_RI_TPTE_PBLADDR)
246#define G_FW_RI_TPTE_PBLADDR(x) \
247 (((x) >> S_FW_RI_TPTE_PBLADDR) & M_FW_RI_TPTE_PBLADDR)
248
249#define S_FW_RI_TPTE_DCA 24
250#define M_FW_RI_TPTE_DCA 0x1f
251#define V_FW_RI_TPTE_DCA(x) ((x) << S_FW_RI_TPTE_DCA)
252#define G_FW_RI_TPTE_DCA(x) \
253 (((x) >> S_FW_RI_TPTE_DCA) & M_FW_RI_TPTE_DCA)
254
255#define S_FW_RI_TPTE_MWBCNT_PSTAG 0
256#define M_FW_RI_TPTE_MWBCNT_PSTAG 0xffffff
257#define V_FW_RI_TPTE_MWBCNT_PSTAT(x) \
258 ((x) << S_FW_RI_TPTE_MWBCNT_PSTAG)
259#define G_FW_RI_TPTE_MWBCNT_PSTAG(x) \
260 (((x) >> S_FW_RI_TPTE_MWBCNT_PSTAG) & M_FW_RI_TPTE_MWBCNT_PSTAG)
261
262enum fw_ri_res_type {
263 FW_RI_RES_TYPE_SQ,
264 FW_RI_RES_TYPE_RQ,
265 FW_RI_RES_TYPE_CQ,
266};
267
268enum fw_ri_res_op {
269 FW_RI_RES_OP_WRITE,
270 FW_RI_RES_OP_RESET,
271};
272
273struct fw_ri_res {
274 union fw_ri_restype {
275 struct fw_ri_res_sqrq {
276 __u8 restype;
277 __u8 op;
278 __be16 r3;
279 __be32 eqid;
280 __be32 r4[2];
281 __be32 fetchszm_to_iqid;
282 __be32 dcaen_to_eqsize;
283 __be64 eqaddr;
284 } sqrq;
285 struct fw_ri_res_cq {
286 __u8 restype;
287 __u8 op;
288 __be16 r3;
289 __be32 iqid;
290 __be32 r4[2];
291 __be32 iqandst_to_iqandstindex;
292 __be16 iqdroprss_to_iqesize;
293 __be16 iqsize;
294 __be64 iqaddr;
295 __be32 iqns_iqro;
296 __be32 r6_lo;
297 __be64 r7;
298 } cq;
299 } u;
300};
301
302struct fw_ri_res_wr {
303 __be32 op_nres;
304 __be32 len16_pkd;
305 __u64 cookie;
306#ifndef C99_NOT_SUPPORTED
307 struct fw_ri_res res[0];
308#endif
309};
310
311#define S_FW_RI_RES_WR_NRES 0
312#define M_FW_RI_RES_WR_NRES 0xff
313#define V_FW_RI_RES_WR_NRES(x) ((x) << S_FW_RI_RES_WR_NRES)
314#define G_FW_RI_RES_WR_NRES(x) \
315 (((x) >> S_FW_RI_RES_WR_NRES) & M_FW_RI_RES_WR_NRES)
316
317#define S_FW_RI_RES_WR_FETCHSZM 26
318#define M_FW_RI_RES_WR_FETCHSZM 0x1
319#define V_FW_RI_RES_WR_FETCHSZM(x) ((x) << S_FW_RI_RES_WR_FETCHSZM)
320#define G_FW_RI_RES_WR_FETCHSZM(x) \
321 (((x) >> S_FW_RI_RES_WR_FETCHSZM) & M_FW_RI_RES_WR_FETCHSZM)
322#define F_FW_RI_RES_WR_FETCHSZM V_FW_RI_RES_WR_FETCHSZM(1U)
323
324#define S_FW_RI_RES_WR_STATUSPGNS 25
325#define M_FW_RI_RES_WR_STATUSPGNS 0x1
326#define V_FW_RI_RES_WR_STATUSPGNS(x) ((x) << S_FW_RI_RES_WR_STATUSPGNS)
327#define G_FW_RI_RES_WR_STATUSPGNS(x) \
328 (((x) >> S_FW_RI_RES_WR_STATUSPGNS) & M_FW_RI_RES_WR_STATUSPGNS)
329#define F_FW_RI_RES_WR_STATUSPGNS V_FW_RI_RES_WR_STATUSPGNS(1U)
330
331#define S_FW_RI_RES_WR_STATUSPGRO 24
332#define M_FW_RI_RES_WR_STATUSPGRO 0x1
333#define V_FW_RI_RES_WR_STATUSPGRO(x) ((x) << S_FW_RI_RES_WR_STATUSPGRO)
334#define G_FW_RI_RES_WR_STATUSPGRO(x) \
335 (((x) >> S_FW_RI_RES_WR_STATUSPGRO) & M_FW_RI_RES_WR_STATUSPGRO)
336#define F_FW_RI_RES_WR_STATUSPGRO V_FW_RI_RES_WR_STATUSPGRO(1U)
337
338#define S_FW_RI_RES_WR_FETCHNS 23
339#define M_FW_RI_RES_WR_FETCHNS 0x1
340#define V_FW_RI_RES_WR_FETCHNS(x) ((x) << S_FW_RI_RES_WR_FETCHNS)
341#define G_FW_RI_RES_WR_FETCHNS(x) \
342 (((x) >> S_FW_RI_RES_WR_FETCHNS) & M_FW_RI_RES_WR_FETCHNS)
343#define F_FW_RI_RES_WR_FETCHNS V_FW_RI_RES_WR_FETCHNS(1U)
344
345#define S_FW_RI_RES_WR_FETCHRO 22
346#define M_FW_RI_RES_WR_FETCHRO 0x1
347#define V_FW_RI_RES_WR_FETCHRO(x) ((x) << S_FW_RI_RES_WR_FETCHRO)
348#define G_FW_RI_RES_WR_FETCHRO(x) \
349 (((x) >> S_FW_RI_RES_WR_FETCHRO) & M_FW_RI_RES_WR_FETCHRO)
350#define F_FW_RI_RES_WR_FETCHRO V_FW_RI_RES_WR_FETCHRO(1U)
351
352#define S_FW_RI_RES_WR_HOSTFCMODE 20
353#define M_FW_RI_RES_WR_HOSTFCMODE 0x3
354#define V_FW_RI_RES_WR_HOSTFCMODE(x) ((x) << S_FW_RI_RES_WR_HOSTFCMODE)
355#define G_FW_RI_RES_WR_HOSTFCMODE(x) \
356 (((x) >> S_FW_RI_RES_WR_HOSTFCMODE) & M_FW_RI_RES_WR_HOSTFCMODE)
357
358#define S_FW_RI_RES_WR_CPRIO 19
359#define M_FW_RI_RES_WR_CPRIO 0x1
360#define V_FW_RI_RES_WR_CPRIO(x) ((x) << S_FW_RI_RES_WR_CPRIO)
361#define G_FW_RI_RES_WR_CPRIO(x) \
362 (((x) >> S_FW_RI_RES_WR_CPRIO) & M_FW_RI_RES_WR_CPRIO)
363#define F_FW_RI_RES_WR_CPRIO V_FW_RI_RES_WR_CPRIO(1U)
364
365#define S_FW_RI_RES_WR_ONCHIP 18
366#define M_FW_RI_RES_WR_ONCHIP 0x1
367#define V_FW_RI_RES_WR_ONCHIP(x) ((x) << S_FW_RI_RES_WR_ONCHIP)
368#define G_FW_RI_RES_WR_ONCHIP(x) \
369 (((x) >> S_FW_RI_RES_WR_ONCHIP) & M_FW_RI_RES_WR_ONCHIP)
370#define F_FW_RI_RES_WR_ONCHIP V_FW_RI_RES_WR_ONCHIP(1U)
371
372#define S_FW_RI_RES_WR_PCIECHN 16
373#define M_FW_RI_RES_WR_PCIECHN 0x3
374#define V_FW_RI_RES_WR_PCIECHN(x) ((x) << S_FW_RI_RES_WR_PCIECHN)
375#define G_FW_RI_RES_WR_PCIECHN(x) \
376 (((x) >> S_FW_RI_RES_WR_PCIECHN) & M_FW_RI_RES_WR_PCIECHN)
377
378#define S_FW_RI_RES_WR_IQID 0
379#define M_FW_RI_RES_WR_IQID 0xffff
380#define V_FW_RI_RES_WR_IQID(x) ((x) << S_FW_RI_RES_WR_IQID)
381#define G_FW_RI_RES_WR_IQID(x) \
382 (((x) >> S_FW_RI_RES_WR_IQID) & M_FW_RI_RES_WR_IQID)
383
384#define S_FW_RI_RES_WR_DCAEN 31
385#define M_FW_RI_RES_WR_DCAEN 0x1
386#define V_FW_RI_RES_WR_DCAEN(x) ((x) << S_FW_RI_RES_WR_DCAEN)
387#define G_FW_RI_RES_WR_DCAEN(x) \
388 (((x) >> S_FW_RI_RES_WR_DCAEN) & M_FW_RI_RES_WR_DCAEN)
389#define F_FW_RI_RES_WR_DCAEN V_FW_RI_RES_WR_DCAEN(1U)
390
391#define S_FW_RI_RES_WR_DCACPU 26
392#define M_FW_RI_RES_WR_DCACPU 0x1f
393#define V_FW_RI_RES_WR_DCACPU(x) ((x) << S_FW_RI_RES_WR_DCACPU)
394#define G_FW_RI_RES_WR_DCACPU(x) \
395 (((x) >> S_FW_RI_RES_WR_DCACPU) & M_FW_RI_RES_WR_DCACPU)
396
397#define S_FW_RI_RES_WR_FBMIN 23
398#define M_FW_RI_RES_WR_FBMIN 0x7
399#define V_FW_RI_RES_WR_FBMIN(x) ((x) << S_FW_RI_RES_WR_FBMIN)
400#define G_FW_RI_RES_WR_FBMIN(x) \
401 (((x) >> S_FW_RI_RES_WR_FBMIN) & M_FW_RI_RES_WR_FBMIN)
402
403#define S_FW_RI_RES_WR_FBMAX 20
404#define M_FW_RI_RES_WR_FBMAX 0x7
405#define V_FW_RI_RES_WR_FBMAX(x) ((x) << S_FW_RI_RES_WR_FBMAX)
406#define G_FW_RI_RES_WR_FBMAX(x) \
407 (((x) >> S_FW_RI_RES_WR_FBMAX) & M_FW_RI_RES_WR_FBMAX)
408
409#define S_FW_RI_RES_WR_CIDXFTHRESHO 19
410#define M_FW_RI_RES_WR_CIDXFTHRESHO 0x1
411#define V_FW_RI_RES_WR_CIDXFTHRESHO(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESHO)
412#define G_FW_RI_RES_WR_CIDXFTHRESHO(x) \
413 (((x) >> S_FW_RI_RES_WR_CIDXFTHRESHO) & M_FW_RI_RES_WR_CIDXFTHRESHO)
414#define F_FW_RI_RES_WR_CIDXFTHRESHO V_FW_RI_RES_WR_CIDXFTHRESHO(1U)
415
416#define S_FW_RI_RES_WR_CIDXFTHRESH 16
417#define M_FW_RI_RES_WR_CIDXFTHRESH 0x7
418#define V_FW_RI_RES_WR_CIDXFTHRESH(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESH)
419#define G_FW_RI_RES_WR_CIDXFTHRESH(x) \
420 (((x) >> S_FW_RI_RES_WR_CIDXFTHRESH) & M_FW_RI_RES_WR_CIDXFTHRESH)
421
422#define S_FW_RI_RES_WR_EQSIZE 0
423#define M_FW_RI_RES_WR_EQSIZE 0xffff
424#define V_FW_RI_RES_WR_EQSIZE(x) ((x) << S_FW_RI_RES_WR_EQSIZE)
425#define G_FW_RI_RES_WR_EQSIZE(x) \
426 (((x) >> S_FW_RI_RES_WR_EQSIZE) & M_FW_RI_RES_WR_EQSIZE)
427
428#define S_FW_RI_RES_WR_IQANDST 15
429#define M_FW_RI_RES_WR_IQANDST 0x1
430#define V_FW_RI_RES_WR_IQANDST(x) ((x) << S_FW_RI_RES_WR_IQANDST)
431#define G_FW_RI_RES_WR_IQANDST(x) \
432 (((x) >> S_FW_RI_RES_WR_IQANDST) & M_FW_RI_RES_WR_IQANDST)
433#define F_FW_RI_RES_WR_IQANDST V_FW_RI_RES_WR_IQANDST(1U)
434
435#define S_FW_RI_RES_WR_IQANUS 14
436#define M_FW_RI_RES_WR_IQANUS 0x1
437#define V_FW_RI_RES_WR_IQANUS(x) ((x) << S_FW_RI_RES_WR_IQANUS)
438#define G_FW_RI_RES_WR_IQANUS(x) \
439 (((x) >> S_FW_RI_RES_WR_IQANUS) & M_FW_RI_RES_WR_IQANUS)
440#define F_FW_RI_RES_WR_IQANUS V_FW_RI_RES_WR_IQANUS(1U)
441
442#define S_FW_RI_RES_WR_IQANUD 12
443#define M_FW_RI_RES_WR_IQANUD 0x3
444#define V_FW_RI_RES_WR_IQANUD(x) ((x) << S_FW_RI_RES_WR_IQANUD)
445#define G_FW_RI_RES_WR_IQANUD(x) \
446 (((x) >> S_FW_RI_RES_WR_IQANUD) & M_FW_RI_RES_WR_IQANUD)
447
448#define S_FW_RI_RES_WR_IQANDSTINDEX 0
449#define M_FW_RI_RES_WR_IQANDSTINDEX 0xfff
450#define V_FW_RI_RES_WR_IQANDSTINDEX(x) ((x) << S_FW_RI_RES_WR_IQANDSTINDEX)
451#define G_FW_RI_RES_WR_IQANDSTINDEX(x) \
452 (((x) >> S_FW_RI_RES_WR_IQANDSTINDEX) & M_FW_RI_RES_WR_IQANDSTINDEX)
453
454#define S_FW_RI_RES_WR_IQDROPRSS 15
455#define M_FW_RI_RES_WR_IQDROPRSS 0x1
456#define V_FW_RI_RES_WR_IQDROPRSS(x) ((x) << S_FW_RI_RES_WR_IQDROPRSS)
457#define G_FW_RI_RES_WR_IQDROPRSS(x) \
458 (((x) >> S_FW_RI_RES_WR_IQDROPRSS) & M_FW_RI_RES_WR_IQDROPRSS)
459#define F_FW_RI_RES_WR_IQDROPRSS V_FW_RI_RES_WR_IQDROPRSS(1U)
460
461#define S_FW_RI_RES_WR_IQGTSMODE 14
462#define M_FW_RI_RES_WR_IQGTSMODE 0x1
463#define V_FW_RI_RES_WR_IQGTSMODE(x) ((x) << S_FW_RI_RES_WR_IQGTSMODE)
464#define G_FW_RI_RES_WR_IQGTSMODE(x) \
465 (((x) >> S_FW_RI_RES_WR_IQGTSMODE) & M_FW_RI_RES_WR_IQGTSMODE)
466#define F_FW_RI_RES_WR_IQGTSMODE V_FW_RI_RES_WR_IQGTSMODE(1U)
467
468#define S_FW_RI_RES_WR_IQPCIECH 12
469#define M_FW_RI_RES_WR_IQPCIECH 0x3
470#define V_FW_RI_RES_WR_IQPCIECH(x) ((x) << S_FW_RI_RES_WR_IQPCIECH)
471#define G_FW_RI_RES_WR_IQPCIECH(x) \
472 (((x) >> S_FW_RI_RES_WR_IQPCIECH) & M_FW_RI_RES_WR_IQPCIECH)
473
474#define S_FW_RI_RES_WR_IQDCAEN 11
475#define M_FW_RI_RES_WR_IQDCAEN 0x1
476#define V_FW_RI_RES_WR_IQDCAEN(x) ((x) << S_FW_RI_RES_WR_IQDCAEN)
477#define G_FW_RI_RES_WR_IQDCAEN(x) \
478 (((x) >> S_FW_RI_RES_WR_IQDCAEN) & M_FW_RI_RES_WR_IQDCAEN)
479#define F_FW_RI_RES_WR_IQDCAEN V_FW_RI_RES_WR_IQDCAEN(1U)
480
481#define S_FW_RI_RES_WR_IQDCACPU 6
482#define M_FW_RI_RES_WR_IQDCACPU 0x1f
483#define V_FW_RI_RES_WR_IQDCACPU(x) ((x) << S_FW_RI_RES_WR_IQDCACPU)
484#define G_FW_RI_RES_WR_IQDCACPU(x) \
485 (((x) >> S_FW_RI_RES_WR_IQDCACPU) & M_FW_RI_RES_WR_IQDCACPU)
486
487#define S_FW_RI_RES_WR_IQINTCNTTHRESH 4
488#define M_FW_RI_RES_WR_IQINTCNTTHRESH 0x3
489#define V_FW_RI_RES_WR_IQINTCNTTHRESH(x) \
490 ((x) << S_FW_RI_RES_WR_IQINTCNTTHRESH)
491#define G_FW_RI_RES_WR_IQINTCNTTHRESH(x) \
492 (((x) >> S_FW_RI_RES_WR_IQINTCNTTHRESH) & M_FW_RI_RES_WR_IQINTCNTTHRESH)
493
494#define S_FW_RI_RES_WR_IQO 3
495#define M_FW_RI_RES_WR_IQO 0x1
496#define V_FW_RI_RES_WR_IQO(x) ((x) << S_FW_RI_RES_WR_IQO)
497#define G_FW_RI_RES_WR_IQO(x) \
498 (((x) >> S_FW_RI_RES_WR_IQO) & M_FW_RI_RES_WR_IQO)
499#define F_FW_RI_RES_WR_IQO V_FW_RI_RES_WR_IQO(1U)
500
501#define S_FW_RI_RES_WR_IQCPRIO 2
502#define M_FW_RI_RES_WR_IQCPRIO 0x1
503#define V_FW_RI_RES_WR_IQCPRIO(x) ((x) << S_FW_RI_RES_WR_IQCPRIO)
504#define G_FW_RI_RES_WR_IQCPRIO(x) \
505 (((x) >> S_FW_RI_RES_WR_IQCPRIO) & M_FW_RI_RES_WR_IQCPRIO)
506#define F_FW_RI_RES_WR_IQCPRIO V_FW_RI_RES_WR_IQCPRIO(1U)
507
508#define S_FW_RI_RES_WR_IQESIZE 0
509#define M_FW_RI_RES_WR_IQESIZE 0x3
510#define V_FW_RI_RES_WR_IQESIZE(x) ((x) << S_FW_RI_RES_WR_IQESIZE)
511#define G_FW_RI_RES_WR_IQESIZE(x) \
512 (((x) >> S_FW_RI_RES_WR_IQESIZE) & M_FW_RI_RES_WR_IQESIZE)
513
514#define S_FW_RI_RES_WR_IQNS 31
515#define M_FW_RI_RES_WR_IQNS 0x1
516#define V_FW_RI_RES_WR_IQNS(x) ((x) << S_FW_RI_RES_WR_IQNS)
517#define G_FW_RI_RES_WR_IQNS(x) \
518 (((x) >> S_FW_RI_RES_WR_IQNS) & M_FW_RI_RES_WR_IQNS)
519#define F_FW_RI_RES_WR_IQNS V_FW_RI_RES_WR_IQNS(1U)
520
521#define S_FW_RI_RES_WR_IQRO 30
522#define M_FW_RI_RES_WR_IQRO 0x1
523#define V_FW_RI_RES_WR_IQRO(x) ((x) << S_FW_RI_RES_WR_IQRO)
524#define G_FW_RI_RES_WR_IQRO(x) \
525 (((x) >> S_FW_RI_RES_WR_IQRO) & M_FW_RI_RES_WR_IQRO)
526#define F_FW_RI_RES_WR_IQRO V_FW_RI_RES_WR_IQRO(1U)
527
528struct fw_ri_rdma_write_wr {
529 __u8 opcode;
530 __u8 flags;
531 __u16 wrid;
532 __u8 r1[3];
533 __u8 len16;
534 __be64 r2;
535 __be32 plen;
536 __be32 stag_sink;
537 __be64 to_sink;
538#ifndef C99_NOT_SUPPORTED
539 union {
540 struct fw_ri_immd immd_src[0];
541 struct fw_ri_isgl isgl_src[0];
542 } u;
543#endif
544};
545
546struct fw_ri_send_wr {
547 __u8 opcode;
548 __u8 flags;
549 __u16 wrid;
550 __u8 r1[3];
551 __u8 len16;
552 __be32 sendop_pkd;
553 __be32 stag_inv;
554 __be32 plen;
555 __be32 r3;
556 __be64 r4;
557#ifndef C99_NOT_SUPPORTED
558 union {
559 struct fw_ri_immd immd_src[0];
560 struct fw_ri_isgl isgl_src[0];
561 } u;
562#endif
563};
564
565#define S_FW_RI_SEND_WR_SENDOP 0
566#define M_FW_RI_SEND_WR_SENDOP 0xf
567#define V_FW_RI_SEND_WR_SENDOP(x) ((x) << S_FW_RI_SEND_WR_SENDOP)
568#define G_FW_RI_SEND_WR_SENDOP(x) \
569 (((x) >> S_FW_RI_SEND_WR_SENDOP) & M_FW_RI_SEND_WR_SENDOP)
570
571struct fw_ri_rdma_read_wr {
572 __u8 opcode;
573 __u8 flags;
574 __u16 wrid;
575 __u8 r1[3];
576 __u8 len16;
577 __be64 r2;
578 __be32 stag_sink;
579 __be32 to_sink_hi;
580 __be32 to_sink_lo;
581 __be32 plen;
582 __be32 stag_src;
583 __be32 to_src_hi;
584 __be32 to_src_lo;
585 __be32 r5;
586};
587
588struct fw_ri_recv_wr {
589 __u8 opcode;
590 __u8 r1;
591 __u16 wrid;
592 __u8 r2[3];
593 __u8 len16;
594 struct fw_ri_isgl isgl;
595};
596
597struct fw_ri_bind_mw_wr {
598 __u8 opcode;
599 __u8 flags;
600 __u16 wrid;
601 __u8 r1[3];
602 __u8 len16;
603 __u8 qpbinde_to_dcacpu;
604 __u8 pgsz_shift;
605 __u8 addr_type;
606 __u8 mem_perms;
607 __be32 stag_mr;
608 __be32 stag_mw;
609 __be32 r3;
610 __be64 len_mw;
611 __be64 va_fbo;
612 __be64 r4;
613};
614
615#define S_FW_RI_BIND_MW_WR_QPBINDE 6
616#define M_FW_RI_BIND_MW_WR_QPBINDE 0x1
617#define V_FW_RI_BIND_MW_WR_QPBINDE(x) ((x) << S_FW_RI_BIND_MW_WR_QPBINDE)
618#define G_FW_RI_BIND_MW_WR_QPBINDE(x) \
619 (((x) >> S_FW_RI_BIND_MW_WR_QPBINDE) & M_FW_RI_BIND_MW_WR_QPBINDE)
620#define F_FW_RI_BIND_MW_WR_QPBINDE V_FW_RI_BIND_MW_WR_QPBINDE(1U)
621
622#define S_FW_RI_BIND_MW_WR_NS 5
623#define M_FW_RI_BIND_MW_WR_NS 0x1
624#define V_FW_RI_BIND_MW_WR_NS(x) ((x) << S_FW_RI_BIND_MW_WR_NS)
625#define G_FW_RI_BIND_MW_WR_NS(x) \
626 (((x) >> S_FW_RI_BIND_MW_WR_NS) & M_FW_RI_BIND_MW_WR_NS)
627#define F_FW_RI_BIND_MW_WR_NS V_FW_RI_BIND_MW_WR_NS(1U)
628
629#define S_FW_RI_BIND_MW_WR_DCACPU 0
630#define M_FW_RI_BIND_MW_WR_DCACPU 0x1f
631#define V_FW_RI_BIND_MW_WR_DCACPU(x) ((x) << S_FW_RI_BIND_MW_WR_DCACPU)
632#define G_FW_RI_BIND_MW_WR_DCACPU(x) \
633 (((x) >> S_FW_RI_BIND_MW_WR_DCACPU) & M_FW_RI_BIND_MW_WR_DCACPU)
634
635struct fw_ri_fr_nsmr_wr {
636 __u8 opcode;
637 __u8 flags;
638 __u16 wrid;
639 __u8 r1[3];
640 __u8 len16;
641 __u8 qpbinde_to_dcacpu;
642 __u8 pgsz_shift;
643 __u8 addr_type;
644 __u8 mem_perms;
645 __be32 stag;
646 __be32 len_hi;
647 __be32 len_lo;
648 __be32 va_hi;
649 __be32 va_lo_fbo;
650};
651
652#define S_FW_RI_FR_NSMR_WR_QPBINDE 6
653#define M_FW_RI_FR_NSMR_WR_QPBINDE 0x1
654#define V_FW_RI_FR_NSMR_WR_QPBINDE(x) ((x) << S_FW_RI_FR_NSMR_WR_QPBINDE)
655#define G_FW_RI_FR_NSMR_WR_QPBINDE(x) \
656 (((x) >> S_FW_RI_FR_NSMR_WR_QPBINDE) & M_FW_RI_FR_NSMR_WR_QPBINDE)
657#define F_FW_RI_FR_NSMR_WR_QPBINDE V_FW_RI_FR_NSMR_WR_QPBINDE(1U)
658
659#define S_FW_RI_FR_NSMR_WR_NS 5
660#define M_FW_RI_FR_NSMR_WR_NS 0x1
661#define V_FW_RI_FR_NSMR_WR_NS(x) ((x) << S_FW_RI_FR_NSMR_WR_NS)
662#define G_FW_RI_FR_NSMR_WR_NS(x) \
663 (((x) >> S_FW_RI_FR_NSMR_WR_NS) & M_FW_RI_FR_NSMR_WR_NS)
664#define F_FW_RI_FR_NSMR_WR_NS V_FW_RI_FR_NSMR_WR_NS(1U)
665
666#define S_FW_RI_FR_NSMR_WR_DCACPU 0
667#define M_FW_RI_FR_NSMR_WR_DCACPU 0x1f
668#define V_FW_RI_FR_NSMR_WR_DCACPU(x) ((x) << S_FW_RI_FR_NSMR_WR_DCACPU)
669#define G_FW_RI_FR_NSMR_WR_DCACPU(x) \
670 (((x) >> S_FW_RI_FR_NSMR_WR_DCACPU) & M_FW_RI_FR_NSMR_WR_DCACPU)
671
672struct fw_ri_inv_lstag_wr {
673 __u8 opcode;
674 __u8 flags;
675 __u16 wrid;
676 __u8 r1[3];
677 __u8 len16;
678 __be32 r2;
679 __be32 stag_inv;
680};
681
682enum fw_ri_type {
683 FW_RI_TYPE_INIT,
684 FW_RI_TYPE_FINI,
685 FW_RI_TYPE_TERMINATE
686};
687
688enum fw_ri_init_p2ptype {
689 FW_RI_INIT_P2PTYPE_RDMA_WRITE = FW_RI_RDMA_WRITE,
690 FW_RI_INIT_P2PTYPE_READ_REQ = FW_RI_READ_REQ,
691 FW_RI_INIT_P2PTYPE_SEND = FW_RI_SEND,
692 FW_RI_INIT_P2PTYPE_SEND_WITH_INV = FW_RI_SEND_WITH_INV,
693 FW_RI_INIT_P2PTYPE_SEND_WITH_SE = FW_RI_SEND_WITH_SE,
694 FW_RI_INIT_P2PTYPE_SEND_WITH_SE_INV = FW_RI_SEND_WITH_SE_INV,
695 FW_RI_INIT_P2PTYPE_DISABLED = 0xf,
696};
697
698struct fw_ri_wr {
699 __be32 op_compl;
700 __be32 flowid_len16;
701 __u64 cookie;
702 union fw_ri {
703 struct fw_ri_init {
704 __u8 type;
705 __u8 mpareqbit_p2ptype;
706 __u8 r4[2];
707 __u8 mpa_attrs;
708 __u8 qp_caps;
709 __be16 nrqe;
710 __be32 pdid;
711 __be32 qpid;
712 __be32 sq_eqid;
713 __be32 rq_eqid;
714 __be32 scqid;
715 __be32 rcqid;
716 __be32 ord_max;
717 __be32 ird_max;
718 __be32 iss;
719 __be32 irs;
720 __be32 hwrqsize;
721 __be32 hwrqaddr;
722 __be64 r5;
723 union fw_ri_init_p2p {
724 struct fw_ri_rdma_write_wr write;
725 struct fw_ri_rdma_read_wr read;
726 struct fw_ri_send_wr send;
727 } u;
728 } init;
729 struct fw_ri_fini {
730 __u8 type;
731 __u8 r3[7];
732 __be64 r4;
733 } fini;
734 struct fw_ri_terminate {
735 __u8 type;
736 __u8 r3[3];
737 __be32 immdlen;
738 __u8 termmsg[40];
739 } terminate;
740 } u;
741};
742
743#define S_FW_RI_WR_MPAREQBIT 7
744#define M_FW_RI_WR_MPAREQBIT 0x1
745#define V_FW_RI_WR_MPAREQBIT(x) ((x) << S_FW_RI_WR_MPAREQBIT)
746#define G_FW_RI_WR_MPAREQBIT(x) \
747 (((x) >> S_FW_RI_WR_MPAREQBIT) & M_FW_RI_WR_MPAREQBIT)
748#define F_FW_RI_WR_MPAREQBIT V_FW_RI_WR_MPAREQBIT(1U)
749
750#define S_FW_RI_WR_P2PTYPE 0
751#define M_FW_RI_WR_P2PTYPE 0xf
752#define V_FW_RI_WR_P2PTYPE(x) ((x) << S_FW_RI_WR_P2PTYPE)
753#define G_FW_RI_WR_P2PTYPE(x) \
754 (((x) >> S_FW_RI_WR_P2PTYPE) & M_FW_RI_WR_P2PTYPE)
755
756struct tcp_options {
757 __be16 mss;
758 __u8 wsf;
759#if defined(__LITTLE_ENDIAN_BITFIELD)
760 __u8:4;
761 __u8 unknown:1;
762 __u8:1;
763 __u8 sack:1;
764 __u8 tstamp:1;
765#else
766 __u8 tstamp:1;
767 __u8 sack:1;
768 __u8:1;
769 __u8 unknown:1;
770 __u8:4;
771#endif
772};
773
774struct cpl_pass_accept_req {
775 union opcode_tid ot;
776 __be16 rsvd;
777 __be16 len;
778 __be32 hdr_len;
779 __be16 vlan;
780 __be16 l2info;
781 __be32 tos_stid;
782 struct tcp_options tcpopt;
783};
784
785/* cpl_pass_accept_req.hdr_len fields */
786#define S_SYN_RX_CHAN 0
787#define M_SYN_RX_CHAN 0xF
788#define V_SYN_RX_CHAN(x) ((x) << S_SYN_RX_CHAN)
789#define G_SYN_RX_CHAN(x) (((x) >> S_SYN_RX_CHAN) & M_SYN_RX_CHAN)
790
791#define S_TCP_HDR_LEN 10
792#define M_TCP_HDR_LEN 0x3F
793#define V_TCP_HDR_LEN(x) ((x) << S_TCP_HDR_LEN)
794#define G_TCP_HDR_LEN(x) (((x) >> S_TCP_HDR_LEN) & M_TCP_HDR_LEN)
795
796#define S_IP_HDR_LEN 16
797#define M_IP_HDR_LEN 0x3FF
798#define V_IP_HDR_LEN(x) ((x) << S_IP_HDR_LEN)
799#define G_IP_HDR_LEN(x) (((x) >> S_IP_HDR_LEN) & M_IP_HDR_LEN)
800
801#define S_ETH_HDR_LEN 26
802#define M_ETH_HDR_LEN 0x1F
803#define V_ETH_HDR_LEN(x) ((x) << S_ETH_HDR_LEN)
804#define G_ETH_HDR_LEN(x) (((x) >> S_ETH_HDR_LEN) & M_ETH_HDR_LEN)
805
806/* cpl_pass_accept_req.l2info fields */
807#define S_SYN_MAC_IDX 0
808#define M_SYN_MAC_IDX 0x1FF
809#define V_SYN_MAC_IDX(x) ((x) << S_SYN_MAC_IDX)
810#define G_SYN_MAC_IDX(x) (((x) >> S_SYN_MAC_IDX) & M_SYN_MAC_IDX)
811
812#define S_SYN_XACT_MATCH 9
813#define V_SYN_XACT_MATCH(x) ((x) << S_SYN_XACT_MATCH)
814#define F_SYN_XACT_MATCH V_SYN_XACT_MATCH(1U)
815
816#define S_SYN_INTF 12
817#define M_SYN_INTF 0xF
818#define V_SYN_INTF(x) ((x) << S_SYN_INTF)
819#define G_SYN_INTF(x) (((x) >> S_SYN_INTF) & M_SYN_INTF)
820
821struct ulptx_idata {
822 __be32 cmd_more;
823 __be32 len;
824};
825
826#define S_ULPTX_NSGE 0
827#define M_ULPTX_NSGE 0xFFFF
828#define V_ULPTX_NSGE(x) ((x) << S_ULPTX_NSGE)
829#endif /* _T4FW_RI_API_H_ */
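
Every field in this header follows the same S_/M_/V_/G_ (and, for single bits, F_) convention: S_* is the shift, M_* the unshifted mask, V_*(x) places a value into the field, and G_*(x) extracts it from a host-order word. As a concrete illustration, packing and decoding cpl_pass_accept_req.hdr_len might look like the sketch below; the numeric values are made up, and in practice the hardware fills this word in so the driver only uses the G_*() side.

/* Illustration only: encode and decode the hdr_len word. */
static void hdr_len_example(struct cpl_pass_accept_req *req)
{
	u32 hdr_len = V_ETH_HDR_LEN(14) | V_IP_HDR_LEN(20) |
		      V_TCP_HDR_LEN(20) | V_SYN_RX_CHAN(0);

	req->hdr_len = cpu_to_be32(hdr_len);		/* encode */

	hdr_len = be32_to_cpu(req->hdr_len);		/* decode */
	pr_info("eth %u ip %u tcp %u chan %u\n",
		G_ETH_HDR_LEN(hdr_len), G_IP_HDR_LEN(hdr_len),
		G_TCP_HDR_LEN(hdr_len), G_SYN_RX_CHAN(hdr_len));
}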
diff --git a/drivers/infiniband/hw/cxgb4/user.h b/drivers/infiniband/hw/cxgb4/user.h
new file mode 100644
index 000000000000..ed6414abde02
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/user.h
@@ -0,0 +1,66 @@
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef __C4IW_USER_H__
33#define __C4IW_USER_H__
34
35#define C4IW_UVERBS_ABI_VERSION 1
36
37/*
38 * Make sure that all structs defined in this file remain laid out so
39 * that they pack the same way on 32-bit and 64-bit architectures (to
40 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
41 * In particular do not use pointer types -- pass pointers in __u64
42 * instead.
43 */
44struct c4iw_create_cq_resp {
45 __u64 key;
46 __u64 gts_key;
47 __u64 memsize;
48 __u32 cqid;
49 __u32 size;
50 __u32 qid_mask;
51};
52
53struct c4iw_create_qp_resp {
54 __u64 sq_key;
55 __u64 rq_key;
56 __u64 sq_db_gts_key;
57 __u64 rq_db_gts_key;
58 __u64 sq_memsize;
59 __u64 rq_memsize;
60 __u32 sqid;
61 __u32 rqid;
62 __u32 sq_size;
63 __u32 rq_size;
64 __u32 qid_mask;
65};
66#endif
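
The comment above asks that these structs lay out identically on 32-bit and 64-bit builds. One quick, entirely unofficial way to check that during development is to dump the sizes and offsets from a -m32 build and a -m64 build of the same translation unit and diff the output; this throwaway program is not part of the patch or of any userspace library.

/* Layout dump for the c4iw user ABI structs; compile with -m32 and -m64. */
#include <stdio.h>
#include <stddef.h>
#include <linux/types.h>
#include "user.h"

int main(void)
{
	printf("c4iw_create_cq_resp: size %zu, qid_mask at %zu\n",
	       sizeof(struct c4iw_create_cq_resp),
	       offsetof(struct c4iw_create_cq_resp, qid_mask));
	printf("c4iw_create_qp_resp: size %zu, qid_mask at %zu\n",
	       sizeof(struct c4iw_create_qp_resp),
	       offsetof(struct c4iw_create_qp_resp, qid_mask));
	return 0;
}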
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6110.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c
index 37d12e5efa49..1d7aea132a09 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6110.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c
@@ -1474,7 +1474,7 @@ static void ipath_ht_quiet_serdes(struct ipath_devdata *dd)
1474/** 1474/**
1475 * ipath_pe_put_tid - write a TID in chip 1475 * ipath_pe_put_tid - write a TID in chip
1476 * @dd: the infinipath device 1476 * @dd: the infinipath device
1477 * @tidptr: pointer to the expected TID (in chip) to udpate 1477 * @tidptr: pointer to the expected TID (in chip) to update
1478 * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected 1478 * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
1479 * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing 1479 * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
1480 * 1480 *
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c
index fbf8c5379ea8..4b4a30b0dabd 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6120.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c
@@ -1328,7 +1328,7 @@ bail:
1328/** 1328/**
1329 * ipath_pe_put_tid - write a TID in chip 1329 * ipath_pe_put_tid - write a TID in chip
1330 * @dd: the infinipath device 1330 * @dd: the infinipath device
1331 * @tidptr: pointer to the expected TID (in chip) to udpate 1331 * @tidptr: pointer to the expected TID (in chip) to update
1332 * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected 1332 * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
1333 * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing 1333 * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
1334 * 1334 *
@@ -1394,7 +1394,7 @@ static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
1394/** 1394/**
1395 * ipath_pe_put_tid_2 - write a TID in chip, Revision 2 or higher 1395 * ipath_pe_put_tid_2 - write a TID in chip, Revision 2 or higher
1396 * @dd: the infinipath device 1396 * @dd: the infinipath device
1397 * @tidptr: pointer to the expected TID (in chip) to udpate 1397 * @tidptr: pointer to the expected TID (in chip) to update
1398 * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected 1398 * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
1399 * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing 1399 * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
1400 * 1400 *
diff --git a/drivers/infiniband/hw/ipath/ipath_iba7220.c b/drivers/infiniband/hw/ipath/ipath_iba7220.c
index a805402dd4ae..34b778ed97fc 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba7220.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba7220.c
@@ -1738,7 +1738,7 @@ bail:
1738/** 1738/**
1739 * ipath_7220_put_tid - write a TID to the chip 1739 * ipath_7220_put_tid - write a TID to the chip
1740 * @dd: the infinipath device 1740 * @dd: the infinipath device
1741 * @tidptr: pointer to the expected TID (in chip) to udpate 1741 * @tidptr: pointer to the expected TID (in chip) to update
1742 * @tidtype: 0 for eager, 1 for expected 1742 * @tidtype: 0 for eager, 1 for expected
1743 * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing 1743 * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
1744 * 1744 *
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index cc2ddd29ac57..5a219a2fdf16 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -661,6 +661,14 @@ repoll:
661 wc->opcode = IB_WC_FETCH_ADD; 661 wc->opcode = IB_WC_FETCH_ADD;
662 wc->byte_len = 8; 662 wc->byte_len = 8;
663 break; 663 break;
664 case MLX4_OPCODE_MASKED_ATOMIC_CS:
665 wc->opcode = IB_WC_MASKED_COMP_SWAP;
666 wc->byte_len = 8;
667 break;
668 case MLX4_OPCODE_MASKED_ATOMIC_FA:
669 wc->opcode = IB_WC_MASKED_FETCH_ADD;
670 wc->byte_len = 8;
671 break;
664 case MLX4_OPCODE_BIND_MW: 672 case MLX4_OPCODE_BIND_MW:
665 wc->opcode = IB_WC_BIND_MW; 673 wc->opcode = IB_WC_BIND_MW;
666 break; 674 break;
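
On the completion side, the two new cases above surface as IB_WC_MASKED_COMP_SWAP and IB_WC_MASKED_FETCH_ADD with an 8-byte byte_len; a consumer polling the CQ would handle them roughly as in this sketch (cq is whatever ib_cq the caller owns).

	struct ib_wc wc;

	if (ib_poll_cq(cq, 1, &wc) > 0 && wc.status == IB_WC_SUCCESS) {
		switch (wc.opcode) {
		case IB_WC_MASKED_COMP_SWAP:
		case IB_WC_MASKED_FETCH_ADD:
			/* wc.byte_len == 8; the original remote value was
			 * written to the local sge of the posted WR */
			break;
		default:
			break;
		}
	}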
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 01f2a3f93355..39051417054c 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -139,6 +139,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
139 props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay; 139 props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
140 props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ? 140 props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
141 IB_ATOMIC_HCA : IB_ATOMIC_NONE; 141 IB_ATOMIC_HCA : IB_ATOMIC_NONE;
142 props->masked_atomic_cap = IB_ATOMIC_HCA;
142 props->max_pkeys = dev->dev->caps.pkey_table_len[1]; 143 props->max_pkeys = dev->dev->caps.pkey_table_len[1];
143 props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms; 144 props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
144 props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm; 145 props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 5643f4a8ffef..6a60827b2301 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -74,17 +74,19 @@ enum {
74}; 74};
75 75
76static const __be32 mlx4_ib_opcode[] = { 76static const __be32 mlx4_ib_opcode[] = {
77 [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND), 77 [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND),
78 [IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO), 78 [IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO),
79 [IB_WR_SEND_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_SEND_IMM), 79 [IB_WR_SEND_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_SEND_IMM),
80 [IB_WR_RDMA_WRITE] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE), 80 [IB_WR_RDMA_WRITE] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
81 [IB_WR_RDMA_WRITE_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM), 81 [IB_WR_RDMA_WRITE_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
82 [IB_WR_RDMA_READ] = cpu_to_be32(MLX4_OPCODE_RDMA_READ), 82 [IB_WR_RDMA_READ] = cpu_to_be32(MLX4_OPCODE_RDMA_READ),
83 [IB_WR_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_ATOMIC_CS), 83 [IB_WR_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
84 [IB_WR_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_ATOMIC_FA), 84 [IB_WR_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
85 [IB_WR_SEND_WITH_INV] = cpu_to_be32(MLX4_OPCODE_SEND_INVAL), 85 [IB_WR_SEND_WITH_INV] = cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
86 [IB_WR_LOCAL_INV] = cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL), 86 [IB_WR_LOCAL_INV] = cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
87 [IB_WR_FAST_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR), 87 [IB_WR_FAST_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR),
88 [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
89 [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
88}; 90};
89 91
90static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp) 92static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
@@ -1407,6 +1409,9 @@ static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *
1407 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { 1409 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
1408 aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap); 1410 aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
1409 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add); 1411 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
1412 } else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
1413 aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
1414 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask);
1410 } else { 1415 } else {
1411 aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add); 1416 aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
1412 aseg->compare = 0; 1417 aseg->compare = 0;
@@ -1414,6 +1419,15 @@ static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *
1414 1419
1415} 1420}
1416 1421
1422static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
1423 struct ib_send_wr *wr)
1424{
1425 aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
1426 aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask);
1427 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
1428 aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask);
1429}
1430
1417static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg, 1431static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
1418 struct ib_send_wr *wr) 1432 struct ib_send_wr *wr)
1419{ 1433{
@@ -1567,6 +1581,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1567 switch (wr->opcode) { 1581 switch (wr->opcode) {
1568 case IB_WR_ATOMIC_CMP_AND_SWP: 1582 case IB_WR_ATOMIC_CMP_AND_SWP:
1569 case IB_WR_ATOMIC_FETCH_AND_ADD: 1583 case IB_WR_ATOMIC_FETCH_AND_ADD:
1584 case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
1570 set_raddr_seg(wqe, wr->wr.atomic.remote_addr, 1585 set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
1571 wr->wr.atomic.rkey); 1586 wr->wr.atomic.rkey);
1572 wqe += sizeof (struct mlx4_wqe_raddr_seg); 1587 wqe += sizeof (struct mlx4_wqe_raddr_seg);
@@ -1579,6 +1594,19 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1579 1594
1580 break; 1595 break;
1581 1596
1597 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
1598 set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
1599 wr->wr.atomic.rkey);
1600 wqe += sizeof (struct mlx4_wqe_raddr_seg);
1601
1602 set_masked_atomic_seg(wqe, wr);
1603 wqe += sizeof (struct mlx4_wqe_masked_atomic_seg);
1604
1605 size += (sizeof (struct mlx4_wqe_raddr_seg) +
1606 sizeof (struct mlx4_wqe_masked_atomic_seg)) / 16;
1607
1608 break;
1609
1582 case IB_WR_RDMA_READ: 1610 case IB_WR_RDMA_READ:
1583 case IB_WR_RDMA_WRITE: 1611 case IB_WR_RDMA_WRITE:
1584 case IB_WR_RDMA_WRITE_WITH_IMM: 1612 case IB_WR_RDMA_WRITE_WITH_IMM:
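
The new work-request opcodes are driven entirely from the existing wr.atomic union; set_masked_atomic_seg() above consumes compare_add/compare_add_mask and swap/swap_mask. Below is a hedged caller-side sketch of posting a masked compare-and-swap; every identifier passed in is a placeholder supplied by the caller, and the masks select which bits are compared and which are written.

/* Sketch only: post a masked compare-and-swap on a connected QP. */
static int post_masked_cswap(struct ib_qp *qp, u32 lkey, u64 *result_buf,
			     u64 remote_addr, u32 rkey,
			     u64 compare, u64 compare_mask,
			     u64 swap, u64 swap_mask)
{
	struct ib_send_wr wr, *bad_wr;
	struct ib_sge sge;

	memset(&wr, 0, sizeof(wr));
	sge.addr   = (u64) (unsigned long) result_buf;	/* old value lands here */
	sge.length = 8;
	sge.lkey   = lkey;

	wr.wr_id                      = (u64) (unsigned long) result_buf;
	wr.opcode                     = IB_WR_MASKED_ATOMIC_CMP_AND_SWP;
	wr.send_flags                 = IB_SEND_SIGNALED;
	wr.sg_list                    = &sge;
	wr.num_sge                    = 1;
	wr.wr.atomic.remote_addr      = remote_addr;
	wr.wr.atomic.rkey             = rkey;
	wr.wr.atomic.compare_add      = compare;	/* compare value */
	wr.wr.atomic.compare_add_mask = compare_mask;	/* bits to compare */
	wr.wr.atomic.swap             = swap;		/* swap value */
	wr.wr.atomic.swap_mask        = swap_mask;	/* bits to write */

	return ib_post_send(qp, &wr, &bad_wr);
}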
diff --git a/drivers/infiniband/hw/mthca/mthca_allocator.c b/drivers/infiniband/hw/mthca/mthca_allocator.c
index c5ccc2daab60..b4e0cf4e95cd 100644
--- a/drivers/infiniband/hw/mthca/mthca_allocator.c
+++ b/drivers/infiniband/hw/mthca/mthca_allocator.c
@@ -211,7 +211,7 @@ int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
211 if (!buf->direct.buf) 211 if (!buf->direct.buf)
212 return -ENOMEM; 212 return -ENOMEM;
213 213
214 pci_unmap_addr_set(&buf->direct, mapping, t); 214 dma_unmap_addr_set(&buf->direct, mapping, t);
215 215
216 memset(buf->direct.buf, 0, size); 216 memset(buf->direct.buf, 0, size);
217 217
@@ -251,7 +251,7 @@ int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
251 goto err_free; 251 goto err_free;
252 252
253 dma_list[i] = t; 253 dma_list[i] = t;
254 pci_unmap_addr_set(&buf->page_list[i], mapping, t); 254 dma_unmap_addr_set(&buf->page_list[i], mapping, t);
255 255
256 clear_page(buf->page_list[i].buf); 256 clear_page(buf->page_list[i].buf);
257 } 257 }
@@ -289,12 +289,12 @@ void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf,
289 289
290 if (is_direct) 290 if (is_direct)
291 dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf, 291 dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
292 pci_unmap_addr(&buf->direct, mapping)); 292 dma_unmap_addr(&buf->direct, mapping));
293 else { 293 else {
294 for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i) 294 for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
295 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, 295 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
296 buf->page_list[i].buf, 296 buf->page_list[i].buf,
297 pci_unmap_addr(&buf->page_list[i], 297 dma_unmap_addr(&buf->page_list[i],
298 mapping)); 298 mapping));
299 kfree(buf->page_list); 299 kfree(buf->page_list);
300 } 300 }
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 9388164b6053..8e8c728aff88 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -504,7 +504,7 @@ static int mthca_create_eq(struct mthca_dev *dev,
504 goto err_out_free_pages; 504 goto err_out_free_pages;
505 505
506 dma_list[i] = t; 506 dma_list[i] = t;
507 pci_unmap_addr_set(&eq->page_list[i], mapping, t); 507 dma_unmap_addr_set(&eq->page_list[i], mapping, t);
508 508
509 clear_page(eq->page_list[i].buf); 509 clear_page(eq->page_list[i].buf);
510 } 510 }
@@ -579,7 +579,7 @@ static int mthca_create_eq(struct mthca_dev *dev,
579 if (eq->page_list[i].buf) 579 if (eq->page_list[i].buf)
580 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, 580 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
581 eq->page_list[i].buf, 581 eq->page_list[i].buf,
582 pci_unmap_addr(&eq->page_list[i], 582 dma_unmap_addr(&eq->page_list[i],
583 mapping)); 583 mapping));
584 584
585 mthca_free_mailbox(dev, mailbox); 585 mthca_free_mailbox(dev, mailbox);
@@ -629,7 +629,7 @@ static void mthca_free_eq(struct mthca_dev *dev,
629 for (i = 0; i < npages; ++i) 629 for (i = 0; i < npages; ++i)
630 pci_free_consistent(dev->pdev, PAGE_SIZE, 630 pci_free_consistent(dev->pdev, PAGE_SIZE,
631 eq->page_list[i].buf, 631 eq->page_list[i].buf,
632 pci_unmap_addr(&eq->page_list[i], mapping)); 632 dma_unmap_addr(&eq->page_list[i], mapping));
633 633
634 kfree(eq->page_list); 634 kfree(eq->page_list);
635 mthca_free_mailbox(dev, mailbox); 635 mthca_free_mailbox(dev, mailbox);
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 90f4c4d2e983..596acc45569b 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -46,7 +46,7 @@
46 46
47struct mthca_buf_list { 47struct mthca_buf_list {
48 void *buf; 48 void *buf;
49 DECLARE_PCI_UNMAP_ADDR(mapping) 49 DEFINE_DMA_UNMAP_ADDR(mapping);
50}; 50};
51 51
52union mthca_buf { 52union mthca_buf {
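
The mthca hunks above are part of the tree-wide move from the old pci_unmap_addr helpers to their generic DMA API equivalents. The pattern is always the same: reserve the stash with DEFINE_DMA_UNMAP_ADDR() in the structure, save the handle with dma_unmap_addr_set() right after mapping, and read it back with dma_unmap_addr() at unmap time; on configurations that do not need the saved address the macros compile to nothing, which is the point of using them instead of an open-coded dma_addr_t. A generic sketch with a made-up structure, not mthca code:

#include <linux/dma-mapping.h>

struct rx_buf {
	void *data;
	DEFINE_DMA_UNMAP_ADDR(mapping);
};

static int map_buf(struct device *dev, struct rx_buf *buf, size_t len)
{
	dma_addr_t addr = dma_map_single(dev, buf->data, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;
	dma_unmap_addr_set(buf, mapping, addr);	/* stash for later unmap */
	return 0;
}

static void unmap_buf(struct device *dev, struct rx_buf *buf, size_t len)
{
	dma_unmap_single(dev, dma_unmap_addr(buf, mapping), len,
			 DMA_FROM_DEVICE);
}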
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index c36a3f514929..86acb7d57064 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -1297,7 +1297,7 @@ int nes_destroy_cqp(struct nes_device *nesdev)
1297/** 1297/**
1298 * nes_init_1g_phy 1298 * nes_init_1g_phy
1299 */ 1299 */
1300int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index) 1300static int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
1301{ 1301{
1302 u32 counter = 0; 1302 u32 counter = 0;
1303 u16 phy_data; 1303 u16 phy_data;
@@ -1351,7 +1351,7 @@ int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
1351/** 1351/**
1352 * nes_init_2025_phy 1352 * nes_init_2025_phy
1353 */ 1353 */
1354int nes_init_2025_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index) 1354static int nes_init_2025_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
1355{ 1355{
1356 u32 temp_phy_data = 0; 1356 u32 temp_phy_data = 0;
1357 u32 temp_phy_data2 = 0; 1357 u32 temp_phy_data2 = 0;
@@ -2458,7 +2458,6 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
2458 return; 2458 return;
2459 } 2459 }
2460 nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_INTERRUPT; 2460 nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_INTERRUPT;
2461 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
2462 2461
2463 /* ack the MAC interrupt */ 2462 /* ack the MAC interrupt */
2464 mac_status = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (mac_index * 0x200)); 2463 mac_status = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (mac_index * 0x200));
@@ -2469,11 +2468,9 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
2469 2468
2470 if (mac_status & (NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT)) { 2469 if (mac_status & (NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT)) {
2471 nesdev->link_status_interrupts++; 2470 nesdev->link_status_interrupts++;
2472 if (0 == (++nesadapter->link_interrupt_count[mac_index] % ((u16)NES_MAX_LINK_INTERRUPTS))) { 2471 if (0 == (++nesadapter->link_interrupt_count[mac_index] % ((u16)NES_MAX_LINK_INTERRUPTS)))
2473 spin_lock_irqsave(&nesadapter->phy_lock, flags);
2474 nes_reset_link(nesdev, mac_index); 2472 nes_reset_link(nesdev, mac_index);
2475 spin_unlock_irqrestore(&nesadapter->phy_lock, flags); 2473
2476 }
2477 /* read the PHY interrupt status register */ 2474 /* read the PHY interrupt status register */
2478 if ((nesadapter->OneG_Mode) && 2475 if ((nesadapter->OneG_Mode) &&
2479 (nesadapter->phy_type[mac_index] != NES_PHY_TYPE_PUMA_1G)) { 2476 (nesadapter->phy_type[mac_index] != NES_PHY_TYPE_PUMA_1G)) {
@@ -2587,6 +2584,7 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
2587 break; 2584 break;
2588 } 2585 }
2589 } 2586 }
2587 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
2590 2588
2591 if (phy_data & 0x0004) { 2589 if (phy_data & 0x0004) {
2592 if (wide_ppm_offset && 2590 if (wide_ppm_offset &&
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index b7c813f4be43..e95e8d09ff38 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -877,7 +877,7 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
877 if (!mc_all_on) { 877 if (!mc_all_on) {
878 char *addrs; 878 char *addrs;
879 int i; 879 int i;
880 struct dev_mc_list *mcaddr; 880 struct netdev_hw_addr *ha;
881 881
882 addrs = kmalloc(ETH_ALEN * mc_count, GFP_ATOMIC); 882 addrs = kmalloc(ETH_ALEN * mc_count, GFP_ATOMIC);
883 if (!addrs) { 883 if (!addrs) {
@@ -885,9 +885,8 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
885 goto unlock; 885 goto unlock;
886 } 886 }
887 i = 0; 887 i = 0;
888 netdev_for_each_mc_addr(mcaddr, netdev) 888 netdev_for_each_mc_addr(ha, netdev)
889 memcpy(get_addr(addrs, i++), 889 memcpy(get_addr(addrs, i++), ha->addr, ETH_ALEN);
890 mcaddr->dmi_addr, ETH_ALEN);
891 890
892 perfect_filter_register_address = NES_IDX_PERFECT_FILTER_LOW + 891 perfect_filter_register_address = NES_IDX_PERFECT_FILTER_LOW +
893 pft_entries_preallocated * 0x8; 892 pft_entries_preallocated * 0x8;
@@ -1461,11 +1460,14 @@ static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd
1461 et_cmd->transceiver = XCVR_INTERNAL; 1460 et_cmd->transceiver = XCVR_INTERNAL;
1462 et_cmd->phy_address = mac_index; 1461 et_cmd->phy_address = mac_index;
1463 } else { 1462 } else {
1463 unsigned long flags;
1464 et_cmd->supported = SUPPORTED_1000baseT_Full 1464 et_cmd->supported = SUPPORTED_1000baseT_Full
1465 | SUPPORTED_Autoneg; 1465 | SUPPORTED_Autoneg;
1466 et_cmd->advertising = ADVERTISED_1000baseT_Full 1466 et_cmd->advertising = ADVERTISED_1000baseT_Full
1467 | ADVERTISED_Autoneg; 1467 | ADVERTISED_Autoneg;
1468 spin_lock_irqsave(&nesadapter->phy_lock, flags);
1468 nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data); 1469 nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
1470 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
1469 if (phy_data & 0x1000) 1471 if (phy_data & 0x1000)
1470 et_cmd->autoneg = AUTONEG_ENABLE; 1472 et_cmd->autoneg = AUTONEG_ENABLE;
1471 else 1473 else
@@ -1503,12 +1505,15 @@ static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd
1503 struct nes_vnic *nesvnic = netdev_priv(netdev); 1505 struct nes_vnic *nesvnic = netdev_priv(netdev);
1504 struct nes_device *nesdev = nesvnic->nesdev; 1506 struct nes_device *nesdev = nesvnic->nesdev;
1505 struct nes_adapter *nesadapter = nesdev->nesadapter; 1507 struct nes_adapter *nesadapter = nesdev->nesadapter;
1506 u16 phy_data;
1507 1508
1508 if ((nesadapter->OneG_Mode) && 1509 if ((nesadapter->OneG_Mode) &&
1509 (nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_PUMA_1G)) { 1510 (nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_PUMA_1G)) {
1510 nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index], 1511 unsigned long flags;
1511 &phy_data); 1512 u16 phy_data;
1513 u8 phy_index = nesadapter->phy_index[nesdev->mac_index];
1514
1515 spin_lock_irqsave(&nesadapter->phy_lock, flags);
1516 nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
1512 if (et_cmd->autoneg) { 1517 if (et_cmd->autoneg) {
1513 /* Turn on Full duplex, Autoneg, and restart autonegotiation */ 1518 /* Turn on Full duplex, Autoneg, and restart autonegotiation */
1514 phy_data |= 0x1300; 1519 phy_data |= 0x1300;
@@ -1516,8 +1521,8 @@ static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd
1516 /* Turn off autoneg */ 1521 /* Turn off autoneg */
1517 phy_data &= ~0x1000; 1522 phy_data &= ~0x1000;
1518 } 1523 }
1519 nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index], 1524 nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data);
1520 phy_data); 1525 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
1521 } 1526 }
1522 1527
1523 return 0; 1528 return 0;
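
Both the nes change above and the ipoib change below replace the removed struct dev_mc_list walker with the struct netdev_hw_addr list; the address now lives in ha->addr. Inside the ndo set-multicast callback the networking core already holds the address-list lock; a walker running elsewhere takes it itself, roughly as in this sketch:

	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);
	netdev_for_each_mc_addr(ha, netdev)
		pr_debug("%s: mc %pM\n", netdev->name, ha->addr);
	netif_addr_unlock_bh(netdev);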
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index 186623d86959..a9f5dd272f1a 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -381,12 +381,8 @@ static u16 nes_read16_eeprom(void __iomem *addr, u16 offset)
381 */ 381 */
382void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 data) 382void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 data)
383{ 383{
384 struct nes_adapter *nesadapter = nesdev->nesadapter;
385 u32 u32temp; 384 u32 u32temp;
386 u32 counter; 385 u32 counter;
387 unsigned long flags;
388
389 spin_lock_irqsave(&nesadapter->phy_lock, flags);
390 386
391 nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL, 387 nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
392 0x50020000 | data | ((u32)phy_reg << 18) | ((u32)phy_addr << 23)); 388 0x50020000 | data | ((u32)phy_reg << 18) | ((u32)phy_addr << 23));
@@ -402,8 +398,6 @@ void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u1
402 if (!(u32temp & 1)) 398 if (!(u32temp & 1))
403 nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n", 399 nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n",
404 u32temp); 400 u32temp);
405
406 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
407} 401}
408 402
409 403
@@ -414,14 +408,11 @@ void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u1
414 */ 408 */
415void nes_read_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 *data) 409void nes_read_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 *data)
416{ 410{
417 struct nes_adapter *nesadapter = nesdev->nesadapter;
418 u32 u32temp; 411 u32 u32temp;
419 u32 counter; 412 u32 counter;
420 unsigned long flags;
421 413
422 /* nes_debug(NES_DBG_PHY, "phy addr = %d, mac_index = %d\n", 414 /* nes_debug(NES_DBG_PHY, "phy addr = %d, mac_index = %d\n",
423 phy_addr, nesdev->mac_index); */ 415 phy_addr, nesdev->mac_index); */
424 spin_lock_irqsave(&nesadapter->phy_lock, flags);
425 416
426 nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL, 417 nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
427 0x60020000 | ((u32)phy_reg << 18) | ((u32)phy_addr << 23)); 418 0x60020000 | ((u32)phy_reg << 18) | ((u32)phy_addr << 23));
@@ -441,7 +432,6 @@ void nes_read_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16
441 } else { 432 } else {
442 *data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); 433 *data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
443 } 434 }
444 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
445} 435}
446 436
447 437
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index e54f312e4bdc..925e1f2d1d55 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -374,7 +374,7 @@ static int alloc_fast_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
374/* 374/*
375 * nes_alloc_fast_reg_mr 375 * nes_alloc_fast_reg_mr
376 */ 376 */
377struct ib_mr *nes_alloc_fast_reg_mr(struct ib_pd *ibpd, int max_page_list_len) 377static struct ib_mr *nes_alloc_fast_reg_mr(struct ib_pd *ibpd, int max_page_list_len)
378{ 378{
379 struct nes_pd *nespd = to_nespd(ibpd); 379 struct nes_pd *nespd = to_nespd(ibpd);
380 struct nes_vnic *nesvnic = to_nesvnic(ibpd->device); 380 struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index d10b4ec68d28..40e858492f90 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -49,6 +49,25 @@ static u32 ipoib_get_rx_csum(struct net_device *dev)
 		!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
 }
 
+static int ipoib_set_tso(struct net_device *dev, u32 data)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+	if (data) {
+		if (!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
+		    (dev->features & NETIF_F_SG) &&
+		    (priv->hca_caps & IB_DEVICE_UD_TSO)) {
+			dev->features |= NETIF_F_TSO;
+		} else {
+			ipoib_warn(priv, "can't set TSO on\n");
+			return -EOPNOTSUPP;
+		}
+	} else
+		dev->features &= ~NETIF_F_TSO;
+
+	return 0;
+}
+
 static int ipoib_get_coalesce(struct net_device *dev,
 			      struct ethtool_coalesce *coal)
 {
@@ -131,6 +150,7 @@ static void ipoib_get_ethtool_stats(struct net_device *dev,
 static const struct ethtool_ops ipoib_ethtool_ops = {
 	.get_drvinfo = ipoib_get_drvinfo,
 	.get_rx_csum = ipoib_get_rx_csum,
+	.set_tso = ipoib_set_tso,
 	.get_coalesce = ipoib_get_coalesce,
 	.set_coalesce = ipoib_set_coalesce,
 	.get_flags = ethtool_op_get_flags,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index b166bb75753d..3871ac663554 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -768,11 +768,8 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
 	}
 }
 
-static int ipoib_mcast_addr_is_valid(const u8 *addr, unsigned int addrlen,
-				     const u8 *broadcast)
+static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)
 {
-	if (addrlen != INFINIBAND_ALEN)
-		return 0;
 	/* reserved QPN, prefix, scope */
 	if (memcmp(addr, broadcast, 6))
 		return 0;
@@ -787,7 +784,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
 	struct ipoib_dev_priv *priv =
 		container_of(work, struct ipoib_dev_priv, restart_task);
 	struct net_device *dev = priv->dev;
-	struct dev_mc_list *mclist;
+	struct netdev_hw_addr *ha;
 	struct ipoib_mcast *mcast, *tmcast;
 	LIST_HEAD(remove_list);
 	unsigned long flags;
@@ -812,15 +809,13 @@ void ipoib_mcast_restart_task(struct work_struct *work)
 		clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
 
 	/* Mark all of the entries that are found or don't exist */
-	netdev_for_each_mc_addr(mclist, dev) {
+	netdev_for_each_mc_addr(ha, dev) {
 		union ib_gid mgid;
 
-		if (!ipoib_mcast_addr_is_valid(mclist->dmi_addr,
-					       mclist->dmi_addrlen,
-					       dev->broadcast))
+		if (!ipoib_mcast_addr_is_valid(ha->addr, dev->broadcast))
 			continue;
 
-		memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid);
+		memcpy(mgid.raw, ha->addr + 4, sizeof mgid);
 
 		mcast = __ipoib_mcast_find(dev, &mgid);
 		if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 93399dff0c6f..7b2fc98e2f2b 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -325,7 +325,7 @@ iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn)
 	 */
 	if (ib_conn) {
 		ib_conn->iser_conn = NULL;
-		iser_conn_put(ib_conn);
+		iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */
 	}
 }
 
@@ -357,11 +357,12 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
 	/* binds the iSER connection retrieved from the previously
 	 * connected ep_handle to the iSCSI layer connection. exchanges
 	 * connection pointers */
-	iser_err("binding iscsi conn %p to iser_conn %p\n",conn,ib_conn);
+	iser_err("binding iscsi/iser conn %p %p to ib_conn %p\n",
+		 conn, conn->dd_data, ib_conn);
 	iser_conn = conn->dd_data;
 	ib_conn->iser_conn = iser_conn;
 	iser_conn->ib_conn = ib_conn;
-	iser_conn_get(ib_conn);
+	iser_conn_get(ib_conn); /* ref iscsi/ib conn binding */
 	return 0;
 }
 
@@ -382,7 +383,7 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
 		 * There is no unbind event so the stop callback
 		 * must release the ref from the bind.
 		 */
-		iser_conn_put(ib_conn);
+		iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */
 	}
 	iser_conn->ib_conn = NULL;
 }
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 036934cdcb92..f1df01567bb6 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -232,6 +232,7 @@ struct iser_device {
 	struct ib_cq *tx_cq;
 	struct ib_mr *mr;
 	struct tasklet_struct cq_tasklet;
+	struct ib_event_handler event_handler;
 	struct list_head ig_list; /* entry in ig devices list */
 	int refcount;
 };
@@ -246,7 +247,6 @@ struct iser_conn {
 	struct rdma_cm_id *cma_id; /* CMA ID */
 	struct ib_qp *qp; /* QP */
 	struct ib_fmr_pool *fmr_pool; /* pool of IB FMRs */
-	int disc_evt_flag; /* disconn event delivered */
 	wait_queue_head_t wait; /* waitq for conn/disconn */
 	int post_recv_buf_count; /* posted rx count */
 	atomic_t post_send_buf_count; /* posted tx count */
@@ -320,7 +320,7 @@ void iser_conn_init(struct iser_conn *ib_conn);
 
 void iser_conn_get(struct iser_conn *ib_conn);
 
-void iser_conn_put(struct iser_conn *ib_conn);
+int iser_conn_put(struct iser_conn *ib_conn, int destroy_cma_id_allowed);
 
 void iser_conn_terminate(struct iser_conn *ib_conn);
 
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index b89d76b39a13..9876865732f7 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -54,6 +54,13 @@ static void iser_qp_event_callback(struct ib_event *cause, void *context)
 	iser_err("got qp event %d\n",cause->event);
 }
 
+static void iser_event_handler(struct ib_event_handler *handler,
+				struct ib_event *event)
+{
+	iser_err("async event %d on device %s port %d\n", event->event,
+		event->device->name, event->element.port_num);
+}
+
 /**
  * iser_create_device_ib_res - creates Protection Domain (PD), Completion
  * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
@@ -96,8 +103,15 @@ static int iser_create_device_ib_res(struct iser_device *device)
 	if (IS_ERR(device->mr))
 		goto dma_mr_err;
 
+	INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
+				iser_event_handler);
+	if (ib_register_event_handler(&device->event_handler))
+		goto handler_err;
+
 	return 0;
 
+handler_err:
+	ib_dereg_mr(device->mr);
 dma_mr_err:
 	tasklet_kill(&device->cq_tasklet);
 cq_arm_err:
@@ -120,7 +134,7 @@ static void iser_free_device_ib_res(struct iser_device *device)
 	BUG_ON(device->mr == NULL);
 
 	tasklet_kill(&device->cq_tasklet);
-
+	(void)ib_unregister_event_handler(&device->event_handler);
 	(void)ib_dereg_mr(device->mr);
 	(void)ib_destroy_cq(device->tx_cq);
 	(void)ib_destroy_cq(device->rx_cq);
@@ -149,10 +163,8 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
 	device = ib_conn->device;
 
 	ib_conn->login_buf = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
-	if (!ib_conn->login_buf) {
-		goto alloc_err;
-		ret = -ENOMEM;
-	}
+	if (!ib_conn->login_buf)
+		goto out_err;
 
 	ib_conn->login_dma = ib_dma_map_single(ib_conn->device->ib_device,
 				(void *)ib_conn->login_buf, ISER_RX_LOGIN_SIZE,
@@ -161,10 +173,9 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
 	ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) +
 				    (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)),
 				    GFP_KERNEL);
-	if (!ib_conn->page_vec) {
-		ret = -ENOMEM;
-		goto alloc_err;
-	}
+	if (!ib_conn->page_vec)
+		goto out_err;
+
 	ib_conn->page_vec->pages = (u64 *) (ib_conn->page_vec + 1);
 
 	params.page_shift = SHIFT_4K;
@@ -184,7 +195,8 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
 	ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, &params);
 	if (IS_ERR(ib_conn->fmr_pool)) {
 		ret = PTR_ERR(ib_conn->fmr_pool);
-		goto fmr_pool_err;
+		ib_conn->fmr_pool = NULL;
+		goto out_err;
 	}
 
 	memset(&init_attr, 0, sizeof init_attr);
@@ -202,7 +214,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
 
 	ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
 	if (ret)
-		goto qp_err;
+		goto out_err;
 
 	ib_conn->qp = ib_conn->cma_id->qp;
 	iser_err("setting conn %p cma_id %p: fmr_pool %p qp %p\n",
@@ -210,12 +222,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
 		 ib_conn->fmr_pool, ib_conn->cma_id->qp);
 	return ret;
 
-qp_err:
-	(void)ib_destroy_fmr_pool(ib_conn->fmr_pool);
-fmr_pool_err:
-	kfree(ib_conn->page_vec);
-	kfree(ib_conn->login_buf);
-alloc_err:
+out_err:
 	iser_err("unable to alloc mem or create resource, err %d\n", ret);
 	return ret;
 }
@@ -224,7 +231,7 @@ alloc_err:
  * releases the FMR pool, QP and CMA ID objects, returns 0 on success,
  * -1 on failure
  */
-static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
+static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
 {
 	BUG_ON(ib_conn == NULL);
 
@@ -239,7 +246,8 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
 	if (ib_conn->qp != NULL)
 		rdma_destroy_qp(ib_conn->cma_id);
 
-	if (ib_conn->cma_id != NULL)
+	/* if cma handler context, the caller acts s.t the cma destroy the id */
+	if (ib_conn->cma_id != NULL && can_destroy_id)
 		rdma_destroy_id(ib_conn->cma_id);
 
 	ib_conn->fmr_pool = NULL;
@@ -317,7 +325,7 @@ static int iser_conn_state_comp_exch(struct iser_conn *ib_conn,
 /**
  * Frees all conn objects and deallocs conn descriptor
  */
-static void iser_conn_release(struct iser_conn *ib_conn)
+static void iser_conn_release(struct iser_conn *ib_conn, int can_destroy_id)
 {
 	struct iser_device *device = ib_conn->device;
 
@@ -327,13 +335,11 @@ static void iser_conn_release(struct iser_conn *ib_conn)
 	list_del(&ib_conn->conn_list);
 	mutex_unlock(&ig.connlist_mutex);
 	iser_free_rx_descriptors(ib_conn);
-	iser_free_ib_conn_res(ib_conn);
+	iser_free_ib_conn_res(ib_conn, can_destroy_id);
 	ib_conn->device = NULL;
 	/* on EVENT_ADDR_ERROR there's no device yet for this conn */
 	if (device != NULL)
 		iser_device_try_release(device);
-	if (ib_conn->iser_conn)
-		ib_conn->iser_conn->ib_conn = NULL;
 	iscsi_destroy_endpoint(ib_conn->ep);
 }
 
@@ -342,10 +348,13 @@ void iser_conn_get(struct iser_conn *ib_conn)
 	atomic_inc(&ib_conn->refcount);
 }
 
-void iser_conn_put(struct iser_conn *ib_conn)
+int iser_conn_put(struct iser_conn *ib_conn, int can_destroy_id)
 {
-	if (atomic_dec_and_test(&ib_conn->refcount))
-		iser_conn_release(ib_conn);
+	if (atomic_dec_and_test(&ib_conn->refcount)) {
+		iser_conn_release(ib_conn, can_destroy_id);
+		return 1;
+	}
+	return 0;
 }
 
 /**
@@ -369,19 +378,20 @@ void iser_conn_terminate(struct iser_conn *ib_conn)
 	wait_event_interruptible(ib_conn->wait,
 				 ib_conn->state == ISER_CONN_DOWN);
 
-	iser_conn_put(ib_conn);
+	iser_conn_put(ib_conn, 1); /* deref ib conn deallocate */
 }
 
-static void iser_connect_error(struct rdma_cm_id *cma_id)
+static int iser_connect_error(struct rdma_cm_id *cma_id)
 {
 	struct iser_conn *ib_conn;
 	ib_conn = (struct iser_conn *)cma_id->context;
 
 	ib_conn->state = ISER_CONN_DOWN;
 	wake_up_interruptible(&ib_conn->wait);
+	return iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */
 }
 
-static void iser_addr_handler(struct rdma_cm_id *cma_id)
+static int iser_addr_handler(struct rdma_cm_id *cma_id)
 {
 	struct iser_device *device;
 	struct iser_conn *ib_conn;
@@ -390,8 +400,7 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
 	device = iser_device_find_by_ib_device(cma_id);
 	if (!device) {
 		iser_err("device lookup/creation failed\n");
-		iser_connect_error(cma_id);
-		return;
+		return iser_connect_error(cma_id);
 	}
 
 	ib_conn = (struct iser_conn *)cma_id->context;
@@ -400,11 +409,13 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
 	ret = rdma_resolve_route(cma_id, 1000);
 	if (ret) {
 		iser_err("resolve route failed: %d\n", ret);
-		iser_connect_error(cma_id);
+		return iser_connect_error(cma_id);
 	}
+
+	return 0;
 }
 
-static void iser_route_handler(struct rdma_cm_id *cma_id)
+static int iser_route_handler(struct rdma_cm_id *cma_id)
 {
 	struct rdma_conn_param conn_param;
 	int ret;
@@ -425,9 +436,9 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
 		goto failure;
 	}
 
-	return;
+	return 0;
 failure:
-	iser_connect_error(cma_id);
+	return iser_connect_error(cma_id);
 }
 
 static void iser_connected_handler(struct rdma_cm_id *cma_id)
@@ -439,12 +450,12 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id)
 	wake_up_interruptible(&ib_conn->wait);
 }
 
-static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
+static int iser_disconnected_handler(struct rdma_cm_id *cma_id)
 {
 	struct iser_conn *ib_conn;
+	int ret;
 
 	ib_conn = (struct iser_conn *)cma_id->context;
-	ib_conn->disc_evt_flag = 1;
 
 	/* getting here when the state is UP means that the conn is being *
 	 * terminated asynchronously from the iSCSI layer's perspective. */
@@ -459,20 +470,24 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
 		ib_conn->state = ISER_CONN_DOWN;
 		wake_up_interruptible(&ib_conn->wait);
 	}
+
+	ret = iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */
+	return ret;
 }
 
 static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
 	int ret = 0;
 
-	iser_err("event %d conn %p id %p\n",event->event,cma_id->context,cma_id);
+	iser_err("event %d status %d conn %p id %p\n",
+		 event->event, event->status, cma_id->context, cma_id);
 
 	switch (event->event) {
 	case RDMA_CM_EVENT_ADDR_RESOLVED:
-		iser_addr_handler(cma_id);
+		ret = iser_addr_handler(cma_id);
 		break;
 	case RDMA_CM_EVENT_ROUTE_RESOLVED:
-		iser_route_handler(cma_id);
+		ret = iser_route_handler(cma_id);
 		break;
 	case RDMA_CM_EVENT_ESTABLISHED:
 		iser_connected_handler(cma_id);
@@ -482,13 +497,12 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
 	case RDMA_CM_EVENT_CONNECT_ERROR:
 	case RDMA_CM_EVENT_UNREACHABLE:
 	case RDMA_CM_EVENT_REJECTED:
-		iser_err("event: %d, error: %d\n", event->event, event->status);
-		iser_connect_error(cma_id);
+		ret = iser_connect_error(cma_id);
 		break;
 	case RDMA_CM_EVENT_DISCONNECTED:
 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
 	case RDMA_CM_EVENT_ADDR_CHANGE:
-		iser_disconnected_handler(cma_id);
+		ret = iser_disconnected_handler(cma_id);
 		break;
 	default:
 		iser_err("Unexpected RDMA CM event (%d)\n", event->event);
@@ -503,7 +517,7 @@ void iser_conn_init(struct iser_conn *ib_conn)
 	init_waitqueue_head(&ib_conn->wait);
 	ib_conn->post_recv_buf_count = 0;
 	atomic_set(&ib_conn->post_send_buf_count, 0);
-	atomic_set(&ib_conn->refcount, 1);
+	atomic_set(&ib_conn->refcount, 1); /* ref ib conn allocation */
 	INIT_LIST_HEAD(&ib_conn->conn_list);
 	spin_lock_init(&ib_conn->lock);
 }
@@ -531,6 +545,7 @@ int iser_connect(struct iser_conn *ib_conn,
 
 	ib_conn->state = ISER_CONN_PENDING;
 
+	iser_conn_get(ib_conn); /* ref ib conn's cma id */
 	ib_conn->cma_id = rdma_create_id(iser_cma_handler,
 					 (void *)ib_conn,
 					 RDMA_PS_TCP);
@@ -568,7 +583,7 @@ id_failure:
 addr_failure:
 	ib_conn->state = ISER_CONN_DOWN;
 connect_failure:
-	iser_conn_release(ib_conn);
+	iser_conn_release(ib_conn, 1);
 	return err;
 }
 
@@ -737,12 +752,10 @@ static void iser_handle_comp_error(struct iser_tx_desc *desc,
 		iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
 				   ISCSI_ERR_CONN_FAILED);
 
-		/* complete the termination process if disconnect event was delivered *
-		 * note there are no more non completed posts to the QP */
-		if (ib_conn->disc_evt_flag) {
-			ib_conn->state = ISER_CONN_DOWN;
-			wake_up_interruptible(&ib_conn->wait);
-		}
+		/* no more non completed posts to the QP, complete the
+		 * termination process w.o worrying on disconnect event */
+		ib_conn->state = ISER_CONN_DOWN;
+		wake_up_interruptible(&ib_conn->wait);
 	}
 }
 