-rw-r--r--  drivers/infiniband/core/cma.c                 |  74
-rw-r--r--  drivers/infiniband/core/mad.c                 |   4
-rw-r--r--  drivers/infiniband/core/ucm.c                 |   3
-rw-r--r--  drivers/infiniband/core/ucma.c                |   4
-rw-r--r--  drivers/infiniband/core/user_mad.c            |  12
-rw-r--r--  drivers/infiniband/core/uverbs_main.c         |  11
-rw-r--r--  drivers/infiniband/hw/amso1100/c2.h           |   2
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_alloc.c     |   4
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_cq.c        |   4
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_mq.h        |   2
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_provider.h  |   2
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_rnic.c      |  12
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.c        |  12
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.h        |   2
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_wr.h         |   4
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch.c            |   2
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.c         | 133
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c               |   8
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c             |   1
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c               |  50
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_allocator.c |   8
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c        |   6
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.h  |   2
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c            |  12
-rw-r--r--  drivers/infiniband/hw/nes/nes_nic.c           |  16
-rw-r--r--  drivers/infiniband/hw/nes/nes_utils.c         |  10
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c         |   2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ethtool.c  |  20
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c      |   9
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h      |   4
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c      | 115
-rw-r--r--  include/linux/mlx4/device.h                   |   4
-rw-r--r--  include/linux/mlx4/qp.h                       |   7
-rw-r--r--  include/rdma/ib_verbs.h                       |   7
34 files changed, 317 insertions, 251 deletions
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 6d777069d86d..b930b8110a63 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -79,7 +79,6 @@ static DEFINE_IDR(sdp_ps);
 static DEFINE_IDR(tcp_ps);
 static DEFINE_IDR(udp_ps);
 static DEFINE_IDR(ipoib_ps);
-static int next_port;
 
 struct cma_device {
 	struct list_head list;
@@ -1677,13 +1676,13 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
 	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
 		return -EINVAL;
 
-	id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL);
+	id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
+				     GFP_KERNEL);
 	if (!id->route.path_rec) {
 		ret = -ENOMEM;
 		goto err;
 	}
 
-	memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
 	id->route.num_paths = num_paths;
 	return 0;
 err:
@@ -1970,47 +1969,33 @@ err1:
 
 static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
 {
-	struct rdma_bind_list *bind_list;
-	int port, ret, low, high;
-
-	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
-	if (!bind_list)
-		return -ENOMEM;
-
-retry:
-	/* FIXME: add proper port randomization per like inet_csk_get_port */
-	do {
-		ret = idr_get_new_above(ps, bind_list, next_port, &port);
-	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));
-
-	if (ret)
-		goto err1;
+	static unsigned int last_used_port;
+	int low, high, remaining;
+	unsigned int rover;
 
 	inet_get_local_port_range(&low, &high);
-	if (port > high) {
-		if (next_port != low) {
-			idr_remove(ps, port);
-			next_port = low;
-			goto retry;
-		}
-		ret = -EADDRNOTAVAIL;
-		goto err2;
+	remaining = (high - low) + 1;
+	rover = net_random() % remaining + low;
+retry:
+	if (last_used_port != rover &&
+	    !idr_find(ps, (unsigned short) rover)) {
+		int ret = cma_alloc_port(ps, id_priv, rover);
+		/*
+		 * Remember previously used port number in order to avoid
+		 * re-using same port immediately after it is closed.
+		 */
+		if (!ret)
+			last_used_port = rover;
+		if (ret != -EADDRNOTAVAIL)
+			return ret;
 	}
-
-	if (port == high)
-		next_port = low;
-	else
-		next_port = port + 1;
-
-	bind_list->ps = ps;
-	bind_list->port = (unsigned short) port;
-	cma_bind_port(bind_list, id_priv);
-	return 0;
-err2:
-	idr_remove(ps, port);
-err1:
-	kfree(bind_list);
-	return ret;
+	if (--remaining) {
+		rover++;
+		if ((rover < low) || (rover > high))
+			rover = low;
+		goto retry;
+	}
+	return -EADDRNOTAVAIL;
 }
 
 static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
@@ -2995,12 +2980,7 @@ static void cma_remove_one(struct ib_device *device)
 
 static int __init cma_init(void)
 {
-	int ret, low, high, remaining;
-
-	get_random_bytes(&next_port, sizeof next_port);
-	inet_get_local_port_range(&low, &high);
-	remaining = (high - low) + 1;
-	next_port = ((unsigned int) next_port % remaining) + low;
+	int ret;
 
 	cma_wq = create_singlethread_workqueue("rdma_cm");
 	if (!cma_wq)
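The cma_alloc_any_port() rewrite above replaces the global next_port cursor with a per-call random rover: pick a random start in the local port range, walk the range at most once, skip the port handed out most recently, and stop at the first free port. A rough standalone C sketch of the same search, with toy in_use/try_bind helpers standing in for idr_find() and cma_alloc_port(), and rand() standing in for net_random():

#include <errno.h>
#include <stdlib.h>

/* Toy stand-ins for the kernel-side idr lookup and cma_alloc_port(). */
static unsigned char in_use[65536];

static int port_in_use(unsigned int port) { return in_use[port]; }

static int try_bind(unsigned int port)
{
	if (in_use[port])
		return -EADDRNOTAVAIL;
	in_use[port] = 1;
	return 0;
}

static unsigned int last_used_port;

int alloc_any_port(int low, int high)
{
	int remaining = (high - low) + 1;
	unsigned int rover = rand() % remaining + low;

	while (remaining--) {
		if (rover != last_used_port && !port_in_use(rover)) {
			int ret = try_bind(rover);

			if (!ret)
				last_used_port = rover;	/* avoid immediate reuse */
			if (ret != -EADDRNOTAVAIL)
				return ret;		/* bound, or a hard error */
		}
		if (++rover > (unsigned int) high)	/* wrap around the range */
			rover = low;
	}
	return -EADDRNOTAVAIL;				/* whole range exhausted */
}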
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 1df1194aeba4..6dc7b77d5d29 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -291,13 +291,11 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	}
 
 	if (mad_reg_req) {
-		reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
+		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
 		if (!reg_req) {
 			ret = ERR_PTR(-ENOMEM);
 			goto error3;
 		}
-		/* Make a copy of the MAD registration request */
-		memcpy(reg_req, mad_reg_req, sizeof *reg_req);
 	}
 
 	/* Now, fill in the various structures */
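The mad.c hunk is the same conversion as the rdma_set_ib_paths() hunk above: a kmalloc() followed by a memcpy() of the whole allocation collapses into one kmemdup() call. The shape of the change, shown as a fragment with placeholder names (buf, src and len are not from the patch):

/* Before: two steps, allocate then copy the same number of bytes. */
buf = kmalloc(len, GFP_KERNEL);
if (!buf)
	return -ENOMEM;
memcpy(buf, src, len);

/* After: kmemdup() allocates and copies in one call. */
buf = kmemdup(src, len, GFP_KERNEL);
if (!buf)
	return -ENOMEM;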
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 512b1c43460c..46474842cfe9 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -1181,7 +1181,7 @@ static int ib_ucm_open(struct inode *inode, struct file *filp)
 	file->filp = filp;
 	file->device = container_of(inode->i_cdev, struct ib_ucm_device, cdev);
 
-	return 0;
+	return nonseekable_open(inode, filp);
 }
 
 static int ib_ucm_close(struct inode *inode, struct file *filp)
@@ -1229,6 +1229,7 @@ static const struct file_operations ucm_fops = {
 	.release = ib_ucm_close,
 	.write = ib_ucm_write,
 	.poll = ib_ucm_poll,
+	.llseek = no_llseek,
 };
 
 static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 46185084121e..ac7edc24165c 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1220,7 +1220,8 @@ static int ucma_open(struct inode *inode, struct file *filp)
 
 	filp->private_data = file;
 	file->filp = filp;
-	return 0;
+
+	return nonseekable_open(inode, filp);
 }
 
 static int ucma_close(struct inode *inode, struct file *filp)
@@ -1250,6 +1251,7 @@ static const struct file_operations ucma_fops = {
 	.release = ucma_close,
 	.write = ucma_write,
 	.poll = ucma_poll,
+	.llseek = no_llseek,
 };
 
 static struct miscdevice ucma_misc = {
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index e7db054fb1c8..6babb72b39fc 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -781,7 +781,7 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
 {
 	struct ib_umad_port *port;
 	struct ib_umad_file *file;
-	int ret = 0;
+	int ret;
 
 	port = container_of(inode->i_cdev, struct ib_umad_port, cdev);
 	if (port)
@@ -814,6 +814,8 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
 
 	list_add_tail(&file->port_list, &port->file_list);
 
+	ret = nonseekable_open(inode, filp);
+
 out:
 	mutex_unlock(&port->file_mutex);
 	return ret;
@@ -866,7 +868,8 @@ static const struct file_operations umad_fops = {
 	.compat_ioctl = ib_umad_compat_ioctl,
 #endif
 	.open = ib_umad_open,
-	.release = ib_umad_close
+	.release = ib_umad_close,
+	.llseek = no_llseek,
 };
 
 static int ib_umad_sm_open(struct inode *inode, struct file *filp)
@@ -903,7 +906,7 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp)
 
 	filp->private_data = port;
 
-	return 0;
+	return nonseekable_open(inode, filp);
 
 fail:
 	kref_put(&port->umad_dev->ref, ib_umad_release_dev);
@@ -933,7 +936,8 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp)
 static const struct file_operations umad_sm_fops = {
 	.owner = THIS_MODULE,
 	.open = ib_umad_sm_open,
-	.release = ib_umad_sm_close
+	.release = ib_umad_sm_close,
+	.llseek = no_llseek,
 };
 
 static struct ib_client umad_client = {
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index fb3526254426..ec83e9fe387b 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -369,7 +369,8 @@ static const struct file_operations uverbs_event_fops = {
 	.read = ib_uverbs_event_read,
 	.poll = ib_uverbs_event_poll,
 	.release = ib_uverbs_event_close,
-	.fasync = ib_uverbs_event_fasync
+	.fasync = ib_uverbs_event_fasync,
+	.llseek = no_llseek,
 };
 
 void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
@@ -623,7 +624,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
 
 	filp->private_data = file;
 
-	return 0;
+	return nonseekable_open(inode, filp);
 
 err_module:
 	module_put(dev->ib_dev->owner);
@@ -651,7 +652,8 @@ static const struct file_operations uverbs_fops = {
 	.owner = THIS_MODULE,
 	.write = ib_uverbs_write,
 	.open = ib_uverbs_open,
-	.release = ib_uverbs_close
+	.release = ib_uverbs_close,
+	.llseek = no_llseek,
 };
 
 static const struct file_operations uverbs_mmap_fops = {
@@ -659,7 +661,8 @@ static const struct file_operations uverbs_mmap_fops = {
 	.write = ib_uverbs_write,
 	.mmap = ib_uverbs_mmap,
 	.open = ib_uverbs_open,
-	.release = ib_uverbs_close
+	.release = ib_uverbs_close,
+	.llseek = no_llseek,
 };
 
 static struct ib_client uverbs_client = {
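Every character-device change in ucm.c, ucma.c, user_mad.c and uverbs_main.c above follows one pattern: open() returns nonseekable_open() instead of 0, and the file_operations gains .llseek = no_llseek, so lseek() on these nodes fails instead of silently succeeding on a device that cannot seek. A minimal sketch of the pattern for a hypothetical device (example_open and example_fops are illustrative names, not part of this patch):

#include <linux/fs.h>
#include <linux/module.h>

static int example_open(struct inode *inode, struct file *filp)
{
	/* ... per-open setup ... */
	return nonseekable_open(inode, filp);	/* mark the file non-seekable */
}

static const struct file_operations example_fops = {
	.owner	 = THIS_MODULE,
	.open	 = example_open,
	.llseek	 = no_llseek,			/* reject lseek() outright */
};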
diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h
index f7ff66f98361..6ae698e68775 100644
--- a/drivers/infiniband/hw/amso1100/c2.h
+++ b/drivers/infiniband/hw/amso1100/c2.h
@@ -250,7 +250,7 @@ struct c2_array {
 struct sp_chunk {
 	struct sp_chunk *next;
 	dma_addr_t dma_addr;
-	DECLARE_PCI_UNMAP_ADDR(mapping);
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 	u16 head;
 	u16 shared_ptr[0];
 };
diff --git a/drivers/infiniband/hw/amso1100/c2_alloc.c b/drivers/infiniband/hw/amso1100/c2_alloc.c
index d4f5f5d42e90..78d247ec6961 100644
--- a/drivers/infiniband/hw/amso1100/c2_alloc.c
+++ b/drivers/infiniband/hw/amso1100/c2_alloc.c
@@ -49,7 +49,7 @@ static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask,
 		return -ENOMEM;
 
 	new_head->dma_addr = dma_addr;
-	pci_unmap_addr_set(new_head, mapping, new_head->dma_addr);
+	dma_unmap_addr_set(new_head, mapping, new_head->dma_addr);
 
 	new_head->next = NULL;
 	new_head->head = 0;
@@ -81,7 +81,7 @@ void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)
 	while (root) {
 		next = root->next;
 		dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root,
-				  pci_unmap_addr(root, mapping));
+				  dma_unmap_addr(root, mapping));
 		root = next;
 	}
 }
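The DECLARE_PCI_UNMAP_ADDR() changes in the amso1100 files above, and the cxgb3 and mthca hunks below, are one mechanical rename: the PCI-specific DECLARE_PCI_UNMAP_ADDR()/pci_unmap_addr()/pci_unmap_addr_set() helpers become the generic DEFINE_DMA_UNMAP_ADDR()/dma_unmap_addr()/dma_unmap_addr_set() from <linux/dma-mapping.h>, which still compile away on platforms that do not need to remember unmap addresses. A small sketch of how the trio works together (the struct and function names here are illustrative, not from the patch):

#include <linux/dma-mapping.h>

struct buf_desc {
	void	   *cpu_addr;
	dma_addr_t  dma_addr;
	DEFINE_DMA_UNMAP_ADDR(mapping);	/* empty on !NEED_DMA_MAP_STATE configs */
};

/* Remember the mapping at map time ... */
static void buf_desc_remember(struct buf_desc *d, dma_addr_t dma)
{
	d->dma_addr = dma;
	dma_unmap_addr_set(d, mapping, dma);
}

/* ... and read it back when unmapping or freeing. */
static dma_addr_t buf_desc_mapping(struct buf_desc *d)
{
	return dma_unmap_addr(d, mapping);
}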
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
index f7b0fc23f413..49e0e8533f74 100644
--- a/drivers/infiniband/hw/amso1100/c2_cq.c
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -257,7 +257,7 @@ int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
 static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
 {
 	dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
-			  mq->msg_pool.host, pci_unmap_addr(mq, mapping));
+			  mq->msg_pool.host, dma_unmap_addr(mq, mapping));
 }
 
 static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
@@ -278,7 +278,7 @@ static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
 		      NULL,	/* peer (currently unknown) */
 		      C2_MQ_HOST_TARGET);
 
-	pci_unmap_addr_set(mq, mapping, mq->host_dma);
+	dma_unmap_addr_set(mq, mapping, mq->host_dma);
 
 	return 0;
 }
diff --git a/drivers/infiniband/hw/amso1100/c2_mq.h b/drivers/infiniband/hw/amso1100/c2_mq.h
index acede007b94a..fc1b9a7cec4b 100644
--- a/drivers/infiniband/hw/amso1100/c2_mq.h
+++ b/drivers/infiniband/hw/amso1100/c2_mq.h
@@ -71,7 +71,7 @@ struct c2_mq {
 		u8 __iomem *adapter;
 	} msg_pool;
 	dma_addr_t host_dma;
-	DECLARE_PCI_UNMAP_ADDR(mapping);
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 	u16 hint_count;
 	u16 priv;
 	struct c2_mq_shared __iomem *peer;
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.h b/drivers/infiniband/hw/amso1100/c2_provider.h
index 1076df2ee96a..bf189987711f 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.h
+++ b/drivers/infiniband/hw/amso1100/c2_provider.h
@@ -50,7 +50,7 @@
 
 struct c2_buf_list {
 	void *buf;
-	DECLARE_PCI_UNMAP_ADDR(mapping)
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 };
 
 
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index 78c4bcc6ef60..85cfae4cad71 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -524,7 +524,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
 		err = -ENOMEM;
 		goto bail1;
 	}
-	pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
+	dma_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
 	pr_debug("%s rep_vq va %p dma %llx\n", __func__, q1_pages,
 		 (unsigned long long) c2dev->rep_vq.host_dma);
 	c2_mq_rep_init(&c2dev->rep_vq,
@@ -545,7 +545,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
 		err = -ENOMEM;
 		goto bail2;
 	}
-	pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
+	dma_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
 	pr_debug("%s aeq va %p dma %llx\n", __func__, q2_pages,
 		 (unsigned long long) c2dev->aeq.host_dma);
 	c2_mq_rep_init(&c2dev->aeq,
@@ -596,11 +596,11 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
  bail3:
 	dma_free_coherent(&c2dev->pcidev->dev,
 			  c2dev->aeq.q_size * c2dev->aeq.msg_size,
-			  q2_pages, pci_unmap_addr(&c2dev->aeq, mapping));
+			  q2_pages, dma_unmap_addr(&c2dev->aeq, mapping));
  bail2:
 	dma_free_coherent(&c2dev->pcidev->dev,
 			  c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
-			  q1_pages, pci_unmap_addr(&c2dev->rep_vq, mapping));
+			  q1_pages, dma_unmap_addr(&c2dev->rep_vq, mapping));
  bail1:
 	c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
  bail0:
@@ -637,13 +637,13 @@ void __devexit c2_rnic_term(struct c2_dev *c2dev)
 	dma_free_coherent(&c2dev->pcidev->dev,
 			  c2dev->aeq.q_size * c2dev->aeq.msg_size,
 			  c2dev->aeq.msg_pool.host,
-			  pci_unmap_addr(&c2dev->aeq, mapping));
+			  dma_unmap_addr(&c2dev->aeq, mapping));
 
 	/* Free the verbs reply queue */
 	dma_free_coherent(&c2dev->pcidev->dev,
 			  c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
 			  c2dev->rep_vq.msg_pool.host,
-			  pci_unmap_addr(&c2dev->rep_vq, mapping));
+			  dma_unmap_addr(&c2dev->rep_vq, mapping));
 
 	/* Free the MQ shared pointer pool */
 	c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 35f286f1ad1e..005b7b52bc1e 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -174,7 +174,7 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
 		kfree(cq->sw_queue);
 		return -ENOMEM;
 	}
-	pci_unmap_addr_set(cq, mapping, cq->dma_addr);
+	dma_unmap_addr_set(cq, mapping, cq->dma_addr);
 	memset(cq->queue, 0, size);
 	setup.id = cq->cqid;
 	setup.base_addr = (u64) (cq->dma_addr);
@@ -297,7 +297,7 @@ int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
 		goto err4;
 
 	memset(wq->queue, 0, depth * sizeof(union t3_wr));
-	pci_unmap_addr_set(wq, mapping, wq->dma_addr);
+	dma_unmap_addr_set(wq, mapping, wq->dma_addr);
 	wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
 	if (!kernel_domain)
 		wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
@@ -325,7 +325,7 @@ int cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
 	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
 			  (1UL << (cq->size_log2))
 			  * sizeof(struct t3_cqe), cq->queue,
-			  pci_unmap_addr(cq, mapping));
+			  dma_unmap_addr(cq, mapping));
 	cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
 	return err;
 }
@@ -336,7 +336,7 @@ int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq,
 	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
 			  (1UL << (wq->size_log2))
 			  * sizeof(union t3_wr), wq->queue,
-			  pci_unmap_addr(wq, mapping));
+			  dma_unmap_addr(wq, mapping));
 	kfree(wq->sq);
 	cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2));
 	kfree(wq->rq);
@@ -537,7 +537,7 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
 		err = -ENOMEM;
 		goto err;
 	}
-	pci_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
+	dma_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
 			   rdev_p->ctrl_qp.dma_addr);
 	rdev_p->ctrl_qp.doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
 	memset(rdev_p->ctrl_qp.workq, 0,
@@ -583,7 +583,7 @@ static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p)
 	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
 			  (1UL << T3_CTRL_QP_SIZE_LOG2)
 			  * sizeof(union t3_wr), rdev_p->ctrl_qp.workq,
-			  pci_unmap_addr(&rdev_p->ctrl_qp, mapping));
+			  dma_unmap_addr(&rdev_p->ctrl_qp, mapping));
 	return cxio_hal_clear_qp_ctx(rdev_p, T3_CTRL_QP_ID);
 }
 
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index 073373c2c560..8f0caf7d4482 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -71,7 +71,7 @@ struct cxio_hal_ctrl_qp {
 	wait_queue_head_t waitq;/* wait for RspQ/CQE msg */
 	union t3_wr *workq;	/* the work request queue */
 	dma_addr_t dma_addr;	/* pci bus address of the workq */
-	DECLARE_PCI_UNMAP_ADDR(mapping)
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 	void __iomem *doorbell;
 };
 
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index 15073b2da1c5..e5ddb63e7d23 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -691,7 +691,7 @@ struct t3_swrq {
 struct t3_wq {
 	union t3_wr *queue;		/* DMA accessable memory */
 	dma_addr_t dma_addr;		/* DMA address for HW */
-	DECLARE_PCI_UNMAP_ADDR(mapping)	/* unmap kruft */
+	DEFINE_DMA_UNMAP_ADDR(mapping);	/* unmap kruft */
 	u32 error;			/* 1 once we go to ERROR */
 	u32 qpid;
 	u32 wptr;			/* idx to next available WR slot */
@@ -718,7 +718,7 @@ struct t3_cq {
 	u32 wptr;
 	u32 size_log2;
 	dma_addr_t dma_addr;
-	DECLARE_PCI_UNMAP_ADDR(mapping)
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 	struct t3_cqe *queue;
 	struct t3_cqe *sw_queue;
 	u32 sw_rptr;
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index 63f975f3e30f..8e77dc543dd1 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -47,8 +47,6 @@ MODULE_DESCRIPTION("Chelsio T3 RDMA Driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRV_VERSION);
 
-cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS];
-
 static void open_rnic_dev(struct t3cdev *);
 static void close_rnic_dev(struct t3cdev *);
 static void iwch_event_handler(struct t3cdev *, u32, u32);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 4fef03296276..ebfb117ba68b 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -102,12 +102,9 @@ static unsigned int cong_flavor = 1;
 module_param(cong_flavor, uint, 0644);
 MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");
 
-static void process_work(struct work_struct *work);
 static struct workqueue_struct *workq;
-static DECLARE_WORK(skb_work, process_work);
 
 static struct sk_buff_head rxq;
-static cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS];
 
 static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
 static void ep_timeout(unsigned long arg);
@@ -151,7 +148,7 @@ int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2
 		return -EIO;
 	}
 	error = l2t_send(tdev, skb, l2e);
-	if (error)
+	if (error < 0)
 		kfree_skb(skb);
 	return error;
 }
@@ -167,7 +164,7 @@ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
 		return -EIO;
 	}
 	error = cxgb3_ofld_send(tdev, skb);
-	if (error)
+	if (error < 0)
 		kfree_skb(skb);
 	return error;
 }
@@ -302,27 +299,6 @@ static void release_ep_resources(struct iwch_ep *ep)
 	put_ep(&ep->com);
 }
 
-static void process_work(struct work_struct *work)
-{
-	struct sk_buff *skb = NULL;
-	void *ep;
-	struct t3cdev *tdev;
-	int ret;
-
-	while ((skb = skb_dequeue(&rxq))) {
-		ep = *((void **) (skb->cb));
-		tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
-		ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
-		if (ret & CPL_RET_BUF_DONE)
-			kfree_skb(skb);
-
-		/*
-		 * ep was referenced in sched(), and is freed here.
-		 */
-		put_ep((struct iwch_ep_common *)ep);
-	}
-}
-
 static int status2errno(int status)
 {
 	switch (status) {
@@ -2157,7 +2133,49 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
 
 /*
  * All the CM events are handled on a work queue to have a safe context.
+ * These are the real handlers that are called from the work queue.
  */
+static const cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS] = {
+	[CPL_ACT_ESTABLISH] = act_establish,
+	[CPL_ACT_OPEN_RPL] = act_open_rpl,
+	[CPL_RX_DATA] = rx_data,
+	[CPL_TX_DMA_ACK] = tx_ack,
+	[CPL_ABORT_RPL_RSS] = abort_rpl,
+	[CPL_ABORT_RPL] = abort_rpl,
+	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
+	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
+	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
+	[CPL_PASS_ESTABLISH] = pass_establish,
+	[CPL_PEER_CLOSE] = peer_close,
+	[CPL_ABORT_REQ_RSS] = peer_abort,
+	[CPL_CLOSE_CON_RPL] = close_con_rpl,
+	[CPL_RDMA_TERMINATE] = terminate,
+	[CPL_RDMA_EC_STATUS] = ec_status,
+};
+
+static void process_work(struct work_struct *work)
+{
+	struct sk_buff *skb = NULL;
+	void *ep;
+	struct t3cdev *tdev;
+	int ret;
+
+	while ((skb = skb_dequeue(&rxq))) {
+		ep = *((void **) (skb->cb));
+		tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
+		ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
+		if (ret & CPL_RET_BUF_DONE)
+			kfree_skb(skb);
+
+		/*
+		 * ep was referenced in sched(), and is freed here.
+		 */
+		put_ep((struct iwch_ep_common *)ep);
+	}
+}
+
+static DECLARE_WORK(skb_work, process_work);
+
 static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 {
 	struct iwch_ep_common *epc = ctx;
@@ -2189,6 +2207,29 @@ static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	return CPL_RET_BUF_DONE;
 }
 
+/*
+ * All upcalls from the T3 Core go to sched() to schedule the
+ * processing on a work queue.
+ */
+cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS] = {
+	[CPL_ACT_ESTABLISH] = sched,
+	[CPL_ACT_OPEN_RPL] = sched,
+	[CPL_RX_DATA] = sched,
+	[CPL_TX_DMA_ACK] = sched,
+	[CPL_ABORT_RPL_RSS] = sched,
+	[CPL_ABORT_RPL] = sched,
+	[CPL_PASS_OPEN_RPL] = sched,
+	[CPL_CLOSE_LISTSRV_RPL] = sched,
+	[CPL_PASS_ACCEPT_REQ] = sched,
+	[CPL_PASS_ESTABLISH] = sched,
+	[CPL_PEER_CLOSE] = sched,
+	[CPL_CLOSE_CON_RPL] = sched,
+	[CPL_ABORT_REQ_RSS] = sched,
+	[CPL_RDMA_TERMINATE] = sched,
+	[CPL_RDMA_EC_STATUS] = sched,
+	[CPL_SET_TCB_RPL] = set_tcb_rpl,
+};
+
 int __init iwch_cm_init(void)
 {
 	skb_queue_head_init(&rxq);
@@ -2197,46 +2238,6 @@ int __init iwch_cm_init(void)
 	if (!workq)
 		return -ENOMEM;
 
-	/*
-	 * All upcalls from the T3 Core go to sched() to
-	 * schedule the processing on a work queue.
-	 */
-	t3c_handlers[CPL_ACT_ESTABLISH] = sched;
-	t3c_handlers[CPL_ACT_OPEN_RPL] = sched;
-	t3c_handlers[CPL_RX_DATA] = sched;
-	t3c_handlers[CPL_TX_DMA_ACK] = sched;
-	t3c_handlers[CPL_ABORT_RPL_RSS] = sched;
-	t3c_handlers[CPL_ABORT_RPL] = sched;
-	t3c_handlers[CPL_PASS_OPEN_RPL] = sched;
-	t3c_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
-	t3c_handlers[CPL_PASS_ACCEPT_REQ] = sched;
-	t3c_handlers[CPL_PASS_ESTABLISH] = sched;
-	t3c_handlers[CPL_PEER_CLOSE] = sched;
-	t3c_handlers[CPL_CLOSE_CON_RPL] = sched;
-	t3c_handlers[CPL_ABORT_REQ_RSS] = sched;
-	t3c_handlers[CPL_RDMA_TERMINATE] = sched;
-	t3c_handlers[CPL_RDMA_EC_STATUS] = sched;
-	t3c_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl;
-
-	/*
-	 * These are the real handlers that are called from a
-	 * work queue.
-	 */
-	work_handlers[CPL_ACT_ESTABLISH] = act_establish;
-	work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
-	work_handlers[CPL_RX_DATA] = rx_data;
-	work_handlers[CPL_TX_DMA_ACK] = tx_ack;
-	work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
-	work_handlers[CPL_ABORT_RPL] = abort_rpl;
-	work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
-	work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
-	work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
-	work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
-	work_handlers[CPL_PEER_CLOSE] = peer_close;
-	work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
-	work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
-	work_handlers[CPL_RDMA_TERMINATE] = terminate;
-	work_handlers[CPL_RDMA_EC_STATUS] = ec_status;
 	return 0;
 }
 
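In iwch_cm.c the per-opcode assignments that used to run in iwch_cm_init() become statically initialized tables; C99 designated array initializers leave unlisted CPL opcodes NULL and allow the work_handlers table to be const. A generic, self-contained sketch of the idiom (the enum and handlers here are illustrative, not the driver's):

#include <stdio.h>

enum { OP_OPEN, OP_CLOSE, OP_DATA, NUM_OPS };

typedef int (*op_handler_t)(int arg);

static int handle_open(int arg)  { return printf("open %d\n", arg); }
static int handle_close(int arg) { return printf("close %d\n", arg); }

/* Entries not named here (e.g. OP_DATA) are implicitly NULL. */
static const op_handler_t handlers[NUM_OPS] = {
	[OP_OPEN]  = handle_open,
	[OP_CLOSE] = handle_close,
};

int main(void)
{
	if (handlers[OP_OPEN])		/* dispatch only if a handler is registered */
		handlers[OP_OPEN](1);
	return 0;
}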
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index cc2ddd29ac57..5a219a2fdf16 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -661,6 +661,14 @@ repoll:
 			wc->opcode = IB_WC_FETCH_ADD;
 			wc->byte_len = 8;
 			break;
+		case MLX4_OPCODE_MASKED_ATOMIC_CS:
+			wc->opcode = IB_WC_MASKED_COMP_SWAP;
+			wc->byte_len = 8;
+			break;
+		case MLX4_OPCODE_MASKED_ATOMIC_FA:
+			wc->opcode = IB_WC_MASKED_FETCH_ADD;
+			wc->byte_len = 8;
+			break;
 		case MLX4_OPCODE_BIND_MW:
 			wc->opcode = IB_WC_BIND_MW;
 			break;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 01f2a3f93355..39051417054c 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -139,6 +139,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 	props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
 	props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
 		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
+	props->masked_atomic_cap = IB_ATOMIC_HCA;
 	props->max_pkeys = dev->dev->caps.pkey_table_len[1];
 	props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
 	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 5643f4a8ffef..6a60827b2301 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -74,17 +74,19 @@ enum {
 };
 
 static const __be32 mlx4_ib_opcode[] = {
 	[IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND),
 	[IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO),
 	[IB_WR_SEND_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_SEND_IMM),
 	[IB_WR_RDMA_WRITE] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
 	[IB_WR_RDMA_WRITE_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
 	[IB_WR_RDMA_READ] = cpu_to_be32(MLX4_OPCODE_RDMA_READ),
 	[IB_WR_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
 	[IB_WR_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
 	[IB_WR_SEND_WITH_INV] = cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
 	[IB_WR_LOCAL_INV] = cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
 	[IB_WR_FAST_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR),
+	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
+	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
 };
 
 static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
@@ -1407,6 +1409,9 @@ static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *
 	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
 		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
 		aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
+	} else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
+		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
+		aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask);
 	} else {
 		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
 		aseg->compare = 0;
@@ -1414,6 +1419,15 @@ static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *
 
 }
 
+static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
+				  struct ib_send_wr *wr)
+{
+	aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
+	aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask);
+	aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
+	aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask);
+}
+
 static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
 			     struct ib_send_wr *wr)
 {
@@ -1567,6 +1581,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			switch (wr->opcode) {
 			case IB_WR_ATOMIC_CMP_AND_SWP:
 			case IB_WR_ATOMIC_FETCH_AND_ADD:
+			case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
 				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
 					      wr->wr.atomic.rkey);
 				wqe += sizeof (struct mlx4_wqe_raddr_seg);
@@ -1579,6 +1594,19 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
 				break;
 
+			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
+				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
+					      wr->wr.atomic.rkey);
+				wqe += sizeof (struct mlx4_wqe_raddr_seg);
+
+				set_masked_atomic_seg(wqe, wr);
+				wqe += sizeof (struct mlx4_wqe_masked_atomic_seg);
+
+				size += (sizeof (struct mlx4_wqe_raddr_seg) +
+					 sizeof (struct mlx4_wqe_masked_atomic_seg)) / 16;
+
+				break;
+
 			case IB_WR_RDMA_READ:
 			case IB_WR_RDMA_WRITE:
 			case IB_WR_RDMA_WRITE_WITH_IMM:
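The mlx4 hunks add the masked atomic work requests: masked compare-and-swap compares and overwrites only the bits selected by the masks, while masked fetch-and-add uses compare_add_mask to mark field boundaries that carries must not cross. As a rough plain-C model of what the responder does for IB_WR_MASKED_ATOMIC_CMP_AND_SWP (an illustration of the semantics, not driver or hardware code; field names follow the ib_send_wr atomic union):

#include <stdint.h>

/*
 * Masked compare-and-swap on a 64-bit value: only bits set in
 * compare_mask take part in the comparison, and only bits set in
 * swap_mask are overwritten when the comparison succeeds.
 */
uint64_t masked_cmp_swap(uint64_t *target, uint64_t compare,
			 uint64_t compare_mask, uint64_t swap,
			 uint64_t swap_mask)
{
	uint64_t old = *target;

	if ((old & compare_mask) == (compare & compare_mask))
		*target = (old & ~swap_mask) | (swap & swap_mask);

	return old;	/* the original value is returned to the requester */
}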
diff --git a/drivers/infiniband/hw/mthca/mthca_allocator.c b/drivers/infiniband/hw/mthca/mthca_allocator.c
index c5ccc2daab60..b4e0cf4e95cd 100644
--- a/drivers/infiniband/hw/mthca/mthca_allocator.c
+++ b/drivers/infiniband/hw/mthca/mthca_allocator.c
@@ -211,7 +211,7 @@ int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
 		if (!buf->direct.buf)
 			return -ENOMEM;
 
-		pci_unmap_addr_set(&buf->direct, mapping, t);
+		dma_unmap_addr_set(&buf->direct, mapping, t);
 
 		memset(buf->direct.buf, 0, size);
 
@@ -251,7 +251,7 @@ int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
 				goto err_free;
 
 			dma_list[i] = t;
-			pci_unmap_addr_set(&buf->page_list[i], mapping, t);
+			dma_unmap_addr_set(&buf->page_list[i], mapping, t);
 
 			clear_page(buf->page_list[i].buf);
 		}
@@ -289,12 +289,12 @@ void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf,
 
 	if (is_direct)
 		dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
-				  pci_unmap_addr(&buf->direct, mapping));
+				  dma_unmap_addr(&buf->direct, mapping));
 	else {
 		for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
 			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
 					  buf->page_list[i].buf,
-					  pci_unmap_addr(&buf->page_list[i],
+					  dma_unmap_addr(&buf->page_list[i],
 							 mapping));
 		kfree(buf->page_list);
 	}
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 9388164b6053..8e8c728aff88 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -504,7 +504,7 @@ static int mthca_create_eq(struct mthca_dev *dev,
 			goto err_out_free_pages;
 
 		dma_list[i] = t;
-		pci_unmap_addr_set(&eq->page_list[i], mapping, t);
+		dma_unmap_addr_set(&eq->page_list[i], mapping, t);
 
 		clear_page(eq->page_list[i].buf);
 	}
@@ -579,7 +579,7 @@ static int mthca_create_eq(struct mthca_dev *dev,
 		if (eq->page_list[i].buf)
 			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
 					  eq->page_list[i].buf,
-					  pci_unmap_addr(&eq->page_list[i],
+					  dma_unmap_addr(&eq->page_list[i],
 							 mapping));
 
 	mthca_free_mailbox(dev, mailbox);
@@ -629,7 +629,7 @@ static void mthca_free_eq(struct mthca_dev *dev,
 	for (i = 0; i < npages; ++i)
 		pci_free_consistent(dev->pdev, PAGE_SIZE,
 				    eq->page_list[i].buf,
-				    pci_unmap_addr(&eq->page_list[i], mapping));
+				    dma_unmap_addr(&eq->page_list[i], mapping));
 
 	kfree(eq->page_list);
 	mthca_free_mailbox(dev, mailbox);
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 90f4c4d2e983..596acc45569b 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -46,7 +46,7 @@
 
 struct mthca_buf_list {
 	void *buf;
-	DECLARE_PCI_UNMAP_ADDR(mapping)
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 };
 
 union mthca_buf {
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index c36a3f514929..86acb7d57064 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -1297,7 +1297,7 @@ int nes_destroy_cqp(struct nes_device *nesdev)
 /**
  * nes_init_1g_phy
  */
-int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
+static int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
 {
 	u32 counter = 0;
 	u16 phy_data;
@@ -1351,7 +1351,7 @@ int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
 /**
  * nes_init_2025_phy
  */
-int nes_init_2025_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
+static int nes_init_2025_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
 {
 	u32 temp_phy_data = 0;
 	u32 temp_phy_data2 = 0;
@@ -2458,7 +2458,6 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
 		return;
 	}
 	nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_INTERRUPT;
-	spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
 
 	/* ack the MAC interrupt */
 	mac_status = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (mac_index * 0x200));
@@ -2469,11 +2468,9 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
 
 	if (mac_status & (NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT)) {
 		nesdev->link_status_interrupts++;
-		if (0 == (++nesadapter->link_interrupt_count[mac_index] % ((u16)NES_MAX_LINK_INTERRUPTS))) {
-			spin_lock_irqsave(&nesadapter->phy_lock, flags);
+		if (0 == (++nesadapter->link_interrupt_count[mac_index] % ((u16)NES_MAX_LINK_INTERRUPTS)))
 			nes_reset_link(nesdev, mac_index);
-			spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
-		}
+
 		/* read the PHY interrupt status register */
 		if ((nesadapter->OneG_Mode) &&
 		(nesadapter->phy_type[mac_index] != NES_PHY_TYPE_PUMA_1G)) {
@@ -2587,6 +2584,7 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
 			break;
 		}
 	}
+	spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
 
 	if (phy_data & 0x0004) {
 		if (wide_ppm_offset &&
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index b7c813f4be43..9f4cadf9f851 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1461,11 +1461,14 @@ static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd
 		et_cmd->transceiver = XCVR_INTERNAL;
 		et_cmd->phy_address = mac_index;
 	} else {
+		unsigned long flags;
 		et_cmd->supported = SUPPORTED_1000baseT_Full
 				  | SUPPORTED_Autoneg;
 		et_cmd->advertising = ADVERTISED_1000baseT_Full
 				    | ADVERTISED_Autoneg;
+		spin_lock_irqsave(&nesadapter->phy_lock, flags);
 		nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
+		spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
 		if (phy_data & 0x1000)
 			et_cmd->autoneg = AUTONEG_ENABLE;
 		else
@@ -1503,12 +1506,15 @@ static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd
 	struct nes_vnic *nesvnic = netdev_priv(netdev);
 	struct nes_device *nesdev = nesvnic->nesdev;
 	struct nes_adapter *nesadapter = nesdev->nesadapter;
-	u16 phy_data;
 
 	if ((nesadapter->OneG_Mode) &&
 	    (nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_PUMA_1G)) {
-		nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index],
-				&phy_data);
+		unsigned long flags;
+		u16 phy_data;
+		u8 phy_index = nesadapter->phy_index[nesdev->mac_index];
+
+		spin_lock_irqsave(&nesadapter->phy_lock, flags);
+		nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
 		if (et_cmd->autoneg) {
 			/* Turn on Full duplex, Autoneg, and restart autonegotiation */
 			phy_data |= 0x1300;
@@ -1516,8 +1522,8 @@ static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd
 			/* Turn off autoneg */
 			phy_data &= ~0x1000;
 		}
-		nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index],
-				phy_data);
+		nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data);
+		spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
 	}
 
 	return 0;
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index 186623d86959..a9f5dd272f1a 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -381,12 +381,8 @@ static u16 nes_read16_eeprom(void __iomem *addr, u16 offset)
  */
 void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 data)
 {
-	struct nes_adapter *nesadapter = nesdev->nesadapter;
 	u32 u32temp;
 	u32 counter;
-	unsigned long flags;
-
-	spin_lock_irqsave(&nesadapter->phy_lock, flags);
 
 	nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
 			0x50020000 | data | ((u32)phy_reg << 18) | ((u32)phy_addr << 23));
@@ -402,8 +398,6 @@ void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u1
 	if (!(u32temp & 1))
 		nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n",
 				u32temp);
-
-	spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
 }
 
 
@@ -414,14 +408,11 @@ void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u1
  */
 void nes_read_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 *data)
 {
-	struct nes_adapter *nesadapter = nesdev->nesadapter;
 	u32 u32temp;
 	u32 counter;
-	unsigned long flags;
 
 	/* nes_debug(NES_DBG_PHY, "phy addr = %d, mac_index = %d\n",
 			phy_addr, nesdev->mac_index); */
-	spin_lock_irqsave(&nesadapter->phy_lock, flags);
 
 	nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
 			0x60020000 | ((u32)phy_reg << 18) | ((u32)phy_addr << 23));
@@ -441,7 +432,6 @@ void nes_read_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16
 	} else {
 		*data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
 	}
-	spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
 }
 
 
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index e54f312e4bdc..925e1f2d1d55 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -374,7 +374,7 @@ static int alloc_fast_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
 /*
  * nes_alloc_fast_reg_mr
  */
-struct ib_mr *nes_alloc_fast_reg_mr(struct ib_pd *ibpd, int max_page_list_len)
+static struct ib_mr *nes_alloc_fast_reg_mr(struct ib_pd *ibpd, int max_page_list_len)
 {
 	struct nes_pd *nespd = to_nespd(ibpd);
 	struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index d10b4ec68d28..40e858492f90 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -49,6 +49,25 @@ static u32 ipoib_get_rx_csum(struct net_device *dev)
 		!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
 }
 
+static int ipoib_set_tso(struct net_device *dev, u32 data)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+	if (data) {
+		if (!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
+		    (dev->features & NETIF_F_SG) &&
+		    (priv->hca_caps & IB_DEVICE_UD_TSO)) {
+			dev->features |= NETIF_F_TSO;
+		} else {
+			ipoib_warn(priv, "can't set TSO on\n");
+			return -EOPNOTSUPP;
+		}
+	} else
+		dev->features &= ~NETIF_F_TSO;
+
+	return 0;
+}
+
 static int ipoib_get_coalesce(struct net_device *dev,
 			      struct ethtool_coalesce *coal)
 {
@@ -131,6 +150,7 @@ static void ipoib_get_ethtool_stats(struct net_device *dev,
 static const struct ethtool_ops ipoib_ethtool_ops = {
 	.get_drvinfo = ipoib_get_drvinfo,
 	.get_rx_csum = ipoib_get_rx_csum,
+	.set_tso = ipoib_set_tso,
 	.get_coalesce = ipoib_get_coalesce,
 	.set_coalesce = ipoib_set_coalesce,
 	.get_flags = ethtool_op_get_flags,
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 93399dff0c6f..7b2fc98e2f2b 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -325,7 +325,7 @@ iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn)
 	 */
 	if (ib_conn) {
 		ib_conn->iser_conn = NULL;
-		iser_conn_put(ib_conn);
+		iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */
 	}
 }
 
@@ -357,11 +357,12 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
 	/* binds the iSER connection retrieved from the previously
 	 * connected ep_handle to the iSCSI layer connection. exchanges
 	 * connection pointers */
-	iser_err("binding iscsi conn %p to iser_conn %p\n",conn,ib_conn);
+	iser_err("binding iscsi/iser conn %p %p to ib_conn %p\n",
+		 conn, conn->dd_data, ib_conn);
 	iser_conn = conn->dd_data;
 	ib_conn->iser_conn = iser_conn;
 	iser_conn->ib_conn = ib_conn;
-	iser_conn_get(ib_conn);
+	iser_conn_get(ib_conn); /* ref iscsi/ib conn binding */
 	return 0;
 }
 
@@ -382,7 +383,7 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
 	 * There is no unbind event so the stop callback
 	 * must release the ref from the bind.
 	 */
-	iser_conn_put(ib_conn);
+	iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */
 	}
 	iser_conn->ib_conn = NULL;
 }
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 036934cdcb92..f1df01567bb6 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -232,6 +232,7 @@ struct iser_device {
232 struct ib_cq *tx_cq; 232 struct ib_cq *tx_cq;
233 struct ib_mr *mr; 233 struct ib_mr *mr;
234 struct tasklet_struct cq_tasklet; 234 struct tasklet_struct cq_tasklet;
235 struct ib_event_handler event_handler;
235 struct list_head ig_list; /* entry in ig devices list */ 236 struct list_head ig_list; /* entry in ig devices list */
236 int refcount; 237 int refcount;
237}; 238};
@@ -246,7 +247,6 @@ struct iser_conn {
246 struct rdma_cm_id *cma_id; /* CMA ID */ 247 struct rdma_cm_id *cma_id; /* CMA ID */
247 struct ib_qp *qp; /* QP */ 248 struct ib_qp *qp; /* QP */
248 struct ib_fmr_pool *fmr_pool; /* pool of IB FMRs */ 249 struct ib_fmr_pool *fmr_pool; /* pool of IB FMRs */
249 int disc_evt_flag; /* disconn event delivered */
250 wait_queue_head_t wait; /* waitq for conn/disconn */ 250 wait_queue_head_t wait; /* waitq for conn/disconn */
251 int post_recv_buf_count; /* posted rx count */ 251 int post_recv_buf_count; /* posted rx count */
252 atomic_t post_send_buf_count; /* posted tx count */ 252 atomic_t post_send_buf_count; /* posted tx count */
@@ -320,7 +320,7 @@ void iser_conn_init(struct iser_conn *ib_conn);
320 320
321void iser_conn_get(struct iser_conn *ib_conn); 321void iser_conn_get(struct iser_conn *ib_conn);
322 322
323void iser_conn_put(struct iser_conn *ib_conn); 323int iser_conn_put(struct iser_conn *ib_conn, int destroy_cma_id_allowed);
324 324
325void iser_conn_terminate(struct iser_conn *ib_conn); 325void iser_conn_terminate(struct iser_conn *ib_conn);
326 326
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index b89d76b39a13..9876865732f7 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -54,6 +54,13 @@ static void iser_qp_event_callback(struct ib_event *cause, void *context)
54 iser_err("got qp event %d\n",cause->event); 54 iser_err("got qp event %d\n",cause->event);
55} 55}
56 56
57static void iser_event_handler(struct ib_event_handler *handler,
58 struct ib_event *event)
59{
60 iser_err("async event %d on device %s port %d\n", event->event,
61 event->device->name, event->element.port_num);
62}
63
57/** 64/**
58 * iser_create_device_ib_res - creates Protection Domain (PD), Completion 65 * iser_create_device_ib_res - creates Protection Domain (PD), Completion
59 * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with 66 * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
@@ -96,8 +103,15 @@ static int iser_create_device_ib_res(struct iser_device *device)
96 if (IS_ERR(device->mr)) 103 if (IS_ERR(device->mr))
97 goto dma_mr_err; 104 goto dma_mr_err;
98 105
106 INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
107 iser_event_handler);
108 if (ib_register_event_handler(&device->event_handler))
109 goto handler_err;
110
99 return 0; 111 return 0;
100 112
113handler_err:
114 ib_dereg_mr(device->mr);
101dma_mr_err: 115dma_mr_err:
102 tasklet_kill(&device->cq_tasklet); 116 tasklet_kill(&device->cq_tasklet);
103cq_arm_err: 117cq_arm_err:
@@ -120,7 +134,7 @@ static void iser_free_device_ib_res(struct iser_device *device)
120 BUG_ON(device->mr == NULL); 134 BUG_ON(device->mr == NULL);
121 135
122 tasklet_kill(&device->cq_tasklet); 136 tasklet_kill(&device->cq_tasklet);
123 137 (void)ib_unregister_event_handler(&device->event_handler);
124 (void)ib_dereg_mr(device->mr); 138 (void)ib_dereg_mr(device->mr);
125 (void)ib_destroy_cq(device->tx_cq); 139 (void)ib_destroy_cq(device->tx_cq);
126 (void)ib_destroy_cq(device->rx_cq); 140 (void)ib_destroy_cq(device->rx_cq);
@@ -149,10 +163,8 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
149 device = ib_conn->device; 163 device = ib_conn->device;
150 164
151 ib_conn->login_buf = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL); 165 ib_conn->login_buf = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
152 if (!ib_conn->login_buf) { 166 if (!ib_conn->login_buf)
153 goto alloc_err; 167 goto out_err;
154 ret = -ENOMEM;
155 }
156 168
157 ib_conn->login_dma = ib_dma_map_single(ib_conn->device->ib_device, 169 ib_conn->login_dma = ib_dma_map_single(ib_conn->device->ib_device,
158 (void *)ib_conn->login_buf, ISER_RX_LOGIN_SIZE, 170 (void *)ib_conn->login_buf, ISER_RX_LOGIN_SIZE,
@@ -161,10 +173,9 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
161 ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) + 173 ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) +
162 (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)), 174 (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)),
163 GFP_KERNEL); 175 GFP_KERNEL);
164 if (!ib_conn->page_vec) { 176 if (!ib_conn->page_vec)
165 ret = -ENOMEM; 177 goto out_err;
166 goto alloc_err; 178
167 }
168 ib_conn->page_vec->pages = (u64 *) (ib_conn->page_vec + 1); 179 ib_conn->page_vec->pages = (u64 *) (ib_conn->page_vec + 1);
169 180
170 params.page_shift = SHIFT_4K; 181 params.page_shift = SHIFT_4K;
@@ -184,7 +195,8 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
184 ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, &params); 195 ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, &params);
185 if (IS_ERR(ib_conn->fmr_pool)) { 196 if (IS_ERR(ib_conn->fmr_pool)) {
186 ret = PTR_ERR(ib_conn->fmr_pool); 197 ret = PTR_ERR(ib_conn->fmr_pool);
187 goto fmr_pool_err; 198 ib_conn->fmr_pool = NULL;
199 goto out_err;
188 } 200 }
189 201
190 memset(&init_attr, 0, sizeof init_attr); 202 memset(&init_attr, 0, sizeof init_attr);
@@ -202,7 +214,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
202 214
203 ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr); 215 ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
204 if (ret) 216 if (ret)
205 goto qp_err; 217 goto out_err;
206 218
207 ib_conn->qp = ib_conn->cma_id->qp; 219 ib_conn->qp = ib_conn->cma_id->qp;
208 iser_err("setting conn %p cma_id %p: fmr_pool %p qp %p\n", 220 iser_err("setting conn %p cma_id %p: fmr_pool %p qp %p\n",
@@ -210,12 +222,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
210 ib_conn->fmr_pool, ib_conn->cma_id->qp); 222 ib_conn->fmr_pool, ib_conn->cma_id->qp);
211 return ret; 223 return ret;
212 224
213qp_err: 225out_err:
214 (void)ib_destroy_fmr_pool(ib_conn->fmr_pool);
215fmr_pool_err:
216 kfree(ib_conn->page_vec);
217 kfree(ib_conn->login_buf);
218alloc_err:
219 iser_err("unable to alloc mem or create resource, err %d\n", ret); 226 iser_err("unable to alloc mem or create resource, err %d\n", ret);
220 return ret; 227 return ret;
221} 228}
@@ -224,7 +231,7 @@ alloc_err:
224 * releases the FMR pool, QP and CMA ID objects, returns 0 on success, 231 * releases the FMR pool, QP and CMA ID objects, returns 0 on success,
225 * -1 on failure 232 * -1 on failure
226 */ 233 */
227static int iser_free_ib_conn_res(struct iser_conn *ib_conn) 234static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
228{ 235{
229 BUG_ON(ib_conn == NULL); 236 BUG_ON(ib_conn == NULL);
230 237
@@ -239,7 +246,8 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
239 if (ib_conn->qp != NULL) 246 if (ib_conn->qp != NULL)
240 rdma_destroy_qp(ib_conn->cma_id); 247 rdma_destroy_qp(ib_conn->cma_id);
241 248
242 if (ib_conn->cma_id != NULL) 249 /* if cma handler context, the caller acts s.t the cma destroy the id */
250 if (ib_conn->cma_id != NULL && can_destroy_id)
243 rdma_destroy_id(ib_conn->cma_id); 251 rdma_destroy_id(ib_conn->cma_id);
244 252
245 ib_conn->fmr_pool = NULL; 253 ib_conn->fmr_pool = NULL;
@@ -317,7 +325,7 @@ static int iser_conn_state_comp_exch(struct iser_conn *ib_conn,
317/** 325/**
318 * Frees all conn objects and deallocs conn descriptor 326 * Frees all conn objects and deallocs conn descriptor
319 */ 327 */
320static void iser_conn_release(struct iser_conn *ib_conn) 328static void iser_conn_release(struct iser_conn *ib_conn, int can_destroy_id)
321{ 329{
322 struct iser_device *device = ib_conn->device; 330 struct iser_device *device = ib_conn->device;
323 331
@@ -327,13 +335,11 @@ static void iser_conn_release(struct iser_conn *ib_conn)
327 list_del(&ib_conn->conn_list); 335 list_del(&ib_conn->conn_list);
328 mutex_unlock(&ig.connlist_mutex); 336 mutex_unlock(&ig.connlist_mutex);
329 iser_free_rx_descriptors(ib_conn); 337 iser_free_rx_descriptors(ib_conn);
330 iser_free_ib_conn_res(ib_conn); 338 iser_free_ib_conn_res(ib_conn, can_destroy_id);
331 ib_conn->device = NULL; 339 ib_conn->device = NULL;
332 /* on EVENT_ADDR_ERROR there's no device yet for this conn */ 340 /* on EVENT_ADDR_ERROR there's no device yet for this conn */
333 if (device != NULL) 341 if (device != NULL)
334 iser_device_try_release(device); 342 iser_device_try_release(device);
335 if (ib_conn->iser_conn)
336 ib_conn->iser_conn->ib_conn = NULL;
337 iscsi_destroy_endpoint(ib_conn->ep); 343 iscsi_destroy_endpoint(ib_conn->ep);
338} 344}
339 345
@@ -342,10 +348,13 @@ void iser_conn_get(struct iser_conn *ib_conn)
342 atomic_inc(&ib_conn->refcount); 348 atomic_inc(&ib_conn->refcount);
343} 349}
344 350
345void iser_conn_put(struct iser_conn *ib_conn) 351int iser_conn_put(struct iser_conn *ib_conn, int can_destroy_id)
346{ 352{
347 if (atomic_dec_and_test(&ib_conn->refcount)) 353 if (atomic_dec_and_test(&ib_conn->refcount)) {
348 iser_conn_release(ib_conn); 354 iser_conn_release(ib_conn, can_destroy_id);
355 return 1;
356 }
357 return 0;
349} 358}
350 359
351/** 360/**
@@ -369,19 +378,20 @@ void iser_conn_terminate(struct iser_conn *ib_conn)
369 wait_event_interruptible(ib_conn->wait, 378 wait_event_interruptible(ib_conn->wait,
370 ib_conn->state == ISER_CONN_DOWN); 379 ib_conn->state == ISER_CONN_DOWN);
371 380
372 iser_conn_put(ib_conn); 381 iser_conn_put(ib_conn, 1); /* deref ib conn deallocate */
373} 382}
374 383
375static void iser_connect_error(struct rdma_cm_id *cma_id) 384static int iser_connect_error(struct rdma_cm_id *cma_id)
376{ 385{
377 struct iser_conn *ib_conn; 386 struct iser_conn *ib_conn;
378 ib_conn = (struct iser_conn *)cma_id->context; 387 ib_conn = (struct iser_conn *)cma_id->context;
379 388
380 ib_conn->state = ISER_CONN_DOWN; 389 ib_conn->state = ISER_CONN_DOWN;
381 wake_up_interruptible(&ib_conn->wait); 390 wake_up_interruptible(&ib_conn->wait);
391 return iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */
382} 392}
383 393
384static void iser_addr_handler(struct rdma_cm_id *cma_id) 394static int iser_addr_handler(struct rdma_cm_id *cma_id)
385{ 395{
386 struct iser_device *device; 396 struct iser_device *device;
387 struct iser_conn *ib_conn; 397 struct iser_conn *ib_conn;
@@ -390,8 +400,7 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
390 device = iser_device_find_by_ib_device(cma_id); 400 device = iser_device_find_by_ib_device(cma_id);
391 if (!device) { 401 if (!device) {
392 iser_err("device lookup/creation failed\n"); 402 iser_err("device lookup/creation failed\n");
393 iser_connect_error(cma_id); 403 return iser_connect_error(cma_id);
394 return;
395 } 404 }
396 405
397 ib_conn = (struct iser_conn *)cma_id->context; 406 ib_conn = (struct iser_conn *)cma_id->context;
@@ -400,11 +409,13 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
400 ret = rdma_resolve_route(cma_id, 1000); 409 ret = rdma_resolve_route(cma_id, 1000);
401 if (ret) { 410 if (ret) {
402 iser_err("resolve route failed: %d\n", ret); 411 iser_err("resolve route failed: %d\n", ret);
403 iser_connect_error(cma_id); 412 return iser_connect_error(cma_id);
404 } 413 }
414
415 return 0;
405} 416}
406 417
407static void iser_route_handler(struct rdma_cm_id *cma_id) 418static int iser_route_handler(struct rdma_cm_id *cma_id)
408{ 419{
409 struct rdma_conn_param conn_param; 420 struct rdma_conn_param conn_param;
410 int ret; 421 int ret;
@@ -425,9 +436,9 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
425 goto failure; 436 goto failure;
426 } 437 }
427 438
428 return; 439 return 0;
429failure: 440failure:
430 iser_connect_error(cma_id); 441 return iser_connect_error(cma_id);
431} 442}
432 443
433static void iser_connected_handler(struct rdma_cm_id *cma_id) 444static void iser_connected_handler(struct rdma_cm_id *cma_id)
@@ -439,12 +450,12 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id)
439 wake_up_interruptible(&ib_conn->wait); 450 wake_up_interruptible(&ib_conn->wait);
440} 451}
441 452
442static void iser_disconnected_handler(struct rdma_cm_id *cma_id) 453static int iser_disconnected_handler(struct rdma_cm_id *cma_id)
443{ 454{
444 struct iser_conn *ib_conn; 455 struct iser_conn *ib_conn;
456 int ret;
445 457
446 ib_conn = (struct iser_conn *)cma_id->context; 458 ib_conn = (struct iser_conn *)cma_id->context;
447 ib_conn->disc_evt_flag = 1;
448 459
449 /* getting here when the state is UP means that the conn is being * 460 /* getting here when the state is UP means that the conn is being *
450 * terminated asynchronously from the iSCSI layer's perspective. */ 461 * terminated asynchronously from the iSCSI layer's perspective. */
@@ -459,20 +470,24 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
459 ib_conn->state = ISER_CONN_DOWN; 470 ib_conn->state = ISER_CONN_DOWN;
460 wake_up_interruptible(&ib_conn->wait); 471 wake_up_interruptible(&ib_conn->wait);
461 } 472 }
473
474 ret = iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */
475 return ret;
462} 476}
463 477
464static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 478static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
465{ 479{
466 int ret = 0; 480 int ret = 0;
467 481
468 iser_err("event %d conn %p id %p\n",event->event,cma_id->context,cma_id); 482 iser_err("event %d status %d conn %p id %p\n",
483 event->event, event->status, cma_id->context, cma_id);
469 484
470 switch (event->event) { 485 switch (event->event) {
471 case RDMA_CM_EVENT_ADDR_RESOLVED: 486 case RDMA_CM_EVENT_ADDR_RESOLVED:
472 iser_addr_handler(cma_id); 487 ret = iser_addr_handler(cma_id);
473 break; 488 break;
474 case RDMA_CM_EVENT_ROUTE_RESOLVED: 489 case RDMA_CM_EVENT_ROUTE_RESOLVED:
475 iser_route_handler(cma_id); 490 ret = iser_route_handler(cma_id);
476 break; 491 break;
477 case RDMA_CM_EVENT_ESTABLISHED: 492 case RDMA_CM_EVENT_ESTABLISHED:
478 iser_connected_handler(cma_id); 493 iser_connected_handler(cma_id);
@@ -482,13 +497,12 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
482 case RDMA_CM_EVENT_CONNECT_ERROR: 497 case RDMA_CM_EVENT_CONNECT_ERROR:
483 case RDMA_CM_EVENT_UNREACHABLE: 498 case RDMA_CM_EVENT_UNREACHABLE:
484 case RDMA_CM_EVENT_REJECTED: 499 case RDMA_CM_EVENT_REJECTED:
485 iser_err("event: %d, error: %d\n", event->event, event->status); 500 ret = iser_connect_error(cma_id);
486 iser_connect_error(cma_id);
487 break; 501 break;
488 case RDMA_CM_EVENT_DISCONNECTED: 502 case RDMA_CM_EVENT_DISCONNECTED:
489 case RDMA_CM_EVENT_DEVICE_REMOVAL: 503 case RDMA_CM_EVENT_DEVICE_REMOVAL:
490 case RDMA_CM_EVENT_ADDR_CHANGE: 504 case RDMA_CM_EVENT_ADDR_CHANGE:
491 iser_disconnected_handler(cma_id); 505 ret = iser_disconnected_handler(cma_id);
492 break; 506 break;
493 default: 507 default:
494 iser_err("Unexpected RDMA CM event (%d)\n", event->event); 508 iser_err("Unexpected RDMA CM event (%d)\n", event->event);
@@ -503,7 +517,7 @@ void iser_conn_init(struct iser_conn *ib_conn)
503 init_waitqueue_head(&ib_conn->wait); 517 init_waitqueue_head(&ib_conn->wait);
504 ib_conn->post_recv_buf_count = 0; 518 ib_conn->post_recv_buf_count = 0;
505 atomic_set(&ib_conn->post_send_buf_count, 0); 519 atomic_set(&ib_conn->post_send_buf_count, 0);
506 atomic_set(&ib_conn->refcount, 1); 520 atomic_set(&ib_conn->refcount, 1); /* ref ib conn allocation */
507 INIT_LIST_HEAD(&ib_conn->conn_list); 521 INIT_LIST_HEAD(&ib_conn->conn_list);
508 spin_lock_init(&ib_conn->lock); 522 spin_lock_init(&ib_conn->lock);
509} 523}
@@ -531,6 +545,7 @@ int iser_connect(struct iser_conn *ib_conn,
531 545
532 ib_conn->state = ISER_CONN_PENDING; 546 ib_conn->state = ISER_CONN_PENDING;
533 547
548 iser_conn_get(ib_conn); /* ref ib conn's cma id */
534 ib_conn->cma_id = rdma_create_id(iser_cma_handler, 549 ib_conn->cma_id = rdma_create_id(iser_cma_handler,
535 (void *)ib_conn, 550 (void *)ib_conn,
536 RDMA_PS_TCP); 551 RDMA_PS_TCP);
@@ -568,7 +583,7 @@ id_failure:
568addr_failure: 583addr_failure:
569 ib_conn->state = ISER_CONN_DOWN; 584 ib_conn->state = ISER_CONN_DOWN;
570connect_failure: 585connect_failure:
571 iser_conn_release(ib_conn); 586 iser_conn_release(ib_conn, 1);
572 return err; 587 return err;
573} 588}
574 589
@@ -737,12 +752,10 @@ static void iser_handle_comp_error(struct iser_tx_desc *desc,
737 iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn, 752 iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
738 ISCSI_ERR_CONN_FAILED); 753 ISCSI_ERR_CONN_FAILED);
739 754
740 /* complete the termination process if disconnect event was delivered * 755 /* no more non completed posts to the QP, complete the
741 * note there are no more non completed posts to the QP */ 756 * termination process without worrying about a disconnect event */
742 if (ib_conn->disc_evt_flag) { 757 ib_conn->state = ISER_CONN_DOWN;
743 ib_conn->state = ISER_CONN_DOWN; 758 wake_up_interruptible(&ib_conn->wait);
744 wake_up_interruptible(&ib_conn->wait);
745 }
746 } 759 }
747} 760}
748 761
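The switch from void to int returns in the CM callbacks above relies on an existing rdma_cm convention: an event handler must not call rdma_destroy_id() on its own cm_id; instead, returning a non-zero value from the handler tells the rdma_cm core to destroy the id once the handler returns. That is why the iser handlers propagate the result of iser_conn_put(ib_conn, 0), dropping the cma_id's reference without destroying the id themselves. A generic sketch of that contract follows; the connection structure and refcount helper (my_conn, my_conn_put) are hypothetical and not taken from this patch.

#include <linux/atomic.h>
#include <linux/slab.h>
#include <rdma/rdma_cm.h>

struct my_conn {			/* hypothetical per-connection state */
	atomic_t refcount;
};

static int my_conn_put(struct my_conn *conn)	/* hypothetical helper */
{
	if (atomic_dec_and_test(&conn->refcount)) {
		kfree(conn);
		return 1;		/* last reference dropped */
	}
	return 0;
}

static int example_cma_handler(struct rdma_cm_id *cma_id,
			       struct rdma_cm_event *event)
{
	struct my_conn *conn = cma_id->context;

	switch (event->event) {
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		/*
		 * Never call rdma_destroy_id() from handler context.  If the
		 * last reference is dropped here, return non-zero so the
		 * rdma_cm core destroys this cm_id after the handler returns.
		 */
		return my_conn_put(conn);
	default:
		return 0;		/* keep the cm_id alive */
	}
}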
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index e92d1bfdb330..7a7f9c1e679a 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -123,8 +123,8 @@ enum {
123 MLX4_OPCODE_RDMA_READ = 0x10, 123 MLX4_OPCODE_RDMA_READ = 0x10,
124 MLX4_OPCODE_ATOMIC_CS = 0x11, 124 MLX4_OPCODE_ATOMIC_CS = 0x11,
125 MLX4_OPCODE_ATOMIC_FA = 0x12, 125 MLX4_OPCODE_ATOMIC_FA = 0x12,
126 MLX4_OPCODE_ATOMIC_MASK_CS = 0x14, 126 MLX4_OPCODE_MASKED_ATOMIC_CS = 0x14,
127 MLX4_OPCODE_ATOMIC_MASK_FA = 0x15, 127 MLX4_OPCODE_MASKED_ATOMIC_FA = 0x15,
128 MLX4_OPCODE_BIND_MW = 0x18, 128 MLX4_OPCODE_BIND_MW = 0x18,
129 MLX4_OPCODE_FMR = 0x19, 129 MLX4_OPCODE_FMR = 0x19,
130 MLX4_OPCODE_LOCAL_INVAL = 0x1b, 130 MLX4_OPCODE_LOCAL_INVAL = 0x1b,
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 9f29d86e5dc9..7abe64326f72 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -285,6 +285,13 @@ struct mlx4_wqe_atomic_seg {
285 __be64 compare; 285 __be64 compare;
286}; 286};
287 287
288struct mlx4_wqe_masked_atomic_seg {
289 __be64 swap_add;
290 __be64 compare;
291 __be64 swap_add_mask;
292 __be64 compare_mask;
293};
294
288struct mlx4_wqe_data_seg { 295struct mlx4_wqe_data_seg {
289 __be32 byte_count; 296 __be32 byte_count;
290 __be32 lkey; 297 __be32 lkey;
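The new WQE segment mirrors the extended atomic arguments of the work request one field at a time. A plausible sketch of how a post-send path could translate a masked compare-and-swap request into this segment is shown below; the helper name and the exact field mapping are assumptions here, not quoted from the mlx4 changes in this patch.

#include <asm/byteorder.h>
#include <linux/mlx4/qp.h>
#include <rdma/ib_verbs.h>

/* Hypothetical helper: fill a masked-atomic WQE segment from a send WR. */
static void set_masked_cmp_swap_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
				    struct ib_send_wr *wr)
{
	aseg->swap_add      = cpu_to_be64(wr->wr.atomic.swap);
	aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask);
	aseg->compare       = cpu_to_be64(wr->wr.atomic.compare_add);
	aseg->compare_mask  = cpu_to_be64(wr->wr.atomic.compare_add_mask);
}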
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index a585e0f92bc3..310d31474034 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -136,6 +136,7 @@ struct ib_device_attr {
136 int max_qp_init_rd_atom; 136 int max_qp_init_rd_atom;
137 int max_ee_init_rd_atom; 137 int max_ee_init_rd_atom;
138 enum ib_atomic_cap atomic_cap; 138 enum ib_atomic_cap atomic_cap;
139 enum ib_atomic_cap masked_atomic_cap;
139 int max_ee; 140 int max_ee;
140 int max_rdd; 141 int max_rdd;
141 int max_mw; 142 int max_mw;
@@ -467,6 +468,8 @@ enum ib_wc_opcode {
467 IB_WC_LSO, 468 IB_WC_LSO,
468 IB_WC_LOCAL_INV, 469 IB_WC_LOCAL_INV,
469 IB_WC_FAST_REG_MR, 470 IB_WC_FAST_REG_MR,
471 IB_WC_MASKED_COMP_SWAP,
472 IB_WC_MASKED_FETCH_ADD,
470/* 473/*
471 * Set value of IB_WC_RECV so consumers can test if a completion is a 474 * Set value of IB_WC_RECV so consumers can test if a completion is a
472 * receive by testing (opcode & IB_WC_RECV). 475 * receive by testing (opcode & IB_WC_RECV).
@@ -689,6 +692,8 @@ enum ib_wr_opcode {
689 IB_WR_RDMA_READ_WITH_INV, 692 IB_WR_RDMA_READ_WITH_INV,
690 IB_WR_LOCAL_INV, 693 IB_WR_LOCAL_INV,
691 IB_WR_FAST_REG_MR, 694 IB_WR_FAST_REG_MR,
695 IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
696 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
692}; 697};
693 698
694enum ib_send_flags { 699enum ib_send_flags {
@@ -731,6 +736,8 @@ struct ib_send_wr {
731 u64 remote_addr; 736 u64 remote_addr;
732 u64 compare_add; 737 u64 compare_add;
733 u64 swap; 738 u64 swap;
739 u64 compare_add_mask;
740 u64 swap_mask;
734 u32 rkey; 741 u32 rkey;
735 } atomic; 742 } atomic;
736 struct { 743 struct {
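With the new opcodes and the mask fields added to the atomic union, a consumer requests a masked compare-and-swap much like an ordinary atomic and sees the completion as IB_WC_MASKED_COMP_SWAP. A minimal sketch of posting such a work request follows; the function name, addresses and keys are placeholders, and masked_atomic_cap in the device attributes should be checked before using these opcodes.

#include <linux/string.h>
#include <rdma/ib_verbs.h>

/* Hypothetical example: masked compare-and-swap on a remote 64-bit word. */
static int post_masked_cmp_swap(struct ib_qp *qp, u64 remote_addr, u32 rkey,
				u64 compare, u64 swap, u64 mask,
				u64 local_addr, u32 lkey)
{
	struct ib_sge sge = {
		.addr   = local_addr,	/* the original remote value lands here */
		.length = sizeof(u64),
		.lkey   = lkey,
	};
	struct ib_send_wr wr, *bad_wr;

	memset(&wr, 0, sizeof(wr));
	wr.opcode     = IB_WR_MASKED_ATOMIC_CMP_AND_SWP;
	wr.send_flags = IB_SEND_SIGNALED;
	wr.sg_list    = &sge;
	wr.num_sge    = 1;
	wr.wr.atomic.remote_addr      = remote_addr;
	wr.wr.atomic.rkey             = rkey;
	wr.wr.atomic.compare_add      = compare;
	wr.wr.atomic.compare_add_mask = mask;	/* only masked bits participate in the compare */
	wr.wr.atomic.swap             = swap;
	wr.wr.atomic.swap_mask        = mask;	/* only masked bits are swapped in */

	/* Completion arrives with wc.opcode == IB_WC_MASKED_COMP_SWAP. */
	return ib_post_send(qp, &wr, &bad_wr);
}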