commit    8dd9117cc7a021ced1c5cf177e2d44dd92b88617
tree      cad990f58f9ec6d400226dda86718fc10781416e /drivers/infiniband
parent    16e310ae6ed352c4963b1f2413fcd88fa693eeda
parent    547b1e81afe3119f7daf702cc03b158495535a25
author    David S. Miller <davem@davemloft.net>  2012-10-09 13:14:32 -0400
committer David S. Miller <davem@davemloft.net>  2012-10-09 13:14:32 -0400

    Merge git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux

    Pulled mainline in order to get the UAPI infrastructure already merged
    before I pull in David Howells's UAPI trees for networking.

    Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/cma.c                   6
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_uverbs.c        4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_file_ops.c    2
-rw-r--r--  drivers/infiniband/hw/nes/nes.c                 5
-rw-r--r--  drivers/infiniband/hw/nes/nes.h                 3
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c          16
-rw-r--r--  drivers/infiniband/hw/qib/qib_file_ops.c        2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h            4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c        31
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c      31
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h       17
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c      130
12 files changed, 148 insertions(+), 103 deletions(-)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 4fff27a7e37c..a7568c34a1aa 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2648,8 +2648,8 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
 	req.responder_resources = conn_param->responder_resources;
 	req.initiator_depth = conn_param->initiator_depth;
 	req.flow_control = conn_param->flow_control;
-	req.retry_count = conn_param->retry_count;
-	req.rnr_retry_count = conn_param->rnr_retry_count;
+	req.retry_count = min_t(u8, 7, conn_param->retry_count);
+	req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
 	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
 	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
 	req.max_cm_retries = CMA_MAX_CM_RETRIES;
@@ -2770,7 +2770,7 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
 	rep.initiator_depth = conn_param->initiator_depth;
 	rep.failover_accepted = 0;
 	rep.flow_control = conn_param->flow_control;
-	rep.rnr_retry_count = conn_param->rnr_retry_count;
+	rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
 	rep.srq = id_priv->srq ? 1 : 0;
 
 	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
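
On the wire, the CM REQ/REP messages encode retry_count and rnr_retry_count
in 3-bit fields, so 7 is the largest expressible value; clamping with
min_t(u8, 7, ...) keeps out-of-range values passed in through the rdma_cm
API from being silently truncated into something unintended. A minimal
userspace sketch of the truncation hazard (pack_retry() is a hypothetical
stand-in for the CM's field packing, not driver code):

#include <stdio.h>

/* Hypothetical illustration: a naive pack into a 3-bit field keeps
 * only the low three bits of the requested retry count. */
static unsigned char pack_retry(unsigned char count)
{
        return count & 0x7;             /* only 3 bits exist on the wire */
}

int main(void)
{
        unsigned char requested = 8;    /* would wrap to 0: no retries at all */
        unsigned char clamped = requested > 7 ? 7 : requested;

        printf("naive: %u -> %u, clamped: %u -> %u\n",
               requested, pack_retry(requested),
               clamped, pack_retry(clamped));
        return 0;
}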
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
index 45ee89b65c23..1a1d5d99fcf9 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -117,7 +117,7 @@ static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas,
 	physical = galpas->user.fw_handle;
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	ehca_gen_dbg("vsize=%llx physical=%llx", vsize, physical);
-	/* VM_IO | VM_RESERVED are set by remap_pfn_range() */
+	/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
 	ret = remap_4k_pfn(vma, vma->vm_start, physical >> EHCA_PAGESHIFT,
 			   vma->vm_page_prot);
 	if (unlikely(ret)) {
@@ -139,7 +139,7 @@ static int ehca_mmap_queue(struct vm_area_struct *vma, struct ipz_queue *queue,
 	u64 start, ofs;
 	struct page *page;
 
-	vma->vm_flags |= VM_RESERVED;
+	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 	start = vma->vm_start;
 	for (ofs = 0; ofs < queue->queue_length; ofs += PAGE_SIZE) {
 		u64 virt_addr = (u64)ipz_qeit_calc(queue, ofs);
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 736d9edbdbe7..3eb7e454849b 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -1225,7 +1225,7 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
 
 	vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
 	vma->vm_ops = &ipath_file_vm_ops;
-	vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
+	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 	ret = 1;
 
bail:
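
The VM_RESERVED conversions here (ehca, ipath, and qib below) follow the
mm change pulled in with this merge, which removed that flag and replaced
it with the two behaviors drivers actually relied on: VM_DONTEXPAND
(mremap() may not grow the mapping) and VM_DONTDUMP (leave the pages out
of core dumps). A sketch of the resulting mmap-handler idiom, with
my_dev_mmap()/my_dev_vm_ops as hypothetical placeholders:

#include <linux/fs.h>
#include <linux/mm.h>

static const struct vm_operations_struct my_dev_vm_ops;

/* Sketch only; a real handler would also map something into the VMA. */
static int my_dev_mmap(struct file *filp, struct vm_area_struct *vma)
{
        vma->vm_ops = &my_dev_vm_ops;
        /* was: vma->vm_flags |= VM_RESERVED;
         * VM_DONTEXPAND: user cannot grow the mapping via mremap()
         * VM_DONTDUMP:   device pages are excluded from core dumps */
        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        return 0;
}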
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 7140199f562e..748db2d3e465 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -79,11 +79,6 @@ int disable_mpa_crc = 0;
 module_param(disable_mpa_crc, int, 0644);
 MODULE_PARM_DESC(disable_mpa_crc, "Disable checking of MPA CRC");
 
-unsigned int send_first = 0;
-module_param(send_first, int, 0644);
-MODULE_PARM_DESC(send_first, "Send RDMA Message First on Active Connection");
-
-
 unsigned int nes_drv_opt = NES_DRV_OPT_DISABLE_INT_MOD | NES_DRV_OPT_ENABLE_PAU;
 module_param(nes_drv_opt, int, 0644);
 MODULE_PARM_DESC(nes_drv_opt, "Driver option parameters");
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index 0da62b904d00..5cac29e6bc1c 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -57,7 +57,7 @@
 #define QUEUE_DISCONNECTS
 
 #define DRV_NAME    "iw_nes"
-#define DRV_VERSION "1.5.0.0"
+#define DRV_VERSION "1.5.0.1"
 #define PFX         DRV_NAME ": "
 
 /*
@@ -172,7 +172,6 @@ extern int interrupt_mod_interval;
 extern int nes_if_count;
 extern int mpa_version;
 extern int disable_mpa_crc;
-extern unsigned int send_first;
 extern unsigned int nes_drv_opt;
 extern unsigned int nes_debug_level;
 extern unsigned int wqm_quanta;
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 1dadcf388c02..cd0ecb215cca 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -3006,6 +3006,7 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 			switch (nesqp->hw_iwarp_state) {
 			case NES_AEQE_IWARP_STATE_CLOSING:
 				next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
+				break;
 			case NES_AEQE_IWARP_STATE_TERMINATE:
 				next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE;
 				break;
@@ -3068,18 +3069,9 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		}
 
 		nesqp->ibqp_state = attr->qp_state;
-		if (((nesqp->iwarp_state & NES_CQP_QP_IWARP_STATE_MASK) ==
-				(u32)NES_CQP_QP_IWARP_STATE_RTS) &&
-				((next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK) >
-				(u32)NES_CQP_QP_IWARP_STATE_RTS)) {
-			nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK;
-			nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n",
-					nesqp->iwarp_state);
-		} else {
-			nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK;
-			nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n",
-					nesqp->iwarp_state);
-		}
+		nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK;
+		nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n",
+			  nesqp->iwarp_state);
 	}
 
 	if (attr_mask & IB_QP_ACCESS_FLAGS) {
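
Two fixes in nes_verbs.c: the CLOSING case was missing a break, so it fell
through and immediately overwrote next_iwarp_state with TERMINATE; and an
if/else whose two branches were byte-for-byte identical is collapsed into
one. A standalone sketch of the fall-through bug and the effect of the
added break (hypothetical states, not driver code):

#include <stdio.h>

enum state { CLOSING, TERMINATE };

/* Without the break, the CLOSING case falls through and the result is
 * always TERMINATE -- the bug the first hunk above fixes. */
static enum state next_state(enum state hw, int fixed)
{
        enum state next = CLOSING;

        switch (hw) {
        case CLOSING:
                next = CLOSING;
                if (fixed)
                        break;          /* the added break */
                /* fall through when !fixed */
        case TERMINATE:
                next = TERMINATE;
                break;
        }
        return next;
}

int main(void)
{
        /* prints "buggy: 1, fixed: 0" (TERMINATE vs. CLOSING) */
        printf("buggy: %d, fixed: %d\n",
               next_state(CLOSING, 0), next_state(CLOSING, 1));
        return 0;
}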
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index faa44cb08071..959a5c4ff812 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -971,7 +971,7 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
 
 	vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
 	vma->vm_ops = &qib_file_vm_ops;
-	vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
+	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 	ret = 1;
 
bail:
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 196eb52f0035..07ca6fd5546b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -535,14 +535,14 @@ void ipoib_drain_cq(struct net_device *dev);
 void ipoib_set_ethtool_ops(struct net_device *dev);
 int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca);
 
-#ifdef CONFIG_INFINIBAND_IPOIB_CM
-
 #define IPOIB_FLAGS_RC          0x80
 #define IPOIB_FLAGS_UC          0x40
 
 /* We don't support UC connections at the moment */
 #define IPOIB_CM_SUPPORTED(ha)  (ha[0] & (IPOIB_FLAGS_RC))
 
+#ifdef CONFIG_INFINIBAND_IPOIB_CM
+
 extern int ipoib_max_conn_qp;
 
 static inline int ipoib_cm_admin_enabled(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 175581cf478c..72ae63f0072d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1448,37 +1448,6 @@ static ssize_t show_mode(struct device *d, struct device_attribute *attr,
 		return sprintf(buf, "datagram\n");
 }
 
-int ipoib_set_mode(struct net_device *dev, const char *buf)
-{
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
-
-	/* flush paths if we switch modes so that connections are restarted */
-	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
-		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
-		ipoib_warn(priv, "enabling connected mode "
-			   "will cause multicast packet drops\n");
-		netdev_update_features(dev);
-		rtnl_unlock();
-		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
-
-		ipoib_flush_paths(dev);
-		rtnl_lock();
-		return 0;
-	}
-
-	if (!strcmp(buf, "datagram\n")) {
-		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
-		netdev_update_features(dev);
-		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
-		rtnl_unlock();
-		ipoib_flush_paths(dev);
-		rtnl_lock();
-		return 0;
-	}
-
-	return -EINVAL;
-}
-
 static ssize_t set_mode(struct device *d, struct device_attribute *attr,
 			const char *buf, size_t count)
 {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index d576c7aad89d..6fdc9e78da0d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -215,6 +215,37 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
 	return 0;
 }
 
+int ipoib_set_mode(struct net_device *dev, const char *buf)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+	/* flush paths if we switch modes so that connections are restarted */
+	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
+		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+		ipoib_warn(priv, "enabling connected mode "
+			   "will cause multicast packet drops\n");
+		netdev_update_features(dev);
+		rtnl_unlock();
+		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
+
+		ipoib_flush_paths(dev);
+		rtnl_lock();
+		return 0;
+	}
+
+	if (!strcmp(buf, "datagram\n")) {
+		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+		netdev_update_features(dev);
+		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
+		rtnl_unlock();
+		ipoib_flush_paths(dev);
+		rtnl_lock();
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
 static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
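
ipoib_set_mode() moves verbatim from ipoib_cm.c to ipoib_main.c, and the
ipoib.h hunk above lifts IPOIB_FLAGS_RC/IPOIB_CM_SUPPORTED out of the
CONFIG_INFINIBAND_IPOIB_CM block, so the symbol still exists when the
connected-mode code is compiled out. Note the rtnl_unlock()/rtnl_lock()
dance: the caller enters with RTNL held, and the lock is dropped around
ipoib_flush_paths(), which blocks while existing paths are torn down. In
practice the switch is driven through the mode attribute shown above,
e.g. "echo connected > /sys/class/net/ib0/mode".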
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 296be431a0e9..ef7d3be46c31 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -177,6 +177,7 @@ struct iser_data_buf {
 
 /* fwd declarations */
 struct iser_device;
+struct iser_cq_desc;
 struct iscsi_iser_conn;
 struct iscsi_iser_task;
 struct iscsi_endpoint;
@@ -226,16 +227,21 @@ struct iser_rx_desc {
 	char                         pad[ISER_RX_PAD_SIZE];
 } __attribute__((packed));
 
+#define ISER_MAX_CQ 4
+
 struct iser_device {
 	struct ib_device             *ib_device;
 	struct ib_pd                 *pd;
-	struct ib_cq                 *rx_cq;
-	struct ib_cq                 *tx_cq;
+	struct ib_cq                 *rx_cq[ISER_MAX_CQ];
+	struct ib_cq                 *tx_cq[ISER_MAX_CQ];
 	struct ib_mr                 *mr;
-	struct tasklet_struct        cq_tasklet;
+	struct tasklet_struct        cq_tasklet[ISER_MAX_CQ];
 	struct ib_event_handler      event_handler;
 	struct list_head             ig_list; /* entry in ig devices list */
 	int                          refcount;
+	int                          cq_active_qps[ISER_MAX_CQ];
+	int                          cqs_used;
+	struct iser_cq_desc          *cq_desc;
 };
 
 struct iser_conn {
@@ -287,6 +293,11 @@ struct iser_page_vec {
 	int data_size;
 };
 
+struct iser_cq_desc {
+	struct iser_device           *device;
+	int                          cq_index;
+};
+
 struct iser_global {
 	struct mutex      device_list_mutex;/* */
 	struct list_head  device_list;       /* all iSER devices */
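
The new iser_cq_desc is the per-CQ context handle: each rx/tx CQ pair and
its tasklet are created with a pointer to one descriptor, so a completion
callback can recover both the owning device and which of the (up to
ISER_MAX_CQ) pairs fired. A minimal sketch of the pattern, assuming the
structures above; my_cq_callback() is a hypothetical name:

/* Sketch: a per-CQ descriptor threads context from CQ creation to the
 * completion callback, letting one handler serve every CQ instance. */
static void my_cq_callback(struct ib_cq *cq, void *cq_context)
{
        struct iser_cq_desc *desc = cq_context;

        /* desc->device and desc->cq_index identify the pair that fired */
        tasklet_schedule(&desc->device->cq_tasklet[desc->cq_index]);
}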
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 2dddabd8fcf9..95a49affee44 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -70,32 +70,50 @@ static void iser_event_handler(struct ib_event_handler *handler,
  */
 static int iser_create_device_ib_res(struct iser_device *device)
 {
+	int i, j;
+	struct iser_cq_desc *cq_desc;
+
+	device->cqs_used = min(ISER_MAX_CQ, device->ib_device->num_comp_vectors);
+	iser_err("using %d CQs, device %s supports %d vectors\n", device->cqs_used,
+		 device->ib_device->name, device->ib_device->num_comp_vectors);
+
+	device->cq_desc = kmalloc(sizeof(struct iser_cq_desc) * device->cqs_used,
+				  GFP_KERNEL);
+	if (device->cq_desc == NULL)
+		goto cq_desc_err;
+	cq_desc = device->cq_desc;
+
 	device->pd = ib_alloc_pd(device->ib_device);
 	if (IS_ERR(device->pd))
 		goto pd_err;
 
-	device->rx_cq = ib_create_cq(device->ib_device,
-				     iser_cq_callback,
-				     iser_cq_event_callback,
-				     (void *)device,
-				     ISER_MAX_RX_CQ_LEN, 0);
-	if (IS_ERR(device->rx_cq))
-		goto rx_cq_err;
+	for (i = 0; i < device->cqs_used; i++) {
+		cq_desc[i].device   = device;
+		cq_desc[i].cq_index = i;
+
+		device->rx_cq[i] = ib_create_cq(device->ib_device,
+						iser_cq_callback,
+						iser_cq_event_callback,
+						(void *)&cq_desc[i],
+						ISER_MAX_RX_CQ_LEN, i);
+		if (IS_ERR(device->rx_cq[i]))
+			goto cq_err;
 
-	device->tx_cq = ib_create_cq(device->ib_device,
-				     NULL, iser_cq_event_callback,
-				     (void *)device,
-				     ISER_MAX_TX_CQ_LEN, 0);
+		device->tx_cq[i] = ib_create_cq(device->ib_device,
+						NULL, iser_cq_event_callback,
+						(void *)&cq_desc[i],
+						ISER_MAX_TX_CQ_LEN, i);
 
-	if (IS_ERR(device->tx_cq))
-		goto tx_cq_err;
+		if (IS_ERR(device->tx_cq[i]))
+			goto cq_err;
 
-	if (ib_req_notify_cq(device->rx_cq, IB_CQ_NEXT_COMP))
-		goto cq_arm_err;
+		if (ib_req_notify_cq(device->rx_cq[i], IB_CQ_NEXT_COMP))
+			goto cq_err;
 
-	tasklet_init(&device->cq_tasklet,
-		     iser_cq_tasklet_fn,
-		     (unsigned long)device);
+		tasklet_init(&device->cq_tasklet[i],
+			     iser_cq_tasklet_fn,
+			     (unsigned long)&cq_desc[i]);
+	}
 
 	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
 				   IB_ACCESS_REMOTE_WRITE |
@@ -113,14 +131,19 @@ static int iser_create_device_ib_res(struct iser_device *device)
handler_err:
 	ib_dereg_mr(device->mr);
dma_mr_err:
-	tasklet_kill(&device->cq_tasklet);
-cq_arm_err:
-	ib_destroy_cq(device->tx_cq);
-tx_cq_err:
-	ib_destroy_cq(device->rx_cq);
-rx_cq_err:
+	for (j = 0; j < device->cqs_used; j++)
+		tasklet_kill(&device->cq_tasklet[j]);
+cq_err:
+	for (j = 0; j < i; j++) {
+		if (device->tx_cq[j])
+			ib_destroy_cq(device->tx_cq[j]);
+		if (device->rx_cq[j])
+			ib_destroy_cq(device->rx_cq[j]);
+	}
 	ib_dealloc_pd(device->pd);
pd_err:
+	kfree(device->cq_desc);
+cq_desc_err:
 	iser_err("failed to allocate an IB resource\n");
 	return -1;
 }
@@ -131,18 +154,24 @@ pd_err:
  */
 static void iser_free_device_ib_res(struct iser_device *device)
 {
+	int i;
 	BUG_ON(device->mr == NULL);
 
-	tasklet_kill(&device->cq_tasklet);
+	for (i = 0; i < device->cqs_used; i++) {
+		tasklet_kill(&device->cq_tasklet[i]);
+		(void)ib_destroy_cq(device->tx_cq[i]);
+		(void)ib_destroy_cq(device->rx_cq[i]);
+		device->tx_cq[i] = NULL;
+		device->rx_cq[i] = NULL;
+	}
+
 	(void)ib_unregister_event_handler(&device->event_handler);
 	(void)ib_dereg_mr(device->mr);
-	(void)ib_destroy_cq(device->tx_cq);
-	(void)ib_destroy_cq(device->rx_cq);
 	(void)ib_dealloc_pd(device->pd);
 
+	kfree(device->cq_desc);
+
 	device->mr = NULL;
-	device->tx_cq = NULL;
-	device->rx_cq = NULL;
 	device->pd = NULL;
 }
 
@@ -157,6 +186,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
 	struct ib_qp_init_attr init_attr;
 	int req_err, resp_err, ret = -ENOMEM;
 	struct ib_fmr_pool_param params;
+	int index, min_index = 0;
 
 	BUG_ON(ib_conn->device == NULL);
 
@@ -220,10 +250,20 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
 
 	memset(&init_attr, 0, sizeof init_attr);
 
+	mutex_lock(&ig.connlist_mutex);
+	/* select the CQ with the minimal number of usages */
+	for (index = 0; index < device->cqs_used; index++)
+		if (device->cq_active_qps[index] <
+		    device->cq_active_qps[min_index])
+			min_index = index;
+	device->cq_active_qps[min_index]++;
+	mutex_unlock(&ig.connlist_mutex);
+	iser_err("cq index %d used for ib_conn %p\n", min_index, ib_conn);
+
 	init_attr.event_handler = iser_qp_event_callback;
 	init_attr.qp_context    = (void *)ib_conn;
-	init_attr.send_cq       = device->tx_cq;
-	init_attr.recv_cq       = device->rx_cq;
+	init_attr.send_cq       = device->tx_cq[min_index];
+	init_attr.recv_cq       = device->rx_cq[min_index];
 	init_attr.cap.max_send_wr  = ISER_QP_MAX_REQ_DTOS;
 	init_attr.cap.max_recv_wr  = ISER_QP_MAX_RECV_DTOS;
 	init_attr.cap.max_send_sge = 2;
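
QP placement uses a least-loaded scan under ig.connlist_mutex rather than
simple round-robin, so connections stay balanced even as earlier ones go
away and release their CQ. The same selection logic in a standalone,
testable form (hypothetical helper, not driver code):

#include <stdio.h>

#define MAX_CQ 4

/* Stand-alone version of the loop above: return the index of the CQ
 * with the fewest active QPs. */
static int pick_least_used(const int active[], int used)
{
        int index, min_index = 0;

        for (index = 0; index < used; index++)
                if (active[index] < active[min_index])
                        min_index = index;
        return min_index;
}

int main(void)
{
        int active[MAX_CQ] = { 3, 1, 2, 1 };

        /* picks index 1: the first CQ with the minimal usage count */
        printf("least used cq: %d\n", pick_least_used(active, MAX_CQ));
        return 0;
}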
@@ -252,6 +292,7 @@ out_err:
  */
 static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
 {
+	int cq_index;
 	BUG_ON(ib_conn == NULL);
 
 	iser_err("freeing conn %p cma_id %p fmr pool %p qp %p\n",
@@ -262,9 +303,12 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
 	if (ib_conn->fmr_pool != NULL)
 		ib_destroy_fmr_pool(ib_conn->fmr_pool);
 
-	if (ib_conn->qp != NULL)
-		rdma_destroy_qp(ib_conn->cma_id);
+	if (ib_conn->qp != NULL) {
+		cq_index = ((struct iser_cq_desc *)ib_conn->qp->recv_cq->cq_context)->cq_index;
+		ib_conn->device->cq_active_qps[cq_index]--;
 
+		rdma_destroy_qp(ib_conn->cma_id);
+	}
 	/* if cma handler context, the caller acts s.t the cma destroy the id */
 	if (ib_conn->cma_id != NULL && can_destroy_id)
 		rdma_destroy_id(ib_conn->cma_id);
@@ -791,9 +835,9 @@ static void iser_handle_comp_error(struct iser_tx_desc *desc,
 	}
 }
 
-static int iser_drain_tx_cq(struct iser_device *device)
+static int iser_drain_tx_cq(struct iser_device *device, int cq_index)
 {
-	struct ib_cq *cq = device->tx_cq;
+	struct ib_cq *cq = device->tx_cq[cq_index];
 	struct ib_wc wc;
 	struct iser_tx_desc *tx_desc;
 	struct iser_conn *ib_conn;
@@ -822,8 +866,10 @@ static int iser_drain_tx_cq(struct iser_device *device)
 
 static void iser_cq_tasklet_fn(unsigned long data)
 {
-	struct iser_device *device = (struct iser_device *)data;
-	struct ib_cq *cq = device->rx_cq;
+	struct iser_cq_desc *cq_desc = (struct iser_cq_desc *)data;
+	struct iser_device *device = cq_desc->device;
+	int cq_index = cq_desc->cq_index;
+	struct ib_cq *cq = device->rx_cq[cq_index];
 	struct ib_wc wc;
 	struct iser_rx_desc *desc;
 	unsigned long xfer_len;
@@ -851,19 +897,21 @@ static void iser_cq_tasklet_fn(unsigned long data)
 		}
 		completed_rx++;
 		if (!(completed_rx & 63))
-			completed_tx += iser_drain_tx_cq(device);
+			completed_tx += iser_drain_tx_cq(device, cq_index);
 	}
 	/* #warning "it is assumed here that arming CQ only once its empty" *
 	 * " would not cause interrupts to be missed" */
 	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 
-	completed_tx += iser_drain_tx_cq(device);
+	completed_tx += iser_drain_tx_cq(device, cq_index);
 	iser_dbg("got %d rx %d tx completions\n", completed_rx, completed_tx);
 }
 
 static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
 {
-	struct iser_device *device = (struct iser_device *)cq_context;
+	struct iser_cq_desc *cq_desc = (struct iser_cq_desc *)cq_context;
+	struct iser_device *device = cq_desc->device;
+	int cq_index = cq_desc->cq_index;
 
-	tasklet_schedule(&device->cq_tasklet);
+	tasklet_schedule(&device->cq_tasklet[cq_index]);
 }
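
Taken together, the iSER changes replace the single global CQ pair with up
to four pairs, one per completion vector (the final argument of
ib_create_cq() above), each driven by its own tasklet. Combined with the
least-loaded QP placement, completion processing for different connections
can then proceed on different CPUs instead of serializing on one interrupt
and one tasklet.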