Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/cma.c                 6
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h          4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c      31
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c    31
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h     17
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c    130
6 files changed, 139 insertions, 80 deletions
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 26b37603dcf1..1983adc19243 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2648,8 +2648,8 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
 	req.responder_resources = conn_param->responder_resources;
 	req.initiator_depth = conn_param->initiator_depth;
 	req.flow_control = conn_param->flow_control;
-	req.retry_count = conn_param->retry_count;
-	req.rnr_retry_count = conn_param->rnr_retry_count;
+	req.retry_count = min_t(u8, 7, conn_param->retry_count);
+	req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
 	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
 	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
 	req.max_cm_retries = CMA_MAX_CM_RETRIES;
@@ -2770,7 +2770,7 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
 	rep.initiator_depth = conn_param->initiator_depth;
 	rep.failover_accepted = 0;
 	rep.flow_control = conn_param->flow_control;
-	rep.rnr_retry_count = conn_param->rnr_retry_count;
+	rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
 	rep.srq = id_priv->srq ? 1 : 0;
 
 	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
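Both clamps guard the same constraint: in the IB CM REQ (and REP) wire format, the retry count and RNR retry count are 3-bit fields, so only values 0..7 are representable. Without the min_t() a larger value supplied through rdma_connect()'s conn_param would be truncated when encoded, e.g. a requested 8 would go out as 0 and disable retries entirely. A minimal sketch of the idea (the macro and helper names are illustrative, not from the patch):

	/* IB CM encodes retry counts in 3 bits; clamp instead of truncating. */
	#define CM_RETRY_FIELD_MAX	7

	static u8 clamp_cm_retry(u8 requested)
	{
		return min_t(u8, CM_RETRY_FIELD_MAX, requested);
	}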
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 196eb52f0035..07ca6fd5546b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -535,14 +535,14 @@ void ipoib_drain_cq(struct net_device *dev);
 void ipoib_set_ethtool_ops(struct net_device *dev);
 int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca);
 
-#ifdef CONFIG_INFINIBAND_IPOIB_CM
-
 #define IPOIB_FLAGS_RC		0x80
 #define IPOIB_FLAGS_UC		0x40
 
 /* We don't support UC connections at the moment */
 #define IPOIB_CM_SUPPORTED(ha)	(ha[0] & (IPOIB_FLAGS_RC))
 
+#ifdef CONFIG_INFINIBAND_IPOIB_CM
+
 extern int ipoib_max_conn_qp;
 
 static inline int ipoib_cm_admin_enabled(struct net_device *dev)
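The only functional change here is scope: the address-flag macros move above the #ifdef so they stay visible when CONFIG_INFINIBAND_IPOIB_CM is disabled. That matters because ipoib_set_mode(), relocated to ipoib_main.c below, tests IPOIB_CM_SUPPORTED() and is now built unconditionally. For illustration, the test reads the capability bits in byte 0 of the 20-byte IPoIB hardware address (the helper name is hypothetical):

	/* Byte 0 of the IPoIB hardware address carries capability flags. */
	static inline bool peer_advertises_rc(const u8 *ha)
	{
		return ha[0] & IPOIB_FLAGS_RC;	/* same test as IPOIB_CM_SUPPORTED() */
	}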
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 175581cf478c..72ae63f0072d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1448,37 +1448,6 @@ static ssize_t show_mode(struct device *d, struct device_attribute *attr,
 	return sprintf(buf, "datagram\n");
 }
 
-int ipoib_set_mode(struct net_device *dev, const char *buf)
-{
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
-
-	/* flush paths if we switch modes so that connections are restarted */
-	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
-		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
-		ipoib_warn(priv, "enabling connected mode "
-			   "will cause multicast packet drops\n");
-		netdev_update_features(dev);
-		rtnl_unlock();
-		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
-
-		ipoib_flush_paths(dev);
-		rtnl_lock();
-		return 0;
-	}
-
-	if (!strcmp(buf, "datagram\n")) {
-		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
-		netdev_update_features(dev);
-		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
-		rtnl_unlock();
-		ipoib_flush_paths(dev);
-		rtnl_lock();
-		return 0;
-	}
-
-	return -EINVAL;
-}
-
 static ssize_t set_mode(struct device *d, struct device_attribute *attr,
 			const char *buf, size_t count)
 {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index d576c7aad89d..6fdc9e78da0d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -215,6 +215,37 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
 	return 0;
 }
 
+int ipoib_set_mode(struct net_device *dev, const char *buf)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+	/* flush paths if we switch modes so that connections are restarted */
+	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
+		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+		ipoib_warn(priv, "enabling connected mode "
+			   "will cause multicast packet drops\n");
+		netdev_update_features(dev);
+		rtnl_unlock();
+		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
+
+		ipoib_flush_paths(dev);
+		rtnl_lock();
+		return 0;
+	}
+
+	if (!strcmp(buf, "datagram\n")) {
+		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+		netdev_update_features(dev);
+		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
+		rtnl_unlock();
+		ipoib_flush_paths(dev);
+		rtnl_lock();
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
 static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
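The body is byte-for-byte the code removed from ipoib_cm.c; only its home changes, so the function exists even in builds where ipoib_cm.c is compiled out. One property worth noting for callers: the rtnl_unlock()/rtnl_lock() pairs around ipoib_flush_paths() mean ipoib_set_mode() must be entered with RTNL held. A hedged sketch of such a caller, assuming the shape of the set_mode() attribute handler that remains in ipoib_cm.c (the exact handler in this tree may differ):

	static ssize_t set_mode_sketch(struct device *d, struct device_attribute *attr,
				       const char *buf, size_t count)
	{
		int ret;

		if (!rtnl_trylock())		/* ipoib_set_mode() needs RTNL held */
			return restart_syscall();
		ret = ipoib_set_mode(to_net_dev(d), buf);
		rtnl_unlock();

		return ret ? ret : count;
	}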
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 296be431a0e9..ef7d3be46c31 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -177,6 +177,7 @@ struct iser_data_buf {
 
 /* fwd declarations */
 struct iser_device;
+struct iser_cq_desc;
 struct iscsi_iser_conn;
 struct iscsi_iser_task;
 struct iscsi_endpoint;
@@ -226,16 +227,21 @@ struct iser_rx_desc {
 	char                         pad[ISER_RX_PAD_SIZE];
 } __attribute__((packed));
 
+#define ISER_MAX_CQ 4
+
 struct iser_device {
 	struct ib_device             *ib_device;
 	struct ib_pd                 *pd;
-	struct ib_cq                 *rx_cq;
-	struct ib_cq                 *tx_cq;
+	struct ib_cq                 *rx_cq[ISER_MAX_CQ];
+	struct ib_cq                 *tx_cq[ISER_MAX_CQ];
 	struct ib_mr                 *mr;
-	struct tasklet_struct        cq_tasklet;
+	struct tasklet_struct        cq_tasklet[ISER_MAX_CQ];
 	struct ib_event_handler      event_handler;
 	struct list_head             ig_list; /* entry in ig devices list */
 	int                          refcount;
+	int                          cq_active_qps[ISER_MAX_CQ];
+	int                          cqs_used;
+	struct iser_cq_desc          *cq_desc;
 };
 
 struct iser_conn {
@@ -287,6 +293,11 @@ struct iser_page_vec {
 	int data_size;
 };
 
+struct iser_cq_desc {
+	struct iser_device           *device;
+	int                          cq_index;
+};
+
 struct iser_global {
 	struct mutex      device_list_mutex;/* */
 	struct list_head  device_list;       /* all iSER devices */
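The new iser_cq_desc exists because ib_create_cq() and tasklet_init() each carry a single opaque context pointer. With one CQ per completion vector, the callback must recover both the owning device and which CQ fired, so the context becomes a (device, index) pair, allocated once per CQ in iser_create_device_ib_res() below. A sketch of the decode, matching what the completion path does (the helper name is illustrative):

	static inline struct ib_cq *iser_rx_cq_of(void *cq_context)
	{
		struct iser_cq_desc *desc = cq_context;

		return desc->device->rx_cq[desc->cq_index];
	}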
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 2dddabd8fcf9..95a49affee44 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -70,32 +70,50 @@ static void iser_event_handler(struct ib_event_handler *handler,
  */
 static int iser_create_device_ib_res(struct iser_device *device)
 {
+	int i, j;
+	struct iser_cq_desc *cq_desc;
+
+	device->cqs_used = min(ISER_MAX_CQ, device->ib_device->num_comp_vectors);
+	iser_err("using %d CQs, device %s supports %d vectors\n", device->cqs_used,
+		 device->ib_device->name, device->ib_device->num_comp_vectors);
+
+	device->cq_desc = kmalloc(sizeof(struct iser_cq_desc) * device->cqs_used,
+				  GFP_KERNEL);
+	if (device->cq_desc == NULL)
+		goto cq_desc_err;
+	cq_desc = device->cq_desc;
+
 	device->pd = ib_alloc_pd(device->ib_device);
 	if (IS_ERR(device->pd))
 		goto pd_err;
 
-	device->rx_cq = ib_create_cq(device->ib_device,
-				     iser_cq_callback,
-				     iser_cq_event_callback,
-				     (void *)device,
-				     ISER_MAX_RX_CQ_LEN, 0);
-	if (IS_ERR(device->rx_cq))
-		goto rx_cq_err;
+	for (i = 0; i < device->cqs_used; i++) {
+		cq_desc[i].device   = device;
+		cq_desc[i].cq_index = i;
+
+		device->rx_cq[i] = ib_create_cq(device->ib_device,
+						iser_cq_callback,
+						iser_cq_event_callback,
+						(void *)&cq_desc[i],
+						ISER_MAX_RX_CQ_LEN, i);
+		if (IS_ERR(device->rx_cq[i]))
+			goto cq_err;
 
-	device->tx_cq = ib_create_cq(device->ib_device,
-				     NULL, iser_cq_event_callback,
-				     (void *)device,
-				     ISER_MAX_TX_CQ_LEN, 0);
+		device->tx_cq[i] = ib_create_cq(device->ib_device,
+						NULL, iser_cq_event_callback,
+						(void *)&cq_desc[i],
+						ISER_MAX_TX_CQ_LEN, i);
 
-	if (IS_ERR(device->tx_cq))
-		goto tx_cq_err;
+		if (IS_ERR(device->tx_cq[i]))
+			goto cq_err;
 
-	if (ib_req_notify_cq(device->rx_cq, IB_CQ_NEXT_COMP))
-		goto cq_arm_err;
+		if (ib_req_notify_cq(device->rx_cq[i], IB_CQ_NEXT_COMP))
+			goto cq_err;
 
-	tasklet_init(&device->cq_tasklet,
-		     iser_cq_tasklet_fn,
-		     (unsigned long)device);
+		tasklet_init(&device->cq_tasklet[i],
+			     iser_cq_tasklet_fn,
+			     (unsigned long)&cq_desc[i]);
+	}
 
 	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
 				   IB_ACCESS_REMOTE_WRITE |
@@ -113,14 +131,19 @@ static int iser_create_device_ib_res(struct iser_device *device)
 handler_err:
 	ib_dereg_mr(device->mr);
 dma_mr_err:
-	tasklet_kill(&device->cq_tasklet);
-cq_arm_err:
-	ib_destroy_cq(device->tx_cq);
-tx_cq_err:
-	ib_destroy_cq(device->rx_cq);
-rx_cq_err:
+	for (j = 0; j < device->cqs_used; j++)
+		tasklet_kill(&device->cq_tasklet[j]);
+cq_err:
+	for (j = 0; j < i; j++) {
+		if (device->tx_cq[j])
+			ib_destroy_cq(device->tx_cq[j]);
+		if (device->rx_cq[j])
+			ib_destroy_cq(device->rx_cq[j]);
+	}
 	ib_dealloc_pd(device->pd);
 pd_err:
+	kfree(device->cq_desc);
+cq_desc_err:
 	iser_err("failed to allocate an IB resource\n");
 	return -1;
 }
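The rewritten unwind merges four error labels into two and must cope with a loop that can fail mid-iteration: never-reached slots are still zero (assuming the device struct is zero-allocated, as iser_verbs.c does when it creates the device), while the bound j < i keeps ib_destroy_cq() away from slot i, whose pointer may be an IS_ERR() cookie rather than NULL or a valid CQ. A sketch of that invariant (the helper name is hypothetical):

	/* Destroy only CQs from iterations that completed; slot 'upto'
	 * may hold an IS_ERR() value and must not reach ib_destroy_cq(). */
	static void destroy_created_cqs(struct iser_device *device, int upto)
	{
		int j;

		for (j = 0; j < upto; j++) {
			if (device->tx_cq[j])
				ib_destroy_cq(device->tx_cq[j]);
			if (device->rx_cq[j])
				ib_destroy_cq(device->rx_cq[j]);
		}
	}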
@@ -131,18 +154,24 @@ pd_err:
  */
 static void iser_free_device_ib_res(struct iser_device *device)
 {
+	int i;
 	BUG_ON(device->mr == NULL);
 
-	tasklet_kill(&device->cq_tasklet);
+	for (i = 0; i < device->cqs_used; i++) {
+		tasklet_kill(&device->cq_tasklet[i]);
+		(void)ib_destroy_cq(device->tx_cq[i]);
+		(void)ib_destroy_cq(device->rx_cq[i]);
+		device->tx_cq[i] = NULL;
+		device->rx_cq[i] = NULL;
+	}
+
 	(void)ib_unregister_event_handler(&device->event_handler);
 	(void)ib_dereg_mr(device->mr);
-	(void)ib_destroy_cq(device->tx_cq);
-	(void)ib_destroy_cq(device->rx_cq);
 	(void)ib_dealloc_pd(device->pd);
 
+	kfree(device->cq_desc);
+
 	device->mr = NULL;
-	device->tx_cq = NULL;
-	device->rx_cq = NULL;
 	device->pd = NULL;
 }
 
@@ -157,6 +186,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
 	struct ib_qp_init_attr	init_attr;
 	int			req_err, resp_err, ret = -ENOMEM;
 	struct ib_fmr_pool_param params;
+	int			index, min_index = 0;
 
 	BUG_ON(ib_conn->device == NULL);
 
@@ -220,10 +250,20 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
 
 	memset(&init_attr, 0, sizeof init_attr);
 
+	mutex_lock(&ig.connlist_mutex);
+	/* select the CQ with the minimal number of usages */
+	for (index = 0; index < device->cqs_used; index++)
+		if (device->cq_active_qps[index] <
+		    device->cq_active_qps[min_index])
+			min_index = index;
+	device->cq_active_qps[min_index]++;
+	mutex_unlock(&ig.connlist_mutex);
+	iser_err("cq index %d used for ib_conn %p\n", min_index, ib_conn);
+
 	init_attr.event_handler = iser_qp_event_callback;
 	init_attr.qp_context	= (void *)ib_conn;
-	init_attr.send_cq	= device->tx_cq;
-	init_attr.recv_cq	= device->rx_cq;
+	init_attr.send_cq	= device->tx_cq[min_index];
+	init_attr.recv_cq	= device->rx_cq[min_index];
 	init_attr.cap.max_send_wr  = ISER_QP_MAX_REQ_DTOS;
 	init_attr.cap.max_recv_wr  = ISER_QP_MAX_RECV_DTOS;
 	init_attr.cap.max_send_sge = 2;
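QP-to-CQ placement is a simple least-loaded policy: a linear argmin over cq_active_qps[], serialized by ig.connlist_mutex so that concurrent connection setups don't both pick and bump the same counter. With ISER_MAX_CQ at 4, the O(n) scan under the mutex is negligible. As a standalone sketch (assumes the caller holds ig.connlist_mutex; the helper name is illustrative):

	static int iser_pick_cq(struct iser_device *device)
	{
		int index, min_index = 0;

		for (index = 0; index < device->cqs_used; index++)
			if (device->cq_active_qps[index] <
			    device->cq_active_qps[min_index])
				min_index = index;
		device->cq_active_qps[min_index]++;	/* account the new QP */
		return min_index;
	}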
@@ -252,6 +292,7 @@ out_err:
  */
 static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
 {
+	int cq_index;
 	BUG_ON(ib_conn == NULL);
 
 	iser_err("freeing conn %p cma_id %p fmr pool %p qp %p\n",
@@ -262,9 +303,12 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
 	if (ib_conn->fmr_pool != NULL)
 		ib_destroy_fmr_pool(ib_conn->fmr_pool);
 
-	if (ib_conn->qp != NULL)
-		rdma_destroy_qp(ib_conn->cma_id);
+	if (ib_conn->qp != NULL) {
+		cq_index = ((struct iser_cq_desc *)ib_conn->qp->recv_cq->cq_context)->cq_index;
+		ib_conn->device->cq_active_qps[cq_index]--;
 
+		rdma_destroy_qp(ib_conn->cma_id);
+	}
 	/* if cma handler context, the caller acts s.t the cma destroy the id */
 	if (ib_conn->cma_id != NULL && can_destroy_id)
 		rdma_destroy_id(ib_conn->cma_id);
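At teardown the connection never stored its CQ index, so it is recovered through the QP: qp->recv_cq->cq_context points at the iser_cq_desc installed at ib_create_cq() time, and its cq_index selects the usage counter to decrement. A sketch of that recovery (the helper name is hypothetical):

	static int iser_cq_index_of(struct ib_qp *qp)
	{
		struct iser_cq_desc *desc = qp->recv_cq->cq_context;

		return desc->cq_index;
	}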
@@ -791,9 +835,9 @@ static void iser_handle_comp_error(struct iser_tx_desc *desc,
 	}
 }
 
-static int iser_drain_tx_cq(struct iser_device *device)
+static int iser_drain_tx_cq(struct iser_device *device, int cq_index)
 {
-	struct ib_cq  *cq = device->tx_cq;
+	struct ib_cq  *cq = device->tx_cq[cq_index];
 	struct ib_wc  wc;
 	struct iser_tx_desc *tx_desc;
 	struct iser_conn *ib_conn;
@@ -822,8 +866,10 @@ static int iser_drain_tx_cq(struct iser_device *device)
 
 static void iser_cq_tasklet_fn(unsigned long data)
 {
-	struct iser_device  *device = (struct iser_device *)data;
-	struct ib_cq	    *cq = device->rx_cq;
+	struct iser_cq_desc *cq_desc = (struct iser_cq_desc *)data;
+	struct iser_device  *device = cq_desc->device;
+	int cq_index = cq_desc->cq_index;
+	struct ib_cq	    *cq = device->rx_cq[cq_index];
 	struct ib_wc	     wc;
 	struct iser_rx_desc *desc;
 	unsigned long	     xfer_len;
@@ -851,19 +897,21 @@ static void iser_cq_tasklet_fn(unsigned long data)
 		}
 		completed_rx++;
 		if (!(completed_rx & 63))
-			completed_tx += iser_drain_tx_cq(device);
+			completed_tx += iser_drain_tx_cq(device, cq_index);
 	}
 	/* #warning "it is assumed here that arming CQ only once its empty" *
 	 * " would not cause interrupts to be missed" */
 	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 
-	completed_tx += iser_drain_tx_cq(device);
+	completed_tx += iser_drain_tx_cq(device, cq_index);
 	iser_dbg("got %d rx %d tx completions\n", completed_rx, completed_tx);
 }
 
 static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
 {
-	struct iser_device  *device = (struct iser_device *)cq_context;
+	struct iser_cq_desc *cq_desc = (struct iser_cq_desc *)cq_context;
+	struct iser_device  *device = cq_desc->device;
+	int cq_index = cq_desc->cq_index;
 
-	tasklet_schedule(&device->cq_tasklet);
+	tasklet_schedule(&device->cq_tasklet[cq_index]);
 }