path: root/drivers/infiniband
author     Sean Hefty <sean.hefty@intel.com>    2005-10-17 18:37:43 -0400
committer  Roland Dreier <rolandd@cisco.com>    2005-10-17 18:37:43 -0400
commit     07d357d0cbf89d9980b1769d5444a3c70f000e00 (patch)
tree       cc7bcdee52b4e79e2115295e763f2e3d49c68a86 /drivers/infiniband
parent     595e726a1f28420c5fc7970b1a87cbce77a1cd45 (diff)
[IB] CM: bind IDs to a specific device
Bind communication identifiers to a device to support device removal.
Export per HCA CM devices to userspace.

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
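For in-kernel consumers, the visible API change is that ib_create_cm_id() now takes the ib_device the identifier is bound to, so listens and incoming REQs match on (device, service_id) rather than on service ID alone. A minimal sketch of the new calling convention follows; the handler and the hca/ctx names are illustrative placeholders, not part of this patch.

#include <rdma/ib_cm.h>

/* Illustrative handler; the ib_cm_handler signature is unchanged by this patch. */
static int my_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
        /* cm_id->device now records which HCA this ID is bound to. */
        return 0;
}

static struct ib_cm_id *my_bind_cm_id(struct ib_device *hca, void *ctx)
{
        struct ib_cm_id *cm_id;

        /* Before this patch: ib_create_cm_id(my_cm_handler, ctx);
         * after it, the device is supplied at creation time. */
        cm_id = ib_create_cm_id(hca, my_cm_handler, ctx);
        if (IS_ERR(cm_id))
                return cm_id;
        return cm_id;
}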
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/cm.c    41
-rw-r--r--  drivers/infiniband/core/ucm.c   218
2 files changed, 192 insertions(+), 67 deletions(-)
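On the ucm.c side, the patch registers a "ucm" ib_client and creates one character device per HCA (ucm0, ucm1, ...) in a new "infiniband_cm" class, each exposing dev and ibdev attributes. As a rough userspace illustration, a program could map a ucm device number back to its HCA name by reading the ibdev attribute; the sysfs path below is an assumption inferred from the class and class_id names in ib_ucm_add_one(), not something spelled out by the patch.

/* Hypothetical userspace helper, assuming the usual 2.6-era sysfs layout. */
#include <stdio.h>
#include <string.h>

static int ucm_ibdev_name(int devnum, char *name, int len)
{
        char path[64];
        FILE *f;

        snprintf(path, sizeof path,
                 "/sys/class/infiniband_cm/ucm%d/ibdev", devnum);
        f = fopen(path, "r");
        if (!f)
                return -1;
        if (!fgets(name, len, f)) {
                fclose(f);
                return -1;
        }
        fclose(f);
        name[strcspn(name, "\n")] = '\0';   /* strip trailing newline from sysfs */
        return 0;
}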
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 54db6d4831f1..6f747debca90 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -366,9 +366,15 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
 		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
 					  service_node);
 		if ((cur_cm_id_priv->id.service_mask & service_id) ==
-		    (service_mask & cur_cm_id_priv->id.service_id))
-			return cm_id_priv;
-		if (service_id < cur_cm_id_priv->id.service_id)
+		    (service_mask & cur_cm_id_priv->id.service_id) &&
+		    (cm_id_priv->id.device == cur_cm_id_priv->id.device))
+			return cur_cm_id_priv;
+
+		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
+			link = &(*link)->rb_left;
+		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
+			link = &(*link)->rb_right;
+		else if (service_id < cur_cm_id_priv->id.service_id)
 			link = &(*link)->rb_left;
 		else
 			link = &(*link)->rb_right;
@@ -378,7 +384,8 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
 	return NULL;
 }
 
-static struct cm_id_private * cm_find_listen(__be64 service_id)
+static struct cm_id_private * cm_find_listen(struct ib_device *device,
+					     __be64 service_id)
 {
 	struct rb_node *node = cm.listen_service_table.rb_node;
 	struct cm_id_private *cm_id_priv;
@@ -386,9 +393,15 @@ static struct cm_id_private * cm_find_listen(__be64 service_id)
 	while (node) {
 		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
 		if ((cm_id_priv->id.service_mask & service_id) ==
-		    (cm_id_priv->id.service_mask & cm_id_priv->id.service_id))
+		     cm_id_priv->id.service_id &&
+		    (cm_id_priv->id.device == device))
 			return cm_id_priv;
-		if (service_id < cm_id_priv->id.service_id)
+
+		if (device < cm_id_priv->id.device)
+			node = node->rb_left;
+		else if (device > cm_id_priv->id.device)
+			node = node->rb_right;
+		else if (service_id < cm_id_priv->id.service_id)
 			node = node->rb_left;
 		else
 			node = node->rb_right;
@@ -523,7 +536,8 @@ static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
 	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
 }
 
-struct ib_cm_id *ib_create_cm_id(ib_cm_handler cm_handler,
+struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
+				 ib_cm_handler cm_handler,
 				 void *context)
 {
 	struct cm_id_private *cm_id_priv;
@@ -535,6 +549,7 @@ struct ib_cm_id *ib_create_cm_id(ib_cm_handler cm_handler,
 
 	memset(cm_id_priv, 0, sizeof *cm_id_priv);
 	cm_id_priv->id.state = IB_CM_IDLE;
+	cm_id_priv->id.device = device;
 	cm_id_priv->id.cm_handler = cm_handler;
 	cm_id_priv->id.context = context;
 	cm_id_priv->id.remote_cm_qpn = 1;
@@ -1047,7 +1062,6 @@ static void cm_format_req_event(struct cm_work *work,
 	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
 	param = &work->cm_event.param.req_rcvd;
 	param->listen_id = listen_id;
-	param->device = cm_id_priv->av.port->mad_agent->device;
 	param->port = cm_id_priv->av.port->port_num;
 	param->primary_path = &work->path[0];
 	if (req_msg->alt_local_lid)
@@ -1226,7 +1240,8 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
 	}
 
 	/* Find matching listen request. */
-	listen_cm_id_priv = cm_find_listen(req_msg->service_id);
+	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
+					   req_msg->service_id);
 	if (!listen_cm_id_priv) {
 		spin_unlock_irqrestore(&cm.lock, flags);
 		cm_issue_rej(work->port, work->mad_recv_wc,
@@ -1254,7 +1269,7 @@ static int cm_req_handler(struct cm_work *work)
 
 	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
 
-	cm_id = ib_create_cm_id(NULL, NULL);
+	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
 	if (IS_ERR(cm_id))
 		return PTR_ERR(cm_id);
 
@@ -2629,7 +2644,6 @@ static void cm_format_sidr_req_event(struct cm_work *work,
 	param = &work->cm_event.param.sidr_req_rcvd;
 	param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
 	param->listen_id = listen_id;
-	param->device = work->port->mad_agent->device;
 	param->port = work->port->port_num;
 	work->cm_event.private_data = &sidr_req_msg->private_data;
 }
@@ -2642,7 +2656,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
 	struct ib_wc *wc;
 	unsigned long flags;
 
-	cm_id = ib_create_cm_id(NULL, NULL);
+	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
 	if (IS_ERR(cm_id))
 		return PTR_ERR(cm_id);
 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
@@ -2666,7 +2680,8 @@ static int cm_sidr_req_handler(struct cm_work *work)
 		spin_unlock_irqrestore(&cm.lock, flags);
 		goto out; /* Duplicate message. */
 	}
-	cur_cm_id_priv = cm_find_listen(sidr_req_msg->service_id);
+	cur_cm_id_priv = cm_find_listen(cm_id->device,
+					sidr_req_msg->service_id);
 	if (!cur_cm_id_priv) {
 		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
 		spin_unlock_irqrestore(&cm.lock, flags);
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index b7470f0fe83b..d208ea29e07a 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -52,12 +52,20 @@ MODULE_AUTHOR("Libor Michalek");
 MODULE_DESCRIPTION("InfiniBand userspace Connection Manager access");
 MODULE_LICENSE("Dual BSD/GPL");
 
+struct ib_ucm_device {
+	int			devnum;
+	struct cdev		dev;
+	struct class_device	class_dev;
+	struct ib_device	*ib_dev;
+};
+
 struct ib_ucm_file {
 	struct semaphore mutex;
 	struct file *filp;
+	struct ib_ucm_device *device;
 
-	struct list_head  ctxs;   /* list of active connections */
-	struct list_head  events; /* list of pending events */
+	struct list_head  ctxs;
+	struct list_head  events;
 	wait_queue_head_t poll_wait;
 };
 
@@ -90,14 +98,24 @@ struct ib_ucm_event {
 
 enum {
 	IB_UCM_MAJOR = 231,
-	IB_UCM_MINOR = 255
+	IB_UCM_BASE_MINOR = 224,
+	IB_UCM_MAX_DEVICES = 32
 };
 
-#define IB_UCM_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_MINOR)
+#define IB_UCM_BASE_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_BASE_MINOR)
 
-static struct semaphore ctx_id_mutex;
-static struct idr       ctx_id_table;
+static void ib_ucm_add_one(struct ib_device *device);
+static void ib_ucm_remove_one(struct ib_device *device);
 
+static struct ib_client ucm_client = {
+	.name   = "ucm",
+	.add    = ib_ucm_add_one,
+	.remove = ib_ucm_remove_one
+};
+
+DECLARE_MUTEX(ctx_id_mutex);
+DEFINE_IDR(ctx_id_table);
+static DECLARE_BITMAP(dev_map, IB_UCM_MAX_DEVICES);
 
 static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id)
 {
@@ -184,10 +202,7 @@ error:
 	kfree(ctx);
 	return NULL;
 }
-/*
- * Event portion of the API, handle CM events
- * and allow event polling.
- */
+
 static void ib_ucm_event_path_get(struct ib_ucm_path_rec *upath,
 				  struct ib_sa_path_rec *kpath)
 {
@@ -234,6 +249,7 @@ static void ib_ucm_event_req_get(struct ib_ucm_req_event_resp *ureq,
 	ureq->retry_count = kreq->retry_count;
 	ureq->rnr_retry_count = kreq->rnr_retry_count;
 	ureq->srq = kreq->srq;
+	ureq->port = kreq->port;
 
 	ib_ucm_event_path_get(&ureq->primary_path, kreq->primary_path);
 	ib_ucm_event_path_get(&ureq->alternate_path, kreq->alternate_path);
@@ -320,6 +336,8 @@ static int ib_ucm_event_process(struct ib_cm_event *evt,
 	case IB_CM_SIDR_REQ_RECEIVED:
 		uvt->resp.u.sidr_req_resp.pkey =
 				 evt->param.sidr_req_rcvd.pkey;
+		uvt->resp.u.sidr_req_resp.port =
+				 evt->param.sidr_req_rcvd.port;
 		uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE;
 		break;
 	case IB_CM_SIDR_REP_RECEIVED:
325 case IB_CM_SIDR_REP_RECEIVED: 343 case IB_CM_SIDR_REP_RECEIVED:
@@ -412,9 +430,7 @@ static ssize_t ib_ucm_event(struct ib_ucm_file *file,
 
 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 		return -EFAULT;
-	/*
-	 * wait
-	 */
+
 	down(&file->mutex);
 	while (list_empty(&file->events)) {
 
@@ -496,7 +512,6 @@ done:
 	return result;
 }
 
-
 static ssize_t ib_ucm_create_id(struct ib_ucm_file *file,
 				const char __user *inbuf,
 				int in_len, int out_len)
@@ -519,29 +534,27 @@ static ssize_t ib_ucm_create_id(struct ib_ucm_file *file,
 		return -ENOMEM;
 
 	ctx->uid = cmd.uid;
-	ctx->cm_id = ib_create_cm_id(ib_ucm_event_handler, ctx);
+	ctx->cm_id = ib_create_cm_id(file->device->ib_dev,
+				     ib_ucm_event_handler, ctx);
 	if (IS_ERR(ctx->cm_id)) {
 		result = PTR_ERR(ctx->cm_id);
-		goto err;
+		goto err1;
 	}
 
 	resp.id = ctx->id;
 	if (copy_to_user((void __user *)(unsigned long)cmd.response,
 			 &resp, sizeof(resp))) {
 		result = -EFAULT;
-		goto err;
+		goto err2;
 	}
-
 	return 0;
 
-err:
+err2:
+	ib_destroy_cm_id(ctx->cm_id);
+err1:
 	down(&ctx_id_mutex);
 	idr_remove(&ctx_id_table, ctx->id);
 	up(&ctx_id_mutex);
-
-	if (!IS_ERR(ctx->cm_id))
-		ib_destroy_cm_id(ctx->cm_id);
-
 	kfree(ctx);
 	return result;
 }
@@ -1253,6 +1266,7 @@ static int ib_ucm_open(struct inode *inode, struct file *filp)
 
 	filp->private_data = file;
 	file->filp = filp;
+	file->device = container_of(inode->i_cdev, struct ib_ucm_device, dev);
 
 	return 0;
 }
@@ -1283,7 +1297,17 @@ static int ib_ucm_close(struct inode *inode, struct file *filp)
 	return 0;
 }
 
-static struct file_operations ib_ucm_fops = {
+static void ib_ucm_release_class_dev(struct class_device *class_dev)
+{
+	struct ib_ucm_device *dev;
+
+	dev = container_of(class_dev, struct ib_ucm_device, class_dev);
+	cdev_del(&dev->dev);
+	clear_bit(dev->devnum, dev_map);
+	kfree(dev);
+}
+
+static struct file_operations ucm_fops = {
 	.owner   = THIS_MODULE,
 	.open    = ib_ucm_open,
 	.release = ib_ucm_close,
@@ -1291,55 +1315,141 @@ static struct file_operations ib_ucm_fops = {
 	.poll    = ib_ucm_poll,
 };
 
+static struct class ucm_class = {
+	.name    = "infiniband_cm",
+	.release = ib_ucm_release_class_dev
+};
+
+static ssize_t show_dev(struct class_device *class_dev, char *buf)
+{
+	struct ib_ucm_device *dev;
+
+	dev = container_of(class_dev, struct ib_ucm_device, class_dev);
+	return print_dev_t(buf, dev->dev.dev);
+}
+static CLASS_DEVICE_ATTR(dev, S_IRUGO, show_dev, NULL);
 
-static struct class *ib_ucm_class;
-static struct cdev ib_ucm_cdev;
+static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
+{
+	struct ib_ucm_device *dev;
+
+	dev = container_of(class_dev, struct ib_ucm_device, class_dev);
+	return sprintf(buf, "%s\n", dev->ib_dev->name);
+}
+static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
 
-static int __init ib_ucm_init(void)
+static void ib_ucm_add_one(struct ib_device *device)
 {
-	int result;
+	struct ib_ucm_device *ucm_dev;
 
-	result = register_chrdev_region(IB_UCM_DEV, 1, "infiniband_cm");
-	if (result) {
-		printk(KERN_ERR "ucm: Error <%d> registering dev\n", result);
-		goto err_chr;
-	}
+	if (!device->alloc_ucontext)
+		return;
+
+	ucm_dev = kmalloc(sizeof *ucm_dev, GFP_KERNEL);
+	if (!ucm_dev)
+		return;
 
-	cdev_init(&ib_ucm_cdev, &ib_ucm_fops);
+	memset(ucm_dev, 0, sizeof *ucm_dev);
+	ucm_dev->ib_dev = device;
 
-	result = cdev_add(&ib_ucm_cdev, IB_UCM_DEV, 1);
-	if (result) {
-		printk(KERN_ERR "ucm: Error <%d> adding cdev\n", result);
+	ucm_dev->devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES);
+	if (ucm_dev->devnum >= IB_UCM_MAX_DEVICES)
+		goto err;
+
+	set_bit(ucm_dev->devnum, dev_map);
+
+	cdev_init(&ucm_dev->dev, &ucm_fops);
+	ucm_dev->dev.owner = THIS_MODULE;
+	kobject_set_name(&ucm_dev->dev.kobj, "ucm%d", ucm_dev->devnum);
+	if (cdev_add(&ucm_dev->dev, IB_UCM_BASE_DEV + ucm_dev->devnum, 1))
+		goto err;
+
+	ucm_dev->class_dev.class = &ucm_class;
+	ucm_dev->class_dev.dev = device->dma_device;
+	snprintf(ucm_dev->class_dev.class_id, BUS_ID_SIZE, "ucm%d",
+		 ucm_dev->devnum);
+	if (class_device_register(&ucm_dev->class_dev))
 		goto err_cdev;
-	}
 
-	ib_ucm_class = class_create(THIS_MODULE, "infiniband_cm");
-	if (IS_ERR(ib_ucm_class)) {
-		result = PTR_ERR(ib_ucm_class);
-		printk(KERN_ERR "Error <%d> creating class\n", result);
+	if (class_device_create_file(&ucm_dev->class_dev,
+				     &class_device_attr_dev))
+		goto err_class;
+	if (class_device_create_file(&ucm_dev->class_dev,
+				     &class_device_attr_ibdev))
 		goto err_class;
+
+	ib_set_client_data(device, &ucm_client, ucm_dev);
+	return;
+
+err_class:
+	class_device_unregister(&ucm_dev->class_dev);
+err_cdev:
+	cdev_del(&ucm_dev->dev);
+	clear_bit(ucm_dev->devnum, dev_map);
+err:
+	kfree(ucm_dev);
+	return;
+}
+
+static void ib_ucm_remove_one(struct ib_device *device)
+{
+	struct ib_ucm_device *ucm_dev = ib_get_client_data(device, &ucm_client);
+
+	if (!ucm_dev)
+		return;
+
+	class_device_unregister(&ucm_dev->class_dev);
+}
+
+static ssize_t show_abi_version(struct class *class, char *buf)
+{
+	return sprintf(buf, "%d\n", IB_USER_CM_ABI_VERSION);
+}
+static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
+
+static int __init ib_ucm_init(void)
+{
+	int ret;
+
+	ret = register_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES,
+				     "infiniband_cm");
+	if (ret) {
+		printk(KERN_ERR "ucm: couldn't register device number\n");
+		goto err;
 	}
 
-	class_device_create(ib_ucm_class, IB_UCM_DEV, NULL, "ucm");
+	ret = class_register(&ucm_class);
+	if (ret) {
+		printk(KERN_ERR "ucm: couldn't create class infiniband_cm\n");
+		goto err_chrdev;
+	}
 
-	idr_init(&ctx_id_table);
-	init_MUTEX(&ctx_id_mutex);
+	ret = class_create_file(&ucm_class, &class_attr_abi_version);
+	if (ret) {
+		printk(KERN_ERR "ucm: couldn't create abi_version attribute\n");
+		goto err_class;
+	}
 
+	ret = ib_register_client(&ucm_client);
+	if (ret) {
+		printk(KERN_ERR "ucm: couldn't register client\n");
+		goto err_class;
+	}
 	return 0;
+
 err_class:
-	cdev_del(&ib_ucm_cdev);
-err_cdev:
-	unregister_chrdev_region(IB_UCM_DEV, 1);
-err_chr:
-	return result;
+	class_unregister(&ucm_class);
+err_chrdev:
+	unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
+err:
+	return ret;
 }
 
 static void __exit ib_ucm_cleanup(void)
 {
-	class_device_destroy(ib_ucm_class, IB_UCM_DEV);
-	class_destroy(ib_ucm_class);
-	cdev_del(&ib_ucm_cdev);
-	unregister_chrdev_region(IB_UCM_DEV, 1);
+	ib_unregister_client(&ucm_client);
+	class_unregister(&ucm_class);
+	unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
 }
 
 module_init(ib_ucm_init);