diff options
Diffstat (limited to 'drivers/infiniband')
-rw-r--r-- | drivers/infiniband/core/ucm.c         |  63
-rw-r--r-- | drivers/infiniband/core/user_mad.c    | 173
-rw-r--r-- | drivers/infiniband/core/uverbs.h      |  11
-rw-r--r-- | drivers/infiniband/core/uverbs_main.c | 175
4 files changed, 253 insertions, 169 deletions
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index f504c9b00c1b..1b09b735c5a8 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -1215,15 +1215,18 @@ static void ib_ucm_release_dev(struct device *dev) | |||
1215 | 1215 | ||
1216 | ucm_dev = container_of(dev, struct ib_ucm_device, dev); | 1216 | ucm_dev = container_of(dev, struct ib_ucm_device, dev); |
1217 | cdev_del(&ucm_dev->cdev); | 1217 | cdev_del(&ucm_dev->cdev); |
1218 | clear_bit(ucm_dev->devnum, dev_map); | 1218 | if (ucm_dev->devnum < IB_UCM_MAX_DEVICES) |
1219 | clear_bit(ucm_dev->devnum, dev_map); | ||
1220 | else | ||
1221 | clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, dev_map); | ||
1219 | kfree(ucm_dev); | 1222 | kfree(ucm_dev); |
1220 | } | 1223 | } |
1221 | 1224 | ||
1222 | static const struct file_operations ucm_fops = { | 1225 | static const struct file_operations ucm_fops = { |
1223 | .owner = THIS_MODULE, | 1226 | .owner = THIS_MODULE, |
1224 | .open = ib_ucm_open, | 1227 | .open = ib_ucm_open, |
1225 | .release = ib_ucm_close, | 1228 | .release = ib_ucm_close, |
1226 | .write = ib_ucm_write, | 1229 | .write = ib_ucm_write, |
1227 | .poll = ib_ucm_poll, | 1230 | .poll = ib_ucm_poll, |
1228 | }; | 1231 | }; |
1229 | 1232 | ||
@@ -1237,8 +1240,32 @@ static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr, | |||
1237 | } | 1240 | } |
1238 | static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL); | 1241 | static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL); |
1239 | 1242 | ||
1243 | static dev_t overflow_maj; | ||
1244 | static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES); | ||
1245 | static int find_overflow_devnum(void) | ||
1246 | { | ||
1247 | int ret; | ||
1248 | |||
1249 | if (!overflow_maj) { | ||
1250 | ret = alloc_chrdev_region(&overflow_maj, 0, IB_UCM_MAX_DEVICES, | ||
1251 | "infiniband_cm"); | ||
1252 | if (ret) { | ||
1253 | printk(KERN_ERR "ucm: couldn't register dynamic device number\n"); | ||
1254 | return ret; | ||
1255 | } | ||
1256 | } | ||
1257 | |||
1258 | ret = find_first_zero_bit(overflow_map, IB_UCM_MAX_DEVICES); | ||
1259 | if (ret >= IB_UCM_MAX_DEVICES) | ||
1260 | return -1; | ||
1261 | |||
1262 | return ret; | ||
1263 | } | ||
1264 | |||
1240 | static void ib_ucm_add_one(struct ib_device *device) | 1265 | static void ib_ucm_add_one(struct ib_device *device) |
1241 | { | 1266 | { |
1267 | int devnum; | ||
1268 | dev_t base; | ||
1242 | struct ib_ucm_device *ucm_dev; | 1269 | struct ib_ucm_device *ucm_dev; |
1243 | 1270 | ||
1244 | if (!device->alloc_ucontext || | 1271 | if (!device->alloc_ucontext || |
@@ -1251,16 +1278,25 @@ static void ib_ucm_add_one(struct ib_device *device) | |||
1251 | 1278 | ||
1252 | ucm_dev->ib_dev = device; | 1279 | ucm_dev->ib_dev = device; |
1253 | 1280 | ||
1254 | ucm_dev->devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES); | 1281 | devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES); |
1255 | if (ucm_dev->devnum >= IB_UCM_MAX_DEVICES) | 1282 | if (devnum >= IB_UCM_MAX_DEVICES) { |
1256 | goto err; | 1283 | devnum = find_overflow_devnum(); |
1257 | 1284 | if (devnum < 0) | |
1258 | set_bit(ucm_dev->devnum, dev_map); | 1285 | goto err; |
1286 | |||
1287 | ucm_dev->devnum = devnum + IB_UCM_MAX_DEVICES; | ||
1288 | base = devnum + overflow_maj; | ||
1289 | set_bit(devnum, overflow_map); | ||
1290 | } else { | ||
1291 | ucm_dev->devnum = devnum; | ||
1292 | base = devnum + IB_UCM_BASE_DEV; | ||
1293 | set_bit(devnum, dev_map); | ||
1294 | } | ||
1259 | 1295 | ||
1260 | cdev_init(&ucm_dev->cdev, &ucm_fops); | 1296 | cdev_init(&ucm_dev->cdev, &ucm_fops); |
1261 | ucm_dev->cdev.owner = THIS_MODULE; | 1297 | ucm_dev->cdev.owner = THIS_MODULE; |
1262 | kobject_set_name(&ucm_dev->cdev.kobj, "ucm%d", ucm_dev->devnum); | 1298 | kobject_set_name(&ucm_dev->cdev.kobj, "ucm%d", ucm_dev->devnum); |
1263 | if (cdev_add(&ucm_dev->cdev, IB_UCM_BASE_DEV + ucm_dev->devnum, 1)) | 1299 | if (cdev_add(&ucm_dev->cdev, base, 1)) |
1264 | goto err; | 1300 | goto err; |
1265 | 1301 | ||
1266 | ucm_dev->dev.class = &cm_class; | 1302 | ucm_dev->dev.class = &cm_class; |
@@ -1281,7 +1317,10 @@ err_dev: | |||
1281 | device_unregister(&ucm_dev->dev); | 1317 | device_unregister(&ucm_dev->dev); |
1282 | err_cdev: | 1318 | err_cdev: |
1283 | cdev_del(&ucm_dev->cdev); | 1319 | cdev_del(&ucm_dev->cdev); |
1284 | clear_bit(ucm_dev->devnum, dev_map); | 1320 | if (ucm_dev->devnum < IB_UCM_MAX_DEVICES) |
1321 | clear_bit(devnum, dev_map); | ||
1322 | else | ||
1323 | clear_bit(devnum, overflow_map); | ||
1285 | err: | 1324 | err: |
1286 | kfree(ucm_dev); | 1325 | kfree(ucm_dev); |
1287 | return; | 1326 | return; |
@@ -1340,6 +1379,8 @@ static void __exit ib_ucm_cleanup(void) | |||
1340 | ib_unregister_client(&ucm_client); | 1379 | ib_unregister_client(&ucm_client); |
1341 | class_remove_file(&cm_class, &class_attr_abi_version); | 1380 | class_remove_file(&cm_class, &class_attr_abi_version); |
1342 | unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES); | 1381 | unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES); |
1382 | if (overflow_maj) | ||
1383 | unregister_chrdev_region(overflow_maj, IB_UCM_MAX_DEVICES); | ||
1343 | idr_destroy(&ctx_id_table); | 1384 | idr_destroy(&ctx_id_table); |
1344 | } | 1385 | } |
1345 | 1386 | ||
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 7de02969ed7d..02d360cfc2f7 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -65,12 +65,9 @@ enum { | |||
65 | }; | 65 | }; |
66 | 66 | ||
67 | /* | 67 | /* |
68 | * Our lifetime rules for these structs are the following: each time a | 68 | * Our lifetime rules for these structs are the following: |
69 | * device special file is opened, we look up the corresponding struct | 69 | * device special file is opened, we take a reference on the |
70 | * ib_umad_port by minor in the umad_port[] table while holding the | 70 | * ib_umad_port's struct ib_umad_device. We drop these |
71 | * port_lock. If this lookup succeeds, we take a reference on the | ||
72 | * ib_umad_port's struct ib_umad_device while still holding the | ||
73 | * port_lock; if the lookup fails, we fail the open(). We drop these | ||
74 | * references in the corresponding close(). | 71 | * references in the corresponding close(). |
75 | * | 72 | * |
76 | * In addition to references coming from open character devices, there | 73 | * In addition to references coming from open character devices, there |
@@ -78,19 +75,14 @@ enum { | |||
78 | * module's reference taken when allocating the ib_umad_device in | 75 | * module's reference taken when allocating the ib_umad_device in |
79 | * ib_umad_add_one(). | 76 | * ib_umad_add_one(). |
80 | * | 77 | * |
81 | * When destroying an ib_umad_device, we clear all of its | 78 | * When destroying an ib_umad_device, we drop the module's reference. |
82 | * ib_umad_ports from umad_port[] while holding port_lock before | ||
83 | * dropping the module's reference to the ib_umad_device. This is | ||
84 | * always safe because any open() calls will either succeed and obtain | ||
85 | * a reference before we clear the umad_port[] entries, or fail after | ||
86 | * we clear the umad_port[] entries. | ||
87 | */ | 79 | */ |
88 | 80 | ||
89 | struct ib_umad_port { | 81 | struct ib_umad_port { |
90 | struct cdev *cdev; | 82 | struct cdev cdev; |
91 | struct device *dev; | 83 | struct device *dev; |
92 | 84 | ||
93 | struct cdev *sm_cdev; | 85 | struct cdev sm_cdev; |
94 | struct device *sm_dev; | 86 | struct device *sm_dev; |
95 | struct semaphore sm_sem; | 87 | struct semaphore sm_sem; |
96 | 88 | ||
@@ -136,7 +128,6 @@ static struct class *umad_class; | |||
136 | static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE); | 128 | static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE); |
137 | 129 | ||
138 | static DEFINE_SPINLOCK(port_lock); | 130 | static DEFINE_SPINLOCK(port_lock); |
139 | static struct ib_umad_port *umad_port[IB_UMAD_MAX_PORTS]; | ||
140 | static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS); | 131 | static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS); |
141 | 132 | ||
142 | static void ib_umad_add_one(struct ib_device *device); | 133 | static void ib_umad_add_one(struct ib_device *device); |
@@ -496,8 +487,8 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, | |||
496 | ah_attr.ah_flags = IB_AH_GRH; | 487 | ah_attr.ah_flags = IB_AH_GRH; |
497 | memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16); | 488 | memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16); |
498 | ah_attr.grh.sgid_index = packet->mad.hdr.gid_index; | 489 | ah_attr.grh.sgid_index = packet->mad.hdr.gid_index; |
499 | ah_attr.grh.flow_label = be32_to_cpu(packet->mad.hdr.flow_label); | 490 | ah_attr.grh.flow_label = be32_to_cpu(packet->mad.hdr.flow_label); |
500 | ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit; | 491 | ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit; |
501 | ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class; | 492 | ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class; |
502 | } | 493 | } |
503 | 494 | ||
@@ -528,9 +519,9 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, | |||
528 | goto err_ah; | 519 | goto err_ah; |
529 | } | 520 | } |
530 | 521 | ||
531 | packet->msg->ah = ah; | 522 | packet->msg->ah = ah; |
532 | packet->msg->timeout_ms = packet->mad.hdr.timeout_ms; | 523 | packet->msg->timeout_ms = packet->mad.hdr.timeout_ms; |
533 | packet->msg->retries = packet->mad.hdr.retries; | 524 | packet->msg->retries = packet->mad.hdr.retries; |
534 | packet->msg->context[0] = packet; | 525 | packet->msg->context[0] = packet; |
535 | 526 | ||
536 | /* Copy MAD header. Any RMPP header is already in place. */ | 527 | /* Copy MAD header. Any RMPP header is already in place. */ |
@@ -779,15 +770,11 @@ static long ib_umad_compat_ioctl(struct file *filp, unsigned int cmd, | |||
779 | /* | 770 | /* |
780 | * ib_umad_open() does not need the BKL: | 771 | * ib_umad_open() does not need the BKL: |
781 | * | 772 | * |
782 | * - umad_port[] accesses are protected by port_lock, the | 773 | * - the ib_umad_port structures are properly reference counted, and |
783 | * ib_umad_port structures are properly reference counted, and | ||
784 | * everything else is purely local to the file being created, so | 774 | * everything else is purely local to the file being created, so |
785 | * races against other open calls are not a problem; | 775 | * races against other open calls are not a problem; |
786 | * - the ioctl method does not affect any global state outside of the | 776 | * - the ioctl method does not affect any global state outside of the |
787 | * file structure being operated on; | 777 | * file structure being operated on; |
788 | * - the port is added to umad_port[] as the last part of module | ||
789 | * initialization so the open method will either immediately run | ||
790 | * -ENXIO, or all required initialization will be done. | ||
791 | */ | 778 | */ |
792 | static int ib_umad_open(struct inode *inode, struct file *filp) | 779 | static int ib_umad_open(struct inode *inode, struct file *filp) |
793 | { | 780 | { |
@@ -795,13 +782,10 @@ static int ib_umad_open(struct inode *inode, struct file *filp) | |||
795 | struct ib_umad_file *file; | 782 | struct ib_umad_file *file; |
796 | int ret = 0; | 783 | int ret = 0; |
797 | 784 | ||
798 | spin_lock(&port_lock); | 785 | port = container_of(inode->i_cdev, struct ib_umad_port, cdev); |
799 | port = umad_port[iminor(inode) - IB_UMAD_MINOR_BASE]; | ||
800 | if (port) | 786 | if (port) |
801 | kref_get(&port->umad_dev->ref); | 787 | kref_get(&port->umad_dev->ref); |
802 | spin_unlock(&port_lock); | 788 | else |
803 | |||
804 | if (!port) | ||
805 | return -ENXIO; | 789 | return -ENXIO; |
806 | 790 | ||
807 | mutex_lock(&port->file_mutex); | 791 | mutex_lock(&port->file_mutex); |
@@ -872,16 +856,16 @@ static int ib_umad_close(struct inode *inode, struct file *filp) | |||
872 | } | 856 | } |
873 | 857 | ||
874 | static const struct file_operations umad_fops = { | 858 | static const struct file_operations umad_fops = { |
875 | .owner = THIS_MODULE, | 859 | .owner = THIS_MODULE, |
876 | .read = ib_umad_read, | 860 | .read = ib_umad_read, |
877 | .write = ib_umad_write, | 861 | .write = ib_umad_write, |
878 | .poll = ib_umad_poll, | 862 | .poll = ib_umad_poll, |
879 | .unlocked_ioctl = ib_umad_ioctl, | 863 | .unlocked_ioctl = ib_umad_ioctl, |
880 | #ifdef CONFIG_COMPAT | 864 | #ifdef CONFIG_COMPAT |
881 | .compat_ioctl = ib_umad_compat_ioctl, | 865 | .compat_ioctl = ib_umad_compat_ioctl, |
882 | #endif | 866 | #endif |
883 | .open = ib_umad_open, | 867 | .open = ib_umad_open, |
884 | .release = ib_umad_close | 868 | .release = ib_umad_close |
885 | }; | 869 | }; |
886 | 870 | ||
887 | static int ib_umad_sm_open(struct inode *inode, struct file *filp) | 871 | static int ib_umad_sm_open(struct inode *inode, struct file *filp) |
@@ -892,13 +876,10 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp) | |||
892 | }; | 876 | }; |
893 | int ret; | 877 | int ret; |
894 | 878 | ||
895 | spin_lock(&port_lock); | 879 | port = container_of(inode->i_cdev, struct ib_umad_port, sm_cdev); |
896 | port = umad_port[iminor(inode) - IB_UMAD_MINOR_BASE - IB_UMAD_MAX_PORTS]; | ||
897 | if (port) | 880 | if (port) |
898 | kref_get(&port->umad_dev->ref); | 881 | kref_get(&port->umad_dev->ref); |
899 | spin_unlock(&port_lock); | 882 | else |
900 | |||
901 | if (!port) | ||
902 | return -ENXIO; | 883 | return -ENXIO; |
903 | 884 | ||
904 | if (filp->f_flags & O_NONBLOCK) { | 885 | if (filp->f_flags & O_NONBLOCK) { |
@@ -949,8 +930,8 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp) | |||
949 | } | 930 | } |
950 | 931 | ||
951 | static const struct file_operations umad_sm_fops = { | 932 | static const struct file_operations umad_sm_fops = { |
952 | .owner = THIS_MODULE, | 933 | .owner = THIS_MODULE, |
953 | .open = ib_umad_sm_open, | 934 | .open = ib_umad_sm_open, |
954 | .release = ib_umad_sm_close | 935 | .release = ib_umad_sm_close |
955 | }; | 936 | }; |
956 | 937 | ||
@@ -990,16 +971,51 @@ static ssize_t show_abi_version(struct class *class, char *buf) | |||
990 | } | 971 | } |
991 | static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL); | 972 | static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL); |
992 | 973 | ||
974 | static dev_t overflow_maj; | ||
975 | static DECLARE_BITMAP(overflow_map, IB_UMAD_MAX_PORTS); | ||
976 | static int find_overflow_devnum(void) | ||
977 | { | ||
978 | int ret; | ||
979 | |||
980 | if (!overflow_maj) { | ||
981 | ret = alloc_chrdev_region(&overflow_maj, 0, IB_UMAD_MAX_PORTS * 2, | ||
982 | "infiniband_mad"); | ||
983 | if (ret) { | ||
984 | printk(KERN_ERR "user_mad: couldn't register dynamic device number\n"); | ||
985 | return ret; | ||
986 | } | ||
987 | } | ||
988 | |||
989 | ret = find_first_zero_bit(overflow_map, IB_UMAD_MAX_PORTS); | ||
990 | if (ret >= IB_UMAD_MAX_PORTS) | ||
991 | return -1; | ||
992 | |||
993 | return ret; | ||
994 | } | ||
995 | |||
993 | static int ib_umad_init_port(struct ib_device *device, int port_num, | 996 | static int ib_umad_init_port(struct ib_device *device, int port_num, |
994 | struct ib_umad_port *port) | 997 | struct ib_umad_port *port) |
995 | { | 998 | { |
999 | int devnum; | ||
1000 | dev_t base; | ||
1001 | |||
996 | spin_lock(&port_lock); | 1002 | spin_lock(&port_lock); |
997 | port->dev_num = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS); | 1003 | devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS); |
998 | if (port->dev_num >= IB_UMAD_MAX_PORTS) { | 1004 | if (devnum >= IB_UMAD_MAX_PORTS) { |
999 | spin_unlock(&port_lock); | 1005 | spin_unlock(&port_lock); |
1000 | return -1; | 1006 | devnum = find_overflow_devnum(); |
1007 | if (devnum < 0) | ||
1008 | return -1; | ||
1009 | |||
1010 | spin_lock(&port_lock); | ||
1011 | port->dev_num = devnum + IB_UMAD_MAX_PORTS; | ||
1012 | base = devnum + overflow_maj; | ||
1013 | set_bit(devnum, overflow_map); | ||
1014 | } else { | ||
1015 | port->dev_num = devnum; | ||
1016 | base = devnum + base_dev; | ||
1017 | set_bit(devnum, dev_map); | ||
1001 | } | 1018 | } |
1002 | set_bit(port->dev_num, dev_map); | ||
1003 | spin_unlock(&port_lock); | 1019 | spin_unlock(&port_lock); |
1004 | 1020 | ||
1005 | port->ib_dev = device; | 1021 | port->ib_dev = device; |
@@ -1008,17 +1024,14 @@ static int ib_umad_init_port(struct ib_device *device, int port_num, | |||
1008 | mutex_init(&port->file_mutex); | 1024 | mutex_init(&port->file_mutex); |
1009 | INIT_LIST_HEAD(&port->file_list); | 1025 | INIT_LIST_HEAD(&port->file_list); |
1010 | 1026 | ||
1011 | port->cdev = cdev_alloc(); | 1027 | cdev_init(&port->cdev, &umad_fops); |
1012 | if (!port->cdev) | 1028 | port->cdev.owner = THIS_MODULE; |
1013 | return -1; | 1029 | kobject_set_name(&port->cdev.kobj, "umad%d", port->dev_num); |
1014 | port->cdev->owner = THIS_MODULE; | 1030 | if (cdev_add(&port->cdev, base, 1)) |
1015 | port->cdev->ops = &umad_fops; | ||
1016 | kobject_set_name(&port->cdev->kobj, "umad%d", port->dev_num); | ||
1017 | if (cdev_add(port->cdev, base_dev + port->dev_num, 1)) | ||
1018 | goto err_cdev; | 1031 | goto err_cdev; |
1019 | 1032 | ||
1020 | port->dev = device_create(umad_class, device->dma_device, | 1033 | port->dev = device_create(umad_class, device->dma_device, |
1021 | port->cdev->dev, port, | 1034 | port->cdev.dev, port, |
1022 | "umad%d", port->dev_num); | 1035 | "umad%d", port->dev_num); |
1023 | if (IS_ERR(port->dev)) | 1036 | if (IS_ERR(port->dev)) |
1024 | goto err_cdev; | 1037 | goto err_cdev; |
@@ -1028,17 +1041,15 @@ static int ib_umad_init_port(struct ib_device *device, int port_num, | |||
1028 | if (device_create_file(port->dev, &dev_attr_port)) | 1041 | if (device_create_file(port->dev, &dev_attr_port)) |
1029 | goto err_dev; | 1042 | goto err_dev; |
1030 | 1043 | ||
1031 | port->sm_cdev = cdev_alloc(); | 1044 | base += IB_UMAD_MAX_PORTS; |
1032 | if (!port->sm_cdev) | 1045 | cdev_init(&port->sm_cdev, &umad_sm_fops); |
1033 | goto err_dev; | 1046 | port->sm_cdev.owner = THIS_MODULE; |
1034 | port->sm_cdev->owner = THIS_MODULE; | 1047 | kobject_set_name(&port->sm_cdev.kobj, "issm%d", port->dev_num); |
1035 | port->sm_cdev->ops = &umad_sm_fops; | 1048 | if (cdev_add(&port->sm_cdev, base, 1)) |
1036 | kobject_set_name(&port->sm_cdev->kobj, "issm%d", port->dev_num); | ||
1037 | if (cdev_add(port->sm_cdev, base_dev + port->dev_num + IB_UMAD_MAX_PORTS, 1)) | ||
1038 | goto err_sm_cdev; | 1049 | goto err_sm_cdev; |
1039 | 1050 | ||
1040 | port->sm_dev = device_create(umad_class, device->dma_device, | 1051 | port->sm_dev = device_create(umad_class, device->dma_device, |
1041 | port->sm_cdev->dev, port, | 1052 | port->sm_cdev.dev, port, |
1042 | "issm%d", port->dev_num); | 1053 | "issm%d", port->dev_num); |
1043 | if (IS_ERR(port->sm_dev)) | 1054 | if (IS_ERR(port->sm_dev)) |
1044 | goto err_sm_cdev; | 1055 | goto err_sm_cdev; |
@@ -1048,24 +1059,23 @@ static int ib_umad_init_port(struct ib_device *device, int port_num, | |||
1048 | if (device_create_file(port->sm_dev, &dev_attr_port)) | 1059 | if (device_create_file(port->sm_dev, &dev_attr_port)) |
1049 | goto err_sm_dev; | 1060 | goto err_sm_dev; |
1050 | 1061 | ||
1051 | spin_lock(&port_lock); | ||
1052 | umad_port[port->dev_num] = port; | ||
1053 | spin_unlock(&port_lock); | ||
1054 | |||
1055 | return 0; | 1062 | return 0; |
1056 | 1063 | ||
1057 | err_sm_dev: | 1064 | err_sm_dev: |
1058 | device_destroy(umad_class, port->sm_cdev->dev); | 1065 | device_destroy(umad_class, port->sm_cdev.dev); |
1059 | 1066 | ||
1060 | err_sm_cdev: | 1067 | err_sm_cdev: |
1061 | cdev_del(port->sm_cdev); | 1068 | cdev_del(&port->sm_cdev); |
1062 | 1069 | ||
1063 | err_dev: | 1070 | err_dev: |
1064 | device_destroy(umad_class, port->cdev->dev); | 1071 | device_destroy(umad_class, port->cdev.dev); |
1065 | 1072 | ||
1066 | err_cdev: | 1073 | err_cdev: |
1067 | cdev_del(port->cdev); | 1074 | cdev_del(&port->cdev); |
1068 | clear_bit(port->dev_num, dev_map); | 1075 | if (port->dev_num < IB_UMAD_MAX_PORTS) |
1076 | clear_bit(devnum, dev_map); | ||
1077 | else | ||
1078 | clear_bit(devnum, overflow_map); | ||
1069 | 1079 | ||
1070 | return -1; | 1080 | return -1; |
1071 | } | 1081 | } |
@@ -1079,15 +1089,11 @@ static void ib_umad_kill_port(struct ib_umad_port *port) | |||
1079 | dev_set_drvdata(port->dev, NULL); | 1089 | dev_set_drvdata(port->dev, NULL); |
1080 | dev_set_drvdata(port->sm_dev, NULL); | 1090 | dev_set_drvdata(port->sm_dev, NULL); |
1081 | 1091 | ||
1082 | device_destroy(umad_class, port->cdev->dev); | 1092 | device_destroy(umad_class, port->cdev.dev); |
1083 | device_destroy(umad_class, port->sm_cdev->dev); | 1093 | device_destroy(umad_class, port->sm_cdev.dev); |
1084 | 1094 | ||
1085 | cdev_del(port->cdev); | 1095 | cdev_del(&port->cdev); |
1086 | cdev_del(port->sm_cdev); | 1096 | cdev_del(&port->sm_cdev); |
1087 | |||
1088 | spin_lock(&port_lock); | ||
1089 | umad_port[port->dev_num] = NULL; | ||
1090 | spin_unlock(&port_lock); | ||
1091 | 1097 | ||
1092 | mutex_lock(&port->file_mutex); | 1098 | mutex_lock(&port->file_mutex); |
1093 | 1099 | ||
@@ -1106,7 +1112,10 @@ static void ib_umad_kill_port(struct ib_umad_port *port) | |||
1106 | 1112 | ||
1107 | mutex_unlock(&port->file_mutex); | 1113 | mutex_unlock(&port->file_mutex); |
1108 | 1114 | ||
1109 | clear_bit(port->dev_num, dev_map); | 1115 | if (port->dev_num < IB_UMAD_MAX_PORTS) |
1116 | clear_bit(port->dev_num, dev_map); | ||
1117 | else | ||
1118 | clear_bit(port->dev_num - IB_UMAD_MAX_PORTS, overflow_map); | ||
1110 | } | 1119 | } |
1111 | 1120 | ||
1112 | static void ib_umad_add_one(struct ib_device *device) | 1121 | static void ib_umad_add_one(struct ib_device *device) |
@@ -1214,6 +1223,8 @@ static void __exit ib_umad_cleanup(void) | |||
1214 | ib_unregister_client(&umad_client); | 1223 | ib_unregister_client(&umad_client); |
1215 | class_destroy(umad_class); | 1224 | class_destroy(umad_class); |
1216 | unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2); | 1225 | unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2); |
1226 | if (overflow_maj) | ||
1227 | unregister_chrdev_region(overflow_maj, IB_UMAD_MAX_PORTS * 2); | ||
1217 | } | 1228 | } |
1218 | 1229 | ||
1219 | module_init(ib_umad_init); | 1230 | module_init(ib_umad_init); |
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index b3ea9587dc80..e54d9ac6d1ca 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/idr.h> | 41 | #include <linux/idr.h> |
42 | #include <linux/mutex.h> | 42 | #include <linux/mutex.h> |
43 | #include <linux/completion.h> | 43 | #include <linux/completion.h> |
44 | #include <linux/cdev.h> | ||
44 | 45 | ||
45 | #include <rdma/ib_verbs.h> | 46 | #include <rdma/ib_verbs.h> |
46 | #include <rdma/ib_umem.h> | 47 | #include <rdma/ib_umem.h> |
@@ -69,23 +70,23 @@ | |||
69 | 70 | ||
70 | struct ib_uverbs_device { | 71 | struct ib_uverbs_device { |
71 | struct kref ref; | 72 | struct kref ref; |
73 | int num_comp_vectors; | ||
72 | struct completion comp; | 74 | struct completion comp; |
73 | int devnum; | ||
74 | struct cdev *cdev; | ||
75 | struct device *dev; | 75 | struct device *dev; |
76 | struct ib_device *ib_dev; | 76 | struct ib_device *ib_dev; |
77 | int num_comp_vectors; | 77 | int devnum; |
78 | struct cdev cdev; | ||
78 | }; | 79 | }; |
79 | 80 | ||
80 | struct ib_uverbs_event_file { | 81 | struct ib_uverbs_event_file { |
81 | struct kref ref; | 82 | struct kref ref; |
83 | int is_async; | ||
82 | struct ib_uverbs_file *uverbs_file; | 84 | struct ib_uverbs_file *uverbs_file; |
83 | spinlock_t lock; | 85 | spinlock_t lock; |
86 | int is_closed; | ||
84 | wait_queue_head_t poll_wait; | 87 | wait_queue_head_t poll_wait; |
85 | struct fasync_struct *async_queue; | 88 | struct fasync_struct *async_queue; |
86 | struct list_head event_list; | 89 | struct list_head event_list; |
87 | int is_async; | ||
88 | int is_closed; | ||
89 | }; | 90 | }; |
90 | 91 | ||
91 | struct ib_uverbs_file { | 92 | struct ib_uverbs_file { |
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 5f284ffd430e..dbf04511cf0a 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -43,7 +43,6 @@ | |||
43 | #include <linux/sched.h> | 43 | #include <linux/sched.h> |
44 | #include <linux/file.h> | 44 | #include <linux/file.h> |
45 | #include <linux/mount.h> | 45 | #include <linux/mount.h> |
46 | #include <linux/cdev.h> | ||
47 | 46 | ||
48 | #include <asm/uaccess.h> | 47 | #include <asm/uaccess.h> |
49 | 48 | ||
@@ -75,40 +74,39 @@ DEFINE_IDR(ib_uverbs_qp_idr); | |||
75 | DEFINE_IDR(ib_uverbs_srq_idr); | 74 | DEFINE_IDR(ib_uverbs_srq_idr); |
76 | 75 | ||
77 | static DEFINE_SPINLOCK(map_lock); | 76 | static DEFINE_SPINLOCK(map_lock); |
78 | static struct ib_uverbs_device *dev_table[IB_UVERBS_MAX_DEVICES]; | ||
79 | static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES); | 77 | static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES); |
80 | 78 | ||
81 | static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file, | 79 | static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file, |
82 | const char __user *buf, int in_len, | 80 | const char __user *buf, int in_len, |
83 | int out_len) = { | 81 | int out_len) = { |
84 | [IB_USER_VERBS_CMD_GET_CONTEXT] = ib_uverbs_get_context, | 82 | [IB_USER_VERBS_CMD_GET_CONTEXT] = ib_uverbs_get_context, |
85 | [IB_USER_VERBS_CMD_QUERY_DEVICE] = ib_uverbs_query_device, | 83 | [IB_USER_VERBS_CMD_QUERY_DEVICE] = ib_uverbs_query_device, |
86 | [IB_USER_VERBS_CMD_QUERY_PORT] = ib_uverbs_query_port, | 84 | [IB_USER_VERBS_CMD_QUERY_PORT] = ib_uverbs_query_port, |
87 | [IB_USER_VERBS_CMD_ALLOC_PD] = ib_uverbs_alloc_pd, | 85 | [IB_USER_VERBS_CMD_ALLOC_PD] = ib_uverbs_alloc_pd, |
88 | [IB_USER_VERBS_CMD_DEALLOC_PD] = ib_uverbs_dealloc_pd, | 86 | [IB_USER_VERBS_CMD_DEALLOC_PD] = ib_uverbs_dealloc_pd, |
89 | [IB_USER_VERBS_CMD_REG_MR] = ib_uverbs_reg_mr, | 87 | [IB_USER_VERBS_CMD_REG_MR] = ib_uverbs_reg_mr, |
90 | [IB_USER_VERBS_CMD_DEREG_MR] = ib_uverbs_dereg_mr, | 88 | [IB_USER_VERBS_CMD_DEREG_MR] = ib_uverbs_dereg_mr, |
91 | [IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL] = ib_uverbs_create_comp_channel, | 89 | [IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL] = ib_uverbs_create_comp_channel, |
92 | [IB_USER_VERBS_CMD_CREATE_CQ] = ib_uverbs_create_cq, | 90 | [IB_USER_VERBS_CMD_CREATE_CQ] = ib_uverbs_create_cq, |
93 | [IB_USER_VERBS_CMD_RESIZE_CQ] = ib_uverbs_resize_cq, | 91 | [IB_USER_VERBS_CMD_RESIZE_CQ] = ib_uverbs_resize_cq, |
94 | [IB_USER_VERBS_CMD_POLL_CQ] = ib_uverbs_poll_cq, | 92 | [IB_USER_VERBS_CMD_POLL_CQ] = ib_uverbs_poll_cq, |
95 | [IB_USER_VERBS_CMD_REQ_NOTIFY_CQ] = ib_uverbs_req_notify_cq, | 93 | [IB_USER_VERBS_CMD_REQ_NOTIFY_CQ] = ib_uverbs_req_notify_cq, |
96 | [IB_USER_VERBS_CMD_DESTROY_CQ] = ib_uverbs_destroy_cq, | 94 | [IB_USER_VERBS_CMD_DESTROY_CQ] = ib_uverbs_destroy_cq, |
97 | [IB_USER_VERBS_CMD_CREATE_QP] = ib_uverbs_create_qp, | 95 | [IB_USER_VERBS_CMD_CREATE_QP] = ib_uverbs_create_qp, |
98 | [IB_USER_VERBS_CMD_QUERY_QP] = ib_uverbs_query_qp, | 96 | [IB_USER_VERBS_CMD_QUERY_QP] = ib_uverbs_query_qp, |
99 | [IB_USER_VERBS_CMD_MODIFY_QP] = ib_uverbs_modify_qp, | 97 | [IB_USER_VERBS_CMD_MODIFY_QP] = ib_uverbs_modify_qp, |
100 | [IB_USER_VERBS_CMD_DESTROY_QP] = ib_uverbs_destroy_qp, | 98 | [IB_USER_VERBS_CMD_DESTROY_QP] = ib_uverbs_destroy_qp, |
101 | [IB_USER_VERBS_CMD_POST_SEND] = ib_uverbs_post_send, | 99 | [IB_USER_VERBS_CMD_POST_SEND] = ib_uverbs_post_send, |
102 | [IB_USER_VERBS_CMD_POST_RECV] = ib_uverbs_post_recv, | 100 | [IB_USER_VERBS_CMD_POST_RECV] = ib_uverbs_post_recv, |
103 | [IB_USER_VERBS_CMD_POST_SRQ_RECV] = ib_uverbs_post_srq_recv, | 101 | [IB_USER_VERBS_CMD_POST_SRQ_RECV] = ib_uverbs_post_srq_recv, |
104 | [IB_USER_VERBS_CMD_CREATE_AH] = ib_uverbs_create_ah, | 102 | [IB_USER_VERBS_CMD_CREATE_AH] = ib_uverbs_create_ah, |
105 | [IB_USER_VERBS_CMD_DESTROY_AH] = ib_uverbs_destroy_ah, | 103 | [IB_USER_VERBS_CMD_DESTROY_AH] = ib_uverbs_destroy_ah, |
106 | [IB_USER_VERBS_CMD_ATTACH_MCAST] = ib_uverbs_attach_mcast, | 104 | [IB_USER_VERBS_CMD_ATTACH_MCAST] = ib_uverbs_attach_mcast, |
107 | [IB_USER_VERBS_CMD_DETACH_MCAST] = ib_uverbs_detach_mcast, | 105 | [IB_USER_VERBS_CMD_DETACH_MCAST] = ib_uverbs_detach_mcast, |
108 | [IB_USER_VERBS_CMD_CREATE_SRQ] = ib_uverbs_create_srq, | 106 | [IB_USER_VERBS_CMD_CREATE_SRQ] = ib_uverbs_create_srq, |
109 | [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq, | 107 | [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq, |
110 | [IB_USER_VERBS_CMD_QUERY_SRQ] = ib_uverbs_query_srq, | 108 | [IB_USER_VERBS_CMD_QUERY_SRQ] = ib_uverbs_query_srq, |
111 | [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq, | 109 | [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq, |
112 | }; | 110 | }; |
113 | 111 | ||
114 | static struct vfsmount *uverbs_event_mnt; | 112 | static struct vfsmount *uverbs_event_mnt; |
@@ -370,7 +368,7 @@ static int ib_uverbs_event_close(struct inode *inode, struct file *filp) | |||
370 | 368 | ||
371 | static const struct file_operations uverbs_event_fops = { | 369 | static const struct file_operations uverbs_event_fops = { |
372 | .owner = THIS_MODULE, | 370 | .owner = THIS_MODULE, |
373 | .read = ib_uverbs_event_read, | 371 | .read = ib_uverbs_event_read, |
374 | .poll = ib_uverbs_event_poll, | 372 | .poll = ib_uverbs_event_poll, |
375 | .release = ib_uverbs_event_close, | 373 | .release = ib_uverbs_event_close, |
376 | .fasync = ib_uverbs_event_fasync | 374 | .fasync = ib_uverbs_event_fasync |
@@ -617,14 +615,12 @@ static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma) | |||
617 | /* | 615 | /* |
618 | * ib_uverbs_open() does not need the BKL: | 616 | * ib_uverbs_open() does not need the BKL: |
619 | * | 617 | * |
620 | * - dev_table[] accesses are protected by map_lock, the | 618 | * - the ib_uverbs_device structures are properly reference counted and |
621 | * ib_uverbs_device structures are properly reference counted, and | ||
622 | * everything else is purely local to the file being created, so | 619 | * everything else is purely local to the file being created, so |
623 | * races against other open calls are not a problem; | 620 | * races against other open calls are not a problem; |
624 | * - there is no ioctl method to race against; | 621 | * - there is no ioctl method to race against; |
625 | * - the device is added to dev_table[] as the last part of module | 622 | * - the open method will either immediately run -ENXIO, or all |
626 | * initialization, the open method will either immediately run | 623 | * required initialization will be done. |
627 | * -ENXIO, or all required initialization will be done. | ||
628 | */ | 624 | */ |
629 | static int ib_uverbs_open(struct inode *inode, struct file *filp) | 625 | static int ib_uverbs_open(struct inode *inode, struct file *filp) |
630 | { | 626 | { |
@@ -632,13 +628,10 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp) | |||
632 | struct ib_uverbs_file *file; | 628 | struct ib_uverbs_file *file; |
633 | int ret; | 629 | int ret; |
634 | 630 | ||
635 | spin_lock(&map_lock); | 631 | dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev); |
636 | dev = dev_table[iminor(inode) - IB_UVERBS_BASE_MINOR]; | ||
637 | if (dev) | 632 | if (dev) |
638 | kref_get(&dev->ref); | 633 | kref_get(&dev->ref); |
639 | spin_unlock(&map_lock); | 634 | else |
640 | |||
641 | if (!dev) | ||
642 | return -ENXIO; | 635 | return -ENXIO; |
643 | 636 | ||
644 | if (!try_module_get(dev->ib_dev->owner)) { | 637 | if (!try_module_get(dev->ib_dev->owner)) { |
@@ -685,17 +678,17 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp) | |||
685 | } | 678 | } |
686 | 679 | ||
687 | static const struct file_operations uverbs_fops = { | 680 | static const struct file_operations uverbs_fops = { |
688 | .owner = THIS_MODULE, | 681 | .owner = THIS_MODULE, |
689 | .write = ib_uverbs_write, | 682 | .write = ib_uverbs_write, |
690 | .open = ib_uverbs_open, | 683 | .open = ib_uverbs_open, |
691 | .release = ib_uverbs_close | 684 | .release = ib_uverbs_close |
692 | }; | 685 | }; |
693 | 686 | ||
694 | static const struct file_operations uverbs_mmap_fops = { | 687 | static const struct file_operations uverbs_mmap_fops = { |
695 | .owner = THIS_MODULE, | 688 | .owner = THIS_MODULE, |
696 | .write = ib_uverbs_write, | 689 | .write = ib_uverbs_write, |
697 | .mmap = ib_uverbs_mmap, | 690 | .mmap = ib_uverbs_mmap, |
698 | .open = ib_uverbs_open, | 691 | .open = ib_uverbs_open, |
699 | .release = ib_uverbs_close | 692 | .release = ib_uverbs_close |
700 | }; | 693 | }; |
701 | 694 | ||
@@ -735,8 +728,38 @@ static ssize_t show_abi_version(struct class *class, char *buf) | |||
735 | } | 728 | } |
736 | static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL); | 729 | static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL); |
737 | 730 | ||
731 | static dev_t overflow_maj; | ||
732 | static DECLARE_BITMAP(overflow_map, IB_UVERBS_MAX_DEVICES); | ||
733 | |||
734 | /* | ||
735 | * If we have more than IB_UVERBS_MAX_DEVICES, dynamically overflow by | ||
736 | * requesting a new major number and doubling the number of max devices we | ||
737 | * support. It's stupid, but simple. | ||
738 | */ | ||
739 | static int find_overflow_devnum(void) | ||
740 | { | ||
741 | int ret; | ||
742 | |||
743 | if (!overflow_maj) { | ||
744 | ret = alloc_chrdev_region(&overflow_maj, 0, IB_UVERBS_MAX_DEVICES, | ||
745 | "infiniband_verbs"); | ||
746 | if (ret) { | ||
747 | printk(KERN_ERR "user_verbs: couldn't register dynamic device number\n"); | ||
748 | return ret; | ||
749 | } | ||
750 | } | ||
751 | |||
752 | ret = find_first_zero_bit(overflow_map, IB_UVERBS_MAX_DEVICES); | ||
753 | if (ret >= IB_UVERBS_MAX_DEVICES) | ||
754 | return -1; | ||
755 | |||
756 | return ret; | ||
757 | } | ||
758 | |||
738 | static void ib_uverbs_add_one(struct ib_device *device) | 759 | static void ib_uverbs_add_one(struct ib_device *device) |
739 | { | 760 | { |
761 | int devnum; | ||
762 | dev_t base; | ||
740 | struct ib_uverbs_device *uverbs_dev; | 763 | struct ib_uverbs_device *uverbs_dev; |
741 | 764 | ||
742 | if (!device->alloc_ucontext) | 765 | if (!device->alloc_ucontext) |
@@ -750,28 +773,36 @@ static void ib_uverbs_add_one(struct ib_device *device) | |||
750 | init_completion(&uverbs_dev->comp); | 773 | init_completion(&uverbs_dev->comp); |
751 | 774 | ||
752 | spin_lock(&map_lock); | 775 | spin_lock(&map_lock); |
753 | uverbs_dev->devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES); | 776 | devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES); |
754 | if (uverbs_dev->devnum >= IB_UVERBS_MAX_DEVICES) { | 777 | if (devnum >= IB_UVERBS_MAX_DEVICES) { |
755 | spin_unlock(&map_lock); | 778 | spin_unlock(&map_lock); |
756 | goto err; | 779 | devnum = find_overflow_devnum(); |
780 | if (devnum < 0) | ||
781 | goto err; | ||
782 | |||
783 | spin_lock(&map_lock); | ||
784 | uverbs_dev->devnum = devnum + IB_UVERBS_MAX_DEVICES; | ||
785 | base = devnum + overflow_maj; | ||
786 | set_bit(devnum, overflow_map); | ||
787 | } else { | ||
788 | uverbs_dev->devnum = devnum; | ||
789 | base = devnum + IB_UVERBS_BASE_DEV; | ||
790 | set_bit(devnum, dev_map); | ||
757 | } | 791 | } |
758 | set_bit(uverbs_dev->devnum, dev_map); | ||
759 | spin_unlock(&map_lock); | 792 | spin_unlock(&map_lock); |
760 | 793 | ||
761 | uverbs_dev->ib_dev = device; | 794 | uverbs_dev->ib_dev = device; |
762 | uverbs_dev->num_comp_vectors = device->num_comp_vectors; | 795 | uverbs_dev->num_comp_vectors = device->num_comp_vectors; |
763 | 796 | ||
764 | uverbs_dev->cdev = cdev_alloc(); | 797 | cdev_init(&uverbs_dev->cdev, NULL); |
765 | if (!uverbs_dev->cdev) | 798 | uverbs_dev->cdev.owner = THIS_MODULE; |
766 | goto err; | 799 | uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops; |
767 | uverbs_dev->cdev->owner = THIS_MODULE; | 800 | kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum); |
768 | uverbs_dev->cdev->ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops; | 801 | if (cdev_add(&uverbs_dev->cdev, base, 1)) |
769 | kobject_set_name(&uverbs_dev->cdev->kobj, "uverbs%d", uverbs_dev->devnum); | ||
770 | if (cdev_add(uverbs_dev->cdev, IB_UVERBS_BASE_DEV + uverbs_dev->devnum, 1)) | ||
771 | goto err_cdev; | 802 | goto err_cdev; |
772 | 803 | ||
773 | uverbs_dev->dev = device_create(uverbs_class, device->dma_device, | 804 | uverbs_dev->dev = device_create(uverbs_class, device->dma_device, |
774 | uverbs_dev->cdev->dev, uverbs_dev, | 805 | uverbs_dev->cdev.dev, uverbs_dev, |
775 | "uverbs%d", uverbs_dev->devnum); | 806 | "uverbs%d", uverbs_dev->devnum); |
776 | if (IS_ERR(uverbs_dev->dev)) | 807 | if (IS_ERR(uverbs_dev->dev)) |
777 | goto err_cdev; | 808 | goto err_cdev; |
@@ -781,20 +812,19 @@ static void ib_uverbs_add_one(struct ib_device *device) | |||
781 | if (device_create_file(uverbs_dev->dev, &dev_attr_abi_version)) | 812 | if (device_create_file(uverbs_dev->dev, &dev_attr_abi_version)) |
782 | goto err_class; | 813 | goto err_class; |
783 | 814 | ||
784 | spin_lock(&map_lock); | ||
785 | dev_table[uverbs_dev->devnum] = uverbs_dev; | ||
786 | spin_unlock(&map_lock); | ||
787 | |||
788 | ib_set_client_data(device, &uverbs_client, uverbs_dev); | 815 | ib_set_client_data(device, &uverbs_client, uverbs_dev); |
789 | 816 | ||
790 | return; | 817 | return; |
791 | 818 | ||
792 | err_class: | 819 | err_class: |
793 | device_destroy(uverbs_class, uverbs_dev->cdev->dev); | 820 | device_destroy(uverbs_class, uverbs_dev->cdev.dev); |
794 | 821 | ||
795 | err_cdev: | 822 | err_cdev: |
796 | cdev_del(uverbs_dev->cdev); | 823 | cdev_del(&uverbs_dev->cdev); |
797 | clear_bit(uverbs_dev->devnum, dev_map); | 824 | if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES) |
825 | clear_bit(devnum, dev_map); | ||
826 | else | ||
827 | clear_bit(devnum, overflow_map); | ||
798 | 828 | ||
799 | err: | 829 | err: |
800 | kref_put(&uverbs_dev->ref, ib_uverbs_release_dev); | 830 | kref_put(&uverbs_dev->ref, ib_uverbs_release_dev); |
@@ -811,14 +841,13 @@ static void ib_uverbs_remove_one(struct ib_device *device) | |||
811 | return; | 841 | return; |
812 | 842 | ||
813 | dev_set_drvdata(uverbs_dev->dev, NULL); | 843 | dev_set_drvdata(uverbs_dev->dev, NULL); |
814 | device_destroy(uverbs_class, uverbs_dev->cdev->dev); | 844 | device_destroy(uverbs_class, uverbs_dev->cdev.dev); |
815 | cdev_del(uverbs_dev->cdev); | 845 | cdev_del(&uverbs_dev->cdev); |
816 | 846 | ||
817 | spin_lock(&map_lock); | 847 | if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES) |
818 | dev_table[uverbs_dev->devnum] = NULL; | 848 | clear_bit(uverbs_dev->devnum, dev_map); |
819 | spin_unlock(&map_lock); | 849 | else |
820 | 850 | clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map); | |
821 | clear_bit(uverbs_dev->devnum, dev_map); | ||
822 | 851 | ||
823 | kref_put(&uverbs_dev->ref, ib_uverbs_release_dev); | 852 | kref_put(&uverbs_dev->ref, ib_uverbs_release_dev); |
824 | wait_for_completion(&uverbs_dev->comp); | 853 | wait_for_completion(&uverbs_dev->comp); |
@@ -908,6 +937,8 @@ static void __exit ib_uverbs_cleanup(void) | |||
908 | unregister_filesystem(&uverbs_event_fs); | 937 | unregister_filesystem(&uverbs_event_fs); |
909 | class_destroy(uverbs_class); | 938 | class_destroy(uverbs_class); |
910 | unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES); | 939 | unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES); |
940 | if (overflow_maj) | ||
941 | unregister_chrdev_region(overflow_maj, IB_UVERBS_MAX_DEVICES); | ||
911 | idr_destroy(&ib_uverbs_pd_idr); | 942 | idr_destroy(&ib_uverbs_pd_idr); |
912 | idr_destroy(&ib_uverbs_mr_idr); | 943 | idr_destroy(&ib_uverbs_mr_idr); |
913 | idr_destroy(&ib_uverbs_mw_idr); | 944 | idr_destroy(&ib_uverbs_mw_idr); |