Diffstat (limited to 'drivers/infiniband/hw/qib/qib_file_ops.c')
-rw-r--r--  drivers/infiniband/hw/qib/qib_file_ops.c  26
1 file changed, 15 insertions(+), 11 deletions(-)
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index b15e34eeef68..41937c6f888a 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -351,9 +351,10 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
 		 * unless perhaps the user has mpin'ed the pages
 		 * themselves.
 		 */
-		qib_devinfo(dd->pcidev,
-			 "Failed to lock addr %p, %u pages: "
-			 "errno %d\n", (void *) vaddr, cnt, -ret);
+		qib_devinfo(
+			dd->pcidev,
+			"Failed to lock addr %p, %u pages: errno %d\n",
+			(void *) vaddr, cnt, -ret);
 		goto done;
 	}
 	for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
@@ -437,7 +438,7 @@ cleanup:
 		goto cleanup;
 	}
 	if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
-			 tidmap, sizeof tidmap)) {
+			 tidmap, sizeof(tidmap))) {
 		ret = -EFAULT;
 		goto cleanup;
 	}
@@ -484,7 +485,7 @@ static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
 	}
 
 	if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
-			   sizeof tidmap)) {
+			   sizeof(tidmap))) {
 		ret = -EFAULT;
 		goto done;
 	}
@@ -951,8 +952,8 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
 		/* rcvegrbufs are read-only on the slave */
 		if (vma->vm_flags & VM_WRITE) {
 			qib_devinfo(dd->pcidev,
-				 "Can't map eager buffers as "
-				 "writable (flags=%lx)\n", vma->vm_flags);
+				 "Can't map eager buffers as writable (flags=%lx)\n",
+				 vma->vm_flags);
 			ret = -EPERM;
 			goto bail;
 		}
@@ -1185,6 +1186,7 @@ static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
 	 */
 	if (weight >= qib_cpulist_count) {
 		int cpu;
+
 		cpu = find_first_zero_bit(qib_cpulist,
 			qib_cpulist_count);
 		if (cpu == qib_cpulist_count)
@@ -1247,10 +1249,7 @@ static int init_subctxts(struct qib_devdata *dd,
 	if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
 		uinfo->spu_userversion & 0xffff)) {
 		qib_devinfo(dd->pcidev,
-			 "Mismatched user version (%d.%d) and driver "
-			 "version (%d.%d) while context sharing. Ensure "
-			 "that driver and library are from the same "
-			 "release.\n",
+			 "Mismatched user version (%d.%d) and driver version (%d.%d) while context sharing. Ensure that driver and library are from the same release.\n",
 			 (int) (uinfo->spu_userversion >> 16),
 			 (int) (uinfo->spu_userversion & 0xffff),
 			 QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
@@ -1391,6 +1390,7 @@ static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port,
 	}
 	if (!ppd) {
 		u32 pidx = ctxt % dd->num_pports;
+
 		if (usable(dd->pport + pidx))
 			ppd = dd->pport + pidx;
 		else {
@@ -1438,10 +1438,12 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
 
 	if (alg == QIB_PORT_ALG_ACROSS) {
 		unsigned inuse = ~0U;
+
 		/* find device (with ACTIVE ports) with fewest ctxts in use */
 		for (ndev = 0; ndev < devmax; ndev++) {
 			struct qib_devdata *dd = qib_lookup(ndev);
 			unsigned cused = 0, cfree = 0, pusable = 0;
+
 			if (!dd)
 				continue;
 			if (port && port <= dd->num_pports &&
@@ -1471,6 +1473,7 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
 	} else {
 		for (ndev = 0; ndev < devmax; ndev++) {
 			struct qib_devdata *dd = qib_lookup(ndev);
+
 			if (dd) {
 				ret = choose_port_ctxt(fp, dd, port, uinfo);
 				if (!ret)
@@ -1556,6 +1559,7 @@ static int find_hca(unsigned int cpu, int *unit)
 	}
 	for (ndev = 0; ndev < devmax; ndev++) {
 		struct qib_devdata *dd = qib_lookup(ndev);
+
 		if (dd) {
 			if (pcibus_to_node(dd->pcidev->bus) < 0) {
 				ret = -EINVAL;
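
All of the hunks above are checkpatch-style cleanups rather than functional changes: user-visible format strings are joined onto a single line so the message stays greppable, sizeof is written with parentheses, and a blank line separates local declarations from the first statement. The userspace sketch below illustrates the same conventions; it is only an illustration with made-up names (copy_map, tidmap), not code from the qib driver.

/* Illustration of the style rules this patch applies; not driver code. */
#include <stdio.h>
#include <string.h>

static int copy_map(char *dst, size_t dstlen, const char *src)
{
	char tidmap[64] = "";

	/* Message kept on one line so it can be found with grep. */
	if (strlen(src) >= sizeof(tidmap) || dstlen < sizeof(tidmap)) {
		fprintf(stderr, "Failed to copy map: %zu bytes won't fit\n",
			strlen(src));
		return -1;
	}
	memcpy(tidmap, src, strlen(src) + 1);
	/* sizeof(tidmap) written with parentheses, as checkpatch prefers. */
	memcpy(dst, tidmap, sizeof(tidmap));
	return 0;
}

int main(void)
{
	char out[64];

	if (copy_map(out, sizeof(out), "example") == 0)
		printf("copied: %s\n", out);
	return 0;
}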