about summary refs log tree commit diff stats
path: root/drivers/infiniband/hw/ipath/ipath_file_ops.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_file_ops.c')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_file_ops.c  287
1 file changed, 170 insertions(+), 117 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 5d64ff875297..1272aaf2a785 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 QLogic, Inc. All rights reserved. 2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -41,12 +41,6 @@
41#include "ipath_kernel.h" 41#include "ipath_kernel.h"
42#include "ipath_common.h" 42#include "ipath_common.h"
43 43
44/*
45 * mmap64 doesn't allow all 64 bits for 32-bit applications
46 * so only use the low 43 bits.
47 */
48#define MMAP64_MASK 0x7FFFFFFFFFFUL
49
50static int ipath_open(struct inode *, struct file *); 44static int ipath_open(struct inode *, struct file *);
51static int ipath_close(struct inode *, struct file *); 45static int ipath_close(struct inode *, struct file *);
52static ssize_t ipath_write(struct file *, const char __user *, size_t, 46static ssize_t ipath_write(struct file *, const char __user *, size_t,
@@ -63,6 +57,24 @@ static const struct file_operations ipath_file_ops = {
63 .mmap = ipath_mmap 57 .mmap = ipath_mmap
64}; 58};
65 59
60/*
61 * Convert kernel virtual addresses to physical addresses so they don't
62 * potentially conflict with the chip addresses used as mmap offsets.
63 * It doesn't really matter what mmap offset we use as long as we can
64 * interpret it correctly.
65 */
66static u64 cvt_kvaddr(void *p)
67{
68 struct page *page;
69 u64 paddr = 0;
70
71 page = vmalloc_to_page(p);
72 if (page)
73 paddr = page_to_pfn(page) << PAGE_SHIFT;
74
75 return paddr;
76}
77
66static int ipath_get_base_info(struct file *fp, 78static int ipath_get_base_info(struct file *fp,
67 void __user *ubase, size_t ubase_size) 79 void __user *ubase, size_t ubase_size)
68{ 80{
@@ -87,7 +99,7 @@ static int ipath_get_base_info(struct file *fp,
87 sz = sizeof(*kinfo); 99 sz = sizeof(*kinfo);
88 /* If port sharing is not requested, allow the old size structure */ 100 /* If port sharing is not requested, allow the old size structure */
89 if (!shared) 101 if (!shared)
90 sz -= 3 * sizeof(u64); 102 sz -= 7 * sizeof(u64);
91 if (ubase_size < sz) { 103 if (ubase_size < sz) {
92 ipath_cdbg(PROC, 104 ipath_cdbg(PROC,
93 "Base size %zu, need %zu (version mismatch?)\n", 105 "Base size %zu, need %zu (version mismatch?)\n",
@@ -165,24 +177,41 @@ static int ipath_get_base_info(struct file *fp,
165 kinfo->spi_piobufbase = (u64) pd->port_piobufs + 177 kinfo->spi_piobufbase = (u64) pd->port_piobufs +
166 dd->ipath_palign * 178 dd->ipath_palign *
167 (dd->ipath_pbufsport - kinfo->spi_piocnt); 179 (dd->ipath_pbufsport - kinfo->spi_piocnt);
168 kinfo->__spi_uregbase = (u64) dd->ipath_uregbase +
169 dd->ipath_palign * pd->port_port;
170 } else { 180 } else {
171 unsigned slave = subport_fp(fp) - 1; 181 unsigned slave = subport_fp(fp) - 1;
172 182
173 kinfo->spi_piocnt = dd->ipath_pbufsport / subport_cnt; 183 kinfo->spi_piocnt = dd->ipath_pbufsport / subport_cnt;
174 kinfo->spi_piobufbase = (u64) pd->port_piobufs + 184 kinfo->spi_piobufbase = (u64) pd->port_piobufs +
175 dd->ipath_palign * kinfo->spi_piocnt * slave; 185 dd->ipath_palign * kinfo->spi_piocnt * slave;
176 kinfo->__spi_uregbase = ((u64) pd->subport_uregbase + 186 }
177 PAGE_SIZE * slave) & MMAP64_MASK; 187 if (shared) {
188 kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase +
189 dd->ipath_palign * pd->port_port;
190 kinfo->spi_port_rcvegrbuf = kinfo->spi_rcv_egrbufs;
191 kinfo->spi_port_rcvhdr_base = kinfo->spi_rcvhdr_base;
192 kinfo->spi_port_rcvhdr_tailaddr = kinfo->spi_rcvhdr_tailaddr;
178 193
179 kinfo->spi_rcvhdr_base = ((u64) pd->subport_rcvhdr_base + 194 kinfo->__spi_uregbase = cvt_kvaddr(pd->subport_uregbase +
180 pd->port_rcvhdrq_size * slave) & MMAP64_MASK; 195 PAGE_SIZE * subport_fp(fp));
181 kinfo->spi_rcvhdr_tailaddr = 196
182 (u64) pd->port_rcvhdrqtailaddr_phys & MMAP64_MASK; 197 kinfo->spi_rcvhdr_base = cvt_kvaddr(pd->subport_rcvhdr_base +
183 kinfo->spi_rcv_egrbufs = ((u64) pd->subport_rcvegrbuf + 198 pd->port_rcvhdrq_size * subport_fp(fp));
184 dd->ipath_rcvegrcnt * dd->ipath_rcvegrbufsize * slave) & 199 kinfo->spi_rcvhdr_tailaddr = 0;
185 MMAP64_MASK; 200 kinfo->spi_rcv_egrbufs = cvt_kvaddr(pd->subport_rcvegrbuf +
201 pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size *
202 subport_fp(fp));
203
204 kinfo->spi_subport_uregbase =
205 cvt_kvaddr(pd->subport_uregbase);
206 kinfo->spi_subport_rcvegrbuf =
207 cvt_kvaddr(pd->subport_rcvegrbuf);
208 kinfo->spi_subport_rcvhdr_base =
209 cvt_kvaddr(pd->subport_rcvhdr_base);
210 ipath_cdbg(PROC, "port %u flags %x %llx %llx %llx\n",
211 kinfo->spi_port, kinfo->spi_runtime_flags,
212 (unsigned long long) kinfo->spi_subport_uregbase,
213 (unsigned long long) kinfo->spi_subport_rcvegrbuf,
214 (unsigned long long) kinfo->spi_subport_rcvhdr_base);
186 } 215 }
187 216
188 kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->ipath_piobufbase) / 217 kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->ipath_piobufbase) /
@@ -199,20 +228,10 @@ static int ipath_get_base_info(struct file *fp,
199 228
200 if (master) { 229 if (master) {
201 kinfo->spi_runtime_flags |= IPATH_RUNTIME_MASTER; 230 kinfo->spi_runtime_flags |= IPATH_RUNTIME_MASTER;
202 kinfo->spi_subport_uregbase =
203 (u64) pd->subport_uregbase & MMAP64_MASK;
204 kinfo->spi_subport_rcvegrbuf =
205 (u64) pd->subport_rcvegrbuf & MMAP64_MASK;
206 kinfo->spi_subport_rcvhdr_base =
207 (u64) pd->subport_rcvhdr_base & MMAP64_MASK;
208 ipath_cdbg(PROC, "port %u flags %x %llx %llx %llx\n",
209 kinfo->spi_port, kinfo->spi_runtime_flags,
210 (unsigned long long) kinfo->spi_subport_uregbase,
211 (unsigned long long) kinfo->spi_subport_rcvegrbuf,
212 (unsigned long long) kinfo->spi_subport_rcvhdr_base);
213 } 231 }
214 232
215 if (copy_to_user(ubase, kinfo, sizeof(*kinfo))) 233 sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
234 if (copy_to_user(ubase, kinfo, sz))
216 ret = -EFAULT; 235 ret = -EFAULT;
217 236
218bail: 237bail:
@@ -1132,67 +1151,55 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
1132 struct ipath_devdata *dd; 1151 struct ipath_devdata *dd;
1133 void *addr; 1152 void *addr;
1134 size_t size; 1153 size_t size;
1135 int ret; 1154 int ret = 0;
1136 1155
1137 /* If the port is not shared, all addresses should be physical */ 1156 /* If the port is not shared, all addresses should be physical */
1138 if (!pd->port_subport_cnt) { 1157 if (!pd->port_subport_cnt)
1139 ret = -EINVAL;
1140 goto bail; 1158 goto bail;
1141 }
1142 1159
1143 dd = pd->port_dd; 1160 dd = pd->port_dd;
1144 size = pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size; 1161 size = pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;
1145 1162
1146 /* 1163 /*
1147 * Master has all the slave uregbase, rcvhdrq, and 1164 * Each process has all the subport uregbase, rcvhdrq, and
1148 * rcvegrbufs mmapped. 1165 * rcvegrbufs mmapped - as an array for all the processes,
1166 * and also separately for this process.
1149 */ 1167 */
1150 if (subport == 0) { 1168 if (pgaddr == cvt_kvaddr(pd->subport_uregbase)) {
1151 unsigned num_slaves = pd->port_subport_cnt - 1; 1169 addr = pd->subport_uregbase;
1152 1170 size = PAGE_SIZE * pd->port_subport_cnt;
1153 if (pgaddr == ((u64) pd->subport_uregbase & MMAP64_MASK)) { 1171 } else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base)) {
1154 addr = pd->subport_uregbase; 1172 addr = pd->subport_rcvhdr_base;
1155 size = PAGE_SIZE * num_slaves; 1173 size = pd->port_rcvhdrq_size * pd->port_subport_cnt;
1156 } else if (pgaddr == ((u64) pd->subport_rcvhdr_base & 1174 } else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf)) {
1157 MMAP64_MASK)) { 1175 addr = pd->subport_rcvegrbuf;
1158 addr = pd->subport_rcvhdr_base; 1176 size *= pd->port_subport_cnt;
1159 size = pd->port_rcvhdrq_size * num_slaves; 1177 } else if (pgaddr == cvt_kvaddr(pd->subport_uregbase +
1160 } else if (pgaddr == ((u64) pd->subport_rcvegrbuf & 1178 PAGE_SIZE * subport)) {
1161 MMAP64_MASK)) { 1179 addr = pd->subport_uregbase + PAGE_SIZE * subport;
1162 addr = pd->subport_rcvegrbuf; 1180 size = PAGE_SIZE;
1163 size *= num_slaves; 1181 } else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base +
1164 } else { 1182 pd->port_rcvhdrq_size * subport)) {
1165 ret = -EINVAL; 1183 addr = pd->subport_rcvhdr_base +
1166 goto bail; 1184 pd->port_rcvhdrq_size * subport;
1167 } 1185 size = pd->port_rcvhdrq_size;
1168 } else if (pgaddr == (((u64) pd->subport_uregbase + 1186 } else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf +
1169 PAGE_SIZE * (subport - 1)) & MMAP64_MASK)) { 1187 size * subport)) {
1170 addr = pd->subport_uregbase + PAGE_SIZE * (subport - 1); 1188 addr = pd->subport_rcvegrbuf + size * subport;
1171 size = PAGE_SIZE; 1189 /* rcvegrbufs are read-only on the slave */
1172 } else if (pgaddr == (((u64) pd->subport_rcvhdr_base + 1190 if (vma->vm_flags & VM_WRITE) {
1173 pd->port_rcvhdrq_size * (subport - 1)) & 1191 dev_info(&dd->pcidev->dev,
1174 MMAP64_MASK)) { 1192 "Can't map eager buffers as "
1175 addr = pd->subport_rcvhdr_base + 1193 "writable (flags=%lx)\n", vma->vm_flags);
1176 pd->port_rcvhdrq_size * (subport - 1); 1194 ret = -EPERM;
1177 size = pd->port_rcvhdrq_size; 1195 goto bail;
1178 } else if (pgaddr == (((u64) pd->subport_rcvegrbuf + 1196 }
1179 size * (subport - 1)) & MMAP64_MASK)) { 1197 /*
1180 addr = pd->subport_rcvegrbuf + size * (subport - 1); 1198 * Don't allow permission to later change to writeable
1181 /* rcvegrbufs are read-only on the slave */ 1199 * with mprotect.
1182 if (vma->vm_flags & VM_WRITE) { 1200 */
1183 dev_info(&dd->pcidev->dev, 1201 vma->vm_flags &= ~VM_MAYWRITE;
1184 "Can't map eager buffers as "
1185 "writable (flags=%lx)\n", vma->vm_flags);
1186 ret = -EPERM;
1187 goto bail;
1188 }
1189 /*
1190 * Don't allow permission to later change to writeable
1191 * with mprotect.
1192 */
1193 vma->vm_flags &= ~VM_MAYWRITE;
1194 } else { 1202 } else {
1195 ret = -EINVAL;
1196 goto bail; 1203 goto bail;
1197 } 1204 }
1198 len = vma->vm_end - vma->vm_start; 1205 len = vma->vm_end - vma->vm_start;
@@ -1205,7 +1212,7 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
1205 vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT; 1212 vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
1206 vma->vm_ops = &ipath_file_vm_ops; 1213 vma->vm_ops = &ipath_file_vm_ops;
1207 vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND; 1214 vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
1208 ret = 0; 1215 ret = 1;
1209 1216
1210bail: 1217bail:
1211 return ret; 1218 return ret;
@@ -1265,19 +1272,20 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
1265 * Check for kernel virtual addresses first, anything else must 1272 * Check for kernel virtual addresses first, anything else must
1266 * match a HW or memory address. 1273 * match a HW or memory address.
1267 */ 1274 */
1268 if (pgaddr >= (1ULL<<40)) { 1275 ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp));
1269 ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp)); 1276 if (ret) {
1277 if (ret > 0)
1278 ret = 0;
1270 goto bail; 1279 goto bail;
1271 } 1280 }
1272 1281
1282 ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port;
1273 if (!pd->port_subport_cnt) { 1283 if (!pd->port_subport_cnt) {
1274 /* port is not shared */ 1284 /* port is not shared */
1275 ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port;
1276 piocnt = dd->ipath_pbufsport; 1285 piocnt = dd->ipath_pbufsport;
1277 piobufs = pd->port_piobufs; 1286 piobufs = pd->port_piobufs;
1278 } else if (!subport_fp(fp)) { 1287 } else if (!subport_fp(fp)) {
1279 /* caller is the master */ 1288 /* caller is the master */
1280 ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port;
1281 piocnt = (dd->ipath_pbufsport / pd->port_subport_cnt) + 1289 piocnt = (dd->ipath_pbufsport / pd->port_subport_cnt) +
1282 (dd->ipath_pbufsport % pd->port_subport_cnt); 1290 (dd->ipath_pbufsport % pd->port_subport_cnt);
1283 piobufs = pd->port_piobufs + 1291 piobufs = pd->port_piobufs +
@@ -1286,7 +1294,6 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
1286 unsigned slave = subport_fp(fp) - 1; 1294 unsigned slave = subport_fp(fp) - 1;
1287 1295
1288 /* caller is a slave */ 1296 /* caller is a slave */
1289 ureg = 0;
1290 piocnt = dd->ipath_pbufsport / pd->port_subport_cnt; 1297 piocnt = dd->ipath_pbufsport / pd->port_subport_cnt;
1291 piobufs = pd->port_piobufs + dd->ipath_palign * piocnt * slave; 1298 piobufs = pd->port_piobufs + dd->ipath_palign * piocnt * slave;
1292 } 1299 }
@@ -1300,9 +1307,6 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
1300 ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0, 1307 ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
1301 (void *) dd->ipath_pioavailregs_dma, 1308 (void *) dd->ipath_pioavailregs_dma,
1302 "pioavail registers"); 1309 "pioavail registers");
1303 else if (subport_fp(fp))
1304 /* Subports don't mmap the physical receive buffers */
1305 ret = -EINVAL;
1306 else if (pgaddr == pd->port_rcvegr_phys) 1310 else if (pgaddr == pd->port_rcvegr_phys)
1307 ret = mmap_rcvegrbufs(vma, pd); 1311 ret = mmap_rcvegrbufs(vma, pd);
1308 else if (pgaddr == (u64) pd->port_rcvhdrq_phys) 1312 else if (pgaddr == (u64) pd->port_rcvhdrq_phys)
@@ -1400,32 +1404,41 @@ static int init_subports(struct ipath_devdata *dd,
1400 const struct ipath_user_info *uinfo) 1404 const struct ipath_user_info *uinfo)
1401{ 1405{
1402 int ret = 0; 1406 int ret = 0;
1403 unsigned num_slaves; 1407 unsigned num_subports;
1404 size_t size; 1408 size_t size;
1405 1409
1406 /* Old user binaries don't know about subports */
1407 if ((uinfo->spu_userversion & 0xffff) != IPATH_USER_SWMINOR)
1408 goto bail;
1409 /* 1410 /*
1410 * If the user is requesting zero or one port, 1411 * If the user is requesting zero or one port,
1411 * skip the subport allocation. 1412 * skip the subport allocation.
1412 */ 1413 */
1413 if (uinfo->spu_subport_cnt <= 1) 1414 if (uinfo->spu_subport_cnt <= 1)
1414 goto bail; 1415 goto bail;
1415 if (uinfo->spu_subport_cnt > 4) { 1416
1417 /* Old user binaries don't know about new subport implementation */
1418 if ((uinfo->spu_userversion & 0xffff) != IPATH_USER_SWMINOR) {
1419 dev_info(&dd->pcidev->dev,
1420 "Mismatched user minor version (%d) and driver "
1421 "minor version (%d) while port sharing. Ensure "
1422 "that driver and library are from the same "
1423 "release.\n",
1424 (int) (uinfo->spu_userversion & 0xffff),
1425 IPATH_USER_SWMINOR);
1426 goto bail;
1427 }
1428 if (uinfo->spu_subport_cnt > INFINIPATH_MAX_SUBPORT) {
1416 ret = -EINVAL; 1429 ret = -EINVAL;
1417 goto bail; 1430 goto bail;
1418 } 1431 }
1419 1432
1420 num_slaves = uinfo->spu_subport_cnt - 1; 1433 num_subports = uinfo->spu_subport_cnt;
1421 pd->subport_uregbase = vmalloc(PAGE_SIZE * num_slaves); 1434 pd->subport_uregbase = vmalloc(PAGE_SIZE * num_subports);
1422 if (!pd->subport_uregbase) { 1435 if (!pd->subport_uregbase) {
1423 ret = -ENOMEM; 1436 ret = -ENOMEM;
1424 goto bail; 1437 goto bail;
1425 } 1438 }
1426 /* Note: pd->port_rcvhdrq_size isn't initialized yet. */ 1439 /* Note: pd->port_rcvhdrq_size isn't initialized yet. */
1427 size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize * 1440 size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
1428 sizeof(u32), PAGE_SIZE) * num_slaves; 1441 sizeof(u32), PAGE_SIZE) * num_subports;
1429 pd->subport_rcvhdr_base = vmalloc(size); 1442 pd->subport_rcvhdr_base = vmalloc(size);
1430 if (!pd->subport_rcvhdr_base) { 1443 if (!pd->subport_rcvhdr_base) {
1431 ret = -ENOMEM; 1444 ret = -ENOMEM;
@@ -1434,7 +1447,7 @@ static int init_subports(struct ipath_devdata *dd,
1434 1447
1435 pd->subport_rcvegrbuf = vmalloc(pd->port_rcvegrbuf_chunks * 1448 pd->subport_rcvegrbuf = vmalloc(pd->port_rcvegrbuf_chunks *
1436 pd->port_rcvegrbuf_size * 1449 pd->port_rcvegrbuf_size *
1437 num_slaves); 1450 num_subports);
1438 if (!pd->subport_rcvegrbuf) { 1451 if (!pd->subport_rcvegrbuf) {
1439 ret = -ENOMEM; 1452 ret = -ENOMEM;
1440 goto bail_rhdr; 1453 goto bail_rhdr;
@@ -1443,6 +1456,12 @@ static int init_subports(struct ipath_devdata *dd,
1443 pd->port_subport_cnt = uinfo->spu_subport_cnt; 1456 pd->port_subport_cnt = uinfo->spu_subport_cnt;
1444 pd->port_subport_id = uinfo->spu_subport_id; 1457 pd->port_subport_id = uinfo->spu_subport_id;
1445 pd->active_slaves = 1; 1458 pd->active_slaves = 1;
1459 set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
1460 memset(pd->subport_uregbase, 0, PAGE_SIZE * num_subports);
1461 memset(pd->subport_rcvhdr_base, 0, size);
1462 memset(pd->subport_rcvegrbuf, 0, pd->port_rcvegrbuf_chunks *
1463 pd->port_rcvegrbuf_size *
1464 num_subports);
1446 goto bail; 1465 goto bail;
1447 1466
1448bail_rhdr: 1467bail_rhdr:
@@ -1573,18 +1592,19 @@ static int find_best_unit(struct file *fp,
1573 */ 1592 */
1574 if (!cpus_empty(current->cpus_allowed) && 1593 if (!cpus_empty(current->cpus_allowed) &&
1575 !cpus_full(current->cpus_allowed)) { 1594 !cpus_full(current->cpus_allowed)) {
1576 int ncpus = num_online_cpus(), curcpu = -1; 1595 int ncpus = num_online_cpus(), curcpu = -1, nset = 0;
1577 for (i = 0; i < ncpus; i++) 1596 for (i = 0; i < ncpus; i++)
1578 if (cpu_isset(i, current->cpus_allowed)) { 1597 if (cpu_isset(i, current->cpus_allowed)) {
1579 ipath_cdbg(PROC, "%s[%u] affinity set for " 1598 ipath_cdbg(PROC, "%s[%u] affinity set for "
1580 "cpu %d\n", current->comm, 1599 "cpu %d/%d\n", current->comm,
1581 current->pid, i); 1600 current->pid, i, ncpus);
1582 curcpu = i; 1601 curcpu = i;
1602 nset++;
1583 } 1603 }
1584 if (curcpu != -1) { 1604 if (curcpu != -1 && nset != ncpus) {
1585 if (npresent) { 1605 if (npresent) {
1586 prefunit = curcpu / (ncpus / npresent); 1606 prefunit = curcpu / (ncpus / npresent);
1587 ipath_dbg("%s[%u] %d chips, %d cpus, " 1607 ipath_cdbg(PROC,"%s[%u] %d chips, %d cpus, "
1588 "%d cpus/chip, select unit %d\n", 1608 "%d cpus/chip, select unit %d\n",
1589 current->comm, current->pid, 1609 current->comm, current->pid,
1590 npresent, ncpus, ncpus / npresent, 1610 npresent, ncpus, ncpus / npresent,
@@ -1764,11 +1784,17 @@ static int ipath_do_user_init(struct file *fp,
1764 const struct ipath_user_info *uinfo) 1784 const struct ipath_user_info *uinfo)
1765{ 1785{
1766 int ret; 1786 int ret;
1767 struct ipath_portdata *pd; 1787 struct ipath_portdata *pd = port_fp(fp);
1768 struct ipath_devdata *dd; 1788 struct ipath_devdata *dd;
1769 u32 head32; 1789 u32 head32;
1770 1790
1771 pd = port_fp(fp); 1791 /* Subports don't need to initialize anything since master did it. */
1792 if (subport_fp(fp)) {
1793 ret = wait_event_interruptible(pd->port_wait,
1794 !test_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag));
1795 goto done;
1796 }
1797
1772 dd = pd->port_dd; 1798 dd = pd->port_dd;
1773 1799
1774 if (uinfo->spu_rcvhdrsize) { 1800 if (uinfo->spu_rcvhdrsize) {
@@ -1826,6 +1852,11 @@ static int ipath_do_user_init(struct file *fp,
1826 dd->ipath_rcvctrl & ~INFINIPATH_R_TAILUPD); 1852 dd->ipath_rcvctrl & ~INFINIPATH_R_TAILUPD);
1827 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 1853 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
1828 dd->ipath_rcvctrl); 1854 dd->ipath_rcvctrl);
1855 /* Notify any waiting slaves */
1856 if (pd->port_subport_cnt) {
1857 clear_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
1858 wake_up(&pd->port_wait);
1859 }
1829done: 1860done:
1830 return ret; 1861 return ret;
1831} 1862}
@@ -2017,6 +2048,17 @@ static int ipath_get_slave_info(struct ipath_portdata *pd,
2017 return ret; 2048 return ret;
2018} 2049}
2019 2050
2051static int ipath_force_pio_avail_update(struct ipath_devdata *dd)
2052{
2053 u64 reg = dd->ipath_sendctrl;
2054
2055 clear_bit(IPATH_S_PIOBUFAVAILUPD, &reg);
2056 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, reg);
2057 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
2058
2059 return 0;
2060}
2061
2020static ssize_t ipath_write(struct file *fp, const char __user *data, 2062static ssize_t ipath_write(struct file *fp, const char __user *data,
2021 size_t count, loff_t *off) 2063 size_t count, loff_t *off)
2022{ 2064{
@@ -2071,27 +2113,35 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
2071 dest = &cmd.cmd.part_key; 2113 dest = &cmd.cmd.part_key;
2072 src = &ucmd->cmd.part_key; 2114 src = &ucmd->cmd.part_key;
2073 break; 2115 break;
2074 case IPATH_CMD_SLAVE_INFO: 2116 case __IPATH_CMD_SLAVE_INFO:
2075 copy = sizeof(cmd.cmd.slave_mask_addr); 2117 copy = sizeof(cmd.cmd.slave_mask_addr);
2076 dest = &cmd.cmd.slave_mask_addr; 2118 dest = &cmd.cmd.slave_mask_addr;
2077 src = &ucmd->cmd.slave_mask_addr; 2119 src = &ucmd->cmd.slave_mask_addr;
2078 break; 2120 break;
2121 case IPATH_CMD_PIOAVAILUPD: // force an update of PIOAvail reg
2122 copy = 0;
2123 src = NULL;
2124 dest = NULL;
2125 break;
2079 default: 2126 default:
2080 ret = -EINVAL; 2127 ret = -EINVAL;
2081 goto bail; 2128 goto bail;
2082 } 2129 }
2083 2130
2084 if ((count - consumed) < copy) { 2131 if (copy) {
2085 ret = -EINVAL; 2132 if ((count - consumed) < copy) {
2086 goto bail; 2133 ret = -EINVAL;
2087 } 2134 goto bail;
2135 }
2088 2136
2089 if (copy_from_user(dest, src, copy)) { 2137 if (copy_from_user(dest, src, copy)) {
2090 ret = -EFAULT; 2138 ret = -EFAULT;
2091 goto bail; 2139 goto bail;
2140 }
2141
2142 consumed += copy;
2092 } 2143 }
2093 2144
2094 consumed += copy;
2095 pd = port_fp(fp); 2145 pd = port_fp(fp);
2096 if (!pd && cmd.type != __IPATH_CMD_USER_INIT && 2146 if (!pd && cmd.type != __IPATH_CMD_USER_INIT &&
2097 cmd.type != IPATH_CMD_ASSIGN_PORT) { 2147 cmd.type != IPATH_CMD_ASSIGN_PORT) {
@@ -2137,11 +2187,14 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
2137 case IPATH_CMD_SET_PART_KEY: 2187 case IPATH_CMD_SET_PART_KEY:
2138 ret = ipath_set_part_key(pd, cmd.cmd.part_key); 2188 ret = ipath_set_part_key(pd, cmd.cmd.part_key);
2139 break; 2189 break;
2140 case IPATH_CMD_SLAVE_INFO: 2190 case __IPATH_CMD_SLAVE_INFO:
2141 ret = ipath_get_slave_info(pd, 2191 ret = ipath_get_slave_info(pd,
2142 (void __user *) (unsigned long) 2192 (void __user *) (unsigned long)
2143 cmd.cmd.slave_mask_addr); 2193 cmd.cmd.slave_mask_addr);
2144 break; 2194 break;
2195 case IPATH_CMD_PIOAVAILUPD:
2196 ret = ipath_force_pio_avail_update(pd->port_dd);
2197 break;
2145 } 2198 }
2146 2199
2147 if (ret >= 0) 2200 if (ret >= 0)