about summary refs log tree commit diff stats
path: root/drivers/infiniband/hw/ipath/ipath_file_ops.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_file_ops.c')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_file_ops.c  30
1 files changed, 17 insertions, 13 deletions
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 1af1f3a907c6..239d4e8068ac 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -223,8 +223,13 @@ static int ipath_get_base_info(struct file *fp,
223 (unsigned long long) kinfo->spi_subport_rcvhdr_base); 223 (unsigned long long) kinfo->spi_subport_rcvhdr_base);
224 } 224 }
225 225
226 kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->ipath_piobufbase) / 226 /*
227 dd->ipath_palign; 227 * All user buffers are 2KB buffers. If we ever support
228 * giving 4KB buffers to user processes, this will need some
229 * work.
230 */
231 kinfo->spi_pioindex = (kinfo->spi_piobufbase -
232 (dd->ipath_piobufbase & 0xffffffff)) / dd->ipath_palign;
228 kinfo->spi_pioalign = dd->ipath_palign; 233 kinfo->spi_pioalign = dd->ipath_palign;
229 234
230 kinfo->spi_qpair = IPATH_KD_QP; 235 kinfo->spi_qpair = IPATH_KD_QP;
@@ -2041,7 +2046,9 @@ static int ipath_close(struct inode *in, struct file *fp)
2041 struct ipath_filedata *fd; 2046 struct ipath_filedata *fd;
2042 struct ipath_portdata *pd; 2047 struct ipath_portdata *pd;
2043 struct ipath_devdata *dd; 2048 struct ipath_devdata *dd;
2049 unsigned long flags;
2044 unsigned port; 2050 unsigned port;
2051 struct pid *pid;
2045 2052
2046 ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n", 2053 ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n",
2047 (long)in->i_rdev, fp->private_data); 2054 (long)in->i_rdev, fp->private_data);
@@ -2074,14 +2081,13 @@ static int ipath_close(struct inode *in, struct file *fp)
2074 mutex_unlock(&ipath_mutex); 2081 mutex_unlock(&ipath_mutex);
2075 goto bail; 2082 goto bail;
2076 } 2083 }
2084 /* early; no interrupt users after this */
2085 spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
2077 port = pd->port_port; 2086 port = pd->port_port;
2078 2087 dd->ipath_pd[port] = NULL;
2079 if (pd->port_hdrqfull) { 2088 pid = pd->port_pid;
2080 ipath_cdbg(PROC, "%s[%u] had %u rcvhdrqfull errors " 2089 pd->port_pid = NULL;
2081 "during run\n", pd->port_comm, pid_nr(pd->port_pid), 2090 spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
2082 pd->port_hdrqfull);
2083 pd->port_hdrqfull = 0;
2084 }
2085 2091
2086 if (pd->port_rcvwait_to || pd->port_piowait_to 2092 if (pd->port_rcvwait_to || pd->port_piowait_to
2087 || pd->port_rcvnowait || pd->port_pionowait) { 2093 || pd->port_rcvnowait || pd->port_pionowait) {
@@ -2138,13 +2144,11 @@ static int ipath_close(struct inode *in, struct file *fp)
2138 unlock_expected_tids(pd); 2144 unlock_expected_tids(pd);
2139 ipath_stats.sps_ports--; 2145 ipath_stats.sps_ports--;
2140 ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n", 2146 ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
2141 pd->port_comm, pid_nr(pd->port_pid), 2147 pd->port_comm, pid_nr(pid),
2142 dd->ipath_unit, port); 2148 dd->ipath_unit, port);
2143 } 2149 }
2144 2150
2145 put_pid(pd->port_pid); 2151 put_pid(pid);
2146 pd->port_pid = NULL;
2147 dd->ipath_pd[pd->port_port] = NULL; /* before releasing mutex */
2148 mutex_unlock(&ipath_mutex); 2152 mutex_unlock(&ipath_mutex);
2149 ipath_free_pddata(dd, pd); /* after releasing the mutex */ 2153 ipath_free_pddata(dd, pd); /* after releasing the mutex */
2150 2154