 drivers/infiniband/hw/ipath/ipath_file_ops.c | 88 +++++++++++++-----------
 1 file changed, 46 insertions(+), 42 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 5de1dd49722f..a1cfedf8fb1c 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -41,12 +41,6 @@
 #include "ipath_kernel.h"
 #include "ipath_common.h"
 
-/*
- * mmap64 doesn't allow all 64 bits for 32-bit applications
- * so only use the low 43 bits.
- */
-#define MMAP64_MASK 0x7FFFFFFFFFFUL
-
 static int ipath_open(struct inode *, struct file *);
 static int ipath_close(struct inode *, struct file *);
 static ssize_t ipath_write(struct file *, const char __user *, size_t,
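
Background on the removed mask: mmap64 from a 32-bit application cannot pass a full 64-bit offset, so the old code truncated kernel virtual addresses to their low 43 bits before exporting them as mmap offsets, and ipath_mmap() recognized such offsets purely by magnitude (see the removed pgaddr >= (1ULL<<40) test further down). As the comment added below notes, a truncated kernel virtual address can potentially collide with the chip addresses that are also used as mmap offsets. A minimal user-space sketch of the old truncation, using a made-up vmalloc-range address purely for illustration:

    #include <stdio.h>
    #include <stdint.h>

    /* the mask this patch removes */
    #define MMAP64_MASK 0x7FFFFFFFFFFUL

    int main(void)
    {
            /* hypothetical x86_64 vmalloc-range kernel virtual address */
            uint64_t kvaddr = 0xffffc90000034000ULL;

            /* the old scheme exported only the low 43 bits as the offset */
            uint64_t offset = kvaddr & MMAP64_MASK;

            printf("kvaddr %#llx -> mmap offset %#llx\n",
                   (unsigned long long)kvaddr, (unsigned long long)offset);
            return 0;
    }
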
@@ -63,6 +57,24 @@ static const struct file_operations ipath_file_ops = {
 	.mmap = ipath_mmap
 };
 
+/*
+ * Convert kernel virtual addresses to physical addresses so they don't
+ * potentially conflict with the chip addresses used as mmap offsets.
+ * It doesn't really matter what mmap offset we use as long as we can
+ * interpret it correctly.
+ */
+static u64 cvt_kvaddr(void *p)
+{
+	struct page *page;
+	u64 paddr = 0;
+
+	page = vmalloc_to_page(p);
+	if (page)
+		paddr = page_to_pfn(page) << PAGE_SHIFT;
+
+	return paddr;
+}
+
 static int ipath_get_base_info(struct file *fp,
 			       void __user *ubase, size_t ubase_size)
 {
@@ -173,15 +185,14 @@ static int ipath_get_base_info(struct file *fp,
 		kinfo->spi_piocnt = dd->ipath_pbufsport / subport_cnt;
 		kinfo->spi_piobufbase = (u64) pd->port_piobufs +
 			dd->ipath_palign * kinfo->spi_piocnt * slave;
-		kinfo->__spi_uregbase = ((u64) pd->subport_uregbase +
-			PAGE_SIZE * slave) & MMAP64_MASK;
+		kinfo->__spi_uregbase = cvt_kvaddr(pd->subport_uregbase +
+			PAGE_SIZE * slave);
 
-		kinfo->spi_rcvhdr_base = ((u64) pd->subport_rcvhdr_base +
-			pd->port_rcvhdrq_size * slave) & MMAP64_MASK;
+		kinfo->spi_rcvhdr_base = cvt_kvaddr(pd->subport_rcvhdr_base +
+			pd->port_rcvhdrq_size * slave);
 		kinfo->spi_rcvhdr_tailaddr = 0;
-		kinfo->spi_rcv_egrbufs = ((u64) pd->subport_rcvegrbuf +
-			dd->ipath_rcvegrcnt * dd->ipath_rcvegrbufsize * slave) &
-			MMAP64_MASK;
+		kinfo->spi_rcv_egrbufs = cvt_kvaddr(pd->subport_rcvegrbuf +
+			dd->ipath_rcvegrcnt * dd->ipath_rcvegrbufsize * slave);
 	}
 
 	kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->ipath_piobufbase) /
@@ -199,11 +210,11 @@ static int ipath_get_base_info(struct file *fp,
 	if (master) {
 		kinfo->spi_runtime_flags |= IPATH_RUNTIME_MASTER;
 		kinfo->spi_subport_uregbase =
-			(u64) pd->subport_uregbase & MMAP64_MASK;
+			cvt_kvaddr(pd->subport_uregbase);
 		kinfo->spi_subport_rcvegrbuf =
-			(u64) pd->subport_rcvegrbuf & MMAP64_MASK;
+			cvt_kvaddr(pd->subport_rcvegrbuf);
 		kinfo->spi_subport_rcvhdr_base =
-			(u64) pd->subport_rcvhdr_base & MMAP64_MASK;
+			cvt_kvaddr(pd->subport_rcvhdr_base);
 		ipath_cdbg(PROC, "port %u flags %x %llx %llx %llx\n",
 			kinfo->spi_port, kinfo->spi_runtime_flags,
 			(unsigned long long) kinfo->spi_subport_uregbase,
@@ -1131,13 +1142,11 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
 	struct ipath_devdata *dd;
 	void *addr;
 	size_t size;
-	int ret;
+	int ret = 0;
 
 	/* If the port is not shared, all addresses should be physical */
-	if (!pd->port_subport_cnt) {
-		ret = -EINVAL;
+	if (!pd->port_subport_cnt)
 		goto bail;
-	}
 
 	dd = pd->port_dd;
 	size = pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;
@@ -1149,33 +1158,28 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
 	if (subport == 0) {
 		unsigned num_slaves = pd->port_subport_cnt - 1;
 
-		if (pgaddr == ((u64) pd->subport_uregbase & MMAP64_MASK)) {
+		if (pgaddr == cvt_kvaddr(pd->subport_uregbase)) {
 			addr = pd->subport_uregbase;
 			size = PAGE_SIZE * num_slaves;
-		} else if (pgaddr == ((u64) pd->subport_rcvhdr_base &
-				      MMAP64_MASK)) {
+		} else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base)) {
 			addr = pd->subport_rcvhdr_base;
 			size = pd->port_rcvhdrq_size * num_slaves;
-		} else if (pgaddr == ((u64) pd->subport_rcvegrbuf &
-				      MMAP64_MASK)) {
+		} else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf)) {
 			addr = pd->subport_rcvegrbuf;
 			size *= num_slaves;
-		} else {
-			ret = -EINVAL;
+		} else
 			goto bail;
-		}
-	} else if (pgaddr == (((u64) pd->subport_uregbase +
-			       PAGE_SIZE * (subport - 1)) & MMAP64_MASK)) {
+	} else if (pgaddr == cvt_kvaddr(pd->subport_uregbase +
+					PAGE_SIZE * (subport - 1))) {
 		addr = pd->subport_uregbase + PAGE_SIZE * (subport - 1);
 		size = PAGE_SIZE;
-	} else if (pgaddr == (((u64) pd->subport_rcvhdr_base +
-			       pd->port_rcvhdrq_size * (subport - 1)) &
-			      MMAP64_MASK)) {
+	} else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base +
+					pd->port_rcvhdrq_size * (subport - 1))) {
 		addr = pd->subport_rcvhdr_base +
 			pd->port_rcvhdrq_size * (subport - 1);
 		size = pd->port_rcvhdrq_size;
-	} else if (pgaddr == (((u64) pd->subport_rcvegrbuf +
-			       size * (subport - 1)) & MMAP64_MASK)) {
+	} else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf +
+					size * (subport - 1))) {
 		addr = pd->subport_rcvegrbuf + size * (subport - 1);
 		/* rcvegrbufs are read-only on the slave */
 		if (vma->vm_flags & VM_WRITE) {
@@ -1190,10 +1194,8 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
 		 * with mprotect.
 		 */
 		vma->vm_flags &= ~VM_MAYWRITE;
-	} else {
-		ret = -EINVAL;
+	} else
 		goto bail;
-	}
 	len = vma->vm_end - vma->vm_start;
 	if (len > size) {
 		ipath_cdbg(MM, "FAIL: reqlen %lx > %zx\n", len, size);
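
The ~VM_MAYWRITE line kept as context above is what makes the slave's read-only rcvegrbuf mapping permanent: once VM_MAYWRITE is cleared, a later mprotect() cannot upgrade the region to writable. The same behaviour can be observed from user space with an ordinary shared mapping of a file opened read-only; a small sketch of that analogy (not this driver's interface):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            /* any readable file will do for the demonstration */
            int fd = open("/etc/hostname", O_RDONLY);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            /* a MAP_SHARED mapping of an O_RDONLY fd has no VM_MAYWRITE */
            void *p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
            if (p == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }

            /* so upgrading it to writable is refused (EACCES) */
            if (mprotect(p, 4096, PROT_READ | PROT_WRITE) != 0)
                    perror("mprotect");

            munmap(p, 4096);
            close(fd);
            return 0;
    }
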
@@ -1204,7 +1206,7 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
 	vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
 	vma->vm_ops = &ipath_file_vm_ops;
 	vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
-	ret = 0;
+	ret = 1;
 
 bail:
 	return ret;
@@ -1264,8 +1266,10 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
 	 * Check for kernel virtual addresses first, anything else must
 	 * match a HW or memory address.
 	 */
-	if (pgaddr >= (1ULL<<40)) {
-		ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp));
+	ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp));
+	if (ret) {
+		if (ret > 0)
+			ret = 0;
 		goto bail;
 	}
 
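
Together with the mmap_kvaddr() hunks above (ret initialized to 0, unmatched offsets and non-shared ports now falling through instead of returning -EINVAL, and ret = 1 on success), this gives mmap_kvaddr() a three-way return: 0 means the offset is not one of the kernel-virtual regions and the caller should keep looking, a positive value means the mapping was handled, and a negative value is a real error. The caller folds the positive case back to 0 and only continues to the HW/memory cases when the offset was not claimed. A small self-contained sketch of the same dispatch pattern (hypothetical names, not the driver code):

    #include <errno.h>
    #include <stdio.h>

    /*
     * Return convention mirroring the patched mmap_kvaddr():
     *   0   -> not mine, caller should try the other handlers
     *   1   -> handled successfully
     *   <0  -> handled, but failed with -errno
     */
    static int try_special_region(unsigned long long offset, int writable)
    {
            if (offset != 0x1000)
                    return 0;               /* not a special region */
            if (writable)
                    return -EPERM;          /* ours, but the request is invalid */
            return 1;                       /* ours, mapped successfully */
    }

    static int do_mmap(unsigned long long offset, int writable)
    {
            int ret = try_special_region(offset, writable);

            if (ret) {
                    if (ret > 0)
                            ret = 0;        /* handled: report success */
                    return ret;             /* success or -errno */
            }

            /* ret == 0: fall back to treating the offset as a HW/memory address */
            printf("generic mapping at %#llx\n", offset);
            return 0;
    }

    int main(void)
    {
            do_mmap(0x1000, 0);     /* claimed and handled */
            do_mmap(0x1000, 1);     /* claimed, fails with -EPERM */
            do_mmap(0x2000, 0);     /* not claimed, falls through */
            return 0;
    }
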
@@ -1411,7 +1415,7 @@ static int init_subports(struct ipath_devdata *dd,
 	 */
 	if (uinfo->spu_subport_cnt <= 1)
 		goto bail;
-	if (uinfo->spu_subport_cnt > 4) {
+	if (uinfo->spu_subport_cnt > INFINIPATH_MAX_SUBPORT) {
 		ret = -EINVAL;
 		goto bail;
 	}