Diffstat (limited to 'drivers/infiniband/hw/ehca/ehca_uverbs.c')
-rw-r--r--	drivers/infiniband/hw/ehca/ehca_uverbs.c	395
1 file changed, 164 insertions(+), 231 deletions(-)
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
index e08764e4aef..73db920b694 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -68,105 +68,183 @@ int ehca_dealloc_ucontext(struct ib_ucontext *context)
 	return 0;
 }
 
-struct page *ehca_nopage(struct vm_area_struct *vma,
-			 unsigned long address, int *type)
-{
-	struct page *mypage = NULL;
-	u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT;
-	u32 idr_handle = fileoffset >> 32;
-	u32 q_type = (fileoffset >> 28) & 0xF;	  /* CQ, QP,...        */
-	u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
-	u32 cur_pid = current->tgid;
-	unsigned long flags;
-	struct ehca_cq *cq;
-	struct ehca_qp *qp;
-	struct ehca_pd *pd;
-	u64 offset;
-	void *vaddr;
-
-	switch (q_type) {
-	case 1: /* CQ */
-		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
-		cq = idr_find(&ehca_cq_idr, idr_handle);
-		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-
-		/* make sure this mmap really belongs to the authorized user */
-		if (!cq) {
-			ehca_gen_err("cq is NULL ret=NOPAGE_SIGBUS");
-			return NOPAGE_SIGBUS;
-		}
-
-		if (cq->ownpid != cur_pid) {
-			ehca_err(cq->ib_cq.device,
-				 "Invalid caller pid=%x ownpid=%x",
-				 cur_pid, cq->ownpid);
-			return NOPAGE_SIGBUS;
-		}
-
-		if (rsrc_type == 2) {
-			ehca_dbg(cq->ib_cq.device, "cq=%p cq queuearea", cq);
-			offset = address - vma->vm_start;
-			vaddr = ipz_qeit_calc(&cq->ipz_queue, offset);
-			ehca_dbg(cq->ib_cq.device, "offset=%lx vaddr=%p",
-				 offset, vaddr);
-			mypage = virt_to_page(vaddr);
-		}
-		break;
-
-	case 2: /* QP */
-		spin_lock_irqsave(&ehca_qp_idr_lock, flags);
-		qp = idr_find(&ehca_qp_idr, idr_handle);
-		spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
-
-		/* make sure this mmap really belongs to the authorized user */
-		if (!qp) {
-			ehca_gen_err("qp is NULL ret=NOPAGE_SIGBUS");
-			return NOPAGE_SIGBUS;
-		}
-
-		pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd);
-		if (pd->ownpid != cur_pid) {
-			ehca_err(qp->ib_qp.device,
-				 "Invalid caller pid=%x ownpid=%x",
-				 cur_pid, pd->ownpid);
-			return NOPAGE_SIGBUS;
-		}
-
-		if (rsrc_type == 2) {	/* rqueue */
-			ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueuearea", qp);
-			offset = address - vma->vm_start;
-			vaddr = ipz_qeit_calc(&qp->ipz_rqueue, offset);
-			ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p",
-				 offset, vaddr);
-			mypage = virt_to_page(vaddr);
-		} else if (rsrc_type == 3) {	/* squeue */
-			ehca_dbg(qp->ib_qp.device, "qp=%p qp squeuearea", qp);
-			offset = address - vma->vm_start;
-			vaddr = ipz_qeit_calc(&qp->ipz_squeue, offset);
-			ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p",
-				 offset, vaddr);
-			mypage = virt_to_page(vaddr);
-		}
-		break;
-
-	default:
-		ehca_gen_err("bad queue type %x", q_type);
-		return NOPAGE_SIGBUS;
-	}
-
-	if (!mypage) {
-		ehca_gen_err("Invalid page adr==NULL ret=NOPAGE_SIGBUS");
-		return NOPAGE_SIGBUS;
-	}
-	get_page(mypage);
-
-	return mypage;
-}
-
-static struct vm_operations_struct ehcau_vm_ops = {
-	.nopage = ehca_nopage,
-};
-
+static void ehca_mm_open(struct vm_area_struct *vma)
+{
+	u32 *count = (u32*)vma->vm_private_data;
+	if (!count) {
+		ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
+			     vma->vm_start, vma->vm_end);
+		return;
+	}
+	(*count)++;
+	if (!(*count))
+		ehca_gen_err("Use count overflow vm_start=%lx vm_end=%lx",
+			     vma->vm_start, vma->vm_end);
+	ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
+		     vma->vm_start, vma->vm_end, *count);
+}
+
+static void ehca_mm_close(struct vm_area_struct *vma)
+{
+	u32 *count = (u32*)vma->vm_private_data;
+	if (!count) {
+		ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
+			     vma->vm_start, vma->vm_end);
+		return;
+	}
+	(*count)--;
+	ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
+		     vma->vm_start, vma->vm_end, *count);
+}
+
+static struct vm_operations_struct vm_ops = {
+	.open = ehca_mm_open,
+	.close = ehca_mm_close,
+};
+
+static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas,
+			u32 *mm_count)
+{
+	int ret;
+	u64 vsize, physical;
+
+	vsize = vma->vm_end - vma->vm_start;
+	if (vsize != EHCA_PAGESIZE) {
+		ehca_gen_err("invalid vsize=%lx", vma->vm_end - vma->vm_start);
+		return -EINVAL;
+	}
+
+	physical = galpas->user.fw_handle;
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	ehca_gen_dbg("vsize=%lx physical=%lx", vsize, physical);
+	/* VM_IO | VM_RESERVED are set by remap_pfn_range() */
+	ret = remap_pfn_range(vma, vma->vm_start, physical >> PAGE_SHIFT,
+			      vsize, vma->vm_page_prot);
+	if (unlikely(ret)) {
+		ehca_gen_err("remap_pfn_range() failed ret=%x", ret);
+		return -ENOMEM;
+	}
+
+	vma->vm_private_data = mm_count;
+	(*mm_count)++;
+	vma->vm_ops = &vm_ops;
+
+	return 0;
+}
+
+static int ehca_mmap_queue(struct vm_area_struct *vma, struct ipz_queue *queue,
+			   u32 *mm_count)
+{
+	int ret;
+	u64 start, ofs;
+	struct page *page;
+
+	vma->vm_flags |= VM_RESERVED;
+	start = vma->vm_start;
+	for (ofs = 0; ofs < queue->queue_length; ofs += PAGE_SIZE) {
+		u64 virt_addr = (u64)ipz_qeit_calc(queue, ofs);
+		page = virt_to_page(virt_addr);
+		ret = vm_insert_page(vma, start, page);
+		if (unlikely(ret)) {
+			ehca_gen_err("vm_insert_page() failed rc=%x", ret);
+			return ret;
+		}
+		start += PAGE_SIZE;
+	}
+	vma->vm_private_data = mm_count;
+	(*mm_count)++;
+	vma->vm_ops = &vm_ops;
+
+	return 0;
+}
+
+static int ehca_mmap_cq(struct vm_area_struct *vma, struct ehca_cq *cq,
+			u32 rsrc_type)
+{
+	int ret;
+
+	switch (rsrc_type) {
+	case 1: /* galpa fw handle */
+		ehca_dbg(cq->ib_cq.device, "cq_num=%x fw", cq->cq_number);
+		ret = ehca_mmap_fw(vma, &cq->galpas, &cq->mm_count_galpa);
+		if (unlikely(ret)) {
+			ehca_err(cq->ib_cq.device,
+				 "ehca_mmap_fw() failed rc=%x cq_num=%x",
+				 ret, cq->cq_number);
+			return ret;
+		}
+		break;
+
+	case 2: /* cq queue_addr */
+		ehca_dbg(cq->ib_cq.device, "cq_num=%x queue", cq->cq_number);
+		ret = ehca_mmap_queue(vma, &cq->ipz_queue, &cq->mm_count_queue);
+		if (unlikely(ret)) {
+			ehca_err(cq->ib_cq.device,
+				 "ehca_mmap_queue() failed rc=%x cq_num=%x",
+				 ret, cq->cq_number);
+			return ret;
+		}
+		break;
+
+	default:
+		ehca_err(cq->ib_cq.device, "bad resource type=%x cq_num=%x",
+			 rsrc_type, cq->cq_number);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
+			u32 rsrc_type)
+{
+	int ret;
+
+	switch (rsrc_type) {
+	case 1: /* galpa fw handle */
+		ehca_dbg(qp->ib_qp.device, "qp_num=%x fw", qp->ib_qp.qp_num);
+		ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa);
+		if (unlikely(ret)) {
+			ehca_err(qp->ib_qp.device,
+				 "remap_pfn_range() failed ret=%x qp_num=%x",
+				 ret, qp->ib_qp.qp_num);
+			return -ENOMEM;
+		}
+		break;
+
+	case 2: /* qp rqueue_addr */
+		ehca_dbg(qp->ib_qp.device, "qp_num=%x rqueue",
+			 qp->ib_qp.qp_num);
+		ret = ehca_mmap_queue(vma, &qp->ipz_rqueue, &qp->mm_count_rqueue);
+		if (unlikely(ret)) {
+			ehca_err(qp->ib_qp.device,
+				 "ehca_mmap_queue(rq) failed rc=%x qp_num=%x",
+				 ret, qp->ib_qp.qp_num);
+			return ret;
+		}
+		break;
+
+	case 3: /* qp squeue_addr */
+		ehca_dbg(qp->ib_qp.device, "qp_num=%x squeue",
+			 qp->ib_qp.qp_num);
+		ret = ehca_mmap_queue(vma, &qp->ipz_squeue, &qp->mm_count_squeue);
+		if (unlikely(ret)) {
+			ehca_err(qp->ib_qp.device,
+				 "ehca_mmap_queue(sq) failed rc=%x qp_num=%x",
+				 ret, qp->ib_qp.qp_num);
+			return ret;
+		}
+		break;
+
+	default:
+		ehca_err(qp->ib_qp.device, "bad resource type=%x qp=num=%x",
+			 rsrc_type, qp->ib_qp.qp_num);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 {
 	u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT;
@@ -175,7 +253,6 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 	u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
 	u32 cur_pid = current->tgid;
 	u32 ret;
-	u64 vsize, physical;
 	unsigned long flags;
 	struct ehca_cq *cq;
 	struct ehca_qp *qp;
@@ -201,44 +278,12 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 		if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
 			return -EINVAL;
 
-		switch (rsrc_type) {
-		case 1: /* galpa fw handle */
-			ehca_dbg(cq->ib_cq.device, "cq=%p cq triggerarea", cq);
-			vma->vm_flags |= VM_RESERVED;
-			vsize = vma->vm_end - vma->vm_start;
-			if (vsize != EHCA_PAGESIZE) {
-				ehca_err(cq->ib_cq.device, "invalid vsize=%lx",
-					 vma->vm_end - vma->vm_start);
-				return -EINVAL;
-			}
-
-			physical = cq->galpas.user.fw_handle;
-			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-			vma->vm_flags |= VM_IO | VM_RESERVED;
-
-			ehca_dbg(cq->ib_cq.device,
-				 "vsize=%lx physical=%lx", vsize, physical);
-			ret = remap_pfn_range(vma, vma->vm_start,
-					      physical >> PAGE_SHIFT, vsize,
-					      vma->vm_page_prot);
-			if (ret) {
-				ehca_err(cq->ib_cq.device,
-					 "remap_pfn_range() failed ret=%x",
-					 ret);
-				return -ENOMEM;
-			}
-			break;
-
-		case 2: /* cq queue_addr */
-			ehca_dbg(cq->ib_cq.device, "cq=%p cq q_addr", cq);
-			vma->vm_flags |= VM_RESERVED;
-			vma->vm_ops = &ehcau_vm_ops;
-			break;
-
-		default:
-			ehca_err(cq->ib_cq.device, "bad resource type %x",
-				 rsrc_type);
-			return -EINVAL;
+		ret = ehca_mmap_cq(vma, cq, rsrc_type);
+		if (unlikely(ret)) {
+			ehca_err(cq->ib_cq.device,
+				 "ehca_mmap_cq() failed rc=%x cq_num=%x",
+				 ret, cq->cq_number);
+			return ret;
 		}
 		break;
 
@@ -262,50 +307,12 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 		if (!qp->ib_qp.uobject || qp->ib_qp.uobject->context != context)
 			return -EINVAL;
 
-		switch (rsrc_type) {
-		case 1: /* galpa fw handle */
-			ehca_dbg(qp->ib_qp.device, "qp=%p qp triggerarea", qp);
-			vma->vm_flags |= VM_RESERVED;
-			vsize = vma->vm_end - vma->vm_start;
-			if (vsize != EHCA_PAGESIZE) {
-				ehca_err(qp->ib_qp.device, "invalid vsize=%lx",
-					 vma->vm_end - vma->vm_start);
-				return -EINVAL;
-			}
-
-			physical = qp->galpas.user.fw_handle;
-			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-			vma->vm_flags |= VM_IO | VM_RESERVED;
-
-			ehca_dbg(qp->ib_qp.device, "vsize=%lx physical=%lx",
-				 vsize, physical);
-			ret = remap_pfn_range(vma, vma->vm_start,
-					      physical >> PAGE_SHIFT, vsize,
-					      vma->vm_page_prot);
-			if (ret) {
-				ehca_err(qp->ib_qp.device,
-					 "remap_pfn_range() failed ret=%x",
-					 ret);
-				return -ENOMEM;
-			}
-			break;
-
-		case 2: /* qp rqueue_addr */
-			ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueue_addr", qp);
-			vma->vm_flags |= VM_RESERVED;
-			vma->vm_ops = &ehcau_vm_ops;
-			break;
-
-		case 3: /* qp squeue_addr */
-			ehca_dbg(qp->ib_qp.device, "qp=%p qp squeue_addr", qp);
-			vma->vm_flags |= VM_RESERVED;
-			vma->vm_ops = &ehcau_vm_ops;
-			break;
-
-		default:
-			ehca_err(qp->ib_qp.device, "bad resource type %x",
-				 rsrc_type);
-			return -EINVAL;
+		ret = ehca_mmap_qp(vma, qp, rsrc_type);
+		if (unlikely(ret)) {
+			ehca_err(qp->ib_qp.device,
+				 "ehca_mmap_qp() failed rc=%x qp_num=%x",
+				 ret, qp->ib_qp.qp_num);
+			return ret;
 		}
 		break;
 
@@ -316,77 +323,3 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 
 	return 0;
 }
-
-int ehca_mmap_nopage(u64 foffset, u64 length, void **mapped,
-		     struct vm_area_struct **vma)
-{
-	down_write(&current->mm->mmap_sem);
-	*mapped = (void*)do_mmap(NULL,0, length, PROT_WRITE,
-				 MAP_SHARED | MAP_ANONYMOUS,
-				 foffset);
-	up_write(&current->mm->mmap_sem);
-	if (!(*mapped)) {
-		ehca_gen_err("couldn't mmap foffset=%lx length=%lx",
-			     foffset, length);
-		return -EINVAL;
-	}
-
-	*vma = find_vma(current->mm, (u64)*mapped);
-	if (!(*vma)) {
-		down_write(&current->mm->mmap_sem);
-		do_munmap(current->mm, 0, length);
-		up_write(&current->mm->mmap_sem);
-		ehca_gen_err("couldn't find vma queue=%p", *mapped);
-		return -EINVAL;
-	}
-	(*vma)->vm_flags |= VM_RESERVED;
-	(*vma)->vm_ops = &ehcau_vm_ops;
-
-	return 0;
-}
-
-int ehca_mmap_register(u64 physical, void **mapped,
-		       struct vm_area_struct **vma)
-{
-	int ret;
-	unsigned long vsize;
-	/* ehca hw supports only 4k page */
-	ret = ehca_mmap_nopage(0, EHCA_PAGESIZE, mapped, vma);
-	if (ret) {
-		ehca_gen_err("could'nt mmap physical=%lx", physical);
-		return ret;
-	}
-
-	(*vma)->vm_flags |= VM_RESERVED;
-	vsize = (*vma)->vm_end - (*vma)->vm_start;
-	if (vsize != EHCA_PAGESIZE) {
-		ehca_gen_err("invalid vsize=%lx",
-			     (*vma)->vm_end - (*vma)->vm_start);
-		return -EINVAL;
-	}
-
-	(*vma)->vm_page_prot = pgprot_noncached((*vma)->vm_page_prot);
-	(*vma)->vm_flags |= VM_IO | VM_RESERVED;
-
-	ret = remap_pfn_range((*vma), (*vma)->vm_start,
-			      physical >> PAGE_SHIFT, vsize,
-			      (*vma)->vm_page_prot);
-	if (ret) {
-		ehca_gen_err("remap_pfn_range() failed ret=%x", ret);
-		return -ENOMEM;
-	}
-
-	return 0;
-
-}
-
-int ehca_munmap(unsigned long addr, size_t len) {
-	int ret = 0;
-	struct mm_struct *mm = current->mm;
-	if (mm) {
-		down_write(&mm->mmap_sem);
-		ret = do_munmap(mm, addr, len);
-		up_write(&mm->mmap_sem);
-	}
-	return ret;
-}