Diffstat (limited to 'drivers/infiniband/hw/ehca/ehca_uverbs.c')
-rw-r--r--   drivers/infiniband/hw/ehca/ehca_uverbs.c   46
1 file changed, 23 insertions(+), 23 deletions(-)
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
index 4bc687fdf531..5234d6c15c49 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -109,7 +109,7 @@ static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas,
 	u64 vsize, physical;
 
 	vsize = vma->vm_end - vma->vm_start;
-	if (vsize != EHCA_PAGESIZE) {
+	if (vsize < EHCA_PAGESIZE) {
 		ehca_gen_err("invalid vsize=%lx", vma->vm_end - vma->vm_start);
 		return -EINVAL;
 	}
@@ -118,10 +118,10 @@ static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas,
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	ehca_gen_dbg("vsize=%lx physical=%lx", vsize, physical);
 	/* VM_IO | VM_RESERVED are set by remap_pfn_range() */
-	ret = remap_pfn_range(vma, vma->vm_start, physical >> PAGE_SHIFT,
-			      vsize, vma->vm_page_prot);
+	ret = remap_4k_pfn(vma, vma->vm_start, physical >> EHCA_PAGESHIFT,
+			   vma->vm_page_prot);
 	if (unlikely(ret)) {
-		ehca_gen_err("remap_pfn_range() failed ret=%x", ret);
+		ehca_gen_err("remap_pfn_range() failed ret=%i", ret);
 		return -ENOMEM;
 	}
 
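Note on the two ehca_mmap_fw() hunks above: the firmware context (galpa) is a single 4 KB register page, while a ppc64 kernel may run with 64 KB pages, so `physical >> PAGE_SHIFT` can drop the 4 KB-aligned part of the address and name the wrong frame. `remap_4k_pfn()` takes the pfn in 4 KB units instead, which also appears to be why the size check is relaxed from `!=` to `<`: the VMA may span a full kernel page even though only 4 KB of registers is mapped. A minimal, stand-alone sketch of the unit change, assuming EHCA_PAGESHIFT is 12 and a 64 KB PAGE_SHIFT; the address value is made up:

/* Illustration only -- not driver code.  Shows why the pfn argument
 * changes units when switching from remap_pfn_range() to remap_4k_pfn().
 * Assumes EHCA_PAGESHIFT == 12 (4 KB) and PAGE_SHIFT == 16 (64 KB pages);
 * the physical address is hypothetical. */
#include <stdio.h>

#define PAGE_SHIFT      16
#define EHCA_PAGESHIFT  12

int main(void)
{
        unsigned long long physical = 0x3fe045000ULL;   /* hypothetical galpa address */

        /* old: pfn in kernel-page units -- the 4 KB sub-offset is lost */
        printf("pfn (PAGE_SHIFT)     = %#llx\n", physical >> PAGE_SHIFT);

        /* new: pfn in 4 KB units, as remap_4k_pfn() expects */
        printf("pfn (EHCA_PAGESHIFT) = %#llx\n", physical >> EHCA_PAGESHIFT);
        return 0;
}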
@@ -146,7 +146,7 @@ static int ehca_mmap_queue(struct vm_area_struct *vma, struct ipz_queue *queue,
 		page = virt_to_page(virt_addr);
 		ret = vm_insert_page(vma, start, page);
 		if (unlikely(ret)) {
-			ehca_gen_err("vm_insert_page() failed rc=%x", ret);
+			ehca_gen_err("vm_insert_page() failed rc=%i", ret);
 			return ret;
 		}
 		start += PAGE_SIZE;
@@ -164,23 +164,23 @@ static int ehca_mmap_cq(struct vm_area_struct *vma, struct ehca_cq *cq,
 	int ret;
 
 	switch (rsrc_type) {
-	case 1: /* galpa fw handle */
+	case 0: /* galpa fw handle */
 		ehca_dbg(cq->ib_cq.device, "cq_num=%x fw", cq->cq_number);
 		ret = ehca_mmap_fw(vma, &cq->galpas, &cq->mm_count_galpa);
 		if (unlikely(ret)) {
 			ehca_err(cq->ib_cq.device,
-				 "ehca_mmap_fw() failed rc=%x cq_num=%x",
+				 "ehca_mmap_fw() failed rc=%i cq_num=%x",
 				 ret, cq->cq_number);
 			return ret;
 		}
 		break;
 
-	case 2: /* cq queue_addr */
+	case 1: /* cq queue_addr */
 		ehca_dbg(cq->ib_cq.device, "cq_num=%x queue", cq->cq_number);
 		ret = ehca_mmap_queue(vma, &cq->ipz_queue, &cq->mm_count_queue);
 		if (unlikely(ret)) {
 			ehca_err(cq->ib_cq.device,
-				 "ehca_mmap_queue() failed rc=%x cq_num=%x",
+				 "ehca_mmap_queue() failed rc=%i cq_num=%x",
 				 ret, cq->cq_number);
 			return ret;
 		}
@@ -201,38 +201,38 @@ static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
 	int ret;
 
 	switch (rsrc_type) {
-	case 1: /* galpa fw handle */
+	case 0: /* galpa fw handle */
 		ehca_dbg(qp->ib_qp.device, "qp_num=%x fw", qp->ib_qp.qp_num);
 		ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa);
 		if (unlikely(ret)) {
 			ehca_err(qp->ib_qp.device,
-				 "remap_pfn_range() failed ret=%x qp_num=%x",
+				 "remap_pfn_range() failed ret=%i qp_num=%x",
 				 ret, qp->ib_qp.qp_num);
 			return -ENOMEM;
 		}
 		break;
 
-	case 2: /* qp rqueue_addr */
+	case 1: /* qp rqueue_addr */
 		ehca_dbg(qp->ib_qp.device, "qp_num=%x rqueue",
 			 qp->ib_qp.qp_num);
 		ret = ehca_mmap_queue(vma, &qp->ipz_rqueue,
 				      &qp->mm_count_rqueue);
 		if (unlikely(ret)) {
 			ehca_err(qp->ib_qp.device,
-				 "ehca_mmap_queue(rq) failed rc=%x qp_num=%x",
+				 "ehca_mmap_queue(rq) failed rc=%i qp_num=%x",
 				 ret, qp->ib_qp.qp_num);
 			return ret;
 		}
 		break;
 
-	case 3: /* qp squeue_addr */
+	case 2: /* qp squeue_addr */
 		ehca_dbg(qp->ib_qp.device, "qp_num=%x squeue",
 			 qp->ib_qp.qp_num);
 		ret = ehca_mmap_queue(vma, &qp->ipz_squeue,
 				      &qp->mm_count_squeue);
 		if (unlikely(ret)) {
 			ehca_err(qp->ib_qp.device,
-				 "ehca_mmap_queue(sq) failed rc=%x qp_num=%x",
+				 "ehca_mmap_queue(sq) failed rc=%i qp_num=%x",
 				 ret, qp->ib_qp.qp_num);
 			return ret;
 		}
@@ -249,10 +249,10 @@ static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
 
 int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 {
-	u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT;
-	u32 idr_handle = fileoffset >> 32;
-	u32 q_type = (fileoffset >> 28) & 0xF;	  /* CQ, QP,...        */
-	u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
+	u64 fileoffset = vma->vm_pgoff;
+	u32 idr_handle = fileoffset & 0x1FFFFFF;
+	u32 q_type = (fileoffset >> 27) & 0x1;	  /* CQ, QP,...        */
+	u32 rsrc_type = (fileoffset >> 25) & 0x3; /* sq,rq,cmnd_window */
 	u32 cur_pid = current->tgid;
 	u32 ret;
 	struct ehca_cq *cq;
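The ehca_mmap() hunk above also reworks how the mmap offset is interpreted: instead of rebuilding a byte offset via `vm_pgoff << PAGE_SHIFT` (idr handle above bit 32, q_type in bits 28-31, rsrc_type in bits 24-27), the token is now packed directly into `vm_pgoff` -- bits 0-24 for the idr handle, bits 25-26 for the resource type, bit 27 for the queue type -- so the layout no longer depends on the kernel's PAGE_SHIFT. The narrower fields are also why the case labels in the surrounding hunks become zero-based (0/1/2). A small decoding sketch, following the bit layout shown in the new lines; the helper name and the sample token are hypothetical, not part of the driver:

/* Illustration only -- mirrors the field extraction in the new ehca_mmap();
 * decode_pgoff() and the sample token below are made up for this example. */
#include <stdio.h>

static void decode_pgoff(unsigned long long pgoff)
{
        unsigned int idr_handle = pgoff & 0x1FFFFFF;    /* bits 0-24 */
        unsigned int rsrc_type  = (pgoff >> 25) & 0x3;  /* 0=fw galpa, 1=rqueue/queue, 2=squeue */
        unsigned int q_type     = (pgoff >> 27) & 0x1;  /* 0=CQ, 1=QP */

        printf("pgoff=%#llx -> q_type=%u rsrc_type=%u idr_handle=%#x\n",
               pgoff, q_type, rsrc_type, idr_handle);
}

int main(void)
{
        /* hypothetical token: QP (q_type 1), send queue (rsrc_type 2), idr handle 0x2a */
        decode_pgoff((1ULL << 27) | (2ULL << 25) | 0x2a);
        return 0;
}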
@@ -261,7 +261,7 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 	struct ib_uobject *uobject;
 
 	switch (q_type) {
-	case 1: /* CQ */
+	case 0: /* CQ */
 		read_lock(&ehca_cq_idr_lock);
 		cq = idr_find(&ehca_cq_idr, idr_handle);
 		read_unlock(&ehca_cq_idr_lock);
@@ -283,13 +283,13 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 		ret = ehca_mmap_cq(vma, cq, rsrc_type);
 		if (unlikely(ret)) {
 			ehca_err(cq->ib_cq.device,
-				 "ehca_mmap_cq() failed rc=%x cq_num=%x",
+				 "ehca_mmap_cq() failed rc=%i cq_num=%x",
 				 ret, cq->cq_number);
 			return ret;
 		}
 		break;
 
-	case 2: /* QP */
+	case 1: /* QP */
 		read_lock(&ehca_qp_idr_lock);
 		qp = idr_find(&ehca_qp_idr, idr_handle);
 		read_unlock(&ehca_qp_idr_lock);
@@ -313,7 +313,7 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 		ret = ehca_mmap_qp(vma, qp, rsrc_type);
 		if (unlikely(ret)) {
 			ehca_err(qp->ib_qp.device,
-				 "ehca_mmap_qp() failed rc=%x qp_num=%x",
+				 "ehca_mmap_qp() failed rc=%i qp_num=%x",
 				 ret, qp->ib_qp.qp_num);
 			return ret;
 		}