path: root/drivers/infiniband/hw/ipath
author     Robert Walsh <rjwalsh@pathscale.com>       2007-04-28 00:07:23 -0400
committer  Roland Dreier <rolandd@cisco.com>          2007-04-30 20:30:28 -0400
commit     6b66b2da1e821181a001c00b04a807724ad803cd (patch)
tree       8ddbae34ef4ad3e9242f91b6d7df4abbd9f3a161 /drivers/infiniband/hw/ipath
parent     9ba6d5529dd919b442eedf5bef1dd28aca2ee9fe (diff)
IB/ipath: Don't corrupt pending mmap list when unmapped objects are freed
Fix the pending mmap code so it doesn't corrupt the list of pending
mmaps and crash the machine when pending mmaps are destroyed without
first being mapped.  Also, remove an unused variable, and use standard
kernel lists instead of our own homebrewed linked list implementation
to keep the pending mmap list.

Signed-off-by: Robert Walsh <robert.walsh@qlogic.com>
Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
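A minimal userspace sketch of the list handling the patch switches to, using stand-ins for the kernel's <linux/list.h> helpers (an illustration, not driver code): a node initialized with INIT_LIST_HEAD() can be unlinked with list_del_init() whether or not it was ever added to a list, which is what the release path needs when a CQ, QP, or SRQ is destroyed before userspace ever mmaps it. The old single-pointer "next" list had no safe way to drop such an entry, so the freed object stayed reachable from dev->pending_mmaps.

/* build: cc -Wall -o pending_demo pending_demo.c && ./pending_demo */
#include <stdio.h>

/* Minimal stand-ins for the kernel's struct list_head helpers. */
struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->next = head;
	head->prev = head;
}

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

/* Unlink an entry and leave it self-linked so it can be unlinked again. */
static void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	INIT_LIST_HEAD(entry);
}

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

int main(void)
{
	struct list_head pending_mmaps, cq_info, qp_info;

	INIT_LIST_HEAD(&pending_mmaps);
	INIT_LIST_HEAD(&cq_info);
	INIT_LIST_HEAD(&qp_info);

	list_add(&cq_info, &pending_mmaps);	/* CQ waiting to be mmapped */
	/* qp_info was created but never queued (or was already mmapped) */

	/* Destroy paths can unlink unconditionally, mapped or not: */
	list_del_init(&cq_info);	/* removed from the pending list   */
	list_del_init(&qp_info);	/* harmless on a self-linked node  */

	printf("pending list empty: %s\n",
	       list_empty(&pending_mmaps) ? "yes" : "no");
	return 0;
}

The kref release function in the patch does this same unconditional unlink under dev->pending_lock before freeing the object.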
Diffstat (limited to 'drivers/infiniband/hw/ipath')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_cq.c    | 51
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_mmap.c  | 64
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_qp.c    | 52
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_srq.c   | 55
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c |  3
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.h | 18
6 files changed, 153 insertions(+), 90 deletions(-)
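Before the diff itself, a hypothetical userspace-side sketch of the new mmap token scheme: instead of echoing a kernel pointer back through udata, the driver now hands out a per-device, page-aligned offset (see ipath_create_mmap_info() below), and ipath_mmap() matches the caller's context and offset against the pending list. Names such as map_queue, uverbs_fd and resp_offset are illustrative assumptions, not part of the real libipathverbs ABI.

/* Hypothetical userspace helper: map a queue using the offset the driver
 * returned through the create verb's response.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/mman.h>

static void *map_queue(int uverbs_fd, unsigned long long resp_offset,
		       size_t queue_bytes)
{
	void *buf;

	/*
	 * The kernel's ipath_mmap() walks dev->pending_mmaps and matches
	 * this offset (and the caller's ucontext) against ip->offset.
	 */
	buf = mmap(NULL, queue_bytes, PROT_READ | PROT_WRITE, MAP_SHARED,
		   uverbs_fd, (off_t) resp_offset);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return NULL;
	}
	return buf;
}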
diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c
index ea78e6dddc90..4715f89528cd 100644
--- a/drivers/infiniband/hw/ipath/ipath_cq.c
+++ b/drivers/infiniband/hw/ipath/ipath_cq.c
@@ -243,33 +243,21 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
 	 * See ipath_mmap() for details.
 	 */
 	if (udata && udata->outlen >= sizeof(__u64)) {
-		struct ipath_mmap_info *ip;
-		__u64 offset = (__u64) wc;
 		int err;
+		u32 s = sizeof *wc + sizeof(struct ib_wc) * entries;
 
-		err = ib_copy_to_udata(udata, &offset, sizeof(offset));
-		if (err) {
-			ret = ERR_PTR(err);
+		cq->ip = ipath_create_mmap_info(dev, s, context, wc);
+		if (!cq->ip) {
+			ret = ERR_PTR(-ENOMEM);
 			goto bail_wc;
 		}
 
-		/* Allocate info for ipath_mmap(). */
-		ip = kmalloc(sizeof(*ip), GFP_KERNEL);
-		if (!ip) {
-			ret = ERR_PTR(-ENOMEM);
-			goto bail_wc;
+		err = ib_copy_to_udata(udata, &cq->ip->offset,
+				       sizeof(cq->ip->offset));
+		if (err) {
+			ret = ERR_PTR(err);
+			goto bail_ip;
 		}
-		cq->ip = ip;
-		ip->context = context;
-		ip->obj = wc;
-		kref_init(&ip->ref);
-		ip->mmap_cnt = 0;
-		ip->size = PAGE_ALIGN(sizeof(*wc) +
-				      sizeof(struct ib_wc) * entries);
-		spin_lock_irq(&dev->pending_lock);
-		ip->next = dev->pending_mmaps;
-		dev->pending_mmaps = ip;
-		spin_unlock_irq(&dev->pending_lock);
 	} else
 		cq->ip = NULL;
 
@@ -277,12 +265,18 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
 	if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
 		spin_unlock(&dev->n_cqs_lock);
 		ret = ERR_PTR(-ENOMEM);
-		goto bail_wc;
+		goto bail_ip;
 	}
 
 	dev->n_cqs_allocated++;
 	spin_unlock(&dev->n_cqs_lock);
 
+	if (cq->ip) {
+		spin_lock_irq(&dev->pending_lock);
+		list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
+		spin_unlock_irq(&dev->pending_lock);
+	}
+
 	/*
 	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
 	 * The number of entries should be >= the number requested or return
@@ -301,12 +295,12 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
 
 	goto done;
 
+bail_ip:
+	kfree(cq->ip);
 bail_wc:
 	vfree(wc);
-
 bail_cq:
 	kfree(cq);
-
 done:
 	return ret;
 }
@@ -443,13 +437,12 @@ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
 	if (cq->ip) {
 		struct ipath_ibdev *dev = to_idev(ibcq->device);
 		struct ipath_mmap_info *ip = cq->ip;
+		u32 s = sizeof *wc + sizeof(struct ib_wc) * cqe;
 
-		ip->obj = wc;
-		ip->size = PAGE_ALIGN(sizeof(*wc) +
-				      sizeof(struct ib_wc) * cqe);
+		ipath_update_mmap_info(dev, ip, s, wc);
 		spin_lock_irq(&dev->pending_lock);
-		ip->next = dev->pending_mmaps;
-		dev->pending_mmaps = ip;
+		if (list_empty(&ip->pending_mmaps))
+			list_add(&ip->pending_mmaps, &dev->pending_mmaps);
 		spin_unlock_irq(&dev->pending_lock);
 	}
 
diff --git a/drivers/infiniband/hw/ipath/ipath_mmap.c b/drivers/infiniband/hw/ipath/ipath_mmap.c
index a82157db4689..937bc3396b53 100644
--- a/drivers/infiniband/hw/ipath/ipath_mmap.c
+++ b/drivers/infiniband/hw/ipath/ipath_mmap.c
@@ -46,6 +46,11 @@ void ipath_release_mmap_info(struct kref *ref)
 {
 	struct ipath_mmap_info *ip =
 		container_of(ref, struct ipath_mmap_info, ref);
+	struct ipath_ibdev *dev = to_idev(ip->context->device);
+
+	spin_lock_irq(&dev->pending_lock);
+	list_del(&ip->pending_mmaps);
+	spin_unlock_irq(&dev->pending_lock);
 
 	vfree(ip->obj);
 	kfree(ip);
@@ -60,14 +65,12 @@ static void ipath_vma_open(struct vm_area_struct *vma)
 	struct ipath_mmap_info *ip = vma->vm_private_data;
 
 	kref_get(&ip->ref);
-	ip->mmap_cnt++;
 }
 
 static void ipath_vma_close(struct vm_area_struct *vma)
 {
 	struct ipath_mmap_info *ip = vma->vm_private_data;
 
-	ip->mmap_cnt--;
 	kref_put(&ip->ref, ipath_release_mmap_info);
 }
 
@@ -87,7 +90,7 @@ int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 	struct ipath_ibdev *dev = to_idev(context->device);
 	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
 	unsigned long size = vma->vm_end - vma->vm_start;
-	struct ipath_mmap_info *ip, **pp;
+	struct ipath_mmap_info *ip, *pp;
 	int ret = -EINVAL;
 
 	/*
@@ -96,15 +99,16 @@ int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 	 * CQ, QP, or SRQ is soon followed by a call to mmap().
 	 */
 	spin_lock_irq(&dev->pending_lock);
-	for (pp = &dev->pending_mmaps; (ip = *pp); pp = &ip->next) {
+	list_for_each_entry_safe(ip, pp, &dev->pending_mmaps,
+				 pending_mmaps) {
 		/* Only the creator is allowed to mmap the object */
-		if (context != ip->context || (void *) offset != ip->obj)
+		if (context != ip->context || (__u64) offset != ip->offset)
 			continue;
 		/* Don't allow a mmap larger than the object. */
 		if (size > ip->size)
 			break;
 
-		*pp = ip->next;
+		list_del_init(&ip->pending_mmaps);
 		spin_unlock_irq(&dev->pending_lock);
 
 		ret = remap_vmalloc_range(vma, ip->obj, 0);
@@ -119,3 +123,51 @@ int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 done:
 	return ret;
 }
+
+/*
+ * Allocate information for ipath_mmap
+ */
+struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev,
+					       u32 size,
+					       struct ib_ucontext *context,
+					       void *obj) {
+	struct ipath_mmap_info *ip;
+
+	ip = kmalloc(sizeof *ip, GFP_KERNEL);
+	if (!ip)
+		goto bail;
+
+	size = PAGE_ALIGN(size);
+
+	spin_lock_irq(&dev->mmap_offset_lock);
+	if (dev->mmap_offset == 0)
+		dev->mmap_offset = PAGE_SIZE;
+	ip->offset = dev->mmap_offset;
+	dev->mmap_offset += size;
+	spin_unlock_irq(&dev->mmap_offset_lock);
+
+	INIT_LIST_HEAD(&ip->pending_mmaps);
+	ip->size = size;
+	ip->context = context;
+	ip->obj = obj;
+	kref_init(&ip->ref);
+
+bail:
+	return ip;
+}
+
+void ipath_update_mmap_info(struct ipath_ibdev *dev,
+			    struct ipath_mmap_info *ip,
+			    u32 size, void *obj) {
+	size = PAGE_ALIGN(size);
+
+	spin_lock_irq(&dev->mmap_offset_lock);
+	if (dev->mmap_offset == 0)
+		dev->mmap_offset = PAGE_SIZE;
+	ip->offset = dev->mmap_offset;
+	dev->mmap_offset += size;
+	spin_unlock_irq(&dev->mmap_offset_lock);
+
+	ip->size = size;
+	ip->obj = obj;
+}
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 16db9ac0b402..bfef08ecd342 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -844,34 +844,36 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 	 * See ipath_mmap() for details.
 	 */
 	if (udata && udata->outlen >= sizeof(__u64)) {
-		struct ipath_mmap_info *ip;
-		__u64 offset = (__u64) qp->r_rq.wq;
 		int err;
 
-		err = ib_copy_to_udata(udata, &offset, sizeof(offset));
-		if (err) {
-			ret = ERR_PTR(err);
-			goto bail_rwq;
-		}
+		if (!qp->r_rq.wq) {
+			__u64 offset = 0;
 
-		if (qp->r_rq.wq) {
-			/* Allocate info for ipath_mmap(). */
-			ip = kmalloc(sizeof(*ip), GFP_KERNEL);
-			if (!ip) {
+			err = ib_copy_to_udata(udata, &offset,
+					       sizeof(offset));
+			if (err) {
+				ret = ERR_PTR(err);
+				goto bail_rwq;
+			}
+		} else {
+			u32 s = sizeof(struct ipath_rwq) +
+				qp->r_rq.size * sz;
+
+			qp->ip =
+			    ipath_create_mmap_info(dev, s,
+						   ibpd->uobject->context,
+						   qp->r_rq.wq);
+			if (!qp->ip) {
 				ret = ERR_PTR(-ENOMEM);
 				goto bail_rwq;
 			}
-			qp->ip = ip;
-			ip->context = ibpd->uobject->context;
-			ip->obj = qp->r_rq.wq;
-			kref_init(&ip->ref);
-			ip->mmap_cnt = 0;
-			ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
-					      qp->r_rq.size * sz);
-			spin_lock_irq(&dev->pending_lock);
-			ip->next = dev->pending_mmaps;
-			dev->pending_mmaps = ip;
-			spin_unlock_irq(&dev->pending_lock);
+
+			err = ib_copy_to_udata(udata, &(qp->ip->offset),
+					       sizeof(qp->ip->offset));
+			if (err) {
+				ret = ERR_PTR(err);
+				goto bail_ip;
+			}
 		}
 	}
 
@@ -885,6 +887,12 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 	dev->n_qps_allocated++;
 	spin_unlock(&dev->n_qps_lock);
 
+	if (qp->ip) {
+		spin_lock_irq(&dev->pending_lock);
+		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
+		spin_unlock_irq(&dev->pending_lock);
+	}
+
 	ret = &qp->ibqp;
 	goto bail;
 
diff --git a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/infiniband/hw/ipath/ipath_srq.c
index 94033503400c..03acae66ba81 100644
--- a/drivers/infiniband/hw/ipath/ipath_srq.c
+++ b/drivers/infiniband/hw/ipath/ipath_srq.c
@@ -139,33 +139,24 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
 	 * See ipath_mmap() for details.
 	 */
 	if (udata && udata->outlen >= sizeof(__u64)) {
-		struct ipath_mmap_info *ip;
-		__u64 offset = (__u64) srq->rq.wq;
 		int err;
+		u32 s = sizeof(struct ipath_rwq) + srq->rq.size * sz;
 
-		err = ib_copy_to_udata(udata, &offset, sizeof(offset));
-		if (err) {
-			ret = ERR_PTR(err);
+		srq->ip =
+		    ipath_create_mmap_info(dev, s,
+					   ibpd->uobject->context,
+					   srq->rq.wq);
+		if (!srq->ip) {
+			ret = ERR_PTR(-ENOMEM);
 			goto bail_wq;
 		}
 
-		/* Allocate info for ipath_mmap(). */
-		ip = kmalloc(sizeof(*ip), GFP_KERNEL);
-		if (!ip) {
-			ret = ERR_PTR(-ENOMEM);
-			goto bail_wq;
+		err = ib_copy_to_udata(udata, &srq->ip->offset,
+				       sizeof(srq->ip->offset));
+		if (err) {
+			ret = ERR_PTR(err);
+			goto bail_ip;
 		}
-		srq->ip = ip;
-		ip->context = ibpd->uobject->context;
-		ip->obj = srq->rq.wq;
-		kref_init(&ip->ref);
-		ip->mmap_cnt = 0;
-		ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
-				      srq->rq.size * sz);
-		spin_lock_irq(&dev->pending_lock);
-		ip->next = dev->pending_mmaps;
-		dev->pending_mmaps = ip;
-		spin_unlock_irq(&dev->pending_lock);
 	} else
 		srq->ip = NULL;
 
@@ -181,21 +172,27 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
 	if (dev->n_srqs_allocated == ib_ipath_max_srqs) {
 		spin_unlock(&dev->n_srqs_lock);
 		ret = ERR_PTR(-ENOMEM);
-		goto bail_wq;
+		goto bail_ip;
 	}
 
 	dev->n_srqs_allocated++;
 	spin_unlock(&dev->n_srqs_lock);
 
+	if (srq->ip) {
+		spin_lock_irq(&dev->pending_lock);
+		list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
+		spin_unlock_irq(&dev->pending_lock);
+	}
+
 	ret = &srq->ibsrq;
 	goto done;
 
+bail_ip:
+	kfree(srq->ip);
 bail_wq:
 	vfree(srq->rq.wq);
-
 bail_srq:
 	kfree(srq);
-
 done:
 	return ret;
 }
@@ -312,13 +309,13 @@ int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 		if (srq->ip) {
 			struct ipath_mmap_info *ip = srq->ip;
 			struct ipath_ibdev *dev = to_idev(srq->ibsrq.device);
+			u32 s = sizeof(struct ipath_rwq) + size * sz;
 
-			ip->obj = wq;
-			ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
-					      size * sz);
+			ipath_update_mmap_info(dev, ip, s, wq);
 			spin_lock_irq(&dev->pending_lock);
-			ip->next = dev->pending_mmaps;
-			dev->pending_mmaps = ip;
+			if (list_empty(&ip->pending_mmaps))
+				list_add(&ip->pending_mmaps,
+					 &dev->pending_mmaps);
 			spin_unlock_irq(&dev->pending_lock);
 		}
 	} else if (attr_mask & IB_SRQ_LIMIT) {
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 18c6df2052c2..b676ea81fc41 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -1476,7 +1476,10 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
 		ret = -ENOMEM;
 		goto err_lk;
 	}
+	INIT_LIST_HEAD(&idev->pending_mmaps);
 	spin_lock_init(&idev->pending_lock);
+	idev->mmap_offset = PAGE_SIZE;
+	spin_lock_init(&idev->mmap_offset_lock);
 	INIT_LIST_HEAD(&idev->pending[0]);
 	INIT_LIST_HEAD(&idev->pending[1]);
 	INIT_LIST_HEAD(&idev->pending[2]);
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index c62f9c5854f6..ac66c00a2976 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -173,12 +173,12 @@ struct ipath_ah {
  * this as its vm_private_data.
  */
 struct ipath_mmap_info {
-	struct ipath_mmap_info *next;
+	struct list_head pending_mmaps;
 	struct ib_ucontext *context;
 	void *obj;
+	__u64 offset;
 	struct kref ref;
 	unsigned size;
-	unsigned mmap_cnt;
 };
 
 /*
@@ -485,9 +485,10 @@ struct ipath_opcode_stats {
 
 struct ipath_ibdev {
 	struct ib_device ibdev;
-	struct list_head dev_list;
 	struct ipath_devdata *dd;
-	struct ipath_mmap_info *pending_mmaps;
+	struct list_head pending_mmaps;
+	spinlock_t mmap_offset_lock;
+	u32 mmap_offset;
 	int ib_unit;		/* This is the device number */
 	u16 sm_lid;		/* in host order */
 	u8 sm_sl;
@@ -768,6 +769,15 @@ int ipath_dealloc_fmr(struct ib_fmr *ibfmr);
 
 void ipath_release_mmap_info(struct kref *ref);
 
+struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev,
+					       u32 size,
+					       struct ib_ucontext *context,
+					       void *obj);
+
+void ipath_update_mmap_info(struct ipath_ibdev *dev,
+			    struct ipath_mmap_info *ip,
+			    u32 size, void *obj);
+
 int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
 
 void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev);