diff options
author | Roland Dreier <rolandd@cisco.com> | 2007-04-18 23:20:28 -0400 |
---|---|---|
committer | Roland Dreier <rolandd@cisco.com> | 2007-05-08 21:00:37 -0400 |
commit | 1bf66a30421ca772820f489d88c16d0c430d6a67 (patch) | |
tree | b1ab223e6908d772bcad7f9bc3382c33ad5a4490 /drivers | |
parent | f7c6a7b5d59980b076abbf2ceeb8735591290285 (diff) |
IB: Put rlimit accounting struct in struct ib_umem
When memory pinned with ib_umem_get() is released, ib_umem_release()
needs to subtract the amount of memory being unpinned from
mm->locked_vm. However, ib_umem_release() may be called with
mm->mmap_sem already held for writing if the memory is being released
as part of an munmap() call, so it is sometimes necessary to defer
this accounting into a workqueue.
However, the work struct used to defer this accounting is dynamically
allocated before it is queued, so there is the possibility of failing
that allocation. If the allocation fails, then ib_umem_release() has no
choice except to bail out and leave the process with a permanently
elevated locked_vm.
Fix this by allocating the structure to defer accounting as part of
the original struct ib_umem, so there's no possibility of failing a
later allocation if creating the struct ib_umem and pinning memory
succeeds.
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/infiniband/core/umem.c | 41 |
1 file changed, 13 insertions, 28 deletions
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 48e854cf416f..f32ca5fbb26b 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c | |||
@@ -39,13 +39,6 @@ | |||
39 | 39 | ||
40 | #include "uverbs.h" | 40 | #include "uverbs.h" |
41 | 41 | ||
42 | struct ib_umem_account_work { | ||
43 | struct work_struct work; | ||
44 | struct mm_struct *mm; | ||
45 | unsigned long diff; | ||
46 | }; | ||
47 | |||
48 | |||
49 | static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty) | 42 | static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty) |
50 | { | 43 | { |
51 | struct ib_umem_chunk *chunk, *tmp; | 44 | struct ib_umem_chunk *chunk, *tmp; |
@@ -192,16 +185,15 @@ out: | |||
192 | } | 185 | } |
193 | EXPORT_SYMBOL(ib_umem_get); | 186 | EXPORT_SYMBOL(ib_umem_get); |
194 | 187 | ||
195 | static void ib_umem_account(struct work_struct *_work) | 188 | static void ib_umem_account(struct work_struct *work) |
196 | { | 189 | { |
197 | struct ib_umem_account_work *work = | 190 | struct ib_umem *umem = container_of(work, struct ib_umem, work); |
198 | container_of(_work, struct ib_umem_account_work, work); | 191 | |
199 | 192 | down_write(&umem->mm->mmap_sem); | |
200 | down_write(&work->mm->mmap_sem); | 193 | umem->mm->locked_vm -= umem->diff; |
201 | work->mm->locked_vm -= work->diff; | 194 | up_write(&umem->mm->mmap_sem); |
202 | up_write(&work->mm->mmap_sem); | 195 | mmput(umem->mm); |
203 | mmput(work->mm); | 196 | kfree(umem); |
204 | kfree(work); | ||
205 | } | 197 | } |
206 | 198 | ||
207 | /** | 199 | /** |
@@ -210,7 +202,6 @@ static void ib_umem_account(struct work_struct *_work) | |||
210 | */ | 202 | */ |
211 | void ib_umem_release(struct ib_umem *umem) | 203 | void ib_umem_release(struct ib_umem *umem) |
212 | { | 204 | { |
213 | struct ib_umem_account_work *work; | ||
214 | struct ib_ucontext *context = umem->context; | 205 | struct ib_ucontext *context = umem->context; |
215 | struct mm_struct *mm; | 206 | struct mm_struct *mm; |
216 | unsigned long diff; | 207 | unsigned long diff; |
@@ -222,7 +213,6 @@ void ib_umem_release(struct ib_umem *umem) | |||
222 | return; | 213 | return; |
223 | 214 | ||
224 | diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT; | 215 | diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT; |
225 | kfree(umem); | ||
226 | 216 | ||
227 | /* | 217 | /* |
228 | * We may be called with the mm's mmap_sem already held. This | 218 | * We may be called with the mm's mmap_sem already held. This |
@@ -233,17 +223,11 @@ void ib_umem_release(struct ib_umem *umem) | |||
233 | * we defer the vm_locked accounting to the system workqueue. | 223 | * we defer the vm_locked accounting to the system workqueue. |
234 | */ | 224 | */ |
235 | if (context->closing && !down_write_trylock(&mm->mmap_sem)) { | 225 | if (context->closing && !down_write_trylock(&mm->mmap_sem)) { |
236 | work = kmalloc(sizeof *work, GFP_KERNEL); | 226 | INIT_WORK(&umem->work, ib_umem_account); |
237 | if (!work) { | 227 | umem->mm = mm; |
238 | mmput(mm); | 228 | umem->diff = diff; |
239 | return; | ||
240 | } | ||
241 | 229 | ||
242 | INIT_WORK(&work->work, ib_umem_account); | 230 | schedule_work(&umem->work); |
243 | work->mm = mm; | ||
244 | work->diff = diff; | ||
245 | |||
246 | schedule_work(&work->work); | ||
247 | return; | 231 | return; |
248 | } else | 232 | } else |
249 | down_write(&mm->mmap_sem); | 233 | down_write(&mm->mmap_sem); |
@@ -251,6 +235,7 @@ void ib_umem_release(struct ib_umem *umem) | |||
251 | current->mm->locked_vm -= diff; | 235 | current->mm->locked_vm -= diff; |
252 | up_write(&mm->mmap_sem); | 236 | up_write(&mm->mmap_sem); |
253 | mmput(mm); | 237 | mmput(mm); |
238 | kfree(umem); | ||
254 | } | 239 | } |
255 | EXPORT_SYMBOL(ib_umem_release); | 240 | EXPORT_SYMBOL(ib_umem_release); |
256 | 241 | ||