Diffstat (limited to 'drivers/infiniband/core')
-rw-r--r--  drivers/infiniband/core/Makefile                                              |   4
-rw-r--r--  drivers/infiniband/core/device.c                                              |   2
-rw-r--r--  drivers/infiniband/core/umem.c (renamed from drivers/infiniband/core/uverbs_mem.c) | 136
-rw-r--r--  drivers/infiniband/core/uverbs.h                                              |   6
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c                                          |  60
-rw-r--r--  drivers/infiniband/core/uverbs_main.c                                         |  11
6 files changed, 120 insertions, 99 deletions
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 189e5d4b9b1..cb1ab3ea499 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
 
 ib_core-y := packer.o ud_header.o verbs.o sysfs.o \
 		device.o fmr_pool.o cache.o
+ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
 
 ib_mad-y := mad.o smi.o agent.o mad_rmpp.o
 
@@ -28,5 +29,4 @@ ib_umad-y := user_mad.o
 
 ib_ucm-y := ucm.o
 
-ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_mem.o \
-		uverbs_marshall.o
+ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_marshall.o
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 7fabb425b03..592c90aa318 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -613,6 +613,8 @@ static void __exit ib_core_cleanup(void)
 {
 	ib_cache_cleanup();
 	ib_sysfs_cleanup();
+	/* Make sure that any pending umem accounting work is done. */
+	flush_scheduled_work();
 }
 
 module_init(ib_core_init);
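Note: the flush_scheduled_work() added above waits for any vm_locked accounting that ib_umem_release() defers to the system workqueue (see the umem.c changes below). For context, a rough sketch of the pre-existing deferred handler being flushed; the struct ib_umem_account_work layout (work, mm, diff) is inferred from its uses later in this diff and may differ in detail:

    /* Sketch only: deferred mm->locked_vm accounting done off the workqueue. */
    static void ib_umem_account(struct work_struct *_work)
    {
    	struct ib_umem_account_work *work =
    		container_of(_work, struct ib_umem_account_work, work);

    	down_write(&work->mm->mmap_sem);
    	work->mm->locked_vm -= work->diff;
    	up_write(&work->mm->mmap_sem);
    	mmput(work->mm);
    	kfree(work);
    }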
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/umem.c
index c95fe952abd..48e854cf416 100644
--- a/drivers/infiniband/core/uverbs_mem.c
+++ b/drivers/infiniband/core/umem.c
@@ -64,35 +64,56 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
 	}
 }
 
-int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
-		void *addr, size_t size, int write)
+/**
+ * ib_umem_get - Pin and DMA map userspace memory.
+ * @context: userspace context to pin memory for
+ * @addr: userspace virtual address to start at
+ * @size: length of region to pin
+ * @access: IB_ACCESS_xxx flags for memory being pinned
+ */
+struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
+			    size_t size, int access)
 {
+	struct ib_umem *umem;
 	struct page **page_list;
 	struct ib_umem_chunk *chunk;
 	unsigned long locked;
 	unsigned long lock_limit;
 	unsigned long cur_base;
 	unsigned long npages;
-	int ret = 0;
+	int ret;
 	int off;
 	int i;
 
 	if (!can_do_mlock())
-		return -EPERM;
+		return ERR_PTR(-EPERM);
 
-	page_list = (struct page **) __get_free_page(GFP_KERNEL);
-	if (!page_list)
-		return -ENOMEM;
+	umem = kmalloc(sizeof *umem, GFP_KERNEL);
+	if (!umem)
+		return ERR_PTR(-ENOMEM);
 
-	mem->user_base = (unsigned long) addr;
-	mem->length = size;
-	mem->offset = (unsigned long) addr & ~PAGE_MASK;
-	mem->page_size = PAGE_SIZE;
-	mem->writable = write;
+	umem->context = context;
+	umem->length = size;
+	umem->offset = addr & ~PAGE_MASK;
+	umem->page_size = PAGE_SIZE;
+	/*
+	 * We ask for writable memory if any access flags other than
+	 * "remote read" are set. "Local write" and "remote write"
+	 * obviously require write access. "Remote atomic" can do
+	 * things like fetch and add, which will modify memory, and
+	 * "MW bind" can change permissions by binding a window.
+	 */
+	umem->writable = !!(access & ~IB_ACCESS_REMOTE_READ);
 
-	INIT_LIST_HEAD(&mem->chunk_list);
+	INIT_LIST_HEAD(&umem->chunk_list);
+
+	page_list = (struct page **) __get_free_page(GFP_KERNEL);
+	if (!page_list) {
+		kfree(umem);
+		return ERR_PTR(-ENOMEM);
+	}
 
-	npages = PAGE_ALIGN(size + mem->offset) >> PAGE_SHIFT;
+	npages = PAGE_ALIGN(size + umem->offset) >> PAGE_SHIFT;
 
 	down_write(&current->mm->mmap_sem);
 
@@ -104,13 +125,13 @@ int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
 		goto out;
 	}
 
-	cur_base = (unsigned long) addr & PAGE_MASK;
+	cur_base = addr & PAGE_MASK;
 
 	while (npages) {
 		ret = get_user_pages(current, current->mm, cur_base,
 				     min_t(int, npages,
 					   PAGE_SIZE / sizeof (struct page *)),
-				     1, !write, page_list, NULL);
+				     1, !umem->writable, page_list, NULL);
 
 		if (ret < 0)
 			goto out;
@@ -136,7 +157,7 @@ int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
 				chunk->page_list[i].length = PAGE_SIZE;
 			}
 
-			chunk->nmap = ib_dma_map_sg(dev,
+			chunk->nmap = ib_dma_map_sg(context->device,
 						    &chunk->page_list[0],
 						    chunk->nents,
 						    DMA_BIDIRECTIONAL);
@@ -151,33 +172,25 @@ int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
 
 			ret -= chunk->nents;
 			off += chunk->nents;
-			list_add_tail(&chunk->list, &mem->chunk_list);
+			list_add_tail(&chunk->list, &umem->chunk_list);
 		}
 
 		ret = 0;
 	}
 
 out:
-	if (ret < 0)
-		__ib_umem_release(dev, mem, 0);
-	else
+	if (ret < 0) {
+		__ib_umem_release(context->device, umem, 0);
+		kfree(umem);
+	} else
 		current->mm->locked_vm = locked;
 
 	up_write(&current->mm->mmap_sem);
 	free_page((unsigned long) page_list);
 
-	return ret;
-}
-
-void ib_umem_release(struct ib_device *dev, struct ib_umem *umem)
-{
-	__ib_umem_release(dev, umem, 1);
-
-	down_write(&current->mm->mmap_sem);
-	current->mm->locked_vm -=
-		PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
-	up_write(&current->mm->mmap_sem);
+	return ret < 0 ? ERR_PTR(ret) : umem;
 }
+EXPORT_SYMBOL(ib_umem_get);
 
 static void ib_umem_account(struct work_struct *_work)
 {
@@ -191,35 +204,70 @@ static void ib_umem_account(struct work_struct *_work)
 	kfree(work);
 }
 
-void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem)
+/**
+ * ib_umem_release - release memory pinned with ib_umem_get
+ * @umem: umem struct to release
+ */
+void ib_umem_release(struct ib_umem *umem)
 {
 	struct ib_umem_account_work *work;
+	struct ib_ucontext *context = umem->context;
 	struct mm_struct *mm;
+	unsigned long diff;
 
-	__ib_umem_release(dev, umem, 1);
+	__ib_umem_release(umem->context->device, umem, 1);
 
 	mm = get_task_mm(current);
 	if (!mm)
 		return;
 
+	diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
+	kfree(umem);
+
 	/*
 	 * We may be called with the mm's mmap_sem already held. This
 	 * can happen when a userspace munmap() is the call that drops
 	 * the last reference to our file and calls our release
 	 * method. If there are memory regions to destroy, we'll end
-	 * up here and not be able to take the mmap_sem. Therefore we
-	 * defer the vm_locked accounting to the system workqueue.
+	 * up here and not be able to take the mmap_sem. In that case
+	 * we defer the vm_locked accounting to the system workqueue.
 	 */
+	if (context->closing && !down_write_trylock(&mm->mmap_sem)) {
+		work = kmalloc(sizeof *work, GFP_KERNEL);
+		if (!work) {
+			mmput(mm);
+			return;
+		}
 
-	work = kmalloc(sizeof *work, GFP_KERNEL);
-	if (!work) {
-		mmput(mm);
+		INIT_WORK(&work->work, ib_umem_account);
+		work->mm = mm;
+		work->diff = diff;
+
+		schedule_work(&work->work);
 		return;
-	}
+	} else
+		down_write(&mm->mmap_sem);
+
+	current->mm->locked_vm -= diff;
+	up_write(&mm->mmap_sem);
+	mmput(mm);
+}
+EXPORT_SYMBOL(ib_umem_release);
+
+int ib_umem_page_count(struct ib_umem *umem)
+{
+	struct ib_umem_chunk *chunk;
+	int shift;
+	int i;
+	int n;
+
+	shift = ilog2(umem->page_size);
 
-	INIT_WORK(&work->work, ib_umem_account);
-	work->mm = mm;
-	work->diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
+	n = 0;
+	list_for_each_entry(chunk, &umem->chunk_list, list)
+		for (i = 0; i < chunk->nmap; ++i)
+			n += sg_dma_len(&chunk->page_list[i]) >> shift;
 
-	schedule_work(&work->work);
+	return n;
 }
+EXPORT_SYMBOL(ib_umem_page_count);
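With uverbs_mem.c renamed to umem.c and the symbols exported, low-level drivers can pin and map user memory directly. The new entry points, as they would presumably be declared in the <rdma/ib_umem.h> header that this patch starts including (the header itself sits outside this diffstat), match the definitions above:

    struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
    			    size_t size, int access);
    void ib_umem_release(struct ib_umem *umem);
    int ib_umem_page_count(struct ib_umem *umem);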
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 102a59c033f..c33546f9e96 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -45,6 +45,7 @@
 #include <linux/completion.h>
 
 #include <rdma/ib_verbs.h>
+#include <rdma/ib_umem.h>
 #include <rdma/ib_user_verbs.h>
 
 /*
@@ -163,11 +164,6 @@ void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr);
 void ib_uverbs_event_handler(struct ib_event_handler *handler,
 			     struct ib_event *event);
 
-int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
-		void *addr, size_t size, int write);
-void ib_umem_release(struct ib_device *dev, struct ib_umem *umem);
-void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem);
-
 #define IB_UVERBS_DECLARE_CMD(name) \
 	ssize_t ib_uverbs_##name(struct ib_uverbs_file *file, \
 				 const char __user *buf, int in_len, \
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index bab66769be1..01d70084aeb 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
  * Copyright (c) 2005 PathScale, Inc. All rights reserved.
  * Copyright (c) 2006 Mellanox Technologies. All rights reserved.
  *
@@ -295,6 +295,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
 	INIT_LIST_HEAD(&ucontext->qp_list);
 	INIT_LIST_HEAD(&ucontext->srq_list);
 	INIT_LIST_HEAD(&ucontext->ah_list);
+	ucontext->closing = 0;
 
 	resp.num_comp_vectors = file->device->num_comp_vectors;
 
@@ -573,7 +574,7 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
 	struct ib_uverbs_reg_mr cmd;
 	struct ib_uverbs_reg_mr_resp resp;
 	struct ib_udata udata;
-	struct ib_umem_object *obj;
+	struct ib_uobject *uobj;
 	struct ib_pd *pd;
 	struct ib_mr *mr;
 	int ret;
@@ -599,35 +600,21 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
 	    !(cmd.access_flags & IB_ACCESS_LOCAL_WRITE))
 		return -EINVAL;
 
-	obj = kmalloc(sizeof *obj, GFP_KERNEL);
-	if (!obj)
+	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
+	if (!uobj)
 		return -ENOMEM;
 
-	init_uobj(&obj->uobject, 0, file->ucontext, &mr_lock_key);
-	down_write(&obj->uobject.mutex);
-
-	/*
-	 * We ask for writable memory if any access flags other than
-	 * "remote read" are set. "Local write" and "remote write"
-	 * obviously require write access. "Remote atomic" can do
-	 * things like fetch and add, which will modify memory, and
-	 * "MW bind" can change permissions by binding a window.
-	 */
-	ret = ib_umem_get(file->device->ib_dev, &obj->umem,
-			  (void *) (unsigned long) cmd.start, cmd.length,
-			  !!(cmd.access_flags & ~IB_ACCESS_REMOTE_READ));
-	if (ret)
-		goto err_free;
-
-	obj->umem.virt_base = cmd.hca_va;
+	init_uobj(uobj, 0, file->ucontext, &mr_lock_key);
+	down_write(&uobj->mutex);
 
 	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
 	if (!pd) {
 		ret = -EINVAL;
-		goto err_release;
+		goto err_free;
 	}
 
-	mr = pd->device->reg_user_mr(pd, &obj->umem, cmd.access_flags, &udata);
+	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
+				     cmd.access_flags, &udata);
 	if (IS_ERR(mr)) {
 		ret = PTR_ERR(mr);
 		goto err_put;
@@ -635,19 +622,19 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
 
 	mr->device = pd->device;
 	mr->pd = pd;
-	mr->uobject = &obj->uobject;
+	mr->uobject = uobj;
 	atomic_inc(&pd->usecnt);
 	atomic_set(&mr->usecnt, 0);
 
-	obj->uobject.object = mr;
-	ret = idr_add_uobj(&ib_uverbs_mr_idr, &obj->uobject);
+	uobj->object = mr;
+	ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
 	if (ret)
 		goto err_unreg;
 
 	memset(&resp, 0, sizeof resp);
 	resp.lkey = mr->lkey;
 	resp.rkey = mr->rkey;
-	resp.mr_handle = obj->uobject.id;
+	resp.mr_handle = uobj->id;
 
 	if (copy_to_user((void __user *) (unsigned long) cmd.response,
 			 &resp, sizeof resp)) {
@@ -658,17 +645,17 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
 	put_pd_read(pd);
 
 	mutex_lock(&file->mutex);
-	list_add_tail(&obj->uobject.list, &file->ucontext->mr_list);
+	list_add_tail(&uobj->list, &file->ucontext->mr_list);
 	mutex_unlock(&file->mutex);
 
-	obj->uobject.live = 1;
+	uobj->live = 1;
 
-	up_write(&obj->uobject.mutex);
+	up_write(&uobj->mutex);
 
 	return in_len;
 
 err_copy:
-	idr_remove_uobj(&ib_uverbs_mr_idr, &obj->uobject);
+	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
 
 err_unreg:
 	ib_dereg_mr(mr);
@@ -676,11 +663,8 @@ err_unreg:
 err_put:
 	put_pd_read(pd);
 
-err_release:
-	ib_umem_release(file->device->ib_dev, &obj->umem);
-
 err_free:
-	put_uobj_write(&obj->uobject);
+	put_uobj_write(uobj);
 	return ret;
 }
 
@@ -691,7 +675,6 @@ ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
 	struct ib_uverbs_dereg_mr cmd;
 	struct ib_mr *mr;
 	struct ib_uobject *uobj;
-	struct ib_umem_object *memobj;
 	int ret = -EINVAL;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
@@ -701,8 +684,7 @@ ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
 	if (!uobj)
 		return -EINVAL;
 
-	memobj = container_of(uobj, struct ib_umem_object, uobject);
 	mr = uobj->object;
 
 	ret = ib_dereg_mr(mr);
 	if (!ret)
@@ -719,8 +701,6 @@ ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
 	list_del(&uobj->list);
 	mutex_unlock(&file->mutex);
 
-	ib_umem_release(file->device->ib_dev, &memobj->umem);
-
 	put_uobj(uobj);
 
 	return in_len;
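reg_user_mr() now receives the raw (start, length, hca_va, access_flags) arguments instead of a ready-made ib_umem, so each driver pins the memory itself and releases it on deregistration. A rough sketch of how a driver method might use the exported API under the new convention; the mydrv_* names and the translation-table comment are purely illustrative, not part of this patch:

    #include <rdma/ib_verbs.h>
    #include <rdma/ib_umem.h>

    struct mydrv_mr {			/* hypothetical driver MR wrapper */
    	struct ib_mr	ibmr;
    	struct ib_umem *umem;
    	int		npages;
    };

    static struct ib_mr *mydrv_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
    					u64 virt_addr, int access_flags,
    					struct ib_udata *udata)
    {
    	struct mydrv_mr *mr = kmalloc(sizeof *mr, GFP_KERNEL);
    	if (!mr)
    		return ERR_PTR(-ENOMEM);

    	/* Pin and DMA-map the user buffer; the ucontext comes from the PD's uobject. */
    	mr->umem = ib_umem_get(pd->uobject->context, start, length, access_flags);
    	if (IS_ERR(mr->umem)) {
    		int err = PTR_ERR(mr->umem);
    		kfree(mr);
    		return ERR_PTR(err);
    	}

    	/* Size the HCA translation table from the number of DMA-mapped pages. */
    	mr->npages = ib_umem_page_count(mr->umem);
    	/* ... walk umem->chunk_list and program the HCA's page tables here ... */

    	return &mr->ibmr;
    }

    static int mydrv_dereg_mr(struct ib_mr *ibmr)
    {
    	struct mydrv_mr *mr = container_of(ibmr, struct mydrv_mr, ibmr);

    	/* Unpin and drop the vm_locked accounting taken by ib_umem_get(). */
    	ib_umem_release(mr->umem);
    	kfree(mr);
    	return 0;
    }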
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index d44e5479965..14d7ccd8919 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -183,6 +183,8 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
 	if (!context)
 		return 0;
 
+	context->closing = 1;
+
 	list_for_each_entry_safe(uobj, tmp, &context->ah_list, list) {
 		struct ib_ah *ah = uobj->object;
 
@@ -230,16 +232,10 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
 
 	list_for_each_entry_safe(uobj, tmp, &context->mr_list, list) {
 		struct ib_mr *mr = uobj->object;
-		struct ib_device *mrdev = mr->device;
-		struct ib_umem_object *memobj;
 
 		idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
 		ib_dereg_mr(mr);
-
-		memobj = container_of(uobj, struct ib_umem_object, uobject);
-		ib_umem_release_on_close(mrdev, &memobj->umem);
-
-		kfree(memobj);
+		kfree(uobj);
 	}
 
 	list_for_each_entry_safe(uobj, tmp, &context->pd_list, list) {
@@ -906,7 +902,6 @@ static void __exit ib_uverbs_cleanup(void)
 	unregister_filesystem(&uverbs_event_fs);
 	class_destroy(uverbs_class);
 	unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);
-	flush_scheduled_work();
 	idr_destroy(&ib_uverbs_pd_idr);
 	idr_destroy(&ib_uverbs_mr_idr);
 	idr_destroy(&ib_uverbs_mw_idr);
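The context->closing flag set in ib_uverbs_cleanup_ucontext() is what ib_umem_release() consults (see umem.c above) before deciding whether down_write_trylock() on mmap_sem is allowed to fail over to the workqueue. The field itself would be added to struct ib_ucontext in <rdma/ib_verbs.h>, which is outside this diffstat; presumably something along these lines:

    /* Assumed shape of the ib_ucontext change, not shown in this diff. */
    struct ib_ucontext {
    	struct ib_device *device;
    	struct list_head  pd_list, mr_list, mw_list, cq_list,
    			  qp_list, srq_list, ah_list;
    	int		  closing;	/* set during cleanup, read by ib_umem_release() */
    };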