about summary refs log tree commit diff stats
path: root/drivers/infiniband/core/uverbs_cmd.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/infiniband/core/uverbs_cmd.c')
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 27
1 file changed, 17 insertions(+), 10 deletions(-)
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 94816aeb95a0..1c02deab068f 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -62,9 +62,11 @@ static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
62 * The ib_uobject locking scheme is as follows: 62 * The ib_uobject locking scheme is as follows:
63 * 63 *
64 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it 64 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
65 * needs to be held during all idr operations. When an object is 65 * needs to be held during all idr write operations. When an object is
66 * looked up, a reference must be taken on the object's kref before 66 * looked up, a reference must be taken on the object's kref before
67 * dropping this lock. 67 * dropping this lock. For read operations, the rcu_read_lock()
 68 * and rcu_read_unlock() suffice, but similarly the kref reference is grabbed
69 * before the rcu_read_unlock().
68 * 70 *
69 * - Each object also has an rwsem. This rwsem must be held for 71 * - Each object also has an rwsem. This rwsem must be held for
70 * reading while an operation that uses the object is performed. 72 * reading while an operation that uses the object is performed.
@@ -96,7 +98,7 @@ static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
96 98
97static void release_uobj(struct kref *kref) 99static void release_uobj(struct kref *kref)
98{ 100{
99 kfree(container_of(kref, struct ib_uobject, ref)); 101 kfree_rcu(container_of(kref, struct ib_uobject, ref), rcu);
100} 102}
101 103
102static void put_uobj(struct ib_uobject *uobj) 104static void put_uobj(struct ib_uobject *uobj)
@@ -145,7 +147,7 @@ static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
145{ 147{
146 struct ib_uobject *uobj; 148 struct ib_uobject *uobj;
147 149
148 spin_lock(&ib_uverbs_idr_lock); 150 rcu_read_lock();
149 uobj = idr_find(idr, id); 151 uobj = idr_find(idr, id);
150 if (uobj) { 152 if (uobj) {
151 if (uobj->context == context) 153 if (uobj->context == context)
@@ -153,7 +155,7 @@ static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
153 else 155 else
154 uobj = NULL; 156 uobj = NULL;
155 } 157 }
156 spin_unlock(&ib_uverbs_idr_lock); 158 rcu_read_unlock();
157 159
158 return uobj; 160 return uobj;
159} 161}
@@ -2446,6 +2448,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2446 int i, sg_ind; 2448 int i, sg_ind;
2447 int is_ud; 2449 int is_ud;
2448 ssize_t ret = -EINVAL; 2450 ssize_t ret = -EINVAL;
2451 size_t next_size;
2449 2452
2450 if (copy_from_user(&cmd, buf, sizeof cmd)) 2453 if (copy_from_user(&cmd, buf, sizeof cmd))
2451 return -EFAULT; 2454 return -EFAULT;
@@ -2490,7 +2493,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2490 goto out_put; 2493 goto out_put;
2491 } 2494 }
2492 2495
2493 ud = alloc_wr(sizeof(*ud), user_wr->num_sge); 2496 next_size = sizeof(*ud);
2497 ud = alloc_wr(next_size, user_wr->num_sge);
2494 if (!ud) { 2498 if (!ud) {
2495 ret = -ENOMEM; 2499 ret = -ENOMEM;
2496 goto out_put; 2500 goto out_put;
@@ -2511,7 +2515,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2511 user_wr->opcode == IB_WR_RDMA_READ) { 2515 user_wr->opcode == IB_WR_RDMA_READ) {
2512 struct ib_rdma_wr *rdma; 2516 struct ib_rdma_wr *rdma;
2513 2517
2514 rdma = alloc_wr(sizeof(*rdma), user_wr->num_sge); 2518 next_size = sizeof(*rdma);
2519 rdma = alloc_wr(next_size, user_wr->num_sge);
2515 if (!rdma) { 2520 if (!rdma) {
2516 ret = -ENOMEM; 2521 ret = -ENOMEM;
2517 goto out_put; 2522 goto out_put;
@@ -2525,7 +2530,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2525 user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { 2530 user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2526 struct ib_atomic_wr *atomic; 2531 struct ib_atomic_wr *atomic;
2527 2532
2528 atomic = alloc_wr(sizeof(*atomic), user_wr->num_sge); 2533 next_size = sizeof(*atomic);
2534 atomic = alloc_wr(next_size, user_wr->num_sge);
2529 if (!atomic) { 2535 if (!atomic) {
2530 ret = -ENOMEM; 2536 ret = -ENOMEM;
2531 goto out_put; 2537 goto out_put;
@@ -2540,7 +2546,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2540 } else if (user_wr->opcode == IB_WR_SEND || 2546 } else if (user_wr->opcode == IB_WR_SEND ||
2541 user_wr->opcode == IB_WR_SEND_WITH_IMM || 2547 user_wr->opcode == IB_WR_SEND_WITH_IMM ||
2542 user_wr->opcode == IB_WR_SEND_WITH_INV) { 2548 user_wr->opcode == IB_WR_SEND_WITH_INV) {
2543 next = alloc_wr(sizeof(*next), user_wr->num_sge); 2549 next_size = sizeof(*next);
2550 next = alloc_wr(next_size, user_wr->num_sge);
2544 if (!next) { 2551 if (!next) {
2545 ret = -ENOMEM; 2552 ret = -ENOMEM;
2546 goto out_put; 2553 goto out_put;
@@ -2572,7 +2579,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2572 2579
2573 if (next->num_sge) { 2580 if (next->num_sge) {
2574 next->sg_list = (void *) next + 2581 next->sg_list = (void *) next +
2575 ALIGN(sizeof *next, sizeof (struct ib_sge)); 2582 ALIGN(next_size, sizeof(struct ib_sge));
2576 if (copy_from_user(next->sg_list, 2583 if (copy_from_user(next->sg_list,
2577 buf + sizeof cmd + 2584 buf + sizeof cmd +
2578 cmd.wr_count * cmd.wqe_size + 2585 cmd.wr_count * cmd.wqe_size +