author		Dan Carpenter <error27@gmail.com>	2010-10-13 05:13:12 -0400
committer	Roland Dreier <rolandd@cisco.com>	2010-12-08 18:23:49 -0500
commit		7182afea8d1afd432a17c18162cc3fd441d0da93 (patch)
tree		6a39903f54a0b1b95cce0d3774d661d3b1f28977 /drivers/infiniband
parent		59e57c622c3502346e8f930421ebc482d639520c (diff)
IB/uverbs: Handle large number of entries in poll CQ
In ib_uverbs_poll_cq() code there is a potential integer overflow if userspace passes in a large cmd.ne.  The calls to kmalloc() would allocate smaller buffers than intended, leading to memory corruption.  There is also an information leak if resp wasn't all used.  Unprivileged userspace may call this function, although only if an RDMA device that uses this function is present.

Fix this by copying CQ entries one at a time, which avoids the allocation entirely, and also by moving this copying into a function that makes sure to initialize all memory copied to userspace.

Special thanks to Jason Gunthorpe <jgunthorpe@obsidianresearch.com> for his help and advice.

Cc: <stable@kernel.org>
Signed-off-by: Dan Carpenter <error27@gmail.com>

[ Monkey around with things a bit to avoid bad code generation by gcc
  when designated initializers are used.  - Roland ]

Signed-off-by: Roland Dreier <rolandd@cisco.com>
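To make the overflow concrete: the removed sizing arithmetic multiplies a user-controlled count by an element size before allocating, and with 32-bit arithmetic the product can wrap, so kmalloc() hands back a far smaller buffer than the later per-entry writes assume. A minimal sketch of the wraparound follows; the element size is an assumed stand-in for sizeof(struct ib_wc), not the real value.

#include <stdint.h>
#include <stdio.h>

/* Mimics the removed kmalloc(cmd.ne * sizeof *wc, GFP_KERNEL) sizing
 * with 32-bit arithmetic: a large user-supplied count wraps the product. */
int main(void)
{
	uint32_t ne   = 0x20000000;	/* user-controlled cmd.ne */
	uint32_t elem = 64;		/* assumed stand-in for sizeof(struct ib_wc) */
	uint32_t size = ne * elem;	/* 2^29 * 2^6 wraps to 0 modulo 2^32 */

	printf("%u entries of %u bytes -> allocation size %u\n", ne, elem, size);
	return 0;
}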
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/core/uverbs_cmd.c	99
1 file changed, 56 insertions(+), 43 deletions(-)
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index b342248aec05..c42699285f8e 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -893,68 +893,81 @@ out:
 	return ret ? ret : in_len;
 }
 
+static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
+{
+	struct ib_uverbs_wc tmp;
+
+	tmp.wr_id = wc->wr_id;
+	tmp.status = wc->status;
+	tmp.opcode = wc->opcode;
+	tmp.vendor_err = wc->vendor_err;
+	tmp.byte_len = wc->byte_len;
+	tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data;
+	tmp.qp_num = wc->qp->qp_num;
+	tmp.src_qp = wc->src_qp;
+	tmp.wc_flags = wc->wc_flags;
+	tmp.pkey_index = wc->pkey_index;
+	tmp.slid = wc->slid;
+	tmp.sl = wc->sl;
+	tmp.dlid_path_bits = wc->dlid_path_bits;
+	tmp.port_num = wc->port_num;
+	tmp.reserved = 0;
+
+	if (copy_to_user(dest, &tmp, sizeof tmp))
+		return -EFAULT;
+
+	return 0;
+}
+
 ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
 			  const char __user *buf, int in_len,
 			  int out_len)
 {
 	struct ib_uverbs_poll_cq cmd;
-	struct ib_uverbs_poll_cq_resp *resp;
+	struct ib_uverbs_poll_cq_resp resp;
+	u8 __user *header_ptr;
+	u8 __user *data_ptr;
 	struct ib_cq *cq;
-	struct ib_wc *wc;
-	int ret = 0;
-	int i;
-	int rsize;
+	struct ib_wc wc;
+	int ret;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
-	wc = kmalloc(cmd.ne * sizeof *wc, GFP_KERNEL);
-	if (!wc)
-		return -ENOMEM;
-
-	rsize = sizeof *resp + cmd.ne * sizeof(struct ib_uverbs_wc);
-	resp = kmalloc(rsize, GFP_KERNEL);
-	if (!resp) {
-		ret = -ENOMEM;
-		goto out_wc;
-	}
-
 	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
-	if (!cq) {
-		ret = -EINVAL;
-		goto out;
-	}
+	if (!cq)
+		return -EINVAL;
 
-	resp->count = ib_poll_cq(cq, cmd.ne, wc);
+	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
+	header_ptr = (void __user *)(unsigned long) cmd.response;
+	data_ptr = header_ptr + sizeof resp;
 
-	put_cq_read(cq);
+	memset(&resp, 0, sizeof resp);
+	while (resp.count < cmd.ne) {
+		ret = ib_poll_cq(cq, 1, &wc);
+		if (ret < 0)
+			goto out_put;
+		if (!ret)
+			break;
+
+		ret = copy_wc_to_user(data_ptr, &wc);
+		if (ret)
+			goto out_put;
 
-	for (i = 0; i < resp->count; i++) {
-		resp->wc[i].wr_id = wc[i].wr_id;
-		resp->wc[i].status = wc[i].status;
-		resp->wc[i].opcode = wc[i].opcode;
-		resp->wc[i].vendor_err = wc[i].vendor_err;
-		resp->wc[i].byte_len = wc[i].byte_len;
-		resp->wc[i].ex.imm_data = (__u32 __force) wc[i].ex.imm_data;
-		resp->wc[i].qp_num = wc[i].qp->qp_num;
-		resp->wc[i].src_qp = wc[i].src_qp;
-		resp->wc[i].wc_flags = wc[i].wc_flags;
-		resp->wc[i].pkey_index = wc[i].pkey_index;
-		resp->wc[i].slid = wc[i].slid;
-		resp->wc[i].sl = wc[i].sl;
-		resp->wc[i].dlid_path_bits = wc[i].dlid_path_bits;
-		resp->wc[i].port_num = wc[i].port_num;
+		data_ptr += sizeof(struct ib_uverbs_wc);
+		++resp.count;
 	}
 
-	if (copy_to_user((void __user *) (unsigned long) cmd.response, resp, rsize))
+	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
 		ret = -EFAULT;
+		goto out_put;
+	}
 
-out:
-	kfree(resp);
+	ret = in_len;
 
-out_wc:
-	kfree(wc);
-	return ret ? ret : in_len;
+out_put:
+	put_cq_read(cq);
+	return ret;
 }
 
 ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
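For context, the new header_ptr / data_ptr split relies on the response buffer named by cmd.response being laid out as a struct ib_uverbs_poll_cq_resp header followed immediately by an array of struct ib_uverbs_wc entries. Below is a minimal sketch of how a consumer might size and walk such a buffer; the stand-in struct names and abridged fields are illustrative only, the real definitions live in the exported uapi header <rdma/ib_user_verbs.h>.

#include <stdint.h>
#include <stdlib.h>

struct poll_cq_resp_hdr {	/* illustrative stand-in for ib_uverbs_poll_cq_resp */
	uint32_t count;
	uint32_t reserved;
};

struct poll_cq_wc {		/* illustrative stand-in for ib_uverbs_wc (abridged) */
	uint64_t wr_id;
	uint32_t status;
	uint32_t opcode;
	/* ... remaining fields as copied in copy_wc_to_user() above ... */
};

/* Size a response buffer big enough for up to 'ne' completions;
 * the size_t cast keeps the multiplication from wrapping. */
static void *alloc_poll_resp(uint32_t ne)
{
	return calloc(1, sizeof(struct poll_cq_resp_hdr) +
			 (size_t) ne * sizeof(struct poll_cq_wc));
}

/* Completion entries start immediately after the header, which is
 * exactly where the kernel's data_ptr begins writing. */
static const struct poll_cq_wc *first_wc(const void *resp)
{
	return (const struct poll_cq_wc *)
	       ((const char *) resp + sizeof(struct poll_cq_resp_hdr));
}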