about summary refs log tree commit diff stats
path: root/drivers/infiniband
diff options
context:
space:
mode:
author: Sean Hefty <sean.hefty@intel.com> 2005-10-27 23:48:11 -0400
committer: Roland Dreier <rolandd@cisco.com> 2005-10-27 23:48:11 -0400
commit: cb0f0910f4b41772a6771bdb4fb2d419b27bcd77 (patch)
tree: fe34505915c31b3b77296e3828cd7675f4616ba9 /drivers/infiniband
parent: 089a1bedd84be16a4f49a319e7ccb4a128da5ce9 (diff)
[IB] ib_umad: various cleanups
Simplify user_mad.c code in a few places, and convert from kmalloc() + memset() to kzalloc().

This also fixes a theoretical race window by not accessing packet->length after posting the send buffer (the send could complete and packet could be freed before we get to the return statement at the end of ib_umad_write()).

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r-- drivers/infiniband/core/user_mad.c | 73
1 file changed, 21 insertions(+), 52 deletions(-)
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index a48166a8e04b..17ec0a19dbc0 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -99,7 +99,6 @@ struct ib_umad_packet {
99 struct ib_mad_send_buf *msg; 99 struct ib_mad_send_buf *msg;
100 struct list_head list; 100 struct list_head list;
101 int length; 101 int length;
102 DECLARE_PCI_UNMAP_ADDR(mapping)
103 struct ib_user_mad mad; 102 struct ib_user_mad mad;
104}; 103};
105 104
@@ -145,15 +144,12 @@ static void send_handler(struct ib_mad_agent *agent,
145 ib_free_send_mad(packet->msg); 144 ib_free_send_mad(packet->msg);
146 145
147 if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) { 146 if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
148 timeout = kmalloc(sizeof *timeout + sizeof (struct ib_mad_hdr), 147 timeout = kzalloc(sizeof *timeout + IB_MGMT_MAD_HDR, GFP_KERNEL);
149 GFP_KERNEL);
150 if (!timeout) 148 if (!timeout)
151 goto out; 149 goto out;
152 150
153 memset(timeout, 0, sizeof *timeout + sizeof (struct ib_mad_hdr)); 151 timeout->length = IB_MGMT_MAD_HDR;
154 152 timeout->mad.hdr.id = packet->mad.hdr.id;
155 timeout->length = sizeof (struct ib_mad_hdr);
156 timeout->mad.hdr.id = packet->mad.hdr.id;
157 timeout->mad.hdr.status = ETIMEDOUT; 153 timeout->mad.hdr.status = ETIMEDOUT;
158 memcpy(timeout->mad.data, packet->mad.data, 154 memcpy(timeout->mad.data, packet->mad.data,
159 sizeof (struct ib_mad_hdr)); 155 sizeof (struct ib_mad_hdr));
@@ -176,11 +172,10 @@ static void recv_handler(struct ib_mad_agent *agent,
176 goto out; 172 goto out;
177 173
178 length = mad_recv_wc->mad_len; 174 length = mad_recv_wc->mad_len;
179 packet = kmalloc(sizeof *packet + length, GFP_KERNEL); 175 packet = kzalloc(sizeof *packet + length, GFP_KERNEL);
180 if (!packet) 176 if (!packet)
181 goto out; 177 goto out;
182 178
183 memset(packet, 0, sizeof *packet + length);
184 packet->length = length; 179 packet->length = length;
185 180
186 ib_coalesce_recv_mad(mad_recv_wc, packet->mad.data); 181 ib_coalesce_recv_mad(mad_recv_wc, packet->mad.data);
@@ -246,7 +241,7 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
246 else 241 else
247 ret = -ENOSPC; 242 ret = -ENOSPC;
248 } else if (copy_to_user(buf, &packet->mad, 243 } else if (copy_to_user(buf, &packet->mad,
249 packet->length + sizeof (struct ib_user_mad))) 244 packet->length + sizeof (struct ib_user_mad)))
250 ret = -EFAULT; 245 ret = -EFAULT;
251 else 246 else
252 ret = packet->length + sizeof (struct ib_user_mad); 247 ret = packet->length + sizeof (struct ib_user_mad);
@@ -271,22 +266,19 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
271 struct ib_rmpp_mad *rmpp_mad; 266 struct ib_rmpp_mad *rmpp_mad;
272 u8 method; 267 u8 method;
273 __be64 *tid; 268 __be64 *tid;
274 int ret, length, hdr_len, rmpp_hdr_size; 269 int ret, length, hdr_len, copy_offset;
275 int rmpp_active = 0; 270 int rmpp_active = 0;
276 271
277 if (count < sizeof (struct ib_user_mad)) 272 if (count < sizeof (struct ib_user_mad))
278 return -EINVAL; 273 return -EINVAL;
279 274
280 length = count - sizeof (struct ib_user_mad); 275 length = count - sizeof (struct ib_user_mad);
281 packet = kmalloc(sizeof *packet + sizeof(struct ib_mad_hdr) + 276 packet = kmalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
282 sizeof (struct ib_rmpp_hdr), GFP_KERNEL);
283 if (!packet) 277 if (!packet)
284 return -ENOMEM; 278 return -ENOMEM;
285 279
286 if (copy_from_user(&packet->mad, buf, 280 if (copy_from_user(&packet->mad, buf,
287 sizeof (struct ib_user_mad) + 281 sizeof (struct ib_user_mad) + IB_MGMT_RMPP_HDR)) {
288 sizeof (struct ib_mad_hdr) +
289 sizeof (struct ib_rmpp_hdr))) {
290 ret = -EFAULT; 282 ret = -EFAULT;
291 goto err; 283 goto err;
292 } 284 }
@@ -297,8 +289,6 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
297 goto err; 289 goto err;
298 } 290 }
299 291
300 packet->length = length;
301
302 down_read(&file->agent_mutex); 292 down_read(&file->agent_mutex);
303 293
304 agent = file->agent[packet->mad.hdr.id]; 294 agent = file->agent[packet->mad.hdr.id];
@@ -345,12 +335,10 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
345 goto err_ah; 335 goto err_ah;
346 } 336 }
347 rmpp_active = 1; 337 rmpp_active = 1;
338 copy_offset = IB_MGMT_RMPP_HDR;
348 } else { 339 } else {
349 if (length > sizeof (struct ib_mad)) {
350 ret = -EINVAL;
351 goto err_ah;
352 }
353 hdr_len = IB_MGMT_MAD_HDR; 340 hdr_len = IB_MGMT_MAD_HDR;
341 copy_offset = IB_MGMT_MAD_HDR;
354 } 342 }
355 343
356 packet->msg = ib_create_send_mad(agent, 344 packet->msg = ib_create_send_mad(agent,
@@ -368,28 +356,14 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
368 packet->msg->retries = packet->mad.hdr.retries; 356 packet->msg->retries = packet->mad.hdr.retries;
369 packet->msg->context[0] = packet; 357 packet->msg->context[0] = packet;
370 358
371 if (!rmpp_active) { 359 /* Copy MAD headers (RMPP header in place) */
372 /* Copy message from user into send buffer */ 360 memcpy(packet->msg->mad, packet->mad.data, IB_MGMT_MAD_HDR);
373 if (copy_from_user(packet->msg->mad, 361 /* Now, copy rest of message from user into send buffer */
374 buf + sizeof (struct ib_user_mad), length)) { 362 if (copy_from_user(packet->msg->mad + copy_offset,
375 ret = -EFAULT; 363 buf + sizeof (struct ib_user_mad) + copy_offset,
376 goto err_msg; 364 length - copy_offset)) {
377 } 365 ret = -EFAULT;
378 } else { 366 goto err_msg;
379 rmpp_hdr_size = sizeof (struct ib_mad_hdr) +
380 sizeof (struct ib_rmpp_hdr);
381
382 /* Only copy MAD headers (RMPP header in place) */
383 memcpy(packet->msg->mad, packet->mad.data,
384 sizeof (struct ib_mad_hdr));
385
386 /* Now, copy rest of message from user into send buffer */
387 if (copy_from_user(((struct ib_rmpp_mad *) packet->msg->mad)->data,
388 buf + sizeof (struct ib_user_mad) + rmpp_hdr_size,
389 length - rmpp_hdr_size)) {
390 ret = -EFAULT;
391 goto err_msg;
392 }
393 } 367 }
394 368
395 /* 369 /*
@@ -414,7 +388,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
414 388
415 up_read(&file->agent_mutex); 389 up_read(&file->agent_mutex);
416 390
417 return sizeof (struct ib_user_mad_hdr) + packet->length; 391 return count;
418 392
419err_msg: 393err_msg:
420 ib_free_send_mad(packet->msg); 394 ib_free_send_mad(packet->msg);
@@ -564,12 +538,10 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
564 container_of(inode->i_cdev, struct ib_umad_port, dev); 538 container_of(inode->i_cdev, struct ib_umad_port, dev);
565 struct ib_umad_file *file; 539 struct ib_umad_file *file;
566 540
567 file = kmalloc(sizeof *file, GFP_KERNEL); 541 file = kzalloc(sizeof *file, GFP_KERNEL);
568 if (!file) 542 if (!file)
569 return -ENOMEM; 543 return -ENOMEM;
570 544
571 memset(file, 0, sizeof *file);
572
573 spin_lock_init(&file->recv_lock); 545 spin_lock_init(&file->recv_lock);
574 init_rwsem(&file->agent_mutex); 546 init_rwsem(&file->agent_mutex);
575 INIT_LIST_HEAD(&file->recv_list); 547 INIT_LIST_HEAD(&file->recv_list);
@@ -814,15 +786,12 @@ static void ib_umad_add_one(struct ib_device *device)
814 e = device->phys_port_cnt; 786 e = device->phys_port_cnt;
815 } 787 }
816 788
817 umad_dev = kmalloc(sizeof *umad_dev + 789 umad_dev = kzalloc(sizeof *umad_dev +
818 (e - s + 1) * sizeof (struct ib_umad_port), 790 (e - s + 1) * sizeof (struct ib_umad_port),
819 GFP_KERNEL); 791 GFP_KERNEL);
820 if (!umad_dev) 792 if (!umad_dev)
821 return; 793 return;
822 794
823 memset(umad_dev, 0, sizeof *umad_dev +
824 (e - s + 1) * sizeof (struct ib_umad_port));
825
826 kref_init(&umad_dev->ref); 795 kref_init(&umad_dev->ref);
827 796
828 umad_dev->start_port = s; 797 umad_dev->start_port = s;