author     Roland Dreier <rolandd@cisco.com>    2005-11-02 10:23:14 -0500
committer  Roland Dreier <rolandd@cisco.com>    2005-11-02 10:23:14 -0500
commit     de6eb66b56d9df5ce6bd254994f05e065214e8cd (patch)
tree       7463446a05b5e9a5d2fc400da0be8d4a6c2ff6f1 /drivers
parent     7b28b0d000eeb62d77add636f5d6eb0da04e48aa (diff)
[IB] kzalloc() conversions
Replace kmalloc()+memset(,0,) with kzalloc(), for a net savings of 35
source lines and about 500 bytes of text.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
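The conversion is the same mechanical pattern in every hunk below. As a minimal, generic sketch of the before/after (using a hypothetical struct foo and alloc_foo() helpers, not code from this patch):

    #include <linux/slab.h>         /* kmalloc(), kzalloc(), GFP_KERNEL */
    #include <linux/string.h>       /* memset() */

    struct foo {
            int bar;
    };

    /* Before: allocate, then clear the structure by hand. */
    static struct foo *alloc_foo_old(void)
    {
            struct foo *f = kmalloc(sizeof *f, GFP_KERNEL);

            if (!f)
                    return NULL;
            memset(f, 0, sizeof *f);
            return f;
    }

    /* After: kzalloc() hands back already-zeroed memory, so the memset() disappears. */
    static struct foo *alloc_foo_new(void)
    {
            return kzalloc(sizeof(struct foo), GFP_KERNEL);
    }

Each hunk that follows is this same substitution applied at an existing allocation site.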
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/core/agent.c                |  3
-rw-r--r--  drivers/infiniband/core/cm.c                   |  6
-rw-r--r--  drivers/infiniband/core/device.c               | 10
-rw-r--r--  drivers/infiniband/core/mad.c                  | 31
-rw-r--r--  drivers/infiniband/core/sysfs.c                |  6
-rw-r--r--  drivers/infiniband/core/ucm.c                  |  9
-rw-r--r--  drivers/infiniband/core/uverbs_main.c          |  4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mr.c         |  4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_profile.c    |  4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c      |  8
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c |  4
11 files changed, 27 insertions(+), 62 deletions(-)
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index 0c3c6952faae..7545775d38ef 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -155,13 +155,12 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
         int ret;
 
         /* Create new device info */
-        port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
+        port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
         if (!port_priv) {
                 printk(KERN_ERR SPFX "No memory for ib_agent_port_private\n");
                 ret = -ENOMEM;
                 goto error1;
         }
-        memset(port_priv, 0, sizeof *port_priv);
 
         /* Obtain send only MAD agent for SMI QP */
         port_priv->agent[0] = ib_register_mad_agent(device, port_num,
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 580c3a2bb102..02110e00d145 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -544,11 +544,10 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
         struct cm_id_private *cm_id_priv;
         int ret;
 
-        cm_id_priv = kmalloc(sizeof *cm_id_priv, GFP_KERNEL);
+        cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
         if (!cm_id_priv)
                 return ERR_PTR(-ENOMEM);
 
-        memset(cm_id_priv, 0, sizeof *cm_id_priv);
         cm_id_priv->id.state = IB_CM_IDLE;
         cm_id_priv->id.device = device;
         cm_id_priv->id.cm_handler = cm_handler;
@@ -621,10 +620,9 @@ static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
 {
         struct cm_timewait_info *timewait_info;
 
-        timewait_info = kmalloc(sizeof *timewait_info, GFP_KERNEL);
+        timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
         if (!timewait_info)
                 return ERR_PTR(-ENOMEM);
-        memset(timewait_info, 0, sizeof *timewait_info);
 
         timewait_info->work.local_id = local_id;
         INIT_WORK(&timewait_info->work.work, cm_work_handler,
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 5a6e44976405..e169e798354b 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -161,17 +161,9 @@ static int alloc_name(char *name)
  */
 struct ib_device *ib_alloc_device(size_t size)
 {
-        void *dev;
-
         BUG_ON(size < sizeof (struct ib_device));
 
-        dev = kmalloc(size, GFP_KERNEL);
-        if (!dev)
-                return NULL;
-
-        memset(dev, 0, size);
-
-        return dev;
+        return kzalloc(size, GFP_KERNEL);
 }
 EXPORT_SYMBOL(ib_alloc_device);
 
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 88f9f8c9eacc..3d8175e5f054 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -255,12 +255,11 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
         }
 
         /* Allocate structures */
-        mad_agent_priv = kmalloc(sizeof *mad_agent_priv, GFP_KERNEL);
+        mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
         if (!mad_agent_priv) {
                 ret = ERR_PTR(-ENOMEM);
                 goto error1;
         }
-        memset(mad_agent_priv, 0, sizeof *mad_agent_priv);
 
         mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
                                                  IB_ACCESS_LOCAL_WRITE);
@@ -448,14 +447,13 @@ struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
                 goto error1;
         }
         /* Allocate structures */
-        mad_snoop_priv = kmalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
+        mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
         if (!mad_snoop_priv) {
                 ret = ERR_PTR(-ENOMEM);
                 goto error1;
         }
 
         /* Now, fill in the various structures */
-        memset(mad_snoop_priv, 0, sizeof *mad_snoop_priv);
         mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
         mad_snoop_priv->agent.device = device;
         mad_snoop_priv->agent.recv_handler = recv_handler;
@@ -794,10 +792,9 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
             (!rmpp_active && buf_size > sizeof(struct ib_mad)))
                 return ERR_PTR(-EINVAL);
 
-        buf = kmalloc(sizeof *mad_send_wr + buf_size, gfp_mask);
+        buf = kzalloc(sizeof *mad_send_wr + buf_size, gfp_mask);
         if (!buf)
                 return ERR_PTR(-ENOMEM);
-        memset(buf, 0, sizeof *mad_send_wr + buf_size);
 
         mad_send_wr = buf + buf_size;
         mad_send_wr->send_buf.mad = buf;
@@ -1039,14 +1036,12 @@ static int method_in_use(struct ib_mad_mgmt_method_table **method,
 static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
 {
         /* Allocate management method table */
-        *method = kmalloc(sizeof **method, GFP_ATOMIC);
+        *method = kzalloc(sizeof **method, GFP_ATOMIC);
         if (!*method) {
                 printk(KERN_ERR PFX "No memory for "
                        "ib_mad_mgmt_method_table\n");
                 return -ENOMEM;
         }
-        /* Clear management method table */
-        memset(*method, 0, sizeof **method);
 
         return 0;
 }
@@ -1137,15 +1132,14 @@ static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
         class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
         if (!*class) {
                 /* Allocate management class table for "new" class version */
-                *class = kmalloc(sizeof **class, GFP_ATOMIC);
+                *class = kzalloc(sizeof **class, GFP_ATOMIC);
                 if (!*class) {
                         printk(KERN_ERR PFX "No memory for "
                                "ib_mad_mgmt_class_table\n");
                         ret = -ENOMEM;
                         goto error1;
                 }
-                /* Clear management class table */
-                memset(*class, 0, sizeof(**class));
+
                 /* Allocate method table for this management class */
                 method = &(*class)->method_table[mgmt_class];
                 if ((ret = allocate_method_table(method)))
@@ -1209,25 +1203,24 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
                                             mad_reg_req->mgmt_class_version].vendor;
         if (!*vendor_table) {
                 /* Allocate mgmt vendor class table for "new" class version */
-                vendor = kmalloc(sizeof *vendor, GFP_ATOMIC);
+                vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
                 if (!vendor) {
                         printk(KERN_ERR PFX "No memory for "
                                "ib_mad_mgmt_vendor_class_table\n");
                         goto error1;
                 }
-                /* Clear management vendor class table */
-                memset(vendor, 0, sizeof(*vendor));
+
                 *vendor_table = vendor;
         }
         if (!(*vendor_table)->vendor_class[vclass]) {
                 /* Allocate table for this management vendor class */
-                vendor_class = kmalloc(sizeof *vendor_class, GFP_ATOMIC);
+                vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
                 if (!vendor_class) {
                         printk(KERN_ERR PFX "No memory for "
                                "ib_mad_mgmt_vendor_class\n");
                         goto error2;
                 }
-                memset(vendor_class, 0, sizeof(*vendor_class));
+
                 (*vendor_table)->vendor_class[vclass] = vendor_class;
         }
         for (i = 0; i < MAX_MGMT_OUI; i++) {
@@ -2524,12 +2517,12 @@ static int ib_mad_port_open(struct ib_device *device,
         char name[sizeof "ib_mad123"];
 
         /* Create new device info */
-        port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
+        port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
         if (!port_priv) {
                 printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
                 return -ENOMEM;
         }
-        memset(port_priv, 0, sizeof *port_priv);
+
         port_priv->device = device;
         port_priv->port_num = port_num;
         spin_lock_init(&port_priv->reg_lock);
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 7ce7a6c782fa..b8120650e711 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -307,14 +307,13 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
         if (!p->ibdev->process_mad)
                 return sprintf(buf, "N/A (no PMA)\n");
 
-        in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL);
+        in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
         out_mad = kmalloc(sizeof *in_mad, GFP_KERNEL);
         if (!in_mad || !out_mad) {
                 ret = -ENOMEM;
                 goto out;
         }
 
-        memset(in_mad, 0, sizeof *in_mad);
         in_mad->mad_hdr.base_version = 1;
         in_mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_PERF_MGMT;
         in_mad->mad_hdr.class_version = 1;
@@ -508,10 +507,9 @@ static int add_port(struct ib_device *device, int port_num)
         if (ret)
                 return ret;
 
-        p = kmalloc(sizeof *p, GFP_KERNEL);
+        p = kzalloc(sizeof *p, GFP_KERNEL);
         if (!p)
                 return -ENOMEM;
-        memset(p, 0, sizeof *p);
 
         p->ibdev = device;
         p->port_num = port_num;
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 28477565ecba..6e15787d1de1 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -172,11 +172,10 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
         struct ib_ucm_context *ctx;
         int result;
 
-        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+        ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
         if (!ctx)
                 return NULL;
 
-        memset(ctx, 0, sizeof *ctx);
         atomic_set(&ctx->ref, 1);
         init_waitqueue_head(&ctx->wait);
         ctx->file = file;
@@ -386,11 +385,10 @@ static int ib_ucm_event_handler(struct ib_cm_id *cm_id,
 
         ctx = cm_id->context;
 
-        uevent = kmalloc(sizeof(*uevent), GFP_KERNEL);
+        uevent = kzalloc(sizeof *uevent, GFP_KERNEL);
         if (!uevent)
                 goto err1;
 
-        memset(uevent, 0, sizeof(*uevent));
         uevent->ctx = ctx;
         uevent->cm_id = cm_id;
         uevent->resp.uid = ctx->uid;
@@ -1345,11 +1343,10 @@ static void ib_ucm_add_one(struct ib_device *device)
         if (!device->alloc_ucontext)
                 return;
 
-        ucm_dev = kmalloc(sizeof *ucm_dev, GFP_KERNEL);
+        ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL);
         if (!ucm_dev)
                 return;
 
-        memset(ucm_dev, 0, sizeof *ucm_dev);
         ucm_dev->ib_dev = device;
 
         ucm_dev->devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES);
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index e58a7b278a00..de6581d7cb8d 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -725,12 +725,10 @@ static void ib_uverbs_add_one(struct ib_device *device)
         if (!device->alloc_ucontext)
                 return;
 
-        uverbs_dev = kmalloc(sizeof *uverbs_dev, GFP_KERNEL);
+        uverbs_dev = kzalloc(sizeof *uverbs_dev, GFP_KERNEL);
         if (!uverbs_dev)
                 return;
 
-        memset(uverbs_dev, 0, sizeof *uverbs_dev);
-
         kref_init(&uverbs_dev->ref);
 
         spin_lock(&map_lock);
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 1f97a44477f5..e995e2aa016d 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -140,13 +140,11 @@ static int __devinit mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
         buddy->max_order = max_order;
         spin_lock_init(&buddy->lock);
 
-        buddy->bits = kmalloc((buddy->max_order + 1) * sizeof (long *),
+        buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
                               GFP_KERNEL);
         if (!buddy->bits)
                 goto err_out;
 
-        memset(buddy->bits, 0, (buddy->max_order + 1) * sizeof (long *));
-
         for (i = 0; i <= buddy->max_order; ++i) {
                 s = BITS_TO_LONGS(1 << (buddy->max_order - i));
                 buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
diff --git a/drivers/infiniband/hw/mthca/mthca_profile.c b/drivers/infiniband/hw/mthca/mthca_profile.c
index 0576056b34f4..408cd551bff1 100644
--- a/drivers/infiniband/hw/mthca/mthca_profile.c
+++ b/drivers/infiniband/hw/mthca/mthca_profile.c
@@ -80,12 +80,10 @@ u64 mthca_make_profile(struct mthca_dev *dev,
         struct mthca_resource tmp;
         int i, j;
 
-        profile = kmalloc(MTHCA_RES_NUM * sizeof *profile, GFP_KERNEL);
+        profile = kzalloc(MTHCA_RES_NUM * sizeof *profile, GFP_KERNEL);
         if (!profile)
                 return -ENOMEM;
 
-        memset(profile, 0, MTHCA_RES_NUM * sizeof *profile);
-
         profile[MTHCA_RES_QP].size = dev_lim->qpc_entry_sz;
         profile[MTHCA_RES_EEC].size = dev_lim->eec_entry_sz;
         profile[MTHCA_RES_SRQ].size = dev_lim->srq_entry_sz;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 273d5f418a67..8b67db868306 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -729,25 +729,21 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
 
         /* Allocate RX/TX "rings" to hold queued skbs */
 
-        priv->rx_ring = kmalloc(IPOIB_RX_RING_SIZE * sizeof (struct ipoib_rx_buf),
+        priv->rx_ring = kzalloc(IPOIB_RX_RING_SIZE * sizeof (struct ipoib_rx_buf),
                                 GFP_KERNEL);
         if (!priv->rx_ring) {
                 printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
                        ca->name, IPOIB_RX_RING_SIZE);
                 goto out;
         }
-        memset(priv->rx_ring, 0,
-               IPOIB_RX_RING_SIZE * sizeof (struct ipoib_rx_buf));
 
-        priv->tx_ring = kmalloc(IPOIB_TX_RING_SIZE * sizeof (struct ipoib_tx_buf),
+        priv->tx_ring = kzalloc(IPOIB_TX_RING_SIZE * sizeof (struct ipoib_tx_buf),
                                 GFP_KERNEL);
         if (!priv->tx_ring) {
                 printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
                        ca->name, IPOIB_TX_RING_SIZE);
                 goto out_rx_ring_cleanup;
         }
-        memset(priv->tx_ring, 0,
-               IPOIB_TX_RING_SIZE * sizeof (struct ipoib_tx_buf));
 
         /* priv->tx_head & tx_tail are already 0 */
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 36ce29836bf2..022eec730751 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -135,12 +135,10 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
 {
         struct ipoib_mcast *mcast;
 
-        mcast = kmalloc(sizeof (*mcast), can_sleep ? GFP_KERNEL : GFP_ATOMIC);
+        mcast = kzalloc(sizeof *mcast, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
         if (!mcast)
                 return NULL;
 
-        memset(mcast, 0, sizeof (*mcast));
-
         init_completion(&mcast->done);
 
         mcast->dev = dev;