author		Andrew Morton <akpm@osdl.org>	2006-04-07 17:52:59 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2006-04-10 01:25:48 -0400
commit		77d04bd957ddca9d48a664e28b40f33993f4550e (patch)
tree		8940d3ab7557aed03756e05eec13605e315c887e /net/core
parent		31380de95cc3183bbb379339e67f83d69e56fbd6 (diff)
[NET]: More kzalloc conversions.
Signed-off-by: David S. Miller <davem@davemloft.net>
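
Every hunk below follows one mechanical pattern: a kmalloc() followed by a memset(..., 0, size) of the same region collapses into a single kzalloc() call, and for multi-page allocations from __get_free_pages() the zeroing moves into a __GFP_ZERO gfp flag. A minimal before/after sketch of the slab case; struct foo_state and both helpers are illustrative names, not code from this patch:

#include <linux/slab.h>
#include <linux/string.h>

struct foo_state {              /* illustrative only */
        int refcnt;
        unsigned long flags;
};

/* Before: allocate, then zero by hand; the size must be repeated. */
static struct foo_state *foo_alloc_old(void)
{
        struct foo_state *f = kmalloc(sizeof(*f), GFP_KERNEL);

        if (f == NULL)
                return NULL;
        memset(f, 0, sizeof(*f));
        return f;
}

/* After: kzalloc() allocates and zeroes in one call. */
static struct foo_state *foo_alloc_new(void)
{
        return kzalloc(sizeof(struct foo_state), GFP_KERNEL);
}

Besides saving lines, the conversion removes a class of bugs where the memset() size drifts out of sync with the allocation size.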
Diffstat (limited to 'net/core')
 net/core/dv.c            |  5 +----
 net/core/flow.c          |  4 +---
 net/core/gen_estimator.c |  3 +--
 net/core/neighbour.c     | 14 ++++----------
 net/core/request_sock.c  |  4 +---
 5 files changed, 8 insertions(+), 22 deletions(-)
diff --git a/net/core/dv.c b/net/core/dv.c
index cf581407538c..29ee77f15932 100644
--- a/net/core/dv.c
+++ b/net/core/dv.c
@@ -55,15 +55,12 @@ int alloc_divert_blk(struct net_device *dev)
 
 	dev->divert = NULL;
 	if (dev->type == ARPHRD_ETHER) {
-		dev->divert = (struct divert_blk *)
-			kmalloc(alloc_size, GFP_KERNEL);
+		dev->divert = kzalloc(alloc_size, GFP_KERNEL);
 		if (dev->divert == NULL) {
 			printk(KERN_INFO "divert: unable to allocate divert_blk for %s\n",
 			       dev->name);
 			return -ENOMEM;
 		}
-
-		memset(dev->divert, 0, sizeof(struct divert_blk));
 		dev_hold(dev);
 	}
 
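
Beyond the zeroing, the dv.c hunk also drops the (struct divert_blk *) cast: kzalloc(), like kmalloc(), returns void *, which converts implicitly to any object pointer type in C, so the cast was pure noise. A sketch of the idiomatic form; divert_alloc() is a hypothetical helper, not a function in dv.c:

#include <linux/slab.h>

struct divert_blk;      /* layout irrelevant to the example */

static struct divert_blk *divert_alloc(size_t alloc_size)
{
        /* No cast needed: void * converts implicitly in C (unlike C++). */
        return kzalloc(alloc_size, GFP_KERNEL);
}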
diff --git a/net/core/flow.c b/net/core/flow.c
index 55789f832eda..885a2f655db0 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -318,12 +318,10 @@ static void __devinit flow_cache_cpu_prepare(int cpu)
 		/* NOTHING */;
 
 	flow_table(cpu) = (struct flow_cache_entry **)
-		__get_free_pages(GFP_KERNEL, order);
+		__get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
 	if (!flow_table(cpu))
 		panic("NET: failed to allocate flow cache order %lu\n", order);
 
-	memset(flow_table(cpu), 0, PAGE_SIZE << order);
-
 	flow_hash_rnd_recalc(cpu) = 1;
 	flow_count(cpu) = 0;
 
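
flow.c cannot use kzalloc() here because its per-CPU hash table may span several pages and comes from __get_free_pages(); instead the zeroing is requested with the __GFP_ZERO flag, which clears the whole PAGE_SIZE << order region, exactly what the removed memset() covered. A minimal sketch of the pattern with hypothetical helper names:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Allocate a zeroed array of n pointers that may exceed one page. */
static void **table_alloc(unsigned int n)
{
        unsigned long order = get_order(n * sizeof(void *));

        /* __GFP_ZERO zeroes all PAGE_SIZE << order bytes at allocation. */
        return (void **)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
}

static void table_free(void **tbl, unsigned int n)
{
        free_pages((unsigned long)tbl, get_order(n * sizeof(void *)));
}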
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index b07c029e8219..3cad026764f0 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -159,11 +159,10 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
 	if (parm->interval < -2 || parm->interval > 3)
 		return -EINVAL;
 
-	est = kmalloc(sizeof(*est), GFP_KERNEL);
+	est = kzalloc(sizeof(*est), GFP_KERNEL);
 	if (est == NULL)
 		return -ENOBUFS;
 
-	memset(est, 0, sizeof(*est));
 	est->interval = parm->interval + 2;
 	est->bstats = bstats;
 	est->rate_est = rate_est;
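
Two small idioms are visible in this hunk: kzalloc(sizeof(*est), ...) sizes the allocation from the pointer, so it stays correct if est's type ever changes, and once the memory is pre-zeroed only fields with non-zero initial values need explicit stores. A sketch under those assumptions; struct est is a stand-in, not the real struct gen_estimator:

#include <linux/slab.h>
#include <linux/types.h>

struct est {                    /* stand-in for struct gen_estimator */
        int interval;
        u32 avbps;
        u32 avpps;
};

static struct est *est_alloc(int interval)
{
        /* sizeof(*e) tracks the pointee type, not a hard-coded name. */
        struct est *e = kzalloc(sizeof(*e), GFP_KERNEL);

        if (e == NULL)
                return NULL;
        /* avbps/avpps start at zero; kzalloc() already handled them. */
        e->interval = interval + 2;
        return e;
}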
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 0c8666872d10..2ec8693fb778 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -284,14 +284,11 @@ static struct neighbour **neigh_hash_alloc(unsigned int entries)
 	struct neighbour **ret;
 
 	if (size <= PAGE_SIZE) {
-		ret = kmalloc(size, GFP_ATOMIC);
+		ret = kzalloc(size, GFP_ATOMIC);
 	} else {
 		ret = (struct neighbour **)
-		      __get_free_pages(GFP_ATOMIC, get_order(size));
+		      __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
 	}
-	if (ret)
-		memset(ret, 0, size);
-
 	return ret;
 }
 
@@ -1089,8 +1086,7 @@ static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
 		if (hh->hh_type == protocol)
 			break;
 
-	if (!hh && (hh = kmalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
-		memset(hh, 0, sizeof(struct hh_cache));
+	if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
 		rwlock_init(&hh->hh_lock);
 		hh->hh_type = protocol;
 		atomic_set(&hh->hh_refcnt, 0);
@@ -1366,13 +1362,11 @@ void neigh_table_init(struct neigh_table *tbl)
 	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);
 
 	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
-	tbl->phash_buckets = kmalloc(phsize, GFP_KERNEL);
+	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
 
 	if (!tbl->hash_buckets || !tbl->phash_buckets)
 		panic("cannot allocate neighbour cache hashes");
 
-	memset(tbl->phash_buckets, 0, phsize);
-
 	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
 
 	rwlock_init(&tbl->lock);
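
neigh_hash_alloc() is the interesting case: the table comes from the slab when it fits in a page and straight from the page allocator otherwise, so both branches have to be converted (kzalloc() on one side, __GFP_ZERO on the other) before the shared memset() can go. The free path must branch on the same size test. A sketch of the shape with hypothetical names, not the actual neighbour.c functions:

#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static void *hash_alloc(size_t size)
{
        if (size <= PAGE_SIZE)
                return kzalloc(size, GFP_ATOMIC);
        /* Larger than a page: page allocator, zeroed via gfp flag. */
        return (void *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
                                        get_order(size));
}

/* Freeing must make the same size test the allocation did. */
static void hash_free(void *p, size_t size)
{
        if (size <= PAGE_SIZE)
                kfree(p);
        else
                free_pages((unsigned long)p, get_order(size));
}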
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 1e44eda1fda9..79ebd75fbe4d 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -38,13 +38,11 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
 {
 	const int lopt_size = sizeof(struct listen_sock) +
 			      nr_table_entries * sizeof(struct request_sock *);
-	struct listen_sock *lopt = kmalloc(lopt_size, GFP_KERNEL);
+	struct listen_sock *lopt = kzalloc(lopt_size, GFP_KERNEL);
 
 	if (lopt == NULL)
 		return -ENOMEM;
 
-	memset(lopt, 0, lopt_size);
-
 	for (lopt->max_qlen_log = 6;
 	     (1 << lopt->max_qlen_log) < sysctl_max_syn_backlog;
 	     lopt->max_qlen_log++);
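
reqsk_queue_alloc() allocates a header and a trailing hash table in one block, so a single kzalloc() now zeroes both the listen_sock fields and every table slot; the loop that follows finds the smallest power of two of at least sysctl_max_syn_backlog, starting from 2^6 = 64. A self-contained sketch with hypothetical names, not the real struct listen_sock:

#include <linux/slab.h>

struct listen_tbl {                     /* hypothetical stand-in */
        int max_qlen_log;
        void *syn_table[0];             /* trailing table, old-style flexible array */
};

static struct listen_tbl *listen_tbl_alloc(unsigned int nr_entries, int backlog)
{
        size_t size = sizeof(struct listen_tbl) +
                      nr_entries * sizeof(void *);
        /* One kzalloc() clears the header and every table slot. */
        struct listen_tbl *t = kzalloc(size, GFP_KERNEL);

        if (t == NULL)
                return NULL;

        /* Smallest power of two >= backlog, starting at 2^6 = 64. */
        for (t->max_qlen_log = 6;
             (1 << t->max_qlen_log) < backlog;
             t->max_qlen_log++)
                ;
        return t;
}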