path: root/net/sched/sch_hhf.c
author		Michal Hocko <mhocko@suse.com>	2017-05-08 18:57:27 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-05-08 20:15:13 -0400
commit		752ade68cbd81d0321dfecc188f655a945551b25 (patch)
tree		34b2ba4cabedb829639c925b7b38dd2f73aa02a9 /net/sched/sch_hhf.c
parent		81be3dee96346fbe08c31be5ef74f03f6b63cf68 (diff)
treewide: use kv[mz]alloc* rather than opencoded variants
There are many code paths opencoding kvmalloc. Let's use the helper instead. The main difference to kvmalloc is that those users are usually not considering all the aspects of the memory allocator. E.g. allocation requests <= 32kB (with 4kB pages) basically never fail and instead invoke the OOM killer to satisfy the allocation. This sounds too disruptive for something that has a reasonable fallback - the vmalloc. On the other hand, those requests might now fall back to vmalloc even where the memory allocator would previously have succeeded after several more reclaim/compaction attempts. There is no guarantee anything like that happens, though.

This patch converts many of those places to the kv[mz]alloc* helpers because they are more conservative.

Link: http://lkml.kernel.org/r/20170306103327.2766-2-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com> # Xen bits
Acked-by: Kees Cook <keescook@chromium.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Andreas Dilger <andreas.dilger@intel.com> # Lustre
Acked-by: Christian Borntraeger <borntraeger@de.ibm.com> # KVM/s390
Acked-by: Dan Williams <dan.j.williams@intel.com> # nvdimm
Acked-by: David Sterba <dsterba@suse.com> # btrfs
Acked-by: Ilya Dryomov <idryomov@gmail.com> # Ceph
Acked-by: Tariq Toukan <tariqt@mellanox.com> # mlx4
Acked-by: Leon Romanovsky <leonro@mellanox.com> # mlx5
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Anton Vorontsov <anton@enomsg.org>
Cc: Colin Cross <ccross@android.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Ben Skeggs <bskeggs@redhat.com>
Cc: Kent Overstreet <kent.overstreet@gmail.com>
Cc: Santosh Raspatur <santosh@chelsio.com>
Cc: Hariprasad S <hariprasad@chelsio.com>
Cc: Yishai Hadas <yishaih@mellanox.com>
Cc: Oleg Drokin <oleg.drokin@intel.com>
Cc: "Yan, Zheng" <zyan@redhat.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: David Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
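To make the conversion concrete, the sketch below contrasts the open-coded pattern with the helper, assuming a hypothetical private table allocator (the tbl_zalloc* names are made up for illustration; kzalloc(), vzalloc(), kvzalloc() and kvfree() are the real kernel APIs involved):

#include <linux/slab.h>		/* kzalloc() */
#include <linux/vmalloc.h>	/* vzalloc() */
#include <linux/mm.h>		/* kvzalloc(), kvfree() */

/* Before: the open-coded fallback, as hhf_zalloc() implemented it. */
static void *tbl_zalloc_opencoded(size_t sz)
{
	/* Try physically contiguous memory first; __GFP_NOWARN keeps the
	 * expected failure of larger requests from spamming the log.
	 */
	void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);

	/* Fall back to virtually contiguous (vmalloc) memory. */
	if (!ptr)
		ptr = vzalloc(sz);
	return ptr;
}

/* After: the helper encodes the same fallback policy centrally, and more
 * conservatively with respect to reclaim and the OOM killer.
 */
static void *tbl_zalloc(size_t sz)
{
	return kvzalloc(sz, GFP_KERNEL);
}

Memory obtained either way is released with kvfree(), which checks whether the pointer is vmalloc'ed and dispatches to vfree() or kfree() accordingly; that is why trivial wrappers like hhf_free() can simply be dropped in favor of calling kvfree() directly, as the hunks below do.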
Diffstat (limited to 'net/sched/sch_hhf.c')
-rw-r--r--	net/sched/sch_hhf.c	33
1 file changed, 9 insertions(+), 24 deletions(-)
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index c19d346e6c5a..51d3ba682af9 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -467,29 +467,14 @@ static void hhf_reset(struct Qdisc *sch)
 	rtnl_kfree_skbs(skb, skb);
 }
 
-static void *hhf_zalloc(size_t sz)
-{
-	void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
-
-	if (!ptr)
-		ptr = vzalloc(sz);
-
-	return ptr;
-}
-
-static void hhf_free(void *addr)
-{
-	kvfree(addr);
-}
-
 static void hhf_destroy(struct Qdisc *sch)
 {
 	int i;
 	struct hhf_sched_data *q = qdisc_priv(sch);
 
 	for (i = 0; i < HHF_ARRAYS_CNT; i++) {
-		hhf_free(q->hhf_arrays[i]);
-		hhf_free(q->hhf_valid_bits[i]);
+		kvfree(q->hhf_arrays[i]);
+		kvfree(q->hhf_valid_bits[i]);
 	}
 
 	for (i = 0; i < HH_FLOWS_CNT; i++) {
@@ -503,7 +488,7 @@ static void hhf_destroy(struct Qdisc *sch)
 			kfree(flow);
 		}
 	}
-	hhf_free(q->hh_flows);
+	kvfree(q->hh_flows);
 }
 
 static const struct nla_policy hhf_policy[TCA_HHF_MAX + 1] = {
@@ -609,8 +594,8 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
 
 	if (!q->hh_flows) {
 		/* Initialize heavy-hitter flow table. */
-		q->hh_flows = hhf_zalloc(HH_FLOWS_CNT *
-					 sizeof(struct list_head));
+		q->hh_flows = kvzalloc(HH_FLOWS_CNT *
+				       sizeof(struct list_head), GFP_KERNEL);
 		if (!q->hh_flows)
 			return -ENOMEM;
 		for (i = 0; i < HH_FLOWS_CNT; i++)
@@ -624,8 +609,8 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
 
 	/* Initialize heavy-hitter filter arrays. */
 	for (i = 0; i < HHF_ARRAYS_CNT; i++) {
-		q->hhf_arrays[i] = hhf_zalloc(HHF_ARRAYS_LEN *
-					      sizeof(u32));
+		q->hhf_arrays[i] = kvzalloc(HHF_ARRAYS_LEN *
+					    sizeof(u32), GFP_KERNEL);
 		if (!q->hhf_arrays[i]) {
 			/* Note: hhf_destroy() will be called
 			 * by our caller.
@@ -637,8 +622,8 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
 
 	/* Initialize valid bits of heavy-hitter filter arrays. */
 	for (i = 0; i < HHF_ARRAYS_CNT; i++) {
-		q->hhf_valid_bits[i] = hhf_zalloc(HHF_ARRAYS_LEN /
-						  BITS_PER_BYTE);
+		q->hhf_valid_bits[i] = kvzalloc(HHF_ARRAYS_LEN /
+						BITS_PER_BYTE, GFP_KERNEL);
 		if (!q->hhf_valid_bits[i]) {
 			/* Note: hhf_destroy() will be called
 			 * by our caller.