aboutsummaryrefslogtreecommitdiffstats
path: root/net/core
diff options
context:
space:
mode:
authorIdo Schimmel <idosch@mellanox.com>2019-08-11 03:35:55 -0400
committerDavid S. Miller <davem@davemloft.net>2019-08-11 13:53:30 -0400
commite9feb58020f952f7d9de785ede9a7d54ab1eda5c (patch)
tree57692f58d04846f26299a67895480ea59730a3c3 /net/core
parent30328d46af593dcf24582f2a431d84ea0cf4bdef (diff)
drop_monitor: Expose tail drop counter
The previous patch made the length of the per-CPU skb drop list configurable. Expose a counter that shows how many packets could not be enqueued to this list. This allows users to determine the desired queue length. Signed-off-by: Ido Schimmel <idosch@mellanox.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r--net/core/drop_monitor.c101
1 file changed, 101 insertions, 0 deletions
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index eb3c34d69ea9..39e094907391 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -51,12 +51,18 @@ static int trace_state = TRACE_OFF;
51 */ 51 */
52static DEFINE_MUTEX(net_dm_mutex); 52static DEFINE_MUTEX(net_dm_mutex);
53 53
/* Per-CPU drop-monitor statistics. */
54struct net_dm_stats {
55	u64 dropped;	/* packets that could not be enqueued to drop_queue */
56	struct u64_stats_sync syncp;	/* guards consistent 64-bit reads of 'dropped' */
57};
58
54struct per_cpu_dm_data { 59struct per_cpu_dm_data {
55 spinlock_t lock; /* Protects 'skb' and 'send_timer' */ 60 spinlock_t lock; /* Protects 'skb' and 'send_timer' */
56 struct sk_buff *skb; 61 struct sk_buff *skb;
57 struct sk_buff_head drop_queue; 62 struct sk_buff_head drop_queue;
58 struct work_struct dm_alert_work; 63 struct work_struct dm_alert_work;
59 struct timer_list send_timer; 64 struct timer_list send_timer;
65 struct net_dm_stats stats;
60}; 66};
61 67
62struct dm_hw_stat_delta { 68struct dm_hw_stat_delta {
@@ -300,6 +306,9 @@ static void net_dm_packet_trace_kfree_skb_hit(void *ignore,
300 306
301unlock_free: 307unlock_free:
302 spin_unlock_irqrestore(&data->drop_queue.lock, flags); 308 spin_unlock_irqrestore(&data->drop_queue.lock, flags);
309 u64_stats_update_begin(&data->stats.syncp);
310 data->stats.dropped++;
311 u64_stats_update_end(&data->stats.syncp);
303 consume_skb(nskb); 312 consume_skb(nskb);
304} 313}
305 314
@@ -732,6 +741,93 @@ free_msg:
732 return rc; 741 return rc;
733} 742}
734 743
/* Sum the per-CPU tail-drop counters into @stats. Each CPU's counter is
 * sampled under its u64_stats seqcount so the 64-bit value read is
 * internally consistent.
 */
744static void net_dm_stats_read(struct net_dm_stats *stats)
745{
746	int cpu;
747
748	/* Totals accumulate across CPUs, so start from zero. */
749	memset(stats, 0, sizeof(*stats));
750	for_each_possible_cpu(cpu) {
751		struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
752		struct net_dm_stats *cpu_stats = &data->stats;
753		unsigned int start;
754		u64 dropped;
755
756		/* Retry until a consistent snapshot of this CPU's counter
757		 * is obtained (writer may update concurrently).
758		 */
759		do {
760			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
761			dropped = cpu_stats->dropped;
762		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
763
764		stats->dropped += dropped;
765	}
766}
763
/* Append a nested NET_DM_ATTR_STATS attribute carrying the aggregated
 * drop counter to @msg. Returns 0 on success or -EMSGSIZE if the
 * message buffer is too small.
 */
764static int net_dm_stats_put(struct sk_buff *msg)
765{
766	struct net_dm_stats stats;
767	struct nlattr *attr;
768
769	/* Aggregate the per-CPU counters before encoding. */
770	net_dm_stats_read(&stats);
771
772	attr = nla_nest_start(msg, NET_DM_ATTR_STATS);
773	if (!attr)
774		return -EMSGSIZE;
775
776	/* 64-bit attribute; NET_DM_ATTR_PAD keeps it properly aligned. */
777	if (nla_put_u64_64bit(msg, NET_DM_ATTR_STATS_DROPPED,
778			      stats.dropped, NET_DM_ATTR_PAD))
779		goto nla_put_failure;
780
781	nla_nest_end(msg, attr);
782
783	return 0;
784
785nla_put_failure:
786	/* Undo the partially-built nest so @msg stays well-formed. */
787	nla_nest_cancel(msg, attr);
788	return -EMSGSIZE;
789}
787
/* Build a complete NET_DM_CMD_STATS_NEW genetlink message in @msg,
 * addressed to the requester identified by @info. Returns 0 on success
 * or a negative error (-EMSGSIZE) on failure.
 */
788static int net_dm_stats_fill(struct sk_buff *msg, struct genl_info *info)
789{
790	void *hdr;
791	int rc;
792
793	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
794			  &net_drop_monitor_family, 0, NET_DM_CMD_STATS_NEW);
795	if (!hdr)
796		return -EMSGSIZE;
797
798	rc = net_dm_stats_put(msg);
799	if (rc)
800		goto nla_put_failure;
801
802	genlmsg_end(msg, hdr);
803
804	return 0;
805
806nla_put_failure:
807	/* Strip the half-built message back out of the skb. */
808	genlmsg_cancel(msg, hdr);
809	return -EMSGSIZE;
810}
810
/* Genetlink doit handler for NET_DM_CMD_STATS_GET: allocate a reply
 * skb, fill it with the current statistics and send it back to the
 * requesting socket. Returns 0 on success or a negative error.
 */
811static int net_dm_cmd_stats_get(struct sk_buff *skb, struct genl_info *info)
812{
813	struct sk_buff *msg;
814	int rc;
815
816	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
817	if (!msg)
818		return -ENOMEM;
819
820	rc = net_dm_stats_fill(msg, info);
821	if (rc)
822		goto free_msg;
823
824	/* genlmsg_reply() consumes @msg on both success and failure. */
825	return genlmsg_reply(msg, info);
826
827free_msg:
828	nlmsg_free(msg);
829	return rc;
830}
830
735static int dropmon_net_event(struct notifier_block *ev_block, 831static int dropmon_net_event(struct notifier_block *ev_block,
736 unsigned long event, void *ptr) 832 unsigned long event, void *ptr)
737{ 833{
@@ -799,6 +895,10 @@ static const struct genl_ops dropmon_ops[] = {
799 .cmd = NET_DM_CMD_CONFIG_GET, 895 .cmd = NET_DM_CMD_CONFIG_GET,
800 .doit = net_dm_cmd_config_get, 896 .doit = net_dm_cmd_config_get,
801 }, 897 },
898 {
899 .cmd = NET_DM_CMD_STATS_GET,
900 .doit = net_dm_cmd_stats_get,
901 },
802}; 902};
803 903
804static int net_dm_nl_pre_doit(const struct genl_ops *ops, 904static int net_dm_nl_pre_doit(const struct genl_ops *ops,
@@ -865,6 +965,7 @@ static int __init init_net_drop_monitor(void)
865 data = &per_cpu(dm_cpu_data, cpu); 965 data = &per_cpu(dm_cpu_data, cpu);
866 spin_lock_init(&data->lock); 966 spin_lock_init(&data->lock);
867 skb_queue_head_init(&data->drop_queue); 967 skb_queue_head_init(&data->drop_queue);
968 u64_stats_init(&data->stats.syncp);
868 } 969 }
869 970
870 goto out; 971 goto out;