-rw-r--r--  include/net/ip_vs.h             |  51
-rw-r--r--  include/net/netns/ip_vs.h       |   4
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c |  89
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c  | 134
-rw-r--r--  net/netfilter/ipvs/ip_vs_est.c  |  39
5 files changed, 256 insertions, 61 deletions
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 4265b5e00c9..605d5db81a3 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -90,6 +90,18 @@ static inline struct net *skb_sknet(struct sk_buff *skb)
 	return &init_net;
 #endif
 }
+/*
+ * This one is needed for single_open_net since net is stored directly in
+ * private, not as a struct, i.e. seq_file_net() can't be used.
+ */
+static inline struct net *seq_file_single_net(struct seq_file *seq)
+{
+#ifdef CONFIG_NET_NS
+	return (struct net *)seq->private;
+#else
+	return &init_net;
+#endif
+}
 
 /* Connections' size value needed by ip_vs_ctl.c */
 extern int ip_vs_conn_tab_size;
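
The helper just added mirrors seq_file_net(), but for files created with single_open_net(): that call stores the struct net pointer directly in seq->private instead of wrapping it in a struct seq_net_private. A minimal sketch of a caller pair, with hypothetical names (the patch's real users are the stats show functions in ip_vs_ctl.c further down):

	static int example_show(struct seq_file *seq, void *v)
	{
		/* single_open_net() stored the struct net in seq->private */
		struct net *net = seq_file_single_net(seq);

		seq_printf(seq, "tot_stats at %p\n", net_ipvs(net)->tot_stats);
		return 0;
	}

	static int example_open(struct inode *inode, struct file *file)
	{
		return single_open_net(inode, file, example_show);
	}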
@@ -320,6 +332,23 @@ struct ip_vs_seq {
 				   before last resized pkt */
 };
 
+/*
+ * counters per cpu
+ */
+struct ip_vs_counters {
+	__u32		conns;		/* connections scheduled */
+	__u32		inpkts;		/* incoming packets */
+	__u32		outpkts;	/* outgoing packets */
+	__u64		inbytes;	/* incoming bytes */
+	__u64		outbytes;	/* outgoing bytes */
+};
+/*
+ * Stats per cpu
+ */
+struct ip_vs_cpu_stats {
+	struct ip_vs_counters	ustats;
+	struct u64_stats_sync	syncp;
+};
 
 /*
  * IPVS statistics objects
@@ -341,12 +370,28 @@ struct ip_vs_estimator {
 };
 
 struct ip_vs_stats {
 	struct ip_vs_stats_user	ustats;		/* statistics */
 	struct ip_vs_estimator	est;		/* estimator */
-
+	struct ip_vs_cpu_stats	*cpustats;	/* per cpu counters */
 	spinlock_t		lock;		/* spin lock */
 };
 
+/*
+ * Helper macros for the per cpu counters,
+ * i.e. ipvs->tot_stats->ustats.count
+ */
+#define IPVS_STAT_INC(ipvs, count) \
+	__this_cpu_inc((ipvs)->ustats->count)
+
+#define IPVS_STAT_ADD(ipvs, count, value) \
+	do { \
+		write_seqcount_begin(per_cpu_ptr((ipvs)->ustats_seq, \
+				     raw_smp_processor_id())); \
+		__this_cpu_add((ipvs)->ustats->count, value); \
+		write_seqcount_end(per_cpu_ptr((ipvs)->ustats_seq, \
+				   raw_smp_processor_id())); \
+	} while (0)
+
 struct dst_entry;
 struct iphdr;
 struct ip_vs_conn;
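
IPVS_STAT_ADD wraps the 64-bit addition in a per-cpu seqcount because a 64-bit store is not atomic on 32-bit machines, so a reader that races with the writer must retry. A minimal sketch of the matching read side, assuming the ustats_seq and cpustats fields that the netns hunk below adds (ipvs_read_inbytes is a hypothetical name; elsewhere the patch uses the equivalent u64_stats_sync API instead, see ip_vs_est.c):

	static u64 ipvs_read_inbytes(struct netns_ipvs *ipvs, int cpu)
	{
		seqcount_t *sc = per_cpu_ptr(ipvs->ustats_seq, cpu);
		struct ip_vs_cpu_stats *s = per_cpu_ptr(ipvs->cpustats, cpu);
		unsigned int start;
		u64 val;

		do {
			/* retry if a writer was inside IPVS_STAT_ADD */
			start = read_seqcount_begin(sc);
			val = s->ustats.inbytes;
		} while (read_seqcount_retry(sc, start));

		return val;
	}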
diff --git a/include/net/netns/ip_vs.h b/include/net/netns/ip_vs.h
index aba78f3c834..bd1dad87217 100644
--- a/include/net/netns/ip_vs.h
+++ b/include/net/netns/ip_vs.h
@@ -61,6 +61,10 @@ struct netns_ipvs {
 	struct list_head	sctp_apps[SCTP_APP_TAB_SIZE];
 	spinlock_t		sctp_app_lock;
 #endif
+	/* ip_vs_ctl */
+	struct ip_vs_stats		*tot_stats;	/* Statistics & est. */
+	struct ip_vs_cpu_stats __percpu *cpustats;	/* Stats per cpu */
+	seqcount_t			*ustats_seq;	/* u64 read retry */
 
 	/* ip_vs_lblc */
 	int			sysctl_lblc_expiration;
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 5531d569aa5..7e6a2a046bf 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -115,21 +115,28 @@ static inline void
 ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
 {
 	struct ip_vs_dest *dest = cp->dest;
+	struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
+
 	if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
-		spin_lock(&dest->stats.lock);
-		dest->stats.ustats.inpkts++;
-		dest->stats.ustats.inbytes += skb->len;
-		spin_unlock(&dest->stats.lock);
-
-		spin_lock(&dest->svc->stats.lock);
-		dest->svc->stats.ustats.inpkts++;
-		dest->svc->stats.ustats.inbytes += skb->len;
-		spin_unlock(&dest->svc->stats.lock);
-
-		spin_lock(&ip_vs_stats.lock);
-		ip_vs_stats.ustats.inpkts++;
-		ip_vs_stats.ustats.inbytes += skb->len;
-		spin_unlock(&ip_vs_stats.lock);
+		struct ip_vs_cpu_stats *s;
+
+		s = this_cpu_ptr(dest->stats.cpustats);
+		s->ustats.inpkts++;
+		u64_stats_update_begin(&s->syncp);
+		s->ustats.inbytes += skb->len;
+		u64_stats_update_end(&s->syncp);
+
+		s = this_cpu_ptr(dest->svc->stats.cpustats);
+		s->ustats.inpkts++;
+		u64_stats_update_begin(&s->syncp);
+		s->ustats.inbytes += skb->len;
+		u64_stats_update_end(&s->syncp);
+
+		s = this_cpu_ptr(ipvs->cpustats);
+		s->ustats.inpkts++;
+		u64_stats_update_begin(&s->syncp);
+		s->ustats.inbytes += skb->len;
+		u64_stats_update_end(&s->syncp);
 	}
 }
 
@@ -138,21 +145,28 @@ static inline void
 ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
 {
 	struct ip_vs_dest *dest = cp->dest;
+	struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
+
 	if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
-		spin_lock(&dest->stats.lock);
-		dest->stats.ustats.outpkts++;
-		dest->stats.ustats.outbytes += skb->len;
-		spin_unlock(&dest->stats.lock);
-
-		spin_lock(&dest->svc->stats.lock);
-		dest->svc->stats.ustats.outpkts++;
-		dest->svc->stats.ustats.outbytes += skb->len;
-		spin_unlock(&dest->svc->stats.lock);
-
-		spin_lock(&ip_vs_stats.lock);
-		ip_vs_stats.ustats.outpkts++;
-		ip_vs_stats.ustats.outbytes += skb->len;
-		spin_unlock(&ip_vs_stats.lock);
+		struct ip_vs_cpu_stats *s;
+
+		s = this_cpu_ptr(dest->stats.cpustats);
+		s->ustats.outpkts++;
+		u64_stats_update_begin(&s->syncp);
+		s->ustats.outbytes += skb->len;
+		u64_stats_update_end(&s->syncp);
+
+		s = this_cpu_ptr(dest->svc->stats.cpustats);
+		s->ustats.outpkts++;
+		u64_stats_update_begin(&s->syncp);
+		s->ustats.outbytes += skb->len;
+		u64_stats_update_end(&s->syncp);
+
+		s = this_cpu_ptr(ipvs->cpustats);
+		s->ustats.outpkts++;
+		u64_stats_update_begin(&s->syncp);
+		s->ustats.outbytes += skb->len;
+		u64_stats_update_end(&s->syncp);
 	}
 }
 
@@ -160,17 +174,17 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
 static inline void
 ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
 {
-	spin_lock(&cp->dest->stats.lock);
-	cp->dest->stats.ustats.conns++;
-	spin_unlock(&cp->dest->stats.lock);
+	struct netns_ipvs *ipvs = net_ipvs(svc->net);
+	struct ip_vs_cpu_stats *s;
 
-	spin_lock(&svc->stats.lock);
-	svc->stats.ustats.conns++;
-	spin_unlock(&svc->stats.lock);
+	s = this_cpu_ptr(cp->dest->stats.cpustats);
+	s->ustats.conns++;
 
-	spin_lock(&ip_vs_stats.lock);
-	ip_vs_stats.ustats.conns++;
-	spin_unlock(&ip_vs_stats.lock);
+	s = this_cpu_ptr(svc->stats.cpustats);
+	s->ustats.conns++;
+
+	s = this_cpu_ptr(ipvs->cpustats);
+	s->ustats.conns++;
 }
 
 
@@ -1841,7 +1855,6 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
 	},
 #endif
 };
-
 /*
  *	Initialize IP Virtual Server netns mem.
  */
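
All three update sites (destination, service, and per-netns totals) repeat the same per-cpu sequence. A hypothetical helper could factor it out; ip_vs_cpu_stats_in below is illustrative only, not part of the patch. Note that only the 64-bit byte counter sits inside the u64_stats_sync writer section: the 32-bit packet counter can already be read atomically, and on 64-bit kernels the begin/end calls compile away entirely.

	static inline void
	ip_vs_cpu_stats_in(struct ip_vs_cpu_stats __percpu *cpustats,
			   const struct sk_buff *skb)
	{
		/* preemption is already disabled on the netfilter hook path */
		struct ip_vs_cpu_stats *s = this_cpu_ptr(cpustats);

		s->ustats.inpkts++;
		u64_stats_update_begin(&s->syncp);
		s->ustats.inbytes += skb->len;
		u64_stats_update_end(&s->syncp);
	}

With such a helper, ip_vs_in_stats() would reduce to three calls: ip_vs_cpu_stats_in(dest->stats.cpustats, skb), ip_vs_cpu_stats_in(dest->svc->stats.cpustats, skb) and ip_vs_cpu_stats_in(ipvs->cpustats, skb).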
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 03f86312b4b..cbd58c60e1b 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -257,8 +257,7 @@ static DECLARE_DELAYED_WORK(defense_work, defense_work_handler);
 
 static void defense_work_handler(struct work_struct *work)
 {
-	struct net *net = &init_net;
-	struct netns_ipvs *ipvs = net_ipvs(net);
+	struct netns_ipvs *ipvs = net_ipvs(&init_net);
 
 	update_defense_level(ipvs);
 	if (atomic_read(&ip_vs_dropentry))
@@ -519,6 +518,7 @@ __ip_vs_unbind_svc(struct ip_vs_dest *dest)
 			      svc->fwmark,
 			      IP_VS_DBG_ADDR(svc->af, &svc->addr),
 			      ntohs(svc->port), atomic_read(&svc->usecnt));
+		free_percpu(svc->stats.cpustats);
 		kfree(svc);
 	}
 }
@@ -722,6 +722,7 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
 			list_del(&dest->n_list);
 			ip_vs_dst_reset(dest);
 			__ip_vs_unbind_svc(dest);
+			free_percpu(dest->stats.cpustats);
 			kfree(dest);
 		}
 	}
@@ -747,6 +748,7 @@ static void ip_vs_trash_cleanup(void)
 		list_del(&dest->n_list);
 		ip_vs_dst_reset(dest);
 		__ip_vs_unbind_svc(dest);
+		free_percpu(dest->stats.cpustats);
 		kfree(dest);
 	}
 }
@@ -868,6 +870,11 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
 		pr_err("%s(): no memory.\n", __func__);
 		return -ENOMEM;
 	}
+	dest->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
+	if (!dest->stats.cpustats) {
+		pr_err("%s() alloc_percpu failed\n", __func__);
+		goto err_alloc;
+	}
 
 	dest->af = svc->af;
 	dest->protocol = svc->protocol;
@@ -891,6 +898,10 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
 
 	LeaveFunction(2);
 	return 0;
+
+err_alloc:
+	kfree(dest);
+	return -ENOMEM;
 }
 
 
@@ -1037,6 +1048,7 @@ static void __ip_vs_del_dest(struct net *net, struct ip_vs_dest *dest)
 		   and only one user context can update virtual service at a
 		   time, so the operation here is OK */
 		atomic_dec(&dest->svc->refcnt);
+		free_percpu(dest->stats.cpustats);
 		kfree(dest);
 	} else {
 		IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, "
@@ -1163,6 +1175,11 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
 		ret = -ENOMEM;
 		goto out_err;
 	}
+	svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
+	if (!svc->stats.cpustats) {
+		pr_err("%s() alloc_percpu failed\n", __func__);
+		goto out_err;
+	}
 
 	/* I'm the first user of the service */
 	atomic_set(&svc->usecnt, 0);
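
Both allocation sites depend on alloc_percpu() returning zeroed per-cpu memory, so the new counters start at zero without an explicit memset. Every destruction path then has to pair the allocation with free_percpu(), which is exactly what the additions next to each kfree() in this file do. A standalone sketch of the pairing (hypothetical function names):

	static int example_stats_alloc(struct ip_vs_stats *stats)
	{
		/* alloc_percpu() zeroes the memory: counters start at 0 */
		stats->cpustats = alloc_percpu(struct ip_vs_cpu_stats);
		if (!stats->cpustats)
			return -ENOMEM;
		return 0;
	}

	static void example_stats_free(struct ip_vs_stats *stats)
	{
		/* must run on every teardown path before kfree(stats) */
		free_percpu(stats->cpustats);
	}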
@@ -1212,6 +1229,7 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
 	*svc_p = svc;
 	return 0;
 
+
  out_err:
 	if (svc != NULL) {
 		ip_vs_unbind_scheduler(svc);
@@ -1220,6 +1238,8 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
 			ip_vs_app_inc_put(svc->inc);
 			local_bh_enable();
 		}
+		if (svc->stats.cpustats)
+			free_percpu(svc->stats.cpustats);
 		kfree(svc);
 	}
 	ip_vs_scheduler_put(sched);
@@ -1388,6 +1408,7 @@ static void __ip_vs_del_service(struct ip_vs_service *svc)
 		    svc->fwmark,
 		    IP_VS_DBG_ADDR(svc->af, &svc->addr),
 		    ntohs(svc->port), atomic_read(&svc->usecnt));
+	free_percpu(svc->stats.cpustats);
 	kfree(svc);
 }
 
@@ -1499,7 +1520,7 @@ static int ip_vs_zero_all(struct net *net)
 		}
 	}
 
-	ip_vs_zero_stats(&ip_vs_stats);
+	ip_vs_zero_stats(net_ipvs(net)->tot_stats);
 	return 0;
 }
 
@@ -1989,13 +2010,11 @@ static const struct file_operations ip_vs_info_fops = {
 
 #endif
 
-struct ip_vs_stats ip_vs_stats = {
-	.lock = __SPIN_LOCK_UNLOCKED(ip_vs_stats.lock),
-};
-
 #ifdef CONFIG_PROC_FS
 static int ip_vs_stats_show(struct seq_file *seq, void *v)
 {
+	struct net *net = seq_file_single_net(seq);
+	struct ip_vs_stats *tot_stats = net_ipvs(net)->tot_stats;
 
 /*               01234567 01234567 01234567 0123456701234567 0123456701234567 */
 	seq_puts(seq,
@@ -2003,22 +2022,22 @@ static int ip_vs_stats_show(struct seq_file *seq, void *v)
 	seq_printf(seq,
 		   "   Conns  Packets  Packets            Bytes            Bytes\n");
 
-	spin_lock_bh(&ip_vs_stats.lock);
-	seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", ip_vs_stats.ustats.conns,
-		   ip_vs_stats.ustats.inpkts, ip_vs_stats.ustats.outpkts,
-		   (unsigned long long) ip_vs_stats.ustats.inbytes,
-		   (unsigned long long) ip_vs_stats.ustats.outbytes);
+	spin_lock_bh(&tot_stats->lock);
+	seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", tot_stats->ustats.conns,
+		   tot_stats->ustats.inpkts, tot_stats->ustats.outpkts,
+		   (unsigned long long) tot_stats->ustats.inbytes,
+		   (unsigned long long) tot_stats->ustats.outbytes);
 
 /*                 01234567 01234567 01234567 0123456701234567 0123456701234567 */
 	seq_puts(seq,
 		 " Conns/s   Pkts/s   Pkts/s          Bytes/s          Bytes/s\n");
 	seq_printf(seq, "%8X %8X %8X %16X %16X\n",
-		   ip_vs_stats.ustats.cps,
-		   ip_vs_stats.ustats.inpps,
-		   ip_vs_stats.ustats.outpps,
-		   ip_vs_stats.ustats.inbps,
-		   ip_vs_stats.ustats.outbps);
-	spin_unlock_bh(&ip_vs_stats.lock);
+		   tot_stats->ustats.cps,
+		   tot_stats->ustats.inpps,
+		   tot_stats->ustats.outpps,
+		   tot_stats->ustats.inbps,
+		   tot_stats->ustats.outbps);
+	spin_unlock_bh(&tot_stats->lock);
 
 	return 0;
 }
@@ -2036,6 +2055,59 @@ static const struct file_operations ip_vs_stats_fops = {
 	.release = single_release,
 };
 
+static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
+{
+	struct net *net = seq_file_single_net(seq);
+	struct ip_vs_stats *tot_stats = net_ipvs(net)->tot_stats;
+	int i;
+
+/*               01234567 01234567 01234567 0123456701234567 0123456701234567 */
+	seq_puts(seq,
+		 "       Total Incoming Outgoing         Incoming         Outgoing\n");
+	seq_printf(seq,
+		   "CPU    Conns  Packets  Packets            Bytes            Bytes\n");
+
+	for_each_possible_cpu(i) {
+		struct ip_vs_cpu_stats *u = per_cpu_ptr(net->ipvs->cpustats, i);
+		seq_printf(seq, "%3X %8X %8X %8X %16LX %16LX\n",
+			   i, u->ustats.conns, u->ustats.inpkts,
+			   u->ustats.outpkts, (__u64)u->ustats.inbytes,
+			   (__u64)u->ustats.outbytes);
+	}
+
+	spin_lock_bh(&tot_stats->lock);
+	seq_printf(seq, "  ~ %8X %8X %8X %16LX %16LX\n\n",
+		   tot_stats->ustats.conns, tot_stats->ustats.inpkts,
+		   tot_stats->ustats.outpkts,
+		   (unsigned long long) tot_stats->ustats.inbytes,
+		   (unsigned long long) tot_stats->ustats.outbytes);
+
+/*                 01234567 01234567 01234567 0123456701234567 0123456701234567 */
+	seq_puts(seq,
+		 "     Conns/s   Pkts/s   Pkts/s          Bytes/s          Bytes/s\n");
+	seq_printf(seq, "    %8X %8X %8X %16X %16X\n",
+		   tot_stats->ustats.cps,
+		   tot_stats->ustats.inpps,
+		   tot_stats->ustats.outpps,
+		   tot_stats->ustats.inbps,
+		   tot_stats->ustats.outbps);
+	spin_unlock_bh(&tot_stats->lock);
+
+	return 0;
+}
+
+static int ip_vs_stats_percpu_seq_open(struct inode *inode, struct file *file)
+{
+	return single_open_net(inode, file, ip_vs_stats_percpu_show);
+}
+
+static const struct file_operations ip_vs_stats_percpu_fops = {
+	.owner = THIS_MODULE,
+	.open = ip_vs_stats_percpu_seq_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
 #endif
 
 /*
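
For orientation, /proc/net/ip_vs_stats_percpu output produced by the format strings above would look roughly like this; all columns are hexadecimal per the %X conversions, and the values are purely illustrative (the "~" row is the estimator-maintained total, so it need not equal the instantaneous per-cpu sum):

	       Total Incoming Outgoing         Incoming         Outgoing
	CPU    Conns  Packets  Packets            Bytes            Bytes
	  0       1A     4E20     4D10           2DC6C0           2B67D0
	  1       17     3F81     3E9B           28C5A8           27AF21
	  ~       31     8DA1     8BAB           568C68           5316F1

	     Conns/s   Pkts/s   Pkts/s          Bytes/s          Bytes/s
	           0        5        5             1A0B             19F2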
@@ -3461,32 +3533,54 @@ int __net_init __ip_vs_control_init(struct net *net)
 
 	if (!net_eq(net, &init_net))	/* netns not enabled yet */
 		return -EPERM;
+	/* procfs stats */
+	ipvs->tot_stats = kzalloc(sizeof(struct ip_vs_stats), GFP_KERNEL);
+	if (ipvs->tot_stats == NULL) {
+		pr_err("%s(): no memory.\n", __func__);
+		return -ENOMEM;
+	}
+	ipvs->cpustats = alloc_percpu(struct ip_vs_cpu_stats);
+	if (!ipvs->cpustats) {
+		pr_err("%s() alloc_percpu failed\n", __func__);
+		goto err_alloc;
+	}
+	spin_lock_init(&ipvs->tot_stats->lock);
 
 	for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++)
 		INIT_LIST_HEAD(&ipvs->rs_table[idx]);
 
 	proc_net_fops_create(net, "ip_vs", 0, &ip_vs_info_fops);
 	proc_net_fops_create(net, "ip_vs_stats", 0, &ip_vs_stats_fops);
+	proc_net_fops_create(net, "ip_vs_stats_percpu", 0,
+			     &ip_vs_stats_percpu_fops);
 	sysctl_header = register_net_sysctl_table(net, net_vs_ctl_path,
 						  vs_vars);
 	if (sysctl_header == NULL)
 		goto err_reg;
-	ip_vs_new_estimator(net, &ip_vs_stats);
+	ip_vs_new_estimator(net, ipvs->tot_stats);
 	return 0;
 
 err_reg:
+	free_percpu(ipvs->cpustats);
+err_alloc:
+	kfree(ipvs->tot_stats);
 	return -ENOMEM;
 }
 
 static void __net_exit __ip_vs_control_cleanup(struct net *net)
 {
+	struct netns_ipvs *ipvs = net_ipvs(net);
+
 	if (!net_eq(net, &init_net))	/* netns not enabled yet */
 		return;
 
-	ip_vs_kill_estimator(net, &ip_vs_stats);
+	ip_vs_kill_estimator(net, ipvs->tot_stats);
 	unregister_net_sysctl_table(sysctl_header);
+	proc_net_remove(net, "ip_vs_stats_percpu");
 	proc_net_remove(net, "ip_vs_stats");
 	proc_net_remove(net, "ip_vs");
+	free_percpu(ipvs->cpustats);
+	kfree(ipvs->tot_stats);
 }
 
 static struct pernet_operations ipvs_control_ops = {
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
index 07d839bef53..d13616b138c 100644
--- a/net/netfilter/ipvs/ip_vs_est.c
+++ b/net/netfilter/ipvs/ip_vs_est.c
@@ -52,6 +52,43 @@
  */
 
 
+/*
+ * Make a summary from each cpu
+ */
+static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum,
+				 struct ip_vs_cpu_stats *stats)
+{
+	int i;
+
+	for_each_possible_cpu(i) {
+		struct ip_vs_cpu_stats *s = per_cpu_ptr(stats, i);
+		unsigned int start;
+		__u64 inbytes, outbytes;
+		if (i) {
+			sum->conns += s->ustats.conns;
+			sum->inpkts += s->ustats.inpkts;
+			sum->outpkts += s->ustats.outpkts;
+			do {
+				start = u64_stats_fetch_begin_bh(&s->syncp);
+				inbytes = s->ustats.inbytes;
+				outbytes = s->ustats.outbytes;
+			} while (u64_stats_fetch_retry_bh(&s->syncp, start));
+			sum->inbytes += inbytes;
+			sum->outbytes += outbytes;
+		} else {
+			sum->conns = s->ustats.conns;
+			sum->inpkts = s->ustats.inpkts;
+			sum->outpkts = s->ustats.outpkts;
+			do {
+				start = u64_stats_fetch_begin_bh(&s->syncp);
+				sum->inbytes = s->ustats.inbytes;
+				sum->outbytes = s->ustats.outbytes;
+			} while (u64_stats_fetch_retry_bh(&s->syncp, start));
+		}
+	}
+}
+
+
 static void estimation_timer(unsigned long arg)
 {
 	struct ip_vs_estimator *e;
@@ -64,10 +101,12 @@ static void estimation_timer(unsigned long arg)
 	struct netns_ipvs *ipvs;
 
 	ipvs = net_ipvs(net);
+	ip_vs_read_cpu_stats(&ipvs->tot_stats->ustats, ipvs->cpustats);
 	spin_lock(&ipvs->est_lock);
 	list_for_each_entry(e, &ipvs->est_list, list) {
 		s = container_of(e, struct ip_vs_stats, est);
 
+		ip_vs_read_cpu_stats(&s->ustats, s->cpustats);
 		spin_lock(&s->lock);
 		n_conns = s->ustats.conns;
 		n_inpkts = s->ustats.inpkts;
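
With the per-cpu sums folded into s->ustats, the rest of estimation_timer() runs as before: every two seconds it takes deltas against the previous readings and smooths them into the cps/pps/bps estimates. A hedged sketch of that smoothing step, shown as a standalone helper (ewma_step is a hypothetical name, and the fixed-point shifts follow the style of ip_vs_est.c only approximately):

	/* Exponentially weighted moving average with weight 1/4,
	 * applied to each summed counter once per 2-second tick.
	 */
	static u32 ewma_step(u32 avg, u32 n_now, u32 *last)
	{
		u32 rate = (n_now - *last) << 9;	/* scaled delta per tick */

		*last = n_now;
		return avg + (((long)rate - (long)avg) >> 2);
	}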