author		John Fastabend <john.fastabend@gmail.com>	2014-09-28 14:54:24 -0400
committer	David S. Miller <davem@davemloft.net>	2014-09-30 01:02:26 -0400
commit		b0ab6f92752b9f9d8da980506e9df3bd9dcd7ed3 (patch)
tree		e8dc1bb9dc3bdce6a62b785b1828efded9f87205 /net/core
parent		6401585366326fc0ecbc372ec60d1a15cd8be2f5 (diff)
net: sched: enable per cpu qstats
After previous patches to simplify qstats, the qstats can be made per cpu with a packed union in the Qdisc struct.

Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
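The union itself lives in struct Qdisc (outside this file), so it does not appear in the hunk below. A minimal sketch of the arrangement the commit message describes, with the field names assumed from the surrounding patch series rather than quoted from this diff:

	/* Sketch only: the queue-stats storage is overlaid so a qdisc
	 * carries either one struct gnet_stats_queue or a percpu pointer
	 * to per-CPU copies, never both at once.
	 */
	union {
		struct gnet_stats_queue qstats;
		struct gnet_stats_queue __percpu *cpu_qstats;
	} __packed;

Qdiscs converted to per cpu counters pass the percpu pointer down to gnet_stats_copy_queue(); everything else passes the plain struct, and the new __gnet_stats_copy_queue() helper below folds either form into one aggregate before it is written out.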
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/gen_stats.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++++-------
1 file changed, 48 insertions(+), 7 deletions(-)
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index ad3ecb6ba835..14681b97a4f3 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -215,33 +215,74 @@ gnet_stats_copy_rate_est(struct gnet_dump *d,
 }
 EXPORT_SYMBOL(gnet_stats_copy_rate_est);
 
+static void
+__gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
+			    const struct gnet_stats_queue __percpu *q)
+{
+	int i;
+
+	for_each_possible_cpu(i) {
+		const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);
+
+		qstats->qlen = 0;
+		qstats->backlog += qcpu->backlog;
+		qstats->drops += qcpu->drops;
+		qstats->requeues += qcpu->requeues;
+		qstats->overlimits += qcpu->overlimits;
+	}
+}
+
+static void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
+				    const struct gnet_stats_queue __percpu *cpu,
+				    const struct gnet_stats_queue *q,
+				    __u32 qlen)
+{
+	if (cpu) {
+		__gnet_stats_copy_queue_cpu(qstats, cpu);
+	} else {
+		qstats->qlen = q->qlen;
+		qstats->backlog = q->backlog;
+		qstats->drops = q->drops;
+		qstats->requeues = q->requeues;
+		qstats->overlimits = q->overlimits;
+	}
+
+	qstats->qlen = qlen;
+}
+
 /**
  * gnet_stats_copy_queue - copy queue statistics into statistics TLV
  * @d: dumping handle
+ * @cpu_q: per cpu queue statistics
  * @q: queue statistics
  * @qlen: queue length statistics
  *
  * Appends the queue statistics to the top level TLV created by
- * gnet_stats_start_copy().
+ * gnet_stats_start_copy(). Using per cpu queue statistics if
+ * they are available.
  *
  * Returns 0 on success or -1 with the statistic lock released
  * if the room in the socket buffer was not sufficient.
  */
 int
 gnet_stats_copy_queue(struct gnet_dump *d,
+		      struct gnet_stats_queue __percpu *cpu_q,
 		      struct gnet_stats_queue *q, __u32 qlen)
 {
-	q->qlen = qlen;
+	struct gnet_stats_queue qstats = {0};
+
+	__gnet_stats_copy_queue(&qstats, cpu_q, q, qlen);
 
 	if (d->compat_tc_stats) {
-		d->tc_stats.drops = q->drops;
-		d->tc_stats.qlen = q->qlen;
-		d->tc_stats.backlog = q->backlog;
-		d->tc_stats.overlimits = q->overlimits;
+		d->tc_stats.drops = qstats.drops;
+		d->tc_stats.qlen = qstats.qlen;
+		d->tc_stats.backlog = qstats.backlog;
+		d->tc_stats.overlimits = qstats.overlimits;
 	}
 
 	if (d->tail)
-		return gnet_stats_copy(d, TCA_STATS_QUEUE, q, sizeof(*q));
+		return gnet_stats_copy(d, TCA_STATS_QUEUE,
+				       &qstats, sizeof(qstats));
 
 	return 0;
 }
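With the extra argument in place, a dump path chooses between the two stat flavors at the call site. A minimal caller-side sketch, modeled on how the qdisc dump code was updated in the same series; qdisc_is_percpu_stats() and the Qdisc field names are assumed from that series and are not shown in this diff:

	/* Illustrative caller: hand gnet_stats_copy_queue() the percpu
	 * pointer when the qdisc keeps per cpu counters, otherwise NULL,
	 * and let __gnet_stats_copy_queue() pick the aggregation path.
	 */
	static int example_dump_qstats(struct Qdisc *q, struct gnet_dump *d)
	{
		struct gnet_stats_queue __percpu *cpu_qstats = NULL;

		if (qdisc_is_percpu_stats(q))	/* assumed helper */
			cpu_qstats = q->cpu_qstats;

		return gnet_stats_copy_queue(d, cpu_qstats, &q->qstats,
					     q->q.qlen);
	}

Note the qlen handling: the per cpu loop zeroes qstats->qlen on every iteration, and __gnet_stats_copy_queue() overwrites it with the caller-supplied qlen at the end, so the reported queue length is always the caller's authoritative value rather than a per cpu sum.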