path: root/net/core/dev.c
author    David S. Miller <davem@davemloft.net>  2014-01-14 17:46:13 -0500
committer David S. Miller <davem@davemloft.net>  2014-01-14 17:46:13 -0500
commit    c49fa0166391279249483d61511c358580e29ee4 (patch)
tree      34d5de250013a76f8949ddf733cebd9385e0379d /net/core/dev.c
parent    0a379e21c503b2ff66b44d588df9f231e9b0b9ca (diff)
parent    ae78dbfa40c629f79c72ab93525508ef49e798b6 (diff)
Merge branch 'netdev_tracing'
Ben Hutchings says:

====================
Improve tracing at the driver/core boundary

These patches add static tracepoints at the driver/core boundary which record various skb fields likely to be useful for datapath debugging. On the TX side the boundary is where the core calls ndo_start_xmit, and on the RX side it is where any of the various exported receive functions is called.

The set of skb fields is mostly based on what I thought would be interesting for sfc.

These patches are basically the same as what I sent as an RFC in November, but rebased. They now depend on 'net: core: explicitly select a txq before doing l2 forwarding', so please merge net into net-next before trying to apply them. The first patch fixes a code formatting error left behind after that fix.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
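Each of the trace_*() calls added below (trace_net_dev_start_xmit, trace_netif_rx_entry, trace_netif_rx_ni_entry, trace_netif_receive_skb_entry, trace_napi_gro_receive_entry, trace_napi_gro_frags_entry) fires a static tracepoint defined with the TRACE_EVENT() macro. The following is an illustrative sketch only: the real definitions added by this series live in include/trace/events/net.h and record a larger set of skb fields, and the event name here is a hypothetical stand-in. It just shows the general shape of such a definition.

/* Illustrative sketch only -- not the definition added by this series.
 * Captures a few skb fields at the point where the core hands a packet
 * to ndo_start_xmit.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM net

#if !defined(_TRACE_NET_SKETCH_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_NET_SKETCH_H

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/tracepoint.h>

TRACE_EVENT(net_dev_start_xmit_sketch,  /* hypothetical event name */

        TP_PROTO(const struct sk_buff *skb, const struct net_device *dev),

        TP_ARGS(skb, dev),

        TP_STRUCT__entry(
                __string(name, dev->name)
                __field(unsigned int, len)
                __field(u16, protocol)
        ),

        TP_fast_assign(
                __assign_str(name, dev->name);
                __entry->len = skb->len;
                __entry->protocol = ntohs(skb->protocol);
        ),

        TP_printk("dev=%s len=%u protocol=0x%04x",
                  __get_str(name), __entry->len, __entry->protocol)
);

#endif /* _TRACE_NET_SKETCH_H */

/* This part must be outside the include guard.  If the header's file name
 * is not <TRACE_SYSTEM>.h, TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE must also
 * be defined before this include. */
#include <trace/define_trace.h>

In-tree, the tracepoint bodies are instantiated once by a translation unit that defines CREATE_TRACE_POINTS before including the trace header (net/core/net-traces.c for the net events), so the calls added in this diff cost only a disabled-branch test while the events are off.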
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--  net/core/dev.c  105
1 file changed, 64 insertions(+), 41 deletions(-)
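Once a patched kernel is running, the new events appear under the "net" group in tracefs and can be consumed through the usual event-tracing interface. Below is a minimal userspace sketch that watches one of the new RX-side events; it assumes tracefs is mounted at /sys/kernel/debug/tracing (on some systems /sys/kernel/tracing), must run as root, and the event name can be swapped for any of the others (e.g. net_dev_start_xmit on the TX side).

/* Minimal sketch: enable one of the new RX-side events and stream the
 * formatted trace.  Adjust TRACEFS if tracefs is mounted elsewhere. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define TRACEFS "/sys/kernel/debug/tracing"

static int write_str(const char *path, const char *val)
{
        int fd = open(path, O_WRONLY);

        if (fd < 0)
                return -1;
        if (write(fd, val, strlen(val)) < 0) {
                close(fd);
                return -1;
        }
        return close(fd);
}

int main(void)
{
        char buf[4096];
        ssize_t n;
        int fd;

        /* Per-event enable switch plus the global tracing switch. */
        if (write_str(TRACEFS "/events/net/netif_receive_skb_entry/enable", "1") ||
            write_str(TRACEFS "/tracing_on", "1")) {
                perror("enabling net:netif_receive_skb_entry");
                return 1;
        }

        /* trace_pipe blocks until events arrive; one text line per event. */
        fd = open(TRACEFS "/trace_pipe", O_RDONLY);
        if (fd < 0) {
                perror("open trace_pipe");
                return 1;
        }
        while ((n = read(fd, buf, sizeof(buf))) > 0)
                fwrite(buf, 1, n, stdout);

        close(fd);
        return 0;
}

The same effect can be had from a shell by echoing 1 into the event's enable file and reading trace_pipe.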
diff --git a/net/core/dev.c b/net/core/dev.c
index 2bee80591f9a..20c834e3c7ca 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -147,6 +147,8 @@ struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
 struct list_head ptype_all __read_mostly;  /* Taps */
 static struct list_head offload_base __read_mostly;
 
+static int netif_rx_internal(struct sk_buff *skb);
+
 /*
  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
  * semaphore.
@@ -1698,7 +1700,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
         skb_scrub_packet(skb, true);
         skb->protocol = eth_type_trans(skb, dev);
 
-        return netif_rx(skb);
+        return netif_rx_internal(skb);
 }
 EXPORT_SYMBOL_GPL(dev_forward_skb);
 
@@ -2596,8 +2598,8 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                        dev_queue_xmit_nit(skb, dev);
 
                skb_len = skb->len;
-               rc = ops->ndo_start_xmit(skb, dev);
-
+               trace_net_dev_start_xmit(skb, dev);
+               rc = ops->ndo_start_xmit(skb, dev);
                trace_net_dev_xmit(skb, rc, dev, skb_len);
                if (rc == NETDEV_TX_OK)
                        txq_trans_update(txq);
@@ -2615,6 +2617,7 @@ gso:
                        dev_queue_xmit_nit(nskb, dev);
 
                skb_len = nskb->len;
+               trace_net_dev_start_xmit(nskb, dev);
                rc = ops->ndo_start_xmit(nskb, dev);
                trace_net_dev_xmit(nskb, rc, dev, skb_len);
                if (unlikely(rc != NETDEV_TX_OK)) {
@@ -3218,22 +3221,7 @@ enqueue:
        return NET_RX_DROP;
 }
 
-/**
- * netif_rx - post buffer to the network code
- * @skb: buffer to post
- *
- * This function receives a packet from a device driver and queues it for
- * the upper (protocol) levels to process.  It always succeeds. The buffer
- * may be dropped during processing for congestion control or by the
- * protocol layers.
- *
- * return values:
- * NET_RX_SUCCESS (no congestion)
- * NET_RX_DROP    (packet was dropped)
- *
- */
-
-int netif_rx(struct sk_buff *skb)
+static int netif_rx_internal(struct sk_buff *skb)
 {
        int ret;
 
@@ -3269,14 +3257,38 @@ int netif_rx(struct sk_buff *skb)
        }
        return ret;
 }
+
+/**
+ * netif_rx - post buffer to the network code
+ * @skb: buffer to post
+ *
+ * This function receives a packet from a device driver and queues it for
+ * the upper (protocol) levels to process.  It always succeeds. The buffer
+ * may be dropped during processing for congestion control or by the
+ * protocol layers.
+ *
+ * return values:
+ * NET_RX_SUCCESS (no congestion)
+ * NET_RX_DROP    (packet was dropped)
+ *
+ */
+
+int netif_rx(struct sk_buff *skb)
+{
+       trace_netif_rx_entry(skb);
+
+       return netif_rx_internal(skb);
+}
 EXPORT_SYMBOL(netif_rx);
 
 int netif_rx_ni(struct sk_buff *skb)
 {
        int err;
 
+       trace_netif_rx_ni_entry(skb);
+
        preempt_disable();
-       err = netif_rx(skb);
+       err = netif_rx_internal(skb);
        if (local_softirq_pending())
                do_softirq();
        preempt_enable();
@@ -3661,22 +3673,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
        return ret;
 }
 
-/**
- * netif_receive_skb - process receive buffer from network
- * @skb: buffer to process
- *
- * netif_receive_skb() is the main receive data processing function.
- * It always succeeds. The buffer may be dropped during processing
- * for congestion control or by the protocol layers.
- *
- * This function may only be called from softirq context and interrupts
- * should be enabled.
- *
- * Return values (usually ignored):
- * NET_RX_SUCCESS: no congestion
- * NET_RX_DROP: packet was dropped
- */
-int netif_receive_skb(struct sk_buff *skb)
+static int netif_receive_skb_internal(struct sk_buff *skb)
 {
        net_timestamp_check(netdev_tstamp_prequeue, skb);
 
@@ -3702,6 +3699,28 @@ int netif_receive_skb(struct sk_buff *skb)
 #endif
        return __netif_receive_skb(skb);
 }
+
+/**
+ * netif_receive_skb - process receive buffer from network
+ * @skb: buffer to process
+ *
+ * netif_receive_skb() is the main receive data processing function.
+ * It always succeeds. The buffer may be dropped during processing
+ * for congestion control or by the protocol layers.
+ *
+ * This function may only be called from softirq context and interrupts
+ * should be enabled.
+ *
+ * Return values (usually ignored):
+ * NET_RX_SUCCESS: no congestion
+ * NET_RX_DROP: packet was dropped
+ */
+int netif_receive_skb(struct sk_buff *skb)
+{
+       trace_netif_receive_skb_entry(skb);
+
+       return netif_receive_skb_internal(skb);
+}
 EXPORT_SYMBOL(netif_receive_skb);
 
 /* Network device is going away, flush any packets still pending
@@ -3763,7 +3782,7 @@ static int napi_gro_complete(struct sk_buff *skb)
        }
 
 out:
-       return netif_receive_skb(skb);
+       return netif_receive_skb_internal(skb);
 }
 
 /* napi->gro_list contains packets ordered by age.
@@ -3971,7 +3990,7 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
 {
        switch (ret) {
        case GRO_NORMAL:
-               if (netif_receive_skb(skb))
+               if (netif_receive_skb_internal(skb))
                        ret = GRO_DROP;
                break;
 
@@ -3996,6 +4015,8 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
 
 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
+       trace_napi_gro_receive_entry(skb);
+
        return napi_skb_finish(dev_gro_receive(napi, skb), skb);
 }
 EXPORT_SYMBOL(napi_gro_receive);
@@ -4029,7 +4050,7 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *
 {
        switch (ret) {
        case GRO_NORMAL:
-               if (netif_receive_skb(skb))
+               if (netif_receive_skb_internal(skb))
                        ret = GRO_DROP;
                break;
 
@@ -4068,6 +4089,8 @@ gro_result_t napi_gro_frags(struct napi_struct *napi)
        if (!skb)
                return GRO_DROP;
 
+       trace_napi_gro_frags_entry(skb);
+
        return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
 }
 EXPORT_SYMBOL(napi_gro_frags);
@@ -6620,11 +6643,11 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 
        /* Process offline CPU's input_pkt_queue */
        while ((skb = __skb_dequeue(&oldsd->process_queue))) {
-               netif_rx(skb);
+               netif_rx_internal(skb);
                input_queue_head_incr(oldsd);
        }
        while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
-               netif_rx(skb);
+               netif_rx_internal(skb);
                input_queue_head_incr(oldsd);
        }
 