-rw-r--r--	include/trace/events/net.h	100
-rw-r--r--	net/core/dev.c			100
2 files changed, 161 insertions, 39 deletions
diff --git a/include/trace/events/net.h b/include/trace/events/net.h
index 7a72f44ae6ab..a34f27b2e394 100644
--- a/include/trace/events/net.h
+++ b/include/trace/events/net.h
@@ -136,6 +136,106 @@ DEFINE_EVENT(net_dev_template, netif_rx,
 
 	TP_ARGS(skb)
 );
+
+DECLARE_EVENT_CLASS(net_dev_rx_verbose_template,
+
+	TP_PROTO(const struct sk_buff *skb),
+
+	TP_ARGS(skb),
+
+	TP_STRUCT__entry(
+		__string(	name,			skb->dev->name	)
+		__field(	unsigned int,		napi_id		)
+		__field(	u16,			queue_mapping	)
+		__field(	const void *,		skbaddr		)
+		__field(	bool,			vlan_tagged	)
+		__field(	u16,			vlan_proto	)
+		__field(	u16,			vlan_tci	)
+		__field(	u16,			protocol	)
+		__field(	u8,			ip_summed	)
+		__field(	u32,			rxhash		)
+		__field(	bool,			l4_rxhash	)
+		__field(	unsigned int,		len		)
+		__field(	unsigned int,		data_len	)
+		__field(	unsigned int,		truesize	)
+		__field(	bool,			mac_header_valid)
+		__field(	int,			mac_header	)
+		__field(	unsigned char,		nr_frags	)
+		__field(	u16,			gso_size	)
+		__field(	u16,			gso_type	)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, skb->dev->name);
+#ifdef CONFIG_NET_RX_BUSY_POLL
+		__entry->napi_id = skb->napi_id;
+#else
+		__entry->napi_id = 0;
+#endif
+		__entry->queue_mapping = skb->queue_mapping;
+		__entry->skbaddr = skb;
+		__entry->vlan_tagged = vlan_tx_tag_present(skb);
+		__entry->vlan_proto = ntohs(skb->vlan_proto);
+		__entry->vlan_tci = vlan_tx_tag_get(skb);
+		__entry->protocol = ntohs(skb->protocol);
+		__entry->ip_summed = skb->ip_summed;
+		__entry->rxhash = skb->rxhash;
+		__entry->l4_rxhash = skb->l4_rxhash;
+		__entry->len = skb->len;
+		__entry->data_len = skb->data_len;
+		__entry->truesize = skb->truesize;
+		__entry->mac_header_valid = skb_mac_header_was_set(skb);
+		__entry->mac_header = skb_mac_header(skb) - skb->data;
+		__entry->nr_frags = skb_shinfo(skb)->nr_frags;
+		__entry->gso_size = skb_shinfo(skb)->gso_size;
+		__entry->gso_type = skb_shinfo(skb)->gso_type;
+	),
+
+	TP_printk("dev=%s napi_id=%#x queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d rxhash=0x%08x l4_rxhash=%d len=%u data_len=%u truesize=%u mac_header_valid=%d mac_header=%d nr_frags=%d gso_size=%d gso_type=%#x",
+		  __get_str(name), __entry->napi_id, __entry->queue_mapping,
+		  __entry->skbaddr, __entry->vlan_tagged, __entry->vlan_proto,
+		  __entry->vlan_tci, __entry->protocol, __entry->ip_summed,
+		  __entry->rxhash, __entry->l4_rxhash, __entry->len,
+		  __entry->data_len, __entry->truesize,
+		  __entry->mac_header_valid, __entry->mac_header,
+		  __entry->nr_frags, __entry->gso_size, __entry->gso_type)
+);
+
+DEFINE_EVENT(net_dev_rx_verbose_template, napi_gro_frags_entry,
+
+	TP_PROTO(const struct sk_buff *skb),
+
+	TP_ARGS(skb)
+);
+
+DEFINE_EVENT(net_dev_rx_verbose_template, napi_gro_receive_entry,
+
+	TP_PROTO(const struct sk_buff *skb),
+
+	TP_ARGS(skb)
+);
+
+DEFINE_EVENT(net_dev_rx_verbose_template, netif_receive_skb_entry,
+
+	TP_PROTO(const struct sk_buff *skb),
+
+	TP_ARGS(skb)
+);
+
+DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_entry,
+
+	TP_PROTO(const struct sk_buff *skb),
+
+	TP_ARGS(skb)
+);
+
+DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_ni_entry,
+
+	TP_PROTO(const struct sk_buff *skb),
+
+	TP_ARGS(skb)
+);
+
 #endif /* _TRACE_NET_H */
 
 /* This part must be outside protection */
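All five entry events added above share the net_dev_rx_verbose_template class, so each records the same verbose set of skb fields at its respective receive entry point. As a rough sketch of how one of these events could be consumed from kernel code (not part of the patch: the module and probe names below are invented, and an out-of-tree module would additionally need the tracepoint symbol exported to modules), a probe can be attached through the generated register_trace_<event>() helper:

/*
 * Illustrative sketch only: hook the new netif_receive_skb_entry event.
 * The probe's first argument is the private data pointer passed at
 * registration time; the rest follows TP_PROTO above.
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <trace/events/net.h>

static void rx_entry_probe(void *ignore, const struct sk_buff *skb)
{
	/* Runs on every netif_receive_skb() entry while registered. */
	pr_debug("rx entry: dev=%s len=%u\n",
		 skb->dev ? skb->dev->name : "(none)", skb->len);
}

static int __init rx_probe_init(void)
{
	return register_trace_netif_receive_skb_entry(rx_entry_probe, NULL);
}

static void __exit rx_probe_exit(void)
{
	unregister_trace_netif_receive_skb_entry(rx_entry_probe, NULL);
	/* Wait for in-flight probe calls before the module text goes away. */
	tracepoint_synchronize_unregister();
}

module_init(rx_probe_init);
module_exit(rx_probe_exit);
MODULE_LICENSE("GPL");

When the events are enabled through tracefs instead, each record is formatted by the TP_printk() string defined in the class above.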
diff --git a/net/core/dev.c b/net/core/dev.c
index 9e93a1464216..20c834e3c7ca 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -147,6 +147,8 @@ struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
 struct list_head ptype_all __read_mostly;	/* Taps */
 static struct list_head offload_base __read_mostly;
 
+static int netif_rx_internal(struct sk_buff *skb);
+
 /*
  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
  * semaphore.
@@ -1698,7 +1700,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 	skb_scrub_packet(skb, true);
 	skb->protocol = eth_type_trans(skb, dev);
 
-	return netif_rx(skb);
+	return netif_rx_internal(skb);
 }
 EXPORT_SYMBOL_GPL(dev_forward_skb);
 
@@ -3219,22 +3221,7 @@ enqueue:
 	return NET_RX_DROP;
 }
 
-/**
- *	netif_rx	-	post buffer to the network code
- *	@skb: buffer to post
- *
- *	This function receives a packet from a device driver and queues it for
- *	the upper (protocol) levels to process.  It always succeeds. The buffer
- *	may be dropped during processing for congestion control or by the
- *	protocol layers.
- *
- *	return values:
- *	NET_RX_SUCCESS	(no congestion)
- *	NET_RX_DROP	(packet was dropped)
- *
- */
-
-int netif_rx(struct sk_buff *skb)
+static int netif_rx_internal(struct sk_buff *skb)
 {
 	int ret;
 
@@ -3270,14 +3257,38 @@ int netif_rx(struct sk_buff *skb)
 	}
 	return ret;
 }
+
+/**
+ *	netif_rx	-	post buffer to the network code
+ *	@skb: buffer to post
+ *
+ *	This function receives a packet from a device driver and queues it for
+ *	the upper (protocol) levels to process.  It always succeeds. The buffer
+ *	may be dropped during processing for congestion control or by the
+ *	protocol layers.
+ *
+ *	return values:
+ *	NET_RX_SUCCESS	(no congestion)
+ *	NET_RX_DROP	(packet was dropped)
+ *
+ */
+
+int netif_rx(struct sk_buff *skb)
+{
+	trace_netif_rx_entry(skb);
+
+	return netif_rx_internal(skb);
+}
 EXPORT_SYMBOL(netif_rx);
 
 int netif_rx_ni(struct sk_buff *skb)
 {
 	int err;
 
+	trace_netif_rx_ni_entry(skb);
+
 	preempt_disable();
-	err = netif_rx(skb);
+	err = netif_rx_internal(skb);
 	if (local_softirq_pending())
 		do_softirq();
 	preempt_enable();
@@ -3662,22 +3673,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
 	return ret;
 }
 
-/**
- *	netif_receive_skb - process receive buffer from network
- *	@skb: buffer to process
- *
- *	netif_receive_skb() is the main receive data processing function.
- *	It always succeeds. The buffer may be dropped during processing
- *	for congestion control or by the protocol layers.
- *
- *	This function may only be called from softirq context and interrupts
- *	should be enabled.
- *
- *	Return values (usually ignored):
- *	NET_RX_SUCCESS: no congestion
- *	NET_RX_DROP: packet was dropped
- */
-int netif_receive_skb(struct sk_buff *skb)
+static int netif_receive_skb_internal(struct sk_buff *skb)
 {
 	net_timestamp_check(netdev_tstamp_prequeue, skb);
 
@@ -3703,6 +3699,28 @@ int netif_receive_skb(struct sk_buff *skb)
 #endif
 	return __netif_receive_skb(skb);
 }
+
+/**
+ *	netif_receive_skb - process receive buffer from network
+ *	@skb: buffer to process
+ *
+ *	netif_receive_skb() is the main receive data processing function.
+ *	It always succeeds. The buffer may be dropped during processing
+ *	for congestion control or by the protocol layers.
+ *
+ *	This function may only be called from softirq context and interrupts
+ *	should be enabled.
+ *
+ *	Return values (usually ignored):
+ *	NET_RX_SUCCESS: no congestion
+ *	NET_RX_DROP: packet was dropped
+ */
+int netif_receive_skb(struct sk_buff *skb)
+{
+	trace_netif_receive_skb_entry(skb);
+
+	return netif_receive_skb_internal(skb);
+}
 EXPORT_SYMBOL(netif_receive_skb);
 
 /* Network device is going away, flush any packets still pending
@@ -3764,7 +3782,7 @@ static int napi_gro_complete(struct sk_buff *skb)
 	}
 
 out:
-	return netif_receive_skb(skb);
+	return netif_receive_skb_internal(skb);
 }
 
 /* napi->gro_list contains packets ordered by age.
@@ -3972,7 +3990,7 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
 {
 	switch (ret) {
 	case GRO_NORMAL:
-		if (netif_receive_skb(skb))
+		if (netif_receive_skb_internal(skb))
 			ret = GRO_DROP;
 		break;
 
@@ -3997,6 +4015,8 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
 
 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
+	trace_napi_gro_receive_entry(skb);
+
 	return napi_skb_finish(dev_gro_receive(napi, skb), skb);
 }
 EXPORT_SYMBOL(napi_gro_receive);
@@ -4030,7 +4050,7 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *
 {
 	switch (ret) {
 	case GRO_NORMAL:
-		if (netif_receive_skb(skb))
+		if (netif_receive_skb_internal(skb))
 			ret = GRO_DROP;
 		break;
 
@@ -4069,6 +4089,8 @@ gro_result_t napi_gro_frags(struct napi_struct *napi)
 	if (!skb)
 		return GRO_DROP;
 
+	trace_napi_gro_frags_entry(skb);
+
 	return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
 }
 EXPORT_SYMBOL(napi_gro_frags);
@@ -6621,11 +6643,11 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 
 	/* Process offline CPU's input_pkt_queue */
 	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
-		netif_rx(skb);
+		netif_rx_internal(skb);
 		input_queue_head_incr(oldsd);
 	}
 	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
-		netif_rx(skb);
+		netif_rx_internal(skb);
 		input_queue_head_incr(oldsd);
 	}
 
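The net/core/dev.c half of the patch splits netif_rx(), netif_rx_ni() and netif_receive_skb() into thin exported wrappers that fire the new entry tracepoints and *_internal() helpers for in-kernel callers (dev_forward_skb(), GRO completion and finish paths, the CPU-hotplug queue drain), so packets re-injected internally do not fire a second entry event. A minimal userspace sketch for watching one of the new events follows; it is not part of the patch and assumes tracefs is mounted at /sys/kernel/debug/tracing and that the caller may write the enable files.

/* Userspace sketch: enable netif_rx_entry and stream trace_pipe. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define TRACE_DIR "/sys/kernel/debug/tracing"

static void write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0 || write(fd, val, strlen(val)) < 0) {
		perror(path);
		exit(EXIT_FAILURE);
	}
	close(fd);
}

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	/* Turn on the netif_rx entry event added by this patch. */
	write_str(TRACE_DIR "/events/net/netif_rx_entry/enable", "1");

	/* Dump the TP_printk()-formatted records as they arrive. */
	fd = open(TRACE_DIR "/trace_pipe", O_RDONLY);
	if (fd < 0) {
		perror("trace_pipe");
		return EXIT_FAILURE;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);

	return 0;
}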