author    Denis Vlasenko <vda@ilport.com.ua>    2006-03-29 18:57:29 -0500
committer David S. Miller <davem@davemloft.net> 2006-03-29 18:57:29 -0500
commit    56079431b6ba163df8ba26b3eccc82379f0c0ce4 (patch)
tree      cdcfb1fdee55c567603ede007c7b0c91efe29b3a /include
parent    68907dad58cd7ef11536e1db6baeb98b20af91b2 (diff)
[NET]: Deinline some larger functions from netdevice.h
On an allyesconfig'ured kernel:

Size  Uses Wasted Name and definition
===== ==== ====== ================================================
   95  162  12075 netif_wake_queue      include/linux/netdevice.h
  129   86   9265 dev_kfree_skb_any     include/linux/netdevice.h
  127   56   5885 netif_device_attach   include/linux/netdevice.h
   73   86   4505 dev_kfree_skb_irq     include/linux/netdevice.h
   46   60   1534 netif_device_detach   include/linux/netdevice.h
  119   16   1485 __netif_rx_schedule   include/linux/netdevice.h
  143    5    492 netif_rx_schedule     include/linux/netdevice.h
   81    7    366 netif_schedule        include/linux/netdevice.h

netif_wake_queue is big because __netif_schedule is a big inline:

static inline void __netif_schedule(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
		unsigned long flags;
		struct softnet_data *sd;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		dev->next_sched = sd->output_queue;
		sd->output_queue = dev;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}

static inline void netif_wake_queue(struct net_device *dev)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
		__netif_schedule(dev);
}

By deinlining __netif_schedule we save a lot of text at each call site
of netif_wake_queue and netif_schedule. __netif_rx_schedule is also big,
and it makes more sense to keep both of them out of line.

The patch also deinlines dev_kfree_skb_any. We could deinline
dev_kfree_skb_irq instead... oh well.

netif_device_attach/detach are not hot paths, so we can deinline them too.

Signed-off-by: Denis Vlasenko <vda@ilport.com.ua>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
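Because the diffstat below is limited to include/, the new out-of-line
definitions do not appear in this diff. As a minimal sketch only, the moved
__netif_schedule would look roughly like this, assuming it lands in
net/core/dev.c with an EXPORT_SYMBOL so modular drivers that call
netif_wake_queue/netif_schedule still link (both the location and the
export are assumptions, not shown here):

/* Sketch only: the former inline body, carried over out of line
 * (assumed to live in net/core/dev.c, which is outside this diffstat).
 */
void __netif_schedule(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
		unsigned long flags;
		struct softnet_data *sd;

		/* Chain the device onto this CPU's output_queue list and
		 * raise the TX softirq, exactly as the old inline did.
		 */
		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		dev->next_sched = sd->output_queue;
		sd->output_queue = dev;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(__netif_schedule);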
Diffstat (limited to 'include')
-rw-r--r--  include/linux/netdevice.h | 55
1 file changed, 5 insertions(+), 50 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 950dc55e5192..40ccf8cc4239 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -598,20 +598,7 @@ DECLARE_PER_CPU(struct softnet_data,softnet_data);
 
 #define HAVE_NETIF_QUEUE
 
-static inline void __netif_schedule(struct net_device *dev)
-{
-	if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
-		unsigned long flags;
-		struct softnet_data *sd;
-
-		local_irq_save(flags);
-		sd = &__get_cpu_var(softnet_data);
-		dev->next_sched = sd->output_queue;
-		sd->output_queue = dev;
-		raise_softirq_irqoff(NET_TX_SOFTIRQ);
-		local_irq_restore(flags);
-	}
-}
+extern void __netif_schedule(struct net_device *dev);
 
 static inline void netif_schedule(struct net_device *dev)
 {
@@ -675,13 +662,7 @@ static inline void dev_kfree_skb_irq(struct sk_buff *skb)
 /* Use this variant in places where it could be invoked
  * either from interrupt or non-interrupt context.
  */
-static inline void dev_kfree_skb_any(struct sk_buff *skb)
-{
-	if (in_irq() || irqs_disabled())
-		dev_kfree_skb_irq(skb);
-	else
-		dev_kfree_skb(skb);
-}
+extern void dev_kfree_skb_any(struct sk_buff *skb);
 
 #define HAVE_NETIF_RX 1
 extern int netif_rx(struct sk_buff *skb);
@@ -768,22 +749,9 @@ static inline int netif_device_present(struct net_device *dev)
 	return test_bit(__LINK_STATE_PRESENT, &dev->state);
 }
 
-static inline void netif_device_detach(struct net_device *dev)
-{
-	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
-	    netif_running(dev)) {
-		netif_stop_queue(dev);
-	}
-}
+extern void netif_device_detach(struct net_device *dev);
 
-static inline void netif_device_attach(struct net_device *dev)
-{
-	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
-	    netif_running(dev)) {
-		netif_wake_queue(dev);
-		__netdev_watchdog_up(dev);
-	}
-}
+extern void netif_device_attach(struct net_device *dev);
 
 /*
  * Network interface message level settings
@@ -851,20 +819,7 @@ static inline int netif_rx_schedule_prep(struct net_device *dev)
  * already been called and returned 1.
  */
 
-static inline void __netif_rx_schedule(struct net_device *dev)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	dev_hold(dev);
-	list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
-	if (dev->quota < 0)
-		dev->quota += dev->weight;
-	else
-		dev->quota = dev->weight;
-	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
-	local_irq_restore(flags);
-}
+extern void __netif_rx_schedule(struct net_device *dev);
 
 /* Try to reschedule poll. Called by irq handler. */
 
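The same pattern applies to the other bodies removed above. For example,
dev_kfree_skb_any carried over out of line might look like the sketch
below (again assuming net/core/dev.c and an EXPORT_SYMBOL, neither of
which is visible in this include/-only diff):

/* Sketch only: body identical to the inline removed above. */
void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);	/* hard irq or irqs off: defer to softirq */
	else
		dev_kfree_skb(skb);	/* normal context: free directly */
}
EXPORT_SYMBOL(dev_kfree_skb_any);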