diff options
| author | Linus Torvalds <torvalds@g5.osdl.org> | 2006-03-31 15:52:30 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-03-31 15:52:30 -0500 |
| commit | 4b75679f60d0ce780609cbff249769b669f4fb69 (patch) | |
| tree | 2c8890020b5e7d340036acb01f73a6e53feb038d /include/linux | |
| parent | 30c14e40ed85469f166b5effdab6705c73c5cd5e (diff) | |
| parent | 025be81e83043f20538dcced1e12c5f8d152fbdb (diff) | |
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
[NET]: Allow skb headroom to be overridden
[TCP]: Kill unused extern decl for tcp_v4_hash_connecting()
[NET]: add SO_RCVBUF comment
[NET]: Deinline some larger functions from netdevice.h
[DCCP]: Use NULL for pointers, comfort sparse.
[DECNET]: Fix refcount
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/netdevice.h | 55 | ||||
| -rw-r--r-- | include/linux/skbuff.h | 29 |
2 files changed, 30 insertions(+), 54 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 950dc55e5192..40ccf8cc4239 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
| @@ -598,20 +598,7 @@ DECLARE_PER_CPU(struct softnet_data,softnet_data); | |||
| 598 | 598 | ||
| 599 | #define HAVE_NETIF_QUEUE | 599 | #define HAVE_NETIF_QUEUE |
| 600 | 600 | ||
| 601 | static inline void __netif_schedule(struct net_device *dev) | 601 | extern void __netif_schedule(struct net_device *dev); |
| 602 | { | ||
| 603 | if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) { | ||
| 604 | unsigned long flags; | ||
| 605 | struct softnet_data *sd; | ||
| 606 | |||
| 607 | local_irq_save(flags); | ||
| 608 | sd = &__get_cpu_var(softnet_data); | ||
| 609 | dev->next_sched = sd->output_queue; | ||
| 610 | sd->output_queue = dev; | ||
| 611 | raise_softirq_irqoff(NET_TX_SOFTIRQ); | ||
| 612 | local_irq_restore(flags); | ||
| 613 | } | ||
| 614 | } | ||
| 615 | 602 | ||
| 616 | static inline void netif_schedule(struct net_device *dev) | 603 | static inline void netif_schedule(struct net_device *dev) |
| 617 | { | 604 | { |
| @@ -675,13 +662,7 @@ static inline void dev_kfree_skb_irq(struct sk_buff *skb) | |||
| 675 | /* Use this variant in places where it could be invoked | 662 | /* Use this variant in places where it could be invoked |
| 676 | * either from interrupt or non-interrupt context. | 663 | * either from interrupt or non-interrupt context. |
| 677 | */ | 664 | */ |
| 678 | static inline void dev_kfree_skb_any(struct sk_buff *skb) | 665 | extern void dev_kfree_skb_any(struct sk_buff *skb); |
| 679 | { | ||
| 680 | if (in_irq() || irqs_disabled()) | ||
| 681 | dev_kfree_skb_irq(skb); | ||
| 682 | else | ||
| 683 | dev_kfree_skb(skb); | ||
| 684 | } | ||
| 685 | 666 | ||
| 686 | #define HAVE_NETIF_RX 1 | 667 | #define HAVE_NETIF_RX 1 |
| 687 | extern int netif_rx(struct sk_buff *skb); | 668 | extern int netif_rx(struct sk_buff *skb); |
| @@ -768,22 +749,9 @@ static inline int netif_device_present(struct net_device *dev) | |||
| 768 | return test_bit(__LINK_STATE_PRESENT, &dev->state); | 749 | return test_bit(__LINK_STATE_PRESENT, &dev->state); |
| 769 | } | 750 | } |
| 770 | 751 | ||
| 771 | static inline void netif_device_detach(struct net_device *dev) | 752 | extern void netif_device_detach(struct net_device *dev); |
| 772 | { | ||
| 773 | if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && | ||
| 774 | netif_running(dev)) { | ||
| 775 | netif_stop_queue(dev); | ||
| 776 | } | ||
| 777 | } | ||
| 778 | 753 | ||
| 779 | static inline void netif_device_attach(struct net_device *dev) | 754 | extern void netif_device_attach(struct net_device *dev); |
| 780 | { | ||
| 781 | if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && | ||
| 782 | netif_running(dev)) { | ||
| 783 | netif_wake_queue(dev); | ||
| 784 | __netdev_watchdog_up(dev); | ||
| 785 | } | ||
| 786 | } | ||
| 787 | 755 | ||
| 788 | /* | 756 | /* |
| 789 | * Network interface message level settings | 757 | * Network interface message level settings |
| @@ -851,20 +819,7 @@ static inline int netif_rx_schedule_prep(struct net_device *dev) | |||
| 851 | * already been called and returned 1. | 819 | * already been called and returned 1. |
| 852 | */ | 820 | */ |
| 853 | 821 | ||
| 854 | static inline void __netif_rx_schedule(struct net_device *dev) | 822 | extern void __netif_rx_schedule(struct net_device *dev); |
| 855 | { | ||
| 856 | unsigned long flags; | ||
| 857 | |||
| 858 | local_irq_save(flags); | ||
| 859 | dev_hold(dev); | ||
| 860 | list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list); | ||
| 861 | if (dev->quota < 0) | ||
| 862 | dev->quota += dev->weight; | ||
| 863 | else | ||
| 864 | dev->quota = dev->weight; | ||
| 865 | __raise_softirq_irqoff(NET_RX_SOFTIRQ); | ||
| 866 | local_irq_restore(flags); | ||
| 867 | } | ||
| 868 | 823 | ||
| 869 | /* Try to reschedule poll. Called by irq handler. */ | 824 | /* Try to reschedule poll. Called by irq handler. */ |
| 870 | 825 | ||
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 613b9513f8b9..c4619a428d9b 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
| @@ -941,6 +941,25 @@ static inline void skb_reserve(struct sk_buff *skb, int len) | |||
| 941 | #define NET_IP_ALIGN 2 | 941 | #define NET_IP_ALIGN 2 |
| 942 | #endif | 942 | #endif |
| 943 | 943 | ||
| 944 | /* | ||
| 945 | * The networking layer reserves some headroom in skb data (via | ||
| 946 | * dev_alloc_skb). This is used to avoid having to reallocate skb data when | ||
| 947 | * the header has to grow. In the default case, if the header has to grow | ||
| 948 | * 16 bytes or less we avoid the reallocation. | ||
| 949 | * | ||
| 950 | * Unfortunately this headroom changes the DMA alignment of the resulting | ||
| 951 | * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive | ||
| 952 | * on some architectures. An architecture can override this value, | ||
| 953 | * perhaps setting it to a cacheline in size (since that will maintain | ||
| 954 | * cacheline alignment of the DMA). It must be a power of 2. | ||
| 955 | * | ||
| 956 | * Various parts of the networking layer expect at least 16 bytes of | ||
| 957 | * headroom, you should not reduce this. | ||
| 958 | */ | ||
| 959 | #ifndef NET_SKB_PAD | ||
| 960 | #define NET_SKB_PAD 16 | ||
| 961 | #endif | ||
| 962 | |||
| 944 | extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc); | 963 | extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc); |
| 945 | 964 | ||
| 946 | static inline void __skb_trim(struct sk_buff *skb, unsigned int len) | 965 | static inline void __skb_trim(struct sk_buff *skb, unsigned int len) |
| @@ -1030,9 +1049,9 @@ static inline void __skb_queue_purge(struct sk_buff_head *list) | |||
| 1030 | static inline struct sk_buff *__dev_alloc_skb(unsigned int length, | 1049 | static inline struct sk_buff *__dev_alloc_skb(unsigned int length, |
| 1031 | gfp_t gfp_mask) | 1050 | gfp_t gfp_mask) |
| 1032 | { | 1051 | { |
| 1033 | struct sk_buff *skb = alloc_skb(length + 16, gfp_mask); | 1052 | struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask); |
| 1034 | if (likely(skb)) | 1053 | if (likely(skb)) |
| 1035 | skb_reserve(skb, 16); | 1054 | skb_reserve(skb, NET_SKB_PAD); |
| 1036 | return skb; | 1055 | return skb; |
| 1037 | } | 1056 | } |
| 1038 | #else | 1057 | #else |
| @@ -1070,13 +1089,15 @@ static inline struct sk_buff *dev_alloc_skb(unsigned int length) | |||
| 1070 | */ | 1089 | */ |
| 1071 | static inline int skb_cow(struct sk_buff *skb, unsigned int headroom) | 1090 | static inline int skb_cow(struct sk_buff *skb, unsigned int headroom) |
| 1072 | { | 1091 | { |
| 1073 | int delta = (headroom > 16 ? headroom : 16) - skb_headroom(skb); | 1092 | int delta = (headroom > NET_SKB_PAD ? headroom : NET_SKB_PAD) - |
| 1093 | skb_headroom(skb); | ||
| 1074 | 1094 | ||
| 1075 | if (delta < 0) | 1095 | if (delta < 0) |
| 1076 | delta = 0; | 1096 | delta = 0; |
| 1077 | 1097 | ||
| 1078 | if (delta || skb_cloned(skb)) | 1098 | if (delta || skb_cloned(skb)) |
| 1079 | return pskb_expand_head(skb, (delta + 15) & ~15, 0, GFP_ATOMIC); | 1099 | return pskb_expand_head(skb, (delta + (NET_SKB_PAD-1)) & |
| 1100 | ~(NET_SKB_PAD-1), 0, GFP_ATOMIC); | ||
| 1080 | return 0; | 1101 | return 0; |
| 1081 | } | 1102 | } |
| 1082 | 1103 | ||
