author     Linus Torvalds <torvalds@g5.osdl.org>   2006-03-31 15:52:30 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-03-31 15:52:30 -0500
commit     4b75679f60d0ce780609cbff249769b669f4fb69 (patch)
tree       2c8890020b5e7d340036acb01f73a6e53feb038d
parent     30c14e40ed85469f166b5effdab6705c73c5cd5e (diff)
parent     025be81e83043f20538dcced1e12c5f8d152fbdb (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
  [NET]: Allow skb headroom to be overridden
  [TCP]: Kill unused extern decl for tcp_v4_hash_connecting()
  [NET]: add SO_RCVBUF comment
  [NET]: Deinline some larger functions from netdevice.h
  [DCCP]: Use NULL for pointers, comfort sparse.
  [DECNET]: Fix refcount
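
The headroom change is the heart of this merge: the hard-coded 16-byte skb
headroom becomes an overridable NET_SKB_PAD constant. A minimal sketch of the
override pattern, assembled from the hunks below (powerpc value as in this
merge):

	/* An architecture header (powerpc here) defines the pad first ... */
	#define NET_SKB_PAD	L1_CACHE_BYTES	/* cacheline-sized, keeps DMA aligned */

	/* ... and <linux/skbuff.h> supplies the default only when unset: */
	#ifndef NET_SKB_PAD
	#define NET_SKB_PAD	16	/* a power of 2; never reduce below 16 */
	#endif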
-rw-r--r--  include/asm-powerpc/system.h |  5
-rw-r--r--  include/linux/netdevice.h    | 55
-rw-r--r--  include/linux/skbuff.h       | 29
-rw-r--r--  include/net/tcp.h            |  3
-rw-r--r--  net/core/dev.c               | 64
-rw-r--r--  net/core/sock.c              | 16
-rw-r--r--  net/dccp/feat.c              |  6
-rw-r--r--  net/decnet/dn_dev.c          |  2

8 files changed, 117 insertions(+), 63 deletions(-)
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 65f5a7b2646..d075725bf44 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -365,8 +365,11 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
  * powers of 2 writes until it reaches sufficient alignment).
  *
  * Based on this we disable the IP header alignment in network drivers.
+ * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
+ * cacheline alignment of buffers.
  */
 #define NET_IP_ALIGN	0
+#define NET_SKB_PAD	L1_CACHE_BYTES
 #endif
 
 #define arch_align_stack(x) (x)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 950dc55e519..40ccf8cc423 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -598,20 +598,7 @@ DECLARE_PER_CPU(struct softnet_data,softnet_data);
 
 #define HAVE_NETIF_QUEUE
 
-static inline void __netif_schedule(struct net_device *dev)
-{
-	if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
-		unsigned long flags;
-		struct softnet_data *sd;
-
-		local_irq_save(flags);
-		sd = &__get_cpu_var(softnet_data);
-		dev->next_sched = sd->output_queue;
-		sd->output_queue = dev;
-		raise_softirq_irqoff(NET_TX_SOFTIRQ);
-		local_irq_restore(flags);
-	}
-}
+extern void __netif_schedule(struct net_device *dev);
 
 static inline void netif_schedule(struct net_device *dev)
 {
@@ -675,13 +662,7 @@ static inline void dev_kfree_skb_irq(struct sk_buff *skb)
 /* Use this variant in places where it could be invoked
  * either from interrupt or non-interrupt context.
  */
-static inline void dev_kfree_skb_any(struct sk_buff *skb)
-{
-	if (in_irq() || irqs_disabled())
-		dev_kfree_skb_irq(skb);
-	else
-		dev_kfree_skb(skb);
-}
+extern void dev_kfree_skb_any(struct sk_buff *skb);
 
 #define HAVE_NETIF_RX 1
 extern int netif_rx(struct sk_buff *skb);
@@ -768,22 +749,9 @@ static inline int netif_device_present(struct net_device *dev)
 	return test_bit(__LINK_STATE_PRESENT, &dev->state);
 }
 
-static inline void netif_device_detach(struct net_device *dev)
-{
-	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
-	    netif_running(dev)) {
-		netif_stop_queue(dev);
-	}
-}
+extern void netif_device_detach(struct net_device *dev);
 
-static inline void netif_device_attach(struct net_device *dev)
-{
-	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
-	    netif_running(dev)) {
-		netif_wake_queue(dev);
-		__netdev_watchdog_up(dev);
-	}
-}
+extern void netif_device_attach(struct net_device *dev);
 
 /*
  * Network interface message level settings
@@ -851,20 +819,7 @@ static inline int netif_rx_schedule_prep(struct net_device *dev)
  * already been called and returned 1.
  */
 
-static inline void __netif_rx_schedule(struct net_device *dev)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	dev_hold(dev);
-	list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
-	if (dev->quota < 0)
-		dev->quota += dev->weight;
-	else
-		dev->quota = dev->weight;
-	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
-	local_irq_restore(flags);
-}
+extern void __netif_rx_schedule(struct net_device *dev);
 
 /* Try to reschedule poll. Called by irq handler. */
 
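
The deinlining is invisible to callers: the thin inline wrappers stay in
netdevice.h and still compile to a guarded call, only the slow path now lives
out of line in net/core/dev.c. A sketch of the caller-side shape, assuming the
wrapper still guards on __LINK_STATE_XOFF as kernels of this vintage do:

	static inline void netif_schedule(struct net_device *dev)
	{
		if (!test_bit(__LINK_STATE_XOFF, &dev->state))
			__netif_schedule(dev);	/* now a real call into net/core/dev.c */
	}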
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 613b9513f8b..c4619a428d9 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -941,6 +941,25 @@ static inline void skb_reserve(struct sk_buff *skb, int len)
 #define NET_IP_ALIGN	2
 #endif
 
+/*
+ * The networking layer reserves some headroom in skb data (via
+ * dev_alloc_skb). This is used to avoid having to reallocate skb data when
+ * the header has to grow. In the default case, if the header has to grow
+ * 16 bytes or less we avoid the reallocation.
+ *
+ * Unfortunately this headroom changes the DMA alignment of the resulting
+ * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
+ * on some architectures. An architecture can override this value,
+ * perhaps setting it to a cacheline in size (since that will maintain
+ * cacheline alignment of the DMA). It must be a power of 2.
+ *
+ * Various parts of the networking layer expect at least 16 bytes of
+ * headroom, you should not reduce this.
+ */
+#ifndef NET_SKB_PAD
+#define NET_SKB_PAD	16
+#endif
+
 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);
 
 static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
@@ -1030,9 +1049,9 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
 static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
 					      gfp_t gfp_mask)
 {
-	struct sk_buff *skb = alloc_skb(length + 16, gfp_mask);
+	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
 	if (likely(skb))
-		skb_reserve(skb, 16);
+		skb_reserve(skb, NET_SKB_PAD);
 	return skb;
 }
 #else
@@ -1070,13 +1089,15 @@ static inline struct sk_buff *dev_alloc_skb(unsigned int length)
  */
 static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
 {
-	int delta = (headroom > 16 ? headroom : 16) - skb_headroom(skb);
+	int delta = (headroom > NET_SKB_PAD ? headroom : NET_SKB_PAD) -
+			skb_headroom(skb);
 
 	if (delta < 0)
 		delta = 0;
 
 	if (delta || skb_cloned(skb))
-		return pskb_expand_head(skb, (delta + 15) & ~15, 0, GFP_ATOMIC);
+		return pskb_expand_head(skb, (delta + (NET_SKB_PAD-1)) &
+				~(NET_SKB_PAD-1), 0, GFP_ATOMIC);
 	return 0;
 }
 
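
The expression skb_cow() now feeds to pskb_expand_head() rounds the missing
headroom up to a multiple of NET_SKB_PAD, which is why the new comment insists
on a power of 2. A self-contained user-space sketch of that arithmetic (the
pad value of 64 is only an example):

	#include <assert.h>

	#define NET_SKB_PAD 64	/* e.g. L1_CACHE_BYTES on the powerpc change above */

	static unsigned int round_headroom(unsigned int delta)
	{
		/* same expression the patched skb_cow() uses */
		return (delta + (NET_SKB_PAD - 1)) & ~(NET_SKB_PAD - 1);
	}

	int main(void)
	{
		assert(round_headroom(1)  == 64);	/* rounded up */
		assert(round_headroom(64) == 64);	/* already aligned */
		assert(round_headroom(65) == 128);
		return 0;
	}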
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 9418f4d1afb..3c989db8a7a 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -405,9 +405,6 @@ extern int tcp_disconnect(struct sock *sk, int flags);
 
 extern void tcp_unhash(struct sock *sk);
 
-extern int tcp_v4_hash_connecting(struct sock *sk);
-
-
 /* From syncookies.c */
 extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 				    struct ip_options *opt);
diff --git a/net/core/dev.c b/net/core/dev.c
index a3ab11f3415..434220d093a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1080,6 +1080,70 @@ void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 	rcu_read_unlock();
 }
 
+
+void __netif_schedule(struct net_device *dev)
+{
+	if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
+		unsigned long flags;
+		struct softnet_data *sd;
+
+		local_irq_save(flags);
+		sd = &__get_cpu_var(softnet_data);
+		dev->next_sched = sd->output_queue;
+		sd->output_queue = dev;
+		raise_softirq_irqoff(NET_TX_SOFTIRQ);
+		local_irq_restore(flags);
+	}
+}
+EXPORT_SYMBOL(__netif_schedule);
+
+void __netif_rx_schedule(struct net_device *dev)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	dev_hold(dev);
+	list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
+	if (dev->quota < 0)
+		dev->quota += dev->weight;
+	else
+		dev->quota = dev->weight;
+	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__netif_rx_schedule);
+
+void dev_kfree_skb_any(struct sk_buff *skb)
+{
+	if (in_irq() || irqs_disabled())
+		dev_kfree_skb_irq(skb);
+	else
+		dev_kfree_skb(skb);
+}
+EXPORT_SYMBOL(dev_kfree_skb_any);
+
+
+/* Hot-plugging. */
+void netif_device_detach(struct net_device *dev)
+{
+	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
+	    netif_running(dev)) {
+		netif_stop_queue(dev);
+	}
+}
+EXPORT_SYMBOL(netif_device_detach);
+
+void netif_device_attach(struct net_device *dev)
+{
+	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
+	    netif_running(dev)) {
+		netif_wake_queue(dev);
+		__netdev_watchdog_up(dev);
+	}
+}
+EXPORT_SYMBOL(netif_device_attach);
+
+
 /*
  * Invalidate hardware checksum when packet is to be mangled, and
  * complete checksum manually on outgoing path.
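
dev_kfree_skb_any() earns its out-of-line copy because drivers call it from
both hard-IRQ and process context and it must dispatch at run time. A
hypothetical driver fragment showing the intended use (the mydrv_* names are
invented; the handler signature matches kernels of this era):

	static irqreturn_t mydrv_interrupt(int irq, void *dev_id, struct pt_regs *regs)
	{
		struct net_device *dev = dev_id;
		struct sk_buff *skb = mydrv_take_completed_tx(dev);	/* hypothetical */

		if (skb)
			dev_kfree_skb_any(skb);	/* safe here; plain dev_kfree_skb() is not */
		netif_wake_queue(dev);		/* TX ring has room again */
		return IRQ_HANDLED;
	}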
diff --git a/net/core/sock.c b/net/core/sock.c
index a96ea7dd0fc..ed2afdb9ea2 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -385,7 +385,21 @@ set_sndbuf:
 		val = sysctl_rmem_max;
 set_rcvbuf:
 	sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
-	/* FIXME: is this lower bound the right one? */
+	/*
+	 * We double it on the way in to account for
+	 * "struct sk_buff" etc. overhead. Applications
+	 * assume that the SO_RCVBUF setting they make will
+	 * allow that much actual data to be received on that
+	 * socket.
+	 *
+	 * Applications are unaware that "struct sk_buff" and
+	 * other overheads allocate from the receive buffer
+	 * during socket buffer allocation.
+	 *
+	 * And after considering the possible alternatives,
+	 * returning the value we actually used in getsockopt
+	 * is the most desirable behavior.
+	 */
 	if ((val * 2) < SOCK_MIN_RCVBUF)
 		sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
 	else
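
The behavior the new comment documents is observable from user space: the
kernel doubles the requested SO_RCVBUF (subject to the SOCK_MIN_RCVBUF floor
above) and getsockopt() reports the value actually in use. A runnable sketch:

	#include <stdio.h>
	#include <sys/socket.h>

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_DGRAM, 0);
		int val = 65536, out = 0;
		socklen_t len = sizeof(out);

		setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
		getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);
		printf("requested %d, kernel uses %d\n", val, out);	/* typically 2*val */
		return 0;
	}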
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index e3dd30d36c8..b39e2a59788 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -204,7 +204,7 @@ static int dccp_feat_reconcile(struct sock *sk, struct dccp_opt_pend *opt,
 	if (rc) {
 		kfree(opt->dccpop_sc->dccpoc_val);
 		kfree(opt->dccpop_sc);
-		opt->dccpop_sc = 0;
+		opt->dccpop_sc = NULL;
 		return rc;
 	}
 
@@ -322,7 +322,7 @@ static void dccp_feat_empty_confirm(struct dccp_minisock *dmsk,
 	opt->dccpop_type = type == DCCPO_CHANGE_L ? DCCPO_CONFIRM_R :
 						    DCCPO_CONFIRM_L;
 	opt->dccpop_feat = feature;
-	opt->dccpop_val	 = 0;
+	opt->dccpop_val	 = NULL;
 	opt->dccpop_len	 = 0;
 
 	/* change feature */
@@ -523,7 +523,7 @@ int dccp_feat_clone(struct sock *oldsk, struct sock *newsk)
 	 * once...
 	 */
 	/* the master socket no longer needs to worry about confirms */
-	opt->dccpop_sc = 0; /* it's not a memleak---new socket has it */
+	opt->dccpop_sc = NULL; /* it's not a memleak---new socket has it */
 
 	/* reset state for a new socket */
 	opt->dccpop_conf = 0;
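
The DCCP hunks are pure sparse hygiene: assigning the integer literal 0 to a
pointer is legal C but obscures intent, and sparse flags it. The pattern,
using the struct from the hunks above:

	struct dccp_opt_pend *p = 0;	/* sparse: "Using plain integer as NULL pointer" */
	struct dccp_opt_pend *q = NULL;	/* clean and self-documenting */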
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index d2ae9893ca1..a26ff9f4457 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -620,7 +620,7 @@ int dn_dev_set_default(struct net_device *dev, int force)
 	}
 	write_unlock(&dndev_lock);
 	if (old)
-		dev_put(dev);
+		dev_put(old);
 	return rv;
 }
 
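
The DECnet fix restores the invariant that every dev_hold(x) is balanced by a
dev_put() on the same device: the reference being dropped belongs to the
previous default device, so it must be released via old, not dev. An
abstracted sketch (the decnet_default variable name is illustrative, not the
actual symbol):

	dev_hold(dev);			/* take a reference on the new default */
	old = decnet_default;
	decnet_default = dev;
	if (old)
		dev_put(old);		/* was dev_put(dev): leaked old's ref, over-released dev */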