path: root/include/net/sock.h
author    Dmitry Torokhov <dtor_core@ameritech.net>  2006-06-26 01:31:38 -0400
committer Dmitry Torokhov <dtor_core@ameritech.net>  2006-06-26 01:31:38 -0400
commit    4854c7b27f0975a2b629f35ea3996d2968eb7c4f (patch)
tree      4102bdb70289764a2058aff0f907b13d7cf0e0d1 /include/net/sock.h
parent    3cbd5b32cb625f5c0f1b1476d154fac873dd49ce (diff)
parent    fcc18e83e1f6fd9fa6b333735bf0fcd530655511 (diff)
Merge rsync://rsync.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'include/net/sock.h')
-rw-r--r--  include/net/sock.h | 26 ++++++++++++++++++++------
1 file changed, 20 insertions(+), 6 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index c9fad6fb629b..2d8d6adf1616 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -40,7 +40,6 @@
 #ifndef _SOCK_H
 #define _SOCK_H
 
-#include <linux/config.h>
 #include <linux/list.h>
 #include <linux/timer.h>
 #include <linux/cache.h>
@@ -132,6 +131,7 @@ struct sock_common {
  * @sk_receive_queue: incoming packets
  * @sk_wmem_alloc: transmit queue bytes committed
  * @sk_write_queue: Packet sending queue
+ * @sk_async_wait_queue: DMA copied packets
  * @sk_omem_alloc: "o" is "option" or "other"
  * @sk_wmem_queued: persistent queue size
  * @sk_forward_alloc: space allocated forward
@@ -205,6 +205,7 @@ struct sock {
 	atomic_t		sk_omem_alloc;
 	struct sk_buff_head	sk_receive_queue;
 	struct sk_buff_head	sk_write_queue;
+	struct sk_buff_head	sk_async_wait_queue;
 	int			sk_wmem_queued;
 	int			sk_forward_alloc;
 	gfp_t			sk_allocation;
@@ -871,10 +872,7 @@ static inline int sk_filter(struct sock *sk, struct sk_buff *skb, int needlock)
 	if (filter) {
 		unsigned int pkt_len = sk_run_filter(skb, filter->insns,
 						     filter->len);
-		if (!pkt_len)
-			err = -EPERM;
-		else
-			skb_trim(skb, pkt_len);
+		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
 	}
 
 	if (needlock)
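
The sk_filter() hunk above folds the accept/drop branch into one expression and switches from skb_trim() to pskb_trim(), so a failure while trimming (pskb_trim() can fail, e.g. with -ENOMEM, when the trim touches paged data) now propagates through err instead of being silently dropped. A minimal userspace model of that error flow; model_trim(), model_filter() and the -ENOMEM case are illustrative stand-ins, not kernel code:

#include <stdio.h>
#include <errno.h>

/* stand-in for pskb_trim(): a trim that may itself fail */
static int model_trim(unsigned int *len, unsigned int pkt_len)
{
	if (pkt_len > *len)
		return -ENOMEM;	/* hypothetical trim failure */
	*len = pkt_len;
	return 0;
}

/* same shape as: err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM; */
static int model_filter(unsigned int *len, unsigned int pkt_len)
{
	return pkt_len ? model_trim(len, pkt_len) : -EPERM;
}

int main(void)
{
	unsigned int len = 1500;
	int err;

	err = model_filter(&len, 64);	/* filter accepts, trims to 64 */
	printf("accept+trim: err=%d len=%u\n", err, len);
	printf("drop:        err=%d\n", model_filter(&len, 0));
	return 0;
}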
@@ -1032,9 +1030,13 @@ static inline void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
 {
 	__sk_dst_set(sk, dst);
 	sk->sk_route_caps = dst->dev->features;
+	if (sk->sk_route_caps & NETIF_F_GSO)
+		sk->sk_route_caps |= NETIF_F_TSO;
 	if (sk->sk_route_caps & NETIF_F_TSO) {
 		if (sock_flag(sk, SOCK_NO_LARGESEND) || dst->header_len)
 			sk->sk_route_caps &= ~NETIF_F_TSO;
+		else
+			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
 	}
 }
 
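
The sk_setup_caps() hunk is pure flag arithmetic: a GSO-capable device now implies TSO, and a TSO bit that survives the SOCK_NO_LARGESEND/header_len checks pulls in scatter-gather and hardware checksumming. A small userspace model of that logic; the F_* values are illustrative, not the kernel's NETIF_F_* bits:

#include <stdio.h>

#define F_SG       0x01u
#define F_HW_CSUM  0x02u
#define F_TSO      0x04u
#define F_GSO      0x08u

static unsigned int setup_caps(unsigned int dev_features,
			       int no_largesend, int header_len)
{
	unsigned int caps = dev_features;

	if (caps & F_GSO)		/* GSO implies TSO */
		caps |= F_TSO;
	if (caps & F_TSO) {
		if (no_largesend || header_len)
			caps &= ~F_TSO;	/* strip TSO again */
		else
			caps |= F_SG | F_HW_CSUM;
	}
	return caps;
}

int main(void)
{
	/* GSO-only device, no restrictions: gains TSO, SG, HW_CSUM */
	printf("caps = %#x\n", setup_caps(F_GSO, 0, 0));
	/* GSO device behind an encapsulating dst: TSO stripped */
	printf("caps = %#x\n", setup_caps(F_GSO, 0, 8));
	return 0;
}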
@@ -1267,15 +1269,27 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
  * sk_eat_skb - Release a skb if it is no longer needed
  * @sk: socket to eat this skb from
  * @skb: socket buffer to eat
+ * @copied_early: flag indicating whether DMA operations copied this data early
  *
  * This routine must be called with interrupts disabled or with the socket
  * locked so that the sk_buff queue operation is ok.
  */
-static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
+#ifdef CONFIG_NET_DMA
+static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
+{
+	__skb_unlink(skb, &sk->sk_receive_queue);
+	if (!copied_early)
+		__kfree_skb(skb);
+	else
+		__skb_queue_tail(&sk->sk_async_wait_queue, skb);
+}
+#else
+static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
 {
 	__skb_unlink(skb, &sk->sk_receive_queue);
 	__kfree_skb(skb);
 }
+#endif
 
 extern void sock_enable_timestamp(struct sock *sk);
 extern int sock_get_timestamp(struct sock *, struct timeval __user *);
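
The sk_eat_skb() change gives every caller a third argument. Under CONFIG_NET_DMA a non-zero copied_early moves the skb to sk->sk_async_wait_queue instead of freeing it, so the buffer stays alive until the asynchronous DMA copy completes; without CONFIG_NET_DMA the argument is accepted but ignored. A runnable userspace model of the two branches, using counters in place of sk_buff queues; model_sock and model_eat_skb are illustrative names, not kernel API:

#include <stdio.h>

struct model_sock {
	int receive_queue;	/* skbs waiting in sk_receive_queue */
	int async_wait_queue;	/* skbs parked in sk_async_wait_queue */
	int freed;		/* skbs already released */
};

static void model_eat_skb(struct model_sock *sk, int copied_early)
{
	sk->receive_queue--;		/* __skb_unlink() */
	if (!copied_early)
		sk->freed++;		/* __kfree_skb() */
	else
		sk->async_wait_queue++;	/* __skb_queue_tail(async_wait) */
}

int main(void)
{
	struct model_sock sk = { .receive_queue = 2 };

	model_eat_skb(&sk, 0);	/* ordinary receive: skb freed */
	model_eat_skb(&sk, 1);	/* early DMA copy: skb deferred */
	printf("recv=%d async=%d freed=%d\n",
	       sk.receive_queue, sk.async_wait_queue, sk.freed);
	return 0;
}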