Diffstat (limited to 'include/net/sock.h'):
 include/net/sock.h | 91 +++++++++---------------------
 1 file changed, 27 insertions(+), 64 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index 03684e702d13..25c37e34bfdc 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -202,6 +202,15 @@ struct sock {
 	unsigned short		sk_type;
 	int			sk_rcvbuf;
 	socket_lock_t		sk_lock;
+	/*
+	 * The backlog queue is special, it is always used with
+	 * the per-socket spinlock held and requires low latency
+	 * access. Therefore we special case it's implementation.
+	 */
+	struct {
+		struct sk_buff *head;
+		struct sk_buff *tail;
+	} sk_backlog;
 	wait_queue_head_t	*sk_sleep;
 	struct dst_entry	*sk_dst_cache;
 	struct xfrm_policy	*sk_policy[2];
@@ -221,15 +230,6 @@ struct sock {
 	int			sk_rcvlowat;
 	unsigned long		sk_flags;
 	unsigned long		sk_lingertime;
-	/*
-	 * The backlog queue is special, it is always used with
-	 * the per-socket spinlock held and requires low latency
-	 * access. Therefore we special case it's implementation.
-	 */
-	struct {
-		struct sk_buff *head;
-		struct sk_buff *tail;
-	} sk_backlog;
 	struct sk_buff_head	sk_error_queue;
 	struct proto		*sk_prot_creator;
 	rwlock_t		sk_callback_lock;
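
The sk_backlog head/tail pair moves up so that it sits next to sk_lock: both are touched together on every backlog operation, and the queue is only ever used with the per-socket spinlock held. For context, the era's sk_add_backlog() helper (elsewhere in this header; quoted from memory, so treat as a sketch) appends under that lock with no further synchronization:

	/* Append one skb to the per-socket backlog.  The caller holds the
	 * socket spinlock, so plain pointer updates are sufficient. */
	#define sk_add_backlog(__sk, __skb)				\
	do {	if (!(__sk)->sk_backlog.tail) {				\
			/* empty queue: skb becomes head and tail */	\
			(__sk)->sk_backlog.head =			\
			     (__sk)->sk_backlog.tail = (__skb);		\
		} else {						\
			/* link after the current tail */		\
			((__sk)->sk_backlog.tail)->next = (__skb);	\
			(__sk)->sk_backlog.tail = (__skb);		\
		}							\
		(__skb)->next = NULL;					\
	} while (0)
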
@@ -244,7 +244,7 @@ struct sock {
 	struct sk_filter	*sk_filter;
 	void			*sk_protinfo;
 	struct timer_list	sk_timer;
-	struct timeval		sk_stamp;
+	ktime_t			sk_stamp;
 	struct socket		*sk_socket;
 	void			*sk_user_data;
 	struct page		*sk_sndmsg_page;
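
sk_stamp, the time of the last received packet, becomes a ktime_t: a 64-bit nanosecond value that preserves the full skb->tstamp resolution instead of truncating it to microseconds. Code that still reports a struct timeval converts at the edge; a sketch along the lines of sock_get_timestamp(), assuming the ktime_to_timeval() helper from <linux/ktime.h>:

	/* Sketch: hand the cached receive timestamp to userspace as a
	 * timeval.  tv_sec == -1 is the "never timestamped" sentinel
	 * set at socket init. */
	int example_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
	{
		struct timeval tv = ktime_to_timeval(sk->sk_stamp);

		if (tv.tv_sec == -1)
			return -ENOENT;
		return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
	}
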
@@ -390,6 +390,7 @@ enum sock_flags {
 	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
 	SOCK_DBG, /* %SO_DEBUG setting */
 	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
+	SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
 	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
 	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
 };
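
SOCK_RCVTSTAMPNS backs the new SO_TIMESTAMPNS socket option: like SO_TIMESTAMP, but the receive timestamp is delivered as a struct timespec with nanosecond resolution. From userspace it is consumed like this (a minimal sketch; error handling trimmed):

	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <sys/uio.h>
	#include <time.h>

	static void recv_with_ns_stamp(int fd)
	{
		char data[2048], ctrl[CMSG_SPACE(sizeof(struct timespec))];
		struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
		struct msghdr msg = {
			.msg_iov = &iov, .msg_iovlen = 1,
			.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
		};
		struct cmsghdr *cm;
		int on = 1;

		/* ask for nanosecond receive timestamps */
		setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPNS, &on, sizeof(on));
		if (recvmsg(fd, &msg, 0) < 0)
			return;
		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
			if (cm->cmsg_level == SOL_SOCKET &&
			    cm->cmsg_type == SCM_TIMESTAMPNS) {
				struct timespec ts;
				memcpy(&ts, CMSG_DATA(cm), sizeof(ts));
				printf("rx at %ld.%09ld\n",
				       (long)ts.tv_sec, ts.tv_nsec);
			}
	}
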
@@ -710,15 +711,6 @@ static inline void sk_stream_mem_reclaim(struct sock *sk)
 		__sk_stream_mem_reclaim(sk);
 }
 
-static inline void sk_stream_writequeue_purge(struct sock *sk)
-{
-	struct sk_buff *skb;
-
-	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
-		sk_stream_free_skb(sk, skb);
-	sk_stream_mem_reclaim(sk);
-}
-
 static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
 {
 	return (int)skb->truesize <= sk->sk_forward_alloc ||
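
sk_stream_writequeue_purge() leaves the generic header as part of the same series that moves TCP's write-queue handling behind protocol-specific helpers. The operation itself is unchanged; an equivalent open-coded purge looks like this (a sketch of what the relocated helper does, not a quote of the replacement):

	/* Drop every queued skb and return the charged memory to the
	 * socket's forward-alloc pool. */
	static inline void write_queue_purge(struct sock *sk)
	{
		struct sk_buff *skb;

		while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
			sk_stream_free_skb(sk, skb);
		sk_stream_mem_reclaim(sk);
	}
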
@@ -1083,19 +1075,7 @@ static inline int sk_can_gso(const struct sock *sk)
 	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
 }
 
-static inline void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
-{
-	__sk_dst_set(sk, dst);
-	sk->sk_route_caps = dst->dev->features;
-	if (sk->sk_route_caps & NETIF_F_GSO)
-		sk->sk_route_caps |= NETIF_F_GSO_MASK;
-	if (sk_can_gso(sk)) {
-		if (dst->header_len)
-			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
-		else
-			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
-	}
-}
+extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
 
 static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb)
 {
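
sk_setup_caps() is un-inlined: the removed body shows what it does (cache the route via __sk_dst_set() and derive sk_route_caps from dst->dev->features, withholding GSO when the path needs extra header room), and since it is called from many protocols, one out-of-line copy is the better trade. Callers are unchanged; the usual pattern is a sketch like:

	/* Sketch: a protocol's connect path, after a successful route
	 * lookup, caches the dst and computes the socket's hardware
	 * capabilities in one call.  Function name is illustrative. */
	static void example_finish_connect(struct sock *sk, struct dst_entry *dst)
	{
		sk_setup_caps(sk, dst);
		/* From here on, sk_can_gso(sk) and NETIF_F_SG in
		 * sk->sk_route_caps steer how outgoing skbs are built. */
	}
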
@@ -1256,18 +1236,6 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk)
 	return page;
 }
 
-#define sk_stream_for_retrans_queue(skb, sk)				\
-		for (skb = (sk)->sk_write_queue.next;			\
-		     (skb != (sk)->sk_send_head) &&			\
-		     (skb != (struct sk_buff *)&(sk)->sk_write_queue);	\
-		     skb = skb->next)
-
-/*from STCP for fast SACK Process*/
-#define sk_stream_for_retrans_queue_from(skb, sk)			\
-		for (; (skb != (sk)->sk_send_head) &&			\
-		     (skb != (struct sk_buff *)&(sk)->sk_write_queue);	\
-		     skb = skb->next)
-
 /*
  * 	Default write policy as shown to user space via poll/select/SIGIO
  */
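
Both removed iterators encode the write queue's key invariant: skbs from the queue head up to (but excluding) sk_send_head have been transmitted and are retransmission candidates, while sk_send_head onward is still unsent. A loop with the same termination conditions, spelled out (illustrative only, not the replacement API):

	/* Sketch: visit each transmitted skb, stopping at the first unsent
	 * one (sk_send_head) or when the walk returns to the queue's own
	 * list head (everything queued has been sent). */
	static void example_scan_retrans(struct sock *sk)
	{
		struct sk_buff *skb;

		for (skb = sk->sk_write_queue.next;
		     skb != sk->sk_send_head &&
		     skb != (struct sk_buff *)&sk->sk_write_queue;
		     skb = skb->next) {
			/* e.g. SACK bookkeeping on skb */;
		}
	}
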
@@ -1278,7 +1246,7 @@ static inline int sock_writeable(const struct sock *sk)
 
 static inline gfp_t gfp_any(void)
 {
-	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
+	return in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
 }
 
 static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
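
in_softirq() only catches bottom-half context, so the old test would pick the sleeping GFP_KERNEL for code running in hardirq context or, on preemptible kernels, inside a spinlocked region; in_atomic() covers those cases as well. gfp_any() exists precisely for paths shared between process and atomic context:

	/* Sketch: one allocation site serving both contexts; the caller
	 * must tolerate GFP_ATOMIC failures. */
	struct sk_buff *skb = alloc_skb(len, gfp_any());
	if (skb == NULL)
		return -ENOBUFS;
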
@@ -1304,22 +1272,18 @@ static inline int sock_intr_errno(long timeo)
 	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
 }
 
+extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
+	struct sk_buff *skb);
+
 static __inline__ void
 sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
 {
-	struct timeval stamp;
+	ktime_t kt = skb->tstamp;
 
-	skb_get_timestamp(skb, &stamp);
-	if (sock_flag(sk, SOCK_RCVTSTAMP)) {
-		/* Race occurred between timestamp enabling and packet
-		   receiving. Fill in the current time for now. */
-		if (stamp.tv_sec == 0)
-			do_gettimeofday(&stamp);
-		skb_set_timestamp(skb, &stamp);
-		put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP, sizeof(struct timeval),
-			 &stamp);
-	} else
-		sk->sk_stamp = stamp;
+	if (sock_flag(sk, SOCK_RCVTSTAMP))
+		__sock_recv_timestamp(msg, sk, skb);
+	else
+		sk->sk_stamp = kt;
 }
 
 /**
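
The timestamp delivery slow path moves out of line: the inline fast path now only stores skb->tstamp when no timestamping option is set, and __sock_recv_timestamp() (in net/core/sock.c) builds the control message. Presumably, given the new SOCK_RCVTSTAMPNS flag, it looks roughly like this sketch (reconstructed from the contemporaneous implementation; details may differ):

	void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
				   struct sk_buff *skb)
	{
		ktime_t kt = skb->tstamp;

		/* Raced with timestamp enabling: fill in the current time. */
		if (kt.tv64 == 0)
			kt = ktime_get_real();
		sk->sk_stamp = kt;

		if (sock_flag(sk, SOCK_RCVTSTAMPNS)) {
			struct timespec ts = ktime_to_timespec(kt);
			put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS,
				 sizeof(ts), &ts);
		} else {
			struct timeval tv = ktime_to_timeval(kt);
			put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
				 sizeof(tv), &tv);
		}
	}
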
@@ -1350,18 +1314,17 @@ static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_e
 
 extern void sock_enable_timestamp(struct sock *sk);
 extern int sock_get_timestamp(struct sock *, struct timeval __user *);
+extern int sock_get_timestampns(struct sock *, struct timespec __user *);
 
 /*
  *	Enable debug/info messages
  */
+extern int net_msg_warn;
+#define NETDEBUG(fmt, args...) \
+	do { if (net_msg_warn) printk(fmt,##args); } while (0)
 
-#ifdef CONFIG_NETDEBUG
-#define NETDEBUG(fmt, args...)	printk(fmt,##args)
-#define LIMIT_NETDEBUG(fmt, args...) do { if (net_ratelimit()) printk(fmt,##args); } while(0)
-#else
-#define NETDEBUG(fmt, args...)	do { } while (0)
-#define LIMIT_NETDEBUG(fmt, args...) do { } while(0)
-#endif
+#define LIMIT_NETDEBUG(fmt, args...) \
+	do { if (net_msg_warn && net_ratelimit()) printk(fmt,##args); } while(0)
 
 /*
  * Macros for sleeping on a socket. Use them like this: