summary | refs | log | tree | commit | diff | stats
path: root/include/net/sock.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/net/sock.h')
-rw-r--r--  include/net/sock.h  282
1 files changed, 137 insertions, 145 deletions
diff --git a/include/net/sock.h b/include/net/sock.h
index 1d37a8086bed..e3a18ff0c38b 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -156,7 +156,7 @@ typedef __u64 __bitwise __addrpair;
156 */ 156 */
157struct sock_common { 157struct sock_common {
158 /* skc_daddr and skc_rcv_saddr must be grouped on a 8 bytes aligned 158 /* skc_daddr and skc_rcv_saddr must be grouped on a 8 bytes aligned
159 * address on 64bit arches : cf INET_MATCH() and INET_TW_MATCH() 159 * address on 64bit arches : cf INET_MATCH()
160 */ 160 */
161 union { 161 union {
162 __addrpair skc_addrpair; 162 __addrpair skc_addrpair;
@@ -191,6 +191,12 @@ struct sock_common {
191#ifdef CONFIG_NET_NS 191#ifdef CONFIG_NET_NS
192 struct net *skc_net; 192 struct net *skc_net;
193#endif 193#endif
194
195#if IS_ENABLED(CONFIG_IPV6)
196 struct in6_addr skc_v6_daddr;
197 struct in6_addr skc_v6_rcv_saddr;
198#endif
199
194 /* 200 /*
195 * fields between dontcopy_begin/dontcopy_end 201 * fields between dontcopy_begin/dontcopy_end
196 * are not copied in sock_copy() 202 * are not copied in sock_copy()
@@ -218,7 +224,7 @@ struct cg_proto;
218 * @sk_lock: synchronizer 224 * @sk_lock: synchronizer
219 * @sk_rcvbuf: size of receive buffer in bytes 225 * @sk_rcvbuf: size of receive buffer in bytes
220 * @sk_wq: sock wait queue and async head 226 * @sk_wq: sock wait queue and async head
221 * @sk_rx_dst: receive input route used by early tcp demux 227 * @sk_rx_dst: receive input route used by early demux
222 * @sk_dst_cache: destination cache 228 * @sk_dst_cache: destination cache
223 * @sk_dst_lock: destination cache lock 229 * @sk_dst_lock: destination cache lock
224 * @sk_policy: flow policy 230 * @sk_policy: flow policy
@@ -233,6 +239,7 @@ struct cg_proto;
233 * @sk_ll_usec: usecs to busypoll when there is no data 239 * @sk_ll_usec: usecs to busypoll when there is no data
234 * @sk_allocation: allocation mode 240 * @sk_allocation: allocation mode
235 * @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler) 241 * @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
242 * @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
236 * @sk_sndbuf: size of send buffer in bytes 243 * @sk_sndbuf: size of send buffer in bytes
237 * @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, 244 * @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
238 * %SO_OOBINLINE settings, %SO_TIMESTAMPING settings 245 * %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
@@ -299,6 +306,12 @@ struct sock {
299#define sk_dontcopy_begin __sk_common.skc_dontcopy_begin 306#define sk_dontcopy_begin __sk_common.skc_dontcopy_begin
300#define sk_dontcopy_end __sk_common.skc_dontcopy_end 307#define sk_dontcopy_end __sk_common.skc_dontcopy_end
301#define sk_hash __sk_common.skc_hash 308#define sk_hash __sk_common.skc_hash
309#define sk_portpair __sk_common.skc_portpair
310#define sk_num __sk_common.skc_num
311#define sk_dport __sk_common.skc_dport
312#define sk_addrpair __sk_common.skc_addrpair
313#define sk_daddr __sk_common.skc_daddr
314#define sk_rcv_saddr __sk_common.skc_rcv_saddr
302#define sk_family __sk_common.skc_family 315#define sk_family __sk_common.skc_family
303#define sk_state __sk_common.skc_state 316#define sk_state __sk_common.skc_state
304#define sk_reuse __sk_common.skc_reuse 317#define sk_reuse __sk_common.skc_reuse
@@ -307,6 +320,9 @@ struct sock {
307#define sk_bind_node __sk_common.skc_bind_node 320#define sk_bind_node __sk_common.skc_bind_node
308#define sk_prot __sk_common.skc_prot 321#define sk_prot __sk_common.skc_prot
309#define sk_net __sk_common.skc_net 322#define sk_net __sk_common.skc_net
323#define sk_v6_daddr __sk_common.skc_v6_daddr
324#define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr
325
310 socket_lock_t sk_lock; 326 socket_lock_t sk_lock;
311 struct sk_buff_head sk_receive_queue; 327 struct sk_buff_head sk_receive_queue;
312 /* 328 /*
@@ -363,6 +379,7 @@ struct sock {
363 int sk_wmem_queued; 379 int sk_wmem_queued;
364 gfp_t sk_allocation; 380 gfp_t sk_allocation;
365 u32 sk_pacing_rate; /* bytes per second */ 381 u32 sk_pacing_rate; /* bytes per second */
382 u32 sk_max_pacing_rate;
366 netdev_features_t sk_route_caps; 383 netdev_features_t sk_route_caps;
367 netdev_features_t sk_route_nocaps; 384 netdev_features_t sk_route_nocaps;
368 int sk_gso_type; 385 int sk_gso_type;
@@ -751,7 +768,7 @@ static inline int sk_stream_wspace(const struct sock *sk)
751 return sk->sk_sndbuf - sk->sk_wmem_queued; 768 return sk->sk_sndbuf - sk->sk_wmem_queued;
752} 769}
753 770
754extern void sk_stream_write_space(struct sock *sk); 771void sk_stream_write_space(struct sock *sk);
755 772
756/* OOB backlog add */ 773/* OOB backlog add */
757static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) 774static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
@@ -793,7 +810,7 @@ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *s
793 return 0; 810 return 0;
794} 811}
795 812
796extern int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb); 813int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
797 814
798static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) 815static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
799{ 816{
@@ -858,15 +875,15 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)
858 __rc; \ 875 __rc; \
859 }) 876 })
860 877
861extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p); 878int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
862extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p); 879int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
863extern void sk_stream_wait_close(struct sock *sk, long timeo_p); 880void sk_stream_wait_close(struct sock *sk, long timeo_p);
864extern int sk_stream_error(struct sock *sk, int flags, int err); 881int sk_stream_error(struct sock *sk, int flags, int err);
865extern void sk_stream_kill_queues(struct sock *sk); 882void sk_stream_kill_queues(struct sock *sk);
866extern void sk_set_memalloc(struct sock *sk); 883void sk_set_memalloc(struct sock *sk);
867extern void sk_clear_memalloc(struct sock *sk); 884void sk_clear_memalloc(struct sock *sk);
868 885
869extern int sk_wait_data(struct sock *sk, long *timeo); 886int sk_wait_data(struct sock *sk, long *timeo);
870 887
871struct request_sock_ops; 888struct request_sock_ops;
872struct timewait_sock_ops; 889struct timewait_sock_ops;
@@ -1019,10 +1036,10 @@ enum cg_proto_flags {
1019 1036
1020struct cg_proto { 1037struct cg_proto {
1021 void (*enter_memory_pressure)(struct sock *sk); 1038 void (*enter_memory_pressure)(struct sock *sk);
1022 struct res_counter *memory_allocated; /* Current allocated memory. */ 1039 struct res_counter memory_allocated; /* Current allocated memory. */
1023 struct percpu_counter *sockets_allocated; /* Current number of sockets. */ 1040 struct percpu_counter sockets_allocated; /* Current number of sockets. */
1024 int *memory_pressure; 1041 int memory_pressure;
1025 long *sysctl_mem; 1042 long sysctl_mem[3];
1026 unsigned long flags; 1043 unsigned long flags;
1027 /* 1044 /*
1028 * memcg field is used to find which memcg we belong directly 1045 * memcg field is used to find which memcg we belong directly
@@ -1036,8 +1053,8 @@ struct cg_proto {
1036 struct mem_cgroup *memcg; 1053 struct mem_cgroup *memcg;
1037}; 1054};
1038 1055
1039extern int proto_register(struct proto *prot, int alloc_slab); 1056int proto_register(struct proto *prot, int alloc_slab);
1040extern void proto_unregister(struct proto *prot); 1057void proto_unregister(struct proto *prot);
1041 1058
1042static inline bool memcg_proto_active(struct cg_proto *cg_proto) 1059static inline bool memcg_proto_active(struct cg_proto *cg_proto)
1043{ 1060{
@@ -1118,7 +1135,7 @@ static inline bool sk_under_memory_pressure(const struct sock *sk)
1118 return false; 1135 return false;
1119 1136
1120 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) 1137 if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1121 return !!*sk->sk_cgrp->memory_pressure; 1138 return !!sk->sk_cgrp->memory_pressure;
1122 1139
1123 return !!*sk->sk_prot->memory_pressure; 1140 return !!*sk->sk_prot->memory_pressure;
1124} 1141}
@@ -1138,8 +1155,8 @@ static inline void sk_leave_memory_pressure(struct sock *sk)
1138 struct proto *prot = sk->sk_prot; 1155 struct proto *prot = sk->sk_prot;
1139 1156
1140 for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) 1157 for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
1141 if (*cg_proto->memory_pressure) 1158 if (cg_proto->memory_pressure)
1142 *cg_proto->memory_pressure = 0; 1159 cg_proto->memory_pressure = 0;
1143 } 1160 }
1144 1161
1145} 1162}
@@ -1175,7 +1192,7 @@ static inline void memcg_memory_allocated_add(struct cg_proto *prot,
1175 struct res_counter *fail; 1192 struct res_counter *fail;
1176 int ret; 1193 int ret;
1177 1194
1178 ret = res_counter_charge_nofail(prot->memory_allocated, 1195 ret = res_counter_charge_nofail(&prot->memory_allocated,
1179 amt << PAGE_SHIFT, &fail); 1196 amt << PAGE_SHIFT, &fail);
1180 if (ret < 0) 1197 if (ret < 0)
1181 *parent_status = OVER_LIMIT; 1198 *parent_status = OVER_LIMIT;
@@ -1184,13 +1201,13 @@ static inline void memcg_memory_allocated_add(struct cg_proto *prot,
1184static inline void memcg_memory_allocated_sub(struct cg_proto *prot, 1201static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
1185 unsigned long amt) 1202 unsigned long amt)
1186{ 1203{
1187 res_counter_uncharge(prot->memory_allocated, amt << PAGE_SHIFT); 1204 res_counter_uncharge(&prot->memory_allocated, amt << PAGE_SHIFT);
1188} 1205}
1189 1206
1190static inline u64 memcg_memory_allocated_read(struct cg_proto *prot) 1207static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
1191{ 1208{
1192 u64 ret; 1209 u64 ret;
1193 ret = res_counter_read_u64(prot->memory_allocated, RES_USAGE); 1210 ret = res_counter_read_u64(&prot->memory_allocated, RES_USAGE);
1194 return ret >> PAGE_SHIFT; 1211 return ret >> PAGE_SHIFT;
1195} 1212}
1196 1213
@@ -1238,7 +1255,7 @@ static inline void sk_sockets_allocated_dec(struct sock *sk)
1238 struct cg_proto *cg_proto = sk->sk_cgrp; 1255 struct cg_proto *cg_proto = sk->sk_cgrp;
1239 1256
1240 for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) 1257 for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
1241 percpu_counter_dec(cg_proto->sockets_allocated); 1258 percpu_counter_dec(&cg_proto->sockets_allocated);
1242 } 1259 }
1243 1260
1244 percpu_counter_dec(prot->sockets_allocated); 1261 percpu_counter_dec(prot->sockets_allocated);
@@ -1252,7 +1269,7 @@ static inline void sk_sockets_allocated_inc(struct sock *sk)
1252 struct cg_proto *cg_proto = sk->sk_cgrp; 1269 struct cg_proto *cg_proto = sk->sk_cgrp;
1253 1270
1254 for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) 1271 for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
1255 percpu_counter_inc(cg_proto->sockets_allocated); 1272 percpu_counter_inc(&cg_proto->sockets_allocated);
1256 } 1273 }
1257 1274
1258 percpu_counter_inc(prot->sockets_allocated); 1275 percpu_counter_inc(prot->sockets_allocated);
@@ -1264,7 +1281,7 @@ sk_sockets_allocated_read_positive(struct sock *sk)
1264 struct proto *prot = sk->sk_prot; 1281 struct proto *prot = sk->sk_prot;
1265 1282
1266 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) 1283 if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1267 return percpu_counter_read_positive(sk->sk_cgrp->sockets_allocated); 1284 return percpu_counter_read_positive(&sk->sk_cgrp->sockets_allocated);
1268 1285
1269 return percpu_counter_read_positive(prot->sockets_allocated); 1286 return percpu_counter_read_positive(prot->sockets_allocated);
1270} 1287}
@@ -1292,8 +1309,8 @@ proto_memory_pressure(struct proto *prot)
1292 1309
1293#ifdef CONFIG_PROC_FS 1310#ifdef CONFIG_PROC_FS
1294/* Called with local bh disabled */ 1311/* Called with local bh disabled */
1295extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc); 1312void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
1296extern int sock_prot_inuse_get(struct net *net, struct proto *proto); 1313int sock_prot_inuse_get(struct net *net, struct proto *proto);
1297#else 1314#else
1298static inline void sock_prot_inuse_add(struct net *net, struct proto *prot, 1315static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
1299 int inc) 1316 int inc)
@@ -1369,8 +1386,8 @@ static inline struct inode *SOCK_INODE(struct socket *socket)
1369/* 1386/*
1370 * Functions for memory accounting 1387 * Functions for memory accounting
1371 */ 1388 */
1372extern int __sk_mem_schedule(struct sock *sk, int size, int kind); 1389int __sk_mem_schedule(struct sock *sk, int size, int kind);
1373extern void __sk_mem_reclaim(struct sock *sk); 1390void __sk_mem_reclaim(struct sock *sk);
1374 1391
1375#define SK_MEM_QUANTUM ((int)PAGE_SIZE) 1392#define SK_MEM_QUANTUM ((int)PAGE_SIZE)
1376#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM) 1393#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
@@ -1478,14 +1495,14 @@ do { \
1478 lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \ 1495 lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
1479} while (0) 1496} while (0)
1480 1497
1481extern void lock_sock_nested(struct sock *sk, int subclass); 1498void lock_sock_nested(struct sock *sk, int subclass);
1482 1499
1483static inline void lock_sock(struct sock *sk) 1500static inline void lock_sock(struct sock *sk)
1484{ 1501{
1485 lock_sock_nested(sk, 0); 1502 lock_sock_nested(sk, 0);
1486} 1503}
1487 1504
1488extern void release_sock(struct sock *sk); 1505void release_sock(struct sock *sk);
1489 1506
1490/* BH context may only use the following locking interface. */ 1507/* BH context may only use the following locking interface. */
1491#define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock)) 1508#define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock))
@@ -1494,7 +1511,7 @@ extern void release_sock(struct sock *sk);
1494 SINGLE_DEPTH_NESTING) 1511 SINGLE_DEPTH_NESTING)
1495#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock)) 1512#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))
1496 1513
1497extern bool lock_sock_fast(struct sock *sk); 1514bool lock_sock_fast(struct sock *sk);
1498/** 1515/**
1499 * unlock_sock_fast - complement of lock_sock_fast 1516 * unlock_sock_fast - complement of lock_sock_fast
1500 * @sk: socket 1517 * @sk: socket
@@ -1512,108 +1529,84 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow)
1512} 1529}
1513 1530
1514 1531
1515extern struct sock *sk_alloc(struct net *net, int family, 1532struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1516 gfp_t priority, 1533 struct proto *prot);
1517 struct proto *prot); 1534void sk_free(struct sock *sk);
1518extern void sk_free(struct sock *sk); 1535void sk_release_kernel(struct sock *sk);
1519extern void sk_release_kernel(struct sock *sk); 1536struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
1520extern struct sock *sk_clone_lock(const struct sock *sk, 1537
1521 const gfp_t priority); 1538struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1522 1539 gfp_t priority);
1523extern struct sk_buff *sock_wmalloc(struct sock *sk, 1540struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
1524 unsigned long size, int force, 1541 gfp_t priority);
1525 gfp_t priority); 1542void sock_wfree(struct sk_buff *skb);
1526extern struct sk_buff *sock_rmalloc(struct sock *sk, 1543void skb_orphan_partial(struct sk_buff *skb);
1527 unsigned long size, int force, 1544void sock_rfree(struct sk_buff *skb);
1528 gfp_t priority); 1545void sock_edemux(struct sk_buff *skb);
1529extern void sock_wfree(struct sk_buff *skb); 1546
1530extern void skb_orphan_partial(struct sk_buff *skb); 1547int sock_setsockopt(struct socket *sock, int level, int op,
1531extern void sock_rfree(struct sk_buff *skb); 1548 char __user *optval, unsigned int optlen);
1532extern void sock_edemux(struct sk_buff *skb); 1549
1533 1550int sock_getsockopt(struct socket *sock, int level, int op,
1534extern int sock_setsockopt(struct socket *sock, int level, 1551 char __user *optval, int __user *optlen);
1535 int op, char __user *optval, 1552struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1536 unsigned int optlen); 1553 int noblock, int *errcode);
1537 1554struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1538extern int sock_getsockopt(struct socket *sock, int level, 1555 unsigned long data_len, int noblock,
1539 int op, char __user *optval, 1556 int *errcode, int max_page_order);
1540 int __user *optlen); 1557void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
1541extern struct sk_buff *sock_alloc_send_skb(struct sock *sk, 1558void sock_kfree_s(struct sock *sk, void *mem, int size);
1542 unsigned long size, 1559void sk_send_sigurg(struct sock *sk);
1543 int noblock,
1544 int *errcode);
1545extern struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
1546 unsigned long header_len,
1547 unsigned long data_len,
1548 int noblock,
1549 int *errcode,
1550 int max_page_order);
1551extern void *sock_kmalloc(struct sock *sk, int size,
1552 gfp_t priority);
1553extern void sock_kfree_s(struct sock *sk, void *mem, int size);
1554extern void sk_send_sigurg(struct sock *sk);
1555 1560
1556/* 1561/*
1557 * Functions to fill in entries in struct proto_ops when a protocol 1562 * Functions to fill in entries in struct proto_ops when a protocol
1558 * does not implement a particular function. 1563 * does not implement a particular function.
1559 */ 1564 */
1560extern int sock_no_bind(struct socket *, 1565int sock_no_bind(struct socket *, struct sockaddr *, int);
1561 struct sockaddr *, int); 1566int sock_no_connect(struct socket *, struct sockaddr *, int, int);
1562extern int sock_no_connect(struct socket *, 1567int sock_no_socketpair(struct socket *, struct socket *);
1563 struct sockaddr *, int, int); 1568int sock_no_accept(struct socket *, struct socket *, int);
1564extern int sock_no_socketpair(struct socket *, 1569int sock_no_getname(struct socket *, struct sockaddr *, int *, int);
1565 struct socket *); 1570unsigned int sock_no_poll(struct file *, struct socket *,
1566extern int sock_no_accept(struct socket *, 1571 struct poll_table_struct *);
1567 struct socket *, int); 1572int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
1568extern int sock_no_getname(struct socket *, 1573int sock_no_listen(struct socket *, int);
1569 struct sockaddr *, int *, int); 1574int sock_no_shutdown(struct socket *, int);
1570extern unsigned int sock_no_poll(struct file *, struct socket *, 1575int sock_no_getsockopt(struct socket *, int , int, char __user *, int __user *);
1571 struct poll_table_struct *); 1576int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int);
1572extern int sock_no_ioctl(struct socket *, unsigned int, 1577int sock_no_sendmsg(struct kiocb *, struct socket *, struct msghdr *, size_t);
1573 unsigned long); 1578int sock_no_recvmsg(struct kiocb *, struct socket *, struct msghdr *, size_t,
1574extern int sock_no_listen(struct socket *, int); 1579 int);
1575extern int sock_no_shutdown(struct socket *, int); 1580int sock_no_mmap(struct file *file, struct socket *sock,
1576extern int sock_no_getsockopt(struct socket *, int , int, 1581 struct vm_area_struct *vma);
1577 char __user *, int __user *); 1582ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
1578extern int sock_no_setsockopt(struct socket *, int, int, 1583 size_t size, int flags);
1579 char __user *, unsigned int);
1580extern int sock_no_sendmsg(struct kiocb *, struct socket *,
1581 struct msghdr *, size_t);
1582extern int sock_no_recvmsg(struct kiocb *, struct socket *,
1583 struct msghdr *, size_t, int);
1584extern int sock_no_mmap(struct file *file,
1585 struct socket *sock,
1586 struct vm_area_struct *vma);
1587extern ssize_t sock_no_sendpage(struct socket *sock,
1588 struct page *page,
1589 int offset, size_t size,
1590 int flags);
1591 1584
1592/* 1585/*
1593 * Functions to fill in entries in struct proto_ops when a protocol 1586 * Functions to fill in entries in struct proto_ops when a protocol
1594 * uses the inet style. 1587 * uses the inet style.
1595 */ 1588 */
1596extern int sock_common_getsockopt(struct socket *sock, int level, int optname, 1589int sock_common_getsockopt(struct socket *sock, int level, int optname,
1597 char __user *optval, int __user *optlen); 1590 char __user *optval, int __user *optlen);
1598extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock, 1591int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
1599 struct msghdr *msg, size_t size, int flags); 1592 struct msghdr *msg, size_t size, int flags);
1600extern int sock_common_setsockopt(struct socket *sock, int level, int optname, 1593int sock_common_setsockopt(struct socket *sock, int level, int optname,
1601 char __user *optval, unsigned int optlen); 1594 char __user *optval, unsigned int optlen);
1602extern int compat_sock_common_getsockopt(struct socket *sock, int level, 1595int compat_sock_common_getsockopt(struct socket *sock, int level,
1603 int optname, char __user *optval, int __user *optlen); 1596 int optname, char __user *optval, int __user *optlen);
1604extern int compat_sock_common_setsockopt(struct socket *sock, int level, 1597int compat_sock_common_setsockopt(struct socket *sock, int level,
1605 int optname, char __user *optval, unsigned int optlen); 1598 int optname, char __user *optval, unsigned int optlen);
1606 1599
1607extern void sk_common_release(struct sock *sk); 1600void sk_common_release(struct sock *sk);
1608 1601
1609/* 1602/*
1610 * Default socket callbacks and setup code 1603 * Default socket callbacks and setup code
1611 */ 1604 */
1612 1605
1613/* Initialise core socket variables */ 1606/* Initialise core socket variables */
1614extern void sock_init_data(struct socket *sock, struct sock *sk); 1607void sock_init_data(struct socket *sock, struct sock *sk);
1615 1608
1616extern void sk_filter_release_rcu(struct rcu_head *rcu); 1609void sk_filter_release_rcu(struct rcu_head *rcu);
1617 1610
1618/** 1611/**
1619 * sk_filter_release - release a socket filter 1612 * sk_filter_release - release a socket filter
@@ -1630,16 +1623,14 @@ static inline void sk_filter_release(struct sk_filter *fp)
1630 1623
1631static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp) 1624static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
1632{ 1625{
1633 unsigned int size = sk_filter_len(fp); 1626 atomic_sub(sk_filter_size(fp->len), &sk->sk_omem_alloc);
1634
1635 atomic_sub(size, &sk->sk_omem_alloc);
1636 sk_filter_release(fp); 1627 sk_filter_release(fp);
1637} 1628}
1638 1629
1639static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp) 1630static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
1640{ 1631{
1641 atomic_inc(&fp->refcnt); 1632 atomic_inc(&fp->refcnt);
1642 atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc); 1633 atomic_add(sk_filter_size(fp->len), &sk->sk_omem_alloc);
1643} 1634}
1644 1635
1645/* 1636/*
@@ -1673,9 +1664,12 @@ static inline void sock_put(struct sock *sk)
1673 if (atomic_dec_and_test(&sk->sk_refcnt)) 1664 if (atomic_dec_and_test(&sk->sk_refcnt))
1674 sk_free(sk); 1665 sk_free(sk);
1675} 1666}
1667/* Generic version of sock_put(), dealing with all sockets
1668 * (TCP_TIMEWAIT, ESTABLISHED...)
1669 */
1670void sock_gen_put(struct sock *sk);
1676 1671
1677extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb, 1672int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested);
1678 const int nested);
1679 1673
1680static inline void sk_tx_queue_set(struct sock *sk, int tx_queue) 1674static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
1681{ 1675{
@@ -1729,8 +1723,8 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
1729 write_unlock_bh(&sk->sk_callback_lock); 1723 write_unlock_bh(&sk->sk_callback_lock);
1730} 1724}
1731 1725
1732extern kuid_t sock_i_uid(struct sock *sk); 1726kuid_t sock_i_uid(struct sock *sk);
1733extern unsigned long sock_i_ino(struct sock *sk); 1727unsigned long sock_i_ino(struct sock *sk);
1734 1728
1735static inline struct dst_entry * 1729static inline struct dst_entry *
1736__sk_dst_get(struct sock *sk) 1730__sk_dst_get(struct sock *sk)
@@ -1752,8 +1746,6 @@ sk_dst_get(struct sock *sk)
1752 return dst; 1746 return dst;
1753} 1747}
1754 1748
1755extern void sk_reset_txq(struct sock *sk);
1756
1757static inline void dst_negative_advice(struct sock *sk) 1749static inline void dst_negative_advice(struct sock *sk)
1758{ 1750{
1759 struct dst_entry *ndst, *dst = __sk_dst_get(sk); 1751 struct dst_entry *ndst, *dst = __sk_dst_get(sk);
@@ -1763,7 +1755,7 @@ static inline void dst_negative_advice(struct sock *sk)
1763 1755
1764 if (ndst != dst) { 1756 if (ndst != dst) {
1765 rcu_assign_pointer(sk->sk_dst_cache, ndst); 1757 rcu_assign_pointer(sk->sk_dst_cache, ndst);
1766 sk_reset_txq(sk); 1758 sk_tx_queue_clear(sk);
1767 } 1759 }
1768 } 1760 }
1769} 1761}
@@ -1805,16 +1797,16 @@ sk_dst_reset(struct sock *sk)
1805 spin_unlock(&sk->sk_dst_lock); 1797 spin_unlock(&sk->sk_dst_lock);
1806} 1798}
1807 1799
1808extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie); 1800struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
1809 1801
1810extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie); 1802struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
1811 1803
1812static inline bool sk_can_gso(const struct sock *sk) 1804static inline bool sk_can_gso(const struct sock *sk)
1813{ 1805{
1814 return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type); 1806 return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
1815} 1807}
1816 1808
1817extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst); 1809void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
1818 1810
1819static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags) 1811static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
1820{ 1812{
@@ -2027,14 +2019,14 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
2027 sk_mem_charge(sk, skb->truesize); 2019 sk_mem_charge(sk, skb->truesize);
2028} 2020}
2029 2021
2030extern void sk_reset_timer(struct sock *sk, struct timer_list *timer, 2022void sk_reset_timer(struct sock *sk, struct timer_list *timer,
2031 unsigned long expires); 2023 unsigned long expires);
2032 2024
2033extern void sk_stop_timer(struct sock *sk, struct timer_list *timer); 2025void sk_stop_timer(struct sock *sk, struct timer_list *timer);
2034 2026
2035extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); 2027int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
2036 2028
2037extern int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb); 2029int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
2038 2030
2039/* 2031/*
2040 * Recover an error report and clear atomically 2032 * Recover an error report and clear atomically
@@ -2102,7 +2094,7 @@ static inline struct page_frag *sk_page_frag(struct sock *sk)
2102 return &sk->sk_frag; 2094 return &sk->sk_frag;
2103} 2095}
2104 2096
2105extern bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag); 2097bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
2106 2098
2107/* 2099/*
2108 * Default write policy as shown to user space via poll/select/SIGIO 2100 * Default write policy as shown to user space via poll/select/SIGIO
@@ -2140,10 +2132,10 @@ static inline int sock_intr_errno(long timeo)
2140 return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR; 2132 return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
2141} 2133}
2142 2134
2143extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, 2135void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
2144 struct sk_buff *skb); 2136 struct sk_buff *skb);
2145extern void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk, 2137void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
2146 struct sk_buff *skb); 2138 struct sk_buff *skb);
2147 2139
2148static inline void 2140static inline void
2149sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) 2141sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
@@ -2176,8 +2168,8 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
2176 __sock_recv_wifi_status(msg, sk, skb); 2168 __sock_recv_wifi_status(msg, sk, skb);
2177} 2169}
2178 2170
2179extern void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, 2171void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
2180 struct sk_buff *skb); 2172 struct sk_buff *skb);
2181 2173
2182static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, 2174static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
2183 struct sk_buff *skb) 2175 struct sk_buff *skb)
@@ -2202,7 +2194,7 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
2202 * 2194 *
2203 * Currently only depends on SOCK_TIMESTAMPING* flags. 2195 * Currently only depends on SOCK_TIMESTAMPING* flags.
2204 */ 2196 */
2205extern void sock_tx_timestamp(struct sock *sk, __u8 *tx_flags); 2197void sock_tx_timestamp(struct sock *sk, __u8 *tx_flags);
2206 2198
2207/** 2199/**
2208 * sk_eat_skb - Release a skb if it is no longer needed 2200 * sk_eat_skb - Release a skb if it is no longer needed
@@ -2266,11 +2258,11 @@ static inline struct sock *skb_steal_sock(struct sk_buff *skb)
2266 return NULL; 2258 return NULL;
2267} 2259}
2268 2260
2269extern void sock_enable_timestamp(struct sock *sk, int flag); 2261void sock_enable_timestamp(struct sock *sk, int flag);
2270extern int sock_get_timestamp(struct sock *, struct timeval __user *); 2262int sock_get_timestamp(struct sock *, struct timeval __user *);
2271extern int sock_get_timestampns(struct sock *, struct timespec __user *); 2263int sock_get_timestampns(struct sock *, struct timespec __user *);
2272extern int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, 2264int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
2273 int level, int type); 2265 int type);
2274 2266
2275/* 2267/*
2276 * Enable debug/info messages 2268 * Enable debug/info messages