Diffstat (limited to 'include/net/sock.h')
-rw-r--r--	include/net/sock.h	186
1 file changed, 98 insertions(+), 88 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index 5a0a58ac4126..d89f0582b6b6 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -70,16 +70,16 @@
 struct cgroup;
 struct cgroup_subsys;
 #ifdef CONFIG_NET
-int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss);
-void mem_cgroup_sockets_destroy(struct cgroup *cgrp);
+int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss);
+void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg);
 #else
 static inline
-int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
+int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 {
 	return 0;
 }
 static inline
-void mem_cgroup_sockets_destroy(struct cgroup *cgrp)
+void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
 {
 }
 #endif
@@ -97,7 +97,7 @@ void mem_cgroup_sockets_destroy(struct cgroup *cgrp)
 #else
 /* Validate arguments and do nothing */
 static inline __printf(2, 3)
-void SOCK_DEBUG(struct sock *sk, const char *msg, ...)
+void SOCK_DEBUG(const struct sock *sk, const char *msg, ...)
 {
 }
 #endif
@@ -372,11 +372,22 @@ struct sock {
 	void		(*sk_data_ready)(struct sock *sk, int bytes);
 	void		(*sk_write_space)(struct sock *sk);
 	void		(*sk_error_report)(struct sock *sk);
 	int		(*sk_backlog_rcv)(struct sock *sk,
 					  struct sk_buff *skb);
 	void		(*sk_destruct)(struct sock *sk);
 };
 
+/*
+ * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK
+ * (or not, respectively) with its port being reused by someone else.
+ * SK_FORCE_REUSE on a socket means that the socket will reuse everybody
+ * else's port without looking at the other socket's sk_reuse value.
+ */
+
+#define SK_NO_REUSE	0
+#define SK_CAN_REUSE	1
+#define SK_FORCE_REUSE	2
+
 static inline int sk_peek_offset(struct sock *sk, int flags)
 {
 	if ((flags & MSG_PEEK) && (sk->sk_peek_off >= 0))
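
Note: these named constants replace raw 0/1 writes to sk->sk_reuse. A minimal
caller-side sketch, assuming a protocol that used the old magic numbers; sk2,
the conflict test, and allow_port_sharing() are hypothetical, but follow the
semantics stated in the comment above:

	/* before: sk->sk_reuse = 1;  after: */
	sk->sk_reuse = SK_CAN_REUSE;

	/* hypothetical bind-conflict check using the new constants */
	if (sk->sk_reuse == SK_FORCE_REUSE ||
	    (sk->sk_reuse == SK_CAN_REUSE && sk2->sk_reuse != SK_NO_REUSE))
		allow_port_sharing();	/* hypothetical helper */
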
@@ -443,40 +454,40 @@ static inline struct sock *sk_nulls_next(const struct sock *sk)
 	       NULL;
 }
 
-static inline int sk_unhashed(const struct sock *sk)
+static inline bool sk_unhashed(const struct sock *sk)
 {
 	return hlist_unhashed(&sk->sk_node);
 }
 
-static inline int sk_hashed(const struct sock *sk)
+static inline bool sk_hashed(const struct sock *sk)
 {
 	return !sk_unhashed(sk);
 }
 
-static __inline__ void sk_node_init(struct hlist_node *node)
+static inline void sk_node_init(struct hlist_node *node)
 {
 	node->pprev = NULL;
 }
 
-static __inline__ void sk_nulls_node_init(struct hlist_nulls_node *node)
+static inline void sk_nulls_node_init(struct hlist_nulls_node *node)
 {
 	node->pprev = NULL;
 }
 
-static __inline__ void __sk_del_node(struct sock *sk)
+static inline void __sk_del_node(struct sock *sk)
 {
 	__hlist_del(&sk->sk_node);
 }
 
 /* NB: equivalent to hlist_del_init_rcu */
-static __inline__ int __sk_del_node_init(struct sock *sk)
+static inline bool __sk_del_node_init(struct sock *sk)
 {
 	if (sk_hashed(sk)) {
 		__sk_del_node(sk);
 		sk_node_init(&sk->sk_node);
-		return 1;
+		return true;
 	}
-	return 0;
+	return false;
 }
 
 /* Grab socket reference count. This operation is valid only
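
Note: this hunk and several below are mechanical conversions; the non-standard
__inline__ becomes inline, and predicates that returned 0/1 as int now return
bool. Callers need no change, since bool promotes to int; a hypothetical
caller reads the same either way:

	if (sk_hashed(sk))		/* bool now, int before */
		unhash_work(sk);	/* hypothetical helper */
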
@@ -498,9 +509,9 @@ static inline void __sock_put(struct sock *sk)
 	atomic_dec(&sk->sk_refcnt);
 }
 
-static __inline__ int sk_del_node_init(struct sock *sk)
+static inline bool sk_del_node_init(struct sock *sk)
 {
-	int rc = __sk_del_node_init(sk);
+	bool rc = __sk_del_node_init(sk);
 
 	if (rc) {
 		/* paranoid for a while -acme */
@@ -511,18 +522,18 @@ static __inline__ int sk_del_node_init(struct sock *sk)
 }
 #define sk_del_node_init_rcu(sk)	sk_del_node_init(sk)
 
-static __inline__ int __sk_nulls_del_node_init_rcu(struct sock *sk)
+static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk)
 {
 	if (sk_hashed(sk)) {
 		hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
-		return 1;
+		return true;
 	}
-	return 0;
+	return false;
 }
 
-static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk)
+static inline bool sk_nulls_del_node_init_rcu(struct sock *sk)
 {
-	int rc = __sk_nulls_del_node_init_rcu(sk);
+	bool rc = __sk_nulls_del_node_init_rcu(sk);
 
 	if (rc) {
 		/* paranoid for a while -acme */
@@ -532,40 +543,40 @@ static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk)
 	return rc;
 }
 
-static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
+static inline void __sk_add_node(struct sock *sk, struct hlist_head *list)
 {
 	hlist_add_head(&sk->sk_node, list);
 }
 
-static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
+static inline void sk_add_node(struct sock *sk, struct hlist_head *list)
 {
 	sock_hold(sk);
 	__sk_add_node(sk, list);
 }
 
-static __inline__ void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
+static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
 {
 	sock_hold(sk);
 	hlist_add_head_rcu(&sk->sk_node, list);
 }
 
-static __inline__ void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
+static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
 {
 	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
 }
 
-static __inline__ void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
+static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
 {
 	sock_hold(sk);
 	__sk_nulls_add_node_rcu(sk, list);
 }
 
-static __inline__ void __sk_del_bind_node(struct sock *sk)
+static inline void __sk_del_bind_node(struct sock *sk)
 {
 	__hlist_del(&sk->sk_bind_node);
 }
 
-static __inline__ void sk_add_bind_node(struct sock *sk,
+static inline void sk_add_bind_node(struct sock *sk,
 					struct hlist_head *list)
 {
 	hlist_add_head(&sk->sk_bind_node, list);
@@ -639,7 +650,7 @@ static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
 	__clear_bit(flag, &sk->sk_flags);
 }
 
-static inline int sock_flag(struct sock *sk, enum sock_flags flag)
+static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
 {
 	return test_bit(flag, &sk->sk_flags);
 }
@@ -654,7 +665,7 @@ static inline void sk_acceptq_added(struct sock *sk)
 	sk->sk_ack_backlog++;
 }
 
-static inline int sk_acceptq_is_full(struct sock *sk)
+static inline bool sk_acceptq_is_full(const struct sock *sk)
 {
 	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
 }
@@ -662,19 +673,19 @@ static inline int sk_acceptq_is_full(struct sock *sk)
 /*
  * Compute minimal free write space needed to queue new packets.
  */
-static inline int sk_stream_min_wspace(struct sock *sk)
+static inline int sk_stream_min_wspace(const struct sock *sk)
 {
 	return sk->sk_wmem_queued >> 1;
 }
 
-static inline int sk_stream_wspace(struct sock *sk)
+static inline int sk_stream_wspace(const struct sock *sk)
 {
 	return sk->sk_sndbuf - sk->sk_wmem_queued;
 }
 
 extern void sk_stream_write_space(struct sock *sk);
 
-static inline int sk_stream_memory_free(struct sock *sk)
+static inline bool sk_stream_memory_free(const struct sock *sk)
 {
 	return sk->sk_wmem_queued < sk->sk_sndbuf;
 }
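
Note: a quick worked example of the write-space arithmetic: with
sk->sk_sndbuf = 65536 and sk->sk_wmem_queued = 16384, sk_stream_wspace() is
49152, sk_stream_min_wspace() is 8192 (half of what is queued), and
sk_stream_memory_free() is true. This is roughly the condition
sk_stream_write_space() tests before waking writers:

	/* sketch of the wakeup condition, not the full function */
	if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
		/* wake tasks blocked in sendmsg(), send SIGIO, etc. */;
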
@@ -699,17 +710,19 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
  * Do not take into account this skb truesize,
  * to allow even a single big packet to come.
  */
-static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
+static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb,
+				     unsigned int limit)
 {
 	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
 
-	return qsize > sk->sk_rcvbuf;
+	return qsize > limit;
 }
 
 /* The per-socket spinlock must be held here. */
-static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
+					      unsigned int limit)
 {
-	if (sk_rcvqueues_full(sk, skb))
+	if (sk_rcvqueues_full(sk, skb, limit))
 		return -ENOBUFS;
 
 	__sk_add_backlog(sk, skb);
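
Note: the new limit argument is the point of this hunk; each caller now picks
its own backlog bound instead of the hard-wired sk->sk_rcvbuf. A hedged
sketch of the two caller styles (the real per-protocol limits live in the .c
changes, not in this header):

	/* conservative: keep the old behaviour */
	if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
		goto drop;

	/* roomier, e.g. for TCP: rcvbuf plus sndbuf, so a burst arriving
	 * while the socket is owned by user context is not dropped */
	if (sk_add_backlog(sk, skb, sk->sk_rcvbuf + sk->sk_sndbuf))
		goto discard;
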
@@ -796,26 +809,26 @@ struct module;
  * transport -> network interface is defined by struct inet_proto
  */
 struct proto {
 	void		(*close)(struct sock *sk,
 				long timeout);
 	int		(*connect)(struct sock *sk,
 				struct sockaddr *uaddr,
 				int addr_len);
 	int		(*disconnect)(struct sock *sk, int flags);
 
-	struct sock *	(*accept) (struct sock *sk, int flags, int *err);
+	struct sock *	(*accept)(struct sock *sk, int flags, int *err);
 
 	int		(*ioctl)(struct sock *sk, int cmd,
 				 unsigned long arg);
 	int		(*init)(struct sock *sk);
 	void		(*destroy)(struct sock *sk);
 	void		(*shutdown)(struct sock *sk, int how);
 	int		(*setsockopt)(struct sock *sk, int level,
 				int optname, char __user *optval,
 				unsigned int optlen);
 	int		(*getsockopt)(struct sock *sk, int level,
 				int optname, char __user *optval,
 				int __user *option);
 #ifdef CONFIG_COMPAT
 	int		(*compat_setsockopt)(struct sock *sk,
 				int level,
@@ -832,14 +845,14 @@ struct proto {
 				struct msghdr *msg, size_t len);
 	int		(*recvmsg)(struct kiocb *iocb, struct sock *sk,
 				struct msghdr *msg,
 				size_t len, int noblock, int flags,
 				int *addr_len);
 	int		(*sendpage)(struct sock *sk, struct page *page,
 				int offset, size_t size, int flags);
 	int		(*bind)(struct sock *sk,
 				struct sockaddr *uaddr, int addr_len);
 
 	int		(*backlog_rcv) (struct sock *sk,
 					struct sk_buff *skb);
 
 	/* Keeping track of sk's, looking them up, and port selection methods. */
@@ -901,9 +914,9 @@ struct proto {
 	 * This function has to setup any files the protocol want to
 	 * appear in the kmem cgroup filesystem.
 	 */
-	int		(*init_cgroup)(struct cgroup *cgrp,
+	int		(*init_cgroup)(struct mem_cgroup *memcg,
 				       struct cgroup_subsys *ss);
-	void		(*destroy_cgroup)(struct cgroup *cgrp);
+	void		(*destroy_cgroup)(struct mem_cgroup *memcg);
 	struct cg_proto	*(*proto_cgroup)(struct mem_cgroup *memcg);
 #endif
 };
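
Note: struct proto is the transport's operations table; a protocol fills in
the hooks it implements and registers the result. A minimal hypothetical
instance, only to show the shape (all my_* names are invented):

	static struct proto my_proto = {
		.name	     = "MYPROTO",
		.owner	     = THIS_MODULE,
		.close	     = my_close,
		.connect     = my_connect,
		.sendmsg     = my_sendmsg,
		.recvmsg     = my_recvmsg,
		.backlog_rcv = my_backlog_rcv,	/* run from release_sock() */
		.obj_size    = sizeof(struct my_sock),
	};

	/* typically from module init: */
	err = proto_register(&my_proto, 1 /* alloc slab */);
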
@@ -1160,7 +1173,7 @@ proto_memory_pressure(struct proto *prot)
 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
 extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
 #else
-static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
+static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
 		int inc)
 {
 }
@@ -1247,24 +1260,24 @@ static inline int sk_mem_pages(int amt)
 	return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
 }
 
-static inline int sk_has_account(struct sock *sk)
+static inline bool sk_has_account(struct sock *sk)
 {
 	/* return true if protocol supports memory accounting */
 	return !!sk->sk_prot->memory_allocated;
 }
 
-static inline int sk_wmem_schedule(struct sock *sk, int size)
+static inline bool sk_wmem_schedule(struct sock *sk, int size)
 {
 	if (!sk_has_account(sk))
-		return 1;
+		return true;
 	return size <= sk->sk_forward_alloc ||
 		__sk_mem_schedule(sk, size, SK_MEM_SEND);
 }
 
-static inline int sk_rmem_schedule(struct sock *sk, int size)
+static inline bool sk_rmem_schedule(struct sock *sk, int size)
 {
 	if (!sk_has_account(sk))
-		return 1;
+		return true;
 	return size <= sk->sk_forward_alloc ||
 		__sk_mem_schedule(sk, size, SK_MEM_RECV);
 }
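
Note: these two helpers are the fast path of socket memory accounting; if
size fits within sk->sk_forward_alloc the call costs nothing, otherwise
__sk_mem_schedule() must reserve more against the protocol's global
memory_allocated counter. A hedged sketch of the usual send-side pattern:

	/* roughly how a stream protocol charges a fresh skb */
	if (!sk_wmem_schedule(sk, skb->truesize))
		goto wait_for_memory;	/* over the protocol's limits */
	skb_set_owner_w(skb, sk);	/* account it to this socket */
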
@@ -1329,7 +1342,7 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
  * Mark both the sk_lock and the sk_lock.slock as a
  * per-address-family lock class.
  */
 #define sock_lock_init_class_and_name(sk, sname, skey, name, key)	\
 do {									\
 	sk->sk_lock.owned = 0;						\
 	init_waitqueue_head(&sk->sk_lock.wq);				\
@@ -1337,7 +1350,7 @@ do { \
 	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
 				   sizeof((sk)->sk_lock));		\
 	lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
 				   (skey), (sname));			\
 	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
 } while (0)
 
@@ -1397,13 +1410,13 @@ extern int sock_setsockopt(struct socket *sock, int level,
 				unsigned int optlen);
 
 extern int sock_getsockopt(struct socket *sock, int level,
 				int op, char __user *optval,
 				int __user *optlen);
 extern struct sk_buff *sock_alloc_send_skb(struct sock *sk,
 				unsigned long size,
 				int noblock,
 				int *errcode);
 extern struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
 				unsigned long header_len,
 				unsigned long data_len,
 				int noblock,
@@ -1425,7 +1438,7 @@ static inline void sock_update_classid(struct sock *sk)
  * Functions to fill in entries in struct proto_ops when a protocol
  * does not implement a particular function.
  */
 extern int sock_no_bind(struct socket *,
 			struct sockaddr *, int);
 extern int sock_no_connect(struct socket *,
 			struct sockaddr *, int, int);
@@ -1454,7 +1467,7 @@ extern int sock_no_mmap(struct file *file,
 			struct vm_area_struct *vma);
 extern ssize_t sock_no_sendpage(struct socket *sock,
 			struct page *page,
 			int offset, size_t size,
 			int flags);
 
 /*
@@ -1477,7 +1490,7 @@ extern void sk_common_release(struct sock *sk);
 /*
  * Default socket callbacks and setup code
  */
 
 /* Initialise core socket variables */
 extern void sock_init_data(struct socket *sock, struct sock *sk);
 
@@ -1677,7 +1690,7 @@ extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
 
 extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
 
-static inline int sk_can_gso(const struct sock *sk)
+static inline bool sk_can_gso(const struct sock *sk)
 {
 	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
 }
@@ -1794,7 +1807,7 @@ static inline int sk_rmem_alloc_get(const struct sock *sk)
  *
  * Returns true if socket has write or read allocations
  */
-static inline int sk_has_allocations(const struct sock *sk)
+static inline bool sk_has_allocations(const struct sock *sk)
 {
 	return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
 }
@@ -1833,9 +1846,7 @@ static inline int sk_has_allocations(const struct sock *sk)
  */
 static inline bool wq_has_sleeper(struct socket_wq *wq)
 {
-
-	/*
-	 * We need to be sure we are in sync with the
+	/* We need to be sure we are in sync with the
 	 * add_wait_queue modifications to the wait queue.
 	 *
 	 * This memory barrier is paired in the sock_poll_wait.
@@ -1857,22 +1868,21 @@ static inline void sock_poll_wait(struct file *filp,
 {
 	if (!poll_does_not_wait(p) && wait_address) {
 		poll_wait(filp, wait_address, p);
-		/*
-		 * We need to be sure we are in sync with the
+		/* We need to be sure we are in sync with the
 		 * socket flags modification.
 		 *
 		 * This memory barrier is paired in the wq_has_sleeper.
 		 */
 		smp_mb();
 	}
 }
 
 /*
  * Queue a received datagram if it will fit. Stream and sequenced
  * protocols can't normally use this as they need to fit buffers in
  * and play with them.
  *
  * Inlined as it's very short and called for pretty much every
  * packet ever received.
  */
 
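
Note: the barrier comments above describe one paired scheme: each side
publishes its own update first, then (after a full barrier) inspects the
other side's state, so a wakeup can never slip between the sleeper's last
check and its enqueue. A sketch with hypothetical flag helpers:

	/* waker side, as in sock_def_readable() (flag helper is invented) */
	set_ready_flag(sk);
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))		/* smp_mb() happens inside */
		wake_up_interruptible(&wq->wait);

	/* sleeper side, as in a ->poll() implementation */
	sock_poll_wait(file, sk_sleep(sk), wait);	/* enqueue, then smp_mb() */
	if (ready_flag_set(sk))				/* invented re-check */
		mask |= POLLIN;
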
@@ -1898,10 +1908,10 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
 	sk_mem_charge(sk, skb->truesize);
 }
 
-extern void sk_reset_timer(struct sock *sk, struct timer_list* timer,
+extern void sk_reset_timer(struct sock *sk, struct timer_list *timer,
 			   unsigned long expires);
 
-extern void sk_stop_timer(struct sock *sk, struct timer_list* timer);
+extern void sk_stop_timer(struct sock *sk, struct timer_list *timer);
 
 extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 
@@ -1910,7 +1920,7 @@ extern int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
 /*
  * Recover an error report and clear atomically
  */
 
 static inline int sock_error(struct sock *sk)
 {
 	int err;
@@ -1926,7 +1936,7 @@ static inline unsigned long sock_wspace(struct sock *sk)
 
 	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
 		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
 		if (amt < 0)
 			amt = 0;
 	}
 	return amt;
@@ -1970,7 +1980,7 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk)
 /*
  * Default write policy as shown to user space via poll/select/SIGIO
  */
-static inline int sock_writeable(const struct sock *sk)
+static inline bool sock_writeable(const struct sock *sk)
 {
 	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
 }
@@ -1980,12 +1990,12 @@ static inline gfp_t gfp_any(void)
 	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
 }
 
-static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
+static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
 {
 	return noblock ? 0 : sk->sk_rcvtimeo;
 }
 
-static inline long sock_sndtimeo(const struct sock *sk, int noblock)
+static inline long sock_sndtimeo(const struct sock *sk, bool noblock)
 {
 	return noblock ? 0 : sk->sk_sndtimeo;
 }
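
Note: with the bool parameter the usual call sites still read naturally,
since a flag test converts cleanly to bool. The canonical receive-path
pattern:

	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	/* 0 means do not block; otherwise wait up to sk->sk_rcvtimeo jiffies */
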
@@ -2008,7 +2018,7 @@ extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
 extern void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
 				    struct sk_buff *skb);
 
-static __inline__ void
+static inline void
 sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
 {
 	ktime_t kt = skb->tstamp;
@@ -2049,7 +2059,7 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
 			   (1UL << SOCK_RCVTSTAMP)			| \
 			   (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)	| \
 			   (1UL << SOCK_TIMESTAMPING_SOFTWARE)		| \
 			   (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE)	| \
 			   (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE))
 
 	if (sk->sk_flags & FLAGS_TS_OR_DROPS)
@@ -2078,7 +2088,7 @@ extern int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags);
  * locked so that the sk_buff queue operation is ok.
  */
 #ifdef CONFIG_NET_DMA
-static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
+static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early)
 {
 	__skb_unlink(skb, &sk->sk_receive_queue);
 	if (!copied_early)
@@ -2087,7 +2097,7 @@ static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_e
 		__skb_queue_tail(&sk->sk_async_wait_queue, skb);
 }
 #else
-static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
+static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early)
 {
 	__skb_unlink(skb, &sk->sk_receive_queue);
 	__kfree_skb(skb);
@@ -2134,8 +2144,8 @@ extern void sock_enable_timestamp(struct sock *sk, int flag);
 extern int sock_get_timestamp(struct sock *, struct timeval __user *);
 extern int sock_get_timestampns(struct sock *, struct timespec __user *);
 
 /*
  * Enable debug/info messages
  */
 extern int net_msg_warn;
 #define NETDEBUG(fmt, args...) \