| author | Len Brown <len.brown@intel.com> | 2012-06-04 00:35:19 -0400 |
|---|---|---|
| committer | Len Brown <len.brown@intel.com> | 2012-06-04 00:35:19 -0400 |
| commit | 7e1bd6e38b1f30860ce25a014c6d6adfb0079f4a (patch) | |
| tree | 65c5898ba93007d4399150c7a127a670bcfbc30d /include/net/sock.h | |
| parent | 301f33fbcf4ced53b3de114846ecece5d6aafeeb (diff) | |
| parent | f8f5701bdaf9134b1f90e5044a82c66324d2073f (diff) | |
Merge branch 'upstream' into bugfix-video
Update the bugfix-video branch to 3.5-rc1
so that I don't have to resolve the conflict
between these patches and upstream again.
Conflicts:
drivers/gpu/drm/gma500/psb_drv.c
text conflict: one side adds a comment, the other deletes a neighboring line
resolution: keep just this:
/* igd_opregion_init(&dev_priv->opregion_dev); */
/* acpi_video_register(); */
Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'include/net/sock.h')
| -rw-r--r-- | include/net/sock.h | 208 |
1 file changed, 120 insertions(+), 88 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index 5a0a58ac4126..4a4521699563 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
| @@ -46,6 +46,7 @@ | |||
| 46 | #include <linux/list_nulls.h> | 46 | #include <linux/list_nulls.h> |
| 47 | #include <linux/timer.h> | 47 | #include <linux/timer.h> |
| 48 | #include <linux/cache.h> | 48 | #include <linux/cache.h> |
| 49 | #include <linux/bitops.h> | ||
| 49 | #include <linux/lockdep.h> | 50 | #include <linux/lockdep.h> |
| 50 | #include <linux/netdevice.h> | 51 | #include <linux/netdevice.h> |
| 51 | #include <linux/skbuff.h> /* struct sk_buff */ | 52 | #include <linux/skbuff.h> /* struct sk_buff */ |
| @@ -70,16 +71,16 @@ | |||
| 70 | struct cgroup; | 71 | struct cgroup; |
| 71 | struct cgroup_subsys; | 72 | struct cgroup_subsys; |
| 72 | #ifdef CONFIG_NET | 73 | #ifdef CONFIG_NET |
| 73 | int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss); | 74 | int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss); |
| 74 | void mem_cgroup_sockets_destroy(struct cgroup *cgrp); | 75 | void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg); |
| 75 | #else | 76 | #else |
| 76 | static inline | 77 | static inline |
| 77 | int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss) | 78 | int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss) |
| 78 | { | 79 | { |
| 79 | return 0; | 80 | return 0; |
| 80 | } | 81 | } |
| 81 | static inline | 82 | static inline |
| 82 | void mem_cgroup_sockets_destroy(struct cgroup *cgrp) | 83 | void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg) |
| 83 | { | 84 | { |
| 84 | } | 85 | } |
| 85 | #endif | 86 | #endif |
| @@ -97,7 +98,7 @@ void mem_cgroup_sockets_destroy(struct cgroup *cgrp) | |||
| 97 | #else | 98 | #else |
| 98 | /* Validate arguments and do nothing */ | 99 | /* Validate arguments and do nothing */ |
| 99 | static inline __printf(2, 3) | 100 | static inline __printf(2, 3) |
| 100 | void SOCK_DEBUG(struct sock *sk, const char *msg, ...) | 101 | void SOCK_DEBUG(const struct sock *sk, const char *msg, ...) |
| 101 | { | 102 | { |
| 102 | } | 103 | } |
| 103 | #endif | 104 | #endif |
| @@ -372,11 +373,22 @@ struct sock { | |||
| 372 | void (*sk_data_ready)(struct sock *sk, int bytes); | 373 | void (*sk_data_ready)(struct sock *sk, int bytes); |
| 373 | void (*sk_write_space)(struct sock *sk); | 374 | void (*sk_write_space)(struct sock *sk); |
| 374 | void (*sk_error_report)(struct sock *sk); | 375 | void (*sk_error_report)(struct sock *sk); |
| 375 | int (*sk_backlog_rcv)(struct sock *sk, | 376 | int (*sk_backlog_rcv)(struct sock *sk, |
| 376 | struct sk_buff *skb); | 377 | struct sk_buff *skb); |
| 377 | void (*sk_destruct)(struct sock *sk); | 378 | void (*sk_destruct)(struct sock *sk); |
| 378 | }; | 379 | }; |
| 379 | 380 | ||
| 381 | /* | ||
| 382 | * SK_CAN_REUSE and SK_NO_REUSE on a socket indicate whether or not | ||
| 383 | * its port may be reused by someone else. SK_FORCE_REUSE | ||
| 384 | * on a socket means that the socket will reuse everybody else's port | ||
| 385 | * without looking at the other's sk_reuse value. | ||
| 386 | */ | ||
| 387 | |||
| 388 | #define SK_NO_REUSE 0 | ||
| 389 | #define SK_CAN_REUSE 1 | ||
| 390 | #define SK_FORCE_REUSE 2 | ||
| 391 | |||
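The named constants replace raw 0/1 values stored in sk->sk_reuse. A minimal sketch of the bind-conflict decision they encode (the helper is hypothetical and omits the listen-state and address checks a real bind-conflict routine performs):

```c
/* Hypothetical helper: may newsk share oldsk's port?
 * Simplified from the real bind-conflict logic. */
static bool ports_may_share(const struct sock *newsk,
			    const struct sock *oldsk)
{
	/* SK_FORCE_REUSE never consults the other socket's setting */
	if (newsk->sk_reuse == SK_FORCE_REUSE)
		return true;
	/* otherwise both sides must have opted in */
	return newsk->sk_reuse == SK_CAN_REUSE &&
	       oldsk->sk_reuse == SK_CAN_REUSE;
}
```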
| 380 | static inline int sk_peek_offset(struct sock *sk, int flags) | 392 | static inline int sk_peek_offset(struct sock *sk, int flags) |
| 381 | { | 393 | { |
| 382 | if ((flags & MSG_PEEK) && (sk->sk_peek_off >= 0)) | 394 | if ((flags & MSG_PEEK) && (sk->sk_peek_off >= 0)) |
| @@ -443,40 +455,40 @@ static inline struct sock *sk_nulls_next(const struct sock *sk) | |||
| 443 | NULL; | 455 | NULL; |
| 444 | } | 456 | } |
| 445 | 457 | ||
| 446 | static inline int sk_unhashed(const struct sock *sk) | 458 | static inline bool sk_unhashed(const struct sock *sk) |
| 447 | { | 459 | { |
| 448 | return hlist_unhashed(&sk->sk_node); | 460 | return hlist_unhashed(&sk->sk_node); |
| 449 | } | 461 | } |
| 450 | 462 | ||
| 451 | static inline int sk_hashed(const struct sock *sk) | 463 | static inline bool sk_hashed(const struct sock *sk) |
| 452 | { | 464 | { |
| 453 | return !sk_unhashed(sk); | 465 | return !sk_unhashed(sk); |
| 454 | } | 466 | } |
| 455 | 467 | ||
| 456 | static __inline__ void sk_node_init(struct hlist_node *node) | 468 | static inline void sk_node_init(struct hlist_node *node) |
| 457 | { | 469 | { |
| 458 | node->pprev = NULL; | 470 | node->pprev = NULL; |
| 459 | } | 471 | } |
| 460 | 472 | ||
| 461 | static __inline__ void sk_nulls_node_init(struct hlist_nulls_node *node) | 473 | static inline void sk_nulls_node_init(struct hlist_nulls_node *node) |
| 462 | { | 474 | { |
| 463 | node->pprev = NULL; | 475 | node->pprev = NULL; |
| 464 | } | 476 | } |
| 465 | 477 | ||
| 466 | static __inline__ void __sk_del_node(struct sock *sk) | 478 | static inline void __sk_del_node(struct sock *sk) |
| 467 | { | 479 | { |
| 468 | __hlist_del(&sk->sk_node); | 480 | __hlist_del(&sk->sk_node); |
| 469 | } | 481 | } |
| 470 | 482 | ||
| 471 | /* NB: equivalent to hlist_del_init_rcu */ | 483 | /* NB: equivalent to hlist_del_init_rcu */ |
| 472 | static __inline__ int __sk_del_node_init(struct sock *sk) | 484 | static inline bool __sk_del_node_init(struct sock *sk) |
| 473 | { | 485 | { |
| 474 | if (sk_hashed(sk)) { | 486 | if (sk_hashed(sk)) { |
| 475 | __sk_del_node(sk); | 487 | __sk_del_node(sk); |
| 476 | sk_node_init(&sk->sk_node); | 488 | sk_node_init(&sk->sk_node); |
| 477 | return 1; | 489 | return true; |
| 478 | } | 490 | } |
| 479 | return 0; | 491 | return false; |
| 480 | } | 492 | } |
| 481 | 493 | ||
| 482 | /* Grab socket reference count. This operation is valid only | 494 | /* Grab socket reference count. This operation is valid only |
| @@ -498,9 +510,9 @@ static inline void __sock_put(struct sock *sk) | |||
| 498 | atomic_dec(&sk->sk_refcnt); | 510 | atomic_dec(&sk->sk_refcnt); |
| 499 | } | 511 | } |
| 500 | 512 | ||
| 501 | static __inline__ int sk_del_node_init(struct sock *sk) | 513 | static inline bool sk_del_node_init(struct sock *sk) |
| 502 | { | 514 | { |
| 503 | int rc = __sk_del_node_init(sk); | 515 | bool rc = __sk_del_node_init(sk); |
| 504 | 516 | ||
| 505 | if (rc) { | 517 | if (rc) { |
| 506 | /* paranoid for a while -acme */ | 518 | /* paranoid for a while -acme */ |
| @@ -511,18 +523,18 @@ static __inline__ int sk_del_node_init(struct sock *sk) | |||
| 511 | } | 523 | } |
| 512 | #define sk_del_node_init_rcu(sk) sk_del_node_init(sk) | 524 | #define sk_del_node_init_rcu(sk) sk_del_node_init(sk) |
| 513 | 525 | ||
| 514 | static __inline__ int __sk_nulls_del_node_init_rcu(struct sock *sk) | 526 | static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk) |
| 515 | { | 527 | { |
| 516 | if (sk_hashed(sk)) { | 528 | if (sk_hashed(sk)) { |
| 517 | hlist_nulls_del_init_rcu(&sk->sk_nulls_node); | 529 | hlist_nulls_del_init_rcu(&sk->sk_nulls_node); |
| 518 | return 1; | 530 | return true; |
| 519 | } | 531 | } |
| 520 | return 0; | 532 | return false; |
| 521 | } | 533 | } |
| 522 | 534 | ||
| 523 | static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk) | 535 | static inline bool sk_nulls_del_node_init_rcu(struct sock *sk) |
| 524 | { | 536 | { |
| 525 | int rc = __sk_nulls_del_node_init_rcu(sk); | 537 | bool rc = __sk_nulls_del_node_init_rcu(sk); |
| 526 | 538 | ||
| 527 | if (rc) { | 539 | if (rc) { |
| 528 | /* paranoid for a while -acme */ | 540 | /* paranoid for a while -acme */ |
| @@ -532,40 +544,40 @@ static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk) | |||
| 532 | return rc; | 544 | return rc; |
| 533 | } | 545 | } |
| 534 | 546 | ||
| 535 | static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list) | 547 | static inline void __sk_add_node(struct sock *sk, struct hlist_head *list) |
| 536 | { | 548 | { |
| 537 | hlist_add_head(&sk->sk_node, list); | 549 | hlist_add_head(&sk->sk_node, list); |
| 538 | } | 550 | } |
| 539 | 551 | ||
| 540 | static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list) | 552 | static inline void sk_add_node(struct sock *sk, struct hlist_head *list) |
| 541 | { | 553 | { |
| 542 | sock_hold(sk); | 554 | sock_hold(sk); |
| 543 | __sk_add_node(sk, list); | 555 | __sk_add_node(sk, list); |
| 544 | } | 556 | } |
| 545 | 557 | ||
| 546 | static __inline__ void sk_add_node_rcu(struct sock *sk, struct hlist_head *list) | 558 | static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list) |
| 547 | { | 559 | { |
| 548 | sock_hold(sk); | 560 | sock_hold(sk); |
| 549 | hlist_add_head_rcu(&sk->sk_node, list); | 561 | hlist_add_head_rcu(&sk->sk_node, list); |
| 550 | } | 562 | } |
| 551 | 563 | ||
| 552 | static __inline__ void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) | 564 | static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) |
| 553 | { | 565 | { |
| 554 | hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); | 566 | hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); |
| 555 | } | 567 | } |
| 556 | 568 | ||
| 557 | static __inline__ void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) | 569 | static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) |
| 558 | { | 570 | { |
| 559 | sock_hold(sk); | 571 | sock_hold(sk); |
| 560 | __sk_nulls_add_node_rcu(sk, list); | 572 | __sk_nulls_add_node_rcu(sk, list); |
| 561 | } | 573 | } |
| 562 | 574 | ||
| 563 | static __inline__ void __sk_del_bind_node(struct sock *sk) | 575 | static inline void __sk_del_bind_node(struct sock *sk) |
| 564 | { | 576 | { |
| 565 | __hlist_del(&sk->sk_bind_node); | 577 | __hlist_del(&sk->sk_bind_node); |
| 566 | } | 578 | } |
| 567 | 579 | ||
| 568 | static __inline__ void sk_add_bind_node(struct sock *sk, | 580 | static inline void sk_add_bind_node(struct sock *sk, |
| 569 | struct hlist_head *list) | 581 | struct hlist_head *list) |
| 570 | { | 582 | { |
| 571 | hlist_add_head(&sk->sk_bind_node, list); | 583 | hlist_add_head(&sk->sk_bind_node, list); |
| @@ -639,7 +651,7 @@ static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag) | |||
| 639 | __clear_bit(flag, &sk->sk_flags); | 651 | __clear_bit(flag, &sk->sk_flags); |
| 640 | } | 652 | } |
| 641 | 653 | ||
| 642 | static inline int sock_flag(struct sock *sk, enum sock_flags flag) | 654 | static inline bool sock_flag(const struct sock *sk, enum sock_flags flag) |
| 643 | { | 655 | { |
| 644 | return test_bit(flag, &sk->sk_flags); | 656 | return test_bit(flag, &sk->sk_flags); |
| 645 | } | 657 | } |
| @@ -654,7 +666,7 @@ static inline void sk_acceptq_added(struct sock *sk) | |||
| 654 | sk->sk_ack_backlog++; | 666 | sk->sk_ack_backlog++; |
| 655 | } | 667 | } |
| 656 | 668 | ||
| 657 | static inline int sk_acceptq_is_full(struct sock *sk) | 669 | static inline bool sk_acceptq_is_full(const struct sock *sk) |
| 658 | { | 670 | { |
| 659 | return sk->sk_ack_backlog > sk->sk_max_ack_backlog; | 671 | return sk->sk_ack_backlog > sk->sk_max_ack_backlog; |
| 660 | } | 672 | } |
| @@ -662,19 +674,19 @@ static inline int sk_acceptq_is_full(struct sock *sk) | |||
| 662 | /* | 674 | /* |
| 663 | * Compute minimal free write space needed to queue new packets. | 675 | * Compute minimal free write space needed to queue new packets. |
| 664 | */ | 676 | */ |
| 665 | static inline int sk_stream_min_wspace(struct sock *sk) | 677 | static inline int sk_stream_min_wspace(const struct sock *sk) |
| 666 | { | 678 | { |
| 667 | return sk->sk_wmem_queued >> 1; | 679 | return sk->sk_wmem_queued >> 1; |
| 668 | } | 680 | } |
| 669 | 681 | ||
| 670 | static inline int sk_stream_wspace(struct sock *sk) | 682 | static inline int sk_stream_wspace(const struct sock *sk) |
| 671 | { | 683 | { |
| 672 | return sk->sk_sndbuf - sk->sk_wmem_queued; | 684 | return sk->sk_sndbuf - sk->sk_wmem_queued; |
| 673 | } | 685 | } |
| 674 | 686 | ||
| 675 | extern void sk_stream_write_space(struct sock *sk); | 687 | extern void sk_stream_write_space(struct sock *sk); |
| 676 | 688 | ||
| 677 | static inline int sk_stream_memory_free(struct sock *sk) | 689 | static inline bool sk_stream_memory_free(const struct sock *sk) |
| 678 | { | 690 | { |
| 679 | return sk->sk_wmem_queued < sk->sk_sndbuf; | 691 | return sk->sk_wmem_queued < sk->sk_sndbuf; |
| 680 | } | 692 | } |
| @@ -699,17 +711,19 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) | |||
| 699 | * Do not take into account this skb truesize, | 711 | * Do not take into account this skb truesize, |
| 700 | * to allow even a single big packet to come. | 712 | * to allow even a single big packet to come. |
| 701 | */ | 713 | */ |
| 702 | static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb) | 714 | static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb, |
| 715 | unsigned int limit) | ||
| 703 | { | 716 | { |
| 704 | unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc); | 717 | unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc); |
| 705 | 718 | ||
| 706 | return qsize > sk->sk_rcvbuf; | 719 | return qsize > limit; |
| 707 | } | 720 | } |
| 708 | 721 | ||
| 709 | /* The per-socket spinlock must be held here. */ | 722 | /* The per-socket spinlock must be held here. */ |
| 710 | static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb) | 723 | static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb, |
| 724 | unsigned int limit) | ||
| 711 | { | 725 | { |
| 712 | if (sk_rcvqueues_full(sk, skb)) | 726 | if (sk_rcvqueues_full(sk, skb, limit)) |
| 713 | return -ENOBUFS; | 727 | return -ENOBUFS; |
| 714 | 728 | ||
| 715 | __sk_add_backlog(sk, skb); | 729 | __sk_add_backlog(sk, skb); |
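With the new argument, each protocol picks its own backlog bound rather than inheriting the hard-wired sk->sk_rcvbuf. A hedged sketch of a caller (the receive function is illustrative; passing sk->sk_rcvbuf reproduces the old behaviour, and a protocol such as TCP can pass a larger limit like sk_rcvbuf + sk_sndbuf):

```c
/* Illustrative receive path: process directly when the socket is
 * unowned, otherwise queue to the backlog under an explicit limit. */
static int example_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	int rc = 0;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		rc = sk_backlog_rcv(sk, skb);
	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		kfree_skb(skb);		/* backlog over limit: drop */
		rc = -ENOBUFS;
	}
	bh_unlock_sock(sk);
	return rc;
}
```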
| @@ -796,26 +810,26 @@ struct module; | |||
| 796 | * transport -> network interface is defined by struct inet_proto | 810 | * transport -> network interface is defined by struct inet_proto |
| 797 | */ | 811 | */ |
| 798 | struct proto { | 812 | struct proto { |
| 799 | void (*close)(struct sock *sk, | 813 | void (*close)(struct sock *sk, |
| 800 | long timeout); | 814 | long timeout); |
| 801 | int (*connect)(struct sock *sk, | 815 | int (*connect)(struct sock *sk, |
| 802 | struct sockaddr *uaddr, | 816 | struct sockaddr *uaddr, |
| 803 | int addr_len); | 817 | int addr_len); |
| 804 | int (*disconnect)(struct sock *sk, int flags); | 818 | int (*disconnect)(struct sock *sk, int flags); |
| 805 | 819 | ||
| 806 | struct sock * (*accept) (struct sock *sk, int flags, int *err); | 820 | struct sock * (*accept)(struct sock *sk, int flags, int *err); |
| 807 | 821 | ||
| 808 | int (*ioctl)(struct sock *sk, int cmd, | 822 | int (*ioctl)(struct sock *sk, int cmd, |
| 809 | unsigned long arg); | 823 | unsigned long arg); |
| 810 | int (*init)(struct sock *sk); | 824 | int (*init)(struct sock *sk); |
| 811 | void (*destroy)(struct sock *sk); | 825 | void (*destroy)(struct sock *sk); |
| 812 | void (*shutdown)(struct sock *sk, int how); | 826 | void (*shutdown)(struct sock *sk, int how); |
| 813 | int (*setsockopt)(struct sock *sk, int level, | 827 | int (*setsockopt)(struct sock *sk, int level, |
| 814 | int optname, char __user *optval, | 828 | int optname, char __user *optval, |
| 815 | unsigned int optlen); | 829 | unsigned int optlen); |
| 816 | int (*getsockopt)(struct sock *sk, int level, | 830 | int (*getsockopt)(struct sock *sk, int level, |
| 817 | int optname, char __user *optval, | 831 | int optname, char __user *optval, |
| 818 | int __user *option); | 832 | int __user *option); |
| 819 | #ifdef CONFIG_COMPAT | 833 | #ifdef CONFIG_COMPAT |
| 820 | int (*compat_setsockopt)(struct sock *sk, | 834 | int (*compat_setsockopt)(struct sock *sk, |
| 821 | int level, | 835 | int level, |
| @@ -832,14 +846,14 @@ struct proto { | |||
| 832 | struct msghdr *msg, size_t len); | 846 | struct msghdr *msg, size_t len); |
| 833 | int (*recvmsg)(struct kiocb *iocb, struct sock *sk, | 847 | int (*recvmsg)(struct kiocb *iocb, struct sock *sk, |
| 834 | struct msghdr *msg, | 848 | struct msghdr *msg, |
| 835 | size_t len, int noblock, int flags, | 849 | size_t len, int noblock, int flags, |
| 836 | int *addr_len); | 850 | int *addr_len); |
| 837 | int (*sendpage)(struct sock *sk, struct page *page, | 851 | int (*sendpage)(struct sock *sk, struct page *page, |
| 838 | int offset, size_t size, int flags); | 852 | int offset, size_t size, int flags); |
| 839 | int (*bind)(struct sock *sk, | 853 | int (*bind)(struct sock *sk, |
| 840 | struct sockaddr *uaddr, int addr_len); | 854 | struct sockaddr *uaddr, int addr_len); |
| 841 | 855 | ||
| 842 | int (*backlog_rcv) (struct sock *sk, | 856 | int (*backlog_rcv) (struct sock *sk, |
| 843 | struct sk_buff *skb); | 857 | struct sk_buff *skb); |
| 844 | 858 | ||
| 845 | /* Keeping track of sk's, looking them up, and port selection methods. */ | 859 | /* Keeping track of sk's, looking them up, and port selection methods. */ |
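For orientation, only a handful of struct proto fields need to be filled in before calling proto_register() (declared further down in this header). A hedged sketch; the "toy" identifiers are made up, the field names are real struct proto members:

```c
/* Illustrative minimal protocol registration. */
static struct proto toy_proto = {
	.name		= "TOY",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct sock),
};

static int __init toy_proto_init(void)
{
	return proto_register(&toy_proto, 1);	/* 1 = allocate a slab */
}
```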
| @@ -901,19 +915,30 @@ struct proto { | |||
| 901 | * This function has to setup any files the protocol want to | 915 | * This function has to setup any files the protocol want to |
| 902 | * appear in the kmem cgroup filesystem. | 916 | * appear in the kmem cgroup filesystem. |
| 903 | */ | 917 | */ |
| 904 | int (*init_cgroup)(struct cgroup *cgrp, | 918 | int (*init_cgroup)(struct mem_cgroup *memcg, |
| 905 | struct cgroup_subsys *ss); | 919 | struct cgroup_subsys *ss); |
| 906 | void (*destroy_cgroup)(struct cgroup *cgrp); | 920 | void (*destroy_cgroup)(struct mem_cgroup *memcg); |
| 907 | struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg); | 921 | struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg); |
| 908 | #endif | 922 | #endif |
| 909 | }; | 923 | }; |
| 910 | 924 | ||
| 925 | /* | ||
| 926 | * Bits in struct cg_proto.flags | ||
| 927 | */ | ||
| 928 | enum cg_proto_flags { | ||
| 929 | /* Currently active and new sockets should be assigned to cgroups */ | ||
| 930 | MEMCG_SOCK_ACTIVE, | ||
| 931 | /* It was ever activated; we must disarm static keys on destruction */ | ||
| 932 | MEMCG_SOCK_ACTIVATED, | ||
| 933 | }; | ||
| 934 | |||
| 911 | struct cg_proto { | 935 | struct cg_proto { |
| 912 | void (*enter_memory_pressure)(struct sock *sk); | 936 | void (*enter_memory_pressure)(struct sock *sk); |
| 913 | struct res_counter *memory_allocated; /* Current allocated memory. */ | 937 | struct res_counter *memory_allocated; /* Current allocated memory. */ |
| 914 | struct percpu_counter *sockets_allocated; /* Current number of sockets. */ | 938 | struct percpu_counter *sockets_allocated; /* Current number of sockets. */ |
| 915 | int *memory_pressure; | 939 | int *memory_pressure; |
| 916 | long *sysctl_mem; | 940 | long *sysctl_mem; |
| 941 | unsigned long flags; | ||
| 917 | /* | 942 | /* |
| 918 | * memcg field is used to find which memcg we belong directly | 943 | * memcg field is used to find which memcg we belong directly |
| 919 | * Each memcg struct can hold more than one cg_proto, so container_of | 944 | * Each memcg struct can hold more than one cg_proto, so container_of |
| @@ -929,6 +954,16 @@ struct cg_proto { | |||
| 929 | extern int proto_register(struct proto *prot, int alloc_slab); | 954 | extern int proto_register(struct proto *prot, int alloc_slab); |
| 930 | extern void proto_unregister(struct proto *prot); | 955 | extern void proto_unregister(struct proto *prot); |
| 931 | 956 | ||
| 957 | static inline bool memcg_proto_active(struct cg_proto *cg_proto) | ||
| 958 | { | ||
| 959 | return test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags); | ||
| 960 | } | ||
| 961 | |||
| 962 | static inline bool memcg_proto_activated(struct cg_proto *cg_proto) | ||
| 963 | { | ||
| 964 | return test_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags); | ||
| 965 | } | ||
| 966 | |||
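The ACTIVE/ACTIVATED split exists because the jump label guarding the socket-accounting fast path must be decremented on teardown only if this cgroup ever incremented it, even if the limit has since been lifted. A hedged sketch of the destruction-side check (the function name is illustrative; memcg_socket_limit_enabled is the static key this era's core socket code uses):

```c
/* Illustrative teardown: disarm the static key only if this cgroup
 * ever armed it, regardless of whether it is still active now. */
static void example_destroy_cgroup(struct mem_cgroup *memcg)
{
	struct cg_proto *cg_proto = tcp_prot.proto_cgroup(memcg);

	if (!cg_proto)
		return;
	if (memcg_proto_activated(cg_proto))
		static_key_slow_dec(&memcg_socket_limit_enabled);
}
```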
| 932 | #ifdef SOCK_REFCNT_DEBUG | 967 | #ifdef SOCK_REFCNT_DEBUG |
| 933 | static inline void sk_refcnt_debug_inc(struct sock *sk) | 968 | static inline void sk_refcnt_debug_inc(struct sock *sk) |
| 934 | { | 969 | { |
| @@ -1160,7 +1195,7 @@ proto_memory_pressure(struct proto *prot) | |||
| 1160 | extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc); | 1195 | extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc); |
| 1161 | extern int sock_prot_inuse_get(struct net *net, struct proto *proto); | 1196 | extern int sock_prot_inuse_get(struct net *net, struct proto *proto); |
| 1162 | #else | 1197 | #else |
| 1163 | static void inline sock_prot_inuse_add(struct net *net, struct proto *prot, | 1198 | static inline void sock_prot_inuse_add(struct net *net, struct proto *prot, |
| 1164 | int inc) | 1199 | int inc) |
| 1165 | { | 1200 | { |
| 1166 | } | 1201 | } |
| @@ -1247,24 +1282,24 @@ static inline int sk_mem_pages(int amt) | |||
| 1247 | return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT; | 1282 | return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT; |
| 1248 | } | 1283 | } |
| 1249 | 1284 | ||
| 1250 | static inline int sk_has_account(struct sock *sk) | 1285 | static inline bool sk_has_account(struct sock *sk) |
| 1251 | { | 1286 | { |
| 1252 | /* return true if protocol supports memory accounting */ | 1287 | /* return true if protocol supports memory accounting */ |
| 1253 | return !!sk->sk_prot->memory_allocated; | 1288 | return !!sk->sk_prot->memory_allocated; |
| 1254 | } | 1289 | } |
| 1255 | 1290 | ||
| 1256 | static inline int sk_wmem_schedule(struct sock *sk, int size) | 1291 | static inline bool sk_wmem_schedule(struct sock *sk, int size) |
| 1257 | { | 1292 | { |
| 1258 | if (!sk_has_account(sk)) | 1293 | if (!sk_has_account(sk)) |
| 1259 | return 1; | 1294 | return true; |
| 1260 | return size <= sk->sk_forward_alloc || | 1295 | return size <= sk->sk_forward_alloc || |
| 1261 | __sk_mem_schedule(sk, size, SK_MEM_SEND); | 1296 | __sk_mem_schedule(sk, size, SK_MEM_SEND); |
| 1262 | } | 1297 | } |
| 1263 | 1298 | ||
| 1264 | static inline int sk_rmem_schedule(struct sock *sk, int size) | 1299 | static inline bool sk_rmem_schedule(struct sock *sk, int size) |
| 1265 | { | 1300 | { |
| 1266 | if (!sk_has_account(sk)) | 1301 | if (!sk_has_account(sk)) |
| 1267 | return 1; | 1302 | return true; |
| 1268 | return size <= sk->sk_forward_alloc || | 1303 | return size <= sk->sk_forward_alloc || |
| 1269 | __sk_mem_schedule(sk, size, SK_MEM_RECV); | 1304 | __sk_mem_schedule(sk, size, SK_MEM_RECV); |
| 1270 | } | 1305 | } |
| @@ -1329,7 +1364,7 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) | |||
| 1329 | * Mark both the sk_lock and the sk_lock.slock as a | 1364 | * Mark both the sk_lock and the sk_lock.slock as a |
| 1330 | * per-address-family lock class. | 1365 | * per-address-family lock class. |
| 1331 | */ | 1366 | */ |
| 1332 | #define sock_lock_init_class_and_name(sk, sname, skey, name, key) \ | 1367 | #define sock_lock_init_class_and_name(sk, sname, skey, name, key) \ |
| 1333 | do { \ | 1368 | do { \ |
| 1334 | sk->sk_lock.owned = 0; \ | 1369 | sk->sk_lock.owned = 0; \ |
| 1335 | init_waitqueue_head(&sk->sk_lock.wq); \ | 1370 | init_waitqueue_head(&sk->sk_lock.wq); \ |
| @@ -1337,7 +1372,7 @@ do { \ | |||
| 1337 | debug_check_no_locks_freed((void *)&(sk)->sk_lock, \ | 1372 | debug_check_no_locks_freed((void *)&(sk)->sk_lock, \ |
| 1338 | sizeof((sk)->sk_lock)); \ | 1373 | sizeof((sk)->sk_lock)); \ |
| 1339 | lockdep_set_class_and_name(&(sk)->sk_lock.slock, \ | 1374 | lockdep_set_class_and_name(&(sk)->sk_lock.slock, \ |
| 1340 | (skey), (sname)); \ | 1375 | (skey), (sname)); \ |
| 1341 | lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \ | 1376 | lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \ |
| 1342 | } while (0) | 1377 | } while (0) |
| 1343 | 1378 | ||
| @@ -1397,13 +1432,13 @@ extern int sock_setsockopt(struct socket *sock, int level, | |||
| 1397 | unsigned int optlen); | 1432 | unsigned int optlen); |
| 1398 | 1433 | ||
| 1399 | extern int sock_getsockopt(struct socket *sock, int level, | 1434 | extern int sock_getsockopt(struct socket *sock, int level, |
| 1400 | int op, char __user *optval, | 1435 | int op, char __user *optval, |
| 1401 | int __user *optlen); | 1436 | int __user *optlen); |
| 1402 | extern struct sk_buff *sock_alloc_send_skb(struct sock *sk, | 1437 | extern struct sk_buff *sock_alloc_send_skb(struct sock *sk, |
| 1403 | unsigned long size, | 1438 | unsigned long size, |
| 1404 | int noblock, | 1439 | int noblock, |
| 1405 | int *errcode); | 1440 | int *errcode); |
| 1406 | extern struct sk_buff *sock_alloc_send_pskb(struct sock *sk, | 1441 | extern struct sk_buff *sock_alloc_send_pskb(struct sock *sk, |
| 1407 | unsigned long header_len, | 1442 | unsigned long header_len, |
| 1408 | unsigned long data_len, | 1443 | unsigned long data_len, |
| 1409 | int noblock, | 1444 | int noblock, |
| @@ -1425,7 +1460,7 @@ static inline void sock_update_classid(struct sock *sk) | |||
| 1425 | * Functions to fill in entries in struct proto_ops when a protocol | 1460 | * Functions to fill in entries in struct proto_ops when a protocol |
| 1426 | * does not implement a particular function. | 1461 | * does not implement a particular function. |
| 1427 | */ | 1462 | */ |
| 1428 | extern int sock_no_bind(struct socket *, | 1463 | extern int sock_no_bind(struct socket *, |
| 1429 | struct sockaddr *, int); | 1464 | struct sockaddr *, int); |
| 1430 | extern int sock_no_connect(struct socket *, | 1465 | extern int sock_no_connect(struct socket *, |
| 1431 | struct sockaddr *, int, int); | 1466 | struct sockaddr *, int, int); |
| @@ -1454,7 +1489,7 @@ extern int sock_no_mmap(struct file *file, | |||
| 1454 | struct vm_area_struct *vma); | 1489 | struct vm_area_struct *vma); |
| 1455 | extern ssize_t sock_no_sendpage(struct socket *sock, | 1490 | extern ssize_t sock_no_sendpage(struct socket *sock, |
| 1456 | struct page *page, | 1491 | struct page *page, |
| 1457 | int offset, size_t size, | 1492 | int offset, size_t size, |
| 1458 | int flags); | 1493 | int flags); |
| 1459 | 1494 | ||
| 1460 | /* | 1495 | /* |
| @@ -1477,7 +1512,7 @@ extern void sk_common_release(struct sock *sk); | |||
| 1477 | /* | 1512 | /* |
| 1478 | * Default socket callbacks and setup code | 1513 | * Default socket callbacks and setup code |
| 1479 | */ | 1514 | */ |
| 1480 | 1515 | ||
| 1481 | /* Initialise core socket variables */ | 1516 | /* Initialise core socket variables */ |
| 1482 | extern void sock_init_data(struct socket *sock, struct sock *sk); | 1517 | extern void sock_init_data(struct socket *sock, struct sock *sk); |
| 1483 | 1518 | ||
| @@ -1677,7 +1712,7 @@ extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie); | |||
| 1677 | 1712 | ||
| 1678 | extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie); | 1713 | extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie); |
| 1679 | 1714 | ||
| 1680 | static inline int sk_can_gso(const struct sock *sk) | 1715 | static inline bool sk_can_gso(const struct sock *sk) |
| 1681 | { | 1716 | { |
| 1682 | return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type); | 1717 | return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type); |
| 1683 | } | 1718 | } |
| @@ -1794,7 +1829,7 @@ static inline int sk_rmem_alloc_get(const struct sock *sk) | |||
| 1794 | * | 1829 | * |
| 1795 | * Returns true if socket has write or read allocations | 1830 | * Returns true if socket has write or read allocations |
| 1796 | */ | 1831 | */ |
| 1797 | static inline int sk_has_allocations(const struct sock *sk) | 1832 | static inline bool sk_has_allocations(const struct sock *sk) |
| 1798 | { | 1833 | { |
| 1799 | return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk); | 1834 | return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk); |
| 1800 | } | 1835 | } |
| @@ -1833,9 +1868,7 @@ static inline int sk_has_allocations(const struct sock *sk) | |||
| 1833 | */ | 1868 | */ |
| 1834 | static inline bool wq_has_sleeper(struct socket_wq *wq) | 1869 | static inline bool wq_has_sleeper(struct socket_wq *wq) |
| 1835 | { | 1870 | { |
| 1836 | 1871 | /* We need to be sure we are in sync with the | |
| 1837 | /* | ||
| 1838 | * We need to be sure we are in sync with the | ||
| 1839 | * add_wait_queue modifications to the wait queue. | 1872 | * add_wait_queue modifications to the wait queue. |
| 1840 | * | 1873 | * |
| 1841 | * This memory barrier is paired in the sock_poll_wait. | 1874 | * This memory barrier is paired in the sock_poll_wait. |
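The barrier here pairs with the smp_mb() in sock_poll_wait() below; the wake-up side then uses wq_has_sleeper() to skip the wake-up entirely when nobody is polling. A condensed sketch modelled on the default sk_data_ready callback (the function name is illustrative; the calls and the (sk, len) signature match this kernel version):

```c
/* Illustrative wake-up path: only wake pollers if wq_has_sleeper()
 * says someone is actually waiting (cf. the default data-ready hook). */
static void example_data_ready(struct sock *sk, int len)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait,
						POLLIN | POLLRDNORM);
	rcu_read_unlock();
}
```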
| @@ -1857,22 +1890,21 @@ static inline void sock_poll_wait(struct file *filp, | |||
| 1857 | { | 1890 | { |
| 1858 | if (!poll_does_not_wait(p) && wait_address) { | 1891 | if (!poll_does_not_wait(p) && wait_address) { |
| 1859 | poll_wait(filp, wait_address, p); | 1892 | poll_wait(filp, wait_address, p); |
| 1860 | /* | 1893 | /* We need to be sure we are in sync with the |
| 1861 | * We need to be sure we are in sync with the | ||
| 1862 | * socket flags modification. | 1894 | * socket flags modification. |
| 1863 | * | 1895 | * |
| 1864 | * This memory barrier is paired in the wq_has_sleeper. | 1896 | * This memory barrier is paired in the wq_has_sleeper. |
| 1865 | */ | 1897 | */ |
| 1866 | smp_mb(); | 1898 | smp_mb(); |
| 1867 | } | 1899 | } |
| 1868 | } | 1900 | } |
| 1869 | 1901 | ||
| 1870 | /* | 1902 | /* |
| 1871 | * Queue a received datagram if it will fit. Stream and sequenced | 1903 | * Queue a received datagram if it will fit. Stream and sequenced |
| 1872 | * protocols can't normally use this as they need to fit buffers in | 1904 | * protocols can't normally use this as they need to fit buffers in |
| 1873 | * and play with them. | 1905 | * and play with them. |
| 1874 | * | 1906 | * |
| 1875 | * Inlined as it's very short and called for pretty much every | 1907 | * Inlined as it's very short and called for pretty much every |
| 1876 | * packet ever received. | 1908 | * packet ever received. |
| 1877 | */ | 1909 | */ |
| 1878 | 1910 | ||
| @@ -1898,10 +1930,10 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk) | |||
| 1898 | sk_mem_charge(sk, skb->truesize); | 1930 | sk_mem_charge(sk, skb->truesize); |
| 1899 | } | 1931 | } |
| 1900 | 1932 | ||
| 1901 | extern void sk_reset_timer(struct sock *sk, struct timer_list* timer, | 1933 | extern void sk_reset_timer(struct sock *sk, struct timer_list *timer, |
| 1902 | unsigned long expires); | 1934 | unsigned long expires); |
| 1903 | 1935 | ||
| 1904 | extern void sk_stop_timer(struct sock *sk, struct timer_list* timer); | 1936 | extern void sk_stop_timer(struct sock *sk, struct timer_list *timer); |
| 1905 | 1937 | ||
| 1906 | extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); | 1938 | extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); |
| 1907 | 1939 | ||
| @@ -1910,7 +1942,7 @@ extern int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb); | |||
| 1910 | /* | 1942 | /* |
| 1911 | * Recover an error report and clear atomically | 1943 | * Recover an error report and clear atomically |
| 1912 | */ | 1944 | */ |
| 1913 | 1945 | ||
| 1914 | static inline int sock_error(struct sock *sk) | 1946 | static inline int sock_error(struct sock *sk) |
| 1915 | { | 1947 | { |
| 1916 | int err; | 1948 | int err; |
| @@ -1926,7 +1958,7 @@ static inline unsigned long sock_wspace(struct sock *sk) | |||
| 1926 | 1958 | ||
| 1927 | if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { | 1959 | if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { |
| 1928 | amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); | 1960 | amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); |
| 1929 | if (amt < 0) | 1961 | if (amt < 0) |
| 1930 | amt = 0; | 1962 | amt = 0; |
| 1931 | } | 1963 | } |
| 1932 | return amt; | 1964 | return amt; |
| @@ -1970,7 +2002,7 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk) | |||
| 1970 | /* | 2002 | /* |
| 1971 | * Default write policy as shown to user space via poll/select/SIGIO | 2003 | * Default write policy as shown to user space via poll/select/SIGIO |
| 1972 | */ | 2004 | */ |
| 1973 | static inline int sock_writeable(const struct sock *sk) | 2005 | static inline bool sock_writeable(const struct sock *sk) |
| 1974 | { | 2006 | { |
| 1975 | return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1); | 2007 | return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1); |
| 1976 | } | 2008 | } |
| @@ -1980,12 +2012,12 @@ static inline gfp_t gfp_any(void) | |||
| 1980 | return in_softirq() ? GFP_ATOMIC : GFP_KERNEL; | 2012 | return in_softirq() ? GFP_ATOMIC : GFP_KERNEL; |
| 1981 | } | 2013 | } |
| 1982 | 2014 | ||
| 1983 | static inline long sock_rcvtimeo(const struct sock *sk, int noblock) | 2015 | static inline long sock_rcvtimeo(const struct sock *sk, bool noblock) |
| 1984 | { | 2016 | { |
| 1985 | return noblock ? 0 : sk->sk_rcvtimeo; | 2017 | return noblock ? 0 : sk->sk_rcvtimeo; |
| 1986 | } | 2018 | } |
| 1987 | 2019 | ||
| 1988 | static inline long sock_sndtimeo(const struct sock *sk, int noblock) | 2020 | static inline long sock_sndtimeo(const struct sock *sk, bool noblock) |
| 1989 | { | 2021 | { |
| 1990 | return noblock ? 0 : sk->sk_sndtimeo; | 2022 | return noblock ? 0 : sk->sk_sndtimeo; |
| 1991 | } | 2023 | } |
| @@ -2008,7 +2040,7 @@ extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, | |||
| 2008 | extern void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk, | 2040 | extern void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk, |
| 2009 | struct sk_buff *skb); | 2041 | struct sk_buff *skb); |
| 2010 | 2042 | ||
| 2011 | static __inline__ void | 2043 | static inline void |
| 2012 | sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) | 2044 | sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) |
| 2013 | { | 2045 | { |
| 2014 | ktime_t kt = skb->tstamp; | 2046 | ktime_t kt = skb->tstamp; |
| @@ -2049,7 +2081,7 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, | |||
| 2049 | (1UL << SOCK_RCVTSTAMP) | \ | 2081 | (1UL << SOCK_RCVTSTAMP) | \ |
| 2050 | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \ | 2082 | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \ |
| 2051 | (1UL << SOCK_TIMESTAMPING_SOFTWARE) | \ | 2083 | (1UL << SOCK_TIMESTAMPING_SOFTWARE) | \ |
| 2052 | (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) | \ | 2084 | (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) | \ |
| 2053 | (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE)) | 2085 | (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE)) |
| 2054 | 2086 | ||
| 2055 | if (sk->sk_flags & FLAGS_TS_OR_DROPS) | 2087 | if (sk->sk_flags & FLAGS_TS_OR_DROPS) |
| @@ -2078,7 +2110,7 @@ extern int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags); | |||
| 2078 | * locked so that the sk_buff queue operation is ok. | 2110 | * locked so that the sk_buff queue operation is ok. |
| 2079 | */ | 2111 | */ |
| 2080 | #ifdef CONFIG_NET_DMA | 2112 | #ifdef CONFIG_NET_DMA |
| 2081 | static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early) | 2113 | static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early) |
| 2082 | { | 2114 | { |
| 2083 | __skb_unlink(skb, &sk->sk_receive_queue); | 2115 | __skb_unlink(skb, &sk->sk_receive_queue); |
| 2084 | if (!copied_early) | 2116 | if (!copied_early) |
| @@ -2087,7 +2119,7 @@ static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_e | |||
| 2087 | __skb_queue_tail(&sk->sk_async_wait_queue, skb); | 2119 | __skb_queue_tail(&sk->sk_async_wait_queue, skb); |
| 2088 | } | 2120 | } |
| 2089 | #else | 2121 | #else |
| 2090 | static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early) | 2122 | static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early) |
| 2091 | { | 2123 | { |
| 2092 | __skb_unlink(skb, &sk->sk_receive_queue); | 2124 | __skb_unlink(skb, &sk->sk_receive_queue); |
| 2093 | __kfree_skb(skb); | 2125 | __kfree_skb(skb); |
| @@ -2134,8 +2166,8 @@ extern void sock_enable_timestamp(struct sock *sk, int flag); | |||
| 2134 | extern int sock_get_timestamp(struct sock *, struct timeval __user *); | 2166 | extern int sock_get_timestamp(struct sock *, struct timeval __user *); |
| 2135 | extern int sock_get_timestampns(struct sock *, struct timespec __user *); | 2167 | extern int sock_get_timestampns(struct sock *, struct timespec __user *); |
| 2136 | 2168 | ||
| 2137 | /* | 2169 | /* |
| 2138 | * Enable debug/info messages | 2170 | * Enable debug/info messages |
| 2139 | */ | 2171 | */ |
| 2140 | extern int net_msg_warn; | 2172 | extern int net_msg_warn; |
| 2141 | #define NETDEBUG(fmt, args...) \ | 2173 | #define NETDEBUG(fmt, args...) \ |
