Diffstat (limited to 'include/net/sock.h')
-rw-r--r-- | include/net/sock.h | 269
1 file changed, 118 insertions, 151 deletions
diff --git a/include/net/sock.h b/include/net/sock.h
index 67e35c7e230c..8a7889b35810 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -47,6 +47,7 @@
47 | #include <linux/module.h> | 47 | #include <linux/module.h> |
48 | #include <linux/lockdep.h> | 48 | #include <linux/lockdep.h> |
49 | #include <linux/netdevice.h> | 49 | #include <linux/netdevice.h> |
50 | #include <linux/pcounter.h> | ||
50 | #include <linux/skbuff.h> /* struct sk_buff */ | 51 | #include <linux/skbuff.h> /* struct sk_buff */ |
51 | #include <linux/mm.h> | 52 | #include <linux/mm.h> |
52 | #include <linux/security.h> | 53 | #include <linux/security.h> |
@@ -56,7 +57,6 @@
56 | #include <asm/atomic.h> | 57 | #include <asm/atomic.h> |
57 | #include <net/dst.h> | 58 | #include <net/dst.h> |
58 | #include <net/checksum.h> | 59 | #include <net/checksum.h> |
59 | #include <net/net_namespace.h> | ||
60 | 60 | ||
61 | /* | 61 | /* |
62 | * This structure really needs to be cleaned up. | 62 | * This structure really needs to be cleaned up. |
@@ -94,6 +94,7 @@ typedef struct {
94 | 94 | ||
95 | struct sock; | 95 | struct sock; |
96 | struct proto; | 96 | struct proto; |
97 | struct net; | ||
97 | 98 | ||
98 | /** | 99 | /** |
99 | * struct sock_common - minimal network layer representation of sockets | 100 | * struct sock_common - minimal network layer representation of sockets |
@@ -145,7 +146,8 @@ struct sock_common {
145 | * @sk_forward_alloc: space allocated forward | 146 | * @sk_forward_alloc: space allocated forward |
146 | * @sk_allocation: allocation mode | 147 | * @sk_allocation: allocation mode |
147 | * @sk_sndbuf: size of send buffer in bytes | 148 | * @sk_sndbuf: size of send buffer in bytes |
148 | * @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, %SO_OOBINLINE settings | 149 | * @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, |
150 | * %SO_OOBINLINE settings | ||
149 | * @sk_no_check: %SO_NO_CHECK setting, wether or not checkup packets | 151 | * @sk_no_check: %SO_NO_CHECK setting, wether or not checkup packets |
150 | * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO) | 152 | * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO) |
151 | * @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4) | 153 | * @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4) |
@@ -153,9 +155,12 @@ struct sock_common {
153 | * @sk_backlog: always used with the per-socket spinlock held | 155 | * @sk_backlog: always used with the per-socket spinlock held |
154 | * @sk_callback_lock: used with the callbacks in the end of this struct | 156 | * @sk_callback_lock: used with the callbacks in the end of this struct |
155 | * @sk_error_queue: rarely used | 157 | * @sk_error_queue: rarely used |
156 | * @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt, IPV6_ADDRFORM for instance) | 158 | * @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt, |
159 | * IPV6_ADDRFORM for instance) | ||
157 | * @sk_err: last error | 160 | * @sk_err: last error |
158 | * @sk_err_soft: errors that don't cause failure but are the cause of a persistent failure not just 'timed out' | 161 | * @sk_err_soft: errors that don't cause failure but are the cause of a |
162 | * persistent failure not just 'timed out' | ||
163 | * @sk_drops: raw drops counter | ||
159 | * @sk_ack_backlog: current listen backlog | 164 | * @sk_ack_backlog: current listen backlog |
160 | * @sk_max_ack_backlog: listen backlog set in listen() | 165 | * @sk_max_ack_backlog: listen backlog set in listen() |
161 | * @sk_priority: %SO_PRIORITY setting | 166 | * @sk_priority: %SO_PRIORITY setting |
@@ -239,6 +244,7 @@ struct sock {
239 | rwlock_t sk_callback_lock; | 244 | rwlock_t sk_callback_lock; |
240 | int sk_err, | 245 | int sk_err, |
241 | sk_err_soft; | 246 | sk_err_soft; |
247 | atomic_t sk_drops; | ||
242 | unsigned short sk_ack_backlog; | 248 | unsigned short sk_ack_backlog; |
243 | unsigned short sk_max_ack_backlog; | 249 | unsigned short sk_max_ack_backlog; |
244 | __u32 sk_priority; | 250 | __u32 sk_priority; |
@@ -256,6 +262,8 @@ struct sock {
256 | __u32 sk_sndmsg_off; | 262 | __u32 sk_sndmsg_off; |
257 | int sk_write_pending; | 263 | int sk_write_pending; |
258 | void *sk_security; | 264 | void *sk_security; |
265 | __u32 sk_mark; | ||
266 | /* XXX 4 bytes hole on 64 bit */ | ||
259 | void (*sk_state_change)(struct sock *sk); | 267 | void (*sk_state_change)(struct sock *sk); |
260 | void (*sk_data_ready)(struct sock *sk, int bytes); | 268 | void (*sk_data_ready)(struct sock *sk, int bytes); |
261 | void (*sk_write_space)(struct sock *sk); | 269 | void (*sk_write_space)(struct sock *sk); |
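The two new fields above are consumed outside this header: sk_mark carries a per-socket mark for routing and filtering decisions, while sk_drops lets datagram protocols count packets dropped on a particular socket. A minimal sketch of the intended sk_drops usage follows; the helper name is hypothetical and not part of this diff.

	static int example_deliver(struct sock *sk, struct sk_buff *skb)
	{
		if (sock_queue_rcv_skb(sk, skb) < 0) {
			/* receive buffer full: account the per-socket drop */
			atomic_inc(&sk->sk_drops);
			kfree_skb(skb);
			return -1;
		}
		return 0;
	}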
@@ -439,7 +447,7 @@ static inline int sk_acceptq_is_full(struct sock *sk)
439 | */ | 447 | */ |
440 | static inline int sk_stream_min_wspace(struct sock *sk) | 448 | static inline int sk_stream_min_wspace(struct sock *sk) |
441 | { | 449 | { |
442 | return sk->sk_wmem_queued / 2; | 450 | return sk->sk_wmem_queued >> 1; |
443 | } | 451 | } |
444 | 452 | ||
445 | static inline int sk_stream_wspace(struct sock *sk) | 453 | static inline int sk_stream_wspace(struct sock *sk) |
@@ -454,25 +462,6 @@ static inline int sk_stream_memory_free(struct sock *sk)
454 | return sk->sk_wmem_queued < sk->sk_sndbuf; | 462 | return sk->sk_wmem_queued < sk->sk_sndbuf; |
455 | } | 463 | } |
456 | 464 | ||
457 | extern void sk_stream_rfree(struct sk_buff *skb); | ||
458 | |||
459 | static inline void sk_stream_set_owner_r(struct sk_buff *skb, struct sock *sk) | ||
460 | { | ||
461 | skb->sk = sk; | ||
462 | skb->destructor = sk_stream_rfree; | ||
463 | atomic_add(skb->truesize, &sk->sk_rmem_alloc); | ||
464 | sk->sk_forward_alloc -= skb->truesize; | ||
465 | } | ||
466 | |||
467 | static inline void sk_stream_free_skb(struct sock *sk, struct sk_buff *skb) | ||
468 | { | ||
469 | skb_truesize_check(skb); | ||
470 | sock_set_flag(sk, SOCK_QUEUE_SHRUNK); | ||
471 | sk->sk_wmem_queued -= skb->truesize; | ||
472 | sk->sk_forward_alloc += skb->truesize; | ||
473 | __kfree_skb(skb); | ||
474 | } | ||
475 | |||
476 | /* The per-socket spinlock must be held here. */ | 465 | /* The per-socket spinlock must be held here. */ |
477 | static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb) | 466 | static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb) |
478 | { | 467 | { |
@@ -507,6 +496,7 @@ extern int sk_wait_data(struct sock *sk, long *timeo);
507 | 496 | ||
508 | struct request_sock_ops; | 497 | struct request_sock_ops; |
509 | struct timewait_sock_ops; | 498 | struct timewait_sock_ops; |
499 | struct inet_hashinfo; | ||
510 | 500 | ||
511 | /* Networking protocol blocks we attach to sockets. | 501 | /* Networking protocol blocks we attach to sockets. |
512 | * socket layer -> transport layer interface | 502 | * socket layer -> transport layer interface |
@@ -560,14 +550,11 @@ struct proto {
560 | void (*unhash)(struct sock *sk); | 550 | void (*unhash)(struct sock *sk); |
561 | int (*get_port)(struct sock *sk, unsigned short snum); | 551 | int (*get_port)(struct sock *sk, unsigned short snum); |
562 | 552 | ||
563 | #ifdef CONFIG_SMP | ||
564 | /* Keeping track of sockets in use */ | 553 | /* Keeping track of sockets in use */ |
565 | void (*inuse_add)(struct proto *prot, int inc); | 554 | #ifdef CONFIG_PROC_FS |
566 | int (*inuse_getval)(const struct proto *prot); | 555 | struct pcounter inuse; |
567 | int *inuse_ptr; | ||
568 | #else | ||
569 | int inuse; | ||
570 | #endif | 556 | #endif |
557 | |||
571 | /* Memory pressure */ | 558 | /* Memory pressure */ |
572 | void (*enter_memory_pressure)(void); | 559 | void (*enter_memory_pressure)(void); |
573 | atomic_t *memory_allocated; /* Current allocated memory. */ | 560 | atomic_t *memory_allocated; /* Current allocated memory. */ |
@@ -575,7 +562,7 @@ struct proto {
575 | /* | 562 | /* |
576 | * Pressure flag: try to collapse. | 563 | * Pressure flag: try to collapse. |
577 | * Technical note: it is used by multiple contexts non atomically. | 564 | * Technical note: it is used by multiple contexts non atomically. |
578 | * All the sk_stream_mem_schedule() is of this nature: accounting | 565 | * All the __sk_mem_schedule() is of this nature: accounting |
579 | * is strict, actions are advisory and have some latency. | 566 | * is strict, actions are advisory and have some latency. |
580 | */ | 567 | */ |
581 | int *memory_pressure; | 568 | int *memory_pressure; |
@@ -592,6 +579,8 @@ struct proto {
592 | struct request_sock_ops *rsk_prot; | 579 | struct request_sock_ops *rsk_prot; |
593 | struct timewait_sock_ops *twsk_prot; | 580 | struct timewait_sock_ops *twsk_prot; |
594 | 581 | ||
582 | struct inet_hashinfo *hashinfo; | ||
583 | |||
595 | struct module *owner; | 584 | struct module *owner; |
596 | 585 | ||
597 | char name[32]; | 586 | char name[32]; |
@@ -602,36 +591,6 @@ struct proto {
602 | #endif | 591 | #endif |
603 | }; | 592 | }; |
604 | 593 | ||
605 | /* | ||
606 | * Special macros to let protos use a fast version of inuse{get|add} | ||
607 | * using a static percpu variable per proto instead of an allocated one, | ||
608 | * saving one dereference. | ||
609 | * This might be changed if/when dynamic percpu vars become fast. | ||
610 | */ | ||
611 | #ifdef CONFIG_SMP | ||
612 | # define DEFINE_PROTO_INUSE(NAME) \ | ||
613 | static DEFINE_PER_CPU(int, NAME##_inuse); \ | ||
614 | static void NAME##_inuse_add(struct proto *prot, int inc) \ | ||
615 | { \ | ||
616 | __get_cpu_var(NAME##_inuse) += inc; \ | ||
617 | } \ | ||
618 | \ | ||
619 | static int NAME##_inuse_getval(const struct proto *prot)\ | ||
620 | { \ | ||
621 | int res = 0, cpu; \ | ||
622 | \ | ||
623 | for_each_possible_cpu(cpu) \ | ||
624 | res += per_cpu(NAME##_inuse, cpu); \ | ||
625 | return res; \ | ||
626 | } | ||
627 | # define REF_PROTO_INUSE(NAME) \ | ||
628 | .inuse_add = NAME##_inuse_add, \ | ||
629 | .inuse_getval = NAME##_inuse_getval, | ||
630 | #else | ||
631 | # define DEFINE_PROTO_INUSE(NAME) | ||
632 | # define REF_PROTO_INUSE(NAME) | ||
633 | #endif | ||
634 | |||
635 | extern int proto_register(struct proto *prot, int alloc_slab); | 594 | extern int proto_register(struct proto *prot, int alloc_slab); |
636 | extern void proto_unregister(struct proto *prot); | 595 | extern void proto_unregister(struct proto *prot); |
637 | 596 | ||
@@ -660,33 +619,42 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
660 | #define sk_refcnt_debug_release(sk) do { } while (0) | 619 | #define sk_refcnt_debug_release(sk) do { } while (0) |
661 | #endif /* SOCK_REFCNT_DEBUG */ | 620 | #endif /* SOCK_REFCNT_DEBUG */ |
662 | 621 | ||
622 | |||
623 | #ifdef CONFIG_PROC_FS | ||
624 | # define DEFINE_PROTO_INUSE(NAME) DEFINE_PCOUNTER(NAME) | ||
625 | # define REF_PROTO_INUSE(NAME) PCOUNTER_MEMBER_INITIALIZER(NAME, .inuse) | ||
663 | /* Called with local bh disabled */ | 626 | /* Called with local bh disabled */ |
664 | static __inline__ void sock_prot_inc_use(struct proto *prot) | 627 | static inline void sock_prot_inuse_add(struct proto *prot, int inc) |
665 | { | 628 | { |
666 | #ifdef CONFIG_SMP | 629 | pcounter_add(&prot->inuse, inc); |
667 | prot->inuse_add(prot, 1); | ||
668 | #else | ||
669 | prot->inuse++; | ||
670 | #endif | ||
671 | } | 630 | } |
672 | 631 | static inline int sock_prot_inuse_init(struct proto *proto) | |
673 | static __inline__ void sock_prot_dec_use(struct proto *prot) | ||
674 | { | 632 | { |
675 | #ifdef CONFIG_SMP | 633 | return pcounter_alloc(&proto->inuse); |
676 | prot->inuse_add(prot, -1); | ||
677 | #else | ||
678 | prot->inuse--; | ||
679 | #endif | ||
680 | } | 634 | } |
681 | 635 | static inline int sock_prot_inuse_get(struct proto *proto) | |
682 | static __inline__ int sock_prot_inuse(struct proto *proto) | 636 | { |
637 | return pcounter_getval(&proto->inuse); | ||
638 | } | ||
639 | static inline void sock_prot_inuse_free(struct proto *proto) | ||
683 | { | 640 | { |
684 | #ifdef CONFIG_SMP | 641 | pcounter_free(&proto->inuse); |
685 | return proto->inuse_getval(proto); | 642 | } |
686 | #else | 643 | #else |
687 | return proto->inuse; | 644 | # define DEFINE_PROTO_INUSE(NAME) |
688 | #endif | 645 | # define REF_PROTO_INUSE(NAME) |
646 | static void inline sock_prot_inuse_add(struct proto *prot, int inc) | ||
647 | { | ||
648 | } | ||
649 | static int inline sock_prot_inuse_init(struct proto *proto) | ||
650 | { | ||
651 | return 0; | ||
689 | } | 652 | } |
653 | static void inline sock_prot_inuse_free(struct proto *proto) | ||
654 | { | ||
655 | } | ||
656 | #endif | ||
657 | |||
690 | 658 | ||
691 | /* With per-bucket locks this operation is not-atomic, so that | 659 | /* With per-bucket locks this operation is not-atomic, so that |
692 | * this version is not worse. | 660 | * this version is not worse. |
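With the pcounter-based scheme above, a protocol declares its counter with DEFINE_PROTO_INUSE(), wires it into its struct proto with REF_PROTO_INUSE(), and adjusts it from its hash/unhash callbacks with local BHs disabled, as noted above. A sketch under those assumptions; the "example" identifiers are hypothetical and not part of this diff.

	DEFINE_PROTO_INUSE(example)

	static void example_hash(struct sock *sk)
	{
		/* ... insert sk into the protocol's lookup tables ... */
		sock_prot_inuse_add(sk->sk_prot, 1);
	}

	static void example_unhash(struct sock *sk)
	{
		/* ... remove sk from the lookup tables ... */
		sock_prot_inuse_add(sk->sk_prot, -1);
	}

	struct proto example_prot = {
		.name	= "EXAMPLE",
		.owner	= THIS_MODULE,
		.hash	= example_hash,
		.unhash	= example_unhash,
		/* ... */
		REF_PROTO_INUSE(example)
	};

sock_prot_inuse_get() then sums the per-CPU values (e.g. for /proc/net reporting), while sock_prot_inuse_init()/sock_prot_inuse_free() set up and tear down counter storage for protocols that do not provide the static DEFINE_PROTO_INUSE() variant.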
@@ -750,32 +718,81 @@ static inline struct inode *SOCK_INODE(struct socket *socket)
750 | return &container_of(socket, struct socket_alloc, socket)->vfs_inode; | 718 | return &container_of(socket, struct socket_alloc, socket)->vfs_inode; |
751 | } | 719 | } |
752 | 720 | ||
753 | extern void __sk_stream_mem_reclaim(struct sock *sk); | 721 | /* |
754 | extern int sk_stream_mem_schedule(struct sock *sk, int size, int kind); | 722 | * Functions for memory accounting |
723 | */ | ||
724 | extern int __sk_mem_schedule(struct sock *sk, int size, int kind); | ||
725 | extern void __sk_mem_reclaim(struct sock *sk); | ||
755 | 726 | ||
756 | #define SK_STREAM_MEM_QUANTUM ((int)PAGE_SIZE) | 727 | #define SK_MEM_QUANTUM ((int)PAGE_SIZE) |
728 | #define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM) | ||
729 | #define SK_MEM_SEND 0 | ||
730 | #define SK_MEM_RECV 1 | ||
757 | 731 | ||
758 | static inline int sk_stream_pages(int amt) | 732 | static inline int sk_mem_pages(int amt) |
759 | { | 733 | { |
760 | return DIV_ROUND_UP(amt, SK_STREAM_MEM_QUANTUM); | 734 | return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT; |
761 | } | 735 | } |
762 | 736 | ||
763 | static inline void sk_stream_mem_reclaim(struct sock *sk) | 737 | static inline int sk_has_account(struct sock *sk) |
764 | { | 738 | { |
765 | if (sk->sk_forward_alloc >= SK_STREAM_MEM_QUANTUM) | 739 | /* return true if protocol supports memory accounting */ |
766 | __sk_stream_mem_reclaim(sk); | 740 | return !!sk->sk_prot->memory_allocated; |
767 | } | 741 | } |
768 | 742 | ||
769 | static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb) | 743 | static inline int sk_wmem_schedule(struct sock *sk, int size) |
770 | { | 744 | { |
771 | return (int)skb->truesize <= sk->sk_forward_alloc || | 745 | if (!sk_has_account(sk)) |
772 | sk_stream_mem_schedule(sk, skb->truesize, 1); | 746 | return 1; |
747 | return size <= sk->sk_forward_alloc || | ||
748 | __sk_mem_schedule(sk, size, SK_MEM_SEND); | ||
773 | } | 749 | } |
774 | 750 | ||
775 | static inline int sk_stream_wmem_schedule(struct sock *sk, int size) | 751 | static inline int sk_rmem_schedule(struct sock *sk, int size) |
776 | { | 752 | { |
753 | if (!sk_has_account(sk)) | ||
754 | return 1; | ||
777 | return size <= sk->sk_forward_alloc || | 755 | return size <= sk->sk_forward_alloc || |
778 | sk_stream_mem_schedule(sk, size, 0); | 756 | __sk_mem_schedule(sk, size, SK_MEM_RECV); |
757 | } | ||
758 | |||
759 | static inline void sk_mem_reclaim(struct sock *sk) | ||
760 | { | ||
761 | if (!sk_has_account(sk)) | ||
762 | return; | ||
763 | if (sk->sk_forward_alloc >= SK_MEM_QUANTUM) | ||
764 | __sk_mem_reclaim(sk); | ||
765 | } | ||
766 | |||
767 | static inline void sk_mem_reclaim_partial(struct sock *sk) | ||
768 | { | ||
769 | if (!sk_has_account(sk)) | ||
770 | return; | ||
771 | if (sk->sk_forward_alloc > SK_MEM_QUANTUM) | ||
772 | __sk_mem_reclaim(sk); | ||
773 | } | ||
774 | |||
775 | static inline void sk_mem_charge(struct sock *sk, int size) | ||
776 | { | ||
777 | if (!sk_has_account(sk)) | ||
778 | return; | ||
779 | sk->sk_forward_alloc -= size; | ||
780 | } | ||
781 | |||
782 | static inline void sk_mem_uncharge(struct sock *sk, int size) | ||
783 | { | ||
784 | if (!sk_has_account(sk)) | ||
785 | return; | ||
786 | sk->sk_forward_alloc += size; | ||
787 | } | ||
788 | |||
789 | static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) | ||
790 | { | ||
791 | skb_truesize_check(skb); | ||
792 | sock_set_flag(sk, SOCK_QUEUE_SHRUNK); | ||
793 | sk->sk_wmem_queued -= skb->truesize; | ||
794 | sk_mem_uncharge(sk, skb->truesize); | ||
795 | __kfree_skb(skb); | ||
779 | } | 796 | } |
780 | 797 | ||
781 | /* Used by processes to "lock" a socket state, so that | 798 | /* Used by processes to "lock" a socket state, so that |
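The helpers above generalize the old sk_stream_* accounting to any protocol whose struct proto sets memory_allocated (see sk_has_account()). On the receive side the expected pattern is to reserve forward-allocated space with sk_rmem_schedule() before taking ownership of the skb; a minimal sketch, with a hypothetical helper name.

	static int example_queue_rcv(struct sock *sk, struct sk_buff *skb)
	{
		if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >
		    (unsigned int)sk->sk_rcvbuf)
			return -ENOMEM;

		if (!sk_rmem_schedule(sk, skb->truesize))
			return -ENOBUFS;

		skb_set_owner_r(skb, sk);	/* now also calls sk_mem_charge() */
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);
		return 0;
	}

When queues later shrink, sk_mem_reclaim() (or the partial variant) returns whole SK_MEM_QUANTUM-sized chunks of sk_forward_alloc to the protocol's global memory_allocated pool.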
@@ -812,14 +829,14 @@ do { \
812 | lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \ | 829 | lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \ |
813 | } while (0) | 830 | } while (0) |
814 | 831 | ||
815 | extern void FASTCALL(lock_sock_nested(struct sock *sk, int subclass)); | 832 | extern void lock_sock_nested(struct sock *sk, int subclass); |
816 | 833 | ||
817 | static inline void lock_sock(struct sock *sk) | 834 | static inline void lock_sock(struct sock *sk) |
818 | { | 835 | { |
819 | lock_sock_nested(sk, 0); | 836 | lock_sock_nested(sk, 0); |
820 | } | 837 | } |
821 | 838 | ||
822 | extern void FASTCALL(release_sock(struct sock *sk)); | 839 | extern void release_sock(struct sock *sk); |
823 | 840 | ||
824 | /* BH context may only use the following locking interface. */ | 841 | /* BH context may only use the following locking interface. */ |
825 | #define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock)) | 842 | #define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock)) |
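For reference, the two locking interfaces declared here are used as follows; this is a sketch only, with the surrounding processing elided. Process context may sleep while holding the socket lock, whereas BH context spins on sk_lock.slock and defers work to the backlog when the lock is owned by a process.

	static void example_process_ctx(struct sock *sk)
	{
		lock_sock(sk);
		/* ... modify socket state; may sleep ... */
		release_sock(sk);
	}

	static void example_bh_ctx(struct sock *sk, struct sk_buff *skb)
	{
		bh_lock_sock(sk);
		if (!sock_owned_by_user(sk)) {
			/* lock not held by a process: process skb directly */
			kfree_skb(skb);		/* placeholder for real processing */
		} else {
			/* defer: release_sock() will run the backlog */
			sk_add_backlog(sk, skb);
		}
		bh_unlock_sock(sk);
	}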
@@ -944,7 +961,7 @@ static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
944 | return err; | 961 | return err; |
945 | 962 | ||
946 | rcu_read_lock_bh(); | 963 | rcu_read_lock_bh(); |
947 | filter = sk->sk_filter; | 964 | filter = rcu_dereference(sk->sk_filter); |
948 | if (filter) { | 965 | if (filter) { |
949 | unsigned int pkt_len = sk_run_filter(skb, filter->insns, | 966 | unsigned int pkt_len = sk_run_filter(skb, filter->insns, |
950 | filter->len); | 967 | filter->len); |
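Reading sk_filter through rcu_dereference() pairs with an update side that publishes the pointer via rcu_assign_pointer(). A simplified sketch of that writer follows; the helper is hypothetical, and how the old filter is eventually freed after the RCU grace period is omitted.

	static struct sk_filter *example_swap_filter(struct sock *sk,
						     struct sk_filter *fp)
	{
		struct sk_filter *old;

		rcu_read_lock_bh();
		old = rcu_dereference(sk->sk_filter);
		rcu_assign_pointer(sk->sk_filter, fp);
		rcu_read_unlock_bh();

		return old;	/* must only be freed after an RCU grace period */
	}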
@@ -1113,12 +1130,6 @@ static inline int sk_can_gso(const struct sock *sk)
1113 | 1130 | ||
1114 | extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst); | 1131 | extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst); |
1115 | 1132 | ||
1116 | static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb) | ||
1117 | { | ||
1118 | sk->sk_wmem_queued += skb->truesize; | ||
1119 | sk->sk_forward_alloc -= skb->truesize; | ||
1120 | } | ||
1121 | |||
1122 | static inline int skb_copy_to_page(struct sock *sk, char __user *from, | 1133 | static inline int skb_copy_to_page(struct sock *sk, char __user *from, |
1123 | struct sk_buff *skb, struct page *page, | 1134 | struct sk_buff *skb, struct page *page, |
1124 | int off, int copy) | 1135 | int off, int copy) |
@@ -1138,7 +1149,7 @@ static inline int skb_copy_to_page(struct sock *sk, char __user *from,
1138 | skb->data_len += copy; | 1149 | skb->data_len += copy; |
1139 | skb->truesize += copy; | 1150 | skb->truesize += copy; |
1140 | sk->sk_wmem_queued += copy; | 1151 | sk->sk_wmem_queued += copy; |
1141 | sk->sk_forward_alloc -= copy; | 1152 | sk_mem_charge(sk, copy); |
1142 | return 0; | 1153 | return 0; |
1143 | } | 1154 | } |
1144 | 1155 | ||
@@ -1164,6 +1175,7 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
1164 | skb->sk = sk; | 1175 | skb->sk = sk; |
1165 | skb->destructor = sock_rfree; | 1176 | skb->destructor = sock_rfree; |
1166 | atomic_add(skb->truesize, &sk->sk_rmem_alloc); | 1177 | atomic_add(skb->truesize, &sk->sk_rmem_alloc); |
1178 | sk_mem_charge(sk, skb->truesize); | ||
1167 | } | 1179 | } |
1168 | 1180 | ||
1169 | extern void sk_reset_timer(struct sock *sk, struct timer_list* timer, | 1181 | extern void sk_reset_timer(struct sock *sk, struct timer_list* timer, |
@@ -1225,45 +1237,12 @@ static inline void sk_wake_async(struct sock *sk, int how, int band)
1225 | static inline void sk_stream_moderate_sndbuf(struct sock *sk) | 1237 | static inline void sk_stream_moderate_sndbuf(struct sock *sk) |
1226 | { | 1238 | { |
1227 | if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) { | 1239 | if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) { |
1228 | sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued / 2); | 1240 | sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1); |
1229 | sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF); | 1241 | sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF); |
1230 | } | 1242 | } |
1231 | } | 1243 | } |
1232 | 1244 | ||
1233 | static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk, | 1245 | struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp); |
1234 | int size, int mem, | ||
1235 | gfp_t gfp) | ||
1236 | { | ||
1237 | struct sk_buff *skb; | ||
1238 | |||
1239 | /* The TCP header must be at least 32-bit aligned. */ | ||
1240 | size = ALIGN(size, 4); | ||
1241 | |||
1242 | skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp); | ||
1243 | if (skb) { | ||
1244 | skb->truesize += mem; | ||
1245 | if (sk_stream_wmem_schedule(sk, skb->truesize)) { | ||
1246 | /* | ||
1247 | * Make sure that we have exactly size bytes | ||
1248 | * available to the caller, no more, no less. | ||
1249 | */ | ||
1250 | skb_reserve(skb, skb_tailroom(skb) - size); | ||
1251 | return skb; | ||
1252 | } | ||
1253 | __kfree_skb(skb); | ||
1254 | } else { | ||
1255 | sk->sk_prot->enter_memory_pressure(); | ||
1256 | sk_stream_moderate_sndbuf(sk); | ||
1257 | } | ||
1258 | return NULL; | ||
1259 | } | ||
1260 | |||
1261 | static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk, | ||
1262 | int size, | ||
1263 | gfp_t gfp) | ||
1264 | { | ||
1265 | return sk_stream_alloc_pskb(sk, size, 0, gfp); | ||
1266 | } | ||
1267 | 1246 | ||
1268 | static inline struct page *sk_stream_alloc_page(struct sock *sk) | 1247 | static inline struct page *sk_stream_alloc_page(struct sock *sk) |
1269 | { | 1248 | { |
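sk_stream_alloc_skb() is now declared out of line; the allocation logic formerly in the inline sk_stream_alloc_pskb() above moves into the protocol code. A caller on a stream send path is expected to use it roughly as below; this is a sketch with a hypothetical wrapper, mirroring the charge pattern used elsewhere in this diff.

	static struct sk_buff *example_get_send_skb(struct sock *sk, int payload)
	{
		struct sk_buff *skb;

		/* Allocates payload + sk->sk_prot->max_header bytes and checks
		 * the send-side accounting; returns NULL (after moderating
		 * sk_sndbuf) under memory pressure. */
		skb = sk_stream_alloc_skb(sk, payload, sk->sk_allocation);
		if (!skb)
			return NULL;	/* caller waits for memory and retries */

		skb_queue_tail(&sk->sk_write_queue, skb);
		sk->sk_wmem_queued += skb->truesize;
		sk_mem_charge(sk, skb->truesize);
		return skb;
	}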
@@ -1282,7 +1261,7 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk)
1282 | */ | 1261 | */ |
1283 | static inline int sock_writeable(const struct sock *sk) | 1262 | static inline int sock_writeable(const struct sock *sk) |
1284 | { | 1263 | { |
1285 | return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2); | 1264 | return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1); |
1286 | } | 1265 | } |
1287 | 1266 | ||
1288 | static inline gfp_t gfp_any(void) | 1267 | static inline gfp_t gfp_any(void) |
@@ -1391,23 +1370,11 @@ extern int net_msg_warn;
1391 | lock_sock(sk); \ | 1370 | lock_sock(sk); \ |
1392 | } | 1371 | } |
1393 | 1372 | ||
1394 | static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool) | ||
1395 | { | ||
1396 | if (valbool) | ||
1397 | sock_set_flag(sk, bit); | ||
1398 | else | ||
1399 | sock_reset_flag(sk, bit); | ||
1400 | } | ||
1401 | |||
1402 | extern __u32 sysctl_wmem_max; | 1373 | extern __u32 sysctl_wmem_max; |
1403 | extern __u32 sysctl_rmem_max; | 1374 | extern __u32 sysctl_rmem_max; |
1404 | 1375 | ||
1405 | extern void sk_init(void); | 1376 | extern void sk_init(void); |
1406 | 1377 | ||
1407 | #ifdef CONFIG_SYSCTL | ||
1408 | extern struct ctl_table core_table[]; | ||
1409 | #endif | ||
1410 | |||
1411 | extern int sysctl_optmem_max; | 1378 | extern int sysctl_optmem_max; |
1412 | 1379 | ||
1413 | extern __u32 sysctl_wmem_default; | 1380 | extern __u32 sysctl_wmem_default; |