-rw-r--r--   include/net/sock.h   59
-rw-r--r--   net/core/sock.c      61
2 files changed, 14 insertions, 106 deletions
diff --git a/include/net/sock.h b/include/net/sock.h
index 9c55af8e5f81..e329d05f7995 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -47,6 +47,7 @@
 #include <linux/module.h>
 #include <linux/lockdep.h>
 #include <linux/netdevice.h>
+#include <linux/pcounter.h>
 #include <linux/skbuff.h>	/* struct sk_buff */
 #include <linux/mm.h>
 #include <linux/security.h>
@@ -565,14 +566,9 @@ struct proto {
 	void			(*unhash)(struct sock *sk);
 	int			(*get_port)(struct sock *sk, unsigned short snum);
 
-#ifdef CONFIG_SMP
 	/* Keeping track of sockets in use */
-	void			(*inuse_add)(struct proto *prot, int inc);
-	int			(*inuse_getval)(const struct proto *prot);
-	int			*inuse_ptr;
-#else
-	int			inuse;
-#endif
+	struct pcounter		inuse;
+
 	/* Memory pressure */
 	void			(*enter_memory_pressure)(void);
 	atomic_t		*memory_allocated;	/* Current allocated memory. */
@@ -607,35 +603,8 @@ struct proto {
 #endif
 };
 
-/*
- * Special macros to let protos use a fast version of inuse{get|add}
- * using a static percpu variable per proto instead of an allocated one,
- * saving one dereference.
- * This might be changed if/when dynamic percpu vars become fast.
- */
-#ifdef CONFIG_SMP
-# define DEFINE_PROTO_INUSE(NAME)				\
-static DEFINE_PER_CPU(int, NAME##_inuse);			\
-static void NAME##_inuse_add(struct proto *prot, int inc)	\
-{								\
-	__get_cpu_var(NAME##_inuse) += inc;			\
-}								\
-								\
-static int NAME##_inuse_getval(const struct proto *prot)	\
-{								\
-	int res = 0, cpu;					\
-								\
-	for_each_possible_cpu(cpu)				\
-		res += per_cpu(NAME##_inuse, cpu);		\
-	return res;						\
-}
-# define REF_PROTO_INUSE(NAME)					\
-	.inuse_add = NAME##_inuse_add,				\
-	.inuse_getval = NAME##_inuse_getval,
-#else
-# define DEFINE_PROTO_INUSE(NAME)
-# define REF_PROTO_INUSE(NAME)
-#endif
+#define DEFINE_PROTO_INUSE(NAME) DEFINE_PCOUNTER(NAME)
+#define REF_PROTO_INUSE(NAME) PCOUNTER_MEMBER_INITIALIZER(NAME, .inuse)
 
 extern int proto_register(struct proto *prot, int alloc_slab);
 extern void proto_unregister(struct proto *prot);
@@ -668,29 +637,17 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
 /* Called with local bh disabled */
 static __inline__ void sock_prot_inc_use(struct proto *prot)
 {
-#ifdef CONFIG_SMP
-	prot->inuse_add(prot, 1);
-#else
-	prot->inuse++;
-#endif
+	pcounter_add(&prot->inuse, 1);
 }
 
 static __inline__ void sock_prot_dec_use(struct proto *prot)
 {
-#ifdef CONFIG_SMP
-	prot->inuse_add(prot, -1);
-#else
-	prot->inuse--;
-#endif
+	pcounter_add(&prot->inuse, -1);
 }
 
 static __inline__ int sock_prot_inuse(struct proto *proto)
 {
-#ifdef CONFIG_SMP
-	return proto->inuse_getval(proto);
-#else
-	return proto->inuse;
-#endif
+	return pcounter_getval(&proto->inuse);
 }
 
 /* With per-bucket locks this operation is not-atomic, so that
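
For context, a minimal sketch of how an individual protocol keeps using these macros after the conversion; the call sites stay the same, only the expansion changes to the pcounter helpers. The protocol name "foo" and the fields shown in foo_prot are illustrative placeholders, not part of this patch:

/* Per-protocol inuse counter; now expands to DEFINE_PCOUNTER(foo). */
DEFINE_PROTO_INUSE(foo)

struct proto foo_prot = {
	.name		= "FOO",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct sock),
	/* ... remaining ops elided ... */
	/* Now expands to PCOUNTER_MEMBER_INITIALIZER(foo, .inuse). */
	REF_PROTO_INUSE(foo)
};

Socket setup and teardown paths keep calling sock_prot_inc_use(&foo_prot) and sock_prot_dec_use(&foo_prot), which now funnel into pcounter_add(), while readers go through sock_prot_inuse().
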
diff --git a/net/core/sock.c b/net/core/sock.c
index c9305a861760..eac7aa0721da 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1804,65 +1804,15 @@ EXPORT_SYMBOL(sk_common_release);
 static DEFINE_RWLOCK(proto_list_lock);
 static LIST_HEAD(proto_list);
 
-#ifdef CONFIG_SMP
-/*
- * Define default functions to keep track of inuse sockets per protocol
- * Note that often used protocols use dedicated functions to get a speed increase.
- * (see DEFINE_PROTO_INUSE/REF_PROTO_INUSE)
- */
-static void inuse_add(struct proto *prot, int inc)
-{
-	per_cpu_ptr(prot->inuse_ptr, smp_processor_id())[0] += inc;
-}
-
-static int inuse_get(const struct proto *prot)
-{
-	int res = 0, cpu;
-	for_each_possible_cpu(cpu)
-		res += per_cpu_ptr(prot->inuse_ptr, cpu)[0];
-	return res;
-}
-
-static int inuse_init(struct proto *prot)
-{
-	if (!prot->inuse_getval || !prot->inuse_add) {
-		prot->inuse_ptr = alloc_percpu(int);
-		if (prot->inuse_ptr == NULL)
-			return -ENOBUFS;
-
-		prot->inuse_getval = inuse_get;
-		prot->inuse_add = inuse_add;
-	}
-	return 0;
-}
-
-static void inuse_fini(struct proto *prot)
-{
-	if (prot->inuse_ptr != NULL) {
-		free_percpu(prot->inuse_ptr);
-		prot->inuse_ptr = NULL;
-		prot->inuse_getval = NULL;
-		prot->inuse_add = NULL;
-	}
-}
-#else
-static inline int inuse_init(struct proto *prot)
-{
-	return 0;
-}
-
-static inline void inuse_fini(struct proto *prot)
-{
-}
-#endif
-
 int proto_register(struct proto *prot, int alloc_slab)
 {
 	char *request_sock_slab_name = NULL;
 	char *timewait_sock_slab_name;
 
-	if (inuse_init(prot))
+	if (pcounter_alloc(&prot->inuse) != 0) {
+		printk(KERN_CRIT "%s: Can't alloc inuse counters!\n", prot->name);
 		goto out;
+	}
 
 	if (alloc_slab) {
 		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
@@ -1930,7 +1880,7 @@ out_free_sock_slab:
 	kmem_cache_destroy(prot->slab);
 	prot->slab = NULL;
 out_free_inuse:
-	inuse_fini(prot);
+	pcounter_free(&prot->inuse);
 out:
 	return -ENOBUFS;
 }
@@ -1943,7 +1893,8 @@ void proto_unregister(struct proto *prot)
 	list_del(&prot->node);
 	write_unlock(&proto_list_lock);
 
-	inuse_fini(prot);
+	pcounter_free(&prot->inuse);
+
 	if (prot->slab != NULL) {
 		kmem_cache_destroy(prot->slab);
 		prot->slab = NULL;
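
The pcounter implementation itself is not part of this diff, but the idea it generalizes is the same one the removed inuse_add()/inuse_get() helpers hand-coded: one counter slot per CPU, updated locally on the running CPU and summed over all slots on read. Below is a small self-contained userspace sketch of that scheme, using hypothetical names (pcounter_demo, NR_CPUS fixed at 4) rather than the kernel API:

#include <stdio.h>

#define NR_CPUS 4	/* stand-in for the number of possible CPUs */

/*
 * One slot per CPU: writers only touch their own slot, so updates do not
 * bounce a shared cacheline; readers pay the cost of summing all slots.
 */
struct pcounter_demo {
	int per_cpu[NR_CPUS];
};

static void pcounter_demo_add(struct pcounter_demo *pc, int cpu, int inc)
{
	pc->per_cpu[cpu] += inc;	/* the kernel updates the current CPU's slot */
}

static int pcounter_demo_getval(const struct pcounter_demo *pc)
{
	int cpu, sum = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sum += pc->per_cpu[cpu];
	return sum;
}

int main(void)
{
	struct pcounter_demo inuse = { { 0 } };

	pcounter_demo_add(&inuse, 0, 1);	/* socket created on CPU 0 */
	pcounter_demo_add(&inuse, 2, 1);	/* socket created on CPU 2 */
	pcounter_demo_add(&inuse, 0, -1);	/* socket released on CPU 0 */

	printf("sockets in use: %d\n", pcounter_demo_getval(&inuse));	/* prints 1 */
	return 0;
}

The write path stays cheap and contention-free, which is why the kernel keeps this distributed layout even though every read has to walk all possible CPUs.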