author    Yan, Zheng <zheng.z.yan@intel.com>       2011-09-29 13:10:10 -0400
committer David S. Miller <davem@davemloft.net>    2011-10-04 23:31:24 -0400
commit    260fcbeb1ae9e768a44c9925338fbacb0d7e5ba9 (patch)
tree      741dea73d68eca24c8d515bb7091cbedb77192a4
parent    cb2d0f3e968bff7c6d262aca3e3ab8d4184e69b2 (diff)
tcp: properly handle md5sig_pool references
tcp_v4_clear_md5_list() assumes that the socket's tcp md5sig peers together hold a single reference to md5sig_pool, but tcp_v4_md5_do_add() increases the use count of md5sig_pool once for each peer. This patch makes tcp_v4_md5_do_add() increase the use count only for the first tcp md5sig peer, and makes the delete path drop it only when the last key is removed.

Signed-off-by: Zheng Yan <zheng.z.yan@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
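To make the invariant concrete, here is a minimal, standalone userspace model of the discipline the patch enforces; the names (pool_get, pool_put, struct keylist, key_add, key_del) are illustrative only, not the kernel's API. The pool is referenced once per key list: the reference is taken when the first key is added and dropped when the last one goes away.

#include <stdio.h>

static int pool_users;          /* models the md5sig_pool use count */

static int pool_get(void)
{
        pool_users++;
        return 0;               /* the real tcp_alloc_md5sig_pool() can fail */
}

static void pool_put(void)
{
        pool_users--;
}

struct keylist {
        int entries;
};

/* Take a pool reference only when the FIRST key is added. */
static int key_add(struct keylist *kl)
{
        if (kl->entries == 0 && pool_get() < 0)
                return -1;
        kl->entries++;
        return 0;
}

/* Drop the pool reference only when the LAST key is removed. */
static void key_del(struct keylist *kl)
{
        if (--kl->entries == 0)
                pool_put();
}

int main(void)
{
        struct keylist kl = { 0 };

        key_add(&kl);
        key_add(&kl);           /* second peer: no extra pool reference */
        key_del(&kl);
        key_del(&kl);
        printf("pool_users = %d\n", pool_users);        /* 0: balanced */
        return 0;
}

With the old per-peer get, a socket that added two keys held two pool references, but teardown through tcp_v4_clear_md5_list() dropped only one, leaving the use count permanently elevated.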
-rw-r--r--  net/ipv4/tcp_ipv4.c | 11 +++++++----
-rw-r--r--  net/ipv6/tcp_ipv6.c |  8 +++++---
2 files changed, 12 insertions(+), 7 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index c34f01513945..7963e03f1068 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -927,18 +927,21 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
                         }
                         sk_nocaps_add(sk, NETIF_F_GSO_MASK);
                 }
-                if (tcp_alloc_md5sig_pool(sk) == NULL) {
+
+                md5sig = tp->md5sig_info;
+                if (md5sig->entries4 == 0 &&
+                    tcp_alloc_md5sig_pool(sk) == NULL) {
                         kfree(newkey);
                         return -ENOMEM;
                 }
-                md5sig = tp->md5sig_info;
 
                 if (md5sig->alloced4 == md5sig->entries4) {
                         keys = kmalloc((sizeof(*keys) *
                                         (md5sig->entries4 + 1)), GFP_ATOMIC);
                         if (!keys) {
                                 kfree(newkey);
-                                tcp_free_md5sig_pool();
+                                if (md5sig->entries4 == 0)
+                                        tcp_free_md5sig_pool();
                                 return -ENOMEM;
                         }
 
@@ -982,6 +985,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
                                 kfree(tp->md5sig_info->keys4);
                                 tp->md5sig_info->keys4 = NULL;
                                 tp->md5sig_info->alloced4 = 0;
+                                tcp_free_md5sig_pool();
                         } else if (tp->md5sig_info->entries4 != i) {
                                 /* Need to do some manipulation */
                                 memmove(&tp->md5sig_info->keys4[i],
@@ -989,7 +993,6 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
                                         (tp->md5sig_info->entries4 - i) *
                                          sizeof(struct tcp4_md5sig_key));
                         }
-                        tcp_free_md5sig_pool();
                         return 0;
                 }
         }
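The subtle part of the tcp_ipv4.c hunk above is the error path: once the pool reference is taken only on the first add, the matching release on kmalloc() failure must be conditional too, otherwise a failed add would drop a reference owned by keys already in the list. A standalone sketch of that pairing, again with hypothetical names:

#include <stdlib.h>

static int pool_users;
static int pool_get(void) { pool_users++; return 0; }
static void pool_put(void) { pool_users--; }

struct keylist {
        int entries;
        int *keys;
};

/* Mirrors the corrected tcp_v4_md5_do_add() error path: on allocation
 * failure, release the pool reference only if this call acquired it. */
static int key_add(struct keylist *kl, int key)
{
        int *bigger;

        if (kl->entries == 0 && pool_get() < 0)
                return -1;

        bigger = realloc(kl->keys, (kl->entries + 1) * sizeof(*bigger));
        if (!bigger) {
                if (kl->entries == 0)   /* undo only the reference we took */
                        pool_put();
                return -1;
        }
        bigger[kl->entries++] = key;
        kl->keys = bigger;
        return 0;
}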
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 79cc6469508d..7b8fc5794352 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -591,7 +591,8 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
                         }
                         sk_nocaps_add(sk, NETIF_F_GSO_MASK);
                 }
-                if (tcp_alloc_md5sig_pool(sk) == NULL) {
+                if (tp->md5sig_info->entries6 == 0 &&
+                    tcp_alloc_md5sig_pool(sk) == NULL) {
                         kfree(newkey);
                         return -ENOMEM;
                 }
@@ -600,8 +601,9 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
                                        (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
 
                         if (!keys) {
-                                tcp_free_md5sig_pool();
                                 kfree(newkey);
+                                if (tp->md5sig_info->entries6 == 0)
+                                        tcp_free_md5sig_pool();
                                 return -ENOMEM;
                         }
 
@@ -647,6 +649,7 @@ static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
                                 kfree(tp->md5sig_info->keys6);
                                 tp->md5sig_info->keys6 = NULL;
                                 tp->md5sig_info->alloced6 = 0;
+                                tcp_free_md5sig_pool();
                         } else {
                                 /* shrink the database */
                                 if (tp->md5sig_info->entries6 != i)
@@ -655,7 +658,6 @@ static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
                                                 (tp->md5sig_info->entries6 - i)
                                                 * sizeof (tp->md5sig_info->keys6[0]));
                         }
-                        tcp_free_md5sig_pool();
                         return 0;
                 }
         }
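The delete-side hunks in both files complete the pairing: tcp_free_md5sig_pool() moves inside the entries == 0 branch, so the single per-list reference is dropped exactly when the list empties, matching the assumption tcp_v4_clear_md5_list() makes on socket teardown. A teardown analogue in the same standalone model (names illustrative only):

#include <stdlib.h>

static int pool_users;
static void pool_put(void) { pool_users--; }

struct keylist {
        int entries;
        int *keys;
};

/* Teardown analogue of tcp_v4_clear_md5_list(): the whole key list
 * holds exactly one pool reference, so clearing it drops exactly one. */
static void key_list_clear(struct keylist *kl)
{
        if (kl->entries) {
                free(kl->keys);
                kl->keys = NULL;
                kl->entries = 0;
                pool_put();     /* the single per-list reference */
        }
}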