-rw-r--r--	net/core/request_sock.c	43
1 file changed, 12 insertions, 31 deletions
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 467f326126e0..04db318e6218 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -41,27 +41,27 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
 			  unsigned int nr_table_entries)
 {
 	size_t lopt_size = sizeof(struct listen_sock);
-	struct listen_sock *lopt;
+	struct listen_sock *lopt = NULL;
 
 	nr_table_entries = min_t(u32, nr_table_entries, sysctl_max_syn_backlog);
 	nr_table_entries = max_t(u32, nr_table_entries, 8);
 	nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
 	lopt_size += nr_table_entries * sizeof(struct request_sock *);
-	if (lopt_size > PAGE_SIZE)
+
+	if (lopt_size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
+		lopt = kzalloc(lopt_size, GFP_KERNEL |
+					  __GFP_NOWARN |
+					  __GFP_NORETRY);
+	if (!lopt)
 		lopt = vzalloc(lopt_size);
-	else
-		lopt = kzalloc(lopt_size, GFP_KERNEL);
-	if (lopt == NULL)
+	if (!lopt)
 		return -ENOMEM;
 
-	for (lopt->max_qlen_log = 3;
-	     (1 << lopt->max_qlen_log) < nr_table_entries;
-	     lopt->max_qlen_log++);
-
 	get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
 	rwlock_init(&queue->syn_wait_lock);
 	queue->rskq_accept_head = NULL;
 	lopt->nr_table_entries = nr_table_entries;
+	lopt->max_qlen_log = ilog2(nr_table_entries);
 
 	write_lock_bh(&queue->syn_wait_lock);
 	queue->listen_opt = lopt;
@@ -72,22 +72,8 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
 
 void __reqsk_queue_destroy(struct request_sock_queue *queue)
 {
-	struct listen_sock *lopt;
-	size_t lopt_size;
-
-	/*
-	 * this is an error recovery path only
-	 * no locking needed and the lopt is not NULL
-	 */
-
-	lopt = queue->listen_opt;
-	lopt_size = sizeof(struct listen_sock) +
-		lopt->nr_table_entries * sizeof(struct request_sock *);
-
-	if (lopt_size > PAGE_SIZE)
-		vfree(lopt);
-	else
-		kfree(lopt);
+	/* This is an error recovery path only, no locking needed */
+	kvfree(queue->listen_opt);
 }
 
 static inline struct listen_sock *reqsk_queue_yank_listen_sk(
@@ -107,8 +93,6 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
 {
 	/* make all the listen_opt local to us */
 	struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue);
-	size_t lopt_size = sizeof(struct listen_sock) +
-		lopt->nr_table_entries * sizeof(struct request_sock *);
 
 	if (lopt->qlen != 0) {
 		unsigned int i;
@@ -125,10 +109,7 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
 	}
 
 	WARN_ON(lopt->qlen != 0);
-	if (lopt_size > PAGE_SIZE)
-		vfree(lopt);
-	else
-		kfree(lopt);
+	kvfree(lopt);
 }
 
 /*
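
The patch replaces two open-coded size checks with the standard allocate-small-then-fall-back idiom: try kzalloc() for requests no larger than PAGE_ALLOC_COSTLY_ORDER pages, fall back to vzalloc(), and free with kvfree(), which handles memory from either allocator, so the destroy paths no longer need to recompute lopt_size. A minimal sketch of the same idiom, using a made-up demo_table structure and function names that are not part of this patch:

#include <linux/mm.h>      /* kvfree(), PAGE_SIZE, PAGE_ALLOC_COSTLY_ORDER */
#include <linux/slab.h>    /* kzalloc() */
#include <linux/vmalloc.h> /* vzalloc() */

/* Hypothetical table, used only to illustrate the alloc/free pairing. */
struct demo_table {
	unsigned int nr_entries;
	void *slots[];
};

static struct demo_table *demo_table_alloc(unsigned int nr_entries)
{
	struct demo_table *t = NULL;
	size_t size = sizeof(*t) + nr_entries * sizeof(t->slots[0]);

	/* For reasonably small sizes, try the slab allocator first, but
	 * quietly (__GFP_NOWARN) and without heavy retries (__GFP_NORETRY),
	 * since the vzalloc() fallback below can still satisfy the request. */
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		t = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!t)
		t = vzalloc(size);
	if (!t)
		return NULL;

	t->nr_entries = nr_entries;
	return t;
}

static void demo_table_free(struct demo_table *t)
{
	/* kvfree() frees both kzalloc()'d and vzalloc()'d memory, so the
	 * caller does not need to remember which allocator succeeded. */
	kvfree(t);
}

The __GFP_NOWARN | __GFP_NORETRY combination keeps a failed high-order slab attempt cheap and silent; the vzalloc() fallback only needs order-0 pages, so it can succeed where a large physically contiguous allocation would not.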