author	Denis Vlasenko <vda@ilport.com.ua>	2006-03-28 04:08:21 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2006-03-28 20:02:45 -0500
commit	f0088a50e7c49d1ba285c88fe06345f223652fd3 (patch)
tree	82e3fd2dfbab6e7b73f6c6aabf9ba108d007e4da
parent	1d1818316f0b61e0997a159680e1e631a23a407e (diff)
[NET]: deinline 200+ byte inlines in sock.h
Sizes in bytes (allyesconfig, i386) and files where those inlines
are used:

238 sock_queue_rcv_skb 2.6.16/net/x25/x25_in.o
238 sock_queue_rcv_skb 2.6.16/net/rose/rose_in.o
238 sock_queue_rcv_skb 2.6.16/net/packet/af_packet.o
238 sock_queue_rcv_skb 2.6.16/net/netrom/nr_in.o
238 sock_queue_rcv_skb 2.6.16/net/llc/llc_sap.o
238 sock_queue_rcv_skb 2.6.16/net/llc/llc_conn.o
238 sock_queue_rcv_skb 2.6.16/net/irda/af_irda.o
238 sock_queue_rcv_skb 2.6.16/net/ipx/af_ipx.o
238 sock_queue_rcv_skb 2.6.16/net/ipv6/udp.o
238 sock_queue_rcv_skb 2.6.16/net/ipv6/raw.o
238 sock_queue_rcv_skb 2.6.16/net/ipv4/udp.o
238 sock_queue_rcv_skb 2.6.16/net/ipv4/raw.o
238 sock_queue_rcv_skb 2.6.16/net/ipv4/ipmr.o
238 sock_queue_rcv_skb 2.6.16/net/econet/econet.o
238 sock_queue_rcv_skb 2.6.16/net/econet/af_econet.o
238 sock_queue_rcv_skb 2.6.16/net/bluetooth/sco.o
238 sock_queue_rcv_skb 2.6.16/net/bluetooth/l2cap.o
238 sock_queue_rcv_skb 2.6.16/net/bluetooth/hci_sock.o
238 sock_queue_rcv_skb 2.6.16/net/ax25/ax25_in.o
238 sock_queue_rcv_skb 2.6.16/net/ax25/af_ax25.o
238 sock_queue_rcv_skb 2.6.16/net/appletalk/ddp.o
238 sock_queue_rcv_skb 2.6.16/drivers/net/pppoe.o
276 sk_receive_skb 2.6.16/net/decnet/dn_nsp_in.o
276 sk_receive_skb 2.6.16/net/dccp/ipv6.o
276 sk_receive_skb 2.6.16/net/dccp/ipv4.o
276 sk_receive_skb 2.6.16/net/dccp/dccp_ipv6.o
276 sk_receive_skb 2.6.16/drivers/net/pppoe.o
209 sk_dst_check 2.6.16/net/ipv6/ip6_output.o
209 sk_dst_check 2.6.16/net/ipv4/udp.o
209 sk_dst_check 2.6.16/net/decnet/dn_nsp_out.o

Large inlines with multiple callers:

Size  Uses Wasted Name and definition
===== ==== ====== ================================================
 238   21   4360  sock_queue_rcv_skb          include/net/sock.h
 109   10    801  sock_recv_timestamp         include/net/sock.h
 276    4    768  sk_receive_skb              include/net/sock.h
  94    8    518  __sk_dst_check              include/net/sock.h
 209    3    378  sk_dst_check                include/net/sock.h
 131    4    333  sk_setup_caps               include/net/sock.h
 152    2    132  sk_stream_alloc_pskb        include/net/sock.h
 125    2    105  sk_stream_writequeue_purge  include/net/sock.h

Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
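For readers unfamiliar with the pattern, the patch below moves each inline's body out of include/net/sock.h, leaves only an extern declaration in the header, and keeps the single out-of-line copy in net/core/sock.c, exported for modules. A minimal sketch of that transformation follows; example_helper and the file names in the comments are made up for illustration and are not part of this patch.

/* Before: the body lives in a header, so every translation unit that
 * calls the helper gets its own ~200+ byte copy inlined. */
/* include/net/example.h (hypothetical) */
static inline int example_helper(struct sock *sk, struct sk_buff *skb)
{
	/* ... function body ... */
	return 0;
}

/* After: the header only declares the function ... */
/* include/net/example.h (hypothetical) */
extern int example_helper(struct sock *sk, struct sk_buff *skb);

/* ... and one out-of-line definition lives in a single .c file,
 * exported so modular callers can still link against it. */
/* net/core/example.c (hypothetical) */
int example_helper(struct sock *sk, struct sk_buff *skb)
{
	/* ... function body ... */
	return 0;
}
EXPORT_SYMBOL(example_helper);

The EXPORT_SYMBOL() is what keeps modular callers (for example the bluetooth and pppoe objects listed above) linking once the function is no longer inline.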
-rw-r--r--	include/net/sock.h	91
-rw-r--r--	net/core/sock.c	93
2 files changed, 97 insertions, 87 deletions
diff --git a/include/net/sock.h b/include/net/sock.h
index 2aa73c0ec6c2..af2b0544586e 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -938,28 +938,7 @@ static inline void sock_put(struct sock *sk)
 		sk_free(sk);
 }
 
-static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb)
-{
-	int rc = NET_RX_SUCCESS;
-
-	if (sk_filter(sk, skb, 0))
-		goto discard_and_relse;
-
-	skb->dev = NULL;
-
-	bh_lock_sock(sk);
-	if (!sock_owned_by_user(sk))
-		rc = sk->sk_backlog_rcv(sk, skb);
-	else
-		sk_add_backlog(sk, skb);
-	bh_unlock_sock(sk);
-out:
-	sock_put(sk);
-	return rc;
-discard_and_relse:
-	kfree_skb(skb);
-	goto out;
-}
+extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb);
 
 /* Detach socket from process context.
  * Announce socket dead, detach it from wait queue and inode.
@@ -1044,33 +1023,9 @@ sk_dst_reset(struct sock *sk)
 	write_unlock(&sk->sk_dst_lock);
 }
 
-static inline struct dst_entry *
-__sk_dst_check(struct sock *sk, u32 cookie)
-{
-	struct dst_entry *dst = sk->sk_dst_cache;
-
-	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
-		sk->sk_dst_cache = NULL;
-		dst_release(dst);
-		return NULL;
-	}
-
-	return dst;
-}
-
-static inline struct dst_entry *
-sk_dst_check(struct sock *sk, u32 cookie)
-{
-	struct dst_entry *dst = sk_dst_get(sk);
-
-	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
-		sk_dst_reset(sk);
-		dst_release(dst);
-		return NULL;
-	}
-
-	return dst;
-}
+extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
+
+extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
 
 static inline void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
 {
@@ -1140,45 +1095,7 @@ extern void sk_reset_timer(struct sock *sk, struct timer_list* timer,
 
 extern void sk_stop_timer(struct sock *sk, struct timer_list* timer);
 
-static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
-{
-	int err = 0;
-	int skb_len;
-
-	/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
-	   number of warnings when compiling with -W --ANK
-	 */
-	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-	    (unsigned)sk->sk_rcvbuf) {
-		err = -ENOMEM;
-		goto out;
-	}
-
-	/* It would be deadlock, if sock_queue_rcv_skb is used
-	   with socket lock! We assume that users of this
-	   function are lock free.
-	*/
-	err = sk_filter(sk, skb, 1);
-	if (err)
-		goto out;
-
-	skb->dev = NULL;
-	skb_set_owner_r(skb, sk);
-
-	/* Cache the SKB length before we tack it onto the receive
-	 * queue. Once it is added it no longer belongs to us and
-	 * may be freed by other threads of control pulling packets
-	 * from the queue.
-	 */
-	skb_len = skb->len;
-
-	skb_queue_tail(&sk->sk_receive_queue, skb);
-
-	if (!sock_flag(sk, SOCK_DEAD))
-		sk->sk_data_ready(sk, skb_len);
-out:
-	return err;
-}
+extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 
 static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
 {
diff --git a/net/core/sock.c b/net/core/sock.c
index e110b9004147..a96ea7dd0fc1 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -187,6 +187,99 @@ static void sock_disable_timestamp(struct sock *sk)
 }
 
 
+int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+	int err = 0;
+	int skb_len;
+
+	/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
+	   number of warnings when compiling with -W --ANK
+	 */
+	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+	    (unsigned)sk->sk_rcvbuf) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	/* It would be deadlock, if sock_queue_rcv_skb is used
+	   with socket lock! We assume that users of this
+	   function are lock free.
+	*/
+	err = sk_filter(sk, skb, 1);
+	if (err)
+		goto out;
+
+	skb->dev = NULL;
+	skb_set_owner_r(skb, sk);
+
+	/* Cache the SKB length before we tack it onto the receive
+	 * queue. Once it is added it no longer belongs to us and
+	 * may be freed by other threads of control pulling packets
+	 * from the queue.
+	 */
+	skb_len = skb->len;
+
+	skb_queue_tail(&sk->sk_receive_queue, skb);
+
+	if (!sock_flag(sk, SOCK_DEAD))
+		sk->sk_data_ready(sk, skb_len);
+out:
+	return err;
+}
+EXPORT_SYMBOL(sock_queue_rcv_skb);
+
+int sk_receive_skb(struct sock *sk, struct sk_buff *skb)
+{
+	int rc = NET_RX_SUCCESS;
+
+	if (sk_filter(sk, skb, 0))
+		goto discard_and_relse;
+
+	skb->dev = NULL;
+
+	bh_lock_sock(sk);
+	if (!sock_owned_by_user(sk))
+		rc = sk->sk_backlog_rcv(sk, skb);
+	else
+		sk_add_backlog(sk, skb);
+	bh_unlock_sock(sk);
+out:
+	sock_put(sk);
+	return rc;
+discard_and_relse:
+	kfree_skb(skb);
+	goto out;
+}
+EXPORT_SYMBOL(sk_receive_skb);
+
+struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
+{
+	struct dst_entry *dst = sk->sk_dst_cache;
+
+	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+		sk->sk_dst_cache = NULL;
+		dst_release(dst);
+		return NULL;
+	}
+
+	return dst;
+}
+EXPORT_SYMBOL(__sk_dst_check);
+
+struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
+{
+	struct dst_entry *dst = sk_dst_get(sk);
+
+	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+		sk_dst_reset(sk);
+		dst_release(dst);
+		return NULL;
+	}
+
+	return dst;
+}
+EXPORT_SYMBOL(sk_dst_check);
+
 /*
  * This is meant for all protocols to use and covers goings on
  * at the socket level. Everything here is generic.
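As a rough guide to how the now out-of-line helpers are used, here is a hedged sketch of a protocol receive path handing a packet to sock_queue_rcv_skb(). The function my_proto_deliver() is invented for illustration and does not appear in this patch; the real call sites are the objects listed in the commit message, such as net/ipv4/udp.o.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>

/* Hypothetical caller: deliver one received skb to a socket. */
static int my_proto_deliver(struct sock *sk, struct sk_buff *skb)
{
	/* sock_queue_rcv_skb() returns a negative error (e.g. -ENOMEM
	 * when the receive buffer is full, or the socket filter's
	 * verdict) without freeing the skb, so the caller must drop
	 * it on failure. */
	if (sock_queue_rcv_skb(sk, skb) < 0) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	return NET_RX_SUCCESS;
}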