diff options
author | Eric Dumazet <edumazet@google.com> | 2012-05-16 18:48:15 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2012-05-17 04:50:21 -0400 |
commit | dc6b9b78234fecdc6d2ca5e1629185718202bcf5 (patch) | |
tree | 81faf65c9cd9747d8dfe2828ff624507459bdf24 /include/net/sock.h | |
parent | 1f352920b908247273a7c1937fd8c341cc1cedb1 (diff) |
net: include/net/sock.h cleanup
bool/const conversions where possible
__inline__ -> inline
space cleanups
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/net/sock.h')
-rw-r--r-- | include/net/sock.h | 151 |
1 file changed, 74 insertions(+), 77 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h index 036f5069b6e0..da931555e000 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -97,7 +97,7 @@ void mem_cgroup_sockets_destroy(struct cgroup *cgrp) | |||
97 | #else | 97 | #else |
98 | /* Validate arguments and do nothing */ | 98 | /* Validate arguments and do nothing */ |
99 | static inline __printf(2, 3) | 99 | static inline __printf(2, 3) |
100 | void SOCK_DEBUG(struct sock *sk, const char *msg, ...) | 100 | void SOCK_DEBUG(const struct sock *sk, const char *msg, ...) |
101 | { | 101 | { |
102 | } | 102 | } |
103 | #endif | 103 | #endif |
@@ -372,8 +372,8 @@ struct sock { | |||
372 | void (*sk_data_ready)(struct sock *sk, int bytes); | 372 | void (*sk_data_ready)(struct sock *sk, int bytes); |
373 | void (*sk_write_space)(struct sock *sk); | 373 | void (*sk_write_space)(struct sock *sk); |
374 | void (*sk_error_report)(struct sock *sk); | 374 | void (*sk_error_report)(struct sock *sk); |
375 | int (*sk_backlog_rcv)(struct sock *sk, | 375 | int (*sk_backlog_rcv)(struct sock *sk, |
376 | struct sk_buff *skb); | 376 | struct sk_buff *skb); |
377 | void (*sk_destruct)(struct sock *sk); | 377 | void (*sk_destruct)(struct sock *sk); |
378 | }; | 378 | }; |
379 | 379 | ||
@@ -454,40 +454,40 @@ static inline struct sock *sk_nulls_next(const struct sock *sk) | |||
454 | NULL; | 454 | NULL; |
455 | } | 455 | } |
456 | 456 | ||
457 | static inline int sk_unhashed(const struct sock *sk) | 457 | static inline bool sk_unhashed(const struct sock *sk) |
458 | { | 458 | { |
459 | return hlist_unhashed(&sk->sk_node); | 459 | return hlist_unhashed(&sk->sk_node); |
460 | } | 460 | } |
461 | 461 | ||
462 | static inline int sk_hashed(const struct sock *sk) | 462 | static inline bool sk_hashed(const struct sock *sk) |
463 | { | 463 | { |
464 | return !sk_unhashed(sk); | 464 | return !sk_unhashed(sk); |
465 | } | 465 | } |
466 | 466 | ||
467 | static __inline__ void sk_node_init(struct hlist_node *node) | 467 | static inline void sk_node_init(struct hlist_node *node) |
468 | { | 468 | { |
469 | node->pprev = NULL; | 469 | node->pprev = NULL; |
470 | } | 470 | } |
471 | 471 | ||
472 | static __inline__ void sk_nulls_node_init(struct hlist_nulls_node *node) | 472 | static inline void sk_nulls_node_init(struct hlist_nulls_node *node) |
473 | { | 473 | { |
474 | node->pprev = NULL; | 474 | node->pprev = NULL; |
475 | } | 475 | } |
476 | 476 | ||
477 | static __inline__ void __sk_del_node(struct sock *sk) | 477 | static inline void __sk_del_node(struct sock *sk) |
478 | { | 478 | { |
479 | __hlist_del(&sk->sk_node); | 479 | __hlist_del(&sk->sk_node); |
480 | } | 480 | } |
481 | 481 | ||
482 | /* NB: equivalent to hlist_del_init_rcu */ | 482 | /* NB: equivalent to hlist_del_init_rcu */ |
483 | static __inline__ int __sk_del_node_init(struct sock *sk) | 483 | static inline bool __sk_del_node_init(struct sock *sk) |
484 | { | 484 | { |
485 | if (sk_hashed(sk)) { | 485 | if (sk_hashed(sk)) { |
486 | __sk_del_node(sk); | 486 | __sk_del_node(sk); |
487 | sk_node_init(&sk->sk_node); | 487 | sk_node_init(&sk->sk_node); |
488 | return 1; | 488 | return true; |
489 | } | 489 | } |
490 | return 0; | 490 | return false; |
491 | } | 491 | } |
492 | 492 | ||
493 | /* Grab socket reference count. This operation is valid only | 493 | /* Grab socket reference count. This operation is valid only |
@@ -509,9 +509,9 @@ static inline void __sock_put(struct sock *sk) | |||
509 | atomic_dec(&sk->sk_refcnt); | 509 | atomic_dec(&sk->sk_refcnt); |
510 | } | 510 | } |
511 | 511 | ||
512 | static __inline__ int sk_del_node_init(struct sock *sk) | 512 | static inline bool sk_del_node_init(struct sock *sk) |
513 | { | 513 | { |
514 | int rc = __sk_del_node_init(sk); | 514 | bool rc = __sk_del_node_init(sk); |
515 | 515 | ||
516 | if (rc) { | 516 | if (rc) { |
517 | /* paranoid for a while -acme */ | 517 | /* paranoid for a while -acme */ |
@@ -522,18 +522,18 @@ static __inline__ int sk_del_node_init(struct sock *sk) | |||
522 | } | 522 | } |
523 | #define sk_del_node_init_rcu(sk) sk_del_node_init(sk) | 523 | #define sk_del_node_init_rcu(sk) sk_del_node_init(sk) |
524 | 524 | ||
525 | static __inline__ int __sk_nulls_del_node_init_rcu(struct sock *sk) | 525 | static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk) |
526 | { | 526 | { |
527 | if (sk_hashed(sk)) { | 527 | if (sk_hashed(sk)) { |
528 | hlist_nulls_del_init_rcu(&sk->sk_nulls_node); | 528 | hlist_nulls_del_init_rcu(&sk->sk_nulls_node); |
529 | return 1; | 529 | return true; |
530 | } | 530 | } |
531 | return 0; | 531 | return false; |
532 | } | 532 | } |
533 | 533 | ||
534 | static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk) | 534 | static inline bool sk_nulls_del_node_init_rcu(struct sock *sk) |
535 | { | 535 | { |
536 | int rc = __sk_nulls_del_node_init_rcu(sk); | 536 | bool rc = __sk_nulls_del_node_init_rcu(sk); |
537 | 537 | ||
538 | if (rc) { | 538 | if (rc) { |
539 | /* paranoid for a while -acme */ | 539 | /* paranoid for a while -acme */ |
@@ -543,40 +543,40 @@ static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk) | |||
543 | return rc; | 543 | return rc; |
544 | } | 544 | } |
545 | 545 | ||
546 | static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list) | 546 | static inline void __sk_add_node(struct sock *sk, struct hlist_head *list) |
547 | { | 547 | { |
548 | hlist_add_head(&sk->sk_node, list); | 548 | hlist_add_head(&sk->sk_node, list); |
549 | } | 549 | } |
550 | 550 | ||
551 | static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list) | 551 | static inline void sk_add_node(struct sock *sk, struct hlist_head *list) |
552 | { | 552 | { |
553 | sock_hold(sk); | 553 | sock_hold(sk); |
554 | __sk_add_node(sk, list); | 554 | __sk_add_node(sk, list); |
555 | } | 555 | } |
556 | 556 | ||
557 | static __inline__ void sk_add_node_rcu(struct sock *sk, struct hlist_head *list) | 557 | static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list) |
558 | { | 558 | { |
559 | sock_hold(sk); | 559 | sock_hold(sk); |
560 | hlist_add_head_rcu(&sk->sk_node, list); | 560 | hlist_add_head_rcu(&sk->sk_node, list); |
561 | } | 561 | } |
562 | 562 | ||
563 | static __inline__ void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) | 563 | static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) |
564 | { | 564 | { |
565 | hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); | 565 | hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); |
566 | } | 566 | } |
567 | 567 | ||
568 | static __inline__ void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) | 568 | static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) |
569 | { | 569 | { |
570 | sock_hold(sk); | 570 | sock_hold(sk); |
571 | __sk_nulls_add_node_rcu(sk, list); | 571 | __sk_nulls_add_node_rcu(sk, list); |
572 | } | 572 | } |
573 | 573 | ||
574 | static __inline__ void __sk_del_bind_node(struct sock *sk) | 574 | static inline void __sk_del_bind_node(struct sock *sk) |
575 | { | 575 | { |
576 | __hlist_del(&sk->sk_bind_node); | 576 | __hlist_del(&sk->sk_bind_node); |
577 | } | 577 | } |
578 | 578 | ||
579 | static __inline__ void sk_add_bind_node(struct sock *sk, | 579 | static inline void sk_add_bind_node(struct sock *sk, |
580 | struct hlist_head *list) | 580 | struct hlist_head *list) |
581 | { | 581 | { |
582 | hlist_add_head(&sk->sk_bind_node, list); | 582 | hlist_add_head(&sk->sk_bind_node, list); |
@@ -665,7 +665,7 @@ static inline void sk_acceptq_added(struct sock *sk) | |||
665 | sk->sk_ack_backlog++; | 665 | sk->sk_ack_backlog++; |
666 | } | 666 | } |
667 | 667 | ||
668 | static inline int sk_acceptq_is_full(struct sock *sk) | 668 | static inline bool sk_acceptq_is_full(const struct sock *sk) |
669 | { | 669 | { |
670 | return sk->sk_ack_backlog > sk->sk_max_ack_backlog; | 670 | return sk->sk_ack_backlog > sk->sk_max_ack_backlog; |
671 | } | 671 | } |
@@ -673,19 +673,19 @@ static inline int sk_acceptq_is_full(struct sock *sk) | |||
673 | /* | 673 | /* |
674 | * Compute minimal free write space needed to queue new packets. | 674 | * Compute minimal free write space needed to queue new packets. |
675 | */ | 675 | */ |
676 | static inline int sk_stream_min_wspace(struct sock *sk) | 676 | static inline int sk_stream_min_wspace(const struct sock *sk) |
677 | { | 677 | { |
678 | return sk->sk_wmem_queued >> 1; | 678 | return sk->sk_wmem_queued >> 1; |
679 | } | 679 | } |
680 | 680 | ||
681 | static inline int sk_stream_wspace(struct sock *sk) | 681 | static inline int sk_stream_wspace(const struct sock *sk) |
682 | { | 682 | { |
683 | return sk->sk_sndbuf - sk->sk_wmem_queued; | 683 | return sk->sk_sndbuf - sk->sk_wmem_queued; |
684 | } | 684 | } |
685 | 685 | ||
686 | extern void sk_stream_write_space(struct sock *sk); | 686 | extern void sk_stream_write_space(struct sock *sk); |
687 | 687 | ||
688 | static inline int sk_stream_memory_free(struct sock *sk) | 688 | static inline bool sk_stream_memory_free(const struct sock *sk) |
689 | { | 689 | { |
690 | return sk->sk_wmem_queued < sk->sk_sndbuf; | 690 | return sk->sk_wmem_queued < sk->sk_sndbuf; |
691 | } | 691 | } |
@@ -809,26 +809,26 @@ struct module; | |||
809 | * transport -> network interface is defined by struct inet_proto | 809 | * transport -> network interface is defined by struct inet_proto |
810 | */ | 810 | */ |
811 | struct proto { | 811 | struct proto { |
812 | void (*close)(struct sock *sk, | 812 | void (*close)(struct sock *sk, |
813 | long timeout); | 813 | long timeout); |
814 | int (*connect)(struct sock *sk, | 814 | int (*connect)(struct sock *sk, |
815 | struct sockaddr *uaddr, | 815 | struct sockaddr *uaddr, |
816 | int addr_len); | 816 | int addr_len); |
817 | int (*disconnect)(struct sock *sk, int flags); | 817 | int (*disconnect)(struct sock *sk, int flags); |
818 | 818 | ||
819 | struct sock * (*accept) (struct sock *sk, int flags, int *err); | 819 | struct sock * (*accept)(struct sock *sk, int flags, int *err); |
820 | 820 | ||
821 | int (*ioctl)(struct sock *sk, int cmd, | 821 | int (*ioctl)(struct sock *sk, int cmd, |
822 | unsigned long arg); | 822 | unsigned long arg); |
823 | int (*init)(struct sock *sk); | 823 | int (*init)(struct sock *sk); |
824 | void (*destroy)(struct sock *sk); | 824 | void (*destroy)(struct sock *sk); |
825 | void (*shutdown)(struct sock *sk, int how); | 825 | void (*shutdown)(struct sock *sk, int how); |
826 | int (*setsockopt)(struct sock *sk, int level, | 826 | int (*setsockopt)(struct sock *sk, int level, |
827 | int optname, char __user *optval, | 827 | int optname, char __user *optval, |
828 | unsigned int optlen); | 828 | unsigned int optlen); |
829 | int (*getsockopt)(struct sock *sk, int level, | 829 | int (*getsockopt)(struct sock *sk, int level, |
830 | int optname, char __user *optval, | 830 | int optname, char __user *optval, |
831 | int __user *option); | 831 | int __user *option); |
832 | #ifdef CONFIG_COMPAT | 832 | #ifdef CONFIG_COMPAT |
833 | int (*compat_setsockopt)(struct sock *sk, | 833 | int (*compat_setsockopt)(struct sock *sk, |
834 | int level, | 834 | int level, |
@@ -845,14 +845,14 @@ struct proto { | |||
845 | struct msghdr *msg, size_t len); | 845 | struct msghdr *msg, size_t len); |
846 | int (*recvmsg)(struct kiocb *iocb, struct sock *sk, | 846 | int (*recvmsg)(struct kiocb *iocb, struct sock *sk, |
847 | struct msghdr *msg, | 847 | struct msghdr *msg, |
848 | size_t len, int noblock, int flags, | 848 | size_t len, int noblock, int flags, |
849 | int *addr_len); | 849 | int *addr_len); |
850 | int (*sendpage)(struct sock *sk, struct page *page, | 850 | int (*sendpage)(struct sock *sk, struct page *page, |
851 | int offset, size_t size, int flags); | 851 | int offset, size_t size, int flags); |
852 | int (*bind)(struct sock *sk, | 852 | int (*bind)(struct sock *sk, |
853 | struct sockaddr *uaddr, int addr_len); | 853 | struct sockaddr *uaddr, int addr_len); |
854 | 854 | ||
855 | int (*backlog_rcv) (struct sock *sk, | 855 | int (*backlog_rcv) (struct sock *sk, |
856 | struct sk_buff *skb); | 856 | struct sk_buff *skb); |
857 | 857 | ||
858 | /* Keeping track of sk's, looking them up, and port selection methods. */ | 858 | /* Keeping track of sk's, looking them up, and port selection methods. */ |
@@ -1173,7 +1173,7 @@ proto_memory_pressure(struct proto *prot) | |||
1173 | extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc); | 1173 | extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc); |
1174 | extern int sock_prot_inuse_get(struct net *net, struct proto *proto); | 1174 | extern int sock_prot_inuse_get(struct net *net, struct proto *proto); |
1175 | #else | 1175 | #else |
1176 | static void inline sock_prot_inuse_add(struct net *net, struct proto *prot, | 1176 | static inline void sock_prot_inuse_add(struct net *net, struct proto *prot, |
1177 | int inc) | 1177 | int inc) |
1178 | { | 1178 | { |
1179 | } | 1179 | } |
@@ -1260,24 +1260,24 @@ static inline int sk_mem_pages(int amt) | |||
1260 | return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT; | 1260 | return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT; |
1261 | } | 1261 | } |
1262 | 1262 | ||
1263 | static inline int sk_has_account(struct sock *sk) | 1263 | static inline bool sk_has_account(struct sock *sk) |
1264 | { | 1264 | { |
1265 | /* return true if protocol supports memory accounting */ | 1265 | /* return true if protocol supports memory accounting */ |
1266 | return !!sk->sk_prot->memory_allocated; | 1266 | return !!sk->sk_prot->memory_allocated; |
1267 | } | 1267 | } |
1268 | 1268 | ||
1269 | static inline int sk_wmem_schedule(struct sock *sk, int size) | 1269 | static inline bool sk_wmem_schedule(struct sock *sk, int size) |
1270 | { | 1270 | { |
1271 | if (!sk_has_account(sk)) | 1271 | if (!sk_has_account(sk)) |
1272 | return 1; | 1272 | return true; |
1273 | return size <= sk->sk_forward_alloc || | 1273 | return size <= sk->sk_forward_alloc || |
1274 | __sk_mem_schedule(sk, size, SK_MEM_SEND); | 1274 | __sk_mem_schedule(sk, size, SK_MEM_SEND); |
1275 | } | 1275 | } |
1276 | 1276 | ||
1277 | static inline int sk_rmem_schedule(struct sock *sk, int size) | 1277 | static inline bool sk_rmem_schedule(struct sock *sk, int size) |
1278 | { | 1278 | { |
1279 | if (!sk_has_account(sk)) | 1279 | if (!sk_has_account(sk)) |
1280 | return 1; | 1280 | return true; |
1281 | return size <= sk->sk_forward_alloc || | 1281 | return size <= sk->sk_forward_alloc || |
1282 | __sk_mem_schedule(sk, size, SK_MEM_RECV); | 1282 | __sk_mem_schedule(sk, size, SK_MEM_RECV); |
1283 | } | 1283 | } |
@@ -1342,7 +1342,7 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) | |||
1342 | * Mark both the sk_lock and the sk_lock.slock as a | 1342 | * Mark both the sk_lock and the sk_lock.slock as a |
1343 | * per-address-family lock class. | 1343 | * per-address-family lock class. |
1344 | */ | 1344 | */ |
1345 | #define sock_lock_init_class_and_name(sk, sname, skey, name, key) \ | 1345 | #define sock_lock_init_class_and_name(sk, sname, skey, name, key) \ |
1346 | do { \ | 1346 | do { \ |
1347 | sk->sk_lock.owned = 0; \ | 1347 | sk->sk_lock.owned = 0; \ |
1348 | init_waitqueue_head(&sk->sk_lock.wq); \ | 1348 | init_waitqueue_head(&sk->sk_lock.wq); \ |
@@ -1350,7 +1350,7 @@ do { \ | |||
1350 | debug_check_no_locks_freed((void *)&(sk)->sk_lock, \ | 1350 | debug_check_no_locks_freed((void *)&(sk)->sk_lock, \ |
1351 | sizeof((sk)->sk_lock)); \ | 1351 | sizeof((sk)->sk_lock)); \ |
1352 | lockdep_set_class_and_name(&(sk)->sk_lock.slock, \ | 1352 | lockdep_set_class_and_name(&(sk)->sk_lock.slock, \ |
1353 | (skey), (sname)); \ | 1353 | (skey), (sname)); \ |
1354 | lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \ | 1354 | lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \ |
1355 | } while (0) | 1355 | } while (0) |
1356 | 1356 | ||
@@ -1410,13 +1410,13 @@ extern int sock_setsockopt(struct socket *sock, int level, | |||
1410 | unsigned int optlen); | 1410 | unsigned int optlen); |
1411 | 1411 | ||
1412 | extern int sock_getsockopt(struct socket *sock, int level, | 1412 | extern int sock_getsockopt(struct socket *sock, int level, |
1413 | int op, char __user *optval, | 1413 | int op, char __user *optval, |
1414 | int __user *optlen); | 1414 | int __user *optlen); |
1415 | extern struct sk_buff *sock_alloc_send_skb(struct sock *sk, | 1415 | extern struct sk_buff *sock_alloc_send_skb(struct sock *sk, |
1416 | unsigned long size, | 1416 | unsigned long size, |
1417 | int noblock, | 1417 | int noblock, |
1418 | int *errcode); | 1418 | int *errcode); |
1419 | extern struct sk_buff *sock_alloc_send_pskb(struct sock *sk, | 1419 | extern struct sk_buff *sock_alloc_send_pskb(struct sock *sk, |
1420 | unsigned long header_len, | 1420 | unsigned long header_len, |
1421 | unsigned long data_len, | 1421 | unsigned long data_len, |
1422 | int noblock, | 1422 | int noblock, |
@@ -1438,7 +1438,7 @@ static inline void sock_update_classid(struct sock *sk) | |||
1438 | * Functions to fill in entries in struct proto_ops when a protocol | 1438 | * Functions to fill in entries in struct proto_ops when a protocol |
1439 | * does not implement a particular function. | 1439 | * does not implement a particular function. |
1440 | */ | 1440 | */ |
1441 | extern int sock_no_bind(struct socket *, | 1441 | extern int sock_no_bind(struct socket *, |
1442 | struct sockaddr *, int); | 1442 | struct sockaddr *, int); |
1443 | extern int sock_no_connect(struct socket *, | 1443 | extern int sock_no_connect(struct socket *, |
1444 | struct sockaddr *, int, int); | 1444 | struct sockaddr *, int, int); |
@@ -1467,7 +1467,7 @@ extern int sock_no_mmap(struct file *file, | |||
1467 | struct vm_area_struct *vma); | 1467 | struct vm_area_struct *vma); |
1468 | extern ssize_t sock_no_sendpage(struct socket *sock, | 1468 | extern ssize_t sock_no_sendpage(struct socket *sock, |
1469 | struct page *page, | 1469 | struct page *page, |
1470 | int offset, size_t size, | 1470 | int offset, size_t size, |
1471 | int flags); | 1471 | int flags); |
1472 | 1472 | ||
1473 | /* | 1473 | /* |
@@ -1490,7 +1490,7 @@ extern void sk_common_release(struct sock *sk); | |||
1490 | /* | 1490 | /* |
1491 | * Default socket callbacks and setup code | 1491 | * Default socket callbacks and setup code |
1492 | */ | 1492 | */ |
1493 | 1493 | ||
1494 | /* Initialise core socket variables */ | 1494 | /* Initialise core socket variables */ |
1495 | extern void sock_init_data(struct socket *sock, struct sock *sk); | 1495 | extern void sock_init_data(struct socket *sock, struct sock *sk); |
1496 | 1496 | ||
@@ -1690,7 +1690,7 @@ extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie); | |||
1690 | 1690 | ||
1691 | extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie); | 1691 | extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie); |
1692 | 1692 | ||
1693 | static inline int sk_can_gso(const struct sock *sk) | 1693 | static inline bool sk_can_gso(const struct sock *sk) |
1694 | { | 1694 | { |
1695 | return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type); | 1695 | return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type); |
1696 | } | 1696 | } |
@@ -1807,7 +1807,7 @@ static inline int sk_rmem_alloc_get(const struct sock *sk) | |||
1807 | * | 1807 | * |
1808 | * Returns true if socket has write or read allocations | 1808 | * Returns true if socket has write or read allocations |
1809 | */ | 1809 | */ |
1810 | static inline int sk_has_allocations(const struct sock *sk) | 1810 | static inline bool sk_has_allocations(const struct sock *sk) |
1811 | { | 1811 | { |
1812 | return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk); | 1812 | return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk); |
1813 | } | 1813 | } |
@@ -1846,9 +1846,7 @@ static inline int sk_has_allocations(const struct sock *sk) | |||
1846 | */ | 1846 | */ |
1847 | static inline bool wq_has_sleeper(struct socket_wq *wq) | 1847 | static inline bool wq_has_sleeper(struct socket_wq *wq) |
1848 | { | 1848 | { |
1849 | 1849 | /* We need to be sure we are in sync with the | |
1850 | /* | ||
1851 | * We need to be sure we are in sync with the | ||
1852 | * add_wait_queue modifications to the wait queue. | 1850 | * add_wait_queue modifications to the wait queue. |
1853 | * | 1851 | * |
1854 | * This memory barrier is paired in the sock_poll_wait. | 1852 | * This memory barrier is paired in the sock_poll_wait. |
@@ -1870,22 +1868,21 @@ static inline void sock_poll_wait(struct file *filp, | |||
1870 | { | 1868 | { |
1871 | if (!poll_does_not_wait(p) && wait_address) { | 1869 | if (!poll_does_not_wait(p) && wait_address) { |
1872 | poll_wait(filp, wait_address, p); | 1870 | poll_wait(filp, wait_address, p); |
1873 | /* | 1871 | /* We need to be sure we are in sync with the |
1874 | * We need to be sure we are in sync with the | ||
1875 | * socket flags modification. | 1872 | * socket flags modification. |
1876 | * | 1873 | * |
1877 | * This memory barrier is paired in the wq_has_sleeper. | 1874 | * This memory barrier is paired in the wq_has_sleeper. |
1878 | */ | 1875 | */ |
1879 | smp_mb(); | 1876 | smp_mb(); |
1880 | } | 1877 | } |
1881 | } | 1878 | } |
1882 | 1879 | ||
1883 | /* | 1880 | /* |
1884 | * Queue a received datagram if it will fit. Stream and sequenced | 1881 | * Queue a received datagram if it will fit. Stream and sequenced |
1885 | * protocols can't normally use this as they need to fit buffers in | 1882 | * protocols can't normally use this as they need to fit buffers in |
1886 | * and play with them. | 1883 | * and play with them. |
1887 | * | 1884 | * |
1888 | * Inlined as it's very short and called for pretty much every | 1885 | * Inlined as it's very short and called for pretty much every |
1889 | * packet ever received. | 1886 | * packet ever received. |
1890 | */ | 1887 | */ |
1891 | 1888 | ||
@@ -1911,10 +1908,10 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk) | |||
1911 | sk_mem_charge(sk, skb->truesize); | 1908 | sk_mem_charge(sk, skb->truesize); |
1912 | } | 1909 | } |
1913 | 1910 | ||
1914 | extern void sk_reset_timer(struct sock *sk, struct timer_list* timer, | 1911 | extern void sk_reset_timer(struct sock *sk, struct timer_list *timer, |
1915 | unsigned long expires); | 1912 | unsigned long expires); |
1916 | 1913 | ||
1917 | extern void sk_stop_timer(struct sock *sk, struct timer_list* timer); | 1914 | extern void sk_stop_timer(struct sock *sk, struct timer_list *timer); |
1918 | 1915 | ||
1919 | extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); | 1916 | extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); |
1920 | 1917 | ||
@@ -1923,7 +1920,7 @@ extern int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb); | |||
1923 | /* | 1920 | /* |
1924 | * Recover an error report and clear atomically | 1921 | * Recover an error report and clear atomically |
1925 | */ | 1922 | */ |
1926 | 1923 | ||
1927 | static inline int sock_error(struct sock *sk) | 1924 | static inline int sock_error(struct sock *sk) |
1928 | { | 1925 | { |
1929 | int err; | 1926 | int err; |
@@ -1939,7 +1936,7 @@ static inline unsigned long sock_wspace(struct sock *sk) | |||
1939 | 1936 | ||
1940 | if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { | 1937 | if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { |
1941 | amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); | 1938 | amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); |
1942 | if (amt < 0) | 1939 | if (amt < 0) |
1943 | amt = 0; | 1940 | amt = 0; |
1944 | } | 1941 | } |
1945 | return amt; | 1942 | return amt; |
@@ -1983,7 +1980,7 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk) | |||
1983 | /* | 1980 | /* |
1984 | * Default write policy as shown to user space via poll/select/SIGIO | 1981 | * Default write policy as shown to user space via poll/select/SIGIO |
1985 | */ | 1982 | */ |
1986 | static inline int sock_writeable(const struct sock *sk) | 1983 | static inline bool sock_writeable(const struct sock *sk) |
1987 | { | 1984 | { |
1988 | return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1); | 1985 | return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1); |
1989 | } | 1986 | } |
@@ -1993,12 +1990,12 @@ static inline gfp_t gfp_any(void) | |||
1993 | return in_softirq() ? GFP_ATOMIC : GFP_KERNEL; | 1990 | return in_softirq() ? GFP_ATOMIC : GFP_KERNEL; |
1994 | } | 1991 | } |
1995 | 1992 | ||
1996 | static inline long sock_rcvtimeo(const struct sock *sk, int noblock) | 1993 | static inline long sock_rcvtimeo(const struct sock *sk, bool noblock) |
1997 | { | 1994 | { |
1998 | return noblock ? 0 : sk->sk_rcvtimeo; | 1995 | return noblock ? 0 : sk->sk_rcvtimeo; |
1999 | } | 1996 | } |
2000 | 1997 | ||
2001 | static inline long sock_sndtimeo(const struct sock *sk, int noblock) | 1998 | static inline long sock_sndtimeo(const struct sock *sk, bool noblock) |
2002 | { | 1999 | { |
2003 | return noblock ? 0 : sk->sk_sndtimeo; | 2000 | return noblock ? 0 : sk->sk_sndtimeo; |
2004 | } | 2001 | } |
@@ -2021,7 +2018,7 @@ extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, | |||
2021 | extern void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk, | 2018 | extern void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk, |
2022 | struct sk_buff *skb); | 2019 | struct sk_buff *skb); |
2023 | 2020 | ||
2024 | static __inline__ void | 2021 | static inline void |
2025 | sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) | 2022 | sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) |
2026 | { | 2023 | { |
2027 | ktime_t kt = skb->tstamp; | 2024 | ktime_t kt = skb->tstamp; |
@@ -2062,7 +2059,7 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, | |||
2062 | (1UL << SOCK_RCVTSTAMP) | \ | 2059 | (1UL << SOCK_RCVTSTAMP) | \ |
2063 | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \ | 2060 | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \ |
2064 | (1UL << SOCK_TIMESTAMPING_SOFTWARE) | \ | 2061 | (1UL << SOCK_TIMESTAMPING_SOFTWARE) | \ |
2065 | (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) | \ | 2062 | (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) | \ |
2066 | (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE)) | 2063 | (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE)) |
2067 | 2064 | ||
2068 | if (sk->sk_flags & FLAGS_TS_OR_DROPS) | 2065 | if (sk->sk_flags & FLAGS_TS_OR_DROPS) |
@@ -2091,7 +2088,7 @@ extern int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags); | |||
2091 | * locked so that the sk_buff queue operation is ok. | 2088 | * locked so that the sk_buff queue operation is ok. |
2092 | */ | 2089 | */ |
2093 | #ifdef CONFIG_NET_DMA | 2090 | #ifdef CONFIG_NET_DMA |
2094 | static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early) | 2091 | static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early) |
2095 | { | 2092 | { |
2096 | __skb_unlink(skb, &sk->sk_receive_queue); | 2093 | __skb_unlink(skb, &sk->sk_receive_queue); |
2097 | if (!copied_early) | 2094 | if (!copied_early) |
@@ -2100,7 +2097,7 @@ static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_e | |||
2100 | __skb_queue_tail(&sk->sk_async_wait_queue, skb); | 2097 | __skb_queue_tail(&sk->sk_async_wait_queue, skb); |
2101 | } | 2098 | } |
2102 | #else | 2099 | #else |
2103 | static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early) | 2100 | static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early) |
2104 | { | 2101 | { |
2105 | __skb_unlink(skb, &sk->sk_receive_queue); | 2102 | __skb_unlink(skb, &sk->sk_receive_queue); |
2106 | __kfree_skb(skb); | 2103 | __kfree_skb(skb); |
@@ -2147,8 +2144,8 @@ extern void sock_enable_timestamp(struct sock *sk, int flag); | |||
2147 | extern int sock_get_timestamp(struct sock *, struct timeval __user *); | 2144 | extern int sock_get_timestamp(struct sock *, struct timeval __user *); |
2148 | extern int sock_get_timestampns(struct sock *, struct timespec __user *); | 2145 | extern int sock_get_timestampns(struct sock *, struct timespec __user *); |
2149 | 2146 | ||
2150 | /* | 2147 | /* |
2151 | * Enable debug/info messages | 2148 | * Enable debug/info messages |
2152 | */ | 2149 | */ |
2153 | extern int net_msg_warn; | 2150 | extern int net_msg_warn; |
2154 | #define NETDEBUG(fmt, args...) \ | 2151 | #define NETDEBUG(fmt, args...) \ |