diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-07-24 13:01:50 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-07-24 13:01:50 -0400 |
| commit | 3c4cfadef6a1665d9cd02a543782d03d3e6740c6 (patch) | |
| tree | 3df72faaacd494d5ac8c9668df4f529b1b5e4457 /include/linux/rtnetlink.h | |
| parent | e017507f37d5cb8b541df165a824958bc333bec3 (diff) | |
| parent | 320f5ea0cedc08ef65d67e056bcb9d181386ef2c (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking changes from David S Miller:
1) Remove the ipv4 routing cache. Now lookups go directly into the FIB
trie and use prebuilt routes cached there.
No more garbage collection, no more rDOS attacks on the routing
cache. Instead we now get predictable and consistent performance,
no matter what the pattern of traffic we service.
This has been almost 2 years in the making. Special thanks to
Julian Anastasov, Eric Dumazet, Steffen Klassert, and others who
have helped along the way.
I'm sure that with a change of this magnitude there will be some
kind of fallout, but such things ought to be simple to fix at this
point. Luckily I'm not European so I'll be around all of August to
fix things :-)
The major stages of this work here are each fronted by a forced
merge commit whose commit message contains a top-level description
of the motivations and implementation issues.
2) Pre-demux of established ipv4 TCP sockets, saves a route demux on
input.
3) TCP SYN/ACK performance tweaks from Eric Dumazet.
4) Add namespace support for netfilter L4 conntrack helpers, from Gao
Feng.
5) Add config mechanism for Energy Efficient Ethernet to ethtool, from
Yuval Mintz.
6) Remove quadratic behavior from /proc/net/unix, from Eric Dumazet.
7) Support for connection tracker helpers in userspace, from Pablo
Neira Ayuso.
8) Allow userspace driven TX load balancing functions in TEAM driver,
from Jiri Pirko.
9) Kill off NLMSG_PUT and RTA_PUT macros, more gross stuff with
embedded gotos.
10) TCP Small Queues, essentially minimize the amount of TCP data queued
up in the packet scheduler layer. Whereas the existing BQL (Byte
Queue Limits) limits the pkt_sched --> netdevice queuing levels,
this controls the TCP --> pkt_sched queueing levels.
From Eric Dumazet.
11) Reduce the number of get_page/put_page ops done on SKB fragments,
from Alexander Duyck.
12) Implement protection against blind resets in TCP (RFC 5961), from
Eric Dumazet.
13) Support the client side of TCP Fast Open, basically the ability to
send data in the SYN exchange, from Yuchung Cheng.
Basically, the sender queues up data with a sendmsg() call using
MSG_FASTOPEN, then they do the connect() which emits the queued up
fastopen data.
14) Avoid all the problems we get into in TCP when timers or PMTU events
hit a locked socket. The TCP Small Queues changes added a
tcp_release_cb() that allows us to queue work up to the
release_sock() caller, and that's what we use here too. From Eric
Dumazet.
15) Zero copy on TX support for TUN driver, from Michael S. Tsirkin.
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1870 commits)
genetlink: define lockdep_genl_is_held() when CONFIG_LOCKDEP
r8169: revert "add byte queue limit support".
ipv4: Change rt->rt_iif encoding.
net: Make skb->skb_iif always track skb->dev
ipv4: Prepare for change of rt->rt_iif encoding.
ipv4: Remove all RTCF_DIRECTSRC handliing.
ipv4: Really ignore ICMP address requests/replies.
decnet: Don't set RTCF_DIRECTSRC.
net/ipv4/ip_vti.c: Fix __rcu warnings detected by sparse.
ipv4: Remove redundant assignment
rds: set correct msg_namelen
openvswitch: potential NULL deref in sample()
tcp: dont drop MTU reduction indications
bnx2x: Add new 57840 device IDs
tcp: avoid oops in tcp_metrics and reset tcpm_stamp
niu: Change niu_rbr_fill() to use unlikely() to check niu_rbr_add_page() return value
niu: Fix to check for dma mapping errors.
net: Fix references to out-of-scope variables in put_cmsg_compat()
net: ethernet: davinci_emac: add pm_runtime support
net: ethernet: davinci_emac: Remove unnecessary #include
...
Diffstat (limited to 'include/linux/rtnetlink.h')
| -rw-r--r-- | include/linux/rtnetlink.h | 132 |
1 files changed, 1 insertions, 131 deletions
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 2c1de8982c8..db71c4ad862 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h | |||
| @@ -612,12 +612,6 @@ struct tcamsg { | |||
| 612 | #include <linux/mutex.h> | 612 | #include <linux/mutex.h> |
| 613 | #include <linux/netdevice.h> | 613 | #include <linux/netdevice.h> |
| 614 | 614 | ||
| 615 | static __inline__ int rtattr_strcmp(const struct rtattr *rta, const char *str) | ||
| 616 | { | ||
| 617 | int len = strlen(str) + 1; | ||
| 618 | return len > rta->rta_len || memcmp(RTA_DATA(rta), str, len); | ||
| 619 | } | ||
| 620 | |||
| 621 | extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo); | 615 | extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo); |
| 622 | extern int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid); | 616 | extern int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid); |
| 623 | extern void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, | 617 | extern void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, |
| @@ -625,124 +619,7 @@ extern void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, | |||
| 625 | extern void rtnl_set_sk_err(struct net *net, u32 group, int error); | 619 | extern void rtnl_set_sk_err(struct net *net, u32 group, int error); |
| 626 | extern int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics); | 620 | extern int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics); |
| 627 | extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, | 621 | extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, |
| 628 | u32 id, u32 ts, u32 tsage, long expires, | 622 | u32 id, long expires, u32 error); |
| 629 | u32 error); | ||
| 630 | |||
| 631 | extern void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data); | ||
| 632 | |||
| 633 | #define RTA_PUT(skb, attrtype, attrlen, data) \ | ||
| 634 | ({ if (unlikely(skb_tailroom(skb) < (int)RTA_SPACE(attrlen))) \ | ||
| 635 | goto rtattr_failure; \ | ||
| 636 | __rta_fill(skb, attrtype, attrlen, data); }) | ||
| 637 | |||
| 638 | #define RTA_APPEND(skb, attrlen, data) \ | ||
| 639 | ({ if (unlikely(skb_tailroom(skb) < (int)(attrlen))) \ | ||
| 640 | goto rtattr_failure; \ | ||
| 641 | memcpy(skb_put(skb, attrlen), data, attrlen); }) | ||
| 642 | |||
| 643 | #define RTA_PUT_NOHDR(skb, attrlen, data) \ | ||
| 644 | ({ RTA_APPEND(skb, RTA_ALIGN(attrlen), data); \ | ||
| 645 | memset(skb_tail_pointer(skb) - (RTA_ALIGN(attrlen) - attrlen), 0, \ | ||
| 646 | RTA_ALIGN(attrlen) - attrlen); }) | ||
| 647 | |||
| 648 | #define RTA_PUT_U8(skb, attrtype, value) \ | ||
| 649 | ({ u8 _tmp = (value); \ | ||
| 650 | RTA_PUT(skb, attrtype, sizeof(u8), &_tmp); }) | ||
| 651 | |||
| 652 | #define RTA_PUT_U16(skb, attrtype, value) \ | ||
| 653 | ({ u16 _tmp = (value); \ | ||
| 654 | RTA_PUT(skb, attrtype, sizeof(u16), &_tmp); }) | ||
| 655 | |||
| 656 | #define RTA_PUT_U32(skb, attrtype, value) \ | ||
| 657 | ({ u32 _tmp = (value); \ | ||
| 658 | RTA_PUT(skb, attrtype, sizeof(u32), &_tmp); }) | ||
| 659 | |||
| 660 | #define RTA_PUT_U64(skb, attrtype, value) \ | ||
| 661 | ({ u64 _tmp = (value); \ | ||
| 662 | RTA_PUT(skb, attrtype, sizeof(u64), &_tmp); }) | ||
| 663 | |||
| 664 | #define RTA_PUT_SECS(skb, attrtype, value) \ | ||
| 665 | RTA_PUT_U64(skb, attrtype, (value) / HZ) | ||
| 666 | |||
| 667 | #define RTA_PUT_MSECS(skb, attrtype, value) \ | ||
| 668 | RTA_PUT_U64(skb, attrtype, jiffies_to_msecs(value)) | ||
| 669 | |||
| 670 | #define RTA_PUT_STRING(skb, attrtype, value) \ | ||
| 671 | RTA_PUT(skb, attrtype, strlen(value) + 1, value) | ||
| 672 | |||
| 673 | #define RTA_PUT_FLAG(skb, attrtype) \ | ||
| 674 | RTA_PUT(skb, attrtype, 0, NULL); | ||
| 675 | |||
| 676 | #define RTA_NEST(skb, type) \ | ||
| 677 | ({ struct rtattr *__start = (struct rtattr *)skb_tail_pointer(skb); \ | ||
| 678 | RTA_PUT(skb, type, 0, NULL); \ | ||
| 679 | __start; }) | ||
| 680 | |||
| 681 | #define RTA_NEST_END(skb, start) \ | ||
| 682 | ({ (start)->rta_len = skb_tail_pointer(skb) - (unsigned char *)(start); \ | ||
| 683 | (skb)->len; }) | ||
| 684 | |||
| 685 | #define RTA_NEST_COMPAT(skb, type, attrlen, data) \ | ||
| 686 | ({ struct rtattr *__start = (struct rtattr *)skb_tail_pointer(skb); \ | ||
| 687 | RTA_PUT(skb, type, attrlen, data); \ | ||
| 688 | RTA_NEST(skb, type); \ | ||
| 689 | __start; }) | ||
| 690 | |||
| 691 | #define RTA_NEST_COMPAT_END(skb, start) \ | ||
| 692 | ({ struct rtattr *__nest = (void *)(start) + NLMSG_ALIGN((start)->rta_len); \ | ||
| 693 | (start)->rta_len = skb_tail_pointer(skb) - (unsigned char *)(start); \ | ||
| 694 | RTA_NEST_END(skb, __nest); \ | ||
| 695 | (skb)->len; }) | ||
| 696 | |||
| 697 | #define RTA_NEST_CANCEL(skb, start) \ | ||
| 698 | ({ if (start) \ | ||
| 699 | skb_trim(skb, (unsigned char *) (start) - (skb)->data); \ | ||
| 700 | -1; }) | ||
| 701 | |||
| 702 | #define RTA_GET_U8(rta) \ | ||
| 703 | ({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u8)) \ | ||
| 704 | goto rtattr_failure; \ | ||
| 705 | *(u8 *) RTA_DATA(rta); }) | ||
| 706 | |||
| 707 | #define RTA_GET_U16(rta) \ | ||
| 708 | ({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u16)) \ | ||
| 709 | goto rtattr_failure; \ | ||
| 710 | *(u16 *) RTA_DATA(rta); }) | ||
| 711 | |||
| 712 | #define RTA_GET_U32(rta) \ | ||
| 713 | ({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u32)) \ | ||
| 714 | goto rtattr_failure; \ | ||
| 715 | *(u32 *) RTA_DATA(rta); }) | ||
| 716 | |||
| 717 | #define RTA_GET_U64(rta) \ | ||
| 718 | ({ u64 _tmp; \ | ||
| 719 | if (!rta || RTA_PAYLOAD(rta) < sizeof(u64)) \ | ||
| 720 | goto rtattr_failure; \ | ||
| 721 | memcpy(&_tmp, RTA_DATA(rta), sizeof(_tmp)); \ | ||
| 722 | _tmp; }) | ||
| 723 | |||
| 724 | #define RTA_GET_FLAG(rta) (!!(rta)) | ||
| 725 | |||
| 726 | #define RTA_GET_SECS(rta) ((unsigned long) RTA_GET_U64(rta) * HZ) | ||
| 727 | #define RTA_GET_MSECS(rta) (msecs_to_jiffies((unsigned long) RTA_GET_U64(rta))) | ||
| 728 | |||
| 729 | static inline struct rtattr * | ||
| 730 | __rta_reserve(struct sk_buff *skb, int attrtype, int attrlen) | ||
| 731 | { | ||
| 732 | struct rtattr *rta; | ||
| 733 | int size = RTA_LENGTH(attrlen); | ||
| 734 | |||
| 735 | rta = (struct rtattr*)skb_put(skb, RTA_ALIGN(size)); | ||
| 736 | rta->rta_type = attrtype; | ||
| 737 | rta->rta_len = size; | ||
| 738 | memset(RTA_DATA(rta) + attrlen, 0, RTA_ALIGN(size) - size); | ||
| 739 | return rta; | ||
| 740 | } | ||
| 741 | |||
| 742 | #define __RTA_PUT(skb, attrtype, attrlen) \ | ||
| 743 | ({ if (unlikely(skb_tailroom(skb) < (int)RTA_SPACE(attrlen))) \ | ||
| 744 | goto rtattr_failure; \ | ||
| 745 | __rta_reserve(skb, attrtype, attrlen); }) | ||
| 746 | 623 | ||
| 747 | extern void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change); | 624 | extern void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change); |
| 748 | 625 | ||
| @@ -794,13 +671,6 @@ extern void __rtnl_unlock(void); | |||
| 794 | } \ | 671 | } \ |
| 795 | } while(0) | 672 | } while(0) |
| 796 | 673 | ||
| 797 | static inline u32 rtm_get_table(struct rtattr **rta, u8 table) | ||
| 798 | { | ||
| 799 | return RTA_GET_U32(rta[RTA_TABLE-1]); | ||
| 800 | rtattr_failure: | ||
| 801 | return table; | ||
| 802 | } | ||
| 803 | |||
| 804 | extern int ndo_dflt_fdb_dump(struct sk_buff *skb, | 674 | extern int ndo_dflt_fdb_dump(struct sk_buff *skb, |
| 805 | struct netlink_callback *cb, | 675 | struct netlink_callback *cb, |
| 806 | struct net_device *dev, | 676 | struct net_device *dev, |
