author		Jason Gunthorpe <jgg@mellanox.com>	2019-08-21 13:12:29 -0400
committer	Jason Gunthorpe <jgg@mellanox.com>	2019-08-21 19:58:18 -0400
commit		daa138a58c802e7b4c2fb73f9b85bb082616ef43 (patch)
tree		be913e8e3745bb367d2ba371598f447649102cfc /include/net/tcp.h
parent		6869b7b206595ae0e326f59719090351eb8f4f5d (diff)
parent		fba0e448a2c5b297a4ddc1ec4e48f4aa6600a1c9 (diff)
Merge branch 'odp_fixes' into hmm.git
From rdma.git
Jason Gunthorpe says:
====================
This is a collection of general cleanups for ODP to clarify some of the
flows around umem creation and use of the interval tree.
====================
The branch is based on v5.3-rc5 due to dependencies, and is being taken
into hmm.git because the next patches depend on it.
* odp_fixes:
RDMA/mlx5: Use odp instead of mr->umem in pagefault_mr
RDMA/mlx5: Use ib_umem_start instead of umem.address
RDMA/core: Make invalidate_range a device operation
RDMA/odp: Use kvcalloc for the dma_list and page_list
RDMA/odp: Check for overflow when computing the umem_odp end
RDMA/odp: Provide ib_umem_odp_release() to undo the allocs
RDMA/odp: Split creating a umem_odp from ib_umem_get
RDMA/odp: Make the three ways to create a umem_odp clear
RDMA/odp: Consolidate umem_odp initialization
RDMA/odp: Make it clearer when a umem is an implicit ODP umem
RDMA/odp: Iterate over the whole rbtree directly
RDMA/odp: Use the common interval tree library instead of generic
RDMA/mlx5: Fix MR npages calculation for IB_ACCESS_HUGETLB
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
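
One of the listed fixes, "RDMA/odp: Check for overflow when computing the
umem_odp end", guards the end-of-range computation against unsigned
wrap-around. A minimal sketch of that pattern, assuming ib_umem-style
address/length fields; this illustrates the technique and is not the code
the series landed:

	/* Guard start + length against wrap-around before using the sum as
	 * an interval-tree end. check_add_overflow() (linux/overflow.h)
	 * returns true when the addition overflows. */
	unsigned long start = umem_odp->umem.address;
	unsigned long end;

	if (check_add_overflow(start, (unsigned long)umem_odp->umem.length,
			       &end))
		return -EOVERFLOW;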
Diffstat (limited to 'include/net/tcp.h')
-rw-r--r--	include/net/tcp.h | 8 ++++++++
1 file changed, 8 insertions(+), 0 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index f42d300f0cfa..81e8ade1e6e4 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1709,6 +1709,11 @@ static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
 	return skb_rb_first(&sk->tcp_rtx_queue);
 }
 
+static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
+{
+	return skb_rb_last(&sk->tcp_rtx_queue);
+}
+
 static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
 {
 	return skb_peek(&sk->sk_write_queue);
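
The new tcp_rtx_queue_tail() mirrors tcp_rtx_queue_head() just above it:
the retransmit queue is kept in an rbtree (sk->tcp_rtx_queue) ordered by
sequence number, so skb_rb_last() yields the highest-sequence skb still
awaiting acknowledgment. A minimal sketch of a caller, using a hypothetical
helper name; TCP_SKB_CB() and tcp_sk() are existing kernel accessors:

/* Hypothetical caller (not part of this patch): end sequence of the last
 * skb in the retransmit rbtree, falling back to snd_nxt when empty. */
static inline u32 tcp_rtx_queue_last_end_seq(const struct sock *sk)
{
	const struct sk_buff *skb = tcp_rtx_queue_tail(sk);

	return skb ? TCP_SKB_CB(skb)->end_seq : tcp_sk(sk)->snd_nxt;
}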
@@ -2103,6 +2108,8 @@ struct tcp_ulp_ops {
 
 	/* initialize ulp */
 	int (*init)(struct sock *sk);
+	/* update ulp */
+	void (*update)(struct sock *sk, struct proto *p);
 	/* cleanup ulp */
 	void (*release)(struct sock *sk);
 
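
The update hook gives the core a way to tell an attached ULP that the
socket's proto is being replaced, instead of silently overwriting state the
ULP depends on. A sketch of a ULP wiring up the hook; every name below
except the .update field itself is made up for illustration:

#include <net/tcp.h>

static int my_ulp_init(struct sock *sk)
{
	return 0;	/* attach ULP state here */
}

static void my_ulp_release(struct sock *sk)
{
	/* tear down ULP state here */
}

static void my_ulp_update(struct sock *sk, struct proto *p)
{
	/* The core wants this socket moved onto proto @p; re-point
	 * sk_prot (a real ULP would also rebuild its own context). */
	sk->sk_prot = p;
}

static struct tcp_ulp_ops my_ulp_ops __read_mostly = {
	.name		= "my_ulp",
	.owner		= THIS_MODULE,
	.init		= my_ulp_init,
	.update		= my_ulp_update,
	.release	= my_ulp_release,
};

Such an ops table would be registered with tcp_register_ulp(&my_ulp_ops)
and selected per socket via setsockopt(fd, IPPROTO_TCP, TCP_ULP, "my_ulp",
sizeof("my_ulp")).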
@@ -2114,6 +2121,7 @@ void tcp_unregister_ulp(struct tcp_ulp_ops *type);
 int tcp_set_ulp(struct sock *sk, const char *name);
 void tcp_get_available_ulp(char *buf, size_t len);
 void tcp_cleanup_ulp(struct sock *sk);
+void tcp_update_ulp(struct sock *sk, struct proto *p);
 
 #define MODULE_ALIAS_TCP_ULP(name)				\
 	__MODULE_INFO(alias, alias_userspace, name);		\
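
This hunk only adds the declaration of tcp_update_ulp(); the definition
would sit in net/ipv4/tcp_ulp.c alongside tcp_set_ulp() and
tcp_cleanup_ulp(). A plausible core-side dispatcher, consistent with the
declaration and the update hook above but written as a sketch rather than
quoted from the series:

#include <net/inet_connection_sock.h>
#include <net/tcp.h>

/* If a ULP is attached and implements update(), delegate the proto swap
 * to it; otherwise just switch the socket over to the new proto. */
void tcp_update_ulp(struct sock *sk, struct proto *proto)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (!icsk->icsk_ulp_ops) {
		sk->sk_prot = proto;
		return;
	}

	if (icsk->icsk_ulp_ops->update)
		icsk->icsk_ulp_ops->update(sk, proto);
}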