diff options
Diffstat (limited to 'include/linux')
32 files changed, 1200 insertions, 103 deletions
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index 2b038442c352..3ce61342fa31 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h | |||
| @@ -135,6 +135,7 @@ enum virtchnl_ops { | |||
| 135 | VIRTCHNL_OP_SET_RSS_HENA = 26, | 135 | VIRTCHNL_OP_SET_RSS_HENA = 26, |
| 136 | VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27, | 136 | VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27, |
| 137 | VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28, | 137 | VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28, |
| 138 | VIRTCHNL_OP_REQUEST_QUEUES = 29, | ||
| 138 | }; | 139 | }; |
| 139 | 140 | ||
| 140 | /* This macro is used to generate a compilation error if a structure | 141 | /* This macro is used to generate a compilation error if a structure |
| @@ -235,6 +236,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); | |||
| 235 | #define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 | 236 | #define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 |
| 236 | #define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 | 237 | #define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 |
| 237 | #define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 | 238 | #define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 |
| 239 | #define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040 | ||
| 238 | #define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 | 240 | #define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 |
| 239 | #define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 | 241 | #define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 |
| 240 | #define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 | 242 | #define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 |
| @@ -325,6 +327,21 @@ struct virtchnl_vsi_queue_config_info { | |||
| 325 | struct virtchnl_queue_pair_info qpair[1]; | 327 | struct virtchnl_queue_pair_info qpair[1]; |
| 326 | }; | 328 | }; |
| 327 | 329 | ||
| 330 | /* VIRTCHNL_OP_REQUEST_QUEUES | ||
| 331 | * VF sends this message to request the PF to allocate additional queues to | ||
| 332 | * this VF. Each VF gets a guaranteed number of queues on init but asking for | ||
| 333 | * additional queues must be negotiated. This is a best effort request as it | ||
| 334 | * is possible the PF does not have enough queues left to support the request. | ||
| 335 | * If the PF cannot support the number requested it will respond with the | ||
| 336 | * maximum number it is able to support. If the request is successful, PF will | ||
| 337 | * then reset the VF to institute required changes. | ||
| 338 | */ | ||
| 339 | |||
| 340 | /* VF resource request */ | ||
| 341 | struct virtchnl_vf_res_request { | ||
| 342 | u16 num_queue_pairs; | ||
| 343 | }; | ||
| 344 | |||
| 328 | VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info); | 345 | VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info); |
| 329 | 346 | ||
| 330 | /* VIRTCHNL_OP_CONFIG_IRQ_MAP | 347 | /* VIRTCHNL_OP_CONFIG_IRQ_MAP |
| @@ -691,6 +708,9 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, | |||
| 691 | case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: | 708 | case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: |
| 692 | case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: | 709 | case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: |
| 693 | break; | 710 | break; |
| 711 | case VIRTCHNL_OP_REQUEST_QUEUES: | ||
| 712 | valid_len = sizeof(struct virtchnl_vf_res_request); | ||
| 713 | break; | ||
| 694 | /* These are always errors coming from the VF. */ | 714 | /* These are always errors coming from the VF. */ |
| 695 | case VIRTCHNL_OP_EVENT: | 715 | case VIRTCHNL_OP_EVENT: |
| 696 | case VIRTCHNL_OP_UNKNOWN: | 716 | case VIRTCHNL_OP_UNKNOWN: |
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index d41d40ac3efd..359b6f5d3d90 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h | |||
| @@ -14,27 +14,46 @@ struct bpf_sock_ops_kern; | |||
| 14 | extern struct static_key_false cgroup_bpf_enabled_key; | 14 | extern struct static_key_false cgroup_bpf_enabled_key; |
| 15 | #define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key) | 15 | #define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key) |
| 16 | 16 | ||
| 17 | struct bpf_prog_list { | ||
| 18 | struct list_head node; | ||
| 19 | struct bpf_prog *prog; | ||
| 20 | }; | ||
| 21 | |||
| 22 | struct bpf_prog_array; | ||
| 23 | |||
| 17 | struct cgroup_bpf { | 24 | struct cgroup_bpf { |
| 18 | /* | 25 | /* array of effective progs in this cgroup */ |
| 19 | * Store two sets of bpf_prog pointers, one for programs that are | 26 | struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE]; |
| 20 | * pinned directly to this cgroup, and one for those that are effective | 27 | |
| 21 | * when this cgroup is accessed. | 28 | /* attached progs to this cgroup and attach flags |
| 29 | * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will | ||
| 30 | * have either zero or one element | ||
| 31 | * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS | ||
| 22 | */ | 32 | */ |
| 23 | struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE]; | 33 | struct list_head progs[MAX_BPF_ATTACH_TYPE]; |
| 24 | struct bpf_prog __rcu *effective[MAX_BPF_ATTACH_TYPE]; | 34 | u32 flags[MAX_BPF_ATTACH_TYPE]; |
| 25 | bool disallow_override[MAX_BPF_ATTACH_TYPE]; | 35 | |
| 36 | /* temp storage for effective prog array used by prog_attach/detach */ | ||
| 37 | struct bpf_prog_array __rcu *inactive; | ||
| 26 | }; | 38 | }; |
| 27 | 39 | ||
| 28 | void cgroup_bpf_put(struct cgroup *cgrp); | 40 | void cgroup_bpf_put(struct cgroup *cgrp); |
| 29 | void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent); | 41 | int cgroup_bpf_inherit(struct cgroup *cgrp); |
| 30 | 42 | ||
| 31 | int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent, | 43 | int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, |
| 32 | struct bpf_prog *prog, enum bpf_attach_type type, | 44 | enum bpf_attach_type type, u32 flags); |
| 33 | bool overridable); | 45 | int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, |
| 34 | 46 | enum bpf_attach_type type, u32 flags); | |
| 35 | /* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */ | 47 | int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, |
| 36 | int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog, | 48 | union bpf_attr __user *uattr); |
| 37 | enum bpf_attach_type type, bool overridable); | 49 | |
| 50 | /* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */ | ||
| 51 | int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, | ||
| 52 | enum bpf_attach_type type, u32 flags); | ||
| 53 | int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, | ||
| 54 | enum bpf_attach_type type, u32 flags); | ||
| 55 | int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, | ||
| 56 | union bpf_attr __user *uattr); | ||
| 38 | 57 | ||
| 39 | int __cgroup_bpf_run_filter_skb(struct sock *sk, | 58 | int __cgroup_bpf_run_filter_skb(struct sock *sk, |
| 40 | struct sk_buff *skb, | 59 | struct sk_buff *skb, |
| @@ -96,8 +115,7 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk, | |||
| 96 | 115 | ||
| 97 | struct cgroup_bpf {}; | 116 | struct cgroup_bpf {}; |
| 98 | static inline void cgroup_bpf_put(struct cgroup *cgrp) {} | 117 | static inline void cgroup_bpf_put(struct cgroup *cgrp) {} |
| 99 | static inline void cgroup_bpf_inherit(struct cgroup *cgrp, | 118 | static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; } |
| 100 | struct cgroup *parent) {} | ||
| 101 | 119 | ||
| 102 | #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) | 120 | #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) |
| 103 | #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; }) | 121 | #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; }) |
diff --git a/include/linux/bpf.h b/include/linux/bpf.h index f1af7d63d678..1e334b248ff6 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h | |||
| @@ -56,6 +56,10 @@ struct bpf_map { | |||
| 56 | struct work_struct work; | 56 | struct work_struct work; |
| 57 | atomic_t usercnt; | 57 | atomic_t usercnt; |
| 58 | struct bpf_map *inner_map_meta; | 58 | struct bpf_map *inner_map_meta; |
| 59 | char name[BPF_OBJ_NAME_LEN]; | ||
| 60 | #ifdef CONFIG_SECURITY | ||
| 61 | void *security; | ||
| 62 | #endif | ||
| 59 | }; | 63 | }; |
| 60 | 64 | ||
| 61 | /* function argument constraints */ | 65 | /* function argument constraints */ |
| @@ -137,6 +141,7 @@ enum bpf_reg_type { | |||
| 137 | PTR_TO_MAP_VALUE, /* reg points to map element value */ | 141 | PTR_TO_MAP_VALUE, /* reg points to map element value */ |
| 138 | PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */ | 142 | PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */ |
| 139 | PTR_TO_STACK, /* reg == frame_pointer + offset */ | 143 | PTR_TO_STACK, /* reg == frame_pointer + offset */ |
| 144 | PTR_TO_PACKET_META, /* skb->data - meta_len */ | ||
| 140 | PTR_TO_PACKET, /* reg points to skb->data */ | 145 | PTR_TO_PACKET, /* reg points to skb->data */ |
| 141 | PTR_TO_PACKET_END, /* skb->data + headlen */ | 146 | PTR_TO_PACKET_END, /* skb->data + headlen */ |
| 142 | }; | 147 | }; |
| @@ -155,6 +160,11 @@ bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size) | |||
| 155 | aux->ctx_field_size = size; | 160 | aux->ctx_field_size = size; |
| 156 | } | 161 | } |
| 157 | 162 | ||
| 163 | struct bpf_prog_ops { | ||
| 164 | int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr, | ||
| 165 | union bpf_attr __user *uattr); | ||
| 166 | }; | ||
| 167 | |||
| 158 | struct bpf_verifier_ops { | 168 | struct bpf_verifier_ops { |
| 159 | /* return eBPF function prototype for verification */ | 169 | /* return eBPF function prototype for verification */ |
| 160 | const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id); | 170 | const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id); |
| @@ -170,8 +180,6 @@ struct bpf_verifier_ops { | |||
| 170 | const struct bpf_insn *src, | 180 | const struct bpf_insn *src, |
| 171 | struct bpf_insn *dst, | 181 | struct bpf_insn *dst, |
| 172 | struct bpf_prog *prog, u32 *target_size); | 182 | struct bpf_prog *prog, u32 *target_size); |
| 173 | int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr, | ||
| 174 | union bpf_attr __user *uattr); | ||
| 175 | }; | 183 | }; |
| 176 | 184 | ||
| 177 | struct bpf_prog_aux { | 185 | struct bpf_prog_aux { |
| @@ -182,10 +190,15 @@ struct bpf_prog_aux { | |||
| 182 | u32 id; | 190 | u32 id; |
| 183 | struct latch_tree_node ksym_tnode; | 191 | struct latch_tree_node ksym_tnode; |
| 184 | struct list_head ksym_lnode; | 192 | struct list_head ksym_lnode; |
| 185 | const struct bpf_verifier_ops *ops; | 193 | const struct bpf_prog_ops *ops; |
| 186 | struct bpf_map **used_maps; | 194 | struct bpf_map **used_maps; |
| 187 | struct bpf_prog *prog; | 195 | struct bpf_prog *prog; |
| 188 | struct user_struct *user; | 196 | struct user_struct *user; |
| 197 | u64 load_time; /* ns since boottime */ | ||
| 198 | char name[BPF_OBJ_NAME_LEN]; | ||
| 199 | #ifdef CONFIG_SECURITY | ||
| 200 | void *security; | ||
| 201 | #endif | ||
| 189 | union { | 202 | union { |
| 190 | struct work_struct work; | 203 | struct work_struct work; |
| 191 | struct rcu_head rcu; | 204 | struct rcu_head rcu; |
| @@ -237,17 +250,59 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, | |||
| 237 | int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, | 250 | int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, |
| 238 | union bpf_attr __user *uattr); | 251 | union bpf_attr __user *uattr); |
| 239 | 252 | ||
| 253 | /* an array of programs to be executed under rcu_lock. | ||
| 254 | * | ||
| 255 | * Typical usage: | ||
| 256 | * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN); | ||
| 257 | * | ||
| 258 | * the structure returned by bpf_prog_array_alloc() should be populated | ||
| 259 | * with program pointers and the last pointer must be NULL. | ||
| 260 | * The user has to keep refcnt on the program and make sure the program | ||
| 261 | * is removed from the array before bpf_prog_put(). | ||
| 262 | * The 'struct bpf_prog_array *' should only be replaced with xchg() | ||
| 263 | * since other cpus are walking the array of pointers in parallel. | ||
| 264 | */ | ||
| 265 | struct bpf_prog_array { | ||
| 266 | struct rcu_head rcu; | ||
| 267 | struct bpf_prog *progs[0]; | ||
| 268 | }; | ||
| 269 | |||
| 270 | struct bpf_prog_array __rcu *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags); | ||
| 271 | void bpf_prog_array_free(struct bpf_prog_array __rcu *progs); | ||
| 272 | int bpf_prog_array_length(struct bpf_prog_array __rcu *progs); | ||
| 273 | int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs, | ||
| 274 | __u32 __user *prog_ids, u32 cnt); | ||
| 275 | |||
| 276 | #define BPF_PROG_RUN_ARRAY(array, ctx, func) \ | ||
| 277 | ({ \ | ||
| 278 | struct bpf_prog **_prog; \ | ||
| 279 | u32 _ret = 1; \ | ||
| 280 | rcu_read_lock(); \ | ||
| 281 | _prog = rcu_dereference(array)->progs; \ | ||
| 282 | for (; *_prog; _prog++) \ | ||
| 283 | _ret &= func(*_prog, ctx); \ | ||
| 284 | rcu_read_unlock(); \ | ||
| 285 | _ret; \ | ||
| 286 | }) | ||
| 287 | |||
| 240 | #ifdef CONFIG_BPF_SYSCALL | 288 | #ifdef CONFIG_BPF_SYSCALL |
| 241 | DECLARE_PER_CPU(int, bpf_prog_active); | 289 | DECLARE_PER_CPU(int, bpf_prog_active); |
| 242 | 290 | ||
| 243 | #define BPF_PROG_TYPE(_id, _ops) \ | 291 | extern const struct file_operations bpf_map_fops; |
| 244 | extern const struct bpf_verifier_ops _ops; | 292 | extern const struct file_operations bpf_prog_fops; |
| 293 | |||
| 294 | #define BPF_PROG_TYPE(_id, _name) \ | ||
| 295 | extern const struct bpf_prog_ops _name ## _prog_ops; \ | ||
| 296 | extern const struct bpf_verifier_ops _name ## _verifier_ops; | ||
| 245 | #define BPF_MAP_TYPE(_id, _ops) \ | 297 | #define BPF_MAP_TYPE(_id, _ops) \ |
| 246 | extern const struct bpf_map_ops _ops; | 298 | extern const struct bpf_map_ops _ops; |
| 247 | #include <linux/bpf_types.h> | 299 | #include <linux/bpf_types.h> |
| 248 | #undef BPF_PROG_TYPE | 300 | #undef BPF_PROG_TYPE |
| 249 | #undef BPF_MAP_TYPE | 301 | #undef BPF_MAP_TYPE |
| 250 | 302 | ||
| 303 | extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops; | ||
| 304 | extern const struct bpf_verifier_ops xdp_analyzer_ops; | ||
| 305 | |||
| 251 | struct bpf_prog *bpf_prog_get(u32 ufd); | 306 | struct bpf_prog *bpf_prog_get(u32 ufd); |
| 252 | struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type); | 307 | struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type); |
| 253 | struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i); | 308 | struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i); |
| @@ -269,11 +324,11 @@ void bpf_map_area_free(void *base); | |||
| 269 | 324 | ||
| 270 | extern int sysctl_unprivileged_bpf_disabled; | 325 | extern int sysctl_unprivileged_bpf_disabled; |
| 271 | 326 | ||
| 272 | int bpf_map_new_fd(struct bpf_map *map); | 327 | int bpf_map_new_fd(struct bpf_map *map, int flags); |
| 273 | int bpf_prog_new_fd(struct bpf_prog *prog); | 328 | int bpf_prog_new_fd(struct bpf_prog *prog); |
| 274 | 329 | ||
| 275 | int bpf_obj_pin_user(u32 ufd, const char __user *pathname); | 330 | int bpf_obj_pin_user(u32 ufd, const char __user *pathname); |
| 276 | int bpf_obj_get_user(const char __user *pathname); | 331 | int bpf_obj_get_user(const char __user *pathname, int flags); |
| 277 | 332 | ||
| 278 | int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value); | 333 | int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value); |
| 279 | int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value); | 334 | int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value); |
| @@ -292,6 +347,8 @@ int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, | |||
| 292 | void *key, void *value, u64 map_flags); | 347 | void *key, void *value, u64 map_flags); |
| 293 | int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); | 348 | int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); |
| 294 | 349 | ||
| 350 | int bpf_get_file_flag(int flags); | ||
| 351 | |||
| 295 | /* memcpy that is used with 8-byte aligned pointers, power-of-8 size and | 352 | /* memcpy that is used with 8-byte aligned pointers, power-of-8 size and |
| 296 | * forced to use 'long' read/writes to try to atomically copy long counters. | 353 | * forced to use 'long' read/writes to try to atomically copy long counters. |
| 297 | * Best-effort only. No barriers here, since it _will_ race with concurrent | 354 | * Best-effort only. No barriers here, since it _will_ race with concurrent |
| @@ -316,6 +373,13 @@ struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key); | |||
| 316 | void __dev_map_insert_ctx(struct bpf_map *map, u32 index); | 373 | void __dev_map_insert_ctx(struct bpf_map *map, u32 index); |
| 317 | void __dev_map_flush(struct bpf_map *map); | 374 | void __dev_map_flush(struct bpf_map *map); |
| 318 | 375 | ||
| 376 | struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key); | ||
| 377 | void __cpu_map_insert_ctx(struct bpf_map *map, u32 index); | ||
| 378 | void __cpu_map_flush(struct bpf_map *map); | ||
| 379 | struct xdp_buff; | ||
| 380 | int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp, | ||
| 381 | struct net_device *dev_rx); | ||
| 382 | |||
| 319 | /* Return map's numa specified by userspace */ | 383 | /* Return map's numa specified by userspace */ |
| 320 | static inline int bpf_map_attr_numa_node(const union bpf_attr *attr) | 384 | static inline int bpf_map_attr_numa_node(const union bpf_attr *attr) |
| 321 | { | 385 | { |
| @@ -323,7 +387,7 @@ static inline int bpf_map_attr_numa_node(const union bpf_attr *attr) | |||
| 323 | attr->numa_node : NUMA_NO_NODE; | 387 | attr->numa_node : NUMA_NO_NODE; |
| 324 | } | 388 | } |
| 325 | 389 | ||
| 326 | #else | 390 | #else /* !CONFIG_BPF_SYSCALL */ |
| 327 | static inline struct bpf_prog *bpf_prog_get(u32 ufd) | 391 | static inline struct bpf_prog *bpf_prog_get(u32 ufd) |
| 328 | { | 392 | { |
| 329 | return ERR_PTR(-EOPNOTSUPP); | 393 | return ERR_PTR(-EOPNOTSUPP); |
| @@ -368,7 +432,7 @@ static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages) | |||
| 368 | { | 432 | { |
| 369 | } | 433 | } |
| 370 | 434 | ||
| 371 | static inline int bpf_obj_get_user(const char __user *pathname) | 435 | static inline int bpf_obj_get_user(const char __user *pathname, int flags) |
| 372 | { | 436 | { |
| 373 | return -EOPNOTSUPP; | 437 | return -EOPNOTSUPP; |
| 374 | } | 438 | } |
| @@ -386,6 +450,28 @@ static inline void __dev_map_insert_ctx(struct bpf_map *map, u32 index) | |||
| 386 | static inline void __dev_map_flush(struct bpf_map *map) | 450 | static inline void __dev_map_flush(struct bpf_map *map) |
| 387 | { | 451 | { |
| 388 | } | 452 | } |
| 453 | |||
| 454 | static inline | ||
| 455 | struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key) | ||
| 456 | { | ||
| 457 | return NULL; | ||
| 458 | } | ||
| 459 | |||
| 460 | static inline void __cpu_map_insert_ctx(struct bpf_map *map, u32 index) | ||
| 461 | { | ||
| 462 | } | ||
| 463 | |||
| 464 | static inline void __cpu_map_flush(struct bpf_map *map) | ||
| 465 | { | ||
| 466 | } | ||
| 467 | |||
| 468 | struct xdp_buff; | ||
| 469 | static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, | ||
| 470 | struct xdp_buff *xdp, | ||
| 471 | struct net_device *dev_rx) | ||
| 472 | { | ||
| 473 | return 0; | ||
| 474 | } | ||
| 389 | #endif /* CONFIG_BPF_SYSCALL */ | 475 | #endif /* CONFIG_BPF_SYSCALL */ |
| 390 | 476 | ||
| 391 | #if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) | 477 | #if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) |
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index 6f1a567667b8..36418ad43245 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h | |||
| @@ -1,22 +1,22 @@ | |||
| 1 | /* internal file - do not include directly */ | 1 | /* internal file - do not include directly */ |
| 2 | 2 | ||
| 3 | #ifdef CONFIG_NET | 3 | #ifdef CONFIG_NET |
| 4 | BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter_prog_ops) | 4 | BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter) |
| 5 | BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act_prog_ops) | 5 | BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act) |
| 6 | BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act_prog_ops) | 6 | BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act) |
| 7 | BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp_prog_ops) | 7 | BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp) |
| 8 | BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb_prog_ops) | 8 | BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb) |
| 9 | BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock_prog_ops) | 9 | BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock) |
| 10 | BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_inout_prog_ops) | 10 | BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_inout) |
| 11 | BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_inout_prog_ops) | 11 | BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_inout) |
| 12 | BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit_prog_ops) | 12 | BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit) |
| 13 | BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops_prog_ops) | 13 | BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops) |
| 14 | BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb_prog_ops) | 14 | BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb) |
| 15 | #endif | 15 | #endif |
| 16 | #ifdef CONFIG_BPF_EVENTS | 16 | #ifdef CONFIG_BPF_EVENTS |
| 17 | BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe_prog_ops) | 17 | BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe) |
| 18 | BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint_prog_ops) | 18 | BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint) |
| 19 | BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event_prog_ops) | 19 | BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event) |
| 20 | #endif | 20 | #endif |
| 21 | 21 | ||
| 22 | BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops) | 22 | BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops) |
| @@ -41,4 +41,5 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops) | |||
| 41 | #ifdef CONFIG_STREAM_PARSER | 41 | #ifdef CONFIG_STREAM_PARSER |
| 42 | BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops) | 42 | BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops) |
| 43 | #endif | 43 | #endif |
| 44 | BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops) | ||
| 44 | #endif | 45 | #endif |
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index b8d200f60a40..feeaea93d959 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h | |||
| @@ -115,6 +115,21 @@ struct bpf_insn_aux_data { | |||
| 115 | 115 | ||
| 116 | #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ | 116 | #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ |
| 117 | 117 | ||
| 118 | #define BPF_VERIFIER_TMP_LOG_SIZE 1024 | ||
| 119 | |||
| 120 | struct bpf_verifer_log { | ||
| 121 | u32 level; | ||
| 122 | char kbuf[BPF_VERIFIER_TMP_LOG_SIZE]; | ||
| 123 | char __user *ubuf; | ||
| 124 | u32 len_used; | ||
| 125 | u32 len_total; | ||
| 126 | }; | ||
| 127 | |||
| 128 | static inline bool bpf_verifier_log_full(const struct bpf_verifer_log *log) | ||
| 129 | { | ||
| 130 | return log->len_used >= log->len_total - 1; | ||
| 131 | } | ||
| 132 | |||
| 118 | struct bpf_verifier_env; | 133 | struct bpf_verifier_env; |
| 119 | struct bpf_ext_analyzer_ops { | 134 | struct bpf_ext_analyzer_ops { |
| 120 | int (*insn_hook)(struct bpf_verifier_env *env, | 135 | int (*insn_hook)(struct bpf_verifier_env *env, |
| @@ -126,6 +141,7 @@ struct bpf_ext_analyzer_ops { | |||
| 126 | */ | 141 | */ |
| 127 | struct bpf_verifier_env { | 142 | struct bpf_verifier_env { |
| 128 | struct bpf_prog *prog; /* eBPF program being verified */ | 143 | struct bpf_prog *prog; /* eBPF program being verified */ |
| 144 | const struct bpf_verifier_ops *ops; | ||
| 129 | struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */ | 145 | struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */ |
| 130 | int stack_size; /* number of states to be processed */ | 146 | int stack_size; /* number of states to be processed */ |
| 131 | bool strict_alignment; /* perform strict pointer alignment checks */ | 147 | bool strict_alignment; /* perform strict pointer alignment checks */ |
| @@ -139,6 +155,8 @@ struct bpf_verifier_env { | |||
| 139 | bool allow_ptr_leaks; | 155 | bool allow_ptr_leaks; |
| 140 | bool seen_direct_write; | 156 | bool seen_direct_write; |
| 141 | struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ | 157 | struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ |
| 158 | |||
| 159 | struct bpf_verifer_log log; | ||
| 142 | }; | 160 | }; |
| 143 | 161 | ||
| 144 | int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops, | 162 | int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops, |
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index abcda9b458ab..9ac9e3e3d1e5 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h | |||
| @@ -63,6 +63,7 @@ | |||
| 63 | #define PHY_BRCM_EXT_IBND_TX_ENABLE 0x00002000 | 63 | #define PHY_BRCM_EXT_IBND_TX_ENABLE 0x00002000 |
| 64 | #define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000 | 64 | #define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000 |
| 65 | #define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000 | 65 | #define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000 |
| 66 | #define PHY_BRCM_EN_MASTER_MODE 0x00010000 | ||
| 66 | 67 | ||
| 67 | /* Broadcom BCM7xxx specific workarounds */ | 68 | /* Broadcom BCM7xxx specific workarounds */ |
| 68 | #define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff) | 69 | #define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff) |
diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h index 89f67c1c3160..805d16654459 100644 --- a/include/linux/byteorder/generic.h +++ b/include/linux/byteorder/generic.h | |||
| @@ -170,4 +170,20 @@ static inline void be64_add_cpu(__be64 *var, u64 val) | |||
| 170 | *var = cpu_to_be64(be64_to_cpu(*var) + val); | 170 | *var = cpu_to_be64(be64_to_cpu(*var) + val); |
| 171 | } | 171 | } |
| 172 | 172 | ||
| 173 | static inline void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len) | ||
| 174 | { | ||
| 175 | int i; | ||
| 176 | |||
| 177 | for (i = 0; i < len; i++) | ||
| 178 | dst[i] = cpu_to_be32(src[i]); | ||
| 179 | } | ||
| 180 | |||
| 181 | static inline void be32_to_cpu_array(u32 *dst, const __be32 *src, size_t len) | ||
| 182 | { | ||
| 183 | int i; | ||
| 184 | |||
| 185 | for (i = 0; i < len; i++) | ||
| 186 | dst[i] = be32_to_cpu(src[i]); | ||
| 187 | } | ||
| 188 | |||
| 173 | #endif /* _LINUX_BYTEORDER_GENERIC_H */ | 189 | #endif /* _LINUX_BYTEORDER_GENERIC_H */ |
diff --git a/include/linux/connector.h b/include/linux/connector.h index f8fe8637d771..032102b19645 100644 --- a/include/linux/connector.h +++ b/include/linux/connector.h | |||
| @@ -22,7 +22,7 @@ | |||
| 22 | #define __CONNECTOR_H | 22 | #define __CONNECTOR_H |
| 23 | 23 | ||
| 24 | 24 | ||
| 25 | #include <linux/atomic.h> | 25 | #include <linux/refcount.h> |
| 26 | 26 | ||
| 27 | #include <linux/list.h> | 27 | #include <linux/list.h> |
| 28 | #include <linux/workqueue.h> | 28 | #include <linux/workqueue.h> |
| @@ -49,7 +49,7 @@ struct cn_callback_id { | |||
| 49 | 49 | ||
| 50 | struct cn_callback_entry { | 50 | struct cn_callback_entry { |
| 51 | struct list_head callback_entry; | 51 | struct list_head callback_entry; |
| 52 | atomic_t refcnt; | 52 | refcount_t refcnt; |
| 53 | struct cn_queue_dev *pdev; | 53 | struct cn_queue_dev *pdev; |
| 54 | 54 | ||
| 55 | struct cn_callback_id id; | 55 | struct cn_callback_id id; |
diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h index a4be70398ce1..f69f98541953 100644 --- a/include/linux/dynamic_queue_limits.h +++ b/include/linux/dynamic_queue_limits.h | |||
| @@ -98,7 +98,7 @@ void dql_completed(struct dql *dql, unsigned int count); | |||
| 98 | void dql_reset(struct dql *dql); | 98 | void dql_reset(struct dql *dql); |
| 99 | 99 | ||
| 100 | /* Initialize dql state */ | 100 | /* Initialize dql state */ |
| 101 | int dql_init(struct dql *dql, unsigned hold_time); | 101 | void dql_init(struct dql *dql, unsigned int hold_time); |
| 102 | 102 | ||
| 103 | #endif /* _KERNEL_ */ | 103 | #endif /* _KERNEL_ */ |
| 104 | 104 | ||
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 4587a4c36923..c77fa3529e15 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h | |||
| @@ -163,6 +163,16 @@ extern int | |||
| 163 | __ethtool_get_link_ksettings(struct net_device *dev, | 163 | __ethtool_get_link_ksettings(struct net_device *dev, |
| 164 | struct ethtool_link_ksettings *link_ksettings); | 164 | struct ethtool_link_ksettings *link_ksettings); |
| 165 | 165 | ||
| 166 | /** | ||
| 167 | * ethtool_intersect_link_masks - Given two link masks, AND them together | ||
| 168 | * @dst: first mask and where result is stored | ||
| 169 | * @src: second mask to intersect with | ||
| 170 | * | ||
| 171 | * Given two link mode masks, AND them together and save the result in dst. | ||
| 172 | */ | ||
| 173 | void ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst, | ||
| 174 | struct ethtool_link_ksettings *src); | ||
| 175 | |||
| 166 | void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst, | 176 | void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst, |
| 167 | u32 legacy_u32); | 177 | u32 legacy_u32); |
| 168 | 178 | ||
diff --git a/include/linux/filter.h b/include/linux/filter.h index 818a0b26249e..cdd78a7beaae 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h | |||
| @@ -481,30 +481,36 @@ struct sk_filter { | |||
| 481 | struct bpf_prog *prog; | 481 | struct bpf_prog *prog; |
| 482 | }; | 482 | }; |
| 483 | 483 | ||
| 484 | #define BPF_PROG_RUN(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi) | 484 | #define BPF_PROG_RUN(filter, ctx) (*(filter)->bpf_func)(ctx, (filter)->insnsi) |
| 485 | 485 | ||
| 486 | #define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN | 486 | #define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN |
| 487 | 487 | ||
| 488 | struct bpf_skb_data_end { | 488 | struct bpf_skb_data_end { |
| 489 | struct qdisc_skb_cb qdisc_cb; | 489 | struct qdisc_skb_cb qdisc_cb; |
| 490 | void *data_meta; | ||
| 490 | void *data_end; | 491 | void *data_end; |
| 491 | }; | 492 | }; |
| 492 | 493 | ||
| 493 | struct xdp_buff { | 494 | struct xdp_buff { |
| 494 | void *data; | 495 | void *data; |
| 495 | void *data_end; | 496 | void *data_end; |
| 497 | void *data_meta; | ||
| 496 | void *data_hard_start; | 498 | void *data_hard_start; |
| 497 | }; | 499 | }; |
| 498 | 500 | ||
| 499 | /* compute the linear packet data range [data, data_end) which | 501 | /* Compute the linear packet data range [data, data_end) which |
| 500 | * will be accessed by cls_bpf, act_bpf and lwt programs | 502 | * will be accessed by various program types (cls_bpf, act_bpf, |
| 503 | * lwt, ...). Subsystems allowing direct data access must (!) | ||
| 504 | * ensure that cb[] area can be written to when BPF program is | ||
| 505 | * invoked (otherwise cb[] save/restore is necessary). | ||
| 501 | */ | 506 | */ |
| 502 | static inline void bpf_compute_data_end(struct sk_buff *skb) | 507 | static inline void bpf_compute_data_pointers(struct sk_buff *skb) |
| 503 | { | 508 | { |
| 504 | struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb; | 509 | struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb; |
| 505 | 510 | ||
| 506 | BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb)); | 511 | BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb)); |
| 507 | cb->data_end = skb->data + skb_headlen(skb); | 512 | cb->data_meta = skb->data - skb_metadata_len(skb); |
| 513 | cb->data_end = skb->data + skb_headlen(skb); | ||
| 508 | } | 514 | } |
| 509 | 515 | ||
| 510 | static inline u8 *bpf_skb_cb(struct sk_buff *skb) | 516 | static inline u8 *bpf_skb_cb(struct sk_buff *skb) |
| @@ -725,8 +731,22 @@ int xdp_do_redirect(struct net_device *dev, | |||
| 725 | struct bpf_prog *prog); | 731 | struct bpf_prog *prog); |
| 726 | void xdp_do_flush_map(void); | 732 | void xdp_do_flush_map(void); |
| 727 | 733 | ||
| 734 | /* Drivers not supporting XDP metadata can use this helper, which | ||
| 735 | * rejects any room expansion for metadata as a result. | ||
| 736 | */ | ||
| 737 | static __always_inline void | ||
| 738 | xdp_set_data_meta_invalid(struct xdp_buff *xdp) | ||
| 739 | { | ||
| 740 | xdp->data_meta = xdp->data + 1; | ||
| 741 | } | ||
| 742 | |||
| 743 | static __always_inline bool | ||
| 744 | xdp_data_meta_unsupported(const struct xdp_buff *xdp) | ||
| 745 | { | ||
| 746 | return unlikely(xdp->data_meta > xdp->data); | ||
| 747 | } | ||
| 748 | |||
| 728 | void bpf_warn_invalid_xdp_action(u32 act); | 749 | void bpf_warn_invalid_xdp_action(u32 act); |
| 729 | void bpf_warn_invalid_xdp_redirect(u32 ifindex); | ||
| 730 | 750 | ||
| 731 | struct sock *do_sk_redirect_map(struct sk_buff *skb); | 751 | struct sock *do_sk_redirect_map(struct sk_buff *skb); |
| 732 | 752 | ||
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 55a604ad459f..ee6657a0ed69 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h | |||
| @@ -2445,6 +2445,7 @@ enum ieee80211_sa_query_action { | |||
| 2445 | #define WLAN_OUI_TYPE_MICROSOFT_WPA 1 | 2445 | #define WLAN_OUI_TYPE_MICROSOFT_WPA 1 |
| 2446 | #define WLAN_OUI_TYPE_MICROSOFT_WMM 2 | 2446 | #define WLAN_OUI_TYPE_MICROSOFT_WMM 2 |
| 2447 | #define WLAN_OUI_TYPE_MICROSOFT_WPS 4 | 2447 | #define WLAN_OUI_TYPE_MICROSOFT_WPS 4 |
| 2448 | #define WLAN_OUI_TYPE_MICROSOFT_TPC 8 | ||
| 2448 | 2449 | ||
| 2449 | /* | 2450 | /* |
| 2450 | * WMM/802.11e Tspec Element | 2451 | * WMM/802.11e Tspec Element |
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h index 3355efc89781..6756fea18b69 100644 --- a/include/linux/if_arp.h +++ b/include/linux/if_arp.h | |||
| @@ -31,7 +31,7 @@ static inline struct arphdr *arp_hdr(const struct sk_buff *skb) | |||
| 31 | return (struct arphdr *)skb_network_header(skb); | 31 | return (struct arphdr *)skb_network_header(skb); |
| 32 | } | 32 | } |
| 33 | 33 | ||
| 34 | static inline int arp_hdr_len(struct net_device *dev) | 34 | static inline unsigned int arp_hdr_len(const struct net_device *dev) |
| 35 | { | 35 | { |
| 36 | switch (dev->type) { | 36 | switch (dev->type) { |
| 37 | #if IS_ENABLED(CONFIG_FIREWIRE_NET) | 37 | #if IS_ENABLED(CONFIG_FIREWIRE_NET) |
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 3cd18ac0697f..02639ebea2f0 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h | |||
| @@ -49,6 +49,7 @@ struct br_ip_list { | |||
| 49 | #define BR_MULTICAST_TO_UNICAST BIT(12) | 49 | #define BR_MULTICAST_TO_UNICAST BIT(12) |
| 50 | #define BR_VLAN_TUNNEL BIT(13) | 50 | #define BR_VLAN_TUNNEL BIT(13) |
| 51 | #define BR_BCAST_FLOOD BIT(14) | 51 | #define BR_BCAST_FLOOD BIT(14) |
| 52 | #define BR_NEIGH_SUPPRESS BIT(15) | ||
| 52 | 53 | ||
| 53 | #define BR_DEFAULT_AGEING_TIME (300 * HZ) | 54 | #define BR_DEFAULT_AGEING_TIME (300 * HZ) |
| 54 | 55 | ||
| @@ -63,6 +64,7 @@ int br_multicast_list_adjacent(struct net_device *dev, | |||
| 63 | bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto); | 64 | bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto); |
| 64 | bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto); | 65 | bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto); |
| 65 | bool br_multicast_enabled(const struct net_device *dev); | 66 | bool br_multicast_enabled(const struct net_device *dev); |
| 67 | bool br_multicast_router(const struct net_device *dev); | ||
| 66 | #else | 68 | #else |
| 67 | static inline int br_multicast_list_adjacent(struct net_device *dev, | 69 | static inline int br_multicast_list_adjacent(struct net_device *dev, |
| 68 | struct list_head *br_ip_list) | 70 | struct list_head *br_ip_list) |
| @@ -83,6 +85,10 @@ static inline bool br_multicast_enabled(const struct net_device *dev) | |||
| 83 | { | 85 | { |
| 84 | return false; | 86 | return false; |
| 85 | } | 87 | } |
| 88 | static inline bool br_multicast_router(const struct net_device *dev) | ||
| 89 | { | ||
| 90 | return false; | ||
| 91 | } | ||
| 86 | #endif | 92 | #endif |
| 87 | 93 | ||
| 88 | #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING) | 94 | #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING) |
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index c9ec1343d187..10e319f41fb1 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h | |||
| @@ -72,7 +72,8 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan, | |||
| 72 | extern void macvlan_common_setup(struct net_device *dev); | 72 | extern void macvlan_common_setup(struct net_device *dev); |
| 73 | 73 | ||
| 74 | extern int macvlan_common_newlink(struct net *src_net, struct net_device *dev, | 74 | extern int macvlan_common_newlink(struct net *src_net, struct net_device *dev, |
| 75 | struct nlattr *tb[], struct nlattr *data[]); | 75 | struct nlattr *tb[], struct nlattr *data[], |
| 76 | struct netlink_ext_ack *extack); | ||
| 76 | 77 | ||
| 77 | extern void macvlan_count_rx(const struct macvlan_dev *vlan, | 78 | extern void macvlan_count_rx(const struct macvlan_dev *vlan, |
| 78 | unsigned int len, bool success, | 79 | unsigned int len, bool success, |
diff --git a/include/linux/if_phonet.h b/include/linux/if_phonet.h index bbcdb0a767d8..a118ee4a8428 100644 --- a/include/linux/if_phonet.h +++ b/include/linux/if_phonet.h | |||
| @@ -10,5 +10,5 @@ | |||
| 10 | 10 | ||
| 11 | #include <uapi/linux/if_phonet.h> | 11 | #include <uapi/linux/if_phonet.h> |
| 12 | 12 | ||
| 13 | extern struct header_ops phonet_header_ops; | 13 | extern const struct header_ops phonet_header_ops; |
| 14 | #endif | 14 | #endif |
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index fb3f809e34e4..681dff30940b 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h | |||
| @@ -154,6 +154,7 @@ struct in_ifaddr { | |||
| 154 | struct in_validator_info { | 154 | struct in_validator_info { |
| 155 | __be32 ivi_addr; | 155 | __be32 ivi_addr; |
| 156 | struct in_device *ivi_dev; | 156 | struct in_device *ivi_dev; |
| 157 | struct netlink_ext_ack *extack; | ||
| 157 | }; | 158 | }; |
| 158 | 159 | ||
| 159 | int register_inetaddr_notifier(struct notifier_block *nb); | 160 | int register_inetaddr_notifier(struct notifier_block *nb); |
| @@ -179,6 +180,7 @@ __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev, __be32 dst, | |||
| 179 | __be32 local, int scope); | 180 | __be32 local, int scope); |
| 180 | struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix, | 181 | struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix, |
| 181 | __be32 mask); | 182 | __be32 mask); |
| 183 | struct in_ifaddr *inet_lookup_ifaddr_rcu(struct net *net, __be32 addr); | ||
| 182 | static __inline__ bool inet_ifa_match(__be32 addr, struct in_ifaddr *ifa) | 184 | static __inline__ bool inet_ifa_match(__be32 addr, struct in_ifaddr *ifa) |
| 183 | { | 185 | { |
| 184 | return !((addr^ifa->ifa_address)&ifa->ifa_mask); | 186 | return !((addr^ifa->ifa_address)&ifa->ifa_mask); |
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index c9258124e417..7161d8e7ee79 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h | |||
| @@ -1351,6 +1351,40 @@ | |||
| 1351 | * @inode we wish to get the security context of. | 1351 | * @inode we wish to get the security context of. |
| 1352 | * @ctx is a pointer in which to place the allocated security context. | 1352 | * @ctx is a pointer in which to place the allocated security context. |
| 1353 | * @ctxlen points to the place to put the length of @ctx. | 1353 | * @ctxlen points to the place to put the length of @ctx. |
| 1354 | * | ||
| 1355 | * Security hooks for using the eBPF maps and programs functionalities through | ||
| 1356 | * eBPF syscalls. | ||
| 1357 | * | ||
| 1358 | * @bpf: | ||
| 1359 | * Do a initial check for all bpf syscalls after the attribute is copied | ||
| 1360 | * into the kernel. The actual security module can implement their own | ||
| 1361 | * rules to check the specific cmd they need. | ||
| 1362 | * | ||
| 1363 | * @bpf_map: | ||
| 1364 | * Do a check when the kernel generate and return a file descriptor for | ||
| 1365 | * eBPF maps. | ||
| 1366 | * | ||
| 1367 | * @map: bpf map that we want to access | ||
| 1368 | * @mask: the access flags | ||
| 1369 | * | ||
| 1370 | * @bpf_prog: | ||
| 1371 | * Do a check when the kernel generate and return a file descriptor for | ||
| 1372 | * eBPF programs. | ||
| 1373 | * | ||
| 1374 | * @prog: bpf prog that userspace want to use. | ||
| 1375 | * | ||
| 1376 | * @bpf_map_alloc_security: | ||
| 1377 | * Initialize the security field inside bpf map. | ||
| 1378 | * | ||
| 1379 | * @bpf_map_free_security: | ||
| 1380 | * Clean up the security information stored inside bpf map. | ||
| 1381 | * | ||
| 1382 | * @bpf_prog_alloc_security: | ||
| 1383 | * Initialize the security field inside bpf program. | ||
| 1384 | * | ||
| 1385 | * @bpf_prog_free_security: | ||
| 1386 | * Clean up the security information stored inside bpf prog. | ||
| 1387 | * | ||
| 1354 | */ | 1388 | */ |
| 1355 | union security_list_options { | 1389 | union security_list_options { |
| 1356 | int (*binder_set_context_mgr)(struct task_struct *mgr); | 1390 | int (*binder_set_context_mgr)(struct task_struct *mgr); |
| @@ -1682,6 +1716,17 @@ union security_list_options { | |||
| 1682 | struct audit_context *actx); | 1716 | struct audit_context *actx); |
| 1683 | void (*audit_rule_free)(void *lsmrule); | 1717 | void (*audit_rule_free)(void *lsmrule); |
| 1684 | #endif /* CONFIG_AUDIT */ | 1718 | #endif /* CONFIG_AUDIT */ |
| 1719 | |||
| 1720 | #ifdef CONFIG_BPF_SYSCALL | ||
| 1721 | int (*bpf)(int cmd, union bpf_attr *attr, | ||
| 1722 | unsigned int size); | ||
| 1723 | int (*bpf_map)(struct bpf_map *map, fmode_t fmode); | ||
| 1724 | int (*bpf_prog)(struct bpf_prog *prog); | ||
| 1725 | int (*bpf_map_alloc_security)(struct bpf_map *map); | ||
| 1726 | void (*bpf_map_free_security)(struct bpf_map *map); | ||
| 1727 | int (*bpf_prog_alloc_security)(struct bpf_prog_aux *aux); | ||
| 1728 | void (*bpf_prog_free_security)(struct bpf_prog_aux *aux); | ||
| 1729 | #endif /* CONFIG_BPF_SYSCALL */ | ||
| 1685 | }; | 1730 | }; |
| 1686 | 1731 | ||
| 1687 | struct security_hook_heads { | 1732 | struct security_hook_heads { |
| @@ -1901,6 +1946,15 @@ struct security_hook_heads { | |||
| 1901 | struct list_head audit_rule_match; | 1946 | struct list_head audit_rule_match; |
| 1902 | struct list_head audit_rule_free; | 1947 | struct list_head audit_rule_free; |
| 1903 | #endif /* CONFIG_AUDIT */ | 1948 | #endif /* CONFIG_AUDIT */ |
| 1949 | #ifdef CONFIG_BPF_SYSCALL | ||
| 1950 | struct list_head bpf; | ||
| 1951 | struct list_head bpf_map; | ||
| 1952 | struct list_head bpf_prog; | ||
| 1953 | struct list_head bpf_map_alloc_security; | ||
| 1954 | struct list_head bpf_map_free_security; | ||
| 1955 | struct list_head bpf_prog_alloc_security; | ||
| 1956 | struct list_head bpf_prog_free_security; | ||
| 1957 | #endif /* CONFIG_BPF_SYSCALL */ | ||
| 1904 | } __randomize_layout; | 1958 | } __randomize_layout; |
| 1905 | 1959 | ||
| 1906 | /* | 1960 | /* |
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index b0a57e043fa3..a9b5fed8f7c6 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
| @@ -40,7 +40,7 @@ | |||
| 40 | #include <linux/cpu_rmap.h> | 40 | #include <linux/cpu_rmap.h> |
| 41 | #include <linux/crash_dump.h> | 41 | #include <linux/crash_dump.h> |
| 42 | 42 | ||
| 43 | #include <linux/atomic.h> | 43 | #include <linux/refcount.h> |
| 44 | 44 | ||
| 45 | #include <linux/timecounter.h> | 45 | #include <linux/timecounter.h> |
| 46 | 46 | ||
| @@ -751,7 +751,7 @@ struct mlx4_cq { | |||
| 751 | int cqn; | 751 | int cqn; |
| 752 | unsigned vector; | 752 | unsigned vector; |
| 753 | 753 | ||
| 754 | atomic_t refcount; | 754 | refcount_t refcount; |
| 755 | struct completion free; | 755 | struct completion free; |
| 756 | struct { | 756 | struct { |
| 757 | struct list_head list; | 757 | struct list_head list; |
| @@ -768,7 +768,7 @@ struct mlx4_qp { | |||
| 768 | 768 | ||
| 769 | int qpn; | 769 | int qpn; |
| 770 | 770 | ||
| 771 | atomic_t refcount; | 771 | refcount_t refcount; |
| 772 | struct completion free; | 772 | struct completion free; |
| 773 | u8 usage; | 773 | u8 usage; |
| 774 | }; | 774 | }; |
| @@ -781,7 +781,7 @@ struct mlx4_srq { | |||
| 781 | int max_gs; | 781 | int max_gs; |
| 782 | int wqe_shift; | 782 | int wqe_shift; |
| 783 | 783 | ||
| 784 | atomic_t refcount; | 784 | refcount_t refcount; |
| 785 | struct completion free; | 785 | struct completion free; |
| 786 | }; | 786 | }; |
| 787 | 787 | ||
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index 95898847c7d4..6a57ec2f1ef7 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h | |||
| @@ -35,7 +35,7 @@ | |||
| 35 | 35 | ||
| 36 | #include <rdma/ib_verbs.h> | 36 | #include <rdma/ib_verbs.h> |
| 37 | #include <linux/mlx5/driver.h> | 37 | #include <linux/mlx5/driver.h> |
| 38 | 38 | #include <linux/refcount.h> | |
| 39 | 39 | ||
| 40 | struct mlx5_core_cq { | 40 | struct mlx5_core_cq { |
| 41 | u32 cqn; | 41 | u32 cqn; |
| @@ -43,7 +43,7 @@ struct mlx5_core_cq { | |||
| 43 | __be32 *set_ci_db; | 43 | __be32 *set_ci_db; |
| 44 | __be32 *arm_db; | 44 | __be32 *arm_db; |
| 45 | struct mlx5_uars_page *uar; | 45 | struct mlx5_uars_page *uar; |
| 46 | atomic_t refcount; | 46 | refcount_t refcount; |
| 47 | struct completion free; | 47 | struct completion free; |
| 48 | unsigned vector; | 48 | unsigned vector; |
| 49 | unsigned int irqn; | 49 | unsigned int irqn; |
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 401c8972cc3a..08c77b7e59cb 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
| @@ -49,6 +49,8 @@ | |||
| 49 | #include <linux/mlx5/device.h> | 49 | #include <linux/mlx5/device.h> |
| 50 | #include <linux/mlx5/doorbell.h> | 50 | #include <linux/mlx5/doorbell.h> |
| 51 | #include <linux/mlx5/srq.h> | 51 | #include <linux/mlx5/srq.h> |
| 52 | #include <linux/timecounter.h> | ||
| 53 | #include <linux/ptp_clock_kernel.h> | ||
| 52 | 54 | ||
| 53 | enum { | 55 | enum { |
| 54 | MLX5_BOARD_ID_LEN = 64, | 56 | MLX5_BOARD_ID_LEN = 64, |
| @@ -760,6 +762,27 @@ struct mlx5_rsvd_gids { | |||
| 760 | struct ida ida; | 762 | struct ida ida; |
| 761 | }; | 763 | }; |
| 762 | 764 | ||
| 765 | #define MAX_PIN_NUM 8 | ||
| 766 | struct mlx5_pps { | ||
| 767 | u8 pin_caps[MAX_PIN_NUM]; | ||
| 768 | struct work_struct out_work; | ||
| 769 | u64 start[MAX_PIN_NUM]; | ||
| 770 | u8 enabled; | ||
| 771 | }; | ||
| 772 | |||
| 773 | struct mlx5_clock { | ||
| 774 | rwlock_t lock; | ||
| 775 | struct cyclecounter cycles; | ||
| 776 | struct timecounter tc; | ||
| 777 | struct hwtstamp_config hwtstamp_config; | ||
| 778 | u32 nominal_c_mult; | ||
| 779 | unsigned long overflow_period; | ||
| 780 | struct delayed_work overflow_work; | ||
| 781 | struct ptp_clock *ptp; | ||
| 782 | struct ptp_clock_info ptp_info; | ||
| 783 | struct mlx5_pps pps_info; | ||
| 784 | }; | ||
| 785 | |||
| 763 | struct mlx5_core_dev { | 786 | struct mlx5_core_dev { |
| 764 | struct pci_dev *pdev; | 787 | struct pci_dev *pdev; |
| 765 | /* sync pci state */ | 788 | /* sync pci state */ |
| @@ -800,6 +823,7 @@ struct mlx5_core_dev { | |||
| 800 | #ifdef CONFIG_RFS_ACCEL | 823 | #ifdef CONFIG_RFS_ACCEL |
| 801 | struct cpu_rmap *rmap; | 824 | struct cpu_rmap *rmap; |
| 802 | #endif | 825 | #endif |
| 826 | struct mlx5_clock clock; | ||
| 803 | }; | 827 | }; |
| 804 | 828 | ||
| 805 | struct mlx5_db { | 829 | struct mlx5_db { |
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 2657f9f51536..d9fb7abad445 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h | |||
| @@ -686,5 +686,31 @@ struct fsl_mc_device_id { | |||
| 686 | const char obj_type[16]; | 686 | const char obj_type[16]; |
| 687 | }; | 687 | }; |
| 688 | 688 | ||
| 689 | /** | ||
| 690 | * struct tb_service_id - Thunderbolt service identifiers | ||
| 691 | * @match_flags: Flags used to match the structure | ||
| 692 | * @protocol_key: Protocol key the service supports | ||
| 693 | * @protocol_id: Protocol id the service supports | ||
| 694 | * @protocol_version: Version of the protocol | ||
| 695 | * @protocol_revision: Revision of the protocol software | ||
| 696 | * @driver_data: Driver specific data | ||
| 697 | * | ||
| 698 | * Thunderbolt XDomain services are exposed as devices where each device | ||
| 699 | * carries the protocol information the service supports. Thunderbolt | ||
| 700 | * XDomain service drivers match against that information. | ||
| 701 | */ | ||
| 702 | struct tb_service_id { | ||
| 703 | __u32 match_flags; | ||
| 704 | char protocol_key[8 + 1]; | ||
| 705 | __u32 protocol_id; | ||
| 706 | __u32 protocol_version; | ||
| 707 | __u32 protocol_revision; | ||
| 708 | kernel_ulong_t driver_data; | ||
| 709 | }; | ||
| 710 | |||
| 711 | #define TBSVC_MATCH_PROTOCOL_KEY 0x0001 | ||
| 712 | #define TBSVC_MATCH_PROTOCOL_ID 0x0002 | ||
| 713 | #define TBSVC_MATCH_PROTOCOL_VERSION 0x0004 | ||
| 714 | #define TBSVC_MATCH_PROTOCOL_REVISION 0x0008 | ||
| 689 | 715 | ||
| 690 | #endif /* LINUX_MOD_DEVICETABLE_H */ | 716 | #endif /* LINUX_MOD_DEVICETABLE_H */ |
diff --git a/include/linux/mroute.h b/include/linux/mroute.h index d7f63339ef0b..8242d05df35e 100644 --- a/include/linux/mroute.h +++ b/include/linux/mroute.h | |||
| @@ -5,6 +5,8 @@ | |||
| 5 | #include <linux/pim.h> | 5 | #include <linux/pim.h> |
| 6 | #include <linux/rhashtable.h> | 6 | #include <linux/rhashtable.h> |
| 7 | #include <net/sock.h> | 7 | #include <net/sock.h> |
| 8 | #include <net/fib_rules.h> | ||
| 9 | #include <net/fib_notifier.h> | ||
| 8 | #include <uapi/linux/mroute.h> | 10 | #include <uapi/linux/mroute.h> |
| 9 | 11 | ||
| 10 | #ifdef CONFIG_IP_MROUTE | 12 | #ifdef CONFIG_IP_MROUTE |
| @@ -18,6 +20,7 @@ int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *); | |||
| 18 | int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg); | 20 | int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg); |
| 19 | int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); | 21 | int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); |
| 20 | int ip_mr_init(void); | 22 | int ip_mr_init(void); |
| 23 | bool ipmr_rule_default(const struct fib_rule *rule); | ||
| 21 | #else | 24 | #else |
| 22 | static inline int ip_mroute_setsockopt(struct sock *sock, int optname, | 25 | static inline int ip_mroute_setsockopt(struct sock *sock, int optname, |
| 23 | char __user *optval, unsigned int optlen) | 26 | char __user *optval, unsigned int optlen) |
| @@ -45,10 +48,16 @@ static inline int ip_mroute_opt(int opt) | |||
| 45 | { | 48 | { |
| 46 | return 0; | 49 | return 0; |
| 47 | } | 50 | } |
| 51 | |||
| 52 | static inline bool ipmr_rule_default(const struct fib_rule *rule) | ||
| 53 | { | ||
| 54 | return true; | ||
| 55 | } | ||
| 48 | #endif | 56 | #endif |
| 49 | 57 | ||
| 50 | struct vif_device { | 58 | struct vif_device { |
| 51 | struct net_device *dev; /* Device we are using */ | 59 | struct net_device *dev; /* Device we are using */ |
| 60 | struct netdev_phys_item_id dev_parent_id; /* Device parent ID */ | ||
| 52 | unsigned long bytes_in,bytes_out; | 61 | unsigned long bytes_in,bytes_out; |
| 53 | unsigned long pkt_in,pkt_out; /* Statistics */ | 62 | unsigned long pkt_in,pkt_out; /* Statistics */ |
| 54 | unsigned long rate_limit; /* Traffic shaping (NI) */ | 63 | unsigned long rate_limit; /* Traffic shaping (NI) */ |
| @@ -58,6 +67,14 @@ struct vif_device { | |||
| 58 | int link; /* Physical interface index */ | 67 | int link; /* Physical interface index */ |
| 59 | }; | 68 | }; |
| 60 | 69 | ||
| 70 | struct vif_entry_notifier_info { | ||
| 71 | struct fib_notifier_info info; | ||
| 72 | struct net_device *dev; | ||
| 73 | vifi_t vif_index; | ||
| 74 | unsigned short vif_flags; | ||
| 75 | u32 tb_id; | ||
| 76 | }; | ||
| 77 | |||
| 61 | #define VIFF_STATIC 0x8000 | 78 | #define VIFF_STATIC 0x8000 |
| 62 | 79 | ||
| 63 | #define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL) | 80 | #define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL) |
| @@ -81,9 +98,11 @@ struct mr_table { | |||
| 81 | 98 | ||
| 82 | /* mfc_flags: | 99 | /* mfc_flags: |
| 83 | * MFC_STATIC - the entry was added statically (not by a routing daemon) | 100 | * MFC_STATIC - the entry was added statically (not by a routing daemon) |
| 101 | * MFC_OFFLOAD - the entry was offloaded to the hardware | ||
| 84 | */ | 102 | */ |
| 85 | enum { | 103 | enum { |
| 86 | MFC_STATIC = BIT(0), | 104 | MFC_STATIC = BIT(0), |
| 105 | MFC_OFFLOAD = BIT(1), | ||
| 87 | }; | 106 | }; |
| 88 | 107 | ||
| 89 | struct mfc_cache_cmp_arg { | 108 | struct mfc_cache_cmp_arg { |
| @@ -109,6 +128,7 @@ struct mfc_cache_cmp_arg { | |||
| 109 | * @wrong_if: number of wrong source interface hits | 128 | * @wrong_if: number of wrong source interface hits |
| 110 | * @lastuse: time of last use of the group (traffic or update) | 129 | * @lastuse: time of last use of the group (traffic or update) |
| 111 | * @ttls: OIF TTL threshold array | 130 | * @ttls: OIF TTL threshold array |
| 131 | * @refcount: reference count for this entry | ||
| 112 | * @list: global entry list | 132 | * @list: global entry list |
| 113 | * @rcu: used for entry destruction | 133 | * @rcu: used for entry destruction |
| 114 | */ | 134 | */ |
| @@ -138,14 +158,40 @@ struct mfc_cache { | |||
| 138 | unsigned long wrong_if; | 158 | unsigned long wrong_if; |
| 139 | unsigned long lastuse; | 159 | unsigned long lastuse; |
| 140 | unsigned char ttls[MAXVIFS]; | 160 | unsigned char ttls[MAXVIFS]; |
| 161 | refcount_t refcount; | ||
| 141 | } res; | 162 | } res; |
| 142 | } mfc_un; | 163 | } mfc_un; |
| 143 | struct list_head list; | 164 | struct list_head list; |
| 144 | struct rcu_head rcu; | 165 | struct rcu_head rcu; |
| 145 | }; | 166 | }; |
| 146 | 167 | ||
| 168 | struct mfc_entry_notifier_info { | ||
| 169 | struct fib_notifier_info info; | ||
| 170 | struct mfc_cache *mfc; | ||
| 171 | u32 tb_id; | ||
| 172 | }; | ||
| 173 | |||
| 147 | struct rtmsg; | 174 | struct rtmsg; |
| 148 | int ipmr_get_route(struct net *net, struct sk_buff *skb, | 175 | int ipmr_get_route(struct net *net, struct sk_buff *skb, |
| 149 | __be32 saddr, __be32 daddr, | 176 | __be32 saddr, __be32 daddr, |
| 150 | struct rtmsg *rtm, u32 portid); | 177 | struct rtmsg *rtm, u32 portid); |
| 178 | |||
| 179 | #ifdef CONFIG_IP_MROUTE | ||
| 180 | void ipmr_cache_free(struct mfc_cache *mfc_cache); | ||
| 181 | #else | ||
| 182 | static inline void ipmr_cache_free(struct mfc_cache *mfc_cache) | ||
| 183 | { | ||
| 184 | } | ||
| 185 | #endif | ||
| 186 | |||
| 187 | static inline void ipmr_cache_put(struct mfc_cache *c) | ||
| 188 | { | ||
| 189 | if (refcount_dec_and_test(&c->mfc_un.res.refcount)) | ||
| 190 | ipmr_cache_free(c); | ||
| 191 | } | ||
| 192 | static inline void ipmr_cache_hold(struct mfc_cache *c) | ||
| 193 | { | ||
| 194 | refcount_inc(&c->mfc_un.res.refcount); | ||
| 195 | } | ||
| 196 | |||
| 151 | #endif | 197 | #endif |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 2eaac7d75af4..6c7960c8338a 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
| @@ -55,7 +55,7 @@ | |||
| 55 | struct netpoll_info; | 55 | struct netpoll_info; |
| 56 | struct device; | 56 | struct device; |
| 57 | struct phy_device; | 57 | struct phy_device; |
| 58 | struct dsa_switch_tree; | 58 | struct dsa_port; |
| 59 | 59 | ||
| 60 | /* 802.11 specific */ | 60 | /* 802.11 specific */ |
| 61 | struct wireless_dev; | 61 | struct wireless_dev; |
| @@ -775,6 +775,7 @@ enum tc_setup_type { | |||
| 775 | TC_SETUP_CLSFLOWER, | 775 | TC_SETUP_CLSFLOWER, |
| 776 | TC_SETUP_CLSMATCHALL, | 776 | TC_SETUP_CLSMATCHALL, |
| 777 | TC_SETUP_CLSBPF, | 777 | TC_SETUP_CLSBPF, |
| 778 | TC_SETUP_BLOCK, | ||
| 778 | }; | 779 | }; |
| 779 | 780 | ||
| 780 | /* These structures hold the attributes of xdp state that are being passed | 781 | /* These structures hold the attributes of xdp state that are being passed |
| @@ -826,6 +827,11 @@ struct xfrmdev_ops { | |||
| 826 | }; | 827 | }; |
| 827 | #endif | 828 | #endif |
| 828 | 829 | ||
| 830 | struct dev_ifalias { | ||
| 831 | struct rcu_head rcuhead; | ||
| 832 | char ifalias[]; | ||
| 833 | }; | ||
| 834 | |||
| 829 | /* | 835 | /* |
| 830 | * This structure defines the management hooks for network devices. | 836 | * This structure defines the management hooks for network devices. |
| 831 | * The following hooks can be defined; unless noted otherwise, they are | 837 | * The following hooks can be defined; unless noted otherwise, they are |
| @@ -1241,7 +1247,8 @@ struct net_device_ops { | |||
| 1241 | u32 flow_id); | 1247 | u32 flow_id); |
| 1242 | #endif | 1248 | #endif |
| 1243 | int (*ndo_add_slave)(struct net_device *dev, | 1249 | int (*ndo_add_slave)(struct net_device *dev, |
| 1244 | struct net_device *slave_dev); | 1250 | struct net_device *slave_dev, |
| 1251 | struct netlink_ext_ack *extack); | ||
| 1245 | int (*ndo_del_slave)(struct net_device *dev, | 1252 | int (*ndo_del_slave)(struct net_device *dev, |
| 1246 | struct net_device *slave_dev); | 1253 | struct net_device *slave_dev); |
| 1247 | netdev_features_t (*ndo_fix_features)(struct net_device *dev, | 1254 | netdev_features_t (*ndo_fix_features)(struct net_device *dev, |
| @@ -1632,7 +1639,7 @@ enum netdev_priv_flags { | |||
| 1632 | struct net_device { | 1639 | struct net_device { |
| 1633 | char name[IFNAMSIZ]; | 1640 | char name[IFNAMSIZ]; |
| 1634 | struct hlist_node name_hlist; | 1641 | struct hlist_node name_hlist; |
| 1635 | char *ifalias; | 1642 | struct dev_ifalias __rcu *ifalias; |
| 1636 | /* | 1643 | /* |
| 1637 | * I/O specific fields | 1644 | * I/O specific fields |
| 1638 | * FIXME: Merge these and struct ifmap into one | 1645 | * FIXME: Merge these and struct ifmap into one |
| @@ -1752,7 +1759,7 @@ struct net_device { | |||
| 1752 | struct vlan_info __rcu *vlan_info; | 1759 | struct vlan_info __rcu *vlan_info; |
| 1753 | #endif | 1760 | #endif |
| 1754 | #if IS_ENABLED(CONFIG_NET_DSA) | 1761 | #if IS_ENABLED(CONFIG_NET_DSA) |
| 1755 | struct dsa_switch_tree *dsa_ptr; | 1762 | struct dsa_port *dsa_ptr; |
| 1756 | #endif | 1763 | #endif |
| 1757 | #if IS_ENABLED(CONFIG_TIPC) | 1764 | #if IS_ENABLED(CONFIG_TIPC) |
| 1758 | struct tipc_bearer __rcu *tipc_ptr; | 1765 | struct tipc_bearer __rcu *tipc_ptr; |
| @@ -2304,7 +2311,8 @@ int register_netdevice_notifier(struct notifier_block *nb); | |||
| 2304 | int unregister_netdevice_notifier(struct notifier_block *nb); | 2311 | int unregister_netdevice_notifier(struct notifier_block *nb); |
| 2305 | 2312 | ||
| 2306 | struct netdev_notifier_info { | 2313 | struct netdev_notifier_info { |
| 2307 | struct net_device *dev; | 2314 | struct net_device *dev; |
| 2315 | struct netlink_ext_ack *extack; | ||
| 2308 | }; | 2316 | }; |
| 2309 | 2317 | ||
| 2310 | struct netdev_notifier_change_info { | 2318 | struct netdev_notifier_change_info { |
| @@ -2329,6 +2337,7 @@ static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, | |||
| 2329 | struct net_device *dev) | 2337 | struct net_device *dev) |
| 2330 | { | 2338 | { |
| 2331 | info->dev = dev; | 2339 | info->dev = dev; |
| 2340 | info->extack = NULL; | ||
| 2332 | } | 2341 | } |
| 2333 | 2342 | ||
| 2334 | static inline struct net_device * | 2343 | static inline struct net_device * |
| @@ -2337,6 +2346,12 @@ netdev_notifier_info_to_dev(const struct netdev_notifier_info *info) | |||
| 2337 | return info->dev; | 2346 | return info->dev; |
| 2338 | } | 2347 | } |
| 2339 | 2348 | ||
| 2349 | static inline struct netlink_ext_ack * | ||
| 2350 | netdev_notifier_info_to_extack(const struct netdev_notifier_info *info) | ||
| 2351 | { | ||
| 2352 | return info->extack; | ||
| 2353 | } | ||
| 2354 | |||
| 2340 | int call_netdevice_notifiers(unsigned long val, struct net_device *dev); | 2355 | int call_netdevice_notifiers(unsigned long val, struct net_device *dev); |
| 2341 | 2356 | ||
| 2342 | 2357 | ||
| @@ -3246,6 +3261,7 @@ int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb); | |||
| 3246 | int netif_rx(struct sk_buff *skb); | 3261 | int netif_rx(struct sk_buff *skb); |
| 3247 | int netif_rx_ni(struct sk_buff *skb); | 3262 | int netif_rx_ni(struct sk_buff *skb); |
| 3248 | int netif_receive_skb(struct sk_buff *skb); | 3263 | int netif_receive_skb(struct sk_buff *skb); |
| 3264 | int netif_receive_skb_core(struct sk_buff *skb); | ||
| 3249 | gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb); | 3265 | gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb); |
| 3250 | void napi_gro_flush(struct napi_struct *napi, bool flush_old); | 3266 | void napi_gro_flush(struct napi_struct *napi, bool flush_old); |
| 3251 | struct sk_buff *napi_get_frags(struct napi_struct *napi); | 3267 | struct sk_buff *napi_get_frags(struct napi_struct *napi); |
| @@ -3275,6 +3291,7 @@ void __dev_notify_flags(struct net_device *, unsigned int old_flags, | |||
| 3275 | unsigned int gchanges); | 3291 | unsigned int gchanges); |
| 3276 | int dev_change_name(struct net_device *, const char *); | 3292 | int dev_change_name(struct net_device *, const char *); |
| 3277 | int dev_set_alias(struct net_device *, const char *, size_t); | 3293 | int dev_set_alias(struct net_device *, const char *, size_t); |
| 3294 | int dev_get_alias(const struct net_device *, char *, size_t); | ||
| 3278 | int dev_change_net_namespace(struct net_device *, struct net *, const char *); | 3295 | int dev_change_net_namespace(struct net_device *, struct net *, const char *); |
| 3279 | int __dev_set_mtu(struct net_device *, int); | 3296 | int __dev_set_mtu(struct net_device *, int); |
| 3280 | int dev_set_mtu(struct net_device *, int); | 3297 | int dev_set_mtu(struct net_device *, int); |
| @@ -3907,10 +3924,12 @@ void *netdev_adjacent_get_private(struct list_head *adj_list); | |||
| 3907 | void *netdev_lower_get_first_private_rcu(struct net_device *dev); | 3924 | void *netdev_lower_get_first_private_rcu(struct net_device *dev); |
| 3908 | struct net_device *netdev_master_upper_dev_get(struct net_device *dev); | 3925 | struct net_device *netdev_master_upper_dev_get(struct net_device *dev); |
| 3909 | struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev); | 3926 | struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev); |
| 3910 | int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev); | 3927 | int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev, |
| 3928 | struct netlink_ext_ack *extack); | ||
| 3911 | int netdev_master_upper_dev_link(struct net_device *dev, | 3929 | int netdev_master_upper_dev_link(struct net_device *dev, |
| 3912 | struct net_device *upper_dev, | 3930 | struct net_device *upper_dev, |
| 3913 | void *upper_priv, void *upper_info); | 3931 | void *upper_priv, void *upper_info, |
| 3932 | struct netlink_ext_ack *extack); | ||
| 3914 | void netdev_upper_dev_unlink(struct net_device *dev, | 3933 | void netdev_upper_dev_unlink(struct net_device *dev, |
| 3915 | struct net_device *upper_dev); | 3934 | struct net_device *upper_dev); |
| 3916 | void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); | 3935 | void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); |
diff --git a/include/linux/once.h b/include/linux/once.h index 9c98aaa87cbc..724724918e8b 100644 --- a/include/linux/once.h +++ b/include/linux/once.h | |||
| @@ -5,7 +5,7 @@ | |||
| 5 | #include <linux/jump_label.h> | 5 | #include <linux/jump_label.h> |
| 6 | 6 | ||
| 7 | bool __do_once_start(bool *done, unsigned long *flags); | 7 | bool __do_once_start(bool *done, unsigned long *flags); |
| 8 | void __do_once_done(bool *done, struct static_key *once_key, | 8 | void __do_once_done(bool *done, struct static_key_true *once_key, |
| 9 | unsigned long *flags); | 9 | unsigned long *flags); |
| 10 | 10 | ||
| 11 | /* Call a function exactly once. The idea of DO_ONCE() is to perform | 11 | /* Call a function exactly once. The idea of DO_ONCE() is to perform |
| @@ -38,8 +38,8 @@ void __do_once_done(bool *done, struct static_key *once_key, | |||
| 38 | ({ \ | 38 | ({ \ |
| 39 | bool ___ret = false; \ | 39 | bool ___ret = false; \ |
| 40 | static bool ___done = false; \ | 40 | static bool ___done = false; \ |
| 41 | static struct static_key ___once_key = STATIC_KEY_INIT_TRUE; \ | 41 | static DEFINE_STATIC_KEY_TRUE(___once_key); \ |
| 42 | if (static_key_true(&___once_key)) { \ | 42 | if (static_branch_unlikely(&___once_key)) { \ |
| 43 | unsigned long ___flags; \ | 43 | unsigned long ___flags; \ |
| 44 | ___ret = __do_once_start(&___done, &___flags); \ | 44 | ___ret = __do_once_start(&___done, &___flags); \ |
| 45 | if (unlikely(___ret)) { \ | 45 | if (unlikely(___ret)) { \ |
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 8e22f24ded6a..79b18a20cf5d 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
| @@ -806,6 +806,7 @@ struct perf_output_handle { | |||
| 806 | struct bpf_perf_event_data_kern { | 806 | struct bpf_perf_event_data_kern { |
| 807 | struct pt_regs *regs; | 807 | struct pt_regs *regs; |
| 808 | struct perf_sample_data *data; | 808 | struct perf_sample_data *data; |
| 809 | struct perf_event *event; | ||
| 809 | }; | 810 | }; |
| 810 | 811 | ||
| 811 | #ifdef CONFIG_CGROUP_PERF | 812 | #ifdef CONFIG_CGROUP_PERF |
| @@ -884,7 +885,8 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, | |||
| 884 | void *context); | 885 | void *context); |
| 885 | extern void perf_pmu_migrate_context(struct pmu *pmu, | 886 | extern void perf_pmu_migrate_context(struct pmu *pmu, |
| 886 | int src_cpu, int dst_cpu); | 887 | int src_cpu, int dst_cpu); |
| 887 | int perf_event_read_local(struct perf_event *event, u64 *value); | 888 | int perf_event_read_local(struct perf_event *event, u64 *value, |
| 889 | u64 *enabled, u64 *running); | ||
| 888 | extern u64 perf_event_read_value(struct perf_event *event, | 890 | extern u64 perf_event_read_value(struct perf_event *event, |
| 889 | u64 *enabled, u64 *running); | 891 | u64 *enabled, u64 *running); |
| 890 | 892 | ||
| @@ -1286,7 +1288,8 @@ static inline const struct perf_event_attr *perf_event_attrs(struct perf_event * | |||
| 1286 | { | 1288 | { |
| 1287 | return ERR_PTR(-EINVAL); | 1289 | return ERR_PTR(-EINVAL); |
| 1288 | } | 1290 | } |
| 1289 | static inline int perf_event_read_local(struct perf_event *event, u64 *value) | 1291 | static inline int perf_event_read_local(struct perf_event *event, u64 *value, |
| 1292 | u64 *enabled, u64 *running) | ||
| 1290 | { | 1293 | { |
| 1291 | return -EINVAL; | 1294 | return -EINVAL; |
| 1292 | } | 1295 | } |
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index dd7a3b86bb9e..e755954d85fd 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h | |||
| @@ -64,6 +64,7 @@ enum qed_ll2_roce_flavor_type { | |||
| 64 | enum qed_ll2_tx_dest { | 64 | enum qed_ll2_tx_dest { |
| 65 | QED_LL2_TX_DEST_NW, /* Light L2 TX Destination to the Network */ | 65 | QED_LL2_TX_DEST_NW, /* Light L2 TX Destination to the Network */ |
| 66 | QED_LL2_TX_DEST_LB, /* Light L2 TX Destination to the Loopback */ | 66 | QED_LL2_TX_DEST_LB, /* Light L2 TX Destination to the Loopback */ |
| 67 | QED_LL2_TX_DEST_DROP, /* Light L2 Drop the TX packet */ | ||
| 67 | QED_LL2_TX_DEST_MAX | 68 | QED_LL2_TX_DEST_MAX |
| 68 | }; | 69 | }; |
| 69 | 70 | ||
| @@ -101,6 +102,7 @@ struct qed_ll2_comp_rx_data { | |||
| 101 | void *cookie; | 102 | void *cookie; |
| 102 | dma_addr_t rx_buf_addr; | 103 | dma_addr_t rx_buf_addr; |
| 103 | u16 parse_flags; | 104 | u16 parse_flags; |
| 105 | u16 err_flags; | ||
| 104 | u16 vlan; | 106 | u16 vlan; |
| 105 | bool b_last_packet; | 107 | bool b_last_packet; |
| 106 | u8 connection_handle; | 108 | u8 connection_handle; |
| @@ -149,11 +151,16 @@ void (*qed_ll2_release_tx_packet_cb)(void *cxt, | |||
| 149 | dma_addr_t first_frag_addr, | 151 | dma_addr_t first_frag_addr, |
| 150 | bool b_last_fragment, bool b_last_packet); | 152 | bool b_last_fragment, bool b_last_packet); |
| 151 | 153 | ||
| 154 | typedef | ||
| 155 | void (*qed_ll2_slowpath_cb)(void *cxt, u8 connection_handle, | ||
| 156 | u32 opaque_data_0, u32 opaque_data_1); | ||
| 157 | |||
| 152 | struct qed_ll2_cbs { | 158 | struct qed_ll2_cbs { |
| 153 | qed_ll2_complete_rx_packet_cb rx_comp_cb; | 159 | qed_ll2_complete_rx_packet_cb rx_comp_cb; |
| 154 | qed_ll2_release_rx_packet_cb rx_release_cb; | 160 | qed_ll2_release_rx_packet_cb rx_release_cb; |
| 155 | qed_ll2_complete_tx_packet_cb tx_comp_cb; | 161 | qed_ll2_complete_tx_packet_cb tx_comp_cb; |
| 156 | qed_ll2_release_tx_packet_cb tx_release_cb; | 162 | qed_ll2_release_tx_packet_cb tx_release_cb; |
| 163 | qed_ll2_slowpath_cb slowpath_cb; | ||
| 157 | void *cookie; | 164 | void *cookie; |
| 158 | }; | 165 | }; |
| 159 | 166 | ||
| @@ -170,6 +177,7 @@ struct qed_ll2_acquire_data_inputs { | |||
| 170 | enum qed_ll2_tx_dest tx_dest; | 177 | enum qed_ll2_tx_dest tx_dest; |
| 171 | enum qed_ll2_error_handle ai_err_packet_too_big; | 178 | enum qed_ll2_error_handle ai_err_packet_too_big; |
| 172 | enum qed_ll2_error_handle ai_err_no_buf; | 179 | enum qed_ll2_error_handle ai_err_no_buf; |
| 180 | bool secondary_queue; | ||
| 173 | u8 gsi_enable; | 181 | u8 gsi_enable; |
| 174 | }; | 182 | }; |
| 175 | 183 | ||
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index dea59c8eec54..1251638e60d3 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h | |||
| @@ -17,9 +17,11 @@ extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, | |||
| 17 | u32 id, long expires, u32 error); | 17 | u32 id, long expires, u32 error); |
| 18 | 18 | ||
| 19 | void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags); | 19 | void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags); |
| 20 | void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change, | ||
| 21 | gfp_t flags, int *new_nsid); | ||
| 20 | struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, | 22 | struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, |
| 21 | unsigned change, u32 event, | 23 | unsigned change, u32 event, |
| 22 | gfp_t flags); | 24 | gfp_t flags, int *new_nsid); |
| 23 | void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, | 25 | void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, |
| 24 | gfp_t flags); | 26 | gfp_t flags); |
| 25 | 27 | ||
diff --git a/include/linux/security.h b/include/linux/security.h index ce6265960d6c..18800b0911e5 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
| @@ -31,6 +31,7 @@ | |||
| 31 | #include <linux/string.h> | 31 | #include <linux/string.h> |
| 32 | #include <linux/mm.h> | 32 | #include <linux/mm.h> |
| 33 | #include <linux/fs.h> | 33 | #include <linux/fs.h> |
| 34 | #include <linux/bpf.h> | ||
| 34 | 35 | ||
| 35 | struct linux_binprm; | 36 | struct linux_binprm; |
| 36 | struct cred; | 37 | struct cred; |
| @@ -1730,6 +1731,50 @@ static inline void securityfs_remove(struct dentry *dentry) | |||
| 1730 | 1731 | ||
| 1731 | #endif | 1732 | #endif |
| 1732 | 1733 | ||
| 1734 | #ifdef CONFIG_BPF_SYSCALL | ||
| 1735 | #ifdef CONFIG_SECURITY | ||
| 1736 | extern int security_bpf(int cmd, union bpf_attr *attr, unsigned int size); | ||
| 1737 | extern int security_bpf_map(struct bpf_map *map, fmode_t fmode); | ||
| 1738 | extern int security_bpf_prog(struct bpf_prog *prog); | ||
| 1739 | extern int security_bpf_map_alloc(struct bpf_map *map); | ||
| 1740 | extern void security_bpf_map_free(struct bpf_map *map); | ||
| 1741 | extern int security_bpf_prog_alloc(struct bpf_prog_aux *aux); | ||
| 1742 | extern void security_bpf_prog_free(struct bpf_prog_aux *aux); | ||
| 1743 | #else | ||
| 1744 | static inline int security_bpf(int cmd, union bpf_attr *attr, | ||
| 1745 | unsigned int size) | ||
| 1746 | { | ||
| 1747 | return 0; | ||
| 1748 | } | ||
| 1749 | |||
| 1750 | static inline int security_bpf_map(struct bpf_map *map, fmode_t fmode) | ||
| 1751 | { | ||
| 1752 | return 0; | ||
| 1753 | } | ||
| 1754 | |||
| 1755 | static inline int security_bpf_prog(struct bpf_prog *prog) | ||
| 1756 | { | ||
| 1757 | return 0; | ||
| 1758 | } | ||
| 1759 | |||
| 1760 | static inline int security_bpf_map_alloc(struct bpf_map *map) | ||
| 1761 | { | ||
| 1762 | return 0; | ||
| 1763 | } | ||
| 1764 | |||
| 1765 | static inline void security_bpf_map_free(struct bpf_map *map) | ||
| 1766 | { } | ||
| 1767 | |||
| 1768 | static inline int security_bpf_prog_alloc(struct bpf_prog_aux *aux) | ||
| 1769 | { | ||
| 1770 | return 0; | ||
| 1771 | } | ||
| 1772 | |||
| 1773 | static inline void security_bpf_prog_free(struct bpf_prog_aux *aux) | ||
| 1774 | { } | ||
| 1775 | #endif /* CONFIG_SECURITY */ | ||
| 1776 | #endif /* CONFIG_BPF_SYSCALL */ | ||
| 1777 | |||
| 1733 | #ifdef CONFIG_SECURITY | 1778 | #ifdef CONFIG_SECURITY |
| 1734 | 1779 | ||
| 1735 | static inline char *alloc_secdata(void) | 1780 | static inline char *alloc_secdata(void) |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 72299ef00061..03634ec2f918 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
| @@ -489,8 +489,9 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, | |||
| 489 | * the end of the header data, ie. at skb->end. | 489 | * the end of the header data, ie. at skb->end. |
| 490 | */ | 490 | */ |
| 491 | struct skb_shared_info { | 491 | struct skb_shared_info { |
| 492 | unsigned short _unused; | 492 | __u8 __unused; |
| 493 | unsigned char nr_frags; | 493 | __u8 meta_len; |
| 494 | __u8 nr_frags; | ||
| 494 | __u8 tx_flags; | 495 | __u8 tx_flags; |
| 495 | unsigned short gso_size; | 496 | unsigned short gso_size; |
| 496 | /* Warning: this field is not always filled in (UFO)! */ | 497 | /* Warning: this field is not always filled in (UFO)! */ |
| @@ -616,6 +617,7 @@ typedef unsigned char *sk_buff_data_t; | |||
| 616 | * @nf_trace: netfilter packet trace flag | 617 | * @nf_trace: netfilter packet trace flag |
| 617 | * @protocol: Packet protocol from driver | 618 | * @protocol: Packet protocol from driver |
| 618 | * @destructor: Destruct function | 619 | * @destructor: Destruct function |
| 620 | * @tcp_tsorted_anchor: list structure for TCP (tp->tsorted_sent_queue) | ||
| 619 | * @_nfct: Associated connection, if any (with nfctinfo bits) | 621 | * @_nfct: Associated connection, if any (with nfctinfo bits) |
| 620 | * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c | 622 | * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c |
| 621 | * @skb_iif: ifindex of device we arrived on | 623 | * @skb_iif: ifindex of device we arrived on |
| @@ -661,8 +663,12 @@ struct sk_buff { | |||
| 661 | struct sk_buff *prev; | 663 | struct sk_buff *prev; |
| 662 | 664 | ||
| 663 | union { | 665 | union { |
| 664 | ktime_t tstamp; | 666 | struct net_device *dev; |
| 665 | u64 skb_mstamp; | 667 | /* Some protocols might use this space to store information, |
| 668 | * while device pointer would be NULL. | ||
| 669 | * UDP receive path is one user. | ||
| 670 | */ | ||
| 671 | unsigned long dev_scratch; | ||
| 666 | }; | 672 | }; |
| 667 | }; | 673 | }; |
| 668 | struct rb_node rbnode; /* used in netem & tcp stack */ | 674 | struct rb_node rbnode; /* used in netem & tcp stack */ |
| @@ -670,12 +676,8 @@ struct sk_buff { | |||
| 670 | struct sock *sk; | 676 | struct sock *sk; |
| 671 | 677 | ||
| 672 | union { | 678 | union { |
| 673 | struct net_device *dev; | 679 | ktime_t tstamp; |
| 674 | /* Some protocols might use this space to store information, | 680 | u64 skb_mstamp; |
| 675 | * while device pointer would be NULL. | ||
| 676 | * UDP receive path is one user. | ||
| 677 | */ | ||
| 678 | unsigned long dev_scratch; | ||
| 679 | }; | 681 | }; |
| 680 | /* | 682 | /* |
| 681 | * This is the control buffer. It is free to use for every | 683 | * This is the control buffer. It is free to use for every |
| @@ -685,8 +687,14 @@ struct sk_buff { | |||
| 685 | */ | 687 | */ |
| 686 | char cb[48] __aligned(8); | 688 | char cb[48] __aligned(8); |
| 687 | 689 | ||
| 688 | unsigned long _skb_refdst; | 690 | union { |
| 689 | void (*destructor)(struct sk_buff *skb); | 691 | struct { |
| 692 | unsigned long _skb_refdst; | ||
| 693 | void (*destructor)(struct sk_buff *skb); | ||
| 694 | }; | ||
| 695 | struct list_head tcp_tsorted_anchor; | ||
| 696 | }; | ||
| 697 | |||
| 690 | #ifdef CONFIG_XFRM | 698 | #ifdef CONFIG_XFRM |
| 691 | struct sec_path *sp; | 699 | struct sec_path *sp; |
| 692 | #endif | 700 | #endif |
| @@ -771,6 +779,7 @@ struct sk_buff { | |||
| 771 | __u8 remcsum_offload:1; | 779 | __u8 remcsum_offload:1; |
| 772 | #ifdef CONFIG_NET_SWITCHDEV | 780 | #ifdef CONFIG_NET_SWITCHDEV |
| 773 | __u8 offload_fwd_mark:1; | 781 | __u8 offload_fwd_mark:1; |
| 782 | __u8 offload_mr_fwd_mark:1; | ||
| 774 | #endif | 783 | #endif |
| 775 | #ifdef CONFIG_NET_CLS_ACT | 784 | #ifdef CONFIG_NET_CLS_ACT |
| 776 | __u8 tc_skip_classify:1; | 785 | __u8 tc_skip_classify:1; |
| @@ -1457,27 +1466,8 @@ static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri) | |||
| 1457 | } | 1466 | } |
| 1458 | 1467 | ||
| 1459 | /** | 1468 | /** |
| 1460 | * skb_header_release - release reference to header | ||
| 1461 | * @skb: buffer to operate on | ||
| 1462 | * | ||
| 1463 | * Drop a reference to the header part of the buffer. This is done | ||
| 1464 | * by acquiring a payload reference. You must not read from the header | ||
| 1465 | * part of skb->data after this. | ||
| 1466 | * Note : Check if you can use __skb_header_release() instead. | ||
| 1467 | */ | ||
| 1468 | static inline void skb_header_release(struct sk_buff *skb) | ||
| 1469 | { | ||
| 1470 | BUG_ON(skb->nohdr); | ||
| 1471 | skb->nohdr = 1; | ||
| 1472 | atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref); | ||
| 1473 | } | ||
| 1474 | |||
| 1475 | /** | ||
| 1476 | * __skb_header_release - release reference to header | 1469 | * __skb_header_release - release reference to header |
| 1477 | * @skb: buffer to operate on | 1470 | * @skb: buffer to operate on |
| 1478 | * | ||
| 1479 | * Variant of skb_header_release() assuming skb is private to caller. | ||
| 1480 | * We can avoid one atomic operation. | ||
| 1481 | */ | 1471 | */ |
| 1482 | static inline void __skb_header_release(struct sk_buff *skb) | 1472 | static inline void __skb_header_release(struct sk_buff *skb) |
| 1483 | { | 1473 | { |
| @@ -3168,6 +3158,12 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len) | |||
| 3168 | return __skb_grow(skb, len); | 3158 | return __skb_grow(skb, len); |
| 3169 | } | 3159 | } |
| 3170 | 3160 | ||
| 3161 | #define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode) | ||
| 3162 | #define skb_rb_first(root) rb_to_skb(rb_first(root)) | ||
| 3163 | #define skb_rb_last(root) rb_to_skb(rb_last(root)) | ||
| 3164 | #define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode)) | ||
| 3165 | #define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode)) | ||
| 3166 | |||
| 3171 | #define skb_queue_walk(queue, skb) \ | 3167 | #define skb_queue_walk(queue, skb) \ |
| 3172 | for (skb = (queue)->next; \ | 3168 | for (skb = (queue)->next; \ |
| 3173 | skb != (struct sk_buff *)(queue); \ | 3169 | skb != (struct sk_buff *)(queue); \ |
| @@ -3182,6 +3178,18 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len) | |||
| 3182 | for (; skb != (struct sk_buff *)(queue); \ | 3178 | for (; skb != (struct sk_buff *)(queue); \ |
| 3183 | skb = skb->next) | 3179 | skb = skb->next) |
| 3184 | 3180 | ||
| 3181 | #define skb_rbtree_walk(skb, root) \ | ||
| 3182 | for (skb = skb_rb_first(root); skb != NULL; \ | ||
| 3183 | skb = skb_rb_next(skb)) | ||
| 3184 | |||
| 3185 | #define skb_rbtree_walk_from(skb) \ | ||
| 3186 | for (; skb != NULL; \ | ||
| 3187 | skb = skb_rb_next(skb)) | ||
| 3188 | |||
| 3189 | #define skb_rbtree_walk_from_safe(skb, tmp) \ | ||
| 3190 | for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \ | ||
| 3191 | skb = tmp) | ||
| 3192 | |||
| 3185 | #define skb_queue_walk_from_safe(queue, skb, tmp) \ | 3193 | #define skb_queue_walk_from_safe(queue, skb, tmp) \ |
| 3186 | for (tmp = skb->next; \ | 3194 | for (tmp = skb->next; \ |
| 3187 | skb != (struct sk_buff *)(queue); \ | 3195 | skb != (struct sk_buff *)(queue); \ |
| @@ -3419,6 +3427,69 @@ static inline ktime_t net_invalid_timestamp(void) | |||
| 3419 | return 0; | 3427 | return 0; |
| 3420 | } | 3428 | } |
| 3421 | 3429 | ||
| 3430 | static inline u8 skb_metadata_len(const struct sk_buff *skb) | ||
| 3431 | { | ||
| 3432 | return skb_shinfo(skb)->meta_len; | ||
| 3433 | } | ||
| 3434 | |||
| 3435 | static inline void *skb_metadata_end(const struct sk_buff *skb) | ||
| 3436 | { | ||
| 3437 | return skb_mac_header(skb); | ||
| 3438 | } | ||
| 3439 | |||
| 3440 | static inline bool __skb_metadata_differs(const struct sk_buff *skb_a, | ||
| 3441 | const struct sk_buff *skb_b, | ||
| 3442 | u8 meta_len) | ||
| 3443 | { | ||
| 3444 | const void *a = skb_metadata_end(skb_a); | ||
| 3445 | const void *b = skb_metadata_end(skb_b); | ||
| 3446 | /* Using more efficient varaiant than plain call to memcmp(). */ | ||
| 3447 | #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 | ||
| 3448 | u64 diffs = 0; | ||
| 3449 | |||
| 3450 | switch (meta_len) { | ||
| 3451 | #define __it(x, op) (x -= sizeof(u##op)) | ||
| 3452 | #define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op)) | ||
| 3453 | case 32: diffs |= __it_diff(a, b, 64); | ||
| 3454 | case 24: diffs |= __it_diff(a, b, 64); | ||
| 3455 | case 16: diffs |= __it_diff(a, b, 64); | ||
| 3456 | case 8: diffs |= __it_diff(a, b, 64); | ||
| 3457 | break; | ||
| 3458 | case 28: diffs |= __it_diff(a, b, 64); | ||
| 3459 | case 20: diffs |= __it_diff(a, b, 64); | ||
| 3460 | case 12: diffs |= __it_diff(a, b, 64); | ||
| 3461 | case 4: diffs |= __it_diff(a, b, 32); | ||
| 3462 | break; | ||
| 3463 | } | ||
| 3464 | return diffs; | ||
| 3465 | #else | ||
| 3466 | return memcmp(a - meta_len, b - meta_len, meta_len); | ||
| 3467 | #endif | ||
| 3468 | } | ||
| 3469 | |||
| 3470 | static inline bool skb_metadata_differs(const struct sk_buff *skb_a, | ||
| 3471 | const struct sk_buff *skb_b) | ||
| 3472 | { | ||
| 3473 | u8 len_a = skb_metadata_len(skb_a); | ||
| 3474 | u8 len_b = skb_metadata_len(skb_b); | ||
| 3475 | |||
| 3476 | if (!(len_a | len_b)) | ||
| 3477 | return false; | ||
| 3478 | |||
| 3479 | return len_a != len_b ? | ||
| 3480 | true : __skb_metadata_differs(skb_a, skb_b, len_a); | ||
| 3481 | } | ||
| 3482 | |||
| 3483 | static inline void skb_metadata_set(struct sk_buff *skb, u8 meta_len) | ||
| 3484 | { | ||
| 3485 | skb_shinfo(skb)->meta_len = meta_len; | ||
| 3486 | } | ||
| 3487 | |||
| 3488 | static inline void skb_metadata_clear(struct sk_buff *skb) | ||
| 3489 | { | ||
| 3490 | skb_metadata_set(skb, 0); | ||
| 3491 | } | ||
| 3492 | |||
| 3422 | struct sk_buff *skb_clone_sk(struct sk_buff *skb); | 3493 | struct sk_buff *skb_clone_sk(struct sk_buff *skb); |
| 3423 | 3494 | ||
| 3424 | #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING | 3495 | #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING |
diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 4aa40ef02d32..1d2c44e09e31 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h | |||
| @@ -191,6 +191,7 @@ struct tcp_sock { | |||
| 191 | u32 tsoffset; /* timestamp offset */ | 191 | u32 tsoffset; /* timestamp offset */ |
| 192 | 192 | ||
| 193 | struct list_head tsq_node; /* anchor in tsq_tasklet.head list */ | 193 | struct list_head tsq_node; /* anchor in tsq_tasklet.head list */ |
| 194 | struct list_head tsorted_sent_queue; /* time-sorted sent but un-SACKed skbs */ | ||
| 194 | 195 | ||
| 195 | u32 snd_wl1; /* Sequence for window update */ | 196 | u32 snd_wl1; /* Sequence for window update */ |
| 196 | u32 snd_wnd; /* The window we expect to receive */ | 197 | u32 snd_wnd; /* The window we expect to receive */ |
diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h new file mode 100644 index 000000000000..7b69853188b1 --- /dev/null +++ b/include/linux/thunderbolt.h | |||
| @@ -0,0 +1,598 @@ | |||
| 1 | /* | ||
| 2 | * Thunderbolt service API | ||
| 3 | * | ||
| 4 | * Copyright (C) 2014 Andreas Noever <andreas.noever@gmail.com> | ||
| 5 | * Copyright (C) 2017, Intel Corporation | ||
| 6 | * Authors: Michael Jamet <michael.jamet@intel.com> | ||
| 7 | * Mika Westerberg <mika.westerberg@linux.intel.com> | ||
| 8 | * | ||
| 9 | * This program is free software; you can redistribute it and/or modify | ||
| 10 | * it under the terms of the GNU General Public License version 2 as | ||
| 11 | * published by the Free Software Foundation. | ||
| 12 | */ | ||
| 13 | |||
| 14 | #ifndef THUNDERBOLT_H_ | ||
| 15 | #define THUNDERBOLT_H_ | ||
| 16 | |||
| 17 | #include <linux/device.h> | ||
| 18 | #include <linux/idr.h> | ||
| 19 | #include <linux/list.h> | ||
| 20 | #include <linux/mutex.h> | ||
| 21 | #include <linux/mod_devicetable.h> | ||
| 22 | #include <linux/pci.h> | ||
| 23 | #include <linux/uuid.h> | ||
| 24 | #include <linux/workqueue.h> | ||
| 25 | |||
| 26 | enum tb_cfg_pkg_type { | ||
| 27 | TB_CFG_PKG_READ = 1, | ||
| 28 | TB_CFG_PKG_WRITE = 2, | ||
| 29 | TB_CFG_PKG_ERROR = 3, | ||
| 30 | TB_CFG_PKG_NOTIFY_ACK = 4, | ||
| 31 | TB_CFG_PKG_EVENT = 5, | ||
| 32 | TB_CFG_PKG_XDOMAIN_REQ = 6, | ||
| 33 | TB_CFG_PKG_XDOMAIN_RESP = 7, | ||
| 34 | TB_CFG_PKG_OVERRIDE = 8, | ||
| 35 | TB_CFG_PKG_RESET = 9, | ||
| 36 | TB_CFG_PKG_ICM_EVENT = 10, | ||
| 37 | TB_CFG_PKG_ICM_CMD = 11, | ||
| 38 | TB_CFG_PKG_ICM_RESP = 12, | ||
| 39 | TB_CFG_PKG_PREPARE_TO_SLEEP = 13, | ||
| 40 | }; | ||
| 41 | |||
| 42 | /** | ||
| 43 | * enum tb_security_level - Thunderbolt security level | ||
| 44 | * @TB_SECURITY_NONE: No security, legacy mode | ||
| 45 | * @TB_SECURITY_USER: User approval required at minimum | ||
| 46 | * @TB_SECURITY_SECURE: One time saved key required at minimum | ||
| 47 | * @TB_SECURITY_DPONLY: Only tunnel Display port (and USB) | ||
| 48 | */ | ||
| 49 | enum tb_security_level { | ||
| 50 | TB_SECURITY_NONE, | ||
| 51 | TB_SECURITY_USER, | ||
| 52 | TB_SECURITY_SECURE, | ||
| 53 | TB_SECURITY_DPONLY, | ||
| 54 | }; | ||
| 55 | |||
| 56 | /** | ||
| 57 | * struct tb - main thunderbolt bus structure | ||
| 58 | * @dev: Domain device | ||
| 59 | * @lock: Big lock. Must be held when accessing any struct | ||
| 60 | * tb_switch / struct tb_port. | ||
| 61 | * @nhi: Pointer to the NHI structure | ||
| 62 | * @ctl: Control channel for this domain | ||
| 63 | * @wq: Ordered workqueue for all domain specific work | ||
| 64 | * @root_switch: Root switch of this domain | ||
| 65 | * @cm_ops: Connection manager specific operations vector | ||
| 66 | * @index: Linux assigned domain number | ||
| 67 | * @security_level: Current security level | ||
| 68 | * @privdata: Private connection manager specific data | ||
| 69 | */ | ||
| 70 | struct tb { | ||
| 71 | struct device dev; | ||
| 72 | struct mutex lock; | ||
| 73 | struct tb_nhi *nhi; | ||
| 74 | struct tb_ctl *ctl; | ||
| 75 | struct workqueue_struct *wq; | ||
| 76 | struct tb_switch *root_switch; | ||
| 77 | const struct tb_cm_ops *cm_ops; | ||
| 78 | int index; | ||
| 79 | enum tb_security_level security_level; | ||
| 80 | unsigned long privdata[0]; | ||
| 81 | }; | ||
| 82 | |||
| 83 | extern struct bus_type tb_bus_type; | ||
| 84 | extern struct device_type tb_service_type; | ||
| 85 | extern struct device_type tb_xdomain_type; | ||
| 86 | |||
| 87 | #define TB_LINKS_PER_PHY_PORT 2 | ||
| 88 | |||
| 89 | static inline unsigned int tb_phy_port_from_link(unsigned int link) | ||
| 90 | { | ||
| 91 | return (link - 1) / TB_LINKS_PER_PHY_PORT; | ||
| 92 | } | ||
| 93 | |||
| 94 | /** | ||
| 95 | * struct tb_property_dir - XDomain property directory | ||
| 96 | * @uuid: Directory UUID or %NULL if root directory | ||
| 97 | * @properties: List of properties in this directory | ||
| 98 | * | ||
| 99 | * User needs to provide serialization if needed. | ||
| 100 | */ | ||
| 101 | struct tb_property_dir { | ||
| 102 | const uuid_t *uuid; | ||
| 103 | struct list_head properties; | ||
| 104 | }; | ||
| 105 | |||
| 106 | enum tb_property_type { | ||
| 107 | TB_PROPERTY_TYPE_UNKNOWN = 0x00, | ||
| 108 | TB_PROPERTY_TYPE_DIRECTORY = 0x44, | ||
| 109 | TB_PROPERTY_TYPE_DATA = 0x64, | ||
| 110 | TB_PROPERTY_TYPE_TEXT = 0x74, | ||
| 111 | TB_PROPERTY_TYPE_VALUE = 0x76, | ||
| 112 | }; | ||
| 113 | |||
| 114 | #define TB_PROPERTY_KEY_SIZE 8 | ||
| 115 | |||
| 116 | /** | ||
| 117 | * struct tb_property - XDomain property | ||
| 118 | * @list: Used to link properties together in a directory | ||
| 119 | * @key: Key for the property (always terminated). | ||
| 120 | * @type: Type of the property | ||
| 121 | * @length: Length of the property data in dwords | ||
| 122 | * @value: Property value | ||
| 123 | * | ||
| 124 | * Users use @type to determine which field in @value is filled. | ||
| 125 | */ | ||
| 126 | struct tb_property { | ||
| 127 | struct list_head list; | ||
| 128 | char key[TB_PROPERTY_KEY_SIZE + 1]; | ||
| 129 | enum tb_property_type type; | ||
| 130 | size_t length; | ||
| 131 | union { | ||
| 132 | struct tb_property_dir *dir; | ||
| 133 | u8 *data; | ||
| 134 | char *text; | ||
| 135 | u32 immediate; | ||
| 136 | } value; | ||
| 137 | }; | ||
| 138 | |||
| 139 | struct tb_property_dir *tb_property_parse_dir(const u32 *block, | ||
| 140 | size_t block_len); | ||
| 141 | ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block, | ||
| 142 | size_t block_len); | ||
| 143 | struct tb_property_dir *tb_property_create_dir(const uuid_t *uuid); | ||
| 144 | void tb_property_free_dir(struct tb_property_dir *dir); | ||
| 145 | int tb_property_add_immediate(struct tb_property_dir *parent, const char *key, | ||
| 146 | u32 value); | ||
| 147 | int tb_property_add_data(struct tb_property_dir *parent, const char *key, | ||
| 148 | const void *buf, size_t buflen); | ||
| 149 | int tb_property_add_text(struct tb_property_dir *parent, const char *key, | ||
| 150 | const char *text); | ||
| 151 | int tb_property_add_dir(struct tb_property_dir *parent, const char *key, | ||
| 152 | struct tb_property_dir *dir); | ||
| 153 | void tb_property_remove(struct tb_property *tb_property); | ||
| 154 | struct tb_property *tb_property_find(struct tb_property_dir *dir, | ||
| 155 | const char *key, enum tb_property_type type); | ||
| 156 | struct tb_property *tb_property_get_next(struct tb_property_dir *dir, | ||
| 157 | struct tb_property *prev); | ||
| 158 | |||
| 159 | #define tb_property_for_each(dir, property) \ | ||
| 160 | for (property = tb_property_get_next(dir, NULL); \ | ||
| 161 | property; \ | ||
| 162 | property = tb_property_get_next(dir, property)) | ||
| 163 | |||
| 164 | int tb_register_property_dir(const char *key, struct tb_property_dir *dir); | ||
| 165 | void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir); | ||
| 166 | |||
| 167 | /** | ||
| 168 | * struct tb_xdomain - Cross-domain (XDomain) connection | ||
| 169 | * @dev: XDomain device | ||
| 170 | * @tb: Pointer to the domain | ||
| 171 | * @remote_uuid: UUID of the remote domain (host) | ||
| 172 | * @local_uuid: Cached local UUID | ||
| 173 | * @route: Route string the other domain can be reached | ||
| 174 | * @vendor: Vendor ID of the remote domain | ||
| 175 | * @device: Device ID of the demote domain | ||
| 176 | * @lock: Lock to serialize access to the following fields of this structure | ||
| 177 | * @vendor_name: Name of the vendor (or %NULL if not known) | ||
| 178 | * @device_name: Name of the device (or %NULL if not known) | ||
| 179 | * @is_unplugged: The XDomain is unplugged | ||
| 180 | * @resume: The XDomain is being resumed | ||
| 181 | * @transmit_path: HopID which the remote end expects us to transmit | ||
| 182 | * @transmit_ring: Local ring (hop) where outgoing packets are pushed | ||
| 183 | * @receive_path: HopID which we expect the remote end to transmit | ||
| 184 | * @receive_ring: Local ring (hop) where incoming packets arrive | ||
| 185 | * @service_ids: Used to generate IDs for the services | ||
| 186 | * @properties: Properties exported by the remote domain | ||
| 187 | * @property_block_gen: Generation of @properties | ||
| 188 | * @properties_lock: Lock protecting @properties. | ||
| 189 | * @get_properties_work: Work used to get remote domain properties | ||
| 190 | * @properties_retries: Number of times left to read properties | ||
| 191 | * @properties_changed_work: Work used to notify the remote domain that | ||
| 192 | * our properties have changed | ||
| 193 | * @properties_changed_retries: Number of times left to send properties | ||
| 194 | * changed notification | ||
| 195 | * @link: Root switch link the remote domain is connected (ICM only) | ||
| 196 | * @depth: Depth in the chain the remote domain is connected (ICM only) | ||
| 197 | * | ||
| 198 | * This structure represents connection across two domains (hosts). | ||
| 199 | * Each XDomain contains zero or more services which are exposed as | ||
| 200 | * &struct tb_service objects. | ||
| 201 | * | ||
| 202 | * Service drivers may access this structure if they need to enumerate | ||
| 203 | * non-standard properties but they need hold @lock when doing so | ||
| 204 | * because properties can be changed asynchronously in response to | ||
| 205 | * changes in the remote domain. | ||
| 206 | */ | ||
| 207 | struct tb_xdomain { | ||
| 208 | struct device dev; | ||
| 209 | struct tb *tb; | ||
| 210 | uuid_t *remote_uuid; | ||
| 211 | const uuid_t *local_uuid; | ||
| 212 | u64 route; | ||
| 213 | u16 vendor; | ||
| 214 | u16 device; | ||
| 215 | struct mutex lock; | ||
| 216 | const char *vendor_name; | ||
| 217 | const char *device_name; | ||
| 218 | bool is_unplugged; | ||
| 219 | bool resume; | ||
| 220 | u16 transmit_path; | ||
| 221 | u16 transmit_ring; | ||
| 222 | u16 receive_path; | ||
| 223 | u16 receive_ring; | ||
| 224 | struct ida service_ids; | ||
| 225 | struct tb_property_dir *properties; | ||
| 226 | u32 property_block_gen; | ||
| 227 | struct delayed_work get_properties_work; | ||
| 228 | int properties_retries; | ||
| 229 | struct delayed_work properties_changed_work; | ||
| 230 | int properties_changed_retries; | ||
| 231 | u8 link; | ||
| 232 | u8 depth; | ||
| 233 | }; | ||
| 234 | |||
/* Enable/disable the DMA paths described by @xd's path/ring fields */
int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
			    u16 transmit_ring, u16 receive_path,
			    u16 receive_ring);
int tb_xdomain_disable_paths(struct tb_xdomain *xd);
/*
 * NOTE(review): presumably requires tb->lock to be held -- the _locked()
 * wrapper below takes it around this call; confirm at the definition.
 */
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid);
| 240 | |||
/**
 * tb_xdomain_find_by_uuid_locked() - Find an XDomain by UUID
 * @tb: Domain to search
 * @uuid: UUID of the remote domain to look for
 *
 * Like tb_xdomain_find_by_uuid() but takes and releases @tb->lock around
 * the lookup itself, so the caller does not need to hold the domain lock.
 *
 * NOTE(review): presumably the returned XDomain carries a reference the
 * caller releases with tb_xdomain_put() -- confirm against the
 * tb_xdomain_find_by_uuid() implementation.
 */
static inline struct tb_xdomain *
tb_xdomain_find_by_uuid_locked(struct tb *tb, const uuid_t *uuid)
{
	struct tb_xdomain *xd;

	mutex_lock(&tb->lock);
	xd = tb_xdomain_find_by_uuid(tb, uuid);
	mutex_unlock(&tb->lock);

	return xd;
}
| 252 | |||
| 253 | static inline struct tb_xdomain *tb_xdomain_get(struct tb_xdomain *xd) | ||
| 254 | { | ||
| 255 | if (xd) | ||
| 256 | get_device(&xd->dev); | ||
| 257 | return xd; | ||
| 258 | } | ||
| 259 | |||
| 260 | static inline void tb_xdomain_put(struct tb_xdomain *xd) | ||
| 261 | { | ||
| 262 | if (xd) | ||
| 263 | put_device(&xd->dev); | ||
| 264 | } | ||
| 265 | |||
/* Return true if @dev is an XDomain device (its type is tb_xdomain_type) */
static inline bool tb_is_xdomain(const struct device *dev)
{
	return dev->type == &tb_xdomain_type;
}
| 270 | |||
| 271 | static inline struct tb_xdomain *tb_to_xdomain(struct device *dev) | ||
| 272 | { | ||
| 273 | if (tb_is_xdomain(dev)) | ||
| 274 | return container_of(dev, struct tb_xdomain, dev); | ||
| 275 | return NULL; | ||
| 276 | } | ||
| 277 | |||
/* Send a response to a previously received XDomain request */
int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
			size_t size, enum tb_cfg_pkg_type type);
/*
 * Send an XDomain request and receive the reply into @response;
 * @timeout_msec presumably bounds the wait (from the parameter name --
 * confirm at the definition).
 */
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
		       size_t request_size, enum tb_cfg_pkg_type request_type,
		       void *response, size_t response_size,
		       enum tb_cfg_pkg_type response_type,
		       unsigned int timeout_msec);
| 285 | |||
/**
 * struct tb_protocol_handler - Protocol specific handler
 * @uuid: XDomain messages with this UUID are dispatched to this handler
 * @callback: Callback called with the XDomain message. Returning %1
 *	      here tells the XDomain core that the message was handled
 *	      by this handler and should not be forwarded to other
 *	      handlers.
 * @data: Data passed with the callback
 * @list: Handlers are linked using this
 *
 * Thunderbolt services can hook into incoming XDomain requests by
 * registering a protocol handler. The only limitation is that the
 * XDomain discovery protocol UUID cannot be registered since it is
 * handled by the core XDomain code.
 *
 * The @callback must check that the message is really directed to the
 * service the driver implements.
 */
struct tb_protocol_handler {
	const uuid_t *uuid;
	int (*callback)(const void *buf, size_t size, void *data);
	void *data;
	struct list_head list;
};
| 310 | |||
/* Register/unregister an XDomain protocol handler (see above) */
int tb_register_protocol_handler(struct tb_protocol_handler *handler);
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler);
| 313 | |||
/**
 * struct tb_service - Thunderbolt service
 * @dev: Device of this service; its parent is the owning XDomain
 *	 device (see tb_service_parent())
 * @id: ID of the service (shown in sysfs)
 * @key: Protocol key from the properties directory
 * @prtcid: Protocol ID from the properties directory
 * @prtcvers: Protocol version from the properties directory
 * @prtcrevs: Protocol software revision from the properties directory
 * @prtcstns: Protocol settings mask from the properties directory
 *
 * Each domain exposes set of services it supports as collection of
 * properties. For each service there will be one corresponding
 * &struct tb_service. Service drivers are bound to these.
 */
struct tb_service {
	struct device dev;
	int id;
	const char *key;
	u32 prtcid;
	u32 prtcvers;
	u32 prtcrevs;
	u32 prtcstns;
};
| 337 | |||
| 338 | static inline struct tb_service *tb_service_get(struct tb_service *svc) | ||
| 339 | { | ||
| 340 | if (svc) | ||
| 341 | get_device(&svc->dev); | ||
| 342 | return svc; | ||
| 343 | } | ||
| 344 | |||
| 345 | static inline void tb_service_put(struct tb_service *svc) | ||
| 346 | { | ||
| 347 | if (svc) | ||
| 348 | put_device(&svc->dev); | ||
| 349 | } | ||
| 350 | |||
/* Return true if @dev is a service device (its type is tb_service_type) */
static inline bool tb_is_service(const struct device *dev)
{
	return dev->type == &tb_service_type;
}
| 355 | |||
| 356 | static inline struct tb_service *tb_to_service(struct device *dev) | ||
| 357 | { | ||
| 358 | if (tb_is_service(dev)) | ||
| 359 | return container_of(dev, struct tb_service, dev); | ||
| 360 | return NULL; | ||
| 361 | } | ||
| 362 | |||
/**
 * struct tb_service_driver - Thunderbolt service driver
 * @driver: Driver structure
 * @probe: Called when the driver is probed
 * @remove: Called when the driver is removed (optional)
 * @shutdown: Called at shutdown time to stop the service (optional)
 * @id_table: Table of service identifiers the driver supports
 */
struct tb_service_driver {
	struct device_driver driver;
	int (*probe)(struct tb_service *svc, const struct tb_service_id *id);
	void (*remove)(struct tb_service *svc);
	void (*shutdown)(struct tb_service *svc);
	const struct tb_service_id *id_table;
};
| 378 | |||
/*
 * TB_SERVICE() - Initialize &struct tb_service_id match fields
 * @key: Protocol key the entry matches
 * @id: Protocol ID the entry matches
 *
 * Matches on both the protocol key and the protocol ID of the service.
 */
#define TB_SERVICE(key, id) \
	.match_flags = TBSVC_MATCH_PROTOCOL_KEY | \
		       TBSVC_MATCH_PROTOCOL_ID, \
	.protocol_key = (key), \
	.protocol_id = (id)
| 384 | |||
/* Register/unregister a Thunderbolt service driver */
int tb_register_service_driver(struct tb_service_driver *drv);
void tb_unregister_service_driver(struct tb_service_driver *drv);
| 387 | |||
/* Return the driver private data attached to @svc */
static inline void *tb_service_get_drvdata(const struct tb_service *svc)
{
	return dev_get_drvdata(&svc->dev);
}
| 392 | |||
/* Attach driver private data to @svc */
static inline void tb_service_set_drvdata(struct tb_service *svc, void *data)
{
	dev_set_drvdata(&svc->dev, data);
}
| 397 | |||
/* Return the XDomain the service belongs to (the service's parent device) */
static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc)
{
	return tb_to_xdomain(svc->dev.parent);
}
| 402 | |||
/**
 * struct tb_nhi - thunderbolt native host interface
 * @lock: Must be held during ring creation/destruction. Is acquired by
 *	  interrupt_work when dispatching interrupts to individual rings.
 * @pdev: Pointer to the PCI device
 * @iobase: MMIO space of the NHI
 * @tx_rings: All Tx rings available on this host controller
 * @rx_rings: All Rx rings available on this host controller
 * @msix_ida: Used to allocate MSI-X vectors for rings
 * @going_away: The host controller device is about to disappear so when
 *		this flag is set, avoid touching the hardware anymore.
 * @interrupt_work: Work scheduled to handle ring interrupt when no
 *		    MSI-X is used.
 * @hop_count: Number of rings (end point hops) supported by NHI.
 */
struct tb_nhi {
	spinlock_t lock;
	struct pci_dev *pdev;
	void __iomem *iobase;
	struct tb_ring **tx_rings;
	struct tb_ring **rx_rings;
	struct ida msix_ida;
	bool going_away;
	struct work_struct interrupt_work;
	u32 hop_count;
};
| 429 | |||
/**
 * struct tb_ring - thunderbolt TX or RX ring associated with a NHI
 * @lock: Lock serializing actions to this ring. Must be acquired after
 *	  nhi->lock.
 * @nhi: Pointer to the native host controller interface
 * @size: Size of the ring
 * @hop: Hop (DMA channel) associated with this ring
 * @head: Head of the ring (write next descriptor here)
 * @tail: Tail of the ring (complete next descriptor here)
 * @descriptors: Allocated descriptors for this ring
 * @descriptors_dma: DMA address of @descriptors (assumed from the name
 *		     and dma_addr_t type -- confirm at the allocation site)
 * @queue: Queue holding frames to be transferred over this ring
 * @in_flight: Queue holding frames that are currently in flight
 * @work: Interrupt work structure
 * @is_tx: Is the ring Tx or Rx
 * @running: Is the ring running
 * @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise.
 * @vector: MSI-X vector number the ring uses (only set if @irq is > 0)
 * @flags: Ring specific flags
 * @sof_mask: Bit mask used to detect start of frame PDF
 * @eof_mask: Bit mask used to detect end of frame PDF
 * @start_poll: Called when ring interrupt is triggered to start
 *		polling. Passing %NULL keeps the ring in interrupt mode.
 * @poll_data: Data passed to @start_poll
 */
struct tb_ring {
	spinlock_t lock;
	struct tb_nhi *nhi;
	int size;
	int hop;
	int head;
	int tail;
	struct ring_desc *descriptors;
	dma_addr_t descriptors_dma;
	struct list_head queue;
	struct list_head in_flight;
	struct work_struct work;
	bool is_tx:1;
	bool running:1;
	int irq;
	u8 vector;
	unsigned int flags;
	u16 sof_mask;
	u16 eof_mask;
	void (*start_poll)(void *data);
	void *poll_data;
};
| 476 | |||
/* Leave ring interrupt enabled on suspend */
#define RING_FLAG_NO_SUSPEND BIT(0)
/* Configure the ring to be in frame mode */
#define RING_FLAG_FRAME BIT(1)
/* Enable end-to-end flow control */
#define RING_FLAG_E2E BIT(2)

struct ring_frame;
/* Frame completion callback; @canceled is true if the ring was stopped */
typedef void (*ring_cb)(struct tb_ring *, struct ring_frame *, bool canceled);
| 486 | |||
/**
 * enum ring_desc_flags - Flags for DMA ring descriptor
 * %RING_DESC_ISOCH: Enable isochronous DMA (Tx only)
 * %RING_DESC_CRC_ERROR: In frame mode CRC check failed for the frame (Rx only)
 * %RING_DESC_COMPLETED: Descriptor completed (set by NHI)
 * %RING_DESC_POSTED: Always set this
 * %RING_DESC_BUFFER_OVERRUN: RX buffer overrun
 * %RING_DESC_INTERRUPT: Request an interrupt on completion
 *
 * Note some values are shared (ISOCH/CRC_ERROR both 0x1,
 * POSTED/BUFFER_OVERRUN both 0x4) -- presumably the Tx-only and Rx-only
 * flags reuse the same descriptor bit positions; confirm against the NHI
 * hardware documentation before changing.
 */
enum ring_desc_flags {
	RING_DESC_ISOCH = 0x1,
	RING_DESC_CRC_ERROR = 0x1,
	RING_DESC_COMPLETED = 0x2,
	RING_DESC_POSTED = 0x4,
	RING_DESC_BUFFER_OVERRUN = 0x04,
	RING_DESC_INTERRUPT = 0x8,
};
| 504 | |||
/**
 * struct ring_frame - For use with ring_rx/ring_tx
 * @buffer_phy: DMA mapped address of the frame
 * @callback: Callback called when the frame is finished (optional)
 * @list: Frame is linked to a queue using this
 * @size: Size of the frame in bytes (%0 means %4096)
 * @flags: Flags for the frame (see &enum ring_desc_flags)
 * @eof: End of frame protocol defined field
 * @sof: Start of frame protocol defined field
 */
struct ring_frame {
	dma_addr_t buffer_phy;
	ring_cb callback;
	struct list_head list;
	u32 size:12;
	u32 flags:12;
	u32 eof:4;
	u32 sof:4;
};
| 524 | |||
/* Minimum size for ring_rx */
#define TB_FRAME_SIZE 0x100

struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags);
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags, u16 sof_mask, u16 eof_mask,
				 void (*start_poll)(void *), void *poll_data);
void tb_ring_start(struct tb_ring *ring);
void tb_ring_stop(struct tb_ring *ring);
void tb_ring_free(struct tb_ring *ring);

/* Common enqueue path; callers use the tb_ring_rx()/tb_ring_tx() wrappers */
int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);
| 538 | |||
/**
 * tb_ring_rx() - enqueue a frame on an RX ring
 * @ring: Ring to enqueue the frame
 * @frame: Frame to enqueue
 *
 * @frame->buffer, @frame->buffer_phy have to be set. The buffer must
 * contain at least %TB_FRAME_SIZE bytes.
 *
 * NOTE(review): &struct ring_frame declares no @buffer member here;
 * presumably callers embed the frame in a larger structure holding the
 * CPU address -- confirm against users of this API.
 *
 * @frame->callback will be invoked with @frame->size, @frame->flags,
 * @frame->eof, @frame->sof set once the frame has been received.
 *
 * If ring_stop() is called after the packet has been enqueued
 * @frame->callback will be called with canceled set to true.
 *
 * Return: Returns %-ESHUTDOWN if ring_stop has been called. Zero otherwise.
 */
static inline int tb_ring_rx(struct tb_ring *ring, struct ring_frame *frame)
{
	WARN_ON(ring->is_tx);
	return __tb_ring_enqueue(ring, frame);
}
| 560 | |||
/**
 * tb_ring_tx() - enqueue a frame on an TX ring
 * @ring: Ring to enqueue the frame
 * @frame: Frame to enqueue
 *
 * @frame->buffer, @frame->buffer_phy, @frame->size, @frame->eof and
 * @frame->sof have to be set.
 *
 * @frame->callback will be invoked once the frame has been transmitted.
 *
 * If ring_stop() is called after the packet has been enqueued @frame->callback
 * will be called with canceled set to true.
 *
 * Return: Returns %-ESHUTDOWN if ring_stop has been called. Zero otherwise.
 */
static inline int tb_ring_tx(struct tb_ring *ring, struct ring_frame *frame)
{
	WARN_ON(!ring->is_tx);
	return __tb_ring_enqueue(ring, frame);
}
| 581 | |||
/* Used only when the ring is in polling mode (see @start_poll) */
struct ring_frame *tb_ring_poll(struct tb_ring *ring);
void tb_ring_poll_complete(struct tb_ring *ring);
| 585 | |||
/**
 * tb_ring_dma_device() - Return device used for DMA mapping
 * @ring: Ring whose DMA device is retrieved
 *
 * Use this function when you are mapping DMA for buffers that are
 * passed to the ring for sending/receiving. Returns the PCI device
 * of the NHI the ring belongs to.
 */
static inline struct device *tb_ring_dma_device(struct tb_ring *ring)
{
	return &ring->nhi->pdev->dev;
}
| 597 | |||
| 598 | #endif /* THUNDERBOLT_H_ */ | ||
