author	Linus Torvalds <torvalds@linux-foundation.org>	2016-01-12 21:57:02 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-01-12 21:57:02 -0500
commit	aee3bfa3307cd0da2126bdc0ea359dabea5ee8f7 (patch)
tree	3d35c69e8fa835098bb90f77f30abed120681651 /include/linux
parent	c597b6bcd5c624534afc3df65cdc42bb05173bca (diff)
parent	415b6f19e87e350b13585591859d4fdf50772229 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) Support busy polling generically, for all NAPI drivers. From Eric
    Dumazet.

 2) Add byte/packet counter support to nft_ct, from Florian Westphal.

 3) Add RSS/XPS support to mvneta driver, from Gregory Clement.

 4) Implement IPV6_HDRINCL socket option for raw sockets, from Hannes
    Frederic Sowa.

 5) Add support for T6 adapter to cxgb4 driver, from Hariprasad Shenai.

 6) Add support for VLAN device bridging to mlxsw switch driver, from
    Ido Schimmel.

 7) Add driver for Netronome NFP4000/NFP6000, from Jakub Kicinski.

 8) Provide hwmon interface to mlxsw switch driver, from Jiri Pirko.

 9) Reorganize wireless drivers into per-vendor directories just like
    we do for ethernet drivers. From Kalle Valo.

10) Provide a way for administrators to "destroy" connected sockets via
    the SOCK_DESTROY socket netlink diag operation. From Lorenzo
    Colitti.

11) Add support to add/remove multicast routes via netlink, from
    Nikolay Aleksandrov.

12) Make TCP keepalive settings per-namespace, from Nikolay Borisov.

13) Add forwarding and packet duplication facilities to nf_tables, from
    Pablo Neira Ayuso.

14) Dead route support in MPLS, from Roopa Prabhu.

15) TSO support for thunderx chips, from Sunil Goutham.

16) Add driver for IBM's System i/p VNIC protocol, from Thomas Falcon.

17) Rationalize, consolidate, and more completely document the checksum
    offloading facilities in the networking stack. From Tom Herbert.

18) Support aborting an ongoing scan in mac80211/cfg80211, from
    Vidyullatha Kanchanapally.

19) Use per-bucket spinlock for bpf hash facility, from Tom Leiming.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1375 commits)
  net: bnxt: always return values from _bnxt_get_max_rings
  net: bpf: reject invalid shifts
  phonet: properly unshare skbs in phonet_rcv()
  dwc_eth_qos: Fix dma address for multi-fragment skbs
  phy: remove an unneeded condition
  mdio: remove an unneed condition
  mdio_bus: NULL dereference on allocation error
  net: Fix typo in netdev_intersect_features
  net: freescale: mac-fec: Fix build error from phy_device API change
  net: freescale: ucc_geth: Fix build error from phy_device API change
  bonding: Prevent IPv6 link local address on enslaved devices
  IB/mlx5: Add flow steering support
  net/mlx5_core: Export flow steering API
  net/mlx5_core: Make ipv4/ipv6 location more clear
  net/mlx5_core: Enable flow steering support for the IB driver
  net/mlx5_core: Initialize namespaces only when supported by device
  net/mlx5_core: Set priority attributes
  net/mlx5_core: Connect flow tables
  net/mlx5_core: Introduce modify flow table command
  net/mlx5_core: Managing root flow table
  ...
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/brcmphy.h	1
-rw-r--r--	include/linux/cgroup-defs.h	126
-rw-r--r--	include/linux/cgroup.h	66
-rw-r--r--	include/linux/etherdevice.h	3
-rw-r--r--	include/linux/filter.h	41
-rw-r--r--	include/linux/hashtable.h	4
-rw-r--r--	include/linux/hdlc.h	2
-rw-r--r--	include/linux/if_pppox.h	1
-rw-r--r--	include/linux/if_team.h	1
-rw-r--r--	include/linux/if_vlan.h	4
-rw-r--r--	include/linux/inet_diag.h	9
-rw-r--r--	include/linux/kernfs.h	12
-rw-r--r--	include/linux/mdio.h	78
-rw-r--r--	include/linux/mlx4/driver.h	5
-rw-r--r--	include/linux/mlx5/device.h	66
-rw-r--r--	include/linux/mlx5/driver.h	30
-rw-r--r--	include/linux/mlx5/flow_table.h	54
-rw-r--r--	include/linux/mlx5/fs.h	111
-rw-r--r--	include/linux/mlx5/mlx5_ifc.h	311
-rw-r--r--	include/linux/mlx5/vport.h	37
-rw-r--r--	include/linux/mroute.h	76
-rw-r--r--	include/linux/netdev_features.h	14
-rw-r--r--	include/linux/netdevice.h	282
-rw-r--r--	include/linux/netfilter/nf_conntrack_sctp.h	13
-rw-r--r--	include/linux/netfilter/nfnetlink.h	12
-rw-r--r--	include/linux/netlink.h	2
-rw-r--r--	include/linux/pci_ids.h	2
-rw-r--r--	include/linux/phy.h	80
-rw-r--r--	include/linux/pim.h	5
-rw-r--r--	include/linux/platform_data/microread.h	2
-rw-r--r--	include/linux/qed/qed_if.h	17
-rw-r--r--	include/linux/rhashtable.h	82
-rw-r--r--	include/linux/rtnetlink.h	5
-rw-r--r--	include/linux/sched.h	1
-rw-r--r--	include/linux/sh_eth.h	2
-rw-r--r--	include/linux/skbuff.h	171
-rw-r--r--	include/linux/soc/ti/knav_dma.h	22
-rw-r--r--	include/linux/sock_diag.h	2
-rw-r--r--	include/linux/ssb/ssb.h	10
-rw-r--r--	include/linux/wait.h	21
40 files changed, 1493 insertions, 290 deletions
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 59f4a7304419..f0ba9c2ec639 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -26,6 +26,7 @@
 #define PHY_ID_BCM7366 0x600d8490
 #define PHY_ID_BCM7425 0x600d86b0
 #define PHY_ID_BCM7429 0x600d8730
+#define PHY_ID_BCM7435 0x600d8750
 #define PHY_ID_BCM7439 0x600d8480
 #define PHY_ID_BCM7439_2 0xae025080
 #define PHY_ID_BCM7445 0x600d8510
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 06b77f9dd3f2..e5f4164cbd99 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -231,6 +231,14 @@ struct cgroup {
 	int id;
 
 	/*
+	 * The depth this cgroup is at. The root is at depth zero and each
+	 * step down the hierarchy increments the level. This along with
+	 * ancestor_ids[] can determine whether a given cgroup is a
+	 * descendant of another without traversing the hierarchy.
+	 */
+	int level;
+
+	/*
 	 * Each non-empty css_set associated with this cgroup contributes
 	 * one to populated_cnt. All children with non-zero popuplated_cnt
 	 * of their own contribute one. The count is zero iff there's no
@@ -285,6 +293,9 @@ struct cgroup {
 
 	/* used to schedule release agent */
 	struct work_struct release_agent_work;
+
+	/* ids of the ancestors at each level including self */
+	int ancestor_ids[];
 };
 
 /*
@@ -304,6 +315,9 @@ struct cgroup_root {
 	/* The root cgroup. Root is destroyed on its release. */
 	struct cgroup cgrp;
 
+	/* for cgrp->ancestor_ids[0] */
+	int cgrp_ancestor_id_storage;
+
 	/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
 	atomic_t nr_cgrps;
 
@@ -521,4 +535,116 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
 
 #endif /* CONFIG_CGROUPS */
 
+#ifdef CONFIG_SOCK_CGROUP_DATA
+
+/*
+ * sock_cgroup_data is embedded at sock->sk_cgrp_data and contains
+ * per-socket cgroup information except for memcg association.
+ *
+ * On legacy hierarchies, net_prio and net_cls controllers directly set
+ * attributes on each sock which can then be tested by the network layer.
+ * On the default hierarchy, each sock is associated with the cgroup it was
+ * created in and the networking layer can match the cgroup directly.
+ *
+ * To avoid carrying all three cgroup related fields separately in sock,
+ * sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer.
+ * On boot, sock_cgroup_data records the cgroup that the sock was created
+ * in so that cgroup2 matches can be made; however, once either net_prio or
+ * net_cls starts being used, the area is overriden to carry prioidx and/or
+ * classid. The two modes are distinguished by whether the lowest bit is
+ * set. Clear bit indicates cgroup pointer while set bit prioidx and
+ * classid.
+ *
+ * While userland may start using net_prio or net_cls at any time, once
+ * either is used, cgroup2 matching no longer works. There is no reason to
+ * mix the two and this is in line with how legacy and v2 compatibility is
+ * handled. On mode switch, cgroup references which are already being
+ * pointed to by socks may be leaked. While this can be remedied by adding
+ * synchronization around sock_cgroup_data, given that the number of leaked
+ * cgroups is bound and highly unlikely to be high, this seems to be the
+ * better trade-off.
+ */
+struct sock_cgroup_data {
+	union {
+#ifdef __LITTLE_ENDIAN
+		struct {
+			u8	is_data;
+			u8	padding;
+			u16	prioidx;
+			u32	classid;
+		} __packed;
+#else
+		struct {
+			u32	classid;
+			u16	prioidx;
+			u8	padding;
+			u8	is_data;
+		} __packed;
+#endif
+		u64		val;
+	};
+};
+
+/*
+ * There's a theoretical window where the following accessors race with
+ * updaters and return part of the previous pointer as the prioidx or
+ * classid. Such races are short-lived and the result isn't critical.
+ */
+static inline u16 sock_cgroup_prioidx(struct sock_cgroup_data *skcd)
+{
+	/* fallback to 1 which is always the ID of the root cgroup */
+	return (skcd->is_data & 1) ? skcd->prioidx : 1;
+}
+
+static inline u32 sock_cgroup_classid(struct sock_cgroup_data *skcd)
+{
+	/* fallback to 0 which is the unconfigured default classid */
+	return (skcd->is_data & 1) ? skcd->classid : 0;
+}
+
+/*
+ * If invoked concurrently, the updaters may clobber each other. The
+ * caller is responsible for synchronization.
+ */
+static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
+					   u16 prioidx)
+{
+	struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
+
+	if (sock_cgroup_prioidx(&skcd_buf) == prioidx)
+		return;
+
+	if (!(skcd_buf.is_data & 1)) {
+		skcd_buf.val = 0;
+		skcd_buf.is_data = 1;
+	}
+
+	skcd_buf.prioidx = prioidx;
+	WRITE_ONCE(skcd->val, skcd_buf.val);	/* see sock_cgroup_ptr() */
+}
+
+static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
+					   u32 classid)
+{
+	struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
+
+	if (sock_cgroup_classid(&skcd_buf) == classid)
+		return;
+
+	if (!(skcd_buf.is_data & 1)) {
+		skcd_buf.val = 0;
+		skcd_buf.is_data = 1;
+	}
+
+	skcd_buf.classid = classid;
+	WRITE_ONCE(skcd->val, skcd_buf.val);	/* see sock_cgroup_ptr() */
+}
+
+#else	/* CONFIG_SOCK_CGROUP_DATA */
+
+struct sock_cgroup_data {
+};
+
+#endif	/* CONFIG_SOCK_CGROUP_DATA */
+
 #endif /* _LINUX_CGROUP_DEFS_H */
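The union above relies on cgroup pointers being at least two-byte aligned, so bit 0 of a real pointer is always clear and can double as the mode flag. A minimal user-space sketch of that low-bit tagging scheme (hypothetical names, not kernel code):

/*
 * Stand-alone illustration of the tagging used by sock_cgroup_data.
 * On the little-endian layout, prioidx sits in bits 16..31 and
 * classid in bits 32..63 of the 64-bit word.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	static int fake_cgroup;		/* stand-in for struct cgroup */
	uint64_t val;

	/* pointer mode: bit 0 of an aligned pointer is clear */
	val = (uint64_t)(uintptr_t)&fake_cgroup;
	assert((val & 1) == 0);

	/* data mode: bit 0 set, prioidx = 42, classid = 7 */
	val = 1 | ((uint64_t)42 << 16) | ((uint64_t)7 << 32);
	printf("is_data=%llu prioidx=%llu classid=%llu\n",
	       (unsigned long long)(val & 1),
	       (unsigned long long)((val >> 16) & 0xffff),
	       (unsigned long long)(val >> 32));
	return 0;
}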
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index cb91b44f5f78..322a28482745 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -81,7 +81,8 @@ struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
 struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
 						       struct cgroup_subsys *ss);
 
-bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);
+struct cgroup *cgroup_get_from_path(const char *path);
+
 int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
 int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
 
@@ -364,6 +365,11 @@ static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
 	percpu_ref_put_many(&css->refcnt, n);
 }
 
+static inline void cgroup_put(struct cgroup *cgrp)
+{
+	css_put(&cgrp->self);
+}
+
 /**
  * task_css_set_check - obtain a task's css_set with extra access conditions
  * @task: the task to obtain css_set for
@@ -471,6 +477,23 @@ static inline struct cgroup *task_cgroup(struct task_struct *task,
 	return task_css(task, subsys_id)->cgroup;
 }
 
+/**
+ * cgroup_is_descendant - test ancestry
+ * @cgrp: the cgroup to be tested
+ * @ancestor: possible ancestor of @cgrp
+ *
+ * Test whether @cgrp is a descendant of @ancestor. It also returns %true
+ * if @cgrp == @ancestor. This function is safe to call as long as @cgrp
+ * and @ancestor are accessible.
+ */
+static inline bool cgroup_is_descendant(struct cgroup *cgrp,
+					struct cgroup *ancestor)
+{
+	if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
+		return false;
+	return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
+}
+
 /* no synchronization, the result can only be used as a hint */
 static inline bool cgroup_is_populated(struct cgroup *cgrp)
 {
@@ -554,4 +577,45 @@ static inline int cgroup_init(void) { return 0; }
 
 #endif /* !CONFIG_CGROUPS */
 
+/*
+ * sock->sk_cgrp_data handling. For more info, see sock_cgroup_data
+ * definition in cgroup-defs.h.
+ */
+#ifdef CONFIG_SOCK_CGROUP_DATA
+
+#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
+extern spinlock_t cgroup_sk_update_lock;
+#endif
+
+void cgroup_sk_alloc_disable(void);
+void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
+void cgroup_sk_free(struct sock_cgroup_data *skcd);
+
+static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
+{
+#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
+	unsigned long v;
+
+	/*
+	 * @skcd->val is 64bit but the following is safe on 32bit too as we
+	 * just need the lower ulong to be written and read atomically.
+	 */
+	v = READ_ONCE(skcd->val);
+
+	if (v & 1)
+		return &cgrp_dfl_root.cgrp;
+
+	return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
+#else
+	return (struct cgroup *)(unsigned long)skcd->val;
+#endif
+}
+
+#else	/* CONFIG_CGROUP_DATA */
+
+static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
+static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}
+
+#endif	/* CONFIG_CGROUP_DATA */
+
 #endif /* _LINUX_CGROUP_H */
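The new inline cgroup_is_descendant() turns ancestry testing into an O(1) lookup: every cgroup caches the IDs of all its ancestors indexed by depth, so @cgrp descends from @ancestor iff the ID recorded at @ancestor's level matches. A minimal user-space sketch of the same check (hypothetical types, not kernel code):

#include <stdbool.h>

struct fake_cgroup {
	int id;
	int level;		/* root sits at level 0 */
	int ancestor_ids[8];	/* ancestor_ids[level] == id (self included) */
};

static bool is_descendant(const struct fake_cgroup *cg,
			  const struct fake_cgroup *anc)
{
	/* an ancestor can never sit deeper in the tree */
	if (cg->level < anc->level)
		return false;
	/* one array lookup instead of walking parents */
	return cg->ancestor_ids[anc->level] == anc->id;
}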
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index eb049c622208..37ff4a6faa9a 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -29,6 +29,9 @@
 #include <asm/bitsperlong.h>
 
 #ifdef __KERNEL__
+struct device;
+int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr);
+unsigned char *arch_get_platform_get_mac_address(void);
 u32 eth_get_headlen(void *data, unsigned int max_len);
 __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
 extern const struct header_ops eth_header_ops;
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 5972ffe5719a..43aa1f8855c7 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -350,25 +350,43 @@ struct sk_filter {
 
 #define BPF_PROG_RUN(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi)
 
+#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
+
+static inline u8 *bpf_skb_cb(struct sk_buff *skb)
+{
+	/* eBPF programs may read/write skb->cb[] area to transfer meta
+	 * data between tail calls. Since this also needs to work with
+	 * tc, that scratch memory is mapped to qdisc_skb_cb's data area.
+	 *
+	 * In some socket filter cases, the cb unfortunately needs to be
+	 * saved/restored so that protocol specific skb->cb[] data won't
+	 * be lost. In any case, due to unpriviledged eBPF programs
+	 * attached to sockets, we need to clear the bpf_skb_cb() area
+	 * to not leak previous contents to user space.
+	 */
+	BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
+	BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) !=
+		     FIELD_SIZEOF(struct qdisc_skb_cb, data));
+
+	return qdisc_skb_cb(skb)->data;
+}
+
 static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
 				       struct sk_buff *skb)
 {
-	u8 *cb_data = qdisc_skb_cb(skb)->data;
-	u8 saved_cb[QDISC_CB_PRIV_LEN];
+	u8 *cb_data = bpf_skb_cb(skb);
+	u8 cb_saved[BPF_SKB_CB_LEN];
 	u32 res;
 
-	BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) !=
-		     QDISC_CB_PRIV_LEN);
-
 	if (unlikely(prog->cb_access)) {
-		memcpy(saved_cb, cb_data, sizeof(saved_cb));
-		memset(cb_data, 0, sizeof(saved_cb));
+		memcpy(cb_saved, cb_data, sizeof(cb_saved));
+		memset(cb_data, 0, sizeof(cb_saved));
 	}
 
 	res = BPF_PROG_RUN(prog, skb);
 
 	if (unlikely(prog->cb_access))
-		memcpy(cb_data, saved_cb, sizeof(saved_cb));
+		memcpy(cb_data, cb_saved, sizeof(cb_saved));
 
 	return res;
 }
@@ -376,10 +394,11 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
 static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
 					struct sk_buff *skb)
 {
-	u8 *cb_data = qdisc_skb_cb(skb)->data;
+	u8 *cb_data = bpf_skb_cb(skb);
 
 	if (unlikely(prog->cb_access))
-		memset(cb_data, 0, QDISC_CB_PRIV_LEN);
+		memset(cb_data, 0, BPF_SKB_CB_LEN);
+
 	return BPF_PROG_RUN(prog, skb);
 }
 
@@ -447,6 +466,8 @@ void bpf_prog_destroy(struct bpf_prog *fp);
 
 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
 int sk_attach_bpf(u32 ufd, struct sock *sk);
+int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
+int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
 int sk_detach_filter(struct sock *sk);
 int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
 		  unsigned int len);
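bpf_prog_run_save_cb() above snapshots the qdisc cb scratch area, zeroes it so an unprivileged program cannot observe stale bytes, runs the program, and puts the protocol data back — and only pays that cost when prog->cb_access says the program touches cb[]. A user-space sketch of the same save/zero/run/restore pattern (hypothetical, not the kernel function):

#include <stdint.h>
#include <string.h>

#define CB_LEN 20	/* mirrors QDISC_CB_PRIV_LEN */

static uint32_t run_with_scratch_cb(uint32_t (*prog)(uint8_t *cb),
				    uint8_t *cb, int prog_touches_cb)
{
	uint8_t saved[CB_LEN];
	uint32_t res;

	if (prog_touches_cb) {
		memcpy(saved, cb, CB_LEN);	/* preserve protocol data */
		memset(cb, 0, CB_LEN);		/* don't leak stale bytes */
	}
	res = prog(cb);
	if (prog_touches_cb)
		memcpy(cb, saved, CB_LEN);	/* put protocol data back */
	return res;
}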
diff --git a/include/linux/hashtable.h b/include/linux/hashtable.h
index 519b6e2d769e..661e5c2a8e2a 100644
--- a/include/linux/hashtable.h
+++ b/include/linux/hashtable.h
@@ -16,6 +16,10 @@
 	struct hlist_head name[1 << (bits)] = \
 			{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
 
+#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \
+	struct hlist_head name[1 << (bits)] __read_mostly = \
+			{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
+
 #define DECLARE_HASHTABLE(name, bits) \
 	struct hlist_head name[1 << (bits)]
 
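DEFINE_READ_MOSTLY_HASHTABLE is used exactly like DEFINE_HASHTABLE; the only difference is the __read_mostly annotation, which places the bucket array in the read-mostly data section to reduce false sharing with frequently written data. A hypothetical usage sketch:

/* A statically sized 8-bucket table (3 bits -> 1 << 3 buckets)
 * whose buckets are written rarely after boot. */
#include <linux/hashtable.h>

static DEFINE_READ_MOSTLY_HASHTABLE(example_table, 3);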
diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h
index 1acb1445e05f..e31bcd4c7859 100644
--- a/include/linux/hdlc.h
+++ b/include/linux/hdlc.h
@@ -101,7 +101,7 @@ netdev_tx_t hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev);
 int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto,
 			 size_t size);
 /* May be used by hardware driver to gain control over HDLC device */
-void detach_hdlc_protocol(struct net_device *dev);
+int detach_hdlc_protocol(struct net_device *dev);
 
 static __inline__ __be16 hdlc_type_trans(struct sk_buff *skb,
 					 struct net_device *dev)
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index b49cf923becc..ba7a9b0c7c57 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -91,7 +91,6 @@ enum {
 	PPPOX_CONNECTED = 1, /* connection established ==TCP_ESTABLISHED */
 	PPPOX_BOUND = 2, /* bound to ppp device */
 	PPPOX_RELAY = 4, /* forwarding is enabled */
-	PPPOX_ZOMBIE = 8, /* dead, but still bound to ppp device */
 	PPPOX_DEAD = 16 /* dead, useless, please clean me up!*/
 };
 
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index a6aa970758a2..b84e49c3a738 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -164,6 +164,7 @@ struct team_mode {
 	size_t priv_size;
 	size_t port_priv_size;
 	const struct team_mode_ops *ops;
+	enum netdev_lag_tx_type lag_tx_type;
 };
 
 #define TEAM_PORT_HASHBITS 4
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 67ce5bd3b56a..a5f6ce6b578c 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -73,7 +73,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
 /* found in socket.c */
 extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
 
-static inline bool is_vlan_dev(struct net_device *dev)
+static inline bool is_vlan_dev(const struct net_device *dev)
 {
 	return dev->priv_flags & IFF_802_1Q_VLAN;
 }
@@ -621,7 +621,7 @@ static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
 						    NETIF_F_SG |
 						    NETIF_F_HIGHDMA |
 						    NETIF_F_FRAGLIST |
-						    NETIF_F_GEN_CSUM |
+						    NETIF_F_HW_CSUM |
 						    NETIF_F_HW_VLAN_CTAG_TX |
 						    NETIF_F_HW_VLAN_STAG_TX);
 
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index 0e707f0c1a3e..7c27fa1030e8 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -3,6 +3,7 @@
 
 #include <uapi/linux/inet_diag.h>
 
+struct net;
 struct sock;
 struct inet_hashinfo;
 struct nlattr;
@@ -23,6 +24,10 @@ struct inet_diag_handler {
 	void	(*idiag_get_info)(struct sock *sk,
 				  struct inet_diag_msg *r,
 				  void *info);
+
+	int	(*destroy)(struct sk_buff *in_skb,
+			   const struct inet_diag_req_v2 *req);
+
 	__u16	idiag_type;
 	__u16	idiag_info_size;
 };
@@ -41,6 +46,10 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
 			    struct sk_buff *in_skb, const struct nlmsghdr *nlh,
 			    const struct inet_diag_req_v2 *req);
 
+struct sock *inet_diag_find_one_icsk(struct net *net,
+				     struct inet_hashinfo *hashinfo,
+				     const struct inet_diag_req_v2 *req);
+
 int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
 
 extern int inet_diag_register(const struct inet_diag_handler *handler);
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 5d4e9c4b821d..af51df35d749 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -274,6 +274,8 @@ void pr_cont_kernfs_path(struct kernfs_node *kn);
 struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn);
 struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent,
 					   const char *name, const void *ns);
+struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent,
+					   const char *path, const void *ns);
 void kernfs_get(struct kernfs_node *kn);
 void kernfs_put(struct kernfs_node *kn);
 
@@ -350,6 +352,10 @@ static inline struct kernfs_node *
 kernfs_find_and_get_ns(struct kernfs_node *parent, const char *name,
 		       const void *ns)
 { return NULL; }
+static inline struct kernfs_node *
+kernfs_walk_and_get_ns(struct kernfs_node *parent, const char *path,
+		       const void *ns)
+{ return NULL; }
 
 static inline void kernfs_get(struct kernfs_node *kn) { }
 static inline void kernfs_put(struct kernfs_node *kn) { }
@@ -431,6 +437,12 @@ kernfs_find_and_get(struct kernfs_node *kn, const char *name)
 }
 
 static inline struct kernfs_node *
+kernfs_walk_and_get(struct kernfs_node *kn, const char *path)
+{
+	return kernfs_walk_and_get_ns(kn, path, NULL);
+}
+
+static inline struct kernfs_node *
 kernfs_create_dir(struct kernfs_node *parent, const char *name, umode_t mode,
 		  void *priv)
 {
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index b42963bc81dd..5bfd99d1a40a 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -11,6 +11,55 @@
 
 #include <uapi/linux/mdio.h>
 
+struct mii_bus;
+
+struct mdio_device {
+	struct device dev;
+
+	const struct dev_pm_ops *pm_ops;
+	struct mii_bus *bus;
+
+	int (*bus_match)(struct device *dev, struct device_driver *drv);
+	void (*device_free)(struct mdio_device *mdiodev);
+	void (*device_remove)(struct mdio_device *mdiodev);
+
+	/* Bus address of the MDIO device (0-31) */
+	int addr;
+	int flags;
+};
+#define to_mdio_device(d) container_of(d, struct mdio_device, dev)
+
+/* struct mdio_driver_common: Common to all MDIO drivers */
+struct mdio_driver_common {
+	struct device_driver driver;
+	int flags;
+};
+#define MDIO_DEVICE_FLAG_PHY 1
+#define to_mdio_common_driver(d) \
+	container_of(d, struct mdio_driver_common, driver)
+
+/* struct mdio_driver: Generic MDIO driver */
+struct mdio_driver {
+	struct mdio_driver_common mdiodrv;
+
+	/*
+	 * Called during discovery. Used to set
+	 * up device-specific structures, if any
+	 */
+	int (*probe)(struct mdio_device *mdiodev);
+
+	/* Clears up any memory if needed */
+	void (*remove)(struct mdio_device *mdiodev);
+};
+#define to_mdio_driver(d) \
+	container_of(to_mdio_common_driver(d), struct mdio_driver, mdiodrv)
+
+void mdio_device_free(struct mdio_device *mdiodev);
+struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr);
+int mdio_device_register(struct mdio_device *mdiodev);
+void mdio_device_remove(struct mdio_device *mdiodev);
+int mdio_driver_register(struct mdio_driver *drv);
+void mdio_driver_unregister(struct mdio_driver *drv);
 
 static inline bool mdio_phy_id_is_c45(int phy_id)
 {
@@ -173,4 +222,33 @@ static inline u16 ethtool_adv_to_mmd_eee_adv_t(u32 adv)
 	return reg;
 }
 
+int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
+int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum);
+int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
+int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val);
+
+int mdiobus_register_device(struct mdio_device *mdiodev);
+int mdiobus_unregister_device(struct mdio_device *mdiodev);
+bool mdiobus_is_registered_device(struct mii_bus *bus, int addr);
+struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr);
+
+/**
+ * module_mdio_driver() - Helper macro for registering mdio drivers
+ *
+ * Helper macro for MDIO drivers which do not do anything special in module
+ * init/exit. Each module may only use this macro once, and calling it
+ * replaces module_init() and module_exit().
+ */
+#define mdio_module_driver(_mdio_driver)			\
+static int __init mdio_module_init(void)			\
+{								\
+	return mdio_driver_register(&_mdio_driver);		\
+}								\
+module_init(mdio_module_init);					\
+static void __exit mdio_module_exit(void)			\
+{								\
+	mdio_driver_unregister(&_mdio_driver);			\
+}								\
+module_exit(mdio_module_exit)
+
 #endif /* __LINUX_MDIO_H__ */
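The mdio_module_driver() helper above removes the module init/exit boilerplate from drivers that only need to register themselves. A hypothetical skeleton of a driver built on the new mdio_device/mdio_driver structures (all names here are made up for illustration):

#include <linux/device.h>
#include <linux/mdio.h>
#include <linux/module.h>

static int foo_mdio_probe(struct mdio_device *mdiodev)
{
	/* set up device-specific state for the device at mdiodev->addr */
	dev_info(&mdiodev->dev, "foo device at addr %d\n", mdiodev->addr);
	return 0;
}

static void foo_mdio_remove(struct mdio_device *mdiodev)
{
	/* tear down anything allocated in probe */
}

static struct mdio_driver foo_mdio_driver = {
	.probe = foo_mdio_probe,
	.remove = foo_mdio_remove,
	.mdiodrv.driver = {
		.name = "foo-mdio",
	},
};

mdio_module_driver(foo_mdio_driver);

MODULE_LICENSE("GPL");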
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index 5a06d969338e..2e8af001c5da 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -75,6 +75,11 @@ static inline int mlx4_is_bonded(struct mlx4_dev *dev)
 	return !!(dev->flags & MLX4_FLAG_BONDED);
 }
 
+static inline int mlx4_is_mf_bonded(struct mlx4_dev *dev)
+{
+	return (mlx4_is_bonded(dev) && mlx4_is_mfunc(dev));
+}
+
 struct mlx4_port_map {
 	u8 port1;
 	u8 port2;
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 0b473cbfa7ef..7be845e30689 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -251,6 +251,7 @@ enum mlx5_event {
 	MLX5_EVENT_TYPE_PAGE_REQUEST = 0xb,
 
 	MLX5_EVENT_TYPE_PAGE_FAULT = 0xc,
+	MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd,
 };
 
 enum {
@@ -442,9 +443,12 @@ struct mlx5_init_seg {
 	__be32			rsvd1[120];
 	__be32			initializing;
 	struct health_buffer	health;
-	__be32			rsvd2[884];
+	__be32			rsvd2[880];
+	__be32			internal_timer_h;
+	__be32			internal_timer_l;
+	__be32			rsrv3[2];
 	__be32			health_counter;
-	__be32			rsvd3[1019];
+	__be32			rsvd4[1019];
 	__be64			ieee1588_clk;
 	__be32			ieee1588_clk_type;
 	__be32			clr_intx;
@@ -520,6 +524,12 @@ struct mlx5_eqe_page_fault {
 	__be32 flags_qpn;
 } __packed;
 
+struct mlx5_eqe_vport_change {
+	u8		rsvd0[2];
+	__be16		vport_num;
+	__be32		rsvd1[6];
+} __packed;
+
 union ev_data {
 	__be32				raw[7];
 	struct mlx5_eqe_cmd		cmd;
@@ -532,6 +542,7 @@ union ev_data {
 	struct mlx5_eqe_stall_vl	stall_vl;
 	struct mlx5_eqe_page_req	req_pages;
 	struct mlx5_eqe_page_fault	page_fault;
+	struct mlx5_eqe_vport_change	vport_change;
 } __packed;
 
 struct mlx5_eqe {
@@ -593,7 +604,8 @@ struct mlx5_cqe64 {
 	__be32		imm_inval_pkey;
 	u8		rsvd40[4];
 	__be32		byte_cnt;
-	__be64		timestamp;
+	__be32		timestamp_h;
+	__be32		timestamp_l;
 	__be32		sop_drop_qpn;
 	__be16		wqe_counter;
 	u8		signature;
@@ -615,6 +627,16 @@ static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe)
 	return !!(cqe->l4_hdr_type_etc & 0x1);
 }
 
+static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
+{
+	u32 hi, lo;
+
+	hi = be32_to_cpu(cqe->timestamp_h);
+	lo = be32_to_cpu(cqe->timestamp_l);
+
+	return (u64)lo | ((u64)hi << 32);
+}
+
 enum {
 	CQE_L4_HDR_TYPE_NONE = 0x0,
 	CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1,
@@ -1067,6 +1089,12 @@ enum {
 };
 
 enum {
+	MLX5_ESW_VPORT_ADMIN_STATE_DOWN = 0x0,
+	MLX5_ESW_VPORT_ADMIN_STATE_UP = 0x1,
+	MLX5_ESW_VPORT_ADMIN_STATE_AUTO = 0x2,
+};
+
+enum {
 	MLX5_L3_PROT_TYPE_IPV4 = 0,
 	MLX5_L3_PROT_TYPE_IPV6 = 1,
 };
@@ -1102,6 +1130,12 @@ enum {
 	MLX5_FLOW_CONTEXT_DEST_TYPE_TIR = 2,
 };
 
+enum mlx5_list_type {
+	MLX5_NVPRT_LIST_TYPE_UC = 0x0,
+	MLX5_NVPRT_LIST_TYPE_MC = 0x1,
+	MLX5_NVPRT_LIST_TYPE_VLAN = 0x2,
+};
+
 enum {
 	MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
 	MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1,
@@ -1124,6 +1158,8 @@ enum mlx5_cap_type {
 	MLX5_CAP_IPOIB_OFFLOADS,
 	MLX5_CAP_EOIB_OFFLOADS,
 	MLX5_CAP_FLOW_TABLE,
+	MLX5_CAP_ESWITCH_FLOW_TABLE,
+	MLX5_CAP_ESWITCH,
 	/* NUM OF CAP Types */
 	MLX5_CAP_NUM
 };
@@ -1161,6 +1197,28 @@ enum mlx5_cap_type {
 #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
 	MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
 
+#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
+	MLX5_GET(flow_table_eswitch_cap, \
+		 mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+
+#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
+	MLX5_GET(flow_table_eswitch_cap, \
+		 mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+
+#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
+	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)
+
+#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
+	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)
+
+#define MLX5_CAP_ESW(mdev, cap) \
+	MLX5_GET(e_switch_cap, \
+		 mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap)
+
+#define MLX5_CAP_ESW_MAX(mdev, cap) \
+	MLX5_GET(e_switch_cap, \
+		 mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap)
+
 #define MLX5_CAP_ODP(mdev, cap)\
 	MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
 
@@ -1200,4 +1258,6 @@ static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
 	return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
 }
 
+#define MLX5_BY_PASS_NUM_PRIOS 9
+
 #endif /* MLX5_DEVICE_H */
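The new CQE layout carries the 64-bit timestamp as two big-endian 32-bit halves, and get_cqe_ts() above reassembles them. The same shift-and-or reassembly in a stand-alone, user-space form (hypothetical values; the halves are assumed already converted to host order, as be32_to_cpu does in the kernel version):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hi = 0x00000001, lo = 0x80000000;
	uint64_t ts = (uint64_t)lo | ((uint64_t)hi << 32);

	printf("ts = 0x%llx\n", (unsigned long long)ts);	/* 0x180000000 */
	return 0;
}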
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 5c857f2a20d7..2fd7019f69db 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -426,11 +426,23 @@ struct mlx5_mr_table {
 	struct radix_tree_root	tree;
 };
 
+struct mlx5_vf_context {
+	int	enabled;
+};
+
+struct mlx5_core_sriov {
+	struct mlx5_vf_context	*vfs_ctx;
+	int			num_vfs;
+	int			enabled_vfs;
+};
+
 struct mlx5_irq_info {
 	cpumask_var_t mask;
 	char name[MLX5_MAX_IRQ_NAME];
 };
 
+struct mlx5_eswitch;
+
 struct mlx5_priv {
 	char			name[MLX5_MAX_NAME_LEN];
 	struct mlx5_eq_table	eq_table;
@@ -447,6 +459,7 @@ struct mlx5_priv {
 	int			fw_pages;
 	atomic_t		reg_pages;
 	struct list_head	free_list;
+	int			vfs_pages;
 
 	struct mlx5_core_health health;
 
@@ -485,6 +498,12 @@ struct mlx5_priv {
 	struct list_head	dev_list;
 	struct list_head	ctx_list;
 	spinlock_t		ctx_lock;
+
+	struct mlx5_eswitch	*eswitch;
+	struct mlx5_core_sriov	sriov;
+	unsigned long		pci_dev_data;
+	struct mlx5_flow_root_namespace *root_ns;
+	struct mlx5_flow_root_namespace *fdb_root_ns;
 };
 
 enum mlx5_device_state {
@@ -739,6 +758,8 @@ void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
 void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
 int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
 void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
+int mlx5_sriov_init(struct mlx5_core_dev *dev);
+int mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
 void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
 				 s32 npages);
 int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
@@ -884,6 +905,15 @@ struct mlx5_profile {
 	} mr_cache[MAX_MR_CACHE_ENTRIES];
 };
 
+enum {
+	MLX5_PCI_DEV_IS_VF = 1 << 0,
+};
+
+static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
+{
+	return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
+}
+
 static inline u16 mlx5_get_gid_table_len(u16 param)
 {
 	if (param > 4) {
diff --git a/include/linux/mlx5/flow_table.h b/include/linux/mlx5/flow_table.h
deleted file mode 100644
index 5f922c6d4fc2..000000000000
--- a/include/linux/mlx5/flow_table.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- *   copyright notice, this list of conditions and the following
- *   disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- *   copyright notice, this list of conditions and the following
- *   disclaimer in the documentation and/or other materials
- *   provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MLX5_FLOW_TABLE_H
-#define MLX5_FLOW_TABLE_H
-
-#include <linux/mlx5/driver.h>
-
-struct mlx5_flow_table_group {
-	u8	log_sz;
-	u8	match_criteria_enable;
-	u32	match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
-};
-
-void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
-			     u16 num_groups,
-			     struct mlx5_flow_table_group *group);
-void mlx5_destroy_flow_table(void *flow_table);
-int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
-			      void *match_criteria, void *flow_context,
-			      u32 *flow_index);
-void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index);
-u32 mlx5_get_flow_table_id(void *flow_table);
-
-#endif /* MLX5_FLOW_TABLE_H */
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
new file mode 100644
index 000000000000..8230caa3fb6e
--- /dev/null
+++ b/include/linux/mlx5/fs.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials
+ *   provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _MLX5_FS_
+#define _MLX5_FS_
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/mlx5_ifc.h>
+
+#define MLX5_FS_DEFAULT_FLOW_TAG 0x0
+
+#define LEFTOVERS_RULE_NUM 2
+static inline void build_leftovers_ft_param(int *priority,
+					    int *n_ent,
+					    int *n_grp)
+{
+	*priority = 0; /* Priority of leftovers_prio-0 */
+	*n_ent = LEFTOVERS_RULE_NUM;
+	*n_grp = LEFTOVERS_RULE_NUM;
+}
+
+enum mlx5_flow_namespace_type {
+	MLX5_FLOW_NAMESPACE_BYPASS,
+	MLX5_FLOW_NAMESPACE_KERNEL,
+	MLX5_FLOW_NAMESPACE_LEFTOVERS,
+	MLX5_FLOW_NAMESPACE_FDB,
+};
+
+struct mlx5_flow_table;
+struct mlx5_flow_group;
+struct mlx5_flow_rule;
+struct mlx5_flow_namespace;
+
+struct mlx5_flow_destination {
+	enum mlx5_flow_destination_type	type;
+	union {
+		u32			tir_num;
+		struct mlx5_flow_table	*ft;
+		u32			vport_num;
+	};
+};
+
+struct mlx5_flow_namespace *
+mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
+			enum mlx5_flow_namespace_type type);
+
+struct mlx5_flow_table *
+mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
+				    int prio,
+				    int num_flow_table_entries,
+				    int max_num_groups);
+
+struct mlx5_flow_table *
+mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+		       int prio,
+		       int num_flow_table_entries);
+int mlx5_destroy_flow_table(struct mlx5_flow_table *ft);
+
+/* inbox should be set with the following values:
+ * start_flow_index
+ * end_flow_index
+ * match_criteria_enable
+ * match_criteria
+ */
+struct mlx5_flow_group *
+mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
+void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
+
+/* Single destination per rule.
+ * Group ID is implied by the match criteria.
+ */
+struct mlx5_flow_rule *
+mlx5_add_flow_rule(struct mlx5_flow_table *ft,
+		   u8 match_criteria_enable,
+		   u32 *match_criteria,
+		   u32 *match_value,
+		   u32 action,
+		   u32 flow_tag,
+		   struct mlx5_flow_destination *dest);
+void mlx5_del_flow_rule(struct mlx5_flow_rule *fr);
+
+#endif
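The new fs.h replaces the old flow_table.h interface with namespaces, tables, groups and rules. A hedged sketch of how a consumer might drive it — grab the kernel namespace, create an auto-grouped table, and add a catch-all rule forwarding to a TIR. Error handling is abbreviated; the priority, table size, group count and TIR number are made-up example values, and MLX5_FLOW_CONTEXT_ACTION_FWD_DEST is assumed to come from mlx5_ifc.h:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>

static struct mlx5_flow_rule *example_fwd_all_to_tir(struct mlx5_core_dev *dev,
						     u32 tirn)
{
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_TIR,
		.tir_num = tirn,
	};
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_rule *rule;
	u32 *match_c, *match_v;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_KERNEL);
	if (!ns)
		return ERR_PTR(-EOPNOTSUPP);

	ft = mlx5_create_auto_grouped_flow_table(ns, 0 /* prio */,
						 1024 /* entries */,
						 4 /* max groups */);
	if (IS_ERR(ft))
		return ERR_CAST(ft);

	match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
	match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
	if (!match_c || !match_v) {
		rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* zeroed criteria with criteria_enable == 0 matches every packet */
	rule = mlx5_add_flow_rule(ft, 0, match_c, match_v,
				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				  MLX5_FS_DEFAULT_FLOW_TAG, &dest);
out:
	kfree(match_c);
	kfree(match_v);
	if (IS_ERR(rule))
		mlx5_destroy_flow_table(ft);
	return rule;
}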
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 1565324eb620..68d73f82e009 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -185,6 +185,7 @@ enum {
185 MLX5_CMD_OP_MODIFY_RQT = 0x917, 185 MLX5_CMD_OP_MODIFY_RQT = 0x917,
186 MLX5_CMD_OP_DESTROY_RQT = 0x918, 186 MLX5_CMD_OP_DESTROY_RQT = 0x918,
187 MLX5_CMD_OP_QUERY_RQT = 0x919, 187 MLX5_CMD_OP_QUERY_RQT = 0x919,
188 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT = 0x92f,
188 MLX5_CMD_OP_CREATE_FLOW_TABLE = 0x930, 189 MLX5_CMD_OP_CREATE_FLOW_TABLE = 0x930,
189 MLX5_CMD_OP_DESTROY_FLOW_TABLE = 0x931, 190 MLX5_CMD_OP_DESTROY_FLOW_TABLE = 0x931,
190 MLX5_CMD_OP_QUERY_FLOW_TABLE = 0x932, 191 MLX5_CMD_OP_QUERY_FLOW_TABLE = 0x932,
@@ -193,7 +194,8 @@ enum {
193 MLX5_CMD_OP_QUERY_FLOW_GROUP = 0x935, 194 MLX5_CMD_OP_QUERY_FLOW_GROUP = 0x935,
194 MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x936, 195 MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x936,
195 MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x937, 196 MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x937,
196 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY = 0x938 197 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY = 0x938,
198 MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c
197}; 199};
198 200
199struct mlx5_ifc_flow_table_fields_supported_bits { 201struct mlx5_ifc_flow_table_fields_supported_bits {
@@ -256,25 +258,30 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
256 258
257struct mlx5_ifc_flow_table_prop_layout_bits { 259struct mlx5_ifc_flow_table_prop_layout_bits {
258 u8 ft_support[0x1]; 260 u8 ft_support[0x1];
259 u8 reserved_0[0x1f]; 261 u8 reserved_0[0x2];
262 u8 flow_modify_en[0x1];
263 u8 modify_root[0x1];
264 u8 identified_miss_table_mode[0x1];
265 u8 flow_table_modify[0x1];
266 u8 reserved_1[0x19];
260 267
261 u8 reserved_1[0x2]; 268 u8 reserved_2[0x2];
262 u8 log_max_ft_size[0x6]; 269 u8 log_max_ft_size[0x6];
263 u8 reserved_2[0x10]; 270 u8 reserved_3[0x10];
264 u8 max_ft_level[0x8]; 271 u8 max_ft_level[0x8];
265 272
266 u8 reserved_3[0x20]; 273 u8 reserved_4[0x20];
267 274
268 u8 reserved_4[0x18]; 275 u8 reserved_5[0x18];
269 u8 log_max_ft_num[0x8]; 276 u8 log_max_ft_num[0x8];
270 277
271 u8 reserved_5[0x18]; 278 u8 reserved_6[0x18];
272 u8 log_max_destination[0x8]; 279 u8 log_max_destination[0x8];
273 280
274 u8 reserved_6[0x18]; 281 u8 reserved_7[0x18];
275 u8 log_max_flow[0x8]; 282 u8 log_max_flow[0x8];
276 283
277 u8 reserved_7[0x40]; 284 u8 reserved_8[0x40];
278 285
279 struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support; 286 struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support;
280 287
@@ -291,6 +298,22 @@ struct mlx5_ifc_odp_per_transport_service_cap_bits {
291 u8 reserved_1[0x1a]; 298 u8 reserved_1[0x1a];
292}; 299};
293 300
301struct mlx5_ifc_ipv4_layout_bits {
302 u8 reserved_0[0x60];
303
304 u8 ipv4[0x20];
305};
306
307struct mlx5_ifc_ipv6_layout_bits {
308 u8 ipv6[16][0x8];
309};
310
311union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
312 struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
313 struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
314 u8 reserved_0[0x80];
315};
316
294struct mlx5_ifc_fte_match_set_lyr_2_4_bits { 317struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
295 u8 smac_47_16[0x20]; 318 u8 smac_47_16[0x20];
296 319
@@ -321,9 +344,9 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
321 u8 udp_sport[0x10]; 344 u8 udp_sport[0x10];
322 u8 udp_dport[0x10]; 345 u8 udp_dport[0x10];
323 346
324 u8 src_ip[4][0x20]; 347 union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;
325 348
326 u8 dst_ip[4][0x20]; 349 union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
327}; 350};
328 351
329struct mlx5_ifc_fte_match_set_misc_bits { 352struct mlx5_ifc_fte_match_set_misc_bits {
@@ -447,6 +470,29 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
447 u8 reserved_3[0x7200]; 470 u8 reserved_3[0x7200];
448}; 471};
449 472
473struct mlx5_ifc_flow_table_eswitch_cap_bits {
474 u8 reserved_0[0x200];
475
476 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb;
477
478 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_ingress;
479
480 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress;
481
482 u8 reserved_1[0x7800];
483};
484
485struct mlx5_ifc_e_switch_cap_bits {
486 u8 vport_svlan_strip[0x1];
487 u8 vport_cvlan_strip[0x1];
488 u8 vport_svlan_insert[0x1];
489 u8 vport_cvlan_insert_if_not_exist[0x1];
490 u8 vport_cvlan_insert_overwrite[0x1];
491 u8 reserved_0[0x1b];
492
493 u8 reserved_1[0x7e0];
494};
495
450struct mlx5_ifc_per_protocol_networking_offload_caps_bits { 496struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
451 u8 csum_cap[0x1]; 497 u8 csum_cap[0x1];
452 u8 vlan_cap[0x1]; 498 u8 vlan_cap[0x1];
@@ -665,7 +711,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
665 u8 reserved_17[0x1]; 711 u8 reserved_17[0x1];
666 u8 ets[0x1]; 712 u8 ets[0x1];
667 u8 nic_flow_table[0x1]; 713 u8 nic_flow_table[0x1];
668 u8 reserved_18[0x4]; 714 u8 eswitch_flow_table[0x1];
715 u8 early_vf_enable;
716 u8 reserved_18[0x2];
669 u8 local_ca_ack_delay[0x5]; 717 u8 local_ca_ack_delay[0x5];
670 u8 reserved_19[0x6]; 718 u8 reserved_19[0x6];
671 u8 port_type[0x2]; 719 u8 port_type[0x2];
@@ -787,27 +835,36 @@ struct mlx5_ifc_cmd_hca_cap_bits {
787 u8 reserved_60[0x1b]; 835 u8 reserved_60[0x1b];
788 u8 log_max_wq_sz[0x5]; 836 u8 log_max_wq_sz[0x5];
789 837
790 u8 reserved_61[0xa0]; 838 u8 nic_vport_change_event[0x1];
791 839 u8 reserved_61[0xa];
840 u8 log_max_vlan_list[0x5];
792 u8 reserved_62[0x3]; 841 u8 reserved_62[0x3];
842 u8 log_max_current_mc_list[0x5];
843 u8 reserved_63[0x3];
844 u8 log_max_current_uc_list[0x5];
845
846 u8 reserved_64[0x80];
847
848 u8 reserved_65[0x3];
793 u8 log_max_l2_table[0x5]; 849 u8 log_max_l2_table[0x5];
794 u8 reserved_63[0x8]; 850 u8 reserved_66[0x8];
795 u8 log_uar_page_sz[0x10]; 851 u8 log_uar_page_sz[0x10];
796 852
797 u8 reserved_64[0x100]; 853 u8 reserved_67[0x40];
798 854 u8 device_frequency_khz[0x20];
799 u8 reserved_65[0x1f]; 855 u8 reserved_68[0x5f];
800 u8 cqe_zip[0x1]; 856 u8 cqe_zip[0x1];
801 857
802 u8 cqe_zip_timeout[0x10]; 858 u8 cqe_zip_timeout[0x10];
803 u8 cqe_zip_max_num[0x10]; 859 u8 cqe_zip_max_num[0x10];
804 860
805 u8 reserved_66[0x220]; 861 u8 reserved_69[0x220];
806}; 862};
807 863
808enum { 864enum mlx5_flow_destination_type {
809 MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_FLOW_TABLE_ = 0x1, 865 MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0,
810 MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_TIR = 0x2, 866 MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
867 MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
811}; 868};
812 869
813struct mlx5_ifc_dest_format_struct_bits { 870struct mlx5_ifc_dest_format_struct_bits {
@@ -900,6 +957,13 @@ struct mlx5_ifc_mac_address_layout_bits {
900 u8 mac_addr_31_0[0x20]; 957 u8 mac_addr_31_0[0x20];
901}; 958};
902 959
960struct mlx5_ifc_vlan_layout_bits {
961 u8 reserved_0[0x14];
962 u8 vlan[0x0c];
963
964 u8 reserved_1[0x20];
965};
966
903struct mlx5_ifc_cong_control_r_roce_ecn_np_bits { 967struct mlx5_ifc_cong_control_r_roce_ecn_np_bits {
904 u8 reserved_0[0xa0]; 968 u8 reserved_0[0xa0];
905 969
@@ -1829,6 +1893,8 @@ union mlx5_ifc_hca_cap_union_bits {
1829 struct mlx5_ifc_roce_cap_bits roce_cap; 1893 struct mlx5_ifc_roce_cap_bits roce_cap;
1830 struct mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps; 1894 struct mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps;
1831 struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap; 1895 struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
1896 struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
1897 struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
1832 u8 reserved_0[0x8000]; 1898 u8 reserved_0[0x8000];
1833}; 1899};
1834 1900
@@ -2133,24 +2199,35 @@ struct mlx5_ifc_rmpc_bits {
2133 struct mlx5_ifc_wq_bits wq; 2199 struct mlx5_ifc_wq_bits wq;
2134}; 2200};
2135 2201
2136enum {
2137 MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_CURRENT_UC_MAC_ADDRESS = 0x0,
2138};
2139
2140struct mlx5_ifc_nic_vport_context_bits { 2202struct mlx5_ifc_nic_vport_context_bits {
2141 u8 reserved_0[0x1f]; 2203 u8 reserved_0[0x1f];
2142 u8 roce_en[0x1]; 2204 u8 roce_en[0x1];
2143 2205
2144 u8 reserved_1[0x760]; 2206 u8 arm_change_event[0x1];
2207 u8 reserved_1[0x1a];
2208 u8 event_on_mtu[0x1];
2209 u8 event_on_promisc_change[0x1];
2210 u8 event_on_vlan_change[0x1];
2211 u8 event_on_mc_address_change[0x1];
2212 u8 event_on_uc_address_change[0x1];
2145 2213
2146 u8 reserved_2[0x5]; 2214 u8 reserved_2[0xf0];
2215
2216 u8 mtu[0x10];
2217
2218 u8 reserved_3[0x640];
2219
2220 u8 promisc_uc[0x1];
2221 u8 promisc_mc[0x1];
2222 u8 promisc_all[0x1];
2223 u8 reserved_4[0x2];
2147 u8 allowed_list_type[0x3]; 2224 u8 allowed_list_type[0x3];
2148 u8 reserved_3[0xc]; 2225 u8 reserved_5[0xc];
2149 u8 allowed_list_size[0xc]; 2226 u8 allowed_list_size[0xc];
2150 2227
2151 struct mlx5_ifc_mac_address_layout_bits permanent_address; 2228 struct mlx5_ifc_mac_address_layout_bits permanent_address;
2152 2229
2153 u8 reserved_4[0x20]; 2230 u8 reserved_6[0x20];
2154 2231
2155 u8 current_uc_mac_address[0][0x40]; 2232 u8 current_uc_mac_address[0][0x40];
2156}; 2233};
@@ -2263,6 +2340,26 @@ struct mlx5_ifc_hca_vport_context_bits {
2263 u8 reserved_6[0xca0]; 2340 u8 reserved_6[0xca0];
2264}; 2341};
2265 2342
2343struct mlx5_ifc_esw_vport_context_bits {
2344 u8 reserved_0[0x3];
2345 u8 vport_svlan_strip[0x1];
2346 u8 vport_cvlan_strip[0x1];
2347 u8 vport_svlan_insert[0x1];
2348 u8 vport_cvlan_insert[0x2];
2349 u8 reserved_1[0x18];
2350
2351 u8 reserved_2[0x20];
2352
2353 u8 svlan_cfi[0x1];
2354 u8 svlan_pcp[0x3];
2355 u8 svlan_id[0xc];
2356 u8 cvlan_cfi[0x1];
2357 u8 cvlan_pcp[0x3];
2358 u8 cvlan_id[0xc];
2359
2360 u8 reserved_3[0x7a0];
2361};
2362
2266enum { 2363enum {
2267 MLX5_EQC_STATUS_OK = 0x0, 2364 MLX5_EQC_STATUS_OK = 0x0,
2268 MLX5_EQC_STATUS_EQ_WRITE_FAILURE = 0xa, 2365 MLX5_EQC_STATUS_EQ_WRITE_FAILURE = 0xa,
@@ -2769,6 +2866,13 @@ struct mlx5_ifc_set_hca_cap_in_bits {
2769 union mlx5_ifc_hca_cap_union_bits capability; 2866 union mlx5_ifc_hca_cap_union_bits capability;
2770}; 2867};
2771 2868
2869enum {
2870 MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION = 0x0,
2871 MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_TAG = 0x1,
2872 MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST = 0x2,
2873 MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS = 0x3
2874};
2875
2772struct mlx5_ifc_set_fte_out_bits { 2876struct mlx5_ifc_set_fte_out_bits {
2773 u8 status[0x8]; 2877 u8 status[0x8];
2774 u8 reserved_0[0x18]; 2878 u8 reserved_0[0x18];
@@ -2793,11 +2897,14 @@ struct mlx5_ifc_set_fte_in_bits {
2793 u8 reserved_4[0x8]; 2897 u8 reserved_4[0x8];
2794 u8 table_id[0x18]; 2898 u8 table_id[0x18];
2795 2899
2796 u8 reserved_5[0x40]; 2900 u8 reserved_5[0x18];
2901 u8 modify_enable_mask[0x8];
2902
2903 u8 reserved_6[0x20];
2797 2904
2798 u8 flow_index[0x20]; 2905 u8 flow_index[0x20];
2799 2906
2800 u8 reserved_6[0xe0]; 2907 u8 reserved_7[0xe0];
2801 2908
2802 struct mlx5_ifc_flow_context_bits flow_context; 2909 struct mlx5_ifc_flow_context_bits flow_context;
2803}; 2910};
@@ -2940,6 +3047,7 @@ struct mlx5_ifc_query_vport_state_out_bits {
2940 3047
2941enum { 3048enum {
2942 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT = 0x0, 3049 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT = 0x0,
3050 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT = 0x1,
2943}; 3051};
2944 3052
2945struct mlx5_ifc_query_vport_state_in_bits { 3053struct mlx5_ifc_query_vport_state_in_bits {
@@ -3700,6 +3808,64 @@ struct mlx5_ifc_query_flow_group_in_bits {
3700 u8 reserved_5[0x120]; 3808 u8 reserved_5[0x120];
3701}; 3809};
3702 3810
3811struct mlx5_ifc_query_esw_vport_context_out_bits {
3812 u8 status[0x8];
3813 u8 reserved_0[0x18];
3814
3815 u8 syndrome[0x20];
3816
3817 u8 reserved_1[0x40];
3818
3819 struct mlx5_ifc_esw_vport_context_bits esw_vport_context;
3820};
3821
3822struct mlx5_ifc_query_esw_vport_context_in_bits {
3823 u8 opcode[0x10];
3824 u8 reserved_0[0x10];
3825
3826 u8 reserved_1[0x10];
3827 u8 op_mod[0x10];
3828
3829 u8 other_vport[0x1];
3830 u8 reserved_2[0xf];
3831 u8 vport_number[0x10];
3832
3833 u8 reserved_3[0x20];
3834};
3835
3836struct mlx5_ifc_modify_esw_vport_context_out_bits {
3837 u8 status[0x8];
3838 u8 reserved_0[0x18];
3839
3840 u8 syndrome[0x20];
3841
3842 u8 reserved_1[0x40];
3843};
3844
3845struct mlx5_ifc_esw_vport_context_fields_select_bits {
3846 u8 reserved[0x1c];
3847 u8 vport_cvlan_insert[0x1];
3848 u8 vport_svlan_insert[0x1];
3849 u8 vport_cvlan_strip[0x1];
3850 u8 vport_svlan_strip[0x1];
3851};
3852
3853struct mlx5_ifc_modify_esw_vport_context_in_bits {
3854 u8 opcode[0x10];
3855 u8 reserved_0[0x10];
3856
3857 u8 reserved_1[0x10];
3858 u8 op_mod[0x10];
3859
3860 u8 other_vport[0x1];
3861 u8 reserved_2[0xf];
3862 u8 vport_number[0x10];
3863
3864 struct mlx5_ifc_esw_vport_context_fields_select_bits field_select;
3865
3866 struct mlx5_ifc_esw_vport_context_bits esw_vport_context;
3867};
3868
3703struct mlx5_ifc_query_eq_out_bits { 3869struct mlx5_ifc_query_eq_out_bits {
3704 u8 status[0x8]; 3870 u8 status[0x8];
3705 u8 reserved_0[0x18]; 3871 u8 reserved_0[0x18];
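
A minimal sketch of the e-switch vport modify command these layouts describe: only the bits flagged in field_select are applied, so enabling CVLAN stripping on a vport touches exactly two fields. The opcode name is an assumption consistent with the naming above:

	static void example_prepare_cvlan_strip(u32 *in, u16 vport)
	{
		MLX5_SET(modify_esw_vport_context_in, in, opcode,
			 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
		MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
		MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
		/* select the field to apply, then set its new value */
		MLX5_SET(modify_esw_vport_context_in, in,
			 field_select.vport_cvlan_strip, 1);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_strip, 1);
	}
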
@@ -4228,7 +4394,10 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits {
4228}; 4394};
4229 4395
4230struct mlx5_ifc_modify_nic_vport_field_select_bits { 4396struct mlx5_ifc_modify_nic_vport_field_select_bits {
4231 u8 reserved_0[0x1c]; 4397 u8 reserved_0[0x19];
4398 u8 mtu[0x1];
4399 u8 change_event[0x1];
4400 u8 promisc[0x1];
4232 u8 permanent_address[0x1]; 4401 u8 permanent_address[0x1];
4233 u8 addresses_list[0x1]; 4402 u8 addresses_list[0x1];
4234 u8 roce_en[0x1]; 4403 u8 roce_en[0x1];
@@ -5519,12 +5688,16 @@ struct mlx5_ifc_create_flow_table_in_bits {
5519 5688
5520 u8 reserved_4[0x20]; 5689 u8 reserved_4[0x20];
5521 5690
5522 u8 reserved_5[0x8]; 5691 u8 reserved_5[0x4];
5692 u8 table_miss_mode[0x4];
5523 u8 level[0x8]; 5693 u8 level[0x8];
5524 u8 reserved_6[0x8]; 5694 u8 reserved_6[0x8];
5525 u8 log_size[0x8]; 5695 u8 log_size[0x8];
5526 5696
5527 u8 reserved_7[0x120]; 5697 u8 reserved_7[0x8];
5698 u8 table_miss_id[0x18];
5699
5700 u8 reserved_8[0x100];
5528}; 5701};
5529 5702
5530struct mlx5_ifc_create_flow_group_out_bits { 5703struct mlx5_ifc_create_flow_group_out_bits {
@@ -6798,4 +6971,72 @@ union mlx5_ifc_uplink_pci_interface_document_bits {
6798 u8 reserved_0[0x20060]; 6971 u8 reserved_0[0x20060];
6799}; 6972};
6800 6973
6974struct mlx5_ifc_set_flow_table_root_out_bits {
6975 u8 status[0x8];
6976 u8 reserved_0[0x18];
6977
6978 u8 syndrome[0x20];
6979
6980 u8 reserved_1[0x40];
6981};
6982
6983struct mlx5_ifc_set_flow_table_root_in_bits {
6984 u8 opcode[0x10];
6985 u8 reserved_0[0x10];
6986
6987 u8 reserved_1[0x10];
6988 u8 op_mod[0x10];
6989
6990 u8 reserved_2[0x40];
6991
6992 u8 table_type[0x8];
6993 u8 reserved_3[0x18];
6994
6995 u8 reserved_4[0x8];
6996 u8 table_id[0x18];
6997
6998 u8 reserved_5[0x140];
6999};
7000
7001enum {
7002 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID = 0x1,
7003};
7004
7005struct mlx5_ifc_modify_flow_table_out_bits {
7006 u8 status[0x8];
7007 u8 reserved_0[0x18];
7008
7009 u8 syndrome[0x20];
7010
7011 u8 reserved_1[0x40];
7012};
7013
7014struct mlx5_ifc_modify_flow_table_in_bits {
7015 u8 opcode[0x10];
7016 u8 reserved_0[0x10];
7017
7018 u8 reserved_1[0x10];
7019 u8 op_mod[0x10];
7020
7021 u8 reserved_2[0x20];
7022
7023 u8 reserved_3[0x10];
7024 u8 modify_field_select[0x10];
7025
7026 u8 table_type[0x8];
7027 u8 reserved_4[0x18];
7028
7029 u8 reserved_5[0x8];
7030 u8 table_id[0x18];
7031
7032 u8 reserved_6[0x4];
7033 u8 table_miss_mode[0x4];
7034 u8 reserved_7[0x18];
7035
7036 u8 reserved_8[0x8];
7037 u8 table_miss_id[0x18];
7038
7039 u8 reserved_9[0x100];
7040};
7041
6801#endif /* MLX5_IFC_H */ 7042#endif /* MLX5_IFC_H */
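
A minimal sketch tying the new flow-table commands together: the miss path of an existing table can be re-pointed at another table by selecting MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID in modify_field_select. The opcode name and the miss-mode value 1 (forward misses to table_miss_id) are assumptions consistent with this flow-steering series:

	static void example_repoint_miss(u32 *in, u8 table_type,
					 u32 table_id, u32 new_miss_id)
	{
		MLX5_SET(modify_flow_table_in, in, opcode,
			 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
		MLX5_SET(modify_flow_table_in, in, table_type, table_type);
		MLX5_SET(modify_flow_table_in, in, table_id, table_id);
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
		MLX5_SET(modify_flow_table_in, in, table_miss_mode, 1);
		MLX5_SET(modify_flow_table_in, in, table_miss_id, new_miss_id);
	}
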
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 967e0fd06e89..638f2ca7a527 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -34,9 +34,17 @@
34#define __MLX5_VPORT_H__ 34#define __MLX5_VPORT_H__
35 35
36#include <linux/mlx5/driver.h> 36#include <linux/mlx5/driver.h>
37#include <linux/mlx5/device.h>
37 38
38u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod); 39u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport);
39void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr); 40u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
41 u16 vport);
42int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
43 u16 vport, u8 state);
44int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
45 u16 vport, u8 *addr);
46int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
47 u16 vport, u8 *addr);
40int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport, 48int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
41 u8 port_num, u16 vf_num, u16 gid_index, 49 u8 port_num, u16 vf_num, u16 gid_index,
42 union ib_gid *gid); 50 union ib_gid *gid);
@@ -51,5 +59,30 @@ int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
51 u64 *sys_image_guid); 59 u64 *sys_image_guid);
52int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev, 60int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
53 u64 *node_guid); 61 u64 *node_guid);
62int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
63 u32 vport,
64 enum mlx5_list_type list_type,
65 u8 addr_list[][ETH_ALEN],
66 int *list_size);
67int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
68 enum mlx5_list_type list_type,
69 u8 addr_list[][ETH_ALEN],
70 int list_size);
71int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
72 u32 vport,
73 int *promisc_uc,
74 int *promisc_mc,
75 int *promisc_all);
76int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
77 int promisc_uc,
78 int promisc_mc,
79 int promisc_all);
80int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
81 u32 vport,
82 u16 vlans[],
83 int *size);
84int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
85 u16 vlans[],
86 int list_size);
54 87
55#endif /* __MLX5_VPORT_H__ */ 88#endif /* __MLX5_VPORT_H__ */
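
A minimal sketch of a consumer of the extended vport API above, e.g. an Ethernet driver mirroring its software promiscuous state into the NIC vport context and reading back a function's MAC address (error paths trimmed, pr_debug for illustration only):

	static int example_sync_vport(struct mlx5_core_dev *mdev, u16 vport,
				      bool promisc)
	{
		u8 mac[ETH_ALEN];
		int err;

		err = mlx5_modify_nic_vport_promisc(mdev, promisc, promisc,
						    promisc);
		if (err)
			return err;

		err = mlx5_query_nic_vport_mac_address(mdev, vport, mac);
		if (!err)
			pr_debug("vport %u mac %pM\n", vport, mac);
		return err;
	}
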
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index 79aaa9fc1a15..bf9b322cb0b0 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -9,38 +9,28 @@
9#ifdef CONFIG_IP_MROUTE 9#ifdef CONFIG_IP_MROUTE
10static inline int ip_mroute_opt(int opt) 10static inline int ip_mroute_opt(int opt)
11{ 11{
12 return (opt >= MRT_BASE) && (opt <= MRT_MAX); 12 return opt >= MRT_BASE && opt <= MRT_MAX;
13} 13}
14#else
15static inline int ip_mroute_opt(int opt)
16{
17 return 0;
18}
19#endif
20 14
21#ifdef CONFIG_IP_MROUTE 15int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int);
22extern int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int); 16int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
23extern int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *); 17int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg);
24extern int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg); 18int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
25extern int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); 19int ip_mr_init(void);
26extern int ip_mr_init(void);
27#else 20#else
28static inline 21static inline int ip_mroute_setsockopt(struct sock *sock, int optname,
29int ip_mroute_setsockopt(struct sock *sock, 22 char __user *optval, unsigned int optlen)
30 int optname, char __user *optval, unsigned int optlen)
31{ 23{
32 return -ENOPROTOOPT; 24 return -ENOPROTOOPT;
33} 25}
34 26
35static inline 27static inline int ip_mroute_getsockopt(struct sock *sock, int optname,
36int ip_mroute_getsockopt(struct sock *sock, 28 char __user *optval, int __user *optlen)
37 int optname, char __user *optval, int __user *optlen)
38{ 29{
39 return -ENOPROTOOPT; 30 return -ENOPROTOOPT;
40} 31}
41 32
42static inline 33static inline int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
43int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
44{ 34{
45 return -ENOIOCTLCMD; 35 return -ENOIOCTLCMD;
46} 36}
@@ -49,6 +39,11 @@ static inline int ip_mr_init(void)
49{ 39{
50 return 0; 40 return 0;
51} 41}
42
43static inline int ip_mroute_opt(int opt)
44{
45 return 0;
46}
52#endif 47#endif
53 48
54struct vif_device { 49struct vif_device {
@@ -64,6 +59,32 @@ struct vif_device {
64 59
65#define VIFF_STATIC 0x8000 60#define VIFF_STATIC 0x8000
66 61
62#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)
63#define MFC_LINES 64
64
65struct mr_table {
66 struct list_head list;
67 possible_net_t net;
68 u32 id;
69 struct sock __rcu *mroute_sk;
70 struct timer_list ipmr_expire_timer;
71 struct list_head mfc_unres_queue;
72 struct list_head mfc_cache_array[MFC_LINES];
73 struct vif_device vif_table[MAXVIFS];
74 int maxvif;
75 atomic_t cache_resolve_queue_len;
76 bool mroute_do_assert;
77 bool mroute_do_pim;
78 int mroute_reg_vif_num;
79};
80
81/* mfc_flags:
82 * MFC_STATIC - the entry was added statically (not by a routing daemon)
83 */
84enum {
85 MFC_STATIC = BIT(0),
86};
87
67struct mfc_cache { 88struct mfc_cache {
68 struct list_head list; 89 struct list_head list;
69 __be32 mfc_mcastgrp; /* Group the entry belongs to */ 90 __be32 mfc_mcastgrp; /* Group the entry belongs to */
@@ -89,19 +110,14 @@ struct mfc_cache {
89 struct rcu_head rcu; 110 struct rcu_head rcu;
90}; 111};
91 112
92#define MFC_STATIC 1
93#define MFC_NOTIFY 2
94
95#define MFC_LINES 64
96
97#ifdef __BIG_ENDIAN 113#ifdef __BIG_ENDIAN
98#define MFC_HASH(a,b) (((((__force u32)(__be32)a)>>24)^(((__force u32)(__be32)b)>>26))&(MFC_LINES-1)) 114#define MFC_HASH(a,b) (((((__force u32)(__be32)a)>>24)^(((__force u32)(__be32)b)>>26))&(MFC_LINES-1))
99#else 115#else
100#define MFC_HASH(a,b) ((((__force u32)(__be32)a)^(((__force u32)(__be32)b)>>2))&(MFC_LINES-1)) 116#define MFC_HASH(a,b) ((((__force u32)(__be32)a)^(((__force u32)(__be32)b)>>2))&(MFC_LINES-1))
101#endif 117#endif
102 118
103struct rtmsg; 119struct rtmsg;
104extern int ipmr_get_route(struct net *net, struct sk_buff *skb, 120int ipmr_get_route(struct net *net, struct sk_buff *skb,
105 __be32 saddr, __be32 daddr, 121 __be32 saddr, __be32 daddr,
106 struct rtmsg *rtm, int nowait); 122 struct rtmsg *rtm, int nowait);
107#endif 123#endif
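
With mr_table and MFC_HASH now visible in this header, the cache lookup they support looks roughly like the sketch below (modelled loosely on ipmr.c; the RCU read lock is assumed to be held by the caller):

	static struct mfc_cache *example_mfc_find(struct mr_table *mrt,
						  __be32 origin,
						  __be32 mcastgrp)
	{
		int line = MFC_HASH(mcastgrp, origin);
		struct mfc_cache *c;

		list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
			if (c->mfc_origin == origin &&
			    c->mfc_mcastgrp == mcastgrp)
				return c;
		return NULL;
	}
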
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index f0d87347df19..d9654f0eecb3 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -52,7 +52,7 @@ enum {
52 NETIF_F_GSO_TUNNEL_REMCSUM_BIT, 52 NETIF_F_GSO_TUNNEL_REMCSUM_BIT,
53 53
54 NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ 54 NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */
55 NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */ 55 NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */
56 NETIF_F_FCOE_MTU_BIT, /* Supports max FCoE MTU, 2158 bytes*/ 56 NETIF_F_FCOE_MTU_BIT, /* Supports max FCoE MTU, 2158 bytes*/
57 NETIF_F_NTUPLE_BIT, /* N-tuple filters supported */ 57 NETIF_F_NTUPLE_BIT, /* N-tuple filters supported */
58 NETIF_F_RXHASH_BIT, /* Receive hashing offload */ 58 NETIF_F_RXHASH_BIT, /* Receive hashing offload */
@@ -103,7 +103,7 @@ enum {
103#define NETIF_F_NTUPLE __NETIF_F(NTUPLE) 103#define NETIF_F_NTUPLE __NETIF_F(NTUPLE)
104#define NETIF_F_RXCSUM __NETIF_F(RXCSUM) 104#define NETIF_F_RXCSUM __NETIF_F(RXCSUM)
105#define NETIF_F_RXHASH __NETIF_F(RXHASH) 105#define NETIF_F_RXHASH __NETIF_F(RXHASH)
106#define NETIF_F_SCTP_CSUM __NETIF_F(SCTP_CSUM) 106#define NETIF_F_SCTP_CRC __NETIF_F(SCTP_CRC)
107#define NETIF_F_SG __NETIF_F(SG) 107#define NETIF_F_SG __NETIF_F(SG)
108#define NETIF_F_TSO6 __NETIF_F(TSO6) 108#define NETIF_F_TSO6 __NETIF_F(TSO6)
109#define NETIF_F_TSO_ECN __NETIF_F(TSO_ECN) 109#define NETIF_F_TSO_ECN __NETIF_F(TSO_ECN)
@@ -146,10 +146,12 @@ enum {
146#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \ 146#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \
147 NETIF_F_TSO6 | NETIF_F_UFO) 147 NETIF_F_TSO6 | NETIF_F_UFO)
148 148
149#define NETIF_F_GEN_CSUM NETIF_F_HW_CSUM 149/* List of IP checksum features. Note that NETIF_F_HW_CSUM should not be
150#define NETIF_F_V4_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM) 150 * set in features when NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM is set --
151#define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM) 151 * this would be contradictory.
152#define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM) 152 */
153#define NETIF_F_CSUM_MASK (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \
154 NETIF_F_HW_CSUM)
153 155
154#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) 156#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
155 157
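
A minimal sketch of what this consolidation means for a driver: it advertises exactly one of NETIF_F_HW_CSUM or the protocol-specific bits, and tests for any of the three with the new NETIF_F_CSUM_MASK rather than the removed NETIF_F_ALL_CSUM/NETIF_F_GEN_CSUM aliases:

	static void example_set_features(struct net_device *dev)
	{
		/* device checksums generically, so HW_CSUM alone */
		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG |
				    NETIF_F_SCTP_CRC;
		dev->features |= dev->hw_features;

		if (dev->features & NETIF_F_CSUM_MASK)
			netdev_dbg(dev, "IP checksum offload enabled\n");
	}
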
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3143c847bddb..5ac140dcb789 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -132,7 +132,9 @@ static inline bool dev_xmit_complete(int rc)
132 * used. 132 * used.
133 */ 133 */
134 134
135#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25) 135#if defined(CONFIG_HYPERV_NET)
136# define LL_MAX_HEADER 128
137#elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
136# if defined(CONFIG_MAC80211_MESH) 138# if defined(CONFIG_MAC80211_MESH)
137# define LL_MAX_HEADER 128 139# define LL_MAX_HEADER 128
138# else 140# else
@@ -326,7 +328,8 @@ enum {
326 NAPI_STATE_SCHED, /* Poll is scheduled */ 328 NAPI_STATE_SCHED, /* Poll is scheduled */
327 NAPI_STATE_DISABLE, /* Disable pending */ 329 NAPI_STATE_DISABLE, /* Disable pending */
328 NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ 330 NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
329 NAPI_STATE_HASHED, /* In NAPI hash */ 331 NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */
332 NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
330}; 333};
331 334
332enum gro_result { 335enum gro_result {
@@ -461,19 +464,13 @@ static inline void napi_complete(struct napi_struct *n)
461} 464}
462 465
463/** 466/**
464 * napi_by_id - lookup a NAPI by napi_id
465 * @napi_id: hashed napi_id
466 *
467 * lookup @napi_id in napi_hash table
468 * must be called under rcu_read_lock()
469 */
470struct napi_struct *napi_by_id(unsigned int napi_id);
471
472/**
473 * napi_hash_add - add a NAPI to global hashtable 467 * napi_hash_add - add a NAPI to global hashtable
474 * @napi: napi context 468 * @napi: napi context
475 * 469 *
476 * generate a new napi_id and store a @napi under it in napi_hash 470 * generate a new napi_id and store a @napi under it in napi_hash
471 * Used for busy polling (CONFIG_NET_RX_BUSY_POLL)
472 * Note: This is normally automatically done from netif_napi_add(),
 473 * so it might disappear in a future Linux version.
477 */ 474 */
478void napi_hash_add(struct napi_struct *napi); 475void napi_hash_add(struct napi_struct *napi);
479 476
@@ -482,9 +479,14 @@ void napi_hash_add(struct napi_struct *napi);
482 * @napi: napi context 479 * @napi: napi context
483 * 480 *
484 * Warning: caller must observe rcu grace period 481 * Warning: caller must observe rcu grace period
485 * before freeing memory containing @napi 482 * before freeing memory containing @napi, if
483 * this function returns true.
 484 * Note: the core networking stack automatically calls it
 485 * from netif_napi_del().
486 * Drivers might want to call this helper to combine all
487 * the needed rcu grace periods into a single one.
486 */ 488 */
487void napi_hash_del(struct napi_struct *napi); 489bool napi_hash_del(struct napi_struct *napi);
488 490
489/** 491/**
490 * napi_disable - prevent NAPI from scheduling 492 * napi_disable - prevent NAPI from scheduling
@@ -810,6 +812,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
810 * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX) 812 * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
 811 * Required; cannot be NULL. 813
812 * 814 *
815 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
816 * netdev_features_t features);
817 * Adjusts the requested feature flags according to device-specific
818 * constraints, and returns the resulting flags. Must not modify
819 * the device state.
820 *
813 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, 821 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
814 * void *accel_priv, select_queue_fallback_t fallback); 822 * void *accel_priv, select_queue_fallback_t fallback);
 815 * Called to decide which queue to use when the device supports multiple 823
@@ -957,12 +965,6 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
957 * Called to release previously enslaved netdev. 965 * Called to release previously enslaved netdev.
958 * 966 *
959 * Feature/offload setting functions. 967 * Feature/offload setting functions.
960 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
961 * netdev_features_t features);
962 * Adjusts the requested feature flags according to device-specific
963 * constraints, and returns the resulting flags. Must not modify
964 * the device state.
965 *
966 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features); 968 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
967 * Called to update device configuration to new features. Passed 969 * Called to update device configuration to new features. Passed
 968 * feature set might be less than what was returned by ndo_fix_features(). 970
@@ -1011,6 +1013,19 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
1011 * a new port starts listening. The operation is protected by the 1013 * a new port starts listening. The operation is protected by the
1012 * vxlan_net->sock_lock. 1014 * vxlan_net->sock_lock.
1013 * 1015 *
1016 * void (*ndo_add_geneve_port)(struct net_device *dev,
1017 * sa_family_t sa_family, __be16 port);
1018 * Called by geneve to notify a driver about the UDP port and socket
 1019 * address family that geneve is listening to. It is called only when
1020 * a new port starts listening. The operation is protected by the
1021 * geneve_net->sock_lock.
1022 *
1023 * void (*ndo_del_geneve_port)(struct net_device *dev,
1024 * sa_family_t sa_family, __be16 port);
1025 * Called by geneve to notify the driver about a UDP port and socket
1026 * address family that geneve is not listening to anymore. The operation
1027 * is protected by the geneve_net->sock_lock.
1028 *
1014 * void (*ndo_del_vxlan_port)(struct net_device *dev, 1029 * void (*ndo_del_vxlan_port)(struct net_device *dev,
1015 * sa_family_t sa_family, __be16 port); 1030 * sa_family_t sa_family, __be16 port);
1016 * Called by vxlan to notify the driver about a UDP port and socket 1031 * Called by vxlan to notify the driver about a UDP port and socket
@@ -1066,8 +1081,11 @@ struct net_device_ops {
1066 void (*ndo_uninit)(struct net_device *dev); 1081 void (*ndo_uninit)(struct net_device *dev);
1067 int (*ndo_open)(struct net_device *dev); 1082 int (*ndo_open)(struct net_device *dev);
1068 int (*ndo_stop)(struct net_device *dev); 1083 int (*ndo_stop)(struct net_device *dev);
1069 netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb, 1084 netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
1070 struct net_device *dev); 1085 struct net_device *dev);
1086 netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
1087 struct net_device *dev,
1088 netdev_features_t features);
1071 u16 (*ndo_select_queue)(struct net_device *dev, 1089 u16 (*ndo_select_queue)(struct net_device *dev,
1072 struct sk_buff *skb, 1090 struct sk_buff *skb,
1073 void *accel_priv, 1091 void *accel_priv,
@@ -1215,7 +1233,12 @@ struct net_device_ops {
1215 void (*ndo_del_vxlan_port)(struct net_device *dev, 1233 void (*ndo_del_vxlan_port)(struct net_device *dev,
1216 sa_family_t sa_family, 1234 sa_family_t sa_family,
1217 __be16 port); 1235 __be16 port);
1218 1236 void (*ndo_add_geneve_port)(struct net_device *dev,
1237 sa_family_t sa_family,
1238 __be16 port);
1239 void (*ndo_del_geneve_port)(struct net_device *dev,
1240 sa_family_t sa_family,
1241 __be16 port);
1219 void* (*ndo_dfwd_add_station)(struct net_device *pdev, 1242 void* (*ndo_dfwd_add_station)(struct net_device *pdev,
1220 struct net_device *dev); 1243 struct net_device *dev);
1221 void (*ndo_dfwd_del_station)(struct net_device *pdev, 1244 void (*ndo_dfwd_del_station)(struct net_device *pdev,
@@ -1225,9 +1248,6 @@ struct net_device_ops {
1225 struct net_device *dev, 1248 struct net_device *dev,
1226 void *priv); 1249 void *priv);
1227 int (*ndo_get_lock_subclass)(struct net_device *dev); 1250 int (*ndo_get_lock_subclass)(struct net_device *dev);
1228 netdev_features_t (*ndo_features_check) (struct sk_buff *skb,
1229 struct net_device *dev,
1230 netdev_features_t features);
1231 int (*ndo_set_tx_maxrate)(struct net_device *dev, 1251 int (*ndo_set_tx_maxrate)(struct net_device *dev,
1232 int queue_index, 1252 int queue_index,
1233 u32 maxrate); 1253 u32 maxrate);
@@ -1271,6 +1291,7 @@ struct net_device_ops {
1271 * @IFF_NO_QUEUE: device can run without qdisc attached 1291 * @IFF_NO_QUEUE: device can run without qdisc attached
1272 * @IFF_OPENVSWITCH: device is a Open vSwitch master 1292 * @IFF_OPENVSWITCH: device is a Open vSwitch master
1273 * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device 1293 * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
1294 * @IFF_TEAM: device is a team device
1274 */ 1295 */
1275enum netdev_priv_flags { 1296enum netdev_priv_flags {
1276 IFF_802_1Q_VLAN = 1<<0, 1297 IFF_802_1Q_VLAN = 1<<0,
@@ -1297,6 +1318,7 @@ enum netdev_priv_flags {
1297 IFF_NO_QUEUE = 1<<21, 1318 IFF_NO_QUEUE = 1<<21,
1298 IFF_OPENVSWITCH = 1<<22, 1319 IFF_OPENVSWITCH = 1<<22,
1299 IFF_L3MDEV_SLAVE = 1<<23, 1320 IFF_L3MDEV_SLAVE = 1<<23,
1321 IFF_TEAM = 1<<24,
1300}; 1322};
1301 1323
1302#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN 1324#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
@@ -1323,6 +1345,7 @@ enum netdev_priv_flags {
1323#define IFF_NO_QUEUE IFF_NO_QUEUE 1345#define IFF_NO_QUEUE IFF_NO_QUEUE
1324#define IFF_OPENVSWITCH IFF_OPENVSWITCH 1346#define IFF_OPENVSWITCH IFF_OPENVSWITCH
1325#define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE 1347#define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE
1348#define IFF_TEAM IFF_TEAM
1326 1349
1327/** 1350/**
1328 * struct net_device - The DEVICE structure. 1351 * struct net_device - The DEVICE structure.
@@ -1716,7 +1739,9 @@ struct net_device {
1716#ifdef CONFIG_XPS 1739#ifdef CONFIG_XPS
1717 struct xps_dev_maps __rcu *xps_maps; 1740 struct xps_dev_maps __rcu *xps_maps;
1718#endif 1741#endif
1719 1742#ifdef CONFIG_NET_CLS_ACT
1743 struct tcf_proto __rcu *egress_cl_list;
1744#endif
1720#ifdef CONFIG_NET_SWITCHDEV 1745#ifdef CONFIG_NET_SWITCHDEV
1721 u32 offload_fwd_mark; 1746 u32 offload_fwd_mark;
1722#endif 1747#endif
@@ -1949,6 +1974,26 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
1949 int (*poll)(struct napi_struct *, int), int weight); 1974 int (*poll)(struct napi_struct *, int), int weight);
1950 1975
1951/** 1976/**
1977 * netif_tx_napi_add - initialize a napi context
1978 * @dev: network device
1979 * @napi: napi context
1980 * @poll: polling function
1981 * @weight: default weight
1982 *
1983 * This variant of netif_napi_add() should be used from drivers using NAPI
1984 * to exclusively poll a TX queue.
 1985 * This avoids adding it to napi_hash[], which would pollute that hash table.
1986 */
1987static inline void netif_tx_napi_add(struct net_device *dev,
1988 struct napi_struct *napi,
1989 int (*poll)(struct napi_struct *, int),
1990 int weight)
1991{
1992 set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
1993 netif_napi_add(dev, napi, poll, weight);
1994}
1995
1996/**
1952 * netif_napi_del - remove a napi context 1997 * netif_napi_del - remove a napi context
1953 * @napi: napi context 1998 * @napi: napi context
1954 * 1999 *
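
A minimal usage sketch for netif_tx_napi_add(); the private struct and poll handler are hypothetical:

	struct example_priv {
		struct napi_struct tx_napi;
	};

	static int example_tx_poll(struct napi_struct *napi, int budget)
	{
		/* reap TX completions; TX work consumes no RX budget */
		return 0;
	}

	static void example_setup_tx_napi(struct net_device *dev,
					  struct example_priv *priv)
	{
		/* kept out of napi_hash[], so never busy-polled */
		netif_tx_napi_add(dev, &priv->tx_napi, example_tx_poll,
				  NAPI_POLL_WEIGHT);
		napi_enable(&priv->tx_napi);
	}
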
@@ -2086,6 +2131,24 @@ struct pcpu_sw_netstats {
2086#define netdev_alloc_pcpu_stats(type) \ 2131#define netdev_alloc_pcpu_stats(type) \
2087 __netdev_alloc_pcpu_stats(type, GFP_KERNEL) 2132 __netdev_alloc_pcpu_stats(type, GFP_KERNEL)
2088 2133
2134enum netdev_lag_tx_type {
2135 NETDEV_LAG_TX_TYPE_UNKNOWN,
2136 NETDEV_LAG_TX_TYPE_RANDOM,
2137 NETDEV_LAG_TX_TYPE_BROADCAST,
2138 NETDEV_LAG_TX_TYPE_ROUNDROBIN,
2139 NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
2140 NETDEV_LAG_TX_TYPE_HASH,
2141};
2142
2143struct netdev_lag_upper_info {
2144 enum netdev_lag_tx_type tx_type;
2145};
2146
2147struct netdev_lag_lower_state_info {
2148 u8 link_up : 1,
2149 tx_enabled : 1;
2150};
2151
2089#include <linux/notifier.h> 2152#include <linux/notifier.h>
2090 2153
2091/* netdevice notifier chain. Please remember to update the rtnetlink 2154/* netdevice notifier chain. Please remember to update the rtnetlink
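
A minimal sketch of how a LAG master (bond or team) would describe its TX policy through the new types when linking a port, matching the extended netdev_master_upper_dev_link() signature later in this diff:

	static int example_enslave(struct net_device *master,
				   struct net_device *port)
	{
		struct netdev_lag_upper_info lag_info = {
			.tx_type = NETDEV_LAG_TX_TYPE_HASH,
		};

		/* no upper_priv here; listeners receive &lag_info via the
		 * notifier's upper_info pointer
		 */
		return netdev_master_upper_dev_link(port, master, NULL,
						    &lag_info);
	}
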
@@ -2121,6 +2184,7 @@ struct pcpu_sw_netstats {
2121#define NETDEV_CHANGEINFODATA 0x0018 2184#define NETDEV_CHANGEINFODATA 0x0018
2122#define NETDEV_BONDING_INFO 0x0019 2185#define NETDEV_BONDING_INFO 0x0019
2123#define NETDEV_PRECHANGEUPPER 0x001A 2186#define NETDEV_PRECHANGEUPPER 0x001A
2187#define NETDEV_CHANGELOWERSTATE 0x001B
2124 2188
2125int register_netdevice_notifier(struct notifier_block *nb); 2189int register_netdevice_notifier(struct notifier_block *nb);
2126int unregister_netdevice_notifier(struct notifier_block *nb); 2190int unregister_netdevice_notifier(struct notifier_block *nb);
@@ -2139,6 +2203,12 @@ struct netdev_notifier_changeupper_info {
2139 struct net_device *upper_dev; /* new upper dev */ 2203 struct net_device *upper_dev; /* new upper dev */
2140 bool master; /* is upper dev master */ 2204 bool master; /* is upper dev master */
 2141 bool linking; /* is the notification for link or unlink */ 2205
2206 void *upper_info; /* upper dev info */
2207};
2208
2209struct netdev_notifier_changelowerstate_info {
2210 struct netdev_notifier_info info; /* must be first */
 2211 void *lower_state_info; /* lower device state */
2142}; 2212};
2143 2213
2144static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, 2214static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
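
A minimal sketch of a listener for the new NETDEV_CHANGELOWERSTATE event; lower_state_info is typed by convention of the upper device, here assumed to be a LAG:

	static int example_netdev_event(struct notifier_block *nb,
					unsigned long event, void *ptr)
	{
		struct netdev_notifier_changelowerstate_info *info = ptr;
		struct netdev_lag_lower_state_info *state;

		if (event != NETDEV_CHANGELOWERSTATE)
			return NOTIFY_DONE;

		state = info->lower_state_info;
		if (state && state->link_up && state->tx_enabled)
			pr_debug("lower dev can carry LAG traffic\n");

		return NOTIFY_DONE;
	}
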
@@ -2472,6 +2542,71 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
2472 remcsum_unadjust((__sum16 *)ptr, grc->delta); 2542 remcsum_unadjust((__sum16 *)ptr, grc->delta);
2473} 2543}
2474 2544
2545struct skb_csum_offl_spec {
2546 __u16 ipv4_okay:1,
2547 ipv6_okay:1,
2548 encap_okay:1,
2549 ip_options_okay:1,
2550 ext_hdrs_okay:1,
2551 tcp_okay:1,
2552 udp_okay:1,
2553 sctp_okay:1,
2554 vlan_okay:1,
2555 no_encapped_ipv6:1,
2556 no_not_encapped:1;
2557};
2558
2559bool __skb_csum_offload_chk(struct sk_buff *skb,
2560 const struct skb_csum_offl_spec *spec,
2561 bool *csum_encapped,
2562 bool csum_help);
2563
2564static inline bool skb_csum_offload_chk(struct sk_buff *skb,
2565 const struct skb_csum_offl_spec *spec,
2566 bool *csum_encapped,
2567 bool csum_help)
2568{
2569 if (skb->ip_summed != CHECKSUM_PARTIAL)
2570 return false;
2571
2572 return __skb_csum_offload_chk(skb, spec, csum_encapped, csum_help);
2573}
2574
2575static inline bool skb_csum_offload_chk_help(struct sk_buff *skb,
2576 const struct skb_csum_offl_spec *spec)
2577{
2578 bool csum_encapped;
2579
2580 return skb_csum_offload_chk(skb, spec, &csum_encapped, true);
2581}
2582
2583static inline bool skb_csum_off_chk_help_cmn(struct sk_buff *skb)
2584{
2585 static const struct skb_csum_offl_spec csum_offl_spec = {
2586 .ipv4_okay = 1,
2587 .ip_options_okay = 1,
2588 .ipv6_okay = 1,
2589 .vlan_okay = 1,
2590 .tcp_okay = 1,
2591 .udp_okay = 1,
2592 };
2593
2594 return skb_csum_offload_chk_help(skb, &csum_offl_spec);
2595}
2596
2597static inline bool skb_csum_off_chk_help_cmn_v4_only(struct sk_buff *skb)
2598{
2599 static const struct skb_csum_offl_spec csum_offl_spec = {
2600 .ipv4_okay = 1,
2601 .ip_options_okay = 1,
2602 .tcp_okay = 1,
2603 .udp_okay = 1,
2604 .vlan_okay = 1,
2605 };
2606
2607 return skb_csum_offload_chk_help(skb, &csum_offl_spec);
2608}
2609
2475static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, 2610static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
2476 unsigned short type, 2611 unsigned short type,
2477 const void *daddr, const void *saddr, 2612 const void *daddr, const void *saddr,
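
A minimal sketch of a driver-side user of the spec structure above: hardware that can only checksum TCP/UDP over plain IPv4 (optionally VLAN-tagged) describes itself once and lets the helper decide per packet. The true-means-offloadable return convention is an assumption to verify against the helper's implementation:

	static bool example_can_offload_csum(struct sk_buff *skb)
	{
		static const struct skb_csum_offl_spec spec = {
			.ipv4_okay = 1,
			.vlan_okay = 1,
			.tcp_okay = 1,
			.udp_okay = 1,
		};
		bool encapped;

		/* csum_help=false: only check, never resolve in software */
		return skb_csum_offload_chk(skb, &spec, &encapped, false);
	}
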
@@ -3595,15 +3730,15 @@ struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
3595struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev); 3730struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
3596int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev); 3731int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
3597int netdev_master_upper_dev_link(struct net_device *dev, 3732int netdev_master_upper_dev_link(struct net_device *dev,
3598 struct net_device *upper_dev); 3733 struct net_device *upper_dev,
3599int netdev_master_upper_dev_link_private(struct net_device *dev, 3734 void *upper_priv, void *upper_info);
3600 struct net_device *upper_dev,
3601 void *private);
3602void netdev_upper_dev_unlink(struct net_device *dev, 3735void netdev_upper_dev_unlink(struct net_device *dev,
3603 struct net_device *upper_dev); 3736 struct net_device *upper_dev);
3604void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); 3737void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
3605void *netdev_lower_dev_get_private(struct net_device *dev, 3738void *netdev_lower_dev_get_private(struct net_device *dev,
3606 struct net_device *lower_dev); 3739 struct net_device *lower_dev);
3740void netdev_lower_state_changed(struct net_device *lower_dev,
3741 void *lower_state_info);
3607 3742
3608/* RSS keys are 40 or 52 bytes long */ 3743/* RSS keys are 40 or 52 bytes long */
3609#define NETDEV_RSS_KEY_LEN 52 3744#define NETDEV_RSS_KEY_LEN 52
@@ -3611,7 +3746,7 @@ extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN];
3611void netdev_rss_key_fill(void *buffer, size_t len); 3746void netdev_rss_key_fill(void *buffer, size_t len);
3612 3747
3613int dev_get_nest_level(struct net_device *dev, 3748int dev_get_nest_level(struct net_device *dev,
3614 bool (*type_check)(struct net_device *dev)); 3749 bool (*type_check)(const struct net_device *dev));
3615int skb_checksum_help(struct sk_buff *skb); 3750int skb_checksum_help(struct sk_buff *skb);
3616struct sk_buff *__skb_gso_segment(struct sk_buff *skb, 3751struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3617 netdev_features_t features, bool tx_path); 3752 netdev_features_t features, bool tx_path);
@@ -3641,13 +3776,37 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth);
3641static inline bool can_checksum_protocol(netdev_features_t features, 3776static inline bool can_checksum_protocol(netdev_features_t features,
3642 __be16 protocol) 3777 __be16 protocol)
3643{ 3778{
3644 return ((features & NETIF_F_GEN_CSUM) || 3779 if (protocol == htons(ETH_P_FCOE))
3645 ((features & NETIF_F_V4_CSUM) && 3780 return !!(features & NETIF_F_FCOE_CRC);
3646 protocol == htons(ETH_P_IP)) || 3781
3647 ((features & NETIF_F_V6_CSUM) && 3782 /* Assume this is an IP checksum (not SCTP CRC) */
3648 protocol == htons(ETH_P_IPV6)) || 3783
3649 ((features & NETIF_F_FCOE_CRC) && 3784 if (features & NETIF_F_HW_CSUM) {
3650 protocol == htons(ETH_P_FCOE))); 3785 /* Can checksum everything */
3786 return true;
3787 }
3788
3789 switch (protocol) {
3790 case htons(ETH_P_IP):
3791 return !!(features & NETIF_F_IP_CSUM);
3792 case htons(ETH_P_IPV6):
3793 return !!(features & NETIF_F_IPV6_CSUM);
3794 default:
3795 return false;
3796 }
3797}
3798
3799/* Map an ethertype into IP protocol if possible */
3800static inline int eproto_to_ipproto(int eproto)
3801{
3802 switch (eproto) {
3803 case htons(ETH_P_IP):
3804 return IPPROTO_IP;
3805 case htons(ETH_P_IPV6):
3806 return IPPROTO_IPV6;
3807 default:
3808 return -1;
3809 }
3651} 3810}
3652 3811
3653#ifdef CONFIG_BUG 3812#ifdef CONFIG_BUG
@@ -3712,15 +3871,14 @@ void linkwatch_run_queue(void);
3712static inline netdev_features_t netdev_intersect_features(netdev_features_t f1, 3871static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
3713 netdev_features_t f2) 3872 netdev_features_t f2)
3714{ 3873{
3715 if (f1 & NETIF_F_GEN_CSUM) 3874 if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
3716 f1 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM); 3875 if (f1 & NETIF_F_HW_CSUM)
3717 if (f2 & NETIF_F_GEN_CSUM) 3876 f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
3718 f2 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM); 3877 else
3719 f1 &= f2; 3878 f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
3720 if (f1 & NETIF_F_GEN_CSUM) 3879 }
3721 f1 &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
3722 3880
3723 return f1; 3881 return f1 & f2;
3724} 3882}
3725 3883
3726static inline netdev_features_t netdev_get_wanted_features( 3884static inline netdev_features_t netdev_get_wanted_features(
@@ -3808,32 +3966,32 @@ static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
3808 skb->mac_len = mac_len; 3966 skb->mac_len = mac_len;
3809} 3967}
3810 3968
3811static inline bool netif_is_macvlan(struct net_device *dev) 3969static inline bool netif_is_macvlan(const struct net_device *dev)
3812{ 3970{
3813 return dev->priv_flags & IFF_MACVLAN; 3971 return dev->priv_flags & IFF_MACVLAN;
3814} 3972}
3815 3973
3816static inline bool netif_is_macvlan_port(struct net_device *dev) 3974static inline bool netif_is_macvlan_port(const struct net_device *dev)
3817{ 3975{
3818 return dev->priv_flags & IFF_MACVLAN_PORT; 3976 return dev->priv_flags & IFF_MACVLAN_PORT;
3819} 3977}
3820 3978
3821static inline bool netif_is_ipvlan(struct net_device *dev) 3979static inline bool netif_is_ipvlan(const struct net_device *dev)
3822{ 3980{
3823 return dev->priv_flags & IFF_IPVLAN_SLAVE; 3981 return dev->priv_flags & IFF_IPVLAN_SLAVE;
3824} 3982}
3825 3983
3826static inline bool netif_is_ipvlan_port(struct net_device *dev) 3984static inline bool netif_is_ipvlan_port(const struct net_device *dev)
3827{ 3985{
3828 return dev->priv_flags & IFF_IPVLAN_MASTER; 3986 return dev->priv_flags & IFF_IPVLAN_MASTER;
3829} 3987}
3830 3988
3831static inline bool netif_is_bond_master(struct net_device *dev) 3989static inline bool netif_is_bond_master(const struct net_device *dev)
3832{ 3990{
3833 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; 3991 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
3834} 3992}
3835 3993
3836static inline bool netif_is_bond_slave(struct net_device *dev) 3994static inline bool netif_is_bond_slave(const struct net_device *dev)
3837{ 3995{
3838 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING; 3996 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
3839} 3997}
@@ -3868,6 +4026,26 @@ static inline bool netif_is_ovs_master(const struct net_device *dev)
3868 return dev->priv_flags & IFF_OPENVSWITCH; 4026 return dev->priv_flags & IFF_OPENVSWITCH;
3869} 4027}
3870 4028
4029static inline bool netif_is_team_master(const struct net_device *dev)
4030{
4031 return dev->priv_flags & IFF_TEAM;
4032}
4033
4034static inline bool netif_is_team_port(const struct net_device *dev)
4035{
4036 return dev->priv_flags & IFF_TEAM_PORT;
4037}
4038
4039static inline bool netif_is_lag_master(const struct net_device *dev)
4040{
4041 return netif_is_bond_master(dev) || netif_is_team_master(dev);
4042}
4043
4044static inline bool netif_is_lag_port(const struct net_device *dev)
4045{
4046 return netif_is_bond_slave(dev) || netif_is_team_port(dev);
4047}
4048
3871/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */ 4049/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
3872static inline void netif_keep_dst(struct net_device *dev) 4050static inline void netif_keep_dst(struct net_device *dev)
3873{ 4051{
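
A minimal sketch of the intended consumer of the new team/LAG helpers: a switchdev-style driver deciding whether an upper device is a link aggregate it can offload, without open-coding bonding and team flag checks:

	static bool example_upper_is_offloadable_lag(const struct net_device *upper)
	{
		/* covers both bonding and team masters */
		if (!netif_is_lag_master(upper))
			return false;

		/* ... then inspect the announced netdev_lag_upper_info ... */
		return true;
	}
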
diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h
new file mode 100644
index 000000000000..22a16a23cd8a
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_sctp.h
@@ -0,0 +1,13 @@
1#ifndef _NF_CONNTRACK_SCTP_H
2#define _NF_CONNTRACK_SCTP_H
3/* SCTP tracking. */
4
5#include <uapi/linux/netfilter/nf_conntrack_sctp.h>
6
7struct ip_ct_sctp {
8 enum sctp_conntrack state;
9
10 __be32 vtag[IP_CT_DIR_MAX];
11};
12
13#endif /* _NF_CONNTRACK_SCTP_H */
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 5646b24bfc64..ba0d9789eb6e 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -8,12 +8,12 @@
8#include <uapi/linux/netfilter/nfnetlink.h> 8#include <uapi/linux/netfilter/nfnetlink.h>
9 9
10struct nfnl_callback { 10struct nfnl_callback {
11 int (*call)(struct sock *nl, struct sk_buff *skb, 11 int (*call)(struct net *net, struct sock *nl, struct sk_buff *skb,
12 const struct nlmsghdr *nlh,
13 const struct nlattr * const cda[]);
14 int (*call_rcu)(struct sock *nl, struct sk_buff *skb,
15 const struct nlmsghdr *nlh, 12 const struct nlmsghdr *nlh,
16 const struct nlattr * const cda[]); 13 const struct nlattr * const cda[]);
14 int (*call_rcu)(struct net *net, struct sock *nl, struct sk_buff *skb,
15 const struct nlmsghdr *nlh,
16 const struct nlattr * const cda[]);
17 int (*call_batch)(struct net *net, struct sock *nl, struct sk_buff *skb, 17 int (*call_batch)(struct net *net, struct sock *nl, struct sk_buff *skb,
18 const struct nlmsghdr *nlh, 18 const struct nlmsghdr *nlh,
19 const struct nlattr * const cda[]); 19 const struct nlattr * const cda[]);
@@ -26,8 +26,8 @@ struct nfnetlink_subsystem {
26 __u8 subsys_id; /* nfnetlink subsystem ID */ 26 __u8 subsys_id; /* nfnetlink subsystem ID */
27 __u8 cb_count; /* number of callbacks */ 27 __u8 cb_count; /* number of callbacks */
28 const struct nfnl_callback *cb; /* callback for individual types */ 28 const struct nfnl_callback *cb; /* callback for individual types */
29 int (*commit)(struct sk_buff *skb); 29 int (*commit)(struct net *net, struct sk_buff *skb);
30 int (*abort)(struct sk_buff *skb); 30 int (*abort)(struct net *net, struct sk_buff *skb);
31}; 31};
32 32
33int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n); 33int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n);
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 639e9b8b0e4d..0b41959aab9f 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -131,6 +131,7 @@ netlink_skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
131struct netlink_callback { 131struct netlink_callback {
132 struct sk_buff *skb; 132 struct sk_buff *skb;
133 const struct nlmsghdr *nlh; 133 const struct nlmsghdr *nlh;
134 int (*start)(struct netlink_callback *);
134 int (*dump)(struct sk_buff * skb, 135 int (*dump)(struct sk_buff * skb,
135 struct netlink_callback *cb); 136 struct netlink_callback *cb);
136 int (*done)(struct netlink_callback *cb); 137 int (*done)(struct netlink_callback *cb);
@@ -153,6 +154,7 @@ struct nlmsghdr *
153__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags); 154__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags);
154 155
155struct netlink_dump_control { 156struct netlink_dump_control {
157 int (*start)(struct netlink_callback *);
156 int (*dump)(struct sk_buff *skb, struct netlink_callback *); 158 int (*dump)(struct sk_buff *skb, struct netlink_callback *);
157 int (*done)(struct netlink_callback *); 159 int (*done)(struct netlink_callback *);
158 void *data; 160 void *data;
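
A minimal sketch of the new ->start hook: it runs once before the first ->dump call, which makes it the natural place to parse request attributes and seed cb->args[]. The request handler and socket choice are hypothetical:

	static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
	{
		return 0;		/* nothing (more) to dump */
	}

	static int example_start(struct netlink_callback *cb)
	{
		cb->args[0] = 0;	/* e.g. a resume cookie */
		return 0;		/* non-zero would abort the dump */
	}

	static int example_handle_req(struct net *net, struct sk_buff *skb,
				      struct nlmsghdr *nlh)
	{
		struct netlink_dump_control c = {
			.start = example_start,
			.dump = example_dump,
		};

		return netlink_dump_start(net->diag_nlsk, skb, nlh, &c);
	}
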
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index d9ba49cedc5d..1acbefc4bbda 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2495,6 +2495,8 @@
2495#define PCI_DEVICE_ID_KORENIX_JETCARDF2 0x1700 2495#define PCI_DEVICE_ID_KORENIX_JETCARDF2 0x1700
2496#define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff 2496#define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff
2497 2497
2498#define PCI_VENDOR_ID_NETRONOME 0x19ee
2499
2498#define PCI_VENDOR_ID_QMI 0x1a32 2500#define PCI_VENDOR_ID_QMI 0x1a32
2499 2501
2500#define PCI_VENDOR_ID_AZWAVE 0x1a3b 2502#define PCI_VENDOR_ID_AZWAVE 0x1a3b
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 05fde31b6dc6..d6f3641e7933 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -16,8 +16,10 @@
16#ifndef __PHY_H 16#ifndef __PHY_H
17#define __PHY_H 17#define __PHY_H
18 18
19#include <linux/compiler.h>
19#include <linux/spinlock.h> 20#include <linux/spinlock.h>
20#include <linux/ethtool.h> 21#include <linux/ethtool.h>
22#include <linux/mdio.h>
21#include <linux/mii.h> 23#include <linux/mii.h>
22#include <linux/module.h> 24#include <linux/module.h>
23#include <linux/timer.h> 25#include <linux/timer.h>
@@ -58,6 +60,7 @@
58#define PHY_HAS_INTERRUPT 0x00000001 60#define PHY_HAS_INTERRUPT 0x00000001
59#define PHY_HAS_MAGICANEG 0x00000002 61#define PHY_HAS_MAGICANEG 0x00000002
60#define PHY_IS_INTERNAL 0x00000004 62#define PHY_IS_INTERNAL 0x00000004
63#define MDIO_DEVICE_IS_PHY 0x80000000
61 64
62/* Interface Mode definitions */ 65/* Interface Mode definitions */
63typedef enum { 66typedef enum {
@@ -158,8 +161,8 @@ struct mii_bus {
158 const char *name; 161 const char *name;
159 char id[MII_BUS_ID_SIZE]; 162 char id[MII_BUS_ID_SIZE];
160 void *priv; 163 void *priv;
161 int (*read)(struct mii_bus *bus, int phy_id, int regnum); 164 int (*read)(struct mii_bus *bus, int addr, int regnum);
162 int (*write)(struct mii_bus *bus, int phy_id, int regnum, u16 val); 165 int (*write)(struct mii_bus *bus, int addr, int regnum, u16 val);
163 int (*reset)(struct mii_bus *bus); 166 int (*reset)(struct mii_bus *bus);
164 167
165 /* 168 /*
@@ -178,7 +181,7 @@ struct mii_bus {
178 struct device dev; 181 struct device dev;
179 182
180 /* list of all PHYs on bus */ 183 /* list of all PHYs on bus */
181 struct phy_device *phy_map[PHY_MAX_ADDR]; 184 struct mdio_device *mdio_map[PHY_MAX_ADDR];
182 185
183 /* PHY addresses to be ignored when probing */ 186 /* PHY addresses to be ignored when probing */
184 u32 phy_mask; 187 u32 phy_mask;
@@ -187,10 +190,10 @@ struct mii_bus {
187 u32 phy_ignore_ta_mask; 190 u32 phy_ignore_ta_mask;
188 191
189 /* 192 /*
190 * Pointer to an array of interrupts, each PHY's 193 * An array of interrupts, each PHY's interrupt at the index
191 * interrupt at the index matching its address 194 * matching its address
192 */ 195 */
193 int *irq; 196 int irq[PHY_MAX_ADDR];
194}; 197};
195#define to_mii_bus(d) container_of(d, struct mii_bus, dev) 198#define to_mii_bus(d) container_of(d, struct mii_bus, dev)
196 199
@@ -212,11 +215,6 @@ static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev)
212 215
213void devm_mdiobus_free(struct device *dev, struct mii_bus *bus); 216void devm_mdiobus_free(struct device *dev, struct mii_bus *bus);
214struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); 217struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
215int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
216int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum);
217int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
218int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val);
219
220 218
221#define PHY_INTERRUPT_DISABLED 0x0 219#define PHY_INTERRUPT_DISABLED 0x0
222#define PHY_INTERRUPT_ENABLED 0x80000000 220#define PHY_INTERRUPT_ENABLED 0x80000000
@@ -361,14 +359,12 @@ struct phy_c45_device_ids {
361 * handling, as well as handling shifts in PHY hardware state 359 * handling, as well as handling shifts in PHY hardware state
362 */ 360 */
363struct phy_device { 361struct phy_device {
362 struct mdio_device mdio;
363
364 /* Information about the PHY type */ 364 /* Information about the PHY type */
365 /* And management functions */ 365 /* And management functions */
366 struct phy_driver *drv; 366 struct phy_driver *drv;
367 367
368 struct mii_bus *bus;
369
370 struct device dev;
371
372 u32 phy_id; 368 u32 phy_id;
373 369
374 struct phy_c45_device_ids c45_ids; 370 struct phy_c45_device_ids c45_ids;
@@ -384,9 +380,6 @@ struct phy_device {
384 380
385 phy_interface_t interface; 381 phy_interface_t interface;
386 382
387 /* Bus address of the PHY (0-31) */
388 int addr;
389
390 /* 383 /*
391 * forced speed & duplex (no autoneg) 384 * forced speed & duplex (no autoneg)
392 * partner speed & duplex & pause (autoneg) 385 * partner speed & duplex & pause (autoneg)
@@ -435,10 +428,12 @@ struct phy_device {
435 428
436 void (*adjust_link)(struct net_device *dev); 429 void (*adjust_link)(struct net_device *dev);
437}; 430};
438#define to_phy_device(d) container_of(d, struct phy_device, dev) 431#define to_phy_device(d) container_of(to_mdio_device(d), \
432 struct phy_device, mdio)
439 433
440/* struct phy_driver: Driver structure for a particular PHY type 434/* struct phy_driver: Driver structure for a particular PHY type
441 * 435 *
436 * driver_data: static driver data
442 * phy_id: The result of reading the UID registers of this PHY 437 * phy_id: The result of reading the UID registers of this PHY
443 * type, and ANDing them with the phy_id_mask. This driver 438 * type, and ANDing them with the phy_id_mask. This driver
444 * only works for PHYs with IDs which match this field 439 * only works for PHYs with IDs which match this field
@@ -448,7 +443,6 @@ struct phy_device {
448 * by this PHY 443 * by this PHY
449 * flags: A bitfield defining certain other features this PHY 444 * flags: A bitfield defining certain other features this PHY
450 * supports (like interrupts) 445 * supports (like interrupts)
451 * driver_data: static driver data
452 * 446 *
453 * The drivers must implement config_aneg and read_status. All 447 * The drivers must implement config_aneg and read_status. All
454 * other functions are optional. Note that none of these 448 * other functions are optional. Note that none of these
@@ -459,6 +453,7 @@ struct phy_device {
459 * supported in the driver). 453 * supported in the driver).
460 */ 454 */
461struct phy_driver { 455struct phy_driver {
456 struct mdio_driver_common mdiodrv;
462 u32 phy_id; 457 u32 phy_id;
463 char *name; 458 char *name;
464 unsigned int phy_id_mask; 459 unsigned int phy_id_mask;
@@ -589,9 +584,14 @@ struct phy_driver {
589 int (*module_eeprom)(struct phy_device *dev, 584 int (*module_eeprom)(struct phy_device *dev,
590 struct ethtool_eeprom *ee, u8 *data); 585 struct ethtool_eeprom *ee, u8 *data);
591 586
592 struct device_driver driver; 587 /* Get statistics from the phy using ethtool */
588 int (*get_sset_count)(struct phy_device *dev);
589 void (*get_strings)(struct phy_device *dev, u8 *data);
590 void (*get_stats)(struct phy_device *dev,
591 struct ethtool_stats *stats, u64 *data);
593}; 592};
594#define to_phy_driver(d) container_of(d, struct phy_driver, driver) 593#define to_phy_driver(d) container_of(to_mdio_common_driver(d), \
594 struct phy_driver, mdiodrv)
595 595
596#define PHY_ANY_ID "MATCH ANY PHY" 596#define PHY_ANY_ID "MATCH ANY PHY"
597#define PHY_ANY_UID 0xffffffff 597#define PHY_ANY_UID 0xffffffff
@@ -619,7 +619,7 @@ static inline int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
619 if (!phydev->is_c45) 619 if (!phydev->is_c45)
620 return -EOPNOTSUPP; 620 return -EOPNOTSUPP;
621 621
622 return mdiobus_read(phydev->bus, phydev->addr, 622 return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr,
623 MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff)); 623 MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff));
624} 624}
625 625
@@ -627,14 +627,12 @@ static inline int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
627 * phy_read_mmd_indirect - reads data from the MMD registers 627 * phy_read_mmd_indirect - reads data from the MMD registers
628 * @phydev: The PHY device bus 628 * @phydev: The PHY device bus
629 * @prtad: MMD Address 629 * @prtad: MMD Address
630 * @devad: MMD DEVAD
631 * @addr: PHY address on the MII bus 630 * @addr: PHY address on the MII bus
632 * 631 *
633 * Description: it reads data from the MMD registers (clause 22 to access to 632 * Description: it reads data from the MMD registers (clause 22 to access to
634 * clause 45) of the specified phy address. 633 * clause 45) of the specified phy address.
635 */ 634 */
636int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, 635int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, int devad);
637 int devad, int addr);
638 636
639/** 637/**
640 * phy_read - Convenience function for reading a given PHY register 638 * phy_read - Convenience function for reading a given PHY register
@@ -647,7 +645,7 @@ int phy_read_mmd_indirect(struct phy_device *phydev, int prtad,
647 */ 645 */
648static inline int phy_read(struct phy_device *phydev, u32 regnum) 646static inline int phy_read(struct phy_device *phydev, u32 regnum)
649{ 647{
650 return mdiobus_read(phydev->bus, phydev->addr, regnum); 648 return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, regnum);
651} 649}
652 650
653/** 651/**
@@ -662,7 +660,7 @@ static inline int phy_read(struct phy_device *phydev, u32 regnum)
662 */ 660 */
663static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val) 661static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val)
664{ 662{
665 return mdiobus_write(phydev->bus, phydev->addr, regnum, val); 663 return mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, regnum, val);
666} 664}
667 665
668/** 666/**
@@ -725,7 +723,7 @@ static inline int phy_write_mmd(struct phy_device *phydev, int devad,
725 723
726 regnum = MII_ADDR_C45 | ((devad & 0x1f) << 16) | (regnum & 0xffff); 724 regnum = MII_ADDR_C45 | ((devad & 0x1f) << 16) | (regnum & 0xffff);
727 725
728 return mdiobus_write(phydev->bus, phydev->addr, regnum, val); 726 return mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, regnum, val);
729} 727}
730 728
731/** 729/**
@@ -733,14 +731,13 @@ static inline int phy_write_mmd(struct phy_device *phydev, int devad,
733 * @phydev: The PHY device 731 * @phydev: The PHY device
734 * @prtad: MMD Address 732 * @prtad: MMD Address
735 * @devad: MMD DEVAD 733 * @devad: MMD DEVAD
736 * @addr: PHY address on the MII bus
737 * @data: data to write in the MMD register 734 * @data: data to write in the MMD register
738 * 735 *
 739 * Description: writes data to the MMD registers of the specified 736
740 * phy address. 737 * phy address.
741 */ 738 */
742void phy_write_mmd_indirect(struct phy_device *phydev, int prtad, 739void phy_write_mmd_indirect(struct phy_device *phydev, int prtad,
743 int devad, int addr, u32 data); 740 int devad, u32 data);
744 741
745struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, 742struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
746 bool is_c45, 743 bool is_c45,
@@ -775,6 +772,20 @@ static inline int phy_read_status(struct phy_device *phydev)
775 return phydev->drv->read_status(phydev); 772 return phydev->drv->read_status(phydev);
776} 773}
777 774
775#define phydev_err(_phydev, format, args...) \
776 dev_err(&_phydev->mdio.dev, format, ##args)
777
778#define phydev_dbg(_phydev, format, args...) \
 779 dev_dbg(&_phydev->mdio.dev, format, ##args)
780
781static inline const char *phydev_name(const struct phy_device *phydev)
782{
783 return dev_name(&phydev->mdio.dev);
784}
785
786void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
787 __printf(2, 3);
788void phy_attached_info(struct phy_device *phydev);
778int genphy_config_init(struct phy_device *phydev); 789int genphy_config_init(struct phy_device *phydev);
779int genphy_setup_forced(struct phy_device *phydev); 790int genphy_setup_forced(struct phy_device *phydev);
780int genphy_restart_aneg(struct phy_device *phydev); 791int genphy_restart_aneg(struct phy_device *phydev);
@@ -787,8 +798,9 @@ int genphy_resume(struct phy_device *phydev);
787int genphy_soft_reset(struct phy_device *phydev); 798int genphy_soft_reset(struct phy_device *phydev);
788void phy_driver_unregister(struct phy_driver *drv); 799void phy_driver_unregister(struct phy_driver *drv);
789void phy_drivers_unregister(struct phy_driver *drv, int n); 800void phy_drivers_unregister(struct phy_driver *drv, int n);
790int phy_driver_register(struct phy_driver *new_driver); 801int phy_driver_register(struct phy_driver *new_driver, struct module *owner);
791int phy_drivers_register(struct phy_driver *new_driver, int n); 802int phy_drivers_register(struct phy_driver *new_driver, int n,
803 struct module *owner);
792void phy_state_machine(struct work_struct *work); 804void phy_state_machine(struct work_struct *work);
793void phy_change(struct work_struct *work); 805void phy_change(struct work_struct *work);
794void phy_mac_interrupt(struct phy_device *phydev, int new_link); 806void phy_mac_interrupt(struct phy_device *phydev, int new_link);
@@ -833,7 +845,7 @@ extern struct bus_type mdio_bus_type;
833#define phy_module_driver(__phy_drivers, __count) \ 845#define phy_module_driver(__phy_drivers, __count) \
834static int __init phy_module_init(void) \ 846static int __init phy_module_init(void) \
835{ \ 847{ \
836 return phy_drivers_register(__phy_drivers, __count); \ 848 return phy_drivers_register(__phy_drivers, __count, THIS_MODULE); \
837} \ 849} \
838module_init(phy_module_init); \ 850module_init(phy_module_init); \
839static void __exit phy_module_exit(void) \ 851static void __exit phy_module_exit(void) \
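
A minimal sketch of what the restructuring means for a PHY driver: registration now takes an explicit module owner, device access goes through the embedded mdio member, and diagnostics use the new phydev_* helpers (IDs and stub body hypothetical):

	static int example_config_init(struct phy_device *phydev)
	{
		phydev_dbg(phydev, "probing %s\n", phydev_name(phydev));
		return phy_read(phydev, MII_BMSR) < 0 ? -EIO : 0;
	}

	static struct phy_driver example_driver = {
		.phy_id		= 0x00112233,	/* hypothetical UID */
		.phy_id_mask	= 0xfffffff0,
		.name		= "Example PHY",
		.config_init	= example_config_init,
	};

	static int __init example_init(void)
	{
		return phy_driver_register(&example_driver, THIS_MODULE);
	}
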
diff --git a/include/linux/pim.h b/include/linux/pim.h
index 252bf6644c51..e1d756f81348 100644
--- a/include/linux/pim.h
+++ b/include/linux/pim.h
@@ -13,6 +13,11 @@
13 13
14#define PIM_NULL_REGISTER cpu_to_be32(0x40000000) 14#define PIM_NULL_REGISTER cpu_to_be32(0x40000000)
15 15
16static inline bool ipmr_pimsm_enabled(void)
17{
18 return IS_BUILTIN(CONFIG_IP_PIMSM_V1) || IS_BUILTIN(CONFIG_IP_PIMSM_V2);
19}
20
 16/* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps) */ 21
17struct pimreghdr 22struct pimreghdr
18{ 23{
diff --git a/include/linux/platform_data/microread.h b/include/linux/platform_data/microread.h
index cfda59b226ee..ca13992089b8 100644
--- a/include/linux/platform_data/microread.h
+++ b/include/linux/platform_data/microread.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Driver include for the PN544 NFC chip. 2 * Driver include for the Inside Secure microread NFC Chip.
3 * 3 *
4 * Copyright (C) 2011 Tieto Poland 4 * Copyright (C) 2011 Tieto Poland
5 * Copyright (C) 2012 Intel Corporation. All rights reserved. 5 * Copyright (C) 2012 Intel Corporation. All rights reserved.
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index dc9a1353f971..d4a32e878180 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -25,6 +25,12 @@
25#include <linux/qed/common_hsi.h> 25#include <linux/qed/common_hsi.h>
26#include <linux/qed/qed_chain.h> 26#include <linux/qed/qed_chain.h>
27 27
28enum qed_led_mode {
29 QED_LED_MODE_OFF,
30 QED_LED_MODE_ON,
31 QED_LED_MODE_RESTORE
32};
33
28#define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \ 34#define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \
29 (void __iomem *)(reg_addr)) 35 (void __iomem *)(reg_addr))
30 36
@@ -252,6 +258,17 @@ struct qed_common_ops {
252 258
253 void (*chain_free)(struct qed_dev *cdev, 259 void (*chain_free)(struct qed_dev *cdev,
254 struct qed_chain *p_chain); 260 struct qed_chain *p_chain);
261
262/**
263 * @brief set_led - Configure LED mode
264 *
265 * @param cdev
266 * @param mode - LED mode
267 *
268 * @return 0 on success, error otherwise.
269 */
270 int (*set_led)(struct qed_dev *cdev,
271 enum qed_led_mode mode);
255}; 272};
256 273
257/** 274/**
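
A minimal sketch of the expected caller of the new op: an ethtool set_phys_id implementation in the protocol driver mapping ethtool states onto qed LED modes (the plumbing around it is hypothetical):

	static int example_set_phys_id(struct qed_dev *cdev,
				       const struct qed_common_ops *ops,
				       enum ethtool_phys_id_state state)
	{
		enum qed_led_mode mode;

		switch (state) {
		case ETHTOOL_ID_ACTIVE:
			mode = QED_LED_MODE_ON;
			break;
		case ETHTOOL_ID_INACTIVE:
			mode = QED_LED_MODE_RESTORE;	/* back to firmware control */
			break;
		default:
			mode = QED_LED_MODE_OFF;
			break;
		}

		return ops->set_led(cdev, mode);
	}
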
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index e50b31d18462..63bd7601b6de 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -823,4 +823,86 @@ out:
823 return err; 823 return err;
824} 824}
825 825
826/* Internal function, please use rhashtable_replace_fast() instead */
827static inline int __rhashtable_replace_fast(
828 struct rhashtable *ht, struct bucket_table *tbl,
829 struct rhash_head *obj_old, struct rhash_head *obj_new,
830 const struct rhashtable_params params)
831{
832 struct rhash_head __rcu **pprev;
833 struct rhash_head *he;
834 spinlock_t *lock;
835 unsigned int hash;
836 int err = -ENOENT;
837
838 /* Minimally, the old and new objects must have the same hash
839 * (which should mean identifiers are the same).
840 */
841 hash = rht_head_hashfn(ht, tbl, obj_old, params);
842 if (hash != rht_head_hashfn(ht, tbl, obj_new, params))
843 return -EINVAL;
844
845 lock = rht_bucket_lock(tbl, hash);
846
847 spin_lock_bh(lock);
848
849 pprev = &tbl->buckets[hash];
850 rht_for_each(he, tbl, hash) {
851 if (he != obj_old) {
852 pprev = &he->next;
853 continue;
854 }
855
856 rcu_assign_pointer(obj_new->next, obj_old->next);
857 rcu_assign_pointer(*pprev, obj_new);
858 err = 0;
859 break;
860 }
861
862 spin_unlock_bh(lock);
863
864 return err;
865}
866
867/**
868 * rhashtable_replace_fast - replace an object in hash table
869 * @ht: hash table
870 * @obj_old: pointer to hash head inside object being replaced
871 * @obj_new: pointer to hash head inside object which is new
872 * @params: hash table parameters
873 *
874 * Replacing an object doesn't affect the number of elements in the hash table
875 * or bucket, so we don't need to worry about shrinking or expanding the
876 * table here.
877 *
878 * Returns zero on success, -ENOENT if the entry could not be found,
879 * -EINVAL if hash is not the same for the old and new objects.
880 */
881static inline int rhashtable_replace_fast(
882 struct rhashtable *ht, struct rhash_head *obj_old,
883 struct rhash_head *obj_new,
884 const struct rhashtable_params params)
885{
886 struct bucket_table *tbl;
887 int err;
888
889 rcu_read_lock();
890
891 tbl = rht_dereference_rcu(ht->tbl, ht);
892
893 /* Because we have already taken (and released) the bucket
894 * lock in old_tbl, if we find that future_tbl is not yet
895 * visible, then that guarantees the entry is still in
896 * the old tbl if it exists.
897 */
898 while ((err = __rhashtable_replace_fast(ht, tbl, obj_old,
899 obj_new, params)) &&
900 (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
901 ;
902
903 rcu_read_unlock();
904
905 return err;
906}
907
826#endif /* _LINUX_RHASHTABLE_H */ 908#endif /* _LINUX_RHASHTABLE_H */
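Usage follows the other *_fast helpers: both objects must hash to the same bucket (in practice, carry the same key), and the caller owns object lifetime. A minimal sketch, assuming an illustrative struct item keyed by a u32 (none of this is from the series itself):

#include <linux/rhashtable.h>

struct item {
	u32			key;
	struct rhash_head	node;
};

static const struct rhashtable_params item_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct item, key),
	.head_offset	= offsetof(struct item, node),
};

static int example_replace(struct rhashtable *ht,
			   struct item *old, struct item *new)
{
	/* The new object must hash identically to the old one, or
	 * rhashtable_replace_fast() returns -EINVAL; -ENOENT means
	 * the old object was not found in any table.
	 */
	new->key = old->key;
	return rhashtable_replace_fast(ht, &old->node, &new->node,
				       item_params);
}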
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 4be5048b1fbe..c006cc900c44 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -84,6 +84,11 @@ void net_inc_ingress_queue(void);
84void net_dec_ingress_queue(void); 84void net_dec_ingress_queue(void);
85#endif 85#endif
86 86
87#ifdef CONFIG_NET_EGRESS
88void net_inc_egress_queue(void);
89void net_dec_egress_queue(void);
90#endif
91
87extern void rtnetlink_init(void); 92extern void rtnetlink_init(void);
88extern void __rtnl_unlock(void); 93extern void __rtnl_unlock(void);
89 94
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4bae8ab3b893..61aa9bbea871 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -834,6 +834,7 @@ struct user_struct {
834 unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */ 834 unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
835#endif 835#endif
836 unsigned long locked_shm; /* How many pages of mlocked shm ? */ 836 unsigned long locked_shm; /* How many pages of mlocked shm ? */
837 unsigned long unix_inflight; /* How many files in flight in unix sockets */
837 838
838#ifdef CONFIG_KEYS 839#ifdef CONFIG_KEYS
839 struct key *uid_keyring; /* UID specific keyring */ 840 struct key *uid_keyring; /* UID specific keyring */
diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h
index 8c9131db2b25..f2e27e078362 100644
--- a/include/linux/sh_eth.h
+++ b/include/linux/sh_eth.h
@@ -4,7 +4,7 @@
4#include <linux/phy.h> 4#include <linux/phy.h>
5#include <linux/if_ether.h> 5#include <linux/if_ether.h>
6 6
7enum {EDMAC_LITTLE_ENDIAN, EDMAC_BIG_ENDIAN}; 7enum {EDMAC_LITTLE_ENDIAN};
8 8
9struct sh_eth_plat_data { 9struct sh_eth_plat_data {
10 int phy; 10 int phy;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 4355129fff91..07f9ccd28654 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -39,11 +39,55 @@
39#include <linux/in6.h> 39#include <linux/in6.h>
40#include <net/flow.h> 40#include <net/flow.h>
41 41
42/* A. Checksumming of received packets by device. 42/* The interface for checksum offload between the stack and networking drivers
43 * is as follows...
44 *
45 * A. IP checksum related features
46 *
47 * Drivers advertise checksum offload capabilities in the features of a device.
48 * From the stack's point of view these are capabilities offered by the driver,
49 * a driver typically only advertises features that it is capable of offloading
50 * to its device.
51 *
52 * The checksum related features are:
53 *
54 * NETIF_F_HW_CSUM - The driver (or its device) is able to compute one
55 * IP (one's complement) checksum for any combination
56 * of protocols or protocol layering. The checksum is
57 * computed and set in a packet per the CHECKSUM_PARTIAL
58 * interface (see below).
59 *
60 * NETIF_F_IP_CSUM - Driver (device) is only able to checksum plain
61 * TCP or UDP packets over IPv4. These are specifically
62 * unencapsulated packets of the form IPv4|TCP or
63 * IPv4|UDP where the Protocol field in the IPv4 header
64 * is TCP or UDP. The IPv4 header may contain IP options.
65 * This feature cannot be set in features for a device
66 * with NETIF_F_HW_CSUM also set. This feature is being
67 * DEPRECATED (see below).
68 *
69 * NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain
70 * TCP or UDP packets over IPv6. These are specifically
71 * unencapsulated packets of the form IPv6|TCP or
72 * IPv6|UDP where the Next Header field in the IPv6
73 * header is either TCP or UDP. IPv6 extension headers
74 * are not supported with this feature. This feature
75 * cannot be set in features for a device with
76 * NETIF_F_HW_CSUM also set. This feature is being
77 * DEPRECATED (see below).
78 *
79 * NETIF_F_RXCSUM - Driver (device) performs receive checksum offload.
80 * This flag is used only to disable the RX checksum
81 * feature for a device. The stack will accept receive
82 * checksum indication in packets received on a device
83 * regardless of whether NETIF_F_RXCSUM is set.
84 *
85 * B. Checksumming of received packets by device. Indication of checksum
86 * verification is set in skb->ip_summed. Possible values are:
43 * 87 *
44 * CHECKSUM_NONE: 88 * CHECKSUM_NONE:
45 * 89 *
46 * Device failed to checksum this packet e.g. due to lack of capabilities. 90 * Device did not checksum this packet e.g. due to lack of capabilities.
47 * The packet contains a full (though not verified) checksum in the packet but 91 * The packet contains a full (though not verified) checksum in the packet but
48 * not in skb->csum. Thus, skb->csum is undefined in this case. 92 * not in skb->csum. Thus, skb->csum is undefined in this case.
49 * 93 *
@@ -53,9 +97,8 @@
53 * (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums 97 * (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums
54 * for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY 98 * for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY
55 * if their checksums are okay. skb->csum is still undefined in this case 99 * if their checksums are okay. skb->csum is still undefined in this case
56 * though. It is a bad option, but, unfortunately, nowadays most vendors do 100 * though. A driver or device must never modify the checksum field in the
57 * this. Apparently with the secret goal to sell you new devices, when you 101 * packet even if the checksum is verified.
58 * will add new protocol to your host, f.e. IPv6 8)
59 * 102 *
60 * CHECKSUM_UNNECESSARY is applicable to following protocols: 103 * CHECKSUM_UNNECESSARY is applicable to following protocols:
61 * TCP: IPv6 and IPv4. 104 * TCP: IPv6 and IPv4.
@@ -96,40 +139,77 @@
96 * packet that are after the checksum being offloaded are not considered to 139 * packet that are after the checksum being offloaded are not considered to
97 * be verified. 140 * be verified.
98 * 141 *
99 * B. Checksumming on output. 142 * C. Checksumming on transmit for non-GSO. The stack requests checksum offload
100 * 143 * in skb->ip_summed for a packet. Values are:
101 * CHECKSUM_NONE:
102 *
103 * The skb was already checksummed by the protocol, or a checksum is not
104 * required.
105 * 144 *
106 * CHECKSUM_PARTIAL: 145 * CHECKSUM_PARTIAL:
107 * 146 *
108 * The device is required to checksum the packet as seen by hard_start_xmit() 147 * The driver is required to checksum the packet as seen by hard_start_xmit()
109 * from skb->csum_start up to the end, and to record/write the checksum at 148 * from skb->csum_start up to the end, and to record/write the checksum at
110 * offset skb->csum_start + skb->csum_offset. 149 * offset skb->csum_start + skb->csum_offset. A driver may verify that the
150 * csum_start and csum_offset values are valid given the length and
151 * offset of the packet; however, it should not attempt to validate that the
152 * checksum refers to a legitimate transport layer checksum; it is the
153 * purview of the stack to validate that csum_start and csum_offset are set
154 * correctly.
155 *
156 * When the stack requests checksum offload for a packet, the driver MUST
157 * ensure that the checksum is set correctly. A driver can either offload the
158 * checksum calculation to the device, or call skb_checksum_help (in the case
159 * that the device does not support offload for a particular checksum).
160 *
161 * NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM are being deprecated in favor of
162 * NETIF_F_HW_CSUM. New devices should use NETIF_F_HW_CSUM to indicate
163 * checksum offload capability. If a device has limited checksum capabilities
164 * (for instance can only perform NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM as
165 * described above) a helper function can be called to resolve
166 * CHECKSUM_PARTIAL. The helper functions are skb_csum_off_chk*. The helper
167 * function takes a spec argument that describes the protocol layer that is
168 * supported for checksum offload and can be called for each packet. If a
169 * packet does not match the specification for offload, skb_checksum_help
170 * is called to resolve the checksum.
111 * 171 *
112 * The device must show its capabilities in dev->features, set up at device 172 * CHECKSUM_NONE:
113 * setup time, e.g. netdev_features.h:
114 * 173 *
115 * NETIF_F_HW_CSUM - It's a clever device, it's able to checksum everything. 174 * The skb was already checksummed by the protocol, or a checksum is not
116 * NETIF_F_IP_CSUM - Device is dumb, it's able to checksum only TCP/UDP over 175 * required.
117 * IPv4. Sigh. Vendors like this way for an unknown reason.
118 * Though, see comment above about CHECKSUM_UNNECESSARY. 8)
119 * NETIF_F_IPV6_CSUM - About as dumb as the last one but does IPv6 instead.
120 * NETIF_F_... - Well, you get the picture.
121 * 176 *
122 * CHECKSUM_UNNECESSARY: 177 * CHECKSUM_UNNECESSARY:
123 * 178 *
124 * Normally, the device will do per protocol specific checksumming. Protocol 179 * This has the same meaning as CHECKSUM_NONE for checksum offload on
125 * implementations that do not want the NIC to perform the checksum 180 * output.
126 * calculation should use this flag in their outgoing skbs.
127 *
128 * NETIF_F_FCOE_CRC - This indicates that the device can do FCoE FC CRC
129 * offload. Correspondingly, the FCoE protocol driver
130 * stack should use CHECKSUM_UNNECESSARY.
131 * 181 *
132 * Any questions? No questions, good. --ANK 182 * CHECKSUM_COMPLETE:
183 * Not used in checksum output. If a driver observes a packet with this value
184 * set in skbuff, if should treat as CHECKSUM_NONE being set.
185 *
186 * D. Non-IP checksum (CRC) offloads
187 *
188 * NETIF_F_SCTP_CRC - This feature indicates that a device is capable of
189 * offloading the SCTP CRC in a packet. To perform this offload the stack
190 * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset
191 * accordingly. Note that there is no indication in the skbuff that the
192 * CHECKSUM_PARTIAL refers to an SCTP checksum; a driver that supports
193 * both IP checksum offload and SCTP CRC offload must verify which offload
194 * is configured for a packet, presumably by inspecting packet headers.
195 *
196 * NETIF_F_FCOE_CRC - This feature indicates that a device is capable of
197 * offloading the FCOE CRC in a packet. To perform this offload the stack
198 * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset
199 * accordingly. Note that there is no indication in the skbuff that the
200 * CHECKSUM_PARTIAL refers to an FCOE checksum; a driver that supports
201 * both IP checksum offload and FCOE CRC offload must verify which offload
202 * is configured for a packet, presumably by inspecting packet headers.
203 *
204 * E. Checksumming on output with GSO.
205 *
206 * In the case of a GSO packet (skb_is_gso(skb) is true), checksum offload
207 * is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the
208 * gso_type is SKB_GSO_TCPV4 or SKB_GSO_TCPV6, TCP checksum offload as
209 * part of the GSO operation is implied. If a checksum is being offloaded
210 * with GSO then ip_summed is CHECKSUM_PARTIAL, csum_start and csum_offset
211 * are set to refer to the outermost checksum being offloaded (two offloaded
212 * checksums are possible with UDP encapsulation).
133 */ 213 */
134 214
135/* Don't change this without changing skb_csum_unnecessary! */ 215/* Don't change this without changing skb_csum_unnecessary! */
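To make the CHECKSUM_PARTIAL contract above concrete, a transmit path for a device with limited offload capabilities might look like the following sketch; example_hw_can_csum() stands in for whatever per-device capability test applies and is not a real helper:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static bool example_hw_can_csum(const struct sk_buff *skb)
{
	/* Assumed capability check, e.g. plain TCP/UDP over IPv4
	 * only (the NETIF_F_IP_CSUM case described above).
	 */
	return false;
}

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    !example_hw_can_csum(skb)) {
		/* The stack requested offload, so the driver MUST
		 * resolve the checksum; fall back to software.
		 */
		if (skb_checksum_help(skb))
			goto drop;
	}

	/* ... hand skb to the hardware queue ... */
	return NETDEV_TX_OK;

drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}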
@@ -833,7 +913,7 @@ struct sk_buff_fclones {
833 * skb_fclone_busy - check if fclone is busy 913 * skb_fclone_busy - check if fclone is busy
834 * @skb: buffer 914 * @skb: buffer
835 * 915 *
836 * Returns true is skb is a fast clone, and its clone is not freed. 916 * Returns true if skb is a fast clone, and its clone is not freed.
837 * Some drivers call skb_orphan() in their ndo_start_xmit(), 917 * Some drivers call skb_orphan() in their ndo_start_xmit(),
838 * so we also check that this didn't happen. 918 * so we also check that this didn't happen.
839 */ 919 */
@@ -1082,9 +1162,6 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
1082 1162
1083static inline void skb_sender_cpu_clear(struct sk_buff *skb) 1163static inline void skb_sender_cpu_clear(struct sk_buff *skb)
1084{ 1164{
1085#ifdef CONFIG_XPS
1086 skb->sender_cpu = 0;
1087#endif
1088} 1165}
1089 1166
1090#ifdef NET_SKBUFF_DATA_USES_OFFSET 1167#ifdef NET_SKBUFF_DATA_USES_OFFSET
@@ -1942,6 +2019,11 @@ static inline unsigned char *skb_inner_transport_header(const struct sk_buff
1942 return skb->head + skb->inner_transport_header; 2019 return skb->head + skb->inner_transport_header;
1943} 2020}
1944 2021
2022static inline int skb_inner_transport_offset(const struct sk_buff *skb)
2023{
2024 return skb_inner_transport_header(skb) - skb->data;
2025}
2026
1945static inline void skb_reset_inner_transport_header(struct sk_buff *skb) 2027static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
1946{ 2028{
1947 skb->inner_transport_header = skb->data - skb->head; 2029 skb->inner_transport_header = skb->data - skb->head;
@@ -2723,6 +2805,23 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb,
2723 2805
2724unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); 2806unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
2725 2807
2808static inline void skb_postpush_rcsum(struct sk_buff *skb,
2809 const void *start, unsigned int len)
2810{
2811 /* For performing the reverse operation to skb_postpull_rcsum(),
2812 * we can instead of ...
2813 *
2814 * skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
2815 *
2816 * ... just use this equivalent version here to save a few
2817 * instructions. Feeding csum of 0 in csum_partial() and later
2818 * on adding skb->csum is equivalent to feed skb->csum in the
2819 * first place.
2820 */
2821 if (skb->ip_summed == CHECKSUM_COMPLETE)
2822 skb->csum = csum_partial(start, len, skb->csum);
2823}
2824
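A typical caller pairs the new helper with skb_push() when prepending a header to a packet whose skb->csum covers the payload; the wrapper below is an illustrative sketch:

#include <linux/skbuff.h>
#include <linux/string.h>

static void example_push_header(struct sk_buff *skb,
				const void *hdr, unsigned int hdrlen)
{
	unsigned char *p = skb_push(skb, hdrlen);

	memcpy(p, hdr, hdrlen);
	/* Reverse of skb_postpull_rcsum(): fold the newly exposed
	 * bytes into skb->csum (only CHECKSUM_COMPLETE is touched).
	 */
	skb_postpush_rcsum(skb, p, hdrlen);
}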
2726/** 2825/**
2727 * pskb_trim_rcsum - trim received skb and update checksum 2826 * pskb_trim_rcsum - trim received skb and update checksum
2728 * @skb: buffer to trim 2827 * @skb: buffer to trim
@@ -2788,6 +2887,12 @@ static inline void skb_frag_list_init(struct sk_buff *skb)
2788#define skb_walk_frags(skb, iter) \ 2887#define skb_walk_frags(skb, iter) \
2789 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next) 2888 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
2790 2889
2890
2891int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
2892 const struct sk_buff *skb);
2893struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
2894 int *peeked, int *off, int *err,
2895 struct sk_buff **last);
2791struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, 2896struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
2792 int *peeked, int *off, int *err); 2897 int *peeked, int *off, int *err);
2793struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, 2898struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
diff --git a/include/linux/soc/ti/knav_dma.h b/include/linux/soc/ti/knav_dma.h
index dad035c16d94..343c13ac4f71 100644
--- a/include/linux/soc/ti/knav_dma.h
+++ b/include/linux/soc/ti/knav_dma.h
@@ -144,17 +144,17 @@ struct knav_dma_cfg {
144 * @psdata: Protocol specific 144 * @psdata: Protocol specific
145 */ 145 */
146struct knav_dma_desc { 146struct knav_dma_desc {
147 u32 desc_info; 147 __le32 desc_info;
148 u32 tag_info; 148 __le32 tag_info;
149 u32 packet_info; 149 __le32 packet_info;
150 u32 buff_len; 150 __le32 buff_len;
151 u32 buff; 151 __le32 buff;
152 u32 next_desc; 152 __le32 next_desc;
153 u32 orig_len; 153 __le32 orig_len;
154 u32 orig_buff; 154 __le32 orig_buff;
155 u32 epib[KNAV_DMA_NUM_EPIB_WORDS]; 155 __le32 epib[KNAV_DMA_NUM_EPIB_WORDS];
156 u32 psdata[KNAV_DMA_NUM_PS_WORDS]; 156 __le32 psdata[KNAV_DMA_NUM_PS_WORDS];
157 u32 pad[4]; 157 __le32 pad[4];
158} ____cacheline_aligned; 158} ____cacheline_aligned;
159 159
160#if IS_ENABLED(CONFIG_KEYSTONE_NAVIGATOR_DMA) 160#if IS_ENABLED(CONFIG_KEYSTONE_NAVIGATOR_DMA)
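With the descriptor words now typed __le32, CPU-side users must convert explicitly, and sparse can flag missed conversions. Hedged accessor sketches (real drivers use their own helpers):

#include <linux/soc/ti/knav_dma.h>

static u32 example_desc_buff_len(const struct knav_dma_desc *desc)
{
	return le32_to_cpu(desc->buff_len);
}

static void example_desc_set_buff(struct knav_dma_desc *desc, u32 buff)
{
	desc->buff = cpu_to_le32(buff);
}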
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
index fddebc617469..4018b48f2b3b 100644
--- a/include/linux/sock_diag.h
+++ b/include/linux/sock_diag.h
@@ -15,6 +15,7 @@ struct sock_diag_handler {
15 __u8 family; 15 __u8 family;
16 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh); 16 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
17 int (*get_info)(struct sk_buff *skb, struct sock *sk); 17 int (*get_info)(struct sk_buff *skb, struct sock *sk);
18 int (*destroy)(struct sk_buff *skb, struct nlmsghdr *nlh);
18}; 19};
19 20
20int sock_diag_register(const struct sock_diag_handler *h); 21int sock_diag_register(const struct sock_diag_handler *h);
@@ -68,4 +69,5 @@ bool sock_diag_has_destroy_listeners(const struct sock *sk)
68} 69}
69void sock_diag_broadcast_destroy(struct sock *sk); 70void sock_diag_broadcast_destroy(struct sock *sk);
70 71
72int sock_diag_destroy(struct sock *sk, int err);
71#endif 73#endif
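A family diag module opts into SOCK_DESTROY by filling the new callback; everything below (the placeholder family constant and the stubs) is an illustrative sketch rather than code from this series:

#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>

#define AF_EXAMPLE AF_MAX	/* placeholder family, illustrative only */

static int example_diag_dump(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	return -EOPNOTSUPP;	/* stub */
}

static int example_diag_destroy(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	/* A real handler parses the request, looks up the socket
	 * under CAP_NET_ADMIN, and invokes the protocol's destroy.
	 */
	return -EOPNOTSUPP;	/* stub */
}

static const struct sock_diag_handler example_diag_handler = {
	.family	 = AF_EXAMPLE,
	.dump	 = example_diag_dump,
	.destroy = example_diag_destroy,
};

The module would then pass &example_diag_handler to sock_diag_register() from its init path.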
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index c3d1a525bacc..26a0b3c3ce5f 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -524,13 +524,9 @@ struct ssb_init_invariants {
524typedef int (*ssb_invariants_func_t)(struct ssb_bus *bus, 524typedef int (*ssb_invariants_func_t)(struct ssb_bus *bus,
525 struct ssb_init_invariants *iv); 525 struct ssb_init_invariants *iv);
526 526
527/* Register a SSB system bus. get_invariants() is called after the 527/* Register SoC bus. */
528 * basic system devices are initialized. 528extern int ssb_bus_host_soc_register(struct ssb_bus *bus,
529 * The invariants are usually fetched from some NVRAM. 529 unsigned long baseaddr);
530 * Put the invariants into the struct pointed to by iv. */
531extern int ssb_bus_ssbbus_register(struct ssb_bus *bus,
532 unsigned long baseaddr,
533 ssb_invariants_func_t get_invariants);
534#ifdef CONFIG_SSB_PCIHOST 530#ifdef CONFIG_SSB_PCIHOST
535extern int ssb_bus_pcibus_register(struct ssb_bus *bus, 531extern int ssb_bus_pcibus_register(struct ssb_bus *bus,
536 struct pci_dev *host_pci); 532 struct pci_dev *host_pci);
diff --git a/include/linux/wait.h b/include/linux/wait.h
index d2f4ec7dba7c..ae71a769b89e 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -137,6 +137,27 @@ static inline int waitqueue_active(wait_queue_head_t *q)
137 return !list_empty(&q->task_list); 137 return !list_empty(&q->task_list);
138} 138}
139 139
140/**
141 * wq_has_sleeper - check if there are any waiting processes
142 * @wq: wait queue head
143 *
144 * Returns true if wq has waiting processes
145 *
146 * Please refer to the comment for waitqueue_active.
147 */
148static inline bool wq_has_sleeper(wait_queue_head_t *wq)
149{
150 /*
151 * We need to be sure we are in sync with the
152 * add_wait_queue modifications to the wait queue.
153 *
154 * This memory barrier should be paired with one on the
155 * waiting side.
156 */
157 smp_mb();
158 return waitqueue_active(wq);
159}
160
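The barrier pairing the comment refers to looks roughly like this in practice; the condition flag and the waiter/waker pair below are illustrative:

#include <linux/compiler.h>
#include <linux/wait.h>

static int example_condition;
static DECLARE_WAIT_QUEUE_HEAD(example_wq);

static void example_waker(void)
{
	WRITE_ONCE(example_condition, 1);
	/* The smp_mb() inside wq_has_sleeper() orders the store
	 * above against reading the waitqueue, pairing with the
	 * full barrier implied by the waiter's prepare_to_wait().
	 */
	if (wq_has_sleeper(&example_wq))
		wake_up(&example_wq);
}

static int example_waiter(void)
{
	return wait_event_interruptible(example_wq,
					READ_ONCE(example_condition));
}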
140extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); 161extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
141extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait); 162extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
142extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); 163extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);