Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/cputime.h | 65
-rw-r--r--  include/asm-generic/gpio.h | 6
-rw-r--r--  include/asm-generic/socket.h | 3
-rw-r--r--  include/linux/Kbuild | 3
-rw-r--r--  include/linux/amba/bus.h | 7
-rw-r--r--  include/linux/amba/pl022.h | 4
-rw-r--r--  include/linux/atmdev.h | 10
-rw-r--r--  include/linux/bcma/bcma.h | 55
-rw-r--r--  include/linux/bcma/bcma_driver_chipcommon.h | 1
-rw-r--r--  include/linux/bitops.h | 10
-rw-r--r--  include/linux/bootmem.h | 2
-rw-r--r--  include/linux/can/platform/cc770.h | 33
-rw-r--r--  include/linux/cgroup_subsys.h | 8
-rw-r--r--  include/linux/cpu.h | 19
-rw-r--r--  include/linux/cpuidle.h | 2
-rw-r--r--  include/linux/debugfs.h | 26
-rw-r--r--  include/linux/debugobjects.h | 6
-rw-r--r--  include/linux/device.h | 108
-rw-r--r--  include/linux/dynamic_queue_limits.h | 97
-rw-r--r--  include/linux/edac.h | 8
-rw-r--r--  include/linux/eeprom_93cx6.h | 8
-rw-r--r--  include/linux/errqueue.h | 7
-rw-r--r--  include/linux/ethtool.h | 116
-rw-r--r--  include/linux/genetlink.h | 24
-rw-r--r--  include/linux/hardirq.h | 21
-rw-r--r--  include/linux/i2c.h | 13
-rw-r--r--  include/linux/ieee80211.h | 32
-rw-r--r--  include/linux/if.h | 1
-rw-r--r--  include/linux/if_ether.h | 1
-rw-r--r--  include/linux/if_team.h | 242
-rw-r--r--  include/linux/if_vlan.h | 80
-rw-r--r--  include/linux/inet_diag.h | 43
-rw-r--r--  include/linux/ipv6.h | 4
-rw-r--r--  include/linux/irqdomain.h | 3
-rw-r--r--  include/linux/jump_label.h | 27
-rw-r--r--  include/linux/kernel_stat.h | 36
-rw-r--r--  include/linux/kobject.h | 2
-rw-r--r--  include/linux/kref.h | 77
-rw-r--r--  include/linux/latencytop.h | 3
-rw-r--r--  include/linux/lockd/lockd.h | 6
-rw-r--r--  include/linux/lockdep.h | 4
-rw-r--r--  include/linux/mdio-bitbang.h | 2
-rw-r--r--  include/linux/mdio-gpio.h | 2
-rw-r--r--  include/linux/memblock.h | 170
-rw-r--r--  include/linux/memcontrol.h | 23
-rw-r--r--  include/linux/memory.h | 3
-rw-r--r--  include/linux/mii.h | 200
-rw-r--r--  include/linux/mlx4/cmd.h | 51
-rw-r--r--  include/linux/mlx4/device.h | 80
-rw-r--r--  include/linux/mlx4/qp.h | 28
-rw-r--r--  include/linux/mm.h | 34
-rw-r--r--  include/linux/mmzone.h | 8
-rw-r--r--  include/linux/mod_devicetable.h | 18
-rw-r--r--  include/linux/neighbour.h | 1
-rw-r--r--  include/linux/netdev_features.h | 146
-rw-r--r--  include/linux/netdevice.h | 343
-rw-r--r--  include/linux/netfilter.h | 26
-rw-r--r--  include/linux/netfilter/Kbuild | 4
-rw-r--r--  include/linux/netfilter/nf_conntrack_common.h | 4
-rw-r--r--  include/linux/netfilter/nf_conntrack_tuple_common.h | 27
-rw-r--r--  include/linux/netfilter/nf_nat.h | 25
-rw-r--r--  include/linux/netfilter/nfnetlink.h | 3
-rw-r--r--  include/linux/netfilter/nfnetlink_acct.h | 36
-rw-r--r--  include/linux/netfilter/xt_CT.h | 3
-rw-r--r--  include/linux/netfilter/xt_ecn.h | 35
-rw-r--r--  include/linux/netfilter/xt_nfacct.h | 13
-rw-r--r--  include/linux/netfilter/xt_rpfilter.h | 23
-rw-r--r--  include/linux/netfilter_ipv4/Kbuild | 1
-rw-r--r--  include/linux/netfilter_ipv4/ipt_ecn.h | 38
-rw-r--r--  include/linux/netfilter_ipv4/nf_nat.h | 58
-rw-r--r--  include/linux/netlink.h | 4
-rw-r--r--  include/linux/nfc.h | 31
-rw-r--r--  include/linux/nl80211.h | 171
-rw-r--r--  include/linux/node.h | 6
-rw-r--r--  include/linux/of.h | 44
-rw-r--r--  include/linux/of_fdt.h | 4
-rw-r--r--  include/linux/of_gpio.h | 10
-rw-r--r--  include/linux/openvswitch.h | 452
-rw-r--r--  include/linux/perf_event.h | 8
-rw-r--r--  include/linux/phonet.h | 2
-rw-r--r--  include/linux/pkt_sched.h | 32
-rw-r--r--  include/linux/platform_device.h | 14
-rw-r--r--  include/linux/poison.h | 6
-rw-r--r--  include/linux/pstore.h | 12
-rw-r--r--  include/linux/rcupdate.h | 115
-rw-r--r--  include/linux/sched.h | 31
-rw-r--r--  include/linux/skbuff.h | 86
-rw-r--r--  include/linux/smscphy.h | 25
-rw-r--r--  include/linux/sock_diag.h | 48
-rw-r--r--  include/linux/spi/spi.h | 11
-rw-r--r--  include/linux/srcu.h | 87
-rw-r--r--  include/linux/ssb/ssb.h | 9
-rw-r--r--  include/linux/ssb/ssb_regs.h | 17
-rw-r--r--  include/linux/sunrpc/clnt.h | 8
-rw-r--r--  include/linux/tcp.h | 5
-rw-r--r--  include/linux/tick.h | 11
-rw-r--r--  include/linux/unix_diag.h | 54
-rw-r--r--  include/linux/usb.h | 12
-rw-r--r--  include/linux/virtio_config.h | 14
-rw-r--r--  include/linux/vmalloc.h | 1
-rw-r--r--  include/linux/wait.h | 4
-rw-r--r--  include/linux/wl12xx.h | 5
-rw-r--r--  include/net/addrconf.h | 3
-rw-r--r--  include/net/af_unix.h | 6
-rw-r--r--  include/net/arp.h | 2
-rw-r--r--  include/net/atmclip.h | 7
-rw-r--r--  include/net/bluetooth/bluetooth.h | 56
-rw-r--r--  include/net/bluetooth/hci.h | 83
-rw-r--r--  include/net/bluetooth/hci_core.h | 366
-rw-r--r--  include/net/bluetooth/l2cap.h | 457
-rw-r--r--  include/net/bluetooth/mgmt.h | 251
-rw-r--r--  include/net/bluetooth/smp.h | 6
-rw-r--r--  include/net/caif/caif_dev.h | 21
-rw-r--r--  include/net/caif/caif_layer.h | 4
-rw-r--r--  include/net/caif/caif_spi.h | 4
-rw-r--r--  include/net/caif/cfcnfg.h | 23
-rw-r--r--  include/net/caif/cfserl.h | 4
-rw-r--r--  include/net/cfg80211.h | 269
-rw-r--r--  include/net/dsa.h | 144
-rw-r--r--  include/net/dst.h | 6
-rw-r--r--  include/net/flow.h | 5
-rw-r--r--  include/net/flow_keys.h | 16
-rw-r--r--  include/net/genetlink.h | 2
-rw-r--r--  include/net/icmp.h | 4
-rw-r--r--  include/net/ieee80211_radiotap.h | 8
-rw-r--r--  include/net/ieee802154.h | 6
-rw-r--r--  include/net/inet6_hashtables.h | 4
-rw-r--r--  include/net/inet_connection_sock.h | 6
-rw-r--r--  include/net/inet_sock.h | 6
-rw-r--r--  include/net/inet_timewait_sock.h | 12
-rw-r--r--  include/net/inetpeer.h | 2
-rw-r--r--  include/net/ip.h | 8
-rw-r--r--  include/net/ip6_fib.h | 7
-rw-r--r--  include/net/ip6_route.h | 6
-rw-r--r--  include/net/ip_vs.h | 8
-rw-r--r--  include/net/ipv6.h | 22
-rw-r--r--  include/net/iucv/af_iucv.h | 2
-rw-r--r--  include/net/mac80211.h | 62
-rw-r--r--  include/net/ndisc.h | 45
-rw-r--r--  include/net/neighbour.h | 17
-rw-r--r--  include/net/net_namespace.h | 2
-rw-r--r--  include/net/netfilter/nf_conntrack_acct.h | 4
-rw-r--r--  include/net/netfilter/nf_conntrack_expect.h | 1
-rw-r--r--  include/net/netfilter/nf_conntrack_tuple.h | 1
-rw-r--r--  include/net/netfilter/nf_nat.h | 10
-rw-r--r--  include/net/netfilter/nf_nat_core.h | 2
-rw-r--r--  include/net/netfilter/nf_nat_protocol.h | 17
-rw-r--r--  include/net/netfilter/nf_tproxy_core.h | 2
-rw-r--r--  include/net/netns/ipv4.h | 1
-rw-r--r--  include/net/netns/mib.h | 6
-rw-r--r--  include/net/netns/xfrm.h | 2
-rw-r--r--  include/net/netprio_cgroup.h | 57
-rw-r--r--  include/net/nfc/nci.h | 178
-rw-r--r--  include/net/nfc/nci_core.h | 13
-rw-r--r--  include/net/nfc/nfc.h | 24
-rw-r--r--  include/net/protocol.h | 12
-rw-r--r--  include/net/red.h | 187
-rw-r--r--  include/net/regulatory.h | 6
-rw-r--r--  include/net/sctp/sctp.h | 4
-rw-r--r--  include/net/sctp/structs.h | 5
-rw-r--r--  include/net/snmp.h | 4
-rw-r--r--  include/net/sock.h | 277
-rw-r--r--  include/net/tcp.h | 25
-rw-r--r--  include/net/tcp_memcontrol.h | 19
-rw-r--r--  include/net/udp.h | 13
-rw-r--r--  include/net/xfrm.h | 12
-rw-r--r--  include/trace/events/rcu.h | 122
-rw-r--r--  include/trace/events/sched.h | 57
-rw-r--r--  include/xen/balloon.h | 6
169 files changed, 5486 insertions, 1510 deletions
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h
index 12a1764f612b..9a62937c56ca 100644
--- a/include/asm-generic/cputime.h
+++ b/include/asm-generic/cputime.h
@@ -4,71 +4,66 @@
 #include <linux/time.h>
 #include <linux/jiffies.h>
 
-typedef unsigned long cputime_t;
+typedef unsigned long __nocast cputime_t;
 
-#define cputime_zero (0UL)
 #define cputime_one_jiffy jiffies_to_cputime(1)
-#define cputime_max ((~0UL >> 1) - 1)
-#define cputime_add(__a, __b) ((__a) + (__b))
-#define cputime_sub(__a, __b) ((__a) - (__b))
-#define cputime_div(__a, __n) ((__a) / (__n))
-#define cputime_halve(__a) ((__a) >> 1)
-#define cputime_eq(__a, __b) ((__a) == (__b))
-#define cputime_gt(__a, __b) ((__a) > (__b))
-#define cputime_ge(__a, __b) ((__a) >= (__b))
-#define cputime_lt(__a, __b) ((__a) < (__b))
-#define cputime_le(__a, __b) ((__a) <= (__b))
-#define cputime_to_jiffies(__ct) (__ct)
+#define cputime_to_jiffies(__ct) (__force unsigned long)(__ct)
 #define cputime_to_scaled(__ct) (__ct)
-#define jiffies_to_cputime(__hz) (__hz)
+#define jiffies_to_cputime(__hz) (__force cputime_t)(__hz)
 
-typedef u64 cputime64_t;
+typedef u64 __nocast cputime64_t;
 
-#define cputime64_zero (0ULL)
-#define cputime64_add(__a, __b) ((__a) + (__b))
-#define cputime64_sub(__a, __b) ((__a) - (__b))
-#define cputime64_to_jiffies64(__ct) (__ct)
-#define jiffies64_to_cputime64(__jif) (__jif)
-#define cputime_to_cputime64(__ct) ((u64) __ct)
-#define cputime64_gt(__a, __b) ((__a) > (__b))
+#define cputime64_to_jiffies64(__ct) (__force u64)(__ct)
+#define jiffies64_to_cputime64(__jif) (__force cputime64_t)(__jif)
 
-#define nsecs_to_cputime64(__ct) nsecs_to_jiffies64(__ct)
+#define nsecs_to_cputime64(__ct) \
+	jiffies64_to_cputime64(nsecs_to_jiffies64(__ct))
 
 
 /*
  * Convert cputime to microseconds and back.
  */
-#define cputime_to_usecs(__ct) jiffies_to_usecs(__ct)
-#define usecs_to_cputime(__msecs) usecs_to_jiffies(__msecs)
-#define usecs_to_cputime64(__msecs) nsecs_to_jiffies64((__msecs) * 1000)
+#define cputime_to_usecs(__ct) \
+	jiffies_to_usecs(cputime_to_jiffies(__ct))
+#define usecs_to_cputime(__usec) \
+	jiffies_to_cputime(usecs_to_jiffies(__usec))
+#define usecs_to_cputime64(__usec) \
+	jiffies64_to_cputime64(nsecs_to_jiffies64((__usec) * 1000))
 
 /*
  * Convert cputime to seconds and back.
  */
-#define cputime_to_secs(jif) ((jif) / HZ)
-#define secs_to_cputime(sec) ((sec) * HZ)
+#define cputime_to_secs(jif) (cputime_to_jiffies(jif) / HZ)
+#define secs_to_cputime(sec) jiffies_to_cputime((sec) * HZ)
 
 /*
  * Convert cputime to timespec and back.
  */
-#define timespec_to_cputime(__val) timespec_to_jiffies(__val)
-#define cputime_to_timespec(__ct,__val) jiffies_to_timespec(__ct,__val)
+#define timespec_to_cputime(__val) \
+	jiffies_to_cputime(timespec_to_jiffies(__val))
+#define cputime_to_timespec(__ct,__val) \
+	jiffies_to_timespec(cputime_to_jiffies(__ct),__val)
 
 /*
  * Convert cputime to timeval and back.
  */
-#define timeval_to_cputime(__val) timeval_to_jiffies(__val)
-#define cputime_to_timeval(__ct,__val) jiffies_to_timeval(__ct,__val)
+#define timeval_to_cputime(__val) \
+	jiffies_to_cputime(timeval_to_jiffies(__val))
+#define cputime_to_timeval(__ct,__val) \
+	jiffies_to_timeval(cputime_to_jiffies(__ct),__val)
 
 /*
  * Convert cputime to clock and back.
  */
-#define cputime_to_clock_t(__ct) jiffies_to_clock_t(__ct)
-#define clock_t_to_cputime(__x) clock_t_to_jiffies(__x)
+#define cputime_to_clock_t(__ct) \
+	jiffies_to_clock_t(cputime_to_jiffies(__ct))
+#define clock_t_to_cputime(__x) \
+	jiffies_to_cputime(clock_t_to_jiffies(__x))
 
 /*
  * Convert cputime64 to clock.
  */
-#define cputime64_to_clock_t(__ct) jiffies_64_to_clock_t(__ct)
+#define cputime64_to_clock_t(__ct) \
+	jiffies_64_to_clock_t(cputime64_to_jiffies64(__ct))
 
 #endif
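Note: the hunk above turns cputime_t and cputime64_t into __nocast types, so values are expected to round-trip through the conversion macros rather than mix freely with plain integers. A minimal sketch of using the new macros (the helper name below is invented for illustration):

	/* Sketch only: exercise the conversion macros from the header above. */
	static unsigned long usecs_roundtrip(unsigned int usec)
	{
		cputime_t ct = usecs_to_cputime(usec);	/* __force cast happens inside the macro */

		return cputime_to_usecs(ct);		/* back to plain microseconds */
	}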
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 8c8621097fa0..d466c8d8826d 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -4,6 +4,7 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/errno.h>
+#include <linux/of.h>
 
 #ifdef CONFIG_GPIOLIB
 
@@ -128,13 +129,14 @@ struct gpio_chip {
  */
 	struct device_node *of_node;
 	int of_gpio_n_cells;
-	int (*of_xlate)(struct gpio_chip *gc, struct device_node *np,
-		const void *gpio_spec, u32 *flags);
+	int (*of_xlate)(struct gpio_chip *gc,
+		const struct of_phandle_args *gpiospec, u32 *flags);
 #endif
 };
 
 extern const char *gpiochip_is_requested(struct gpio_chip *chip,
 	unsigned offset);
+extern struct gpio_chip *gpio_to_chip(unsigned gpio);
 extern int __must_check gpiochip_reserve(int start, int ngpio);
 
 /* add/remove chips */
diff --git a/include/asm-generic/socket.h b/include/asm-generic/socket.h
index 9a6115e7cf63..49c1704173e7 100644
--- a/include/asm-generic/socket.h
+++ b/include/asm-generic/socket.h
@@ -64,4 +64,7 @@
 #define SO_DOMAIN 39
 
 #define SO_RXQ_OVFL 40
+
+#define SO_WIFI_STATUS 41
+#define SCM_WIFI_STATUS SO_WIFI_STATUS
 #endif /* __ASM_GENERIC_SOCKET_H */
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 619b5657af77..c94e71781b79 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -185,6 +185,7 @@ header-y += if_pppol2tp.h
 header-y += if_pppox.h
 header-y += if_slip.h
 header-y += if_strip.h
+header-y += if_team.h
 header-y += if_tr.h
 header-y += if_tun.h
 header-y += if_tunnel.h
@@ -194,7 +195,9 @@ header-y += igmp.h
 header-y += in.h
 header-y += in6.h
 header-y += in_route.h
+header-y += sock_diag.h
 header-y += inet_diag.h
+header-y += unix_diag.h
 header-y += inotify.h
 header-y += input.h
 header-y += ioctl.h
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index fcbbe71a3cc1..724c69c40bb8 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -16,6 +16,7 @@
 
 #include <linux/clk.h>
 #include <linux/device.h>
+#include <linux/mod_devicetable.h>
 #include <linux/err.h>
 #include <linux/resource.h>
 #include <linux/regulator/consumer.h>
@@ -35,12 +36,6 @@ struct amba_device {
 	unsigned int irq[AMBA_NR_IRQS];
 };
 
-struct amba_id {
-	unsigned int id;
-	unsigned int mask;
-	void *data;
-};
-
 struct amba_driver {
 	struct device_driver drv;
 	int (*probe)(struct amba_device *, const struct amba_id *);
diff --git a/include/linux/amba/pl022.h b/include/linux/amba/pl022.h
index 4ce98f54186b..572f637299c9 100644
--- a/include/linux/amba/pl022.h
+++ b/include/linux/amba/pl022.h
@@ -238,6 +238,9 @@ struct dma_chan;
  * @enable_dma: if true enables DMA driven transfers.
  * @dma_rx_param: parameter to locate an RX DMA channel.
  * @dma_tx_param: parameter to locate a TX DMA channel.
+ * @autosuspend_delay: delay in ms following transfer completion before the
+ * runtime power management system suspends the device. A setting of 0
+ * indicates no delay and the device will be suspended immediately.
  */
 struct pl022_ssp_controller {
 	u16 bus_id;
@@ -246,6 +249,7 @@ struct pl022_ssp_controller {
 	bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
 	void *dma_rx_param;
 	void *dma_tx_param;
+	int autosuspend_delay;
 };
 
 /**
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index 49a83ca900ba..f4ff882cb2da 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -445,16 +445,6 @@ void vcc_insert_socket(struct sock *sk);
 
 void atm_dev_release_vccs(struct atm_dev *dev);
 
-/*
- * This is approximately the algorithm used by alloc_skb.
- *
- */
-
-static inline int atm_guess_pdu2truesize(int size)
-{
-	return SKB_DATA_ALIGN(size) + sizeof(struct skb_shared_info);
-}
-
 
 static inline void atm_force_charge(struct atm_vcc *vcc,int truesize)
 {
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 4d4b59de9467..f4b8346b1a33 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -205,61 +205,82 @@ struct bcma_bus {
 	struct ssb_sprom sprom;
 };
 
-extern inline u32 bcma_read8(struct bcma_device *core, u16 offset)
+static inline u32 bcma_read8(struct bcma_device *core, u16 offset)
 {
 	return core->bus->ops->read8(core, offset);
 }
-extern inline u32 bcma_read16(struct bcma_device *core, u16 offset)
+static inline u32 bcma_read16(struct bcma_device *core, u16 offset)
 {
 	return core->bus->ops->read16(core, offset);
 }
-extern inline u32 bcma_read32(struct bcma_device *core, u16 offset)
+static inline u32 bcma_read32(struct bcma_device *core, u16 offset)
 {
 	return core->bus->ops->read32(core, offset);
 }
-extern inline
+static inline
 void bcma_write8(struct bcma_device *core, u16 offset, u32 value)
 {
 	core->bus->ops->write8(core, offset, value);
 }
-extern inline
+static inline
 void bcma_write16(struct bcma_device *core, u16 offset, u32 value)
 {
 	core->bus->ops->write16(core, offset, value);
 }
-extern inline
+static inline
 void bcma_write32(struct bcma_device *core, u16 offset, u32 value)
 {
 	core->bus->ops->write32(core, offset, value);
 }
 #ifdef CONFIG_BCMA_BLOCKIO
-extern inline void bcma_block_read(struct bcma_device *core, void *buffer,
+static inline void bcma_block_read(struct bcma_device *core, void *buffer,
 	size_t count, u16 offset, u8 reg_width)
 {
 	core->bus->ops->block_read(core, buffer, count, offset, reg_width);
 }
-extern inline void bcma_block_write(struct bcma_device *core, const void *buffer,
-	size_t count, u16 offset, u8 reg_width)
+static inline void bcma_block_write(struct bcma_device *core,
+	const void *buffer, size_t count,
+	u16 offset, u8 reg_width)
 {
 	core->bus->ops->block_write(core, buffer, count, offset, reg_width);
 }
 #endif
-extern inline u32 bcma_aread32(struct bcma_device *core, u16 offset)
+static inline u32 bcma_aread32(struct bcma_device *core, u16 offset)
 {
 	return core->bus->ops->aread32(core, offset);
 }
-extern inline
+static inline
 void bcma_awrite32(struct bcma_device *core, u16 offset, u32 value)
 {
 	core->bus->ops->awrite32(core, offset, value);
 }
 
-#define bcma_mask32(cc, offset, mask) \
-	bcma_write32(cc, offset, bcma_read32(cc, offset) & (mask))
-#define bcma_set32(cc, offset, set) \
-	bcma_write32(cc, offset, bcma_read32(cc, offset) | (set))
-#define bcma_maskset32(cc, offset, mask, set) \
-	bcma_write32(cc, offset, (bcma_read32(cc, offset) & (mask)) | (set))
+static inline void bcma_mask32(struct bcma_device *cc, u16 offset, u32 mask)
+{
+	bcma_write32(cc, offset, bcma_read32(cc, offset) & mask);
+}
+static inline void bcma_set32(struct bcma_device *cc, u16 offset, u32 set)
+{
+	bcma_write32(cc, offset, bcma_read32(cc, offset) | set);
+}
+static inline void bcma_maskset32(struct bcma_device *cc,
+	u16 offset, u32 mask, u32 set)
+{
+	bcma_write32(cc, offset, (bcma_read32(cc, offset) & mask) | set);
+}
+static inline void bcma_mask16(struct bcma_device *cc, u16 offset, u16 mask)
+{
+	bcma_write16(cc, offset, bcma_read16(cc, offset) & mask);
+}
+static inline void bcma_set16(struct bcma_device *cc, u16 offset, u16 set)
+{
+	bcma_write16(cc, offset, bcma_read16(cc, offset) | set);
+}
+static inline void bcma_maskset16(struct bcma_device *cc,
+	u16 offset, u16 mask, u16 set)
+{
+	bcma_write16(cc, offset, (bcma_read16(cc, offset) & mask) | set);
+}
 
 extern bool bcma_core_is_enabled(struct bcma_device *core);
 extern void bcma_core_disable(struct bcma_device *core, u32 flags);
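Note: the accessors above become static inline functions and the mask/set helpers gain 16-bit variants. A hedged sketch of the new read-modify-write helpers in use (the register offset and bit values below are made up purely for illustration):

	/* Illustrative only: offset and bits do not correspond to real hardware. */
	#define EXAMPLE_REG	0x0100
	#define EXAMPLE_ENABLE	0x00000001
	#define EXAMPLE_MODE	0x000000f0

	static void example_enable(struct bcma_device *core)
	{
		/* clear the mode field, then set the enable bit */
		bcma_maskset32(core, EXAMPLE_REG, ~EXAMPLE_MODE, 0);
		bcma_set32(core, EXAMPLE_REG, EXAMPLE_ENABLE);
	}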
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index 1526d965ed06..a33086a7530b 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -203,6 +203,7 @@
 #define BCMA_CC_PMU_CTL 0x0600 /* PMU control */
 #define BCMA_CC_PMU_CTL_ILP_DIV 0xFFFF0000 /* ILP div mask */
 #define BCMA_CC_PMU_CTL_ILP_DIV_SHIFT 16
+#define BCMA_CC_PMU_CTL_PLL_UPD 0x00000400
 #define BCMA_CC_PMU_CTL_NOILPONW 0x00000200 /* No ILP on wait */
 #define BCMA_CC_PMU_CTL_HTREQEN 0x00000100 /* HT req enable */
 #define BCMA_CC_PMU_CTL_ALPREQEN 0x00000080 /* ALP req enable */
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index a3ef66a2a083..3c1063acb2ab 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -22,8 +22,14 @@ extern unsigned long __sw_hweight64(__u64 w);
 #include <asm/bitops.h>
 
 #define for_each_set_bit(bit, addr, size) \
 	for ((bit) = find_first_bit((addr), (size)); \
 	(bit) < (size); \
+	(bit) = find_next_bit((addr), (size), (bit) + 1))
+
+/* same as for_each_set_bit() but use bit as value to start with */
+#define for_each_set_bit_cont(bit, addr, size) \
+	for ((bit) = find_next_bit((addr), (size), (bit)); \
+	(bit) < (size); \
 	(bit) = find_next_bit((addr), (size), (bit) + 1))
 
 static __inline__ int get_bitmask_order(unsigned int count)
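Note: for_each_set_bit_cont() resumes scanning at the current value of bit rather than starting from bit zero. A small sketch (the bitmap and helper are hypothetical):

	/* Count the set bits at or after 'start' in a bitmap of 'size' bits. */
	static unsigned int count_bits_from(const unsigned long *map,
		unsigned int size, unsigned int start)
	{
		unsigned int bit = start, n = 0;

		for_each_set_bit_cont(bit, map, size)
			n++;
		return n;
	}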
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index ab344a521105..66d3e954eb6c 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -44,7 +44,7 @@ extern unsigned long init_bootmem_node(pg_data_t *pgdat,
 	unsigned long endpfn);
 extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
 
-unsigned long free_all_memory_core_early(int nodeid);
+extern unsigned long free_low_memory_core_early(int nodeid);
 extern unsigned long free_all_bootmem_node(pg_data_t *pgdat);
 extern unsigned long free_all_bootmem(void);
 
diff --git a/include/linux/can/platform/cc770.h b/include/linux/can/platform/cc770.h
new file mode 100644
index 000000000000..7702641f87ee
--- /dev/null
+++ b/include/linux/can/platform/cc770.h
@@ -0,0 +1,33 @@
+#ifndef _CAN_PLATFORM_CC770_H_
+#define _CAN_PLATFORM_CC770_H_
+
+/* CPU Interface Register (0x02) */
+#define CPUIF_CEN 0x01 /* Clock Out Enable */
+#define CPUIF_MUX 0x04 /* Multiplex */
+#define CPUIF_SLP 0x08 /* Sleep */
+#define CPUIF_PWD 0x10 /* Power Down Mode */
+#define CPUIF_DMC 0x20 /* Divide Memory Clock */
+#define CPUIF_DSC 0x40 /* Divide System Clock */
+#define CPUIF_RST 0x80 /* Hardware Reset Status */
+
+/* Clock Out Register (0x1f) */
+#define CLKOUT_CD_MASK 0x0f /* Clock Divider mask */
+#define CLKOUT_SL_MASK 0x30 /* Slew Rate mask */
+#define CLKOUT_SL_SHIFT 4
+
+/* Bus Configuration Register (0x2f) */
+#define BUSCFG_DR0 0x01 /* Disconnect RX0 Input / Select RX input */
+#define BUSCFG_DR1 0x02 /* Disconnect RX1 Input / Silent mode */
+#define BUSCFG_DT1 0x08 /* Disconnect TX1 Output */
+#define BUSCFG_POL 0x20 /* Polarity dominant or recessive */
+#define BUSCFG_CBY 0x40 /* Input Comparator Bypass */
+
+struct cc770_platform_data {
+	u32 osc_freq; /* CAN bus oscillator frequency in Hz */
+
+	u8 cir; /* CPU Interface Register */
+	u8 cor; /* Clock Out Register */
+	u8 bcr; /* Bus Configuration Register */
+};
+
+#endif /* !_CAN_PLATFORM_CC770_H_ */
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index ac663c18776c..0bd390ce98b2 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -59,8 +59,16 @@ SUBSYS(net_cls)
 SUBSYS(blkio)
 #endif
 
+/* */
+
 #ifdef CONFIG_CGROUP_PERF
 SUBSYS(perf)
 #endif
 
 /* */
+
+#ifdef CONFIG_NETPRIO_CGROUP
+SUBSYS(net_prio)
+#endif
+
+/* */
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 6cb60fd2ea84..1f6587590a1a 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -14,7 +14,7 @@
 #ifndef _LINUX_CPU_H_
 #define _LINUX_CPU_H_
 
-#include <linux/sysdev.h>
+#include <linux/device.h>
 #include <linux/node.h>
 #include <linux/compiler.h>
 #include <linux/cpumask.h>
@@ -22,19 +22,20 @@
 struct cpu {
 	int node_id; /* The node which contains the CPU */
 	int hotpluggable; /* creates sysfs control file if hotpluggable */
-	struct sys_device sysdev;
+	struct device dev;
 };
 
 extern int register_cpu(struct cpu *cpu, int num);
-extern struct sys_device *get_cpu_sysdev(unsigned cpu);
+extern struct device *get_cpu_device(unsigned cpu);
+extern bool cpu_is_hotpluggable(unsigned cpu);
 
-extern int cpu_add_sysdev_attr(struct sysdev_attribute *attr);
-extern void cpu_remove_sysdev_attr(struct sysdev_attribute *attr);
+extern int cpu_add_dev_attr(struct device_attribute *attr);
+extern void cpu_remove_dev_attr(struct device_attribute *attr);
 
-extern int cpu_add_sysdev_attr_group(struct attribute_group *attrs);
-extern void cpu_remove_sysdev_attr_group(struct attribute_group *attrs);
+extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
+extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
 
-extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls);
+extern int sched_create_sysfs_power_savings_entries(struct device *dev);
 
 #ifdef CONFIG_HOTPLUG_CPU
 extern void unregister_cpu(struct cpu *cpu);
@@ -160,7 +161,7 @@ static inline void cpu_maps_update_done(void)
 }
 
 #endif /* CONFIG_SMP */
-extern struct sysdev_class cpu_sysdev_class;
+extern struct bus_type cpu_subsys;
 
 #ifdef CONFIG_HOTPLUG_CPU
 /* Stop CPUs going up and down. */
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 7408af843b8a..23f81de51829 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -130,7 +130,6 @@ struct cpuidle_driver {
 #ifdef CONFIG_CPU_IDLE
 extern void disable_cpuidle(void);
 extern int cpuidle_idle_call(void);
-
 extern int cpuidle_register_driver(struct cpuidle_driver *drv);
 struct cpuidle_driver *cpuidle_get_driver(void);
 extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
@@ -145,7 +144,6 @@ extern void cpuidle_disable_device(struct cpuidle_device *dev);
 #else
 static inline void disable_cpuidle(void) { }
 static inline int cpuidle_idle_call(void) { return -ENODEV; }
-
 static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
 {return -ENODEV; }
 static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; }
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index d1ac841e8dc7..6169c26fd8c8 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -16,6 +16,7 @@
 #define _DEBUGFS_H_
 
 #include <linux/fs.h>
+#include <linux/seq_file.h>
 
 #include <linux/types.h>
 
@@ -26,6 +27,17 @@ struct debugfs_blob_wrapper {
 	unsigned long size;
 };
 
+struct debugfs_reg32 {
+	char *name;
+	unsigned long offset;
+};
+
+struct debugfs_regset32 {
+	struct debugfs_reg32 *regs;
+	int nregs;
+	void __iomem *base;
+};
+
 extern struct dentry *arch_debugfs_dir;
 
 #if defined(CONFIG_DEBUG_FS)
@@ -74,6 +86,13 @@ struct dentry *debugfs_create_blob(const char *name, umode_t mode,
 	struct dentry *parent,
 	struct debugfs_blob_wrapper *blob);
 
+struct dentry *debugfs_create_regset32(const char *name, mode_t mode,
+	struct dentry *parent,
+	struct debugfs_regset32 *regset);
+
+int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
+	int nregs, void __iomem *base, char *prefix);
+
 bool debugfs_initialized(void);
 
 #else
@@ -188,6 +207,13 @@ static inline struct dentry *debugfs_create_blob(const char *name, umode_t mode,
 	return ERR_PTR(-ENODEV);
 }
 
+static inline struct dentry *debugfs_create_regset32(const char *name,
+	mode_t mode, struct dentry *parent,
+	struct debugfs_regset32 *regset)
+{
+	return ERR_PTR(-ENODEV);
+}
+
 static inline bool debugfs_initialized(void)
 {
 	return false;
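Note: the new regset interface lets a driver expose a block of 32-bit registers as a single debugfs file. A hedged sketch of wiring it up (the foo_* names and register layout are invented for illustration):

	static struct debugfs_reg32 foo_regs[] = {
		{ .name = "ctrl",   .offset = 0x00 },
		{ .name = "status", .offset = 0x04 },
	};

	static struct debugfs_regset32 foo_regset;

	static void foo_debugfs_init(struct dentry *parent, void __iomem *base)
	{
		foo_regset.regs  = foo_regs;
		foo_regset.nregs = ARRAY_SIZE(foo_regs);
		foo_regset.base  = base;
		debugfs_create_regset32("registers", S_IRUGO, parent, &foo_regset);
	}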
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h
index 65970b811e22..0e5f5785d9f2 100644
--- a/include/linux/debugobjects.h
+++ b/include/linux/debugobjects.h
@@ -46,6 +46,8 @@ struct debug_obj {
  * fails
  * @fixup_free: fixup function, which is called when the free check
  * fails
+ * @fixup_assert_init: fixup function, which is called when the assert_init
+ * check fails
  */
 struct debug_obj_descr {
 	const char *name;
@@ -54,6 +56,7 @@ struct debug_obj_descr {
 	int (*fixup_activate) (void *addr, enum debug_obj_state state);
 	int (*fixup_destroy) (void *addr, enum debug_obj_state state);
 	int (*fixup_free) (void *addr, enum debug_obj_state state);
+	int (*fixup_assert_init)(void *addr, enum debug_obj_state state);
 };
 
 #ifdef CONFIG_DEBUG_OBJECTS
@@ -64,6 +67,7 @@ extern void debug_object_activate (void *addr, struct debug_obj_descr *descr);
 extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr);
 extern void debug_object_destroy (void *addr, struct debug_obj_descr *descr);
 extern void debug_object_free (void *addr, struct debug_obj_descr *descr);
+extern void debug_object_assert_init(void *addr, struct debug_obj_descr *descr);
 
 /*
  * Active state:
@@ -89,6 +93,8 @@ static inline void
 debug_object_destroy (void *addr, struct debug_obj_descr *descr) { }
 static inline void
 debug_object_free (void *addr, struct debug_obj_descr *descr) { }
+static inline void
+debug_object_assert_init(void *addr, struct debug_obj_descr *descr) { }
 
 static inline void debug_objects_early_init(void) { }
 static inline void debug_objects_mem_init(void) { }
diff --git a/include/linux/device.h b/include/linux/device.h
index 2fe0005543ed..5b3adb8f9588 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -53,6 +53,8 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
  * struct bus_type - The bus type of the device
  *
  * @name: The name of the bus.
+ * @dev_name: Used for subsystems to enumerate devices like ("foo%u", dev->id).
+ * @dev_root: Default device to use as the parent.
  * @bus_attrs: Default attributes of the bus.
  * @dev_attrs: Default attributes of the devices on the bus.
  * @drv_attrs: Default attributes of the device drivers on the bus.
@@ -86,6 +88,8 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
  */
 struct bus_type {
 	const char *name;
+	const char *dev_name;
+	struct device *dev_root;
 	struct bus_attribute *bus_attrs;
 	struct device_attribute *dev_attrs;
 	struct driver_attribute *drv_attrs;
@@ -106,12 +110,30 @@ struct bus_type {
 	struct subsys_private *p;
 };
 
-extern int __must_check bus_register(struct bus_type *bus);
+/* This is a #define to keep the compiler from merging different
+ * instances of the __key variable */
+#define bus_register(subsys) \
+({ \
+	static struct lock_class_key __key; \
+	__bus_register(subsys, &__key); \
+})
+extern int __must_check __bus_register(struct bus_type *bus,
+	struct lock_class_key *key);
 extern void bus_unregister(struct bus_type *bus);
 
 extern int __must_check bus_rescan_devices(struct bus_type *bus);
 
 /* iterator helpers for buses */
+struct subsys_dev_iter {
+	struct klist_iter ki;
+	const struct device_type *type;
+};
+void subsys_dev_iter_init(struct subsys_dev_iter *iter,
+	struct bus_type *subsys,
+	struct device *start,
+	const struct device_type *type);
+struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter);
+void subsys_dev_iter_exit(struct subsys_dev_iter *iter);
 
 int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
 	int (*fn)(struct device *dev, void *data));
@@ -121,10 +143,10 @@ struct device *bus_find_device(struct bus_type *bus, struct device *start,
 struct device *bus_find_device_by_name(struct bus_type *bus,
 	struct device *start,
 	const char *name);
-
+struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id,
+	struct device *hint);
 int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
 	void *data, int (*fn)(struct device_driver *, void *));
-
 void bus_sort_breadthfirst(struct bus_type *bus,
 	int (*compare)(const struct device *a,
 	const struct device *b));
@@ -256,6 +278,33 @@ struct device *driver_find_device(struct device_driver *drv,
 	int (*match)(struct device *dev, void *data));
 
 /**
+ * struct subsys_interface - interfaces to device functions
+ * @name name of the device function
+ * @subsystem subsytem of the devices to attach to
+ * @node the list of functions registered at the subsystem
+ * @add device hookup to device function handler
+ * @remove device hookup to device function handler
+ *
+ * Simple interfaces attached to a subsystem. Multiple interfaces can
+ * attach to a subsystem and its devices. Unlike drivers, they do not
+ * exclusively claim or control devices. Interfaces usually represent
+ * a specific functionality of a subsystem/class of devices.
+ */
+struct subsys_interface {
+	const char *name;
+	struct bus_type *subsys;
+	struct list_head node;
+	int (*add_dev)(struct device *dev, struct subsys_interface *sif);
+	int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
+};
+
+int subsys_interface_register(struct subsys_interface *sif);
+void subsys_interface_unregister(struct subsys_interface *sif);
+
+int subsys_system_register(struct bus_type *subsys,
+	const struct attribute_group **groups);
+
+/**
  * struct class - device classes
  * @name: Name of the class.
  * @owner: The module owner.
@@ -438,11 +487,31 @@ struct device_attribute {
 	const char *buf, size_t count);
 };
 
-#define DEVICE_ATTR(_name, _mode, _show, _store) \
-struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
+struct dev_ext_attribute {
+	struct device_attribute attr;
+	void *var;
+};
+
+ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
+	char *buf);
+ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr,
+	const char *buf, size_t count);
+ssize_t device_show_int(struct device *dev, struct device_attribute *attr,
+	char *buf);
+ssize_t device_store_int(struct device *dev, struct device_attribute *attr,
+	const char *buf, size_t count);
 
-extern int __must_check device_create_file(struct device *device,
-	const struct device_attribute *entry);
+#define DEVICE_ATTR(_name, _mode, _show, _store) \
+	struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
+#define DEVICE_ULONG_ATTR(_name, _mode, _var) \
+	struct dev_ext_attribute dev_attr_##_name = \
+	{ __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) }
+#define DEVICE_INT_ATTR(_name, _mode, _var) \
+	struct dev_ext_attribute dev_attr_##_name = \
+	{ __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) }
+
+extern int device_create_file(struct device *device,
+	const struct device_attribute *entry);
 extern void device_remove_file(struct device *dev,
 	const struct device_attribute *attr);
 extern int __must_check device_create_bin_file(struct device *dev,
@@ -490,6 +559,9 @@ extern int devres_release_group(struct device *dev, void *id);
 extern void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp);
 extern void devm_kfree(struct device *dev, void *p);
 
+void __iomem *devm_request_and_ioremap(struct device *dev,
+	struct resource *res);
+
 struct device_dma_parameters {
 	/*
 	 * a low level driver may set these to teach IOMMU code about
@@ -600,6 +672,7 @@ struct device {
 	struct device_node *of_node; /* associated device tree node */
 
 	dev_t devt; /* dev_t, creates the sysfs "dev" */
+	u32 id; /* device instance */
 
 	spinlock_t devres_lock;
 	struct list_head devres_head;
@@ -924,4 +997,25 @@ extern long sysfs_deprecated;
 #define sysfs_deprecated 0
 #endif
 
+/**
+ * module_driver() - Helper macro for drivers that don't do anything
+ * special in module init/exit. This eliminates a lot of boilerplate.
+ * Each module may only use this macro once, and calling it replaces
+ * module_init() and module_exit().
+ *
+ * Use this macro to construct bus specific macros for registering
+ * drivers, and do not use it on its own.
+ */
+#define module_driver(__driver, __register, __unregister) \
+static int __init __driver##_init(void) \
+{ \
+	return __register(&(__driver)); \
+} \
+module_init(__driver##_init); \
+static void __exit __driver##_exit(void) \
+{ \
+	__unregister(&(__driver)); \
+} \
+module_exit(__driver##_exit);
+
 #endif /* _DEVICE_H_ */
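Note: module_driver() above is intended as a building block for bus-specific registration one-liners. A sketch of how a bus might wrap it (the foo_* names are hypothetical):

	/* Hypothetical bus glue built on module_driver(); foo_driver_register()
	 * and foo_driver_unregister() are assumed to exist for that bus. */
	#define module_foo_driver(__foo_driver) \
		module_driver(__foo_driver, foo_driver_register, foo_driver_unregister)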
diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h
new file mode 100644
index 000000000000..5621547d631b
--- /dev/null
+++ b/include/linux/dynamic_queue_limits.h
@@ -0,0 +1,97 @@
+/*
+ * Dynamic queue limits (dql) - Definitions
+ *
+ * Copyright (c) 2011, Tom Herbert <therbert@google.com>
+ *
+ * This header file contains the definitions for dynamic queue limits (dql).
+ * dql would be used in conjunction with a producer/consumer type queue
+ * (possibly a HW queue). Such a queue would have these general properties:
+ *
+ * 1) Objects are queued up to some limit specified as number of objects.
+ * 2) Periodically a completion process executes which retires consumed
+ * objects.
+ * 3) Starvation occurs when limit has been reached, all queued data has
+ * actually been consumed, but completion processing has not yet run
+ * so queuing new data is blocked.
+ * 4) Minimizing the amount of queued data is desirable.
+ *
+ * The goal of dql is to calculate the limit as the minimum number of objects
+ * needed to prevent starvation.
+ *
+ * The primary functions of dql are:
+ * dql_queued - called when objects are enqueued to record number of objects
+ * dql_avail - returns how many objects are available to be queued based
+ * on the object limit and how many objects are already enqueued
+ * dql_completed - called at completion time to indicate how many objects
+ * were retired from the queue
+ *
+ * The dql implementation does not implement any locking for the dql data
+ * structures, the higher layer should provide this. dql_queued should
+ * be serialized to prevent concurrent execution of the function; this
+ * is also true for dql_completed. However, dql_queued and dlq_completed can
+ * be executed concurrently (i.e. they can be protected by different locks).
+ */
+
+#ifndef _LINUX_DQL_H
+#define _LINUX_DQL_H
+
+#ifdef __KERNEL__
+
+struct dql {
+	/* Fields accessed in enqueue path (dql_queued) */
+	unsigned int num_queued; /* Total ever queued */
+	unsigned int adj_limit; /* limit + num_completed */
+	unsigned int last_obj_cnt; /* Count at last queuing */
+
+	/* Fields accessed only by completion path (dql_completed) */
+
+	unsigned int limit ____cacheline_aligned_in_smp; /* Current limit */
+	unsigned int num_completed; /* Total ever completed */
+
+	unsigned int prev_ovlimit; /* Previous over limit */
+	unsigned int prev_num_queued; /* Previous queue total */
+	unsigned int prev_last_obj_cnt; /* Previous queuing cnt */
+
+	unsigned int lowest_slack; /* Lowest slack found */
+	unsigned long slack_start_time; /* Time slacks seen */
+
+	/* Configuration */
+	unsigned int max_limit; /* Max limit */
+	unsigned int min_limit; /* Minimum limit */
+	unsigned int slack_hold_time; /* Time to measure slack */
+};
+
+/* Set some static maximums */
+#define DQL_MAX_OBJECT (UINT_MAX / 16)
+#define DQL_MAX_LIMIT ((UINT_MAX / 2) - DQL_MAX_OBJECT)
+
+/*
+ * Record number of objects queued. Assumes that caller has already checked
+ * availability in the queue with dql_avail.
+ */
+static inline void dql_queued(struct dql *dql, unsigned int count)
+{
+	BUG_ON(count > DQL_MAX_OBJECT);
+
+	dql->num_queued += count;
+	dql->last_obj_cnt = count;
+}
+
+/* Returns how many objects can be queued, < 0 indicates over limit. */
+static inline int dql_avail(const struct dql *dql)
+{
+	return dql->adj_limit - dql->num_queued;
+}
+
+/* Record number of completed objects and recalculate the limit. */
+void dql_completed(struct dql *dql, unsigned int count);
+
+/* Reset dql state */
+void dql_reset(struct dql *dql);
+
+/* Initialize dql state */
+int dql_init(struct dql *dql, unsigned hold_time);
+
+#endif /* _KERNEL_ */
+
+#endif /* _LINUX_DQL_H */
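Note: the new header above documents the intended split between the enqueue and completion paths. A hedged sketch of that call pattern in a hypothetical transmit path (locking and the ring structure are elided):

	/* Enqueue side: the header assumes availability was checked first. */
	static bool example_xmit(struct dql *dql, unsigned int bytes)
	{
		if (dql_avail(dql) < 0)
			return false;	/* over limit: stop the queue */
		dql_queued(dql, bytes);
		return true;
	}

	/* Completion side: retire consumed bytes so the limit can adapt. */
	static void example_tx_complete(struct dql *dql, unsigned int bytes)
	{
		dql_completed(dql, bytes);
	}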
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 055b248bdd53..1cd3947987e5 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -13,7 +13,7 @@
 #define _LINUX_EDAC_H_
 
 #include <linux/atomic.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
 
 #define EDAC_OPSTATE_INVAL -1
 #define EDAC_OPSTATE_POLL 0
@@ -23,12 +23,12 @@
 extern int edac_op_state;
 extern int edac_err_assert;
 extern atomic_t edac_handlers;
-extern struct sysdev_class edac_class;
+extern struct bus_type edac_subsys;
 
 extern int edac_handler_set(void);
 extern void edac_atomic_assert_error(void);
-extern struct sysdev_class *edac_get_sysfs_class(void);
-extern void edac_put_sysfs_class(void);
+extern struct bus_type *edac_get_sysfs_subsys(void);
+extern void edac_put_sysfs_subsys(void);
 
 static inline void opstate_init(void)
 {
diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h
index c4627cbdb8e0..e50f98b0297a 100644
--- a/include/linux/eeprom_93cx6.h
+++ b/include/linux/eeprom_93cx6.h
@@ -33,6 +33,7 @@
 #define PCI_EEPROM_WIDTH_93C86 8
 #define PCI_EEPROM_WIDTH_OPCODE 3
 #define PCI_EEPROM_WRITE_OPCODE 0x05
+#define PCI_EEPROM_ERASE_OPCODE 0x07
 #define PCI_EEPROM_READ_OPCODE 0x06
 #define PCI_EEPROM_EWDS_OPCODE 0x10
 #define PCI_EEPROM_EWEN_OPCODE 0x13
@@ -46,6 +47,7 @@
  * @register_write(struct eeprom_93cx6 *eeprom): handler to
  * write to the eeprom register by using all reg_* fields.
  * @width: eeprom width, should be one of the PCI_EEPROM_WIDTH_* defines
+ * @drive_data: Set if we're driving the data line.
  * @reg_data_in: register field to indicate data input
  * @reg_data_out: register field to indicate data output
  * @reg_data_clock: register field to set the data clock
@@ -62,6 +64,7 @@ struct eeprom_93cx6 {
 
 	int width;
 
+	char drive_data;
 	char reg_data_in;
 	char reg_data_out;
 	char reg_data_clock;
@@ -72,3 +75,8 @@ extern void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom,
 	const u8 word, u16 *data);
 extern void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom,
 	const u8 word, __le16 *data, const u16 words);
+
+extern void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable);
+
+extern void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom,
+	u8 addr, u16 data);
diff --git a/include/linux/errqueue.h b/include/linux/errqueue.h
index 034072cea853..fd0628be45ce 100644
--- a/include/linux/errqueue.h
+++ b/include/linux/errqueue.h
@@ -17,14 +17,15 @@ struct sock_extended_err {
 #define SO_EE_ORIGIN_LOCAL 1
 #define SO_EE_ORIGIN_ICMP 2
 #define SO_EE_ORIGIN_ICMP6 3
-#define SO_EE_ORIGIN_TIMESTAMPING 4
+#define SO_EE_ORIGIN_TXSTATUS 4
+#define SO_EE_ORIGIN_TIMESTAMPING SO_EE_ORIGIN_TXSTATUS
 
 #define SO_EE_OFFENDER(ee) ((struct sockaddr*)((ee)+1))
 
 #ifdef __KERNEL__
 
 #include <net/ip.h>
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <linux/ipv6.h>
 #endif
 
@@ -33,7 +34,7 @@ struct sock_extended_err {
 struct sock_exterr_skb {
 	union {
 		struct inet_skb_parm h4;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 		struct inet6_skb_parm h6;
 #endif
 	} header;
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index de33de1e2052..da5b2de99ae4 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -489,7 +489,10 @@ struct ethtool_rx_flow_spec {
  * on return.
  *
  * For %ETHTOOL_GRXCLSRLCNT, @rule_cnt is set to the number of defined
- * rules on return.
+ * rules on return. If @data is non-zero on return then it is the
+ * size of the rule table, plus the flag %RX_CLS_LOC_SPECIAL if the
+ * driver supports any special location values. If that flag is not
+ * set in @data then special location values should not be used.
  *
  * For %ETHTOOL_GRXCLSRULE, @fs.@location specifies the location of an
  * existing rule on entry and @fs contains the rule on return.
@@ -501,10 +504,23 @@ struct ethtool_rx_flow_spec {
  * must use the second parameter to get_rxnfc() instead of @rule_locs.
  *
  * For %ETHTOOL_SRXCLSRLINS, @fs specifies the rule to add or update.
- * @fs.@location specifies the location to use and must not be ignored.
+ * @fs.@location either specifies the location to use or is a special
+ * location value with %RX_CLS_LOC_SPECIAL flag set. On return,
+ * @fs.@location is the actual rule location.
  *
  * For %ETHTOOL_SRXCLSRLDEL, @fs.@location specifies the location of an
  * existing rule on entry.
+ *
+ * A driver supporting the special location values for
+ * %ETHTOOL_SRXCLSRLINS may add the rule at any suitable unused
+ * location, and may remove a rule at a later location (lower
+ * priority) that matches exactly the same set of flows. The special
+ * values are: %RX_CLS_LOC_ANY, selecting any location;
+ * %RX_CLS_LOC_FIRST, selecting the first suitable location (maximum
+ * priority); and %RX_CLS_LOC_LAST, selecting the last suitable
+ * location (minimum priority). Additional special values may be
+ * defined in future and drivers must return -%EINVAL for any
+ * unrecognised value.
  */
 struct ethtool_rxnfc {
 	__u32 cmd;
@@ -543,9 +559,15 @@ struct compat_ethtool_rxnfc {
 /**
  * struct ethtool_rxfh_indir - command to get or set RX flow hash indirection
  * @cmd: Specific command number - %ETHTOOL_GRXFHINDIR or %ETHTOOL_SRXFHINDIR
- * @size: On entry, the array size of the user buffer. On return from
- * %ETHTOOL_GRXFHINDIR, the array size of the hardware indirection table.
+ * @size: On entry, the array size of the user buffer, which may be zero.
+ * On return from %ETHTOOL_GRXFHINDIR, the array size of the hardware
+ * indirection table.
  * @ring_index: RX ring/queue index for each hash value
+ *
+ * For %ETHTOOL_GRXFHINDIR, a @size of zero means that only the size
+ * should be returned. For %ETHTOOL_SRXFHINDIR, a @size of zero means
+ * the table should be reset to default values. This last feature
+ * is not supported by the original implementations.
  */
 struct ethtool_rxfh_indir {
 	__u32 cmd;
@@ -724,9 +746,6 @@ enum ethtool_sfeatures_retval_bits {
 
 #include <linux/rculist.h>
 
-/* needed by dev_disable_lro() */
-extern int __ethtool_set_flags(struct net_device *dev, u32 flags);
-
 extern int __ethtool_get_settings(struct net_device *dev,
 	struct ethtool_cmd *cmd);
 
@@ -750,19 +769,18 @@ struct net_device;
 
 /* Some generic methods drivers may use in their ethtool_ops */
 u32 ethtool_op_get_link(struct net_device *dev);
-u32 ethtool_op_get_tx_csum(struct net_device *dev);
-int ethtool_op_set_tx_csum(struct net_device *dev, u32 data);
-int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data);
-int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data);
-u32 ethtool_op_get_sg(struct net_device *dev);
-int ethtool_op_set_sg(struct net_device *dev, u32 data);
-u32 ethtool_op_get_tso(struct net_device *dev);
-int ethtool_op_set_tso(struct net_device *dev, u32 data);
-u32 ethtool_op_get_ufo(struct net_device *dev);
-int ethtool_op_set_ufo(struct net_device *dev, u32 data);
-u32 ethtool_op_get_flags(struct net_device *dev);
-int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported);
-bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported);
+
+/**
+ * ethtool_rxfh_indir_default - get default value for RX flow hash indirection
+ * @index: Index in RX flow hash indirection table
+ * @n_rx_rings: Number of RX rings to use
+ *
+ * This function provides the default policy for RX flow hash indirection.
+ */
+static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
+{
+	return index % n_rx_rings;
+}
 
 /**
  * struct ethtool_ops - optional netdev operations
@@ -807,22 +825,6 @@ bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported);
807 * @get_pauseparam: Report pause parameters 825 * @get_pauseparam: Report pause parameters
808 * @set_pauseparam: Set pause parameters. Returns a negative error code 826 * @set_pauseparam: Set pause parameters. Returns a negative error code
809 * or zero. 827 * or zero.
810 * @get_rx_csum: Deprecated in favour of the netdev feature %NETIF_F_RXCSUM.
811 * Report whether receive checksums are turned on or off.
812 * @set_rx_csum: Deprecated in favour of generic netdev features. Turn
813 * receive checksum on or off. Returns a negative error code or zero.
814 * @get_tx_csum: Deprecated as redundant. Report whether transmit checksums
815 * are turned on or off.
816 * @set_tx_csum: Deprecated in favour of generic netdev features. Turn
817 * transmit checksums on or off. Returns a negative error code or zero.
818 * @get_sg: Deprecated as redundant. Report whether scatter-gather is
819 * enabled.
820 * @set_sg: Deprecated in favour of generic netdev features. Turn
821 * scatter-gather on or off. Returns a negative error code or zero.
822 * @get_tso: Deprecated as redundant. Report whether TCP segmentation
823 * offload is enabled.
824 * @set_tso: Deprecated in favour of generic netdev features. Turn TCP
825 * segmentation offload on or off. Returns a negative error code or zero.
826 * @self_test: Run specified self-tests 828 * @self_test: Run specified self-tests
827 * @get_strings: Return a set of strings that describe the requested objects 829 * @get_strings: Return a set of strings that describe the requested objects
828 * @set_phys_id: Identify the physical devices, e.g. by flashing an LED 830 * @set_phys_id: Identify the physical devices, e.g. by flashing an LED
@@ -844,15 +846,6 @@ bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported);
844 * negative error code or zero. 846 * negative error code or zero.
845 * @complete: Function to be called after any other operation except 847 * @complete: Function to be called after any other operation except
846 * @begin. Will be called even if the other operation failed. 848 * @begin. Will be called even if the other operation failed.
847 * @get_ufo: Deprecated as redundant. Report whether UDP fragmentation
848 * offload is enabled.
849 * @set_ufo: Deprecated in favour of generic netdev features. Turn UDP
850 * fragmentation offload on or off. Returns a negative error code or zero.
851 * @get_flags: Deprecated as redundant. Report features included in
852 * &enum ethtool_flags that are enabled.
853 * @set_flags: Deprecated in favour of generic netdev features. Turn
854 * features included in &enum ethtool_flags on or off. Returns a
855 * negative error code or zero.
856 * @get_priv_flags: Report driver-specific feature flags. 849 * @get_priv_flags: Report driver-specific feature flags.
857 * @set_priv_flags: Set driver-specific feature flags. Returns a negative 850 * @set_priv_flags: Set driver-specific feature flags. Returns a negative
858 * error code or zero. 851 * error code or zero.
@@ -866,11 +859,13 @@ bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported);
866 * @reset: Reset (part of) the device, as specified by a bitmask of 859 * @reset: Reset (part of) the device, as specified by a bitmask of
867 * flags from &enum ethtool_reset_flags. Returns a negative 860 * flags from &enum ethtool_reset_flags. Returns a negative
868 * error code or zero. 861 * error code or zero.
869 * @set_rx_ntuple: Set an RX n-tuple rule. Returns a negative error code 862 * @get_rxfh_indir_size: Get the size of the RX flow hash indirection table.
870 * or zero. 863 * Returns zero if not supported for this specific device.
871 * @get_rxfh_indir: Get the contents of the RX flow hash indirection table. 864 * @get_rxfh_indir: Get the contents of the RX flow hash indirection table.
865 * Will not be called if @get_rxfh_indir_size returns zero.
872 * Returns a negative error code or zero. 866 * Returns a negative error code or zero.
873 * @set_rxfh_indir: Set the contents of the RX flow hash indirection table. 867 * @set_rxfh_indir: Set the contents of the RX flow hash indirection table.
868 * Will not be called if @get_rxfh_indir_size returns zero.
874 * Returns a negative error code or zero. 869 * Returns a negative error code or zero.
875 * @get_channels: Get number of channels. 870 * @get_channels: Get number of channels.
876 * @set_channels: Set number of channels. Returns a negative error code or 871 * @set_channels: Set number of channels. Returns a negative error code or
@@ -917,14 +912,6 @@ struct ethtool_ops {
917 struct ethtool_pauseparam*); 912 struct ethtool_pauseparam*);
918 int (*set_pauseparam)(struct net_device *, 913 int (*set_pauseparam)(struct net_device *,
919 struct ethtool_pauseparam*); 914 struct ethtool_pauseparam*);
920 u32 (*get_rx_csum)(struct net_device *);
921 int (*set_rx_csum)(struct net_device *, u32);
922 u32 (*get_tx_csum)(struct net_device *);
923 int (*set_tx_csum)(struct net_device *, u32);
924 u32 (*get_sg)(struct net_device *);
925 int (*set_sg)(struct net_device *, u32);
926 u32 (*get_tso)(struct net_device *);
927 int (*set_tso)(struct net_device *, u32);
928 void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); 915 void (*self_test)(struct net_device *, struct ethtool_test *, u64 *);
929 void (*get_strings)(struct net_device *, u32 stringset, u8 *); 916 void (*get_strings)(struct net_device *, u32 stringset, u8 *);
930 int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state); 917 int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state);
@@ -932,10 +919,6 @@ struct ethtool_ops {
932 struct ethtool_stats *, u64 *); 919 struct ethtool_stats *, u64 *);
933 int (*begin)(struct net_device *); 920 int (*begin)(struct net_device *);
934 void (*complete)(struct net_device *); 921 void (*complete)(struct net_device *);
935 u32 (*get_ufo)(struct net_device *);
936 int (*set_ufo)(struct net_device *, u32);
937 u32 (*get_flags)(struct net_device *);
938 int (*set_flags)(struct net_device *, u32);
939 u32 (*get_priv_flags)(struct net_device *); 922 u32 (*get_priv_flags)(struct net_device *);
940 int (*set_priv_flags)(struct net_device *, u32); 923 int (*set_priv_flags)(struct net_device *, u32);
941 int (*get_sset_count)(struct net_device *, int); 924 int (*get_sset_count)(struct net_device *, int);
@@ -944,12 +927,9 @@ struct ethtool_ops {
944 int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); 927 int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);
945 int (*flash_device)(struct net_device *, struct ethtool_flash *); 928 int (*flash_device)(struct net_device *, struct ethtool_flash *);
946 int (*reset)(struct net_device *, u32 *); 929 int (*reset)(struct net_device *, u32 *);
947 int (*set_rx_ntuple)(struct net_device *, 930 u32 (*get_rxfh_indir_size)(struct net_device *);
948 struct ethtool_rx_ntuple *); 931 int (*get_rxfh_indir)(struct net_device *, u32 *);
949 int (*get_rxfh_indir)(struct net_device *, 932 int (*set_rxfh_indir)(struct net_device *, const u32 *);
950 struct ethtool_rxfh_indir *);
951 int (*set_rxfh_indir)(struct net_device *,
952 const struct ethtool_rxfh_indir *);
953 void (*get_channels)(struct net_device *, struct ethtool_channels *); 933 void (*get_channels)(struct net_device *, struct ethtool_channels *);
954 int (*set_channels)(struct net_device *, struct ethtool_channels *); 934 int (*set_channels)(struct net_device *, struct ethtool_channels *);
955 int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); 935 int (*get_dump_flag)(struct net_device *, struct ethtool_dump *);
@@ -1173,6 +1153,12 @@ struct ethtool_ops {
1173 1153
1174#define RX_CLS_FLOW_DISC 0xffffffffffffffffULL 1154#define RX_CLS_FLOW_DISC 0xffffffffffffffffULL
1175 1155
1156/* Special RX classification rule insert location values */
1157#define RX_CLS_LOC_SPECIAL 0x80000000 /* flag */
1158#define RX_CLS_LOC_ANY 0xffffffff
1159#define RX_CLS_LOC_FIRST 0xfffffffe
1160#define RX_CLS_LOC_LAST 0xfffffffd
1161
1176/* Reset flags */ 1162/* Reset flags */
1177/* The reset() operation must clear the flags for the components which 1163/* The reset() operation must clear the flags for the components which
1178 * were actually reset. On successful return, the flags indicate the 1164 * were actually reset. On successful return, the flags indicate the
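
For illustration, a minimal sketch of how a driver might wire up the reworked indirection-table hooks; the "mynic" driver, its table size, ring count and private layout are hypothetical, and a real driver would also program the hardware table:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/errno.h>

#define MYNIC_INDIR_SIZE 128		/* hypothetical table size */

struct mynic_priv {
	u32 rss_indir[MYNIC_INDIR_SIZE];
	unsigned int n_rx_rings;
};

static u32 mynic_get_rxfh_indir_size(struct net_device *dev)
{
	return MYNIC_INDIR_SIZE;	/* returning 0 would mean "unsupported" */
}

static int mynic_get_rxfh_indir(struct net_device *dev, u32 *indir)
{
	struct mynic_priv *priv = netdev_priv(dev);

	memcpy(indir, priv->rss_indir, sizeof(priv->rss_indir));
	return 0;
}

static int mynic_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
	struct mynic_priv *priv = netdev_priv(dev);
	unsigned int i;

	for (i = 0; i < MYNIC_INDIR_SIZE; i++)
		if (indir[i] >= priv->n_rx_rings)
			return -EINVAL;
	memcpy(priv->rss_indir, indir, sizeof(priv->rss_indir));
	/* ...write priv->rss_indir into the hardware table here... */
	return 0;
}

static const struct ethtool_ops mynic_ethtool_ops = {
	.get_rxfh_indir_size	= mynic_get_rxfh_indir_size,
	.get_rxfh_indir		= mynic_get_rxfh_indir,
	.set_rxfh_indir		= mynic_set_rxfh_indir,
};

At probe time such a driver could seed each entry with ethtool_rxfh_indir_default(i, priv->n_rx_rings), which spreads hash values across the RX rings in the default round-robin fashion.
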
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index 61549b26ad6f..73c28dea10ae 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -85,6 +85,30 @@ enum {
85/* All generic netlink requests are serialized by a global lock. */ 85/* All generic netlink requests are serialized by a global lock. */
86extern void genl_lock(void); 86extern void genl_lock(void);
87extern void genl_unlock(void); 87extern void genl_unlock(void);
88#ifdef CONFIG_PROVE_LOCKING
89extern int lockdep_genl_is_held(void);
90#endif
91
92/**
93 * rcu_dereference_genl - rcu_dereference with debug checking
94 * @p: The pointer to read, prior to dereferencing
95 *
96 * Do an rcu_dereference(p), but check that the caller either holds rcu_read_lock()
97 * or the genl mutex. Note: please prefer genl_dereference() or rcu_dereference().
98 */
99#define rcu_dereference_genl(p) \
100 rcu_dereference_check(p, lockdep_genl_is_held())
101
102/**
103 * genl_dereference - fetch RCU pointer when updates are prevented by genl mutex
104 * @p: The pointer to read, prior to dereferencing
105 *
106 * Return the value of the specified RCU-protected pointer, but omit
107 * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because
108 * caller holds genl mutex.
109 */
110#define genl_dereference(p) \
111 rcu_dereference_protected(p, lockdep_genl_is_held())
88 112
89#endif /* __KERNEL__ */ 113#endif /* __KERNEL__ */
90 114
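
A hypothetical usage sketch of the new accessors, guarding an RCU-managed pointer that is only updated under the genl mutex; struct my_ops and the surrounding module are assumptions, not part of the patch:

#include <linux/genetlink.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct my_ops {
	int (*handler)(void);
};

static struct my_ops __rcu *active_ops;

/* Reader: valid under rcu_read_lock() or with the genl mutex held;
 * rcu_dereference_genl() lets lockdep verify exactly that. */
static int my_dispatch(void)
{
	const struct my_ops *ops = rcu_dereference_genl(active_ops);

	return ops ? ops->handler() : -EOPNOTSUPP;
}

/* Updater: the genl mutex serializes updates, so the barrier-free
 * genl_dereference() is enough to fetch the old value. */
static void my_ops_replace(struct my_ops *new_ops)
{
	struct my_ops *old;

	genl_lock();
	old = genl_dereference(active_ops);
	rcu_assign_pointer(active_ops, new_ops);
	genl_unlock();

	synchronize_rcu();	/* wait for readers before freeing */
	kfree(old);
}
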
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index f743883f769e..bb7f30971858 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -139,20 +139,7 @@ static inline void account_system_vtime(struct task_struct *tsk)
139extern void account_system_vtime(struct task_struct *tsk); 139extern void account_system_vtime(struct task_struct *tsk);
140#endif 140#endif
141 141
142#if defined(CONFIG_NO_HZ)
143#if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU) 142#if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
144extern void rcu_enter_nohz(void);
145extern void rcu_exit_nohz(void);
146
147static inline void rcu_irq_enter(void)
148{
149 rcu_exit_nohz();
150}
151
152static inline void rcu_irq_exit(void)
153{
154 rcu_enter_nohz();
155}
156 143
157static inline void rcu_nmi_enter(void) 144static inline void rcu_nmi_enter(void)
158{ 145{
@@ -163,17 +150,9 @@ static inline void rcu_nmi_exit(void)
163} 150}
164 151
165#else 152#else
166extern void rcu_irq_enter(void);
167extern void rcu_irq_exit(void);
168extern void rcu_nmi_enter(void); 153extern void rcu_nmi_enter(void);
169extern void rcu_nmi_exit(void); 154extern void rcu_nmi_exit(void);
170#endif 155#endif
171#else
172# define rcu_irq_enter() do { } while (0)
173# define rcu_irq_exit() do { } while (0)
174# define rcu_nmi_enter() do { } while (0)
175# define rcu_nmi_exit() do { } while (0)
176#endif /* #if defined(CONFIG_NO_HZ) */
177 156
178/* 157/*
179 * It is safe to do non-atomic ops on ->hardirq_context, 158 * It is safe to do non-atomic ops on ->hardirq_context,
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 07d103a06d64..8e25a9167f13 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -482,6 +482,19 @@ static inline int i2c_adapter_id(struct i2c_adapter *adap)
482{ 482{
483 return adap->nr; 483 return adap->nr;
484} 484}
485
486/**
487 * module_i2c_driver() - Helper macro for registering an I2C driver
488 * @__i2c_driver: i2c_driver struct
489 *
490 * Helper macro for I2C drivers which do not do anything special in module
491 * init/exit. This eliminates a lot of boilerplate. Each module may only
492 * use this macro once, and calling it replaces module_init() and module_exit()
493 */
494#define module_i2c_driver(__i2c_driver) \
495 module_driver(__i2c_driver, i2c_add_driver, \
496 i2c_del_driver)
497
485#endif /* I2C */ 498#endif /* I2C */
486#endif /* __KERNEL__ */ 499#endif /* __KERNEL__ */
487 500
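
For reference, the boilerplate this macro replaces, sketched for a hypothetical "foo" driver with the probe/remove bodies elided:

#include <linux/i2c.h>
#include <linux/module.h>

static int foo_probe(struct i2c_client *client,
		     const struct i2c_device_id *id)
{
	return 0;	/* set up the device here */
}

static int foo_remove(struct i2c_client *client)
{
	return 0;	/* tear down the device here */
}

static const struct i2c_device_id foo_id[] = {
	{ "foo", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, foo_id);

static struct i2c_driver foo_driver = {
	.driver		= {
		.name	= "foo",
	},
	.probe		= foo_probe,
	.remove		= foo_remove,
	.id_table	= foo_id,
};

/* Expands to the module_init()/module_exit() pair that previously had to
 * call i2c_add_driver()/i2c_del_driver() by hand. */
module_i2c_driver(foo_driver);

MODULE_LICENSE("GPL");
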
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 48363c3c40f8..210e2c325534 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -128,6 +128,7 @@
128#define IEEE80211_QOS_CTL_ACK_POLICY_NOACK 0x0020 128#define IEEE80211_QOS_CTL_ACK_POLICY_NOACK 0x0020
129#define IEEE80211_QOS_CTL_ACK_POLICY_NO_EXPL 0x0040 129#define IEEE80211_QOS_CTL_ACK_POLICY_NO_EXPL 0x0040
130#define IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK 0x0060 130#define IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK 0x0060
131#define IEEE80211_QOS_CTL_ACK_POLICY_MASK 0x0060
131/* A-MSDU 802.11n */ 132/* A-MSDU 802.11n */
132#define IEEE80211_QOS_CTL_A_MSDU_PRESENT 0x0080 133#define IEEE80211_QOS_CTL_A_MSDU_PRESENT 0x0080
133/* Mesh Control 802.11s */ 134/* Mesh Control 802.11s */
@@ -543,6 +544,15 @@ static inline int ieee80211_is_qos_nullfunc(__le16 fc)
543 cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC); 544 cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC);
544} 545}
545 546
547/**
548 * ieee80211_is_first_frag - check if IEEE80211_SCTL_FRAG is not set
549 * @seq_ctrl: frame sequence control bytes in little-endian byteorder
550 */
551static inline int ieee80211_is_first_frag(__le16 seq_ctrl)
552{
553 return (seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0;
554}
555
546struct ieee80211s_hdr { 556struct ieee80211s_hdr {
547 u8 flags; 557 u8 flags;
548 u8 ttl; 558 u8 ttl;
@@ -770,6 +780,9 @@ struct ieee80211_mgmt {
770 } u; 780 } u;
771} __attribute__ ((packed)); 781} __attribute__ ((packed));
772 782
783/* Supported Rates value encodings in 802.11n-2009 7.3.2.2 */
784#define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127
785
773/* mgmt header + 1 byte category code */ 786/* mgmt header + 1 byte category code */
774#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u) 787#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u)
775 788
@@ -1552,6 +1565,8 @@ enum ieee80211_sa_query_action {
1552#define WLAN_CIPHER_SUITE_WEP104 0x000FAC05 1565#define WLAN_CIPHER_SUITE_WEP104 0x000FAC05
1553#define WLAN_CIPHER_SUITE_AES_CMAC 0x000FAC06 1566#define WLAN_CIPHER_SUITE_AES_CMAC 0x000FAC06
1554 1567
1568#define WLAN_CIPHER_SUITE_SMS4 0x00147201
1569
1555/* AKM suite selectors */ 1570/* AKM suite selectors */
1556#define WLAN_AKM_SUITE_8021X 0x000FAC01 1571#define WLAN_AKM_SUITE_8021X 0x000FAC01
1557#define WLAN_AKM_SUITE_PSK 0x000FAC02 1572#define WLAN_AKM_SUITE_PSK 0x000FAC02
@@ -1689,6 +1704,23 @@ static inline bool ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
1689} 1704}
1690 1705
1691/** 1706/**
1707 * ieee80211_is_public_action - check if frame is a public action frame
1708 * @hdr: the frame
1709 * @len: length of the frame
1710 */
1711static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr,
1712 size_t len)
1713{
1714 struct ieee80211_mgmt *mgmt = (void *)hdr;
1715
1716 if (len < IEEE80211_MIN_ACTION_SIZE)
1717 return false;
1718 if (!ieee80211_is_action(hdr->frame_control))
1719 return false;
1720 return mgmt->u.action.category == WLAN_CATEGORY_PUBLIC;
1721}
1722
1723/**
1692 * ieee80211_fhss_chan_to_freq - get channel frequency 1724 * ieee80211_fhss_chan_to_freq - get channel frequency
1693 * @channel: the FHSS channel 1725 * @channel: the FHSS channel
1694 * 1726 *
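
A small hypothetical RX-path check combining two of the new helpers; the wrapper itself is illustrative only:

#include <linux/ieee80211.h>

/* Only unfragmented frames or first fragments carry a complete header and
 * payload worth inspecting for public action content. */
static bool my_rx_wants_public_action(struct ieee80211_hdr *hdr, size_t len)
{
	if (!ieee80211_is_first_frag(hdr->seq_ctrl))
		return false;

	return ieee80211_is_public_action(hdr, len);
}
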
diff --git a/include/linux/if.h b/include/linux/if.h
index db20bd4fd16b..06b6ef60c821 100644
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -79,6 +79,7 @@
79#define IFF_TX_SKB_SHARING 0x10000 /* The interface supports sharing 79#define IFF_TX_SKB_SHARING 0x10000 /* The interface supports sharing
80 * skbs on transmit */ 80 * skbs on transmit */
81#define IFF_UNICAST_FLT 0x20000 /* Supports unicast filtering */ 81#define IFF_UNICAST_FLT 0x20000 /* Supports unicast filtering */
82#define IFF_TEAM_PORT 0x40000 /* device used as team port */
82 83
83#define IF_GET_IFACE 0x0001 /* for querying only */ 84#define IF_GET_IFACE 0x0001 /* for querying only */
84#define IF_GET_PROTO 0x0002 85#define IF_GET_PROTO 0x0002
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index e473003e4bda..56d907a2c804 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -79,6 +79,7 @@
79#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */ 79#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
80#define ETH_P_AOE 0x88A2 /* ATA over Ethernet */ 80#define ETH_P_AOE 0x88A2 /* ATA over Ethernet */
81#define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */ 81#define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */
82#define ETH_P_802_EX1 0x88B5 /* 802.1 Local Experimental 1. */
82#define ETH_P_TIPC 0x88CA /* TIPC */ 83#define ETH_P_TIPC 0x88CA /* TIPC */
83#define ETH_P_8021AH 0x88E7 /* 802.1ah Backbone Service Tag */ 84#define ETH_P_8021AH 0x88E7 /* 802.1ah Backbone Service Tag */
84#define ETH_P_1588 0x88F7 /* IEEE 1588 Timesync */ 85#define ETH_P_1588 0x88F7 /* IEEE 1588 Timesync */
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
new file mode 100644
index 000000000000..828181fbad5d
--- /dev/null
+++ b/include/linux/if_team.h
@@ -0,0 +1,242 @@
1/*
2 * include/linux/if_team.h - Network team device driver header
3 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */
10
11#ifndef _LINUX_IF_TEAM_H_
12#define _LINUX_IF_TEAM_H_
13
14#ifdef __KERNEL__
15
16struct team_pcpu_stats {
17 u64 rx_packets;
18 u64 rx_bytes;
19 u64 rx_multicast;
20 u64 tx_packets;
21 u64 tx_bytes;
22 struct u64_stats_sync syncp;
23 u32 rx_dropped;
24 u32 tx_dropped;
25};
26
27struct team;
28
29struct team_port {
30 struct net_device *dev;
31 struct hlist_node hlist; /* node in hash list */
32 struct list_head list; /* node in ordinary list */
33 struct team *team;
34 int index;
35
36 /*
37 * A place for storing original values of the device before it
38 * becomes a port.
39 */
40 struct {
41 unsigned char dev_addr[MAX_ADDR_LEN];
42 unsigned int mtu;
43 } orig;
44
45 bool linkup;
46 u32 speed;
47 u8 duplex;
48
49 struct rcu_head rcu;
50};
51
52struct team_mode_ops {
53 int (*init)(struct team *team);
54 void (*exit)(struct team *team);
55 rx_handler_result_t (*receive)(struct team *team,
56 struct team_port *port,
57 struct sk_buff *skb);
58 bool (*transmit)(struct team *team, struct sk_buff *skb);
59 int (*port_enter)(struct team *team, struct team_port *port);
60 void (*port_leave)(struct team *team, struct team_port *port);
61 void (*port_change_mac)(struct team *team, struct team_port *port);
62};
63
64enum team_option_type {
65 TEAM_OPTION_TYPE_U32,
66 TEAM_OPTION_TYPE_STRING,
67};
68
69struct team_option {
70 struct list_head list;
71 const char *name;
72 enum team_option_type type;
73 int (*getter)(struct team *team, void *arg);
74 int (*setter)(struct team *team, void *arg);
75};
76
77struct team_mode {
78 struct list_head list;
79 const char *kind;
80 struct module *owner;
81 size_t priv_size;
82 const struct team_mode_ops *ops;
83};
84
85#define TEAM_PORT_HASHBITS 4
86#define TEAM_PORT_HASHENTRIES (1 << TEAM_PORT_HASHBITS)
87
88#define TEAM_MODE_PRIV_LONGS 4
89#define TEAM_MODE_PRIV_SIZE (sizeof(long) * TEAM_MODE_PRIV_LONGS)
90
91struct team {
92 struct net_device *dev; /* associated netdevice */
93 struct team_pcpu_stats __percpu *pcpu_stats;
94
95 struct mutex lock; /* used for overall locking, e.g. port lists write */
96
97 /*
98 * port lists with port count
99 */
100 int port_count;
101 struct hlist_head port_hlist[TEAM_PORT_HASHENTRIES];
102 struct list_head port_list;
103
104 struct list_head option_list;
105
106 const struct team_mode *mode;
107 struct team_mode_ops ops;
108 long mode_priv[TEAM_MODE_PRIV_LONGS];
109};
110
111static inline struct hlist_head *team_port_index_hash(struct team *team,
112 int port_index)
113{
114 return &team->port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)];
115}
116
117static inline struct team_port *team_get_port_by_index(struct team *team,
118 int port_index)
119{
120 struct hlist_node *p;
121 struct team_port *port;
122 struct hlist_head *head = team_port_index_hash(team, port_index);
123
124 hlist_for_each_entry(port, p, head, hlist)
125 if (port->index == port_index)
126 return port;
127 return NULL;
128}
129static inline struct team_port *team_get_port_by_index_rcu(struct team *team,
130 int port_index)
131{
132 struct hlist_node *p;
133 struct team_port *port;
134 struct hlist_head *head = team_port_index_hash(team, port_index);
135
136 hlist_for_each_entry_rcu(port, p, head, hlist)
137 if (port->index == port_index)
138 return port;
139 return NULL;
140}
141
142extern int team_port_set_team_mac(struct team_port *port);
143extern int team_options_register(struct team *team,
144 const struct team_option *option,
145 size_t option_count);
146extern void team_options_unregister(struct team *team,
147 const struct team_option *option,
148 size_t option_count);
149extern int team_mode_register(struct team_mode *mode);
150extern int team_mode_unregister(struct team_mode *mode);
151
152#endif /* __KERNEL__ */
153
154#define TEAM_STRING_MAX_LEN 32
155
156/**********************************
157 * NETLINK_GENERIC netlink family.
158 **********************************/
159
160enum {
161 TEAM_CMD_NOOP,
162 TEAM_CMD_OPTIONS_SET,
163 TEAM_CMD_OPTIONS_GET,
164 TEAM_CMD_PORT_LIST_GET,
165
166 __TEAM_CMD_MAX,
167 TEAM_CMD_MAX = (__TEAM_CMD_MAX - 1),
168};
169
170enum {
171 TEAM_ATTR_UNSPEC,
172 TEAM_ATTR_TEAM_IFINDEX, /* u32 */
173 TEAM_ATTR_LIST_OPTION, /* nest */
174 TEAM_ATTR_LIST_PORT, /* nest */
175
176 __TEAM_ATTR_MAX,
177 TEAM_ATTR_MAX = __TEAM_ATTR_MAX - 1,
178};
179
180/* Nested layout of get/set msg:
181 *
182 * [TEAM_ATTR_LIST_OPTION]
183 * [TEAM_ATTR_ITEM_OPTION]
184 * [TEAM_ATTR_OPTION_*], ...
185 * [TEAM_ATTR_ITEM_OPTION]
186 * [TEAM_ATTR_OPTION_*], ...
187 * ...
188 * [TEAM_ATTR_LIST_PORT]
189 * [TEAM_ATTR_ITEM_PORT]
190 * [TEAM_ATTR_PORT_*], ...
191 * [TEAM_ATTR_ITEM_PORT]
192 * [TEAM_ATTR_PORT_*], ...
193 * ...
194 */
195
196enum {
197 TEAM_ATTR_ITEM_OPTION_UNSPEC,
198 TEAM_ATTR_ITEM_OPTION, /* nest */
199
200 __TEAM_ATTR_ITEM_OPTION_MAX,
201 TEAM_ATTR_ITEM_OPTION_MAX = __TEAM_ATTR_ITEM_OPTION_MAX - 1,
202};
203
204enum {
205 TEAM_ATTR_OPTION_UNSPEC,
206 TEAM_ATTR_OPTION_NAME, /* string */
207 TEAM_ATTR_OPTION_CHANGED, /* flag */
208 TEAM_ATTR_OPTION_TYPE, /* u8 */
209 TEAM_ATTR_OPTION_DATA, /* dynamic */
210
211 __TEAM_ATTR_OPTION_MAX,
212 TEAM_ATTR_OPTION_MAX = __TEAM_ATTR_OPTION_MAX - 1,
213};
214
215enum {
216 TEAM_ATTR_ITEM_PORT_UNSPEC,
217 TEAM_ATTR_ITEM_PORT, /* nest */
218
219 __TEAM_ATTR_ITEM_PORT_MAX,
220 TEAM_ATTR_ITEM_PORT_MAX = __TEAM_ATTR_ITEM_PORT_MAX - 1,
221};
222
223enum {
224 TEAM_ATTR_PORT_UNSPEC,
225 TEAM_ATTR_PORT_IFINDEX, /* u32 */
226 TEAM_ATTR_PORT_CHANGED, /* flag */
227 TEAM_ATTR_PORT_LINKUP, /* flag */
228 TEAM_ATTR_PORT_SPEED, /* u32 */
229 TEAM_ATTR_PORT_DUPLEX, /* u8 */
230
231 __TEAM_ATTR_PORT_MAX,
232 TEAM_ATTR_PORT_MAX = __TEAM_ATTR_PORT_MAX - 1,
233};
234
235/*
236 * NETLINK_GENERIC related info
237 */
238#define TEAM_GENL_NAME "team"
239#define TEAM_GENL_VERSION 0x1
240#define TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME "change_event"
241
242#endif /* _LINUX_IF_TEAM_H_ */
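
A sketch of the minimum a team mode module needs to provide. The "noop" mode, its behaviour and the commented return-value conventions are illustrative assumptions rather than part of this header:

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_team.h>

/* Assumed conventions: RX_HANDLER_ANOTHER hands the skb on to the team
 * device, and a true return from transmit means the skb was consumed. */
static rx_handler_result_t noop_receive(struct team *team,
					struct team_port *port,
					struct sk_buff *skb)
{
	return RX_HANDLER_ANOTHER;
}

static bool noop_transmit(struct team *team, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
	return true;
}

static const struct team_mode_ops noop_mode_ops = {
	.receive	= noop_receive,
	.transmit	= noop_transmit,
};

static struct team_mode noop_mode = {
	.kind	= "noop",
	.owner	= THIS_MODULE,
	.ops	= &noop_mode_ops,
};

static int __init noop_init(void)
{
	return team_mode_register(&noop_mode);
}
module_init(noop_init);

static void __exit noop_exit(void)
{
	team_mode_unregister(&noop_mode);
}
module_exit(noop_exit);

MODULE_LICENSE("GPL");
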
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 12d5543b14f2..13aff1e2183b 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -74,22 +74,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
74/* found in socket.c */ 74/* found in socket.c */
75extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *)); 75extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
76 76
77/* if this changes, algorithm will have to be reworked because this 77struct vlan_info;
78 * depends on completely exhausting the VLAN identifier space. Thus
79 * it gives constant time look-up, but in many cases it wastes memory.
80 */
81#define VLAN_GROUP_ARRAY_SPLIT_PARTS 8
82#define VLAN_GROUP_ARRAY_PART_LEN (VLAN_N_VID/VLAN_GROUP_ARRAY_SPLIT_PARTS)
83
84struct vlan_group {
85 struct net_device *real_dev; /* The ethernet(like) device
86 * the vlan is attached to.
87 */
88 unsigned int nr_vlans;
89 struct hlist_node hlist; /* linked list */
90 struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS];
91 struct rcu_head rcu;
92};
93 78
94static inline int is_vlan_dev(struct net_device *dev) 79static inline int is_vlan_dev(struct net_device *dev)
95{ 80{
@@ -109,6 +94,13 @@ extern u16 vlan_dev_vlan_id(const struct net_device *dev);
109extern bool vlan_do_receive(struct sk_buff **skb, bool last_handler); 94extern bool vlan_do_receive(struct sk_buff **skb, bool last_handler);
110extern struct sk_buff *vlan_untag(struct sk_buff *skb); 95extern struct sk_buff *vlan_untag(struct sk_buff *skb);
111 96
97extern int vlan_vid_add(struct net_device *dev, unsigned short vid);
98extern void vlan_vid_del(struct net_device *dev, unsigned short vid);
99
100extern int vlan_vids_add_by_dev(struct net_device *dev,
101 const struct net_device *by_dev);
102extern void vlan_vids_del_by_dev(struct net_device *dev,
103 const struct net_device *by_dev);
112#else 104#else
113static inline struct net_device * 105static inline struct net_device *
114__vlan_find_dev_deep(struct net_device *real_dev, u16 vlan_id) 106__vlan_find_dev_deep(struct net_device *real_dev, u16 vlan_id)
@@ -139,6 +131,26 @@ static inline struct sk_buff *vlan_untag(struct sk_buff *skb)
139{ 131{
140 return skb; 132 return skb;
141} 133}
134
135static inline int vlan_vid_add(struct net_device *dev, unsigned short vid)
136{
137 return 0;
138}
139
140static inline void vlan_vid_del(struct net_device *dev, unsigned short vid)
141{
142}
143
144static inline int vlan_vids_add_by_dev(struct net_device *dev,
145 const struct net_device *by_dev)
146{
147 return 0;
148}
149
150static inline void vlan_vids_del_by_dev(struct net_device *dev,
151 const struct net_device *by_dev)
152{
153}
142#endif 154#endif
143 155
144/** 156/**
@@ -310,6 +322,40 @@ static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
310 322
311 return protocol; 323 return protocol;
312} 324}
325
326static inline void vlan_set_encap_proto(struct sk_buff *skb,
327 struct vlan_hdr *vhdr)
328{
329 __be16 proto;
330 unsigned char *rawp;
331
332 /*
333 * Was a VLAN packet, grab the encapsulated protocol, which the layer
334 * three protocols care about.
335 */
336
337 proto = vhdr->h_vlan_encapsulated_proto;
338 if (ntohs(proto) >= 1536) {
339 skb->protocol = proto;
340 return;
341 }
342
343 rawp = skb->data;
344 if (*(unsigned short *) rawp == 0xFFFF)
345 /*
346 * This is a magic hack to spot IPX packets. Older Novell
347 * breaks the protocol design and runs IPX over 802.3 without
348 * an 802.2 LLC layer. We look for FFFF which isn't a used
349 * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
350 * but does for the rest.
351 */
352 skb->protocol = htons(ETH_P_802_3);
353 else
354 /*
355 * Real 802.2 LLC
356 */
357 skb->protocol = htons(ETH_P_802_2);
358}
313#endif /* __KERNEL__ */ 359#endif /* __KERNEL__ */
314 360
315/* VLAN IOCTLs are found in sockios.h */ 361/* VLAN IOCTLs are found in sockios.h */
@@ -352,7 +398,7 @@ struct vlan_ioctl_args {
352 unsigned int skb_priority; 398 unsigned int skb_priority;
353 unsigned int name_type; 399 unsigned int name_type;
354 unsigned int bind_type; 400 unsigned int bind_type;
355 unsigned int flag; /* Matches vlan_dev_info flags */ 401 unsigned int flag; /* Matches vlan_dev_priv flags */
356 } u; 402 } u;
357 403
358 short vlan_qos; 404 short vlan_qos;
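
A hypothetical enslave/release path showing how a master-type driver might propagate its VLAN filter to a lower device with the new helpers:

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

static int my_master_add_port(struct net_device *master,
			      struct net_device *port)
{
	int err;

	/* Make the port accept every VLAN ID already configured on the
	 * master device. */
	err = vlan_vids_add_by_dev(port, master);
	if (err)
		return err;

	/* ...rest of the enslave sequence... */
	return 0;
}

static void my_master_del_port(struct net_device *master,
			       struct net_device *port)
{
	vlan_vids_del_by_dev(port, master);
	/* ...rest of the release sequence... */
}
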
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index abf5028db981..34e8d52c1925 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -22,7 +22,7 @@ struct inet_diag_sockid {
22 22
23/* Request structure */ 23/* Request structure */
24 24
25struct inet_diag_req { 25struct inet_diag_req_compat {
26 __u8 idiag_family; /* Family of addresses. */ 26 __u8 idiag_family; /* Family of addresses. */
27 __u8 idiag_src_len; 27 __u8 idiag_src_len;
28 __u8 idiag_dst_len; 28 __u8 idiag_dst_len;
@@ -34,6 +34,15 @@ struct inet_diag_req {
34 __u32 idiag_dbs; /* Tables to dump (NI) */ 34 __u32 idiag_dbs; /* Tables to dump (NI) */
35}; 35};
36 36
37struct inet_diag_req {
38 __u8 sdiag_family;
39 __u8 sdiag_protocol;
40 __u8 idiag_ext;
41 __u8 pad;
42 __u32 idiag_states;
43 struct inet_diag_sockid id;
44};
45
37enum { 46enum {
38 INET_DIAG_REQ_NONE, 47 INET_DIAG_REQ_NONE,
39 INET_DIAG_REQ_BYTECODE, 48 INET_DIAG_REQ_BYTECODE,
@@ -99,9 +108,10 @@ enum {
99 INET_DIAG_CONG, 108 INET_DIAG_CONG,
100 INET_DIAG_TOS, 109 INET_DIAG_TOS,
101 INET_DIAG_TCLASS, 110 INET_DIAG_TCLASS,
111 INET_DIAG_SKMEMINFO,
102}; 112};
103 113
104#define INET_DIAG_MAX INET_DIAG_TCLASS 114#define INET_DIAG_MAX INET_DIAG_SKMEMINFO
105 115
106 116
107/* INET_DIAG_MEM */ 117/* INET_DIAG_MEM */
@@ -125,16 +135,41 @@ struct tcpvegas_info {
125#ifdef __KERNEL__ 135#ifdef __KERNEL__
126struct sock; 136struct sock;
127struct inet_hashinfo; 137struct inet_hashinfo;
138struct nlattr;
139struct nlmsghdr;
140struct sk_buff;
141struct netlink_callback;
128 142
129struct inet_diag_handler { 143struct inet_diag_handler {
130 struct inet_hashinfo *idiag_hashinfo; 144 void (*dump)(struct sk_buff *skb,
145 struct netlink_callback *cb,
146 struct inet_diag_req *r,
147 struct nlattr *bc);
148
149 int (*dump_one)(struct sk_buff *in_skb,
150 const struct nlmsghdr *nlh,
151 struct inet_diag_req *req);
152
131 void (*idiag_get_info)(struct sock *sk, 153 void (*idiag_get_info)(struct sock *sk,
132 struct inet_diag_msg *r, 154 struct inet_diag_msg *r,
133 void *info); 155 void *info);
134 __u16 idiag_info_size;
135 __u16 idiag_type; 156 __u16 idiag_type;
136}; 157};
137 158
159struct inet_connection_sock;
160int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
161 struct sk_buff *skb, struct inet_diag_req *req,
162 u32 pid, u32 seq, u16 nlmsg_flags,
163 const struct nlmsghdr *unlh);
164void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
165 struct netlink_callback *cb, struct inet_diag_req *r,
166 struct nlattr *bc);
167int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
168 struct sk_buff *in_skb, const struct nlmsghdr *nlh,
169 struct inet_diag_req *req);
170
171int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
172
138extern int inet_diag_register(const struct inet_diag_handler *handler); 173extern int inet_diag_register(const struct inet_diag_handler *handler);
139extern void inet_diag_unregister(const struct inet_diag_handler *handler); 174extern void inet_diag_unregister(const struct inet_diag_handler *handler);
140#endif /* __KERNEL__ */ 175#endif /* __KERNEL__ */
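
Roughly how a protocol diag module can plug into the reworked handler by leaning on the new *_icsk helpers. The sketch below is trimmed and assumes the TCP hash table as the backing store; a real module fills in protocol-specific info and registers from its init path:

#include <linux/inet_diag.h>
#include <linux/in.h>
#include <net/tcp.h>

static void my_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
			 struct inet_diag_req *r, struct nlattr *bc)
{
	inet_diag_dump_icsk(&tcp_hashinfo, skb, cb, r, bc);
}

static int my_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
			    struct inet_diag_req *req)
{
	return inet_diag_dump_one_icsk(&tcp_hashinfo, in_skb, nlh, req);
}

static void my_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
			     void *info)
{
	/* fill in protocol-specific info, e.g. a struct tcp_info */
}

static const struct inet_diag_handler my_diag_handler = {
	.dump		= my_diag_dump,
	.dump_one	= my_diag_dump_one,
	.idiag_get_info	= my_diag_get_info,
	.idiag_type	= IPPROTO_TCP,
};

/* registered via inet_diag_register(&my_diag_handler) from module init */
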
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 0c997767429a..6318268dcaf5 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -404,7 +404,7 @@ struct tcp6_sock {
404 404
405extern int inet6_sk_rebuild_header(struct sock *sk); 405extern int inet6_sk_rebuild_header(struct sock *sk);
406 406
407#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 407#if IS_ENABLED(CONFIG_IPV6)
408static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk) 408static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
409{ 409{
410 return inet_sk(__sk)->pinet6; 410 return inet_sk(__sk)->pinet6;
@@ -515,7 +515,7 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk)
515#define inet6_rcv_saddr(__sk) NULL 515#define inet6_rcv_saddr(__sk) NULL
516#define tcp_twsk_ipv6only(__sk) 0 516#define tcp_twsk_ipv6only(__sk) 0
517#define inet_v6_ipv6only(__sk) 0 517#define inet_v6_ipv6only(__sk) 0
518#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ 518#endif /* IS_ENABLED(CONFIG_IPV6) */
519 519
520#define INET6_MATCH(__sk, __net, __hash, __saddr, __daddr, __ports, __dif)\ 520#define INET6_MATCH(__sk, __net, __hash, __saddr, __daddr, __ports, __dif)\
521 (((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net) && \ 521 (((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net) && \
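
For reference, IS_ENABLED(CONFIG_FOO) is true when the option is built in or built as a module, so the conversion collapses the old two-macro test into one; a minimal illustration:

/* Old spelling:
 *   #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 * New spelling, true for both CONFIG_IPV6=y and CONFIG_IPV6=m:
 */
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6-only declarations go here */
#endif
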
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 99834e581b9e..bd4272b61a14 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -91,10 +91,11 @@ static inline unsigned int irq_domain_to_irq(struct irq_domain *d,
91 91
92extern void irq_domain_add(struct irq_domain *domain); 92extern void irq_domain_add(struct irq_domain *domain);
93extern void irq_domain_del(struct irq_domain *domain); 93extern void irq_domain_del(struct irq_domain *domain);
94
95extern struct irq_domain_ops irq_domain_simple_ops;
94#endif /* CONFIG_IRQ_DOMAIN */ 96#endif /* CONFIG_IRQ_DOMAIN */
95 97
96#if defined(CONFIG_IRQ_DOMAIN) && defined(CONFIG_OF_IRQ) 98#if defined(CONFIG_IRQ_DOMAIN) && defined(CONFIG_OF_IRQ)
97extern struct irq_domain_ops irq_domain_simple_ops;
98extern void irq_domain_add_simple(struct device_node *controller, int irq_base); 99extern void irq_domain_add_simple(struct device_node *controller, int irq_base);
99extern void irq_domain_generate_simple(const struct of_device_id *match, 100extern void irq_domain_generate_simple(const struct of_device_id *match,
100 u64 phys_base, unsigned int irq_start); 101 u64 phys_base, unsigned int irq_start);
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 388b0d425b50..5ce8b140428f 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/compiler.h> 5#include <linux/compiler.h>
6#include <linux/workqueue.h>
6 7
7#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) 8#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
8 9
@@ -14,6 +15,12 @@ struct jump_label_key {
14#endif 15#endif
15}; 16};
16 17
18struct jump_label_key_deferred {
19 struct jump_label_key key;
20 unsigned long timeout;
21 struct delayed_work work;
22};
23
17# include <asm/jump_label.h> 24# include <asm/jump_label.h>
18# define HAVE_JUMP_LABEL 25# define HAVE_JUMP_LABEL
19#endif /* CC_HAVE_ASM_GOTO && CONFIG_JUMP_LABEL */ 26#endif /* CC_HAVE_ASM_GOTO && CONFIG_JUMP_LABEL */
@@ -51,8 +58,11 @@ extern void arch_jump_label_transform_static(struct jump_entry *entry,
51extern int jump_label_text_reserved(void *start, void *end); 58extern int jump_label_text_reserved(void *start, void *end);
52extern void jump_label_inc(struct jump_label_key *key); 59extern void jump_label_inc(struct jump_label_key *key);
53extern void jump_label_dec(struct jump_label_key *key); 60extern void jump_label_dec(struct jump_label_key *key);
61extern void jump_label_dec_deferred(struct jump_label_key_deferred *key);
54extern bool jump_label_enabled(struct jump_label_key *key); 62extern bool jump_label_enabled(struct jump_label_key *key);
55extern void jump_label_apply_nops(struct module *mod); 63extern void jump_label_apply_nops(struct module *mod);
64extern void jump_label_rate_limit(struct jump_label_key_deferred *key,
65 unsigned long rl);
56 66
57#else /* !HAVE_JUMP_LABEL */ 67#else /* !HAVE_JUMP_LABEL */
58 68
@@ -68,6 +78,10 @@ static __always_inline void jump_label_init(void)
68{ 78{
69} 79}
70 80
81struct jump_label_key_deferred {
82 struct jump_label_key key;
83};
84
71static __always_inline bool static_branch(struct jump_label_key *key) 85static __always_inline bool static_branch(struct jump_label_key *key)
72{ 86{
73 if (unlikely(atomic_read(&key->enabled))) 87 if (unlikely(atomic_read(&key->enabled)))
@@ -85,6 +99,11 @@ static inline void jump_label_dec(struct jump_label_key *key)
85 atomic_dec(&key->enabled); 99 atomic_dec(&key->enabled);
86} 100}
87 101
102static inline void jump_label_dec_deferred(struct jump_label_key_deferred *key)
103{
104 jump_label_dec(&key->key);
105}
106
88static inline int jump_label_text_reserved(void *start, void *end) 107static inline int jump_label_text_reserved(void *start, void *end)
89{ 108{
90 return 0; 109 return 0;
@@ -102,6 +121,14 @@ static inline int jump_label_apply_nops(struct module *mod)
102{ 121{
103 return 0; 122 return 0;
104} 123}
124
125static inline void jump_label_rate_limit(struct jump_label_key_deferred *key,
126 unsigned long rl)
127{
128}
105#endif /* HAVE_JUMP_LABEL */ 129#endif /* HAVE_JUMP_LABEL */
106 130
131#define jump_label_key_enabled ((struct jump_label_key){ .enabled = ATOMIC_INIT(1), })
132#define jump_label_key_disabled ((struct jump_label_key){ .enabled = ATOMIC_INIT(0), })
133
107#endif /* _LINUX_JUMP_LABEL_H */ 134#endif /* _LINUX_JUMP_LABEL_H */
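
A sketch of how a subsystem might use the deferred key; the perf changes in this series do something similar, but the names and the one-second timeout below are illustrative:

#include <linux/jump_label.h>
#include <linux/jiffies.h>
#include <linux/init.h>

static struct jump_label_key_deferred my_events;

static int __init my_setup(void)
{
	/* Defer the expensive disable path by at least HZ jiffies so that
	 * rapid enable/disable cycles do not hammer text patching. */
	jump_label_rate_limit(&my_events, HZ);
	return 0;
}

static void my_event_start(void)
{
	jump_label_inc(&my_events.key);
}

static void my_event_stop(void)
{
	/* may defer the actual branch rewrite via the delayed work */
	jump_label_dec_deferred(&my_events);
}

static void my_fast_path(void)
{
	if (static_branch(&my_events.key)) {
		/* slow, instrumented path */
	}
}
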
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 0cce2db580c3..2fbd9053c2df 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -6,6 +6,7 @@
6#include <linux/percpu.h> 6#include <linux/percpu.h>
7#include <linux/cpumask.h> 7#include <linux/cpumask.h>
8#include <linux/interrupt.h> 8#include <linux/interrupt.h>
9#include <linux/sched.h>
9#include <asm/irq.h> 10#include <asm/irq.h>
10#include <asm/cputime.h> 11#include <asm/cputime.h>
11 12
@@ -15,21 +16,25 @@
15 * used by rstatd/perfmeter 16 * used by rstatd/perfmeter
16 */ 17 */
17 18
18struct cpu_usage_stat { 19enum cpu_usage_stat {
19 cputime64_t user; 20 CPUTIME_USER,
20 cputime64_t nice; 21 CPUTIME_NICE,
21 cputime64_t system; 22 CPUTIME_SYSTEM,
22 cputime64_t softirq; 23 CPUTIME_SOFTIRQ,
23 cputime64_t irq; 24 CPUTIME_IRQ,
24 cputime64_t idle; 25 CPUTIME_IDLE,
25 cputime64_t iowait; 26 CPUTIME_IOWAIT,
26 cputime64_t steal; 27 CPUTIME_STEAL,
27 cputime64_t guest; 28 CPUTIME_GUEST,
28 cputime64_t guest_nice; 29 CPUTIME_GUEST_NICE,
30 NR_STATS,
31};
32
33struct kernel_cpustat {
34 u64 cpustat[NR_STATS];
29}; 35};
30 36
31struct kernel_stat { 37struct kernel_stat {
32 struct cpu_usage_stat cpustat;
33#ifndef CONFIG_GENERIC_HARDIRQS 38#ifndef CONFIG_GENERIC_HARDIRQS
34 unsigned int irqs[NR_IRQS]; 39 unsigned int irqs[NR_IRQS];
35#endif 40#endif
@@ -38,10 +43,13 @@ struct kernel_stat {
38}; 43};
39 44
40DECLARE_PER_CPU(struct kernel_stat, kstat); 45DECLARE_PER_CPU(struct kernel_stat, kstat);
46DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
41 47
42#define kstat_cpu(cpu) per_cpu(kstat, cpu)
43/* Must have preemption disabled for this to be meaningful. */ 48/* Must have preemption disabled for this to be meaningful. */
44#define kstat_this_cpu __get_cpu_var(kstat) 49#define kstat_this_cpu (&__get_cpu_var(kstat))
50#define kcpustat_this_cpu (&__get_cpu_var(kernel_cpustat))
51#define kstat_cpu(cpu) per_cpu(kstat, cpu)
52#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
45 53
46extern unsigned long long nr_context_switches(void); 54extern unsigned long long nr_context_switches(void);
47 55
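
A small sketch of reading the new per-CPU layout, here summing system time across all possible CPUs:

#include <linux/kernel_stat.h>
#include <linux/cpumask.h>

static u64 total_system_time(void)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];

	return sum;	/* in cputime units, as before */
}
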
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index ad81e1c51487..fc615a97e2d3 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -191,8 +191,6 @@ static inline struct kobj_type *get_ktype(struct kobject *kobj)
191} 191}
192 192
193extern struct kobject *kset_find_obj(struct kset *, const char *); 193extern struct kobject *kset_find_obj(struct kset *, const char *);
194extern struct kobject *kset_find_obj_hinted(struct kset *, const char *,
195 struct kobject *);
196 194
197/* The global /sys/kernel/ kobject for people to chain off of */ 195/* The global /sys/kernel/ kobject for people to chain off of */
198extern struct kobject *kernel_kobj; 196extern struct kobject *kernel_kobj;
diff --git a/include/linux/kref.h b/include/linux/kref.h
index d4a62ab2ee5e..abc0120b09b7 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -15,16 +15,81 @@
15#ifndef _KREF_H_ 15#ifndef _KREF_H_
16#define _KREF_H_ 16#define _KREF_H_
17 17
18#include <linux/types.h> 18#include <linux/bug.h>
19#include <linux/atomic.h>
19 20
20struct kref { 21struct kref {
21 atomic_t refcount; 22 atomic_t refcount;
22}; 23};
23 24
24void kref_init(struct kref *kref); 25/**
25void kref_get(struct kref *kref); 26 * kref_init - initialize object.
26int kref_put(struct kref *kref, void (*release) (struct kref *kref)); 27 * @kref: object in question.
27int kref_sub(struct kref *kref, unsigned int count, 28 */
28 void (*release) (struct kref *kref)); 29static inline void kref_init(struct kref *kref)
30{
31 atomic_set(&kref->refcount, 1);
32}
33
34/**
35 * kref_get - increment refcount for object.
36 * @kref: object.
37 */
38static inline void kref_get(struct kref *kref)
39{
40 WARN_ON(!atomic_read(&kref->refcount));
41 atomic_inc(&kref->refcount);
42}
29 43
44/**
45 * kref_sub - subtract a number of refcounts for object.
46 * @kref: object.
47 * @count: Number of refcounts to subtract.
48 * @release: pointer to the function that will clean up the object when the
49 * last reference to the object is released.
50 * This pointer is required, and it is not acceptable to pass kfree
51 * in as this function. If the caller does pass kfree to this
52 * function, you will be publicly mocked mercilessly by the kref
53 * maintainer, and anyone else who happens to notice it. You have
54 * been warned.
55 *
56 * Subtract @count from the refcount, and if 0, call release().
57 * Return 1 if the object was removed, otherwise return 0. Beware, if this
58 * function returns 0, you still cannot count on the kref remaining in
59 * memory. Only use the return value if you want to see if the kref is now
60 * gone, not present.
61 */
62static inline int kref_sub(struct kref *kref, unsigned int count,
63 void (*release)(struct kref *kref))
64{
65 WARN_ON(release == NULL);
66
67 if (atomic_sub_and_test((int) count, &kref->refcount)) {
68 release(kref);
69 return 1;
70 }
71 return 0;
72}
73
74/**
75 * kref_put - decrement refcount for object.
76 * @kref: object.
77 * @release: pointer to the function that will clean up the object when the
78 * last reference to the object is released.
79 * This pointer is required, and it is not acceptable to pass kfree
80 * in as this function. If the caller does pass kfree to this
81 * function, you will be publicly mocked mercilessly by the kref
82 * maintainer, and anyone else who happens to notice it. You have
83 * been warned.
84 *
85 * Decrement the refcount, and if 0, call release().
86 * Return 1 if the object was removed, otherwise return 0. Beware, if this
87 * function returns 0, you still cannot count on the kref remaining in
88 * memory. Only use the return value if you want to see if the kref is now
89 * gone, not present.
90 */
91static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
92{
93 return kref_sub(kref, 1, release);
94}
30#endif /* _KREF_H_ */ 95#endif /* _KREF_H_ */
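
The canonical usage pattern, now serviced entirely by the inline versions; struct widget and its helpers are hypothetical:

#include <linux/kref.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct widget {
	struct kref ref;
	int id;
};

static struct widget *widget_create(int id)
{
	struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return NULL;
	kref_init(&w->ref);	/* refcount starts at 1 */
	w->id = id;
	return w;
}

static void widget_release(struct kref *ref)
{
	struct widget *w = container_of(ref, struct widget, ref);

	kfree(w);
}

static inline void widget_get(struct widget *w)
{
	kref_get(&w->ref);
}

static inline void widget_put(struct widget *w)
{
	/* never pass kfree directly, as the comment above insists */
	kref_put(&w->ref, widget_release);
}
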
diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h
index b0e99898527c..e23121f9d82a 100644
--- a/include/linux/latencytop.h
+++ b/include/linux/latencytop.h
@@ -10,6 +10,8 @@
10#define _INCLUDE_GUARD_LATENCYTOP_H_ 10#define _INCLUDE_GUARD_LATENCYTOP_H_
11 11
12#include <linux/compiler.h> 12#include <linux/compiler.h>
13struct task_struct;
14
13#ifdef CONFIG_LATENCYTOP 15#ifdef CONFIG_LATENCYTOP
14 16
15#define LT_SAVECOUNT 32 17#define LT_SAVECOUNT 32
@@ -23,7 +25,6 @@ struct latency_record {
23}; 25};
24 26
25 27
26struct task_struct;
27 28
28extern int latencytop_enabled; 29extern int latencytop_enabled;
29void __account_scheduler_latency(struct task_struct *task, int usecs, int inter); 30void __account_scheduler_latency(struct task_struct *task, int usecs, int inter);
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index ff9abff55aa0..90b0656a869e 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -301,7 +301,7 @@ static inline int __nlm_privileged_request4(const struct sockaddr *sap)
301 return ipv4_is_loopback(sin->sin_addr.s_addr); 301 return ipv4_is_loopback(sin->sin_addr.s_addr);
302} 302}
303 303
304#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 304#if IS_ENABLED(CONFIG_IPV6)
305static inline int __nlm_privileged_request6(const struct sockaddr *sap) 305static inline int __nlm_privileged_request6(const struct sockaddr *sap)
306{ 306{
307 const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap; 307 const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
@@ -314,12 +314,12 @@ static inline int __nlm_privileged_request6(const struct sockaddr *sap)
314 314
315 return ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LOOPBACK; 315 return ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LOOPBACK;
316} 316}
317#else /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ 317#else /* IS_ENABLED(CONFIG_IPV6) */
318static inline int __nlm_privileged_request6(const struct sockaddr *sap) 318static inline int __nlm_privileged_request6(const struct sockaddr *sap)
319{ 319{
320 return 0; 320 return 0;
321} 321}
322#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ 322#endif /* IS_ENABLED(CONFIG_IPV6) */
323 323
324/* 324/*
325 * Ensure incoming requests are from local privileged callers. 325 * Ensure incoming requests are from local privileged callers.
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index b6a56e37284c..d36619ead3ba 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -343,6 +343,8 @@ extern void lockdep_trace_alloc(gfp_t mask);
343 343
344#define lockdep_assert_held(l) WARN_ON(debug_locks && !lockdep_is_held(l)) 344#define lockdep_assert_held(l) WARN_ON(debug_locks && !lockdep_is_held(l))
345 345
346#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
347
346#else /* !LOCKDEP */ 348#else /* !LOCKDEP */
347 349
348static inline void lockdep_off(void) 350static inline void lockdep_off(void)
@@ -392,6 +394,8 @@ struct lock_class_key { };
392 394
393#define lockdep_assert_held(l) do { } while (0) 395#define lockdep_assert_held(l) do { } while (0)
394 396
397#define lockdep_recursing(tsk) (0)
398
395#endif /* !LOCKDEP */ 399#endif /* !LOCKDEP */
396 400
397#ifdef CONFIG_LOCK_STAT 401#ifdef CONFIG_LOCK_STAT
diff --git a/include/linux/mdio-bitbang.h b/include/linux/mdio-bitbang.h
index 0fe00cd4c93c..76f52bbbb2f4 100644
--- a/include/linux/mdio-bitbang.h
+++ b/include/linux/mdio-bitbang.h
@@ -32,6 +32,8 @@ struct mdiobb_ops {
32 32
33struct mdiobb_ctrl { 33struct mdiobb_ctrl {
34 const struct mdiobb_ops *ops; 34 const struct mdiobb_ops *ops;
35 /* reset callback */
36 int (*reset)(struct mii_bus *bus);
35}; 37};
36 38
37/* The returned bus is not yet registered with the phy layer. */ 39/* The returned bus is not yet registered with the phy layer. */
diff --git a/include/linux/mdio-gpio.h b/include/linux/mdio-gpio.h
index e9d3fdfe41d7..7c9fe3c2be73 100644
--- a/include/linux/mdio-gpio.h
+++ b/include/linux/mdio-gpio.h
@@ -20,6 +20,8 @@ struct mdio_gpio_platform_data {
20 20
21 unsigned int phy_mask; 21 unsigned int phy_mask;
22 int irqs[PHY_MAX_ADDR]; 22 int irqs[PHY_MAX_ADDR];
23 /* reset callback */
24 int (*reset)(struct mii_bus *bus);
23}; 25};
24 26
25#endif /* __LINUX_MDIO_GPIO_H */ 27#endif /* __LINUX_MDIO_GPIO_H */
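
A hypothetical board file supplying the new bus-reset hook through mdio-gpio platform data; the reset sequence and the other platform fields are made up:

#include <linux/mdio-gpio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/delay.h>

static int my_board_mdio_reset(struct mii_bus *bus)
{
	/* e.g. toggle a board-specific reset line, then let the PHYs settle */
	udelay(200);
	return 0;
}

static struct mdio_gpio_platform_data my_mdio_pdata = {
	/* .mdc / .mdio / .phy_mask etc. as required by the board */
	.reset	= my_board_mdio_reset,
};

static struct platform_device my_mdio_device = {
	.name	= "mdio-gpio",
	.id	= 0,
	.dev	= {
		.platform_data = &my_mdio_pdata,
	},
};
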
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index e6b843e16e81..a6bb10235148 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -2,8 +2,6 @@
2#define _LINUX_MEMBLOCK_H 2#define _LINUX_MEMBLOCK_H
3#ifdef __KERNEL__ 3#ifdef __KERNEL__
4 4
5#define MEMBLOCK_ERROR 0
6
7#ifdef CONFIG_HAVE_MEMBLOCK 5#ifdef CONFIG_HAVE_MEMBLOCK
8/* 6/*
9 * Logical memory blocks. 7 * Logical memory blocks.
@@ -19,81 +17,161 @@
19#include <linux/init.h> 17#include <linux/init.h>
20#include <linux/mm.h> 18#include <linux/mm.h>
21 19
22#include <asm/memblock.h>
23
24#define INIT_MEMBLOCK_REGIONS 128 20#define INIT_MEMBLOCK_REGIONS 128
25 21
26struct memblock_region { 22struct memblock_region {
27 phys_addr_t base; 23 phys_addr_t base;
28 phys_addr_t size; 24 phys_addr_t size;
25#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
26 int nid;
27#endif
29}; 28};
30 29
31struct memblock_type { 30struct memblock_type {
32 unsigned long cnt; /* number of regions */ 31 unsigned long cnt; /* number of regions */
33 unsigned long max; /* size of the allocated array */ 32 unsigned long max; /* size of the allocated array */
33 phys_addr_t total_size; /* size of all regions */
34 struct memblock_region *regions; 34 struct memblock_region *regions;
35}; 35};
36 36
37struct memblock { 37struct memblock {
38 phys_addr_t current_limit; 38 phys_addr_t current_limit;
39 phys_addr_t memory_size; /* Updated by memblock_analyze() */
40 struct memblock_type memory; 39 struct memblock_type memory;
41 struct memblock_type reserved; 40 struct memblock_type reserved;
42}; 41};
43 42
44extern struct memblock memblock; 43extern struct memblock memblock;
45extern int memblock_debug; 44extern int memblock_debug;
46extern int memblock_can_resize;
47 45
48#define memblock_dbg(fmt, ...) \ 46#define memblock_dbg(fmt, ...) \
49 if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) 47 if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
50 48
51u64 memblock_find_in_range(u64 start, u64 end, u64 size, u64 align); 49phys_addr_t memblock_find_in_range_node(phys_addr_t start, phys_addr_t end,
50 phys_addr_t size, phys_addr_t align, int nid);
51phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
52 phys_addr_t size, phys_addr_t align);
52int memblock_free_reserved_regions(void); 53int memblock_free_reserved_regions(void);
53int memblock_reserve_reserved_regions(void); 54int memblock_reserve_reserved_regions(void);
54 55
55extern void memblock_init(void); 56void memblock_allow_resize(void);
56extern void memblock_analyze(void); 57int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
57extern long memblock_add(phys_addr_t base, phys_addr_t size); 58int memblock_add(phys_addr_t base, phys_addr_t size);
58extern long memblock_remove(phys_addr_t base, phys_addr_t size); 59int memblock_remove(phys_addr_t base, phys_addr_t size);
59extern long memblock_free(phys_addr_t base, phys_addr_t size); 60int memblock_free(phys_addr_t base, phys_addr_t size);
60extern long memblock_reserve(phys_addr_t base, phys_addr_t size); 61int memblock_reserve(phys_addr_t base, phys_addr_t size);
62
63#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
64void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
65 unsigned long *out_end_pfn, int *out_nid);
66
67/**
68 * for_each_mem_pfn_range - early memory pfn range iterator
69 * @i: an integer used as loop variable
70 * @nid: node selector, %MAX_NUMNODES for all nodes
71 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
72 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
73 * @p_nid: ptr to int for nid of the range, can be %NULL
74 *
75 * Walks over configured memory ranges. Available after early_node_map is
76 * populated.
77 */
78#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \
79 for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
80 i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
81#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
82
83void __next_free_mem_range(u64 *idx, int nid, phys_addr_t *out_start,
84 phys_addr_t *out_end, int *out_nid);
85
86/**
87 * for_each_free_mem_range - iterate through free memblock areas
88 * @i: u64 used as loop variable
89 * @nid: node selector, %MAX_NUMNODES for all nodes
90 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
91 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
92 * @p_nid: ptr to int for nid of the range, can be %NULL
93 *
94 * Walks over free (memory && !reserved) areas of memblock. Available as
95 * soon as memblock is initialized.
96 */
97#define for_each_free_mem_range(i, nid, p_start, p_end, p_nid) \
98 for (i = 0, \
99 __next_free_mem_range(&i, nid, p_start, p_end, p_nid); \
100 i != (u64)ULLONG_MAX; \
101 __next_free_mem_range(&i, nid, p_start, p_end, p_nid))
102
103void __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t *out_start,
104 phys_addr_t *out_end, int *out_nid);
61 105
62/* The numa aware allocator is only available if 106/**
63 * CONFIG_ARCH_POPULATES_NODE_MAP is set 107 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
108 * @i: u64 used as loop variable
109 * @nid: node selector, %MAX_NUMNODES for all nodes
110 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
111 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
112 * @p_nid: ptr to int for nid of the range, can be %NULL
113 *
114 * Walks over free (memory && !reserved) areas of memblock in reverse
115 * order. Available as soon as memblock is initialized.
64 */ 116 */
65extern phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, 117#define for_each_free_mem_range_reverse(i, nid, p_start, p_end, p_nid) \
66 int nid); 118 for (i = (u64)ULLONG_MAX, \
67extern phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, 119 __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid); \
68 int nid); 120 i != (u64)ULLONG_MAX; \
121 __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid))
69 122
70extern phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align); 123#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
124int memblock_set_node(phys_addr_t base, phys_addr_t size, int nid);
125
126static inline void memblock_set_region_node(struct memblock_region *r, int nid)
127{
128 r->nid = nid;
129}
130
131static inline int memblock_get_region_node(const struct memblock_region *r)
132{
133 return r->nid;
134}
135#else
136static inline void memblock_set_region_node(struct memblock_region *r, int nid)
137{
138}
139
140static inline int memblock_get_region_node(const struct memblock_region *r)
141{
142 return 0;
143}
144#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
145
146phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
147phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
148
149phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
71 150
 72/* Flags for memblock_alloc_base() and __memblock_alloc_base() */ 151/* Flags for memblock_alloc_base() and __memblock_alloc_base() */
73#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) 152#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
74#define MEMBLOCK_ALLOC_ACCESSIBLE 0 153#define MEMBLOCK_ALLOC_ACCESSIBLE 0
75 154
76extern phys_addr_t memblock_alloc_base(phys_addr_t size, 155phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
77 phys_addr_t align, 156 phys_addr_t max_addr);
78 phys_addr_t max_addr); 157phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
79extern phys_addr_t __memblock_alloc_base(phys_addr_t size, 158 phys_addr_t max_addr);
80 phys_addr_t align, 159phys_addr_t memblock_phys_mem_size(void);
81 phys_addr_t max_addr); 160phys_addr_t memblock_start_of_DRAM(void);
82extern phys_addr_t memblock_phys_mem_size(void); 161phys_addr_t memblock_end_of_DRAM(void);
83extern phys_addr_t memblock_start_of_DRAM(void); 162void memblock_enforce_memory_limit(phys_addr_t memory_limit);
84extern phys_addr_t memblock_end_of_DRAM(void); 163int memblock_is_memory(phys_addr_t addr);
85extern void memblock_enforce_memory_limit(phys_addr_t memory_limit); 164int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
86extern int memblock_is_memory(phys_addr_t addr); 165int memblock_is_reserved(phys_addr_t addr);
87extern int memblock_is_region_memory(phys_addr_t base, phys_addr_t size); 166int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
88extern int memblock_is_reserved(phys_addr_t addr); 167
89extern int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size); 168extern void __memblock_dump_all(void);
90 169
91extern void memblock_dump_all(void); 170static inline void memblock_dump_all(void)
92 171{
93/* Provided by the architecture */ 172 if (memblock_debug)
94extern phys_addr_t memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid); 173 __memblock_dump_all();
95extern int memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1, 174}
96 phys_addr_t addr2, phys_addr_t size2);
97 175
98/** 176/**
99 * memblock_set_current_limit - Set the current allocation limit to allow 177 * memblock_set_current_limit - Set the current allocation limit to allow
@@ -101,7 +179,7 @@ extern int memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
101 * accessible during boot 179 * accessible during boot
102 * @limit: New limit value (physical address) 180 * @limit: New limit value (physical address)
103 */ 181 */
104extern void memblock_set_current_limit(phys_addr_t limit); 182void memblock_set_current_limit(phys_addr_t limit);
105 183
106 184
107/* 185/*
@@ -154,9 +232,9 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
154 region++) 232 region++)
155 233
156 234
157#ifdef ARCH_DISCARD_MEMBLOCK 235#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
158#define __init_memblock __init 236#define __init_memblock __meminit
159#define __initdata_memblock __initdata 237#define __initdata_memblock __meminitdata
160#else 238#else
161#define __init_memblock 239#define __init_memblock
162#define __initdata_memblock 240#define __initdata_memblock
@@ -165,7 +243,7 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
165#else 243#else
166static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align) 244static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
167{ 245{
168 return MEMBLOCK_ERROR; 246 return 0;
169} 247}
170 248
171#endif /* CONFIG_HAVE_MEMBLOCK */ 249#endif /* CONFIG_HAVE_MEMBLOCK */
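For context, a minimal sketch of how the interfaces declared above might be used by early-boot code (not part of this diff; the function name, the debug output, and the choice of alignment are illustrative assumptions):

#include <linux/memblock.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/numa.h>

static phys_addr_t __init example_alloc_boot_buffer(phys_addr_t size,
						    phys_addr_t align)
{
	u64 i;
	phys_addr_t start, end;
	int nid;

	/* Walk the free ranges the new iterator exposes (debug output only). */
	for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, &nid)
		pr_debug("free: 0x%llx-0x%llx nid %d\n",
			 (unsigned long long)start,
			 (unsigned long long)end, nid);

	/* Allocate below the current limit; the !CONFIG_HAVE_MEMBLOCK stub
	 * shown above returns 0 instead.
	 */
	return memblock_alloc(size, align);
}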
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index b87068a1a09e..9b296ea41bb8 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -85,6 +85,9 @@ extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
85extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); 85extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
86extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm); 86extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);
87 87
88extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
89extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);
90
88static inline 91static inline
89int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup) 92int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
90{ 93{
@@ -381,5 +384,25 @@ mem_cgroup_print_bad_page(struct page *page)
381} 384}
382#endif 385#endif
383 386
387enum {
388 UNDER_LIMIT,
389 SOFT_LIMIT,
390 OVER_LIMIT,
391};
392
393#ifdef CONFIG_INET
394struct sock;
395#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
396void sock_update_memcg(struct sock *sk);
397void sock_release_memcg(struct sock *sk);
398#else
399static inline void sock_update_memcg(struct sock *sk)
400{
401}
402static inline void sock_release_memcg(struct sock *sk)
403{
404}
405#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
406#endif /* CONFIG_INET */
384#endif /* _LINUX_MEMCONTROL_H */ 407#endif /* _LINUX_MEMCONTROL_H */
385 408
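The sock_update_memcg()/sock_release_memcg() pair added here compiles to empty stubs when CONFIG_CGROUP_MEM_RES_CTLR_KMEM is off, so callers can invoke it unconditionally. A brief sketch of that call pattern, with hypothetical caller names (the real hook sites live in the core socket code, not shown here):

#include <linux/memcontrol.h>
#include <net/sock.h>

static void example_socket_created(struct sock *sk)
{
	/* No-op unless CONFIG_CGROUP_MEM_RES_CTLR_KMEM is enabled. */
	sock_update_memcg(sk);
}

static void example_socket_destroyed(struct sock *sk)
{
	sock_release_memcg(sk);
}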
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 935699b30b7c..1ac7f6e405f9 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -15,7 +15,6 @@
15#ifndef _LINUX_MEMORY_H_ 15#ifndef _LINUX_MEMORY_H_
16#define _LINUX_MEMORY_H_ 16#define _LINUX_MEMORY_H_
17 17
18#include <linux/sysdev.h>
19#include <linux/node.h> 18#include <linux/node.h>
20#include <linux/compiler.h> 19#include <linux/compiler.h>
21#include <linux/mutex.h> 20#include <linux/mutex.h>
@@ -38,7 +37,7 @@ struct memory_block {
38 int phys_device; /* to which fru does this belong? */ 37 int phys_device; /* to which fru does this belong? */
39 void *hw; /* optional pointer to fw/hw data */ 38 void *hw; /* optional pointer to fw/hw data */
40 int (*phys_callback)(struct memory_block *); 39 int (*phys_callback)(struct memory_block *);
41 struct sys_device sysdev; 40 struct device dev;
42}; 41};
43 42
44int arch_get_memory_phys_device(unsigned long start_pfn); 43int arch_get_memory_phys_device(unsigned long start_pfn);
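With the embedded struct sys_device replaced by a struct device, code that is handed only the bare device (for example from a device attribute callback) would recover the memory_block with container_of(). A short sketch under that assumption (helper name is hypothetical):

#include <linux/memory.h>
#include <linux/device.h>
#include <linux/kernel.h>

/* Map the embedded struct device back to its owning memory_block. */
static inline struct memory_block *example_to_memory_block(struct device *dev)
{
	return container_of(dev, struct memory_block, dev);
}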
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 27748230aa69..2783eca629a0 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -9,6 +9,7 @@
9#define __LINUX_MII_H__ 9#define __LINUX_MII_H__
10 10
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/ethtool.h>
12 13
13/* Generic MII registers. */ 14/* Generic MII registers. */
14#define MII_BMCR 0x00 /* Basic mode control register */ 15#define MII_BMCR 0x00 /* Basic mode control register */
@@ -240,6 +241,205 @@ static inline unsigned int mii_duplex (unsigned int duplex_lock,
240} 241}
241 242
242/** 243/**
244 * ethtool_adv_to_mii_adv_t
245 * @ethadv: the ethtool advertisement settings
246 *
247 * A small helper function that translates ethtool advertisement
248 * settings to phy autonegotiation advertisements for the
249 * MII_ADVERTISE register.
250 */
251static inline u32 ethtool_adv_to_mii_adv_t(u32 ethadv)
252{
253 u32 result = 0;
254
255 if (ethadv & ADVERTISED_10baseT_Half)
256 result |= ADVERTISE_10HALF;
257 if (ethadv & ADVERTISED_10baseT_Full)
258 result |= ADVERTISE_10FULL;
259 if (ethadv & ADVERTISED_100baseT_Half)
260 result |= ADVERTISE_100HALF;
261 if (ethadv & ADVERTISED_100baseT_Full)
262 result |= ADVERTISE_100FULL;
263 if (ethadv & ADVERTISED_Pause)
264 result |= ADVERTISE_PAUSE_CAP;
265 if (ethadv & ADVERTISED_Asym_Pause)
266 result |= ADVERTISE_PAUSE_ASYM;
267
268 return result;
269}
270
271/**
272 * mii_adv_to_ethtool_adv_t
273 * @adv: value of the MII_ADVERTISE register
274 *
275 * A small helper function that translates MII_ADVERTISE bits
276 * to ethtool advertisement settings.
277 */
278static inline u32 mii_adv_to_ethtool_adv_t(u32 adv)
279{
280 u32 result = 0;
281
282 if (adv & ADVERTISE_10HALF)
283 result |= ADVERTISED_10baseT_Half;
284 if (adv & ADVERTISE_10FULL)
285 result |= ADVERTISED_10baseT_Full;
286 if (adv & ADVERTISE_100HALF)
287 result |= ADVERTISED_100baseT_Half;
288 if (adv & ADVERTISE_100FULL)
289 result |= ADVERTISED_100baseT_Full;
290 if (adv & ADVERTISE_PAUSE_CAP)
291 result |= ADVERTISED_Pause;
292 if (adv & ADVERTISE_PAUSE_ASYM)
293 result |= ADVERTISED_Asym_Pause;
294
295 return result;
296}
297
298/**
299 * ethtool_adv_to_mii_ctrl1000_t
300 * @ethadv: the ethtool advertisement settings
301 *
302 * A small helper function that translates ethtool advertisement
303 * settings to phy autonegotiation advertisements for the
304 * MII_CTRL1000 register when in 1000T mode.
305 */
306static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv)
307{
308 u32 result = 0;
309
310 if (ethadv & ADVERTISED_1000baseT_Half)
311 result |= ADVERTISE_1000HALF;
312 if (ethadv & ADVERTISED_1000baseT_Full)
313 result |= ADVERTISE_1000FULL;
314
315 return result;
316}
317
318/**
319 * mii_ctrl1000_to_ethtool_adv_t
320 * @adv: value of the MII_CTRL1000 register
321 *
322 * A small helper function that translates MII_CTRL1000
323 * bits, when in 1000Base-T mode, to ethtool
324 * advertisement settings.
325 */
326static inline u32 mii_ctrl1000_to_ethtool_adv_t(u32 adv)
327{
328 u32 result = 0;
329
330 if (adv & ADVERTISE_1000HALF)
331 result |= ADVERTISED_1000baseT_Half;
332 if (adv & ADVERTISE_1000FULL)
333 result |= ADVERTISED_1000baseT_Full;
334
335 return result;
336}
337
338/**
339 * mii_lpa_to_ethtool_lpa_t
 340 * @lpa: value of the MII_LPA register
341 *
342 * A small helper function that translates MII_LPA
343 * bits, when in 1000Base-T mode, to ethtool
344 * LP advertisement settings.
345 */
346static inline u32 mii_lpa_to_ethtool_lpa_t(u32 lpa)
347{
348 u32 result = 0;
349
350 if (lpa & LPA_LPACK)
351 result |= ADVERTISED_Autoneg;
352
353 return result | mii_adv_to_ethtool_adv_t(lpa);
354}
355
356/**
357 * mii_stat1000_to_ethtool_lpa_t
 358 * @lpa: value of the MII_STAT1000 register
359 *
360 * A small helper function that translates MII_STAT1000
361 * bits, when in 1000Base-T mode, to ethtool
362 * advertisement settings.
363 */
364static inline u32 mii_stat1000_to_ethtool_lpa_t(u32 lpa)
365{
366 u32 result = 0;
367
368 if (lpa & LPA_1000HALF)
369 result |= ADVERTISED_1000baseT_Half;
370 if (lpa & LPA_1000FULL)
371 result |= ADVERTISED_1000baseT_Full;
372
373 return result;
374}
375
376/**
377 * ethtool_adv_to_mii_adv_x
378 * @ethadv: the ethtool advertisement settings
379 *
380 * A small helper function that translates ethtool advertisement
381 * settings to phy autonegotiation advertisements for the
382 * MII_CTRL1000 register when in 1000Base-X mode.
383 */
384static inline u32 ethtool_adv_to_mii_adv_x(u32 ethadv)
385{
386 u32 result = 0;
387
388 if (ethadv & ADVERTISED_1000baseT_Half)
389 result |= ADVERTISE_1000XHALF;
390 if (ethadv & ADVERTISED_1000baseT_Full)
391 result |= ADVERTISE_1000XFULL;
392 if (ethadv & ADVERTISED_Pause)
393 result |= ADVERTISE_1000XPAUSE;
394 if (ethadv & ADVERTISED_Asym_Pause)
395 result |= ADVERTISE_1000XPSE_ASYM;
396
397 return result;
398}
399
400/**
401 * mii_adv_to_ethtool_adv_x
402 * @adv: value of the MII_CTRL1000 register
403 *
404 * A small helper function that translates MII_CTRL1000
405 * bits, when in 1000Base-X mode, to ethtool
406 * advertisement settings.
407 */
408static inline u32 mii_adv_to_ethtool_adv_x(u32 adv)
409{
410 u32 result = 0;
411
412 if (adv & ADVERTISE_1000XHALF)
413 result |= ADVERTISED_1000baseT_Half;
414 if (adv & ADVERTISE_1000XFULL)
415 result |= ADVERTISED_1000baseT_Full;
416 if (adv & ADVERTISE_1000XPAUSE)
417 result |= ADVERTISED_Pause;
418 if (adv & ADVERTISE_1000XPSE_ASYM)
419 result |= ADVERTISED_Asym_Pause;
420
421 return result;
422}
423
424/**
425 * mii_lpa_to_ethtool_lpa_x
 426 * @lpa: value of the MII_LPA register
427 *
428 * A small helper function that translates MII_LPA
429 * bits, when in 1000Base-X mode, to ethtool
430 * LP advertisement settings.
431 */
432static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa)
433{
434 u32 result = 0;
435
436 if (lpa & LPA_LPACK)
437 result |= ADVERTISED_Autoneg;
438
439 return result | mii_adv_to_ethtool_adv_x(lpa);
440}
441
442/**
243 * mii_advertise_flowctrl - get flow control advertisement flags 443 * mii_advertise_flowctrl - get flow control advertisement flags
244 * @cap: Flow control capabilities (FLOW_CTRL_RX, FLOW_CTRL_TX or both) 444 * @cap: Flow control capabilities (FLOW_CTRL_RX, FLOW_CTRL_TX or both)
245 */ 445 */
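The conversion helpers introduced above are meant to replace open-coded bit translation in MAC/PHY drivers. A minimal sketch using only the helpers defined in this hunk (the function name and the out-parameters are illustrative; a real driver would write the register values via its MDIO accessors):

#include <linux/mii.h>

/* Translate an ethtool advertising mask into the values a driver would
 * program into MII_ADVERTISE and MII_CTRL1000, and fold the link partner's
 * LPA/STAT1000 readings back into an ethtool lp_advertising mask.
 */
static inline u32 example_mii_translate(u32 advertising, u32 lpa, u32 stat1000,
					u16 *reg_adv, u16 *reg_ctrl1000)
{
	*reg_adv = ethtool_adv_to_mii_adv_t(advertising);
	*reg_ctrl1000 = ethtool_adv_to_mii_ctrl1000_t(advertising);

	return mii_lpa_to_ethtool_lpa_t(lpa) |
	       mii_stat1000_to_ethtool_lpa_t(stat1000);
}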
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index b56e4587208d..9958ff2cad3c 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -59,12 +59,15 @@ enum {
59 MLX4_CMD_HW_HEALTH_CHECK = 0x50, 59 MLX4_CMD_HW_HEALTH_CHECK = 0x50,
60 MLX4_CMD_SET_PORT = 0xc, 60 MLX4_CMD_SET_PORT = 0xc,
61 MLX4_CMD_SET_NODE = 0x5a, 61 MLX4_CMD_SET_NODE = 0x5a,
62 MLX4_CMD_QUERY_FUNC = 0x56,
62 MLX4_CMD_ACCESS_DDR = 0x2e, 63 MLX4_CMD_ACCESS_DDR = 0x2e,
63 MLX4_CMD_MAP_ICM = 0xffa, 64 MLX4_CMD_MAP_ICM = 0xffa,
64 MLX4_CMD_UNMAP_ICM = 0xff9, 65 MLX4_CMD_UNMAP_ICM = 0xff9,
65 MLX4_CMD_MAP_ICM_AUX = 0xffc, 66 MLX4_CMD_MAP_ICM_AUX = 0xffc,
66 MLX4_CMD_UNMAP_ICM_AUX = 0xffb, 67 MLX4_CMD_UNMAP_ICM_AUX = 0xffb,
67 MLX4_CMD_SET_ICM_SIZE = 0xffd, 68 MLX4_CMD_SET_ICM_SIZE = 0xffd,
 69	/* master notifies FW on completion of a slave's FLR */
70 MLX4_CMD_INFORM_FLR_DONE = 0x5b,
68 71
69 /* TPT commands */ 72 /* TPT commands */
70 MLX4_CMD_SW2HW_MPT = 0xd, 73 MLX4_CMD_SW2HW_MPT = 0xd,
@@ -119,6 +122,26 @@ enum {
119 /* miscellaneous commands */ 122 /* miscellaneous commands */
120 MLX4_CMD_DIAG_RPRT = 0x30, 123 MLX4_CMD_DIAG_RPRT = 0x30,
121 MLX4_CMD_NOP = 0x31, 124 MLX4_CMD_NOP = 0x31,
125 MLX4_CMD_ACCESS_MEM = 0x2e,
126 MLX4_CMD_SET_VEP = 0x52,
127
128 /* Ethernet specific commands */
129 MLX4_CMD_SET_VLAN_FLTR = 0x47,
130 MLX4_CMD_SET_MCAST_FLTR = 0x48,
131 MLX4_CMD_DUMP_ETH_STATS = 0x49,
132
133 /* Communication channel commands */
134 MLX4_CMD_ARM_COMM_CHANNEL = 0x57,
135 MLX4_CMD_GEN_EQE = 0x58,
136
137 /* virtual commands */
138 MLX4_CMD_ALLOC_RES = 0xf00,
139 MLX4_CMD_FREE_RES = 0xf01,
140 MLX4_CMD_MCAST_ATTACH = 0xf05,
141 MLX4_CMD_UCAST_ATTACH = 0xf06,
142 MLX4_CMD_PROMISC = 0xf08,
143 MLX4_CMD_QUERY_FUNC_CAP = 0xf0a,
144 MLX4_CMD_QP_ATTACH = 0xf0b,
122 145
123 /* debug commands */ 146 /* debug commands */
124 MLX4_CMD_QUERY_DEBUG_MSG = 0x2a, 147 MLX4_CMD_QUERY_DEBUG_MSG = 0x2a,
@@ -126,6 +149,7 @@ enum {
126 149
127 /* statistics commands */ 150 /* statistics commands */
128 MLX4_CMD_QUERY_IF_STAT = 0X54, 151 MLX4_CMD_QUERY_IF_STAT = 0X54,
152 MLX4_CMD_SET_IF_STAT = 0X55,
129}; 153};
130 154
131enum { 155enum {
@@ -135,7 +159,8 @@ enum {
135}; 159};
136 160
137enum { 161enum {
138 MLX4_MAILBOX_SIZE = 4096 162 MLX4_MAILBOX_SIZE = 4096,
163 MLX4_ACCESS_MEM_ALIGN = 256,
139}; 164};
140 165
141enum { 166enum {
@@ -148,6 +173,11 @@ enum {
148 MLX4_SET_PORT_GID_TABLE = 0x5, 173 MLX4_SET_PORT_GID_TABLE = 0x5,
149}; 174};
150 175
176enum {
177 MLX4_CMD_WRAPPED,
178 MLX4_CMD_NATIVE
179};
180
151struct mlx4_dev; 181struct mlx4_dev;
152 182
153struct mlx4_cmd_mailbox { 183struct mlx4_cmd_mailbox {
@@ -157,23 +187,24 @@ struct mlx4_cmd_mailbox {
157 187
158int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, 188int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
159 int out_is_imm, u32 in_modifier, u8 op_modifier, 189 int out_is_imm, u32 in_modifier, u8 op_modifier,
160 u16 op, unsigned long timeout); 190 u16 op, unsigned long timeout, int native);
161 191
162/* Invoke a command with no output parameter */ 192/* Invoke a command with no output parameter */
163static inline int mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u32 in_modifier, 193static inline int mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u32 in_modifier,
164 u8 op_modifier, u16 op, unsigned long timeout) 194 u8 op_modifier, u16 op, unsigned long timeout,
195 int native)
165{ 196{
166 return __mlx4_cmd(dev, in_param, NULL, 0, in_modifier, 197 return __mlx4_cmd(dev, in_param, NULL, 0, in_modifier,
167 op_modifier, op, timeout); 198 op_modifier, op, timeout, native);
168} 199}
169 200
170/* Invoke a command with an output mailbox */ 201/* Invoke a command with an output mailbox */
171static inline int mlx4_cmd_box(struct mlx4_dev *dev, u64 in_param, u64 out_param, 202static inline int mlx4_cmd_box(struct mlx4_dev *dev, u64 in_param, u64 out_param,
172 u32 in_modifier, u8 op_modifier, u16 op, 203 u32 in_modifier, u8 op_modifier, u16 op,
173 unsigned long timeout) 204 unsigned long timeout, int native)
174{ 205{
175 return __mlx4_cmd(dev, in_param, &out_param, 0, in_modifier, 206 return __mlx4_cmd(dev, in_param, &out_param, 0, in_modifier,
176 op_modifier, op, timeout); 207 op_modifier, op, timeout, native);
177} 208}
178 209
179/* 210/*
@@ -183,13 +214,17 @@ static inline int mlx4_cmd_box(struct mlx4_dev *dev, u64 in_param, u64 out_param
183 */ 214 */
184static inline int mlx4_cmd_imm(struct mlx4_dev *dev, u64 in_param, u64 *out_param, 215static inline int mlx4_cmd_imm(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
185 u32 in_modifier, u8 op_modifier, u16 op, 216 u32 in_modifier, u8 op_modifier, u16 op,
186 unsigned long timeout) 217 unsigned long timeout, int native)
187{ 218{
188 return __mlx4_cmd(dev, in_param, out_param, 1, in_modifier, 219 return __mlx4_cmd(dev, in_param, out_param, 1, in_modifier,
189 op_modifier, op, timeout); 220 op_modifier, op, timeout, native);
190} 221}
191 222
192struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev); 223struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev);
193void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox); 224void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox);
194 225
226u32 mlx4_comm_get_version(void);
227
228#define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8)
229
195#endif /* MLX4_CMD_H */ 230#endif /* MLX4_CMD_H */
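Every mlx4 command invocation now carries a wrapped/native disposition. A hedged sketch of a caller adapted to the new signature (the in_modifier and the 1000 ms timeout are illustrative values, not taken from this header, and the wrapped-vs-native choice is shown only to exercise the new flag):

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/device.h>

/* Illustrative: issue a NOP, wrapping it on multi-function devices. */
static int example_mlx4_nop(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 1000,
			mlx4_is_mfunc(dev) ? MLX4_CMD_WRAPPED :
					     MLX4_CMD_NATIVE);
}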
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 84b0b1848f17..5c4fe8e5bfe5 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -47,6 +47,9 @@
47enum { 47enum {
48 MLX4_FLAG_MSI_X = 1 << 0, 48 MLX4_FLAG_MSI_X = 1 << 0,
49 MLX4_FLAG_OLD_PORT_CMDS = 1 << 1, 49 MLX4_FLAG_OLD_PORT_CMDS = 1 << 1,
50 MLX4_FLAG_MASTER = 1 << 2,
51 MLX4_FLAG_SLAVE = 1 << 3,
52 MLX4_FLAG_SRIOV = 1 << 4,
50}; 53};
51 54
52enum { 55enum {
@@ -58,6 +61,15 @@ enum {
58}; 61};
59 62
60enum { 63enum {
64 MLX4_MAX_NUM_PF = 16,
65 MLX4_MAX_NUM_VF = 64,
66 MLX4_MFUNC_MAX = 80,
67 MLX4_MFUNC_EQ_NUM = 4,
68 MLX4_MFUNC_MAX_EQES = 8,
69 MLX4_MFUNC_EQE_MASK = (MLX4_MFUNC_MAX_EQES - 1)
70};
71
72enum {
61 MLX4_DEV_CAP_FLAG_RC = 1LL << 0, 73 MLX4_DEV_CAP_FLAG_RC = 1LL << 0,
62 MLX4_DEV_CAP_FLAG_UC = 1LL << 1, 74 MLX4_DEV_CAP_FLAG_UC = 1LL << 1,
63 MLX4_DEV_CAP_FLAG_UD = 1LL << 2, 75 MLX4_DEV_CAP_FLAG_UD = 1LL << 2,
@@ -77,11 +89,13 @@ enum {
77 MLX4_DEV_CAP_FLAG_IBOE = 1LL << 30, 89 MLX4_DEV_CAP_FLAG_IBOE = 1LL << 30,
78 MLX4_DEV_CAP_FLAG_UC_LOOPBACK = 1LL << 32, 90 MLX4_DEV_CAP_FLAG_UC_LOOPBACK = 1LL << 32,
79 MLX4_DEV_CAP_FLAG_FCS_KEEP = 1LL << 34, 91 MLX4_DEV_CAP_FLAG_FCS_KEEP = 1LL << 34,
80 MLX4_DEV_CAP_FLAG_WOL = 1LL << 38, 92 MLX4_DEV_CAP_FLAG_WOL_PORT1 = 1LL << 37,
93 MLX4_DEV_CAP_FLAG_WOL_PORT2 = 1LL << 38,
81 MLX4_DEV_CAP_FLAG_UDP_RSS = 1LL << 40, 94 MLX4_DEV_CAP_FLAG_UDP_RSS = 1LL << 40,
82 MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41, 95 MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41,
83 MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42, 96 MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42,
84 MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48 97 MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48,
98 MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55
85}; 99};
86 100
87#define MLX4_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90) 101#define MLX4_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90)
@@ -116,7 +130,11 @@ enum mlx4_event {
116 MLX4_EVENT_TYPE_PORT_CHANGE = 0x09, 130 MLX4_EVENT_TYPE_PORT_CHANGE = 0x09,
117 MLX4_EVENT_TYPE_EQ_OVERFLOW = 0x0f, 131 MLX4_EVENT_TYPE_EQ_OVERFLOW = 0x0f,
118 MLX4_EVENT_TYPE_ECC_DETECT = 0x0e, 132 MLX4_EVENT_TYPE_ECC_DETECT = 0x0e,
119 MLX4_EVENT_TYPE_CMD = 0x0a 133 MLX4_EVENT_TYPE_CMD = 0x0a,
134 MLX4_EVENT_TYPE_VEP_UPDATE = 0x19,
135 MLX4_EVENT_TYPE_COMM_CHANNEL = 0x18,
136 MLX4_EVENT_TYPE_FLR_EVENT = 0x1c,
137 MLX4_EVENT_TYPE_NONE = 0xff,
120}; 138};
121 139
122enum { 140enum {
@@ -183,6 +201,7 @@ enum mlx4_qp_region {
183}; 201};
184 202
185enum mlx4_port_type { 203enum mlx4_port_type {
204 MLX4_PORT_TYPE_NONE = 0,
186 MLX4_PORT_TYPE_IB = 1, 205 MLX4_PORT_TYPE_IB = 1,
187 MLX4_PORT_TYPE_ETH = 2, 206 MLX4_PORT_TYPE_ETH = 2,
188 MLX4_PORT_TYPE_AUTO = 3 207 MLX4_PORT_TYPE_AUTO = 3
@@ -215,6 +234,7 @@ static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
215 234
216struct mlx4_caps { 235struct mlx4_caps {
217 u64 fw_ver; 236 u64 fw_ver;
237 u32 function;
218 int num_ports; 238 int num_ports;
219 int vl_cap[MLX4_MAX_PORTS + 1]; 239 int vl_cap[MLX4_MAX_PORTS + 1];
220 int ib_mtu_cap[MLX4_MAX_PORTS + 1]; 240 int ib_mtu_cap[MLX4_MAX_PORTS + 1];
@@ -229,6 +249,7 @@ struct mlx4_caps {
229 u64 trans_code[MLX4_MAX_PORTS + 1]; 249 u64 trans_code[MLX4_MAX_PORTS + 1];
230 int local_ca_ack_delay; 250 int local_ca_ack_delay;
231 int num_uars; 251 int num_uars;
252 u32 uar_page_size;
232 int bf_reg_size; 253 int bf_reg_size;
233 int bf_regs_per_page; 254 int bf_regs_per_page;
234 int max_sq_sg; 255 int max_sq_sg;
@@ -252,8 +273,7 @@ struct mlx4_caps {
252 int num_comp_vectors; 273 int num_comp_vectors;
253 int comp_pool; 274 int comp_pool;
254 int num_mpts; 275 int num_mpts;
255 int num_mtt_segs; 276 int num_mtts;
256 int mtts_per_seg;
257 int fmr_reserved_mtts; 277 int fmr_reserved_mtts;
258 int reserved_mtts; 278 int reserved_mtts;
259 int reserved_mrws; 279 int reserved_mrws;
@@ -283,7 +303,9 @@ struct mlx4_caps {
283 int log_num_prios; 303 int log_num_prios;
284 enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; 304 enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
285 u8 supported_type[MLX4_MAX_PORTS + 1]; 305 u8 supported_type[MLX4_MAX_PORTS + 1];
286 u32 port_mask; 306 u8 suggested_type[MLX4_MAX_PORTS + 1];
307 u8 default_sense[MLX4_MAX_PORTS + 1];
308 u32 port_mask[MLX4_MAX_PORTS + 1];
287 enum mlx4_port_type possible_type[MLX4_MAX_PORTS + 1]; 309 enum mlx4_port_type possible_type[MLX4_MAX_PORTS + 1];
288 u32 max_counters; 310 u32 max_counters;
289 u8 ext_port_cap[MLX4_MAX_PORTS + 1]; 311 u8 ext_port_cap[MLX4_MAX_PORTS + 1];
@@ -303,7 +325,7 @@ struct mlx4_buf {
303}; 325};
304 326
305struct mlx4_mtt { 327struct mlx4_mtt {
306 u32 first_seg; 328 u32 offset;
307 int order; 329 int order;
308 int page_shift; 330 int page_shift;
309}; 331};
@@ -465,10 +487,12 @@ struct mlx4_counter {
465struct mlx4_dev { 487struct mlx4_dev {
466 struct pci_dev *pdev; 488 struct pci_dev *pdev;
467 unsigned long flags; 489 unsigned long flags;
490 unsigned long num_slaves;
468 struct mlx4_caps caps; 491 struct mlx4_caps caps;
469 struct radix_tree_root qp_table_tree; 492 struct radix_tree_root qp_table_tree;
470 u8 rev_id; 493 u8 rev_id;
471 char board_id[MLX4_BOARD_ID_LEN]; 494 char board_id[MLX4_BOARD_ID_LEN];
495 int num_vfs;
472}; 496};
473 497
474struct mlx4_init_port_param { 498struct mlx4_init_port_param {
@@ -487,14 +511,32 @@ struct mlx4_init_port_param {
487 511
488#define mlx4_foreach_port(port, dev, type) \ 512#define mlx4_foreach_port(port, dev, type) \
489 for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ 513 for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
490 if (((type) == MLX4_PORT_TYPE_IB ? (dev)->caps.port_mask : \ 514 if ((type) == (dev)->caps.port_mask[(port)])
491 ~(dev)->caps.port_mask) & 1 << ((port) - 1))
492 515
493#define mlx4_foreach_ib_transport_port(port, dev) \ 516#define mlx4_foreach_ib_transport_port(port, dev) \
494 for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ 517 for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
495 if (((dev)->caps.port_mask & 1 << ((port) - 1)) || \ 518 if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \
496 ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) 519 ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
497 520
521static inline int mlx4_is_master(struct mlx4_dev *dev)
522{
523 return dev->flags & MLX4_FLAG_MASTER;
524}
525
526static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn)
527{
528 return (qpn < dev->caps.sqp_start + 8);
529}
530
531static inline int mlx4_is_mfunc(struct mlx4_dev *dev)
532{
533 return dev->flags & (MLX4_FLAG_SLAVE | MLX4_FLAG_MASTER);
534}
535
536static inline int mlx4_is_slave(struct mlx4_dev *dev)
537{
538 return dev->flags & MLX4_FLAG_SLAVE;
539}
498 540
499int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, 541int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
500 struct mlx4_buf *buf); 542 struct mlx4_buf *buf);
@@ -560,6 +602,10 @@ int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_waterm
560int mlx4_INIT_PORT(struct mlx4_dev *dev, int port); 602int mlx4_INIT_PORT(struct mlx4_dev *dev, int port);
561int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port); 603int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port);
562 604
605int mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
606 int block_mcast_loopback, enum mlx4_protocol prot);
607int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
608 enum mlx4_protocol prot);
563int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 609int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
564 int block_mcast_loopback, enum mlx4_protocol protocol); 610 int block_mcast_loopback, enum mlx4_protocol protocol);
565int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 611int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
@@ -570,9 +616,11 @@ int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
570int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port); 616int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
571int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode); 617int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
572 618
573int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap); 619int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
574void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn); 620void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
575int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap); 621int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
622int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn);
623void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn);
576 624
577int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); 625int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
578int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); 626int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
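The new per-port port_mask[] array changes how the port iteration macros are used, and mlx4_is_master()/mlx4_is_mfunc()/mlx4_is_slave() are the intended way to branch on SR-IOV roles. A short sketch (function name and the early-return policy are hypothetical):

#include <linux/mlx4/device.h>

static int example_count_eth_ports(struct mlx4_dev *dev)
{
	int port, n = 0;

	/* Illustrative: a slave skips work reserved for the master. */
	if (mlx4_is_mfunc(dev) && !mlx4_is_master(dev))
		return 0;

	mlx4_foreach_port(port, dev, MLX4_PORT_TYPE_ETH)
		n++;

	return n;
}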
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 48cc4cb97858..bee8fa231276 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -97,6 +97,33 @@ enum {
97 MLX4_QP_BIT_RIC = 1 << 4, 97 MLX4_QP_BIT_RIC = 1 << 4,
98}; 98};
99 99
100enum {
101 MLX4_RSS_HASH_XOR = 0,
102 MLX4_RSS_HASH_TOP = 1,
103
104 MLX4_RSS_UDP_IPV6 = 1 << 0,
105 MLX4_RSS_UDP_IPV4 = 1 << 1,
106 MLX4_RSS_TCP_IPV6 = 1 << 2,
107 MLX4_RSS_IPV6 = 1 << 3,
108 MLX4_RSS_TCP_IPV4 = 1 << 4,
109 MLX4_RSS_IPV4 = 1 << 5,
110
111 /* offset of mlx4_rss_context within mlx4_qp_context.pri_path */
112 MLX4_RSS_OFFSET_IN_QPC_PRI_PATH = 0x24,
 113	/* bit offset of the RSS-indirection-QP flag within mlx4_qp_context.flags */
114 MLX4_RSS_QPC_FLAG_OFFSET = 13,
115};
116
117struct mlx4_rss_context {
118 __be32 base_qpn;
119 __be32 default_qpn;
120 u16 reserved;
121 u8 hash_fn;
122 u8 flags;
123 __be32 rss_key[10];
124 __be32 base_qpn_udp;
125};
126
100struct mlx4_qp_path { 127struct mlx4_qp_path {
101 u8 fl; 128 u8 fl;
102 u8 reserved1[2]; 129 u8 reserved1[2];
@@ -183,6 +210,7 @@ struct mlx4_wqe_ctrl_seg {
183 * [4] IP checksum 210 * [4] IP checksum
184 * [3:2] C (generate completion queue entry) 211 * [3:2] C (generate completion queue entry)
185 * [1] SE (solicited event) 212 * [1] SE (solicited event)
213 * [0] FL (force loopback)
186 */ 214 */
187 __be32 srcrb_flags; 215 __be32 srcrb_flags;
188 /* 216 /*
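The mlx4_rss_context layout and the MLX4_RSS_* constants added above suggest how an RSS-capable QP context would be described. A hedged sketch that only fills the structure itself (the base QPN, hash selection, and key handling are placeholders; locating the context inside the QP context via MLX4_RSS_OFFSET_IN_QPC_PRI_PATH is left out):

#include <linux/mlx4/qp.h>
#include <asm/byteorder.h>

/* Illustrative: Toeplitz hashing over TCP/IPv4 and TCP/IPv6 flows. */
static void example_fill_rss_context(struct mlx4_rss_context *ctx, u32 base_qpn)
{
	ctx->base_qpn = cpu_to_be32(base_qpn);
	ctx->default_qpn = cpu_to_be32(base_qpn);
	ctx->hash_fn = MLX4_RSS_HASH_TOP;
	ctx->flags = MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 |
		     MLX4_RSS_IPV6 | MLX4_RSS_TCP_IPV6;
}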
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 4baadd18f4ad..5d9b4c9813bd 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1253,41 +1253,34 @@ static inline void pgtable_page_dtor(struct page *page)
1253extern void free_area_init(unsigned long * zones_size); 1253extern void free_area_init(unsigned long * zones_size);
1254extern void free_area_init_node(int nid, unsigned long * zones_size, 1254extern void free_area_init_node(int nid, unsigned long * zones_size,
1255 unsigned long zone_start_pfn, unsigned long *zholes_size); 1255 unsigned long zone_start_pfn, unsigned long *zholes_size);
1256#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 1256#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
1257/* 1257/*
1258 * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its 1258 * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
1259 * zones, allocate the backing mem_map and account for memory holes in a more 1259 * zones, allocate the backing mem_map and account for memory holes in a more
1260 * architecture independent manner. This is a substitute for creating the 1260 * architecture independent manner. This is a substitute for creating the
1261 * zone_sizes[] and zholes_size[] arrays and passing them to 1261 * zone_sizes[] and zholes_size[] arrays and passing them to
1262 * free_area_init_node() 1262 * free_area_init_node()
1263 * 1263 *
1264 * An architecture is expected to register range of page frames backed by 1264 * An architecture is expected to register range of page frames backed by
1265 * physical memory with add_active_range() before calling 1265 * physical memory with memblock_add[_node]() before calling
1266 * free_area_init_nodes() passing in the PFN each zone ends at. At a basic 1266 * free_area_init_nodes() passing in the PFN each zone ends at. At a basic
1267 * usage, an architecture is expected to do something like 1267 * usage, an architecture is expected to do something like
1268 * 1268 *
1269 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn, 1269 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
1270 * max_highmem_pfn}; 1270 * max_highmem_pfn};
1271 * for_each_valid_physical_page_range() 1271 * for_each_valid_physical_page_range()
1272 * add_active_range(node_id, start_pfn, end_pfn) 1272 * memblock_add_node(base, size, nid)
1273 * free_area_init_nodes(max_zone_pfns); 1273 * free_area_init_nodes(max_zone_pfns);
1274 * 1274 *
1275 * If the architecture guarantees that there are no holes in the ranges 1275 * free_bootmem_with_active_regions() calls free_bootmem_node() for each
1276 * registered with add_active_range(), free_bootmem_active_regions() 1276 * registered physical page range. Similarly
1277 * will call free_bootmem_node() for each registered physical page range. 1277 * sparse_memory_present_with_active_regions() calls memory_present() for
1278 * Similarly sparse_memory_present_with_active_regions() calls 1278 * each range when SPARSEMEM is enabled.
1279 * memory_present() for each range when SPARSEMEM is enabled.
1280 * 1279 *
1281 * See mm/page_alloc.c for more information on each function exposed by 1280 * See mm/page_alloc.c for more information on each function exposed by
1282 * CONFIG_ARCH_POPULATES_NODE_MAP 1281 * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
1283 */ 1282 */
1284extern void free_area_init_nodes(unsigned long *max_zone_pfn); 1283extern void free_area_init_nodes(unsigned long *max_zone_pfn);
1285extern void add_active_range(unsigned int nid, unsigned long start_pfn,
1286 unsigned long end_pfn);
1287extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
1288 unsigned long end_pfn);
1289extern void remove_all_active_ranges(void);
1290void sort_node_map(void);
1291unsigned long node_map_pfn_alignment(void); 1284unsigned long node_map_pfn_alignment(void);
1292unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn, 1285unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
1293 unsigned long end_pfn); 1286 unsigned long end_pfn);
@@ -1300,14 +1293,11 @@ extern void free_bootmem_with_active_regions(int nid,
1300 unsigned long max_low_pfn); 1293 unsigned long max_low_pfn);
1301int add_from_early_node_map(struct range *range, int az, 1294int add_from_early_node_map(struct range *range, int az,
1302 int nr_range, int nid); 1295 int nr_range, int nid);
1303u64 __init find_memory_core_early(int nid, u64 size, u64 align,
1304 u64 goal, u64 limit);
1305typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
1306extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
1307extern void sparse_memory_present_with_active_regions(int nid); 1296extern void sparse_memory_present_with_active_regions(int nid);
1308#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
1309 1297
1310#if !defined(CONFIG_ARCH_POPULATES_NODE_MAP) && \ 1298#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
1299
1300#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
1311 !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) 1301 !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
1312static inline int __early_pfn_to_nid(unsigned long pfn) 1302static inline int __early_pfn_to_nid(unsigned long pfn)
1313{ 1303{
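The rewritten comment block above describes the memblock-based replacement for add_active_range(). Spelled out as code, an architecture's init path would now look roughly like this sketch (the function name, the single-bank registration, and the zone limits are placeholders; memblock_add_node() is the registration call the comment refers to):

#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/init.h>

static void __init example_arch_zone_init(phys_addr_t base, phys_addr_t size,
					  int nid, unsigned long max_low_pfn)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };

	memblock_add_node(base, size, nid);	/* replaces add_active_range() */

	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}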
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 188cb2ffe8db..3ac040f19369 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -598,13 +598,13 @@ struct zonelist {
598#endif 598#endif
599}; 599};
600 600
601#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 601#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
602struct node_active_region { 602struct node_active_region {
603 unsigned long start_pfn; 603 unsigned long start_pfn;
604 unsigned long end_pfn; 604 unsigned long end_pfn;
605 int nid; 605 int nid;
606}; 606};
607#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 607#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
608 608
609#ifndef CONFIG_DISCONTIGMEM 609#ifndef CONFIG_DISCONTIGMEM
610/* The array of struct pages - for discontigmem use pgdat->lmem_map */ 610/* The array of struct pages - for discontigmem use pgdat->lmem_map */
@@ -720,7 +720,7 @@ extern int movable_zone;
720 720
721static inline int zone_movable_is_highmem(void) 721static inline int zone_movable_is_highmem(void)
722{ 722{
 723#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP)	 723#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
724 return movable_zone == ZONE_HIGHMEM; 724 return movable_zone == ZONE_HIGHMEM;
725#else 725#else
726 return 0; 726 return 0;
@@ -938,7 +938,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
938#endif 938#endif
939 939
940#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \ 940#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
941 !defined(CONFIG_ARCH_POPULATES_NODE_MAP) 941 !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
942static inline unsigned long early_pfn_to_nid(unsigned long pfn) 942static inline unsigned long early_pfn_to_nid(unsigned long pfn)
943{ 943{
944 return 0; 944 return 0;
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 468819cdde87..83ac0713ed0a 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -542,4 +542,22 @@ struct isapnp_device_id {
542 kernel_ulong_t driver_data; /* data private to the driver */ 542 kernel_ulong_t driver_data; /* data private to the driver */
543}; 543};
544 544
545/**
546 * struct amba_id - identifies a device on an AMBA bus
 546 * @id: The significant bits of the hardware device ID
548 * @mask: Bitmask specifying which bits of the id field are significant when
549 * matching. A driver binds to a device when ((hardware device ID) & mask)
550 * == id.
551 * @data: Private data used by the driver.
552 */
553struct amba_id {
554 unsigned int id;
555 unsigned int mask;
556#ifndef __KERNEL__
557 kernel_ulong_t data;
558#else
559 void *data;
560#endif
561};
562
545#endif /* LINUX_MOD_DEVICETABLE_H */ 563#endif /* LINUX_MOD_DEVICETABLE_H */
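The newly documented struct amba_id is the match-table entry type for AMBA (PrimeCell) drivers. A minimal sketch of such a table, with a made-up peripheral ID and mask (match part number, ignore revision):

#include <linux/mod_devicetable.h>
#include <linux/module.h>

static struct amba_id example_ids[] = {
	{
		.id	= 0x00041041,	/* hypothetical PrimeCell ID */
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};
MODULE_DEVICE_TABLE(amba, example_ids);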
diff --git a/include/linux/neighbour.h b/include/linux/neighbour.h
index a7003b7a695d..b188f68a08c9 100644
--- a/include/linux/neighbour.h
+++ b/include/linux/neighbour.h
@@ -116,6 +116,7 @@ enum {
116 NDTPA_PROXY_DELAY, /* u64, msecs */ 116 NDTPA_PROXY_DELAY, /* u64, msecs */
117 NDTPA_PROXY_QLEN, /* u32 */ 117 NDTPA_PROXY_QLEN, /* u32 */
118 NDTPA_LOCKTIME, /* u64, msecs */ 118 NDTPA_LOCKTIME, /* u64, msecs */
119 NDTPA_QUEUE_LENBYTES, /* u32 */
119 __NDTPA_MAX 120 __NDTPA_MAX
120}; 121};
121#define NDTPA_MAX (__NDTPA_MAX - 1) 122#define NDTPA_MAX (__NDTPA_MAX - 1)
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
new file mode 100644
index 000000000000..77f5202977ce
--- /dev/null
+++ b/include/linux/netdev_features.h
@@ -0,0 +1,146 @@
1/*
2 * Network device features.
3 *
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 */
10#ifndef _LINUX_NETDEV_FEATURES_H
11#define _LINUX_NETDEV_FEATURES_H
12
13#include <linux/types.h>
14
15typedef u64 netdev_features_t;
16
17enum {
18 NETIF_F_SG_BIT, /* Scatter/gather IO. */
19 NETIF_F_IP_CSUM_BIT, /* Can checksum TCP/UDP over IPv4. */
20 __UNUSED_NETIF_F_1,
21 NETIF_F_HW_CSUM_BIT, /* Can checksum all the packets. */
22 NETIF_F_IPV6_CSUM_BIT, /* Can checksum TCP/UDP over IPV6 */
23 NETIF_F_HIGHDMA_BIT, /* Can DMA to high memory. */
24 NETIF_F_FRAGLIST_BIT, /* Scatter/gather IO. */
25 NETIF_F_HW_VLAN_TX_BIT, /* Transmit VLAN hw acceleration */
26 NETIF_F_HW_VLAN_RX_BIT, /* Receive VLAN hw acceleration */
27 NETIF_F_HW_VLAN_FILTER_BIT, /* Receive filtering on VLAN */
28 NETIF_F_VLAN_CHALLENGED_BIT, /* Device cannot handle VLAN packets */
29 NETIF_F_GSO_BIT, /* Enable software GSO. */
30 NETIF_F_LLTX_BIT, /* LockLess TX - deprecated. Please */
31 /* do not use LLTX in new drivers */
32 NETIF_F_NETNS_LOCAL_BIT, /* Does not change network namespaces */
33 NETIF_F_GRO_BIT, /* Generic receive offload */
34 NETIF_F_LRO_BIT, /* large receive offload */
35
36 /**/NETIF_F_GSO_SHIFT, /* keep the order of SKB_GSO_* bits */
37 NETIF_F_TSO_BIT /* ... TCPv4 segmentation */
38 = NETIF_F_GSO_SHIFT,
39 NETIF_F_UFO_BIT, /* ... UDPv4 fragmentation */
40 NETIF_F_GSO_ROBUST_BIT, /* ... ->SKB_GSO_DODGY */
41 NETIF_F_TSO_ECN_BIT, /* ... TCP ECN support */
42 NETIF_F_TSO6_BIT, /* ... TCPv6 segmentation */
43 NETIF_F_FSO_BIT, /* ... FCoE segmentation */
44 NETIF_F_GSO_RESERVED1, /* ... free (fill GSO_MASK to 8 bits) */
45 /**/NETIF_F_GSO_LAST, /* [can't be last bit, see GSO_MASK] */
46 NETIF_F_GSO_RESERVED2 /* ... free (fill GSO_MASK to 8 bits) */
47 = NETIF_F_GSO_LAST,
48
49 NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */
50 NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */
51 NETIF_F_FCOE_MTU_BIT, /* Supports max FCoE MTU, 2158 bytes*/
52 NETIF_F_NTUPLE_BIT, /* N-tuple filters supported */
53 NETIF_F_RXHASH_BIT, /* Receive hashing offload */
54 NETIF_F_RXCSUM_BIT, /* Receive checksumming offload */
55 NETIF_F_NOCACHE_COPY_BIT, /* Use no-cache copyfromuser */
56 NETIF_F_LOOPBACK_BIT, /* Enable loopback */
57
58 /*
59 * Add your fresh new feature above and remember to update
60 * netdev_features_strings[] in net/core/ethtool.c and maybe
61 * some feature mask #defines below. Please also describe it
62 * in Documentation/networking/netdev-features.txt.
63 */
64
65 /**/NETDEV_FEATURE_COUNT
66};
67
68/* copy'n'paste compression ;) */
69#define __NETIF_F_BIT(bit) ((netdev_features_t)1 << (bit))
70#define __NETIF_F(name) __NETIF_F_BIT(NETIF_F_##name##_BIT)
71
72#define NETIF_F_FCOE_CRC __NETIF_F(FCOE_CRC)
73#define NETIF_F_FCOE_MTU __NETIF_F(FCOE_MTU)
74#define NETIF_F_FRAGLIST __NETIF_F(FRAGLIST)
75#define NETIF_F_FSO __NETIF_F(FSO)
76#define NETIF_F_GRO __NETIF_F(GRO)
77#define NETIF_F_GSO __NETIF_F(GSO)
78#define NETIF_F_GSO_ROBUST __NETIF_F(GSO_ROBUST)
79#define NETIF_F_HIGHDMA __NETIF_F(HIGHDMA)
80#define NETIF_F_HW_CSUM __NETIF_F(HW_CSUM)
81#define NETIF_F_HW_VLAN_FILTER __NETIF_F(HW_VLAN_FILTER)
82#define NETIF_F_HW_VLAN_RX __NETIF_F(HW_VLAN_RX)
83#define NETIF_F_HW_VLAN_TX __NETIF_F(HW_VLAN_TX)
84#define NETIF_F_IP_CSUM __NETIF_F(IP_CSUM)
85#define NETIF_F_IPV6_CSUM __NETIF_F(IPV6_CSUM)
86#define NETIF_F_LLTX __NETIF_F(LLTX)
87#define NETIF_F_LOOPBACK __NETIF_F(LOOPBACK)
88#define NETIF_F_LRO __NETIF_F(LRO)
89#define NETIF_F_NETNS_LOCAL __NETIF_F(NETNS_LOCAL)
90#define NETIF_F_NOCACHE_COPY __NETIF_F(NOCACHE_COPY)
91#define NETIF_F_NTUPLE __NETIF_F(NTUPLE)
92#define NETIF_F_RXCSUM __NETIF_F(RXCSUM)
93#define NETIF_F_RXHASH __NETIF_F(RXHASH)
94#define NETIF_F_SCTP_CSUM __NETIF_F(SCTP_CSUM)
95#define NETIF_F_SG __NETIF_F(SG)
96#define NETIF_F_TSO6 __NETIF_F(TSO6)
97#define NETIF_F_TSO_ECN __NETIF_F(TSO_ECN)
98#define NETIF_F_TSO __NETIF_F(TSO)
99#define NETIF_F_UFO __NETIF_F(UFO)
100#define NETIF_F_VLAN_CHALLENGED __NETIF_F(VLAN_CHALLENGED)
101
102/* Features valid for ethtool to change */
103/* = all defined minus driver/device-class-related */
104#define NETIF_F_NEVER_CHANGE (NETIF_F_VLAN_CHALLENGED | \
105 NETIF_F_LLTX | NETIF_F_NETNS_LOCAL)
106
107/* remember that ((t)1 << t_BITS) is undefined in C99 */
108#define NETIF_F_ETHTOOL_BITS ((__NETIF_F_BIT(NETDEV_FEATURE_COUNT - 1) | \
109 (__NETIF_F_BIT(NETDEV_FEATURE_COUNT - 1) - 1)) & \
110 ~NETIF_F_NEVER_CHANGE)
111
112/* Segmentation offload feature mask */
113#define NETIF_F_GSO_MASK (__NETIF_F_BIT(NETIF_F_GSO_LAST + 1) - \
114 __NETIF_F_BIT(NETIF_F_GSO_SHIFT))
115
116/* List of features with software fallbacks. */
117#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \
118 NETIF_F_TSO6 | NETIF_F_UFO)
119
120#define NETIF_F_GEN_CSUM NETIF_F_HW_CSUM
121#define NETIF_F_V4_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
122#define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
123#define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
124
125#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
126
127#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
128 NETIF_F_FSO)
129
130/*
131 * If one device supports one of these features, then enable them
132 * for all in netdev_increment_features.
133 */
134#define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
135 NETIF_F_SG | NETIF_F_HIGHDMA | \
136 NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED)
137/*
138 * If one device doesn't support one of these features, then disable it
139 * for all in netdev_increment_features.
140 */
141#define NETIF_F_ALL_FOR_ALL (NETIF_F_NOCACHE_COPY | NETIF_F_FSO)
142
143/* changeable features with no special hardware requirements */
144#define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO)
145
146#endif /* _LINUX_NETDEV_FEATURES_H */
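With features widened to a 64-bit netdev_features_t, feature tests keep their familiar bitmask shape but must use the new type and the masks defined above. Two illustrative checks (function names are hypothetical):

#include <linux/netdev_features.h>

static inline bool example_can_gso(netdev_features_t features)
{
	return (features & NETIF_F_GSO_MASK) != 0;
}

static inline bool example_needs_checksum_help(netdev_features_t features)
{
	return !(features & NETIF_F_ALL_CSUM);
}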
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index a82ad4dd306a..a1d109590da4 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -43,6 +43,7 @@
43#include <linux/rculist.h> 43#include <linux/rculist.h>
44#include <linux/dmaengine.h> 44#include <linux/dmaengine.h>
45#include <linux/workqueue.h> 45#include <linux/workqueue.h>
46#include <linux/dynamic_queue_limits.h>
46 47
47#include <linux/ethtool.h> 48#include <linux/ethtool.h>
48#include <net/net_namespace.h> 49#include <net/net_namespace.h>
@@ -50,8 +51,10 @@
50#ifdef CONFIG_DCB 51#ifdef CONFIG_DCB
51#include <net/dcbnl.h> 52#include <net/dcbnl.h>
52#endif 53#endif
54#include <net/netprio_cgroup.h>
55
56#include <linux/netdev_features.h>
53 57
54struct vlan_group;
55struct netpoll_info; 58struct netpoll_info;
56struct phy_device; 59struct phy_device;
57/* 802.11 specific */ 60/* 802.11 specific */
@@ -141,22 +144,20 @@ static inline bool dev_xmit_complete(int rc)
141 * used. 144 * used.
142 */ 145 */
143 146
144#if defined(CONFIG_WLAN) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) 147#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
145# if defined(CONFIG_MAC80211_MESH) 148# if defined(CONFIG_MAC80211_MESH)
146# define LL_MAX_HEADER 128 149# define LL_MAX_HEADER 128
147# else 150# else
148# define LL_MAX_HEADER 96 151# define LL_MAX_HEADER 96
149# endif 152# endif
150#elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE) 153#elif IS_ENABLED(CONFIG_TR)
151# define LL_MAX_HEADER 48 154# define LL_MAX_HEADER 48
152#else 155#else
153# define LL_MAX_HEADER 32 156# define LL_MAX_HEADER 32
154#endif 157#endif
155 158
156#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \ 159#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
157 !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \ 160 !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
158 !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
159 !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
160#define MAX_HEADER LL_MAX_HEADER 161#define MAX_HEADER LL_MAX_HEADER
161#else 162#else
162#define MAX_HEADER (LL_MAX_HEADER + 48) 163#define MAX_HEADER (LL_MAX_HEADER + 48)
@@ -212,6 +213,11 @@ enum {
212#include <linux/cache.h> 213#include <linux/cache.h>
213#include <linux/skbuff.h> 214#include <linux/skbuff.h>
214 215
216#ifdef CONFIG_RPS
217#include <linux/jump_label.h>
218extern struct jump_label_key rps_needed;
219#endif
220
215struct neighbour; 221struct neighbour;
216struct neigh_parms; 222struct neigh_parms;
217struct sk_buff; 223struct sk_buff;
@@ -272,16 +278,11 @@ struct hh_cache {
272 * 278 *
273 * We could use other alignment values, but we must maintain the 279 * We could use other alignment values, but we must maintain the
274 * relationship HH alignment <= LL alignment. 280 * relationship HH alignment <= LL alignment.
275 *
276 * LL_ALLOCATED_SPACE also takes into account the tailroom the device
277 * may need.
278 */ 281 */
279#define LL_RESERVED_SPACE(dev) \ 282#define LL_RESERVED_SPACE(dev) \
280 ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) 283 ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
281#define LL_RESERVED_SPACE_EXTRA(dev,extra) \ 284#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
282 ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) 285 ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
283#define LL_ALLOCATED_SPACE(dev) \
284 ((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
285 286
286struct header_ops { 287struct header_ops {
287 int (*create) (struct sk_buff *skb, struct net_device *dev, 288 int (*create) (struct sk_buff *skb, struct net_device *dev,
@@ -516,11 +517,23 @@ static inline void napi_synchronize(const struct napi_struct *n)
516#endif 517#endif
517 518
518enum netdev_queue_state_t { 519enum netdev_queue_state_t {
519 __QUEUE_STATE_XOFF, 520 __QUEUE_STATE_DRV_XOFF,
521 __QUEUE_STATE_STACK_XOFF,
520 __QUEUE_STATE_FROZEN, 522 __QUEUE_STATE_FROZEN,
521#define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF) | \ 523#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF) | \
522 (1 << __QUEUE_STATE_FROZEN)) 524 (1 << __QUEUE_STATE_STACK_XOFF))
525#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
526 (1 << __QUEUE_STATE_FROZEN))
523}; 527};
528/*
529 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
530 * netif_tx_* functions below are used to manipulate this flag. The
531 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
532 * queue independently. The netif_xmit_*stopped functions below are called
533 * to check if the queue has been stopped by the driver or stack (either
534 * of the XOFF bits are set in the state). Drivers should not need to call
535 * netif_xmit*stopped functions, they should only be using netif_tx_*.
536 */
524 537
525struct netdev_queue { 538struct netdev_queue {
526/* 539/*
@@ -528,9 +541,8 @@ struct netdev_queue {
528 */ 541 */
529 struct net_device *dev; 542 struct net_device *dev;
530 struct Qdisc *qdisc; 543 struct Qdisc *qdisc;
531 unsigned long state;
532 struct Qdisc *qdisc_sleeping; 544 struct Qdisc *qdisc_sleeping;
533#if defined(CONFIG_RPS) || defined(CONFIG_XPS) 545#ifdef CONFIG_SYSFS
534 struct kobject kobj; 546 struct kobject kobj;
535#endif 547#endif
536#if defined(CONFIG_XPS) && defined(CONFIG_NUMA) 548#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
@@ -545,6 +557,18 @@ struct netdev_queue {
545 * please use this field instead of dev->trans_start 557 * please use this field instead of dev->trans_start
546 */ 558 */
547 unsigned long trans_start; 559 unsigned long trans_start;
560
561 /*
562 * Number of TX timeouts for this queue
563 * (/sys/class/net/DEV/Q/trans_timeout)
564 */
565 unsigned long trans_timeout;
566
567 unsigned long state;
568
569#ifdef CONFIG_BQL
570 struct dql dql;
571#endif
548} ____cacheline_aligned_in_smp; 572} ____cacheline_aligned_in_smp;
549 573
550static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) 574static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
@@ -573,7 +597,7 @@ struct rps_map {
573 struct rcu_head rcu; 597 struct rcu_head rcu;
574 u16 cpus[0]; 598 u16 cpus[0];
575}; 599};
576#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16))) 600#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
577 601
578/* 602/*
579 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the 603 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
@@ -597,7 +621,7 @@ struct rps_dev_flow_table {
597 struct rps_dev_flow flows[0]; 621 struct rps_dev_flow flows[0];
598}; 622};
599#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \ 623#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
600 (_num * sizeof(struct rps_dev_flow))) 624 ((_num) * sizeof(struct rps_dev_flow)))
601 625
602/* 626/*
603 * The rps_sock_flow_table contains mappings of flows to the last CPU 627 * The rps_sock_flow_table contains mappings of flows to the last CPU
@@ -608,7 +632,7 @@ struct rps_sock_flow_table {
608 u16 ents[0]; 632 u16 ents[0];
609}; 633};
610#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \ 634#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
611 (_num * sizeof(u16))) 635 ((_num) * sizeof(u16)))
612 636
613#define RPS_NO_CPU 0xffff 637#define RPS_NO_CPU 0xffff
614 638
@@ -660,7 +684,7 @@ struct xps_map {
660 struct rcu_head rcu; 684 struct rcu_head rcu;
661 u16 queues[0]; 685 u16 queues[0];
662}; 686};
663#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + (_num * sizeof(u16))) 687#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
664#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \ 688#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \
665 / sizeof(u16)) 689 / sizeof(u16))
666 690
@@ -683,6 +707,23 @@ struct netdev_tc_txq {
683 u16 offset; 707 u16 offset;
684}; 708};
685 709
710#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
711/*
712 * This structure is to hold information about the device
713 * configured to run FCoE protocol stack.
714 */
715struct netdev_fcoe_hbainfo {
716 char manufacturer[64];
717 char serial_number[64];
718 char hardware_version[64];
719 char driver_version[64];
720 char optionrom_version[64];
721 char firmware_version[64];
722 char model[256];
723 char model_description[256];
724};
725#endif
726
686/* 727/*
687 * This structure defines the management hooks for network devices. 728 * This structure defines the management hooks for network devices.
688 * The following hooks can be defined; unless noted otherwise, they are 729 * The following hooks can be defined; unless noted otherwise, they are
@@ -767,11 +808,11 @@ struct netdev_tc_txq {
767 * 3. Update dev->stats asynchronously and atomically, and define 808 * 3. Update dev->stats asynchronously and atomically, and define
768 * neither operation. 809 * neither operation.
769 * 810 *
770 * void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid); 811 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
 771 * If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)	 812 * If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
 772 * this function is called when a VLAN id is registered.	 813 * this function is called when a VLAN id is registered.
773 * 814 *
774 * void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid); 815 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
 775 * If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)	 816 * If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
 776 * this function is called when a VLAN id is unregistered.	 817 * this function is called when a VLAN id is unregistered.
777 * 818 *
@@ -823,6 +864,13 @@ struct netdev_tc_txq {
823 * perform necessary setup and returns 1 to indicate the device is set up 864 * perform necessary setup and returns 1 to indicate the device is set up
824 * successfully to perform DDP on this I/O, otherwise this returns 0. 865 * successfully to perform DDP on this I/O, otherwise this returns 0.
825 * 866 *
867 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
868 * struct netdev_fcoe_hbainfo *hbainfo);
869 * Called when the FCoE Protocol stack wants information on the underlying
870 * device. This information is utilized by the FCoE protocol stack to
871 * register attributes with Fiber Channel management service as per the
872 * FC-GS Fabric Device Management Information(FDMI) specification.
873 *
826 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type); 874 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
827 * Called when the underlying device wants to override default World Wide 875 * Called when the underlying device wants to override default World Wide
828 * Name (WWN) generation mechanism in FCoE protocol stack to pass its own 876 * Name (WWN) generation mechanism in FCoE protocol stack to pass its own
@@ -845,12 +893,13 @@ struct netdev_tc_txq {
845 * Called to release previously enslaved netdev. 893 * Called to release previously enslaved netdev.
846 * 894 *
847 * Feature/offload setting functions. 895 * Feature/offload setting functions.
848 * u32 (*ndo_fix_features)(struct net_device *dev, u32 features); 896 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
897 * netdev_features_t features);
849 * Adjusts the requested feature flags according to device-specific 898 * Adjusts the requested feature flags according to device-specific
850 * constraints, and returns the resulting flags. Must not modify 899 * constraints, and returns the resulting flags. Must not modify
851 * the device state. 900 * the device state.
852 * 901 *
853 * int (*ndo_set_features)(struct net_device *dev, u32 features); 902 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
854 * Called to update device configuration to new features. Passed 903 * Called to update device configuration to new features. Passed
855 * feature set might be less than what was returned by ndo_fix_features()). 904 * feature set might be less than what was returned by ndo_fix_features()).
856 * Must return >0 or -errno if it changed dev->features itself. 905 * Must return >0 or -errno if it changed dev->features itself.
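Under the new prototypes, a driver's feature hooks take and return netdev_features_t. A hedged skeleton of the pair (the SG/TSO constraint and the RXCSUM reprogramming are illustrative; driver-specific details are elided):

#include <linux/netdevice.h>

static netdev_features_t example_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	/* Example constraint: no TSO without scatter/gather. */
	if (!(features & NETIF_F_SG))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}

static int example_set_features(struct net_device *dev,
				netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if (changed & NETIF_F_RXCSUM) {
		/* Reprogram RX checksum offload in hardware here. */
	}

	return 0;
}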
@@ -885,9 +934,9 @@ struct net_device_ops {
885 struct rtnl_link_stats64 *storage); 934 struct rtnl_link_stats64 *storage);
886 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); 935 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
887 936
888 void (*ndo_vlan_rx_add_vid)(struct net_device *dev, 937 int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
889 unsigned short vid); 938 unsigned short vid);
890 void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, 939 int (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
891 unsigned short vid); 940 unsigned short vid);
892#ifdef CONFIG_NET_POLL_CONTROLLER 941#ifdef CONFIG_NET_POLL_CONTROLLER
893 void (*ndo_poll_controller)(struct net_device *dev); 942 void (*ndo_poll_controller)(struct net_device *dev);
@@ -912,7 +961,7 @@ struct net_device_ops {
912 int (*ndo_get_vf_port)(struct net_device *dev, 961 int (*ndo_get_vf_port)(struct net_device *dev,
913 int vf, struct sk_buff *skb); 962 int vf, struct sk_buff *skb);
914 int (*ndo_setup_tc)(struct net_device *dev, u8 tc); 963 int (*ndo_setup_tc)(struct net_device *dev, u8 tc);
915#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) 964#if IS_ENABLED(CONFIG_FCOE)
916 int (*ndo_fcoe_enable)(struct net_device *dev); 965 int (*ndo_fcoe_enable)(struct net_device *dev);
917 int (*ndo_fcoe_disable)(struct net_device *dev); 966 int (*ndo_fcoe_disable)(struct net_device *dev);
918 int (*ndo_fcoe_ddp_setup)(struct net_device *dev, 967 int (*ndo_fcoe_ddp_setup)(struct net_device *dev,
@@ -925,9 +974,11 @@ struct net_device_ops {
925 u16 xid, 974 u16 xid,
926 struct scatterlist *sgl, 975 struct scatterlist *sgl,
927 unsigned int sgc); 976 unsigned int sgc);
977 int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
978 struct netdev_fcoe_hbainfo *hbainfo);
928#endif 979#endif
929 980
930#if defined(CONFIG_LIBFCOE) || defined(CONFIG_LIBFCOE_MODULE) 981#if IS_ENABLED(CONFIG_LIBFCOE)
931#define NETDEV_FCOE_WWNN 0 982#define NETDEV_FCOE_WWNN 0
932#define NETDEV_FCOE_WWPN 1 983#define NETDEV_FCOE_WWPN 1
933 int (*ndo_fcoe_get_wwn)(struct net_device *dev, 984 int (*ndo_fcoe_get_wwn)(struct net_device *dev,
@@ -944,10 +995,12 @@ struct net_device_ops {
944 struct net_device *slave_dev); 995 struct net_device *slave_dev);
945 int (*ndo_del_slave)(struct net_device *dev, 996 int (*ndo_del_slave)(struct net_device *dev,
946 struct net_device *slave_dev); 997 struct net_device *slave_dev);
947 u32 (*ndo_fix_features)(struct net_device *dev, 998 netdev_features_t (*ndo_fix_features)(struct net_device *dev,
948 u32 features); 999 netdev_features_t features);
949 int (*ndo_set_features)(struct net_device *dev, 1000 int (*ndo_set_features)(struct net_device *dev,
950 u32 features); 1001 netdev_features_t features);
1002 int (*ndo_neigh_construct)(struct neighbour *n);
1003 void (*ndo_neigh_destroy)(struct neighbour *n);
951}; 1004};
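The VLAN filter callbacks now return an error code instead of void, so a driver can report, for example, a full hardware filter table. A hedged sketch, with foo_hw_{add,del}_vlan_filter() as invented hardware helpers:

static int foo_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	if (foo_hw_add_vlan_filter(dev, vid))
		return -ENOSPC;	/* filter table full; caller now sees it */
	return 0;
}

static int foo_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	foo_hw_del_vlan_filter(dev, vid);
	return 0;
}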
952 1005
953/* 1006/*
@@ -997,91 +1050,13 @@ struct net_device {
997 struct list_head unreg_list; 1050 struct list_head unreg_list;
998 1051
999 /* currently active device features */ 1052 /* currently active device features */
1000 u32 features; 1053 netdev_features_t features;
1001 /* user-changeable features */ 1054 /* user-changeable features */
1002 u32 hw_features; 1055 netdev_features_t hw_features;
1003 /* user-requested features */ 1056 /* user-requested features */
1004 u32 wanted_features; 1057 netdev_features_t wanted_features;
1005 /* mask of features inheritable by VLAN devices */ 1058 /* mask of features inheritable by VLAN devices */
1006 u32 vlan_features; 1059 netdev_features_t vlan_features;
1007
1008 /* Net device feature bits; if you change something,
1009 * also update netdev_features_strings[] in ethtool.c */
1010
1011#define NETIF_F_SG 1 /* Scatter/gather IO. */
1012#define NETIF_F_IP_CSUM 2 /* Can checksum TCP/UDP over IPv4. */
1013#define NETIF_F_NO_CSUM 4 /* Does not require checksum. F.e. loopack. */
1014#define NETIF_F_HW_CSUM 8 /* Can checksum all the packets. */
1015#define NETIF_F_IPV6_CSUM 16 /* Can checksum TCP/UDP over IPV6 */
1016#define NETIF_F_HIGHDMA 32 /* Can DMA to high memory. */
1017#define NETIF_F_FRAGLIST 64 /* Scatter/gather IO. */
1018#define NETIF_F_HW_VLAN_TX 128 /* Transmit VLAN hw acceleration */
1019#define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */
1020#define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */
1021#define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */
1022#define NETIF_F_GSO 2048 /* Enable software GSO. */
1023#define NETIF_F_LLTX 4096 /* LockLess TX - deprecated. Please */
1024 /* do not use LLTX in new drivers */
1025#define NETIF_F_NETNS_LOCAL 8192 /* Does not change network namespaces */
1026#define NETIF_F_GRO 16384 /* Generic receive offload */
1027#define NETIF_F_LRO 32768 /* large receive offload */
1028
1029/* the GSO_MASK reserves bits 16 through 23 */
1030#define NETIF_F_FCOE_CRC (1 << 24) /* FCoE CRC32 */
1031#define NETIF_F_SCTP_CSUM (1 << 25) /* SCTP checksum offload */
1032#define NETIF_F_FCOE_MTU (1 << 26) /* Supports max FCoE MTU, 2158 bytes*/
1033#define NETIF_F_NTUPLE (1 << 27) /* N-tuple filters supported */
1034#define NETIF_F_RXHASH (1 << 28) /* Receive hashing offload */
1035#define NETIF_F_RXCSUM (1 << 29) /* Receive checksumming offload */
1036#define NETIF_F_NOCACHE_COPY (1 << 30) /* Use no-cache copyfromuser */
1037#define NETIF_F_LOOPBACK (1 << 31) /* Enable loopback */
1038
1039 /* Segmentation offload features */
1040#define NETIF_F_GSO_SHIFT 16
1041#define NETIF_F_GSO_MASK 0x00ff0000
1042#define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
1043#define NETIF_F_UFO (SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
1044#define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
1045#define NETIF_F_TSO_ECN (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
1046#define NETIF_F_TSO6 (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
1047#define NETIF_F_FSO (SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)
1048
1049 /* Features valid for ethtool to change */
1050 /* = all defined minus driver/device-class-related */
1051#define NETIF_F_NEVER_CHANGE (NETIF_F_VLAN_CHALLENGED | \
1052 NETIF_F_LLTX | NETIF_F_NETNS_LOCAL)
1053#define NETIF_F_ETHTOOL_BITS (0xff3fffff & ~NETIF_F_NEVER_CHANGE)
1054
1055 /* List of features with software fallbacks. */
1056#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \
1057 NETIF_F_TSO6 | NETIF_F_UFO)
1058
1059
1060#define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
1061#define NETIF_F_V4_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
1062#define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
1063#define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
1064
1065#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
1066
1067#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
1068 NETIF_F_FSO)
1069
1070 /*
1071 * If one device supports one of these features, then enable them
1072 * for all in netdev_increment_features.
1073 */
1074#define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
1075 NETIF_F_SG | NETIF_F_HIGHDMA | \
1076 NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED)
1077 /*
1078 * If one device doesn't support one of these features, then disable it
1079 * for all in netdev_increment_features.
1080 */
1081#define NETIF_F_ALL_FOR_ALL (NETIF_F_NOCACHE_COPY | NETIF_F_FSO)
1082
1083 /* changeable features with no special hardware requirements */
1084#define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO)
1085 1060
1086 /* Interface index. Unique device identifier */ 1061 /* Interface index. Unique device identifier */
1087 int ifindex; 1062 int ifindex;
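With the NETIF_F_* definitions moved out of struct net_device and the feature fields retyped as netdev_features_t, a driver populates them the same way as before. A small probe-time sketch (the particular offload mix is illustrative only):

static void foo_set_netdev_features(struct net_device *dev)
{
	/* offloads the user may toggle via ethtool */
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
			   NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_GRO;
	/* enabled by default */
	dev->features = dev->hw_features | NETIF_F_HIGHDMA;
	/* offloads that stacked VLAN devices may inherit */
	dev->vlan_features = dev->features;
}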
@@ -1132,6 +1107,7 @@ struct net_device {
1132 unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ 1107 unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
1133 unsigned char addr_assign_type; /* hw address assignment type */ 1108 unsigned char addr_assign_type; /* hw address assignment type */
1134 unsigned char addr_len; /* hardware address length */ 1109 unsigned char addr_len; /* hardware address length */
1110 unsigned char neigh_priv_len;
1135 unsigned short dev_id; /* for shared network cards */ 1111 unsigned short dev_id; /* for shared network cards */
1136 1112
1137 spinlock_t addr_list_lock; 1113 spinlock_t addr_list_lock;
@@ -1144,11 +1120,11 @@ struct net_device {
1144 1120
1145 /* Protocol specific pointers */ 1121 /* Protocol specific pointers */
1146 1122
1147#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 1123#if IS_ENABLED(CONFIG_VLAN_8021Q)
1148 struct vlan_group __rcu *vlgrp; /* VLAN group */ 1124 struct vlan_info __rcu *vlan_info; /* VLAN info */
1149#endif 1125#endif
1150#ifdef CONFIG_NET_DSA 1126#if IS_ENABLED(CONFIG_NET_DSA)
1151 void *dsa_ptr; /* dsa specific data */ 1127 struct dsa_switch_tree *dsa_ptr; /* dsa specific data */
1152#endif 1128#endif
1153 void *atalk_ptr; /* AppleTalk link */ 1129 void *atalk_ptr; /* AppleTalk link */
1154 struct in_device __rcu *ip_ptr; /* IPv4 specific data */ 1130 struct in_device __rcu *ip_ptr; /* IPv4 specific data */
@@ -1184,9 +1160,11 @@ struct net_device {
1184 1160
1185 unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ 1161 unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
1186 1162
1187#if defined(CONFIG_RPS) || defined(CONFIG_XPS) 1163#ifdef CONFIG_SYSFS
1188 struct kset *queues_kset; 1164 struct kset *queues_kset;
1165#endif
1189 1166
1167#ifdef CONFIG_RPS
1190 struct netdev_rx_queue *_rx; 1168 struct netdev_rx_queue *_rx;
1191 1169
1192 /* Number of RX queues allocated at register_netdev() time */ 1170 /* Number of RX queues allocated at register_netdev() time */
@@ -1308,10 +1286,13 @@ struct net_device {
1308 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; 1286 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
1309 u8 prio_tc_map[TC_BITMASK + 1]; 1287 u8 prio_tc_map[TC_BITMASK + 1];
1310 1288
1311#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) 1289#if IS_ENABLED(CONFIG_FCOE)
1312 /* max exchange id for FCoE LRO by ddp */ 1290 /* max exchange id for FCoE LRO by ddp */
1313 unsigned int fcoe_ddp_xid; 1291 unsigned int fcoe_ddp_xid;
1314#endif 1292#endif
1293#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
1294 struct netprio_map __rcu *priomap;
1295#endif
1315 /* phy device may attach itself for hardware timestamping */ 1296 /* phy device may attach itself for hardware timestamping */
1316 struct phy_device *phydev; 1297 struct phy_device *phydev;
1317 1298
@@ -1515,7 +1496,7 @@ struct packet_type {
1515 struct packet_type *, 1496 struct packet_type *,
1516 struct net_device *); 1497 struct net_device *);
1517 struct sk_buff *(*gso_segment)(struct sk_buff *skb, 1498 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
1518 u32 features); 1499 netdev_features_t features);
1519 int (*gso_send_check)(struct sk_buff *skb); 1500 int (*gso_send_check)(struct sk_buff *skb);
1520 struct sk_buff **(*gro_receive)(struct sk_buff **head, 1501 struct sk_buff **(*gro_receive)(struct sk_buff **head,
1521 struct sk_buff *skb); 1502 struct sk_buff *skb);
@@ -1783,7 +1764,7 @@ extern void __netif_schedule(struct Qdisc *q);
1783 1764
1784static inline void netif_schedule_queue(struct netdev_queue *txq) 1765static inline void netif_schedule_queue(struct netdev_queue *txq)
1785{ 1766{
1786 if (!test_bit(__QUEUE_STATE_XOFF, &txq->state)) 1767 if (!(txq->state & QUEUE_STATE_ANY_XOFF))
1787 __netif_schedule(txq->qdisc); 1768 __netif_schedule(txq->qdisc);
1788} 1769}
1789 1770
@@ -1797,7 +1778,7 @@ static inline void netif_tx_schedule_all(struct net_device *dev)
1797 1778
1798static inline void netif_tx_start_queue(struct netdev_queue *dev_queue) 1779static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
1799{ 1780{
1800 clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state); 1781 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
1801} 1782}
1802 1783
1803/** 1784/**
@@ -1829,7 +1810,7 @@ static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
1829 return; 1810 return;
1830 } 1811 }
1831#endif 1812#endif
1832 if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state)) 1813 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
1833 __netif_schedule(dev_queue->qdisc); 1814 __netif_schedule(dev_queue->qdisc);
1834} 1815}
1835 1816
@@ -1861,7 +1842,7 @@ static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
1861 pr_info("netif_stop_queue() cannot be called before register_netdev()\n"); 1842 pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
1862 return; 1843 return;
1863 } 1844 }
1864 set_bit(__QUEUE_STATE_XOFF, &dev_queue->state); 1845 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
1865} 1846}
1866 1847
1867/** 1848/**
@@ -1888,7 +1869,7 @@ static inline void netif_tx_stop_all_queues(struct net_device *dev)
1888 1869
1889static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue) 1870static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
1890{ 1871{
1891 return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state); 1872 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
1892} 1873}
1893 1874
1894/** 1875/**
@@ -1902,9 +1883,68 @@ static inline int netif_queue_stopped(const struct net_device *dev)
1902 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); 1883 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
1903} 1884}
1904 1885
1905static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue) 1886static inline int netif_xmit_stopped(const struct netdev_queue *dev_queue)
1887{
1888 return dev_queue->state & QUEUE_STATE_ANY_XOFF;
1889}
1890
1891static inline int netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
1892{
1893 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
1894}
1895
1896static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
1897 unsigned int bytes)
1898{
1899#ifdef CONFIG_BQL
1900 dql_queued(&dev_queue->dql, bytes);
1901 if (unlikely(dql_avail(&dev_queue->dql) < 0)) {
1902 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
1903 if (unlikely(dql_avail(&dev_queue->dql) >= 0))
1904 clear_bit(__QUEUE_STATE_STACK_XOFF,
1905 &dev_queue->state);
1906 }
1907#endif
1908}
1909
1910static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
1906{ 1911{
1907 return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN; 1912 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
1913}
1914
1915static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
1916 unsigned pkts, unsigned bytes)
1917{
1918#ifdef CONFIG_BQL
1919 if (likely(bytes)) {
1920 dql_completed(&dev_queue->dql, bytes);
1921 if (unlikely(test_bit(__QUEUE_STATE_STACK_XOFF,
1922 &dev_queue->state) &&
1923 dql_avail(&dev_queue->dql) >= 0)) {
1924 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF,
1925 &dev_queue->state))
1926 netif_schedule_queue(dev_queue);
1927 }
1928 }
1929#endif
1930}
1931
1932static inline void netdev_completed_queue(struct net_device *dev,
1933 unsigned pkts, unsigned bytes)
1934{
1935 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
1936}
1937
1938static inline void netdev_tx_reset_queue(struct netdev_queue *q)
1939{
1940#ifdef CONFIG_BQL
1941 dql_reset(&q->dql);
1942#endif
1943}
1944
1945static inline void netdev_reset_queue(struct net_device *dev_queue)
1946{
1947 netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
1908} 1948}
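The netdev_tx_sent_queue()/netdev_tx_completed_queue() pair implements byte queue limits on top of the split __QUEUE_STATE_DRV_XOFF/__QUEUE_STATE_STACK_XOFF bits: the driver reports bytes posted to hardware and bytes reclaimed, and the stack throttles the queue when too much data is in flight. A rough sketch of where a hypothetical single-queue driver would call them (descriptor handling elided; foo_post_tx_descriptor() is invented):

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	foo_post_tx_descriptor(dev, skb);	/* invented HW helper */
	netdev_sent_queue(dev, skb->len);	/* account bytes handed to HW */
	return NETDEV_TX_OK;
}

static void foo_tx_clean(struct net_device *dev)
{
	unsigned int pkts = 0, bytes = 0;

	/* ... walk completed descriptors, summing pkts and bytes ... */
	netdev_completed_queue(dev, pkts, bytes);
}

static void foo_reset_rings(struct net_device *dev)
{
	/* forget in-flight accounting when the TX ring is torn down */
	netdev_reset_queue(dev);
}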
1909 1949
1910/** 1950/**
@@ -1991,7 +2031,7 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
1991 if (netpoll_trap()) 2031 if (netpoll_trap())
1992 return; 2032 return;
1993#endif 2033#endif
1994 if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state)) 2034 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
1995 __netif_schedule(txq->qdisc); 2035 __netif_schedule(txq->qdisc);
1996} 2036}
1997 2037
@@ -2520,7 +2560,8 @@ extern int netdev_set_master(struct net_device *dev, struct net_device *master)
2520extern int netdev_set_bond_master(struct net_device *dev, 2560extern int netdev_set_bond_master(struct net_device *dev,
2521 struct net_device *master); 2561 struct net_device *master);
2522extern int skb_checksum_help(struct sk_buff *skb); 2562extern int skb_checksum_help(struct sk_buff *skb);
2523extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features); 2563extern struct sk_buff *skb_gso_segment(struct sk_buff *skb,
2564 netdev_features_t features);
2524#ifdef CONFIG_BUG 2565#ifdef CONFIG_BUG
2525extern void netdev_rx_csum_fault(struct net_device *dev); 2566extern void netdev_rx_csum_fault(struct net_device *dev);
2526#else 2567#else
@@ -2549,11 +2590,13 @@ extern const char *netdev_drivername(const struct net_device *dev);
2549 2590
2550extern void linkwatch_run_queue(void); 2591extern void linkwatch_run_queue(void);
2551 2592
2552static inline u32 netdev_get_wanted_features(struct net_device *dev) 2593static inline netdev_features_t netdev_get_wanted_features(
2594 struct net_device *dev)
2553{ 2595{
2554 return (dev->features & ~dev->hw_features) | dev->wanted_features; 2596 return (dev->features & ~dev->hw_features) | dev->wanted_features;
2555} 2597}
2556u32 netdev_increment_features(u32 all, u32 one, u32 mask); 2598netdev_features_t netdev_increment_features(netdev_features_t all,
2599 netdev_features_t one, netdev_features_t mask);
2557int __netdev_update_features(struct net_device *dev); 2600int __netdev_update_features(struct net_device *dev);
2558void netdev_update_features(struct net_device *dev); 2601void netdev_update_features(struct net_device *dev);
2559void netdev_change_features(struct net_device *dev); 2602void netdev_change_features(struct net_device *dev);
@@ -2561,21 +2604,31 @@ void netdev_change_features(struct net_device *dev);
2561void netif_stacked_transfer_operstate(const struct net_device *rootdev, 2604void netif_stacked_transfer_operstate(const struct net_device *rootdev,
2562 struct net_device *dev); 2605 struct net_device *dev);
2563 2606
2564u32 netif_skb_features(struct sk_buff *skb); 2607netdev_features_t netif_skb_features(struct sk_buff *skb);
2565 2608
2566static inline int net_gso_ok(u32 features, int gso_type) 2609static inline int net_gso_ok(netdev_features_t features, int gso_type)
2567{ 2610{
2568 int feature = gso_type << NETIF_F_GSO_SHIFT; 2611 netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
2612
2613 /* check flags correspondence */
2614 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
2615 BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
2616 BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
2617 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
2618 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
2619 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
2620
2569 return (features & feature) == feature; 2621 return (features & feature) == feature;
2570} 2622}
2571 2623
2572static inline int skb_gso_ok(struct sk_buff *skb, u32 features) 2624static inline int skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
2573{ 2625{
2574 return net_gso_ok(features, skb_shinfo(skb)->gso_type) && 2626 return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
2575 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); 2627 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
2576} 2628}
2577 2629
2578static inline int netif_needs_gso(struct sk_buff *skb, int features) 2630static inline int netif_needs_gso(struct sk_buff *skb,
2631 netdev_features_t features)
2579{ 2632{
2580 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || 2633 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
2581 unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); 2634 unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
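Taken together, netif_skb_features(), netif_needs_gso() and skb_gso_segment() now all speak netdev_features_t. A simplified sketch of the transmit-path decision they support (this mirrors, but is not, the core code):

netdev_features_t features = netif_skb_features(skb);

if (netif_needs_gso(skb, features)) {
	/* device cannot segment this skb itself: fall back to software GSO */
	struct sk_buff *segs = skb_gso_segment(skb, features);

	if (IS_ERR(segs))
		goto drop;
	/* transmit each segment in 'segs' instead of the original skb */
}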
@@ -2594,22 +2647,6 @@ static inline int netif_is_bond_slave(struct net_device *dev)
2594 2647
2595extern struct pernet_operations __net_initdata loopback_net_ops; 2648extern struct pernet_operations __net_initdata loopback_net_ops;
2596 2649
2597static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev)
2598{
2599 if (dev->features & NETIF_F_RXCSUM)
2600 return 1;
2601 if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum)
2602 return 0;
2603 return dev->ethtool_ops->get_rx_csum(dev);
2604}
2605
2606static inline u32 dev_ethtool_get_flags(struct net_device *dev)
2607{
2608 if (!dev->ethtool_ops || !dev->ethtool_ops->get_flags)
2609 return 0;
2610 return dev->ethtool_ops->get_flags(dev);
2611}
2612
2613/* Logging, debugging and troubleshooting/diagnostic helpers. */ 2650/* Logging, debugging and troubleshooting/diagnostic helpers. */
2614 2651
2615/* netdev_printk helpers, similar to dev_printk */ 2652/* netdev_printk helpers, similar to dev_printk */
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 857f5026ced6..b809265607d0 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -162,6 +162,24 @@ extern struct ctl_path nf_net_ipv4_netfilter_sysctl_path[];
162 162
163extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; 163extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
164 164
165#if defined(CONFIG_JUMP_LABEL)
166#include <linux/jump_label.h>
167extern struct jump_label_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
168static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
169{
170 if (__builtin_constant_p(pf) &&
171 __builtin_constant_p(hook))
172 return static_branch(&nf_hooks_needed[pf][hook]);
173
174 return !list_empty(&nf_hooks[pf][hook]);
175}
176#else
177static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
178{
179 return !list_empty(&nf_hooks[pf][hook]);
180}
181#endif
182
165int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb, 183int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
166 struct net_device *indev, struct net_device *outdev, 184 struct net_device *indev, struct net_device *outdev,
167 int (*okfn)(struct sk_buff *), int thresh); 185 int (*okfn)(struct sk_buff *), int thresh);
@@ -179,11 +197,9 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
179 struct net_device *outdev, 197 struct net_device *outdev,
180 int (*okfn)(struct sk_buff *), int thresh) 198 int (*okfn)(struct sk_buff *), int thresh)
181{ 199{
182#ifndef CONFIG_NETFILTER_DEBUG 200 if (nf_hooks_active(pf, hook))
183 if (list_empty(&nf_hooks[pf][hook])) 201 return nf_hook_slow(pf, hook, skb, indev, outdev, okfn, thresh);
184 return 1; 202 return 1;
185#endif
186 return nf_hook_slow(pf, hook, skb, indev, outdev, okfn, thresh);
187} 203}
188 204
189static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb, 205static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
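When CONFIG_JUMP_LABEL is set and pf/hook are compile-time constants, nf_hooks_active() compiles down to a static branch that is patched when hooks are registered or removed, so packets pay almost nothing for empty hook lists. A hedged sketch of the registration side that flips that branch, assuming the contemporary nf_hookfn prototype:

static unsigned int my_hook(unsigned int hooknum, struct sk_buff *skb,
			    const struct net_device *in,
			    const struct net_device *out,
			    int (*okfn)(struct sk_buff *))
{
	return NF_ACCEPT;
}

static struct nf_hook_ops my_ops = {
	.hook		= my_hook,
	.owner		= THIS_MODULE,
	.pf		= NFPROTO_IPV4,
	.hooknum	= NF_INET_LOCAL_IN,
	.priority	= NF_IP_PRI_FIRST,
};

/* in the module init path; registering is what makes nf_hooks_active()
 * start returning true for this pf/hooknum */
int err = nf_register_hook(&my_ops);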
diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild
index a1b410c76fc3..e144f54185c0 100644
--- a/include/linux/netfilter/Kbuild
+++ b/include/linux/netfilter/Kbuild
@@ -5,7 +5,9 @@ header-y += nf_conntrack_ftp.h
5header-y += nf_conntrack_sctp.h 5header-y += nf_conntrack_sctp.h
6header-y += nf_conntrack_tcp.h 6header-y += nf_conntrack_tcp.h
7header-y += nf_conntrack_tuple_common.h 7header-y += nf_conntrack_tuple_common.h
8header-y += nf_nat.h
8header-y += nfnetlink.h 9header-y += nfnetlink.h
10header-y += nfnetlink_acct.h
9header-y += nfnetlink_compat.h 11header-y += nfnetlink_compat.h
10header-y += nfnetlink_conntrack.h 12header-y += nfnetlink_conntrack.h
11header-y += nfnetlink_log.h 13header-y += nfnetlink_log.h
@@ -21,6 +23,7 @@ header-y += xt_DSCP.h
21header-y += xt_IDLETIMER.h 23header-y += xt_IDLETIMER.h
22header-y += xt_LED.h 24header-y += xt_LED.h
23header-y += xt_MARK.h 25header-y += xt_MARK.h
26header-y += xt_nfacct.h
24header-y += xt_NFLOG.h 27header-y += xt_NFLOG.h
25header-y += xt_NFQUEUE.h 28header-y += xt_NFQUEUE.h
26header-y += xt_RATEEST.h 29header-y += xt_RATEEST.h
@@ -40,6 +43,7 @@ header-y += xt_cpu.h
40header-y += xt_dccp.h 43header-y += xt_dccp.h
41header-y += xt_devgroup.h 44header-y += xt_devgroup.h
42header-y += xt_dscp.h 45header-y += xt_dscp.h
46header-y += xt_ecn.h
43header-y += xt_esp.h 47header-y += xt_esp.h
44header-y += xt_hashlimit.h 48header-y += xt_hashlimit.h
45header-y += xt_helper.h 49header-y += xt_helper.h
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 0d3dd66322ec..9e3a2838291b 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -83,6 +83,10 @@ enum ip_conntrack_status {
83 /* Conntrack is a fake untracked entry */ 83 /* Conntrack is a fake untracked entry */
84 IPS_UNTRACKED_BIT = 12, 84 IPS_UNTRACKED_BIT = 12,
85 IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT), 85 IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT),
86
87 /* Conntrack has a userspace helper. */
88 IPS_USERSPACE_HELPER_BIT = 13,
89 IPS_USERSPACE_HELPER = (1 << IPS_USERSPACE_HELPER_BIT),
86}; 90};
87 91
88/* Connection tracking event types */ 92/* Connection tracking event types */
diff --git a/include/linux/netfilter/nf_conntrack_tuple_common.h b/include/linux/netfilter/nf_conntrack_tuple_common.h
index 2ea22b018a87..2f6bbc5b8125 100644
--- a/include/linux/netfilter/nf_conntrack_tuple_common.h
+++ b/include/linux/netfilter/nf_conntrack_tuple_common.h
@@ -7,6 +7,33 @@ enum ip_conntrack_dir {
7 IP_CT_DIR_MAX 7 IP_CT_DIR_MAX
8}; 8};
9 9
10/* The protocol-specific manipulable parts of the tuple: always in
11 * network order
12 */
13union nf_conntrack_man_proto {
14 /* Add other protocols here. */
15 __be16 all;
16
17 struct {
18 __be16 port;
19 } tcp;
20 struct {
21 __be16 port;
22 } udp;
23 struct {
24 __be16 id;
25 } icmp;
26 struct {
27 __be16 port;
28 } dccp;
29 struct {
30 __be16 port;
31 } sctp;
32 struct {
33 __be16 key; /* GRE key is 32bit, PPtP only uses 16bit */
34 } gre;
35};
36
10#define CTINFO2DIR(ctinfo) ((ctinfo) >= IP_CT_IS_REPLY ? IP_CT_DIR_REPLY : IP_CT_DIR_ORIGINAL) 37#define CTINFO2DIR(ctinfo) ((ctinfo) >= IP_CT_IS_REPLY ? IP_CT_DIR_REPLY : IP_CT_DIR_ORIGINAL)
11 38
12#endif /* _NF_CONNTRACK_TUPLE_COMMON_H */ 39#endif /* _NF_CONNTRACK_TUPLE_COMMON_H */
diff --git a/include/linux/netfilter/nf_nat.h b/include/linux/netfilter/nf_nat.h
new file mode 100644
index 000000000000..8df2d13730b2
--- /dev/null
+++ b/include/linux/netfilter/nf_nat.h
@@ -0,0 +1,25 @@
1#ifndef _NETFILTER_NF_NAT_H
2#define _NETFILTER_NF_NAT_H
3
4#include <linux/netfilter.h>
5#include <linux/netfilter/nf_conntrack_tuple_common.h>
6
7#define NF_NAT_RANGE_MAP_IPS 1
8#define NF_NAT_RANGE_PROTO_SPECIFIED 2
9#define NF_NAT_RANGE_PROTO_RANDOM 4
10#define NF_NAT_RANGE_PERSISTENT 8
11
12struct nf_nat_ipv4_range {
13 unsigned int flags;
14 __be32 min_ip;
15 __be32 max_ip;
16 union nf_conntrack_man_proto min;
17 union nf_conntrack_man_proto max;
18};
19
20struct nf_nat_ipv4_multi_range_compat {
21 unsigned int rangesize;
22 struct nf_nat_ipv4_range range[1];
23};
24
25#endif /* _NETFILTER_NF_NAT_H */
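These are the old IP_NAT_RANGE_*/nf_nat_range definitions, renamed under the NF_NAT_RANGE_* prefix and moved out of the IPv4-only directory. A sketch of how a NAT rule's range might be filled in, for instance by an iptables target extension; the address and port values are arbitrary:

struct nf_nat_ipv4_multi_range_compat mr = {
	.rangesize = 1,			/* must be 1 for the compat layout */
	.range[0] = {
		.flags	= NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED,
		.min_ip	= htonl(0xc0a80101),	/* SNAT to 192.168.1.1 ... */
		.max_ip	= htonl(0xc0a80101),	/* ... a single address */
		.min	= { .tcp = { .port = htons(1024) } },
		.max	= { .tcp = { .port = htons(65535) } },
	},
};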
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 74d33861473c..b64454c2f79f 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -48,7 +48,8 @@ struct nfgenmsg {
48#define NFNL_SUBSYS_ULOG 4 48#define NFNL_SUBSYS_ULOG 4
49#define NFNL_SUBSYS_OSF 5 49#define NFNL_SUBSYS_OSF 5
50#define NFNL_SUBSYS_IPSET 6 50#define NFNL_SUBSYS_IPSET 6
51#define NFNL_SUBSYS_COUNT 7 51#define NFNL_SUBSYS_ACCT 7
52#define NFNL_SUBSYS_COUNT 8
52 53
53#ifdef __KERNEL__ 54#ifdef __KERNEL__
54 55
diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h
new file mode 100644
index 000000000000..7c4279b4ae7a
--- /dev/null
+++ b/include/linux/netfilter/nfnetlink_acct.h
@@ -0,0 +1,36 @@
1#ifndef _NFNL_ACCT_H_
2#define _NFNL_ACCT_H_
3
4#ifndef NFACCT_NAME_MAX
5#define NFACCT_NAME_MAX 32
6#endif
7
8enum nfnl_acct_msg_types {
9 NFNL_MSG_ACCT_NEW,
10 NFNL_MSG_ACCT_GET,
11 NFNL_MSG_ACCT_GET_CTRZERO,
12 NFNL_MSG_ACCT_DEL,
13 NFNL_MSG_ACCT_MAX
14};
15
16enum nfnl_acct_type {
17 NFACCT_UNSPEC,
18 NFACCT_NAME,
19 NFACCT_PKTS,
20 NFACCT_BYTES,
21 NFACCT_USE,
22 __NFACCT_MAX
23};
24#define NFACCT_MAX (__NFACCT_MAX - 1)
25
26#ifdef __KERNEL__
27
28struct nf_acct;
29
30extern struct nf_acct *nfnl_acct_find_get(const char *filter_name);
31extern void nfnl_acct_put(struct nf_acct *acct);
32extern void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
33
34#endif /* __KERNEL__ */
35
36#endif /* _NFNL_ACCT_H */
diff --git a/include/linux/netfilter/xt_CT.h b/include/linux/netfilter/xt_CT.h
index b56e76811c04..6390f0992f36 100644
--- a/include/linux/netfilter/xt_CT.h
+++ b/include/linux/netfilter/xt_CT.h
@@ -3,7 +3,8 @@
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5 5
6#define XT_CT_NOTRACK 0x1 6#define XT_CT_NOTRACK 0x1
7#define XT_CT_USERSPACE_HELPER 0x2
7 8
8struct xt_ct_target_info { 9struct xt_ct_target_info {
9 __u16 flags; 10 __u16 flags;
diff --git a/include/linux/netfilter/xt_ecn.h b/include/linux/netfilter/xt_ecn.h
new file mode 100644
index 000000000000..7158fca364f2
--- /dev/null
+++ b/include/linux/netfilter/xt_ecn.h
@@ -0,0 +1,35 @@
1/* iptables module for matching the ECN header in IPv4 and TCP header
2 *
3 * (C) 2002 Harald Welte <laforge@gnumonks.org>
4 *
5 * This software is distributed under GNU GPL v2, 1991
6 *
7 * ipt_ecn.h,v 1.4 2002/08/05 19:39:00 laforge Exp
8*/
9#ifndef _XT_ECN_H
10#define _XT_ECN_H
11
12#include <linux/types.h>
13#include <linux/netfilter/xt_dscp.h>
14
15#define XT_ECN_IP_MASK (~XT_DSCP_MASK)
16
17#define XT_ECN_OP_MATCH_IP 0x01
18#define XT_ECN_OP_MATCH_ECE 0x10
19#define XT_ECN_OP_MATCH_CWR 0x20
20
21#define XT_ECN_OP_MATCH_MASK 0xce
22
23/* match info */
24struct xt_ecn_info {
25 __u8 operation;
26 __u8 invert;
27 __u8 ip_ect;
28 union {
29 struct {
30 __u8 ect;
31 } tcp;
32 } proto;
33};
34
35#endif /* _XT_ECN_H */
diff --git a/include/linux/netfilter/xt_nfacct.h b/include/linux/netfilter/xt_nfacct.h
new file mode 100644
index 000000000000..3e19c8a86576
--- /dev/null
+++ b/include/linux/netfilter/xt_nfacct.h
@@ -0,0 +1,13 @@
1#ifndef _XT_NFACCT_MATCH_H
2#define _XT_NFACCT_MATCH_H
3
4#include <linux/netfilter/nfnetlink_acct.h>
5
6struct nf_acct;
7
8struct xt_nfacct_match_info {
9 char name[NFACCT_NAME_MAX];
10 struct nf_acct *nfacct;
11};
12
13#endif /* _XT_NFACCT_MATCH_H */
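The kernel-side nfnl_acct API is reference-counted: look the named object up once, update it per packet, drop the reference on teardown. A hedged sketch of the expected call pattern, loosely shaped like an xt_nfacct match (xtables registration boilerplate trimmed):

static int nfacct_mt_checkentry(const struct xt_mtchk_param *par)
{
	struct xt_nfacct_match_info *info = par->matchinfo;

	info->nfacct = nfnl_acct_find_get(info->name);
	return info->nfacct ? 0 : -ENOENT;
}

static bool nfacct_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_nfacct_match_info *info = par->matchinfo;

	nfnl_acct_update(skb, info->nfacct);	/* bump pkts/bytes counters */
	return true;				/* accounting only: always match */
}

static void nfacct_mt_destroy(const struct xt_mtdtor_param *par)
{
	const struct xt_nfacct_match_info *info = par->matchinfo;

	nfnl_acct_put(info->nfacct);
}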
diff --git a/include/linux/netfilter/xt_rpfilter.h b/include/linux/netfilter/xt_rpfilter.h
new file mode 100644
index 000000000000..8358d4f71952
--- /dev/null
+++ b/include/linux/netfilter/xt_rpfilter.h
@@ -0,0 +1,23 @@
1#ifndef _XT_RPATH_H
2#define _XT_RPATH_H
3
4#include <linux/types.h>
5
6enum {
7 XT_RPFILTER_LOOSE = 1 << 0,
8 XT_RPFILTER_VALID_MARK = 1 << 1,
9 XT_RPFILTER_ACCEPT_LOCAL = 1 << 2,
10 XT_RPFILTER_INVERT = 1 << 3,
11#ifdef __KERNEL__
12 XT_RPFILTER_OPTION_MASK = XT_RPFILTER_LOOSE |
13 XT_RPFILTER_VALID_MARK |
14 XT_RPFILTER_ACCEPT_LOCAL |
15 XT_RPFILTER_INVERT,
16#endif
17};
18
19struct xt_rpfilter_info {
20 __u8 flags;
21};
22
23#endif
diff --git a/include/linux/netfilter_ipv4/Kbuild b/include/linux/netfilter_ipv4/Kbuild
index c3b45480ecf7..f9930c87fff3 100644
--- a/include/linux/netfilter_ipv4/Kbuild
+++ b/include/linux/netfilter_ipv4/Kbuild
@@ -12,4 +12,3 @@ header-y += ipt_ah.h
12header-y += ipt_ecn.h 12header-y += ipt_ecn.h
13header-y += ipt_realm.h 13header-y += ipt_realm.h
14header-y += ipt_ttl.h 14header-y += ipt_ttl.h
15header-y += nf_nat.h
diff --git a/include/linux/netfilter_ipv4/ipt_ecn.h b/include/linux/netfilter_ipv4/ipt_ecn.h
index eabf95fb7d3e..0e0c063dbf60 100644
--- a/include/linux/netfilter_ipv4/ipt_ecn.h
+++ b/include/linux/netfilter_ipv4/ipt_ecn.h
@@ -1,35 +1,15 @@
1/* iptables module for matching the ECN header in IPv4 and TCP header
2 *
3 * (C) 2002 Harald Welte <laforge@gnumonks.org>
4 *
5 * This software is distributed under GNU GPL v2, 1991
6 *
7 * ipt_ecn.h,v 1.4 2002/08/05 19:39:00 laforge Exp
8*/
9#ifndef _IPT_ECN_H 1#ifndef _IPT_ECN_H
10#define _IPT_ECN_H 2#define _IPT_ECN_H
11 3
12#include <linux/types.h> 4#include <linux/netfilter/xt_ecn.h>
13#include <linux/netfilter/xt_dscp.h> 5#define ipt_ecn_info xt_ecn_info
14 6
15#define IPT_ECN_IP_MASK (~XT_DSCP_MASK) 7enum {
16 8 IPT_ECN_IP_MASK = XT_ECN_IP_MASK,
17#define IPT_ECN_OP_MATCH_IP 0x01 9 IPT_ECN_OP_MATCH_IP = XT_ECN_OP_MATCH_IP,
18#define IPT_ECN_OP_MATCH_ECE 0x10 10 IPT_ECN_OP_MATCH_ECE = XT_ECN_OP_MATCH_ECE,
19#define IPT_ECN_OP_MATCH_CWR 0x20 11 IPT_ECN_OP_MATCH_CWR = XT_ECN_OP_MATCH_CWR,
20 12 IPT_ECN_OP_MATCH_MASK = XT_ECN_OP_MATCH_MASK,
21#define IPT_ECN_OP_MATCH_MASK 0xce
22
23/* match info */
24struct ipt_ecn_info {
25 __u8 operation;
26 __u8 invert;
27 __u8 ip_ect;
28 union {
29 struct {
30 __u8 ect;
31 } tcp;
32 } proto;
33}; 13};
34 14
35#endif /* _IPT_ECN_H */ 15#endif /* IPT_ECN_H */
diff --git a/include/linux/netfilter_ipv4/nf_nat.h b/include/linux/netfilter_ipv4/nf_nat.h
deleted file mode 100644
index 7a861d09fc86..000000000000
--- a/include/linux/netfilter_ipv4/nf_nat.h
+++ /dev/null
@@ -1,58 +0,0 @@
1#ifndef _LINUX_NF_NAT_H
2#define _LINUX_NF_NAT_H
3
4#include <linux/types.h>
5
6#define IP_NAT_RANGE_MAP_IPS 1
7#define IP_NAT_RANGE_PROTO_SPECIFIED 2
8#define IP_NAT_RANGE_PROTO_RANDOM 4
9#define IP_NAT_RANGE_PERSISTENT 8
10
11/* The protocol-specific manipulable parts of the tuple. */
12union nf_conntrack_man_proto {
13 /* Add other protocols here. */
14 __be16 all;
15
16 struct {
17 __be16 port;
18 } tcp;
19 struct {
20 __be16 port;
21 } udp;
22 struct {
23 __be16 id;
24 } icmp;
25 struct {
26 __be16 port;
27 } dccp;
28 struct {
29 __be16 port;
30 } sctp;
31 struct {
32 __be16 key; /* GRE key is 32bit, PPtP only uses 16bit */
33 } gre;
34};
35
36/* Single range specification. */
37struct nf_nat_range {
38 /* Set to OR of flags above. */
39 unsigned int flags;
40
41 /* Inclusive: network order. */
42 __be32 min_ip, max_ip;
43
44 /* Inclusive: network order */
45 union nf_conntrack_man_proto min, max;
46};
47
48/* For backwards compat: don't use in modern code. */
49struct nf_nat_multi_range_compat {
50 unsigned int rangesize; /* Must be 1. */
51
52 /* hangs off end. */
53 struct nf_nat_range range[1];
54};
55
56#define nf_nat_multi_range nf_nat_multi_range_compat
57
58#endif
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 8374d2967362..52e48959cfa1 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -8,7 +8,7 @@
8#define NETLINK_UNUSED 1 /* Unused number */ 8#define NETLINK_UNUSED 1 /* Unused number */
9#define NETLINK_USERSOCK 2 /* Reserved for user mode socket protocols */ 9#define NETLINK_USERSOCK 2 /* Reserved for user mode socket protocols */
10#define NETLINK_FIREWALL 3 /* Firewalling hook */ 10#define NETLINK_FIREWALL 3 /* Firewalling hook */
11#define NETLINK_INET_DIAG 4 /* INET socket monitoring */ 11#define NETLINK_SOCK_DIAG 4 /* socket monitoring */
12#define NETLINK_NFLOG 5 /* netfilter/iptables ULOG */ 12#define NETLINK_NFLOG 5 /* netfilter/iptables ULOG */
13#define NETLINK_XFRM 6 /* ipsec */ 13#define NETLINK_XFRM 6 /* ipsec */
14#define NETLINK_SELINUX 7 /* SELinux event notifications */ 14#define NETLINK_SELINUX 7 /* SELinux event notifications */
@@ -27,6 +27,8 @@
27#define NETLINK_RDMA 20 27#define NETLINK_RDMA 20
28#define NETLINK_CRYPTO 21 /* Crypto layer */ 28#define NETLINK_CRYPTO 21 /* Crypto layer */
29 29
30#define NETLINK_INET_DIAG NETLINK_SOCK_DIAG
31
30#define MAX_LINKS 32 32#define MAX_LINKS 32
31 33
32struct sockaddr_nl { 34struct sockaddr_nl {
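The rename is source-compatible thanks to the NETLINK_INET_DIAG alias, and the protocol number does not change. A minimal userspace check:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int main(void)
{
	/* NETLINK_SOCK_DIAG and the old NETLINK_INET_DIAG are both 4 */
	int fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_SOCK_DIAG);

	if (fd < 0)
		perror("socket(NETLINK_SOCK_DIAG)");
	return 0;
}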
diff --git a/include/linux/nfc.h b/include/linux/nfc.h
index 36cb955b05cc..01d4e5d60325 100644
--- a/include/linux/nfc.h
+++ b/include/linux/nfc.h
@@ -62,6 +62,8 @@ enum nfc_commands {
62 NFC_CMD_GET_DEVICE, 62 NFC_CMD_GET_DEVICE,
63 NFC_CMD_DEV_UP, 63 NFC_CMD_DEV_UP,
64 NFC_CMD_DEV_DOWN, 64 NFC_CMD_DEV_DOWN,
65 NFC_CMD_DEP_LINK_UP,
66 NFC_CMD_DEP_LINK_DOWN,
65 NFC_CMD_START_POLL, 67 NFC_CMD_START_POLL,
66 NFC_CMD_STOP_POLL, 68 NFC_CMD_STOP_POLL,
67 NFC_CMD_GET_TARGET, 69 NFC_CMD_GET_TARGET,
@@ -86,6 +88,9 @@ enum nfc_commands {
86 * @NFC_ATTR_TARGET_SENS_RES: NFC-A targets extra information such as NFCID 88 * @NFC_ATTR_TARGET_SENS_RES: NFC-A targets extra information such as NFCID
87 * @NFC_ATTR_TARGET_SEL_RES: NFC-A targets extra information (useful if the 89 * @NFC_ATTR_TARGET_SEL_RES: NFC-A targets extra information (useful if the
88 * target is not NFC-Forum compliant) 90 * target is not NFC-Forum compliant)
91 * @NFC_ATTR_TARGET_NFCID1: NFC-A targets identifier, max 10 bytes
92 * @NFC_ATTR_COMM_MODE: Passive or active mode
93 * @NFC_ATTR_RF_MODE: Initiator or target
89 */ 94 */
90enum nfc_attrs { 95enum nfc_attrs {
91 NFC_ATTR_UNSPEC, 96 NFC_ATTR_UNSPEC,
@@ -95,6 +100,9 @@ enum nfc_attrs {
95 NFC_ATTR_TARGET_INDEX, 100 NFC_ATTR_TARGET_INDEX,
96 NFC_ATTR_TARGET_SENS_RES, 101 NFC_ATTR_TARGET_SENS_RES,
97 NFC_ATTR_TARGET_SEL_RES, 102 NFC_ATTR_TARGET_SEL_RES,
103 NFC_ATTR_TARGET_NFCID1,
104 NFC_ATTR_COMM_MODE,
105 NFC_ATTR_RF_MODE,
98/* private: internal use only */ 106/* private: internal use only */
99 __NFC_ATTR_AFTER_LAST 107 __NFC_ATTR_AFTER_LAST
100}; 108};
@@ -111,6 +119,14 @@ enum nfc_attrs {
111 119
112#define NFC_PROTO_MAX 6 120#define NFC_PROTO_MAX 6
113 121
122/* NFC communication modes */
123#define NFC_COMM_ACTIVE 0
124#define NFC_COMM_PASSIVE 1
125
126/* NFC RF modes */
127#define NFC_RF_INITIATOR 0
128#define NFC_RF_TARGET 1
129
114/* NFC protocols masks used in bitsets */ 130/* NFC protocols masks used in bitsets */
115#define NFC_PROTO_JEWEL_MASK (1 << NFC_PROTO_JEWEL) 131#define NFC_PROTO_JEWEL_MASK (1 << NFC_PROTO_JEWEL)
116#define NFC_PROTO_MIFARE_MASK (1 << NFC_PROTO_MIFARE) 132#define NFC_PROTO_MIFARE_MASK (1 << NFC_PROTO_MIFARE)
@@ -125,9 +141,22 @@ struct sockaddr_nfc {
125 __u32 nfc_protocol; 141 __u32 nfc_protocol;
126}; 142};
127 143
144#define NFC_LLCP_MAX_SERVICE_NAME 63
145struct sockaddr_nfc_llcp {
146 sa_family_t sa_family;
147 __u32 dev_idx;
148 __u32 target_idx;
149 __u32 nfc_protocol;
150 __u8 dsap; /* Destination SAP, if known */
151 __u8 ssap; /* Source SAP to be bound to */
 152	char service_name[NFC_LLCP_MAX_SERVICE_NAME]; /* Service name URI */

153 size_t service_name_len;
154};
155
128/* NFC socket protocols */ 156/* NFC socket protocols */
129#define NFC_SOCKPROTO_RAW 0 157#define NFC_SOCKPROTO_RAW 0
130#define NFC_SOCKPROTO_MAX 1 158#define NFC_SOCKPROTO_LLCP 1
159#define NFC_SOCKPROTO_MAX 2
131 160
132#define NFC_HEADER_SIZE 1 161#define NFC_HEADER_SIZE 1
133 162
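A hedged sketch of how userspace might connect an LLCP socket to a named service using the new address family additions. NFC_PROTO_NFC_DEP and the use of SOCK_STREAM here are assumptions about the companion NFC patches, and dev_idx/target_idx would come from the NFC netlink interface:

#include <string.h>
#include <sys/socket.h>
#include <linux/nfc.h>

static int nfc_llcp_connect(__u32 dev_idx, __u32 target_idx, const char *uri)
{
	struct sockaddr_nfc_llcp addr;
	int fd = socket(AF_NFC, SOCK_STREAM, NFC_SOCKPROTO_LLCP);

	if (fd < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.sa_family = AF_NFC;
	addr.dev_idx = dev_idx;
	addr.target_idx = target_idx;
	addr.nfc_protocol = NFC_PROTO_NFC_DEP;	/* assumed protocol constant */
	strncpy(addr.service_name, uri, NFC_LLCP_MAX_SERVICE_NAME - 1);
	addr.service_name_len = strlen(addr.service_name);

	return connect(fd, (struct sockaddr *)&addr, sizeof(addr)) ? -1 : fd;
}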
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index 8049bf77d799..0f5ff3739820 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -509,6 +509,38 @@
509 * @NL80211_CMD_TDLS_OPER: Perform a high-level TDLS command (e.g. link setup). 509 * @NL80211_CMD_TDLS_OPER: Perform a high-level TDLS command (e.g. link setup).
510 * @NL80211_CMD_TDLS_MGMT: Send a TDLS management frame. 510 * @NL80211_CMD_TDLS_MGMT: Send a TDLS management frame.
511 * 511 *
512 * @NL80211_CMD_UNEXPECTED_FRAME: Used by an application controlling an AP
513 * (or GO) interface (i.e. hostapd) to ask for unexpected frames to
514 * implement sending deauth to stations that send unexpected class 3
515 * frames. Also used as the event sent by the kernel when such a frame
516 * is received.
517 * For the event, the %NL80211_ATTR_MAC attribute carries the TA and
518 * other attributes like the interface index are present.
519 * If used as the command it must have an interface index and you can
520 * only unsubscribe from the event by closing the socket. Subscription
521 * is also for %NL80211_CMD_UNEXPECTED_4ADDR_FRAME events.
522 *
523 * @NL80211_CMD_UNEXPECTED_4ADDR_FRAME: Sent as an event indicating that the
524 * associated station identified by %NL80211_ATTR_MAC sent a 4addr frame
525 * and wasn't already in a 4-addr VLAN. The event will be sent similarly
526 * to the %NL80211_CMD_UNEXPECTED_FRAME event, to the same listener.
527 *
528 * @NL80211_CMD_PROBE_CLIENT: Probe an associated station on an AP interface
529 * by sending a null data frame to it and reporting when the frame is
 530 * 	acknowledged. This is used to allow timing out inactive clients. Uses
531 * %NL80211_ATTR_IFINDEX and %NL80211_ATTR_MAC. The command returns a
532 * direct reply with an %NL80211_ATTR_COOKIE that is later used to match
533 * up the event with the request. The event includes the same data and
534 * has %NL80211_ATTR_ACK set if the frame was ACKed.
535 *
536 * @NL80211_CMD_REGISTER_BEACONS: Register this socket to receive beacons from
537 * other BSSes when any interfaces are in AP mode. This helps implement
538 * OLBC handling in hostapd. Beacons are reported in %NL80211_CMD_FRAME
539 * messages. Note that per PHY only one application may register.
540 *
541 * @NL80211_CMD_SET_NOACK_MAP: sets a bitmap for the individual TIDs whether
542 * No Acknowledgement Policy should be applied.
543 *
512 * @NL80211_CMD_MAX: highest used command number 544 * @NL80211_CMD_MAX: highest used command number
513 * @__NL80211_CMD_AFTER_LAST: internal use 545 * @__NL80211_CMD_AFTER_LAST: internal use
514 */ 546 */
@@ -638,6 +670,16 @@ enum nl80211_commands {
638 NL80211_CMD_TDLS_OPER, 670 NL80211_CMD_TDLS_OPER,
639 NL80211_CMD_TDLS_MGMT, 671 NL80211_CMD_TDLS_MGMT,
640 672
673 NL80211_CMD_UNEXPECTED_FRAME,
674
675 NL80211_CMD_PROBE_CLIENT,
676
677 NL80211_CMD_REGISTER_BEACONS,
678
679 NL80211_CMD_UNEXPECTED_4ADDR_FRAME,
680
681 NL80211_CMD_SET_NOACK_MAP,
682
641 /* add new commands above here */ 683 /* add new commands above here */
642 684
643 /* used to define NL80211_CMD_MAX below */ 685 /* used to define NL80211_CMD_MAX below */
@@ -658,6 +700,8 @@ enum nl80211_commands {
658#define NL80211_CMD_DISASSOCIATE NL80211_CMD_DISASSOCIATE 700#define NL80211_CMD_DISASSOCIATE NL80211_CMD_DISASSOCIATE
659#define NL80211_CMD_REG_BEACON_HINT NL80211_CMD_REG_BEACON_HINT 701#define NL80211_CMD_REG_BEACON_HINT NL80211_CMD_REG_BEACON_HINT
660 702
703#define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
704
661/* source-level API compatibility */ 705/* source-level API compatibility */
662#define NL80211_CMD_GET_MESH_PARAMS NL80211_CMD_GET_MESH_CONFIG 706#define NL80211_CMD_GET_MESH_PARAMS NL80211_CMD_GET_MESH_CONFIG
663#define NL80211_CMD_SET_MESH_PARAMS NL80211_CMD_SET_MESH_CONFIG 707#define NL80211_CMD_SET_MESH_PARAMS NL80211_CMD_SET_MESH_CONFIG
@@ -1109,6 +1153,46 @@ enum nl80211_commands {
1109 * %NL80211_CMD_TDLS_MGMT. Otherwise %NL80211_CMD_TDLS_OPER should be 1153 * %NL80211_CMD_TDLS_MGMT. Otherwise %NL80211_CMD_TDLS_OPER should be
1110 * used for asking the driver to perform a TDLS operation. 1154 * used for asking the driver to perform a TDLS operation.
1111 * 1155 *
1156 * @NL80211_ATTR_DEVICE_AP_SME: This u32 attribute may be listed for devices
1157 * that have AP support to indicate that they have the AP SME integrated
1158 * with support for the features listed in this attribute, see
1159 * &enum nl80211_ap_sme_features.
1160 *
1161 * @NL80211_ATTR_DONT_WAIT_FOR_ACK: Used with %NL80211_CMD_FRAME, this tells
1162 * the driver to not wait for an acknowledgement. Note that due to this,
1163 * it will also not give a status callback nor return a cookie. This is
1164 * mostly useful for probe responses to save airtime.
1165 *
1166 * @NL80211_ATTR_FEATURE_FLAGS: This u32 attribute contains flags from
1167 * &enum nl80211_feature_flags and is advertised in wiphy information.
1168 * @NL80211_ATTR_PROBE_RESP_OFFLOAD: Indicates that the HW responds to probe
1169 *
1170 * requests while operating in AP-mode.
1171 * This attribute holds a bitmap of the supported protocols for
1172 * offloading (see &enum nl80211_probe_resp_offload_support_attr).
1173 *
1174 * @NL80211_ATTR_PROBE_RESP: Probe Response template data. Contains the entire
1175 * probe-response frame. The DA field in the 802.11 header is zero-ed out,
1176 * to be filled by the FW.
1177 * @NL80211_ATTR_DISABLE_HT: Force HT capable interfaces to disable
1178 * this feature. Currently, only supported in mac80211 drivers.
1179 * @NL80211_ATTR_HT_CAPABILITY_MASK: Specify which bits of the
1180 * ATTR_HT_CAPABILITY to which attention should be paid.
1181 * Currently, only mac80211 NICs support this feature.
1182 * The values that may be configured are:
1183 * MCS rates, MAX-AMSDU, HT-20-40 and HT_CAP_SGI_40
1184 * AMPDU density and AMPDU factor.
1185 * All values are treated as suggestions and may be ignored
1186 * by the driver as required. The actual values may be seen in
1187 * the station debugfs ht_caps file.
1188 *
1189 * @NL80211_ATTR_DFS_REGION: region for regulatory rules which this country
1190 * abides to when initiating radiation on DFS channels. A country maps
1191 * to one DFS region.
1192 *
1193 * @NL80211_ATTR_NOACK_MAP: This u16 bitmap contains the No Ack Policy of
1194 * up to 16 TIDs.
1195 *
1112 * @NL80211_ATTR_MAX: highest attribute number currently defined 1196 * @NL80211_ATTR_MAX: highest attribute number currently defined
1113 * @__NL80211_ATTR_AFTER_LAST: internal use 1197 * @__NL80211_ATTR_AFTER_LAST: internal use
1114 */ 1198 */
@@ -1337,6 +1421,23 @@ enum nl80211_attrs {
1337 NL80211_ATTR_TDLS_SUPPORT, 1421 NL80211_ATTR_TDLS_SUPPORT,
1338 NL80211_ATTR_TDLS_EXTERNAL_SETUP, 1422 NL80211_ATTR_TDLS_EXTERNAL_SETUP,
1339 1423
1424 NL80211_ATTR_DEVICE_AP_SME,
1425
1426 NL80211_ATTR_DONT_WAIT_FOR_ACK,
1427
1428 NL80211_ATTR_FEATURE_FLAGS,
1429
1430 NL80211_ATTR_PROBE_RESP_OFFLOAD,
1431
1432 NL80211_ATTR_PROBE_RESP,
1433
1434 NL80211_ATTR_DFS_REGION,
1435
1436 NL80211_ATTR_DISABLE_HT,
1437 NL80211_ATTR_HT_CAPABILITY_MASK,
1438
1439 NL80211_ATTR_NOACK_MAP,
1440
1340 /* add attributes here, update the policy in nl80211.c */ 1441 /* add attributes here, update the policy in nl80211.c */
1341 1442
1342 __NL80211_ATTR_AFTER_LAST, 1443 __NL80211_ATTR_AFTER_LAST,
@@ -1371,6 +1472,7 @@ enum nl80211_attrs {
1371#define NL80211_ATTR_AKM_SUITES NL80211_ATTR_AKM_SUITES 1472#define NL80211_ATTR_AKM_SUITES NL80211_ATTR_AKM_SUITES
1372#define NL80211_ATTR_KEY NL80211_ATTR_KEY 1473#define NL80211_ATTR_KEY NL80211_ATTR_KEY
1373#define NL80211_ATTR_KEYS NL80211_ATTR_KEYS 1474#define NL80211_ATTR_KEYS NL80211_ATTR_KEYS
1475#define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
1374 1476
1375#define NL80211_MAX_SUPP_RATES 32 1477#define NL80211_MAX_SUPP_RATES 32
1376#define NL80211_MAX_SUPP_REG_RULES 32 1478#define NL80211_MAX_SUPP_REG_RULES 32
@@ -1434,7 +1536,11 @@ enum nl80211_iftype {
1434 * @NL80211_STA_FLAG_WME: station is WME/QoS capable 1536 * @NL80211_STA_FLAG_WME: station is WME/QoS capable
1435 * @NL80211_STA_FLAG_MFP: station uses management frame protection 1537 * @NL80211_STA_FLAG_MFP: station uses management frame protection
1436 * @NL80211_STA_FLAG_AUTHENTICATED: station is authenticated 1538 * @NL80211_STA_FLAG_AUTHENTICATED: station is authenticated
1437 * @NL80211_STA_FLAG_TDLS_PEER: station is a TDLS peer 1539 * @NL80211_STA_FLAG_TDLS_PEER: station is a TDLS peer -- this flag should
1540 * only be used in managed mode (even in the flags mask). Note that the
1541 * flag can't be changed, it is only valid while adding a station, and
1542 * attempts to change it will silently be ignored (rather than rejected
1543 * as errors.)
1438 * @NL80211_STA_FLAG_MAX: highest station flag number currently defined 1544 * @NL80211_STA_FLAG_MAX: highest station flag number currently defined
1439 * @__NL80211_STA_FLAG_AFTER_LAST: internal use 1545 * @__NL80211_STA_FLAG_AFTER_LAST: internal use
1440 */ 1546 */
@@ -1549,6 +1655,7 @@ enum nl80211_sta_bss_param {
1549 * containing info as possible, see &enum nl80211_sta_bss_param 1655 * containing info as possible, see &enum nl80211_sta_bss_param
1550 * @NL80211_STA_INFO_CONNECTED_TIME: time since the station is last connected 1656 * @NL80211_STA_INFO_CONNECTED_TIME: time since the station is last connected
1551 * @NL80211_STA_INFO_STA_FLAGS: Contains a struct nl80211_sta_flag_update. 1657 * @NL80211_STA_INFO_STA_FLAGS: Contains a struct nl80211_sta_flag_update.
1658 * @NL80211_STA_INFO_BEACON_LOSS: count of times beacon loss was detected (u32)
1552 * @__NL80211_STA_INFO_AFTER_LAST: internal 1659 * @__NL80211_STA_INFO_AFTER_LAST: internal
1553 * @NL80211_STA_INFO_MAX: highest possible station info attribute 1660 * @NL80211_STA_INFO_MAX: highest possible station info attribute
1554 */ 1661 */
@@ -1571,6 +1678,7 @@ enum nl80211_sta_info {
1571 NL80211_STA_INFO_BSS_PARAM, 1678 NL80211_STA_INFO_BSS_PARAM,
1572 NL80211_STA_INFO_CONNECTED_TIME, 1679 NL80211_STA_INFO_CONNECTED_TIME,
1573 NL80211_STA_INFO_STA_FLAGS, 1680 NL80211_STA_INFO_STA_FLAGS,
1681 NL80211_STA_INFO_BEACON_LOSS,
1574 1682
1575 /* keep last */ 1683 /* keep last */
1576 __NL80211_STA_INFO_AFTER_LAST, 1684 __NL80211_STA_INFO_AFTER_LAST,
@@ -1845,6 +1953,21 @@ enum nl80211_reg_rule_flags {
1845}; 1953};
1846 1954
1847/** 1955/**
1956 * enum nl80211_dfs_regions - regulatory DFS regions
1957 *
1958 * @NL80211_DFS_UNSET: Country has no DFS master region specified
 1959 * @NL80211_DFS_FCC: Country follows DFS master rules from FCC
 1960 * @NL80211_DFS_ETSI: Country follows DFS master rules from ETSI
 1961 * @NL80211_DFS_JP: Country follows DFS master rules from JP/MKK/Telec
1962 */
1963enum nl80211_dfs_regions {
1964 NL80211_DFS_UNSET = 0,
1965 NL80211_DFS_FCC = 1,
1966 NL80211_DFS_ETSI = 2,
1967 NL80211_DFS_JP = 3,
1968};
1969
1970/**
1848 * enum nl80211_survey_info - survey information 1971 * enum nl80211_survey_info - survey information
1849 * 1972 *
1850 * These attribute types are used with %NL80211_ATTR_SURVEY_INFO 1973 * These attribute types are used with %NL80211_ATTR_SURVEY_INFO
@@ -1977,6 +2100,10 @@ enum nl80211_mntr_flags {
1977 * access to a broader network beyond the MBSS. This is done via Root 2100 * access to a broader network beyond the MBSS. This is done via Root
1978 * Announcement frames. 2101 * Announcement frames.
1979 * 2102 *
2103 * @NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL: The minimum interval of time (in
2104 * TUs) during which a mesh STA can send only one Action frame containing a
2105 * PERR element.
2106 *
1980 * @NL80211_MESHCONF_ATTR_MAX: highest possible mesh configuration attribute 2107 * @NL80211_MESHCONF_ATTR_MAX: highest possible mesh configuration attribute
1981 * 2108 *
1982 * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use 2109 * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use
@@ -2000,6 +2127,7 @@ enum nl80211_meshconf_params {
2000 NL80211_MESHCONF_ELEMENT_TTL, 2127 NL80211_MESHCONF_ELEMENT_TTL,
2001 NL80211_MESHCONF_HWMP_RANN_INTERVAL, 2128 NL80211_MESHCONF_HWMP_RANN_INTERVAL,
2002 NL80211_MESHCONF_GATE_ANNOUNCEMENTS, 2129 NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
2130 NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
2003 2131
2004 /* keep last */ 2132 /* keep last */
2005 __NL80211_MESHCONF_ATTR_AFTER_LAST, 2133 __NL80211_MESHCONF_ATTR_AFTER_LAST,
@@ -2650,4 +2778,45 @@ enum nl80211_tdls_operation {
2650 NL80211_TDLS_DISABLE_LINK, 2778 NL80211_TDLS_DISABLE_LINK,
2651}; 2779};
2652 2780
2781/*
2782 * enum nl80211_ap_sme_features - device-integrated AP features
2783 * Reserved for future use, no bits are defined in
2784 * NL80211_ATTR_DEVICE_AP_SME yet.
2785enum nl80211_ap_sme_features {
2786};
2787 */
2788
2789/**
2790 * enum nl80211_feature_flags - device/driver features
2791 * @NL80211_FEATURE_SK_TX_STATUS: This driver supports reflecting back
2792 * TX status to the socket error queue when requested with the
2793 * socket option.
2794 * @NL80211_FEATURE_HT_IBSS: This driver supports IBSS with HT datarates.
2795 */
2796enum nl80211_feature_flags {
2797 NL80211_FEATURE_SK_TX_STATUS = 1 << 0,
2798 NL80211_FEATURE_HT_IBSS = 1 << 1,
2799};
2800
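Userspace discovers these bits via the u32 NL80211_ATTR_FEATURE_FLAGS attribute in the wiphy information. A small libnl-style fragment, assuming tb[] was filled by nla_parse() on an NL80211_CMD_NEW_WIPHY message:

if (tb[NL80211_ATTR_FEATURE_FLAGS]) {
	uint32_t feat = nla_get_u32(tb[NL80211_ATTR_FEATURE_FLAGS]);

	if (feat & NL80211_FEATURE_HT_IBSS)
		printf("driver supports HT data rates in IBSS\n");
	if (feat & NL80211_FEATURE_SK_TX_STATUS)
		printf("driver reflects TX status to the socket error queue\n");
}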
2801/**
2802 * enum nl80211_probe_resp_offload_support_attr - optional supported
2803 * protocols for probe-response offloading by the driver/FW.
2804 * To be used with the %NL80211_ATTR_PROBE_RESP_OFFLOAD attribute.
2805 * Each enum value represents a bit in the bitmap of supported
2806 * protocols. Typically a subset of probe-requests belonging to a
2807 * supported protocol will be excluded from offload and uploaded
2808 * to the host.
2809 *
2810 * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS: Support for WPS ver. 1
2811 * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2: Support for WPS ver. 2
2812 * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P: Support for P2P
2813 * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U: Support for 802.11u
2814 */
2815enum nl80211_probe_resp_offload_support_attr {
2816 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS = 1<<0,
2817 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 = 1<<1,
2818 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P = 1<<2,
2819 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U = 1<<3,
2820};
2821
2653#endif /* __LINUX_NL80211_H */ 2822#endif /* __LINUX_NL80211_H */
diff --git a/include/linux/node.h b/include/linux/node.h
index 92370e22343c..624e53cecc02 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -14,12 +14,12 @@
14#ifndef _LINUX_NODE_H_ 14#ifndef _LINUX_NODE_H_
15#define _LINUX_NODE_H_ 15#define _LINUX_NODE_H_
16 16
17#include <linux/sysdev.h> 17#include <linux/device.h>
18#include <linux/cpumask.h> 18#include <linux/cpumask.h>
19#include <linux/workqueue.h> 19#include <linux/workqueue.h>
20 20
21struct node { 21struct node {
22 struct sys_device sysdev; 22 struct device dev;
23 23
24#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS) 24#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS)
25 struct work_struct node_work; 25 struct work_struct node_work;
@@ -80,6 +80,6 @@ static inline void register_hugetlbfs_with_node(node_registration_func_t reg,
80} 80}
81#endif 81#endif
82 82
83#define to_node(sys_device) container_of(sys_device, struct node, sysdev) 83#define to_node(device) container_of(device, struct node, dev)
84 84
85#endif /* _LINUX_NODE_H_ */ 85#endif /* _LINUX_NODE_H_ */
diff --git a/include/linux/of.h b/include/linux/of.h
index 4948552d60f5..a75a831e2057 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -65,6 +65,27 @@ struct device_node {
65#endif 65#endif
66}; 66};
67 67
68#define MAX_PHANDLE_ARGS 8
69struct of_phandle_args {
70 struct device_node *np;
71 int args_count;
72 uint32_t args[MAX_PHANDLE_ARGS];
73};
74
75#if defined(CONFIG_SPARC) || !defined(CONFIG_OF)
76/* Dummy ref counting routines - to be implemented later */
77static inline struct device_node *of_node_get(struct device_node *node)
78{
79 return node;
80}
81static inline void of_node_put(struct device_node *node)
82{
83}
84#else
85extern struct device_node *of_node_get(struct device_node *node);
86extern void of_node_put(struct device_node *node);
87#endif
88
68#ifdef CONFIG_OF 89#ifdef CONFIG_OF
69 90
70/* Pointer for first entry in chain of all nodes. */ 91/* Pointer for first entry in chain of all nodes. */
@@ -95,21 +116,6 @@ static inline void of_node_set_flag(struct device_node *n, unsigned long flag)
95 116
96extern struct device_node *of_find_all_nodes(struct device_node *prev); 117extern struct device_node *of_find_all_nodes(struct device_node *prev);
97 118
98#if defined(CONFIG_SPARC)
99/* Dummy ref counting routines - to be implemented later */
100static inline struct device_node *of_node_get(struct device_node *node)
101{
102 return node;
103}
104static inline void of_node_put(struct device_node *node)
105{
106}
107
108#else
109extern struct device_node *of_node_get(struct device_node *node);
110extern void of_node_put(struct device_node *node);
111#endif
112
113/* 119/*
114 * OF address retrieval & translation 120 * OF address retrieval & translation
115 */ 121 */
@@ -219,8 +225,8 @@ extern int of_device_is_available(const struct device_node *device);
219extern const void *of_get_property(const struct device_node *node, 225extern const void *of_get_property(const struct device_node *node,
220 const char *name, 226 const char *name,
221 int *lenp); 227 int *lenp);
222#define for_each_property(pp, properties) \ 228#define for_each_property_of_node(dn, pp) \
223 for (pp = properties; pp != NULL; pp = pp->next) 229 for (pp = dn->properties; pp != NULL; pp = pp->next)
224 230
225extern int of_n_addr_cells(struct device_node *np); 231extern int of_n_addr_cells(struct device_node *np);
226extern int of_n_size_cells(struct device_node *np); 232extern int of_n_size_cells(struct device_node *np);
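The renamed iterator now takes the node itself rather than its property list head. A one-look example of walking a node's properties:

struct property *pp;

for_each_property_of_node(np, pp)
	pr_info("%s: property %s (%d bytes)\n",
		np->full_name, pp->name, pp->length);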
@@ -230,9 +236,9 @@ extern int of_modalias_node(struct device_node *node, char *modalias, int len);
230extern struct device_node *of_parse_phandle(struct device_node *np, 236extern struct device_node *of_parse_phandle(struct device_node *np,
231 const char *phandle_name, 237 const char *phandle_name,
232 int index); 238 int index);
233extern int of_parse_phandles_with_args(struct device_node *np, 239extern int of_parse_phandle_with_args(struct device_node *np,
234 const char *list_name, const char *cells_name, int index, 240 const char *list_name, const char *cells_name, int index,
235 struct device_node **out_node, const void **out_args); 241 struct of_phandle_args *out_args);
236 242
237extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)); 243extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align));
238extern int of_alias_get_id(struct device_node *np, const char *stem); 244extern int of_alias_get_id(struct device_node *np, const char *stem);
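of_parse_phandle_with_args() now returns its result through a single struct of_phandle_args instead of a node pointer plus a raw argument blob. A sketch of resolving the first entry of a "gpios = <&gpio1 5 0>;" style property, with minimal error handling:

struct of_phandle_args args;
int err;

err = of_parse_phandle_with_args(np, "gpios", "#gpio-cells", 0, &args);
if (!err) {
	/* args.np is the controller node, returned with a reference held */
	pr_info("%s: %d cells, first cell = %u\n",
		args.np->full_name, args.args_count, args.args[0]);
	of_node_put(args.np);
}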
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index c84d900fbbb3..ed136ad698ce 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -71,7 +71,7 @@ extern int of_fdt_is_compatible(struct boot_param_header *blob,
71 unsigned long node, 71 unsigned long node,
72 const char *compat); 72 const char *compat);
73extern int of_fdt_match(struct boot_param_header *blob, unsigned long node, 73extern int of_fdt_match(struct boot_param_header *blob, unsigned long node,
74 const char **compat); 74 const char *const *compat);
75extern void of_fdt_unflatten_tree(unsigned long *blob, 75extern void of_fdt_unflatten_tree(unsigned long *blob,
76 struct device_node **mynodes); 76 struct device_node **mynodes);
77 77
@@ -88,7 +88,7 @@ extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname,
88extern void *of_get_flat_dt_prop(unsigned long node, const char *name, 88extern void *of_get_flat_dt_prop(unsigned long node, const char *name,
89 unsigned long *size); 89 unsigned long *size);
90extern int of_flat_dt_is_compatible(unsigned long node, const char *name); 90extern int of_flat_dt_is_compatible(unsigned long node, const char *name);
91extern int of_flat_dt_match(unsigned long node, const char **matches); 91extern int of_flat_dt_match(unsigned long node, const char *const *matches);
92extern unsigned long of_get_flat_dt_root(void); 92extern unsigned long of_get_flat_dt_root(void);
93 93
94extern int early_init_dt_scan_chosen(unsigned long node, const char *uname, 94extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 52280a2b5e63..b254052a49d7 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -18,6 +18,7 @@
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/errno.h> 19#include <linux/errno.h>
20#include <linux/gpio.h> 20#include <linux/gpio.h>
21#include <linux/of.h>
21 22
22struct device_node; 23struct device_node;
23 24
@@ -57,8 +58,9 @@ extern int of_mm_gpiochip_add(struct device_node *np,
57extern void of_gpiochip_add(struct gpio_chip *gc); 58extern void of_gpiochip_add(struct gpio_chip *gc);
58extern void of_gpiochip_remove(struct gpio_chip *gc); 59extern void of_gpiochip_remove(struct gpio_chip *gc);
59extern struct gpio_chip *of_node_to_gpiochip(struct device_node *np); 60extern struct gpio_chip *of_node_to_gpiochip(struct device_node *np);
60extern int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np, 61extern int of_gpio_simple_xlate(struct gpio_chip *gc,
61 const void *gpio_spec, u32 *flags); 62 const struct of_phandle_args *gpiospec,
63 u32 *flags);
62 64
63#else /* CONFIG_OF_GPIO */ 65#else /* CONFIG_OF_GPIO */
64 66
@@ -75,8 +77,8 @@ static inline unsigned int of_gpio_count(struct device_node *np)
75} 77}
76 78
77static inline int of_gpio_simple_xlate(struct gpio_chip *gc, 79static inline int of_gpio_simple_xlate(struct gpio_chip *gc,
78 struct device_node *np, 80 const struct of_phandle_args *gpiospec,
79 const void *gpio_spec, u32 *flags) 81 u32 *flags)
80{ 82{
81 return -ENOSYS; 83 return -ENOSYS;
82} 84}
diff --git a/include/linux/openvswitch.h b/include/linux/openvswitch.h
new file mode 100644
index 000000000000..eb1efa54fe84
--- /dev/null
+++ b/include/linux/openvswitch.h
@@ -0,0 +1,452 @@
1/*
2 * Copyright (c) 2007-2011 Nicira Networks.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA
17 */
18
19#ifndef _LINUX_OPENVSWITCH_H
20#define _LINUX_OPENVSWITCH_H 1
21
22#include <linux/types.h>
23
24/**
25 * struct ovs_header - header for OVS Generic Netlink messages.
26 * @dp_ifindex: ifindex of local port for datapath (0 to make a request not
27 * specific to a datapath).
28 *
29 * Attributes following the header are specific to a particular OVS Generic
30 * Netlink family, but all of the OVS families use this header.
31 */
32
33struct ovs_header {
34 int dp_ifindex;
35};
36
37/* Datapaths. */
38
39#define OVS_DATAPATH_FAMILY "ovs_datapath"
40#define OVS_DATAPATH_MCGROUP "ovs_datapath"
41#define OVS_DATAPATH_VERSION 0x1
42
43enum ovs_datapath_cmd {
44 OVS_DP_CMD_UNSPEC,
45 OVS_DP_CMD_NEW,
46 OVS_DP_CMD_DEL,
47 OVS_DP_CMD_GET,
48 OVS_DP_CMD_SET
49};
50
51/**
52 * enum ovs_datapath_attr - attributes for %OVS_DP_* commands.
53 * @OVS_DP_ATTR_NAME: Name of the network device that serves as the "local
54 * port". This is the name of the network device whose dp_ifindex is given in
55 * the &struct ovs_header. Always present in notifications. Required in
56 * %OVS_DP_NEW requests. May be used as an alternative to specifying
57 * dp_ifindex in other requests (with a dp_ifindex of 0).
58 * @OVS_DP_ATTR_UPCALL_PID: The Netlink socket in userspace that is initially
 59 * set on the datapath port (for %OVS_PACKET_CMD_MISS upcalls).  Only valid on
60 * %OVS_DP_CMD_NEW requests. A value of zero indicates that upcalls should
61 * not be sent.
62 * @OVS_DP_ATTR_STATS: Statistics about packets that have passed through the
63 * datapath. Always present in notifications.
64 *
65 * These attributes follow the &struct ovs_header within the Generic Netlink
66 * payload for %OVS_DP_* commands.
67 */
68enum ovs_datapath_attr {
69 OVS_DP_ATTR_UNSPEC,
70 OVS_DP_ATTR_NAME, /* name of dp_ifindex netdev */
71 OVS_DP_ATTR_UPCALL_PID, /* Netlink PID to receive upcalls */
72 OVS_DP_ATTR_STATS, /* struct ovs_dp_stats */
73 __OVS_DP_ATTR_MAX
74};
75
76#define OVS_DP_ATTR_MAX (__OVS_DP_ATTR_MAX - 1)
77
78struct ovs_dp_stats {
79 __u64 n_hit; /* Number of flow table matches. */
80 __u64 n_missed; /* Number of flow table misses. */
81 __u64 n_lost; /* Number of misses not sent to userspace. */
82 __u64 n_flows; /* Number of flows present */
83};
84
85struct ovs_vport_stats {
86 __u64 rx_packets; /* total packets received */
87 __u64 tx_packets; /* total packets transmitted */
88 __u64 rx_bytes; /* total bytes received */
89 __u64 tx_bytes; /* total bytes transmitted */
90 __u64 rx_errors; /* bad packets received */
91 __u64 tx_errors; /* packet transmit problems */
92 __u64 rx_dropped; /* no space in linux buffers */
93 __u64 tx_dropped; /* no space available in linux */
94};
95
96/* Fixed logical ports. */
97#define OVSP_LOCAL ((__u16)0)
98
99/* Packet transfer. */
100
101#define OVS_PACKET_FAMILY "ovs_packet"
102#define OVS_PACKET_VERSION 0x1
103
104enum ovs_packet_cmd {
105 OVS_PACKET_CMD_UNSPEC,
106
107 /* Kernel-to-user notifications. */
108 OVS_PACKET_CMD_MISS, /* Flow table miss. */
109 OVS_PACKET_CMD_ACTION, /* OVS_ACTION_ATTR_USERSPACE action. */
110
111 /* Userspace commands. */
112 OVS_PACKET_CMD_EXECUTE /* Apply actions to a packet. */
113};
114
115/**
116 * enum ovs_packet_attr - attributes for %OVS_PACKET_* commands.
117 * @OVS_PACKET_ATTR_PACKET: Present for all notifications. Contains the entire
118 * packet as received, from the start of the Ethernet header onward. For
119 * %OVS_PACKET_CMD_ACTION, %OVS_PACKET_ATTR_PACKET reflects changes made by
120 * actions preceding %OVS_ACTION_ATTR_USERSPACE, but %OVS_PACKET_ATTR_KEY is
121 * the flow key extracted from the packet as originally received.
122 * @OVS_PACKET_ATTR_KEY: Present for all notifications. Contains the flow key
123 * extracted from the packet as nested %OVS_KEY_ATTR_* attributes. This allows
124 * userspace to adapt its flow setup strategy by comparing its notion of the
125 * flow key against the kernel's.
126 * @OVS_PACKET_ATTR_ACTIONS: Contains actions for the packet. Used
127 * for %OVS_PACKET_CMD_EXECUTE. It has nested %OVS_ACTION_ATTR_* attributes.
128 * @OVS_PACKET_ATTR_USERDATA: Present for an %OVS_PACKET_CMD_ACTION
129 * notification if the %OVS_ACTION_ATTR_USERSPACE action specified an
130 * %OVS_USERSPACE_ATTR_USERDATA attribute.
131 *
132 * These attributes follow the &struct ovs_header within the Generic Netlink
133 * payload for %OVS_PACKET_* commands.
134 */
135enum ovs_packet_attr {
136 OVS_PACKET_ATTR_UNSPEC,
137 OVS_PACKET_ATTR_PACKET, /* Packet data. */
138 OVS_PACKET_ATTR_KEY, /* Nested OVS_KEY_ATTR_* attributes. */
139 OVS_PACKET_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */
140 OVS_PACKET_ATTR_USERDATA, /* u64 OVS_ACTION_ATTR_USERSPACE arg. */
141 __OVS_PACKET_ATTR_MAX
142};
143
144#define OVS_PACKET_ATTR_MAX (__OVS_PACKET_ATTR_MAX - 1)
145
146/* Virtual ports. */
147
148#define OVS_VPORT_FAMILY "ovs_vport"
149#define OVS_VPORT_MCGROUP "ovs_vport"
150#define OVS_VPORT_VERSION 0x1
151
152enum ovs_vport_cmd {
153 OVS_VPORT_CMD_UNSPEC,
154 OVS_VPORT_CMD_NEW,
155 OVS_VPORT_CMD_DEL,
156 OVS_VPORT_CMD_GET,
157 OVS_VPORT_CMD_SET
158};
159
160enum ovs_vport_type {
161 OVS_VPORT_TYPE_UNSPEC,
162 OVS_VPORT_TYPE_NETDEV, /* network device */
163 OVS_VPORT_TYPE_INTERNAL, /* network device implemented by datapath */
164 __OVS_VPORT_TYPE_MAX
165};
166
167#define OVS_VPORT_TYPE_MAX (__OVS_VPORT_TYPE_MAX - 1)
168
169/**
170 * enum ovs_vport_attr - attributes for %OVS_VPORT_* commands.
171 * @OVS_VPORT_ATTR_PORT_NO: 32-bit port number within datapath.
172 * @OVS_VPORT_ATTR_TYPE: 32-bit %OVS_VPORT_TYPE_* constant describing the type
173 * of vport.
174 * @OVS_VPORT_ATTR_NAME: Name of vport. For a vport based on a network device
175 * this is the name of the network device. Maximum length %IFNAMSIZ-1 bytes
176 * plus a null terminator.
177 * @OVS_VPORT_ATTR_OPTIONS: Vport-specific configuration information.
178 * @OVS_VPORT_ATTR_UPCALL_PID: The Netlink socket in userspace that
179 * OVS_PACKET_CMD_MISS upcalls will be directed to for packets received on
180 * this port. A value of zero indicates that upcalls should not be sent.
181 * @OVS_VPORT_ATTR_STATS: A &struct ovs_vport_stats giving statistics for
182 * packets sent or received through the vport.
183 *
184 * These attributes follow the &struct ovs_header within the Generic Netlink
185 * payload for %OVS_VPORT_* commands.
186 *
187 * For %OVS_VPORT_CMD_NEW requests, the %OVS_VPORT_ATTR_TYPE and
188 * %OVS_VPORT_ATTR_NAME attributes are required. %OVS_VPORT_ATTR_PORT_NO is
189 * optional; if not specified a free port number is automatically selected.
190 * Whether %OVS_VPORT_ATTR_OPTIONS is required or optional depends on the type
191 * of vport.  All other attributes are ignored in %OVS_VPORT_CMD_NEW
192 * requests.
193 *
194 * For other requests, if %OVS_VPORT_ATTR_NAME is specified then it is used to
195 * look up the vport to operate on; otherwise dp_ifindex from the &struct
196 * ovs_header plus %OVS_VPORT_ATTR_PORT_NO determine the vport.
197 */
198enum ovs_vport_attr {
199 OVS_VPORT_ATTR_UNSPEC,
200 OVS_VPORT_ATTR_PORT_NO, /* u32 port number within datapath */
201 OVS_VPORT_ATTR_TYPE, /* u32 OVS_VPORT_TYPE_* constant. */
202 OVS_VPORT_ATTR_NAME, /* string name, up to IFNAMSIZ bytes long */
203 OVS_VPORT_ATTR_OPTIONS, /* nested attributes, varies by vport type */
204 OVS_VPORT_ATTR_UPCALL_PID, /* u32 Netlink PID to receive upcalls */
205 OVS_VPORT_ATTR_STATS, /* struct ovs_vport_stats */
206 __OVS_VPORT_ATTR_MAX
207};
208
209#define OVS_VPORT_ATTR_MAX (__OVS_VPORT_ATTR_MAX - 1)
210
211/* Flows. */
212
213#define OVS_FLOW_FAMILY "ovs_flow"
214#define OVS_FLOW_MCGROUP "ovs_flow"
215#define OVS_FLOW_VERSION 0x1
216
217enum ovs_flow_cmd {
218 OVS_FLOW_CMD_UNSPEC,
219 OVS_FLOW_CMD_NEW,
220 OVS_FLOW_CMD_DEL,
221 OVS_FLOW_CMD_GET,
222 OVS_FLOW_CMD_SET
223};
224
225struct ovs_flow_stats {
226 __u64 n_packets; /* Number of matched packets. */
227 __u64 n_bytes; /* Number of matched bytes. */
228};
229
230enum ovs_key_attr {
231 OVS_KEY_ATTR_UNSPEC,
232 OVS_KEY_ATTR_ENCAP, /* Nested set of encapsulated attributes. */
233 OVS_KEY_ATTR_PRIORITY, /* u32 skb->priority */
234 OVS_KEY_ATTR_IN_PORT, /* u32 OVS dp port number */
235 OVS_KEY_ATTR_ETHERNET, /* struct ovs_key_ethernet */
236 OVS_KEY_ATTR_VLAN, /* be16 VLAN TCI */
237 OVS_KEY_ATTR_ETHERTYPE, /* be16 Ethernet type */
238 OVS_KEY_ATTR_IPV4, /* struct ovs_key_ipv4 */
239 OVS_KEY_ATTR_IPV6, /* struct ovs_key_ipv6 */
240 OVS_KEY_ATTR_TCP, /* struct ovs_key_tcp */
241 OVS_KEY_ATTR_UDP, /* struct ovs_key_udp */
242 OVS_KEY_ATTR_ICMP, /* struct ovs_key_icmp */
243 OVS_KEY_ATTR_ICMPV6, /* struct ovs_key_icmpv6 */
244 OVS_KEY_ATTR_ARP, /* struct ovs_key_arp */
245 OVS_KEY_ATTR_ND, /* struct ovs_key_nd */
246 __OVS_KEY_ATTR_MAX
247};
248
249#define OVS_KEY_ATTR_MAX (__OVS_KEY_ATTR_MAX - 1)
250
251/**
252 * enum ovs_frag_type - IPv4 and IPv6 fragment type
253 * @OVS_FRAG_TYPE_NONE: Packet is not a fragment.
254 * @OVS_FRAG_TYPE_FIRST: Packet is a fragment with offset 0.
255 * @OVS_FRAG_TYPE_LATER: Packet is a fragment with nonzero offset.
256 *
257 * Used as the @ipv4_frag in &struct ovs_key_ipv4 and as @ipv6_frag in
258 * &struct ovs_key_ipv6.
259 */
260enum ovs_frag_type {
261 OVS_FRAG_TYPE_NONE,
262 OVS_FRAG_TYPE_FIRST,
263 OVS_FRAG_TYPE_LATER,
264 __OVS_FRAG_TYPE_MAX
265};
266
267#define OVS_FRAG_TYPE_MAX (__OVS_FRAG_TYPE_MAX - 1)
268
269struct ovs_key_ethernet {
270 __u8 eth_src[6];
271 __u8 eth_dst[6];
272};
273
274struct ovs_key_ipv4 {
275 __be32 ipv4_src;
276 __be32 ipv4_dst;
277 __u8 ipv4_proto;
278 __u8 ipv4_tos;
279 __u8 ipv4_ttl;
280 __u8 ipv4_frag; /* One of OVS_FRAG_TYPE_*. */
281};
282
283struct ovs_key_ipv6 {
284 __be32 ipv6_src[4];
285 __be32 ipv6_dst[4];
286	__be32 ipv6_label;	/* 20 bits, in the least-significant bits. */
287 __u8 ipv6_proto;
288 __u8 ipv6_tclass;
289 __u8 ipv6_hlimit;
290 __u8 ipv6_frag; /* One of OVS_FRAG_TYPE_*. */
291};
292
293struct ovs_key_tcp {
294 __be16 tcp_src;
295 __be16 tcp_dst;
296};
297
298struct ovs_key_udp {
299 __be16 udp_src;
300 __be16 udp_dst;
301};
302
303struct ovs_key_icmp {
304 __u8 icmp_type;
305 __u8 icmp_code;
306};
307
308struct ovs_key_icmpv6 {
309 __u8 icmpv6_type;
310 __u8 icmpv6_code;
311};
312
313struct ovs_key_arp {
314 __be32 arp_sip;
315 __be32 arp_tip;
316 __be16 arp_op;
317 __u8 arp_sha[6];
318 __u8 arp_tha[6];
319};
320
321struct ovs_key_nd {
322 __u32 nd_target[4];
323 __u8 nd_sll[6];
324 __u8 nd_tll[6];
325};
326
327/**
328 * enum ovs_flow_attr - attributes for %OVS_FLOW_* commands.
329 * @OVS_FLOW_ATTR_KEY: Nested %OVS_KEY_ATTR_* attributes specifying the flow
330 * key. Always present in notifications. Required for all requests (except
331 * dumps).
332 * @OVS_FLOW_ATTR_ACTIONS: Nested %OVS_ACTION_ATTR_* attributes specifying
333 * the actions to take for packets that match the key. Always present in
334 * notifications. Required for %OVS_FLOW_CMD_NEW requests, optional for
335 * %OVS_FLOW_CMD_SET requests.
336 * @OVS_FLOW_ATTR_STATS: &struct ovs_flow_stats giving statistics for this
337 * flow. Present in notifications if the stats would be nonzero. Ignored in
338 * requests.
339 * @OVS_FLOW_ATTR_TCP_FLAGS: An 8-bit value giving the OR'd value of all of the
340 * TCP flags seen on packets in this flow. Only present in notifications for
341 * TCP flows, and only if it would be nonzero. Ignored in requests.
342 * @OVS_FLOW_ATTR_USED: A 64-bit integer giving the time, in milliseconds on
343 * the system monotonic clock, at which a packet was last processed for this
344 * flow. Only present in notifications if a packet has been processed for this
345 * flow. Ignored in requests.
346 * @OVS_FLOW_ATTR_CLEAR: If present in a %OVS_FLOW_CMD_SET request, clears the
347 * last-used time, accumulated TCP flags, and statistics for this flow.
348 * Otherwise ignored in requests. Never present in notifications.
349 *
350 * These attributes follow the &struct ovs_header within the Generic Netlink
351 * payload for %OVS_FLOW_* commands.
352 */
353enum ovs_flow_attr {
354 OVS_FLOW_ATTR_UNSPEC,
355 OVS_FLOW_ATTR_KEY, /* Sequence of OVS_KEY_ATTR_* attributes. */
356 OVS_FLOW_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */
357 OVS_FLOW_ATTR_STATS, /* struct ovs_flow_stats. */
358 OVS_FLOW_ATTR_TCP_FLAGS, /* 8-bit OR'd TCP flags. */
359 OVS_FLOW_ATTR_USED, /* u64 msecs last used in monotonic time. */
360 OVS_FLOW_ATTR_CLEAR, /* Flag to clear stats, tcp_flags, used. */
361 __OVS_FLOW_ATTR_MAX
362};
363
364#define OVS_FLOW_ATTR_MAX (__OVS_FLOW_ATTR_MAX - 1)
365
366/**
367 * enum ovs_sample_attr - Attributes for %OVS_ACTION_ATTR_SAMPLE action.
368 * @OVS_SAMPLE_ATTR_PROBABILITY: 32-bit fraction of packets to sample with
369 * @OVS_ACTION_ATTR_SAMPLE. A value of 0 samples no packets, a value of
370 * %UINT32_MAX samples all packets and intermediate values sample intermediate
371 * fractions of packets.
372 * @OVS_SAMPLE_ATTR_ACTIONS: Set of actions to execute in sampling event.
373 * Actions are passed as nested attributes.
374 *
375 * Executes the specified actions with the given probability on a per-packet
376 * basis.
377 */
378enum ovs_sample_attr {
379 OVS_SAMPLE_ATTR_UNSPEC,
380 OVS_SAMPLE_ATTR_PROBABILITY, /* u32 number */
381 OVS_SAMPLE_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */
382 __OVS_SAMPLE_ATTR_MAX,
383};
384
385#define OVS_SAMPLE_ATTR_MAX (__OVS_SAMPLE_ATTR_MAX - 1)
386
387/**
388 * enum ovs_userspace_attr - Attributes for %OVS_ACTION_ATTR_USERSPACE action.
389 * @OVS_USERSPACE_ATTR_PID: u32 Netlink PID to which the %OVS_PACKET_CMD_ACTION
390 * message should be sent. Required.
391 * @OVS_USERSPACE_ATTR_USERDATA: If present, its u64 argument is copied to the
392 * %OVS_PACKET_CMD_ACTION message as %OVS_PACKET_ATTR_USERDATA.
393 */
394enum ovs_userspace_attr {
395 OVS_USERSPACE_ATTR_UNSPEC,
396 OVS_USERSPACE_ATTR_PID, /* u32 Netlink PID to receive upcalls. */
397 OVS_USERSPACE_ATTR_USERDATA, /* u64 optional user-specified cookie. */
398 __OVS_USERSPACE_ATTR_MAX
399};
400
401#define OVS_USERSPACE_ATTR_MAX (__OVS_USERSPACE_ATTR_MAX - 1)
402
403/**
404 * struct ovs_action_push_vlan - %OVS_ACTION_ATTR_PUSH_VLAN action argument.
405 * @vlan_tpid: Tag protocol identifier (TPID) to push.
406 * @vlan_tci: Tag control information (TCI) to push.  The CFI bit must be set
407 * (but it will not be set in the 802.1Q header that is pushed).
408 *
409 * The @vlan_tpid value is typically %ETH_P_8021Q. The only acceptable TPID
410 * values are those that the kernel module also parses as 802.1Q headers, to
411 * prevent %OVS_ACTION_ATTR_PUSH_VLAN followed by %OVS_ACTION_ATTR_POP_VLAN
412 * from having surprising results.
413 */
414struct ovs_action_push_vlan {
415 __be16 vlan_tpid; /* 802.1Q TPID. */
416 __be16 vlan_tci; /* 802.1Q TCI (VLAN ID and priority). */
417};
418
419/**
420 * enum ovs_action_attr - Action types.
421 *
422 * @OVS_ACTION_ATTR_OUTPUT: Output packet to port.
423 * @OVS_ACTION_ATTR_USERSPACE: Send packet to userspace according to nested
424 * %OVS_USERSPACE_ATTR_* attributes.
425 * @OVS_ACTION_ATTR_SET: Replaces the contents of an existing header. The
426 * single nested %OVS_KEY_ATTR_* attribute specifies a header to modify and its
427 * value.
428 * @OVS_ACTION_ATTR_PUSH_VLAN: Push a new outermost 802.1Q header onto the
429 * packet.
430 * @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q header off the packet.
431 * @OVS_ACTION_ATTR_SAMPLE: Probabilistically executes actions, as specified in
432 * the nested %OVS_SAMPLE_ATTR_* attributes.
433 *
434 * Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all
435 * fields within a header are modifiable, e.g. the IPv4 protocol and fragment
436 * type may not be changed.
437 */
438
439enum ovs_action_attr {
440 OVS_ACTION_ATTR_UNSPEC,
441 OVS_ACTION_ATTR_OUTPUT, /* u32 port number. */
442 OVS_ACTION_ATTR_USERSPACE, /* Nested OVS_USERSPACE_ATTR_*. */
443 OVS_ACTION_ATTR_SET, /* One nested OVS_KEY_ATTR_*. */
444 OVS_ACTION_ATTR_PUSH_VLAN, /* struct ovs_action_push_vlan. */
445 OVS_ACTION_ATTR_POP_VLAN, /* No argument. */
446 OVS_ACTION_ATTR_SAMPLE, /* Nested OVS_SAMPLE_ATTR_*. */
447 __OVS_ACTION_ATTR_MAX
448};
449
450#define OVS_ACTION_ATTR_MAX (__OVS_ACTION_ATTR_MAX - 1)
451
452#endif /* _LINUX_OPENVSWITCH_H */
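To show how the families above fit together, here is a hedged userspace sketch that sends an OVS_DP_CMD_GET request for a datapath looked up by name. It assumes the libnl-3 Generic Netlink library and a datapath named "ovs-system"; neither is defined by this header, and error handling is omitted.

#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/openvswitch.h>

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	struct ovs_header *ovs;
	int family;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, OVS_DATAPATH_FAMILY);

	msg = nlmsg_alloc();
	/* Generic Netlink header followed by the fixed struct ovs_header. */
	ovs = genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family,
			  sizeof(*ovs), NLM_F_REQUEST,
			  OVS_DP_CMD_GET, OVS_DATAPATH_VERSION);
	ovs->dp_ifindex = 0;		/* 0: identify the datapath by name */
	nla_put_string(msg, OVS_DP_ATTR_NAME, "ovs-system");

	nl_send_auto(sk, msg);		/* reply parsing omitted */
	nlmsg_free(msg);
	nl_socket_free(sk);
	return 0;
}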
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index b1f89122bf6a..08855613ceb3 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -54,6 +54,7 @@ enum perf_hw_id {
54 PERF_COUNT_HW_BUS_CYCLES = 6, 54 PERF_COUNT_HW_BUS_CYCLES = 6,
55 PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7, 55 PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
56 PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8, 56 PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8,
57 PERF_COUNT_HW_REF_CPU_CYCLES = 9,
57 58
58 PERF_COUNT_HW_MAX, /* non-ABI */ 59 PERF_COUNT_HW_MAX, /* non-ABI */
59}; 60};
@@ -890,6 +891,7 @@ struct perf_event_context {
890 int nr_active; 891 int nr_active;
891 int is_active; 892 int is_active;
892 int nr_stat; 893 int nr_stat;
894 int nr_freq;
893 int rotate_disable; 895 int rotate_disable;
894 atomic_t refcount; 896 atomic_t refcount;
895 struct task_struct *task; 897 struct task_struct *task;
@@ -1063,12 +1065,12 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
1063 } 1065 }
1064} 1066}
1065 1067
1066extern struct jump_label_key perf_sched_events; 1068extern struct jump_label_key_deferred perf_sched_events;
1067 1069
1068static inline void perf_event_task_sched_in(struct task_struct *prev, 1070static inline void perf_event_task_sched_in(struct task_struct *prev,
1069 struct task_struct *task) 1071 struct task_struct *task)
1070{ 1072{
1071 if (static_branch(&perf_sched_events)) 1073 if (static_branch(&perf_sched_events.key))
1072 __perf_event_task_sched_in(prev, task); 1074 __perf_event_task_sched_in(prev, task);
1073} 1075}
1074 1076
@@ -1077,7 +1079,7 @@ static inline void perf_event_task_sched_out(struct task_struct *prev,
1077{ 1079{
1078 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0); 1080 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
1079 1081
1080 if (static_branch(&perf_sched_events)) 1082 if (static_branch(&perf_sched_events.key))
1081 __perf_event_task_sched_out(prev, next); 1083 __perf_event_task_sched_out(prev, next);
1082} 1084}
1083 1085
diff --git a/include/linux/phonet.h b/include/linux/phonet.h
index f53a4167c5f4..f48bfc80cb4b 100644
--- a/include/linux/phonet.h
+++ b/include/linux/phonet.h
@@ -38,6 +38,7 @@
38#define PNPIPE_ENCAP 1 38#define PNPIPE_ENCAP 1
39#define PNPIPE_IFINDEX 2 39#define PNPIPE_IFINDEX 2
40#define PNPIPE_HANDLE 3 40#define PNPIPE_HANDLE 3
41#define PNPIPE_INITSTATE 4
41 42
42#define PNADDR_ANY 0 43#define PNADDR_ANY 0
43#define PNADDR_BROADCAST 0xFC 44#define PNADDR_BROADCAST 0xFC
@@ -49,6 +50,7 @@
49 50
50/* ioctls */ 51/* ioctls */
51#define SIOCPNGETOBJECT (SIOCPROTOPRIVATE + 0) 52#define SIOCPNGETOBJECT (SIOCPROTOPRIVATE + 0)
53#define SIOCPNENABLEPIPE (SIOCPROTOPRIVATE + 13)
52#define SIOCPNADDRESOURCE (SIOCPROTOPRIVATE + 14) 54#define SIOCPNADDRESOURCE (SIOCPROTOPRIVATE + 14)
53#define SIOCPNDELRESOURCE (SIOCPROTOPRIVATE + 15) 55#define SIOCPNDELRESOURCE (SIOCPROTOPRIVATE + 15)
54 56
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index 7281d5acf2f9..8f1b928f777c 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -162,25 +162,24 @@ struct tc_sfq_qopt {
162 unsigned flows; /* Maximal number of flows */ 162 unsigned flows; /* Maximal number of flows */
163}; 163};
164 164
165struct tc_sfq_qopt_v1 {
166 struct tc_sfq_qopt v0;
167 unsigned int depth; /* max number of packets per flow */
168 unsigned int headdrop;
169};
170
171
165struct tc_sfq_xstats { 172struct tc_sfq_xstats {
166 __s32 allot; 173 __s32 allot;
167}; 174};
168 175
169/*
170 * NOTE: limit, divisor and flows are hardwired to code at the moment.
171 *
172 * limit=flows=128, divisor=1024;
173 *
174 * The only reason for this is efficiency, it is possible
175 * to change these parameters in compile time.
176 */
177
178/* RED section */ 176/* RED section */
179 177
180enum { 178enum {
181 TCA_RED_UNSPEC, 179 TCA_RED_UNSPEC,
182 TCA_RED_PARMS, 180 TCA_RED_PARMS,
183 TCA_RED_STAB, 181 TCA_RED_STAB,
182 TCA_RED_MAX_P,
184 __TCA_RED_MAX, 183 __TCA_RED_MAX,
185}; 184};
186 185
@@ -194,8 +193,9 @@ struct tc_red_qopt {
194 unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ 193 unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
195 unsigned char Scell_log; /* cell size for idle damping */ 194 unsigned char Scell_log; /* cell size for idle damping */
196 unsigned char flags; 195 unsigned char flags;
197#define TC_RED_ECN 1 196#define TC_RED_ECN 1
198#define TC_RED_HARDDROP 2 197#define TC_RED_HARDDROP 2
198#define TC_RED_ADAPTATIVE 4
199}; 199};
200 200
201struct tc_red_xstats { 201struct tc_red_xstats {
@@ -214,6 +214,7 @@ enum {
214 TCA_GRED_PARMS, 214 TCA_GRED_PARMS,
215 TCA_GRED_STAB, 215 TCA_GRED_STAB,
216 TCA_GRED_DPS, 216 TCA_GRED_DPS,
217 TCA_GRED_MAX_P,
217 __TCA_GRED_MAX, 218 __TCA_GRED_MAX,
218}; 219};
219 220
@@ -253,6 +254,7 @@ enum {
253 TCA_CHOKE_UNSPEC, 254 TCA_CHOKE_UNSPEC,
254 TCA_CHOKE_PARMS, 255 TCA_CHOKE_PARMS,
255 TCA_CHOKE_STAB, 256 TCA_CHOKE_STAB,
257 TCA_CHOKE_MAX_P,
256 __TCA_CHOKE_MAX, 258 __TCA_CHOKE_MAX,
257}; 259};
258 260
@@ -465,6 +467,7 @@ enum {
465 TCA_NETEM_REORDER, 467 TCA_NETEM_REORDER,
466 TCA_NETEM_CORRUPT, 468 TCA_NETEM_CORRUPT,
467 TCA_NETEM_LOSS, 469 TCA_NETEM_LOSS,
470 TCA_NETEM_RATE,
468 __TCA_NETEM_MAX, 471 __TCA_NETEM_MAX,
469}; 472};
470 473
@@ -495,6 +498,13 @@ struct tc_netem_corrupt {
495 __u32 correlation; 498 __u32 correlation;
496}; 499};
497 500
501struct tc_netem_rate {
502 __u32 rate; /* byte/s */
503 __s32 packet_overhead;
504 __u32 cell_size;
505 __s32 cell_overhead;
506};
507
498enum { 508enum {
499 NETEM_LOSS_UNSPEC, 509 NETEM_LOSS_UNSPEC,
500 NETEM_LOSS_GI, /* General Intuitive - 4 state model */ 510 NETEM_LOSS_GI, /* General Intuitive - 4 state model */
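The new struct tc_netem_rate lets netem emulate a link rate with per-packet and per-cell overheads. The delay charged to one packet is roughly the arithmetic below; this is a sketch of the idea, not the exact kernel helper, and it assumes the configured overheads keep the total non-negative.

#include <linux/types.h>
#include <linux/math64.h>
#include <linux/time.h>
#include <linux/pkt_sched.h>

/* Approximate time, in nanoseconds, to transmit one packet of 'len' bytes. */
static u64 example_packet_time_ns(unsigned int len,
				  const struct tc_netem_rate *r)
{
	u64 bytes = len + r->packet_overhead;

	if (r->cell_size) {
		/* Round up to whole cells, each carrying its own overhead. */
		u64 cells = div64_u64(bytes + r->cell_size - 1, r->cell_size);

		bytes = cells * (r->cell_size + r->cell_overhead);
	}
	return div64_u64(bytes * NSEC_PER_SEC, r->rate);
}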
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 2a23f7d1a825..5622fa24e97b 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -63,7 +63,7 @@ struct platform_device_info {
63 u64 dma_mask; 63 u64 dma_mask;
64}; 64};
65extern struct platform_device *platform_device_register_full( 65extern struct platform_device *platform_device_register_full(
66 struct platform_device_info *pdevinfo); 66 const struct platform_device_info *pdevinfo);
67 67
68/** 68/**
69 * platform_device_register_resndata - add a platform-level device with 69 * platform_device_register_resndata - add a platform-level device with
@@ -196,16 +196,8 @@ static inline void platform_set_drvdata(struct platform_device *pdev, void *data
196 * calling it replaces module_init() and module_exit() 196 * calling it replaces module_init() and module_exit()
197 */ 197 */
198#define module_platform_driver(__platform_driver) \ 198#define module_platform_driver(__platform_driver) \
199static int __init __platform_driver##_init(void) \ 199 module_driver(__platform_driver, platform_driver_register, \
200{ \ 200 platform_driver_unregister)
201 return platform_driver_register(&(__platform_driver)); \
202} \
203module_init(__platform_driver##_init); \
204static void __exit __platform_driver##_exit(void) \
205{ \
206 platform_driver_unregister(&(__platform_driver)); \
207} \
208module_exit(__platform_driver##_exit);
209 201
210extern struct platform_device *platform_create_bundle(struct platform_driver *driver, 202extern struct platform_device *platform_create_bundle(struct platform_driver *driver,
211 int (*probe)(struct platform_device *), 203 int (*probe)(struct platform_device *),
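The new module_platform_driver() helper collapses the init/exit boilerplate removed above into one line. A minimal sketch of a driver using it; the "example" name and the empty callbacks are placeholders, not part of this patch.

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "probed\n");
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.remove	= example_remove,
	.driver	= {
		.name	= "example",
		.owner	= THIS_MODULE,
	},
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");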
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 79159de0e341..2110a81c5e2a 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -40,12 +40,6 @@
40#define RED_INACTIVE 0x09F911029D74E35BULL /* when obj is inactive */ 40#define RED_INACTIVE 0x09F911029D74E35BULL /* when obj is inactive */
41#define RED_ACTIVE 0xD84156C5635688C0ULL /* when obj is active */ 41#define RED_ACTIVE 0xD84156C5635688C0ULL /* when obj is active */
42 42
43#ifdef CONFIG_PHYS_ADDR_T_64BIT
44#define MEMBLOCK_INACTIVE 0x3a84fb0144c9e71bULL
45#else
46#define MEMBLOCK_INACTIVE 0x44c9e71bUL
47#endif
48
49#define SLUB_RED_INACTIVE 0xbb 43#define SLUB_RED_INACTIVE 0xbb
50#define SLUB_RED_ACTIVE 0xcc 44#define SLUB_RED_ACTIVE 0xcc
51 45
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index 2ca8cde5459d..e1461e143be2 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -22,6 +22,9 @@
22#ifndef _LINUX_PSTORE_H 22#ifndef _LINUX_PSTORE_H
23#define _LINUX_PSTORE_H 23#define _LINUX_PSTORE_H
24 24
25#include <linux/time.h>
26#include <linux/kmsg_dump.h>
27
25/* types */ 28/* types */
26enum pstore_type_id { 29enum pstore_type_id {
27 PSTORE_TYPE_DMESG = 0, 30 PSTORE_TYPE_DMESG = 0,
@@ -41,7 +44,8 @@ struct pstore_info {
41 ssize_t (*read)(u64 *id, enum pstore_type_id *type, 44 ssize_t (*read)(u64 *id, enum pstore_type_id *type,
42 struct timespec *time, char **buf, 45 struct timespec *time, char **buf,
43 struct pstore_info *psi); 46 struct pstore_info *psi);
44 int (*write)(enum pstore_type_id type, u64 *id, 47 int (*write)(enum pstore_type_id type,
48 enum kmsg_dump_reason reason, u64 *id,
45 unsigned int part, size_t size, struct pstore_info *psi); 49 unsigned int part, size_t size, struct pstore_info *psi);
46 int (*erase)(enum pstore_type_id type, u64 id, 50 int (*erase)(enum pstore_type_id type, u64 id,
47 struct pstore_info *psi); 51 struct pstore_info *psi);
@@ -50,18 +54,12 @@ struct pstore_info {
50 54
51#ifdef CONFIG_PSTORE 55#ifdef CONFIG_PSTORE
52extern int pstore_register(struct pstore_info *); 56extern int pstore_register(struct pstore_info *);
53extern int pstore_write(enum pstore_type_id type, char *buf, size_t size);
54#else 57#else
55static inline int 58static inline int
56pstore_register(struct pstore_info *psi) 59pstore_register(struct pstore_info *psi)
57{ 60{
58 return -ENODEV; 61 return -ENODEV;
59} 62}
60static inline int
61pstore_write(enum pstore_type_id type, char *buf, size_t size)
62{
63 return -ENODEV;
64}
65#endif 63#endif
66 64
67#endif /*_LINUX_PSTORE_H*/ 65#endif /*_LINUX_PSTORE_H*/
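With the kmsg_dump_reason argument added to ->write(), a backend can decide per dump whether to persist anything. A hedged sketch of a backend write hook; the example_* names are hypothetical, and it relies on the data to be written sitting in the backend's psi->buf, as the pstore core arranges in this era.

#include <linux/errno.h>
#include <linux/kmsg_dump.h>
#include <linux/pstore.h>

static int example_pstore_write(enum pstore_type_id type,
				enum kmsg_dump_reason reason,
				u64 *id, unsigned int part,
				size_t size, struct pstore_info *psi)
{
	/* This hypothetical backend only keeps real crash output. */
	if (reason != KMSG_DUMP_OOPS && reason != KMSG_DUMP_PANIC)
		return -EINVAL;

	/* ... copy the 'size' bytes in psi->buf to the backing store ... */

	*id = part;
	return 0;
}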
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 2cf4226ade7e..81c04f4348ec 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -51,6 +51,8 @@ extern int rcutorture_runnable; /* for sysctl */
51#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) 51#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
52extern void rcutorture_record_test_transition(void); 52extern void rcutorture_record_test_transition(void);
53extern void rcutorture_record_progress(unsigned long vernum); 53extern void rcutorture_record_progress(unsigned long vernum);
54extern void do_trace_rcu_torture_read(char *rcutorturename,
55 struct rcu_head *rhp);
54#else 56#else
55static inline void rcutorture_record_test_transition(void) 57static inline void rcutorture_record_test_transition(void)
56{ 58{
@@ -58,6 +60,12 @@ static inline void rcutorture_record_test_transition(void)
58static inline void rcutorture_record_progress(unsigned long vernum) 60static inline void rcutorture_record_progress(unsigned long vernum)
59{ 61{
60} 62}
63#ifdef CONFIG_RCU_TRACE
64extern void do_trace_rcu_torture_read(char *rcutorturename,
65 struct rcu_head *rhp);
66#else
67#define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
68#endif
61#endif 69#endif
62 70
63#define UINT_CMP_GE(a, b) (UINT_MAX / 2 >= (a) - (b)) 71#define UINT_CMP_GE(a, b) (UINT_MAX / 2 >= (a) - (b))
@@ -177,23 +185,10 @@ extern void rcu_sched_qs(int cpu);
177extern void rcu_bh_qs(int cpu); 185extern void rcu_bh_qs(int cpu);
178extern void rcu_check_callbacks(int cpu, int user); 186extern void rcu_check_callbacks(int cpu, int user);
179struct notifier_block; 187struct notifier_block;
180 188extern void rcu_idle_enter(void);
181#ifdef CONFIG_NO_HZ 189extern void rcu_idle_exit(void);
182 190extern void rcu_irq_enter(void);
183extern void rcu_enter_nohz(void); 191extern void rcu_irq_exit(void);
184extern void rcu_exit_nohz(void);
185
186#else /* #ifdef CONFIG_NO_HZ */
187
188static inline void rcu_enter_nohz(void)
189{
190}
191
192static inline void rcu_exit_nohz(void)
193{
194}
195
196#endif /* #else #ifdef CONFIG_NO_HZ */
197 192
198/* 193/*
199 * Infrastructure to implement the synchronize_() primitives in 194 * Infrastructure to implement the synchronize_() primitives in
@@ -233,22 +228,30 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
233 228
234#ifdef CONFIG_DEBUG_LOCK_ALLOC 229#ifdef CONFIG_DEBUG_LOCK_ALLOC
235 230
236extern struct lockdep_map rcu_lock_map; 231#ifdef CONFIG_PROVE_RCU
237# define rcu_read_acquire() \ 232extern int rcu_is_cpu_idle(void);
238 lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) 233#else /* !CONFIG_PROVE_RCU */
239# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) 234static inline int rcu_is_cpu_idle(void)
235{
236 return 0;
237}
238#endif /* else !CONFIG_PROVE_RCU */
240 239
241extern struct lockdep_map rcu_bh_lock_map; 240static inline void rcu_lock_acquire(struct lockdep_map *map)
242# define rcu_read_acquire_bh() \ 241{
243 lock_acquire(&rcu_bh_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) 242 WARN_ON_ONCE(rcu_is_cpu_idle());
244# define rcu_read_release_bh() lock_release(&rcu_bh_lock_map, 1, _THIS_IP_) 243 lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_);
244}
245 245
246extern struct lockdep_map rcu_sched_lock_map; 246static inline void rcu_lock_release(struct lockdep_map *map)
247# define rcu_read_acquire_sched() \ 247{
248 lock_acquire(&rcu_sched_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) 248 WARN_ON_ONCE(rcu_is_cpu_idle());
249# define rcu_read_release_sched() \ 249 lock_release(map, 1, _THIS_IP_);
250 lock_release(&rcu_sched_lock_map, 1, _THIS_IP_) 250}
251 251
252extern struct lockdep_map rcu_lock_map;
253extern struct lockdep_map rcu_bh_lock_map;
254extern struct lockdep_map rcu_sched_lock_map;
252extern int debug_lockdep_rcu_enabled(void); 255extern int debug_lockdep_rcu_enabled(void);
253 256
254/** 257/**
@@ -262,11 +265,18 @@ extern int debug_lockdep_rcu_enabled(void);
262 * 265 *
263 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot 266 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
264 * and while lockdep is disabled. 267 * and while lockdep is disabled.
268 *
269 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
270 * occur in the same context, for example, it is illegal to invoke
271 * rcu_read_unlock() in process context if the matching rcu_read_lock()
272 * was invoked from within an irq handler.
265 */ 273 */
266static inline int rcu_read_lock_held(void) 274static inline int rcu_read_lock_held(void)
267{ 275{
268 if (!debug_lockdep_rcu_enabled()) 276 if (!debug_lockdep_rcu_enabled())
269 return 1; 277 return 1;
278 if (rcu_is_cpu_idle())
279 return 0;
270 return lock_is_held(&rcu_lock_map); 280 return lock_is_held(&rcu_lock_map);
271} 281}
272 282
@@ -290,6 +300,19 @@ extern int rcu_read_lock_bh_held(void);
290 * 300 *
291 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot 301 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
292 * and while lockdep is disabled. 302 * and while lockdep is disabled.
303 *
304 * Note that if the CPU is in the idle loop from an RCU point of
305 * view (i.e., that we are in the section between rcu_idle_enter() and
306 * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
307 * did an rcu_read_lock(). The reason for this is that RCU ignores CPUs
308 * that are in such a section, considering these as in extended quiescent
309 * state, so such a CPU is effectively never in an RCU read-side critical
310 * section regardless of what RCU primitives it invokes. This state of
311 * affairs is required --- we need to keep an RCU-free window in idle
312 * where the CPU may possibly enter into low power mode. This way we can
313 * report an extended quiescent state to other CPUs that started a grace
314 * period. Otherwise we would delay any grace period as long as we run in
315 * the idle task.
293 */ 316 */
294#ifdef CONFIG_PREEMPT_COUNT 317#ifdef CONFIG_PREEMPT_COUNT
295static inline int rcu_read_lock_sched_held(void) 318static inline int rcu_read_lock_sched_held(void)
@@ -298,6 +321,8 @@ static inline int rcu_read_lock_sched_held(void)
298 321
299 if (!debug_lockdep_rcu_enabled()) 322 if (!debug_lockdep_rcu_enabled())
300 return 1; 323 return 1;
324 if (rcu_is_cpu_idle())
325 return 0;
301 if (debug_locks) 326 if (debug_locks)
302 lockdep_opinion = lock_is_held(&rcu_sched_lock_map); 327 lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
303 return lockdep_opinion || preempt_count() != 0 || irqs_disabled(); 328 return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
@@ -311,12 +336,8 @@ static inline int rcu_read_lock_sched_held(void)
311 336
312#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 337#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
313 338
314# define rcu_read_acquire() do { } while (0) 339# define rcu_lock_acquire(a) do { } while (0)
315# define rcu_read_release() do { } while (0) 340# define rcu_lock_release(a) do { } while (0)
316# define rcu_read_acquire_bh() do { } while (0)
317# define rcu_read_release_bh() do { } while (0)
318# define rcu_read_acquire_sched() do { } while (0)
319# define rcu_read_release_sched() do { } while (0)
320 341
321static inline int rcu_read_lock_held(void) 342static inline int rcu_read_lock_held(void)
322{ 343{
@@ -637,7 +658,7 @@ static inline void rcu_read_lock(void)
637{ 658{
638 __rcu_read_lock(); 659 __rcu_read_lock();
639 __acquire(RCU); 660 __acquire(RCU);
640 rcu_read_acquire(); 661 rcu_lock_acquire(&rcu_lock_map);
641} 662}
642 663
643/* 664/*
@@ -657,7 +678,7 @@ static inline void rcu_read_lock(void)
657 */ 678 */
658static inline void rcu_read_unlock(void) 679static inline void rcu_read_unlock(void)
659{ 680{
660 rcu_read_release(); 681 rcu_lock_release(&rcu_lock_map);
661 __release(RCU); 682 __release(RCU);
662 __rcu_read_unlock(); 683 __rcu_read_unlock();
663} 684}
@@ -673,12 +694,17 @@ static inline void rcu_read_unlock(void)
673 * critical sections in interrupt context can use just rcu_read_lock(), 694 * critical sections in interrupt context can use just rcu_read_lock(),
674 * though this should at least be commented to avoid confusing people 695 * though this should at least be commented to avoid confusing people
675 * reading the code. 696 * reading the code.
697 *
698 * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
699 * must occur in the same context, for example, it is illegal to invoke
700 * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh()
701 * was invoked from some other task.
676 */ 702 */
677static inline void rcu_read_lock_bh(void) 703static inline void rcu_read_lock_bh(void)
678{ 704{
679 local_bh_disable(); 705 local_bh_disable();
680 __acquire(RCU_BH); 706 __acquire(RCU_BH);
681 rcu_read_acquire_bh(); 707 rcu_lock_acquire(&rcu_bh_lock_map);
682} 708}
683 709
684/* 710/*
@@ -688,7 +714,7 @@ static inline void rcu_read_lock_bh(void)
688 */ 714 */
689static inline void rcu_read_unlock_bh(void) 715static inline void rcu_read_unlock_bh(void)
690{ 716{
691 rcu_read_release_bh(); 717 rcu_lock_release(&rcu_bh_lock_map);
692 __release(RCU_BH); 718 __release(RCU_BH);
693 local_bh_enable(); 719 local_bh_enable();
694} 720}
@@ -700,12 +726,17 @@ static inline void rcu_read_unlock_bh(void)
700 * are being done using call_rcu_sched() or synchronize_rcu_sched(). 726 * are being done using call_rcu_sched() or synchronize_rcu_sched().
701 * Read-side critical sections can also be introduced by anything that 727 * Read-side critical sections can also be introduced by anything that
702 * disables preemption, including local_irq_disable() and friends. 728 * disables preemption, including local_irq_disable() and friends.
729 *
730 * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
731 * must occur in the same context, for example, it is illegal to invoke
732 * rcu_read_unlock_sched() from process context if the matching
733 * rcu_read_lock_sched() was invoked from an NMI handler.
703 */ 734 */
704static inline void rcu_read_lock_sched(void) 735static inline void rcu_read_lock_sched(void)
705{ 736{
706 preempt_disable(); 737 preempt_disable();
707 __acquire(RCU_SCHED); 738 __acquire(RCU_SCHED);
708 rcu_read_acquire_sched(); 739 rcu_lock_acquire(&rcu_sched_lock_map);
709} 740}
710 741
711/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ 742/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
@@ -722,7 +753,7 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
722 */ 753 */
723static inline void rcu_read_unlock_sched(void) 754static inline void rcu_read_unlock_sched(void)
724{ 755{
725 rcu_read_release_sched(); 756 rcu_lock_release(&rcu_sched_lock_map);
726 __release(RCU_SCHED); 757 __release(RCU_SCHED);
727 preempt_enable(); 758 preempt_enable();
728} 759}
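The rcu_lock_acquire()/rcu_lock_release() helpers above are internal lockdep plumbing; readers keep using the same API. For reference, a minimal RCU read-side sketch that these annotations now cover; the example_cfg structure is made up.

#include <linux/rcupdate.h>

struct example_cfg {
	int threshold;
};

static struct example_cfg __rcu *example_cfg;

static int example_read_threshold(void)
{
	struct example_cfg *cfg;
	int val = 0;

	rcu_read_lock();		/* annotated via rcu_lock_acquire() */
	cfg = rcu_dereference(example_cfg);
	if (cfg)
		val = cfg->threshold;
	rcu_read_unlock();		/* annotated via rcu_lock_release() */

	return val;
}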
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1c4f3e9b9bc5..cf0eb342bcba 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -273,9 +273,11 @@ extern int runqueue_is_locked(int cpu);
273 273
274#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) 274#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
275extern void select_nohz_load_balancer(int stop_tick); 275extern void select_nohz_load_balancer(int stop_tick);
276extern void set_cpu_sd_state_idle(void);
276extern int get_nohz_timer_target(void); 277extern int get_nohz_timer_target(void);
277#else 278#else
278static inline void select_nohz_load_balancer(int stop_tick) { } 279static inline void select_nohz_load_balancer(int stop_tick) { }
280static inline void set_cpu_sd_state_idle(void) { }
279#endif 281#endif
280 282
281/* 283/*
@@ -483,8 +485,8 @@ struct task_cputime {
483 485
484#define INIT_CPUTIME \ 486#define INIT_CPUTIME \
485 (struct task_cputime) { \ 487 (struct task_cputime) { \
486 .utime = cputime_zero, \ 488 .utime = 0, \
487 .stime = cputime_zero, \ 489 .stime = 0, \
488 .sum_exec_runtime = 0, \ 490 .sum_exec_runtime = 0, \
489 } 491 }
490 492
@@ -901,6 +903,10 @@ struct sched_group_power {
901 * single CPU. 903 * single CPU.
902 */ 904 */
903 unsigned int power, power_orig; 905 unsigned int power, power_orig;
906 /*
907 * Number of busy cpus in this group.
908 */
909 atomic_t nr_busy_cpus;
904}; 910};
905 911
906struct sched_group { 912struct sched_group {
@@ -925,6 +931,15 @@ static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
925 return to_cpumask(sg->cpumask); 931 return to_cpumask(sg->cpumask);
926} 932}
927 933
934/**
935 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
936 * @group: The group whose first cpu is to be returned.
937 */
938static inline unsigned int group_first_cpu(struct sched_group *group)
939{
940 return cpumask_first(sched_group_cpus(group));
941}
942
928struct sched_domain_attr { 943struct sched_domain_attr {
929 int relax_domain_level; 944 int relax_domain_level;
930}; 945};
@@ -1315,8 +1330,8 @@ struct task_struct {
1315 * older sibling, respectively. (p->father can be replaced with 1330 * older sibling, respectively. (p->father can be replaced with
1316 * p->real_parent->pid) 1331 * p->real_parent->pid)
1317 */ 1332 */
1318 struct task_struct *real_parent; /* real parent process */ 1333 struct task_struct __rcu *real_parent; /* real parent process */
1319 struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */ 1334 struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
1320 /* 1335 /*
1321 * children/sibling forms the list of my natural children 1336 * children/sibling forms the list of my natural children
1322 */ 1337 */
@@ -2070,6 +2085,14 @@ extern int sched_setscheduler(struct task_struct *, int,
2070extern int sched_setscheduler_nocheck(struct task_struct *, int, 2085extern int sched_setscheduler_nocheck(struct task_struct *, int,
2071 const struct sched_param *); 2086 const struct sched_param *);
2072extern struct task_struct *idle_task(int cpu); 2087extern struct task_struct *idle_task(int cpu);
2088/**
2089 * is_idle_task - is the specified task an idle task?
2090 * @p: the task in question.
2091 */
2092static inline bool is_idle_task(struct task_struct *p)
2093{
2094 return p->pid == 0;
2095}
2073extern struct task_struct *curr_task(int cpu); 2096extern struct task_struct *curr_task(int cpu);
2074extern void set_curr_task(int cpu, struct task_struct *p); 2097extern void set_curr_task(int cpu, struct task_struct *p);
2075 2098
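The new is_idle_task() helper replaces open-coded pid comparisons. A tiny usage sketch, e.g. in accounting or diagnostic code that must skip the idle task; the policy shown is hypothetical.

#include <linux/sched.h>

static bool example_should_account(struct task_struct *p)
{
	/* Idle tasks (pid 0) are never charged under this example policy. */
	return !is_idle_task(p);
}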
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index fe864885c1ed..50db9b04a552 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -30,6 +30,7 @@
30#include <linux/dmaengine.h> 30#include <linux/dmaengine.h>
31#include <linux/hrtimer.h> 31#include <linux/hrtimer.h>
32#include <linux/dma-mapping.h> 32#include <linux/dma-mapping.h>
33#include <linux/netdev_features.h>
33 34
34/* Don't change this without changing skb_csum_unnecessary! */ 35/* Don't change this without changing skb_csum_unnecessary! */
35#define CHECKSUM_NONE 0 36#define CHECKSUM_NONE 0
@@ -87,7 +88,6 @@
87 * at device setup time. 88 * at device setup time.
88 * NETIF_F_HW_CSUM - it is clever device, it is able to checksum 89 * NETIF_F_HW_CSUM - it is clever device, it is able to checksum
89 * everything. 90 * everything.
90 * NETIF_F_NO_CSUM - loopback or reliable single hop media.
91 * NETIF_F_IP_CSUM - device is dumb. It is able to csum only 91 * NETIF_F_IP_CSUM - device is dumb. It is able to csum only
92 * TCP/UDP over IPv4. Sigh. Vendors like this 92 * TCP/UDP over IPv4. Sigh. Vendors like this
93 * way by an unknown reason. Though, see comment above 93 * way by an unknown reason. Though, see comment above
@@ -128,13 +128,17 @@ struct sk_buff_head {
128 128
129struct sk_buff; 129struct sk_buff;
130 130
131/* To allow 64K frame to be packed as single skb without frag_list. Since 131/* To allow 64K frame to be packed as single skb without frag_list we
132 * GRO uses frags we allocate at least 16 regardless of page size. 132 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
133 * buffers which do not start on a page boundary.
134 *
135 * Since GRO uses frags we allocate at least 16 regardless of page
136 * size.
133 */ 137 */
134#if (65536/PAGE_SIZE + 2) < 16 138#if (65536/PAGE_SIZE + 1) < 16
135#define MAX_SKB_FRAGS 16UL 139#define MAX_SKB_FRAGS 16UL
136#else 140#else
137#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2) 141#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
138#endif 142#endif
139 143
140typedef struct skb_frag_struct skb_frag_t; 144typedef struct skb_frag_struct skb_frag_t;
@@ -218,6 +222,9 @@ enum {
218 222
219 /* device driver supports TX zero-copy buffers */ 223 /* device driver supports TX zero-copy buffers */
220 SKBTX_DEV_ZEROCOPY = 1 << 4, 224 SKBTX_DEV_ZEROCOPY = 1 << 4,
225
226 /* generate wifi status information (where possible) */
227 SKBTX_WIFI_STATUS = 1 << 5,
221}; 228};
222 229
223/* 230/*
@@ -235,15 +242,15 @@ struct ubuf_info {
235 * the end of the header data, ie. at skb->end. 242 * the end of the header data, ie. at skb->end.
236 */ 243 */
237struct skb_shared_info { 244struct skb_shared_info {
238 unsigned short nr_frags; 245 unsigned char nr_frags;
246 __u8 tx_flags;
239 unsigned short gso_size; 247 unsigned short gso_size;
240 /* Warning: this field is not always filled in (UFO)! */ 248 /* Warning: this field is not always filled in (UFO)! */
241 unsigned short gso_segs; 249 unsigned short gso_segs;
242 unsigned short gso_type; 250 unsigned short gso_type;
243 __be32 ip6_frag_id;
244 __u8 tx_flags;
245 struct sk_buff *frag_list; 251 struct sk_buff *frag_list;
246 struct skb_shared_hwtstamps hwtstamps; 252 struct skb_shared_hwtstamps hwtstamps;
253 __be32 ip6_frag_id;
247 254
248 /* 255 /*
249 * Warning : all fields before dataref are cleared in __alloc_skb() 256 * Warning : all fields before dataref are cleared in __alloc_skb()
@@ -352,6 +359,8 @@ typedef unsigned char *sk_buff_data_t;
352 * @ooo_okay: allow the mapping of a socket to a queue to be changed 359 * @ooo_okay: allow the mapping of a socket to a queue to be changed
353 * @l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport 360 * @l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport
354 * ports. 361 * ports.
362 * @wifi_acked_valid: wifi_acked was set
363 * @wifi_acked: whether frame was acked on wifi or not
355 * @dma_cookie: a cookie to one of several possible DMA operations 364 * @dma_cookie: a cookie to one of several possible DMA operations
356 * done by skb DMA functions 365 * done by skb DMA functions
357 * @secmark: security marking 366 * @secmark: security marking
@@ -445,10 +454,11 @@ struct sk_buff {
445#endif 454#endif
446 __u8 ooo_okay:1; 455 __u8 ooo_okay:1;
447 __u8 l4_rxhash:1; 456 __u8 l4_rxhash:1;
457 __u8 wifi_acked_valid:1;
458 __u8 wifi_acked:1;
459 /* 10/12 bit hole (depending on ndisc_nodetype presence) */
448 kmemcheck_bitfield_end(flags2); 460 kmemcheck_bitfield_end(flags2);
449 461
450 /* 0/13 bit hole */
451
452#ifdef CONFIG_NET_DMA 462#ifdef CONFIG_NET_DMA
453 dma_cookie_t dma_cookie; 463 dma_cookie_t dma_cookie;
454#endif 464#endif
@@ -540,6 +550,7 @@ extern void consume_skb(struct sk_buff *skb);
540extern void __kfree_skb(struct sk_buff *skb); 550extern void __kfree_skb(struct sk_buff *skb);
541extern struct sk_buff *__alloc_skb(unsigned int size, 551extern struct sk_buff *__alloc_skb(unsigned int size,
542 gfp_t priority, int fclone, int node); 552 gfp_t priority, int fclone, int node);
553extern struct sk_buff *build_skb(void *data);
543static inline struct sk_buff *alloc_skb(unsigned int size, 554static inline struct sk_buff *alloc_skb(unsigned int size,
544 gfp_t priority) 555 gfp_t priority)
545{ 556{
@@ -561,8 +572,9 @@ extern struct sk_buff *skb_clone(struct sk_buff *skb,
561 gfp_t priority); 572 gfp_t priority);
562extern struct sk_buff *skb_copy(const struct sk_buff *skb, 573extern struct sk_buff *skb_copy(const struct sk_buff *skb,
563 gfp_t priority); 574 gfp_t priority);
564extern struct sk_buff *pskb_copy(struct sk_buff *skb, 575extern struct sk_buff *__pskb_copy(struct sk_buff *skb,
565 gfp_t gfp_mask); 576 int headroom, gfp_t gfp_mask);
577
566extern int pskb_expand_head(struct sk_buff *skb, 578extern int pskb_expand_head(struct sk_buff *skb,
567 int nhead, int ntail, 579 int nhead, int ntail,
568 gfp_t gfp_mask); 580 gfp_t gfp_mask);
@@ -1662,38 +1674,6 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
1662} 1674}
1663 1675
1664/** 1676/**
1665 * __netdev_alloc_page - allocate a page for ps-rx on a specific device
1666 * @dev: network device to receive on
1667 * @gfp_mask: alloc_pages_node mask
1668 *
1669 * Allocate a new page. dev currently unused.
1670 *
1671 * %NULL is returned if there is no free memory.
1672 */
1673static inline struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
1674{
1675 return alloc_pages_node(NUMA_NO_NODE, gfp_mask, 0);
1676}
1677
1678/**
1679 * netdev_alloc_page - allocate a page for ps-rx on a specific device
1680 * @dev: network device to receive on
1681 *
1682 * Allocate a new page. dev currently unused.
1683 *
1684 * %NULL is returned if there is no free memory.
1685 */
1686static inline struct page *netdev_alloc_page(struct net_device *dev)
1687{
1688 return __netdev_alloc_page(dev, GFP_ATOMIC);
1689}
1690
1691static inline void netdev_free_page(struct net_device *dev, struct page *page)
1692{
1693 __free_page(page);
1694}
1695
1696/**
1697 * skb_frag_page - retrieve the page referred to by a paged fragment 1677 * skb_frag_page - retrieve the page referred to by a paged fragment
1698 * @frag: the paged fragment 1678 * @frag: the paged fragment
1699 * 1679 *
@@ -1824,6 +1804,12 @@ static inline dma_addr_t skb_frag_dma_map(struct device *dev,
1824 frag->page_offset + offset, size, dir); 1804 frag->page_offset + offset, size, dir);
1825} 1805}
1826 1806
1807static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
1808 gfp_t gfp_mask)
1809{
1810 return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
1811}
1812
1827/** 1813/**
1828 * skb_clone_writable - is the header of a clone writable 1814 * skb_clone_writable - is the header of a clone writable
1829 * @skb: buffer to check 1815 * @skb: buffer to check
@@ -2105,7 +2091,8 @@ extern void skb_split(struct sk_buff *skb,
2105extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, 2091extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
2106 int shiftlen); 2092 int shiftlen);
2107 2093
2108extern struct sk_buff *skb_segment(struct sk_buff *skb, u32 features); 2094extern struct sk_buff *skb_segment(struct sk_buff *skb,
2095 netdev_features_t features);
2109 2096
2110static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, 2097static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
2111 int len, void *buffer) 2098 int len, void *buffer)
@@ -2263,6 +2250,15 @@ static inline void skb_tx_timestamp(struct sk_buff *skb)
2263 sw_tx_timestamp(skb); 2250 sw_tx_timestamp(skb);
2264} 2251}
2265 2252
2253/**
2254 * skb_complete_wifi_ack - deliver skb with wifi status
2255 *
2256 * @skb: the original outgoing packet
2257 * @acked: ack status
2258 *
2259 */
2260void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
2261
2266extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len); 2262extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
2267extern __sum16 __skb_checksum_complete(struct sk_buff *skb); 2263extern __sum16 __skb_checksum_complete(struct sk_buff *skb);
2268 2264
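To connect the new SKBTX_WIFI_STATUS flag with skb_complete_wifi_ack(), here is a hedged sketch of a driver TX-completion path. The example_tx_done() function and the source of the acked flag are hypothetical; skb_complete_wifi_ack() hands the skb back to the owning socket's error queue, so the caller must not free it afterwards.

#include <linux/skbuff.h>
#include <linux/netdevice.h>

static void example_tx_done(struct sk_buff *skb, bool acked)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS) {
		/* Report the over-the-air ack status to the sender. */
		skb_complete_wifi_ack(skb, acked);
		return;
	}
	dev_kfree_skb_any(skb);
}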
diff --git a/include/linux/smscphy.h b/include/linux/smscphy.h
new file mode 100644
index 000000000000..ce718cbce435
--- /dev/null
+++ b/include/linux/smscphy.h
@@ -0,0 +1,25 @@
1#ifndef __LINUX_SMSCPHY_H__
2#define __LINUX_SMSCPHY_H__
3
4#define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */
5#define MII_LAN83C185_IM 30 /* Interrupt Mask */
6#define MII_LAN83C185_CTRL_STATUS 17 /* Mode/Status Register */
7
8#define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */
9#define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */
10#define MII_LAN83C185_ISF_INT3 (1<<3) /* Auto-Negotiation LP Ack */
11#define MII_LAN83C185_ISF_INT4 (1<<4) /* Link Down */
12#define MII_LAN83C185_ISF_INT5 (1<<5) /* Remote Fault Detected */
13#define MII_LAN83C185_ISF_INT6 (1<<6) /* Auto-Negotiation complete */
14#define MII_LAN83C185_ISF_INT7 (1<<7) /* ENERGYON */
15
16#define MII_LAN83C185_ISF_INT_ALL (0x0e)
17
18#define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \
19 (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4 | \
20 MII_LAN83C185_ISF_INT7)
21
22#define MII_LAN83C185_EDPWRDOWN (1 << 13) /* EDPWRDOWN */
23#define MII_LAN83C185_ENERGYON (1 << 1) /* ENERGYON */
24
25#endif /* __LINUX_SMSCPHY_H__ */
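These register definitions are consumed by the SMSC PHY driver. A hedged sketch of how a phylib driver might use them to enable and acknowledge interrupts; the example_* function names are illustrative only.

#include <linux/phy.h>
#include <linux/smscphy.h>

static int example_config_intr(struct phy_device *phydev)
{
	/* Unmask the link, autoneg-complete and ENERGYON events. */
	return phy_write(phydev, MII_LAN83C185_IM,
			 MII_LAN83C185_ISF_INT_PHYLIB_EVENTS);
}

static int example_ack_interrupt(struct phy_device *phydev)
{
	/* Reading the Interrupt Source Flags register clears pending bits. */
	int rc = phy_read(phydev, MII_LAN83C185_ISF);

	return rc < 0 ? rc : 0;
}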
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
new file mode 100644
index 000000000000..251729a47880
--- /dev/null
+++ b/include/linux/sock_diag.h
@@ -0,0 +1,48 @@
1#ifndef __SOCK_DIAG_H__
2#define __SOCK_DIAG_H__
3
4#include <linux/types.h>
5
6#define SOCK_DIAG_BY_FAMILY 20
7
8struct sock_diag_req {
9 __u8 sdiag_family;
10 __u8 sdiag_protocol;
11};
12
13enum {
14 SK_MEMINFO_RMEM_ALLOC,
15 SK_MEMINFO_RCVBUF,
16 SK_MEMINFO_WMEM_ALLOC,
17 SK_MEMINFO_SNDBUF,
18 SK_MEMINFO_FWD_ALLOC,
19 SK_MEMINFO_WMEM_QUEUED,
20 SK_MEMINFO_OPTMEM,
21
22 SK_MEMINFO_VARS,
23};
24
25#ifdef __KERNEL__
26struct sk_buff;
27struct nlmsghdr;
28struct sock;
29
30struct sock_diag_handler {
31 __u8 family;
32 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
33};
34
35int sock_diag_register(struct sock_diag_handler *h);
36void sock_diag_unregister(struct sock_diag_handler *h);
37
38void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
39void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
40
41int sock_diag_check_cookie(void *sk, __u32 *cookie);
42void sock_diag_save_cookie(void *sk, __u32 *cookie);
43
44int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr);
45
46extern struct sock *sock_diag_nlsk;
47#endif /* KERNEL */
48#endif
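A hedged sketch of how a protocol module might plug into this interface; AF_PACKET is used only as a stand-in family and the dump body is a placeholder, since real handlers reply with protocol-specific diag messages.

#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>
#include <net/netlink.h>

static int example_diag_dump(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct sock_diag_req *req = nlmsg_data(nlh);

	pr_debug("diag request for protocol %u\n", req->sdiag_protocol);
	/* ... walk the family's sockets and send replies here ... */
	return 0;
}

static struct sock_diag_handler example_diag_handler = {
	.family	= AF_PACKET,		/* stand-in family for illustration */
	.dump	= example_diag_dump,
};

static int __init example_diag_init(void)
{
	return sock_diag_register(&example_diag_handler);
}

static void __exit example_diag_exit(void)
{
	sock_diag_unregister(&example_diag_handler);
}

module_init(example_diag_init);
module_exit(example_diag_exit);
MODULE_LICENSE("GPL");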
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index bb4f5fbbbd8e..176fce9cc6b1 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -200,6 +200,17 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
200 driver_unregister(&sdrv->driver); 200 driver_unregister(&sdrv->driver);
201} 201}
202 202
203/**
204 * module_spi_driver() - Helper macro for registering a SPI driver
205 * @__spi_driver: spi_driver struct
206 *
207 * Helper macro for SPI drivers which do not do anything special in module
208 * init/exit. This eliminates a lot of boilerplate. Each module may only
209 * use this macro once, and calling it replaces module_init() and module_exit()
210 */
211#define module_spi_driver(__spi_driver) \
212 module_driver(__spi_driver, spi_register_driver, \
213 spi_unregister_driver)
203 214
204/** 215/**
205 * struct spi_master - interface to SPI master controller 216 * struct spi_master - interface to SPI master controller
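module_spi_driver() mirrors the platform helper added elsewhere in this series. A minimal sketch; all names are placeholders.

#include <linux/module.h>
#include <linux/spi/spi.h>

static int example_spi_probe(struct spi_device *spi)
{
	dev_info(&spi->dev, "probed\n");
	return 0;
}

static int example_spi_remove(struct spi_device *spi)
{
	return 0;
}

static struct spi_driver example_spi_driver = {
	.driver = {
		.name	= "example-spi",
		.owner	= THIS_MODULE,
	},
	.probe	= example_spi_probe,
	.remove	= example_spi_remove,
};
module_spi_driver(example_spi_driver);

MODULE_LICENSE("GPL");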
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 58971e891f48..e1b005918bbb 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -28,6 +28,7 @@
28#define _LINUX_SRCU_H 28#define _LINUX_SRCU_H
29 29
30#include <linux/mutex.h> 30#include <linux/mutex.h>
31#include <linux/rcupdate.h>
31 32
32struct srcu_struct_array { 33struct srcu_struct_array {
33 int c[2]; 34 int c[2];
@@ -60,18 +61,10 @@ int __init_srcu_struct(struct srcu_struct *sp, const char *name,
60 __init_srcu_struct((sp), #sp, &__srcu_key); \ 61 __init_srcu_struct((sp), #sp, &__srcu_key); \
61}) 62})
62 63
63# define srcu_read_acquire(sp) \
64 lock_acquire(&(sp)->dep_map, 0, 0, 2, 1, NULL, _THIS_IP_)
65# define srcu_read_release(sp) \
66 lock_release(&(sp)->dep_map, 1, _THIS_IP_)
67
68#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 64#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
69 65
70int init_srcu_struct(struct srcu_struct *sp); 66int init_srcu_struct(struct srcu_struct *sp);
71 67
72# define srcu_read_acquire(sp) do { } while (0)
73# define srcu_read_release(sp) do { } while (0)
74
75#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 68#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
76 69
77void cleanup_srcu_struct(struct srcu_struct *sp); 70void cleanup_srcu_struct(struct srcu_struct *sp);
@@ -90,12 +83,32 @@ long srcu_batches_completed(struct srcu_struct *sp);
90 * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, 83 * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
91 * this assumes we are in an SRCU read-side critical section unless it can 84 * this assumes we are in an SRCU read-side critical section unless it can
92 * prove otherwise. 85 * prove otherwise.
86 *
87 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
88 * and while lockdep is disabled.
89 *
90 * Note that if the CPU is in the idle loop from an RCU point of view
91 * (i.e., we are in the section between rcu_idle_enter() and
92 * rcu_idle_exit()), then srcu_read_lock_held() returns false even if
93 * the CPU did an srcu_read_lock(). The reason for this is that RCU
94 * ignores CPUs that are in such a section, considering them to be in an
95 * extended quiescent state, so such a CPU is effectively never in an
96 * RCU read-side critical section regardless of what RCU primitives it
97 * invokes. This state of affairs is required: we need to keep an
98 * RCU-free window in idle where the CPU may possibly enter into low
99 * power mode. This way we can report an extended quiescent state to
100 * other CPUs that started a grace period. Otherwise we would delay any
101 * grace period as long as we run in the idle task.
93 */ 102 */
94static inline int srcu_read_lock_held(struct srcu_struct *sp) 103static inline int srcu_read_lock_held(struct srcu_struct *sp)
95{ 104{
96 if (debug_locks) 105 if (rcu_is_cpu_idle())
97 return lock_is_held(&sp->dep_map); 106 return 0;
98 return 1; 107
108 if (!debug_lockdep_rcu_enabled())
109 return 1;
110
111 return lock_is_held(&sp->dep_map);
99} 112}
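The usual consumer of srcu_read_lock_held() is a lockdep condition when dereferencing an SRCU-protected pointer; a small sketch, where gp, its srcu_struct and its mutex are hypothetical:

	p = rcu_dereference_check(gp->ptr,
				  srcu_read_lock_held(&gp->srcu) ||
				  lockdep_is_held(&gp->lock));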
100 113
101#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 114#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
@@ -145,12 +158,17 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp)
145 * one way to indirectly wait on an SRCU grace period is to acquire 158 * one way to indirectly wait on an SRCU grace period is to acquire
146 * a mutex that is held elsewhere while calling synchronize_srcu() or 159 * a mutex that is held elsewhere while calling synchronize_srcu() or
147 * synchronize_srcu_expedited(). 160 * synchronize_srcu_expedited().
161 *
162 * Note that srcu_read_lock() and the matching srcu_read_unlock() must
163 * occur in the same context; for example, it is illegal to invoke
164 * srcu_read_unlock() in an irq handler if the matching srcu_read_lock()
165 * was invoked in process context.
148 */ 166 */
149static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) 167static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
150{ 168{
151 int retval = __srcu_read_lock(sp); 169 int retval = __srcu_read_lock(sp);
152 170
153 srcu_read_acquire(sp); 171 rcu_lock_acquire(&(sp)->dep_map);
154 return retval; 172 return retval;
155} 173}
156 174
@@ -164,8 +182,51 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
164static inline void srcu_read_unlock(struct srcu_struct *sp, int idx) 182static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
165 __releases(sp) 183 __releases(sp)
166{ 184{
167 srcu_read_release(sp); 185 rcu_lock_release(&(sp)->dep_map);
186 __srcu_read_unlock(sp, idx);
187}
188
189/**
190 * srcu_read_lock_raw - register a new reader for an SRCU-protected structure.
191 * @sp: srcu_struct in which to register the new reader.
192 *
193 * Enter an SRCU read-side critical section. Similar to srcu_read_lock(),
194 * but avoids the RCU-lockdep checking. This means that it is legal to
195 * use srcu_read_lock_raw() in one context, for example, in an exception
196 * handler, and then have the matching srcu_read_unlock_raw() in another
197 * context, for example in the task that took the exception.
198 *
199 * However, the entire SRCU read-side critical section must reside within a
200 * single task. For example, beware of using srcu_read_lock_raw() in
201 * a device interrupt handler and srcu_read_unlock_raw() in the interrupted
202 * task: this will not work if interrupts are threaded.
203 */
204static inline int srcu_read_lock_raw(struct srcu_struct *sp)
205{
206 unsigned long flags;
207 int ret;
208
209 local_irq_save(flags);
210 ret = __srcu_read_lock(sp);
211 local_irq_restore(flags);
212 return ret;
213}
214
215/**
216 * srcu_read_unlock_raw - unregister reader from an SRCU-protected structure.
217 * @sp: srcu_struct in which to unregister the old reader.
218 * @idx: return value from corresponding srcu_read_lock_raw().
219 *
220 * Exit an SRCU read-side critical section without lockdep-RCU checking.
221 * See srcu_read_lock_raw() for more details.
222 */
223static inline void srcu_read_unlock_raw(struct srcu_struct *sp, int idx)
224{
225 unsigned long flags;
226
227 local_irq_save(flags);
168 __srcu_read_unlock(sp, idx); 228 __srcu_read_unlock(sp, idx);
229 local_irq_restore(flags);
169} 230}
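A hedged sketch of the intended pairing for the _raw variants (my_srcu and the surrounding handlers are hypothetical): the index returned where the critical section begins is handed back where it ends, possibly in a different context of the same task.

	/* e.g. taken in an exception handler */
	idx = srcu_read_lock_raw(&my_srcu);

	/* ... access the SRCU-protected data structure ... */

	/* released later, in the task that took the exception */
	srcu_read_unlock_raw(&my_srcu, idx);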
170 231
171#endif 232#endif
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index 061e560251b4..dcf35b0f303a 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -94,6 +94,15 @@ struct ssb_sprom {
94 } ghz5; /* 5GHz band */ 94 } ghz5; /* 5GHz band */
95 } antenna_gain; 95 } antenna_gain;
96 96
97 struct {
98 struct {
99 u8 tssipos, extpa_gain, pdet_range, tr_iso, antswlut;
100 } ghz2;
101 struct {
102 u8 tssipos, extpa_gain, pdet_range, tr_iso, antswlut;
103 } ghz5;
104 } fem;
105
97 /* TODO - add any parameters needed from rev 2, 3, 4, 5 or 8 SPROMs */ 106 /* TODO - add any parameters needed from rev 2, 3, 4, 5 or 8 SPROMs */
98}; 107};
99 108
diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h
index 98941203a27f..c814ae6eeb22 100644
--- a/include/linux/ssb/ssb_regs.h
+++ b/include/linux/ssb/ssb_regs.h
@@ -432,6 +432,23 @@
432#define SSB_SPROM8_RXPO2G 0x00FF /* 2GHz RX power offset */ 432#define SSB_SPROM8_RXPO2G 0x00FF /* 2GHz RX power offset */
433#define SSB_SPROM8_RXPO5G 0xFF00 /* 5GHz RX power offset */ 433#define SSB_SPROM8_RXPO5G 0xFF00 /* 5GHz RX power offset */
434#define SSB_SPROM8_RXPO5G_SHIFT 8 434#define SSB_SPROM8_RXPO5G_SHIFT 8
435#define SSB_SPROM8_FEM2G 0x00AE
436#define SSB_SPROM8_FEM5G 0x00B0
437#define SSB_SROM8_FEM_TSSIPOS 0x0001
438#define SSB_SROM8_FEM_TSSIPOS_SHIFT 0
439#define SSB_SROM8_FEM_EXTPA_GAIN 0x0006
440#define SSB_SROM8_FEM_EXTPA_GAIN_SHIFT 1
441#define SSB_SROM8_FEM_PDET_RANGE 0x00F8
442#define SSB_SROM8_FEM_PDET_RANGE_SHIFT 3
443#define SSB_SROM8_FEM_TR_ISO 0x0700
444#define SSB_SROM8_FEM_TR_ISO_SHIFT 8
445#define SSB_SROM8_FEM_ANTSWLUT 0xF800
446#define SSB_SROM8_FEM_ANTSWLUT_SHIFT 11
447#define SSB_SPROM8_THERMAL 0x00B2
448#define SSB_SPROM8_MPWR_RAWTS 0x00B4
449#define SSB_SPROM8_TS_SLP_OPT_CORRX 0x00B6
450#define SSB_SPROM8_FOC_HWIQ_IQSWP 0x00B8
451#define SSB_SPROM8_PHYCAL_TEMPDELTA 0x00BA
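A sketch of how a driver could decode the new 2GHz FEM word with these masks and feed the fem.ghz2 fields added to struct ssb_sprom above; sprom_read16() stands in for whatever SPROM accessor the parser uses and is not defined by this patch.

	u16 fem = sprom_read16(bus, SSB_SPROM8_FEM2G);

	sprom->fem.ghz2.tssipos = (fem & SSB_SROM8_FEM_TSSIPOS) >>
				  SSB_SROM8_FEM_TSSIPOS_SHIFT;
	sprom->fem.ghz2.extpa_gain = (fem & SSB_SROM8_FEM_EXTPA_GAIN) >>
				     SSB_SROM8_FEM_EXTPA_GAIN_SHIFT;
	sprom->fem.ghz2.antswlut = (fem & SSB_SROM8_FEM_ANTSWLUT) >>
				   SSB_SROM8_FEM_ANTSWLUT_SHIFT;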
435#define SSB_SPROM8_MAXP_BG 0x00C0 /* Max Power 2GHz in path 1 */ 452#define SSB_SPROM8_MAXP_BG 0x00C0 /* Max Power 2GHz in path 1 */
436#define SSB_SPROM8_MAXP_BG_MASK 0x00FF /* Mask for Max Power 2GHz */ 453#define SSB_SPROM8_MAXP_BG_MASK 0x00FF /* Mask for Max Power 2GHz */
437#define SSB_SPROM8_ITSSI_BG 0xFF00 /* Mask for path 1 itssi_bg */ 454#define SSB_SPROM8_ITSSI_BG 0xFF00 /* Mask for path 1 itssi_bg */
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 3d8f9c44e27d..2c5993a17c33 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -215,7 +215,7 @@ static inline bool __rpc_copy_addr4(struct sockaddr *dst,
215 return true; 215 return true;
216} 216}
217 217
218#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 218#if IS_ENABLED(CONFIG_IPV6)
219static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1, 219static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
220 const struct sockaddr *sap2) 220 const struct sockaddr *sap2)
221{ 221{
@@ -237,10 +237,10 @@ static inline bool __rpc_copy_addr6(struct sockaddr *dst,
237 struct sockaddr_in6 *dsin6 = (struct sockaddr_in6 *) dst; 237 struct sockaddr_in6 *dsin6 = (struct sockaddr_in6 *) dst;
238 238
239 dsin6->sin6_family = ssin6->sin6_family; 239 dsin6->sin6_family = ssin6->sin6_family;
240 ipv6_addr_copy(&dsin6->sin6_addr, &ssin6->sin6_addr); 240 dsin6->sin6_addr = ssin6->sin6_addr;
241 return true; 241 return true;
242} 242}
243#else /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */ 243#else /* !IS_ENABLED(CONFIG_IPV6) */
244static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1, 244static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
245 const struct sockaddr *sap2) 245 const struct sockaddr *sap2)
246{ 246{
@@ -252,7 +252,7 @@ static inline bool __rpc_copy_addr6(struct sockaddr *dst,
252{ 252{
253 return false; 253 return false;
254} 254}
255#endif /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */ 255#endif /* !IS_ENABLED(CONFIG_IPV6) */
256 256
257/** 257/**
258 * rpc_cmp_addr - compare the address portion of two sockaddrs. 258 * rpc_cmp_addr - compare the address portion of two sockaddrs.
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 7f59ee946983..46a85c9e1f25 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -238,6 +238,11 @@ struct tcp_sack_block {
238 u32 end_seq; 238 u32 end_seq;
239}; 239};
240 240
241/* These are used to set the sack_ok field in struct tcp_options_received */
242#define TCP_SACK_SEEN (1 << 0) /* 1 = peer is SACK capable */
243#define TCP_FACK_ENABLED (1 << 1) /* 1 = FACK is enabled locally */
244#define TCP_DSACK_SEEN (1 << 2) /* 1 = DSACK was received from peer */
245
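For context, a hedged sketch of how the receive path might combine these bits in tp->rx_opt.sack_ok; tp is a struct tcp_sock pointer and dsack_received is a hypothetical local, the real call sites live in net/ipv4 and are not part of this header change.

	tp->rx_opt.sack_ok |= TCP_SACK_SEEN;		/* peer advertised SACK */
	if (sysctl_tcp_fack)
		tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;	/* turn on FACK locally */
	if (dsack_received)
		tp->rx_opt.sack_ok |= TCP_DSACK_SEEN;	/* remember the DSACK */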
241struct tcp_options_received { 246struct tcp_options_received {
242/* PAWS/RTTM data */ 247/* PAWS/RTTM data */
243 long ts_recent_stamp;/* Time we stored ts_recent (for aging) */ 248 long ts_recent_stamp;/* Time we stored ts_recent (for aging) */
diff --git a/include/linux/tick.h b/include/linux/tick.h
index b232ccc0ee29..ab8be90b5cc9 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -7,6 +7,7 @@
7#define _LINUX_TICK_H 7#define _LINUX_TICK_H
8 8
9#include <linux/clockchips.h> 9#include <linux/clockchips.h>
10#include <linux/irqflags.h>
10 11
11#ifdef CONFIG_GENERIC_CLOCKEVENTS 12#ifdef CONFIG_GENERIC_CLOCKEVENTS
12 13
@@ -121,14 +122,16 @@ static inline int tick_oneshot_mode_active(void) { return 0; }
121#endif /* !CONFIG_GENERIC_CLOCKEVENTS */ 122#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
122 123
123# ifdef CONFIG_NO_HZ 124# ifdef CONFIG_NO_HZ
124extern void tick_nohz_stop_sched_tick(int inidle); 125extern void tick_nohz_idle_enter(void);
125extern void tick_nohz_restart_sched_tick(void); 126extern void tick_nohz_idle_exit(void);
127extern void tick_nohz_irq_exit(void);
126extern ktime_t tick_nohz_get_sleep_length(void); 128extern ktime_t tick_nohz_get_sleep_length(void);
127extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); 129extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
128extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time); 130extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
129# else 131# else
130static inline void tick_nohz_stop_sched_tick(int inidle) { } 132static inline void tick_nohz_idle_enter(void) { }
131static inline void tick_nohz_restart_sched_tick(void) { } 133static inline void tick_nohz_idle_exit(void) { }
134
132static inline ktime_t tick_nohz_get_sleep_length(void) 135static inline ktime_t tick_nohz_get_sleep_length(void)
133{ 136{
134 ktime_t len = { .tv64 = NSEC_PER_SEC/HZ }; 137 ktime_t len = { .tv64 = NSEC_PER_SEC/HZ };
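A hedged sketch of the calling convention the renamed hooks expect from an architecture's cpu_idle() loop; real idle loops also deal with RCU, preemption and tracing, which are omitted here.

	while (1) {
		tick_nohz_idle_enter();
		while (!need_resched())
			cpu_relax();	/* or the arch's low-power wait */
		tick_nohz_idle_exit();
		schedule();
	}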
diff --git a/include/linux/unix_diag.h b/include/linux/unix_diag.h
new file mode 100644
index 000000000000..b1d2bf16b33c
--- /dev/null
+++ b/include/linux/unix_diag.h
@@ -0,0 +1,54 @@
1#ifndef __UNIX_DIAG_H__
2#define __UNIX_DIAG_H__
3
4#include <linux/types.h>
5
6struct unix_diag_req {
7 __u8 sdiag_family;
8 __u8 sdiag_protocol;
9 __u16 pad;
10 __u32 udiag_states;
11 __u32 udiag_ino;
12 __u32 udiag_show;
13 __u32 udiag_cookie[2];
14};
15
16#define UDIAG_SHOW_NAME 0x00000001 /* show name (not path) */
17#define UDIAG_SHOW_VFS 0x00000002 /* show VFS inode info */
18#define UDIAG_SHOW_PEER 0x00000004 /* show peer socket info */
19#define UDIAG_SHOW_ICONS 0x00000008 /* show pending connections */
20#define UDIAG_SHOW_RQLEN 0x00000010 /* show skb receive queue len */
21#define UDIAG_SHOW_MEMINFO 0x00000020 /* show memory info of a socket */
22
23struct unix_diag_msg {
24 __u8 udiag_family;
25 __u8 udiag_type;
26 __u8 udiag_state;
27 __u8 pad;
28
29 __u32 udiag_ino;
30 __u32 udiag_cookie[2];
31};
32
33enum {
34 UNIX_DIAG_NAME,
35 UNIX_DIAG_VFS,
36 UNIX_DIAG_PEER,
37 UNIX_DIAG_ICONS,
38 UNIX_DIAG_RQLEN,
39 UNIX_DIAG_MEMINFO,
40
41 UNIX_DIAG_MAX,
42};
43
44struct unix_diag_vfs {
45 __u32 udiag_vfs_ino;
46 __u32 udiag_vfs_dev;
47};
48
49struct unix_diag_rqlen {
50 __u32 udiag_rqueue;
51 __u32 udiag_wqueue;
52};
53
54#endif
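A hedged userspace sketch of a dump request built from this header; the netlink socket setup and error handling are omitted and the field choices are examples only.

#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/unix_diag.h>

static ssize_t send_unix_diag_dump(int nl_fd)
{
	struct {
		struct nlmsghdr nlh;
		struct unix_diag_req req;
	} msg;

	memset(&msg, 0, sizeof(msg));
	msg.nlh.nlmsg_len = sizeof(msg);
	msg.nlh.nlmsg_type = SOCK_DIAG_BY_FAMILY;
	msg.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	msg.req.sdiag_family = AF_UNIX;
	msg.req.udiag_states = -1;	/* all socket states */
	msg.req.udiag_show = UDIAG_SHOW_NAME | UDIAG_SHOW_PEER |
			     UDIAG_SHOW_RQLEN;

	return send(nl_fd, &msg, sizeof(msg), 0);
}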
diff --git a/include/linux/usb.h b/include/linux/usb.h
index a59321779f8b..7f8d4d61ca47 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -953,6 +953,18 @@ extern int usb_register_driver(struct usb_driver *, struct module *,
953 953
954extern void usb_deregister(struct usb_driver *); 954extern void usb_deregister(struct usb_driver *);
955 955
956/**
957 * module_usb_driver() - Helper macro for registering a USB driver
958 * @__usb_driver: usb_driver struct
959 *
960 * Helper macro for USB drivers that do not do anything special in module
961 * init/exit. This eliminates a lot of boilerplate. Each module may only
962 * use this macro once, and calling it replaces module_init() and module_exit().
963 */
964#define module_usb_driver(__usb_driver) \
965 module_driver(__usb_driver, usb_register, \
966 usb_deregister)
967
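The USB variant mirrors the SPI helper above; a sketch with placeholder foo names, not from this patch:

static struct usb_driver foo_usb_driver = {
	.name		= "foo",
	.probe		= foo_probe,
	.disconnect	= foo_disconnect,
	.id_table	= foo_id_table,
};

module_usb_driver(foo_usb_driver);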
956extern int usb_register_device_driver(struct usb_device_driver *, 968extern int usb_register_device_driver(struct usb_device_driver *,
957 struct module *); 969 struct module *);
958extern void usb_deregister_device_driver(struct usb_device_driver *); 970extern void usb_deregister_device_driver(struct usb_device_driver *);
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index e9e72bda1b72..5206d6541da5 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -102,6 +102,10 @@
102 * vdev: the virtio_device 102 * vdev: the virtio_device
103 * This gives the final feature bits for the device: it can change 103 * This gives the final feature bits for the device: it can change
104 * the dev->feature bits if it wants. 104 * the dev->feature bits if it wants.
105 * @bus_name: return the bus name associated with the device
106 * vdev: the virtio_device
107 * This returns a pointer to the bus name a la pci_name from which
108 * the caller can then copy.
105 */ 109 */
106typedef void vq_callback_t(struct virtqueue *); 110typedef void vq_callback_t(struct virtqueue *);
107struct virtio_config_ops { 111struct virtio_config_ops {
@@ -119,6 +123,7 @@ struct virtio_config_ops {
119 void (*del_vqs)(struct virtio_device *); 123 void (*del_vqs)(struct virtio_device *);
120 u32 (*get_features)(struct virtio_device *vdev); 124 u32 (*get_features)(struct virtio_device *vdev);
121 void (*finalize_features)(struct virtio_device *vdev); 125 void (*finalize_features)(struct virtio_device *vdev);
126 const char *(*bus_name)(struct virtio_device *vdev);
122}; 127};
123 128
124/* If driver didn't advertise the feature, it will never appear. */ 129/* If driver didn't advertise the feature, it will never appear. */
@@ -184,5 +189,14 @@ struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
184 return ERR_PTR(err); 189 return ERR_PTR(err);
185 return vq; 190 return vq;
186} 191}
192
193static inline
194const char *virtio_bus_name(struct virtio_device *vdev)
195{
196 if (!vdev->config->bus_name)
197 return "virtio";
198 return vdev->config->bus_name(vdev);
199}
200
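A one-line sketch of how a virtio driver could use the accessor; the dev_info() message is illustrative only.

	dev_info(&vdev->dev, "device sits on bus %s\n", virtio_bus_name(vdev));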
187#endif /* __KERNEL__ */ 201#endif /* __KERNEL__ */
188#endif /* _LINUX_VIRTIO_CONFIG_H */ 202#endif /* _LINUX_VIRTIO_CONFIG_H */
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 4bde182fcf93..dcdfc2bda922 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -131,6 +131,7 @@ extern long vwrite(char *buf, char *addr, unsigned long count);
131 */ 131 */
132extern rwlock_t vmlist_lock; 132extern rwlock_t vmlist_lock;
133extern struct vm_struct *vmlist; 133extern struct vm_struct *vmlist;
134extern __init void vm_area_add_early(struct vm_struct *vm);
134extern __init void vm_area_register_early(struct vm_struct *vm, size_t align); 135extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
135 136
136#ifdef CONFIG_SMP 137#ifdef CONFIG_SMP
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 3efc9f3f43a0..a9ce45e8501c 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -77,13 +77,13 @@ struct task_struct;
77#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \ 77#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
78 { .flags = word, .bit_nr = bit, } 78 { .flags = word, .bit_nr = bit, }
79 79
80extern void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *); 80extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);
81 81
82#define init_waitqueue_head(q) \ 82#define init_waitqueue_head(q) \
83 do { \ 83 do { \
84 static struct lock_class_key __key; \ 84 static struct lock_class_key __key; \
85 \ 85 \
86 __init_waitqueue_head((q), &__key); \ 86 __init_waitqueue_head((q), #q, &__key); \
87 } while (0) 87 } while (0)
88 88
89#ifdef CONFIG_LOCKDEP 89#ifdef CONFIG_LOCKDEP
diff --git a/include/linux/wl12xx.h b/include/linux/wl12xx.h
index 4b697395326e..0d6373195d32 100644
--- a/include/linux/wl12xx.h
+++ b/include/linux/wl12xx.h
@@ -54,6 +54,9 @@ struct wl12xx_platform_data {
54 int board_ref_clock; 54 int board_ref_clock;
55 int board_tcxo_clock; 55 int board_tcxo_clock;
56 unsigned long platform_quirks; 56 unsigned long platform_quirks;
57 bool pwr_in_suspend;
58
59 struct wl1271_if_operations *ops;
57}; 60};
58 61
59/* Platform does not support level trigger interrupts */ 62/* Platform does not support level trigger interrupts */
@@ -73,6 +76,6 @@ int wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
73 76
74#endif 77#endif
75 78
76const struct wl12xx_platform_data *wl12xx_get_platform_data(void); 79struct wl12xx_platform_data *wl12xx_get_platform_data(void);
77 80
78#endif 81#endif
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index cbc6bb0a6838..f68dce2d8d88 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -151,7 +151,8 @@ extern int ipv6_chk_mcast_addr(struct net_device *dev,
151 const struct in6_addr *src_addr); 151 const struct in6_addr *src_addr);
152extern int ipv6_is_mld(struct sk_buff *skb, int nexthdr); 152extern int ipv6_is_mld(struct sk_buff *skb, int nexthdr);
153 153
154extern void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len); 154extern void addrconf_prefix_rcv(struct net_device *dev,
155 u8 *opt, int len, bool sllao);
155 156
156/* 157/*
157 * anycast prototypes (anycast.c) 158 * anycast prototypes (anycast.c)
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 91ab5b01678a..5a4e29b168c9 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -11,10 +11,13 @@ extern void unix_notinflight(struct file *fp);
11extern void unix_gc(void); 11extern void unix_gc(void);
12extern void wait_for_unix_gc(void); 12extern void wait_for_unix_gc(void);
13extern struct sock *unix_get_socket(struct file *filp); 13extern struct sock *unix_get_socket(struct file *filp);
14extern struct sock *unix_peer_get(struct sock *);
14 15
15#define UNIX_HASH_SIZE 256 16#define UNIX_HASH_SIZE 256
16 17
17extern unsigned int unix_tot_inflight; 18extern unsigned int unix_tot_inflight;
19extern spinlock_t unix_table_lock;
20extern struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
18 21
19struct unix_address { 22struct unix_address {
20 atomic_t refcnt; 23 atomic_t refcnt;
@@ -63,6 +66,9 @@ struct unix_sock {
63 66
64#define peer_wait peer_wq.wait 67#define peer_wait peer_wq.wait
65 68
69long unix_inq_len(struct sock *sk);
70long unix_outq_len(struct sock *sk);
71
66#ifdef CONFIG_SYSCTL 72#ifdef CONFIG_SYSCTL
67extern int unix_sysctl_register(struct net *net); 73extern int unix_sysctl_register(struct net *net);
68extern void unix_sysctl_unregister(struct net *net); 74extern void unix_sysctl_unregister(struct net *net);
diff --git a/include/net/arp.h b/include/net/arp.h
index 4979af8b1559..0013dc87940b 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -23,7 +23,7 @@ static inline struct neighbour *__ipv4_neigh_lookup(struct neigh_table *tbl, str
23 23
24 rcu_read_lock_bh(); 24 rcu_read_lock_bh();
25 nht = rcu_dereference_bh(tbl->nht); 25 nht = rcu_dereference_bh(tbl->nht);
26 hash_val = arp_hashfn(key, dev, nht->hash_rnd) >> (32 - nht->hash_shift); 26 hash_val = arp_hashfn(key, dev, nht->hash_rnd[0]) >> (32 - nht->hash_shift);
27 for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]); 27 for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
28 n != NULL; 28 n != NULL;
29 n = rcu_dereference_bh(n->next)) { 29 n = rcu_dereference_bh(n->next)) {
diff --git a/include/net/atmclip.h b/include/net/atmclip.h
index 497ef6444a7a..5865924d4aac 100644
--- a/include/net/atmclip.h
+++ b/include/net/atmclip.h
@@ -15,7 +15,6 @@
15 15
16 16
17#define CLIP_VCC(vcc) ((struct clip_vcc *) ((vcc)->user_back)) 17#define CLIP_VCC(vcc) ((struct clip_vcc *) ((vcc)->user_back))
18#define NEIGH2ENTRY(neigh) ((struct atmarp_entry *) (neigh)->primary_key)
19 18
20struct sk_buff; 19struct sk_buff;
21 20
@@ -36,24 +35,18 @@ struct clip_vcc {
36 35
37 36
38struct atmarp_entry { 37struct atmarp_entry {
39 __be32 ip; /* IP address */
40 struct clip_vcc *vccs; /* active VCCs; NULL if resolution is 38 struct clip_vcc *vccs; /* active VCCs; NULL if resolution is
41 pending */ 39 pending */
42 unsigned long expires; /* entry expiration time */ 40 unsigned long expires; /* entry expiration time */
43 struct neighbour *neigh; /* neighbour back-pointer */ 41 struct neighbour *neigh; /* neighbour back-pointer */
44}; 42};
45 43
46
47#define PRIV(dev) ((struct clip_priv *) netdev_priv(dev)) 44#define PRIV(dev) ((struct clip_priv *) netdev_priv(dev))
48 45
49
50struct clip_priv { 46struct clip_priv {
51 int number; /* for convenience ... */ 47 int number; /* for convenience ... */
52 spinlock_t xoff_lock; /* ensures that pop is atomic (SMP) */ 48 spinlock_t xoff_lock; /* ensures that pop is atomic (SMP) */
53 struct net_device *next; /* next CLIP interface */ 49 struct net_device *next; /* next CLIP interface */
54}; 50};
55 51
56
57extern struct neigh_table *clip_tbl_hook;
58
59#endif 52#endif
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index e86af08293a8..abaad6ed9b83 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -36,6 +36,11 @@
36#define PF_BLUETOOTH AF_BLUETOOTH 36#define PF_BLUETOOTH AF_BLUETOOTH
37#endif 37#endif
38 38
39/* Bluetooth versions */
40#define BLUETOOTH_VER_1_1 1
41#define BLUETOOTH_VER_1_2 2
42#define BLUETOOTH_VER_2_0 3
43
39/* Reserv for core and drivers use */ 44/* Reserv for core and drivers use */
40#define BT_SKB_RESERVE 8 45#define BT_SKB_RESERVE 8
41 46
@@ -77,6 +82,33 @@ struct bt_power {
77#define BT_POWER_FORCE_ACTIVE_OFF 0 82#define BT_POWER_FORCE_ACTIVE_OFF 0
78#define BT_POWER_FORCE_ACTIVE_ON 1 83#define BT_POWER_FORCE_ACTIVE_ON 1
79 84
85#define BT_CHANNEL_POLICY 10
86
87/* BR/EDR only (default policy)
88 * AMP controllers cannot be used.
89 * Channel move requests from the remote device are denied.
90 * If the L2CAP channel is currently using AMP, move the channel to BR/EDR.
91 */
92#define BT_CHANNEL_POLICY_BREDR_ONLY 0
93
94/* BR/EDR Preferred
95 * Allow use of AMP controllers.
96 * If the L2CAP channel is currently on AMP, move it to BR/EDR.
97 * Channel move requests from the remote device are allowed.
98 */
99#define BT_CHANNEL_POLICY_BREDR_PREFERRED 1
100
101/* AMP Preferred
102 * Allow use of AMP controllers.
103 * If the L2CAP channel is currently on BR/EDR and AMP controller
104 * resources are available, initiate a channel move to AMP.
105 * Channel move requests from the remote device are allowed.
106 * If the L2CAP socket has not been connected yet, try to create
107 * and configure the channel directly on an AMP controller rather
108 * than BR/EDR.
109 */
110#define BT_CHANNEL_POLICY_AMP_PREFERRED 2
111
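A hedged userspace sketch of selecting a policy on an L2CAP socket, assuming the new option is exposed through setsockopt() at the SOL_BLUETOOTH level and that sk is an L2CAP socket descriptor:

	uint32_t policy = BT_CHANNEL_POLICY_AMP_PREFERRED;

	if (setsockopt(sk, SOL_BLUETOOTH, BT_CHANNEL_POLICY,
		       &policy, sizeof(policy)) < 0)
		perror("setsockopt(BT_CHANNEL_POLICY)");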
80__printf(2, 3) 112__printf(2, 3)
81int bt_printk(const char *level, const char *fmt, ...); 113int bt_printk(const char *level, const char *fmt, ...);
82 114
@@ -158,7 +190,7 @@ struct bt_skb_cb {
158 __u8 pkt_type; 190 __u8 pkt_type;
159 __u8 incoming; 191 __u8 incoming;
160 __u16 expect; 192 __u16 expect;
161 __u8 tx_seq; 193 __u16 tx_seq;
162 __u8 retries; 194 __u8 retries;
163 __u8 sar; 195 __u8 sar;
164 unsigned short channel; 196 unsigned short channel;
@@ -218,32 +250,10 @@ extern void bt_sysfs_cleanup(void);
218 250
219extern struct dentry *bt_debugfs; 251extern struct dentry *bt_debugfs;
220 252
221#ifdef CONFIG_BT_L2CAP
222int l2cap_init(void); 253int l2cap_init(void);
223void l2cap_exit(void); 254void l2cap_exit(void);
224#else
225static inline int l2cap_init(void)
226{
227 return 0;
228}
229
230static inline void l2cap_exit(void)
231{
232}
233#endif
234 255
235#ifdef CONFIG_BT_SCO
236int sco_init(void); 256int sco_init(void);
237void sco_exit(void); 257void sco_exit(void);
238#else
239static inline int sco_init(void)
240{
241 return 0;
242}
243
244static inline void sco_exit(void)
245{
246}
247#endif
248 258
249#endif /* __BLUETOOTH_H */ 259#endif /* __BLUETOOTH_H */
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index aaf79af72432..5b2fed5eebf2 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -88,6 +88,14 @@ enum {
88 HCI_RESET, 88 HCI_RESET,
89}; 89};
90 90
91/*
92 * BR/EDR and/or LE controller flags: the flags defined here should represent
93 * states from the controller.
94 */
95enum {
96 HCI_LE_SCAN,
97};
98
91/* HCI ioctl defines */ 99/* HCI ioctl defines */
92#define HCIDEVUP _IOW('H', 201, int) 100#define HCIDEVUP _IOW('H', 201, int)
93#define HCIDEVDOWN _IOW('H', 202, int) 101#define HCIDEVDOWN _IOW('H', 202, int)
@@ -202,6 +210,7 @@ enum {
202 210
203#define LMP_EV4 0x01 211#define LMP_EV4 0x01
204#define LMP_EV5 0x02 212#define LMP_EV5 0x02
213#define LMP_NO_BREDR 0x20
205#define LMP_LE 0x40 214#define LMP_LE 0x40
206 215
207#define LMP_SNIFF_SUBR 0x02 216#define LMP_SNIFF_SUBR 0x02
@@ -264,6 +273,17 @@ enum {
264#define HCI_LK_SMP_IRK 0x82 273#define HCI_LK_SMP_IRK 0x82
265#define HCI_LK_SMP_CSRK 0x83 274#define HCI_LK_SMP_CSRK 0x83
266 275
276/* ---- HCI Error Codes ---- */
277#define HCI_ERROR_AUTH_FAILURE 0x05
278#define HCI_ERROR_REJ_BAD_ADDR 0x0f
279#define HCI_ERROR_REMOTE_USER_TERM 0x13
280#define HCI_ERROR_LOCAL_HOST_TERM 0x16
281#define HCI_ERROR_PAIRING_NOT_ALLOWED 0x18
282
283/* Flow control modes */
284#define HCI_FLOW_CTL_MODE_PACKET_BASED 0x00
285#define HCI_FLOW_CTL_MODE_BLOCK_BASED 0x01
286
267/* ----- HCI Commands ---- */ 287/* ----- HCI Commands ---- */
268#define HCI_OP_NOP 0x0000 288#define HCI_OP_NOP 0x0000
269 289
@@ -446,6 +466,14 @@ struct hci_rp_user_confirm_reply {
446 466
447#define HCI_OP_USER_CONFIRM_NEG_REPLY 0x042d 467#define HCI_OP_USER_CONFIRM_NEG_REPLY 0x042d
448 468
469#define HCI_OP_USER_PASSKEY_REPLY 0x042e
470struct hci_cp_user_passkey_reply {
471 bdaddr_t bdaddr;
472 __le32 passkey;
473} __packed;
474
475#define HCI_OP_USER_PASSKEY_NEG_REPLY 0x042f
476
449#define HCI_OP_REMOTE_OOB_DATA_REPLY 0x0430 477#define HCI_OP_REMOTE_OOB_DATA_REPLY 0x0430
450struct hci_cp_remote_oob_data_reply { 478struct hci_cp_remote_oob_data_reply {
451 bdaddr_t bdaddr; 479 bdaddr_t bdaddr;
@@ -662,6 +690,12 @@ struct hci_rp_read_local_oob_data {
662 690
663#define HCI_OP_READ_INQ_RSP_TX_POWER 0x0c58 691#define HCI_OP_READ_INQ_RSP_TX_POWER 0x0c58
664 692
693#define HCI_OP_READ_FLOW_CONTROL_MODE 0x0c66
694struct hci_rp_read_flow_control_mode {
695 __u8 status;
696 __u8 mode;
697} __packed;
698
665#define HCI_OP_WRITE_LE_HOST_SUPPORTED 0x0c6d 699#define HCI_OP_WRITE_LE_HOST_SUPPORTED 0x0c6d
666struct hci_cp_write_le_host_supported { 700struct hci_cp_write_le_host_supported {
667 __u8 le; 701 __u8 le;
@@ -716,6 +750,14 @@ struct hci_rp_read_bd_addr {
716 bdaddr_t bdaddr; 750 bdaddr_t bdaddr;
717} __packed; 751} __packed;
718 752
753#define HCI_OP_READ_DATA_BLOCK_SIZE 0x100a
754struct hci_rp_read_data_block_size {
755 __u8 status;
756 __le16 max_acl_len;
757 __le16 block_len;
758 __le16 num_blocks;
759} __packed;
760
719#define HCI_OP_WRITE_PAGE_SCAN_ACTIVITY 0x0c1c 761#define HCI_OP_WRITE_PAGE_SCAN_ACTIVITY 0x0c1c
720struct hci_cp_write_page_scan_activity { 762struct hci_cp_write_page_scan_activity {
721 __le16 interval; 763 __le16 interval;
@@ -726,6 +768,21 @@ struct hci_cp_write_page_scan_activity {
726 #define PAGE_SCAN_TYPE_STANDARD 0x00 768 #define PAGE_SCAN_TYPE_STANDARD 0x00
727 #define PAGE_SCAN_TYPE_INTERLACED 0x01 769 #define PAGE_SCAN_TYPE_INTERLACED 0x01
728 770
771#define HCI_OP_READ_LOCAL_AMP_INFO 0x1409
772struct hci_rp_read_local_amp_info {
773 __u8 status;
774 __u8 amp_status;
775 __le32 total_bw;
776 __le32 max_bw;
777 __le32 min_latency;
778 __le32 max_pdu;
779 __u8 amp_type;
780 __le16 pal_cap;
781 __le16 max_assoc_size;
782 __le32 max_flush_to;
783 __le32 be_flush_to;
784} __packed;
785
729#define HCI_OP_LE_SET_EVENT_MASK 0x2001 786#define HCI_OP_LE_SET_EVENT_MASK 0x2001
730struct hci_cp_le_set_event_mask { 787struct hci_cp_le_set_event_mask {
731 __u8 mask[8]; 788 __u8 mask[8];
@@ -738,6 +795,18 @@ struct hci_rp_le_read_buffer_size {
738 __u8 le_max_pkt; 795 __u8 le_max_pkt;
739} __packed; 796} __packed;
740 797
798#define HCI_OP_LE_SET_SCAN_PARAM 0x200b
799struct hci_cp_le_set_scan_param {
800 __u8 type;
801 __le16 interval;
802 __le16 window;
803 __u8 own_address_type;
804 __u8 filter_policy;
805} __packed;
806
807#define LE_SCANNING_DISABLED 0x00
808#define LE_SCANNING_ENABLED 0x01
809
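A kernel-side sketch of issuing the new command; the interval and window values are examples, and hci_send_cmd() is the existing helper declared later in hci_core.h.

	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = 0x01;				/* active scanning */
	cp.interval = cpu_to_le16(0x0010);	/* 0x10 * 0.625 ms = 10 ms */
	cp.window = cpu_to_le16(0x0010);
	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);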
741#define HCI_OP_LE_SET_SCAN_ENABLE 0x200c 810#define HCI_OP_LE_SET_SCAN_ENABLE 0x200c
742struct hci_cp_le_set_scan_enable { 811struct hci_cp_le_set_scan_enable {
743 __u8 enable; 812 __u8 enable;
@@ -913,9 +982,14 @@ struct hci_ev_role_change {
913} __packed; 982} __packed;
914 983
915#define HCI_EV_NUM_COMP_PKTS 0x13 984#define HCI_EV_NUM_COMP_PKTS 0x13
985struct hci_comp_pkts_info {
986 __le16 handle;
987 __le16 count;
988} __packed;
989
916struct hci_ev_num_comp_pkts { 990struct hci_ev_num_comp_pkts {
917 __u8 num_hndl; 991 __u8 num_hndl;
918 /* variable length part */ 992 struct hci_comp_pkts_info handles[0];
919} __packed; 993} __packed;
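With the flexible handles[] array the event parser can index the per-handle records directly instead of doing manual pointer arithmetic; a sketch with skb length validation omitted:

	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		__u16 handle = __le16_to_cpu(info->handle);
		__u16 count  = __le16_to_cpu(info->count);

		/* credit 'count' completed packets back to connection 'handle' */
	}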
920 994
921#define HCI_EV_MODE_CHANGE 0x14 995#define HCI_EV_MODE_CHANGE 0x14
@@ -1054,6 +1128,11 @@ struct hci_ev_user_confirm_req {
1054 __le32 passkey; 1128 __le32 passkey;
1055} __packed; 1129} __packed;
1056 1130
1131#define HCI_EV_USER_PASSKEY_REQUEST 0x34
1132struct hci_ev_user_passkey_req {
1133 bdaddr_t bdaddr;
1134} __packed;
1135
1057#define HCI_EV_REMOTE_OOB_DATA_REQUEST 0x35 1136#define HCI_EV_REMOTE_OOB_DATA_REQUEST 0x35
1058struct hci_ev_remote_oob_data_request { 1137struct hci_ev_remote_oob_data_request {
1059 bdaddr_t bdaddr; 1138 bdaddr_t bdaddr;
@@ -1309,4 +1388,6 @@ struct hci_inquiry_req {
1309}; 1388};
1310#define IREQ_CACHE_FLUSH 0x0001 1389#define IREQ_CACHE_FLUSH 0x0001
1311 1390
1391extern int enable_hs;
1392
1312#endif /* __HCI_H */ 1393#endif /* __HCI_H */
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 3779ea362257..5e2e98458496 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -28,9 +28,8 @@
28#include <linux/interrupt.h> 28#include <linux/interrupt.h>
29#include <net/bluetooth/hci.h> 29#include <net/bluetooth/hci.h>
30 30
31/* HCI upper protocols */ 31/* HCI priority */
32#define HCI_PROTO_L2CAP 0 32#define HCI_PRIO_MAX 7
33#define HCI_PROTO_SCO 1
34 33
35/* HCI Core structures */ 34/* HCI Core structures */
36struct inquiry_data { 35struct inquiry_data {
@@ -51,14 +50,12 @@ struct inquiry_entry {
51}; 50};
52 51
53struct inquiry_cache { 52struct inquiry_cache {
54 spinlock_t lock;
55 __u32 timestamp; 53 __u32 timestamp;
56 struct inquiry_entry *list; 54 struct inquiry_entry *list;
57}; 55};
58 56
59struct hci_conn_hash { 57struct hci_conn_hash {
60 struct list_head list; 58 struct list_head list;
61 spinlock_t lock;
62 unsigned int acl_num; 59 unsigned int acl_num;
63 unsigned int sco_num; 60 unsigned int sco_num;
64 unsigned int le_num; 61 unsigned int le_num;
@@ -115,7 +112,7 @@ struct adv_entry {
115#define NUM_REASSEMBLY 4 112#define NUM_REASSEMBLY 4
116struct hci_dev { 113struct hci_dev {
117 struct list_head list; 114 struct list_head list;
118 spinlock_t lock; 115 struct mutex lock;
119 atomic_t refcnt; 116 atomic_t refcnt;
120 117
121 char name[8]; 118 char name[8];
@@ -150,6 +147,19 @@ struct hci_dev {
150 __u16 sniff_min_interval; 147 __u16 sniff_min_interval;
151 __u16 sniff_max_interval; 148 __u16 sniff_max_interval;
152 149
150 __u8 amp_status;
151 __u32 amp_total_bw;
152 __u32 amp_max_bw;
153 __u32 amp_min_latency;
154 __u32 amp_max_pdu;
155 __u8 amp_type;
156 __u16 amp_pal_cap;
157 __u16 amp_assoc_size;
158 __u32 amp_max_flush_to;
159 __u32 amp_be_flush_to;
160
161 __u8 flow_ctl_mode;
162
153 unsigned int auto_accept_delay; 163 unsigned int auto_accept_delay;
154 164
155 unsigned long quirks; 165 unsigned long quirks;
@@ -166,6 +176,11 @@ struct hci_dev {
166 unsigned int sco_pkts; 176 unsigned int sco_pkts;
167 unsigned int le_pkts; 177 unsigned int le_pkts;
168 178
179 __u16 block_len;
180 __u16 block_mtu;
181 __u16 num_blocks;
182 __u16 block_cnt;
183
169 unsigned long acl_last_tx; 184 unsigned long acl_last_tx;
170 unsigned long sco_last_tx; 185 unsigned long sco_last_tx;
171 unsigned long le_last_tx; 186 unsigned long le_last_tx;
@@ -173,13 +188,18 @@ struct hci_dev {
173 struct workqueue_struct *workqueue; 188 struct workqueue_struct *workqueue;
174 189
175 struct work_struct power_on; 190 struct work_struct power_on;
176 struct work_struct power_off; 191 struct delayed_work power_off;
177 struct timer_list off_timer; 192
193 __u16 discov_timeout;
194 struct delayed_work discov_off;
195
196 struct delayed_work service_cache;
178 197
179 struct timer_list cmd_timer; 198 struct timer_list cmd_timer;
180 struct tasklet_struct cmd_task; 199
181 struct tasklet_struct rx_task; 200 struct work_struct rx_work;
182 struct tasklet_struct tx_task; 201 struct work_struct cmd_work;
202 struct work_struct tx_work;
183 203
184 struct sk_buff_head rx_q; 204 struct sk_buff_head rx_q;
185 struct sk_buff_head raw_q; 205 struct sk_buff_head raw_q;
@@ -195,6 +215,8 @@ struct hci_dev {
195 215
196 __u16 init_last_cmd; 216 __u16 init_last_cmd;
197 217
218 struct list_head mgmt_pending;
219
198 struct inquiry_cache inq_cache; 220 struct inquiry_cache inq_cache;
199 struct hci_conn_hash conn_hash; 221 struct hci_conn_hash conn_hash;
200 struct list_head blacklist; 222 struct list_head blacklist;
@@ -206,7 +228,7 @@ struct hci_dev {
206 struct list_head remote_oob_data; 228 struct list_head remote_oob_data;
207 229
208 struct list_head adv_entries; 230 struct list_head adv_entries;
209 struct timer_list adv_timer; 231 struct delayed_work adv_work;
210 232
211 struct hci_dev_stats stat; 233 struct hci_dev_stats stat;
212 234
@@ -226,6 +248,8 @@ struct hci_dev {
226 248
227 struct module *owner; 249 struct module *owner;
228 250
251 unsigned long dev_flags;
252
229 int (*open)(struct hci_dev *hdev); 253 int (*open)(struct hci_dev *hdev);
230 int (*close)(struct hci_dev *hdev); 254 int (*close)(struct hci_dev *hdev);
231 int (*flush)(struct hci_dev *hdev); 255 int (*flush)(struct hci_dev *hdev);
@@ -273,20 +297,19 @@ struct hci_conn {
273 unsigned int sent; 297 unsigned int sent;
274 298
275 struct sk_buff_head data_q; 299 struct sk_buff_head data_q;
300 struct list_head chan_list;
276 301
277 struct timer_list disc_timer; 302 struct delayed_work disc_work;
278 struct timer_list idle_timer; 303 struct timer_list idle_timer;
279 struct timer_list auto_accept_timer; 304 struct timer_list auto_accept_timer;
280 305
281 struct work_struct work_add;
282 struct work_struct work_del;
283
284 struct device dev; 306 struct device dev;
285 atomic_t devref; 307 atomic_t devref;
286 308
287 struct hci_dev *hdev; 309 struct hci_dev *hdev;
288 void *l2cap_data; 310 void *l2cap_data;
289 void *sco_data; 311 void *sco_data;
312 void *smp_conn;
290 313
291 struct hci_conn *link; 314 struct hci_conn *link;
292 315
@@ -295,25 +318,39 @@ struct hci_conn {
295 void (*disconn_cfm_cb) (struct hci_conn *conn, u8 reason); 318 void (*disconn_cfm_cb) (struct hci_conn *conn, u8 reason);
296}; 319};
297 320
298extern struct hci_proto *hci_proto[]; 321struct hci_chan {
322 struct list_head list;
323
324 struct hci_conn *conn;
325 struct sk_buff_head data_q;
326 unsigned int sent;
327};
328
299extern struct list_head hci_dev_list; 329extern struct list_head hci_dev_list;
300extern struct list_head hci_cb_list; 330extern struct list_head hci_cb_list;
301extern rwlock_t hci_dev_list_lock; 331extern rwlock_t hci_dev_list_lock;
302extern rwlock_t hci_cb_list_lock; 332extern rwlock_t hci_cb_list_lock;
303 333
334/* ----- HCI interface to upper protocols ----- */
335extern int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
336extern int l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
337extern int l2cap_disconn_ind(struct hci_conn *hcon);
338extern int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
339extern int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt);
340extern int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
341
342extern int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
343extern int sco_connect_cfm(struct hci_conn *hcon, __u8 status);
344extern int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason);
345extern int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
346
304/* ----- Inquiry cache ----- */ 347/* ----- Inquiry cache ----- */
305#define INQUIRY_CACHE_AGE_MAX (HZ*30) /* 30 seconds */ 348#define INQUIRY_CACHE_AGE_MAX (HZ*30) /* 30 seconds */
306#define INQUIRY_ENTRY_AGE_MAX (HZ*60) /* 60 seconds */ 349#define INQUIRY_ENTRY_AGE_MAX (HZ*60) /* 60 seconds */
307 350
308#define inquiry_cache_lock(c) spin_lock(&c->lock)
309#define inquiry_cache_unlock(c) spin_unlock(&c->lock)
310#define inquiry_cache_lock_bh(c) spin_lock_bh(&c->lock)
311#define inquiry_cache_unlock_bh(c) spin_unlock_bh(&c->lock)
312
313static inline void inquiry_cache_init(struct hci_dev *hdev) 351static inline void inquiry_cache_init(struct hci_dev *hdev)
314{ 352{
315 struct inquiry_cache *c = &hdev->inq_cache; 353 struct inquiry_cache *c = &hdev->inq_cache;
316 spin_lock_init(&c->lock);
317 c->list = NULL; 354 c->list = NULL;
318} 355}
319 356
@@ -353,15 +390,15 @@ static inline void hci_conn_hash_init(struct hci_dev *hdev)
353{ 390{
354 struct hci_conn_hash *h = &hdev->conn_hash; 391 struct hci_conn_hash *h = &hdev->conn_hash;
355 INIT_LIST_HEAD(&h->list); 392 INIT_LIST_HEAD(&h->list);
356 spin_lock_init(&h->lock);
357 h->acl_num = 0; 393 h->acl_num = 0;
358 h->sco_num = 0; 394 h->sco_num = 0;
395 h->le_num = 0;
359} 396}
360 397
361static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c) 398static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
362{ 399{
363 struct hci_conn_hash *h = &hdev->conn_hash; 400 struct hci_conn_hash *h = &hdev->conn_hash;
364 list_add(&c->list, &h->list); 401 list_add_rcu(&c->list, &h->list);
365 switch (c->type) { 402 switch (c->type) {
366 case ACL_LINK: 403 case ACL_LINK:
367 h->acl_num++; 404 h->acl_num++;
@@ -379,7 +416,10 @@ static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
379static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c) 416static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
380{ 417{
381 struct hci_conn_hash *h = &hdev->conn_hash; 418 struct hci_conn_hash *h = &hdev->conn_hash;
382 list_del(&c->list); 419
420 list_del_rcu(&c->list);
421 synchronize_rcu();
422
383 switch (c->type) { 423 switch (c->type) {
384 case ACL_LINK: 424 case ACL_LINK:
385 h->acl_num--; 425 h->acl_num--;
@@ -414,14 +454,18 @@ static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
414 __u16 handle) 454 __u16 handle)
415{ 455{
416 struct hci_conn_hash *h = &hdev->conn_hash; 456 struct hci_conn_hash *h = &hdev->conn_hash;
417 struct list_head *p;
418 struct hci_conn *c; 457 struct hci_conn *c;
419 458
420 list_for_each(p, &h->list) { 459 rcu_read_lock();
421 c = list_entry(p, struct hci_conn, list); 460
422 if (c->handle == handle) 461 list_for_each_entry_rcu(c, &h->list, list) {
462 if (c->handle == handle) {
463 rcu_read_unlock();
423 return c; 464 return c;
465 }
424 } 466 }
467 rcu_read_unlock();
468
425 return NULL; 469 return NULL;
426} 470}
427 471
@@ -429,14 +473,19 @@ static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
429 __u8 type, bdaddr_t *ba) 473 __u8 type, bdaddr_t *ba)
430{ 474{
431 struct hci_conn_hash *h = &hdev->conn_hash; 475 struct hci_conn_hash *h = &hdev->conn_hash;
432 struct list_head *p;
433 struct hci_conn *c; 476 struct hci_conn *c;
434 477
435 list_for_each(p, &h->list) { 478 rcu_read_lock();
436 c = list_entry(p, struct hci_conn, list); 479
437 if (c->type == type && !bacmp(&c->dst, ba)) 480 list_for_each_entry_rcu(c, &h->list, list) {
481 if (c->type == type && !bacmp(&c->dst, ba)) {
482 rcu_read_unlock();
438 return c; 483 return c;
484 }
439 } 485 }
486
487 rcu_read_unlock();
488
440 return NULL; 489 return NULL;
441} 490}
442 491
@@ -444,14 +493,19 @@ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
444 __u8 type, __u16 state) 493 __u8 type, __u16 state)
445{ 494{
446 struct hci_conn_hash *h = &hdev->conn_hash; 495 struct hci_conn_hash *h = &hdev->conn_hash;
447 struct list_head *p;
448 struct hci_conn *c; 496 struct hci_conn *c;
449 497
450 list_for_each(p, &h->list) { 498 rcu_read_lock();
451 c = list_entry(p, struct hci_conn, list); 499
452 if (c->type == type && c->state == state) 500 list_for_each_entry_rcu(c, &h->list, list) {
501 if (c->type == type && c->state == state) {
502 rcu_read_unlock();
453 return c; 503 return c;
504 }
454 } 505 }
506
507 rcu_read_unlock();
508
455 return NULL; 509 return NULL;
456} 510}
457 511
@@ -466,6 +520,10 @@ int hci_conn_del(struct hci_conn *conn);
466void hci_conn_hash_flush(struct hci_dev *hdev); 520void hci_conn_hash_flush(struct hci_dev *hdev);
467void hci_conn_check_pending(struct hci_dev *hdev); 521void hci_conn_check_pending(struct hci_dev *hdev);
468 522
523struct hci_chan *hci_chan_create(struct hci_conn *conn);
524int hci_chan_del(struct hci_chan *chan);
525void hci_chan_list_flush(struct hci_conn *conn);
526
469struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, 527struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
470 __u8 sec_level, __u8 auth_type); 528 __u8 sec_level, __u8 auth_type);
471int hci_conn_check_link_mode(struct hci_conn *conn); 529int hci_conn_check_link_mode(struct hci_conn *conn);
@@ -475,7 +533,6 @@ int hci_conn_change_link_key(struct hci_conn *conn);
475int hci_conn_switch_role(struct hci_conn *conn, __u8 role); 533int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
476 534
477void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active); 535void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);
478void hci_conn_enter_sniff_mode(struct hci_conn *conn);
479 536
480void hci_conn_hold_device(struct hci_conn *conn); 537void hci_conn_hold_device(struct hci_conn *conn);
481void hci_conn_put_device(struct hci_conn *conn); 538void hci_conn_put_device(struct hci_conn *conn);
@@ -483,7 +540,7 @@ void hci_conn_put_device(struct hci_conn *conn);
483static inline void hci_conn_hold(struct hci_conn *conn) 540static inline void hci_conn_hold(struct hci_conn *conn)
484{ 541{
485 atomic_inc(&conn->refcnt); 542 atomic_inc(&conn->refcnt);
486 del_timer(&conn->disc_timer); 543 cancel_delayed_work_sync(&conn->disc_work);
487} 544}
488 545
489static inline void hci_conn_put(struct hci_conn *conn) 546static inline void hci_conn_put(struct hci_conn *conn)
@@ -502,7 +559,9 @@ static inline void hci_conn_put(struct hci_conn *conn)
502 } else { 559 } else {
503 timeo = msecs_to_jiffies(10); 560 timeo = msecs_to_jiffies(10);
504 } 561 }
505 mod_timer(&conn->disc_timer, jiffies + timeo); 562 cancel_delayed_work_sync(&conn->disc_work);
563 queue_delayed_work(conn->hdev->workqueue,
564 &conn->disc_work, jiffies + timeo);
506 } 565 }
507} 566}
508 567
@@ -534,10 +593,8 @@ static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
534 try_module_get(d->owner) ? __hci_dev_hold(d) : NULL; \ 593 try_module_get(d->owner) ? __hci_dev_hold(d) : NULL; \
535}) 594})
536 595
537#define hci_dev_lock(d) spin_lock(&d->lock) 596#define hci_dev_lock(d) mutex_lock(&d->lock)
538#define hci_dev_unlock(d) spin_unlock(&d->lock) 597#define hci_dev_unlock(d) mutex_unlock(&d->lock)
539#define hci_dev_lock_bh(d) spin_lock_bh(&d->lock)
540#define hci_dev_unlock_bh(d) spin_unlock_bh(&d->lock)
541 598
542struct hci_dev *hci_dev_get(int index); 599struct hci_dev *hci_dev_get(int index);
543struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst); 600struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);
@@ -545,7 +602,7 @@ struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);
545struct hci_dev *hci_alloc_dev(void); 602struct hci_dev *hci_alloc_dev(void);
546void hci_free_dev(struct hci_dev *hdev); 603void hci_free_dev(struct hci_dev *hdev);
547int hci_register_dev(struct hci_dev *hdev); 604int hci_register_dev(struct hci_dev *hdev);
548int hci_unregister_dev(struct hci_dev *hdev); 605void hci_unregister_dev(struct hci_dev *hdev);
549int hci_suspend_dev(struct hci_dev *hdev); 606int hci_suspend_dev(struct hci_dev *hdev);
550int hci_resume_dev(struct hci_dev *hdev); 607int hci_resume_dev(struct hci_dev *hdev);
551int hci_dev_open(__u16 dev); 608int hci_dev_open(__u16 dev);
@@ -599,8 +656,9 @@ int hci_recv_frame(struct sk_buff *skb);
599int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count); 656int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);
600int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count); 657int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count);
601 658
602int hci_register_sysfs(struct hci_dev *hdev); 659void hci_init_sysfs(struct hci_dev *hdev);
603void hci_unregister_sysfs(struct hci_dev *hdev); 660int hci_add_sysfs(struct hci_dev *hdev);
661void hci_del_sysfs(struct hci_dev *hdev);
604void hci_conn_init_sysfs(struct hci_conn *conn); 662void hci_conn_init_sysfs(struct hci_conn *conn);
605void hci_conn_add_sysfs(struct hci_conn *conn); 663void hci_conn_add_sysfs(struct hci_conn *conn);
606void hci_conn_del_sysfs(struct hci_conn *conn); 664void hci_conn_del_sysfs(struct hci_conn *conn);
@@ -621,53 +679,40 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
621#define lmp_host_le_capable(dev) ((dev)->extfeatures[0] & LMP_HOST_LE) 679#define lmp_host_le_capable(dev) ((dev)->extfeatures[0] & LMP_HOST_LE)
622 680
623/* ----- HCI protocols ----- */ 681/* ----- HCI protocols ----- */
624struct hci_proto {
625 char *name;
626 unsigned int id;
627 unsigned long flags;
628
629 void *priv;
630
631 int (*connect_ind) (struct hci_dev *hdev, bdaddr_t *bdaddr,
632 __u8 type);
633 int (*connect_cfm) (struct hci_conn *conn, __u8 status);
634 int (*disconn_ind) (struct hci_conn *conn);
635 int (*disconn_cfm) (struct hci_conn *conn, __u8 reason);
636 int (*recv_acldata) (struct hci_conn *conn, struct sk_buff *skb,
637 __u16 flags);
638 int (*recv_scodata) (struct hci_conn *conn, struct sk_buff *skb);
639 int (*security_cfm) (struct hci_conn *conn, __u8 status,
640 __u8 encrypt);
641};
642
643static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, 682static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
644 __u8 type) 683 __u8 type)
645{ 684{
646 register struct hci_proto *hp; 685 switch (type) {
647 int mask = 0; 686 case ACL_LINK:
648 687 return l2cap_connect_ind(hdev, bdaddr);
649 hp = hci_proto[HCI_PROTO_L2CAP];
650 if (hp && hp->connect_ind)
651 mask |= hp->connect_ind(hdev, bdaddr, type);
652 688
653 hp = hci_proto[HCI_PROTO_SCO]; 689 case SCO_LINK:
654 if (hp && hp->connect_ind) 690 case ESCO_LINK:
655 mask |= hp->connect_ind(hdev, bdaddr, type); 691 return sco_connect_ind(hdev, bdaddr);
656 692
657 return mask; 693 default:
694 BT_ERR("unknown link type %d", type);
695 return -EINVAL;
696 }
658} 697}
659 698
660static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status) 699static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
661{ 700{
662 register struct hci_proto *hp; 701 switch (conn->type) {
702 case ACL_LINK:
703 case LE_LINK:
704 l2cap_connect_cfm(conn, status);
705 break;
663 706
664 hp = hci_proto[HCI_PROTO_L2CAP]; 707 case SCO_LINK:
665 if (hp && hp->connect_cfm) 708 case ESCO_LINK:
666 hp->connect_cfm(conn, status); 709 sco_connect_cfm(conn, status);
710 break;
667 711
668 hp = hci_proto[HCI_PROTO_SCO]; 712 default:
669 if (hp && hp->connect_cfm) 713 BT_ERR("unknown link type %d", conn->type);
670 hp->connect_cfm(conn, status); 714 break;
715 }
671 716
672 if (conn->connect_cfm_cb) 717 if (conn->connect_cfm_cb)
673 conn->connect_cfm_cb(conn, status); 718 conn->connect_cfm_cb(conn, status);
@@ -675,31 +720,29 @@ static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
675 720
676static inline int hci_proto_disconn_ind(struct hci_conn *conn) 721static inline int hci_proto_disconn_ind(struct hci_conn *conn)
677{ 722{
678 register struct hci_proto *hp; 723 if (conn->type != ACL_LINK && conn->type != LE_LINK)
679 int reason = 0x13; 724 return HCI_ERROR_REMOTE_USER_TERM;
680
681 hp = hci_proto[HCI_PROTO_L2CAP];
682 if (hp && hp->disconn_ind)
683 reason = hp->disconn_ind(conn);
684 725
685 hp = hci_proto[HCI_PROTO_SCO]; 726 return l2cap_disconn_ind(conn);
686 if (hp && hp->disconn_ind)
687 reason = hp->disconn_ind(conn);
688
689 return reason;
690} 727}
691 728
692static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason) 729static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
693{ 730{
694 register struct hci_proto *hp; 731 switch (conn->type) {
732 case ACL_LINK:
733 case LE_LINK:
734 l2cap_disconn_cfm(conn, reason);
735 break;
695 736
696 hp = hci_proto[HCI_PROTO_L2CAP]; 737 case SCO_LINK:
697 if (hp && hp->disconn_cfm) 738 case ESCO_LINK:
698 hp->disconn_cfm(conn, reason); 739 sco_disconn_cfm(conn, reason);
740 break;
699 741
700 hp = hci_proto[HCI_PROTO_SCO]; 742 default:
701 if (hp && hp->disconn_cfm) 743 BT_ERR("unknown link type %d", conn->type);
702 hp->disconn_cfm(conn, reason); 744 break;
745 }
703 746
704 if (conn->disconn_cfm_cb) 747 if (conn->disconn_cfm_cb)
705 conn->disconn_cfm_cb(conn, reason); 748 conn->disconn_cfm_cb(conn, reason);
@@ -707,21 +750,16 @@ static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
707 750
708static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status) 751static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
709{ 752{
710 register struct hci_proto *hp;
711 __u8 encrypt; 753 __u8 encrypt;
712 754
755 if (conn->type != ACL_LINK && conn->type != LE_LINK)
756 return;
757
713 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) 758 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
714 return; 759 return;
715 760
716 encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00; 761 encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
717 762 l2cap_security_cfm(conn, status, encrypt);
718 hp = hci_proto[HCI_PROTO_L2CAP];
719 if (hp && hp->security_cfm)
720 hp->security_cfm(conn, status, encrypt);
721
722 hp = hci_proto[HCI_PROTO_SCO];
723 if (hp && hp->security_cfm)
724 hp->security_cfm(conn, status, encrypt);
725 763
726 if (conn->security_cfm_cb) 764 if (conn->security_cfm_cb)
727 conn->security_cfm_cb(conn, status); 765 conn->security_cfm_cb(conn, status);
@@ -730,23 +768,15 @@ static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
730static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, 768static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status,
731 __u8 encrypt) 769 __u8 encrypt)
732{ 770{
733 register struct hci_proto *hp; 771 if (conn->type != ACL_LINK && conn->type != LE_LINK)
734 772 return;
735 hp = hci_proto[HCI_PROTO_L2CAP];
736 if (hp && hp->security_cfm)
737 hp->security_cfm(conn, status, encrypt);
738 773
739 hp = hci_proto[HCI_PROTO_SCO]; 774 l2cap_security_cfm(conn, status, encrypt);
740 if (hp && hp->security_cfm)
741 hp->security_cfm(conn, status, encrypt);
742 775
743 if (conn->security_cfm_cb) 776 if (conn->security_cfm_cb)
744 conn->security_cfm_cb(conn, status); 777 conn->security_cfm_cb(conn, status);
745} 778}
746 779
747int hci_register_proto(struct hci_proto *hproto);
748int hci_unregister_proto(struct hci_proto *hproto);
749
750/* ----- HCI callbacks ----- */ 780/* ----- HCI callbacks ----- */
751struct hci_cb { 781struct hci_cb {
752 struct list_head list; 782 struct list_head list;
@@ -771,13 +801,13 @@ static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
771 801
772 encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00; 802 encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
773 803
774 read_lock_bh(&hci_cb_list_lock); 804 read_lock(&hci_cb_list_lock);
775 list_for_each(p, &hci_cb_list) { 805 list_for_each(p, &hci_cb_list) {
776 struct hci_cb *cb = list_entry(p, struct hci_cb, list); 806 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
777 if (cb->security_cfm) 807 if (cb->security_cfm)
778 cb->security_cfm(conn, status, encrypt); 808 cb->security_cfm(conn, status, encrypt);
779 } 809 }
780 read_unlock_bh(&hci_cb_list_lock); 810 read_unlock(&hci_cb_list_lock);
781} 811}
782 812
783static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, 813static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status,
@@ -793,26 +823,26 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status,
793 823
794 hci_proto_encrypt_cfm(conn, status, encrypt); 824 hci_proto_encrypt_cfm(conn, status, encrypt);
795 825
796 read_lock_bh(&hci_cb_list_lock); 826 read_lock(&hci_cb_list_lock);
797 list_for_each(p, &hci_cb_list) { 827 list_for_each(p, &hci_cb_list) {
798 struct hci_cb *cb = list_entry(p, struct hci_cb, list); 828 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
799 if (cb->security_cfm) 829 if (cb->security_cfm)
800 cb->security_cfm(conn, status, encrypt); 830 cb->security_cfm(conn, status, encrypt);
801 } 831 }
802 read_unlock_bh(&hci_cb_list_lock); 832 read_unlock(&hci_cb_list_lock);
803} 833}
804 834
805static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status) 835static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
806{ 836{
807 struct list_head *p; 837 struct list_head *p;
808 838
809 read_lock_bh(&hci_cb_list_lock); 839 read_lock(&hci_cb_list_lock);
810 list_for_each(p, &hci_cb_list) { 840 list_for_each(p, &hci_cb_list) {
811 struct hci_cb *cb = list_entry(p, struct hci_cb, list); 841 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
812 if (cb->key_change_cfm) 842 if (cb->key_change_cfm)
813 cb->key_change_cfm(conn, status); 843 cb->key_change_cfm(conn, status);
814 } 844 }
815 read_unlock_bh(&hci_cb_list_lock); 845 read_unlock(&hci_cb_list_lock);
816} 846}
817 847
818static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, 848static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
@@ -820,13 +850,13 @@ static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
820{ 850{
821 struct list_head *p; 851 struct list_head *p;
822 852
823 read_lock_bh(&hci_cb_list_lock); 853 read_lock(&hci_cb_list_lock);
824 list_for_each(p, &hci_cb_list) { 854 list_for_each(p, &hci_cb_list) {
825 struct hci_cb *cb = list_entry(p, struct hci_cb, list); 855 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
826 if (cb->role_switch_cfm) 856 if (cb->role_switch_cfm)
827 cb->role_switch_cfm(conn, status, role); 857 cb->role_switch_cfm(conn, status, role);
828 } 858 }
829 read_unlock_bh(&hci_cb_list_lock); 859 read_unlock(&hci_cb_list_lock);
830} 860}
831 861
832int hci_register_cb(struct hci_cb *hcb); 862int hci_register_cb(struct hci_cb *hcb);
@@ -836,7 +866,7 @@ int hci_register_notifier(struct notifier_block *nb);
836int hci_unregister_notifier(struct notifier_block *nb); 866int hci_unregister_notifier(struct notifier_block *nb);
837 867
838int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param); 868int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
839void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags); 869void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags);
840void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb); 870void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
841 871
842void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode); 872void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
@@ -849,44 +879,63 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
849 879
850/* Management interface */ 880/* Management interface */
851int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len); 881int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
852int mgmt_index_added(u16 index); 882int mgmt_index_added(struct hci_dev *hdev);
853int mgmt_index_removed(u16 index); 883int mgmt_index_removed(struct hci_dev *hdev);
854int mgmt_powered(u16 index, u8 powered); 884int mgmt_powered(struct hci_dev *hdev, u8 powered);
855int mgmt_discoverable(u16 index, u8 discoverable); 885int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable);
856int mgmt_connectable(u16 index, u8 connectable); 886int mgmt_connectable(struct hci_dev *hdev, u8 connectable);
857int mgmt_new_key(u16 index, struct link_key *key, u8 persistent); 887int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status);
858int mgmt_connected(u16 index, bdaddr_t *bdaddr, u8 link_type); 888int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
859int mgmt_disconnected(u16 index, bdaddr_t *bdaddr); 889 u8 persistent);
860int mgmt_disconnect_failed(u16 index); 890int mgmt_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
861int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status); 891 u8 addr_type);
862int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr, u8 secure); 892int mgmt_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
863int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status); 893 u8 addr_type);
864int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status); 894int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status);
865int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value, 895int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
866 u8 confirm_hint); 896 u8 addr_type, u8 status);
867int mgmt_user_confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status); 897int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure);
868int mgmt_user_confirm_neg_reply_complete(u16 index, bdaddr_t *bdaddr, 898int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
869 u8 status); 899 u8 status);
870int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status); 900int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
871int mgmt_set_local_name_complete(u16 index, u8 *name, u8 status);
872int mgmt_read_local_oob_data_reply_complete(u16 index, u8 *hash, u8 *randomizer,
873 u8 status); 901 u8 status);
874int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi, 902int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
875 u8 *eir); 903 __le32 value, u8 confirm_hint);
876int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 *name); 904int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
877int mgmt_discovering(u16 index, u8 discovering); 905 u8 status);
878int mgmt_device_blocked(u16 index, bdaddr_t *bdaddr); 906int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev,
879int mgmt_device_unblocked(u16 index, bdaddr_t *bdaddr); 907 bdaddr_t *bdaddr, u8 status);
908int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr);
909int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
910 u8 status);
911int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev,
912 bdaddr_t *bdaddr, u8 status);
913int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status);
914int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
915int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
916 u8 *randomizer, u8 status);
917int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
918 u8 addr_type, u8 *dev_class, s8 rssi, u8 *eir);
919int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name);
920int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status);
921int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status);
922int mgmt_discovering(struct hci_dev *hdev, u8 discovering);
923int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr);
924int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr);
880 925
881/* HCI info for socket */ 926/* HCI info for socket */
882#define hci_pi(sk) ((struct hci_pinfo *) sk) 927#define hci_pi(sk) ((struct hci_pinfo *) sk)
883 928
929/* HCI socket flags */
930#define HCI_PI_MGMT_INIT 0
931
884struct hci_pinfo { 932struct hci_pinfo {
885 struct bt_sock bt; 933 struct bt_sock bt;
886 struct hci_dev *hdev; 934 struct hci_dev *hdev;
887 struct hci_filter filter; 935 struct hci_filter filter;
888 __u32 cmsg_mask; 936 __u32 cmsg_mask;
889 unsigned short channel; 937 unsigned short channel;
938 unsigned long flags;
890}; 939};
891 940
892/* HCI security filter */ 941/* HCI security filter */
@@ -915,4 +964,7 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
915void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16]); 964void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16]);
916void hci_le_ltk_neg_reply(struct hci_conn *conn); 965void hci_le_ltk_neg_reply(struct hci_conn *conn);
917 966
967int hci_do_inquiry(struct hci_dev *hdev, u8 length);
968int hci_cancel_inquiry(struct hci_dev *hdev);
969
918#endif /* __HCI_CORE_H */ 970#endif /* __HCI_CORE_H */
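A minimal sketch (not part of the patch) of what the management-interface change above means for a caller: the mgmt_* helpers now identify the controller by its struct hci_dev rather than by a u16 index. The function name below is hypothetical.

static void example_notify_powered(struct hci_dev *hdev, u8 on)
{
	/* Before this change the call site passed a numeric controller
	 * index, e.g. mgmt_powered(index, on); now the device structure
	 * itself is handed over. */
	mgmt_powered(hdev, on);
}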
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 6cc18f371675..68f589150692 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -27,17 +27,23 @@
27#ifndef __L2CAP_H 27#ifndef __L2CAP_H
28#define __L2CAP_H 28#define __L2CAP_H
29 29
30#include <asm/unaligned.h>
31
30/* L2CAP defaults */ 32/* L2CAP defaults */
31#define L2CAP_DEFAULT_MTU 672 33#define L2CAP_DEFAULT_MTU 672
32#define L2CAP_DEFAULT_MIN_MTU 48 34#define L2CAP_DEFAULT_MIN_MTU 48
33#define L2CAP_DEFAULT_FLUSH_TO 0xffff 35#define L2CAP_DEFAULT_FLUSH_TO 0xffff
34#define L2CAP_DEFAULT_TX_WINDOW 63 36#define L2CAP_DEFAULT_TX_WINDOW 63
37#define L2CAP_DEFAULT_EXT_WINDOW 0x3FFF
35#define L2CAP_DEFAULT_MAX_TX 3 38#define L2CAP_DEFAULT_MAX_TX 3
36#define L2CAP_DEFAULT_RETRANS_TO 2000 /* 2 seconds */ 39#define L2CAP_DEFAULT_RETRANS_TO 2000 /* 2 seconds */
37#define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */ 40#define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */
38#define L2CAP_DEFAULT_MAX_PDU_SIZE 1009 /* Sized for 3-DH5 packet */ 41#define L2CAP_DEFAULT_MAX_PDU_SIZE 1009 /* Sized for 3-DH5 packet */
39#define L2CAP_DEFAULT_ACK_TO 200 42#define L2CAP_DEFAULT_ACK_TO 200
40#define L2CAP_LE_DEFAULT_MTU 23 43#define L2CAP_LE_DEFAULT_MTU 23
44#define L2CAP_DEFAULT_MAX_SDU_SIZE 0xFFFF
45#define L2CAP_DEFAULT_SDU_ITIME 0xFFFFFFFF
46#define L2CAP_DEFAULT_ACC_LAT 0xFFFFFFFF
41 47
42#define L2CAP_DISC_TIMEOUT (100) 48#define L2CAP_DISC_TIMEOUT (100)
43#define L2CAP_DISC_REJ_TIMEOUT (5000) /* 5 seconds */ 49#define L2CAP_DISC_REJ_TIMEOUT (5000) /* 5 seconds */
@@ -91,52 +97,82 @@ struct l2cap_conninfo {
91#define L2CAP_ECHO_RSP 0x09 97#define L2CAP_ECHO_RSP 0x09
92#define L2CAP_INFO_REQ 0x0a 98#define L2CAP_INFO_REQ 0x0a
93#define L2CAP_INFO_RSP 0x0b 99#define L2CAP_INFO_RSP 0x0b
100#define L2CAP_CREATE_CHAN_REQ 0x0c
101#define L2CAP_CREATE_CHAN_RSP 0x0d
102#define L2CAP_MOVE_CHAN_REQ 0x0e
103#define L2CAP_MOVE_CHAN_RSP 0x0f
104#define L2CAP_MOVE_CHAN_CFM 0x10
105#define L2CAP_MOVE_CHAN_CFM_RSP 0x11
94#define L2CAP_CONN_PARAM_UPDATE_REQ 0x12 106#define L2CAP_CONN_PARAM_UPDATE_REQ 0x12
95#define L2CAP_CONN_PARAM_UPDATE_RSP 0x13 107#define L2CAP_CONN_PARAM_UPDATE_RSP 0x13
96 108
97/* L2CAP feature mask */ 109/* L2CAP extended feature mask */
98#define L2CAP_FEAT_FLOWCTL 0x00000001 110#define L2CAP_FEAT_FLOWCTL 0x00000001
99#define L2CAP_FEAT_RETRANS 0x00000002 111#define L2CAP_FEAT_RETRANS 0x00000002
112#define L2CAP_FEAT_BIDIR_QOS 0x00000004
100#define L2CAP_FEAT_ERTM 0x00000008 113#define L2CAP_FEAT_ERTM 0x00000008
101#define L2CAP_FEAT_STREAMING 0x00000010 114#define L2CAP_FEAT_STREAMING 0x00000010
102#define L2CAP_FEAT_FCS 0x00000020 115#define L2CAP_FEAT_FCS 0x00000020
116#define L2CAP_FEAT_EXT_FLOW 0x00000040
103#define L2CAP_FEAT_FIXED_CHAN 0x00000080 117#define L2CAP_FEAT_FIXED_CHAN 0x00000080
118#define L2CAP_FEAT_EXT_WINDOW 0x00000100
119#define L2CAP_FEAT_UCD 0x00000200
104 120
105/* L2CAP checksum option */ 121/* L2CAP checksum option */
106#define L2CAP_FCS_NONE 0x00 122#define L2CAP_FCS_NONE 0x00
107#define L2CAP_FCS_CRC16 0x01 123#define L2CAP_FCS_CRC16 0x01
108 124
125/* L2CAP fixed channels */
126#define L2CAP_FC_L2CAP 0x02
127#define L2CAP_FC_A2MP 0x08
128
109/* L2CAP Control Field bit masks */ 129/* L2CAP Control Field bit masks */
110#define L2CAP_CTRL_SAR 0xC000 130#define L2CAP_CTRL_SAR 0xC000
111#define L2CAP_CTRL_REQSEQ 0x3F00 131#define L2CAP_CTRL_REQSEQ 0x3F00
112#define L2CAP_CTRL_TXSEQ 0x007E 132#define L2CAP_CTRL_TXSEQ 0x007E
113#define L2CAP_CTRL_RETRANS 0x0080 133#define L2CAP_CTRL_SUPERVISE 0x000C
114#define L2CAP_CTRL_FINAL 0x0080 134
115#define L2CAP_CTRL_POLL 0x0010 135#define L2CAP_CTRL_RETRANS 0x0080
116#define L2CAP_CTRL_SUPERVISE 0x000C 136#define L2CAP_CTRL_FINAL 0x0080
117#define L2CAP_CTRL_FRAME_TYPE 0x0001 /* I- or S-Frame */ 137#define L2CAP_CTRL_POLL 0x0010
118 138#define L2CAP_CTRL_FRAME_TYPE 0x0001 /* I- or S-Frame */
119#define L2CAP_CTRL_TXSEQ_SHIFT 1 139
120#define L2CAP_CTRL_REQSEQ_SHIFT 8 140#define L2CAP_CTRL_TXSEQ_SHIFT 1
121#define L2CAP_CTRL_SAR_SHIFT 14 141#define L2CAP_CTRL_SUPER_SHIFT 2
142#define L2CAP_CTRL_REQSEQ_SHIFT 8
143#define L2CAP_CTRL_SAR_SHIFT 14
144
145/* L2CAP Extended Control Field bit mask */
146#define L2CAP_EXT_CTRL_TXSEQ 0xFFFC0000
147#define L2CAP_EXT_CTRL_SAR 0x00030000
148#define L2CAP_EXT_CTRL_SUPERVISE 0x00030000
149#define L2CAP_EXT_CTRL_REQSEQ 0x0000FFFC
150
151#define L2CAP_EXT_CTRL_POLL 0x00040000
152#define L2CAP_EXT_CTRL_FINAL 0x00000002
153#define L2CAP_EXT_CTRL_FRAME_TYPE 0x00000001 /* I- or S-Frame */
154
155#define L2CAP_EXT_CTRL_REQSEQ_SHIFT 2
156#define L2CAP_EXT_CTRL_SAR_SHIFT 16
157#define L2CAP_EXT_CTRL_SUPER_SHIFT 16
158#define L2CAP_EXT_CTRL_TXSEQ_SHIFT 18
122 159
123/* L2CAP Supervisory Function */ 160/* L2CAP Supervisory Function */
124#define L2CAP_SUPER_RCV_READY 0x0000 161#define L2CAP_SUPER_RR 0x00
125#define L2CAP_SUPER_REJECT 0x0004 162#define L2CAP_SUPER_REJ 0x01
126#define L2CAP_SUPER_RCV_NOT_READY 0x0008 163#define L2CAP_SUPER_RNR 0x02
127#define L2CAP_SUPER_SELECT_REJECT 0x000C 164#define L2CAP_SUPER_SREJ 0x03
128 165
129/* L2CAP Segmentation and Reassembly */ 166/* L2CAP Segmentation and Reassembly */
130#define L2CAP_SDU_UNSEGMENTED 0x0000 167#define L2CAP_SAR_UNSEGMENTED 0x00
131#define L2CAP_SDU_START 0x4000 168#define L2CAP_SAR_START 0x01
132#define L2CAP_SDU_END 0x8000 169#define L2CAP_SAR_END 0x02
133#define L2CAP_SDU_CONTINUE 0xC000 170#define L2CAP_SAR_CONTINUE 0x03
134 171
135/* L2CAP Command rej. reasons */ 172/* L2CAP Command rej. reasons */
136#define L2CAP_REJ_NOT_UNDERSTOOD 0x0000 173#define L2CAP_REJ_NOT_UNDERSTOOD 0x0000
137#define L2CAP_REJ_MTU_EXCEEDED 0x0001 174#define L2CAP_REJ_MTU_EXCEEDED 0x0001
138#define L2CAP_REJ_INVALID_CID 0x0002 175#define L2CAP_REJ_INVALID_CID 0x0002
139
140 176
141/* L2CAP structures */ 177/* L2CAP structures */
142struct l2cap_hdr { 178struct l2cap_hdr {
@@ -144,6 +180,12 @@ struct l2cap_hdr {
144 __le16 cid; 180 __le16 cid;
145} __packed; 181} __packed;
146#define L2CAP_HDR_SIZE 4 182#define L2CAP_HDR_SIZE 4
183#define L2CAP_ENH_HDR_SIZE 6
184#define L2CAP_EXT_HDR_SIZE 8
185
186#define L2CAP_FCS_SIZE 2
187#define L2CAP_SDULEN_SIZE 2
188#define L2CAP_PSMLEN_SIZE 2
147 189
148struct l2cap_cmd_hdr { 190struct l2cap_cmd_hdr {
149 __u8 code; 191 __u8 code;
@@ -188,14 +230,15 @@ struct l2cap_conn_rsp {
188#define L2CAP_CID_DYN_START 0x0040 230#define L2CAP_CID_DYN_START 0x0040
189#define L2CAP_CID_DYN_END 0xffff 231#define L2CAP_CID_DYN_END 0xffff
190 232
191/* connect result */ 233/* connect/create channel results */
192#define L2CAP_CR_SUCCESS 0x0000 234#define L2CAP_CR_SUCCESS 0x0000
193#define L2CAP_CR_PEND 0x0001 235#define L2CAP_CR_PEND 0x0001
194#define L2CAP_CR_BAD_PSM 0x0002 236#define L2CAP_CR_BAD_PSM 0x0002
195#define L2CAP_CR_SEC_BLOCK 0x0003 237#define L2CAP_CR_SEC_BLOCK 0x0003
196#define L2CAP_CR_NO_MEM 0x0004 238#define L2CAP_CR_NO_MEM 0x0004
239#define L2CAP_CR_BAD_AMP 0x0005
197 240
198/* connect status */ 241/* connect/create channel status */
199#define L2CAP_CS_NO_INFO 0x0000 242#define L2CAP_CS_NO_INFO 0x0000
200#define L2CAP_CS_AUTHEN_PEND 0x0001 243#define L2CAP_CS_AUTHEN_PEND 0x0001
201#define L2CAP_CS_AUTHOR_PEND 0x0002 244#define L2CAP_CS_AUTHOR_PEND 0x0002
@@ -217,6 +260,8 @@ struct l2cap_conf_rsp {
217#define L2CAP_CONF_UNACCEPT 0x0001 260#define L2CAP_CONF_UNACCEPT 0x0001
218#define L2CAP_CONF_REJECT 0x0002 261#define L2CAP_CONF_REJECT 0x0002
219#define L2CAP_CONF_UNKNOWN 0x0003 262#define L2CAP_CONF_UNKNOWN 0x0003
263#define L2CAP_CONF_PENDING 0x0004
264#define L2CAP_CONF_EFS_REJECT 0x0005
220 265
221struct l2cap_conf_opt { 266struct l2cap_conf_opt {
222 __u8 type; 267 __u8 type;
@@ -233,6 +278,8 @@ struct l2cap_conf_opt {
233#define L2CAP_CONF_QOS 0x03 278#define L2CAP_CONF_QOS 0x03
234#define L2CAP_CONF_RFC 0x04 279#define L2CAP_CONF_RFC 0x04
235#define L2CAP_CONF_FCS 0x05 280#define L2CAP_CONF_FCS 0x05
281#define L2CAP_CONF_EFS 0x06
282#define L2CAP_CONF_EWS 0x07
236 283
237#define L2CAP_CONF_MAX_SIZE 22 284#define L2CAP_CONF_MAX_SIZE 22
238 285
@@ -251,6 +298,21 @@ struct l2cap_conf_rfc {
251#define L2CAP_MODE_ERTM 0x03 298#define L2CAP_MODE_ERTM 0x03
252#define L2CAP_MODE_STREAMING 0x04 299#define L2CAP_MODE_STREAMING 0x04
253 300
301struct l2cap_conf_efs {
302 __u8 id;
303 __u8 stype;
304 __le16 msdu;
305 __le32 sdu_itime;
306 __le32 acc_lat;
307 __le32 flush_to;
308} __packed;
309
310#define L2CAP_SERV_NOTRAFIC 0x00
311#define L2CAP_SERV_BESTEFFORT 0x01
312#define L2CAP_SERV_GUARANTEED 0x02
313
314#define L2CAP_BESTEFFORT_ID 0x01
315
254struct l2cap_disconn_req { 316struct l2cap_disconn_req {
255 __le16 dcid; 317 __le16 dcid;
256 __le16 scid; 318 __le16 scid;
@@ -271,14 +333,57 @@ struct l2cap_info_rsp {
271 __u8 data[0]; 333 __u8 data[0];
272} __packed; 334} __packed;
273 335
336struct l2cap_create_chan_req {
337 __le16 psm;
338 __le16 scid;
339 __u8 amp_id;
340} __packed;
341
342struct l2cap_create_chan_rsp {
343 __le16 dcid;
344 __le16 scid;
345 __le16 result;
346 __le16 status;
347} __packed;
348
349struct l2cap_move_chan_req {
350 __le16 icid;
351 __u8 dest_amp_id;
352} __packed;
353
354struct l2cap_move_chan_rsp {
355 __le16 icid;
356 __le16 result;
357} __packed;
358
359#define L2CAP_MR_SUCCESS 0x0000
360#define L2CAP_MR_PEND 0x0001
361#define L2CAP_MR_BAD_ID 0x0002
362#define L2CAP_MR_SAME_ID 0x0003
363#define L2CAP_MR_NOT_SUPP 0x0004
364#define L2CAP_MR_COLLISION 0x0005
365#define L2CAP_MR_NOT_ALLOWED 0x0006
366
367struct l2cap_move_chan_cfm {
368 __le16 icid;
369 __le16 result;
370} __packed;
371
372#define L2CAP_MC_CONFIRMED 0x0000
373#define L2CAP_MC_UNCONFIRMED 0x0001
374
375struct l2cap_move_chan_cfm_rsp {
376 __le16 icid;
377} __packed;
378
274/* info type */ 379/* info type */
275#define L2CAP_IT_CL_MTU 0x0001 380#define L2CAP_IT_CL_MTU 0x0001
276#define L2CAP_IT_FEAT_MASK 0x0002 381#define L2CAP_IT_FEAT_MASK 0x0002
277#define L2CAP_IT_FIXED_CHAN 0x0003 382#define L2CAP_IT_FIXED_CHAN 0x0003
278 383
279/* info result */ 384/* info result */
280#define L2CAP_IR_SUCCESS 0x0000 385#define L2CAP_IR_SUCCESS 0x0000
281#define L2CAP_IR_NOTSUPP 0x0001 386#define L2CAP_IR_NOTSUPP 0x0001
282 387
283struct l2cap_conn_param_update_req { 388struct l2cap_conn_param_update_req {
284 __le16 min; 389 __le16 min;
@@ -297,7 +402,7 @@ struct l2cap_conn_param_update_rsp {
297 402
298/* ----- L2CAP channels and connections ----- */ 403/* ----- L2CAP channels and connections ----- */
299struct srej_list { 404struct srej_list {
300 __u8 tx_seq; 405 __u16 tx_seq;
301 struct list_head list; 406 struct list_head list;
302}; 407};
303 408
@@ -319,14 +424,11 @@ struct l2cap_chan {
319 __u16 flush_to; 424 __u16 flush_to;
320 __u8 mode; 425 __u8 mode;
321 __u8 chan_type; 426 __u8 chan_type;
427 __u8 chan_policy;
322 428
323 __le16 sport; 429 __le16 sport;
324 430
325 __u8 sec_level; 431 __u8 sec_level;
326 __u8 role_switch;
327 __u8 force_reliable;
328 __u8 flushable;
329 __u8 force_active;
330 432
331 __u8 ident; 433 __u8 ident;
332 434
@@ -337,7 +439,8 @@ struct l2cap_chan {
337 439
338 __u8 fcs; 440 __u8 fcs;
339 441
340 __u8 tx_win; 442 __u16 tx_win;
443 __u16 tx_win_max;
341 __u8 max_tx; 444 __u8 max_tx;
342 __u16 retrans_timeout; 445 __u16 retrans_timeout;
343 __u16 monitor_timeout; 446 __u16 monitor_timeout;
@@ -345,29 +448,45 @@ struct l2cap_chan {
345 448
346 unsigned long conf_state; 449 unsigned long conf_state;
347 unsigned long conn_state; 450 unsigned long conn_state;
348 451 unsigned long flags;
349 __u8 next_tx_seq; 452
350 __u8 expected_ack_seq; 453 __u16 next_tx_seq;
351 __u8 expected_tx_seq; 454 __u16 expected_ack_seq;
352 __u8 buffer_seq; 455 __u16 expected_tx_seq;
353 __u8 buffer_seq_srej; 456 __u16 buffer_seq;
354 __u8 srej_save_reqseq; 457 __u16 buffer_seq_srej;
355 __u8 frames_sent; 458 __u16 srej_save_reqseq;
356 __u8 unacked_frames; 459 __u16 frames_sent;
460 __u16 unacked_frames;
357 __u8 retry_count; 461 __u8 retry_count;
358 __u8 num_acked; 462 __u8 num_acked;
359 __u16 sdu_len; 463 __u16 sdu_len;
360 struct sk_buff *sdu; 464 struct sk_buff *sdu;
361 struct sk_buff *sdu_last_frag; 465 struct sk_buff *sdu_last_frag;
362 466
363 __u8 remote_tx_win; 467 __u16 remote_tx_win;
364 __u8 remote_max_tx; 468 __u8 remote_max_tx;
365 __u16 remote_mps; 469 __u16 remote_mps;
366 470
367 struct timer_list chan_timer; 471 __u8 local_id;
368 struct timer_list retrans_timer; 472 __u8 local_stype;
369 struct timer_list monitor_timer; 473 __u16 local_msdu;
370 struct timer_list ack_timer; 474 __u32 local_sdu_itime;
475 __u32 local_acc_lat;
476 __u32 local_flush_to;
477
478 __u8 remote_id;
479 __u8 remote_stype;
480 __u16 remote_msdu;
481 __u32 remote_sdu_itime;
482 __u32 remote_acc_lat;
483 __u32 remote_flush_to;
484
485 struct delayed_work chan_timer;
486 struct delayed_work retrans_timer;
487 struct delayed_work monitor_timer;
488 struct delayed_work ack_timer;
489
371 struct sk_buff *tx_send_head; 490 struct sk_buff *tx_send_head;
372 struct sk_buff_head tx_q; 491 struct sk_buff_head tx_q;
373 struct sk_buff_head srej_q; 492 struct sk_buff_head srej_q;
@@ -391,6 +510,7 @@ struct l2cap_ops {
391 510
392struct l2cap_conn { 511struct l2cap_conn {
393 struct hci_conn *hcon; 512 struct hci_conn *hcon;
513 struct hci_chan *hchan;
394 514
395 bdaddr_t *dst; 515 bdaddr_t *dst;
396 bdaddr_t *src; 516 bdaddr_t *src;
@@ -402,7 +522,7 @@ struct l2cap_conn {
402 __u8 info_state; 522 __u8 info_state;
403 __u8 info_ident; 523 __u8 info_ident;
404 524
405 struct timer_list info_timer; 525 struct delayed_work info_timer;
406 526
407 spinlock_t lock; 527 spinlock_t lock;
408 528
@@ -412,11 +532,11 @@ struct l2cap_conn {
412 532
413 __u8 disc_reason; 533 __u8 disc_reason;
414 534
415 struct timer_list security_timer; 535 struct delayed_work security_timer;
416 struct smp_chan *smp_chan; 536 struct smp_chan *smp_chan;
417 537
418 struct list_head chan_l; 538 struct list_head chan_l;
419 rwlock_t chan_lock; 539 struct mutex chan_lock;
420}; 540};
421 541
422#define L2CAP_INFO_CL_MTU_REQ_SENT 0x01 542#define L2CAP_INFO_CL_MTU_REQ_SENT 0x01
@@ -445,6 +565,9 @@ enum {
445 CONF_CONNECT_PEND, 565 CONF_CONNECT_PEND,
446 CONF_NO_FCS_RECV, 566 CONF_NO_FCS_RECV,
447 CONF_STATE2_DEVICE, 567 CONF_STATE2_DEVICE,
568 CONF_EWS_RECV,
569 CONF_LOC_CONF_PEND,
570 CONF_REM_CONF_PEND,
448}; 571};
449 572
450#define L2CAP_CONF_MAX_CONF_REQ 2 573#define L2CAP_CONF_MAX_CONF_REQ 2
@@ -462,6 +585,44 @@ enum {
462 CONN_RNR_SENT, 585 CONN_RNR_SENT,
463}; 586};
464 587
588/* Definitions for flags in l2cap_chan */
589enum {
590 FLAG_ROLE_SWITCH,
591 FLAG_FORCE_ACTIVE,
592 FLAG_FORCE_RELIABLE,
593 FLAG_FLUSHABLE,
594 FLAG_EXT_CTRL,
595 FLAG_EFS_ENABLE,
596};
597
598static inline void l2cap_chan_hold(struct l2cap_chan *c)
599{
600 atomic_inc(&c->refcnt);
601}
602
603static inline void l2cap_chan_put(struct l2cap_chan *c)
604{
605 if (atomic_dec_and_test(&c->refcnt))
606 kfree(c);
607}
608
609static inline void l2cap_set_timer(struct l2cap_chan *chan,
610 struct delayed_work *work, long timeout)
611{
612 BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout);
613
614 if (!__cancel_delayed_work(work))
615 l2cap_chan_hold(chan);
616 schedule_delayed_work(work, timeout);
617}
618
619static inline void l2cap_clear_timer(struct l2cap_chan *chan,
620 struct delayed_work *work)
621{
622 if (__cancel_delayed_work(work))
623 l2cap_chan_put(chan);
624}
625
465#define __set_chan_timer(c, t) l2cap_set_timer(c, &c->chan_timer, (t)) 626#define __set_chan_timer(c, t) l2cap_set_timer(c, &c->chan_timer, (t))
466#define __clear_chan_timer(c) l2cap_clear_timer(c, &c->chan_timer) 627#define __clear_chan_timer(c) l2cap_clear_timer(c, &c->chan_timer)
467#define __set_retrans_timer(c) l2cap_set_timer(c, &c->retrans_timer, \ 628#define __set_retrans_timer(c) l2cap_set_timer(c, &c->retrans_timer, \
@@ -474,6 +635,22 @@ enum {
474 L2CAP_DEFAULT_ACK_TO); 635 L2CAP_DEFAULT_ACK_TO);
475#define __clear_ack_timer(c) l2cap_clear_timer(c, &c->ack_timer) 636#define __clear_ack_timer(c) l2cap_clear_timer(c, &c->ack_timer)
476 637
638static inline int __seq_offset(struct l2cap_chan *chan, __u16 seq1, __u16 seq2)
639{
640 int offset;
641
642 offset = (seq1 - seq2) % (chan->tx_win_max + 1);
643 if (offset < 0)
644 offset += (chan->tx_win_max + 1);
645
646 return offset;
647}
648
649static inline __u16 __next_seq(struct l2cap_chan *chan, __u16 seq)
650{
651 return (seq + 1) % (chan->tx_win_max + 1);
652}
653
477static inline int l2cap_tx_window_full(struct l2cap_chan *ch) 654static inline int l2cap_tx_window_full(struct l2cap_chan *ch)
478{ 655{
479 int sub; 656 int sub;
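The __seq_offset() and __next_seq() helpers introduced above keep all ERTM sequence arithmetic modulo (tx_win_max + 1), so one code path serves both the 6-bit and the 14-bit sequence space. The standalone program below (illustration only, not kernel code) reproduces that arithmetic; the window maxima 63 and 0x3FFF are assumed from L2CAP_DEFAULT_TX_WINDOW and L2CAP_DEFAULT_EXT_WINDOW defined earlier in this header.

#include <stdio.h>

/* Mirrors __seq_offset(): distance from seq2 forward to seq1 in a
 * sequence space of tx_win_max + 1 values. */
static int seq_offset(int tx_win_max, int seq1, int seq2)
{
	int offset = (seq1 - seq2) % (tx_win_max + 1);

	if (offset < 0)
		offset += tx_win_max + 1;
	return offset;
}

int main(void)
{
	/* Sequence 1 is two frames ahead of sequence 63 once the 6-bit
	 * space wraps around. */
	printf("offset in 6-bit space:  %d\n", seq_offset(63, 1, 63));
	/* The same pair evaluated in the extended 14-bit space. */
	printf("offset in 14-bit space: %d\n", seq_offset(0x3FFF, 1, 0x3FFF));
	return 0;
}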
@@ -486,13 +663,164 @@ static inline int l2cap_tx_window_full(struct l2cap_chan *ch)
486 return sub == ch->remote_tx_win; 663 return sub == ch->remote_tx_win;
487} 664}
488 665
489#define __get_txseq(ctrl) (((ctrl) & L2CAP_CTRL_TXSEQ) >> 1) 666static inline __u16 __get_reqseq(struct l2cap_chan *chan, __u32 ctrl)
490#define __get_reqseq(ctrl) (((ctrl) & L2CAP_CTRL_REQSEQ) >> 8) 667{
491#define __is_iframe(ctrl) (!((ctrl) & L2CAP_CTRL_FRAME_TYPE)) 668 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
492#define __is_sframe(ctrl) ((ctrl) & L2CAP_CTRL_FRAME_TYPE) 669 return (ctrl & L2CAP_EXT_CTRL_REQSEQ) >>
493#define __is_sar_start(ctrl) (((ctrl) & L2CAP_CTRL_SAR) == L2CAP_SDU_START) 670 L2CAP_EXT_CTRL_REQSEQ_SHIFT;
671 else
672 return (ctrl & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
673}
674
675static inline __u32 __set_reqseq(struct l2cap_chan *chan, __u32 reqseq)
676{
677 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
678 return (reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT) &
679 L2CAP_EXT_CTRL_REQSEQ;
680 else
681 return (reqseq << L2CAP_CTRL_REQSEQ_SHIFT) & L2CAP_CTRL_REQSEQ;
682}
683
684static inline __u16 __get_txseq(struct l2cap_chan *chan, __u32 ctrl)
685{
686 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
687 return (ctrl & L2CAP_EXT_CTRL_TXSEQ) >>
688 L2CAP_EXT_CTRL_TXSEQ_SHIFT;
689 else
690 return (ctrl & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
691}
692
693static inline __u32 __set_txseq(struct l2cap_chan *chan, __u32 txseq)
694{
695 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
696 return (txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT) &
697 L2CAP_EXT_CTRL_TXSEQ;
698 else
699 return (txseq << L2CAP_CTRL_TXSEQ_SHIFT) & L2CAP_CTRL_TXSEQ;
700}
701
702static inline bool __is_sframe(struct l2cap_chan *chan, __u32 ctrl)
703{
704 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
705 return ctrl & L2CAP_EXT_CTRL_FRAME_TYPE;
706 else
707 return ctrl & L2CAP_CTRL_FRAME_TYPE;
708}
709
710static inline __u32 __set_sframe(struct l2cap_chan *chan)
711{
712 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
713 return L2CAP_EXT_CTRL_FRAME_TYPE;
714 else
715 return L2CAP_CTRL_FRAME_TYPE;
716}
717
718static inline __u8 __get_ctrl_sar(struct l2cap_chan *chan, __u32 ctrl)
719{
720 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
721 return (ctrl & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
722 else
723 return (ctrl & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
724}
725
726static inline __u32 __set_ctrl_sar(struct l2cap_chan *chan, __u32 sar)
727{
728 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
729 return (sar << L2CAP_EXT_CTRL_SAR_SHIFT) & L2CAP_EXT_CTRL_SAR;
730 else
731 return (sar << L2CAP_CTRL_SAR_SHIFT) & L2CAP_CTRL_SAR;
732}
733
734static inline bool __is_sar_start(struct l2cap_chan *chan, __u32 ctrl)
735{
736 return __get_ctrl_sar(chan, ctrl) == L2CAP_SAR_START;
737}
738
739static inline __u32 __get_sar_mask(struct l2cap_chan *chan)
740{
741 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
742 return L2CAP_EXT_CTRL_SAR;
743 else
744 return L2CAP_CTRL_SAR;
745}
746
747static inline __u8 __get_ctrl_super(struct l2cap_chan *chan, __u32 ctrl)
748{
749 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
750 return (ctrl & L2CAP_EXT_CTRL_SUPERVISE) >>
751 L2CAP_EXT_CTRL_SUPER_SHIFT;
752 else
753 return (ctrl & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
754}
755
756static inline __u32 __set_ctrl_super(struct l2cap_chan *chan, __u32 super)
757{
758 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
759 return (super << L2CAP_EXT_CTRL_SUPER_SHIFT) &
760 L2CAP_EXT_CTRL_SUPERVISE;
761 else
762 return (super << L2CAP_CTRL_SUPER_SHIFT) &
763 L2CAP_CTRL_SUPERVISE;
764}
765
766static inline __u32 __set_ctrl_final(struct l2cap_chan *chan)
767{
768 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
769 return L2CAP_EXT_CTRL_FINAL;
770 else
771 return L2CAP_CTRL_FINAL;
772}
773
774static inline bool __is_ctrl_final(struct l2cap_chan *chan, __u32 ctrl)
775{
776 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
777 return ctrl & L2CAP_EXT_CTRL_FINAL;
778 else
779 return ctrl & L2CAP_CTRL_FINAL;
780}
781
782static inline __u32 __set_ctrl_poll(struct l2cap_chan *chan)
783{
784 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
785 return L2CAP_EXT_CTRL_POLL;
786 else
787 return L2CAP_CTRL_POLL;
788}
789
790static inline bool __is_ctrl_poll(struct l2cap_chan *chan, __u32 ctrl)
791{
792 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
793 return ctrl & L2CAP_EXT_CTRL_POLL;
794 else
795 return ctrl & L2CAP_CTRL_POLL;
796}
797
798static inline __u32 __get_control(struct l2cap_chan *chan, void *p)
799{
800 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
801 return get_unaligned_le32(p);
802 else
803 return get_unaligned_le16(p);
804}
805
806static inline void __put_control(struct l2cap_chan *chan, __u32 control,
807 void *p)
808{
809 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
810 return put_unaligned_le32(control, p);
811 else
812 return put_unaligned_le16(control, p);
813}
814
815static inline __u8 __ctrl_size(struct l2cap_chan *chan)
816{
817 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
818 return L2CAP_EXT_HDR_SIZE - L2CAP_HDR_SIZE;
819 else
820 return L2CAP_ENH_HDR_SIZE - L2CAP_HDR_SIZE;
821}
494 822
495extern int disable_ertm; 823extern bool disable_ertm;
496 824
497int l2cap_init_sockets(void); 825int l2cap_init_sockets(void);
498void l2cap_cleanup_sockets(void); 826void l2cap_cleanup_sockets(void);
@@ -506,8 +834,11 @@ int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid);
506struct l2cap_chan *l2cap_chan_create(struct sock *sk); 834struct l2cap_chan *l2cap_chan_create(struct sock *sk);
507void l2cap_chan_close(struct l2cap_chan *chan, int reason); 835void l2cap_chan_close(struct l2cap_chan *chan, int reason);
508void l2cap_chan_destroy(struct l2cap_chan *chan); 836void l2cap_chan_destroy(struct l2cap_chan *chan);
509int l2cap_chan_connect(struct l2cap_chan *chan); 837inline int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
510int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len); 838 bdaddr_t *dst);
839int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
840 u32 priority);
511void l2cap_chan_busy(struct l2cap_chan *chan, int busy); 841void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
842int l2cap_chan_check_security(struct l2cap_chan *chan);
512 843
513#endif /* __L2CAP_H */ 844#endif /* __L2CAP_H */
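Many of the helpers added above (__set_txseq(), __get_reqseq(), __set_ctrl_sar() and friends) differ only in which mask and shift they apply, chosen by FLAG_EXT_CTRL. The standalone program below (illustration only; the constants are copied from the definitions above) packs and unpacks the same sequence numbers for the 16-bit enhanced and the 32-bit extended control field.

#include <stdio.h>
#include <stdint.h>

#define L2CAP_CTRL_TXSEQ		0x007E
#define L2CAP_CTRL_REQSEQ		0x3F00
#define L2CAP_CTRL_TXSEQ_SHIFT		1
#define L2CAP_CTRL_REQSEQ_SHIFT		8

#define L2CAP_EXT_CTRL_TXSEQ		0xFFFC0000
#define L2CAP_EXT_CTRL_REQSEQ		0x0000FFFC
#define L2CAP_EXT_CTRL_TXSEQ_SHIFT	18
#define L2CAP_EXT_CTRL_REQSEQ_SHIFT	2

int main(void)
{
	uint32_t txseq = 5, reqseq = 9;

	/* Enhanced control field: 6-bit TxSeq/ReqSeq packed into 16 bits. */
	uint16_t enh = ((txseq << L2CAP_CTRL_TXSEQ_SHIFT) & L2CAP_CTRL_TXSEQ) |
		       ((reqseq << L2CAP_CTRL_REQSEQ_SHIFT) & L2CAP_CTRL_REQSEQ);

	/* Extended control field: 14-bit TxSeq/ReqSeq packed into 32 bits. */
	uint32_t ext = ((txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT) & L2CAP_EXT_CTRL_TXSEQ) |
		       ((reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT) & L2CAP_EXT_CTRL_REQSEQ);

	printf("enhanced ctrl 0x%04x -> txseq %u\n", (unsigned)enh,
	       (unsigned)((enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT));
	printf("extended ctrl 0x%08x -> txseq %u\n", (unsigned)ext,
	       (unsigned)((ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT));
	return 0;
}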
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index d66da0f94f95..be65d3417883 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -23,6 +23,23 @@
23 23
24#define MGMT_INDEX_NONE 0xFFFF 24#define MGMT_INDEX_NONE 0xFFFF
25 25
26#define MGMT_STATUS_SUCCESS 0x00
27#define MGMT_STATUS_UNKNOWN_COMMAND 0x01
28#define MGMT_STATUS_NOT_CONNECTED 0x02
29#define MGMT_STATUS_FAILED 0x03
30#define MGMT_STATUS_CONNECT_FAILED 0x04
31#define MGMT_STATUS_AUTH_FAILED 0x05
32#define MGMT_STATUS_NOT_PAIRED 0x06
33#define MGMT_STATUS_NO_RESOURCES 0x07
34#define MGMT_STATUS_TIMEOUT 0x08
35#define MGMT_STATUS_ALREADY_CONNECTED 0x09
36#define MGMT_STATUS_BUSY 0x0a
37#define MGMT_STATUS_REJECTED 0x0b
38#define MGMT_STATUS_NOT_SUPPORTED 0x0c
39#define MGMT_STATUS_INVALID_PARAMS 0x0d
40#define MGMT_STATUS_DISCONNECTED 0x0e
41#define MGMT_STATUS_NOT_POWERED 0x0f
42
26struct mgmt_hdr { 43struct mgmt_hdr {
27 __le16 opcode; 44 __le16 opcode;
28 __le16 index; 45 __le16 index;
@@ -44,22 +61,29 @@ struct mgmt_rp_read_index_list {
44/* Reserve one extra byte for names in management messages so that they 61/* Reserve one extra byte for names in management messages so that they
45 * are always guaranteed to be nul-terminated */ 62 * are always guaranteed to be nul-terminated */
46#define MGMT_MAX_NAME_LENGTH (HCI_MAX_NAME_LENGTH + 1) 63#define MGMT_MAX_NAME_LENGTH (HCI_MAX_NAME_LENGTH + 1)
64#define MGMT_MAX_SHORT_NAME_LENGTH (10 + 1)
65
66#define MGMT_SETTING_POWERED 0x00000001
67#define MGMT_SETTING_CONNECTABLE 0x00000002
68#define MGMT_SETTING_FAST_CONNECTABLE 0x00000004
69#define MGMT_SETTING_DISCOVERABLE 0x00000008
70#define MGMT_SETTING_PAIRABLE 0x00000010
71#define MGMT_SETTING_LINK_SECURITY 0x00000020
72#define MGMT_SETTING_SSP 0x00000040
73#define MGMT_SETTING_BREDR 0x00000080
74#define MGMT_SETTING_HS 0x00000100
75#define MGMT_SETTING_LE 0x00000200
47 76
48#define MGMT_OP_READ_INFO 0x0004 77#define MGMT_OP_READ_INFO 0x0004
49struct mgmt_rp_read_info { 78struct mgmt_rp_read_info {
50 __u8 type;
51 __u8 powered;
52 __u8 connectable;
53 __u8 discoverable;
54 __u8 pairable;
55 __u8 sec_mode;
56 bdaddr_t bdaddr; 79 bdaddr_t bdaddr;
80 __u8 version;
81 __le16 manufacturer;
82 __le32 supported_settings;
83 __le32 current_settings;
57 __u8 dev_class[3]; 84 __u8 dev_class[3];
58 __u8 features[8];
59 __u16 manufacturer;
60 __u8 hci_ver;
61 __u16 hci_rev;
62 __u8 name[MGMT_MAX_NAME_LENGTH]; 85 __u8 name[MGMT_MAX_NAME_LENGTH];
86 __u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH];
63} __packed; 87} __packed;
64 88
65struct mgmt_mode { 89struct mgmt_mode {
@@ -69,70 +93,97 @@ struct mgmt_mode {
69#define MGMT_OP_SET_POWERED 0x0005 93#define MGMT_OP_SET_POWERED 0x0005
70 94
71#define MGMT_OP_SET_DISCOVERABLE 0x0006 95#define MGMT_OP_SET_DISCOVERABLE 0x0006
96struct mgmt_cp_set_discoverable {
97 __u8 val;
98 __u16 timeout;
99} __packed;
72 100
73#define MGMT_OP_SET_CONNECTABLE 0x0007 101#define MGMT_OP_SET_CONNECTABLE 0x0007
74 102
75#define MGMT_OP_SET_PAIRABLE 0x0008 103#define MGMT_OP_SET_FAST_CONNECTABLE 0x0008
76 104
77#define MGMT_OP_ADD_UUID 0x0009 105#define MGMT_OP_SET_PAIRABLE 0x0009
78struct mgmt_cp_add_uuid {
79 __u8 uuid[16];
80 __u8 svc_hint;
81} __packed;
82 106
83#define MGMT_OP_REMOVE_UUID 0x000A 107#define MGMT_OP_SET_LINK_SECURITY 0x000A
84struct mgmt_cp_remove_uuid { 108
85 __u8 uuid[16]; 109#define MGMT_OP_SET_SSP 0x000B
86} __packed; 110
111#define MGMT_OP_SET_HS 0x000C
112
113#define MGMT_OP_SET_LE 0x000D
87 114
88#define MGMT_OP_SET_DEV_CLASS 0x000B 115#define MGMT_OP_SET_DEV_CLASS 0x000E
89struct mgmt_cp_set_dev_class { 116struct mgmt_cp_set_dev_class {
90 __u8 major; 117 __u8 major;
91 __u8 minor; 118 __u8 minor;
92} __packed; 119} __packed;
93 120
94#define MGMT_OP_SET_SERVICE_CACHE 0x000C 121#define MGMT_OP_SET_LOCAL_NAME 0x000F
95struct mgmt_cp_set_service_cache { 122struct mgmt_cp_set_local_name {
96 __u8 enable; 123 __u8 name[MGMT_MAX_NAME_LENGTH];
124} __packed;
125
126#define MGMT_OP_ADD_UUID 0x0010
127struct mgmt_cp_add_uuid {
128 __u8 uuid[16];
129 __u8 svc_hint;
130} __packed;
131
132#define MGMT_OP_REMOVE_UUID 0x0011
133struct mgmt_cp_remove_uuid {
134 __u8 uuid[16];
97} __packed; 135} __packed;
98 136
99struct mgmt_key_info { 137struct mgmt_link_key_info {
100 bdaddr_t bdaddr; 138 bdaddr_t bdaddr;
101 u8 type; 139 u8 type;
102 u8 val[16]; 140 u8 val[16];
103 u8 pin_len; 141 u8 pin_len;
104 u8 dlen;
105 u8 data[0];
106} __packed; 142} __packed;
107 143
108#define MGMT_OP_LOAD_KEYS 0x000D 144#define MGMT_OP_LOAD_LINK_KEYS 0x0012
109struct mgmt_cp_load_keys { 145struct mgmt_cp_load_link_keys {
110 __u8 debug_keys; 146 __u8 debug_keys;
111 __le16 key_count; 147 __le16 key_count;
112 struct mgmt_key_info keys[0]; 148 struct mgmt_link_key_info keys[0];
113} __packed; 149} __packed;
114 150
115#define MGMT_OP_REMOVE_KEY 0x000E 151#define MGMT_OP_REMOVE_KEYS 0x0013
116struct mgmt_cp_remove_key { 152struct mgmt_cp_remove_keys {
117 bdaddr_t bdaddr; 153 bdaddr_t bdaddr;
118 __u8 disconnect; 154 __u8 disconnect;
119} __packed; 155} __packed;
156struct mgmt_rp_remove_keys {
157 bdaddr_t bdaddr;
158 __u8 status;
159};
120 160
121#define MGMT_OP_DISCONNECT 0x000F 161#define MGMT_OP_DISCONNECT 0x0014
122struct mgmt_cp_disconnect { 162struct mgmt_cp_disconnect {
123 bdaddr_t bdaddr; 163 bdaddr_t bdaddr;
124} __packed; 164} __packed;
125struct mgmt_rp_disconnect { 165struct mgmt_rp_disconnect {
126 bdaddr_t bdaddr; 166 bdaddr_t bdaddr;
167 __u8 status;
127} __packed; 168} __packed;
128 169
129#define MGMT_OP_GET_CONNECTIONS 0x0010 170#define MGMT_ADDR_BREDR 0x00
171#define MGMT_ADDR_LE_PUBLIC 0x01
172#define MGMT_ADDR_LE_RANDOM 0x02
173#define MGMT_ADDR_INVALID 0xff
174
175struct mgmt_addr_info {
176 bdaddr_t bdaddr;
177 __u8 type;
178} __packed;
179
180#define MGMT_OP_GET_CONNECTIONS 0x0015
130struct mgmt_rp_get_connections { 181struct mgmt_rp_get_connections {
131 __le16 conn_count; 182 __le16 conn_count;
132 bdaddr_t conn[0]; 183 struct mgmt_addr_info addr[0];
133} __packed; 184} __packed;
134 185
135#define MGMT_OP_PIN_CODE_REPLY 0x0011 186#define MGMT_OP_PIN_CODE_REPLY 0x0016
136struct mgmt_cp_pin_code_reply { 187struct mgmt_cp_pin_code_reply {
137 bdaddr_t bdaddr; 188 bdaddr_t bdaddr;
138 __u8 pin_len; 189 __u8 pin_len;
@@ -143,27 +194,27 @@ struct mgmt_rp_pin_code_reply {
143 uint8_t status; 194 uint8_t status;
144} __packed; 195} __packed;
145 196
146#define MGMT_OP_PIN_CODE_NEG_REPLY 0x0012 197#define MGMT_OP_PIN_CODE_NEG_REPLY 0x0017
147struct mgmt_cp_pin_code_neg_reply { 198struct mgmt_cp_pin_code_neg_reply {
148 bdaddr_t bdaddr; 199 bdaddr_t bdaddr;
149} __packed; 200} __packed;
150 201
151#define MGMT_OP_SET_IO_CAPABILITY 0x0013 202#define MGMT_OP_SET_IO_CAPABILITY 0x0018
152struct mgmt_cp_set_io_capability { 203struct mgmt_cp_set_io_capability {
153 __u8 io_capability; 204 __u8 io_capability;
154} __packed; 205} __packed;
155 206
156#define MGMT_OP_PAIR_DEVICE 0x0014 207#define MGMT_OP_PAIR_DEVICE 0x0019
157struct mgmt_cp_pair_device { 208struct mgmt_cp_pair_device {
158 bdaddr_t bdaddr; 209 struct mgmt_addr_info addr;
159 __u8 io_cap; 210 __u8 io_cap;
160} __packed; 211} __packed;
161struct mgmt_rp_pair_device { 212struct mgmt_rp_pair_device {
162 bdaddr_t bdaddr; 213 struct mgmt_addr_info addr;
163 __u8 status; 214 __u8 status;
164} __packed; 215} __packed;
165 216
166#define MGMT_OP_USER_CONFIRM_REPLY 0x0015 217#define MGMT_OP_USER_CONFIRM_REPLY 0x001A
167struct mgmt_cp_user_confirm_reply { 218struct mgmt_cp_user_confirm_reply {
168 bdaddr_t bdaddr; 219 bdaddr_t bdaddr;
169} __packed; 220} __packed;
@@ -172,48 +223,69 @@ struct mgmt_rp_user_confirm_reply {
172 __u8 status; 223 __u8 status;
173} __packed; 224} __packed;
174 225
175#define MGMT_OP_USER_CONFIRM_NEG_REPLY 0x0016 226#define MGMT_OP_USER_CONFIRM_NEG_REPLY 0x001B
227struct mgmt_cp_user_confirm_neg_reply {
228 bdaddr_t bdaddr;
229} __packed;
176 230
177#define MGMT_OP_SET_LOCAL_NAME 0x0017 231#define MGMT_OP_USER_PASSKEY_REPLY 0x001C
178struct mgmt_cp_set_local_name { 232struct mgmt_cp_user_passkey_reply {
179 __u8 name[MGMT_MAX_NAME_LENGTH]; 233 bdaddr_t bdaddr;
234 __le32 passkey;
235} __packed;
236struct mgmt_rp_user_passkey_reply {
237 bdaddr_t bdaddr;
238 __u8 status;
180} __packed; 239} __packed;
181 240
182#define MGMT_OP_READ_LOCAL_OOB_DATA 0x0018 241#define MGMT_OP_USER_PASSKEY_NEG_REPLY 0x001D
242struct mgmt_cp_user_passkey_neg_reply {
243 bdaddr_t bdaddr;
244} __packed;
245
246#define MGMT_OP_READ_LOCAL_OOB_DATA 0x001E
183struct mgmt_rp_read_local_oob_data { 247struct mgmt_rp_read_local_oob_data {
184 __u8 hash[16]; 248 __u8 hash[16];
185 __u8 randomizer[16]; 249 __u8 randomizer[16];
186} __packed; 250} __packed;
187 251
188#define MGMT_OP_ADD_REMOTE_OOB_DATA 0x0019 252#define MGMT_OP_ADD_REMOTE_OOB_DATA 0x001F
189struct mgmt_cp_add_remote_oob_data { 253struct mgmt_cp_add_remote_oob_data {
190 bdaddr_t bdaddr; 254 bdaddr_t bdaddr;
191 __u8 hash[16]; 255 __u8 hash[16];
192 __u8 randomizer[16]; 256 __u8 randomizer[16];
193} __packed; 257} __packed;
194 258
195#define MGMT_OP_REMOVE_REMOTE_OOB_DATA 0x001A 259#define MGMT_OP_REMOVE_REMOTE_OOB_DATA 0x0020
196struct mgmt_cp_remove_remote_oob_data { 260struct mgmt_cp_remove_remote_oob_data {
197 bdaddr_t bdaddr; 261 bdaddr_t bdaddr;
198} __packed; 262} __packed;
199 263
200#define MGMT_OP_START_DISCOVERY 0x001B 264#define MGMT_OP_START_DISCOVERY 0x0021
265struct mgmt_cp_start_discovery {
266 __u8 type;
267} __packed;
201 268
202#define MGMT_OP_STOP_DISCOVERY 0x001C 269#define MGMT_OP_STOP_DISCOVERY 0x0022
203 270
204#define MGMT_OP_BLOCK_DEVICE 0x001D 271#define MGMT_OP_CONFIRM_NAME 0x0023
205struct mgmt_cp_block_device { 272struct mgmt_cp_confirm_name {
206 bdaddr_t bdaddr; 273 bdaddr_t bdaddr;
274 __u8 name_known;
275} __packed;
276struct mgmt_rp_confirm_name {
277 bdaddr_t bdaddr;
278 __u8 status;
207} __packed; 279} __packed;
208 280
209#define MGMT_OP_UNBLOCK_DEVICE 0x001E 281#define MGMT_OP_BLOCK_DEVICE 0x0024
210struct mgmt_cp_unblock_device { 282struct mgmt_cp_block_device {
211 bdaddr_t bdaddr; 283 bdaddr_t bdaddr;
212} __packed; 284} __packed;
213 285
214#define MGMT_OP_SET_FAST_CONNECTABLE 0x001F 286#define MGMT_OP_UNBLOCK_DEVICE 0x0025
215struct mgmt_cp_set_fast_connectable { 287struct mgmt_cp_unblock_device {
216 __u8 enable; 288 bdaddr_t bdaddr;
217} __packed; 289} __packed;
218 290
219#define MGMT_EV_CMD_COMPLETE 0x0001 291#define MGMT_EV_CMD_COMPLETE 0x0001
@@ -237,83 +309,82 @@ struct mgmt_ev_controller_error {
237 309
238#define MGMT_EV_INDEX_REMOVED 0x0005 310#define MGMT_EV_INDEX_REMOVED 0x0005
239 311
240#define MGMT_EV_POWERED 0x0006 312#define MGMT_EV_NEW_SETTINGS 0x0006
241 313
242#define MGMT_EV_DISCOVERABLE 0x0007 314#define MGMT_EV_CLASS_OF_DEV_CHANGED 0x0007
243 315struct mgmt_ev_class_of_dev_changed {
244#define MGMT_EV_CONNECTABLE 0x0008 316 __u8 dev_class[3];
317};
245 318
246#define MGMT_EV_PAIRABLE 0x0009 319#define MGMT_EV_LOCAL_NAME_CHANGED 0x0008
320struct mgmt_ev_local_name_changed {
321 __u8 name[MGMT_MAX_NAME_LENGTH];
322 __u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH];
323} __packed;
247 324
248#define MGMT_EV_NEW_KEY 0x000A 325#define MGMT_EV_NEW_LINK_KEY 0x0009
249struct mgmt_ev_new_key { 326struct mgmt_ev_new_link_key {
250 __u8 store_hint; 327 __u8 store_hint;
251 struct mgmt_key_info key; 328 struct mgmt_link_key_info key;
252} __packed; 329} __packed;
253 330
254#define MGMT_EV_CONNECTED 0x000B 331#define MGMT_EV_CONNECTED 0x000A
255struct mgmt_ev_connected {
256 bdaddr_t bdaddr;
257 __u8 link_type;
258} __packed;
259 332
260#define MGMT_EV_DISCONNECTED 0x000C 333#define MGMT_EV_DISCONNECTED 0x000B
261struct mgmt_ev_disconnected {
262 bdaddr_t bdaddr;
263} __packed;
264 334
265#define MGMT_EV_CONNECT_FAILED 0x000D 335#define MGMT_EV_CONNECT_FAILED 0x000C
266struct mgmt_ev_connect_failed { 336struct mgmt_ev_connect_failed {
267 bdaddr_t bdaddr; 337 struct mgmt_addr_info addr;
268 __u8 status; 338 __u8 status;
269} __packed; 339} __packed;
270 340
271#define MGMT_EV_PIN_CODE_REQUEST 0x000E 341#define MGMT_EV_PIN_CODE_REQUEST 0x000D
272struct mgmt_ev_pin_code_request { 342struct mgmt_ev_pin_code_request {
273 bdaddr_t bdaddr; 343 bdaddr_t bdaddr;
274 __u8 secure; 344 __u8 secure;
275} __packed; 345} __packed;
276 346
277#define MGMT_EV_USER_CONFIRM_REQUEST 0x000F 347#define MGMT_EV_USER_CONFIRM_REQUEST 0x000E
278struct mgmt_ev_user_confirm_request { 348struct mgmt_ev_user_confirm_request {
279 bdaddr_t bdaddr; 349 bdaddr_t bdaddr;
280 __u8 confirm_hint; 350 __u8 confirm_hint;
281 __le32 value; 351 __le32 value;
282} __packed; 352} __packed;
283 353
354#define MGMT_EV_USER_PASSKEY_REQUEST 0x000F
355struct mgmt_ev_user_passkey_request {
356 bdaddr_t bdaddr;
357} __packed;
358
284#define MGMT_EV_AUTH_FAILED 0x0010 359#define MGMT_EV_AUTH_FAILED 0x0010
285struct mgmt_ev_auth_failed { 360struct mgmt_ev_auth_failed {
286 bdaddr_t bdaddr; 361 bdaddr_t bdaddr;
287 __u8 status; 362 __u8 status;
288} __packed; 363} __packed;
289 364
290#define MGMT_EV_LOCAL_NAME_CHANGED 0x0011 365#define MGMT_EV_DEVICE_FOUND 0x0011
291struct mgmt_ev_local_name_changed {
292 __u8 name[MGMT_MAX_NAME_LENGTH];
293} __packed;
294
295#define MGMT_EV_DEVICE_FOUND 0x0012
296struct mgmt_ev_device_found { 366struct mgmt_ev_device_found {
297 bdaddr_t bdaddr; 367 struct mgmt_addr_info addr;
298 __u8 dev_class[3]; 368 __u8 dev_class[3];
299 __s8 rssi; 369 __s8 rssi;
370 __u8 confirm_name;
300 __u8 eir[HCI_MAX_EIR_LENGTH]; 371 __u8 eir[HCI_MAX_EIR_LENGTH];
301} __packed; 372} __packed;
302 373
303#define MGMT_EV_REMOTE_NAME 0x0013 374#define MGMT_EV_REMOTE_NAME 0x0012
304struct mgmt_ev_remote_name { 375struct mgmt_ev_remote_name {
305 bdaddr_t bdaddr; 376 bdaddr_t bdaddr;
306 __u8 name[MGMT_MAX_NAME_LENGTH]; 377 __u8 name[MGMT_MAX_NAME_LENGTH];
307} __packed; 378} __packed;
308 379
309#define MGMT_EV_DISCOVERING 0x0014 380#define MGMT_EV_DISCOVERING 0x0013
310 381
311#define MGMT_EV_DEVICE_BLOCKED 0x0015 382#define MGMT_EV_DEVICE_BLOCKED 0x0014
312struct mgmt_ev_device_blocked { 383struct mgmt_ev_device_blocked {
313 bdaddr_t bdaddr; 384 bdaddr_t bdaddr;
314} __packed; 385} __packed;
315 386
316#define MGMT_EV_DEVICE_UNBLOCKED 0x0016 387#define MGMT_EV_DEVICE_UNBLOCKED 0x0015
317struct mgmt_ev_device_unblocked { 388struct mgmt_ev_device_unblocked {
318 bdaddr_t bdaddr; 389 bdaddr_t bdaddr;
319} __packed; 390} __packed;
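The reworked mgmt_rp_read_info above replaces the per-feature bytes of the old reply with two __le32 bitmasks composed of the MGMT_SETTING_* values. A standalone illustration (constants copied from above; the little-endian fields are assumed to have been converted to host order before this point):

#include <stdio.h>
#include <stdint.h>

#define MGMT_SETTING_POWERED		0x00000001
#define MGMT_SETTING_CONNECTABLE	0x00000002
#define MGMT_SETTING_DISCOVERABLE	0x00000008
#define MGMT_SETTING_LE			0x00000200

static void print_settings(uint32_t settings)
{
	printf("powered:      %s\n", settings & MGMT_SETTING_POWERED ? "yes" : "no");
	printf("connectable:  %s\n", settings & MGMT_SETTING_CONNECTABLE ? "yes" : "no");
	printf("discoverable: %s\n", settings & MGMT_SETTING_DISCOVERABLE ? "yes" : "no");
	printf("low energy:   %s\n", settings & MGMT_SETTING_LE ? "yes" : "no");
}

int main(void)
{
	/* e.g. the current_settings of a powered, connectable,
	 * LE-capable controller */
	print_settings(MGMT_SETTING_POWERED | MGMT_SETTING_CONNECTABLE |
		       MGMT_SETTING_LE);
	return 0;
}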
diff --git a/include/net/bluetooth/smp.h b/include/net/bluetooth/smp.h
index 15b97d549441..aeaf5fa2b9f1 100644
--- a/include/net/bluetooth/smp.h
+++ b/include/net/bluetooth/smp.h
@@ -115,6 +115,10 @@ struct smp_cmd_security_req {
115#define SMP_MIN_ENC_KEY_SIZE 7 115#define SMP_MIN_ENC_KEY_SIZE 7
116#define SMP_MAX_ENC_KEY_SIZE 16 116#define SMP_MAX_ENC_KEY_SIZE 16
117 117
118#define SMP_FLAG_TK_VALID 1
119#define SMP_FLAG_CFM_PENDING 2
120#define SMP_FLAG_MITM_AUTH 3
121
118struct smp_chan { 122struct smp_chan {
119 struct l2cap_conn *conn; 123 struct l2cap_conn *conn;
120 u8 preq[7]; /* SMP Pairing Request */ 124 u8 preq[7]; /* SMP Pairing Request */
@@ -124,6 +128,7 @@ struct smp_chan {
124 u8 pcnf[16]; /* SMP Pairing Confirm */ 128 u8 pcnf[16]; /* SMP Pairing Confirm */
125 u8 tk[16]; /* SMP Temporary Key */ 129 u8 tk[16]; /* SMP Temporary Key */
126 u8 smp_key_size; 130 u8 smp_key_size;
131 unsigned long smp_flags;
127 struct crypto_blkcipher *tfm; 132 struct crypto_blkcipher *tfm;
128 struct work_struct confirm; 133 struct work_struct confirm;
129 struct work_struct random; 134 struct work_struct random;
@@ -134,6 +139,7 @@ struct smp_chan {
134int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level); 139int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level);
135int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb); 140int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb);
136int smp_distribute_keys(struct l2cap_conn *conn, __u8 force); 141int smp_distribute_keys(struct l2cap_conn *conn, __u8 force);
142int smp_user_confirm_reply(struct hci_conn *conn, u16 mgmt_op, __le32 passkey);
137 143
138void smp_chan_destroy(struct l2cap_conn *conn); 144void smp_chan_destroy(struct l2cap_conn *conn);
139 145
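The new SMP_FLAG_* values (1, 2, 3) together with the unsigned long smp_flags member look like bit numbers meant for the atomic bit helpers; the snippet below is only a sketch of that presumed usage and is not taken from the patch.

static bool example_tk_ready(struct smp_chan *smp)
{
	/* Presumed pattern: SMP_FLAG_* used as bit indices in smp_flags. */
	set_bit(SMP_FLAG_TK_VALID, &smp->smp_flags);

	return test_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags);
}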
diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h
index c011281d92c0..ef2dd9438bb1 100644
--- a/include/net/caif/caif_dev.h
+++ b/include/net/caif/caif_dev.h
@@ -9,6 +9,7 @@
9 9
10#include <net/caif/caif_layer.h> 10#include <net/caif/caif_layer.h>
11#include <net/caif/cfcnfg.h> 11#include <net/caif/cfcnfg.h>
12#include <net/caif/caif_device.h>
12#include <linux/caif/caif_socket.h> 13#include <linux/caif/caif_socket.h>
13#include <linux/if.h> 14#include <linux/if.h>
14#include <linux/net.h> 15#include <linux/net.h>
@@ -104,4 +105,24 @@ void caif_client_register_refcnt(struct cflayer *adapt_layer,
104 */ 105 */
105void caif_free_client(struct cflayer *adap_layer); 106void caif_free_client(struct cflayer *adap_layer);
106 107
108/**
109 * caif_enroll_dev() - Enroll a net-device as a CAIF Link layer
110 * @dev: Network device to enroll.
111 * @caifdev: Configuration information from CAIF Link Layer
112 * @link_support: Link layer support layer
113 * @head_room: Head room needed by link support layer
114 * @layer: Lowest layer in CAIF stack
115 * @rcv_func: Receive function for CAIF stack.
116 *
117 * This function enrolls a CAIF link layer into the CAIF stack and
118 * expects the interface to be able to handle CAIF payload.
119 * The link_support layer is used to add any Link Layer specific
120 * framing.
121 */
122void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
123 struct cflayer *link_support, int head_room,
124 struct cflayer **layer, int (**rcv_func)(
125 struct sk_buff *, struct net_device *,
126 struct packet_type *, struct net_device *));
127
107#endif /* CAIF_DEV_H_ */ 128#endif /* CAIF_DEV_H_ */
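A hypothetical link-layer driver calling the enrollment helper documented above; everything except caif_enroll_dev() and the meaning of its parameters is invented for illustration.

static int (*example_rcv)(struct sk_buff *, struct net_device *,
			  struct packet_type *, struct net_device *);

static void example_enroll(struct net_device *dev,
			   struct caif_dev_common *caifdev,
			   struct cflayer *link_support, int head_room)
{
	struct cflayer *bottom;

	/* Enroll the device and receive back the lowest CAIF layer plus
	 * the function that frames from this device must be fed into. */
	caif_enroll_dev(dev, caifdev, link_support, head_room,
			&bottom, &example_rcv);
}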
diff --git a/include/net/caif/caif_layer.h b/include/net/caif/caif_layer.h
index 35bc7883cf97..0f3a39125f90 100644
--- a/include/net/caif/caif_layer.h
+++ b/include/net/caif/caif_layer.h
@@ -121,9 +121,7 @@ enum caif_direction {
121 * @transmit: Packet transmit function. 121 * @transmit: Packet transmit function.
122 * @ctrlcmd: Used for control signalling upwards in the stack. 122 * @ctrlcmd: Used for control signalling upwards in the stack.
123 * @modemcmd: Used for control signaling downwards in the stack. 123 * @modemcmd: Used for control signaling downwards in the stack.
124 * @prio: Priority of this layer.
125 * @id: The identity of this layer 124 * @id: The identity of this layer
126 * @type: The type of this layer
127 * @name: Name of the layer. 125 * @name: Name of the layer.
128 * 126 *
129 * This structure defines the layered structure in CAIF. 127 * This structure defines the layered structure in CAIF.
@@ -230,9 +228,7 @@ struct cflayer {
230 */ 228 */
231 int (*modemcmd) (struct cflayer *layr, enum caif_modemcmd ctrl); 229 int (*modemcmd) (struct cflayer *layr, enum caif_modemcmd ctrl);
232 230
233 unsigned short prio;
234 unsigned int id; 231 unsigned int id;
235 unsigned int type;
236 char name[CAIF_LAYER_NAME_SZ]; 232 char name[CAIF_LAYER_NAME_SZ];
237}; 233};
238 234
diff --git a/include/net/caif/caif_spi.h b/include/net/caif/caif_spi.h
index 87c3d11b8e55..aa6a485b0545 100644
--- a/include/net/caif/caif_spi.h
+++ b/include/net/caif/caif_spi.h
@@ -55,8 +55,8 @@
55struct cfspi_xfer { 55struct cfspi_xfer {
56 u16 tx_dma_len; 56 u16 tx_dma_len;
57 u16 rx_dma_len; 57 u16 rx_dma_len;
58 void *va_tx; 58 void *va_tx[2];
59 dma_addr_t pa_tx; 59 dma_addr_t pa_tx[2];
60 void *va_rx; 60 void *va_rx;
61 dma_addr_t pa_rx; 61 dma_addr_t pa_rx;
62}; 62};
diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h
index 3e93a4a4b677..90b4ff8bad83 100644
--- a/include/net/caif/cfcnfg.h
+++ b/include/net/caif/cfcnfg.h
@@ -14,18 +14,6 @@
14struct cfcnfg; 14struct cfcnfg;
15 15
16/** 16/**
17 * enum cfcnfg_phy_type - Types of physical layers defined in CAIF Stack
18 *
19 * @CFPHYTYPE_FRAG: Fragmented frames physical interface.
20 * @CFPHYTYPE_CAIF: Generic CAIF physical interface
21 */
22enum cfcnfg_phy_type {
23 CFPHYTYPE_FRAG = 1,
24 CFPHYTYPE_CAIF,
25 CFPHYTYPE_MAX
26};
27
28/**
29 * enum cfcnfg_phy_preference - Physical preference HW Abstraction 17 * enum cfcnfg_phy_preference - Physical preference HW Abstraction
30 * 18 *
31 * @CFPHYPREF_UNSPECIFIED: Default physical interface 19 * @CFPHYPREF_UNSPECIFIED: Default physical interface
@@ -66,21 +54,20 @@ void cfcnfg_remove(struct cfcnfg *cfg);
66 * cfcnfg_add_phy_layer() - Adds a physical layer to the CAIF stack. 54 * cfcnfg_add_phy_layer() - Adds a physical layer to the CAIF stack.
67 * @cnfg: Pointer to a CAIF configuration object, created by 55 * @cnfg: Pointer to a CAIF configuration object, created by
68 * cfcnfg_create(). 56 * cfcnfg_create().
69 * @phy_type: Specifies the type of physical interface, e.g.
70 * CFPHYTYPE_FRAG.
71 * @dev: Pointer to link layer device 57 * @dev: Pointer to link layer device
72 * @phy_layer: Specify the physical layer. The transmit function 58 * @phy_layer: Specify the physical layer. The transmit function
73 * MUST be set in the structure. 59 * MUST be set in the structure.
74 * @pref: The phy (link layer) preference. 60 * @pref: The phy (link layer) preference.
61 * @link_support: Protocol implementation for link layer specific protocol.
75 * @fcs: Specify if checksum is used in CAIF Framing Layer. 62 * @fcs: Specify if checksum is used in CAIF Framing Layer.
76 * @stx: Specify if Start Of Frame eXtention is used. 63 * @head_room: Head space needed by link specific protocol.
77 */ 64 */
78
79void 65void
80cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type, 66cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
81 struct net_device *dev, struct cflayer *phy_layer, 67 struct net_device *dev, struct cflayer *phy_layer,
82 enum cfcnfg_phy_preference pref, 68 enum cfcnfg_phy_preference pref,
83 bool fcs, bool stx); 69 struct cflayer *link_support,
70 bool fcs, int head_room);
84 71
85/** 72/**
86 * cfcnfg_del_phy_layer - Deletes a phy layer from the CAIF stack. 73 * cfcnfg_del_phy_layer - Deletes a phy layer from the CAIF stack.
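A before/after sketch of how a call site adapts to the new cfcnfg_add_phy_layer() prototype above; the surrounding variables are placeholders.

static void example_add_phy(struct cfcnfg *cnfg, struct net_device *dev,
			    struct cflayer *phy_layer,
			    struct cflayer *link_support, int head_room)
{
	/* Old form: cfcnfg_add_phy_layer(cnfg, CFPHYTYPE_CAIF, dev,
	 *		phy_layer, CFPHYPREF_UNSPECIFIED, false, false);
	 * The phy_type argument is gone, and link-specific framing is now
	 * expressed as a link_support layer plus the head room it needs. */
	cfcnfg_add_phy_layer(cnfg, dev, phy_layer, CFPHYPREF_UNSPECIFIED,
			     link_support, false, head_room);
}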
diff --git a/include/net/caif/cfserl.h b/include/net/caif/cfserl.h
index b8374321b362..f121299a3427 100644
--- a/include/net/caif/cfserl.h
+++ b/include/net/caif/cfserl.h
@@ -8,5 +8,5 @@
8#define CFSERL_H_ 8#define CFSERL_H_
9#include <net/caif/caif_layer.h> 9#include <net/caif/caif_layer.h>
10 10
11struct cflayer *cfserl_create(int type, int instance, bool use_stx); 11struct cflayer *cfserl_create(int instance, bool use_stx);
12#endif /* CFSERL_H_ */ 12#endif
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 95852e36713b..15f4be7d768e 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -391,6 +391,8 @@ struct cfg80211_crypto_settings {
391 * @assocresp_ies: extra information element(s) to add into (Re)Association 391 * @assocresp_ies: extra information element(s) to add into (Re)Association
392 * Response frames or %NULL 392 * Response frames or %NULL
393 * @assocresp_ies_len: length of assocresp_ies in octets 393 * @assocresp_ies_len: length of assocresp_ies in octets
394 * @probe_resp_len: length of probe response template (@probe_resp)
395 * @probe_resp: probe response template (AP mode only)
394 */ 396 */
395struct beacon_parameters { 397struct beacon_parameters {
396 u8 *head, *tail; 398 u8 *head, *tail;
@@ -408,6 +410,8 @@ struct beacon_parameters {
408 size_t proberesp_ies_len; 410 size_t proberesp_ies_len;
409 const u8 *assocresp_ies; 411 const u8 *assocresp_ies;
410 size_t assocresp_ies_len; 412 size_t assocresp_ies_len;
413 int probe_resp_len;
414 u8 *probe_resp;
411}; 415};
412 416
413/** 417/**
@@ -501,6 +505,7 @@ struct station_parameters {
501 * @STATION_INFO_CONNECTED_TIME: @connected_time filled 505 * @STATION_INFO_CONNECTED_TIME: @connected_time filled
502 * @STATION_INFO_ASSOC_REQ_IES: @assoc_req_ies filled 506 * @STATION_INFO_ASSOC_REQ_IES: @assoc_req_ies filled
503 * @STATION_INFO_STA_FLAGS: @sta_flags filled 507 * @STATION_INFO_STA_FLAGS: @sta_flags filled
508 * @STATION_INFO_BEACON_LOSS_COUNT: @beacon_loss_count filled
504 */ 509 */
505enum station_info_flags { 510enum station_info_flags {
506 STATION_INFO_INACTIVE_TIME = 1<<0, 511 STATION_INFO_INACTIVE_TIME = 1<<0,
@@ -521,7 +526,8 @@ enum station_info_flags {
521 STATION_INFO_BSS_PARAM = 1<<15, 526 STATION_INFO_BSS_PARAM = 1<<15,
522 STATION_INFO_CONNECTED_TIME = 1<<16, 527 STATION_INFO_CONNECTED_TIME = 1<<16,
523 STATION_INFO_ASSOC_REQ_IES = 1<<17, 528 STATION_INFO_ASSOC_REQ_IES = 1<<17,
524 STATION_INFO_STA_FLAGS = 1<<18 529 STATION_INFO_STA_FLAGS = 1<<18,
530 STATION_INFO_BEACON_LOSS_COUNT = 1<<19
525}; 531};
526 532
527/** 533/**
@@ -619,6 +625,7 @@ struct sta_bss_parameters {
619 * the cfg80211_new_sta() calls to notify user space of the IEs. 625 * the cfg80211_new_sta() calls to notify user space of the IEs.
620 * @assoc_req_ies_len: Length of assoc_req_ies buffer in octets. 626 * @assoc_req_ies_len: Length of assoc_req_ies buffer in octets.
621 * @sta_flags: station flags mask & values 627 * @sta_flags: station flags mask & values
628 * @beacon_loss_count: Number of times beacon loss event has triggered.
622 */ 629 */
623struct station_info { 630struct station_info {
624 u32 filled; 631 u32 filled;
@@ -646,6 +653,8 @@ struct station_info {
646 const u8 *assoc_req_ies; 653 const u8 *assoc_req_ies;
647 size_t assoc_req_ies_len; 654 size_t assoc_req_ies_len;
648 655
656 u32 beacon_loss_count;
657
649 /* 658 /*
650 * Note: Add a new enum station_info_flags value for each new field and 659 * Note: Add a new enum station_info_flags value for each new field and
651 * use it to check which fields are initialized. 660 * use it to check which fields are initialized.
@@ -778,6 +787,7 @@ struct mesh_config {
778 u16 min_discovery_timeout; 787 u16 min_discovery_timeout;
779 u32 dot11MeshHWMPactivePathTimeout; 788 u32 dot11MeshHWMPactivePathTimeout;
780 u16 dot11MeshHWMPpreqMinInterval; 789 u16 dot11MeshHWMPpreqMinInterval;
790 u16 dot11MeshHWMPperrMinInterval;
781 u16 dot11MeshHWMPnetDiameterTraversalTime; 791 u16 dot11MeshHWMPnetDiameterTraversalTime;
782 u8 dot11MeshHWMPRootMode; 792 u8 dot11MeshHWMPRootMode;
783 u16 dot11MeshHWMPRannInterval; 793 u16 dot11MeshHWMPRannInterval;
@@ -798,6 +808,7 @@ struct mesh_config {
798 * @ie_len: length of vendor information elements 808 * @ie_len: length of vendor information elements
799 * @is_authenticated: this mesh requires authentication 809 * @is_authenticated: this mesh requires authentication
800 * @is_secure: this mesh uses security 810 * @is_secure: this mesh uses security
811 * @mcast_rate: multicast rate for Mesh Node [6Mbps is the default for 802.11a]
801 * 812 *
802 * These parameters are fixed when the mesh is created. 813 * These parameters are fixed when the mesh is created.
803 */ 814 */
@@ -810,6 +821,7 @@ struct mesh_setup {
810 u8 ie_len; 821 u8 ie_len;
811 bool is_authenticated; 822 bool is_authenticated;
812 bool is_secure; 823 bool is_secure;
824 int mcast_rate[IEEE80211_NUM_BANDS];
813}; 825};
814 826
815/** 827/**
@@ -1040,6 +1052,15 @@ struct cfg80211_auth_request {
1040}; 1052};
1041 1053
1042/** 1054/**
1055 * enum cfg80211_assoc_req_flags - Over-ride default behaviour in association.
1056 *
1057 * @ASSOC_REQ_DISABLE_HT: Disable HT (802.11n)
1058 */
1059enum cfg80211_assoc_req_flags {
1060 ASSOC_REQ_DISABLE_HT = BIT(0),
1061};
1062
1063/**
1043 * struct cfg80211_assoc_request - (Re)Association request data 1064 * struct cfg80211_assoc_request - (Re)Association request data
1044 * 1065 *
1045 * This structure provides information needed to complete IEEE 802.11 1066 * This structure provides information needed to complete IEEE 802.11
@@ -1050,6 +1071,10 @@ struct cfg80211_auth_request {
1050 * @use_mfp: Use management frame protection (IEEE 802.11w) in this association 1071 * @use_mfp: Use management frame protection (IEEE 802.11w) in this association
1051 * @crypto: crypto settings 1072 * @crypto: crypto settings
1052 * @prev_bssid: previous BSSID, if not %NULL use reassociate frame 1073 * @prev_bssid: previous BSSID, if not %NULL use reassociate frame
1074 * @flags: See &enum cfg80211_assoc_req_flags
1075 * @ht_capa: HT capability overrides. Values set in ht_capa_mask
1076 * will be used in ht_capa. Unsupported values will be ignored.
1077 * @ht_capa_mask: The bits of ht_capa which are to be used.
1053 */ 1078 */
1054struct cfg80211_assoc_request { 1079struct cfg80211_assoc_request {
1055 struct cfg80211_bss *bss; 1080 struct cfg80211_bss *bss;
@@ -1057,6 +1082,9 @@ struct cfg80211_assoc_request {
1057 size_t ie_len; 1082 size_t ie_len;
1058 struct cfg80211_crypto_settings crypto; 1083 struct cfg80211_crypto_settings crypto;
1059 bool use_mfp; 1084 bool use_mfp;
1085 u32 flags;
1086 struct ieee80211_ht_cap ht_capa;
1087 struct ieee80211_ht_cap ht_capa_mask;
1060}; 1088};
1061 1089
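A hedged sketch of how the new override fields could be honoured when building the HT capabilities for an association; the helper name and the byte-wise masking are illustrative only, not the in-tree code.

#include <net/cfg80211.h>

static void my_apply_ht_overrides(struct ieee80211_ht_cap *ht_cap,
				  const struct cfg80211_assoc_request *req)
{
	const u8 *ovr = (const u8 *)&req->ht_capa;
	const u8 *msk = (const u8 *)&req->ht_capa_mask;
	u8 *cap = (u8 *)ht_cap;
	unsigned int i;

	if (req->flags & ASSOC_REQ_DISABLE_HT)
		return;		/* caller is expected to omit HT altogether */

	/* only the bits set in ht_capa_mask are taken from ht_capa */
	for (i = 0; i < sizeof(*ht_cap); i++)
		cap[i] = (cap[i] & ~msk[i]) | (ovr[i] & msk[i]);
}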
1062/** 1090/**
@@ -1126,6 +1154,7 @@ struct cfg80211_ibss_params {
1126 u8 *ssid; 1154 u8 *ssid;
1127 u8 *bssid; 1155 u8 *bssid;
1128 struct ieee80211_channel *channel; 1156 struct ieee80211_channel *channel;
1157 enum nl80211_channel_type channel_type;
1129 u8 *ie; 1158 u8 *ie;
1130 u8 ssid_len, ie_len; 1159 u8 ssid_len, ie_len;
1131 u16 beacon_interval; 1160 u16 beacon_interval;
@@ -1155,6 +1184,10 @@ struct cfg80211_ibss_params {
1155 * @key_len: length of WEP key for shared key authentication 1184 * @key_len: length of WEP key for shared key authentication
1156 * @key_idx: index of WEP key for shared key authentication 1185 * @key_idx: index of WEP key for shared key authentication
1157 * @key: WEP key for shared key authentication 1186 * @key: WEP key for shared key authentication
1187 * @flags: See &enum cfg80211_assoc_req_flags
1188 * @ht_capa: HT capability overrides. Values set in ht_capa_mask
1189 * will be used in ht_capa. Unsupported values will be ignored.
1190 * @ht_capa_mask: The bits of ht_capa which are to be used.
1158 */ 1191 */
1159struct cfg80211_connect_params { 1192struct cfg80211_connect_params {
1160 struct ieee80211_channel *channel; 1193 struct ieee80211_channel *channel;
@@ -1168,6 +1201,9 @@ struct cfg80211_connect_params {
1168 struct cfg80211_crypto_settings crypto; 1201 struct cfg80211_crypto_settings crypto;
1169 const u8 *key; 1202 const u8 *key;
1170 u8 key_len, key_idx; 1203 u8 key_len, key_idx;
1204 u32 flags;
1205 struct ieee80211_ht_cap ht_capa;
1206 struct ieee80211_ht_cap ht_capa_mask;
1171}; 1207};
1172 1208
1173/** 1209/**
@@ -1315,7 +1351,12 @@ struct cfg80211_gtk_rekey_data {
1315 * 1351 *
1316 * @add_station: Add a new station. 1352 * @add_station: Add a new station.
1317 * @del_station: Remove a station; @mac may be NULL to remove all stations. 1353 * @del_station: Remove a station; @mac may be NULL to remove all stations.
1318 * @change_station: Modify a given station. 1354 * @change_station: Modify a given station. Note that flags changes are not much
1355 * validated in cfg80211, in particular the auth/assoc/authorized flags
1356 * might come to the driver in invalid combinations -- make sure to check
1357 * them, also against the existing state! Also, supported_rates changes are
1358 * not checked in station mode -- drivers need to reject (or ignore) them
1359 * for anything but TDLS peers.
1319 * @get_station: get station information for the station identified by @mac 1360 * @get_station: get station information for the station identified by @mac
1320 * @dump_station: dump station callback -- resume dump at index @idx 1361 * @dump_station: dump station callback -- resume dump at index @idx
1321 * 1362 *
@@ -1342,6 +1383,9 @@ struct cfg80211_gtk_rekey_data {
1342 * doesn't verify much. Note, however, that the passed netdev may be 1383 * doesn't verify much. Note, however, that the passed netdev may be
1343 * %NULL as well if the user requested changing the channel for the 1384 * %NULL as well if the user requested changing the channel for the
1344 * device itself, or for a monitor interface. 1385 * device itself, or for a monitor interface.
1386 * @get_channel: Get the current operating channel, should return %NULL if
1387 * there's no single defined operating channel, for example if the
1388 * device implements channel hopping for multi-channel virtual interfaces.
1345 * 1389 *
1346 * @scan: Request to do a scan. If returning zero, the scan request is given 1390 * @scan: Request to do a scan. If returning zero, the scan request is given
1347 * the driver, and will be valid until passed to cfg80211_scan_done(). 1391 * the driver, and will be valid until passed to cfg80211_scan_done().
@@ -1369,7 +1413,8 @@ struct cfg80211_gtk_rekey_data {
1369 * have changed. The actual parameter values are available in 1413 * have changed. The actual parameter values are available in
1370 * struct wiphy. If returning an error, no value should be changed. 1414 * struct wiphy. If returning an error, no value should be changed.
1371 * 1415 *
1372 * @set_tx_power: set the transmit power according to the parameters 1416 * @set_tx_power: set the transmit power according to the parameters,
1417 * the power passed is in mBm; to get dBm use MBM_TO_DBM().
1373 * @get_tx_power: store the current TX power into the dbm variable; 1418 * @get_tx_power: store the current TX power into the dbm variable;
1374 * return 0 if successful 1419 * return 0 if successful
1375 * 1420 *
@@ -1432,6 +1477,11 @@ struct cfg80211_gtk_rekey_data {
1432 * 1477 *
1433 * @tdls_mgmt: Transmit a TDLS management frame. 1478 * @tdls_mgmt: Transmit a TDLS management frame.
1434 * @tdls_oper: Perform a high-level TDLS operation (e.g. TDLS link setup). 1479 * @tdls_oper: Perform a high-level TDLS operation (e.g. TDLS link setup).
1480 *
1481 * @probe_client: probe an associated client, must return a cookie that it
1482 * later passes to cfg80211_probe_status().
1483 *
1484 * @set_noack_map: Set the NoAck Map for the TIDs.
1435 */ 1485 */
1436struct cfg80211_ops { 1486struct cfg80211_ops {
1437 int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); 1487 int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -1585,7 +1635,7 @@ struct cfg80211_ops {
1585 enum nl80211_channel_type channel_type, 1635 enum nl80211_channel_type channel_type,
1586 bool channel_type_valid, unsigned int wait, 1636 bool channel_type_valid, unsigned int wait,
1587 const u8 *buf, size_t len, bool no_cck, 1637 const u8 *buf, size_t len, bool no_cck,
1588 u64 *cookie); 1638 bool dont_wait_for_ack, u64 *cookie);
1589 int (*mgmt_tx_cancel_wait)(struct wiphy *wiphy, 1639 int (*mgmt_tx_cancel_wait)(struct wiphy *wiphy,
1590 struct net_device *dev, 1640 struct net_device *dev,
1591 u64 cookie); 1641 u64 cookie);
@@ -1621,6 +1671,15 @@ struct cfg80211_ops {
1621 u16 status_code, const u8 *buf, size_t len); 1671 u16 status_code, const u8 *buf, size_t len);
1622 int (*tdls_oper)(struct wiphy *wiphy, struct net_device *dev, 1672 int (*tdls_oper)(struct wiphy *wiphy, struct net_device *dev,
1623 u8 *peer, enum nl80211_tdls_operation oper); 1673 u8 *peer, enum nl80211_tdls_operation oper);
1674
1675 int (*probe_client)(struct wiphy *wiphy, struct net_device *dev,
1676 const u8 *peer, u64 *cookie);
1677
1678 int (*set_noack_map)(struct wiphy *wiphy,
1679 struct net_device *dev,
1680 u16 noack_map);
1681
1682 struct ieee80211_channel *(*get_channel)(struct wiphy *wiphy);
1624}; 1683};
1625 1684
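A minimal, hypothetical sketch of wiring up two of the new cfg80211_ops callbacks added above; struct my_priv, my_write_noack_map() and oper_channel are invented for illustration.

#include <net/cfg80211.h>

struct my_priv {				/* hypothetical driver state */
	struct ieee80211_channel *oper_channel;
};

int my_write_noack_map(struct my_priv *priv, u16 noack_map);	/* hypothetical */

static int my_set_noack_map(struct wiphy *wiphy, struct net_device *dev,
			    u16 noack_map)
{
	/* push the per-TID NoAck bitmap to the device */
	return my_write_noack_map(wiphy_priv(wiphy), noack_map);
}

static struct ieee80211_channel *my_get_channel(struct wiphy *wiphy)
{
	struct my_priv *priv = wiphy_priv(wiphy);

	/* may be NULL when no single operating channel is defined */
	return priv->oper_channel;
}

static const struct cfg80211_ops my_cfg_ops = {
	/* ... the remaining callbacks go here ... */
	.set_noack_map	= my_set_noack_map,
	.get_channel	= my_get_channel,
};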
1626/* 1685/*
@@ -1645,7 +1704,9 @@ struct cfg80211_ops {
1645 * regulatory domain no user regulatory domain can enable these channels 1704 * regulatory domain no user regulatory domain can enable these channels
1646 * at a later time. This can be used for devices which do not have 1705 * at a later time. This can be used for devices which do not have
1647 * calibration information guaranteed for frequencies or settings 1706 * calibration information guaranteed for frequencies or settings
1648 * outside of its regulatory domain. 1707 * outside of its regulatory domain. If used in combination with
1708 * WIPHY_FLAG_CUSTOM_REGULATORY the inspected country IE power settings
1709 * will be followed.
1649 * @WIPHY_FLAG_DISABLE_BEACON_HINTS: enable this if your driver needs to ensure 1710 * @WIPHY_FLAG_DISABLE_BEACON_HINTS: enable this if your driver needs to ensure
1650 * that passive scan flags and beaconing flags may not be lifted by 1711 * that passive scan flags and beaconing flags may not be lifted by
1651 * cfg80211 due to regulatory beacon hints. For more information on beacon 1712 * cfg80211 due to regulatory beacon hints. For more information on beacon
@@ -1679,6 +1740,14 @@ struct cfg80211_ops {
1679 * teardown packets should be sent through the @NL80211_CMD_TDLS_MGMT 1740 * teardown packets should be sent through the @NL80211_CMD_TDLS_MGMT
1680 * command. When this flag is not set, @NL80211_CMD_TDLS_OPER should be 1741 * command. When this flag is not set, @NL80211_CMD_TDLS_OPER should be
1681 * used for asking the driver/firmware to perform a TDLS operation. 1742 * used for asking the driver/firmware to perform a TDLS operation.
1743 * @WIPHY_FLAG_HAVE_AP_SME: device integrates AP SME
1744 * @WIPHY_FLAG_REPORTS_OBSS: the device will report beacons from other BSSes
1745 * when there are virtual interfaces in AP mode by calling
1746 * cfg80211_report_obss_beacon().
1747 * @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD: When operating as an AP, the device
1748 * responds to probe-requests in hardware.
1749 * @WIPHY_FLAG_OFFCHAN_TX: Device supports direct off-channel TX.
1750 * @WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL: Device supports remain-on-channel call.
1682 */ 1751 */
1683enum wiphy_flags { 1752enum wiphy_flags {
1684 WIPHY_FLAG_CUSTOM_REGULATORY = BIT(0), 1753 WIPHY_FLAG_CUSTOM_REGULATORY = BIT(0),
@@ -1697,6 +1766,11 @@ enum wiphy_flags {
1697 WIPHY_FLAG_AP_UAPSD = BIT(14), 1766 WIPHY_FLAG_AP_UAPSD = BIT(14),
1698 WIPHY_FLAG_SUPPORTS_TDLS = BIT(15), 1767 WIPHY_FLAG_SUPPORTS_TDLS = BIT(15),
1699 WIPHY_FLAG_TDLS_EXTERNAL_SETUP = BIT(16), 1768 WIPHY_FLAG_TDLS_EXTERNAL_SETUP = BIT(16),
1769 WIPHY_FLAG_HAVE_AP_SME = BIT(17),
1770 WIPHY_FLAG_REPORTS_OBSS = BIT(18),
1771 WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD = BIT(19),
1772 WIPHY_FLAG_OFFCHAN_TX = BIT(20),
1773 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL = BIT(21),
1700}; 1774};
1701 1775
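A hedged example of a driver advertising some of the new capabilities before registration; only the flag names above are taken from the header, the helper function is hypothetical.

#include <net/cfg80211.h>

static void my_setup_wiphy_flags(struct wiphy *wiphy)
{
	/* to be called before wiphy_register() */
	wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
			WIPHY_FLAG_REPORTS_OBSS |
			WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;

	/*
	 * A device that also sets WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD is
	 * expected to fill wiphy->probe_resp_offload (added further below)
	 * with the protocols it can respond to in hardware.
	 */
}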
1702/** 1776/**
@@ -1869,6 +1943,7 @@ struct wiphy_wowlan_support {
1869 * @software_iftypes: bitmask of software interface types, these are not 1943 * @software_iftypes: bitmask of software interface types, these are not
1870 * subject to any restrictions since they are purely managed in SW. 1944 * subject to any restrictions since they are purely managed in SW.
1871 * @flags: wiphy flags, see &enum wiphy_flags 1945 * @flags: wiphy flags, see &enum wiphy_flags
1946 * @features: features advertised to nl80211, see &enum nl80211_feature_flags.
1872 * @bss_priv_size: each BSS struct has private data allocated with it, 1947 * @bss_priv_size: each BSS struct has private data allocated with it,
1873 * this variable determines its size 1948 * this variable determines its size
1874 * @max_scan_ssids: maximum number of SSIDs the device can scan for in 1949 * @max_scan_ssids: maximum number of SSIDs the device can scan for in
@@ -1907,6 +1982,10 @@ struct wiphy_wowlan_support {
1907 * may request, if implemented. 1982 * may request, if implemented.
1908 * 1983 *
1909 * @wowlan: WoWLAN support information 1984 * @wowlan: WoWLAN support information
1985 *
1986 * @ap_sme_capa: AP SME capabilities, flags from &enum nl80211_ap_sme_features.
1987 * @ht_capa_mod_mask: Specify what ht_cap values can be overridden.
1988 * If null, then none can be overridden.
1910 */ 1989 */
1911struct wiphy { 1990struct wiphy {
1912 /* assign these fields before you register the wiphy */ 1991 /* assign these fields before you register the wiphy */
@@ -1928,7 +2007,9 @@ struct wiphy {
1928 /* Supported interface modes, OR together BIT(NL80211_IFTYPE_...) */ 2007 /* Supported interface modes, OR together BIT(NL80211_IFTYPE_...) */
1929 u16 interface_modes; 2008 u16 interface_modes;
1930 2009
1931 u32 flags; 2010 u32 flags, features;
2011
2012 u32 ap_sme_capa;
1932 2013
1933 enum cfg80211_signal_type signal_type; 2014 enum cfg80211_signal_type signal_type;
1934 2015
@@ -1960,6 +2041,13 @@ struct wiphy {
1960 u32 available_antennas_tx; 2041 u32 available_antennas_tx;
1961 u32 available_antennas_rx; 2042 u32 available_antennas_rx;
1962 2043
2044 /*
2045 * Bitmap of supported protocols for probe response offloading
2046 * see &enum nl80211_probe_resp_offload_support_attr. Only valid
2047 * when the wiphy flag @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD is set.
2048 */
2049 u32 probe_resp_offload;
2050
1963 /* If multiple wiphys are registered and you're handed e.g. 2051 /* If multiple wiphys are registered and you're handed e.g.
1964 * a regular netdev with assigned ieee80211_ptr, you won't 2052 * a regular netdev with assigned ieee80211_ptr, you won't
1965 * know whether it points to a wiphy your driver has registered 2053 * know whether it points to a wiphy your driver has registered
@@ -1987,6 +2075,8 @@ struct wiphy {
1987 /* dir in debugfs: ieee80211/<wiphyname> */ 2075 /* dir in debugfs: ieee80211/<wiphyname> */
1988 struct dentry *debugfsdir; 2076 struct dentry *debugfsdir;
1989 2077
2078 const struct ieee80211_ht_cap *ht_capa_mod_mask;
2079
1990#ifdef CONFIG_NET_NS 2080#ifdef CONFIG_NET_NS
1991 /* the network namespace this phy lives in currently */ 2081 /* the network namespace this phy lives in currently */
1992 struct net *_net; 2082 struct net *_net;
@@ -2183,6 +2273,8 @@ struct wireless_dev {
2183 2273
2184 int beacon_interval; 2274 int beacon_interval;
2185 2275
2276 u32 ap_unexpected_nlpid;
2277
2186#ifdef CONFIG_CFG80211_WEXT 2278#ifdef CONFIG_CFG80211_WEXT
2187 /* wext data */ 2279 /* wext data */
2188 struct { 2280 struct {
@@ -2349,69 +2441,6 @@ extern int ieee80211_radiotap_iterator_next(
2349extern const unsigned char rfc1042_header[6]; 2441extern const unsigned char rfc1042_header[6];
2350extern const unsigned char bridge_tunnel_header[6]; 2442extern const unsigned char bridge_tunnel_header[6];
2351 2443
2352/* Parsed Information Elements */
2353struct ieee802_11_elems {
2354 u8 *ie_start;
2355 size_t total_len;
2356
2357 /* pointers to IEs */
2358 u8 *ssid;
2359 u8 *supp_rates;
2360 u8 *fh_params;
2361 u8 *ds_params;
2362 u8 *cf_params;
2363 struct ieee80211_tim_ie *tim;
2364 u8 *ibss_params;
2365 u8 *challenge;
2366 u8 *wpa;
2367 u8 *rsn;
2368 u8 *erp_info;
2369 u8 *ext_supp_rates;
2370 u8 *wmm_info;
2371 u8 *wmm_param;
2372 struct ieee80211_ht_cap *ht_cap_elem;
2373 struct ieee80211_ht_info *ht_info_elem;
2374 struct ieee80211_meshconf_ie *mesh_config;
2375 u8 *mesh_id;
2376 u8 *peering;
2377 u8 *preq;
2378 u8 *prep;
2379 u8 *perr;
2380 struct ieee80211_rann_ie *rann;
2381 u8 *ch_switch_elem;
2382 u8 *country_elem;
2383 u8 *pwr_constr_elem;
2384 u8 *quiet_elem; /* first quite element */
2385 u8 *timeout_int;
2386
2387 /* length of them, respectively */
2388 u8 ssid_len;
2389 u8 supp_rates_len;
2390 u8 fh_params_len;
2391 u8 ds_params_len;
2392 u8 cf_params_len;
2393 u8 tim_len;
2394 u8 ibss_params_len;
2395 u8 challenge_len;
2396 u8 wpa_len;
2397 u8 rsn_len;
2398 u8 erp_info_len;
2399 u8 ext_supp_rates_len;
2400 u8 wmm_info_len;
2401 u8 wmm_param_len;
2402 u8 mesh_id_len;
2403 u8 peering_len;
2404 u8 preq_len;
2405 u8 prep_len;
2406 u8 perr_len;
2407 u8 ch_switch_elem_len;
2408 u8 country_elem_len;
2409 u8 pwr_constr_elem_len;
2410 u8 quiet_elem_len;
2411 u8 num_of_quiet_elem; /* can be more the one */
2412 u8 timeout_int_len;
2413};
2414
2415/** 2444/**
2416 * ieee80211_get_hdrlen_from_skb - get header length from data 2445 * ieee80211_get_hdrlen_from_skb - get header length from data
2417 * 2446 *
@@ -2636,8 +2665,10 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy);
2636 * 2665 *
2637 * This informs cfg80211 that BSS information was found and 2666 * This informs cfg80211 that BSS information was found and
2638 * the BSS should be updated/added. 2667 * the BSS should be updated/added.
2668 *
2669 * NOTE: Returns a referenced struct, must be released with cfg80211_put_bss()!
2639 */ 2670 */
2640struct cfg80211_bss* 2671struct cfg80211_bss * __must_check
2641cfg80211_inform_bss_frame(struct wiphy *wiphy, 2672cfg80211_inform_bss_frame(struct wiphy *wiphy,
2642 struct ieee80211_channel *channel, 2673 struct ieee80211_channel *channel,
2643 struct ieee80211_mgmt *mgmt, size_t len, 2674 struct ieee80211_mgmt *mgmt, size_t len,
@@ -2659,8 +2690,10 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
2659 * 2690 *
2660 * This informs cfg80211 that BSS information was found and 2691 * This informs cfg80211 that BSS information was found and
2661 * the BSS should be updated/added. 2692 * the BSS should be updated/added.
2693 *
2694 * NOTE: Returns a referenced struct, must be released with cfg80211_put_bss()!
2662 */ 2695 */
2663struct cfg80211_bss* 2696struct cfg80211_bss * __must_check
2664cfg80211_inform_bss(struct wiphy *wiphy, 2697cfg80211_inform_bss(struct wiphy *wiphy,
2665 struct ieee80211_channel *channel, 2698 struct ieee80211_channel *channel,
2666 const u8 *bssid, 2699 const u8 *bssid,
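A hedged usage sketch for the two inform functions above; the trailing signal/gfp parameters follow the prototypes in this header, and the returned reference is dropped with cfg80211_put_bss() as the NOTE demands.

#include <net/cfg80211.h>

static void my_report_scan_frame(struct wiphy *wiphy,
				 struct ieee80211_channel *channel,
				 struct ieee80211_mgmt *mgmt, size_t len,
				 s32 signal)
{
	struct cfg80211_bss *bss;

	bss = cfg80211_inform_bss_frame(wiphy, channel, mgmt, len,
					signal, GFP_ATOMIC);
	if (bss)
		cfg80211_put_bss(bss);	/* release the reference once done */
}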
@@ -3043,6 +3076,32 @@ void cfg80211_roamed(struct net_device *dev,
3043 const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp); 3076 const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp);
3044 3077
3045/** 3078/**
3079 * cfg80211_roamed_bss - notify cfg80211 of roaming
3080 *
3081 * @dev: network device
3082 * @bss: entry of bss to which STA got roamed
3083 * @req_ie: association request IEs (may be %NULL)
3084 * @req_ie_len: association request IEs length
3085 * @resp_ie: association response IEs (may be %NULL)
3086 * @resp_ie_len: assoc response IEs length
3087 * @gfp: allocation flags
3088 *
3089 * This is just a wrapper to notify cfg80211 of a roaming event where the
3090 * driver passes the bss entry to avoid a race with its timeout. It should be
3091 * called by the underlying driver whenever it roamed from one AP to another
3092 * while connected. Drivers which have roaming implemented in firmware
3093 * may use this function to avoid a race in bss entry timeout where the bss
3094 * entry of the new AP is seen in the driver, but gets timed out by the time
3095 * it is accessed in __cfg80211_roamed() due to delay in scheduling
3096 * rdev->event_work. In case of any failures, the reference is released
3097 * either in cfg80211_roamed_bss() or in __cfg80211_roamed(). Otherwise,
3098 * it will be released while disconnecting from the current bss.
3099 */
3100void cfg80211_roamed_bss(struct net_device *dev, struct cfg80211_bss *bss,
3101 const u8 *req_ie, size_t req_ie_len,
3102 const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp);
3103
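A minimal sketch of a firmware-roaming driver using the new notifier; per the comment above, cfg80211 takes over the bss reference, so the caller must not put it again.

#include <net/cfg80211.h>

static void my_fw_roam_event(struct net_device *dev, struct cfg80211_bss *bss,
			     const u8 *req_ie, size_t req_ie_len,
			     const u8 *resp_ie, size_t resp_ie_len)
{
	/* bss was obtained earlier, e.g. from cfg80211_inform_bss_frame() */
	cfg80211_roamed_bss(dev, bss, req_ie, req_ie_len,
			    resp_ie, resp_ie_len, GFP_KERNEL);
	/* the reference now belongs to cfg80211 */
}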
3104/**
3046 * cfg80211_disconnected - notify cfg80211 that connection was dropped 3105 * cfg80211_disconnected - notify cfg80211 that connection was dropped
3047 * 3106 *
3048 * @dev: network device 3107 * @dev: network device
@@ -3189,6 +3248,74 @@ void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid,
3189void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index, 3248void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
3190 const u8 *bssid, bool preauth, gfp_t gfp); 3249 const u8 *bssid, bool preauth, gfp_t gfp);
3191 3250
3251/**
3252 * cfg80211_rx_spurious_frame - inform userspace about a spurious frame
3253 * @dev: The device the frame matched to
3254 * @addr: the transmitter address
3255 * @gfp: context flags
3256 *
3257 * This function is used in AP mode (only!) to inform userspace that
3258 * a spurious class 3 frame was received, to be able to deauth the
3259 * sender.
3260 * Returns %true if the frame was passed to userspace (or this failed
3261 * for a reason other than not having a subscription.)
3262 */
3263bool cfg80211_rx_spurious_frame(struct net_device *dev,
3264 const u8 *addr, gfp_t gfp);
3265
3266/**
3267 * cfg80211_rx_unexpected_4addr_frame - inform about unexpected WDS frame
3268 * @dev: The device the frame matched to
3269 * @addr: the transmitter address
3270 * @gfp: context flags
3271 *
3272 * This function is used in AP mode (only!) to inform userspace that
3273 * an associated station sent a 4addr frame but that wasn't expected.
3274 * It is allowed and desirable to send this event only once for each
3275 * station to avoid event flooding.
3276 * Returns %true if the frame was passed to userspace (or this failed
3277 * for a reason other than not having a subscription.)
3278 */
3279bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev,
3280 const u8 *addr, gfp_t gfp);
3281
3282/**
3283 * cfg80211_probe_status - notify userspace about probe status
3284 * @dev: the device the probe was sent on
3285 * @addr: the address of the peer
3286 * @cookie: the cookie filled in @probe_client previously
3287 * @acked: indicates whether probe was acked or not
3288 * @gfp: allocation flags
3289 */
3290void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
3291 u64 cookie, bool acked, gfp_t gfp);
3292
3293/**
3294 * cfg80211_report_obss_beacon - report beacon from other APs
3295 * @wiphy: The wiphy that received the beacon
3296 * @frame: the frame
3297 * @len: length of the frame
3298 * @freq: frequency the frame was received on
3299 * @gfp: allocation flags
3300 *
3301 * Use this function to report to userspace when a beacon was
3302 * received. It is not useful to call this when there is no
3303 * netdev that is in AP/GO mode.
3304 */
3305void cfg80211_report_obss_beacon(struct wiphy *wiphy,
3306 const u8 *frame, size_t len,
3307 int freq, gfp_t gfp);
3308
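A hedged sketch of reporting beacons from other BSSes, which per the comment above is only useful while an AP/GO interface exists and the driver has advertised WIPHY_FLAG_REPORTS_OBSS.

#include <net/cfg80211.h>

static void my_rx_other_bss_beacon(struct wiphy *wiphy, const u8 *frame,
				   size_t len, int freq)
{
	cfg80211_report_obss_beacon(wiphy, frame, len, freq, GFP_ATOMIC);
}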
3309/*
3310 * cfg80211_can_beacon_sec_chan - test if ht40 on extension channel can be used
3311 * @wiphy: the wiphy
3312 * @chan: main channel
3313 * @channel_type: HT mode
3314 */
3315int cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
3316 struct ieee80211_channel *chan,
3317 enum nl80211_channel_type channel_type);
3318
3192/* Logging, debugging and troubleshooting/diagnostic helpers. */ 3319/* Logging, debugging and troubleshooting/diagnostic helpers. */
3193 3320
3194/* wiphy_printk helpers, similar to dev_printk */ 3321/* wiphy_printk helpers, similar to dev_printk */
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 839f768f9e35..7828ebf99ee1 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -11,6 +11,11 @@
11#ifndef __LINUX_NET_DSA_H 11#ifndef __LINUX_NET_DSA_H
12#define __LINUX_NET_DSA_H 12#define __LINUX_NET_DSA_H
13 13
14#include <linux/if_ether.h>
15#include <linux/list.h>
16#include <linux/timer.h>
17#include <linux/workqueue.h>
18
14#define DSA_MAX_SWITCHES 4 19#define DSA_MAX_SWITCHES 4
15#define DSA_MAX_PORTS 12 20#define DSA_MAX_PORTS 12
16 21
@@ -54,8 +59,143 @@ struct dsa_platform_data {
54 struct dsa_chip_data *chip; 59 struct dsa_chip_data *chip;
55}; 60};
56 61
57extern bool dsa_uses_dsa_tags(void *dsa_ptr); 62struct dsa_switch_tree {
58extern bool dsa_uses_trailer_tags(void *dsa_ptr); 63 /*
64 * Configuration data for the platform device that owns
65 * this dsa switch tree instance.
66 */
67 struct dsa_platform_data *pd;
68
69 /*
70 * Reference to network device to use, and which tagging
71 * protocol to use.
72 */
73 struct net_device *master_netdev;
74 __be16 tag_protocol;
75
76 /*
77 * The switch and port to which the CPU is attached.
78 */
79 s8 cpu_switch;
80 s8 cpu_port;
81
82 /*
83 * Link state polling.
84 */
85 int link_poll_needed;
86 struct work_struct link_poll_work;
87 struct timer_list link_poll_timer;
88
89 /*
90 * Data for the individual switch chips.
91 */
92 struct dsa_switch *ds[DSA_MAX_SWITCHES];
93};
94
95struct dsa_switch {
96 /*
97 * Parent switch tree, and switch index.
98 */
99 struct dsa_switch_tree *dst;
100 int index;
101
102 /*
103 * Configuration data for this switch.
104 */
105 struct dsa_chip_data *pd;
106
107 /*
108 * The used switch driver.
109 */
110 struct dsa_switch_driver *drv;
111
112 /*
113 * Reference to mii bus to use.
114 */
115 struct mii_bus *master_mii_bus;
116
117 /*
118 * Slave mii_bus and devices for the individual ports.
119 */
120 u32 dsa_port_mask;
121 u32 phys_port_mask;
122 struct mii_bus *slave_mii_bus;
123 struct net_device *ports[DSA_MAX_PORTS];
124};
125
126static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
127{
128 return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port);
129}
130
131static inline u8 dsa_upstream_port(struct dsa_switch *ds)
132{
133 struct dsa_switch_tree *dst = ds->dst;
134
135 /*
136 * If this is the root switch (i.e. the switch that connects
137 * to the CPU), return the cpu port number on this switch.
138 * Else return the (DSA) port number that connects to the
139 * switch that is one hop closer to the cpu.
140 */
141 if (dst->cpu_switch == ds->index)
142 return dst->cpu_port;
143 else
144 return ds->pd->rtable[dst->cpu_switch];
145}
146
147struct dsa_switch_driver {
148 struct list_head list;
149
150 __be16 tag_protocol;
151 int priv_size;
152
153 /*
154 * Probing and setup.
155 */
156 char *(*probe)(struct mii_bus *bus, int sw_addr);
157 int (*setup)(struct dsa_switch *ds);
158 int (*set_addr)(struct dsa_switch *ds, u8 *addr);
159
160 /*
161 * Access to the switch's PHY registers.
162 */
163 int (*phy_read)(struct dsa_switch *ds, int port, int regnum);
164 int (*phy_write)(struct dsa_switch *ds, int port,
165 int regnum, u16 val);
166
167 /*
168 * Link state polling and IRQ handling.
169 */
170 void (*poll_link)(struct dsa_switch *ds);
171
172 /*
173 * ethtool hardware statistics.
174 */
175 void (*get_strings)(struct dsa_switch *ds, int port, uint8_t *data);
176 void (*get_ethtool_stats)(struct dsa_switch *ds,
177 int port, uint64_t *data);
178 int (*get_sset_count)(struct dsa_switch *ds);
179};
180
181void register_switch_driver(struct dsa_switch_driver *type);
182void unregister_switch_driver(struct dsa_switch_driver *type);
183
184/*
185 * The original DSA tag format and some other tag formats have no
186 * ethertype, which means that we need to add a little hack to the
187 * networking receive path to make sure that received frames get
188 * the right ->protocol assigned to them when one of those tag
189 * formats is in use.
190 */
191static inline bool dsa_uses_dsa_tags(struct dsa_switch_tree *dst)
192{
193 return !!(dst->tag_protocol == htons(ETH_P_DSA));
194}
59 195
196static inline bool dsa_uses_trailer_tags(struct dsa_switch_tree *dst)
197{
198 return !!(dst->tag_protocol == htons(ETH_P_TRAILER));
199}
60 200
61#endif 201#endif
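To tie the structures above together, a hedged sketch of a minimal switch driver: everything prefixed my_ is invented, only the helpers, fields and registration calls from this header are real.

#include <linux/module.h>
#include <net/dsa.h>

static int my_setup(struct dsa_switch *ds)
{
	int p;

	/* bring up the CPU port plus every port in phys_port_mask */
	for (p = 0; p < DSA_MAX_PORTS; p++) {
		if (!dsa_is_cpu_port(ds, p) &&
		    !(ds->phys_port_mask & (1 << p)))
			continue;
		/* ... program the hardware for port p here ... */
	}
	return 0;
}

static struct dsa_switch_driver my_switch_driver = {
	.tag_protocol	= cpu_to_be16(ETH_P_DSA),
	.setup		= my_setup,
	/* .probe, .set_addr, .phy_read, .phy_write, ... as needed */
};

static int __init my_switch_init(void)
{
	register_switch_driver(&my_switch_driver);
	return 0;
}
module_init(my_switch_init);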
diff --git a/include/net/dst.h b/include/net/dst.h
index 75766b42660e..344c8dd02874 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -87,12 +87,12 @@ struct dst_entry {
87 }; 87 };
88}; 88};
89 89
90static inline struct neighbour *dst_get_neighbour(struct dst_entry *dst) 90static inline struct neighbour *dst_get_neighbour_noref(struct dst_entry *dst)
91{ 91{
92 return rcu_dereference(dst->_neighbour); 92 return rcu_dereference(dst->_neighbour);
93} 93}
94 94
95static inline struct neighbour *dst_get_neighbour_raw(struct dst_entry *dst) 95static inline struct neighbour *dst_get_neighbour_noref_raw(struct dst_entry *dst)
96{ 96{
97 return rcu_dereference_raw(dst->_neighbour); 97 return rcu_dereference_raw(dst->_neighbour);
98} 98}
@@ -393,7 +393,7 @@ static inline void dst_confirm(struct dst_entry *dst)
393 struct neighbour *n; 393 struct neighbour *n;
394 394
395 rcu_read_lock(); 395 rcu_read_lock();
396 n = dst_get_neighbour(dst); 396 n = dst_get_neighbour_noref(dst);
397 neigh_confirm(n); 397 neigh_confirm(n);
398 rcu_read_unlock(); 398 rcu_read_unlock();
399 } 399 }
diff --git a/include/net/flow.h b/include/net/flow.h
index 57f15a7f1cdd..da1f064a81b3 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -59,8 +59,11 @@ struct flowi4 {
59#define flowi4_proto __fl_common.flowic_proto 59#define flowi4_proto __fl_common.flowic_proto
60#define flowi4_flags __fl_common.flowic_flags 60#define flowi4_flags __fl_common.flowic_flags
61#define flowi4_secid __fl_common.flowic_secid 61#define flowi4_secid __fl_common.flowic_secid
62 __be32 daddr; 62
63 /* (saddr,daddr) must be grouped, same order as in IP header */
63 __be32 saddr; 64 __be32 saddr;
65 __be32 daddr;
66
64 union flowi_uli uli; 67 union flowi_uli uli;
65#define fl4_sport uli.ports.sport 68#define fl4_sport uli.ports.sport
66#define fl4_dport uli.ports.dport 69#define fl4_dport uli.ports.dport
diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h
new file mode 100644
index 000000000000..80461c1ae9ef
--- /dev/null
+++ b/include/net/flow_keys.h
@@ -0,0 +1,16 @@
1#ifndef _NET_FLOW_KEYS_H
2#define _NET_FLOW_KEYS_H
3
4struct flow_keys {
5 /* (src,dst) must be grouped, in the same order as in the IP header */
6 __be32 src;
7 __be32 dst;
8 union {
9 __be32 ports;
10 __be16 port16[2];
11 };
12 u8 ip_proto;
13};
14
15extern bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow);
16#endif
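A hedged example of consuming the new dissector, e.g. for flow hashing or queue selection; jhash_3words() is the existing helper from <linux/jhash.h>, and hashrnd is assumed to be provided by the caller.

#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <net/flow_keys.h>

static u32 my_flow_hash(const struct sk_buff *skb, u32 hashrnd)
{
	struct flow_keys keys;

	if (!skb_flow_dissect(skb, &keys))
		return 0;	/* not a dissectable packet */

	return jhash_3words((__force u32)keys.dst, (__force u32)keys.src,
			    (__force u32)keys.ports, hashrnd);
}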
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 82d8d09faa44..7db32995ccd3 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -128,6 +128,8 @@ extern int genl_register_mc_group(struct genl_family *family,
128 struct genl_multicast_group *grp); 128 struct genl_multicast_group *grp);
129extern void genl_unregister_mc_group(struct genl_family *family, 129extern void genl_unregister_mc_group(struct genl_family *family,
130 struct genl_multicast_group *grp); 130 struct genl_multicast_group *grp);
131extern void genl_notify(struct sk_buff *skb, struct net *net, u32 pid,
132 u32 group, struct nlmsghdr *nlh, gfp_t flags);
131 133
132/** 134/**
133 * genlmsg_put - Add generic netlink header to netlink message 135 * genlmsg_put - Add generic netlink header to netlink message
diff --git a/include/net/icmp.h b/include/net/icmp.h
index f0698b955b73..75d615649071 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -31,8 +31,8 @@ struct icmp_err {
31extern const struct icmp_err icmp_err_convert[]; 31extern const struct icmp_err icmp_err_convert[];
32#define ICMP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.icmp_statistics, field) 32#define ICMP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.icmp_statistics, field)
33#define ICMP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.icmp_statistics, field) 33#define ICMP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.icmp_statistics, field)
34#define ICMPMSGOUT_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.icmpmsg_statistics, field+256) 34#define ICMPMSGOUT_INC_STATS(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field+256)
35#define ICMPMSGIN_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.icmpmsg_statistics, field) 35#define ICMPMSGIN_INC_STATS_BH(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field)
36 36
37struct dst_entry; 37struct dst_entry;
38struct net_proto_family; 38struct net_proto_family;
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index 7e2c4d483ad0..71392545d0a1 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -271,14 +271,6 @@ enum ieee80211_radiotap_type {
271#define IEEE80211_RADIOTAP_MCS_FEC_LDPC 0x10 271#define IEEE80211_RADIOTAP_MCS_FEC_LDPC 0x10
272 272
273 273
274/* Ugly macro to convert literal channel numbers into their mhz equivalents
275 * There are certianly some conditions that will break this (like feeding it '30')
276 * but they shouldn't arise since nothing talks on channel 30. */
277#define ieee80211chan2mhz(x) \
278 (((x) <= 14) ? \
279 (((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
280 ((x) + 1000) * 5)
281
282/* helpers */ 274/* helpers */
283static inline int ieee80211_get_radiotap_len(unsigned char *data) 275static inline int ieee80211_get_radiotap_len(unsigned char *data)
284{ 276{
diff --git a/include/net/ieee802154.h b/include/net/ieee802154.h
index d52685defb11..ee59f8b188dd 100644
--- a/include/net/ieee802154.h
+++ b/include/net/ieee802154.h
@@ -21,11 +21,14 @@
21 * Maxim Gorbachyov <maxim.gorbachev@siemens.com> 21 * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
22 * Maxim Osipov <maxim.osipov@siemens.com> 22 * Maxim Osipov <maxim.osipov@siemens.com>
23 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> 23 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
24 * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
24 */ 25 */
25 26
26#ifndef NET_IEEE802154_H 27#ifndef NET_IEEE802154_H
27#define NET_IEEE802154_H 28#define NET_IEEE802154_H
28 29
30#define IEEE802154_MTU 127
31
29#define IEEE802154_FC_TYPE_BEACON 0x0 /* Frame is beacon */ 32#define IEEE802154_FC_TYPE_BEACON 0x0 /* Frame is beacon */
30#define IEEE802154_FC_TYPE_DATA 0x1 /* Frame is data */ 33#define IEEE802154_FC_TYPE_DATA 0x1 /* Frame is data */
31#define IEEE802154_FC_TYPE_ACK 0x2 /* Frame is acknowledgment */ 34#define IEEE802154_FC_TYPE_ACK 0x2 /* Frame is acknowledgment */
@@ -56,6 +59,9 @@
56 (((x) & IEEE802154_FC_DAMODE_MASK) >> IEEE802154_FC_DAMODE_SHIFT) 59 (((x) & IEEE802154_FC_DAMODE_MASK) >> IEEE802154_FC_DAMODE_SHIFT)
57 60
58 61
62/* MAC footer size */
63#define IEEE802154_MFR_SIZE 2 /* 2 octets */
64
59/* MAC's Command Frames Identifiers */ 65/* MAC's Command Frames Identifiers */
60#define IEEE802154_CMD_ASSOCIATION_REQ 0x01 66#define IEEE802154_CMD_ASSOCIATION_REQ 0x01
61#define IEEE802154_CMD_ASSOCIATION_RESP 0x02 67#define IEEE802154_CMD_ASSOCIATION_RESP 0x02
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index e46674d5daea..00cbb4384c79 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -15,7 +15,7 @@
15#define _INET6_HASHTABLES_H 15#define _INET6_HASHTABLES_H
16 16
17 17
18#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) 18#if IS_ENABLED(CONFIG_IPV6)
19#include <linux/in6.h> 19#include <linux/in6.h>
20#include <linux/ipv6.h> 20#include <linux/ipv6.h>
21#include <linux/types.h> 21#include <linux/types.h>
@@ -110,5 +110,5 @@ extern struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo
110 const struct in6_addr *saddr, const __be16 sport, 110 const struct in6_addr *saddr, const __be16 sport,
111 const struct in6_addr *daddr, const __be16 dport, 111 const struct in6_addr *daddr, const __be16 dport,
112 const int dif); 112 const int dif);
113#endif /* defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) */ 113#endif /* IS_ENABLED(CONFIG_IPV6) */
114#endif /* _INET6_HASHTABLES_H */ 114#endif /* _INET6_HASHTABLES_H */
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index e6db62e756dc..dbf9aab34c82 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -143,9 +143,9 @@ static inline void *inet_csk_ca(const struct sock *sk)
143 return (void *)inet_csk(sk)->icsk_ca_priv; 143 return (void *)inet_csk(sk)->icsk_ca_priv;
144} 144}
145 145
146extern struct sock *inet_csk_clone(struct sock *sk, 146extern struct sock *inet_csk_clone_lock(const struct sock *sk,
147 const struct request_sock *req, 147 const struct request_sock *req,
148 const gfp_t priority); 148 const gfp_t priority);
149 149
150enum inet_csk_ack_state_t { 150enum inet_csk_ack_state_t {
151 ICSK_ACK_SCHED = 1, 151 ICSK_ACK_SCHED = 1,
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index f941964a9931..e3e405106afe 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -71,7 +71,7 @@ struct ip_options_data {
71 71
72struct inet_request_sock { 72struct inet_request_sock {
73 struct request_sock req; 73 struct request_sock req;
74#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 74#if IS_ENABLED(CONFIG_IPV6)
75 u16 inet6_rsk_offset; 75 u16 inet6_rsk_offset;
76#endif 76#endif
77 __be16 loc_port; 77 __be16 loc_port;
@@ -139,7 +139,7 @@ struct rtable;
139struct inet_sock { 139struct inet_sock {
140 /* sk and pinet6 has to be the first two members of inet_sock */ 140 /* sk and pinet6 has to be the first two members of inet_sock */
141 struct sock sk; 141 struct sock sk;
142#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 142#if IS_ENABLED(CONFIG_IPV6)
143 struct ipv6_pinfo *pinet6; 143 struct ipv6_pinfo *pinet6;
144#endif 144#endif
145 /* Socket demultiplex comparisons on incoming packets. */ 145 /* Socket demultiplex comparisons on incoming packets. */
@@ -188,7 +188,7 @@ static inline void __inet_sk_copy_descendant(struct sock *sk_to,
188 memcpy(inet_sk(sk_to) + 1, inet_sk(sk_from) + 1, 188 memcpy(inet_sk(sk_to) + 1, inet_sk(sk_from) + 1,
189 sk_from->sk_prot->obj_size - ancestor_size); 189 sk_from->sk_prot->obj_size - ancestor_size);
190} 190}
191#if !(defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)) 191#if !(IS_ENABLED(CONFIG_IPV6))
192static inline void inet_sk_copy_descendant(struct sock *sk_to, 192static inline void inet_sk_copy_descendant(struct sock *sk_to,
193 const struct sock *sk_from) 193 const struct sock *sk_from)
194{ 194{
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index e8c25b981205..ba52c830a7a5 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -218,20 +218,12 @@ extern void inet_twsk_purge(struct inet_hashinfo *hashinfo,
218static inline 218static inline
219struct net *twsk_net(const struct inet_timewait_sock *twsk) 219struct net *twsk_net(const struct inet_timewait_sock *twsk)
220{ 220{
221#ifdef CONFIG_NET_NS 221 return read_pnet(&twsk->tw_net);
222 return rcu_dereference_raw(twsk->tw_net); /* protected by locking, */
223 /* reference counting, */
224 /* initialization, or RCU. */
225#else
226 return &init_net;
227#endif
228} 222}
229 223
230static inline 224static inline
231void twsk_net_set(struct inet_timewait_sock *twsk, struct net *net) 225void twsk_net_set(struct inet_timewait_sock *twsk, struct net *net)
232{ 226{
233#ifdef CONFIG_NET_NS 227 write_pnet(&twsk->tw_net, net);
234 rcu_assign_pointer(twsk->tw_net, net);
235#endif
236} 228}
237#endif /* _INET_TIMEWAIT_SOCK_ */ 229#endif /* _INET_TIMEWAIT_SOCK_ */
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index e9ff3fc5e688..06b795dd5906 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -87,7 +87,7 @@ static inline struct inet_peer *inet_getpeer_v6(const struct in6_addr *v6daddr,
87{ 87{
88 struct inetpeer_addr daddr; 88 struct inetpeer_addr daddr;
89 89
90 ipv6_addr_copy((struct in6_addr *)daddr.addr.a6, v6daddr); 90 *(struct in6_addr *)daddr.addr.a6 = *v6daddr;
91 daddr.family = AF_INET6; 91 daddr.family = AF_INET6;
92 return inet_getpeer(&daddr, create); 92 return inet_getpeer(&daddr, create);
93} 93}
diff --git a/include/net/ip.h b/include/net/ip.h
index eca0ef7a495e..775009f9eaba 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -353,14 +353,14 @@ static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast,
353 memcpy(buf, &naddr, sizeof(naddr)); 353 memcpy(buf, &naddr, sizeof(naddr));
354} 354}
355 355
356#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 356#if IS_ENABLED(CONFIG_IPV6)
357#include <linux/ipv6.h> 357#include <linux/ipv6.h>
358#endif 358#endif
359 359
360static __inline__ void inet_reset_saddr(struct sock *sk) 360static __inline__ void inet_reset_saddr(struct sock *sk)
361{ 361{
362 inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0; 362 inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
363#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 363#if IS_ENABLED(CONFIG_IPV6)
364 if (sk->sk_family == PF_INET6) { 364 if (sk->sk_family == PF_INET6) {
365 struct ipv6_pinfo *np = inet6_sk(sk); 365 struct ipv6_pinfo *np = inet6_sk(sk);
366 366
@@ -379,7 +379,7 @@ static inline int sk_mc_loop(struct sock *sk)
379 switch (sk->sk_family) { 379 switch (sk->sk_family) {
380 case AF_INET: 380 case AF_INET:
381 return inet_sk(sk)->mc_loop; 381 return inet_sk(sk)->mc_loop;
382#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 382#if IS_ENABLED(CONFIG_IPV6)
383 case AF_INET6: 383 case AF_INET6:
384 return inet6_sk(sk)->mc_loop; 384 return inet6_sk(sk)->mc_loop;
385#endif 385#endif
@@ -450,7 +450,7 @@ extern int ip_options_rcv_srr(struct sk_buff *skb);
450 * Functions provided by ip_sockglue.c 450 * Functions provided by ip_sockglue.c
451 */ 451 */
452 452
453extern int ip_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); 453extern void ipv4_pktinfo_prepare(struct sk_buff *skb);
454extern void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb); 454extern void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
455extern int ip_cmsg_send(struct net *net, 455extern int ip_cmsg_send(struct net *net,
456 struct msghdr *msg, struct ipcm_cookie *ipc); 456 struct msghdr *msg, struct ipcm_cookie *ipc);
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 5735a0f979c3..b26bb8101981 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -86,9 +86,6 @@ struct fib6_table;
86struct rt6_info { 86struct rt6_info {
87 struct dst_entry dst; 87 struct dst_entry dst;
88 88
89#define rt6i_dev dst.dev
90#define rt6i_expires dst.expires
91
92 /* 89 /*
93 * Tail elements of dst_entry (__refcnt etc.) 90 * Tail elements of dst_entry (__refcnt etc.)
94 * and these elements (rarely used in hot path) are in 91 * and these elements (rarely used in hot path) are in
@@ -202,6 +199,10 @@ struct fib6_node *fib6_locate(struct fib6_node *root,
202 const struct in6_addr *daddr, int dst_len, 199 const struct in6_addr *daddr, int dst_len,
203 const struct in6_addr *saddr, int src_len); 200 const struct in6_addr *saddr, int src_len);
204 201
202extern void fib6_clean_all_ro(struct net *net,
203 int (*func)(struct rt6_info *, void *arg),
204 int prune, void *arg);
205
205extern void fib6_clean_all(struct net *net, 206extern void fib6_clean_all(struct net *net,
206 int (*func)(struct rt6_info *, void *arg), 207 int (*func)(struct rt6_info *, void *arg),
207 int prune, void *arg); 208 int prune, void *arg);
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 5e91b72fc718..2ad92ca4e6f3 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -70,6 +70,8 @@ extern void ip6_route_input(struct sk_buff *skb);
70extern struct dst_entry * ip6_route_output(struct net *net, 70extern struct dst_entry * ip6_route_output(struct net *net,
71 const struct sock *sk, 71 const struct sock *sk,
72 struct flowi6 *fl6); 72 struct flowi6 *fl6);
73extern struct dst_entry * ip6_route_lookup(struct net *net,
74 struct flowi6 *fl6, int flags);
73 75
74extern int ip6_route_init(void); 76extern int ip6_route_init(void);
75extern void ip6_route_cleanup(void); 77extern void ip6_route_cleanup(void);
@@ -95,14 +97,14 @@ extern struct rt6_info *rt6_lookup(struct net *net,
95 97
96extern struct dst_entry *icmp6_dst_alloc(struct net_device *dev, 98extern struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
97 struct neighbour *neigh, 99 struct neighbour *neigh,
98 const struct in6_addr *addr); 100 struct flowi6 *fl6);
99extern int icmp6_dst_gc(void); 101extern int icmp6_dst_gc(void);
100 102
101extern void fib6_force_start_gc(struct net *net); 103extern void fib6_force_start_gc(struct net *net);
102 104
103extern struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, 105extern struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
104 const struct in6_addr *addr, 106 const struct in6_addr *addr,
105 int anycast); 107 bool anycast);
106 108
107extern int ip6_dst_hoplimit(struct dst_entry *dst); 109extern int ip6_dst_hoplimit(struct dst_entry *dst);
108 110
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index e5a7b9aaf552..ebe517f2da9f 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -21,7 +21,7 @@
21#include <linux/netfilter.h> /* for union nf_inet_addr */ 21#include <linux/netfilter.h> /* for union nf_inet_addr */
22#include <linux/ip.h> 22#include <linux/ip.h>
23#include <linux/ipv6.h> /* for struct ipv6hdr */ 23#include <linux/ipv6.h> /* for struct ipv6hdr */
24#include <net/ipv6.h> /* for ipv6_addr_copy */ 24#include <net/ipv6.h>
25#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 25#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
26#include <net/netfilter/nf_conntrack.h> 26#include <net/netfilter/nf_conntrack.h>
27#endif 27#endif
@@ -119,8 +119,8 @@ ip_vs_fill_iphdr(int af, const void *nh, struct ip_vs_iphdr *iphdr)
119 const struct ipv6hdr *iph = nh; 119 const struct ipv6hdr *iph = nh;
120 iphdr->len = sizeof(struct ipv6hdr); 120 iphdr->len = sizeof(struct ipv6hdr);
121 iphdr->protocol = iph->nexthdr; 121 iphdr->protocol = iph->nexthdr;
122 ipv6_addr_copy(&iphdr->saddr.in6, &iph->saddr); 122 iphdr->saddr.in6 = iph->saddr;
123 ipv6_addr_copy(&iphdr->daddr.in6, &iph->daddr); 123 iphdr->daddr.in6 = iph->daddr;
124 } else 124 } else
125#endif 125#endif
126 { 126 {
@@ -137,7 +137,7 @@ static inline void ip_vs_addr_copy(int af, union nf_inet_addr *dst,
137{ 137{
138#ifdef CONFIG_IP_VS_IPV6 138#ifdef CONFIG_IP_VS_IPV6
139 if (af == AF_INET6) 139 if (af == AF_INET6)
140 ipv6_addr_copy(&dst->in6, &src->in6); 140 dst->in6 = src->in6;
141 else 141 else
142#endif 142#endif
143 dst->ip = src->ip; 143 dst->ip = src->ip;
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index a366a8a1fe23..e4170a22fc6f 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -132,6 +132,15 @@ extern struct ctl_path net_ipv6_ctl_path[];
132 SNMP_INC_STATS##modifier((net)->mib.statname##_statistics, (field));\ 132 SNMP_INC_STATS##modifier((net)->mib.statname##_statistics, (field));\
133}) 133})
134 134
135/* per device and per net counters are atomic_long_t */
136#define _DEVINC_ATOMIC_ATOMIC(net, statname, idev, field) \
137({ \
138 struct inet6_dev *_idev = (idev); \
139 if (likely(_idev != NULL)) \
140 SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \
141 SNMP_INC_STATS_ATOMIC_LONG((net)->mib.statname##_statistics, (field));\
142})
143
135#define _DEVADD(net, statname, modifier, idev, field, val) \ 144#define _DEVADD(net, statname, modifier, idev, field, val) \
136({ \ 145({ \
137 struct inet6_dev *_idev = (idev); \ 146 struct inet6_dev *_idev = (idev); \
@@ -168,11 +177,11 @@ extern struct ctl_path net_ipv6_ctl_path[];
168 _DEVINCATOMIC(net, icmpv6, _BH, idev, field) 177 _DEVINCATOMIC(net, icmpv6, _BH, idev, field)
169 178
170#define ICMP6MSGOUT_INC_STATS(net, idev, field) \ 179#define ICMP6MSGOUT_INC_STATS(net, idev, field) \
171 _DEVINCATOMIC(net, icmpv6msg, , idev, field +256) 180 _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256)
172#define ICMP6MSGOUT_INC_STATS_BH(net, idev, field) \ 181#define ICMP6MSGOUT_INC_STATS_BH(net, idev, field) \
173 _DEVINCATOMIC(net, icmpv6msg, _BH, idev, field +256) 182 _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256)
174#define ICMP6MSGIN_INC_STATS_BH(net, idev, field) \ 183#define ICMP6MSGIN_INC_STATS_BH(net, idev, field) \
175 _DEVINCATOMIC(net, icmpv6msg, _BH, idev, field) 184 _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field)
176 185
177struct ip6_ra_chain { 186struct ip6_ra_chain {
178 struct ip6_ra_chain *next; 187 struct ip6_ra_chain *next;
@@ -300,11 +309,6 @@ ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m,
300 ((a1->s6_addr32[3] ^ a2->s6_addr32[3]) & m->s6_addr32[3])); 309 ((a1->s6_addr32[3] ^ a2->s6_addr32[3]) & m->s6_addr32[3]));
301} 310}
302 311
303static inline void ipv6_addr_copy(struct in6_addr *a1, const struct in6_addr *a2)
304{
305 memcpy(a1, a2, sizeof(struct in6_addr));
306}
307
308static inline void ipv6_addr_prefix(struct in6_addr *pfx, 312static inline void ipv6_addr_prefix(struct in6_addr *pfx,
309 const struct in6_addr *addr, 313 const struct in6_addr *addr,
310 int plen) 314 int plen)
@@ -554,7 +558,7 @@ extern void ipv6_push_frag_opts(struct sk_buff *skb,
554 u8 *proto); 558 u8 *proto);
555 559
556extern int ipv6_skip_exthdr(const struct sk_buff *, int start, 560extern int ipv6_skip_exthdr(const struct sk_buff *, int start,
557 u8 *nexthdrp); 561 u8 *nexthdrp, __be16 *frag_offp);
558 562
559extern int ipv6_ext_hdr(u8 nexthdr); 563extern int ipv6_ext_hdr(u8 nexthdr);
560 564
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index f2419cf44cef..0954ec959159 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -27,7 +27,6 @@ enum {
27 IUCV_OPEN, 27 IUCV_OPEN,
28 IUCV_BOUND, 28 IUCV_BOUND,
29 IUCV_LISTEN, 29 IUCV_LISTEN,
30 IUCV_SEVERED,
31 IUCV_DISCONN, 30 IUCV_DISCONN,
32 IUCV_CLOSING, 31 IUCV_CLOSING,
33 IUCV_CLOSED 32 IUCV_CLOSED
@@ -146,7 +145,6 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
146 poll_table *wait); 145 poll_table *wait);
147void iucv_sock_link(struct iucv_sock_list *l, struct sock *s); 146void iucv_sock_link(struct iucv_sock_list *l, struct sock *s);
148void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s); 147void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s);
149int iucv_sock_wait_cnt(struct sock *sk, unsigned long timeo);
150void iucv_accept_enqueue(struct sock *parent, struct sock *sk); 148void iucv_accept_enqueue(struct sock *parent, struct sock *sk);
151void iucv_accept_unlink(struct sock *sk); 149void iucv_accept_unlink(struct sock *sk);
152struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock); 150struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 72eddd1b410b..2a7523edd9b5 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -166,6 +166,7 @@ struct ieee80211_low_level_stats {
166 * that it is only ever disabled for station mode. 166 * that it is only ever disabled for station mode.
167 * @BSS_CHANGED_IDLE: Idle changed for this BSS/interface. 167 * @BSS_CHANGED_IDLE: Idle changed for this BSS/interface.
168 * @BSS_CHANGED_SSID: SSID changed for this BSS (AP mode) 168 * @BSS_CHANGED_SSID: SSID changed for this BSS (AP mode)
169 * @BSS_CHANGED_AP_PROBE_RESP: Probe Response changed for this BSS (AP mode)
169 */ 170 */
170enum ieee80211_bss_change { 171enum ieee80211_bss_change {
171 BSS_CHANGED_ASSOC = 1<<0, 172 BSS_CHANGED_ASSOC = 1<<0,
@@ -184,6 +185,7 @@ enum ieee80211_bss_change {
184 BSS_CHANGED_QOS = 1<<13, 185 BSS_CHANGED_QOS = 1<<13,
185 BSS_CHANGED_IDLE = 1<<14, 186 BSS_CHANGED_IDLE = 1<<14,
186 BSS_CHANGED_SSID = 1<<15, 187 BSS_CHANGED_SSID = 1<<15,
188 BSS_CHANGED_AP_PROBE_RESP = 1<<16,
187 189
188 /* when adding here, make sure to change ieee80211_reconfig */ 190 /* when adding here, make sure to change ieee80211_reconfig */
189}; 191};
@@ -518,7 +520,7 @@ struct ieee80211_tx_rate {
518 * @flags: transmit info flags, defined above 520 * @flags: transmit info flags, defined above
519 * @band: the band to transmit on (use for checking for races) 521 * @band: the band to transmit on (use for checking for races)
520 * @antenna_sel_tx: antenna to use, 0 for automatic diversity 522 * @antenna_sel_tx: antenna to use, 0 for automatic diversity
521 * @pad: padding, ignore 523 * @ack_frame_id: internal frame ID for TX status, used internally
522 * @control: union for control data 524 * @control: union for control data
523 * @status: union for status data 525 * @status: union for status data
524 * @driver_data: array of driver_data pointers 526 * @driver_data: array of driver_data pointers
@@ -535,8 +537,7 @@ struct ieee80211_tx_info {
535 537
536 u8 antenna_sel_tx; 538 u8 antenna_sel_tx;
537 539
538 /* 2 byte hole */ 540 u16 ack_frame_id;
539 u8 pad[2];
540 541
541 union { 542 union {
542 struct { 543 struct {
@@ -901,6 +902,10 @@ static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
901 * @IEEE80211_KEY_FLAG_SW_MGMT: This flag should be set by the driver for a 902 * @IEEE80211_KEY_FLAG_SW_MGMT: This flag should be set by the driver for a
902 * CCMP key if it requires CCMP encryption of management frames (MFP) to 903 * CCMP key if it requires CCMP encryption of management frames (MFP) to
903 * be done in software. 904 * be done in software.
905 * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver
906 * for a CCMP key if space should be prepared for the IV, but the IV
907 * itself should not be generated. Do not set together with
908 * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key.
904 */ 909 */
905enum ieee80211_key_flags { 910enum ieee80211_key_flags {
906 IEEE80211_KEY_FLAG_WMM_STA = 1<<0, 911 IEEE80211_KEY_FLAG_WMM_STA = 1<<0,
@@ -908,6 +913,7 @@ enum ieee80211_key_flags {
908 IEEE80211_KEY_FLAG_GENERATE_MMIC= 1<<2, 913 IEEE80211_KEY_FLAG_GENERATE_MMIC= 1<<2,
909 IEEE80211_KEY_FLAG_PAIRWISE = 1<<3, 914 IEEE80211_KEY_FLAG_PAIRWISE = 1<<3,
910 IEEE80211_KEY_FLAG_SW_MGMT = 1<<4, 915 IEEE80211_KEY_FLAG_SW_MGMT = 1<<4,
916 IEEE80211_KEY_FLAG_PUT_IV_SPACE = 1<<5,
911}; 917};
912 918
913/** 919/**
@@ -1304,6 +1310,16 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
1304} 1310}
1305 1311
1306/** 1312/**
1313 * ieee80211_free_txskb - free TX skb
1314 * @hw: the hardware
1315 * @skb: the skb
1316 *
1317 * Free a transmit skb. Use this function when some failure
1318 * to transmit happened and thus status cannot be reported.
1319 */
1320void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
1321
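A hedged sketch of the intended use in a driver's ->tx() handler; my_queue_frame() is a hypothetical hardware-queue helper.

#include <net/mac80211.h>

int my_queue_frame(void *priv, struct sk_buff *skb);	/* hypothetical */

static void my_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	if (my_queue_frame(hw->priv, skb) < 0)
		ieee80211_free_txskb(hw, skb);	/* no TX status will follow */
}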
1322/**
1307 * DOC: Hardware crypto acceleration 1323 * DOC: Hardware crypto acceleration
1308 * 1324 *
1309 * mac80211 is capable of taking advantage of many hardware 1325 * mac80211 is capable of taking advantage of many hardware
@@ -1744,11 +1760,21 @@ enum ieee80211_frame_release_type {
1744 * skb contains the buffer starting from the IEEE 802.11 header. 1760 * skb contains the buffer starting from the IEEE 802.11 header.
1745 * The low-level driver should send the frame out based on 1761 * The low-level driver should send the frame out based on
1746 * configuration in the TX control data. This handler should, 1762 * configuration in the TX control data. This handler should,
1747 * preferably, never fail and stop queues appropriately, more 1763 * preferably, never fail and stop queues appropriately.
1748 * importantly, however, it must never fail for A-MPDU-queues. 1764 * This must be implemented if @tx_frags is not.
1749 * This function should return NETDEV_TX_OK except in very 1765 * Must be atomic.
1750 * limited cases. 1766 *
1751 * Must be implemented and atomic. 1767 * @tx_frags: Called to transmit multiple fragments of a single MSDU.
1768 * This handler must consume all fragments; sending out only some of
1769 * them is useless and it can't ask for some of them to be
1770 * queued again. If the frame is not fragmented the queue has a
1771 * single SKB only. To avoid issues with the networking stack
1772 * when TX status is reported the frames should be removed from
1773 * the skb queue.
1774 * If this is used, the tx_info @vif and @sta pointers will be
1775 * invalid -- you must not use them in that case.
1776 * This must be implemented if @tx isn't.
1777 * Must be atomic.
1752 * 1778 *
1753 * @start: Called before the first netdevice attached to the hardware 1779 * @start: Called before the first netdevice attached to the hardware
1754 * is enabled. This should turn on the hardware and must turn on 1780 * is enabled. This should turn on the hardware and must turn on
@@ -2085,6 +2111,8 @@ enum ieee80211_frame_release_type {
2085 */ 2111 */
2086struct ieee80211_ops { 2112struct ieee80211_ops {
2087 void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb); 2113 void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
2114 void (*tx_frags)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2115 struct ieee80211_sta *sta, struct sk_buff_head *skbs);
2088 int (*start)(struct ieee80211_hw *hw); 2116 int (*start)(struct ieee80211_hw *hw);
2089 void (*stop)(struct ieee80211_hw *hw); 2117 void (*stop)(struct ieee80211_hw *hw);
2090#ifdef CONFIG_PM 2118#ifdef CONFIG_PM
@@ -2661,6 +2689,19 @@ static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
2661} 2689}
2662 2690
2663/** 2691/**
2692 * ieee80211_proberesp_get - retrieve a Probe Response template
2693 * @hw: pointer obtained from ieee80211_alloc_hw().
2694 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
2695 *
2696 * Creates a Probe Response template which can, for example, be uploaded to
2697 * hardware. The destination address should be set by the caller.
2698 *
2699 * Can only be called in AP mode.
2700 */
2701struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
2702 struct ieee80211_vif *vif);
2703
2704/**
2664 * ieee80211_pspoll_get - retrieve a PS Poll template 2705 * ieee80211_pspoll_get - retrieve a PS Poll template
2665 * @hw: pointer obtained from ieee80211_alloc_hw(). 2706 * @hw: pointer obtained from ieee80211_alloc_hw().
2666 * @vif: &struct ieee80211_vif pointer from the add_interface callback. 2707 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
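A hedged sketch of fetching the new template from an AP-mode driver; my_fw_upload_proberesp() is a stand-in for the driver's firmware interface:

#include <net/mac80211.h>

static int drv_update_proberesp(struct ieee80211_hw *hw,
                                struct ieee80211_vif *vif)
{
        struct sk_buff *skb = ieee80211_proberesp_get(hw, vif);

        if (!skb)
                return -ENOMEM;
        /* the destination address is left for the caller to fill in */
        my_fw_upload_proberesp(hw, skb->data, skb->len);
        dev_kfree_skb(skb);
        return 0;
}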
@@ -3461,9 +3502,12 @@ void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn);
3461 * 3502 *
3462 * @IEEE80211_RC_HT_CHANGED: The HT parameters of the operating channel have 3503 * @IEEE80211_RC_HT_CHANGED: The HT parameters of the operating channel have
3463 * changed, rate control algorithm can update its internal state if needed. 3504 * changed, rate control algorithm can update its internal state if needed.
3505 * @IEEE80211_RC_SMPS_CHANGED: The SMPS state of the station changed, the rate
3506 * control algorithm needs to adjust accordingly.
3464 */ 3507 */
3465enum rate_control_changed { 3508enum rate_control_changed {
3466 IEEE80211_RC_HT_CHANGED = BIT(0) 3509 IEEE80211_RC_HT_CHANGED = BIT(0),
3510 IEEE80211_RC_SMPS_CHANGED = BIT(1),
3467}; 3511};
3468 3512
3469/** 3513/**
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 62beeb97c4b1..e3133c23980e 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -79,6 +79,42 @@ struct nd_opt_hdr {
79 __u8 nd_opt_len; 79 __u8 nd_opt_len;
80} __packed; 80} __packed;
81 81
82static inline u32 ndisc_hashfn(const void *pkey, const struct net_device *dev, __u32 *hash_rnd)
83{
84 const u32 *p32 = pkey;
85
86 return (((p32[0] ^ dev->ifindex) * hash_rnd[0]) +
87 (p32[1] * hash_rnd[1]) +
88 (p32[2] * hash_rnd[2]) +
89 (p32[3] * hash_rnd[3]));
90}
91
92static inline struct neighbour *__ipv6_neigh_lookup(struct neigh_table *tbl, struct net_device *dev, const void *pkey)
93{
94 struct neigh_hash_table *nht;
95 const u32 *p32 = pkey;
96 struct neighbour *n;
97 u32 hash_val;
98
99 rcu_read_lock_bh();
100 nht = rcu_dereference_bh(tbl->nht);
101 hash_val = ndisc_hashfn(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
102 for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
103 n != NULL;
104 n = rcu_dereference_bh(n->next)) {
105 u32 *n32 = (u32 *) n->primary_key;
106 if (n->dev == dev &&
107 ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) |
108 (n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0) {
109 if (!atomic_inc_not_zero(&n->refcnt))
110 n = NULL;
111 break;
112 }
113 }
114 rcu_read_unlock_bh();
115
116 return n;
117}
82 118
83extern int ndisc_init(void); 119extern int ndisc_init(void);
84 120
@@ -145,13 +181,4 @@ int ndisc_ifinfo_sysctl_strategy(ctl_table *ctl,
145extern void inet6_ifinfo_notify(int event, 181extern void inet6_ifinfo_notify(int event,
146 struct inet6_dev *idev); 182 struct inet6_dev *idev);
147 183
148static inline struct neighbour * ndisc_get_neigh(struct net_device *dev, const struct in6_addr *addr)
149{
150
151 if (dev)
152 return __neigh_lookup_errno(&nd_tbl, addr, dev);
153
154 return ERR_PTR(-ENODEV);
155}
156
157#endif 184#endif
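A hedged sketch of calling the new inline lookup; everything used below is declared in this header or in <net/neighbour.h>:

#include <net/ndisc.h>
#include <net/neighbour.h>

/* __ipv6_neigh_lookup() takes a reference via atomic_inc_not_zero(),
 * so a successful lookup must be balanced with neigh_release().
 */
static bool ipv6_neigh_is_connected(struct net_device *dev,
                                    const struct in6_addr *addr)
{
        struct neighbour *n = __ipv6_neigh_lookup(&nd_tbl, dev, addr);
        bool connected = false;

        if (n) {
                connected = !!(n->nud_state & NUD_CONNECTED);
                neigh_release(n);
        }
        return connected;
}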
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 2720884287c3..34c996f46181 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -59,7 +59,7 @@ struct neigh_parms {
59 int reachable_time; 59 int reachable_time;
60 int delay_probe_time; 60 int delay_probe_time;
61 61
62 int queue_len; 62 int queue_len_bytes;
63 int ucast_probes; 63 int ucast_probes;
64 int app_probes; 64 int app_probes;
65 int mcast_probes; 65 int mcast_probes;
@@ -99,6 +99,7 @@ struct neighbour {
99 rwlock_t lock; 99 rwlock_t lock;
100 atomic_t refcnt; 100 atomic_t refcnt;
101 struct sk_buff_head arp_queue; 101 struct sk_buff_head arp_queue;
102 unsigned int arp_queue_len_bytes;
102 struct timer_list timer; 103 struct timer_list timer;
103 unsigned long used; 104 unsigned long used;
104 atomic_t probes; 105 atomic_t probes;
@@ -138,10 +139,12 @@ struct pneigh_entry {
138 * neighbour table manipulation 139 * neighbour table manipulation
139 */ 140 */
140 141
142#define NEIGH_NUM_HASH_RND 4
143
141struct neigh_hash_table { 144struct neigh_hash_table {
142 struct neighbour __rcu **hash_buckets; 145 struct neighbour __rcu **hash_buckets;
143 unsigned int hash_shift; 146 unsigned int hash_shift;
144 __u32 hash_rnd; 147 __u32 hash_rnd[NEIGH_NUM_HASH_RND];
145 struct rcu_head rcu; 148 struct rcu_head rcu;
146}; 149};
147 150
@@ -153,7 +156,7 @@ struct neigh_table {
153 int key_len; 156 int key_len;
154 __u32 (*hash)(const void *pkey, 157 __u32 (*hash)(const void *pkey,
155 const struct net_device *dev, 158 const struct net_device *dev,
156 __u32 hash_rnd); 159 __u32 *hash_rnd);
157 int (*constructor)(struct neighbour *); 160 int (*constructor)(struct neighbour *);
158 int (*pconstructor)(struct pneigh_entry *); 161 int (*pconstructor)(struct pneigh_entry *);
159 void (*pdestructor)(struct pneigh_entry *); 162 void (*pdestructor)(struct pneigh_entry *);
@@ -172,12 +175,18 @@ struct neigh_table {
172 atomic_t entries; 175 atomic_t entries;
173 rwlock_t lock; 176 rwlock_t lock;
174 unsigned long last_rand; 177 unsigned long last_rand;
175 struct kmem_cache *kmem_cachep;
176 struct neigh_statistics __percpu *stats; 178 struct neigh_statistics __percpu *stats;
177 struct neigh_hash_table __rcu *nht; 179 struct neigh_hash_table __rcu *nht;
178 struct pneigh_entry **phash_buckets; 180 struct pneigh_entry **phash_buckets;
179}; 181};
180 182
183#define NEIGH_PRIV_ALIGN sizeof(long long)
184
185static inline void *neighbour_priv(const struct neighbour *n)
186{
187 return (char *)n + ALIGN(sizeof(*n) + n->tbl->key_len, NEIGH_PRIV_ALIGN);
188}
189
181/* flags for neigh_update() */ 190/* flags for neigh_update() */
182#define NEIGH_UPDATE_F_OVERRIDE 0x00000001 191#define NEIGH_UPDATE_F_OVERRIDE 0x00000001
183#define NEIGH_UPDATE_F_WEAK_OVERRIDE 0x00000002 192#define NEIGH_UPDATE_F_WEAK_OVERRIDE 0x00000002
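A hedged sketch of the new neighbour_priv() accessor; the struct and constructor below are purely illustrative, and reserving the extra space is handled outside this header:

#include <net/neighbour.h>

struct my_neigh_priv {
        u32 hw_handle;                  /* hypothetical per-neighbour state */
};

static int my_neigh_construct(struct neighbour *n)
{
        struct my_neigh_priv *priv = neighbour_priv(n);

        priv->hw_handle = 0;            /* area starts after the key, NEIGH_PRIV_ALIGNed */
        return 0;
}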
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 3bb6fa0eace0..ee547c149810 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -77,7 +77,7 @@ struct net {
77 struct netns_packet packet; 77 struct netns_packet packet;
78 struct netns_unix unx; 78 struct netns_unix unx;
79 struct netns_ipv4 ipv4; 79 struct netns_ipv4 ipv4;
80#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 80#if IS_ENABLED(CONFIG_IPV6)
81 struct netns_ipv6 ipv6; 81 struct netns_ipv6 ipv6;
82#endif 82#endif
83#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE) 83#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
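Several hunks in this series replace the open-coded two-macro test with IS_ENABLED() from <linux/kconfig.h>, which evaluates to 1 when the option is built-in ("y") or modular ("m"):

#include <linux/kconfig.h>

/* Equivalent guards, before and after the conversion: */
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/* old spelling */
#endif

#if IS_ENABLED(CONFIG_IPV6)
/* new spelling, same meaning */
#endif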
diff --git a/include/net/netfilter/nf_conntrack_acct.h b/include/net/netfilter/nf_conntrack_acct.h
index 4e9c63a20db2..463ae8e16696 100644
--- a/include/net/netfilter/nf_conntrack_acct.h
+++ b/include/net/netfilter/nf_conntrack_acct.h
@@ -15,8 +15,8 @@
15#include <net/netfilter/nf_conntrack_extend.h> 15#include <net/netfilter/nf_conntrack_extend.h>
16 16
17struct nf_conn_counter { 17struct nf_conn_counter {
18 u_int64_t packets; 18 atomic64_t packets;
19 u_int64_t bytes; 19 atomic64_t bytes;
20}; 20};
21 21
22static inline 22static inline
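With the counters switched to atomic64_t, per-packet accounting can update them locklessly; a minimal sketch (not the conntrack core's actual code):

#include <linux/atomic.h>
#include <net/netfilter/nf_conntrack_acct.h>

static void acct_one_packet(struct nf_conn_counter *counter,
                            unsigned int pkt_len)
{
        atomic64_inc(&counter->packets);
        atomic64_add(pkt_len, &counter->bytes);
}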
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index 0f8a8c587532..4619caadd9d1 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -91,7 +91,6 @@ static inline void nf_ct_unlink_expect(struct nf_conntrack_expect *exp)
91 91
92void nf_ct_remove_expectations(struct nf_conn *ct); 92void nf_ct_remove_expectations(struct nf_conn *ct);
93void nf_ct_unexpect_related(struct nf_conntrack_expect *exp); 93void nf_ct_unexpect_related(struct nf_conntrack_expect *exp);
94void nf_ct_remove_userspace_expectations(void);
95 94
96/* Allocate space for an expectation: this is mandatory before calling 95/* Allocate space for an expectation: this is mandatory before calling
97 nf_ct_expect_related. You will have to call put afterwards. */ 96 nf_ct_expect_related. You will have to call put afterwards. */
diff --git a/include/net/netfilter/nf_conntrack_tuple.h b/include/net/netfilter/nf_conntrack_tuple.h
index 2f8fb77bfdd1..aea3f8221be0 100644
--- a/include/net/netfilter/nf_conntrack_tuple.h
+++ b/include/net/netfilter/nf_conntrack_tuple.h
@@ -12,7 +12,6 @@
12 12
13#include <linux/netfilter/x_tables.h> 13#include <linux/netfilter/x_tables.h>
14#include <linux/netfilter/nf_conntrack_tuple_common.h> 14#include <linux/netfilter/nf_conntrack_tuple_common.h>
15#include <linux/netfilter_ipv4/nf_nat.h>
16#include <linux/list_nulls.h> 15#include <linux/list_nulls.h>
17 16
18/* A `tuple' is a structure containing the information to uniquely 17/* A `tuple' is a structure containing the information to uniquely
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index b8872df7285f..b4de990b55f1 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -1,14 +1,12 @@
1#ifndef _NF_NAT_H 1#ifndef _NF_NAT_H
2#define _NF_NAT_H 2#define _NF_NAT_H
3#include <linux/netfilter_ipv4.h> 3#include <linux/netfilter_ipv4.h>
4#include <linux/netfilter_ipv4/nf_nat.h> 4#include <linux/netfilter/nf_nat.h>
5#include <net/netfilter/nf_conntrack_tuple.h> 5#include <net/netfilter/nf_conntrack_tuple.h>
6 6
7#define NF_NAT_MAPPING_TYPE_MAX_NAMELEN 16
8
9enum nf_nat_manip_type { 7enum nf_nat_manip_type {
10 IP_NAT_MANIP_SRC, 8 NF_NAT_MANIP_SRC,
11 IP_NAT_MANIP_DST 9 NF_NAT_MANIP_DST
12}; 10};
13 11
14/* SRC manip occurs POST_ROUTING or LOCAL_IN */ 12/* SRC manip occurs POST_ROUTING or LOCAL_IN */
@@ -52,7 +50,7 @@ struct nf_conn_nat {
52 50
53/* Set up the info structure to map into this range. */ 51/* Set up the info structure to map into this range. */
54extern unsigned int nf_nat_setup_info(struct nf_conn *ct, 52extern unsigned int nf_nat_setup_info(struct nf_conn *ct,
55 const struct nf_nat_range *range, 53 const struct nf_nat_ipv4_range *range,
56 enum nf_nat_manip_type maniptype); 54 enum nf_nat_manip_type maniptype);
57 55
58/* Is this tuple already taken? (not by us)*/ 56/* Is this tuple already taken? (not by us)*/
diff --git a/include/net/netfilter/nf_nat_core.h b/include/net/netfilter/nf_nat_core.h
index 3dc7b98effeb..b13d8d18d595 100644
--- a/include/net/netfilter/nf_nat_core.h
+++ b/include/net/netfilter/nf_nat_core.h
@@ -20,7 +20,7 @@ extern int nf_nat_icmp_reply_translation(struct nf_conn *ct,
20static inline int nf_nat_initialized(struct nf_conn *ct, 20static inline int nf_nat_initialized(struct nf_conn *ct,
21 enum nf_nat_manip_type manip) 21 enum nf_nat_manip_type manip)
22{ 22{
23 if (manip == IP_NAT_MANIP_SRC) 23 if (manip == NF_NAT_MANIP_SRC)
24 return ct->status & IPS_SRC_NAT_DONE; 24 return ct->status & IPS_SRC_NAT_DONE;
25 else 25 else
26 return ct->status & IPS_DST_NAT_DONE; 26 return ct->status & IPS_DST_NAT_DONE;
diff --git a/include/net/netfilter/nf_nat_protocol.h b/include/net/netfilter/nf_nat_protocol.h
index 93cc90d28e66..7b0b51165f70 100644
--- a/include/net/netfilter/nf_nat_protocol.h
+++ b/include/net/netfilter/nf_nat_protocol.h
@@ -4,14 +4,12 @@
4#include <net/netfilter/nf_nat.h> 4#include <net/netfilter/nf_nat.h>
5#include <linux/netfilter/nfnetlink_conntrack.h> 5#include <linux/netfilter/nfnetlink_conntrack.h>
6 6
7struct nf_nat_range; 7struct nf_nat_ipv4_range;
8 8
9struct nf_nat_protocol { 9struct nf_nat_protocol {
10 /* Protocol number. */ 10 /* Protocol number. */
11 unsigned int protonum; 11 unsigned int protonum;
12 12
13 struct module *me;
14
15 /* Translate a packet to the target according to manip type. 13 /* Translate a packet to the target according to manip type.
16 Return true if succeeded. */ 14 Return true if succeeded. */
17 bool (*manip_pkt)(struct sk_buff *skb, 15 bool (*manip_pkt)(struct sk_buff *skb,
@@ -30,15 +28,12 @@ struct nf_nat_protocol {
30 possible. Per-protocol part of tuple is initialized to the 28 possible. Per-protocol part of tuple is initialized to the
31 incoming packet. */ 29 incoming packet. */
32 void (*unique_tuple)(struct nf_conntrack_tuple *tuple, 30 void (*unique_tuple)(struct nf_conntrack_tuple *tuple,
33 const struct nf_nat_range *range, 31 const struct nf_nat_ipv4_range *range,
34 enum nf_nat_manip_type maniptype, 32 enum nf_nat_manip_type maniptype,
35 const struct nf_conn *ct); 33 const struct nf_conn *ct);
36 34
37 int (*range_to_nlattr)(struct sk_buff *skb,
38 const struct nf_nat_range *range);
39
40 int (*nlattr_to_range)(struct nlattr *tb[], 35 int (*nlattr_to_range)(struct nlattr *tb[],
41 struct nf_nat_range *range); 36 struct nf_nat_ipv4_range *range);
42}; 37};
43 38
44/* Protocol registration. */ 39/* Protocol registration. */
@@ -61,14 +56,12 @@ extern bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple,
61 const union nf_conntrack_man_proto *max); 56 const union nf_conntrack_man_proto *max);
62 57
63extern void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple, 58extern void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
64 const struct nf_nat_range *range, 59 const struct nf_nat_ipv4_range *range,
65 enum nf_nat_manip_type maniptype, 60 enum nf_nat_manip_type maniptype,
66 const struct nf_conn *ct, 61 const struct nf_conn *ct,
67 u_int16_t *rover); 62 u_int16_t *rover);
68 63
69extern int nf_nat_proto_range_to_nlattr(struct sk_buff *skb,
70 const struct nf_nat_range *range);
71extern int nf_nat_proto_nlattr_to_range(struct nlattr *tb[], 64extern int nf_nat_proto_nlattr_to_range(struct nlattr *tb[],
72 struct nf_nat_range *range); 65 struct nf_nat_ipv4_range *range);
73 66
74#endif /*_NF_NAT_PROTO_H*/ 67#endif /*_NF_NAT_PROTO_H*/
diff --git a/include/net/netfilter/nf_tproxy_core.h b/include/net/netfilter/nf_tproxy_core.h
index e505358d8999..75ca9291cf2c 100644
--- a/include/net/netfilter/nf_tproxy_core.h
+++ b/include/net/netfilter/nf_tproxy_core.h
@@ -131,7 +131,7 @@ nf_tproxy_get_sock_v4(struct net *net, const u8 protocol,
131 return sk; 131 return sk;
132} 132}
133 133
134#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 134#if IS_ENABLED(CONFIG_IPV6)
135static inline struct sock * 135static inline struct sock *
136nf_tproxy_get_sock_v6(struct net *net, const u8 protocol, 136nf_tproxy_get_sock_v6(struct net *net, const u8 protocol,
137 const struct in6_addr *saddr, const struct in6_addr *daddr, 137 const struct in6_addr *saddr, const struct in6_addr *daddr,
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index d786b4fc02a4..bbd023a1c9b9 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -55,6 +55,7 @@ struct netns_ipv4 {
55 int current_rt_cache_rebuild_count; 55 int current_rt_cache_rebuild_count;
56 56
57 unsigned int sysctl_ping_group_range[2]; 57 unsigned int sysctl_ping_group_range[2];
58 long sysctl_tcp_mem[3];
58 59
59 atomic_t rt_genid; 60 atomic_t rt_genid;
60 atomic_t dev_addr_genid; 61 atomic_t dev_addr_genid;
diff --git a/include/net/netns/mib.h b/include/net/netns/mib.h
index 0b44112e2366..d542a4b28cca 100644
--- a/include/net/netns/mib.h
+++ b/include/net/netns/mib.h
@@ -10,15 +10,15 @@ struct netns_mib {
10 DEFINE_SNMP_STAT(struct udp_mib, udp_statistics); 10 DEFINE_SNMP_STAT(struct udp_mib, udp_statistics);
11 DEFINE_SNMP_STAT(struct udp_mib, udplite_statistics); 11 DEFINE_SNMP_STAT(struct udp_mib, udplite_statistics);
12 DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics); 12 DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics);
13 DEFINE_SNMP_STAT(struct icmpmsg_mib, icmpmsg_statistics); 13 DEFINE_SNMP_STAT_ATOMIC(struct icmpmsg_mib, icmpmsg_statistics);
14 14
15#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 15#if IS_ENABLED(CONFIG_IPV6)
16 struct proc_dir_entry *proc_net_devsnmp6; 16 struct proc_dir_entry *proc_net_devsnmp6;
17 DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6); 17 DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6);
18 DEFINE_SNMP_STAT(struct udp_mib, udplite_stats_in6); 18 DEFINE_SNMP_STAT(struct udp_mib, udplite_stats_in6);
19 DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics); 19 DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics);
20 DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics); 20 DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics);
21 DEFINE_SNMP_STAT(struct icmpv6msg_mib, icmpv6msg_statistics); 21 DEFINE_SNMP_STAT_ATOMIC(struct icmpv6msg_mib, icmpv6msg_statistics);
22#endif 22#endif
23#ifdef CONFIG_XFRM_STATISTICS 23#ifdef CONFIG_XFRM_STATISTICS
24 DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics); 24 DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics);
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 748f91f87cd5..5299e69a32af 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -56,7 +56,7 @@ struct netns_xfrm {
56#endif 56#endif
57 57
58 struct dst_ops xfrm4_dst_ops; 58 struct dst_ops xfrm4_dst_ops;
59#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 59#if IS_ENABLED(CONFIG_IPV6)
60 struct dst_ops xfrm6_dst_ops; 60 struct dst_ops xfrm6_dst_ops;
61#endif 61#endif
62}; 62};
diff --git a/include/net/netprio_cgroup.h b/include/net/netprio_cgroup.h
new file mode 100644
index 000000000000..e503b87c4c1b
--- /dev/null
+++ b/include/net/netprio_cgroup.h
@@ -0,0 +1,57 @@
1/*
2 * netprio_cgroup.h Control Group Priority set
3 *
4 *
5 * Authors: Neil Horman <nhorman@tuxdriver.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the Free
9 * Software Foundation; either version 2 of the License, or (at your option)
10 * any later version.
11 *
12 */
13
14#ifndef _NETPRIO_CGROUP_H
15#define _NETPRIO_CGROUP_H
16#include <linux/module.h>
17#include <linux/cgroup.h>
18#include <linux/hardirq.h>
19#include <linux/rcupdate.h>
20
21
22struct netprio_map {
23 struct rcu_head rcu;
24 u32 priomap_len;
25 u32 priomap[];
26};
27
28#ifdef CONFIG_CGROUPS
29
30struct cgroup_netprio_state {
31 struct cgroup_subsys_state css;
32 u32 prioidx;
33};
34
35#ifndef CONFIG_NETPRIO_CGROUP
36extern int net_prio_subsys_id;
37#endif
38
39extern void sock_update_netprioidx(struct sock *sk);
40
41static inline struct cgroup_netprio_state
42 *task_netprio_state(struct task_struct *p)
43{
44#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
45 return container_of(task_subsys_state(p, net_prio_subsys_id),
46 struct cgroup_netprio_state, css);
47#else
48 return NULL;
49#endif
50}
51
52#else
53
54#define sock_update_netprioidx(sk)
55#endif
56
57#endif /* _NETPRIO_CGROUP_H */
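A hedged sketch of consuming struct netprio_map; priomap_len bounds the index, and how the map is attached to a device is outside this header:

#include <net/netprio_cgroup.h>

static u32 netprio_lookup(const struct netprio_map *map, u32 prioidx)
{
        if (!map || prioidx >= map->priomap_len)
                return 0;               /* unconfigured entries default to 0 */
        return map->priomap[prioidx];
}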
diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h
index 39b85bc0804f..2be95e2626c0 100644
--- a/include/net/nfc/nci.h
+++ b/include/net/nfc/nci.h
@@ -34,32 +34,30 @@
34#define NCI_MAX_NUM_CONN 10 34#define NCI_MAX_NUM_CONN 10
35 35
36/* NCI Status Codes */ 36/* NCI Status Codes */
37#define NCI_STATUS_OK 0x00 37#define NCI_STATUS_OK 0x00
38#define NCI_STATUS_REJECTED 0x01 38#define NCI_STATUS_REJECTED 0x01
39#define NCI_STATUS_MESSAGE_CORRUPTED 0x02 39#define NCI_STATUS_RF_FRAME_CORRUPTED 0x02
40#define NCI_STATUS_BUFFER_FULL 0x03 40#define NCI_STATUS_FAILED 0x03
41#define NCI_STATUS_FAILED 0x04 41#define NCI_STATUS_NOT_INITIALIZED 0x04
42#define NCI_STATUS_NOT_INITIALIZED 0x05 42#define NCI_STATUS_SYNTAX_ERROR 0x05
43#define NCI_STATUS_SYNTAX_ERROR 0x06 43#define NCI_STATUS_SEMANTIC_ERROR 0x06
44#define NCI_STATUS_SEMANTIC_ERROR 0x07 44#define NCI_STATUS_UNKNOWN_GID 0x07
45#define NCI_STATUS_UNKNOWN_GID 0x08 45#define NCI_STATUS_UNKNOWN_OID 0x08
46#define NCI_STATUS_UNKNOWN_OID 0x09 46#define NCI_STATUS_INVALID_PARAM 0x09
47#define NCI_STATUS_INVALID_PARAM 0x0a 47#define NCI_STATUS_MESSAGE_SIZE_EXCEEDED 0x0a
48#define NCI_STATUS_MESSAGE_SIZE_EXCEEDED 0x0b
49/* Discovery Specific Status Codes */ 48/* Discovery Specific Status Codes */
50#define NCI_STATUS_DISCOVERY_ALREADY_STARTED 0xa0 49#define NCI_STATUS_DISCOVERY_ALREADY_STARTED 0xa0
51#define NCI_STATUS_DISCOVERY_TARGET_ACTIVATION_FAILED 0xa1 50#define NCI_STATUS_DISCOVERY_TARGET_ACTIVATION_FAILED 0xa1
51#define NCI_STATUS_DISCOVERY_TEAR_DOWN 0xa2
52/* RF Interface Specific Status Codes */ 52/* RF Interface Specific Status Codes */
53#define NCI_STATUS_RF_TRANSMISSION_ERROR 0xb0 53#define NCI_STATUS_RF_TRANSMISSION_ERROR 0xb0
54#define NCI_STATUS_RF_PROTOCOL_ERROR 0xb1 54#define NCI_STATUS_RF_PROTOCOL_ERROR 0xb1
55#define NCI_STATUS_RF_TIMEOUT_ERROR 0xb2 55#define NCI_STATUS_RF_TIMEOUT_ERROR 0xb2
56#define NCI_STATUS_RF_LINK_LOSS_ERROR 0xb3
57/* NFCEE Interface Specific Status Codes */ 56/* NFCEE Interface Specific Status Codes */
58#define NCI_STATUS_MAX_ACTIVE_NFCEE_INTERFACES_REACHED 0xc0 57#define NCI_STATUS_NFCEE_INTERFACE_ACTIVATION_FAILED 0xc0
59#define NCI_STATUS_NFCEE_INTERFACE_ACTIVATION_FAILED 0xc1 58#define NCI_STATUS_NFCEE_TRANSMISSION_ERROR 0xc1
60#define NCI_STATUS_NFCEE_TRANSMISSION_ERROR 0xc2 59#define NCI_STATUS_NFCEE_PROTOCOL_ERROR 0xc2
61#define NCI_STATUS_NFCEE_PROTOCOL_ERROR 0xc3 60#define NCI_STATUS_NFCEE_TIMEOUT_ERROR 0xc3
62#define NCI_STATUS_NFCEE_TIMEOUT_ERROR 0xc4
63 61
64/* NCI RF Technology and Mode */ 62/* NCI RF Technology and Mode */
65#define NCI_NFC_A_PASSIVE_POLL_MODE 0x00 63#define NCI_NFC_A_PASSIVE_POLL_MODE 0x00
@@ -67,11 +65,28 @@
67#define NCI_NFC_F_PASSIVE_POLL_MODE 0x02 65#define NCI_NFC_F_PASSIVE_POLL_MODE 0x02
68#define NCI_NFC_A_ACTIVE_POLL_MODE 0x03 66#define NCI_NFC_A_ACTIVE_POLL_MODE 0x03
69#define NCI_NFC_F_ACTIVE_POLL_MODE 0x05 67#define NCI_NFC_F_ACTIVE_POLL_MODE 0x05
68#define NCI_NFC_15693_PASSIVE_POLL_MODE 0x06
70#define NCI_NFC_A_PASSIVE_LISTEN_MODE 0x80 69#define NCI_NFC_A_PASSIVE_LISTEN_MODE 0x80
71#define NCI_NFC_B_PASSIVE_LISTEN_MODE 0x81 70#define NCI_NFC_B_PASSIVE_LISTEN_MODE 0x81
72#define NCI_NFC_F_PASSIVE_LISTEN_MODE 0x82 71#define NCI_NFC_F_PASSIVE_LISTEN_MODE 0x82
73#define NCI_NFC_A_ACTIVE_LISTEN_MODE 0x83 72#define NCI_NFC_A_ACTIVE_LISTEN_MODE 0x83
74#define NCI_NFC_F_ACTIVE_LISTEN_MODE 0x85 73#define NCI_NFC_F_ACTIVE_LISTEN_MODE 0x85
74#define NCI_NFC_15693_PASSIVE_LISTEN_MODE 0x86
75
76/* NCI RF Technologies */
77#define NCI_NFC_RF_TECHNOLOGY_A 0x00
78#define NCI_NFC_RF_TECHNOLOGY_B 0x01
79#define NCI_NFC_RF_TECHNOLOGY_F 0x02
80#define NCI_NFC_RF_TECHNOLOGY_15693 0x03
81
82/* NCI Bit Rates */
83#define NCI_NFC_BIT_RATE_106 0x00
84#define NCI_NFC_BIT_RATE_212 0x01
85#define NCI_NFC_BIT_RATE_424 0x02
86#define NCI_NFC_BIT_RATE_848 0x03
87#define NCI_NFC_BIT_RATE_1695 0x04
88#define NCI_NFC_BIT_RATE_3390 0x05
89#define NCI_NFC_BIT_RATE_6780 0x06
75 90
76/* NCI RF Protocols */ 91/* NCI RF Protocols */
77#define NCI_RF_PROTOCOL_UNKNOWN 0x00 92#define NCI_RF_PROTOCOL_UNKNOWN 0x00
@@ -82,37 +97,30 @@
82#define NCI_RF_PROTOCOL_NFC_DEP 0x05 97#define NCI_RF_PROTOCOL_NFC_DEP 0x05
83 98
84/* NCI RF Interfaces */ 99/* NCI RF Interfaces */
85#define NCI_RF_INTERFACE_RFU 0x00 100#define NCI_RF_INTERFACE_NFCEE_DIRECT 0x00
86#define NCI_RF_INTERFACE_FRAME 0x01 101#define NCI_RF_INTERFACE_FRAME 0x01
87#define NCI_RF_INTERFACE_ISO_DEP 0x02 102#define NCI_RF_INTERFACE_ISO_DEP 0x02
88#define NCI_RF_INTERFACE_NFC_DEP 0x03 103#define NCI_RF_INTERFACE_NFC_DEP 0x03
104
105/* NCI Reset types */
106#define NCI_RESET_TYPE_KEEP_CONFIG 0x00
107#define NCI_RESET_TYPE_RESET_CONFIG 0x01
108
109/* NCI Static RF connection ID */
110#define NCI_STATIC_RF_CONN_ID 0x00
111
112/* NCI Data Flow Control */
113#define NCI_DATA_FLOW_CONTROL_NOT_USED 0xff
89 114
90/* NCI RF_DISCOVER_MAP_CMD modes */ 115/* NCI RF_DISCOVER_MAP_CMD modes */
91#define NCI_DISC_MAP_MODE_POLL 0x01 116#define NCI_DISC_MAP_MODE_POLL 0x01
92#define NCI_DISC_MAP_MODE_LISTEN 0x02 117#define NCI_DISC_MAP_MODE_LISTEN 0x02
93#define NCI_DISC_MAP_MODE_BOTH 0x03
94
95/* NCI Discovery Types */
96#define NCI_DISCOVERY_TYPE_POLL_A_PASSIVE 0x00
97#define NCI_DISCOVERY_TYPE_POLL_B_PASSIVE 0x01
98#define NCI_DISCOVERY_TYPE_POLL_F_PASSIVE 0x02
99#define NCI_DISCOVERY_TYPE_POLL_A_ACTIVE 0x03
100#define NCI_DISCOVERY_TYPE_POLL_F_ACTIVE 0x05
101#define NCI_DISCOVERY_TYPE_WAKEUP_A_PASSIVE 0x06
102#define NCI_DISCOVERY_TYPE_WAKEUP_B_PASSIVE 0x07
103#define NCI_DISCOVERY_TYPE_WAKEUP_A_ACTIVE 0x09
104#define NCI_DISCOVERY_TYPE_LISTEN_A_PASSIVE 0x80
105#define NCI_DISCOVERY_TYPE_LISTEN_B_PASSIVE 0x81
106#define NCI_DISCOVERY_TYPE_LISTEN_F_PASSIVE 0x82
107#define NCI_DISCOVERY_TYPE_LISTEN_A_ACTIVE 0x83
108#define NCI_DISCOVERY_TYPE_LISTEN_F_ACTIVE 0x85
109 118
110/* NCI Deactivation Type */ 119/* NCI Deactivation Type */
111#define NCI_DEACTIVATE_TYPE_IDLE_MODE 0x00 120#define NCI_DEACTIVATE_TYPE_IDLE_MODE 0x00
112#define NCI_DEACTIVATE_TYPE_SLEEP_MODE 0x01 121#define NCI_DEACTIVATE_TYPE_SLEEP_MODE 0x01
113#define NCI_DEACTIVATE_TYPE_SLEEP_AF_MODE 0x02 122#define NCI_DEACTIVATE_TYPE_SLEEP_AF_MODE 0x02
114#define NCI_DEACTIVATE_TYPE_RF_LINK_LOSS 0x03 123#define NCI_DEACTIVATE_TYPE_DISCOVERY 0x03
115#define NCI_DEACTIVATE_TYPE_DISCOVERY_ERROR 0x04
116 124
117/* Message Type (MT) */ 125/* Message Type (MT) */
118#define NCI_MT_DATA_PKT 0x00 126#define NCI_MT_DATA_PKT 0x00
@@ -144,10 +152,10 @@
144#define nci_conn_id(hdr) (__u8)(((hdr)[0])&0x0f) 152#define nci_conn_id(hdr) (__u8)(((hdr)[0])&0x0f)
145 153
146/* GID values */ 154/* GID values */
147#define NCI_GID_CORE 0x0 155#define NCI_GID_CORE 0x0
148#define NCI_GID_RF_MGMT 0x1 156#define NCI_GID_RF_MGMT 0x1
149#define NCI_GID_NFCEE_MGMT 0x2 157#define NCI_GID_NFCEE_MGMT 0x2
150#define NCI_GID_PROPRIETARY 0xf 158#define NCI_GID_PROPRIETARY 0xf
151 159
152/* ---- NCI Packet structures ---- */ 160/* ---- NCI Packet structures ---- */
153#define NCI_CTRL_HDR_SIZE 3 161#define NCI_CTRL_HDR_SIZE 3
@@ -169,24 +177,17 @@ struct nci_data_hdr {
169/* ----- NCI Commands ---- */ 177/* ----- NCI Commands ---- */
170/* ------------------------ */ 178/* ------------------------ */
171#define NCI_OP_CORE_RESET_CMD nci_opcode_pack(NCI_GID_CORE, 0x00) 179#define NCI_OP_CORE_RESET_CMD nci_opcode_pack(NCI_GID_CORE, 0x00)
172 180struct nci_core_reset_cmd {
173#define NCI_OP_CORE_INIT_CMD nci_opcode_pack(NCI_GID_CORE, 0x01) 181 __u8 reset_type;
174
175#define NCI_OP_CORE_SET_CONFIG_CMD nci_opcode_pack(NCI_GID_CORE, 0x02)
176
177#define NCI_OP_CORE_CONN_CREATE_CMD nci_opcode_pack(NCI_GID_CORE, 0x04)
178struct nci_core_conn_create_cmd {
179 __u8 target_handle;
180 __u8 num_target_specific_params;
181} __packed; 182} __packed;
182 183
183#define NCI_OP_CORE_CONN_CLOSE_CMD nci_opcode_pack(NCI_GID_CORE, 0x06) 184#define NCI_OP_CORE_INIT_CMD nci_opcode_pack(NCI_GID_CORE, 0x01)
184 185
185#define NCI_OP_RF_DISCOVER_MAP_CMD nci_opcode_pack(NCI_GID_RF_MGMT, 0x00) 186#define NCI_OP_RF_DISCOVER_MAP_CMD nci_opcode_pack(NCI_GID_RF_MGMT, 0x00)
186struct disc_map_config { 187struct disc_map_config {
187 __u8 rf_protocol; 188 __u8 rf_protocol;
188 __u8 mode; 189 __u8 mode;
189 __u8 rf_interface_type; 190 __u8 rf_interface;
190} __packed; 191} __packed;
191 192
192struct nci_rf_disc_map_cmd { 193struct nci_rf_disc_map_cmd {
@@ -197,7 +198,7 @@ struct nci_rf_disc_map_cmd {
197 198
198#define NCI_OP_RF_DISCOVER_CMD nci_opcode_pack(NCI_GID_RF_MGMT, 0x03) 199#define NCI_OP_RF_DISCOVER_CMD nci_opcode_pack(NCI_GID_RF_MGMT, 0x03)
199struct disc_config { 200struct disc_config {
200 __u8 type; 201 __u8 rf_tech_and_mode;
201 __u8 frequency; 202 __u8 frequency;
202} __packed; 203} __packed;
203 204
@@ -218,6 +219,7 @@ struct nci_rf_deactivate_cmd {
218struct nci_core_reset_rsp { 219struct nci_core_reset_rsp {
219 __u8 status; 220 __u8 status;
220 __u8 nci_ver; 221 __u8 nci_ver;
222 __u8 config_status;
221} __packed; 223} __packed;
222 224
223#define NCI_OP_CORE_INIT_RSP nci_opcode_pack(NCI_GID_CORE, 0x01) 225#define NCI_OP_CORE_INIT_RSP nci_opcode_pack(NCI_GID_CORE, 0x01)
@@ -232,24 +234,12 @@ struct nci_core_init_rsp_1 {
232struct nci_core_init_rsp_2 { 234struct nci_core_init_rsp_2 {
233 __u8 max_logical_connections; 235 __u8 max_logical_connections;
234 __le16 max_routing_table_size; 236 __le16 max_routing_table_size;
235 __u8 max_control_packet_payload_length; 237 __u8 max_ctrl_pkt_payload_len;
236 __le16 rf_sending_buffer_size; 238 __le16 max_size_for_large_params;
237 __le16 rf_receiving_buffer_size; 239 __u8 manufact_id;
238 __le16 manufacturer_id; 240 __le32 manufact_specific_info;
239} __packed; 241} __packed;
240 242
241#define NCI_OP_CORE_SET_CONFIG_RSP nci_opcode_pack(NCI_GID_CORE, 0x02)
242
243#define NCI_OP_CORE_CONN_CREATE_RSP nci_opcode_pack(NCI_GID_CORE, 0x04)
244struct nci_core_conn_create_rsp {
245 __u8 status;
246 __u8 max_pkt_payload_size;
247 __u8 initial_num_credits;
248 __u8 conn_id;
249} __packed;
250
251#define NCI_OP_CORE_CONN_CLOSE_RSP nci_opcode_pack(NCI_GID_CORE, 0x06)
252
253#define NCI_OP_RF_DISCOVER_MAP_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x00) 243#define NCI_OP_RF_DISCOVER_MAP_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x00)
254 244
255#define NCI_OP_RF_DISCOVER_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x03) 245#define NCI_OP_RF_DISCOVER_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x03)
@@ -259,7 +249,7 @@ struct nci_core_conn_create_rsp {
259/* --------------------------- */ 249/* --------------------------- */
260/* ---- NCI Notifications ---- */ 250/* ---- NCI Notifications ---- */
261/* --------------------------- */ 251/* --------------------------- */
262#define NCI_OP_CORE_CONN_CREDITS_NTF nci_opcode_pack(NCI_GID_CORE, 0x07) 252#define NCI_OP_CORE_CONN_CREDITS_NTF nci_opcode_pack(NCI_GID_CORE, 0x06)
263struct conn_credit_entry { 253struct conn_credit_entry {
264 __u8 conn_id; 254 __u8 conn_id;
265 __u8 credits; 255 __u8 credits;
@@ -270,12 +260,13 @@ struct nci_core_conn_credit_ntf {
270 struct conn_credit_entry conn_entries[NCI_MAX_NUM_CONN]; 260 struct conn_credit_entry conn_entries[NCI_MAX_NUM_CONN];
271} __packed; 261} __packed;
272 262
273#define NCI_OP_RF_FIELD_INFO_NTF nci_opcode_pack(NCI_GID_CORE, 0x08) 263#define NCI_OP_CORE_INTF_ERROR_NTF nci_opcode_pack(NCI_GID_CORE, 0x08)
274struct nci_rf_field_info_ntf { 264struct nci_core_intf_error_ntf {
275 __u8 rf_field_status; 265 __u8 status;
266 __u8 conn_id;
276} __packed; 267} __packed;
277 268
278#define NCI_OP_RF_ACTIVATE_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x05) 269#define NCI_OP_RF_INTF_ACTIVATED_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x05)
279struct rf_tech_specific_params_nfca_poll { 270struct rf_tech_specific_params_nfca_poll {
280 __u16 sens_res; 271 __u16 sens_res;
281 __u8 nfcid1_len; /* 0, 4, 7, or 10 Bytes */ 272 __u8 nfcid1_len; /* 0, 4, 7, or 10 Bytes */
@@ -289,17 +280,22 @@ struct activation_params_nfca_poll_iso_dep {
289 __u8 rats_res[20]; 280 __u8 rats_res[20];
290}; 281};
291 282
292struct nci_rf_activate_ntf { 283struct nci_rf_intf_activated_ntf {
293 __u8 target_handle; 284 __u8 rf_discovery_id;
285 __u8 rf_interface;
294 __u8 rf_protocol; 286 __u8 rf_protocol;
295 __u8 rf_tech_and_mode; 287 __u8 activation_rf_tech_and_mode;
288 __u8 max_data_pkt_payload_size;
289 __u8 initial_num_credits;
296 __u8 rf_tech_specific_params_len; 290 __u8 rf_tech_specific_params_len;
297 291
298 union { 292 union {
299 struct rf_tech_specific_params_nfca_poll nfca_poll; 293 struct rf_tech_specific_params_nfca_poll nfca_poll;
300 } rf_tech_specific_params; 294 } rf_tech_specific_params;
301 295
302 __u8 rf_interface_type; 296 __u8 data_exch_rf_tech_and_mode;
297 __u8 data_exch_tx_bit_rate;
298 __u8 data_exch_rx_bit_rate;
303 __u8 activation_params_len; 299 __u8 activation_params_len;
304 300
305 union { 301 union {
@@ -309,5 +305,9 @@ struct nci_rf_activate_ntf {
309} __packed; 305} __packed;
310 306
311#define NCI_OP_RF_DEACTIVATE_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x06) 307#define NCI_OP_RF_DEACTIVATE_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x06)
308struct nci_rf_deactivate_ntf {
309 __u8 type;
310 __u8 reason;
311} __packed;
312 312
313#endif /* __NCI_H */ 313#endif /* __NCI_H */
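A hedged sketch of building the new one-byte CORE_RESET payload; nci_send_cmd() is assumed here as the NCI core's command-submission helper and is not part of this header:

#include <net/nfc/nci.h>
#include <net/nfc/nci_core.h>

static void send_core_reset(struct nci_dev *ndev)
{
        struct nci_core_reset_cmd cmd = {
                .reset_type = NCI_RESET_TYPE_RESET_CONFIG,  /* discard old config */
        };

        /* assumed helper: (dev, opcode, payload length, payload) */
        nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, sizeof(cmd), &cmd);
}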
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index b8b4bbd7e0fc..bccd89e9d4c2 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -109,15 +109,14 @@ struct nci_dev {
109 [NCI_MAX_SUPPORTED_RF_INTERFACES]; 109 [NCI_MAX_SUPPORTED_RF_INTERFACES];
110 __u8 max_logical_connections; 110 __u8 max_logical_connections;
111 __u16 max_routing_table_size; 111 __u16 max_routing_table_size;
112 __u8 max_control_packet_payload_length; 112 __u8 max_ctrl_pkt_payload_len;
113 __u16 rf_sending_buffer_size; 113 __u16 max_size_for_large_params;
114 __u16 rf_receiving_buffer_size; 114 __u8 manufact_id;
115 __u16 manufacturer_id; 115 __u32 manufact_specific_info;
116 116
117 /* received during NCI_OP_CORE_CONN_CREATE_RSP for static conn 0 */ 117 /* received during NCI_OP_RF_INTF_ACTIVATED_NTF */
118 __u8 max_pkt_payload_size; 118 __u8 max_data_pkt_payload_size;
119 __u8 initial_num_credits; 119 __u8 initial_num_credits;
120 __u8 conn_id;
121 120
122 /* stored during nci_data_exchange */ 121 /* stored during nci_data_exchange */
123 data_exchange_cb_t data_exchange_cb; 122 data_exchange_cb_t data_exchange_cb;
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 6a7f602aa841..8696b773a695 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -52,6 +52,9 @@ struct nfc_ops {
52 int (*dev_down)(struct nfc_dev *dev); 52 int (*dev_down)(struct nfc_dev *dev);
53 int (*start_poll)(struct nfc_dev *dev, u32 protocols); 53 int (*start_poll)(struct nfc_dev *dev, u32 protocols);
54 void (*stop_poll)(struct nfc_dev *dev); 54 void (*stop_poll)(struct nfc_dev *dev);
55 int (*dep_link_up)(struct nfc_dev *dev, int target_idx,
56 u8 comm_mode, u8 rf_mode);
57 int (*dep_link_down)(struct nfc_dev *dev);
55 int (*activate_target)(struct nfc_dev *dev, u32 target_idx, 58 int (*activate_target)(struct nfc_dev *dev, u32 target_idx,
56 u32 protocol); 59 u32 protocol);
57 void (*deactivate_target)(struct nfc_dev *dev, u32 target_idx); 60 void (*deactivate_target)(struct nfc_dev *dev, u32 target_idx);
@@ -60,11 +63,17 @@ struct nfc_ops {
60 void *cb_context); 63 void *cb_context);
61}; 64};
62 65
66#define NFC_TARGET_IDX_ANY -1
67#define NFC_MAX_GT_LEN 48
68#define NFC_MAX_NFCID1_LEN 10
69
63struct nfc_target { 70struct nfc_target {
64 u32 idx; 71 u32 idx;
65 u32 supported_protocols; 72 u32 supported_protocols;
66 u16 sens_res; 73 u16 sens_res;
67 u8 sel_res; 74 u8 sel_res;
75 u8 nfcid1_len;
76 u8 nfcid1[NFC_MAX_NFCID1_LEN];
68}; 77};
69 78
70struct nfc_genl_data { 79struct nfc_genl_data {
@@ -83,6 +92,8 @@ struct nfc_dev {
83 bool dev_up; 92 bool dev_up;
84 bool polling; 93 bool polling;
85 bool remote_activated; 94 bool remote_activated;
95 bool dep_link_up;
96 u32 dep_rf_mode;
86 struct nfc_genl_data genl_data; 97 struct nfc_genl_data genl_data;
87 u32 supported_protocols; 98 u32 supported_protocols;
88 99
@@ -157,9 +168,20 @@ static inline const char *nfc_device_name(struct nfc_dev *dev)
157 return dev_name(&dev->dev); 168 return dev_name(&dev->dev);
158} 169}
159 170
160struct sk_buff *nfc_alloc_skb(unsigned int size, gfp_t gfp); 171struct sk_buff *nfc_alloc_send_skb(struct nfc_dev *dev, struct sock *sk,
172 unsigned int flags, unsigned int size,
173 unsigned int *err);
174struct sk_buff *nfc_alloc_recv_skb(unsigned int size, gfp_t gfp);
175
176int nfc_set_remote_general_bytes(struct nfc_dev *dev,
177 u8 *gt, u8 gt_len);
178
179u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, u8 *gt_len);
161 180
162int nfc_targets_found(struct nfc_dev *dev, struct nfc_target *targets, 181int nfc_targets_found(struct nfc_dev *dev, struct nfc_target *targets,
163 int ntargets); 182 int ntargets);
164 183
184int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
185 u8 comm_mode, u8 rf_mode);
186
165#endif /* __NET_NFC_H */ 187#endif /* __NET_NFC_H */
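A hedged sketch of a driver filling the extended struct nfc_target before reporting it; the protocol-mask constant is assumed to come from <linux/nfc.h>:

#include <linux/kernel.h>
#include <linux/string.h>
#include <net/nfc/nfc.h>

static int report_nfca_target(struct nfc_dev *dev,
                              const u8 *nfcid1, u8 nfcid1_len)
{
        struct nfc_target target = {
                .supported_protocols = NFC_PROTO_MIFARE_MASK,   /* assumed mask */
        };

        target.nfcid1_len = min_t(u8, nfcid1_len, NFC_MAX_NFCID1_LEN);
        memcpy(target.nfcid1, nfcid1, target.nfcid1_len);

        return nfc_targets_found(dev, &target, 1);
}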
diff --git a/include/net/protocol.h b/include/net/protocol.h
index 6f7eb800974a..875f4895b033 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -25,7 +25,7 @@
25#define _PROTOCOL_H 25#define _PROTOCOL_H
26 26
27#include <linux/in6.h> 27#include <linux/in6.h>
28#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) 28#if IS_ENABLED(CONFIG_IPV6)
29#include <linux/ipv6.h> 29#include <linux/ipv6.h>
30#endif 30#endif
31 31
@@ -38,7 +38,7 @@ struct net_protocol {
38 void (*err_handler)(struct sk_buff *skb, u32 info); 38 void (*err_handler)(struct sk_buff *skb, u32 info);
39 int (*gso_send_check)(struct sk_buff *skb); 39 int (*gso_send_check)(struct sk_buff *skb);
40 struct sk_buff *(*gso_segment)(struct sk_buff *skb, 40 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
41 u32 features); 41 netdev_features_t features);
42 struct sk_buff **(*gro_receive)(struct sk_buff **head, 42 struct sk_buff **(*gro_receive)(struct sk_buff **head,
43 struct sk_buff *skb); 43 struct sk_buff *skb);
44 int (*gro_complete)(struct sk_buff *skb); 44 int (*gro_complete)(struct sk_buff *skb);
@@ -46,7 +46,7 @@ struct net_protocol {
46 netns_ok:1; 46 netns_ok:1;
47}; 47};
48 48
49#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) 49#if IS_ENABLED(CONFIG_IPV6)
50struct inet6_protocol { 50struct inet6_protocol {
51 int (*handler)(struct sk_buff *skb); 51 int (*handler)(struct sk_buff *skb);
52 52
@@ -57,7 +57,7 @@ struct inet6_protocol {
57 57
58 int (*gso_send_check)(struct sk_buff *skb); 58 int (*gso_send_check)(struct sk_buff *skb);
59 struct sk_buff *(*gso_segment)(struct sk_buff *skb, 59 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
60 u32 features); 60 netdev_features_t features);
61 struct sk_buff **(*gro_receive)(struct sk_buff **head, 61 struct sk_buff **(*gro_receive)(struct sk_buff **head,
62 struct sk_buff *skb); 62 struct sk_buff *skb);
63 int (*gro_complete)(struct sk_buff *skb); 63 int (*gro_complete)(struct sk_buff *skb);
@@ -91,7 +91,7 @@ struct inet_protosw {
91 91
92extern const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS]; 92extern const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS];
93 93
94#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) 94#if IS_ENABLED(CONFIG_IPV6)
95extern const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS]; 95extern const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS];
96#endif 96#endif
97 97
@@ -100,7 +100,7 @@ extern int inet_del_protocol(const struct net_protocol *prot, unsigned char num)
100extern void inet_register_protosw(struct inet_protosw *p); 100extern void inet_register_protosw(struct inet_protosw *p);
101extern void inet_unregister_protosw(struct inet_protosw *p); 101extern void inet_unregister_protosw(struct inet_protosw *p);
102 102
103#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) 103#if IS_ENABLED(CONFIG_IPV6)
104extern int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char num); 104extern int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char num);
105extern int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char num); 105extern int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char num);
106extern int inet6_register_protosw(struct inet_protosw *p); 106extern int inet6_register_protosw(struct inet_protosw *p);
diff --git a/include/net/red.h b/include/net/red.h
index b72a3b833936..baab385a4736 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -5,6 +5,7 @@
5#include <net/pkt_sched.h> 5#include <net/pkt_sched.h>
6#include <net/inet_ecn.h> 6#include <net/inet_ecn.h>
7#include <net/dsfield.h> 7#include <net/dsfield.h>
8#include <linux/reciprocal_div.h>
8 9
9/* Random Early Detection (RED) algorithm. 10/* Random Early Detection (RED) algorithm.
10 ======================================= 11 =======================================
@@ -87,6 +88,29 @@
87 etc. 88 etc.
88 */ 89 */
89 90
91/*
92 * Adaptive RED: An Algorithm for Increasing the Robustness of RED's AQM
93 * (Sally Floyd, Ramakrishna Gummadi, and Scott Shenker), August 2001
94 *
95 * Every 500 ms:
96 * if (avg > target and max_p <= 0.5)
97 * increase max_p : max_p += alpha;
98 * else if (avg < target and max_p >= 0.01)
99 * decrease max_p : max_p *= beta;
100 *
101 * target :[qth_min + 0.4*(qth_max - qth_min),
102 * qth_min + 0.6*(qth_max - qth_min)].
103 * alpha : min(0.01, max_p / 4)
104 * beta : 0.9
105 * max_P is a Q0.32 fixed point number (with a 32-bit mantissa)
106 * max_P between 0.01 and 0.5 (1% - 50%) [ it is no longer a negative power of two ]
107 */
108#define RED_ONE_PERCENT ((u32)DIV_ROUND_CLOSEST(1ULL<<32, 100))
109
110#define MAX_P_MIN (1 * RED_ONE_PERCENT)
111#define MAX_P_MAX (50 * RED_ONE_PERCENT)
112#define MAX_P_ALPHA(val) min(MAX_P_MIN, val / 4)
113
90#define RED_STAB_SIZE 256 114#define RED_STAB_SIZE 256
91#define RED_STAB_MASK (RED_STAB_SIZE - 1) 115#define RED_STAB_MASK (RED_STAB_SIZE - 1)
92 116
@@ -101,76 +125,109 @@ struct red_stats {
101 125
102struct red_parms { 126struct red_parms {
103 /* Parameters */ 127 /* Parameters */
104 u32 qth_min; /* Min avg length threshold: A scaled */ 128 u32 qth_min; /* Min avg length threshold: Wlog scaled */
105 u32 qth_max; /* Max avg length threshold: A scaled */ 129 u32 qth_max; /* Max avg length threshold: Wlog scaled */
106 u32 Scell_max; 130 u32 Scell_max;
107 u32 Rmask; /* Cached random mask, see red_rmask */ 131 u32 max_P; /* probability, [0 .. 1.0] 32 scaled */
132 u32 max_P_reciprocal; /* reciprocal_value(max_P / qth_delta) */
133 u32 qth_delta; /* max_th - min_th */
134 u32 target_min; /* min_th + 0.4*(max_th - min_th) */
135 u32 target_max; /* min_th + 0.6*(max_th - min_th) */
108 u8 Scell_log; 136 u8 Scell_log;
109 u8 Wlog; /* log(W) */ 137 u8 Wlog; /* log(W) */
110 u8 Plog; /* random number bits */ 138 u8 Plog; /* random number bits */
111 u8 Stab[RED_STAB_SIZE]; 139 u8 Stab[RED_STAB_SIZE];
140};
112 141
142struct red_vars {
113 /* Variables */ 143 /* Variables */
114 int qcount; /* Number of packets since last random 144 int qcount; /* Number of packets since last random
115 number generation */ 145 number generation */
116 u32 qR; /* Cached random number */ 146 u32 qR; /* Cached random number */
117 147
118 unsigned long qavg; /* Average queue length: A scaled */ 148 unsigned long qavg; /* Average queue length: Wlog scaled */
119 ktime_t qidlestart; /* Start of current idle period */ 149 ktime_t qidlestart; /* Start of current idle period */
120}; 150};
121 151
122static inline u32 red_rmask(u8 Plog) 152static inline u32 red_maxp(u8 Plog)
123{ 153{
124 return Plog < 32 ? ((1 << Plog) - 1) : ~0UL; 154 return Plog < 32 ? (~0U >> Plog) : ~0U;
125} 155}
126 156
127static inline void red_set_parms(struct red_parms *p, 157static inline void red_set_vars(struct red_vars *v)
128 u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog,
129 u8 Scell_log, u8 *stab)
130{ 158{
131 /* Reset average queue length, the value is strictly bound 159 /* Reset average queue length, the value is strictly bound
132 * to the parameters below, resetting hurts a bit but leaving 160 * to the parameters below, resetting hurts a bit but leaving
133 * it might result in an unreasonable qavg for a while. --TGR 161 * it might result in an unreasonable qavg for a while. --TGR
134 */ 162 */
135 p->qavg = 0; 163 v->qavg = 0;
164
165 v->qcount = -1;
166}
167
168static inline void red_set_parms(struct red_parms *p,
169 u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog,
170 u8 Scell_log, u8 *stab, u32 max_P)
171{
172 int delta = qth_max - qth_min;
173 u32 max_p_delta;
136 174
137 p->qcount = -1;
138 p->qth_min = qth_min << Wlog; 175 p->qth_min = qth_min << Wlog;
139 p->qth_max = qth_max << Wlog; 176 p->qth_max = qth_max << Wlog;
140 p->Wlog = Wlog; 177 p->Wlog = Wlog;
141 p->Plog = Plog; 178 p->Plog = Plog;
142 p->Rmask = red_rmask(Plog); 179 if (delta < 0)
180 delta = 1;
181 p->qth_delta = delta;
182 if (!max_P) {
183 max_P = red_maxp(Plog);
184 max_P *= delta; /* max_P = (qth_max - qth_min)/2^Plog */
185 }
186 p->max_P = max_P;
187 max_p_delta = max_P / delta;
188 max_p_delta = max(max_p_delta, 1U);
189 p->max_P_reciprocal = reciprocal_value(max_p_delta);
190
191 /* RED Adaptive target :
192 * [min_th + 0.4*(max_th - min_th),
193 * min_th + 0.6*(max_th - min_th)].
194 */
195 delta /= 5;
196 p->target_min = qth_min + 2*delta;
197 p->target_max = qth_min + 3*delta;
198
143 p->Scell_log = Scell_log; 199 p->Scell_log = Scell_log;
144 p->Scell_max = (255 << Scell_log); 200 p->Scell_max = (255 << Scell_log);
145 201
146 memcpy(p->Stab, stab, sizeof(p->Stab)); 202 memcpy(p->Stab, stab, sizeof(p->Stab));
147} 203}
148 204
149static inline int red_is_idling(struct red_parms *p) 205static inline int red_is_idling(const struct red_vars *v)
150{ 206{
151 return p->qidlestart.tv64 != 0; 207 return v->qidlestart.tv64 != 0;
152} 208}
153 209
154static inline void red_start_of_idle_period(struct red_parms *p) 210static inline void red_start_of_idle_period(struct red_vars *v)
155{ 211{
156 p->qidlestart = ktime_get(); 212 v->qidlestart = ktime_get();
157} 213}
158 214
159static inline void red_end_of_idle_period(struct red_parms *p) 215static inline void red_end_of_idle_period(struct red_vars *v)
160{ 216{
161 p->qidlestart.tv64 = 0; 217 v->qidlestart.tv64 = 0;
162} 218}
163 219
164static inline void red_restart(struct red_parms *p) 220static inline void red_restart(struct red_vars *v)
165{ 221{
166 red_end_of_idle_period(p); 222 red_end_of_idle_period(v);
167 p->qavg = 0; 223 v->qavg = 0;
168 p->qcount = -1; 224 v->qcount = -1;
169} 225}
170 226
171static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p) 227static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms *p,
228 const struct red_vars *v)
172{ 229{
173 s64 delta = ktime_us_delta(ktime_get(), p->qidlestart); 230 s64 delta = ktime_us_delta(ktime_get(), v->qidlestart);
174 long us_idle = min_t(s64, delta, p->Scell_max); 231 long us_idle = min_t(s64, delta, p->Scell_max);
175 int shift; 232 int shift;
176 233
@@ -197,7 +254,7 @@ static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p)
197 shift = p->Stab[(us_idle >> p->Scell_log) & RED_STAB_MASK]; 254 shift = p->Stab[(us_idle >> p->Scell_log) & RED_STAB_MASK];
198 255
199 if (shift) 256 if (shift)
200 return p->qavg >> shift; 257 return v->qavg >> shift;
201 else { 258 else {
202 /* Approximate initial part of exponent with linear function: 259 /* Approximate initial part of exponent with linear function:
203 * 260 *
@@ -206,16 +263,17 @@ static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p)
206 * Seems, it is the best solution to 263 * Seems, it is the best solution to
207 * problem of too coarse exponent tabulation. 264 * problem of too coarse exponent tabulation.
208 */ 265 */
209 us_idle = (p->qavg * (u64)us_idle) >> p->Scell_log; 266 us_idle = (v->qavg * (u64)us_idle) >> p->Scell_log;
210 267
211 if (us_idle < (p->qavg >> 1)) 268 if (us_idle < (v->qavg >> 1))
212 return p->qavg - us_idle; 269 return v->qavg - us_idle;
213 else 270 else
214 return p->qavg >> 1; 271 return v->qavg >> 1;
215 } 272 }
216} 273}
217 274
218static inline unsigned long red_calc_qavg_no_idle_time(struct red_parms *p, 275static inline unsigned long red_calc_qavg_no_idle_time(const struct red_parms *p,
276 const struct red_vars *v,
219 unsigned int backlog) 277 unsigned int backlog)
220{ 278{
221 /* 279 /*
@@ -227,42 +285,46 @@ static inline unsigned long red_calc_qavg_no_idle_time(struct red_parms *p,
227 * 285 *
228 * --ANK (980924) 286 * --ANK (980924)
229 */ 287 */
230 return p->qavg + (backlog - (p->qavg >> p->Wlog)); 288 return v->qavg + (backlog - (v->qavg >> p->Wlog));
231} 289}
232 290
233static inline unsigned long red_calc_qavg(struct red_parms *p, 291static inline unsigned long red_calc_qavg(const struct red_parms *p,
292 const struct red_vars *v,
234 unsigned int backlog) 293 unsigned int backlog)
235{ 294{
236 if (!red_is_idling(p)) 295 if (!red_is_idling(v))
237 return red_calc_qavg_no_idle_time(p, backlog); 296 return red_calc_qavg_no_idle_time(p, v, backlog);
238 else 297 else
239 return red_calc_qavg_from_idle_time(p); 298 return red_calc_qavg_from_idle_time(p, v);
240} 299}
241 300
242static inline u32 red_random(struct red_parms *p) 301
302static inline u32 red_random(const struct red_parms *p)
243{ 303{
244 return net_random() & p->Rmask; 304 return reciprocal_divide(net_random(), p->max_P_reciprocal);
245} 305}
246 306
247static inline int red_mark_probability(struct red_parms *p, unsigned long qavg) 307static inline int red_mark_probability(const struct red_parms *p,
308 const struct red_vars *v,
309 unsigned long qavg)
248{ 310{
249 /* The formula used below causes questions. 311 /* The formula used below causes questions.
250 312
251 OK. qR is random number in the interval 0..Rmask 313 OK. qR is random number in the interval
314 (0..1/max_P)*(qth_max-qth_min)
252 i.e. 0..(2^Plog). If we used floating point 315 i.e. 0..(2^Plog). If we used floating point
253 arithmetic, it would be: (2^Plog)*rnd_num, 316 arithmetic, it would be: (2^Plog)*rnd_num,
254 where rnd_num is less than 1. 317 where rnd_num is less than 1.
255 318
256 Taking into account that qavg has a fixed 319 Taking into account that qavg has a fixed
257 point at Wlog, and Plog is related to max_P by 320 point at Wlog, two lines
258 max_P = (qth_max-qth_min)/2^Plog; two lines
259 below have the following floating point equivalent: 321 below have the following floating point equivalent:
260 322
261 max_P*(qavg - qth_min)/(qth_max-qth_min) < rnd/qcount 323 max_P*(qavg - qth_min)/(qth_max-qth_min) < rnd/qcount
262 324
263 Any questions? --ANK (980924) 325 Any questions? --ANK (980924)
264 */ 326 */
265 return !(((qavg - p->qth_min) >> p->Wlog) * p->qcount < p->qR); 327 return !(((qavg - p->qth_min) >> p->Wlog) * v->qcount < v->qR);
266} 328}
267 329
268enum { 330enum {
@@ -271,7 +333,7 @@ enum {
271 RED_ABOVE_MAX_TRESH, 333 RED_ABOVE_MAX_TRESH,
272}; 334};
273 335
274static inline int red_cmp_thresh(struct red_parms *p, unsigned long qavg) 336static inline int red_cmp_thresh(const struct red_parms *p, unsigned long qavg)
275{ 337{
276 if (qavg < p->qth_min) 338 if (qavg < p->qth_min)
277 return RED_BELOW_MIN_THRESH; 339 return RED_BELOW_MIN_THRESH;
@@ -287,27 +349,29 @@ enum {
287 RED_HARD_MARK, 349 RED_HARD_MARK,
288}; 350};
289 351
290static inline int red_action(struct red_parms *p, unsigned long qavg) 352static inline int red_action(const struct red_parms *p,
353 struct red_vars *v,
354 unsigned long qavg)
291{ 355{
292 switch (red_cmp_thresh(p, qavg)) { 356 switch (red_cmp_thresh(p, qavg)) {
293 case RED_BELOW_MIN_THRESH: 357 case RED_BELOW_MIN_THRESH:
294 p->qcount = -1; 358 v->qcount = -1;
295 return RED_DONT_MARK; 359 return RED_DONT_MARK;
296 360
297 case RED_BETWEEN_TRESH: 361 case RED_BETWEEN_TRESH:
298 if (++p->qcount) { 362 if (++v->qcount) {
299 if (red_mark_probability(p, qavg)) { 363 if (red_mark_probability(p, v, qavg)) {
300 p->qcount = 0; 364 v->qcount = 0;
301 p->qR = red_random(p); 365 v->qR = red_random(p);
302 return RED_PROB_MARK; 366 return RED_PROB_MARK;
303 } 367 }
304 } else 368 } else
305 p->qR = red_random(p); 369 v->qR = red_random(p);
306 370
307 return RED_DONT_MARK; 371 return RED_DONT_MARK;
308 372
309 case RED_ABOVE_MAX_TRESH: 373 case RED_ABOVE_MAX_TRESH:
310 p->qcount = -1; 374 v->qcount = -1;
311 return RED_HARD_MARK; 375 return RED_HARD_MARK;
312 } 376 }
313 377
@@ -315,4 +379,25 @@ static inline int red_action(struct red_parms *p, unsigned long qavg)
315 return RED_DONT_MARK; 379 return RED_DONT_MARK;
316} 380}
317 381
382static inline void red_adaptative_algo(struct red_parms *p, struct red_vars *v)
383{
384 unsigned long qavg;
385 u32 max_p_delta;
386
387 qavg = v->qavg;
388 if (red_is_idling(v))
389 qavg = red_calc_qavg_from_idle_time(p, v);
390
391 /* p->qavg is fixed point number with point at Wlog */
392 qavg >>= p->Wlog;
393
394 if (qavg > p->target_max && p->max_P <= MAX_P_MAX)
395 p->max_P += MAX_P_ALPHA(p->max_P); /* maxp = maxp + alpha */
396 else if (qavg < p->target_min && p->max_P >= MAX_P_MIN)
397 p->max_P = (p->max_P/10)*9; /* maxp = maxp * Beta */
398
399 max_p_delta = DIV_ROUND_CLOSEST(p->max_P, p->qth_delta);
400 max_p_delta = max(max_p_delta, 1U);
401 p->max_P_reciprocal = reciprocal_value(max_p_delta);
402}
318#endif 403#endif
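A hedged sketch of how a queue discipline uses the split API introduced here: one read-mostly red_parms block shared across queues, one red_vars block of state per (virtual) queue:

#include <net/red.h>

struct my_red_queue {
        struct red_parms parms;         /* shared parameters */
        struct red_vars  vars;          /* per-queue EWMA state */
        unsigned int     backlog;
};

static int my_red_should_mark(struct my_red_queue *q)
{
        unsigned long qavg = red_calc_qavg(&q->parms, &q->vars, q->backlog);

        return red_action(&q->parms, &q->vars, qavg) != RED_DONT_MARK;
}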
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index eb7d3c2d4274..a5f79933e211 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -48,6 +48,10 @@ enum environment_cap {
48 * 99 - built by driver but a specific alpha2 cannot be determined 48 * 99 - built by driver but a specific alpha2 cannot be determined
49 * 98 - result of an intersection between two regulatory domains 49 * 98 - result of an intersection between two regulatory domains
50 * 97 - regulatory domain has not yet been configured 50 * 97 - regulatory domain has not yet been configured
51 * @dfs_region: If CRDA responded with a regulatory domain that requires
52 * DFS master operation on a known DFS region (NL80211_DFS_*),
53 * dfs_region represents that region. Drivers can use this and the
54 * @alpha2 to adjust their device's DFS parameters as required.
51 * @intersect: indicates whether the wireless core should intersect 55 * @intersect: indicates whether the wireless core should intersect
52 * the requested regulatory domain with the presently set regulatory 56 * the requested regulatory domain with the presently set regulatory
53 * domain. 57 * domain.
@@ -67,6 +71,7 @@ struct regulatory_request {
67 int wiphy_idx; 71 int wiphy_idx;
68 enum nl80211_reg_initiator initiator; 72 enum nl80211_reg_initiator initiator;
69 char alpha2[2]; 73 char alpha2[2];
74 u8 dfs_region;
70 bool intersect; 75 bool intersect;
71 bool processed; 76 bool processed;
72 enum environment_cap country_ie_env; 77 enum environment_cap country_ie_env;
@@ -93,6 +98,7 @@ struct ieee80211_reg_rule {
93struct ieee80211_regdomain { 98struct ieee80211_regdomain {
94 u32 n_reg_rules; 99 u32 n_reg_rules;
95 char alpha2[2]; 100 char alpha2[2];
101 u8 dfs_region;
96 struct ieee80211_reg_rule reg_rules[]; 102 struct ieee80211_reg_rule reg_rules[];
97}; 103};
98 104
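A hedged sketch of a driver consuming the new dfs_region field; the NL80211_DFS_* enumerators referenced in the comment above are assumed to live in <linux/nl80211.h>:

#include <linux/nl80211.h>
#include <net/regulatory.h>

static void my_apply_dfs_region(const struct regulatory_request *req)
{
        switch (req->dfs_region) {
        case NL80211_DFS_FCC:           /* assumed enumerator */
                /* program FCC radar-detection patterns */
                break;
        case NL80211_DFS_ETSI:          /* assumed enumerator */
                /* program ETSI radar-detection patterns */
                break;
        default:
                break;
        }
}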
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 6a72a58cde59..d3685615a8b0 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -71,7 +71,7 @@
71#include <linux/jiffies.h> 71#include <linux/jiffies.h>
72#include <linux/idr.h> 72#include <linux/idr.h>
73 73
74#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 74#if IS_ENABLED(CONFIG_IPV6)
75#include <net/ipv6.h> 75#include <net/ipv6.h>
76#include <net/ip6_route.h> 76#include <net/ip6_route.h>
77#endif 77#endif
@@ -383,7 +383,7 @@ static inline void sctp_sysctl_unregister(void) { return; }
383/* Size of Supported Address Parameter for 'x' address types. */ 383/* Size of Supported Address Parameter for 'x' address types. */
384#define SCTP_SAT_LEN(x) (sizeof(struct sctp_paramhdr) + (x) * sizeof(__u16)) 384#define SCTP_SAT_LEN(x) (sizeof(struct sctp_paramhdr) + (x) * sizeof(__u16))
385 385
386#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 386#if IS_ENABLED(CONFIG_IPV6)
387 387
388void sctp_v6_pf_init(void); 388void sctp_v6_pf_init(void);
389void sctp_v6_pf_exit(void); 389void sctp_v6_pf_exit(void);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index a15432da27c3..88949a994538 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -235,7 +235,7 @@ extern struct sctp_globals {
235 235
236 /* Flag to indicate whether computing and verifying checksum 236 /* Flag to indicate whether computing and verifying checksum
237 * is disabled. */ 237 * is disabled. */
238 int checksum_disable; 238 bool checksum_disable;
239 239
240 /* Threshold for rwnd update SACKS. Receive buffer shifted this many 240 /* Threshold for rwnd update SACKS. Receive buffer shifted this many
241 * bits is an indicator of when to send and window update SACK. 241 * bits is an indicator of when to send and window update SACK.
@@ -369,7 +369,7 @@ static inline struct sock *sctp_opt2sk(const struct sctp_sock *sp)
369 return (struct sock *)sp; 369 return (struct sock *)sp;
370} 370}
371 371
372#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 372#if IS_ENABLED(CONFIG_IPV6)
373struct sctp6_sock { 373struct sctp6_sock {
374 struct sctp_sock sctp; 374 struct sctp_sock sctp;
375 struct ipv6_pinfo inet6; 375 struct ipv6_pinfo inet6;
@@ -1089,6 +1089,7 @@ void sctp_transport_burst_reset(struct sctp_transport *);
1089unsigned long sctp_transport_timeout(struct sctp_transport *); 1089unsigned long sctp_transport_timeout(struct sctp_transport *);
1090void sctp_transport_reset(struct sctp_transport *); 1090void sctp_transport_reset(struct sctp_transport *);
1091void sctp_transport_update_pmtu(struct sctp_transport *, u32); 1091void sctp_transport_update_pmtu(struct sctp_transport *, u32);
1092void sctp_transport_immediate_rtx(struct sctp_transport *);
1092 1093
1093 1094
1094/* This is the structure we use to queue packets as they come into 1095/* This is the structure we use to queue packets as they come into
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 8f0f9ac0307f..2f65e1686fc8 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -67,7 +67,7 @@ struct icmp_mib {
67 67
68#define ICMPMSG_MIB_MAX __ICMPMSG_MIB_MAX 68#define ICMPMSG_MIB_MAX __ICMPMSG_MIB_MAX
69struct icmpmsg_mib { 69struct icmpmsg_mib {
70 unsigned long mibs[ICMPMSG_MIB_MAX]; 70 atomic_long_t mibs[ICMPMSG_MIB_MAX];
71}; 71};
72 72
73/* ICMP6 (IPv6-ICMP) */ 73/* ICMP6 (IPv6-ICMP) */
@@ -84,7 +84,7 @@ struct icmpv6_mib_device {
84#define ICMP6MSG_MIB_MAX __ICMP6MSG_MIB_MAX 84#define ICMP6MSG_MIB_MAX __ICMP6MSG_MIB_MAX
85/* per network ns counters */ 85/* per network ns counters */
86struct icmpv6msg_mib { 86struct icmpv6msg_mib {
87 unsigned long mibs[ICMP6MSG_MIB_MAX]; 87 atomic_long_t mibs[ICMP6MSG_MIB_MAX];
88}; 88};
89/* per device counters, (shared on all cpus) */ 89/* per device counters, (shared on all cpus) */
90struct icmpv6msg_mib_device { 90struct icmpv6msg_mib_device {
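
Switching the per-message ICMP counters from unsigned long to atomic_long_t means concurrent increments from different CPUs can no longer be lost, at the cost of one atomic operation per counted message. A hedged sketch of what a counter bump looks like with the new type; example_icmpmsg_count is a hypothetical helper, not the in-tree one:

    #include <linux/atomic.h>

    /* Hypothetical helper: count one ICMP message of the given type. */
    static inline void example_icmpmsg_count(struct icmpmsg_mib *mib, u8 type)
    {
            if (type < ICMPMSG_MIB_MAX)
                    atomic_long_inc(&mib->mibs[type]);
    }

    /* Readers (e.g. /proc output) would use atomic_long_read() per slot. */
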
diff --git a/include/net/sock.h b/include/net/sock.h
index 32e39371fba6..bb972d254dff 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -53,6 +53,8 @@
53#include <linux/security.h> 53#include <linux/security.h>
54#include <linux/slab.h> 54#include <linux/slab.h>
55#include <linux/uaccess.h> 55#include <linux/uaccess.h>
56#include <linux/memcontrol.h>
57#include <linux/res_counter.h>
56 58
57#include <linux/filter.h> 59#include <linux/filter.h>
58#include <linux/rculist_nulls.h> 60#include <linux/rculist_nulls.h>
@@ -62,6 +64,22 @@
62#include <net/dst.h> 64#include <net/dst.h>
63#include <net/checksum.h> 65#include <net/checksum.h>
64 66
67struct cgroup;
68struct cgroup_subsys;
69#ifdef CONFIG_NET
70int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss);
71void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss);
72#else
73static inline
74int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
75{
76 return 0;
77}
78static inline
79void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss)
80{
81}
82#endif
65/* 83/*
66 * This structure really needs to be cleaned up. 84 * This structure really needs to be cleaned up.
67 * Most of it is for TCP, and not used by any of 85 * Most of it is for TCP, and not used by any of
@@ -167,6 +185,7 @@ struct sock_common {
167 /* public: */ 185 /* public: */
168}; 186};
169 187
188struct cg_proto;
170/** 189/**
171 * struct sock - network layer representation of sockets 190 * struct sock - network layer representation of sockets
172 * @__sk_common: shared layout with inet_timewait_sock 191 * @__sk_common: shared layout with inet_timewait_sock
@@ -227,6 +246,7 @@ struct sock_common {
227 * @sk_security: used by security modules 246 * @sk_security: used by security modules
228 * @sk_mark: generic packet mark 247 * @sk_mark: generic packet mark
229 * @sk_classid: this socket's cgroup classid 248 * @sk_classid: this socket's cgroup classid
249 * @sk_cgrp: this socket's cgroup-specific proto data
230 * @sk_write_pending: a write to stream socket waits to start 250 * @sk_write_pending: a write to stream socket waits to start
231 * @sk_state_change: callback to indicate change in the state of the sock 251 * @sk_state_change: callback to indicate change in the state of the sock
232 * @sk_data_ready: callback to indicate there is data to be processed 252 * @sk_data_ready: callback to indicate there is data to be processed
@@ -306,8 +326,8 @@ struct sock {
306 kmemcheck_bitfield_end(flags); 326 kmemcheck_bitfield_end(flags);
307 int sk_wmem_queued; 327 int sk_wmem_queued;
308 gfp_t sk_allocation; 328 gfp_t sk_allocation;
309 int sk_route_caps; 329 netdev_features_t sk_route_caps;
310 int sk_route_nocaps; 330 netdev_features_t sk_route_nocaps;
311 int sk_gso_type; 331 int sk_gso_type;
312 unsigned int sk_gso_max_size; 332 unsigned int sk_gso_max_size;
313 int sk_rcvlowat; 333 int sk_rcvlowat;
@@ -320,6 +340,9 @@ struct sock {
320 unsigned short sk_ack_backlog; 340 unsigned short sk_ack_backlog;
321 unsigned short sk_max_ack_backlog; 341 unsigned short sk_max_ack_backlog;
322 __u32 sk_priority; 342 __u32 sk_priority;
343#ifdef CONFIG_CGROUPS
344 __u32 sk_cgrp_prioidx;
345#endif
323 struct pid *sk_peer_pid; 346 struct pid *sk_peer_pid;
324 const struct cred *sk_peer_cred; 347 const struct cred *sk_peer_cred;
325 long sk_rcvtimeo; 348 long sk_rcvtimeo;
@@ -338,6 +361,7 @@ struct sock {
338#endif 361#endif
339 __u32 sk_mark; 362 __u32 sk_mark;
340 u32 sk_classid; 363 u32 sk_classid;
364 struct cg_proto *sk_cgrp;
341 void (*sk_state_change)(struct sock *sk); 365 void (*sk_state_change)(struct sock *sk);
342 void (*sk_data_ready)(struct sock *sk, int bytes); 366 void (*sk_data_ready)(struct sock *sk, int bytes);
343 void (*sk_write_space)(struct sock *sk); 367 void (*sk_write_space)(struct sock *sk);
@@ -563,6 +587,7 @@ enum sock_flags {
563 SOCK_FASYNC, /* fasync() active */ 587 SOCK_FASYNC, /* fasync() active */
564 SOCK_RXQ_OVFL, 588 SOCK_RXQ_OVFL,
565 SOCK_ZEROCOPY, /* buffers from userspace */ 589 SOCK_ZEROCOPY, /* buffers from userspace */
590 SOCK_WIFI_STATUS, /* push wifi status to userspace */
566}; 591};
567 592
568static inline void sock_copy_flags(struct sock *nsk, struct sock *osk) 593static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
@@ -835,6 +860,37 @@ struct proto {
835#ifdef SOCK_REFCNT_DEBUG 860#ifdef SOCK_REFCNT_DEBUG
836 atomic_t socks; 861 atomic_t socks;
837#endif 862#endif
863#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
864 /*
865 * cgroup specific init/deinit functions. Called once for all
866 * protocols that implement it, from cgroups populate function.
867 * This function has to setup any files the protocol want to
868 * appear in the kmem cgroup filesystem.
869 */
870 int (*init_cgroup)(struct cgroup *cgrp,
871 struct cgroup_subsys *ss);
872 void (*destroy_cgroup)(struct cgroup *cgrp,
873 struct cgroup_subsys *ss);
874 struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
875#endif
876};
877
878struct cg_proto {
879 void (*enter_memory_pressure)(struct sock *sk);
880 struct res_counter *memory_allocated; /* Current allocated memory. */
881 struct percpu_counter *sockets_allocated; /* Current number of sockets. */
882 int *memory_pressure;
883 long *sysctl_mem;
884 /*
885 * memcg field is used to find which memcg we belong directly
886 * Each memcg struct can hold more than one cg_proto, so container_of
887 * won't really cut.
888 *
889 * The elegant solution would be having an inverse function to
890 * proto_cgroup in struct proto, but that means polluting the structure
891 * for everybody, instead of just for memcg users.
892 */
893 struct mem_cgroup *memcg;
838}; 894};
839 895
840extern int proto_register(struct proto *prot, int alloc_slab); 896extern int proto_register(struct proto *prot, int alloc_slab);
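
With the three new hooks in struct proto, a protocol opts into per-cgroup accounting by pointing them at its own handlers; the tcp_memcontrol.h header added later in this diff suggests TCP does exactly that. A hedged sketch of the wiring, with most of the proto fields elided and the tcp_* handler names taken from that header:

    struct proto tcp_prot = {
            .name           = "TCP",
            /* ... the usual proto ops elided ... */
    #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
            .init_cgroup    = tcp_init_cgroup,
            .destroy_cgroup = tcp_destroy_cgroup,
            .proto_cgroup   = tcp_proto_cgroup,
    #endif
    };
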
@@ -853,7 +909,7 @@ static inline void sk_refcnt_debug_dec(struct sock *sk)
853 sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks)); 909 sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
854} 910}
855 911
856static inline void sk_refcnt_debug_release(const struct sock *sk) 912static inline void sk_refcnt_debug_release(const struct sock *sk)
857{ 913{
858 if (atomic_read(&sk->sk_refcnt) != 1) 914 if (atomic_read(&sk->sk_refcnt) != 1)
859 printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n", 915 printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
@@ -865,6 +921,208 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
865#define sk_refcnt_debug_release(sk) do { } while (0) 921#define sk_refcnt_debug_release(sk) do { } while (0)
866#endif /* SOCK_REFCNT_DEBUG */ 922#endif /* SOCK_REFCNT_DEBUG */
867 923
924#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
925extern struct jump_label_key memcg_socket_limit_enabled;
926static inline struct cg_proto *parent_cg_proto(struct proto *proto,
927 struct cg_proto *cg_proto)
928{
929 return proto->proto_cgroup(parent_mem_cgroup(cg_proto->memcg));
930}
931#define mem_cgroup_sockets_enabled static_branch(&memcg_socket_limit_enabled)
932#else
933#define mem_cgroup_sockets_enabled 0
934static inline struct cg_proto *parent_cg_proto(struct proto *proto,
935 struct cg_proto *cg_proto)
936{
937 return NULL;
938}
939#endif
940
941
942static inline bool sk_has_memory_pressure(const struct sock *sk)
943{
944 return sk->sk_prot->memory_pressure != NULL;
945}
946
947static inline bool sk_under_memory_pressure(const struct sock *sk)
948{
949 if (!sk->sk_prot->memory_pressure)
950 return false;
951
952 if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
953 return !!*sk->sk_cgrp->memory_pressure;
954
955 return !!*sk->sk_prot->memory_pressure;
956}
957
958static inline void sk_leave_memory_pressure(struct sock *sk)
959{
960 int *memory_pressure = sk->sk_prot->memory_pressure;
961
962 if (!memory_pressure)
963 return;
964
965 if (*memory_pressure)
966 *memory_pressure = 0;
967
968 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
969 struct cg_proto *cg_proto = sk->sk_cgrp;
970 struct proto *prot = sk->sk_prot;
971
972 for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
973 if (*cg_proto->memory_pressure)
974 *cg_proto->memory_pressure = 0;
975 }
976
977}
978
979static inline void sk_enter_memory_pressure(struct sock *sk)
980{
981 if (!sk->sk_prot->enter_memory_pressure)
982 return;
983
984 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
985 struct cg_proto *cg_proto = sk->sk_cgrp;
986 struct proto *prot = sk->sk_prot;
987
988 for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
989 cg_proto->enter_memory_pressure(sk);
990 }
991
992 sk->sk_prot->enter_memory_pressure(sk);
993}
994
995static inline long sk_prot_mem_limits(const struct sock *sk, int index)
996{
997 long *prot = sk->sk_prot->sysctl_mem;
998 if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
999 prot = sk->sk_cgrp->sysctl_mem;
1000 return prot[index];
1001}
1002
1003static inline void memcg_memory_allocated_add(struct cg_proto *prot,
1004 unsigned long amt,
1005 int *parent_status)
1006{
1007 struct res_counter *fail;
1008 int ret;
1009
1010 ret = res_counter_charge(prot->memory_allocated,
1011 amt << PAGE_SHIFT, &fail);
1012
1013 if (ret < 0)
1014 *parent_status = OVER_LIMIT;
1015}
1016
1017static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
1018 unsigned long amt)
1019{
1020 res_counter_uncharge(prot->memory_allocated, amt << PAGE_SHIFT);
1021}
1022
1023static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
1024{
1025 u64 ret;
1026 ret = res_counter_read_u64(prot->memory_allocated, RES_USAGE);
1027 return ret >> PAGE_SHIFT;
1028}
1029
1030static inline long
1031sk_memory_allocated(const struct sock *sk)
1032{
1033 struct proto *prot = sk->sk_prot;
1034 if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1035 return memcg_memory_allocated_read(sk->sk_cgrp);
1036
1037 return atomic_long_read(prot->memory_allocated);
1038}
1039
1040static inline long
1041sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status)
1042{
1043 struct proto *prot = sk->sk_prot;
1044
1045 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
1046 memcg_memory_allocated_add(sk->sk_cgrp, amt, parent_status);
1047 /* update the root cgroup regardless */
1048 atomic_long_add_return(amt, prot->memory_allocated);
1049 return memcg_memory_allocated_read(sk->sk_cgrp);
1050 }
1051
1052 return atomic_long_add_return(amt, prot->memory_allocated);
1053}
1054
1055static inline void
1056sk_memory_allocated_sub(struct sock *sk, int amt, int parent_status)
1057{
1058 struct proto *prot = sk->sk_prot;
1059
1060 if (mem_cgroup_sockets_enabled && sk->sk_cgrp &&
1061 parent_status != OVER_LIMIT) /* Otherwise was uncharged already */
1062 memcg_memory_allocated_sub(sk->sk_cgrp, amt);
1063
1064 atomic_long_sub(amt, prot->memory_allocated);
1065}
1066
1067static inline void sk_sockets_allocated_dec(struct sock *sk)
1068{
1069 struct proto *prot = sk->sk_prot;
1070
1071 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
1072 struct cg_proto *cg_proto = sk->sk_cgrp;
1073
1074 for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
1075 percpu_counter_dec(cg_proto->sockets_allocated);
1076 }
1077
1078 percpu_counter_dec(prot->sockets_allocated);
1079}
1080
1081static inline void sk_sockets_allocated_inc(struct sock *sk)
1082{
1083 struct proto *prot = sk->sk_prot;
1084
1085 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
1086 struct cg_proto *cg_proto = sk->sk_cgrp;
1087
1088 for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
1089 percpu_counter_inc(cg_proto->sockets_allocated);
1090 }
1091
1092 percpu_counter_inc(prot->sockets_allocated);
1093}
1094
1095static inline int
1096sk_sockets_allocated_read_positive(struct sock *sk)
1097{
1098 struct proto *prot = sk->sk_prot;
1099
1100 if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1101 return percpu_counter_sum_positive(sk->sk_cgrp->sockets_allocated);
1102
1103 return percpu_counter_sum_positive(prot->sockets_allocated);
1104}
1105
1106static inline int
1107proto_sockets_allocated_sum_positive(struct proto *prot)
1108{
1109 return percpu_counter_sum_positive(prot->sockets_allocated);
1110}
1111
1112static inline long
1113proto_memory_allocated(struct proto *prot)
1114{
1115 return atomic_long_read(prot->memory_allocated);
1116}
1117
1118static inline bool
1119proto_memory_pressure(struct proto *prot)
1120{
1121 if (!prot->memory_pressure)
1122 return false;
1123 return !!*prot->memory_pressure;
1124}
1125
868 1126
869#ifdef CONFIG_PROC_FS 1127#ifdef CONFIG_PROC_FS
870/* Called with local bh disabled */ 1128/* Called with local bh disabled */
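
Taken together, the helpers above route memory-pressure state and page accounting through the socket's cg_proto when memcg socket accounting is enabled, and fall back to the global per-protocol counters otherwise; indexes 0/1/2 of sk_prot_mem_limits() correspond to the min/pressure/max thresholds. A hedged sketch of a charge path built on them, loosely modelled on __sk_mem_schedule() rather than copied from it (UNDER_LIMIT/OVER_LIMIT are assumed to come from res_counter.h):

    static int example_charge(struct sock *sk, int pages)
    {
            int parent_status = UNDER_LIMIT;
            long allocated = sk_memory_allocated_add(sk, pages, &parent_status);

            if (parent_status == OVER_LIMIT ||
                allocated > sk_prot_mem_limits(sk, 2)) {    /* hard limit */
                    sk_enter_memory_pressure(sk);
                    sk_memory_allocated_sub(sk, pages, parent_status);
                    return 0;                               /* refuse */
            }
            if (allocated > sk_prot_mem_limits(sk, 1))      /* pressure */
                    sk_enter_memory_pressure(sk);
            else if (allocated <= sk_prot_mem_limits(sk, 0))
                    sk_leave_memory_pressure(sk);
            return 1;                                       /* charged */
    }
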
@@ -1091,8 +1349,8 @@ extern struct sock *sk_alloc(struct net *net, int family,
1091 struct proto *prot); 1349 struct proto *prot);
1092extern void sk_free(struct sock *sk); 1350extern void sk_free(struct sock *sk);
1093extern void sk_release_kernel(struct sock *sk); 1351extern void sk_release_kernel(struct sock *sk);
1094extern struct sock *sk_clone(const struct sock *sk, 1352extern struct sock *sk_clone_lock(const struct sock *sk,
1095 const gfp_t priority); 1353 const gfp_t priority);
1096 1354
1097extern struct sk_buff *sock_wmalloc(struct sock *sk, 1355extern struct sk_buff *sock_wmalloc(struct sock *sk,
1098 unsigned long size, int force, 1356 unsigned long size, int force,
@@ -1395,7 +1653,7 @@ static inline int sk_can_gso(const struct sock *sk)
1395 1653
1396extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst); 1654extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
1397 1655
1398static inline void sk_nocaps_add(struct sock *sk, int flags) 1656static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
1399{ 1657{
1400 sk->sk_route_nocaps |= flags; 1658 sk->sk_route_nocaps |= flags;
1401 sk->sk_route_caps &= ~flags; 1659 sk->sk_route_caps &= ~flags;
@@ -1672,7 +1930,7 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk)
1672 1930
1673 page = alloc_pages(sk->sk_allocation, 0); 1931 page = alloc_pages(sk->sk_allocation, 0);
1674 if (!page) { 1932 if (!page) {
1675 sk->sk_prot->enter_memory_pressure(sk); 1933 sk_enter_memory_pressure(sk);
1676 sk_stream_moderate_sndbuf(sk); 1934 sk_stream_moderate_sndbuf(sk);
1677 } 1935 }
1678 return page; 1936 return page;
@@ -1716,6 +1974,8 @@ static inline int sock_intr_errno(long timeo)
1716 1974
1717extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, 1975extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
1718 struct sk_buff *skb); 1976 struct sk_buff *skb);
1977extern void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
1978 struct sk_buff *skb);
1719 1979
1720static __inline__ void 1980static __inline__ void
1721sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) 1981sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
@@ -1743,6 +2003,9 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
1743 __sock_recv_timestamp(msg, sk, skb); 2003 __sock_recv_timestamp(msg, sk, skb);
1744 else 2004 else
1745 sk->sk_stamp = kt; 2005 sk->sk_stamp = kt;
2006
2007 if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
2008 __sock_recv_wifi_status(msg, sk, skb);
1746} 2009}
1747 2010
1748extern void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, 2011extern void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
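
The new SOCK_WIFI_STATUS flag makes sock_recv_timestamp() attach a TX-status control message whenever the skb carries wifi_acked_valid; userspace opts in through the SO_WIFI_STATUS socket option added in the asm-generic/socket.h part of this series. A hedged userspace sketch of reading that status back; the option value 41 is assumed from that header and error handling is minimal:

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    #ifndef SO_WIFI_STATUS
    #define SO_WIFI_STATUS  41              /* assumed, see asm-generic/socket.h */
    #define SCM_WIFI_STATUS SO_WIFI_STATUS
    #endif

    static void example_read_wifi_status(int fd)
    {
            int on = 1, acked;
            char data[256], cbuf[CMSG_SPACE(sizeof(int))];
            struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
            struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
                                  .msg_control = cbuf,
                                  .msg_controllen = sizeof(cbuf) };
            struct cmsghdr *cmsg;

            setsockopt(fd, SOL_SOCKET, SO_WIFI_STATUS, &on, sizeof(on));
            if (recvmsg(fd, &msg, 0) < 0)
                    return;
            for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
                    if (cmsg->cmsg_level == SOL_SOCKET &&
                        cmsg->cmsg_type == SCM_WIFI_STATUS) {
                            memcpy(&acked, CMSG_DATA(cmsg), sizeof(acked));
                            printf("wifi acked: %d\n", acked);
                    }
            }
    }
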
diff --git a/include/net/tcp.h b/include/net/tcp.h
index bb18c4d69aba..0118ea999f67 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -44,6 +44,7 @@
44#include <net/dst.h> 44#include <net/dst.h>
45 45
46#include <linux/seq_file.h> 46#include <linux/seq_file.h>
47#include <linux/memcontrol.h>
47 48
48extern struct inet_hashinfo tcp_hashinfo; 49extern struct inet_hashinfo tcp_hashinfo;
49 50
@@ -229,7 +230,6 @@ extern int sysctl_tcp_fack;
229extern int sysctl_tcp_reordering; 230extern int sysctl_tcp_reordering;
230extern int sysctl_tcp_ecn; 231extern int sysctl_tcp_ecn;
231extern int sysctl_tcp_dsack; 232extern int sysctl_tcp_dsack;
232extern long sysctl_tcp_mem[3];
233extern int sysctl_tcp_wmem[3]; 233extern int sysctl_tcp_wmem[3];
234extern int sysctl_tcp_rmem[3]; 234extern int sysctl_tcp_rmem[3];
235extern int sysctl_tcp_app_win; 235extern int sysctl_tcp_app_win;
@@ -285,7 +285,7 @@ static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
285 } 285 }
286 286
287 if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && 287 if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
288 atomic_long_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]) 288 sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
289 return true; 289 return true;
290 return false; 290 return false;
291} 291}
@@ -628,7 +628,7 @@ extern u32 __tcp_select_window(struct sock *sk);
628struct tcp_skb_cb { 628struct tcp_skb_cb {
629 union { 629 union {
630 struct inet_skb_parm h4; 630 struct inet_skb_parm h4;
631#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) 631#if IS_ENABLED(CONFIG_IPV6)
632 struct inet6_skb_parm h6; 632 struct inet6_skb_parm h6;
633#endif 633#endif
634 } header; /* For incoming frames */ 634 } header; /* For incoming frames */
@@ -773,12 +773,12 @@ static inline int tcp_is_reno(const struct tcp_sock *tp)
773 773
774static inline int tcp_is_fack(const struct tcp_sock *tp) 774static inline int tcp_is_fack(const struct tcp_sock *tp)
775{ 775{
776 return tp->rx_opt.sack_ok & 2; 776 return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
777} 777}
778 778
779static inline void tcp_enable_fack(struct tcp_sock *tp) 779static inline void tcp_enable_fack(struct tcp_sock *tp)
780{ 780{
781 tp->rx_opt.sack_ok |= 2; 781 tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
782} 782}
783 783
784static inline unsigned int tcp_left_out(const struct tcp_sock *tp) 784static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
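
The FACK helpers now test and set a named bit instead of the magic value 2. The symbolic constants are assumed to come from a companion change to <linux/tcp.h> in this series; the sketch below shows the presumed bit layout of rx_opt.sack_ok and a typical test:

    /* Presumed layout (illustrative): */
    #define TCP_SACK_SEEN           (1 << 0)        /* SACK seen on SYN */
    #define TCP_FACK_ENABLED        (1 << 1)        /* FACK in use */
    #define TCP_DSACK_SEEN          (1 << 2)        /* DSACK in last ACK */

    /* e.g. a loss-detection path might gate FACK-specific logic with:
     *      if (tp->rx_opt.sack_ok & TCP_FACK_ENABLED) ...
     */
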
@@ -834,6 +834,14 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
834extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh); 834extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
835extern __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst); 835extern __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
836 836
837/* The maximum number of MSS of available cwnd for which TSO defers
838 * sending if not using sysctl_tcp_tso_win_divisor.
839 */
840static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
841{
842 return 3;
843}
844
837/* Slow start with delack produces 3 packets of burst, so that 845/* Slow start with delack produces 3 packets of burst, so that
838 * it is safe "de facto". This will be the default - same as 846 * it is safe "de facto". This will be the default - same as
839 * the default reordering threshold - but if reordering increases, 847 * the default reordering threshold - but if reordering increases,
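
tcp_max_tso_deferred_mss() centralises the previously hard-coded burst bound of 3 MSS. A hedged sketch of the kind of check a TSO-deferral decision can build on it, not the exact tcp_tso_should_defer() code:

    static bool example_send_now(const struct tcp_sock *tp, u32 limit)
    {
            /* The receiver ACKs roughly every other full-sized frame, so
             * once there is room for more than this many MSS it is better
             * to send now than to keep deferring for a larger TSO burst.
             */
            return limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache;
    }
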
@@ -1144,7 +1152,7 @@ struct tcp6_md5sig_key {
1144/* - sock block */ 1152/* - sock block */
1145struct tcp_md5sig_info { 1153struct tcp_md5sig_info {
1146 struct tcp4_md5sig_key *keys4; 1154 struct tcp4_md5sig_key *keys4;
1147#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 1155#if IS_ENABLED(CONFIG_IPV6)
1148 struct tcp6_md5sig_key *keys6; 1156 struct tcp6_md5sig_key *keys6;
1149 u32 entries6; 1157 u32 entries6;
1150 u32 alloced6; 1158 u32 alloced6;
@@ -1171,7 +1179,7 @@ struct tcp6_pseudohdr {
1171 1179
1172union tcp_md5sum_block { 1180union tcp_md5sum_block {
1173 struct tcp4_pseudohdr ip4; 1181 struct tcp4_pseudohdr ip4;
1174#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 1182#if IS_ENABLED(CONFIG_IPV6)
1175 struct tcp6_pseudohdr ip6; 1183 struct tcp6_pseudohdr ip6;
1176#endif 1184#endif
1177}; 1185};
@@ -1430,7 +1438,8 @@ extern struct request_sock_ops tcp6_request_sock_ops;
1430extern void tcp_v4_destroy_sock(struct sock *sk); 1438extern void tcp_v4_destroy_sock(struct sock *sk);
1431 1439
1432extern int tcp_v4_gso_send_check(struct sk_buff *skb); 1440extern int tcp_v4_gso_send_check(struct sk_buff *skb);
1433extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features); 1441extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
1442 netdev_features_t features);
1434extern struct sk_buff **tcp_gro_receive(struct sk_buff **head, 1443extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
1435 struct sk_buff *skb); 1444 struct sk_buff *skb);
1436extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head, 1445extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
diff --git a/include/net/tcp_memcontrol.h b/include/net/tcp_memcontrol.h
new file mode 100644
index 000000000000..3512082fa909
--- /dev/null
+++ b/include/net/tcp_memcontrol.h
@@ -0,0 +1,19 @@
1#ifndef _TCP_MEMCG_H
2#define _TCP_MEMCG_H
3
4struct tcp_memcontrol {
5 struct cg_proto cg_proto;
6 /* per-cgroup tcp memory pressure knobs */
7 struct res_counter tcp_memory_allocated;
8 struct percpu_counter tcp_sockets_allocated;
9 /* those two are read-mostly, leave them at the end */
10 long tcp_prot_mem[3];
11 int tcp_memory_pressure;
12};
13
14struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg);
15int tcp_init_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss);
16void tcp_destroy_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss);
17unsigned long long tcp_max_memory(const struct mem_cgroup *memcg);
18void tcp_prot_mem(struct mem_cgroup *memcg, long val, int idx);
19#endif /* _TCP_MEMCG_H */
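
struct tcp_memcontrol embeds the generic cg_proto added to sock.h, so the TCP-specific state can be recovered from the generic pointer with container_of(). A hedged sketch of that mapping, in the spirit of the net/ipv4/tcp_memcontrol.c implementation rather than a copy of it:

    static inline struct tcp_memcontrol *tcp_from_cgproto(struct cg_proto *cg)
    {
            return container_of(cg, struct tcp_memcontrol, cg_proto);
    }
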
diff --git a/include/net/udp.h b/include/net/udp.h
index 3b285f402f48..e39592f682c3 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -41,7 +41,7 @@
41struct udp_skb_cb { 41struct udp_skb_cb {
42 union { 42 union {
43 struct inet_skb_parm h4; 43 struct inet_skb_parm h4;
44#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) 44#if IS_ENABLED(CONFIG_IPV6)
45 struct inet6_skb_parm h6; 45 struct inet6_skb_parm h6;
46#endif 46#endif
47 } header; 47 } header;
@@ -194,9 +194,15 @@ extern int udp_lib_setsockopt(struct sock *sk, int level, int optname,
194extern struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, 194extern struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
195 __be32 daddr, __be16 dport, 195 __be32 daddr, __be16 dport,
196 int dif); 196 int dif);
197extern struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
198 __be32 daddr, __be16 dport,
199 int dif, struct udp_table *tbl);
197extern struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport, 200extern struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
198 const struct in6_addr *daddr, __be16 dport, 201 const struct in6_addr *daddr, __be16 dport,
199 int dif); 202 int dif);
203extern struct sock *__udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
204 const struct in6_addr *daddr, __be16 dport,
205 int dif, struct udp_table *tbl);
200 206
201/* 207/*
202 * SNMP statistics for UDP and UDP-Lite 208 * SNMP statistics for UDP and UDP-Lite
@@ -217,7 +223,7 @@ extern struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *sadd
217 else SNMP_INC_STATS_USER((net)->mib.udp_stats_in6, field); \ 223 else SNMP_INC_STATS_USER((net)->mib.udp_stats_in6, field); \
218} while(0) 224} while(0)
219 225
220#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 226#if IS_ENABLED(CONFIG_IPV6)
221#define UDPX_INC_STATS_BH(sk, field) \ 227#define UDPX_INC_STATS_BH(sk, field) \
222 do { \ 228 do { \
223 if ((sk)->sk_family == AF_INET) \ 229 if ((sk)->sk_family == AF_INET) \
@@ -258,5 +264,6 @@ extern void udp4_proc_exit(void);
258extern void udp_init(void); 264extern void udp_init(void);
259 265
260extern int udp4_ufo_send_check(struct sk_buff *skb); 266extern int udp4_ufo_send_check(struct sk_buff *skb);
261extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features); 267extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
268 netdev_features_t features);
262#endif /* _UDP_H */ 269#endif /* _UDP_H */
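
Exporting __udp4_lib_lookup()/__udp6_lib_lookup() lets callers resolve a socket in an explicitly chosen udp_table instead of always going through the default UDP hash; the socket-diag work elsewhere in this series is the kind of caller that needs it. A hedged sketch of a 4-tuple lookup against the regular table; example_lookup is a made-up name:

    static struct sock *example_lookup(struct net *net,
                                       __be32 saddr, __be16 sport,
                                       __be32 daddr, __be16 dport, int dif)
    {
            /* &udp_table is the normal UDP hash; UDP-Lite has its own. */
            return __udp4_lib_lookup(net, saddr, sport, daddr, dport,
                                     dif, &udp_table);
    }
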
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index b203e14d26b7..89174e29dca9 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -827,6 +827,14 @@ static inline bool addr_match(const void *token1, const void *token2,
827 return true; 827 return true;
828} 828}
829 829
830static inline bool addr4_match(__be32 a1, __be32 a2, u8 prefixlen)
831{
832 /* C99 6.5.7 (3): u32 << 32 is undefined behaviour */
833 if (prefixlen == 0)
834 return true;
835 return !((a1 ^ a2) & htonl(0xFFFFFFFFu << (32 - prefixlen)));
836}
837
830static __inline__ 838static __inline__
831__be16 xfrm_flowi_sport(const struct flowi *fl, const union flowi_uli *uli) 839__be16 xfrm_flowi_sport(const struct flowi *fl, const union flowi_uli *uli)
832{ 840{
@@ -1209,8 +1217,8 @@ void xfrm_flowi_addr_get(const struct flowi *fl,
1209 memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4)); 1217 memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4));
1210 break; 1218 break;
1211 case AF_INET6: 1219 case AF_INET6:
1212 ipv6_addr_copy((struct in6_addr *)&saddr->a6, &fl->u.ip6.saddr); 1220 *(struct in6_addr *)saddr->a6 = fl->u.ip6.saddr;
1213 ipv6_addr_copy((struct in6_addr *)&daddr->a6, &fl->u.ip6.daddr); 1221 *(struct in6_addr *)daddr->a6 = fl->u.ip6.daddr;
1214 break; 1222 break;
1215 } 1223 }
1216} 1224}
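
The new addr4_match() above compares two IPv4 addresses under a prefix length, special-casing prefixlen 0 because shifting a u32 by 32 is undefined behaviour. A hedged illustration with concrete addresses; the WARN_ON is only there to make the expectations explicit:

    static void example_addr4_match(void)
    {
            /* 192.168.1.1 vs 192.168.1.200 under /24: same subnet */
            bool same = addr4_match(htonl(0xC0A80101), htonl(0xC0A801C8), 24);
            /* /0 matches anything, without hitting the u32 << 32 case */
            bool any  = addr4_match(htonl(0xC0A80101), htonl(0x08080808), 0);

            WARN_ON(!same || !any);
    }
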
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 669fbd62ec25..d2d88bed891b 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -241,24 +241,73 @@ TRACE_EVENT(rcu_fqs,
241 241
242/* 242/*
243 * Tracepoint for dyntick-idle entry/exit events. These take a string 243 * Tracepoint for dyntick-idle entry/exit events. These take a string
244 * as argument: "Start" for entering dyntick-idle mode and "End" for 244 * as argument: "Start" for entering dyntick-idle mode, "End" for
245 * leaving it. 245 * leaving it, "--=" for events moving towards idle, and "++=" for events
246 * moving away from idle. "Error on entry: not idle task" and "Error on
247 * exit: not idle task" indicate that a non-idle task is erroneously
248 * toying with the idle loop.
249 *
250 * These events also take a pair of numbers, which indicate the nesting
251 * depth before and after the event of interest. Note that task-related
252 * events use the upper bits of each number, while interrupt-related
253 * events use the lower bits.
246 */ 254 */
247TRACE_EVENT(rcu_dyntick, 255TRACE_EVENT(rcu_dyntick,
248 256
249 TP_PROTO(char *polarity), 257 TP_PROTO(char *polarity, long long oldnesting, long long newnesting),
250 258
251 TP_ARGS(polarity), 259 TP_ARGS(polarity, oldnesting, newnesting),
252 260
253 TP_STRUCT__entry( 261 TP_STRUCT__entry(
254 __field(char *, polarity) 262 __field(char *, polarity)
263 __field(long long, oldnesting)
264 __field(long long, newnesting)
255 ), 265 ),
256 266
257 TP_fast_assign( 267 TP_fast_assign(
258 __entry->polarity = polarity; 268 __entry->polarity = polarity;
269 __entry->oldnesting = oldnesting;
270 __entry->newnesting = newnesting;
271 ),
272
273 TP_printk("%s %llx %llx", __entry->polarity,
274 __entry->oldnesting, __entry->newnesting)
275);
276
277/*
278 * Tracepoint for RCU preparation for idle, the goal being to get RCU
279 * processing done so that the current CPU can shut off its scheduling
280 * clock and enter dyntick-idle mode. One way to accomplish this is
281 * to drain all RCU callbacks from this CPU, and the other is to have
282 * done everything RCU requires for the current grace period. In this
283 * latter case, the CPU will be awakened at the end of the current grace
284 * period in order to process the remainder of its callbacks.
285 *
286 * These tracepoints take a string as argument:
287 *
288 * "No callbacks": Nothing to do, no callbacks on this CPU.
289 * "In holdoff": Nothing to do, holding off after unsuccessful attempt.
290 * "Begin holdoff": Attempt failed, don't retry until next jiffy.
291 * "Dyntick with callbacks": Entering dyntick-idle despite callbacks.
292 * "More callbacks": Still more callbacks, try again to clear them out.
293 * "Callbacks drained": All callbacks processed, off to dyntick idle!
294 * "Timer": Timer fired to cause CPU to continue processing callbacks.
295 */
296TRACE_EVENT(rcu_prep_idle,
297
298 TP_PROTO(char *reason),
299
300 TP_ARGS(reason),
301
302 TP_STRUCT__entry(
303 __field(char *, reason)
304 ),
305
306 TP_fast_assign(
307 __entry->reason = reason;
259 ), 308 ),
260 309
261 TP_printk("%s", __entry->polarity) 310 TP_printk("%s", __entry->reason)
262); 311);
263 312
264/* 313/*
@@ -412,27 +461,71 @@ TRACE_EVENT(rcu_invoke_kfree_callback,
412 461
413/* 462/*
414 * Tracepoint for exiting rcu_do_batch after RCU callbacks have been 463 * Tracepoint for exiting rcu_do_batch after RCU callbacks have been
415 * invoked. The first argument is the name of the RCU flavor and 464 * invoked. The first argument is the name of the RCU flavor,
416 * the second argument is number of callbacks actually invoked. 465 * the second argument is number of callbacks actually invoked,
466 * the third argument (cb) is whether or not any of the callbacks that
467 * were ready to invoke at the beginning of this batch are still
468 * queued, the fourth argument (nr) is the return value of need_resched(),
469 * the fifth argument (iit) is 1 if the current task is the idle task,
470 * and the sixth argument (risk) is the return value from
471 * rcu_is_callbacks_kthread().
417 */ 472 */
418TRACE_EVENT(rcu_batch_end, 473TRACE_EVENT(rcu_batch_end,
419 474
420 TP_PROTO(char *rcuname, int callbacks_invoked), 475 TP_PROTO(char *rcuname, int callbacks_invoked,
476 bool cb, bool nr, bool iit, bool risk),
421 477
422 TP_ARGS(rcuname, callbacks_invoked), 478 TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
423 479
424 TP_STRUCT__entry( 480 TP_STRUCT__entry(
425 __field(char *, rcuname) 481 __field(char *, rcuname)
426 __field(int, callbacks_invoked) 482 __field(int, callbacks_invoked)
483 __field(bool, cb)
484 __field(bool, nr)
485 __field(bool, iit)
486 __field(bool, risk)
427 ), 487 ),
428 488
429 TP_fast_assign( 489 TP_fast_assign(
430 __entry->rcuname = rcuname; 490 __entry->rcuname = rcuname;
431 __entry->callbacks_invoked = callbacks_invoked; 491 __entry->callbacks_invoked = callbacks_invoked;
492 __entry->cb = cb;
493 __entry->nr = nr;
494 __entry->iit = iit;
495 __entry->risk = risk;
496 ),
497
498 TP_printk("%s CBs-invoked=%d idle=%c%c%c%c",
499 __entry->rcuname, __entry->callbacks_invoked,
500 __entry->cb ? 'C' : '.',
501 __entry->nr ? 'S' : '.',
502 __entry->iit ? 'I' : '.',
503 __entry->risk ? 'R' : '.')
504);
505
506/*
507 * Tracepoint for rcutorture readers. The first argument is the name
508 * of the RCU flavor from rcutorture's viewpoint and the second argument
509 * is the callback address.
510 */
511TRACE_EVENT(rcu_torture_read,
512
513 TP_PROTO(char *rcutorturename, struct rcu_head *rhp),
514
515 TP_ARGS(rcutorturename, rhp),
516
517 TP_STRUCT__entry(
518 __field(char *, rcutorturename)
519 __field(struct rcu_head *, rhp)
520 ),
521
522 TP_fast_assign(
523 __entry->rcutorturename = rcutorturename;
524 __entry->rhp = rhp;
432 ), 525 ),
433 526
434 TP_printk("%s CBs-invoked=%d", 527 TP_printk("%s torture read %p",
435 __entry->rcuname, __entry->callbacks_invoked) 528 __entry->rcutorturename, __entry->rhp)
436); 529);
437 530
438#else /* #ifdef CONFIG_RCU_TRACE */ 531#else /* #ifdef CONFIG_RCU_TRACE */
@@ -443,13 +536,16 @@ TRACE_EVENT(rcu_batch_end,
443#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0) 536#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
444#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks) do { } while (0) 537#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks) do { } while (0)
445#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0) 538#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
446#define trace_rcu_dyntick(polarity) do { } while (0) 539#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0)
540#define trace_rcu_prep_idle(reason) do { } while (0)
447#define trace_rcu_callback(rcuname, rhp, qlen) do { } while (0) 541#define trace_rcu_callback(rcuname, rhp, qlen) do { } while (0)
448#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen) do { } while (0) 542#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen) do { } while (0)
449#define trace_rcu_batch_start(rcuname, qlen, blimit) do { } while (0) 543#define trace_rcu_batch_start(rcuname, qlen, blimit) do { } while (0)
450#define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0) 544#define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
451#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0) 545#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
452#define trace_rcu_batch_end(rcuname, callbacks_invoked) do { } while (0) 546#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
547 do { } while (0)
548#define trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
453 549
454#endif /* #else #ifdef CONFIG_RCU_TRACE */ 550#endif /* #else #ifdef CONFIG_RCU_TRACE */
455 551
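
rcu_batch_end now records, besides the callback count, whether ready callbacks remain queued, whether a reschedule is pending, whether the idle task is running, and whether the callbacks ran in the dedicated kthread; those are the C/S/I/R characters in the printk format. A hedged sketch of a call site passing the new arguments; rcu_is_callbacks_kthread() is internal to the RCU core, and the parameters stand in for locals of the real callback-invocation loop:

    static void example_batch_end(char *rcuname, int invoked, bool cbs_left)
    {
            trace_rcu_batch_end(rcuname, invoked, cbs_left,
                                need_resched(), is_idle_task(current),
                                rcu_is_callbacks_kthread());
    }
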
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 959ff18b63b6..6ba596b07a72 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -331,6 +331,13 @@ DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
331 TP_ARGS(tsk, delay)); 331 TP_ARGS(tsk, delay));
332 332
333/* 333/*
334 * Tracepoint for accounting blocked time (time the task is in uninterruptible).
335 */
336DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
337 TP_PROTO(struct task_struct *tsk, u64 delay),
338 TP_ARGS(tsk, delay));
339
340/*
334 * Tracepoint for accounting runtime (time the task is executing 341 * Tracepoint for accounting runtime (time the task is executing
335 * on a CPU). 342 * on a CPU).
336 */ 343 */
@@ -363,6 +370,56 @@ TRACE_EVENT(sched_stat_runtime,
363 (unsigned long long)__entry->vruntime) 370 (unsigned long long)__entry->vruntime)
364); 371);
365 372
373#ifdef CREATE_TRACE_POINTS
374static inline u64 trace_get_sleeptime(struct task_struct *tsk)
375{
376#ifdef CONFIG_SCHEDSTATS
377 u64 block, sleep;
378
379 block = tsk->se.statistics.block_start;
380 sleep = tsk->se.statistics.sleep_start;
381 tsk->se.statistics.block_start = 0;
382 tsk->se.statistics.sleep_start = 0;
383
384 return block ? block : sleep ? sleep : 0;
385#else
386 return 0;
387#endif
388}
389#endif
390
391/*
392 * Tracepoint for accounting sleeptime (time the task is sleeping
393 * or waiting for I/O).
394 */
395TRACE_EVENT(sched_stat_sleeptime,
396
397 TP_PROTO(struct task_struct *tsk, u64 now),
398
399 TP_ARGS(tsk, now),
400
401 TP_STRUCT__entry(
402 __array( char, comm, TASK_COMM_LEN )
403 __field( pid_t, pid )
404 __field( u64, sleeptime )
405 ),
406
407 TP_fast_assign(
408 memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
409 __entry->pid = tsk->pid;
410 __entry->sleeptime = trace_get_sleeptime(tsk);
411 __entry->sleeptime = __entry->sleeptime ?
412 now - __entry->sleeptime : 0;
413 )
414 TP_perf_assign(
415 __perf_count(__entry->sleeptime);
416 ),
417
418 TP_printk("comm=%s pid=%d sleeptime=%Lu [ns]",
419 __entry->comm, __entry->pid,
420 (unsigned long long)__entry->sleeptime)
421);
422
366/* 423/*
367 * Tracepoint for showing priority inheritance modifying a tasks 424 * Tracepoint for showing priority inheritance modifying a tasks
368 * priority. 425 * priority.
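
The sched_stat_sleeptime event reports now minus whichever of block_start/sleep_start was recorded when the task last went to sleep, or 0 if neither was set (the schedstat fields are cleared as they are read). A hedged illustration of that arithmetic outside the tracepoint machinery:

    static u64 example_sleeptime(u64 now, u64 block_start, u64 sleep_start)
    {
            u64 start = block_start ? block_start : sleep_start;

            return start ? now - start : 0;
    }
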
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index d29c153705bc..cc2e1a7e44ec 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -29,11 +29,11 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages,
29 bool highmem); 29 bool highmem);
30void free_xenballooned_pages(int nr_pages, struct page **pages); 30void free_xenballooned_pages(int nr_pages, struct page **pages);
31 31
32struct sys_device; 32struct device;
33#ifdef CONFIG_XEN_SELFBALLOONING 33#ifdef CONFIG_XEN_SELFBALLOONING
34extern int register_xen_selfballooning(struct sys_device *sysdev); 34extern int register_xen_selfballooning(struct device *dev);
35#else 35#else
36static inline int register_xen_selfballooning(struct sys_device *sysdev) 36static inline int register_xen_selfballooning(struct device *dev)
37{ 37{
38 return -ENOSYS; 38 return -ENOSYS;
39} 39}