Diffstat (limited to 'include')
-rw-r--r--  include/acpi/button.h | 4
-rw-r--r--  include/acpi/video.h | 2
-rw-r--r--  include/asm-generic/memory_model.h | 2
-rw-r--r--  include/asm-generic/qspinlock.h | 4
-rw-r--r--  include/asm-generic/word-at-a-time.h | 80
-rw-r--r--  include/drm/drm_crtc_helper.h | 1
-rw-r--r--  include/drm/drm_dp_helper.h | 4
-rw-r--r--  include/drm/drm_dp_mst_helper.h | 4
-rw-r--r--  include/kvm/arm_vgic.h | 6
-rw-r--r--  include/linux/acpi.h | 1
-rw-r--r--  include/linux/backing-dev-defs.h | 3
-rw-r--r--  include/linux/backing-dev.h | 80
-rw-r--r--  include/linux/blk-cgroup.h | 4
-rw-r--r--  include/linux/blk-mq.h | 5
-rw-r--r--  include/linux/blkdev.h | 52
-rw-r--r--  include/linux/ceph/ceph_features.h | 1
-rw-r--r--  include/linux/ceph/messenger.h | 4
-rw-r--r--  include/linux/cgroup-defs.h | 27
-rw-r--r--  include/linux/clockchips.h | 29
-rw-r--r--  include/linux/cma.h | 2
-rw-r--r--  include/linux/compiler-gcc.h | 13
-rw-r--r--  include/linux/compiler.h | 66
-rw-r--r--  include/linux/cpufreq.h | 5
-rw-r--r--  include/linux/devfreq.h | 24
-rw-r--r--  include/linux/dma-contiguous.h | 4
-rw-r--r--  include/linux/init_task.h | 8
-rw-r--r--  include/linux/iova.h | 4
-rw-r--r--  include/linux/irq.h | 81
-rw-r--r--  include/linux/irqdesc.h | 41
-rw-r--r--  include/linux/irqdomain.h | 5
-rw-r--r--  include/linux/irqhandler.h | 2
-rw-r--r--  include/linux/jump_label.h | 10
-rw-r--r--  include/linux/memcontrol.h | 9
-rw-r--r--  include/linux/mlx5/device.h | 11
-rw-r--r--  include/linux/mlx5/driver.h | 1
-rw-r--r--  include/linux/mm.h | 21
-rw-r--r--  include/linux/netdevice.h | 8
-rw-r--r--  include/linux/omap-dma.h | 2
-rw-r--r--  include/linux/phy.h | 6
-rw-r--r--  include/linux/rcupdate.h | 11
-rw-r--r--  include/linux/regulator/driver.h | 2
-rw-r--r--  include/linux/sched.h | 12
-rw-r--r--  include/linux/security.h | 2
-rw-r--r--  include/linux/skbuff.h | 9
-rw-r--r--  include/linux/spi/spi.h | 2
-rw-r--r--  include/linux/string.h | 3
-rw-r--r--  include/linux/sunrpc/xprtsock.h | 3
-rw-r--r--  include/linux/thermal.h | 8
-rw-r--r--  include/linux/tick.h | 9
-rw-r--r--  include/linux/usb/renesas_usbhs.h | 2
-rw-r--r--  include/linux/wait.h | 5
-rw-r--r--  include/net/af_unix.h | 6
-rw-r--r--  include/net/dst_metadata.h | 32
-rw-r--r--  include/net/flow.h | 1
-rw-r--r--  include/net/inet_timewait_sock.h | 14
-rw-r--r--  include/net/ip6_fib.h | 3
-rw-r--r--  include/net/ip6_tunnel.h | 17
-rw-r--r--  include/net/ip_fib.h | 30
-rw-r--r--  include/net/ip_tunnels.h | 2
-rw-r--r--  include/net/route.h | 2
-rw-r--r--  include/net/sock.h | 8
-rw-r--r--  include/rdma/opa_port_info.h | 4
-rw-r--r--  include/sound/soc.h | 6
-rw-r--r--  include/sound/wm8904.h | 2
-rw-r--r--  include/target/target_core_base.h | 1
-rw-r--r--  include/uapi/asm-generic/signal.h | 2
-rw-r--r--  include/uapi/asm-generic/unistd.h | 8
-rw-r--r--  include/uapi/linux/lwtunnel.h | 4
-rw-r--r--  include/uapi/linux/openvswitch.h | 37
-rw-r--r--  include/uapi/linux/rtnetlink.h | 2
-rw-r--r--  include/uapi/linux/userfaultfd.h | 2
-rw-r--r--  include/xen/interface/sched.h | 8
72 files changed, 573 insertions, 322 deletions
diff --git a/include/acpi/button.h b/include/acpi/button.h
index 97eea0e4c016..1cad8b2d460c 100644
--- a/include/acpi/button.h
+++ b/include/acpi/button.h
@@ -3,7 +3,7 @@
 
 #include <linux/notifier.h>
 
-#if defined(CONFIG_ACPI_BUTTON) || defined(CONFIG_ACPI_BUTTON_MODULE)
+#if IS_ENABLED(CONFIG_ACPI_BUTTON)
 extern int acpi_lid_notifier_register(struct notifier_block *nb);
 extern int acpi_lid_notifier_unregister(struct notifier_block *nb);
 extern int acpi_lid_open(void);
@@ -20,6 +20,6 @@ static inline int acpi_lid_open(void)
 {
         return 1;
 }
-#endif /* defined(CONFIG_ACPI_BUTTON) || defined(CONFIG_ACPI_BUTTON_MODULE) */
+#endif /* IS_ENABLED(CONFIG_ACPI_BUTTON) */
 
 #endif /* ACPI_BUTTON_H */
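The button.h hunk above (and the video.h hunk that follows) replaces the open-coded "defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)" test with IS_ ENABLED(). The snippet below is a stand-alone, user-space re-creation of the preprocessor trick behind that macro, for illustration only; the real definition lives in include/linux/kconfig.h and additionally treats a CONFIG_FOO_MODULE (=m) definition as enabled, which this sketch omits.

/*
 * Illustrative sketch only -- not the kernel's IS_ENABLED().
 * Expands to 1 if the named macro is defined to 1, else to 0.
 */
#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_enabled(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_enabled(value) ____is_enabled(__ARG_PLACEHOLDER_##value)
#define IS_ENABLED(option) ___is_enabled(option)

#define CONFIG_ACPI_BUTTON 1    /* pretend this option was selected =y */

int main(void)
{
        printf("ACPI_BUTTON: %d\n", IS_ENABLED(CONFIG_ACPI_BUTTON));
        printf("ACPI_VIDEO:  %d\n", IS_ENABLED(CONFIG_ACPI_VIDEO)); /* undefined -> 0 */
        return 0;
}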
diff --git a/include/acpi/video.h b/include/acpi/video.h
index e840b294c6f5..c62392d9b52a 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -24,7 +24,7 @@ enum acpi_backlight_type {
         acpi_backlight_native,
 };
 
-#if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE)
+#if IS_ENABLED(CONFIG_ACPI_VIDEO)
 extern int acpi_video_register(void);
 extern void acpi_video_unregister(void);
 extern int acpi_video_get_edid(struct acpi_device *device, int type,
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index f20f407ce45d..4b4b056a6eb0 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -73,7 +73,7 @@
  * Convert a physical address to a Page Frame Number and back
  */
 #define __phys_to_pfn(paddr)    ((unsigned long)((paddr) >> PAGE_SHIFT))
-#define __pfn_to_phys(pfn)      ((pfn) << PAGE_SHIFT)
+#define __pfn_to_phys(pfn)      PFN_PHYS(pfn)
 
 #define page_to_pfn __page_to_pfn
 #define pfn_to_page __pfn_to_page
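The __pfn_to_phys() change above matters when unsigned long is narrower than the physical address (32-bit kernels with PAE/LPAE): PFN_PHYS() casts the pfn to phys_addr_t before shifting, so the high bits survive. A stand-alone illustration of the difference, with a made-up PAGE_SHIFT and pfn value:

/* Shows why widening before the shift matters; values are invented. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
        uint32_t pfn  = 0x00123456;                   /* a page above 4 GiB */
        uint64_t bad  = pfn << PAGE_SHIFT;            /* 32-bit shift: high bits lost */
        uint64_t good = (uint64_t)pfn << PAGE_SHIFT;  /* cast first, like PFN_PHYS() */

        printf("truncated: 0x%llx\n", (unsigned long long)bad);
        printf("correct:   0x%llx\n", (unsigned long long)good);
        return 0;
}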
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 83bfb87f5bf1..e2aadbc7151f 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -111,8 +111,8 @@ static inline void queued_spin_unlock_wait(struct qspinlock *lock)
                 cpu_relax();
 }
 
-#ifndef virt_queued_spin_lock
-static __always_inline bool virt_queued_spin_lock(struct qspinlock *lock)
+#ifndef virt_spin_lock
+static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 {
         return false;
 }
diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h
index 94f9ea8abcae..011dde083f23 100644
--- a/include/asm-generic/word-at-a-time.h
+++ b/include/asm-generic/word-at-a-time.h
@@ -1,15 +1,10 @@
 #ifndef _ASM_WORD_AT_A_TIME_H
 #define _ASM_WORD_AT_A_TIME_H
 
-/*
- * This says "generic", but it's actually big-endian only.
- * Little-endian can use more efficient versions of these
- * interfaces, see for example
- * arch/x86/include/asm/word-at-a-time.h
- * for those.
- */
-
 #include <linux/kernel.h>
+#include <asm/byteorder.h>
+
+#ifdef __BIG_ENDIAN
 
 struct word_at_a_time {
         const unsigned long high_bits, low_bits;
@@ -53,4 +48,73 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
 #define zero_bytemask(mask) (~1ul << __fls(mask))
 #endif
 
+#else
+
+/*
+ * The optimal byte mask counting is probably going to be something
+ * that is architecture-specific. If you have a reliably fast
+ * bit count instruction, that might be better than the multiply
+ * and shift, for example.
+ */
+struct word_at_a_time {
+        const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+#ifdef CONFIG_64BIT
+
+/*
+ * Jan Achrenius on G+: microoptimized version of
+ * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
+ * that works for the bytemasks without having to
+ * mask them first.
+ */
+static inline long count_masked_bytes(unsigned long mask)
+{
+        return mask*0x0001020304050608ul >> 56;
+}
+
+#else  /* 32-bit case */
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+        /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+        long a = (0x0ff0001+mask) >> 23;
+        /* Fix the 1 for 00 case */
+        return a & mask;
+}
+
+#endif
+
+/* Return nonzero if it has a zero */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+        unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+        *bits = mask;
+        return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+        return bits;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+        bits = (bits - 1) & ~bits;
+        return bits >> 7;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+        return count_masked_bytes(mask);
+}
+
+#endif /* __BIG_ENDIAN */
+
 #endif /* _ASM_WORD_AT_A_TIME_H */
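The 64-bit count_masked_bytes() added above turns a mask of k low 0xff bytes into the count k with a single multiply and shift. A quick stand-alone check of that identity (assumes a 64-bit unsigned long, as the CONFIG_64BIT branch does):

/* Verifies the multiply-and-shift byte count for k = 0..7. */
#include <stdio.h>

static long count_masked_bytes(unsigned long mask)
{
        return mask*0x0001020304050608ul >> 56;   /* copied from the hunk above */
}

int main(void)
{
        for (int k = 0; k <= 7; k++) {
                unsigned long mask = k ? ~0ul >> (8 * (8 - k)) : 0;
                printf("k=%d  count=%ld\n", k, count_masked_bytes(mask));
        }
        return 0;
}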
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 2a747a91fded..3febb4b9fce9 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -240,5 +240,6 @@ extern void drm_kms_helper_hotplug_event(struct drm_device *dev);
 
 extern void drm_kms_helper_poll_disable(struct drm_device *dev);
 extern void drm_kms_helper_poll_enable(struct drm_device *dev);
+extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev);
 
 #endif
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 499e9f625aef..0212d139a480 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -568,6 +568,10 @@
 #define MODE_I2C_READ  4
 #define MODE_I2C_STOP  8
 
+/* DP 1.2 MST PORTs - Section 2.5.1 v1.2a spec */
+#define DP_MST_PHYSICAL_PORT_0 0
+#define DP_MST_LOGICAL_PORT_0 8
+
 #define DP_LINK_STATUS_SIZE    6
 bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
                           int lane_count);
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 86d0b25ed054..5340099741ae 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -253,6 +253,7 @@ struct drm_dp_remote_dpcd_write {
         u8 *bytes;
 };
 
+#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4
 struct drm_dp_remote_i2c_read {
         u8 num_transactions;
         u8 port_number;
@@ -262,7 +263,7 @@ struct drm_dp_remote_i2c_read {
                 u8 *bytes;
                 u8 no_stop_bit;
                 u8 i2c_transaction_delay;
-        } transactions[4];
+        } transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS];
         u8 read_i2c_device_id;
         u8 num_bytes_read;
 };
@@ -374,6 +375,7 @@ struct drm_dp_mst_topology_mgr;
 struct drm_dp_mst_topology_cbs {
         /* create a connector for a port */
         struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
+        void (*register_connector)(struct drm_connector *connector);
         void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
                                   struct drm_connector *connector);
         void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr);
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index d901f1a47be6..4e14dac282bb 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -35,11 +35,7 @@
 #define VGIC_V3_MAX_LRS                16
 #define VGIC_MAX_IRQS          1024
 #define VGIC_V2_MAX_CPUS       8
-
-/* Sanity checks... */
-#if (KVM_MAX_VCPUS > 255)
-#error Too many KVM VCPUs, the VGIC only supports up to 255 VCPUs for now
-#endif
+#define VGIC_V3_MAX_CPUS       255
 
 #if (VGIC_NR_IRQS_LEGACY & 31)
 #error "VGIC_NR_IRQS must be a multiple of 32"
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 7235c4851460..43856d19cf4d 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -217,6 +217,7 @@ struct pci_dev;
 
 int acpi_pci_irq_enable (struct pci_dev *dev);
 void acpi_penalize_isa_irq(int irq, int active);
+bool acpi_isa_irq_available(int irq);
 void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
 void acpi_pci_irq_disable (struct pci_dev *dev);
 
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index a23209b43842..1b4d69f68c33 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -116,6 +116,8 @@ struct bdi_writeback {
         struct list_head work_list;
         struct delayed_work dwork;      /* work item used for writeback */
 
+        struct list_head bdi_node;      /* anchored at bdi->wb_list */
+
 #ifdef CONFIG_CGROUP_WRITEBACK
         struct percpu_ref refcnt;       /* used only for !root wb's */
         struct fprop_local_percpu memcg_completions;
@@ -150,6 +152,7 @@ struct backing_dev_info {
         atomic_long_t tot_write_bandwidth;
 
         struct bdi_writeback wb;  /* the root writeback info for this bdi */
+        struct list_head wb_list; /* list of all wbs */
 #ifdef CONFIG_CGROUP_WRITEBACK
         struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
         struct rb_root cgwb_congested_tree; /* their congested states */
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 5a5d79ee256f..c85f74946a8b 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -13,18 +13,23 @@
 #include <linux/sched.h>
 #include <linux/blkdev.h>
 #include <linux/writeback.h>
+#include <linux/memcontrol.h>
 #include <linux/blk-cgroup.h>
 #include <linux/backing-dev-defs.h>
 #include <linux/slab.h>
 
 int __must_check bdi_init(struct backing_dev_info *bdi);
-void bdi_destroy(struct backing_dev_info *bdi);
+void bdi_exit(struct backing_dev_info *bdi);
 
 __printf(3, 4)
 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                 const char *fmt, ...);
 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
+void bdi_unregister(struct backing_dev_info *bdi);
+
 int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
+void bdi_destroy(struct backing_dev_info *bdi);
+
 void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
                         bool range_cyclic, enum wb_reason reason);
 void wb_start_background_writeback(struct bdi_writeback *wb);
@@ -252,13 +257,19 @@ int inode_congested(struct inode *inode, int cong_bits);
  * @inode: inode of interest
  *
  * cgroup writeback requires support from both the bdi and filesystem.
- * Test whether @inode has both.
+ * Also, both memcg and iocg have to be on the default hierarchy. Test
+ * whether all conditions are met.
+ *
+ * Note that the test result may change dynamically on the same inode
+ * depending on how memcg and iocg are configured.
  */
 static inline bool inode_cgwb_enabled(struct inode *inode)
 {
         struct backing_dev_info *bdi = inode_to_bdi(inode);
 
-        return bdi_cap_account_dirty(bdi) &&
+        return cgroup_on_dfl(mem_cgroup_root_css->cgroup) &&
+               cgroup_on_dfl(blkcg_root_css->cgroup) &&
+               bdi_cap_account_dirty(bdi) &&
                (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
                (inode->i_sb->s_iflags & SB_I_CGROUPWB);
 }
@@ -401,61 +412,6 @@ static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
         rcu_read_unlock();
 }
 
-struct wb_iter {
-        int                     start_memcg_id;
-        struct radix_tree_iter  tree_iter;
-        void                    **slot;
-};
-
-static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter,
-                                                   struct backing_dev_info *bdi)
-{
-        struct radix_tree_iter *titer = &iter->tree_iter;
-
-        WARN_ON_ONCE(!rcu_read_lock_held());
-
-        if (iter->start_memcg_id >= 0) {
-                iter->slot = radix_tree_iter_init(titer, iter->start_memcg_id);
-                iter->start_memcg_id = -1;
-        } else {
-                iter->slot = radix_tree_next_slot(iter->slot, titer, 0);
-        }
-
-        if (!iter->slot)
-                iter->slot = radix_tree_next_chunk(&bdi->cgwb_tree, titer, 0);
-        if (iter->slot)
-                return *iter->slot;
-        return NULL;
-}
-
-static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter,
-                                                   struct backing_dev_info *bdi,
-                                                   int start_memcg_id)
-{
-        iter->start_memcg_id = start_memcg_id;
-
-        if (start_memcg_id)
-                return __wb_iter_next(iter, bdi);
-        else
-                return &bdi->wb;
-}
-
-/**
- * bdi_for_each_wb - walk all wb's of a bdi in ascending memcg ID order
- * @wb_cur: cursor struct bdi_writeback pointer
- * @bdi: bdi to walk wb's of
- * @iter: pointer to struct wb_iter to be used as iteration buffer
- * @start_memcg_id: memcg ID to start iteration from
- *
- * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending
- * memcg ID order starting from @start_memcg_id. @iter is struct wb_iter
- * to be used as temp storage during iteration. rcu_read_lock() must be
- * held throughout iteration.
- */
-#define bdi_for_each_wb(wb_cur, bdi, iter, start_memcg_id)             \
-        for ((wb_cur) = __wb_iter_init(iter, bdi, start_memcg_id);     \
-             (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi))
-
 #else  /* CONFIG_CGROUP_WRITEBACK */
 
 static inline bool inode_cgwb_enabled(struct inode *inode)
@@ -515,14 +471,6 @@ static inline void wb_blkcg_offline(struct blkcg *blkcg)
 {
 }
 
-struct wb_iter {
-        int             next_id;
-};
-
-#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id)             \
-        for ((iter)->next_id = (start_blkcg_id);                       \
-             ({ (wb_cur) = !(iter)->next_id++ ? &(bdi)->wb : NULL; }); )
-
 static inline int inode_congested(struct inode *inode, int cong_bits)
 {
         return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 0a5cc7a1109b..c02e669945e9 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -713,9 +713,9 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
 
         if (!throtl) {
                 blkg = blkg ?: q->root_blkg;
-                blkg_rwstat_add(&blkg->stat_bytes, bio->bi_flags,
+                blkg_rwstat_add(&blkg->stat_bytes, bio->bi_rw,
                                 bio->bi_iter.bi_size);
-                blkg_rwstat_add(&blkg->stat_ios, bio->bi_flags, 1);
+                blkg_rwstat_add(&blkg->stat_ios, bio->bi_rw, 1);
         }
 
         rcu_read_unlock();
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 37d1602c4f7a..5e7d43ab61c0 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -145,7 +145,6 @@ enum {
         BLK_MQ_F_SHOULD_MERGE   = 1 << 0,
         BLK_MQ_F_TAG_SHARED     = 1 << 1,
         BLK_MQ_F_SG_MERGE       = 1 << 2,
-        BLK_MQ_F_SYSFS_UP       = 1 << 3,
         BLK_MQ_F_DEFER_ISSUE    = 1 << 4,
         BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
         BLK_MQ_F_ALLOC_POLICY_BITS = 1,
@@ -215,7 +214,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
 void blk_mq_cancel_requeue_work(struct request_queue *q);
 void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_abort_requeue_list(struct request_queue *q);
-void blk_mq_complete_request(struct request *rq);
+void blk_mq_complete_request(struct request *rq, int error);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
@@ -224,8 +223,6 @@ void blk_mq_start_hw_queues(struct request_queue *q);
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
 void blk_mq_run_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
-void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
-                void *priv);
 void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
                 void *priv);
 void blk_mq_freeze_queue(struct request_queue *q);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 38a5ff772a37..19c2e947d4d1 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -456,6 +456,8 @@ struct request_queue {
         struct blk_mq_tag_set   *tag_set;
         struct list_head        tag_set_list;
         struct bio_set          *bio_split;
+
+        bool                    mq_sysfs_init_done;
 };
 
 #define QUEUE_FLAG_QUEUED      1       /* uses generic tag queueing */
@@ -1368,6 +1370,26 @@ static inline bool bvec_gap_to_prev(struct request_queue *q,
                 ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
 }
 
+static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
+                         struct bio *next)
+{
+        if (!bio_has_data(prev))
+                return false;
+
+        return bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1],
+                                next->bi_io_vec[0].bv_offset);
+}
+
+static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
+{
+        return bio_will_gap(req->q, req->biotail, bio);
+}
+
+static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
+{
+        return bio_will_gap(req->q, bio, req->bio);
+}
+
 struct work_struct;
 int kblockd_schedule_work(struct work_struct *work);
 int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
@@ -1494,6 +1516,26 @@ queue_max_integrity_segments(struct request_queue *q)
         return q->limits.max_integrity_segments;
 }
 
+static inline bool integrity_req_gap_back_merge(struct request *req,
+                                                struct bio *next)
+{
+        struct bio_integrity_payload *bip = bio_integrity(req->bio);
+        struct bio_integrity_payload *bip_next = bio_integrity(next);
+
+        return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+                                bip_next->bip_vec[0].bv_offset);
+}
+
+static inline bool integrity_req_gap_front_merge(struct request *req,
+                                                 struct bio *bio)
+{
+        struct bio_integrity_payload *bip = bio_integrity(bio);
+        struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
+
+        return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+                                bip_next->bip_vec[0].bv_offset);
+}
+
 #else /* CONFIG_BLK_DEV_INTEGRITY */
 
 struct bio;
@@ -1560,6 +1602,16 @@ static inline bool blk_integrity_is_initialized(struct gendisk *g)
 {
         return 0;
 }
+static inline bool integrity_req_gap_back_merge(struct request *req,
+                                                struct bio *next)
+{
+        return false;
+}
+static inline bool integrity_req_gap_front_merge(struct request *req,
+                                                 struct bio *bio)
+{
+        return false;
+}
 
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
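The req_gap_back_merge()/req_gap_front_merge() helpers and their integrity counterparts added above all reduce to the same arithmetic: two adjacent segments may only be merged if neither side breaks the queue's virt_boundary alignment. The following user-space sketch (invented values, loosely modelled on the bvec_gap_to_prev() check) shows that test in isolation:

/* Stand-alone illustration of a virt-boundary gap check; not kernel code. */
#include <stdio.h>
#include <stdbool.h>

static bool segs_gap(unsigned long boundary_mask,
                     unsigned prev_off, unsigned prev_len, unsigned next_off)
{
        /* a gap exists unless the previous segment ends on the boundary
         * and the next one starts on it */
        return (next_off & boundary_mask) ||
               ((prev_off + prev_len) & boundary_mask);
}

int main(void)
{
        unsigned long mask = 4096 - 1;                 /* 4 KiB virt boundary */

        printf("%d\n", segs_gap(mask, 0, 4096, 0));    /* 0: merge allowed */
        printf("%d\n", segs_gap(mask, 0, 2048, 0));    /* 1: prev ends mid-page */
        printf("%d\n", segs_gap(mask, 0, 4096, 512));  /* 1: next starts mid-page */
        return 0;
}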
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index 4763ad64e832..f89b31d45cc8 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -107,6 +107,7 @@ static inline u64 ceph_sanitize_features(u64 features)
          CEPH_FEATURE_OSDMAP_ENC |              \
          CEPH_FEATURE_CRUSH_TUNABLES3 |         \
          CEPH_FEATURE_OSD_PRIMARY_AFFINITY |    \
+         CEPH_FEATURE_MSGR_KEEPALIVE2 |         \
          CEPH_FEATURE_CRUSH_V4)
 
 #define CEPH_FEATURES_REQUIRED_DEFAULT         \
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 7e1252e97a30..b2371d9b51fa 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -238,6 +238,8 @@ struct ceph_connection {
         bool out_kvec_is_msg; /* kvec refers to out_msg */
         int out_more;        /* there is more data after the kvecs */
         __le64 out_temp_ack; /* for writing an ack */
+        struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2
+                                                     stamp */
 
         /* message in temps */
         struct ceph_msg_header in_hdr;
@@ -248,7 +250,7 @@ struct ceph_connection {
         int in_base_pos;     /* bytes read */
         __le64 in_temp_ack;  /* for reading an ack */
 
-        struct timespec last_keepalive_ack;
+        struct timespec last_keepalive_ack; /* keepalive2 ack stamp */
 
         struct delayed_work work;           /* send|recv work */
         unsigned long       delay;          /* current delay interval */
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 4d8fcf2187dc..8492721b39be 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -473,31 +473,8 @@ struct cgroup_subsys {
         unsigned int depends_on;
 };
 
-extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
-
-/**
- * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
- * @tsk: target task
- *
- * Called from threadgroup_change_begin() and allows cgroup operations to
- * synchronize against threadgroup changes using a percpu_rw_semaphore.
- */
-static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
-{
-        percpu_down_read(&cgroup_threadgroup_rwsem);
-}
-
-/**
- * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
- * @tsk: target task
- *
- * Called from threadgroup_change_end(). Counterpart of
- * cgroup_threadcgroup_change_begin().
- */
-static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
-{
-        percpu_up_read(&cgroup_threadgroup_rwsem);
-}
+void cgroup_threadgroup_change_begin(struct task_struct *tsk);
+void cgroup_threadgroup_change_end(struct task_struct *tsk);
 
 #else /* CONFIG_CGROUPS */
 
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 31ce435981fe..bdcf358dfce2 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -18,15 +18,6 @@
 struct clock_event_device;
 struct module;
 
-/* Clock event mode commands for legacy ->set_mode(): OBSOLETE */
-enum clock_event_mode {
-        CLOCK_EVT_MODE_UNUSED,
-        CLOCK_EVT_MODE_SHUTDOWN,
-        CLOCK_EVT_MODE_PERIODIC,
-        CLOCK_EVT_MODE_ONESHOT,
-        CLOCK_EVT_MODE_RESUME,
-};
-
 /*
  * Possible states of a clock event device.
  *
@@ -86,16 +77,14 @@ enum clock_event_state {
  * @min_delta_ns:       minimum delta value in ns
  * @mult:               nanosecond to cycles multiplier
  * @shift:              nanoseconds to cycles divisor (power of two)
- * @mode:               operating mode, relevant only to ->set_mode(), OBSOLETE
  * @state_use_accessors:current state of the device, assigned by the core code
  * @features:           features
  * @retries:            number of forced programming retries
- * @set_mode:           legacy set mode function, only for modes <= CLOCK_EVT_MODE_RESUME.
- * @set_state_periodic: switch state to periodic, if !set_mode
- * @set_state_oneshot:  switch state to oneshot, if !set_mode
- * @set_state_oneshot_stopped: switch state to oneshot_stopped, if !set_mode
- * @set_state_shutdown: switch state to shutdown, if !set_mode
- * @tick_resume:        resume clkevt device, if !set_mode
+ * @set_state_periodic: switch state to periodic
+ * @set_state_oneshot:  switch state to oneshot
+ * @set_state_oneshot_stopped: switch state to oneshot_stopped
+ * @set_state_shutdown: switch state to shutdown
+ * @tick_resume:        resume clkevt device
  * @broadcast:          function to broadcast events
  * @min_delta_ticks:    minimum delta value in ticks stored for reconfiguration
  * @max_delta_ticks:    maximum delta value in ticks stored for reconfiguration
@@ -116,18 +105,10 @@ struct clock_event_device {
         u64                     min_delta_ns;
         u32                     mult;
         u32                     shift;
-        enum clock_event_mode   mode;
         enum clock_event_state  state_use_accessors;
         unsigned int            features;
         unsigned long           retries;
 
-        /*
-         * State transition callback(s): Only one of the two groups should be
-         * defined:
-         * - set_mode(), only for modes <= CLOCK_EVT_MODE_RESUME.
-         * - set_state_{shutdown|periodic|oneshot|oneshot_stopped}(), tick_resume().
-         */
-        void                    (*set_mode)(enum clock_event_mode mode, struct clock_event_device *);
         int                     (*set_state_periodic)(struct clock_event_device *);
         int                     (*set_state_oneshot)(struct clock_event_device *);
         int                     (*set_state_oneshot_stopped)(struct clock_event_device *);
diff --git a/include/linux/cma.h b/include/linux/cma.h
index f7ef093ec49a..29f9e774ab76 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -26,6 +26,6 @@ extern int __init cma_declare_contiguous(phys_addr_t base,
 extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
                                         unsigned int order_per_bit,
                                         struct cma **res_cma);
-extern struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align);
+extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align);
 extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
 #endif
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index dfaa7b3e9ae9..8efb40e61d6e 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -237,12 +237,25 @@
 #define KASAN_ABI_VERSION 3
 #endif
 
+#if GCC_VERSION >= 40902
+/*
+ * Tell the compiler that address safety instrumentation (KASAN)
+ * should not be applied to that function.
+ * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
+ */
+#define __no_sanitize_address __attribute__((no_sanitize_address))
+#endif
+
 #endif /* gcc version >= 40000 specific checks */
 
 #if !defined(__noclone)
 #define __noclone      /* not needed */
 #endif
 
+#if !defined(__no_sanitize_address)
+#define __no_sanitize_address
+#endif
+
 /*
  * A trick to suppress uninitialized variable warning without generating any
  * code
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index c836eb2dc44d..3d7810341b57 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -198,19 +198,45 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
 
 #include <uapi/linux/types.h>
 
-static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
+#define __READ_ONCE_SIZE                                                \
+({                                                                      \
+        switch (size) {                                                 \
+        case 1: *(__u8 *)res = *(volatile __u8 *)p; break;              \
+        case 2: *(__u16 *)res = *(volatile __u16 *)p; break;            \
+        case 4: *(__u32 *)res = *(volatile __u32 *)p; break;            \
+        case 8: *(__u64 *)res = *(volatile __u64 *)p; break;            \
+        default:                                                        \
+                barrier();                                              \
+                __builtin_memcpy((void *)res, (const void *)p, size);   \
+                barrier();                                              \
+        }                                                               \
+})
+
+static __always_inline
+void __read_once_size(const volatile void *p, void *res, int size)
 {
-        switch (size) {
-        case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
-        case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
-        case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
-        case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
-        default:
-                barrier();
-                __builtin_memcpy((void *)res, (const void *)p, size);
-                barrier();
-        }
+        __READ_ONCE_SIZE;
+}
+
+#ifdef CONFIG_KASAN
+/*
+ * This function is not 'inline' because __no_sanitize_address confilcts
+ * with inlining. Attempt to inline it may cause a build failure.
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
+ * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
+ */
+static __no_sanitize_address __maybe_unused
+void __read_once_size_nocheck(const volatile void *p, void *res, int size)
+{
+        __READ_ONCE_SIZE;
+}
+#else
+static __always_inline
+void __read_once_size_nocheck(const volatile void *p, void *res, int size)
+{
+        __READ_ONCE_SIZE;
 }
+#endif
 
 static __always_inline void __write_once_size(volatile void *p, void *res, int size)
 {
@@ -248,8 +274,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
  * required ordering.
  */
 
-#define READ_ONCE(x) \
-        ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
+#define __READ_ONCE(x, check)                                           \
+({                                                                      \
+        union { typeof(x) __val; char __c[1]; } __u;                    \
+        if (check)                                                      \
+                __read_once_size(&(x), __u.__c, sizeof(x));             \
+        else                                                            \
+                __read_once_size_nocheck(&(x), __u.__c, sizeof(x));     \
+        __u.__val;                                                      \
+})
+#define READ_ONCE(x) __READ_ONCE(x, 1)
+
+/*
+ * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
+ * to hide memory access from KASAN.
+ */
+#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
 
 #define WRITE_ONCE(x, val) \
 ({                                                      \
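READ_ONCE_NOCHECK() introduced above behaves like READ_ONCE() but routes through the non-instrumented helper so KASAN does not flag the access. As a reference for what the union/size-switch pattern does, here is a user-space re-creation of the READ_ONCE() half only; it is a hedged sketch for GCC/Clang (statement expressions, __typeof__), not the kernel macro, and has no KASAN path:

/* User-space sketch of the union + size-switch pattern; illustration only. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

static inline void __read_once_size(const volatile void *p, void *res, int size)
{
        switch (size) {
        case 1: *(uint8_t  *)res = *(volatile uint8_t  *)p; break;
        case 2: *(uint16_t *)res = *(volatile uint16_t *)p; break;
        case 4: *(uint32_t *)res = *(volatile uint32_t *)p; break;
        case 8: *(uint64_t *)res = *(volatile uint64_t *)p; break;
        default:
                barrier();
                memcpy((void *)res, (const void *)p, size);
                barrier();
        }
}

#define READ_ONCE(x)                                                     \
({                                                                       \
        union { __typeof__(x) __val; char __c[1]; } __u;                 \
        __read_once_size(&(x), __u.__c, sizeof(x));                      \
        __u.__val;                                                       \
})

int main(void)
{
        int shared = 42;
        printf("%d\n", READ_ONCE(shared));
        return 0;
}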
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 430efcbea48e..dca22de98d94 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -127,9 +127,14 @@ struct cpufreq_policy {
 #define CPUFREQ_SHARED_TYPE_ANY         (3) /* Freq can be set from any dependent CPU*/
 
 #ifdef CONFIG_CPU_FREQ
+struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
 void cpufreq_cpu_put(struct cpufreq_policy *policy);
 #else
+static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
+{
+        return NULL;
+}
 static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 {
         return NULL;
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index ce447f0f1bad..68030e22af35 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -65,7 +65,10 @@ struct devfreq_dev_status {
  *                      The "flags" parameter's possible values are
  *                      explained above with "DEVFREQ_FLAG_*" macros.
  * @get_dev_status:     The device should provide the current performance
- *                      status to devfreq, which is used by governors.
+ *                      status to devfreq. Governors are recommended not to
+ *                      use this directly. Instead, governors are recommended
+ *                      to use devfreq_update_stats() along with
+ *                      devfreq.last_status.
  * @get_cur_freq:       The device should provide the current frequency
  *                      at which it is operating.
  * @exit:               An optional callback that is called when devfreq
@@ -161,6 +164,7 @@ struct devfreq {
         struct delayed_work work;
 
         unsigned long previous_freq;
+        struct devfreq_dev_status last_status;
 
         void *data; /* private data for governors */
 
@@ -204,6 +208,19 @@ extern int devm_devfreq_register_opp_notifier(struct device *dev,
 extern void devm_devfreq_unregister_opp_notifier(struct device *dev,
                                                 struct devfreq *devfreq);
 
+/**
+ * devfreq_update_stats() - update the last_status pointer in struct devfreq
+ * @df:         the devfreq instance whose status needs updating
+ *
+ * Governors are recommended to use this function along with last_status,
+ * which allows other entities to reuse the last_status without affecting
+ * the values fetched later by governors.
+ */
+static inline int devfreq_update_stats(struct devfreq *df)
+{
+        return df->profile->get_dev_status(df->dev.parent, &df->last_status);
+}
+
 #if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
 /**
  * struct devfreq_simple_ondemand_data - void *data fed to struct devfreq
@@ -289,6 +306,11 @@ static inline void devm_devfreq_unregister_opp_notifier(struct device *dev,
                                                 struct devfreq *devfreq)
 {
 }
+
+static inline int devfreq_update_stats(struct devfreq *df)
+{
+        return -EINVAL;
+}
 #endif /* CONFIG_PM_DEVFREQ */
 
 #endif /* __LINUX_DEVFREQ_H__ */
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
index 569bbd039896..fec734df1524 100644
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h
@@ -111,7 +111,7 @@ static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size,
         return ret;
 }
 
-struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
                                        unsigned int order);
 bool dma_release_from_contiguous(struct device *dev, struct page *pages,
                                  int count);
@@ -144,7 +144,7 @@ int dma_declare_contiguous(struct device *dev, phys_addr_t size,
 }
 
 static inline
-struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
                                        unsigned int order)
 {
         return NULL;
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index d0b380ee7d67..e38681f4912d 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -25,6 +25,13 @@
 extern struct files_struct init_files;
 extern struct fs_struct init_fs;
 
+#ifdef CONFIG_CGROUPS
+#define INIT_GROUP_RWSEM(sig)                                           \
+        .group_rwsem = __RWSEM_INITIALIZER(sig.group_rwsem),
+#else
+#define INIT_GROUP_RWSEM(sig)
+#endif
+
 #ifdef CONFIG_CPUSETS
 #define INIT_CPUSET_SEQ(tsk)                                            \
         .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
@@ -57,6 +64,7 @@ extern struct fs_struct init_fs;
         INIT_PREV_CPUTIME(sig)                                          \
         .cred_guard_mutex =                                             \
                  __MUTEX_INITIALIZER(sig.cred_guard_mutex),             \
+        INIT_GROUP_RWSEM(sig)                                           \
 }
 
 extern struct nsproxy init_nsproxy;
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 3920a19d8194..92f7177db2ce 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -68,8 +68,8 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
         return iova >> iova_shift(iovad);
 }
 
-int iommu_iova_cache_init(void);
-void iommu_iova_cache_destroy(void);
+int iova_cache_get(void);
+void iova_cache_put(void);
 
 struct iova *alloc_iova_mem(void);
 void free_iova_mem(struct iova *iova);
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 6f8b34066442..11bf09288ddb 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -110,8 +110,8 @@ enum {
 /*
  * Return value for chip->irq_set_affinity()
  *
- * IRQ_SET_MASK_OK      - OK, core updates irq_data.affinity
- * IRQ_SET_MASK_NOCPY   - OK, chip did update irq_data.affinity
+ * IRQ_SET_MASK_OK      - OK, core updates irq_common_data.affinity
+ * IRQ_SET_MASK_NOCPY   - OK, chip did update irq_common_data.affinity
  * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to
  *                        support stacked irqchips, which indicates skipping
  *                        all descendent irqchips.
@@ -129,9 +129,19 @@ struct irq_domain;
  * struct irq_common_data - per irq data shared by all irqchips
  * @state_use_accessors: status information for irq chip functions.
  *                      Use accessor functions to deal with it
+ * @node:               node index useful for balancing
+ * @handler_data:       per-IRQ data for the irq_chip methods
+ * @affinity:           IRQ affinity on SMP
+ * @msi_desc:           MSI descriptor
  */
 struct irq_common_data {
         unsigned int            state_use_accessors;
+#ifdef CONFIG_NUMA
+        unsigned int            node;
+#endif
+        void                    *handler_data;
+        struct msi_desc         *msi_desc;
+        cpumask_var_t           affinity;
 };
 
 /**
@@ -139,38 +149,26 @@ struct irq_common_data {
  * @mask:               precomputed bitmask for accessing the chip registers
  * @irq:                interrupt number
  * @hwirq:              hardware interrupt number, local to the interrupt domain
- * @node:               node index useful for balancing
  * @common:             point to data shared by all irqchips
  * @chip:               low level interrupt hardware access
  * @domain:             Interrupt translation domain; responsible for mapping
  *                      between hwirq number and linux irq number.
  * @parent_data:        pointer to parent struct irq_data to support hierarchy
  *                      irq_domain
- * @handler_data:       per-IRQ data for the irq_chip methods
 * @chip_data:          platform-specific per-chip private data for the chip
 *                      methods, to allow shared chip implementations
- * @msi_desc:           MSI descriptor
- * @affinity:           IRQ affinity on SMP
- *
- * The fields here need to overlay the ones in irq_desc until we
- * cleaned up the direct references and switched everything over to
- * irq_data.
  */
 struct irq_data {
         u32                     mask;
         unsigned int            irq;
         unsigned long           hwirq;
-        unsigned int            node;
         struct irq_common_data  *common;
         struct irq_chip         *chip;
         struct irq_domain       *domain;
 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
         struct irq_data         *parent_data;
 #endif
-        void                    *handler_data;
         void                    *chip_data;
-        struct msi_desc         *msi_desc;
-        cpumask_var_t           affinity;
 };
 
 /*
@@ -190,6 +188,7 @@ struct irq_data {
 * IRQD_IRQ_MASKED              - Masked state of the interrupt
 * IRQD_IRQ_INPROGRESS          - In progress state of the interrupt
 * IRQD_WAKEUP_ARMED            - Wakeup mode armed
+ * IRQD_FORWARDED_TO_VCPU      - The interrupt is forwarded to a VCPU
 */
 enum {
         IRQD_TRIGGER_MASK               = 0xf,
@@ -204,6 +203,7 @@ enum {
         IRQD_IRQ_MASKED                 = (1 << 17),
         IRQD_IRQ_INPROGRESS             = (1 << 18),
         IRQD_WAKEUP_ARMED               = (1 << 19),
+        IRQD_FORWARDED_TO_VCPU          = (1 << 20),
 };
 
 #define __irqd_to_state(d) ((d)->common->state_use_accessors)
@@ -282,6 +282,20 @@ static inline bool irqd_is_wakeup_armed(struct irq_data *d)
         return __irqd_to_state(d) & IRQD_WAKEUP_ARMED;
 }
 
+static inline bool irqd_is_forwarded_to_vcpu(struct irq_data *d)
+{
+        return __irqd_to_state(d) & IRQD_FORWARDED_TO_VCPU;
+}
+
+static inline void irqd_set_forwarded_to_vcpu(struct irq_data *d)
+{
+        __irqd_to_state(d) |= IRQD_FORWARDED_TO_VCPU;
+}
+
+static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
+{
+        __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
+}
 
 /*
  * Functions for chained handlers which can be enabled/disabled by the
@@ -461,14 +475,14 @@ static inline int irq_set_parent(int irq, int parent_irq)
 * Built-in IRQ handlers for various IRQ types,
 * callable via desc->handle_irq()
 */
-extern void handle_level_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc);
-extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_level_irq(struct irq_desc *desc);
+extern void handle_fasteoi_irq(struct irq_desc *desc);
+extern void handle_edge_irq(struct irq_desc *desc);
+extern void handle_edge_eoi_irq(struct irq_desc *desc);
+extern void handle_simple_irq(struct irq_desc *desc);
+extern void handle_percpu_irq(struct irq_desc *desc);
+extern void handle_percpu_devid_irq(struct irq_desc *desc);
+extern void handle_bad_irq(struct irq_desc *desc);
 extern void handle_nested_irq(unsigned int irq);
 
 extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
@@ -627,23 +641,23 @@ static inline void *irq_data_get_irq_chip_data(struct irq_data *d)
 static inline void *irq_get_handler_data(unsigned int irq)
 {
         struct irq_data *d = irq_get_irq_data(irq);
-        return d ? d->handler_data : NULL;
+        return d ? d->common->handler_data : NULL;
 }
 
 static inline void *irq_data_get_irq_handler_data(struct irq_data *d)
 {
-        return d->handler_data;
+        return d->common->handler_data;
 }
 
 static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
 {
         struct irq_data *d = irq_get_irq_data(irq);
-        return d ? d->msi_desc : NULL;
+        return d ? d->common->msi_desc : NULL;
 }
 
 static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d)
 {
-        return d->msi_desc;
+        return d->common->msi_desc;
 }
 
 static inline u32 irq_get_trigger_type(unsigned int irq)
@@ -652,21 +666,30 @@ static inline u32 irq_get_trigger_type(unsigned int irq)
         return d ? irqd_get_trigger_type(d) : 0;
 }
 
-static inline int irq_data_get_node(struct irq_data *d)
+static inline int irq_common_data_get_node(struct irq_common_data *d)
 {
+#ifdef CONFIG_NUMA
         return d->node;
+#else
+        return 0;
+#endif
+}
+
+static inline int irq_data_get_node(struct irq_data *d)
+{
+        return irq_common_data_get_node(d->common);
 }
 
 static inline struct cpumask *irq_get_affinity_mask(int irq)
 {
         struct irq_data *d = irq_get_irq_data(irq);
 
-        return d ? d->affinity : NULL;
+        return d ? d->common->affinity : NULL;
 }
 
 static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
 {
-        return d->affinity;
+        return d->common->affinity;
 }
 
 unsigned int arch_dynirq_lower_bound(unsigned int from);
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 5acfa26602e1..a587a33363c7 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -98,11 +98,7 @@ extern struct irq_desc irq_desc[NR_IRQS];
 
 static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
 {
-#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
-        return irq_to_desc(data->irq);
-#else
-        return container_of(data, struct irq_desc, irq_data);
-#endif
+        return container_of(data->common, struct irq_desc, irq_common_data);
 }
 
 static inline unsigned int irq_desc_get_irq(struct irq_desc *desc)
@@ -127,23 +123,21 @@ static inline void *irq_desc_get_chip_data(struct irq_desc *desc)
 
 static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
 {
-        return desc->irq_data.handler_data;
+        return desc->irq_common_data.handler_data;
 }
 
 static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
 {
-        return desc->irq_data.msi_desc;
+        return desc->irq_common_data.msi_desc;
 }
 
 /*
 * Architectures call this to let the generic IRQ layer
- * handle an interrupt. If the descriptor is attached to an
- * irqchip-style controller then we call the ->handle_irq() handler,
- * and it calls __do_IRQ() if it's attached to an irqtype-style controller.
+ * handle an interrupt.
 */
-static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
+static inline void generic_handle_irq_desc(struct irq_desc *desc)
 {
-        desc->handle_irq(irq, desc);
+        desc->handle_irq(desc);
 }
 
 int generic_handle_irq(unsigned int irq);
@@ -176,29 +170,6 @@ static inline int irq_has_action(unsigned int irq)
         return irq_desc_has_action(irq_to_desc(irq));
 }
 
-/* caller has locked the irq_desc and both params are valid */
-static inline void __irq_set_handler_locked(unsigned int irq,
-                                            irq_flow_handler_t handler)
-{
-        struct irq_desc *desc;
-
-        desc = irq_to_desc(irq);
-        desc->handle_irq = handler;
-}
-
-/* caller has locked the irq_desc and both params are valid */
-static inline void
-__irq_set_chip_handler_name_locked(unsigned int irq, struct irq_chip *chip,
-                                   irq_flow_handler_t handler, const char *name)
-{
-        struct irq_desc *desc;
-
-        desc = irq_to_desc(irq);
-        irq_desc_get_irq_data(desc)->chip = chip;
-        desc->handle_irq = handler;
-        desc->name = name;
-}
-
 /**
 * irq_set_handler_locked - Set irq handler from a locked region
 * @data:        Pointer to the irq_data structure which identifies the irq
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index d3ca79236fb0..f644fdb06dd6 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -161,6 +161,11 @@ enum {
161 IRQ_DOMAIN_FLAG_NONCORE = (1 << 16), 161 IRQ_DOMAIN_FLAG_NONCORE = (1 << 16),
162}; 162};
163 163
164static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d)
165{
166 return d->of_node;
167}
168
164#ifdef CONFIG_IRQ_DOMAIN 169#ifdef CONFIG_IRQ_DOMAIN
165struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, 170struct irq_domain *__irq_domain_add(struct device_node *of_node, int size,
166 irq_hw_number_t hwirq_max, int direct_max, 171 irq_hw_number_t hwirq_max, int direct_max,
diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h
index 62d543004197..661bed0ed1f3 100644
--- a/include/linux/irqhandler.h
+++ b/include/linux/irqhandler.h
@@ -8,7 +8,7 @@
8 8
9struct irq_desc; 9struct irq_desc;
10struct irq_data; 10struct irq_data;
11typedef void (*irq_flow_handler_t)(unsigned int irq, struct irq_desc *desc); 11typedef void (*irq_flow_handler_t)(struct irq_desc *desc);
12typedef void (*irq_preflow_handler_t)(struct irq_data *data); 12typedef void (*irq_preflow_handler_t)(struct irq_data *data);
13 13
14#endif 14#endif
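With the irq argument dropped from irq_flow_handler_t, a handler that still wants the Linux irq number derives it from the descriptor. A sketch of a chained handler under that assumption (my_demux is a hypothetical helper):

static void my_cascade_handler(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        unsigned int irq = irq_desc_get_irq(desc); /* recovered, not passed in */

        chained_irq_enter(chip, desc);
        pr_debug("cascade on irq %u\n", irq);
        my_demux(irq_desc_get_handler_data(desc));
        chained_irq_exit(chip, desc);
}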
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 7f653e8f6690..f1094238ab2a 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -21,8 +21,8 @@
21 * 21 *
22 * DEFINE_STATIC_KEY_TRUE(key); 22 * DEFINE_STATIC_KEY_TRUE(key);
23 * DEFINE_STATIC_KEY_FALSE(key); 23 * DEFINE_STATIC_KEY_FALSE(key);
24 * static_key_likely() 24 * static_branch_likely()
25 * statick_key_unlikely() 25 * static_branch_unlikely()
26 * 26 *
27 * Jump labels provide an interface to generate dynamic branches using 27 * Jump labels provide an interface to generate dynamic branches using
28 * self-modifying code. Assuming toolchain and architecture support, if we 28 * self-modifying code. Assuming toolchain and architecture support, if we
@@ -45,12 +45,10 @@
45 * statement, setting the key to true requires us to patch in a jump 45 * statement, setting the key to true requires us to patch in a jump
46 * to the out-of-line of true branch. 46 * to the out-of-line of true branch.
47 * 47 *
48 * In addtion to static_branch_{enable,disable}, we can also reference count 48 * In addition to static_branch_{enable,disable}, we can also reference count
49 * the key or branch direction via static_branch_{inc,dec}. Thus, 49 * the key or branch direction via static_branch_{inc,dec}. Thus,
50 * static_branch_inc() can be thought of as a 'make more true' and 50 * static_branch_inc() can be thought of as a 'make more true' and
51 * static_branch_dec() as a 'make more false'. The inc()/dec() 51 * static_branch_dec() as a 'make more false'.
52 * interface is meant to be used exclusively from the inc()/dec() for a given
53 * key.
54 * 52 *
55 * Since this relies on modifying code, the branch modifying functions 53 * Since this relies on modifying code, the branch modifying functions
56 * must be considered absolute slow paths (machine wide synchronization etc.). 54 * must be considered absolute slow paths (machine wide synchronization etc.).
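A minimal usage sketch of the renamed helpers (names other than the static-key API are hypothetical): define a default-false key, test it on the hot path, and flip it from a slow path.

DEFINE_STATIC_KEY_FALSE(my_feature_key);

static void my_hot_path(void)
{
        if (static_branch_unlikely(&my_feature_key))
                my_feature_extra_work();        /* out-of-line until enabled */
}

static void my_feature_enable(void)
{
        static_branch_enable(&my_feature_key);  /* patches the branch, slow path only */
}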
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index ad800e62cb7a..3e3318ddfc0e 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -242,7 +242,6 @@ struct mem_cgroup {
242 * percpu counter. 242 * percpu counter.
243 */ 243 */
244 struct mem_cgroup_stat_cpu __percpu *stat; 244 struct mem_cgroup_stat_cpu __percpu *stat;
245 spinlock_t pcp_counter_lock;
246 245
247#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET) 246#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
248 struct cg_proto tcp_mem; 247 struct cg_proto tcp_mem;
@@ -677,8 +676,9 @@ enum {
677 676
678struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg); 677struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
679struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb); 678struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
680void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pavail, 679void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
681 unsigned long *pdirty, unsigned long *pwriteback); 680 unsigned long *pheadroom, unsigned long *pdirty,
681 unsigned long *pwriteback);
682 682
683#else /* CONFIG_CGROUP_WRITEBACK */ 683#else /* CONFIG_CGROUP_WRITEBACK */
684 684
@@ -688,7 +688,8 @@ static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
688} 688}
689 689
690static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb, 690static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
691 unsigned long *pavail, 691 unsigned long *pfilepages,
692 unsigned long *pheadroom,
692 unsigned long *pdirty, 693 unsigned long *pdirty,
693 unsigned long *pwriteback) 694 unsigned long *pwriteback)
694{ 695{
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 8eb3b19af2a4..250b1ff8b48d 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -402,17 +402,6 @@ struct mlx5_cmd_teardown_hca_mbox_out {
402 u8 rsvd[8]; 402 u8 rsvd[8];
403}; 403};
404 404
405struct mlx5_cmd_query_special_contexts_mbox_in {
406 struct mlx5_inbox_hdr hdr;
407 u8 rsvd[8];
408};
409
410struct mlx5_cmd_query_special_contexts_mbox_out {
411 struct mlx5_outbox_hdr hdr;
412 __be32 dump_fill_mkey;
413 __be32 resd_lkey;
414};
415
416struct mlx5_cmd_layout { 405struct mlx5_cmd_layout {
417 u8 type; 406 u8 type;
418 u8 rsvd0[3]; 407 u8 rsvd0[3];
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 27b53f9a24ad..8b6d6f2154a4 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -845,7 +845,6 @@ void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
845int mlx5_register_interface(struct mlx5_interface *intf); 845int mlx5_register_interface(struct mlx5_interface *intf);
846void mlx5_unregister_interface(struct mlx5_interface *intf); 846void mlx5_unregister_interface(struct mlx5_interface *intf);
847int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); 847int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
848int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey);
849 848
850struct mlx5_profile { 849struct mlx5_profile {
851 u64 mask; 850 u64 mask;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 91c08f6f0dc9..80001de019ba 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -905,6 +905,27 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
905#endif 905#endif
906} 906}
907 907
908#ifdef CONFIG_MEMCG
909static inline struct mem_cgroup *page_memcg(struct page *page)
910{
911 return page->mem_cgroup;
912}
913
914static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
915{
916 page->mem_cgroup = memcg;
917}
918#else
919static inline struct mem_cgroup *page_memcg(struct page *page)
920{
921 return NULL;
922}
923
924static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
925{
926}
927#endif
928
908/* 929/*
909 * Some inline functions in vmstat.h depend on page_zone() 930 * Some inline functions in vmstat.h depend on page_zone()
910 */ 931 */
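The wrappers above let generic code touch page->mem_cgroup without its own #ifdef CONFIG_MEMCG; with memcg disabled they collapse to NULL / no-op. A one-line caller sketch:

static bool page_owned_by(struct page *page, struct mem_cgroup *memcg)
{
        return page_memcg(page) == memcg;       /* page_memcg() is NULL on !CONFIG_MEMCG kernels */
}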
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 88a00694eda5..210d11a75e4f 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -507,6 +507,7 @@ static inline void napi_enable(struct napi_struct *n)
507 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); 507 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
508 smp_mb__before_atomic(); 508 smp_mb__before_atomic();
509 clear_bit(NAPI_STATE_SCHED, &n->state); 509 clear_bit(NAPI_STATE_SCHED, &n->state);
510 clear_bit(NAPI_STATE_NPSVC, &n->state);
510} 511}
511 512
512#ifdef CONFIG_SMP 513#ifdef CONFIG_SMP
@@ -1053,6 +1054,10 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
1053 * This function is used to pass protocol port error state information 1054 * This function is used to pass protocol port error state information
1054 * to the switch driver. The switch driver can react to the proto_down 1055 * to the switch driver. The switch driver can react to the proto_down
1055 * by doing a phys down on the associated switch port. 1056 * by doing a phys down on the associated switch port.
1057 * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
1058 * This function is used to get egress tunnel information for given skb.
1059 * This is useful for retrieving outer tunnel header parameters while
1060 * sampling packet.
1056 * 1061 *
1057 */ 1062 */
1058struct net_device_ops { 1063struct net_device_ops {
@@ -1226,6 +1231,8 @@ struct net_device_ops {
1226 int (*ndo_get_iflink)(const struct net_device *dev); 1231 int (*ndo_get_iflink)(const struct net_device *dev);
1227 int (*ndo_change_proto_down)(struct net_device *dev, 1232 int (*ndo_change_proto_down)(struct net_device *dev,
1228 bool proto_down); 1233 bool proto_down);
1234 int (*ndo_fill_metadata_dst)(struct net_device *dev,
1235 struct sk_buff *skb);
1229}; 1236};
1230 1237
1231/** 1238/**
@@ -2202,6 +2209,7 @@ void dev_add_offload(struct packet_offload *po);
2202void dev_remove_offload(struct packet_offload *po); 2209void dev_remove_offload(struct packet_offload *po);
2203 2210
2204int dev_get_iflink(const struct net_device *dev); 2211int dev_get_iflink(const struct net_device *dev);
2212int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
2205struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, 2213struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
2206 unsigned short mask); 2214 unsigned short mask);
2207struct net_device *dev_get_by_name(struct net *net, const char *name); 2215struct net_device *dev_get_by_name(struct net *net, const char *name);
diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h
index e5a70132a240..88fa8af2b937 100644
--- a/include/linux/omap-dma.h
+++ b/include/linux/omap-dma.h
@@ -17,7 +17,7 @@
17 17
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19 19
20#define INT_DMA_LCD 25 20#define INT_DMA_LCD (NR_IRQS_LEGACY + 25)
21 21
22#define OMAP1_DMA_TOUT_IRQ (1 << 0) 22#define OMAP1_DMA_TOUT_IRQ (1 << 0)
23#define OMAP_DMA_DROP_IRQ (1 << 1) 23#define OMAP_DMA_DROP_IRQ (1 << 1)
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 962387a192f1..4a4e3a092337 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -19,6 +19,7 @@
19#include <linux/spinlock.h> 19#include <linux/spinlock.h>
20#include <linux/ethtool.h> 20#include <linux/ethtool.h>
21#include <linux/mii.h> 21#include <linux/mii.h>
22#include <linux/module.h>
22#include <linux/timer.h> 23#include <linux/timer.h>
23#include <linux/workqueue.h> 24#include <linux/workqueue.h>
24#include <linux/mod_devicetable.h> 25#include <linux/mod_devicetable.h>
@@ -153,6 +154,7 @@ struct sk_buff;
153 * PHYs should register using this structure 154 * PHYs should register using this structure
154 */ 155 */
155struct mii_bus { 156struct mii_bus {
157 struct module *owner;
156 const char *name; 158 const char *name;
157 char id[MII_BUS_ID_SIZE]; 159 char id[MII_BUS_ID_SIZE];
158 void *priv; 160 void *priv;
@@ -198,7 +200,8 @@ static inline struct mii_bus *mdiobus_alloc(void)
198 return mdiobus_alloc_size(0); 200 return mdiobus_alloc_size(0);
199} 201}
200 202
201int mdiobus_register(struct mii_bus *bus); 203int __mdiobus_register(struct mii_bus *bus, struct module *owner);
204#define mdiobus_register(bus) __mdiobus_register(bus, THIS_MODULE)
202void mdiobus_unregister(struct mii_bus *bus); 205void mdiobus_unregister(struct mii_bus *bus);
203void mdiobus_free(struct mii_bus *bus); 206void mdiobus_free(struct mii_bus *bus);
204struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv); 207struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv);
@@ -742,6 +745,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
742 struct phy_c45_device_ids *c45_ids); 745 struct phy_c45_device_ids *c45_ids);
743struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45); 746struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45);
744int phy_device_register(struct phy_device *phy); 747int phy_device_register(struct phy_device *phy);
748void phy_device_remove(struct phy_device *phydev);
745int phy_init_hw(struct phy_device *phydev); 749int phy_init_hw(struct phy_device *phydev);
746int phy_suspend(struct phy_device *phydev); 750int phy_suspend(struct phy_device *phydev);
747int phy_resume(struct phy_device *phydev); 751int phy_resume(struct phy_device *phydev);
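Callers keep writing mdiobus_register(bus); the macro now records THIS_MODULE as the bus owner so the MDIO core can pin the providing module. A probe sketch assuming hypothetical my_mdio_read/my_mdio_write accessors:

static int my_mdio_probe(struct platform_device *pdev)
{
        struct mii_bus *bus = devm_mdiobus_alloc_size(&pdev->dev, 0);

        if (!bus)
                return -ENOMEM;

        bus->name = "my-mdio";
        bus->parent = &pdev->dev;
        bus->read = my_mdio_read;
        bus->write = my_mdio_write;
        snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(&pdev->dev));

        /* expands to __mdiobus_register(bus, THIS_MODULE) */
        return mdiobus_register(bus);
}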
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index ff476515f716..581abf848566 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -230,12 +230,11 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
230 struct rcu_synchronize *rs_array); 230 struct rcu_synchronize *rs_array);
231 231
232#define _wait_rcu_gp(checktiny, ...) \ 232#define _wait_rcu_gp(checktiny, ...) \
233do { \ 233do { \
234 call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ 234 call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \
235 const int __n = ARRAY_SIZE(__crcu_array); \ 235 struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \
236 struct rcu_synchronize __rs_array[__n]; \ 236 __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \
237 \ 237 __crcu_array, __rs_array); \
238 __wait_rcu_gp(checktiny, __n, __crcu_array, __rs_array); \
239} while (0) 238} while (0)
240 239
241#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) 240#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__)
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 45932228cbf5..9c2903e58adb 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -245,6 +245,7 @@ enum regulator_type {
245 * @linear_min_sel: Minimal selector for starting linear mapping 245 * @linear_min_sel: Minimal selector for starting linear mapping
246 * @fixed_uV: Fixed voltage of rails. 246 * @fixed_uV: Fixed voltage of rails.
247 * @ramp_delay: Time to settle down after voltage change (unit: uV/us) 247 * @ramp_delay: Time to settle down after voltage change (unit: uV/us)
248 * @min_dropout_uV: The minimum dropout voltage this regulator can handle
248 * @linear_ranges: A constant table of possible voltage ranges. 249 * @linear_ranges: A constant table of possible voltage ranges.
249 * @n_linear_ranges: Number of entries in the @linear_ranges table. 250 * @n_linear_ranges: Number of entries in the @linear_ranges table.
250 * @volt_table: Voltage mapping table (if table based mapping) 251 * @volt_table: Voltage mapping table (if table based mapping)
@@ -292,6 +293,7 @@ struct regulator_desc {
292 unsigned int linear_min_sel; 293 unsigned int linear_min_sel;
293 int fixed_uV; 294 int fixed_uV;
294 unsigned int ramp_delay; 295 unsigned int ramp_delay;
296 int min_dropout_uV;
295 297
296 const struct regulator_linear_range *linear_ranges; 298 const struct regulator_linear_range *linear_ranges;
297 int n_linear_ranges; 299 int n_linear_ranges;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a4ab9daa387c..b7b9501b41af 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -762,6 +762,18 @@ struct signal_struct {
762 unsigned audit_tty_log_passwd; 762 unsigned audit_tty_log_passwd;
763 struct tty_audit_buf *tty_audit_buf; 763 struct tty_audit_buf *tty_audit_buf;
764#endif 764#endif
765#ifdef CONFIG_CGROUPS
766 /*
767 * group_rwsem prevents new tasks from entering the threadgroup and
768 * member tasks from exiting, more specifically, setting of
768 * member tasks from exiting, more specifically, setting of
769 * PF_EXITING. fork and exit paths are protected with this rwsem
770 * using threadgroup_change_begin/end(). Users which require
771 * threadgroup to remain stable should use threadgroup_[un]lock()
772 * which also takes care of exec path. Currently, cgroup is the
773 * only user.
774 */
775 struct rw_semaphore group_rwsem;
776#endif
765 777
766 oom_flags_t oom_flags; 778 oom_flags_t oom_flags;
767 short oom_score_adj; /* OOM kill score adjustment */ 779 short oom_score_adj; /* OOM kill score adjustment */
diff --git a/include/linux/security.h b/include/linux/security.h
index 79d85ddf8093..2f4c1f7aa7db 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -946,7 +946,7 @@ static inline int security_task_prctl(int option, unsigned long arg2,
946 unsigned long arg4, 946 unsigned long arg4,
947 unsigned long arg5) 947 unsigned long arg5)
948{ 948{
949 return cap_task_prctl(option, arg2, arg3, arg3, arg5); 949 return cap_task_prctl(option, arg2, arg3, arg4, arg5);
950} 950}
951 951
952static inline void security_task_to_inode(struct task_struct *p, struct inode *inode) 952static inline void security_task_to_inode(struct task_struct *p, struct inode *inode)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 2738d355cdf9..4398411236f1 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -179,6 +179,9 @@ struct nf_bridge_info {
179 u8 bridged_dnat:1; 179 u8 bridged_dnat:1;
180 __u16 frag_max_size; 180 __u16 frag_max_size;
181 struct net_device *physindev; 181 struct net_device *physindev;
182
183 /* always valid & non-NULL from FORWARD on, for physdev match */
184 struct net_device *physoutdev;
182 union { 185 union {
183 /* prerouting: detect dnat in orig/reply direction */ 186 /* prerouting: detect dnat in orig/reply direction */
184 __be32 ipv4_daddr; 187 __be32 ipv4_daddr;
@@ -189,9 +192,6 @@ struct nf_bridge_info {
189 * skb is out in neigh layer. 192 * skb is out in neigh layer.
190 */ 193 */
191 char neigh_header[8]; 194 char neigh_header[8];
192
193 /* always valid & non-NULL from FORWARD on, for physdev match */
194 struct net_device *physoutdev;
195 }; 195 };
196}; 196};
197#endif 197#endif
@@ -2707,6 +2707,9 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb,
2707{ 2707{
2708 if (skb->ip_summed == CHECKSUM_COMPLETE) 2708 if (skb->ip_summed == CHECKSUM_COMPLETE)
2709 skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); 2709 skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
2710 else if (skb->ip_summed == CHECKSUM_PARTIAL &&
2711 skb_checksum_start_offset(skb) < 0)
2712 skb->ip_summed = CHECKSUM_NONE;
2710} 2713}
2711 2714
2712unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); 2715unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 269e8afd3e2a..6b00f18f5e6b 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -34,7 +34,7 @@ extern struct bus_type spi_bus_type;
34 34
35/** 35/**
36 * struct spi_statistics - statistics for spi transfers 36 * struct spi_statistics - statistics for spi transfers
37 * @clock: lock protecting this structure 37 * @lock: lock protecting this structure
38 * 38 *
39 * @messages: number of spi-messages handled 39 * @messages: number of spi-messages handled
40 * @transfers: number of spi_transfers handled 40 * @transfers: number of spi_transfers handled
diff --git a/include/linux/string.h b/include/linux/string.h
index a8d90db9c4b0..9ef7795e65e4 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -25,6 +25,9 @@ extern char * strncpy(char *,const char *, __kernel_size_t);
25#ifndef __HAVE_ARCH_STRLCPY 25#ifndef __HAVE_ARCH_STRLCPY
26size_t strlcpy(char *, const char *, size_t); 26size_t strlcpy(char *, const char *, size_t);
27#endif 27#endif
28#ifndef __HAVE_ARCH_STRSCPY
29ssize_t __must_check strscpy(char *, const char *, size_t);
30#endif
28#ifndef __HAVE_ARCH_STRCAT 31#ifndef __HAVE_ARCH_STRCAT
29extern char * strcat(char *, const char *); 32extern char * strcat(char *, const char *);
30#endif 33#endif
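Usage sketch for the new helper: unlike strlcpy(), strscpy() reports truncation directly as -E2BIG (while still NUL-terminating the destination), so callers need not re-measure the source.

static int copy_label(char *dst, size_t dstlen, const char *src)
{
        ssize_t n = strscpy(dst, src, dstlen);

        if (n < 0)              /* -E2BIG: src did not fit, dst still NUL-terminated */
                return -ENAMETOOLONG;
        return 0;
}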
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
index 7591788e9fbf..357e44c1a46b 100644
--- a/include/linux/sunrpc/xprtsock.h
+++ b/include/linux/sunrpc/xprtsock.h
@@ -42,6 +42,7 @@ struct sock_xprt {
42 /* 42 /*
43 * Connection of transports 43 * Connection of transports
44 */ 44 */
45 unsigned long sock_state;
45 struct delayed_work connect_worker; 46 struct delayed_work connect_worker;
46 struct sockaddr_storage srcaddr; 47 struct sockaddr_storage srcaddr;
47 unsigned short srcport; 48 unsigned short srcport;
@@ -76,6 +77,8 @@ struct sock_xprt {
76 */ 77 */
77#define TCP_RPC_REPLY (1UL << 6) 78#define TCP_RPC_REPLY (1UL << 6)
78 79
80#define XPRT_SOCK_CONNECTING 1U
81
79#endif /* __KERNEL__ */ 82#endif /* __KERNEL__ */
80 83
81#endif /* _LINUX_SUNRPC_XPRTSOCK_H */ 84#endif /* _LINUX_SUNRPC_XPRTSOCK_H */
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 17292fee8686..157d366e761b 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -360,7 +360,7 @@ static inline struct thermal_zone_device *
360thermal_zone_of_sensor_register(struct device *dev, int id, void *data, 360thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
361 const struct thermal_zone_of_device_ops *ops) 361 const struct thermal_zone_of_device_ops *ops)
362{ 362{
363 return NULL; 363 return ERR_PTR(-ENODEV);
364} 364}
365 365
366static inline 366static inline
@@ -380,6 +380,8 @@ static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
380 380
381int power_actor_get_max_power(struct thermal_cooling_device *, 381int power_actor_get_max_power(struct thermal_cooling_device *,
382 struct thermal_zone_device *tz, u32 *max_power); 382 struct thermal_zone_device *tz, u32 *max_power);
383int power_actor_get_min_power(struct thermal_cooling_device *,
384 struct thermal_zone_device *tz, u32 *min_power);
383int power_actor_set_power(struct thermal_cooling_device *, 385int power_actor_set_power(struct thermal_cooling_device *,
384 struct thermal_instance *, u32); 386 struct thermal_instance *, u32);
385struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, 387struct thermal_zone_device *thermal_zone_device_register(const char *, int, int,
@@ -415,6 +417,10 @@ static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
415static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev, 417static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev,
416 struct thermal_zone_device *tz, u32 *max_power) 418 struct thermal_zone_device *tz, u32 *max_power)
417{ return 0; } 419{ return 0; }
420static inline int power_actor_get_min_power(struct thermal_cooling_device *cdev,
421 struct thermal_zone_device *tz,
422 u32 *min_power)
423{ return -ENODEV; }
418static inline int power_actor_set_power(struct thermal_cooling_device *cdev, 424static inline int power_actor_set_power(struct thermal_cooling_device *cdev,
419 struct thermal_instance *tz, u32 power) 425 struct thermal_instance *tz, u32 power)
420{ return 0; } 426{ return 0; }
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 48d901f83f92..e312219ff823 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -147,11 +147,20 @@ static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
147 cpumask_or(mask, mask, tick_nohz_full_mask); 147 cpumask_or(mask, mask, tick_nohz_full_mask);
148} 148}
149 149
150static inline int housekeeping_any_cpu(void)
151{
152 return cpumask_any_and(housekeeping_mask, cpu_online_mask);
153}
154
150extern void tick_nohz_full_kick(void); 155extern void tick_nohz_full_kick(void);
151extern void tick_nohz_full_kick_cpu(int cpu); 156extern void tick_nohz_full_kick_cpu(int cpu);
152extern void tick_nohz_full_kick_all(void); 157extern void tick_nohz_full_kick_all(void);
153extern void __tick_nohz_task_switch(void); 158extern void __tick_nohz_task_switch(void);
154#else 159#else
160static inline int housekeeping_any_cpu(void)
161{
162 return smp_processor_id();
163}
155static inline bool tick_nohz_full_enabled(void) { return false; } 164static inline bool tick_nohz_full_enabled(void) { return false; }
156static inline bool tick_nohz_full_cpu(int cpu) { return false; } 165static inline bool tick_nohz_full_cpu(int cpu) { return false; }
157static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { } 166static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
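A caller-side sketch of the new helper: work that should stay off nohz_full CPUs can be queued on an arbitrary housekeeping CPU; without NO_HZ_FULL this degenerates to the current CPU.

static void my_kick_background_work(struct work_struct *work)
{
        /* pick any CPU that still takes the timekeeping/housekeeping load */
        schedule_work_on(housekeeping_any_cpu(), work);
}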
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
index 3dd5a781da99..bfb74723f151 100644
--- a/include/linux/usb/renesas_usbhs.h
+++ b/include/linux/usb/renesas_usbhs.h
@@ -157,7 +157,7 @@ struct renesas_usbhs_driver_param {
157 */ 157 */
158 int pio_dma_border; /* default is 64byte */ 158 int pio_dma_border; /* default is 64byte */
159 159
160 u32 type; 160 uintptr_t type;
161 u32 enable_gpio; 161 u32 enable_gpio;
162 162
163 /* 163 /*
diff --git a/include/linux/wait.h b/include/linux/wait.h
index d3d077228d4c..1e1bf9f963a9 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -147,8 +147,7 @@ __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
147 147
148typedef int wait_bit_action_f(struct wait_bit_key *); 148typedef int wait_bit_action_f(struct wait_bit_key *);
149void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); 149void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
150void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, int nr, 150void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
151 void *key);
152void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key); 151void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
153void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr); 152void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
154void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); 153void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
@@ -180,7 +179,7 @@ wait_queue_head_t *bit_waitqueue(void *, int);
180#define wake_up_poll(x, m) \ 179#define wake_up_poll(x, m) \
181 __wake_up(x, TASK_NORMAL, 1, (void *) (m)) 180 __wake_up(x, TASK_NORMAL, 1, (void *) (m))
182#define wake_up_locked_poll(x, m) \ 181#define wake_up_locked_poll(x, m) \
183 __wake_up_locked_key((x), TASK_NORMAL, 1, (void *) (m)) 182 __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
184#define wake_up_interruptible_poll(x, m) \ 183#define wake_up_interruptible_poll(x, m) \
185 __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m)) 184 __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
186#define wake_up_interruptible_sync_poll(x, m) \ 185#define wake_up_interruptible_sync_poll(x, m) \
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 4a167b30a12f..b36d837c701e 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -63,7 +63,11 @@ struct unix_sock {
63#define UNIX_GC_MAYBE_CYCLE 1 63#define UNIX_GC_MAYBE_CYCLE 1
64 struct socket_wq peer_wq; 64 struct socket_wq peer_wq;
65}; 65};
66#define unix_sk(__sk) ((struct unix_sock *)__sk) 66
67static inline struct unix_sock *unix_sk(const struct sock *sk)
68{
69 return (struct unix_sock *)sk;
70}
67 71
68#define peer_wait peer_wq.wait 72#define peer_wait peer_wq.wait
69 73
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
index af9d5382f6cb..ce009710120c 100644
--- a/include/net/dst_metadata.h
+++ b/include/net/dst_metadata.h
@@ -60,6 +60,38 @@ static inline struct metadata_dst *tun_rx_dst(int md_size)
60 return tun_dst; 60 return tun_dst;
61} 61}
62 62
63static inline struct metadata_dst *tun_dst_unclone(struct sk_buff *skb)
64{
65 struct metadata_dst *md_dst = skb_metadata_dst(skb);
66 int md_size = md_dst->u.tun_info.options_len;
67 struct metadata_dst *new_md;
68
69 if (!md_dst)
70 return ERR_PTR(-EINVAL);
71
72 new_md = metadata_dst_alloc(md_size, GFP_ATOMIC);
73 if (!new_md)
74 return ERR_PTR(-ENOMEM);
75
76 memcpy(&new_md->u.tun_info, &md_dst->u.tun_info,
77 sizeof(struct ip_tunnel_info) + md_size);
78 skb_dst_drop(skb);
79 dst_hold(&new_md->dst);
80 skb_dst_set(skb, &new_md->dst);
81 return new_md;
82}
83
84static inline struct ip_tunnel_info *skb_tunnel_info_unclone(struct sk_buff *skb)
85{
86 struct metadata_dst *dst;
87
88 dst = tun_dst_unclone(skb);
89 if (IS_ERR(dst))
90 return NULL;
91
92 return &dst->u.tun_info;
93}
94
63static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb, 95static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
64 __be16 flags, 96 __be16 flags,
65 __be64 tunnel_id, 97 __be64 tunnel_id,
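A hedged sketch of how an egress path (for example an ndo_fill_metadata_dst() implementation) might use the new helper to obtain a private, writable copy of the tunnel metadata before filling outer-header fields; the port constants are assumptions:

static int my_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
        struct ip_tunnel_info *info = skb_tunnel_info_unclone(skb);

        if (!info)
                return -ENOMEM;

        /* safe to write now: the metadata dst is no longer shared */
        info->key.tp_src = htons(MY_TUN_SPORT);
        info->key.tp_dst = htons(MY_TUN_DPORT);
        return 0;
}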
diff --git a/include/net/flow.h b/include/net/flow.h
index acd6a096250e..9b85db85f13c 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -35,6 +35,7 @@ struct flowi_common {
35#define FLOWI_FLAG_ANYSRC 0x01 35#define FLOWI_FLAG_ANYSRC 0x01
36#define FLOWI_FLAG_KNOWN_NH 0x02 36#define FLOWI_FLAG_KNOWN_NH 0x02
37#define FLOWI_FLAG_VRFSRC 0x04 37#define FLOWI_FLAG_VRFSRC 0x04
38#define FLOWI_FLAG_SKIP_NH_OIF 0x08
38 __u32 flowic_secid; 39 __u32 flowic_secid;
39 struct flowi_tunnel flowic_tun_key; 40 struct flowi_tunnel flowic_tun_key;
40}; 41};
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 879d6e5a973b..fc1937698625 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -110,7 +110,19 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
110void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, 110void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
111 struct inet_hashinfo *hashinfo); 111 struct inet_hashinfo *hashinfo);
112 112
113void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo); 113void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo,
114 bool rearm);
115
116static inline void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
117{
118 __inet_twsk_schedule(tw, timeo, false);
119}
120
121static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo)
122{
123 __inet_twsk_schedule(tw, timeo, true);
124}
125
114void inet_twsk_deschedule_put(struct inet_timewait_sock *tw); 126void inet_twsk_deschedule_put(struct inet_timewait_sock *tw);
115 127
116void inet_twsk_purge(struct inet_hashinfo *hashinfo, 128void inet_twsk_purge(struct inet_hashinfo *hashinfo,
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 063d30474cf6..aaf9700fc9e5 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -275,7 +275,8 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
275 struct nl_info *info, struct mx6_config *mxc); 275 struct nl_info *info, struct mx6_config *mxc);
276int fib6_del(struct rt6_info *rt, struct nl_info *info); 276int fib6_del(struct rt6_info *rt, struct nl_info *info);
277 277
278void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info); 278void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
279 unsigned int flags);
279 280
280void fib6_run_gc(unsigned long expires, struct net *net, bool force); 281void fib6_run_gc(unsigned long expires, struct net *net, bool force);
281 282
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index b8529aa1dae7..fa915fa0f703 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -32,6 +32,12 @@ struct __ip6_tnl_parm {
32 __be32 o_key; 32 __be32 o_key;
33}; 33};
34 34
35struct ip6_tnl_dst {
36 seqlock_t lock;
37 struct dst_entry __rcu *dst;
38 u32 cookie;
39};
40
35/* IPv6 tunnel */ 41/* IPv6 tunnel */
36struct ip6_tnl { 42struct ip6_tnl {
37 struct ip6_tnl __rcu *next; /* next tunnel in list */ 43 struct ip6_tnl __rcu *next; /* next tunnel in list */
@@ -39,8 +45,7 @@ struct ip6_tnl {
39 struct net *net; /* netns for packet i/o */ 45 struct net *net; /* netns for packet i/o */
40 struct __ip6_tnl_parm parms; /* tunnel configuration parameters */ 46 struct __ip6_tnl_parm parms; /* tunnel configuration parameters */
41 struct flowi fl; /* flowi template for xmit */ 47 struct flowi fl; /* flowi template for xmit */
42 struct dst_entry *dst_cache; /* cached dst */ 48 struct ip6_tnl_dst __percpu *dst_cache; /* cached dst */
43 u32 dst_cookie;
44 49
45 int err_count; 50 int err_count;
46 unsigned long err_time; 51 unsigned long err_time;
@@ -60,9 +65,11 @@ struct ipv6_tlv_tnl_enc_lim {
60 __u8 encap_limit; /* tunnel encapsulation limit */ 65 __u8 encap_limit; /* tunnel encapsulation limit */
61} __packed; 66} __packed;
62 67
63struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t); 68struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t);
69int ip6_tnl_dst_init(struct ip6_tnl *t);
70void ip6_tnl_dst_destroy(struct ip6_tnl *t);
64void ip6_tnl_dst_reset(struct ip6_tnl *t); 71void ip6_tnl_dst_reset(struct ip6_tnl *t);
65void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst); 72void ip6_tnl_dst_set(struct ip6_tnl *t, struct dst_entry *dst);
66int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, 73int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
67 const struct in6_addr *raddr); 74 const struct in6_addr *raddr);
68int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, 75int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
@@ -79,7 +86,7 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
79 struct net_device_stats *stats = &dev->stats; 86 struct net_device_stats *stats = &dev->stats;
80 int pkt_len, err; 87 int pkt_len, err;
81 88
82 pkt_len = skb->len; 89 pkt_len = skb->len - skb_inner_network_offset(skb);
83 err = ip6_local_out_sk(sk, skb); 90 err = ip6_local_out_sk(sk, skb);
84 91
85 if (net_xmit_eval(err) == 0) { 92 if (net_xmit_eval(err) == 0) {
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index a37d0432bebd..727d6e9a9685 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -236,8 +236,11 @@ static inline int fib_lookup(struct net *net, const struct flowi4 *flp,
236 rcu_read_lock(); 236 rcu_read_lock();
237 237
238 tb = fib_get_table(net, RT_TABLE_MAIN); 238 tb = fib_get_table(net, RT_TABLE_MAIN);
239 if (tb && !fib_table_lookup(tb, flp, res, flags | FIB_LOOKUP_NOREF)) 239 if (tb)
240 err = 0; 240 err = fib_table_lookup(tb, flp, res, flags | FIB_LOOKUP_NOREF);
241
242 if (err == -EAGAIN)
243 err = -ENETUNREACH;
241 244
242 rcu_read_unlock(); 245 rcu_read_unlock();
243 246
@@ -258,7 +261,7 @@ static inline int fib_lookup(struct net *net, struct flowi4 *flp,
258 struct fib_result *res, unsigned int flags) 261 struct fib_result *res, unsigned int flags)
259{ 262{
260 struct fib_table *tb; 263 struct fib_table *tb;
261 int err; 264 int err = -ENETUNREACH;
262 265
263 flags |= FIB_LOOKUP_NOREF; 266 flags |= FIB_LOOKUP_NOREF;
264 if (net->ipv4.fib_has_custom_rules) 267 if (net->ipv4.fib_has_custom_rules)
@@ -268,15 +271,20 @@ static inline int fib_lookup(struct net *net, struct flowi4 *flp,
268 271
269 res->tclassid = 0; 272 res->tclassid = 0;
270 273
271 for (err = 0; !err; err = -ENETUNREACH) { 274 tb = rcu_dereference_rtnl(net->ipv4.fib_main);
272 tb = rcu_dereference_rtnl(net->ipv4.fib_main); 275 if (tb)
273 if (tb && !fib_table_lookup(tb, flp, res, flags)) 276 err = fib_table_lookup(tb, flp, res, flags);
274 break; 277
278 if (!err)
279 goto out;
280
281 tb = rcu_dereference_rtnl(net->ipv4.fib_default);
282 if (tb)
283 err = fib_table_lookup(tb, flp, res, flags);
275 284
276 tb = rcu_dereference_rtnl(net->ipv4.fib_default); 285out:
277 if (tb && !fib_table_lookup(tb, flp, res, flags)) 286 if (err == -EAGAIN)
278 break; 287 err = -ENETUNREACH;
279 }
280 288
281 rcu_read_unlock(); 289 rcu_read_unlock();
282 290
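Caller-side sketch: with the rework a miss propagates out of fib_lookup() as a negative errno (with -EAGAIN folded into -ENETUNREACH) rather than being signalled implicitly, so callers just branch on the return value.

static int my_resolve(struct net *net, struct flowi4 *fl4)
{
        struct fib_result res;
        int err = fib_lookup(net, fl4, &res, 0);

        if (err)
                return err;     /* typically -ENETUNREACH on a miss */

        /* ... use res.type, FIB_RES_DEV(res), ... */
        return 0;
}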
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 9a6a3ba888e8..f6dafec9102c 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -276,6 +276,8 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
276int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, 276int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
277 __be32 src, __be32 dst, u8 proto, 277 __be32 src, __be32 dst, u8 proto,
278 u8 tos, u8 ttl, __be16 df, bool xnet); 278 u8 tos, u8 ttl, __be16 df, bool xnet);
279struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
280 gfp_t flags);
279 281
280struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum, 282struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum,
281 int gso_type_mask); 283 int gso_type_mask);
diff --git a/include/net/route.h b/include/net/route.h
index cc61cb95f059..f46af256880c 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -255,7 +255,7 @@ static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst, __be32
255 flow_flags |= FLOWI_FLAG_ANYSRC; 255 flow_flags |= FLOWI_FLAG_ANYSRC;
256 256
257 if (netif_index_is_vrf(sock_net(sk), oif)) 257 if (netif_index_is_vrf(sock_net(sk), oif))
258 flow_flags |= FLOWI_FLAG_VRFSRC; 258 flow_flags |= FLOWI_FLAG_VRFSRC | FLOWI_FLAG_SKIP_NH_OIF;
259 259
260 flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, 260 flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE,
261 protocol, flow_flags, dst, src, dport, sport); 261 protocol, flow_flags, dst, src, dport, sport);
diff --git a/include/net/sock.h b/include/net/sock.h
index 7aa78440559a..e23717013a4e 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -828,6 +828,14 @@ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *s
828 if (sk_rcvqueues_full(sk, limit)) 828 if (sk_rcvqueues_full(sk, limit))
829 return -ENOBUFS; 829 return -ENOBUFS;
830 830
831 /*
832 * If the skb was allocated from pfmemalloc reserves, only
833 * allow SOCK_MEMALLOC sockets to use it as this socket is
834 * helping free memory
835 */
836 if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
837 return -ENOMEM;
838
831 __sk_add_backlog(sk, skb); 839 __sk_add_backlog(sk, skb);
832 sk->sk_backlog.len += skb->truesize; 840 sk->sk_backlog.len += skb->truesize;
833 return 0; 841 return 0;
diff --git a/include/rdma/opa_port_info.h b/include/rdma/opa_port_info.h
index 391dae1931c0..a0fa975cd1c1 100644
--- a/include/rdma/opa_port_info.h
+++ b/include/rdma/opa_port_info.h
@@ -294,8 +294,8 @@ struct opa_port_states {
294 294
295struct opa_port_state_info { 295struct opa_port_state_info {
296 struct opa_port_states port_states; 296 struct opa_port_states port_states;
297 u16 link_width_downgrade_tx_active; 297 __be16 link_width_downgrade_tx_active;
298 u16 link_width_downgrade_rx_active; 298 __be16 link_width_downgrade_rx_active;
299}; 299};
300 300
301struct opa_port_info { 301struct opa_port_info {
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 884e728b09d9..26ede14597da 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -86,7 +86,7 @@
86 .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \ 86 .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
87 SNDRV_CTL_ELEM_ACCESS_READWRITE, \ 87 SNDRV_CTL_ELEM_ACCESS_READWRITE, \
88 .tlv.p = (tlv_array),\ 88 .tlv.p = (tlv_array),\
89 .info = snd_soc_info_volsw, \ 89 .info = snd_soc_info_volsw_sx, \
90 .get = snd_soc_get_volsw_sx,\ 90 .get = snd_soc_get_volsw_sx,\
91 .put = snd_soc_put_volsw_sx, \ 91 .put = snd_soc_put_volsw_sx, \
92 .private_value = (unsigned long)&(struct soc_mixer_control) \ 92 .private_value = (unsigned long)&(struct soc_mixer_control) \
@@ -156,7 +156,7 @@
156 .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \ 156 .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
157 SNDRV_CTL_ELEM_ACCESS_READWRITE, \ 157 SNDRV_CTL_ELEM_ACCESS_READWRITE, \
158 .tlv.p = (tlv_array), \ 158 .tlv.p = (tlv_array), \
159 .info = snd_soc_info_volsw, \ 159 .info = snd_soc_info_volsw_sx, \
160 .get = snd_soc_get_volsw_sx, \ 160 .get = snd_soc_get_volsw_sx, \
161 .put = snd_soc_put_volsw_sx, \ 161 .put = snd_soc_put_volsw_sx, \
162 .private_value = (unsigned long)&(struct soc_mixer_control) \ 162 .private_value = (unsigned long)&(struct soc_mixer_control) \
@@ -574,6 +574,8 @@ int snd_soc_put_enum_double(struct snd_kcontrol *kcontrol,
574 struct snd_ctl_elem_value *ucontrol); 574 struct snd_ctl_elem_value *ucontrol);
575int snd_soc_info_volsw(struct snd_kcontrol *kcontrol, 575int snd_soc_info_volsw(struct snd_kcontrol *kcontrol,
576 struct snd_ctl_elem_info *uinfo); 576 struct snd_ctl_elem_info *uinfo);
577int snd_soc_info_volsw_sx(struct snd_kcontrol *kcontrol,
578 struct snd_ctl_elem_info *uinfo);
577#define snd_soc_info_bool_ext snd_ctl_boolean_mono_info 579#define snd_soc_info_bool_ext snd_ctl_boolean_mono_info
578int snd_soc_get_volsw(struct snd_kcontrol *kcontrol, 580int snd_soc_get_volsw(struct snd_kcontrol *kcontrol,
579 struct snd_ctl_elem_value *ucontrol); 581 struct snd_ctl_elem_value *ucontrol);
diff --git a/include/sound/wm8904.h b/include/sound/wm8904.h
index 898be3a8db9a..6d8f8fba3341 100644
--- a/include/sound/wm8904.h
+++ b/include/sound/wm8904.h
@@ -119,7 +119,7 @@
119#define WM8904_MIC_REGS 2 119#define WM8904_MIC_REGS 2
120#define WM8904_GPIO_REGS 4 120#define WM8904_GPIO_REGS 4
121#define WM8904_DRC_REGS 4 121#define WM8904_DRC_REGS 4
122#define WM8904_EQ_REGS 25 122#define WM8904_EQ_REGS 24
123 123
124/** 124/**
125 * DRC configurations are specified with a label and a set of register 125 * DRC configurations are specified with a label and a set of register
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index ac9bf1c0e42d..5f48754dc36a 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -730,6 +730,7 @@ struct se_device {
730#define DF_EMULATED_VPD_UNIT_SERIAL 0x00000004 730#define DF_EMULATED_VPD_UNIT_SERIAL 0x00000004
731#define DF_USING_UDEV_PATH 0x00000008 731#define DF_USING_UDEV_PATH 0x00000008
732#define DF_USING_ALIAS 0x00000010 732#define DF_USING_ALIAS 0x00000010
733#define DF_READ_ONLY 0x00000020
733 /* Physical device queue depth */ 734 /* Physical device queue depth */
734 u32 queue_depth; 735 u32 queue_depth;
735 /* Used for SPC-2 reservations enforce of ISIDs */ 736 /* Used for SPC-2 reservations enforce of ISIDs */
diff --git a/include/uapi/asm-generic/signal.h b/include/uapi/asm-generic/signal.h
index 9df61f1edb0f..3094618d382f 100644
--- a/include/uapi/asm-generic/signal.h
+++ b/include/uapi/asm-generic/signal.h
@@ -80,8 +80,10 @@
80 * SA_RESTORER 0x04000000 80 * SA_RESTORER 0x04000000
81 */ 81 */
82 82
83#if !defined MINSIGSTKSZ || !defined SIGSTKSZ
83#define MINSIGSTKSZ 2048 84#define MINSIGSTKSZ 2048
84#define SIGSTKSZ 8192 85#define SIGSTKSZ 8192
86#endif
85 87
86#ifndef __ASSEMBLY__ 88#ifndef __ASSEMBLY__
87typedef struct { 89typedef struct {
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 8da542a2874d..ee124009e12a 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -709,17 +709,19 @@ __SYSCALL(__NR_memfd_create, sys_memfd_create)
709__SYSCALL(__NR_bpf, sys_bpf) 709__SYSCALL(__NR_bpf, sys_bpf)
710#define __NR_execveat 281 710#define __NR_execveat 281
711__SC_COMP(__NR_execveat, sys_execveat, compat_sys_execveat) 711__SC_COMP(__NR_execveat, sys_execveat, compat_sys_execveat)
712#define __NR_membarrier 282 712#define __NR_userfaultfd 282
713__SYSCALL(__NR_userfaultfd, sys_userfaultfd)
714#define __NR_membarrier 283
713__SYSCALL(__NR_membarrier, sys_membarrier) 715__SYSCALL(__NR_membarrier, sys_membarrier)
714 716
715#undef __NR_syscalls 717#undef __NR_syscalls
716#define __NR_syscalls 283 718#define __NR_syscalls 284
717 719
718/* 720/*
719 * All syscalls below here should go away really, 721 * All syscalls below here should go away really,
720 * these are provided for both review and as a porting 722 * these are provided for both review and as a porting
721 * help for the C library version. 723 * help for the C library version.
722* 724 *
723 * Last chance: are any of these important enough to 725 * Last chance: are any of these important enough to
724 * enable by default? 726 * enable by default?
725 */ 727 */
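Userspace sketch of the newly numbered syscall, assuming the uapi headers from this tree and a libc that does not yet wrap it; error handling is trimmed.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/userfaultfd.h>

static int uffd_open(void)
{
        struct uffdio_api api = { .api = UFFD_API };
        int fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

        if (fd < 0 || ioctl(fd, UFFDIO_API, &api) < 0)
                return -1;
        return fd;
}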
diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h
index 34141a5dfe74..f8b01887a495 100644
--- a/include/uapi/linux/lwtunnel.h
+++ b/include/uapi/linux/lwtunnel.h
@@ -21,8 +21,6 @@ enum lwtunnel_ip_t {
21 LWTUNNEL_IP_SRC, 21 LWTUNNEL_IP_SRC,
22 LWTUNNEL_IP_TTL, 22 LWTUNNEL_IP_TTL,
23 LWTUNNEL_IP_TOS, 23 LWTUNNEL_IP_TOS,
24 LWTUNNEL_IP_SPORT,
25 LWTUNNEL_IP_DPORT,
26 LWTUNNEL_IP_FLAGS, 24 LWTUNNEL_IP_FLAGS,
27 __LWTUNNEL_IP_MAX, 25 __LWTUNNEL_IP_MAX,
28}; 26};
@@ -36,8 +34,6 @@ enum lwtunnel_ip6_t {
36 LWTUNNEL_IP6_SRC, 34 LWTUNNEL_IP6_SRC,
37 LWTUNNEL_IP6_HOPLIMIT, 35 LWTUNNEL_IP6_HOPLIMIT,
38 LWTUNNEL_IP6_TC, 36 LWTUNNEL_IP6_TC,
39 LWTUNNEL_IP6_SPORT,
40 LWTUNNEL_IP6_DPORT,
41 LWTUNNEL_IP6_FLAGS, 37 LWTUNNEL_IP6_FLAGS,
42 __LWTUNNEL_IP6_MAX, 38 __LWTUNNEL_IP6_MAX,
43}; 39};
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 32e07d8cbaf4..e663627a8ef3 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -323,10 +323,10 @@ enum ovs_key_attr {
323 OVS_KEY_ATTR_MPLS, /* array of struct ovs_key_mpls. 323 OVS_KEY_ATTR_MPLS, /* array of struct ovs_key_mpls.
324 * The implementation may restrict 324 * The implementation may restrict
325 * the accepted length of the array. */ 325 * the accepted length of the array. */
326 OVS_KEY_ATTR_CT_STATE, /* u8 bitmask of OVS_CS_F_* */ 326 OVS_KEY_ATTR_CT_STATE, /* u32 bitmask of OVS_CS_F_* */
327 OVS_KEY_ATTR_CT_ZONE, /* u16 connection tracking zone. */ 327 OVS_KEY_ATTR_CT_ZONE, /* u16 connection tracking zone. */
328 OVS_KEY_ATTR_CT_MARK, /* u32 connection tracking mark */ 328 OVS_KEY_ATTR_CT_MARK, /* u32 connection tracking mark */
329 OVS_KEY_ATTR_CT_LABEL, /* 16-octet connection tracking label */ 329 OVS_KEY_ATTR_CT_LABELS, /* 16-octet connection tracking label */
330 330
331#ifdef __KERNEL__ 331#ifdef __KERNEL__
332 OVS_KEY_ATTR_TUNNEL_INFO, /* struct ip_tunnel_info */ 332 OVS_KEY_ATTR_TUNNEL_INFO, /* struct ip_tunnel_info */
@@ -439,9 +439,9 @@ struct ovs_key_nd {
439 __u8 nd_tll[ETH_ALEN]; 439 __u8 nd_tll[ETH_ALEN];
440}; 440};
441 441
442#define OVS_CT_LABEL_LEN 16 442#define OVS_CT_LABELS_LEN 16
443struct ovs_key_ct_label { 443struct ovs_key_ct_labels {
444 __u8 ct_label[OVS_CT_LABEL_LEN]; 444 __u8 ct_labels[OVS_CT_LABELS_LEN];
445}; 445};
446 446
447/* OVS_KEY_ATTR_CT_STATE flags */ 447/* OVS_KEY_ATTR_CT_STATE flags */
@@ -449,9 +449,9 @@ struct ovs_key_ct_label {
449#define OVS_CS_F_ESTABLISHED 0x02 /* Part of an existing connection. */ 449#define OVS_CS_F_ESTABLISHED 0x02 /* Part of an existing connection. */
450#define OVS_CS_F_RELATED 0x04 /* Related to an established 450#define OVS_CS_F_RELATED 0x04 /* Related to an established
451 * connection. */ 451 * connection. */
452#define OVS_CS_F_INVALID 0x20 /* Could not track connection. */ 452#define OVS_CS_F_REPLY_DIR 0x08 /* Flow is in the reply direction. */
453#define OVS_CS_F_REPLY_DIR 0x40 /* Flow is in the reply direction. */ 453#define OVS_CS_F_INVALID 0x10 /* Could not track connection. */
454#define OVS_CS_F_TRACKED 0x80 /* Conntrack has occurred. */ 454#define OVS_CS_F_TRACKED 0x20 /* Conntrack has occurred. */
455 455
456/** 456/**
457 * enum ovs_flow_attr - attributes for %OVS_FLOW_* commands. 457 * enum ovs_flow_attr - attributes for %OVS_FLOW_* commands.
@@ -618,22 +618,25 @@ struct ovs_action_hash {
618 618
619/** 619/**
620 * enum ovs_ct_attr - Attributes for %OVS_ACTION_ATTR_CT action. 620 * enum ovs_ct_attr - Attributes for %OVS_ACTION_ATTR_CT action.
621 * @OVS_CT_ATTR_FLAGS: u32 connection tracking flags. 621 * @OVS_CT_ATTR_COMMIT: If present, commits the connection to the conntrack
622 * table. This allows future packets for the same connection to be identified
623 * as 'established' or 'related'. The flow key for the current packet will
624 * retain the pre-commit connection state.
622 * @OVS_CT_ATTR_ZONE: u16 connection tracking zone. 625 * @OVS_CT_ATTR_ZONE: u16 connection tracking zone.
623 * @OVS_CT_ATTR_MARK: u32 value followed by u32 mask. For each bit set in the 626 * @OVS_CT_ATTR_MARK: u32 value followed by u32 mask. For each bit set in the
624 * mask, the corresponding bit in the value is copied to the connection 627 * mask, the corresponding bit in the value is copied to the connection
625 * tracking mark field in the connection. 628 * tracking mark field in the connection.
626 * @OVS_CT_ATTR_LABEL: %OVS_CT_LABEL_LEN value followed by %OVS_CT_LABEL_LEN 629 * @OVS_CT_ATTR_LABEL: %OVS_CT_LABELS_LEN value followed by %OVS_CT_LABELS_LEN
627 * mask. For each bit set in the mask, the corresponding bit in the value is 630 * mask. For each bit set in the mask, the corresponding bit in the value is
628 * copied to the connection tracking label field in the connection. 631 * copied to the connection tracking label field in the connection.
629 * @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG. 632 * @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG.
630 */ 633 */
631enum ovs_ct_attr { 634enum ovs_ct_attr {
632 OVS_CT_ATTR_UNSPEC, 635 OVS_CT_ATTR_UNSPEC,
633 OVS_CT_ATTR_FLAGS, /* u8 bitmask of OVS_CT_F_*. */ 636 OVS_CT_ATTR_COMMIT, /* No argument, commits connection. */
634 OVS_CT_ATTR_ZONE, /* u16 zone id. */ 637 OVS_CT_ATTR_ZONE, /* u16 zone id. */
635 OVS_CT_ATTR_MARK, /* mark to associate with this connection. */ 638 OVS_CT_ATTR_MARK, /* mark to associate with this connection. */
636 OVS_CT_ATTR_LABEL, /* label to associate with this connection. */ 639 OVS_CT_ATTR_LABELS, /* labels to associate with this connection. */
637 OVS_CT_ATTR_HELPER, /* netlink helper to assist detection of 640 OVS_CT_ATTR_HELPER, /* netlink helper to assist detection of
638 related connections. */ 641 related connections. */
639 __OVS_CT_ATTR_MAX 642 __OVS_CT_ATTR_MAX
@@ -641,14 +644,6 @@ enum ovs_ct_attr {
641 644
642#define OVS_CT_ATTR_MAX (__OVS_CT_ATTR_MAX - 1) 645#define OVS_CT_ATTR_MAX (__OVS_CT_ATTR_MAX - 1)
643 646
644/*
645 * OVS_CT_ATTR_FLAGS flags - bitmask of %OVS_CT_F_*
646 * @OVS_CT_F_COMMIT: Commits the flow to the conntrack table. This allows
647 * future packets for the same connection to be identified as 'established'
648 * or 'related'.
649 */
650#define OVS_CT_F_COMMIT 0x01
651
652/** 647/**
653 * enum ovs_action_attr - Action types. 648 * enum ovs_action_attr - Action types.
654 * 649 *
@@ -705,7 +700,7 @@ enum ovs_action_attr {
705 * data immediately followed by a mask. 700 * data immediately followed by a mask.
706 * The data must be zero for the unmasked 701 * The data must be zero for the unmasked
707 * bits. */ 702 * bits. */
708 OVS_ACTION_ATTR_CT, /* One nested OVS_CT_ATTR_* . */ 703 OVS_ACTION_ATTR_CT, /* Nested OVS_CT_ATTR_* . */
709 704
710 __OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted 705 __OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted
711 * from userspace. */ 706 * from userspace. */
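With OVS_KEY_ATTR_CT_STATE widened to a u32 and the OVS_CS_F_* bits renumbered, a consumer reads the attribute as a 32-bit mask and tests the flags; nla_get_u32() here is the kernel netlink helper.

static bool ct_state_is_established(const struct nlattr *a)
{
        u32 state = nla_get_u32(a);     /* OVS_KEY_ATTR_CT_STATE payload */

        return (state & OVS_CS_F_TRACKED) && (state & OVS_CS_F_ESTABLISHED);
}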
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 702024769c74..9d8f5d10c1e5 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -160,7 +160,7 @@ struct rtattr {
160 160
161/* Macros to handle rtattributes */ 161/* Macros to handle rtattributes */
162 162
163#define RTA_ALIGNTO 4 163#define RTA_ALIGNTO 4U
164#define RTA_ALIGN(len) ( ((len)+RTA_ALIGNTO-1) & ~(RTA_ALIGNTO-1) ) 164#define RTA_ALIGN(len) ( ((len)+RTA_ALIGNTO-1) & ~(RTA_ALIGNTO-1) )
165#define RTA_OK(rta,len) ((len) >= (int)sizeof(struct rtattr) && \ 165#define RTA_OK(rta,len) ((len) >= (int)sizeof(struct rtattr) && \
166 (rta)->rta_len >= sizeof(struct rtattr) && \ 166 (rta)->rta_len >= sizeof(struct rtattr) && \
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
index df0e09bb7dd5..9057d7af3ae1 100644
--- a/include/uapi/linux/userfaultfd.h
+++ b/include/uapi/linux/userfaultfd.h
@@ -11,8 +11,6 @@
11 11
12#include <linux/types.h> 12#include <linux/types.h>
13 13
14#include <linux/compiler.h>
15
16#define UFFD_API ((__u64)0xAA) 14#define UFFD_API ((__u64)0xAA)
17/* 15/*
18 * After implementing the respective features it will become: 16 * After implementing the respective features it will become:
diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h
index 9ce083960a25..f18490985fc8 100644
--- a/include/xen/interface/sched.h
+++ b/include/xen/interface/sched.h
@@ -107,5 +107,13 @@ struct sched_watchdog {
107#define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */ 107#define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */
108#define SHUTDOWN_crash 3 /* Tell controller we've crashed. */ 108#define SHUTDOWN_crash 3 /* Tell controller we've crashed. */
109#define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. */ 109#define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. */
110/*
111 * Domain asked to perform 'soft reset' for it. The expected behavior is to
112 * reset internal Xen state for the domain returning it to the point where it
113 * was created but leaving the domain's memory contents and vCPU contexts
114 * intact. This will allow the domain to start over and set up all Xen specific
115 * interfaces again.
116 */
117#define SHUTDOWN_soft_reset 5
110 118
111#endif /* __XEN_PUBLIC_SCHED_H__ */ 119#endif /* __XEN_PUBLIC_SCHED_H__ */