path: root/include/linux
author		Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2017-10-09 03:02:35 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2017-10-09 03:02:35 -0400
commit		1236d6bb6e19fc72ffc6bbcdeb1bfefe450e54ee (patch)
tree		47da3feee8e263e8c9352c85cf518e624be3c211 /include/linux
parent		750b1a6894ecc9b178c6e3d0a1170122971b2036 (diff)
parent		8a5776a5f49812d29fe4b2d0a2d71675c3facf3f (diff)
Merge 4.14-rc4 into staging-next
We want the staging/iio fixes in here as well to handle merge issues.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/audit.h			6
-rw-r--r--	include/linux/binfmts.h			2
-rw-r--r--	include/linux/bitfield.h		2
-rw-r--r--	include/linux/blkdev.h			1
-rw-r--r--	include/linux/cpuhotplug.h		21
-rw-r--r--	include/linux/device.h			4
-rw-r--r--	include/linux/fs.h			2
-rw-r--r--	include/linux/iio/adc/ad_sigma_delta.h	3
-rw-r--r--	include/linux/input.h			1
-rw-r--r--	include/linux/iommu.h			2
-rw-r--r--	include/linux/irq.h			5
-rw-r--r--	include/linux/key.h			2
-rw-r--r--	include/linux/mlx5/device.h		5
-rw-r--r--	include/linux/mlx5/driver.h		1
-rw-r--r--	include/linux/mlx5/mlx5_ifc.h		3
-rw-r--r--	include/linux/mm.h			2
-rw-r--r--	include/linux/mmc/host.h		2
-rw-r--r--	include/linux/mmu_notifier.h		5
-rw-r--r--	include/linux/mmzone.h			10
-rw-r--r--	include/linux/nmi.h			121
-rw-r--r--	include/linux/nvme-fc-driver.h		13
-rw-r--r--	include/linux/nvme.h			19
-rw-r--r--	include/linux/of_platform.h		7
-rw-r--r--	include/linux/pci.h			2
-rw-r--r--	include/linux/sched.h			64
-rw-r--r--	include/linux/sched/mm.h		6
-rw-r--r--	include/linux/seccomp.h			3
-rw-r--r--	include/linux/smpboot.h			4
-rw-r--r--	include/linux/syscalls.h		12
-rw-r--r--	include/linux/timer.h			14
-rw-r--r--	include/linux/trace_events.h		1
31 files changed, 212 insertions, 133 deletions
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 74d4d4e8e3db..cb708eb8accc 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -314,11 +314,7 @@ void audit_core_dumps(long signr);
 
 static inline void audit_seccomp(unsigned long syscall, long signr, int code)
 {
-	if (!audit_enabled)
-		return;
-
-	/* Force a record to be reported if a signal was delivered. */
-	if (signr || unlikely(!audit_dummy_context()))
+	if (audit_enabled && unlikely(!audit_dummy_context()))
 		__audit_seccomp(syscall, signr, code);
 }
 
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index fb44d6180ca0..18d05b5491f3 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -131,7 +131,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm,
 				       int executable_stack);
 extern int transfer_args_to_stack(struct linux_binprm *bprm,
 				  unsigned long *sp_location);
-extern int bprm_change_interp(char *interp, struct linux_binprm *bprm);
+extern int bprm_change_interp(const char *interp, struct linux_binprm *bprm);
 extern int copy_strings_kernel(int argc, const char *const *argv,
 			       struct linux_binprm *bprm);
 extern int prepare_bprm_creds(struct linux_binprm *bprm);
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index 8b9d6fff002d..f2deb71958b2 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -92,7 +92,7 @@
 /**
  * FIELD_GET() - extract a bitfield element
  * @_mask: shifted mask defining the field's length and position
- * @_reg: 32bit value of entire bitfield
+ * @_reg: value of entire bitfield
  *
  * FIELD_GET() extracts the field specified by @_mask from the
  * bitfield passed in as @_reg by masking and shifting it down.
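For reference, a minimal usage sketch of the FIELD_GET() helper documented above; the register layout, CTRL_SPEED_MASK and ctrl_get_speed() are hypothetical and only illustrate the masking/shifting behaviour:

	#include <linux/types.h>
	#include <linux/bits.h>
	#include <linux/bitfield.h>

	/* Hypothetical control register: bits 7:4 hold a "speed" field. */
	#define CTRL_SPEED_MASK		GENMASK(7, 4)

	static inline u32 ctrl_get_speed(u32 reg)
	{
		/* Mask out bits 7:4 of 'reg' and shift them down to bit 0. */
		return FIELD_GET(CTRL_SPEED_MASK, reg);
	}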
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 460294bb0fa5..02fa42d24b52 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -551,6 +551,7 @@ struct request_queue {
 	int			node;
 #ifdef CONFIG_BLK_DEV_IO_TRACE
 	struct blk_trace	*blk_trace;
+	struct mutex		blk_trace_mutex;
 #endif
 	/*
 	 * for flush operations
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index f24bfb2b9a2d..6d508767e144 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -3,8 +3,27 @@
 
 #include <linux/types.h>
 
+/*
+ * CPU-up			CPU-down
+ *
+ * BP		AP		BP		AP
+ *
+ * OFFLINE			OFFLINE
+ *   |				  ^
+ *   v				  |
+ * BRINGUP_CPU->AP_OFFLINE	BRINGUP_CPU <- AP_IDLE_DEAD (idle thread/play_dead)
+ *		|				AP_OFFLINE
+ *		v (IRQ-off)	 ,---------------^
+ *		AP_ONLNE	| (stop_machine)
+ *		|		TEARDOWN_CPU <-	AP_ONLINE_IDLE
+ *		|				  ^
+ *		v				  |
+ *		AP_ACTIVE			AP_ACTIVE
+ */
+
 enum cpuhp_state {
-	CPUHP_OFFLINE,
+	CPUHP_INVALID = -1,
+	CPUHP_OFFLINE = 0,
 	CPUHP_CREATE_THREADS,
 	CPUHP_PERF_PREPARE,
 	CPUHP_PERF_X86_PREPARE,
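For reference, a minimal sketch of how a driver hooks into the hotplug state machine drawn above, using the existing cpuhp_setup_state() API; the mydrv_* names and the "mydrv:online" string are hypothetical:

	#include <linux/cpuhotplug.h>

	static int mydrv_cpu_online(unsigned int cpu)
	{
		/* Runs on the plugged CPU once it has reached the online states. */
		return 0;
	}

	static int mydrv_cpu_offline(unsigned int cpu)
	{
		/* Undo the per-CPU setup before the CPU is torn down. */
		return 0;
	}

	static int mydrv_init(void)
	{
		int ret;

		/* CPUHP_AP_ONLINE_DYN asks the core for a dynamically allocated
		 * state; a negative return value is an error. */
		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
					mydrv_cpu_online, mydrv_cpu_offline);
		return ret < 0 ? ret : 0;
	}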
diff --git a/include/linux/device.h b/include/linux/device.h
index c6f27207dbe8..66fe271c2544 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -307,8 +307,6 @@ struct driver_attribute {
 			size_t count);
 };
 
-#define DRIVER_ATTR(_name, _mode, _show, _store) \
-	struct driver_attribute driver_attr_##_name = __ATTR(_name, _mode, _show, _store)
 #define DRIVER_ATTR_RW(_name) \
 	struct driver_attribute driver_attr_##_name = __ATTR_RW(_name)
 #define DRIVER_ATTR_RO(_name) \
@@ -838,7 +836,7 @@ struct dev_links_info {
  * @driver_data: Private pointer for driver specific info.
  * @links:	Links to suppliers and consumers of this device.
  * @power:	For device power management.
- * 		See Documentation/power/admin-guide/devices.rst for details.
+ * 		See Documentation/driver-api/pm/devices.rst for details.
  * @pm_domain:	Provide callbacks that are executed during system suspend,
  * 		hibernation, system resume and during runtime PM transitions
  * 		along with subsystem-level and driver-level callbacks.
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 339e73742e73..13dab191a23e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -403,7 +403,7 @@ struct address_space {
 	unsigned long		flags;		/* error bits */
 	spinlock_t		private_lock;	/* for use by the address_space */
 	gfp_t			gfp_mask;	/* implicit gfp mask for allocations */
-	struct list_head	private_list;	/* ditto */
+	struct list_head	private_list;	/* for use by the address_space */
 	void			*private_data;	/* ditto */
 	errseq_t		wb_err;
 } __attribute__((aligned(sizeof(long)))) __randomize_layout;
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index 5ba430cc9a87..1fc7abd28b0b 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -111,6 +111,9 @@ int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
 int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
 	unsigned int size, unsigned int *val);
 
+int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
+	unsigned int reset_length);
+
 int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
 	const struct iio_chan_spec *chan, int *val);
 int ad_sd_calibrate_all(struct ad_sigma_delta *sigma_delta,
diff --git a/include/linux/input.h b/include/linux/input.h
index a65e3b24fb18..fb5e23c7ed98 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -529,6 +529,7 @@ int input_ff_event(struct input_dev *dev, unsigned int type, unsigned int code,
 
 int input_ff_upload(struct input_dev *dev, struct ff_effect *effect, struct file *file);
 int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file);
+int input_ff_flush(struct input_dev *dev, struct file *file);
 
 int input_ff_create_memless(struct input_dev *dev, void *data,
 		int (*play_effect)(struct input_dev *, void *, struct ff_effect *));
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index a7f2ac689d29..41b8c5757859 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -167,11 +167,11 @@ struct iommu_resv_region {
  * @map: map a physically contiguous memory region to an iommu domain
  * @unmap: unmap a physically contiguous memory region from an iommu domain
  * @map_sg: map a scatter-gather list of physically contiguous memory chunks
+ *          to an iommu domain
  * @flush_tlb_all: Synchronously flush all hardware TLBs for this domain
  * @tlb_range_add: Add a given iova range to the flush queue for this domain
  * @tlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
  *            queue
- *            to an iommu domain
  * @iova_to_phys: translate iova to physical address
  * @add_device: add device to iommu grouping
  * @remove_device: remove device from iommu grouping
diff --git a/include/linux/irq.h b/include/linux/irq.h
index b99a784635ff..d4728bf6a537 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -783,10 +783,7 @@ static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
 static inline
 struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
 {
-	if (!cpumask_empty(d->common->effective_affinity))
-		return d->common->effective_affinity;
-
-	return d->common->affinity;
+	return d->common->effective_affinity;
 }
 static inline void irq_data_update_effective_affinity(struct irq_data *d,
 						      const struct cpumask *m)
diff --git a/include/linux/key.h b/include/linux/key.h
index 044114185120..e315e16b6ff8 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -187,6 +187,7 @@ struct key {
 #define KEY_FLAG_BUILTIN	8	/* set if key is built in to the kernel */
 #define KEY_FLAG_ROOT_CAN_INVAL	9	/* set if key can be invalidated by root without permission */
 #define KEY_FLAG_KEEP		10	/* set if key should not be removed */
+#define KEY_FLAG_UID_KEYRING	11	/* set if key is a user or user session keyring */
 
 	/* the key type and key description string
 	 * - the desc is used to match a key against search criteria
@@ -243,6 +244,7 @@ extern struct key *key_alloc(struct key_type *type,
 #define KEY_ALLOC_NOT_IN_QUOTA		0x0002	/* not in quota */
 #define KEY_ALLOC_BUILT_IN		0x0004	/* Key is built into kernel */
 #define KEY_ALLOC_BYPASS_RESTRICTION	0x0008	/* Override the check on restricted keyrings */
+#define KEY_ALLOC_UID_KEYRING		0x0010	/* allocating a user or user session keyring */
 
 extern void key_revoke(struct key *key);
 extern void key_invalidate(struct key *key);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index eaf4ad209c8f..e32dbc4934db 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -980,7 +980,6 @@ enum mlx5_cap_type {
 	MLX5_CAP_RESERVED,
 	MLX5_CAP_VECTOR_CALC,
 	MLX5_CAP_QOS,
-	MLX5_CAP_FPGA,
 	/* NUM OF CAP Types */
 	MLX5_CAP_NUM
 };
@@ -1110,10 +1109,10 @@ enum mlx5_mcam_feature_groups {
 	MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
 
 #define MLX5_CAP_FPGA(mdev, cap) \
-	MLX5_GET(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap)
+	MLX5_GET(fpga_cap, (mdev)->caps.fpga, cap)
 
 #define MLX5_CAP64_FPGA(mdev, cap) \
-	MLX5_GET64(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap)
+	MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap)
 
 enum {
 	MLX5_CMD_STAT_OK			= 0x0,
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 02ff700e4f30..401c8972cc3a 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -774,6 +774,7 @@ struct mlx5_core_dev {
 		u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
 		u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
 		u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
+		u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
 	} caps;
 	phys_addr_t		iseg_base;
 	struct mlx5_init_seg __iomem *iseg;
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index a528b35a022e..69772347f866 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -327,7 +327,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
 	u8         reserved_at_80[0x18];
 	u8         log_max_destination[0x8];
 
-	u8         reserved_at_a0[0x18];
+	u8         log_max_flow_counter[0x8];
+	u8         reserved_at_a8[0x10];
 	u8         log_max_flow[0x8];
 
 	u8         reserved_at_c0[0x40];
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f8c10d336e42..065d99deb847 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -240,7 +240,7 @@ extern unsigned int kobjsize(const void *objp);
 
 #if defined(CONFIG_X86_INTEL_MPX)
 /* MPX specific bounds table or bounds directory */
-# define VM_MPX		VM_HIGH_ARCH_BIT_4
+# define VM_MPX		VM_HIGH_ARCH_4
 #else
 # define VM_MPX		VM_NONE
 #endif
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index f3f2d07feb2a..9a43763a68ad 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -316,7 +316,7 @@ struct mmc_host {
 #define MMC_CAP_UHS_SDR50	(1 << 18)	/* Host supports UHS SDR50 mode */
 #define MMC_CAP_UHS_SDR104	(1 << 19)	/* Host supports UHS SDR104 mode */
 #define MMC_CAP_UHS_DDR50	(1 << 20)	/* Host supports UHS DDR50 mode */
-#define MMC_CAP_NO_BOUNCE_BUFF	(1 << 21)	/* Disable bounce buffers on host */
+/* (1 << 21) is free for reuse */
 #define MMC_CAP_DRIVER_TYPE_A	(1 << 23)	/* Host supports Driver Type A */
 #define MMC_CAP_DRIVER_TYPE_C	(1 << 24)	/* Host supports Driver Type C */
 #define MMC_CAP_DRIVER_TYPE_D	(1 << 25)	/* Host supports Driver Type D */
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 7b2e31b1745a..6866e8126982 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -400,6 +400,11 @@ extern void mmu_notifier_synchronize(void);
 
 #else /* CONFIG_MMU_NOTIFIER */
 
+static inline int mm_has_notifiers(struct mm_struct *mm)
+{
+	return 0;
+}
+
 static inline void mmu_notifier_release(struct mm_struct *mm)
 {
 }
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 356a814e7c8e..c8f89417740b 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1094,8 +1094,14 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn)
 #error Allocator MAX_ORDER exceeds SECTION_SIZE
 #endif
 
-#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
-#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)
+static inline unsigned long pfn_to_section_nr(unsigned long pfn)
+{
+	return pfn >> PFN_SECTION_SHIFT;
+}
+static inline unsigned long section_nr_to_pfn(unsigned long sec)
+{
+	return sec << PFN_SECTION_SHIFT;
+}
 
 #define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
 #define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index a36abe2da13e..27e249ed7c5c 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -12,11 +12,31 @@
 
 #ifdef CONFIG_LOCKUP_DETECTOR
 void lockup_detector_init(void);
-#else
-static inline void lockup_detector_init(void)
-{
-}
-#endif
+void lockup_detector_soft_poweroff(void);
+void lockup_detector_cleanup(void);
+bool is_hardlockup(void);
+
+extern int watchdog_user_enabled;
+extern int nmi_watchdog_user_enabled;
+extern int soft_watchdog_user_enabled;
+extern int watchdog_thresh;
+extern unsigned long watchdog_enabled;
+
+extern struct cpumask watchdog_cpumask;
+extern unsigned long *watchdog_cpumask_bits;
+#ifdef CONFIG_SMP
+extern int sysctl_softlockup_all_cpu_backtrace;
+extern int sysctl_hardlockup_all_cpu_backtrace;
+#else
+#define sysctl_softlockup_all_cpu_backtrace 0
+#define sysctl_hardlockup_all_cpu_backtrace 0
+#endif /* !CONFIG_SMP */
+
+#else /* CONFIG_LOCKUP_DETECTOR */
+static inline void lockup_detector_init(void) { }
+static inline void lockup_detector_soft_poweroff(void) { }
+static inline void lockup_detector_cleanup(void) { }
+#endif /* !CONFIG_LOCKUP_DETECTOR */
 
 #ifdef CONFIG_SOFTLOCKUP_DETECTOR
 extern void touch_softlockup_watchdog_sched(void);
@@ -24,29 +44,17 @@ extern void touch_softlockup_watchdog(void);
 extern void touch_softlockup_watchdog_sync(void);
 extern void touch_all_softlockup_watchdogs(void);
 extern unsigned int  softlockup_panic;
-extern int soft_watchdog_enabled;
-extern atomic_t watchdog_park_in_progress;
 #else
-static inline void touch_softlockup_watchdog_sched(void)
-{
-}
-static inline void touch_softlockup_watchdog(void)
-{
-}
-static inline void touch_softlockup_watchdog_sync(void)
-{
-}
-static inline void touch_all_softlockup_watchdogs(void)
-{
-}
+static inline void touch_softlockup_watchdog_sched(void) { }
+static inline void touch_softlockup_watchdog(void) { }
+static inline void touch_softlockup_watchdog_sync(void) { }
+static inline void touch_all_softlockup_watchdogs(void) { }
 #endif
 
 #ifdef CONFIG_DETECT_HUNG_TASK
 void reset_hung_task_detector(void);
 #else
-static inline void reset_hung_task_detector(void)
-{
-}
+static inline void reset_hung_task_detector(void) { }
 #endif
 
 /*
@@ -54,12 +62,12 @@ static inline void reset_hung_task_detector(void)
  * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
  * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
  *
- * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
- * are variables that are only used as an 'interface' between the parameters
- * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
- * 'watchdog_thresh' variable is handled differently because its value is not
- * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
- * is equal zero.
+ * 'watchdog_user_enabled', 'nmi_watchdog_user_enabled' and
+ * 'soft_watchdog_user_enabled' are variables that are only used as an
+ * 'interface' between the parameters in /proc/sys/kernel and the internal
+ * state bits in 'watchdog_enabled'. The 'watchdog_thresh' variable is
+ * handled differently because its value is not boolean, and the lockup
+ * detectors are 'suspended' while 'watchdog_thresh' is equal zero.
  */
 #define NMI_WATCHDOG_ENABLED_BIT   0
 #define SOFT_WATCHDOG_ENABLED_BIT  1
@@ -73,17 +81,41 @@ extern unsigned int hardlockup_panic;
 static inline void hardlockup_detector_disable(void) {}
 #endif
 
+#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
+# define NMI_WATCHDOG_SYSCTL_PERM	0644
+#else
+# define NMI_WATCHDOG_SYSCTL_PERM	0444
+#endif
+
 #if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
 extern void arch_touch_nmi_watchdog(void);
+extern void hardlockup_detector_perf_stop(void);
+extern void hardlockup_detector_perf_restart(void);
+extern void hardlockup_detector_perf_disable(void);
+extern void hardlockup_detector_perf_enable(void);
+extern void hardlockup_detector_perf_cleanup(void);
+extern int hardlockup_detector_perf_init(void);
 #else
-#if !defined(CONFIG_HAVE_NMI_WATCHDOG)
+static inline void hardlockup_detector_perf_stop(void) { }
+static inline void hardlockup_detector_perf_restart(void) { }
+static inline void hardlockup_detector_perf_disable(void) { }
+static inline void hardlockup_detector_perf_enable(void) { }
+static inline void hardlockup_detector_perf_cleanup(void) { }
+# if !defined(CONFIG_HAVE_NMI_WATCHDOG)
+static inline int hardlockup_detector_perf_init(void) { return -ENODEV; }
 static inline void arch_touch_nmi_watchdog(void) {}
+# else
+static inline int hardlockup_detector_perf_init(void) { return 0; }
+# endif
 #endif
-#endif
+
+void watchdog_nmi_stop(void);
+void watchdog_nmi_start(void);
+int watchdog_nmi_probe(void);
 
 /**
  * touch_nmi_watchdog - restart NMI watchdog timeout.
  *
  * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
  * may be used to reset the timeout - for code which intentionally
  * disables interrupts for a long time. This call is stateless.
@@ -153,22 +185,6 @@ static inline bool trigger_single_cpu_backtrace(int cpu)
 u64 hw_nmi_get_sample_period(int watchdog_thresh);
 #endif
 
-#ifdef CONFIG_LOCKUP_DETECTOR
-extern int nmi_watchdog_enabled;
-extern int watchdog_user_enabled;
-extern int watchdog_thresh;
-extern unsigned long watchdog_enabled;
-extern struct cpumask watchdog_cpumask;
-extern unsigned long *watchdog_cpumask_bits;
-extern int __read_mostly watchdog_suspended;
-#ifdef CONFIG_SMP
-extern int sysctl_softlockup_all_cpu_backtrace;
-extern int sysctl_hardlockup_all_cpu_backtrace;
-#else
-#define sysctl_softlockup_all_cpu_backtrace 0
-#define sysctl_hardlockup_all_cpu_backtrace 0
-#endif
-
 #if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
     defined(CONFIG_HARDLOCKUP_DETECTOR)
 void watchdog_update_hrtimer_threshold(u64 period);
@@ -176,7 +192,6 @@ void watchdog_update_hrtimer_threshold(u64 period);
 static inline void watchdog_update_hrtimer_threshold(u64 period) { }
 #endif
 
-extern bool is_hardlockup(void);
 struct ctl_table;
 extern int proc_watchdog(struct ctl_table *, int ,
 			 void __user *, size_t *, loff_t *);
@@ -188,18 +203,6 @@ extern int proc_watchdog_thresh(struct ctl_table *, int ,
 			      void __user *, size_t *, loff_t *);
 extern int proc_watchdog_cpumask(struct ctl_table *, int,
 				 void __user *, size_t *, loff_t *);
-extern int lockup_detector_suspend(void);
-extern void lockup_detector_resume(void);
-#else
-static inline int lockup_detector_suspend(void)
-{
-	return 0;
-}
-
-static inline void lockup_detector_resume(void)
-{
-}
-#endif
 
 #ifdef CONFIG_HAVE_ACPI_APEI_NMI
 #include <asm/nmi.h>
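As an illustration of the touch_nmi_watchdog() usage described in the kernel-doc above, code that intentionally keeps interrupts disabled for a long time pets the watchdog so the hard lockup detector stays quiet. A hedged sketch follows; poll_done() and the surrounding function are made up:

	static void wait_for_hw_with_irqs_off(void)
	{
		local_irq_disable();
		while (!poll_done()) {		/* poll_done() is a hypothetical helper */
			touch_nmi_watchdog();	/* reset the NMI watchdog timeout */
			udelay(100);
		}
		local_irq_enable();
	}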
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 9c5cb4480806..a726f96010d5 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -346,11 +346,6 @@ struct nvme_fc_remote_port {
  *       indicating an FC transport Aborted status.
  *       Entrypoint is Mandatory.
  *
- * @defer_rcv:  Called by the transport to signal the LLLD that it has
- *       begun processing of a previously received NVME CMD IU. The LLDD
- *       is now free to re-use the rcv buffer associated with the
- *       nvmefc_tgt_fcp_req.
- *
  * @max_hw_queues:  indicates the maximum number of hw queues the LLDD
  *       supports for cpu affinitization.
  *       Value is Mandatory. Must be at least 1.
@@ -806,11 +801,19 @@ struct nvmet_fc_target_port {
  *       outstanding operation (if there was one) to complete, then will
  *       call the fcp_req_release() callback to return the command's
  *       exchange context back to the LLDD.
+ *       Entrypoint is Mandatory.
  *
  * @fcp_req_release:  Called by the transport to return a nvmefc_tgt_fcp_req
  *       to the LLDD after all operations on the fcp operation are complete.
  *       This may be due to the command completing or upon completion of
  *       abort cleanup.
+ *       Entrypoint is Mandatory.
+ *
+ * @defer_rcv:  Called by the transport to signal the LLLD that it has
+ *       begun processing of a previously received NVME CMD IU. The LLDD
+ *       is now free to re-use the rcv buffer associated with the
+ *       nvmefc_tgt_fcp_req.
+ *       Entrypoint is Optional.
  *
  * @max_hw_queues:  indicates the maximum number of hw queues the LLDD
  *       supports for cpu affinitization.
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 87723c86f136..9310ce77d8e1 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -471,12 +471,14 @@ enum nvme_opcode {
  *
  * @NVME_SGL_FMT_ADDRESS:     absolute address of the data block
  * @NVME_SGL_FMT_OFFSET:      relative offset of the in-capsule data block
+ * @NVME_SGL_FMT_TRANSPORT_A: transport defined format, value 0xA
  * @NVME_SGL_FMT_INVALIDATE:  RDMA transport specific remote invalidation
  *                            request subtype
  */
 enum {
 	NVME_SGL_FMT_ADDRESS		= 0x00,
 	NVME_SGL_FMT_OFFSET		= 0x01,
+	NVME_SGL_FMT_TRANSPORT_A	= 0x0A,
 	NVME_SGL_FMT_INVALIDATE		= 0x0f,
 };
 
@@ -490,12 +492,16 @@ enum {
  *
  * For struct nvme_keyed_sgl_desc:
  * @NVME_KEY_SGL_FMT_DATA_DESC:		keyed data block descriptor
+ *
+ * Transport-specific SGL types:
+ * @NVME_TRANSPORT_SGL_DATA_DESC:	Transport SGL data dlock descriptor
  */
 enum {
 	NVME_SGL_FMT_DATA_DESC		= 0x00,
 	NVME_SGL_FMT_SEG_DESC		= 0x02,
 	NVME_SGL_FMT_LAST_SEG_DESC	= 0x03,
 	NVME_KEY_SGL_FMT_DATA_DESC	= 0x04,
+	NVME_TRANSPORT_SGL_DATA_DESC	= 0x05,
 };
 
 struct nvme_sgl_desc {
@@ -1127,19 +1133,6 @@ enum {
 	NVME_SC_UNWRITTEN_BLOCK		= 0x287,
 
 	NVME_SC_DNR			= 0x4000,
-
-
-	/*
-	 * FC Transport-specific error status values for NVME commands
-	 *
-	 * Transport-specific status code values must be in the range 0xB0..0xBF
-	 */
-
-	/* Generic FC failure - catchall */
-	NVME_SC_FC_TRANSPORT_ERROR	= 0x00B0,
-
-	/* I/O failure due to FC ABTS'd */
-	NVME_SC_FC_TRANSPORT_ABORTED	= 0x00B1,
 };
 
 struct nvme_completion {
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index e0d1946270f3..fb908e598348 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -57,7 +57,14 @@ extern const struct of_device_id of_default_bus_match_table[];
 extern struct platform_device *of_device_alloc(struct device_node *np,
 						 const char *bus_id,
 						 struct device *parent);
+#ifdef CONFIG_OF
 extern struct platform_device *of_find_device_by_node(struct device_node *np);
+#else
+static inline struct platform_device *of_find_device_by_node(struct device_node *np)
+{
+	return NULL;
+}
+#endif
 
 /* Platform devices and busses creation */
 extern struct platform_device *of_platform_device_create(struct device_node *np,
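A hedged usage sketch of of_find_device_by_node() as declared above; with the new !CONFIG_OF stub it simply returns NULL on non-DT builds, so callers need no #ifdef of their own. The "companion" phandle and get_companion() are illustrative only:

	static struct platform_device *get_companion(struct device *dev)
	{
		struct device_node *np;
		struct platform_device *pdev;

		np = of_parse_phandle(dev->of_node, "companion", 0);
		if (!np)
			return NULL;

		pdev = of_find_device_by_node(np);	/* takes a device reference */
		of_node_put(np);
		return pdev;
	}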
diff --git a/include/linux/pci.h b/include/linux/pci.h
index f68c58a93dd0..f4f8ee5a7362 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1685,6 +1685,8 @@ static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
 
 #define dev_is_pci(d) (false)
 #define dev_is_pf(d) (false)
+static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
+{ return false; }
 #endif /* CONFIG_PCI */
 
 /* Include architecture-dependent settings and functions */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 92fb8dd5a9e4..26a7df4e558c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -65,25 +65,23 @@ struct task_group;
  */
 
 /* Used in tsk->state: */
-#define TASK_RUNNING			0
-#define TASK_INTERRUPTIBLE		1
-#define TASK_UNINTERRUPTIBLE		2
-#define __TASK_STOPPED			4
-#define __TASK_TRACED			8
+#define TASK_RUNNING			0x0000
+#define TASK_INTERRUPTIBLE		0x0001
+#define TASK_UNINTERRUPTIBLE		0x0002
+#define __TASK_STOPPED			0x0004
+#define __TASK_TRACED			0x0008
 /* Used in tsk->exit_state: */
-#define EXIT_DEAD			16
-#define EXIT_ZOMBIE			32
+#define EXIT_DEAD			0x0010
+#define EXIT_ZOMBIE			0x0020
 #define EXIT_TRACE			(EXIT_ZOMBIE | EXIT_DEAD)
 /* Used in tsk->state again: */
-#define TASK_DEAD			64
-#define TASK_WAKEKILL			128
-#define TASK_WAKING			256
-#define TASK_PARKED			512
-#define TASK_NOLOAD			1024
-#define TASK_NEW			2048
-#define TASK_STATE_MAX			4096
-
-#define TASK_STATE_TO_CHAR_STR		"RSDTtXZxKWPNn"
+#define TASK_PARKED			0x0040
+#define TASK_DEAD			0x0080
+#define TASK_WAKEKILL			0x0100
+#define TASK_WAKING			0x0200
+#define TASK_NOLOAD			0x0400
+#define TASK_NEW			0x0800
+#define TASK_STATE_MAX			0x1000
 
 /* Convenience macros for the sake of set_current_state: */
 #define TASK_KILLABLE			(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
@@ -99,7 +97,8 @@ struct task_group;
 /* get_task_state(): */
 #define TASK_REPORT			(TASK_RUNNING | TASK_INTERRUPTIBLE | \
 					 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
-					 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
+					 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
+					 TASK_PARKED)
 
 #define task_is_traced(task)		((task->state & __TASK_TRACED) != 0)
 
@@ -1243,17 +1242,34 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
 	return task_pgrp_nr_ns(tsk, &init_pid_ns);
 }
 
-static inline char task_state_to_char(struct task_struct *task)
+#define TASK_REPORT_IDLE	(TASK_REPORT + 1)
+#define TASK_REPORT_MAX		(TASK_REPORT_IDLE << 1)
+
+static inline unsigned int __get_task_state(struct task_struct *tsk)
+{
+	unsigned int tsk_state = READ_ONCE(tsk->state);
+	unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;
+
+	BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
+
+	if (tsk_state == TASK_IDLE)
+		state = TASK_REPORT_IDLE;
+
+	return fls(state);
+}
+
+static inline char __task_state_to_char(unsigned int state)
 {
-	const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
-	unsigned long state = task->state;
+	static const char state_char[] = "RSDTtXZPI";
 
-	state = state ? __ffs(state) + 1 : 0;
+	BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);
 
-	/* Make sure the string lines up properly with the number of task states: */
-	BUILD_BUG_ON(sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1);
+	return state_char[state];
+}
 
-	return state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?';
+static inline char task_state_to_char(struct task_struct *tsk)
+{
+	return __task_state_to_char(__get_task_state(tsk));
 }
 
 /**
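A worked example of the state-to-character mapping implemented above (a standalone userspace sketch, not kernel code; fls_demo() stands in for the kernel's fls()):

	#include <stdio.h>

	/* Highest set bit, counted from 1; fls_demo(0) == 0, like the kernel's fls(). */
	static int fls_demo(unsigned int x)
	{
		int i = 0;

		while (x) {
			x >>= 1;
			i++;
		}
		return i;
	}

	int main(void)
	{
		static const char state_char[] = "RSDTtXZPI";

		/* TASK_RUNNING=0x0000 -> 'R', TASK_INTERRUPTIBLE=0x0001 -> 'S',
		 * TASK_UNINTERRUPTIBLE=0x0002 -> 'D', EXIT_ZOMBIE=0x0020 -> 'Z'. */
		printf("%c %c %c %c\n",
		       state_char[fls_demo(0x0000)],
		       state_char[fls_demo(0x0001)],
		       state_char[fls_demo(0x0002)],
		       state_char[fls_demo(0x0020)]);
		return 0;
	}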
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 3a19c253bdb1..ae53e413fb13 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -84,6 +84,12 @@ static inline bool mmget_not_zero(struct mm_struct *mm)
 
 /* mmput gets rid of the mappings and all user-space */
 extern void mmput(struct mm_struct *);
+#ifdef CONFIG_MMU
+/* same as above but performs the slow path from the async context. Can
+ * be called from the atomic context as well
+ */
+void mmput_async(struct mm_struct *);
+#endif
 
 /* Grab a reference to a task's mm, if it is not already going away */
 extern struct mm_struct *get_task_mm(struct task_struct *task);
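A minimal sketch of the distinction documented above; drop_mm_ref() and its callers are hypothetical:

	static void drop_mm_ref(struct mm_struct *mm, bool in_atomic_ctx)
	{
		if (in_atomic_ctx)
			mmput_async(mm);	/* final teardown deferred to a workqueue */
		else
			mmput(mm);		/* may sleep while tearing down the mappings */
	}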
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index ecc296c137cd..c8bef436b61d 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -3,7 +3,8 @@
 
 #include <uapi/linux/seccomp.h>
 
-#define SECCOMP_FILTER_FLAG_MASK	(SECCOMP_FILTER_FLAG_TSYNC)
+#define SECCOMP_FILTER_FLAG_MASK	(SECCOMP_FILTER_FLAG_TSYNC | \
+					 SECCOMP_FILTER_FLAG_LOG)
 
 #ifdef CONFIG_SECCOMP
 
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
index 12910cf19869..c149aa7bedf3 100644
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -55,7 +55,7 @@ smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
 }
 
 void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
-int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
+void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
 					  const struct cpumask *);
 
 #endif
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 95606a2d556f..a78186d826d7 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -221,21 +221,25 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
 	}							\
 	static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__))
 
-#ifdef TIF_FSCHECK
 /*
  * Called before coming back to user-mode. Returning to user-mode with an
  * address limit different than USER_DS can allow to overwrite kernel memory.
  */
 static inline void addr_limit_user_check(void)
 {
-
+#ifdef TIF_FSCHECK
 	if (!test_thread_flag(TIF_FSCHECK))
 		return;
+#endif
 
-	BUG_ON(!segment_eq(get_fs(), USER_DS));
+	if (CHECK_DATA_CORRUPTION(!segment_eq(get_fs(), USER_DS),
+				  "Invalid address limit on user-mode return"))
+		force_sig(SIGKILL, current);
+
+#ifdef TIF_FSCHECK
 	clear_thread_flag(TIF_FSCHECK);
-}
 #endif
+}
 
 asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special,
 			       qid_t id, void __user *addr);
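A hedged sketch of the intended call site for addr_limit_user_check() as described in the comment above: an architecture's return-to-user path runs the check so a stale KERNEL_DS address limit is caught (and the task killed) before user mode is reached. The hook name below is hypothetical:

	static void arch_exit_to_user_mode_prepare(void)
	{
		/* ... other exit-to-user work ... */
		addr_limit_user_check();
	}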
diff --git a/include/linux/timer.h b/include/linux/timer.h
index e6789b8757d5..6383c528b148 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -168,6 +168,20 @@ static inline void init_timer_on_stack_key(struct timer_list *timer,
 #define setup_pinned_deferrable_timer_on_stack(timer, fn, data)	\
 	__setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED)
 
+#define TIMER_DATA_TYPE		unsigned long
+#define TIMER_FUNC_TYPE		void (*)(TIMER_DATA_TYPE)
+
+static inline void timer_setup(struct timer_list *timer,
+			       void (*callback)(struct timer_list *),
+			       unsigned int flags)
+{
+	__setup_timer(timer, (TIMER_FUNC_TYPE)callback,
+		      (TIMER_DATA_TYPE)timer, flags);
+}
+
+#define from_timer(var, callback_timer, timer_fieldname) \
+	container_of(callback_timer, typeof(*var), timer_fieldname)
+
 /**
  * timer_pending - is a timer pending?
  * @timer: the timer in question
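A minimal sketch of the timer_setup()/from_timer() pattern introduced above; struct mydrv and its members are hypothetical:

	struct mydrv {
		struct timer_list timer;
		int pending;
	};

	static void mydrv_timeout(struct timer_list *t)
	{
		/* Recover the containing structure from the timer_list pointer. */
		struct mydrv *drv = from_timer(drv, t, timer);

		drv->pending = 0;
	}

	static void mydrv_start(struct mydrv *drv)
	{
		timer_setup(&drv->timer, mydrv_timeout, 0);
		mod_timer(&drv->timer, jiffies + HZ);	/* fire in one second */
	}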
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 7f11050746ae..2e0f22298fe9 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -272,6 +272,7 @@ struct trace_event_call {
 	int				perf_refcount;
 	struct hlist_head __percpu	*perf_events;
 	struct bpf_prog			*prog;
+	struct perf_event		*bpf_prog_owner;
 
 	int	(*perf_perm)(struct trace_event_call *,
 			     struct perf_event *);