Diffstat (limited to 'include')
36 files changed, 215 insertions, 131 deletions
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h index 094a906a0e98..da5b057d775d 100644 --- a/include/acpi/actbl2.h +++ b/include/acpi/actbl2.h | |||
@@ -424,7 +424,8 @@ enum acpi_dmar_type { | |||
424 | ACPI_DMAR_TYPE_RESERVED_MEMORY = 1, | 424 | ACPI_DMAR_TYPE_RESERVED_MEMORY = 1, |
425 | ACPI_DMAR_TYPE_ATSR = 2, | 425 | ACPI_DMAR_TYPE_ATSR = 2, |
426 | ACPI_DMAR_HARDWARE_AFFINITY = 3, | 426 | ACPI_DMAR_HARDWARE_AFFINITY = 3, |
427 | ACPI_DMAR_TYPE_RESERVED = 4 /* 4 and greater are reserved */ | 427 | ACPI_DMAR_TYPE_ANDD = 4, |
428 | ACPI_DMAR_TYPE_RESERVED = 5 /* 5 and greater are reserved */ | ||
428 | }; | 429 | }; |
429 | 430 | ||
430 | /* DMAR Device Scope structure */ | 431 | /* DMAR Device Scope structure */ |
@@ -445,7 +446,8 @@ enum acpi_dmar_scope_type { | |||
445 | ACPI_DMAR_SCOPE_TYPE_BRIDGE = 2, | 446 | ACPI_DMAR_SCOPE_TYPE_BRIDGE = 2, |
446 | ACPI_DMAR_SCOPE_TYPE_IOAPIC = 3, | 447 | ACPI_DMAR_SCOPE_TYPE_IOAPIC = 3, |
447 | ACPI_DMAR_SCOPE_TYPE_HPET = 4, | 448 | ACPI_DMAR_SCOPE_TYPE_HPET = 4, |
448 | ACPI_DMAR_SCOPE_TYPE_RESERVED = 5 /* 5 and greater are reserved */ | 449 | ACPI_DMAR_SCOPE_TYPE_ACPI = 5, |
450 | ACPI_DMAR_SCOPE_TYPE_RESERVED = 6 /* 6 and greater are reserved */ | ||
449 | }; | 451 | }; |
450 | 452 | ||
451 | struct acpi_dmar_pci_path { | 453 | struct acpi_dmar_pci_path { |
@@ -507,6 +509,15 @@ struct acpi_dmar_rhsa { | |||
507 | u32 proximity_domain; | 509 | u32 proximity_domain; |
508 | }; | 510 | }; |
509 | 511 | ||
512 | /* 4: ACPI Namespace Device Declaration Structure */ | ||
513 | |||
514 | struct acpi_dmar_andd { | ||
515 | struct acpi_dmar_header header; | ||
516 | u8 reserved[3]; | ||
517 | u8 device_number; | ||
518 | u8 object_name[]; | ||
519 | }; | ||
520 | |||
510 | /******************************************************************************* | 521 | /******************************************************************************* |
511 | * | 522 | * |
512 | * HPET - High Precision Event Timer table | 523 | * HPET - High Precision Event Timer table |
diff --git a/include/dt-bindings/clock/tegra124-car.h b/include/dt-bindings/clock/tegra124-car.h index a1116a3b54ef..8c1603b10665 100644 --- a/include/dt-bindings/clock/tegra124-car.h +++ b/include/dt-bindings/clock/tegra124-car.h | |||
@@ -36,10 +36,10 @@ | |||
36 | #define TEGRA124_CLK_PWM 17 | 36 | #define TEGRA124_CLK_PWM 17 |
37 | #define TEGRA124_CLK_I2S2 18 | 37 | #define TEGRA124_CLK_I2S2 18 |
38 | /* 20 (register bit affects vi and vi_sensor) */ | 38 | /* 20 (register bit affects vi and vi_sensor) */ |
39 | #define TEGRA124_CLK_GR_2D 21 | 39 | /* 21 */ |
40 | #define TEGRA124_CLK_USBD 22 | 40 | #define TEGRA124_CLK_USBD 22 |
41 | #define TEGRA124_CLK_ISP 23 | 41 | #define TEGRA124_CLK_ISP 23 |
42 | #define TEGRA124_CLK_GR_3D 24 | 42 | /* 24 */ |
43 | /* 25 */ | 43 | /* 25 */ |
44 | #define TEGRA124_CLK_DISP2 26 | 44 | #define TEGRA124_CLK_DISP2 26 |
45 | #define TEGRA124_CLK_DISP1 27 | 45 | #define TEGRA124_CLK_DISP1 27 |
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index be85127bfed3..f27000f55a83 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h | |||
@@ -171,6 +171,11 @@ static inline int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 add | |||
171 | return 0; | 171 | return 0; |
172 | } | 172 | } |
173 | 173 | ||
174 | static inline int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write) | ||
175 | { | ||
176 | return -ENXIO; | ||
177 | } | ||
178 | |||
174 | static inline int kvm_vgic_init(struct kvm *kvm) | 179 | static inline int kvm_vgic_init(struct kvm *kvm) |
175 | { | 180 | { |
176 | return 0; | 181 | return 0; |
diff --git a/include/linux/audit.h b/include/linux/audit.h index aa865a9a4c4f..ec1464df4c60 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h | |||
@@ -43,6 +43,7 @@ struct mq_attr; | |||
43 | struct mqstat; | 43 | struct mqstat; |
44 | struct audit_watch; | 44 | struct audit_watch; |
45 | struct audit_tree; | 45 | struct audit_tree; |
46 | struct sk_buff; | ||
46 | 47 | ||
47 | struct audit_krule { | 48 | struct audit_krule { |
48 | int vers_ops; | 49 | int vers_ops; |
@@ -463,7 +464,7 @@ extern int audit_filter_user(int type); | |||
463 | extern int audit_filter_type(int type); | 464 | extern int audit_filter_type(int type); |
464 | extern int audit_rule_change(int type, __u32 portid, int seq, | 465 | extern int audit_rule_change(int type, __u32 portid, int seq, |
465 | void *data, size_t datasz); | 466 | void *data, size_t datasz); |
466 | extern int audit_list_rules_send(__u32 portid, int seq); | 467 | extern int audit_list_rules_send(struct sk_buff *request_skb, int seq); |
467 | 468 | ||
468 | extern u32 audit_enabled; | 469 | extern u32 audit_enabled; |
469 | #else /* CONFIG_AUDIT */ | 470 | #else /* CONFIG_AUDIT */ |
diff --git a/include/linux/bitops.h b/include/linux/bitops.h index abc9ca778456..be5fd38bd5a0 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h | |||
@@ -196,6 +196,21 @@ static inline unsigned long __ffs64(u64 word) | |||
196 | 196 | ||
197 | #ifdef __KERNEL__ | 197 | #ifdef __KERNEL__ |
198 | 198 | ||
199 | #ifndef set_mask_bits | ||
200 | #define set_mask_bits(ptr, _mask, _bits) \ | ||
201 | ({ \ | ||
202 | const typeof(*ptr) mask = (_mask), bits = (_bits); \ | ||
203 | typeof(*ptr) old, new; \ | ||
204 | \ | ||
205 | do { \ | ||
206 | old = ACCESS_ONCE(*ptr); \ | ||
207 | new = (old & ~mask) | bits; \ | ||
208 | } while (cmpxchg(ptr, old, new) != old); \ | ||
209 | \ | ||
210 | new; \ | ||
211 | }) | ||
212 | #endif | ||
213 | |||
199 | #ifndef find_last_bit | 214 | #ifndef find_last_bit |
200 | /** | 215 | /** |
201 | * find_last_bit - find the last set bit in a memory region | 216 | * find_last_bit - find the last set bit in a memory region |
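The set_mask_bits() helper added above is a lock-free read-modify-write: it snapshots *ptr, clears the bits selected by _mask, ORs in _bits, and retries cmpxchg() until no concurrent writer has intervened. Below is a minimal userspace sketch of the same pattern using C11 atomics instead of ACCESS_ONCE()/cmpxchg(); the _demo names are illustrative and not part of the patch.

#include <stdatomic.h>
#include <stdio.h>

/* Userspace analogue of set_mask_bits(): atomically replace the bits
 * selected by 'mask' with 'bits' and return the freshly written value. */
static unsigned long set_mask_bits_demo(_Atomic unsigned long *ptr,
					unsigned long mask, unsigned long bits)
{
	unsigned long old, new;

	do {
		old = atomic_load(ptr);
		new = (old & ~mask) | bits;
	} while (!atomic_compare_exchange_weak(ptr, &old, new));

	return new;
}

int main(void)
{
	_Atomic unsigned long flags = 0xf0;

	/* Replace the low nibble with 0x5: 0xf0 -> 0xf5 */
	printf("%#lx\n", set_mask_bits_demo(&flags, 0x0fUL, 0x05UL));
	return 0;
}

As in the kernel macro, the expression yields the newly written word, so a caller can test the updated value directly.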
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 18ba8a627f46..2ff2e8d982be 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h | |||
@@ -121,8 +121,7 @@ void blk_mq_init_commands(struct request_queue *, void (*init)(void *data, struc | |||
121 | 121 | ||
122 | void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); | 122 | void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); |
123 | 123 | ||
124 | void blk_mq_insert_request(struct request_queue *, struct request *, | 124 | void blk_mq_insert_request(struct request *, bool, bool, bool); |
125 | bool, bool); | ||
126 | void blk_mq_run_queues(struct request_queue *q, bool async); | 125 | void blk_mq_run_queues(struct request_queue *q, bool async); |
127 | void blk_mq_free_request(struct request *rq); | 126 | void blk_mq_free_request(struct request *rq); |
128 | bool blk_mq_can_queue(struct blk_mq_hw_ctx *); | 127 | bool blk_mq_can_queue(struct blk_mq_hw_ctx *); |
@@ -134,7 +133,13 @@ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_ind | |||
134 | struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *, unsigned int); | 133 | struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *, unsigned int); |
135 | void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int); | 134 | void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int); |
136 | 135 | ||
137 | void blk_mq_end_io(struct request *rq, int error); | 136 | bool blk_mq_end_io_partial(struct request *rq, int error, |
137 | unsigned int nr_bytes); | ||
138 | static inline void blk_mq_end_io(struct request *rq, int error) | ||
139 | { | ||
140 | bool done = !blk_mq_end_io_partial(rq, error, blk_rq_bytes(rq)); | ||
141 | BUG_ON(!done); | ||
142 | } | ||
138 | 143 | ||
139 | void blk_mq_complete_request(struct request *rq); | 144 | void blk_mq_complete_request(struct request *rq); |
140 | 145 | ||
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index 092b64168d7f..4a21a872dbbd 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h | |||
@@ -245,6 +245,10 @@ long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate, | |||
245 | void omap2_init_clk_clkdm(struct clk_hw *clk); | 245 | void omap2_init_clk_clkdm(struct clk_hw *clk); |
246 | unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw, | 246 | unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw, |
247 | unsigned long parent_rate); | 247 | unsigned long parent_rate); |
248 | int omap3_clkoutx2_set_rate(struct clk_hw *hw, unsigned long rate, | ||
249 | unsigned long parent_rate); | ||
250 | long omap3_clkoutx2_round_rate(struct clk_hw *hw, unsigned long rate, | ||
251 | unsigned long *prate); | ||
248 | int omap2_clkops_enable_clkdm(struct clk_hw *hw); | 252 | int omap2_clkops_enable_clkdm(struct clk_hw *hw); |
249 | void omap2_clkops_disable_clkdm(struct clk_hw *hw); | 253 | void omap2_clkops_disable_clkdm(struct clk_hw *hw); |
250 | int omap2_clk_disable_autoidle_all(void); | 254 | int omap2_clk_disable_autoidle_all(void); |
diff --git a/include/linux/dmar.h b/include/linux/dmar.h index eccb0c0c6cf6..23c8db129560 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h | |||
@@ -25,6 +25,8 @@ | |||
25 | #include <linux/types.h> | 25 | #include <linux/types.h> |
26 | #include <linux/msi.h> | 26 | #include <linux/msi.h> |
27 | #include <linux/irqreturn.h> | 27 | #include <linux/irqreturn.h> |
28 | #include <linux/rwsem.h> | ||
29 | #include <linux/rcupdate.h> | ||
28 | 30 | ||
29 | struct acpi_dmar_header; | 31 | struct acpi_dmar_header; |
30 | 32 | ||
@@ -34,13 +36,19 @@ struct acpi_dmar_header; | |||
34 | 36 | ||
35 | struct intel_iommu; | 37 | struct intel_iommu; |
36 | 38 | ||
39 | struct dmar_dev_scope { | ||
40 | struct device __rcu *dev; | ||
41 | u8 bus; | ||
42 | u8 devfn; | ||
43 | }; | ||
44 | |||
37 | #ifdef CONFIG_DMAR_TABLE | 45 | #ifdef CONFIG_DMAR_TABLE |
38 | extern struct acpi_table_header *dmar_tbl; | 46 | extern struct acpi_table_header *dmar_tbl; |
39 | struct dmar_drhd_unit { | 47 | struct dmar_drhd_unit { |
40 | struct list_head list; /* list of drhd units */ | 48 | struct list_head list; /* list of drhd units */ |
41 | struct acpi_dmar_header *hdr; /* ACPI header */ | 49 | struct acpi_dmar_header *hdr; /* ACPI header */ |
42 | u64 reg_base_addr; /* register base address*/ | 50 | u64 reg_base_addr; /* register base address*/ |
43 | struct pci_dev **devices; /* target device array */ | 51 | struct dmar_dev_scope *devices;/* target device array */ |
44 | int devices_cnt; /* target device count */ | 52 | int devices_cnt; /* target device count */ |
45 | u16 segment; /* PCI domain */ | 53 | u16 segment; /* PCI domain */ |
46 | u8 ignored:1; /* ignore drhd */ | 54 | u8 ignored:1; /* ignore drhd */ |
@@ -48,33 +56,66 @@ struct dmar_drhd_unit { | |||
48 | struct intel_iommu *iommu; | 56 | struct intel_iommu *iommu; |
49 | }; | 57 | }; |
50 | 58 | ||
59 | struct dmar_pci_notify_info { | ||
60 | struct pci_dev *dev; | ||
61 | unsigned long event; | ||
62 | int bus; | ||
63 | u16 seg; | ||
64 | u16 level; | ||
65 | struct acpi_dmar_pci_path path[]; | ||
66 | } __attribute__((packed)); | ||
67 | |||
68 | extern struct rw_semaphore dmar_global_lock; | ||
51 | extern struct list_head dmar_drhd_units; | 69 | extern struct list_head dmar_drhd_units; |
52 | 70 | ||
53 | #define for_each_drhd_unit(drhd) \ | 71 | #define for_each_drhd_unit(drhd) \ |
54 | list_for_each_entry(drhd, &dmar_drhd_units, list) | 72 | list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) |
55 | 73 | ||
56 | #define for_each_active_drhd_unit(drhd) \ | 74 | #define for_each_active_drhd_unit(drhd) \ |
57 | list_for_each_entry(drhd, &dmar_drhd_units, list) \ | 75 | list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \ |
58 | if (drhd->ignored) {} else | 76 | if (drhd->ignored) {} else |
59 | 77 | ||
60 | #define for_each_active_iommu(i, drhd) \ | 78 | #define for_each_active_iommu(i, drhd) \ |
61 | list_for_each_entry(drhd, &dmar_drhd_units, list) \ | 79 | list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \ |
62 | if (i=drhd->iommu, drhd->ignored) {} else | 80 | if (i=drhd->iommu, drhd->ignored) {} else |
63 | 81 | ||
64 | #define for_each_iommu(i, drhd) \ | 82 | #define for_each_iommu(i, drhd) \ |
65 | list_for_each_entry(drhd, &dmar_drhd_units, list) \ | 83 | list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \ |
66 | if (i=drhd->iommu, 0) {} else | 84 | if (i=drhd->iommu, 0) {} else |
67 | 85 | ||
86 | static inline bool dmar_rcu_check(void) | ||
87 | { | ||
88 | return rwsem_is_locked(&dmar_global_lock) || | ||
89 | system_state == SYSTEM_BOOTING; | ||
90 | } | ||
91 | |||
92 | #define dmar_rcu_dereference(p) rcu_dereference_check((p), dmar_rcu_check()) | ||
93 | |||
94 | #define for_each_dev_scope(a, c, p, d) \ | ||
95 | for ((p) = 0; ((d) = (p) < (c) ? dmar_rcu_dereference((a)[(p)].dev) : \ | ||
96 | NULL, (p) < (c)); (p)++) | ||
97 | |||
98 | #define for_each_active_dev_scope(a, c, p, d) \ | ||
99 | for_each_dev_scope((a), (c), (p), (d)) if (!(d)) { continue; } else | ||
100 | |||
68 | extern int dmar_table_init(void); | 101 | extern int dmar_table_init(void); |
69 | extern int dmar_dev_scope_init(void); | 102 | extern int dmar_dev_scope_init(void); |
70 | extern int dmar_parse_dev_scope(void *start, void *end, int *cnt, | 103 | extern int dmar_parse_dev_scope(void *start, void *end, int *cnt, |
71 | struct pci_dev ***devices, u16 segment); | 104 | struct dmar_dev_scope **devices, u16 segment); |
72 | extern void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt); | 105 | extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt); |
73 | 106 | extern void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt); | |
107 | extern int dmar_insert_dev_scope(struct dmar_pci_notify_info *info, | ||
108 | void *start, void*end, u16 segment, | ||
109 | struct dmar_dev_scope *devices, | ||
110 | int devices_cnt); | ||
111 | extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, | ||
112 | u16 segment, struct dmar_dev_scope *devices, | ||
113 | int count); | ||
74 | /* Intel IOMMU detection */ | 114 | /* Intel IOMMU detection */ |
75 | extern int detect_intel_iommu(void); | 115 | extern int detect_intel_iommu(void); |
76 | extern int enable_drhd_fault_handling(void); | 116 | extern int enable_drhd_fault_handling(void); |
77 | #else | 117 | #else |
118 | struct dmar_pci_notify_info; | ||
78 | static inline int detect_intel_iommu(void) | 119 | static inline int detect_intel_iommu(void) |
79 | { | 120 | { |
80 | return -ENODEV; | 121 | return -ENODEV; |
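The for_each_dev_scope()/for_each_active_dev_scope() iterators introduced above combine a comma-operator loop condition with an "if (!(d)) { continue; } else" wrapper so that empty slots in the RCU-protected device array are skipped while a normal statement can still follow as the loop body. A stand-alone sketch of that looping pattern, with plain pointers in place of dmar_rcu_dereference() and purely hypothetical demo names:

#include <stdio.h>

struct dev_scope { const char *dev; };

/* Walk all slots, yielding NULL for empty ones (mirrors for_each_dev_scope). */
#define for_each_dev_scope_demo(a, c, p, d) \
	for ((p) = 0; ((d) = (p) < (c) ? (a)[(p)].dev : NULL, (p) < (c)); (p)++)

/* Skip empty slots; the trailing else attaches the caller's loop body. */
#define for_each_active_dev_scope_demo(a, c, p, d) \
	for_each_dev_scope_demo((a), (c), (p), (d)) if (!(d)) { continue; } else

int main(void)
{
	struct dev_scope scopes[] = { { "ioapic" }, { NULL }, { "hpet" } };
	const char *dev;
	int i;

	for_each_active_dev_scope_demo(scopes, 3, i, dev)
		printf("slot %d: %s\n", i, dev);	/* prints slots 0 and 2 */
	return 0;
}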
@@ -138,30 +179,9 @@ extern int arch_setup_dmar_msi(unsigned int irq); | |||
138 | 179 | ||
139 | #ifdef CONFIG_INTEL_IOMMU | 180 | #ifdef CONFIG_INTEL_IOMMU |
140 | extern int iommu_detected, no_iommu; | 181 | extern int iommu_detected, no_iommu; |
141 | extern struct list_head dmar_rmrr_units; | ||
142 | struct dmar_rmrr_unit { | ||
143 | struct list_head list; /* list of rmrr units */ | ||
144 | struct acpi_dmar_header *hdr; /* ACPI header */ | ||
145 | u64 base_address; /* reserved base address*/ | ||
146 | u64 end_address; /* reserved end address */ | ||
147 | struct pci_dev **devices; /* target devices */ | ||
148 | int devices_cnt; /* target device count */ | ||
149 | }; | ||
150 | |||
151 | #define for_each_rmrr_units(rmrr) \ | ||
152 | list_for_each_entry(rmrr, &dmar_rmrr_units, list) | ||
153 | |||
154 | struct dmar_atsr_unit { | ||
155 | struct list_head list; /* list of ATSR units */ | ||
156 | struct acpi_dmar_header *hdr; /* ACPI header */ | ||
157 | struct pci_dev **devices; /* target devices */ | ||
158 | int devices_cnt; /* target device count */ | ||
159 | u8 include_all:1; /* include all ports */ | ||
160 | }; | ||
161 | |||
162 | int dmar_parse_rmrr_atsr_dev(void); | ||
163 | extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header); | 182 | extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header); |
164 | extern int dmar_parse_one_atsr(struct acpi_dmar_header *header); | 183 | extern int dmar_parse_one_atsr(struct acpi_dmar_header *header); |
184 | extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info); | ||
165 | extern int intel_iommu_init(void); | 185 | extern int intel_iommu_init(void); |
166 | #else /* !CONFIG_INTEL_IOMMU: */ | 186 | #else /* !CONFIG_INTEL_IOMMU: */ |
167 | static inline int intel_iommu_init(void) { return -ENODEV; } | 187 | static inline int intel_iommu_init(void) { return -ENODEV; } |
@@ -173,7 +193,7 @@ static inline int dmar_parse_one_atsr(struct acpi_dmar_header *header) | |||
173 | { | 193 | { |
174 | return 0; | 194 | return 0; |
175 | } | 195 | } |
176 | static inline int dmar_parse_rmrr_atsr_dev(void) | 196 | static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) |
177 | { | 197 | { |
178 | return 0; | 198 | return 0; |
179 | } | 199 | } |
diff --git a/include/linux/file.h b/include/linux/file.h index cbacf4faf447..4d69123377a2 100644 --- a/include/linux/file.h +++ b/include/linux/file.h | |||
@@ -28,33 +28,36 @@ static inline void fput_light(struct file *file, int fput_needed) | |||
28 | 28 | ||
29 | struct fd { | 29 | struct fd { |
30 | struct file *file; | 30 | struct file *file; |
31 | int need_put; | 31 | unsigned int flags; |
32 | }; | 32 | }; |
33 | #define FDPUT_FPUT 1 | ||
34 | #define FDPUT_POS_UNLOCK 2 | ||
33 | 35 | ||
34 | static inline void fdput(struct fd fd) | 36 | static inline void fdput(struct fd fd) |
35 | { | 37 | { |
36 | if (fd.need_put) | 38 | if (fd.flags & FDPUT_FPUT) |
37 | fput(fd.file); | 39 | fput(fd.file); |
38 | } | 40 | } |
39 | 41 | ||
40 | extern struct file *fget(unsigned int fd); | 42 | extern struct file *fget(unsigned int fd); |
41 | extern struct file *fget_light(unsigned int fd, int *fput_needed); | 43 | extern struct file *fget_raw(unsigned int fd); |
44 | extern unsigned long __fdget(unsigned int fd); | ||
45 | extern unsigned long __fdget_raw(unsigned int fd); | ||
46 | extern unsigned long __fdget_pos(unsigned int fd); | ||
42 | 47 | ||
43 | static inline struct fd fdget(unsigned int fd) | 48 | static inline struct fd __to_fd(unsigned long v) |
44 | { | 49 | { |
45 | int b; | 50 | return (struct fd){(struct file *)(v & ~3),v & 3}; |
46 | struct file *f = fget_light(fd, &b); | ||
47 | return (struct fd){f,b}; | ||
48 | } | 51 | } |
49 | 52 | ||
50 | extern struct file *fget_raw(unsigned int fd); | 53 | static inline struct fd fdget(unsigned int fd) |
51 | extern struct file *fget_raw_light(unsigned int fd, int *fput_needed); | 54 | { |
55 | return __to_fd(__fdget(fd)); | ||
56 | } | ||
52 | 57 | ||
53 | static inline struct fd fdget_raw(unsigned int fd) | 58 | static inline struct fd fdget_raw(unsigned int fd) |
54 | { | 59 | { |
55 | int b; | 60 | return __to_fd(__fdget_raw(fd)); |
56 | struct file *f = fget_raw_light(fd, &b); | ||
57 | return (struct fd){f,b}; | ||
58 | } | 61 | } |
59 | 62 | ||
60 | extern int f_dupfd(unsigned int from, struct file *file, unsigned flags); | 63 | extern int f_dupfd(unsigned int from, struct file *file, unsigned flags); |
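The fdget() rework above works because __fdget() returns the struct file pointer and the FDPUT_* flags packed into one unsigned long: struct file is at least 4-byte aligned (hence the new __attribute__((aligned(4))) on struct file further down in this patch), so its two low bits are free to carry FDPUT_FPUT and FDPUT_POS_UNLOCK, and __to_fd() masks them apart. A userspace sketch of that encoding (the _demo types and to_fd() helper here are illustrative only):

#include <assert.h>
#include <stdio.h>

#define FDPUT_FPUT	 1
#define FDPUT_POS_UNLOCK 2

/* Stand-in for struct file; the alignment is what frees the low two bits. */
struct file { int dummy; } __attribute__((aligned(4)));

struct fd {
	struct file *file;
	unsigned int flags;
};

/* Mirrors __to_fd(): split the packed word back into pointer + flags. */
static struct fd to_fd(unsigned long v)
{
	return (struct fd){ (struct file *)(v & ~3UL), v & 3 };
}

int main(void)
{
	static struct file f;
	unsigned long packed = (unsigned long)&f | FDPUT_FPUT;
	struct fd fd = to_fd(packed);

	assert(fd.file == &f && (fd.flags & FDPUT_FPUT));
	printf("file=%p flags=%u\n", (void *)fd.file, fd.flags);
	return 0;
}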
diff --git a/include/linux/firewire.h b/include/linux/firewire.h index 5d7782e42b8f..c3683bdf28fe 100644 --- a/include/linux/firewire.h +++ b/include/linux/firewire.h | |||
@@ -200,6 +200,7 @@ struct fw_device { | |||
200 | unsigned irmc:1; | 200 | unsigned irmc:1; |
201 | unsigned bc_implemented:2; | 201 | unsigned bc_implemented:2; |
202 | 202 | ||
203 | work_func_t workfn; | ||
203 | struct delayed_work work; | 204 | struct delayed_work work; |
204 | struct fw_attribute_group attribute_group; | 205 | struct fw_attribute_group attribute_group; |
205 | }; | 206 | }; |
diff --git a/include/linux/fs.h b/include/linux/fs.h index 60829565e552..23b2a35d712e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -123,6 +123,9 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, | |||
123 | /* File is opened with O_PATH; almost nothing can be done with it */ | 123 | /* File is opened with O_PATH; almost nothing can be done with it */ |
124 | #define FMODE_PATH ((__force fmode_t)0x4000) | 124 | #define FMODE_PATH ((__force fmode_t)0x4000) |
125 | 125 | ||
126 | /* File needs atomic accesses to f_pos */ | ||
127 | #define FMODE_ATOMIC_POS ((__force fmode_t)0x8000) | ||
128 | |||
126 | /* File was opened by fanotify and shouldn't generate fanotify events */ | 129 | /* File was opened by fanotify and shouldn't generate fanotify events */ |
127 | #define FMODE_NONOTIFY ((__force fmode_t)0x1000000) | 130 | #define FMODE_NONOTIFY ((__force fmode_t)0x1000000) |
128 | 131 | ||
@@ -780,13 +783,14 @@ struct file { | |||
780 | const struct file_operations *f_op; | 783 | const struct file_operations *f_op; |
781 | 784 | ||
782 | /* | 785 | /* |
783 | * Protects f_ep_links, f_flags, f_pos vs i_size in lseek SEEK_CUR. | 786 | * Protects f_ep_links, f_flags. |
784 | * Must not be taken from IRQ context. | 787 | * Must not be taken from IRQ context. |
785 | */ | 788 | */ |
786 | spinlock_t f_lock; | 789 | spinlock_t f_lock; |
787 | atomic_long_t f_count; | 790 | atomic_long_t f_count; |
788 | unsigned int f_flags; | 791 | unsigned int f_flags; |
789 | fmode_t f_mode; | 792 | fmode_t f_mode; |
793 | struct mutex f_pos_lock; | ||
790 | loff_t f_pos; | 794 | loff_t f_pos; |
791 | struct fown_struct f_owner; | 795 | struct fown_struct f_owner; |
792 | const struct cred *f_cred; | 796 | const struct cred *f_cred; |
@@ -808,7 +812,7 @@ struct file { | |||
808 | #ifdef CONFIG_DEBUG_WRITECOUNT | 812 | #ifdef CONFIG_DEBUG_WRITECOUNT |
809 | unsigned long f_mnt_write_state; | 813 | unsigned long f_mnt_write_state; |
810 | #endif | 814 | #endif |
811 | }; | 815 | } __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */ |
812 | 816 | ||
813 | struct file_handle { | 817 | struct file_handle { |
814 | __u32 handle_bytes; | 818 | __u32 handle_bytes; |
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 4e4cc28623ad..4cdb3a17bcb5 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h | |||
@@ -495,10 +495,6 @@ enum { | |||
495 | FILTER_TRACE_FN, | 495 | FILTER_TRACE_FN, |
496 | }; | 496 | }; |
497 | 497 | ||
498 | #define EVENT_STORAGE_SIZE 128 | ||
499 | extern struct mutex event_storage_mutex; | ||
500 | extern char event_storage[EVENT_STORAGE_SIZE]; | ||
501 | |||
502 | extern int trace_event_raw_init(struct ftrace_event_call *call); | 498 | extern int trace_event_raw_init(struct ftrace_event_call *call); |
503 | extern int trace_define_field(struct ftrace_event_call *call, const char *type, | 499 | extern int trace_define_field(struct ftrace_event_call *call, const char *type, |
504 | const char *name, int offset, int size, | 500 | const char *name, int offset, int size, |
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 0437439bc047..39b81dc7d01a 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
@@ -123,6 +123,10 @@ struct vm_area_struct; | |||
123 | __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \ | 123 | __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \ |
124 | __GFP_NO_KSWAPD) | 124 | __GFP_NO_KSWAPD) |
125 | 125 | ||
126 | /* | ||
127 | * GFP_THISNODE does not perform any reclaim, you most likely want to | ||
128 | * use __GFP_THISNODE to allocate from a given node without fallback! | ||
129 | */ | ||
126 | #ifdef CONFIG_NUMA | 130 | #ifdef CONFIG_NUMA |
127 | #define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY) | 131 | #define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY) |
128 | #else | 132 | #else |
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index db512014e061..b826239bdce0 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h | |||
@@ -157,46 +157,6 @@ static inline int hpage_nr_pages(struct page *page) | |||
157 | return HPAGE_PMD_NR; | 157 | return HPAGE_PMD_NR; |
158 | return 1; | 158 | return 1; |
159 | } | 159 | } |
160 | /* | ||
161 | * compound_trans_head() should be used instead of compound_head(), | ||
162 | * whenever the "page" passed as parameter could be the tail of a | ||
163 | * transparent hugepage that could be undergoing a | ||
164 | * __split_huge_page_refcount(). The page structure layout often | ||
165 | * changes across releases and it makes extensive use of unions. So if | ||
166 | * the page structure layout will change in a way that | ||
167 | * page->first_page gets clobbered by __split_huge_page_refcount, the | ||
168 | * implementation making use of smp_rmb() will be required. | ||
169 | * | ||
170 | * Currently we define compound_trans_head as compound_head, because | ||
171 | * page->private is in the same union with page->first_page, and | ||
172 | * page->private isn't clobbered. However this also means we're | ||
173 | * currently leaving dirt into the page->private field of anonymous | ||
174 | * pages resulting from a THP split, instead of setting page->private | ||
175 | * to zero like for every other page that has PG_private not set. But | ||
176 | * anonymous pages don't use page->private so this is not a problem. | ||
177 | */ | ||
178 | #if 0 | ||
179 | /* This will be needed if page->private will be clobbered in split_huge_page */ | ||
180 | static inline struct page *compound_trans_head(struct page *page) | ||
181 | { | ||
182 | if (PageTail(page)) { | ||
183 | struct page *head; | ||
184 | head = page->first_page; | ||
185 | smp_rmb(); | ||
186 | /* | ||
187 | * head may be a dangling pointer. | ||
188 | * __split_huge_page_refcount clears PageTail before | ||
189 | * overwriting first_page, so if PageTail is still | ||
190 | * there it means the head pointer isn't dangling. | ||
191 | */ | ||
192 | if (PageTail(page)) | ||
193 | return head; | ||
194 | } | ||
195 | return page; | ||
196 | } | ||
197 | #else | ||
198 | #define compound_trans_head(page) compound_head(page) | ||
199 | #endif | ||
200 | 160 | ||
201 | extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | 161 | extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, |
202 | unsigned long addr, pmd_t pmd, pmd_t *pmdp); | 162 | unsigned long addr, pmd_t pmd, pmd_t *pmdp); |
@@ -226,7 +186,6 @@ static inline int split_huge_page(struct page *page) | |||
226 | do { } while (0) | 186 | do { } while (0) |
227 | #define split_huge_page_pmd_mm(__mm, __address, __pmd) \ | 187 | #define split_huge_page_pmd_mm(__mm, __address, __pmd) \ |
228 | do { } while (0) | 188 | do { } while (0) |
229 | #define compound_trans_head(page) compound_head(page) | ||
230 | static inline int hugepage_madvise(struct vm_area_struct *vma, | 189 | static inline int hugepage_madvise(struct vm_area_struct *vma, |
231 | unsigned long *vm_flags, int advice) | 190 | unsigned long *vm_flags, int advice) |
232 | { | 191 | { |
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 2c4bed593b32..0a2da5188217 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h | |||
@@ -319,6 +319,7 @@ struct intel_iommu { | |||
319 | int agaw; /* agaw of this iommu */ | 319 | int agaw; /* agaw of this iommu */ |
320 | int msagaw; /* max sagaw of this iommu */ | 320 | int msagaw; /* max sagaw of this iommu */ |
321 | unsigned int irq; | 321 | unsigned int irq; |
322 | u16 segment; /* PCI segment# */ | ||
322 | unsigned char name[13]; /* Device Name */ | 323 | unsigned char name[13]; /* Device Name */ |
323 | 324 | ||
324 | #ifdef CONFIG_INTEL_IOMMU | 325 | #ifdef CONFIG_INTEL_IOMMU |
diff --git a/include/linux/iova.h b/include/linux/iova.h index 76a0759e88ec..3277f4711349 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h | |||
@@ -47,5 +47,7 @@ void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to); | |||
47 | void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit); | 47 | void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit); |
48 | struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); | 48 | struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); |
49 | void put_iova_domain(struct iova_domain *iovad); | 49 | void put_iova_domain(struct iova_domain *iovad); |
50 | struct iova *split_and_remove_iova(struct iova_domain *iovad, | ||
51 | struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi); | ||
50 | 52 | ||
51 | #endif | 53 | #endif |
diff --git a/include/linux/mm.h b/include/linux/mm.h index f28f46eade6a..c1b7414c7bef 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -175,7 +175,7 @@ extern unsigned int kobjsize(const void *objp); | |||
175 | * Special vmas that are non-mergable, non-mlock()able. | 175 | * Special vmas that are non-mergable, non-mlock()able. |
176 | * Note: mm/huge_memory.c VM_NO_THP depends on this definition. | 176 | * Note: mm/huge_memory.c VM_NO_THP depends on this definition. |
177 | */ | 177 | */ |
178 | #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP) | 178 | #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP) |
179 | 179 | ||
180 | /* | 180 | /* |
181 | * mapping from the currently active vm_flags protection bits (the | 181 | * mapping from the currently active vm_flags protection bits (the |
@@ -399,8 +399,18 @@ static inline void compound_unlock_irqrestore(struct page *page, | |||
399 | 399 | ||
400 | static inline struct page *compound_head(struct page *page) | 400 | static inline struct page *compound_head(struct page *page) |
401 | { | 401 | { |
402 | if (unlikely(PageTail(page))) | 402 | if (unlikely(PageTail(page))) { |
403 | return page->first_page; | 403 | struct page *head = page->first_page; |
404 | |||
405 | /* | ||
406 | * page->first_page may be a dangling pointer to an old | ||
407 | * compound page, so recheck that it is still a tail | ||
408 | * page before returning. | ||
409 | */ | ||
410 | smp_rmb(); | ||
411 | if (likely(PageTail(page))) | ||
412 | return head; | ||
413 | } | ||
404 | return page; | 414 | return page; |
405 | } | 415 | } |
406 | 416 | ||
@@ -757,7 +767,7 @@ static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid) | |||
757 | #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS | 767 | #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS |
758 | static inline int page_cpupid_xchg_last(struct page *page, int cpupid) | 768 | static inline int page_cpupid_xchg_last(struct page *page, int cpupid) |
759 | { | 769 | { |
760 | return xchg(&page->_last_cpupid, cpupid); | 770 | return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK); |
761 | } | 771 | } |
762 | 772 | ||
763 | static inline int page_cpupid_last(struct page *page) | 773 | static inline int page_cpupid_last(struct page *page) |
@@ -766,7 +776,7 @@ static inline int page_cpupid_last(struct page *page) | |||
766 | } | 776 | } |
767 | static inline void page_cpupid_reset_last(struct page *page) | 777 | static inline void page_cpupid_reset_last(struct page *page) |
768 | { | 778 | { |
769 | page->_last_cpupid = -1; | 779 | page->_last_cpupid = -1 & LAST_CPUPID_MASK; |
770 | } | 780 | } |
771 | #else | 781 | #else |
772 | static inline int page_cpupid_last(struct page *page) | 782 | static inline int page_cpupid_last(struct page *page) |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 5f2052c83154..9b61b9bf81ac 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -590,10 +590,10 @@ static inline bool zone_is_empty(struct zone *zone) | |||
590 | 590 | ||
591 | /* | 591 | /* |
592 | * The NUMA zonelists are doubled because we need zonelists that restrict the | 592 | * The NUMA zonelists are doubled because we need zonelists that restrict the |
593 | * allocations to a single node for GFP_THISNODE. | 593 | * allocations to a single node for __GFP_THISNODE. |
594 | * | 594 | * |
595 | * [0] : Zonelist with fallback | 595 | * [0] : Zonelist with fallback |
596 | * [1] : No fallback (GFP_THISNODE) | 596 | * [1] : No fallback (__GFP_THISNODE) |
597 | */ | 597 | */ |
598 | #define MAX_ZONELISTS 2 | 598 | #define MAX_ZONELISTS 2 |
599 | 599 | ||
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 1005ebf17575..5a09a48f2658 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h | |||
@@ -163,4 +163,11 @@ enum { | |||
163 | /* changeable features with no special hardware requirements */ | 163 | /* changeable features with no special hardware requirements */ |
164 | #define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO) | 164 | #define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO) |
165 | 165 | ||
166 | #define NETIF_F_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \ | ||
167 | NETIF_F_HW_VLAN_CTAG_RX | \ | ||
168 | NETIF_F_HW_VLAN_CTAG_TX | \ | ||
169 | NETIF_F_HW_VLAN_STAG_FILTER | \ | ||
170 | NETIF_F_HW_VLAN_STAG_RX | \ | ||
171 | NETIF_F_HW_VLAN_STAG_TX) | ||
172 | |||
166 | #endif /* _LINUX_NETDEV_FEATURES_H */ | 173 | #endif /* _LINUX_NETDEV_FEATURES_H */ |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index e8eeebd49a98..daafd9561cbc 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -3014,7 +3014,7 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features) | |||
3014 | { | 3014 | { |
3015 | return __skb_gso_segment(skb, features, true); | 3015 | return __skb_gso_segment(skb, features, true); |
3016 | } | 3016 | } |
3017 | __be16 skb_network_protocol(struct sk_buff *skb); | 3017 | __be16 skb_network_protocol(struct sk_buff *skb, int *depth); |
3018 | 3018 | ||
3019 | static inline bool can_checksum_protocol(netdev_features_t features, | 3019 | static inline bool can_checksum_protocol(netdev_features_t features, |
3020 | __be16 protocol) | 3020 | __be16 protocol) |
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index b2fb167b2e6d..5624e4e2763c 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h | |||
@@ -467,9 +467,14 @@ struct nfs_lockt_res { | |||
467 | }; | 467 | }; |
468 | 468 | ||
469 | struct nfs_release_lockowner_args { | 469 | struct nfs_release_lockowner_args { |
470 | struct nfs4_sequence_args seq_args; | ||
470 | struct nfs_lowner lock_owner; | 471 | struct nfs_lowner lock_owner; |
471 | }; | 472 | }; |
472 | 473 | ||
474 | struct nfs_release_lockowner_res { | ||
475 | struct nfs4_sequence_res seq_res; | ||
476 | }; | ||
477 | |||
473 | struct nfs4_delegreturnargs { | 478 | struct nfs4_delegreturnargs { |
474 | struct nfs4_sequence_args seq_args; | 479 | struct nfs4_sequence_args seq_args; |
475 | const struct nfs_fh *fhandle; | 480 | const struct nfs_fh *fhandle; |
diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 1da693d51255..b66c2110cb1f 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h | |||
@@ -250,8 +250,7 @@ struct rmap_walk_control { | |||
250 | int (*rmap_one)(struct page *page, struct vm_area_struct *vma, | 250 | int (*rmap_one)(struct page *page, struct vm_area_struct *vma, |
251 | unsigned long addr, void *arg); | 251 | unsigned long addr, void *arg); |
252 | int (*done)(struct page *page); | 252 | int (*done)(struct page *page); |
253 | int (*file_nonlinear)(struct page *, struct address_space *, | 253 | int (*file_nonlinear)(struct page *, struct address_space *, void *arg); |
254 | struct vm_area_struct *vma); | ||
255 | struct anon_vma *(*anon_lock)(struct page *page); | 254 | struct anon_vma *(*anon_lock)(struct page *page); |
256 | bool (*invalid_vma)(struct vm_area_struct *vma, void *arg); | 255 | bool (*invalid_vma)(struct vm_area_struct *vma, void *arg); |
257 | }; | 256 | }; |
diff --git a/include/linux/security.h b/include/linux/security.h index 5623a7f965b7..2fc42d191f79 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
@@ -1040,6 +1040,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | |||
1040 | * Allocate a security structure to the xp->security field; the security | 1040 | * Allocate a security structure to the xp->security field; the security |
1041 | * field is initialized to NULL when the xfrm_policy is allocated. | 1041 | * field is initialized to NULL when the xfrm_policy is allocated. |
1042 | * Return 0 if operation was successful (memory to allocate, legal context) | 1042 | * Return 0 if operation was successful (memory to allocate, legal context) |
1043 | * @gfp is to specify the context for the allocation | ||
1043 | * @xfrm_policy_clone_security: | 1044 | * @xfrm_policy_clone_security: |
1044 | * @old_ctx contains an existing xfrm_sec_ctx. | 1045 | * @old_ctx contains an existing xfrm_sec_ctx. |
1045 | * @new_ctxp contains a new xfrm_sec_ctx being cloned from old. | 1046 | * @new_ctxp contains a new xfrm_sec_ctx being cloned from old. |
@@ -1683,7 +1684,7 @@ struct security_operations { | |||
1683 | 1684 | ||
1684 | #ifdef CONFIG_SECURITY_NETWORK_XFRM | 1685 | #ifdef CONFIG_SECURITY_NETWORK_XFRM |
1685 | int (*xfrm_policy_alloc_security) (struct xfrm_sec_ctx **ctxp, | 1686 | int (*xfrm_policy_alloc_security) (struct xfrm_sec_ctx **ctxp, |
1686 | struct xfrm_user_sec_ctx *sec_ctx); | 1687 | struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp); |
1687 | int (*xfrm_policy_clone_security) (struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx); | 1688 | int (*xfrm_policy_clone_security) (struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx); |
1688 | void (*xfrm_policy_free_security) (struct xfrm_sec_ctx *ctx); | 1689 | void (*xfrm_policy_free_security) (struct xfrm_sec_ctx *ctx); |
1689 | int (*xfrm_policy_delete_security) (struct xfrm_sec_ctx *ctx); | 1690 | int (*xfrm_policy_delete_security) (struct xfrm_sec_ctx *ctx); |
@@ -2859,7 +2860,8 @@ static inline void security_skb_owned_by(struct sk_buff *skb, struct sock *sk) | |||
2859 | 2860 | ||
2860 | #ifdef CONFIG_SECURITY_NETWORK_XFRM | 2861 | #ifdef CONFIG_SECURITY_NETWORK_XFRM |
2861 | 2862 | ||
2862 | int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx); | 2863 | int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, |
2864 | struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp); | ||
2863 | int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctxp); | 2865 | int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctxp); |
2864 | void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx); | 2866 | void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx); |
2865 | int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx); | 2867 | int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx); |
@@ -2877,7 +2879,9 @@ void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl); | |||
2877 | 2879 | ||
2878 | #else /* CONFIG_SECURITY_NETWORK_XFRM */ | 2880 | #else /* CONFIG_SECURITY_NETWORK_XFRM */ |
2879 | 2881 | ||
2880 | static inline int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx) | 2882 | static inline int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, |
2883 | struct xfrm_user_sec_ctx *sec_ctx, | ||
2884 | gfp_t gfp) | ||
2881 | { | 2885 | { |
2882 | return 0; | 2886 | return 0; |
2883 | } | 2887 | } |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 3ebbbe7b6d05..15ede6a823a6 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -2451,8 +2451,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset, | |||
2451 | unsigned int flags); | 2451 | unsigned int flags); |
2452 | void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); | 2452 | void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); |
2453 | unsigned int skb_zerocopy_headlen(const struct sk_buff *from); | 2453 | unsigned int skb_zerocopy_headlen(const struct sk_buff *from); |
2454 | void skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, | 2454 | int skb_zerocopy(struct sk_buff *to, struct sk_buff *from, |
2455 | int len, int hlen); | 2455 | int len, int hlen); |
2456 | void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len); | 2456 | void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len); |
2457 | int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); | 2457 | int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); |
2458 | void skb_scrub_packet(struct sk_buff *skb, bool xnet); | 2458 | void skb_scrub_packet(struct sk_buff *skb, bool xnet); |
@@ -2725,7 +2725,7 @@ static inline void nf_reset(struct sk_buff *skb) | |||
2725 | 2725 | ||
2726 | static inline void nf_reset_trace(struct sk_buff *skb) | 2726 | static inline void nf_reset_trace(struct sk_buff *skb) |
2727 | { | 2727 | { |
2728 | #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) | 2728 | #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES) |
2729 | skb->nf_trace = 0; | 2729 | skb->nf_trace = 0; |
2730 | #endif | 2730 | #endif |
2731 | } | 2731 | } |
@@ -2742,6 +2742,9 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src) | |||
2742 | dst->nf_bridge = src->nf_bridge; | 2742 | dst->nf_bridge = src->nf_bridge; |
2743 | nf_bridge_get(src->nf_bridge); | 2743 | nf_bridge_get(src->nf_bridge); |
2744 | #endif | 2744 | #endif |
2745 | #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES) | ||
2746 | dst->nf_trace = src->nf_trace; | ||
2747 | #endif | ||
2745 | } | 2748 | } |
2746 | 2749 | ||
2747 | static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) | 2750 | static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) |
diff --git a/include/linux/slab.h b/include/linux/slab.h index 9260abdd67df..b5b2df60299e 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
@@ -410,7 +410,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags) | |||
410 | * | 410 | * |
411 | * %GFP_NOWAIT - Allocation will not sleep. | 411 | * %GFP_NOWAIT - Allocation will not sleep. |
412 | * | 412 | * |
413 | * %GFP_THISNODE - Allocate node-local memory only. | 413 | * %__GFP_THISNODE - Allocate node-local memory only. |
414 | * | 414 | * |
415 | * %GFP_DMA - Allocation suitable for DMA. | 415 | * %GFP_DMA - Allocation suitable for DMA. |
416 | * Should only be used for kmalloc() caches. Otherwise, use a | 416 | * Should only be used for kmalloc() caches. Otherwise, use a |
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index accc497f8d72..7159a0a933df 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h | |||
@@ -60,6 +60,12 @@ struct tp_module { | |||
60 | unsigned int num_tracepoints; | 60 | unsigned int num_tracepoints; |
61 | struct tracepoint * const *tracepoints_ptrs; | 61 | struct tracepoint * const *tracepoints_ptrs; |
62 | }; | 62 | }; |
63 | bool trace_module_has_bad_taint(struct module *mod); | ||
64 | #else | ||
65 | static inline bool trace_module_has_bad_taint(struct module *mod) | ||
66 | { | ||
67 | return false; | ||
68 | } | ||
63 | #endif /* CONFIG_MODULES */ | 69 | #endif /* CONFIG_MODULES */ |
64 | 70 | ||
65 | struct tracepoint_iter { | 71 | struct tracepoint_iter { |
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h index c3fa80745996..2c14d9cdd57a 100644 --- a/include/linux/usb/cdc_ncm.h +++ b/include/linux/usb/cdc_ncm.h | |||
@@ -88,6 +88,7 @@ | |||
88 | #define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB) | 88 | #define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB) |
89 | 89 | ||
90 | struct cdc_ncm_ctx { | 90 | struct cdc_ncm_ctx { |
91 | struct usb_cdc_ncm_ntb_parameters ncm_parm; | ||
91 | struct hrtimer tx_timer; | 92 | struct hrtimer tx_timer; |
92 | struct tasklet_struct bh; | 93 | struct tasklet_struct bh; |
93 | 94 | ||
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index e303eef94dd5..0662e98fef72 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h | |||
@@ -30,7 +30,7 @@ struct usbnet { | |||
30 | struct driver_info *driver_info; | 30 | struct driver_info *driver_info; |
31 | const char *driver_name; | 31 | const char *driver_name; |
32 | void *driver_priv; | 32 | void *driver_priv; |
33 | wait_queue_head_t *wait; | 33 | wait_queue_head_t wait; |
34 | struct mutex phy_mutex; | 34 | struct mutex phy_mutex; |
35 | unsigned char suspend_count; | 35 | unsigned char suspend_count; |
36 | unsigned char pkt_cnt, pkt_err; | 36 | unsigned char pkt_cnt, pkt_err; |
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index 9650a3ffd2d2..b4956a5fcc3f 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h | |||
@@ -31,8 +31,10 @@ | |||
31 | #define IF_PREFIX_AUTOCONF 0x02 | 31 | #define IF_PREFIX_AUTOCONF 0x02 |
32 | 32 | ||
33 | enum { | 33 | enum { |
34 | INET6_IFADDR_STATE_PREDAD, | ||
34 | INET6_IFADDR_STATE_DAD, | 35 | INET6_IFADDR_STATE_DAD, |
35 | INET6_IFADDR_STATE_POSTDAD, | 36 | INET6_IFADDR_STATE_POSTDAD, |
37 | INET6_IFADDR_STATE_ERRDAD, | ||
36 | INET6_IFADDR_STATE_UP, | 38 | INET6_IFADDR_STATE_UP, |
37 | INET6_IFADDR_STATE_DEAD, | 39 | INET6_IFADDR_STATE_DEAD, |
38 | }; | 40 | }; |
@@ -58,7 +60,7 @@ struct inet6_ifaddr { | |||
58 | unsigned long cstamp; /* created timestamp */ | 60 | unsigned long cstamp; /* created timestamp */ |
59 | unsigned long tstamp; /* updated timestamp */ | 61 | unsigned long tstamp; /* updated timestamp */ |
60 | 62 | ||
61 | struct timer_list dad_timer; | 63 | struct delayed_work dad_work; |
62 | 64 | ||
63 | struct inet6_dev *idev; | 65 | struct inet6_dev *idev; |
64 | struct rt6_info *rt; | 66 | struct rt6_info *rt; |
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 48ed75c21260..e77c10405d51 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h | |||
@@ -129,6 +129,7 @@ int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[], | |||
129 | int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], | 129 | int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], |
130 | struct ip_tunnel_parm *p); | 130 | struct ip_tunnel_parm *p); |
131 | void ip_tunnel_setup(struct net_device *dev, int net_id); | 131 | void ip_tunnel_setup(struct net_device *dev, int net_id); |
132 | void ip_tunnel_dst_reset_all(struct ip_tunnel *t); | ||
132 | 133 | ||
133 | /* Extract dsfield from inner protocol */ | 134 | /* Extract dsfield from inner protocol */ |
134 | static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph, | 135 | static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph, |
diff --git a/include/net/sock.h b/include/net/sock.h index 5c3f7c3624aa..b9586a137cad 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -1488,6 +1488,11 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) | |||
1488 | */ | 1488 | */ |
1489 | #define sock_owned_by_user(sk) ((sk)->sk_lock.owned) | 1489 | #define sock_owned_by_user(sk) ((sk)->sk_lock.owned) |
1490 | 1490 | ||
1491 | static inline void sock_release_ownership(struct sock *sk) | ||
1492 | { | ||
1493 | sk->sk_lock.owned = 0; | ||
1494 | } | ||
1495 | |||
1491 | /* | 1496 | /* |
1492 | * Macro so as to not evaluate some arguments when | 1497 | * Macro so as to not evaluate some arguments when |
1493 | * lockdep is not enabled. | 1498 | * lockdep is not enabled. |
@@ -2186,7 +2191,6 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, | |||
2186 | { | 2191 | { |
2187 | #define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL) | \ | 2192 | #define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL) | \ |
2188 | (1UL << SOCK_RCVTSTAMP) | \ | 2193 | (1UL << SOCK_RCVTSTAMP) | \ |
2189 | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \ | ||
2190 | (1UL << SOCK_TIMESTAMPING_SOFTWARE) | \ | 2194 | (1UL << SOCK_TIMESTAMPING_SOFTWARE) | \ |
2191 | (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) | \ | 2195 | (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) | \ |
2192 | (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE)) | 2196 | (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE)) |
diff --git a/include/net/tcp.h b/include/net/tcp.h index 56fc366da6d5..743accec6c76 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
@@ -480,20 +480,21 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, | |||
480 | #ifdef CONFIG_SYN_COOKIES | 480 | #ifdef CONFIG_SYN_COOKIES |
481 | #include <linux/ktime.h> | 481 | #include <linux/ktime.h> |
482 | 482 | ||
483 | /* Syncookies use a monotonic timer which increments every 64 seconds. | 483 | /* Syncookies use a monotonic timer which increments every 60 seconds. |
484 | * This counter is used both as a hash input and partially encoded into | 484 | * This counter is used both as a hash input and partially encoded into |
485 | * the cookie value. A cookie is only validated further if the delta | 485 | * the cookie value. A cookie is only validated further if the delta |
486 | * between the current counter value and the encoded one is less than this, | 486 | * between the current counter value and the encoded one is less than this, |
487 | * i.e. a sent cookie is valid only at most for 128 seconds (or less if | 487 | * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if |
488 | * the counter advances immediately after a cookie is generated). | 488 | * the counter advances immediately after a cookie is generated). |
489 | */ | 489 | */ |
490 | #define MAX_SYNCOOKIE_AGE 2 | 490 | #define MAX_SYNCOOKIE_AGE 2 |
491 | 491 | ||
492 | static inline u32 tcp_cookie_time(void) | 492 | static inline u32 tcp_cookie_time(void) |
493 | { | 493 | { |
494 | struct timespec now; | 494 | u64 val = get_jiffies_64(); |
495 | getnstimeofday(&now); | 495 | |
496 | return now.tv_sec >> 6; /* 64 seconds granularity */ | 496 | do_div(val, 60 * HZ); |
497 | return val; | ||
497 | } | 498 | } |
498 | 499 | ||
499 | u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th, | 500 | u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th, |
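With the change above, tcp_cookie_time() is derived from the 64-bit jiffies counter divided by 60*HZ, so the syncookie counter ticks once per 60 seconds of uptime instead of being read from the settable wall clock. A small userspace sketch of the arithmetic, with HZ and jiffies simulated purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define HZ 1000	/* assumed tick rate for this demo */

/* Mirrors the new tcp_cookie_time(): one counter tick per 60s of jiffies. */
static uint32_t cookie_time(uint64_t jiffies64)
{
	return (uint32_t)(jiffies64 / (60ULL * HZ));
}

int main(void)
{
	/* 0s and 59s fall in the same tick; 60s starts the next one. */
	printf("%u %u %u\n", cookie_time(0), cookie_time(59 * HZ),
	       cookie_time(60 * HZ));
	return 0;
}

Since MAX_SYNCOOKIE_AGE stays at 2, a cookie remains valid while the counter has advanced by less than two ticks, i.e. for at most roughly 2*60 seconds, matching the updated comment.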
@@ -1303,7 +1304,8 @@ struct tcp_fastopen_request { | |||
1303 | /* Fast Open cookie. Size 0 means a cookie request */ | 1304 | /* Fast Open cookie. Size 0 means a cookie request */ |
1304 | struct tcp_fastopen_cookie cookie; | 1305 | struct tcp_fastopen_cookie cookie; |
1305 | struct msghdr *data; /* data in MSG_FASTOPEN */ | 1306 | struct msghdr *data; /* data in MSG_FASTOPEN */ |
1306 | u16 copied; /* queued in tcp_connect() */ | 1307 | size_t size; |
1308 | int copied; /* queued in tcp_connect() */ | ||
1307 | }; | 1309 | }; |
1308 | void tcp_free_fastopen_req(struct tcp_sock *tp); | 1310 | void tcp_free_fastopen_req(struct tcp_sock *tp); |
1309 | 1311 | ||
diff --git a/include/net/xfrm.h b/include/net/xfrm.h index afa5730fb3bd..fb5654a8ca3c 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h | |||
@@ -1648,6 +1648,11 @@ static inline int xfrm_aevent_is_on(struct net *net) | |||
1648 | } | 1648 | } |
1649 | #endif | 1649 | #endif |
1650 | 1650 | ||
1651 | static inline int aead_len(struct xfrm_algo_aead *alg) | ||
1652 | { | ||
1653 | return sizeof(*alg) + ((alg->alg_key_len + 7) / 8); | ||
1654 | } | ||
1655 | |||
1651 | static inline int xfrm_alg_len(const struct xfrm_algo *alg) | 1656 | static inline int xfrm_alg_len(const struct xfrm_algo *alg) |
1652 | { | 1657 | { |
1653 | return sizeof(*alg) + ((alg->alg_key_len + 7) / 8); | 1658 | return sizeof(*alg) + ((alg->alg_key_len + 7) / 8); |
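The aead_len() helper added above mirrors xfrm_alg_len(): the AEAD algorithm blob is its fixed header plus the key length rounded up from bits to whole bytes, which is the size xfrm_algo_aead_clone() later hands to kmemdup(). A worked example of the computation as a userspace sketch; the _demo struct is a simplified stand-in for struct xfrm_algo_aead:

#include <stdio.h>

struct xfrm_algo_aead_demo {
	char alg_name[64];
	unsigned int alg_key_len;	/* in bits */
	unsigned int alg_icv_len;	/* in bits */
	char alg_key[];			/* key bytes follow the header */
};

/* Same formula as aead_len(): header size plus key bits rounded up to bytes. */
static int aead_len_demo(const struct xfrm_algo_aead_demo *alg)
{
	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
}

int main(void)
{
	struct xfrm_algo_aead_demo a = { .alg_key_len = 160 };

	/* 160 bits -> 20 key bytes appended after the fixed-size header */
	printf("%d\n", aead_len_demo(&a));
	return 0;
}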
@@ -1686,6 +1691,12 @@ static inline int xfrm_replay_clone(struct xfrm_state *x, | |||
1686 | return 0; | 1691 | return 0; |
1687 | } | 1692 | } |
1688 | 1693 | ||
1694 | static inline struct xfrm_algo_aead *xfrm_algo_aead_clone(struct xfrm_algo_aead *orig) | ||
1695 | { | ||
1696 | return kmemdup(orig, aead_len(orig), GFP_KERNEL); | ||
1697 | } | ||
1698 | |||
1699 | |||
1689 | static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig) | 1700 | static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig) |
1690 | { | 1701 | { |
1691 | return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL); | 1702 | return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL); |
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h index ae5a17111968..4483fadfa68d 100644 --- a/include/target/iscsi/iscsi_transport.h +++ b/include/target/iscsi/iscsi_transport.h | |||
@@ -12,6 +12,7 @@ struct iscsit_transport { | |||
12 | int (*iscsit_setup_np)(struct iscsi_np *, struct __kernel_sockaddr_storage *); | 12 | int (*iscsit_setup_np)(struct iscsi_np *, struct __kernel_sockaddr_storage *); |
13 | int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *); | 13 | int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *); |
14 | void (*iscsit_free_np)(struct iscsi_np *); | 14 | void (*iscsit_free_np)(struct iscsi_np *); |
15 | void (*iscsit_wait_conn)(struct iscsi_conn *); | ||
15 | void (*iscsit_free_conn)(struct iscsi_conn *); | 16 | void (*iscsit_free_conn)(struct iscsi_conn *); |
16 | int (*iscsit_get_login_rx)(struct iscsi_conn *, struct iscsi_login *); | 17 | int (*iscsit_get_login_rx)(struct iscsi_conn *, struct iscsi_login *); |
17 | int (*iscsit_put_login_tx)(struct iscsi_conn *, struct iscsi_login *, u32); | 18 | int (*iscsit_put_login_tx)(struct iscsi_conn *, struct iscsi_login *, u32); |
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index ddc179b7a105..1fef3e6e9436 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h | |||
@@ -83,7 +83,7 @@ DECLARE_EVENT_CLASS(rpc_task_running, | |||
83 | ), | 83 | ), |
84 | 84 | ||
85 | TP_fast_assign( | 85 | TP_fast_assign( |
86 | __entry->client_id = clnt->cl_clid; | 86 | __entry->client_id = clnt ? clnt->cl_clid : -1; |
87 | __entry->task_id = task->tk_pid; | 87 | __entry->task_id = task->tk_pid; |
88 | __entry->action = action; | 88 | __entry->action = action; |
89 | __entry->runstate = task->tk_runstate; | 89 | __entry->runstate = task->tk_runstate; |
@@ -91,7 +91,7 @@ DECLARE_EVENT_CLASS(rpc_task_running, | |||
91 | __entry->flags = task->tk_flags; | 91 | __entry->flags = task->tk_flags; |
92 | ), | 92 | ), |
93 | 93 | ||
94 | TP_printk("task:%u@%u flags=%4.4x state=%4.4lx status=%d action=%pf", | 94 | TP_printk("task:%u@%d flags=%4.4x state=%4.4lx status=%d action=%pf", |
95 | __entry->task_id, __entry->client_id, | 95 | __entry->task_id, __entry->client_id, |
96 | __entry->flags, | 96 | __entry->flags, |
97 | __entry->runstate, | 97 | __entry->runstate, |
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 1a8b28db3775..1ee19a24cc5f 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h | |||
@@ -310,15 +310,12 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = { \ | |||
310 | #undef __array | 310 | #undef __array |
311 | #define __array(type, item, len) \ | 311 | #define __array(type, item, len) \ |
312 | do { \ | 312 | do { \ |
313 | mutex_lock(&event_storage_mutex); \ | 313 | char *type_str = #type"["__stringify(len)"]"; \ |
314 | BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ | 314 | BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ |
315 | snprintf(event_storage, sizeof(event_storage), \ | 315 | ret = trace_define_field(event_call, type_str, #item, \ |
316 | "%s[%d]", #type, len); \ | ||
317 | ret = trace_define_field(event_call, event_storage, #item, \ | ||
318 | offsetof(typeof(field), item), \ | 316 | offsetof(typeof(field), item), \ |
319 | sizeof(field.item), \ | 317 | sizeof(field.item), \ |
320 | is_signed_type(type), FILTER_OTHER); \ | 318 | is_signed_type(type), FILTER_OTHER); \ |
321 | mutex_unlock(&event_storage_mutex); \ | ||
322 | if (ret) \ | 319 | if (ret) \ |
323 | return ret; \ | 320 | return ret; \ |
324 | } while (0); | 321 | } while (0); |