Diffstat (limited to 'include')
-rw-r--r--  include/acpi/processor.h                       |  8
-rw-r--r--  include/asm-generic/tlb.h                      |  8
-rw-r--r--  include/linux/acpi.h                           |  4
-rw-r--r--  include/linux/blk-mq.h                         |  8
-rw-r--r--  include/linux/blk_types.h                      |  2
-rw-r--r--  include/linux/ceph/osd_client.h                |  4
-rw-r--r--  include/linux/compiler.h                       | 12
-rw-r--r--  include/linux/fs.h                             |  2
-rw-r--r--  include/linux/kdb.h                            | 62
-rw-r--r--  include/linux/mm.h                             |  2
-rw-r--r--  include/linux/mmc/sdhci.h                      |  1
-rw-r--r--  include/linux/netdevice.h                      |  6
-rw-r--r--  include/linux/nfs_fs_sb.h                      |  3
-rw-r--r--  include/linux/perf_event.h                     | 12
-rw-r--r--  include/linux/perf_regs.h                      | 16
-rw-r--r--  include/linux/phy/omap_control_phy.h           |  6
-rw-r--r--  include/linux/rmap.h                           | 10
-rw-r--r--  include/linux/writeback.h                      |  1
-rw-r--r--  include/net/mac80211.h                         |  7
-rw-r--r--  include/target/target_core_backend.h           |  1
-rw-r--r--  include/target/target_core_backend_configfs.h  |  2
-rw-r--r--  include/target/target_core_base.h              |  3
-rw-r--r--  include/uapi/asm-generic/fcntl.h               |  2
-rw-r--r--  include/uapi/linux/kfd_ioctl.h                 | 37
-rw-r--r--  include/uapi/linux/openvswitch.h               |  4
-rw-r--r--  include/xen/interface/nmi.h                    | 51
26 files changed, 206 insertions, 68 deletions
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 3ca9b751f122..b95dc32a6e6b 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -196,8 +196,8 @@ struct acpi_processor_flags {
 struct acpi_processor {
 	acpi_handle handle;
 	u32 acpi_id;
-	u32 apic_id;
-	u32 id;
+	u32 phys_id;	/* CPU hardware ID such as APIC ID for x86 */
+	u32 id;		/* CPU logical ID allocated by OS */
 	u32 pblk;
 	int performance_platform_limit;
 	int throttling_platform_limit;
@@ -310,8 +310,8 @@ static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
 #endif				/* CONFIG_CPU_FREQ */
 
 /* in processor_core.c */
-int acpi_get_apicid(acpi_handle, int type, u32 acpi_id);
-int acpi_map_cpuid(int apic_id, u32 acpi_id);
+int acpi_get_phys_id(acpi_handle, int type, u32 acpi_id);
+int acpi_map_cpuid(int phys_id, u32 acpi_id);
 int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id);
 
 /* in processor_pdc.c */
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 08848050922e..db284bff29dc 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -136,8 +136,12 @@ static inline void __tlb_adjust_range(struct mmu_gather *tlb,
 
 static inline void __tlb_reset_range(struct mmu_gather *tlb)
 {
-	tlb->start = TASK_SIZE;
-	tlb->end = 0;
+	if (tlb->fullmm) {
+		tlb->start = tlb->end = ~0;
+	} else {
+		tlb->start = TASK_SIZE;
+		tlb->end = 0;
+	}
 }
 
 /*
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 856d381b1d5b..d459cd17b477 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -147,8 +147,8 @@ void acpi_numa_arch_fixup(void);
 
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 /* Arch dependent functions for cpu hotplug support */
-int acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu);
-int acpi_unmap_lsapic(int cpu);
+int acpi_map_cpu(acpi_handle handle, int physid, int *pcpu);
+int acpi_unmap_cpu(int cpu);
 #endif /* CONFIG_ACPI_HOTPLUG_CPU */
 
 int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 8aded9ab2e4e..5735e7130d63 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -34,7 +34,6 @@ struct blk_mq_hw_ctx {
 	unsigned long		flags;		/* BLK_MQ_F_* flags */
 
 	struct request_queue	*queue;
-	unsigned int		queue_num;
 	struct blk_flush_queue	*fq;
 
 	void			*driver_data;
@@ -54,7 +53,7 @@ struct blk_mq_hw_ctx {
 	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
 
 	unsigned int		numa_node;
-	unsigned int		cmd_size;	/* per-request extra data */
+	unsigned int		queue_num;
 
 	atomic_t		nr_active;
 
@@ -195,13 +194,16 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
 
+int blk_mq_request_started(struct request *rq);
 void blk_mq_start_request(struct request *rq);
 void blk_mq_end_request(struct request *rq, int error);
 void __blk_mq_end_request(struct request *rq, int error);
 
 void blk_mq_requeue_request(struct request *rq);
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
+void blk_mq_cancel_requeue_work(struct request_queue *q);
 void blk_mq_kick_requeue_list(struct request_queue *q);
+void blk_mq_abort_requeue_list(struct request_queue *q);
 void blk_mq_complete_request(struct request *rq);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
@@ -212,6 +214,8 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
 		void *priv);
+void blk_mq_unfreeze_queue(struct request_queue *q);
+void blk_mq_freeze_queue_start(struct request_queue *q);
 
 /*
  * Driver command data is immediately after the request. So subtract request
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 445d59231bc4..c294e3e25e37 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -190,6 +190,7 @@ enum rq_flag_bits {
 	__REQ_PM,		/* runtime pm request */
 	__REQ_HASHED,		/* on IO scheduler merge hash */
 	__REQ_MQ_INFLIGHT,	/* track inflight for MQ */
+	__REQ_NO_TIMEOUT,	/* requests may never expire */
 	__REQ_NR_BITS,		/* stops here */
 };
 
@@ -243,5 +244,6 @@ enum rq_flag_bits {
 #define REQ_PM			(1ULL << __REQ_PM)
 #define REQ_HASHED		(1ULL << __REQ_HASHED)
 #define REQ_MQ_INFLIGHT		(1ULL << __REQ_MQ_INFLIGHT)
+#define REQ_NO_TIMEOUT		(1ULL << __REQ_NO_TIMEOUT)
 
 #endif /* __LINUX_BLK_TYPES_H */
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 5d86416d35f2..61b19c46bdb3 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -87,8 +87,8 @@ struct ceph_osd_req_op {
 			struct ceph_osd_data osd_data;
 		} extent;
 		struct {
-			__le32 name_len;
-			__le32 value_len;
+			u32 name_len;
+			u32 value_len;
 			__u8 cmp_op;       /* CEPH_OSD_CMPXATTR_OP_* */
 			__u8 cmp_mode;     /* CEPH_OSD_CMPXATTR_MODE_* */
 			struct ceph_osd_data osd_data;
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index a1c81f80978e..33063f872ee3 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -215,7 +215,7 @@ static __always_inline void __read_once_size(volatile void *p, void *res, int si
 	}
 }
 
-static __always_inline void __assign_once_size(volatile void *p, void *res, int size)
+static __always_inline void __write_once_size(volatile void *p, void *res, int size)
 {
 	switch (size) {
 	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
@@ -235,15 +235,15 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int
 /*
  * Prevent the compiler from merging or refetching reads or writes. The
  * compiler is also forbidden from reordering successive instances of
- * READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the
+ * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
  * compiler is aware of some particular ordering.  One way to make the
  * compiler aware of ordering is to put the two invocations of READ_ONCE,
- * ASSIGN_ONCE or ACCESS_ONCE() in different C statements.
+ * WRITE_ONCE or ACCESS_ONCE() in different C statements.
  *
  * In contrast to ACCESS_ONCE these two macros will also work on aggregate
  * data types like structs or unions. If the size of the accessed data
  * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
- * READ_ONCE() and ASSIGN_ONCE() will fall back to memcpy and print a
+ * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
  * compile-time warning.
  *
  * Their two major use cases are: (1) Mediating communication between
@@ -257,8 +257,8 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int
 #define READ_ONCE(x) \
 	({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; })
 
-#define ASSIGN_ONCE(val, x) \
-	({ typeof(x) __val; __val = val; __assign_once_size(&x, &__val, sizeof(__val)); __val; })
+#define WRITE_ONCE(x, val) \
+	({ typeof(x) __val; __val = val; __write_once_size(&x, &__val, sizeof(__val)); __val; })
 
 #endif /* __KERNEL__ */
 
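For context, a minimal kernel-side sketch of how the renamed READ_ONCE()/WRITE_ONCE() pair is typically used to mediate lockless one-word communication. Illustrative only, not part of this patch; the struct, its fields, and both functions are hypothetical.

/* Illustrative only -- hypothetical producer/consumer, not in this diff. */
struct shared {
	int payload;
	int ready;
};

static void producer(struct shared *s, int value)
{
	s->payload = value;
	smp_wmb();			/* order payload before the flag */
	WRITE_ONCE(s->ready, 1);	/* single, non-torn store */
}

static int consumer(struct shared *s, int *out)
{
	if (!READ_ONCE(s->ready))	/* single, non-refetched load */
		return 0;
	smp_rmb();			/* pairs with the producer's barrier */
	*out = s->payload;
	return 1;
}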
diff --git a/include/linux/fs.h b/include/linux/fs.h
index f90c0282c114..42efe13077b6 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -135,7 +135,7 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
 #define FMODE_CAN_WRITE         ((__force fmode_t)0x40000)
 
 /* File was opened by fanotify and shouldn't generate fanotify events */
-#define FMODE_NONOTIFY		((__force fmode_t)0x1000000)
+#define FMODE_NONOTIFY		((__force fmode_t)0x4000000)
 
 /*
  * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 290db1269c4c..75ae2e2631fc 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -13,11 +13,54 @@
  * Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com>
  */
 
+/* Shifted versions of the command enable bits are be used if the command
+ * has no arguments (see kdb_check_flags). This allows commands, such as
+ * go, to have different permissions depending upon whether it is called
+ * with an argument.
+ */
+#define KDB_ENABLE_NO_ARGS_SHIFT 10
+
 typedef enum {
-	KDB_REPEAT_NONE = 0,	/* Do not repeat this command */
-	KDB_REPEAT_NO_ARGS,	/* Repeat the command without arguments */
-	KDB_REPEAT_WITH_ARGS,	/* Repeat the command including its arguments */
-} kdb_repeat_t;
+	KDB_ENABLE_ALL = (1 << 0), /* Enable everything */
+	KDB_ENABLE_MEM_READ = (1 << 1),
+	KDB_ENABLE_MEM_WRITE = (1 << 2),
+	KDB_ENABLE_REG_READ = (1 << 3),
+	KDB_ENABLE_REG_WRITE = (1 << 4),
+	KDB_ENABLE_INSPECT = (1 << 5),
+	KDB_ENABLE_FLOW_CTRL = (1 << 6),
+	KDB_ENABLE_SIGNAL = (1 << 7),
+	KDB_ENABLE_REBOOT = (1 << 8),
+	/* User exposed values stop here, all remaining flags are
+	 * exclusively used to describe a commands behaviour.
+	 */
+
+	KDB_ENABLE_ALWAYS_SAFE = (1 << 9),
+	KDB_ENABLE_MASK = (1 << KDB_ENABLE_NO_ARGS_SHIFT) - 1,
+
+	KDB_ENABLE_ALL_NO_ARGS = KDB_ENABLE_ALL << KDB_ENABLE_NO_ARGS_SHIFT,
+	KDB_ENABLE_MEM_READ_NO_ARGS = KDB_ENABLE_MEM_READ
+				      << KDB_ENABLE_NO_ARGS_SHIFT,
+	KDB_ENABLE_MEM_WRITE_NO_ARGS = KDB_ENABLE_MEM_WRITE
+				       << KDB_ENABLE_NO_ARGS_SHIFT,
+	KDB_ENABLE_REG_READ_NO_ARGS = KDB_ENABLE_REG_READ
+				      << KDB_ENABLE_NO_ARGS_SHIFT,
+	KDB_ENABLE_REG_WRITE_NO_ARGS = KDB_ENABLE_REG_WRITE
+				       << KDB_ENABLE_NO_ARGS_SHIFT,
+	KDB_ENABLE_INSPECT_NO_ARGS = KDB_ENABLE_INSPECT
+				     << KDB_ENABLE_NO_ARGS_SHIFT,
+	KDB_ENABLE_FLOW_CTRL_NO_ARGS = KDB_ENABLE_FLOW_CTRL
+				       << KDB_ENABLE_NO_ARGS_SHIFT,
+	KDB_ENABLE_SIGNAL_NO_ARGS = KDB_ENABLE_SIGNAL
+				    << KDB_ENABLE_NO_ARGS_SHIFT,
+	KDB_ENABLE_REBOOT_NO_ARGS = KDB_ENABLE_REBOOT
+				    << KDB_ENABLE_NO_ARGS_SHIFT,
+	KDB_ENABLE_ALWAYS_SAFE_NO_ARGS = KDB_ENABLE_ALWAYS_SAFE
+					 << KDB_ENABLE_NO_ARGS_SHIFT,
+	KDB_ENABLE_MASK_NO_ARGS = KDB_ENABLE_MASK << KDB_ENABLE_NO_ARGS_SHIFT,
+
+	KDB_REPEAT_NO_ARGS = 0x40000000, /* Repeat the command w/o arguments */
+	KDB_REPEAT_WITH_ARGS = 0x80000000, /* Repeat the command with args */
+} kdb_cmdflags_t;
 
 typedef int (*kdb_func_t)(int, const char **);
 
@@ -62,6 +105,7 @@ extern atomic_t kdb_event;
 #define KDB_BADLENGTH	(-19)
 #define KDB_NOBP	(-20)
 #define KDB_BADADDR	(-21)
+#define KDB_NOPERM	(-22)
 
 /*
  * kdb_diemsg
@@ -146,17 +190,17 @@ static inline const char *kdb_walk_kallsyms(loff_t *pos)
 
 /* Dynamic kdb shell command registration */
 extern int kdb_register(char *, kdb_func_t, char *, char *, short);
-extern int kdb_register_repeat(char *, kdb_func_t, char *, char *,
-			       short, kdb_repeat_t);
+extern int kdb_register_flags(char *, kdb_func_t, char *, char *,
+			      short, kdb_cmdflags_t);
 extern int kdb_unregister(char *);
 #else /* ! CONFIG_KGDB_KDB */
 static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
 static inline void kdb_init(int level) {}
 static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
 			       char *help, short minlen) { return 0; }
-static inline int kdb_register_repeat(char *cmd, kdb_func_t func, char *usage,
-				      char *help, short minlen,
-				      kdb_repeat_t repeat) { return 0; }
+static inline int kdb_register_flags(char *cmd, kdb_func_t func, char *usage,
+				     char *help, short minlen,
+				     kdb_cmdflags_t flags) { return 0; }
 static inline int kdb_unregister(char *cmd) { return 0; }
 #endif	/* CONFIG_KGDB_KDB */
 enum {
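For context, a sketch of registering a command through the new flags-based API, where the permission bits replace the old kdb_repeat_t argument. Illustrative only, not part of this patch; the command name and handler are hypothetical.

/* Illustrative only -- hypothetical kdb command, not in this diff. */
static int kdb_cmd_foo(int argc, const char **argv)
{
	kdb_printf("foo: called with %d argument(s)\n", argc);
	return 0;
}

static int __init foo_kdb_register(void)
{
	/* Was: kdb_register_repeat(..., KDB_REPEAT_NO_ARGS); */
	return kdb_register_flags("foo", kdb_cmd_foo, "[arg]",
				  "Example command", 0,
				  KDB_ENABLE_ALWAYS_SAFE | KDB_REPEAT_NO_ARGS);
}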
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f80d0194c9bc..80fc92a49649 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1952,7 +1952,7 @@ extern int expand_downwards(struct vm_area_struct *vma,
 #if VM_GROWSUP
 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
 #else
-  #define expand_upwards(vma, address) do { } while (0)
+  #define expand_upwards(vma, address) (0)
 #endif
 
 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index 375af80bde7d..f767a0de611f 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -137,6 +137,7 @@ struct sdhci_host {
 #define SDHCI_SDR104_NEEDS_TUNING (1<<10)	/* SDR104/HS200 needs tuning */
 #define SDHCI_USING_RETUNING_TIMER (1<<11)	/* Host is using a retuning timer for the card */
 #define SDHCI_USE_64_BIT_DMA	(1<<12)	/* Use 64-bit DMA */
+#define SDHCI_HS400_TUNING	(1<<13)	/* Tuning for HS400 */
 
 	unsigned int version;	/* SDHCI spec. version */
 
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 679e6e90aa4c..52fd8e8694cf 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -852,11 +852,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  * 3. Update dev->stats asynchronously and atomically, and define
  *    neither operation.
  *
- * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16t vid);
+ * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
  *	If device support VLAN filtering this function is called when a
  *	VLAN id is registered.
  *
- * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
+ * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
  *	If device support VLAN filtering this function is called when a
  *	VLAN id is unregistered.
  *
@@ -2085,7 +2085,7 @@ extern rwlock_t dev_base_lock; /* Device list lock */
 		list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
 #define for_each_netdev_in_bond_rcu(bond, slave)	\
 		for_each_netdev_rcu(&init_net, slave)	\
-			if (netdev_master_upper_dev_get_rcu(slave) == bond)
+			if (netdev_master_upper_dev_get_rcu(slave) == (bond))
 #define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)
 
 static inline struct net_device *next_net_device(struct net_device *dev)
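For context, a sketch of a driver callback matching the corrected kerneldoc signature above. Illustrative only, not part of this patch; foo_priv and foo_hw_vlan_add are hypothetical driver internals.

/* Illustrative only -- hypothetical driver code, not in this diff. */
static int foo_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* Program the hardware VLAN filter for this (proto, vid) pair. */
	return foo_hw_vlan_add(priv, proto, vid);
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_vlan_rx_add_vid	= foo_vlan_rx_add_vid,
};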
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 1e37fbb78f7a..ddea982355f3 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -74,6 +74,9 @@ struct nfs_client {
 	/* idmapper */
 	struct idmap *		cl_idmap;
 
+	/* Client owner identifier */
+	const char *		cl_owner_id;
+
 	/* Our own IP address, as a null-terminated string.
 	 * This is used to generate the mv0 callback address.
 	 */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 486e84ccb1f9..4f7a61ca4b39 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -79,11 +79,6 @@ struct perf_branch_stack {
 	struct perf_branch_entry	entries[0];
 };
 
-struct perf_regs {
-	__u64		abi;
-	struct pt_regs	*regs;
-};
-
 struct task_struct;
 
 /*
@@ -610,7 +605,14 @@ struct perf_sample_data {
 		u32	reserved;
 	} cpu_entry;
 	struct perf_callchain_entry	*callchain;
+
+	/*
+	 * regs_user may point to task_pt_regs or to regs_user_copy, depending
+	 * on arch details.
+	 */
 	struct perf_regs		regs_user;
+	struct pt_regs			regs_user_copy;
+
 	struct perf_regs		regs_intr;
 	u64				stack_user_size;
 } ____cacheline_aligned;
diff --git a/include/linux/perf_regs.h b/include/linux/perf_regs.h
index 3c73d5fe18be..a5f98d53d732 100644
--- a/include/linux/perf_regs.h
+++ b/include/linux/perf_regs.h
@@ -1,11 +1,19 @@
 #ifndef _LINUX_PERF_REGS_H
 #define _LINUX_PERF_REGS_H
 
+struct perf_regs {
+	__u64		abi;
+	struct pt_regs	*regs;
+};
+
 #ifdef CONFIG_HAVE_PERF_REGS
 #include <asm/perf_regs.h>
 u64 perf_reg_value(struct pt_regs *regs, int idx);
 int perf_reg_validate(u64 mask);
 u64 perf_reg_abi(struct task_struct *task);
+void perf_get_regs_user(struct perf_regs *regs_user,
+			struct pt_regs *regs,
+			struct pt_regs *regs_user_copy);
 #else
 static inline u64 perf_reg_value(struct pt_regs *regs, int idx)
 {
@@ -21,5 +29,13 @@ static inline u64 perf_reg_abi(struct task_struct *task)
 {
 	return PERF_SAMPLE_REGS_ABI_NONE;
 }
+
+static inline void perf_get_regs_user(struct perf_regs *regs_user,
+				       struct pt_regs *regs,
+				       struct pt_regs *regs_user_copy)
+{
+	regs_user->regs = task_pt_regs(current);
+	regs_user->abi = perf_reg_abi(current);
+}
 #endif /* CONFIG_HAVE_PERF_REGS */
 #endif /* _LINUX_PERF_REGS_H */
diff --git a/include/linux/phy/omap_control_phy.h b/include/linux/phy/omap_control_phy.h
index e9e6cfbfbb58..eb7d4a135a9e 100644
--- a/include/linux/phy/omap_control_phy.h
+++ b/include/linux/phy/omap_control_phy.h
@@ -66,7 +66,7 @@ enum omap_control_usb_mode {
 #define	OMAP_CTRL_PIPE3_PHY_TX_RX_POWEROFF	0x0
 
 #define OMAP_CTRL_PCIE_PCS_MASK			0xff
-#define OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT	0x8
+#define OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT	16
 
 #define OMAP_CTRL_USB2_PHY_PD		BIT(28)
 
@@ -79,7 +79,7 @@ enum omap_control_usb_mode {
 void omap_control_phy_power(struct device *dev, int on);
 void omap_control_usb_set_mode(struct device *dev,
 			       enum omap_control_usb_mode mode);
-void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay);
+void omap_control_pcie_pcs(struct device *dev, u8 delay);
 #else
 
 static inline void omap_control_phy_power(struct device *dev, int on)
@@ -91,7 +91,7 @@ static inline void omap_control_usb_set_mode(struct device *dev,
 {
 }
 
-static inline void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay)
+static inline void omap_control_pcie_pcs(struct device *dev, u8 delay)
 {
 }
 #endif
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index c0c2bce6b0b7..d9d7e7e56352 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -37,6 +37,16 @@ struct anon_vma {
 	atomic_t refcount;
 
 	/*
+	 * Count of child anon_vmas and VMAs which points to this anon_vma.
+	 *
+	 * This counter is used for making decision about reusing anon_vma
+	 * instead of forking new one. See comments in function anon_vma_clone.
+	 */
+	unsigned degree;
+
+	struct anon_vma *parent;	/* Parent of this anon_vma */
+
+	/*
 	 * NOTE: the LSB of the rb_root.rb_node is set by
 	 * mm_take_all_locks() _after_ taking the above lock. So the
 	 * rb_root must only be read/written after taking the above lock
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index a219be961c0a..00048339c23e 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -177,7 +177,6 @@ int write_cache_pages(struct address_space *mapping,
 		      struct writeback_control *wbc, writepage_t writepage,
 		      void *data);
 int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
-void set_page_dirty_balance(struct page *page);
 void writeback_set_ratelimit(void);
 void tag_pages_for_writeback(struct address_space *mapping,
 			     pgoff_t start, pgoff_t end);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 58d719ddaa60..29c7be8808d5 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1270,8 +1270,7 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
  *
  * @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the
  *	driver to indicate that it requires IV generation for this
- *	particular key. Setting this flag does not necessarily mean that SKBs
- *	will have sufficient tailroom for ICV or MIC.
+ *	particular key.
  * @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by
  *	the driver for a TKIP key if it requires Michael MIC
  *	generation in software.
@@ -1283,9 +1282,7 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
  * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver
  *	if space should be prepared for the IV, but the IV
  *	itself should not be generated. Do not set together with
- *	@IEEE80211_KEY_FLAG_GENERATE_IV on the same key. Setting this flag does
- *	not necessarily mean that SKBs will have sufficient tailroom for ICV or
- *	MIC.
+ *	@IEEE80211_KEY_FLAG_GENERATE_IV on the same key.
  * @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received
  *	management frames. The flag can help drivers that have a hardware
  *	crypto implementation that doesn't deal with management frames
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 430cfaf92285..db81c65b8f48 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -135,7 +135,6 @@ int se_dev_set_is_nonrot(struct se_device *, int);
 int	se_dev_set_emulate_rest_reord(struct se_device *dev, int);
 int	se_dev_set_queue_depth(struct se_device *, u32);
 int	se_dev_set_max_sectors(struct se_device *, u32);
-int	se_dev_set_fabric_max_sectors(struct se_device *, u32);
 int	se_dev_set_optimal_sectors(struct se_device *, u32);
 int	se_dev_set_block_size(struct se_device *, u32);
 
diff --git a/include/target/target_core_backend_configfs.h b/include/target/target_core_backend_configfs.h
index 3247d7530107..186f7a923570 100644
--- a/include/target/target_core_backend_configfs.h
+++ b/include/target/target_core_backend_configfs.h
@@ -98,8 +98,6 @@ static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name
 	TB_DEV_ATTR(_backend, block_size, S_IRUGO | S_IWUSR);		\
 	DEF_TB_DEV_ATTRIB_RO(_backend, hw_max_sectors);			\
 	TB_DEV_ATTR_RO(_backend, hw_max_sectors);			\
-	DEF_TB_DEV_ATTRIB(_backend, fabric_max_sectors);		\
-	TB_DEV_ATTR(_backend, fabric_max_sectors, S_IRUGO | S_IWUSR);	\
 	DEF_TB_DEV_ATTRIB(_backend, optimal_sectors);			\
 	TB_DEV_ATTR(_backend, optimal_sectors, S_IRUGO | S_IWUSR);	\
 	DEF_TB_DEV_ATTRIB_RO(_backend, hw_queue_depth);			\
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 397fb635766a..4a8795a87b9e 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -77,8 +77,6 @@
 #define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT	0
 /* Default max_write_same_len, disabled by default */
 #define DA_MAX_WRITE_SAME_LEN			0
-/* Default max transfer length */
-#define DA_FABRIC_MAX_SECTORS			8192
 /* Use a model alias based on the configfs backend device name */
 #define DA_EMULATE_MODEL_ALIAS			0
 /* Emulation for Direct Page Out */
@@ -694,7 +692,6 @@ struct se_dev_attrib {
 	u32		hw_block_size;
 	u32		block_size;
 	u32		hw_max_sectors;
-	u32		fabric_max_sectors;
 	u32		optimal_sectors;
 	u32		hw_queue_depth;
 	u32		queue_depth;
diff --git a/include/uapi/asm-generic/fcntl.h b/include/uapi/asm-generic/fcntl.h
index 7543b3e51331..e063effe0cc1 100644
--- a/include/uapi/asm-generic/fcntl.h
+++ b/include/uapi/asm-generic/fcntl.h
@@ -5,7 +5,7 @@
 
 /*
  * FMODE_EXEC is 0x20
- * FMODE_NONOTIFY is 0x1000000
+ * FMODE_NONOTIFY is 0x4000000
  * These cannot be used by userspace O_* until internal and external open
  * flags are split.
  * -Eric Paris
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index 7acef41fc209..af94f31e33ac 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -128,27 +128,34 @@ struct kfd_ioctl_get_process_apertures_args {
 	uint32_t pad;
 };
 
-#define KFD_IOC_MAGIC 'K'
+#define AMDKFD_IOCTL_BASE 'K'
+#define AMDKFD_IO(nr)			_IO(AMDKFD_IOCTL_BASE, nr)
+#define AMDKFD_IOR(nr, type)		_IOR(AMDKFD_IOCTL_BASE, nr, type)
+#define AMDKFD_IOW(nr, type)		_IOW(AMDKFD_IOCTL_BASE, nr, type)
+#define AMDKFD_IOWR(nr, type)		_IOWR(AMDKFD_IOCTL_BASE, nr, type)
 
-#define KFD_IOC_GET_VERSION \
-		_IOR(KFD_IOC_MAGIC, 1, struct kfd_ioctl_get_version_args)
+#define AMDKFD_IOC_GET_VERSION \
+		AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args)
 
-#define KFD_IOC_CREATE_QUEUE \
-		_IOWR(KFD_IOC_MAGIC, 2, struct kfd_ioctl_create_queue_args)
+#define AMDKFD_IOC_CREATE_QUEUE \
+		AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args)
 
-#define KFD_IOC_DESTROY_QUEUE \
-		_IOWR(KFD_IOC_MAGIC, 3, struct kfd_ioctl_destroy_queue_args)
+#define AMDKFD_IOC_DESTROY_QUEUE \
+		AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args)
 
-#define KFD_IOC_SET_MEMORY_POLICY \
-		_IOW(KFD_IOC_MAGIC, 4, struct kfd_ioctl_set_memory_policy_args)
+#define AMDKFD_IOC_SET_MEMORY_POLICY \
+		AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args)
 
-#define KFD_IOC_GET_CLOCK_COUNTERS \
-		_IOWR(KFD_IOC_MAGIC, 5, struct kfd_ioctl_get_clock_counters_args)
+#define AMDKFD_IOC_GET_CLOCK_COUNTERS \
+		AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args)
 
-#define KFD_IOC_GET_PROCESS_APERTURES \
-		_IOR(KFD_IOC_MAGIC, 6, struct kfd_ioctl_get_process_apertures_args)
+#define AMDKFD_IOC_GET_PROCESS_APERTURES \
+		AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args)
 
-#define KFD_IOC_UPDATE_QUEUE \
-		_IOW(KFD_IOC_MAGIC, 7, struct kfd_ioctl_update_queue_args)
+#define AMDKFD_IOC_UPDATE_QUEUE \
+		AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)
+
+#define AMDKFD_COMMAND_START		0x01
+#define AMDKFD_COMMAND_END		0x08
 
 #endif
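For context, a minimal userspace sketch of calling one of the renamed ioctls against /dev/kfd. Illustrative only, not part of this patch; error handling is kept to the bare minimum.

/* Illustrative only -- hypothetical userspace caller, not in this diff. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kfd_ioctl.h>

int main(void)
{
	struct kfd_ioctl_get_version_args args = {0};
	int fd = open("/dev/kfd", O_RDWR);

	if (fd < 0)
		return 1;
	/* Was KFD_IOC_GET_VERSION before the rename. */
	if (ioctl(fd, AMDKFD_IOC_GET_VERSION, &args) == 0)
		printf("KFD interface %u.%u\n",
		       args.major_version, args.minor_version);
	close(fd);
	return 0;
}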
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 3a6dcaa359b7..f714e8633352 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -174,6 +174,10 @@ enum ovs_packet_attr {
 	OVS_PACKET_ATTR_USERDATA,    /* OVS_ACTION_ATTR_USERSPACE arg. */
 	OVS_PACKET_ATTR_EGRESS_TUN_KEY,  /* Nested OVS_TUNNEL_KEY_ATTR_*
 					    attributes. */
+	OVS_PACKET_ATTR_UNUSED1,
+	OVS_PACKET_ATTR_UNUSED2,
+	OVS_PACKET_ATTR_PROBE,      /* Packet operation is a feature probe,
+				       error logging should be suppressed. */
 	__OVS_PACKET_ATTR_MAX
 };
 
diff --git a/include/xen/interface/nmi.h b/include/xen/interface/nmi.h
new file mode 100644
index 000000000000..b47d9d06fade
--- /dev/null
+++ b/include/xen/interface/nmi.h
@@ -0,0 +1,51 @@
+/******************************************************************************
+ * nmi.h
+ *
+ * NMI callback registration and reason codes.
+ *
+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_NMI_H__
+#define __XEN_PUBLIC_NMI_H__
+
+#include <xen/interface/xen.h>
+
+/*
+ * NMI reason codes:
+ * Currently these are x86-specific, stored in arch_shared_info.nmi_reason.
+ */
+ /* I/O-check error reported via ISA port 0x61, bit 6. */
+#define _XEN_NMIREASON_io_error     0
+#define XEN_NMIREASON_io_error      (1UL << _XEN_NMIREASON_io_error)
+ /* PCI SERR reported via ISA port 0x61, bit 7. */
+#define _XEN_NMIREASON_pci_serr     1
+#define XEN_NMIREASON_pci_serr      (1UL << _XEN_NMIREASON_pci_serr)
+ /* Unknown hardware-generated NMI. */
+#define _XEN_NMIREASON_unknown      2
+#define XEN_NMIREASON_unknown       (1UL << _XEN_NMIREASON_unknown)
+
+/*
+ * long nmi_op(unsigned int cmd, void *arg)
+ * NB. All ops return zero on success, else a negative error code.
+ */
+
+/*
+ * Register NMI callback for this (calling) VCPU. Currently this only makes
+ * sense for domain 0, vcpu 0. All other callers will be returned EINVAL.
+ * arg == pointer to xennmi_callback structure.
+ */
+#define XENNMI_register_callback   0
+struct xennmi_callback {
+	unsigned long handler_address;
+	unsigned long pad;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xennmi_callback);
+
+/*
+ * Deregister NMI callback for this (calling) VCPU.
+ * arg == NULL.
+ */
+#define XENNMI_unregister_callback 1
+
+#endif /* __XEN_PUBLIC_NMI_H__ */
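For context, a sketch of how dom0 might register an NMI callback through this interface. Illustrative only, not part of this patch; it assumes a hypercall wrapper HYPERVISOR_nmi_op(cmd, arg) matching the nmi_op() prototype documented above, and the low-level entry stub is hypothetical.

/* Illustrative only -- hypothetical dom0 registration, not in this diff. */
#include <xen/interface/nmi.h>

extern void xen_nmi_entry(void);	/* hypothetical low-level NMI stub */

static int xen_register_nmi_callback(void)
{
	struct xennmi_callback cb = {
		.handler_address = (unsigned long)xen_nmi_entry,
		.pad = 0,
	};

	/* Only meaningful for dom0/vcpu0; other callers get -EINVAL. */
	return HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
}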