author    Felipe Balbi <balbi@ti.com>    2013-12-23 12:22:46 -0500
committer Felipe Balbi <balbi@ti.com>    2013-12-23 12:22:46 -0500
commit    e90b8417af0d01cf8c64da6937c914c89ccf6dc1 (patch)
tree      cbc5e3b975b2efbb786e12b91714f8c3c3979316 /include
parent    845c071b7853c0046693022f4e95c9cdd043e2db (diff)
parent    413541dd66d51f791a0b169d9b9014e4f56be13c (diff)
Merge tag 'v3.13-rc5' into next
Linux 3.13-rc5

* tag 'v3.13-rc5': (231 commits)
  Linux 3.13-rc5
  aio: clean up and fix aio_setup_ring page mapping
  aio/migratepages: make aio migrate pages sane
  aio: fix kioctx leak introduced by "aio: Fix a trinity splat"
  Don't set the INITRD_COMPRESS environment variable automatically
  mm: fix build of split ptlock code
  pstore: Don't allow high traffic options on fragile devices
  mm: do not allocate page->ptl dynamically, if spinlock_t fits to long
  mm: page_alloc: revert NUMA aspect of fair allocation policy
  Revert "mm: page_alloc: exclude unreclaimable allocations from zone fairness policy"
  mm: Fix NULL pointer dereference in madvise(MADV_WILLNEED) support
  qla2xxx: Fix scsi_host leak on qlt_lport_register callback failure
  target: Remove extra percpu_ref_init
  arm64: ptrace: avoid using HW_BREAKPOINT_EMPTY for disabled events
  ARC: Allow conditional multiple inclusion of uapi/asm/unistd.h
  target/file: Update hw_max_sectors based on current block_size
  iser-target: Move INIT_WORK setup into isert_create_device_ib_res
  iscsi-target: Fix incorrect np->np_thread NULL assignment
  mm/hugetlb: check for pte NULL pointer in __page_check_address()
  fix build with make 3.80
  ...

Conflicts:
	drivers/usb/phy/Kconfig
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/pgtable.h        7
-rw-r--r--  include/asm-generic/preempt.h       35
-rw-r--r--  include/linux/lockref.h              2
-rw-r--r--  include/linux/math64.h              30
-rw-r--r--  include/linux/migrate.h             12
-rw-r--r--  include/linux/mm.h                   6
-rw-r--r--  include/linux/mm_types.h            52
-rw-r--r--  include/linux/pstore.h               3
-rw-r--r--  include/linux/reboot.h               1
-rw-r--r--  include/linux/sched.h                5
-rw-r--r--  include/target/target_core_base.h    5
-rw-r--r--  include/uapi/drm/vmwgfx_drm.h        1
-rw-r--r--  include/uapi/linux/perf_event.h      1
-rw-r--r--  include/xen/interface/io/blkif.h    10
14 files changed, 124 insertions, 46 deletions
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index f330d28e4d0e..db0923458940 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -217,7 +217,7 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
 #endif
 
 #ifndef pte_accessible
-# define pte_accessible(pte)		((void)(pte),1)
+# define pte_accessible(mm, pte)	((void)(pte), 1)
 #endif
 
 #ifndef flush_tlb_fix_spurious_fault
@@ -599,11 +599,10 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	barrier();
 #endif
-	if (pmd_none(pmdval))
+	if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
 		return 1;
 	if (unlikely(pmd_bad(pmdval))) {
-		if (!pmd_trans_huge(pmdval))
-			pmd_clear_bad(pmd);
+		pmd_clear_bad(pmd);
 		return 1;
 	}
 	return 0;
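
Illustrative sketch (not part of this diff): pte_accessible() now receives the mm, so an implementation can consult per-mm state rather than the pte alone. A minimal example of the kind of override this enables, using the mm_tlb_flush_pending() helper added later in this same merge; the function name and policy below are hypothetical:

	static inline bool example_pte_accessible(struct mm_struct *mm, pte_t a)
	{
		if (pte_present(a))
			return true;
		/* hypothetical policy: an entry cleared for NUMA hinting stays
		 * "accessible" until the batched TLB flush for this mm is done */
		return mm_tlb_flush_pending(mm);
	}
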
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index ddf2b420ac8f..1cd3f5d767a8 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -3,13 +3,11 @@
 
 #include <linux/thread_info.h>
 
-/*
- * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
- * that think a non-zero value indicates we cannot preempt.
- */
+#define PREEMPT_ENABLED	(0)
+
 static __always_inline int preempt_count(void)
 {
-	return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED;
+	return current_thread_info()->preempt_count;
 }
 
 static __always_inline int *preempt_count_ptr(void)
@@ -17,11 +15,6 @@ static __always_inline int *preempt_count_ptr(void)
 	return &current_thread_info()->preempt_count;
 }
 
-/*
- * We now loose PREEMPT_NEED_RESCHED and cause an extra reschedule; however the
- * alternative is loosing a reschedule. Better schedule too often -- also this
- * should be a very rare operation.
- */
 static __always_inline void preempt_count_set(int pc)
 {
 	*preempt_count_ptr() = pc;
@@ -41,28 +34,17 @@ static __always_inline void preempt_count_set(int pc)
 	task_thread_info(p)->preempt_count = PREEMPT_ENABLED;	\
 } while (0)
 
-/*
- * We fold the NEED_RESCHED bit into the preempt count such that
- * preempt_enable() can decrement and test for needing to reschedule with a
- * single instruction.
- *
- * We invert the actual bit, so that when the decrement hits 0 we know we both
- * need to resched (the bit is cleared) and can resched (no preempt count).
- */
-
 static __always_inline void set_preempt_need_resched(void)
 {
-	*preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED;
 }
 
 static __always_inline void clear_preempt_need_resched(void)
 {
-	*preempt_count_ptr() |= PREEMPT_NEED_RESCHED;
 }
 
 static __always_inline bool test_preempt_need_resched(void)
 {
-	return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED);
+	return false;
 }
 
 /*
@@ -81,7 +63,12 @@ static __always_inline void __preempt_count_sub(int val)
 
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
-	return !--*preempt_count_ptr();
+	/*
+	 * Because of load-store architectures cannot do per-cpu atomic
+	 * operations; we cannot use PREEMPT_NEED_RESCHED because it might get
+	 * lost.
+	 */
+	return !--*preempt_count_ptr() && tif_need_resched();
 }
 
 /*
@@ -89,7 +76,7 @@ static __always_inline bool __preempt_count_dec_and_test(void)
  */
 static __always_inline bool should_resched(void)
 {
-	return unlikely(!*preempt_count_ptr());
+	return unlikely(!preempt_count() && tif_need_resched());
 }
 
 #ifdef CONFIG_PREEMPT
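
Illustrative sketch (not part of this diff): __preempt_count_dec_and_test() is consumed by preempt_enable(), which reschedules when the count drops to zero while a reschedule is pending. A simplified caller, assuming the __preempt_schedule() hook that the CONFIG_PREEMPT block of this header provides:

	#define example_preempt_enable()				\
	do {								\
		barrier();						\
		if (unlikely(__preempt_count_dec_and_test()))		\
			__preempt_schedule();				\
	} while (0)

With the folding removed here, the generic variant keeps NEED_RESCHED in the thread_info flags, so the decrement and the tif_need_resched() test are two operations rather than the single folded decrement-and-test an arch-specific per-cpu implementation can use.
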
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index c8929c3832db..4bfde0e99ed5 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -19,7 +19,7 @@
 
 #define USE_CMPXCHG_LOCKREF \
 	(IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
-	 IS_ENABLED(CONFIG_SMP) && !BLOATED_SPINLOCKS)
+	 IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)
 
 struct lockref {
 	union {
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 69ed5f5e9f6e..c45c089bfdac 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -133,4 +133,34 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
 	return ret;
 }
 
+#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
+
+#ifndef mul_u64_u32_shr
+static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+{
+	return (u64)(((unsigned __int128)a * mul) >> shift);
+}
+#endif /* mul_u64_u32_shr */
+
+#else
+
+#ifndef mul_u64_u32_shr
+static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+{
+	u32 ah, al;
+	u64 ret;
+
+	al = a;
+	ah = a >> 32;
+
+	ret = ((u64)al * mul) >> shift;
+	if (ah)
+		ret += ((u64)ah * mul) << (32 - shift);
+
+	return ret;
+}
+#endif /* mul_u64_u32_shr */
+
+#endif
+
 #endif /* _LINUX_MATH64_H */
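
Worked note (illustrative, not part of this diff): mul_u64_u32_shr(a, mul, shift) computes (a * mul) >> shift. The non-__int128 fallback splits a = (ah << 32) + al; because the high partial product ah * mul * 2^32 has its low 32 >= shift bits clear for shift <= 32, the identity

	(a * mul) >> shift == ((al * mul) >> shift) + (((u64)ah * mul) << (32 - shift))

loses no bits (the caller still has to ensure the final value fits in 64 bits). A hypothetical fixed-point use, scaling cycles to nanoseconds with a .10 fixed-point ratio:

	static u64 example_cyc_to_ns(u64 cycles)
	{
		u32 mult = 1398;	/* hypothetical ~1.365 ns/cycle in 2^10 fixed point */
		return mul_u64_u32_shr(cycles, mult, 10);
	}
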
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index f5096b58b20d..f015c059e159 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -55,7 +55,8 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
 			struct page *newpage, struct page *page);
 extern int migrate_page_move_mapping(struct address_space *mapping,
 		struct page *newpage, struct page *page,
-		struct buffer_head *head, enum migrate_mode mode);
+		struct buffer_head *head, enum migrate_mode mode,
+		int extra_count);
 #else
 
 static inline void putback_lru_pages(struct list_head *l) {}
@@ -90,10 +91,19 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
 #endif /* CONFIG_MIGRATION */
 
 #ifdef CONFIG_NUMA_BALANCING
+extern bool pmd_trans_migrating(pmd_t pmd);
+extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd);
 extern int migrate_misplaced_page(struct page *page,
 				  struct vm_area_struct *vma, int node);
 extern bool migrate_ratelimited(int node);
 #else
+static inline bool pmd_trans_migrating(pmd_t pmd)
+{
+	return false;
+}
+static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
+{
+}
 static inline int migrate_misplaced_page(struct page *page,
 					 struct vm_area_struct *vma, int node)
 {
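
Illustrative sketch (not part of this diff): the new pmd_trans_migrating()/wait_migrate_huge_page() pair lets a NUMA-hinting fault back off while a transparent huge page is in flight instead of touching the page being migrated. Roughly the calling pattern; the locking and variable names around it are hypothetical here:

	if (unlikely(pmd_trans_migrating(*pmdp))) {
		spin_unlock(ptl);
		wait_migrate_huge_page(vma->anon_vma, pmdp);
		return 0;
	}
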
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1cedd000cf29..35527173cf50 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1317,7 +1317,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
 
 #if USE_SPLIT_PTE_PTLOCKS
-#if BLOATED_SPINLOCKS
+#if ALLOC_SPLIT_PTLOCKS
 extern bool ptlock_alloc(struct page *page);
 extern void ptlock_free(struct page *page);
 
@@ -1325,7 +1325,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page)
 {
 	return page->ptl;
 }
-#else /* BLOATED_SPINLOCKS */
+#else /* ALLOC_SPLIT_PTLOCKS */
 static inline bool ptlock_alloc(struct page *page)
 {
 	return true;
@@ -1339,7 +1339,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page)
 {
 	return &page->ptl;
 }
-#endif /* BLOATED_SPINLOCKS */
+#endif /* ALLOC_SPLIT_PTLOCKS */
 
 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
 {
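
Illustrative sketch (not part of this diff): whether ALLOC_SPLIT_PTLOCKS makes page->ptl a pointer to an allocated lock or an embedded spinlock_t, callers always go through pte_lockptr(), so the usage pattern is unchanged:

	spinlock_t *ptl = pte_lockptr(mm, pmd);

	spin_lock(ptl);
	/* ... inspect or modify the PTEs covered by this pmd ... */
	spin_unlock(ptl);
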
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index bd299418a934..290901a8c1de 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -26,6 +26,7 @@ struct address_space;
 #define USE_SPLIT_PTE_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
 #define USE_SPLIT_PMD_PTLOCKS	(USE_SPLIT_PTE_PTLOCKS && \
 		IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
+#define ALLOC_SPLIT_PTLOCKS	(SPINLOCK_SIZE > BITS_PER_LONG/8)
 
 /*
  * Each physical page in the system has a struct page associated with
@@ -155,7 +156,7 @@ struct page {
 					 * system if PG_buddy is set.
 					 */
 #if USE_SPLIT_PTE_PTLOCKS
-#if BLOATED_SPINLOCKS
+#if ALLOC_SPLIT_PTLOCKS
 		spinlock_t *ptl;
 #else
 		spinlock_t ptl;
@@ -443,6 +444,14 @@ struct mm_struct {
 	/* numa_scan_seq prevents two threads setting pte_numa */
 	int numa_scan_seq;
 #endif
+#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+	/*
+	 * An operation with batched TLB flushing is going on. Anything that
+	 * can move process memory needs to flush the TLB when moving a
+	 * PROT_NONE or PROT_NUMA mapped page.
+	 */
+	bool tlb_flush_pending;
+#endif
 	struct uprobes_state uprobes_state;
 };
 
@@ -459,4 +468,45 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
 	return mm->cpu_vm_mask_var;
 }
 
+#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+/*
+ * Memory barriers to keep this state in sync are graciously provided by
+ * the page table locks, outside of which no page table modifications happen.
+ * The barriers below prevent the compiler from re-ordering the instructions
+ * around the memory barriers that are already present in the code.
+ */
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+	barrier();
+	return mm->tlb_flush_pending;
+}
+static inline void set_tlb_flush_pending(struct mm_struct *mm)
+{
+	mm->tlb_flush_pending = true;
+
+	/*
+	 * Guarantee that the tlb_flush_pending store does not leak into the
+	 * critical section updating the page tables
+	 */
+	smp_mb__before_spinlock();
+}
+/* Clearing is done after a TLB flush, which also provides a barrier. */
+static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+{
+	barrier();
+	mm->tlb_flush_pending = false;
+}
+#else
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+	return false;
+}
+static inline void set_tlb_flush_pending(struct mm_struct *mm)
+{
+}
+static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+{
+}
+#endif
+
 #endif /* _LINUX_MM_TYPES_H */
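
Illustrative sketch (not part of this diff): the tlb_flush_pending helpers are meant to bracket a batched protection change, so anything that inspects the page tables concurrently (for example the pte_accessible()/migration paths touched earlier in this merge) can tell that a flush is still outstanding. Roughly:

	set_tlb_flush_pending(mm);
	/* ... clear or downgrade PTEs under the page table lock ... */
	flush_tlb_range(vma, start, end);
	clear_tlb_flush_pending(mm);
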
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index abd437d0a8a7..ece0c6bbfcc5 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -51,6 +51,7 @@ struct pstore_info {
 	char		*buf;
 	size_t		bufsize;
 	struct mutex	read_mutex;	/* serialize open/read/close */
+	int		flags;
 	int		(*open)(struct pstore_info *psi);
 	int		(*close)(struct pstore_info *psi);
 	ssize_t		(*read)(u64 *id, enum pstore_type_id *type,
@@ -70,6 +71,8 @@ struct pstore_info {
 	void		*data;
 };
 
+#define PSTORE_FLAGS_FRAGILE	1
+
 #ifdef CONFIG_PSTORE
 extern int pstore_register(struct pstore_info *);
 extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason);
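
Illustrative sketch (not part of this diff): a backend sets PSTORE_FLAGS_FRAGILE in the new flags field before registering, so the pstore core can keep high-traffic sources away from it (per the "pstore: Don't allow high traffic options on fragile devices" change in this merge). A hypothetical backend; the example_* callbacks are placeholders:

	static struct pstore_info example_pstore = {
		.owner	= THIS_MODULE,
		.name	= "example",
		.flags	= PSTORE_FLAGS_FRAGILE,	/* cannot absorb console/ftrace traffic */
		.open	= example_open,
		.close	= example_close,
		.read	= example_read,
		.write	= example_write,
	};

	/* in the driver's probe path: */
	if (pstore_register(&example_pstore))
		pr_err("example: pstore registration failed\n");
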
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 8e00f9f6f963..9e7db9e73cc1 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -43,6 +43,7 @@ extern int unregister_reboot_notifier(struct notifier_block *);
  * Architecture-specific implementations of sys_reboot commands.
  */
 
+extern void migrate_to_reboot_cpu(void);
 extern void machine_restart(char *cmd);
 extern void machine_halt(void);
 extern void machine_power_off(void);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 768b037dfacb..53f97eb8dbc7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -440,8 +440,6 @@ struct task_cputime {
 		.sum_exec_runtime = 0,				\
 	}
 
-#define PREEMPT_ENABLED	(PREEMPT_NEED_RESCHED)
-
 #ifdef CONFIG_PREEMPT_COUNT
 #define PREEMPT_DISABLED	(1 + PREEMPT_ENABLED)
 #else
@@ -932,7 +930,8 @@ struct pipe_inode_info;
 struct uts_namespace;
 
 struct load_weight {
-	unsigned long weight, inv_weight;
+	unsigned long weight;
+	u32 inv_weight;
 };
 
 struct sched_avg {
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 45412a6afa69..321301c0a643 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -517,10 +517,6 @@ struct se_node_acl {
 	u32			acl_index;
 #define MAX_ACL_TAG_SIZE 64
 	char			acl_tag[MAX_ACL_TAG_SIZE];
-	u64			num_cmds;
-	u64			read_bytes;
-	u64			write_bytes;
-	spinlock_t		stats_lock;
 	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
 	atomic_t		acl_pr_ref_count;
 	struct se_dev_entry	**device_list;
@@ -624,6 +620,7 @@ struct se_dev_attrib {
 	u32		unmap_granularity;
 	u32		unmap_granularity_alignment;
 	u32		max_write_same_len;
+	u32		max_bytes_per_io;
 	struct se_device *da_dev;
 	struct config_group da_group;
 };
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index bcb0912afe7a..f854ca4a1372 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -75,6 +75,7 @@
 #define DRM_VMW_PARAM_FIFO_CAPS        4
 #define DRM_VMW_PARAM_MAX_FB_SIZE      5
 #define DRM_VMW_PARAM_FIFO_HW_VERSION  6
+#define DRM_VMW_PARAM_MAX_SURF_MEMORY  7
 
 /**
  * struct drm_vmw_getparam_arg
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index e1802d6153ae..959d454f76a1 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -679,6 +679,7 @@ enum perf_event_type {
 	 *
 	 *	{ u64		weight;      } && PERF_SAMPLE_WEIGHT
 	 *	{ u64		data_src;    } && PERF_SAMPLE_DATA_SRC
+	 *	{ u64		transaction; } && PERF_SAMPLE_TRANSACTION
 	 * };
 	 */
 	PERF_RECORD_SAMPLE		= 9,
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index 65e12099ef89..ae665ac59c36 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -146,7 +146,7 @@ struct blkif_request_segment_aligned {
 struct blkif_request_rw {
 	uint8_t        nr_segments;  /* number of segments                   */
 	blkif_vdev_t   handle;       /* only for read/write requests         */
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
 	uint32_t       _pad1;        /* offsetof(blkif_request,u.rw.id) == 8 */
 #endif
 	uint64_t       id;           /* private guest value, echoed in resp  */
@@ -163,7 +163,7 @@ struct blkif_request_discard {
 	uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero.        */
 #define BLKIF_DISCARD_SECURE (1<<0)  /* ignored if discard-secure=0          */
 	blkif_vdev_t   _pad1;        /* only for read/write requests         */
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
 	uint32_t       _pad2;        /* offsetof(blkif_req..,u.discard.id)==8*/
 #endif
 	uint64_t       id;           /* private guest value, echoed in resp  */
@@ -175,7 +175,7 @@ struct blkif_request_discard {
 struct blkif_request_other {
 	uint8_t      _pad1;
 	blkif_vdev_t _pad2;        /* only for read/write requests         */
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
 	uint32_t     _pad3;        /* offsetof(blkif_req..,u.other.id)==8*/
 #endif
 	uint64_t     id;           /* private guest value, echoed in resp  */
@@ -184,7 +184,7 @@ struct blkif_request_other {
 struct blkif_request_indirect {
 	uint8_t        indirect_op;
 	uint16_t       nr_segments;
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
 	uint32_t       _pad1;        /* offsetof(blkif_...,u.indirect.id) == 8 */
 #endif
 	uint64_t       id;
@@ -192,7 +192,7 @@ struct blkif_request_indirect {
 	blkif_vdev_t   handle;
 	uint16_t       _pad2;
 	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
 	uint32_t      _pad3;         /* make it 64 byte aligned */
 #else
 	uint64_t      _pad3;         /* make it 64 byte aligned */
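
Illustrative note (not part of this diff): switching the padding guards from CONFIG_X86_64 to !CONFIG_X86_32 keeps the explicit padding on every 64-bit architecture, not just x86-64, so the layout the comments promise holds everywhere. The intent behind those comments can be expressed as a compile-time check along these lines:

	BUILD_BUG_ON(offsetof(struct blkif_request, u.rw.id) != 8);
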