author     Olof Johansson <olof@lixom.net>    2013-12-28 18:38:32 -0500
committer  Olof Johansson <olof@lixom.net>    2013-12-28 18:38:32 -0500
commit     9b17c16525552b247cb2d9bb8eeadc87950b36ff (patch)
tree       449426f2b37bb340e286fe0def9bf9cef29ec2a7 /include/linux
parent     4cff6123536236fa84f826d9b93452903b90fe2e (diff)
parent     82f4fe707836e80b0fcaae9a4a1756e6e89c5e62 (diff)
Merge tag 'omap-for-v3.13/intc-ldp-fix' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap into fixes
From Tony Lindgren:

Fix a regression for wrong interrupt numbers for some devices after the
sparse IRQ conversion, fix DRA7 console output for earlyprintk, and fix
the LDP LCD backlight when DSS is built into the kernel and not as a
loadable module.

* tag 'omap-for-v3.13/intc-ldp-fix' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap:
  ARM: OMAP2+: Fix LCD panel backlight regression for LDP legacy booting
  ARM: OMAP2+: hwmod_data: fix missing OMAP_INTC_START in irq data
  ARM: DRA7: hwmod: Fix boot crash with DEBUG_LL + v3.13-rc5

Signed-off-by: Olof Johansson <olof@lixom.net>
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/lockref.h     2
-rw-r--r--  include/linux/math64.h     30
-rw-r--r--  include/linux/migrate.h    12
-rw-r--r--  include/linux/mm.h          6
-rw-r--r--  include/linux/mm_types.h   52
-rw-r--r--  include/linux/pstore.h      3
-rw-r--r--  include/linux/reboot.h      1
-rw-r--r--  include/linux/sched.h       5
8 files changed, 102 insertions, 9 deletions
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index c8929c3832db..4bfde0e99ed5 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -19,7 +19,7 @@
 
 #define USE_CMPXCHG_LOCKREF \
         (IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
-         IS_ENABLED(CONFIG_SMP) && !BLOATED_SPINLOCKS)
+         IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)
 
 struct lockref {
         union {
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 69ed5f5e9f6e..c45c089bfdac 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -133,4 +133,34 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
         return ret;
 }
 
+#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
+
+#ifndef mul_u64_u32_shr
+static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+{
+        return (u64)(((unsigned __int128)a * mul) >> shift);
+}
+#endif /* mul_u64_u32_shr */
+
+#else
+
+#ifndef mul_u64_u32_shr
+static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+{
+        u32 ah, al;
+        u64 ret;
+
+        al = a;
+        ah = a >> 32;
+
+        ret = ((u64)al * mul) >> shift;
+        if (ah)
+                ret += ((u64)ah * mul) << (32 - shift);
+
+        return ret;
+}
+#endif /* mul_u64_u32_shr */
+
+#endif
+
 #endif /* _LINUX_MATH64_H */
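A side note on the fallback branch above: splitting the 64-bit operand into 32-bit halves is exact as long as shift is at most 32 and the shifted product still fits in 64 bits. Below is a minimal userspace sketch (not kernel code; the helper names and sample values are made up) comparing the two strategies:

/* Standalone comparison of the two mul_u64_u32_shr() strategies shown in the
 * hunk above. Build with a compiler that provides __int128 (gcc/clang on a
 * 64-bit target). The split variant is only exact for shift <= 32 and for
 * results that fit in 64 bits. */
#include <stdint.h>
#include <stdio.h>

static uint64_t mul_shr_int128(uint64_t a, uint32_t mul, unsigned int shift)
{
        return (uint64_t)(((unsigned __int128)a * mul) >> shift);
}

static uint64_t mul_shr_split(uint64_t a, uint32_t mul, unsigned int shift)
{
        uint32_t al = (uint32_t)a, ah = (uint32_t)(a >> 32);
        uint64_t ret = ((uint64_t)al * mul) >> shift;

        if (ah)         /* high half contributes (ah * mul) << (32 - shift) */
                ret += ((uint64_t)ah * mul) << (32 - shift);
        return ret;
}

int main(void)
{
        uint64_t a = 0x2deadbeefULL;    /* arbitrary sample operand */
        uint32_t mul = 12345;
        unsigned int shift = 7;

        printf("int128: %llu\n", (unsigned long long)mul_shr_int128(a, mul, shift));
        printf("split : %llu\n", (unsigned long long)mul_shr_split(a, mul, shift));
        return 0;
}

Both paths print the same value for inputs within those bounds.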
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index f5096b58b20d..f015c059e159 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -55,7 +55,8 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
                         struct page *newpage, struct page *page);
 extern int migrate_page_move_mapping(struct address_space *mapping,
                 struct page *newpage, struct page *page,
-                struct buffer_head *head, enum migrate_mode mode);
+                struct buffer_head *head, enum migrate_mode mode,
+                int extra_count);
 #else
 
 static inline void putback_lru_pages(struct list_head *l) {}
@@ -90,10 +91,19 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
 #endif /* CONFIG_MIGRATION */
 
 #ifdef CONFIG_NUMA_BALANCING
+extern bool pmd_trans_migrating(pmd_t pmd);
+extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd);
 extern int migrate_misplaced_page(struct page *page,
                                   struct vm_area_struct *vma, int node);
 extern bool migrate_ratelimited(int node);
 #else
+static inline bool pmd_trans_migrating(pmd_t pmd)
+{
+        return false;
+}
+static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
+{
+}
 static inline int migrate_misplaced_page(struct page *page,
                                          struct vm_area_struct *vma, int node)
 {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1cedd000cf29..35527173cf50 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1317,7 +1317,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
 
 #if USE_SPLIT_PTE_PTLOCKS
-#if BLOATED_SPINLOCKS
+#if ALLOC_SPLIT_PTLOCKS
 extern bool ptlock_alloc(struct page *page);
 extern void ptlock_free(struct page *page);
 
@@ -1325,7 +1325,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page)
 {
         return page->ptl;
 }
-#else /* BLOATED_SPINLOCKS */
+#else /* ALLOC_SPLIT_PTLOCKS */
 static inline bool ptlock_alloc(struct page *page)
 {
         return true;
@@ -1339,7 +1339,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page)
 {
         return &page->ptl;
 }
-#endif /* BLOATED_SPINLOCKS */
+#endif /* ALLOC_SPLIT_PTLOCKS */
 
 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
 {
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index bd299418a934..290901a8c1de 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -26,6 +26,7 @@ struct address_space;
 #define USE_SPLIT_PTE_PTLOCKS   (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
 #define USE_SPLIT_PMD_PTLOCKS   (USE_SPLIT_PTE_PTLOCKS && \
                 IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
+#define ALLOC_SPLIT_PTLOCKS     (SPINLOCK_SIZE > BITS_PER_LONG/8)
 
 /*
  * Each physical page in the system has a struct page associated with
@@ -155,7 +156,7 @@ struct page {
                                          * system if PG_buddy is set.
                                          */
 #if USE_SPLIT_PTE_PTLOCKS
-#if BLOATED_SPINLOCKS
+#if ALLOC_SPLIT_PTLOCKS
         spinlock_t *ptl;
 #else
         spinlock_t ptl;
@@ -443,6 +444,14 @@ struct mm_struct {
         /* numa_scan_seq prevents two threads setting pte_numa */
         int numa_scan_seq;
 #endif
+#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+        /*
+         * An operation with batched TLB flushing is going on. Anything that
+         * can move process memory needs to flush the TLB when moving a
+         * PROT_NONE or PROT_NUMA mapped page.
+         */
+        bool tlb_flush_pending;
+#endif
         struct uprobes_state uprobes_state;
 };
 
@@ -459,4 +468,45 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
         return mm->cpu_vm_mask_var;
 }
 
+#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+/*
+ * Memory barriers to keep this state in sync are graciously provided by
+ * the page table locks, outside of which no page table modifications happen.
+ * The barriers below prevent the compiler from re-ordering the instructions
+ * around the memory barriers that are already present in the code.
+ */
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+        barrier();
+        return mm->tlb_flush_pending;
+}
+static inline void set_tlb_flush_pending(struct mm_struct *mm)
+{
+        mm->tlb_flush_pending = true;
+
+        /*
+         * Guarantee that the tlb_flush_pending store does not leak into the
+         * critical section updating the page tables
+         */
+        smp_mb__before_spinlock();
+}
+/* Clearing is done after a TLB flush, which also provides a barrier. */
+static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+{
+        barrier();
+        mm->tlb_flush_pending = false;
+}
+#else
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+        return false;
+}
+static inline void set_tlb_flush_pending(struct mm_struct *mm)
+{
+}
+static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+{
+}
+#endif
+
 #endif /* _LINUX_MM_TYPES_H */
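The helpers added above implement a simple handshake: a batched-flush path sets tlb_flush_pending before it starts changing page table entries under the page table lock, and anything that moves process memory checks the flag and flushes first if a batched flush is still outstanding. What follows is a rough userspace model of that protocol, not kernel code: the pthread mutex stands in for the page table lock, a compiler barrier for barrier(), and a full fence only loosely approximates smp_mb__before_spinlock().

/* Rough single-file model of the tlb_flush_pending handshake; NOT kernel
 * code. Compile with: cc -pthread model.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define compiler_barrier() __asm__ __volatile__("" ::: "memory")

struct mm_model {
        pthread_mutex_t ptl;            /* stands in for the page table lock */
        bool tlb_flush_pending;
};

static void set_tlb_flush_pending(struct mm_model *mm)
{
        mm->tlb_flush_pending = true;
        __sync_synchronize();           /* keep the store out of the locked section */
}

static bool mm_tlb_flush_pending(struct mm_model *mm)
{
        compiler_barrier();
        return mm->tlb_flush_pending;
}

static void clear_tlb_flush_pending(struct mm_model *mm)
{
        compiler_barrier();
        mm->tlb_flush_pending = false;
}

int main(void)
{
        struct mm_model mm = { PTHREAD_MUTEX_INITIALIZER, false };

        /* Batched-flush side: announce the pending flush, update the "page
         * tables" under the lock, flush, then clear the flag. */
        set_tlb_flush_pending(&mm);
        pthread_mutex_lock(&mm.ptl);
        /* ... change protections here ... */
        pthread_mutex_unlock(&mm.ptl);
        /* ... flush the TLB here ... */
        clear_tlb_flush_pending(&mm);

        /* Migration side: check the flag under the same lock and flush
         * first if a batched flush is still in progress. */
        pthread_mutex_lock(&mm.ptl);
        if (mm_tlb_flush_pending(&mm))
                printf("flush before moving the page\n");
        pthread_mutex_unlock(&mm.ptl);
        return 0;
}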
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index abd437d0a8a7..ece0c6bbfcc5 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -51,6 +51,7 @@ struct pstore_info {
         char            *buf;
         size_t          bufsize;
         struct mutex    read_mutex;     /* serialize open/read/close */
+        int             flags;
         int             (*open)(struct pstore_info *psi);
         int             (*close)(struct pstore_info *psi);
         ssize_t         (*read)(u64 *id, enum pstore_type_id *type,
@@ -70,6 +71,8 @@ struct pstore_info {
         void            *data;
 };
 
+#define PSTORE_FLAGS_FRAGILE    1
+
 #ifdef CONFIG_PSTORE
 extern int pstore_register(struct pstore_info *);
 extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason);
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 8e00f9f6f963..9e7db9e73cc1 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -43,6 +43,7 @@ extern int unregister_reboot_notifier(struct notifier_block *);
  * Architecture-specific implementations of sys_reboot commands.
  */
 
+extern void migrate_to_reboot_cpu(void);
 extern void machine_restart(char *cmd);
 extern void machine_halt(void);
 extern void machine_power_off(void);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 768b037dfacb..53f97eb8dbc7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -440,8 +440,6 @@ struct task_cputime {
                 .sum_exec_runtime = 0,                          \
         }
 
-#define PREEMPT_ENABLED (PREEMPT_NEED_RESCHED)
-
 #ifdef CONFIG_PREEMPT_COUNT
 #define PREEMPT_DISABLED        (1 + PREEMPT_ENABLED)
 #else
@@ -932,7 +930,8 @@ struct pipe_inode_info;
 struct uts_namespace;
 
 struct load_weight {
-        unsigned long weight, inv_weight;
+        unsigned long weight;
+        u32 inv_weight;
 };
 
 struct sched_avg {
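The load_weight change above narrows inv_weight to u32, which lines up with the mul_u64_u32_shr() helper added in math64.h earlier in this page: with a 32-bit reciprocal, a division by a weight can be replaced by a multiply and a 32-bit shift. The scheduler code that does this is not part of this diff; the sample numbers and the 2^32/weight reciprocal below are purely illustrative.

/* Divide-by-weight vs. multiply-by-reciprocal-and-shift, userspace sketch.
 * inv_weight is approximately 2^32 / weight, so (x * inv_weight) >> 32 is
 * roughly x / weight; the two can differ by rounding. */
#include <stdint.h>
#include <stdio.h>

static uint64_t mul_u64_u32_shr(uint64_t a, uint32_t mul, unsigned int shift)
{
        return (uint64_t)(((unsigned __int128)a * mul) >> shift);
}

int main(void)
{
        uint64_t delta = 2000000;       /* e.g. nanoseconds of runtime */
        uint32_t weight = 3121;         /* arbitrary sample weight */
        uint32_t inv_weight = (uint32_t)(0xffffffffULL / weight);

        printf("divide :  %llu\n", (unsigned long long)(delta / weight));
        printf("mul+shr:  %llu\n",
               (unsigned long long)mul_u64_u32_shr(delta, inv_weight, 32));
        return 0;
}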