author    Len Brown <len.brown@intel.com>  2011-08-03 11:30:42 -0400
committer Len Brown <len.brown@intel.com>  2011-08-03 11:30:42 -0400
commit    d0e323b47057f4492b8fa22345f38d80a469bf8d (patch)
tree      feb760c7e2cdb1e43640417409428ab858910ea3 /include/linux
parent    c027a474a68065391c8773f6e83ed5412657e369 (diff)
parent    c3e6088e1036f8084bc7444b38437da136b7588b (diff)
Merge branch 'apei' into apei-release
Some trivial conflicts due to other various merges adding to the
end of common lists sooner than this one:

	arch/ia64/Kconfig
	arch/powerpc/Kconfig
	arch/x86/Kconfig
	lib/Kconfig
	lib/Makefile

Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/acpi.h		|   2
-rw-r--r--	include/linux/bitmap.h		|   1
-rw-r--r--	include/linux/genalloc.h	|  34
-rw-r--r--	include/linux/llist.h		| 126
-rw-r--r--	include/linux/mm.h		|   1
5 files changed, 158 insertions(+), 6 deletions(-)
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 1deb2a73c2da..e19527de6a93 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -280,6 +280,8 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
 #define OSC_SB_CPUHP_OST_SUPPORT	8
 #define OSC_SB_APEI_SUPPORT		16
 
+extern bool osc_sb_apei_support_acked;
+
 /* PCI defined _OSC bits */
 /* _OSC DW1 Definition (OS Support Fields) */
 #define OSC_EXT_PCI_CONFIG_SUPPORT	1
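For context, a minimal sketch of how a driver might consult the flag this
hunk exports; the init function below is hypothetical, only
osc_sb_apei_support_acked comes from the patch:

	#include <linux/acpi.h>

	/* Hypothetical init hook: bail out unless the platform firmware
	 * acknowledged APEI support via _OSC (flag added by this patch). */
	static int example_apei_feature_init(void)
	{
		if (!osc_sb_apei_support_acked)
			return -ENODEV;
		/* ... safe to use APEI facilities from here on ... */
		return 0;
	}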
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 3bac44cce142..7ad634501e48 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -146,6 +146,7 @@ extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
 extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
 extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits);
 
+#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
 #define BITMAP_LAST_WORD_MASK(nbits) \
 ( \
 	((nbits) % BITS_PER_LONG) ? \
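To illustrate the new macro: it keeps the bits from position start upward
within a single word, masking off everything below. Assuming a 64-bit
build (BITS_PER_LONG == 64):

	/* BITMAP_FIRST_WORD_MASK(0) == ~0UL      == 0xffffffffffffffff
	 * BITMAP_FIRST_WORD_MASK(4) == ~0UL << 4 == 0xfffffffffffffff0
	 * i.e. bits 0..3 of the first word are cleared and bits 4..63 kept,
	 * complementing BITMAP_LAST_WORD_MASK() at the other end of a range.
	 */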
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 5bbebda78b02..5e98eeb2af3b 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -1,8 +1,26 @@
 /*
- * Basic general purpose allocator for managing special purpose memory
- * not managed by the regular kmalloc/kfree interface.
- * Uses for this includes on-device special memory, uncached memory
- * etc.
+ * Basic general purpose allocator for managing special purpose
+ * memory, for example, memory that is not managed by the regular
+ * kmalloc/kfree interface. Uses for this includes on-device special
+ * memory, uncached memory etc.
+ *
+ * It is safe to use the allocator in NMI handlers and other special
+ * unblockable contexts that could otherwise deadlock on locks. This
+ * is implemented by using atomic operations and retries on any
+ * conflicts. The disadvantage is that there may be livelocks in
+ * extreme cases. For better scalability, one allocator can be used
+ * for each CPU.
+ *
+ * The lockless operation only works if there is enough memory
+ * available. If new memory is added to the pool a lock has to be
+ * still taken. So any user relying on locklessness has to ensure
+ * that sufficient memory is preallocated.
+ *
+ * The basic atomic operation of this allocator is cmpxchg on long.
+ * On architectures that don't have NMI-safe cmpxchg implementation,
+ * the allocator can NOT be used in NMI handler. So code uses the
+ * allocator in NMI handler should depend on
+ * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
  *
  * This source code is licensed under the GNU General Public License,
  * Version 2. See the file COPYING for more details.
@@ -15,7 +33,7 @@
  * General purpose special memory pool descriptor.
  */
 struct gen_pool {
-	rwlock_t lock;
+	spinlock_t lock;
 	struct list_head chunks;	/* list of chunks in this pool */
 	int min_alloc_order;		/* minimum allocation order */
 };
@@ -24,8 +42,8 @@ struct gen_pool {
  * General purpose special memory pool chunk descriptor.
  */
 struct gen_pool_chunk {
-	spinlock_t lock;
 	struct list_head next_chunk;	/* next chunk in pool */
+	atomic_t avail;
 	phys_addr_t phys_addr;		/* physical starting address of memory chunk */
 	unsigned long start_addr;	/* starting address of memory chunk */
 	unsigned long end_addr;		/* ending address of memory chunk */
@@ -56,4 +74,8 @@ static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr,
 extern void gen_pool_destroy(struct gen_pool *);
 extern unsigned long gen_pool_alloc(struct gen_pool *, size_t);
 extern void gen_pool_free(struct gen_pool *, unsigned long, size_t);
+extern void gen_pool_for_each_chunk(struct gen_pool *,
+	void (*)(struct gen_pool *, struct gen_pool_chunk *, void *), void *);
+extern size_t gen_pool_avail(struct gen_pool *);
+extern size_t gen_pool_size(struct gen_pool *);
 #endif /* __GENALLOC_H__ */
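A minimal usage sketch of the gen_pool API this file exposes; the
pool-creation calls are part of the existing genalloc interface, and the
buffer, sizes, and function names are illustrative:

	#include <linux/genalloc.h>
	#include <linux/log2.h>

	static struct gen_pool *example_pool;

	/* Preallocate all backing memory up front, so that later
	 * gen_pool_alloc()/gen_pool_free() calls can stay lockless;
	 * gen_pool_add() itself still takes the pool lock. */
	static int example_pool_init(unsigned long buf, size_t size)
	{
		int ret;

		example_pool = gen_pool_create(ilog2(8), -1);	/* 8-byte min alloc, any node */
		if (!example_pool)
			return -ENOMEM;

		ret = gen_pool_add(example_pool, buf, size, -1);
		if (ret) {
			gen_pool_destroy(example_pool);
			return ret;
		}

		pr_info("pool: %zu of %zu bytes free\n",
			gen_pool_avail(example_pool),
			gen_pool_size(example_pool));
		return 0;
	}

	/* On CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG architectures this pair
	 * is usable even from NMI context, per the header comment. */
	static void example_pool_use(void)
	{
		unsigned long obj = gen_pool_alloc(example_pool, 64);

		if (obj)
			gen_pool_free(example_pool, obj, 64);
	}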
diff --git a/include/linux/llist.h b/include/linux/llist.h
new file mode 100644
index 000000000000..aa0c8b5b3cd0
--- /dev/null
+++ b/include/linux/llist.h
@@ -0,0 +1,126 @@
+#ifndef LLIST_H
+#define LLIST_H
+/*
+ * Lock-less NULL terminated single linked list
+ *
+ * If there are multiple producers and multiple consumers, llist_add
+ * can be used in producers and llist_del_all can be used in
+ * consumers. They can work simultaneously without lock. But
+ * llist_del_first can not be used here. Because llist_del_first
+ * depends on list->first->next does not changed if list->first is not
+ * changed during its operation, but llist_del_first, llist_add,
+ * llist_add (or llist_del_all, llist_add, llist_add) sequence in
+ * another consumer may violate that.
+ *
+ * If there are multiple producers and one consumer, llist_add can be
+ * used in producers and llist_del_all or llist_del_first can be used
+ * in the consumer.
+ *
+ * This can be summarized as follow:
+ *
+ *           |   add    | del_first |  del_all
+ * add       |    -     |     -     |     -
+ * del_first |          |     L     |     L
+ * del_all   |          |           |     -
+ *
+ * Where "-" stands for no lock is needed, while "L" stands for lock
+ * is needed.
+ *
+ * The list entries deleted via llist_del_all can be traversed with
+ * traversing function such as llist_for_each etc. But the list
+ * entries can not be traversed safely before deleted from the list.
+ * The order of deleted entries is from the newest to the oldest added
+ * one. If you want to traverse from the oldest to the newest, you
+ * must reverse the order by yourself before traversing.
+ *
+ * The basic atomic operation of this list is cmpxchg on long. On
+ * architectures that don't have NMI-safe cmpxchg implementation, the
+ * list can NOT be used in NMI handler. So code uses the list in NMI
+ * handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
+ */
+
+struct llist_head {
+	struct llist_node *first;
+};
+
+struct llist_node {
+	struct llist_node *next;
+};
+
+#define LLIST_HEAD_INIT(name)	{ NULL }
+#define LLIST_HEAD(name)	struct llist_head name = LLIST_HEAD_INIT(name)
+
+/**
+ * init_llist_head - initialize lock-less list head
+ * @head:	the head for your lock-less list
+ */
+static inline void init_llist_head(struct llist_head *list)
+{
+	list->first = NULL;
+}
+
+/**
+ * llist_entry - get the struct of this entry
+ * @ptr:	the &struct llist_node pointer.
+ * @type:	the type of the struct this is embedded in.
+ * @member:	the name of the llist_node within the struct.
+ */
+#define llist_entry(ptr, type, member)		\
+	container_of(ptr, type, member)
+
+/**
+ * llist_for_each - iterate over some deleted entries of a lock-less list
+ * @pos:	the &struct llist_node to use as a loop cursor
+ * @node:	the first entry of deleted list entries
+ *
+ * In general, some entries of the lock-less list can be traversed
+ * safely only after being deleted from list, so start with an entry
+ * instead of list head.
+ *
+ * If being used on entries deleted from lock-less list directly, the
+ * traverse order is from the newest to the oldest added entry. If
+ * you want to traverse from the oldest to the newest, you must
+ * reverse the order by yourself before traversing.
+ */
+#define llist_for_each(pos, node)			\
+	for ((pos) = (node); pos; (pos) = (pos)->next)
+
+/**
+ * llist_for_each_entry - iterate over some deleted entries of lock-less list of given type
+ * @pos:	the type * to use as a loop cursor.
+ * @node:	the fist entry of deleted list entries.
+ * @member:	the name of the llist_node with the struct.
+ *
+ * In general, some entries of the lock-less list can be traversed
+ * safely only after being removed from list, so start with an entry
+ * instead of list head.
+ *
+ * If being used on entries deleted from lock-less list directly, the
+ * traverse order is from the newest to the oldest added entry. If
+ * you want to traverse from the oldest to the newest, you must
+ * reverse the order by yourself before traversing.
+ */
+#define llist_for_each_entry(pos, node, member)				\
+	for ((pos) = llist_entry((node), typeof(*(pos)), member);	\
+	     &(pos)->member != NULL;					\
+	     (pos) = llist_entry((pos)->member.next, typeof(*(pos)), member))
+
+/**
+ * llist_empty - tests whether a lock-less list is empty
+ * @head:	the list to test
+ *
+ * Not guaranteed to be accurate or up to date. Just a quick way to
+ * test whether the list is empty without deleting something from the
+ * list.
+ */
+static inline int llist_empty(const struct llist_head *head)
+{
+	return ACCESS_ONCE(head->first) == NULL;
+}
+
+void llist_add(struct llist_node *new, struct llist_head *head);
+void llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
+		     struct llist_head *head);
+struct llist_node *llist_del_first(struct llist_head *head);
+struct llist_node *llist_del_all(struct llist_head *head);
+#endif /* LLIST_H */
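To make the intended usage concrete, a sketch built only on the API
declared above; the work-item struct, reversal helper, and function names
are illustrative, not part of the patch:

	#include <linux/llist.h>

	/* Illustrative work item embedding an llist_node. */
	struct example_work {
		struct llist_node node;
		int payload;
	};

	static LLIST_HEAD(example_list);

	/* Producer side: safe to call concurrently from many CPUs
	 * (and from IRQ/NMI context on NMI-safe-cmpxchg architectures). */
	static void example_produce(struct example_work *w)
	{
		llist_add(&w->node, &example_list);
	}

	/* llist_del_all() hands back entries newest-first; reverse the
	 * chain when FIFO processing order is wanted, as the header
	 * comment advises. */
	static struct llist_node *example_reverse(struct llist_node *head)
	{
		struct llist_node *rev = NULL;

		while (head) {
			struct llist_node *next = head->next;

			head->next = rev;
			rev = head;
			head = next;
		}
		return rev;
	}

	/* Single-consumer side: grab the whole batch in one atomic op,
	 * then traverse it privately with llist_for_each_entry(). */
	static void example_consume(void)
	{
		struct llist_node *batch;
		struct example_work *w;

		batch = example_reverse(llist_del_all(&example_list));
		llist_for_each_entry(w, batch, node) {
			/* process w->payload; do not free w here, the
			 * macro reads w->node.next after the loop body. */
		}
	}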
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3172a1c0f08e..f2690cf49827 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1600,6 +1600,7 @@ enum mf_flags {
 };
 extern void memory_failure(unsigned long pfn, int trapno);
 extern int __memory_failure(unsigned long pfn, int trapno, int flags);
+extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
 extern int unpoison_memory(unsigned long pfn);
 extern int sysctl_memory_failure_early_kill;
 extern int sysctl_memory_failure_recovery;
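A hedged sketch of the call pattern the new declaration suggests:
memory_failure() may need to sleep, so an error handler running in an
interrupt-like context would queue the work instead. The callback below
is hypothetical, only memory_failure_queue() comes from this patch:

	#include <linux/mm.h>

	/* Hypothetical hardware-error callback running in IRQ/NMI-like
	 * context: defer recovery rather than calling memory_failure()
	 * directly. flags == 0 requests default handling. */
	static void example_report_bad_page(unsigned long pfn, int trapno)
	{
		memory_failure_queue(pfn, trapno, 0);
	}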