Diffstat (limited to 'lib')
 lib/Kconfig       |  13
 lib/Kconfig.debug |   8
 lib/Makefile      |   2
 lib/cpu_rmap.c    | 269
 lib/list_debug.c  |  39
 lib/nlattr.c      |   2
 lib/radix-tree.c  |   7
 lib/rbtree.c      |   3
 lib/swiotlb.c     |   6
 lib/textsearch.c  |  10
 lib/xz/Kconfig    |  12
 11 files changed, 336 insertions(+), 35 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 0ee67e08ad3e..3a55a43c43eb 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -201,6 +201,10 @@ config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
201 bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS 201 bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS
202 depends on EXPERIMENTAL && BROKEN 202 depends on EXPERIMENTAL && BROKEN
203 203
204config CPU_RMAP
205 bool
206 depends on SMP
207
204# 208#
205# Netlink attribute parsing support is select'ed if needed 209# Netlink attribute parsing support is select'ed if needed
206# 210#
@@ -217,6 +221,13 @@ config LRU_CACHE
217 tristate 221 tristate
218 222
219config AVERAGE 223config AVERAGE
220 bool 224 bool "Averaging functions"
225 help
226 This option is provided for the case where no in-kernel-tree
227 modules require averaging functions, but a module built outside
228 the kernel tree does. Such modules that use library averaging
229 functions require Y here.
230
231 If unsure, say N.
221 232
222endmenu 233endmenu
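The new AVERAGE prompt exists for out-of-tree modules that need the EWMA helpers in lib/average.c. A minimal usage sketch against the 2.6.38-era <linux/average.h> API; the module-side names here are hypothetical:

#include <linux/average.h>

static struct ewma signal_avg;

static void example_init(void)
{
	/* factor 1024 (fixed-point scaling) and weight 8 (decay rate);
	 * both must be powers of two.
	 */
	ewma_init(&signal_avg, 1024, 8);
}

static void example_sample(unsigned long rssi)
{
	/* Fold a new sample into the exponentially weighted average */
	ewma_add(&signal_avg, rssi);
}

static unsigned long example_read(void)
{
	return ewma_read(&signal_avg);
}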
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2d05adb98401..2b97418c67e2 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -657,7 +657,7 @@ config DEBUG_HIGHMEM
 	  Disable for production systems.

 config DEBUG_BUGVERBOSE
-	bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EMBEDDED
+	bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT
 	depends on BUG
 	depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \
 		   FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300
@@ -729,8 +729,8 @@ config DEBUG_WRITECOUNT
 	  If unsure, say N.

 config DEBUG_MEMORY_INIT
-	bool "Debug memory initialisation" if EMBEDDED
-	default !EMBEDDED
+	bool "Debug memory initialisation" if EXPERT
+	default !EXPERT
 	help
 	  Enable this for additional checks during memory initialisation.
 	  The sanity checks verify aspects of the VM such as the memory model
@@ -805,7 +805,7 @@ config ARCH_WANT_FRAME_POINTERS
 config FRAME_POINTER
 	bool "Compile the kernel with frame pointers"
 	depends on DEBUG_KERNEL && \
-		(CRIS || M68K || M68KNOMMU || FRV || UML || \
+		(CRIS || M68K || FRV || UML || \
 		 AVR32 || SUPERH || BLACKFIN || MN10300) || \
 		ARCH_WANT_FRAME_POINTERS
 	default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS
diff --git a/lib/Makefile b/lib/Makefile
index cbb774f7d41d..b73ba01a818a 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -110,6 +110,8 @@ obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o

 obj-$(CONFIG_AVERAGE) += average.o

+obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
+
 hostprogs-y	:= gen_crc32table
 clean-files	:= crc32table.h

diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
new file mode 100644
index 000000000000..987acfafeb83
--- /dev/null
+++ b/lib/cpu_rmap.c
@@ -0,0 +1,269 @@
+/*
+ * cpu_rmap.c: CPU affinity reverse-map support
+ * Copyright 2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/cpu_rmap.h>
+#ifdef CONFIG_GENERIC_HARDIRQS
+#include <linux/interrupt.h>
+#endif
+#include <linux/module.h>
+
+/*
+ * These functions maintain a mapping from CPUs to some ordered set of
+ * objects with CPU affinities. This can be seen as a reverse-map of
+ * CPU affinity. However, we do not assume that the object affinities
+ * cover all CPUs in the system. For those CPUs not directly covered
+ * by object affinities, we attempt to find a nearest object based on
+ * CPU topology.
+ */
+
+/**
+ * alloc_cpu_rmap - allocate CPU affinity reverse-map
+ * @size: Number of objects to be mapped
+ * @flags: Allocation flags e.g. %GFP_KERNEL
+ */
+struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
+{
+	struct cpu_rmap *rmap;
+	unsigned int cpu;
+	size_t obj_offset;
+
+	/* This is a silly number of objects, and we use u16 indices. */
+	if (size > 0xffff)
+		return NULL;
+
+	/* Offset of object pointer array from base structure */
+	obj_offset = ALIGN(offsetof(struct cpu_rmap, near[nr_cpu_ids]),
+			   sizeof(void *));
+
+	rmap = kzalloc(obj_offset + size * sizeof(rmap->obj[0]), flags);
+	if (!rmap)
+		return NULL;
+
+	rmap->obj = (void **)((char *)rmap + obj_offset);
+
+	/* Initially assign CPUs to objects on a rota, since we have
+	 * no idea where the objects are. Use infinite distance, so
+	 * any object with known distance is preferable. Include the
+	 * CPUs that are not present/online, since we definitely want
+	 * any newly-hotplugged CPUs to have some object assigned.
+	 */
+	for_each_possible_cpu(cpu) {
+		rmap->near[cpu].index = cpu % size;
+		rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
+	}
+
+	rmap->size = size;
+	return rmap;
+}
+EXPORT_SYMBOL(alloc_cpu_rmap);
+
+/* Reevaluate nearest object for given CPU, comparing with the given
+ * neighbours at the given distance.
+ */
+static bool cpu_rmap_copy_neigh(struct cpu_rmap *rmap, unsigned int cpu,
+				const struct cpumask *mask, u16 dist)
+{
+	int neigh;
+
+	for_each_cpu(neigh, mask) {
+		if (rmap->near[cpu].dist > dist &&
+		    rmap->near[neigh].dist <= dist) {
+			rmap->near[cpu].index = rmap->near[neigh].index;
+			rmap->near[cpu].dist = dist;
+			return true;
+		}
+	}
+	return false;
+}
+
+#ifdef DEBUG
+static void debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
+{
+	unsigned index;
+	unsigned int cpu;
+
+	pr_info("cpu_rmap %p, %s:\n", rmap, prefix);
+
+	for_each_possible_cpu(cpu) {
+		index = rmap->near[cpu].index;
+		pr_info("cpu %d -> obj %u (distance %u)\n",
+			cpu, index, rmap->near[cpu].dist);
+	}
+}
+#else
+static inline void
+debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
+{
+}
+#endif
+
+/**
+ * cpu_rmap_add - add object to a rmap
+ * @rmap: CPU rmap allocated with alloc_cpu_rmap()
+ * @obj: Object to add to rmap
+ *
+ * Return index of object.
+ */
+int cpu_rmap_add(struct cpu_rmap *rmap, void *obj)
+{
+	u16 index;
+
+	BUG_ON(rmap->used >= rmap->size);
+	index = rmap->used++;
+	rmap->obj[index] = obj;
+	return index;
+}
+EXPORT_SYMBOL(cpu_rmap_add);
+
+/**
+ * cpu_rmap_update - update CPU rmap following a change of object affinity
+ * @rmap: CPU rmap to update
+ * @index: Index of object whose affinity changed
+ * @affinity: New CPU affinity of object
+ */
+int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
+		    const struct cpumask *affinity)
+{
+	cpumask_var_t update_mask;
+	unsigned int cpu;
+
+	if (unlikely(!zalloc_cpumask_var(&update_mask, GFP_KERNEL)))
+		return -ENOMEM;
+
+	/* Invalidate distance for all CPUs for which this used to be
+	 * the nearest object. Mark those CPUs for update.
+	 */
+	for_each_online_cpu(cpu) {
+		if (rmap->near[cpu].index == index) {
+			rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
+			cpumask_set_cpu(cpu, update_mask);
+		}
+	}
+
+	debug_print_rmap(rmap, "after invalidating old distances");
+
+	/* Set distance to 0 for all CPUs in the new affinity mask.
+	 * Mark all CPUs within their NUMA nodes for update.
+	 */
+	for_each_cpu(cpu, affinity) {
+		rmap->near[cpu].index = index;
+		rmap->near[cpu].dist = 0;
+		cpumask_or(update_mask, update_mask,
+			   cpumask_of_node(cpu_to_node(cpu)));
+	}
+
+	debug_print_rmap(rmap, "after updating neighbours");
+
+	/* Update distances based on topology */
+	for_each_cpu(cpu, update_mask) {
+		if (cpu_rmap_copy_neigh(rmap, cpu,
+					topology_thread_cpumask(cpu), 1))
+			continue;
+		if (cpu_rmap_copy_neigh(rmap, cpu,
+					topology_core_cpumask(cpu), 2))
+			continue;
+		if (cpu_rmap_copy_neigh(rmap, cpu,
+					cpumask_of_node(cpu_to_node(cpu)), 3))
+			continue;
+		/* We could continue into NUMA node distances, but for now
+		 * we give up.
+		 */
+	}
+
+	debug_print_rmap(rmap, "after copying neighbours");
+
+	free_cpumask_var(update_mask);
+	return 0;
+}
+EXPORT_SYMBOL(cpu_rmap_update);
+
+#ifdef CONFIG_GENERIC_HARDIRQS
+
+/* Glue between IRQ affinity notifiers and CPU rmaps */
+
+struct irq_glue {
+	struct irq_affinity_notify notify;
+	struct cpu_rmap *rmap;
+	u16 index;
+};
+
+/**
+ * free_irq_cpu_rmap - free a CPU affinity reverse-map used for IRQs
+ * @rmap: Reverse-map allocated with alloc_irq_cpu_map(), or %NULL
+ *
+ * Must be called in process context, before freeing the IRQs, and
+ * without holding any locks required by global workqueue items.
+ */
+void free_irq_cpu_rmap(struct cpu_rmap *rmap)
+{
+	struct irq_glue *glue;
+	u16 index;
+
+	if (!rmap)
+		return;
+
+	for (index = 0; index < rmap->used; index++) {
+		glue = rmap->obj[index];
+		irq_set_affinity_notifier(glue->notify.irq, NULL);
+	}
+	irq_run_affinity_notifiers();
+
+	kfree(rmap);
+}
+EXPORT_SYMBOL(free_irq_cpu_rmap);
+
+static void
+irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
+{
+	struct irq_glue *glue =
+		container_of(notify, struct irq_glue, notify);
+	int rc;
+
+	rc = cpu_rmap_update(glue->rmap, glue->index, mask);
+	if (rc)
+		pr_warning("irq_cpu_rmap_notify: update failed: %d\n", rc);
+}
+
+static void irq_cpu_rmap_release(struct kref *ref)
+{
+	struct irq_glue *glue =
+		container_of(ref, struct irq_glue, notify.kref);
+	kfree(glue);
+}
+
+/**
+ * irq_cpu_rmap_add - add an IRQ to a CPU affinity reverse-map
+ * @rmap: The reverse-map
+ * @irq: The IRQ number
+ *
+ * This adds an IRQ affinity notifier that will update the reverse-map
+ * automatically.
+ *
+ * Must be called in process context, after the IRQ is allocated but
+ * before it is bound with request_irq().
+ */
+int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
+{
+	struct irq_glue *glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+	int rc;
+
+	if (!glue)
+		return -ENOMEM;
+	glue->notify.notify = irq_cpu_rmap_notify;
+	glue->notify.release = irq_cpu_rmap_release;
+	glue->rmap = rmap;
+	glue->index = cpu_rmap_add(rmap, glue);
+	rc = irq_set_affinity_notifier(irq, &glue->notify);
+	if (rc)
+		kfree(glue);
+	return rc;
+}
+EXPORT_SYMBOL(irq_cpu_rmap_add);
+
+#endif /* CONFIG_GENERIC_HARDIRQS */
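For context, a multiqueue network driver would wire the new API up roughly like this (a hypothetical sketch, not part of the patch; error unwinding is elided for brevity):

#include <linux/cpu_rmap.h>

static struct cpu_rmap *queue_rmap;

static int example_setup_rx_irqs(int *irqs, unsigned int n_queues)
{
	unsigned int i;
	int rc;

	/* One rmap entry per RX queue/IRQ pair */
	queue_rmap = alloc_cpu_rmap(n_queues, GFP_KERNEL);
	if (!queue_rmap)
		return -ENOMEM;

	for (i = 0; i < n_queues; i++) {
		/* Registers an affinity notifier so the rmap tracks
		 * affinity changes; must precede request_irq().
		 */
		rc = irq_cpu_rmap_add(queue_rmap, irqs[i]);
		if (rc)
			return rc;	/* unwinding elided */
	}
	return 0;
}

On teardown the driver would call free_irq_cpu_rmap(queue_rmap) before freeing the IRQs, matching the kernel-doc constraints above.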
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 344c710d16ca..b8029a5583ff 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -35,6 +35,31 @@ void __list_add(struct list_head *new,
 }
 EXPORT_SYMBOL(__list_add);

+void __list_del_entry(struct list_head *entry)
+{
+	struct list_head *prev, *next;
+
+	prev = entry->prev;
+	next = entry->next;
+
+	if (WARN(next == LIST_POISON1,
+		"list_del corruption, %p->next is LIST_POISON1 (%p)\n",
+		entry, LIST_POISON1) ||
+	    WARN(prev == LIST_POISON2,
+		"list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
+		entry, LIST_POISON2) ||
+	    WARN(prev->next != entry,
+		"list_del corruption. prev->next should be %p, "
+		"but was %p\n", entry, prev->next) ||
+	    WARN(next->prev != entry,
+		"list_del corruption. next->prev should be %p, "
+		"but was %p\n", entry, next->prev))
+		return;
+
+	__list_del(prev, next);
+}
+EXPORT_SYMBOL(__list_del_entry);
+
 /**
  * list_del - deletes entry from list.
  * @entry: the element to delete from the list.
@@ -43,19 +68,7 @@ EXPORT_SYMBOL(__list_add);
  */
 void list_del(struct list_head *entry)
 {
-	WARN(entry->next == LIST_POISON1,
-		"list_del corruption, next is LIST_POISON1 (%p)\n",
-		LIST_POISON1);
-	WARN(entry->next != LIST_POISON1 && entry->prev == LIST_POISON2,
-		"list_del corruption, prev is LIST_POISON2 (%p)\n",
-		LIST_POISON2);
-	WARN(entry->prev->next != entry,
-		"list_del corruption. prev->next should be %p, "
-		"but was %p\n", entry, entry->prev->next);
-	WARN(entry->next->prev != entry,
-		"list_del corruption. next->prev should be %p, "
-		"but was %p\n", entry, entry->next->prev);
-	__list_del(entry->prev, entry->next);
+	__list_del_entry(entry);
 	entry->next = LIST_POISON1;
 	entry->prev = LIST_POISON2;
 }
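With the checks factored into __list_del_entry(), a corrupted delete under CONFIG_DEBUG_LIST now warns and bails out instead of dereferencing poisoned pointers. An illustrative, deliberately buggy caller:

#include <linux/list.h>

struct item {
	struct list_head link;
	int value;
};

static LIST_HEAD(items);

static void example_double_del(struct item *it)
{
	list_add(&it->link, &items);

	list_del(&it->link);	/* ok: unlinks and poisons the entry */

	/* Second delete: entry->next is now LIST_POISON1, so with
	 * CONFIG_DEBUG_LIST the new __list_del_entry() warns and
	 * returns early rather than chasing the poison values.
	 */
	list_del(&it->link);
}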
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 5021cbc34411..ac09f2226dc7 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -148,7 +148,7 @@ nla_policy_len(const struct nla_policy *p, int n)
 {
 	int i, len = 0;

-	for (i = 0; i < n; i++) {
+	for (i = 0; i < n; i++, p++) {
 		if (p->len)
 			len += nla_total_size(p->len);
 		else if (nla_attr_minlen[p->type])
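Before this one-character fix, nla_policy_len() sized every attribute from p[0] because the policy pointer never advanced; with p++ the loop walks the array as intended. A hypothetical policy it now sums correctly:

#include <net/netlink.h>

enum { ATTR_UNSPEC, ATTR_A, ATTR_B, ATTR_C, __ATTR_MAX };

/* With the fix, nla_policy_len() sums sizes for ATTR_A, ATTR_B and
 * ATTR_C instead of counting the first entry repeatedly.
 */
static const struct nla_policy example_policy[__ATTR_MAX] = {
	[ATTR_A] = { .type = NLA_U32 },
	[ATTR_B] = { .type = NLA_U8 },
	[ATTR_C] = { .len = 16 },	/* fixed-length binary blob */
};

static int example_len(void)
{
	/* Upper bound on the attribute payload for this policy */
	return nla_policy_len(example_policy, __ATTR_MAX);
}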
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 5086bb962b4d..7ea2e033d715 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -736,10 +736,11 @@ next:
 		}
 	}
 	/*
-	 * The iftag must have been set somewhere because otherwise
-	 * we would return immediated at the beginning of the function
+	 * We need not to tag the root tag if there is no tag which is set with
+	 * settag within the range from *first_indexp to last_index.
 	 */
-	root_tag_set(root, settag);
+	if (tagged > 0)
+		root_tag_set(root, settag);
 	*first_indexp = index;

 	return tagged;
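The guard matters because the function may tag nothing: previously the settag root tag was set even when no slot in the range carried iftag, making the root lie about its children. Callers typically batch over a range, as in this sketch modeled on tag_pages_for_writeback() in mm/page-writeback.c (locking is the caller's responsibility):

#include <linux/radix-tree.h>
#include <linux/fs.h>

#define EXAMPLE_TAG_BATCH	4096UL

/* Re-tag every DIRTY slot in [start, end] as TOWRITE, in batches.
 * With the fix above, a batch that tags nothing no longer sets the
 * TOWRITE tag on the root node by mistake.
 */
static void example_tag_range(struct radix_tree_root *root,
			      unsigned long start, unsigned long end)
{
	unsigned long tagged;

	do {
		/* Updates 'start' to resume after the last tagged slot */
		tagged = radix_tree_range_tag_if_tagged(root, &start, end,
					EXAMPLE_TAG_BATCH,
					PAGECACHE_TAG_DIRTY,
					PAGECACHE_TAG_TOWRITE);
	} while (tagged >= EXAMPLE_TAG_BATCH && start <= end);
}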
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 4693f79195d3..a16be19a1305 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -315,6 +315,7 @@ void rb_augment_insert(struct rb_node *node, rb_augment_f func, void *data)

 	rb_augment_path(node, func, data);
 }
+EXPORT_SYMBOL(rb_augment_insert);

 /*
  * before removing the node, find the deepest node on the rebalance path
@@ -340,6 +341,7 @@ struct rb_node *rb_augment_erase_begin(struct rb_node *node)

 	return deepest;
 }
+EXPORT_SYMBOL(rb_augment_erase_begin);

 /*
  * after removal, update the tree to account for the removed entry
@@ -350,6 +352,7 @@ void rb_augment_erase_end(struct rb_node *node, rb_augment_f func, void *data)
 	if (node)
 		rb_augment_path(node, func, data);
 }
+EXPORT_SYMBOL(rb_augment_erase_end);

 /*
  * This function returns the first node (in sort order) of the tree.
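These three exports make the augmented-rbtree helpers usable from modules. The erase-side calling pattern they support looks roughly like this (root, node and the rb_augment_f callback are the caller's own):

#include <linux/rbtree.h>

/* Erase 'node' from an augmented rbtree while keeping the per-node
 * augmented data (maintained by 'my_augment_cb') consistent.
 */
static void example_augmented_erase(struct rb_root *root,
				    struct rb_node *node,
				    rb_augment_f my_augment_cb, void *data)
{
	/* Find the deepest node on the rebalance path before unlinking */
	struct rb_node *deepest = rb_augment_erase_begin(node);

	rb_erase(node, root);

	/* Propagate augmented-data updates from that node to the root */
	rb_augment_erase_end(deepest, my_augment_cb, data);
}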
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index c47bbe11b804..93ca08b8a451 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -686,8 +686,10 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	/*
 	 * Ensure that the address returned is DMA'ble
 	 */
-	if (!dma_capable(dev, dev_addr, size))
-		panic("map_single: bounce buffer is not DMA'ble");
+	if (!dma_capable(dev, dev_addr, size)) {
+		swiotlb_tbl_unmap_single(dev, map, size, dir);
+		dev_addr = swiotlb_virt_to_bus(dev, io_tlb_overflow_buffer);
+	}

 	return dev_addr;
 }
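Since a mapping failure now comes back as the overflow buffer address instead of a panic, callers are expected to check the result through the generic DMA API, along these lines (a sketch, not from this patch):

#include <linux/dma-mapping.h>

static int example_map_rx_page(struct device *dev, struct page *page,
			       dma_addr_t *addr_out)
{
	dma_addr_t addr;

	addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

	/* After this patch, an swiotlb bounce-buffer failure surfaces
	 * here via dma_mapping_error() (which recognizes the overflow
	 * buffer address) rather than panicking in swiotlb_map_page().
	 */
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	*addr_out = addr;
	return 0;
}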
diff --git a/lib/textsearch.c b/lib/textsearch.c
index d608331b3e47..e0cc0146ae62 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -13,7 +13,7 @@
  *
  * INTRODUCTION
  *
- * The textsearch infrastructure provides text searching facitilies for
+ * The textsearch infrastructure provides text searching facilities for
  * both linear and non-linear data. Individual search algorithms are
  * implemented in modules and chosen by the user.
  *
@@ -43,7 +43,7 @@
  *     to the algorithm to store persistent variables.
  * (4) Core eventually resets the search offset and forwards the find()
  *     request to the algorithm.
- * (5) Algorithm calls get_next_block() provided by the user continously
+ * (5) Algorithm calls get_next_block() provided by the user continuously
  *     to fetch the data to be searched in block by block.
  * (6) Algorithm invokes finish() after the last call to get_next_block
  *     to clean up any leftovers from get_next_block. (Optional)
@@ -58,15 +58,15 @@
  * the pattern to look for and flags. As a flag, you can set TS_IGNORECASE
  * to perform case insensitive matching. But it might slow down
  * performance of algorithm, so you should use it at own your risk.
- * The returned configuration may then be used for an arbitary
+ * The returned configuration may then be used for an arbitrary
  * amount of times and even in parallel as long as a separate struct
  * ts_state variable is provided to every instance.
  *
  * The actual search is performed by either calling textsearch_find_-
  * continuous() for linear data or by providing an own get_next_block()
  * implementation and calling textsearch_find(). Both functions return
- * the position of the first occurrence of the patern or UINT_MAX if
- * no match was found. Subsequent occurences can be found by calling
+ * the position of the first occurrence of the pattern or UINT_MAX if
+ * no match was found. Subsequent occurrences can be found by calling
  * textsearch_next() regardless of the linearity of the data.
  *
  * Once you're done using a configuration it must be given back via
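The flow described in this comment corresponds to code along these lines (a sketch using the textsearch API; the pattern and algorithm choice are arbitrary):

#include <linux/textsearch.h>
#include <linux/err.h>
#include <linux/errno.h>

static int example_search(const char *haystack, size_t len)
{
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	/* "kmp" selects the Knuth-Morris-Pratt module; TS_AUTOLOAD
	 * loads it on demand. TS_IGNORECASE (see above) could be
	 * OR'ed in for case-insensitive matching.
	 */
	conf = textsearch_prepare("kmp", "chicken", 7, GFP_KERNEL,
				  TS_AUTOLOAD);
	if (IS_ERR(conf))
		return PTR_ERR(conf);

	/* Linear data, so textsearch_find_continuous() suffices;
	 * UINT_MAX means no match, and textsearch_next() would find
	 * further occurrences.
	 */
	pos = textsearch_find_continuous(conf, &state, haystack, len);

	textsearch_destroy(conf);
	return pos == UINT_MAX ? -ENOENT : (int)pos;
}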
diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig
index e3b6e18fdac5..60a6088d0e5e 100644
--- a/lib/xz/Kconfig
+++ b/lib/xz/Kconfig
@@ -7,37 +7,37 @@ config XZ_DEC
 	  CRC32 is supported. See Documentation/xz.txt for more information.

 config XZ_DEC_X86
-	bool "x86 BCJ filter decoder" if EMBEDDED
+	bool "x86 BCJ filter decoder" if EXPERT
 	default y
 	depends on XZ_DEC
 	select XZ_DEC_BCJ

 config XZ_DEC_POWERPC
-	bool "PowerPC BCJ filter decoder" if EMBEDDED
+	bool "PowerPC BCJ filter decoder" if EXPERT
 	default y
 	depends on XZ_DEC
 	select XZ_DEC_BCJ

 config XZ_DEC_IA64
-	bool "IA-64 BCJ filter decoder" if EMBEDDED
+	bool "IA-64 BCJ filter decoder" if EXPERT
 	default y
 	depends on XZ_DEC
 	select XZ_DEC_BCJ

 config XZ_DEC_ARM
-	bool "ARM BCJ filter decoder" if EMBEDDED
+	bool "ARM BCJ filter decoder" if EXPERT
 	default y
 	depends on XZ_DEC
 	select XZ_DEC_BCJ

 config XZ_DEC_ARMTHUMB
-	bool "ARM-Thumb BCJ filter decoder" if EMBEDDED
+	bool "ARM-Thumb BCJ filter decoder" if EXPERT
 	default y
 	depends on XZ_DEC
 	select XZ_DEC_BCJ

 config XZ_DEC_SPARC
-	bool "SPARC BCJ filter decoder" if EMBEDDED
+	bool "SPARC BCJ filter decoder" if EXPERT
 	default y
 	depends on XZ_DEC
 	select XZ_DEC_BCJ