author     Ingo Molnar <mingo@elte.hu>  2008-07-24 02:09:26 -0400
committer  Ingo Molnar <mingo@elte.hu>  2008-07-24 02:09:26 -0400
commit     28afe961a18f77b2249062499bdbf70fd2ec6bba (patch)
tree       71a5cb32924b8c8256bbc0f2f81c6b8c2ac79108 /lib
parent     1e01cb0c6ff7e9ddb6547551794c6aa82785a7cb (diff)
parent     338b9bb3adac0d2c5a1e180491d9b001d624c402 (diff)

Merge branch 'linus' into tracing/urgent
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug        29
-rw-r--r--  lib/Kconfig.kgdb          3
-rw-r--r--  lib/cpumask.c             9
-rw-r--r--  lib/kobject.c            10
-rw-r--r--  lib/kobject_uevent.c      3
-rw-r--r--  lib/scatterlist.c       176
-rw-r--r--  lib/smp_processor_id.c    6
-rw-r--r--  lib/textsearch.c         16
-rw-r--r--  lib/ts_bm.c              26
-rw-r--r--  lib/ts_fsm.c              6
-rw-r--r--  lib/ts_kmp.c             29
11 files changed, 235 insertions, 78 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index df27132a56f4..882c51048993 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -74,6 +74,9 @@ config DEBUG_FS
 	  debugging files into. Enable this option to be able to read and
 	  write to these files.
 
+	  For detailed documentation on the debugfs API, see
+	  Documentation/DocBook/filesystems.
+
 	  If unsure, say N.
 
 config HEADERS_CHECK
@@ -147,7 +150,7 @@ config DETECT_SOFTLOCKUP
 	help
 	  Say Y here to enable the kernel to detect "soft lockups",
 	  which are bugs that cause the kernel to loop in kernel
-	  mode for more than 10 seconds, without giving other tasks a
+	  mode for more than 60 seconds, without giving other tasks a
 	  chance to run.
 
 	  When a soft-lockup is detected, the kernel will print the
@@ -159,6 +162,30 @@ config DETECT_SOFTLOCKUP
 	  can be detected via the NMI-watchdog, on platforms that
 	  support it.)
 
+config BOOTPARAM_SOFTLOCKUP_PANIC
+	bool "Panic (Reboot) On Soft Lockups"
+	depends on DETECT_SOFTLOCKUP
+	help
+	  Say Y here to enable the kernel to panic on "soft lockups",
+	  which are bugs that cause the kernel to loop in kernel
+	  mode for more than 60 seconds, without giving other tasks a
+	  chance to run.
+
+	  The panic can be used in combination with panic_timeout,
+	  to cause the system to reboot automatically after a
+	  lockup has been detected. This feature is useful for
+	  high-availability systems that have uptime guarantees and
+	  where a lockup must be resolved ASAP.
+
+	  Say N if unsure.
+
+config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
+	int
+	depends on DETECT_SOFTLOCKUP
+	range 0 1
+	default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
+	default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
+
 config SCHED_DEBUG
 	bool "Collect scheduler debugging info"
 	depends on DEBUG_KERNEL && PROC_FS
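
Usage sketch (editor's addition, not part of this commit): the two options
above only choose the boot-time default of the softlockup_panic sysctl. A
hedged sketch of how the watchdog code is assumed to consume the Kconfig
value; the symbol names follow kernel/softlockup.c of that era:

/* assumption: the Kconfig value becomes the default of the sysctl,
 * which can still be overridden from the boot command line */
unsigned int __read_mostly softlockup_panic =
				CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);
	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);
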
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index a5d4b1dac2a5..2cfd2721f7ed 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -1,7 +1,4 @@
 
-config HAVE_ARCH_KGDB_SHADOW_INFO
-	bool
-
 config HAVE_ARCH_KGDB
 	bool
 
diff --git a/lib/cpumask.c b/lib/cpumask.c
index bb4f76d3c3e7..5f97dc25ef9c 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -15,6 +15,15 @@ int __next_cpu(int n, const cpumask_t *srcp)
 }
 EXPORT_SYMBOL(__next_cpu);
 
+#if NR_CPUS > 64
+int __next_cpu_nr(int n, const cpumask_t *srcp)
+{
+	return min_t(int, nr_cpu_ids,
+				find_next_bit(srcp->bits, nr_cpu_ids, n+1));
+}
+EXPORT_SYMBOL(__next_cpu_nr);
+#endif
+
 int __any_online_cpu(const cpumask_t *mask)
 {
 	int cpu;
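
Usage sketch (editor's addition, not part of this commit): __next_cpu_nr()
lets iteration stop at nr_cpu_ids instead of scanning all NR_CPUS bits. A
hedged sketch assuming the for_each_cpu_mask_nr() helper from the same
patch series:

/* walk only the CPU bits that can actually be set on this system */
int cpu;

for_each_cpu_mask_nr(cpu, cpu_online_map)
	printk(KERN_DEBUG "CPU %d is online\n", cpu);
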
diff --git a/lib/kobject.c b/lib/kobject.c
index dcade0543bd2..744401571ed7 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -216,13 +216,19 @@ static int kobject_add_internal(struct kobject *kobj)
 static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
 				  va_list vargs)
 {
-	/* Free the old name, if necessary. */
-	kfree(kobj->name);
+	const char *old_name = kobj->name;
+	char *s;
 
 	kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs);
 	if (!kobj->name)
 		return -ENOMEM;
 
+	/* ewww... some of these buggers have '/' in the name ... */
+	s = strchr(kobj->name, '/');
+	if (s)
+		s[0] = '!';
+
+	kfree(old_name);
 	return 0;
 }
 
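
Illustration (editor's addition, not part of this commit): with the
strchr() mangling above, a name containing a slash no longer produces an
invalid sysfs directory. "cciss/c0d0" below is only an example name:

static int example_set_name(struct kobject *kobj)
{
	int err = kobject_set_name(kobj, "cciss/c0d0");

	/* on success, kobj->name is "cciss!c0d0": the first '/' was
	 * rewritten to '!' by kobject_set_name_vargs() */
	return err;
}
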
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 2fa545a63160..9f8d599459d1 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -245,7 +245,8 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 		if (retval)
 			goto exit;
 
-		call_usermodehelper(argv[0], argv, env->envp, UMH_WAIT_EXEC);
+		retval = call_usermodehelper(argv[0], argv,
+					     env->envp, UMH_WAIT_EXEC);
 	}
 
 exit:
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index b80c21100d78..876ba6d5b670 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -295,6 +295,117 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
 EXPORT_SYMBOL(sg_alloc_table);
 
 /**
+ * sg_miter_start - start mapping iteration over a sg list
+ * @miter: sg mapping iter to be started
+ * @sgl: sg list to iterate over
+ * @nents: number of sg entries
+ *
+ * Description:
+ *   Starts mapping iterator @miter.
+ *
+ * Context:
+ *   Don't care.
+ */
+void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
+		    unsigned int nents, unsigned int flags)
+{
+	memset(miter, 0, sizeof(struct sg_mapping_iter));
+
+	miter->__sg = sgl;
+	miter->__nents = nents;
+	miter->__offset = 0;
+	miter->__flags = flags;
+}
+EXPORT_SYMBOL(sg_miter_start);
+
+/**
+ * sg_miter_next - proceed mapping iterator to the next mapping
+ * @miter: sg mapping iter to proceed
+ *
+ * Description:
+ *   Proceeds @miter@ to the next mapping. @miter@ should have been
+ *   started using sg_miter_start(). On successful return,
+ *   @miter@->page, @miter@->addr and @miter@->length point to the
+ *   current mapping.
+ *
+ * Context:
+ *   IRQ disabled if SG_MITER_ATOMIC. IRQ must stay disabled till
+ *   @miter@ is stopped. May sleep if !SG_MITER_ATOMIC.
+ *
+ * Returns:
+ *   true if @miter contains the next mapping. false if end of sg
+ *   list is reached.
+ */
+bool sg_miter_next(struct sg_mapping_iter *miter)
+{
+	unsigned int off, len;
+
+	/* check for end and drop resources from the last iteration */
+	if (!miter->__nents)
+		return false;
+
+	sg_miter_stop(miter);
+
+	/* get to the next sg if necessary. __offset is adjusted by stop */
+	if (miter->__offset == miter->__sg->length && --miter->__nents) {
+		miter->__sg = sg_next(miter->__sg);
+		miter->__offset = 0;
+	}
+
+	/* map the next page */
+	off = miter->__sg->offset + miter->__offset;
+	len = miter->__sg->length - miter->__offset;
+
+	miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
+	off &= ~PAGE_MASK;
+	miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
+	miter->consumed = miter->length;
+
+	if (miter->__flags & SG_MITER_ATOMIC)
+		miter->addr = kmap_atomic(miter->page, KM_BIO_SRC_IRQ) + off;
+	else
+		miter->addr = kmap(miter->page) + off;
+
+	return true;
+}
+EXPORT_SYMBOL(sg_miter_next);
+
+/**
+ * sg_miter_stop - stop mapping iteration
+ * @miter: sg mapping iter to be stopped
+ *
+ * Description:
+ *   Stops mapping iterator @miter. @miter should have been started
+ *   started using sg_miter_start(). A stopped iteration can be
+ *   resumed by calling sg_miter_next() on it. This is useful when
+ *   resources (kmap) need to be released during iteration.
+ *
+ * Context:
+ *   IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise.
+ */
+void sg_miter_stop(struct sg_mapping_iter *miter)
+{
+	WARN_ON(miter->consumed > miter->length);
+
+	/* drop resources from the last iteration */
+	if (miter->addr) {
+		miter->__offset += miter->consumed;
+
+		if (miter->__flags & SG_MITER_ATOMIC) {
+			WARN_ON(!irqs_disabled());
+			kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
+		} else
+			kunmap(miter->addr);
+
+		miter->page = NULL;
+		miter->addr = NULL;
+		miter->length = 0;
+		miter->consumed = 0;
+	}
+}
+EXPORT_SYMBOL(sg_miter_stop);
+
+/**
  * sg_copy_buffer - Copy data between a linear buffer and an SG list
  * @sgl: The SG list
  * @nents: Number of SG entries
@@ -309,56 +420,29 @@ EXPORT_SYMBOL(sg_alloc_table);
 static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
 			     void *buf, size_t buflen, int to_buffer)
 {
-	struct scatterlist *sg;
-	size_t buf_off = 0;
-	int i;
-
-	WARN_ON(!irqs_disabled());
-
-	for_each_sg(sgl, sg, nents, i) {
-		struct page *page;
-		int n = 0;
-		unsigned int sg_off = sg->offset;
-		unsigned int sg_copy = sg->length;
-
-		if (sg_copy > buflen)
-			sg_copy = buflen;
-		buflen -= sg_copy;
-
-		while (sg_copy > 0) {
-			unsigned int page_copy;
-			void *p;
-
-			page_copy = PAGE_SIZE - sg_off;
-			if (page_copy > sg_copy)
-				page_copy = sg_copy;
-
-			page = nth_page(sg_page(sg), n);
-			p = kmap_atomic(page, KM_BIO_SRC_IRQ);
-
-			if (to_buffer)
-				memcpy(buf + buf_off, p + sg_off, page_copy);
-			else {
-				memcpy(p + sg_off, buf + buf_off, page_copy);
-				flush_kernel_dcache_page(page);
-			}
-
-			kunmap_atomic(p, KM_BIO_SRC_IRQ);
-
-			buf_off += page_copy;
-			sg_off += page_copy;
-			if (sg_off == PAGE_SIZE) {
-				sg_off = 0;
-				n++;
-			}
-			sg_copy -= page_copy;
+	unsigned int offset = 0;
+	struct sg_mapping_iter miter;
+
+	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
+
+	while (sg_miter_next(&miter) && offset < buflen) {
+		unsigned int len;
+
+		len = min(miter.length, buflen - offset);
+
+		if (to_buffer)
+			memcpy(buf + offset, miter.addr, len);
+		else {
+			memcpy(miter.addr, buf + offset, len);
+			flush_kernel_dcache_page(miter.page);
 		}
 
-		if (!buflen)
-			break;
+		offset += len;
 	}
 
-	return buf_off;
+	sg_miter_stop(&miter);
+
+	return offset;
 }
 
 /**
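
Usage sketch (editor's addition, not part of this commit): the rewritten
sg_copy_buffer() above is the first user of the new iterator; a minimal
standalone sketch of the sg_miter_start()/next()/stop() pattern:

/* zero every byte covered by an sg list; SG_MITER_ATOMIC means
 * kmap_atomic() is used, so do not sleep between next() and stop() */
static void sg_zero_all(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
	while (sg_miter_next(&miter))
		memset(miter.addr, 0, miter.length);
	sg_miter_stop(&miter);
}
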
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 3b4dc098181e..c4381d9516f6 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -11,7 +11,7 @@ notrace unsigned int debug_smp_processor_id(void)
 {
 	unsigned long preempt_count = preempt_count();
 	int this_cpu = raw_smp_processor_id();
-	cpumask_t this_mask;
+	cpumask_of_cpu_ptr_declare(this_mask);
 
 	if (likely(preempt_count))
 		goto out;
@@ -23,9 +23,9 @@ notrace unsigned int debug_smp_processor_id(void)
 	 * Kernel threads bound to a single CPU can safely use
 	 * smp_processor_id():
 	 */
-	this_mask = cpumask_of_cpu(this_cpu);
+	cpumask_of_cpu_ptr_next(this_mask, this_cpu);
 
-	if (cpus_equal(current->cpus_allowed, this_mask))
+	if (cpus_equal(current->cpus_allowed, *this_mask))
 		goto out;
 
 	/*
diff --git a/lib/textsearch.c b/lib/textsearch.c
index a3e500ad51d7..9fbcb44c554f 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -54,10 +54,13 @@
  * USAGE
  *
  * Before a search can be performed, a configuration must be created
- * by calling textsearch_prepare() specyfing the searching algorithm and
- * the pattern to look for. The returned configuration may then be used
- * for an arbitary amount of times and even in parallel as long as a
- * separate struct ts_state variable is provided to every instance.
+ * by calling textsearch_prepare() specifying the searching algorithm,
+ * the pattern to look for and flags. As a flag, you can set TS_IGNORECASE
+ * to perform case insensitive matching. But it might slow down
+ * performance of algorithm, so you should use it at own your risk.
+ * The returned configuration may then be used for an arbitary
+ * amount of times and even in parallel as long as a separate struct
+ * ts_state variable is provided to every instance.
  *
  * The actual search is performed by either calling textsearch_find_-
  * continuous() for linear data or by providing an own get_next_block()
@@ -89,7 +92,6 @@
  *      panic("Oh my god, dancing chickens at %d\n", pos);
  *
  *      textsearch_destroy(conf);
- *
  * ==========================================================================
  */
 
@@ -265,7 +267,7 @@ struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
 		return ERR_PTR(-EINVAL);
 
 	ops = lookup_ts_algo(algo);
-#ifdef CONFIG_KMOD
+#ifdef CONFIG_MODULES
 	/*
 	 * Why not always autoload you may ask. Some users are
 	 * in a situation where requesting a module may deadlock,
@@ -280,7 +282,7 @@ struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
 	if (ops == NULL)
 		goto errout;
 
-	conf = ops->init(pattern, len, gfp_mask);
+	conf = ops->init(pattern, len, gfp_mask, flags);
 	if (IS_ERR(conf)) {
 		err = PTR_ERR(conf);
 		goto errout;
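
Usage sketch (editor's addition, not part of this commit): with the new
flags argument, a case-insensitive search looks roughly like this; buffer
and len stand for the caller's data, and error handling is abbreviated:

static int find_chickens(const void *buffer, unsigned int len)
{
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	/* "kmp" and TS_IGNORECASE per the documentation changed above */
	conf = textsearch_prepare("kmp", "dancing chickens", 16,
				  GFP_KERNEL, TS_IGNORECASE);
	if (IS_ERR(conf))
		return PTR_ERR(conf);

	pos = textsearch_find_continuous(conf, &state, buffer, len);
	if (pos != UINT_MAX)
		printk(KERN_INFO "match at offset %u\n", pos);

	textsearch_destroy(conf);
	return 0;
}
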
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index 4a7fce72898e..9e66ee4020e9 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -39,6 +39,7 @@
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/string.h>
+#include <linux/ctype.h>
 #include <linux/textsearch.h>
 
 /* Alphabet size, use ASCII */
@@ -64,6 +65,7 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
 	unsigned int i, text_len, consumed = state->offset;
 	const u8 *text;
 	int shift = bm->patlen - 1, bs;
+	const u8 icase = conf->flags & TS_IGNORECASE;
 
 	for (;;) {
 		text_len = conf->get_next_block(consumed, &text, conf, state);
@@ -75,7 +77,9 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
 			DEBUGP("Searching in position %d (%c)\n",
 				shift, text[shift]);
 			for (i = 0; i < bm->patlen; i++)
-				if (text[shift-i] != bm->pattern[bm->patlen-1-i])
+				if ((icase ? toupper(text[shift-i])
+				    : text[shift-i])
+					!= bm->pattern[bm->patlen-1-i])
 					goto next;
 
 			/* London calling... */
@@ -111,14 +115,18 @@ static int subpattern(u8 *pattern, int i, int j, int g)
 	return ret;
 }
 
-static void compute_prefix_tbl(struct ts_bm *bm)
+static void compute_prefix_tbl(struct ts_bm *bm, int flags)
 {
 	int i, j, g;
 
 	for (i = 0; i < ASIZE; i++)
 		bm->bad_shift[i] = bm->patlen;
-	for (i = 0; i < bm->patlen - 1; i++)
+	for (i = 0; i < bm->patlen - 1; i++) {
 		bm->bad_shift[bm->pattern[i]] = bm->patlen - 1 - i;
+		if (flags & TS_IGNORECASE)
+			bm->bad_shift[tolower(bm->pattern[i])]
+					= bm->patlen - 1 - i;
+	}
 
 	/* Compute the good shift array, used to match reocurrences
 	 * of a subpattern */
@@ -135,10 +143,11 @@ static void compute_prefix_tbl(struct ts_bm *bm)
135} 143}
136 144
137static struct ts_config *bm_init(const void *pattern, unsigned int len, 145static struct ts_config *bm_init(const void *pattern, unsigned int len,
138 gfp_t gfp_mask) 146 gfp_t gfp_mask, int flags)
139{ 147{
140 struct ts_config *conf; 148 struct ts_config *conf;
141 struct ts_bm *bm; 149 struct ts_bm *bm;
150 int i;
142 unsigned int prefix_tbl_len = len * sizeof(unsigned int); 151 unsigned int prefix_tbl_len = len * sizeof(unsigned int);
143 size_t priv_size = sizeof(*bm) + len + prefix_tbl_len; 152 size_t priv_size = sizeof(*bm) + len + prefix_tbl_len;
144 153
@@ -146,11 +155,16 @@ static struct ts_config *bm_init(const void *pattern, unsigned int len,
 	if (IS_ERR(conf))
 		return conf;
 
+	conf->flags = flags;
 	bm = ts_config_priv(conf);
 	bm->patlen = len;
 	bm->pattern = (u8 *) bm->good_shift + prefix_tbl_len;
-	memcpy(bm->pattern, pattern, len);
-	compute_prefix_tbl(bm);
+	if (flags & TS_IGNORECASE)
+		for (i = 0; i < len; i++)
+			bm->pattern[i] = toupper(((u8 *)pattern)[i]);
+	else
+		memcpy(bm->pattern, pattern, len);
+	compute_prefix_tbl(bm, flags);
 
 	return conf;
 }
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c
index af575b61526b..5696a35184e4 100644
--- a/lib/ts_fsm.c
+++ b/lib/ts_fsm.c
@@ -257,7 +257,7 @@ found_match:
 }
 
 static struct ts_config *fsm_init(const void *pattern, unsigned int len,
-				  gfp_t gfp_mask)
+				  gfp_t gfp_mask, int flags)
 {
 	int i, err = -EINVAL;
 	struct ts_config *conf;
@@ -269,6 +269,9 @@ static struct ts_config *fsm_init(const void *pattern, unsigned int len,
 	if (len % sizeof(struct ts_fsm_token) || ntokens < 1)
 		goto errout;
 
+	if (flags & TS_IGNORECASE)
+		goto errout;
+
 	for (i = 0; i < ntokens; i++) {
 		struct ts_fsm_token *t = &tokens[i];
 
@@ -284,6 +287,7 @@ static struct ts_config *fsm_init(const void *pattern, unsigned int len,
 	if (IS_ERR(conf))
 		return conf;
 
+	conf->flags = flags;
 	fsm = ts_config_priv(conf);
 	fsm->ntokens = ntokens;
 	memcpy(fsm->tokens, pattern, len);
diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c
index 3ced628cab4b..632f783e65f1 100644
--- a/lib/ts_kmp.c
+++ b/lib/ts_kmp.c
@@ -33,6 +33,7 @@
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/string.h>
+#include <linux/ctype.h>
 #include <linux/textsearch.h>
 
 struct ts_kmp
@@ -47,6 +48,7 @@ static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state)
 	struct ts_kmp *kmp = ts_config_priv(conf);
 	unsigned int i, q = 0, text_len, consumed = state->offset;
 	const u8 *text;
+	const int icase = conf->flags & TS_IGNORECASE;
 
 	for (;;) {
 		text_len = conf->get_next_block(consumed, &text, conf, state);
@@ -55,9 +57,11 @@ static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state)
 			break;
 
 		for (i = 0; i < text_len; i++) {
-			while (q > 0 && kmp->pattern[q] != text[i])
+			while (q > 0 && kmp->pattern[q]
+			    != (icase ? toupper(text[i]) : text[i]))
 				q = kmp->prefix_tbl[q - 1];
-			if (kmp->pattern[q] == text[i])
+			if (kmp->pattern[q]
+			    == (icase ? toupper(text[i]) : text[i]))
 				q++;
 			if (unlikely(q == kmp->pattern_len)) {
 				state->offset = consumed + i + 1;
@@ -72,24 +76,28 @@ static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state)
 }
 
 static inline void compute_prefix_tbl(const u8 *pattern, unsigned int len,
-				      unsigned int *prefix_tbl)
+				      unsigned int *prefix_tbl, int flags)
 {
 	unsigned int k, q;
+	const u8 icase = flags & TS_IGNORECASE;
 
 	for (k = 0, q = 1; q < len; q++) {
-		while (k > 0 && pattern[k] != pattern[q])
+		while (k > 0 && (icase ? toupper(pattern[k]) : pattern[k])
+		    != (icase ? toupper(pattern[q]) : pattern[q]))
 			k = prefix_tbl[k-1];
-		if (pattern[k] == pattern[q])
+		if ((icase ? toupper(pattern[k]) : pattern[k])
+		    == (icase ? toupper(pattern[q]) : pattern[q]))
 			k++;
 		prefix_tbl[q] = k;
 	}
 }
 
 static struct ts_config *kmp_init(const void *pattern, unsigned int len,
-				  gfp_t gfp_mask)
+				  gfp_t gfp_mask, int flags)
 {
 	struct ts_config *conf;
 	struct ts_kmp *kmp;
+	int i;
 	unsigned int prefix_tbl_len = len * sizeof(unsigned int);
 	size_t priv_size = sizeof(*kmp) + len + prefix_tbl_len;
 
@@ -97,11 +105,16 @@ static struct ts_config *kmp_init(const void *pattern, unsigned int len,
 	if (IS_ERR(conf))
 		return conf;
 
+	conf->flags = flags;
 	kmp = ts_config_priv(conf);
 	kmp->pattern_len = len;
-	compute_prefix_tbl(pattern, len, kmp->prefix_tbl);
+	compute_prefix_tbl(pattern, len, kmp->prefix_tbl, flags);
 	kmp->pattern = (u8 *) kmp->prefix_tbl + prefix_tbl_len;
-	memcpy(kmp->pattern, pattern, len);
+	if (flags & TS_IGNORECASE)
+		for (i = 0; i < len; i++)
+			kmp->pattern[i] = toupper(((u8 *)pattern)[i]);
+	else
+		memcpy(kmp->pattern, pattern, len);
 
 	return conf;
 }