aboutsummaryrefslogtreecommitdiffstats
path: root/lib
diff options
context:
space:
mode:
authorDave Jones <davej@redhat.com>2005-08-18 01:56:07 -0400
committerDave Jones <davej@redhat.com>2005-08-18 01:56:07 -0400
commita8b3e6f10f08f66ae1072efd087b30966a3654f6 (patch)
tree1d1409855f8ad5beabafe061c6453edd84ba94c8 /lib
parent46acac3b4fd8ef66eec63b51de8d556a17c7d4f7 (diff)
parent099d44e869f1886b5eb02a5145ca97b5e4142e28 (diff)
Merge /pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'lib')
-rw-r--r--lib/Kconfig19
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--lib/Makefile15
-rw-r--r--lib/bitmap.c3
-rw-r--r--lib/crc32.c2
-rw-r--r--lib/genalloc.c188
-rw-r--r--lib/idr.c2
-rw-r--r--lib/inflate.c16
-rw-r--r--lib/kernel_lock.c55
-rw-r--r--lib/klist.c265
-rw-r--r--lib/kobject.c2
-rw-r--r--lib/kobject_uevent.c6
-rw-r--r--lib/radix-tree.c2
-rw-r--r--lib/sha1.c2
-rw-r--r--lib/smp_processor_id.c55
-rw-r--r--lib/textsearch.c317
-rw-r--r--lib/ts_fsm.c338
-rw-r--r--lib/ts_kmp.c145
18 files changed, 1356 insertions, 78 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index eeb45225248f..eeb429a52152 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -40,6 +40,12 @@ config ZLIB_DEFLATE
40 tristate 40 tristate
41 41
42# 42#
43# Generic allocator support is selected if needed
44#
45config GENERIC_ALLOCATOR
46 boolean
47
48#
43# reed solomon support is select'ed if needed 49# reed solomon support is select'ed if needed
44# 50#
45config REED_SOLOMON 51config REED_SOLOMON
@@ -57,5 +63,16 @@ config REED_SOLOMON_ENC16
57config REED_SOLOMON_DEC16 63config REED_SOLOMON_DEC16
58 boolean 64 boolean
59 65
60endmenu 66#
67# Textsearch support is select'ed if needed
68#
69config TEXTSEARCH
70 boolean
71
72config TEXTSEARCH_KMP
73 tristate
61 74
75config TEXTSEARCH_FSM
76 tristate
77
78endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0c421295e613..299f7f3b5b08 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -141,7 +141,7 @@ config DEBUG_IOREMAP
141 141
142config DEBUG_FS 142config DEBUG_FS
143 bool "Debug Filesystem" 143 bool "Debug Filesystem"
144 depends on DEBUG_KERNEL 144 depends on DEBUG_KERNEL && SYSFS
145 help 145 help
146 debugfs is a virtual file system that kernel developers use to put 146 debugfs is a virtual file system that kernel developers use to put
147 debugging files into. Enable this option to be able to read and 147 debugging files into. Enable this option to be able to read and
diff --git a/lib/Makefile b/lib/Makefile
index 7c70db79c0e0..f28d9031303c 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -4,11 +4,12 @@
4 4
5lib-y := errno.o ctype.o string.o vsprintf.o cmdline.o \ 5lib-y := errno.o ctype.o string.o vsprintf.o cmdline.o \
6 bust_spinlocks.o rbtree.o radix-tree.o dump_stack.o \ 6 bust_spinlocks.o rbtree.o radix-tree.o dump_stack.o \
7 kobject.o kref.o idr.o div64.o int_sqrt.o \ 7 idr.o div64.o int_sqrt.o bitmap.o extable.o prio_tree.o \
8 bitmap.o extable.o kobject_uevent.o prio_tree.o sha1.o \ 8 sha1.o
9 halfmd4.o
10 9
11obj-y += sort.o parser.o 10lib-y += kobject.o kref.o kobject_uevent.o klist.o
11
12obj-y += sort.o parser.o halfmd4.o
12 13
13ifeq ($(CONFIG_DEBUG_KOBJECT),y) 14ifeq ($(CONFIG_DEBUG_KOBJECT),y)
14CFLAGS_kobject.o += -DDEBUG 15CFLAGS_kobject.o += -DDEBUG
@@ -19,6 +20,7 @@ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
19lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o 20lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
20lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o 21lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
21obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o 22obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
23obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
22 24
23ifneq ($(CONFIG_HAVE_DEC_LOCK),y) 25ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
24 lib-y += dec_and_lock.o 26 lib-y += dec_and_lock.o
@@ -28,11 +30,16 @@ obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
28obj-$(CONFIG_CRC32) += crc32.o 30obj-$(CONFIG_CRC32) += crc32.o
29obj-$(CONFIG_LIBCRC32C) += libcrc32c.o 31obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
30obj-$(CONFIG_GENERIC_IOMAP) += iomap.o 32obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
33obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
31 34
32obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/ 35obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
33obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/ 36obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/
34obj-$(CONFIG_REED_SOLOMON) += reed_solomon/ 37obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
35 38
39obj-$(CONFIG_TEXTSEARCH) += textsearch.o
40obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
41obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
42
36hostprogs-y := gen_crc32table 43hostprogs-y := gen_crc32table
37clean-files := crc32table.h 44clean-files := crc32table.h
38 45
diff --git a/lib/bitmap.c b/lib/bitmap.c
index d1388a5ce89c..fb9371fdd44a 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -289,7 +289,6 @@ EXPORT_SYMBOL(__bitmap_weight);
289 289
290#define CHUNKSZ 32 290#define CHUNKSZ 32
291#define nbits_to_hold_value(val) fls(val) 291#define nbits_to_hold_value(val) fls(val)
292#define roundup_power2(val,modulus) (((val) + (modulus) - 1) & ~((modulus) - 1))
293#define unhex(c) (isdigit(c) ? (c - '0') : (toupper(c) - 'A' + 10)) 292#define unhex(c) (isdigit(c) ? (c - '0') : (toupper(c) - 'A' + 10))
294#define BASEDEC 10 /* fancier cpuset lists input in decimal */ 293#define BASEDEC 10 /* fancier cpuset lists input in decimal */
295 294
@@ -316,7 +315,7 @@ int bitmap_scnprintf(char *buf, unsigned int buflen,
316 if (chunksz == 0) 315 if (chunksz == 0)
317 chunksz = CHUNKSZ; 316 chunksz = CHUNKSZ;
318 317
319 i = roundup_power2(nmaskbits, CHUNKSZ) - CHUNKSZ; 318 i = ALIGN(nmaskbits, CHUNKSZ) - CHUNKSZ;
320 for (; i >= 0; i -= CHUNKSZ) { 319 for (; i >= 0; i -= CHUNKSZ) {
321 chunkmask = ((1ULL << chunksz) - 1); 320 chunkmask = ((1ULL << chunksz) - 1);
322 word = i / BITS_PER_LONG; 321 word = i / BITS_PER_LONG;
diff --git a/lib/crc32.c b/lib/crc32.c
index 58b222783f9c..065198f98b3f 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -473,7 +473,7 @@ static u32 test_step(u32 init, unsigned char *buf, size_t len)
473 init = bitreverse(init); 473 init = bitreverse(init);
474 crc2 = bitreverse(crc1); 474 crc2 = bitreverse(crc1);
475 if (crc1 != bitreverse(crc2)) 475 if (crc1 != bitreverse(crc2))
476 printf("\nBit reversal fail: 0x%08x -> %0x08x -> 0x%08x\n", 476 printf("\nBit reversal fail: 0x%08x -> 0x%08x -> 0x%08x\n",
477 crc1, crc2, bitreverse(crc2)); 477 crc1, crc2, bitreverse(crc2));
478 crc1 = crc32_le(init, buf, len); 478 crc1 = crc32_le(init, buf, len);
479 if (crc1 != crc2) 479 if (crc1 != crc2)
diff --git a/lib/genalloc.c b/lib/genalloc.c
new file mode 100644
index 000000000000..d6d30d2e7166
--- /dev/null
+++ b/lib/genalloc.c
@@ -0,0 +1,188 @@
1/*
2 * Basic general purpose allocator for managing special purpose memory
3 * not managed by the regular kmalloc/kfree interface.
 4 * Uses for this include on-device special memory, uncached memory
5 * etc.
6 *
7 * This code is based on the buddy allocator found in the sym53c8xx_2
8 * driver Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>,
9 * and adapted for general purpose use.
10 *
11 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
12 *
13 * This source code is licensed under the GNU General Public License,
14 * Version 2. See the file COPYING for more details.
15 */
16
17#include <linux/module.h>
18#include <linux/stddef.h>
19#include <linux/kernel.h>
20#include <linux/string.h>
21#include <linux/slab.h>
22#include <linux/init.h>
23#include <linux/mm.h>
24#include <linux/spinlock.h>
25#include <linux/genalloc.h>
26
27#include <asm/page.h>
28
29
30struct gen_pool *gen_pool_create(int nr_chunks, int max_chunk_shift,
31 unsigned long (*fp)(struct gen_pool *),
32 unsigned long data)
33{
34 struct gen_pool *poolp;
35 unsigned long tmp;
36 int i;
37
38 /*
39 * This is really an arbitrary limit, +10 is enough for
40 * IA64_GRANULE_SHIFT, aka 16MB. If anyone needs a large limit
41 * this can be increased without problems.
42 */
43 if ((max_chunk_shift > (PAGE_SHIFT + 10)) ||
44 ((max_chunk_shift < ALLOC_MIN_SHIFT) && max_chunk_shift))
45 return NULL;
46
47 if (!max_chunk_shift)
48 max_chunk_shift = PAGE_SHIFT;
49
50 poolp = kmalloc(sizeof(struct gen_pool), GFP_KERNEL);
51 if (!poolp)
52 return NULL;
53 memset(poolp, 0, sizeof(struct gen_pool));
54 poolp->h = kmalloc(sizeof(struct gen_pool_link) *
55 (max_chunk_shift - ALLOC_MIN_SHIFT + 1),
56 GFP_KERNEL);
57 if (!poolp->h) {
58 printk(KERN_WARNING "gen_pool_alloc() failed to allocate\n");
59 kfree(poolp);
60 return NULL;
61 }
62 memset(poolp->h, 0, sizeof(struct gen_pool_link) *
63 (max_chunk_shift - ALLOC_MIN_SHIFT + 1));
64
65 spin_lock_init(&poolp->lock);
66 poolp->get_new_chunk = fp;
67 poolp->max_chunk_shift = max_chunk_shift;
68 poolp->private = data;
69
70 for (i = 0; i < nr_chunks; i++) {
71 tmp = poolp->get_new_chunk(poolp);
72 printk(KERN_INFO "allocated %lx\n", tmp);
73 if (!tmp)
74 break;
75 gen_pool_free(poolp, tmp, (1 << poolp->max_chunk_shift));
76 }
77
78 return poolp;
79}
80EXPORT_SYMBOL(gen_pool_create);
81
82
83/*
84 * Simple power of two buddy-like generic allocator.
85 * Provides naturally aligned memory chunks.
86 */
87unsigned long gen_pool_alloc(struct gen_pool *poolp, int size)
88{
89 int j, i, s, max_chunk_size;
90 unsigned long a, flags;
91 struct gen_pool_link *h = poolp->h;
92
93 max_chunk_size = 1 << poolp->max_chunk_shift;
94
95 if (size > max_chunk_size)
96 return 0;
97
98 i = 0;
99
100 size = max(size, 1 << ALLOC_MIN_SHIFT);
101 s = roundup_pow_of_two(size);
102
103 j = i;
104
105 spin_lock_irqsave(&poolp->lock, flags);
106 while (!h[j].next) {
107 if (s == max_chunk_size) {
108 struct gen_pool_link *ptr;
109 spin_unlock_irqrestore(&poolp->lock, flags);
110 ptr = (struct gen_pool_link *)poolp->get_new_chunk(poolp);
111 spin_lock_irqsave(&poolp->lock, flags);
112 h[j].next = ptr;
113 if (h[j].next)
114 h[j].next->next = NULL;
115 break;
116 }
117 j++;
118 s <<= 1;
119 }
120 a = (unsigned long) h[j].next;
121 if (a) {
122 h[j].next = h[j].next->next;
123 /*
124 * This should be split into a separate function doing
125 * the chunk split in order to support custom
126 * handling memory not physically accessible by host
127 */
128 while (j > i) {
129 j -= 1;
130 s >>= 1;
131 h[j].next = (struct gen_pool_link *) (a + s);
132 h[j].next->next = NULL;
133 }
134 }
135 spin_unlock_irqrestore(&poolp->lock, flags);
136 return a;
137}
138EXPORT_SYMBOL(gen_pool_alloc);
139
140
141/*
142 * Counter-part of the generic allocator.
143 */
144void gen_pool_free(struct gen_pool *poolp, unsigned long ptr, int size)
145{
146 struct gen_pool_link *q;
147 struct gen_pool_link *h = poolp->h;
148 unsigned long a, b, flags;
149 int i, s, max_chunk_size;
150
151 max_chunk_size = 1 << poolp->max_chunk_shift;
152
153 if (size > max_chunk_size)
154 return;
155
156 i = 0;
157
158 size = max(size, 1 << ALLOC_MIN_SHIFT);
159 s = roundup_pow_of_two(size);
160
161 a = ptr;
162
163 spin_lock_irqsave(&poolp->lock, flags);
164 while (1) {
165 if (s == max_chunk_size) {
166 ((struct gen_pool_link *)a)->next = h[i].next;
167 h[i].next = (struct gen_pool_link *)a;
168 break;
169 }
170 b = a ^ s;
171 q = &h[i];
172
173 while (q->next && q->next != (struct gen_pool_link *)b)
174 q = q->next;
175
176 if (!q->next) {
177 ((struct gen_pool_link *)a)->next = h[i].next;
178 h[i].next = (struct gen_pool_link *)a;
179 break;
180 }
181 q->next = q->next->next;
182 a = a & b;
183 s <<= 1;
184 i++;
185 }
186 spin_unlock_irqrestore(&poolp->lock, flags);
187}
188EXPORT_SYMBOL(gen_pool_free);
diff --git a/lib/idr.c b/lib/idr.c
index 81fc430602ee..c5be889de449 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -175,7 +175,7 @@ build_up:
175 * Add a new layer to the top of the tree if the requested 175 * Add a new layer to the top of the tree if the requested
176 * id is larger than the currently allocated space. 176 * id is larger than the currently allocated space.
177 */ 177 */
178 while ((layers < MAX_LEVEL) && (id >= (1 << (layers*IDR_BITS)))) { 178 while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
179 layers++; 179 layers++;
180 if (!p->count) 180 if (!p->count)
181 continue; 181 continue;
diff --git a/lib/inflate.c b/lib/inflate.c
index 75e7d303c72e..6db6e98d1637 100644
--- a/lib/inflate.c
+++ b/lib/inflate.c
@@ -326,7 +326,7 @@ DEBG("huft1 ");
326 { 326 {
327 *t = (struct huft *)NULL; 327 *t = (struct huft *)NULL;
328 *m = 0; 328 *m = 0;
329 return 0; 329 return 2;
330 } 330 }
331 331
332DEBG("huft2 "); 332DEBG("huft2 ");
@@ -374,6 +374,7 @@ DEBG("huft5 ");
374 if ((j = *p++) != 0) 374 if ((j = *p++) != 0)
375 v[x[j]++] = i; 375 v[x[j]++] = i;
376 } while (++i < n); 376 } while (++i < n);
377 n = x[g]; /* set n to length of v */
377 378
378DEBG("h6 "); 379DEBG("h6 ");
379 380
@@ -410,12 +411,13 @@ DEBG1("1 ");
410DEBG1("2 "); 411DEBG1("2 ");
411 f -= a + 1; /* deduct codes from patterns left */ 412 f -= a + 1; /* deduct codes from patterns left */
412 xp = c + k; 413 xp = c + k;
413 while (++j < z) /* try smaller tables up to z bits */ 414 if (j < z)
414 { 415 while (++j < z) /* try smaller tables up to z bits */
415 if ((f <<= 1) <= *++xp) 416 {
416 break; /* enough codes to use up j bits */ 417 if ((f <<= 1) <= *++xp)
417 f -= *xp; /* else deduct codes from patterns */ 418 break; /* enough codes to use up j bits */
418 } 419 f -= *xp; /* else deduct codes from patterns */
420 }
419 } 421 }
420DEBG1("3 "); 422DEBG1("3 ");
421 z = 1 << j; /* table entries for j-bit table */ 423 z = 1 << j; /* table entries for j-bit table */
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 99b0ae3d51dd..bd2bc5d887b8 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -9,61 +9,6 @@
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/kallsyms.h> 10#include <linux/kallsyms.h>
11 11
12#if defined(CONFIG_PREEMPT) && defined(__smp_processor_id) && \
13 defined(CONFIG_DEBUG_PREEMPT)
14
15/*
16 * Debugging check.
17 */
18unsigned int smp_processor_id(void)
19{
20 unsigned long preempt_count = preempt_count();
21 int this_cpu = __smp_processor_id();
22 cpumask_t this_mask;
23
24 if (likely(preempt_count))
25 goto out;
26
27 if (irqs_disabled())
28 goto out;
29
30 /*
31 * Kernel threads bound to a single CPU can safely use
32 * smp_processor_id():
33 */
34 this_mask = cpumask_of_cpu(this_cpu);
35
36 if (cpus_equal(current->cpus_allowed, this_mask))
37 goto out;
38
39 /*
40 * It is valid to assume CPU-locality during early bootup:
41 */
42 if (system_state != SYSTEM_RUNNING)
43 goto out;
44
45 /*
46 * Avoid recursion:
47 */
48 preempt_disable();
49
50 if (!printk_ratelimit())
51 goto out_enable;
52
53 printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] code: %s/%d\n", preempt_count(), current->comm, current->pid);
54 print_symbol("caller is %s\n", (long)__builtin_return_address(0));
55 dump_stack();
56
57out_enable:
58 preempt_enable_no_resched();
59out:
60 return this_cpu;
61}
62
63EXPORT_SYMBOL(smp_processor_id);
64
65#endif /* PREEMPT && __smp_processor_id && DEBUG_PREEMPT */
66
67#ifdef CONFIG_PREEMPT_BKL 12#ifdef CONFIG_PREEMPT_BKL
68/* 13/*
69 * The 'big kernel semaphore' 14 * The 'big kernel semaphore'
diff --git a/lib/klist.c b/lib/klist.c
new file mode 100644
index 000000000000..738ab810160a
--- /dev/null
+++ b/lib/klist.c
@@ -0,0 +1,265 @@
1/*
2 * klist.c - Routines for manipulating klists.
3 *
4 *
5 * This klist interface provides a couple of structures that wrap around
6 * struct list_head to provide explicit list "head" (struct klist) and
7 * list "node" (struct klist_node) objects. For struct klist, a spinlock
8 * is included that protects access to the actual list itself. struct
9 * klist_node provides a pointer to the klist that owns it and a kref
10 * reference count that indicates the number of current users of that node
11 * in the list.
12 *
13 * The entire point is to provide an interface for iterating over a list
14 * that is safe and allows for modification of the list during the
15 * iteration (e.g. insertion and removal), including modification of the
16 * current node on the list.
17 *
18 * It works using a 3rd object type - struct klist_iter - that is declared
19 * and initialized before an iteration. klist_next() is used to acquire the
20 * next element in the list. It returns NULL if there are no more items.
21 * Internally, that routine takes the klist's lock, decrements the reference
22 * count of the previous klist_node and increments the count of the next
23 * klist_node. It then drops the lock and returns.
24 *
25 * There are primitives for adding and removing nodes to/from a klist.
26 * When deleting, klist_del() will simply decrement the reference count.
27 * Only when the count goes to 0 is the node removed from the list.
28 * klist_remove() will try to delete the node from the list and block
29 * until it is actually removed. This is useful for objects (like devices)
30 * that have been removed from the system and must be freed (but must wait
31 * until all accessors have finished).
32 *
33 * Copyright (C) 2005 Patrick Mochel
34 *
35 * This file is released under the GPL v2.
36 */
37
38#include <linux/klist.h>
39#include <linux/module.h>
40
41
42/**
43 * klist_init - Initialize a klist structure.
44 * @k: The klist we're initializing.
45 */
46
47void klist_init(struct klist * k)
48{
49 INIT_LIST_HEAD(&k->k_list);
50 spin_lock_init(&k->k_lock);
51}
52
53EXPORT_SYMBOL_GPL(klist_init);
54
55
56static void add_head(struct klist * k, struct klist_node * n)
57{
58 spin_lock(&k->k_lock);
59 list_add(&n->n_node, &k->k_list);
60 spin_unlock(&k->k_lock);
61}
62
63static void add_tail(struct klist * k, struct klist_node * n)
64{
65 spin_lock(&k->k_lock);
66 list_add_tail(&n->n_node, &k->k_list);
67 spin_unlock(&k->k_lock);
68}
69
70
71static void klist_node_init(struct klist * k, struct klist_node * n)
72{
73 INIT_LIST_HEAD(&n->n_node);
74 init_completion(&n->n_removed);
75 kref_init(&n->n_ref);
76 n->n_klist = k;
77}
78
79
80/**
81 * klist_add_head - Initialize a klist_node and add it to front.
82 * @k: klist it's going on.
83 * @n: node we're adding.
84 */
85
86void klist_add_head(struct klist * k, struct klist_node * n)
87{
88 klist_node_init(k, n);
89 add_head(k, n);
90}
91
92EXPORT_SYMBOL_GPL(klist_add_head);
93
94
95/**
96 * klist_add_tail - Initialize a klist_node and add it to back.
97 * @k: klist it's going on.
98 * @n: node we're adding.
99 */
100
101void klist_add_tail(struct klist * k, struct klist_node * n)
102{
103 klist_node_init(k, n);
104 add_tail(k, n);
105}
106
107EXPORT_SYMBOL_GPL(klist_add_tail);
108
109
110static void klist_release(struct kref * kref)
111{
112 struct klist_node * n = container_of(kref, struct klist_node, n_ref);
113 list_del(&n->n_node);
114 complete(&n->n_removed);
115 n->n_klist = NULL;
116}
117
118static int klist_dec_and_del(struct klist_node * n)
119{
120 return kref_put(&n->n_ref, klist_release);
121}
122
123
124/**
125 * klist_del - Decrement the reference count of node and try to remove.
126 * @n: node we're deleting.
127 */
128
129void klist_del(struct klist_node * n)
130{
131 struct klist * k = n->n_klist;
132
133 spin_lock(&k->k_lock);
134 klist_dec_and_del(n);
135 spin_unlock(&k->k_lock);
136}
137
138EXPORT_SYMBOL_GPL(klist_del);
139
140
141/**
142 * klist_remove - Decrement the refcount of node and wait for it to go away.
143 * @n: node we're removing.
144 */
145
146void klist_remove(struct klist_node * n)
147{
148 struct klist * k = n->n_klist;
149 spin_lock(&k->k_lock);
150 klist_dec_and_del(n);
151 spin_unlock(&k->k_lock);
152 wait_for_completion(&n->n_removed);
153}
154
155EXPORT_SYMBOL_GPL(klist_remove);
156
157
158/**
159 * klist_node_attached - Say whether a node is bound to a list or not.
160 * @n: Node that we're testing.
161 */
162
163int klist_node_attached(struct klist_node * n)
164{
165 return (n->n_klist != NULL);
166}
167
168EXPORT_SYMBOL_GPL(klist_node_attached);
169
170
171/**
172 * klist_iter_init_node - Initialize a klist_iter structure.
173 * @k: klist we're iterating.
174 * @i: klist_iter we're filling.
175 * @n: node to start with.
176 *
177 * Similar to klist_iter_init(), but starts the action off with @n,
178 * instead of with the list head.
179 */
180
181void klist_iter_init_node(struct klist * k, struct klist_iter * i, struct klist_node * n)
182{
183 i->i_klist = k;
184 i->i_head = &k->k_list;
185 i->i_cur = n;
186}
187
188EXPORT_SYMBOL_GPL(klist_iter_init_node);
189
190
191/**
192 * klist_iter_init - Initialize a klist_iter structure.
193 * @k: klist we're iterating.
194 * @i: klist_iter structure we're filling.
195 *
196 * Similar to klist_iter_init_node(), but start with the list head.
197 */
198
199void klist_iter_init(struct klist * k, struct klist_iter * i)
200{
201 klist_iter_init_node(k, i, NULL);
202}
203
204EXPORT_SYMBOL_GPL(klist_iter_init);
205
206
207/**
208 * klist_iter_exit - Finish a list iteration.
209 * @i: Iterator structure.
210 *
211 * Must be called when done iterating over list, as it decrements the
212 * refcount of the current node. Necessary in case iteration exited before
213 * the end of the list was reached, and always good form.
214 */
215
216void klist_iter_exit(struct klist_iter * i)
217{
218 if (i->i_cur) {
219 klist_del(i->i_cur);
220 i->i_cur = NULL;
221 }
222}
223
224EXPORT_SYMBOL_GPL(klist_iter_exit);
225
226
227static struct klist_node * to_klist_node(struct list_head * n)
228{
229 return container_of(n, struct klist_node, n_node);
230}
231
232
233/**
234 * klist_next - Ante up next node in list.
235 * @i: Iterator structure.
236 *
237 * First grab list lock. Decrement the reference count of the previous
238 * node, if there was one. Grab the next node, increment its reference
239 * count, drop the lock, and return that next node.
240 */
241
242struct klist_node * klist_next(struct klist_iter * i)
243{
244 struct list_head * next;
245 struct klist_node * knode = NULL;
246
247 spin_lock(&i->i_klist->k_lock);
248 if (i->i_cur) {
249 next = i->i_cur->n_node.next;
250 klist_dec_and_del(i->i_cur);
251 } else
252 next = i->i_head->next;
253
254 if (next != i->i_head) {
255 knode = to_klist_node(next);
256 kref_get(&knode->n_ref);
257 }
258 i->i_cur = knode;
259 spin_unlock(&i->i_klist->k_lock);
260 return knode;
261}
262
263EXPORT_SYMBOL_GPL(klist_next);
264
265
diff --git a/lib/kobject.c b/lib/kobject.c
index 94048826624c..dd0917dd9fa9 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -279,7 +279,7 @@ EXPORT_SYMBOL(kobject_set_name);
279 * @new_name: object's new name 279 * @new_name: object's new name
280 */ 280 */
281 281
282int kobject_rename(struct kobject * kobj, char *new_name) 282int kobject_rename(struct kobject * kobj, const char *new_name)
283{ 283{
284 int error = 0; 284 int error = 0;
285 285
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 2a4e7671eaf4..8e49d21057e4 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -197,7 +197,7 @@ void kobject_hotplug(struct kobject *kobj, enum kobject_action action)
197 int i = 0; 197 int i = 0;
198 int retval; 198 int retval;
199 char *kobj_path = NULL; 199 char *kobj_path = NULL;
200 char *name = NULL; 200 const char *name = NULL;
201 char *action_string; 201 char *action_string;
202 u64 seq; 202 u64 seq;
203 struct kobject *top_kobj = kobj; 203 struct kobject *top_kobj = kobj;
@@ -246,10 +246,10 @@ void kobject_hotplug(struct kobject *kobj, enum kobject_action action)
246 if (hotplug_ops->name) 246 if (hotplug_ops->name)
247 name = hotplug_ops->name(kset, kobj); 247 name = hotplug_ops->name(kset, kobj);
248 if (name == NULL) 248 if (name == NULL)
249 name = kset->kobj.name; 249 name = kobject_name(&kset->kobj);
250 250
251 argv [0] = hotplug_path; 251 argv [0] = hotplug_path;
252 argv [1] = name; 252 argv [1] = (char *)name; /* won't be changed but 'const' has to go */
253 argv [2] = NULL; 253 argv [2] = NULL;
254 254
255 /* minimal command environment */ 255 /* minimal command environment */
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 04d664377f2c..10bed1c8c3c3 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -58,7 +58,7 @@ struct radix_tree_path {
58#define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long)) 58#define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
59#define RADIX_TREE_MAX_PATH (RADIX_TREE_INDEX_BITS/RADIX_TREE_MAP_SHIFT + 2) 59#define RADIX_TREE_MAX_PATH (RADIX_TREE_INDEX_BITS/RADIX_TREE_MAP_SHIFT + 2)
60 60
61static unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH]; 61static unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH] __read_mostly;
62 62
63/* 63/*
64 * Radix tree node cache. 64 * Radix tree node cache.
diff --git a/lib/sha1.c b/lib/sha1.c
index 2f7f1148dfde..1cdabe3065f9 100644
--- a/lib/sha1.c
+++ b/lib/sha1.c
@@ -41,7 +41,7 @@ void sha_transform(__u32 *digest, const char *in, __u32 *W)
41 __u32 a, b, c, d, e, t, i; 41 __u32 a, b, c, d, e, t, i;
42 42
43 for (i = 0; i < 16; i++) 43 for (i = 0; i < 16; i++)
44 W[i] = be32_to_cpu(((const __u32 *)in)[i]); 44 W[i] = be32_to_cpu(((const __be32 *)in)[i]);
45 45
46 for (i = 0; i < 64; i++) 46 for (i = 0; i < 64; i++)
47 W[i+16] = rol32(W[i+13] ^ W[i+8] ^ W[i+2] ^ W[i], 1); 47 W[i+16] = rol32(W[i+13] ^ W[i+8] ^ W[i+2] ^ W[i], 1);
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
new file mode 100644
index 000000000000..42c08ef828c5
--- /dev/null
+++ b/lib/smp_processor_id.c
@@ -0,0 +1,55 @@
1/*
2 * lib/smp_processor_id.c
3 *
4 * DEBUG_PREEMPT variant of smp_processor_id().
5 */
6#include <linux/module.h>
7#include <linux/kallsyms.h>
8
9unsigned int debug_smp_processor_id(void)
10{
11 unsigned long preempt_count = preempt_count();
12 int this_cpu = raw_smp_processor_id();
13 cpumask_t this_mask;
14
15 if (likely(preempt_count))
16 goto out;
17
18 if (irqs_disabled())
19 goto out;
20
21 /*
22 * Kernel threads bound to a single CPU can safely use
23 * smp_processor_id():
24 */
25 this_mask = cpumask_of_cpu(this_cpu);
26
27 if (cpus_equal(current->cpus_allowed, this_mask))
28 goto out;
29
30 /*
31 * It is valid to assume CPU-locality during early bootup:
32 */
33 if (system_state != SYSTEM_RUNNING)
34 goto out;
35
36 /*
37 * Avoid recursion:
38 */
39 preempt_disable();
40
41 if (!printk_ratelimit())
42 goto out_enable;
43
44 printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] code: %s/%d\n", preempt_count(), current->comm, current->pid);
45 print_symbol("caller is %s\n", (long)__builtin_return_address(0));
46 dump_stack();
47
48out_enable:
49 preempt_enable_no_resched();
50out:
51 return this_cpu;
52}
53
54EXPORT_SYMBOL(debug_smp_processor_id);
55
diff --git a/lib/textsearch.c b/lib/textsearch.c
new file mode 100644
index 000000000000..1e934c196f0f
--- /dev/null
+++ b/lib/textsearch.c
@@ -0,0 +1,317 @@
1/*
2 * lib/textsearch.c Generic text search interface
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Thomas Graf <tgraf@suug.ch>
10 * Pablo Neira Ayuso <pablo@eurodev.net>
11 *
12 * ==========================================================================
13 *
14 * INTRODUCTION
15 *
16 * The textsearch infrastructure provides text searching facilities for
17 * both linear and non-linear data. Individual search algorithms are
18 * implemented in modules and chosen by the user.
19 *
20 * ARCHITECTURE
21 *
22 * User
23 * +----------------+
24 * | finish()|<--------------(6)-----------------+
25 * |get_next_block()|<--------------(5)---------------+ |
26 * | | Algorithm | |
27 * | | +------------------------------+
28 * | | | init() find() destroy() |
29 * | | +------------------------------+
30 * | | Core API ^ ^ ^
31 * | | +---------------+ (2) (4) (8)
32 * | (1)|----->| prepare() |---+ | |
33 * | (3)|----->| find()/next() |-----------+ |
34 * | (7)|----->| destroy() |----------------------+
35 * +----------------+ +---------------+
36 *
37 * (1) User configures a search by calling _prepare() specifying the
38 * search parameters such as the pattern and algorithm name.
39 * (2) Core requests the algorithm to allocate and initialize a search
40 * configuration according to the specified parameters.
41 * (3) User starts the search(es) by calling _find() or _next() to
42 * fetch subsequent occurrences. A state variable is provided
43 * to the algorithm to store persistent variables.
44 * (4) Core eventually resets the search offset and forwards the find()
45 * request to the algorithm.
46 * (5) Algorithm calls get_next_block() provided by the user continuously
47 * to fetch the data to be searched in block by block.
48 * (6) Algorithm invokes finish() after the last call to get_next_block
49 * to clean up any leftovers from get_next_block. (Optional)
50 * (7) User destroys the configuration by calling _destroy().
51 * (8) Core notifies the algorithm to destroy algorithm specific
52 * allocations. (Optional)
53 *
54 * USAGE
55 *
56 * Before a search can be performed, a configuration must be created
57 * by calling textsearch_prepare() specifying the searching algorithm and
58 * the pattern to look for. The returned configuration may then be used
59 * for an arbitrary number of times and even in parallel as long as a
60 * separate struct ts_state variable is provided to every instance.
61 *
62 * The actual search is performed by either calling textsearch_find_-
63 * continuous() for linear data or by providing an own get_next_block()
64 * implementation and calling textsearch_find(). Both functions return
65 * the position of the first occurrence of the pattern or UINT_MAX if
66 * no match was found. Subsequent occurrences can be found by calling
67 * textsearch_next() regardless of the linearity of the data.
68 *
69 * Once you're done using a configuration it must be given back via
70 * textsearch_destroy.
71 *
72 * EXAMPLE
73 *
74 * int pos;
75 * struct ts_config *conf;
76 * struct ts_state state;
77 * const char *pattern = "chicken";
78 * const char *example = "We dance the funky chicken";
79 *
80 * conf = textsearch_prepare("kmp", pattern, strlen(pattern),
81 * GFP_KERNEL, TS_AUTOLOAD);
82 * if (IS_ERR(conf)) {
83 * err = PTR_ERR(conf);
84 * goto errout;
85 * }
86 *
87 * pos = textsearch_find_continuous(conf, &state, example, strlen(example));
88 * if (pos != UINT_MAX)
89 * panic("Oh my god, dancing chickens at %d\n", pos);
90 *
91 * textsearch_destroy(conf);
92 *
93 * ==========================================================================
94 */
95
96#include <linux/config.h>
97#include <linux/module.h>
98#include <linux/types.h>
99#include <linux/string.h>
100#include <linux/init.h>
101#include <linux/rcupdate.h>
102#include <linux/err.h>
103#include <linux/textsearch.h>
104
105static LIST_HEAD(ts_ops);
106static DEFINE_SPINLOCK(ts_mod_lock);
107
108static inline struct ts_ops *lookup_ts_algo(const char *name)
109{
110 struct ts_ops *o;
111
112 rcu_read_lock();
113 list_for_each_entry_rcu(o, &ts_ops, list) {
114 if (!strcmp(name, o->name)) {
115 if (!try_module_get(o->owner))
116 o = NULL;
117 rcu_read_unlock();
118 return o;
119 }
120 }
121 rcu_read_unlock();
122
123 return NULL;
124}
125
126/**
127 * textsearch_register - register a textsearch module
128 * @ops: operations lookup table
129 *
130 * This function must be called by textsearch modules to announce
131 * their presence. The specified @ops must have %name set to a
132 * unique identifier and the callbacks find(), init(), get_pattern(),
133 * and get_pattern_len() must be implemented.
134 *
135 * Returns 0 or -EEXIST if another module has already registered
136 * with same name.
137 */
138int textsearch_register(struct ts_ops *ops)
139{
140 int err = -EEXIST;
141 struct ts_ops *o;
142
143 if (ops->name == NULL || ops->find == NULL || ops->init == NULL ||
144 ops->get_pattern == NULL || ops->get_pattern_len == NULL)
145 return -EINVAL;
146
147 spin_lock(&ts_mod_lock);
148 list_for_each_entry(o, &ts_ops, list) {
149 if (!strcmp(ops->name, o->name))
150 goto errout;
151 }
152
153 list_add_tail_rcu(&ops->list, &ts_ops);
154 err = 0;
155errout:
156 spin_unlock(&ts_mod_lock);
157 return err;
158}
159
160/**
161 * textsearch_unregister - unregister a textsearch module
162 * @ops: operations lookup table
163 *
164 * This function must be called by textsearch modules to announce
165 * their disappearance, for example when the module gets unloaded.
166 * The &ops parameter must be the same as the one during the
167 * registration.
168 *
169 * Returns 0 on success or -ENOENT if no matching textsearch
170 * registration was found.
171 */
172int textsearch_unregister(struct ts_ops *ops)
173{
174 int err = 0;
175 struct ts_ops *o;
176
177 spin_lock(&ts_mod_lock);
178 list_for_each_entry(o, &ts_ops, list) {
179 if (o == ops) {
180 list_del_rcu(&o->list);
181 goto out;
182 }
183 }
184
185 err = -ENOENT;
186out:
187 spin_unlock(&ts_mod_lock);
188 return err;
189}
190
191struct ts_linear_state
192{
193 unsigned int len;
194 const void *data;
195};
196
197static unsigned int get_linear_data(unsigned int consumed, const u8 **dst,
198 struct ts_config *conf,
199 struct ts_state *state)
200{
201 struct ts_linear_state *st = (struct ts_linear_state *) state->cb;
202
203 if (likely(consumed < st->len)) {
204 *dst = st->data + consumed;
205 return st->len - consumed;
206 }
207
208 return 0;
209}
210
211/**
212 * textsearch_find_continuous - search a pattern in continuous/linear data
213 * @conf: search configuration
214 * @state: search state
215 * @data: data to search in
216 * @len: length of data
217 *
218 * A simplified version of textsearch_find() for continuous/linear data.
219 * Call textsearch_next() to retrieve subsequent matches.
220 *
221 * Returns the position of first occurrence of the pattern or
222 * UINT_MAX if no occurrence was found.
223 */
224unsigned int textsearch_find_continuous(struct ts_config *conf,
225 struct ts_state *state,
226 const void *data, unsigned int len)
227{
228 struct ts_linear_state *st = (struct ts_linear_state *) state->cb;
229
230 conf->get_next_block = get_linear_data;
231 st->data = data;
232 st->len = len;
233
234 return textsearch_find(conf, state);
235}
236
237/**
238 * textsearch_prepare - Prepare a search
239 * @algo: name of search algorithm
240 * @pattern: pattern data
241 * @len: length of pattern
242 * @gfp_mask: allocation mask
243 * @flags: search flags
244 *
245 * Looks up the search algorithm module and creates a new textsearch
246 * configuration for the specified pattern. Upon completion all
247 * necessary refcnts are held and the configuration must be put back
248 * using textsearch_destroy() after usage.
249 *
250 * Note: The format of the pattern may not be compatible between
251 * the various search algorithms.
252 *
253 * Returns a new textsearch configuration according to the specified
254 * parameters or a ERR_PTR().
255 */
256struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
257 unsigned int len, int gfp_mask, int flags)
258{
259 int err = -ENOENT;
260 struct ts_config *conf;
261 struct ts_ops *ops;
262
263 ops = lookup_ts_algo(algo);
264#ifdef CONFIG_KMOD
265 /*
266 * Why not always autoload you may ask. Some users are
267 * in a situation where requesting a module may deadlock,
268 * especially when the module is located on a NFS mount.
269 */
270 if (ops == NULL && flags & TS_AUTOLOAD) {
271 request_module("ts_%s", algo);
272 ops = lookup_ts_algo(algo);
273 }
274#endif
275
276 if (ops == NULL)
277 goto errout;
278
279 conf = ops->init(pattern, len, gfp_mask);
280 if (IS_ERR(conf)) {
281 err = PTR_ERR(conf);
282 goto errout;
283 }
284
285 conf->ops = ops;
286 return conf;
287
288errout:
289 if (ops)
290 module_put(ops->owner);
291
292 return ERR_PTR(err);
293}
294
295/**
296 * textsearch_destroy - destroy a search configuration
297 * @conf: search configuration
298 *
299 * Releases all references of the configuration and frees
300 * up the memory.
301 */
302void textsearch_destroy(struct ts_config *conf)
303{
304 if (conf->ops) {
305 if (conf->ops->destroy)
306 conf->ops->destroy(conf);
307 module_put(conf->ops->owner);
308 }
309
310 kfree(conf);
311}
312
313EXPORT_SYMBOL(textsearch_register);
314EXPORT_SYMBOL(textsearch_unregister);
315EXPORT_SYMBOL(textsearch_prepare);
316EXPORT_SYMBOL(textsearch_find_continuous);
317EXPORT_SYMBOL(textsearch_destroy);
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c
new file mode 100644
index 000000000000..d27c0a072940
--- /dev/null
+++ b/lib/ts_fsm.c
@@ -0,0 +1,338 @@
1/*
2 * lib/ts_fsm.c A naive finite state machine text search approach
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Thomas Graf <tgraf@suug.ch>
10 *
11 * ==========================================================================
12 *
13 * A finite state machine consists of n states (struct ts_fsm_token)
14 * representing the pattern as a finite automaton. The data is read
15 * sequentially on an octet basis. Every state token specifies the number
16 * of recurrences and the type of value accepted which can be either a
17 * specific character or ctype based set of characters. The available
18 * type of recurrences include 1, (0|1), [0 n], and [1 n].
19 *
20 * The algorithm differs between strict/non-strict mode specifying
21 * whether the pattern has to start at the first octet. Strict mode
22 * is enabled by default and can be disabled by inserting
23 * TS_FSM_HEAD_IGNORE as the first token in the chain.
24 *
25 * The runtime performance of the algorithm should be around O(n),
26 * however while in strict mode the average runtime can be better.
27 */
28
29#include <linux/config.h>
30#include <linux/module.h>
31#include <linux/types.h>
32#include <linux/string.h>
33#include <linux/ctype.h>
34#include <linux/textsearch.h>
35#include <linux/textsearch_fsm.h>
36
37struct ts_fsm
38{
39 unsigned int ntokens;
40 struct ts_fsm_token tokens[0];
41};
42
43/* other values derived from ctype.h */
44#define _A 0x100 /* ascii */
45#define _W 0x200 /* wildcard */
46
47/* Map to _ctype flags and some magic numbers */
48static u16 token_map[TS_FSM_TYPE_MAX+1] = {
49 [TS_FSM_SPECIFIC] = 0,
50 [TS_FSM_WILDCARD] = _W,
51 [TS_FSM_CNTRL] = _C,
52 [TS_FSM_LOWER] = _L,
53 [TS_FSM_UPPER] = _U,
54 [TS_FSM_PUNCT] = _P,
55 [TS_FSM_SPACE] = _S,
56 [TS_FSM_DIGIT] = _D,
57 [TS_FSM_XDIGIT] = _D | _X,
58 [TS_FSM_ALPHA] = _U | _L,
59 [TS_FSM_ALNUM] = _U | _L | _D,
60 [TS_FSM_PRINT] = _P | _U | _L | _D | _SP,
61 [TS_FSM_GRAPH] = _P | _U | _L | _D,
62 [TS_FSM_ASCII] = _A,
63};
64
65static u16 token_lookup_tbl[256] = {
66_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 0- 3 */
67_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 4- 7 */
68_W|_A|_C, _W|_A|_C|_S, _W|_A|_C|_S, _W|_A|_C|_S, /* 8- 11 */
69_W|_A|_C|_S, _W|_A|_C|_S, _W|_A|_C, _W|_A|_C, /* 12- 15 */
70_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 16- 19 */
71_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 20- 23 */
72_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 24- 27 */
73_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 28- 31 */
74_W|_A|_S|_SP, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 32- 35 */
75_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 36- 39 */
76_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 40- 43 */
77_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 44- 47 */
78_W|_A|_D, _W|_A|_D, _W|_A|_D, _W|_A|_D, /* 48- 51 */
79_W|_A|_D, _W|_A|_D, _W|_A|_D, _W|_A|_D, /* 52- 55 */
80_W|_A|_D, _W|_A|_D, _W|_A|_P, _W|_A|_P, /* 56- 59 */
81_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 60- 63 */
82_W|_A|_P, _W|_A|_U|_X, _W|_A|_U|_X, _W|_A|_U|_X, /* 64- 67 */
83_W|_A|_U|_X, _W|_A|_U|_X, _W|_A|_U|_X, _W|_A|_U, /* 68- 71 */
84_W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 72- 75 */
85_W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 76- 79 */
86_W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 80- 83 */
87_W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 84- 87 */
88_W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_P, /* 88- 91 */
89_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 92- 95 */
90_W|_A|_P, _W|_A|_L|_X, _W|_A|_L|_X, _W|_A|_L|_X, /* 96- 99 */
91_W|_A|_L|_X, _W|_A|_L|_X, _W|_A|_L|_X, _W|_A|_L, /* 100-103 */
92_W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 104-107 */
93_W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 108-111 */
94_W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 112-115 */
95_W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 116-119 */
96_W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_P, /* 120-123 */
97_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_C, /* 124-127 */
98_W, _W, _W, _W, /* 128-131 */
99_W, _W, _W, _W, /* 132-135 */
100_W, _W, _W, _W, /* 136-139 */
101_W, _W, _W, _W, /* 140-143 */
102_W, _W, _W, _W, /* 144-147 */
103_W, _W, _W, _W, /* 148-151 */
104_W, _W, _W, _W, /* 152-155 */
105_W, _W, _W, _W, /* 156-159 */
106_W|_S|_SP, _W|_P, _W|_P, _W|_P, /* 160-163 */
107_W|_P, _W|_P, _W|_P, _W|_P, /* 164-167 */
108_W|_P, _W|_P, _W|_P, _W|_P, /* 168-171 */
109_W|_P, _W|_P, _W|_P, _W|_P, /* 172-175 */
110_W|_P, _W|_P, _W|_P, _W|_P, /* 176-179 */
111_W|_P, _W|_P, _W|_P, _W|_P, /* 180-183 */
112_W|_P, _W|_P, _W|_P, _W|_P, /* 184-187 */
113_W|_P, _W|_P, _W|_P, _W|_P, /* 188-191 */
114_W|_U, _W|_U, _W|_U, _W|_U, /* 192-195 */
115_W|_U, _W|_U, _W|_U, _W|_U, /* 196-199 */
116_W|_U, _W|_U, _W|_U, _W|_U, /* 200-203 */
117_W|_U, _W|_U, _W|_U, _W|_U, /* 204-207 */
118_W|_U, _W|_U, _W|_U, _W|_U, /* 208-211 */
119_W|_U, _W|_U, _W|_U, _W|_P, /* 212-215 */
120_W|_U, _W|_U, _W|_U, _W|_U, /* 216-219 */
121_W|_U, _W|_U, _W|_U, _W|_L, /* 220-223 */
122_W|_L, _W|_L, _W|_L, _W|_L, /* 224-227 */
123_W|_L, _W|_L, _W|_L, _W|_L, /* 228-231 */
124_W|_L, _W|_L, _W|_L, _W|_L, /* 232-235 */
125_W|_L, _W|_L, _W|_L, _W|_L, /* 236-239 */
126_W|_L, _W|_L, _W|_L, _W|_L, /* 240-243 */
127_W|_L, _W|_L, _W|_L, _W|_P, /* 244-247 */
128_W|_L, _W|_L, _W|_L, _W|_L, /* 248-251 */
129_W|_L, _W|_L, _W|_L, _W|_L}; /* 252-255 */
130
131static inline int match_token(struct ts_fsm_token *t, u8 d)
132{
133 if (t->type)
134 return (token_lookup_tbl[d] & t->type) != 0;
135 else
136 return t->value == d;
137}
138
139static unsigned int fsm_find(struct ts_config *conf, struct ts_state *state)
140{
141 struct ts_fsm *fsm = ts_config_priv(conf);
142 struct ts_fsm_token *cur = NULL, *next;
143 unsigned int match_start, block_idx = 0, tok_idx;
144 unsigned block_len = 0, strict, consumed = state->offset;
145 const u8 *data;
146
147#define GET_NEXT_BLOCK() \
148({ consumed += block_idx; \
149 block_idx = 0; \
150 block_len = conf->get_next_block(consumed, &data, conf, state); })
151
152#define TOKEN_MISMATCH() \
153 do { \
154 if (strict) \
155 goto no_match; \
156 block_idx++; \
157 goto startover; \
158 } while(0)
159
160#define end_of_data() unlikely(block_idx >= block_len && !GET_NEXT_BLOCK())
161
162 if (end_of_data())
163 goto no_match;
164
165 strict = fsm->tokens[0].recur != TS_FSM_HEAD_IGNORE;
166
167startover:
168 match_start = consumed + block_idx;
169
170 for (tok_idx = 0; tok_idx < fsm->ntokens; tok_idx++) {
171 cur = &fsm->tokens[tok_idx];
172
173 if (likely(tok_idx < (fsm->ntokens - 1)))
174 next = &fsm->tokens[tok_idx + 1];
175 else
176 next = NULL;
177
178 switch (cur->recur) {
179 case TS_FSM_SINGLE:
180 if (end_of_data())
181 goto no_match;
182
183 if (!match_token(cur, data[block_idx]))
184 TOKEN_MISMATCH();
185 break;
186
187 case TS_FSM_PERHAPS:
188 if (end_of_data() ||
189 !match_token(cur, data[block_idx]))
190 continue;
191 break;
192
193 case TS_FSM_MULTI:
194 if (end_of_data())
195 goto no_match;
196
197 if (!match_token(cur, data[block_idx]))
198 TOKEN_MISMATCH();
199
200 block_idx++;
201 /* fall through */
202
203 case TS_FSM_ANY:
204 if (next == NULL)
205 goto found_match;
206
207 if (end_of_data())
208 continue;
209
210 while (!match_token(next, data[block_idx])) {
211 if (!match_token(cur, data[block_idx]))
212 TOKEN_MISMATCH();
213 block_idx++;
214 if (end_of_data())
215 goto no_match;
216 }
217 continue;
218
219 /*
220 * Optimization: Prefer small local loop over jumping
221 * back and forth until garbage at head is munched.
222 */
223 case TS_FSM_HEAD_IGNORE:
224 if (end_of_data())
225 continue;
226
227 while (!match_token(next, data[block_idx])) {
228 /*
229 * Special case, don't start over upon
230 * a mismatch, give the user the
231 * chance to specify the type of data
232 * allowed to be ignored.
233 */
234 if (!match_token(cur, data[block_idx]))
235 goto no_match;
236
237 block_idx++;
238 if (end_of_data())
239 goto no_match;
240 }
241
242 match_start = consumed + block_idx;
243 continue;
244 }
245
246 block_idx++;
247 }
248
249 if (end_of_data())
250 goto found_match;
251
252no_match:
253 return UINT_MAX;
254
255found_match:
256 state->offset = consumed + block_idx;
257 return match_start;
258}
259
260static struct ts_config *fsm_init(const void *pattern, unsigned int len,
261 int gfp_mask)
262{
263 int i, err = -EINVAL;
264 struct ts_config *conf;
265 struct ts_fsm *fsm;
266 struct ts_fsm_token *tokens = (struct ts_fsm_token *) pattern;
267 unsigned int ntokens = len / sizeof(*tokens);
268 size_t priv_size = sizeof(*fsm) + len;
269
270 if (len % sizeof(struct ts_fsm_token) || ntokens < 1)
271 goto errout;
272
273 for (i = 0; i < ntokens; i++) {
274 struct ts_fsm_token *t = &tokens[i];
275
276 if (t->type > TS_FSM_TYPE_MAX || t->recur > TS_FSM_RECUR_MAX)
277 goto errout;
278
279 if (t->recur == TS_FSM_HEAD_IGNORE &&
280 (i != 0 || i == (ntokens - 1)))
281 goto errout;
282 }
283
284 conf = alloc_ts_config(priv_size, gfp_mask);
285 if (IS_ERR(conf))
286 return conf;
287
288 fsm = ts_config_priv(conf);
289 fsm->ntokens = ntokens;
290 memcpy(fsm->tokens, pattern, len);
291
292 for (i = 0; i < fsm->ntokens; i++) {
293 struct ts_fsm_token *t = &fsm->tokens[i];
294 t->type = token_map[t->type];
295 }
296
297 return conf;
298
299errout:
300 return ERR_PTR(err);
301}
302
303static void *fsm_get_pattern(struct ts_config *conf)
304{
305 struct ts_fsm *fsm = ts_config_priv(conf);
306 return fsm->tokens;
307}
308
309static unsigned int fsm_get_pattern_len(struct ts_config *conf)
310{
311 struct ts_fsm *fsm = ts_config_priv(conf);
312 return fsm->ntokens * sizeof(struct ts_fsm_token);
313}
314
315static struct ts_ops fsm_ops = {
316 .name = "fsm",
317 .find = fsm_find,
318 .init = fsm_init,
319 .get_pattern = fsm_get_pattern,
320 .get_pattern_len = fsm_get_pattern_len,
321 .owner = THIS_MODULE,
322 .list = LIST_HEAD_INIT(fsm_ops.list)
323};
324
325static int __init init_fsm(void)
326{
327 return textsearch_register(&fsm_ops);
328}
329
330static void __exit exit_fsm(void)
331{
332 textsearch_unregister(&fsm_ops);
333}
334
335MODULE_LICENSE("GPL");
336
337module_init(init_fsm);
338module_exit(exit_fsm);
diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c
new file mode 100644
index 000000000000..73266b975585
--- /dev/null
+++ b/lib/ts_kmp.c
@@ -0,0 +1,145 @@
1/*
2 * lib/ts_kmp.c Knuth-Morris-Pratt text search implementation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Thomas Graf <tgraf@suug.ch>
10 *
11 * ==========================================================================
12 *
13 * Implements a linear-time string-matching algorithm due to Knuth,
14 * Morris, and Pratt [1]. Their algorithm avoids the explicit
15 * computation of the transition function DELTA altogether. Its
16 * matching time is O(n), for n being length(text), using just an
17 * auxiliary function PI[1..m], for m being length(pattern),
18 * precomputed from the pattern in time O(m). The array PI allows
19 * the transition function DELTA to be computed efficiently
20 * "on the fly" as needed. Roughly speaking, for any state
21 * "q" = 0,1,...,m and any character "a" in SIGMA, the value
22 * PI["q"] contains the information that is independent of "a" and
23 * is needed to compute DELTA("q", "a") [2]. Since the array PI
24 * has only m entries, whereas DELTA has O(m|SIGMA|) entries, we
25 * save a factor of |SIGMA| in the preprocessing time by computing
26 * PI rather than DELTA.
27 *
28 * [1] Cormen, Leiserson, Rivest, Stein
29 * Introduction to Algorithms, 2nd Edition, MIT Press
30 * [2] See finite automaton theory
31 */
32
33#include <linux/config.h>
34#include <linux/module.h>
35#include <linux/types.h>
36#include <linux/string.h>
37#include <linux/textsearch.h>
38
39struct ts_kmp
40{
41 u8 * pattern;
42 unsigned int pattern_len;
43 unsigned int prefix_tbl[0];
44};
45
46static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state)
47{
48 struct ts_kmp *kmp = ts_config_priv(conf);
49 unsigned int i, q = 0, text_len, consumed = state->offset;
50 const u8 *text;
51
52 for (;;) {
53 text_len = conf->get_next_block(consumed, &text, conf, state);
54
55 if (unlikely(text_len == 0))
56 break;
57
58 for (i = 0; i < text_len; i++) {
59 while (q > 0 && kmp->pattern[q] != text[i])
60 q = kmp->prefix_tbl[q - 1];
61 if (kmp->pattern[q] == text[i])
62 q++;
63 if (unlikely(q == kmp->pattern_len)) {
64 state->offset = consumed + i + 1;
65 return state->offset - kmp->pattern_len;
66 }
67 }
68
69 consumed += text_len;
70 }
71
72 return UINT_MAX;
73}
74
75static inline void compute_prefix_tbl(const u8 *pattern, unsigned int len,
76 unsigned int *prefix_tbl)
77{
78 unsigned int k, q;
79
80 for (k = 0, q = 1; q < len; q++) {
81 while (k > 0 && pattern[k] != pattern[q])
82 k = prefix_tbl[k-1];
83 if (pattern[k] == pattern[q])
84 k++;
85 prefix_tbl[q] = k;
86 }
87}
88
89static struct ts_config *kmp_init(const void *pattern, unsigned int len,
90 int gfp_mask)
91{
92 struct ts_config *conf;
93 struct ts_kmp *kmp;
94 unsigned int prefix_tbl_len = len * sizeof(unsigned int);
95 size_t priv_size = sizeof(*kmp) + len + prefix_tbl_len;
96
97 conf = alloc_ts_config(priv_size, gfp_mask);
98 if (IS_ERR(conf))
99 return conf;
100
101 kmp = ts_config_priv(conf);
102 kmp->pattern_len = len;
103 compute_prefix_tbl(pattern, len, kmp->prefix_tbl);
104 kmp->pattern = (u8 *) kmp->prefix_tbl + prefix_tbl_len;
105 memcpy(kmp->pattern, pattern, len);
106
107 return conf;
108}
109
110static void *kmp_get_pattern(struct ts_config *conf)
111{
112 struct ts_kmp *kmp = ts_config_priv(conf);
113 return kmp->pattern;
114}
115
116static unsigned int kmp_get_pattern_len(struct ts_config *conf)
117{
118 struct ts_kmp *kmp = ts_config_priv(conf);
119 return kmp->pattern_len;
120}
121
122static struct ts_ops kmp_ops = {
123 .name = "kmp",
124 .find = kmp_find,
125 .init = kmp_init,
126 .get_pattern = kmp_get_pattern,
127 .get_pattern_len = kmp_get_pattern_len,
128 .owner = THIS_MODULE,
129 .list = LIST_HEAD_INIT(kmp_ops.list)
130};
131
132static int __init init_kmp(void)
133{
134 return textsearch_register(&kmp_ops);
135}
136
137static void __exit exit_kmp(void)
138{
139 textsearch_unregister(&kmp_ops);
140}
141
142MODULE_LICENSE("GPL");
143
144module_init(init_kmp);
145module_exit(exit_kmp);