aboutsummaryrefslogtreecommitdiffstats
path: root/lib
diff options
context:
space:
mode:
Diffstat (limited to 'lib')
-rw-r--r--lib/Kconfig.debug23
-rw-r--r--lib/Makefile4
-rw-r--r--lib/cpumask.c16
-rw-r--r--lib/errno.c7
-rw-r--r--lib/genalloc.c63
-rw-r--r--lib/ioremap.c92
-rw-r--r--lib/list_debug.c76
-rw-r--r--lib/rbtree.c6
-rw-r--r--lib/rwsem.c2
-rw-r--r--lib/sort.c10
-rw-r--r--lib/spinlock_debug.c15
-rw-r--r--lib/ts_fsm.c10
12 files changed, 284 insertions, 40 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index b0f5ca72599f..756a908c441d 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -320,6 +320,15 @@ config DEBUG_VM
320 320
321 If unsure, say N. 321 If unsure, say N.
322 322
323config DEBUG_LIST
324 bool "Debug linked list manipulation"
325 depends on DEBUG_KERNEL
326 help
327 Enable this to turn on extended checks in the linked-list
328 walking routines.
329
330 If unsure, say N.
331
323config FRAME_POINTER 332config FRAME_POINTER
324 bool "Compile the kernel with frame pointers" 333 bool "Compile the kernel with frame pointers"
325 depends on DEBUG_KERNEL && (X86 || CRIS || M68K || M68KNOMMU || FRV || UML || S390 || AVR32 || SUPERH) 334 depends on DEBUG_KERNEL && (X86 || CRIS || M68K || M68KNOMMU || FRV || UML || S390 || AVR32 || SUPERH)
@@ -375,3 +384,17 @@ config RCU_TORTURE_TEST
375 at boot time (you probably don't). 384 at boot time (you probably don't).
376 Say M if you want the RCU torture tests to build as a module. 385 Say M if you want the RCU torture tests to build as a module.
377 Say N if you are unsure. 386 Say N if you are unsure.
387
388config LKDTM
389 tristate "Linux Kernel Dump Test Tool Module"
390 depends on KPROBES
391 default n
392 help
393 This module enables testing of the different dumping mechanisms by
394 inducing system failures at predefined crash points.
395 If you don't need it: say N
396 Choose M here to compile this code as a module. The module will be
397 called lkdtm.
398
399 Documentation on how to use the module can be found in
400 drivers/misc/lkdtm.c
diff --git a/lib/Makefile b/lib/Makefile
index ef1d37afbbb6..b0361756e22e 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -2,11 +2,12 @@
2# Makefile for some libs needed in the kernel. 2# Makefile for some libs needed in the kernel.
3# 3#
4 4
5lib-y := errno.o ctype.o string.o vsprintf.o cmdline.o \ 5lib-y := ctype.o string.o vsprintf.o cmdline.o \
6 bust_spinlocks.o rbtree.o radix-tree.o dump_stack.o \ 6 bust_spinlocks.o rbtree.o radix-tree.o dump_stack.o \
7 idr.o div64.o int_sqrt.o bitmap.o extable.o prio_tree.o \ 7 idr.o div64.o int_sqrt.o bitmap.o extable.o prio_tree.o \
8 sha1.o 8 sha1.o
9 9
10lib-$(CONFIG_MMU) += ioremap.o
10lib-$(CONFIG_SMP) += cpumask.o 11lib-$(CONFIG_SMP) += cpumask.o
11 12
12lib-y += kobject.o kref.o kobject_uevent.o klist.o 13lib-y += kobject.o kref.o kobject_uevent.o klist.o
@@ -28,6 +29,7 @@ lib-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
28obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o 29obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
29obj-$(CONFIG_PLIST) += plist.o 30obj-$(CONFIG_PLIST) += plist.o
30obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o 31obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
32obj-$(CONFIG_DEBUG_LIST) += list_debug.o
31 33
32ifneq ($(CONFIG_HAVE_DEC_LOCK),y) 34ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
33 lib-y += dec_and_lock.o 35 lib-y += dec_and_lock.o
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 3a67dc5ada7d..7a2a73f88d59 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -43,3 +43,19 @@ int __any_online_cpu(const cpumask_t *mask)
43 return cpu; 43 return cpu;
44} 44}
45EXPORT_SYMBOL(__any_online_cpu); 45EXPORT_SYMBOL(__any_online_cpu);
46
#if MAX_NUMNODES > 1
/*
 * highest_possible_node_id - largest node id that may ever be online.
 *
 * Walks node_possible_map and returns the last (highest) id set in it,
 * or 0 when the map is empty.
 */
int highest_possible_node_id(void)
{
	unsigned int nid;
	unsigned int max_nid = 0;

	/*
	 * for_each_node_mask() visits ids in increasing order, so the
	 * last value observed is the highest possible node id.
	 */
	for_each_node_mask(nid, node_possible_map)
		max_nid = nid;
	return max_nid;
}
EXPORT_SYMBOL(highest_possible_node_id);
#endif
diff --git a/lib/errno.c b/lib/errno.c
deleted file mode 100644
index 41cb9d76c052..000000000000
--- a/lib/errno.c
+++ /dev/null
@@ -1,7 +0,0 @@
1/*
2 * linux/lib/errno.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 */
6
7int errno;
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 71338b48e889..75ae68ce03e1 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -14,11 +14,13 @@
14#include <linux/genalloc.h> 14#include <linux/genalloc.h>
15 15
16 16
17/* 17/**
18 * Create a new special memory pool. 18 * gen_pool_create - create a new special memory pool
19 *
20 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents 19 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
21 * @nid: node id of the node the pool structure should be allocated on, or -1 20 * @nid: node id of the node the pool structure should be allocated on, or -1
21 *
22 * Create a new special memory pool that can be used to manage special purpose
23 * memory not managed by the regular kmalloc/kfree interface.
22 */ 24 */
23struct gen_pool *gen_pool_create(int min_alloc_order, int nid) 25struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
24{ 26{
@@ -34,15 +36,15 @@ struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
34} 36}
35EXPORT_SYMBOL(gen_pool_create); 37EXPORT_SYMBOL(gen_pool_create);
36 38
37 39/**
38/* 40 * gen_pool_add - add a new chunk of special memory to the pool
39 * Add a new chunk of memory to the specified pool.
40 *
41 * @pool: pool to add new memory chunk to 41 * @pool: pool to add new memory chunk to
42 * @addr: starting address of memory chunk to add to pool 42 * @addr: starting address of memory chunk to add to pool
43 * @size: size in bytes of the memory chunk to add to pool 43 * @size: size in bytes of the memory chunk to add to pool
44 * @nid: node id of the node the chunk structure and bitmap should be 44 * @nid: node id of the node the chunk structure and bitmap should be
45 * allocated on, or -1 45 * allocated on, or -1
46 *
47 * Add a new chunk of special memory to the specified pool.
46 */ 48 */
47int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size, 49int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
48 int nid) 50 int nid)
@@ -69,13 +71,44 @@ int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
69} 71}
70EXPORT_SYMBOL(gen_pool_add); 72EXPORT_SYMBOL(gen_pool_add);
71 73
72 74/**
73/* 75 * gen_pool_destroy - destroy a special memory pool
74 * Allocate the requested number of bytes from the specified pool. 76 * @pool: pool to destroy
75 * Uses a first-fit algorithm.
76 * 77 *
78 * Destroy the specified special memory pool. Verifies that there are no
79 * outstanding allocations.
80 */
81void gen_pool_destroy(struct gen_pool *pool)
82{
83 struct list_head *_chunk, *_next_chunk;
84 struct gen_pool_chunk *chunk;
85 int order = pool->min_alloc_order;
86 int bit, end_bit;
87
88
89 write_lock(&pool->lock);
90 list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
91 chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
92 list_del(&chunk->next_chunk);
93
94 end_bit = (chunk->end_addr - chunk->start_addr) >> order;
95 bit = find_next_bit(chunk->bits, end_bit, 0);
96 BUG_ON(bit < end_bit);
97
98 kfree(chunk);
99 }
100 kfree(pool);
101 return;
102}
103EXPORT_SYMBOL(gen_pool_destroy);
104
105/**
106 * gen_pool_alloc - allocate special memory from the pool
77 * @pool: pool to allocate from 107 * @pool: pool to allocate from
78 * @size: number of bytes to allocate from the pool 108 * @size: number of bytes to allocate from the pool
109 *
110 * Allocate the requested number of bytes from the specified pool.
111 * Uses a first-fit algorithm.
79 */ 112 */
80unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) 113unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
81{ 114{
@@ -127,13 +160,13 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
127} 160}
128EXPORT_SYMBOL(gen_pool_alloc); 161EXPORT_SYMBOL(gen_pool_alloc);
129 162
130 163/**
131/* 164 * gen_pool_free - free allocated special memory back to the pool
132 * Free the specified memory back to the specified pool.
133 *
134 * @pool: pool to free to 165 * @pool: pool to free to
135 * @addr: starting address of memory to free back to pool 166 * @addr: starting address of memory to free back to pool
136 * @size: size in bytes of memory to free 167 * @size: size in bytes of memory to free
168 *
169 * Free previously allocated special memory back to the specified pool.
137 */ 170 */
138void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size) 171void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
139{ 172{
diff --git a/lib/ioremap.c b/lib/ioremap.c
new file mode 100644
index 000000000000..99fa277f9f7b
--- /dev/null
+++ b/lib/ioremap.c
@@ -0,0 +1,92 @@
1/*
2 * Re-map IO memory to kernel address space so that we can access it.
3 * This is needed for high PCI addresses that aren't mapped in the
4 * 640k-1MB IO memory area on PC's
5 *
6 * (C) Copyright 1995 1996 Linus Torvalds
7 */
8#include <linux/io.h>
9#include <linux/vmalloc.h>
10#include <linux/mm.h>
11
12#include <asm/cacheflush.h>
13#include <asm/pgtable.h>
14
15static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
16 unsigned long end, unsigned long phys_addr, pgprot_t prot)
17{
18 pte_t *pte;
19 unsigned long pfn;
20
21 pfn = phys_addr >> PAGE_SHIFT;
22 pte = pte_alloc_kernel(pmd, addr);
23 if (!pte)
24 return -ENOMEM;
25 do {
26 BUG_ON(!pte_none(*pte));
27 set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
28 pfn++;
29 } while (pte++, addr += PAGE_SIZE, addr != end);
30 return 0;
31}
32
33static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
34 unsigned long end, unsigned long phys_addr, pgprot_t prot)
35{
36 pmd_t *pmd;
37 unsigned long next;
38
39 phys_addr -= addr;
40 pmd = pmd_alloc(&init_mm, pud, addr);
41 if (!pmd)
42 return -ENOMEM;
43 do {
44 next = pmd_addr_end(addr, end);
45 if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
46 return -ENOMEM;
47 } while (pmd++, addr = next, addr != end);
48 return 0;
49}
50
51static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
52 unsigned long end, unsigned long phys_addr, pgprot_t prot)
53{
54 pud_t *pud;
55 unsigned long next;
56
57 phys_addr -= addr;
58 pud = pud_alloc(&init_mm, pgd, addr);
59 if (!pud)
60 return -ENOMEM;
61 do {
62 next = pud_addr_end(addr, end);
63 if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot))
64 return -ENOMEM;
65 } while (pud++, addr = next, addr != end);
66 return 0;
67}
68
69int ioremap_page_range(unsigned long addr,
70 unsigned long end, unsigned long phys_addr, pgprot_t prot)
71{
72 pgd_t *pgd;
73 unsigned long start;
74 unsigned long next;
75 int err;
76
77 BUG_ON(addr >= end);
78
79 start = addr;
80 phys_addr -= addr;
81 pgd = pgd_offset_k(addr);
82 do {
83 next = pgd_addr_end(addr, end);
84 err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, prot);
85 if (err)
86 break;
87 } while (pgd++, addr = next, addr != end);
88
89 flush_cache_vmap(start, end);
90
91 return err;
92}
diff --git a/lib/list_debug.c b/lib/list_debug.c
new file mode 100644
index 000000000000..7ba9d823d388
--- /dev/null
+++ b/lib/list_debug.c
@@ -0,0 +1,76 @@
1/*
2 * Copyright 2006, Red Hat, Inc., Dave Jones
3 * Released under the General Public License (GPL).
4 *
5 * This file contains the linked list implementations for
6 * DEBUG_LIST.
7 */
8
9#include <linux/module.h>
10#include <linux/list.h>
11
12/*
13 * Insert a new entry between two known consecutive entries.
14 *
15 * This is only for internal list manipulation where we know
16 * the prev/next entries already!
17 */
18
19void __list_add(struct list_head *new,
20 struct list_head *prev,
21 struct list_head *next)
22{
23 if (unlikely(next->prev != prev)) {
24 printk(KERN_ERR "list_add corruption. next->prev should be %p, but was %p\n",
25 prev, next->prev);
26 BUG();
27 }
28 if (unlikely(prev->next != next)) {
29 printk(KERN_ERR "list_add corruption. prev->next should be %p, but was %p\n",
30 next, prev->next);
31 BUG();
32 }
33 next->prev = new;
34 new->next = next;
35 new->prev = prev;
36 prev->next = new;
37}
38EXPORT_SYMBOL(__list_add);
39
40/**
41 * list_add - add a new entry
42 * @new: new entry to be added
43 * @head: list head to add it after
44 *
45 * Insert a new entry after the specified head.
46 * This is good for implementing stacks.
47 */
48void list_add(struct list_head *new, struct list_head *head)
49{
50 __list_add(new, head, head->next);
51}
52EXPORT_SYMBOL(list_add);
53
54/**
55 * list_del - deletes entry from list.
56 * @entry: the element to delete from the list.
57 * Note: list_empty on entry does not return true after this, the entry is
58 * in an undefined state.
59 */
60void list_del(struct list_head *entry)
61{
62 if (unlikely(entry->prev->next != entry)) {
63 printk(KERN_ERR "list_del corruption. prev->next should be %p, "
64 "but was %p\n", entry, entry->prev->next);
65 BUG();
66 }
67 if (unlikely(entry->next->prev != entry)) {
68 printk(KERN_ERR "list_del corruption. next->prev should be %p, "
69 "but was %p\n", entry, entry->next->prev);
70 BUG();
71 }
72 __list_del(entry->prev, entry->next);
73 entry->next = LIST_POISON1;
74 entry->prev = LIST_POISON2;
75}
76EXPORT_SYMBOL(list_del);
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 1e55ba1c2edf..48499c2d88cc 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -322,6 +322,9 @@ struct rb_node *rb_next(struct rb_node *node)
322{ 322{
323 struct rb_node *parent; 323 struct rb_node *parent;
324 324
325 if (rb_parent(node) == node)
326 return NULL;
327
325 /* If we have a right-hand child, go down and then left as far 328 /* If we have a right-hand child, go down and then left as far
326 as we can. */ 329 as we can. */
327 if (node->rb_right) { 330 if (node->rb_right) {
@@ -348,6 +351,9 @@ struct rb_node *rb_prev(struct rb_node *node)
348{ 351{
349 struct rb_node *parent; 352 struct rb_node *parent;
350 353
354 if (rb_parent(node) == node)
355 return NULL;
356
351 /* If we have a left-hand child, go down and then right as far 357 /* If we have a left-hand child, go down and then right as far
352 as we can. */ 358 as we can. */
353 if (node->rb_left) { 359 if (node->rb_left) {
diff --git a/lib/rwsem.c b/lib/rwsem.c
index b322421c2969..901d0e7da892 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -146,7 +146,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
146/* 146/*
147 * wait for a lock to be granted 147 * wait for a lock to be granted
148 */ 148 */
149static inline struct rw_semaphore * 149static struct rw_semaphore *
150rwsem_down_failed_common(struct rw_semaphore *sem, 150rwsem_down_failed_common(struct rw_semaphore *sem,
151 struct rwsem_waiter *waiter, signed long adjustment) 151 struct rwsem_waiter *waiter, signed long adjustment)
152{ 152{
diff --git a/lib/sort.c b/lib/sort.c
index 5f3b51ffa1dc..488788b341cb 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -49,15 +49,15 @@ void sort(void *base, size_t num, size_t size,
49 void (*swap)(void *, void *, int size)) 49 void (*swap)(void *, void *, int size))
50{ 50{
51 /* pre-scale counters for performance */ 51 /* pre-scale counters for performance */
52 int i = (num/2) * size, n = num * size, c, r; 52 int i = (num/2 - 1) * size, n = num * size, c, r;
53 53
54 if (!swap) 54 if (!swap)
55 swap = (size == 4 ? u32_swap : generic_swap); 55 swap = (size == 4 ? u32_swap : generic_swap);
56 56
57 /* heapify */ 57 /* heapify */
58 for ( ; i >= 0; i -= size) { 58 for ( ; i >= 0; i -= size) {
59 for (r = i; r * 2 < n; r = c) { 59 for (r = i; r * 2 + size < n; r = c) {
60 c = r * 2; 60 c = r * 2 + size;
61 if (c < n - size && cmp(base + c, base + c + size) < 0) 61 if (c < n - size && cmp(base + c, base + c + size) < 0)
62 c += size; 62 c += size;
63 if (cmp(base + r, base + c) >= 0) 63 if (cmp(base + r, base + c) >= 0)
@@ -69,8 +69,8 @@ void sort(void *base, size_t num, size_t size,
69 /* sort */ 69 /* sort */
70 for (i = n - size; i >= 0; i -= size) { 70 for (i = n - size; i >= 0; i -= size) {
71 swap(base, base + i, size); 71 swap(base, base + i, size);
72 for (r = 0; r * 2 < i; r = c) { 72 for (r = 0; r * 2 + size < i; r = c) {
73 c = r * 2; 73 c = r * 2 + size;
74 if (c < i - size && cmp(base + c, base + c + size) < 0) 74 if (c < i - size && cmp(base + c, base + c + size) < 0)
75 c += size; 75 c += size;
76 if (cmp(base + r, base + c) >= 0) 76 if (cmp(base + r, base + c) >= 0)
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 58c577dd82e5..dafaf1de2491 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -99,11 +99,12 @@ static inline void debug_spin_unlock(spinlock_t *lock)
99 99
100static void __spin_lock_debug(spinlock_t *lock) 100static void __spin_lock_debug(spinlock_t *lock)
101{ 101{
102 int print_once = 1;
103 u64 i; 102 u64 i;
103 u64 loops = loops_per_jiffy * HZ;
104 int print_once = 1;
104 105
105 for (;;) { 106 for (;;) {
106 for (i = 0; i < loops_per_jiffy * HZ; i++) { 107 for (i = 0; i < loops; i++) {
107 if (__raw_spin_trylock(&lock->raw_lock)) 108 if (__raw_spin_trylock(&lock->raw_lock))
108 return; 109 return;
109 __delay(1); 110 __delay(1);
@@ -165,11 +166,12 @@ static void rwlock_bug(rwlock_t *lock, const char *msg)
165#if 0 /* __write_lock_debug() can lock up - maybe this can too? */ 166#if 0 /* __write_lock_debug() can lock up - maybe this can too? */
166static void __read_lock_debug(rwlock_t *lock) 167static void __read_lock_debug(rwlock_t *lock)
167{ 168{
168 int print_once = 1;
169 u64 i; 169 u64 i;
170 u64 loops = loops_per_jiffy * HZ;
171 int print_once = 1;
170 172
171 for (;;) { 173 for (;;) {
172 for (i = 0; i < loops_per_jiffy * HZ; i++) { 174 for (i = 0; i < loops; i++) {
173 if (__raw_read_trylock(&lock->raw_lock)) 175 if (__raw_read_trylock(&lock->raw_lock))
174 return; 176 return;
175 __delay(1); 177 __delay(1);
@@ -239,11 +241,12 @@ static inline void debug_write_unlock(rwlock_t *lock)
239#if 0 /* This can cause lockups */ 241#if 0 /* This can cause lockups */
240static void __write_lock_debug(rwlock_t *lock) 242static void __write_lock_debug(rwlock_t *lock)
241{ 243{
242 int print_once = 1;
243 u64 i; 244 u64 i;
245 u64 loops = loops_per_jiffy * HZ;
246 int print_once = 1;
244 247
245 for (;;) { 248 for (;;) {
246 for (i = 0; i < loops_per_jiffy * HZ; i++) { 249 for (i = 0; i < loops; i++) {
247 if (__raw_write_trylock(&lock->raw_lock)) 250 if (__raw_write_trylock(&lock->raw_lock))
248 return; 251 return;
249 __delay(1); 252 __delay(1);
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c
index 87847c2ae9e2..af575b61526b 100644
--- a/lib/ts_fsm.c
+++ b/lib/ts_fsm.c
@@ -12,13 +12,13 @@
12 * 12 *
13 * A finite state machine consists of n states (struct ts_fsm_token) 13 * A finite state machine consists of n states (struct ts_fsm_token)
14 * representing the pattern as a finite automation. The data is read 14 * representing the pattern as a finite automation. The data is read
15 * sequentially on a octet basis. Every state token specifies the number 15 * sequentially on an octet basis. Every state token specifies the number
16 * of recurrences and the type of value accepted which can be either a 16 * of recurrences and the type of value accepted which can be either a
17 * specific character or ctype based set of characters. The available 17 * specific character or ctype based set of characters. The available
18 * type of recurrences include 1, (0|1), [0 n], and [1 n]. 18 * type of recurrences include 1, (0|1), [0 n], and [1 n].
19 * 19 *
20 * The algorithm differs between strict/non-strict mode specyfing 20 * The algorithm differs between strict/non-strict mode specifying
21 * whether the pattern has to start at the first octect. Strict mode 21 * whether the pattern has to start at the first octet. Strict mode
22 * is enabled by default and can be disabled by inserting 22 * is enabled by default and can be disabled by inserting
23 * TS_FSM_HEAD_IGNORE as the first token in the chain. 23 * TS_FSM_HEAD_IGNORE as the first token in the chain.
24 * 24 *
@@ -44,7 +44,7 @@ struct ts_fsm
44#define _W 0x200 /* wildcard */ 44#define _W 0x200 /* wildcard */
45 45
46/* Map to _ctype flags and some magic numbers */ 46/* Map to _ctype flags and some magic numbers */
47static u16 token_map[TS_FSM_TYPE_MAX+1] = { 47static const u16 token_map[TS_FSM_TYPE_MAX+1] = {
48 [TS_FSM_SPECIFIC] = 0, 48 [TS_FSM_SPECIFIC] = 0,
49 [TS_FSM_WILDCARD] = _W, 49 [TS_FSM_WILDCARD] = _W,
50 [TS_FSM_CNTRL] = _C, 50 [TS_FSM_CNTRL] = _C,
@@ -61,7 +61,7 @@ static u16 token_map[TS_FSM_TYPE_MAX+1] = {
61 [TS_FSM_ASCII] = _A, 61 [TS_FSM_ASCII] = _A,
62}; 62};
63 63
64static u16 token_lookup_tbl[256] = { 64static const u16 token_lookup_tbl[256] = {
65_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 0- 3 */ 65_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 0- 3 */
66_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 4- 7 */ 66_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 4- 7 */
67_W|_A|_C, _W|_A|_C|_S, _W|_A|_C|_S, _W|_A|_C|_S, /* 8- 11 */ 67_W|_A|_C, _W|_A|_C|_S, _W|_A|_C|_S, _W|_A|_C|_S, /* 8- 11 */