-rw-r--r--  drivers/net/slip.c                    |  14
-rw-r--r--  include/linux/netdevice.h             |  11
-rw-r--r--  include/linux/pkt_cls.h               |   1
-rw-r--r--  include/linux/rtnetlink.h             |   7
-rw-r--r--  include/linux/skbuff.h                |  23
-rw-r--r--  include/linux/sysctl.h                |   1
-rw-r--r--  include/linux/tc_ematch/tc_em_text.h  |  19
-rw-r--r--  include/linux/tcp.h                   |   1
-rw-r--r--  include/linux/textsearch.h            | 180
-rw-r--r--  include/linux/textsearch_fsm.h        |  48
-rw-r--r--  include/net/tcp.h                     |   4
-rw-r--r--  lib/Kconfig                           |  29
-rw-r--r--  lib/Makefile                          |   4
-rw-r--r--  lib/textsearch.c                      | 317
-rw-r--r--  lib/ts_fsm.c                          | 338
-rw-r--r--  lib/ts_kmp.c                          | 145
-rw-r--r--  net/core/dev.c                        | 125
-rw-r--r--  net/core/skbuff.c                     | 157
-rw-r--r--  net/core/sysctl_net_core.c            |  46
-rw-r--r--  net/ipv4/tcp.c                        |  31
-rw-r--r--  net/ipv4/tcp_cong.c                   |  46
-rw-r--r--  net/ipv4/tcp_ipv4.c                   |   2
-rw-r--r--  net/ipv6/tcp_ipv6.c                   |   2
-rw-r--r--  net/sched/Kconfig                     |  12
-rw-r--r--  net/sched/Makefile                    |   1
-rw-r--r--  net/sched/em_text.c                   | 157
26 files changed, 1538 insertions(+), 183 deletions(-)
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index 19112712daf0..c79e0ad4ba02 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -198,18 +198,12 @@ err_exit:
198static void 198static void
199sl_free_bufs(struct slip *sl) 199sl_free_bufs(struct slip *sl)
200{ 200{
201 void * tmp;
202
203 /* Free all SLIP frame buffers. */ 201 /* Free all SLIP frame buffers. */
204 tmp = xchg(&sl->rbuff, NULL); 202 kfree(xchg(&sl->rbuff, NULL));
205 kfree(tmp); 203 kfree(xchg(&sl->xbuff, NULL));
206 tmp = xchg(&sl->xbuff, NULL);
207 kfree(tmp);
208#ifdef SL_INCLUDE_CSLIP 204#ifdef SL_INCLUDE_CSLIP
209 tmp = xchg(&sl->cbuff, NULL); 205 kfree(xchg(&sl->cbuff, NULL));
210 kfree(tmp); 206 slhc_free(xchg(&sl->slcomp, NULL));
211 if ((tmp = xchg(&sl->slcomp, NULL)) != NULL)
212 slhc_free(tmp);
213#endif 207#endif
214} 208}
215 209
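The simplification above folds the temporary variable into the call, relying on kfree() being a no-op for NULL pointers while xchg() still atomically detaches each buffer before it is freed. A minimal kernel-style sketch of the idiom, using a hypothetical context structure rather than struct slip:

	struct my_ctx {
		unsigned char *rbuf;	/* receive buffer, may be NULL */
	};

	static void my_ctx_free_bufs(struct my_ctx *ctx)
	{
		/* Atomically detach the buffer, then free whatever was
		 * there; kfree(NULL) is harmless, so no check is needed. */
		kfree(xchg(&ctx->rbuf, NULL));
	}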
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d89816ad642f..3a0ed7f9e801 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -164,12 +164,6 @@ struct netif_rx_stats
164 unsigned total; 164 unsigned total;
165 unsigned dropped; 165 unsigned dropped;
166 unsigned time_squeeze; 166 unsigned time_squeeze;
167 unsigned throttled;
168 unsigned fastroute_hit;
169 unsigned fastroute_success;
170 unsigned fastroute_defer;
171 unsigned fastroute_deferred_out;
172 unsigned fastroute_latency_reduction;
173 unsigned cpu_collision; 167 unsigned cpu_collision;
174}; 168};
175 169
@@ -562,12 +556,9 @@ static inline int unregister_gifconf(unsigned int family)
562 556
563struct softnet_data 557struct softnet_data
564{ 558{
565 int throttle; 559 struct net_device *output_queue;
566 int cng_level;
567 int avg_blog;
568 struct sk_buff_head input_pkt_queue; 560 struct sk_buff_head input_pkt_queue;
569 struct list_head poll_list; 561 struct list_head poll_list;
570 struct net_device *output_queue;
571 struct sk_buff *completion_queue; 562 struct sk_buff *completion_queue;
572 563
573 struct net_device backlog_dev; /* Sorry. 8) */ 564 struct net_device backlog_dev; /* Sorry. 8) */
diff --git a/include/linux/pkt_cls.h b/include/linux/pkt_cls.h
index d2aa214d6803..25d2d67c1faf 100644
--- a/include/linux/pkt_cls.h
+++ b/include/linux/pkt_cls.h
@@ -408,6 +408,7 @@ enum
408 TCF_EM_NBYTE, 408 TCF_EM_NBYTE,
409 TCF_EM_U32, 409 TCF_EM_U32,
410 TCF_EM_META, 410 TCF_EM_META,
411 TCF_EM_TEXT,
411 __TCF_EM_MAX 412 __TCF_EM_MAX
412}; 413};
413 414
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index e68dbf0bf579..d021888b58f1 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -892,10 +892,13 @@ extern void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const voi
892 goto rtattr_failure; \ 892 goto rtattr_failure; \
893 __rta_fill(skb, attrtype, attrlen, data); }) 893 __rta_fill(skb, attrtype, attrlen, data); })
894 894
895#define RTA_PUT_NOHDR(skb, attrlen, data) \ 895#define RTA_APPEND(skb, attrlen, data) \
896({ if (unlikely(skb_tailroom(skb) < (int)(attrlen))) \ 896({ if (unlikely(skb_tailroom(skb) < (int)(attrlen))) \
897 goto rtattr_failure; \ 897 goto rtattr_failure; \
898 memcpy(skb_put(skb, RTA_ALIGN(attrlen)), data, attrlen); }) 898 memcpy(skb_put(skb, attrlen), data, attrlen); })
899
900#define RTA_PUT_NOHDR(skb, attrlen, data) \
901 RTA_APPEND(skb, RTA_ALIGN(attrlen), data)
899 902
900#define RTA_PUT_U8(skb, attrtype, value) \ 903#define RTA_PUT_U8(skb, attrtype, value) \
901({ u8 _tmp = (value); \ 904({ u8 _tmp = (value); \
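RTA_APPEND() appends raw bytes without an rtattr header and without padding, while RTA_PUT_NOHDR() keeps its previous behaviour by rounding the length up with RTA_ALIGN() first. Both jump to an rtattr_failure label when the skb runs out of tailroom, so a caller looks roughly like this (hypothetical fill function, sketched only from the macros above):

	static int my_fill_info(struct sk_buff *skb, const void *blob, int len)
	{
		/* Append raw payload bytes; on insufficient tailroom the
		 * macro jumps to rtattr_failure like the other RTA_PUT
		 * helpers. */
		RTA_APPEND(skb, len, blob);
		return 0;

	rtattr_failure:
		return -1;
	}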
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index d7c839a21842..416a2e4024b2 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -27,6 +27,7 @@
27#include <linux/highmem.h> 27#include <linux/highmem.h>
28#include <linux/poll.h> 28#include <linux/poll.h>
29#include <linux/net.h> 29#include <linux/net.h>
30#include <linux/textsearch.h>
30#include <net/checksum.h> 31#include <net/checksum.h>
31 32
32#define HAVE_ALLOC_SKB /* For the drivers to know */ 33#define HAVE_ALLOC_SKB /* For the drivers to know */
@@ -321,6 +322,28 @@ extern void skb_over_panic(struct sk_buff *skb, int len,
321extern void skb_under_panic(struct sk_buff *skb, int len, 322extern void skb_under_panic(struct sk_buff *skb, int len,
322 void *here); 323 void *here);
323 324
325struct skb_seq_state
326{
327 __u32 lower_offset;
328 __u32 upper_offset;
329 __u32 frag_idx;
330 __u32 stepped_offset;
331 struct sk_buff *root_skb;
332 struct sk_buff *cur_skb;
333 __u8 *frag_data;
334};
335
336extern void skb_prepare_seq_read(struct sk_buff *skb,
337 unsigned int from, unsigned int to,
338 struct skb_seq_state *st);
339extern unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
340 struct skb_seq_state *st);
341extern void skb_abort_seq_read(struct skb_seq_state *st);
342
343extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
344 unsigned int to, struct ts_config *config,
345 struct ts_state *state);
346
324/* Internal */ 347/* Internal */
325#define skb_shinfo(SKB) ((struct skb_shared_info *)((SKB)->end)) 348#define skb_shinfo(SKB) ((struct skb_shared_info *)((SKB)->end))
326 349
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 72965bfe6cfb..ebfe1250f0a4 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -243,6 +243,7 @@ enum
243 NET_CORE_MOD_CONG=16, 243 NET_CORE_MOD_CONG=16,
244 NET_CORE_DEV_WEIGHT=17, 244 NET_CORE_DEV_WEIGHT=17,
245 NET_CORE_SOMAXCONN=18, 245 NET_CORE_SOMAXCONN=18,
246 NET_CORE_BUDGET=19,
246}; 247};
247 248
248/* /proc/sys/net/ethernet */ 249/* /proc/sys/net/ethernet */
diff --git a/include/linux/tc_ematch/tc_em_text.h b/include/linux/tc_ematch/tc_em_text.h
new file mode 100644
index 000000000000..7cd43e99c7f5
--- /dev/null
+++ b/include/linux/tc_ematch/tc_em_text.h
@@ -0,0 +1,19 @@
1#ifndef __LINUX_TC_EM_TEXT_H
2#define __LINUX_TC_EM_TEXT_H
3
4#include <linux/pkt_cls.h>
5
6#define TC_EM_TEXT_ALGOSIZ 16
7
8struct tcf_em_text
9{
10 char algo[TC_EM_TEXT_ALGOSIZ];
11 __u16 from_offset;
12 __u16 to_offset;
13 __u16 pattern_len;
14 __u8 from_layer:4;
15 __u8 to_layer:4;
16 __u8 pad;
17};
18
19#endif
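struct tcf_em_text is the configuration blob userspace passes for a text ematch: the textsearch algorithm name, the search window as layer/offset pairs, and the pattern length. A hedged userspace-style sketch of assembling such a blob, assuming (as net/sched/em_text.c in this patch suggests) that the pattern bytes directly follow the structure; the layer values and their meaning are defined by the tc ematch framework and are only illustrative here:

	#include <stdlib.h>
	#include <string.h>
	#include <linux/tc_ematch/tc_em_text.h>

	/* Build header + pattern bytes in one allocation (sketch only). */
	static void *build_em_text(const char *algo, const char *pattern,
				   size_t *sizep)
	{
		size_t plen = strlen(pattern);
		struct tcf_em_text *conf;

		conf = calloc(1, sizeof(*conf) + plen);
		if (!conf)
			return NULL;

		strncpy(conf->algo, algo, TC_EM_TEXT_ALGOSIZ - 1);
		conf->from_offset = 0;
		conf->to_offset = 0;
		conf->from_layer = 0;	/* assumed: link layer */
		conf->to_layer = 0;
		conf->pattern_len = plen;
		memcpy(conf + 1, pattern, plen);

		*sizep = sizeof(*conf) + plen;
		return conf;
	}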
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 3ea75dd6640a..dfd93d03f5d2 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -127,6 +127,7 @@ enum {
127#define TCP_WINDOW_CLAMP 10 /* Bound advertised window */ 127#define TCP_WINDOW_CLAMP 10 /* Bound advertised window */
128#define TCP_INFO 11 /* Information about this connection. */ 128#define TCP_INFO 11 /* Information about this connection. */
129#define TCP_QUICKACK 12 /* Block/reenable quick acks */ 129#define TCP_QUICKACK 12 /* Block/reenable quick acks */
130#define TCP_CONGESTION 13 /* Congestion control algorithm */
130 131
131#define TCPI_OPT_TIMESTAMPS 1 132#define TCPI_OPT_TIMESTAMPS 1
132#define TCPI_OPT_SACK 2 133#define TCPI_OPT_SACK 2
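Unlike the other TCP options, TCP_CONGESTION takes a string, matching the handling added to tcp_setsockopt() further down in this patch. An illustrative userspace snippet (error handling trimmed; the fallback define uses the value 13 from the hunk above for headers that predate this patch):

	#include <string.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/tcp.h>

	#ifndef TCP_CONGESTION
	#define TCP_CONGESTION 13
	#endif

	/* Select a congestion control algorithm, e.g. "reno", on a TCP
	 * socket; unknown names are rejected by the kernel. */
	static int set_cc(int sock, const char *name)
	{
		return setsockopt(sock, IPPROTO_TCP, TCP_CONGESTION,
				  name, strlen(name));
	}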
diff --git a/include/linux/textsearch.h b/include/linux/textsearch.h
new file mode 100644
index 000000000000..941f45ac117a
--- /dev/null
+++ b/include/linux/textsearch.h
@@ -0,0 +1,180 @@
1#ifndef __LINUX_TEXTSEARCH_H
2#define __LINUX_TEXTSEARCH_H
3
4#ifdef __KERNEL__
5
6#include <linux/types.h>
7#include <linux/list.h>
8#include <linux/kernel.h>
9#include <linux/module.h>
10#include <linux/err.h>
11
12struct ts_config;
13
14/**
15 * TS_AUTOLOAD - Automatically load textsearch modules when needed
16 */
17#define TS_AUTOLOAD 1
18
19/**
20 * struct ts_state - search state
21 * @offset: offset for next match
 26 * @cb: control buffer, for persistent variables of get_next_block()
23 */
24struct ts_state
25{
26 unsigned int offset;
27 char cb[40];
28};
29
30/**
31 * struct ts_ops - search module operations
32 * @name: name of search algorithm
33 * @init: initialization function to prepare a search
34 * @find: find the next occurrence of the pattern
35 * @destroy: destroy algorithm specific parts of a search configuration
36 * @get_pattern: return head of pattern
37 * @get_pattern_len: return length of pattern
38 * @owner: module reference to algorithm
39 */
40struct ts_ops
41{
42 const char *name;
43 struct ts_config * (*init)(const void *, unsigned int, int);
44 unsigned int (*find)(struct ts_config *,
45 struct ts_state *);
46 void (*destroy)(struct ts_config *);
47 void * (*get_pattern)(struct ts_config *);
48 unsigned int (*get_pattern_len)(struct ts_config *);
49 struct module *owner;
50 struct list_head list;
51};
52
53/**
54 * struct ts_config - search configuration
55 * @ops: operations of chosen algorithm
56 * @get_next_block: callback to fetch the next block to search in
57 * @finish: callback to finalize a search
58 */
59struct ts_config
60{
61 struct ts_ops *ops;
62
63 /**
64 * get_next_block - fetch next block of data
65 * @consumed: number of bytes consumed by the caller
66 * @dst: destination buffer
67 * @conf: search configuration
68 * @state: search state
69 *
70 * Called repeatedly until 0 is returned. Must assign the
71 * head of the next block of data to &*dst and return the length
72 * of the block or 0 if at the end. consumed == 0 indicates
 73 * a new search. May store/read persistent values in state->cb.
74 */
75 unsigned int (*get_next_block)(unsigned int consumed,
76 const u8 **dst,
77 struct ts_config *conf,
78 struct ts_state *state);
79
80 /**
81 * finish - finalize/clean a series of get_next_block() calls
82 * @conf: search configuration
83 * @state: search state
84 *
85 * Called after the last use of get_next_block(), may be used
86 * to cleanup any leftovers.
87 */
88 void (*finish)(struct ts_config *conf,
89 struct ts_state *state);
90};
91
92/**
93 * textsearch_next - continue searching for a pattern
94 * @conf: search configuration
95 * @state: search state
96 *
97 * Continues a search looking for more occurrences of the pattern.
98 * textsearch_find() must be called to find the first occurrence
99 * in order to reset the state.
100 *
101 * Returns the position of the next occurrence of the pattern or
102 * UINT_MAX if no match was found.
103 */
104static inline unsigned int textsearch_next(struct ts_config *conf,
105 struct ts_state *state)
106{
107 unsigned int ret = conf->ops->find(conf, state);
108
109 if (conf->finish)
110 conf->finish(conf, state);
111
112 return ret;
113}
114
115/**
116 * textsearch_find - start searching for a pattern
117 * @conf: search configuration
118 * @state: search state
119 *
120 * Returns the position of first occurrence of the pattern or
121 * UINT_MAX if no match was found.
122 */
123static inline unsigned int textsearch_find(struct ts_config *conf,
124 struct ts_state *state)
125{
126 state->offset = 0;
127 return textsearch_next(conf, state);
128}
129
130/**
131 * textsearch_get_pattern - return head of the pattern
132 * @conf: search configuration
133 */
134static inline void *textsearch_get_pattern(struct ts_config *conf)
135{
136 return conf->ops->get_pattern(conf);
137}
138
139/**
140 * textsearch_get_pattern_len - return length of the pattern
141 * @conf: search configuration
142 */
143static inline unsigned int textsearch_get_pattern_len(struct ts_config *conf)
144{
145 return conf->ops->get_pattern_len(conf);
146}
147
148extern int textsearch_register(struct ts_ops *);
149extern int textsearch_unregister(struct ts_ops *);
150extern struct ts_config *textsearch_prepare(const char *, const void *,
151 unsigned int, int, int);
152extern void textsearch_destroy(struct ts_config *conf);
153extern unsigned int textsearch_find_continuous(struct ts_config *,
154 struct ts_state *,
155 const void *, unsigned int);
156
157
158#define TS_PRIV_ALIGNTO 8
159#define TS_PRIV_ALIGN(len) (((len) + TS_PRIV_ALIGNTO-1) & ~(TS_PRIV_ALIGNTO-1))
160
161static inline struct ts_config *alloc_ts_config(size_t payload, int gfp_mask)
162{
163 struct ts_config *conf;
164
165 conf = kmalloc(TS_PRIV_ALIGN(sizeof(*conf)) + payload, gfp_mask);
166 if (conf == NULL)
167 return ERR_PTR(-ENOMEM);
168
169 memset(conf, 0, TS_PRIV_ALIGN(sizeof(*conf)) + payload);
170 return conf;
171}
172
173static inline void *ts_config_priv(struct ts_config *conf)
174{
175 return ((u8 *) conf + TS_PRIV_ALIGN(sizeof(struct ts_config)));
176}
177
178#endif /* __KERNEL__ */
179
180#endif
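The two callbacks in struct ts_config are what make searches over non-linear data possible: get_next_block() hands the algorithm one chunk at a time and finish() cleans up afterwards. A hedged sketch of a caller feeding a two-part buffer through its own get_next_block(), modelled on the linear helper in lib/textsearch.c; the data layout is hypothetical and must fit into the 40-byte state->cb area:

	struct two_part {
		const u8 *a, *b;
		unsigned int alen, blen;
	};

	static unsigned int two_part_next(unsigned int consumed, const u8 **dst,
					  struct ts_config *conf,
					  struct ts_state *state)
	{
		struct two_part *p = (struct two_part *) state->cb;

		if (consumed < p->alen) {
			*dst = p->a + consumed;
			return p->alen - consumed;
		}
		consumed -= p->alen;
		if (consumed < p->blen) {
			*dst = p->b + consumed;
			return p->blen - consumed;
		}
		return 0;	/* end of data */
	}

	/* conf->get_next_block = two_part_next; fill state->cb with a
	 * struct two_part, then call textsearch_find(conf, &state). */

Because the blocks are presented to the algorithm as one continuous stream of offsets, a match may span the boundary between the two parts.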
diff --git a/include/linux/textsearch_fsm.h b/include/linux/textsearch_fsm.h
new file mode 100644
index 000000000000..fdfa078c66e5
--- /dev/null
+++ b/include/linux/textsearch_fsm.h
@@ -0,0 +1,48 @@
1#ifndef __LINUX_TEXTSEARCH_FSM_H
2#define __LINUX_TEXTSEARCH_FSM_H
3
4#include <linux/types.h>
5
6enum {
7 TS_FSM_SPECIFIC, /* specific character */
8 TS_FSM_WILDCARD, /* any character */
9 TS_FSM_DIGIT, /* isdigit() */
10 TS_FSM_XDIGIT, /* isxdigit() */
11 TS_FSM_PRINT, /* isprint() */
12 TS_FSM_ALPHA, /* isalpha() */
13 TS_FSM_ALNUM, /* isalnum() */
14 TS_FSM_ASCII, /* isascii() */
15 TS_FSM_CNTRL, /* iscntrl() */
16 TS_FSM_GRAPH, /* isgraph() */
17 TS_FSM_LOWER, /* islower() */
18 TS_FSM_UPPER, /* isupper() */
19 TS_FSM_PUNCT, /* ispunct() */
20 TS_FSM_SPACE, /* isspace() */
21 __TS_FSM_TYPE_MAX,
22};
23#define TS_FSM_TYPE_MAX (__TS_FSM_TYPE_MAX - 1)
24
25enum {
26 TS_FSM_SINGLE, /* 1 occurrence */
27 TS_FSM_PERHAPS, /* 1 or 0 occurrence */
28 TS_FSM_ANY, /* 0..n occurrences */
29 TS_FSM_MULTI, /* 1..n occurrences */
30 TS_FSM_HEAD_IGNORE, /* 0..n ignored occurrences at head */
31 __TS_FSM_RECUR_MAX,
32};
33#define TS_FSM_RECUR_MAX (__TS_FSM_RECUR_MAX - 1)
34
35/**
36 * struct ts_fsm_token - state machine token (state)
37 * @type: type of token
38 * @recur: number of recurrences
39 * @value: character value for TS_FSM_SPECIFIC
40 */
41struct ts_fsm_token
42{
43 __u16 type;
44 __u8 recur;
45 __u8 value;
46};
47
48#endif
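A pattern for the "fsm" algorithm is simply an array of struct ts_fsm_token passed as the pattern to textsearch_prepare(). For example, a pattern that skips any leading garbage and then matches one or more digits followed by a single space could be sketched as follows (illustrative only, using the token and recurrence types defined above):

	static struct ts_fsm_token digits_then_space[] = {
		{ .type = TS_FSM_WILDCARD, .recur = TS_FSM_HEAD_IGNORE },
		{ .type = TS_FSM_DIGIT,    .recur = TS_FSM_MULTI },
		{ .type = TS_FSM_SPECIFIC, .recur = TS_FSM_SINGLE, .value = ' ' },
	};

	/* conf = textsearch_prepare("fsm", digits_then_space,
	 *			      sizeof(digits_then_space),
	 *			      GFP_KERNEL, TS_AUTOLOAD); */

The TS_FSM_HEAD_IGNORE token must be the first and not the only token, which is exactly what fsm_init() in lib/ts_fsm.c validates.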
diff --git a/include/net/tcp.h b/include/net/tcp.h
index e427cf35915c..ec9e20c27179 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1162,12 +1162,14 @@ extern void tcp_init_congestion_control(struct tcp_sock *tp);
1162extern void tcp_cleanup_congestion_control(struct tcp_sock *tp); 1162extern void tcp_cleanup_congestion_control(struct tcp_sock *tp);
1163extern int tcp_set_default_congestion_control(const char *name); 1163extern int tcp_set_default_congestion_control(const char *name);
1164extern void tcp_get_default_congestion_control(char *name); 1164extern void tcp_get_default_congestion_control(char *name);
1165extern int tcp_set_congestion_control(struct tcp_sock *tp, const char *name);
1165 1166
1166extern struct tcp_congestion_ops tcp_reno; 1167extern struct tcp_congestion_ops tcp_init_congestion_ops;
1167extern u32 tcp_reno_ssthresh(struct tcp_sock *tp); 1168extern u32 tcp_reno_ssthresh(struct tcp_sock *tp);
1168extern void tcp_reno_cong_avoid(struct tcp_sock *tp, u32 ack, 1169extern void tcp_reno_cong_avoid(struct tcp_sock *tp, u32 ack,
1169 u32 rtt, u32 in_flight, int flag); 1170 u32 rtt, u32 in_flight, int flag);
1170extern u32 tcp_reno_min_cwnd(struct tcp_sock *tp); 1171extern u32 tcp_reno_min_cwnd(struct tcp_sock *tp);
1172extern struct tcp_congestion_ops tcp_reno;
1171 1173
1172static inline void tcp_set_ca_state(struct tcp_sock *tp, u8 ca_state) 1174static inline void tcp_set_ca_state(struct tcp_sock *tp, u8 ca_state)
1173{ 1175{
diff --git a/lib/Kconfig b/lib/Kconfig
index 2d4d4e3bc4aa..455833a9e31a 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -63,5 +63,32 @@ config REED_SOLOMON_ENC16
63config REED_SOLOMON_DEC16 63config REED_SOLOMON_DEC16
64 boolean 64 boolean
65 65
66endmenu 66config TEXTSEARCH
67 boolean "Textsearch infrastructure"
68 default y
69 help
70 Say Y here if you want to provide a textsearch infrastructure
71 to other subsystems.
72
73config TEXTSEARCH_KMP
74 depends on TEXTSEARCH
75 tristate "Knuth-Morris-Pratt"
76 help
77 Say Y here if you want to be able to search text using the
78 Knuth-Morris-Pratt textsearch algorithm.
67 79
80 To compile this code as a module, choose M here: the
81 module will be called ts_kmp.
82
83config TEXTSEARCH_FSM
84 depends on TEXTSEARCH
85 tristate "Finite state machine"
86 help
87 Say Y here if you want to be able to search text using a
88 naive finite state machine approach implementing a subset
89 of regular expressions.
90
91 To compile this code as a module, choose M here: the
92 module will be called ts_fsm.
93
94endmenu
diff --git a/lib/Makefile b/lib/Makefile
index dcb4231916e2..beed1585294c 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -36,6 +36,10 @@ obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
36obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/ 36obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/
37obj-$(CONFIG_REED_SOLOMON) += reed_solomon/ 37obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
38 38
39obj-$(CONFIG_TEXTSEARCH) += textsearch.o
40obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
41obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
42
39hostprogs-y := gen_crc32table 43hostprogs-y := gen_crc32table
40clean-files := crc32table.h 44clean-files := crc32table.h
41 45
diff --git a/lib/textsearch.c b/lib/textsearch.c
new file mode 100644
index 000000000000..1e934c196f0f
--- /dev/null
+++ b/lib/textsearch.c
@@ -0,0 +1,317 @@
1/*
2 * lib/textsearch.c Generic text search interface
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Thomas Graf <tgraf@suug.ch>
10 * Pablo Neira Ayuso <pablo@eurodev.net>
11 *
12 * ==========================================================================
13 *
14 * INTRODUCTION
15 *
 16 * The textsearch infrastructure provides text searching facilities for
17 * both linear and non-linear data. Individual search algorithms are
18 * implemented in modules and chosen by the user.
19 *
20 * ARCHITECTURE
21 *
22 * User
23 * +----------------+
24 * | finish()|<--------------(6)-----------------+
25 * |get_next_block()|<--------------(5)---------------+ |
26 * | | Algorithm | |
27 * | | +------------------------------+
28 * | | | init() find() destroy() |
29 * | | +------------------------------+
30 * | | Core API ^ ^ ^
31 * | | +---------------+ (2) (4) (8)
32 * | (1)|----->| prepare() |---+ | |
33 * | (3)|----->| find()/next() |-----------+ |
34 * | (7)|----->| destroy() |----------------------+
35 * +----------------+ +---------------+
36 *
37 * (1) User configures a search by calling _prepare() specifying the
38 * search parameters such as the pattern and algorithm name.
39 * (2) Core requests the algorithm to allocate and initialize a search
40 * configuration according to the specified parameters.
41 * (3) User starts the search(es) by calling _find() or _next() to
42 * fetch subsequent occurrences. A state variable is provided
 43 * to the algorithm to store persistent variables.
44 * (4) Core eventually resets the search offset and forwards the find()
45 * request to the algorithm.
 46 * (5) Algorithm calls get_next_block() provided by the user continuously
 47 * to fetch the data to be searched in, block by block.
48 * (6) Algorithm invokes finish() after the last call to get_next_block
49 * to clean up any leftovers from get_next_block. (Optional)
50 * (7) User destroys the configuration by calling _destroy().
51 * (8) Core notifies the algorithm to destroy algorithm specific
52 * allocations. (Optional)
53 *
54 * USAGE
55 *
56 * Before a search can be performed, a configuration must be created
 57 * by calling textsearch_prepare(), specifying the search algorithm and
 58 * the pattern to look for. The returned configuration may then be used
 59 * an arbitrary number of times and even in parallel as long as a
60 * separate struct ts_state variable is provided to every instance.
61 *
 62 * The actual search is performed by either calling textsearch_find_-
 63 * continuous() for linear data or by providing your own get_next_block()
 64 * implementation and calling textsearch_find(). Both functions return
 65 * the position of the first occurrence of the pattern or UINT_MAX if
 66 * no match was found. Subsequent occurrences can be found by calling
67 * textsearch_next() regardless of the linearity of the data.
68 *
 69 * Once you're done using a configuration, it must be given back via
 70 * textsearch_destroy().
71 *
72 * EXAMPLE
73 *
74 * int pos;
75 * struct ts_config *conf;
76 * struct ts_state state;
77 * const char *pattern = "chicken";
78 * const char *example = "We dance the funky chicken";
79 *
80 * conf = textsearch_prepare("kmp", pattern, strlen(pattern),
81 * GFP_KERNEL, TS_AUTOLOAD);
82 * if (IS_ERR(conf)) {
83 * err = PTR_ERR(conf);
84 * goto errout;
85 * }
86 *
87 * pos = textsearch_find_continuous(conf, &state, example, strlen(example));
88 * if (pos != UINT_MAX)
89 * panic("Oh my god, dancing chickens at %d\n", pos);
90 *
91 * textsearch_destroy(conf);
92 *
93 * ==========================================================================
94 */
95
96#include <linux/config.h>
97#include <linux/module.h>
98#include <linux/types.h>
99#include <linux/string.h>
100#include <linux/init.h>
101#include <linux/rcupdate.h>
102#include <linux/err.h>
103#include <linux/textsearch.h>
104
105static LIST_HEAD(ts_ops);
106static DEFINE_SPINLOCK(ts_mod_lock);
107
108static inline struct ts_ops *lookup_ts_algo(const char *name)
109{
110 struct ts_ops *o;
111
112 rcu_read_lock();
113 list_for_each_entry_rcu(o, &ts_ops, list) {
114 if (!strcmp(name, o->name)) {
115 if (!try_module_get(o->owner))
116 o = NULL;
117 rcu_read_unlock();
118 return o;
119 }
120 }
121 rcu_read_unlock();
122
123 return NULL;
124}
125
126/**
127 * textsearch_register - register a textsearch module
128 * @ops: operations lookup table
129 *
130 * This function must be called by textsearch modules to announce
131 * their presence. The specified &@ops must have %name set to a
132 * unique identifier and the callbacks find(), init(), get_pattern(),
133 * and get_pattern_len() must be implemented.
134 *
135 * Returns 0 or -EEXIST if another module has already registered
136 * with same name.
137 */
138int textsearch_register(struct ts_ops *ops)
139{
140 int err = -EEXIST;
141 struct ts_ops *o;
142
143 if (ops->name == NULL || ops->find == NULL || ops->init == NULL ||
144 ops->get_pattern == NULL || ops->get_pattern_len == NULL)
145 return -EINVAL;
146
147 spin_lock(&ts_mod_lock);
148 list_for_each_entry(o, &ts_ops, list) {
149 if (!strcmp(ops->name, o->name))
150 goto errout;
151 }
152
153 list_add_tail_rcu(&ops->list, &ts_ops);
154 err = 0;
155errout:
156 spin_unlock(&ts_mod_lock);
157 return err;
158}
159
160/**
161 * textsearch_unregister - unregister a textsearch module
162 * @ops: operations lookup table
163 *
164 * This function must be called by textsearch modules to announce
165 * their disappearance, for example when the module gets unloaded.
166 * The &ops parameter must be the same as the one during the
167 * registration.
168 *
169 * Returns 0 on success or -ENOENT if no matching textsearch
170 * registration was found.
171 */
172int textsearch_unregister(struct ts_ops *ops)
173{
174 int err = 0;
175 struct ts_ops *o;
176
177 spin_lock(&ts_mod_lock);
178 list_for_each_entry(o, &ts_ops, list) {
179 if (o == ops) {
180 list_del_rcu(&o->list);
181 goto out;
182 }
183 }
184
185 err = -ENOENT;
186out:
187 spin_unlock(&ts_mod_lock);
188 return err;
189}
190
191struct ts_linear_state
192{
193 unsigned int len;
194 const void *data;
195};
196
197static unsigned int get_linear_data(unsigned int consumed, const u8 **dst,
198 struct ts_config *conf,
199 struct ts_state *state)
200{
201 struct ts_linear_state *st = (struct ts_linear_state *) state->cb;
202
203 if (likely(consumed < st->len)) {
204 *dst = st->data + consumed;
205 return st->len - consumed;
206 }
207
208 return 0;
209}
210
211/**
212 * textsearch_find_continuous - search a pattern in continuous/linear data
213 * @conf: search configuration
214 * @state: search state
215 * @data: data to search in
216 * @len: length of data
217 *
218 * A simplified version of textsearch_find() for continuous/linear data.
219 * Call textsearch_next() to retrieve subsequent matches.
220 *
221 * Returns the position of first occurrence of the pattern or
222 * UINT_MAX if no occurrence was found.
223 */
224unsigned int textsearch_find_continuous(struct ts_config *conf,
225 struct ts_state *state,
226 const void *data, unsigned int len)
227{
228 struct ts_linear_state *st = (struct ts_linear_state *) state->cb;
229
230 conf->get_next_block = get_linear_data;
231 st->data = data;
232 st->len = len;
233
234 return textsearch_find(conf, state);
235}
236
237/**
238 * textsearch_prepare - Prepare a search
239 * @algo: name of search algorithm
240 * @pattern: pattern data
241 * @len: length of pattern
242 * @gfp_mask: allocation mask
243 * @flags: search flags
244 *
245 * Looks up the search algorithm module and creates a new textsearch
246 * configuration for the specified pattern. Upon completion all
247 * necessary refcnts are held and the configuration must be given back
248 * using textsearch_destroy() after usage.
249 *
250 * Note: The format of the pattern may not be compatible between
251 * the various search algorithms.
252 *
253 * Returns a new textsearch configuration according to the specified
254 * parameters or a ERR_PTR().
255 */
256struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
257 unsigned int len, int gfp_mask, int flags)
258{
259 int err = -ENOENT;
260 struct ts_config *conf;
261 struct ts_ops *ops;
262
263 ops = lookup_ts_algo(algo);
264#ifdef CONFIG_KMOD
265 /*
266 * Why not always autoload you may ask. Some users are
267 * in a situation where requesting a module may deadlock,
268 * especially when the module is located on a NFS mount.
269 */
270 if (ops == NULL && flags & TS_AUTOLOAD) {
271 request_module("ts_%s", algo);
272 ops = lookup_ts_algo(algo);
273 }
274#endif
275
276 if (ops == NULL)
277 goto errout;
278
279 conf = ops->init(pattern, len, gfp_mask);
280 if (IS_ERR(conf)) {
281 err = PTR_ERR(conf);
282 goto errout;
283 }
284
285 conf->ops = ops;
286 return conf;
287
288errout:
289 if (ops)
290 module_put(ops->owner);
291
292 return ERR_PTR(err);
293}
294
295/**
296 * textsearch_destroy - destroy a search configuration
297 * @conf: search configuration
298 *
299 * Releases all references of the configuration and frees
300 * up the memory.
301 */
302void textsearch_destroy(struct ts_config *conf)
303{
304 if (conf->ops) {
305 if (conf->ops->destroy)
306 conf->ops->destroy(conf);
307 module_put(conf->ops->owner);
308 }
309
310 kfree(conf);
311}
312
313EXPORT_SYMBOL(textsearch_register);
314EXPORT_SYMBOL(textsearch_unregister);
315EXPORT_SYMBOL(textsearch_prepare);
316EXPORT_SYMBOL(textsearch_find_continuous);
317EXPORT_SYMBOL(textsearch_destroy);
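Only the first match is returned by textsearch_find() or textsearch_find_continuous(); further occurrences come from textsearch_next() with the same state variable. A small sketch, continuing the dancing-chicken example from the header comment above (conf, state and example as declared there):

	unsigned int pos;

	for (pos = textsearch_find_continuous(conf, &state,
					      example, strlen(example));
	     pos != UINT_MAX;
	     pos = textsearch_next(conf, &state))
		printk(KERN_DEBUG "match at offset %u\n", pos);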
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c
new file mode 100644
index 000000000000..d27c0a072940
--- /dev/null
+++ b/lib/ts_fsm.c
@@ -0,0 +1,338 @@
1/*
2 * lib/ts_fsm.c A naive finite state machine text search approach
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Thomas Graf <tgraf@suug.ch>
10 *
11 * ==========================================================================
12 *
 13 * A finite state machine consists of n states (struct ts_fsm_token)
 14 * representing the pattern as a finite automaton. The data is read
 15 * sequentially on an octet basis. Every state token specifies the number
 16 * of recurrences and the type of value accepted, which can be either a
 17 * specific character or a ctype-based set of characters. The available
 18 * types of recurrence include 1, (0|1), [0 n], and [1 n].
 19 *
 20 * The algorithm distinguishes between strict and non-strict mode, i.e.
 21 * whether the pattern has to start at the first octet. Strict mode
22 * is enabled by default and can be disabled by inserting
23 * TS_FSM_HEAD_IGNORE as the first token in the chain.
24 *
25 * The runtime performance of the algorithm should be around O(n),
26 * however while in strict mode the average runtime can be better.
27 */
28
29#include <linux/config.h>
30#include <linux/module.h>
31#include <linux/types.h>
32#include <linux/string.h>
33#include <linux/ctype.h>
34#include <linux/textsearch.h>
35#include <linux/textsearch_fsm.h>
36
37struct ts_fsm
38{
39 unsigned int ntokens;
40 struct ts_fsm_token tokens[0];
41};
42
43/* other values derived from ctype.h */
44#define _A 0x100 /* ascii */
45#define _W 0x200 /* wildcard */
46
47/* Map to _ctype flags and some magic numbers */
48static u16 token_map[TS_FSM_TYPE_MAX+1] = {
49 [TS_FSM_SPECIFIC] = 0,
50 [TS_FSM_WILDCARD] = _W,
51 [TS_FSM_CNTRL] = _C,
52 [TS_FSM_LOWER] = _L,
53 [TS_FSM_UPPER] = _U,
54 [TS_FSM_PUNCT] = _P,
55 [TS_FSM_SPACE] = _S,
56 [TS_FSM_DIGIT] = _D,
57 [TS_FSM_XDIGIT] = _D | _X,
58 [TS_FSM_ALPHA] = _U | _L,
59 [TS_FSM_ALNUM] = _U | _L | _D,
60 [TS_FSM_PRINT] = _P | _U | _L | _D | _SP,
61 [TS_FSM_GRAPH] = _P | _U | _L | _D,
62 [TS_FSM_ASCII] = _A,
63};
64
65static u16 token_lookup_tbl[256] = {
66_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 0- 3 */
67_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 4- 7 */
68_W|_A|_C, _W|_A|_C|_S, _W|_A|_C|_S, _W|_A|_C|_S, /* 8- 11 */
69_W|_A|_C|_S, _W|_A|_C|_S, _W|_A|_C, _W|_A|_C, /* 12- 15 */
70_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 16- 19 */
71_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 20- 23 */
72_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 24- 27 */
73_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 28- 31 */
74_W|_A|_S|_SP, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 32- 35 */
75_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 36- 39 */
76_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 40- 43 */
77_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 44- 47 */
78_W|_A|_D, _W|_A|_D, _W|_A|_D, _W|_A|_D, /* 48- 51 */
79_W|_A|_D, _W|_A|_D, _W|_A|_D, _W|_A|_D, /* 52- 55 */
80_W|_A|_D, _W|_A|_D, _W|_A|_P, _W|_A|_P, /* 56- 59 */
81_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 60- 63 */
82_W|_A|_P, _W|_A|_U|_X, _W|_A|_U|_X, _W|_A|_U|_X, /* 64- 67 */
83_W|_A|_U|_X, _W|_A|_U|_X, _W|_A|_U|_X, _W|_A|_U, /* 68- 71 */
84_W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 72- 75 */
85_W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 76- 79 */
86_W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 80- 83 */
87_W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 84- 87 */
88_W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_P, /* 88- 91 */
89_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 92- 95 */
90_W|_A|_P, _W|_A|_L|_X, _W|_A|_L|_X, _W|_A|_L|_X, /* 96- 99 */
91_W|_A|_L|_X, _W|_A|_L|_X, _W|_A|_L|_X, _W|_A|_L, /* 100-103 */
92_W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 104-107 */
93_W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 108-111 */
94_W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 112-115 */
95_W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 116-119 */
96_W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_P, /* 120-123 */
97_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_C, /* 124-127 */
98_W, _W, _W, _W, /* 128-131 */
99_W, _W, _W, _W, /* 132-135 */
100_W, _W, _W, _W, /* 136-139 */
101_W, _W, _W, _W, /* 140-143 */
102_W, _W, _W, _W, /* 144-147 */
103_W, _W, _W, _W, /* 148-151 */
104_W, _W, _W, _W, /* 152-155 */
105_W, _W, _W, _W, /* 156-159 */
106_W|_S|_SP, _W|_P, _W|_P, _W|_P, /* 160-163 */
107_W|_P, _W|_P, _W|_P, _W|_P, /* 164-167 */
108_W|_P, _W|_P, _W|_P, _W|_P, /* 168-171 */
109_W|_P, _W|_P, _W|_P, _W|_P, /* 172-175 */
110_W|_P, _W|_P, _W|_P, _W|_P, /* 176-179 */
111_W|_P, _W|_P, _W|_P, _W|_P, /* 180-183 */
112_W|_P, _W|_P, _W|_P, _W|_P, /* 184-187 */
113_W|_P, _W|_P, _W|_P, _W|_P, /* 188-191 */
114_W|_U, _W|_U, _W|_U, _W|_U, /* 192-195 */
115_W|_U, _W|_U, _W|_U, _W|_U, /* 196-199 */
116_W|_U, _W|_U, _W|_U, _W|_U, /* 200-203 */
117_W|_U, _W|_U, _W|_U, _W|_U, /* 204-207 */
118_W|_U, _W|_U, _W|_U, _W|_U, /* 208-211 */
119_W|_U, _W|_U, _W|_U, _W|_P, /* 212-215 */
120_W|_U, _W|_U, _W|_U, _W|_U, /* 216-219 */
121_W|_U, _W|_U, _W|_U, _W|_L, /* 220-223 */
122_W|_L, _W|_L, _W|_L, _W|_L, /* 224-227 */
123_W|_L, _W|_L, _W|_L, _W|_L, /* 228-231 */
124_W|_L, _W|_L, _W|_L, _W|_L, /* 232-235 */
125_W|_L, _W|_L, _W|_L, _W|_L, /* 236-239 */
126_W|_L, _W|_L, _W|_L, _W|_L, /* 240-243 */
127_W|_L, _W|_L, _W|_L, _W|_P, /* 244-247 */
128_W|_L, _W|_L, _W|_L, _W|_L, /* 248-251 */
129_W|_L, _W|_L, _W|_L, _W|_L}; /* 252-255 */
130
131static inline int match_token(struct ts_fsm_token *t, u8 d)
132{
133 if (t->type)
134 return (token_lookup_tbl[d] & t->type) != 0;
135 else
136 return t->value == d;
137}
138
139static unsigned int fsm_find(struct ts_config *conf, struct ts_state *state)
140{
141 struct ts_fsm *fsm = ts_config_priv(conf);
142 struct ts_fsm_token *cur = NULL, *next;
143 unsigned int match_start, block_idx = 0, tok_idx;
144 unsigned block_len = 0, strict, consumed = state->offset;
145 const u8 *data;
146
147#define GET_NEXT_BLOCK() \
148({ consumed += block_idx; \
149 block_idx = 0; \
150 block_len = conf->get_next_block(consumed, &data, conf, state); })
151
152#define TOKEN_MISMATCH() \
153 do { \
154 if (strict) \
155 goto no_match; \
156 block_idx++; \
157 goto startover; \
158 } while(0)
159
160#define end_of_data() unlikely(block_idx >= block_len && !GET_NEXT_BLOCK())
161
162 if (end_of_data())
163 goto no_match;
164
165 strict = fsm->tokens[0].recur != TS_FSM_HEAD_IGNORE;
166
167startover:
168 match_start = consumed + block_idx;
169
170 for (tok_idx = 0; tok_idx < fsm->ntokens; tok_idx++) {
171 cur = &fsm->tokens[tok_idx];
172
173 if (likely(tok_idx < (fsm->ntokens - 1)))
174 next = &fsm->tokens[tok_idx + 1];
175 else
176 next = NULL;
177
178 switch (cur->recur) {
179 case TS_FSM_SINGLE:
180 if (end_of_data())
181 goto no_match;
182
183 if (!match_token(cur, data[block_idx]))
184 TOKEN_MISMATCH();
185 break;
186
187 case TS_FSM_PERHAPS:
188 if (end_of_data() ||
189 !match_token(cur, data[block_idx]))
190 continue;
191 break;
192
193 case TS_FSM_MULTI:
194 if (end_of_data())
195 goto no_match;
196
197 if (!match_token(cur, data[block_idx]))
198 TOKEN_MISMATCH();
199
200 block_idx++;
201 /* fall through */
202
203 case TS_FSM_ANY:
204 if (next == NULL)
205 goto found_match;
206
207 if (end_of_data())
208 continue;
209
210 while (!match_token(next, data[block_idx])) {
211 if (!match_token(cur, data[block_idx]))
212 TOKEN_MISMATCH();
213 block_idx++;
214 if (end_of_data())
215 goto no_match;
216 }
217 continue;
218
219 /*
220 * Optimization: Prefer small local loop over jumping
221 * back and forth until garbage at head is munched.
222 */
223 case TS_FSM_HEAD_IGNORE:
224 if (end_of_data())
225 continue;
226
227 while (!match_token(next, data[block_idx])) {
228 /*
229 * Special case, don't start over upon
230 * a mismatch, give the user the
231 * chance to specify the type of data
232 * allowed to be ignored.
233 */
234 if (!match_token(cur, data[block_idx]))
235 goto no_match;
236
237 block_idx++;
238 if (end_of_data())
239 goto no_match;
240 }
241
242 match_start = consumed + block_idx;
243 continue;
244 }
245
246 block_idx++;
247 }
248
249 if (end_of_data())
250 goto found_match;
251
252no_match:
253 return UINT_MAX;
254
255found_match:
256 state->offset = consumed + block_idx;
257 return match_start;
258}
259
260static struct ts_config *fsm_init(const void *pattern, unsigned int len,
261 int gfp_mask)
262{
263 int i, err = -EINVAL;
264 struct ts_config *conf;
265 struct ts_fsm *fsm;
266 struct ts_fsm_token *tokens = (struct ts_fsm_token *) pattern;
267 unsigned int ntokens = len / sizeof(*tokens);
268 size_t priv_size = sizeof(*fsm) + len;
269
270 if (len % sizeof(struct ts_fsm_token) || ntokens < 1)
271 goto errout;
272
273 for (i = 0; i < ntokens; i++) {
274 struct ts_fsm_token *t = &tokens[i];
275
276 if (t->type > TS_FSM_TYPE_MAX || t->recur > TS_FSM_RECUR_MAX)
277 goto errout;
278
279 if (t->recur == TS_FSM_HEAD_IGNORE &&
280 (i != 0 || i == (ntokens - 1)))
281 goto errout;
282 }
283
284 conf = alloc_ts_config(priv_size, gfp_mask);
285 if (IS_ERR(conf))
286 return conf;
287
288 fsm = ts_config_priv(conf);
289 fsm->ntokens = ntokens;
290 memcpy(fsm->tokens, pattern, len);
291
292 for (i = 0; i < fsm->ntokens; i++) {
293 struct ts_fsm_token *t = &fsm->tokens[i];
294 t->type = token_map[t->type];
295 }
296
297 return conf;
298
299errout:
300 return ERR_PTR(err);
301}
302
303static void *fsm_get_pattern(struct ts_config *conf)
304{
305 struct ts_fsm *fsm = ts_config_priv(conf);
306 return fsm->tokens;
307}
308
309static unsigned int fsm_get_pattern_len(struct ts_config *conf)
310{
311 struct ts_fsm *fsm = ts_config_priv(conf);
312 return fsm->ntokens * sizeof(struct ts_fsm_token);
313}
314
315static struct ts_ops fsm_ops = {
316 .name = "fsm",
317 .find = fsm_find,
318 .init = fsm_init,
319 .get_pattern = fsm_get_pattern,
320 .get_pattern_len = fsm_get_pattern_len,
321 .owner = THIS_MODULE,
322 .list = LIST_HEAD_INIT(fsm_ops.list)
323};
324
325static int __init init_fsm(void)
326{
327 return textsearch_register(&fsm_ops);
328}
329
330static void __exit exit_fsm(void)
331{
332 textsearch_unregister(&fsm_ops);
333}
334
335MODULE_LICENSE("GPL");
336
337module_init(init_fsm);
338module_exit(exit_fsm);
diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c
new file mode 100644
index 000000000000..73266b975585
--- /dev/null
+++ b/lib/ts_kmp.c
@@ -0,0 +1,145 @@
1/*
2 * lib/ts_kmp.c Knuth-Morris-Pratt text search implementation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Thomas Graf <tgraf@suug.ch>
10 *
11 * ==========================================================================
12 *
13 * Implements a linear-time string-matching algorithm due to Knuth,
14 * Morris, and Pratt [1]. Their algorithm avoids the explicit
15 * computation of the transition function DELTA altogether. Its
16 * matching time is O(n), for n being length(text), using just an
17 * auxiliary function PI[1..m], for m being length(pattern),
18 * precomputed from the pattern in time O(m). The array PI allows
19 * the transition function DELTA to be computed efficiently
20 * "on the fly" as needed. Roughly speaking, for any state
21 * "q" = 0,1,...,m and any character "a" in SIGMA, the value
22 * PI["q"] contains the information that is independent of "a" and
23 * is needed to compute DELTA("q", "a") [2]. Since the array PI
24 * has only m entries, whereas DELTA has O(m|SIGMA|) entries, we
25 * save a factor of |SIGMA| in the preprocessing time by computing
26 * PI rather than DELTA.
27 *
28 * [1] Cormen, Leiserson, Rivest, Stein
 28 * Introduction to Algorithms, 2nd Edition, MIT Press
 29 * [2] See finite automaton theory
31 */
32
33#include <linux/config.h>
34#include <linux/module.h>
35#include <linux/types.h>
36#include <linux/string.h>
37#include <linux/textsearch.h>
38
39struct ts_kmp
40{
41 u8 * pattern;
42 unsigned int pattern_len;
43 unsigned int prefix_tbl[0];
44};
45
46static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state)
47{
48 struct ts_kmp *kmp = ts_config_priv(conf);
49 unsigned int i, q = 0, text_len, consumed = state->offset;
50 const u8 *text;
51
52 for (;;) {
53 text_len = conf->get_next_block(consumed, &text, conf, state);
54
55 if (unlikely(text_len == 0))
56 break;
57
58 for (i = 0; i < text_len; i++) {
59 while (q > 0 && kmp->pattern[q] != text[i])
60 q = kmp->prefix_tbl[q - 1];
61 if (kmp->pattern[q] == text[i])
62 q++;
63 if (unlikely(q == kmp->pattern_len)) {
64 state->offset = consumed + i + 1;
65 return state->offset - kmp->pattern_len;
66 }
67 }
68
69 consumed += text_len;
70 }
71
72 return UINT_MAX;
73}
74
75static inline void compute_prefix_tbl(const u8 *pattern, unsigned int len,
76 unsigned int *prefix_tbl)
77{
78 unsigned int k, q;
79
80 for (k = 0, q = 1; q < len; q++) {
81 while (k > 0 && pattern[k] != pattern[q])
82 k = prefix_tbl[k-1];
83 if (pattern[k] == pattern[q])
84 k++;
85 prefix_tbl[q] = k;
86 }
87}
88
89static struct ts_config *kmp_init(const void *pattern, unsigned int len,
90 int gfp_mask)
91{
92 struct ts_config *conf;
93 struct ts_kmp *kmp;
94 unsigned int prefix_tbl_len = len * sizeof(unsigned int);
95 size_t priv_size = sizeof(*kmp) + len + prefix_tbl_len;
96
97 conf = alloc_ts_config(priv_size, gfp_mask);
98 if (IS_ERR(conf))
99 return conf;
100
101 kmp = ts_config_priv(conf);
102 kmp->pattern_len = len;
103 compute_prefix_tbl(pattern, len, kmp->prefix_tbl);
104 kmp->pattern = (u8 *) kmp->prefix_tbl + prefix_tbl_len;
105 memcpy(kmp->pattern, pattern, len);
106
107 return conf;
108}
109
110static void *kmp_get_pattern(struct ts_config *conf)
111{
112 struct ts_kmp *kmp = ts_config_priv(conf);
113 return kmp->pattern;
114}
115
116static unsigned int kmp_get_pattern_len(struct ts_config *conf)
117{
118 struct ts_kmp *kmp = ts_config_priv(conf);
119 return kmp->pattern_len;
120}
121
122static struct ts_ops kmp_ops = {
123 .name = "kmp",
124 .find = kmp_find,
125 .init = kmp_init,
126 .get_pattern = kmp_get_pattern,
127 .get_pattern_len = kmp_get_pattern_len,
128 .owner = THIS_MODULE,
129 .list = LIST_HEAD_INIT(kmp_ops.list)
130};
131
132static int __init init_kmp(void)
133{
134 return textsearch_register(&kmp_ops);
135}
136
137static void __exit exit_kmp(void)
138{
139 textsearch_unregister(&kmp_ops);
140}
141
142MODULE_LICENSE("GPL");
143
144module_init(init_kmp);
145module_exit(exit_kmp);
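The prefix table stores, for each position q, the length of the longest proper prefix of the pattern that is also a suffix of pattern[0..q]; kmp_find() uses prefix_tbl[q-1] to fall back on a mismatch without re-reading the text. A worked example of what compute_prefix_tbl() above produces for the pattern "ababc" (prefix_tbl[0] stays 0 because alloc_ts_config() zeroes the private area and the loop starts at q = 1):

	pattern:     a  b  a  b  c
	q:           0  1  2  3  4
	prefix_tbl:  0  0  1  2  0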
diff --git a/net/core/dev.c b/net/core/dev.c
index ab935778ce81..7016e0c36b3d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -115,18 +115,6 @@
115#endif /* CONFIG_NET_RADIO */ 115#endif /* CONFIG_NET_RADIO */
116#include <asm/current.h> 116#include <asm/current.h>
117 117
118/* This define, if set, will randomly drop a packet when congestion
119 * is more than moderate. It helps fairness in the multi-interface
120 * case when one of them is a hog, but it kills performance for the
121 * single interface case so it is off now by default.
122 */
123#undef RAND_LIE
124
125/* Setting this will sample the queue lengths and thus congestion
126 * via a timer instead of as each packet is received.
127 */
128#undef OFFLINE_SAMPLE
129
130/* 118/*
131 * The list of packet types we will receive (as opposed to discard) 119 * The list of packet types we will receive (as opposed to discard)
132 * and the routines to invoke. 120 * and the routines to invoke.
@@ -159,11 +147,6 @@ static DEFINE_SPINLOCK(ptype_lock);
159static struct list_head ptype_base[16]; /* 16 way hashed list */ 147static struct list_head ptype_base[16]; /* 16 way hashed list */
160static struct list_head ptype_all; /* Taps */ 148static struct list_head ptype_all; /* Taps */
161 149
162#ifdef OFFLINE_SAMPLE
163static void sample_queue(unsigned long dummy);
164static struct timer_list samp_timer = TIMER_INITIALIZER(sample_queue, 0, 0);
165#endif
166
167/* 150/*
168 * The @dev_base list is protected by @dev_base_lock and the rtln 151 * The @dev_base list is protected by @dev_base_lock and the rtln
169 * semaphore. 152 * semaphore.
@@ -215,7 +198,7 @@ static struct notifier_block *netdev_chain;
215 * Device drivers call our routines to queue packets here. We empty the 198 * Device drivers call our routines to queue packets here. We empty the
216 * queue in the local softnet handler. 199 * queue in the local softnet handler.
217 */ 200 */
218DEFINE_PER_CPU(struct softnet_data, softnet_data) = { 0, }; 201DEFINE_PER_CPU(struct softnet_data, softnet_data) = { NULL };
219 202
220#ifdef CONFIG_SYSFS 203#ifdef CONFIG_SYSFS
221extern int netdev_sysfs_init(void); 204extern int netdev_sysfs_init(void);
@@ -1363,71 +1346,13 @@ out:
1363 Receiver routines 1346 Receiver routines
1364 =======================================================================*/ 1347 =======================================================================*/
1365 1348
1366int netdev_max_backlog = 300; 1349int netdev_max_backlog = 1000;
1350int netdev_budget = 300;
1367int weight_p = 64; /* old backlog weight */ 1351int weight_p = 64; /* old backlog weight */
1368/* These numbers are selected based on intuition and some
1369 * experimentatiom, if you have more scientific way of doing this
1370 * please go ahead and fix things.
1371 */
1372int no_cong_thresh = 10;
1373int no_cong = 20;
1374int lo_cong = 100;
1375int mod_cong = 290;
1376 1352
1377DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, }; 1353DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
1378 1354
1379 1355
1380static void get_sample_stats(int cpu)
1381{
1382#ifdef RAND_LIE
1383 unsigned long rd;
1384 int rq;
1385#endif
1386 struct softnet_data *sd = &per_cpu(softnet_data, cpu);
1387 int blog = sd->input_pkt_queue.qlen;
1388 int avg_blog = sd->avg_blog;
1389
1390 avg_blog = (avg_blog >> 1) + (blog >> 1);
1391
1392 if (avg_blog > mod_cong) {
1393 /* Above moderate congestion levels. */
1394 sd->cng_level = NET_RX_CN_HIGH;
1395#ifdef RAND_LIE
1396 rd = net_random();
1397 rq = rd % netdev_max_backlog;
1398 if (rq < avg_blog) /* unlucky bastard */
1399 sd->cng_level = NET_RX_DROP;
1400#endif
1401 } else if (avg_blog > lo_cong) {
1402 sd->cng_level = NET_RX_CN_MOD;
1403#ifdef RAND_LIE
1404 rd = net_random();
1405 rq = rd % netdev_max_backlog;
1406 if (rq < avg_blog) /* unlucky bastard */
1407 sd->cng_level = NET_RX_CN_HIGH;
1408#endif
1409 } else if (avg_blog > no_cong)
1410 sd->cng_level = NET_RX_CN_LOW;
1411 else /* no congestion */
1412 sd->cng_level = NET_RX_SUCCESS;
1413
1414 sd->avg_blog = avg_blog;
1415}
1416
1417#ifdef OFFLINE_SAMPLE
1418static void sample_queue(unsigned long dummy)
1419{
1420/* 10 ms 0r 1ms -- i don't care -- JHS */
1421 int next_tick = 1;
1422 int cpu = smp_processor_id();
1423
1424 get_sample_stats(cpu);
1425 next_tick += jiffies;
1426 mod_timer(&samp_timer, next_tick);
1427}
1428#endif
1429
1430
1431/** 1356/**
1432 * netif_rx - post buffer to the network code 1357 * netif_rx - post buffer to the network code
1433 * @skb: buffer to post 1358 * @skb: buffer to post
@@ -1448,7 +1373,6 @@ static void sample_queue(unsigned long dummy)
1448 1373
1449int netif_rx(struct sk_buff *skb) 1374int netif_rx(struct sk_buff *skb)
1450{ 1375{
1451 int this_cpu;
1452 struct softnet_data *queue; 1376 struct softnet_data *queue;
1453 unsigned long flags; 1377 unsigned long flags;
1454 1378
@@ -1464,38 +1388,22 @@ int netif_rx(struct sk_buff *skb)
1464 * short when CPU is congested, but is still operating. 1388 * short when CPU is congested, but is still operating.
1465 */ 1389 */
1466 local_irq_save(flags); 1390 local_irq_save(flags);
1467 this_cpu = smp_processor_id();
1468 queue = &__get_cpu_var(softnet_data); 1391 queue = &__get_cpu_var(softnet_data);
1469 1392
1470 __get_cpu_var(netdev_rx_stat).total++; 1393 __get_cpu_var(netdev_rx_stat).total++;
1471 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) { 1394 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
1472 if (queue->input_pkt_queue.qlen) { 1395 if (queue->input_pkt_queue.qlen) {
1473 if (queue->throttle)
1474 goto drop;
1475
1476enqueue: 1396enqueue:
1477 dev_hold(skb->dev); 1397 dev_hold(skb->dev);
1478 __skb_queue_tail(&queue->input_pkt_queue, skb); 1398 __skb_queue_tail(&queue->input_pkt_queue, skb);
1479#ifndef OFFLINE_SAMPLE
1480 get_sample_stats(this_cpu);
1481#endif
1482 local_irq_restore(flags); 1399 local_irq_restore(flags);
1483 return queue->cng_level; 1400 return NET_RX_SUCCESS;
1484 } 1401 }
1485 1402
1486 if (queue->throttle)
1487 queue->throttle = 0;
1488
1489 netif_rx_schedule(&queue->backlog_dev); 1403 netif_rx_schedule(&queue->backlog_dev);
1490 goto enqueue; 1404 goto enqueue;
1491 } 1405 }
1492 1406
1493 if (!queue->throttle) {
1494 queue->throttle = 1;
1495 __get_cpu_var(netdev_rx_stat).throttled++;
1496 }
1497
1498drop:
1499 __get_cpu_var(netdev_rx_stat).dropped++; 1407 __get_cpu_var(netdev_rx_stat).dropped++;
1500 local_irq_restore(flags); 1408 local_irq_restore(flags);
1501 1409
@@ -1780,8 +1688,6 @@ job_done:
1780 smp_mb__before_clear_bit(); 1688 smp_mb__before_clear_bit();
1781 netif_poll_enable(backlog_dev); 1689 netif_poll_enable(backlog_dev);
1782 1690
1783 if (queue->throttle)
1784 queue->throttle = 0;
1785 local_irq_enable(); 1691 local_irq_enable();
1786 return 0; 1692 return 0;
1787} 1693}
@@ -1790,8 +1696,7 @@ static void net_rx_action(struct softirq_action *h)
1790{ 1696{
1791 struct softnet_data *queue = &__get_cpu_var(softnet_data); 1697 struct softnet_data *queue = &__get_cpu_var(softnet_data);
1792 unsigned long start_time = jiffies; 1698 unsigned long start_time = jiffies;
1793 int budget = netdev_max_backlog; 1699 int budget = netdev_budget;
1794
1795 1700
1796 local_irq_disable(); 1701 local_irq_disable();
1797 1702
@@ -2055,15 +1960,9 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
2055 struct netif_rx_stats *s = v; 1960 struct netif_rx_stats *s = v;
2056 1961
2057 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n", 1962 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
2058 s->total, s->dropped, s->time_squeeze, s->throttled, 1963 s->total, s->dropped, s->time_squeeze, 0,
2059 s->fastroute_hit, s->fastroute_success, s->fastroute_defer, 1964 0, 0, 0, 0, /* was fastroute */
2060 s->fastroute_deferred_out, 1965 s->cpu_collision );
2061#if 0
2062 s->fastroute_latency_reduction
2063#else
2064 s->cpu_collision
2065#endif
2066 );
2067 return 0; 1966 return 0;
2068} 1967}
2069 1968
@@ -3305,9 +3204,6 @@ static int __init net_dev_init(void)
3305 3204
3306 queue = &per_cpu(softnet_data, i); 3205 queue = &per_cpu(softnet_data, i);
3307 skb_queue_head_init(&queue->input_pkt_queue); 3206 skb_queue_head_init(&queue->input_pkt_queue);
3308 queue->throttle = 0;
3309 queue->cng_level = 0;
3310 queue->avg_blog = 10; /* arbitrary non-zero */
3311 queue->completion_queue = NULL; 3207 queue->completion_queue = NULL;
3312 INIT_LIST_HEAD(&queue->poll_list); 3208 INIT_LIST_HEAD(&queue->poll_list);
3313 set_bit(__LINK_STATE_START, &queue->backlog_dev.state); 3209 set_bit(__LINK_STATE_START, &queue->backlog_dev.state);
@@ -3316,11 +3212,6 @@ static int __init net_dev_init(void)
3316 atomic_set(&queue->backlog_dev.refcnt, 1); 3212 atomic_set(&queue->backlog_dev.refcnt, 1);
3317 } 3213 }
3318 3214
3319#ifdef OFFLINE_SAMPLE
3320 samp_timer.expires = jiffies + (10 * HZ);
3321 add_timer(&samp_timer);
3322#endif
3323
3324 dev_boot_phase = 0; 3215 dev_boot_phase = 0;
3325 3216
3326 open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL); 3217 open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
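With the congestion-level sampling gone, netif_rx() boils down to a queue-length check against netdev_max_backlog. Reassembled from the right-hand side of the hunk above; the tail of the function (the drop path after the final stats update) lies outside the context shown and is sketched here on the assumption that it frees the skb and returns NET_RX_DROP:

	int netif_rx(struct sk_buff *skb)
	{
		struct softnet_data *queue;
		unsigned long flags;

		/* ... netpoll and timestamp handling unchanged ... */

		local_irq_save(flags);
		queue = &__get_cpu_var(softnet_data);

		__get_cpu_var(netdev_rx_stat).total++;
		if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
			if (queue->input_pkt_queue.qlen) {
	enqueue:
				dev_hold(skb->dev);
				__skb_queue_tail(&queue->input_pkt_queue, skb);
				local_irq_restore(flags);
				return NET_RX_SUCCESS;
			}

			netif_rx_schedule(&queue->backlog_dev);
			goto enqueue;
		}

		__get_cpu_var(netdev_rx_stat).dropped++;
		local_irq_restore(flags);

		kfree_skb(skb);		/* assumed drop path, not in the hunk */
		return NET_RX_DROP;
	}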
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 6d68c03bc051..bb73b2190ec7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1500,6 +1500,159 @@ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
1500 skb_split_no_header(skb, skb1, len, pos); 1500 skb_split_no_header(skb, skb1, len, pos);
1501} 1501}
1502 1502
1503/**
1504 * skb_prepare_seq_read - Prepare a sequential read of skb data
1505 * @skb: the buffer to read
1506 * @from: lower offset of data to be read
1507 * @to: upper offset of data to be read
1508 * @st: state variable
1509 *
1510 * Initializes the specified state variable. Must be called before
1511 * invoking skb_seq_read() for the first time.
1512 */
1513void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
1514 unsigned int to, struct skb_seq_state *st)
1515{
1516 st->lower_offset = from;
1517 st->upper_offset = to;
1518 st->root_skb = st->cur_skb = skb;
1519 st->frag_idx = st->stepped_offset = 0;
1520 st->frag_data = NULL;
1521}
1522
1523/**
1524 * skb_seq_read - Sequentially read skb data
1525 * @consumed: number of bytes consumed by the caller so far
1526 * @data: destination pointer for data to be returned
1527 * @st: state variable
1528 *
1529 * Reads a block of skb data at &consumed relative to the
1530 * lower offset specified to skb_prepare_seq_read(). Assigns
1531 * the head of the data block to &data and returns the length
1532 * of the block or 0 if the end of the skb data or the upper
1533 * offset has been reached.
1534 *
1535 * The caller is not required to consume all of the data
1536 * returned, i.e. &consumed is typically set to the number
1537 * of bytes already consumed and the next call to
1538 * skb_seq_read() will return the remaining part of the block.
1539 *
1540 * Note: The size of each block of data returned can be arbitrary;
1541 * this limitation is the cost for zerocopy sequential
1542 * reads of potentially non-linear data.
1543 *
1544 * Note: Fragment lists within fragments are not implemented
1545 * at the moment, state->root_skb could be replaced with
1546 * a stack for this purpose.
1547 */
1548unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
1549 struct skb_seq_state *st)
1550{
1551 unsigned int block_limit, abs_offset = consumed + st->lower_offset;
1552 skb_frag_t *frag;
1553
1554 if (unlikely(abs_offset >= st->upper_offset))
1555 return 0;
1556
1557next_skb:
1558 block_limit = skb_headlen(st->cur_skb);
1559
1560 if (abs_offset < block_limit) {
1561 *data = st->cur_skb->data + abs_offset;
1562 return block_limit - abs_offset;
1563 }
1564
1565 if (st->frag_idx == 0 && !st->frag_data)
1566 st->stepped_offset += skb_headlen(st->cur_skb);
1567
1568 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
1569 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
1570 block_limit = frag->size + st->stepped_offset;
1571
1572 if (abs_offset < block_limit) {
1573 if (!st->frag_data)
1574 st->frag_data = kmap_skb_frag(frag);
1575
1576 *data = (u8 *) st->frag_data + frag->page_offset +
1577 (abs_offset - st->stepped_offset);
1578
1579 return block_limit - abs_offset;
1580 }
1581
1582 if (st->frag_data) {
1583 kunmap_skb_frag(st->frag_data);
1584 st->frag_data = NULL;
1585 }
1586
1587 st->frag_idx++;
1588 st->stepped_offset += frag->size;
1589 }
1590
1591 if (st->cur_skb->next) {
1592 st->cur_skb = st->cur_skb->next;
1593 st->frag_idx = 0;
1594 goto next_skb;
1595 } else if (st->root_skb == st->cur_skb &&
1596 skb_shinfo(st->root_skb)->frag_list) {
1597 st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
1598 goto next_skb;
1599 }
1600
1601 return 0;
1602}
1603
1604/**
1605 * skb_abort_seq_read - Abort a sequential read of skb data
1606 * @st: state variable
1607 *
1608 * Must be called if the sequential read was aborted before
1609 * skb_seq_read() returned 0, to release any mapped fragment.
1610 */
1611void skb_abort_seq_read(struct skb_seq_state *st)
1612{
1613 if (st->frag_data)
1614 kunmap_skb_frag(st->frag_data);
1615}
1616
1617#define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
1618
1619static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
1620 struct ts_config *conf,
1621 struct ts_state *state)
1622{
1623 return skb_seq_read(offset, text, TS_SKB_CB(state));
1624}
1625
1626static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
1627{
1628 skb_abort_seq_read(TS_SKB_CB(state));
1629}
1630
1631/**
1632 * skb_find_text - Find a text pattern in skb data
1633 * @skb: the buffer to look in
1634 * @from: search offset
1635 * @to: search limit
1636 * @config: textsearch configuration
1637 * @state: uninitialized textsearch state variable
1638 *
1639 * Finds a pattern in the skb data according to the specified
1640 * textsearch configuration. Use textsearch_next() to retrieve
1641 * subsequent occurrences of the pattern. Returns the offset
1642 * to the first occurrence or UINT_MAX if no match was found.
1643 */
1644unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
1645 unsigned int to, struct ts_config *config,
1646 struct ts_state *state)
1647{
1648 config->get_next_block = skb_ts_get_next_block;
1649 config->finish = skb_ts_finish;
1650
1651 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
1652
1653 return textsearch_find(config, state);
1654}
1655
1503void __init skb_init(void) 1656void __init skb_init(void)
1504{ 1657{
1505 skbuff_head_cache = kmem_cache_create("skbuff_head_cache", 1658 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
@@ -1538,3 +1691,7 @@ EXPORT_SYMBOL(skb_queue_tail);
1538EXPORT_SYMBOL(skb_unlink); 1691EXPORT_SYMBOL(skb_unlink);
1539EXPORT_SYMBOL(skb_append); 1692EXPORT_SYMBOL(skb_append);
1540EXPORT_SYMBOL(skb_split); 1693EXPORT_SYMBOL(skb_split);
1694EXPORT_SYMBOL(skb_prepare_seq_read);
1695EXPORT_SYMBOL(skb_seq_read);
1696EXPORT_SYMBOL(skb_abort_seq_read);
1697EXPORT_SYMBOL(skb_find_text);
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 880a88815211..8f817ad9f546 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -13,12 +13,8 @@
13#ifdef CONFIG_SYSCTL 13#ifdef CONFIG_SYSCTL
14 14
15extern int netdev_max_backlog; 15extern int netdev_max_backlog;
16extern int netdev_budget;
16extern int weight_p; 17extern int weight_p;
17extern int no_cong_thresh;
18extern int no_cong;
19extern int lo_cong;
20extern int mod_cong;
21extern int netdev_fastroute;
22extern int net_msg_cost; 18extern int net_msg_cost;
23extern int net_msg_burst; 19extern int net_msg_burst;
24 20
@@ -86,38 +82,6 @@ ctl_table core_table[] = {
86 .proc_handler = &proc_dointvec 82 .proc_handler = &proc_dointvec
87 }, 83 },
88 { 84 {
89 .ctl_name = NET_CORE_NO_CONG_THRESH,
90 .procname = "no_cong_thresh",
91 .data = &no_cong_thresh,
92 .maxlen = sizeof(int),
93 .mode = 0644,
94 .proc_handler = &proc_dointvec
95 },
96 {
97 .ctl_name = NET_CORE_NO_CONG,
98 .procname = "no_cong",
99 .data = &no_cong,
100 .maxlen = sizeof(int),
101 .mode = 0644,
102 .proc_handler = &proc_dointvec
103 },
104 {
105 .ctl_name = NET_CORE_LO_CONG,
106 .procname = "lo_cong",
107 .data = &lo_cong,
108 .maxlen = sizeof(int),
109 .mode = 0644,
110 .proc_handler = &proc_dointvec
111 },
112 {
113 .ctl_name = NET_CORE_MOD_CONG,
114 .procname = "mod_cong",
115 .data = &mod_cong,
116 .maxlen = sizeof(int),
117 .mode = 0644,
118 .proc_handler = &proc_dointvec
119 },
120 {
121 .ctl_name = NET_CORE_MSG_COST, 85 .ctl_name = NET_CORE_MSG_COST,
122 .procname = "message_cost", 86 .procname = "message_cost",
123 .data = &net_msg_cost, 87 .data = &net_msg_cost,
@@ -161,6 +125,14 @@ ctl_table core_table[] = {
161 .mode = 0644, 125 .mode = 0644,
162 .proc_handler = &proc_dointvec 126 .proc_handler = &proc_dointvec
163 }, 127 },
128 {
129 .ctl_name = NET_CORE_BUDGET,
130 .procname = "netdev_budget",
131 .data = &netdev_budget,
132 .maxlen = sizeof(int),
133 .mode = 0644,
134 .proc_handler = &proc_dointvec
135 },
164 { .ctl_name = 0 } 136 { .ctl_name = 0 }
165}; 137};
166 138
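A small userspace sketch, not part of the patch, showing how the new tunable surfaces through procfs; the value 300 is an arbitrary example, not a recommendation:

#include <stdio.h>

int main(void)
{
	/* netdev_budget caps how many packets one NET_RX softirq run may
	 * process across all devices before yielding the CPU. */
	FILE *f = fopen("/proc/sys/net/core/netdev_budget", "w");

	if (!f) {
		perror("netdev_budget");
		return 1;
	}
	fprintf(f, "%d\n", 300);
	return fclose(f) ? 1 : 0;
}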
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f3dbc8dc1263..882436da9a3a 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1927,6 +1927,25 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
1927 return tp->af_specific->setsockopt(sk, level, optname, 1927 return tp->af_specific->setsockopt(sk, level, optname,
1928 optval, optlen); 1928 optval, optlen);
1929 1929
 1930 /* This is a string value; all the others are ints */
1931 if (optname == TCP_CONGESTION) {
1932 char name[TCP_CA_NAME_MAX];
1933
1934 if (optlen < 1)
1935 return -EINVAL;
1936
1937 val = strncpy_from_user(name, optval,
1938 min(TCP_CA_NAME_MAX-1, optlen));
1939 if (val < 0)
1940 return -EFAULT;
1941 name[val] = 0;
1942
1943 lock_sock(sk);
1944 err = tcp_set_congestion_control(tp, name);
1945 release_sock(sk);
1946 return err;
1947 }
1948
1930 if (optlen < sizeof(int)) 1949 if (optlen < sizeof(int))
1931 return -EINVAL; 1950 return -EINVAL;
1932 1951
@@ -2211,6 +2230,16 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2211 case TCP_QUICKACK: 2230 case TCP_QUICKACK:
2212 val = !tp->ack.pingpong; 2231 val = !tp->ack.pingpong;
2213 break; 2232 break;
2233
2234 case TCP_CONGESTION:
2235 if (get_user(len, optlen))
2236 return -EFAULT;
2237 len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
2238 if (put_user(len, optlen))
2239 return -EFAULT;
2240 if (copy_to_user(optval, tp->ca_ops->name, len))
2241 return -EFAULT;
2242 return 0;
2214 default: 2243 default:
2215 return -ENOPROTOOPT; 2244 return -ENOPROTOOPT;
2216 }; 2245 };
@@ -2224,7 +2253,7 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2224 2253
2225 2254
2226extern void __skb_cb_too_small_for_tcp(int, int); 2255extern void __skb_cb_too_small_for_tcp(int, int);
2227extern void tcpdiag_init(void); 2256extern struct tcp_congestion_ops tcp_reno;
2228 2257
2229static __initdata unsigned long thash_entries; 2258static __initdata unsigned long thash_entries;
2230static int __init set_thash_entries(char *str) 2259static int __init set_thash_entries(char *str)
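A hedged userspace sketch, not part of the patch, exercising the new option from both directions; setting it ends up in the tcp_set_congestion_control() helper added in the tcp_cong.c hunk below. It assumes TCP_CONGESTION is visible through the installed kernel headers (the one-line include/linux/tcp.h change in this series); "reno" is only an example name:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

int main(void)
{
	char name[16];		/* room for the kernel's TCP_CA_NAME_MAX */
	socklen_t len = sizeof(name);
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;

	/* The option value is a string, unlike the other TCP options. */
	if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "reno",
		       strlen("reno")) < 0)
		perror("setsockopt TCP_CONGESTION");

	if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, &len) == 0)
		printf("congestion control: %.*s\n", (int)len, name);

	return 0;
}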
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 665394a63ae4..4970d10a7785 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -21,7 +21,7 @@ static struct tcp_congestion_ops *tcp_ca_find(const char *name)
21{ 21{
22 struct tcp_congestion_ops *e; 22 struct tcp_congestion_ops *e;
23 23
24 list_for_each_entry(e, &tcp_cong_list, list) { 24 list_for_each_entry_rcu(e, &tcp_cong_list, list) {
25 if (strcmp(e->name, name) == 0) 25 if (strcmp(e->name, name) == 0)
26 return e; 26 return e;
27 } 27 }
@@ -77,6 +77,9 @@ void tcp_init_congestion_control(struct tcp_sock *tp)
77{ 77{
78 struct tcp_congestion_ops *ca; 78 struct tcp_congestion_ops *ca;
79 79
80 if (tp->ca_ops != &tcp_init_congestion_ops)
81 return;
82
80 rcu_read_lock(); 83 rcu_read_lock();
81 list_for_each_entry_rcu(ca, &tcp_cong_list, list) { 84 list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
82 if (try_module_get(ca->owner)) { 85 if (try_module_get(ca->owner)) {
@@ -139,6 +142,34 @@ void tcp_get_default_congestion_control(char *name)
139 rcu_read_unlock(); 142 rcu_read_unlock();
140} 143}
141 144
145/* Change congestion control for socket */
146int tcp_set_congestion_control(struct tcp_sock *tp, const char *name)
147{
148 struct tcp_congestion_ops *ca;
149 int err = 0;
150
151 rcu_read_lock();
152 ca = tcp_ca_find(name);
153 if (ca == tp->ca_ops)
154 goto out;
155
156 if (!ca)
157 err = -ENOENT;
158
159 else if (!try_module_get(ca->owner))
160 err = -EBUSY;
161
162 else {
163 tcp_cleanup_congestion_control(tp);
164 tp->ca_ops = ca;
165 if (tp->ca_ops->init)
166 tp->ca_ops->init(tp);
167 }
168 out:
169 rcu_read_unlock();
170 return err;
171}
172
142/* 173/*
143 * TCP Reno congestion control 174 * TCP Reno congestion control
144 * This is special case used for fallback as well. 175 * This is special case used for fallback as well.
@@ -192,4 +223,15 @@ struct tcp_congestion_ops tcp_reno = {
192 .min_cwnd = tcp_reno_min_cwnd, 223 .min_cwnd = tcp_reno_min_cwnd,
193}; 224};
194 225
195EXPORT_SYMBOL_GPL(tcp_reno); 226/* Initial congestion control used (until SYN)
 227 * really reno under another name so we can tell the difference
228 * during tcp_set_default_congestion_control
229 */
230struct tcp_congestion_ops tcp_init_congestion_ops = {
231 .name = "",
232 .owner = THIS_MODULE,
233 .ssthresh = tcp_reno_ssthresh,
234 .cong_avoid = tcp_reno_cong_avoid,
235 .min_cwnd = tcp_reno_min_cwnd,
236};
237EXPORT_SYMBOL_GPL(tcp_init_congestion_ops);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9122814c13ad..ebf112347a97 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2048,7 +2048,7 @@ static int tcp_v4_init_sock(struct sock *sk)
2048 tp->mss_cache_std = tp->mss_cache = 536; 2048 tp->mss_cache_std = tp->mss_cache = 536;
2049 2049
2050 tp->reordering = sysctl_tcp_reordering; 2050 tp->reordering = sysctl_tcp_reordering;
2051 tp->ca_ops = &tcp_reno; 2051 tp->ca_ops = &tcp_init_congestion_ops;
2052 2052
2053 sk->sk_state = TCP_CLOSE; 2053 sk->sk_state = TCP_CLOSE;
2054 2054
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index fce56039b0e9..9dac7fdf4726 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -2025,7 +2025,7 @@ static int tcp_v6_init_sock(struct sock *sk)
2025 sk->sk_state = TCP_CLOSE; 2025 sk->sk_state = TCP_CLOSE;
2026 2026
2027 tp->af_specific = &ipv6_specific; 2027 tp->af_specific = &ipv6_specific;
2028 tp->ca_ops = &tcp_reno; 2028 tp->ca_ops = &tcp_init_congestion_ops;
2029 sk->sk_write_space = sk_stream_write_space; 2029 sk->sk_write_space = sk_stream_write_space;
2030 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); 2030 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
2031 2031
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index b22c9beb604d..447b89e556b1 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -449,6 +449,18 @@ config NET_EMATCH_META
449 To compile this code as a module, choose M here: the 449 To compile this code as a module, choose M here: the
450 module will be called em_meta. 450 module will be called em_meta.
451 451
452config NET_EMATCH_TEXT
453 tristate "Textsearch"
454 depends on NET_EMATCH
455 select TEXTSEARCH
456 ---help---
 457 Say Y here if you want to be able to classify packets based on
458 textsearch comparisons. Please select the appropriate textsearch
459 algorithms in the Library section.
460
461 To compile this code as a module, choose M here: the
462 module will be called em_text.
463
452config NET_CLS_ACT 464config NET_CLS_ACT
453 bool "Packet ACTION" 465 bool "Packet ACTION"
454 depends on EXPERIMENTAL && NET_CLS && NET_QOS 466 depends on EXPERIMENTAL && NET_CLS && NET_QOS
diff --git a/net/sched/Makefile b/net/sched/Makefile
index eb3fe583eba8..8f58cecd6266 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -40,3 +40,4 @@ obj-$(CONFIG_NET_EMATCH_CMP) += em_cmp.o
40obj-$(CONFIG_NET_EMATCH_NBYTE) += em_nbyte.o 40obj-$(CONFIG_NET_EMATCH_NBYTE) += em_nbyte.o
41obj-$(CONFIG_NET_EMATCH_U32) += em_u32.o 41obj-$(CONFIG_NET_EMATCH_U32) += em_u32.o
42obj-$(CONFIG_NET_EMATCH_META) += em_meta.o 42obj-$(CONFIG_NET_EMATCH_META) += em_meta.o
43obj-$(CONFIG_NET_EMATCH_TEXT) += em_text.o
diff --git a/net/sched/em_text.c b/net/sched/em_text.c
new file mode 100644
index 000000000000..873840d8d072
--- /dev/null
+++ b/net/sched/em_text.c
@@ -0,0 +1,157 @@
1/*
2 * net/sched/em_text.c Textsearch ematch
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Thomas Graf <tgraf@suug.ch>
10 */
11
12#include <linux/config.h>
13#include <linux/module.h>
14#include <linux/types.h>
15#include <linux/kernel.h>
16#include <linux/sched.h>
17#include <linux/string.h>
18#include <linux/skbuff.h>
19#include <linux/textsearch.h>
20#include <linux/tc_ematch/tc_em_text.h>
21#include <net/pkt_cls.h>
22
23struct text_match
24{
25 u16 from_offset;
26 u16 to_offset;
27 u8 from_layer;
28 u8 to_layer;
29 struct ts_config *config;
30};
31
32#define EM_TEXT_PRIV(m) ((struct text_match *) (m)->data)
33
34static int em_text_match(struct sk_buff *skb, struct tcf_ematch *m,
35 struct tcf_pkt_info *info)
36{
37 struct text_match *tm = EM_TEXT_PRIV(m);
38 int from, to;
39 struct ts_state state;
40
41 from = tcf_get_base_ptr(skb, tm->from_layer) - skb->data;
42 from += tm->from_offset;
43
44 to = tcf_get_base_ptr(skb, tm->to_layer) - skb->data;
45 to += tm->to_offset;
46
47 return skb_find_text(skb, from, to, tm->config, &state) != UINT_MAX;
48}
49
50static int em_text_change(struct tcf_proto *tp, void *data, int len,
51 struct tcf_ematch *m)
52{
53 struct text_match *tm;
54 struct tcf_em_text *conf = data;
55 struct ts_config *ts_conf;
56 int flags = 0;
57
58 printk("Configuring text: %s from %d:%d to %d:%d len %d\n", conf->algo, conf->from_offset,
59 conf->from_layer, conf->to_offset, conf->to_layer, conf->pattern_len);
60
61 if (len < sizeof(*conf) || len < (sizeof(*conf) + conf->pattern_len))
62 return -EINVAL;
63
64 if (conf->from_layer > conf->to_layer)
65 return -EINVAL;
66
67 if (conf->from_layer == conf->to_layer &&
68 conf->from_offset > conf->to_offset)
69 return -EINVAL;
70
71retry:
72 ts_conf = textsearch_prepare(conf->algo, (u8 *) conf + sizeof(*conf),
73 conf->pattern_len, GFP_KERNEL, flags);
74
75 if (flags & TS_AUTOLOAD)
76 rtnl_lock();
77
78 if (IS_ERR(ts_conf)) {
79 if (PTR_ERR(ts_conf) == -ENOENT && !(flags & TS_AUTOLOAD)) {
80 rtnl_unlock();
81 flags |= TS_AUTOLOAD;
82 goto retry;
83 } else
84 return PTR_ERR(ts_conf);
85 } else if (flags & TS_AUTOLOAD) {
86 textsearch_destroy(ts_conf);
87 return -EAGAIN;
88 }
89
90 tm = kmalloc(sizeof(*tm), GFP_KERNEL);
91 if (tm == NULL) {
92 textsearch_destroy(ts_conf);
93 return -ENOBUFS;
94 }
95
96 tm->from_offset = conf->from_offset;
97 tm->to_offset = conf->to_offset;
98 tm->from_layer = conf->from_layer;
99 tm->to_layer = conf->to_layer;
100 tm->config = ts_conf;
101
102 m->datalen = sizeof(*tm);
103 m->data = (unsigned long) tm;
104
105 return 0;
106}
107
108static void em_text_destroy(struct tcf_proto *tp, struct tcf_ematch *m)
109{
110 textsearch_destroy(EM_TEXT_PRIV(m)->config);
111}
112
113static int em_text_dump(struct sk_buff *skb, struct tcf_ematch *m)
114{
115 struct text_match *tm = EM_TEXT_PRIV(m);
116 struct tcf_em_text conf;
117
118 strncpy(conf.algo, tm->config->ops->name, sizeof(conf.algo) - 1);
119 conf.from_offset = tm->from_offset;
120 conf.to_offset = tm->to_offset;
121 conf.from_layer = tm->from_layer;
122 conf.to_layer = tm->to_layer;
123 conf.pattern_len = textsearch_get_pattern_len(tm->config);
124 conf.pad = 0;
125
126 RTA_PUT_NOHDR(skb, sizeof(conf), &conf);
127 RTA_APPEND(skb, conf.pattern_len, textsearch_get_pattern(tm->config));
128 return 0;
129
130rtattr_failure:
131 return -1;
132}
133
134static struct tcf_ematch_ops em_text_ops = {
135 .kind = TCF_EM_TEXT,
136 .change = em_text_change,
137 .match = em_text_match,
138 .destroy = em_text_destroy,
139 .dump = em_text_dump,
140 .owner = THIS_MODULE,
141 .link = LIST_HEAD_INIT(em_text_ops.link)
142};
143
144static int __init init_em_text(void)
145{
146 return tcf_em_register(&em_text_ops);
147}
148
149static void __exit exit_em_text(void)
150{
151 tcf_em_unregister(&em_text_ops);
152}
153
154MODULE_LICENSE("GPL");
155
156module_init(init_em_text);
157module_exit(exit_em_text);
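To close, a hedged userspace sketch, not from the patch, of the configuration blob em_text_change() parses: a header immediately followed by the raw pattern bytes. The struct below is only a stand-in; the real struct tcf_em_text lives in the new linux/tc_ematch/tc_em_text.h and may differ in exact types and packing, and in practice tc wraps such a blob in ematch netlink attributes:

#include <stdlib.h>
#include <string.h>

struct example_em_text {	/* illustrative stand-in for struct tcf_em_text */
	char algo[16];		/* textsearch algorithm name, e.g. "kmp" */
	unsigned short from_offset;
	unsigned short to_offset;
	unsigned short pattern_len;
	unsigned char from_layer;
	unsigned char to_layer;
	unsigned char pad;
};

/* em_text_change() checks len >= sizeof(*conf) + conf->pattern_len and reads
 * the pattern at (u8 *) conf + sizeof(*conf), i.e. right after the header. */
static void *example_build_em_text(const char *algo, const char *pattern,
				   size_t *lenp)
{
	struct example_em_text hdr;
	size_t plen = strlen(pattern);
	char *blob = malloc(sizeof(hdr) + plen);

	if (!blob)
		return NULL;

	memset(&hdr, 0, sizeof(hdr));
	strncpy(hdr.algo, algo, sizeof(hdr.algo) - 1);
	hdr.pattern_len = (unsigned short) plen;
	/* Layers and offsets are left at zero here; a real caller points them
	 * at the packet region it wants searched. */

	memcpy(blob, &hdr, sizeof(hdr));
	memcpy(blob + sizeof(hdr), pattern, plen);
	*lenp = sizeof(hdr) + plen;
	return blob;
}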