Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                  3
-rw-r--r--  lib/Kconfig.debug            2
-rw-r--r--  lib/Makefile                 6
-rw-r--r--  lib/crc32.c                  2
-rw-r--r--  lib/idr.c                    2
-rw-r--r--  lib/inflate.c               16
-rw-r--r--  lib/klist.c                  8
-rw-r--r--  lib/kobject_uevent.c         4
-rw-r--r--  lib/semaphore-sleepers.c   177
-rw-r--r--  lib/ts_bm.c                185
-rw-r--r--  lib/vsprintf.c               5
11 files changed, 392 insertions(+), 18 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index eeb429a52152..e43197efeb9c 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -72,6 +72,9 @@ config TEXTSEARCH
 config TEXTSEARCH_KMP
 	tristate
 
+config TEXTSEARCH_BM
+	tristate
+
 config TEXTSEARCH_FSM
 	tristate
 
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0c421295e613..299f7f3b5b08 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -141,7 +141,7 @@ config DEBUG_IOREMAP
 
 config DEBUG_FS
 	bool "Debug Filesystem"
-	depends on DEBUG_KERNEL
+	depends on DEBUG_KERNEL && SYSFS
 	help
 	  debugfs is a virtual file system that kernel developers use to put
 	  debugging files into. Enable this option to be able to read and
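
The hunk above makes DEBUG_FS require SYSFS in addition to DEBUG_KERNEL. As a hedged sketch of what the option gates, a minimal debugfs user might look like the following; the module name, directory, and counter variable are hypothetical and not part of this patch:

	#include <linux/module.h>
	#include <linux/debugfs.h>

	static u32 my_counter;			/* hypothetical value to expose */
	static struct dentry *my_dir, *my_file;

	static int __init myexample_init(void)
	{
		/* creates <debugfs>/myexample/counter as a read/write u32 */
		my_dir = debugfs_create_dir("myexample", NULL);
		if (!my_dir)
			return -ENODEV;
		my_file = debugfs_create_u32("counter", 0644, my_dir, &my_counter);
		return 0;
	}

	static void __exit myexample_exit(void)
	{
		debugfs_remove(my_file);
		debugfs_remove(my_dir);
	}

	module_init(myexample_init);
	module_exit(myexample_exit);
	MODULE_LICENSE("GPL");
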
diff --git a/lib/Makefile b/lib/Makefile
index beed1585294c..3e2bd0df23bb 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -5,11 +5,11 @@
 lib-y := errno.o ctype.o string.o vsprintf.o cmdline.o \
 	 bust_spinlocks.o rbtree.o radix-tree.o dump_stack.o \
 	 idr.o div64.o int_sqrt.o bitmap.o extable.o prio_tree.o \
-	 sha1.o halfmd4.o
+	 sha1.o
 
 lib-y	+= kobject.o kref.o kobject_uevent.o klist.o
 
-obj-y += sort.o parser.o
+obj-y += sort.o parser.o halfmd4.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
@@ -18,6 +18,7 @@ endif
 
 lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
+lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o
 lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
@@ -38,6 +39,7 @@ obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
 
 obj-$(CONFIG_TEXTSEARCH) += textsearch.o
 obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
+obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
 obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
 
 hostprogs-y := gen_crc32table
diff --git a/lib/crc32.c b/lib/crc32.c
index 58b222783f9c..065198f98b3f 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -473,7 +473,7 @@ static u32 test_step(u32 init, unsigned char *buf, size_t len)
 	init = bitreverse(init);
 	crc2 = bitreverse(crc1);
 	if (crc1 != bitreverse(crc2))
-		printf("\nBit reversal fail: 0x%08x -> %0x08x -> 0x%08x\n",
+		printf("\nBit reversal fail: 0x%08x -> 0x%08x -> 0x%08x\n",
 		       crc1, crc2, bitreverse(crc2));
 	crc1 = crc32_le(init, buf, len);
 	if (crc1 != crc2)
diff --git a/lib/idr.c b/lib/idr.c
index c5be889de449..6415d053e2bf 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -207,7 +207,7 @@ build_up:
 }
 
 /**
- * idr_get_new_above - allocate new idr entry above a start id
+ * idr_get_new_above - allocate new idr entry above or equal to a start id
  * @idp: idr handle
  * @ptr: pointer you want associated with the id
  * @start_id: id to start search at
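
The corrected kerneldoc matters to callers: the id handed back may equal start_id, not only exceed it. A hedged usage sketch against the idr API of this era follows; my_idr and register_thing() are hypothetical:

	#include <linux/idr.h>

	static struct idr my_idr;	/* hypothetical; idr_init(&my_idr) at setup */

	int register_thing(void *thing, int *id)
	{
		if (!idr_pre_get(&my_idr, GFP_KERNEL))	/* preload tree nodes */
			return -ENOMEM;
		/* may hand back 42 itself, not only ids strictly above it */
		return idr_get_new_above(&my_idr, thing, 42, id);
	}
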
diff --git a/lib/inflate.c b/lib/inflate.c
index 75e7d303c72e..6db6e98d1637 100644
--- a/lib/inflate.c
+++ b/lib/inflate.c
@@ -326,7 +326,7 @@ DEBG("huft1 ");
   {
     *t = (struct huft *)NULL;
     *m = 0;
-    return 0;
+    return 2;
   }
 
 DEBG("huft2 ");
@@ -374,6 +374,7 @@ DEBG("huft5 ");
     if ((j = *p++) != 0)
       v[x[j]++] = i;
   } while (++i < n);
+  n = x[g];                   /* set n to length of v */
 
 DEBG("h6 ");
 
@@ -410,12 +411,13 @@ DEBG1("1 ");
 DEBG1("2 ");
           f -= a + 1;           /* deduct codes from patterns left */
           xp = c + k;
-          while (++j < z)       /* try smaller tables up to z bits */
-          {
-            if ((f <<= 1) <= *++xp)
-              break;            /* enough codes to use up j bits */
-            f -= *xp;           /* else deduct codes from patterns */
-          }
+          if (j < z)
+            while (++j < z)     /* try smaller tables up to z bits */
+            {
+              if ((f <<= 1) <= *++xp)
+                break;          /* enough codes to use up j bits */
+              f -= *xp;         /* else deduct codes from patterns */
+            }
         }
 DEBG1("3 ");
         z = 1 << j;             /* table entries for j-bit table */
diff --git a/lib/klist.c b/lib/klist.c
index 738ab810160a..a70c836c5c4c 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -79,11 +79,11 @@ static void klist_node_init(struct klist * k, struct klist_node * n)
 
 /**
  * klist_add_head - Initialize a klist_node and add it to front.
- * @k: klist it's going on.
  * @n: node we're adding.
+ * @k: klist it's going on.
  */
 
-void klist_add_head(struct klist * k, struct klist_node * n)
+void klist_add_head(struct klist_node * n, struct klist * k)
 {
 	klist_node_init(k, n);
 	add_head(k, n);
@@ -94,11 +94,11 @@ EXPORT_SYMBOL_GPL(klist_add_head);
 
 /**
  * klist_add_tail - Initialize a klist_node and add it to back.
- * @k: klist it's going on.
  * @n: node we're adding.
+ * @k: klist it's going on.
  */
 
-void klist_add_tail(struct klist * k, struct klist_node * n)
+void klist_add_tail(struct klist_node * n, struct klist * k)
 {
 	klist_node_init(k, n);
 	add_tail(k, n);
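
Since both prototypes flip to node-first, every caller needs updating to match. A hedged sketch of a caller under the new argument order; struct my_dev and my_list are hypothetical:

	#include <linux/klist.h>

	struct my_dev {
		struct klist_node knode;	/* hypothetical embedded node */
	};

	static struct klist my_list;		/* klist_init(&my_list) at setup */

	static void my_dev_enqueue(struct my_dev *d)
	{
		/* node first, list second, matching the new prototypes */
		klist_add_tail(&d->knode, &my_list);
	}
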
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 8e49d21057e4..04ca4429ddfa 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -93,6 +93,7 @@ static int send_uevent(const char *signal, const char *obj,
 		}
 	}
 
+	NETLINK_CB(skb).dst_group = 1;
 	return netlink_broadcast(uevent_sock, skb, 0, 1, gfp_mask);
 }
 
@@ -153,7 +154,8 @@ EXPORT_SYMBOL_GPL(kobject_uevent_atomic);
 
 static int __init kobject_uevent_init(void)
 {
-	uevent_sock = netlink_kernel_create(NETLINK_KOBJECT_UEVENT, NULL);
+	uevent_sock = netlink_kernel_create(NETLINK_KOBJECT_UEVENT, 1, NULL,
+					    THIS_MODULE);
 
 	if (!uevent_sock) {
 		printk(KERN_ERR
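
Both hunks refer to netlink multicast group 1: send_uevent() stamps outgoing skbs with it, and the extra argument to netlink_kernel_create() sizes the socket's group set. A hedged sketch of a minimal userspace listener that joins that group; this program is illustrative, not part of the patch:

	#include <sys/types.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		struct sockaddr_nl addr;
		char buf[1024];
		int fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT);

		memset(&addr, 0, sizeof(addr));
		addr.nl_family = AF_NETLINK;
		addr.nl_groups = 1;		/* the group used above */
		bind(fd, (struct sockaddr *) &addr, sizeof(addr));

		for (;;) {
			ssize_t len = recv(fd, buf, sizeof(buf) - 1, 0);
			if (len <= 0)
				break;
			buf[len] = '\0';
			printf("uevent: %s\n", buf);	/* e.g. "add@/class/..." */
		}
		close(fd);
		return 0;
	}
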
diff --git a/lib/semaphore-sleepers.c b/lib/semaphore-sleepers.c
new file mode 100644
index 000000000000..4d5f18889fa5
--- /dev/null
+++ b/lib/semaphore-sleepers.c
@@ -0,0 +1,177 @@
+/*
+ * i386 and x86-64 semaphore implementation.
+ *
+ * (C) Copyright 1999 Linus Torvalds
+ *
+ * Portions Copyright 1999 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
+ */
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <asm/semaphore.h>
+
+/*
+ * Semaphores are implemented using a two-way counter:
+ * The "count" variable is decremented for each process
+ * that tries to acquire the semaphore, while the "sleeping"
+ * variable is a count of such acquires.
+ *
+ * Notably, the inline "up()" and "down()" functions can
+ * efficiently test if they need to do any extra work (up
+ * needs to do something only if count was negative before
+ * the increment operation).
+ *
+ * "sleeping" and the contention routine ordering is protected
+ * by the spinlock in the semaphore's waitqueue head.
+ *
+ * Note that these functions are only called when there is
+ * contention on the lock, and as such all this is the
+ * "non-critical" part of the whole semaphore business. The
+ * critical part is the inline stuff in <asm/semaphore.h>
+ * where we want to avoid any extra jumps and calls.
+ */
+
+/*
+ * Logic:
+ *  - only on a boundary condition do we need to care. When we go
+ *    from a negative count to a non-negative, we wake people up.
+ *  - when we go from a non-negative count to a negative do we
+ *    (a) synchronize with the "sleeper" count and (b) make sure
+ *    that we're on the wakeup list before we synchronize so that
+ *    we cannot lose wakeup events.
+ */
+
+fastcall void __up(struct semaphore *sem)
+{
+	wake_up(&sem->wait);
+}
+
+fastcall void __sched __down(struct semaphore * sem)
+{
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+	unsigned long flags;
+
+	tsk->state = TASK_UNINTERRUPTIBLE;
+	spin_lock_irqsave(&sem->wait.lock, flags);
+	add_wait_queue_exclusive_locked(&sem->wait, &wait);
+
+	sem->sleepers++;
+	for (;;) {
+		int sleepers = sem->sleepers;
+
+		/*
+		 * Add "everybody else" into it. They aren't
+		 * playing, because we own the spinlock in
+		 * the wait_queue_head.
+		 */
+		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
+			sem->sleepers = 0;
+			break;
+		}
+		sem->sleepers = 1;	/* us - see -1 above */
+		spin_unlock_irqrestore(&sem->wait.lock, flags);
+
+		schedule();
+
+		spin_lock_irqsave(&sem->wait.lock, flags);
+		tsk->state = TASK_UNINTERRUPTIBLE;
+	}
+	remove_wait_queue_locked(&sem->wait, &wait);
+	wake_up_locked(&sem->wait);
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
+	tsk->state = TASK_RUNNING;
+}
+
+fastcall int __sched __down_interruptible(struct semaphore * sem)
+{
+	int retval = 0;
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+	unsigned long flags;
+
+	tsk->state = TASK_INTERRUPTIBLE;
+	spin_lock_irqsave(&sem->wait.lock, flags);
+	add_wait_queue_exclusive_locked(&sem->wait, &wait);
+
+	sem->sleepers++;
+	for (;;) {
+		int sleepers = sem->sleepers;
+
+		/*
+		 * With signals pending, this turns into
+		 * the trylock failure case - we won't be
+		 * sleeping, and we can't get the lock as
+		 * it has contention. Just correct the count
+		 * and exit.
+		 */
+		if (signal_pending(current)) {
+			retval = -EINTR;
+			sem->sleepers = 0;
+			atomic_add(sleepers, &sem->count);
+			break;
+		}
+
+		/*
+		 * Add "everybody else" into it. They aren't
+		 * playing, because we own the spinlock in
+		 * wait_queue_head. The "-1" is because we're
+		 * still hoping to get the semaphore.
+		 */
+		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
+			sem->sleepers = 0;
+			break;
+		}
+		sem->sleepers = 1;	/* us - see -1 above */
+		spin_unlock_irqrestore(&sem->wait.lock, flags);
+
+		schedule();
+
+		spin_lock_irqsave(&sem->wait.lock, flags);
+		tsk->state = TASK_INTERRUPTIBLE;
+	}
+	remove_wait_queue_locked(&sem->wait, &wait);
+	wake_up_locked(&sem->wait);
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
+
+	tsk->state = TASK_RUNNING;
+	return retval;
+}
+
+/*
+ * Trylock failed - make sure we correct for
+ * having decremented the count.
+ *
+ * We could have done the trylock with a
+ * single "cmpxchg" without failure cases,
+ * but then it wouldn't work on a 386.
+ */
+fastcall int __down_trylock(struct semaphore * sem)
+{
+	int sleepers;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sem->wait.lock, flags);
+	sleepers = sem->sleepers + 1;
+	sem->sleepers = 0;
+
+	/*
+	 * Add "everybody else" and us into it. They aren't
+	 * playing, because we own the spinlock in the
+	 * wait_queue_head.
+	 */
+	if (!atomic_add_negative(sleepers, &sem->count)) {
+		wake_up_locked(&sem->wait);
+	}
+
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
+	return 1;
+}
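
The file above supplies only the contended slow path; the fast path stays inline in each architecture's <asm/semaphore.h>, as the header comment says. A hedged sketch of the call pattern from a semaphore user; my_sem and my_op() are hypothetical:

	#include <asm/semaphore.h>

	static DECLARE_MUTEX(my_sem);	/* semaphore initialized to count 1 */

	static int my_op(void)
	{
		/* uncontended: a single atomic decrement; under contention
		 * the inline fast path falls into __down_interruptible() above */
		if (down_interruptible(&my_sem))
			return -EINTR;		/* interrupted while sleeping */
		/* ... critical section ... */
		up(&my_sem);	/* count was negative? __up() wakes a sleeper */
		return 0;
	}
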
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
new file mode 100644
index 000000000000..2cc79112ecc3
--- /dev/null
+++ b/lib/ts_bm.c
@@ -0,0 +1,185 @@
+/*
+ * lib/ts_bm.c		Boyer-Moore text search implementation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Authors:	Pablo Neira Ayuso <pablo@eurodev.net>
+ *
+ * ==========================================================================
+ *
+ * Implements Boyer-Moore string matching algorithm:
+ *
+ * [1] A Fast String Searching Algorithm, R.S. Boyer and Moore.
+ *     Communications of the Association for Computing Machinery,
+ *     20(10), 1977, pp. 762-772.
+ *     http://www.cs.utexas.edu/users/moore/publications/fstrpos.pdf
+ *
+ * [2] Handbook of Exact String Matching Algorithms, Thierry Lecroq, 2004
+ *     http://www-igm.univ-mlv.fr/~lecroq/string/string.pdf
+ *
+ * Note: Since Boyer-Moore (BM) performs searches for matchings from right
+ * to left, it's still possible that a matching could be spread over
+ * multiple blocks, in that case this algorithm won't find any coincidence.
+ *
+ * If you're willing to ensure that such thing won't ever happen, use the
+ * Knuth-Morris-Pratt (KMP) implementation instead. In conclusion, choose
+ * the proper string search algorithm depending on your setting.
+ *
+ * Say you're using the textsearch infrastructure for filtering, NIDS or
+ * any similar security focused purpose, then go KMP. Otherwise, if you
+ * really care about performance, say you're classifying packets to apply
+ * Quality of Service (QoS) policies, and you don't mind about possible
+ * matchings spread over multiple fragments, then go BM.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/textsearch.h>
+
+/* Alphabet size, use ASCII */
+#define ASIZE 256
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(args, format...)
+#endif
+
+struct ts_bm
+{
+	u8 *		pattern;
+	unsigned int	patlen;
+	unsigned int	bad_shift[ASIZE];
+	unsigned int	good_shift[0];
+};
+
+static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
+{
+	struct ts_bm *bm = ts_config_priv(conf);
+	unsigned int i, text_len, consumed = state->offset;
+	const u8 *text;
+	int shift = bm->patlen, bs;
+
+	for (;;) {
+		text_len = conf->get_next_block(consumed, &text, conf, state);
+
+		if (unlikely(text_len == 0))
+			break;
+
+		while (shift < text_len) {
+			DEBUGP("Searching in position %d (%c)\n",
+				shift, text[shift]);
+			for (i = 0; i < bm->patlen; i++)
+				if (text[shift-i] != bm->pattern[bm->patlen-1-i])
+					goto next;
+
+			/* London calling... */
+			DEBUGP("found!\n");
+			return consumed += (shift-(bm->patlen-1));
+
+next:			bs = bm->bad_shift[text[shift-i]];
+
+			/* Now jumping to... */
+			shift = max_t(int, shift-i+bs, shift+bm->good_shift[i]);
+		}
+		consumed += text_len;
+	}
+
+	return UINT_MAX;
+}
+
+static void compute_prefix_tbl(struct ts_bm *bm, const u8 *pattern,
+			       unsigned int len)
+{
+	int i, j, ended, l[ASIZE];
+
+	for (i = 0; i < ASIZE; i++)
+		bm->bad_shift[i] = len;
+	for (i = 0; i < len - 1; i++)
+		bm->bad_shift[pattern[i]] = len - 1 - i;
+
+	/* Compute the good shift array, used to match reocurrences
+	 * of a subpattern */
+	for (i = 1; i < bm->patlen; i++) {
+		for (j = 0; j < bm->patlen && bm->pattern[bm->patlen - 1 - j]
+				== bm->pattern[bm->patlen - 1 - i - j]; j++);
+		l[i] = j;
+	}
+
+	bm->good_shift[0] = 1;
+	for (i = 1; i < bm->patlen; i++)
+		bm->good_shift[i] = bm->patlen;
+	for (i = bm->patlen - 1; i > 0; i--)
+		bm->good_shift[l[i]] = i;
+	ended = 0;
+	for (i = 0; i < bm->patlen; i++) {
+		if (l[i] == bm->patlen - 1 - i)
+			ended = i;
+		if (ended)
+			bm->good_shift[i] = ended;
+	}
+}
+
+static struct ts_config *bm_init(const void *pattern, unsigned int len,
+				 int gfp_mask)
+{
+	struct ts_config *conf;
+	struct ts_bm *bm;
+	unsigned int prefix_tbl_len = len * sizeof(unsigned int);
+	size_t priv_size = sizeof(*bm) + len + prefix_tbl_len;
+
+	conf = alloc_ts_config(priv_size, gfp_mask);
+	if (IS_ERR(conf))
+		return conf;
+
+	bm = ts_config_priv(conf);
+	bm->patlen = len;
+	bm->pattern = (u8 *) bm->good_shift + prefix_tbl_len;
+	compute_prefix_tbl(bm, pattern, len);
+	memcpy(bm->pattern, pattern, len);
+
+	return conf;
+}
+
+static void *bm_get_pattern(struct ts_config *conf)
+{
+	struct ts_bm *bm = ts_config_priv(conf);
+	return bm->pattern;
+}
+
+static unsigned int bm_get_pattern_len(struct ts_config *conf)
+{
+	struct ts_bm *bm = ts_config_priv(conf);
+	return bm->patlen;
+}
+
+static struct ts_ops bm_ops = {
+	.name		  = "bm",
+	.find		  = bm_find,
+	.init		  = bm_init,
+	.get_pattern	  = bm_get_pattern,
+	.get_pattern_len  = bm_get_pattern_len,
+	.owner		  = THIS_MODULE,
+	.list		  = LIST_HEAD_INIT(bm_ops.list)
+};
+
+static int __init init_bm(void)
+{
+	return textsearch_register(&bm_ops);
+}
+
+static void __exit exit_bm(void)
+{
+	textsearch_unregister(&bm_ops);
+}
+
+MODULE_LICENSE("GPL");
+
+module_init(init_bm);
+module_exit(exit_bm);
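
For context, a hedged sketch of how a caller selects this algorithm through the textsearch front end; the pattern and the find_needle() helper are illustrative only:

	#include <linux/kernel.h>
	#include <linux/err.h>
	#include <linux/textsearch.h>

	static int find_needle(const void *data, unsigned int len)
	{
		struct ts_config *conf;
		struct ts_state state;
		unsigned int pos;

		/* "bm" selects the Boyer-Moore ops registered above */
		conf = textsearch_prepare("bm", "needle", 6, GFP_KERNEL, 0);
		if (IS_ERR(conf))
			return PTR_ERR(conf);

		/* single continuous block only; see the note above about
		 * matches spread over multiple blocks */
		pos = textsearch_find_continuous(conf, &state, data, len);
		textsearch_destroy(conf);
		return pos != UINT_MAX ? (int) pos : -1;
	}
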
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index a9bda0a361f3..e4e9031dd9c3 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -269,6 +269,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
 	int qualifier;		/* 'h', 'l', or 'L' for integer fields */
 				/* 'z' support added 23/7/1999 S.H.    */
 				/* 'z' changed to 'Z' --davidm 1/25/99 */
+				/* 't' added for ptrdiff_t */
 
 	/* Reject out-of-range values early */
 	if (unlikely((int) size < 0)) {
@@ -339,7 +340,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
 	/* get the conversion qualifier */
 	qualifier = -1;
 	if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
-	    *fmt =='Z' || *fmt == 'z') {
+	    *fmt =='Z' || *fmt == 'z' || *fmt == 't') {
 		qualifier = *fmt;
 		++fmt;
 		if (qualifier == 'l' && *fmt == 'l') {
@@ -467,6 +468,8 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
 			num = (signed long) num;
 	} else if (qualifier == 'Z' || qualifier == 'z') {
 		num = va_arg(args, size_t);
+	} else if (qualifier == 't') {
+		num = va_arg(args, ptrdiff_t);
 	} else if (qualifier == 'h') {
 		num = (unsigned short) va_arg(args, int);
 		if (flags & SIGN)
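
With 't' accepted as a qualifier and routed to va_arg(args, ptrdiff_t), the kernel's printf family now understands the C99 length modifier for pointer differences. A brief hedged example; report_span() is hypothetical:

	#include <linux/kernel.h>

	static void report_span(char *start, char *end)
	{
		/* end - start is a ptrdiff_t; %td prints it directly,
		 * where a cast to long and %ld were needed before */
		printk(KERN_DEBUG "consumed %td bytes\n", end - start);
	}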