author    Anton Altaparmakov <aia21@cantab.net>  2005-06-23 06:26:22 -0400
committer Anton Altaparmakov <aia21@cantab.net>  2005-06-23 06:26:22 -0400
commit    3357d4c75f1fb67e7304998c4ad4e9a9fed66fa4 (patch)
tree      ceba46966a5a1112a05d257d8ecb25ae5eee95e0 /lib
parent    364f6c717deef4a3ac4982e670fa9846b43cd060 (diff)
parent    ee98689be1b054897ff17655008c3048fe88be94 (diff)
Automatic merge with /usr/src/ntfs-2.6.git.
Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig              |   6
-rw-r--r--   lib/Makefile             |   9
-rw-r--r--   lib/genalloc.c           | 188
-rw-r--r--   lib/idr.c                |   2
-rw-r--r--   lib/kernel_lock.c        |  55
-rw-r--r--   lib/klist.c              | 265
-rw-r--r--   lib/kobject.c            |   2
-rw-r--r--   lib/kobject_uevent.c     |   6
-rw-r--r--   lib/smp_processor_id.c   |  55
9 files changed, 525 insertions(+), 63 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index eeb4522524..2d4d4e3bc4 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -40,6 +40,12 @@ config ZLIB_DEFLATE
 	tristate
 
 #
+# Generic allocator support is selected if needed
+#
+config GENERIC_ALLOCATOR
+	boolean
+
+#
 # reed solomon support is select'ed if needed
 #
 config REED_SOLOMON
diff --git a/lib/Makefile b/lib/Makefile
index 7c70db79c0..dcb4231916 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -4,9 +4,10 @@
 
 lib-y := errno.o ctype.o string.o vsprintf.o cmdline.o \
 	 bust_spinlocks.o rbtree.o radix-tree.o dump_stack.o \
-	 kobject.o kref.o idr.o div64.o int_sqrt.o \
-	 bitmap.o extable.o kobject_uevent.o prio_tree.o sha1.o \
-	 halfmd4.o
+	 idr.o div64.o int_sqrt.o bitmap.o extable.o prio_tree.o \
+	 sha1.o halfmd4.o
+
+lib-y += kobject.o kref.o kobject_uevent.o klist.o
 
 obj-y += sort.o parser.o
 
@@ -19,6 +20,7 @@ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
+obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
 
 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
   lib-y += dec_and_lock.o
@@ -28,6 +30,7 @@ obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
 obj-$(CONFIG_CRC32) += crc32.o
 obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
 obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
+obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
 
 obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
 obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/
diff --git a/lib/genalloc.c b/lib/genalloc.c
new file mode 100644
index 0000000000..d6d30d2e71
--- /dev/null
+++ b/lib/genalloc.c
@@ -0,0 +1,188 @@
+/*
+ * Basic general purpose allocator for managing special purpose memory
+ * not managed by the regular kmalloc/kfree interface.
+ * Uses for this includes on-device special memory, uncached memory
+ * etc.
+ *
+ * This code is based on the buddy allocator found in the sym53c8xx_2
+ * driver Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>,
+ * and adapted for general purpose use.
+ *
+ * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/genalloc.h>
+
+#include <asm/page.h>
+
+
+struct gen_pool *gen_pool_create(int nr_chunks, int max_chunk_shift,
+                                 unsigned long (*fp)(struct gen_pool *),
+                                 unsigned long data)
+{
+        struct gen_pool *poolp;
+        unsigned long tmp;
+        int i;
+
+        /*
+         * This is really an arbitrary limit, +10 is enough for
+         * IA64_GRANULE_SHIFT, aka 16MB. If anyone needs a large limit
+         * this can be increased without problems.
+         */
+        if ((max_chunk_shift > (PAGE_SHIFT + 10)) ||
+            ((max_chunk_shift < ALLOC_MIN_SHIFT) && max_chunk_shift))
+                return NULL;
+
+        if (!max_chunk_shift)
+                max_chunk_shift = PAGE_SHIFT;
+
+        poolp = kmalloc(sizeof(struct gen_pool), GFP_KERNEL);
+        if (!poolp)
+                return NULL;
+        memset(poolp, 0, sizeof(struct gen_pool));
+        poolp->h = kmalloc(sizeof(struct gen_pool_link) *
+                           (max_chunk_shift - ALLOC_MIN_SHIFT + 1),
+                           GFP_KERNEL);
+        if (!poolp->h) {
+                printk(KERN_WARNING "gen_pool_alloc() failed to allocate\n");
+                kfree(poolp);
+                return NULL;
+        }
+        memset(poolp->h, 0, sizeof(struct gen_pool_link) *
+               (max_chunk_shift - ALLOC_MIN_SHIFT + 1));
+
+        spin_lock_init(&poolp->lock);
+        poolp->get_new_chunk = fp;
+        poolp->max_chunk_shift = max_chunk_shift;
+        poolp->private = data;
+
+        for (i = 0; i < nr_chunks; i++) {
+                tmp = poolp->get_new_chunk(poolp);
+                printk(KERN_INFO "allocated %lx\n", tmp);
+                if (!tmp)
+                        break;
+                gen_pool_free(poolp, tmp, (1 << poolp->max_chunk_shift));
+        }
+
+        return poolp;
+}
+EXPORT_SYMBOL(gen_pool_create);
+
+
+/*
+ * Simple power of two buddy-like generic allocator.
+ * Provides naturally aligned memory chunks.
+ */
+unsigned long gen_pool_alloc(struct gen_pool *poolp, int size)
+{
+        int j, i, s, max_chunk_size;
+        unsigned long a, flags;
+        struct gen_pool_link *h = poolp->h;
+
+        max_chunk_size = 1 << poolp->max_chunk_shift;
+
+        if (size > max_chunk_size)
+                return 0;
+
+        i = 0;
+
+        size = max(size, 1 << ALLOC_MIN_SHIFT);
+        s = roundup_pow_of_two(size);
+
+        j = i;
+
+        spin_lock_irqsave(&poolp->lock, flags);
+        while (!h[j].next) {
+                if (s == max_chunk_size) {
+                        struct gen_pool_link *ptr;
+                        spin_unlock_irqrestore(&poolp->lock, flags);
+                        ptr = (struct gen_pool_link *)poolp->get_new_chunk(poolp);
+                        spin_lock_irqsave(&poolp->lock, flags);
+                        h[j].next = ptr;
+                        if (h[j].next)
+                                h[j].next->next = NULL;
+                        break;
+                }
+                j++;
+                s <<= 1;
+        }
+        a = (unsigned long) h[j].next;
+        if (a) {
+                h[j].next = h[j].next->next;
+                /*
+                 * This should be split into a separate function doing
+                 * the chunk split in order to support custom
+                 * handling memory not physically accessible by host
+                 */
+                while (j > i) {
+                        j -= 1;
+                        s >>= 1;
+                        h[j].next = (struct gen_pool_link *) (a + s);
+                        h[j].next->next = NULL;
+                }
+        }
+        spin_unlock_irqrestore(&poolp->lock, flags);
+        return a;
+}
+EXPORT_SYMBOL(gen_pool_alloc);
+
+
+/*
+ * Counter-part of the generic allocator.
+ */
+void gen_pool_free(struct gen_pool *poolp, unsigned long ptr, int size)
+{
+        struct gen_pool_link *q;
+        struct gen_pool_link *h = poolp->h;
+        unsigned long a, b, flags;
+        int i, s, max_chunk_size;
+
+        max_chunk_size = 1 << poolp->max_chunk_shift;
+
+        if (size > max_chunk_size)
+                return;
+
+        i = 0;
+
+        size = max(size, 1 << ALLOC_MIN_SHIFT);
+        s = roundup_pow_of_two(size);
+
+        a = ptr;
+
+        spin_lock_irqsave(&poolp->lock, flags);
+        while (1) {
+                if (s == max_chunk_size) {
+                        ((struct gen_pool_link *)a)->next = h[i].next;
+                        h[i].next = (struct gen_pool_link *)a;
+                        break;
+                }
+                b = a ^ s;
+                q = &h[i];
+
+                while (q->next && q->next != (struct gen_pool_link *)b)
+                        q = q->next;
+
+                if (!q->next) {
+                        ((struct gen_pool_link *)a)->next = h[i].next;
+                        h[i].next = (struct gen_pool_link *)a;
+                        break;
+                }
+                q->next = q->next->next;
+                a = a & b;
+                s <<= 1;
+                i++;
+        }
+        spin_unlock_irqrestore(&poolp->lock, flags);
+}
+EXPORT_SYMBOL(gen_pool_free);
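
For context, a minimal usage sketch of the gen_pool interface added above; it is not part of the patch. The chunk-supplier callback here is backed by __get_free_pages(), and the names example_get_chunk and example_init are hypothetical stand-ins for whatever special-purpose memory a real caller manages.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/genalloc.h>
#include <asm/page.h>

/* Chunk supplier: called whenever the pool needs another 2^max_chunk_shift bytes. */
static unsigned long example_get_chunk(struct gen_pool *pool)
{
        /* pool->private could carry driver state; a plain page is enough here. */
        return __get_free_pages(GFP_KERNEL, pool->max_chunk_shift - PAGE_SHIFT);
}

static int example_init(void)
{
        struct gen_pool *pool;
        unsigned long addr;

        /* Pre-populate the pool with 4 page-sized chunks. */
        pool = gen_pool_create(4, PAGE_SHIFT, example_get_chunk, 0);
        if (!pool)
                return -ENOMEM;

        /* Allocations are rounded up to a power of two and naturally aligned. */
        addr = gen_pool_alloc(pool, 256);
        if (!addr)
                return -ENOMEM;  /* pool is leaked; see note below */

        /* ... use the 256 bytes at 'addr' ... */

        gen_pool_free(pool, addr, 256);
        return 0;
}

Note that the interface as added here exports only gen_pool_create(), gen_pool_alloc() and gen_pool_free(); there is no destroy routine, so a pool created this way is effectively permanent.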
diff --git a/lib/idr.c b/lib/idr.c
index 81fc430602..c5be889de4 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -175,7 +175,7 @@ build_up:
 	 * Add a new layer to the top of the tree if the requested
 	 * id is larger than the currently allocated space.
 	 */
-	while ((layers < MAX_LEVEL) && (id >= (1 << (layers*IDR_BITS)))) {
+	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
 		layers++;
 		if (!p->count)
 			continue;
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 99b0ae3d51..bd2bc5d887 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -9,61 +9,6 @@
 #include <linux/module.h>
 #include <linux/kallsyms.h>
 
-#if defined(CONFIG_PREEMPT) && defined(__smp_processor_id) && \
-		defined(CONFIG_DEBUG_PREEMPT)
-
-/*
- * Debugging check.
- */
-unsigned int smp_processor_id(void)
-{
-	unsigned long preempt_count = preempt_count();
-	int this_cpu = __smp_processor_id();
-	cpumask_t this_mask;
-
-	if (likely(preempt_count))
-		goto out;
-
-	if (irqs_disabled())
-		goto out;
-
-	/*
-	 * Kernel threads bound to a single CPU can safely use
-	 * smp_processor_id():
-	 */
-	this_mask = cpumask_of_cpu(this_cpu);
-
-	if (cpus_equal(current->cpus_allowed, this_mask))
-		goto out;
-
-	/*
-	 * It is valid to assume CPU-locality during early bootup:
-	 */
-	if (system_state != SYSTEM_RUNNING)
-		goto out;
-
-	/*
-	 * Avoid recursion:
-	 */
-	preempt_disable();
-
-	if (!printk_ratelimit())
-		goto out_enable;
-
-	printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] code: %s/%d\n", preempt_count(), current->comm, current->pid);
-	print_symbol("caller is %s\n", (long)__builtin_return_address(0));
-	dump_stack();
-
-out_enable:
-	preempt_enable_no_resched();
-out:
-	return this_cpu;
-}
-
-EXPORT_SYMBOL(smp_processor_id);
-
-#endif /* PREEMPT && __smp_processor_id && DEBUG_PREEMPT */
-
 #ifdef CONFIG_PREEMPT_BKL
 /*
  * The 'big kernel semaphore'
diff --git a/lib/klist.c b/lib/klist.c
new file mode 100644
index 0000000000..738ab81016
--- /dev/null
+++ b/lib/klist.c
@@ -0,0 +1,265 @@
+/*
+ * klist.c - Routines for manipulating klists.
+ *
+ *
+ * This klist interface provides a couple of structures that wrap around
+ * struct list_head to provide explicit list "head" (struct klist) and
+ * list "node" (struct klist_node) objects. For struct klist, a spinlock
+ * is included that protects access to the actual list itself. struct
+ * klist_node provides a pointer to the klist that owns it and a kref
+ * reference count that indicates the number of current users of that node
+ * in the list.
+ *
+ * The entire point is to provide an interface for iterating over a list
+ * that is safe and allows for modification of the list during the
+ * iteration (e.g. insertion and removal), including modification of the
+ * current node on the list.
+ *
+ * It works using a 3rd object type - struct klist_iter - that is declared
+ * and initialized before an iteration. klist_next() is used to acquire the
+ * next element in the list. It returns NULL if there are no more items.
+ * Internally, that routine takes the klist's lock, decrements the reference
+ * count of the previous klist_node and increments the count of the next
+ * klist_node. It then drops the lock and returns.
+ *
+ * There are primitives for adding and removing nodes to/from a klist.
+ * When deleting, klist_del() will simply decrement the reference count.
+ * Only when the count goes to 0 is the node removed from the list.
+ * klist_remove() will try to delete the node from the list and block
+ * until it is actually removed. This is useful for objects (like devices)
+ * that have been removed from the system and must be freed (but must wait
+ * until all accessors have finished).
+ *
+ * Copyright (C) 2005 Patrick Mochel
+ *
+ * This file is released under the GPL v2.
+ */
+
+#include <linux/klist.h>
+#include <linux/module.h>
+
+
+/**
+ * klist_init - Initialize a klist structure.
+ * @k: The klist we're initializing.
+ */
+
+void klist_init(struct klist * k)
+{
+        INIT_LIST_HEAD(&k->k_list);
+        spin_lock_init(&k->k_lock);
+}
+
+EXPORT_SYMBOL_GPL(klist_init);
+
+
+static void add_head(struct klist * k, struct klist_node * n)
+{
+        spin_lock(&k->k_lock);
+        list_add(&n->n_node, &k->k_list);
+        spin_unlock(&k->k_lock);
+}
+
+static void add_tail(struct klist * k, struct klist_node * n)
+{
+        spin_lock(&k->k_lock);
+        list_add_tail(&n->n_node, &k->k_list);
+        spin_unlock(&k->k_lock);
+}
+
+
+static void klist_node_init(struct klist * k, struct klist_node * n)
+{
+        INIT_LIST_HEAD(&n->n_node);
+        init_completion(&n->n_removed);
+        kref_init(&n->n_ref);
+        n->n_klist = k;
+}
+
+
+/**
+ * klist_add_head - Initialize a klist_node and add it to front.
+ * @k: klist it's going on.
+ * @n: node we're adding.
+ */
+
+void klist_add_head(struct klist * k, struct klist_node * n)
+{
+        klist_node_init(k, n);
+        add_head(k, n);
+}
+
+EXPORT_SYMBOL_GPL(klist_add_head);
+
+
+/**
+ * klist_add_tail - Initialize a klist_node and add it to back.
+ * @k: klist it's going on.
+ * @n: node we're adding.
+ */
+
+void klist_add_tail(struct klist * k, struct klist_node * n)
+{
+        klist_node_init(k, n);
+        add_tail(k, n);
+}
+
+EXPORT_SYMBOL_GPL(klist_add_tail);
+
+
+static void klist_release(struct kref * kref)
+{
+        struct klist_node * n = container_of(kref, struct klist_node, n_ref);
+        list_del(&n->n_node);
+        complete(&n->n_removed);
+        n->n_klist = NULL;
+}
+
+static int klist_dec_and_del(struct klist_node * n)
+{
+        return kref_put(&n->n_ref, klist_release);
+}
+
+
+/**
+ * klist_del - Decrement the reference count of node and try to remove.
+ * @n: node we're deleting.
+ */
+
+void klist_del(struct klist_node * n)
+{
+        struct klist * k = n->n_klist;
+
+        spin_lock(&k->k_lock);
+        klist_dec_and_del(n);
+        spin_unlock(&k->k_lock);
+}
+
+EXPORT_SYMBOL_GPL(klist_del);
+
+
+/**
+ * klist_remove - Decrement the refcount of node and wait for it to go away.
+ * @n: node we're removing.
+ */
+
+void klist_remove(struct klist_node * n)
+{
+        struct klist * k = n->n_klist;
+        spin_lock(&k->k_lock);
+        klist_dec_and_del(n);
+        spin_unlock(&k->k_lock);
+        wait_for_completion(&n->n_removed);
+}
+
+EXPORT_SYMBOL_GPL(klist_remove);
+
+
+/**
+ * klist_node_attached - Say whether a node is bound to a list or not.
+ * @n: Node that we're testing.
+ */
+
+int klist_node_attached(struct klist_node * n)
+{
+        return (n->n_klist != NULL);
+}
+
+EXPORT_SYMBOL_GPL(klist_node_attached);
+
+
+/**
+ * klist_iter_init_node - Initialize a klist_iter structure.
+ * @k: klist we're iterating.
+ * @i: klist_iter we're filling.
+ * @n: node to start with.
+ *
+ * Similar to klist_iter_init(), but starts the action off with @n,
+ * instead of with the list head.
+ */
+
+void klist_iter_init_node(struct klist * k, struct klist_iter * i, struct klist_node * n)
+{
+        i->i_klist = k;
+        i->i_head = &k->k_list;
+        i->i_cur = n;
+}
+
+EXPORT_SYMBOL_GPL(klist_iter_init_node);
+
+
+/**
+ * klist_iter_init - Initialize a klist_iter structure.
+ * @k: klist we're iterating.
+ * @i: klist_iter structure we're filling.
+ *
+ * Similar to klist_iter_init_node(), but start with the list head.
+ */
+
+void klist_iter_init(struct klist * k, struct klist_iter * i)
+{
+        klist_iter_init_node(k, i, NULL);
+}
+
+EXPORT_SYMBOL_GPL(klist_iter_init);
+
+
+/**
+ * klist_iter_exit - Finish a list iteration.
+ * @i: Iterator structure.
+ *
+ * Must be called when done iterating over list, as it decrements the
+ * refcount of the current node. Necessary in case iteration exited before
+ * the end of the list was reached, and always good form.
+ */
+
+void klist_iter_exit(struct klist_iter * i)
+{
+        if (i->i_cur) {
+                klist_del(i->i_cur);
+                i->i_cur = NULL;
+        }
+}
+
+EXPORT_SYMBOL_GPL(klist_iter_exit);
+
+
+static struct klist_node * to_klist_node(struct list_head * n)
+{
+        return container_of(n, struct klist_node, n_node);
+}
+
+
+/**
+ * klist_next - Ante up next node in list.
+ * @i: Iterator structure.
+ *
+ * First grab list lock. Decrement the reference count of the previous
+ * node, if there was one. Grab the next node, increment its reference
+ * count, drop the lock, and return that next node.
+ */
+
+struct klist_node * klist_next(struct klist_iter * i)
+{
+        struct list_head * next;
+        struct klist_node * knode = NULL;
+
+        spin_lock(&i->i_klist->k_lock);
+        if (i->i_cur) {
+                next = i->i_cur->n_node.next;
+                klist_dec_and_del(i->i_cur);
+        } else
+                next = i->i_head->next;
+
+        if (next != i->i_head) {
+                knode = to_klist_node(next);
+                kref_get(&knode->n_ref);
+        }
+        i->i_cur = knode;
+        spin_unlock(&i->i_klist->k_lock);
+        return knode;
+}
+
+EXPORT_SYMBOL_GPL(klist_next);
+
+
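
A short usage sketch of the klist API described in the header comment above; it is not part of the patch, and struct child, my_list and example() are invented names. It shows the intended pattern: each node visited by the iterator is pinned by its kref, so the list may be modified while the walk is in progress.

#include <linux/kernel.h>
#include <linux/klist.h>

struct child {
        struct klist_node n;
        int id;
};

static struct klist my_list;

static void example(void)
{
        static struct child a = { .id = 1 }, b = { .id = 2 };
        struct klist_iter iter;
        struct klist_node *kn;

        klist_init(&my_list);
        klist_add_tail(&my_list, &a.n);
        klist_add_tail(&my_list, &b.n);

        /* Iterate; klist_next() pins the node it returns with a kref. */
        klist_iter_init(&my_list, &iter);
        while ((kn = klist_next(&iter)) != NULL) {
                struct child *c = container_of(kn, struct child, n);

                printk(KERN_INFO "klist example: child %d\n", c->id);
        }
        klist_iter_exit(&iter);         /* drops the ref if the walk stopped early */

        klist_del(&a.n);                /* drop the initial ref; node unlinks at zero */
        klist_remove(&b.n);             /* same, but waits until the node is really gone */
}

When the walk runs to the end, klist_iter_exit() is a no-op (i_cur is already NULL), but, as the header comment says, calling it unconditionally is good form.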
diff --git a/lib/kobject.c b/lib/kobject.c
index 9404882662..dd0917dd9f 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -279,7 +279,7 @@ EXPORT_SYMBOL(kobject_set_name);
  * @new_name: object's new name
  */
 
-int kobject_rename(struct kobject * kobj, char *new_name)
+int kobject_rename(struct kobject * kobj, const char *new_name)
 {
 	int error = 0;
 
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 2a4e7671ea..8e49d21057 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -197,7 +197,7 @@ void kobject_hotplug(struct kobject *kobj, enum kobject_action action)
 	int i = 0;
 	int retval;
 	char *kobj_path = NULL;
-	char *name = NULL;
+	const char *name = NULL;
 	char *action_string;
 	u64 seq;
 	struct kobject *top_kobj = kobj;
@@ -246,10 +246,10 @@ void kobject_hotplug(struct kobject *kobj, enum kobject_action action)
 	if (hotplug_ops->name)
 		name = hotplug_ops->name(kset, kobj);
 	if (name == NULL)
-		name = kset->kobj.name;
+		name = kobject_name(&kset->kobj);
 
 	argv [0] = hotplug_path;
-	argv [1] = name;
+	argv [1] = (char *)name;	/* won't be changed but 'const' has to go */
 	argv [2] = NULL;
 
 	/* minimal command environment */
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
new file mode 100644
index 0000000000..42c08ef828
--- /dev/null
+++ b/lib/smp_processor_id.c
@@ -0,0 +1,55 @@
+/*
+ * lib/smp_processor_id.c
+ *
+ * DEBUG_PREEMPT variant of smp_processor_id().
+ */
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+
+unsigned int debug_smp_processor_id(void)
+{
+        unsigned long preempt_count = preempt_count();
+        int this_cpu = raw_smp_processor_id();
+        cpumask_t this_mask;
+
+        if (likely(preempt_count))
+                goto out;
+
+        if (irqs_disabled())
+                goto out;
+
+        /*
+         * Kernel threads bound to a single CPU can safely use
+         * smp_processor_id():
+         */
+        this_mask = cpumask_of_cpu(this_cpu);
+
+        if (cpus_equal(current->cpus_allowed, this_mask))
+                goto out;
+
+        /*
+         * It is valid to assume CPU-locality during early bootup:
+         */
+        if (system_state != SYSTEM_RUNNING)
+                goto out;
+
+        /*
+         * Avoid recursion:
+         */
+        preempt_disable();
+
+        if (!printk_ratelimit())
+                goto out_enable;
+
+        printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] code: %s/%d\n", preempt_count(), current->comm, current->pid);
+        print_symbol("caller is %s\n", (long)__builtin_return_address(0));
+        dump_stack();
+
+out_enable:
+        preempt_enable_no_resched();
+out:
+        return this_cpu;
+}
+
+EXPORT_SYMBOL(debug_smp_processor_id);
+
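
Finally, a hedged sketch of the calling convention this debug check enforces; it is not part of the patch. With CONFIG_DEBUG_PREEMPT enabled, the smp_processor_id() wrapper is expected to resolve to debug_smp_processor_id() above (the header wiring is outside this diff), so preemptible code should pin itself to a CPU, for example with get_cpu()/put_cpu(), before asking which CPU it is on. example_func() is a hypothetical caller.

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/preempt.h>

static void example_func(void)
{
        int cpu;

        /*
         * In preemptible context, pin the task to a CPU for as long as the
         * CPU number is used; a bare smp_processor_id() here would trigger
         * the "using smp_processor_id() in preemptible code" warning above.
         */
        cpu = get_cpu();        /* disables preemption and returns the CPU id */
        printk(KERN_DEBUG "example_func: running on CPU %d\n", cpu);
        /* ... per-CPU work that must stay on 'cpu' ... */
        put_cpu();              /* re-enables preemption */
}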