author    Steve French <sfrench@us.ibm.com>  2008-04-24 11:26:50 -0400
committer Steve French <sfrench@us.ibm.com>  2008-04-24 11:26:50 -0400
commit    36d99df2fb474222ab47fbe8ae7385661033223b (patch)
tree      962e068491b752a944f61c454fad3f8619a1ea3f /lib
parent    076d8423a98659a92837b07aa494cb74bfefe77c (diff)
parent    3dc5063786b273f1aee545844f6bd4e9651ebffe (diff)
Merge branch 'master' of /pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig                      |   3
-rw-r--r--   lib/Kconfig.debug                |  37
-rw-r--r--   lib/Kconfig.kgdb                 |  58
-rw-r--r--   lib/Makefile                     |   4
-rw-r--r--   lib/bitmap.c                     |  16
-rw-r--r--   lib/kernel_lock.c                |   1
-rw-r--r--   lib/kobject.c                    |  19
-rw-r--r--   lib/kobject_uevent.c             |   6
-rw-r--r--   lib/lmb.c                        | 428
-rw-r--r--   lib/pcounter.c                   |  58
-rw-r--r--   lib/reed_solomon/reed_solomon.c  |   1
-rw-r--r--   lib/scatterlist.c                | 102
-rw-r--r--   lib/semaphore-sleepers.c         | 176
13 files changed, 649 insertions(+), 260 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index ba3d104994d9..2d53dc092e8b 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -141,4 +141,7 @@ config HAS_DMA
 config CHECK_SIGNATURE
         bool
 
+config HAVE_LMB
+        boolean
+
 endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0796c1a090c0..623ef24c2381 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -211,7 +211,7 @@ config SLUB_DEBUG_ON
 config SLUB_STATS
         default n
         bool "Enable SLUB performance statistics"
-        depends on SLUB
+        depends on SLUB && SLUB_DEBUG && SYSFS
         help
           SLUB statistics are useful to debug SLUBs allocation behavior in
           order find ways to optimize the allocator. This should never be
@@ -265,16 +265,6 @@ config DEBUG_MUTEXES
           This feature allows mutex semantics violations to be detected and
           reported.
 
-config DEBUG_SEMAPHORE
-        bool "Semaphore debugging"
-        depends on DEBUG_KERNEL
-        depends on ALPHA || FRV
-        default n
-        help
-          If you say Y here then semaphore processing will issue lots of
-          verbose debugging messages. If you suspect a semaphore problem or a
-          kernel hacker asks for this option then say Y. Otherwise say N.
-
 config DEBUG_LOCK_ALLOC
         bool "Lock debugging: detect incorrect freeing of live locks"
         depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -437,6 +427,16 @@ config DEBUG_VM
 
           If unsure, say N.
 
+config DEBUG_WRITECOUNT
+        bool "Debug filesystem writers count"
+        depends on DEBUG_KERNEL
+        help
+          Enable this to catch wrong use of the writers count in struct
+          vfsmount. This will increase the size of each file struct by
+          32 bits.
+
+          If unsure, say N.
+
 config DEBUG_LIST
         bool "Debug linked list manipulation"
         depends on DEBUG_KERNEL
@@ -593,7 +593,7 @@ config LATENCYTOP
           to find out which userspace is blocking on what kernel operations.
 
 config PROVIDE_OHCI1394_DMA_INIT
-        bool "Provide code for enabling DMA over FireWire early on boot"
+        bool "Remote debugging over FireWire early on boot"
         depends on PCI && X86
         help
           If you want to debug problems which hang or crash the kernel early
@@ -621,4 +621,17 @@ config PROVIDE_OHCI1394_DMA_INIT
 
           See Documentation/debugging-via-ohci1394.txt for more information.
 
+config FIREWIRE_OHCI_REMOTE_DMA
+        bool "Remote debugging over FireWire with firewire-ohci"
+        depends on FIREWIRE_OHCI
+        help
+          This option lets you use the FireWire bus for remote debugging
+          with help of the firewire-ohci driver. It enables unfiltered
+          remote DMA in firewire-ohci.
+          See Documentation/debugging-via-ohci1394.txt for more information.
+
+          If unsure, say N.
+
 source "samples/Kconfig"
+
+source "lib/Kconfig.kgdb"
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
new file mode 100644
index 000000000000..f2e01ac5ab09
--- /dev/null
+++ b/lib/Kconfig.kgdb
@@ -0,0 +1,58 @@
+
+menuconfig KGDB
+        bool "KGDB: kernel debugging with remote gdb"
+        select FRAME_POINTER
+        depends on HAVE_ARCH_KGDB
+        depends on DEBUG_KERNEL && EXPERIMENTAL
+        help
+          If you say Y here, it will be possible to remotely debug the
+          kernel using gdb. Documentation of kernel debugger is available
+          at http://kgdb.sourceforge.net as well as in DocBook form
+          in Documentation/DocBook/. If unsure, say N.
+
+config HAVE_ARCH_KGDB_SHADOW_INFO
+        bool
+
+config HAVE_ARCH_KGDB
+        bool
+
+config KGDB_SERIAL_CONSOLE
+        tristate "KGDB: use kgdb over the serial console"
+        depends on KGDB
+        select CONSOLE_POLL
+        select MAGIC_SYSRQ
+        default y
+        help
+          Share a serial console with kgdb. Sysrq-g must be used
+          to break in initially.
+
+config KGDB_TESTS
+        bool "KGDB: internal test suite"
+        depends on KGDB
+        default n
+        help
+          This is a kgdb I/O module specifically designed to test
+          kgdb's internal functions. This kgdb I/O module is
+          intended for the development of new kgdb stubs
+          as well as regression testing the kgdb internals.
+          See drivers/misc/kgdbts.c for details about the tests.
+          The most basic use of this I/O module is to boot a kernel
+          with the boot arguments "kgdbwait kgdbts=V1F100".
+
+config KGDB_TESTS_ON_BOOT
+        bool "KGDB: Run tests on boot"
+        depends on KGDB_TESTS
+        default n
+        help
+          Run the kgdb tests on boot up automatically without the need
+          to pass in a kernel parameter.
+
+config KGDB_TESTS_BOOT_STRING
+        string "KGDB: which internal kgdb tests to run"
+        depends on KGDB_TESTS_ON_BOOT
+        default "V1F100"
+        help
+          This is the command string to send the kgdb test suite on
+          boot. See drivers/misc/kgdbts.c for detailed
+          information about other strings you could use beyond the
+          default of V1F100.
diff --git a/lib/Makefile b/lib/Makefile
index 23de261a4c83..bf8000fc7d48 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -29,7 +29,6 @@ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
 lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
-lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o
 lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
@@ -61,7 +60,6 @@ obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
 obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
 obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
 obj-$(CONFIG_SMP) += percpu_counter.o
-obj-$(CONFIG_SMP) += pcounter.o
 obj-$(CONFIG_AUDIT_GENERIC) += audit.o
 
 obj-$(CONFIG_SWIOTLB) += swiotlb.o
@@ -70,6 +68,8 @@ obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
 
 lib-$(CONFIG_GENERIC_BUG) += bug.o
 
+obj-$(CONFIG_HAVE_LMB) += lmb.o
+
 hostprogs-y := gen_crc32table
 clean-files := crc32table.h
 
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 2c9242e3fed0..a6939e18d7bb 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -316,6 +316,22 @@ int bitmap_scnprintf(char *buf, unsigned int buflen,
 EXPORT_SYMBOL(bitmap_scnprintf);
 
 /**
+ * bitmap_scnprintf_len - return buffer length needed to convert
+ * bitmap to an ASCII hex string.
+ * @len: number of bits to be converted
+ */
+int bitmap_scnprintf_len(unsigned int len)
+{
+        /* we need 9 chars per word for 32 bit words (8 hexdigits + sep/null) */
+        int bitslen = ALIGN(len, CHUNKSZ);
+        int wordlen = CHUNKSZ / 4;
+        int buflen = (bitslen / wordlen) * (wordlen + 1) * sizeof(char);
+
+        return buflen;
+}
+EXPORT_SYMBOL(bitmap_scnprintf_len);
+
+/**
  * __bitmap_parse - convert an ASCII hex string into a bitmap.
  * @buf: pointer to buffer containing string.
  * @buflen: buffer size in bytes. If string is smaller than this
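To make the sizing concrete: for nbits = 64, ALIGN(64, 32) = 64 and wordlen = 8, so the function returns (64 / 8) * 9 = 72 bytes, comfortably above the 18 characters that two 32-bit chunks ("xxxxxxxx,xxxxxxxx" plus the NUL) actually occupy. Below is a minimal sketch of pairing it with bitmap_scnprintf(); the helper name example_print_mask is hypothetical.

#include <linux/bitmap.h>
#include <linux/slab.h>

/* Hypothetical helper: size the buffer with bitmap_scnprintf_len(),
 * then print the mask into it. Caller frees the returned buffer. */
static char *example_print_mask(const unsigned long *mask, int nbits)
{
        int buflen = bitmap_scnprintf_len(nbits);
        char *buf = kmalloc(buflen, GFP_KERNEL);

        if (buf)
                bitmap_scnprintf(buf, buflen, mask, nbits);
        return buf;
}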
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 812dbf00844b..cd3e82530b03 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -8,6 +8,7 @@
 #include <linux/smp_lock.h>
 #include <linux/module.h>
 #include <linux/kallsyms.h>
+#include <linux/semaphore.h>
 
 /*
  * The 'big kernel semaphore'
diff --git a/lib/kobject.c b/lib/kobject.c
index 0d03252f87a8..2c6490370922 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -58,11 +58,6 @@ static int create_dir(struct kobject *kobj)
         return error;
 }
 
-static inline struct kobject *to_kobj(struct list_head *entry)
-{
-        return container_of(entry, struct kobject, entry);
-}
-
 static int get_kobj_path_length(struct kobject *kobj)
 {
         int length = 1;
@@ -592,8 +587,15 @@ static void kobject_release(struct kref *kref)
  */
 void kobject_put(struct kobject *kobj)
 {
-        if (kobj)
+        if (kobj) {
+                if (!kobj->state_initialized) {
+                        printk(KERN_WARNING "kobject: '%s' (%p): is not "
+                               "initialized, yet kobject_put() is being "
+                               "called.\n", kobject_name(kobj), kobj);
+                        WARN_ON(1);
+                }
                 kref_put(&kobj->kref, kobject_release);
+        }
 }
 
 static void dynamic_kobj_release(struct kobject *kobj)
@@ -745,12 +747,11 @@ void kset_unregister(struct kset *k)
  */
 struct kobject *kset_find_obj(struct kset *kset, const char *name)
 {
-        struct list_head *entry;
+        struct kobject *k;
         struct kobject *ret = NULL;
 
         spin_lock(&kset->list_lock);
-        list_for_each(entry, &kset->list) {
-                struct kobject *k = to_kobj(entry);
+        list_for_each_entry(k, &kset->list, entry) {
                 if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
                         ret = kobject_get(k);
                         break;
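The warning added to kobject_put() fires when state_initialized is still clear, that is, when a caller drops a reference on a kobject that was never initialized. A minimal sketch of the init-then-put pattern the warning expects; example_kobj, example_ktype, and example_register are hypothetical names.

#include <linux/kobject.h>

static struct kobject example_kobj;

static int example_register(struct kobj_type *example_ktype,
                            struct kobject *parent)
{
        int ret;

        /* kobject_init_and_add() marks the kobject initialized, so a
         * later kobject_put() is legitimate and stays silent. */
        ret = kobject_init_and_add(&example_kobj, example_ktype, parent,
                                   "example");
        if (ret)
                kobject_put(&example_kobj);     /* drop the ref on failure */
        return ret;
}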
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 5b6d7f6956b9..9fb6b86cf6b1 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -15,11 +15,13 @@
  */
 
 #include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+
 #include <linux/socket.h>
 #include <linux/skbuff.h>
 #include <linux/netlink.h>
-#include <linux/string.h>
-#include <linux/kobject.h>
 #include <net/sock.h>
 
 
diff --git a/lib/lmb.c b/lib/lmb.c
new file mode 100644
index 000000000000..896e2832099e
--- /dev/null
+++ b/lib/lmb.c
@@ -0,0 +1,428 @@
+/*
+ * Procedures for maintaining information about logical memory blocks.
+ *
+ * Peter Bergner, IBM Corp. June 2001.
+ * Copyright (C) 2001 Peter Bergner.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/lmb.h>
+
+#define LMB_ALLOC_ANYWHERE 0
+
+struct lmb lmb;
+
+void lmb_dump_all(void)
+{
+#ifdef DEBUG
+        unsigned long i;
+
+        pr_debug("lmb_dump_all:\n");
+        pr_debug(" memory.cnt = 0x%lx\n", lmb.memory.cnt);
+        pr_debug(" memory.size = 0x%llx\n",
+                 (unsigned long long)lmb.memory.size);
+        for (i=0; i < lmb.memory.cnt ;i++) {
+                pr_debug(" memory.region[0x%x].base = 0x%llx\n",
+                         i, (unsigned long long)lmb.memory.region[i].base);
+                pr_debug(" .size = 0x%llx\n",
+                         (unsigned long long)lmb.memory.region[i].size);
+        }
+
+        pr_debug(" reserved.cnt = 0x%lx\n", lmb.reserved.cnt);
+        pr_debug(" reserved.size = 0x%lx\n", lmb.reserved.size);
+        for (i=0; i < lmb.reserved.cnt ;i++) {
+                pr_debug(" reserved.region[0x%x].base = 0x%llx\n",
+                         i, (unsigned long long)lmb.reserved.region[i].base);
+                pr_debug(" .size = 0x%llx\n",
+                         (unsigned long long)lmb.reserved.region[i].size);
+        }
+#endif /* DEBUG */
+}
+
+static unsigned long __init lmb_addrs_overlap(u64 base1, u64 size1,
+                                              u64 base2, u64 size2)
+{
+        return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
+}
+
+static long __init lmb_addrs_adjacent(u64 base1, u64 size1,
+                                      u64 base2, u64 size2)
+{
+        if (base2 == base1 + size1)
+                return 1;
+        else if (base1 == base2 + size2)
+                return -1;
+
+        return 0;
+}
+
+static long __init lmb_regions_adjacent(struct lmb_region *rgn,
+                                        unsigned long r1, unsigned long r2)
+{
+        u64 base1 = rgn->region[r1].base;
+        u64 size1 = rgn->region[r1].size;
+        u64 base2 = rgn->region[r2].base;
+        u64 size2 = rgn->region[r2].size;
+
+        return lmb_addrs_adjacent(base1, size1, base2, size2);
+}
+
+static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
+{
+        unsigned long i;
+
+        for (i = r; i < rgn->cnt - 1; i++) {
+                rgn->region[i].base = rgn->region[i + 1].base;
+                rgn->region[i].size = rgn->region[i + 1].size;
+        }
+        rgn->cnt--;
+}
+
+/* Assumption: base addr of region 1 < base addr of region 2 */
+static void __init lmb_coalesce_regions(struct lmb_region *rgn,
+                                        unsigned long r1, unsigned long r2)
+{
+        rgn->region[r1].size += rgn->region[r2].size;
+        lmb_remove_region(rgn, r2);
+}
+
+void __init lmb_init(void)
+{
+        /* Create a dummy zero size LMB which will get coalesced away later.
+         * This simplifies the lmb_add() code below...
+         */
+        lmb.memory.region[0].base = 0;
+        lmb.memory.region[0].size = 0;
+        lmb.memory.cnt = 1;
+
+        /* Ditto. */
+        lmb.reserved.region[0].base = 0;
+        lmb.reserved.region[0].size = 0;
+        lmb.reserved.cnt = 1;
+}
+
+void __init lmb_analyze(void)
+{
+        int i;
+
+        lmb.memory.size = 0;
+
+        for (i = 0; i < lmb.memory.cnt; i++)
+                lmb.memory.size += lmb.memory.region[i].size;
+}
+
+static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
+{
+        unsigned long coalesced = 0;
+        long adjacent, i;
+
+        if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
+                rgn->region[0].base = base;
+                rgn->region[0].size = size;
+                return 0;
+        }
+
+        /* First try and coalesce this LMB with another. */
+        for (i = 0; i < rgn->cnt; i++) {
+                u64 rgnbase = rgn->region[i].base;
+                u64 rgnsize = rgn->region[i].size;
+
+                if ((rgnbase == base) && (rgnsize == size))
+                        /* Already have this region, so we're done */
+                        return 0;
+
+                adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
+                if (adjacent > 0) {
+                        rgn->region[i].base -= size;
+                        rgn->region[i].size += size;
+                        coalesced++;
+                        break;
+                } else if (adjacent < 0) {
+                        rgn->region[i].size += size;
+                        coalesced++;
+                        break;
+                }
+        }
+
+        if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i+1)) {
+                lmb_coalesce_regions(rgn, i, i+1);
+                coalesced++;
+        }
+
+        if (coalesced)
+                return coalesced;
+        if (rgn->cnt >= MAX_LMB_REGIONS)
+                return -1;
+
+        /* Couldn't coalesce the LMB, so add it to the sorted table. */
+        for (i = rgn->cnt - 1; i >= 0; i--) {
+                if (base < rgn->region[i].base) {
+                        rgn->region[i+1].base = rgn->region[i].base;
+                        rgn->region[i+1].size = rgn->region[i].size;
+                } else {
+                        rgn->region[i+1].base = base;
+                        rgn->region[i+1].size = size;
+                        break;
+                }
+        }
+
+        if (base < rgn->region[0].base) {
+                rgn->region[0].base = base;
+                rgn->region[0].size = size;
+        }
+        rgn->cnt++;
+
+        return 0;
+}
+
+long __init lmb_add(u64 base, u64 size)
+{
+        struct lmb_region *_rgn = &lmb.memory;
+
+        /* On pSeries LPAR systems, the first LMB is our RMO region. */
+        if (base == 0)
+                lmb.rmo_size = size;
+
+        return lmb_add_region(_rgn, base, size);
+
+}
+
+long __init lmb_reserve(u64 base, u64 size)
+{
+        struct lmb_region *_rgn = &lmb.reserved;
+
+        BUG_ON(0 == size);
+
+        return lmb_add_region(_rgn, base, size);
+}
+
+long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
+{
+        unsigned long i;
+
+        for (i = 0; i < rgn->cnt; i++) {
+                u64 rgnbase = rgn->region[i].base;
+                u64 rgnsize = rgn->region[i].size;
+                if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
+                        break;
+        }
+
+        return (i < rgn->cnt) ? i : -1;
+}
+
+static u64 lmb_align_down(u64 addr, u64 size)
+{
+        return addr & ~(size - 1);
+}
+
+static u64 lmb_align_up(u64 addr, u64 size)
+{
+        return (addr + (size - 1)) & ~(size - 1);
+}
+
+static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
+                                           u64 size, u64 align)
+{
+        u64 base, res_base;
+        long j;
+
+        base = lmb_align_down((end - size), align);
+        while (start <= base) {
+                j = lmb_overlaps_region(&lmb.reserved, base, size);
+                if (j < 0) {
+                        /* this area isn't reserved, take it */
+                        if (lmb_add_region(&lmb.reserved, base,
+                                           lmb_align_up(size, align)) < 0)
+                                base = ~(u64)0;
+                        return base;
+                }
+                res_base = lmb.reserved.region[j].base;
+                if (res_base < size)
+                        break;
+                base = lmb_align_down(res_base - size, align);
+        }
+
+        return ~(u64)0;
+}
+
+static u64 __init lmb_alloc_nid_region(struct lmb_property *mp,
+                                       u64 (*nid_range)(u64, u64, int *),
+                                       u64 size, u64 align, int nid)
+{
+        u64 start, end;
+
+        start = mp->base;
+        end = start + mp->size;
+
+        start = lmb_align_up(start, align);
+        while (start < end) {
+                u64 this_end;
+                int this_nid;
+
+                this_end = nid_range(start, end, &this_nid);
+                if (this_nid == nid) {
+                        u64 ret = lmb_alloc_nid_unreserved(start, this_end,
+                                                           size, align);
+                        if (ret != ~(u64)0)
+                                return ret;
+                }
+                start = this_end;
+        }
+
+        return ~(u64)0;
+}
+
+u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
+                         u64 (*nid_range)(u64 start, u64 end, int *nid))
+{
+        struct lmb_region *mem = &lmb.memory;
+        int i;
+
+        for (i = 0; i < mem->cnt; i++) {
+                u64 ret = lmb_alloc_nid_region(&mem->region[i],
+                                               nid_range,
+                                               size, align, nid);
+                if (ret != ~(u64)0)
+                        return ret;
+        }
+
+        return lmb_alloc(size, align);
+}
+
+u64 __init lmb_alloc(u64 size, u64 align)
+{
+        return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
+}
+
+u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
+{
+        u64 alloc;
+
+        alloc = __lmb_alloc_base(size, align, max_addr);
+
+        if (alloc == 0)
+                panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
+                      (unsigned long long) size, (unsigned long long) max_addr);
+
+        return alloc;
+}
+
+u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
+{
+        long i, j;
+        u64 base = 0;
+        u64 res_base;
+
+        BUG_ON(0 == size);
+
+        /* On some platforms, make sure we allocate lowmem */
+        /* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
+        if (max_addr == LMB_ALLOC_ANYWHERE)
+                max_addr = LMB_REAL_LIMIT;
+
+        for (i = lmb.memory.cnt - 1; i >= 0; i--) {
+                u64 lmbbase = lmb.memory.region[i].base;
+                u64 lmbsize = lmb.memory.region[i].size;
+
+                if (lmbsize < size)
+                        continue;
+                if (max_addr == LMB_ALLOC_ANYWHERE)
+                        base = lmb_align_down(lmbbase + lmbsize - size, align);
+                else if (lmbbase < max_addr) {
+                        base = min(lmbbase + lmbsize, max_addr);
+                        base = lmb_align_down(base - size, align);
+                } else
+                        continue;
+
+                while (base && lmbbase <= base) {
+                        j = lmb_overlaps_region(&lmb.reserved, base, size);
+                        if (j < 0) {
+                                /* this area isn't reserved, take it */
+                                if (lmb_add_region(&lmb.reserved, base,
+                                                   size) < 0)
+                                        return 0;
+                                return base;
+                        }
+                        res_base = lmb.reserved.region[j].base;
+                        if (res_base < size)
+                                break;
+                        base = lmb_align_down(res_base - size, align);
+                }
+        }
+        return 0;
+}
+
+/* You must call lmb_analyze() before this. */
+u64 __init lmb_phys_mem_size(void)
+{
+        return lmb.memory.size;
+}
+
+u64 __init lmb_end_of_DRAM(void)
+{
+        int idx = lmb.memory.cnt - 1;
+
+        return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
+}
+
+/* You must call lmb_analyze() after this. */
+void __init lmb_enforce_memory_limit(u64 memory_limit)
+{
+        unsigned long i;
+        u64 limit;
+        struct lmb_property *p;
+
+        if (!memory_limit)
+                return;
+
+        /* Truncate the lmb regions to satisfy the memory limit. */
+        limit = memory_limit;
+        for (i = 0; i < lmb.memory.cnt; i++) {
+                if (limit > lmb.memory.region[i].size) {
+                        limit -= lmb.memory.region[i].size;
+                        continue;
+                }
+
+                lmb.memory.region[i].size = limit;
+                lmb.memory.cnt = i + 1;
+                break;
+        }
+
+        if (lmb.memory.region[0].size < lmb.rmo_size)
+                lmb.rmo_size = lmb.memory.region[0].size;
+
+        /* And truncate any reserves above the limit also. */
+        for (i = 0; i < lmb.reserved.cnt; i++) {
+                p = &lmb.reserved.region[i];
+
+                if (p->base > memory_limit)
+                        p->size = 0;
+                else if ((p->base + p->size) > memory_limit)
+                        p->size = memory_limit - p->base;
+
+                if (p->size == 0) {
+                        lmb_remove_region(&lmb.reserved, i);
+                        i--;
+                }
+        }
+}
+
+int __init lmb_is_reserved(u64 addr)
+{
+        int i;
+
+        for (i = 0; i < lmb.reserved.cnt; i++) {
+                u64 upper = lmb.reserved.region[i].base +
+                            lmb.reserved.region[i].size - 1;
+                if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
+                        return 1;
+        }
+        return 0;
+}
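As a reading aid for the new file, here is a hedged sketch of how early arch setup code might drive this API. The function name and every address below are invented for illustration; real callers pass addresses discovered from firmware.

#include <linux/init.h>
#include <linux/lmb.h>

void __init example_arch_memory_setup(void)
{
        u64 scratch;

        lmb_init();

        /* Register two RAM banks reported by firmware (made-up layout). */
        lmb_add(0x00000000, 0x40000000);        /* 1 GB at 0    */
        lmb_add(0x80000000, 0x40000000);        /* 1 GB at 2 GB */

        /* Keep the allocator away from the kernel image and boot data. */
        lmb_reserve(0x00000000, 0x00800000);    /* first 8 MB   */

        lmb_analyze();                  /* computes lmb.memory.size */

        /* Carve out 1 MB, 64 KB aligned, below 1 GB; panics on failure. */
        scratch = lmb_alloc_base(0x100000, 0x10000, 0x40000000);

        lmb_dump_all();                 /* no-op unless DEBUG is defined */
}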
diff --git a/lib/pcounter.c b/lib/pcounter.c
deleted file mode 100644
index 9b56807da93b..000000000000
--- a/lib/pcounter.c
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Define default pcounter functions
- * Note that often used pcounters use dedicated functions to get a speed increase.
- * (see DEFINE_PCOUNTER/REF_PCOUNTER_MEMBER)
- */
-
-#include <linux/module.h>
-#include <linux/pcounter.h>
-#include <linux/smp.h>
-#include <linux/cpumask.h>
-
-static void pcounter_dyn_add(struct pcounter *self, int inc)
-{
-        per_cpu_ptr(self->per_cpu_values, smp_processor_id())[0] += inc;
-}
-
-static int pcounter_dyn_getval(const struct pcounter *self, int cpu)
-{
-        return per_cpu_ptr(self->per_cpu_values, cpu)[0];
-}
-
-int pcounter_getval(const struct pcounter *self)
-{
-        int res = 0, cpu;
-
-        for_each_possible_cpu(cpu)
-                res += self->getval(self, cpu);
-
-        return res;
-}
-EXPORT_SYMBOL_GPL(pcounter_getval);
-
-int pcounter_alloc(struct pcounter *self)
-{
-        int rc = 0;
-        if (self->add == NULL) {
-                self->per_cpu_values = alloc_percpu(int);
-                if (self->per_cpu_values != NULL) {
-                        self->add = pcounter_dyn_add;
-                        self->getval = pcounter_dyn_getval;
-                } else
-                        rc = 1;
-        }
-        return rc;
-}
-EXPORT_SYMBOL_GPL(pcounter_alloc);
-
-void pcounter_free(struct pcounter *self)
-{
-        if (self->per_cpu_values != NULL) {
-                free_percpu(self->per_cpu_values);
-                self->per_cpu_values = NULL;
-                self->getval = NULL;
-                self->add = NULL;
-        }
-}
-EXPORT_SYMBOL_GPL(pcounter_free);
-
diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c
index 3ea2db94d5b0..06d04cfa9339 100644
--- a/lib/reed_solomon/reed_solomon.c
+++ b/lib/reed_solomon/reed_solomon.c
@@ -45,7 +45,6 @@
 #include <linux/rslib.h>
 #include <linux/slab.h>
 #include <linux/mutex.h>
-#include <asm/semaphore.h>
 
 /* This list holds all currently allocated rs control structures */
 static LIST_HEAD (rslist);
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index acca4901046c..b80c21100d78 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -8,6 +8,7 @@
  */
 #include <linux/module.h>
 #include <linux/scatterlist.h>
+#include <linux/highmem.h>
 
 /**
  * sg_next - return the next scatterlist entry in a list
@@ -292,3 +293,104 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
         return ret;
 }
 EXPORT_SYMBOL(sg_alloc_table);
+
+/**
+ * sg_copy_buffer - Copy data between a linear buffer and an SG list
+ * @sgl: The SG list
+ * @nents: Number of SG entries
+ * @buf: Where to copy from
+ * @buflen: The number of bytes to copy
+ * @to_buffer: transfer direction (non zero == from an sg list to a
+ *             buffer, 0 == from a buffer to an sg list)
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
+                             void *buf, size_t buflen, int to_buffer)
+{
+        struct scatterlist *sg;
+        size_t buf_off = 0;
+        int i;
+
+        WARN_ON(!irqs_disabled());
+
+        for_each_sg(sgl, sg, nents, i) {
+                struct page *page;
+                int n = 0;
+                unsigned int sg_off = sg->offset;
+                unsigned int sg_copy = sg->length;
+
+                if (sg_copy > buflen)
+                        sg_copy = buflen;
+                buflen -= sg_copy;
+
+                while (sg_copy > 0) {
+                        unsigned int page_copy;
+                        void *p;
+
+                        page_copy = PAGE_SIZE - sg_off;
+                        if (page_copy > sg_copy)
+                                page_copy = sg_copy;
+
+                        page = nth_page(sg_page(sg), n);
+                        p = kmap_atomic(page, KM_BIO_SRC_IRQ);
+
+                        if (to_buffer)
+                                memcpy(buf + buf_off, p + sg_off, page_copy);
+                        else {
+                                memcpy(p + sg_off, buf + buf_off, page_copy);
+                                flush_kernel_dcache_page(page);
+                        }
+
+                        kunmap_atomic(p, KM_BIO_SRC_IRQ);
+
+                        buf_off += page_copy;
+                        sg_off += page_copy;
+                        if (sg_off == PAGE_SIZE) {
+                                sg_off = 0;
+                                n++;
+                        }
+                        sg_copy -= page_copy;
+                }
+
+                if (!buflen)
+                        break;
+        }
+
+        return buf_off;
+}
+
+/**
+ * sg_copy_from_buffer - Copy from a linear buffer to an SG list
+ * @sgl: The SG list
+ * @nents: Number of SG entries
+ * @buf: Where to copy from
+ * @buflen: The number of bytes to copy
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
+                           void *buf, size_t buflen)
+{
+        return sg_copy_buffer(sgl, nents, buf, buflen, 0);
+}
+EXPORT_SYMBOL(sg_copy_from_buffer);
+
+/**
+ * sg_copy_to_buffer - Copy from an SG list to a linear buffer
+ * @sgl: The SG list
+ * @nents: Number of SG entries
+ * @buf: Where to copy to
+ * @buflen: The number of bytes to copy
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
+                         void *buf, size_t buflen)
+{
+        return sg_copy_buffer(sgl, nents, buf, buflen, 1);
+}
+EXPORT_SYMBOL(sg_copy_to_buffer);
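A hedged usage sketch for the two new helpers; example_sg_roundtrip and its buffer sizes are hypothetical. Note the WARN_ON(!irqs_disabled()) in sg_copy_buffer(): the KM_BIO_SRC_IRQ kmap slot assumes interrupts are off, so the sketch brackets the copies with local_irq_save()/local_irq_restore().

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>

static int example_sg_roundtrip(void)
{
        struct scatterlist sg;
        char src[32] = "payload", dst[32] = "";
        void *backing = kmalloc(32, GFP_KERNEL);
        unsigned long flags;

        if (!backing)
                return -ENOMEM;

        sg_init_one(&sg, backing, 32);          /* one-entry SG list */

        local_irq_save(flags);                  /* see WARN_ON above */
        sg_copy_from_buffer(&sg, 1, src, sizeof(src));  /* buf -> sg */
        sg_copy_to_buffer(&sg, 1, dst, sizeof(dst));    /* sg -> buf */
        local_irq_restore(flags);

        kfree(backing);
        return strcmp(src, dst) ? -EIO : 0;
}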
diff --git a/lib/semaphore-sleepers.c b/lib/semaphore-sleepers.c
deleted file mode 100644
index 0198782cdacb..000000000000
--- a/lib/semaphore-sleepers.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * i386 and x86-64 semaphore implementation.
- *
- * (C) Copyright 1999 Linus Torvalds
- *
- * Portions Copyright 1999 Red Hat, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
- */
-#include <linux/sched.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <asm/semaphore.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation).
- *
- * "sleeping" and the contention routine ordering is protected
- * by the spinlock in the semaphore's waitqueue head.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- *  - only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - when we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleeper" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-void __up(struct semaphore *sem)
-{
-        wake_up(&sem->wait);
-}
-
-void __sched __down(struct semaphore *sem)
-{
-        struct task_struct *tsk = current;
-        DECLARE_WAITQUEUE(wait, tsk);
-        unsigned long flags;
-
-        tsk->state = TASK_UNINTERRUPTIBLE;
-        spin_lock_irqsave(&sem->wait.lock, flags);
-        add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-        sem->sleepers++;
-        for (;;) {
-                int sleepers = sem->sleepers;
-
-                /*
-                 * Add "everybody else" into it. They aren't
-                 * playing, because we own the spinlock in
-                 * the wait_queue_head.
-                 */
-                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-                        sem->sleepers = 0;
-                        break;
-                }
-                sem->sleepers = 1;      /* us - see -1 above */
-                spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-                schedule();
-
-                spin_lock_irqsave(&sem->wait.lock, flags);
-                tsk->state = TASK_UNINTERRUPTIBLE;
-        }
-        remove_wait_queue_locked(&sem->wait, &wait);
-        wake_up_locked(&sem->wait);
-        spin_unlock_irqrestore(&sem->wait.lock, flags);
-        tsk->state = TASK_RUNNING;
-}
-
-int __sched __down_interruptible(struct semaphore *sem)
-{
-        int retval = 0;
-        struct task_struct *tsk = current;
-        DECLARE_WAITQUEUE(wait, tsk);
-        unsigned long flags;
-
-        tsk->state = TASK_INTERRUPTIBLE;
-        spin_lock_irqsave(&sem->wait.lock, flags);
-        add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-        sem->sleepers++;
-        for (;;) {
-                int sleepers = sem->sleepers;
-
-                /*
-                 * With signals pending, this turns into
-                 * the trylock failure case - we won't be
-                 * sleeping, and we can't get the lock as
-                 * it has contention. Just correct the count
-                 * and exit.
-                 */
-                if (signal_pending(current)) {
-                        retval = -EINTR;
-                        sem->sleepers = 0;
-                        atomic_add(sleepers, &sem->count);
-                        break;
-                }
-
-                /*
-                 * Add "everybody else" into it. They aren't
-                 * playing, because we own the spinlock in
-                 * wait_queue_head. The "-1" is because we're
-                 * still hoping to get the semaphore.
-                 */
-                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-                        sem->sleepers = 0;
-                        break;
-                }
-                sem->sleepers = 1;      /* us - see -1 above */
-                spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-                schedule();
-
-                spin_lock_irqsave(&sem->wait.lock, flags);
-                tsk->state = TASK_INTERRUPTIBLE;
-        }
-        remove_wait_queue_locked(&sem->wait, &wait);
-        wake_up_locked(&sem->wait);
-        spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-        tsk->state = TASK_RUNNING;
-        return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for
- * having decremented the count.
- *
- * We could have done the trylock with a
- * single "cmpxchg" without failure cases,
- * but then it wouldn't work on a 386.
- */
-int __down_trylock(struct semaphore *sem)
-{
-        int sleepers;
-        unsigned long flags;
-
-        spin_lock_irqsave(&sem->wait.lock, flags);
-        sleepers = sem->sleepers + 1;
-        sem->sleepers = 0;
-
-        /*
-         * Add "everybody else" and us into it. They aren't
-         * playing, because we own the spinlock in the
-         * wait_queue_head.
-         */
-        if (!atomic_add_negative(sleepers, &sem->count)) {
-                wake_up_locked(&sem->wait);
-        }
-
-        spin_unlock_irqrestore(&sem->wait.lock, flags);
-        return 1;
-}
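The deleted per-arch sleeper code is superseded by the generic semaphore API (note the new #include <linux/semaphore.h> in lib/kernel_lock.c above). A minimal sketch of that API as callers see it; example_sem and example_critical are hypothetical names.

#include <linux/semaphore.h>

static DECLARE_MUTEX(example_sem);      /* semaphore initialized to count 1 */

static int example_critical(void)
{
        int ret = down_interruptible(&example_sem);

        if (ret)
                return ret;     /* -EINTR if a signal broke the sleep */

        /* ... exclusive work ... */

        up(&example_sem);
        return 0;
}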