author    Dmitry Torokhov <dtor@insightbb.com>  2007-05-01 00:24:54 -0400
committer Dmitry Torokhov <dtor@insightbb.com>  2007-05-01 00:24:54 -0400
commit    bc95f3669f5e6f63cf0b84fe4922c3c6dd4aa775 (patch)
tree      427fcf2a7287c16d4b5aa6cbf494d59579a6a8b1 /lib
parent    3d29cdff999c37b3876082278a8134a0642a02cd (diff)
parent    dc87c3985e9b442c60994308a96f887579addc39 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6

Conflicts:
	drivers/usb/input/Makefile
	drivers/usb/input/gtco.c
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug    |  12
-rw-r--r--  lib/Makefile         |   5
-rw-r--r--  lib/bitmap.c         |  16
-rw-r--r--  lib/cpumask.c        |  18
-rw-r--r--  lib/devres.c         |  26
-rw-r--r--  lib/div64.c          |  22
-rw-r--r--  lib/fault-inject.c   |  41
-rw-r--r--  lib/genalloc.c       |   4
-rw-r--r--  lib/kobject.c        |  67
-rw-r--r--  lib/kobject_uevent.c |  28
-rw-r--r--  lib/kref.c           |   2
-rw-r--r--  lib/string.c         |  28
-rw-r--r--  lib/swiotlb.c        | 184
-rw-r--r--  lib/textsearch.c     |   2
-rw-r--r--  lib/vsprintf.c       |  26
15 files changed, 247 insertions(+), 234 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 4448f91b865c..79afd00bbe5f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -261,7 +261,7 @@ config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
 	select STACKTRACE
-	select FRAME_POINTER if !X86
+	select FRAME_POINTER if !X86 && !MIPS
 	select KALLSYMS
 	select KALLSYMS_ALL
 
@@ -411,8 +411,6 @@ config LKDTM
 config FAULT_INJECTION
 	bool "Fault-injection framework"
 	depends on DEBUG_KERNEL
-	depends on STACKTRACE
-	select FRAME_POINTER
 	help
 	  Provide fault-injection framework.
 	  For more details, see Documentation/fault-injection/.
@@ -440,3 +438,11 @@ config FAULT_INJECTION_DEBUG_FS
 	depends on FAULT_INJECTION && SYSFS && DEBUG_FS
 	help
 	  Enable configuration of fault-injection capabilities via debugfs.
+
+config FAULT_INJECTION_STACKTRACE_FILTER
+	bool "stacktrace filter for fault-injection capabilities"
+	depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
+	select STACKTRACE
+	select FRAME_POINTER
+	help
+	  Provide stacktrace filter for fault-injection capabilities
diff --git a/lib/Makefile b/lib/Makefile
index 992a39ef9ffd..ae57f357fec0 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -4,7 +4,7 @@
 
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 rbtree.o radix-tree.o dump_stack.o \
-	 idr.o div64.o int_sqrt.o bitmap.o extable.o prio_tree.o \
+	 idr.o int_sqrt.o bitmap.o extable.o prio_tree.o \
 	 sha1.o irq_regs.o reciprocal_div.o
 
 lib-$(CONFIG_MMU) += ioremap.o
@@ -12,7 +12,8 @@ lib-$(CONFIG_SMP) += cpumask.o
 
 lib-y += kobject.o kref.o kobject_uevent.o klist.o
 
-obj-y += sort.o parser.o halfmd4.o debug_locks.o random32.o bust_spinlocks.o
+obj-y += div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
+	 bust_spinlocks.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
diff --git a/lib/bitmap.c b/lib/bitmap.c
index ee6e58fce8f7..26ebafa8c41d 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -97,10 +97,10 @@ EXPORT_SYMBOL(__bitmap_complement);
 
 /**
  * __bitmap_shift_right - logical right shift of the bits in a bitmap
- *   @dst - destination bitmap
- *   @src - source bitmap
- *   @nbits - shift by this many bits
- *   @bits - bitmap size, in bits
+ *   @dst : destination bitmap
+ *   @src : source bitmap
+ *   @shift : shift by this many bits
+ *   @bits : bitmap size, in bits
  *
  * Shifting right (dividing) means moving bits in the MS -> LS bit
  * direction.  Zeros are fed into the vacated MS positions and the
@@ -141,10 +141,10 @@ EXPORT_SYMBOL(__bitmap_shift_right);
 
 /**
  * __bitmap_shift_left - logical left shift of the bits in a bitmap
- *   @dst - destination bitmap
- *   @src - source bitmap
- *   @nbits - shift by this many bits
- *   @bits - bitmap size, in bits
+ *   @dst : destination bitmap
+ *   @src : source bitmap
+ *   @shift : shift by this many bits
+ *   @bits : bitmap size, in bits
  *
  * Shifting left (multiplying) means moving bits in the LS -> MS
  * direction.  Zeros are fed into the vacated LS bit positions
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 3a67dc5ada7d..1ea2c184315d 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -15,22 +15,8 @@ int __next_cpu(int n, const cpumask_t *srcp)
 }
 EXPORT_SYMBOL(__next_cpu);
 
-/*
- * Find the highest possible smp_processor_id()
- *
- * Note: if we're prepared to assume that cpu_possible_map never changes
- * (reasonable) then this function should cache its return value.
- */
-int highest_possible_processor_id(void)
-{
-	unsigned int cpu;
-	unsigned highest = 0;
-
-	for_each_cpu_mask(cpu, cpu_possible_map)
-		highest = cpu;
-	return highest;
-}
-EXPORT_SYMBOL(highest_possible_processor_id);
+int nr_cpu_ids;
+EXPORT_SYMBOL(nr_cpu_ids);
 
 int __any_online_cpu(const cpumask_t *mask)
 {
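
Note: highest_possible_processor_id() is gone; callers are expected to move to the new nr_cpu_ids variable, which boot code sets to the highest possible CPU id plus one, so the possible-map no longer has to be rescanned on every call. A minimal sketch of the caller-side conversion (the helper name below is hypothetical):

    /* Hypothetical caller, for illustration: one slot per possible CPU id. */
    #include <linux/cpumask.h>
    #include <linux/slab.h>

    static int *alloc_percpu_slots(void)
    {
        /* was: kcalloc(highest_possible_processor_id() + 1, ...) */
        return kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);
    }
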
diff --git a/lib/devres.c b/lib/devres.c
index eb38849aa717..b1d336ce7f3d 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -296,5 +296,31 @@ int pcim_iomap_regions(struct pci_dev *pdev, u16 mask, const char *name)
 	return rc;
 }
 EXPORT_SYMBOL(pcim_iomap_regions);
+
+/**
+ * pcim_iounmap_regions - Unmap and release PCI BARs
+ * @pdev: PCI device to map IO resources for
+ * @mask: Mask of BARs to unmap and release
+ *
+ * Unmap and release regions specified by @mask.
+ */
+void pcim_iounmap_regions(struct pci_dev *pdev, u16 mask)
+{
+	void __iomem * const *iomap;
+	int i;
+
+	iomap = pcim_iomap_table(pdev);
+	if (!iomap)
+		return;
+
+	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+		if (!(mask & (1 << i)))
+			continue;
+
+		pcim_iounmap(pdev, iomap[i]);
+		pci_release_region(pdev, i);
+	}
+}
+EXPORT_SYMBOL(pcim_iounmap_regions);
 #endif
 #endif
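
The new helper is the inverse of pcim_iomap_regions(), keyed by the same BAR mask. A sketch of intended use in a managed driver (the driver name and BAR choice are made up):

    /* Hypothetical managed-driver fragment, for illustration only. */
    #include <linux/pci.h>

    #define MYDRV_BARS (1 << 0)    /* BAR 0 only */

    static int mydrv_setup(struct pci_dev *pdev)
    {
        int rc = pcim_iomap_regions(pdev, MYDRV_BARS, "mydrv");
        if (rc)
            return rc;
        /* ... program the device via pcim_iomap_table(pdev)[0] ... */
        pcim_iounmap_regions(pdev, MYDRV_BARS);    /* hand BAR 0 back early */
        return 0;
    }
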
diff --git a/lib/div64.c b/lib/div64.c
index 365719f84832..b71cf93c529a 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -23,7 +23,7 @@
 /* Not needed on 64bit architectures */
 #if BITS_PER_LONG == 32
 
-uint32_t __div64_32(uint64_t *n, uint32_t base)
+uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
 {
 	uint64_t rem = *n;
 	uint64_t b = base;
@@ -58,4 +58,24 @@ uint32_t __div64_32(uint64_t *n, uint32_t base)
 
 EXPORT_SYMBOL(__div64_32);
 
+/* 64bit divisor, dividend and result. dynamic precision */
+uint64_t div64_64(uint64_t dividend, uint64_t divisor)
+{
+	uint32_t high, d;
+
+	high = divisor >> 32;
+	if (high) {
+		unsigned int shift = fls(high);
+
+		d = divisor >> shift;
+		dividend >>= shift;
+	} else
+		d = divisor;
+
+	do_div(dividend, d);
+
+	return dividend;
+}
+EXPORT_SYMBOL(div64_64);
+
 #endif /* BITS_PER_LONG == 32 */
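
The "dynamic precision" comment means: when the divisor does not fit in 32 bits, both operands are shifted right by fls() of the divisor's high word until it does, and the cheap 64/32 division (do_div()) finishes the job. Low divisor bits are discarded, so the quotient is an approximation that the callers of this helper accept. The reduction can be sanity-checked with a standalone userspace model (fls32() here reimplements the kernel's fls() semantics):

    #include <stdint.h>
    #include <stdio.h>

    static unsigned int fls32(uint32_t x)    /* 1-based index of highest set bit */
    {
        unsigned int r = 0;

        while (x) {
            x >>= 1;
            r++;
        }
        return r;
    }

    /* Userspace model of div64_64()'s operand reduction. */
    static uint64_t div64_64_model(uint64_t dividend, uint64_t divisor)
    {
        uint32_t high = divisor >> 32;
        uint32_t d;

        if (high) {
            unsigned int shift = fls32(high);

            d = divisor >> shift;    /* divisor now fits in 32 bits */
            dividend >>= shift;      /* scale the dividend by the same amount */
        } else
            d = divisor;

        return dividend / d;         /* stands in for do_div(dividend, d) */
    }

    int main(void)
    {
        uint64_t q = div64_64_model(1ULL << 60, (1ULL << 33) + 5);

        printf("%llu\n", (unsigned long long)q);    /* 134217727, matching the exact quotient here */
        return 0;
    }
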
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index b5a90fc056d3..0fabd12c39d7 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -55,7 +55,7 @@ static bool fail_task(struct fault_attr *attr, struct task_struct *task)
 
 #define MAX_STACK_TRACE_DEPTH 32
 
-#if defined(CONFIG_STACKTRACE)
+#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
 
 static bool fail_stacktrace(struct fault_attr *attr)
 {
@@ -90,17 +90,10 @@ static bool fail_stacktrace(struct fault_attr *attr)
 
 static inline bool fail_stacktrace(struct fault_attr *attr)
 {
-	static bool firsttime = true;
-
-	if (firsttime) {
-		printk(KERN_WARNING
-		"This architecture does not implement save_stack_trace()\n");
-		firsttime = false;
-	}
-	return false;
+	return true;
 }
 
-#endif
+#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
 
 /*
  * This code is stolen from failmalloc-1.0
@@ -217,6 +210,8 @@ void cleanup_fault_attr_dentries(struct fault_attr *attr)
 	debugfs_remove(attr->dentries.task_filter_file);
 	attr->dentries.task_filter_file = NULL;
 
+#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
+
 	debugfs_remove(attr->dentries.stacktrace_depth_file);
 	attr->dentries.stacktrace_depth_file = NULL;
 
@@ -232,6 +227,8 @@ void cleanup_fault_attr_dentries(struct fault_attr *attr)
 	debugfs_remove(attr->dentries.reject_end_file);
 	attr->dentries.reject_end_file = NULL;
 
+#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
+
 	if (attr->dentries.dir)
 		WARN_ON(!simple_empty(attr->dentries.dir));
 
@@ -269,6 +266,13 @@ int init_fault_attr_dentries(struct fault_attr *attr, const char *name)
 	attr->dentries.task_filter_file = debugfs_create_bool("task-filter",
 						mode, dir, &attr->task_filter);
 
+	if (!attr->dentries.probability_file || !attr->dentries.interval_file ||
+	    !attr->dentries.times_file || !attr->dentries.space_file ||
+	    !attr->dentries.verbose_file || !attr->dentries.task_filter_file)
+		goto fail;
+
+#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
+
 	attr->dentries.stacktrace_depth_file =
 		debugfs_create_ul_MAX_STACK_TRACE_DEPTH(
 			"stacktrace-depth", mode, dir, &attr->stacktrace_depth);
@@ -285,18 +289,15 @@ int init_fault_attr_dentries(struct fault_attr *attr, const char *name)
 	attr->dentries.reject_end_file =
 		debugfs_create_ul("reject-end", mode, dir, &attr->reject_end);
 
-
-	if (!attr->dentries.probability_file || !attr->dentries.interval_file
-	    || !attr->dentries.times_file || !attr->dentries.space_file
-	    || !attr->dentries.verbose_file || !attr->dentries.task_filter_file
-	    || !attr->dentries.stacktrace_depth_file
-	    || !attr->dentries.require_start_file
-	    || !attr->dentries.require_end_file
-	    || !attr->dentries.reject_start_file
-	    || !attr->dentries.reject_end_file
-	    )
+	if (!attr->dentries.stacktrace_depth_file ||
+	    !attr->dentries.require_start_file ||
+	    !attr->dentries.require_end_file ||
+	    !attr->dentries.reject_start_file ||
+	    !attr->dentries.reject_end_file)
 		goto fail;
 
+#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
+
 	return 0;
 fail:
 	cleanup_fault_attr_dentries(attr);
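
For context, a client of this framework embeds a struct fault_attr and registers the debugfs files shown above; CONFIG_FAULT_INJECTION_STACKTRACE_FILTER only controls whether the stacktrace-filter files ("stacktrace-depth", "require-start", ...) are created alongside the basic ones. A rough sketch of a registration site, assuming the DECLARE_FAULT_ATTR() and should_fail() helpers from include/linux/fault-inject.h ("my_fail" is a made-up name):

    #include <linux/errno.h>
    #include <linux/fault-inject.h>

    static DECLARE_FAULT_ATTR(my_fail);

    static int __init my_fail_debugfs_init(void)
    {
        /* creates the my_fail/ directory with probability, interval, ... */
        return init_fault_attr_dentries(&my_fail, "my_fail");
    }

    static int my_operation(size_t size)
    {
        if (should_fail(&my_fail, size))
            return -ENOMEM;    /* injected failure */
        /* ... real work ... */
        return 0;
    }
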
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 75ae68ce03e1..eb7c2bab9ebf 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -148,7 +148,7 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
 			addr = chunk->start_addr +
 				((unsigned long)start_bit << order);
 			while (nbits--)
-				__set_bit(start_bit++, &chunk->bits);
+				__set_bit(start_bit++, chunk->bits);
 			spin_unlock_irqrestore(&chunk->lock, flags);
 			read_unlock(&pool->lock);
 			return addr;
@@ -187,7 +187,7 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
 		spin_lock_irqsave(&chunk->lock, flags);
 		bit = (addr - chunk->start_addr) >> order;
 		while (nbits--)
-			__clear_bit(bit++, &chunk->bits);
+			__clear_bit(bit++, chunk->bits);
 		spin_unlock_irqrestore(&chunk->lock, flags);
 		break;
 	}
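
The genalloc hunks are a type fix: chunk->bits is declared as an array member, so the expression chunk->bits already decays to the unsigned long * that __set_bit()/__clear_bit() expect, while &chunk->bits is a pointer to the whole array. Both evaluate to the same address, but only the former has the right type. A standalone illustration of the distinction:

    #include <stdio.h>

    struct chunk {
        unsigned long bits[4];
    };

    int main(void)
    {
        struct chunk c;
        unsigned long *p = c.bits;          /* decayed: what bitmap ops want */
        unsigned long (*q)[4] = &c.bits;    /* same address, array-pointer type */

        printf("%p %p\n", (void *)p, (void *)q);    /* prints equal addresses */
        return 0;
    }
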
diff --git a/lib/kobject.c b/lib/kobject.c
index 2782f49e906e..cecf2fbede3e 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -157,7 +157,7 @@ static void unlink(struct kobject * kobj)
 }
 
 /**
- *	kobject_add - add an object to the hierarchy.
+ *	kobject_shadow_add - add an object to the hierarchy.
  *	@kobj:	object.
  *	@shadow_parent: sysfs directory to add to.
  */
@@ -171,9 +171,10 @@ int kobject_shadow_add(struct kobject * kobj, struct dentry *shadow_parent)
 		return -ENOENT;
 	if (!kobj->k_name)
 		kobj->k_name = kobj->name;
-	if (!kobj->k_name) {
+	if (!*kobj->k_name) {
 		pr_debug("kobject attempted to be registered with no name!\n");
 		WARN_ON(1);
+		kobject_put(kobj);
 		return -EINVAL;
 	}
 	parent = kobject_get(kobj->parent);
@@ -190,8 +191,8 @@ int kobject_shadow_add(struct kobject * kobj, struct dentry *shadow_parent)
 
 		list_add_tail(&kobj->entry,&kobj->kset->list);
 		spin_unlock(&kobj->kset->list_lock);
+		kobj->parent = parent;
 	}
-	kobj->parent = parent;
 
 	error = create_dir(kobj, shadow_parent);
 	if (error) {
@@ -311,13 +312,43 @@ EXPORT_SYMBOL(kobject_set_name);
 int kobject_rename(struct kobject * kobj, const char *new_name)
 {
 	int error = 0;
+	const char *devpath = NULL;
+	char *devpath_string = NULL;
+	char *envp[2];
 
 	kobj = kobject_get(kobj);
 	if (!kobj)
 		return -EINVAL;
 	if (!kobj->parent)
 		return -EINVAL;
+
+	devpath = kobject_get_path(kobj, GFP_KERNEL);
+	if (!devpath) {
+		error = -ENOMEM;
+		goto out;
+	}
+	devpath_string = kmalloc(strlen(devpath) + 15, GFP_KERNEL);
+	if (!devpath_string) {
+		error = -ENOMEM;
+		goto out;
+	}
+	sprintf(devpath_string, "DEVPATH_OLD=%s", devpath);
+	envp[0] = devpath_string;
+	envp[1] = NULL;
+	/* Note: if we want to send the new name alone, not the full path,
+	 * we could probably use kobject_name(kobj); */
+
 	error = sysfs_rename_dir(kobj, kobj->parent->dentry, new_name);
+
+	/* This function is mostly/only used for network interfaces.
+	 * Some hotplug packages track interfaces by their name and
+	 * therefore want to know when the name is changed by the user. */
+	if (!error)
+		kobject_uevent_env(kobj, KOBJ_MOVE, envp);
+
+out:
+	kfree(devpath_string);
+	kfree(devpath);
 	kobject_put(kobj);
 
 	return error;
@@ -326,6 +357,7 @@ int kobject_rename(struct kobject * kobj, const char *new_name)
 /**
  * kobject_rename - change the name of an object
  * @kobj: object in question.
+ * @new_parent: object's new parent
  * @new_name: object's new name
  */
 
@@ -384,9 +416,11 @@ int kobject_move(struct kobject *kobj, struct kobject *new_parent)
 		goto out;
 	old_parent = kobj->parent;
 	kobj->parent = new_parent;
+	new_parent = NULL;
 	kobject_put(old_parent);
 	kobject_uevent_env(kobj, KOBJ_MOVE, envp);
 out:
+	kobject_put(new_parent);
 	kobject_put(kobj);
 	kfree(devpath_string);
 	kfree(devpath);
@@ -485,13 +519,15 @@ static struct kobj_type dir_ktype = {
 };
 
 /**
- *	kobject_add_dir - add sub directory of object.
+ *	kobject_kset_add_dir - add sub directory of object.
+ *	@kset:	kset the directory belongs to.
  *	@parent: object in which a directory is created.
  *	@name:	directory name.
  *
  *	Add a plain directory object as child of given object.
  */
-struct kobject *kobject_add_dir(struct kobject *parent, const char *name)
+struct kobject *kobject_kset_add_dir(struct kset *kset,
+				     struct kobject *parent, const char *name)
 {
 	struct kobject *k;
 	int ret;
@@ -503,13 +539,14 @@ struct kobject *kobject_add_dir(struct kobject *parent, const char *name)
 	if (!k)
 		return NULL;
 
+	k->kset = kset;
 	k->parent = parent;
 	k->ktype = &dir_ktype;
 	kobject_set_name(k, name);
 	ret = kobject_register(k);
 	if (ret < 0) {
-		printk(KERN_WARNING "kobject_add_dir: "
-			"kobject_register error: %d\n", ret);
+		printk(KERN_WARNING "%s: kobject_register error: %d\n",
+			__func__, ret);
 		kobject_del(k);
 		return NULL;
 	}
@@ -518,6 +555,18 @@ struct kobject *kobject_add_dir(struct kobject *parent, const char *name)
 }
 
 /**
+ *	kobject_add_dir - add sub directory of object.
+ *	@parent: object in which a directory is created.
+ *	@name:	directory name.
+ *
+ *	Add a plain directory object as child of given object.
+ */
+struct kobject *kobject_add_dir(struct kobject *parent, const char *name)
+{
+	return kobject_kset_add_dir(NULL, parent, name);
+}
+
+/**
  *	kset_init - initialize a kset for use
  *	@k:	kset
  */
@@ -610,7 +659,6 @@ struct kobject * kset_find_obj(struct kset * kset, const char * name)
 
 void subsystem_init(struct subsystem * s)
 {
-	init_rwsem(&s->rwsem);
 	kset_init(&s->kset);
 }
 
@@ -619,8 +667,7 @@ void subsystem_init(struct subsystem * s)
  * @s: the subsystem we're registering.
  *
  * Once we register the subsystem, we want to make sure that
- * the kset points back to this subsystem for correct usage of
- * the rwsem.
+ * the kset points back to this subsystem.
  */
 
 int subsystem_register(struct subsystem * s)
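
kobject_kset_add_dir() differs from kobject_add_dir() only in letting the caller attach the new directory kobject to a kset before registration, so events for it are routed through that kset; passing NULL reproduces the old behaviour, which is exactly what the kobject_add_dir() wrapper above does. A sketch of the two calls side by side ("my_kset" and "parent" are assumed to exist):

    /* Illustration only. */
    struct kobject *plain  = kobject_add_dir(parent, "holders");
    struct kobject *in_set = kobject_kset_add_dir(my_kset, parent, "holders");
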
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 84272ed77f03..12e311dc664c 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -42,10 +42,6 @@ static char *action_to_string(enum kobject_action action)
 		return "remove";
 	case KOBJ_CHANGE:
 		return "change";
-	case KOBJ_MOUNT:
-		return "mount";
-	case KOBJ_UMOUNT:
-		return "umount";
 	case KOBJ_OFFLINE:
 		return "offline";
 	case KOBJ_ONLINE:
@@ -95,10 +91,8 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 
 	/* search the kset we belong to */
 	top_kobj = kobj;
-	if (!top_kobj->kset && top_kobj->parent) {
-		do {
-			top_kobj = top_kobj->parent;
-		} while (!top_kobj->kset && top_kobj->parent);
+	while (!top_kobj->kset && top_kobj->parent) {
+		top_kobj = top_kobj->parent;
 	}
 	if (!top_kobj->kset) {
 		pr_debug("kobject attempted to send uevent without kset!\n");
@@ -115,6 +109,16 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 		return 0;
 	}
 
+	/* originating subsystem */
+	if (uevent_ops && uevent_ops->name)
+		subsystem = uevent_ops->name(kset, kobj);
+	else
+		subsystem = kobject_name(&kset->kobj);
+	if (!subsystem) {
+		pr_debug("unset subsystem caused the event to drop!\n");
+		return 0;
+	}
+
 	/* environment index */
 	envp = kzalloc(NUM_ENVP * sizeof (char *), GFP_KERNEL);
 	if (!envp)
@@ -134,12 +138,6 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 		goto exit;
 	}
 
-	/* originating subsystem */
-	if (uevent_ops && uevent_ops->name)
-		subsystem = uevent_ops->name(kset, kobj);
-	else
-		subsystem = kobject_name(&kset->kobj);
-
 	/* event environemnt for helper process only */
 	envp[i++] = "HOME=/";
 	envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
@@ -293,7 +291,7 @@ EXPORT_SYMBOL_GPL(add_uevent_var);
 static int __init kobject_uevent_init(void)
 {
 	uevent_sock = netlink_kernel_create(NETLINK_KOBJECT_UEVENT, 1, NULL,
-					    THIS_MODULE);
+					    NULL, THIS_MODULE);
 
 	if (!uevent_sock) {
 		printk(KERN_ERR
diff --git a/lib/kref.c b/lib/kref.c
index 0d07cc31c818..a6dc3ec328e0 100644
--- a/lib/kref.c
+++ b/lib/kref.c
@@ -21,6 +21,7 @@
 void kref_init(struct kref *kref)
 {
 	atomic_set(&kref->refcount,1);
+	smp_mb();
 }
 
 /**
@@ -31,6 +32,7 @@ void kref_get(struct kref *kref)
 {
 	WARN_ON(!atomic_read(&kref->refcount));
 	atomic_inc(&kref->refcount);
+	smp_mb__after_atomic_inc();
 }
 
 /**
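
The new barriers are about safe publication: the refcount store in kref_init() should be visible to other CPUs before any pointer to the containing object is, and the increment in kref_get() is ordered for the same reason. A sketch of the kind of pattern being protected (the shared pointer and object type are made up):

    /* Illustrative publication pattern; "global_obj" is hypothetical. */
    struct my_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);

    kref_init(&obj->ref);                   /* refcount = 1, now fenced by smp_mb() */
    rcu_assign_pointer(global_obj, obj);    /* publish only after init is visible */
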
diff --git a/lib/string.c b/lib/string.c
index bab440fb0dfc..5efafed3d6b6 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -60,6 +60,34 @@ int strnicmp(const char *s1, const char *s2, size_t len)
 EXPORT_SYMBOL(strnicmp);
 #endif
 
+#ifndef __HAVE_ARCH_STRCASECMP
+int strcasecmp(const char *s1, const char *s2)
+{
+	int c1, c2;
+
+	do {
+		c1 = tolower(*s1++);
+		c2 = tolower(*s2++);
+	} while (c1 == c2 && c1 != 0);
+	return c1 - c2;
+}
+EXPORT_SYMBOL(strcasecmp);
+#endif
+
+#ifndef __HAVE_ARCH_STRNCASECMP
+int strncasecmp(const char *s1, const char *s2, size_t n)
+{
+	int c1, c2;
+
+	do {
+		c1 = tolower(*s1++);
+		c2 = tolower(*s2++);
+	} while ((--n > 0) && c1 == c2 && c1 != 0);
+	return c1 - c2;
+}
+EXPORT_SYMBOL(strncasecmp);
+#endif
+
 #ifndef __HAVE_ARCH_STRCPY
 /**
  * strcpy - Copy a %NUL terminated string
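
These are the generic fallbacks; an architecture with optimized routines defines __HAVE_ARCH_STRCASECMP / __HAVE_ARCH_STRNCASECMP to suppress them, and only the sign of the return value is meaningful. The same loop can be checked in userspace (standalone sketch; the unsigned char cast is needed with the C library's tolower(), whereas the kernel's tolower() takes an unsigned char itself):

    #include <ctype.h>
    #include <stdio.h>

    static int my_strcasecmp(const char *s1, const char *s2)
    {
        int c1, c2;

        do {
            c1 = tolower((unsigned char)*s1++);
            c2 = tolower((unsigned char)*s2++);
        } while (c1 == c2 && c1 != 0);
        return c1 - c2;
    }

    int main(void)
    {
        printf("%d\n", my_strcasecmp("Kobject", "KOBJECT"));    /* 0 */
        printf("%d\n", my_strcasecmp("abc", "abd") < 0);        /* 1 */
        return 0;
    }
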
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 623a68af8b18..9970e55c90bd 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -28,7 +28,6 @@
 #include <asm/io.h>
 #include <asm/dma.h>
 #include <asm/scatterlist.h>
-#include <asm/swiotlb.h>
 
 #include <linux/init.h>
 #include <linux/bootmem.h>
@@ -36,10 +35,8 @@
 #define OFFSET(val,align) ((unsigned long)	\
 	( (val) & ( (align) - 1)))
 
-#ifndef SG_ENT_VIRT_ADDRESS
 #define SG_ENT_VIRT_ADDRESS(sg)	(page_address((sg)->page) + (sg)->offset)
 #define SG_ENT_PHYS_ADDRESS(sg)	virt_to_bus(SG_ENT_VIRT_ADDRESS(sg))
-#endif
 
 /*
  * Maximum allowable number of contiguous slabs to map,
@@ -104,25 +101,13 @@ static unsigned int io_tlb_index;
  * We need to save away the original address corresponding to a mapped entry
  * for the sync operations.
  */
-#ifndef SWIOTLB_ARCH_HAS_IO_TLB_ADDR_T
-typedef char *io_tlb_addr_t;
-#define swiotlb_orig_addr_null(buffer) (!(buffer))
-#define ptr_to_io_tlb_addr(ptr) (ptr)
-#define page_to_io_tlb_addr(pg, off) (page_address(pg) + (off))
-#define sg_to_io_tlb_addr(sg) SG_ENT_VIRT_ADDRESS(sg)
-#endif
-static io_tlb_addr_t *io_tlb_orig_addr;
+static unsigned char **io_tlb_orig_addr;
 
 /*
  * Protect the above data structures in the map and unmap calls
  */
 static DEFINE_SPINLOCK(io_tlb_lock);
 
-#ifdef SWIOTLB_EXTRA_VARIABLES
-SWIOTLB_EXTRA_VARIABLES;
-#endif
-
-#ifndef SWIOTLB_ARCH_HAS_SETUP_IO_TLB_NPAGES
 static int __init
 setup_io_tlb_npages(char *str)
 {
@@ -137,25 +122,9 @@ setup_io_tlb_npages(char *str)
 	swiotlb_force = 1;
 	return 1;
 }
-#endif
 __setup("swiotlb=", setup_io_tlb_npages);
 /* make io_tlb_overflow tunable too? */
 
-#ifndef swiotlb_adjust_size
-#define swiotlb_adjust_size(size) ((void)0)
-#endif
-
-#ifndef swiotlb_adjust_seg
-#define swiotlb_adjust_seg(start, size) ((void)0)
-#endif
-
-#ifndef swiotlb_print_info
-#define swiotlb_print_info(bytes)					\
-	printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - " \
-	       "0x%lx\n", bytes >> 20,					\
-	       virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end))
-#endif
-
 /*
  * Statically reserve bounce buffer space and initialize bounce buffer data
  * structures for the software IO TLB used to implement the DMA API.
@@ -169,8 +138,6 @@ swiotlb_init_with_default_size(size_t default_size)
 		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
 		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
 	}
-	swiotlb_adjust_size(io_tlb_nslabs);
-	swiotlb_adjust_size(io_tlb_overflow);
 
 	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 
@@ -188,14 +155,10 @@ swiotlb_init_with_default_size(size_t default_size)
 	 * between io_tlb_start and io_tlb_end.
 	 */
 	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
-	for (i = 0; i < io_tlb_nslabs; i++) {
-		if ( !(i % IO_TLB_SEGSIZE) )
-			swiotlb_adjust_seg(io_tlb_start + (i << IO_TLB_SHIFT),
-					   IO_TLB_SEGSIZE << IO_TLB_SHIFT);
+	for (i = 0; i < io_tlb_nslabs; i++)
 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
-	}
 	io_tlb_index = 0;
-	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(io_tlb_addr_t));
+	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));
 
 	/*
 	 * Get the overflow emergency buffer
@@ -203,21 +166,17 @@ swiotlb_init_with_default_size(size_t default_size)
 	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
 	if (!io_tlb_overflow_buffer)
 		panic("Cannot allocate SWIOTLB overflow buffer!\n");
-	swiotlb_adjust_seg(io_tlb_overflow_buffer, io_tlb_overflow);
 
-	swiotlb_print_info(bytes);
+	printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
+	       virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
 }
-#ifndef __swiotlb_init_with_default_size
-#define __swiotlb_init_with_default_size swiotlb_init_with_default_size
-#endif
 
 void __init
 swiotlb_init(void)
 {
-	__swiotlb_init_with_default_size(64 * (1<<20));	/* default to 64MB */
+	swiotlb_init_with_default_size(64 * (1<<20));	/* default to 64MB */
 }
 
-#ifdef SWIOTLB_ARCH_NEED_LATE_INIT
 /*
  * Systems with larger DMA zones (those that don't support ISA) can
  * initialize the swiotlb later using the slab allocator if needed.
@@ -275,12 +234,12 @@ swiotlb_late_init_with_default_size(size_t default_size)
 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
 	io_tlb_index = 0;
 
-	io_tlb_orig_addr = (io_tlb_addr_t *)__get_free_pages(GFP_KERNEL,
-	                           get_order(io_tlb_nslabs * sizeof(io_tlb_addr_t)));
+	io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL,
+	                           get_order(io_tlb_nslabs * sizeof(char *)));
 	if (!io_tlb_orig_addr)
 		goto cleanup3;
 
-	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(io_tlb_addr_t));
+	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *));
 
 	/*
 	 * Get the overflow emergency buffer
@@ -290,17 +249,19 @@ swiotlb_late_init_with_default_size(size_t default_size)
 	if (!io_tlb_overflow_buffer)
 		goto cleanup4;
 
-	swiotlb_print_info(bytes);
+	printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - "
+	       "0x%lx\n", bytes >> 20,
+	       virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
 
 	return 0;
 
 cleanup4:
-	free_pages((unsigned long)io_tlb_orig_addr,
-		   get_order(io_tlb_nslabs * sizeof(io_tlb_addr_t)));
+	free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs *
+	                                                      sizeof(char *)));
 	io_tlb_orig_addr = NULL;
 cleanup3:
-	free_pages((unsigned long)io_tlb_list,
-		   get_order(io_tlb_nslabs * sizeof(int)));
+	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
+	                                                 sizeof(int)));
 	io_tlb_list = NULL;
 cleanup2:
 	io_tlb_end = NULL;
@@ -310,9 +271,7 @@ cleanup1:
 	io_tlb_nslabs = req_nslabs;
 	return -ENOMEM;
 }
-#endif
 
-#ifndef SWIOTLB_ARCH_HAS_NEEDS_MAPPING
 static int
 address_needs_mapping(struct device *hwdev, dma_addr_t addr)
 {
@@ -323,35 +282,11 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr)
 	return (addr & ~mask) != 0;
 }
 
-static inline int range_needs_mapping(const void *ptr, size_t size)
-{
-	return swiotlb_force;
-}
-
-static inline int order_needs_mapping(unsigned int order)
-{
-	return 0;
-}
-#endif
-
-static void
-__sync_single(io_tlb_addr_t buffer, char *dma_addr, size_t size, int dir)
-{
-#ifndef SWIOTLB_ARCH_HAS_SYNC_SINGLE
-	if (dir == DMA_TO_DEVICE)
-		memcpy(dma_addr, buffer, size);
-	else
-		memcpy(buffer, dma_addr, size);
-#else
-	__swiotlb_arch_sync_single(buffer, dma_addr, size, dir);
-#endif
-}
-
 /*
  * Allocates bounce buffer and returns its kernel virtual address.
  */
 static void *
-map_single(struct device *hwdev, io_tlb_addr_t buffer, size_t size, int dir)
+map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 {
 	unsigned long flags;
 	char *dma_addr;
@@ -424,7 +359,7 @@ map_single(struct device *hwdev, io_tlb_addr_t buffer, size_t size, int dir)
 	 */
 	io_tlb_orig_addr[index] = buffer;
 	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
-		__sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
+		memcpy(dma_addr, buffer, size);
 
 	return dma_addr;
 }
@@ -438,18 +373,17 @@ unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 	unsigned long flags;
 	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
-	io_tlb_addr_t buffer = io_tlb_orig_addr[index];
+	char *buffer = io_tlb_orig_addr[index];
 
 	/*
 	 * First, sync the memory before unmapping the entry
 	 */
-	if (!swiotlb_orig_addr_null(buffer)
-	    && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+	if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
 		/*
 		 * bounce... copy the data back into the original buffer * and
 		 * delete the bounce buffer.
 		 */
-		__sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
+		memcpy(buffer, dma_addr, size);
 
 	/*
 	 * Return the buffer to the free list by setting the corresponding
@@ -482,18 +416,18 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
 	    int dir, int target)
 {
 	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
-	io_tlb_addr_t buffer = io_tlb_orig_addr[index];
+	char *buffer = io_tlb_orig_addr[index];
 
 	switch (target) {
 	case SYNC_FOR_CPU:
 		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
-			__sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
+			memcpy(buffer, dma_addr, size);
 		else
 			BUG_ON(dir != DMA_TO_DEVICE);
 		break;
 	case SYNC_FOR_DEVICE:
 		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-			__sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
+			memcpy(dma_addr, buffer, size);
 		else
 			BUG_ON(dir != DMA_FROM_DEVICE);
 		break;
@@ -502,8 +436,6 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
 	}
 }
 
-#ifdef SWIOTLB_ARCH_NEED_ALLOC
-
 void *
 swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		       dma_addr_t *dma_handle, gfp_t flags)
@@ -519,10 +451,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	 */
 	flags |= GFP_DMA;
 
-	if (!order_needs_mapping(order))
-		ret = (void *)__get_free_pages(flags, order);
-	else
-		ret = NULL;
+	ret = (void *)__get_free_pages(flags, order);
 	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
 		/*
 		 * The allocated memory isn't reachable by the device.
@@ -560,7 +489,6 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	*dma_handle = dev_addr;
 	return ret;
 }
-EXPORT_SYMBOL(swiotlb_alloc_coherent);
 
 void
 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
@@ -573,9 +501,6 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
 	swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
 }
-EXPORT_SYMBOL(swiotlb_free_coherent);
-
-#endif
 
 static void
 swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
@@ -617,14 +542,13 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 	 * we can safely return the device addr and not worry about bounce
 	 * buffering it.
 	 */
-	if (!range_needs_mapping(ptr, size)
-	    && !address_needs_mapping(hwdev, dev_addr))
+	if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
 		return dev_addr;
 
 	/*
 	 * Oh well, have to allocate and map a bounce buffer.
 	 */
-	map = map_single(hwdev, ptr_to_io_tlb_addr(ptr), size, dir);
+	map = map_single(hwdev, ptr, size, dir);
 	if (!map) {
 		swiotlb_full(hwdev, size, dir, 1);
 		map = io_tlb_overflow_buffer;
@@ -752,16 +676,17 @@ int
 swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 	       int dir)
 {
+	void *addr;
 	dma_addr_t dev_addr;
 	int i;
 
 	BUG_ON(dir == DMA_NONE);
 
 	for (i = 0; i < nelems; i++, sg++) {
-		dev_addr = SG_ENT_PHYS_ADDRESS(sg);
-		if (range_needs_mapping(SG_ENT_VIRT_ADDRESS(sg), sg->length)
-		    || address_needs_mapping(hwdev, dev_addr)) {
-			void *map = map_single(hwdev, sg_to_io_tlb_addr(sg), sg->length, dir);
+		addr = SG_ENT_VIRT_ADDRESS(sg);
+		dev_addr = virt_to_bus(addr);
+		if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
+			void *map = map_single(hwdev, addr, sg->length, dir);
 			if (!map) {
 				/* Don't panic here, we expect map_sg users
 				   to do proper error handling. */
@@ -835,44 +760,6 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
 }
 
-#ifdef SWIOTLB_ARCH_NEED_MAP_PAGE
-
-dma_addr_t
-swiotlb_map_page(struct device *hwdev, struct page *page,
-		 unsigned long offset, size_t size,
-		 enum dma_data_direction direction)
-{
-	dma_addr_t dev_addr;
-	char *map;
-
-	dev_addr = page_to_bus(page) + offset;
-	if (address_needs_mapping(hwdev, dev_addr)) {
-		map = map_single(hwdev, page_to_io_tlb_addr(page, offset), size, direction);
-		if (!map) {
-			swiotlb_full(hwdev, size, direction, 1);
-			map = io_tlb_overflow_buffer;
-		}
-		dev_addr = virt_to_bus(map);
-	}
-
-	return dev_addr;
-}
-
-void
-swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
-		   size_t size, enum dma_data_direction direction)
-{
-	char *dma_addr = bus_to_virt(dev_addr);
-
-	BUG_ON(direction == DMA_NONE);
-	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
-		unmap_single(hwdev, dma_addr, size, direction);
-	else if (direction == DMA_FROM_DEVICE)
-		dma_mark_clean(dma_addr, size);
-}
-
-#endif
-
 int
 swiotlb_dma_mapping_error(dma_addr_t dma_addr)
 {
@@ -885,13 +772,10 @@ swiotlb_dma_mapping_error(dma_addr_t dma_addr)
  * during bus mastering, then you would pass 0x00ffffff as the mask to
  * this function.
  */
-#ifndef __swiotlb_dma_supported
-#define __swiotlb_dma_supported(hwdev, mask) (virt_to_bus(io_tlb_end - 1) <= (mask))
-#endif
 int
 swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
-	return __swiotlb_dma_supported(hwdev, mask);
+	return virt_to_bus(io_tlb_end - 1) <= mask;
 }
 
 EXPORT_SYMBOL(swiotlb_init);
@@ -906,4 +790,6 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
 EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
 EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
 EXPORT_SYMBOL(swiotlb_dma_mapping_error);
+EXPORT_SYMBOL(swiotlb_alloc_coherent);
+EXPORT_SYMBOL(swiotlb_free_coherent);
 EXPORT_SYMBOL(swiotlb_dma_supported);
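
With the per-arch override points gone, the bounce path is symmetric memcpy(): map_single() copies the caller's buffer into the aperture before DMA_TO_DEVICE transfers, and unmap_single()/sync_single() copy device writes back for DMA_FROM_DEVICE. A toy model of just that data movement (standalone sketch, not the kernel code):

    #include <string.h>

    enum dma_dir { TO_DEVICE, FROM_DEVICE };

    /* Before the device looks at the buffer: stage the data. */
    static void bounce_map(char *bounce, const char *orig, size_t len,
                           enum dma_dir dir)
    {
        if (dir == TO_DEVICE)
            memcpy(bounce, orig, len);
    }

    /* After the device is done: recover what it wrote. */
    static void bounce_unmap(const char *bounce, char *orig, size_t len,
                             enum dma_dir dir)
    {
        if (dir == FROM_DEVICE)
            memcpy(orig, bounce, len);
    }
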
diff --git a/lib/textsearch.c b/lib/textsearch.c
index 9e2a002c5b54..88c98a2ec8d9 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -40,7 +40,7 @@
  *       configuration according to the specified parameters.
  *   (3) User starts the search(es) by calling _find() or _next() to
  *       fetch subsequent occurrences. A state variable is provided
- *       to the algorihtm to store persistent variables.
+ *       to the algorithm to store persistent variables.
  *   (4) Core eventually resets the search offset and forwards the find()
  *       request to the algorithm.
  *   (5) Algorithm calls get_next_block() provided by the user continously
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index b025864d2e43..cbab1df150cf 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -851,23 +851,35 @@ EXPORT_SYMBOL(sscanf);
 
 
 /* Simplified asprintf. */
-char *kasprintf(gfp_t gfp, const char *fmt, ...)
+char *kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
 {
-	va_list ap;
 	unsigned int len;
 	char *p;
+	va_list aq;
 
-	va_start(ap, fmt);
-	len = vsnprintf(NULL, 0, fmt, ap);
-	va_end(ap);
+	va_copy(aq, ap);
+	len = vsnprintf(NULL, 0, fmt, aq);
+	va_end(aq);
 
 	p = kmalloc(len+1, gfp);
 	if (!p)
 		return NULL;
-	va_start(ap, fmt);
+
 	vsnprintf(p, len+1, fmt, ap);
-	va_end(ap);
+
 	return p;
 }
+EXPORT_SYMBOL(kvasprintf);
+
+char *kasprintf(gfp_t gfp, const char *fmt, ...)
+{
+	va_list ap;
+	char *p;
 
+	va_start(ap, fmt);
+	p = kvasprintf(gfp, fmt, ap);
+	va_end(ap);
+
+	return p;
+}
 EXPORT_SYMBOL(kasprintf);
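
This is the standard v-variant split: the measuring pass consumes a va_list, so kvasprintf() must va_copy() before the first vsnprintf() and reuse the caller's list for the fill pass, while kasprintf() shrinks to a va_start/va_end wrapper. The same shape compiles in userspace with malloc() standing in for kmalloc() (standalone sketch):

    #include <stdarg.h>
    #include <stdio.h>
    #include <stdlib.h>

    static char *my_vasprintf(const char *fmt, va_list ap)
    {
        va_list aq;
        int len;
        char *p;

        va_copy(aq, ap);                   /* first pass only measures */
        len = vsnprintf(NULL, 0, fmt, aq);
        va_end(aq);

        p = malloc(len + 1);
        if (!p)
            return NULL;
        vsnprintf(p, len + 1, fmt, ap);    /* second pass fills the buffer */
        return p;
    }

    static char *my_asprintf(const char *fmt, ...)
    {
        va_list ap;
        char *p;

        va_start(ap, fmt);
        p = my_vasprintf(fmt, ap);
        va_end(ap);
        return p;
    }

    int main(void)
    {
        char *s = my_asprintf("slabs=%d shift=%d", 128, 11);

        puts(s ? s : "(alloc failed)");
        free(s);
        return 0;
    }
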