author		Pekka Enberg <penberg@kernel.org>	2010-10-24 12:57:05 -0400
committer	Pekka Enberg <penberg@kernel.org>	2010-10-24 12:57:05 -0400
commit		6d4121f6c20a0e86231d52f535f1c82423b3326f (patch)
tree		5c235cac699ca86b504850aa663ddadde0455a61 /lib
parent		92a5bbc11ff2442a54b2f1d313088c245828ef4e (diff)
parent		35da7a307c535f9c2929cae277f3df425c9f9b1e (diff)
Merge branch 'master' into for-linus
Conflicts:
	include/linux/percpu.h
	mm/percpu.c
Diffstat (limited to 'lib')
-rw-r--r--	lib/Kconfig.debug	 58
-rw-r--r--	lib/bug.c		  6
-rw-r--r--	lib/dma-debug.c		  1
-rw-r--r--	lib/dynamic_debug.c	140
-rw-r--r--	lib/kobject.c		 39
-rw-r--r--	lib/list_sort.c		  2
-rw-r--r--	lib/radix-tree.c	  2
-rw-r--r--	lib/swiotlb.c		 18
8 files changed, 183 insertions, 83 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index b6263651a955..69a32664c289 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -461,6 +461,15 @@ config DEBUG_MUTEXES
 	  This feature allows mutex semantics violations to be detected and
 	  reported.
 
+config BKL
+	bool "Big Kernel Lock" if (SMP || PREEMPT)
+	default y
+	help
+	  This is the traditional lock that is used in old code instead
+	  of proper locking. All drivers that use the BKL should depend
+	  on this symbol.
+	  Say Y here unless you are working on removing the BKL.
+
 config DEBUG_LOCK_ALLOC
 	bool "Lock debugging: detect incorrect freeing of live locks"
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -482,6 +491,7 @@ config PROVE_LOCKING
 	select DEBUG_SPINLOCK
 	select DEBUG_MUTEXES
 	select DEBUG_LOCK_ALLOC
+	select TRACE_IRQFLAGS
 	default n
 	help
 	 This feature enables the kernel to prove that all locking
@@ -539,6 +549,23 @@ config PROVE_RCU_REPEATEDLY
 	  disabling, allowing multiple RCU-lockdep warnings to be printed
 	  on a single reboot.
 
+	  Say Y to allow multiple RCU-lockdep warnings per boot.
+
+	  Say N if you are unsure.
+
+config SPARSE_RCU_POINTER
+	bool "RCU debugging: sparse-based checks for pointer usage"
+	default n
+	help
+	 This feature enables the __rcu sparse annotation for
+	 RCU-protected pointers.  This annotation will cause sparse
+	 to flag any non-RCU used of annotated pointers.  This can be
+	 helpful when debugging RCU usage.  Please note that this feature
+	 is not intended to enforce code cleanliness; it is instead merely
+	 a debugging aid.
+
+	 Say Y to make sparse flag questionable use of RCU-protected pointers
+
 	  Say N if you are unsure.
 
 config LOCKDEP
@@ -579,11 +606,10 @@ config DEBUG_LOCKDEP
 	  of more runtime overhead.
 
 config TRACE_IRQFLAGS
-	depends on DEBUG_KERNEL
 	bool
-	default y
-	depends on TRACE_IRQFLAGS_SUPPORT
-	depends on PROVE_LOCKING
+	help
+	  Enables hooks to interrupt enabling and disabling for
+	  either tracing or lock debugging.
 
 config DEBUG_SPINLOCK_SLEEP
 	bool "Spinlock debugging: sleep-inside-spinlock checking"
@@ -832,6 +858,30 @@ config RCU_CPU_STALL_DETECTOR
 
 	  Say Y if you are unsure.
 
+config RCU_CPU_STALL_TIMEOUT
+	int "RCU CPU stall timeout in seconds"
+	depends on RCU_CPU_STALL_DETECTOR
+	range 3 300
+	default 60
+	help
+	  If a given RCU grace period extends more than the specified
+	  number of seconds, a CPU stall warning is printed.  If the
+	  RCU grace period persists, additional CPU stall warnings are
+	  printed at more widely spaced intervals.
+
+config RCU_CPU_STALL_DETECTOR_RUNNABLE
+	bool "RCU CPU stall checking starts automatically at boot"
+	depends on RCU_CPU_STALL_DETECTOR
+	default y
+	help
+	  If set, start checking for RCU CPU stalls immediately on
+	  boot.  Otherwise, RCU CPU stall checking must be manually
+	  enabled.
+
+	  Say Y if you are unsure.
+
+	  Say N if you wish to suppress RCU CPU stall checking during boot.
+
 config RCU_CPU_STALL_VERBOSE
 	bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR"
 	depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU
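
The SPARSE_RCU_POINTER entry above describes the __rcu sparse annotation without showing it in use. As a rough illustration only (not part of this patch; the structure and function names are made up), the kind of access pattern sparse checks when this option is enabled looks roughly like this:

	#include <linux/rcupdate.h>

	/* Hypothetical RCU-protected configuration object. */
	struct demo_cfg {
		int value;
	};

	/* __rcu marks the pointer as RCU-protected for sparse. */
	static struct demo_cfg __rcu *demo_cfg_ptr;

	static int demo_read_value(void)
	{
		struct demo_cfg *cfg;
		int v = -1;

		rcu_read_lock();
		/* rcu_dereference() is the accepted way to load an __rcu pointer;
		 * a plain "cfg = demo_cfg_ptr;" is the sort of access sparse
		 * would flag with CONFIG_SPARSE_RCU_POINTER enabled. */
		cfg = rcu_dereference(demo_cfg_ptr);
		if (cfg)
			v = cfg->value;
		rcu_read_unlock();
		return v;
	}

The same annotation is what the lib/radix-tree.c change further down applies to the slots[] array.
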
diff --git a/lib/bug.c b/lib/bug.c
index 7cdfad88128f..19552096d16b 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -72,8 +72,8 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr)
 		return NULL;
 }
 
-int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
 			 struct module *mod)
 {
 	char *secstrings;
 	unsigned int i;
@@ -97,8 +97,6 @@ int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
 	 * could potentially lead to deadlock and thus be counter-productive.
 	 */
 	list_add(&mod->bug_list, &module_bug_list);
-
-	return 0;
 }
 
 void module_bug_cleanup(struct module *mod)
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 01e64270e246..4bfb0471f106 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -590,6 +590,7 @@ out_unlock:
 static const struct file_operations filter_fops = {
 	.read  = filter_read,
 	.write = filter_write,
+	.llseek = default_llseek,
 };
 
 static int dma_debug_fs_init(void)
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 02afc2533728..3094318bfea7 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -26,19 +26,11 @@
 #include <linux/dynamic_debug.h>
 #include <linux/debugfs.h>
 #include <linux/slab.h>
+#include <linux/jump_label.h>
 
 extern struct _ddebug __start___verbose[];
 extern struct _ddebug __stop___verbose[];
 
-/* dynamic_debug_enabled, and dynamic_debug_enabled2 are bitmasks in which
- * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
- * use independent hash functions, to reduce the chance of false positives.
- */
-long long dynamic_debug_enabled;
-EXPORT_SYMBOL_GPL(dynamic_debug_enabled);
-long long dynamic_debug_enabled2;
-EXPORT_SYMBOL_GPL(dynamic_debug_enabled2);
-
 struct ddebug_table {
 	struct list_head link;
 	char *mod_name;
@@ -88,26 +80,6 @@ static char *ddebug_describe_flags(struct _ddebug *dp, char *buf,
 }
 
 /*
- * must be called with ddebug_lock held
- */
-
-static int disabled_hash(char hash, bool first_table)
-{
-	struct ddebug_table *dt;
-	char table_hash_value;
-
-	list_for_each_entry(dt, &ddebug_tables, link) {
-		if (first_table)
-			table_hash_value = dt->ddebugs->primary_hash;
-		else
-			table_hash_value = dt->ddebugs->secondary_hash;
-		if (dt->num_enabled && (hash == table_hash_value))
-			return 0;
-	}
-	return 1;
-}
-
-/*
  * Search the tables for _ddebug's which match the given
  * `query' and apply the `flags' and `mask' to them.  Tells
  * the user which ddebug's were changed, or whether none
@@ -170,17 +142,9 @@ static void ddebug_change(const struct ddebug_query *query,
 		dt->num_enabled++;
 		dp->flags = newflags;
 		if (newflags) {
-			dynamic_debug_enabled |=
-				(1LL << dp->primary_hash);
-			dynamic_debug_enabled2 |=
-				(1LL << dp->secondary_hash);
+			jump_label_enable(&dp->enabled);
 		} else {
-			if (disabled_hash(dp->primary_hash, true))
-				dynamic_debug_enabled &=
-					~(1LL << dp->primary_hash);
-			if (disabled_hash(dp->secondary_hash, false))
-				dynamic_debug_enabled2 &=
-					~(1LL << dp->secondary_hash);
+			jump_label_disable(&dp->enabled);
 		}
 		if (verbose)
 			printk(KERN_INFO
@@ -429,6 +393,40 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
 	return 0;
 }
 
+static int ddebug_exec_query(char *query_string)
+{
+	unsigned int flags = 0, mask = 0;
+	struct ddebug_query query;
+#define MAXWORDS 9
+	int nwords;
+	char *words[MAXWORDS];
+
+	nwords = ddebug_tokenize(query_string, words, MAXWORDS);
+	if (nwords <= 0)
+		return -EINVAL;
+	if (ddebug_parse_query(words, nwords-1, &query))
+		return -EINVAL;
+	if (ddebug_parse_flags(words[nwords-1], &flags, &mask))
+		return -EINVAL;
+
+	/* actually go and implement the change */
+	ddebug_change(&query, flags, mask);
+	return 0;
+}
+
+static __initdata char ddebug_setup_string[1024];
+static __init int ddebug_setup_query(char *str)
+{
+	if (strlen(str) >= 1024) {
+		pr_warning("ddebug boot param string too large\n");
+		return 0;
+	}
+	strcpy(ddebug_setup_string, str);
+	return 1;
+}
+
+__setup("ddebug_query=", ddebug_setup_query);
+
 /*
  * File_ops->write method for <debugfs>/dynamic_debug/conrol. Gathers the
  * command text from userspace, parses and executes it.
@@ -436,12 +434,8 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
 static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf,
 				  size_t len, loff_t *offp)
 {
-	unsigned int flags = 0, mask = 0;
-	struct ddebug_query query;
-#define MAXWORDS 9
-	int nwords;
-	char *words[MAXWORDS];
 	char tmpbuf[256];
+	int ret;
 
 	if (len == 0)
 		return 0;
@@ -455,16 +449,9 @@ static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf,
 	printk(KERN_INFO "%s: read %d bytes from userspace\n",
 		__func__, (int)len);
 
-	nwords = ddebug_tokenize(tmpbuf, words, MAXWORDS);
-	if (nwords <= 0)
-		return -EINVAL;
-	if (ddebug_parse_query(words, nwords-1, &query))
-		return -EINVAL;
-	if (ddebug_parse_flags(words[nwords-1], &flags, &mask))
-		return -EINVAL;
-
-	/* actually go and implement the change */
-	ddebug_change(&query, flags, mask);
+	ret = ddebug_exec_query(tmpbuf);
+	if (ret)
+		return ret;
 
 	*offp += len;
 	return len;
@@ -725,13 +712,14 @@ static void ddebug_remove_all_tables(void)
 	mutex_unlock(&ddebug_lock);
 }
 
-static int __init dynamic_debug_init(void)
+static __initdata int ddebug_init_success;
+
+static int __init dynamic_debug_init_debugfs(void)
 {
 	struct dentry *dir, *file;
-	struct _ddebug *iter, *iter_start;
-	const char *modname = NULL;
-	int ret = 0;
-	int n = 0;
+
+	if (!ddebug_init_success)
+		return -ENODEV;
 
 	dir = debugfs_create_dir("dynamic_debug", NULL);
 	if (!dir)
@@ -742,6 +730,16 @@ static int __init dynamic_debug_init(void)
 		debugfs_remove(dir);
 		return -ENOMEM;
 	}
+	return 0;
+}
+
+static int __init dynamic_debug_init(void)
+{
+	struct _ddebug *iter, *iter_start;
+	const char *modname = NULL;
+	int ret = 0;
+	int n = 0;
+
 	if (__start___verbose != __stop___verbose) {
 		iter = __start___verbose;
 		modname = iter->modname;
@@ -759,12 +757,26 @@ static int __init dynamic_debug_init(void)
 		}
 		ret = ddebug_add_module(iter_start, n, modname);
 	}
+
+	/* ddebug_query boot param got passed -> set it up */
+	if (ddebug_setup_string[0] != '\0') {
+		ret = ddebug_exec_query(ddebug_setup_string);
+		if (ret)
+			pr_warning("Invalid ddebug boot param %s",
+				   ddebug_setup_string);
+		else
+			pr_info("ddebug initialized with string %s",
+				ddebug_setup_string);
+	}
+
 out_free:
-	if (ret) {
+	if (ret)
 		ddebug_remove_all_tables();
-		debugfs_remove(dir);
-		debugfs_remove(file);
-	}
+	else
+		ddebug_init_success = 1;
 	return 0;
 }
-module_init(dynamic_debug_init);
+/* Allow early initialization for boot messages via boot param */
+arch_initcall(dynamic_debug_init);
+/* Debugfs setup must be done later */
+module_init(dynamic_debug_init_debugfs);
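
The new ddebug_query= handling above means a query can take effect at arch_initcall time, before most drivers load. A hedged usage sketch follows; the file name and query string are assumed examples of the existing dynamic-debug query syntax, and nothing in the snippet is introduced by this patch:

	#include <linux/kernel.h>

	/* Hypothetical driver code: with CONFIG_DYNAMIC_DEBUG this pr_debug()
	 * site is compiled in but stays silent until a query enables it. */
	static int example_probe(void)
	{
		pr_debug("example: probing device\n");
		return 0;
	}

	/*
	 * Assumed ways to enable it:
	 *   - on the kernel command line:  ddebug_query="file example.c +p"
	 *   - after boot, via the control file set up by
	 *     dynamic_debug_init_debugfs():
	 *         echo 'file example.c +p' > <debugfs>/dynamic_debug/control
	 */
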
diff --git a/lib/kobject.c b/lib/kobject.c
index f07c57252e82..82dc34c095c2 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -746,17 +746,56 @@ void kset_unregister(struct kset *k)
  */
 struct kobject *kset_find_obj(struct kset *kset, const char *name)
 {
+	return kset_find_obj_hinted(kset, name, NULL);
+}
+
+/**
+ * kset_find_obj_hinted - search for object in kset given a predecessor hint.
+ * @kset: kset we're looking in.
+ * @name: object's name.
+ * @hint: hint to possible object's predecessor.
+ *
+ * Check the hint's next object and if it is a match return it directly,
+ * otherwise, fall back to the behavior of kset_find_obj().  Either way
+ * a reference for the returned object is held and the reference on the
+ * hinted object is released.
+ */
+struct kobject *kset_find_obj_hinted(struct kset *kset, const char *name,
+				     struct kobject *hint)
+{
 	struct kobject *k;
 	struct kobject *ret = NULL;
 
 	spin_lock(&kset->list_lock);
+
+	if (!hint)
+		goto slow_search;
+
+	/* end of list detection */
+	if (hint->entry.next == kset->list.next)
+		goto slow_search;
+
+	k = container_of(hint->entry.next, struct kobject, entry);
+	if (!kobject_name(k) || strcmp(kobject_name(k), name))
+		goto slow_search;
+
+	ret = kobject_get(k);
+	goto unlock_exit;
+
+slow_search:
 	list_for_each_entry(k, &kset->list, entry) {
 		if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
 			ret = kobject_get(k);
 			break;
 		}
 	}
+
+unlock_exit:
 	spin_unlock(&kset->list_lock);
+
+	if (hint)
+		kobject_put(hint);
+
 	return ret;
 }
 
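
A usage note on kset_find_obj_hinted() added above: because the function consumes the reference on @hint and returns a new reference on the match, a caller that looks up names in roughly kset order can feed each result back in as the next hint. A minimal sketch under assumed names (demo_lookup_in_order() and its parameters are hypothetical):

	#include <linux/kobject.h>

	static void demo_lookup_in_order(struct kset *kset,
					 const char * const *names, int count)
	{
		struct kobject *hint = NULL;
		struct kobject *kobj;
		int i;

		for (i = 0; i < count; i++) {
			/* Drops the ref on 'hint', returns a ref on the match (or NULL). */
			kobj = kset_find_obj_hinted(kset, names[i], hint);
			if (!kobj) {
				hint = NULL;	/* previous hint already released */
				continue;
			}
			/* ... use kobj ... */
			hint = kobj;		/* reuse as hint; consumed by the next call */
		}
		if (hint)
			kobject_put(hint);	/* release the last reference we still hold */
	}
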
diff --git a/lib/list_sort.c b/lib/list_sort.c
index 4b5cb794c38b..a7616fa3162e 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -70,7 +70,7 @@ static void merge_and_restore_back_links(void *priv,
 		 * element comparison is needed, so the client's cmp()
 		 * routine can invoke cond_resched() periodically.
 		 */
-		(*cmp)(priv, tail, tail);
+		(*cmp)(priv, tail->next, tail->next);
 
 		tail->next->prev = tail;
 		tail = tail->next;
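
The one-line fix above makes the bookkeeping call pass the node actually being re-linked (tail->next) rather than the node already processed. For context, a sketch of the kind of client cmp() routine the comment has in mind, which uses these extra calls to reschedule during very long sorts; the element type and call counter are hypothetical:

	#include <linux/list.h>
	#include <linux/list_sort.h>
	#include <linux/sched.h>

	/* Hypothetical list element sorted by 'key'. */
	struct demo_item {
		struct list_head list;
		int key;
	};

	static int demo_cmp(void *priv, struct list_head *a, struct list_head *b)
	{
		struct demo_item *ia = list_entry(a, struct demo_item, list);
		struct demo_item *ib = list_entry(b, struct demo_item, list);
		unsigned long *calls = priv;

		/* list_sort() also invokes cmp() once per node while restoring the
		 * prev links, which is the hook that lets us yield periodically. */
		if (!(++(*calls) & 1023))
			cond_resched();

		return ia->key - ib->key;
	}

	/* Usage: unsigned long calls = 0; list_sort(&calls, &my_list, demo_cmp); */
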
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index efd16fa80b1c..6f412ab4c24f 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -49,7 +49,7 @@ struct radix_tree_node {
 	unsigned int	height;		/* Height from the bottom */
 	unsigned int	count;
 	struct rcu_head	rcu_head;
-	void		*slots[RADIX_TREE_MAP_SIZE];
+	void __rcu	*slots[RADIX_TREE_MAP_SIZE];
 	unsigned long	tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
 };
 
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 34e3082632d8..7c06ee51a29a 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -70,7 +70,7 @@ static unsigned long io_tlb_nslabs;
  */
 static unsigned long io_tlb_overflow = 32*1024;
 
-void *io_tlb_overflow_buffer;
+static void *io_tlb_overflow_buffer;
 
 /*
  * This is a free list describing the number of free entries available from
@@ -147,16 +147,16 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
 	 * between io_tlb_start and io_tlb_end.
 	 */
-	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
+	io_tlb_list = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
 	for (i = 0; i < io_tlb_nslabs; i++)
 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
 	io_tlb_index = 0;
-	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));
+	io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
 
 	/*
 	 * Get the overflow emergency buffer
 	 */
-	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
+	io_tlb_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow));
 	if (!io_tlb_overflow_buffer)
 		panic("Cannot allocate SWIOTLB overflow buffer!\n");
 	if (verbose)
@@ -182,7 +182,7 @@ swiotlb_init_with_default_size(size_t default_size, int verbose)
 	/*
 	 * Get IO TLB memory from the low pages
 	 */
-	io_tlb_start = alloc_bootmem_low_pages(bytes);
+	io_tlb_start = alloc_bootmem_low_pages(PAGE_ALIGN(bytes));
 	if (!io_tlb_start)
 		panic("Cannot allocate SWIOTLB buffer");
 
@@ -308,13 +308,13 @@ void __init swiotlb_free(void)
 			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
 	} else {
 		free_bootmem_late(__pa(io_tlb_overflow_buffer),
-				  io_tlb_overflow);
+				  PAGE_ALIGN(io_tlb_overflow));
 		free_bootmem_late(__pa(io_tlb_orig_addr),
-				  io_tlb_nslabs * sizeof(phys_addr_t));
+				  PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
 		free_bootmem_late(__pa(io_tlb_list),
-				  io_tlb_nslabs * sizeof(int));
+				  PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
 		free_bootmem_late(__pa(io_tlb_start),
-				  io_tlb_nslabs << IO_TLB_SHIFT);
+				  PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
 	}
 }
 
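
A short note on the PAGE_ALIGN() additions in this file: the sizes swiotlb hands to the bootmem allocators are not generally whole pages, so rounding them up identically at the alloc_bootmem_pages()/alloc_bootmem_low_pages() sites and the free_bootmem_late() sites keeps both operating on the same page-granular extent. Illustrative arithmetic only, assuming 4 KiB pages and a hypothetical slab count:

	#include <linux/mm.h>	/* PAGE_ALIGN, PAGE_SIZE */

	static inline unsigned long demo_list_bytes(unsigned long nslabs)
	{
		/* e.g. nslabs = 32769: raw size = 32769 * 4 = 131076 bytes,
		 * PAGE_ALIGN(131076) = 135168 bytes = 33 pages of 4 KiB. */
		return PAGE_ALIGN(nslabs * sizeof(int));
	}
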