Diffstat (limited to 'lib')

 lib/Kconfig                     |    2
 lib/Kconfig.debug               |  122
 lib/Kconfig.kgdb                |    3
 lib/Makefile                    |    2
 lib/bug.c                       |    3
 lib/checksum.c                  |    2
 lib/debugobjects.c              |   21
 lib/decompress_unlzo.c          |    2
 lib/devres.c                    |   60
 lib/digsig.c                    |   41
 lib/dynamic_debug.c             |  165
 lib/hexdump.c                   |    4
 lib/idr.c                       |  446
 lib/kfifo.c                     |  607
 lib/locking-selftest.c          |   34
 lib/lru_cache.c                 |    3
 lib/lzo/Makefile                |    2
 lib/lzo/lzo1x_compress.c        |  335
 lib/lzo/lzo1x_decompress.c      |  255
 lib/lzo/lzo1x_decompress_safe.c |  237
 lib/lzo/lzodefs.h               |   38
 lib/mpi/mpi-internal.h          |    4
 lib/mpi/mpicoder.c              |    8
 lib/parser.c                    |    6
 lib/rwsem-spinlock.c            |   69
 lib/rwsem.c                     |   75
 lib/scatterlist.c               |   86
 lib/swiotlb.c                   |   47
 lib/vsprintf.c                  |    7
 lib/xz/Kconfig                  |   34
 30 files changed, 1816 insertions(+), 904 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 75cdb77fa49d..3958dc4389f9 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -322,7 +322,7 @@ config CPUMASK_OFFSTACK
 
 config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
 	bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS
-	depends on EXPERIMENTAL && BROKEN
+	depends on BROKEN
 
 config CPU_RMAP
 	bool
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 67604e599384..e4a7f808fa06 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -243,8 +243,7 @@ config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
 	default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
 
 config PANIC_ON_OOPS
-	bool "Panic on Oops" if EXPERT
-	default n
+	bool "Panic on Oops"
 	help
 	  Say Y here to enable the kernel to panic when it oopses. This
 	  has the same effect as setting oops=panic on the kernel command
@@ -455,7 +454,7 @@ config HAVE_DEBUG_KMEMLEAK
 
 config DEBUG_KMEMLEAK
 	bool "Kernel memory leak detector"
-	depends on DEBUG_KERNEL && EXPERIMENTAL && HAVE_DEBUG_KMEMLEAK
+	depends on DEBUG_KERNEL && HAVE_DEBUG_KMEMLEAK
 	select DEBUG_FS
 	select STACKTRACE if STACKTRACE_SUPPORT
 	select KALLSYMS
@@ -605,61 +604,6 @@ config PROVE_LOCKING
 
 	 For more details, see Documentation/lockdep-design.txt.
 
-config PROVE_RCU
-	bool "RCU debugging: prove RCU correctness"
-	depends on PROVE_LOCKING
-	default n
-	help
-	  This feature enables lockdep extensions that check for correct
-	  use of RCU APIs. This is currently under development. Say Y
-	  if you want to debug RCU usage or help work on the PROVE_RCU
-	  feature.
-
-	  Say N if you are unsure.
-
-config PROVE_RCU_REPEATEDLY
-	bool "RCU debugging: don't disable PROVE_RCU on first splat"
-	depends on PROVE_RCU
-	default n
-	help
-	  By itself, PROVE_RCU will disable checking upon issuing the
-	  first warning (or "splat"). This feature prevents such
-	  disabling, allowing multiple RCU-lockdep warnings to be printed
-	  on a single reboot.
-
-	  Say Y to allow multiple RCU-lockdep warnings per boot.
-
-	  Say N if you are unsure.
-
-config PROVE_RCU_DELAY
-	bool "RCU debugging: preemptible RCU race provocation"
-	depends on DEBUG_KERNEL && PREEMPT_RCU
-	default n
-	help
-	  There is a class of races that involve an unlikely preemption
-	  of __rcu_read_unlock() just after ->rcu_read_lock_nesting has
-	  been set to INT_MIN. This feature inserts a delay at that
-	  point to increase the probability of these races.
-
-	  Say Y to increase probability of preemption of __rcu_read_unlock().
-
-	  Say N if you are unsure.
-
-config SPARSE_RCU_POINTER
-	bool "RCU debugging: sparse-based checks for pointer usage"
-	default n
-	help
-	  This feature enables the __rcu sparse annotation for
-	  RCU-protected pointers. This annotation will cause sparse
-	  to flag any non-RCU used of annotated pointers. This can be
-	  helpful when debugging RCU usage. Please note that this feature
-	  is not intended to enforce code cleanliness; it is instead merely
-	  a debugging aid.
-
-	  Say Y to make sparse flag questionable use of RCU-protected pointers
-
-	  Say N if you are unsure.
-
 config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -937,6 +881,63 @@ config BOOT_PRINTK_DELAY
 	  BOOT_PRINTK_DELAY also may cause LOCKUP_DETECTOR to detect
 	  what it believes to be lockup conditions.
 
+menu "RCU Debugging"
+
+config PROVE_RCU
+	bool "RCU debugging: prove RCU correctness"
+	depends on PROVE_LOCKING
+	default n
+	help
+	  This feature enables lockdep extensions that check for correct
+	  use of RCU APIs. This is currently under development. Say Y
+	  if you want to debug RCU usage or help work on the PROVE_RCU
+	  feature.
+
+	  Say N if you are unsure.
+
+config PROVE_RCU_REPEATEDLY
+	bool "RCU debugging: don't disable PROVE_RCU on first splat"
+	depends on PROVE_RCU
+	default n
+	help
+	  By itself, PROVE_RCU will disable checking upon issuing the
+	  first warning (or "splat"). This feature prevents such
+	  disabling, allowing multiple RCU-lockdep warnings to be printed
+	  on a single reboot.
+
+	  Say Y to allow multiple RCU-lockdep warnings per boot.
+
+	  Say N if you are unsure.
+
+config PROVE_RCU_DELAY
+	bool "RCU debugging: preemptible RCU race provocation"
+	depends on DEBUG_KERNEL && PREEMPT_RCU
+	default n
+	help
+	  There is a class of races that involve an unlikely preemption
+	  of __rcu_read_unlock() just after ->rcu_read_lock_nesting has
+	  been set to INT_MIN. This feature inserts a delay at that
+	  point to increase the probability of these races.
+
+	  Say Y to increase probability of preemption of __rcu_read_unlock().
+
+	  Say N if you are unsure.
+
+config SPARSE_RCU_POINTER
+	bool "RCU debugging: sparse-based checks for pointer usage"
+	default n
+	help
+	  This feature enables the __rcu sparse annotation for
+	  RCU-protected pointers. This annotation will cause sparse
+	  to flag any non-RCU used of annotated pointers. This can be
+	  helpful when debugging RCU usage. Please note that this feature
+	  is not intended to enforce code cleanliness; it is instead merely
+	  a debugging aid.
+
+	  Say Y to make sparse flag questionable use of RCU-protected pointers
+
+	  Say N if you are unsure.
+
 config RCU_TORTURE_TEST
 	tristate "torture tests for RCU"
 	depends on DEBUG_KERNEL
@@ -970,7 +971,7 @@ config RCU_TORTURE_TEST_RUNNABLE
 
 config RCU_CPU_STALL_TIMEOUT
 	int "RCU CPU stall timeout in seconds"
-	depends on TREE_RCU || TREE_PREEMPT_RCU
+	depends on RCU_STALL_COMMON
 	range 3 300
 	default 21
 	help
@@ -1008,6 +1009,7 @@ config RCU_CPU_STALL_INFO
 config RCU_TRACE
 	bool "Enable tracing for RCU"
 	depends on DEBUG_KERNEL
+	select TRACE_CLOCK
 	help
 	  This option provides tracing in RCU which presents stats
 	  in debugfs for debugging RCU implementation.
@@ -1015,6 +1017,8 @@ config RCU_TRACE
 	  Say Y here if you want to enable RCU tracing
 	  Say N if you are unsure.
 
+endmenu # "RCU Debugging"
+
 config KPROBES_SANITY_TEST
 	bool "Kprobes sanity tests"
 	depends on DEBUG_KERNEL
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index 960fa2ecd6e0..140e87824173 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -5,7 +5,7 @@ config HAVE_ARCH_KGDB
 menuconfig KGDB
 	bool "KGDB: kernel debugger"
 	depends on HAVE_ARCH_KGDB
-	depends on DEBUG_KERNEL && EXPERIMENTAL
+	depends on DEBUG_KERNEL
 	help
 	  If you say Y here, it will be possible to remotely debug the
 	  kernel using gdb. It is recommended but not required, that
@@ -22,6 +22,7 @@ config KGDB_SERIAL_CONSOLE
 	tristate "KGDB: use kgdb over the serial console"
 	select CONSOLE_POLL
 	select MAGIC_SYSRQ
+	depends on TTY
 	default y
 	help
 	  Share a serial console with kgdb. Sysrq-g must be used
diff --git a/lib/Makefile b/lib/Makefile
index 02ed6c04cd7d..d7946ff75b2e 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -23,7 +23,7 @@ lib-y += kobject.o klist.o
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
	 string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \
-	 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o
+	 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o
 obj-y += kstrtox.o
 obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
 
diff --git a/lib/bug.c b/lib/bug.c
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -166,7 +166,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 		print_modules();
 		show_regs(regs);
 		print_oops_end_marker();
-		add_taint(BUG_GET_TAINT(bug));
+		/* Just a warning, don't kill lockdep. */
+		add_taint(BUG_GET_TAINT(bug), LOCKDEP_STILL_OK);
 		return BUG_TRAP_TYPE_WARN;
 	}
 
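The add_taint() call above uses the two-argument form this series introduces, where the caller states whether lockdep may keep running after the taint. A minimal sketch of the calling convention, assuming the add_taint() signature and the LOCKDEP_STILL_OK/LOCKDEP_NOW_UNRELIABLE constants added elsewhere in this series; both functions are hypothetical call sites:

/* a warning path: taint the kernel but keep lockdep checking */
static void example_warn_path(void)
{
	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}

/* a fatal path: locking state may be corrupted, so turn lockdep off */
static void example_oops_path(void)
{
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
}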
diff --git a/lib/checksum.c b/lib/checksum.c
index 12dceb27ff20..129775eb6de6 100644
--- a/lib/checksum.c
+++ b/lib/checksum.c
@@ -102,6 +102,7 @@ out:
 }
 #endif
 
+#ifndef ip_fast_csum
 /*
  * This is a version of ip_compute_csum() optimized for IP headers,
  * which always checksum on 4 octet boundaries.
@@ -111,6 +112,7 @@ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 	return (__force __sum16)~do_csum(iph, ihl*4);
 }
 EXPORT_SYMBOL(ip_fast_csum);
+#endif
 
 /*
  * computes the checksum of a memory block at buff, length len,
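The new #ifndef/#endif pair lets an architecture supply its own ip_fast_csum() and compile out the generic C version. The usual kernel idiom is for the arch's asm/checksum.h to define the function and a same-named macro; a hedged sketch (the body is a labeled placeholder, not a real arch implementation):

/* in an architecture's asm/checksum.h (sketch) */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	__sum16 sum = 0;	/* placeholder: a real arch computes the
				 * folded one's-complement sum of ihl
				 * 32-bit words here, often in asm */
	return sum;
}
#define ip_fast_csum ip_fast_csum	/* suppresses the lib/checksum.c copy */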
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index d11808ca4bc4..37061ede8b81 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -109,11 +109,10 @@ static void fill_pool(void)
  */
 static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
 {
-	struct hlist_node *node;
 	struct debug_obj *obj;
 	int cnt = 0;
 
-	hlist_for_each_entry(obj, node, &b->list, node) {
+	hlist_for_each_entry(obj, &b->list, node) {
 		cnt++;
 		if (obj->object == addr)
 			return obj;
@@ -213,7 +212,7 @@ static void free_object(struct debug_obj *obj)
 static void debug_objects_oom(void)
 {
 	struct debug_bucket *db = obj_hash;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	HLIST_HEAD(freelist);
 	struct debug_obj *obj;
 	unsigned long flags;
@@ -227,7 +226,7 @@ static void debug_objects_oom(void)
 		raw_spin_unlock_irqrestore(&db->lock, flags);
 
 		/* Now free them */
-		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
+		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
 			hlist_del(&obj->node);
 			free_object(obj);
 		}
@@ -658,7 +657,7 @@ debug_object_active_state(void *addr, struct debug_obj_descr *descr,
 static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 {
 	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	HLIST_HEAD(freelist);
 	struct debug_obj_descr *descr;
 	enum debug_obj_state state;
@@ -678,7 +677,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 repeat:
 		cnt = 0;
 		raw_spin_lock_irqsave(&db->lock, flags);
-		hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
+		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
 			cnt++;
 			oaddr = (unsigned long) obj->object;
 			if (oaddr < saddr || oaddr >= eaddr)
@@ -702,7 +701,7 @@ repeat:
 		raw_spin_unlock_irqrestore(&db->lock, flags);
 
 		/* Now free them */
-		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
+		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
 			hlist_del(&obj->node);
 			free_object(obj);
 		}
@@ -1013,7 +1012,7 @@ void __init debug_objects_early_init(void)
 static int __init debug_objects_replace_static_objects(void)
 {
 	struct debug_bucket *db = obj_hash;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	struct debug_obj *obj, *new;
 	HLIST_HEAD(objects);
 	int i, cnt = 0;
@@ -1033,7 +1032,7 @@ static int __init debug_objects_replace_static_objects(void)
 	local_irq_disable();
 
 	/* Remove the statically allocated objects from the pool */
-	hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
+	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
 		hlist_del(&obj->node);
 	/* Move the allocated objects to the pool */
 	hlist_move_list(&objects, &obj_pool);
@@ -1042,7 +1041,7 @@ static int __init debug_objects_replace_static_objects(void)
 	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
 		hlist_move_list(&db->list, &objects);
 
-		hlist_for_each_entry(obj, node, &objects, node) {
+		hlist_for_each_entry(obj, &objects, node) {
 			new = hlist_entry(obj_pool.first, typeof(*obj), node);
 			hlist_del(&new->node);
 			/* copy object data */
@@ -1057,7 +1056,7 @@ static int __init debug_objects_replace_static_objects(void)
 			obj_pool_used);
 	return 0;
 free:
-	hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
+	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
 		hlist_del(&obj->node);
 		kmem_cache_free(obj_cache, obj);
 	}
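Every hunk in this file is the same mechanical conversion: the hlist iterators no longer take a separate struct hlist_node cursor and instead derive their position from the entry itself. A before/after sketch with a hypothetical entry type:

struct item {				/* hypothetical */
	int key;
	struct hlist_node node;
};

static struct item *find_item(struct hlist_head *head, int key)
{
	struct item *it;

	/*
	 * Old style needed a cursor:
	 *	struct hlist_node *pos;
	 *	hlist_for_each_entry(it, pos, head, node)
	 * The new style drops the cursor argument entirely:
	 */
	hlist_for_each_entry(it, head, node)
		if (it->key == key)
			return it;
	return NULL;
}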
diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c
index 4531294fa62f..960183d4258f 100644
--- a/lib/decompress_unlzo.c
+++ b/lib/decompress_unlzo.c
@@ -31,7 +31,7 @@
  */
 
 #ifdef STATIC
-#include "lzo/lzo1x_decompress.c"
+#include "lzo/lzo1x_decompress_safe.c"
 #else
 #include <linux/decompress/unlzo.h>
 #endif
diff --git a/lib/devres.c b/lib/devres.c
index 80b9c76d436a..823533138fa0 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -1,3 +1,4 @@
+#include <linux/err.h>
 #include <linux/pci.h>
 #include <linux/io.h>
 #include <linux/gfp.h>
@@ -86,22 +87,24 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
 EXPORT_SYMBOL(devm_iounmap);
 
 /**
- * devm_request_and_ioremap() - Check, request region, and ioremap resource
- * @dev: Generic device to handle the resource for
+ * devm_ioremap_resource() - check, request region, and ioremap resource
+ * @dev: generic device to handle the resource for
  * @res: resource to be handled
  *
- * Takes all necessary steps to ioremap a mem resource. Uses managed device, so
- * everything is undone on driver detach. Checks arguments, so you can feed
- * it the result from e.g. platform_get_resource() directly. Returns the
- * remapped pointer or NULL on error. Usage example:
+ * Checks that a resource is a valid memory region, requests the memory region
+ * and ioremaps it either as cacheable or as non-cacheable memory depending on
+ * the resource's flags. All operations are managed and will be undone on
+ * driver detach.
+ *
+ * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
+ * on failure. Usage example:
  *
  *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- *	base = devm_request_and_ioremap(&pdev->dev, res);
- *	if (!base)
- *		return -EADDRNOTAVAIL;
+ *	base = devm_ioremap_resource(&pdev->dev, res);
+ *	if (IS_ERR(base))
+ *		return PTR_ERR(base);
  */
-void __iomem *devm_request_and_ioremap(struct device *dev,
-			struct resource *res)
+void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
 {
 	resource_size_t size;
 	const char *name;
@@ -111,7 +114,7 @@ void __iomem *devm_request_and_ioremap(struct device *dev,
 
 	if (!res || resource_type(res) != IORESOURCE_MEM) {
 		dev_err(dev, "invalid resource\n");
-		return NULL;
+		return ERR_PTR(-EINVAL);
 	}
 
 	size = resource_size(res);
@@ -119,7 +122,7 @@ void __iomem *devm_request_and_ioremap(struct device *dev,
 
 	if (!devm_request_mem_region(dev, res->start, size, name)) {
 		dev_err(dev, "can't request region for resource %pR\n", res);
-		return NULL;
+		return ERR_PTR(-EBUSY);
 	}
 
 	if (res->flags & IORESOURCE_CACHEABLE)
@@ -130,10 +133,39 @@ void __iomem *devm_request_and_ioremap(struct device *dev,
 	if (!dest_ptr) {
 		dev_err(dev, "ioremap failed for resource %pR\n", res);
 		devm_release_mem_region(dev, res->start, size);
+		dest_ptr = ERR_PTR(-ENOMEM);
 	}
 
 	return dest_ptr;
 }
+EXPORT_SYMBOL(devm_ioremap_resource);
+
+/**
+ * devm_request_and_ioremap() - Check, request region, and ioremap resource
+ * @dev: Generic device to handle the resource for
+ * @res: resource to be handled
+ *
+ * Takes all necessary steps to ioremap a mem resource. Uses managed device, so
+ * everything is undone on driver detach. Checks arguments, so you can feed
+ * it the result from e.g. platform_get_resource() directly. Returns the
+ * remapped pointer or NULL on error. Usage example:
+ *
+ *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ *	base = devm_request_and_ioremap(&pdev->dev, res);
+ *	if (!base)
+ *		return -EADDRNOTAVAIL;
+ */
+void __iomem *devm_request_and_ioremap(struct device *device,
+				       struct resource *res)
+{
+	void __iomem *dest_ptr;
+
+	dest_ptr = devm_ioremap_resource(device, res);
+	if (IS_ERR(dest_ptr))
+		return NULL;
+
+	return dest_ptr;
+}
 EXPORT_SYMBOL(devm_request_and_ioremap);
 
 #ifdef CONFIG_HAS_IOPORT
@@ -195,6 +227,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
 					devm_ioport_map_match, (void *)addr));
 }
 EXPORT_SYMBOL(devm_ioport_unmap);
+#endif /* CONFIG_HAS_IOPORT */
 
 #ifdef CONFIG_PCI
 /*
@@ -400,4 +433,3 @@ void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
 }
 EXPORT_SYMBOL(pcim_iounmap_regions);
 #endif /* CONFIG_PCI */
-#endif /* CONFIG_HAS_IOPORT */
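Because devm_ioremap_resource() reports failures as ERR_PTR() codes rather than NULL, a probe routine can propagate the specific cause (-EINVAL, -EBUSY or -ENOMEM from the paths above). A sketch of the intended driver pattern; foo_probe is a hypothetical driver:

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* use base; the region and mapping are released on driver detach */
	return 0;
}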
diff --git a/lib/digsig.c b/lib/digsig.c
index dc2be7ed1765..2f31e6a45f0a 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -30,11 +30,10 @@
 
 static struct crypto_shash *shash;
 
-static int pkcs_1_v1_5_decode_emsa(const unsigned char *msg,
-				unsigned long  msglen,
-				unsigned long  modulus_bitlen,
-				unsigned char *out,
-				unsigned long *outlen)
+static const char *pkcs_1_v1_5_decode_emsa(const unsigned char *msg,
+					   unsigned long  msglen,
+					   unsigned long  modulus_bitlen,
+					   unsigned long *outlen)
 {
 	unsigned long modulus_len, ps_len, i;
 
@@ -42,11 +41,11 @@ static int pkcs_1_v1_5_decode_emsa(const unsigned char *msg,
 
 	/* test message size */
 	if ((msglen > modulus_len) || (modulus_len < 11))
-		return -EINVAL;
+		return NULL;
 
 	/* separate encoded message */
-	if ((msg[0] != 0x00) || (msg[1] != (unsigned char)1))
-		return -EINVAL;
+	if (msg[0] != 0x00 || msg[1] != 0x01)
+		return NULL;
 
 	for (i = 2; i < modulus_len - 1; i++)
 		if (msg[i] != 0xFF)
@@ -56,19 +55,13 @@ static int pkcs_1_v1_5_decode_emsa(const unsigned char *msg,
 	if (msg[i] != 0)
 		/* There was no octet with hexadecimal value 0x00
 		to separate ps from m. */
-		return -EINVAL;
+		return NULL;
 
 	ps_len = i - 2;
 
-	if (*outlen < (msglen - (2 + ps_len + 1))) {
-		*outlen = msglen - (2 + ps_len + 1);
-		return -EOVERFLOW;
-	}
-
 	*outlen = (msglen - (2 + ps_len + 1));
-	memcpy(out, &msg[2 + ps_len + 1], *outlen);
 
-	return 0;
+	return msg + 2 + ps_len + 1;
 }
 
 /*
@@ -83,7 +76,8 @@ static int digsig_verify_rsa(struct key *key,
 	unsigned long mlen, mblen;
 	unsigned nret, l;
 	int head, i;
-	unsigned char *out1 = NULL, *out2 = NULL;
+	unsigned char *out1 = NULL;
+	const char *m;
 	MPI in = NULL, res = NULL, pkey[2];
 	uint8_t *p, *datap, *endp;
 	struct user_key_payload *ukp;
@@ -120,7 +114,7 @@ static int digsig_verify_rsa(struct key *key,
 	}
 
 	mblen = mpi_get_nbits(pkey[0]);
-	mlen = (mblen + 7)/8;
+	mlen = DIV_ROUND_UP(mblen, 8);
 
 	if (mlen == 0)
 		goto err;
@@ -129,10 +123,6 @@ static int digsig_verify_rsa(struct key *key,
 	if (!out1)
 		goto err;
 
-	out2 = kzalloc(mlen, GFP_KERNEL);
-	if (!out2)
-		goto err;
-
 	nret = siglen;
 	in = mpi_read_from_buffer(sig, &nret);
 	if (!in)
@@ -164,18 +154,15 @@ static int digsig_verify_rsa(struct key *key,
 
 	kfree(p);
 
-	err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len);
-	if (err)
-		goto err;
+	m = pkcs_1_v1_5_decode_emsa(out1, len, mblen, &len);
 
-	if (len != hlen || memcmp(out2, h, hlen))
+	if (!m || len != hlen || memcmp(m, h, hlen))
 		err = -EINVAL;
 
 err:
 	mpi_free(in);
 	mpi_free(res);
 	kfree(out1);
-	kfree(out2);
 	while (--i >= 0)
 		mpi_free(pkey[i]);
 err1:
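The reworked pkcs_1_v1_5_decode_emsa() walks the EMSA-PKCS1-v1_5 block 0x00 || 0x01 || PS (0xFF padding) || 0x00 || m and, instead of copying m into a second allocation, returns a pointer into the caller's already-decrypted buffer. A hedged sketch of the new calling contract; check_digest is a hypothetical wrapper:

static int check_digest(const unsigned char *em, unsigned long emlen,
			unsigned long modulus_bitlen,
			const unsigned char *hash, unsigned long hlen)
{
	unsigned long mlen = 0;
	const char *m;

	/* m points into em on success; NULL means malformed padding */
	m = pkcs_1_v1_5_decode_emsa(em, emlen, modulus_bitlen, &mlen);
	if (!m || mlen != hlen || memcmp(m, hash, hlen))
		return -EINVAL;	/* bad padding, wrong length or mismatch */
	return 0;		/* nothing extra to free */
}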
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 1db1fc660538..5276b99ca650 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -59,7 +59,7 @@ struct ddebug_iter {
 
 static DEFINE_MUTEX(ddebug_lock);
 static LIST_HEAD(ddebug_tables);
-static int verbose = 0;
+static int verbose;
 module_param(verbose, int, 0644);
 
 /* Return the path relative to source root */
@@ -100,24 +100,32 @@ static char *ddebug_describe_flags(struct _ddebug *dp, char *buf,
 	return buf;
 }
 
 #define vpr_info(fmt, ...)					\
-	if (verbose) do { pr_info(fmt, ##__VA_ARGS__); } while (0)
-
-#define vpr_info_dq(q, msg)					\
 do {								\
-	/* trim last char off format print */			\
-	vpr_info("%s: func=\"%s\" file=\"%s\" "			\
-		"module=\"%s\" format=\"%.*s\" "		\
-		"lineno=%u-%u",					\
-		msg,						\
-		q->function ? q->function : "",			\
-		q->filename ? q->filename : "",			\
-		q->module ? q->module : "",			\
-		(int)(q->format ? strlen(q->format) - 1 : 0),	\
-		q->format ? q->format : "",			\
-		q->first_lineno, q->last_lineno);		\
+	if (verbose)						\
+		pr_info(fmt, ##__VA_ARGS__);			\
 } while (0)
 
+static void vpr_info_dq(const struct ddebug_query *query, const char *msg)
+{
+	/* trim any trailing newlines */
+	int fmtlen = 0;
+
+	if (query->format) {
+		fmtlen = strlen(query->format);
+		while (fmtlen && query->format[fmtlen - 1] == '\n')
+			fmtlen--;
+	}
+
+	vpr_info("%s: func=\"%s\" file=\"%s\" module=\"%s\" format=\"%.*s\" lineno=%u-%u\n",
+		 msg,
+		 query->function ? query->function : "",
+		 query->filename ? query->filename : "",
+		 query->module ? query->module : "",
+		 fmtlen, query->format ? query->format : "",
+		 query->first_lineno, query->last_lineno);
+}
+
 /*
  * Search the tables for _ddebug's which match the given `query' and
  * apply the `flags' and `mask' to them. Returns number of matching
@@ -141,7 +149,7 @@ static int ddebug_change(const struct ddebug_query *query,
 		if (query->module && strcmp(query->module, dt->mod_name))
 			continue;
 
-		for (i = 0 ; i < dt->num_ddebugs ; i++) {
+		for (i = 0; i < dt->num_ddebugs; i++) {
 			struct _ddebug *dp = &dt->ddebugs[i];
 
 			/* match against the source filename */
@@ -176,10 +184,10 @@ static int ddebug_change(const struct ddebug_query *query,
 				continue;
 			dp->flags = newflags;
 			vpr_info("changed %s:%d [%s]%s =%s\n",
-				trim_prefix(dp->filename), dp->lineno,
-				dt->mod_name, dp->function,
-				ddebug_describe_flags(dp, flagbuf,
-						sizeof(flagbuf)));
+				 trim_prefix(dp->filename), dp->lineno,
+				 dt->mod_name, dp->function,
+				 ddebug_describe_flags(dp, flagbuf,
+						       sizeof(flagbuf)));
 		}
 	}
 	mutex_unlock(&ddebug_lock);
@@ -213,19 +221,23 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords)
 		/* find `end' of word, whitespace separated or quoted */
 		if (*buf == '"' || *buf == '\'') {
 			int quote = *buf++;
-			for (end = buf ; *end && *end != quote ; end++)
+			for (end = buf; *end && *end != quote; end++)
 				;
-			if (!*end)
+			if (!*end) {
+				pr_err("unclosed quote: %s\n", buf);
 				return -EINVAL;	/* unclosed quote */
+			}
 		} else {
-			for (end = buf ; *end && !isspace(*end) ; end++)
+			for (end = buf; *end && !isspace(*end); end++)
 				;
 			BUG_ON(end == buf);
 		}
 
 		/* `buf' is start of word, `end' is one past its end */
-		if (nwords == maxwords)
+		if (nwords == maxwords) {
+			pr_err("too many words, legal max <=%d\n", maxwords);
 			return -EINVAL;	/* ran out of words[] before bytes */
+		}
 		if (*end)
 			*end++ = '\0';	/* terminate the word */
 		words[nwords++] = buf;
@@ -235,7 +247,7 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords)
 	if (verbose) {
 		int i;
 		pr_info("split into words:");
-		for (i = 0 ; i < nwords ; i++)
+		for (i = 0; i < nwords; i++)
 			pr_cont(" \"%s\"", words[i]);
 		pr_cont("\n");
 	}
@@ -257,7 +269,11 @@ static inline int parse_lineno(const char *str, unsigned int *val)
 		return 0;
 	}
 	*val = simple_strtoul(str, &end, 10);
-	return end == NULL || end == str || *end != '\0' ? -EINVAL : 0;
+	if (end == NULL || end == str || *end != '\0') {
+		pr_err("bad line-number: %s\n", str);
+		return -EINVAL;
+	}
+	return 0;
 }
 
 /*
@@ -286,11 +302,11 @@ static char *unescape(char *str)
 			in += 2;
 			continue;
 		} else if (isodigit(in[1]) &&
-			 isodigit(in[2]) &&
-			 isodigit(in[3])) {
-			*out++ = ((in[1] - '0')<<6) |
-				((in[2] - '0')<<3) |
-				(in[3] - '0');
+			   isodigit(in[2]) &&
+			   isodigit(in[3])) {
+			*out++ = (((in[1] - '0') << 6) |
+				  ((in[2] - '0') << 3) |
+				  (in[3] - '0'));
 			in += 4;
 			continue;
 		}
@@ -308,8 +324,8 @@ static int check_set(const char **dest, char *src, char *name)
 
 	if (*dest) {
 		rc = -EINVAL;
-		pr_err("match-spec:%s val:%s overridden by %s",
-			name, *dest, src);
+		pr_err("match-spec:%s val:%s overridden by %s\n",
+		       name, *dest, src);
 	}
 	*dest = src;
 	return rc;
@@ -337,40 +353,46 @@ static int ddebug_parse_query(char *words[], int nwords,
 	int rc;
 
 	/* check we have an even number of words */
-	if (nwords % 2 != 0)
+	if (nwords % 2 != 0) {
+		pr_err("expecting pairs of match-spec <value>\n");
 		return -EINVAL;
+	}
 	memset(query, 0, sizeof(*query));
 
 	if (modname)
 		/* support $modname.dyndbg=<multiple queries> */
 		query->module = modname;
 
-	for (i = 0 ; i < nwords ; i += 2) {
-		if (!strcmp(words[i], "func"))
+	for (i = 0; i < nwords; i += 2) {
+		if (!strcmp(words[i], "func")) {
 			rc = check_set(&query->function, words[i+1], "func");
-		else if (!strcmp(words[i], "file"))
+		} else if (!strcmp(words[i], "file")) {
 			rc = check_set(&query->filename, words[i+1], "file");
-		else if (!strcmp(words[i], "module"))
+		} else if (!strcmp(words[i], "module")) {
 			rc = check_set(&query->module, words[i+1], "module");
-		else if (!strcmp(words[i], "format"))
+		} else if (!strcmp(words[i], "format")) {
 			rc = check_set(&query->format, unescape(words[i+1]),
-				"format");
-		else if (!strcmp(words[i], "line")) {
+				       "format");
+		} else if (!strcmp(words[i], "line")) {
 			char *first = words[i+1];
 			char *last = strchr(first, '-');
 			if (query->first_lineno || query->last_lineno) {
-				pr_err("match-spec:line given 2 times\n");
+				pr_err("match-spec: line used 2x\n");
 				return -EINVAL;
 			}
 			if (last)
 				*last++ = '\0';
-			if (parse_lineno(first, &query->first_lineno) < 0)
+			if (parse_lineno(first, &query->first_lineno) < 0) {
+				pr_err("line-number is <0\n");
 				return -EINVAL;
+			}
 			if (last) {
 				/* range <first>-<last> */
 				if (parse_lineno(last, &query->last_lineno)
 				    < query->first_lineno) {
-					pr_err("last-line < 1st-line\n");
+					pr_err("last-line:%d < 1st-line:%d\n",
+						query->last_lineno,
+						query->first_lineno);
 					return -EINVAL;
 				}
 			} else {
@@ -406,19 +428,22 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
 		op = *str++;
 		break;
 	default:
+		pr_err("bad flag-op %c, at start of %s\n", *str, str);
 		return -EINVAL;
 	}
 	vpr_info("op='%c'\n", op);
 
-	for ( ; *str ; ++str) {
+	for (; *str ; ++str) {
 		for (i = ARRAY_SIZE(opt_array) - 1; i >= 0; i--) {
 			if (*str == opt_array[i].opt_char) {
 				flags |= opt_array[i].flag;
 				break;
 			}
 		}
-		if (i < 0)
+		if (i < 0) {
+			pr_err("unknown flag '%c' in \"%s\"\n", *str, str);
 			return -EINVAL;
+		}
 	}
 	vpr_info("flags=0x%x\n", flags);
 
@@ -450,16 +475,22 @@ static int ddebug_exec_query(char *query_string, const char *modname)
 	char *words[MAXWORDS];
 
 	nwords = ddebug_tokenize(query_string, words, MAXWORDS);
-	if (nwords <= 0)
+	if (nwords <= 0) {
+		pr_err("tokenize failed\n");
 		return -EINVAL;
-	if (ddebug_parse_query(words, nwords-1, &query, modname))
+	}
+	/* check flags 1st (last arg) so query is pairs of spec,val */
+	if (ddebug_parse_flags(words[nwords-1], &flags, &mask)) {
+		pr_err("flags parse failed\n");
 		return -EINVAL;
-	if (ddebug_parse_flags(words[nwords-1], &flags, &mask))
+	}
+	if (ddebug_parse_query(words, nwords-1, &query, modname)) {
+		pr_err("query parse failed\n");
 		return -EINVAL;
-
+	}
 	/* actually go and implement the change */
 	nfound = ddebug_change(&query, flags, mask);
-	vpr_info_dq((&query), (nfound) ? "applied" : "no-match");
+	vpr_info_dq(&query, nfound ? "applied" : "no-match");
 
 	return nfound;
 }
@@ -488,8 +519,9 @@ static int ddebug_exec_queries(char *query, const char *modname)
 		if (rc < 0) {
 			errs++;
 			exitcode = rc;
-		} else
+		} else {
 			nfound += rc;
+		}
 		i++;
 	}
 	vpr_info("processed %d queries, with %d matches, %d errs\n",
@@ -765,7 +797,7 @@ static void *ddebug_proc_next(struct seq_file *m, void *p, loff_t *pos)
 	struct _ddebug *dp;
 
 	vpr_info("called m=%p p=%p *pos=%lld\n",
-		m, p, (unsigned long long)*pos);
+		 m, p, (unsigned long long)*pos);
 
 	if (p == SEQ_START_TOKEN)
 		dp = ddebug_iter_first(iter);
@@ -791,14 +823,14 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
 
 	if (p == SEQ_START_TOKEN) {
 		seq_puts(m,
-			"# filename:lineno [module]function flags format\n");
+			 "# filename:lineno [module]function flags format\n");
 		return 0;
 	}
 
 	seq_printf(m, "%s:%u [%s]%s =%s \"",
-		trim_prefix(dp->filename), dp->lineno,
-		iter->table->mod_name, dp->function,
-		ddebug_describe_flags(dp, flagsbuf, sizeof(flagsbuf)));
+		   trim_prefix(dp->filename), dp->lineno,
+		   iter->table->mod_name, dp->function,
+		   ddebug_describe_flags(dp, flagsbuf, sizeof(flagsbuf)));
 	seq_escape(m, dp->format, "\t\r\n\"");
 	seq_puts(m, "\"\n");
 
@@ -845,7 +877,7 @@ static int ddebug_proc_open(struct inode *inode, struct file *file)
 		kfree(iter);
 		return err;
 	}
-	((struct seq_file *) file->private_data)->private = iter;
+	((struct seq_file *)file->private_data)->private = iter;
 	return 0;
 }
 
@@ -1002,8 +1034,7 @@ static int __init dynamic_debug_init(void)
 	int verbose_bytes = 0;
 
 	if (__start___verbose == __stop___verbose) {
-		pr_warn("_ddebug table is empty in a "
-			"CONFIG_DYNAMIC_DEBUG build");
+		pr_warn("_ddebug table is empty in a CONFIG_DYNAMIC_DEBUG build\n");
 		return 1;
 	}
 	iter = __start___verbose;
@@ -1030,18 +1061,16 @@ static int __init dynamic_debug_init(void)
 		goto out_err;
 
 	ddebug_init_success = 1;
-	vpr_info("%d modules, %d entries and %d bytes in ddebug tables,"
-		" %d bytes in (readonly) verbose section\n",
-		modct, entries, (int)( modct * sizeof(struct ddebug_table)),
-		verbose_bytes + (int)(__stop___verbose - __start___verbose));
+	vpr_info("%d modules, %d entries and %d bytes in ddebug tables, %d bytes in (readonly) verbose section\n",
+		 modct, entries, (int)(modct * sizeof(struct ddebug_table)),
+		 verbose_bytes + (int)(__stop___verbose - __start___verbose));
 
 	/* apply ddebug_query boot param, dont unload tables on err */
 	if (ddebug_setup_string[0] != '\0') {
-		pr_warn("ddebug_query param name is deprecated,"
-			" change it to dyndbg\n");
+		pr_warn("ddebug_query param name is deprecated, change it to dyndbg\n");
 		ret = ddebug_exec_queries(ddebug_setup_string, NULL);
 		if (ret < 0)
-			pr_warn("Invalid ddebug boot param %s",
+			pr_warn("Invalid ddebug boot param %s\n",
 				ddebug_setup_string);
 		else
 			pr_info("%d changes by ddebug_query\n", ret);
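The vpr_info() rewrite at the top of this file fixes a classic macro hazard: the old definition put if (verbose) outside the do/while, so the macro was not a single statement and a following else silently paired with the macro's hidden if. A minimal illustration; example() and its condition are hypothetical:

static int example(int cond)
{
	/*
	 * With the old macro this expanded to
	 *	if (cond) if (verbose) do { ... } while (0); else return -EINVAL;
	 * so the else bound to "if (verbose)": -EINVAL was returned when
	 * cond was true but verbose was 0, and never when cond was false.
	 * The new do { if (verbose) ... } while (0) form binds correctly.
	 */
	if (cond)
		vpr_info("matched\n");
	else
		return -EINVAL;
	return 0;
}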
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 6540d657dca4..3f0494c9d57a 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -227,6 +227,7 @@ void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
 }
 EXPORT_SYMBOL(print_hex_dump);
 
+#if !defined(CONFIG_DYNAMIC_DEBUG)
 /**
  * print_hex_dump_bytes - shorthand form of print_hex_dump() with default params
  * @prefix_str: string to prefix each line with;
@@ -246,4 +247,5 @@ void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
 			  buf, len, true);
 }
 EXPORT_SYMBOL(print_hex_dump_bytes);
-#endif
+#endif /* !defined(CONFIG_DYNAMIC_DEBUG) */
+#endif /* defined(CONFIG_PRINTK) */
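Guarding print_hex_dump_bytes() with #if !defined(CONFIG_DYNAMIC_DEBUG) makes room for a dynamic-debug variant of the same helper while leaving the calling convention unchanged. A small usage sketch; dump_example and its buffer are hypothetical:

static void dump_example(const void *buf, size_t len)
{
	/* 16 bytes per line with offset prefixes, e.g. "raw: 00000000: ..." */
	print_hex_dump_bytes("raw: ", DUMP_PREFIX_OFFSET, buf, len);
}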
@@ -35,10 +35,41 @@ | |||
35 | #include <linux/string.h> | 35 | #include <linux/string.h> |
36 | #include <linux/idr.h> | 36 | #include <linux/idr.h> |
37 | #include <linux/spinlock.h> | 37 | #include <linux/spinlock.h> |
38 | #include <linux/percpu.h> | ||
39 | #include <linux/hardirq.h> | ||
40 | |||
41 | #define MAX_IDR_SHIFT (sizeof(int) * 8 - 1) | ||
42 | #define MAX_IDR_BIT (1U << MAX_IDR_SHIFT) | ||
43 | |||
44 | /* Leave the possibility of an incomplete final layer */ | ||
45 | #define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS) | ||
46 | |||
47 | /* Number of id_layer structs to leave in free list */ | ||
48 | #define MAX_IDR_FREE (MAX_IDR_LEVEL * 2) | ||
38 | 49 | ||
39 | static struct kmem_cache *idr_layer_cache; | 50 | static struct kmem_cache *idr_layer_cache; |
51 | static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head); | ||
52 | static DEFINE_PER_CPU(int, idr_preload_cnt); | ||
40 | static DEFINE_SPINLOCK(simple_ida_lock); | 53 | static DEFINE_SPINLOCK(simple_ida_lock); |
41 | 54 | ||
55 | /* the maximum ID which can be allocated given idr->layers */ | ||
56 | static int idr_max(int layers) | ||
57 | { | ||
58 | int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT); | ||
59 | |||
60 | return (1 << bits) - 1; | ||
61 | } | ||
62 | |||
63 | /* | ||
64 | * Prefix mask for an idr_layer at @layer. For layer 0, the prefix mask is | ||
65 | * all bits except for the lower IDR_BITS. For layer 1, 2 * IDR_BITS, and | ||
66 | * so on. | ||
67 | */ | ||
68 | static int idr_layer_prefix_mask(int layer) | ||
69 | { | ||
70 | return ~idr_max(layer + 1); | ||
71 | } | ||
72 | |||
42 | static struct idr_layer *get_from_free_list(struct idr *idp) | 73 | static struct idr_layer *get_from_free_list(struct idr *idp) |
43 | { | 74 | { |
44 | struct idr_layer *p; | 75 | struct idr_layer *p; |
@@ -54,6 +85,50 @@ static struct idr_layer *get_from_free_list(struct idr *idp) | |||
54 | return(p); | 85 | return(p); |
55 | } | 86 | } |
56 | 87 | ||
88 | /** | ||
89 | * idr_layer_alloc - allocate a new idr_layer | ||
90 | * @gfp_mask: allocation mask | ||
91 | * @layer_idr: optional idr to allocate from | ||
92 | * | ||
93 | * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch | ||
94 | * one from the per-cpu preload buffer. If @layer_idr is not %NULL, fetch | ||
95 | * an idr_layer from @idr->id_free. | ||
96 | * | ||
97 | * @layer_idr is to maintain backward compatibility with the old alloc | ||
98 | * interface - idr_pre_get() and idr_get_new*() - and will be removed | ||
99 | * together with per-pool preload buffer. | ||
100 | */ | ||
101 | static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr) | ||
102 | { | ||
103 | struct idr_layer *new; | ||
104 | |||
105 | /* this is the old path, bypass to get_from_free_list() */ | ||
106 | if (layer_idr) | ||
107 | return get_from_free_list(layer_idr); | ||
108 | |||
109 | /* try to allocate directly from kmem_cache */ | ||
110 | new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); | ||
111 | if (new) | ||
112 | return new; | ||
113 | |||
114 | /* | ||
115 | * Try to fetch one from the per-cpu preload buffer if in process | ||
116 | * context. See idr_preload() for details. | ||
117 | */ | ||
118 | if (in_interrupt()) | ||
119 | return NULL; | ||
120 | |||
121 | preempt_disable(); | ||
122 | new = __this_cpu_read(idr_preload_head); | ||
123 | if (new) { | ||
124 | __this_cpu_write(idr_preload_head, new->ary[0]); | ||
125 | __this_cpu_dec(idr_preload_cnt); | ||
126 | new->ary[0] = NULL; | ||
127 | } | ||
128 | preempt_enable(); | ||
129 | return new; | ||
130 | } | ||
131 | |||
57 | static void idr_layer_rcu_free(struct rcu_head *head) | 132 | static void idr_layer_rcu_free(struct rcu_head *head) |
58 | { | 133 | { |
59 | struct idr_layer *layer; | 134 | struct idr_layer *layer; |
@@ -62,8 +137,10 @@ static void idr_layer_rcu_free(struct rcu_head *head) | |||
62 | kmem_cache_free(idr_layer_cache, layer); | 137 | kmem_cache_free(idr_layer_cache, layer); |
63 | } | 138 | } |
64 | 139 | ||
65 | static inline void free_layer(struct idr_layer *p) | 140 | static inline void free_layer(struct idr *idr, struct idr_layer *p) |
66 | { | 141 | { |
142 | if (idr->hint && idr->hint == p) | ||
143 | RCU_INIT_POINTER(idr->hint, NULL); | ||
67 | call_rcu(&p->rcu_head, idr_layer_rcu_free); | 144 | call_rcu(&p->rcu_head, idr_layer_rcu_free); |
68 | } | 145 | } |
69 | 146 | ||
@@ -92,18 +169,18 @@ static void idr_mark_full(struct idr_layer **pa, int id) | |||
92 | struct idr_layer *p = pa[0]; | 169 | struct idr_layer *p = pa[0]; |
93 | int l = 0; | 170 | int l = 0; |
94 | 171 | ||
95 | __set_bit(id & IDR_MASK, &p->bitmap); | 172 | __set_bit(id & IDR_MASK, p->bitmap); |
96 | /* | 173 | /* |
97 | * If this layer is full mark the bit in the layer above to | 174 | * If this layer is full mark the bit in the layer above to |
98 | * show that this part of the radix tree is full. This may | 175 | * show that this part of the radix tree is full. This may |
99 | * complete the layer above and require walking up the radix | 176 | * complete the layer above and require walking up the radix |
100 | * tree. | 177 | * tree. |
101 | */ | 178 | */ |
102 | while (p->bitmap == IDR_FULL) { | 179 | while (bitmap_full(p->bitmap, IDR_SIZE)) { |
103 | if (!(p = pa[++l])) | 180 | if (!(p = pa[++l])) |
104 | break; | 181 | break; |
105 | id = id >> IDR_BITS; | 182 | id = id >> IDR_BITS; |
106 | __set_bit((id & IDR_MASK), &p->bitmap); | 183 | __set_bit((id & IDR_MASK), p->bitmap); |
107 | } | 184 | } |
108 | } | 185 | } |
109 | 186 | ||
@@ -133,12 +210,29 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask) | |||
133 | } | 210 | } |
134 | EXPORT_SYMBOL(idr_pre_get); | 211 | EXPORT_SYMBOL(idr_pre_get); |
135 | 212 | ||
136 | static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) | 213 | /** |
214 | * sub_alloc - try to allocate an id without growing the tree depth | ||
215 | * @idp: idr handle | ||
216 | * @starting_id: id to start search at | ||
217 | * @id: pointer to the allocated handle | ||
218 | * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer | ||
219 | * @gfp_mask: allocation mask for idr_layer_alloc() | ||
220 | * @layer_idr: optional idr passed to idr_layer_alloc() | ||
221 | * | ||
222 | * Allocate an id in range [@starting_id, INT_MAX] from @idp without | ||
223 | * growing its depth. Returns | ||
224 | * | ||
225 | * the allocated id >= 0 if successful, | ||
226 | * -EAGAIN if the tree needs to grow for allocation to succeed, | ||
227 | * -ENOSPC if the id space is exhausted, | ||
228 | * -ENOMEM if more idr_layers need to be allocated. | ||
229 | */ | ||
230 | static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa, | ||
231 | gfp_t gfp_mask, struct idr *layer_idr) | ||
137 | { | 232 | { |
138 | int n, m, sh; | 233 | int n, m, sh; |
139 | struct idr_layer *p, *new; | 234 | struct idr_layer *p, *new; |
140 | int l, id, oid; | 235 | int l, id, oid; |
141 | unsigned long bm; | ||
142 | 236 | ||
143 | id = *starting_id; | 237 | id = *starting_id; |
144 | restart: | 238 | restart: |
@@ -150,8 +244,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) | |||
150 | * We run around this while loop until we reach the leaf node... | 244 | * We run around this while loop until we reach the leaf node... |
151 | */ | 245 | */ |
152 | n = (id >> (IDR_BITS*l)) & IDR_MASK; | 246 | n = (id >> (IDR_BITS*l)) & IDR_MASK; |
153 | bm = ~p->bitmap; | 247 | m = find_next_zero_bit(p->bitmap, IDR_SIZE, n); |
154 | m = find_next_bit(&bm, IDR_SIZE, n); | ||
155 | if (m == IDR_SIZE) { | 248 | if (m == IDR_SIZE) { |
156 | /* no space available go back to previous layer. */ | 249 | /* no space available go back to previous layer. */ |
157 | l++; | 250 | l++; |
@@ -161,7 +254,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) | |||
161 | /* if already at the top layer, we need to grow */ | 254 | /* if already at the top layer, we need to grow */ |
162 | if (id >= 1 << (idp->layers * IDR_BITS)) { | 255 | if (id >= 1 << (idp->layers * IDR_BITS)) { |
163 | *starting_id = id; | 256 | *starting_id = id; |
164 | return IDR_NEED_TO_GROW; | 257 | return -EAGAIN; |
165 | } | 258 | } |
166 | p = pa[l]; | 259 | p = pa[l]; |
167 | BUG_ON(!p); | 260 | BUG_ON(!p); |
@@ -180,17 +273,18 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) | |||
180 | id = ((id >> sh) ^ n ^ m) << sh; | 273 | id = ((id >> sh) ^ n ^ m) << sh; |
181 | } | 274 | } |
182 | if ((id >= MAX_IDR_BIT) || (id < 0)) | 275 | if ((id >= MAX_IDR_BIT) || (id < 0)) |
183 | return IDR_NOMORE_SPACE; | 276 | return -ENOSPC; |
184 | if (l == 0) | 277 | if (l == 0) |
185 | break; | 278 | break; |
186 | /* | 279 | /* |
187 | * Create the layer below if it is missing. | 280 | * Create the layer below if it is missing. |
188 | */ | 281 | */ |
189 | if (!p->ary[m]) { | 282 | if (!p->ary[m]) { |
190 | new = get_from_free_list(idp); | 283 | new = idr_layer_alloc(gfp_mask, layer_idr); |
191 | if (!new) | 284 | if (!new) |
192 | return -1; | 285 | return -ENOMEM; |
193 | new->layer = l-1; | 286 | new->layer = l-1; |
287 | new->prefix = id & idr_layer_prefix_mask(new->layer); | ||
194 | rcu_assign_pointer(p->ary[m], new); | 288 | rcu_assign_pointer(p->ary[m], new); |
195 | p->count++; | 289 | p->count++; |
196 | } | 290 | } |
@@ -203,7 +297,8 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) | |||
203 | } | 297 | } |
204 | 298 | ||
205 | static int idr_get_empty_slot(struct idr *idp, int starting_id, | 299 | static int idr_get_empty_slot(struct idr *idp, int starting_id, |
206 | struct idr_layer **pa) | 300 | struct idr_layer **pa, gfp_t gfp_mask, |
301 | struct idr *layer_idr) | ||
207 | { | 302 | { |
208 | struct idr_layer *p, *new; | 303 | struct idr_layer *p, *new; |
209 | int layers, v, id; | 304 | int layers, v, id; |
@@ -214,8 +309,8 @@ build_up: | |||
214 | p = idp->top; | 309 | p = idp->top; |
215 | layers = idp->layers; | 310 | layers = idp->layers; |
216 | if (unlikely(!p)) { | 311 | if (unlikely(!p)) { |
217 | if (!(p = get_from_free_list(idp))) | 312 | if (!(p = idr_layer_alloc(gfp_mask, layer_idr))) |
218 | return -1; | 313 | return -ENOMEM; |
219 | p->layer = 0; | 314 | p->layer = 0; |
220 | layers = 1; | 315 | layers = 1; |
221 | } | 316 | } |
@@ -223,7 +318,7 @@ build_up: | |||
223 | * Add a new layer to the top of the tree if the requested | 318 | * Add a new layer to the top of the tree if the requested |
224 | * id is larger than the currently allocated space. | 319 | * id is larger than the currently allocated space. |
225 | */ | 320 | */ |
226 | while ((layers < (MAX_IDR_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) { | 321 | while (id > idr_max(layers)) { |
227 | layers++; | 322 | layers++; |
228 | if (!p->count) { | 323 | if (!p->count) { |
229 | /* special case: if the tree is currently empty, | 324 | /* special case: if the tree is currently empty, |
@@ -231,9 +326,10 @@ build_up: | |||
231 | * upwards. | 326 | * upwards. |
232 | */ | 327 | */ |
233 | p->layer++; | 328 | p->layer++; |
329 | WARN_ON_ONCE(p->prefix); | ||
234 | continue; | 330 | continue; |
235 | } | 331 | } |
236 | if (!(new = get_from_free_list(idp))) { | 332 | if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) { |
237 | /* | 333 | /* |
238 | * The allocation failed. If we built part of | 334 | * The allocation failed. If we built part of |
239 | * the structure tear it down. | 335 | * the structure tear it down. |
@@ -242,45 +338,42 @@ build_up: | |||
242 | for (new = p; p && p != idp->top; new = p) { | 338 | for (new = p; p && p != idp->top; new = p) { |
243 | p = p->ary[0]; | 339 | p = p->ary[0]; |
244 | new->ary[0] = NULL; | 340 | new->ary[0] = NULL; |
245 | new->bitmap = new->count = 0; | 341 | new->count = 0; |
342 | bitmap_clear(new->bitmap, 0, IDR_SIZE); | ||
246 | __move_to_free_list(idp, new); | 343 | __move_to_free_list(idp, new); |
247 | } | 344 | } |
248 | spin_unlock_irqrestore(&idp->lock, flags); | 345 | spin_unlock_irqrestore(&idp->lock, flags); |
249 | return -1; | 346 | return -ENOMEM; |
250 | } | 347 | } |
251 | new->ary[0] = p; | 348 | new->ary[0] = p; |
252 | new->count = 1; | 349 | new->count = 1; |
253 | new->layer = layers-1; | 350 | new->layer = layers-1; |
254 | if (p->bitmap == IDR_FULL) | 351 | new->prefix = id & idr_layer_prefix_mask(new->layer); |
255 | __set_bit(0, &new->bitmap); | 352 | if (bitmap_full(p->bitmap, IDR_SIZE)) |
353 | __set_bit(0, new->bitmap); | ||
256 | p = new; | 354 | p = new; |
257 | } | 355 | } |
258 | rcu_assign_pointer(idp->top, p); | 356 | rcu_assign_pointer(idp->top, p); |
259 | idp->layers = layers; | 357 | idp->layers = layers; |
260 | v = sub_alloc(idp, &id, pa); | 358 | v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr); |
261 | if (v == IDR_NEED_TO_GROW) | 359 | if (v == -EAGAIN) |
262 | goto build_up; | 360 | goto build_up; |
263 | return(v); | 361 | return(v); |
264 | } | 362 | } |
265 | 363 | ||
266 | static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) | 364 | /* |
365 | * @id and @pa come from a successful allocation by idr_get_empty_slot(). | ||
366 | * Install the user pointer @ptr and mark the slot full. | ||
367 | */ | ||
368 | static void idr_fill_slot(struct idr *idr, void *ptr, int id, | ||
369 | struct idr_layer **pa) | ||
267 | { | 370 | { |
268 | struct idr_layer *pa[MAX_IDR_LEVEL]; | 371 | /* update hint used for lookup, cleared from free_layer() */ |
269 | int id; | 372 | rcu_assign_pointer(idr->hint, pa[0]); |
270 | |||
271 | id = idr_get_empty_slot(idp, starting_id, pa); | ||
272 | if (id >= 0) { | ||
273 | /* | ||
274 | * Successfully found an empty slot. Install the user | ||
275 | * pointer and mark the slot full. | ||
276 | */ | ||
277 | rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], | ||
278 | (struct idr_layer *)ptr); | ||
279 | pa[0]->count++; | ||
280 | idr_mark_full(pa, id); | ||
281 | } | ||
282 | 373 | ||
283 | return id; | 374 | rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr); |
375 | pa[0]->count++; | ||
376 | idr_mark_full(pa, id); | ||
284 | } | 377 | } |
285 | 378 | ||
286 | /** | 379 | /** |
@@ -303,49 +396,124 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) | |||
303 | */ | 396 | */ |
304 | int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) | 397 | int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) |
305 | { | 398 | { |
399 | struct idr_layer *pa[MAX_IDR_LEVEL + 1]; | ||
306 | int rv; | 400 | int rv; |
307 | 401 | ||
308 | rv = idr_get_new_above_int(idp, ptr, starting_id); | 402 | rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp); |
309 | /* | ||
310 | * This is a cheap hack until the IDR code can be fixed to | ||
311 | * return proper error values. | ||
312 | */ | ||
313 | if (rv < 0) | 403 | if (rv < 0) |
314 | return _idr_rc_to_errno(rv); | 404 | return rv == -ENOMEM ? -EAGAIN : rv; |
405 | |||
406 | idr_fill_slot(idp, ptr, rv, pa); | ||
315 | *id = rv; | 407 | *id = rv; |
316 | return 0; | 408 | return 0; |
317 | } | 409 | } |
318 | EXPORT_SYMBOL(idr_get_new_above); | 410 | EXPORT_SYMBOL(idr_get_new_above); |
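Callers of this legacy interface pair it with idr_pre_get() in a retry loop. A minimal caller sketch, assuming a hypothetical my_obj type that is not part of this patch:

	int my_obj_register(struct idr *idp, struct my_obj *obj)
	{
		int id, ret;

	again:
		if (idr_pre_get(idp, GFP_KERNEL) == 0)
			return -ENOMEM;
		ret = idr_get_new_above(idp, obj, 1, &id);
		if (ret == -EAGAIN)
			goto again;	/* preallocation consumed, refill */
		if (ret)
			return ret;	/* -ENOSPC: id space exhausted */
		obj->id = id;
		return 0;
	}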
319 | 411 | ||
320 | /** | 412 | /** |
321 | * idr_get_new - allocate new idr entry | 413 | * idr_preload - preload for idr_alloc() |
322 | * @idp: idr handle | 414 | * @gfp_mask: allocation mask to use for preloading |
323 | * @ptr: pointer you want associated with the id | ||
324 | * @id: pointer to the allocated handle | ||
325 | * | 415 | * |
326 | * If allocation from IDR's private freelist fails, idr_get_new_above() will | 416 | * Preload per-cpu layer buffer for idr_alloc(). Can only be used from |
327 | * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill | 417 | * process context and each idr_preload() invocation should be matched with |
328 | * IDR's preallocation and then retry the idr_get_new_above() call. | 418 | * idr_preload_end(). Note that preemption is disabled while preloaded. |
329 | * | 419 | * |
330 | * If the idr is full idr_get_new_above() will return %-ENOSPC. | 420 | * The first idr_alloc() in the preloaded section can be treated as if it |
421 | * were invoked with @gfp_mask used for preloading. This allows using more | ||
422 | * permissive allocation masks for idrs protected by spinlocks. | ||
423 | * | ||
424 | * For example, if idr_alloc() below fails, the failure can be treated as | ||
425 | * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT. | ||
426 | * | ||
427 | * idr_preload(GFP_KERNEL); | ||
428 | * spin_lock(lock); | ||
429 | * | ||
430 | * id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT); | ||
331 | * | 431 | * |
332 | * @id returns a value in the range %0 ... %0x7fffffff | 432 | * spin_unlock(lock); |
433 | * idr_preload_end(); | ||
434 | * if (id < 0) | ||
435 | * error; | ||
333 | */ | 436 | */ |
334 | int idr_get_new(struct idr *idp, void *ptr, int *id) | 437 | void idr_preload(gfp_t gfp_mask) |
335 | { | 438 | { |
336 | int rv; | 439 | /* |
440 | * Consuming the preload buffer from non-process context would break the | ||
441 | * preload allocation guarantee. Disallow usage from such contexts. | ||
442 | */ | ||
443 | WARN_ON_ONCE(in_interrupt()); | ||
444 | might_sleep_if(gfp_mask & __GFP_WAIT); | ||
445 | |||
446 | preempt_disable(); | ||
337 | 447 | ||
338 | rv = idr_get_new_above_int(idp, ptr, 0); | ||
339 | /* | 448 | /* |
340 | * This is a cheap hack until the IDR code can be fixed to | 449 | * idr_alloc() is likely to succeed without a full idr_layer buffer, |
341 | * return proper error values. | 450 | * and its return value needs to be checked for failure anyway. |
451 | * Silently give up if allocation fails here; the caller can treat | ||
452 | * failures from idr_alloc() as if idr_alloc() were called with | ||
453 | * @gfp_mask, which should be enough. | ||
342 | */ | 454 | */ |
343 | if (rv < 0) | 455 | while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) { |
344 | return _idr_rc_to_errno(rv); | 456 | struct idr_layer *new; |
345 | *id = rv; | 457 | |
346 | return 0; | 458 | preempt_enable(); |
459 | new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); | ||
460 | preempt_disable(); | ||
461 | if (!new) | ||
462 | break; | ||
463 | |||
464 | /* link the new one to per-cpu preload list */ | ||
465 | new->ary[0] = __this_cpu_read(idr_preload_head); | ||
466 | __this_cpu_write(idr_preload_head, new); | ||
467 | __this_cpu_inc(idr_preload_cnt); | ||
468 | } | ||
347 | } | 469 | } |
348 | EXPORT_SYMBOL(idr_get_new); | 470 | EXPORT_SYMBOL(idr_preload); |
471 | |||
472 | /** | ||
473 | * idr_alloc - allocate new idr entry | ||
474 | * @idr: the (initialized) idr | ||
475 | * @ptr: pointer to be associated with the new id | ||
476 | * @start: the minimum id (inclusive) | ||
477 | * @end: the maximum id (exclusive, <= 0 for max) | ||
478 | * @gfp_mask: memory allocation flags | ||
479 | * | ||
480 | * Allocate an id in [start, end) and associate it with @ptr. If no ID is | ||
481 | * available in the specified range, returns -ENOSPC. On memory allocation | ||
482 | * failure, returns -ENOMEM. | ||
483 | * | ||
484 | * Note that @end is treated as max when <= 0. This allows always | ||
485 | * using @start + N as @end as long as N stays within the integer range. | ||
486 | * | ||
487 | * The user is responsible for exclusively synchronizing all operations | ||
488 | * which may modify @idr. However, read-only accesses such as idr_find() | ||
489 | * or iteration can be performed under RCU read lock provided the user | ||
490 | * destroys @ptr in RCU-safe way after removal from idr. | ||
491 | */ | ||
492 | int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask) | ||
493 | { | ||
494 | int max = end > 0 ? end - 1 : INT_MAX; /* inclusive upper limit */ | ||
495 | struct idr_layer *pa[MAX_IDR_LEVEL + 1]; | ||
496 | int id; | ||
497 | |||
498 | might_sleep_if(gfp_mask & __GFP_WAIT); | ||
499 | |||
500 | /* sanity checks */ | ||
501 | if (WARN_ON_ONCE(start < 0)) | ||
502 | return -EINVAL; | ||
503 | if (unlikely(max < start)) | ||
504 | return -ENOSPC; | ||
505 | |||
506 | /* allocate id */ | ||
507 | id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL); | ||
508 | if (unlikely(id < 0)) | ||
509 | return id; | ||
510 | if (unlikely(id > max)) | ||
511 | return -ENOSPC; | ||
512 | |||
513 | idr_fill_slot(idr, ptr, id, pa); | ||
514 | return id; | ||
515 | } | ||
516 | EXPORT_SYMBOL_GPL(idr_alloc); | ||
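The kernel-doc example above corresponds to a caller along the following lines; struct my_state and my_obj are hypothetical. The preload makes the GFP_NOWAIT attempt under the spinlock as reliable as a plain GFP_KERNEL allocation:

	int my_obj_install(struct my_state *st, struct my_obj *obj)
	{
		int id;

		idr_preload(GFP_KERNEL);
		spin_lock(&st->lock);
		/* @end == 0 means no upper limit on the allocated id */
		id = idr_alloc(&st->idr, obj, 1, 0, GFP_NOWAIT);
		spin_unlock(&st->lock);
		idr_preload_end();

		if (id < 0)
			return id;	/* -ENOMEM or -ENOSPC */
		obj->id = id;
		return 0;
	}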
349 | 517 | ||
350 | static void idr_remove_warning(int id) | 518 | static void idr_remove_warning(int id) |
351 | { | 519 | { |
@@ -357,7 +525,7 @@ static void idr_remove_warning(int id) | |||
357 | static void sub_remove(struct idr *idp, int shift, int id) | 525 | static void sub_remove(struct idr *idp, int shift, int id) |
358 | { | 526 | { |
359 | struct idr_layer *p = idp->top; | 527 | struct idr_layer *p = idp->top; |
360 | struct idr_layer **pa[MAX_IDR_LEVEL]; | 528 | struct idr_layer **pa[MAX_IDR_LEVEL + 1]; |
361 | struct idr_layer ***paa = &pa[0]; | 529 | struct idr_layer ***paa = &pa[0]; |
362 | struct idr_layer *to_free; | 530 | struct idr_layer *to_free; |
363 | int n; | 531 | int n; |
@@ -367,26 +535,26 @@ static void sub_remove(struct idr *idp, int shift, int id) | |||
367 | 535 | ||
368 | while ((shift > 0) && p) { | 536 | while ((shift > 0) && p) { |
369 | n = (id >> shift) & IDR_MASK; | 537 | n = (id >> shift) & IDR_MASK; |
370 | __clear_bit(n, &p->bitmap); | 538 | __clear_bit(n, p->bitmap); |
371 | *++paa = &p->ary[n]; | 539 | *++paa = &p->ary[n]; |
372 | p = p->ary[n]; | 540 | p = p->ary[n]; |
373 | shift -= IDR_BITS; | 541 | shift -= IDR_BITS; |
374 | } | 542 | } |
375 | n = id & IDR_MASK; | 543 | n = id & IDR_MASK; |
376 | if (likely(p != NULL && test_bit(n, &p->bitmap))){ | 544 | if (likely(p != NULL && test_bit(n, p->bitmap))) { |
377 | __clear_bit(n, &p->bitmap); | 545 | __clear_bit(n, p->bitmap); |
378 | rcu_assign_pointer(p->ary[n], NULL); | 546 | rcu_assign_pointer(p->ary[n], NULL); |
379 | to_free = NULL; | 547 | to_free = NULL; |
380 | while(*paa && ! --((**paa)->count)){ | 548 | while(*paa && ! --((**paa)->count)){ |
381 | if (to_free) | 549 | if (to_free) |
382 | free_layer(to_free); | 550 | free_layer(idp, to_free); |
383 | to_free = **paa; | 551 | to_free = **paa; |
384 | **paa-- = NULL; | 552 | **paa-- = NULL; |
385 | } | 553 | } |
386 | if (!*paa) | 554 | if (!*paa) |
387 | idp->layers = 0; | 555 | idp->layers = 0; |
388 | if (to_free) | 556 | if (to_free) |
389 | free_layer(to_free); | 557 | free_layer(idp, to_free); |
390 | } else | 558 | } else |
391 | idr_remove_warning(id); | 559 | idr_remove_warning(id); |
392 | } | 560 | } |
@@ -401,8 +569,9 @@ void idr_remove(struct idr *idp, int id) | |||
401 | struct idr_layer *p; | 569 | struct idr_layer *p; |
402 | struct idr_layer *to_free; | 570 | struct idr_layer *to_free; |
403 | 571 | ||
404 | /* Mask off upper bits we don't use for the search. */ | 572 | /* see comment in idr_find_slowpath() */ |
405 | id &= MAX_IDR_MASK; | 573 | if (WARN_ON_ONCE(id < 0)) |
574 | return; | ||
406 | 575 | ||
407 | sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); | 576 | sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); |
408 | if (idp->top && idp->top->count == 1 && (idp->layers > 1) && | 577 | if (idp->top && idp->top->count == 1 && (idp->layers > 1) && |
@@ -417,8 +586,9 @@ void idr_remove(struct idr *idp, int id) | |||
417 | p = idp->top->ary[0]; | 586 | p = idp->top->ary[0]; |
418 | rcu_assign_pointer(idp->top, p); | 587 | rcu_assign_pointer(idp->top, p); |
419 | --idp->layers; | 588 | --idp->layers; |
420 | to_free->bitmap = to_free->count = 0; | 589 | to_free->count = 0; |
421 | free_layer(to_free); | 590 | bitmap_clear(to_free->bitmap, 0, IDR_SIZE); |
591 | free_layer(idp, to_free); | ||
422 | } | 592 | } |
423 | while (idp->id_free_cnt >= MAX_IDR_FREE) { | 593 | while (idp->id_free_cnt >= MAX_IDR_FREE) { |
424 | p = get_from_free_list(idp); | 594 | p = get_from_free_list(idp); |
@@ -433,34 +603,21 @@ void idr_remove(struct idr *idp, int id) | |||
433 | } | 603 | } |
434 | EXPORT_SYMBOL(idr_remove); | 604 | EXPORT_SYMBOL(idr_remove); |
435 | 605 | ||
436 | /** | 606 | void __idr_remove_all(struct idr *idp) |
437 | * idr_remove_all - remove all ids from the given idr tree | ||
438 | * @idp: idr handle | ||
439 | * | ||
440 | * idr_destroy() only frees up unused, cached idp_layers, but this | ||
441 | * function will remove all id mappings and leave all idp_layers | ||
442 | * unused. | ||
443 | * | ||
444 | * A typical clean-up sequence for objects stored in an idr tree will | ||
445 | * use idr_for_each() to free all objects, if necessary, then | ||
446 | * idr_remove_all() to remove all ids, and idr_destroy() to free | ||
447 | * up the cached idr_layers. | ||
448 | */ | ||
449 | void idr_remove_all(struct idr *idp) | ||
450 | { | 607 | { |
451 | int n, id, max; | 608 | int n, id, max; |
452 | int bt_mask; | 609 | int bt_mask; |
453 | struct idr_layer *p; | 610 | struct idr_layer *p; |
454 | struct idr_layer *pa[MAX_IDR_LEVEL]; | 611 | struct idr_layer *pa[MAX_IDR_LEVEL + 1]; |
455 | struct idr_layer **paa = &pa[0]; | 612 | struct idr_layer **paa = &pa[0]; |
456 | 613 | ||
457 | n = idp->layers * IDR_BITS; | 614 | n = idp->layers * IDR_BITS; |
458 | p = idp->top; | 615 | p = idp->top; |
459 | rcu_assign_pointer(idp->top, NULL); | 616 | rcu_assign_pointer(idp->top, NULL); |
460 | max = 1 << n; | 617 | max = idr_max(idp->layers); |
461 | 618 | ||
462 | id = 0; | 619 | id = 0; |
463 | while (id < max) { | 620 | while (id >= 0 && id <= max) { |
464 | while (n > IDR_BITS && p) { | 621 | while (n > IDR_BITS && p) { |
465 | n -= IDR_BITS; | 622 | n -= IDR_BITS; |
466 | *paa++ = p; | 623 | *paa++ = p; |
@@ -472,21 +629,32 @@ void idr_remove_all(struct idr *idp) | |||
472 | /* Get the highest bit that the above add changed from 0->1. */ | 629 | /* Get the highest bit that the above add changed from 0->1. */ |
473 | while (n < fls(id ^ bt_mask)) { | 630 | while (n < fls(id ^ bt_mask)) { |
474 | if (p) | 631 | if (p) |
475 | free_layer(p); | 632 | free_layer(idp, p); |
476 | n += IDR_BITS; | 633 | n += IDR_BITS; |
477 | p = *--paa; | 634 | p = *--paa; |
478 | } | 635 | } |
479 | } | 636 | } |
480 | idp->layers = 0; | 637 | idp->layers = 0; |
481 | } | 638 | } |
482 | EXPORT_SYMBOL(idr_remove_all); | 639 | EXPORT_SYMBOL(__idr_remove_all); |
483 | 640 | ||
484 | /** | 641 | /** |
485 | * idr_destroy - release all cached layers within an idr tree | 642 | * idr_destroy - release all cached layers within an idr tree |
486 | * @idp: idr handle | 643 | * @idp: idr handle |
644 | * | ||
645 | * Free all id mappings and all idp_layers. After this function, @idp is | ||
646 | * completely unused and can be freed / recycled. The caller is | ||
647 | * responsible for ensuring that no one else accesses @idp during or after | ||
648 | * idr_destroy(). | ||
649 | * | ||
650 | * A typical clean-up sequence for objects stored in an idr tree will use | ||
651 | * idr_for_each() to free all objects, if necessary, then idr_destroy() to | ||
652 | * free up the id mappings and cached idr_layers. | ||
487 | */ | 653 | */ |
488 | void idr_destroy(struct idr *idp) | 654 | void idr_destroy(struct idr *idp) |
489 | { | 655 | { |
656 | __idr_remove_all(idp); | ||
657 | |||
490 | while (idp->id_free_cnt) { | 658 | while (idp->id_free_cnt) { |
491 | struct idr_layer *p = get_from_free_list(idp); | 659 | struct idr_layer *p = get_from_free_list(idp); |
492 | kmem_cache_free(idr_layer_cache, p); | 660 | kmem_cache_free(idr_layer_cache, p); |
@@ -494,32 +662,28 @@ void idr_destroy(struct idr *idp) | |||
494 | } | 662 | } |
495 | EXPORT_SYMBOL(idr_destroy); | 663 | EXPORT_SYMBOL(idr_destroy); |
496 | 664 | ||
497 | /** | 665 | void *idr_find_slowpath(struct idr *idp, int id) |
498 | * idr_find - return pointer for given id | ||
499 | * @idp: idr handle | ||
500 | * @id: lookup key | ||
501 | * | ||
502 | * Return the pointer given the id it has been registered with. A %NULL | ||
503 | * return indicates that @id is not valid or you passed %NULL in | ||
504 | * idr_get_new(). | ||
505 | * | ||
506 | * This function can be called under rcu_read_lock(), given that the leaf | ||
507 | * pointers lifetimes are correctly managed. | ||
508 | */ | ||
509 | void *idr_find(struct idr *idp, int id) | ||
510 | { | 666 | { |
511 | int n; | 667 | int n; |
512 | struct idr_layer *p; | 668 | struct idr_layer *p; |
513 | 669 | ||
670 | /* | ||
671 | * If @id is negative, idr_find() used to ignore the sign bit and | ||
672 | * performed lookup with the rest of bits, which is weird and can | ||
673 | * lead to very obscure bugs. We're now returning NULL for all | ||
674 | * negative IDs but just in case somebody was depending on the sign | ||
675 | * bit being ignored, let's trigger WARN_ON_ONCE() so that they can | ||
676 | * be detected and fixed. WARN_ON_ONCE() can later be removed. | ||
677 | */ | ||
678 | if (WARN_ON_ONCE(id < 0)) | ||
679 | return NULL; | ||
680 | |||
514 | p = rcu_dereference_raw(idp->top); | 681 | p = rcu_dereference_raw(idp->top); |
515 | if (!p) | 682 | if (!p) |
516 | return NULL; | 683 | return NULL; |
517 | n = (p->layer+1) * IDR_BITS; | 684 | n = (p->layer+1) * IDR_BITS; |
518 | 685 | ||
519 | /* Mask off upper bits we don't use for the search. */ | 686 | if (id > idr_max(p->layer + 1)) |
520 | id &= MAX_IDR_MASK; | ||
521 | |||
522 | if (id >= (1 << n)) | ||
523 | return NULL; | 687 | return NULL; |
524 | BUG_ON(n == 0); | 688 | BUG_ON(n == 0); |
525 | 689 | ||
@@ -530,7 +694,7 @@ void *idr_find(struct idr *idp, int id) | |||
530 | } | 694 | } |
531 | return((void *)p); | 695 | return((void *)p); |
532 | } | 696 | } |
533 | EXPORT_SYMBOL(idr_find); | 697 | EXPORT_SYMBOL(idr_find_slowpath); |
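The rename to idr_find_slowpath() reflects that the common case is now handled by an inline fast path in include/linux/idr.h, which consults the cached @hint layer (via the new ->prefix field) before falling back here; roughly:

	static inline void *idr_find(struct idr *idr, int id)
	{
		struct idr_layer *hint = rcu_dereference_raw(idr->hint);

		/* hit: the leaf layer covering @id is cached in @hint */
		if (hint && (id & ~IDR_MASK) == hint->prefix)
			return rcu_dereference_raw(hint->ary[id & IDR_MASK]);

		return idr_find_slowpath(idr, id);
	}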
534 | 698 | ||
535 | /** | 699 | /** |
536 | * idr_for_each - iterate through all stored pointers | 700 | * idr_for_each - iterate through all stored pointers |
@@ -555,15 +719,15 @@ int idr_for_each(struct idr *idp, | |||
555 | { | 719 | { |
556 | int n, id, max, error = 0; | 720 | int n, id, max, error = 0; |
557 | struct idr_layer *p; | 721 | struct idr_layer *p; |
558 | struct idr_layer *pa[MAX_IDR_LEVEL]; | 722 | struct idr_layer *pa[MAX_IDR_LEVEL + 1]; |
559 | struct idr_layer **paa = &pa[0]; | 723 | struct idr_layer **paa = &pa[0]; |
560 | 724 | ||
561 | n = idp->layers * IDR_BITS; | 725 | n = idp->layers * IDR_BITS; |
562 | p = rcu_dereference_raw(idp->top); | 726 | p = rcu_dereference_raw(idp->top); |
563 | max = 1 << n; | 727 | max = idr_max(idp->layers); |
564 | 728 | ||
565 | id = 0; | 729 | id = 0; |
566 | while (id < max) { | 730 | while (id >= 0 && id <= max) { |
567 | while (n > 0 && p) { | 731 | while (n > 0 && p) { |
568 | n -= IDR_BITS; | 732 | n -= IDR_BITS; |
569 | *paa++ = p; | 733 | *paa++ = p; |
@@ -601,7 +765,7 @@ EXPORT_SYMBOL(idr_for_each); | |||
601 | */ | 765 | */ |
602 | void *idr_get_next(struct idr *idp, int *nextidp) | 766 | void *idr_get_next(struct idr *idp, int *nextidp) |
603 | { | 767 | { |
604 | struct idr_layer *p, *pa[MAX_IDR_LEVEL]; | 768 | struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1]; |
605 | struct idr_layer **paa = &pa[0]; | 769 | struct idr_layer **paa = &pa[0]; |
606 | int id = *nextidp; | 770 | int id = *nextidp; |
607 | int n, max; | 771 | int n, max; |
@@ -611,9 +775,9 @@ void *idr_get_next(struct idr *idp, int *nextidp) | |||
611 | if (!p) | 775 | if (!p) |
612 | return NULL; | 776 | return NULL; |
613 | n = (p->layer + 1) * IDR_BITS; | 777 | n = (p->layer + 1) * IDR_BITS; |
614 | max = 1 << n; | 778 | max = idr_max(p->layer + 1); |
615 | 779 | ||
616 | while (id < max) { | 780 | while (id >= 0 && id <= max) { |
617 | while (n > 0 && p) { | 781 | while (n > 0 && p) { |
618 | n -= IDR_BITS; | 782 | n -= IDR_BITS; |
619 | *paa++ = p; | 783 | *paa++ = p; |
@@ -625,7 +789,14 @@ void *idr_get_next(struct idr *idp, int *nextidp) | |||
625 | return p; | 789 | return p; |
626 | } | 790 | } |
627 | 791 | ||
628 | id += 1 << n; | 792 | /* |
793 | * Proceed to the next layer at the current level. Unlike | ||
794 | * idr_for_each(), @id isn't guaranteed to be aligned to | ||
795 | * layer boundary at this point and adding 1 << n may | ||
796 | * incorrectly skip IDs. Make sure we jump to the | ||
797 | * beginning of the next layer using round_up(). | ||
798 | */ | ||
799 | id = round_up(id + 1, 1 << n); | ||
629 | while (n < fls(id)) { | 800 | while (n < fls(id)) { |
630 | n += IDR_BITS; | 801 | n += IDR_BITS; |
631 | p = *--paa; | 802 | p = *--paa; |
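A worked example of what round_up() buys here, assuming IDR_BITS is 8: if the walk fails at n = 8 with id = 300, the subtree covering ids [256, 511] is exhausted and the next candidate is round_up(301, 256) = 512, the first id of the next subtree; the old "id += 1 << n" would have produced 556 and silently skipped ids 512..555.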
@@ -653,14 +824,16 @@ void *idr_replace(struct idr *idp, void *ptr, int id) | |||
653 | int n; | 824 | int n; |
654 | struct idr_layer *p, *old_p; | 825 | struct idr_layer *p, *old_p; |
655 | 826 | ||
827 | /* see comment in idr_find_slowpath() */ | ||
828 | if (WARN_ON_ONCE(id < 0)) | ||
829 | return ERR_PTR(-EINVAL); | ||
830 | |||
656 | p = idp->top; | 831 | p = idp->top; |
657 | if (!p) | 832 | if (!p) |
658 | return ERR_PTR(-EINVAL); | 833 | return ERR_PTR(-EINVAL); |
659 | 834 | ||
660 | n = (p->layer+1) * IDR_BITS; | 835 | n = (p->layer+1) * IDR_BITS; |
661 | 836 | ||
662 | id &= MAX_IDR_MASK; | ||
663 | |||
664 | if (id >= (1 << n)) | 837 | if (id >= (1 << n)) |
665 | return ERR_PTR(-EINVAL); | 838 | return ERR_PTR(-EINVAL); |
666 | 839 | ||
@@ -671,7 +844,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id) | |||
671 | } | 844 | } |
672 | 845 | ||
673 | n = id & IDR_MASK; | 846 | n = id & IDR_MASK; |
674 | if (unlikely(p == NULL || !test_bit(n, &p->bitmap))) | 847 | if (unlikely(p == NULL || !test_bit(n, p->bitmap))) |
675 | return ERR_PTR(-ENOENT); | 848 | return ERR_PTR(-ENOENT); |
676 | 849 | ||
677 | old_p = p->ary[n]; | 850 | old_p = p->ary[n]; |
@@ -780,7 +953,7 @@ EXPORT_SYMBOL(ida_pre_get); | |||
780 | */ | 953 | */ |
781 | int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) | 954 | int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) |
782 | { | 955 | { |
783 | struct idr_layer *pa[MAX_IDR_LEVEL]; | 956 | struct idr_layer *pa[MAX_IDR_LEVEL + 1]; |
784 | struct ida_bitmap *bitmap; | 957 | struct ida_bitmap *bitmap; |
785 | unsigned long flags; | 958 | unsigned long flags; |
786 | int idr_id = starting_id / IDA_BITMAP_BITS; | 959 | int idr_id = starting_id / IDA_BITMAP_BITS; |
@@ -789,9 +962,9 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) | |||
789 | 962 | ||
790 | restart: | 963 | restart: |
791 | /* get vacant slot */ | 964 | /* get vacant slot */ |
792 | t = idr_get_empty_slot(&ida->idr, idr_id, pa); | 965 | t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr); |
793 | if (t < 0) | 966 | if (t < 0) |
794 | return _idr_rc_to_errno(t); | 967 | return t == -ENOMEM ? -EAGAIN : t; |
795 | 968 | ||
796 | if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT) | 969 | if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT) |
797 | return -ENOSPC; | 970 | return -ENOSPC; |
@@ -852,25 +1025,6 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) | |||
852 | EXPORT_SYMBOL(ida_get_new_above); | 1025 | EXPORT_SYMBOL(ida_get_new_above); |
853 | 1026 | ||
854 | /** | 1027 | /** |
855 | * ida_get_new - allocate new ID | ||
856 | * @ida: idr handle | ||
857 | * @p_id: pointer to the allocated handle | ||
858 | * | ||
859 | * Allocate new ID. It should be called with any required locks. | ||
860 | * | ||
861 | * If memory is required, it will return %-EAGAIN, you should unlock | ||
862 | * and go back to the idr_pre_get() call. If the idr is full, it will | ||
863 | * return %-ENOSPC. | ||
864 | * | ||
865 | * @p_id returns a value in the range %0 ... %0x7fffffff. | ||
866 | */ | ||
867 | int ida_get_new(struct ida *ida, int *p_id) | ||
868 | { | ||
869 | return ida_get_new_above(ida, 0, p_id); | ||
870 | } | ||
871 | EXPORT_SYMBOL(ida_get_new); | ||
872 | |||
873 | /** | ||
874 | * ida_remove - remove the given ID | 1028 | * ida_remove - remove the given ID |
875 | * @ida: ida handle | 1029 | * @ida: ida handle |
876 | * @id: ID to free | 1030 | * @id: ID to free |
@@ -887,7 +1041,7 @@ void ida_remove(struct ida *ida, int id) | |||
887 | /* clear full bits while looking up the leaf idr_layer */ | 1041 | /* clear full bits while looking up the leaf idr_layer */ |
888 | while ((shift > 0) && p) { | 1042 | while ((shift > 0) && p) { |
889 | n = (idr_id >> shift) & IDR_MASK; | 1043 | n = (idr_id >> shift) & IDR_MASK; |
890 | __clear_bit(n, &p->bitmap); | 1044 | __clear_bit(n, p->bitmap); |
891 | p = p->ary[n]; | 1045 | p = p->ary[n]; |
892 | shift -= IDR_BITS; | 1046 | shift -= IDR_BITS; |
893 | } | 1047 | } |
@@ -896,7 +1050,7 @@ void ida_remove(struct ida *ida, int id) | |||
896 | goto err; | 1050 | goto err; |
897 | 1051 | ||
898 | n = idr_id & IDR_MASK; | 1052 | n = idr_id & IDR_MASK; |
899 | __clear_bit(n, &p->bitmap); | 1053 | __clear_bit(n, p->bitmap); |
900 | 1054 | ||
901 | bitmap = (void *)p->ary[n]; | 1055 | bitmap = (void *)p->ary[n]; |
902 | if (!test_bit(offset, bitmap->bitmap)) | 1056 | if (!test_bit(offset, bitmap->bitmap)) |
@@ -905,7 +1059,7 @@ void ida_remove(struct ida *ida, int id) | |||
905 | /* update bitmap and remove it if empty */ | 1059 | /* update bitmap and remove it if empty */ |
906 | __clear_bit(offset, bitmap->bitmap); | 1060 | __clear_bit(offset, bitmap->bitmap); |
907 | if (--bitmap->nr_busy == 0) { | 1061 | if (--bitmap->nr_busy == 0) { |
908 | __set_bit(n, &p->bitmap); /* to please idr_remove() */ | 1062 | __set_bit(n, p->bitmap); /* to please idr_remove() */ |
909 | idr_remove(&ida->idr, idr_id); | 1063 | idr_remove(&ida->idr, idr_id); |
910 | free_bitmap(ida, bitmap); | 1064 | free_bitmap(ida, bitmap); |
911 | } | 1065 | } |
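The ida side keeps the same legacy retry convention; a minimal sketch of a caller allocating a small id (my_get_minor is a hypothetical helper, and the caller must still serialize all ida operations itself):

	int my_get_minor(struct ida *ida)
	{
		int id, ret;

	again:
		if (!ida_pre_get(ida, GFP_KERNEL))
			return -ENOMEM;
		ret = ida_get_new_above(ida, 0, &id);
		if (ret == -EAGAIN)
			goto again;
		if (ret)
			return ret;	/* -ENOSPC */
		return id;
	}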
diff --git a/lib/kfifo.c b/lib/kfifo.c new file mode 100644 index 000000000000..7b7f83027b7b --- /dev/null +++ b/lib/kfifo.c | |||
@@ -0,0 +1,607 @@ | |||
1 | /* | ||
2 | * A generic kernel FIFO implementation | ||
3 | * | ||
4 | * Copyright (C) 2009/2010 Stefani Seibold <stefani@seibold.net> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/export.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/err.h> | ||
26 | #include <linux/log2.h> | ||
27 | #include <linux/uaccess.h> | ||
28 | #include <linux/kfifo.h> | ||
29 | |||
30 | /* | ||
31 | * internal helper to calculate the unused elements in a fifo | ||
32 | */ | ||
33 | static inline unsigned int kfifo_unused(struct __kfifo *fifo) | ||
34 | { | ||
35 | return (fifo->mask + 1) - (fifo->in - fifo->out); | ||
36 | } | ||
37 | |||
38 | int __kfifo_alloc(struct __kfifo *fifo, unsigned int size, | ||
39 | size_t esize, gfp_t gfp_mask) | ||
40 | { | ||
41 | /* | ||
42 | * round up to the next power of 2, since our 'let the indices | ||
43 | * wrap' technique works only in this case. | ||
44 | */ | ||
45 | size = roundup_pow_of_two(size); | ||
46 | |||
47 | fifo->in = 0; | ||
48 | fifo->out = 0; | ||
49 | fifo->esize = esize; | ||
50 | |||
51 | if (size < 2) { | ||
52 | fifo->data = NULL; | ||
53 | fifo->mask = 0; | ||
54 | return -EINVAL; | ||
55 | } | ||
56 | |||
57 | fifo->data = kmalloc(size * esize, gfp_mask); | ||
58 | |||
59 | if (!fifo->data) { | ||
60 | fifo->mask = 0; | ||
61 | return -ENOMEM; | ||
62 | } | ||
63 | fifo->mask = size - 1; | ||
64 | |||
65 | return 0; | ||
66 | } | ||
67 | EXPORT_SYMBOL(__kfifo_alloc); | ||
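These low-level helpers back the type-safe macros in <linux/kfifo.h>; a minimal byte-fifo sketch using the public API (error handling trimmed):

	#include <linux/kfifo.h>

	static struct kfifo fifo;

	static int fifo_demo(void)
	{
		unsigned char src[4] = { 1, 2, 3, 4 }, dst[4];
		unsigned int n;

		/* the requested size is rounded up to a power of two */
		if (kfifo_alloc(&fifo, 64, GFP_KERNEL))
			return -ENOMEM;

		n = kfifo_in(&fifo, src, sizeof(src));	/* bytes stored */
		n = kfifo_out(&fifo, dst, n);		/* bytes read back */

		kfifo_free(&fifo);
		return 0;
	}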
68 | |||
69 | void __kfifo_free(struct __kfifo *fifo) | ||
70 | { | ||
71 | kfree(fifo->data); | ||
72 | fifo->in = 0; | ||
73 | fifo->out = 0; | ||
74 | fifo->esize = 0; | ||
75 | fifo->data = NULL; | ||
76 | fifo->mask = 0; | ||
77 | } | ||
78 | EXPORT_SYMBOL(__kfifo_free); | ||
79 | |||
80 | int __kfifo_init(struct __kfifo *fifo, void *buffer, | ||
81 | unsigned int size, size_t esize) | ||
82 | { | ||
83 | size /= esize; | ||
84 | |||
85 | size = roundup_pow_of_two(size); | ||
86 | |||
87 | fifo->in = 0; | ||
88 | fifo->out = 0; | ||
89 | fifo->esize = esize; | ||
90 | fifo->data = buffer; | ||
91 | |||
92 | if (size < 2) { | ||
93 | fifo->mask = 0; | ||
94 | return -EINVAL; | ||
95 | } | ||
96 | fifo->mask = size - 1; | ||
97 | |||
98 | return 0; | ||
99 | } | ||
100 | EXPORT_SYMBOL(__kfifo_init); | ||
101 | |||
102 | static void kfifo_copy_in(struct __kfifo *fifo, const void *src, | ||
103 | unsigned int len, unsigned int off) | ||
104 | { | ||
105 | unsigned int size = fifo->mask + 1; | ||
106 | unsigned int esize = fifo->esize; | ||
107 | unsigned int l; | ||
108 | |||
109 | off &= fifo->mask; | ||
110 | if (esize != 1) { | ||
111 | off *= esize; | ||
112 | size *= esize; | ||
113 | len *= esize; | ||
114 | } | ||
115 | l = min(len, size - off); | ||
116 | |||
117 | memcpy(fifo->data + off, src, l); | ||
118 | memcpy(fifo->data, src + l, len - l); | ||
119 | /* | ||
120 | * make sure that the data in the fifo is up to date before | ||
121 | * incrementing the fifo->in index counter | ||
122 | */ | ||
123 | smp_wmb(); | ||
124 | } | ||
125 | |||
126 | unsigned int __kfifo_in(struct __kfifo *fifo, | ||
127 | const void *buf, unsigned int len) | ||
128 | { | ||
129 | unsigned int l; | ||
130 | |||
131 | l = kfifo_unused(fifo); | ||
132 | if (len > l) | ||
133 | len = l; | ||
134 | |||
135 | kfifo_copy_in(fifo, buf, len, fifo->in); | ||
136 | fifo->in += len; | ||
137 | return len; | ||
138 | } | ||
139 | EXPORT_SYMBOL(__kfifo_in); | ||
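The "let the indices wrap" scheme never masks fifo->in and fifo->out themselves, only offsets derived from them. For example, with size = 8 (mask = 7), in = 10 and out = 6, the fifo holds in - out = 4 elements, kfifo_unused() returns (7 + 1) - 4 = 4, and the next store lands at offset in & mask = 2. Because both counters are free-running unsigned ints, the subtraction stays correct across the 2^32 wrap as long as the size is a power of two.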
140 | |||
141 | static void kfifo_copy_out(struct __kfifo *fifo, void *dst, | ||
142 | unsigned int len, unsigned int off) | ||
143 | { | ||
144 | unsigned int size = fifo->mask + 1; | ||
145 | unsigned int esize = fifo->esize; | ||
146 | unsigned int l; | ||
147 | |||
148 | off &= fifo->mask; | ||
149 | if (esize != 1) { | ||
150 | off *= esize; | ||
151 | size *= esize; | ||
152 | len *= esize; | ||
153 | } | ||
154 | l = min(len, size - off); | ||
155 | |||
156 | memcpy(dst, fifo->data + off, l); | ||
157 | memcpy(dst + l, fifo->data, len - l); | ||
158 | /* | ||
159 | * make sure that the data is copied before | ||
160 | * incrementing the fifo->out index counter | ||
161 | */ | ||
162 | smp_wmb(); | ||
163 | } | ||
164 | |||
165 | unsigned int __kfifo_out_peek(struct __kfifo *fifo, | ||
166 | void *buf, unsigned int len) | ||
167 | { | ||
168 | unsigned int l; | ||
169 | |||
170 | l = fifo->in - fifo->out; | ||
171 | if (len > l) | ||
172 | len = l; | ||
173 | |||
174 | kfifo_copy_out(fifo, buf, len, fifo->out); | ||
175 | return len; | ||
176 | } | ||
177 | EXPORT_SYMBOL(__kfifo_out_peek); | ||
178 | |||
179 | unsigned int __kfifo_out(struct __kfifo *fifo, | ||
180 | void *buf, unsigned int len) | ||
181 | { | ||
182 | len = __kfifo_out_peek(fifo, buf, len); | ||
183 | fifo->out += len; | ||
184 | return len; | ||
185 | } | ||
186 | EXPORT_SYMBOL(__kfifo_out); | ||
187 | |||
188 | static unsigned long kfifo_copy_from_user(struct __kfifo *fifo, | ||
189 | const void __user *from, unsigned int len, unsigned int off, | ||
190 | unsigned int *copied) | ||
191 | { | ||
192 | unsigned int size = fifo->mask + 1; | ||
193 | unsigned int esize = fifo->esize; | ||
194 | unsigned int l; | ||
195 | unsigned long ret; | ||
196 | |||
197 | off &= fifo->mask; | ||
198 | if (esize != 1) { | ||
199 | off *= esize; | ||
200 | size *= esize; | ||
201 | len *= esize; | ||
202 | } | ||
203 | l = min(len, size - off); | ||
204 | |||
205 | ret = copy_from_user(fifo->data + off, from, l); | ||
206 | if (unlikely(ret)) | ||
207 | ret = DIV_ROUND_UP(ret + len - l, esize); | ||
208 | else { | ||
209 | ret = copy_from_user(fifo->data, from + l, len - l); | ||
210 | if (unlikely(ret)) | ||
211 | ret = DIV_ROUND_UP(ret, esize); | ||
212 | } | ||
213 | /* | ||
214 | * make sure that the data in the fifo is up to date before | ||
215 | * incrementing the fifo->in index counter | ||
216 | */ | ||
217 | smp_wmb(); | ||
218 | *copied = len - ret; | ||
219 | /* return the number of elements which are not copied */ | ||
220 | return ret; | ||
221 | } | ||
222 | |||
223 | int __kfifo_from_user(struct __kfifo *fifo, const void __user *from, | ||
224 | unsigned long len, unsigned int *copied) | ||
225 | { | ||
226 | unsigned int l; | ||
227 | unsigned long ret; | ||
228 | unsigned int esize = fifo->esize; | ||
229 | int err; | ||
230 | |||
231 | if (esize != 1) | ||
232 | len /= esize; | ||
233 | |||
234 | l = kfifo_unused(fifo); | ||
235 | if (len > l) | ||
236 | len = l; | ||
237 | |||
238 | ret = kfifo_copy_from_user(fifo, from, len, fifo->in, copied); | ||
239 | if (unlikely(ret)) { | ||
240 | len -= ret; | ||
241 | err = -EFAULT; | ||
242 | } else | ||
243 | err = 0; | ||
244 | fifo->in += len; | ||
245 | return err; | ||
246 | } | ||
247 | EXPORT_SYMBOL(__kfifo_from_user); | ||
248 | |||
249 | static unsigned long kfifo_copy_to_user(struct __kfifo *fifo, void __user *to, | ||
250 | unsigned int len, unsigned int off, unsigned int *copied) | ||
251 | { | ||
252 | unsigned int l; | ||
253 | unsigned long ret; | ||
254 | unsigned int size = fifo->mask + 1; | ||
255 | unsigned int esize = fifo->esize; | ||
256 | |||
257 | off &= fifo->mask; | ||
258 | if (esize != 1) { | ||
259 | off *= esize; | ||
260 | size *= esize; | ||
261 | len *= esize; | ||
262 | } | ||
263 | l = min(len, size - off); | ||
264 | |||
265 | ret = copy_to_user(to, fifo->data + off, l); | ||
266 | if (unlikely(ret)) | ||
267 | ret = DIV_ROUND_UP(ret + len - l, esize); | ||
268 | else { | ||
269 | ret = copy_to_user(to + l, fifo->data, len - l); | ||
270 | if (unlikely(ret)) | ||
271 | ret = DIV_ROUND_UP(ret, esize); | ||
272 | } | ||
273 | /* | ||
274 | * make sure that the data is copied before | ||
275 | * incrementing the fifo->out index counter | ||
276 | */ | ||
277 | smp_wmb(); | ||
278 | *copied = len - ret; | ||
279 | /* return the number of elements which are not copied */ | ||
280 | return ret; | ||
281 | } | ||
282 | |||
283 | int __kfifo_to_user(struct __kfifo *fifo, void __user *to, | ||
284 | unsigned long len, unsigned int *copied) | ||
285 | { | ||
286 | unsigned int l; | ||
287 | unsigned long ret; | ||
288 | unsigned int esize = fifo->esize; | ||
289 | int err; | ||
290 | |||
291 | if (esize != 1) | ||
292 | len /= esize; | ||
293 | |||
294 | l = fifo->in - fifo->out; | ||
295 | if (len > l) | ||
296 | len = l; | ||
297 | ret = kfifo_copy_to_user(fifo, to, len, fifo->out, copied); | ||
298 | if (unlikely(ret)) { | ||
299 | len -= ret; | ||
300 | err = -EFAULT; | ||
301 | } else | ||
302 | err = 0; | ||
303 | fifo->out += len; | ||
304 | return err; | ||
305 | } | ||
306 | EXPORT_SYMBOL(__kfifo_to_user); | ||
307 | |||
308 | static int setup_sgl_buf(struct scatterlist *sgl, void *buf, | ||
309 | int nents, unsigned int len) | ||
310 | { | ||
311 | int n; | ||
312 | unsigned int l; | ||
313 | unsigned int off; | ||
314 | struct page *page; | ||
315 | |||
316 | if (!nents) | ||
317 | return 0; | ||
318 | |||
319 | if (!len) | ||
320 | return 0; | ||
321 | |||
322 | n = 0; | ||
323 | page = virt_to_page(buf); | ||
324 | off = offset_in_page(buf); | ||
325 | l = 0; | ||
326 | |||
327 | while (len >= l + PAGE_SIZE - off) { | ||
328 | struct page *npage; | ||
329 | |||
330 | l += PAGE_SIZE; | ||
331 | buf += PAGE_SIZE; | ||
332 | npage = virt_to_page(buf); | ||
333 | if (page_to_phys(page) != page_to_phys(npage) - l) { | ||
334 | sg_set_page(sgl, page, l - off, off); | ||
335 | sgl = sg_next(sgl); | ||
336 | if (++n == nents || sgl == NULL) | ||
337 | return n; | ||
338 | page = npage; | ||
339 | len -= l - off; | ||
340 | l = off = 0; | ||
341 | } | ||
342 | } | ||
343 | sg_set_page(sgl, page, len, off); | ||
344 | return n + 1; | ||
345 | } | ||
346 | |||
347 | static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl, | ||
348 | int nents, unsigned int len, unsigned int off) | ||
349 | { | ||
350 | unsigned int size = fifo->mask + 1; | ||
351 | unsigned int esize = fifo->esize; | ||
352 | unsigned int l; | ||
353 | unsigned int n; | ||
354 | |||
355 | off &= fifo->mask; | ||
356 | if (esize != 1) { | ||
357 | off *= esize; | ||
358 | size *= esize; | ||
359 | len *= esize; | ||
360 | } | ||
361 | l = min(len, size - off); | ||
362 | |||
363 | n = setup_sgl_buf(sgl, fifo->data + off, nents, l); | ||
364 | n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l); | ||
365 | |||
366 | return n; | ||
367 | } | ||
368 | |||
369 | unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo, | ||
370 | struct scatterlist *sgl, int nents, unsigned int len) | ||
371 | { | ||
372 | unsigned int l; | ||
373 | |||
374 | l = kfifo_unused(fifo); | ||
375 | if (len > l) | ||
376 | len = l; | ||
377 | |||
378 | return setup_sgl(fifo, sgl, nents, len, fifo->in); | ||
379 | } | ||
380 | EXPORT_SYMBOL(__kfifo_dma_in_prepare); | ||
381 | |||
382 | unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo, | ||
383 | struct scatterlist *sgl, int nents, unsigned int len) | ||
384 | { | ||
385 | unsigned int l; | ||
386 | |||
387 | l = fifo->in - fifo->out; | ||
388 | if (len > l) | ||
389 | len = l; | ||
390 | |||
391 | return setup_sgl(fifo, sgl, nents, len, fifo->out); | ||
392 | } | ||
393 | EXPORT_SYMBOL(__kfifo_dma_out_prepare); | ||
394 | |||
395 | unsigned int __kfifo_max_r(unsigned int len, size_t recsize) | ||
396 | { | ||
397 | unsigned int max = (1 << (recsize << 3)) - 1; | ||
398 | |||
399 | if (len > max) | ||
400 | return max; | ||
401 | return len; | ||
402 | } | ||
403 | EXPORT_SYMBOL(__kfifo_max_r); | ||
404 | |||
405 | #define __KFIFO_PEEK(data, out, mask) \ | ||
406 | ((data)[(out) & (mask)]) | ||
407 | /* | ||
408 | * __kfifo_peek_n internal helper function for determining the length of | ||
409 | * the next record in the fifo | ||
410 | */ | ||
411 | static unsigned int __kfifo_peek_n(struct __kfifo *fifo, size_t recsize) | ||
412 | { | ||
413 | unsigned int l; | ||
414 | unsigned int mask = fifo->mask; | ||
415 | unsigned char *data = fifo->data; | ||
416 | |||
417 | l = __KFIFO_PEEK(data, fifo->out, mask); | ||
418 | |||
419 | if (--recsize) | ||
420 | l |= __KFIFO_PEEK(data, fifo->out + 1, mask) << 8; | ||
421 | |||
422 | return l; | ||
423 | } | ||
424 | |||
425 | #define __KFIFO_POKE(data, in, mask, val) \ | ||
426 | ( \ | ||
427 | (data)[(in) & (mask)] = (unsigned char)(val) \ | ||
428 | ) | ||
429 | |||
430 | /* | ||
431 | * __kfifo_poke_n internal helper function for storing the length of | ||
432 | * the record into the fifo | ||
433 | */ | ||
434 | static void __kfifo_poke_n(struct __kfifo *fifo, unsigned int n, size_t recsize) | ||
435 | { | ||
436 | unsigned int mask = fifo->mask; | ||
437 | unsigned char *data = fifo->data; | ||
438 | |||
439 | __KFIFO_POKE(data, fifo->in, mask, n); | ||
440 | |||
441 | if (recsize > 1) | ||
442 | __KFIFO_POKE(data, fifo->in + 1, mask, n >> 8); | ||
443 | } | ||
444 | |||
445 | unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize) | ||
446 | { | ||
447 | return __kfifo_peek_n(fifo, recsize); | ||
448 | } | ||
449 | EXPORT_SYMBOL(__kfifo_len_r); | ||
450 | |||
451 | unsigned int __kfifo_in_r(struct __kfifo *fifo, const void *buf, | ||
452 | unsigned int len, size_t recsize) | ||
453 | { | ||
454 | if (len + recsize > kfifo_unused(fifo)) | ||
455 | return 0; | ||
456 | |||
457 | __kfifo_poke_n(fifo, len, recsize); | ||
458 | |||
459 | kfifo_copy_in(fifo, buf, len, fifo->in + recsize); | ||
460 | fifo->in += len + recsize; | ||
461 | return len; | ||
462 | } | ||
463 | EXPORT_SYMBOL(__kfifo_in_r); | ||
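The _r variants prefix each record with its length stored in the first recsize bytes, least significant byte first (see __kfifo_poke_n() above). For example, __kfifo_in_r(fifo, "abc", 3, 1) consumes four bytes of fifo space laid out as:

	0x03 'a' 'b' 'c'

and __kfifo_max_r() caps a single record at 255 bytes for recsize == 1 and at 65535 for recsize == 2.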
464 | |||
465 | static unsigned int kfifo_out_copy_r(struct __kfifo *fifo, | ||
466 | void *buf, unsigned int len, size_t recsize, unsigned int *n) | ||
467 | { | ||
468 | *n = __kfifo_peek_n(fifo, recsize); | ||
469 | |||
470 | if (len > *n) | ||
471 | len = *n; | ||
472 | |||
473 | kfifo_copy_out(fifo, buf, len, fifo->out + recsize); | ||
474 | return len; | ||
475 | } | ||
476 | |||
477 | unsigned int __kfifo_out_peek_r(struct __kfifo *fifo, void *buf, | ||
478 | unsigned int len, size_t recsize) | ||
479 | { | ||
480 | unsigned int n; | ||
481 | |||
482 | if (fifo->in == fifo->out) | ||
483 | return 0; | ||
484 | |||
485 | return kfifo_out_copy_r(fifo, buf, len, recsize, &n); | ||
486 | } | ||
487 | EXPORT_SYMBOL(__kfifo_out_peek_r); | ||
488 | |||
489 | unsigned int __kfifo_out_r(struct __kfifo *fifo, void *buf, | ||
490 | unsigned int len, size_t recsize) | ||
491 | { | ||
492 | unsigned int n; | ||
493 | |||
494 | if (fifo->in == fifo->out) | ||
495 | return 0; | ||
496 | |||
497 | len = kfifo_out_copy_r(fifo, buf, len, recsize, &n); | ||
498 | fifo->out += n + recsize; | ||
499 | return len; | ||
500 | } | ||
501 | EXPORT_SYMBOL(__kfifo_out_r); | ||
502 | |||
503 | void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize) | ||
504 | { | ||
505 | unsigned int n; | ||
506 | |||
507 | n = __kfifo_peek_n(fifo, recsize); | ||
508 | fifo->out += n + recsize; | ||
509 | } | ||
510 | EXPORT_SYMBOL(__kfifo_skip_r); | ||
511 | |||
512 | int __kfifo_from_user_r(struct __kfifo *fifo, const void __user *from, | ||
513 | unsigned long len, unsigned int *copied, size_t recsize) | ||
514 | { | ||
515 | unsigned long ret; | ||
516 | |||
517 | len = __kfifo_max_r(len, recsize); | ||
518 | |||
519 | if (len + recsize > kfifo_unused(fifo)) { | ||
520 | *copied = 0; | ||
521 | return 0; | ||
522 | } | ||
523 | |||
524 | __kfifo_poke_n(fifo, len, recsize); | ||
525 | |||
526 | ret = kfifo_copy_from_user(fifo, from, len, fifo->in + recsize, copied); | ||
527 | if (unlikely(ret)) { | ||
528 | *copied = 0; | ||
529 | return -EFAULT; | ||
530 | } | ||
531 | fifo->in += len + recsize; | ||
532 | return 0; | ||
533 | } | ||
534 | EXPORT_SYMBOL(__kfifo_from_user_r); | ||
535 | |||
536 | int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to, | ||
537 | unsigned long len, unsigned int *copied, size_t recsize) | ||
538 | { | ||
539 | unsigned long ret; | ||
540 | unsigned int n; | ||
541 | |||
542 | if (fifo->in == fifo->out) { | ||
543 | *copied = 0; | ||
544 | return 0; | ||
545 | } | ||
546 | |||
547 | n = __kfifo_peek_n(fifo, recsize); | ||
548 | if (len > n) | ||
549 | len = n; | ||
550 | |||
551 | ret = kfifo_copy_to_user(fifo, to, len, fifo->out + recsize, copied); | ||
552 | if (unlikely(ret)) { | ||
553 | *copied = 0; | ||
554 | return -EFAULT; | ||
555 | } | ||
556 | fifo->out += n + recsize; | ||
557 | return 0; | ||
558 | } | ||
559 | EXPORT_SYMBOL(__kfifo_to_user_r); | ||
560 | |||
561 | unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo, | ||
562 | struct scatterlist *sgl, int nents, unsigned int len, size_t recsize) | ||
563 | { | ||
564 | if (!nents) | ||
565 | BUG(); | ||
566 | |||
567 | len = __kfifo_max_r(len, recsize); | ||
568 | |||
569 | if (len + recsize > kfifo_unused(fifo)) | ||
570 | return 0; | ||
571 | |||
572 | return setup_sgl(fifo, sgl, nents, len, fifo->in + recsize); | ||
573 | } | ||
574 | EXPORT_SYMBOL(__kfifo_dma_in_prepare_r); | ||
575 | |||
576 | void __kfifo_dma_in_finish_r(struct __kfifo *fifo, | ||
577 | unsigned int len, size_t recsize) | ||
578 | { | ||
579 | len = __kfifo_max_r(len, recsize); | ||
580 | __kfifo_poke_n(fifo, len, recsize); | ||
581 | fifo->in += len + recsize; | ||
582 | } | ||
583 | EXPORT_SYMBOL(__kfifo_dma_in_finish_r); | ||
584 | |||
585 | unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo, | ||
586 | struct scatterlist *sgl, int nents, unsigned int len, size_t recsize) | ||
587 | { | ||
588 | if (!nents) | ||
589 | BUG(); | ||
590 | |||
591 | len = __kfifo_max_r(len, recsize); | ||
592 | |||
593 | if (len + recsize > fifo->in - fifo->out) | ||
594 | return 0; | ||
595 | |||
596 | return setup_sgl(fifo, sgl, nents, len, fifo->out + recsize); | ||
597 | } | ||
598 | EXPORT_SYMBOL(__kfifo_dma_out_prepare_r); | ||
599 | |||
600 | void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize) | ||
601 | { | ||
602 | unsigned int len; | ||
603 | |||
604 | len = __kfifo_peek_n(fifo, recsize); | ||
605 | fifo->out += len + recsize; | ||
606 | } | ||
607 | EXPORT_SYMBOL(__kfifo_dma_out_finish_r); | ||
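A typical two-phase DMA producer built on the public wrappers would look roughly as follows; the prepare step only builds the scatterlist, and nothing becomes visible to readers until the finish step (DMA-engine programming elided):

	struct scatterlist sg[2];
	unsigned int nents, len = 256;

	nents = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), len);
	if (!nents)
		return;				/* not enough free space */

	/* ... hand sg[0..nents-1] to the DMA engine and wait ... */

	kfifo_dma_in_finish(&fifo, len);	/* publish the DMA'd data */

For kmalloc'ed fifo memory each of the at most two wrap-around chunks is physically contiguous, so two scatterlist entries suffice here.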
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c index 7aae0f2a5e0a..c3eb261a7df3 100644 --- a/lib/locking-selftest.c +++ b/lib/locking-selftest.c | |||
@@ -47,10 +47,10 @@ __setup("debug_locks_verbose=", setup_debug_locks_verbose); | |||
47 | * Normal standalone locks, for the circular and irq-context | 47 | * Normal standalone locks, for the circular and irq-context |
48 | * dependency tests: | 48 | * dependency tests: |
49 | */ | 49 | */ |
50 | static DEFINE_SPINLOCK(lock_A); | 50 | static DEFINE_RAW_SPINLOCK(lock_A); |
51 | static DEFINE_SPINLOCK(lock_B); | 51 | static DEFINE_RAW_SPINLOCK(lock_B); |
52 | static DEFINE_SPINLOCK(lock_C); | 52 | static DEFINE_RAW_SPINLOCK(lock_C); |
53 | static DEFINE_SPINLOCK(lock_D); | 53 | static DEFINE_RAW_SPINLOCK(lock_D); |
54 | 54 | ||
55 | static DEFINE_RWLOCK(rwlock_A); | 55 | static DEFINE_RWLOCK(rwlock_A); |
56 | static DEFINE_RWLOCK(rwlock_B); | 56 | static DEFINE_RWLOCK(rwlock_B); |
@@ -73,12 +73,12 @@ static DECLARE_RWSEM(rwsem_D); | |||
73 | * but X* and Y* are different classes. We do this so that | 73 | * but X* and Y* are different classes. We do this so that |
74 | * we do not trigger a real lockup: | 74 | * we do not trigger a real lockup: |
75 | */ | 75 | */ |
76 | static DEFINE_SPINLOCK(lock_X1); | 76 | static DEFINE_RAW_SPINLOCK(lock_X1); |
77 | static DEFINE_SPINLOCK(lock_X2); | 77 | static DEFINE_RAW_SPINLOCK(lock_X2); |
78 | static DEFINE_SPINLOCK(lock_Y1); | 78 | static DEFINE_RAW_SPINLOCK(lock_Y1); |
79 | static DEFINE_SPINLOCK(lock_Y2); | 79 | static DEFINE_RAW_SPINLOCK(lock_Y2); |
80 | static DEFINE_SPINLOCK(lock_Z1); | 80 | static DEFINE_RAW_SPINLOCK(lock_Z1); |
81 | static DEFINE_SPINLOCK(lock_Z2); | 81 | static DEFINE_RAW_SPINLOCK(lock_Z2); |
82 | 82 | ||
83 | static DEFINE_RWLOCK(rwlock_X1); | 83 | static DEFINE_RWLOCK(rwlock_X1); |
84 | static DEFINE_RWLOCK(rwlock_X2); | 84 | static DEFINE_RWLOCK(rwlock_X2); |
@@ -107,10 +107,10 @@ static DECLARE_RWSEM(rwsem_Z2); | |||
107 | */ | 107 | */ |
108 | #define INIT_CLASS_FUNC(class) \ | 108 | #define INIT_CLASS_FUNC(class) \ |
109 | static noinline void \ | 109 | static noinline void \ |
110 | init_class_##class(spinlock_t *lock, rwlock_t *rwlock, struct mutex *mutex, \ | 110 | init_class_##class(raw_spinlock_t *lock, rwlock_t *rwlock, \ |
111 | struct rw_semaphore *rwsem) \ | 111 | struct mutex *mutex, struct rw_semaphore *rwsem)\ |
112 | { \ | 112 | { \ |
113 | spin_lock_init(lock); \ | 113 | raw_spin_lock_init(lock); \ |
114 | rwlock_init(rwlock); \ | 114 | rwlock_init(rwlock); \ |
115 | mutex_init(mutex); \ | 115 | mutex_init(mutex); \ |
116 | init_rwsem(rwsem); \ | 116 | init_rwsem(rwsem); \ |
@@ -168,10 +168,10 @@ static void init_shared_classes(void) | |||
168 | * Shortcuts for lock/unlock API variants, to keep | 168 | * Shortcuts for lock/unlock API variants, to keep |
169 | * the testcases compact: | 169 | * the testcases compact: |
170 | */ | 170 | */ |
171 | #define L(x) spin_lock(&lock_##x) | 171 | #define L(x) raw_spin_lock(&lock_##x) |
172 | #define U(x) spin_unlock(&lock_##x) | 172 | #define U(x) raw_spin_unlock(&lock_##x) |
173 | #define LU(x) L(x); U(x) | 173 | #define LU(x) L(x); U(x) |
174 | #define SI(x) spin_lock_init(&lock_##x) | 174 | #define SI(x) raw_spin_lock_init(&lock_##x) |
175 | 175 | ||
176 | #define WL(x) write_lock(&rwlock_##x) | 176 | #define WL(x) write_lock(&rwlock_##x) |
177 | #define WU(x) write_unlock(&rwlock_##x) | 177 | #define WU(x) write_unlock(&rwlock_##x) |
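With this conversion, e.g. L(A); U(A) now expands to raw_spin_lock(&lock_A); raw_spin_unlock(&lock_A), so the selftests keep exercising the low-level lock primitives even on configurations (such as PREEMPT_RT patch sets) where a plain spinlock_t may become a sleeping lock.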
@@ -911,7 +911,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft) | |||
911 | 911 | ||
912 | #define I2(x) \ | 912 | #define I2(x) \ |
913 | do { \ | 913 | do { \ |
914 | spin_lock_init(&lock_##x); \ | 914 | raw_spin_lock_init(&lock_##x); \ |
915 | rwlock_init(&rwlock_##x); \ | 915 | rwlock_init(&rwlock_##x); \ |
916 | mutex_init(&mutex_##x); \ | 916 | mutex_init(&mutex_##x); \ |
917 | init_rwsem(&rwsem_##x); \ | 917 | init_rwsem(&rwsem_##x); \ |
diff --git a/lib/lru_cache.c b/lib/lru_cache.c index d71d89498943..8335d39d2ccd 100644 --- a/lib/lru_cache.c +++ b/lib/lru_cache.c | |||
@@ -262,12 +262,11 @@ static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr) | |||
262 | static struct lc_element *__lc_find(struct lru_cache *lc, unsigned int enr, | 262 | static struct lc_element *__lc_find(struct lru_cache *lc, unsigned int enr, |
263 | bool include_changing) | 263 | bool include_changing) |
264 | { | 264 | { |
265 | struct hlist_node *n; | ||
266 | struct lc_element *e; | 265 | struct lc_element *e; |
267 | 266 | ||
268 | BUG_ON(!lc); | 267 | BUG_ON(!lc); |
269 | BUG_ON(!lc->nr_elements); | 268 | BUG_ON(!lc->nr_elements); |
270 | hlist_for_each_entry(e, n, lc_hash_slot(lc, enr), colision) { | 269 | hlist_for_each_entry(e, lc_hash_slot(lc, enr), colision) { |
271 | /* "about to be changed" elements, pending transaction commit, | 270 | /* "about to be changed" elements, pending transaction commit, |
272 | * are hashed by their "new number". "Normal" elements have | 271 | * are hashed by their "new number". "Normal" elements have |
273 | * lc_number == lc_new_number. */ | 272 | * lc_number == lc_new_number. */ |
diff --git a/lib/lzo/Makefile b/lib/lzo/Makefile index e764116ea12d..f0f7d7ca2b83 100644 --- a/lib/lzo/Makefile +++ b/lib/lzo/Makefile | |||
@@ -1,5 +1,5 @@ | |||
1 | lzo_compress-objs := lzo1x_compress.o | 1 | lzo_compress-objs := lzo1x_compress.o |
2 | lzo_decompress-objs := lzo1x_decompress.o | 2 | lzo_decompress-objs := lzo1x_decompress_safe.o |
3 | 3 | ||
4 | obj-$(CONFIG_LZO_COMPRESS) += lzo_compress.o | 4 | obj-$(CONFIG_LZO_COMPRESS) += lzo_compress.o |
5 | obj-$(CONFIG_LZO_DECOMPRESS) += lzo_decompress.o | 5 | obj-$(CONFIG_LZO_DECOMPRESS) += lzo_decompress.o |
diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c index a6040990a62e..236eb21167b5 100644 --- a/lib/lzo/lzo1x_compress.c +++ b/lib/lzo/lzo1x_compress.c | |||
@@ -1,194 +1,243 @@ | |||
1 | /* | 1 | /* |
2 | * LZO1X Compressor from MiniLZO | 2 | * LZO1X Compressor from LZO |
3 | * | 3 | * |
4 | * Copyright (C) 1996-2005 Markus F.X.J. Oberhumer <markus@oberhumer.com> | 4 | * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com> |
5 | * | 5 | * |
6 | * The full LZO package can be found at: | 6 | * The full LZO package can be found at: |
7 | * http://www.oberhumer.com/opensource/lzo/ | 7 | * http://www.oberhumer.com/opensource/lzo/ |
8 | * | 8 | * |
9 | * Changed for kernel use by: | 9 | * Changed for Linux kernel use by: |
10 | * Nitin Gupta <nitingupta910@gmail.com> | 10 | * Nitin Gupta <nitingupta910@gmail.com> |
11 | * Richard Purdie <rpurdie@openedhand.com> | 11 | * Richard Purdie <rpurdie@openedhand.com> |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/lzo.h> | ||
17 | #include <asm/unaligned.h> | 16 | #include <asm/unaligned.h> |
17 | #include <linux/lzo.h> | ||
18 | #include "lzodefs.h" | 18 | #include "lzodefs.h" |
19 | 19 | ||
20 | static noinline size_t | 20 | static noinline size_t |
21 | _lzo1x_1_do_compress(const unsigned char *in, size_t in_len, | 21 | lzo1x_1_do_compress(const unsigned char *in, size_t in_len, |
22 | unsigned char *out, size_t *out_len, void *wrkmem) | 22 | unsigned char *out, size_t *out_len, |
23 | size_t ti, void *wrkmem) | ||
23 | { | 24 | { |
25 | const unsigned char *ip; | ||
26 | unsigned char *op; | ||
24 | const unsigned char * const in_end = in + in_len; | 27 | const unsigned char * const in_end = in + in_len; |
25 | const unsigned char * const ip_end = in + in_len - M2_MAX_LEN - 5; | 28 | const unsigned char * const ip_end = in + in_len - 20; |
26 | const unsigned char ** const dict = wrkmem; | 29 | const unsigned char *ii; |
27 | const unsigned char *ip = in, *ii = ip; | 30 | lzo_dict_t * const dict = (lzo_dict_t *) wrkmem; |
28 | const unsigned char *end, *m, *m_pos; | ||
29 | size_t m_off, m_len, dindex; | ||
30 | unsigned char *op = out; | ||
31 | 31 | ||
32 | ip += 4; | 32 | op = out; |
33 | ip = in; | ||
34 | ii = ip; | ||
35 | ip += ti < 4 ? 4 - ti : 0; | ||
33 | 36 | ||
34 | for (;;) { | 37 | for (;;) { |
35 | dindex = ((size_t)(0x21 * DX3(ip, 5, 5, 6)) >> 5) & D_MASK; | 38 | const unsigned char *m_pos; |
36 | m_pos = dict[dindex]; | 39 | size_t t, m_len, m_off; |
37 | 40 | u32 dv; | |
38 | if (m_pos < in) | ||
39 | goto literal; | ||
40 | |||
41 | if (ip == m_pos || ((size_t)(ip - m_pos) > M4_MAX_OFFSET)) | ||
42 | goto literal; | ||
43 | |||
44 | m_off = ip - m_pos; | ||
45 | if (m_off <= M2_MAX_OFFSET || m_pos[3] == ip[3]) | ||
46 | goto try_match; | ||
47 | |||
48 | dindex = (dindex & (D_MASK & 0x7ff)) ^ (D_HIGH | 0x1f); | ||
49 | m_pos = dict[dindex]; | ||
50 | |||
51 | if (m_pos < in) | ||
52 | goto literal; | ||
53 | |||
54 | if (ip == m_pos || ((size_t)(ip - m_pos) > M4_MAX_OFFSET)) | ||
55 | goto literal; | ||
56 | |||
57 | m_off = ip - m_pos; | ||
58 | if (m_off <= M2_MAX_OFFSET || m_pos[3] == ip[3]) | ||
59 | goto try_match; | ||
60 | |||
61 | goto literal; | ||
62 | |||
63 | try_match: | ||
64 | if (get_unaligned((const unsigned short *)m_pos) | ||
65 | == get_unaligned((const unsigned short *)ip)) { | ||
66 | if (likely(m_pos[2] == ip[2])) | ||
67 | goto match; | ||
68 | } | ||
69 | |||
70 | literal: | 41 | literal: |
71 | dict[dindex] = ip; | 42 | ip += 1 + ((ip - ii) >> 5); |
72 | ++ip; | 43 | next: |
73 | if (unlikely(ip >= ip_end)) | 44 | if (unlikely(ip >= ip_end)) |
74 | break; | 45 | break; |
75 | continue; | 46 | dv = get_unaligned_le32(ip); |
76 | 47 | t = ((dv * 0x1824429d) >> (32 - D_BITS)) & D_MASK; | |
77 | match: | 48 | m_pos = in + dict[t]; |
78 | dict[dindex] = ip; | 49 | dict[t] = (lzo_dict_t) (ip - in); |
79 | if (ip != ii) { | 50 | if (unlikely(dv != get_unaligned_le32(m_pos))) |
80 | size_t t = ip - ii; | 51 | goto literal; |
81 | 52 | ||
53 | ii -= ti; | ||
54 | ti = 0; | ||
55 | t = ip - ii; | ||
56 | if (t != 0) { | ||
82 | if (t <= 3) { | 57 | if (t <= 3) { |
83 | op[-2] |= t; | 58 | op[-2] |= t; |
84 | } else if (t <= 18) { | 59 | COPY4(op, ii); |
60 | op += t; | ||
61 | } else if (t <= 16) { | ||
85 | *op++ = (t - 3); | 62 | *op++ = (t - 3); |
63 | COPY8(op, ii); | ||
64 | COPY8(op + 8, ii + 8); | ||
65 | op += t; | ||
86 | } else { | 66 | } else { |
87 | size_t tt = t - 18; | 67 | if (t <= 18) { |
88 | 68 | *op++ = (t - 3); | |
89 | *op++ = 0; | 69 | } else { |
90 | while (tt > 255) { | 70 | size_t tt = t - 18; |
91 | tt -= 255; | ||
92 | *op++ = 0; | 71 | *op++ = 0; |
72 | while (unlikely(tt > 255)) { | ||
73 | tt -= 255; | ||
74 | *op++ = 0; | ||
75 | } | ||
76 | *op++ = tt; | ||
93 | } | 77 | } |
94 | *op++ = tt; | 78 | do { |
79 | COPY8(op, ii); | ||
80 | COPY8(op + 8, ii + 8); | ||
81 | op += 16; | ||
82 | ii += 16; | ||
83 | t -= 16; | ||
84 | } while (t >= 16); | ||
85 | if (t > 0) do { | ||
86 | *op++ = *ii++; | ||
87 | } while (--t > 0); | ||
95 | } | 88 | } |
96 | do { | ||
97 | *op++ = *ii++; | ||
98 | } while (--t > 0); | ||
99 | } | 89 | } |
100 | 90 | ||
101 | ip += 3; | 91 | m_len = 4; |
102 | if (m_pos[3] != *ip++ || m_pos[4] != *ip++ | 92 | { |
103 | || m_pos[5] != *ip++ || m_pos[6] != *ip++ | 93 | #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ64) |
104 | || m_pos[7] != *ip++ || m_pos[8] != *ip++) { | 94 | u64 v; |
105 | --ip; | 95 | v = get_unaligned((const u64 *) (ip + m_len)) ^ |
106 | m_len = ip - ii; | 96 | get_unaligned((const u64 *) (m_pos + m_len)); |
97 | if (unlikely(v == 0)) { | ||
98 | do { | ||
99 | m_len += 8; | ||
100 | v = get_unaligned((const u64 *) (ip + m_len)) ^ | ||
101 | get_unaligned((const u64 *) (m_pos + m_len)); | ||
102 | if (unlikely(ip + m_len >= ip_end)) | ||
103 | goto m_len_done; | ||
104 | } while (v == 0); | ||
105 | } | ||
106 | # if defined(__LITTLE_ENDIAN) | ||
107 | m_len += (unsigned) __builtin_ctzll(v) / 8; | ||
108 | # elif defined(__BIG_ENDIAN) | ||
109 | m_len += (unsigned) __builtin_clzll(v) / 8; | ||
110 | # else | ||
111 | # error "missing endian definition" | ||
112 | # endif | ||
113 | #elif defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ32) | ||
114 | u32 v; | ||
115 | v = get_unaligned((const u32 *) (ip + m_len)) ^ | ||
116 | get_unaligned((const u32 *) (m_pos + m_len)); | ||
117 | if (unlikely(v == 0)) { | ||
118 | do { | ||
119 | m_len += 4; | ||
120 | v = get_unaligned((const u32 *) (ip + m_len)) ^ | ||
121 | get_unaligned((const u32 *) (m_pos + m_len)); | ||
122 | if (v != 0) | ||
123 | break; | ||
124 | m_len += 4; | ||
125 | v = get_unaligned((const u32 *) (ip + m_len)) ^ | ||
126 | get_unaligned((const u32 *) (m_pos + m_len)); | ||
127 | if (unlikely(ip + m_len >= ip_end)) | ||
128 | goto m_len_done; | ||
129 | } while (v == 0); | ||
130 | } | ||
131 | # if defined(__LITTLE_ENDIAN) | ||
132 | m_len += (unsigned) __builtin_ctz(v) / 8; | ||
133 | # elif defined(__BIG_ENDIAN) | ||
134 | m_len += (unsigned) __builtin_clz(v) / 8; | ||
135 | # else | ||
136 | # error "missing endian definition" | ||
137 | # endif | ||
138 | #else | ||
139 | if (unlikely(ip[m_len] == m_pos[m_len])) { | ||
140 | do { | ||
141 | m_len += 1; | ||
142 | if (ip[m_len] != m_pos[m_len]) | ||
143 | break; | ||
144 | m_len += 1; | ||
145 | if (ip[m_len] != m_pos[m_len]) | ||
146 | break; | ||
147 | m_len += 1; | ||
148 | if (ip[m_len] != m_pos[m_len]) | ||
149 | break; | ||
150 | m_len += 1; | ||
151 | if (ip[m_len] != m_pos[m_len]) | ||
152 | break; | ||
153 | m_len += 1; | ||
154 | if (ip[m_len] != m_pos[m_len]) | ||
155 | break; | ||
156 | m_len += 1; | ||
157 | if (ip[m_len] != m_pos[m_len]) | ||
158 | break; | ||
159 | m_len += 1; | ||
160 | if (ip[m_len] != m_pos[m_len]) | ||
161 | break; | ||
162 | m_len += 1; | ||
163 | if (unlikely(ip + m_len >= ip_end)) | ||
164 | goto m_len_done; | ||
165 | } while (ip[m_len] == m_pos[m_len]); | ||
166 | } | ||
167 | #endif | ||
168 | } | ||
169 | m_len_done: | ||
107 | 170 | ||
108 | if (m_off <= M2_MAX_OFFSET) { | 171 | m_off = ip - m_pos; |
109 | m_off -= 1; | 172 | ip += m_len; |
110 | *op++ = (((m_len - 1) << 5) | 173 | ii = ip; |
111 | | ((m_off & 7) << 2)); | 174 | if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) { |
112 | *op++ = (m_off >> 3); | 175 | m_off -= 1; |
113 | } else if (m_off <= M3_MAX_OFFSET) { | 176 | *op++ = (((m_len - 1) << 5) | ((m_off & 7) << 2)); |
114 | m_off -= 1; | 177 | *op++ = (m_off >> 3); |
178 | } else if (m_off <= M3_MAX_OFFSET) { | ||
179 | m_off -= 1; | ||
180 | if (m_len <= M3_MAX_LEN) | ||
115 | *op++ = (M3_MARKER | (m_len - 2)); | 181 | *op++ = (M3_MARKER | (m_len - 2)); |
116 | goto m3_m4_offset; | 182 | else { |
117 | } else { | 183 | m_len -= M3_MAX_LEN; |
118 | m_off -= 0x4000; | 184 | *op++ = M3_MARKER | 0; |
119 | 185 | while (unlikely(m_len > 255)) { | |
120 | *op++ = (M4_MARKER | ((m_off & 0x4000) >> 11) | 186 | m_len -= 255; |
121 | | (m_len - 2)); | 187 | *op++ = 0; |
122 | goto m3_m4_offset; | 188 | } |
189 | *op++ = (m_len); | ||
123 | } | 190 | } |
191 | *op++ = (m_off << 2); | ||
192 | *op++ = (m_off >> 6); | ||
124 | } else { | 193 | } else { |
125 | end = in_end; | 194 | m_off -= 0x4000; |
126 | m = m_pos + M2_MAX_LEN + 1; | 195 | if (m_len <= M4_MAX_LEN) |
127 | 196 | *op++ = (M4_MARKER | ((m_off >> 11) & 8) | |
128 | while (ip < end && *m == *ip) { | ||
129 | m++; | ||
130 | ip++; | ||
131 | } | ||
132 | m_len = ip - ii; | ||
133 | |||
134 | if (m_off <= M3_MAX_OFFSET) { | ||
135 | m_off -= 1; | ||
136 | if (m_len <= 33) { | ||
137 | *op++ = (M3_MARKER | (m_len - 2)); | ||
138 | } else { | ||
139 | m_len -= 33; | ||
140 | *op++ = M3_MARKER | 0; | ||
141 | goto m3_m4_len; | ||
142 | } | ||
143 | } else { | ||
144 | m_off -= 0x4000; | ||
145 | if (m_len <= M4_MAX_LEN) { | ||
146 | *op++ = (M4_MARKER | ||
147 | | ((m_off & 0x4000) >> 11) | ||
148 | | (m_len - 2)); | 197 | | (m_len - 2)); |
149 | } else { | 198 | else { |
150 | m_len -= M4_MAX_LEN; | 199 | m_len -= M4_MAX_LEN; |
151 | *op++ = (M4_MARKER | 200 | *op++ = (M4_MARKER | ((m_off >> 11) & 8)); |
152 | | ((m_off & 0x4000) >> 11)); | 201 | while (unlikely(m_len > 255)) { |
153 | m3_m4_len: | 202 | m_len -= 255; |
154 | while (m_len > 255) { | 203 | *op++ = 0; |
155 | m_len -= 255; | ||
156 | *op++ = 0; | ||
157 | } | ||
158 | |||
159 | *op++ = (m_len); | ||
160 | } | 204 | } |
205 | *op++ = (m_len); | ||
161 | } | 206 | } |
162 | m3_m4_offset: | 207 | *op++ = (m_off << 2); |
163 | *op++ = ((m_off & 63) << 2); | ||
164 | *op++ = (m_off >> 6); | 208 | *op++ = (m_off >> 6); |
165 | } | 209 | } |
166 | 210 | goto next; | |
167 | ii = ip; | ||
168 | if (unlikely(ip >= ip_end)) | ||
169 | break; | ||
170 | } | 211 | } |
171 | |||
172 | *out_len = op - out; | 212 | *out_len = op - out; |
173 | return in_end - ii; | 213 | return in_end - (ii - ti); |
174 | } | 214 | } |
175 | 215 | ||
176 | int lzo1x_1_compress(const unsigned char *in, size_t in_len, unsigned char *out, | 216 | int lzo1x_1_compress(const unsigned char *in, size_t in_len, |
177 | size_t *out_len, void *wrkmem) | 217 | unsigned char *out, size_t *out_len, |
218 | void *wrkmem) | ||
178 | { | 219 | { |
179 | const unsigned char *ii; | 220 | const unsigned char *ip = in; |
180 | unsigned char *op = out; | 221 | unsigned char *op = out; |
181 | size_t t; | 222 | size_t l = in_len; |
223 | size_t t = 0; | ||
182 | 224 | ||
183 | if (unlikely(in_len <= M2_MAX_LEN + 5)) { | 225 | while (l > 20) { |
184 | t = in_len; | 226 | size_t ll = l <= (M4_MAX_OFFSET + 1) ? l : (M4_MAX_OFFSET + 1); |
185 | } else { | 227 | uintptr_t ll_end = (uintptr_t) ip + ll; |
186 | t = _lzo1x_1_do_compress(in, in_len, op, out_len, wrkmem); | 228 | if ((ll_end + ((t + ll) >> 5)) <= ll_end) |
229 | break; | ||
230 | BUILD_BUG_ON(D_SIZE * sizeof(lzo_dict_t) > LZO1X_1_MEM_COMPRESS); | ||
231 | memset(wrkmem, 0, D_SIZE * sizeof(lzo_dict_t)); | ||
232 | t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem); | ||
233 | ip += ll; | ||
187 | op += *out_len; | 234 | op += *out_len; |
235 | l -= ll; | ||
188 | } | 236 | } |
237 | t += l; | ||
189 | 238 | ||
190 | if (t > 0) { | 239 | if (t > 0) { |
191 | ii = in + in_len - t; | 240 | const unsigned char *ii = in + in_len - t; |
192 | 241 | ||
193 | if (op == out && t <= 238) { | 242 | if (op == out && t <= 238) { |
194 | *op++ = (17 + t); | 243 | *op++ = (17 + t); |
@@ -198,16 +247,21 @@ int lzo1x_1_compress(const unsigned char *in, size_t in_len, unsigned char *out, | |||
198 | *op++ = (t - 3); | 247 | *op++ = (t - 3); |
199 | } else { | 248 | } else { |
200 | size_t tt = t - 18; | 249 | size_t tt = t - 18; |
201 | |||
202 | *op++ = 0; | 250 | *op++ = 0; |
203 | while (tt > 255) { | 251 | while (tt > 255) { |
204 | tt -= 255; | 252 | tt -= 255; |
205 | *op++ = 0; | 253 | *op++ = 0; |
206 | } | 254 | } |
207 | |||
208 | *op++ = tt; | 255 | *op++ = tt; |
209 | } | 256 | } |
210 | do { | 257 | if (t >= 16) do { |
258 | COPY8(op, ii); | ||
259 | COPY8(op + 8, ii + 8); | ||
260 | op += 16; | ||
261 | ii += 16; | ||
262 | t -= 16; | ||
263 | } while (t >= 16); | ||
264 | if (t > 0) do { | ||
211 | *op++ = *ii++; | 265 | *op++ = *ii++; |
212 | } while (--t > 0); | 266 | } while (--t > 0); |
213 | } | 267 | } |
@@ -223,4 +277,3 @@ EXPORT_SYMBOL_GPL(lzo1x_1_compress); | |||
223 | 277 | ||
224 | MODULE_LICENSE("GPL"); | 278 | MODULE_LICENSE("GPL"); |
225 | MODULE_DESCRIPTION("LZO1X-1 Compressor"); | 279 | MODULE_DESCRIPTION("LZO1X-1 Compressor"); |
226 | |||
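
The rewritten compressor drops the DX3()-based double probe in favour of a single multiplicative hash over four unaligned input bytes, and the dictionary now stores 16-bit offsets (lzo_dict_t) into the current window instead of raw pointers; lzo1x_1_compress() therefore feeds the inner loop windows of at most M4_MAX_OFFSET + 1 bytes and memsets the dictionary between them. A hedged sketch of the new hash step, with the constants taken from the hunks above and from the lzodefs.h hunk below (the helper name is made up):

	#include <linux/types.h>
	#include <asm/unaligned.h>

	#define D_BITS	13			/* from the lzodefs.h hunk below */
	#define D_MASK	((1u << D_BITS) - 1)

	/* Map the next four input bytes to a dictionary slot. */
	static unsigned int lzo_hash4(const unsigned char *ip)
	{
		u32 dv = get_unaligned_le32(ip);

		return ((dv * 0x1824429d) >> (32 - D_BITS)) & D_MASK;
	}

The slot holds (ip - in) as an unsigned short, which is why the window is capped at M4_MAX_OFFSET + 1 bytes; a hit is only trusted after re-reading the four bytes at the stored offset (dv == get_unaligned_le32(m_pos)).
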
diff --git a/lib/lzo/lzo1x_decompress.c b/lib/lzo/lzo1x_decompress.c deleted file mode 100644 index f2fd09850223..000000000000 --- a/lib/lzo/lzo1x_decompress.c +++ /dev/null | |||
@@ -1,255 +0,0 @@ | |||
1 | /* | ||
2 | * LZO1X Decompressor from MiniLZO | ||
3 | * | ||
4 | * Copyright (C) 1996-2005 Markus F.X.J. Oberhumer <markus@oberhumer.com> | ||
5 | * | ||
6 | * The full LZO package can be found at: | ||
7 | * http://www.oberhumer.com/opensource/lzo/ | ||
8 | * | ||
9 | * Changed for kernel use by: | ||
10 | * Nitin Gupta <nitingupta910@gmail.com> | ||
11 | * Richard Purdie <rpurdie@openedhand.com> | ||
12 | */ | ||
13 | |||
14 | #ifndef STATIC | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #endif | ||
18 | |||
19 | #include <asm/unaligned.h> | ||
20 | #include <linux/lzo.h> | ||
21 | #include "lzodefs.h" | ||
22 | |||
23 | #define HAVE_IP(x, ip_end, ip) ((size_t)(ip_end - ip) < (x)) | ||
24 | #define HAVE_OP(x, op_end, op) ((size_t)(op_end - op) < (x)) | ||
25 | #define HAVE_LB(m_pos, out, op) (m_pos < out || m_pos >= op) | ||
26 | |||
27 | #define COPY4(dst, src) \ | ||
28 | put_unaligned(get_unaligned((const u32 *)(src)), (u32 *)(dst)) | ||
29 | |||
30 | int lzo1x_decompress_safe(const unsigned char *in, size_t in_len, | ||
31 | unsigned char *out, size_t *out_len) | ||
32 | { | ||
33 | const unsigned char * const ip_end = in + in_len; | ||
34 | unsigned char * const op_end = out + *out_len; | ||
35 | const unsigned char *ip = in, *m_pos; | ||
36 | unsigned char *op = out; | ||
37 | size_t t; | ||
38 | |||
39 | *out_len = 0; | ||
40 | |||
41 | if (*ip > 17) { | ||
42 | t = *ip++ - 17; | ||
43 | if (t < 4) | ||
44 | goto match_next; | ||
45 | if (HAVE_OP(t, op_end, op)) | ||
46 | goto output_overrun; | ||
47 | if (HAVE_IP(t + 1, ip_end, ip)) | ||
48 | goto input_overrun; | ||
49 | do { | ||
50 | *op++ = *ip++; | ||
51 | } while (--t > 0); | ||
52 | goto first_literal_run; | ||
53 | } | ||
54 | |||
55 | while ((ip < ip_end)) { | ||
56 | t = *ip++; | ||
57 | if (t >= 16) | ||
58 | goto match; | ||
59 | if (t == 0) { | ||
60 | if (HAVE_IP(1, ip_end, ip)) | ||
61 | goto input_overrun; | ||
62 | while (*ip == 0) { | ||
63 | t += 255; | ||
64 | ip++; | ||
65 | if (HAVE_IP(1, ip_end, ip)) | ||
66 | goto input_overrun; | ||
67 | } | ||
68 | t += 15 + *ip++; | ||
69 | } | ||
70 | if (HAVE_OP(t + 3, op_end, op)) | ||
71 | goto output_overrun; | ||
72 | if (HAVE_IP(t + 4, ip_end, ip)) | ||
73 | goto input_overrun; | ||
74 | |||
75 | COPY4(op, ip); | ||
76 | op += 4; | ||
77 | ip += 4; | ||
78 | if (--t > 0) { | ||
79 | if (t >= 4) { | ||
80 | do { | ||
81 | COPY4(op, ip); | ||
82 | op += 4; | ||
83 | ip += 4; | ||
84 | t -= 4; | ||
85 | } while (t >= 4); | ||
86 | if (t > 0) { | ||
87 | do { | ||
88 | *op++ = *ip++; | ||
89 | } while (--t > 0); | ||
90 | } | ||
91 | } else { | ||
92 | do { | ||
93 | *op++ = *ip++; | ||
94 | } while (--t > 0); | ||
95 | } | ||
96 | } | ||
97 | |||
98 | first_literal_run: | ||
99 | t = *ip++; | ||
100 | if (t >= 16) | ||
101 | goto match; | ||
102 | m_pos = op - (1 + M2_MAX_OFFSET); | ||
103 | m_pos -= t >> 2; | ||
104 | m_pos -= *ip++ << 2; | ||
105 | |||
106 | if (HAVE_LB(m_pos, out, op)) | ||
107 | goto lookbehind_overrun; | ||
108 | |||
109 | if (HAVE_OP(3, op_end, op)) | ||
110 | goto output_overrun; | ||
111 | *op++ = *m_pos++; | ||
112 | *op++ = *m_pos++; | ||
113 | *op++ = *m_pos; | ||
114 | |||
115 | goto match_done; | ||
116 | |||
117 | do { | ||
118 | match: | ||
119 | if (t >= 64) { | ||
120 | m_pos = op - 1; | ||
121 | m_pos -= (t >> 2) & 7; | ||
122 | m_pos -= *ip++ << 3; | ||
123 | t = (t >> 5) - 1; | ||
124 | if (HAVE_LB(m_pos, out, op)) | ||
125 | goto lookbehind_overrun; | ||
126 | if (HAVE_OP(t + 3 - 1, op_end, op)) | ||
127 | goto output_overrun; | ||
128 | goto copy_match; | ||
129 | } else if (t >= 32) { | ||
130 | t &= 31; | ||
131 | if (t == 0) { | ||
132 | if (HAVE_IP(1, ip_end, ip)) | ||
133 | goto input_overrun; | ||
134 | while (*ip == 0) { | ||
135 | t += 255; | ||
136 | ip++; | ||
137 | if (HAVE_IP(1, ip_end, ip)) | ||
138 | goto input_overrun; | ||
139 | } | ||
140 | t += 31 + *ip++; | ||
141 | } | ||
142 | m_pos = op - 1; | ||
143 | m_pos -= get_unaligned_le16(ip) >> 2; | ||
144 | ip += 2; | ||
145 | } else if (t >= 16) { | ||
146 | m_pos = op; | ||
147 | m_pos -= (t & 8) << 11; | ||
148 | |||
149 | t &= 7; | ||
150 | if (t == 0) { | ||
151 | if (HAVE_IP(1, ip_end, ip)) | ||
152 | goto input_overrun; | ||
153 | while (*ip == 0) { | ||
154 | t += 255; | ||
155 | ip++; | ||
156 | if (HAVE_IP(1, ip_end, ip)) | ||
157 | goto input_overrun; | ||
158 | } | ||
159 | t += 7 + *ip++; | ||
160 | } | ||
161 | m_pos -= get_unaligned_le16(ip) >> 2; | ||
162 | ip += 2; | ||
163 | if (m_pos == op) | ||
164 | goto eof_found; | ||
165 | m_pos -= 0x4000; | ||
166 | } else { | ||
167 | m_pos = op - 1; | ||
168 | m_pos -= t >> 2; | ||
169 | m_pos -= *ip++ << 2; | ||
170 | |||
171 | if (HAVE_LB(m_pos, out, op)) | ||
172 | goto lookbehind_overrun; | ||
173 | if (HAVE_OP(2, op_end, op)) | ||
174 | goto output_overrun; | ||
175 | |||
176 | *op++ = *m_pos++; | ||
177 | *op++ = *m_pos; | ||
178 | goto match_done; | ||
179 | } | ||
180 | |||
181 | if (HAVE_LB(m_pos, out, op)) | ||
182 | goto lookbehind_overrun; | ||
183 | if (HAVE_OP(t + 3 - 1, op_end, op)) | ||
184 | goto output_overrun; | ||
185 | |||
186 | if (t >= 2 * 4 - (3 - 1) && (op - m_pos) >= 4) { | ||
187 | COPY4(op, m_pos); | ||
188 | op += 4; | ||
189 | m_pos += 4; | ||
190 | t -= 4 - (3 - 1); | ||
191 | do { | ||
192 | COPY4(op, m_pos); | ||
193 | op += 4; | ||
194 | m_pos += 4; | ||
195 | t -= 4; | ||
196 | } while (t >= 4); | ||
197 | if (t > 0) | ||
198 | do { | ||
199 | *op++ = *m_pos++; | ||
200 | } while (--t > 0); | ||
201 | } else { | ||
202 | copy_match: | ||
203 | *op++ = *m_pos++; | ||
204 | *op++ = *m_pos++; | ||
205 | do { | ||
206 | *op++ = *m_pos++; | ||
207 | } while (--t > 0); | ||
208 | } | ||
209 | match_done: | ||
210 | t = ip[-2] & 3; | ||
211 | if (t == 0) | ||
212 | break; | ||
213 | match_next: | ||
214 | if (HAVE_OP(t, op_end, op)) | ||
215 | goto output_overrun; | ||
216 | if (HAVE_IP(t + 1, ip_end, ip)) | ||
217 | goto input_overrun; | ||
218 | |||
219 | *op++ = *ip++; | ||
220 | if (t > 1) { | ||
221 | *op++ = *ip++; | ||
222 | if (t > 2) | ||
223 | *op++ = *ip++; | ||
224 | } | ||
225 | |||
226 | t = *ip++; | ||
227 | } while (ip < ip_end); | ||
228 | } | ||
229 | |||
230 | *out_len = op - out; | ||
231 | return LZO_E_EOF_NOT_FOUND; | ||
232 | |||
233 | eof_found: | ||
234 | *out_len = op - out; | ||
235 | return (ip == ip_end ? LZO_E_OK : | ||
236 | (ip < ip_end ? LZO_E_INPUT_NOT_CONSUMED : LZO_E_INPUT_OVERRUN)); | ||
237 | input_overrun: | ||
238 | *out_len = op - out; | ||
239 | return LZO_E_INPUT_OVERRUN; | ||
240 | |||
241 | output_overrun: | ||
242 | *out_len = op - out; | ||
243 | return LZO_E_OUTPUT_OVERRUN; | ||
244 | |||
245 | lookbehind_overrun: | ||
246 | *out_len = op - out; | ||
247 | return LZO_E_LOOKBEHIND_OVERRUN; | ||
248 | } | ||
249 | #ifndef STATIC | ||
250 | EXPORT_SYMBOL_GPL(lzo1x_decompress_safe); | ||
251 | |||
252 | MODULE_LICENSE("GPL"); | ||
253 | MODULE_DESCRIPTION("LZO1X Decompressor"); | ||
254 | |||
255 | #endif | ||
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c new file mode 100644 index 000000000000..569985d522d5 --- /dev/null +++ b/lib/lzo/lzo1x_decompress_safe.c | |||
@@ -0,0 +1,237 @@ | |||
1 | /* | ||
2 | * LZO1X Decompressor from LZO | ||
3 | * | ||
4 | * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com> | ||
5 | * | ||
6 | * The full LZO package can be found at: | ||
7 | * http://www.oberhumer.com/opensource/lzo/ | ||
8 | * | ||
9 | * Changed for Linux kernel use by: | ||
10 | * Nitin Gupta <nitingupta910@gmail.com> | ||
11 | * Richard Purdie <rpurdie@openedhand.com> | ||
12 | */ | ||
13 | |||
14 | #ifndef STATIC | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #endif | ||
18 | #include <asm/unaligned.h> | ||
19 | #include <linux/lzo.h> | ||
20 | #include "lzodefs.h" | ||
21 | |||
22 | #define HAVE_IP(x) ((size_t)(ip_end - ip) >= (size_t)(x)) | ||
23 | #define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x)) | ||
24 | #define NEED_IP(x) if (!HAVE_IP(x)) goto input_overrun | ||
25 | #define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun | ||
26 | #define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun | ||
27 | |||
28 | int lzo1x_decompress_safe(const unsigned char *in, size_t in_len, | ||
29 | unsigned char *out, size_t *out_len) | ||
30 | { | ||
31 | unsigned char *op; | ||
32 | const unsigned char *ip; | ||
33 | size_t t, next; | ||
34 | size_t state = 0; | ||
35 | const unsigned char *m_pos; | ||
36 | const unsigned char * const ip_end = in + in_len; | ||
37 | unsigned char * const op_end = out + *out_len; | ||
38 | |||
39 | op = out; | ||
40 | ip = in; | ||
41 | |||
42 | if (unlikely(in_len < 3)) | ||
43 | goto input_overrun; | ||
44 | if (*ip > 17) { | ||
45 | t = *ip++ - 17; | ||
46 | if (t < 4) { | ||
47 | next = t; | ||
48 | goto match_next; | ||
49 | } | ||
50 | goto copy_literal_run; | ||
51 | } | ||
52 | |||
53 | for (;;) { | ||
54 | t = *ip++; | ||
55 | if (t < 16) { | ||
56 | if (likely(state == 0)) { | ||
57 | if (unlikely(t == 0)) { | ||
58 | while (unlikely(*ip == 0)) { | ||
59 | t += 255; | ||
60 | ip++; | ||
61 | NEED_IP(1); | ||
62 | } | ||
63 | t += 15 + *ip++; | ||
64 | } | ||
65 | t += 3; | ||
66 | copy_literal_run: | ||
67 | #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) | ||
68 | if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) { | ||
69 | const unsigned char *ie = ip + t; | ||
70 | unsigned char *oe = op + t; | ||
71 | do { | ||
72 | COPY8(op, ip); | ||
73 | op += 8; | ||
74 | ip += 8; | ||
75 | COPY8(op, ip); | ||
76 | op += 8; | ||
77 | ip += 8; | ||
78 | } while (ip < ie); | ||
79 | ip = ie; | ||
80 | op = oe; | ||
81 | } else | ||
82 | #endif | ||
83 | { | ||
84 | NEED_OP(t); | ||
85 | NEED_IP(t + 3); | ||
86 | do { | ||
87 | *op++ = *ip++; | ||
88 | } while (--t > 0); | ||
89 | } | ||
90 | state = 4; | ||
91 | continue; | ||
92 | } else if (state != 4) { | ||
93 | next = t & 3; | ||
94 | m_pos = op - 1; | ||
95 | m_pos -= t >> 2; | ||
96 | m_pos -= *ip++ << 2; | ||
97 | TEST_LB(m_pos); | ||
98 | NEED_OP(2); | ||
99 | op[0] = m_pos[0]; | ||
100 | op[1] = m_pos[1]; | ||
101 | op += 2; | ||
102 | goto match_next; | ||
103 | } else { | ||
104 | next = t & 3; | ||
105 | m_pos = op - (1 + M2_MAX_OFFSET); | ||
106 | m_pos -= t >> 2; | ||
107 | m_pos -= *ip++ << 2; | ||
108 | t = 3; | ||
109 | } | ||
110 | } else if (t >= 64) { | ||
111 | next = t & 3; | ||
112 | m_pos = op - 1; | ||
113 | m_pos -= (t >> 2) & 7; | ||
114 | m_pos -= *ip++ << 3; | ||
115 | t = (t >> 5) - 1 + (3 - 1); | ||
116 | } else if (t >= 32) { | ||
117 | t = (t & 31) + (3 - 1); | ||
118 | if (unlikely(t == 2)) { | ||
119 | while (unlikely(*ip == 0)) { | ||
120 | t += 255; | ||
121 | ip++; | ||
122 | NEED_IP(1); | ||
123 | } | ||
124 | t += 31 + *ip++; | ||
125 | NEED_IP(2); | ||
126 | } | ||
127 | m_pos = op - 1; | ||
128 | next = get_unaligned_le16(ip); | ||
129 | ip += 2; | ||
130 | m_pos -= next >> 2; | ||
131 | next &= 3; | ||
132 | } else { | ||
133 | m_pos = op; | ||
134 | m_pos -= (t & 8) << 11; | ||
135 | t = (t & 7) + (3 - 1); | ||
136 | if (unlikely(t == 2)) { | ||
137 | while (unlikely(*ip == 0)) { | ||
138 | t += 255; | ||
139 | ip++; | ||
140 | NEED_IP(1); | ||
141 | } | ||
142 | t += 7 + *ip++; | ||
143 | NEED_IP(2); | ||
144 | } | ||
145 | next = get_unaligned_le16(ip); | ||
146 | ip += 2; | ||
147 | m_pos -= next >> 2; | ||
148 | next &= 3; | ||
149 | if (m_pos == op) | ||
150 | goto eof_found; | ||
151 | m_pos -= 0x4000; | ||
152 | } | ||
153 | TEST_LB(m_pos); | ||
154 | #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) | ||
155 | if (op - m_pos >= 8) { | ||
156 | unsigned char *oe = op + t; | ||
157 | if (likely(HAVE_OP(t + 15))) { | ||
158 | do { | ||
159 | COPY8(op, m_pos); | ||
160 | op += 8; | ||
161 | m_pos += 8; | ||
162 | COPY8(op, m_pos); | ||
163 | op += 8; | ||
164 | m_pos += 8; | ||
165 | } while (op < oe); | ||
166 | op = oe; | ||
167 | if (HAVE_IP(6)) { | ||
168 | state = next; | ||
169 | COPY4(op, ip); | ||
170 | op += next; | ||
171 | ip += next; | ||
172 | continue; | ||
173 | } | ||
174 | } else { | ||
175 | NEED_OP(t); | ||
176 | do { | ||
177 | *op++ = *m_pos++; | ||
178 | } while (op < oe); | ||
179 | } | ||
180 | } else | ||
181 | #endif | ||
182 | { | ||
183 | unsigned char *oe = op + t; | ||
184 | NEED_OP(t); | ||
185 | op[0] = m_pos[0]; | ||
186 | op[1] = m_pos[1]; | ||
187 | op += 2; | ||
188 | m_pos += 2; | ||
189 | do { | ||
190 | *op++ = *m_pos++; | ||
191 | } while (op < oe); | ||
192 | } | ||
193 | match_next: | ||
194 | state = next; | ||
195 | t = next; | ||
196 | #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) | ||
197 | if (likely(HAVE_IP(6) && HAVE_OP(4))) { | ||
198 | COPY4(op, ip); | ||
199 | op += t; | ||
200 | ip += t; | ||
201 | } else | ||
202 | #endif | ||
203 | { | ||
204 | NEED_IP(t + 3); | ||
205 | NEED_OP(t); | ||
206 | while (t > 0) { | ||
207 | *op++ = *ip++; | ||
208 | t--; | ||
209 | } | ||
210 | } | ||
211 | } | ||
212 | |||
213 | eof_found: | ||
214 | *out_len = op - out; | ||
215 | return (t != 3 ? LZO_E_ERROR : | ||
216 | ip == ip_end ? LZO_E_OK : | ||
217 | ip < ip_end ? LZO_E_INPUT_NOT_CONSUMED : LZO_E_INPUT_OVERRUN); | ||
218 | |||
219 | input_overrun: | ||
220 | *out_len = op - out; | ||
221 | return LZO_E_INPUT_OVERRUN; | ||
222 | |||
223 | output_overrun: | ||
224 | *out_len = op - out; | ||
225 | return LZO_E_OUTPUT_OVERRUN; | ||
226 | |||
227 | lookbehind_overrun: | ||
228 | *out_len = op - out; | ||
229 | return LZO_E_LOOKBEHIND_OVERRUN; | ||
230 | } | ||
231 | #ifndef STATIC | ||
232 | EXPORT_SYMBOL_GPL(lzo1x_decompress_safe); | ||
233 | |||
234 | MODULE_LICENSE("GPL"); | ||
235 | MODULE_DESCRIPTION("LZO1X Decompressor"); | ||
236 | |||
237 | #endif | ||
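
Note that the bounds-check helpers flipped polarity between the deleted and the new file: the old HAVE_IP(x, ip_end, ip) was true on overrun, while the new HAVE_IP(x) is true when at least x input bytes remain, with NEED_IP() wrapping the failure jump. Side by side, both definitions quoted from the hunks above:

	/* old lzo1x_decompress.c: true means overrun */
	#define HAVE_IP(x, ip_end, ip)	((size_t)(ip_end - ip) < (x))
	if (HAVE_IP(t + 1, ip_end, ip))
		goto input_overrun;

	/* new lzo1x_decompress_safe.c: true means enough bytes remain */
	#define HAVE_IP(x)	((size_t)(ip_end - ip) >= (size_t)(x))
	#define NEED_IP(x)	if (!HAVE_IP(x)) goto input_overrun
	NEED_IP(t + 1);

The same inversion applies to HAVE_OP/NEED_OP, and HAVE_LB() becomes TEST_LB().
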
diff --git a/lib/lzo/lzodefs.h b/lib/lzo/lzodefs.h index b6d482c492ef..6710b83ce72e 100644 --- a/lib/lzo/lzodefs.h +++ b/lib/lzo/lzodefs.h | |||
@@ -1,19 +1,37 @@ | |||
1 | /* | 1 | /* |
2 | * lzodefs.h -- architecture, OS and compiler specific defines | 2 | * lzodefs.h -- architecture, OS and compiler specific defines |
3 | * | 3 | * |
4 | * Copyright (C) 1996-2005 Markus F.X.J. Oberhumer <markus@oberhumer.com> | 4 | * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com> |
5 | * | 5 | * |
6 | * The full LZO package can be found at: | 6 | * The full LZO package can be found at: |
7 | * http://www.oberhumer.com/opensource/lzo/ | 7 | * http://www.oberhumer.com/opensource/lzo/ |
8 | * | 8 | * |
9 | * Changed for kernel use by: | 9 | * Changed for Linux kernel use by: |
10 | * Nitin Gupta <nitingupta910@gmail.com> | 10 | * Nitin Gupta <nitingupta910@gmail.com> |
11 | * Richard Purdie <rpurdie@openedhand.com> | 11 | * Richard Purdie <rpurdie@openedhand.com> |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #define LZO_VERSION 0x2020 | 14 | |
15 | #define LZO_VERSION_STRING "2.02" | 15 | #define COPY4(dst, src) \ |
16 | #define LZO_VERSION_DATE "Oct 17 2005" | 16 | put_unaligned(get_unaligned((const u32 *)(src)), (u32 *)(dst)) |
17 | #if defined(__x86_64__) | ||
18 | #define COPY8(dst, src) \ | ||
19 | put_unaligned(get_unaligned((const u64 *)(src)), (u64 *)(dst)) | ||
20 | #else | ||
21 | #define COPY8(dst, src) \ | ||
22 | COPY4(dst, src); COPY4((dst) + 4, (src) + 4) | ||
23 | #endif | ||
24 | |||
25 | #if defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN) | ||
26 | #error "conflicting endian definitions" | ||
27 | #elif defined(__x86_64__) | ||
28 | #define LZO_USE_CTZ64 1 | ||
29 | #define LZO_USE_CTZ32 1 | ||
30 | #elif defined(__i386__) || defined(__powerpc__) | ||
31 | #define LZO_USE_CTZ32 1 | ||
32 | #elif defined(__arm__) && (__LINUX_ARM_ARCH__ >= 5) | ||
33 | #define LZO_USE_CTZ32 1 | ||
34 | #endif | ||
17 | 35 | ||
18 | #define M1_MAX_OFFSET 0x0400 | 36 | #define M1_MAX_OFFSET 0x0400 |
19 | #define M2_MAX_OFFSET 0x0800 | 37 | #define M2_MAX_OFFSET 0x0800 |
@@ -34,10 +52,8 @@ | |||
34 | #define M3_MARKER 32 | 52 | #define M3_MARKER 32 |
35 | #define M4_MARKER 16 | 53 | #define M4_MARKER 16 |
36 | 54 | ||
37 | #define D_BITS 14 | 55 | #define lzo_dict_t unsigned short |
38 | #define D_MASK ((1u << D_BITS) - 1) | 56 | #define D_BITS 13 |
57 | #define D_SIZE (1u << D_BITS) | ||
58 | #define D_MASK (D_SIZE - 1) | ||
39 | #define D_HIGH ((D_MASK >> 1) + 1) | 59 | #define D_HIGH ((D_MASK >> 1) + 1) |
40 | |||
41 | #define DX2(p, s1, s2) (((((size_t)((p)[2]) << (s2)) ^ (p)[1]) \ | ||
42 | << (s1)) ^ (p)[0]) | ||
43 | #define DX3(p, s1, s2, s3) ((DX2((p)+1, s2, s3) << (s1)) ^ (p)[0]) | ||
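
Dictionary arithmetic implied by the new defines: 2^13 = 8192 slots of sizeof(unsigned short) = 2 bytes is 16 KiB of wrkmem, down from the old 14-bit table of pointer-sized entries. The compressor pins this with a build-time check; the BUILD_BUG_ON is quoted from the compressor hunk, while the typedef and wrapper function here are illustrative stand-ins for the #define:

	#include <linux/bug.h>
	#include <linux/lzo.h>		/* LZO1X_1_MEM_COMPRESS */

	typedef unsigned short lzo_dict_t;
	#define D_BITS	13
	#define D_SIZE	(1u << D_BITS)	/* 8192 slots, 16 KiB total */

	static inline void lzo_dict_size_check(void)
	{
		BUILD_BUG_ON(D_SIZE * sizeof(lzo_dict_t) > LZO1X_1_MEM_COMPRESS);
	}
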
diff --git a/lib/mpi/mpi-internal.h b/lib/mpi/mpi-internal.h index 77adcf6bc257..60cf765628e9 100644 --- a/lib/mpi/mpi-internal.h +++ b/lib/mpi/mpi-internal.h | |||
@@ -65,10 +65,6 @@ | |||
65 | typedef mpi_limb_t *mpi_ptr_t; /* pointer to a limb */ | 65 | typedef mpi_limb_t *mpi_ptr_t; /* pointer to a limb */ |
66 | typedef int mpi_size_t; /* (must be a signed type) */ | 66 | typedef int mpi_size_t; /* (must be a signed type) */ |
67 | 67 | ||
68 | #define ABS(x) (x >= 0 ? x : -x) | ||
69 | #define MIN(l, o) ((l) < (o) ? (l) : (o)) | ||
70 | #define MAX(h, i) ((h) > (i) ? (h) : (i)) | ||
71 | |||
72 | static inline int RESIZE_IF_NEEDED(MPI a, unsigned b) | 68 | static inline int RESIZE_IF_NEEDED(MPI a, unsigned b) |
73 | { | 69 | { |
74 | if (a->alloced < b) | 70 | if (a->alloced < b) |
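
The removed ABS/MIN/MAX duplicated helpers that linux/kernel.h already provides; callers switch to abs(), min() and max(), which type-check their arguments, or to min_t()/max_t() when the operand types differ. A one-line sketch with an invented function name:

	#include <linux/kernel.h>

	static int shorter_of(int a, int b)
	{
		return min(a, b);	/* was MIN(a, b) */
	}
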
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c index 3962b7f7fe3f..5f9c44cdf1f5 100644 --- a/lib/mpi/mpicoder.c +++ b/lib/mpi/mpicoder.c | |||
@@ -52,7 +52,7 @@ MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes) | |||
52 | else | 52 | else |
53 | nbits = 0; | 53 | nbits = 0; |
54 | 54 | ||
55 | nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB; | 55 | nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB); |
56 | val = mpi_alloc(nlimbs); | 56 | val = mpi_alloc(nlimbs); |
57 | if (!val) | 57 | if (!val) |
58 | return NULL; | 58 | return NULL; |
@@ -96,8 +96,8 @@ MPI mpi_read_from_buffer(const void *xbuffer, unsigned *ret_nread) | |||
96 | buffer += 2; | 96 | buffer += 2; |
97 | nread = 2; | 97 | nread = 2; |
98 | 98 | ||
99 | nbytes = (nbits + 7) / 8; | 99 | nbytes = DIV_ROUND_UP(nbits, 8); |
100 | nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB; | 100 | nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB); |
101 | val = mpi_alloc(nlimbs); | 101 | val = mpi_alloc(nlimbs); |
102 | if (!val) | 102 | if (!val) |
103 | return NULL; | 103 | return NULL; |
@@ -193,7 +193,7 @@ int mpi_set_buffer(MPI a, const void *xbuffer, unsigned nbytes, int sign) | |||
193 | int nlimbs; | 193 | int nlimbs; |
194 | int i; | 194 | int i; |
195 | 195 | ||
196 | nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB; | 196 | nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB); |
197 | if (RESIZE_IF_NEEDED(a, nlimbs) < 0) | 197 | if (RESIZE_IF_NEEDED(a, nlimbs) < 0) |
198 | return -ENOMEM; | 198 | return -ENOMEM; |
199 | a->sign = sign; | 199 | a->sign = sign; |
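
DIV_ROUND_UP(n, d) from linux/kernel.h expands to ((n) + (d) - 1) / (d), exactly the open-coded rounding it replaces. A worked example for the mpi_read_from_buffer() path, assuming 4-byte limbs (the helper itself is illustrative):

	#include <linux/kernel.h>

	static unsigned int limbs_for_bits(unsigned int nbits)
	{
		unsigned int nbytes = DIV_ROUND_UP(nbits, 8);	/* 17 bits -> 3 bytes */

		return DIV_ROUND_UP(nbytes, 4);			/* 3 bytes -> 1 limb */
	}
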
diff --git a/lib/parser.c b/lib/parser.c index 52cfa69f73df..807b2aaa33fa 100644 --- a/lib/parser.c +++ b/lib/parser.c | |||
@@ -157,7 +157,7 @@ static int match_number(substring_t *s, int *result, int base) | |||
157 | * | 157 | * |
158 | * Description: Attempts to parse the &substring_t @s as a decimal integer. On | 158 | * Description: Attempts to parse the &substring_t @s as a decimal integer. On |
159 | * success, sets @result to the integer represented by the string and returns 0. | 159 | * success, sets @result to the integer represented by the string and returns 0. |
160 | * Returns either -ENOMEM or -EINVAL on failure. | 160 | * Returns -ENOMEM, -EINVAL, or -ERANGE on failure. |
161 | */ | 161 | */ |
162 | int match_int(substring_t *s, int *result) | 162 | int match_int(substring_t *s, int *result) |
163 | { | 163 | { |
@@ -171,7 +171,7 @@ int match_int(substring_t *s, int *result) | |||
171 | * | 171 | * |
172 | * Description: Attempts to parse the &substring_t @s as an octal integer. On | 172 | * Description: Attempts to parse the &substring_t @s as an octal integer. On |
173 | * success, sets @result to the integer represented by the string and returns | 173 | * success, sets @result to the integer represented by the string and returns |
174 | * 0. Returns either -ENOMEM or -EINVAL on failure. | 174 | * 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure. |
175 | */ | 175 | */ |
176 | int match_octal(substring_t *s, int *result) | 176 | int match_octal(substring_t *s, int *result) |
177 | { | 177 | { |
@@ -185,7 +185,7 @@ int match_octal(substring_t *s, int *result) | |||
185 | * | 185 | * |
186 | * Description: Attempts to parse the &substring_t @s as a hexadecimal integer. | 186 | * Description: Attempts to parse the &substring_t @s as a hexadecimal integer. |
187 | * On success, sets @result to the integer represented by the string and | 187 | * On success, sets @result to the integer represented by the string and |
188 | * returns 0. Returns either -ENOMEM or -EINVAL on failure. | 188 | * returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure. |
189 | */ | 189 | */ |
190 | int match_hex(substring_t *s, int *result) | 190 | int match_hex(substring_t *s, int *result) |
191 | { | 191 | { |
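
For context, these helpers operate on the substring_t slots filled by match_token(); with the updated return contract, a value that parses but overflows int now fails with -ERANGE rather than being silently accepted. A hedged usage sketch - the token table and option name are invented for illustration:

	#include <linux/parser.h>
	#include <linux/errno.h>

	enum { Opt_size, Opt_err };

	static const match_table_t tokens = {
		{ Opt_size, "size=%d" },
		{ Opt_err,  NULL }
	};

	static int parse_size_opt(char *opt, int *size)
	{
		substring_t args[MAX_OPT_ARGS];

		if (match_token(opt, tokens, args) != Opt_size)
			return -EINVAL;
		return match_int(&args[0], size);	/* 0, -ENOMEM, -EINVAL or -ERANGE */
	}
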
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c index 7e0d6a58fc83..7542afbb22b3 100644 --- a/lib/rwsem-spinlock.c +++ b/lib/rwsem-spinlock.c | |||
@@ -73,20 +73,13 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite) | |||
73 | goto dont_wake_writers; | 73 | goto dont_wake_writers; |
74 | } | 74 | } |
75 | 75 | ||
76 | /* if we are allowed to wake writers try to grant a single write lock | 76 | /* |
77 | * if there's a writer at the front of the queue | 77 | * since we support write lock stealing, we can't set sem->activity |
78 | * - we leave the 'waiting count' incremented to signify potential | 78 | * to -1 here to claim the lock. Instead, we wake the waiter up |
79 | * contention | 79 | * and let it grab the lock again itself. |
80 | */ | 80 | */ |
81 | if (waiter->flags & RWSEM_WAITING_FOR_WRITE) { | 81 | if (waiter->flags & RWSEM_WAITING_FOR_WRITE) { |
82 | sem->activity = -1; | 82 | wake_up_process(waiter->task); |
83 | list_del(&waiter->list); | ||
84 | tsk = waiter->task; | ||
85 | /* Don't touch waiter after ->task has been NULLed */ | ||
86 | smp_mb(); | ||
87 | waiter->task = NULL; | ||
88 | wake_up_process(tsk); | ||
89 | put_task_struct(tsk); | ||
90 | goto out; | 83 | goto out; |
91 | } | 84 | } |
92 | 85 | ||
@@ -121,18 +114,10 @@ static inline struct rw_semaphore * | |||
121 | __rwsem_wake_one_writer(struct rw_semaphore *sem) | 114 | __rwsem_wake_one_writer(struct rw_semaphore *sem) |
122 | { | 115 | { |
123 | struct rwsem_waiter *waiter; | 116 | struct rwsem_waiter *waiter; |
124 | struct task_struct *tsk; | ||
125 | |||
126 | sem->activity = -1; | ||
127 | 117 | ||
128 | waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); | 118 | waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); |
129 | list_del(&waiter->list); | 119 | wake_up_process(waiter->task); |
130 | 120 | ||
131 | tsk = waiter->task; | ||
132 | smp_mb(); | ||
133 | waiter->task = NULL; | ||
134 | wake_up_process(tsk); | ||
135 | put_task_struct(tsk); | ||
136 | return sem; | 121 | return sem; |
137 | } | 122 | } |
138 | 123 | ||
@@ -204,7 +189,6 @@ int __down_read_trylock(struct rw_semaphore *sem) | |||
204 | 189 | ||
205 | /* | 190 | /* |
206 | * get a write lock on the semaphore | 191 | * get a write lock on the semaphore |
207 | * - we increment the waiting count anyway to indicate an exclusive lock | ||
208 | */ | 192 | */ |
209 | void __sched __down_write_nested(struct rw_semaphore *sem, int subclass) | 193 | void __sched __down_write_nested(struct rw_semaphore *sem, int subclass) |
210 | { | 194 | { |
@@ -214,37 +198,32 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass) | |||
214 | 198 | ||
215 | raw_spin_lock_irqsave(&sem->wait_lock, flags); | 199 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
216 | 200 | ||
217 | if (sem->activity == 0 && list_empty(&sem->wait_list)) { | ||
218 | /* granted */ | ||
219 | sem->activity = -1; | ||
220 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
221 | goto out; | ||
222 | } | ||
223 | |||
224 | tsk = current; | ||
225 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
226 | |||
227 | /* set up my own style of waitqueue */ | 201 | /* set up my own style of waitqueue */ |
202 | tsk = current; | ||
228 | waiter.task = tsk; | 203 | waiter.task = tsk; |
229 | waiter.flags = RWSEM_WAITING_FOR_WRITE; | 204 | waiter.flags = RWSEM_WAITING_FOR_WRITE; |
230 | get_task_struct(tsk); | ||
231 | |||
232 | list_add_tail(&waiter.list, &sem->wait_list); | 205 | list_add_tail(&waiter.list, &sem->wait_list); |
233 | 206 | ||
234 | /* we don't need to touch the semaphore struct anymore */ | 207 | /* wait for someone to release the lock */ |
235 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
236 | |||
237 | /* wait to be given the lock */ | ||
238 | for (;;) { | 208 | for (;;) { |
239 | if (!waiter.task) | 209 | /* |
210 | * This is the key to write lock stealing: it allows the | ||
211 | * task already on the CPU to get the lock soon, rather than | ||
212 | * putting itself to sleep and waiting for the system or for | ||
213 | * someone at the head of the wait list to wake it up. | ||
214 | */ | ||
215 | if (sem->activity == 0) | ||
240 | break; | 216 | break; |
241 | schedule(); | ||
242 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | 217 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); |
218 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
219 | schedule(); | ||
220 | raw_spin_lock_irqsave(&sem->wait_lock, flags); | ||
243 | } | 221 | } |
222 | /* got the lock */ | ||
223 | sem->activity = -1; | ||
224 | list_del(&waiter.list); | ||
244 | 225 | ||
245 | tsk->state = TASK_RUNNING; | 226 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
246 | out: | ||
247 | ; | ||
248 | } | 227 | } |
249 | 228 | ||
250 | void __sched __down_write(struct rw_semaphore *sem) | 229 | void __sched __down_write(struct rw_semaphore *sem) |
@@ -262,8 +241,8 @@ int __down_write_trylock(struct rw_semaphore *sem) | |||
262 | 241 | ||
263 | raw_spin_lock_irqsave(&sem->wait_lock, flags); | 242 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
264 | 243 | ||
265 | if (sem->activity == 0 && list_empty(&sem->wait_list)) { | 244 | if (sem->activity == 0) { |
266 | /* granted */ | 245 | /* got the lock */ |
267 | sem->activity = -1; | 246 | sem->activity = -1; |
268 | ret = 1; | 247 | ret = 1; |
269 | } | 248 | } |
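
The behavioural core of this hunk is easiest to see in the trylock: the old code refused a write lock whenever anyone was queued, while the new code takes it whenever the count is free - which is precisely the "stealing". Both conditions are quoted from the diff above:

	/* before: fail if the lock is held OR anyone is waiting */
	if (sem->activity == 0 && list_empty(&sem->wait_list)) { ... }

	/* after: succeed whenever the lock is free, even with waiters
	 * queued - a running writer can jump the queue */
	if (sem->activity == 0) { ... }

Correspondingly, __rwsem_do_wake() no longer hands the lock to the woken writer; the waiter stays on the list, re-takes wait_lock, and claims sem->activity = -1 itself once it observes the count at 0.
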
diff --git a/lib/rwsem.c b/lib/rwsem.c index 8337e1b9bb8d..ad5e0df16ab4 100644 --- a/lib/rwsem.c +++ b/lib/rwsem.c | |||
@@ -2,6 +2,8 @@ | |||
2 | * | 2 | * |
3 | * Written by David Howells (dhowells@redhat.com). | 3 | * Written by David Howells (dhowells@redhat.com). |
4 | * Derived from arch/i386/kernel/semaphore.c | 4 | * Derived from arch/i386/kernel/semaphore.c |
5 | * | ||
6 | * Writer lock-stealing by Alex Shi <alex.shi@intel.com> | ||
5 | */ | 7 | */ |
6 | #include <linux/rwsem.h> | 8 | #include <linux/rwsem.h> |
7 | #include <linux/sched.h> | 9 | #include <linux/sched.h> |
@@ -60,7 +62,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wake_type) | |||
60 | struct rwsem_waiter *waiter; | 62 | struct rwsem_waiter *waiter; |
61 | struct task_struct *tsk; | 63 | struct task_struct *tsk; |
62 | struct list_head *next; | 64 | struct list_head *next; |
63 | signed long oldcount, woken, loop, adjustment; | 65 | signed long woken, loop, adjustment; |
64 | 66 | ||
65 | waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); | 67 | waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); |
66 | if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE)) | 68 | if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE)) |
@@ -72,30 +74,8 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wake_type) | |||
72 | */ | 74 | */ |
73 | goto out; | 75 | goto out; |
74 | 76 | ||
75 | /* There's a writer at the front of the queue - try to grant it the | 77 | /* Wake up the writing waiter and let the task grab the sem: */ |
76 | * write lock. However, we only wake this writer if we can transition | 78 | wake_up_process(waiter->task); |
77 | * the active part of the count from 0 -> 1 | ||
78 | */ | ||
79 | adjustment = RWSEM_ACTIVE_WRITE_BIAS; | ||
80 | if (waiter->list.next == &sem->wait_list) | ||
81 | adjustment -= RWSEM_WAITING_BIAS; | ||
82 | |||
83 | try_again_write: | ||
84 | oldcount = rwsem_atomic_update(adjustment, sem) - adjustment; | ||
85 | if (oldcount & RWSEM_ACTIVE_MASK) | ||
86 | /* Someone grabbed the sem already */ | ||
87 | goto undo_write; | ||
88 | |||
89 | /* We must be careful not to touch 'waiter' after we set ->task = NULL. | ||
90 | * It is an allocated on the waiter's stack and may become invalid at | ||
91 | * any time after that point (due to a wakeup from another source). | ||
92 | */ | ||
93 | list_del(&waiter->list); | ||
94 | tsk = waiter->task; | ||
95 | smp_mb(); | ||
96 | waiter->task = NULL; | ||
97 | wake_up_process(tsk); | ||
98 | put_task_struct(tsk); | ||
99 | goto out; | 79 | goto out; |
100 | 80 | ||
101 | readers_only: | 81 | readers_only: |
@@ -157,12 +137,40 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wake_type) | |||
157 | 137 | ||
158 | out: | 138 | out: |
159 | return sem; | 139 | return sem; |
140 | } | ||
141 | |||
142 | /* Try to get write sem, caller holds sem->wait_lock: */ | ||
143 | static int try_get_writer_sem(struct rw_semaphore *sem, | ||
144 | struct rwsem_waiter *waiter) | ||
145 | { | ||
146 | struct rwsem_waiter *fwaiter; | ||
147 | long oldcount, adjustment; | ||
160 | 148 | ||
161 | /* undo the change to the active count, but check for a transition | 149 | /* only steal when first waiter is writing */ |
162 | * 1->0 */ | 150 | fwaiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); |
163 | undo_write: | 151 | if (!(fwaiter->flags & RWSEM_WAITING_FOR_WRITE)) |
152 | return 0; | ||
153 | |||
154 | adjustment = RWSEM_ACTIVE_WRITE_BIAS; | ||
155 | /* Only one waiter in the queue: */ | ||
156 | if (fwaiter == waiter && waiter->list.next == &sem->wait_list) | ||
157 | adjustment -= RWSEM_WAITING_BIAS; | ||
158 | |||
159 | try_again_write: | ||
160 | oldcount = rwsem_atomic_update(adjustment, sem) - adjustment; | ||
161 | if (!(oldcount & RWSEM_ACTIVE_MASK)) { | ||
162 | /* No active lock: */ | ||
163 | struct task_struct *tsk = waiter->task; | ||
164 | |||
165 | list_del(&waiter->list); | ||
166 | smp_mb(); | ||
167 | put_task_struct(tsk); | ||
168 | tsk->state = TASK_RUNNING; | ||
169 | return 1; | ||
170 | } | ||
| 171 | /* someone grabbed the sem already */ ||
164 | if (rwsem_atomic_update(-adjustment, sem) & RWSEM_ACTIVE_MASK) | 172 | if (rwsem_atomic_update(-adjustment, sem) & RWSEM_ACTIVE_MASK) |
165 | goto out; | 173 | return 0; |
166 | goto try_again_write; | 174 | goto try_again_write; |
167 | } | 175 | } |
168 | 176 | ||
@@ -210,6 +218,15 @@ rwsem_down_failed_common(struct rw_semaphore *sem, | |||
210 | for (;;) { | 218 | for (;;) { |
211 | if (!waiter.task) | 219 | if (!waiter.task) |
212 | break; | 220 | break; |
221 | |||
222 | raw_spin_lock_irq(&sem->wait_lock); | ||
223 | /* Try to get the writer sem, may steal from the head writer: */ | ||
224 | if (flags == RWSEM_WAITING_FOR_WRITE) | ||
225 | if (try_get_writer_sem(sem, &waiter)) { | ||
226 | raw_spin_unlock_irq(&sem->wait_lock); | ||
227 | return sem; | ||
228 | } | ||
229 | raw_spin_unlock_irq(&sem->wait_lock); | ||
213 | schedule(); | 230 | schedule(); |
214 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | 231 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); |
215 | } | 232 | } |
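
One subtlety in try_get_writer_sem() worth spelling out: rwsem_atomic_update() returns the new counter value, so subtracting the adjustment recovers the value the count held just before the attempt, and the steal succeeded only if that old value had no active holders. A sketch of the pattern in isolation; the layout constants are per-arch, and the values in the comment are the common 32-bit ones, given here for illustration only:

	/* count layout (typical 32-bit rwsem):
	 *   0x0000ffff  RWSEM_ACTIVE_MASK  - active readers/writer
	 *   0xffff0000  RWSEM_WAITING_BIAS - waiters are queued
	 */
	long newcount = rwsem_atomic_update(adjustment, sem);
	long oldcount = newcount - adjustment;

	if (!(oldcount & RWSEM_ACTIVE_MASK)) {
		/* nobody held the lock: the speculative add won it */
	} else if (rwsem_atomic_update(-adjustment, sem) & RWSEM_ACTIVE_MASK) {
		/* back out and give up: someone still holds the lock */
	} else {
		/* backing out raced with a release: retry the steal */
	}
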
diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 7874b01e816e..b83c144d731f 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c | |||
@@ -394,6 +394,44 @@ int sg_alloc_table_from_pages(struct sg_table *sgt, | |||
394 | } | 394 | } |
395 | EXPORT_SYMBOL(sg_alloc_table_from_pages); | 395 | EXPORT_SYMBOL(sg_alloc_table_from_pages); |
396 | 396 | ||
397 | void __sg_page_iter_start(struct sg_page_iter *piter, | ||
398 | struct scatterlist *sglist, unsigned int nents, | ||
399 | unsigned long pgoffset) | ||
400 | { | ||
401 | piter->__pg_advance = 0; | ||
402 | piter->__nents = nents; | ||
403 | |||
404 | piter->page = NULL; | ||
405 | piter->sg = sglist; | ||
406 | piter->sg_pgoffset = pgoffset; | ||
407 | } | ||
408 | EXPORT_SYMBOL(__sg_page_iter_start); | ||
409 | |||
410 | static int sg_page_count(struct scatterlist *sg) | ||
411 | { | ||
412 | return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT; | ||
413 | } | ||
414 | |||
415 | bool __sg_page_iter_next(struct sg_page_iter *piter) | ||
416 | { | ||
417 | if (!piter->__nents || !piter->sg) | ||
418 | return false; | ||
419 | |||
420 | piter->sg_pgoffset += piter->__pg_advance; | ||
421 | piter->__pg_advance = 1; | ||
422 | |||
423 | while (piter->sg_pgoffset >= sg_page_count(piter->sg)) { | ||
424 | piter->sg_pgoffset -= sg_page_count(piter->sg); | ||
425 | piter->sg = sg_next(piter->sg); | ||
426 | if (!--piter->__nents || !piter->sg) | ||
427 | return false; | ||
428 | } | ||
429 | piter->page = nth_page(sg_page(piter->sg), piter->sg_pgoffset); | ||
430 | |||
431 | return true; | ||
432 | } | ||
433 | EXPORT_SYMBOL(__sg_page_iter_next); | ||
434 | |||
397 | /** | 435 | /** |
398 | * sg_miter_start - start mapping iteration over a sg list | 436 | * sg_miter_start - start mapping iteration over a sg list |
399 | * @miter: sg mapping iter to be started | 437 | * @miter: sg mapping iter to be started |
@@ -411,9 +449,7 @@ void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl, | |||
411 | { | 449 | { |
412 | memset(miter, 0, sizeof(struct sg_mapping_iter)); | 450 | memset(miter, 0, sizeof(struct sg_mapping_iter)); |
413 | 451 | ||
414 | miter->__sg = sgl; | 452 | __sg_page_iter_start(&miter->piter, sgl, nents, 0); |
415 | miter->__nents = nents; | ||
416 | miter->__offset = 0; | ||
417 | WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG))); | 453 | WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG))); |
418 | miter->__flags = flags; | 454 | miter->__flags = flags; |
419 | } | 455 | } |
@@ -438,36 +474,35 @@ EXPORT_SYMBOL(sg_miter_start); | |||
438 | */ | 474 | */ |
439 | bool sg_miter_next(struct sg_mapping_iter *miter) | 475 | bool sg_miter_next(struct sg_mapping_iter *miter) |
440 | { | 476 | { |
441 | unsigned int off, len; | ||
442 | |||
443 | /* check for end and drop resources from the last iteration */ | ||
444 | if (!miter->__nents) | ||
445 | return false; | ||
446 | |||
447 | sg_miter_stop(miter); | 477 | sg_miter_stop(miter); |
448 | 478 | ||
449 | /* get to the next sg if necessary. __offset is adjusted by stop */ | 479 | /* |
450 | while (miter->__offset == miter->__sg->length) { | 480 | * Get to the next page if necessary. |
451 | if (--miter->__nents) { | 481 | * __remaining and __offset are adjusted by sg_miter_stop |
452 | miter->__sg = sg_next(miter->__sg); | 482 | */ |
453 | miter->__offset = 0; | 483 | if (!miter->__remaining) { |
454 | } else | 484 | struct scatterlist *sg; |
485 | unsigned long pgoffset; | ||
486 | |||
487 | if (!__sg_page_iter_next(&miter->piter)) | ||
455 | return false; | 488 | return false; |
456 | } | ||
457 | 489 | ||
458 | /* map the next page */ | 490 | sg = miter->piter.sg; |
459 | off = miter->__sg->offset + miter->__offset; | 491 | pgoffset = miter->piter.sg_pgoffset; |
460 | len = miter->__sg->length - miter->__offset; | ||
461 | 492 | ||
462 | miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT); | 493 | miter->__offset = pgoffset ? 0 : sg->offset; |
463 | off &= ~PAGE_MASK; | 494 | miter->__remaining = sg->offset + sg->length - |
464 | miter->length = min_t(unsigned int, len, PAGE_SIZE - off); | 495 | (pgoffset << PAGE_SHIFT) - miter->__offset; |
465 | miter->consumed = miter->length; | 496 | miter->__remaining = min_t(unsigned long, miter->__remaining, |
497 | PAGE_SIZE - miter->__offset); | ||
498 | } | ||
499 | miter->page = miter->piter.page; | ||
500 | miter->consumed = miter->length = miter->__remaining; | ||
466 | 501 | ||
467 | if (miter->__flags & SG_MITER_ATOMIC) | 502 | if (miter->__flags & SG_MITER_ATOMIC) |
468 | miter->addr = kmap_atomic(miter->page) + off; | 503 | miter->addr = kmap_atomic(miter->page) + miter->__offset; |
469 | else | 504 | else |
470 | miter->addr = kmap(miter->page) + off; | 505 | miter->addr = kmap(miter->page) + miter->__offset; |
471 | 506 | ||
472 | return true; | 507 | return true; |
473 | } | 508 | } |
@@ -494,6 +529,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter) | |||
494 | /* drop resources from the last iteration */ | 529 | /* drop resources from the last iteration */ |
495 | if (miter->addr) { | 530 | if (miter->addr) { |
496 | miter->__offset += miter->consumed; | 531 | miter->__offset += miter->consumed; |
532 | miter->__remaining -= miter->consumed; | ||
497 | 533 | ||
498 | if (miter->__flags & SG_MITER_TO_SG) | 534 | if (miter->__flags & SG_MITER_TO_SG) |
499 | flush_kernel_dcache_page(miter->page); | 535 | flush_kernel_dcache_page(miter->page); |
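
The new page iterator is usable on its own, independent of the kmap-based sg_mapping_iter that now sits on top of it. A minimal sketch of walking every page backing an sg table (later kernels wrap this pattern in a for_each_sg_page() convenience macro; here the two exported helpers are called directly):

	#include <linux/scatterlist.h>

	static void walk_pages(struct sg_table *sgt)
	{
		struct sg_page_iter piter;

		__sg_page_iter_start(&piter, sgt->sgl, sgt->nents, 0);
		while (__sg_page_iter_next(&piter)) {
			struct page *page = piter.page;

			/* ... operate on one PAGE_SIZE chunk ... */
		}
	}
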
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 196b06984dec..bfe02b8fc55b 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
@@ -122,11 +122,18 @@ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev, | |||
122 | return phys_to_dma(hwdev, virt_to_phys(address)); | 122 | return phys_to_dma(hwdev, virt_to_phys(address)); |
123 | } | 123 | } |
124 | 124 | ||
125 | static bool no_iotlb_memory; | ||
126 | |||
125 | void swiotlb_print_info(void) | 127 | void swiotlb_print_info(void) |
126 | { | 128 | { |
127 | unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT; | 129 | unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT; |
128 | unsigned char *vstart, *vend; | 130 | unsigned char *vstart, *vend; |
129 | 131 | ||
132 | if (no_iotlb_memory) { | ||
133 | pr_warn("software IO TLB: No low mem\n"); | ||
134 | return; | ||
135 | } | ||
136 | |||
130 | vstart = phys_to_virt(io_tlb_start); | 137 | vstart = phys_to_virt(io_tlb_start); |
131 | vend = phys_to_virt(io_tlb_end); | 138 | vend = phys_to_virt(io_tlb_end); |
132 | 139 | ||
@@ -136,7 +143,7 @@ void swiotlb_print_info(void) | |||
136 | bytes >> 20, vstart, vend - 1); | 143 | bytes >> 20, vstart, vend - 1); |
137 | } | 144 | } |
138 | 145 | ||
139 | void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) | 146 | int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) |
140 | { | 147 | { |
141 | void *v_overflow_buffer; | 148 | void *v_overflow_buffer; |
142 | unsigned long i, bytes; | 149 | unsigned long i, bytes; |
@@ -150,9 +157,10 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) | |||
150 | /* | 157 | /* |
151 | * Get the overflow emergency buffer | 158 | * Get the overflow emergency buffer |
152 | */ | 159 | */ |
153 | v_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow)); | 160 | v_overflow_buffer = alloc_bootmem_low_pages_nopanic( |
161 | PAGE_ALIGN(io_tlb_overflow)); | ||
154 | if (!v_overflow_buffer) | 162 | if (!v_overflow_buffer) |
155 | panic("Cannot allocate SWIOTLB overflow buffer!\n"); | 163 | return -ENOMEM; |
156 | 164 | ||
157 | io_tlb_overflow_buffer = __pa(v_overflow_buffer); | 165 | io_tlb_overflow_buffer = __pa(v_overflow_buffer); |
158 | 166 | ||
@@ -169,15 +177,19 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) | |||
169 | 177 | ||
170 | if (verbose) | 178 | if (verbose) |
171 | swiotlb_print_info(); | 179 | swiotlb_print_info(); |
180 | |||
181 | return 0; | ||
172 | } | 182 | } |
173 | 183 | ||
174 | /* | 184 | /* |
175 | * Statically reserve bounce buffer space and initialize bounce buffer data | 185 | * Statically reserve bounce buffer space and initialize bounce buffer data |
176 | * structures for the software IO TLB used to implement the DMA API. | 186 | * structures for the software IO TLB used to implement the DMA API. |
177 | */ | 187 | */ |
178 | static void __init | 188 | void __init |
179 | swiotlb_init_with_default_size(size_t default_size, int verbose) | 189 | swiotlb_init(int verbose) |
180 | { | 190 | { |
191 | /* default to 64MB */ | ||
192 | size_t default_size = 64UL<<20; | ||
181 | unsigned char *vstart; | 193 | unsigned char *vstart; |
182 | unsigned long bytes; | 194 | unsigned long bytes; |
183 | 195 | ||
@@ -188,20 +200,16 @@ swiotlb_init_with_default_size(size_t default_size, int verbose) | |||
188 | 200 | ||
189 | bytes = io_tlb_nslabs << IO_TLB_SHIFT; | 201 | bytes = io_tlb_nslabs << IO_TLB_SHIFT; |
190 | 202 | ||
191 | /* | 203 | /* Get IO TLB memory from the low pages */ |
192 | * Get IO TLB memory from the low pages | 204 | vstart = alloc_bootmem_low_pages_nopanic(PAGE_ALIGN(bytes)); |
193 | */ | 205 | if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose)) |
194 | vstart = alloc_bootmem_low_pages(PAGE_ALIGN(bytes)); | 206 | return; |
195 | if (!vstart) | ||
196 | panic("Cannot allocate SWIOTLB buffer"); | ||
197 | |||
198 | swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose); | ||
199 | } | ||
200 | 207 | ||
201 | void __init | 208 | if (io_tlb_start) |
202 | swiotlb_init(int verbose) | 209 | free_bootmem(io_tlb_start, |
203 | { | 210 | PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); |
204 | swiotlb_init_with_default_size(64 * (1<<20), verbose); /* default to 64MB */ | 211 | pr_warn("Cannot allocate SWIOTLB buffer"); |
212 | no_iotlb_memory = true; | ||
205 | } | 213 | } |
206 | 214 | ||
207 | /* | 215 | /* |
@@ -405,6 +413,9 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, | |||
405 | unsigned long offset_slots; | 413 | unsigned long offset_slots; |
406 | unsigned long max_slots; | 414 | unsigned long max_slots; |
407 | 415 | ||
416 | if (no_iotlb_memory) | ||
417 | panic("Cannot allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer"); | ||
418 | |||
408 | mask = dma_get_seg_boundary(hwdev); | 419 | mask = dma_get_seg_boundary(hwdev); |
409 | 420 | ||
410 | tbl_dma_addr &= mask; | 421 | tbl_dma_addr &= mask; |
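
The net effect of the swiotlb changes: boot no longer panics when the low-memory bounce buffer cannot be reserved. swiotlb_init() frees any partial allocation, warns, and sets no_iotlb_memory, deferring the panic until something actually needs to bounce in swiotlb_tbl_map_single(). Condensed from the hunks above (no new API, just the shape of the fallback):

	vstart = alloc_bootmem_low_pages_nopanic(PAGE_ALIGN(bytes));
	if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
		return;		/* bounce buffer is up */

	/* failed: keep booting; only DMA that needs bouncing will panic */
	pr_warn("Cannot allocate SWIOTLB buffer");
	no_iotlb_memory = true;
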
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index fab33a9c5318..0d62fd700f68 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
@@ -1030,6 +1030,7 @@ int kptr_restrict __read_mostly; | |||
1030 | * N no separator | 1030 | * N no separator |
1031 | * The maximum supported length is 64 bytes of the input. Consider | 1031 | * The maximum supported length is 64 bytes of the input. Consider |
1032 | * to use print_hex_dump() for the larger input. | 1032 | * to use print_hex_dump() for the larger input. |
1033 | * - 'a' For a phys_addr_t type and its derivative types (passed by reference) | ||
1033 | * | 1034 | * |
1034 | * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 | 1035 | * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 |
1035 | * function pointers are really function descriptors, which contain a | 1036 | * function pointers are really function descriptors, which contain a |
@@ -1120,6 +1121,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, | |||
1120 | return netdev_feature_string(buf, end, ptr, spec); | 1121 | return netdev_feature_string(buf, end, ptr, spec); |
1121 | } | 1122 | } |
1122 | break; | 1123 | break; |
1124 | case 'a': | ||
1125 | spec.flags |= SPECIAL | SMALL | ZEROPAD; | ||
1126 | spec.field_width = sizeof(phys_addr_t) * 2 + 2; | ||
1127 | spec.base = 16; | ||
1128 | return number(buf, end, | ||
1129 | (unsigned long long) *((phys_addr_t *)ptr), spec); | ||
1123 | } | 1130 | } |
1124 | spec.flags |= SMALL; | 1131 | spec.flags |= SMALL; |
1125 | if (spec.field_width == -1) { | 1132 | if (spec.field_width == -1) { |
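
A quick usage sketch for the new 'a' extension (the function name and address values are illustrative): the argument is passed by reference, and SPECIAL | ZEROPAD with a field width of 2 * sizeof(phys_addr_t) + 2 yields a fixed-width, 0x-prefixed value, so one format string covers 32-bit, LPAE and 64-bit kernels:

	#include <linux/kernel.h>
	#include <linux/types.h>

	static void report_region(phys_addr_t start, phys_addr_t end)
	{
		pr_info("reserved: %pa..%pa\n", &start, &end);
		/* 64-bit phys_addr_t prints like:
		 * "reserved: 0x00000000fee00000..0x00000000fee00fff" */
	}
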
diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig index 60a6088d0e5e..82a04d7ba99e 100644 --- a/lib/xz/Kconfig +++ b/lib/xz/Kconfig | |||
@@ -6,42 +6,40 @@ config XZ_DEC | |||
6 | the .xz file format as the container. For integrity checking, | 6 | the .xz file format as the container. For integrity checking, |
7 | CRC32 is supported. See Documentation/xz.txt for more information. | 7 | CRC32 is supported. See Documentation/xz.txt for more information. |
8 | 8 | ||
9 | if XZ_DEC | ||
10 | |||
9 | config XZ_DEC_X86 | 11 | config XZ_DEC_X86 |
10 | bool "x86 BCJ filter decoder" if EXPERT | 12 | bool "x86 BCJ filter decoder" |
11 | default y | 13 | default y if X86 |
12 | depends on XZ_DEC | ||
13 | select XZ_DEC_BCJ | 14 | select XZ_DEC_BCJ |
14 | 15 | ||
15 | config XZ_DEC_POWERPC | 16 | config XZ_DEC_POWERPC |
16 | bool "PowerPC BCJ filter decoder" if EXPERT | 17 | bool "PowerPC BCJ filter decoder" |
17 | default y | 18 | default y if POWERPC |
18 | depends on XZ_DEC | ||
19 | select XZ_DEC_BCJ | 19 | select XZ_DEC_BCJ |
20 | 20 | ||
21 | config XZ_DEC_IA64 | 21 | config XZ_DEC_IA64 |
22 | bool "IA-64 BCJ filter decoder" if EXPERT | 22 | bool "IA-64 BCJ filter decoder" |
23 | default y | 23 | default y if IA64 |
24 | depends on XZ_DEC | ||
25 | select XZ_DEC_BCJ | 24 | select XZ_DEC_BCJ |
26 | 25 | ||
27 | config XZ_DEC_ARM | 26 | config XZ_DEC_ARM |
28 | bool "ARM BCJ filter decoder" if EXPERT | 27 | bool "ARM BCJ filter decoder" |
29 | default y | 28 | default y if ARM |
30 | depends on XZ_DEC | ||
31 | select XZ_DEC_BCJ | 29 | select XZ_DEC_BCJ |
32 | 30 | ||
33 | config XZ_DEC_ARMTHUMB | 31 | config XZ_DEC_ARMTHUMB |
34 | bool "ARM-Thumb BCJ filter decoder" if EXPERT | 32 | bool "ARM-Thumb BCJ filter decoder" |
35 | default y | 33 | default y if (ARM && ARM_THUMB) |
36 | depends on XZ_DEC | ||
37 | select XZ_DEC_BCJ | 34 | select XZ_DEC_BCJ |
38 | 35 | ||
39 | config XZ_DEC_SPARC | 36 | config XZ_DEC_SPARC |
40 | bool "SPARC BCJ filter decoder" if EXPERT | 37 | bool "SPARC BCJ filter decoder" |
41 | default y | 38 | default y if SPARC |
42 | depends on XZ_DEC | ||
43 | select XZ_DEC_BCJ | 39 | select XZ_DEC_BCJ |
44 | 40 | ||
41 | endif | ||
42 | |||
45 | config XZ_DEC_BCJ | 43 | config XZ_DEC_BCJ |
46 | bool | 44 | bool |
47 | default n | 45 | default n |