Diffstat (limited to 'lib')
 -rw-r--r--  lib/Kconfig.debug                 |   4
 -rw-r--r--  lib/Kconfig.kgdb                  |  18
 -rw-r--r--  lib/Makefile                      |   2
 -rw-r--r--  lib/bug.c                         |   3
 -rw-r--r--  lib/checksum.c                    |   2
 -rw-r--r--  lib/debugobjects.c                |  21
 -rw-r--r--  lib/decompress_unlzo.c            |   2
 -rw-r--r--  lib/devres.c                      |   2
 -rw-r--r--  lib/idr.c                         | 436
 -rw-r--r--  lib/kfifo.c                       | 607
 -rw-r--r--  lib/lru_cache.c                   |   3
 -rw-r--r--  lib/lzo/Makefile                  |   2
 -rw-r--r--  lib/lzo/lzo1x_compress.c          | 335
 -rw-r--r--  lib/lzo/lzo1x_decompress.c        | 255
 -rw-r--r--  lib/lzo/lzo1x_decompress_safe.c   | 237
 -rw-r--r--  lib/lzo/lzodefs.h                 |  38
 -rw-r--r--  lib/scatterlist.c                 |  86
17 files changed, 1455 insertions, 598 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e4a7f808fa06..28be08c09bab 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
| @@ -674,7 +674,7 @@ config STACKTRACE | |||
| 674 | 674 | ||
| 675 | config DEBUG_STACK_USAGE | 675 | config DEBUG_STACK_USAGE |
| 676 | bool "Stack utilization instrumentation" | 676 | bool "Stack utilization instrumentation" |
| 677 | depends on DEBUG_KERNEL && !IA64 && !PARISC | 677 | depends on DEBUG_KERNEL && !IA64 && !PARISC && !METAG |
| 678 | help | 678 | help |
| 679 | Enables the display of the minimum amount of free stack which each | 679 | Enables the display of the minimum amount of free stack which each |
| 680 | task has ever had available in the sysrq-T and sysrq-P debug output. | 680 | task has ever had available in the sysrq-T and sysrq-P debug output. |
| @@ -855,7 +855,7 @@ config FRAME_POINTER | |||
| 855 | bool "Compile the kernel with frame pointers" | 855 | bool "Compile the kernel with frame pointers" |
| 856 | depends on DEBUG_KERNEL && \ | 856 | depends on DEBUG_KERNEL && \ |
| 857 | (CRIS || M68K || FRV || UML || \ | 857 | (CRIS || M68K || FRV || UML || \ |
| 858 | AVR32 || SUPERH || BLACKFIN || MN10300) || \ | 858 | AVR32 || SUPERH || BLACKFIN || MN10300 || METAG) || \ |
| 859 | ARCH_WANT_FRAME_POINTERS | 859 | ARCH_WANT_FRAME_POINTERS |
| 860 | default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS | 860 | default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS |
| 861 | help | 861 | help |
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index dbb58ae1b8e0..140e87824173 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
| @@ -80,4 +80,22 @@ config KDB_KEYBOARD | |||
| 80 | help | 80 | help |
| 81 | KDB can use a PS/2 type keyboard for an input device | 81 | KDB can use a PS/2 type keyboard for an input device |
| 82 | 82 | ||
| 83 | config KDB_CONTINUE_CATASTROPHIC | ||
| 84 | int "KDB: continue after catastrophic errors" | ||
| 85 | depends on KGDB_KDB | ||
| 86 | default "0" | ||
| 87 | help | ||
| 88 | This integer controls the behaviour of kdb when the kernel gets a | ||
| 89 | catastrophic error, i.e. for a panic or oops. | ||
| 90 | When KDB is active and a catastrophic error occurs, nothing extra | ||
| 91 | will happen until you type 'go'. | ||
| 92 | CONFIG_KDB_CONTINUE_CATASTROPHIC == 0 (default). The first time | ||
| 93 | you type 'go', you will be warned by kdb. The secend time you type | ||
| 94 | 'go', KDB tries to continue. No guarantees that the | ||
| 95 | kernel is still usable in this situation. | ||
| 96 | CONFIG_KDB_CONTINUE_CATASTROPHIC == 1. KDB tries to continue. | ||
| 97 | No guarantees that the kernel is still usable in this situation. | ||
| 98 | CONFIG_KDB_CONTINUE_CATASTROPHIC == 2. KDB forces a reboot. | ||
| 99 | If you are not sure, say 0. | ||
| 100 | |||
| 83 | endif # KGDB | 101 | endif # KGDB |
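As a rough illustration of what this new option amounts to at build time (the chosen value here is hypothetical, not part of the patch): an integer Kconfig symbol like KDB_CONTINUE_CATASTROPHIC ends up as a plain #define that the kdb code can test after a panic or oops.

/* Illustrative only: selecting "2" for this option in menuconfig produces the
 * following line in the generated include/generated/autoconf.h, i.e. kdb will
 * force a reboot once the user types 'go' after a catastrophic error. */
#define CONFIG_KDB_CONTINUE_CATASTROPHIC 2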
diff --git a/lib/Makefile b/lib/Makefile
index 02ed6c04cd7d..d7946ff75b2e 100644
--- a/lib/Makefile
+++ b/lib/Makefile
| @@ -23,7 +23,7 @@ lib-y += kobject.o klist.o | |||
| 23 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ | 23 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ |
| 24 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ | 24 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ |
| 25 | string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \ | 25 | string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \ |
| 26 | bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o | 26 | bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o |
| 27 | obj-y += kstrtox.o | 27 | obj-y += kstrtox.o |
| 28 | obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o | 28 | obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o |
| 29 | 29 | ||
diff --git a/lib/bug.c b/lib/bug.c
--- a/lib/bug.c
+++ b/lib/bug.c
| @@ -166,7 +166,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs) | |||
| 166 | print_modules(); | 166 | print_modules(); |
| 167 | show_regs(regs); | 167 | show_regs(regs); |
| 168 | print_oops_end_marker(); | 168 | print_oops_end_marker(); |
| 169 | add_taint(BUG_GET_TAINT(bug)); | 169 | /* Just a warning, don't kill lockdep. */ |
| 170 | add_taint(BUG_GET_TAINT(bug), LOCKDEP_STILL_OK); | ||
| 170 | return BUG_TRAP_TYPE_WARN; | 171 | return BUG_TRAP_TYPE_WARN; |
| 171 | } | 172 | } |
| 172 | 173 | ||
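For context on the lib/bug.c hunk above: add_taint() now takes a second argument saying whether lockdep output can still be trusted after the taint. A minimal sketch of how callers choose that argument (the two wrapper functions are hypothetical; only the add_taint() calls reflect the new interface):

#include <linux/kernel.h>

/* WARN-style path: kernel state is still consistent, keep lockdep alive */
static void example_warn_path(void)
{
	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}

/* oops/die path: further lockdep reports can no longer be trusted */
static void example_oops_path(void)
{
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
}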
diff --git a/lib/checksum.c b/lib/checksum.c
index 12dceb27ff20..129775eb6de6 100644
--- a/lib/checksum.c
+++ b/lib/checksum.c
| @@ -102,6 +102,7 @@ out: | |||
| 102 | } | 102 | } |
| 103 | #endif | 103 | #endif |
| 104 | 104 | ||
| 105 | #ifndef ip_fast_csum | ||
| 105 | /* | 106 | /* |
| 106 | * This is a version of ip_compute_csum() optimized for IP headers, | 107 | * This is a version of ip_compute_csum() optimized for IP headers, |
| 107 | * which always checksum on 4 octet boundaries. | 108 | * which always checksum on 4 octet boundaries. |
| @@ -111,6 +112,7 @@ __sum16 ip_fast_csum(const void *iph, unsigned int ihl) | |||
| 111 | return (__force __sum16)~do_csum(iph, ihl*4); | 112 | return (__force __sum16)~do_csum(iph, ihl*4); |
| 112 | } | 113 | } |
| 113 | EXPORT_SYMBOL(ip_fast_csum); | 114 | EXPORT_SYMBOL(ip_fast_csum); |
| 115 | #endif | ||
| 114 | 116 | ||
| 115 | /* | 117 | /* |
| 116 | * computes the checksum of a memory block at buff, length len, | 118 | * computes the checksum of a memory block at buff, length len, |
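The new #ifndef guard lets an architecture supply its own ip_fast_csum() and have the generic lib/checksum.c copy compile away. A sketch of the arch side, assuming the usual asm/checksum.h override pattern (the body is a placeholder, not a real port):

/* In the architecture's asm/checksum.h (sketch): provide the optimized
 * routine and define the symbol so the #ifndef in lib/checksum.c is false. */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	/* arch-specific code, e.g. hand-written assembly over ihl*4 bytes */
	return (__force __sum16)~do_csum(iph, ihl * 4);
}
#define ip_fast_csum ip_fast_csum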
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index d11808ca4bc4..37061ede8b81 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
| @@ -109,11 +109,10 @@ static void fill_pool(void) | |||
| 109 | */ | 109 | */ |
| 110 | static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b) | 110 | static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b) |
| 111 | { | 111 | { |
| 112 | struct hlist_node *node; | ||
| 113 | struct debug_obj *obj; | 112 | struct debug_obj *obj; |
| 114 | int cnt = 0; | 113 | int cnt = 0; |
| 115 | 114 | ||
| 116 | hlist_for_each_entry(obj, node, &b->list, node) { | 115 | hlist_for_each_entry(obj, &b->list, node) { |
| 117 | cnt++; | 116 | cnt++; |
| 118 | if (obj->object == addr) | 117 | if (obj->object == addr) |
| 119 | return obj; | 118 | return obj; |
| @@ -213,7 +212,7 @@ static void free_object(struct debug_obj *obj) | |||
| 213 | static void debug_objects_oom(void) | 212 | static void debug_objects_oom(void) |
| 214 | { | 213 | { |
| 215 | struct debug_bucket *db = obj_hash; | 214 | struct debug_bucket *db = obj_hash; |
| 216 | struct hlist_node *node, *tmp; | 215 | struct hlist_node *tmp; |
| 217 | HLIST_HEAD(freelist); | 216 | HLIST_HEAD(freelist); |
| 218 | struct debug_obj *obj; | 217 | struct debug_obj *obj; |
| 219 | unsigned long flags; | 218 | unsigned long flags; |
| @@ -227,7 +226,7 @@ static void debug_objects_oom(void) | |||
| 227 | raw_spin_unlock_irqrestore(&db->lock, flags); | 226 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 228 | 227 | ||
| 229 | /* Now free them */ | 228 | /* Now free them */ |
| 230 | hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { | 229 | hlist_for_each_entry_safe(obj, tmp, &freelist, node) { |
| 231 | hlist_del(&obj->node); | 230 | hlist_del(&obj->node); |
| 232 | free_object(obj); | 231 | free_object(obj); |
| 233 | } | 232 | } |
| @@ -658,7 +657,7 @@ debug_object_active_state(void *addr, struct debug_obj_descr *descr, | |||
| 658 | static void __debug_check_no_obj_freed(const void *address, unsigned long size) | 657 | static void __debug_check_no_obj_freed(const void *address, unsigned long size) |
| 659 | { | 658 | { |
| 660 | unsigned long flags, oaddr, saddr, eaddr, paddr, chunks; | 659 | unsigned long flags, oaddr, saddr, eaddr, paddr, chunks; |
| 661 | struct hlist_node *node, *tmp; | 660 | struct hlist_node *tmp; |
| 662 | HLIST_HEAD(freelist); | 661 | HLIST_HEAD(freelist); |
| 663 | struct debug_obj_descr *descr; | 662 | struct debug_obj_descr *descr; |
| 664 | enum debug_obj_state state; | 663 | enum debug_obj_state state; |
| @@ -678,7 +677,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size) | |||
| 678 | repeat: | 677 | repeat: |
| 679 | cnt = 0; | 678 | cnt = 0; |
| 680 | raw_spin_lock_irqsave(&db->lock, flags); | 679 | raw_spin_lock_irqsave(&db->lock, flags); |
| 681 | hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) { | 680 | hlist_for_each_entry_safe(obj, tmp, &db->list, node) { |
| 682 | cnt++; | 681 | cnt++; |
| 683 | oaddr = (unsigned long) obj->object; | 682 | oaddr = (unsigned long) obj->object; |
| 684 | if (oaddr < saddr || oaddr >= eaddr) | 683 | if (oaddr < saddr || oaddr >= eaddr) |
| @@ -702,7 +701,7 @@ repeat: | |||
| 702 | raw_spin_unlock_irqrestore(&db->lock, flags); | 701 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 703 | 702 | ||
| 704 | /* Now free them */ | 703 | /* Now free them */ |
| 705 | hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { | 704 | hlist_for_each_entry_safe(obj, tmp, &freelist, node) { |
| 706 | hlist_del(&obj->node); | 705 | hlist_del(&obj->node); |
| 707 | free_object(obj); | 706 | free_object(obj); |
| 708 | } | 707 | } |
| @@ -1013,7 +1012,7 @@ void __init debug_objects_early_init(void) | |||
| 1013 | static int __init debug_objects_replace_static_objects(void) | 1012 | static int __init debug_objects_replace_static_objects(void) |
| 1014 | { | 1013 | { |
| 1015 | struct debug_bucket *db = obj_hash; | 1014 | struct debug_bucket *db = obj_hash; |
| 1016 | struct hlist_node *node, *tmp; | 1015 | struct hlist_node *tmp; |
| 1017 | struct debug_obj *obj, *new; | 1016 | struct debug_obj *obj, *new; |
| 1018 | HLIST_HEAD(objects); | 1017 | HLIST_HEAD(objects); |
| 1019 | int i, cnt = 0; | 1018 | int i, cnt = 0; |
| @@ -1033,7 +1032,7 @@ static int __init debug_objects_replace_static_objects(void) | |||
| 1033 | local_irq_disable(); | 1032 | local_irq_disable(); |
| 1034 | 1033 | ||
| 1035 | /* Remove the statically allocated objects from the pool */ | 1034 | /* Remove the statically allocated objects from the pool */ |
| 1036 | hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node) | 1035 | hlist_for_each_entry_safe(obj, tmp, &obj_pool, node) |
| 1037 | hlist_del(&obj->node); | 1036 | hlist_del(&obj->node); |
| 1038 | /* Move the allocated objects to the pool */ | 1037 | /* Move the allocated objects to the pool */ |
| 1039 | hlist_move_list(&objects, &obj_pool); | 1038 | hlist_move_list(&objects, &obj_pool); |
| @@ -1042,7 +1041,7 @@ static int __init debug_objects_replace_static_objects(void) | |||
| 1042 | for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { | 1041 | for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { |
| 1043 | hlist_move_list(&db->list, &objects); | 1042 | hlist_move_list(&db->list, &objects); |
| 1044 | 1043 | ||
| 1045 | hlist_for_each_entry(obj, node, &objects, node) { | 1044 | hlist_for_each_entry(obj, &objects, node) { |
| 1046 | new = hlist_entry(obj_pool.first, typeof(*obj), node); | 1045 | new = hlist_entry(obj_pool.first, typeof(*obj), node); |
| 1047 | hlist_del(&new->node); | 1046 | hlist_del(&new->node); |
| 1048 | /* copy object data */ | 1047 | /* copy object data */ |
| @@ -1057,7 +1056,7 @@ static int __init debug_objects_replace_static_objects(void) | |||
| 1057 | obj_pool_used); | 1056 | obj_pool_used); |
| 1058 | return 0; | 1057 | return 0; |
| 1059 | free: | 1058 | free: |
| 1060 | hlist_for_each_entry_safe(obj, node, tmp, &objects, node) { | 1059 | hlist_for_each_entry_safe(obj, tmp, &objects, node) { |
| 1061 | hlist_del(&obj->node); | 1060 | hlist_del(&obj->node); |
| 1062 | kmem_cache_free(obj_cache, obj); | 1061 | kmem_cache_free(obj_cache, obj); |
| 1063 | } | 1062 | } |
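All of the debugobjects.c changes above are the same mechanical conversion: the hlist iterators no longer take a separate struct hlist_node cursor. A compact sketch of the resulting loop shapes (the function and parameter names are placeholders):

static struct debug_obj *example_lookup(struct debug_bucket *b, void *addr)
{
	struct debug_obj *obj;

	/* new form: iterate with the entry pointer itself, no node cursor */
	hlist_for_each_entry(obj, &b->list, node)
		if (obj->object == addr)
			return obj;
	return NULL;
}

static void example_flush(struct hlist_head *freelist)
{
	struct debug_obj *obj;
	struct hlist_node *tmp;	/* only the _safe variant still needs one */

	hlist_for_each_entry_safe(obj, tmp, freelist, node) {
		hlist_del(&obj->node);
		free_object(obj);
	}
}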
diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c
index 4531294fa62f..960183d4258f 100644
--- a/lib/decompress_unlzo.c
+++ b/lib/decompress_unlzo.c
| @@ -31,7 +31,7 @@ | |||
| 31 | */ | 31 | */ |
| 32 | 32 | ||
| 33 | #ifdef STATIC | 33 | #ifdef STATIC |
| 34 | #include "lzo/lzo1x_decompress.c" | 34 | #include "lzo/lzo1x_decompress_safe.c" |
| 35 | #else | 35 | #else |
| 36 | #include <linux/decompress/unlzo.h> | 36 | #include <linux/decompress/unlzo.h> |
| 37 | #endif | 37 | #endif |
diff --git a/lib/devres.c b/lib/devres.c
index 88ad75952a76..823533138fa0 100644
--- a/lib/devres.c
+++ b/lib/devres.c
| @@ -227,6 +227,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr) | |||
| 227 | devm_ioport_map_match, (void *)addr)); | 227 | devm_ioport_map_match, (void *)addr)); |
| 228 | } | 228 | } |
| 229 | EXPORT_SYMBOL(devm_ioport_unmap); | 229 | EXPORT_SYMBOL(devm_ioport_unmap); |
| 230 | #endif /* CONFIG_HAS_IOPORT */ | ||
| 230 | 231 | ||
| 231 | #ifdef CONFIG_PCI | 232 | #ifdef CONFIG_PCI |
| 232 | /* | 233 | /* |
| @@ -432,4 +433,3 @@ void pcim_iounmap_regions(struct pci_dev *pdev, int mask) | |||
| 432 | } | 433 | } |
| 433 | EXPORT_SYMBOL(pcim_iounmap_regions); | 434 | EXPORT_SYMBOL(pcim_iounmap_regions); |
| 434 | #endif /* CONFIG_PCI */ | 435 | #endif /* CONFIG_PCI */ |
| 435 | #endif /* CONFIG_HAS_IOPORT */ | ||
diff --git a/lib/idr.c b/lib/idr.c
--- a/lib/idr.c
+++ b/lib/idr.c
| @@ -35,10 +35,41 @@ | |||
| 35 | #include <linux/string.h> | 35 | #include <linux/string.h> |
| 36 | #include <linux/idr.h> | 36 | #include <linux/idr.h> |
| 37 | #include <linux/spinlock.h> | 37 | #include <linux/spinlock.h> |
| 38 | #include <linux/percpu.h> | ||
| 39 | #include <linux/hardirq.h> | ||
| 40 | |||
| 41 | #define MAX_IDR_SHIFT (sizeof(int) * 8 - 1) | ||
| 42 | #define MAX_IDR_BIT (1U << MAX_IDR_SHIFT) | ||
| 43 | |||
| 44 | /* Leave the possibility of an incomplete final layer */ | ||
| 45 | #define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS) | ||
| 46 | |||
| 47 | /* Number of id_layer structs to leave in free list */ | ||
| 48 | #define MAX_IDR_FREE (MAX_IDR_LEVEL * 2) | ||
| 38 | 49 | ||
| 39 | static struct kmem_cache *idr_layer_cache; | 50 | static struct kmem_cache *idr_layer_cache; |
| 51 | static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head); | ||
| 52 | static DEFINE_PER_CPU(int, idr_preload_cnt); | ||
| 40 | static DEFINE_SPINLOCK(simple_ida_lock); | 53 | static DEFINE_SPINLOCK(simple_ida_lock); |
| 41 | 54 | ||
| 55 | /* the maximum ID which can be allocated given idr->layers */ | ||
| 56 | static int idr_max(int layers) | ||
| 57 | { | ||
| 58 | int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT); | ||
| 59 | |||
| 60 | return (1 << bits) - 1; | ||
| 61 | } | ||
| 62 | |||
| 63 | /* | ||
| 64 | * Prefix mask for an idr_layer at @layer. For layer 0, the prefix mask is | ||
| 65 | * all bits except for the lower IDR_BITS. For layer 1, 2 * IDR_BITS, and | ||
| 66 | * so on. | ||
| 67 | */ | ||
| 68 | static int idr_layer_prefix_mask(int layer) | ||
| 69 | { | ||
| 70 | return ~idr_max(layer + 1); | ||
| 71 | } | ||
| 72 | |||
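A quick worked example of the two helpers just added, assuming IDR_BITS == 8 (i.e. 256 slots per idr_layer):

/*
 * idr_max(1) = (1 <<  8) - 1 = 0xff     largest id a 1-level tree can hold
 * idr_max(2) = (1 << 16) - 1 = 0xffff   largest id a 2-level tree can hold
 *
 * idr_layer_prefix_mask(0) = ~idr_max(1) = ~0xff
 * idr_layer_prefix_mask(1) = ~idr_max(2) = ~0xffff
 *
 * A leaf layer's ->prefix therefore records the high bits shared by every
 * id it covers, which is what lets a lookup test a cached layer (the
 * ->hint introduced later in this patch) without walking down from the top.
 */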
| 42 | static struct idr_layer *get_from_free_list(struct idr *idp) | 73 | static struct idr_layer *get_from_free_list(struct idr *idp) |
| 43 | { | 74 | { |
| 44 | struct idr_layer *p; | 75 | struct idr_layer *p; |
| @@ -54,6 +85,50 @@ static struct idr_layer *get_from_free_list(struct idr *idp) | |||
| 54 | return(p); | 85 | return(p); |
| 55 | } | 86 | } |
| 56 | 87 | ||
| 88 | /** | ||
| 89 | * idr_layer_alloc - allocate a new idr_layer | ||
| 90 | * @gfp_mask: allocation mask | ||
| 91 | * @layer_idr: optional idr to allocate from | ||
| 92 | * | ||
| 93 | * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch | ||
| 94 | * one from the per-cpu preload buffer. If @layer_idr is not %NULL, fetch | ||
| 95 | * an idr_layer from @idr->id_free. | ||
| 96 | * | ||
| 97 | * @layer_idr is to maintain backward compatibility with the old alloc | ||
| 98 | * interface - idr_pre_get() and idr_get_new*() - and will be removed | ||
| 99 | * together with per-pool preload buffer. | ||
| 100 | */ | ||
| 101 | static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr) | ||
| 102 | { | ||
| 103 | struct idr_layer *new; | ||
| 104 | |||
| 105 | /* this is the old path, bypass to get_from_free_list() */ | ||
| 106 | if (layer_idr) | ||
| 107 | return get_from_free_list(layer_idr); | ||
| 108 | |||
| 109 | /* try to allocate directly from kmem_cache */ | ||
| 110 | new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); | ||
| 111 | if (new) | ||
| 112 | return new; | ||
| 113 | |||
| 114 | /* | ||
| 115 | * Try to fetch one from the per-cpu preload buffer if in process | ||
| 116 | * context. See idr_preload() for details. | ||
| 117 | */ | ||
| 118 | if (in_interrupt()) | ||
| 119 | return NULL; | ||
| 120 | |||
| 121 | preempt_disable(); | ||
| 122 | new = __this_cpu_read(idr_preload_head); | ||
| 123 | if (new) { | ||
| 124 | __this_cpu_write(idr_preload_head, new->ary[0]); | ||
| 125 | __this_cpu_dec(idr_preload_cnt); | ||
| 126 | new->ary[0] = NULL; | ||
| 127 | } | ||
| 128 | preempt_enable(); | ||
| 129 | return new; | ||
| 130 | } | ||
| 131 | |||
| 57 | static void idr_layer_rcu_free(struct rcu_head *head) | 132 | static void idr_layer_rcu_free(struct rcu_head *head) |
| 58 | { | 133 | { |
| 59 | struct idr_layer *layer; | 134 | struct idr_layer *layer; |
| @@ -62,8 +137,10 @@ static void idr_layer_rcu_free(struct rcu_head *head) | |||
| 62 | kmem_cache_free(idr_layer_cache, layer); | 137 | kmem_cache_free(idr_layer_cache, layer); |
| 63 | } | 138 | } |
| 64 | 139 | ||
| 65 | static inline void free_layer(struct idr_layer *p) | 140 | static inline void free_layer(struct idr *idr, struct idr_layer *p) |
| 66 | { | 141 | { |
| 142 | if (idr->hint && idr->hint == p) | ||
| 143 | RCU_INIT_POINTER(idr->hint, NULL); | ||
| 67 | call_rcu(&p->rcu_head, idr_layer_rcu_free); | 144 | call_rcu(&p->rcu_head, idr_layer_rcu_free); |
| 68 | } | 145 | } |
| 69 | 146 | ||
| @@ -92,18 +169,18 @@ static void idr_mark_full(struct idr_layer **pa, int id) | |||
| 92 | struct idr_layer *p = pa[0]; | 169 | struct idr_layer *p = pa[0]; |
| 93 | int l = 0; | 170 | int l = 0; |
| 94 | 171 | ||
| 95 | __set_bit(id & IDR_MASK, &p->bitmap); | 172 | __set_bit(id & IDR_MASK, p->bitmap); |
| 96 | /* | 173 | /* |
| 97 | * If this layer is full mark the bit in the layer above to | 174 | * If this layer is full mark the bit in the layer above to |
| 98 | * show that this part of the radix tree is full. This may | 175 | * show that this part of the radix tree is full. This may |
| 99 | * complete the layer above and require walking up the radix | 176 | * complete the layer above and require walking up the radix |
| 100 | * tree. | 177 | * tree. |
| 101 | */ | 178 | */ |
| 102 | while (p->bitmap == IDR_FULL) { | 179 | while (bitmap_full(p->bitmap, IDR_SIZE)) { |
| 103 | if (!(p = pa[++l])) | 180 | if (!(p = pa[++l])) |
| 104 | break; | 181 | break; |
| 105 | id = id >> IDR_BITS; | 182 | id = id >> IDR_BITS; |
| 106 | __set_bit((id & IDR_MASK), &p->bitmap); | 183 | __set_bit((id & IDR_MASK), p->bitmap); |
| 107 | } | 184 | } |
| 108 | } | 185 | } |
| 109 | 186 | ||
| @@ -133,12 +210,29 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask) | |||
| 133 | } | 210 | } |
| 134 | EXPORT_SYMBOL(idr_pre_get); | 211 | EXPORT_SYMBOL(idr_pre_get); |
| 135 | 212 | ||
| 136 | static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) | 213 | /** |
| 214 | * sub_alloc - try to allocate an id without growing the tree depth | ||
| 215 | * @idp: idr handle | ||
| 216 | * @starting_id: id to start search at | ||
| 217 | * @id: pointer to the allocated handle | ||
| 218 | * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer | ||
| 219 | * @gfp_mask: allocation mask for idr_layer_alloc() | ||
| 220 | * @layer_idr: optional idr passed to idr_layer_alloc() | ||
| 221 | * | ||
| 222 | * Allocate an id in range [@starting_id, INT_MAX] from @idp without | ||
| 223 | * growing its depth. Returns | ||
| 224 | * | ||
| 225 | * the allocated id >= 0 if successful, | ||
| 226 | * -EAGAIN if the tree needs to grow for allocation to succeed, | ||
| 227 | * -ENOSPC if the id space is exhausted, | ||
| 228 | * -ENOMEM if more idr_layers need to be allocated. | ||
| 229 | */ | ||
| 230 | static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa, | ||
| 231 | gfp_t gfp_mask, struct idr *layer_idr) | ||
| 137 | { | 232 | { |
| 138 | int n, m, sh; | 233 | int n, m, sh; |
| 139 | struct idr_layer *p, *new; | 234 | struct idr_layer *p, *new; |
| 140 | int l, id, oid; | 235 | int l, id, oid; |
| 141 | unsigned long bm; | ||
| 142 | 236 | ||
| 143 | id = *starting_id; | 237 | id = *starting_id; |
| 144 | restart: | 238 | restart: |
| @@ -150,8 +244,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) | |||
| 150 | * We run around this while until we reach the leaf node... | 244 | * We run around this while until we reach the leaf node... |
| 151 | */ | 245 | */ |
| 152 | n = (id >> (IDR_BITS*l)) & IDR_MASK; | 246 | n = (id >> (IDR_BITS*l)) & IDR_MASK; |
| 153 | bm = ~p->bitmap; | 247 | m = find_next_zero_bit(p->bitmap, IDR_SIZE, n); |
| 154 | m = find_next_bit(&bm, IDR_SIZE, n); | ||
| 155 | if (m == IDR_SIZE) { | 248 | if (m == IDR_SIZE) { |
| 156 | /* no space available go back to previous layer. */ | 249 | /* no space available go back to previous layer. */ |
| 157 | l++; | 250 | l++; |
| @@ -161,7 +254,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) | |||
| 161 | /* if already at the top layer, we need to grow */ | 254 | /* if already at the top layer, we need to grow */ |
| 162 | if (id >= 1 << (idp->layers * IDR_BITS)) { | 255 | if (id >= 1 << (idp->layers * IDR_BITS)) { |
| 163 | *starting_id = id; | 256 | *starting_id = id; |
| 164 | return IDR_NEED_TO_GROW; | 257 | return -EAGAIN; |
| 165 | } | 258 | } |
| 166 | p = pa[l]; | 259 | p = pa[l]; |
| 167 | BUG_ON(!p); | 260 | BUG_ON(!p); |
| @@ -180,17 +273,18 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) | |||
| 180 | id = ((id >> sh) ^ n ^ m) << sh; | 273 | id = ((id >> sh) ^ n ^ m) << sh; |
| 181 | } | 274 | } |
| 182 | if ((id >= MAX_IDR_BIT) || (id < 0)) | 275 | if ((id >= MAX_IDR_BIT) || (id < 0)) |
| 183 | return IDR_NOMORE_SPACE; | 276 | return -ENOSPC; |
| 184 | if (l == 0) | 277 | if (l == 0) |
| 185 | break; | 278 | break; |
| 186 | /* | 279 | /* |
| 187 | * Create the layer below if it is missing. | 280 | * Create the layer below if it is missing. |
| 188 | */ | 281 | */ |
| 189 | if (!p->ary[m]) { | 282 | if (!p->ary[m]) { |
| 190 | new = get_from_free_list(idp); | 283 | new = idr_layer_alloc(gfp_mask, layer_idr); |
| 191 | if (!new) | 284 | if (!new) |
| 192 | return -1; | 285 | return -ENOMEM; |
| 193 | new->layer = l-1; | 286 | new->layer = l-1; |
| 287 | new->prefix = id & idr_layer_prefix_mask(new->layer); | ||
| 194 | rcu_assign_pointer(p->ary[m], new); | 288 | rcu_assign_pointer(p->ary[m], new); |
| 195 | p->count++; | 289 | p->count++; |
| 196 | } | 290 | } |
| @@ -203,7 +297,8 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) | |||
| 203 | } | 297 | } |
| 204 | 298 | ||
| 205 | static int idr_get_empty_slot(struct idr *idp, int starting_id, | 299 | static int idr_get_empty_slot(struct idr *idp, int starting_id, |
| 206 | struct idr_layer **pa) | 300 | struct idr_layer **pa, gfp_t gfp_mask, |
| 301 | struct idr *layer_idr) | ||
| 207 | { | 302 | { |
| 208 | struct idr_layer *p, *new; | 303 | struct idr_layer *p, *new; |
| 209 | int layers, v, id; | 304 | int layers, v, id; |
| @@ -214,8 +309,8 @@ build_up: | |||
| 214 | p = idp->top; | 309 | p = idp->top; |
| 215 | layers = idp->layers; | 310 | layers = idp->layers; |
| 216 | if (unlikely(!p)) { | 311 | if (unlikely(!p)) { |
| 217 | if (!(p = get_from_free_list(idp))) | 312 | if (!(p = idr_layer_alloc(gfp_mask, layer_idr))) |
| 218 | return -1; | 313 | return -ENOMEM; |
| 219 | p->layer = 0; | 314 | p->layer = 0; |
| 220 | layers = 1; | 315 | layers = 1; |
| 221 | } | 316 | } |
| @@ -223,7 +318,7 @@ build_up: | |||
| 223 | * Add a new layer to the top of the tree if the requested | 318 | * Add a new layer to the top of the tree if the requested |
| 224 | * id is larger than the currently allocated space. | 319 | * id is larger than the currently allocated space. |
| 225 | */ | 320 | */ |
| 226 | while ((layers < (MAX_IDR_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) { | 321 | while (id > idr_max(layers)) { |
| 227 | layers++; | 322 | layers++; |
| 228 | if (!p->count) { | 323 | if (!p->count) { |
| 229 | /* special case: if the tree is currently empty, | 324 | /* special case: if the tree is currently empty, |
| @@ -231,9 +326,10 @@ build_up: | |||
| 231 | * upwards. | 326 | * upwards. |
| 232 | */ | 327 | */ |
| 233 | p->layer++; | 328 | p->layer++; |
| 329 | WARN_ON_ONCE(p->prefix); | ||
| 234 | continue; | 330 | continue; |
| 235 | } | 331 | } |
| 236 | if (!(new = get_from_free_list(idp))) { | 332 | if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) { |
| 237 | /* | 333 | /* |
| 238 | * The allocation failed. If we built part of | 334 | * The allocation failed. If we built part of |
| 239 | * the structure tear it down. | 335 | * the structure tear it down. |
| @@ -242,45 +338,42 @@ build_up: | |||
| 242 | for (new = p; p && p != idp->top; new = p) { | 338 | for (new = p; p && p != idp->top; new = p) { |
| 243 | p = p->ary[0]; | 339 | p = p->ary[0]; |
| 244 | new->ary[0] = NULL; | 340 | new->ary[0] = NULL; |
| 245 | new->bitmap = new->count = 0; | 341 | new->count = 0; |
| 342 | bitmap_clear(new->bitmap, 0, IDR_SIZE); | ||
| 246 | __move_to_free_list(idp, new); | 343 | __move_to_free_list(idp, new); |
| 247 | } | 344 | } |
| 248 | spin_unlock_irqrestore(&idp->lock, flags); | 345 | spin_unlock_irqrestore(&idp->lock, flags); |
| 249 | return -1; | 346 | return -ENOMEM; |
| 250 | } | 347 | } |
| 251 | new->ary[0] = p; | 348 | new->ary[0] = p; |
| 252 | new->count = 1; | 349 | new->count = 1; |
| 253 | new->layer = layers-1; | 350 | new->layer = layers-1; |
| 254 | if (p->bitmap == IDR_FULL) | 351 | new->prefix = id & idr_layer_prefix_mask(new->layer); |
| 255 | __set_bit(0, &new->bitmap); | 352 | if (bitmap_full(p->bitmap, IDR_SIZE)) |
| 353 | __set_bit(0, new->bitmap); | ||
| 256 | p = new; | 354 | p = new; |
| 257 | } | 355 | } |
| 258 | rcu_assign_pointer(idp->top, p); | 356 | rcu_assign_pointer(idp->top, p); |
| 259 | idp->layers = layers; | 357 | idp->layers = layers; |
| 260 | v = sub_alloc(idp, &id, pa); | 358 | v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr); |
| 261 | if (v == IDR_NEED_TO_GROW) | 359 | if (v == -EAGAIN) |
| 262 | goto build_up; | 360 | goto build_up; |
| 263 | return(v); | 361 | return(v); |
| 264 | } | 362 | } |
| 265 | 363 | ||
| 266 | static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) | 364 | /* |
| 365 | * @id and @pa are from a successful allocation from idr_get_empty_slot(). | ||
| 366 | * Install the user pointer @ptr and mark the slot full. | ||
| 367 | */ | ||
| 368 | static void idr_fill_slot(struct idr *idr, void *ptr, int id, | ||
| 369 | struct idr_layer **pa) | ||
| 267 | { | 370 | { |
| 268 | struct idr_layer *pa[MAX_IDR_LEVEL]; | 371 | /* update hint used for lookup, cleared from free_layer() */ |
| 269 | int id; | 372 | rcu_assign_pointer(idr->hint, pa[0]); |
| 270 | |||
| 271 | id = idr_get_empty_slot(idp, starting_id, pa); | ||
| 272 | if (id >= 0) { | ||
| 273 | /* | ||
| 274 | * Successfully found an empty slot. Install the user | ||
| 275 | * pointer and mark the slot full. | ||
| 276 | */ | ||
| 277 | rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], | ||
| 278 | (struct idr_layer *)ptr); | ||
| 279 | pa[0]->count++; | ||
| 280 | idr_mark_full(pa, id); | ||
| 281 | } | ||
| 282 | 373 | ||
| 283 | return id; | 374 | rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr); |
| 375 | pa[0]->count++; | ||
| 376 | idr_mark_full(pa, id); | ||
| 284 | } | 377 | } |
| 285 | 378 | ||
| 286 | /** | 379 | /** |
| @@ -303,49 +396,124 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) | |||
| 303 | */ | 396 | */ |
| 304 | int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) | 397 | int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) |
| 305 | { | 398 | { |
| 399 | struct idr_layer *pa[MAX_IDR_LEVEL + 1]; | ||
| 306 | int rv; | 400 | int rv; |
| 307 | 401 | ||
| 308 | rv = idr_get_new_above_int(idp, ptr, starting_id); | 402 | rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp); |
| 309 | /* | ||
| 310 | * This is a cheap hack until the IDR code can be fixed to | ||
| 311 | * return proper error values. | ||
| 312 | */ | ||
| 313 | if (rv < 0) | 403 | if (rv < 0) |
| 314 | return _idr_rc_to_errno(rv); | 404 | return rv == -ENOMEM ? -EAGAIN : rv; |
| 405 | |||
| 406 | idr_fill_slot(idp, ptr, rv, pa); | ||
| 315 | *id = rv; | 407 | *id = rv; |
| 316 | return 0; | 408 | return 0; |
| 317 | } | 409 | } |
| 318 | EXPORT_SYMBOL(idr_get_new_above); | 410 | EXPORT_SYMBOL(idr_get_new_above); |
| 319 | 411 | ||
| 320 | /** | 412 | /** |
| 321 | * idr_get_new - allocate new idr entry | 413 | * idr_preload - preload for idr_alloc() |
| 322 | * @idp: idr handle | 414 | * @gfp_mask: allocation mask to use for preloading |
| 323 | * @ptr: pointer you want associated with the id | ||
| 324 | * @id: pointer to the allocated handle | ||
| 325 | * | 415 | * |
| 326 | * If allocation from IDR's private freelist fails, idr_get_new_above() will | 416 | * Preload per-cpu layer buffer for idr_alloc(). Can only be used from |
| 327 | * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill | 417 | * process context and each idr_preload() invocation should be matched with |
| 328 | * IDR's preallocation and then retry the idr_get_new_above() call. | 418 | * idr_preload_end(). Note that preemption is disabled while preloaded. |
| 329 | * | 419 | * |
| 330 | * If the idr is full idr_get_new_above() will return %-ENOSPC. | 420 | * The first idr_alloc() in the preloaded section can be treated as if it |
| 421 | * were invoked with @gfp_mask used for preloading. This allows using more | ||
| 422 | * permissive allocation masks for idrs protected by spinlocks. | ||
| 423 | * | ||
| 424 | * For example, if idr_alloc() below fails, the failure can be treated as | ||
| 425 | * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT. | ||
| 426 | * | ||
| 427 | * idr_preload(GFP_KERNEL); | ||
| 428 | * spin_lock(lock); | ||
| 331 | * | 429 | * |
| 332 | * @id returns a value in the range %0 ... %0x7fffffff | 430 | * id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT); |
| 431 | * | ||
| 432 | * spin_unlock(lock); | ||
| 433 | * idr_preload_end(); | ||
| 434 | * if (id < 0) | ||
| 435 | * error; | ||
| 333 | */ | 436 | */ |
| 334 | int idr_get_new(struct idr *idp, void *ptr, int *id) | 437 | void idr_preload(gfp_t gfp_mask) |
| 335 | { | 438 | { |
| 336 | int rv; | 439 | /* |
| 440 | * Consuming preload buffer from non-process context breaks preload | ||
| 441 | * allocation guarantee. Disallow usage from those contexts. | ||
| 442 | */ | ||
| 443 | WARN_ON_ONCE(in_interrupt()); | ||
| 444 | might_sleep_if(gfp_mask & __GFP_WAIT); | ||
| 445 | |||
| 446 | preempt_disable(); | ||
| 337 | 447 | ||
| 338 | rv = idr_get_new_above_int(idp, ptr, 0); | ||
| 339 | /* | 448 | /* |
| 340 | * This is a cheap hack until the IDR code can be fixed to | 449 | * idr_alloc() is likely to succeed w/o full idr_layer buffer and |
| 341 | * return proper error values. | 450 | * return value from idr_alloc() needs to be checked for failure |
| 451 | * anyway. Silently give up if allocation fails. The caller can | ||
| 452 | * treat failures from idr_alloc() as if idr_alloc() were called | ||
| 453 | * with @gfp_mask which should be enough. | ||
| 342 | */ | 454 | */ |
| 343 | if (rv < 0) | 455 | while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) { |
| 344 | return _idr_rc_to_errno(rv); | 456 | struct idr_layer *new; |
| 345 | *id = rv; | 457 | |
| 346 | return 0; | 458 | preempt_enable(); |
| 459 | new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); | ||
| 460 | preempt_disable(); | ||
| 461 | if (!new) | ||
| 462 | break; | ||
| 463 | |||
| 464 | /* link the new one to per-cpu preload list */ | ||
| 465 | new->ary[0] = __this_cpu_read(idr_preload_head); | ||
| 466 | __this_cpu_write(idr_preload_head, new); | ||
| 467 | __this_cpu_inc(idr_preload_cnt); | ||
| 468 | } | ||
| 347 | } | 469 | } |
| 348 | EXPORT_SYMBOL(idr_get_new); | 470 | EXPORT_SYMBOL(idr_preload); |
| 471 | |||
| 472 | /** | ||
| 473 | * idr_alloc - allocate new idr entry | ||
| 474 | * @idr: the (initialized) idr | ||
| 475 | * @ptr: pointer to be associated with the new id | ||
| 476 | * @start: the minimum id (inclusive) | ||
| 477 | * @end: the maximum id (exclusive, <= 0 for max) | ||
| 478 | * @gfp_mask: memory allocation flags | ||
| 479 | * | ||
| 480 | * Allocate an id in [start, end) and associate it with @ptr. If no ID is | ||
| 481 | * available in the specified range, returns -ENOSPC. On memory allocation | ||
| 482 | * failure, returns -ENOMEM. | ||
| 483 | * | ||
| 484 | * Note that @end is treated as max when <= 0. This is to always allow | ||
| 485 | * using @start + N as @end as long as N is inside integer range. | ||
| 486 | * | ||
| 487 | * The user is responsible for exclusively synchronizing all operations | ||
| 488 | * which may modify @idr. However, read-only accesses such as idr_find() | ||
| 489 | * or iteration can be performed under RCU read lock provided the user | ||
| 490 | * destroys @ptr in RCU-safe way after removal from idr. | ||
| 491 | */ | ||
| 492 | int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask) | ||
| 493 | { | ||
| 494 | int max = end > 0 ? end - 1 : INT_MAX; /* inclusive upper limit */ | ||
| 495 | struct idr_layer *pa[MAX_IDR_LEVEL + 1]; | ||
| 496 | int id; | ||
| 497 | |||
| 498 | might_sleep_if(gfp_mask & __GFP_WAIT); | ||
| 499 | |||
| 500 | /* sanity checks */ | ||
| 501 | if (WARN_ON_ONCE(start < 0)) | ||
| 502 | return -EINVAL; | ||
| 503 | if (unlikely(max < start)) | ||
| 504 | return -ENOSPC; | ||
| 505 | |||
| 506 | /* allocate id */ | ||
| 507 | id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL); | ||
| 508 | if (unlikely(id < 0)) | ||
| 509 | return id; | ||
| 510 | if (unlikely(id > max)) | ||
| 511 | return -ENOSPC; | ||
| 512 | |||
| 513 | idr_fill_slot(idr, ptr, id, pa); | ||
| 514 | return id; | ||
| 515 | } | ||
| 516 | EXPORT_SYMBOL_GPL(idr_alloc); | ||
| 349 | 517 | ||
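A minimal usage sketch of the new allocation interface defined above (the mydev structure and the idr/lock names are hypothetical):

static DEFINE_IDR(mydev_idr);
static DEFINE_SPINLOCK(mydev_lock);

static int mydev_assign_id(struct mydev *dev)
{
	int id;

	idr_preload(GFP_KERNEL);	/* may sleep; fills the per-cpu layer cache */
	spin_lock(&mydev_lock);

	/* ids come from [1, 100); a failure here may be treated as GFP_KERNEL */
	id = idr_alloc(&mydev_idr, dev, 1, 100, GFP_NOWAIT);

	spin_unlock(&mydev_lock);
	idr_preload_end();

	if (id < 0)
		return id;		/* -ENOMEM or -ENOSPC */
	dev->id = id;
	return 0;
}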
| 350 | static void idr_remove_warning(int id) | 518 | static void idr_remove_warning(int id) |
| 351 | { | 519 | { |
| @@ -357,7 +525,7 @@ static void idr_remove_warning(int id) | |||
| 357 | static void sub_remove(struct idr *idp, int shift, int id) | 525 | static void sub_remove(struct idr *idp, int shift, int id) |
| 358 | { | 526 | { |
| 359 | struct idr_layer *p = idp->top; | 527 | struct idr_layer *p = idp->top; |
| 360 | struct idr_layer **pa[MAX_IDR_LEVEL]; | 528 | struct idr_layer **pa[MAX_IDR_LEVEL + 1]; |
| 361 | struct idr_layer ***paa = &pa[0]; | 529 | struct idr_layer ***paa = &pa[0]; |
| 362 | struct idr_layer *to_free; | 530 | struct idr_layer *to_free; |
| 363 | int n; | 531 | int n; |
| @@ -367,26 +535,26 @@ static void sub_remove(struct idr *idp, int shift, int id) | |||
| 367 | 535 | ||
| 368 | while ((shift > 0) && p) { | 536 | while ((shift > 0) && p) { |
| 369 | n = (id >> shift) & IDR_MASK; | 537 | n = (id >> shift) & IDR_MASK; |
| 370 | __clear_bit(n, &p->bitmap); | 538 | __clear_bit(n, p->bitmap); |
| 371 | *++paa = &p->ary[n]; | 539 | *++paa = &p->ary[n]; |
| 372 | p = p->ary[n]; | 540 | p = p->ary[n]; |
| 373 | shift -= IDR_BITS; | 541 | shift -= IDR_BITS; |
| 374 | } | 542 | } |
| 375 | n = id & IDR_MASK; | 543 | n = id & IDR_MASK; |
| 376 | if (likely(p != NULL && test_bit(n, &p->bitmap))){ | 544 | if (likely(p != NULL && test_bit(n, p->bitmap))) { |
| 377 | __clear_bit(n, &p->bitmap); | 545 | __clear_bit(n, p->bitmap); |
| 378 | rcu_assign_pointer(p->ary[n], NULL); | 546 | rcu_assign_pointer(p->ary[n], NULL); |
| 379 | to_free = NULL; | 547 | to_free = NULL; |
| 380 | while(*paa && ! --((**paa)->count)){ | 548 | while(*paa && ! --((**paa)->count)){ |
| 381 | if (to_free) | 549 | if (to_free) |
| 382 | free_layer(to_free); | 550 | free_layer(idp, to_free); |
| 383 | to_free = **paa; | 551 | to_free = **paa; |
| 384 | **paa-- = NULL; | 552 | **paa-- = NULL; |
| 385 | } | 553 | } |
| 386 | if (!*paa) | 554 | if (!*paa) |
| 387 | idp->layers = 0; | 555 | idp->layers = 0; |
| 388 | if (to_free) | 556 | if (to_free) |
| 389 | free_layer(to_free); | 557 | free_layer(idp, to_free); |
| 390 | } else | 558 | } else |
| 391 | idr_remove_warning(id); | 559 | idr_remove_warning(id); |
| 392 | } | 560 | } |
| @@ -401,8 +569,8 @@ void idr_remove(struct idr *idp, int id) | |||
| 401 | struct idr_layer *p; | 569 | struct idr_layer *p; |
| 402 | struct idr_layer *to_free; | 570 | struct idr_layer *to_free; |
| 403 | 571 | ||
| 404 | /* Mask off upper bits we don't use for the search. */ | 572 | if (id < 0) |
| 405 | id &= MAX_IDR_MASK; | 573 | return; |
| 406 | 574 | ||
| 407 | sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); | 575 | sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); |
| 408 | if (idp->top && idp->top->count == 1 && (idp->layers > 1) && | 576 | if (idp->top && idp->top->count == 1 && (idp->layers > 1) && |
| @@ -417,8 +585,9 @@ void idr_remove(struct idr *idp, int id) | |||
| 417 | p = idp->top->ary[0]; | 585 | p = idp->top->ary[0]; |
| 418 | rcu_assign_pointer(idp->top, p); | 586 | rcu_assign_pointer(idp->top, p); |
| 419 | --idp->layers; | 587 | --idp->layers; |
| 420 | to_free->bitmap = to_free->count = 0; | 588 | to_free->count = 0; |
| 421 | free_layer(to_free); | 589 | bitmap_clear(to_free->bitmap, 0, IDR_SIZE); |
| 590 | free_layer(idp, to_free); | ||
| 422 | } | 591 | } |
| 423 | while (idp->id_free_cnt >= MAX_IDR_FREE) { | 592 | while (idp->id_free_cnt >= MAX_IDR_FREE) { |
| 424 | p = get_from_free_list(idp); | 593 | p = get_from_free_list(idp); |
| @@ -433,34 +602,21 @@ void idr_remove(struct idr *idp, int id) | |||
| 433 | } | 602 | } |
| 434 | EXPORT_SYMBOL(idr_remove); | 603 | EXPORT_SYMBOL(idr_remove); |
| 435 | 604 | ||
| 436 | /** | 605 | void __idr_remove_all(struct idr *idp) |
| 437 | * idr_remove_all - remove all ids from the given idr tree | ||
| 438 | * @idp: idr handle | ||
| 439 | * | ||
| 440 | * idr_destroy() only frees up unused, cached idp_layers, but this | ||
| 441 | * function will remove all id mappings and leave all idp_layers | ||
| 442 | * unused. | ||
| 443 | * | ||
| 444 | * A typical clean-up sequence for objects stored in an idr tree will | ||
| 445 | * use idr_for_each() to free all objects, if necessary, then | ||
| 446 | * idr_remove_all() to remove all ids, and idr_destroy() to free | ||
| 447 | * up the cached idr_layers. | ||
| 448 | */ | ||
| 449 | void idr_remove_all(struct idr *idp) | ||
| 450 | { | 606 | { |
| 451 | int n, id, max; | 607 | int n, id, max; |
| 452 | int bt_mask; | 608 | int bt_mask; |
| 453 | struct idr_layer *p; | 609 | struct idr_layer *p; |
| 454 | struct idr_layer *pa[MAX_IDR_LEVEL]; | 610 | struct idr_layer *pa[MAX_IDR_LEVEL + 1]; |
| 455 | struct idr_layer **paa = &pa[0]; | 611 | struct idr_layer **paa = &pa[0]; |
| 456 | 612 | ||
| 457 | n = idp->layers * IDR_BITS; | 613 | n = idp->layers * IDR_BITS; |
| 458 | p = idp->top; | 614 | p = idp->top; |
| 459 | rcu_assign_pointer(idp->top, NULL); | 615 | rcu_assign_pointer(idp->top, NULL); |
| 460 | max = 1 << n; | 616 | max = idr_max(idp->layers); |
| 461 | 617 | ||
| 462 | id = 0; | 618 | id = 0; |
| 463 | while (id < max) { | 619 | while (id >= 0 && id <= max) { |
| 464 | while (n > IDR_BITS && p) { | 620 | while (n > IDR_BITS && p) { |
| 465 | n -= IDR_BITS; | 621 | n -= IDR_BITS; |
| 466 | *paa++ = p; | 622 | *paa++ = p; |
| @@ -472,21 +628,32 @@ void idr_remove_all(struct idr *idp) | |||
| 472 | /* Get the highest bit that the above add changed from 0->1. */ | 628 | /* Get the highest bit that the above add changed from 0->1. */ |
| 473 | while (n < fls(id ^ bt_mask)) { | 629 | while (n < fls(id ^ bt_mask)) { |
| 474 | if (p) | 630 | if (p) |
| 475 | free_layer(p); | 631 | free_layer(idp, p); |
| 476 | n += IDR_BITS; | 632 | n += IDR_BITS; |
| 477 | p = *--paa; | 633 | p = *--paa; |
| 478 | } | 634 | } |
| 479 | } | 635 | } |
| 480 | idp->layers = 0; | 636 | idp->layers = 0; |
| 481 | } | 637 | } |
| 482 | EXPORT_SYMBOL(idr_remove_all); | 638 | EXPORT_SYMBOL(__idr_remove_all); |
| 483 | 639 | ||
| 484 | /** | 640 | /** |
| 485 | * idr_destroy - release all cached layers within an idr tree | 641 | * idr_destroy - release all cached layers within an idr tree |
| 486 | * @idp: idr handle | 642 | * @idp: idr handle |
| 643 | * | ||
| 644 | * Free all id mappings and all idp_layers. After this function, @idp is | ||
| 645 | * completely unused and can be freed / recycled. The caller is | ||
| 646 | * responsible for ensuring that no one else accesses @idp during or after | ||
| 647 | * idr_destroy(). | ||
| 648 | * | ||
| 649 | * A typical clean-up sequence for objects stored in an idr tree will use | ||
| 650 | * idr_for_each() to free all objects, if necessay, then idr_destroy() to | ||
| 651 | * free up the id mappings and cached idr_layers. | ||
| 487 | */ | 652 | */ |
| 488 | void idr_destroy(struct idr *idp) | 653 | void idr_destroy(struct idr *idp) |
| 489 | { | 654 | { |
| 655 | __idr_remove_all(idp); | ||
| 656 | |||
| 490 | while (idp->id_free_cnt) { | 657 | while (idp->id_free_cnt) { |
| 491 | struct idr_layer *p = get_from_free_list(idp); | 658 | struct idr_layer *p = get_from_free_list(idp); |
| 492 | kmem_cache_free(idr_layer_cache, p); | 659 | kmem_cache_free(idr_layer_cache, p); |
| @@ -494,32 +661,20 @@ void idr_destroy(struct idr *idp) | |||
| 494 | } | 661 | } |
| 495 | EXPORT_SYMBOL(idr_destroy); | 662 | EXPORT_SYMBOL(idr_destroy); |
| 496 | 663 | ||
| 497 | /** | 664 | void *idr_find_slowpath(struct idr *idp, int id) |
| 498 | * idr_find - return pointer for given id | ||
| 499 | * @idp: idr handle | ||
| 500 | * @id: lookup key | ||
| 501 | * | ||
| 502 | * Return the pointer given the id it has been registered with. A %NULL | ||
| 503 | * return indicates that @id is not valid or you passed %NULL in | ||
| 504 | * idr_get_new(). | ||
| 505 | * | ||
| 506 | * This function can be called under rcu_read_lock(), given that the leaf | ||
| 507 | * pointers lifetimes are correctly managed. | ||
| 508 | */ | ||
| 509 | void *idr_find(struct idr *idp, int id) | ||
| 510 | { | 665 | { |
| 511 | int n; | 666 | int n; |
| 512 | struct idr_layer *p; | 667 | struct idr_layer *p; |
| 513 | 668 | ||
| 669 | if (id < 0) | ||
| 670 | return NULL; | ||
| 671 | |||
| 514 | p = rcu_dereference_raw(idp->top); | 672 | p = rcu_dereference_raw(idp->top); |
| 515 | if (!p) | 673 | if (!p) |
| 516 | return NULL; | 674 | return NULL; |
| 517 | n = (p->layer+1) * IDR_BITS; | 675 | n = (p->layer+1) * IDR_BITS; |
| 518 | 676 | ||
| 519 | /* Mask off upper bits we don't use for the search. */ | 677 | if (id > idr_max(p->layer + 1)) |
| 520 | id &= MAX_IDR_MASK; | ||
| 521 | |||
| 522 | if (id >= (1 << n)) | ||
| 523 | return NULL; | 678 | return NULL; |
| 524 | BUG_ON(n == 0); | 679 | BUG_ON(n == 0); |
| 525 | 680 | ||
| @@ -530,7 +685,7 @@ void *idr_find(struct idr *idp, int id) | |||
| 530 | } | 685 | } |
| 531 | return((void *)p); | 686 | return((void *)p); |
| 532 | } | 687 | } |
| 533 | EXPORT_SYMBOL(idr_find); | 688 | EXPORT_SYMBOL(idr_find_slowpath); |
| 534 | 689 | ||
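idr_find_slowpath() above is the fallback half of the lookup; the fast half lives as an inline in include/linux/idr.h and uses the ->hint/->prefix fields introduced earlier in this patch. Roughly (a sketch, not a verbatim copy of the header):

static inline void *example_idr_find(struct idr *idr, int id)
{
	struct idr_layer *hint = rcu_dereference_raw(idr->hint);

	/* hit: the cached leaf layer covers this id's IDR_MASK-sized range */
	if (hint && (id & ~IDR_MASK) == hint->prefix)
		return rcu_dereference_raw(hint->ary[id & IDR_MASK]);

	return idr_find_slowpath(idr, id);
}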
| 535 | /** | 690 | /** |
| 536 | * idr_for_each - iterate through all stored pointers | 691 | * idr_for_each - iterate through all stored pointers |
| @@ -555,15 +710,15 @@ int idr_for_each(struct idr *idp, | |||
| 555 | { | 710 | { |
| 556 | int n, id, max, error = 0; | 711 | int n, id, max, error = 0; |
| 557 | struct idr_layer *p; | 712 | struct idr_layer *p; |
| 558 | struct idr_layer *pa[MAX_IDR_LEVEL]; | 713 | struct idr_layer *pa[MAX_IDR_LEVEL + 1]; |
| 559 | struct idr_layer **paa = &pa[0]; | 714 | struct idr_layer **paa = &pa[0]; |
| 560 | 715 | ||
| 561 | n = idp->layers * IDR_BITS; | 716 | n = idp->layers * IDR_BITS; |
| 562 | p = rcu_dereference_raw(idp->top); | 717 | p = rcu_dereference_raw(idp->top); |
| 563 | max = 1 << n; | 718 | max = idr_max(idp->layers); |
| 564 | 719 | ||
| 565 | id = 0; | 720 | id = 0; |
| 566 | while (id < max) { | 721 | while (id >= 0 && id <= max) { |
| 567 | while (n > 0 && p) { | 722 | while (n > 0 && p) { |
| 568 | n -= IDR_BITS; | 723 | n -= IDR_BITS; |
| 569 | *paa++ = p; | 724 | *paa++ = p; |
| @@ -601,7 +756,7 @@ EXPORT_SYMBOL(idr_for_each); | |||
| 601 | */ | 756 | */ |
| 602 | void *idr_get_next(struct idr *idp, int *nextidp) | 757 | void *idr_get_next(struct idr *idp, int *nextidp) |
| 603 | { | 758 | { |
| 604 | struct idr_layer *p, *pa[MAX_IDR_LEVEL]; | 759 | struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1]; |
| 605 | struct idr_layer **paa = &pa[0]; | 760 | struct idr_layer **paa = &pa[0]; |
| 606 | int id = *nextidp; | 761 | int id = *nextidp; |
| 607 | int n, max; | 762 | int n, max; |
| @@ -611,9 +766,9 @@ void *idr_get_next(struct idr *idp, int *nextidp) | |||
| 611 | if (!p) | 766 | if (!p) |
| 612 | return NULL; | 767 | return NULL; |
| 613 | n = (p->layer + 1) * IDR_BITS; | 768 | n = (p->layer + 1) * IDR_BITS; |
| 614 | max = 1 << n; | 769 | max = idr_max(p->layer + 1); |
| 615 | 770 | ||
| 616 | while (id < max) { | 771 | while (id >= 0 && id <= max) { |
| 617 | while (n > 0 && p) { | 772 | while (n > 0 && p) { |
| 618 | n -= IDR_BITS; | 773 | n -= IDR_BITS; |
| 619 | *paa++ = p; | 774 | *paa++ = p; |
| @@ -625,7 +780,14 @@ void *idr_get_next(struct idr *idp, int *nextidp) | |||
| 625 | return p; | 780 | return p; |
| 626 | } | 781 | } |
| 627 | 782 | ||
| 628 | id += 1 << n; | 783 | /* |
| 784 | * Proceed to the next layer at the current level. Unlike | ||
| 785 | * idr_for_each(), @id isn't guaranteed to be aligned to | ||
| 786 | * layer boundary at this point and adding 1 << n may | ||
| 787 | * incorrectly skip IDs. Make sure we jump to the | ||
| 788 | * beginning of the next layer using round_up(). | ||
| 789 | */ | ||
| 790 | id = round_up(id + 1, 1 << n); | ||
| 629 | while (n < fls(id)) { | 791 | while (n < fls(id)) { |
| 630 | n += IDR_BITS; | 792 | n += IDR_BITS; |
| 631 | p = *--paa; | 793 | p = *--paa; |
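A concrete instance of the skip that the round_up() conversion above avoids, assuming IDR_BITS == 8 so that n == 8 at this point:

/*
 * Scan restarted at *nextidp == 0x101 while the subtree covering
 * ids 0x100-0x1ff turns out to be empty:
 *
 *   old:  id += 1 << n              -> 0x101 + 0x100 = 0x201, silently
 *                                      skipping a valid entry at id 0x200
 *   new:  id = round_up(id + 1, 1 << n)
 *                                   -> round_up(0x102, 0x100) = 0x200,
 *                                      exactly the first id of the next
 *                                      subtree
 */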
| @@ -653,14 +815,15 @@ void *idr_replace(struct idr *idp, void *ptr, int id) | |||
| 653 | int n; | 815 | int n; |
| 654 | struct idr_layer *p, *old_p; | 816 | struct idr_layer *p, *old_p; |
| 655 | 817 | ||
| 818 | if (id < 0) | ||
| 819 | return ERR_PTR(-EINVAL); | ||
| 820 | |||
| 656 | p = idp->top; | 821 | p = idp->top; |
| 657 | if (!p) | 822 | if (!p) |
| 658 | return ERR_PTR(-EINVAL); | 823 | return ERR_PTR(-EINVAL); |
| 659 | 824 | ||
| 660 | n = (p->layer+1) * IDR_BITS; | 825 | n = (p->layer+1) * IDR_BITS; |
| 661 | 826 | ||
| 662 | id &= MAX_IDR_MASK; | ||
| 663 | |||
| 664 | if (id >= (1 << n)) | 827 | if (id >= (1 << n)) |
| 665 | return ERR_PTR(-EINVAL); | 828 | return ERR_PTR(-EINVAL); |
| 666 | 829 | ||
| @@ -671,7 +834,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id) | |||
| 671 | } | 834 | } |
| 672 | 835 | ||
| 673 | n = id & IDR_MASK; | 836 | n = id & IDR_MASK; |
| 674 | if (unlikely(p == NULL || !test_bit(n, &p->bitmap))) | 837 | if (unlikely(p == NULL || !test_bit(n, p->bitmap))) |
| 675 | return ERR_PTR(-ENOENT); | 838 | return ERR_PTR(-ENOENT); |
| 676 | 839 | ||
| 677 | old_p = p->ary[n]; | 840 | old_p = p->ary[n]; |
| @@ -780,7 +943,7 @@ EXPORT_SYMBOL(ida_pre_get); | |||
| 780 | */ | 943 | */ |
| 781 | int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) | 944 | int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) |
| 782 | { | 945 | { |
| 783 | struct idr_layer *pa[MAX_IDR_LEVEL]; | 946 | struct idr_layer *pa[MAX_IDR_LEVEL + 1]; |
| 784 | struct ida_bitmap *bitmap; | 947 | struct ida_bitmap *bitmap; |
| 785 | unsigned long flags; | 948 | unsigned long flags; |
| 786 | int idr_id = starting_id / IDA_BITMAP_BITS; | 949 | int idr_id = starting_id / IDA_BITMAP_BITS; |
| @@ -789,9 +952,9 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) | |||
| 789 | 952 | ||
| 790 | restart: | 953 | restart: |
| 791 | /* get vacant slot */ | 954 | /* get vacant slot */ |
| 792 | t = idr_get_empty_slot(&ida->idr, idr_id, pa); | 955 | t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr); |
| 793 | if (t < 0) | 956 | if (t < 0) |
| 794 | return _idr_rc_to_errno(t); | 957 | return t == -ENOMEM ? -EAGAIN : t; |
| 795 | 958 | ||
| 796 | if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT) | 959 | if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT) |
| 797 | return -ENOSPC; | 960 | return -ENOSPC; |
| @@ -852,25 +1015,6 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) | |||
| 852 | EXPORT_SYMBOL(ida_get_new_above); | 1015 | EXPORT_SYMBOL(ida_get_new_above); |
| 853 | 1016 | ||
| 854 | /** | 1017 | /** |
| 855 | * ida_get_new - allocate new ID | ||
| 856 | * @ida: idr handle | ||
| 857 | * @p_id: pointer to the allocated handle | ||
| 858 | * | ||
| 859 | * Allocate new ID. It should be called with any required locks. | ||
| 860 | * | ||
| 861 | * If memory is required, it will return %-EAGAIN, you should unlock | ||
| 862 | * and go back to the idr_pre_get() call. If the idr is full, it will | ||
| 863 | * return %-ENOSPC. | ||
| 864 | * | ||
| 865 | * @p_id returns a value in the range %0 ... %0x7fffffff. | ||
| 866 | */ | ||
| 867 | int ida_get_new(struct ida *ida, int *p_id) | ||
| 868 | { | ||
| 869 | return ida_get_new_above(ida, 0, p_id); | ||
| 870 | } | ||
| 871 | EXPORT_SYMBOL(ida_get_new); | ||
| 872 | |||
| 873 | /** | ||
| 874 | * ida_remove - remove the given ID | 1018 | * ida_remove - remove the given ID |
| 875 | * @ida: ida handle | 1019 | * @ida: ida handle |
| 876 | * @id: ID to free | 1020 | * @id: ID to free |
| @@ -887,7 +1031,7 @@ void ida_remove(struct ida *ida, int id) | |||
| 887 | /* clear full bits while looking up the leaf idr_layer */ | 1031 | /* clear full bits while looking up the leaf idr_layer */ |
| 888 | while ((shift > 0) && p) { | 1032 | while ((shift > 0) && p) { |
| 889 | n = (idr_id >> shift) & IDR_MASK; | 1033 | n = (idr_id >> shift) & IDR_MASK; |
| 890 | __clear_bit(n, &p->bitmap); | 1034 | __clear_bit(n, p->bitmap); |
| 891 | p = p->ary[n]; | 1035 | p = p->ary[n]; |
| 892 | shift -= IDR_BITS; | 1036 | shift -= IDR_BITS; |
| 893 | } | 1037 | } |
| @@ -896,7 +1040,7 @@ void ida_remove(struct ida *ida, int id) | |||
| 896 | goto err; | 1040 | goto err; |
| 897 | 1041 | ||
| 898 | n = idr_id & IDR_MASK; | 1042 | n = idr_id & IDR_MASK; |
| 899 | __clear_bit(n, &p->bitmap); | 1043 | __clear_bit(n, p->bitmap); |
| 900 | 1044 | ||
| 901 | bitmap = (void *)p->ary[n]; | 1045 | bitmap = (void *)p->ary[n]; |
| 902 | if (!test_bit(offset, bitmap->bitmap)) | 1046 | if (!test_bit(offset, bitmap->bitmap)) |
| @@ -905,7 +1049,7 @@ void ida_remove(struct ida *ida, int id) | |||
| 905 | /* update bitmap and remove it if empty */ | 1049 | /* update bitmap and remove it if empty */ |
| 906 | __clear_bit(offset, bitmap->bitmap); | 1050 | __clear_bit(offset, bitmap->bitmap); |
| 907 | if (--bitmap->nr_busy == 0) { | 1051 | if (--bitmap->nr_busy == 0) { |
| 908 | __set_bit(n, &p->bitmap); /* to please idr_remove() */ | 1052 | __set_bit(n, p->bitmap); /* to please idr_remove() */ |
| 909 | idr_remove(&ida->idr, idr_id); | 1053 | idr_remove(&ida->idr, idr_id); |
| 910 | free_bitmap(ida, bitmap); | 1054 | free_bitmap(ida, bitmap); |
| 911 | } | 1055 | } |
diff --git a/lib/kfifo.c b/lib/kfifo.c
new file mode 100644
index 000000000000..7b7f83027b7b
--- /dev/null
+++ b/lib/kfifo.c
| @@ -0,0 +1,607 @@ | |||
| 1 | /* | ||
| 2 | * A generic kernel FIFO implementation | ||
| 3 | * | ||
| 4 | * Copyright (C) 2009/2010 Stefani Seibold <stefani@seibold.net> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License as published by | ||
| 8 | * the Free Software Foundation; either version 2 of the License, or | ||
| 9 | * (at your option) any later version. | ||
| 10 | * | ||
| 11 | * This program is distributed in the hope that it will be useful, | ||
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | * GNU General Public License for more details. | ||
| 15 | * | ||
| 16 | * You should have received a copy of the GNU General Public License | ||
| 17 | * along with this program; if not, write to the Free Software | ||
| 18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
| 19 | * | ||
| 20 | */ | ||
| 21 | |||
| 22 | #include <linux/kernel.h> | ||
| 23 | #include <linux/export.h> | ||
| 24 | #include <linux/slab.h> | ||
| 25 | #include <linux/err.h> | ||
| 26 | #include <linux/log2.h> | ||
| 27 | #include <linux/uaccess.h> | ||
| 28 | #include <linux/kfifo.h> | ||
| 29 | |||
| 30 | /* | ||
| 31 | * internal helper to calculate the unused elements in a fifo | ||
| 32 | */ | ||
| 33 | static inline unsigned int kfifo_unused(struct __kfifo *fifo) | ||
| 34 | { | ||
| 35 | return (fifo->mask + 1) - (fifo->in - fifo->out); | ||
| 36 | } | ||
| 37 | |||
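A worked example of the "let the indices wrap" arithmetic used here, for a fifo of 8 elements (mask == 7):

/*
 * in == 0xfffffffe, out == 0xfffffffa: both unsigned counters have long
 * since wrapped, yet in - out == 4 is still the number of used elements,
 * so kfifo_unused() returns (mask + 1) - (in - out) == 8 - 4 == 4 free
 * slots, with no modulo or explicit wrap handling needed.
 */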
| 38 | int __kfifo_alloc(struct __kfifo *fifo, unsigned int size, | ||
| 39 | size_t esize, gfp_t gfp_mask) | ||
| 40 | { | ||
| 41 | /* | ||
| 42 | * round down to the next power of 2, since our 'let the indices | ||
| 43 | * wrap' technique works only in this case. | ||
| 44 | */ | ||
| 45 | size = roundup_pow_of_two(size); | ||
| 46 | |||
| 47 | fifo->in = 0; | ||
| 48 | fifo->out = 0; | ||
| 49 | fifo->esize = esize; | ||
| 50 | |||
| 51 | if (size < 2) { | ||
| 52 | fifo->data = NULL; | ||
| 53 | fifo->mask = 0; | ||
| 54 | return -EINVAL; | ||
| 55 | } | ||
| 56 | |||
| 57 | fifo->data = kmalloc(size * esize, gfp_mask); | ||
| 58 | |||
| 59 | if (!fifo->data) { | ||
| 60 | fifo->mask = 0; | ||
| 61 | return -ENOMEM; | ||
| 62 | } | ||
| 63 | fifo->mask = size - 1; | ||
| 64 | |||
| 65 | return 0; | ||
| 66 | } | ||
| 67 | EXPORT_SYMBOL(__kfifo_alloc); | ||
| 68 | |||
| 69 | void __kfifo_free(struct __kfifo *fifo) | ||
| 70 | { | ||
| 71 | kfree(fifo->data); | ||
| 72 | fifo->in = 0; | ||
| 73 | fifo->out = 0; | ||
| 74 | fifo->esize = 0; | ||
| 75 | fifo->data = NULL; | ||
| 76 | fifo->mask = 0; | ||
| 77 | } | ||
| 78 | EXPORT_SYMBOL(__kfifo_free); | ||
| 79 | |||
| 80 | int __kfifo_init(struct __kfifo *fifo, void *buffer, | ||
| 81 | unsigned int size, size_t esize) | ||
| 82 | { | ||
| 83 | size /= esize; | ||
| 84 | |||
| 85 | size = roundup_pow_of_two(size); | ||
| 86 | |||
| 87 | fifo->in = 0; | ||
| 88 | fifo->out = 0; | ||
| 89 | fifo->esize = esize; | ||
| 90 | fifo->data = buffer; | ||
| 91 | |||
| 92 | if (size < 2) { | ||
| 93 | fifo->mask = 0; | ||
| 94 | return -EINVAL; | ||
| 95 | } | ||
| 96 | fifo->mask = size - 1; | ||
| 97 | |||
| 98 | return 0; | ||
| 99 | } | ||
| 100 | EXPORT_SYMBOL(__kfifo_init); | ||
| 101 | |||
| 102 | static void kfifo_copy_in(struct __kfifo *fifo, const void *src, | ||
| 103 | unsigned int len, unsigned int off) | ||
| 104 | { | ||
| 105 | unsigned int size = fifo->mask + 1; | ||
| 106 | unsigned int esize = fifo->esize; | ||
| 107 | unsigned int l; | ||
| 108 | |||
| 109 | off &= fifo->mask; | ||
| 110 | if (esize != 1) { | ||
| 111 | off *= esize; | ||
| 112 | size *= esize; | ||
| 113 | len *= esize; | ||
| 114 | } | ||
| 115 | l = min(len, size - off); | ||
| 116 | |||
| 117 | memcpy(fifo->data + off, src, l); | ||
| 118 | memcpy(fifo->data, src + l, len - l); | ||
| 119 | /* | ||
| 120 | * make sure that the data in the fifo is up to date before | ||
| 121 | * incrementing the fifo->in index counter | ||
| 122 | */ | ||
| 123 | smp_wmb(); | ||
| 124 | } | ||
| 125 | |||
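A worked pass through kfifo_copy_in() above, for size == 8, esize == 1, off == 6 and len == 5:

/*
 * l = min(5, 8 - 6) = 2, so the first memcpy() fills data[6..7] and the
 * second memcpy() wraps the remaining 3 bytes into data[0..2]; when the
 * write does not cross the end of the buffer, len - l == 0 and the second
 * memcpy() is a no-op.
 */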
| 126 | unsigned int __kfifo_in(struct __kfifo *fifo, | ||
| 127 | const void *buf, unsigned int len) | ||
| 128 | { | ||
| 129 | unsigned int l; | ||
| 130 | |||
| 131 | l = kfifo_unused(fifo); | ||
| 132 | if (len > l) | ||
| 133 | len = l; | ||
| 134 | |||
| 135 | kfifo_copy_in(fifo, buf, len, fifo->in); | ||
| 136 | fifo->in += len; | ||
| 137 | return len; | ||
| 138 | } | ||
| 139 | EXPORT_SYMBOL(__kfifo_in); | ||
| 140 | |||
| 141 | static void kfifo_copy_out(struct __kfifo *fifo, void *dst, | ||
| 142 | unsigned int len, unsigned int off) | ||
| 143 | { | ||
| 144 | unsigned int size = fifo->mask + 1; | ||
| 145 | unsigned int esize = fifo->esize; | ||
| 146 | unsigned int l; | ||
| 147 | |||
| 148 | off &= fifo->mask; | ||
| 149 | if (esize != 1) { | ||
| 150 | off *= esize; | ||
| 151 | size *= esize; | ||
| 152 | len *= esize; | ||
| 153 | } | ||
| 154 | l = min(len, size - off); | ||
| 155 | |||
| 156 | memcpy(dst, fifo->data + off, l); | ||
| 157 | memcpy(dst + l, fifo->data, len - l); | ||
| 158 | /* | ||
| 159 | * make sure that the data is copied before | ||
| 160 | * incrementing the fifo->out index counter | ||
| 161 | */ | ||
| 162 | smp_wmb(); | ||
| 163 | } | ||
| 164 | |||
| 165 | unsigned int __kfifo_out_peek(struct __kfifo *fifo, | ||
| 166 | void *buf, unsigned int len) | ||
| 167 | { | ||
| 168 | unsigned int l; | ||
| 169 | |||
| 170 | l = fifo->in - fifo->out; | ||
| 171 | if (len > l) | ||
| 172 | len = l; | ||
| 173 | |||
| 174 | kfifo_copy_out(fifo, buf, len, fifo->out); | ||
| 175 | return len; | ||
| 176 | } | ||
| 177 | EXPORT_SYMBOL(__kfifo_out_peek); | ||
| 178 | |||
| 179 | unsigned int __kfifo_out(struct __kfifo *fifo, | ||
| 180 | void *buf, unsigned int len) | ||
| 181 | { | ||
| 182 | len = __kfifo_out_peek(fifo, buf, len); | ||
| 183 | fifo->out += len; | ||
| 184 | return len; | ||
| 185 | } | ||
| 186 | EXPORT_SYMBOL(__kfifo_out); | ||
| 187 | |||
| 188 | static unsigned long kfifo_copy_from_user(struct __kfifo *fifo, | ||
| 189 | const void __user *from, unsigned int len, unsigned int off, | ||
| 190 | unsigned int *copied) | ||
| 191 | { | ||
| 192 | unsigned int size = fifo->mask + 1; | ||
| 193 | unsigned int esize = fifo->esize; | ||
| 194 | unsigned int l; | ||
| 195 | unsigned long ret; | ||
| 196 | |||
| 197 | off &= fifo->mask; | ||
| 198 | if (esize != 1) { | ||
| 199 | off *= esize; | ||
| 200 | size *= esize; | ||
| 201 | len *= esize; | ||
| 202 | } | ||
| 203 | l = min(len, size - off); | ||
| 204 | |||
| 205 | ret = copy_from_user(fifo->data + off, from, l); | ||
| 206 | if (unlikely(ret)) | ||
| 207 | ret = DIV_ROUND_UP(ret + len - l, esize); | ||
| 208 | else { | ||
| 209 | ret = copy_from_user(fifo->data, from + l, len - l); | ||
| 210 | if (unlikely(ret)) | ||
| 211 | ret = DIV_ROUND_UP(ret, esize); | ||
| 212 | } | ||
| 213 | /* | ||
| 214 | * make sure that the data in the fifo is up to date before | ||
| 215 | * incrementing the fifo->in index counter | ||
| 216 | */ | ||
| 217 | smp_wmb(); | ||
| 218 | *copied = len - ret; | ||
| 219 | /* return the number of elements which are not copied */ | ||
| 220 | return ret; | ||
| 221 | } | ||
| 222 | |||
| 223 | int __kfifo_from_user(struct __kfifo *fifo, const void __user *from, | ||
| 224 | unsigned long len, unsigned int *copied) | ||
| 225 | { | ||
| 226 | unsigned int l; | ||
| 227 | unsigned long ret; | ||
| 228 | unsigned int esize = fifo->esize; | ||
| 229 | int err; | ||
| 230 | |||
| 231 | if (esize != 1) | ||
| 232 | len /= esize; | ||
| 233 | |||
| 234 | l = kfifo_unused(fifo); | ||
| 235 | if (len > l) | ||
| 236 | len = l; | ||
| 237 | |||
| 238 | ret = kfifo_copy_from_user(fifo, from, len, fifo->in, copied); | ||
| 239 | if (unlikely(ret)) { | ||
| 240 | len -= ret; | ||
| 241 | err = -EFAULT; | ||
| 242 | } else | ||
| 243 | err = 0; | ||
| 244 | fifo->in += len; | ||
| 245 | return err; | ||
| 246 | } | ||
| 247 | EXPORT_SYMBOL(__kfifo_from_user); | ||
| 248 | |||
| 249 | static unsigned long kfifo_copy_to_user(struct __kfifo *fifo, void __user *to, | ||
| 250 | unsigned int len, unsigned int off, unsigned int *copied) | ||
| 251 | { | ||
| 252 | unsigned int l; | ||
| 253 | unsigned long ret; | ||
| 254 | unsigned int size = fifo->mask + 1; | ||
| 255 | unsigned int esize = fifo->esize; | ||
| 256 | |||
| 257 | off &= fifo->mask; | ||
| 258 | if (esize != 1) { | ||
| 259 | off *= esize; | ||
| 260 | size *= esize; | ||
| 261 | len *= esize; | ||
| 262 | } | ||
| 263 | l = min(len, size - off); | ||
| 264 | |||
| 265 | ret = copy_to_user(to, fifo->data + off, l); | ||
| 266 | if (unlikely(ret)) | ||
| 267 | ret = DIV_ROUND_UP(ret + len - l, esize); | ||
| 268 | else { | ||
| 269 | ret = copy_to_user(to + l, fifo->data, len - l); | ||
| 270 | if (unlikely(ret)) | ||
| 271 | ret = DIV_ROUND_UP(ret, esize); | ||
| 272 | } | ||
| 273 | /* | ||
| 274 | * make sure that the data is copied before | ||
| 275 | * incrementing the fifo->out index counter | ||
| 276 | */ | ||
| 277 | smp_wmb(); | ||
| 278 | *copied = len - ret; | ||
| 279 | /* return the number of elements which are not copied */ | ||
| 280 | return ret; | ||
| 281 | } | ||
| 282 | |||
| 283 | int __kfifo_to_user(struct __kfifo *fifo, void __user *to, | ||
| 284 | unsigned long len, unsigned int *copied) | ||
| 285 | { | ||
| 286 | unsigned int l; | ||
| 287 | unsigned long ret; | ||
| 288 | unsigned int esize = fifo->esize; | ||
| 289 | int err; | ||
| 290 | |||
| 291 | if (esize != 1) | ||
| 292 | len /= esize; | ||
| 293 | |||
| 294 | l = fifo->in - fifo->out; | ||
| 295 | if (len > l) | ||
| 296 | len = l; | ||
| 297 | ret = kfifo_copy_to_user(fifo, to, len, fifo->out, copied); | ||
| 298 | if (unlikely(ret)) { | ||
| 299 | len -= ret; | ||
| 300 | err = -EFAULT; | ||
| 301 | } else | ||
| 302 | err = 0; | ||
| 303 | fifo->out += len; | ||
| 304 | return err; | ||
| 305 | } | ||
| 306 | EXPORT_SYMBOL(__kfifo_to_user); | ||
| 307 | |||
| 308 | static int setup_sgl_buf(struct scatterlist *sgl, void *buf, | ||
| 309 | int nents, unsigned int len) | ||
| 310 | { | ||
| 311 | int n; | ||
| 312 | unsigned int l; | ||
| 313 | unsigned int off; | ||
| 314 | struct page *page; | ||
| 315 | |||
| 316 | if (!nents) | ||
| 317 | return 0; | ||
| 318 | |||
| 319 | if (!len) | ||
| 320 | return 0; | ||
| 321 | |||
| 322 | n = 0; | ||
| 323 | page = virt_to_page(buf); | ||
| 324 | off = offset_in_page(buf); | ||
| 325 | l = 0; | ||
| 326 | |||
| 327 | while (len >= l + PAGE_SIZE - off) { | ||
| 328 | struct page *npage; | ||
| 329 | |||
| 330 | l += PAGE_SIZE; | ||
| 331 | buf += PAGE_SIZE; | ||
| 332 | npage = virt_to_page(buf); | ||
| 333 | if (page_to_phys(page) != page_to_phys(npage) - l) { | ||
| 334 | sg_set_page(sgl, page, l - off, off); | ||
| 335 | sgl = sg_next(sgl); | ||
| 336 | if (++n == nents || sgl == NULL) | ||
| 337 | return n; | ||
| 338 | page = npage; | ||
| 339 | len -= l - off; | ||
| 340 | l = off = 0; | ||
| 341 | } | ||
| 342 | } | ||
| 343 | sg_set_page(sgl, page, len, off); | ||
| 344 | return n + 1; | ||
| 345 | } | ||
| 346 | |||
| 347 | static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl, | ||
| 348 | int nents, unsigned int len, unsigned int off) | ||
| 349 | { | ||
| 350 | unsigned int size = fifo->mask + 1; | ||
| 351 | unsigned int esize = fifo->esize; | ||
| 352 | unsigned int l; | ||
| 353 | unsigned int n; | ||
| 354 | |||
| 355 | off &= fifo->mask; | ||
| 356 | if (esize != 1) { | ||
| 357 | off *= esize; | ||
| 358 | size *= esize; | ||
| 359 | len *= esize; | ||
| 360 | } | ||
| 361 | l = min(len, size - off); | ||
| 362 | |||
| 363 | n = setup_sgl_buf(sgl, fifo->data + off, nents, l); | ||
| 364 | n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l); | ||
| 365 | |||
| 366 | return n; | ||
| 367 | } | ||
| 368 | |||
| 369 | unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo, | ||
| 370 | struct scatterlist *sgl, int nents, unsigned int len) | ||
| 371 | { | ||
| 372 | unsigned int l; | ||
| 373 | |||
| 374 | l = kfifo_unused(fifo); | ||
| 375 | if (len > l) | ||
| 376 | len = l; | ||
| 377 | |||
| 378 | return setup_sgl(fifo, sgl, nents, len, fifo->in); | ||
| 379 | } | ||
| 380 | EXPORT_SYMBOL(__kfifo_dma_in_prepare); | ||
| 381 | |||
| 382 | unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo, | ||
| 383 | struct scatterlist *sgl, int nents, unsigned int len) | ||
| 384 | { | ||
| 385 | unsigned int l; | ||
| 386 | |||
| 387 | l = fifo->in - fifo->out; | ||
| 388 | if (len > l) | ||
| 389 | len = l; | ||
| 390 | |||
| 391 | return setup_sgl(fifo, sgl, nents, len, fifo->out); | ||
| 392 | } | ||
| 393 | EXPORT_SYMBOL(__kfifo_dma_out_prepare); | ||
| 394 | |||
| 395 | unsigned int __kfifo_max_r(unsigned int len, size_t recsize) | ||
| 396 | { | ||
| 397 | unsigned int max = (1 << (recsize << 3)) - 1; | ||
| 398 | |||
| 399 | if (len > max) | ||
| 400 | return max; | ||
| 401 | return len; | ||
| 402 | } | ||
| 403 | EXPORT_SYMBOL(__kfifo_max_r); | ||
| 404 | |||
| 405 | #define __KFIFO_PEEK(data, out, mask) \ | ||
| 406 | ((data)[(out) & (mask)]) | ||
| 407 | /* | ||
| 408 | * __kfifo_peek_n internal helper function to determine the length of | ||
| 409 | * the next record in the fifo | ||
| 410 | */ | ||
| 411 | static unsigned int __kfifo_peek_n(struct __kfifo *fifo, size_t recsize) | ||
| 412 | { | ||
| 413 | unsigned int l; | ||
| 414 | unsigned int mask = fifo->mask; | ||
| 415 | unsigned char *data = fifo->data; | ||
| 416 | |||
| 417 | l = __KFIFO_PEEK(data, fifo->out, mask); | ||
| 418 | |||
| 419 | if (--recsize) | ||
| 420 | l |= __KFIFO_PEEK(data, fifo->out + 1, mask) << 8; | ||
| 421 | |||
| 422 | return l; | ||
| 423 | } | ||
| 424 | |||
| 425 | #define __KFIFO_POKE(data, in, mask, val) \ | ||
| 426 | ( \ | ||
| 427 | (data)[(in) & (mask)] = (unsigned char)(val) \ | ||
| 428 | ) | ||
| 429 | |||
| 430 | /* | ||
| 431 | * __kfifo_poke_n internal helper function to store the length of | ||
| 432 | * the record into the fifo | ||
| 433 | */ | ||
| 434 | static void __kfifo_poke_n(struct __kfifo *fifo, unsigned int n, size_t recsize) | ||
| 435 | { | ||
| 436 | unsigned int mask = fifo->mask; | ||
| 437 | unsigned char *data = fifo->data; | ||
| 438 | |||
| 439 | __KFIFO_POKE(data, fifo->in, mask, n); | ||
| 440 | |||
| 441 | if (recsize > 1) | ||
| 442 | __KFIFO_POKE(data, fifo->in + 1, mask, n >> 8); | ||
| 443 | } | ||
| 444 | |||
| 445 | unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize) | ||
| 446 | { | ||
| 447 | return __kfifo_peek_n(fifo, recsize); | ||
| 448 | } | ||
| 449 | EXPORT_SYMBOL(__kfifo_len_r); | ||
| 450 | |||
| 451 | unsigned int __kfifo_in_r(struct __kfifo *fifo, const void *buf, | ||
| 452 | unsigned int len, size_t recsize) | ||
| 453 | { | ||
| 454 | if (len + recsize > kfifo_unused(fifo)) | ||
| 455 | return 0; | ||
| 456 | |||
| 457 | __kfifo_poke_n(fifo, len, recsize); | ||
| 458 | |||
| 459 | kfifo_copy_in(fifo, buf, len, fifo->in + recsize); | ||
| 460 | fifo->in += len + recsize; | ||
| 461 | return len; | ||
| 462 | } | ||
| 463 | EXPORT_SYMBOL(__kfifo_in_r); | ||
| 464 | |||
| 465 | static unsigned int kfifo_out_copy_r(struct __kfifo *fifo, | ||
| 466 | void *buf, unsigned int len, size_t recsize, unsigned int *n) | ||
| 467 | { | ||
| 468 | *n = __kfifo_peek_n(fifo, recsize); | ||
| 469 | |||
| 470 | if (len > *n) | ||
| 471 | len = *n; | ||
| 472 | |||
| 473 | kfifo_copy_out(fifo, buf, len, fifo->out + recsize); | ||
| 474 | return len; | ||
| 475 | } | ||
| 476 | |||
| 477 | unsigned int __kfifo_out_peek_r(struct __kfifo *fifo, void *buf, | ||
| 478 | unsigned int len, size_t recsize) | ||
| 479 | { | ||
| 480 | unsigned int n; | ||
| 481 | |||
| 482 | if (fifo->in == fifo->out) | ||
| 483 | return 0; | ||
| 484 | |||
| 485 | return kfifo_out_copy_r(fifo, buf, len, recsize, &n); | ||
| 486 | } | ||
| 487 | EXPORT_SYMBOL(__kfifo_out_peek_r); | ||
| 488 | |||
| 489 | unsigned int __kfifo_out_r(struct __kfifo *fifo, void *buf, | ||
| 490 | unsigned int len, size_t recsize) | ||
| 491 | { | ||
| 492 | unsigned int n; | ||
| 493 | |||
| 494 | if (fifo->in == fifo->out) | ||
| 495 | return 0; | ||
| 496 | |||
| 497 | len = kfifo_out_copy_r(fifo, buf, len, recsize, &n); | ||
| 498 | fifo->out += n + recsize; | ||
| 499 | return len; | ||
| 500 | } | ||
| 501 | EXPORT_SYMBOL(__kfifo_out_r); | ||
| 502 | |||
| 503 | void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize) | ||
| 504 | { | ||
| 505 | unsigned int n; | ||
| 506 | |||
| 507 | n = __kfifo_peek_n(fifo, recsize); | ||
| 508 | fifo->out += n + recsize; | ||
| 509 | } | ||
| 510 | EXPORT_SYMBOL(__kfifo_skip_r); | ||
| 511 | |||
| 512 | int __kfifo_from_user_r(struct __kfifo *fifo, const void __user *from, | ||
| 513 | unsigned long len, unsigned int *copied, size_t recsize) | ||
| 514 | { | ||
| 515 | unsigned long ret; | ||
| 516 | |||
| 517 | len = __kfifo_max_r(len, recsize); | ||
| 518 | |||
| 519 | if (len + recsize > kfifo_unused(fifo)) { | ||
| 520 | *copied = 0; | ||
| 521 | return 0; | ||
| 522 | } | ||
| 523 | |||
| 524 | __kfifo_poke_n(fifo, len, recsize); | ||
| 525 | |||
| 526 | ret = kfifo_copy_from_user(fifo, from, len, fifo->in + recsize, copied); | ||
| 527 | if (unlikely(ret)) { | ||
| 528 | *copied = 0; | ||
| 529 | return -EFAULT; | ||
| 530 | } | ||
| 531 | fifo->in += len + recsize; | ||
| 532 | return 0; | ||
| 533 | } | ||
| 534 | EXPORT_SYMBOL(__kfifo_from_user_r); | ||
| 535 | |||
| 536 | int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to, | ||
| 537 | unsigned long len, unsigned int *copied, size_t recsize) | ||
| 538 | { | ||
| 539 | unsigned long ret; | ||
| 540 | unsigned int n; | ||
| 541 | |||
| 542 | if (fifo->in == fifo->out) { | ||
| 543 | *copied = 0; | ||
| 544 | return 0; | ||
| 545 | } | ||
| 546 | |||
| 547 | n = __kfifo_peek_n(fifo, recsize); | ||
| 548 | if (len > n) | ||
| 549 | len = n; | ||
| 550 | |||
| 551 | ret = kfifo_copy_to_user(fifo, to, len, fifo->out + recsize, copied); | ||
| 552 | if (unlikely(ret)) { | ||
| 553 | *copied = 0; | ||
| 554 | return -EFAULT; | ||
| 555 | } | ||
| 556 | fifo->out += n + recsize; | ||
| 557 | return 0; | ||
| 558 | } | ||
| 559 | EXPORT_SYMBOL(__kfifo_to_user_r); | ||
| 560 | |||
| 561 | unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo, | ||
| 562 | struct scatterlist *sgl, int nents, unsigned int len, size_t recsize) | ||
| 563 | { | ||
| 564 | if (!nents) | ||
| 565 | BUG(); | ||
| 566 | |||
| 567 | len = __kfifo_max_r(len, recsize); | ||
| 568 | |||
| 569 | if (len + recsize > kfifo_unused(fifo)) | ||
| 570 | return 0; | ||
| 571 | |||
| 572 | return setup_sgl(fifo, sgl, nents, len, fifo->in + recsize); | ||
| 573 | } | ||
| 574 | EXPORT_SYMBOL(__kfifo_dma_in_prepare_r); | ||
| 575 | |||
| 576 | void __kfifo_dma_in_finish_r(struct __kfifo *fifo, | ||
| 577 | unsigned int len, size_t recsize) | ||
| 578 | { | ||
| 579 | len = __kfifo_max_r(len, recsize); | ||
| 580 | __kfifo_poke_n(fifo, len, recsize); | ||
| 581 | fifo->in += len + recsize; | ||
| 582 | } | ||
| 583 | EXPORT_SYMBOL(__kfifo_dma_in_finish_r); | ||
| 584 | |||
| 585 | unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo, | ||
| 586 | struct scatterlist *sgl, int nents, unsigned int len, size_t recsize) | ||
| 587 | { | ||
| 588 | if (!nents) | ||
| 589 | BUG(); | ||
| 590 | |||
| 591 | len = __kfifo_max_r(len, recsize); | ||
| 592 | |||
| 593 | if (len + recsize > fifo->in - fifo->out) | ||
| 594 | return 0; | ||
| 595 | |||
| 596 | return setup_sgl(fifo, sgl, nents, len, fifo->out + recsize); | ||
| 597 | } | ||
| 598 | EXPORT_SYMBOL(__kfifo_dma_out_prepare_r); | ||
| 599 | |||
| 600 | void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize) | ||
| 601 | { | ||
| 602 | unsigned int len; | ||
| 603 | |||
| 604 | len = __kfifo_peek_n(fifo, recsize); | ||
| 605 | fifo->out += len + recsize; | ||
| 606 | } | ||
| 607 | EXPORT_SYMBOL(__kfifo_dma_out_finish_r); | ||
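Aside (not part of the diff above): the exported __kfifo_* routines are normally reached through the wrapper macros in <linux/kfifo.h>. A minimal, hypothetical caller might look like the sketch below; the fifo name, its 256-element size and the payload are illustrative only, and no locking is shown (a single producer and a single consumer are assumed).

	#include <linux/kfifo.h>
	#include <linux/printk.h>

	/* size must be a power of two, matching the 'let the indices wrap' scheme */
	static DEFINE_KFIFO(example_fifo, unsigned char, 256);

	static void kfifo_usage_sketch(void)
	{
		unsigned char in[4] = { 1, 2, 3, 4 };
		unsigned char out[4];
		unsigned int n;

		/* kfifo_in() wraps __kfifo_in(); returns the number of elements stored */
		n = kfifo_in(&example_fifo, in, sizeof(in));
		pr_info("stored %u bytes\n", n);

		/* kfifo_out() wraps __kfifo_out(); returns the number of elements read */
		n = kfifo_out(&example_fifo, out, sizeof(out));
		pr_info("read %u bytes\n", n);
	}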
diff --git a/lib/lru_cache.c b/lib/lru_cache.c index d71d89498943..8335d39d2ccd 100644 --- a/lib/lru_cache.c +++ b/lib/lru_cache.c | |||
| @@ -262,12 +262,11 @@ static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr) | |||
| 262 | static struct lc_element *__lc_find(struct lru_cache *lc, unsigned int enr, | 262 | static struct lc_element *__lc_find(struct lru_cache *lc, unsigned int enr, |
| 263 | bool include_changing) | 263 | bool include_changing) |
| 264 | { | 264 | { |
| 265 | struct hlist_node *n; | ||
| 266 | struct lc_element *e; | 265 | struct lc_element *e; |
| 267 | 266 | ||
| 268 | BUG_ON(!lc); | 267 | BUG_ON(!lc); |
| 269 | BUG_ON(!lc->nr_elements); | 268 | BUG_ON(!lc->nr_elements); |
| 270 | hlist_for_each_entry(e, n, lc_hash_slot(lc, enr), colision) { | 269 | hlist_for_each_entry(e, lc_hash_slot(lc, enr), colision) { |
| 271 | /* "about to be changed" elements, pending transaction commit, | 270 | /* "about to be changed" elements, pending transaction commit, |
| 272 | * are hashed by their "new number". "Normal" elements have | 271 | * are hashed by their "new number". "Normal" elements have |
| 273 | * lc_number == lc_new_number. */ | 272 | * lc_number == lc_new_number. */ |
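Aside: this hunk only adapts the lookup to the hlist_for_each_entry() signature change that dropped the separate struct hlist_node cursor. A stand-alone sketch of the new calling convention, with a made-up struct item and hash head, for comparison:

	#include <linux/list.h>

	struct item {
		int key;
		struct hlist_node node;
	};

	static struct item *find_item(struct hlist_head *head, int key)
	{
		struct item *e;

		/* new form: no separate struct hlist_node * cursor argument */
		hlist_for_each_entry(e, head, node) {
			if (e->key == key)
				return e;
		}
		return NULL;
	}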
diff --git a/lib/lzo/Makefile b/lib/lzo/Makefile index e764116ea12d..f0f7d7ca2b83 100644 --- a/lib/lzo/Makefile +++ b/lib/lzo/Makefile | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | lzo_compress-objs := lzo1x_compress.o | 1 | lzo_compress-objs := lzo1x_compress.o |
| 2 | lzo_decompress-objs := lzo1x_decompress.o | 2 | lzo_decompress-objs := lzo1x_decompress_safe.o |
| 3 | 3 | ||
| 4 | obj-$(CONFIG_LZO_COMPRESS) += lzo_compress.o | 4 | obj-$(CONFIG_LZO_COMPRESS) += lzo_compress.o |
| 5 | obj-$(CONFIG_LZO_DECOMPRESS) += lzo_decompress.o | 5 | obj-$(CONFIG_LZO_DECOMPRESS) += lzo_decompress.o |
diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c index a6040990a62e..236eb21167b5 100644 --- a/lib/lzo/lzo1x_compress.c +++ b/lib/lzo/lzo1x_compress.c | |||
| @@ -1,194 +1,243 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * LZO1X Compressor from MiniLZO | 2 | * LZO1X Compressor from LZO |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 1996-2005 Markus F.X.J. Oberhumer <markus@oberhumer.com> | 4 | * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com> |
| 5 | * | 5 | * |
| 6 | * The full LZO package can be found at: | 6 | * The full LZO package can be found at: |
| 7 | * http://www.oberhumer.com/opensource/lzo/ | 7 | * http://www.oberhumer.com/opensource/lzo/ |
| 8 | * | 8 | * |
| 9 | * Changed for kernel use by: | 9 | * Changed for Linux kernel use by: |
| 10 | * Nitin Gupta <nitingupta910@gmail.com> | 10 | * Nitin Gupta <nitingupta910@gmail.com> |
| 11 | * Richard Purdie <rpurdie@openedhand.com> | 11 | * Richard Purdie <rpurdie@openedhand.com> |
| 12 | */ | 12 | */ |
| 13 | 13 | ||
| 14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
| 15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
| 16 | #include <linux/lzo.h> | ||
| 17 | #include <asm/unaligned.h> | 16 | #include <asm/unaligned.h> |
| 17 | #include <linux/lzo.h> | ||
| 18 | #include "lzodefs.h" | 18 | #include "lzodefs.h" |
| 19 | 19 | ||
| 20 | static noinline size_t | 20 | static noinline size_t |
| 21 | _lzo1x_1_do_compress(const unsigned char *in, size_t in_len, | 21 | lzo1x_1_do_compress(const unsigned char *in, size_t in_len, |
| 22 | unsigned char *out, size_t *out_len, void *wrkmem) | 22 | unsigned char *out, size_t *out_len, |
| 23 | size_t ti, void *wrkmem) | ||
| 23 | { | 24 | { |
| 25 | const unsigned char *ip; | ||
| 26 | unsigned char *op; | ||
| 24 | const unsigned char * const in_end = in + in_len; | 27 | const unsigned char * const in_end = in + in_len; |
| 25 | const unsigned char * const ip_end = in + in_len - M2_MAX_LEN - 5; | 28 | const unsigned char * const ip_end = in + in_len - 20; |
| 26 | const unsigned char ** const dict = wrkmem; | 29 | const unsigned char *ii; |
| 27 | const unsigned char *ip = in, *ii = ip; | 30 | lzo_dict_t * const dict = (lzo_dict_t *) wrkmem; |
| 28 | const unsigned char *end, *m, *m_pos; | ||
| 29 | size_t m_off, m_len, dindex; | ||
| 30 | unsigned char *op = out; | ||
| 31 | 31 | ||
| 32 | ip += 4; | 32 | op = out; |
| 33 | ip = in; | ||
| 34 | ii = ip; | ||
| 35 | ip += ti < 4 ? 4 - ti : 0; | ||
| 33 | 36 | ||
| 34 | for (;;) { | 37 | for (;;) { |
| 35 | dindex = ((size_t)(0x21 * DX3(ip, 5, 5, 6)) >> 5) & D_MASK; | 38 | const unsigned char *m_pos; |
| 36 | m_pos = dict[dindex]; | 39 | size_t t, m_len, m_off; |
| 37 | 40 | u32 dv; | |
| 38 | if (m_pos < in) | ||
| 39 | goto literal; | ||
| 40 | |||
| 41 | if (ip == m_pos || ((size_t)(ip - m_pos) > M4_MAX_OFFSET)) | ||
| 42 | goto literal; | ||
| 43 | |||
| 44 | m_off = ip - m_pos; | ||
| 45 | if (m_off <= M2_MAX_OFFSET || m_pos[3] == ip[3]) | ||
| 46 | goto try_match; | ||
| 47 | |||
| 48 | dindex = (dindex & (D_MASK & 0x7ff)) ^ (D_HIGH | 0x1f); | ||
| 49 | m_pos = dict[dindex]; | ||
| 50 | |||
| 51 | if (m_pos < in) | ||
| 52 | goto literal; | ||
| 53 | |||
| 54 | if (ip == m_pos || ((size_t)(ip - m_pos) > M4_MAX_OFFSET)) | ||
| 55 | goto literal; | ||
| 56 | |||
| 57 | m_off = ip - m_pos; | ||
| 58 | if (m_off <= M2_MAX_OFFSET || m_pos[3] == ip[3]) | ||
| 59 | goto try_match; | ||
| 60 | |||
| 61 | goto literal; | ||
| 62 | |||
| 63 | try_match: | ||
| 64 | if (get_unaligned((const unsigned short *)m_pos) | ||
| 65 | == get_unaligned((const unsigned short *)ip)) { | ||
| 66 | if (likely(m_pos[2] == ip[2])) | ||
| 67 | goto match; | ||
| 68 | } | ||
| 69 | |||
| 70 | literal: | 41 | literal: |
| 71 | dict[dindex] = ip; | 42 | ip += 1 + ((ip - ii) >> 5); |
| 72 | ++ip; | 43 | next: |
| 73 | if (unlikely(ip >= ip_end)) | 44 | if (unlikely(ip >= ip_end)) |
| 74 | break; | 45 | break; |
| 75 | continue; | 46 | dv = get_unaligned_le32(ip); |
| 76 | 47 | t = ((dv * 0x1824429d) >> (32 - D_BITS)) & D_MASK; | |
| 77 | match: | 48 | m_pos = in + dict[t]; |
| 78 | dict[dindex] = ip; | 49 | dict[t] = (lzo_dict_t) (ip - in); |
| 79 | if (ip != ii) { | 50 | if (unlikely(dv != get_unaligned_le32(m_pos))) |
| 80 | size_t t = ip - ii; | 51 | goto literal; |
| 81 | 52 | ||
| 53 | ii -= ti; | ||
| 54 | ti = 0; | ||
| 55 | t = ip - ii; | ||
| 56 | if (t != 0) { | ||
| 82 | if (t <= 3) { | 57 | if (t <= 3) { |
| 83 | op[-2] |= t; | 58 | op[-2] |= t; |
| 84 | } else if (t <= 18) { | 59 | COPY4(op, ii); |
| 60 | op += t; | ||
| 61 | } else if (t <= 16) { | ||
| 85 | *op++ = (t - 3); | 62 | *op++ = (t - 3); |
| 63 | COPY8(op, ii); | ||
| 64 | COPY8(op + 8, ii + 8); | ||
| 65 | op += t; | ||
| 86 | } else { | 66 | } else { |
| 87 | size_t tt = t - 18; | 67 | if (t <= 18) { |
| 88 | 68 | *op++ = (t - 3); | |
| 89 | *op++ = 0; | 69 | } else { |
| 90 | while (tt > 255) { | 70 | size_t tt = t - 18; |
| 91 | tt -= 255; | ||
| 92 | *op++ = 0; | 71 | *op++ = 0; |
| 72 | while (unlikely(tt > 255)) { | ||
| 73 | tt -= 255; | ||
| 74 | *op++ = 0; | ||
| 75 | } | ||
| 76 | *op++ = tt; | ||
| 93 | } | 77 | } |
| 94 | *op++ = tt; | 78 | do { |
| 79 | COPY8(op, ii); | ||
| 80 | COPY8(op + 8, ii + 8); | ||
| 81 | op += 16; | ||
| 82 | ii += 16; | ||
| 83 | t -= 16; | ||
| 84 | } while (t >= 16); | ||
| 85 | if (t > 0) do { | ||
| 86 | *op++ = *ii++; | ||
| 87 | } while (--t > 0); | ||
| 95 | } | 88 | } |
| 96 | do { | ||
| 97 | *op++ = *ii++; | ||
| 98 | } while (--t > 0); | ||
| 99 | } | 89 | } |
| 100 | 90 | ||
| 101 | ip += 3; | 91 | m_len = 4; |
| 102 | if (m_pos[3] != *ip++ || m_pos[4] != *ip++ | 92 | { |
| 103 | || m_pos[5] != *ip++ || m_pos[6] != *ip++ | 93 | #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ64) |
| 104 | || m_pos[7] != *ip++ || m_pos[8] != *ip++) { | 94 | u64 v; |
| 105 | --ip; | 95 | v = get_unaligned((const u64 *) (ip + m_len)) ^ |
| 106 | m_len = ip - ii; | 96 | get_unaligned((const u64 *) (m_pos + m_len)); |
| 97 | if (unlikely(v == 0)) { | ||
| 98 | do { | ||
| 99 | m_len += 8; | ||
| 100 | v = get_unaligned((const u64 *) (ip + m_len)) ^ | ||
| 101 | get_unaligned((const u64 *) (m_pos + m_len)); | ||
| 102 | if (unlikely(ip + m_len >= ip_end)) | ||
| 103 | goto m_len_done; | ||
| 104 | } while (v == 0); | ||
| 105 | } | ||
| 106 | # if defined(__LITTLE_ENDIAN) | ||
| 107 | m_len += (unsigned) __builtin_ctzll(v) / 8; | ||
| 108 | # elif defined(__BIG_ENDIAN) | ||
| 109 | m_len += (unsigned) __builtin_clzll(v) / 8; | ||
| 110 | # else | ||
| 111 | # error "missing endian definition" | ||
| 112 | # endif | ||
| 113 | #elif defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ32) | ||
| 114 | u32 v; | ||
| 115 | v = get_unaligned((const u32 *) (ip + m_len)) ^ | ||
| 116 | get_unaligned((const u32 *) (m_pos + m_len)); | ||
| 117 | if (unlikely(v == 0)) { | ||
| 118 | do { | ||
| 119 | m_len += 4; | ||
| 120 | v = get_unaligned((const u32 *) (ip + m_len)) ^ | ||
| 121 | get_unaligned((const u32 *) (m_pos + m_len)); | ||
| 122 | if (v != 0) | ||
| 123 | break; | ||
| 124 | m_len += 4; | ||
| 125 | v = get_unaligned((const u32 *) (ip + m_len)) ^ | ||
| 126 | get_unaligned((const u32 *) (m_pos + m_len)); | ||
| 127 | if (unlikely(ip + m_len >= ip_end)) | ||
| 128 | goto m_len_done; | ||
| 129 | } while (v == 0); | ||
| 130 | } | ||
| 131 | # if defined(__LITTLE_ENDIAN) | ||
| 132 | m_len += (unsigned) __builtin_ctz(v) / 8; | ||
| 133 | # elif defined(__BIG_ENDIAN) | ||
| 134 | m_len += (unsigned) __builtin_clz(v) / 8; | ||
| 135 | # else | ||
| 136 | # error "missing endian definition" | ||
| 137 | # endif | ||
| 138 | #else | ||
| 139 | if (unlikely(ip[m_len] == m_pos[m_len])) { | ||
| 140 | do { | ||
| 141 | m_len += 1; | ||
| 142 | if (ip[m_len] != m_pos[m_len]) | ||
| 143 | break; | ||
| 144 | m_len += 1; | ||
| 145 | if (ip[m_len] != m_pos[m_len]) | ||
| 146 | break; | ||
| 147 | m_len += 1; | ||
| 148 | if (ip[m_len] != m_pos[m_len]) | ||
| 149 | break; | ||
| 150 | m_len += 1; | ||
| 151 | if (ip[m_len] != m_pos[m_len]) | ||
| 152 | break; | ||
| 153 | m_len += 1; | ||
| 154 | if (ip[m_len] != m_pos[m_len]) | ||
| 155 | break; | ||
| 156 | m_len += 1; | ||
| 157 | if (ip[m_len] != m_pos[m_len]) | ||
| 158 | break; | ||
| 159 | m_len += 1; | ||
| 160 | if (ip[m_len] != m_pos[m_len]) | ||
| 161 | break; | ||
| 162 | m_len += 1; | ||
| 163 | if (unlikely(ip + m_len >= ip_end)) | ||
| 164 | goto m_len_done; | ||
| 165 | } while (ip[m_len] == m_pos[m_len]); | ||
| 166 | } | ||
| 167 | #endif | ||
| 168 | } | ||
| 169 | m_len_done: | ||
| 107 | 170 | ||
| 108 | if (m_off <= M2_MAX_OFFSET) { | 171 | m_off = ip - m_pos; |
| 109 | m_off -= 1; | 172 | ip += m_len; |
| 110 | *op++ = (((m_len - 1) << 5) | 173 | ii = ip; |
| 111 | | ((m_off & 7) << 2)); | 174 | if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) { |
| 112 | *op++ = (m_off >> 3); | 175 | m_off -= 1; |
| 113 | } else if (m_off <= M3_MAX_OFFSET) { | 176 | *op++ = (((m_len - 1) << 5) | ((m_off & 7) << 2)); |
| 114 | m_off -= 1; | 177 | *op++ = (m_off >> 3); |
| 178 | } else if (m_off <= M3_MAX_OFFSET) { | ||
| 179 | m_off -= 1; | ||
| 180 | if (m_len <= M3_MAX_LEN) | ||
| 115 | *op++ = (M3_MARKER | (m_len - 2)); | 181 | *op++ = (M3_MARKER | (m_len - 2)); |
| 116 | goto m3_m4_offset; | 182 | else { |
| 117 | } else { | 183 | m_len -= M3_MAX_LEN; |
| 118 | m_off -= 0x4000; | 184 | *op++ = M3_MARKER | 0; |
| 119 | 185 | while (unlikely(m_len > 255)) { | |
| 120 | *op++ = (M4_MARKER | ((m_off & 0x4000) >> 11) | 186 | m_len -= 255; |
| 121 | | (m_len - 2)); | 187 | *op++ = 0; |
| 122 | goto m3_m4_offset; | 188 | } |
| 189 | *op++ = (m_len); | ||
| 123 | } | 190 | } |
| 191 | *op++ = (m_off << 2); | ||
| 192 | *op++ = (m_off >> 6); | ||
| 124 | } else { | 193 | } else { |
| 125 | end = in_end; | 194 | m_off -= 0x4000; |
| 126 | m = m_pos + M2_MAX_LEN + 1; | 195 | if (m_len <= M4_MAX_LEN) |
| 127 | 196 | *op++ = (M4_MARKER | ((m_off >> 11) & 8) | |
| 128 | while (ip < end && *m == *ip) { | ||
| 129 | m++; | ||
| 130 | ip++; | ||
| 131 | } | ||
| 132 | m_len = ip - ii; | ||
| 133 | |||
| 134 | if (m_off <= M3_MAX_OFFSET) { | ||
| 135 | m_off -= 1; | ||
| 136 | if (m_len <= 33) { | ||
| 137 | *op++ = (M3_MARKER | (m_len - 2)); | ||
| 138 | } else { | ||
| 139 | m_len -= 33; | ||
| 140 | *op++ = M3_MARKER | 0; | ||
| 141 | goto m3_m4_len; | ||
| 142 | } | ||
| 143 | } else { | ||
| 144 | m_off -= 0x4000; | ||
| 145 | if (m_len <= M4_MAX_LEN) { | ||
| 146 | *op++ = (M4_MARKER | ||
| 147 | | ((m_off & 0x4000) >> 11) | ||
| 148 | | (m_len - 2)); | 197 | | (m_len - 2)); |
| 149 | } else { | 198 | else { |
| 150 | m_len -= M4_MAX_LEN; | 199 | m_len -= M4_MAX_LEN; |
| 151 | *op++ = (M4_MARKER | 200 | *op++ = (M4_MARKER | ((m_off >> 11) & 8)); |
| 152 | | ((m_off & 0x4000) >> 11)); | 201 | while (unlikely(m_len > 255)) { |
| 153 | m3_m4_len: | 202 | m_len -= 255; |
| 154 | while (m_len > 255) { | 203 | *op++ = 0; |
| 155 | m_len -= 255; | ||
| 156 | *op++ = 0; | ||
| 157 | } | ||
| 158 | |||
| 159 | *op++ = (m_len); | ||
| 160 | } | 204 | } |
| 205 | *op++ = (m_len); | ||
| 161 | } | 206 | } |
| 162 | m3_m4_offset: | 207 | *op++ = (m_off << 2); |
| 163 | *op++ = ((m_off & 63) << 2); | ||
| 164 | *op++ = (m_off >> 6); | 208 | *op++ = (m_off >> 6); |
| 165 | } | 209 | } |
| 166 | 210 | goto next; | |
| 167 | ii = ip; | ||
| 168 | if (unlikely(ip >= ip_end)) | ||
| 169 | break; | ||
| 170 | } | 211 | } |
| 171 | |||
| 172 | *out_len = op - out; | 212 | *out_len = op - out; |
| 173 | return in_end - ii; | 213 | return in_end - (ii - ti); |
| 174 | } | 214 | } |
| 175 | 215 | ||
| 176 | int lzo1x_1_compress(const unsigned char *in, size_t in_len, unsigned char *out, | 216 | int lzo1x_1_compress(const unsigned char *in, size_t in_len, |
| 177 | size_t *out_len, void *wrkmem) | 217 | unsigned char *out, size_t *out_len, |
| 218 | void *wrkmem) | ||
| 178 | { | 219 | { |
| 179 | const unsigned char *ii; | 220 | const unsigned char *ip = in; |
| 180 | unsigned char *op = out; | 221 | unsigned char *op = out; |
| 181 | size_t t; | 222 | size_t l = in_len; |
| 223 | size_t t = 0; | ||
| 182 | 224 | ||
| 183 | if (unlikely(in_len <= M2_MAX_LEN + 5)) { | 225 | while (l > 20) { |
| 184 | t = in_len; | 226 | size_t ll = l <= (M4_MAX_OFFSET + 1) ? l : (M4_MAX_OFFSET + 1); |
| 185 | } else { | 227 | uintptr_t ll_end = (uintptr_t) ip + ll; |
| 186 | t = _lzo1x_1_do_compress(in, in_len, op, out_len, wrkmem); | 228 | if ((ll_end + ((t + ll) >> 5)) <= ll_end) |
| 229 | break; | ||
| 230 | BUILD_BUG_ON(D_SIZE * sizeof(lzo_dict_t) > LZO1X_1_MEM_COMPRESS); | ||
| 231 | memset(wrkmem, 0, D_SIZE * sizeof(lzo_dict_t)); | ||
| 232 | t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem); | ||
| 233 | ip += ll; | ||
| 187 | op += *out_len; | 234 | op += *out_len; |
| 235 | l -= ll; | ||
| 188 | } | 236 | } |
| 237 | t += l; | ||
| 189 | 238 | ||
| 190 | if (t > 0) { | 239 | if (t > 0) { |
| 191 | ii = in + in_len - t; | 240 | const unsigned char *ii = in + in_len - t; |
| 192 | 241 | ||
| 193 | if (op == out && t <= 238) { | 242 | if (op == out && t <= 238) { |
| 194 | *op++ = (17 + t); | 243 | *op++ = (17 + t); |
| @@ -198,16 +247,21 @@ int lzo1x_1_compress(const unsigned char *in, size_t in_len, unsigned char *out, | |||
| 198 | *op++ = (t - 3); | 247 | *op++ = (t - 3); |
| 199 | } else { | 248 | } else { |
| 200 | size_t tt = t - 18; | 249 | size_t tt = t - 18; |
| 201 | |||
| 202 | *op++ = 0; | 250 | *op++ = 0; |
| 203 | while (tt > 255) { | 251 | while (tt > 255) { |
| 204 | tt -= 255; | 252 | tt -= 255; |
| 205 | *op++ = 0; | 253 | *op++ = 0; |
| 206 | } | 254 | } |
| 207 | |||
| 208 | *op++ = tt; | 255 | *op++ = tt; |
| 209 | } | 256 | } |
| 210 | do { | 257 | if (t >= 16) do { |
| 258 | COPY8(op, ii); | ||
| 259 | COPY8(op + 8, ii + 8); | ||
| 260 | op += 16; | ||
| 261 | ii += 16; | ||
| 262 | t -= 16; | ||
| 263 | } while (t >= 16); | ||
| 264 | if (t > 0) do { | ||
| 211 | *op++ = *ii++; | 265 | *op++ = *ii++; |
| 212 | } while (--t > 0); | 266 | } while (--t > 0); |
| 213 | } | 267 | } |
| @@ -223,4 +277,3 @@ EXPORT_SYMBOL_GPL(lzo1x_1_compress); | |||
| 223 | 277 | ||
| 224 | MODULE_LICENSE("GPL"); | 278 | MODULE_LICENSE("GPL"); |
| 225 | MODULE_DESCRIPTION("LZO1X-1 Compressor"); | 279 | MODULE_DESCRIPTION("LZO1X-1 Compressor"); |
| 226 | |||
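Aside: a rough sketch of how a caller is expected to drive lzo1x_1_compress() after this rework. The function name, the vmalloc'd work area and the reduced error handling are illustrative, not taken from this commit; only the lzo1x_1_compress() signature, LZO1X_1_MEM_COMPRESS and lzo1x_worst_compress() come from <linux/lzo.h>.

	#include <linux/lzo.h>
	#include <linux/vmalloc.h>
	#include <linux/errno.h>

	static int compress_sketch(const unsigned char *src, size_t src_len,
				   unsigned char *dst, size_t *dst_len)
	{
		void *wrkmem = vmalloc(LZO1X_1_MEM_COMPRESS);
		int ret;

		if (!wrkmem)
			return -ENOMEM;

		/* dst must hold at least lzo1x_worst_compress(src_len) bytes */
		ret = lzo1x_1_compress(src, src_len, dst, dst_len, wrkmem);

		vfree(wrkmem);
		return ret == LZO_E_OK ? 0 : -EINVAL;
	}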
diff --git a/lib/lzo/lzo1x_decompress.c b/lib/lzo/lzo1x_decompress.c deleted file mode 100644 index f2fd09850223..000000000000 --- a/lib/lzo/lzo1x_decompress.c +++ /dev/null | |||
| @@ -1,255 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * LZO1X Decompressor from MiniLZO | ||
| 3 | * | ||
| 4 | * Copyright (C) 1996-2005 Markus F.X.J. Oberhumer <markus@oberhumer.com> | ||
| 5 | * | ||
| 6 | * The full LZO package can be found at: | ||
| 7 | * http://www.oberhumer.com/opensource/lzo/ | ||
| 8 | * | ||
| 9 | * Changed for kernel use by: | ||
| 10 | * Nitin Gupta <nitingupta910@gmail.com> | ||
| 11 | * Richard Purdie <rpurdie@openedhand.com> | ||
| 12 | */ | ||
| 13 | |||
| 14 | #ifndef STATIC | ||
| 15 | #include <linux/module.h> | ||
| 16 | #include <linux/kernel.h> | ||
| 17 | #endif | ||
| 18 | |||
| 19 | #include <asm/unaligned.h> | ||
| 20 | #include <linux/lzo.h> | ||
| 21 | #include "lzodefs.h" | ||
| 22 | |||
| 23 | #define HAVE_IP(x, ip_end, ip) ((size_t)(ip_end - ip) < (x)) | ||
| 24 | #define HAVE_OP(x, op_end, op) ((size_t)(op_end - op) < (x)) | ||
| 25 | #define HAVE_LB(m_pos, out, op) (m_pos < out || m_pos >= op) | ||
| 26 | |||
| 27 | #define COPY4(dst, src) \ | ||
| 28 | put_unaligned(get_unaligned((const u32 *)(src)), (u32 *)(dst)) | ||
| 29 | |||
| 30 | int lzo1x_decompress_safe(const unsigned char *in, size_t in_len, | ||
| 31 | unsigned char *out, size_t *out_len) | ||
| 32 | { | ||
| 33 | const unsigned char * const ip_end = in + in_len; | ||
| 34 | unsigned char * const op_end = out + *out_len; | ||
| 35 | const unsigned char *ip = in, *m_pos; | ||
| 36 | unsigned char *op = out; | ||
| 37 | size_t t; | ||
| 38 | |||
| 39 | *out_len = 0; | ||
| 40 | |||
| 41 | if (*ip > 17) { | ||
| 42 | t = *ip++ - 17; | ||
| 43 | if (t < 4) | ||
| 44 | goto match_next; | ||
| 45 | if (HAVE_OP(t, op_end, op)) | ||
| 46 | goto output_overrun; | ||
| 47 | if (HAVE_IP(t + 1, ip_end, ip)) | ||
| 48 | goto input_overrun; | ||
| 49 | do { | ||
| 50 | *op++ = *ip++; | ||
| 51 | } while (--t > 0); | ||
| 52 | goto first_literal_run; | ||
| 53 | } | ||
| 54 | |||
| 55 | while ((ip < ip_end)) { | ||
| 56 | t = *ip++; | ||
| 57 | if (t >= 16) | ||
| 58 | goto match; | ||
| 59 | if (t == 0) { | ||
| 60 | if (HAVE_IP(1, ip_end, ip)) | ||
| 61 | goto input_overrun; | ||
| 62 | while (*ip == 0) { | ||
| 63 | t += 255; | ||
| 64 | ip++; | ||
| 65 | if (HAVE_IP(1, ip_end, ip)) | ||
| 66 | goto input_overrun; | ||
| 67 | } | ||
| 68 | t += 15 + *ip++; | ||
| 69 | } | ||
| 70 | if (HAVE_OP(t + 3, op_end, op)) | ||
| 71 | goto output_overrun; | ||
| 72 | if (HAVE_IP(t + 4, ip_end, ip)) | ||
| 73 | goto input_overrun; | ||
| 74 | |||
| 75 | COPY4(op, ip); | ||
| 76 | op += 4; | ||
| 77 | ip += 4; | ||
| 78 | if (--t > 0) { | ||
| 79 | if (t >= 4) { | ||
| 80 | do { | ||
| 81 | COPY4(op, ip); | ||
| 82 | op += 4; | ||
| 83 | ip += 4; | ||
| 84 | t -= 4; | ||
| 85 | } while (t >= 4); | ||
| 86 | if (t > 0) { | ||
| 87 | do { | ||
| 88 | *op++ = *ip++; | ||
| 89 | } while (--t > 0); | ||
| 90 | } | ||
| 91 | } else { | ||
| 92 | do { | ||
| 93 | *op++ = *ip++; | ||
| 94 | } while (--t > 0); | ||
| 95 | } | ||
| 96 | } | ||
| 97 | |||
| 98 | first_literal_run: | ||
| 99 | t = *ip++; | ||
| 100 | if (t >= 16) | ||
| 101 | goto match; | ||
| 102 | m_pos = op - (1 + M2_MAX_OFFSET); | ||
| 103 | m_pos -= t >> 2; | ||
| 104 | m_pos -= *ip++ << 2; | ||
| 105 | |||
| 106 | if (HAVE_LB(m_pos, out, op)) | ||
| 107 | goto lookbehind_overrun; | ||
| 108 | |||
| 109 | if (HAVE_OP(3, op_end, op)) | ||
| 110 | goto output_overrun; | ||
| 111 | *op++ = *m_pos++; | ||
| 112 | *op++ = *m_pos++; | ||
| 113 | *op++ = *m_pos; | ||
| 114 | |||
| 115 | goto match_done; | ||
| 116 | |||
| 117 | do { | ||
| 118 | match: | ||
| 119 | if (t >= 64) { | ||
| 120 | m_pos = op - 1; | ||
| 121 | m_pos -= (t >> 2) & 7; | ||
| 122 | m_pos -= *ip++ << 3; | ||
| 123 | t = (t >> 5) - 1; | ||
| 124 | if (HAVE_LB(m_pos, out, op)) | ||
| 125 | goto lookbehind_overrun; | ||
| 126 | if (HAVE_OP(t + 3 - 1, op_end, op)) | ||
| 127 | goto output_overrun; | ||
| 128 | goto copy_match; | ||
| 129 | } else if (t >= 32) { | ||
| 130 | t &= 31; | ||
| 131 | if (t == 0) { | ||
| 132 | if (HAVE_IP(1, ip_end, ip)) | ||
| 133 | goto input_overrun; | ||
| 134 | while (*ip == 0) { | ||
| 135 | t += 255; | ||
| 136 | ip++; | ||
| 137 | if (HAVE_IP(1, ip_end, ip)) | ||
| 138 | goto input_overrun; | ||
| 139 | } | ||
| 140 | t += 31 + *ip++; | ||
| 141 | } | ||
| 142 | m_pos = op - 1; | ||
| 143 | m_pos -= get_unaligned_le16(ip) >> 2; | ||
| 144 | ip += 2; | ||
| 145 | } else if (t >= 16) { | ||
| 146 | m_pos = op; | ||
| 147 | m_pos -= (t & 8) << 11; | ||
| 148 | |||
| 149 | t &= 7; | ||
| 150 | if (t == 0) { | ||
| 151 | if (HAVE_IP(1, ip_end, ip)) | ||
| 152 | goto input_overrun; | ||
| 153 | while (*ip == 0) { | ||
| 154 | t += 255; | ||
| 155 | ip++; | ||
| 156 | if (HAVE_IP(1, ip_end, ip)) | ||
| 157 | goto input_overrun; | ||
| 158 | } | ||
| 159 | t += 7 + *ip++; | ||
| 160 | } | ||
| 161 | m_pos -= get_unaligned_le16(ip) >> 2; | ||
| 162 | ip += 2; | ||
| 163 | if (m_pos == op) | ||
| 164 | goto eof_found; | ||
| 165 | m_pos -= 0x4000; | ||
| 166 | } else { | ||
| 167 | m_pos = op - 1; | ||
| 168 | m_pos -= t >> 2; | ||
| 169 | m_pos -= *ip++ << 2; | ||
| 170 | |||
| 171 | if (HAVE_LB(m_pos, out, op)) | ||
| 172 | goto lookbehind_overrun; | ||
| 173 | if (HAVE_OP(2, op_end, op)) | ||
| 174 | goto output_overrun; | ||
| 175 | |||
| 176 | *op++ = *m_pos++; | ||
| 177 | *op++ = *m_pos; | ||
| 178 | goto match_done; | ||
| 179 | } | ||
| 180 | |||
| 181 | if (HAVE_LB(m_pos, out, op)) | ||
| 182 | goto lookbehind_overrun; | ||
| 183 | if (HAVE_OP(t + 3 - 1, op_end, op)) | ||
| 184 | goto output_overrun; | ||
| 185 | |||
| 186 | if (t >= 2 * 4 - (3 - 1) && (op - m_pos) >= 4) { | ||
| 187 | COPY4(op, m_pos); | ||
| 188 | op += 4; | ||
| 189 | m_pos += 4; | ||
| 190 | t -= 4 - (3 - 1); | ||
| 191 | do { | ||
| 192 | COPY4(op, m_pos); | ||
| 193 | op += 4; | ||
| 194 | m_pos += 4; | ||
| 195 | t -= 4; | ||
| 196 | } while (t >= 4); | ||
| 197 | if (t > 0) | ||
| 198 | do { | ||
| 199 | *op++ = *m_pos++; | ||
| 200 | } while (--t > 0); | ||
| 201 | } else { | ||
| 202 | copy_match: | ||
| 203 | *op++ = *m_pos++; | ||
| 204 | *op++ = *m_pos++; | ||
| 205 | do { | ||
| 206 | *op++ = *m_pos++; | ||
| 207 | } while (--t > 0); | ||
| 208 | } | ||
| 209 | match_done: | ||
| 210 | t = ip[-2] & 3; | ||
| 211 | if (t == 0) | ||
| 212 | break; | ||
| 213 | match_next: | ||
| 214 | if (HAVE_OP(t, op_end, op)) | ||
| 215 | goto output_overrun; | ||
| 216 | if (HAVE_IP(t + 1, ip_end, ip)) | ||
| 217 | goto input_overrun; | ||
| 218 | |||
| 219 | *op++ = *ip++; | ||
| 220 | if (t > 1) { | ||
| 221 | *op++ = *ip++; | ||
| 222 | if (t > 2) | ||
| 223 | *op++ = *ip++; | ||
| 224 | } | ||
| 225 | |||
| 226 | t = *ip++; | ||
| 227 | } while (ip < ip_end); | ||
| 228 | } | ||
| 229 | |||
| 230 | *out_len = op - out; | ||
| 231 | return LZO_E_EOF_NOT_FOUND; | ||
| 232 | |||
| 233 | eof_found: | ||
| 234 | *out_len = op - out; | ||
| 235 | return (ip == ip_end ? LZO_E_OK : | ||
| 236 | (ip < ip_end ? LZO_E_INPUT_NOT_CONSUMED : LZO_E_INPUT_OVERRUN)); | ||
| 237 | input_overrun: | ||
| 238 | *out_len = op - out; | ||
| 239 | return LZO_E_INPUT_OVERRUN; | ||
| 240 | |||
| 241 | output_overrun: | ||
| 242 | *out_len = op - out; | ||
| 243 | return LZO_E_OUTPUT_OVERRUN; | ||
| 244 | |||
| 245 | lookbehind_overrun: | ||
| 246 | *out_len = op - out; | ||
| 247 | return LZO_E_LOOKBEHIND_OVERRUN; | ||
| 248 | } | ||
| 249 | #ifndef STATIC | ||
| 250 | EXPORT_SYMBOL_GPL(lzo1x_decompress_safe); | ||
| 251 | |||
| 252 | MODULE_LICENSE("GPL"); | ||
| 253 | MODULE_DESCRIPTION("LZO1X Decompressor"); | ||
| 254 | |||
| 255 | #endif | ||
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c new file mode 100644 index 000000000000..569985d522d5 --- /dev/null +++ b/lib/lzo/lzo1x_decompress_safe.c | |||
| @@ -0,0 +1,237 @@ | |||
| 1 | /* | ||
| 2 | * LZO1X Decompressor from LZO | ||
| 3 | * | ||
| 4 | * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com> | ||
| 5 | * | ||
| 6 | * The full LZO package can be found at: | ||
| 7 | * http://www.oberhumer.com/opensource/lzo/ | ||
| 8 | * | ||
| 9 | * Changed for Linux kernel use by: | ||
| 10 | * Nitin Gupta <nitingupta910@gmail.com> | ||
| 11 | * Richard Purdie <rpurdie@openedhand.com> | ||
| 12 | */ | ||
| 13 | |||
| 14 | #ifndef STATIC | ||
| 15 | #include <linux/module.h> | ||
| 16 | #include <linux/kernel.h> | ||
| 17 | #endif | ||
| 18 | #include <asm/unaligned.h> | ||
| 19 | #include <linux/lzo.h> | ||
| 20 | #include "lzodefs.h" | ||
| 21 | |||
| 22 | #define HAVE_IP(x) ((size_t)(ip_end - ip) >= (size_t)(x)) | ||
| 23 | #define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x)) | ||
| 24 | #define NEED_IP(x) if (!HAVE_IP(x)) goto input_overrun | ||
| 25 | #define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun | ||
| 26 | #define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun | ||
| 27 | |||
| 28 | int lzo1x_decompress_safe(const unsigned char *in, size_t in_len, | ||
| 29 | unsigned char *out, size_t *out_len) | ||
| 30 | { | ||
| 31 | unsigned char *op; | ||
| 32 | const unsigned char *ip; | ||
| 33 | size_t t, next; | ||
| 34 | size_t state = 0; | ||
| 35 | const unsigned char *m_pos; | ||
| 36 | const unsigned char * const ip_end = in + in_len; | ||
| 37 | unsigned char * const op_end = out + *out_len; | ||
| 38 | |||
| 39 | op = out; | ||
| 40 | ip = in; | ||
| 41 | |||
| 42 | if (unlikely(in_len < 3)) | ||
| 43 | goto input_overrun; | ||
| 44 | if (*ip > 17) { | ||
| 45 | t = *ip++ - 17; | ||
| 46 | if (t < 4) { | ||
| 47 | next = t; | ||
| 48 | goto match_next; | ||
| 49 | } | ||
| 50 | goto copy_literal_run; | ||
| 51 | } | ||
| 52 | |||
| 53 | for (;;) { | ||
| 54 | t = *ip++; | ||
| 55 | if (t < 16) { | ||
| 56 | if (likely(state == 0)) { | ||
| 57 | if (unlikely(t == 0)) { | ||
| 58 | while (unlikely(*ip == 0)) { | ||
| 59 | t += 255; | ||
| 60 | ip++; | ||
| 61 | NEED_IP(1); | ||
| 62 | } | ||
| 63 | t += 15 + *ip++; | ||
| 64 | } | ||
| 65 | t += 3; | ||
| 66 | copy_literal_run: | ||
| 67 | #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) | ||
| 68 | if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) { | ||
| 69 | const unsigned char *ie = ip + t; | ||
| 70 | unsigned char *oe = op + t; | ||
| 71 | do { | ||
| 72 | COPY8(op, ip); | ||
| 73 | op += 8; | ||
| 74 | ip += 8; | ||
| 75 | COPY8(op, ip); | ||
| 76 | op += 8; | ||
| 77 | ip += 8; | ||
| 78 | } while (ip < ie); | ||
| 79 | ip = ie; | ||
| 80 | op = oe; | ||
| 81 | } else | ||
| 82 | #endif | ||
| 83 | { | ||
| 84 | NEED_OP(t); | ||
| 85 | NEED_IP(t + 3); | ||
| 86 | do { | ||
| 87 | *op++ = *ip++; | ||
| 88 | } while (--t > 0); | ||
| 89 | } | ||
| 90 | state = 4; | ||
| 91 | continue; | ||
| 92 | } else if (state != 4) { | ||
| 93 | next = t & 3; | ||
| 94 | m_pos = op - 1; | ||
| 95 | m_pos -= t >> 2; | ||
| 96 | m_pos -= *ip++ << 2; | ||
| 97 | TEST_LB(m_pos); | ||
| 98 | NEED_OP(2); | ||
| 99 | op[0] = m_pos[0]; | ||
| 100 | op[1] = m_pos[1]; | ||
| 101 | op += 2; | ||
| 102 | goto match_next; | ||
| 103 | } else { | ||
| 104 | next = t & 3; | ||
| 105 | m_pos = op - (1 + M2_MAX_OFFSET); | ||
| 106 | m_pos -= t >> 2; | ||
| 107 | m_pos -= *ip++ << 2; | ||
| 108 | t = 3; | ||
| 109 | } | ||
| 110 | } else if (t >= 64) { | ||
| 111 | next = t & 3; | ||
| 112 | m_pos = op - 1; | ||
| 113 | m_pos -= (t >> 2) & 7; | ||
| 114 | m_pos -= *ip++ << 3; | ||
| 115 | t = (t >> 5) - 1 + (3 - 1); | ||
| 116 | } else if (t >= 32) { | ||
| 117 | t = (t & 31) + (3 - 1); | ||
| 118 | if (unlikely(t == 2)) { | ||
| 119 | while (unlikely(*ip == 0)) { | ||
| 120 | t += 255; | ||
| 121 | ip++; | ||
| 122 | NEED_IP(1); | ||
| 123 | } | ||
| 124 | t += 31 + *ip++; | ||
| 125 | NEED_IP(2); | ||
| 126 | } | ||
| 127 | m_pos = op - 1; | ||
| 128 | next = get_unaligned_le16(ip); | ||
| 129 | ip += 2; | ||
| 130 | m_pos -= next >> 2; | ||
| 131 | next &= 3; | ||
| 132 | } else { | ||
| 133 | m_pos = op; | ||
| 134 | m_pos -= (t & 8) << 11; | ||
| 135 | t = (t & 7) + (3 - 1); | ||
| 136 | if (unlikely(t == 2)) { | ||
| 137 | while (unlikely(*ip == 0)) { | ||
| 138 | t += 255; | ||
| 139 | ip++; | ||
| 140 | NEED_IP(1); | ||
| 141 | } | ||
| 142 | t += 7 + *ip++; | ||
| 143 | NEED_IP(2); | ||
| 144 | } | ||
| 145 | next = get_unaligned_le16(ip); | ||
| 146 | ip += 2; | ||
| 147 | m_pos -= next >> 2; | ||
| 148 | next &= 3; | ||
| 149 | if (m_pos == op) | ||
| 150 | goto eof_found; | ||
| 151 | m_pos -= 0x4000; | ||
| 152 | } | ||
| 153 | TEST_LB(m_pos); | ||
| 154 | #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) | ||
| 155 | if (op - m_pos >= 8) { | ||
| 156 | unsigned char *oe = op + t; | ||
| 157 | if (likely(HAVE_OP(t + 15))) { | ||
| 158 | do { | ||
| 159 | COPY8(op, m_pos); | ||
| 160 | op += 8; | ||
| 161 | m_pos += 8; | ||
| 162 | COPY8(op, m_pos); | ||
| 163 | op += 8; | ||
| 164 | m_pos += 8; | ||
| 165 | } while (op < oe); | ||
| 166 | op = oe; | ||
| 167 | if (HAVE_IP(6)) { | ||
| 168 | state = next; | ||
| 169 | COPY4(op, ip); | ||
| 170 | op += next; | ||
| 171 | ip += next; | ||
| 172 | continue; | ||
| 173 | } | ||
| 174 | } else { | ||
| 175 | NEED_OP(t); | ||
| 176 | do { | ||
| 177 | *op++ = *m_pos++; | ||
| 178 | } while (op < oe); | ||
| 179 | } | ||
| 180 | } else | ||
| 181 | #endif | ||
| 182 | { | ||
| 183 | unsigned char *oe = op + t; | ||
| 184 | NEED_OP(t); | ||
| 185 | op[0] = m_pos[0]; | ||
| 186 | op[1] = m_pos[1]; | ||
| 187 | op += 2; | ||
| 188 | m_pos += 2; | ||
| 189 | do { | ||
| 190 | *op++ = *m_pos++; | ||
| 191 | } while (op < oe); | ||
| 192 | } | ||
| 193 | match_next: | ||
| 194 | state = next; | ||
| 195 | t = next; | ||
| 196 | #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) | ||
| 197 | if (likely(HAVE_IP(6) && HAVE_OP(4))) { | ||
| 198 | COPY4(op, ip); | ||
| 199 | op += t; | ||
| 200 | ip += t; | ||
| 201 | } else | ||
| 202 | #endif | ||
| 203 | { | ||
| 204 | NEED_IP(t + 3); | ||
| 205 | NEED_OP(t); | ||
| 206 | while (t > 0) { | ||
| 207 | *op++ = *ip++; | ||
| 208 | t--; | ||
| 209 | } | ||
| 210 | } | ||
| 211 | } | ||
| 212 | |||
| 213 | eof_found: | ||
| 214 | *out_len = op - out; | ||
| 215 | return (t != 3 ? LZO_E_ERROR : | ||
| 216 | ip == ip_end ? LZO_E_OK : | ||
| 217 | ip < ip_end ? LZO_E_INPUT_NOT_CONSUMED : LZO_E_INPUT_OVERRUN); | ||
| 218 | |||
| 219 | input_overrun: | ||
| 220 | *out_len = op - out; | ||
| 221 | return LZO_E_INPUT_OVERRUN; | ||
| 222 | |||
| 223 | output_overrun: | ||
| 224 | *out_len = op - out; | ||
| 225 | return LZO_E_OUTPUT_OVERRUN; | ||
| 226 | |||
| 227 | lookbehind_overrun: | ||
| 228 | *out_len = op - out; | ||
| 229 | return LZO_E_LOOKBEHIND_OVERRUN; | ||
| 230 | } | ||
| 231 | #ifndef STATIC | ||
| 232 | EXPORT_SYMBOL_GPL(lzo1x_decompress_safe); | ||
| 233 | |||
| 234 | MODULE_LICENSE("GPL"); | ||
| 235 | MODULE_DESCRIPTION("LZO1X Decompressor"); | ||
| 236 | |||
| 237 | #endif | ||
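Aside: the decompression side is typically called as sketched below. Note that *out_len is an in/out parameter: the output buffer capacity on entry, the number of bytes produced on return. The wrapper name and its error mapping are illustrative only.

	#include <linux/lzo.h>
	#include <linux/errno.h>

	static int decompress_sketch(const unsigned char *src, size_t src_len,
				     unsigned char *dst, size_t dst_capacity,
				     size_t *produced)
	{
		size_t out_len = dst_capacity;	/* capacity in, bytes written out */
		int ret;

		ret = lzo1x_decompress_safe(src, src_len, dst, &out_len);
		if (ret != LZO_E_OK)
			return -EINVAL;

		*produced = out_len;
		return 0;
	}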
diff --git a/lib/lzo/lzodefs.h b/lib/lzo/lzodefs.h index b6d482c492ef..6710b83ce72e 100644 --- a/lib/lzo/lzodefs.h +++ b/lib/lzo/lzodefs.h | |||
| @@ -1,19 +1,37 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * lzodefs.h -- architecture, OS and compiler specific defines | 2 | * lzodefs.h -- architecture, OS and compiler specific defines |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 1996-2005 Markus F.X.J. Oberhumer <markus@oberhumer.com> | 4 | * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com> |
| 5 | * | 5 | * |
| 6 | * The full LZO package can be found at: | 6 | * The full LZO package can be found at: |
| 7 | * http://www.oberhumer.com/opensource/lzo/ | 7 | * http://www.oberhumer.com/opensource/lzo/ |
| 8 | * | 8 | * |
| 9 | * Changed for kernel use by: | 9 | * Changed for Linux kernel use by: |
| 10 | * Nitin Gupta <nitingupta910@gmail.com> | 10 | * Nitin Gupta <nitingupta910@gmail.com> |
| 11 | * Richard Purdie <rpurdie@openedhand.com> | 11 | * Richard Purdie <rpurdie@openedhand.com> |
| 12 | */ | 12 | */ |
| 13 | 13 | ||
| 14 | #define LZO_VERSION 0x2020 | 14 | |
| 15 | #define LZO_VERSION_STRING "2.02" | 15 | #define COPY4(dst, src) \ |
| 16 | #define LZO_VERSION_DATE "Oct 17 2005" | 16 | put_unaligned(get_unaligned((const u32 *)(src)), (u32 *)(dst)) |
| 17 | #if defined(__x86_64__) | ||
| 18 | #define COPY8(dst, src) \ | ||
| 19 | put_unaligned(get_unaligned((const u64 *)(src)), (u64 *)(dst)) | ||
| 20 | #else | ||
| 21 | #define COPY8(dst, src) \ | ||
| 22 | COPY4(dst, src); COPY4((dst) + 4, (src) + 4) | ||
| 23 | #endif | ||
| 24 | |||
| 25 | #if defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN) | ||
| 26 | #error "conflicting endian definitions" | ||
| 27 | #elif defined(__x86_64__) | ||
| 28 | #define LZO_USE_CTZ64 1 | ||
| 29 | #define LZO_USE_CTZ32 1 | ||
| 30 | #elif defined(__i386__) || defined(__powerpc__) | ||
| 31 | #define LZO_USE_CTZ32 1 | ||
| 32 | #elif defined(__arm__) && (__LINUX_ARM_ARCH__ >= 5) | ||
| 33 | #define LZO_USE_CTZ32 1 | ||
| 34 | #endif | ||
| 17 | 35 | ||
| 18 | #define M1_MAX_OFFSET 0x0400 | 36 | #define M1_MAX_OFFSET 0x0400 |
| 19 | #define M2_MAX_OFFSET 0x0800 | 37 | #define M2_MAX_OFFSET 0x0800 |
| @@ -34,10 +52,8 @@ | |||
| 34 | #define M3_MARKER 32 | 52 | #define M3_MARKER 32 |
| 35 | #define M4_MARKER 16 | 53 | #define M4_MARKER 16 |
| 36 | 54 | ||
| 37 | #define D_BITS 14 | 55 | #define lzo_dict_t unsigned short |
| 38 | #define D_MASK ((1u << D_BITS) - 1) | 56 | #define D_BITS 13 |
| 57 | #define D_SIZE (1u << D_BITS) | ||
| 58 | #define D_MASK (D_SIZE - 1) | ||
| 39 | #define D_HIGH ((D_MASK >> 1) + 1) | 59 | #define D_HIGH ((D_MASK >> 1) + 1) |
| 40 | |||
| 41 | #define DX2(p, s1, s2) (((((size_t)((p)[2]) << (s2)) ^ (p)[1]) \ | ||
| 42 | << (s1)) ^ (p)[0]) | ||
| 43 | #define DX3(p, s1, s2, s3) ((DX2((p)+1, s2, s3) << (s1)) ^ (p)[0]) | ||
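Aside: the new LZO_USE_CTZ32/LZO_USE_CTZ64 switches let the compressor find match lengths by XOR-ing a word at a time and counting trailing zero bits. A stand-alone, user-space illustration of the same trick (little-endian byte order assumed, names made up):

	#include <stdint.h>
	#include <string.h>

	static size_t match_len(const unsigned char *a, const unsigned char *b,
				size_t limit)
	{
		size_t len = 0;

		while (len + 8 <= limit) {
			uint64_t va, vb;

			memcpy(&va, a + len, 8);
			memcpy(&vb, b + len, 8);
			if (va != vb)
				/* first set bit of the XOR marks the first differing byte */
				return len + (size_t)__builtin_ctzll(va ^ vb) / 8;
			len += 8;
		}
		while (len < limit && a[len] == b[len])
			len++;
		return len;
	}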
diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 7874b01e816e..b83c144d731f 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c | |||
| @@ -394,6 +394,44 @@ int sg_alloc_table_from_pages(struct sg_table *sgt, | |||
| 394 | } | 394 | } |
| 395 | EXPORT_SYMBOL(sg_alloc_table_from_pages); | 395 | EXPORT_SYMBOL(sg_alloc_table_from_pages); |
| 396 | 396 | ||
| 397 | void __sg_page_iter_start(struct sg_page_iter *piter, | ||
| 398 | struct scatterlist *sglist, unsigned int nents, | ||
| 399 | unsigned long pgoffset) | ||
| 400 | { | ||
| 401 | piter->__pg_advance = 0; | ||
| 402 | piter->__nents = nents; | ||
| 403 | |||
| 404 | piter->page = NULL; | ||
| 405 | piter->sg = sglist; | ||
| 406 | piter->sg_pgoffset = pgoffset; | ||
| 407 | } | ||
| 408 | EXPORT_SYMBOL(__sg_page_iter_start); | ||
| 409 | |||
| 410 | static int sg_page_count(struct scatterlist *sg) | ||
| 411 | { | ||
| 412 | return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT; | ||
| 413 | } | ||
| 414 | |||
| 415 | bool __sg_page_iter_next(struct sg_page_iter *piter) | ||
| 416 | { | ||
| 417 | if (!piter->__nents || !piter->sg) | ||
| 418 | return false; | ||
| 419 | |||
| 420 | piter->sg_pgoffset += piter->__pg_advance; | ||
| 421 | piter->__pg_advance = 1; | ||
| 422 | |||
| 423 | while (piter->sg_pgoffset >= sg_page_count(piter->sg)) { | ||
| 424 | piter->sg_pgoffset -= sg_page_count(piter->sg); | ||
| 425 | piter->sg = sg_next(piter->sg); | ||
| 426 | if (!--piter->__nents || !piter->sg) | ||
| 427 | return false; | ||
| 428 | } | ||
| 429 | piter->page = nth_page(sg_page(piter->sg), piter->sg_pgoffset); | ||
| 430 | |||
| 431 | return true; | ||
| 432 | } | ||
| 433 | EXPORT_SYMBOL(__sg_page_iter_next); | ||
| 434 | |||
| 397 | /** | 435 | /** |
| 398 | * sg_miter_start - start mapping iteration over a sg list | 436 | * sg_miter_start - start mapping iteration over a sg list |
| 399 | * @miter: sg mapping iter to be started | 437 | * @miter: sg mapping iter to be started |
| @@ -411,9 +449,7 @@ void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl, | |||
| 411 | { | 449 | { |
| 412 | memset(miter, 0, sizeof(struct sg_mapping_iter)); | 450 | memset(miter, 0, sizeof(struct sg_mapping_iter)); |
| 413 | 451 | ||
| 414 | miter->__sg = sgl; | 452 | __sg_page_iter_start(&miter->piter, sgl, nents, 0); |
| 415 | miter->__nents = nents; | ||
| 416 | miter->__offset = 0; | ||
| 417 | WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG))); | 453 | WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG))); |
| 418 | miter->__flags = flags; | 454 | miter->__flags = flags; |
| 419 | } | 455 | } |
| @@ -438,36 +474,35 @@ EXPORT_SYMBOL(sg_miter_start); | |||
| 438 | */ | 474 | */ |
| 439 | bool sg_miter_next(struct sg_mapping_iter *miter) | 475 | bool sg_miter_next(struct sg_mapping_iter *miter) |
| 440 | { | 476 | { |
| 441 | unsigned int off, len; | ||
| 442 | |||
| 443 | /* check for end and drop resources from the last iteration */ | ||
| 444 | if (!miter->__nents) | ||
| 445 | return false; | ||
| 446 | |||
| 447 | sg_miter_stop(miter); | 477 | sg_miter_stop(miter); |
| 448 | 478 | ||
| 449 | /* get to the next sg if necessary. __offset is adjusted by stop */ | 479 | /* |
| 450 | while (miter->__offset == miter->__sg->length) { | 480 | * Get to the next page if necessary. |
| 451 | if (--miter->__nents) { | 481 | * __remaining, __offset is adjusted by sg_miter_stop |
| 452 | miter->__sg = sg_next(miter->__sg); | 482 | */ |
| 453 | miter->__offset = 0; | 483 | if (!miter->__remaining) { |
| 454 | } else | 484 | struct scatterlist *sg; |
| 485 | unsigned long pgoffset; | ||
| 486 | |||
| 487 | if (!__sg_page_iter_next(&miter->piter)) | ||
| 455 | return false; | 488 | return false; |
| 456 | } | ||
| 457 | 489 | ||
| 458 | /* map the next page */ | 490 | sg = miter->piter.sg; |
| 459 | off = miter->__sg->offset + miter->__offset; | 491 | pgoffset = miter->piter.sg_pgoffset; |
| 460 | len = miter->__sg->length - miter->__offset; | ||
| 461 | 492 | ||
| 462 | miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT); | 493 | miter->__offset = pgoffset ? 0 : sg->offset; |
| 463 | off &= ~PAGE_MASK; | 494 | miter->__remaining = sg->offset + sg->length - |
| 464 | miter->length = min_t(unsigned int, len, PAGE_SIZE - off); | 495 | (pgoffset << PAGE_SHIFT) - miter->__offset; |
| 465 | miter->consumed = miter->length; | 496 | miter->__remaining = min_t(unsigned long, miter->__remaining, |
| 497 | PAGE_SIZE - miter->__offset); | ||
| 498 | } | ||
| 499 | miter->page = miter->piter.page; | ||
| 500 | miter->consumed = miter->length = miter->__remaining; | ||
| 466 | 501 | ||
| 467 | if (miter->__flags & SG_MITER_ATOMIC) | 502 | if (miter->__flags & SG_MITER_ATOMIC) |
| 468 | miter->addr = kmap_atomic(miter->page) + off; | 503 | miter->addr = kmap_atomic(miter->page) + miter->__offset; |
| 469 | else | 504 | else |
| 470 | miter->addr = kmap(miter->page) + off; | 505 | miter->addr = kmap(miter->page) + miter->__offset; |
| 471 | 506 | ||
| 472 | return true; | 507 | return true; |
| 473 | } | 508 | } |
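Aside: the usual mapping-iterator pattern that the reworked sg_miter_next() serves, sketched with a made-up helper; it ignores the SG_MITER_ATOMIC variant needed in atomic context.

	#include <linux/scatterlist.h>
	#include <linux/string.h>

	static void zero_sg(struct scatterlist *sgl, unsigned int nents)
	{
		struct sg_mapping_iter miter;

		sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG);
		while (sg_miter_next(&miter))
			/* miter.addr/miter.length describe one mapped page chunk */
			memset(miter.addr, 0, miter.length);
		sg_miter_stop(&miter);
	}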
| @@ -494,6 +529,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter) | |||
| 494 | /* drop resources from the last iteration */ | 529 | /* drop resources from the last iteration */ |
| 495 | if (miter->addr) { | 530 | if (miter->addr) { |
| 496 | miter->__offset += miter->consumed; | 531 | miter->__offset += miter->consumed; |
| 532 | miter->__remaining -= miter->consumed; | ||
| 497 | 533 | ||
| 498 | if (miter->__flags & SG_MITER_TO_SG) | 534 | if (miter->__flags & SG_MITER_TO_SG) |
| 499 | flush_kernel_dcache_page(miter->page); | 535 | flush_kernel_dcache_page(miter->page); |
