Diffstat (limited to 'lib')
-rw-r--r-- | lib/Makefile       |   2
-rw-r--r-- | lib/debugobjects.c |  21
-rw-r--r-- | lib/devres.c       |   2
-rw-r--r-- | lib/idr.c          | 446
-rw-r--r-- | lib/kfifo.c        | 607
-rw-r--r-- | lib/lru_cache.c    |   3
-rw-r--r-- | lib/scatterlist.c  |  86
7 files changed, 981 insertions, 186 deletions
diff --git a/lib/Makefile b/lib/Makefile
index 02ed6c04cd7d..d7946ff75b2e 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -23,7 +23,7 @@ lib-y += kobject.o klist.o | |||
23 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ | 23 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ |
24 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ | 24 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ |
25 | string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \ | 25 | string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \ |
26 | bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o | 26 | bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o |
27 | obj-y += kstrtox.o | 27 | obj-y += kstrtox.o |
28 | obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o | 28 | obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o |
29 | 29 | ||
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index d11808ca4bc4..37061ede8b81 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -109,11 +109,10 @@ static void fill_pool(void) | |||
109 | */ | 109 | */ |
110 | static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b) | 110 | static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b) |
111 | { | 111 | { |
112 | struct hlist_node *node; | ||
113 | struct debug_obj *obj; | 112 | struct debug_obj *obj; |
114 | int cnt = 0; | 113 | int cnt = 0; |
115 | 114 | ||
116 | hlist_for_each_entry(obj, node, &b->list, node) { | 115 | hlist_for_each_entry(obj, &b->list, node) { |
117 | cnt++; | 116 | cnt++; |
118 | if (obj->object == addr) | 117 | if (obj->object == addr) |
119 | return obj; | 118 | return obj; |
@@ -213,7 +212,7 @@ static void free_object(struct debug_obj *obj) | |||
213 | static void debug_objects_oom(void) | 212 | static void debug_objects_oom(void) |
214 | { | 213 | { |
215 | struct debug_bucket *db = obj_hash; | 214 | struct debug_bucket *db = obj_hash; |
216 | struct hlist_node *node, *tmp; | 215 | struct hlist_node *tmp; |
217 | HLIST_HEAD(freelist); | 216 | HLIST_HEAD(freelist); |
218 | struct debug_obj *obj; | 217 | struct debug_obj *obj; |
219 | unsigned long flags; | 218 | unsigned long flags; |
@@ -227,7 +226,7 @@ static void debug_objects_oom(void) | |||
227 | raw_spin_unlock_irqrestore(&db->lock, flags); | 226 | raw_spin_unlock_irqrestore(&db->lock, flags); |
228 | 227 | ||
229 | /* Now free them */ | 228 | /* Now free them */ |
230 | hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { | 229 | hlist_for_each_entry_safe(obj, tmp, &freelist, node) { |
231 | hlist_del(&obj->node); | 230 | hlist_del(&obj->node); |
232 | free_object(obj); | 231 | free_object(obj); |
233 | } | 232 | } |
@@ -658,7 +657,7 @@ debug_object_active_state(void *addr, struct debug_obj_descr *descr, | |||
658 | static void __debug_check_no_obj_freed(const void *address, unsigned long size) | 657 | static void __debug_check_no_obj_freed(const void *address, unsigned long size) |
659 | { | 658 | { |
660 | unsigned long flags, oaddr, saddr, eaddr, paddr, chunks; | 659 | unsigned long flags, oaddr, saddr, eaddr, paddr, chunks; |
661 | struct hlist_node *node, *tmp; | 660 | struct hlist_node *tmp; |
662 | HLIST_HEAD(freelist); | 661 | HLIST_HEAD(freelist); |
663 | struct debug_obj_descr *descr; | 662 | struct debug_obj_descr *descr; |
664 | enum debug_obj_state state; | 663 | enum debug_obj_state state; |
@@ -678,7 +677,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size) | |||
678 | repeat: | 677 | repeat: |
679 | cnt = 0; | 678 | cnt = 0; |
680 | raw_spin_lock_irqsave(&db->lock, flags); | 679 | raw_spin_lock_irqsave(&db->lock, flags); |
681 | hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) { | 680 | hlist_for_each_entry_safe(obj, tmp, &db->list, node) { |
682 | cnt++; | 681 | cnt++; |
683 | oaddr = (unsigned long) obj->object; | 682 | oaddr = (unsigned long) obj->object; |
684 | if (oaddr < saddr || oaddr >= eaddr) | 683 | if (oaddr < saddr || oaddr >= eaddr) |
@@ -702,7 +701,7 @@ repeat: | |||
702 | raw_spin_unlock_irqrestore(&db->lock, flags); | 701 | raw_spin_unlock_irqrestore(&db->lock, flags); |
703 | 702 | ||
704 | /* Now free them */ | 703 | /* Now free them */ |
705 | hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { | 704 | hlist_for_each_entry_safe(obj, tmp, &freelist, node) { |
706 | hlist_del(&obj->node); | 705 | hlist_del(&obj->node); |
707 | free_object(obj); | 706 | free_object(obj); |
708 | } | 707 | } |
@@ -1013,7 +1012,7 @@ void __init debug_objects_early_init(void) | |||
1013 | static int __init debug_objects_replace_static_objects(void) | 1012 | static int __init debug_objects_replace_static_objects(void) |
1014 | { | 1013 | { |
1015 | struct debug_bucket *db = obj_hash; | 1014 | struct debug_bucket *db = obj_hash; |
1016 | struct hlist_node *node, *tmp; | 1015 | struct hlist_node *tmp; |
1017 | struct debug_obj *obj, *new; | 1016 | struct debug_obj *obj, *new; |
1018 | HLIST_HEAD(objects); | 1017 | HLIST_HEAD(objects); |
1019 | int i, cnt = 0; | 1018 | int i, cnt = 0; |
@@ -1033,7 +1032,7 @@ static int __init debug_objects_replace_static_objects(void) | |||
1033 | local_irq_disable(); | 1032 | local_irq_disable(); |
1034 | 1033 | ||
1035 | /* Remove the statically allocated objects from the pool */ | 1034 | /* Remove the statically allocated objects from the pool */ |
1036 | hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node) | 1035 | hlist_for_each_entry_safe(obj, tmp, &obj_pool, node) |
1037 | hlist_del(&obj->node); | 1036 | hlist_del(&obj->node); |
1038 | /* Move the allocated objects to the pool */ | 1037 | /* Move the allocated objects to the pool */ |
1039 | hlist_move_list(&objects, &obj_pool); | 1038 | hlist_move_list(&objects, &obj_pool); |
@@ -1042,7 +1041,7 @@ static int __init debug_objects_replace_static_objects(void) | |||
1042 | for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { | 1041 | for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { |
1043 | hlist_move_list(&db->list, &objects); | 1042 | hlist_move_list(&db->list, &objects); |
1044 | 1043 | ||
1045 | hlist_for_each_entry(obj, node, &objects, node) { | 1044 | hlist_for_each_entry(obj, &objects, node) { |
1046 | new = hlist_entry(obj_pool.first, typeof(*obj), node); | 1045 | new = hlist_entry(obj_pool.first, typeof(*obj), node); |
1047 | hlist_del(&new->node); | 1046 | hlist_del(&new->node); |
1048 | /* copy object data */ | 1047 | /* copy object data */ |
@@ -1057,7 +1056,7 @@ static int __init debug_objects_replace_static_objects(void) | |||
1057 | obj_pool_used); | 1056 | obj_pool_used); |
1058 | return 0; | 1057 | return 0; |
1059 | free: | 1058 | free: |
1060 | hlist_for_each_entry_safe(obj, node, tmp, &objects, node) { | 1059 | hlist_for_each_entry_safe(obj, tmp, &objects, node) { |
1061 | hlist_del(&obj->node); | 1060 | hlist_del(&obj->node); |
1062 | kmem_cache_free(obj_cache, obj); | 1061 | kmem_cache_free(obj_cache, obj); |
1063 | } | 1062 | } |
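The debugobjects.c hunks above are a mechanical adaptation to the hlist iterator API, which dropped the separate struct hlist_node cursor argument from hlist_for_each_entry() and hlist_for_each_entry_safe(). A minimal sketch of the new calling convention follows; the struct and list names are hypothetical and not taken from the patch:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct item {
            int value;
            struct hlist_node node;
    };

    static HLIST_HEAD(item_list);

    static int sum_items(void)
    {
            struct item *it;
            int sum = 0;

            /* new style: the entry pointer itself is the cursor */
            hlist_for_each_entry(it, &item_list, node)
                    sum += it->value;
            return sum;
    }

    static void free_items(void)
    {
            struct item *it;
            struct hlist_node *tmp;

            /* the _safe variant still needs a temporary for the next node */
            hlist_for_each_entry_safe(it, tmp, &item_list, node) {
                    hlist_del(&it->node);
                    kfree(it);
            }
    }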
diff --git a/lib/devres.c b/lib/devres.c
index 88ad75952a76..823533138fa0 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -227,6 +227,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr) | |||
227 | devm_ioport_map_match, (void *)addr)); | 227 | devm_ioport_map_match, (void *)addr)); |
228 | } | 228 | } |
229 | EXPORT_SYMBOL(devm_ioport_unmap); | 229 | EXPORT_SYMBOL(devm_ioport_unmap); |
230 | #endif /* CONFIG_HAS_IOPORT */ | ||
230 | 231 | ||
231 | #ifdef CONFIG_PCI | 232 | #ifdef CONFIG_PCI |
232 | /* | 233 | /* |
@@ -432,4 +433,3 @@ void pcim_iounmap_regions(struct pci_dev *pdev, int mask) | |||
432 | } | 433 | } |
433 | EXPORT_SYMBOL(pcim_iounmap_regions); | 434 | EXPORT_SYMBOL(pcim_iounmap_regions); |
434 | #endif /* CONFIG_PCI */ | 435 | #endif /* CONFIG_PCI */ |
435 | #endif /* CONFIG_HAS_IOPORT */ | ||
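The devres.c hunk only moves the closing #endif: previously the CONFIG_PCI helpers were nested inside the CONFIG_HAS_IOPORT guard, so they were silently dropped on PCI-enabled configurations without ioport support. A simplified sketch of the guard layout after the change (function bodies elided, comments are illustrative):

    #ifdef CONFIG_HAS_IOPORT
    /* devm_ioport_map(), devm_ioport_unmap() ... */
    #endif /* CONFIG_HAS_IOPORT */

    #ifdef CONFIG_PCI
    /* pcim_iomap_regions(), pcim_iounmap_regions() and friends are now
     * compiled whenever CONFIG_PCI is set, independent of ioport support */
    #endif /* CONFIG_PCI */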
diff --git a/lib/idr.c b/lib/idr.c
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -35,10 +35,41 @@
35 | #include <linux/string.h> | 35 | #include <linux/string.h> |
36 | #include <linux/idr.h> | 36 | #include <linux/idr.h> |
37 | #include <linux/spinlock.h> | 37 | #include <linux/spinlock.h> |
38 | #include <linux/percpu.h> | ||
39 | #include <linux/hardirq.h> | ||
40 | |||
41 | #define MAX_IDR_SHIFT (sizeof(int) * 8 - 1) | ||
42 | #define MAX_IDR_BIT (1U << MAX_IDR_SHIFT) | ||
43 | |||
44 | /* Leave the possibility of an incomplete final layer */ | ||
45 | #define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS) | ||
46 | |||
47 | /* Number of id_layer structs to leave in free list */ | ||
48 | #define MAX_IDR_FREE (MAX_IDR_LEVEL * 2) | ||
38 | 49 | ||
39 | static struct kmem_cache *idr_layer_cache; | 50 | static struct kmem_cache *idr_layer_cache; |
51 | static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head); | ||
52 | static DEFINE_PER_CPU(int, idr_preload_cnt); | ||
40 | static DEFINE_SPINLOCK(simple_ida_lock); | 53 | static DEFINE_SPINLOCK(simple_ida_lock); |
41 | 54 | ||
55 | /* the maximum ID which can be allocated given idr->layers */ | ||
56 | static int idr_max(int layers) | ||
57 | { | ||
58 | int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT); | ||
59 | |||
60 | return (1 << bits) - 1; | ||
61 | } | ||
62 | |||
63 | /* | ||
64 | * Prefix mask for an idr_layer at @layer. For layer 0, the prefix mask is | ||
65 | * all bits except for the lower IDR_BITS. For layer 1, 2 * IDR_BITS, and | ||
66 | * so on. | ||
67 | */ | ||
68 | static int idr_layer_prefix_mask(int layer) | ||
69 | { | ||
70 | return ~idr_max(layer + 1); | ||
71 | } | ||
72 | |||
42 | static struct idr_layer *get_from_free_list(struct idr *idp) | 73 | static struct idr_layer *get_from_free_list(struct idr *idp) |
43 | { | 74 | { |
44 | struct idr_layer *p; | 75 | struct idr_layer *p; |
@@ -54,6 +85,50 @@ static struct idr_layer *get_from_free_list(struct idr *idp) | |||
54 | return(p); | 85 | return(p); |
55 | } | 86 | } |
56 | 87 | ||
88 | /** | ||
89 | * idr_layer_alloc - allocate a new idr_layer | ||
90 | * @gfp_mask: allocation mask | ||
91 | * @layer_idr: optional idr to allocate from | ||
92 | * | ||
93 | * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch | ||
94 | * one from the per-cpu preload buffer. If @layer_idr is not %NULL, fetch | ||
95 | * an idr_layer from @idr->id_free. | ||
96 | * | ||
97 | * @layer_idr is to maintain backward compatibility with the old alloc | ||
98 | * interface - idr_pre_get() and idr_get_new*() - and will be removed | ||
99 | * together with per-pool preload buffer. | ||
100 | */ | ||
101 | static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr) | ||
102 | { | ||
103 | struct idr_layer *new; | ||
104 | |||
105 | /* this is the old path, bypass to get_from_free_list() */ | ||
106 | if (layer_idr) | ||
107 | return get_from_free_list(layer_idr); | ||
108 | |||
109 | /* try to allocate directly from kmem_cache */ | ||
110 | new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); | ||
111 | if (new) | ||
112 | return new; | ||
113 | |||
114 | /* | ||
115 | * Try to fetch one from the per-cpu preload buffer if in process | ||
116 | * context. See idr_preload() for details. | ||
117 | */ | ||
118 | if (in_interrupt()) | ||
119 | return NULL; | ||
120 | |||
121 | preempt_disable(); | ||
122 | new = __this_cpu_read(idr_preload_head); | ||
123 | if (new) { | ||
124 | __this_cpu_write(idr_preload_head, new->ary[0]); | ||
125 | __this_cpu_dec(idr_preload_cnt); | ||
126 | new->ary[0] = NULL; | ||
127 | } | ||
128 | preempt_enable(); | ||
129 | return new; | ||
130 | } | ||
131 | |||
57 | static void idr_layer_rcu_free(struct rcu_head *head) | 132 | static void idr_layer_rcu_free(struct rcu_head *head) |
58 | { | 133 | { |
59 | struct idr_layer *layer; | 134 | struct idr_layer *layer; |
@@ -62,8 +137,10 @@ static void idr_layer_rcu_free(struct rcu_head *head) | |||
62 | kmem_cache_free(idr_layer_cache, layer); | 137 | kmem_cache_free(idr_layer_cache, layer); |
63 | } | 138 | } |
64 | 139 | ||
65 | static inline void free_layer(struct idr_layer *p) | 140 | static inline void free_layer(struct idr *idr, struct idr_layer *p) |
66 | { | 141 | { |
142 | if (idr->hint && idr->hint == p) | ||
143 | RCU_INIT_POINTER(idr->hint, NULL); | ||
67 | call_rcu(&p->rcu_head, idr_layer_rcu_free); | 144 | call_rcu(&p->rcu_head, idr_layer_rcu_free); |
68 | } | 145 | } |
69 | 146 | ||
@@ -92,18 +169,18 @@ static void idr_mark_full(struct idr_layer **pa, int id) | |||
92 | struct idr_layer *p = pa[0]; | 169 | struct idr_layer *p = pa[0]; |
93 | int l = 0; | 170 | int l = 0; |
94 | 171 | ||
95 | __set_bit(id & IDR_MASK, &p->bitmap); | 172 | __set_bit(id & IDR_MASK, p->bitmap); |
96 | /* | 173 | /* |
97 | * If this layer is full mark the bit in the layer above to | 174 | * If this layer is full mark the bit in the layer above to |
98 | * show that this part of the radix tree is full. This may | 175 | * show that this part of the radix tree is full. This may |
99 | * complete the layer above and require walking up the radix | 176 | * complete the layer above and require walking up the radix |
100 | * tree. | 177 | * tree. |
101 | */ | 178 | */ |
102 | while (p->bitmap == IDR_FULL) { | 179 | while (bitmap_full(p->bitmap, IDR_SIZE)) { |
103 | if (!(p = pa[++l])) | 180 | if (!(p = pa[++l])) |
104 | break; | 181 | break; |
105 | id = id >> IDR_BITS; | 182 | id = id >> IDR_BITS; |
106 | __set_bit((id & IDR_MASK), &p->bitmap); | 183 | __set_bit((id & IDR_MASK), p->bitmap); |
107 | } | 184 | } |
108 | } | 185 | } |
109 | 186 | ||
@@ -133,12 +210,29 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask) | |||
133 | } | 210 | } |
134 | EXPORT_SYMBOL(idr_pre_get); | 211 | EXPORT_SYMBOL(idr_pre_get); |
135 | 212 | ||
136 | static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) | 213 | /** |
214 | * sub_alloc - try to allocate an id without growing the tree depth | ||
215 | * @idp: idr handle | ||
216 | * @starting_id: id to start search at | ||
217 | * @id: pointer to the allocated handle | ||
218 | * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer | ||
219 | * @gfp_mask: allocation mask for idr_layer_alloc() | ||
220 | * @layer_idr: optional idr passed to idr_layer_alloc() | ||
221 | * | ||
222 | * Allocate an id in range [@starting_id, INT_MAX] from @idp without | ||
223 | * growing its depth. Returns | ||
224 | * | ||
225 | * the allocated id >= 0 if successful, | ||
226 | * -EAGAIN if the tree needs to grow for allocation to succeed, | ||
227 | * -ENOSPC if the id space is exhausted, | ||
228 | * -ENOMEM if more idr_layers need to be allocated. | ||
229 | */ | ||
230 | static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa, | ||
231 | gfp_t gfp_mask, struct idr *layer_idr) | ||
137 | { | 232 | { |
138 | int n, m, sh; | 233 | int n, m, sh; |
139 | struct idr_layer *p, *new; | 234 | struct idr_layer *p, *new; |
140 | int l, id, oid; | 235 | int l, id, oid; |
141 | unsigned long bm; | ||
142 | 236 | ||
143 | id = *starting_id; | 237 | id = *starting_id; |
144 | restart: | 238 | restart: |
@@ -150,8 +244,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) | |||
150 | * We run around this while until we reach the leaf node... | 244 | * We run around this while until we reach the leaf node... |
151 | */ | 245 | */ |
152 | n = (id >> (IDR_BITS*l)) & IDR_MASK; | 246 | n = (id >> (IDR_BITS*l)) & IDR_MASK; |
153 | bm = ~p->bitmap; | 247 | m = find_next_zero_bit(p->bitmap, IDR_SIZE, n); |
154 | m = find_next_bit(&bm, IDR_SIZE, n); | ||
155 | if (m == IDR_SIZE) { | 248 | if (m == IDR_SIZE) { |
156 | /* no space available go back to previous layer. */ | 249 | /* no space available go back to previous layer. */ |
157 | l++; | 250 | l++; |
@@ -161,7 +254,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) | |||
161 | /* if already at the top layer, we need to grow */ | 254 | /* if already at the top layer, we need to grow */ |
162 | if (id >= 1 << (idp->layers * IDR_BITS)) { | 255 | if (id >= 1 << (idp->layers * IDR_BITS)) { |
163 | *starting_id = id; | 256 | *starting_id = id; |
164 | return IDR_NEED_TO_GROW; | 257 | return -EAGAIN; |
165 | } | 258 | } |
166 | p = pa[l]; | 259 | p = pa[l]; |
167 | BUG_ON(!p); | 260 | BUG_ON(!p); |
@@ -180,17 +273,18 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) | |||
180 | id = ((id >> sh) ^ n ^ m) << sh; | 273 | id = ((id >> sh) ^ n ^ m) << sh; |
181 | } | 274 | } |
182 | if ((id >= MAX_IDR_BIT) || (id < 0)) | 275 | if ((id >= MAX_IDR_BIT) || (id < 0)) |
183 | return IDR_NOMORE_SPACE; | 276 | return -ENOSPC; |
184 | if (l == 0) | 277 | if (l == 0) |
185 | break; | 278 | break; |
186 | /* | 279 | /* |
187 | * Create the layer below if it is missing. | 280 | * Create the layer below if it is missing. |
188 | */ | 281 | */ |
189 | if (!p->ary[m]) { | 282 | if (!p->ary[m]) { |
190 | new = get_from_free_list(idp); | 283 | new = idr_layer_alloc(gfp_mask, layer_idr); |
191 | if (!new) | 284 | if (!new) |
192 | return -1; | 285 | return -ENOMEM; |
193 | new->layer = l-1; | 286 | new->layer = l-1; |
287 | new->prefix = id & idr_layer_prefix_mask(new->layer); | ||
194 | rcu_assign_pointer(p->ary[m], new); | 288 | rcu_assign_pointer(p->ary[m], new); |
195 | p->count++; | 289 | p->count++; |
196 | } | 290 | } |
@@ -203,7 +297,8 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) | |||
203 | } | 297 | } |
204 | 298 | ||
205 | static int idr_get_empty_slot(struct idr *idp, int starting_id, | 299 | static int idr_get_empty_slot(struct idr *idp, int starting_id, |
206 | struct idr_layer **pa) | 300 | struct idr_layer **pa, gfp_t gfp_mask, |
301 | struct idr *layer_idr) | ||
207 | { | 302 | { |
208 | struct idr_layer *p, *new; | 303 | struct idr_layer *p, *new; |
209 | int layers, v, id; | 304 | int layers, v, id; |
@@ -214,8 +309,8 @@ build_up: | |||
214 | p = idp->top; | 309 | p = idp->top; |
215 | layers = idp->layers; | 310 | layers = idp->layers; |
216 | if (unlikely(!p)) { | 311 | if (unlikely(!p)) { |
217 | if (!(p = get_from_free_list(idp))) | 312 | if (!(p = idr_layer_alloc(gfp_mask, layer_idr))) |
218 | return -1; | 313 | return -ENOMEM; |
219 | p->layer = 0; | 314 | p->layer = 0; |
220 | layers = 1; | 315 | layers = 1; |
221 | } | 316 | } |
@@ -223,7 +318,7 @@ build_up: | |||
223 | * Add a new layer to the top of the tree if the requested | 318 | * Add a new layer to the top of the tree if the requested |
224 | * id is larger than the currently allocated space. | 319 | * id is larger than the currently allocated space. |
225 | */ | 320 | */ |
226 | while ((layers < (MAX_IDR_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) { | 321 | while (id > idr_max(layers)) { |
227 | layers++; | 322 | layers++; |
228 | if (!p->count) { | 323 | if (!p->count) { |
229 | /* special case: if the tree is currently empty, | 324 | /* special case: if the tree is currently empty, |
@@ -231,9 +326,10 @@ build_up: | |||
231 | * upwards. | 326 | * upwards. |
232 | */ | 327 | */ |
233 | p->layer++; | 328 | p->layer++; |
329 | WARN_ON_ONCE(p->prefix); | ||
234 | continue; | 330 | continue; |
235 | } | 331 | } |
236 | if (!(new = get_from_free_list(idp))) { | 332 | if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) { |
237 | /* | 333 | /* |
238 | * The allocation failed. If we built part of | 334 | * The allocation failed. If we built part of |
239 | * the structure tear it down. | 335 | * the structure tear it down. |
@@ -242,45 +338,42 @@ build_up: | |||
242 | for (new = p; p && p != idp->top; new = p) { | 338 | for (new = p; p && p != idp->top; new = p) { |
243 | p = p->ary[0]; | 339 | p = p->ary[0]; |
244 | new->ary[0] = NULL; | 340 | new->ary[0] = NULL; |
245 | new->bitmap = new->count = 0; | 341 | new->count = 0; |
342 | bitmap_clear(new->bitmap, 0, IDR_SIZE); | ||
246 | __move_to_free_list(idp, new); | 343 | __move_to_free_list(idp, new); |
247 | } | 344 | } |
248 | spin_unlock_irqrestore(&idp->lock, flags); | 345 | spin_unlock_irqrestore(&idp->lock, flags); |
249 | return -1; | 346 | return -ENOMEM; |
250 | } | 347 | } |
251 | new->ary[0] = p; | 348 | new->ary[0] = p; |
252 | new->count = 1; | 349 | new->count = 1; |
253 | new->layer = layers-1; | 350 | new->layer = layers-1; |
254 | if (p->bitmap == IDR_FULL) | 351 | new->prefix = id & idr_layer_prefix_mask(new->layer); |
255 | __set_bit(0, &new->bitmap); | 352 | if (bitmap_full(p->bitmap, IDR_SIZE)) |
353 | __set_bit(0, new->bitmap); | ||
256 | p = new; | 354 | p = new; |
257 | } | 355 | } |
258 | rcu_assign_pointer(idp->top, p); | 356 | rcu_assign_pointer(idp->top, p); |
259 | idp->layers = layers; | 357 | idp->layers = layers; |
260 | v = sub_alloc(idp, &id, pa); | 358 | v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr); |
261 | if (v == IDR_NEED_TO_GROW) | 359 | if (v == -EAGAIN) |
262 | goto build_up; | 360 | goto build_up; |
263 | return(v); | 361 | return(v); |
264 | } | 362 | } |
265 | 363 | ||
266 | static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) | 364 | /* |
365 | * @id and @pa are from a successful allocation from idr_get_empty_slot(). | ||
366 | * Install the user pointer @ptr and mark the slot full. | ||
367 | */ | ||
368 | static void idr_fill_slot(struct idr *idr, void *ptr, int id, | ||
369 | struct idr_layer **pa) | ||
267 | { | 370 | { |
268 | struct idr_layer *pa[MAX_IDR_LEVEL]; | 371 | /* update hint used for lookup, cleared from free_layer() */ |
269 | int id; | 372 | rcu_assign_pointer(idr->hint, pa[0]); |
270 | |||
271 | id = idr_get_empty_slot(idp, starting_id, pa); | ||
272 | if (id >= 0) { | ||
273 | /* | ||
274 | * Successfully found an empty slot. Install the user | ||
275 | * pointer and mark the slot full. | ||
276 | */ | ||
277 | rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], | ||
278 | (struct idr_layer *)ptr); | ||
279 | pa[0]->count++; | ||
280 | idr_mark_full(pa, id); | ||
281 | } | ||
282 | 373 | ||
283 | return id; | 374 | rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr); |
375 | pa[0]->count++; | ||
376 | idr_mark_full(pa, id); | ||
284 | } | 377 | } |
285 | 378 | ||
286 | /** | 379 | /** |
@@ -303,49 +396,124 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) | |||
303 | */ | 396 | */ |
304 | int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) | 397 | int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) |
305 | { | 398 | { |
399 | struct idr_layer *pa[MAX_IDR_LEVEL + 1]; | ||
306 | int rv; | 400 | int rv; |
307 | 401 | ||
308 | rv = idr_get_new_above_int(idp, ptr, starting_id); | 402 | rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp); |
309 | /* | ||
310 | * This is a cheap hack until the IDR code can be fixed to | ||
311 | * return proper error values. | ||
312 | */ | ||
313 | if (rv < 0) | 403 | if (rv < 0) |
314 | return _idr_rc_to_errno(rv); | 404 | return rv == -ENOMEM ? -EAGAIN : rv; |
405 | |||
406 | idr_fill_slot(idp, ptr, rv, pa); | ||
315 | *id = rv; | 407 | *id = rv; |
316 | return 0; | 408 | return 0; |
317 | } | 409 | } |
318 | EXPORT_SYMBOL(idr_get_new_above); | 410 | EXPORT_SYMBOL(idr_get_new_above); |
319 | 411 | ||
320 | /** | 412 | /** |
321 | * idr_get_new - allocate new idr entry | 413 | * idr_preload - preload for idr_alloc() |
322 | * @idp: idr handle | 414 | * @gfp_mask: allocation mask to use for preloading |
323 | * @ptr: pointer you want associated with the id | ||
324 | * @id: pointer to the allocated handle | ||
325 | * | 415 | * |
326 | * If allocation from IDR's private freelist fails, idr_get_new_above() will | 416 | * Preload per-cpu layer buffer for idr_alloc(). Can only be used from |
327 | * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill | 417 | * process context and each idr_preload() invocation should be matched with |
328 | * IDR's preallocation and then retry the idr_get_new_above() call. | 418 | * idr_preload_end(). Note that preemption is disabled while preloaded. |
329 | * | 419 | * |
330 | * If the idr is full idr_get_new_above() will return %-ENOSPC. | 420 | * The first idr_alloc() in the preloaded section can be treated as if it |
421 | * were invoked with @gfp_mask used for preloading. This allows using more | ||
422 | * permissive allocation masks for idrs protected by spinlocks. | ||
423 | * | ||
424 | * For example, if idr_alloc() below fails, the failure can be treated as | ||
425 | * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT. | ||
426 | * | ||
427 | * idr_preload(GFP_KERNEL); | ||
428 | * spin_lock(lock); | ||
429 | * | ||
430 | * id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT); | ||
331 | * | 431 | * |
332 | * @id returns a value in the range %0 ... %0x7fffffff | 432 | * spin_unlock(lock); |
433 | * idr_preload_end(); | ||
434 | * if (id < 0) | ||
435 | * error; | ||
333 | */ | 436 | */ |
334 | int idr_get_new(struct idr *idp, void *ptr, int *id) | 437 | void idr_preload(gfp_t gfp_mask) |
335 | { | 438 | { |
336 | int rv; | 439 | /* |
440 | * Consuming preload buffer from non-process context breaks preload | ||
441 | * allocation guarantee. Disallow usage from those contexts. | ||
442 | */ | ||
443 | WARN_ON_ONCE(in_interrupt()); | ||
444 | might_sleep_if(gfp_mask & __GFP_WAIT); | ||
445 | |||
446 | preempt_disable(); | ||
337 | 447 | ||
338 | rv = idr_get_new_above_int(idp, ptr, 0); | ||
339 | /* | 448 | /* |
340 | * This is a cheap hack until the IDR code can be fixed to | 449 | * idr_alloc() is likely to succeed w/o full idr_layer buffer and |
341 | * return proper error values. | 450 | * return value from idr_alloc() needs to be checked for failure |
451 | * anyway. Silently give up if allocation fails. The caller can | ||
452 | * treat failures from idr_alloc() as if idr_alloc() were called | ||
453 | * with @gfp_mask which should be enough. | ||
342 | */ | 454 | */ |
343 | if (rv < 0) | 455 | while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) { |
344 | return _idr_rc_to_errno(rv); | 456 | struct idr_layer *new; |
345 | *id = rv; | 457 | |
346 | return 0; | 458 | preempt_enable(); |
459 | new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); | ||
460 | preempt_disable(); | ||
461 | if (!new) | ||
462 | break; | ||
463 | |||
464 | /* link the new one to per-cpu preload list */ | ||
465 | new->ary[0] = __this_cpu_read(idr_preload_head); | ||
466 | __this_cpu_write(idr_preload_head, new); | ||
467 | __this_cpu_inc(idr_preload_cnt); | ||
468 | } | ||
347 | } | 469 | } |
348 | EXPORT_SYMBOL(idr_get_new); | 470 | EXPORT_SYMBOL(idr_preload); |
471 | |||
472 | /** | ||
473 | * idr_alloc - allocate new idr entry | ||
474 | * @idr: the (initialized) idr | ||
475 | * @ptr: pointer to be associated with the new id | ||
476 | * @start: the minimum id (inclusive) | ||
477 | * @end: the maximum id (exclusive, <= 0 for max) | ||
478 | * @gfp_mask: memory allocation flags | ||
479 | * | ||
480 | * Allocate an id in [start, end) and associate it with @ptr. If no ID is | ||
481 | * available in the specified range, returns -ENOSPC. On memory allocation | ||
482 | * failure, returns -ENOMEM. | ||
483 | * | ||
484 | * Note that @end is treated as max when <= 0. This is to always allow | ||
485 | * using @start + N as @end as long as N is inside integer range. | ||
486 | * | ||
487 | * The user is responsible for exclusively synchronizing all operations | ||
488 | * which may modify @idr. However, read-only accesses such as idr_find() | ||
489 | * or iteration can be performed under RCU read lock provided the user | ||
490 | * destroys @ptr in RCU-safe way after removal from idr. | ||
491 | */ | ||
492 | int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask) | ||
493 | { | ||
494 | int max = end > 0 ? end - 1 : INT_MAX; /* inclusive upper limit */ | ||
495 | struct idr_layer *pa[MAX_IDR_LEVEL + 1]; | ||
496 | int id; | ||
497 | |||
498 | might_sleep_if(gfp_mask & __GFP_WAIT); | ||
499 | |||
500 | /* sanity checks */ | ||
501 | if (WARN_ON_ONCE(start < 0)) | ||
502 | return -EINVAL; | ||
503 | if (unlikely(max < start)) | ||
504 | return -ENOSPC; | ||
505 | |||
506 | /* allocate id */ | ||
507 | id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL); | ||
508 | if (unlikely(id < 0)) | ||
509 | return id; | ||
510 | if (unlikely(id > max)) | ||
511 | return -ENOSPC; | ||
512 | |||
513 | idr_fill_slot(idr, ptr, id, pa); | ||
514 | return id; | ||
515 | } | ||
516 | EXPORT_SYMBOL_GPL(idr_alloc); | ||
349 | 517 | ||
350 | static void idr_remove_warning(int id) | 518 | static void idr_remove_warning(int id) |
351 | { | 519 | { |
@@ -357,7 +525,7 @@ static void idr_remove_warning(int id) | |||
357 | static void sub_remove(struct idr *idp, int shift, int id) | 525 | static void sub_remove(struct idr *idp, int shift, int id) |
358 | { | 526 | { |
359 | struct idr_layer *p = idp->top; | 527 | struct idr_layer *p = idp->top; |
360 | struct idr_layer **pa[MAX_IDR_LEVEL]; | 528 | struct idr_layer **pa[MAX_IDR_LEVEL + 1]; |
361 | struct idr_layer ***paa = &pa[0]; | 529 | struct idr_layer ***paa = &pa[0]; |
362 | struct idr_layer *to_free; | 530 | struct idr_layer *to_free; |
363 | int n; | 531 | int n; |
@@ -367,26 +535,26 @@ static void sub_remove(struct idr *idp, int shift, int id) | |||
367 | 535 | ||
368 | while ((shift > 0) && p) { | 536 | while ((shift > 0) && p) { |
369 | n = (id >> shift) & IDR_MASK; | 537 | n = (id >> shift) & IDR_MASK; |
370 | __clear_bit(n, &p->bitmap); | 538 | __clear_bit(n, p->bitmap); |
371 | *++paa = &p->ary[n]; | 539 | *++paa = &p->ary[n]; |
372 | p = p->ary[n]; | 540 | p = p->ary[n]; |
373 | shift -= IDR_BITS; | 541 | shift -= IDR_BITS; |
374 | } | 542 | } |
375 | n = id & IDR_MASK; | 543 | n = id & IDR_MASK; |
376 | if (likely(p != NULL && test_bit(n, &p->bitmap))){ | 544 | if (likely(p != NULL && test_bit(n, p->bitmap))) { |
377 | __clear_bit(n, &p->bitmap); | 545 | __clear_bit(n, p->bitmap); |
378 | rcu_assign_pointer(p->ary[n], NULL); | 546 | rcu_assign_pointer(p->ary[n], NULL); |
379 | to_free = NULL; | 547 | to_free = NULL; |
380 | while(*paa && ! --((**paa)->count)){ | 548 | while(*paa && ! --((**paa)->count)){ |
381 | if (to_free) | 549 | if (to_free) |
382 | free_layer(to_free); | 550 | free_layer(idp, to_free); |
383 | to_free = **paa; | 551 | to_free = **paa; |
384 | **paa-- = NULL; | 552 | **paa-- = NULL; |
385 | } | 553 | } |
386 | if (!*paa) | 554 | if (!*paa) |
387 | idp->layers = 0; | 555 | idp->layers = 0; |
388 | if (to_free) | 556 | if (to_free) |
389 | free_layer(to_free); | 557 | free_layer(idp, to_free); |
390 | } else | 558 | } else |
391 | idr_remove_warning(id); | 559 | idr_remove_warning(id); |
392 | } | 560 | } |
@@ -401,8 +569,9 @@ void idr_remove(struct idr *idp, int id) | |||
401 | struct idr_layer *p; | 569 | struct idr_layer *p; |
402 | struct idr_layer *to_free; | 570 | struct idr_layer *to_free; |
403 | 571 | ||
404 | /* Mask off upper bits we don't use for the search. */ | 572 | /* see comment in idr_find_slowpath() */ |
405 | id &= MAX_IDR_MASK; | 573 | if (WARN_ON_ONCE(id < 0)) |
574 | return; | ||
406 | 575 | ||
407 | sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); | 576 | sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); |
408 | if (idp->top && idp->top->count == 1 && (idp->layers > 1) && | 577 | if (idp->top && idp->top->count == 1 && (idp->layers > 1) && |
@@ -417,8 +586,9 @@ void idr_remove(struct idr *idp, int id) | |||
417 | p = idp->top->ary[0]; | 586 | p = idp->top->ary[0]; |
418 | rcu_assign_pointer(idp->top, p); | 587 | rcu_assign_pointer(idp->top, p); |
419 | --idp->layers; | 588 | --idp->layers; |
420 | to_free->bitmap = to_free->count = 0; | 589 | to_free->count = 0; |
421 | free_layer(to_free); | 590 | bitmap_clear(to_free->bitmap, 0, IDR_SIZE); |
591 | free_layer(idp, to_free); | ||
422 | } | 592 | } |
423 | while (idp->id_free_cnt >= MAX_IDR_FREE) { | 593 | while (idp->id_free_cnt >= MAX_IDR_FREE) { |
424 | p = get_from_free_list(idp); | 594 | p = get_from_free_list(idp); |
@@ -433,34 +603,21 @@ void idr_remove(struct idr *idp, int id) | |||
433 | } | 603 | } |
434 | EXPORT_SYMBOL(idr_remove); | 604 | EXPORT_SYMBOL(idr_remove); |
435 | 605 | ||
436 | /** | 606 | void __idr_remove_all(struct idr *idp) |
437 | * idr_remove_all - remove all ids from the given idr tree | ||
438 | * @idp: idr handle | ||
439 | * | ||
440 | * idr_destroy() only frees up unused, cached idp_layers, but this | ||
441 | * function will remove all id mappings and leave all idp_layers | ||
442 | * unused. | ||
443 | * | ||
444 | * A typical clean-up sequence for objects stored in an idr tree will | ||
445 | * use idr_for_each() to free all objects, if necessay, then | ||
446 | * idr_remove_all() to remove all ids, and idr_destroy() to free | ||
447 | * up the cached idr_layers. | ||
448 | */ | ||
449 | void idr_remove_all(struct idr *idp) | ||
450 | { | 607 | { |
451 | int n, id, max; | 608 | int n, id, max; |
452 | int bt_mask; | 609 | int bt_mask; |
453 | struct idr_layer *p; | 610 | struct idr_layer *p; |
454 | struct idr_layer *pa[MAX_IDR_LEVEL]; | 611 | struct idr_layer *pa[MAX_IDR_LEVEL + 1]; |
455 | struct idr_layer **paa = &pa[0]; | 612 | struct idr_layer **paa = &pa[0]; |
456 | 613 | ||
457 | n = idp->layers * IDR_BITS; | 614 | n = idp->layers * IDR_BITS; |
458 | p = idp->top; | 615 | p = idp->top; |
459 | rcu_assign_pointer(idp->top, NULL); | 616 | rcu_assign_pointer(idp->top, NULL); |
460 | max = 1 << n; | 617 | max = idr_max(idp->layers); |
461 | 618 | ||
462 | id = 0; | 619 | id = 0; |
463 | while (id < max) { | 620 | while (id >= 0 && id <= max) { |
464 | while (n > IDR_BITS && p) { | 621 | while (n > IDR_BITS && p) { |
465 | n -= IDR_BITS; | 622 | n -= IDR_BITS; |
466 | *paa++ = p; | 623 | *paa++ = p; |
@@ -472,21 +629,32 @@ void idr_remove_all(struct idr *idp) | |||
472 | /* Get the highest bit that the above add changed from 0->1. */ | 629 | /* Get the highest bit that the above add changed from 0->1. */ |
473 | while (n < fls(id ^ bt_mask)) { | 630 | while (n < fls(id ^ bt_mask)) { |
474 | if (p) | 631 | if (p) |
475 | free_layer(p); | 632 | free_layer(idp, p); |
476 | n += IDR_BITS; | 633 | n += IDR_BITS; |
477 | p = *--paa; | 634 | p = *--paa; |
478 | } | 635 | } |
479 | } | 636 | } |
480 | idp->layers = 0; | 637 | idp->layers = 0; |
481 | } | 638 | } |
482 | EXPORT_SYMBOL(idr_remove_all); | 639 | EXPORT_SYMBOL(__idr_remove_all); |
483 | 640 | ||
484 | /** | 641 | /** |
485 | * idr_destroy - release all cached layers within an idr tree | 642 | * idr_destroy - release all cached layers within an idr tree |
486 | * @idp: idr handle | 643 | * @idp: idr handle |
644 | * | ||
645 | * Free all id mappings and all idp_layers. After this function, @idp is | ||
646 | * completely unused and can be freed / recycled. The caller is | ||
647 | * responsible for ensuring that no one else accesses @idp during or after | ||
648 | * idr_destroy(). | ||
649 | * | ||
650 | * A typical clean-up sequence for objects stored in an idr tree will use | ||
651 | * idr_for_each() to free all objects, if necessary, then idr_destroy() to | ||
652 | * free up the id mappings and cached idr_layers. | ||
487 | */ | 653 | */ |
488 | void idr_destroy(struct idr *idp) | 654 | void idr_destroy(struct idr *idp) |
489 | { | 655 | { |
656 | __idr_remove_all(idp); | ||
657 | |||
490 | while (idp->id_free_cnt) { | 658 | while (idp->id_free_cnt) { |
491 | struct idr_layer *p = get_from_free_list(idp); | 659 | struct idr_layer *p = get_from_free_list(idp); |
492 | kmem_cache_free(idr_layer_cache, p); | 660 | kmem_cache_free(idr_layer_cache, p); |
@@ -494,32 +662,28 @@ void idr_destroy(struct idr *idp) | |||
494 | } | 662 | } |
495 | EXPORT_SYMBOL(idr_destroy); | 663 | EXPORT_SYMBOL(idr_destroy); |
496 | 664 | ||
497 | /** | 665 | void *idr_find_slowpath(struct idr *idp, int id) |
498 | * idr_find - return pointer for given id | ||
499 | * @idp: idr handle | ||
500 | * @id: lookup key | ||
501 | * | ||
502 | * Return the pointer given the id it has been registered with. A %NULL | ||
503 | * return indicates that @id is not valid or you passed %NULL in | ||
504 | * idr_get_new(). | ||
505 | * | ||
506 | * This function can be called under rcu_read_lock(), given that the leaf | ||
507 | * pointers lifetimes are correctly managed. | ||
508 | */ | ||
509 | void *idr_find(struct idr *idp, int id) | ||
510 | { | 666 | { |
511 | int n; | 667 | int n; |
512 | struct idr_layer *p; | 668 | struct idr_layer *p; |
513 | 669 | ||
670 | /* | ||
671 | * If @id is negative, idr_find() used to ignore the sign bit and | ||
672 | * performed lookup with the rest of bits, which is weird and can | ||
673 | * lead to very obscure bugs. We're now returning NULL for all | ||
674 | * negative IDs but just in case somebody was depending on the sign | ||
675 | * bit being ignored, let's trigger WARN_ON_ONCE() so that they can | ||
676 | * be detected and fixed. WARN_ON_ONCE() can later be removed. | ||
677 | */ | ||
678 | if (WARN_ON_ONCE(id < 0)) | ||
679 | return NULL; | ||
680 | |||
514 | p = rcu_dereference_raw(idp->top); | 681 | p = rcu_dereference_raw(idp->top); |
515 | if (!p) | 682 | if (!p) |
516 | return NULL; | 683 | return NULL; |
517 | n = (p->layer+1) * IDR_BITS; | 684 | n = (p->layer+1) * IDR_BITS; |
518 | 685 | ||
519 | /* Mask off upper bits we don't use for the search. */ | 686 | if (id > idr_max(p->layer + 1)) |
520 | id &= MAX_IDR_MASK; | ||
521 | |||
522 | if (id >= (1 << n)) | ||
523 | return NULL; | 687 | return NULL; |
524 | BUG_ON(n == 0); | 688 | BUG_ON(n == 0); |
525 | 689 | ||
@@ -530,7 +694,7 @@ void *idr_find(struct idr *idp, int id) | |||
530 | } | 694 | } |
531 | return((void *)p); | 695 | return((void *)p); |
532 | } | 696 | } |
533 | EXPORT_SYMBOL(idr_find); | 697 | EXPORT_SYMBOL(idr_find_slowpath); |
534 | 698 | ||
535 | /** | 699 | /** |
536 | * idr_for_each - iterate through all stored pointers | 700 | * idr_for_each - iterate through all stored pointers |
@@ -555,15 +719,15 @@ int idr_for_each(struct idr *idp, | |||
555 | { | 719 | { |
556 | int n, id, max, error = 0; | 720 | int n, id, max, error = 0; |
557 | struct idr_layer *p; | 721 | struct idr_layer *p; |
558 | struct idr_layer *pa[MAX_IDR_LEVEL]; | 722 | struct idr_layer *pa[MAX_IDR_LEVEL + 1]; |
559 | struct idr_layer **paa = &pa[0]; | 723 | struct idr_layer **paa = &pa[0]; |
560 | 724 | ||
561 | n = idp->layers * IDR_BITS; | 725 | n = idp->layers * IDR_BITS; |
562 | p = rcu_dereference_raw(idp->top); | 726 | p = rcu_dereference_raw(idp->top); |
563 | max = 1 << n; | 727 | max = idr_max(idp->layers); |
564 | 728 | ||
565 | id = 0; | 729 | id = 0; |
566 | while (id < max) { | 730 | while (id >= 0 && id <= max) { |
567 | while (n > 0 && p) { | 731 | while (n > 0 && p) { |
568 | n -= IDR_BITS; | 732 | n -= IDR_BITS; |
569 | *paa++ = p; | 733 | *paa++ = p; |
@@ -601,7 +765,7 @@ EXPORT_SYMBOL(idr_for_each); | |||
601 | */ | 765 | */ |
602 | void *idr_get_next(struct idr *idp, int *nextidp) | 766 | void *idr_get_next(struct idr *idp, int *nextidp) |
603 | { | 767 | { |
604 | struct idr_layer *p, *pa[MAX_IDR_LEVEL]; | 768 | struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1]; |
605 | struct idr_layer **paa = &pa[0]; | 769 | struct idr_layer **paa = &pa[0]; |
606 | int id = *nextidp; | 770 | int id = *nextidp; |
607 | int n, max; | 771 | int n, max; |
@@ -611,9 +775,9 @@ void *idr_get_next(struct idr *idp, int *nextidp) | |||
611 | if (!p) | 775 | if (!p) |
612 | return NULL; | 776 | return NULL; |
613 | n = (p->layer + 1) * IDR_BITS; | 777 | n = (p->layer + 1) * IDR_BITS; |
614 | max = 1 << n; | 778 | max = idr_max(p->layer + 1); |
615 | 779 | ||
616 | while (id < max) { | 780 | while (id >= 0 && id <= max) { |
617 | while (n > 0 && p) { | 781 | while (n > 0 && p) { |
618 | n -= IDR_BITS; | 782 | n -= IDR_BITS; |
619 | *paa++ = p; | 783 | *paa++ = p; |
@@ -625,7 +789,14 @@ void *idr_get_next(struct idr *idp, int *nextidp) | |||
625 | return p; | 789 | return p; |
626 | } | 790 | } |
627 | 791 | ||
628 | id += 1 << n; | 792 | /* |
793 | * Proceed to the next layer at the current level. Unlike | ||
794 | * idr_for_each(), @id isn't guaranteed to be aligned to | ||
795 | * layer boundary at this point and adding 1 << n may | ||
796 | * incorrectly skip IDs. Make sure we jump to the | ||
797 | * beginning of the next layer using round_up(). | ||
798 | */ | ||
799 | id = round_up(id + 1, 1 << n); | ||
629 | while (n < fls(id)) { | 800 | while (n < fls(id)) { |
630 | n += IDR_BITS; | 801 | n += IDR_BITS; |
631 | p = *--paa; | 802 | p = *--paa; |
@@ -653,14 +824,16 @@ void *idr_replace(struct idr *idp, void *ptr, int id) | |||
653 | int n; | 824 | int n; |
654 | struct idr_layer *p, *old_p; | 825 | struct idr_layer *p, *old_p; |
655 | 826 | ||
827 | /* see comment in idr_find_slowpath() */ | ||
828 | if (WARN_ON_ONCE(id < 0)) | ||
829 | return ERR_PTR(-EINVAL); | ||
830 | |||
656 | p = idp->top; | 831 | p = idp->top; |
657 | if (!p) | 832 | if (!p) |
658 | return ERR_PTR(-EINVAL); | 833 | return ERR_PTR(-EINVAL); |
659 | 834 | ||
660 | n = (p->layer+1) * IDR_BITS; | 835 | n = (p->layer+1) * IDR_BITS; |
661 | 836 | ||
662 | id &= MAX_IDR_MASK; | ||
663 | |||
664 | if (id >= (1 << n)) | 837 | if (id >= (1 << n)) |
665 | return ERR_PTR(-EINVAL); | 838 | return ERR_PTR(-EINVAL); |
666 | 839 | ||
@@ -671,7 +844,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id) | |||
671 | } | 844 | } |
672 | 845 | ||
673 | n = id & IDR_MASK; | 846 | n = id & IDR_MASK; |
674 | if (unlikely(p == NULL || !test_bit(n, &p->bitmap))) | 847 | if (unlikely(p == NULL || !test_bit(n, p->bitmap))) |
675 | return ERR_PTR(-ENOENT); | 848 | return ERR_PTR(-ENOENT); |
676 | 849 | ||
677 | old_p = p->ary[n]; | 850 | old_p = p->ary[n]; |
@@ -780,7 +953,7 @@ EXPORT_SYMBOL(ida_pre_get); | |||
780 | */ | 953 | */ |
781 | int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) | 954 | int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) |
782 | { | 955 | { |
783 | struct idr_layer *pa[MAX_IDR_LEVEL]; | 956 | struct idr_layer *pa[MAX_IDR_LEVEL + 1]; |
784 | struct ida_bitmap *bitmap; | 957 | struct ida_bitmap *bitmap; |
785 | unsigned long flags; | 958 | unsigned long flags; |
786 | int idr_id = starting_id / IDA_BITMAP_BITS; | 959 | int idr_id = starting_id / IDA_BITMAP_BITS; |
@@ -789,9 +962,9 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) | |||
789 | 962 | ||
790 | restart: | 963 | restart: |
791 | /* get vacant slot */ | 964 | /* get vacant slot */ |
792 | t = idr_get_empty_slot(&ida->idr, idr_id, pa); | 965 | t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr); |
793 | if (t < 0) | 966 | if (t < 0) |
794 | return _idr_rc_to_errno(t); | 967 | return t == -ENOMEM ? -EAGAIN : t; |
795 | 968 | ||
796 | if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT) | 969 | if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT) |
797 | return -ENOSPC; | 970 | return -ENOSPC; |
@@ -852,25 +1025,6 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) | |||
852 | EXPORT_SYMBOL(ida_get_new_above); | 1025 | EXPORT_SYMBOL(ida_get_new_above); |
853 | 1026 | ||
854 | /** | 1027 | /** |
855 | * ida_get_new - allocate new ID | ||
856 | * @ida: idr handle | ||
857 | * @p_id: pointer to the allocated handle | ||
858 | * | ||
859 | * Allocate new ID. It should be called with any required locks. | ||
860 | * | ||
861 | * If memory is required, it will return %-EAGAIN, you should unlock | ||
862 | * and go back to the idr_pre_get() call. If the idr is full, it will | ||
863 | * return %-ENOSPC. | ||
864 | * | ||
865 | * @p_id returns a value in the range %0 ... %0x7fffffff. | ||
866 | */ | ||
867 | int ida_get_new(struct ida *ida, int *p_id) | ||
868 | { | ||
869 | return ida_get_new_above(ida, 0, p_id); | ||
870 | } | ||
871 | EXPORT_SYMBOL(ida_get_new); | ||
872 | |||
873 | /** | ||
874 | * ida_remove - remove the given ID | 1028 | * ida_remove - remove the given ID |
875 | * @ida: ida handle | 1029 | * @ida: ida handle |
876 | * @id: ID to free | 1030 | * @id: ID to free |
@@ -887,7 +1041,7 @@ void ida_remove(struct ida *ida, int id) | |||
887 | /* clear full bits while looking up the leaf idr_layer */ | 1041 | /* clear full bits while looking up the leaf idr_layer */ |
888 | while ((shift > 0) && p) { | 1042 | while ((shift > 0) && p) { |
889 | n = (idr_id >> shift) & IDR_MASK; | 1043 | n = (idr_id >> shift) & IDR_MASK; |
890 | __clear_bit(n, &p->bitmap); | 1044 | __clear_bit(n, p->bitmap); |
891 | p = p->ary[n]; | 1045 | p = p->ary[n]; |
892 | shift -= IDR_BITS; | 1046 | shift -= IDR_BITS; |
893 | } | 1047 | } |
@@ -896,7 +1050,7 @@ void ida_remove(struct ida *ida, int id) | |||
896 | goto err; | 1050 | goto err; |
897 | 1051 | ||
898 | n = idr_id & IDR_MASK; | 1052 | n = idr_id & IDR_MASK; |
899 | __clear_bit(n, &p->bitmap); | 1053 | __clear_bit(n, p->bitmap); |
900 | 1054 | ||
901 | bitmap = (void *)p->ary[n]; | 1055 | bitmap = (void *)p->ary[n]; |
902 | if (!test_bit(offset, bitmap->bitmap)) | 1056 | if (!test_bit(offset, bitmap->bitmap)) |
@@ -905,7 +1059,7 @@ void ida_remove(struct ida *ida, int id) | |||
905 | /* update bitmap and remove it if empty */ | 1059 | /* update bitmap and remove it if empty */ |
906 | __clear_bit(offset, bitmap->bitmap); | 1060 | __clear_bit(offset, bitmap->bitmap); |
907 | if (--bitmap->nr_busy == 0) { | 1061 | if (--bitmap->nr_busy == 0) { |
908 | __set_bit(n, &p->bitmap); /* to please idr_remove() */ | 1062 | __set_bit(n, p->bitmap); /* to please idr_remove() */ |
909 | idr_remove(&ida->idr, idr_id); | 1063 | idr_remove(&ida->idr, idr_id); |
910 | free_bitmap(ida, bitmap); | 1064 | free_bitmap(ida, bitmap); |
911 | } | 1065 | } |
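Taken together, the idr.c changes replace the idr_pre_get()/idr_get_new*() two-step with idr_preload() plus idr_alloc(), which return real error codes and can be used under a spinlock. A minimal usage sketch of that pattern, mirroring the idr_preload() kerneldoc above; the names session_idr, session_register and the id range are hypothetical:

    #include <linux/idr.h>
    #include <linux/spinlock.h>

    static DEFINE_IDR(session_idr);            /* hypothetical example idr */
    static DEFINE_SPINLOCK(session_lock);

    /* allocate an id in [1, 256) for @session while holding a spinlock */
    static int session_register(void *session)
    {
            int id;

            idr_preload(GFP_KERNEL);           /* may sleep, fills per-cpu layer cache */
            spin_lock(&session_lock);

            id = idr_alloc(&session_idr, session, 1, 256, GFP_NOWAIT);

            spin_unlock(&session_lock);
            idr_preload_end();                 /* re-enables preemption */

            return id;     /* >= 0 on success, -ENOSPC or -ENOMEM on failure */
    }

    static void session_unregister(int id)
    {
            spin_lock(&session_lock);
            idr_remove(&session_idr, id);
            spin_unlock(&session_lock);
    }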
diff --git a/lib/kfifo.c b/lib/kfifo.c
new file mode 100644
index 000000000000..7b7f83027b7b
--- /dev/null
+++ b/lib/kfifo.c
@@ -0,0 +1,607 @@ | |||
1 | /* | ||
2 | * A generic kernel FIFO implementation | ||
3 | * | ||
4 | * Copyright (C) 2009/2010 Stefani Seibold <stefani@seibold.net> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/export.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/err.h> | ||
26 | #include <linux/log2.h> | ||
27 | #include <linux/uaccess.h> | ||
28 | #include <linux/kfifo.h> | ||
29 | |||
30 | /* | ||
31 | * internal helper to calculate the unused elements in a fifo | ||
32 | */ | ||
33 | static inline unsigned int kfifo_unused(struct __kfifo *fifo) | ||
34 | { | ||
35 | return (fifo->mask + 1) - (fifo->in - fifo->out); | ||
36 | } | ||
37 | |||
38 | int __kfifo_alloc(struct __kfifo *fifo, unsigned int size, | ||
39 | size_t esize, gfp_t gfp_mask) | ||
40 | { | ||
41 | /* | ||
42 | * round up to the next power of 2, since our 'let the indices | ||
43 | * wrap' technique works only in this case. | ||
44 | */ | ||
45 | size = roundup_pow_of_two(size); | ||
46 | |||
47 | fifo->in = 0; | ||
48 | fifo->out = 0; | ||
49 | fifo->esize = esize; | ||
50 | |||
51 | if (size < 2) { | ||
52 | fifo->data = NULL; | ||
53 | fifo->mask = 0; | ||
54 | return -EINVAL; | ||
55 | } | ||
56 | |||
57 | fifo->data = kmalloc(size * esize, gfp_mask); | ||
58 | |||
59 | if (!fifo->data) { | ||
60 | fifo->mask = 0; | ||
61 | return -ENOMEM; | ||
62 | } | ||
63 | fifo->mask = size - 1; | ||
64 | |||
65 | return 0; | ||
66 | } | ||
67 | EXPORT_SYMBOL(__kfifo_alloc); | ||
68 | |||
69 | void __kfifo_free(struct __kfifo *fifo) | ||
70 | { | ||
71 | kfree(fifo->data); | ||
72 | fifo->in = 0; | ||
73 | fifo->out = 0; | ||
74 | fifo->esize = 0; | ||
75 | fifo->data = NULL; | ||
76 | fifo->mask = 0; | ||
77 | } | ||
78 | EXPORT_SYMBOL(__kfifo_free); | ||
79 | |||
80 | int __kfifo_init(struct __kfifo *fifo, void *buffer, | ||
81 | unsigned int size, size_t esize) | ||
82 | { | ||
83 | size /= esize; | ||
84 | |||
85 | size = roundup_pow_of_two(size); | ||
86 | |||
87 | fifo->in = 0; | ||
88 | fifo->out = 0; | ||
89 | fifo->esize = esize; | ||
90 | fifo->data = buffer; | ||
91 | |||
92 | if (size < 2) { | ||
93 | fifo->mask = 0; | ||
94 | return -EINVAL; | ||
95 | } | ||
96 | fifo->mask = size - 1; | ||
97 | |||
98 | return 0; | ||
99 | } | ||
100 | EXPORT_SYMBOL(__kfifo_init); | ||
101 | |||
102 | static void kfifo_copy_in(struct __kfifo *fifo, const void *src, | ||
103 | unsigned int len, unsigned int off) | ||
104 | { | ||
105 | unsigned int size = fifo->mask + 1; | ||
106 | unsigned int esize = fifo->esize; | ||
107 | unsigned int l; | ||
108 | |||
109 | off &= fifo->mask; | ||
110 | if (esize != 1) { | ||
111 | off *= esize; | ||
112 | size *= esize; | ||
113 | len *= esize; | ||
114 | } | ||
115 | l = min(len, size - off); | ||
116 | |||
117 | memcpy(fifo->data + off, src, l); | ||
118 | memcpy(fifo->data, src + l, len - l); | ||
119 | /* | ||
120 | * make sure that the data in the fifo is up to date before | ||
121 | * incrementing the fifo->in index counter | ||
122 | */ | ||
123 | smp_wmb(); | ||
124 | } | ||
125 | |||
126 | unsigned int __kfifo_in(struct __kfifo *fifo, | ||
127 | const void *buf, unsigned int len) | ||
128 | { | ||
129 | unsigned int l; | ||
130 | |||
131 | l = kfifo_unused(fifo); | ||
132 | if (len > l) | ||
133 | len = l; | ||
134 | |||
135 | kfifo_copy_in(fifo, buf, len, fifo->in); | ||
136 | fifo->in += len; | ||
137 | return len; | ||
138 | } | ||
139 | EXPORT_SYMBOL(__kfifo_in); | ||
140 | |||
141 | static void kfifo_copy_out(struct __kfifo *fifo, void *dst, | ||
142 | unsigned int len, unsigned int off) | ||
143 | { | ||
144 | unsigned int size = fifo->mask + 1; | ||
145 | unsigned int esize = fifo->esize; | ||
146 | unsigned int l; | ||
147 | |||
148 | off &= fifo->mask; | ||
149 | if (esize != 1) { | ||
150 | off *= esize; | ||
151 | size *= esize; | ||
152 | len *= esize; | ||
153 | } | ||
154 | l = min(len, size - off); | ||
155 | |||
156 | memcpy(dst, fifo->data + off, l); | ||
157 | memcpy(dst + l, fifo->data, len - l); | ||
158 | /* | ||
159 | * make sure that the data is copied before | ||
160 | * incrementing the fifo->out index counter | ||
161 | */ | ||
162 | smp_wmb(); | ||
163 | } | ||
164 | |||
165 | unsigned int __kfifo_out_peek(struct __kfifo *fifo, | ||
166 | void *buf, unsigned int len) | ||
167 | { | ||
168 | unsigned int l; | ||
169 | |||
170 | l = fifo->in - fifo->out; | ||
171 | if (len > l) | ||
172 | len = l; | ||
173 | |||
174 | kfifo_copy_out(fifo, buf, len, fifo->out); | ||
175 | return len; | ||
176 | } | ||
177 | EXPORT_SYMBOL(__kfifo_out_peek); | ||
178 | |||
179 | unsigned int __kfifo_out(struct __kfifo *fifo, | ||
180 | void *buf, unsigned int len) | ||
181 | { | ||
182 | len = __kfifo_out_peek(fifo, buf, len); | ||
183 | fifo->out += len; | ||
184 | return len; | ||
185 | } | ||
186 | EXPORT_SYMBOL(__kfifo_out); | ||
187 | |||
188 | static unsigned long kfifo_copy_from_user(struct __kfifo *fifo, | ||
189 | const void __user *from, unsigned int len, unsigned int off, | ||
190 | unsigned int *copied) | ||
191 | { | ||
192 | unsigned int size = fifo->mask + 1; | ||
193 | unsigned int esize = fifo->esize; | ||
194 | unsigned int l; | ||
195 | unsigned long ret; | ||
196 | |||
197 | off &= fifo->mask; | ||
198 | if (esize != 1) { | ||
199 | off *= esize; | ||
200 | size *= esize; | ||
201 | len *= esize; | ||
202 | } | ||
203 | l = min(len, size - off); | ||
204 | |||
205 | ret = copy_from_user(fifo->data + off, from, l); | ||
206 | if (unlikely(ret)) | ||
207 | ret = DIV_ROUND_UP(ret + len - l, esize); | ||
208 | else { | ||
209 | ret = copy_from_user(fifo->data, from + l, len - l); | ||
210 | if (unlikely(ret)) | ||
211 | ret = DIV_ROUND_UP(ret, esize); | ||
212 | } | ||
213 | /* | ||
214 | * make sure that the data in the fifo is up to date before | ||
215 | * incrementing the fifo->in index counter | ||
216 | */ | ||
217 | smp_wmb(); | ||
218 | *copied = len - ret; | ||
219 | /* return the number of elements which are not copied */ | ||
220 | return ret; | ||
221 | } | ||
222 | |||
223 | int __kfifo_from_user(struct __kfifo *fifo, const void __user *from, | ||
224 | unsigned long len, unsigned int *copied) | ||
225 | { | ||
226 | unsigned int l; | ||
227 | unsigned long ret; | ||
228 | unsigned int esize = fifo->esize; | ||
229 | int err; | ||
230 | |||
231 | if (esize != 1) | ||
232 | len /= esize; | ||
233 | |||
234 | l = kfifo_unused(fifo); | ||
235 | if (len > l) | ||
236 | len = l; | ||
237 | |||
238 | ret = kfifo_copy_from_user(fifo, from, len, fifo->in, copied); | ||
239 | if (unlikely(ret)) { | ||
240 | len -= ret; | ||
241 | err = -EFAULT; | ||
242 | } else | ||
243 | err = 0; | ||
244 | fifo->in += len; | ||
245 | return err; | ||
246 | } | ||
247 | EXPORT_SYMBOL(__kfifo_from_user); | ||
248 | |||
249 | static unsigned long kfifo_copy_to_user(struct __kfifo *fifo, void __user *to, | ||
250 | unsigned int len, unsigned int off, unsigned int *copied) | ||
251 | { | ||
252 | unsigned int l; | ||
253 | unsigned long ret; | ||
254 | unsigned int size = fifo->mask + 1; | ||
255 | unsigned int esize = fifo->esize; | ||
256 | |||
257 | off &= fifo->mask; | ||
258 | if (esize != 1) { | ||
259 | off *= esize; | ||
260 | size *= esize; | ||
261 | len *= esize; | ||
262 | } | ||
263 | l = min(len, size - off); | ||
264 | |||
265 | ret = copy_to_user(to, fifo->data + off, l); | ||
266 | if (unlikely(ret)) | ||
267 | ret = DIV_ROUND_UP(ret + len - l, esize); | ||
268 | else { | ||
269 | ret = copy_to_user(to + l, fifo->data, len - l); | ||
270 | if (unlikely(ret)) | ||
271 | ret = DIV_ROUND_UP(ret, esize); | ||
272 | } | ||
273 | /* | ||
274 | * make sure that the data is copied before | ||
275 | * incrementing the fifo->out index counter | ||
276 | */ | ||
277 | smp_wmb(); | ||
278 | *copied = len - ret; | ||
279 | /* return the number of elements which are not copied */ | ||
280 | return ret; | ||
281 | } | ||
282 | |||
283 | int __kfifo_to_user(struct __kfifo *fifo, void __user *to, | ||
284 | unsigned long len, unsigned int *copied) | ||
285 | { | ||
286 | unsigned int l; | ||
287 | unsigned long ret; | ||
288 | unsigned int esize = fifo->esize; | ||
289 | int err; | ||
290 | |||
291 | if (esize != 1) | ||
292 | len /= esize; | ||
293 | |||
294 | l = fifo->in - fifo->out; | ||
295 | if (len > l) | ||
296 | len = l; | ||
297 | ret = kfifo_copy_to_user(fifo, to, len, fifo->out, copied); | ||
298 | if (unlikely(ret)) { | ||
299 | len -= ret; | ||
300 | err = -EFAULT; | ||
301 | } else | ||
302 | err = 0; | ||
303 | fifo->out += len; | ||
304 | return err; | ||
305 | } | ||
306 | EXPORT_SYMBOL(__kfifo_to_user); | ||
307 | |||
308 | static int setup_sgl_buf(struct scatterlist *sgl, void *buf, | ||
309 | int nents, unsigned int len) | ||
310 | { | ||
311 | int n; | ||
312 | unsigned int l; | ||
313 | unsigned int off; | ||
314 | struct page *page; | ||
315 | |||
316 | if (!nents) | ||
317 | return 0; | ||
318 | |||
319 | if (!len) | ||
320 | return 0; | ||
321 | |||
322 | n = 0; | ||
323 | page = virt_to_page(buf); | ||
324 | off = offset_in_page(buf); | ||
325 | l = 0; | ||
326 | |||
327 | while (len >= l + PAGE_SIZE - off) { | ||
328 | struct page *npage; | ||
329 | |||
330 | l += PAGE_SIZE; | ||
331 | buf += PAGE_SIZE; | ||
332 | npage = virt_to_page(buf); | ||
333 | if (page_to_phys(page) != page_to_phys(npage) - l) { | ||
334 | sg_set_page(sgl, page, l - off, off); | ||
335 | sgl = sg_next(sgl); | ||
336 | if (++n == nents || sgl == NULL) | ||
337 | return n; | ||
338 | page = npage; | ||
339 | len -= l - off; | ||
340 | l = off = 0; | ||
341 | } | ||
342 | } | ||
343 | sg_set_page(sgl, page, len, off); | ||
344 | return n + 1; | ||
345 | } | ||
346 | |||
347 | static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl, | ||
348 | int nents, unsigned int len, unsigned int off) | ||
349 | { | ||
350 | unsigned int size = fifo->mask + 1; | ||
351 | unsigned int esize = fifo->esize; | ||
352 | unsigned int l; | ||
353 | unsigned int n; | ||
354 | |||
355 | off &= fifo->mask; | ||
356 | if (esize != 1) { | ||
357 | off *= esize; | ||
358 | size *= esize; | ||
359 | len *= esize; | ||
360 | } | ||
361 | l = min(len, size - off); | ||
362 | |||
363 | n = setup_sgl_buf(sgl, fifo->data + off, nents, l); | ||
364 | n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l); | ||
365 | |||
366 | return n; | ||
367 | } | ||
368 | |||
369 | unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo, | ||
370 | struct scatterlist *sgl, int nents, unsigned int len) | ||
371 | { | ||
372 | unsigned int l; | ||
373 | |||
374 | l = kfifo_unused(fifo); | ||
375 | if (len > l) | ||
376 | len = l; | ||
377 | |||
378 | return setup_sgl(fifo, sgl, nents, len, fifo->in); | ||
379 | } | ||
380 | EXPORT_SYMBOL(__kfifo_dma_in_prepare); | ||
381 | |||
382 | unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo, | ||
383 | struct scatterlist *sgl, int nents, unsigned int len) | ||
384 | { | ||
385 | unsigned int l; | ||
386 | |||
387 | l = fifo->in - fifo->out; | ||
388 | if (len > l) | ||
389 | len = l; | ||
390 | |||
391 | return setup_sgl(fifo, sgl, nents, len, fifo->out); | ||
392 | } | ||
393 | EXPORT_SYMBOL(__kfifo_dma_out_prepare); | ||
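The DMA helpers split the work into a prepare/finish pair: __kfifo_dma_in_prepare() and __kfifo_dma_out_prepare() only map a region into a scatterlist (two entries are always enough, because the ring wraps at most once), and the indices are untouched until the caller commits the completed transfer through the matching finish wrapper in kfifo.h. A sketch of the receive direction, with hypothetical names (rx_fifo, rx_dma_prepare, rx_dma_complete) and the actual DMA submission left out:

#include <linux/kfifo.h>
#include <linux/scatterlist.h>

static DEFINE_KFIFO(rx_fifo, unsigned char, 4096);	/* hypothetical */

/* Hand up to "len" bytes of free space in rx_fifo to a DMA engine. */
static int rx_dma_prepare(struct scatterlist *sg, unsigned int len)
{
	unsigned int nents;

	sg_init_table(sg, 2);
	/* two entries suffice: the mapped region wraps at most once */
	nents = kfifo_dma_in_prepare(&rx_fifo, sg, 2, len);
	if (!nents)
		return -ENOSPC;		/* no free space */

	/* ... submit sg[0..nents-1] to the DMA engine here ... */
	return nents;
}

/* Completion path: commit what the hardware actually transferred. */
static void rx_dma_complete(unsigned int transferred)
{
	kfifo_dma_in_finish(&rx_fifo, transferred);
}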
394 | |||
395 | unsigned int __kfifo_max_r(unsigned int len, size_t recsize) | ||
396 | { | ||
397 | unsigned int max = (1 << (recsize << 3)) - 1; | ||
398 | |||
399 | if (len > max) | ||
400 | return max; | ||
401 | return len; | ||
402 | } | ||
403 | EXPORT_SYMBOL(__kfifo_max_r); | ||
404 | |||
405 | #define __KFIFO_PEEK(data, out, mask) \ | ||
406 | ((data)[(out) & (mask)]) | ||
407 | /* | ||
408 | * __kfifo_peek_n internal helper function to determine the length of | ||
409 | * the next record in the fifo | ||
410 | */ | ||
411 | static unsigned int __kfifo_peek_n(struct __kfifo *fifo, size_t recsize) | ||
412 | { | ||
413 | unsigned int l; | ||
414 | unsigned int mask = fifo->mask; | ||
415 | unsigned char *data = fifo->data; | ||
416 | |||
417 | l = __KFIFO_PEEK(data, fifo->out, mask); | ||
418 | |||
419 | if (--recsize) | ||
420 | l |= __KFIFO_PEEK(data, fifo->out + 1, mask) << 8; | ||
421 | |||
422 | return l; | ||
423 | } | ||
424 | |||
425 | #define __KFIFO_POKE(data, in, mask, val) \ | ||
426 | ( \ | ||
427 | (data)[(in) & (mask)] = (unsigned char)(val) \ | ||
428 | ) | ||
429 | |||
430 | /* | ||
431 | * __kfifo_poke_n internal helper function for storing the length of | ||
432 | * the record into the fifo | ||
433 | */ | ||
434 | static void __kfifo_poke_n(struct __kfifo *fifo, unsigned int n, size_t recsize) | ||
435 | { | ||
436 | unsigned int mask = fifo->mask; | ||
437 | unsigned char *data = fifo->data; | ||
438 | |||
439 | __KFIFO_POKE(data, fifo->in, mask, n); | ||
440 | |||
441 | if (recsize > 1) | ||
442 | __KFIFO_POKE(data, fifo->in + 1, mask, n >> 8); | ||
443 | } | ||
444 | |||
445 | unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize) | ||
446 | { | ||
447 | return __kfifo_peek_n(fifo, recsize); | ||
448 | } | ||
449 | EXPORT_SYMBOL(__kfifo_len_r); | ||
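The record variants prefix every record with its length, stored little-endian in recsize bytes directly in the fifo data: __kfifo_poke_n() writes the header at the in index, __kfifo_peek_n() reads it back at out, and __kfifo_max_r() caps a record at 255 bytes for recsize == 1 or 65535 for recsize == 2. A standalone illustration of just the header encoding (plain userspace C, a 16-byte buffer so mask == 15, recsize == 2):

#include <stdio.h>

static unsigned char buf[16];			/* fifo data, mask == 15 */

static void poke_n(unsigned int in, unsigned int n)
{
	buf[in & 15] = (unsigned char)n;		/* low byte  */
	buf[(in + 1) & 15] = (unsigned char)(n >> 8);	/* high byte */
}

static unsigned int peek_n(unsigned int out)
{
	return buf[out & 15] | (buf[(out + 1) & 15] << 8);
}

int main(void)
{
	poke_n(15, 300);		/* header itself wraps around the end */
	printf("%u\n", peek_n(15));	/* prints 300 */
	return 0;
}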
450 | |||
451 | unsigned int __kfifo_in_r(struct __kfifo *fifo, const void *buf, | ||
452 | unsigned int len, size_t recsize) | ||
453 | { | ||
454 | if (len + recsize > kfifo_unused(fifo)) | ||
455 | return 0; | ||
456 | |||
457 | __kfifo_poke_n(fifo, len, recsize); | ||
458 | |||
459 | kfifo_copy_in(fifo, buf, len, fifo->in + recsize); | ||
460 | fifo->in += len + recsize; | ||
461 | return len; | ||
462 | } | ||
463 | EXPORT_SYMBOL(__kfifo_in_r); | ||
464 | |||
465 | static unsigned int kfifo_out_copy_r(struct __kfifo *fifo, | ||
466 | void *buf, unsigned int len, size_t recsize, unsigned int *n) | ||
467 | { | ||
468 | *n = __kfifo_peek_n(fifo, recsize); | ||
469 | |||
470 | if (len > *n) | ||
471 | len = *n; | ||
472 | |||
473 | kfifo_copy_out(fifo, buf, len, fifo->out + recsize); | ||
474 | return len; | ||
475 | } | ||
476 | |||
477 | unsigned int __kfifo_out_peek_r(struct __kfifo *fifo, void *buf, | ||
478 | unsigned int len, size_t recsize) | ||
479 | { | ||
480 | unsigned int n; | ||
481 | |||
482 | if (fifo->in == fifo->out) | ||
483 | return 0; | ||
484 | |||
485 | return kfifo_out_copy_r(fifo, buf, len, recsize, &n); | ||
486 | } | ||
487 | EXPORT_SYMBOL(__kfifo_out_peek_r); | ||
488 | |||
489 | unsigned int __kfifo_out_r(struct __kfifo *fifo, void *buf, | ||
490 | unsigned int len, size_t recsize) | ||
491 | { | ||
492 | unsigned int n; | ||
493 | |||
494 | if (fifo->in == fifo->out) | ||
495 | return 0; | ||
496 | |||
497 | len = kfifo_out_copy_r(fifo, buf, len, recsize, &n); | ||
498 | fifo->out += n + recsize; | ||
499 | return len; | ||
500 | } | ||
501 | EXPORT_SYMBOL(__kfifo_out_r); | ||
502 | |||
503 | void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize) | ||
504 | { | ||
505 | unsigned int n; | ||
506 | |||
507 | n = __kfifo_peek_n(fifo, recsize); | ||
508 | fifo->out += n + recsize; | ||
509 | } | ||
510 | EXPORT_SYMBOL(__kfifo_skip_r); | ||
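On the in side a record is only accepted as a whole (header plus payload must fit), and on the out side the whole record is always consumed: __kfifo_out_r() copies at most len bytes but advances out past the complete record, and __kfifo_skip_r() drops one record without copying anything. A sketch using the kfifo.h record wrappers (msg_fifo and msg_example are hypothetical; kfifo_in()/kfifo_out() dispatch to the _r variants for record fifos):

#include <linux/kfifo.h>
#include <linux/printk.h>

/* record fifo with a one-byte length header per record (recsize == 1) */
static STRUCT_KFIFO_REC_1(256) msg_fifo;

static void msg_example(void)
{
	unsigned char in[] = "hello";
	unsigned char out[32];
	unsigned int len;

	INIT_KFIFO(msg_fifo);

	/* stores the header byte 5 followed by "hello" */
	kfifo_in(&msg_fifo, in, 5);

	/* returns 5; header and payload are consumed together */
	len = kfifo_out(&msg_fifo, out, sizeof(out));
	pr_info("got %u byte record\n", len);
}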
511 | |||
512 | int __kfifo_from_user_r(struct __kfifo *fifo, const void __user *from, | ||
513 | unsigned long len, unsigned int *copied, size_t recsize) | ||
514 | { | ||
515 | unsigned long ret; | ||
516 | |||
517 | len = __kfifo_max_r(len, recsize); | ||
518 | |||
519 | if (len + recsize > kfifo_unused(fifo)) { | ||
520 | *copied = 0; | ||
521 | return 0; | ||
522 | } | ||
523 | |||
524 | __kfifo_poke_n(fifo, len, recsize); | ||
525 | |||
526 | ret = kfifo_copy_from_user(fifo, from, len, fifo->in + recsize, copied); | ||
527 | if (unlikely(ret)) { | ||
528 | *copied = 0; | ||
529 | return -EFAULT; | ||
530 | } | ||
531 | fifo->in += len + recsize; | ||
532 | return 0; | ||
533 | } | ||
534 | EXPORT_SYMBOL(__kfifo_from_user_r); | ||
535 | |||
536 | int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to, | ||
537 | unsigned long len, unsigned int *copied, size_t recsize) | ||
538 | { | ||
539 | unsigned long ret; | ||
540 | unsigned int n; | ||
541 | |||
542 | if (fifo->in == fifo->out) { | ||
543 | *copied = 0; | ||
544 | return 0; | ||
545 | } | ||
546 | |||
547 | n = __kfifo_peek_n(fifo, recsize); | ||
548 | if (len > n) | ||
549 | len = n; | ||
550 | |||
551 | ret = kfifo_copy_to_user(fifo, to, len, fifo->out + recsize, copied); | ||
552 | if (unlikely(ret)) { | ||
553 | *copied = 0; | ||
554 | return -EFAULT; | ||
555 | } | ||
556 | fifo->out += n + recsize; | ||
557 | return 0; | ||
558 | } | ||
559 | EXPORT_SYMBOL(__kfifo_to_user_r); | ||
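The user-space record helpers keep the same all-or-nothing semantics: __kfifo_from_user_r() refuses a record that does not fit (returning 0 with *copied == 0), and __kfifo_to_user_r() hands out at most one record and discards whatever did not fit into the caller's buffer, since out always advances by the full record length plus the header. A hypothetical read() handler for the record fifo sketched above:

static ssize_t msg_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	unsigned int copied;
	int ret;

	/* at most one record per call; a short buffer truncates it */
	ret = kfifo_to_user(&msg_fifo, buf, count, &copied);

	return ret ? ret : copied;
}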
560 | |||
561 | unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo, | ||
562 | struct scatterlist *sgl, int nents, unsigned int len, size_t recsize) | ||
563 | { | ||
564 | if (!nents) | ||
565 | BUG(); | ||
566 | |||
567 | len = __kfifo_max_r(len, recsize); | ||
568 | |||
569 | if (len + recsize > kfifo_unused(fifo)) | ||
570 | return 0; | ||
571 | |||
572 | return setup_sgl(fifo, sgl, nents, len, fifo->in + recsize); | ||
573 | } | ||
574 | EXPORT_SYMBOL(__kfifo_dma_in_prepare_r); | ||
575 | |||
576 | void __kfifo_dma_in_finish_r(struct __kfifo *fifo, | ||
577 | unsigned int len, size_t recsize) | ||
578 | { | ||
579 | len = __kfifo_max_r(len, recsize); | ||
580 | __kfifo_poke_n(fifo, len, recsize); | ||
581 | fifo->in += len + recsize; | ||
582 | } | ||
583 | EXPORT_SYMBOL(__kfifo_dma_in_finish_r); | ||
584 | |||
585 | unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo, | ||
586 | struct scatterlist *sgl, int nents, unsigned int len, size_t recsize) | ||
587 | { | ||
588 | if (!nents) | ||
589 | BUG(); | ||
590 | |||
591 | len = __kfifo_max_r(len, recsize); | ||
592 | |||
593 | if (len + recsize > fifo->in - fifo->out) | ||
594 | return 0; | ||
595 | |||
596 | return setup_sgl(fifo, sgl, nents, len, fifo->out + recsize); | ||
597 | } | ||
598 | EXPORT_SYMBOL(__kfifo_dma_out_prepare_r); | ||
599 | |||
600 | void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize) | ||
601 | { | ||
602 | unsigned int len; | ||
603 | |||
604 | len = __kfifo_peek_n(fifo, recsize); | ||
605 | fifo->out += len + recsize; | ||
606 | } | ||
607 | EXPORT_SYMBOL(__kfifo_dma_out_finish_r); | ||
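The record DMA pair differs from the linear one in where the header is handled: __kfifo_dma_in_prepare_r() reserves space after a length header that has not been written yet, and __kfifo_dma_in_finish_r() pokes the header and publishes the record only once the transfer is complete. A compressed sketch on the hypothetical record fifo from above (the kfifo.h wrappers pick the _r variants automatically; DMA submission omitted):

#include <linux/scatterlist.h>

static int msg_rx_dma(unsigned int rec_len)
{
	struct scatterlist sg[2];
	unsigned int nents;

	sg_init_table(sg, 2);

	/* maps free space after the still-unwritten length header */
	nents = kfifo_dma_in_prepare(&msg_fifo, sg, 2, rec_len);
	if (!nents)
		return -ENOSPC;

	/* ... run the DMA for rec_len bytes into sg[0..nents-1] ... */

	/* writes the header and makes the record visible to readers */
	kfifo_dma_in_finish(&msg_fifo, rec_len);
	return 0;
}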
diff --git a/lib/lru_cache.c b/lib/lru_cache.c index d71d89498943..8335d39d2ccd 100644 --- a/lib/lru_cache.c +++ b/lib/lru_cache.c | |||
@@ -262,12 +262,11 @@ static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr) | |||
262 | static struct lc_element *__lc_find(struct lru_cache *lc, unsigned int enr, | 262 | static struct lc_element *__lc_find(struct lru_cache *lc, unsigned int enr, |
263 | bool include_changing) | 263 | bool include_changing) |
264 | { | 264 | { |
265 | struct hlist_node *n; | ||
266 | struct lc_element *e; | 265 | struct lc_element *e; |
267 | 266 | ||
268 | BUG_ON(!lc); | 267 | BUG_ON(!lc); |
269 | BUG_ON(!lc->nr_elements); | 268 | BUG_ON(!lc->nr_elements); |
270 | hlist_for_each_entry(e, n, lc_hash_slot(lc, enr), colision) { | 269 | hlist_for_each_entry(e, lc_hash_slot(lc, enr), colision) { |
271 | /* "about to be changed" elements, pending transaction commit, | 270 | /* "about to be changed" elements, pending transaction commit, |
272 | * are hashed by their "new number". "Normal" elements have | 271 | * are hashed by their "new number". "Normal" elements have |
273 | * lc_number == lc_new_number. */ | 272 | * lc_number == lc_new_number. */ |
diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 7874b01e816e..b83c144d731f 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c | |||
@@ -394,6 +394,44 @@ int sg_alloc_table_from_pages(struct sg_table *sgt, | |||
394 | } | 394 | } |
395 | EXPORT_SYMBOL(sg_alloc_table_from_pages); | 395 | EXPORT_SYMBOL(sg_alloc_table_from_pages); |
396 | 396 | ||
397 | void __sg_page_iter_start(struct sg_page_iter *piter, | ||
398 | struct scatterlist *sglist, unsigned int nents, | ||
399 | unsigned long pgoffset) | ||
400 | { | ||
401 | piter->__pg_advance = 0; | ||
402 | piter->__nents = nents; | ||
403 | |||
404 | piter->page = NULL; | ||
405 | piter->sg = sglist; | ||
406 | piter->sg_pgoffset = pgoffset; | ||
407 | } | ||
408 | EXPORT_SYMBOL(__sg_page_iter_start); | ||
409 | |||
410 | static int sg_page_count(struct scatterlist *sg) | ||
411 | { | ||
412 | return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT; | ||
413 | } | ||
414 | |||
415 | bool __sg_page_iter_next(struct sg_page_iter *piter) | ||
416 | { | ||
417 | if (!piter->__nents || !piter->sg) | ||
418 | return false; | ||
419 | |||
420 | piter->sg_pgoffset += piter->__pg_advance; | ||
421 | piter->__pg_advance = 1; | ||
422 | |||
423 | while (piter->sg_pgoffset >= sg_page_count(piter->sg)) { | ||
424 | piter->sg_pgoffset -= sg_page_count(piter->sg); | ||
425 | piter->sg = sg_next(piter->sg); | ||
426 | if (!--piter->__nents || !piter->sg) | ||
427 | return false; | ||
428 | } | ||
429 | piter->page = nth_page(sg_page(piter->sg), piter->sg_pgoffset); | ||
430 | |||
431 | return true; | ||
432 | } | ||
433 | EXPORT_SYMBOL(__sg_page_iter_next); | ||
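__sg_page_iter_start() and __sg_page_iter_next() give callers a page-granular walk over an sg list without open-coding the offset arithmetic; sg_miter_next() below is rebuilt on top of them. A minimal sketch of a direct user, assuming sgl/nents describe a valid table and using the fields exactly as introduced above:

#include <linux/scatterlist.h>

static void walk_pages(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_page_iter piter;

	__sg_page_iter_start(&piter, sgl, nents, 0);
	while (__sg_page_iter_next(&piter)) {
		struct page *page = piter.page;

		/* ... operate on each page backing the sg list ... */
	}
}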
434 | |||
397 | /** | 435 | /** |
398 | * sg_miter_start - start mapping iteration over a sg list | 436 | * sg_miter_start - start mapping iteration over a sg list |
399 | * @miter: sg mapping iter to be started | 437 | * @miter: sg mapping iter to be started |
@@ -411,9 +449,7 @@ void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl, | |||
411 | { | 449 | { |
412 | memset(miter, 0, sizeof(struct sg_mapping_iter)); | 450 | memset(miter, 0, sizeof(struct sg_mapping_iter)); |
413 | 451 | ||
414 | miter->__sg = sgl; | 452 | __sg_page_iter_start(&miter->piter, sgl, nents, 0); |
415 | miter->__nents = nents; | ||
416 | miter->__offset = 0; | ||
417 | WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG))); | 453 | WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG))); |
418 | miter->__flags = flags; | 454 | miter->__flags = flags; |
419 | } | 455 | } |
@@ -438,36 +474,35 @@ EXPORT_SYMBOL(sg_miter_start); | |||
438 | */ | 474 | */ |
439 | bool sg_miter_next(struct sg_mapping_iter *miter) | 475 | bool sg_miter_next(struct sg_mapping_iter *miter) |
440 | { | 476 | { |
441 | unsigned int off, len; | ||
442 | |||
443 | /* check for end and drop resources from the last iteration */ | ||
444 | if (!miter->__nents) | ||
445 | return false; | ||
446 | |||
447 | sg_miter_stop(miter); | 477 | sg_miter_stop(miter); |
448 | 478 | ||
449 | /* get to the next sg if necessary. __offset is adjusted by stop */ | 479 | /* |
450 | while (miter->__offset == miter->__sg->length) { | 480 | * Get to the next page if necessary. |
451 | if (--miter->__nents) { | 481 | * __remaining and __offset are adjusted by sg_miter_stop |
452 | miter->__sg = sg_next(miter->__sg); | 482 | */ |
453 | miter->__offset = 0; | 483 | if (!miter->__remaining) { |
454 | } else | 484 | struct scatterlist *sg; |
485 | unsigned long pgoffset; | ||
486 | |||
487 | if (!__sg_page_iter_next(&miter->piter)) | ||
455 | return false; | 488 | return false; |
456 | } | ||
457 | 489 | ||
458 | /* map the next page */ | 490 | sg = miter->piter.sg; |
459 | off = miter->__sg->offset + miter->__offset; | 491 | pgoffset = miter->piter.sg_pgoffset; |
460 | len = miter->__sg->length - miter->__offset; | ||
461 | 492 | ||
462 | miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT); | 493 | miter->__offset = pgoffset ? 0 : sg->offset; |
463 | off &= ~PAGE_MASK; | 494 | miter->__remaining = sg->offset + sg->length - |
464 | miter->length = min_t(unsigned int, len, PAGE_SIZE - off); | 495 | (pgoffset << PAGE_SHIFT) - miter->__offset; |
465 | miter->consumed = miter->length; | 496 | miter->__remaining = min_t(unsigned long, miter->__remaining, |
497 | PAGE_SIZE - miter->__offset); | ||
498 | } | ||
499 | miter->page = miter->piter.page; | ||
500 | miter->consumed = miter->length = miter->__remaining; | ||
466 | 501 | ||
467 | if (miter->__flags & SG_MITER_ATOMIC) | 502 | if (miter->__flags & SG_MITER_ATOMIC) |
468 | miter->addr = kmap_atomic(miter->page) + off; | 503 | miter->addr = kmap_atomic(miter->page) + miter->__offset; |
469 | else | 504 | else |
470 | miter->addr = kmap(miter->page) + off; | 505 | miter->addr = kmap(miter->page) + miter->__offset; |
471 | 506 | ||
472 | return true; | 507 | return true; |
473 | } | 508 | } |
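With the page iterator doing the bookkeeping, sg_miter_next() now only derives __offset and __remaining for the current page and maps it; the kmap()/kmap_atomic() choice and the stop/flush rules are unchanged. A sketch of typical usage (a linear copy, roughly what sg_copy_buffer() does; names are hypothetical):

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

static size_t sg_copy_linear(struct scatterlist *sgl, unsigned int nents,
			     void *dst, size_t len)
{
	struct sg_mapping_iter miter;
	size_t done = 0;

	sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG);
	while (done < len && sg_miter_next(&miter)) {
		size_t n = min_t(size_t, miter.length, len - done);

		memcpy(dst + done, miter.addr, n);
		done += n;
	}
	sg_miter_stop(&miter);
	return done;
}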
@@ -494,6 +529,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter) | |||
494 | /* drop resources from the last iteration */ | 529 | /* drop resources from the last iteration */ |
495 | if (miter->addr) { | 530 | if (miter->addr) { |
496 | miter->__offset += miter->consumed; | 531 | miter->__offset += miter->consumed; |
532 | miter->__remaining -= miter->consumed; | ||
497 | 533 | ||
498 | if (miter->__flags & SG_MITER_TO_SG) | 534 | if (miter->__flags & SG_MITER_TO_SG) |
499 | flush_kernel_dcache_page(miter->page); | 535 | flush_kernel_dcache_page(miter->page); |