author     Tejun Heo <tj@kernel.org>    2013-04-01 20:08:13 -0400
committer  Tejun Heo <tj@kernel.org>    2013-04-01 21:45:36 -0400
commit     229641a6f1f09e27a1f12fba38980f33f4c92975 (patch)
tree       234a6f8aea0910de3242af0bbe6d7494fcf81847 /lib
parent     d55262c4d164759a8debe772da6c9b16059dec47 (diff)
parent     07961ac7c0ee8b546658717034fe692fd12eefa9 (diff)
Merge tag 'v3.9-rc5' into wq/for-3.10
Writeback conversion to workqueue will be based on top of the wq/for-3.10
branch to take advantage of custom attrs and NUMA support for unbound
workqueues.

Mainline currently contains two commits which result in non-trivial merge
conflicts with wq/for-3.10, and because block/for-3.10/core is based on
v3.9-rc3, which contains one of the conflicting commits, we need a
pre-merge-window merge anyway.  Let's pull v3.9-rc5 into wq/for-3.10 so
that the block tree doesn't suffer from workqueue merge conflicts.

The two conflicts and their resolutions:

* e68035fb65 ("workqueue: convert to idr_alloc()") in mainline changes
  worker_pool_assign_id() to use idr_alloc() instead of the old idr
  interface.  worker_pool_assign_id() goes through multiple locking
  changes in wq/for-3.10 causing the following conflict.

        static int worker_pool_assign_id(struct worker_pool *pool)
        {
                int ret;

        <<<<<<< HEAD
                lockdep_assert_held(&wq_pool_mutex);

                do {
                        if (!idr_pre_get(&worker_pool_idr, GFP_KERNEL))
                                return -ENOMEM;
                        ret = idr_get_new(&worker_pool_idr, pool, &pool->id);
                } while (ret == -EAGAIN);
        =======
                mutex_lock(&worker_pool_idr_mutex);
                ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
                if (ret >= 0)
                        pool->id = ret;
                mutex_unlock(&worker_pool_idr_mutex);
        >>>>>>> c67bf5361e7e66a0ff1f4caf95f89347d55dfb89

                return ret < 0 ? ret : 0;
        }

  We want locking from the former and idr_alloc() usage from the latter,
  which can be combined to the following.

        static int worker_pool_assign_id(struct worker_pool *pool)
        {
                int ret;

                lockdep_assert_held(&wq_pool_mutex);

                ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
                if (ret >= 0) {
                        pool->id = ret;
                        return 0;
                }
                return ret;
        }

* eb2834285c ("workqueue: fix possible pool stall bug in wq_unbind_fn()")
  updated wq_unbind_fn() such that it has a single larger
  for_each_std_worker_pool() loop instead of two separate loops with a
  schedule() call in between.  wq/for-3.10 renamed pool->assoc_mutex to
  pool->manager_mutex causing the following conflict (earlier function
  body and comments omitted for brevity).

        static void wq_unbind_fn(struct work_struct *work)
        {
        ...
                        spin_unlock_irq(&pool->lock);
        <<<<<<< HEAD
                        mutex_unlock(&pool->manager_mutex);
                }
        =======
                        mutex_unlock(&pool->assoc_mutex);
        >>>>>>> c67bf5361e7e66a0ff1f4caf95f89347d55dfb89

                schedule();

        <<<<<<< HEAD
                for_each_cpu_worker_pool(pool, cpu)
        =======
        >>>>>>> c67bf5361e7e66a0ff1f4caf95f89347d55dfb89
                        atomic_set(&pool->nr_running, 0);

                        spin_lock_irq(&pool->lock);
                        wake_up_worker(pool);
                        spin_unlock_irq(&pool->lock);
                }
        }

  The resolution is mostly trivial.  We want the control flow of the
  latter with the rename of the former.

        static void wq_unbind_fn(struct work_struct *work)
        {
        ...
                        spin_unlock_irq(&pool->lock);
                        mutex_unlock(&pool->manager_mutex);

                        schedule();

                        atomic_set(&pool->nr_running, 0);

                        spin_lock_irq(&pool->lock);
                        wake_up_worker(pool);
                        spin_unlock_irq(&pool->lock);
                }
        }

Signed-off-by: Tejun Heo <tj@kernel.org>
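For readers unfamiliar with the idr API change behind the first conflict, the sketch below is an illustrative comparison only, not part of this commit; the helper names assign_id_old() and assign_id_new() are made up for the example.  The old interface preallocated with idr_pre_get() and retried idr_get_new() on -EAGAIN, while idr_alloc() is a single call that returns the new id or a negative errno.

        #include <linux/idr.h>
        #include <linux/gfp.h>

        /* old two-step interface: preallocate, allocate, retry on -EAGAIN */
        static int assign_id_old(struct idr *idr, void *ptr, int *id)
        {
                int ret;

                do {
                        if (!idr_pre_get(idr, GFP_KERNEL))
                                return -ENOMEM;
                        ret = idr_get_new(idr, ptr, id);
                } while (ret == -EAGAIN);

                return ret;
        }

        /* idr_alloc(): one call, returns the allocated id or a negative errno */
        static int assign_id_new(struct idr *idr, void *ptr, int *id)
        {
                int ret;

                ret = idr_alloc(idr, ptr, 0, 0, GFP_KERNEL);
                if (ret < 0)
                        return ret;
                *id = ret;
                return 0;
        }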
Diffstat (limited to 'lib')
-rw-r--r--  lib/bust_spinlocks.c   3
-rw-r--r--  lib/dma-debug.c       45
-rw-r--r--  lib/idr.c             96
-rw-r--r--  lib/xz/Kconfig         2
4 files changed, 66 insertions, 80 deletions
diff --git a/lib/bust_spinlocks.c b/lib/bust_spinlocks.c
index 9681d54b95d1..f8e0e5367398 100644
--- a/lib/bust_spinlocks.c
+++ b/lib/bust_spinlocks.c
@@ -8,6 +8,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/printk.h>
 #include <linux/spinlock.h>
 #include <linux/tty.h>
 #include <linux/wait.h>
@@ -28,5 +29,3 @@ void __attribute__((weak)) bust_spinlocks(int yes)
 			wake_up_klogd();
 	}
 }
-
-
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 5e396accd3d0..d87a17a819d0 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -862,17 +862,21 @@ static void check_unmap(struct dma_debug_entry *ref)
 	entry = bucket_find_exact(bucket, ref);
 
 	if (!entry) {
+		/* must drop lock before calling dma_mapping_error */
+		put_hash_bucket(bucket, &flags);
+
 		if (dma_mapping_error(ref->dev, ref->dev_addr)) {
 			err_printk(ref->dev, NULL,
-				   "DMA-API: device driver tries "
-				   "to free an invalid DMA memory address\n");
-			return;
+				   "DMA-API: device driver tries to free an "
+				   "invalid DMA memory address\n");
+		} else {
+			err_printk(ref->dev, NULL,
+				   "DMA-API: device driver tries to free DMA "
+				   "memory it has not allocated [device "
+				   "address=0x%016llx] [size=%llu bytes]\n",
+				   ref->dev_addr, ref->size);
 		}
-		err_printk(ref->dev, NULL, "DMA-API: device driver tries "
-			   "to free DMA memory it has not allocated "
-			   "[device address=0x%016llx] [size=%llu bytes]\n",
-			   ref->dev_addr, ref->size);
-		goto out;
+		return;
 	}
 
 	if (ref->size != entry->size) {
@@ -936,7 +940,6 @@ static void check_unmap(struct dma_debug_entry *ref)
 	hash_bucket_del(entry);
 	dma_entry_free(entry);
 
-out:
 	put_hash_bucket(bucket, &flags);
 }
 
@@ -1082,13 +1085,27 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 	ref.dev = dev;
 	ref.dev_addr = dma_addr;
 	bucket = get_hash_bucket(&ref, &flags);
-	entry = bucket_find_exact(bucket, &ref);
 
-	if (!entry)
-		goto out;
+	list_for_each_entry(entry, &bucket->list, list) {
+		if (!exact_match(&ref, entry))
+			continue;
+
+		/*
+		 * The same physical address can be mapped multiple
+		 * times. Without a hardware IOMMU this results in the
+		 * same device addresses being put into the dma-debug
+		 * hash multiple times too. This can result in false
+		 * positives being reported. Therefore we implement a
+		 * best-fit algorithm here which updates the first entry
+		 * from the hash which fits the reference value and is
+		 * not currently listed as being checked.
+		 */
+		if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
+			entry->map_err_type = MAP_ERR_CHECKED;
+			break;
+		}
+	}
 
-	entry->map_err_type = MAP_ERR_CHECKED;
-out:
 	put_hash_bucket(bucket, &flags);
 }
 EXPORT_SYMBOL(debug_dma_mapping_error);
diff --git a/lib/idr.c b/lib/idr.c
index 73f4d53c02f3..322e2816f2fb 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -106,8 +106,14 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
 	if (layer_idr)
 		return get_from_free_list(layer_idr);
 
-	/* try to allocate directly from kmem_cache */
-	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
+	/*
+	 * Try to allocate directly from kmem_cache.  We want to try this
+	 * before preload buffer; otherwise, non-preloading idr_alloc()
+	 * users will end up taking advantage of preloading ones.  As the
+	 * following is allowed to fail for preloaded cases, suppress
+	 * warning this time.
+	 */
+	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN);
 	if (new)
 		return new;
 
@@ -115,18 +121,24 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
 	 * Try to fetch one from the per-cpu preload buffer if in process
 	 * context.  See idr_preload() for details.
 	 */
-	if (in_interrupt())
-		return NULL;
-
-	preempt_disable();
-	new = __this_cpu_read(idr_preload_head);
-	if (new) {
-		__this_cpu_write(idr_preload_head, new->ary[0]);
-		__this_cpu_dec(idr_preload_cnt);
-		new->ary[0] = NULL;
+	if (!in_interrupt()) {
+		preempt_disable();
+		new = __this_cpu_read(idr_preload_head);
+		if (new) {
+			__this_cpu_write(idr_preload_head, new->ary[0]);
+			__this_cpu_dec(idr_preload_cnt);
+			new->ary[0] = NULL;
+		}
+		preempt_enable();
+		if (new)
+			return new;
 	}
-	preempt_enable();
-	return new;
+
+	/*
+	 * Both failed.  Try kmem_cache again w/o adding __GFP_NOWARN so
+	 * that memory allocation failure warning is printed as intended.
+	 */
+	return kmem_cache_zalloc(idr_layer_cache, gfp_mask);
 }
 
 static void idr_layer_rcu_free(struct rcu_head *head)
@@ -184,20 +196,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
 	}
 }
 
-/**
- * idr_pre_get - reserve resources for idr allocation
- * @idp:	idr handle
- * @gfp_mask:	memory allocation flags
- *
- * This function should be called prior to calling the idr_get_new* functions.
- * It preallocates enough memory to satisfy the worst possible allocation. The
- * caller should pass in GFP_KERNEL if possible.  This of course requires that
- * no spinning locks be held.
- *
- * If the system is REALLY out of memory this function returns %0,
- * otherwise %1.
- */
-int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
+int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 {
 	while (idp->id_free_cnt < MAX_IDR_FREE) {
 		struct idr_layer *new;
@@ -208,13 +207,12 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 	}
 	return 1;
 }
-EXPORT_SYMBOL(idr_pre_get);
+EXPORT_SYMBOL(__idr_pre_get);
 
 /**
  * sub_alloc - try to allocate an id without growing the tree depth
  * @idp: idr handle
  * @starting_id: id to start search at
- * @id: pointer to the allocated handle
  * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
  * @gfp_mask: allocation mask for idr_layer_alloc()
  * @layer_idr: optional idr passed to idr_layer_alloc()
@@ -376,25 +374,7 @@ static void idr_fill_slot(struct idr *idr, void *ptr, int id,
 	idr_mark_full(pa, id);
 }
 
-/**
- * idr_get_new_above - allocate new idr entry above or equal to a start id
- * @idp: idr handle
- * @ptr: pointer you want associated with the id
- * @starting_id: id to start search at
- * @id: pointer to the allocated handle
- *
- * This is the allocate id function.  It should be called with any
- * required locks.
- *
- * If allocation from IDR's private freelist fails, idr_get_new_above() will
- * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
- * IDR's preallocation and then retry the idr_get_new_above() call.
- *
- * If the idr is full idr_get_new_above() will return %-ENOSPC.
- *
- * @id returns a value in the range @starting_id ... %0x7fffffff
- */
-int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
+int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 {
 	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
 	int rv;
@@ -407,7 +387,7 @@ int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 	*id = rv;
 	return 0;
 }
-EXPORT_SYMBOL(idr_get_new_above);
+EXPORT_SYMBOL(__idr_get_new_above);
 
 /**
  * idr_preload - preload for idr_alloc()
@@ -569,8 +549,7 @@ void idr_remove(struct idr *idp, int id)
 	struct idr_layer *p;
 	struct idr_layer *to_free;
 
-	/* see comment in idr_find_slowpath() */
-	if (WARN_ON_ONCE(id < 0))
+	if (id < 0)
 		return;
 
 	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
@@ -667,15 +646,7 @@ void *idr_find_slowpath(struct idr *idp, int id)
 	int n;
 	struct idr_layer *p;
 
-	/*
-	 * If @id is negative, idr_find() used to ignore the sign bit and
-	 * performed lookup with the rest of bits, which is weird and can
-	 * lead to very obscure bugs.  We're now returning NULL for all
-	 * negative IDs but just in case somebody was depending on the sign
-	 * bit being ignored, let's trigger WARN_ON_ONCE() so that they can
-	 * be detected and fixed.  WARN_ON_ONCE() can later be removed.
-	 */
-	if (WARN_ON_ONCE(id < 0))
+	if (id < 0)
 		return NULL;
 
 	p = rcu_dereference_raw(idp->top);
@@ -824,8 +795,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
 	int n;
 	struct idr_layer *p, *old_p;
 
-	/* see comment in idr_find_slowpath() */
-	if (WARN_ON_ONCE(id < 0))
+	if (id < 0)
 		return ERR_PTR(-EINVAL);
 
 	p = idp->top;
@@ -918,7 +888,7 @@ static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
 int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
 {
 	/* allocate idr_layers */
-	if (!idr_pre_get(&ida->idr, gfp_mask))
+	if (!__idr_pre_get(&ida->idr, gfp_mask))
 		return 0;
 
 	/* allocate free_bitmap */
diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig
index 82a04d7ba99e..08837db52d94 100644
--- a/lib/xz/Kconfig
+++ b/lib/xz/Kconfig
@@ -15,7 +15,7 @@ config XZ_DEC_X86
 
 config XZ_DEC_POWERPC
 	bool "PowerPC BCJ filter decoder"
-	default y if POWERPC
+	default y if PPC
 	select XZ_DEC_BCJ
 
 config XZ_DEC_IA64