author     Linus Torvalds <torvalds@linux-foundation.org>  2011-10-26 11:08:43 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-10-26 11:08:43 -0400
commit     8a4a8918ed6e4a361f4df19f199bbc2d0a89a46c (patch)
tree       d76974986aaaa8549baf2d6a106fa6cb60d64b88 /lib
parent     8686a0e200419322654a75155e2e6f80346a1297 (diff)
parent     540f41edc15473ca3b2876de72646546ae101374 (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (46 commits)
  llist: Add back llist_add_batch() and llist_del_first() prototypes
  sched: Don't use tasklist_lock for debug prints
  sched: Warn on rt throttling
  sched: Unify the ->cpus_allowed mask copy
  sched: Wrap scheduler p->cpus_allowed access
  sched: Request for idle balance during nohz idle load balance
  sched: Use resched IPI to kick off the nohz idle balance
  sched: Fix idle_cpu()
  llist: Remove cpu_relax() usage in cmpxchg loops
  sched: Convert to struct llist
  llist: Add llist_next()
  irq_work: Use llist in the struct irq_work logic
  llist: Return whether list is empty before adding in llist_add()
  llist: Move cpu_relax() to after the cmpxchg()
  llist: Remove the platform-dependent NMI checks
  llist: Make some llist functions inline
  sched, tracing: Show PREEMPT_ACTIVE state in trace_sched_switch
  sched: Remove redundant test in check_preempt_tick()
  sched: Add documentation for bandwidth control
  sched: Return unused runtime on group dequeue
  ...
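Several of the llist commits above revolve around one API point: llist_add() and llist_add_batch() now return whether the list was empty before the add, so a producer only needs to kick the consumer on the empty -> non-empty transition (this is what the irq_work conversion relies on). A minimal sketch of that pattern, assuming hypothetical names (my_work, wake_up_consumer) that are not part of this merge:

/* Sketch only: illustrates the empty -> non-empty kick pattern enabled by
 * the llist changes in this merge.  my_work and wake_up_consumer() are
 * made-up names, not kernel APIs. */
#include <linux/llist.h>

struct my_work {
	struct llist_node llnode;
	void (*func)(struct my_work *);
};

static LLIST_HEAD(pending);	/* shared lock-less list head */

static void wake_up_consumer(void)
{
	/* placeholder: e.g. send an IPI or wake a kthread */
}

static void my_queue_work(struct my_work *w)
{
	/* llist_add() returns true if the list was empty beforehand,
	 * so only the first producer after a drain issues the kick */
	if (llist_add(&w->llnode, &pending))
		wake_up_consumer();
}

static void my_consume_all(void)
{
	/* grab the whole list in one xchg; entries come back newest-first */
	struct llist_node *node = llist_del_all(&pending);

	while (node) {
		struct my_work *w = llist_entry(node, struct my_work, llnode);

		node = llist_next(node);	/* helper added by this series */
		w->func(w);
	}
}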
Diffstat (limited to 'lib')
-rw-r--r--	lib/Kconfig            |  3
-rw-r--r--	lib/Makefile           |  4
-rw-r--r--	lib/llist.c            | 74
-rw-r--r--	lib/smp_processor_id.c |  2
4 files changed, 19 insertions, 64 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 6c695ff9caba..32f3e5ae2be5 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -276,7 +276,4 @@ config CORDIC
 	  so its calculations are in fixed point. Modules can select this
 	  when they require this function. Module will be called cordic.
 
-config LLIST
-	bool
-
 endmenu
diff --git a/lib/Makefile b/lib/Makefile
index 3f5bc6d903e0..a4da283f5dc0 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -22,7 +22,7 @@ lib-y += kobject.o kref.o klist.o
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
 	 string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \
-	 bsearch.o find_last_bit.o find_next_bit.o
+	 bsearch.o find_last_bit.o find_next_bit.o llist.o
 obj-y += kstrtox.o
 obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
 
@@ -115,8 +115,6 @@ obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
 
 obj-$(CONFIG_CORDIC) += cordic.o
 
-obj-$(CONFIG_LLIST) += llist.o
-
 hostprogs-y := gen_crc32table
 clean-files := crc32table.h
 
diff --git a/lib/llist.c b/lib/llist.c
index da445724fa1f..700cff77a387 100644
--- a/lib/llist.c
+++ b/lib/llist.c
@@ -3,8 +3,8 @@
  *
  * The basic atomic operation of this list is cmpxchg on long.  On
  * architectures that don't have NMI-safe cmpxchg implementation, the
- * list can NOT be used in NMI handler.  So code uses the list in NMI
- * handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
+ * list can NOT be used in NMI handlers.  So code that uses the list in
+ * an NMI handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
  *
  * Copyright 2010,2011 Intel Corp.
  *   Author: Huang Ying <ying.huang@intel.com>
@@ -30,48 +30,28 @@
 #include <asm/system.h>
 
 /**
- * llist_add - add a new entry
- * @new:	new entry to be added
- * @head:	the head for your lock-less list
- */
-void llist_add(struct llist_node *new, struct llist_head *head)
-{
-	struct llist_node *entry, *old_entry;
-
-#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
-	BUG_ON(in_nmi());
-#endif
-
-	entry = head->first;
-	do {
-		old_entry = entry;
-		new->next = entry;
-		cpu_relax();
-	} while ((entry = cmpxchg(&head->first, old_entry, new)) != old_entry);
-}
-EXPORT_SYMBOL_GPL(llist_add);
-
-/**
  * llist_add_batch - add several linked entries in batch
  * @new_first:	first entry in batch to be added
  * @new_last:	last entry in batch to be added
  * @head:	the head for your lock-less list
+ *
+ * Return whether list is empty before adding.
  */
-void llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
+bool llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
 		     struct llist_head *head)
 {
 	struct llist_node *entry, *old_entry;
 
-#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
-	BUG_ON(in_nmi());
-#endif
-
 	entry = head->first;
-	do {
+	for (;;) {
 		old_entry = entry;
 		new_last->next = entry;
-		cpu_relax();
-	} while ((entry = cmpxchg(&head->first, old_entry, new_first)) != old_entry);
+		entry = cmpxchg(&head->first, old_entry, new_first);
+		if (entry == old_entry)
+			break;
+	}
+
+	return old_entry == NULL;
 }
 EXPORT_SYMBOL_GPL(llist_add_batch);
 
@@ -93,37 +73,17 @@ struct llist_node *llist_del_first(struct llist_head *head)
 {
 	struct llist_node *entry, *old_entry, *next;
 
-#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
-	BUG_ON(in_nmi());
-#endif
-
 	entry = head->first;
-	do {
+	for (;;) {
 		if (entry == NULL)
 			return NULL;
 		old_entry = entry;
 		next = entry->next;
-		cpu_relax();
-	} while ((entry = cmpxchg(&head->first, old_entry, next)) != old_entry);
+		entry = cmpxchg(&head->first, old_entry, next);
+		if (entry == old_entry)
+			break;
+	}
 
 	return entry;
 }
 EXPORT_SYMBOL_GPL(llist_del_first);
-
-/**
- * llist_del_all - delete all entries from lock-less list
- * @head:	the head of lock-less list to delete all entries
- *
- * If list is empty, return NULL, otherwise, delete all entries and
- * return the pointer to the first entry.  The order of entries
- * deleted is from the newest to the oldest added one.
- */
-struct llist_node *llist_del_all(struct llist_head *head)
-{
-#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
-	BUG_ON(in_nmi());
-#endif
-
-	return xchg(&head->first, NULL);
-}
-EXPORT_SYMBOL_GPL(llist_del_all);
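Note that llist_add() and llist_del_all() are not gone; per the "llist: Make some llist functions inline" commit they move into include/linux/llist.h as inlines. That header is not part of this diff, but the inlined llist_add() presumably mirrors the llist_add_batch() loop shown above, roughly along these lines:

/* Assumed shape of the inlined llist_add() after this series (the real
 * definition lives in include/linux/llist.h, which is not shown here). */
static inline bool llist_add(struct llist_node *new, struct llist_head *head)
{
	struct llist_node *entry, *old_entry;

	entry = head->first;
	for (;;) {
		old_entry = entry;
		new->next = entry;
		entry = cmpxchg(&head->first, old_entry, new);
		if (entry == old_entry)
			break;
	}

	/* true if the list was empty before this add */
	return old_entry == NULL;
}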
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 4689cb073da4..503f087382a4 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -22,7 +22,7 @@ notrace unsigned int debug_smp_processor_id(void)
 	 * Kernel threads bound to a single CPU can safely use
 	 * smp_processor_id():
 	 */
-	if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu)))
+	if (cpumask_equal(tsk_cpus_allowed(current), cpumask_of(this_cpu)))
 		goto out;
 
 	/*
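tsk_cpus_allowed() is introduced elsewhere in this merge ("sched: Wrap scheduler p->cpus_allowed access"); it is not defined in this lib/ diff, but it is presumably a thin accessor of this form:

/* Assumed definition (from include/linux/sched.h in this series; not part
 * of the diff shown here): a wrapper for accessing ->cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)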