Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug       7
-rw-r--r--  lib/flex_array.c       25
-rw-r--r--  lib/iommu-helper.c      9
-rw-r--r--  lib/list_debug.c        6
-rw-r--r--  lib/percpu_counter.c   27
-rw-r--r--  lib/radix-tree.c       94
-rw-r--r--  lib/rwsem.c           150
-rw-r--r--  lib/scatterlist.c      23
-rw-r--r--  lib/vsprintf.c         14
9 files changed, 268 insertions(+), 87 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 79e0dff1cdcb..9e06b7f5ecf1 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -410,6 +410,13 @@ config DEBUG_KMEMLEAK_TEST
 
 	  If unsure, say N.
 
+config DEBUG_KMEMLEAK_DEFAULT_OFF
+	bool "Default kmemleak to off"
+	depends on DEBUG_KMEMLEAK
+	help
+	  Say Y here to disable kmemleak by default. It can then be enabled
+	  on the command line via kmemleak=on.
+
 config DEBUG_PREEMPT
 	bool "Debug preemptible kernel"
 	depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT
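A quick illustration of how the new option is meant to be used (the kmemleak=on/off boot parameter itself predates this patch; the .config fragment below is illustrative, not from the diff). Building with the option selected keeps the detector compiled in but dormant until explicitly enabled at boot:

	# .config fragment:
	CONFIG_DEBUG_KMEMLEAK=y
	CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y

	# kernel command line, re-enabling the detector for one boot:
	kmemleak=on
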
diff --git a/lib/flex_array.c b/lib/flex_array.c
index 41b1804fa728..77a6fea7481e 100644
--- a/lib/flex_array.c
+++ b/lib/flex_array.c
@@ -171,6 +171,8 @@ __fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
  * Note that this *copies* the contents of @src into
  * the array.  If you are trying to store an array of
  * pointers, make sure to pass in &ptr instead of ptr.
+ * You may instead wish to use the flex_array_put_ptr()
+ * helper function.
  *
  * Locking must be provided by the caller.
  */
@@ -265,7 +267,8 @@ int flex_array_prealloc(struct flex_array *fa, unsigned int start,
  *
  * Returns a pointer to the data at index @element_nr.  Note
  * that this is a copy of the data that was passed in.  If you
- * are using this to store pointers, you'll get back &ptr.
+ * are using this to store pointers, you'll get back &ptr.  You
+ * may instead wish to use the flex_array_get_ptr helper.
  *
  * Locking must be provided by the caller.
  */
@@ -286,6 +289,26 @@ void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
 	return &part->elements[index_inside_part(fa, element_nr)];
 }
 
+/**
+ * flex_array_get_ptr - pull a ptr back out of the array
+ * @fa:		the flex array from which to extract data
+ * @element_nr:	index of the element to fetch from the array
+ *
+ * Returns the pointer placed in the flex array at element_nr using
+ * flex_array_put_ptr().  This function should not be called if the
+ * element in question was not set using the _put_ptr() helper.
+ */
+void *flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr)
+{
+	void **tmp;
+
+	tmp = flex_array_get(fa, element_nr);
+	if (!tmp)
+		return NULL;
+
+	return *tmp;
+}
+
 static int part_is_free(struct flex_array_part *part)
 {
 	int i;
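A minimal usage sketch of the new helper pair (hypothetical caller code, assuming the flex_array_put_ptr() macro that accompanies this helper in <linux/flex_array.h>):

	struct flex_array *fa;
	struct foo *p = some_foo, *q;
	int err;

	fa = flex_array_alloc(sizeof(void *), nr_elements, GFP_KERNEL);
	if (!fa)
		return -ENOMEM;
	/* store the pointer value itself; the macro passes &p internally */
	err = flex_array_put_ptr(fa, 0, p, GFP_KERNEL);
	/* later: get the pointer back, already dereferenced for us */
	q = flex_array_get_ptr(fa, 0);

This avoids the &ptr/ptr confusion the docbook comments above warn about.
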
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
index c0251f4ad08b..da053313ee5c 100644
--- a/lib/iommu-helper.c
+++ b/lib/iommu-helper.c
@@ -38,12 +38,3 @@ again:
 	return -1;
 }
 EXPORT_SYMBOL(iommu_area_alloc);
-
-unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
-			      unsigned long io_page_size)
-{
-	unsigned long size = (addr & (io_page_size - 1)) + len;
-
-	return DIV_ROUND_UP(size, io_page_size);
-}
-EXPORT_SYMBOL(iommu_num_pages);
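For reference, the removed helper rounded a byte range up to whole IO pages, counting the offset into the first page: with addr = 0x1234, len = 0x2000 and io_page_size = 0x1000, size = 0x234 + 0x2000 = 0x2234 and DIV_ROUND_UP(0x2234, 0x1000) = 3 pages.
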
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 1a39f4e3ae1f..344c710d16ca 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -43,6 +43,12 @@ EXPORT_SYMBOL(__list_add);
  */
 void list_del(struct list_head *entry)
 {
+	WARN(entry->next == LIST_POISON1,
+		"list_del corruption, next is LIST_POISON1 (%p)\n",
+		LIST_POISON1);
+	WARN(entry->next != LIST_POISON1 && entry->prev == LIST_POISON2,
+		"list_del corruption, prev is LIST_POISON2 (%p)\n",
+		LIST_POISON2);
 	WARN(entry->prev->next != entry,
 		"list_del corruption. prev->next should be %p, "
 		"but was %p\n", entry, entry->prev->next);
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index aeaa6d734447..ec9048e74f44 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -137,6 +137,33 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
 	return NOTIFY_OK;
 }
 
+/*
+ * Compare counter against given value.
+ * Return 1 if greater, 0 if equal and -1 if less
+ */
+int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
+{
+	s64	count;
+
+	count = percpu_counter_read(fbc);
+	/* Check to see if rough count will be sufficient for comparison */
+	if (abs(count - rhs) > (percpu_counter_batch * num_online_cpus())) {
+		if (count > rhs)
+			return 1;
+		else
+			return -1;
+	}
+	/* Need to use precise count */
+	count = percpu_counter_sum(fbc);
+	if (count > rhs)
+		return 1;
+	else if (count < rhs)
+		return -1;
+	else
+		return 0;
+}
+EXPORT_SYMBOL(percpu_counter_compare);
+
 static int __init percpu_counter_startup(void)
 {
 	compute_batch_value();
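The point of the two-step comparison: the cheap percpu_counter_read() can drift from the true value by at most percpu_counter_batch per online CPU, so whenever the rough count is further than that from rhs, its sign relative to rhs is already trustworthy; only in the ambiguous window does the function pay for the expensive percpu_counter_sum(). An illustrative caller (hypothetical names, not from the patch):

	/* e.g. a writeback-style limit check */
	if (percpu_counter_compare(&counters->nr_dirty, limit) >= 0)
		throttle_writers();	/* at or over the limit for sure */
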
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 05da38bcc298..e907858498a6 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -609,6 +609,100 @@ int radix_tree_tag_get(struct radix_tree_root *root,
 EXPORT_SYMBOL(radix_tree_tag_get);
 
 /**
+ * radix_tree_range_tag_if_tagged - for each item in given range set given
+ *				    tag if item has another tag set
+ * @root:		radix tree root
+ * @first_indexp:	pointer to a starting index of a range to scan
+ * @last_index:		last index of a range to scan
+ * @nr_to_tag:		maximum number of items to tag
+ * @iftag:		tag index to test
+ * @settag:		tag index to set if tested tag is set
+ *
+ * This function scans the range of the radix tree from first_index to
+ * last_index (inclusive).  For each item in the range, if iftag is set,
+ * the function also sets settag.  The function stops either after tagging
+ * nr_to_tag items or after reaching last_index.
+ *
+ * The function returns the number of leaves where the tag was set and sets
+ * *first_indexp to the first unscanned index.
+ */
+unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
+		unsigned long *first_indexp, unsigned long last_index,
+		unsigned long nr_to_tag,
+		unsigned int iftag, unsigned int settag)
+{
+	unsigned int height = root->height, shift;
+	unsigned long tagged = 0, index = *first_indexp;
+	struct radix_tree_node *open_slots[height], *slot;
+
+	last_index = min(last_index, radix_tree_maxindex(height));
+	if (index > last_index)
+		return 0;
+	if (!nr_to_tag)
+		return 0;
+	if (!root_tag_get(root, iftag)) {
+		*first_indexp = last_index + 1;
+		return 0;
+	}
+	if (height == 0) {
+		*first_indexp = last_index + 1;
+		root_tag_set(root, settag);
+		return 1;
+	}
+
+	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
+	slot = radix_tree_indirect_to_ptr(root->rnode);
+
+	for (;;) {
+		int offset;
+
+		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
+		if (!slot->slots[offset])
+			goto next;
+		if (!tag_get(slot, iftag, offset))
+			goto next;
+		tag_set(slot, settag, offset);
+		if (height == 1) {
+			tagged++;
+			goto next;
+		}
+		/* Go down one level */
+		height--;
+		shift -= RADIX_TREE_MAP_SHIFT;
+		open_slots[height] = slot;
+		slot = slot->slots[offset];
+		continue;
+next:
+		/* Go to next item at level determined by 'shift' */
+		index = ((index >> shift) + 1) << shift;
+		if (index > last_index)
+			break;
+		if (tagged >= nr_to_tag)
+			break;
+		while (((index >> shift) & RADIX_TREE_MAP_MASK) == 0) {
+			/*
+			 * We've fully scanned this node. Go up. Because
+			 * last_index is guaranteed to be in the tree, what
+			 * we do below cannot wander astray.
+			 */
+			slot = open_slots[height];
+			height++;
+			shift += RADIX_TREE_MAP_SHIFT;
+		}
+	}
+	/*
+	 * The iftag must have been set somewhere because otherwise
+	 * we would return immediately at the beginning of the function
+	 */
+	root_tag_set(root, settag);
+	*first_indexp = index;
+
+	return tagged;
+}
+EXPORT_SYMBOL(radix_tree_range_tag_if_tagged);
+
+
+/**
  * radix_tree_next_hole - find the next hole (not-present entry)
  * @root:	tree root
  * @index:	index key
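A sketch of the intended call pattern, modeled on the tagged-writeback use this helper was written for (PAGECACHE_TAG_DIRTY is an existing tag; PAGECACHE_TAG_TOWRITE comes from the companion writeback patch, and the surrounding code is hypothetical):

	unsigned long start = 0, tagged;

	/* mark up to 1024 dirty pages as "to write" in one pass */
	spin_lock_irq(&mapping->tree_lock);
	tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
			&start, ULONG_MAX, 1024,
			PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);
	/* 'start' now holds the first unscanned index, so the caller
	 * can drop the lock and resume the scan from there */

The nr_to_tag cap plus the updated *first_indexp are what make the scan restartable without holding the tree lock across the whole range.
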
diff --git a/lib/rwsem.c b/lib/rwsem.c
index ceba8e28807a..f236d7cd5cf3 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -36,45 +36,56 @@ struct rwsem_waiter {
 #define RWSEM_WAITING_FOR_WRITE	0x00000002
 };
 
+/* Wake types for __rwsem_do_wake().  Note that RWSEM_WAKE_NO_ACTIVE and
+ * RWSEM_WAKE_READ_OWNED imply that the spinlock must have been kept held
+ * since the rwsem value was observed.
+ */
+#define RWSEM_WAKE_ANY		0 /* Wake whatever's at head of wait list */
+#define RWSEM_WAKE_NO_ACTIVE	1 /* rwsem was observed with no active thread */
+#define RWSEM_WAKE_READ_OWNED	2 /* rwsem was observed to be read owned */
+
 /*
  * handle the lock release when processes blocked on it that can now run
  * - if we come here from up_xxxx(), then:
  *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
  *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
  * - there must be someone on the queue
  * - the spinlock must be held by the caller
  * - woken process blocks are discarded from the list after having task zeroed
  * - writers are only woken if downgrading is false
  */
-static inline struct rw_semaphore *
-__rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
+static struct rw_semaphore *
+__rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
 {
 	struct rwsem_waiter *waiter;
 	struct task_struct *tsk;
 	struct list_head *next;
-	signed long oldcount, woken, loop;
-
-	if (downgrading)
-		goto dont_wake_writers;
-
-	/* if we came through an up_xxxx() call, we only wake someone up
-	 * if we can transition the active part of the count from 0 -> 1
-	 */
- try_again:
-	oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
-						- RWSEM_ACTIVE_BIAS;
-	if (oldcount & RWSEM_ACTIVE_MASK)
-		goto undo;
+	signed long oldcount, woken, loop, adjustment;
 
 	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
-
-	/* try to grant a single write lock if there's a writer at the front
-	 * of the queue - note we leave the 'active part' of the count
-	 * incremented by 1 and the waiting part incremented by 0x00010000
-	 */
 	if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
 		goto readers_only;
 
+	if (wake_type == RWSEM_WAKE_READ_OWNED)
+		/* Another active reader was observed, so wakeup is not
+		 * likely to succeed.  Save the atomic op.
+		 */
+		goto out;
+
+	/* There's a writer at the front of the queue - try to grant it the
+	 * write lock.  However, we only wake this writer if we can transition
+	 * the active part of the count from 0 -> 1
+	 */
+	adjustment = RWSEM_ACTIVE_WRITE_BIAS;
+	if (waiter->list.next == &sem->wait_list)
+		adjustment -= RWSEM_WAITING_BIAS;
+
+ try_again_write:
+	oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
+	if (oldcount & RWSEM_ACTIVE_MASK)
+		/* Someone grabbed the sem already */
+		goto undo_write;
+
 	/* We must be careful not to touch 'waiter' after we set ->task = NULL.
 	 * It is allocated on the waiter's stack and may become invalid at
 	 * any time after that point (due to a wakeup from another source).
@@ -87,18 +98,30 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
 	put_task_struct(tsk);
 	goto out;
 
-	/* don't want to wake any writers */
- dont_wake_writers:
-	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
-	if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
+ readers_only:
+	/* If we come here from up_xxxx(), another thread might have reached
+	 * rwsem_down_failed_common() before we acquired the spinlock and
+	 * woken up a waiter, making it now active.  We prefer to check for
+	 * this first in order to not spend too much time with the spinlock
+	 * held if we're not going to be able to wake up readers in the end.
+	 *
+	 * Note that we do not need to update the rwsem count: any writer
+	 * trying to acquire rwsem will run rwsem_down_write_failed() due
+	 * to the waiting threads and block trying to acquire the spinlock.
+	 *
+	 * We use a dummy atomic update in order to acquire the cache line
+	 * exclusively since we expect to succeed and run the final rwsem
+	 * count adjustment pretty soon.
+	 */
+	if (wake_type == RWSEM_WAKE_ANY &&
+	    rwsem_atomic_update(0, sem) < RWSEM_WAITING_BIAS)
+		/* Someone grabbed the sem for write already */
 		goto out;
 
-	/* grant an infinite number of read locks to the readers at the front
-	 * of the queue
-	 * - note we increment the 'active part' of the count by the number of
-	 *   readers before waking any processes up
+	/* Grant an infinite number of read locks to the readers at the front
+	 * of the queue.  Note we increment the 'active part' of the count by
+	 * the number of readers before waking any processes up.
 	 */
- readers_only:
 	woken = 0;
 	do {
 		woken++;
@@ -111,16 +134,15 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
 
 	} while (waiter->flags & RWSEM_WAITING_FOR_READ);
 
-	loop = woken;
-	woken *= RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS;
-	if (!downgrading)
-		/* we'd already done one increment earlier */
-		woken -= RWSEM_ACTIVE_BIAS;
+	adjustment = woken * RWSEM_ACTIVE_READ_BIAS;
+	if (waiter->flags & RWSEM_WAITING_FOR_READ)
+		/* hit end of list above */
+		adjustment -= RWSEM_WAITING_BIAS;
 
-	rwsem_atomic_add(woken, sem);
+	rwsem_atomic_add(adjustment, sem);
 
 	next = sem->wait_list.next;
-	for (; loop > 0; loop--) {
+	for (loop = woken; loop > 0; loop--) {
 		waiter = list_entry(next, struct rwsem_waiter, list);
 		next = waiter->list.next;
 		tsk = waiter->task;
@@ -138,10 +160,10 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
 
 	/* undo the change to the active count, but check for a transition
 	 * 1->0 */
- undo:
-	if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) & RWSEM_ACTIVE_MASK)
+ undo_write:
+	if (rwsem_atomic_update(-adjustment, sem) & RWSEM_ACTIVE_MASK)
 		goto out;
-	goto try_again;
+	goto try_again_write;
 }
 
 /*
@@ -149,8 +171,9 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
  */
 static struct rw_semaphore __sched *
 rwsem_down_failed_common(struct rw_semaphore *sem,
-			 struct rwsem_waiter *waiter, signed long adjustment)
+			 unsigned int flags, signed long adjustment)
 {
+	struct rwsem_waiter waiter;
 	struct task_struct *tsk = current;
 	signed long count;
 
@@ -158,23 +181,34 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
 
 	/* set up my own style of waitqueue */
 	spin_lock_irq(&sem->wait_lock);
-	waiter->task = tsk;
+	waiter.task = tsk;
+	waiter.flags = flags;
 	get_task_struct(tsk);
 
-	list_add_tail(&waiter->list, &sem->wait_list);
+	if (list_empty(&sem->wait_list))
+		adjustment += RWSEM_WAITING_BIAS;
+	list_add_tail(&waiter.list, &sem->wait_list);
 
-	/* we're now waiting on the lock, but no longer actively read-locking */
+	/* we're now waiting on the lock, but no longer actively locking */
 	count = rwsem_atomic_update(adjustment, sem);
 
-	/* if there are no active locks, wake the front queued process(es) up */
-	if (!(count & RWSEM_ACTIVE_MASK))
-		sem = __rwsem_do_wake(sem, 0);
+	/* If there are no active locks, wake the front queued process(es) up.
+	 *
+	 * Alternatively, if we're called from a failed down_write(), there
+	 * were already threads queued before us and there are no active
+	 * writers, the lock must be read owned; so we try to wake any read
+	 * locks that were queued ahead of us. */
+	if (count == RWSEM_WAITING_BIAS)
+		sem = __rwsem_do_wake(sem, RWSEM_WAKE_NO_ACTIVE);
+	else if (count > RWSEM_WAITING_BIAS &&
+			adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
+		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
 
 	spin_unlock_irq(&sem->wait_lock);
 
 	/* wait to be given the lock */
 	for (;;) {
-		if (!waiter->task)
+		if (!waiter.task)
 			break;
 		schedule();
 		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
@@ -191,12 +225,8 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
 asmregparm struct rw_semaphore __sched *
 rwsem_down_read_failed(struct rw_semaphore *sem)
 {
-	struct rwsem_waiter waiter;
-
-	waiter.flags = RWSEM_WAITING_FOR_READ;
-	rwsem_down_failed_common(sem, &waiter,
-				RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS);
-	return sem;
+	return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_READ,
+					-RWSEM_ACTIVE_READ_BIAS);
 }
 
 /*
@@ -205,12 +235,8 @@ rwsem_down_read_failed(struct rw_semaphore *sem)
 asmregparm struct rw_semaphore __sched *
 rwsem_down_write_failed(struct rw_semaphore *sem)
 {
-	struct rwsem_waiter waiter;
-
-	waiter.flags = RWSEM_WAITING_FOR_WRITE;
-	rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS);
-
-	return sem;
+	return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_WRITE,
+					-RWSEM_ACTIVE_WRITE_BIAS);
 }
 
 /*
@@ -225,7 +251,7 @@ asmregparm struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
 
 	/* do nothing if list empty */
 	if (!list_empty(&sem->wait_list))
-		sem = __rwsem_do_wake(sem, 0);
+		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
@@ -245,7 +271,7 @@ asmregparm struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
 
 	/* do nothing if list empty */
 	if (!list_empty(&sem->wait_list))
-		sem = __rwsem_do_wake(sem, 1);
+		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
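For readers following the adjustment arithmetic above: the patch leans on the split rwsem count layout. A sketch of the constants it assumes, with the values used by the 32-bit generic/x86 definitions (the *_READ_BIAS/*_WRITE_BIAS names come from the rwsem headers, not this file):

	/* low 16 bits: active readers + writer; high bits: waiter bias */
	#define RWSEM_ACTIVE_BIAS	0x00000001L
	#define RWSEM_ACTIVE_MASK	0x0000ffffL
	#define RWSEM_WAITING_BIAS	(-0x00010000L)
	#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
	#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

So a count of exactly RWSEM_WAITING_BIAS means "no active holders, at least one waiter", which is why rwsem_down_failed_common() can use count == RWSEM_WAITING_BIAS as its wake trigger, and why waking N readers adds N * RWSEM_ACTIVE_READ_BIAS, dropping the single waiting bias only once the queue drains.
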
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 9afa25b52a83..a5ec42868f99 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -10,6 +10,7 @@
 #include <linux/slab.h>
 #include <linux/scatterlist.h>
 #include <linux/highmem.h>
+#include <linux/kmemleak.h>
 
 /**
  * sg_next - return the next scatterlist entry in a list
@@ -115,17 +116,29 @@ EXPORT_SYMBOL(sg_init_one);
  */
 static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
 {
-	if (nents == SG_MAX_SINGLE_ALLOC)
-		return (struct scatterlist *) __get_free_page(gfp_mask);
-	else
+	if (nents == SG_MAX_SINGLE_ALLOC) {
+		/*
+		 * Kmemleak doesn't track page allocations as they are not
+		 * commonly used (in a raw form) for kernel data structures.
+		 * As we chain together a list of pages and then a normal
+		 * kmalloc (tracked by kmemleak), in order for that last
+		 * allocation not to become decoupled (and thus a
+		 * false-positive) we need to inform kmemleak of all the
+		 * intermediate allocations.
+		 */
+		void *ptr = (void *) __get_free_page(gfp_mask);
+		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
+		return ptr;
+	} else
 		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
 }
 
 static void sg_kfree(struct scatterlist *sg, unsigned int nents)
 {
-	if (nents == SG_MAX_SINGLE_ALLOC)
+	if (nents == SG_MAX_SINGLE_ALLOC) {
+		kmemleak_free(sg);
 		free_page((unsigned long) sg);
-	else
+	} else
 		kfree(sg);
 }
 
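The same pairing rule applies to any raw page allocation that ends up holding pointers into kmemleak-tracked objects; a minimal sketch outside this patch (hypothetical buffer, real kmemleak API):

	void *buf = (void *) __get_free_page(GFP_KERNEL);
	if (buf)
		/* register the page so pointers stored in it keep the
		 * referenced kmalloc objects alive in kmemleak's view */
		kmemleak_alloc(buf, PAGE_SIZE, 1, GFP_KERNEL);

	/* ... use buf ... */

	kmemleak_free(buf);	/* must precede the real free */
	free_page((unsigned long) buf);
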
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 4ee19d0d3910..7af9d841c43b 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -146,19 +146,16 @@ int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
 {
 	char *tail;
 	unsigned long val;
-	size_t len;
 
 	*res = 0;
-	len = strlen(cp);
-	if (len == 0)
+	if (!*cp)
 		return -EINVAL;
 
 	val = simple_strtoul(cp, &tail, base);
 	if (tail == cp)
 		return -EINVAL;
 
-	if ((*tail == '\0') ||
-		((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
+	if ((tail[0] == '\0') || (tail[0] == '\n' && tail[1] == '\0')) {
 		*res = val;
 		return 0;
 	}
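Behaviour is unchanged; the rewrite just drops the up-front strlen() pass by checking the tail directly. Assuming the usual strict_strtoul semantics (any trailing junk except a single final newline fails), the accepted and rejected inputs look like:

	unsigned long v;

	strict_strtoul("42",     10, &v);  /* 0, v == 42 */
	strict_strtoul("42\n",   10, &v);  /* 0, v == 42 (one trailing \n ok) */
	strict_strtoul("42x",    10, &v);  /* -EINVAL: junk after digits */
	strict_strtoul("42\n\n", 10, &v);  /* -EINVAL: tail[1] != '\0' */
	strict_strtoul("",       10, &v);  /* -EINVAL: empty string */
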
@@ -220,18 +217,15 @@ int strict_strtoull(const char *cp, unsigned int base, unsigned long long *res)
 {
 	char *tail;
 	unsigned long long val;
-	size_t len;
 
 	*res = 0;
-	len = strlen(cp);
-	if (len == 0)
+	if (!*cp)
 		return -EINVAL;
 
 	val = simple_strtoull(cp, &tail, base);
 	if (tail == cp)
 		return -EINVAL;
-	if ((*tail == '\0') ||
-		((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
+	if ((tail[0] == '\0') || (tail[0] == '\n' && tail[1] == '\0')) {
 		*res = val;
 		return 0;
 	}