-rw-r--r--  include/asm-i386/atomic.h     4
-rw-r--r--  include/asm-i386/bitops.h     4
-rw-r--r--  include/linux/init.h          2
-rw-r--r--  include/linux/kfifo.h         6
-rw-r--r--  include/linux/ktime.h         6
-rw-r--r--  include/linux/list.h         11
-rw-r--r--  ipc/util.c                   21
-rw-r--r--  kernel/exit.c                 3
-rw-r--r--  kernel/hrtimer.c              6
-rw-r--r--  kernel/kfifo.c               10
-rw-r--r--  kernel/kthread.c              6
-rw-r--r--  kernel/printk.c               2
-rw-r--r--  kernel/relay.c               12
-rw-r--r--  kernel/sched.c                9
-rw-r--r--  kernel/signal.c               2
-rw-r--r--  kernel/sys.c                 10
-rw-r--r--  kernel/timer.c               20
-rw-r--r--  kernel/workqueue.c            6
-rw-r--r--  lib/bitmap.c                  8
-rw-r--r--  lib/cmdline.c                 8
-rw-r--r--  lib/idr.c                     4
-rw-r--r--  lib/kobject.c                 5
-rw-r--r--  lib/sha1.c                    9
-rw-r--r--  lib/sort.c                    2
-rw-r--r--  lib/string.c                  8
-rw-r--r--  lib/textsearch.c              2
-rw-r--r--  lib/vsprintf.c               12
-rw-r--r--  mm/filemap.c                  4
-rw-r--r--  mm/memory.c                   4
-rw-r--r--  mm/mempool.c                  6
-rw-r--r--  mm/page-writeback.c           5
-rw-r--r--  mm/slab.c                     2
-rw-r--r--  mm/vmalloc.c                  2
33 files changed, 105 insertions, 116 deletions
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index c57441bb2905..4dd272331361 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -211,12 +211,12 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
- * atomic_add_unless - add unless the number is a given value
+ * atomic_add_unless - add unless the number is already a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
- * Atomically adds @a to @v, so long as it was not @u.
+ * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
 #define atomic_add_unless(v, a, u) \
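For reference, a minimal usage sketch of the atomic_add_unless() interface documented above; it is illustrative only, and the example_* names are hypothetical rather than part of this patch:

#include <asm/atomic.h>

/* Hypothetical reference count used only for illustration. */
static atomic_t example_refcnt = ATOMIC_INIT(1);

static int example_get(void)
{
	/*
	 * Take a reference only if the count has not already dropped
	 * to zero; returns non-zero on success, zero otherwise.
	 */
	return atomic_add_unless(&example_refcnt, 1, 0);
}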
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h
index 1c780fa1e762..273b50629357 100644
--- a/include/asm-i386/bitops.h
+++ b/include/asm-i386/bitops.h
@@ -371,7 +371,7 @@ static inline unsigned long ffz(unsigned long word)
  *
  * This is defined the same way as
  * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
+ * differs in spirit from the above ffz() (man ffs).
  */
 static inline int ffs(int x)
 {
@@ -388,7 +388,7 @@ static inline int ffs(int x)
  * fls - find last bit set
  * @x: the word to search
  *
- * This is defined the same way as ffs.
+ * This is defined the same way as ffs().
  */
 static inline int fls(int x)
 {
diff --git a/include/linux/init.h b/include/linux/init.h
index 5a593a1dec1e..c65f5107d512 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -172,7 +172,7 @@ void __init parse_early_param(void);
  * module_init() - driver initialization entry point
  * @x: function to be run at kernel boot time or module insertion
  *
- * module_init() will either be called during do_initcalls (if
+ * module_init() will either be called during do_initcalls() (if
  * builtin) or at module insertion time (if a module). There can only
  * be one per module.
  */
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 48eccd865bd8..404f4464cb1a 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -74,7 +74,7 @@ static inline void kfifo_reset(struct kfifo *fifo)
  * @buffer: the data to be added.
  * @len: the length of the data to be added.
  *
- * This function copies at most 'len' bytes from the 'buffer' into
+ * This function copies at most @len bytes from the @buffer into
  * the FIFO depending on the free space, and returns the number of
  * bytes copied.
  */
@@ -99,8 +99,8 @@ static inline unsigned int kfifo_put(struct kfifo *fifo,
  * @buffer: where the data must be copied.
  * @len: the size of the destination buffer.
  *
- * This function copies at most 'len' bytes from the FIFO into the
- * 'buffer' and returns the number of copied bytes.
+ * This function copies at most @len bytes from the FIFO into the
+ * @buffer and returns the number of copied bytes.
  */
 static inline unsigned int kfifo_get(struct kfifo *fifo,
 				     unsigned char *buffer, unsigned int len)
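A short usage sketch of the kfifo_put()/kfifo_get() pair as declared in this 2.6-era header; the example_* identifiers are hypothetical and not part of the patch:

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/kfifo.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);		/* protects the fifo */

static int example_fifo_demo(void)
{
	struct kfifo *fifo;
	unsigned char in[] = "hello";
	unsigned char out[8];
	unsigned int copied;

	fifo = kfifo_alloc(64, GFP_KERNEL, &example_lock);
	if (IS_ERR(fifo))
		return PTR_ERR(fifo);

	/* Copies at most sizeof(in) - 1 bytes, returns the number copied. */
	copied = kfifo_put(fifo, in, sizeof(in) - 1);

	/* Drains at most sizeof(out) bytes back out of the FIFO. */
	copied = kfifo_get(fifo, out, sizeof(out));

	kfifo_free(fifo);
	return 0;
}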
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 611f17f79eef..7444a6326231 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -163,7 +163,7 @@ static inline ktime_t ktime_sub(const ktime_t lhs, const ktime_t rhs)
  * @add1: addend1
  * @add2: addend2
  *
- * Returns the sum of addend1 and addend2
+ * Returns the sum of @add1 and @add2.
  */
 static inline ktime_t ktime_add(const ktime_t add1, const ktime_t add2)
 {
@@ -189,7 +189,7 @@ static inline ktime_t ktime_add(const ktime_t add1, const ktime_t add2)
  * @kt: addend
  * @nsec: the scalar nsec value to add
  *
- * Returns the sum of kt and nsec in ktime_t format
+ * Returns the sum of @kt and @nsec in ktime_t format
  */
 extern ktime_t ktime_add_ns(const ktime_t kt, u64 nsec);
 
@@ -246,7 +246,7 @@ static inline struct timeval ktime_to_timeval(const ktime_t kt)
  * ktime_to_ns - convert a ktime_t variable to scalar nanoseconds
  * @kt: the ktime_t variable to convert
  *
- * Returns the scalar nanoseconds representation of kt
+ * Returns the scalar nanoseconds representation of @kt
  */
 static inline s64 ktime_to_ns(const ktime_t kt)
 {
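A brief, illustrative sketch of the ktime helpers documented above (assuming a 2.6-era kernel context; example_deadline() is a hypothetical name):

#include <linux/ktime.h>
#include <linux/hrtimer.h>

/* Compute a deadline a little in the future. */
static ktime_t example_deadline(void)
{
	ktime_t now = ktime_get();		/* monotonic time */
	ktime_t slack = ktime_set(0, 500000);	/* 500 us, arbitrary */

	now = ktime_add(now, slack);	/* sum of two ktime_t values */
	now = ktime_add_ns(now, 1000);	/* add a scalar nanosecond value */

	return now;			/* ktime_to_ns(now) gives plain ns */
}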
diff --git a/include/linux/list.h b/include/linux/list.h
index cdc96559e5ae..f9d71eab05ee 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -161,7 +161,7 @@ static inline void __list_del(struct list_head * prev, struct list_head * next)
 /**
  * list_del - deletes entry from list.
  * @entry: the element to delete from the list.
- * Note: list_empty on entry does not return true after this, the entry is
+ * Note: list_empty() on entry does not return true after this, the entry is
  * in an undefined state.
  */
 #ifndef CONFIG_DEBUG_LIST
@@ -179,7 +179,7 @@ extern void list_del(struct list_head *entry);
  * list_del_rcu - deletes entry from list without re-initialization
  * @entry: the element to delete from the list.
  *
- * Note: list_empty on entry does not return true after this,
+ * Note: list_empty() on entry does not return true after this,
  * the entry is in an undefined state. It is useful for RCU based
  * lockfree traversal.
  *
@@ -209,7 +209,8 @@ static inline void list_del_rcu(struct list_head *entry)
  * list_replace - replace old entry by new one
  * @old : the element to be replaced
  * @new : the new element to insert
- * Note: if 'old' was empty, it will be overwritten.
+ *
+ * If @old was empty, it will be overwritten.
  */
 static inline void list_replace(struct list_head *old,
 				struct list_head *new)
@@ -488,12 +489,12 @@ static inline void list_splice_init_rcu(struct list_head *list,
 		pos = list_entry(pos->member.prev, typeof(*pos), member))
 
 /**
- * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue
+ * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
  * @pos: the type * to use as a start point
  * @head: the head of the list
  * @member: the name of the list_struct within the struct.
  *
- * Prepares a pos entry for use as a start point in list_for_each_entry_continue.
+ * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
  */
 #define list_prepare_entry(pos, head, member) \
 	((pos) ? : list_entry(head, typeof(*pos), member))
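For context, a minimal sketch of list_del() and list_replace() as documented above; struct example_item and the example_* functions are illustrative only:

#include <linux/list.h>
#include <linux/slab.h>

struct example_item {			/* hypothetical list element */
	int value;
	struct list_head node;
};

static LIST_HEAD(example_list);

static void example_remove(struct example_item *item)
{
	/*
	 * After list_del() the node is in an undefined state, so
	 * list_empty(&item->node) must not be relied on here.
	 */
	list_del(&item->node);
	kfree(item);
}

static void example_swap(struct example_item *old, struct example_item *new)
{
	/* Take @old out of the list and put @new in its place. */
	list_replace(&old->node, &new->node);
}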
diff --git a/ipc/util.c b/ipc/util.c
index a9b7a227b8d4..0c97cb746160 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -150,7 +150,7 @@ void free_ipc_ns(struct kref *kref)
  * ipc_init - initialise IPC subsystem
  *
  * The various system5 IPC resources (semaphores, messages and shared
- * memory are initialised
+ * memory) are initialised
  */
 
 static int __init ipc_init(void)
@@ -207,8 +207,7 @@ void __ipc_init ipc_init_ids(struct ipc_ids* ids, int size)
 #ifdef CONFIG_PROC_FS
 static struct file_operations sysvipc_proc_fops;
 /**
- * ipc_init_proc_interface - Create a proc interface for sysipc types
- * using a seq_file interface.
+ * ipc_init_proc_interface - Create a proc interface for sysipc types using a seq_file interface.
  * @path: Path in procfs
  * @header: Banner to be printed at the beginning of the file.
  * @ids: ipc id table to iterate.
@@ -417,7 +416,7 @@ void* ipc_alloc(int size)
  * @ptr: pointer returned by ipc_alloc
  * @size: size of block
  *
- * Free a block created with ipc_alloc. The caller must know the size
+ * Free a block created with ipc_alloc(). The caller must know the size
  * used in the allocation call.
  */
 
@@ -524,7 +523,7 @@ static void ipc_do_vfree(struct work_struct *work)
  * @head: RCU callback structure for queued work
  *
  * Since RCU callback function is called in bh,
- * we need to defer the vfree to schedule_work
+ * we need to defer the vfree to schedule_work().
  */
 static void ipc_schedule_free(struct rcu_head *head)
 {
@@ -541,7 +540,7 @@ static void ipc_schedule_free(struct rcu_head *head)
  * ipc_immediate_free - free ipc + rcu space
  * @head: RCU callback structure that contains pointer to be freed
  *
- * Free from the RCU callback context
+ * Free from the RCU callback context.
  */
 static void ipc_immediate_free(struct rcu_head *head)
 {
@@ -603,8 +602,8 @@ int ipcperms (struct kern_ipc_perm *ipcp, short flag)
  * @in: kernel permissions
  * @out: new style IPC permissions
  *
- * Turn the kernel object 'in' into a set of permissions descriptions
- * for returning to userspace (out).
+ * Turn the kernel object @in into a set of permissions descriptions
+ * for returning to userspace (@out).
  */
 
 
@@ -624,8 +623,8 @@ void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out)
  * @in: new style IPC permissions
  * @out: old style IPC permissions
  *
- * Turn the new style permissions object in into a compatibility
- * object and store it into the 'out' pointer.
+ * Turn the new style permissions object @in into a compatibility
+ * object and store it into the @out pointer.
  */
 
 void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out)
@@ -722,7 +721,7 @@ int ipc_checkid(struct ipc_ids* ids, struct kern_ipc_perm* ipcp, int uid)
  * @cmd: pointer to command
  *
  * Return IPC_64 for new style IPC and IPC_OLD for old style IPC.
- * The cmd value is turned from an encoding command and version into
+ * The @cmd value is turned from an encoding command and version into
  * just the command code.
  */
 
diff --git a/kernel/exit.c b/kernel/exit.c
index fec12eb12471..bc71fdfcd8a7 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -257,8 +257,7 @@ static int has_stopped_jobs(int pgrp)
 }
 
 /**
- * reparent_to_init - Reparent the calling kernel thread to the init task
- * of the pid space that the thread belongs to.
+ * reparent_to_init - Reparent the calling kernel thread to the init task of the pid space that the thread belongs to.
  *
  * If a kernel thread is launched as a result of a system call, or if
  * it ever exits, it should generally reparent itself to init so that
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index d0ba190dfeb6..f44e499e8fca 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -102,7 +102,7 @@ static DEFINE_PER_CPU(struct hrtimer_base, hrtimer_bases[MAX_HRTIMER_BASES]) =
  *
  * The function calculates the monotonic clock from the realtime
  * clock and the wall_to_monotonic offset and stores the result
- * in normalized timespec format in the variable pointed to by ts.
+ * in normalized timespec format in the variable pointed to by @ts.
  */
 void ktime_get_ts(struct timespec *ts)
 {
@@ -583,8 +583,8 @@ EXPORT_SYMBOL_GPL(hrtimer_init);
  * @which_clock: which clock to query
  * @tp: pointer to timespec variable to store the resolution
  *
- * Store the resolution of the clock selected by which_clock in the
- * variable pointed to by tp.
+ * Store the resolution of the clock selected by @which_clock in the
+ * variable pointed to by @tp.
  */
 int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
 {
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index 5d1d907378a2..cee419143fd4 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -32,8 +32,8 @@
  * @gfp_mask: get_free_pages mask, passed to kmalloc()
  * @lock: the lock to be used to protect the fifo buffer
  *
- * Do NOT pass the kfifo to kfifo_free() after use ! Simply free the
- * struct kfifo with kfree().
+ * Do NOT pass the kfifo to kfifo_free() after use! Simply free the
+ * &struct kfifo with kfree().
  */
 struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size,
 			 gfp_t gfp_mask, spinlock_t *lock)
@@ -108,7 +108,7 @@ EXPORT_SYMBOL(kfifo_free);
  * @buffer: the data to be added.
  * @len: the length of the data to be added.
  *
- * This function copies at most 'len' bytes from the 'buffer' into
+ * This function copies at most @len bytes from the @buffer into
  * the FIFO depending on the free space, and returns the number of
  * bytes copied.
  *
@@ -155,8 +155,8 @@ EXPORT_SYMBOL(__kfifo_put);
  * @buffer: where the data must be copied.
  * @len: the size of the destination buffer.
  *
- * This function copies at most 'len' bytes from the FIFO into the
- * 'buffer' and returns the number of copied bytes.
+ * This function copies at most @len bytes from the FIFO into the
+ * @buffer and returns the number of copied bytes.
  *
  * Note that with only one concurrent reader and one concurrent
  * writer, you don't need extra locking to use these functions.
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 1db8c72d0d38..87c50ccd1d4e 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -50,7 +50,7 @@ static struct kthread_stop_info kthread_stop_info;
 /**
  * kthread_should_stop - should this kthread return now?
  *
- * When someone calls kthread_stop on your kthread, it will be woken
+ * When someone calls kthread_stop() on your kthread, it will be woken
  * and this will return true. You should then return, and your return
  * value will be passed through to kthread_stop().
  */
@@ -143,7 +143,7 @@ static void keventd_create_kthread(struct work_struct *work)
  * it. See also kthread_run(), kthread_create_on_cpu().
  *
  * When woken, the thread will run @threadfn() with @data as its
- * argument. @threadfn can either call do_exit() directly if it is a
+ * argument. @threadfn() can either call do_exit() directly if it is a
  * standalone thread for which noone will call kthread_stop(), or
  * return when 'kthread_should_stop()' is true (which means
  * kthread_stop() has been called). The return value should be zero
@@ -192,7 +192,7 @@ EXPORT_SYMBOL(kthread_create);
  *
  * Description: This function is equivalent to set_cpus_allowed(),
  * except that @cpu doesn't need to be online, and the thread must be
- * stopped (i.e., just returned from kthread_create().
+ * stopped (i.e., just returned from kthread_create()).
  */
 void kthread_bind(struct task_struct *k, unsigned int cpu)
 {
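A minimal sketch of the kthread_should_stop()/kthread_stop() protocol described above (the example_* names are hypothetical, not part of this patch):

#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *example_task;

/* Worker loop: runs until someone calls kthread_stop() on the task. */
static int example_thread_fn(void *data)
{
	while (!kthread_should_stop())
		msleep(100);		/* ... do one unit of work ... */
	return 0;			/* handed back to the kthread_stop() caller */
}

static int example_start(void)
{
	example_task = kthread_run(example_thread_fn, NULL, "example");
	if (IS_ERR(example_task))
		return PTR_ERR(example_task);
	return 0;
}

static void example_stop(void)
{
	kthread_stop(example_task);	/* wakes the thread, waits for it to exit */
}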
diff --git a/kernel/printk.c b/kernel/printk.c
index c770e1a4e882..3e79e18dce33 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -483,7 +483,7 @@ static int have_callable_console(void)
  * printk - print a kernel message
  * @fmt: format string
  *
- * This is printk. It can be called from any context. We want it to work.
+ * This is printk(). It can be called from any context. We want it to work.
  *
  * We try to grab the console_sem. If we succeed, it's easy - we log the output and
  * call the console drivers. If we fail to get the semaphore we place the output
diff --git a/kernel/relay.c b/kernel/relay.c
index ef923f6de2e7..ef8a935710a2 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -328,7 +328,7 @@ static void wakeup_readers(struct work_struct *work)
  * @buf: the channel buffer
  * @init: 1 if this is a first-time initialization
  *
- * See relay_reset for description of effect.
+ * See relay_reset() for description of effect.
  */
 static void __relay_reset(struct rchan_buf *buf, unsigned int init)
 {
@@ -364,7 +364,7 @@ static void __relay_reset(struct rchan_buf *buf, unsigned int init)
  * and restarting the channel in its initial state. The buffers
  * are not freed, so any mappings are still in effect.
  *
- * NOTE: Care should be taken that the channel isn't actually
+ * NOTE. Care should be taken that the channel isn't actually
  * being used by anything when this call is made.
  */
 void relay_reset(struct rchan *chan)
@@ -528,7 +528,7 @@ static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
  * Creates a channel buffer for each cpu using the sizes and
  * attributes specified. The created channel buffer files
  * will be named base_filename0...base_filenameN-1. File
- * permissions will be S_IRUSR.
+ * permissions will be %S_IRUSR.
  */
 struct rchan *relay_open(const char *base_filename,
 			 struct dentry *parent,
@@ -648,7 +648,7 @@ EXPORT_SYMBOL_GPL(relay_switch_subbuf);
  * subbufs_consumed should be the number of sub-buffers newly consumed,
  * not the total consumed.
  *
- * NOTE: Kernel clients don't need to call this function if the channel
+ * NOTE. Kernel clients don't need to call this function if the channel
  * mode is 'overwrite'.
  */
 void relay_subbufs_consumed(struct rchan *chan,
@@ -749,7 +749,7 @@ static int relay_file_open(struct inode *inode, struct file *filp)
  * @filp: the file
  * @vma: the vma describing what to map
  *
- * Calls upon relay_mmap_buf to map the file into user space.
+ * Calls upon relay_mmap_buf() to map the file into user space.
  */
 static int relay_file_mmap(struct file *filp, struct vm_area_struct *vma)
 {
@@ -891,7 +891,7 @@ static size_t relay_file_read_subbuf_avail(size_t read_pos,
  * @read_pos: file read position
  * @buf: relay channel buffer
  *
- * If the read_pos is in the middle of padding, return the
+ * If the @read_pos is in the middle of padding, return the
  * position of the first actually available byte, otherwise
  * return the original value.
  */
diff --git a/kernel/sched.c b/kernel/sched.c
index 1cd4ee769e20..1fd67e16cd31 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4203,13 +4203,12 @@ static void __setscheduler(struct task_struct *p, int policy, int prio)
 }
 
 /**
- * sched_setscheduler - change the scheduling policy and/or RT priority of
- * a thread.
+ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
  * @p: the task in question.
  * @policy: new policy.
  * @param: structure containing the new RT priority.
  *
- * NOTE: the task may be already dead
+ * NOTE that the task may be already dead.
  */
 int sched_setscheduler(struct task_struct *p, int policy,
 		       struct sched_param *param)
@@ -4577,7 +4576,7 @@ asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
 /**
  * sys_sched_yield - yield the current processor to other threads.
  *
- * this function yields the current CPU by moving the calling thread
+ * This function yields the current CPU by moving the calling thread
  * to the expired array. If there are no other threads running on this
  * CPU then this function will return.
  */
@@ -4704,7 +4703,7 @@ EXPORT_SYMBOL(cond_resched_softirq);
 /**
  * yield - yield the current processor to other threads.
  *
- * this is a shortcut for kernel-space yielding - it marks the
+ * This is a shortcut for kernel-space yielding - it marks the
  * thread runnable and calls sys_sched_yield().
  */
 void __sched yield(void)
diff --git a/kernel/signal.c b/kernel/signal.c
index ea4632bd40a0..228fdb5c01d1 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2282,7 +2282,7 @@ static int do_tkill(int tgid, int pid, int sig)
  * @pid: the PID of the thread
  * @sig: signal to be sent
  *
- * This syscall also checks the tgid and returns -ESRCH even if the PID
+ * This syscall also checks the @tgid and returns -ESRCH even if the PID
  * exists but it's not belonging to the target process anymore. This
  * method solves the problem of threads exiting and PIDs getting reused.
  */
diff --git a/kernel/sys.c b/kernel/sys.c
index 6e2101dec0fc..e1024383314d 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -215,7 +215,7 @@ EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
  * This routine uses RCU to synchronize with changes to the chain.
  *
  * If the return value of the notifier can be and'ed
- * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain
+ * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain()
  * will return immediately, with the return value of
  * the notifier function which halted execution.
  * Otherwise the return value is the return value
@@ -313,7 +313,7 @@ EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
  * run in a process context, so they are allowed to block.
  *
  * If the return value of the notifier can be and'ed
- * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain
+ * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain()
  * will return immediately, with the return value of
  * the notifier function which halted execution.
  * Otherwise the return value is the return value
@@ -393,7 +393,7 @@ EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
  * All locking must be provided by the caller.
  *
  * If the return value of the notifier can be and'ed
- * with %NOTIFY_STOP_MASK then raw_notifier_call_chain
+ * with %NOTIFY_STOP_MASK then raw_notifier_call_chain()
  * will return immediately, with the return value of
  * the notifier function which halted execution.
  * Otherwise the return value is the return value
@@ -487,7 +487,7 @@ EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);
  * run in a process context, so they are allowed to block.
  *
  * If the return value of the notifier can be and'ed
- * with %NOTIFY_STOP_MASK then srcu_notifier_call_chain
+ * with %NOTIFY_STOP_MASK then srcu_notifier_call_chain()
  * will return immediately, with the return value of
  * the notifier function which halted execution.
  * Otherwise the return value is the return value
@@ -538,7 +538,7 @@ EXPORT_SYMBOL_GPL(srcu_init_notifier_head);
  * Registers a function with the list of functions
  * to be called at reboot time.
  *
- * Currently always returns zero, as blocking_notifier_chain_register
+ * Currently always returns zero, as blocking_notifier_chain_register()
  * always returns zero.
  */
 
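For reference, a minimal sketch of registering and calling a notifier chain of the kind documented above (blocking variant shown; the example_* names are hypothetical and not part of this patch):

#include <linux/notifier.h>

static BLOCKING_NOTIFIER_HEAD(example_chain);	/* hypothetical chain */

static int example_event(struct notifier_block *nb, unsigned long action,
			 void *data)
{
	/* Returning NOTIFY_STOP here would halt the call chain early. */
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_event,
};

static void example_notify(void)
{
	blocking_notifier_chain_register(&example_chain, &example_nb);

	/* Callers see the return value of the notifier that stopped the chain. */
	blocking_notifier_call_chain(&example_chain, 0, NULL);

	blocking_notifier_chain_unregister(&example_chain, &example_nb);
}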
diff --git a/kernel/timer.c b/kernel/timer.c
index d38801a95866..31ab627df8a0 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -85,7 +85,7 @@ static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases;
  * @j: the time in (absolute) jiffies that should be rounded
  * @cpu: the processor number on which the timeout will happen
  *
- * __round_jiffies rounds an absolute time in the future (in jiffies)
+ * __round_jiffies() rounds an absolute time in the future (in jiffies)
  * up or down to (approximately) full seconds. This is useful for timers
  * for which the exact time they fire does not matter too much, as long as
  * they fire approximately every X seconds.
@@ -98,7 +98,7 @@ static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases;
  * processors firing at the exact same time, which could lead
  * to lock contention or spurious cache line bouncing.
  *
- * The return value is the rounded version of the "j" parameter.
+ * The return value is the rounded version of the @j parameter.
  */
 unsigned long __round_jiffies(unsigned long j, int cpu)
 {
@@ -142,7 +142,7 @@ EXPORT_SYMBOL_GPL(__round_jiffies);
  * @j: the time in (relative) jiffies that should be rounded
  * @cpu: the processor number on which the timeout will happen
  *
- * __round_jiffies_relative rounds a time delta in the future (in jiffies)
+ * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
  * up or down to (approximately) full seconds. This is useful for timers
  * for which the exact time they fire does not matter too much, as long as
  * they fire approximately every X seconds.
@@ -155,7 +155,7 @@ EXPORT_SYMBOL_GPL(__round_jiffies);
  * processors firing at the exact same time, which could lead
  * to lock contention or spurious cache line bouncing.
  *
- * The return value is the rounded version of the "j" parameter.
+ * The return value is the rounded version of the @j parameter.
  */
 unsigned long __round_jiffies_relative(unsigned long j, int cpu)
 {
@@ -173,7 +173,7 @@ EXPORT_SYMBOL_GPL(__round_jiffies_relative);
  * round_jiffies - function to round jiffies to a full second
  * @j: the time in (absolute) jiffies that should be rounded
  *
- * round_jiffies rounds an absolute time in the future (in jiffies)
+ * round_jiffies() rounds an absolute time in the future (in jiffies)
  * up or down to (approximately) full seconds. This is useful for timers
  * for which the exact time they fire does not matter too much, as long as
  * they fire approximately every X seconds.
@@ -182,7 +182,7 @@ EXPORT_SYMBOL_GPL(__round_jiffies_relative);
  * at the same time, rather than at various times spread out. The goal
  * of this is to have the CPU wake up less, which saves power.
  *
- * The return value is the rounded version of the "j" parameter.
+ * The return value is the rounded version of the @j parameter.
  */
 unsigned long round_jiffies(unsigned long j)
 {
@@ -194,7 +194,7 @@ EXPORT_SYMBOL_GPL(round_jiffies);
  * round_jiffies_relative - function to round jiffies to a full second
  * @j: the time in (relative) jiffies that should be rounded
  *
- * round_jiffies_relative rounds a time delta in the future (in jiffies)
+ * round_jiffies_relative() rounds a time delta in the future (in jiffies)
  * up or down to (approximately) full seconds. This is useful for timers
  * for which the exact time they fire does not matter too much, as long as
  * they fire approximately every X seconds.
@@ -203,7 +203,7 @@ EXPORT_SYMBOL_GPL(round_jiffies);
  * at the same time, rather than at various times spread out. The goal
  * of this is to have the CPU wake up less, which saves power.
  *
- * The return value is the rounded version of the "j" parameter.
+ * The return value is the rounded version of the @j parameter.
  */
 unsigned long round_jiffies_relative(unsigned long j)
 {
@@ -387,7 +387,7 @@ void add_timer_on(struct timer_list *timer, int cpu)
  * @timer: the timer to be modified
  * @expires: new timeout in jiffies
  *
- * mod_timer is a more efficient way to update the expire field of an
+ * mod_timer() is a more efficient way to update the expire field of an
  * active timer (if the timer is inactive it will be activated)
  *
  * mod_timer(timer, expires) is equivalent to:
@@ -490,7 +490,7 @@ out:
  * the timer it also makes sure the handler has finished executing on other
  * CPUs.
  *
- * Synchronization rules: callers must prevent restarting of the timer,
+ * Synchronization rules: Callers must prevent restarting of the timer,
  * otherwise this function is meaningless. It must not be called from
  * interrupt contexts. The caller must not hold locks which would prevent
  * completion of the timer's handler. The timer's handler must not call
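A short, illustrative sketch combining mod_timer() with round_jiffies() as documented above (assuming a 2.6-era timer API; the example_* names are hypothetical):

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list example_timer;		/* hypothetical timer */

static void example_timeout(unsigned long data)
{
	/* runs in softirq context when the timer expires */
}

static void example_arm(void)
{
	setup_timer(&example_timer, example_timeout, 0);

	/*
	 * round_jiffies() nudges the expiry onto a whole second so that
	 * timers from many CPUs tend to fire together and the CPU can
	 * stay idle longer; mod_timer() (re)activates the timer.
	 */
	mod_timer(&example_timer, round_jiffies(jiffies + 5 * HZ));
}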
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index a3da07c5af28..020d1fff57dc 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -656,8 +656,7 @@ void flush_scheduled_work(void)
 EXPORT_SYMBOL(flush_scheduled_work);
 
 /**
- * cancel_rearming_delayed_workqueue - reliably kill off a delayed
- * work whose handler rearms the delayed work.
+ * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
  * @wq: the controlling workqueue structure
  * @dwork: the delayed work struct
  */
@@ -670,8 +669,7 @@ void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
 EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
 
 /**
- * cancel_rearming_delayed_work - reliably kill off a delayed keventd
- * work whose handler rearms the delayed work.
+ * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
  * @dwork: the delayed work struct
  */
 void cancel_rearming_delayed_work(struct delayed_work *dwork)
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 037fa9aa2ed7..ee6e58fce8f7 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -95,7 +95,7 @@ void __bitmap_complement(unsigned long *dst, const unsigned long *src, int bits)
 }
 EXPORT_SYMBOL(__bitmap_complement);
 
-/*
+/**
  * __bitmap_shift_right - logical right shift of the bits in a bitmap
  * @dst - destination bitmap
  * @src - source bitmap
@@ -139,7 +139,7 @@ void __bitmap_shift_right(unsigned long *dst,
 EXPORT_SYMBOL(__bitmap_shift_right);
 
 
-/*
+/**
  * __bitmap_shift_left - logical left shift of the bits in a bitmap
  * @dst - destination bitmap
  * @src - source bitmap
@@ -529,7 +529,7 @@ int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits)
 }
 EXPORT_SYMBOL(bitmap_parselist);
 
-/*
+/**
  * bitmap_pos_to_ord(buf, pos, bits)
  * @buf: pointer to a bitmap
  * @pos: a bit position in @buf (0 <= @pos < @bits)
@@ -804,7 +804,7 @@ EXPORT_SYMBOL(bitmap_find_free_region);
  * @pos: beginning of bit region to release
  * @order: region size (log base 2 of number of bits) to release
  *
- * This is the complement to __bitmap_find_free_region and releases
+ * This is the complement to __bitmap_find_free_region() and releases
  * the found region (by clearing it in the bitmap).
  *
  * No return value.
diff --git a/lib/cmdline.c b/lib/cmdline.c
index 8a5b5303bd4f..f596c08d213a 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -43,10 +43,10 @@ static int get_range(char **str, int *pint)
  * comma as well.
  *
  * Return values:
- * 0 : no int in string
- * 1 : int found, no subsequent comma
- * 2 : int found including a subsequent comma
- * 3 : hyphen found to denote a range
+ * 0 - no int in string
+ * 1 - int found, no subsequent comma
+ * 2 - int found including a subsequent comma
+ * 3 - hyphen found to denote a range
  */
 
 int get_option (char **str, int *pint)
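A small sketch keyed off the documented get_option() return codes (example_parse() is a hypothetical caller, not part of the patch):

#include <linux/kernel.h>
#include <linux/errno.h>

static int example_parse(char *str, int *val)
{
	switch (get_option(&str, val)) {
	case 0:				/* no int in string */
		return -EINVAL;
	case 3:				/* hyphen found: start of a range */
	case 2:				/* int found, a comma follows */
	case 1:				/* int found, nothing after it */
		return 0;
	}
	return -EINVAL;
}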
diff --git a/lib/idr.c b/lib/idr.c
index 71853531d3b0..305117ca2d41 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -329,8 +329,8 @@ static void sub_remove(struct idr *idp, int shift, int id)
 
 /**
  * idr_remove - remove the given id and free it's slot
- * idp: idr handle
- * id: uniqueue key
+ * @idp: idr handle
+ * @id: unique key
  */
 void idr_remove(struct idr *idp, int id)
 {
diff --git a/lib/kobject.c b/lib/kobject.c
index c2917ffe8bf1..2782f49e906e 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -97,11 +97,12 @@ static void fill_kobj_path(struct kobject *kobj, char *path, int length)
 }
 
 /**
- * kobject_get_path - generate and return the path associated with a given kobj
- * and kset pair. The result must be freed by the caller with kfree().
+ * kobject_get_path - generate and return the path associated with a given kobj and kset pair.
  *
  * @kobj: kobject in question, with which to build the path
  * @gfp_mask: the allocation type used to allocate the path
+ *
+ * The result must be freed by the caller with kfree().
  */
 char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask)
 {
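For illustration only, a caller of kobject_get_path() that honours the kfree() requirement spelled out above (example_log_path() is a hypothetical name):

#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/kernel.h>

/* Log the sysfs path of a kobject; the returned string must be kfree()d. */
static void example_log_path(struct kobject *kobj)
{
	char *path = kobject_get_path(kobj, GFP_KERNEL);

	if (path) {
		printk(KERN_INFO "kobject path: %s\n", path);
		kfree(path);
	}
}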
diff --git a/lib/sha1.c b/lib/sha1.c
index 1cdabe3065f9..4c45fd50e913 100644
--- a/lib/sha1.c
+++ b/lib/sha1.c
@@ -20,8 +20,8 @@
 #define K3 0x8F1BBCDCL /* Rounds 40-59: sqrt(5) * 2^30 */
 #define K4 0xCA62C1D6L /* Rounds 60-79: sqrt(10) * 2^30 */
 
-/*
- * sha_transform: single block SHA1 transform
+/**
+ * sha_transform - single block SHA1 transform
  *
  * @digest: 160 bit digest to update
  * @data: 512 bits of data to hash
@@ -80,9 +80,8 @@ void sha_transform(__u32 *digest, const char *in, __u32 *W)
 }
 EXPORT_SYMBOL(sha_transform);
 
-/*
- * sha_init: initialize the vectors for a SHA1 digest
- *
+/**
+ * sha_init - initialize the vectors for a SHA1 digest
  * @buf: vector to initialize
  */
 void sha_init(__u32 *buf)
diff --git a/lib/sort.c b/lib/sort.c
index 488788b341cb..961567894d16 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -27,7 +27,7 @@ static void generic_swap(void *a, void *b, int size)
 	} while (--size > 0);
 }
 
-/*
+/**
  * sort - sort an array of elements
  * @base: pointer to data to sort
  * @num: number of elements
diff --git a/lib/string.c b/lib/string.c
index a485d75962af..bab440fb0dfc 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -160,7 +160,7 @@ EXPORT_SYMBOL(strcat);
  * @src: The string to append to it
  * @count: The maximum numbers of bytes to copy
  *
- * Note that in contrast to strncpy, strncat ensures the result is
+ * Note that in contrast to strncpy(), strncat() ensures the result is
  * terminated.
  */
 char *strncat(char *dest, const char *src, size_t count)
@@ -366,8 +366,7 @@ EXPORT_SYMBOL(strnlen);
 
 #ifndef __HAVE_ARCH_STRSPN
 /**
- * strspn - Calculate the length of the initial substring of @s which only
- * contain letters in @accept
+ * strspn - Calculate the length of the initial substring of @s which only contain letters in @accept
  * @s: The string to be searched
  * @accept: The string to search for
  */
@@ -394,8 +393,7 @@ EXPORT_SYMBOL(strspn);
 
 #ifndef __HAVE_ARCH_STRCSPN
 /**
- * strcspn - Calculate the length of the initial substring of @s which does
- * not contain letters in @reject
+ * strcspn - Calculate the length of the initial substring of @s which does not contain letters in @reject
  * @s: The string to be searched
  * @reject: The string to avoid
  */
diff --git a/lib/textsearch.c b/lib/textsearch.c
index 98bcadc01185..9e2a002c5b54 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -218,7 +218,7 @@ static unsigned int get_linear_data(unsigned int consumed, const u8 **dst,
  * Call textsearch_next() to retrieve subsequent matches.
  *
  * Returns the position of first occurrence of the pattern or
- * UINT_MAX if no occurrence was found.
+ * %UINT_MAX if no occurrence was found.
  */
 unsigned int textsearch_find_continuous(struct ts_config *conf,
 					struct ts_state *state,
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index bed7229378f2..44f0e339a947 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -247,12 +247,12 @@ static char * number(char * buf, char * end, unsigned long long num, int base, i
  * be generated for the given input, excluding the trailing
  * '\0', as per ISO C99. If you want to have the exact
  * number of characters written into @buf as return value
- * (not including the trailing '\0'), use vscnprintf. If the
+ * (not including the trailing '\0'), use vscnprintf(). If the
  * return is greater than or equal to @size, the resulting
  * string is truncated.
  *
  * Call this function if you are already dealing with a va_list.
- * You probably want snprintf instead.
+ * You probably want snprintf() instead.
  */
 int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
 {
@@ -509,7 +509,7 @@ EXPORT_SYMBOL(vsnprintf);
  * returns 0.
  *
  * Call this function if you are already dealing with a va_list.
- * You probably want scnprintf instead.
+ * You probably want scnprintf() instead.
  */
 int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
 {
@@ -577,11 +577,11 @@ EXPORT_SYMBOL(scnprintf);
  * @args: Arguments for the format string
  *
  * The function returns the number of characters written
- * into @buf. Use vsnprintf or vscnprintf in order to avoid
+ * into @buf. Use vsnprintf() or vscnprintf() in order to avoid
  * buffer overflows.
  *
  * Call this function if you are already dealing with a va_list.
- * You probably want sprintf instead.
+ * You probably want sprintf() instead.
  */
 int vsprintf(char *buf, const char *fmt, va_list args)
 {
@@ -597,7 +597,7 @@ EXPORT_SYMBOL(vsprintf);
  * @...: Arguments for the format string
  *
  * The function returns the number of characters written
- * into @buf. Use snprintf or scnprintf in order to avoid
+ * into @buf. Use snprintf() or scnprintf() in order to avoid
  * buffer overflows.
  */
 int sprintf(char * buf, const char *fmt, ...)
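An illustrative sketch of the scnprintf() variant recommended above for bounded formatting (example_format() is a hypothetical helper, not part of this patch):

#include <linux/kernel.h>

/*
 * scnprintf() returns the number of characters actually stored, so
 * repeated appends into a fixed-size buffer cannot overflow it.
 */
static int example_format(char *buf, size_t size, int cpu, unsigned long val)
{
	int len = 0;

	len += scnprintf(buf + len, size - len, "cpu%d: ", cpu);
	len += scnprintf(buf + len, size - len, "%lu\n", val);
	return len;		/* excludes the trailing '\0' */
}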
diff --git a/mm/filemap.c b/mm/filemap.c
index f30ef28405d3..00414849a867 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -327,7 +327,7 @@ EXPORT_SYMBOL(sync_page_range);
  * @pos: beginning offset in pages to write
  * @count: number of bytes to write
  *
- * Note: Holding i_mutex across sync_page_range_nolock is not a good idea
+ * Note: Holding i_mutex across sync_page_range_nolock() is not a good idea
  * as it forces O_SYNC writers to different parts of the same file
  * to be serialised right until io completion.
  */
@@ -784,7 +784,7 @@ unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
  * @mapping: target address_space
  * @index: the page index
  *
- * Same as grab_cache_page, but do not wait if the page is unavailable.
+ * Same as grab_cache_page(), but do not wait if the page is unavailable.
  * This is intended for speculative data generators, where the data can
  * be regenerated if the page couldn't be grabbed. This routine should
  * be safe to call while holding the lock for another page.
diff --git a/mm/memory.c b/mm/memory.c
index 0e6a402d86be..072c1135ad37 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1775,9 +1775,7 @@ restart:
 }
 
 /**
- * unmap_mapping_range - unmap the portion of all mmaps
- * in the specified address_space corresponding to the specified
- * page range in the underlying file.
+ * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file.
  * @mapping: the address space containing mmaps to be unmapped.
  * @holebegin: byte in first page to unmap, relative to the start of
  * the underlying file. This will be rounded down to a PAGE_SIZE
diff --git a/mm/mempool.c b/mm/mempool.c
index ccd8cb8cd41f..cc1ca86dfc24 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -46,9 +46,9 @@ static void free_pool(mempool_t *pool)
  * @pool_data: optional private data available to the user-defined functions.
  *
  * this function creates and allocates a guaranteed size, preallocated
- * memory pool. The pool can be used from the mempool_alloc and mempool_free
+ * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
  * functions. This function might sleep. Both the alloc_fn() and the free_fn()
- * functions might sleep - as long as the mempool_alloc function is not called
+ * functions might sleep - as long as the mempool_alloc() function is not called
  * from IRQ contexts.
  */
 mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
@@ -195,7 +195,7 @@ EXPORT_SYMBOL(mempool_destroy);
  * mempool_create().
  * @gfp_mask: the usual allocation bitmask.
  *
- * this function only sleeps if the alloc_fn function sleeps or
+ * this function only sleeps if the alloc_fn() function sleeps or
  * returns NULL. Note that due to preallocation, this function
  * *never* fails when called from process contexts. (it might
  * fail if called from an IRQ context.)
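A minimal sketch of mempool_create()/mempool_alloc() as documented above, using the stock kmalloc-backed helpers; the example_* names and the 256-byte element size are illustrative assumptions, not part of this patch:

#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/errno.h>

static mempool_t *example_pool;		/* hypothetical pool of 256-byte buffers */

static int example_pool_init(void)
{
	/*
	 * Keep at least 16 elements preallocated; mempool_kmalloc() and
	 * mempool_kfree() take the element size through @pool_data.
	 * mempool_create() may sleep.
	 */
	example_pool = mempool_create(16, mempool_kmalloc, mempool_kfree,
				      (void *)(unsigned long)256);
	return example_pool ? 0 : -ENOMEM;
}

static void *example_get(void)
{
	/* Never fails in process context thanks to the preallocated reserve. */
	return mempool_alloc(example_pool, GFP_KERNEL);
}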
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 438833cbbca4..fd96a555e500 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -549,9 +549,7 @@ void __init page_writeback_init(void)
 }
 
 /**
- * generic_writepages - walk the list of dirty pages of the given
- * address space and writepage() all of them.
- *
+ * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
  * @mapping: address space structure to write
  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
  *
@@ -698,7 +696,6 @@ int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
 
 /**
  * write_one_page - write out a single page and optionally wait on I/O
- *
  * @page: the page to write
  * @wait: if true, wait on writeout
  *
diff --git a/mm/slab.c b/mm/slab.c
index 196df70eb8cb..70784b848b69 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2520,7 +2520,7 @@ EXPORT_SYMBOL(kmem_cache_shrink);
  * kmem_cache_destroy - delete a cache
  * @cachep: the cache to destroy
  *
- * Remove a struct kmem_cache object from the slab cache.
+ * Remove a &struct kmem_cache object from the slab cache.
  *
  * It is expected this function will be called by a module when it is
  * unloaded. This will remove the cache completely, and avoid a duplicate
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 86897ee792d6..9eef486da909 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -699,7 +699,7 @@ finished:
  * that it is big enough to cover the vma. Will return failure if
  * that criteria isn't met.
  *
- * Similar to remap_pfn_range (see mm/memory.c)
+ * Similar to remap_pfn_range() (see mm/memory.c)
  */
 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 			unsigned long pgoff)