author     Robert P. J. Day <rpjday@mindspring.com>              2007-02-10 04:45:59 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-02-11 13:51:32 -0500
commit     72fd4a35a824331d7a0f4168d7576502d95d34b3 (patch)
tree       be27880bc36b7f62e8044a88b8744a35c5317714 /kernel
parent     262086cf5b5343c2b81c97b1c606058e921859df (diff)
[PATCH] Numerous fixes to kernel-doc info in source files.
A variety of (mostly) innocuous fixes to the embedded kernel-doc content in
source files, including:
* make multi-line initial descriptions single line
* denote some function names, constants and structs as such
* change erroneous opening '/*' to '/**' in a few places
* reword some text for clarity
Signed-off-by: Robert P. J. Day <rpjday@mindspring.com>
Cc: "Randy.Dunlap" <rdunlap@xenotime.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/exit.c        3
-rw-r--r--  kernel/hrtimer.c     6
-rw-r--r--  kernel/kfifo.c      10
-rw-r--r--  kernel/kthread.c     6
-rw-r--r--  kernel/printk.c      2
-rw-r--r--  kernel/relay.c      12
-rw-r--r--  kernel/sched.c       9
-rw-r--r--  kernel/signal.c      2
-rw-r--r--  kernel/sys.c        10
-rw-r--r--  kernel/timer.c      20
-rw-r--r--  kernel/workqueue.c   6
11 files changed, 41 insertions, 45 deletions
diff --git a/kernel/exit.c b/kernel/exit.c
index fec12eb12471..bc71fdfcd8a7 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -257,8 +257,7 @@ static int has_stopped_jobs(int pgrp)
 }
 
 /**
- * reparent_to_init - Reparent the calling kernel thread to the init task
- * of the pid space that the thread belongs to.
+ * reparent_to_init - Reparent the calling kernel thread to the init task of the pid space that the thread belongs to.
  *
  * If a kernel thread is launched as a result of a system call, or if
  * it ever exits, it should generally reparent itself to init so that
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index d0ba190dfeb6..f44e499e8fca 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -102,7 +102,7 @@ static DEFINE_PER_CPU(struct hrtimer_base, hrtimer_bases[MAX_HRTIMER_BASES]) =
  *
  * The function calculates the monotonic clock from the realtime
  * clock and the wall_to_monotonic offset and stores the result
- * in normalized timespec format in the variable pointed to by ts.
+ * in normalized timespec format in the variable pointed to by @ts.
  */
 void ktime_get_ts(struct timespec *ts)
 {
@@ -583,8 +583,8 @@ EXPORT_SYMBOL_GPL(hrtimer_init);
  * @which_clock: which clock to query
  * @tp: pointer to timespec variable to store the resolution
  *
- * Store the resolution of the clock selected by which_clock in the
- * variable pointed to by tp.
+ * Store the resolution of the clock selected by @which_clock in the
+ * variable pointed to by @tp.
  */
 int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
 {
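For reference, the two helpers whose comments are touched here are used roughly as follows; this is only a sketch, and the surrounding function is hypothetical:

    #include <linux/hrtimer.h>
    #include <linux/kernel.h>
    #include <linux/time.h>

    static void example_show_clocks(void)
    {
            struct timespec mono, res;

            /* Monotonic time, derived from the realtime clock and the
             * wall_to_monotonic offset, stored in normalized form in *ts. */
            ktime_get_ts(&mono);

            /* Resolution of the selected clock, stored in *tp. */
            hrtimer_get_res(CLOCK_MONOTONIC, &res);

            printk(KERN_DEBUG "mono %ld.%09ld, res %ld ns\n",
                   mono.tv_sec, mono.tv_nsec, res.tv_nsec);
    }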
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index 5d1d907378a2..cee419143fd4 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -32,8 +32,8 @@
  * @gfp_mask: get_free_pages mask, passed to kmalloc()
  * @lock: the lock to be used to protect the fifo buffer
  *
- * Do NOT pass the kfifo to kfifo_free() after use ! Simply free the
- * struct kfifo with kfree().
+ * Do NOT pass the kfifo to kfifo_free() after use! Simply free the
+ * &struct kfifo with kfree().
  */
 struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size,
                         gfp_t gfp_mask, spinlock_t *lock)
@@ -108,7 +108,7 @@ EXPORT_SYMBOL(kfifo_free);
  * @buffer: the data to be added.
  * @len: the length of the data to be added.
  *
- * This function copies at most 'len' bytes from the 'buffer' into
+ * This function copies at most @len bytes from the @buffer into
  * the FIFO depending on the free space, and returns the number of
  * bytes copied.
  *
@@ -155,8 +155,8 @@ EXPORT_SYMBOL(__kfifo_put);
  * @buffer: where the data must be copied.
  * @len: the size of the destination buffer.
  *
- * This function copies at most 'len' bytes from the FIFO into the
- * 'buffer' and returns the number of copied bytes.
+ * This function copies at most @len bytes from the FIFO into the
+ * @buffer and returns the number of copied bytes.
  *
  * Note that with only one concurrent reader and one concurrent
  * writer, you don't need extra locking to use these functions.
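As a rough illustration of the kfifo calls documented above (a sketch only, assuming the 2.6-era interface where the caller supplies the buffer and spinlock; the buffer size, error handling, and function names are illustrative):

    #include <linux/err.h>
    #include <linux/kernel.h>
    #include <linux/kfifo.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_fifo_lock);

    static void example_kfifo_usage(void)
    {
            unsigned char *buf = kmalloc(1024, GFP_KERNEL);
            unsigned char out[8];
            struct kfifo *fifo;
            unsigned int n;

            if (!buf)
                    return;

            /* Wrap the caller-allocated buffer; @lock protects the fifo. */
            fifo = kfifo_init(buf, 1024, GFP_KERNEL, &example_fifo_lock);
            if (IS_ERR(fifo)) {
                    kfree(buf);
                    return;
            }

            /* Copies at most @len bytes into the FIFO, returns bytes copied. */
            n = kfifo_put(fifo, (unsigned char *)"hello", 5);

            /* Copies at most @len bytes out of the FIFO, returns bytes copied. */
            n = kfifo_get(fifo, out, sizeof(out));
            printk(KERN_DEBUG "read %u bytes back\n", n);

            /* Per the comment above: do NOT call kfifo_free() here, since the
             * buffer was supplied by the caller; free the pieces directly. */
            kfree(fifo);
            kfree(buf);
    }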
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 1db8c72d0d38..87c50ccd1d4e 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -50,7 +50,7 @@ static struct kthread_stop_info kthread_stop_info;
 /**
  * kthread_should_stop - should this kthread return now?
  *
- * When someone calls kthread_stop on your kthread, it will be woken
+ * When someone calls kthread_stop() on your kthread, it will be woken
  * and this will return true. You should then return, and your return
  * value will be passed through to kthread_stop().
  */
@@ -143,7 +143,7 @@ static void keventd_create_kthread(struct work_struct *work)
  * it. See also kthread_run(), kthread_create_on_cpu().
  *
  * When woken, the thread will run @threadfn() with @data as its
- * argument. @threadfn can either call do_exit() directly if it is a
+ * argument. @threadfn() can either call do_exit() directly if it is a
  * standalone thread for which noone will call kthread_stop(), or
  * return when 'kthread_should_stop()' is true (which means
  * kthread_stop() has been called). The return value should be zero
@@ -192,7 +192,7 @@ EXPORT_SYMBOL(kthread_create);
  *
  * Description: This function is equivalent to set_cpus_allowed(),
  * except that @cpu doesn't need to be online, and the thread must be
- * stopped (i.e., just returned from kthread_create().
+ * stopped (i.e., just returned from kthread_create()).
  */
 void kthread_bind(struct task_struct *k, unsigned int cpu)
 {
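The kthread_should_stop()/kthread_stop() contract described in these comments is normally used like this (a minimal sketch; the thread name and loop body are hypothetical):

    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    static struct task_struct *example_task;

    static int example_threadfn(void *data)
    {
            /* Loop until someone calls kthread_stop() on this thread. */
            while (!kthread_should_stop()) {
                    /* ... periodic work using @data ... */
                    schedule_timeout_interruptible(HZ);
            }
            return 0;       /* passed back as kthread_stop()'s return value */
    }

    static int example_start(void)
    {
            example_task = kthread_run(example_threadfn, NULL, "example");
            return IS_ERR(example_task) ? PTR_ERR(example_task) : 0;
    }

    static void example_stop(void)
    {
            /* Wakes the thread, waits for it to exit, returns its value. */
            kthread_stop(example_task);
    }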
diff --git a/kernel/printk.c b/kernel/printk.c
index c770e1a4e882..3e79e18dce33 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -483,7 +483,7 @@ static int have_callable_console(void)
  * printk - print a kernel message
  * @fmt: format string
  *
- * This is printk. It can be called from any context. We want it to work.
+ * This is printk(). It can be called from any context. We want it to work.
  *
  * We try to grab the console_sem. If we succeed, it's easy - we log the output and
  * call the console drivers. If we fail to get the semaphore we place the output
diff --git a/kernel/relay.c b/kernel/relay.c
index ef923f6de2e7..ef8a935710a2 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -328,7 +328,7 @@ static void wakeup_readers(struct work_struct *work)
  * @buf: the channel buffer
  * @init: 1 if this is a first-time initialization
  *
- * See relay_reset for description of effect.
+ * See relay_reset() for description of effect.
  */
 static void __relay_reset(struct rchan_buf *buf, unsigned int init)
 {
@@ -364,7 +364,7 @@ static void __relay_reset(struct rchan_buf *buf, unsigned int init)
  * and restarting the channel in its initial state. The buffers
  * are not freed, so any mappings are still in effect.
  *
- * NOTE: Care should be taken that the channel isn't actually
+ * NOTE. Care should be taken that the channel isn't actually
  * being used by anything when this call is made.
  */
 void relay_reset(struct rchan *chan)
@@ -528,7 +528,7 @@ static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
  * Creates a channel buffer for each cpu using the sizes and
  * attributes specified. The created channel buffer files
  * will be named base_filename0...base_filenameN-1. File
- * permissions will be S_IRUSR.
+ * permissions will be %S_IRUSR.
  */
 struct rchan *relay_open(const char *base_filename,
                         struct dentry *parent,
@@ -648,7 +648,7 @@ EXPORT_SYMBOL_GPL(relay_switch_subbuf);
  * subbufs_consumed should be the number of sub-buffers newly consumed,
  * not the total consumed.
  *
- * NOTE: Kernel clients don't need to call this function if the channel
+ * NOTE. Kernel clients don't need to call this function if the channel
  * mode is 'overwrite'.
  */
 void relay_subbufs_consumed(struct rchan *chan,
@@ -749,7 +749,7 @@ static int relay_file_open(struct inode *inode, struct file *filp)
  * @filp: the file
  * @vma: the vma describing what to map
  *
- * Calls upon relay_mmap_buf to map the file into user space.
+ * Calls upon relay_mmap_buf() to map the file into user space.
  */
 static int relay_file_mmap(struct file *filp, struct vm_area_struct *vma)
 {
@@ -891,7 +891,7 @@ static size_t relay_file_read_subbuf_avail(size_t read_pos,
  * @read_pos: file read position
  * @buf: relay channel buffer
  *
- * If the read_pos is in the middle of padding, return the
+ * If the @read_pos is in the middle of padding, return the
  * position of the first actually available byte, otherwise
  * return the original value.
  */
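A relay client uses relay_open() roughly as sketched below; the sub-buffer sizing and the parent dentry are illustrative, and the trailing parameters (sub-buffer size, sub-buffer count, callbacks) are assumed from the 2.6-era prototype, so check include/linux/relay.h before relying on them:

    #include <linux/errno.h>
    #include <linux/relay.h>

    static struct rchan *example_chan;

    static int example_open_channel(struct dentry *parent)
    {
            /* One buffer per cpu, backed by files named "cpu0"..."cpuN-1"
             * under @parent, created with %S_IRUSR permissions. */
            example_chan = relay_open("cpu", parent, 64 * 1024, 8, NULL);
            if (!example_chan)
                    return -ENOMEM;
            return 0;
    }

    static void example_close_channel(void)
    {
            relay_close(example_chan);
    }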
diff --git a/kernel/sched.c b/kernel/sched.c
index 1cd4ee769e20..1fd67e16cd31 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4203,13 +4203,12 @@ static void __setscheduler(struct task_struct *p, int policy, int prio)
 }
 
 /**
- * sched_setscheduler - change the scheduling policy and/or RT priority of
- * a thread.
+ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
  * @p: the task in question.
  * @policy: new policy.
  * @param: structure containing the new RT priority.
  *
- * NOTE: the task may be already dead
+ * NOTE that the task may be already dead.
  */
 int sched_setscheduler(struct task_struct *p, int policy,
                       struct sched_param *param)
@@ -4577,7 +4576,7 @@ asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
 /**
  * sys_sched_yield - yield the current processor to other threads.
  *
- * this function yields the current CPU by moving the calling thread
+ * This function yields the current CPU by moving the calling thread
  * to the expired array. If there are no other threads running on this
  * CPU then this function will return.
  */
@@ -4704,7 +4703,7 @@ EXPORT_SYMBOL(cond_resched_softirq);
 /**
  * yield - yield the current processor to other threads.
  *
- * this is a shortcut for kernel-space yielding - it marks the
+ * This is a shortcut for kernel-space yielding - it marks the
  * thread runnable and calls sys_sched_yield().
  */
 void __sched yield(void)
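An in-kernel caller uses sched_setscheduler() as documented above along these lines (a sketch; the wrapper and the priority value are made up):

    #include <linux/sched.h>

    /* Give @p real-time FIFO scheduling at an illustrative priority. */
    static int example_make_fifo(struct task_struct *p)
    {
            struct sched_param param = { .sched_priority = 50 };

            /* NOTE that the task may be already dead; the call handles that. */
            return sched_setscheduler(p, SCHED_FIFO, &param);
    }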
diff --git a/kernel/signal.c b/kernel/signal.c
index ea4632bd40a0..228fdb5c01d1 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2282,7 +2282,7 @@ static int do_tkill(int tgid, int pid, int sig)
  * @pid: the PID of the thread
  * @sig: signal to be sent
  *
- * This syscall also checks the tgid and returns -ESRCH even if the PID
+ * This syscall also checks the @tgid and returns -ESRCH even if the PID
  * exists but it's not belonging to the target process anymore. This
  * method solves the problem of threads exiting and PIDs getting reused.
  */
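From user space, the tgkill() semantics described here are usually exercised through syscall(2), since the C library of this era did not provide a wrapper; a sketch (the helper name and signal choice are hypothetical):

    /* Userspace illustration of the sys_tgkill() behaviour described above. */
    #include <signal.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int signal_thread(pid_t tgid, pid_t tid)
    {
            /* Fails with errno set to ESRCH if @tid exists but no longer
             * belongs to @tgid, which is what guards against PID reuse. */
            return syscall(SYS_tgkill, tgid, tid, SIGUSR1);
    }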
diff --git a/kernel/sys.c b/kernel/sys.c
index 6e2101dec0fc..e1024383314d 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -215,7 +215,7 @@ EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
  * This routine uses RCU to synchronize with changes to the chain.
  *
  * If the return value of the notifier can be and'ed
- * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain
+ * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain()
  * will return immediately, with the return value of
  * the notifier function which halted execution.
  * Otherwise the return value is the return value
@@ -313,7 +313,7 @@ EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
  * run in a process context, so they are allowed to block.
  *
  * If the return value of the notifier can be and'ed
- * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain
+ * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain()
  * will return immediately, with the return value of
  * the notifier function which halted execution.
  * Otherwise the return value is the return value
@@ -393,7 +393,7 @@ EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
  * All locking must be provided by the caller.
  *
  * If the return value of the notifier can be and'ed
- * with %NOTIFY_STOP_MASK then raw_notifier_call_chain
+ * with %NOTIFY_STOP_MASK then raw_notifier_call_chain()
  * will return immediately, with the return value of
  * the notifier function which halted execution.
  * Otherwise the return value is the return value
@@ -487,7 +487,7 @@ EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);
  * run in a process context, so they are allowed to block.
  *
  * If the return value of the notifier can be and'ed
- * with %NOTIFY_STOP_MASK then srcu_notifier_call_chain
+ * with %NOTIFY_STOP_MASK then srcu_notifier_call_chain()
  * will return immediately, with the return value of
  * the notifier function which halted execution.
  * Otherwise the return value is the return value
@@ -538,7 +538,7 @@ EXPORT_SYMBOL_GPL(srcu_init_notifier_head);
  * Registers a function with the list of functions
  * to be called at reboot time.
  *
- * Currently always returns zero, as blocking_notifier_chain_register
+ * Currently always returns zero, as blocking_notifier_chain_register()
  * always returns zero.
  */
 
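The notifier-chain comments touched above correspond to a usage pattern like the following (a sketch built around the reboot notifier that the last hunk documents; the callback body is hypothetical):

    #include <linux/notifier.h>
    #include <linux/reboot.h>

    static int example_reboot_event(struct notifier_block *nb,
                                    unsigned long action, void *data)
    {
            /* ... quiesce hardware before reboot/halt/power-off ... */
            return NOTIFY_DONE;     /* NOTIFY_STOP would halt chain traversal */
    }

    static struct notifier_block example_reboot_nb = {
            .notifier_call = example_reboot_event,
    };

    static int example_register(void)
    {
            /* Currently always returns zero, as the comment above notes. */
            return register_reboot_notifier(&example_reboot_nb);
    }

    static void example_unregister(void)
    {
            unregister_reboot_notifier(&example_reboot_nb);
    }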
diff --git a/kernel/timer.c b/kernel/timer.c
index d38801a95866..31ab627df8a0 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -85,7 +85,7 @@ static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases;
  * @j: the time in (absolute) jiffies that should be rounded
  * @cpu: the processor number on which the timeout will happen
  *
- * __round_jiffies rounds an absolute time in the future (in jiffies)
+ * __round_jiffies() rounds an absolute time in the future (in jiffies)
  * up or down to (approximately) full seconds. This is useful for timers
  * for which the exact time they fire does not matter too much, as long as
  * they fire approximately every X seconds.
@@ -98,7 +98,7 @@ static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases;
  * processors firing at the exact same time, which could lead
  * to lock contention or spurious cache line bouncing.
  *
- * The return value is the rounded version of the "j" parameter.
+ * The return value is the rounded version of the @j parameter.
  */
 unsigned long __round_jiffies(unsigned long j, int cpu)
 {
@@ -142,7 +142,7 @@ EXPORT_SYMBOL_GPL(__round_jiffies);
  * @j: the time in (relative) jiffies that should be rounded
  * @cpu: the processor number on which the timeout will happen
  *
- * __round_jiffies_relative rounds a time delta in the future (in jiffies)
+ * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
  * up or down to (approximately) full seconds. This is useful for timers
  * for which the exact time they fire does not matter too much, as long as
  * they fire approximately every X seconds.
@@ -155,7 +155,7 @@ EXPORT_SYMBOL_GPL(__round_jiffies);
  * processors firing at the exact same time, which could lead
  * to lock contention or spurious cache line bouncing.
  *
- * The return value is the rounded version of the "j" parameter.
+ * The return value is the rounded version of the @j parameter.
  */
 unsigned long __round_jiffies_relative(unsigned long j, int cpu)
 {
@@ -173,7 +173,7 @@ EXPORT_SYMBOL_GPL(__round_jiffies_relative);
  * round_jiffies - function to round jiffies to a full second
  * @j: the time in (absolute) jiffies that should be rounded
  *
- * round_jiffies rounds an absolute time in the future (in jiffies)
+ * round_jiffies() rounds an absolute time in the future (in jiffies)
  * up or down to (approximately) full seconds. This is useful for timers
  * for which the exact time they fire does not matter too much, as long as
  * they fire approximately every X seconds.
@@ -182,7 +182,7 @@ EXPORT_SYMBOL_GPL(__round_jiffies_relative);
  * at the same time, rather than at various times spread out. The goal
  * of this is to have the CPU wake up less, which saves power.
  *
- * The return value is the rounded version of the "j" parameter.
+ * The return value is the rounded version of the @j parameter.
  */
 unsigned long round_jiffies(unsigned long j)
 {
@@ -194,7 +194,7 @@ EXPORT_SYMBOL_GPL(round_jiffies);
  * round_jiffies_relative - function to round jiffies to a full second
  * @j: the time in (relative) jiffies that should be rounded
  *
- * round_jiffies_relative rounds a time delta in the future (in jiffies)
+ * round_jiffies_relative() rounds a time delta in the future (in jiffies)
  * up or down to (approximately) full seconds. This is useful for timers
  * for which the exact time they fire does not matter too much, as long as
  * they fire approximately every X seconds.
@@ -203,7 +203,7 @@ EXPORT_SYMBOL_GPL(round_jiffies);
  * at the same time, rather than at various times spread out. The goal
  * of this is to have the CPU wake up less, which saves power.
  *
- * The return value is the rounded version of the "j" parameter.
+ * The return value is the rounded version of the @j parameter.
  */
 unsigned long round_jiffies_relative(unsigned long j)
 {
@@ -387,7 +387,7 @@ void add_timer_on(struct timer_list *timer, int cpu)
  * @timer: the timer to be modified
  * @expires: new timeout in jiffies
  *
- * mod_timer is a more efficient way to update the expire field of an
+ * mod_timer() is a more efficient way to update the expire field of an
  * active timer (if the timer is inactive it will be activated)
  *
  * mod_timer(timer, expires) is equivalent to:
@@ -490,7 +490,7 @@ out:
  * the timer it also makes sure the handler has finished executing on other
  * CPUs.
  *
- * Synchronization rules: callers must prevent restarting of the timer,
+ * Synchronization rules: Callers must prevent restarting of the timer,
  * otherwise this function is meaningless. It must not be called from
  * interrupt contexts. The caller must not hold locks which would prevent
  * completion of the timer's handler. The timer's handler must not call
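Taken together, the timer helpers documented above are typically combined like this (a sketch; the interval, the stop flag, and the callback are made up, and the callback signature matches the timer API of this era):

    #include <linux/jiffies.h>
    #include <linux/timer.h>

    static struct timer_list example_timer;
    static int example_running;

    static void example_timer_fn(unsigned long data)
    {
            /* ... periodic housekeeping whose exact firing time is flexible ... */

            /* Rearm roughly every 5 seconds, rounded to a full second so that
             * many such timers expire together and the CPU wakes up less. */
            if (example_running)
                    mod_timer(&example_timer, round_jiffies(jiffies + 5 * HZ));
    }

    static void example_timer_start(void)
    {
            example_running = 1;
            setup_timer(&example_timer, example_timer_fn, 0);
            mod_timer(&example_timer, round_jiffies(jiffies + 5 * HZ));
    }

    static void example_timer_stop(void)
    {
            /* Per the synchronization rules above: stop the rearming first,
             * then wait for any running handler; never call this from
             * interrupt context. */
            example_running = 0;
            del_timer_sync(&example_timer);
    }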
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index a3da07c5af28..020d1fff57dc 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -656,8 +656,7 @@ void flush_scheduled_work(void)
 EXPORT_SYMBOL(flush_scheduled_work);
 
 /**
- * cancel_rearming_delayed_workqueue - reliably kill off a delayed
- * work whose handler rearms the delayed work.
+ * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
  * @wq: the controlling workqueue structure
  * @dwork: the delayed work struct
  */
@@ -670,8 +669,7 @@ EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
 EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
 
 /**
- * cancel_rearming_delayed_work - reliably kill off a delayed keventd
- * work whose handler rearms the delayed work.
+ * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
  * @dwork: the delayed work struct
  */
 void cancel_rearming_delayed_work(struct delayed_work *dwork)
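The self-rearming delayed work that these two helpers exist to cancel looks roughly like this (a sketch using the keventd variant; the work body and period are hypothetical):

    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    static void example_work_fn(struct work_struct *work);
    static DECLARE_DELAYED_WORK(example_dwork, example_work_fn);

    static void example_work_fn(struct work_struct *work)
    {
            /* ... do the periodic work ... */

            /* The handler rearms the delayed work - exactly the case that
             * cancel_rearming_delayed_work() is meant to handle. */
            schedule_delayed_work(&example_dwork, HZ);
    }

    static void example_start(void)
    {
            schedule_delayed_work(&example_dwork, HZ);
    }

    static void example_shutdown(void)
    {
            /* Reliably kills the work even though its handler rearms it. */
            cancel_rearming_delayed_work(&example_dwork);
    }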