author     Andrew Morton <akpm@osdl.org>                            2007-02-10 04:44:41 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>     2007-02-11 13:51:25 -0500
commit     780a065668b1c6ca6a70c7d36b9f6552ea3bb5f5 (patch)
tree       4d61d1d042a1a58b84bdf8f5b4b0a33146271a54 /include/linux
parent     7131b6d167b41593463ce98df17e101e776bf5ec (diff)
[PATCH] count_vm_events-warning-fix
- Prevent things like this:

    block/ll_rw_blk.c: In function 'submit_bio':
    block/ll_rw_blk.c:3222: warning: unused variable 'count'

  inlines are very, very preferable to macros.

- remove unused get_cpu_vm_events() macro

Cc: Christoph Lameter <clameter@engr.sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
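For illustration, a minimal sketch of the effect the patch addresses, using hypothetical names (count_events, demo_submit and USE_MACRO_STUBS are stand-ins, not kernel identifiers): when the stub is a do-nothing macro, a caller's local variable that exists only to be passed to it is never read, so gcc -Wall flags it as unused; when the stub is an empty static inline, the argument is still passed and type-checked, and the warning goes away.

    /* Hypothetical stand-ins for the vmstat stubs; not the kernel code itself. */

    #ifdef USE_MACRO_STUBS
    /*
     * Old style: a do-nothing macro.  Its arguments disappear during
     * preprocessing, so a local passed only to it is never read and GCC
     * reports: warning: unused variable 'count'.
     */
    #define count_events(item, delta) do { } while (0)
    #else
    /*
     * New style: an empty static inline.  The argument is still passed
     * (and type-checked), so the variable counts as used; the empty call
     * is then optimized away.
     */
    static inline void count_events(int item, long delta)
    {
    }
    #endif

    void demo_submit(int item, long nr_sectors)
    {
            long count = nr_sectors;   /* mirrors 'count' in submit_bio() */

            count_events(item, count); /* macro stub: 'count' looks unused;
                                          inline stub: no warning */
    }

Either way the optimizer removes the empty call entirely, so the inline stubs keep the CONFIG_VM_EVENT_COUNTERS=n case at zero cost while remaining visible to the compiler.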
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/vmstat.h  47
1 file changed, 29 insertions, 18 deletions
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 7ba91f2839fa..acb1f105870c 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -7,18 +7,6 @@
 #include <linux/mmzone.h>
 #include <asm/atomic.h>
 
-#ifdef CONFIG_VM_EVENT_COUNTERS
-/*
- * Light weight per cpu counter implementation.
- *
- * Counters should only be incremented. You need to set EMBEDDED
- * to disable VM_EVENT_COUNTERS. Things like procps (vmstat,
- * top, etc) use /proc/vmstat and depend on these counters.
- *
- * Counters are handled completely inline. On many platforms the code
- * generated will simply be the increment of a global address.
- */
-
 #ifdef CONFIG_ZONE_DMA
 #define DMA_ZONE(xx) xx##_DMA,
 #else
@@ -52,6 +40,17 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		NR_VM_EVENT_ITEMS
 };
 
+#ifdef CONFIG_VM_EVENT_COUNTERS
+/*
+ * Light weight per cpu counter implementation.
+ *
+ * Counters should only be incremented and no critical kernel component
+ * should rely on the counter values.
+ *
+ * Counters are handled completely inline. On many platforms the code
+ * generated will simply be the increment of a global address.
+ */
+
 struct vm_event_state {
 	unsigned long event[NR_VM_EVENT_ITEMS];
 };
@@ -92,12 +91,24 @@ static inline void vm_events_fold_cpu(int cpu)
 #else
 
 /* Disable counters */
-#define get_cpu_vm_events(e) 0L
-#define count_vm_event(e) do { } while (0)
-#define count_vm_events(e,d) do { } while (0)
-#define __count_vm_event(e) do { } while (0)
-#define __count_vm_events(e,d) do { } while (0)
-#define vm_events_fold_cpu(x) do { } while (0)
+static inline void count_vm_event(enum vm_event_item item)
+{
+}
+static inline void count_vm_events(enum vm_event_item item, long delta)
+{
+}
+static inline void __count_vm_event(enum vm_event_item item)
+{
+}
+static inline void __count_vm_events(enum vm_event_item item, long delta)
+{
+}
+static inline void all_vm_events(unsigned long *ret)
+{
+}
+static inline void vm_events_fold_cpu(int cpu)
+{
+}
 
 #endif /* CONFIG_VM_EVENT_COUNTERS */
 