Diffstat (limited to 'include/asm-powerpc')
-rw-r--r--   include/asm-powerpc/atomic.h       38
-rw-r--r--   include/asm-powerpc/cputable.h      4
-rw-r--r--   include/asm-powerpc/cputime.h     202
-rw-r--r--   include/asm-powerpc/firmware.h      9
-rw-r--r--   include/asm-powerpc/irq.h           6
-rw-r--r--   include/asm-powerpc/lmb.h          17
-rw-r--r--   include/asm-powerpc/paca.h          7
-rw-r--r--   include/asm-powerpc/pgtable-4k.h   11
-rw-r--r--   include/asm-powerpc/ppc_asm.h      42
-rw-r--r--   include/asm-powerpc/rwsem.h         2
-rw-r--r--   include/asm-powerpc/synch.h         2
-rw-r--r--   include/asm-powerpc/system.h        4
-rw-r--r--   include/asm-powerpc/time.h         15
13 files changed, 318 insertions, 41 deletions
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
index 147a38dcc766..bb3c0ab7e667 100644
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -8,6 +8,7 @@
 typedef struct { volatile int counter; } atomic_t;
 
 #ifdef __KERNEL__
+#include <linux/compiler.h>
 #include <asm/synch.h>
 #include <asm/asm-compat.h>
 
@@ -176,20 +177,29 @@ static __inline__ int atomic_dec_return(atomic_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u)				\
-({								\
-	int c, old;						\
-	c = atomic_read(v);					\
-	for (;;) {						\
-		if (unlikely(c == (u)))				\
-			break;					\
-		old = atomic_cmpxchg((v), c, c + (a));		\
-		if (likely(old == c))				\
-			break;					\
-		c = old;					\
-	}							\
-	c != (u);						\
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int t;
+
+	__asm__ __volatile__ (
+	LWSYNC_ON_SMP
+"1:	lwarx	%0,0,%1		# atomic_add_unless\n\
+	cmpw	0,%0,%3 \n\
+	beq-	2f \n\
+	add	%0,%2,%0 \n"
+	PPC405_ERR77(0,%2)
+"	stwcx.	%0,0,%1 \n\
+	bne-	1b \n"
+	ISYNC_ON_SMP
+"	subf	%0,%2,%0 \n\
+2:"
+	: "=&r" (t)
+	: "r" (&v->counter), "r" (a), "r" (u)
+	: "cc", "memory");
+
+	return t != u;
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 #define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
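
A note on the new atomic_add_unless(): the lwarx/stwcx. loop above loads
v->counter with a reservation, bails out to label 2 when the value equals u,
otherwise adds a and stores conditionally, retrying if the reservation was
lost; the subf on the success path recovers the old value so the final
"t != u" test reports whether the add was performed.  A minimal C sketch of
the same semantics, written in the generic cmpxchg form this patch removes
(sketch only, not part of the patch):

	static inline int atomic_add_unless_sketch(atomic_t *v, int a, int u)
	{
		int c, old;

		c = atomic_read(v);
		while (c != u) {
			old = atomic_cmpxchg(v, c, c + a);
			if (old == c)
				return 1;	/* the add happened */
			c = old;		/* raced; retry with new value */
		}
		return 0;			/* value was u, nothing done */
	}
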
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 90d005bb4d1c..99d12ff6346c 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -117,6 +117,7 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
 #define CPU_FTR_MMCRA_SIHV		ASM_CONST(0x0000080000000000)
 #define CPU_FTR_CI_LARGE_PAGE		ASM_CONST(0x0000100000000000)
 #define CPU_FTR_PAUSE_ZERO		ASM_CONST(0x0000200000000000)
+#define CPU_FTR_PURR			ASM_CONST(0x0000400000000000)
 #else
 /* ensure on 32b processors the flags are available for compiling but
  * don't do anything */
@@ -132,6 +133,7 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
 #define CPU_FTR_LOCKLESS_TLBIE		ASM_CONST(0x0)
 #define CPU_FTR_MMCRA_SIHV		ASM_CONST(0x0)
 #define CPU_FTR_CI_LARGE_PAGE		ASM_CONST(0x0)
+#define CPU_FTR_PURR			ASM_CONST(0x0)
 #endif
 
 #ifndef __ASSEMBLY__
@@ -316,7 +318,7 @@ enum {
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 |
 	    CPU_FTR_MMCRA | CPU_FTR_SMT |
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE |
-	    CPU_FTR_MMCRA_SIHV,
+	    CPU_FTR_MMCRA_SIHV | CPU_FTR_PURR,
 	CPU_FTRS_CELL = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 |
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT |
diff --git a/include/asm-powerpc/cputime.h b/include/asm-powerpc/cputime.h
index 6d68ad7e0ea3..a21185d47883 100644
--- a/include/asm-powerpc/cputime.h
+++ b/include/asm-powerpc/cputime.h
@@ -1 +1,203 @@
+/*
+ * Definitions for measuring cputime on powerpc machines.
+ *
+ * Copyright (C) 2006 Paul Mackerras, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in
+ * the same units as the timebase.  Otherwise we measure cpu time
+ * in jiffies using the generic definitions.
+ */
+
+#ifndef __POWERPC_CPUTIME_H
+#define __POWERPC_CPUTIME_H
+
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
 #include <asm-generic/cputime.h>
+#else
+
+#include <linux/types.h>
+#include <linux/time.h>
+#include <asm/div64.h>
+#include <asm/time.h>
+#include <asm/param.h>
+
+typedef u64 cputime_t;
+typedef u64 cputime64_t;
+
+#define cputime_zero			((cputime_t)0)
+#define cputime_max			((~((cputime_t)0) >> 1) - 1)
+#define cputime_add(__a, __b)		((__a) + (__b))
+#define cputime_sub(__a, __b)		((__a) - (__b))
+#define cputime_div(__a, __n)		((__a) / (__n))
+#define cputime_halve(__a)		((__a) >> 1)
+#define cputime_eq(__a, __b)		((__a) == (__b))
+#define cputime_gt(__a, __b)		((__a) > (__b))
+#define cputime_ge(__a, __b)		((__a) >= (__b))
+#define cputime_lt(__a, __b)		((__a) < (__b))
+#define cputime_le(__a, __b)		((__a) <= (__b))
+
+#define cputime64_zero			((cputime64_t)0)
+#define cputime64_add(__a, __b)		((__a) + (__b))
+#define cputime_to_cputime64(__ct)	(__ct)
+
+#ifdef __KERNEL__
+
+/*
+ * Convert cputime <-> jiffies
+ */
+extern u64 __cputime_jiffies_factor;
+
+static inline unsigned long cputime_to_jiffies(const cputime_t ct)
+{
+	return mulhdu(ct, __cputime_jiffies_factor);
+}
+
+static inline cputime_t jiffies_to_cputime(const unsigned long jif)
+{
+	cputime_t ct;
+	unsigned long sec;
+
+	/* have to be a little careful about overflow */
+	ct = jif % HZ;
+	sec = jif / HZ;
+	if (ct) {
+		ct *= tb_ticks_per_sec;
+		do_div(ct, HZ);
+	}
+	if (sec)
+		ct += (cputime_t) sec * tb_ticks_per_sec;
+	return ct;
+}
+
+static inline u64 cputime64_to_jiffies64(const cputime_t ct)
+{
+	return mulhdu(ct, __cputime_jiffies_factor);
+}
+
+/*
+ * Convert cputime <-> milliseconds
+ */
+extern u64 __cputime_msec_factor;
+
+static inline unsigned long cputime_to_msecs(const cputime_t ct)
+{
+	return mulhdu(ct, __cputime_msec_factor);
+}
+
+static inline cputime_t msecs_to_cputime(const unsigned long ms)
+{
+	cputime_t ct;
+	unsigned long sec;
+
+	/* have to be a little careful about overflow */
+	ct = ms % 1000;
+	sec = ms / 1000;
+	if (ct) {
+		ct *= tb_ticks_per_sec;
+		do_div(ct, 1000);
+	}
+	if (sec)
+		ct += (cputime_t) sec * tb_ticks_per_sec;
+	return ct;
+}
+
+/*
+ * Convert cputime <-> seconds
+ */
+extern u64 __cputime_sec_factor;
+
+static inline unsigned long cputime_to_secs(const cputime_t ct)
+{
+	return mulhdu(ct, __cputime_sec_factor);
+}
+
+static inline cputime_t secs_to_cputime(const unsigned long sec)
+{
+	return (cputime_t) sec * tb_ticks_per_sec;
+}
+
+/*
+ * Convert cputime <-> timespec
+ */
+static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p)
+{
+	u64 x = ct;
+	unsigned int frac;
+
+	frac = do_div(x, tb_ticks_per_sec);
+	p->tv_sec = x;
+	x = (u64) frac * 1000000000;
+	do_div(x, tb_ticks_per_sec);
+	p->tv_nsec = x;
+}
+
+static inline cputime_t timespec_to_cputime(const struct timespec *p)
+{
+	cputime_t ct;
+
+	ct = (u64) p->tv_nsec * tb_ticks_per_sec;
+	do_div(ct, 1000000000);
+	return ct + (u64) p->tv_sec * tb_ticks_per_sec;
+}
+
+/*
+ * Convert cputime <-> timeval
+ */
+static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p)
+{
+	u64 x = ct;
+	unsigned int frac;
+
+	frac = do_div(x, tb_ticks_per_sec);
+	p->tv_sec = x;
+	x = (u64) frac * 1000000;
+	do_div(x, tb_ticks_per_sec);
+	p->tv_usec = x;
+}
+
+static inline cputime_t timeval_to_cputime(const struct timeval *p)
+{
+	cputime_t ct;
+
+	ct = (u64) p->tv_usec * tb_ticks_per_sec;
+	do_div(ct, 1000000);
+	return ct + (u64) p->tv_sec * tb_ticks_per_sec;
+}
+
+/*
+ * Convert cputime <-> clock_t (units of 1/USER_HZ seconds)
+ */
+extern u64 __cputime_clockt_factor;
+
+static inline unsigned long cputime_to_clock_t(const cputime_t ct)
+{
+	return mulhdu(ct, __cputime_clockt_factor);
+}
+
+static inline cputime_t clock_t_to_cputime(const unsigned long clk)
+{
+	cputime_t ct;
+	unsigned long sec;
+
+	/* have to be a little careful about overflow */
+	ct = clk % USER_HZ;
+	sec = clk / USER_HZ;
+	if (ct) {
+		ct *= tb_ticks_per_sec;
+		do_div(ct, USER_HZ);
+	}
+	if (sec)
+		ct += (cputime_t) sec * tb_ticks_per_sec;
+	return ct;
+}
+
+#define cputime64_to_clock_t(ct)	cputime_to_clock_t((cputime_t)(ct))
+
+#endif /* __KERNEL__ */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* __POWERPC_CPUTIME_H */
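
A note on the conversion helpers above: mulhdu(x, y) returns the high 64 bits
of the 128-bit product x*y, so each __cputime_*_factor acts as a 2^64-scaled
ratio and the ticks-to-units conversion becomes a single multiply with no
division on the fast path.  The factors themselves are set up in the arch
time code, which is not part of this header diff; as a hedged worked example
of the arithmetic:

	/*
	 * Assuming __cputime_jiffies_factor = 2^64 * HZ / tb_ticks_per_sec,
	 *
	 *	cputime_to_jiffies(ct) = mulhdu(ct, factor)
	 *	                       = (ct * factor) >> 64
	 *	                       = ct * HZ / tb_ticks_per_sec
	 *
	 * e.g. with a 512 MHz timebase (tb_ticks_per_sec = 512000000) and
	 * HZ = 1000, ct = 1024000000 ticks (2 seconds) gives 2000 jiffies.
	 */
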
diff --git a/include/asm-powerpc/firmware.h b/include/asm-powerpc/firmware.h
index f804b34cf06a..b7791a1b05db 100644
--- a/include/asm-powerpc/firmware.h
+++ b/include/asm-powerpc/firmware.h
@@ -89,15 +89,6 @@ static inline unsigned long firmware_has_feature(unsigned long feature)
 		(FW_FEATURE_POSSIBLE & ppc64_firmware_features & feature);
 }
 
-#ifdef CONFIG_PPC_PSERIES
-typedef struct {
-	unsigned long val;
-	char * name;
-} firmware_feature_t;
-
-extern firmware_feature_t firmware_features_table[];
-#endif
-
 extern void system_reset_fwnmi(void);
 extern void machine_check_fwnmi(void);
 
diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h
index 8eb7e857ec4c..51f87d9993b6 100644
--- a/include/asm-powerpc/irq.h
+++ b/include/asm-powerpc/irq.h
@@ -479,6 +479,10 @@ extern int distribute_irqs;
 struct irqaction;
 struct pt_regs;
 
+#define __ARCH_HAS_DO_SOFTIRQ
+
+extern void __do_softirq(void);
+
 #ifdef CONFIG_IRQSTACKS
 /*
  * Per-cpu stacks for handling hard and soft interrupts.
@@ -491,8 +495,6 @@ extern void call_do_softirq(struct thread_info *tp);
 extern int call___do_IRQ(int irq, struct pt_regs *regs,
 			struct thread_info *tp);
 
-#define __ARCH_HAS_DO_SOFTIRQ
-
 #else
 #define irq_ctx_init()
 
diff --git a/include/asm-powerpc/lmb.h b/include/asm-powerpc/lmb.h
index d3546c4c9f46..377ac1b23aa3 100644
--- a/include/asm-powerpc/lmb.h
+++ b/include/asm-powerpc/lmb.h
@@ -19,8 +19,6 @@
 
 #define MAX_LMB_REGIONS 128
 
-#define LMB_ALLOC_ANYWHERE	0
-
 struct lmb_property {
 	unsigned long base;
 	unsigned long size;
@@ -43,15 +41,16 @@ extern struct lmb lmb;
 
 extern void __init lmb_init(void);
 extern void __init lmb_analyze(void);
-extern long __init lmb_add(unsigned long, unsigned long);
-extern long __init lmb_reserve(unsigned long, unsigned long);
-extern unsigned long __init lmb_alloc(unsigned long, unsigned long);
-extern unsigned long __init lmb_alloc_base(unsigned long, unsigned long,
-		unsigned long);
+extern long __init lmb_add(unsigned long base, unsigned long size);
+extern long __init lmb_reserve(unsigned long base, unsigned long size);
+extern unsigned long __init lmb_alloc(unsigned long size, unsigned long align);
+extern unsigned long __init lmb_alloc_base(unsigned long size,
+		unsigned long align, unsigned long max_addr);
+extern unsigned long __init __lmb_alloc_base(unsigned long size,
+		unsigned long align, unsigned long max_addr);
 extern unsigned long __init lmb_phys_mem_size(void);
 extern unsigned long __init lmb_end_of_DRAM(void);
-extern unsigned long __init lmb_abs_to_phys(unsigned long);
-extern void __init lmb_enforce_memory_limit(unsigned long);
+extern void __init lmb_enforce_memory_limit(unsigned long memory_limit);
 
 extern void lmb_dump_all(void);
 
diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h
index c9add8f1ad94..4465b95ebef0 100644
--- a/include/asm-powerpc/paca.h
+++ b/include/asm-powerpc/paca.h
@@ -54,7 +54,7 @@ struct paca_struct {
 #endif /* CONFIG_PPC_ISERIES */
 
 	/*
-	 * MAGIC: the spinlock functions in arch/ppc64/lib/locks.c
+	 * MAGIC: the spinlock functions in arch/powerpc/lib/locks.c
 	 * load lock_token and paca_index with a single lwz
 	 * instruction.  They must travel together and be properly
 	 * aligned.
@@ -96,6 +96,11 @@ struct paca_struct {
 	u64 saved_r1;			/* r1 save for RTAS calls */
 	u64 saved_msr;			/* MSR saved here by enter_rtas */
 	u8 proc_enabled;		/* irq soft-enable flag */
+
+	/* Stuff for accurate time accounting */
+	u64 user_time;			/* accumulated usermode TB ticks */
+	u64 system_time;		/* accumulated system TB ticks */
+	u64 startpurr;			/* PURR/TB value snapshot */
 };
 
 extern struct paca_struct paca[];
diff --git a/include/asm-powerpc/pgtable-4k.h b/include/asm-powerpc/pgtable-4k.h
index e9590c06ad92..35f92813464c 100644
--- a/include/asm-powerpc/pgtable-4k.h
+++ b/include/asm-powerpc/pgtable-4k.h
@@ -62,9 +62,14 @@
 /* shift to put page number into pte */
 #define PTE_RPN_SHIFT	(17)
 
-#define __real_pte(e,p)		((real_pte_t)(e))
-#define __rpte_to_pte(r)	(r)
-#define __rpte_to_hidx(r,index)	(pte_val((r)) >> 12)
+#ifdef STRICT_MM_TYPECHECKS
+#define __real_pte(e,p)		((real_pte_t){(e)})
+#define __rpte_to_pte(r)	((r).pte)
+#else
+#define __real_pte(e,p)		(e)
+#define __rpte_to_pte(r)	(__pte(r))
+#endif
+#define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> 12)
 
 #define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
 	do {							 \
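
A note on the __real_pte() change: under STRICT_MM_TYPECHECKS, real_pte_t is
a one-member struct wrapper rather than a bare value, so it has to be built
with a compound literal and unwrapped through its .pte member, which is what
the new macros do; without strict type checks they collapse back to plain
values rewrapped by __pte().  A hedged sketch of the type shape these macros
assume (illustrative, not copied from this diff):

	#ifdef STRICT_MM_TYPECHECKS
	typedef struct { pte_t pte; } real_pte_t;	/* wrapper type */
	#else
	typedef unsigned long real_pte_t;		/* plain value */
	#endif
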
diff --git a/include/asm-powerpc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h
index ab8688d39024..dd1c0a913d5f 100644
--- a/include/asm-powerpc/ppc_asm.h
+++ b/include/asm-powerpc/ppc_asm.h
@@ -15,6 +15,48 @@
 #define SZL			(BITS_PER_LONG/8)
 
 /*
+ * Stuff for accurate CPU time accounting.
+ * These macros handle transitions between user and system state
+ * in exception entry and exit and accumulate time to the
+ * user_time and system_time fields in the paca.
+ */
+
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#define ACCOUNT_CPU_USER_ENTRY(ra, rb)
+#define ACCOUNT_CPU_USER_EXIT(ra, rb)
+#else
+#define ACCOUNT_CPU_USER_ENTRY(ra, rb)					\
+	beq	2f;			/* if from kernel mode */	\
+BEGIN_FTR_SECTION;							\
+	mfspr	ra,SPRN_PURR;		/* get processor util. reg */	\
+END_FTR_SECTION_IFSET(CPU_FTR_PURR);					\
+BEGIN_FTR_SECTION;							\
+	mftb	ra;			/* or get TB if no PURR */	\
+END_FTR_SECTION_IFCLR(CPU_FTR_PURR);					\
+	ld	rb,PACA_STARTPURR(r13);					\
+	std	ra,PACA_STARTPURR(r13);					\
+	subf	rb,rb,ra;		/* subtract start value */	\
+	ld	ra,PACA_USER_TIME(r13);					\
+	add	ra,ra,rb;		/* add on to user time */	\
+	std	ra,PACA_USER_TIME(r13);					\
+2:
+
+#define ACCOUNT_CPU_USER_EXIT(ra, rb)					\
+BEGIN_FTR_SECTION;							\
+	mfspr	ra,SPRN_PURR;		/* get processor util. reg */	\
+END_FTR_SECTION_IFSET(CPU_FTR_PURR);					\
+BEGIN_FTR_SECTION;							\
+	mftb	ra;			/* or get TB if no PURR */	\
+END_FTR_SECTION_IFCLR(CPU_FTR_PURR);					\
+	ld	rb,PACA_STARTPURR(r13);					\
+	std	ra,PACA_STARTPURR(r13);					\
+	subf	rb,rb,ra;		/* subtract start value */	\
+	ld	ra,PACA_SYSTEM_TIME(r13);				\
+	add	ra,ra,rb;		/* add on to user time */	\
+	std	ra,PACA_SYSTEM_TIME(r13);
+#endif
+
+/*
  * Macros for storing registers into and loading registers from
  * exception frames.
  */
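
A note on ACCOUNT_CPU_USER_ENTRY/EXIT: both macros sample the PURR (or the
timebase on CPUs without CPU_FTR_PURR), take the difference from the snapshot
stashed in paca->startpurr, store the new sample back as the next snapshot,
and accumulate the delta into paca->user_time (on entry from user mode) or
paca->system_time (on exit back to user mode); ACCOUNT_CPU_USER_ENTRY skips
all of this when the exception came from kernel mode.  A hedged C rendering
of the exit path, where get_purr_or_tb() is illustrative and stands in for
the feature-section mfspr/mftb pair:

	static inline void account_cpu_user_exit_sketch(struct paca_struct *p)
	{
		u64 now = get_purr_or_tb();	/* mfspr SPRN_PURR or mftb */
		u64 delta = now - p->startpurr;	/* ticks since last snapshot */

		p->startpurr = now;		/* restart the interval */
		p->system_time += delta;	/* time just spent in the kernel */
	}
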
diff --git a/include/asm-powerpc/rwsem.h b/include/asm-powerpc/rwsem.h
index 79bae4933b73..2c2fe9647595 100644
--- a/include/asm-powerpc/rwsem.h
+++ b/include/asm-powerpc/rwsem.h
@@ -4,7 +4,7 @@
 #ifdef __KERNEL__
 
 /*
- * include/asm-ppc64/rwsem.h: R/W semaphores for PPC using the stuff
+ * include/asm-powerpc/rwsem.h: R/W semaphores for PPC using the stuff
  * in lib/rwsem.c.  Adapted largely from include/asm-i386/rwsem.h
  * by Paul Mackerras <paulus@samba.org>.
  */
diff --git a/include/asm-powerpc/synch.h b/include/asm-powerpc/synch.h
index c90d9d9aae72..2cda3c38a9fa 100644
--- a/include/asm-powerpc/synch.h
+++ b/include/asm-powerpc/synch.h
@@ -15,7 +15,7 @@
 #endif
 
 #ifdef CONFIG_SMP
-#define ISYNC_ON_SMP	"\n\tisync"
+#define ISYNC_ON_SMP	"\n\tisync\n"
 #define LWSYNC_ON_SMP	__stringify(LWSYNC) "\n"
 #else
 #define ISYNC_ON_SMP
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index d9bf53653b10..41b7a5b3d701 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -424,5 +424,9 @@ static inline void create_function_call(unsigned long addr, void * func)
 	create_branch(addr, func_addr, BRANCH_SET_LINK);
 }
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void account_system_vtime(struct task_struct *);
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_SYSTEM_H */
diff --git a/include/asm-powerpc/time.h b/include/asm-powerpc/time.h
index baddc9ab57ad..912118db13ae 100644
--- a/include/asm-powerpc/time.h
+++ b/include/asm-powerpc/time.h
@@ -41,6 +41,7 @@ extern time_t last_rtc_update;
 
 extern void generic_calibrate_decr(void);
 extern void wakeup_decrementer(void);
+extern void snapshot_timebase(void);
 
 /* Some sane defaults: 125 MHz timebase, 1GHz processor */
 extern unsigned long ppc_proc_freq;
@@ -221,5 +222,19 @@ struct cpu_usage {
 
 DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array);
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void account_process_vtime(struct task_struct *tsk);
+#else
+#define account_process_vtime(tsk)		do { } while (0)
+#endif
+
+#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)
+extern void calculate_steal_time(void);
+extern void snapshot_timebases(void);
+#else
+#define calculate_steal_time()			do { } while (0)
+#define snapshot_timebases()			do { } while (0)
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* __PPC64_TIME_H */