author     Paul Mackerras <paulus@samba.org>  2006-12-03 23:59:07 -0500
committer  Paul Mackerras <paulus@samba.org>  2006-12-03 23:59:07 -0500
commit     79acbb3ff2d8095b692e1502b9eb2ccec348de26 (patch)
tree       6ab773e5a8f9de2cd6443362b21d0d6fffe3b35e /include/asm-powerpc
parent     19a79859e168640f8e16d7b216d211c1c52b687a (diff)
parent     2b5f6dcce5bf94b9b119e9ed8d537098ec61c3d2 (diff)

Merge branch 'linux-2.6' into for-linus
Diffstat (limited to 'include/asm-powerpc')
-rw-r--r--  include/asm-powerpc/asm-compat.h     |  52
-rw-r--r--  include/asm-powerpc/checksum.h       |  59
-rw-r--r--  include/asm-powerpc/cputable.h       |  37
-rw-r--r--  include/asm-powerpc/current.h        |  12
-rw-r--r--  include/asm-powerpc/device.h         |   7
-rw-r--r--  include/asm-powerpc/firmware.h       |  17
-rw-r--r--  include/asm-powerpc/i8259.h          |   1
-rw-r--r--  include/asm-powerpc/io.h             |   7
-rw-r--r--  include/asm-powerpc/iommu.h          |  22
-rw-r--r--  include/asm-powerpc/oprofile_impl.h  |  87
-rw-r--r--  include/asm-powerpc/pci.h            |  20
-rw-r--r--  include/asm-powerpc/pmc.h            |  13
-rw-r--r--  include/asm-powerpc/ppc_asm.h        |  18
-rw-r--r--  include/asm-powerpc/prom.h           |  10
-rw-r--r--  include/asm-powerpc/reg.h            |  26
-rw-r--r--  include/asm-powerpc/systbl.h         |   3
-rw-r--r--  include/asm-powerpc/system.h         |   6
-rw-r--r--  include/asm-powerpc/tce.h            |   3
-rw-r--r--  include/asm-powerpc/time.h           |  31
-rw-r--r--  include/asm-powerpc/timex.h          |  20
-rw-r--r--  include/asm-powerpc/topology.h       |   8
-rw-r--r--  include/asm-powerpc/unistd.h         |   5
22 files changed, 317 insertions(+), 147 deletions(-)
diff --git a/include/asm-powerpc/asm-compat.h b/include/asm-powerpc/asm-compat.h
index 8e64be0cc47d..c89bd58ee283 100644
--- a/include/asm-powerpc/asm-compat.h
+++ b/include/asm-powerpc/asm-compat.h
@@ -14,6 +14,58 @@
 # define ASM_CONST(x)	__ASM_CONST(x)
 #endif
 
+
+/*
+ * Feature section common macros
+ *
+ * Note that the entries now contain offsets between the table entry
+ * and the code rather than absolute code pointers in order to be
+ * useable with the vdso shared library. There is also an assumption
+ * that values will be negative, that is, the fixup table has to be
+ * located after the code it fixes up.
+ */
+#ifdef CONFIG_PPC64
+#ifdef __powerpc64__
+/* 64 bits kernel, 64 bits code */
+#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect)	\
+99:							\
+	.section sect,"a";				\
+	.align 3;					\
+98:							\
+	.llong msk;					\
+	.llong val;					\
+	.llong label##b-98b;				\
+	.llong 99b-98b;					\
+	.previous
+#else /* __powerpc64__ */
+/* 64 bits kernel, 32 bits code (ie. vdso32) */
+#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect)	\
+99:							\
+	.section sect,"a";				\
+	.align 3;					\
+98:							\
+	.llong msk;					\
+	.llong val;					\
+	.long 0xffffffff;				\
+	.long label##b-98b;				\
+	.long 0xffffffff;				\
+	.long 99b-98b;					\
+	.previous
+#endif /* !__powerpc64__ */
+#else /* CONFIG_PPC64 */
+/* 32 bits kernel, 32 bits code */
+#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect)	\
+99:							\
+	.section sect,"a";				\
+	.align 2;					\
+98:							\
+	.long msk;					\
+	.long val;					\
+	.long label##b-98b;				\
+	.long 99b-98b;					\
+	.previous
+#endif /* !CONFIG_PPC64 */
+
 #ifdef __powerpc64__
 
 /* operations for longs and pointers */
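The MAKE_FTR_SECTION_ENTRY() macros above only emit table entries; a separate pass walks the __ftr_fixup section at boot and patches out code whose CPU-feature condition is not met. As a rough orientation, here is a hedged C sketch of such a walker for the 64-bit entry layout (mask, value, start offset, end offset). It is illustrative only: struct and function names are invented here and this is not the kernel's actual do_feature_fixups() implementation.

/* Hedged sketch: entries hold offsets relative to themselves (negative,
 * since the table sits after the code it fixes up). */
struct ftr_fixup_entry {
	unsigned long mask;	/* feature bits this entry cares about */
	unsigned long value;	/* required value of those bits */
	long start_off;		/* offset from this entry to start of code */
	long end_off;		/* offset from this entry to end of code */
};

static void apply_feature_fixups(unsigned long cur_features,
				 struct ftr_fixup_entry *fcur,
				 struct ftr_fixup_entry *fend)
{
	for (; fcur < fend; fcur++) {
		unsigned int *start, *end;

		if ((cur_features & fcur->mask) == fcur->value)
			continue;	/* condition met: leave the code alone */

		start = (unsigned int *)((char *)fcur + fcur->start_off);
		end   = (unsigned int *)((char *)fcur + fcur->end_off);

		while (start < end)
			*start++ = 0x60000000;	/* PowerPC "nop" */
	}
}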
diff --git a/include/asm-powerpc/checksum.h b/include/asm-powerpc/checksum.h
index 609ecbbd7210..7cdf358337cf 100644
--- a/include/asm-powerpc/checksum.h
+++ b/include/asm-powerpc/checksum.h
@@ -14,17 +14,16 @@
  * which always checksum on 4 octet boundaries.  ihl is the number
  * of 32-bit words and is always >= 5.
  */
-extern unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl);
+extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
 
 /*
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-extern unsigned short csum_tcpudp_magic(unsigned long saddr,
-					unsigned long daddr,
+extern __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
 					unsigned short len,
 					unsigned short proto,
-					unsigned int sum);
+					__wsum sum);
 
 /*
  * computes the checksum of a memory block at buff, length len,
@@ -38,8 +37,7 @@ extern unsigned short csum_tcpudp_magic(unsigned long saddr,
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-extern unsigned int csum_partial(const unsigned char * buff, int len,
-				 unsigned int sum);
+extern __wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  * Computes the checksum of a memory block at src, length len,
@@ -51,20 +49,15 @@ extern unsigned int csum_partial(const unsigned char * buff, int len,
  * Like csum_partial, this must be called with even lengths,
  * except for the last fragment.
  */
-extern unsigned int csum_partial_copy_generic(const char *src, char *dst,
-					      int len, unsigned int sum,
+extern __wsum csum_partial_copy_generic(const void *src, void *dst,
+					      int len, __wsum sum,
 					      int *src_err, int *dst_err);
 /*
  * the same as csum_partial, but copies from src to dst while it
  * checksums.
  */
-unsigned int csum_partial_copy_nocheck(const char *src,
-				       char *dst,
-				       int len,
-				       unsigned int sum);
-
 #define csum_partial_copy_from_user(src, dst, len, sum, errp) \
-	csum_partial_copy_generic((src), (dst), (len), (sum), (errp), NULL)
+	csum_partial_copy_generic((__force const void *)(src), (dst), (len), (sum), (errp), NULL)
 
 #define csum_partial_copy_nocheck(src, dst, len, sum) \
 	csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL)
@@ -74,7 +67,7 @@ unsigned int csum_partial_copy_nocheck(const char *src,
  * turns a 32-bit partial checksum (e.g. from csum_partial) into a
  * 1's complement 16-bit checksum.
  */
-static inline unsigned int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum sum)
 {
 	unsigned int tmp;
 
@@ -83,41 +76,32 @@ static inline unsigned int csum_fold(unsigned int sum)
 	/* if there is a carry from adding the two 16-bit halves,
 	   it will carry from the lower half into the upper half,
 	   giving us the correct sum in the upper half. */
-	sum = ~(sum + tmp) >> 16;
-	return sum;
+	return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
 }
 
 /*
  * this routine is used for miscellaneous IP-like checksums, mainly
  * in icmp.c
  */
-static inline unsigned short ip_compute_csum(unsigned char * buff, int len)
+static inline __sum16 ip_compute_csum(const void *buff, int len)
 {
 	return csum_fold(csum_partial(buff, len, 0));
 }
 
-#ifdef __powerpc64__
-static inline u32 csum_tcpudp_nofold(u32 saddr,
-				     u32 daddr,
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
 				     unsigned short len,
 				     unsigned short proto,
-				     unsigned int sum)
+				     __wsum sum)
 {
-	unsigned long s = sum;
+#ifdef __powerpc64__
+	unsigned long s = (__force u32)sum;
 
-	s += saddr;
-	s += daddr;
-	s += (proto << 16) + len;
+	s += (__force u32)saddr;
+	s += (__force u32)daddr;
+	s += proto + len;
 	s += (s >> 32);
-	return (u32) s;
-}
+	return (__force __wsum) s;
 #else
-static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
-				     unsigned long daddr,
-				     unsigned short len,
-				     unsigned short proto,
-				     unsigned int sum)
-{
 	__asm__("\n\
 	addc %0,%0,%1 \n\
 	adde %0,%0,%2 \n\
@@ -125,10 +109,9 @@ static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
 	addze %0,%0 \n\
 	"
 	: "=r" (sum)
-	: "r" (daddr), "r"(saddr), "r"((proto<<16)+len), "0"(sum));
+	: "r" (daddr), "r"(saddr), "r"(proto + len), "0"(sum));
 	return sum;
-}
-
 #endif
+}
 #endif /* __KERNEL__ */
 #endif
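The __wsum/__sum16 annotations above are sparse-checked types around the same arithmetic as before. For readers unfamiliar with csum_fold(), here is a hedged, stand-alone illustration of what the fold computes, written in generic C rather than the header's PowerPC-specific form:

/* Hedged illustration (not from the patch): fold a 32-bit partial sum to a
 * 16-bit one's-complement checksum. E.g. folding 0x0001ffff gives 0xfffe
 * once the carry out of the low half is added back in. */
#include <stdint.h>

static uint16_t fold16(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* add high and low halves */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb a possible carry */
	return (uint16_t)~sum;			/* complement the result */
}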
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 12707ab9dc98..a9a40149a7c0 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -89,8 +89,11 @@ struct cpu_spec {
 
 extern struct cpu_spec *cur_cpu_spec;
 
-extern void identify_cpu(unsigned long offset, unsigned long cpu);
-extern void do_cpu_ftr_fixups(unsigned long offset);
+extern unsigned int __start___ftr_fixup, __stop___ftr_fixup;
+
+extern struct cpu_spec *identify_cpu(unsigned long offset);
+extern void do_feature_fixups(unsigned long value, void *fixup_start,
+			      void *fixup_end);
 
 #endif /* __ASSEMBLY__ */
 
@@ -144,6 +147,7 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
 #define CPU_FTR_CI_LARGE_PAGE		LONG_ASM_CONST(0x0000100000000000)
 #define CPU_FTR_PAUSE_ZERO		LONG_ASM_CONST(0x0000200000000000)
 #define CPU_FTR_PURR			LONG_ASM_CONST(0x0000400000000000)
+#define CPU_FTR_CELL_TB_BUG		LONG_ASM_CONST(0x0000800000000000)
 
 #ifndef __ASSEMBLY__
 
@@ -332,7 +336,7 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
 #define CPU_FTRS_CELL	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
-	    CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE)
+	    CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE | CPU_FTR_CELL_TB_BUG)
 #define CPU_FTRS_PA6T (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \
@@ -431,29 +435,12 @@ static inline int cpu_has_feature(unsigned long feature)
 
 #ifdef __ASSEMBLY__
 
-#define BEGIN_FTR_SECTION		98:
-
-#ifndef __powerpc64__
+#define BEGIN_FTR_SECTION_NESTED(label)	label:
+#define BEGIN_FTR_SECTION		BEGIN_FTR_SECTION_NESTED(97)
+#define END_FTR_SECTION_NESTED(msk, val, label) \
+	MAKE_FTR_SECTION_ENTRY(msk, val, label, __ftr_fixup)
 #define END_FTR_SECTION(msk, val)	\
-99:					\
-	.section __ftr_fixup,"a";	\
-	.align 2;			\
-	.long msk;			\
-	.long val;			\
-	.long 98b;			\
-	.long 99b;			\
-	.previous
-#else /* __powerpc64__ */
-#define END_FTR_SECTION(msk, val)	\
-99:					\
-	.section __ftr_fixup,"a";	\
-	.align 3;			\
-	.llong msk;			\
-	.llong val;			\
-	.llong 98b;			\
-	.llong 99b;			\
-	.previous
-#endif /* __powerpc64__ */
+	END_FTR_SECTION_NESTED(msk, val, 97)
 
 #define END_FTR_SECTION_IFSET(msk)	END_FTR_SECTION((msk), (msk))
 #define END_FTR_SECTION_IFCLR(msk)	END_FTR_SECTION((msk), 0)
diff --git a/include/asm-powerpc/current.h b/include/asm-powerpc/current.h
index 1938d6abd255..b8708aedf925 100644
--- a/include/asm-powerpc/current.h
+++ b/include/asm-powerpc/current.h
@@ -14,7 +14,17 @@ struct task_struct;
 #ifdef __powerpc64__
 #include <asm/paca.h>
 
-#define current		(get_paca()->__current)
+static inline struct task_struct *get_current(void)
+{
+	struct task_struct *task;
+
+	__asm__ __volatile__("ld %0,%1(13)"
+	: "=r" (task)
+	: "i" (offsetof(struct paca_struct, __current)));
+
+	return task;
+}
+#define current	get_current()
 
 #else
 
diff --git a/include/asm-powerpc/device.h b/include/asm-powerpc/device.h
new file mode 100644
index 000000000000..d8f9872b0e2d
--- /dev/null
+++ b/include/asm-powerpc/device.h
@@ -0,0 +1,7 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#include <asm-generic/device.h>
+
diff --git a/include/asm-powerpc/firmware.h b/include/asm-powerpc/firmware.h
index 1022737f4f34..fdf9aff71150 100644
--- a/include/asm-powerpc/firmware.h
+++ b/include/asm-powerpc/firmware.h
@@ -96,19 +96,16 @@ extern void machine_check_fwnmi(void);
 /* This is true if we are using the firmware NMI handler (typically LPAR) */
 extern int fwnmi_active;
 
-#else /* __ASSEMBLY__ */
+extern unsigned int __start___fw_ftr_fixup, __stop___fw_ftr_fixup;
 
-#define BEGIN_FW_FTR_SECTION		96:
+#else /* __ASSEMBLY__ */
 
+#define BEGIN_FW_FTR_SECTION_NESTED(label)	label:
+#define BEGIN_FW_FTR_SECTION		BEGIN_FW_FTR_SECTION_NESTED(97)
+#define END_FW_FTR_SECTION_NESTED(msk, val, label) \
+	MAKE_FTR_SECTION_ENTRY(msk, val, label, __fw_ftr_fixup)
 #define END_FW_FTR_SECTION(msk, val)	\
-97:					\
-	.section __fw_ftr_fixup,"a";	\
-	.align 3;			\
-	.llong msk;			\
-	.llong val;			\
-	.llong 96b;			\
-	.llong 97b;			\
-	.previous
+	END_FW_FTR_SECTION_NESTED(msk, val, 97)
 
 #define END_FW_FTR_SECTION_IFSET(msk)	END_FW_FTR_SECTION((msk), (msk))
 #define END_FW_FTR_SECTION_IFCLR(msk)	END_FW_FTR_SECTION((msk), 0)
diff --git a/include/asm-powerpc/i8259.h b/include/asm-powerpc/i8259.h
index 78489fb8d140..db1362f8c603 100644
--- a/include/asm-powerpc/i8259.h
+++ b/include/asm-powerpc/i8259.h
@@ -7,6 +7,7 @@
 #ifdef CONFIG_PPC_MERGE
 extern void i8259_init(struct device_node *node, unsigned long intack_addr);
 extern unsigned int i8259_irq(void);
+extern struct irq_host *i8259_get_host(void);
 #else
 extern void i8259_init(unsigned long intack_addr, int offset);
 extern int i8259_irq(void);
diff --git a/include/asm-powerpc/io.h b/include/asm-powerpc/io.h
index 3baff8b0fd5a..c2c5f14b5f5f 100644
--- a/include/asm-powerpc/io.h
+++ b/include/asm-powerpc/io.h
@@ -163,8 +163,11 @@ extern void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count);
 
 static inline void mmiowb(void)
 {
-	__asm__ __volatile__ ("sync" : : : "memory");
-	get_paca()->io_sync = 0;
+	unsigned long tmp;
+
+	__asm__ __volatile__("sync; li %0,0; stb %0,%1(13)"
+	: "=&r" (tmp) : "i" (offsetof(struct paca_struct, io_sync))
+	: "memory");
 }
 
 /*
diff --git a/include/asm-powerpc/iommu.h b/include/asm-powerpc/iommu.h
index 264ed6242771..19e6f7e0a607 100644
--- a/include/asm-powerpc/iommu.h
+++ b/include/asm-powerpc/iommu.h
@@ -22,17 +22,35 @@
 #define _ASM_IOMMU_H
 #ifdef __KERNEL__
 
-#include <asm/types.h>
+#include <linux/compiler.h>
 #include <linux/spinlock.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <asm/types.h>
+#include <asm/bitops.h>
+
+#define IOMMU_PAGE_SHIFT	12
+#define IOMMU_PAGE_SIZE		(ASM_CONST(1) << IOMMU_PAGE_SHIFT)
+#define IOMMU_PAGE_MASK		(~((1 << IOMMU_PAGE_SHIFT) - 1))
+#define IOMMU_PAGE_ALIGN(addr)	_ALIGN_UP(addr, IOMMU_PAGE_SIZE)
+
+#ifndef __ASSEMBLY__
+
+/* Pure 2^n version of get_order */
+static __inline__ __attribute_const__ int get_iommu_order(unsigned long size)
+{
+	return __ilog2((size - 1) >> IOMMU_PAGE_SHIFT) + 1;
+}
+
+#endif /* __ASSEMBLY__ */
+
 
 /*
  * IOMAP_MAX_ORDER defines the largest contiguous block
  * of dma space we can get.  IOMAP_MAX_ORDER = 13
  * allows up to 2**12 pages (4096 * 4096) = 16 MB
  */
 #define IOMAP_MAX_ORDER		13
 
 struct iommu_table {
	unsigned long  it_busno;	/* Bus number this table belongs to */
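The new get_iommu_order() above gives the power-of-two order of a DMA mapping measured in IOMMU_PAGE_SIZE (4 KiB) units. Here is a hedged userspace illustration of essentially the same calculation; iommu_order() is a stand-in name for this sketch, not a kernel function, and the edge case of sizes at or below one page is ignored:

/* Hedged example: smallest order such that 2^order IOMMU pages cover size. */
#include <stdio.h>

static int iommu_order(unsigned long size)
{
	unsigned long pages = (size + 4095) >> 12;	/* round up to 4 KiB pages */
	int order = 0;

	while ((1UL << order) < pages)
		order++;
	return order;
}

int main(void)
{
	printf("%d\n", iommu_order(16384));	/* 4 pages -> order 2 */
	printf("%d\n", iommu_order(20000));	/* 5 pages -> order 3 */
	return 0;
}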
diff --git a/include/asm-powerpc/oprofile_impl.h b/include/asm-powerpc/oprofile_impl.h
index 5b33994cd488..07a10e590c1d 100644
--- a/include/asm-powerpc/oprofile_impl.h
+++ b/include/asm-powerpc/oprofile_impl.h
@@ -42,7 +42,7 @@ struct op_powerpc_model {
	void (*reg_setup) (struct op_counter_config *,
			   struct op_system_config *,
			   int num_counters);
-	void (*cpu_setup) (void *);
+	void (*cpu_setup) (struct op_counter_config *);
	void (*start) (struct op_counter_config *);
	void (*stop) (void);
	void (*handle_interrupt) (struct pt_regs *,
@@ -121,7 +121,90 @@ static inline void ctr_write(unsigned int i, unsigned int val)
		break;
	}
 }
-#endif /* !CONFIG_FSL_BOOKE */
+#else /* CONFIG_FSL_BOOKE */
+static inline u32 get_pmlca(int ctr)
+{
+	u32 pmlca;
+
+	switch (ctr) {
+		case 0:
+			pmlca = mfpmr(PMRN_PMLCA0);
+			break;
+		case 1:
+			pmlca = mfpmr(PMRN_PMLCA1);
+			break;
+		case 2:
+			pmlca = mfpmr(PMRN_PMLCA2);
+			break;
+		case 3:
+			pmlca = mfpmr(PMRN_PMLCA3);
+			break;
+		default:
+			panic("Bad ctr number\n");
+	}
+
+	return pmlca;
+}
+
+static inline void set_pmlca(int ctr, u32 pmlca)
+{
+	switch (ctr) {
+		case 0:
+			mtpmr(PMRN_PMLCA0, pmlca);
+			break;
+		case 1:
+			mtpmr(PMRN_PMLCA1, pmlca);
+			break;
+		case 2:
+			mtpmr(PMRN_PMLCA2, pmlca);
+			break;
+		case 3:
+			mtpmr(PMRN_PMLCA3, pmlca);
+			break;
+		default:
+			panic("Bad ctr number\n");
+	}
+}
+
+static inline unsigned int ctr_read(unsigned int i)
+{
+	switch(i) {
+		case 0:
+			return mfpmr(PMRN_PMC0);
+		case 1:
+			return mfpmr(PMRN_PMC1);
+		case 2:
+			return mfpmr(PMRN_PMC2);
+		case 3:
+			return mfpmr(PMRN_PMC3);
+		default:
+			return 0;
+	}
+}
+
+static inline void ctr_write(unsigned int i, unsigned int val)
+{
+	switch(i) {
+		case 0:
+			mtpmr(PMRN_PMC0, val);
+			break;
+		case 1:
+			mtpmr(PMRN_PMC1, val);
+			break;
+		case 2:
+			mtpmr(PMRN_PMC2, val);
+			break;
+		case 3:
+			mtpmr(PMRN_PMC3, val);
+			break;
+		default:
+			break;
+	}
+}
+
+
+#endif /* CONFIG_FSL_BOOKE */
+
 
 extern void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth);
 
diff --git a/include/asm-powerpc/pci.h b/include/asm-powerpc/pci.h
index 051694f14c3e..c77286051496 100644
--- a/include/asm-powerpc/pci.h
+++ b/include/asm-powerpc/pci.h
@@ -62,19 +62,13 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 }
 
 #ifdef CONFIG_PPC64
-#define HAVE_ARCH_PCI_MWI 1
-static inline int pcibios_prep_mwi(struct pci_dev *dev)
-{
-	/*
-	 * We would like to avoid touching the cacheline size or MWI bit
-	 * but we cant do that with the current pcibios_prep_mwi
-	 * interface. pSeries firmware sets the cacheline size (which is not
-	 * the cpu cacheline size in all cases) and hardware treats MWI
-	 * the same as memory write. So we dont touch the cacheline size
-	 * here and allow the generic code to set the MWI bit.
-	 */
-	return 0;
-}
+
+/*
+ * We want to avoid touching the cacheline size or MWI bit.
+ * pSeries firmware sets the cacheline size (which is not the cpu cacheline
+ * size in all cases) and hardware treats MWI the same as memory write.
+ */
+#define PCI_DISABLE_MWI
 
 extern struct dma_mapping_ops pci_dma_ops;
 
diff --git a/include/asm-powerpc/pmc.h b/include/asm-powerpc/pmc.h
index 07d6a4279319..8588be68e0ad 100644
--- a/include/asm-powerpc/pmc.h
+++ b/include/asm-powerpc/pmc.h
@@ -32,18 +32,5 @@ void release_pmc_hardware(void);
 void power4_enable_pmcs(void);
 #endif
 
-#ifdef CONFIG_FSL_BOOKE
-void init_pmc_stop(int ctr);
-void set_pmc_event(int ctr, int event);
-void set_pmc_user_kernel(int ctr, int user, int kernel);
-void set_pmc_marked(int ctr, int mark0, int mark1);
-void pmc_start_ctr(int ctr, int enable);
-void pmc_start_ctrs(int enable);
-void pmc_stop_ctrs(void);
-void dump_pmcs(void);
-
-extern struct op_powerpc_model op_model_fsl_booke;
-#endif
-
 #endif /* __KERNEL__ */
 #endif /* _POWERPC_PMC_H */
diff --git a/include/asm-powerpc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h
index a940cfe040da..fa083d8e4663 100644
--- a/include/asm-powerpc/ppc_asm.h
+++ b/include/asm-powerpc/ppc_asm.h
@@ -30,9 +30,9 @@ BEGIN_FTR_SECTION; \
	mfspr	ra,SPRN_PURR;		/* get processor util. reg */ \
 END_FTR_SECTION_IFSET(CPU_FTR_PURR); \
 BEGIN_FTR_SECTION; \
-	mftb	ra;			/* or get TB if no PURR */ \
+	MFTB(ra);			/* or get TB if no PURR */ \
 END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \
	ld	rb,PACA_STARTPURR(r13); \
	std	ra,PACA_STARTPURR(r13); \
	subf	rb,rb,ra;		/* subtract start value */ \
	ld	ra,PACA_USER_TIME(r13); \
@@ -45,9 +45,9 @@ BEGIN_FTR_SECTION; \
	mfspr	ra,SPRN_PURR;		/* get processor util. reg */ \
 END_FTR_SECTION_IFSET(CPU_FTR_PURR); \
 BEGIN_FTR_SECTION; \
-	mftb	ra;			/* or get TB if no PURR */ \
+	MFTB(ra);			/* or get TB if no PURR */ \
 END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \
	ld	rb,PACA_STARTPURR(r13); \
	std	ra,PACA_STARTPURR(r13); \
	subf	rb,rb,ra;		/* subtract start value */ \
	ld	ra,PACA_SYSTEM_TIME(r13); \
@@ -274,6 +274,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_601)
 #define ISYNC_601
 #endif
 
+#ifdef CONFIG_PPC_CELL
+#define MFTB(dest)			\
+90:	mftb  dest;			\
+BEGIN_FTR_SECTION_NESTED(96);		\
+	cmpwi dest,0;			\
+	beq-  90b;			\
+END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
+#else
+#define MFTB(dest)			mftb dest
+#endif
 
 #ifndef CONFIG_SMP
 #define TLBSYNC
diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h
index 1fd7a2253e3a..0afee17f33b4 100644
--- a/include/asm-powerpc/prom.h
+++ b/include/asm-powerpc/prom.h
@@ -135,7 +135,7 @@ extern struct device_node *of_find_all_nodes(struct device_node *prev);
 extern struct device_node *of_get_parent(const struct device_node *node);
 extern struct device_node *of_get_next_child(const struct device_node *node,
					     struct device_node *prev);
-extern struct property *of_find_property(struct device_node *np,
+extern struct property *of_find_property(const struct device_node *np,
					 const char *name,
					 int *lenp);
 extern struct device_node *of_node_get(struct device_node *node);
@@ -159,10 +159,12 @@ extern void of_detach_node(const struct device_node *);
 extern void finish_device_tree(void);
 extern void unflatten_device_tree(void);
 extern void early_init_devtree(void *);
-extern int device_is_compatible(struct device_node *device, const char *);
+extern int device_is_compatible(const struct device_node *device,
+				const char *);
 extern int machine_is_compatible(const char *compat);
-extern const void *get_property(struct device_node *node, const char *name,
-				int *lenp);
+extern const void *get_property(const struct device_node *node,
+				const char *name,
+				int *lenp);
 extern void print_properties(struct device_node *node);
 extern int prom_n_addr_cells(struct device_node* np);
 extern int prom_n_size_cells(struct device_node* np);
diff --git a/include/asm-powerpc/reg.h b/include/asm-powerpc/reg.h
index 8fb96811b55d..6faae7b14d55 100644
--- a/include/asm-powerpc/reg.h
+++ b/include/asm-powerpc/reg.h
@@ -591,6 +591,7 @@
 #define PV_630		0x0040
 #define PV_630p		0x0041
 #define PV_970MP	0x0044
+#define PV_970GX	0x0045
 #define PV_BE		0x0070
 #define PV_PA6T		0x0090
 
@@ -618,10 +619,35 @@
				     : "=r" (rval)); rval;})
 #define mtspr(rn, v)	asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v))
 
+#ifdef __powerpc64__
+#ifdef CONFIG_PPC_CELL
+#define mftb()		({unsigned long rval;				\
+			asm volatile(					\
+				"90:	mftb %0;\n"			\
+				"97:	cmpwi %0,0;\n"			\
+				"	beq- 90b;\n"			\
+				"99:\n"					\
+				".section __ftr_fixup,\"a\"\n"		\
+				".align 3\n"				\
+				"98:\n"					\
+				"	.llong %1\n"			\
+				"	.llong %1\n"			\
+				"	.llong 97b-98b\n"		\
+				"	.llong 99b-98b\n"		\
+				".previous"				\
+			: "=r" (rval) : "i" (CPU_FTR_CELL_TB_BUG)); rval;})
+#else
 #define mftb()		({unsigned long rval;	\
			asm volatile("mftb %0" : "=r" (rval)); rval;})
+#endif /* !CONFIG_PPC_CELL */
+
+#else /* __powerpc64__ */
+
 #define mftbl()		({unsigned long rval;	\
			asm volatile("mftbl %0" : "=r" (rval)); rval;})
+#define mftbu()		({unsigned long rval;	\
+			asm volatile("mftbu %0" : "=r" (rval)); rval;})
+#endif /* !__powerpc64__ */
 
 #define mttbl(v)	asm volatile("mttbl %0":: "r"(v))
 #define mttbu(v)	asm volatile("mttbu %0":: "r"(v))
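The CONFIG_PPC_CELL variant of mftb() above works around a Cell erratum in which a timebase read can transiently return zero: the inline asm retries until a non-zero value is seen, and the retry is nop-patched out via __ftr_fixup on CPUs without CPU_FTR_CELL_TB_BUG. As a hedged C sketch of just that retry logic (illustrative only, without the runtime patching):

static inline unsigned long read_timebase_cell_safe(void)
{
	unsigned long tb;

	do {
		asm volatile("mftb %0" : "=r" (tb));
	} while (tb == 0);	/* erratum: mftb may transiently read as 0 */

	return tb;
}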
diff --git a/include/asm-powerpc/systbl.h b/include/asm-powerpc/systbl.h
index eac85ce101b6..97b435484177 100644
--- a/include/asm-powerpc/systbl.h
+++ b/include/asm-powerpc/systbl.h
@@ -261,7 +261,7 @@ SYSX(sys_ni_syscall, ppc_fadvise64_64, ppc_fadvise64_64)
 PPC_SYS_SPU(rtas)
 OLDSYS(debug_setcontext)
 SYSCALL(ni_syscall)
-SYSCALL(ni_syscall)
+COMPAT_SYS(migrate_pages)
 COMPAT_SYS(mbind)
 COMPAT_SYS(get_mempolicy)
 COMPAT_SYS(set_mempolicy)
@@ -304,3 +304,4 @@ SYSCALL_SPU(fchmodat)
 SYSCALL_SPU(faccessat)
 COMPAT_SYS_SPU(get_robust_list)
 COMPAT_SYS_SPU(set_robust_list)
+COMPAT_SYS(move_pages)
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 43627596003b..f7b1227d6454 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -25,8 +25,8 @@
  *
  * We have to use the sync instructions for mb(), since lwsync doesn't
  * order loads with respect to previous stores.  Lwsync is fine for
- * rmb(), though. Note that lwsync is interpreted as sync by
- * 32-bit and older 64-bit CPUs.
+ * rmb(), though. Note that rmb() actually uses a sync on 32-bit
+ * architectures.
  *
  * For wmb(), we use sync since wmb is used in drivers to order
  * stores to system memory with respect to writes to the device.
@@ -34,7 +34,7 @@
  * SMP since it is only used to order updates to system memory.
  */
 #define mb()   __asm__ __volatile__ ("sync" : : : "memory")
-#define rmb()  __asm__ __volatile__ ("lwsync" : : : "memory")
+#define rmb()  __asm__ __volatile__ (__stringify(LWSYNC) : : : "memory")
 #define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
 #define read_barrier_depends()  do { } while(0)
 
diff --git a/include/asm-powerpc/tce.h b/include/asm-powerpc/tce.h
index c9483adbf599..f663634cccc9 100644
--- a/include/asm-powerpc/tce.h
+++ b/include/asm-powerpc/tce.h
@@ -22,6 +22,8 @@
 #define _ASM_POWERPC_TCE_H
 #ifdef __KERNEL__
 
+#include <asm/iommu.h>
+
 /*
  * Tces come in two formats, one for the virtual bus and a different
  * format for PCI
@@ -33,7 +35,6 @@
 
 #define TCE_SHIFT	12
 #define TCE_PAGE_SIZE	(1 << TCE_SHIFT)
-#define TCE_PAGE_FACTOR	(PAGE_SHIFT - TCE_SHIFT)
 
 #define TCE_ENTRY_SIZE	8		/* each TCE is 64 bits */
 
diff --git a/include/asm-powerpc/time.h b/include/asm-powerpc/time.h
index b051d4c88c3b..4cff977ad526 100644
--- a/include/asm-powerpc/time.h
+++ b/include/asm-powerpc/time.h
@@ -39,10 +39,6 @@ extern void generic_calibrate_decr(void);
 extern void wakeup_decrementer(void);
 extern void snapshot_timebase(void);
 
-#ifdef CONFIG_RTC_CLASS
-extern int __init rtc_class_hookup(void);
-#endif
-
 /* Some sane defaults: 125 MHz timebase, 1GHz processor */
 extern unsigned long ppc_proc_freq;
 #define DEFAULT_PROC_FREQ	(DEFAULT_TB_FREQ * 8)
@@ -82,30 +78,35 @@ struct div_result {
 #define __USE_RTC()	0
 #endif
 
-/* On ppc64 this gets us the whole timebase; on ppc32 just the lower half */
+#ifdef CONFIG_PPC64
+
+/* For compatibility, get_tbl() is defined as get_tb() on ppc64 */
+#define get_tbl		get_tb
+
+#else
+
 static inline unsigned long get_tbl(void)
 {
-	unsigned long tbl;
-
 #if defined(CONFIG_403GCX)
+	unsigned long tbl;
	asm volatile("mfspr %0, 0x3dd" : "=r" (tbl));
+	return tbl;
 #else
-	asm volatile("mftb %0" : "=r" (tbl));
+	return mftbl();
 #endif
-	return tbl;
 }
 
 static inline unsigned int get_tbu(void)
 {
+#ifdef CONFIG_403GCX
	unsigned int tbu;
-
-#if defined(CONFIG_403GCX)
	asm volatile("mfspr %0, 0x3dc" : "=r" (tbu));
+	return tbu;
 #else
-	asm volatile("mftbu %0" : "=r" (tbu));
+	return mftbu();
 #endif
-	return tbu;
 }
+#endif /* !CONFIG_PPC64 */
 
 static inline unsigned int get_rtcl(void)
 {
@@ -131,7 +132,7 @@ static inline u64 get_tb(void)
 {
	return mftb();
 }
-#else
+#else /* CONFIG_PPC64 */
 static inline u64 get_tb(void)
 {
	unsigned int tbhi, tblo, tbhi2;
@@ -144,7 +145,7 @@ static inline u64 get_tb(void)
 
	return ((u64)tbhi << 32) | tblo;
 }
-#endif
+#endif /* !CONFIG_PPC64 */
 
 static inline void set_tb(unsigned int upper, unsigned int lower)
 {
diff --git a/include/asm-powerpc/timex.h b/include/asm-powerpc/timex.h
index 3b9a8e786806..92dedde761d1 100644
--- a/include/asm-powerpc/timex.h
+++ b/include/asm-powerpc/timex.h
@@ -8,6 +8,7 @@
  */
 
 #include <asm/cputable.h>
+#include <asm/reg.h>
 
 #define CLOCK_TICK_RATE	1024000 /* Underlying HZ */
 
@@ -15,13 +16,11 @@ typedef unsigned long cycles_t;
 
 static inline cycles_t get_cycles(void)
 {
-	cycles_t ret;
-
 #ifdef __powerpc64__
-
-	__asm__ __volatile__("mftb %0" : "=r" (ret) : );
-
+	return mftb();
 #else
+	cycles_t ret;
+
	/*
	 * For the "cycle" counter we use the timebase lower half.
	 * Currently only used on SMP.
@@ -30,18 +29,19 @@ static inline cycles_t get_cycles(void)
	ret = 0;
 
	__asm__ __volatile__(
-		"98:	mftb %0\n"
+		"97:	mftb %0\n"
		"99:\n"
		".section __ftr_fixup,\"a\"\n"
+		".align 2\n"
+		"98:\n"
		"	.long %1\n"
		"	.long 0\n"
-		"	.long 98b\n"
-		"	.long 99b\n"
+		"	.long 97b-98b\n"
+		"	.long 99b-98b\n"
		".previous"
		: "=r" (ret) : "i" (CPU_FTR_601));
-#endif
-
	return ret;
+#endif
 }
 
 #endif	/* __KERNEL__ */
diff --git a/include/asm-powerpc/topology.h b/include/asm-powerpc/topology.h
index 8f7ee16781a4..9fe7894ee035 100644
--- a/include/asm-powerpc/topology.h
+++ b/include/asm-powerpc/topology.h
@@ -96,7 +96,13 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev,
 
 #ifdef CONFIG_SMP
 #include <asm/cputable.h>
 #define smt_capable()		(cpu_has_feature(CPU_FTR_SMT))
+
+#ifdef CONFIG_PPC64
+#include <asm/smp.h>
+
+#define topology_thread_siblings(cpu)	(cpu_sibling_map[cpu])
+#endif
 #endif
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-powerpc/unistd.h b/include/asm-powerpc/unistd.h
index 464a48cce7f5..0e4ea37f6466 100644
--- a/include/asm-powerpc/unistd.h
+++ b/include/asm-powerpc/unistd.h
@@ -276,7 +276,7 @@
 #define __NR_rtas		255
 #define __NR_sys_debug_setcontext 256
 /* Number 257 is reserved for vserver */
-/* 258 currently unused */
+#define __NR_migrate_pages	258
 #define __NR_mbind		259
 #define __NR_get_mempolicy	260
 #define __NR_set_mempolicy	261
@@ -323,10 +323,11 @@
 #define __NR_faccessat		298
 #define __NR_get_robust_list	299
 #define __NR_set_robust_list	300
+#define __NR_move_pages		301
 
 #ifdef __KERNEL__
 
-#define __NR_syscalls		301
+#define __NR_syscalls		302
 
 #define __NR__exit __NR_exit
 #define NR_syscalls	__NR_syscalls
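With __NR_migrate_pages (258) and __NR_move_pages (301) now wired up in systbl.h and unistd.h, the calls can be issued directly via syscall(2) on powerpc. A hedged userspace illustration follows; the argument order mirrors the kernel's sys_move_pages prototype, and per the move_pages(2) man page a NULL nodes array only queries the node of each page rather than moving anything:

/* Hedged example, not part of the patch: query which NUMA node a page is on. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	static char buf[4096];
	void *pages[1] = { buf };
	int status[1] = { -1 };
	long ret;

	buf[0] = 1;	/* touch the page so it is actually mapped */

	/* pid 0 = current process, count 1, nodes = NULL, flags 0 */
	ret = syscall(301 /* __NR_move_pages on powerpc */, 0, 1UL,
		      pages, NULL, status, 0);

	if (ret == 0)
		printf("page is on node %d\n", status[0]);
	else
		perror("move_pages");
	return 0;
}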