-rw-r--r--  arch/powerpc/Kconfig | 20
-rw-r--r--  arch/powerpc/kernel/Makefile | 1
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c | 9
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 2
-rw-r--r--  arch/powerpc/kernel/rtas_fw.c (renamed from arch/powerpc/platforms/pseries/rtas-fw.c) | 4
-rw-r--r--  arch/powerpc/kernel/setup-common.c | 4
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 23
-rw-r--r--  arch/powerpc/kernel/traps.c | 12
-rw-r--r--  arch/powerpc/lib/Makefile | 9
-rw-r--r--  arch/powerpc/lib/bitops.c (renamed from arch/ppc64/kernel/bitops.c) | 97
-rw-r--r--  arch/powerpc/platforms/Makefile | 1
-rw-r--r--  arch/powerpc/platforms/cell/Makefile | 2
-rw-r--r--  arch/powerpc/platforms/cell/interrupt.c (renamed from arch/ppc64/kernel/bpa_iic.c) | 8
-rw-r--r--  arch/powerpc/platforms/cell/interrupt.h (renamed from arch/ppc64/kernel/bpa_iic.h) | 8
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c (renamed from arch/ppc64/kernel/bpa_iommu.c) | 44
-rw-r--r--  arch/powerpc/platforms/cell/iommu.h (renamed from arch/ppc64/kernel/bpa_iommu.h) | 10
-rw-r--r--  arch/powerpc/platforms/cell/setup.c (renamed from arch/ppc64/kernel/bpa_setup.c) | 46
-rw-r--r--  arch/powerpc/platforms/cell/smp.c | 230
-rw-r--r--  arch/powerpc/platforms/cell/spider-pic.c (renamed from arch/ppc64/kernel/spider-pic.c) | 2
-rw-r--r--  arch/powerpc/platforms/chrp/nvram.c | 6
-rw-r--r--  arch/powerpc/platforms/iseries/setup.c | 42
-rw-r--r--  arch/powerpc/platforms/powermac/pic.c | 8
-rw-r--r--  arch/powerpc/platforms/pseries/Makefile | 2
-rw-r--r--  arch/powerpc/platforms/pseries/rtas-fw.h | 3
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c | 2
-rw-r--r--  arch/powerpc/sysdev/Makefile | 1
-rw-r--r--  arch/powerpc/sysdev/mmio_nvram.c (renamed from arch/ppc64/kernel/bpa_nvram.c) | 60
-rw-r--r--  arch/ppc/Makefile | 3
-rw-r--r--  arch/ppc/kernel/bitops.c | 126
-rw-r--r--  arch/ppc64/Makefile | 1
-rw-r--r--  arch/ppc64/kernel/Makefile | 5
-rw-r--r--  arch/ppc64/kernel/irq.c | 2
-rw-r--r--  arch/ppc64/kernel/proc_ppc64.c | 2
-rw-r--r--  arch/ppc64/kernel/prom_init.c | 4
-rw-r--r--  arch/ppc64/lib/Makefile | 5
-rw-r--r--  arch/ppc64/lib/string.S | 179
-rw-r--r--  drivers/macintosh/via-pmu.c | 29
-rw-r--r--  include/asm-powerpc/bitops.h | 437
-rw-r--r--  include/asm-powerpc/bug.h | 34
-rw-r--r--  include/asm-powerpc/futex.h (renamed from include/asm-ppc64/futex.h) | 45
-rw-r--r--  include/asm-powerpc/ioctls.h | 3
-rw-r--r--  include/asm-powerpc/ipcbuf.h | 33
-rw-r--r--  include/asm-powerpc/irq.h | 2
-rw-r--r--  include/asm-powerpc/ppc_asm.h | 7
-rw-r--r--  include/asm-powerpc/processor.h | 2
-rw-r--r--  include/asm-powerpc/rtas.h | 3
-rw-r--r--  include/asm-powerpc/termios.h | 135
-rw-r--r--  include/asm-powerpc/uaccess.h | 468
-rw-r--r--  include/asm-ppc/bitops.h | 460
-rw-r--r--  include/asm-ppc/futex.h | 53
-rw-r--r--  include/asm-ppc/ipcbuf.h | 29
-rw-r--r--  include/asm-ppc/uaccess.h | 393
-rw-r--r--  include/asm-ppc64/bitops.h | 360
-rw-r--r--  include/asm-ppc64/ipcbuf.h | 28
-rw-r--r--  include/asm-ppc64/mmu_context.h | 15
-rw-r--r--  include/asm-ppc64/nvram.h | 2
-rw-r--r--  include/asm-ppc64/smp.h | 1
-rw-r--r--  include/asm-ppc64/uaccess.h | 341
58 files changed, 1459 insertions, 2404 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 967ecf92d6a7..1c44a1dac421 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -278,6 +278,7 @@ config PPC_PSERIES
278 select PPC_I8259 278 select PPC_I8259
279 select PPC_RTAS 279 select PPC_RTAS
280 select RTAS_ERROR_LOGGING 280 select RTAS_ERROR_LOGGING
281 select RTAS_FW
281 default y 282 default y
282 283
283config PPC_CHRP 284config PPC_CHRP
@@ -319,10 +320,12 @@ config PPC_MAPLE
319 This option enables support for the Maple 970FX Evaluation Board. 320 This option enables support for the Maple 970FX Evaluation Board.
320 For more information, refer to <http://www.970eval.com> 321 For more information, refer to <http://www.970eval.com>
321 322
322config PPC_BPA 323config PPC_CELL
323 bool " Broadband Processor Architecture" 324 bool " Cell Broadband Processor Architecture"
324 depends on PPC_MULTIPLATFORM && PPC64 325 depends on PPC_MULTIPLATFORM && PPC64
325 select PPC_RTAS 326 select PPC_RTAS
327 select RTAS_FW
328 select MMIO_NVRAM
326 329
327config PPC_OF 330config PPC_OF
328 bool 331 bool
@@ -353,13 +356,22 @@ config RTAS_ERROR_LOGGING
353 depends on PPC_RTAS 356 depends on PPC_RTAS
354 default n 357 default n
355 358
359config RTAS_FW
360 bool
361 depends on PPC_RTAS
362 default n
363
364config MMIO_NVRAM
365 bool
366 default n
367
356config MPIC_BROKEN_U3 368config MPIC_BROKEN_U3
357 bool 369 bool
358 depends on PPC_MAPLE 370 depends on PPC_MAPLE
359 default y 371 default y
360 372
361config BPA_IIC 373config CELL_IIC
362 depends on PPC_BPA 374 depends on PPC_CELL
363 bool 375 bool
364 default y 376 default y
365 377
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 572d4f5eaacb..abad3059a21a 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
18obj-$(CONFIG_POWER4) += idle_power4.o 18obj-$(CONFIG_POWER4) += idle_power4.o
19obj-$(CONFIG_PPC_OF) += of_device.o 19obj-$(CONFIG_PPC_OF) += of_device.o
20obj-$(CONFIG_PPC_RTAS) += rtas.o 20obj-$(CONFIG_PPC_RTAS) += rtas.o
21obj-$(CONFIG_RTAS_FW) += rtas_fw.o
21obj-$(CONFIG_IBMVIO) += vio.o 22obj-$(CONFIG_IBMVIO) += vio.o
22 23
23ifeq ($(CONFIG_PPC_MERGE),y) 24ifeq ($(CONFIG_PPC_MERGE),y)
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 8bc540337ba0..47d6f7e2ea9f 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -81,15 +81,6 @@ EXPORT_SYMBOL(_prep_type);
81EXPORT_SYMBOL(ucSystemType); 81EXPORT_SYMBOL(ucSystemType);
82#endif 82#endif
83 83
84#if !defined(__INLINE_BITOPS)
85EXPORT_SYMBOL(set_bit);
86EXPORT_SYMBOL(clear_bit);
87EXPORT_SYMBOL(change_bit);
88EXPORT_SYMBOL(test_and_set_bit);
89EXPORT_SYMBOL(test_and_clear_bit);
90EXPORT_SYMBOL(test_and_change_bit);
91#endif /* __INLINE_BITOPS */
92
93EXPORT_SYMBOL(strcpy); 84EXPORT_SYMBOL(strcpy);
94EXPORT_SYMBOL(strncpy); 85EXPORT_SYMBOL(strncpy);
95EXPORT_SYMBOL(strcat); 86EXPORT_SYMBOL(strcat);
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 9750b3cd8ecd..c758b6624d7b 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -2000,7 +2000,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
2000#endif 2000#endif
2001 2001
2002 /* 2002 /*
2003 * On pSeries and BPA, copy the CPU hold code 2003 * Copy the CPU hold code
2004 */ 2004 */
2005 if (RELOC(of_platform) != PLATFORM_POWERMAC) 2005 if (RELOC(of_platform) != PLATFORM_POWERMAC)
2006 copy_and_flush(0, KERNELBASE + offset, 0x100, 0); 2006 copy_and_flush(0, KERNELBASE + offset, 0x100, 0);
diff --git a/arch/powerpc/platforms/pseries/rtas-fw.c b/arch/powerpc/kernel/rtas_fw.c
index 15d81d758ca0..448922e8af1b 100644
--- a/arch/powerpc/platforms/pseries/rtas-fw.c
+++ b/arch/powerpc/kernel/rtas_fw.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * 2 *
3 * Procedures for firmware flash updates on pSeries systems. 3 * Procedures for firmware flash updates.
4 * 4 *
5 * Peter Bergner, IBM March 2001. 5 * Peter Bergner, IBM March 2001.
6 * Copyright (C) 2001 IBM. 6 * Copyright (C) 2001 IBM.
@@ -31,8 +31,6 @@
31#include <asm/uaccess.h> 31#include <asm/uaccess.h>
32#include <asm/systemcfg.h> 32#include <asm/systemcfg.h>
33 33
34#include "rtas-fw.h"
35
36struct flash_block_list_header rtas_firmware_flash_list = {0, NULL}; 34struct flash_block_list_header rtas_firmware_flash_list = {0, NULL};
37 35
38#define FLASH_BLOCK_LIST_VERSION (1UL) 36#define FLASH_BLOCK_LIST_VERSION (1UL)
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 1292460fcde2..14ebe3bc48c3 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -201,11 +201,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
201#ifdef CONFIG_TAU_AVERAGE 201#ifdef CONFIG_TAU_AVERAGE
202 /* more straightforward, but potentially misleading */ 202 /* more straightforward, but potentially misleading */
203 seq_printf(m, "temperature \t: %u C (uncalibrated)\n", 203 seq_printf(m, "temperature \t: %u C (uncalibrated)\n",
204 cpu_temp(i)); 204 cpu_temp(cpu_id));
205#else 205#else
206 /* show the actual temp sensor range */ 206 /* show the actual temp sensor range */
207 u32 temp; 207 u32 temp;
208 temp = cpu_temp_both(i); 208 temp = cpu_temp_both(cpu_id);
209 seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n", 209 seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n",
210 temp & 0xff, temp >> 16); 210 temp & 0xff, temp >> 16);
211#endif 211#endif
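
The hunk above fixes show_cpuinfo() to read the TAU sensors for cpu_id rather than a stale loop index `i` that no longer exists in that function. A minimal sketch of where cpu_id comes from, assuming the usual seq_file convention in this tree (the `- 1` bias is an assumption, not shown in the hunk):

static int show_cpuinfo(struct seq_file *m, void *v)
{
	/* assumed convention: c_start()/c_next() return cpu + 1 so a
	 * NULL iterator can still mean "end of sequence" */
	unsigned long cpu_id = (unsigned long)v - 1;

	/* ... the TAU readout then uses cpu_temp(cpu_id) as above ... */
	return 0;
}
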
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 079867e18145..d4a3c5dd1a21 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -296,7 +296,7 @@ static void __init setup_cpu_maps(void)
296extern struct machdep_calls pSeries_md; 296extern struct machdep_calls pSeries_md;
297extern struct machdep_calls pmac_md; 297extern struct machdep_calls pmac_md;
298extern struct machdep_calls maple_md; 298extern struct machdep_calls maple_md;
299extern struct machdep_calls bpa_md; 299extern struct machdep_calls cell_md;
300extern struct machdep_calls iseries_md; 300extern struct machdep_calls iseries_md;
301 301
302/* Ultimately, stuff them in an elf section like initcalls... */ 302/* Ultimately, stuff them in an elf section like initcalls... */
@@ -310,8 +310,8 @@ static struct machdep_calls __initdata *machines[] = {
310#ifdef CONFIG_PPC_MAPLE 310#ifdef CONFIG_PPC_MAPLE
311 &maple_md, 311 &maple_md,
312#endif /* CONFIG_PPC_MAPLE */ 312#endif /* CONFIG_PPC_MAPLE */
313#ifdef CONFIG_PPC_BPA 313#ifdef CONFIG_PPC_CELL
314 &bpa_md, 314 &cell_md,
315#endif 315#endif
316#ifdef CONFIG_PPC_ISERIES 316#ifdef CONFIG_PPC_ISERIES
317 &iseries_md, 317 &iseries_md,
@@ -631,23 +631,6 @@ static int ppc64_panic_event(struct notifier_block *this,
631 return NOTIFY_DONE; 631 return NOTIFY_DONE;
632} 632}
633 633
634#ifdef CONFIG_PPC_ISERIES
635/*
636 * On iSeries we just parse the mem=X option from the command line.
637 * On pSeries it's a bit more complicated, see prom_init_mem()
638 */
639static int __init early_parsemem(char *p)
640{
641 if (!p)
642 return 0;
643
644 memory_limit = ALIGN(memparse(p, &p), PAGE_SIZE);
645
646 return 0;
647}
648early_param("mem", early_parsemem);
649#endif /* CONFIG_PPC_ISERIES */
650
651#ifdef CONFIG_IRQSTACKS 634#ifdef CONFIG_IRQSTACKS
652static void __init irqstack_early_init(void) 635static void __init irqstack_early_init(void)
653{ 636{
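
setup_64.c keeps machines[] as a table of machdep_calls pointers and probes each entry until one accepts the platform number from the device tree; this rename just swaps bpa_md for cell_md in that table. A self-contained mock of the dispatch pattern (the platform values and the harness are invented for illustration; only the probe-table shape comes from the code above):

#include <stdio.h>

struct machdep_calls {
	const char *name;
	int (*probe)(int platform);	/* the kernel struct has many more hooks */
};

#define PLATFORM_PSERIES 0x1	/* made-up value for the demo */
#define PLATFORM_CELL    0x5	/* made-up value for the demo */

static int pseries_probe(int platform) { return platform == PLATFORM_PSERIES; }
static int cell_probe(int platform)    { return platform == PLATFORM_CELL; }

static struct machdep_calls pseries_md = { "pseries", pseries_probe };
static struct machdep_calls cell_md    = { "cell", cell_probe };

static struct machdep_calls *machines[] = { &pseries_md, &cell_md, NULL };

int main(void)
{
	int platform = PLATFORM_CELL;	/* normally read from the device tree */
	struct machdep_calls **mach;

	for (mach = machines; *mach; mach++)
		if ((*mach)->probe(platform)) {
			printf("selected %s\n", (*mach)->name);
			break;
		}
	return 0;
}
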
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 5d638ecddbd0..07e5ee40b870 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -147,8 +147,8 @@ int die(const char *str, struct pt_regs *regs, long err)
147 printk("POWERMAC "); 147 printk("POWERMAC ");
148 nl = 1; 148 nl = 1;
149 break; 149 break;
150 case PLATFORM_BPA: 150 case PLATFORM_CELL:
151 printk("BPA "); 151 printk("CELL ");
152 nl = 1; 152 nl = 1;
153 break; 153 break;
154 } 154 }
@@ -749,22 +749,22 @@ static int check_bug_trap(struct pt_regs *regs)
749 if (bug->line & BUG_WARNING_TRAP) { 749 if (bug->line & BUG_WARNING_TRAP) {
750 /* this is a WARN_ON rather than BUG/BUG_ON */ 750 /* this is a WARN_ON rather than BUG/BUG_ON */
751#ifdef CONFIG_XMON 751#ifdef CONFIG_XMON
752 xmon_printf(KERN_ERR "Badness in %s at %s:%d\n", 752 xmon_printf(KERN_ERR "Badness in %s at %s:%ld\n",
753 bug->function, bug->file, 753 bug->function, bug->file,
754 bug->line & ~BUG_WARNING_TRAP); 754 bug->line & ~BUG_WARNING_TRAP);
755#endif /* CONFIG_XMON */ 755#endif /* CONFIG_XMON */
756 printk(KERN_ERR "Badness in %s at %s:%d\n", 756 printk(KERN_ERR "Badness in %s at %s:%ld\n",
757 bug->function, bug->file, 757 bug->function, bug->file,
758 bug->line & ~BUG_WARNING_TRAP); 758 bug->line & ~BUG_WARNING_TRAP);
759 dump_stack(); 759 dump_stack();
760 return 1; 760 return 1;
761 } 761 }
762#ifdef CONFIG_XMON 762#ifdef CONFIG_XMON
763 xmon_printf(KERN_CRIT "kernel BUG in %s at %s:%d!\n", 763 xmon_printf(KERN_CRIT "kernel BUG in %s at %s:%ld!\n",
764 bug->function, bug->file, bug->line); 764 bug->function, bug->file, bug->line);
765 xmon(regs); 765 xmon(regs);
766#endif /* CONFIG_XMON */ 766#endif /* CONFIG_XMON */
767 printk(KERN_CRIT "kernel BUG in %s at %s:%d!\n", 767 printk(KERN_CRIT "kernel BUG in %s at %s:%ld!\n",
768 bug->function, bug->file, bug->line); 768 bug->function, bug->file, bug->line);
769 769
770 return 0; 770 return 0;
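
The format strings switch from %d to %ld because bug->line is a long in the merged bug table (include/asm-powerpc/bug.h in the diffstat above), with one bit borrowed as the WARN flag. A runnable sketch under that assumption; the struct layout and the BUG_WARNING_TRAP value are reconstructed from that era's header, not taken from this diff:

#include <stdio.h>

#define BUG_WARNING_TRAP 0x1000000L	/* assumed flag bit inside ->line */

struct bug_entry {
	unsigned long bug_addr;
	long line;			/* hence %ld, not %d */
	const char *file;
	const char *function;
};

int main(void)
{
	struct bug_entry bug = { 0, 42 | BUG_WARNING_TRAP, "foo.c", "bar" };

	printf("Badness in %s at %s:%ld\n", bug.function, bug.file,
	       bug.line & ~BUG_WARNING_TRAP);	/* prints line 42 */
	return 0;
}
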
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index e6b2be3bcec1..34f5c2e074c9 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -3,13 +3,14 @@
3# 3#
4 4
5ifeq ($(CONFIG_PPC_MERGE),y) 5ifeq ($(CONFIG_PPC_MERGE),y)
6obj-y := string.o 6obj-y := string.o strcase.o
7obj-$(CONFIG_PPC32) += div64.o copy_32.o checksum_32.o
7endif 8endif
8 9
9obj-y += strcase.o 10obj-y += bitops.o
10obj-$(CONFIG_PPC32) += div64.o copy_32.o checksum_32.o
11obj-$(CONFIG_PPC64) += checksum_64.o copypage_64.o copyuser_64.o \ 11obj-$(CONFIG_PPC64) += checksum_64.o copypage_64.o copyuser_64.o \
12 memcpy_64.o usercopy_64.o mem_64.o 12 memcpy_64.o usercopy_64.o mem_64.o string.o \
13 strcase.o
13obj-$(CONFIG_PPC_ISERIES) += e2a.o 14obj-$(CONFIG_PPC_ISERIES) += e2a.o
14obj-$(CONFIG_XMON) += sstep.o 15obj-$(CONFIG_XMON) += sstep.o
15 16
diff --git a/arch/ppc64/kernel/bitops.c b/arch/powerpc/lib/bitops.c
index ae329e8b4acb..b67ce3004ebf 100644
--- a/arch/ppc64/kernel/bitops.c
+++ b/arch/powerpc/lib/bitops.c
@@ -1,93 +1,97 @@
1/* 1#include <linux/types.h>
2 * These are too big to be inlined.
3 */
4
5#include <linux/kernel.h>
6#include <linux/module.h> 2#include <linux/module.h>
7#include <linux/bitops.h>
8#include <asm/byteorder.h> 3#include <asm/byteorder.h>
4#include <asm/bitops.h>
9 5
10unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, 6/**
11 unsigned long offset) 7 * find_next_bit - find the next set bit in a memory region
8 * @addr: The address to base the search on
9 * @offset: The bitnumber to start searching at
10 * @size: The maximum size to search
11 */
12unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
13 unsigned long offset)
12{ 14{
13 const unsigned long *p = addr + (offset >> 6); 15 const unsigned long *p = addr + BITOP_WORD(offset);
14 unsigned long result = offset & ~63UL; 16 unsigned long result = offset & ~(BITS_PER_LONG-1);
15 unsigned long tmp; 17 unsigned long tmp;
16 18
17 if (offset >= size) 19 if (offset >= size)
18 return size; 20 return size;
19 size -= result; 21 size -= result;
20 offset &= 63UL; 22 offset %= BITS_PER_LONG;
21 if (offset) { 23 if (offset) {
22 tmp = *(p++); 24 tmp = *(p++);
23 tmp |= ~0UL >> (64 - offset); 25 tmp &= (~0UL << offset);
24 if (size < 64) 26 if (size < BITS_PER_LONG)
25 goto found_first; 27 goto found_first;
26 if (~tmp) 28 if (tmp)
27 goto found_middle; 29 goto found_middle;
28 size -= 64; 30 size -= BITS_PER_LONG;
29 result += 64; 31 result += BITS_PER_LONG;
30 } 32 }
31 while (size & ~63UL) { 33 while (size & ~(BITS_PER_LONG-1)) {
32 if (~(tmp = *(p++))) 34 if ((tmp = *(p++)))
33 goto found_middle; 35 goto found_middle;
34 result += 64; 36 result += BITS_PER_LONG;
35 size -= 64; 37 size -= BITS_PER_LONG;
36 } 38 }
37 if (!size) 39 if (!size)
38 return result; 40 return result;
39 tmp = *p; 41 tmp = *p;
40 42
41found_first: 43found_first:
42 tmp |= ~0UL << size; 44 tmp &= (~0UL >> (64 - size));
43 if (tmp == ~0UL) /* Are any bits zero? */ 45 if (tmp == 0UL) /* Are any bits set? */
44 return result + size; /* Nope. */ 46 return result + size; /* Nope. */
45found_middle: 47found_middle:
46 return result + ffz(tmp); 48 return result + __ffs(tmp);
47} 49}
50EXPORT_SYMBOL(find_next_bit);
48 51
49EXPORT_SYMBOL(find_next_zero_bit); 52/*
50 53 * This implementation of find_{first,next}_zero_bit was stolen from
51unsigned long find_next_bit(const unsigned long *addr, unsigned long size, 54 * Linus' asm-alpha/bitops.h.
52 unsigned long offset) 55 */
56unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
57 unsigned long offset)
53{ 58{
54 const unsigned long *p = addr + (offset >> 6); 59 const unsigned long *p = addr + BITOP_WORD(offset);
55 unsigned long result = offset & ~63UL; 60 unsigned long result = offset & ~(BITS_PER_LONG-1);
56 unsigned long tmp; 61 unsigned long tmp;
57 62
58 if (offset >= size) 63 if (offset >= size)
59 return size; 64 return size;
60 size -= result; 65 size -= result;
61 offset &= 63UL; 66 offset %= BITS_PER_LONG;
62 if (offset) { 67 if (offset) {
63 tmp = *(p++); 68 tmp = *(p++);
64 tmp &= (~0UL << offset); 69 tmp |= ~0UL >> (BITS_PER_LONG - offset);
65 if (size < 64) 70 if (size < BITS_PER_LONG)
66 goto found_first; 71 goto found_first;
67 if (tmp) 72 if (~tmp)
68 goto found_middle; 73 goto found_middle;
69 size -= 64; 74 size -= BITS_PER_LONG;
70 result += 64; 75 result += BITS_PER_LONG;
71 } 76 }
72 while (size & ~63UL) { 77 while (size & ~(BITS_PER_LONG-1)) {
73 if ((tmp = *(p++))) 78 if (~(tmp = *(p++)))
74 goto found_middle; 79 goto found_middle;
75 result += 64; 80 result += BITS_PER_LONG;
76 size -= 64; 81 size -= BITS_PER_LONG;
77 } 82 }
78 if (!size) 83 if (!size)
79 return result; 84 return result;
80 tmp = *p; 85 tmp = *p;
81 86
82found_first: 87found_first:
83 tmp &= (~0UL >> (64 - size)); 88 tmp |= ~0UL << size;
84 if (tmp == 0UL) /* Are any bits set? */ 89 if (tmp == ~0UL) /* Are any bits zero? */
85 return result + size; /* Nope. */ 90 return result + size; /* Nope. */
86found_middle: 91found_middle:
87 return result + __ffs(tmp); 92 return result + ffz(tmp);
88} 93}
89 94EXPORT_SYMBOL(find_next_zero_bit);
90EXPORT_SYMBOL(find_next_bit);
91 95
92static inline unsigned int ext2_ilog2(unsigned int x) 96static inline unsigned int ext2_ilog2(unsigned int x)
93{ 97{
@@ -106,8 +110,8 @@ static inline unsigned int ext2_ffz(unsigned int x)
106 return rc; 110 return rc;
107} 111}
108 112
109unsigned long find_next_zero_le_bit(const unsigned long *addr, unsigned long size, 113unsigned long find_next_zero_le_bit(const unsigned long *addr,
110 unsigned long offset) 114 unsigned long size, unsigned long offset)
111{ 115{
112 const unsigned int *p = ((const unsigned int *)addr) + (offset >> 5); 116 const unsigned int *p = ((const unsigned int *)addr) + (offset >> 5);
113 unsigned int result = offset & ~31; 117 unsigned int result = offset & ~31;
@@ -143,5 +147,4 @@ found_first:
143found_middle: 147found_middle:
144 return result + ext2_ffz(tmp); 148 return result + ext2_ffz(tmp);
145} 149}
146
147EXPORT_SYMBOL(find_next_zero_le_bit); 150EXPORT_SYMBOL(find_next_zero_le_bit);
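
The rewritten helpers replace hard-coded 64s with BITS_PER_LONG so a single bitops.c can serve both 32- and 64-bit builds (one spot slipped through: the new found_first mask in find_next_bit still reads `~0UL >> (64 - size)`). A self-contained user-space rendering of the same word-scan, with the helpers the kernel pulls from asm/bitops.h spelled out; an illustration, not the kernel file:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)

/* index of the least significant set bit; x must be non-zero */
static unsigned long __ffs(unsigned long x)
{
	unsigned long n = 0;
	while (!(x & 1UL)) { x >>= 1; n++; }
	return n;
}

static unsigned long find_next_bit(const unsigned long *addr,
				   unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BITOP_WORD(offset);
	unsigned long result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset %= BITS_PER_LONG;
	if (offset) {				/* partial first word */
		tmp = *(p++) & (~0UL << offset);
		if (size < BITS_PER_LONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}
	while (size & ~(BITS_PER_LONG - 1)) {	/* whole words */
		if ((tmp = *(p++)))
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;
found_first:
	tmp &= ~0UL >> (BITS_PER_LONG - size);	/* mask bits past size */
	if (tmp == 0UL)
		return result + size;		/* nothing set */
found_middle:
	return result + __ffs(tmp);
}

int main(void)
{
	unsigned long map[2] = { 0, 1UL << 3 };	/* bit BITS_PER_LONG + 3 */

	printf("next set bit: %lu\n",
	       find_next_bit(map, 2 * BITS_PER_LONG, 0));
	return 0;
}
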
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
index 172c0db63504..8836b3a00668 100644
--- a/arch/powerpc/platforms/Makefile
+++ b/arch/powerpc/platforms/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_85xx) += 85xx/
11obj-$(CONFIG_PPC_PSERIES) += pseries/ 11obj-$(CONFIG_PPC_PSERIES) += pseries/
12obj-$(CONFIG_PPC_ISERIES) += iseries/ 12obj-$(CONFIG_PPC_ISERIES) += iseries/
13obj-$(CONFIG_PPC_MAPLE) += maple/ 13obj-$(CONFIG_PPC_MAPLE) += maple/
14obj-$(CONFIG_PPC_CELL) += cell/
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
new file mode 100644
index 000000000000..55e094b96bc0
--- /dev/null
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -0,0 +1,2 @@
1obj-y += interrupt.o iommu.o setup.o spider-pic.o
2obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/ppc64/kernel/bpa_iic.c b/arch/powerpc/platforms/cell/interrupt.c
index 0aaa878e19d3..7fbe78a9327d 100644
--- a/arch/ppc64/kernel/bpa_iic.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * BPA Internal Interrupt Controller 2 * Cell Internal Interrupt Controller
3 * 3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 * 5 *
@@ -31,7 +31,7 @@
31#include <asm/prom.h> 31#include <asm/prom.h>
32#include <asm/ptrace.h> 32#include <asm/ptrace.h>
33 33
34#include "bpa_iic.h" 34#include "interrupt.h"
35 35
36struct iic_pending_bits { 36struct iic_pending_bits {
37 u32 data; 37 u32 data;
@@ -89,7 +89,7 @@ static void iic_end(unsigned int irq)
89} 89}
90 90
91static struct hw_interrupt_type iic_pic = { 91static struct hw_interrupt_type iic_pic = {
92 .typename = " BPA-IIC ", 92 .typename = " CELL-IIC ",
93 .startup = iic_startup, 93 .startup = iic_startup,
94 .enable = iic_enable, 94 .enable = iic_enable,
95 .disable = iic_disable, 95 .disable = iic_disable,
@@ -106,7 +106,7 @@ static int iic_external_get_irq(struct iic_pending_bits pending)
106 irq = -1; 106 irq = -1;
107 107
108 /* 108 /*
109 * This mapping is specific to the Broadband 109 * This mapping is specific to the Cell Broadband
110 * Engine. We might need to get the numbers 110 * Engine. We might need to get the numbers
111 * from the device tree to support future CPUs. 111 * from the device tree to support future CPUs.
112 */ 112 */
diff --git a/arch/ppc64/kernel/bpa_iic.h b/arch/powerpc/platforms/cell/interrupt.h
index 6833c3022166..37d58e6fd0c6 100644
--- a/arch/ppc64/kernel/bpa_iic.h
+++ b/arch/powerpc/platforms/cell/interrupt.h
@@ -1,5 +1,5 @@
1#ifndef ASM_BPA_IIC_H 1#ifndef ASM_CELL_PIC_H
2#define ASM_BPA_IIC_H 2#define ASM_CELL_PIC_H
3#ifdef __KERNEL__ 3#ifdef __KERNEL__
4/* 4/*
5 * Mapping of IIC pending bits into per-node 5 * Mapping of IIC pending bits into per-node
@@ -21,7 +21,7 @@
21 * + node number 21 * + node number
22 * * don't care 22 * * don't care
23 * 23 *
24 * A node consists of a Broadband Engine and an optional 24 * A node consists of a Cell Broadband Engine and an optional
25 * south bridge device providing a maximum of 64 IRQs. 25 * south bridge device providing a maximum of 64 IRQs.
26 * The south bridge may be connected to either IOIF0 26 * The south bridge may be connected to either IOIF0
27 * or IOIF1. 27 * or IOIF1.
@@ -59,4 +59,4 @@ extern void spider_init_IRQ(void);
59extern int spider_get_irq(unsigned long int_pending); 59extern int spider_get_irq(unsigned long int_pending);
60 60
61#endif 61#endif
62#endif /* ASM_BPA_IIC_H */ 62#endif /* ASM_CELL_PIC_H */
diff --git a/arch/ppc64/kernel/bpa_iommu.c b/arch/powerpc/platforms/cell/iommu.c
index da1b4b7a3269..74f999b4ac9e 100644
--- a/arch/ppc64/kernel/bpa_iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * IOMMU implementation for Broadband Processor Architecture 2 * IOMMU implementation for Cell Broadband Processor Architecture
3 * We just establish a linear mapping at boot by setting all the 3 * We just establish a linear mapping at boot by setting all the
4 * IOPT cache entries in the CPU. 4 * IOPT cache entries in the CPU.
5 * The mapping functions should be identical to pci_direct_iommu, 5 * The mapping functions should be identical to pci_direct_iommu,
@@ -41,7 +41,7 @@
41#include <asm/system.h> 41#include <asm/system.h>
42#include <asm/ppc-pci.h> 42#include <asm/ppc-pci.h>
43 43
44#include "bpa_iommu.h" 44#include "iommu.h"
45 45
46static inline unsigned long 46static inline unsigned long
47get_iopt_entry(unsigned long real_address, unsigned long ioid, 47get_iopt_entry(unsigned long real_address, unsigned long ioid,
@@ -276,7 +276,7 @@ static void iommu_dev_setup_null(struct pci_dev *d) { }
276 * for each DMA window used by any device. For now, we 276 * for each DMA window used by any device. For now, we
277 * happen to know that there is only one DMA window in use, 277 * happen to know that there is only one DMA window in use,
278 * starting at iopt_phys_offset. */ 278 * starting at iopt_phys_offset. */
279static void bpa_map_iommu(void) 279static void cell_map_iommu(void)
280{ 280{
281 unsigned long address; 281 unsigned long address;
282 void __iomem *base; 282 void __iomem *base;
@@ -309,7 +309,7 @@ static void bpa_map_iommu(void)
309} 309}
310 310
311 311
312static void *bpa_alloc_coherent(struct device *hwdev, size_t size, 312static void *cell_alloc_coherent(struct device *hwdev, size_t size,
313 dma_addr_t *dma_handle, gfp_t flag) 313 dma_addr_t *dma_handle, gfp_t flag)
314{ 314{
315 void *ret; 315 void *ret;
@@ -317,65 +317,65 @@ static void *bpa_alloc_coherent(struct device *hwdev, size_t size,
317 ret = (void *)__get_free_pages(flag, get_order(size)); 317 ret = (void *)__get_free_pages(flag, get_order(size));
318 if (ret != NULL) { 318 if (ret != NULL) {
319 memset(ret, 0, size); 319 memset(ret, 0, size);
320 *dma_handle = virt_to_abs(ret) | BPA_DMA_VALID; 320 *dma_handle = virt_to_abs(ret) | CELL_DMA_VALID;
321 } 321 }
322 return ret; 322 return ret;
323} 323}
324 324
325static void bpa_free_coherent(struct device *hwdev, size_t size, 325static void cell_free_coherent(struct device *hwdev, size_t size,
326 void *vaddr, dma_addr_t dma_handle) 326 void *vaddr, dma_addr_t dma_handle)
327{ 327{
328 free_pages((unsigned long)vaddr, get_order(size)); 328 free_pages((unsigned long)vaddr, get_order(size));
329} 329}
330 330
331static dma_addr_t bpa_map_single(struct device *hwdev, void *ptr, 331static dma_addr_t cell_map_single(struct device *hwdev, void *ptr,
332 size_t size, enum dma_data_direction direction) 332 size_t size, enum dma_data_direction direction)
333{ 333{
334 return virt_to_abs(ptr) | BPA_DMA_VALID; 334 return virt_to_abs(ptr) | CELL_DMA_VALID;
335} 335}
336 336
337static void bpa_unmap_single(struct device *hwdev, dma_addr_t dma_addr, 337static void cell_unmap_single(struct device *hwdev, dma_addr_t dma_addr,
338 size_t size, enum dma_data_direction direction) 338 size_t size, enum dma_data_direction direction)
339{ 339{
340} 340}
341 341
342static int bpa_map_sg(struct device *hwdev, struct scatterlist *sg, 342static int cell_map_sg(struct device *hwdev, struct scatterlist *sg,
343 int nents, enum dma_data_direction direction) 343 int nents, enum dma_data_direction direction)
344{ 344{
345 int i; 345 int i;
346 346
347 for (i = 0; i < nents; i++, sg++) { 347 for (i = 0; i < nents; i++, sg++) {
348 sg->dma_address = (page_to_phys(sg->page) + sg->offset) 348 sg->dma_address = (page_to_phys(sg->page) + sg->offset)
349 | BPA_DMA_VALID; 349 | CELL_DMA_VALID;
350 sg->dma_length = sg->length; 350 sg->dma_length = sg->length;
351 } 351 }
352 352
353 return nents; 353 return nents;
354} 354}
355 355
356static void bpa_unmap_sg(struct device *hwdev, struct scatterlist *sg, 356static void cell_unmap_sg(struct device *hwdev, struct scatterlist *sg,
357 int nents, enum dma_data_direction direction) 357 int nents, enum dma_data_direction direction)
358{ 358{
359} 359}
360 360
361static int bpa_dma_supported(struct device *dev, u64 mask) 361static int cell_dma_supported(struct device *dev, u64 mask)
362{ 362{
363 return mask < 0x100000000ull; 363 return mask < 0x100000000ull;
364} 364}
365 365
366void bpa_init_iommu(void) 366void cell_init_iommu(void)
367{ 367{
368 bpa_map_iommu(); 368 cell_map_iommu();
369 369
370 /* Direct I/O, IOMMU off */ 370 /* Direct I/O, IOMMU off */
371 ppc_md.iommu_dev_setup = iommu_dev_setup_null; 371 ppc_md.iommu_dev_setup = iommu_dev_setup_null;
372 ppc_md.iommu_bus_setup = iommu_bus_setup_null; 372 ppc_md.iommu_bus_setup = iommu_bus_setup_null;
373 373
374 pci_dma_ops.alloc_coherent = bpa_alloc_coherent; 374 pci_dma_ops.alloc_coherent = cell_alloc_coherent;
375 pci_dma_ops.free_coherent = bpa_free_coherent; 375 pci_dma_ops.free_coherent = cell_free_coherent;
376 pci_dma_ops.map_single = bpa_map_single; 376 pci_dma_ops.map_single = cell_map_single;
377 pci_dma_ops.unmap_single = bpa_unmap_single; 377 pci_dma_ops.unmap_single = cell_unmap_single;
378 pci_dma_ops.map_sg = bpa_map_sg; 378 pci_dma_ops.map_sg = cell_map_sg;
379 pci_dma_ops.unmap_sg = bpa_unmap_sg; 379 pci_dma_ops.unmap_sg = cell_unmap_sg;
380 pci_dma_ops.dma_supported = bpa_dma_supported; 380 pci_dma_ops.dma_supported = cell_dma_supported;
381} 381}
diff --git a/arch/ppc64/kernel/bpa_iommu.h b/arch/powerpc/platforms/cell/iommu.h
index e547d77dfa04..490d77abfe85 100644
--- a/arch/ppc64/kernel/bpa_iommu.h
+++ b/arch/powerpc/platforms/cell/iommu.h
@@ -1,5 +1,5 @@
1#ifndef BPA_IOMMU_H 1#ifndef CELL_IOMMU_H
2#define BPA_IOMMU_H 2#define CELL_IOMMU_H
3 3
4/* some constants */ 4/* some constants */
5enum { 5enum {
@@ -55,11 +55,11 @@ enum {
55 55
56 /* The high bit needs to be set on every DMA address, 56 /* The high bit needs to be set on every DMA address,
57 only 2GB are addressable */ 57 only 2GB are addressable */
58 BPA_DMA_VALID = 0x80000000, 58 CELL_DMA_VALID = 0x80000000,
59 BPA_DMA_MASK = 0x7fffffff, 59 CELL_DMA_MASK = 0x7fffffff,
60}; 60};
61 61
62 62
63void bpa_init_iommu(void); 63void cell_init_iommu(void);
64 64
65#endif 65#endif
diff --git a/arch/ppc64/kernel/bpa_setup.c b/arch/powerpc/platforms/cell/setup.c
index c2dc8f282eb8..9a495634d0c2 100644
--- a/arch/ppc64/kernel/bpa_setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -1,11 +1,11 @@
1/* 1/*
2 * linux/arch/ppc/kernel/bpa_setup.c 2 * linux/arch/powerpc/platforms/cell/setup.c
3 * 3 *
4 * Copyright (C) 1995 Linus Torvalds 4 * Copyright (C) 1995 Linus Torvalds
5 * Adapted from 'alpha' version by Gary Thomas 5 * Adapted from 'alpha' version by Gary Thomas
6 * Modified by Cort Dougan (cort@cs.nmt.edu) 6 * Modified by Cort Dougan (cort@cs.nmt.edu)
7 * Modified by PPC64 Team, IBM Corp 7 * Modified by PPC64 Team, IBM Corp
8 * Modified by BPA Team, IBM Deutschland Entwicklung GmbH 8 * Modified by Cell Team, IBM Deutschland Entwicklung GmbH
9 * 9 *
10 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 11 * modify it under the terms of the GNU General Public License
@@ -46,8 +46,8 @@
46#include <asm/ppc-pci.h> 46#include <asm/ppc-pci.h>
47#include <asm/irq.h> 47#include <asm/irq.h>
48 48
49#include "bpa_iic.h" 49#include "interrupt.h"
50#include "bpa_iommu.h" 50#include "iommu.h"
51 51
52#ifdef DEBUG 52#ifdef DEBUG
53#define DBG(fmt...) udbg_printf(fmt) 53#define DBG(fmt...) udbg_printf(fmt)
@@ -55,7 +55,7 @@
55#define DBG(fmt...) 55#define DBG(fmt...)
56#endif 56#endif
57 57
58void bpa_show_cpuinfo(struct seq_file *m) 58void cell_show_cpuinfo(struct seq_file *m)
59{ 59{
60 struct device_node *root; 60 struct device_node *root;
61 const char *model = ""; 61 const char *model = "";
@@ -63,22 +63,22 @@ void bpa_show_cpuinfo(struct seq_file *m)
63 root = of_find_node_by_path("/"); 63 root = of_find_node_by_path("/");
64 if (root) 64 if (root)
65 model = get_property(root, "model", NULL); 65 model = get_property(root, "model", NULL);
66 seq_printf(m, "machine\t\t: BPA %s\n", model); 66 seq_printf(m, "machine\t\t: CHRP %s\n", model);
67 of_node_put(root); 67 of_node_put(root);
68} 68}
69 69
70static void bpa_progress(char *s, unsigned short hex) 70static void cell_progress(char *s, unsigned short hex)
71{ 71{
72 printk("*** %04x : %s\n", hex, s ? s : ""); 72 printk("*** %04x : %s\n", hex, s ? s : "");
73} 73}
74 74
75static void __init bpa_setup_arch(void) 75static void __init cell_setup_arch(void)
76{ 76{
77 ppc_md.init_IRQ = iic_init_IRQ; 77 ppc_md.init_IRQ = iic_init_IRQ;
78 ppc_md.get_irq = iic_get_irq; 78 ppc_md.get_irq = iic_get_irq;
79 79
80#ifdef CONFIG_SMP 80#ifdef CONFIG_SMP
81 smp_init_pSeries(); 81 smp_init_cell();
82#endif 82#endif
83 83
84 /* init to some ~sane value until calibrate_delay() runs */ 84 /* init to some ~sane value until calibrate_delay() runs */
@@ -97,39 +97,39 @@ static void __init bpa_setup_arch(void)
97 conswitchp = &dummy_con; 97 conswitchp = &dummy_con;
98#endif 98#endif
99 99
100 bpa_nvram_init(); 100 mmio_nvram_init();
101} 101}
102 102
103/* 103/*
104 * Early initialization. Relocation is on but do not reference unbolted pages 104 * Early initialization. Relocation is on but do not reference unbolted pages
105 */ 105 */
106static void __init bpa_init_early(void) 106static void __init cell_init_early(void)
107{ 107{
108 DBG(" -> bpa_init_early()\n"); 108 DBG(" -> cell_init_early()\n");
109 109
110 hpte_init_native(); 110 hpte_init_native();
111 111
112 bpa_init_iommu(); 112 cell_init_iommu();
113 113
114 ppc64_interrupt_controller = IC_BPA_IIC; 114 ppc64_interrupt_controller = IC_CELL_PIC;
115 115
116 DBG(" <- bpa_init_early()\n"); 116 DBG(" <- cell_init_early()\n");
117} 117}
118 118
119 119
120static int __init bpa_probe(int platform) 120static int __init cell_probe(int platform)
121{ 121{
122 if (platform != PLATFORM_BPA) 122 if (platform != PLATFORM_CELL)
123 return 0; 123 return 0;
124 124
125 return 1; 125 return 1;
126} 126}
127 127
128struct machdep_calls __initdata bpa_md = { 128struct machdep_calls __initdata cell_md = {
129 .probe = bpa_probe, 129 .probe = cell_probe,
130 .setup_arch = bpa_setup_arch, 130 .setup_arch = cell_setup_arch,
131 .init_early = bpa_init_early, 131 .init_early = cell_init_early,
132 .show_cpuinfo = bpa_show_cpuinfo, 132 .show_cpuinfo = cell_show_cpuinfo,
133 .restart = rtas_restart, 133 .restart = rtas_restart,
134 .power_off = rtas_power_off, 134 .power_off = rtas_power_off,
135 .halt = rtas_halt, 135 .halt = rtas_halt,
@@ -137,5 +137,5 @@ struct machdep_calls __initdata bpa_md = {
137 .get_rtc_time = rtas_get_rtc_time, 137 .get_rtc_time = rtas_get_rtc_time,
138 .set_rtc_time = rtas_set_rtc_time, 138 .set_rtc_time = rtas_set_rtc_time,
139 .calibrate_decr = generic_calibrate_decr, 139 .calibrate_decr = generic_calibrate_decr,
140 .progress = bpa_progress, 140 .progress = cell_progress,
141}; 141};
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
new file mode 100644
index 000000000000..de96eadf419d
--- /dev/null
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -0,0 +1,230 @@
1/*
2 * SMP support for Cell machines.
3 *
4 * Dave Engebretsen, Peter Bergner, and
5 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
6 *
7 * Plus various changes from other IBM teams...
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#undef DEBUG
16
17#include <linux/config.h>
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/sched.h>
21#include <linux/smp.h>
22#include <linux/interrupt.h>
23#include <linux/delay.h>
24#include <linux/init.h>
25#include <linux/spinlock.h>
26#include <linux/cache.h>
27#include <linux/err.h>
28#include <linux/sysdev.h>
29#include <linux/cpu.h>
30
31#include <asm/ptrace.h>
32#include <asm/atomic.h>
33#include <asm/irq.h>
34#include <asm/page.h>
35#include <asm/pgtable.h>
36#include <asm/io.h>
37#include <asm/prom.h>
38#include <asm/smp.h>
39#include <asm/paca.h>
40#include <asm/time.h>
41#include <asm/machdep.h>
42#include <asm/cputable.h>
43#include <asm/firmware.h>
44#include <asm/system.h>
45#include <asm/rtas.h>
46
47#include "interrupt.h"
48
49#ifdef DEBUG
50#define DBG(fmt...) udbg_printf(fmt)
51#else
52#define DBG(fmt...)
53#endif
54
55/*
56 * The primary thread of each non-boot processor is recorded here before
57 * smp init.
58 */
59static cpumask_t of_spin_map;
60
61extern void pSeries_secondary_smp_init(unsigned long);
62
63/**
64 * smp_startup_cpu() - start the given cpu
65 *
66 * At boot time, there is nothing to do for primary threads which were
67 * started from Open Firmware. For anything else, call RTAS with the
68 * appropriate start location.
69 *
70 * Returns:
71 * 0 - failure
72 * 1 - success
73 */
74static inline int __devinit smp_startup_cpu(unsigned int lcpu)
75{
76 int status;
77 unsigned long start_here = __pa((u32)*((unsigned long *)
78 pSeries_secondary_smp_init));
79 unsigned int pcpu;
80 int start_cpu;
81
82 if (cpu_isset(lcpu, of_spin_map))
83 /* Already started by OF and sitting in spin loop */
84 return 1;
85
86 pcpu = get_hard_smp_processor_id(lcpu);
87
88 /* Fixup atomic count: it exited inside IRQ handler. */
89 paca[lcpu].__current->thread_info->preempt_count = 0;
90
91 /*
92 * If the RTAS start-cpu token does not exist then presume the
93 * cpu is already spinning.
94 */
95 start_cpu = rtas_token("start-cpu");
96 if (start_cpu == RTAS_UNKNOWN_SERVICE)
97 return 1;
98
99 status = rtas_call(start_cpu, 3, 1, NULL, pcpu, start_here, lcpu);
100 if (status != 0) {
101 printk(KERN_ERR "start-cpu failed: %i\n", status);
102 return 0;
103 }
104
105 return 1;
106}
107
108static void smp_iic_message_pass(int target, int msg)
109{
110 unsigned int i;
111
112 if (target < NR_CPUS) {
113 iic_cause_IPI(target, msg);
114 } else {
115 for_each_online_cpu(i) {
116 if (target == MSG_ALL_BUT_SELF
117 && i == smp_processor_id())
118 continue;
119 iic_cause_IPI(i, msg);
120 }
121 }
122}
123
124static int __init smp_iic_probe(void)
125{
126 iic_request_IPIs();
127
128 return cpus_weight(cpu_possible_map);
129}
130
131static void __devinit smp_iic_setup_cpu(int cpu)
132{
133 if (cpu != boot_cpuid)
134 iic_setup_cpu();
135}
136
137static DEFINE_SPINLOCK(timebase_lock);
138static unsigned long timebase = 0;
139
140static void __devinit cell_give_timebase(void)
141{
142 spin_lock(&timebase_lock);
143 rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
144 timebase = get_tb();
145 spin_unlock(&timebase_lock);
146
147 while (timebase)
148 barrier();
149 rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
150}
151
152static void __devinit cell_take_timebase(void)
153{
154 while (!timebase)
155 barrier();
156 spin_lock(&timebase_lock);
157 set_tb(timebase >> 32, timebase & 0xffffffff);
158 timebase = 0;
159 spin_unlock(&timebase_lock);
160}
161
162static void __devinit smp_cell_kick_cpu(int nr)
163{
164 BUG_ON(nr < 0 || nr >= NR_CPUS);
165
166 if (!smp_startup_cpu(nr))
167 return;
168
169 /*
170 * The processor is currently spinning, waiting for the
171 * cpu_start field to become non-zero After we set cpu_start,
172 * the processor will continue on to secondary_start
173 */
174 paca[nr].cpu_start = 1;
175}
176
177static int smp_cell_cpu_bootable(unsigned int nr)
178{
179 /* Special case - we inhibit secondary thread startup
180 * during boot if the user requests it. Odd-numbered
181 * cpus are assumed to be secondary threads.
182 */
183 if (system_state < SYSTEM_RUNNING &&
184 cpu_has_feature(CPU_FTR_SMT) &&
185 !smt_enabled_at_boot && nr % 2 != 0)
186 return 0;
187
188 return 1;
189}
190static struct smp_ops_t bpa_iic_smp_ops = {
191 .message_pass = smp_iic_message_pass,
192 .probe = smp_iic_probe,
193 .kick_cpu = smp_cell_kick_cpu,
194 .setup_cpu = smp_iic_setup_cpu,
195 .cpu_bootable = smp_cell_cpu_bootable,
196};
197
198/* This is called very early */
199void __init smp_init_cell(void)
200{
201 int i;
202
203 DBG(" -> smp_init_cell()\n");
204
205 smp_ops = &bpa_iic_smp_ops;
206
207 /* Mark threads which are still spinning in hold loops. */
208 if (cpu_has_feature(CPU_FTR_SMT)) {
209 for_each_present_cpu(i) {
210 if (i % 2 == 0)
211 /*
212 * Even-numbered logical cpus correspond to
213 * primary threads.
214 */
215 cpu_set(i, of_spin_map);
216 }
217 } else {
218 of_spin_map = cpu_present_map;
219 }
220
221 cpu_clear(boot_cpuid, of_spin_map);
222
223 /* Non-lpar has additional take/give timebase */
224 if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
225 smp_ops->give_timebase = cell_give_timebase;
226 smp_ops->take_timebase = cell_take_timebase;
227 }
228
229 DBG(" <- smp_init_cell()\n");
230}
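
cell_give_timebase()/cell_take_timebase() form a two-CPU rendezvous: the giver freezes the timebase via RTAS, publishes its value, and spins until the taker has copied it and cleared the slot. A self-contained pthread illustration of that handshake (RTAS freeze/thaw and set_tb() are stubbed; volatile plus busy-waiting stands in for the kernel's barrier() loops):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t timebase_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile unsigned long timebase;

static unsigned long get_tb(void) { return 0x12345678UL; }	/* stub */

static void *give_timebase(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&timebase_lock);
	/* kernel: rtas_call(freeze-time-base) here */
	timebase = get_tb();
	pthread_mutex_unlock(&timebase_lock);

	while (timebase)	/* wait for the taker to consume it */
		;
	/* kernel: rtas_call(thaw-time-base) here */
	return NULL;
}

static void *take_timebase(void *arg)
{
	(void)arg;
	while (!timebase)	/* wait for the giver to publish */
		;
	pthread_mutex_lock(&timebase_lock);
	printf("synchronized tb = %#lx\n", timebase);	/* kernel: set_tb() */
	timebase = 0;		/* lets the giver thaw and move on */
	pthread_mutex_unlock(&timebase_lock);
	return NULL;
}

int main(void)
{
	pthread_t g, t;

	pthread_create(&t, NULL, take_timebase, NULL);
	pthread_create(&g, NULL, give_timebase, NULL);
	pthread_join(g, NULL);
	pthread_join(t, NULL);
	return 0;
}
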
diff --git a/arch/ppc64/kernel/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index d5c9a02fb119..e74132188bdf 100644
--- a/arch/ppc64/kernel/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -27,7 +27,7 @@
27#include <asm/prom.h> 27#include <asm/prom.h>
28#include <asm/io.h> 28#include <asm/io.h>
29 29
30#include "bpa_iic.h" 30#include "interrupt.h"
31 31
32/* register layout taken from Spider spec, table 7.4-4 */ 32/* register layout taken from Spider spec, table 7.4-4 */
33enum { 33enum {
diff --git a/arch/powerpc/platforms/chrp/nvram.c b/arch/powerpc/platforms/chrp/nvram.c
index 4ac7125aa09c..65266b46db9b 100644
--- a/arch/powerpc/platforms/chrp/nvram.c
+++ b/arch/powerpc/platforms/chrp/nvram.c
@@ -34,7 +34,8 @@ static unsigned char chrp_nvram_read(int addr)
34 return 0xff; 34 return 0xff;
35 } 35 }
36 spin_lock_irqsave(&nvram_lock, flags); 36 spin_lock_irqsave(&nvram_lock, flags);
37 if ((call_rtas("nvram-fetch", 3, 2, &done, addr, __pa(nvram_buf), 1) != 0) || 1 != done) 37 if ((rtas_call(rtas_token("nvram-fetch"), 3, 2, &done, addr,
38 __pa(nvram_buf), 1) != 0) || 1 != done)
38 ret = 0xff; 39 ret = 0xff;
39 else 40 else
40 ret = nvram_buf[0]; 41 ret = nvram_buf[0];
@@ -54,7 +55,8 @@ static void chrp_nvram_write(int addr, unsigned char val)
54 } 55 }
55 spin_lock_irqsave(&nvram_lock, flags); 56 spin_lock_irqsave(&nvram_lock, flags);
56 nvram_buf[0] = val; 57 nvram_buf[0] = val;
57 if ((call_rtas("nvram-store", 3, 2, &done, addr, __pa(nvram_buf), 1) != 0) || 1 != done) 58 if ((rtas_call(rtas_token("nvram-store"), 3, 2, &done, addr,
59 __pa(nvram_buf), 1) != 0) || 1 != done)
58 printk(KERN_DEBUG "rtas IO error storing 0x%02x at %d", val, addr); 60 printk(KERN_DEBUG "rtas IO error storing 0x%02x at %d", val, addr);
59 spin_unlock_irqrestore(&nvram_lock, flags); 61 spin_unlock_irqrestore(&nvram_lock, flags);
60} 62}
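
The old ppc32 call_rtas() wrapper is replaced by the merged rtas_token()/rtas_call() pair: resolve the service name to a token, then invoke it. Since the token is constant, callers usually look it up once rather than on every byte; a hedged kernel-style fragment of that idiom (not part of this patch):

/* sketch, kernel context assumed: cache the "nvram-fetch" token */
static int chrp_nvram_fetch(int addr, unsigned char *val)
{
	static int token = RTAS_UNKNOWN_SERVICE;
	int done;

	if (token == RTAS_UNKNOWN_SERVICE)
		token = rtas_token("nvram-fetch");
	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENODEV;	/* firmware doesn't offer the service */
	if (rtas_call(token, 3, 2, &done, addr, __pa(nvram_buf), 1) != 0 ||
	    done != 1)
		return -EIO;
	*val = nvram_buf[0];
	return 0;
}
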
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index d955e950a74c..8e5ef62715bb 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -27,6 +27,7 @@
27#include <linux/kdev_t.h> 27#include <linux/kdev_t.h>
28#include <linux/major.h> 28#include <linux/major.h>
29#include <linux/root_dev.h> 29#include <linux/root_dev.h>
30#include <linux/kernel.h>
30 31
31#include <asm/processor.h> 32#include <asm/processor.h>
32#include <asm/machdep.h> 33#include <asm/machdep.h>
@@ -94,6 +95,8 @@ extern unsigned long iSeries_recal_titan;
94 95
95static int mf_initialized; 96static int mf_initialized;
96 97
98static unsigned long cmd_mem_limit;
99
97struct MemoryBlock { 100struct MemoryBlock {
98 unsigned long absStart; 101 unsigned long absStart;
99 unsigned long absEnd; 102 unsigned long absEnd;
@@ -341,23 +344,6 @@ static void __init iSeries_init_early(void)
341 */ 344 */
342 iommu_init_early_iSeries(); 345 iommu_init_early_iSeries();
343 346
344 iSeries_get_cmdline();
345
346 /* Save unparsed command line copy for /proc/cmdline */
347 strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
348
349 /* Parse early parameters, in particular mem=x */
350 parse_early_param();
351
352 if (memory_limit) {
353 if (memory_limit < systemcfg->physicalMemorySize)
354 systemcfg->physicalMemorySize = memory_limit;
355 else {
356 printk("Ignoring mem=%lu >= ram_top.\n", memory_limit);
357 memory_limit = 0;
358 }
359 }
360
361 /* Initialize machine-dependency vectors */ 347 /* Initialize machine-dependency vectors */
362#ifdef CONFIG_SMP 348#ifdef CONFIG_SMP
363 smp_init_iSeries(); 349 smp_init_iSeries();
@@ -971,6 +957,8 @@ void build_flat_dt(struct iseries_flat_dt *dt)
971 /* /chosen */ 957 /* /chosen */
972 dt_start_node(dt, "chosen"); 958 dt_start_node(dt, "chosen");
973 dt_prop_u32(dt, "linux,platform", PLATFORM_ISERIES_LPAR); 959 dt_prop_u32(dt, "linux,platform", PLATFORM_ISERIES_LPAR);
960 if (cmd_mem_limit)
961 dt_prop_u64(dt, "linux,memory-limit", cmd_mem_limit);
974 dt_end_node(dt); 962 dt_end_node(dt);
975 963
976 dt_cpus(dt); 964 dt_cpus(dt);
@@ -990,7 +978,27 @@ void * __init iSeries_early_setup(void)
990 */ 978 */
991 build_iSeries_Memory_Map(); 979 build_iSeries_Memory_Map();
992 980
981 iSeries_get_cmdline();
982
983 /* Save unparsed command line copy for /proc/cmdline */
984 strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
985
986 /* Parse early parameters, in particular mem=x */
987 parse_early_param();
988
993 build_flat_dt(&iseries_dt); 989 build_flat_dt(&iseries_dt);
994 990
995 return (void *) __pa(&iseries_dt); 991 return (void *) __pa(&iseries_dt);
996} 992}
993
994/*
995 * On iSeries we just parse the mem=X option from the command line.
996 * On pSeries it's a bit more complicated, see prom_init_mem()
997 */
998static int __init early_parsemem(char *p)
999{
1000 if (p)
1001 cmd_mem_limit = ALIGN(memparse(p, &p), PAGE_SIZE);
1002 return 0;
1003}
1004early_param("mem", early_parsemem);
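
The mem= handling moves out of iSeries_init_early(): early_parsemem() now only records the limit, and build_flat_dt() passes it on as the linux,memory-limit property for generic code to apply. A runnable illustration of the memparse-plus-ALIGN arithmetic involved (PAGE_SIZE, ALIGN() and a minimal memparse() are spelled out here; the kernel gets all three from its own headers):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))	/* a: power of two */

/* minimal memparse(): number with optional k/m/g suffix */
static unsigned long memparse(const char *p, char **end)
{
	unsigned long val = strtoul(p, end, 0);

	switch (**end) {
	case 'G': case 'g': val <<= 10;	/* fall through */
	case 'M': case 'm': val <<= 10;	/* fall through */
	case 'K': case 'k': val <<= 10; (*end)++;
	}
	return val;
}

int main(void)
{
	char *end;
	unsigned long limit = ALIGN(memparse("512M", &end), PAGE_SIZE);

	printf("linux,memory-limit = %#lx\n", limit);	/* 0x20000000 */
	return 0;
}
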
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 0037a8c8c81f..83a49e80ac29 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -576,7 +576,7 @@ void __init pmac_pic_init(void)
576#endif /* CONFIG_PPC32 */ 576#endif /* CONFIG_PPC32 */
577} 577}
578 578
579#ifdef CONFIG_PM 579#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
580/* 580/*
581 * These procedures are used in implementing sleep on the powerbooks. 581 * These procedures are used in implementing sleep on the powerbooks.
582 * sleep_save_intrs() saves the states of all interrupt enables 582 * sleep_save_intrs() saves the states of all interrupt enables
@@ -643,7 +643,7 @@ static int pmacpic_resume(struct sys_device *sysdev)
643 return 0; 643 return 0;
644} 644}
645 645
646#endif /* CONFIG_PM */ 646#endif /* CONFIG_PM && CONFIG_PPC32 */
647 647
648static struct sysdev_class pmacpic_sysclass = { 648static struct sysdev_class pmacpic_sysclass = {
649 set_kset_name("pmac_pic"), 649 set_kset_name("pmac_pic"),
@@ -655,10 +655,10 @@ static struct sys_device device_pmacpic = {
655}; 655};
656 656
657static struct sysdev_driver driver_pmacpic = { 657static struct sysdev_driver driver_pmacpic = {
658#ifdef CONFIG_PM 658#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
659 .suspend = &pmacpic_suspend, 659 .suspend = &pmacpic_suspend,
660 .resume = &pmacpic_resume, 660 .resume = &pmacpic_resume,
661#endif /* CONFIG_PM */ 661#endif /* CONFIG_PM && CONFIG_PPC32 */
662}; 662};
663 663
664static int __init init_pmacpic_sysfs(void) 664static int __init init_pmacpic_sysfs(void)
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 5ef494e3a70f..91909a844736 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -1,5 +1,5 @@
1obj-y := pci.o lpar.o hvCall.o nvram.o reconfig.o \ 1obj-y := pci.o lpar.o hvCall.o nvram.o reconfig.o \
2 setup.o iommu.o rtas-fw.o ras.o 2 setup.o iommu.o ras.o
3obj-$(CONFIG_SMP) += smp.o 3obj-$(CONFIG_SMP) += smp.o
4obj-$(CONFIG_IBMVIO) += vio.o 4obj-$(CONFIG_IBMVIO) += vio.o
5obj-$(CONFIG_XICS) += xics.o 5obj-$(CONFIG_XICS) += xics.o
diff --git a/arch/powerpc/platforms/pseries/rtas-fw.h b/arch/powerpc/platforms/pseries/rtas-fw.h
deleted file mode 100644
index e70fa69974a3..000000000000
--- a/arch/powerpc/platforms/pseries/rtas-fw.h
+++ /dev/null
@@ -1,3 +0,0 @@
1void rtas_fw_restart(char *cmd);
2void rtas_fw_power_off(void);
3void rtas_fw_halt(void);
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 10cb0f2d9b5b..c0a3d918148a 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -67,8 +67,6 @@
67#include <asm/i8259.h> 67#include <asm/i8259.h>
68#include <asm/udbg.h> 68#include <asm/udbg.h>
69 69
70#include "rtas-fw.h"
71
72#ifdef DEBUG 70#ifdef DEBUG
73#define DBG(fmt...) udbg_printf(fmt) 71#define DBG(fmt...) udbg_printf(fmt)
74#else 72#else
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 8acd21dee05d..6b7efcfc352a 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_PPC_MPC106) += grackle.o
5obj-$(CONFIG_BOOKE) += dcr.o 5obj-$(CONFIG_BOOKE) += dcr.o
6obj-$(CONFIG_40x) += dcr.o 6obj-$(CONFIG_40x) += dcr.o
7obj-$(CONFIG_U3_DART) += u3_iommu.o 7obj-$(CONFIG_U3_DART) += u3_iommu.o
8obj-$(CONFIG_MMIO_NVRAM) += mmio_nvram.o
diff --git a/arch/ppc64/kernel/bpa_nvram.c b/arch/powerpc/sysdev/mmio_nvram.c
index 06a119cfceb5..74e0d31a3559 100644
--- a/arch/ppc64/kernel/bpa_nvram.c
+++ b/arch/powerpc/sysdev/mmio_nvram.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * NVRAM for CPBW 2 * memory mapped NVRAM
3 * 3 *
4 * (C) Copyright IBM Corp. 2005 4 * (C) Copyright IBM Corp. 2005
5 * 5 *
@@ -30,54 +30,54 @@
30#include <asm/nvram.h> 30#include <asm/nvram.h>
31#include <asm/prom.h> 31#include <asm/prom.h>
32 32
33static void __iomem *bpa_nvram_start; 33static void __iomem *mmio_nvram_start;
34static long bpa_nvram_len; 34static long mmio_nvram_len;
35static spinlock_t bpa_nvram_lock = SPIN_LOCK_UNLOCKED; 35static spinlock_t mmio_nvram_lock = SPIN_LOCK_UNLOCKED;
36 36
37static ssize_t bpa_nvram_read(char *buf, size_t count, loff_t *index) 37static ssize_t mmio_nvram_read(char *buf, size_t count, loff_t *index)
38{ 38{
39 unsigned long flags; 39 unsigned long flags;
40 40
41 if (*index >= bpa_nvram_len) 41 if (*index >= mmio_nvram_len)
42 return 0; 42 return 0;
43 if (*index + count > bpa_nvram_len) 43 if (*index + count > mmio_nvram_len)
44 count = bpa_nvram_len - *index; 44 count = mmio_nvram_len - *index;
45 45
46 spin_lock_irqsave(&bpa_nvram_lock, flags); 46 spin_lock_irqsave(&mmio_nvram_lock, flags);
47 47
48 memcpy_fromio(buf, bpa_nvram_start + *index, count); 48 memcpy_fromio(buf, mmio_nvram_start + *index, count);
49 49
50 spin_unlock_irqrestore(&bpa_nvram_lock, flags); 50 spin_unlock_irqrestore(&mmio_nvram_lock, flags);
51 51
52 *index += count; 52 *index += count;
53 return count; 53 return count;
54} 54}
55 55
56static ssize_t bpa_nvram_write(char *buf, size_t count, loff_t *index) 56static ssize_t mmio_nvram_write(char *buf, size_t count, loff_t *index)
57{ 57{
58 unsigned long flags; 58 unsigned long flags;
59 59
60 if (*index >= bpa_nvram_len) 60 if (*index >= mmio_nvram_len)
61 return 0; 61 return 0;
62 if (*index + count > bpa_nvram_len) 62 if (*index + count > mmio_nvram_len)
63 count = bpa_nvram_len - *index; 63 count = mmio_nvram_len - *index;
64 64
65 spin_lock_irqsave(&bpa_nvram_lock, flags); 65 spin_lock_irqsave(&mmio_nvram_lock, flags);
66 66
67 memcpy_toio(bpa_nvram_start + *index, buf, count); 67 memcpy_toio(mmio_nvram_start + *index, buf, count);
68 68
69 spin_unlock_irqrestore(&bpa_nvram_lock, flags); 69 spin_unlock_irqrestore(&mmio_nvram_lock, flags);
70 70
71 *index += count; 71 *index += count;
72 return count; 72 return count;
73} 73}
74 74
75static ssize_t bpa_nvram_get_size(void) 75static ssize_t mmio_nvram_get_size(void)
76{ 76{
77 return bpa_nvram_len; 77 return mmio_nvram_len;
78} 78}
79 79
80int __init bpa_nvram_init(void) 80int __init mmio_nvram_init(void)
81{ 81{
82 struct device_node *nvram_node; 82 struct device_node *nvram_node;
83 unsigned long *buffer; 83 unsigned long *buffer;
@@ -97,20 +97,20 @@ int __init bpa_nvram_init(void)
97 97
98 ret = -ENODEV; 98 ret = -ENODEV;
99 nvram_addr = buffer[0]; 99 nvram_addr = buffer[0];
100 bpa_nvram_len = buffer[1]; 100 mmio_nvram_len = buffer[1];
101 if ( (!bpa_nvram_len) || (!nvram_addr) ) 101 if ( (!mmio_nvram_len) || (!nvram_addr) )
102 goto out; 102 goto out;
103 103
104 bpa_nvram_start = ioremap(nvram_addr, bpa_nvram_len); 104 mmio_nvram_start = ioremap(nvram_addr, mmio_nvram_len);
105 if (!bpa_nvram_start) 105 if (!mmio_nvram_start)
106 goto out; 106 goto out;
107 107
108 printk(KERN_INFO "BPA NVRAM, %luk mapped to %p\n", 108 printk(KERN_INFO "mmio NVRAM, %luk mapped to %p\n",
109 bpa_nvram_len >> 10, bpa_nvram_start); 109 mmio_nvram_len >> 10, mmio_nvram_start);
110 110
111 ppc_md.nvram_read = bpa_nvram_read; 111 ppc_md.nvram_read = mmio_nvram_read;
112 ppc_md.nvram_write = bpa_nvram_write; 112 ppc_md.nvram_write = mmio_nvram_write;
113 ppc_md.nvram_size = bpa_nvram_get_size; 113 ppc_md.nvram_size = mmio_nvram_get_size;
114 114
115out: 115out:
116 of_node_put(nvram_node); 116 of_node_put(nvram_node);
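
mmio_nvram_read()/mmio_nvram_write() implement the windowed-access contract behind ppc_md.nvram_*: an offset at or past the end returns 0, a straddling request is clamped to the window, and *index advances by what was transferred. A runnable model of that contract against a plain buffer (memcpy stands in for memcpy_fromio, and the spinlock is dropped since nothing here is concurrent):

#include <stdio.h>
#include <string.h>

static char nvram[17] = "0123456789abcdef";
static const long nvram_len = 16;

static long nvram_read(char *buf, size_t count, long *index)
{
	if (*index >= nvram_len)
		return 0;			/* past the end */
	if (*index + (long)count > nvram_len)
		count = nvram_len - *index;	/* clamp to the window */

	memcpy(buf, nvram + *index, count);	/* kernel: memcpy_fromio */
	*index += count;
	return count;
}

int main(void)
{
	char buf[8];
	long pos = 12;
	long n = nvram_read(buf, sizeof(buf), &pos);

	printf("read %ld bytes, pos now %ld\n", n, pos);	/* 4, 16 */
	return 0;
}
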
diff --git a/arch/ppc/Makefile b/arch/ppc/Makefile
index 94d5716fa7c3..e719a4933af1 100644
--- a/arch/ppc/Makefile
+++ b/arch/ppc/Makefile
@@ -66,7 +66,8 @@ head-$(CONFIG_PPC_FPU) += arch/powerpc/kernel/fpu.o
66core-y += arch/ppc/kernel/ arch/powerpc/kernel/ \ 66core-y += arch/ppc/kernel/ arch/powerpc/kernel/ \
67 arch/ppc/platforms/ \ 67 arch/ppc/platforms/ \
68 arch/ppc/mm/ arch/ppc/lib/ \ 68 arch/ppc/mm/ arch/ppc/lib/ \
69 arch/ppc/syslib/ arch/powerpc/sysdev/ 69 arch/ppc/syslib/ arch/powerpc/sysdev/ \
70 arch/powerpc/lib/
70core-$(CONFIG_4xx) += arch/ppc/platforms/4xx/ 71core-$(CONFIG_4xx) += arch/ppc/platforms/4xx/
71core-$(CONFIG_83xx) += arch/ppc/platforms/83xx/ 72core-$(CONFIG_83xx) += arch/ppc/platforms/83xx/
72core-$(CONFIG_85xx) += arch/ppc/platforms/85xx/ 73core-$(CONFIG_85xx) += arch/ppc/platforms/85xx/
diff --git a/arch/ppc/kernel/bitops.c b/arch/ppc/kernel/bitops.c
deleted file mode 100644
index 7f53d193968b..000000000000
--- a/arch/ppc/kernel/bitops.c
+++ /dev/null
@@ -1,126 +0,0 @@
1/*
2 * Copyright (C) 1996 Paul Mackerras.
3 */
4
5#include <linux/kernel.h>
6#include <linux/bitops.h>
7
8/*
9 * If the bitops are not inlined in bitops.h, they are defined here.
10 * -- paulus
11 */
12#if !__INLINE_BITOPS
13void set_bit(int nr, volatile void * addr)
14{
15 unsigned long old;
16 unsigned long mask = 1 << (nr & 0x1f);
17 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
18
19 __asm__ __volatile__(SMP_WMB "\n\
201: lwarx %0,0,%3 \n\
21 or %0,%0,%2 \n"
22 PPC405_ERR77(0,%3)
23" stwcx. %0,0,%3 \n\
24 bne 1b"
25 SMP_MB
26 : "=&r" (old), "=m" (*p)
27 : "r" (mask), "r" (p), "m" (*p)
28 : "cc" );
29}
30
31void clear_bit(int nr, volatile void *addr)
32{
33 unsigned long old;
34 unsigned long mask = 1 << (nr & 0x1f);
35 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
36
37 __asm__ __volatile__(SMP_WMB "\n\
381: lwarx %0,0,%3 \n\
39 andc %0,%0,%2 \n"
40 PPC405_ERR77(0,%3)
41" stwcx. %0,0,%3 \n\
42 bne 1b"
43 SMP_MB
44 : "=&r" (old), "=m" (*p)
45 : "r" (mask), "r" (p), "m" (*p)
46 : "cc");
47}
48
49void change_bit(int nr, volatile void *addr)
50{
51 unsigned long old;
52 unsigned long mask = 1 << (nr & 0x1f);
53 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
54
55 __asm__ __volatile__(SMP_WMB "\n\
561: lwarx %0,0,%3 \n\
57 xor %0,%0,%2 \n"
58 PPC405_ERR77(0,%3)
59" stwcx. %0,0,%3 \n\
60 bne 1b"
61 SMP_MB
62 : "=&r" (old), "=m" (*p)
63 : "r" (mask), "r" (p), "m" (*p)
64 : "cc");
65}
66
67int test_and_set_bit(int nr, volatile void *addr)
68{
69 unsigned int old, t;
70 unsigned int mask = 1 << (nr & 0x1f);
71 volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
72
73 __asm__ __volatile__(SMP_WMB "\n\
741: lwarx %0,0,%4 \n\
75 or %1,%0,%3 \n"
76 PPC405_ERR77(0,%4)
77" stwcx. %1,0,%4 \n\
78 bne 1b"
79 SMP_MB
80 : "=&r" (old), "=&r" (t), "=m" (*p)
81 : "r" (mask), "r" (p), "m" (*p)
82 : "cc");
83
84 return (old & mask) != 0;
85}
86
87int test_and_clear_bit(int nr, volatile void *addr)
88{
89 unsigned int old, t;
90 unsigned int mask = 1 << (nr & 0x1f);
91 volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
92
93 __asm__ __volatile__(SMP_WMB "\n\
941: lwarx %0,0,%4 \n\
95 andc %1,%0,%3 \n"
96 PPC405_ERR77(0,%4)
97" stwcx. %1,0,%4 \n\
98 bne 1b"
99 SMP_MB
100 : "=&r" (old), "=&r" (t), "=m" (*p)
101 : "r" (mask), "r" (p), "m" (*p)
102 : "cc");
103
104 return (old & mask) != 0;
105}
106
107int test_and_change_bit(int nr, volatile void *addr)
108{
109 unsigned int old, t;
110 unsigned int mask = 1 << (nr & 0x1f);
111 volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
112
113 __asm__ __volatile__(SMP_WMB "\n\
1141: lwarx %0,0,%4 \n\
115 xor %1,%0,%3 \n"
116 PPC405_ERR77(0,%4)
117" stwcx. %1,0,%4 \n\
118 bne 1b"
119 SMP_MB
120 : "=&r" (old), "=&r" (t), "=m" (*p)
121 : "r" (mask), "r" (p), "m" (*p)
122 : "cc");
123
124 return (old & mask) != 0;
125}
126#endif /* !__INLINE_BITOPS */
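
These out-of-line ppc32 bitops go away because the merged include/asm-powerpc/bitops.h (in the diffstat above) supplies inline lwarx/stwcx. versions for both word sizes. For the semantics that retry loop provides, here is an equivalent of test_and_set_bit() in portable C11 atomics — an illustration of the contract, not the kernel's implementation:

#include <stdatomic.h>
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* atomically set bit nr and report whether it was already set */
static int test_and_set_bit(unsigned int nr, atomic_ulong *addr)
{
	atomic_ulong *p = addr + nr / BITS_PER_LONG;
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);

	return (atomic_fetch_or(p, mask) & mask) != 0;
}

int main(void)
{
	atomic_ulong map[2] = { 0, 0 };

	printf("%d\n", test_and_set_bit(5, map));	/* 0: was clear */
	printf("%d\n", test_and_set_bit(5, map));	/* 1: already set */
	return 0;
}
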
diff --git a/arch/ppc64/Makefile b/arch/ppc64/Makefile
index fdbd6f44adc0..a55a82d145d4 100644
--- a/arch/ppc64/Makefile
+++ b/arch/ppc64/Makefile
@@ -86,7 +86,6 @@ head-y := arch/ppc64/kernel/head.o
86head-y += arch/powerpc/kernel/fpu.o 86head-y += arch/powerpc/kernel/fpu.o
87head-y += arch/powerpc/kernel/entry_64.o 87head-y += arch/powerpc/kernel/entry_64.o
88 88
89libs-y += arch/ppc64/lib/
90core-y += arch/ppc64/kernel/ arch/powerpc/kernel/ 89core-y += arch/ppc64/kernel/ arch/powerpc/kernel/
91core-y += arch/powerpc/mm/ 90core-y += arch/powerpc/mm/
92core-y += arch/powerpc/sysdev/ 91core-y += arch/powerpc/sysdev/
diff --git a/arch/ppc64/kernel/Makefile b/arch/ppc64/kernel/Makefile
index 327c08ce4291..990df0905c87 100644
--- a/arch/ppc64/kernel/Makefile
+++ b/arch/ppc64/kernel/Makefile
@@ -13,7 +13,7 @@ endif
13 13
14obj-y += irq.o idle.o dma.o \ 14obj-y += irq.o idle.o dma.o \
15 signal.o \ 15 signal.o \
16 align.o bitops.o pacaData.o \ 16 align.o pacaData.o \
17 udbg.o ioctl32.o \ 17 udbg.o ioctl32.o \
18 rtc.o \ 18 rtc.o \
19 cpu_setup_power4.o \ 19 cpu_setup_power4.o \
@@ -31,9 +31,6 @@ endif
31 31
32obj-$(CONFIG_PPC_PSERIES) += rtasd.o udbg_16550.o 32obj-$(CONFIG_PPC_PSERIES) += rtasd.o udbg_16550.o
33 33
34obj-$(CONFIG_PPC_BPA) += bpa_setup.o bpa_iommu.o bpa_nvram.o \
35 bpa_iic.o spider-pic.o
36
37obj-$(CONFIG_KEXEC) += machine_kexec.o 34obj-$(CONFIG_KEXEC) += machine_kexec.o
38obj-$(CONFIG_EEH) += eeh.o 35obj-$(CONFIG_EEH) += eeh.o
39obj-$(CONFIG_PROC_FS) += proc_ppc64.o 36obj-$(CONFIG_PROC_FS) += proc_ppc64.o
diff --git a/arch/ppc64/kernel/irq.c b/arch/ppc64/kernel/irq.c
index 310931dbd4ae..87474584033f 100644
--- a/arch/ppc64/kernel/irq.c
+++ b/arch/ppc64/kernel/irq.c
@@ -392,7 +392,7 @@ int virt_irq_create_mapping(unsigned int real_irq)
392 if (ppc64_interrupt_controller == IC_OPEN_PIC) 392 if (ppc64_interrupt_controller == IC_OPEN_PIC)
393 return real_irq; /* no mapping for openpic (for now) */ 393 return real_irq; /* no mapping for openpic (for now) */
394 394
395 if (ppc64_interrupt_controller == IC_BPA_IIC) 395 if (ppc64_interrupt_controller == IC_CELL_PIC)
396 return real_irq; /* no mapping for iic either */ 396 return real_irq; /* no mapping for iic either */
397 397
398 /* don't map interrupts < MIN_VIRT_IRQ */ 398 /* don't map interrupts < MIN_VIRT_IRQ */
diff --git a/arch/ppc64/kernel/proc_ppc64.c b/arch/ppc64/kernel/proc_ppc64.c
index a87c66a9652a..24e955ee9487 100644
--- a/arch/ppc64/kernel/proc_ppc64.c
+++ b/arch/ppc64/kernel/proc_ppc64.c
@@ -53,7 +53,7 @@ static int __init proc_ppc64_create(void)
53 if (!root) 53 if (!root)
54 return 1; 54 return 1;
55 55
56 if (!(systemcfg->platform & (PLATFORM_PSERIES | PLATFORM_BPA))) 56 if (!(systemcfg->platform & (PLATFORM_PSERIES | PLATFORM_CELL)))
57 return 0; 57 return 0;
58 58
59 if (!proc_mkdir("rtas", root)) 59 if (!proc_mkdir("rtas", root))
diff --git a/arch/ppc64/kernel/prom_init.c b/arch/ppc64/kernel/prom_init.c
index 69924ba4d7d9..a4bbca6dbb8b 100644
--- a/arch/ppc64/kernel/prom_init.c
+++ b/arch/ppc64/kernel/prom_init.c
@@ -1939,9 +1939,9 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, unsigned long
1939 prom_send_capabilities(); 1939 prom_send_capabilities();
1940 1940
1941 /* 1941 /*
1942 * On pSeries and BPA, copy the CPU hold code 1942 * On pSeries and Cell, copy the CPU hold code
1943 */ 1943 */
1944 if (RELOC(of_platform) & (PLATFORM_PSERIES | PLATFORM_BPA)) 1944 if (RELOC(of_platform) & (PLATFORM_PSERIES | PLATFORM_CELL))
1945 copy_and_flush(0, KERNELBASE - offset, 0x100, 0); 1945 copy_and_flush(0, KERNELBASE - offset, 0x100, 0);
1946 1946
1947 /* 1947 /*
diff --git a/arch/ppc64/lib/Makefile b/arch/ppc64/lib/Makefile
deleted file mode 100644
index 42d5295bf345..000000000000
--- a/arch/ppc64/lib/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
1#
2# Makefile for ppc64-specific library files..
3#
4
5lib-y := string.o
diff --git a/arch/ppc64/lib/string.S b/arch/ppc64/lib/string.S
deleted file mode 100644
index e21a0038a4d6..000000000000
--- a/arch/ppc64/lib/string.S
+++ /dev/null
@@ -1,179 +0,0 @@
1/*
2 * String handling functions for PowerPC.
3 *
4 * Copyright (C) 1996 Paul Mackerras.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <asm/processor.h>
12#include <asm/errno.h>
13#include <asm/ppc_asm.h>
14
15_GLOBAL(strcpy)
16 addi r5,r3,-1
17 addi r4,r4,-1
181: lbzu r0,1(r4)
19 cmpwi 0,r0,0
20 stbu r0,1(r5)
21 bne 1b
22 blr
23
24_GLOBAL(strncpy)
25 cmpwi 0,r5,0
26 beqlr
27 mtctr r5
28 addi r6,r3,-1
29 addi r4,r4,-1
301: lbzu r0,1(r4)
31 cmpwi 0,r0,0
32 stbu r0,1(r6)
33 bdnzf 2,1b /* dec ctr, branch if ctr != 0 && !cr0.eq */
34 blr
35
36_GLOBAL(strcat)
37 addi r5,r3,-1
38 addi r4,r4,-1
391: lbzu r0,1(r5)
40 cmpwi 0,r0,0
41 bne 1b
42 addi r5,r5,-1
431: lbzu r0,1(r4)
44 cmpwi 0,r0,0
45 stbu r0,1(r5)
46 bne 1b
47 blr
48
49_GLOBAL(strcmp)
50 addi r5,r3,-1
51 addi r4,r4,-1
521: lbzu r3,1(r5)
53 cmpwi 1,r3,0
54 lbzu r0,1(r4)
55 subf. r3,r0,r3
56 beqlr 1
57 beq 1b
58 blr
59
60_GLOBAL(strlen)
61 addi r4,r3,-1
621: lbzu r0,1(r4)
63 cmpwi 0,r0,0
64 bne 1b
65 subf r3,r3,r4
66 blr
67
68_GLOBAL(memcmp)
69 cmpwi 0,r5,0
70 ble- 2f
71 mtctr r5
72 addi r6,r3,-1
73 addi r4,r4,-1
741: lbzu r3,1(r6)
75 lbzu r0,1(r4)
76 subf. r3,r0,r3
77 bdnzt 2,1b
78 blr
792: li r3,0
80 blr
81
82_GLOBAL(memchr)
83 cmpwi 0,r5,0
84 ble- 2f
85 mtctr r5
86 addi r3,r3,-1
871: lbzu r0,1(r3)
88 cmpw 0,r0,r4
89 bdnzf 2,1b
90 beqlr
912: li r3,0
92 blr
93
94_GLOBAL(__clear_user)
95 addi r6,r3,-4
96 li r3,0
97 li r5,0
98 cmplwi 0,r4,4
99 blt 7f
100 /* clear a single word */
10111: stwu r5,4(r6)
102 beqlr
103 /* clear word sized chunks */
104 andi. r0,r6,3
105 add r4,r0,r4
106 subf r6,r0,r6
107 srwi r0,r4,2
108 andi. r4,r4,3
109 mtctr r0
110 bdz 7f
1111: stwu r5,4(r6)
112 bdnz 1b
113 /* clear byte sized chunks */
1147: cmpwi 0,r4,0
115 beqlr
116 mtctr r4
117 addi r6,r6,3
1188: stbu r5,1(r6)
119 bdnz 8b
120 blr
12190: mr r3,r4
122 blr
12391: mfctr r3
124 slwi r3,r3,2
125 add r3,r3,r4
126 blr
12792: mfctr r3
128 blr
129
130 .section __ex_table,"a"
131 .align 3
132 .llong 11b,90b
133 .llong 1b,91b
134 .llong 8b,92b
135 .text
136
137/* r3 = dst, r4 = src, r5 = count */
138_GLOBAL(__strncpy_from_user)
139 addi r6,r3,-1
140 addi r4,r4,-1
141 cmpwi 0,r5,0
142 beq 2f
143 mtctr r5
1441: lbzu r0,1(r4)
145 cmpwi 0,r0,0
146 stbu r0,1(r6)
147 bdnzf 2,1b /* dec ctr, branch if ctr != 0 && !cr0.eq */
148 beq 3f
1492: addi r6,r6,1
1503: subf r3,r3,r6
151 blr
15299: li r3,-EFAULT
153 blr
154
155 .section __ex_table,"a"
156 .align 3
157 .llong 1b,99b
158 .text
159
160/* r3 = str, r4 = len (> 0) */
161_GLOBAL(__strnlen_user)
162 addi r7,r3,-1
163 mtctr r4 /* ctr = len */
1641: lbzu r0,1(r7) /* get next byte */
165 cmpwi 0,r0,0
166 bdnzf 2,1b /* loop if --ctr != 0 && byte != 0 */
167 addi r7,r7,1
168 subf r3,r3,r7 /* number of bytes we have looked at */
169 beqlr /* return if we found a 0 byte */
170 cmpw 0,r3,r4 /* did we look at all len bytes? */
171 blt 99f /* if not, must have hit top */
172 addi r3,r4,1 /* return len + 1 to indicate no null found */
173 blr
17499: li r3,0 /* bad address, return 0 */
175 blr
176
177 .section __ex_table,"a"
178 .align 3
179 .llong 1b,99b
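
The deleted __strnlen_user above has a three-way contract that its comments spell out: the string length including the terminating NUL, something greater than len when no NUL was found within len bytes, and 0 on a bad address. A rough C rendering of that contract, with a hypothetical read_user_byte() helper standing in for the exception-table fixup the real assembly relies on:

	/* Hypothetical stand-in for a faulting user read: returns the
	 * byte, or -1 where the real code would take the __ex_table
	 * fixup path. */
	static int read_user_byte(const char *p)
	{
		return (unsigned char)*p;	/* sketch: never faults */
	}

	/* C rendering of __strnlen_user's return contract (sketch,
	 * ignoring the 'top' clamp of the real routine). */
	static long sketch_strnlen_user(const char *str, long len)
	{
		long i;

		for (i = 0; i < len; i++) {
			int c = read_user_byte(str + i);
			if (c < 0)
				return 0;	/* bad address */
			if (c == 0)
				return i + 1;	/* length incl. the NUL */
		}
		return len + 1;		/* no NUL within len bytes */
	}
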
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 91920a1140fa..9bc6cc6e3845 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -155,10 +155,10 @@ static spinlock_t pmu_lock;
155static u8 pmu_intr_mask; 155static u8 pmu_intr_mask;
156static int pmu_version; 156static int pmu_version;
157static int drop_interrupts; 157static int drop_interrupts;
158#ifdef CONFIG_PM 158#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
159static int option_lid_wakeup = 1; 159static int option_lid_wakeup = 1;
160static int sleep_in_progress; 160static int sleep_in_progress;
161#endif /* CONFIG_PM */ 161#endif /* CONFIG_PM && CONFIG_PPC32 */
162static unsigned long async_req_locks; 162static unsigned long async_req_locks;
163static unsigned int pmu_irq_stats[11]; 163static unsigned int pmu_irq_stats[11];
164 164
@@ -865,7 +865,7 @@ proc_read_options(char *page, char **start, off_t off,
865{ 865{
866 char *p = page; 866 char *p = page;
867 867
868#ifdef CONFIG_PM 868#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
869 if (pmu_kind == PMU_KEYLARGO_BASED && 869 if (pmu_kind == PMU_KEYLARGO_BASED &&
870 pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) >= 0) 870 pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) >= 0)
871 p += sprintf(p, "lid_wakeup=%d\n", option_lid_wakeup); 871 p += sprintf(p, "lid_wakeup=%d\n", option_lid_wakeup);
@@ -906,7 +906,7 @@ proc_write_options(struct file *file, const char __user *buffer,
906 *(val++) = 0; 906 *(val++) = 0;
907 while(*val == ' ') 907 while(*val == ' ')
908 val++; 908 val++;
909#ifdef CONFIG_PM 909#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
910 if (pmu_kind == PMU_KEYLARGO_BASED && 910 if (pmu_kind == PMU_KEYLARGO_BASED &&
911 pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) >= 0) 911 pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) >= 0)
912 if (!strcmp(label, "lid_wakeup")) 912 if (!strcmp(label, "lid_wakeup"))
@@ -2063,6 +2063,9 @@ pmu_unregister_sleep_notifier(struct pmu_sleep_notifier* n)
2063 n->list.next = NULL; 2063 n->list.next = NULL;
2064 return 0; 2064 return 0;
2065} 2065}
2066#endif /* CONFIG_PM */
2067
2068#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
2066 2069
2067/* Sleep is broadcast last-to-first */ 2070/* Sleep is broadcast last-to-first */
2068static int 2071static int
@@ -2687,7 +2690,7 @@ powerbook_sleep_3400(void)
2687 return 0; 2690 return 0;
2688} 2691}
2689 2692
2690#endif /* CONFIG_PM */ 2693#endif /* CONFIG_PM && CONFIG_PPC32 */
2691 2694
2692/* 2695/*
2693 * Support for /dev/pmu device 2696 * Support for /dev/pmu device
@@ -2871,7 +2874,7 @@ pmu_ioctl(struct inode * inode, struct file *filp,
2871 int error = -EINVAL; 2874 int error = -EINVAL;
2872 2875
2873 switch (cmd) { 2876 switch (cmd) {
2874#ifdef CONFIG_PM 2877#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
2875 case PMU_IOC_SLEEP: 2878 case PMU_IOC_SLEEP:
2876 if (!capable(CAP_SYS_ADMIN)) 2879 if (!capable(CAP_SYS_ADMIN))
2877 return -EACCES; 2880 return -EACCES;
@@ -2899,7 +2902,7 @@ pmu_ioctl(struct inode * inode, struct file *filp,
2899 return put_user(0, argp); 2902 return put_user(0, argp);
2900 else 2903 else
2901 return put_user(1, argp); 2904 return put_user(1, argp);
2902#endif /* CONFIG_PM */ 2905#endif /* CONFIG_PM && CONFIG_PPC32 */
2903 2906
2904#ifdef CONFIG_PMAC_BACKLIGHT 2907#ifdef CONFIG_PMAC_BACKLIGHT
2905 /* Backlight should have its own device or go via 2908 /* Backlight should have its own device or go via
@@ -3047,7 +3050,7 @@ pmu_polled_request(struct adb_request *req)
3047 * to do suspend-to-disk. 3050 * to do suspend-to-disk.
3048 */ 3051 */
3049 3052
3050#ifdef CONFIG_PM 3053#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
3051 3054
3052static int pmu_sys_suspended = 0; 3055static int pmu_sys_suspended = 0;
3053 3056
@@ -3082,7 +3085,7 @@ static int pmu_sys_resume(struct sys_device *sysdev)
3082 return 0; 3085 return 0;
3083} 3086}
3084 3087
3085#endif /* CONFIG_PM */ 3088#endif /* CONFIG_PM && CONFIG_PPC32 */
3086 3089
3087static struct sysdev_class pmu_sysclass = { 3090static struct sysdev_class pmu_sysclass = {
3088 set_kset_name("pmu"), 3091 set_kset_name("pmu"),
@@ -3094,10 +3097,10 @@ static struct sys_device device_pmu = {
3094}; 3097};
3095 3098
3096static struct sysdev_driver driver_pmu = { 3099static struct sysdev_driver driver_pmu = {
3097#ifdef CONFIG_PM 3100#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
3098 .suspend = &pmu_sys_suspend, 3101 .suspend = &pmu_sys_suspend,
3099 .resume = &pmu_sys_resume, 3102 .resume = &pmu_sys_resume,
3100#endif /* CONFIG_PM */ 3103#endif /* CONFIG_PM && CONFIG_PPC32 */
3101}; 3104};
3102 3105
3103static int __init init_pmu_sysfs(void) 3106static int __init init_pmu_sysfs(void)
@@ -3135,12 +3138,12 @@ EXPORT_SYMBOL(pmu_i2c_combined_read);
3135EXPORT_SYMBOL(pmu_i2c_stdsub_write); 3138EXPORT_SYMBOL(pmu_i2c_stdsub_write);
3136EXPORT_SYMBOL(pmu_i2c_simple_read); 3139EXPORT_SYMBOL(pmu_i2c_simple_read);
3137EXPORT_SYMBOL(pmu_i2c_simple_write); 3140EXPORT_SYMBOL(pmu_i2c_simple_write);
3138#ifdef CONFIG_PM 3141#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
3139EXPORT_SYMBOL(pmu_register_sleep_notifier); 3142EXPORT_SYMBOL(pmu_register_sleep_notifier);
3140EXPORT_SYMBOL(pmu_unregister_sleep_notifier); 3143EXPORT_SYMBOL(pmu_unregister_sleep_notifier);
3141EXPORT_SYMBOL(pmu_enable_irled); 3144EXPORT_SYMBOL(pmu_enable_irled);
3142EXPORT_SYMBOL(pmu_battery_count); 3145EXPORT_SYMBOL(pmu_battery_count);
3143EXPORT_SYMBOL(pmu_batteries); 3146EXPORT_SYMBOL(pmu_batteries);
3144EXPORT_SYMBOL(pmu_power_flags); 3147EXPORT_SYMBOL(pmu_power_flags);
3145#endif /* CONFIG_PM */ 3148#endif /* CONFIG_PM && CONFIG_PPC32 */
3146 3149
diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h
new file mode 100644
index 000000000000..dc25c53704d5
--- /dev/null
+++ b/include/asm-powerpc/bitops.h
@@ -0,0 +1,437 @@
1/*
2 * PowerPC atomic bit operations.
3 *
4 * Merged version by David Gibson <david@gibson.dropbear.id.au>.
5 * Based on ppc64 versions by: Dave Engebretsen, Todd Inglett, Don
6 * Reed, Pat McCarthy, Peter Bergner, Anton Blanchard. They
7 * originally took it from the ppc32 code.
8 *
9 * Within a word, bits are numbered LSB first. Lots of places make
10 * this assumption by directly testing bits with (val & (1<<nr)).
11 * This can cause confusion for large (> 1 word) bitmaps on a
12 * big-endian system because, unlike little endian, the number of each
13 * bit depends on the word size.
14 *
15 * The bitop functions are defined to work on unsigned longs, so for a
16 * ppc64 system the bits end up numbered:
17 * |63..............0|127............64|191...........128|255...........192|
18 * and on ppc32:
19 * |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
20 *
21 * There are a few little-endian macros used mostly for filesystem
22 * bitmaps, these work on similar bit arrays layouts, but
23 * byte-oriented:
24 * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
25 *
26 * The main difference is that bits 3-5 (64b) or 3-4 (32b) in the bit
27 * number field need to be reversed compared to the big-endian bit
28 * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b).
29 *
30 * This program is free software; you can redistribute it and/or
31 * modify it under the terms of the GNU General Public License
32 * as published by the Free Software Foundation; either version
33 * 2 of the License, or (at your option) any later version.
34 */
35
36#ifndef _ASM_POWERPC_BITOPS_H
37#define _ASM_POWERPC_BITOPS_H
38
39#ifdef __KERNEL__
40
41#include <linux/compiler.h>
42#include <asm/atomic.h>
43#include <asm/synch.h>
44
45/*
46 * clear_bit doesn't imply a memory barrier
47 */
48#define smp_mb__before_clear_bit() smp_mb()
49#define smp_mb__after_clear_bit() smp_mb()
50
51#define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
52#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
53#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
54
55#ifdef CONFIG_PPC64
56#define LARXL "ldarx"
57#define STCXL "stdcx."
58#define CNTLZL "cntlzd"
59#else
60#define LARXL "lwarx"
61#define STCXL "stwcx."
62#define CNTLZL "cntlzw"
63#endif
64
65static __inline__ void set_bit(int nr, volatile unsigned long *addr)
66{
67 unsigned long old;
68 unsigned long mask = BITOP_MASK(nr);
69 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
70
71 __asm__ __volatile__(
72"1:" LARXL " %0,0,%3 # set_bit\n"
73 "or %0,%0,%2\n"
74 PPC405_ERR77(0,%3)
75 STCXL " %0,0,%3\n"
76 "bne- 1b"
77 : "=&r"(old), "=m"(*p)
78 : "r"(mask), "r"(p), "m"(*p)
79 : "cc" );
80}
81
82static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
83{
84 unsigned long old;
85 unsigned long mask = BITOP_MASK(nr);
86 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
87
88 __asm__ __volatile__(
89"1:" LARXL " %0,0,%3 # set_bit\n"
90 "andc %0,%0,%2\n"
91 PPC405_ERR77(0,%3)
92 STCXL " %0,0,%3\n"
93 "bne- 1b"
94 : "=&r"(old), "=m"(*p)
95 : "r"(mask), "r"(p), "m"(*p)
96 : "cc" );
97}
98
99static __inline__ void change_bit(int nr, volatile unsigned long *addr)
100{
101 unsigned long old;
102 unsigned long mask = BITOP_MASK(nr);
103 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
104
105 __asm__ __volatile__(
106"1:" LARXL " %0,0,%3 # set_bit\n"
107 "xor %0,%0,%2\n"
108 PPC405_ERR77(0,%3)
109 STCXL " %0,0,%3\n"
110 "bne- 1b"
111 : "=&r"(old), "=m"(*p)
112 : "r"(mask), "r"(p), "m"(*p)
113 : "cc" );
114}
115
116static __inline__ int test_and_set_bit(unsigned long nr,
117 volatile unsigned long *addr)
118{
119 unsigned long old, t;
120 unsigned long mask = BITOP_MASK(nr);
121 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
122
123 __asm__ __volatile__(
124 EIEIO_ON_SMP
125"1:" LARXL " %0,0,%3 # test_and_set_bit\n"
126 "or %1,%0,%2 \n"
127 PPC405_ERR77(0,%3)
128 STCXL " %1,0,%3 \n"
129 "bne- 1b"
130 ISYNC_ON_SMP
131 : "=&r" (old), "=&r" (t)
132 : "r" (mask), "r" (p)
133 : "cc", "memory");
134
135 return (old & mask) != 0;
136}
137
138static __inline__ int test_and_clear_bit(unsigned long nr,
139 volatile unsigned long *addr)
140{
141 unsigned long old, t;
142 unsigned long mask = BITOP_MASK(nr);
143 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
144
145 __asm__ __volatile__(
146 EIEIO_ON_SMP
147"1:" LARXL " %0,0,%3 # test_and_clear_bit\n"
148 "andc %1,%0,%2 \n"
149 PPC405_ERR77(0,%3)
150 STCXL " %1,0,%3 \n"
151 "bne- 1b"
152 ISYNC_ON_SMP
153 : "=&r" (old), "=&r" (t)
154 : "r" (mask), "r" (p)
155 : "cc", "memory");
156
157 return (old & mask) != 0;
158}
159
160static __inline__ int test_and_change_bit(unsigned long nr,
161 volatile unsigned long *addr)
162{
163 unsigned long old, t;
164 unsigned long mask = BITOP_MASK(nr);
165 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
166
167 __asm__ __volatile__(
168 EIEIO_ON_SMP
169"1:" LARXL " %0,0,%3 # test_and_change_bit\n"
170 "xor %1,%0,%2 \n"
171 PPC405_ERR77(0,%3)
172 STCXL " %1,0,%3 \n"
173 "bne- 1b"
174 ISYNC_ON_SMP
175 : "=&r" (old), "=&r" (t)
176 : "r" (mask), "r" (p)
177 : "cc", "memory");
178
179 return (old & mask) != 0;
180}
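
Unlike the plain set_bit/clear_bit/change_bit above, the test_and_* variants are bracketed by EIEIO_ON_SMP and ISYNC_ON_SMP and therefore behave as full memory barriers, which is what lets a single bit serve as a crude lock. A usage sketch under that assumption (the bit number, the flags word and the busy-wait policy are illustrative, not part of this patch):

	#define MY_LOCK_BIT	0		/* illustrative */
	static unsigned long my_flags;		/* caller-owned word */

	static void my_lock(void)
	{
		/* acquire: spin until the bit was previously clear */
		while (test_and_set_bit(MY_LOCK_BIT, &my_flags))
			cpu_relax();
	}

	static void my_unlock(void)
	{
		/* clear_bit alone is no barrier (see the comment near
		 * smp_mb__before_clear_bit), so order the release */
		smp_mb__before_clear_bit();
		clear_bit(MY_LOCK_BIT, &my_flags);
	}
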
181
182static __inline__ void set_bits(unsigned long mask, unsigned long *addr)
183{
184 unsigned long old;
185
186 __asm__ __volatile__(
187"1:" LARXL " %0,0,%3 # set_bit\n"
188 "or %0,%0,%2\n"
189 STCXL " %0,0,%3\n"
190 "bne- 1b"
191 : "=&r" (old), "=m" (*addr)
192 : "r" (mask), "r" (addr), "m" (*addr)
193 : "cc");
194}
195
196/* Non-atomic versions */
197static __inline__ int test_bit(unsigned long nr,
198 __const__ volatile unsigned long *addr)
199{
200 return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
201}
202
203static __inline__ void __set_bit(unsigned long nr,
204 volatile unsigned long *addr)
205{
206 unsigned long mask = BITOP_MASK(nr);
207 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
208
209 *p |= mask;
210}
211
212static __inline__ void __clear_bit(unsigned long nr,
213 volatile unsigned long *addr)
214{
215 unsigned long mask = BITOP_MASK(nr);
216 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
217
218 *p &= ~mask;
219}
220
221static __inline__ void __change_bit(unsigned long nr,
222 volatile unsigned long *addr)
223{
224 unsigned long mask = BITOP_MASK(nr);
225 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
226
227 *p ^= mask;
228}
229
230static __inline__ int __test_and_set_bit(unsigned long nr,
231 volatile unsigned long *addr)
232{
233 unsigned long mask = BITOP_MASK(nr);
234 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
235 unsigned long old = *p;
236
237 *p = old | mask;
238 return (old & mask) != 0;
239}
240
241static __inline__ int __test_and_clear_bit(unsigned long nr,
242 volatile unsigned long *addr)
243{
244 unsigned long mask = BITOP_MASK(nr);
245 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
246 unsigned long old = *p;
247
248 *p = old & ~mask;
249 return (old & mask) != 0;
250}
251
252static __inline__ int __test_and_change_bit(unsigned long nr,
253 volatile unsigned long *addr)
254{
255 unsigned long mask = BITOP_MASK(nr);
256 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
257 unsigned long old = *p;
258
259 *p = old ^ mask;
260 return (old & mask) != 0;
261}
262
263/*
264 * Return the zero-based bit position (LE, not IBM bit numbering) of
265 * the most significant 1-bit in a double word.
266 */
267static __inline__ int __ilog2(unsigned long x)
268{
269 int lz;
270
271 asm (CNTLZL " %0,%1" : "=r" (lz) : "r" (x));
272 return BITS_PER_LONG - 1 - lz;
273}
274
275/*
276 * Determines the bit position of the least significant 0 bit in the
277 * specified unsigned long (word or doubleword). The returned bit
278 * position will be zero-based, starting from the right side (63/31 - 0).
279 */
280static __inline__ unsigned long ffz(unsigned long x)
281{
282 /* no zero exists anywhere in the word (4 or 8 bytes) */
283 if ((x = ~x) == 0)
284 return BITS_PER_LONG;
285
286 /*
287 * Calculate the bit position of the least significant '1' bit in x
288 * (since x has been changed this will actually be the least significant
289 * '0' bit in the original x). Note: (x & -x) gives us a mask that
290 * is the least significant (RIGHT-most) 1-bit of the value in x.
291 */
292 return __ilog2(x & -x);
293}
294
295static __inline__ int __ffs(unsigned long x)
296{
297 return __ilog2(x & -x);
298}
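
Both ffz and __ffs lean on the (x & -x) identity: in two's complement, -x inverts every bit above the lowest set bit and preserves that bit, so the AND isolates exactly one bit and __ilog2 reads off its position. A worked check in plain host-side C:

	#include <assert.h>

	int main(void)
	{
		unsigned long x = 0xb0;	/* bits 4, 5, 7 set */
		unsigned long inv = ~x;	/* ffz works on the inverse */

		assert((x & -x) == 0x10);	/* lowest set bit: 4 */
		assert((inv & -inv) == 0x1);	/* lowest zero bit of x: 0 */
		return 0;
	}
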
299
300/*
301 * ffs: find first bit set. This is defined the same way as
302 * the libc and compiler builtin ffs routines, therefore
303 * differs in spirit from the above ffz (man ffs).
304 */
305static __inline__ int ffs(int x)
306{
307 unsigned long i = (unsigned long)x;
308 return __ilog2(i & -i) + 1;
309}
310
311/*
312 * fls: find last (most-significant) bit set.
313 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
314 */
315static __inline__ int fls(unsigned int x)
316{
317 int lz;
318
319 asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
320 return 32 - lz;
321}
322
323/*
324 * hweightN: returns the Hamming weight (i.e. the number
325 * of bits set) of an N-bit word
326 */
327#define hweight64(x) generic_hweight64(x)
328#define hweight32(x) generic_hweight32(x)
329#define hweight16(x) generic_hweight16(x)
330#define hweight8(x) generic_hweight8(x)
331
332#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
333unsigned long find_next_zero_bit(const unsigned long *addr,
334 unsigned long size, unsigned long offset);
335/**
336 * find_first_bit - find the first set bit in a memory region
337 * @addr: The address to start the search at
338 * @size: The maximum size to search
339 *
340 * Returns the bit-number of the first set bit, not the number of the byte
341 * containing a bit.
342 */
343#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
344unsigned long find_next_bit(const unsigned long *addr,
345 unsigned long size, unsigned long offset);
346
347/* Little-endian versions */
348
349static __inline__ int test_le_bit(unsigned long nr,
350 __const__ unsigned long *addr)
351{
352 __const__ unsigned char *tmp = (__const__ unsigned char *) addr;
353 return (tmp[nr >> 3] >> (nr & 7)) & 1;
354}
355
356#define __set_le_bit(nr, addr) \
357 __set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
358#define __clear_le_bit(nr, addr) \
359 __clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
360
361#define test_and_set_le_bit(nr, addr) \
362 test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
363#define test_and_clear_le_bit(nr, addr) \
364 test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
365
366#define __test_and_set_le_bit(nr, addr) \
367 __test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
368#define __test_and_clear_le_bit(nr, addr) \
369 __test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
370
371#define find_first_zero_le_bit(addr, size) find_next_zero_le_bit((addr), (size), 0)
372unsigned long find_next_zero_le_bit(const unsigned long *addr,
373 unsigned long size, unsigned long offset);
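
The BITOP_LE_SWIZZLE XOR implements the byte reversal described in the header comment: with 64-bit longs it is 0x38, so little-endian bit 5 (bit 5 of the first byte in memory) maps to bit 61 of the big-endian long, which is the same in-byte position of the same stored byte. A small worked check (host-side C, assuming the 64-bit case):

	#include <assert.h>

	#define SK_BITS_PER_LONG 64	/* assuming ppc64 here */
	#define SK_LE_SWIZZLE ((SK_BITS_PER_LONG - 1) & ~0x7)

	int main(void)
	{
		int nr = 5;	/* LE bit 5: byte 0, bit 5 */

		assert(SK_LE_SWIZZLE == 0x38);
		/* value bit 61 sits in the most significant byte,
		 * which big-endian storage puts first in memory */
		assert((nr ^ SK_LE_SWIZZLE) == 61);
		assert(((nr ^ SK_LE_SWIZZLE) & 7) == (nr & 7));
		return 0;
	}
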
374
375/* Bitmap functions for the ext2 filesystem */
376
377#define ext2_set_bit(nr,addr) \
378 __test_and_set_le_bit((nr), (unsigned long*)addr)
379#define ext2_clear_bit(nr, addr) \
380 __test_and_clear_le_bit((nr), (unsigned long*)addr)
381
382#define ext2_set_bit_atomic(lock, nr, addr) \
383 test_and_set_le_bit((nr), (unsigned long*)addr)
384#define ext2_clear_bit_atomic(lock, nr, addr) \
385 test_and_clear_le_bit((nr), (unsigned long*)addr)
386
387#define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr)
388
389#define ext2_find_first_zero_bit(addr, size) \
390 find_first_zero_le_bit((unsigned long*)addr, size)
391#define ext2_find_next_zero_bit(addr, size, off) \
392 find_next_zero_le_bit((unsigned long*)addr, size, off)
393
394/* Bitmap functions for the minix filesystem. */
395
396#define minix_test_and_set_bit(nr,addr) \
397 __test_and_set_le_bit(nr, (unsigned long *)addr)
398#define minix_set_bit(nr,addr) \
399 __set_le_bit(nr, (unsigned long *)addr)
400#define minix_test_and_clear_bit(nr,addr) \
401 __test_and_clear_le_bit(nr, (unsigned long *)addr)
402#define minix_test_bit(nr,addr) \
403 test_le_bit(nr, (unsigned long *)addr)
404
405#define minix_find_first_zero_bit(addr,size) \
406 find_first_zero_le_bit((unsigned long *)addr, size)
407
408/*
409 * Every architecture must define this function. It's the fastest
410 * way of searching a 140-bit bitmap where the first 100 bits are
411 * unlikely to be set. It's guaranteed that at least one of the 140
412 * bits is cleared.
413 */
414static inline int sched_find_first_bit(const unsigned long *b)
415{
416#ifdef CONFIG_PPC64
417 if (unlikely(b[0]))
418 return __ffs(b[0]);
419 if (unlikely(b[1]))
420 return __ffs(b[1]) + 64;
421 return __ffs(b[2]) + 128;
422#else
423 if (unlikely(b[0]))
424 return __ffs(b[0]);
425 if (unlikely(b[1]))
426 return __ffs(b[1]) + 32;
427 if (unlikely(b[2]))
428 return __ffs(b[2]) + 64;
429 if (b[3])
430 return __ffs(b[3]) + 96;
431 return __ffs(b[4]) + 128;
432#endif
433}
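
The unrolling above follows directly from the bitmap size: 140 bits occupy ceil(140/64) = 3 unsigned longs on ppc64 and ceil(140/32) = 5 on ppc32, which is why the chain of checks ends at b[2] and b[4] respectively. The same arithmetic as a compile-time check (illustrative only; a negative array size forces an error if it ever breaks):

	#define SK_SCHED_BITS 140
	#define SK_WORDS(bits, wordsize) (((bits) + (wordsize) - 1) / (wordsize))

	typedef char sk_assert_words64[SK_WORDS(SK_SCHED_BITS, 64) == 3 ? 1 : -1];
	typedef char sk_assert_words32[SK_WORDS(SK_SCHED_BITS, 32) == 5 ? 1 : -1];
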
434
435#endif /* __KERNEL__ */
436
437#endif /* _ASM_POWERPC_BITOPS_H */
diff --git a/include/asm-powerpc/bug.h b/include/asm-powerpc/bug.h
index e4d028e87020..d625ee55f957 100644
--- a/include/asm-powerpc/bug.h
+++ b/include/asm-powerpc/bug.h
@@ -12,20 +12,16 @@
12#ifndef __ASSEMBLY__ 12#ifndef __ASSEMBLY__
13 13
14#ifdef __powerpc64__ 14#ifdef __powerpc64__
15#define BUG_TABLE_ENTRY(label, line, file, func) \ 15#define BUG_TABLE_ENTRY ".llong"
16 ".llong " #label "\n .long " #line "\n .llong " #file ", " #func "\n" 16#define BUG_TRAP_OP "tdnei"
17#define TRAP_OP(ra, rb) "1: tdnei " #ra ", " #rb "\n"
18#define DATA_TYPE long long
19#else 17#else
20#define BUG_TABLE_ENTRY(label, line, file, func) \ 18#define BUG_TABLE_ENTRY ".long"
21 ".long " #label ", " #line ", " #file ", " #func "\n" 19#define BUG_TRAP_OP "twnei"
22#define TRAP_OP(ra, rb) "1: twnei " #ra ", " #rb "\n"
23#define DATA_TYPE int
24#endif /* __powerpc64__ */ 20#endif /* __powerpc64__ */
25 21
26struct bug_entry { 22struct bug_entry {
27 unsigned long bug_addr; 23 unsigned long bug_addr;
28 int line; 24 long line;
29 const char *file; 25 const char *file;
30 const char *function; 26 const char *function;
31}; 27};
@@ -43,29 +39,29 @@ struct bug_entry *find_bug(unsigned long bugaddr);
43#define BUG() do { \ 39#define BUG() do { \
44 __asm__ __volatile__( \ 40 __asm__ __volatile__( \
45 "1: twi 31,0,0\n" \ 41 "1: twi 31,0,0\n" \
46 ".section __bug_table,\"a\"\n\t" \ 42 ".section __bug_table,\"a\"\n" \
47 BUG_TABLE_ENTRY(1b,%0,%1,%2) \ 43 "\t"BUG_TABLE_ENTRY" 1b,%0,%1,%2\n" \
48 ".previous" \ 44 ".previous" \
49 : : "i" (__LINE__), "i" (__FILE__), "i" (__FUNCTION__)); \ 45 : : "i" (__LINE__), "i" (__FILE__), "i" (__FUNCTION__)); \
50} while (0) 46} while (0)
51 47
52#define BUG_ON(x) do { \ 48#define BUG_ON(x) do { \
53 __asm__ __volatile__( \ 49 __asm__ __volatile__( \
54 TRAP_OP(%0,0) \ 50 "1: "BUG_TRAP_OP" %0,0\n" \
55 ".section __bug_table,\"a\"\n\t" \ 51 ".section __bug_table,\"a\"\n" \
56 BUG_TABLE_ENTRY(1b,%1,%2,%3) \ 52 "\t"BUG_TABLE_ENTRY" 1b,%1,%2,%3\n" \
57 ".previous" \ 53 ".previous" \
58 : : "r" ((DATA_TYPE)(x)), "i" (__LINE__), \ 54 : : "r" ((long)(x)), "i" (__LINE__), \
59 "i" (__FILE__), "i" (__FUNCTION__)); \ 55 "i" (__FILE__), "i" (__FUNCTION__)); \
60} while (0) 56} while (0)
61 57
62#define WARN_ON(x) do { \ 58#define WARN_ON(x) do { \
63 __asm__ __volatile__( \ 59 __asm__ __volatile__( \
64 TRAP_OP(%0,0) \ 60 "1: "BUG_TRAP_OP" %0,0\n" \
65 ".section __bug_table,\"a\"\n\t" \ 61 ".section __bug_table,\"a\"\n" \
66 BUG_TABLE_ENTRY(1b,%1,%2,%3) \ 62 "\t"BUG_TABLE_ENTRY" 1b,%1,%2,%3\n" \
67 ".previous" \ 63 ".previous" \
68 : : "r" ((DATA_TYPE)(x)), \ 64 : : "r" ((long)(x)), \
69 "i" (__LINE__ + BUG_WARNING_TRAP), \ 65 "i" (__LINE__ + BUG_WARNING_TRAP), \
70 "i" (__FILE__), "i" (__FUNCTION__)); \ 66 "i" (__FILE__), "i" (__FUNCTION__)); \
71} while (0) 67} while (0)
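
After this cleanup BUG_ON plants one conditional trap plus one __bug_table record, and the trap handler later maps the trapping address back to file/line/function through find_bug(). Expanding the new macro by hand for ppc32 gives roughly the following (a hand expansion for illustration, not new code in the patch):

	/* BUG_ON(x) on ppc32, macro expanded by hand (sketch).
	 * twnei traps when the register is non-zero; the table entry
	 * records label 1's address with line/file/function. */
	__asm__ __volatile__(
		"1:	twnei %0,0\n"
		".section __bug_table,\"a\"\n"
		"\t.long 1b,%1,%2,%3\n"
		".previous"
		: : "r" ((long)(x)), "i" (__LINE__),
		    "i" (__FILE__), "i" (__FUNCTION__));
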
diff --git a/include/asm-ppc64/futex.h b/include/asm-powerpc/futex.h
index 266b460de44e..37c94e52ab6d 100644
--- a/include/asm-ppc64/futex.h
+++ b/include/asm-powerpc/futex.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_FUTEX_H 1#ifndef _ASM_POWERPC_FUTEX_H
2#define _ASM_FUTEX_H 2#define _ASM_POWERPC_FUTEX_H
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5 5
@@ -7,28 +7,29 @@
7#include <asm/errno.h> 7#include <asm/errno.h>
8#include <asm/synch.h> 8#include <asm/synch.h>
9#include <asm/uaccess.h> 9#include <asm/uaccess.h>
10#include <asm/ppc_asm.h>
10 11
11#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ 12#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
12 __asm__ __volatile (SYNC_ON_SMP \ 13 __asm__ __volatile ( \
13"1: lwarx %0,0,%2\n" \ 14 SYNC_ON_SMP \
14 insn \ 15"1: lwarx %0,0,%2\n" \
15"2: stwcx. %1,0,%2\n\ 16 insn \
16 bne- 1b\n\ 17"2: stwcx. %1,0,%2\n" \
17 li %1,0\n\ 18 "bne- 1b\n" \
183: .section .fixup,\"ax\"\n\ 19 "li %1,0\n" \
194: li %1,%3\n\ 20"3: .section .fixup,\"ax\"\n" \
20 b 3b\n\ 21"4: li %1,%3\n" \
21 .previous\n\ 22 "b 3b\n" \
22 .section __ex_table,\"a\"\n\ 23 ".previous\n" \
23 .align 3\n\ 24 ".section __ex_table,\"a\"\n" \
24 .llong 1b,4b,2b,4b\n\ 25 ".align 3\n" \
25 .previous" \ 26 DATAL " 1b,4b,2b,4b\n" \
26 : "=&r" (oldval), "=&r" (ret) \ 27 ".previous" \
27 : "b" (uaddr), "i" (-EFAULT), "1" (oparg) \ 28 : "=&r" (oldval), "=&r" (ret) \
29 : "b" (uaddr), "i" (-EFAULT), "1" (oparg) \
28 : "cr0", "memory") 30 : "cr0", "memory")
29 31
30static inline int 32static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
31futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
32{ 33{
33 int op = (encoded_op >> 28) & 7; 34 int op = (encoded_op >> 28) & 7;
34 int cmp = (encoded_op >> 24) & 15; 35 int cmp = (encoded_op >> 24) & 15;
@@ -79,5 +80,5 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
79 return ret; 80 return ret;
80} 81}
81 82
82#endif 83#endif /* __KERNEL__ */
83#endif 84#endif /* _ASM_POWERPC_FUTEX_H */
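
The decode at the top of futex_atomic_op_inuser packs four fields into encoded_op: op in bits 28-31 and cmp in bits 24-27, as the shifts above show, with the operation and comparison arguments in the lower bits. A sketch of the full decode; only the op/cmp lines are taken from this file, while the 12-bit sign-extended oparg/cmparg fields follow the generic futex encoding and are an assumption here:

	/* Sketch of the encoded_op layout (oparg/cmparg widths assumed). */
	struct futex_op_fields {
		int op, cmp, oparg, cmparg;
	};

	static struct futex_op_fields decode_futex_op(int encoded_op)
	{
		struct futex_op_fields d;

		d.op     = (encoded_op >> 28) & 7;	/* FUTEX_OP_SET, ... */
		d.cmp    = (encoded_op >> 24) & 15;	/* FUTEX_OP_CMP_EQ, ... */
		d.oparg  = (encoded_op << 8) >> 20;	/* sign-extended 12 bits */
		d.cmparg = (encoded_op << 20) >> 20;	/* sign-extended 12 bits */
		return d;
	}
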
diff --git a/include/asm-powerpc/ioctls.h b/include/asm-powerpc/ioctls.h
index 5b94ff489b8b..279a6229584b 100644
--- a/include/asm-powerpc/ioctls.h
+++ b/include/asm-powerpc/ioctls.h
@@ -62,6 +62,9 @@
62# define TIOCM_DSR 0x100 62# define TIOCM_DSR 0x100
63# define TIOCM_CD TIOCM_CAR 63# define TIOCM_CD TIOCM_CAR
64# define TIOCM_RI TIOCM_RNG 64# define TIOCM_RI TIOCM_RNG
65#define TIOCM_OUT1 0x2000
66#define TIOCM_OUT2 0x4000
67#define TIOCM_LOOP 0x8000
65 68
66#define TIOCGSOFTCAR 0x5419 69#define TIOCGSOFTCAR 0x5419
67#define TIOCSSOFTCAR 0x541A 70#define TIOCSSOFTCAR 0x541A
diff --git a/include/asm-powerpc/ipcbuf.h b/include/asm-powerpc/ipcbuf.h
new file mode 100644
index 000000000000..71382c1ec6e3
--- /dev/null
+++ b/include/asm-powerpc/ipcbuf.h
@@ -0,0 +1,33 @@
1#ifndef _ASM_POWERPC_IPCBUF_H
2#define _ASM_POWERPC_IPCBUF_H
3
4/*
5 * The ipc64_perm structure for the powerpc is identical to
6 * kern_ipc_perm as we have always had 32-bit UIDs and GIDs in the
7 * kernel. Note extra padding because this structure is passed back
8 * and forth between kernel and user space. Pad space is left for:
9 * - 1 32-bit value to fill up for 8-byte alignment
10 * - 2 miscellaneous 64-bit values
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 */
17
18#include <linux/types.h>
19
20struct ipc64_perm
21{
22 __kernel_key_t key;
23 __kernel_uid_t uid;
24 __kernel_gid_t gid;
25 __kernel_uid_t cuid;
26 __kernel_gid_t cgid;
27 __kernel_mode_t mode;
28 unsigned int seq;
29 unsigned int __pad1;
30 __u32 __unused[4];
31};
32
33#endif /* _ASM_POWERPC_IPCBUF_H */
diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h
index c7c3f912a3c2..b3935ea28fff 100644
--- a/include/asm-powerpc/irq.h
+++ b/include/asm-powerpc/irq.h
@@ -73,7 +73,7 @@ extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq);
73#define IC_INVALID 0 73#define IC_INVALID 0
74#define IC_OPEN_PIC 1 74#define IC_OPEN_PIC 1
75#define IC_PPC_XIC 2 75#define IC_PPC_XIC 2
76#define IC_BPA_IIC 3 76#define IC_CELL_PIC 3
77#define IC_ISERIES 4 77#define IC_ISERIES 4
78 78
79extern u64 ppc64_interrupt_controller; 79extern u64 ppc64_interrupt_controller;
diff --git a/include/asm-powerpc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h
index f99f2af82ca5..c534ca41224b 100644
--- a/include/asm-powerpc/ppc_asm.h
+++ b/include/asm-powerpc/ppc_asm.h
@@ -506,6 +506,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
506#else 506#else
507 #define __ASM_CONST(x) x##UL 507 #define __ASM_CONST(x) x##UL
508 #define ASM_CONST(x) __ASM_CONST(x) 508 #define ASM_CONST(x) __ASM_CONST(x)
509
510#ifdef CONFIG_PPC64
511#define DATAL ".llong"
512#else
513#define DATAL ".long"
514#endif
515
509#endif /* __ASSEMBLY__ */ 516#endif /* __ASSEMBLY__ */
510 517
511#endif /* _ASM_POWERPC_PPC_ASM_H */ 518#endif /* _ASM_POWERPC_PPC_ASM_H */
diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h
index eee954a001fd..1dc4bf7b52b3 100644
--- a/include/asm-powerpc/processor.h
+++ b/include/asm-powerpc/processor.h
@@ -70,7 +70,7 @@ extern unsigned char ucBoardRevMaj, ucBoardRevMin;
70#define PLATFORM_LPAR 0x0001 70#define PLATFORM_LPAR 0x0001
71#define PLATFORM_POWERMAC 0x0400 71#define PLATFORM_POWERMAC 0x0400
72#define PLATFORM_MAPLE 0x0500 72#define PLATFORM_MAPLE 0x0500
73#define PLATFORM_BPA 0x1000 73#define PLATFORM_CELL 0x1000
74 74
75/* Compatibility with drivers coming from PPC32 world */ 75/* Compatibility with drivers coming from PPC32 world */
76#define _machine (systemcfg->platform) 76#define _machine (systemcfg->platform)
diff --git a/include/asm-powerpc/rtas.h b/include/asm-powerpc/rtas.h
index 2c050332471d..d9fd7866927f 100644
--- a/include/asm-powerpc/rtas.h
+++ b/include/asm-powerpc/rtas.h
@@ -171,6 +171,9 @@ struct flash_block_list_header { /* just the header of flash_block_list */
171 struct flash_block_list *next; 171 struct flash_block_list *next;
172}; 172};
173extern struct flash_block_list_header rtas_firmware_flash_list; 173extern struct flash_block_list_header rtas_firmware_flash_list;
174void rtas_fw_restart(char *cmd);
175void rtas_fw_power_off(void);
176void rtas_fw_halt(void);
174 177
175extern struct rtas_t rtas; 178extern struct rtas_t rtas;
176 179
diff --git a/include/asm-powerpc/termios.h b/include/asm-powerpc/termios.h
index c5b8e5358f83..7f80a019b6a0 100644
--- a/include/asm-powerpc/termios.h
+++ b/include/asm-powerpc/termios.h
@@ -94,142 +94,9 @@ struct termio {
94#define INIT_C_CC "\003\034\177\025\004\001\000\000\000\000\027\022\032\021\023\026\025" 94#define INIT_C_CC "\003\034\177\025\004\001\000\000\000\000\027\022\032\021\023\026\025"
95#endif 95#endif
96 96
97#define FIOCLEX _IO('f', 1)
98#define FIONCLEX _IO('f', 2)
99#define FIOASYNC _IOW('f', 125, int)
100#define FIONBIO _IOW('f', 126, int)
101#define FIONREAD _IOR('f', 127, int)
102#define TIOCINQ FIONREAD
103
104#define TIOCGETP _IOR('t', 8, struct sgttyb)
105#define TIOCSETP _IOW('t', 9, struct sgttyb)
106#define TIOCSETN _IOW('t', 10, struct sgttyb) /* TIOCSETP wo flush */
107
108#define TIOCSETC _IOW('t', 17, struct tchars)
109#define TIOCGETC _IOR('t', 18, struct tchars)
110#define TCGETS _IOR('t', 19, struct termios)
111#define TCSETS _IOW('t', 20, struct termios)
112#define TCSETSW _IOW('t', 21, struct termios)
113#define TCSETSF _IOW('t', 22, struct termios)
114
115#define TCGETA _IOR('t', 23, struct termio)
116#define TCSETA _IOW('t', 24, struct termio)
117#define TCSETAW _IOW('t', 25, struct termio)
118#define TCSETAF _IOW('t', 28, struct termio)
119
120#define TCSBRK _IO('t', 29)
121#define TCXONC _IO('t', 30)
122#define TCFLSH _IO('t', 31)
123
124#define TIOCSWINSZ _IOW('t', 103, struct winsize)
125#define TIOCGWINSZ _IOR('t', 104, struct winsize)
126#define TIOCSTART _IO('t', 110) /* start output, like ^Q */
127#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */
128#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */
129
130#define TIOCGLTC _IOR('t', 116, struct ltchars)
131#define TIOCSLTC _IOW('t', 117, struct ltchars)
132#define TIOCSPGRP _IOW('t', 118, int)
133#define TIOCGPGRP _IOR('t', 119, int)
134
135#define TIOCEXCL 0x540C
136#define TIOCNXCL 0x540D
137#define TIOCSCTTY 0x540E
138
139#define TIOCSTI 0x5412
140#define TIOCMGET 0x5415
141#define TIOCMBIS 0x5416
142#define TIOCMBIC 0x5417
143#define TIOCMSET 0x5418
144#define TIOCGSOFTCAR 0x5419
145#define TIOCSSOFTCAR 0x541A
146#define TIOCLINUX 0x541C
147#define TIOCCONS 0x541D
148#define TIOCGSERIAL 0x541E
149#define TIOCSSERIAL 0x541F
150#define TIOCPKT 0x5420
151
152#define TIOCNOTTY 0x5422
153#define TIOCSETD 0x5423
154#define TIOCGETD 0x5424
155#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
156
157#define TIOCSERCONFIG 0x5453
158#define TIOCSERGWILD 0x5454
159#define TIOCSERSWILD 0x5455
160#define TIOCGLCKTRMIOS 0x5456
161#define TIOCSLCKTRMIOS 0x5457
162#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
163#define TIOCSERGETLSR 0x5459 /* Get line status register */
164#define TIOCSERGETMULTI 0x545A /* Get multiport config */
165#define TIOCSERSETMULTI 0x545B /* Set multiport config */
166
167#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
168#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
169
170/* Used for packet mode */
171#define TIOCPKT_DATA 0
172#define TIOCPKT_FLUSHREAD 1
173#define TIOCPKT_FLUSHWRITE 2
174#define TIOCPKT_STOP 4
175#define TIOCPKT_START 8
176#define TIOCPKT_NOSTOP 16
177#define TIOCPKT_DOSTOP 32
178
179/* modem lines */
180#define TIOCM_LE 0x001
181#define TIOCM_DTR 0x002
182#define TIOCM_RTS 0x004
183#define TIOCM_ST 0x008
184#define TIOCM_SR 0x010
185#define TIOCM_CTS 0x020
186#define TIOCM_CAR 0x040
187#define TIOCM_RNG 0x080
188#define TIOCM_DSR 0x100
189#define TIOCM_CD TIOCM_CAR
190#define TIOCM_RI TIOCM_RNG
191#define TIOCM_OUT1 0x2000
192#define TIOCM_OUT2 0x4000
193#define TIOCM_LOOP 0x8000
194
195/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
196#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
197
198#ifdef __KERNEL__ 97#ifdef __KERNEL__
199 98
200/* 99#include <asm-generic/termios.h>
201 * Translate a "termio" structure into a "termios". Ugh.
202 */
203#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
204 unsigned short __tmp; \
205 get_user(__tmp,&(termio)->x); \
206 (termios)->x = (0xffff0000 & (termios)->x) | __tmp; \
207}
208
209#define user_termio_to_kernel_termios(termios, termio) \
210({ \
211 SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
212 SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
213 SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
214 SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
215 copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
216})
217
218/*
219 * Translate a "termios" structure into a "termio". Ugh.
220 */
221#define kernel_termios_to_user_termio(termio, termios) \
222({ \
223 put_user((termios)->c_iflag, &(termio)->c_iflag); \
224 put_user((termios)->c_oflag, &(termio)->c_oflag); \
225 put_user((termios)->c_cflag, &(termio)->c_cflag); \
226 put_user((termios)->c_lflag, &(termio)->c_lflag); \
227 put_user((termios)->c_line, &(termio)->c_line); \
228 copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
229})
230
231#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
232#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
233 100
234#endif /* __KERNEL__ */ 101#endif /* __KERNEL__ */
235 102
diff --git a/include/asm-powerpc/uaccess.h b/include/asm-powerpc/uaccess.h
new file mode 100644
index 000000000000..33af730f0d19
--- /dev/null
+++ b/include/asm-powerpc/uaccess.h
@@ -0,0 +1,468 @@
1#ifndef _ARCH_POWERPC_UACCESS_H
2#define _ARCH_POWERPC_UACCESS_H
3
4#ifdef __KERNEL__
5#ifndef __ASSEMBLY__
6
7#include <linux/sched.h>
8#include <linux/errno.h>
9#include <asm/processor.h>
10
11#define VERIFY_READ 0
12#define VERIFY_WRITE 1
13
14/*
15 * The fs value determines whether argument validity checking should be
16 * performed or not. If get_fs() == USER_DS, checking is performed; with
17 * get_fs() == KERNEL_DS, checking is bypassed.
18 *
19 * For historical reasons, these macros are grossly misnamed.
20 *
21 * The fs/ds values are now the highest legal address in the "segment".
22 * This simplifies the checking in the routines below.
23 */
24
25#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
26
27#define KERNEL_DS MAKE_MM_SEG(~0UL)
28#ifdef __powerpc64__
29/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
30#define USER_DS MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
31#else
32#define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
33#endif
34
35#define get_ds() (KERNEL_DS)
36#define get_fs() (current->thread.fs)
37#define set_fs(val) (current->thread.fs = (val))
38
39#define segment_eq(a, b) ((a).seg == (b).seg)
40
41#ifdef __powerpc64__
42/*
43 * This check is sufficient because there is a large enough
44 * gap between user addresses and the kernel addresses
45 */
46#define __access_ok(addr, size, segment) \
47 (((addr) <= (segment).seg) && ((size) <= (segment).seg))
48
49#else
50
51#define __access_ok(addr, size, segment) \
52 (((addr) <= (segment).seg) && \
53 (((size) == 0) || (((size) - 1) <= ((segment).seg - (addr)))))
54
55#endif
56
57#define access_ok(type, addr, size) \
58 (__chk_user_ptr(addr), \
59 __access_ok((__force unsigned long)(addr), (size), get_fs()))
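
The two __access_ok variants differ because of overflow: on 64-bit the gap between user and kernel addresses is wide enough that addr and size can be range-checked independently, while on 32-bit addr + size can wrap, hence the (size - 1) <= seg - addr form. A worked check of the 32-bit form in host-side C (the segment limit value is hypothetical):

	#include <assert.h>

	/* The 32-bit __access_ok, restated standalone; seg is the
	 * highest legal user address (hypothetical value below). */
	static int sk_access_ok32(unsigned int addr, unsigned int size,
				  unsigned int seg)
	{
		return addr <= seg &&
		       (size == 0 || size - 1 <= seg - addr);
	}

	int main(void)
	{
		unsigned int seg = 0x7fffffffu;

		assert(sk_access_ok32(0x10000000u, 0x1000u, seg));
		/* runs 0x1000 bytes past the segment: rejected */
		assert(!sk_access_ok32(0x7ffff000u, 0x2000u, seg));
		/* addr + size wraps around to 0x1000: still rejected */
		assert(!sk_access_ok32(0x7ffff000u, 0x80002000u, seg));
		return 0;
	}
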
60
61/*
62 * The exception table consists of pairs of addresses: the first is the
63 * address of an instruction that is allowed to fault, and the second is
64 * the address at which the program should continue. No registers are
65 * modified, so it is entirely up to the continuation code to figure out
66 * what to do.
67 *
68 * All the routines below use bits of fixup code that are out of line
69 * with the main instruction path. This means when everything is well,
70 * we don't even have to jump over them. Further, they do not intrude
71 * on our cache or tlb entries.
72 */
73
74struct exception_table_entry {
75 unsigned long insn;
76 unsigned long fixup;
77};
78
79/*
80 * These are the main single-value transfer routines. They automatically
81 * use the right size if we just have the right pointer type.
82 *
83 * This gets kind of ugly. We want to return _two_ values in "get_user()"
84 * and yet we don't want to do any pointers, because that is too much
85 * of a performance impact. Thus we have a few rather ugly macros here,
86 * and hide all the ugliness from the user.
87 *
88 * The "__xxx" versions of the user access functions are versions that
89 * do not verify the address space, that must have been done previously
90 * with a separate "access_ok()" call (this is used when we do multiple
91 * accesses to the same area of user memory).
92 *
93 * As we use the same address space for kernel and user data on the
94 * PowerPC, we can just do these as direct assignments. (Of course, the
95 * exception handling means that it's no longer "just"...)
96 *
97 * The "user64" versions of the user access functions are versions that
98 * allow access of 64-bit data. The "get_user" functions do not
99 * properly handle 64-bit data because the value gets downcast to a long.
100 * The "put_user" functions already handle 64-bit data properly, but we add
101 * "user64" versions for completeness.
102 */
103#define get_user(x, ptr) \
104 __get_user_check((x), (ptr), sizeof(*(ptr)))
105#define put_user(x, ptr) \
106 __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
107
108#define __get_user(x, ptr) \
109 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
110#define __put_user(x, ptr) \
111 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
112#ifndef __powerpc64__
113#define __get_user64(x, ptr) \
114 __get_user64_nocheck((x), (ptr), sizeof(*(ptr)))
115#define __put_user64(x, ptr) __put_user(x, ptr)
116#endif
117
118#define __get_user_unaligned __get_user
119#define __put_user_unaligned __put_user
120
121extern long __put_user_bad(void);
122
123#ifdef __powerpc64__
124#define __EX_TABLE_ALIGN "3"
125#define __EX_TABLE_TYPE "llong"
126#else
127#define __EX_TABLE_ALIGN "2"
128#define __EX_TABLE_TYPE "long"
129#endif
130
131/*
132 * We don't tell gcc that we are accessing memory, but this is OK
133 * because we do not write to any memory gcc knows about, so there
134 * are no aliasing issues.
135 */
136#define __put_user_asm(x, addr, err, op) \
137 __asm__ __volatile__( \
138 "1: " op " %1,0(%2) # put_user\n" \
139 "2:\n" \
140 ".section .fixup,\"ax\"\n" \
141 "3: li %0,%3\n" \
142 " b 2b\n" \
143 ".previous\n" \
144 ".section __ex_table,\"a\"\n" \
145 " .align " __EX_TABLE_ALIGN "\n" \
146 " ."__EX_TABLE_TYPE" 1b,3b\n" \
147 ".previous" \
148 : "=r" (err) \
149 : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
150
151#ifdef __powerpc64__
152#define __put_user_asm2(x, ptr, retval) \
153 __put_user_asm(x, ptr, retval, "std")
154#else /* __powerpc64__ */
155#define __put_user_asm2(x, addr, err) \
156 __asm__ __volatile__( \
157 "1: stw %1,0(%2)\n" \
158 "2: stw %1+1,4(%2)\n" \
159 "3:\n" \
160 ".section .fixup,\"ax\"\n" \
161 "4: li %0,%3\n" \
162 " b 3b\n" \
163 ".previous\n" \
164 ".section __ex_table,\"a\"\n" \
165 " .align " __EX_TABLE_ALIGN "\n" \
166 " ." __EX_TABLE_TYPE " 1b,4b\n" \
167 " ." __EX_TABLE_TYPE " 2b,4b\n" \
168 ".previous" \
169 : "=r" (err) \
170 : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
171#endif /* __powerpc64__ */
172
173#define __put_user_size(x, ptr, size, retval) \
174do { \
175 retval = 0; \
176 switch (size) { \
177 case 1: __put_user_asm(x, ptr, retval, "stb"); break; \
178 case 2: __put_user_asm(x, ptr, retval, "sth"); break; \
179 case 4: __put_user_asm(x, ptr, retval, "stw"); break; \
180 case 8: __put_user_asm2(x, ptr, retval); break; \
181 default: __put_user_bad(); \
182 } \
183} while (0)
184
185#define __put_user_nocheck(x, ptr, size) \
186({ \
187 long __pu_err; \
188 might_sleep(); \
189 __chk_user_ptr(ptr); \
190 __put_user_size((x), (ptr), (size), __pu_err); \
191 __pu_err; \
192})
193
194#define __put_user_check(x, ptr, size) \
195({ \
196 long __pu_err = -EFAULT; \
197 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
198 might_sleep(); \
199 if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
200 __put_user_size((x), __pu_addr, (size), __pu_err); \
201 __pu_err; \
202})
203
204extern long __get_user_bad(void);
205
206#define __get_user_asm(x, addr, err, op) \
207 __asm__ __volatile__( \
208 "1: "op" %1,0(%2) # get_user\n" \
209 "2:\n" \
210 ".section .fixup,\"ax\"\n" \
211 "3: li %0,%3\n" \
212 " li %1,0\n" \
213 " b 2b\n" \
214 ".previous\n" \
215 ".section __ex_table,\"a\"\n" \
216 " .align "__EX_TABLE_ALIGN "\n" \
217 " ." __EX_TABLE_TYPE " 1b,3b\n" \
218 ".previous" \
219 : "=r" (err), "=r" (x) \
220 : "b" (addr), "i" (-EFAULT), "0" (err))
221
222#ifdef __powerpc64__
223#define __get_user_asm2(x, addr, err) \
224 __get_user_asm(x, addr, err, "ld")
225#else /* __powerpc64__ */
226#define __get_user_asm2(x, addr, err) \
227 __asm__ __volatile__( \
228 "1: lwz %1,0(%2)\n" \
229 "2: lwz %1+1,4(%2)\n" \
230 "3:\n" \
231 ".section .fixup,\"ax\"\n" \
232 "4: li %0,%3\n" \
233 " li %1,0\n" \
234 " li %1+1,0\n" \
235 " b 3b\n" \
236 ".previous\n" \
237 ".section __ex_table,\"a\"\n" \
238 " .align " __EX_TABLE_ALIGN "\n" \
239 " ." __EX_TABLE_TYPE " 1b,4b\n" \
240 " ." __EX_TABLE_TYPE " 2b,4b\n" \
241 ".previous" \
242 : "=r" (err), "=&r" (x) \
243 : "b" (addr), "i" (-EFAULT), "0" (err))
244#endif /* __powerpc64__ */
245
246#define __get_user_size(x, ptr, size, retval) \
247do { \
248 retval = 0; \
249 __chk_user_ptr(ptr); \
250 if (size > sizeof(x)) \
251 (x) = __get_user_bad(); \
252 switch (size) { \
253 case 1: __get_user_asm(x, ptr, retval, "lbz"); break; \
254 case 2: __get_user_asm(x, ptr, retval, "lhz"); break; \
255 case 4: __get_user_asm(x, ptr, retval, "lwz"); break; \
256 case 8: __get_user_asm2(x, ptr, retval); break; \
257 default: (x) = __get_user_bad(); \
258 } \
259} while (0)
260
261#define __get_user_nocheck(x, ptr, size) \
262({ \
263 long __gu_err; \
264 unsigned long __gu_val; \
265 __chk_user_ptr(ptr); \
266 might_sleep(); \
267 __get_user_size(__gu_val, (ptr), (size), __gu_err); \
268 (x) = (__typeof__(*(ptr)))__gu_val; \
269 __gu_err; \
270})
271
272#ifndef __powerpc64__
273#define __get_user64_nocheck(x, ptr, size) \
274({ \
275 long __gu_err; \
276 long long __gu_val; \
277 __chk_user_ptr(ptr); \
278 might_sleep(); \
279 __get_user_size(__gu_val, (ptr), (size), __gu_err); \
280 (x) = (__typeof__(*(ptr)))__gu_val; \
281 __gu_err; \
282})
283#endif /* __powerpc64__ */
284
285#define __get_user_check(x, ptr, size) \
286({ \
287 long __gu_err = -EFAULT; \
288 unsigned long __gu_val = 0; \
289 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
290 might_sleep(); \
291 if (access_ok(VERIFY_READ, __gu_addr, (size))) \
292 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
293 (x) = (__typeof__(*(ptr)))__gu_val; \
294 __gu_err; \
295})
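
Putting the pieces together: get_user runs access_ok, picks a load by operand size in __get_user_size, and yields 0 or -EFAULT, with the exception-table fixup zeroing the destination on a fault. A typical caller, sketched (the function and its ioctl-style context are illustrative, not from this patch):

	/* Usage sketch of get_user/put_user as defined above; 'arg'
	 * would be a user pointer handed in by e.g. an ioctl. */
	static int sk_double_user_value(int __user *arg)
	{
		int val;

		if (get_user(val, arg))	/* 0 on success, -EFAULT on fault */
			return -EFAULT;
		val *= 2;
		return put_user(val, arg); /* same checks on the store */
	}
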
296
297/* more complex routines */
298
299extern unsigned long __copy_tofrom_user(void __user *to,
300 const void __user *from, unsigned long size);
301
302#ifndef __powerpc64__
303
304extern inline unsigned long copy_from_user(void *to,
305 const void __user *from, unsigned long n)
306{
307 unsigned long over;
308
309 if (access_ok(VERIFY_READ, from, n))
310 return __copy_tofrom_user((__force void __user *)to, from, n);
311 if ((unsigned long)from < TASK_SIZE) {
312 over = (unsigned long)from + n - TASK_SIZE;
313 return __copy_tofrom_user((__force void __user *)to, from,
314 n - over) + over;
315 }
316 return n;
317}
318
319extern inline unsigned long copy_to_user(void __user *to,
320 const void *from, unsigned long n)
321{
322 unsigned long over;
323
324 if (access_ok(VERIFY_WRITE, to, n))
325 return __copy_tofrom_user(to, (__force void __user *)from, n);
326 if ((unsigned long)to < TASK_SIZE) {
327 over = (unsigned long)to + n - TASK_SIZE;
328 return __copy_tofrom_user(to, (__force void __user *)from,
329 n - over) + over;
330 }
331 return n;
332}
333
334#else /* __powerpc64__ */
335
336#define __copy_in_user(to, from, size) \
337 __copy_tofrom_user((to), (from), (size))
338
339extern unsigned long copy_from_user(void *to, const void __user *from,
340 unsigned long n);
341extern unsigned long copy_to_user(void __user *to, const void *from,
342 unsigned long n);
343extern unsigned long copy_in_user(void __user *to, const void __user *from,
344 unsigned long n);
345
346#endif /* __powerpc64__ */
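
The 32-bit copy_from_user/copy_to_user fallbacks above keep the traditional partial-copy semantics: when the range straddles TASK_SIZE, only the in-bounds prefix is copied and the number of uncopied bytes is returned. Worked numbers for the straddle arithmetic, with a hypothetical TASK_SIZE:

	#include <assert.h>

	int main(void)
	{
		unsigned long task_size = 0x80000000ul;	/* hypothetical */
		unsigned long to = 0x7ffffff0ul, n = 0x20ul;
		unsigned long over = to + n - task_size;

		assert(over == 0x10);	  /* bytes past the segment */
		assert(n - over == 0x10); /* bytes actually attempted */
		return 0;
	}
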
347
348static inline unsigned long __copy_from_user_inatomic(void *to,
349 const void __user *from, unsigned long n)
350{
351 if (__builtin_constant_p(n) && (n <= 8)) {
352 unsigned long ret;
353
354 switch (n) {
355 case 1:
356 __get_user_size(*(u8 *)to, from, 1, ret);
357 break;
358 case 2:
359 __get_user_size(*(u16 *)to, from, 2, ret);
360 break;
361 case 4:
362 __get_user_size(*(u32 *)to, from, 4, ret);
363 break;
364 case 8:
365 __get_user_size(*(u64 *)to, from, 8, ret);
366 break;
367 }
368 if (ret == 0)
369 return 0;
370 }
371 return __copy_tofrom_user((__force void __user *)to, from, n);
372}
373
374static inline unsigned long __copy_to_user_inatomic(void __user *to,
375 const void *from, unsigned long n)
376{
377 if (__builtin_constant_p(n) && (n <= 8)) {
378 unsigned long ret;
379
380 switch (n) {
381 case 1:
382 __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
383 break;
384 case 2:
385 __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
386 break;
387 case 4:
388 __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
389 break;
390 case 8:
391 __put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
392 break;
393 }
394 if (ret == 0)
395 return 0;
396 }
397 return __copy_tofrom_user(to, (__force const void __user *)from, n);
398}
399
400static inline unsigned long __copy_from_user(void *to,
401 const void __user *from, unsigned long size)
402{
403 might_sleep();
404 return __copy_from_user_inatomic(to, from, size);
405}
406
407static inline unsigned long __copy_to_user(void __user *to,
408 const void *from, unsigned long size)
409{
410 might_sleep();
411 return __copy_to_user_inatomic(to, from, size);
412}
413
414extern unsigned long __clear_user(void __user *addr, unsigned long size);
415
416static inline unsigned long clear_user(void __user *addr, unsigned long size)
417{
418 might_sleep();
419 if (likely(access_ok(VERIFY_WRITE, addr, size)))
420 return __clear_user(addr, size);
421 if ((unsigned long)addr < TASK_SIZE) {
422 unsigned long over = (unsigned long)addr + size - TASK_SIZE;
423 return __clear_user(addr, size - over) + over;
424 }
425 return size;
426}
427
428extern int __strncpy_from_user(char *dst, const char __user *src, long count);
429
430static inline long strncpy_from_user(char *dst, const char __user *src,
431 long count)
432{
433 might_sleep();
434 if (likely(access_ok(VERIFY_READ, src, 1)))
435 return __strncpy_from_user(dst, src, count);
436 return -EFAULT;
437}
438
439/*
440 * Return the size of a string (including the ending 0)
441 *
442 * Return 0 for error
443 */
444extern int __strnlen_user(const char __user *str, long len, unsigned long top);
445
446/*
447 * Returns the length of the string at str (including the null byte),
448 * or 0 if we hit a page we can't access,
449 * or something > len if we didn't find a null byte.
450 *
451 * The `top' parameter to __strnlen_user is to make sure that
452 * we can never overflow from the user area into kernel space.
453 */
454static inline int strnlen_user(const char __user *str, long len)
455{
456 unsigned long top = current->thread.fs.seg;
457
458 if ((unsigned long)str > top)
459 return 0;
460 return __strnlen_user(str, len, top);
461}
462
463#define strlen_user(str) strnlen_user((str), 0x7ffffffe)
464
465#endif /* __ASSEMBLY__ */
466#endif /* __KERNEL__ */
467
468#endif /* _ARCH_POWERPC_UACCESS_H */
diff --git a/include/asm-ppc/bitops.h b/include/asm-ppc/bitops.h
deleted file mode 100644
index e30f536fd830..000000000000
--- a/include/asm-ppc/bitops.h
+++ /dev/null
@@ -1,460 +0,0 @@
1/*
2 * bitops.h: Bit string operations on the ppc
3 */
4
5#ifdef __KERNEL__
6#ifndef _PPC_BITOPS_H
7#define _PPC_BITOPS_H
8
9#include <linux/config.h>
10#include <linux/compiler.h>
11#include <asm/byteorder.h>
12#include <asm/atomic.h>
13
14/*
15 * The test_and_*_bit operations are taken to imply a memory barrier
16 * on SMP systems.
17 */
18#ifdef CONFIG_SMP
19#define SMP_WMB "eieio\n"
20#define SMP_MB "\nsync"
21#else
22#define SMP_WMB
23#define SMP_MB
24#endif /* CONFIG_SMP */
25
26static __inline__ void set_bit(int nr, volatile unsigned long * addr)
27{
28 unsigned long old;
29 unsigned long mask = 1 << (nr & 0x1f);
30 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
31
32 __asm__ __volatile__("\n\
331: lwarx %0,0,%3 \n\
34 or %0,%0,%2 \n"
35 PPC405_ERR77(0,%3)
36" stwcx. %0,0,%3 \n\
37 bne- 1b"
38 : "=&r" (old), "=m" (*p)
39 : "r" (mask), "r" (p), "m" (*p)
40 : "cc" );
41}
42
43/*
44 * non-atomic version
45 */
46static __inline__ void __set_bit(int nr, volatile unsigned long *addr)
47{
48 unsigned long mask = 1 << (nr & 0x1f);
49 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
50
51 *p |= mask;
52}
53
54/*
55 * clear_bit doesn't imply a memory barrier
56 */
57#define smp_mb__before_clear_bit() smp_mb()
58#define smp_mb__after_clear_bit() smp_mb()
59
60static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
61{
62 unsigned long old;
63 unsigned long mask = 1 << (nr & 0x1f);
64 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
65
66 __asm__ __volatile__("\n\
671: lwarx %0,0,%3 \n\
68 andc %0,%0,%2 \n"
69 PPC405_ERR77(0,%3)
70" stwcx. %0,0,%3 \n\
71 bne- 1b"
72 : "=&r" (old), "=m" (*p)
73 : "r" (mask), "r" (p), "m" (*p)
74 : "cc");
75}
76
77/*
78 * non-atomic version
79 */
80static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
81{
82 unsigned long mask = 1 << (nr & 0x1f);
83 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
84
85 *p &= ~mask;
86}
87
88static __inline__ void change_bit(int nr, volatile unsigned long *addr)
89{
90 unsigned long old;
91 unsigned long mask = 1 << (nr & 0x1f);
92 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
93
94 __asm__ __volatile__("\n\
951: lwarx %0,0,%3 \n\
96 xor %0,%0,%2 \n"
97 PPC405_ERR77(0,%3)
98" stwcx. %0,0,%3 \n\
99 bne- 1b"
100 : "=&r" (old), "=m" (*p)
101 : "r" (mask), "r" (p), "m" (*p)
102 : "cc");
103}
104
105/*
106 * non-atomic version
107 */
108static __inline__ void __change_bit(int nr, volatile unsigned long *addr)
109{
110 unsigned long mask = 1 << (nr & 0x1f);
111 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
112
113 *p ^= mask;
114}
115
116/*
117 * test_and_*_bit do imply a memory barrier (?)
118 */
119static __inline__ int test_and_set_bit(int nr, volatile unsigned long *addr)
120{
121 unsigned int old, t;
122 unsigned int mask = 1 << (nr & 0x1f);
123 volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
124
125 __asm__ __volatile__(SMP_WMB "\n\
1261: lwarx %0,0,%4 \n\
127 or %1,%0,%3 \n"
128 PPC405_ERR77(0,%4)
129" stwcx. %1,0,%4 \n\
130 bne 1b"
131 SMP_MB
132 : "=&r" (old), "=&r" (t), "=m" (*p)
133 : "r" (mask), "r" (p), "m" (*p)
134 : "cc", "memory");
135
136 return (old & mask) != 0;
137}
138
139/*
140 * non-atomic version
141 */
142static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr)
143{
144 unsigned long mask = 1 << (nr & 0x1f);
145 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
146 unsigned long old = *p;
147
148 *p = old | mask;
149 return (old & mask) != 0;
150}
151
152static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr)
153{
154 unsigned int old, t;
155 unsigned int mask = 1 << (nr & 0x1f);
156 volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
157
158 __asm__ __volatile__(SMP_WMB "\n\
1591: lwarx %0,0,%4 \n\
160 andc %1,%0,%3 \n"
161 PPC405_ERR77(0,%4)
162" stwcx. %1,0,%4 \n\
163 bne 1b"
164 SMP_MB
165 : "=&r" (old), "=&r" (t), "=m" (*p)
166 : "r" (mask), "r" (p), "m" (*p)
167 : "cc", "memory");
168
169 return (old & mask) != 0;
170}
171
172/*
173 * non-atomic version
174 */
175static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
176{
177 unsigned long mask = 1 << (nr & 0x1f);
178 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
179 unsigned long old = *p;
180
181 *p = old & ~mask;
182 return (old & mask) != 0;
183}
184
185static __inline__ int test_and_change_bit(int nr, volatile unsigned long *addr)
186{
187 unsigned int old, t;
188 unsigned int mask = 1 << (nr & 0x1f);
189 volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
190
191 __asm__ __volatile__(SMP_WMB "\n\
1921: lwarx %0,0,%4 \n\
193 xor %1,%0,%3 \n"
194 PPC405_ERR77(0,%4)
195" stwcx. %1,0,%4 \n\
196 bne 1b"
197 SMP_MB
198 : "=&r" (old), "=&r" (t), "=m" (*p)
199 : "r" (mask), "r" (p), "m" (*p)
200 : "cc", "memory");
201
202 return (old & mask) != 0;
203}
204
205/*
206 * non-atomic version
207 */
208static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr)
209{
210 unsigned long mask = 1 << (nr & 0x1f);
211 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
212 unsigned long old = *p;
213
214 *p = old ^ mask;
215 return (old & mask) != 0;
216}
217
218static __inline__ int test_bit(int nr, __const__ volatile unsigned long *addr)
219{
220 return ((addr[nr >> 5] >> (nr & 0x1f)) & 1) != 0;
221}
222
223/* Return the bit position of the most significant 1 bit in a word */
224static __inline__ int __ilog2(unsigned long x)
225{
226 int lz;
227
228 asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
229 return 31 - lz;
230}
231
232static __inline__ int ffz(unsigned long x)
233{
234 if ((x = ~x) == 0)
235 return 32;
236 return __ilog2(x & -x);
237}
238
239static inline int __ffs(unsigned long x)
240{
241 return __ilog2(x & -x);
242}
243
244/*
245 * ffs: find first bit set. This is defined the same way as
246 * the libc and compiler builtin ffs routines, therefore
247 * differs in spirit from the above ffz (man ffs).
248 */
249static __inline__ int ffs(int x)
250{
251 return __ilog2(x & -x) + 1;
252}
253
254/*
255 * fls: find last (most-significant) bit set.
256 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
257 */
258static __inline__ int fls(unsigned int x)
259{
260 int lz;
261
262 asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
263 return 32 - lz;
264}
265
266/*
267 * hweightN: returns the hamming weight (i.e. the number
268 * of bits set) of a N-bit word
269 */
270
271#define hweight32(x) generic_hweight32(x)
272#define hweight16(x) generic_hweight16(x)
273#define hweight8(x) generic_hweight8(x)
274
275/*
276 * Find the first bit set in a 140-bit bitmap.
277 * The first 100 bits are unlikely to be set.
278 */
279static inline int sched_find_first_bit(const unsigned long *b)
280{
281 if (unlikely(b[0]))
282 return __ffs(b[0]);
283 if (unlikely(b[1]))
284 return __ffs(b[1]) + 32;
285 if (unlikely(b[2]))
286 return __ffs(b[2]) + 64;
287 if (b[3])
288 return __ffs(b[3]) + 96;
289 return __ffs(b[4]) + 128;
290}
291
292/**
293 * find_next_bit - find the next set bit in a memory region
294 * @addr: The address to base the search on
295 * @size: The maximum size to search
296 * @offset: The bit number to start searching at
297 */
298static __inline__ unsigned long find_next_bit(const unsigned long *addr,
299 unsigned long size, unsigned long offset)
300{
301 unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
302 unsigned int result = offset & ~31UL;
303 unsigned int tmp;
304
305 if (offset >= size)
306 return size;
307 size -= result;
308 offset &= 31UL;
309 if (offset) {
310 tmp = *p++;
311 tmp &= ~0UL << offset;
312 if (size < 32)
313 goto found_first;
314 if (tmp)
315 goto found_middle;
316 size -= 32;
317 result += 32;
318 }
319 while (size >= 32) {
320 if ((tmp = *p++) != 0)
321 goto found_middle;
322 result += 32;
323 size -= 32;
324 }
325 if (!size)
326 return result;
327 tmp = *p;
328
329found_first:
330 tmp &= ~0UL >> (32 - size);
331 if (tmp == 0UL) /* Are any bits set? */
332 return result + size; /* Nope. */
333found_middle:
334 return result + __ffs(tmp);
335}
336
337/**
338 * find_first_bit - find the first set bit in a memory region
339 * @addr: The address to start the search at
340 * @size: The maximum size to search
341 *
342 * Returns the bit-number of the first set bit, not the number of the byte
343 * containing a bit.
344 */
345#define find_first_bit(addr, size) \
346 find_next_bit((addr), (size), 0)
347
348/*
349 * This implementation of find_{first,next}_zero_bit was stolen from
350 * Linus' asm-alpha/bitops.h.
351 */
352#define find_first_zero_bit(addr, size) \
353 find_next_zero_bit((addr), (size), 0)
354
355static __inline__ unsigned long find_next_zero_bit(const unsigned long *addr,
356 unsigned long size, unsigned long offset)
357{
358 unsigned int * p = ((unsigned int *) addr) + (offset >> 5);
359 unsigned int result = offset & ~31UL;
360 unsigned int tmp;
361
362 if (offset >= size)
363 return size;
364 size -= result;
365 offset &= 31UL;
366 if (offset) {
367 tmp = *p++;
368 tmp |= ~0UL >> (32-offset);
369 if (size < 32)
370 goto found_first;
371 if (tmp != ~0U)
372 goto found_middle;
373 size -= 32;
374 result += 32;
375 }
376 while (size >= 32) {
377 if ((tmp = *p++) != ~0U)
378 goto found_middle;
379 result += 32;
380 size -= 32;
381 }
382 if (!size)
383 return result;
384 tmp = *p;
385found_first:
386 tmp |= ~0UL << size;
387 if (tmp == ~0UL) /* Are any bits zero? */
388 return result + size; /* Nope. */
389found_middle:
390 return result + ffz(tmp);
391}
392
393
394#define ext2_set_bit(nr, addr) __test_and_set_bit((nr) ^ 0x18, (unsigned long *)(addr))
395#define ext2_set_bit_atomic(lock, nr, addr) test_and_set_bit((nr) ^ 0x18, (unsigned long *)(addr))
396#define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 0x18, (unsigned long *)(addr))
397#define ext2_clear_bit_atomic(lock, nr, addr) test_and_clear_bit((nr) ^ 0x18, (unsigned long *)(addr))
398
399static __inline__ int ext2_test_bit(int nr, __const__ void * addr)
400{
401 __const__ unsigned char *ADDR = (__const__ unsigned char *) addr;
402
403 return (ADDR[nr >> 3] >> (nr & 7)) & 1;
404}
405
406/*
407 * This implementation of ext2_find_{first,next}_zero_bit was stolen from
408 * Linus' asm-alpha/bitops.h and modified for a big-endian machine.
409 */
410
411#define ext2_find_first_zero_bit(addr, size) \
412 ext2_find_next_zero_bit((addr), (size), 0)
413
414static __inline__ unsigned long ext2_find_next_zero_bit(const void *addr,
415 unsigned long size, unsigned long offset)
416{
417 unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
418 unsigned int result = offset & ~31UL;
419 unsigned int tmp;
420
421 if (offset >= size)
422 return size;
423 size -= result;
424 offset &= 31UL;
425 if (offset) {
426 tmp = cpu_to_le32p(p++);
427 tmp |= ~0UL >> (32-offset);
428 if (size < 32)
429 goto found_first;
430 if (tmp != ~0U)
431 goto found_middle;
432 size -= 32;
433 result += 32;
434 }
435 while (size >= 32) {
436 if ((tmp = cpu_to_le32p(p++)) != ~0U)
437 goto found_middle;
438 result += 32;
439 size -= 32;
440 }
441 if (!size)
442 return result;
443 tmp = cpu_to_le32p(p);
444found_first:
445 tmp |= ~0U << size;
446 if (tmp == ~0UL) /* Are any bits zero? */
447 return result + size; /* Nope. */
448found_middle:
449 return result + ffz(tmp);
450}
451
452/* Bitmap functions for the minix filesystem. */
453#define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr)
454#define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr))
455#define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
456#define minix_test_bit(nr,addr) ext2_test_bit(nr,addr)
457#define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size)
458
459#endif /* _PPC_BITOPS_H */
460#endif /* __KERNEL__ */
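
A quick sketch of the 32-bit indexing used throughout the header deleted above: bit nr lives in word nr >> 5 under mask 1 << (nr & 0x1f). The example bit number is arbitrary.

    #include <stdio.h>

    int main(void)
    {
        unsigned long bitmap[2] = { 0, 0 };
        int nr = 37;                             /* arbitrary example */
        unsigned long mask = 1UL << (nr & 0x1f); /* 1 << 5 = 0x20 */
        unsigned long *p = bitmap + (nr >> 5);   /* word 1 */

        *p |= mask;                              /* what __set_bit(37, bitmap) does */
        printf("word %d = %#lx\n", nr >> 5, bitmap[1]);  /* word 1 = 0x20 */
        return 0;
    }
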
diff --git a/include/asm-ppc/futex.h b/include/asm-ppc/futex.h
deleted file mode 100644
index 9feff4ce1424..000000000000
--- a/include/asm-ppc/futex.h
+++ /dev/null
@@ -1,53 +0,0 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#ifdef __KERNEL__
5
6#include <linux/futex.h>
7#include <asm/errno.h>
8#include <asm/uaccess.h>
9
10static inline int
11futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
12{
13 int op = (encoded_op >> 28) & 7;
14 int cmp = (encoded_op >> 24) & 15;
15 int oparg = (encoded_op << 8) >> 20;
16 int cmparg = (encoded_op << 20) >> 20;
17 int oldval = 0, ret;
18 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
19 oparg = 1 << oparg;
20
21 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
22 return -EFAULT;
23
24 inc_preempt_count();
25
26 switch (op) {
27 case FUTEX_OP_SET:
28 case FUTEX_OP_ADD:
29 case FUTEX_OP_OR:
30 case FUTEX_OP_ANDN:
31 case FUTEX_OP_XOR:
32 default:
33 ret = -ENOSYS;
34 }
35
36 dec_preempt_count();
37
38 if (!ret) {
39 switch (cmp) {
40 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
41 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
42 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
43 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
44 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
45 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
46 default: ret = -ENOSYS;
47 }
48 }
49 return ret;
50}
51
52#endif
53#endif
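
A sketch of how futex_atomic_op_inuser() above unpacks encoded_op; the paired left/right shifts sign-extend the 12-bit oparg and cmparg fields (this mirrors the kernel expressions and assumes gcc's arithmetic right shift on signed int). The sample value is illustrative.

    #include <stdio.h>

    int main(void)
    {
        int encoded_op = (1 << 28) | (3 << 24) | (0xfff << 12) | 7;
        int op     = (encoded_op >> 28) & 7;     /* 1 */
        int cmp    = (encoded_op >> 24) & 15;    /* 3 */
        int oparg  = (encoded_op << 8) >> 20;    /* -1: 0xfff sign-extended */
        int cmparg = (encoded_op << 20) >> 20;   /* 7 */

        printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
        return 0;
    }
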
diff --git a/include/asm-ppc/ipcbuf.h b/include/asm-ppc/ipcbuf.h
deleted file mode 100644
index fab6752c7480..000000000000
--- a/include/asm-ppc/ipcbuf.h
+++ /dev/null
@@ -1,29 +0,0 @@
1#ifndef __PPC_IPCBUF_H__
2#define __PPC_IPCBUF_H__
3
4/*
5 * The ipc64_perm structure for PPC architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 1 32-bit value to fill up for 8-byte alignment
11 * - 2 miscellaneous 64-bit values (so that this structure matches
12 * PPC64 ipc64_perm)
13 */
14
15struct ipc64_perm
16{
17 __kernel_key_t key;
18 __kernel_uid_t uid;
19 __kernel_gid_t gid;
20 __kernel_uid_t cuid;
21 __kernel_gid_t cgid;
22 __kernel_mode_t mode;
23 unsigned long seq;
24 unsigned int __pad2;
25 unsigned long long __unused1;
26 unsigned long long __unused2;
27};
28
29#endif /* __PPC_IPCBUF_H__ */
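
The padding comment above can be checked mechanically. A sketch using fixed-width stand-ins for the ppc32 typedefs (all assumed to be 4 bytes here, with unsigned long also 4 bytes on ppc32); the result matches the 48-byte PPC64 layout.

    #include <stdint.h>
    #include <stdio.h>

    struct ipc64_perm_sketch {
        int32_t  key;                    /* __kernel_key_t  */
        uint32_t uid, gid, cuid, cgid;   /* __kernel_uid_t / __kernel_gid_t */
        uint32_t mode;                   /* __kernel_mode_t */
        uint32_t seq;                    /* unsigned long on ppc32 */
        uint32_t pad2;
        uint64_t unused1, unused2;
    };

    int main(void)
    {
        printf("%zu\n", sizeof(struct ipc64_perm_sketch));  /* 48, 8-byte aligned */
        return 0;
    }
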
diff --git a/include/asm-ppc/uaccess.h b/include/asm-ppc/uaccess.h
deleted file mode 100644
index 63f56224da8c..000000000000
--- a/include/asm-ppc/uaccess.h
+++ /dev/null
@@ -1,393 +0,0 @@
1#ifdef __KERNEL__
2#ifndef _PPC_UACCESS_H
3#define _PPC_UACCESS_H
4
5#ifndef __ASSEMBLY__
6#include <linux/sched.h>
7#include <linux/errno.h>
8#include <asm/processor.h>
9
10#define VERIFY_READ 0
11#define VERIFY_WRITE 1
12
13/*
14 * The fs value determines whether argument validity checking should be
15 * performed or not. If get_fs() == USER_DS, checking is performed, with
16 * get_fs() == KERNEL_DS, checking is bypassed.
17 *
18 * For historical reasons, these macros are grossly misnamed.
19 *
20 * The fs/ds values are now the highest legal address in the "segment".
21 * This simplifies the checking in the routines below.
22 */
23
24#define KERNEL_DS ((mm_segment_t) { ~0UL })
25#define USER_DS ((mm_segment_t) { TASK_SIZE - 1 })
26
27#define get_ds() (KERNEL_DS)
28#define get_fs() (current->thread.fs)
29#define set_fs(val) (current->thread.fs = (val))
30
31#define segment_eq(a,b) ((a).seg == (b).seg)
32
33#define __access_ok(addr,size) \
34 ((addr) <= current->thread.fs.seg \
35 && ((size) == 0 || (size) - 1 <= current->thread.fs.seg - (addr)))
36
37#define access_ok(type, addr, size) \
38 (__chk_user_ptr(addr),__access_ok((unsigned long)(addr),(size)))
39
40/*
41 * The exception table consists of pairs of addresses: the first is the
42 * address of an instruction that is allowed to fault, and the second is
43 * the address at which the program should continue. No registers are
44 * modified, so it is entirely up to the continuation code to figure out
45 * what to do.
46 *
47 * All the routines below use bits of fixup code that are out of line
48 * with the main instruction path. This means when everything is well,
49 * we don't even have to jump over them. Further, they do not intrude
50 * on our cache or tlb entries.
51 */
52
53struct exception_table_entry
54{
55 unsigned long insn, fixup;
56};
57
58/*
59 * These are the main single-value transfer routines. They automatically
60 * use the right size if we just have the right pointer type.
61 *
62 * This gets kind of ugly. We want to return _two_ values in "get_user()"
63 * and yet we don't want to do any pointers, because that is too much
64 * of a performance impact. Thus we have a few rather ugly macros here,
65 * and hide all the ugliness from the user.
66 *
67 * The "__xxx" versions of the user access functions are versions that
68 * do not verify the address space, that must have been done previously
69 * with a separate "access_ok()" call (this is used when we do multiple
70 * accesses to the same area of user memory).
71 *
72 * As we use the same address space for kernel and user data on the
73 * PowerPC, we can just do these as direct assignments. (Of course, the
74 * exception handling means that it's no longer "just"...)
75 *
76 * The "user64" versions of the user access functions are versions that
77 * allow access of 64-bit data. The "get_user" functions do not
78 * properly handle 64-bit data because the value gets downcast to a long.
79 * The "put_user" functions already handle 64-bit data properly but we add
80 * "user64" versions for completeness.
81 */
82#define get_user(x,ptr) \
83 __get_user_check((x),(ptr),sizeof(*(ptr)))
84#define get_user64(x,ptr) \
85 __get_user64_check((x),(ptr),sizeof(*(ptr)))
86#define put_user(x,ptr) \
87 __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
88#define put_user64(x,ptr) put_user(x,ptr)
89
90#define __get_user(x,ptr) \
91 __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
92#define __get_user64(x,ptr) \
93 __get_user64_nocheck((x),(ptr),sizeof(*(ptr)))
94#define __put_user(x,ptr) \
95 __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
96#define __put_user64(x,ptr) __put_user(x,ptr)
97
98extern long __put_user_bad(void);
99
100#define __put_user_nocheck(x,ptr,size) \
101({ \
102 long __pu_err; \
103 __chk_user_ptr(ptr); \
104 __put_user_size((x),(ptr),(size),__pu_err); \
105 __pu_err; \
106})
107
108#define __put_user_check(x,ptr,size) \
109({ \
110 long __pu_err = -EFAULT; \
111 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
112 if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
113 __put_user_size((x),__pu_addr,(size),__pu_err); \
114 __pu_err; \
115})
116
117#define __put_user_size(x,ptr,size,retval) \
118do { \
119 retval = 0; \
120 switch (size) { \
121 case 1: \
122 __put_user_asm(x, ptr, retval, "stb"); \
123 break; \
124 case 2: \
125 __put_user_asm(x, ptr, retval, "sth"); \
126 break; \
127 case 4: \
128 __put_user_asm(x, ptr, retval, "stw"); \
129 break; \
130 case 8: \
131 __put_user_asm2(x, ptr, retval); \
132 break; \
133 default: \
134 __put_user_bad(); \
135 } \
136} while (0)
137
138/*
139 * We don't tell gcc that we are accessing memory, but this is OK
140 * because we do not write to any memory gcc knows about, so there
141 * are no aliasing issues.
142 */
143#define __put_user_asm(x, addr, err, op) \
144 __asm__ __volatile__( \
145 "1: "op" %1,0(%2)\n" \
146 "2:\n" \
147 ".section .fixup,\"ax\"\n" \
148 "3: li %0,%3\n" \
149 " b 2b\n" \
150 ".previous\n" \
151 ".section __ex_table,\"a\"\n" \
152 " .align 2\n" \
153 " .long 1b,3b\n" \
154 ".previous" \
155 : "=r" (err) \
156 : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
157
158#define __put_user_asm2(x, addr, err) \
159 __asm__ __volatile__( \
160 "1: stw %1,0(%2)\n" \
161 "2: stw %1+1,4(%2)\n" \
162 "3:\n" \
163 ".section .fixup,\"ax\"\n" \
164 "4: li %0,%3\n" \
165 " b 3b\n" \
166 ".previous\n" \
167 ".section __ex_table,\"a\"\n" \
168 " .align 2\n" \
169 " .long 1b,4b\n" \
170 " .long 2b,4b\n" \
171 ".previous" \
172 : "=r" (err) \
173 : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
174
175#define __get_user_nocheck(x, ptr, size) \
176({ \
177 long __gu_err; \
178 unsigned long __gu_val; \
179 __chk_user_ptr(ptr); \
180 __get_user_size(__gu_val, (ptr), (size), __gu_err); \
181 (x) = (__typeof__(*(ptr)))__gu_val; \
182 __gu_err; \
183})
184
185#define __get_user64_nocheck(x, ptr, size) \
186({ \
187 long __gu_err; \
188 long long __gu_val; \
189 __chk_user_ptr(ptr); \
190 __get_user_size64(__gu_val, (ptr), (size), __gu_err); \
191 (x) = (__typeof__(*(ptr)))__gu_val; \
192 __gu_err; \
193})
194
195#define __get_user_check(x, ptr, size) \
196({ \
197 long __gu_err = -EFAULT; \
198 unsigned long __gu_val = 0; \
199 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
200 if (access_ok(VERIFY_READ, __gu_addr, (size))) \
201 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
202 (x) = (__typeof__(*(ptr)))__gu_val; \
203 __gu_err; \
204})
205
206#define __get_user64_check(x, ptr, size) \
207({ \
208 long __gu_err = -EFAULT; \
209 long long __gu_val = 0; \
210 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
211 if (access_ok(VERIFY_READ, __gu_addr, (size))) \
212 __get_user_size64(__gu_val, __gu_addr, (size), __gu_err); \
213 (x) = (__typeof__(*(ptr)))__gu_val; \
214 __gu_err; \
215})
216
217extern long __get_user_bad(void);
218
219#define __get_user_size(x, ptr, size, retval) \
220do { \
221 retval = 0; \
222 switch (size) { \
223 case 1: \
224 __get_user_asm(x, ptr, retval, "lbz"); \
225 break; \
226 case 2: \
227 __get_user_asm(x, ptr, retval, "lhz"); \
228 break; \
229 case 4: \
230 __get_user_asm(x, ptr, retval, "lwz"); \
231 break; \
232 default: \
233 x = __get_user_bad(); \
234 } \
235} while (0)
236
237#define __get_user_size64(x, ptr, size, retval) \
238do { \
239 retval = 0; \
240 switch (size) { \
241 case 1: \
242 __get_user_asm(x, ptr, retval, "lbz"); \
243 break; \
244 case 2: \
245 __get_user_asm(x, ptr, retval, "lhz"); \
246 break; \
247 case 4: \
248 __get_user_asm(x, ptr, retval, "lwz"); \
249 break; \
250 case 8: \
251 __get_user_asm2(x, ptr, retval); \
252 break; \
253 default: \
254 x = __get_user_bad(); \
255 } \
256} while (0)
257
258#define __get_user_asm(x, addr, err, op) \
259 __asm__ __volatile__( \
260 "1: "op" %1,0(%2)\n" \
261 "2:\n" \
262 ".section .fixup,\"ax\"\n" \
263 "3: li %0,%3\n" \
264 " li %1,0\n" \
265 " b 2b\n" \
266 ".previous\n" \
267 ".section __ex_table,\"a\"\n" \
268 " .align 2\n" \
269 " .long 1b,3b\n" \
270 ".previous" \
271 : "=r"(err), "=r"(x) \
272 : "b"(addr), "i"(-EFAULT), "0"(err))
273
274#define __get_user_asm2(x, addr, err) \
275 __asm__ __volatile__( \
276 "1: lwz %1,0(%2)\n" \
277 "2: lwz %1+1,4(%2)\n" \
278 "3:\n" \
279 ".section .fixup,\"ax\"\n" \
280 "4: li %0,%3\n" \
281 " li %1,0\n" \
282 " li %1+1,0\n" \
283 " b 3b\n" \
284 ".previous\n" \
285 ".section __ex_table,\"a\"\n" \
286 " .align 2\n" \
287 " .long 1b,4b\n" \
288 " .long 2b,4b\n" \
289 ".previous" \
290 : "=r"(err), "=&r"(x) \
291 : "b"(addr), "i"(-EFAULT), "0"(err))
292
293/* more complex routines */
294
295extern int __copy_tofrom_user(void __user *to, const void __user *from,
296 unsigned long size);
297
298extern inline unsigned long
299copy_from_user(void *to, const void __user *from, unsigned long n)
300{
301 unsigned long over;
302
303 if (access_ok(VERIFY_READ, from, n))
304 return __copy_tofrom_user((__force void __user *)to, from, n);
305 if ((unsigned long)from < TASK_SIZE) {
306 over = (unsigned long)from + n - TASK_SIZE;
307 return __copy_tofrom_user((__force void __user *)to, from, n - over) + over;
308 }
309 return n;
310}
311
312extern inline unsigned long
313copy_to_user(void __user *to, const void *from, unsigned long n)
314{
315 unsigned long over;
316
317 if (access_ok(VERIFY_WRITE, to, n))
318 return __copy_tofrom_user(to, (__force void __user *) from, n);
319 if ((unsigned long)to < TASK_SIZE) {
320 over = (unsigned long)to + n - TASK_SIZE;
321 return __copy_tofrom_user(to, (__force void __user *) from, n - over) + over;
322 }
323 return n;
324}
325
326static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long size)
327{
328 return __copy_tofrom_user((__force void __user *)to, from, size);
329}
330
331static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long size)
332{
333 return __copy_tofrom_user(to, (__force void __user *)from, size);
334}
335
336#define __copy_to_user_inatomic __copy_to_user
337#define __copy_from_user_inatomic __copy_from_user
338
339extern unsigned long __clear_user(void __user *addr, unsigned long size);
340
341extern inline unsigned long
342clear_user(void __user *addr, unsigned long size)
343{
344 if (access_ok(VERIFY_WRITE, addr, size))
345 return __clear_user(addr, size);
346 if ((unsigned long)addr < TASK_SIZE) {
347 unsigned long over = (unsigned long)addr + size - TASK_SIZE;
348 return __clear_user(addr, size - over) + over;
349 }
350 return size;
351}
352
353extern int __strncpy_from_user(char *dst, const char __user *src, long count);
354
355extern inline long
356strncpy_from_user(char *dst, const char __user *src, long count)
357{
358 if (access_ok(VERIFY_READ, src, 1))
359 return __strncpy_from_user(dst, src, count);
360 return -EFAULT;
361}
362
363/*
364 * Return the size of a string (including the ending 0)
365 *
366 * Return 0 for error
367 */
368
369extern int __strnlen_user(const char __user *str, long len, unsigned long top);
370
371/*
372 * Returns the length of the string at str (including the null byte),
373 * or 0 if we hit a page we can't access,
374 * or something > len if we didn't find a null byte.
375 *
376 * The `top' parameter to __strnlen_user is to make sure that
377 * we can never overflow from the user area into kernel space.
378 */
379extern __inline__ int strnlen_user(const char __user *str, long len)
380{
381 unsigned long top = current->thread.fs.seg;
382
383 if ((unsigned long)str > top)
384 return 0;
385 return __strnlen_user(str, len, top);
386}
387
388#define strlen_user(str) strnlen_user((str), 0x7ffffffe)
389
390#endif /* __ASSEMBLY__ */
391
392#endif /* _PPC_UACCESS_H */
393#endif /* __KERNEL__ */
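
A sketch of the overflow-safe __access_ok() test above: comparing size - 1 against seg - addr avoids computing addr + size, which could wrap on 32 bits. The segment top used below is illustrative.

    #include <stdio.h>

    #define SEG 0x7fffffffUL   /* assumed USER_DS top, i.e. TASK_SIZE - 1 */

    static int access_ok_sketch(unsigned long addr, unsigned long size)
    {
        return addr <= SEG && (size == 0 || size - 1 <= SEG - addr);
    }

    int main(void)
    {
        printf("%d\n", access_ok_sketch(0x7ffffff0UL, 0x10)); /* 1: ends exactly at SEG */
        printf("%d\n", access_ok_sketch(0x7ffffff0UL, 0x11)); /* 0: crosses the top */
        printf("%d\n", access_ok_sketch(0xfffffff0UL, 0x20)); /* 0: addr + size would wrap */
        return 0;
    }
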
diff --git a/include/asm-ppc64/bitops.h b/include/asm-ppc64/bitops.h
deleted file mode 100644
index dbfa42ef4a99..000000000000
--- a/include/asm-ppc64/bitops.h
+++ /dev/null
@@ -1,360 +0,0 @@
1/*
2 * PowerPC64 atomic bit operations.
3 * Dave Engebretsen, Todd Inglett, Don Reed, Pat McCarthy, Peter Bergner,
4 * Anton Blanchard
5 *
6 * Originally taken from the 32b PPC code. Modified to use 64b values for
7 * the various counters & memory references.
8 *
9 * Bitops are odd when viewed on big-endian systems. They were designed
10 * on little endian so the size of the bitset doesn't matter (low order bytes
11 * come first) as long as the bit in question is valid.
12 *
13 * Bits are "tested" often using the C expression (val & (1<<nr)) so we do
14 * our best to stay compatible with that. The assumption is that val will
15 * be unsigned long for such tests. As such, we assume the bits are stored
16 * as an array of unsigned long (the usual case is a single unsigned long,
17 * of course). Here's an example bitset with bit numbering:
18 *
19 * |63..........0|127........64|191.......128|255.......192|
20 *
21 * This leads to a problem. If an int, short or char is passed as a bitset
22 * it will be a bad memory reference since we want to store in chunks
23 * of unsigned long (64 bits here) size.
24 *
25 * There are a few little-endian macros used mostly for filesystem bitmaps,
26 * these work on similar bit arrays layouts, but byte-oriented:
27 *
28 * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
29 *
30 * The main difference is that bits 3-5 in the bit number field need to be
31 * reversed compared to the big-endian bit fields. This can be achieved
32 * by XOR with 0b111000 (0x38).
33 *
34 * This program is free software; you can redistribute it and/or
35 * modify it under the terms of the GNU General Public License
36 * as published by the Free Software Foundation; either version
37 * 2 of the License, or (at your option) any later version.
38 */
39
40#ifndef _PPC64_BITOPS_H
41#define _PPC64_BITOPS_H
42
43#ifdef __KERNEL__
44
45#include <asm/synch.h>
46
47/*
48 * clear_bit doesn't imply a memory barrier
49 */
50#define smp_mb__before_clear_bit() smp_mb()
51#define smp_mb__after_clear_bit() smp_mb()
52
53static __inline__ int test_bit(unsigned long nr, __const__ volatile unsigned long *addr)
54{
55 return (1UL & (addr[nr >> 6] >> (nr & 63)));
56}
57
58static __inline__ void set_bit(unsigned long nr, volatile unsigned long *addr)
59{
60 unsigned long old;
61 unsigned long mask = 1UL << (nr & 0x3f);
62 unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
63
64 __asm__ __volatile__(
65"1: ldarx %0,0,%3 # set_bit\n\
66 or %0,%0,%2\n\
67 stdcx. %0,0,%3\n\
68 bne- 1b"
69 : "=&r" (old), "=m" (*p)
70 : "r" (mask), "r" (p), "m" (*p)
71 : "cc");
72}
73
74static __inline__ void clear_bit(unsigned long nr, volatile unsigned long *addr)
75{
76 unsigned long old;
77 unsigned long mask = 1UL << (nr & 0x3f);
78 unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
79
80 __asm__ __volatile__(
81"1: ldarx %0,0,%3 # clear_bit\n\
82 andc %0,%0,%2\n\
83 stdcx. %0,0,%3\n\
84 bne- 1b"
85 : "=&r" (old), "=m" (*p)
86 : "r" (mask), "r" (p), "m" (*p)
87 : "cc");
88}
89
90static __inline__ void change_bit(unsigned long nr, volatile unsigned long *addr)
91{
92 unsigned long old;
93 unsigned long mask = 1UL << (nr & 0x3f);
94 unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
95
96 __asm__ __volatile__(
97"1: ldarx %0,0,%3 # change_bit\n\
98 xor %0,%0,%2\n\
99 stdcx. %0,0,%3\n\
100 bne- 1b"
101 : "=&r" (old), "=m" (*p)
102 : "r" (mask), "r" (p), "m" (*p)
103 : "cc");
104}
105
106static __inline__ int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
107{
108 unsigned long old, t;
109 unsigned long mask = 1UL << (nr & 0x3f);
110 unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
111
112 __asm__ __volatile__(
113 EIEIO_ON_SMP
114"1: ldarx %0,0,%3 # test_and_set_bit\n\
115 or %1,%0,%2 \n\
116 stdcx. %1,0,%3 \n\
117 bne- 1b"
118 ISYNC_ON_SMP
119 : "=&r" (old), "=&r" (t)
120 : "r" (mask), "r" (p)
121 : "cc", "memory");
122
123 return (old & mask) != 0;
124}
125
126static __inline__ int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
127{
128 unsigned long old, t;
129 unsigned long mask = 1UL << (nr & 0x3f);
130 unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
131
132 __asm__ __volatile__(
133 EIEIO_ON_SMP
134"1: ldarx %0,0,%3 # test_and_clear_bit\n\
135 andc %1,%0,%2\n\
136 stdcx. %1,0,%3\n\
137 bne- 1b"
138 ISYNC_ON_SMP
139 : "=&r" (old), "=&r" (t)
140 : "r" (mask), "r" (p)
141 : "cc", "memory");
142
143 return (old & mask) != 0;
144}
145
146static __inline__ int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
147{
148 unsigned long old, t;
149 unsigned long mask = 1UL << (nr & 0x3f);
150 unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
151
152 __asm__ __volatile__(
153 EIEIO_ON_SMP
154"1: ldarx %0,0,%3 # test_and_change_bit\n\
155 xor %1,%0,%2\n\
156 stdcx. %1,0,%3\n\
157 bne- 1b"
158 ISYNC_ON_SMP
159 : "=&r" (old), "=&r" (t)
160 : "r" (mask), "r" (p)
161 : "cc", "memory");
162
163 return (old & mask) != 0;
164}
165
166static __inline__ void set_bits(unsigned long mask, unsigned long *addr)
167{
168 unsigned long old;
169
170 __asm__ __volatile__(
171"1: ldarx %0,0,%3 # set_bit\n\
172 or %0,%0,%2\n\
173 stdcx. %0,0,%3\n\
174 bne- 1b"
175 : "=&r" (old), "=m" (*addr)
176 : "r" (mask), "r" (addr), "m" (*addr)
177 : "cc");
178}
179
180/*
181 * non-atomic versions
182 */
183static __inline__ void __set_bit(unsigned long nr, volatile unsigned long *addr)
184{
185 unsigned long mask = 1UL << (nr & 0x3f);
186 unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
187
188 *p |= mask;
189}
190
191static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long *addr)
192{
193 unsigned long mask = 1UL << (nr & 0x3f);
194 unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
195
196 *p &= ~mask;
197}
198
199static __inline__ void __change_bit(unsigned long nr, volatile unsigned long *addr)
200{
201 unsigned long mask = 1UL << (nr & 0x3f);
202 unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
203
204 *p ^= mask;
205}
206
207static __inline__ int __test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
208{
209 unsigned long mask = 1UL << (nr & 0x3f);
210 unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
211 unsigned long old = *p;
212
213 *p = old | mask;
214 return (old & mask) != 0;
215}
216
217static __inline__ int __test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
218{
219 unsigned long mask = 1UL << (nr & 0x3f);
220 unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
221 unsigned long old = *p;
222
223 *p = old & ~mask;
224 return (old & mask) != 0;
225}
226
227static __inline__ int __test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
228{
229 unsigned long mask = 1UL << (nr & 0x3f);
230 unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
231 unsigned long old = *p;
232
233 *p = old ^ mask;
234 return (old & mask) != 0;
235}
236
237/*
238 * Return the zero-based bit position (from RIGHT TO LEFT, 63 -> 0) of the
239 * most significant (left-most) 1-bit in a double word.
240 */
241static __inline__ int __ilog2(unsigned long x)
242{
243 int lz;
244
245 asm ("cntlzd %0,%1" : "=r" (lz) : "r" (x));
246 return 63 - lz;
247}
248
249/*
250 * Determines the bit position of the least significant (rightmost) 0 bit
251 * in the specified double word. The returned bit position will be zero-based,
252 * starting from the right side (63 - 0).
253 */
254static __inline__ unsigned long ffz(unsigned long x)
255{
256 /* no zero exists anywhere in the 8 byte area. */
257 if ((x = ~x) == 0)
258 return 64;
259
260 /*
261 * Calculate the bit position of the least significant '1' bit in x
262 * (since x has been changed, this will actually be the least significant
263 * '0' bit in the original x). Note: (x & -x) gives us a mask that
264 * is the least significant (RIGHT-most) 1-bit of the value in x.
265 */
266 return __ilog2(x & -x);
267}
268
269static __inline__ int __ffs(unsigned long x)
270{
271 return __ilog2(x & -x);
272}
273
274/*
275 * ffs: find first bit set. This is defined the same way as
276 * the libc and compiler builtin ffs routines, therefore
277 * differs in spirit from the above ffz (man ffs).
278 */
279static __inline__ int ffs(int x)
280{
281 unsigned long i = (unsigned long)x;
282 return __ilog2(i & -i) + 1;
283}
284
285/*
286 * fls: find last (most-significant) bit set.
287 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
288 */
289#define fls(x) generic_fls(x)
290
291/*
292 * hweightN: returns the hamming weight (i.e. the number
293 * of bits set) of a N-bit word
294 */
295#define hweight64(x) generic_hweight64(x)
296#define hweight32(x) generic_hweight32(x)
297#define hweight16(x) generic_hweight16(x)
298#define hweight8(x) generic_hweight8(x)
299
300extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, unsigned long offset);
301#define find_first_zero_bit(addr, size) \
302 find_next_zero_bit((addr), (size), 0)
303
304extern unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset);
305#define find_first_bit(addr, size) \
306 find_next_bit((addr), (size), 0)
307
308extern unsigned long find_next_zero_le_bit(const unsigned long *addr, unsigned long size, unsigned long offset);
309#define find_first_zero_le_bit(addr, size) \
310 find_next_zero_le_bit((addr), (size), 0)
311
312static __inline__ int test_le_bit(unsigned long nr, __const__ unsigned long * addr)
313{
314 __const__ unsigned char *ADDR = (__const__ unsigned char *) addr;
315 return (ADDR[nr >> 3] >> (nr & 7)) & 1;
316}
317
318#define test_and_clear_le_bit(nr, addr) \
319 test_and_clear_bit((nr) ^ 0x38, (addr))
320#define test_and_set_le_bit(nr, addr) \
321 test_and_set_bit((nr) ^ 0x38, (addr))
322
323/*
324 * non-atomic versions
325 */
326
327#define __set_le_bit(nr, addr) \
328 __set_bit((nr) ^ 0x38, (addr))
329#define __clear_le_bit(nr, addr) \
330 __clear_bit((nr) ^ 0x38, (addr))
331#define __test_and_clear_le_bit(nr, addr) \
332 __test_and_clear_bit((nr) ^ 0x38, (addr))
333#define __test_and_set_le_bit(nr, addr) \
334 __test_and_set_bit((nr) ^ 0x38, (addr))
335
336#define ext2_set_bit(nr,addr) \
337 __test_and_set_le_bit((nr), (unsigned long*)addr)
338#define ext2_clear_bit(nr, addr) \
339 __test_and_clear_le_bit((nr), (unsigned long*)addr)
340
341#define ext2_set_bit_atomic(lock, nr, addr) \
342 test_and_set_le_bit((nr), (unsigned long*)addr)
343#define ext2_clear_bit_atomic(lock, nr, addr) \
344 test_and_clear_le_bit((nr), (unsigned long*)addr)
345
346
347#define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr)
348#define ext2_find_first_zero_bit(addr, size) \
349 find_first_zero_le_bit((unsigned long*)addr, size)
350#define ext2_find_next_zero_bit(addr, size, off) \
351 find_next_zero_le_bit((unsigned long*)addr, size, off)
352
353#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
354#define minix_set_bit(nr,addr) set_bit(nr,addr)
355#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
356#define minix_test_bit(nr,addr) test_bit(nr,addr)
357#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
358
359#endif /* __KERNEL__ */
360#endif /* _PPC64_BITOPS_H */
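
A sketch of the nr ^ 0x38 swizzle used by the *_le_bit helpers above: inverting bits 3-5 of the bit number maps a little-endian byte-oriented bit index onto a big-endian 64-bit word, byte 0 of the LE layout being the most significant byte of the word.

    #include <stdio.h>

    int main(void)
    {
        /* LE bit 0 sits in byte 0; in a BE 64-bit word that byte
         * holds bits 56-63, so bit 0 maps to bit 56, and so on. */
        for (int nr = 0; nr < 64; nr += 8)
            printf("le bit %2d -> be bit %2d\n", nr, nr ^ 0x38);
        return 0;
    }
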
diff --git a/include/asm-ppc64/ipcbuf.h b/include/asm-ppc64/ipcbuf.h
deleted file mode 100644
index fa393c8342af..000000000000
--- a/include/asm-ppc64/ipcbuf.h
+++ /dev/null
@@ -1,28 +0,0 @@
1#ifndef __PPC64_IPCBUF_H__
2#define __PPC64_IPCBUF_H__
3
4/*
5 * The ipc64_perm structure for the PPC is identical to kern_ipc_perm
6 * as we have always had 32-bit UIDs and GIDs in the kernel.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14struct ipc64_perm
15{
16 __kernel_key_t key;
17 __kernel_uid_t uid;
18 __kernel_gid_t gid;
19 __kernel_uid_t cuid;
20 __kernel_gid_t cgid;
21 __kernel_mode_t mode;
22 unsigned int seq;
23 unsigned int __pad1;
24 unsigned long __unused1;
25 unsigned long __unused2;
26};
27
28#endif /* __PPC64_IPCBUF_H__ */
diff --git a/include/asm-ppc64/mmu_context.h b/include/asm-ppc64/mmu_context.h
index 77a743402db4..820dd729b895 100644
--- a/include/asm-ppc64/mmu_context.h
+++ b/include/asm-ppc64/mmu_context.h
@@ -16,21 +16,6 @@
16 * 2 of the License, or (at your option) any later version. 16 * 2 of the License, or (at your option) any later version.
17 */ 17 */
18 18
19/*
20 * Every architecture must define this function. It's the fastest
21 * way of searching a 140-bit bitmap where the first 100 bits are
22 * unlikely to be set. It's guaranteed that at least one of the 140
23 * bits is cleared.
24 */
25static inline int sched_find_first_bit(unsigned long *b)
26{
27 if (unlikely(b[0]))
28 return __ffs(b[0]);
29 if (unlikely(b[1]))
30 return __ffs(b[1]) + 64;
31 return __ffs(b[2]) + 128;
32}
33
34static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) 19static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
35{ 20{
36} 21}
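
The helper removed above scans a 140-bit priority bitmap in 64-bit chunks; at least one bit is guaranteed set, so the last word needs no bounds check. A user-space sketch with __builtin_ctzll standing in for __ffs:

    #include <stdio.h>

    static int ffs64(unsigned long long x)
    {
        return __builtin_ctzll(x);          /* index of the lowest set bit */
    }

    static int sched_find_first_bit_sketch(const unsigned long long *b)
    {
        if (b[0])
            return ffs64(b[0]);
        if (b[1])
            return ffs64(b[1]) + 64;
        return ffs64(b[2]) + 128;           /* guaranteed non-zero */
    }

    int main(void)
    {
        unsigned long long b[3] = { 0, 0, 1ULL << 10 };
        printf("%d\n", sched_find_first_bit_sketch(b));  /* 138 */
        return 0;
    }
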
diff --git a/include/asm-ppc64/nvram.h b/include/asm-ppc64/nvram.h
index dfaa21566c9a..def47d720d3d 100644
--- a/include/asm-ppc64/nvram.h
+++ b/include/asm-ppc64/nvram.h
@@ -70,7 +70,7 @@ extern struct nvram_partition *nvram_find_partition(int sig, const char *name);
70 70
71extern int pSeries_nvram_init(void); 71extern int pSeries_nvram_init(void);
72extern int pmac_nvram_init(void); 72extern int pmac_nvram_init(void);
73extern int bpa_nvram_init(void); 73extern int mmio_nvram_init(void);
74 74
75/* PowerMac specific nvram stuffs */ 75/* PowerMac specific nvram stuffs */
76 76
diff --git a/include/asm-ppc64/smp.h b/include/asm-ppc64/smp.h
index c5e9052e7967..0f42fcc1900b 100644
--- a/include/asm-ppc64/smp.h
+++ b/include/asm-ppc64/smp.h
@@ -64,6 +64,7 @@ extern cpumask_t cpu_sibling_map[NR_CPUS];
64 64
65void smp_init_iSeries(void); 65void smp_init_iSeries(void);
66void smp_init_pSeries(void); 66void smp_init_pSeries(void);
67void smp_init_cell(void);
67 68
68extern int __cpu_disable(void); 69extern int __cpu_disable(void);
69extern void __cpu_die(unsigned int cpu); 70extern void __cpu_die(unsigned int cpu);
diff --git a/include/asm-ppc64/uaccess.h b/include/asm-ppc64/uaccess.h
deleted file mode 100644
index 132c1276547b..000000000000
--- a/include/asm-ppc64/uaccess.h
+++ /dev/null
@@ -1,341 +0,0 @@
1#ifndef _PPC64_UACCESS_H
2#define _PPC64_UACCESS_H
3
4/*
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 */
10
11#ifndef __ASSEMBLY__
12#include <linux/sched.h>
13#include <linux/errno.h>
14#include <asm/processor.h>
15
16#define VERIFY_READ 0
17#define VERIFY_WRITE 1
18
19/*
20 * The fs value determines whether argument validity checking should be
21 * performed or not. If get_fs() == USER_DS, checking is performed, with
22 * get_fs() == KERNEL_DS, checking is bypassed.
23 *
24 * For historical reasons, these macros are grossly misnamed.
25 */
26
27#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
28
29#define KERNEL_DS MAKE_MM_SEG(0UL)
30#define USER_DS MAKE_MM_SEG(0xf000000000000000UL)
31
32#define get_ds() (KERNEL_DS)
33#define get_fs() (current->thread.fs)
34#define set_fs(val) (current->thread.fs = (val))
35
36#define segment_eq(a,b) ((a).seg == (b).seg)
37
38/*
39 * Use the alpha trick for checking ranges:
40 *
41 * Is an address valid? This does a straightforward calculation rather
42 * than tests.
43 *
44 * Address valid if:
45 * - "addr" doesn't have any high-bits set
46 * - AND "size" doesn't have any high-bits set
47 * - OR we are in kernel mode.
48 *
49 * We don't have to check for high bits in (addr+size) because the first
50 * two checks force the maximum result to be below the start of the
51 * kernel region.
52 */
53#define __access_ok(addr,size,segment) \
54 (((segment).seg & (addr | size)) == 0)
55
56#define access_ok(type,addr,size) \
57 __access_ok(((__force unsigned long)(addr)),(size),get_fs())
58
59/*
60 * The exception table consists of pairs of addresses: the first is the
61 * address of an instruction that is allowed to fault, and the second is
62 * the address at which the program should continue. No registers are
63 * modified, so it is entirely up to the continuation code to figure out
64 * what to do.
65 *
66 * All the routines below use bits of fixup code that are out of line
67 * with the main instruction path. This means when everything is well,
68 * we don't even have to jump over them. Further, they do not intrude
69 * on our cache or tlb entries.
70 */
71
72struct exception_table_entry
73{
74 unsigned long insn, fixup;
75};
76
77/* Returns 0 if exception not found and fixup otherwise. */
78extern unsigned long search_exception_table(unsigned long);
79
80/*
81 * These are the main single-value transfer routines. They automatically
82 * use the right size if we just have the right pointer type.
83 *
84 * This gets kind of ugly. We want to return _two_ values in "get_user()"
85 * and yet we don't want to do any pointers, because that is too much
86 * of a performance impact. Thus we have a few rather ugly macros here,
87 * and hide all the ugliness from the user.
88 *
89 * The "__xxx" versions of the user access functions are versions that
90 * do not verify the address space, that must have been done previously
91 * with a separate "access_ok()" call (this is used when we do multiple
92 * accesses to the same area of user memory).
93 *
94 * As we use the same address space for kernel and user data on the
95 * PowerPC, we can just do these as direct assignments. (Of course, the
96 * exception handling means that it's no longer "just"...)
97 */
98#define get_user(x,ptr) \
99 __get_user_check((x),(ptr),sizeof(*(ptr)))
100#define put_user(x,ptr) \
101 __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
102
103#define __get_user(x,ptr) \
104 __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
105#define __put_user(x,ptr) \
106 __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
107
108#define __get_user_unaligned __get_user
109#define __put_user_unaligned __put_user
110
111extern long __put_user_bad(void);
112
113#define __put_user_nocheck(x,ptr,size) \
114({ \
115 long __pu_err; \
116 might_sleep(); \
117 __chk_user_ptr(ptr); \
118 __put_user_size((x),(ptr),(size),__pu_err,-EFAULT); \
119 __pu_err; \
120})
121
122#define __put_user_check(x,ptr,size) \
123({ \
124 long __pu_err = -EFAULT; \
125 void __user *__pu_addr = (ptr); \
126 might_sleep(); \
127 if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
128 __put_user_size((x),__pu_addr,(size),__pu_err,-EFAULT); \
129 __pu_err; \
130})
131
132#define __put_user_size(x,ptr,size,retval,errret) \
133do { \
134 retval = 0; \
135 switch (size) { \
136 case 1: __put_user_asm(x,ptr,retval,"stb",errret); break; \
137 case 2: __put_user_asm(x,ptr,retval,"sth",errret); break; \
138 case 4: __put_user_asm(x,ptr,retval,"stw",errret); break; \
139 case 8: __put_user_asm(x,ptr,retval,"std",errret); break; \
140 default: __put_user_bad(); \
141 } \
142} while (0)
143
144/*
145 * We don't tell gcc that we are accessing memory, but this is OK
146 * because we do not write to any memory gcc knows about, so there
147 * are no aliasing issues.
148 */
149#define __put_user_asm(x, addr, err, op, errret) \
150 __asm__ __volatile__( \
151 "1: "op" %1,0(%2) # put_user\n" \
152 "2:\n" \
153 ".section .fixup,\"ax\"\n" \
154 "3: li %0,%3\n" \
155 " b 2b\n" \
156 ".previous\n" \
157 ".section __ex_table,\"a\"\n" \
158 " .align 3\n" \
159 " .llong 1b,3b\n" \
160 ".previous" \
161 : "=r"(err) \
162 : "r"(x), "b"(addr), "i"(errret), "0"(err))
163
164
165#define __get_user_nocheck(x,ptr,size) \
166({ \
167 long __gu_err; \
168 unsigned long __gu_val; \
169 might_sleep(); \
170 __get_user_size(__gu_val,(ptr),(size),__gu_err,-EFAULT);\
171 (x) = (__typeof__(*(ptr)))__gu_val; \
172 __gu_err; \
173})
174
175#define __get_user_check(x,ptr,size) \
176({ \
177 long __gu_err = -EFAULT; \
178 unsigned long __gu_val = 0; \
179 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
180 might_sleep(); \
181 if (access_ok(VERIFY_READ,__gu_addr,size)) \
182 __get_user_size(__gu_val,__gu_addr,(size),__gu_err,-EFAULT);\
183 (x) = (__typeof__(*(ptr)))__gu_val; \
184 __gu_err; \
185})
186
187extern long __get_user_bad(void);
188
189#define __get_user_size(x,ptr,size,retval,errret) \
190do { \
191 retval = 0; \
192 __chk_user_ptr(ptr); \
193 switch (size) { \
194 case 1: __get_user_asm(x,ptr,retval,"lbz",errret); break; \
195 case 2: __get_user_asm(x,ptr,retval,"lhz",errret); break; \
196 case 4: __get_user_asm(x,ptr,retval,"lwz",errret); break; \
197 case 8: __get_user_asm(x,ptr,retval,"ld",errret); break; \
198 default: (x) = __get_user_bad(); \
199 } \
200} while (0)
201
202#define __get_user_asm(x, addr, err, op, errret) \
203 __asm__ __volatile__( \
204 "1: "op" %1,0(%2) # get_user\n" \
205 "2:\n" \
206 ".section .fixup,\"ax\"\n" \
207 "3: li %0,%3\n" \
208 " li %1,0\n" \
209 " b 2b\n" \
210 ".previous\n" \
211 ".section __ex_table,\"a\"\n" \
212 " .align 3\n" \
213 " .llong 1b,3b\n" \
214 ".previous" \
215 : "=r"(err), "=r"(x) \
216 : "b"(addr), "i"(errret), "0"(err))
217
218/* more complex routines */
219
220extern unsigned long __copy_tofrom_user(void __user *to, const void __user *from,
221 unsigned long size);
222
223static inline unsigned long
224__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
225{
226 if (__builtin_constant_p(n)) {
227 unsigned long ret;
228
229 switch (n) {
230 case 1:
231 __get_user_size(*(u8 *)to, from, 1, ret, 1);
232 return ret;
233 case 2:
234 __get_user_size(*(u16 *)to, from, 2, ret, 2);
235 return ret;
236 case 4:
237 __get_user_size(*(u32 *)to, from, 4, ret, 4);
238 return ret;
239 case 8:
240 __get_user_size(*(u64 *)to, from, 8, ret, 8);
241 return ret;
242 }
243 }
244 return __copy_tofrom_user((__force void __user *) to, from, n);
245}
246
247static inline unsigned long
248__copy_from_user(void *to, const void __user *from, unsigned long n)
249{
250 might_sleep();
251 return __copy_from_user_inatomic(to, from, n);
252}
253
254static inline unsigned long
255__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
256{
257 if (__builtin_constant_p(n)) {
258 unsigned long ret;
259
260 switch (n) {
261 case 1:
262 __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret, 1);
263 return ret;
264 case 2:
265 __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret, 2);
266 return ret;
267 case 4:
268 __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret, 4);
269 return ret;
270 case 8:
271 __put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret, 8);
272 return ret;
273 }
274 }
275 return __copy_tofrom_user(to, (__force const void __user *) from, n);
276}
277
278static inline unsigned long
279__copy_to_user(void __user *to, const void *from, unsigned long n)
280{
281 might_sleep();
282 return __copy_to_user_inatomic(to, from, n);
283}
284
285#define __copy_in_user(to, from, size) \
286 __copy_tofrom_user((to), (from), (size))
287
288extern unsigned long copy_from_user(void *to, const void __user *from,
289 unsigned long n);
290extern unsigned long copy_to_user(void __user *to, const void *from,
291 unsigned long n);
292extern unsigned long copy_in_user(void __user *to, const void __user *from,
293 unsigned long n);
294
295extern unsigned long __clear_user(void __user *addr, unsigned long size);
296
297static inline unsigned long
298clear_user(void __user *addr, unsigned long size)
299{
300 might_sleep();
301 if (likely(access_ok(VERIFY_WRITE, addr, size)))
302 size = __clear_user(addr, size);
303 return size;
304}
305
306extern int __strncpy_from_user(char *dst, const char __user *src, long count);
307
308static inline long
309strncpy_from_user(char *dst, const char __user *src, long count)
310{
311 might_sleep();
312 if (likely(access_ok(VERIFY_READ, src, 1)))
313 return __strncpy_from_user(dst, src, count);
314 return -EFAULT;
315}
316
317/*
318 * Return the size of a string (including the ending 0)
319 *
320 * Return 0 for error
321 */
322extern int __strnlen_user(const char __user *str, long len);
323
324/*
325 * Returns the length of the string at str (including the null byte),
326 * or 0 if we hit a page we can't access,
327 * or something > len if we didn't find a null byte.
328 */
329static inline int strnlen_user(const char __user *str, long len)
330{
331 might_sleep();
332 if (likely(access_ok(VERIFY_READ, str, 1)))
333 return __strnlen_user(str, len);
334 return 0;
335}
336
337#define strlen_user(str) strnlen_user((str), 0x7ffffffe)
338
339#endif /* __ASSEMBLY__ */
340
341#endif /* _PPC64_UACCESS_H */
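
A sketch of the 64-bit "alpha trick" above: with USER_DS.seg = 0xf000000000000000, (seg & (addr | size)) == 0 exactly when neither the address nor the size has any of the top four bits set, while KERNEL_DS.seg = 0 lets every access pass. Assumes a 64-bit unsigned long.

    #include <stdio.h>

    #define USER_SEG 0xf000000000000000UL   /* 64-bit unsigned long assumed */

    static int access_ok_sketch(unsigned long addr, unsigned long size)
    {
        return (USER_SEG & (addr | size)) == 0;
    }

    int main(void)
    {
        printf("%d\n", access_ok_sketch(0x0000000010000000UL, 4096)); /* 1 */
        printf("%d\n", access_ok_sketch(0xc000000000001000UL, 8));    /* 0: kernel address */
        printf("%d\n", access_ok_sketch(0x1000UL, USER_SEG));         /* 0: huge size */
        return 0;
    }
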