Diffstat (limited to 'include')
-rw-r--r--  include/asm-arm/arch-ixp4xx/io.h  |  9
-rw-r--r--  include/asm-arm/hardware/amba_serial.h  |  5
-rw-r--r--  include/asm-arm/io.h  |  21
-rw-r--r--  include/asm-arm/uaccess.h  |  8
-rw-r--r--  include/asm-parisc/irq.h  |  5
-rw-r--r--  include/asm-parisc/smp.h  |  7
-rw-r--r--  include/asm-parisc/spinlock.h  |  19
-rw-r--r--  include/asm-parisc/tlbflush.h  |  16
-rw-r--r--  include/asm-powerpc/dma-mapping.h (renamed from include/asm-ppc/dma-mapping.h)  |  138
-rw-r--r--  include/asm-powerpc/io.h (renamed from include/asm-ppc64/io.h)  |  12
-rw-r--r--  include/asm-powerpc/mmu.h (renamed from include/asm-ppc64/mmu.h)  |  14
-rw-r--r--  include/asm-powerpc/mmu_context.h (renamed from include/asm-ppc64/mmu_context.h)  |  12
-rw-r--r--  include/asm-powerpc/mmzone.h (renamed from include/asm-ppc64/mmzone.h)  |  0
-rw-r--r--  include/asm-powerpc/pci-bridge.h (renamed from include/asm-ppc64/pci-bridge.h)  |  12
-rw-r--r--  include/asm-powerpc/pci.h (renamed from include/asm-ppc64/pci.h)  |  110
-rw-r--r--  include/asm-powerpc/pgalloc.h (renamed from include/asm-ppc64/pgalloc.h)  |  11
-rw-r--r--  include/asm-powerpc/pgtable-4k.h (renamed from include/asm-ppc64/pgtable-4k.h)  |  0
-rw-r--r--  include/asm-powerpc/pgtable-64k.h (renamed from include/asm-ppc64/pgtable-64k.h)  |  0
-rw-r--r--  include/asm-powerpc/pgtable.h (renamed from include/asm-ppc64/pgtable.h)  |  20
-rw-r--r--  include/asm-powerpc/ppc-pci.h  |  2
-rw-r--r--  include/asm-powerpc/spinlock.h (renamed from include/asm-ppc64/spinlock.h)  |  72
-rw-r--r--  include/asm-ppc/io.h  |  17
-rw-r--r--  include/asm-ppc64/dma-mapping.h  |  136
-rw-r--r--  include/asm-ppc64/imalloc.h  |  26
-rw-r--r--  include/asm-ppc64/ptrace-common.h  |  164
-rw-r--r--  include/asm-x86_64/msr.h  |  2
-rw-r--r--  include/linux/cciss_ioctl.h  |  2
-rw-r--r--  include/linux/hdreg.h  |  6
-rw-r--r--  include/linux/ide.h  |  30
-rw-r--r--  include/linux/mm.h  |  2
-rw-r--r--  include/linux/netfilter_ipv4/ipt_sctp.h  |  12
-rw-r--r--  include/linux/pci_ids.h  |  3
-rw-r--r--  include/linux/skbuff.h  |  7
-rw-r--r--  include/linux/uinput.h  |  13
-rw-r--r--  include/net/ipv6.h  |  2
35 files changed, 374 insertions, 541 deletions
diff --git a/include/asm-arm/arch-ixp4xx/io.h b/include/asm-arm/arch-ixp4xx/io.h
index 688f7f90d93e..942b622455bc 100644
--- a/include/asm-arm/arch-ixp4xx/io.h
+++ b/include/asm-arm/arch-ixp4xx/io.h
@@ -59,11 +59,10 @@ extern int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data);
  * fallback to the default.
  */
 static inline void __iomem *
-__ixp4xx_ioremap(unsigned long addr, size_t size, unsigned long flags, unsigned long align)
+__ixp4xx_ioremap(unsigned long addr, size_t size, unsigned long flags)
 {
-	extern void __iomem * __ioremap(unsigned long, size_t, unsigned long, unsigned long);
 	if((addr < 0x48000000) || (addr > 0x4fffffff))
-		return __ioremap(addr, size, flags, align);
+		return __ioremap(addr, size, flags);
 
 	return (void *)addr;
 }
@@ -71,13 +70,11 @@ __ixp4xx_ioremap(unsigned long addr, size_t size, unsigned long flags, unsigned
 static inline void
 __ixp4xx_iounmap(void __iomem *addr)
 {
-	extern void __iounmap(void __iomem *addr);
-
 	if ((u32)addr >= VMALLOC_START)
 		__iounmap(addr);
 }
 
-#define __arch_ioremap(a, s, f, x)	__ixp4xx_ioremap(a, s, f, x)
+#define __arch_ioremap(a, s, f)		__ixp4xx_ioremap(a, s, f)
 #define __arch_iounmap(a)		__ixp4xx_iounmap(a)
 
 #define	writeb(v, p)			__ixp4xx_writeb(v, p)
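For orientation (an illustrative sketch, not part of this patch): with the align argument gone, the machine hook and the generic wrappers line up again, and driver code keeps using the usual ioremap()/iounmap() pair. An address inside the 0x48000000-0x4fffffff window shown above comes straight back from __ixp4xx_ioremap(); the size below is a placeholder.

static void example_map_regs(void)
{
	/* sketch of typical driver usage on top of __arch_ioremap/__arch_iounmap */
	void __iomem *regs = ioremap(0x48000000, 0x1000);	/* placeholder size */

	if (!regs)
		return;
	writeb(0x01, regs);	/* routed through __ixp4xx_writeb() on this platform */
	iounmap(regs);
}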
diff --git a/include/asm-arm/hardware/amba_serial.h b/include/asm-arm/hardware/amba_serial.h
index 71770aa6389f..dc726ffccebd 100644
--- a/include/asm-arm/hardware/amba_serial.h
+++ b/include/asm-arm/hardware/amba_serial.h
@@ -50,6 +50,11 @@
 #define UART011_ICR		0x44	/* Interrupt clear register. */
 #define UART011_DMACR		0x48	/* DMA control register. */
 
+#define UART011_DR_OE		(1 << 11)
+#define UART011_DR_BE		(1 << 10)
+#define UART011_DR_PE		(1 << 9)
+#define UART011_DR_FE		(1 << 8)
+
 #define UART01x_RSR_OE		0x08
 #define UART01x_RSR_BE		0x04
 #define UART01x_RSR_PE		0x02
diff --git a/include/asm-arm/io.h b/include/asm-arm/io.h
index 2e6799632f12..ae69db4a1010 100644
--- a/include/asm-arm/io.h
+++ b/include/asm-arm/io.h
@@ -55,6 +55,12 @@ extern void __raw_readsl(void __iomem *addr, void *data, int longlen);
 #define __raw_readl(a)		(__chk_io_ptr(a), *(volatile unsigned int __force *)(a))
 
 /*
+ * Architecture ioremap implementation.
+ */
+extern void __iomem * __ioremap(unsigned long, size_t, unsigned long);
+extern void __iounmap(void __iomem *addr);
+
+/*
  * Bad read/write accesses...
  */
 extern void __readwrite_bug(const char *fn);
@@ -256,18 +262,15 @@ out:
  * ioremap takes a PCI memory address, as specified in
  * Documentation/IO-mapping.txt.
  */
-extern void __iomem * __ioremap(unsigned long, size_t, unsigned long, unsigned long);
-extern void __iounmap(void __iomem *addr);
-
 #ifndef __arch_ioremap
-#define ioremap(cookie,size)		__ioremap(cookie,size,0,1)
-#define ioremap_nocache(cookie,size)	__ioremap(cookie,size,0,1)
-#define ioremap_cached(cookie,size)	__ioremap(cookie,size,L_PTE_CACHEABLE,1)
+#define ioremap(cookie,size)		__ioremap(cookie,size,0)
+#define ioremap_nocache(cookie,size)	__ioremap(cookie,size,0)
+#define ioremap_cached(cookie,size)	__ioremap(cookie,size,L_PTE_CACHEABLE)
 #define iounmap(cookie)			__iounmap(cookie)
 #else
-#define ioremap(cookie,size)		__arch_ioremap((cookie),(size),0,1)
-#define ioremap_nocache(cookie,size)	__arch_ioremap((cookie),(size),0,1)
-#define ioremap_cached(cookie,size)	__arch_ioremap((cookie),(size),L_PTE_CACHEABLE,1)
+#define ioremap(cookie,size)		__arch_ioremap((cookie),(size),0)
+#define ioremap_nocache(cookie,size)	__arch_ioremap((cookie),(size),0)
+#define ioremap_cached(cookie,size)	__arch_ioremap((cookie),(size),L_PTE_CACHEABLE)
 #define iounmap(cookie)			__arch_iounmap(cookie)
 #endif
 
diff --git a/include/asm-arm/uaccess.h b/include/asm-arm/uaccess.h
index a2fdad0138b3..064f0f5e8e2b 100644
--- a/include/asm-arm/uaccess.h
+++ b/include/asm-arm/uaccess.h
@@ -100,7 +100,6 @@ static inline void set_fs (mm_segment_t fs)
 extern int __get_user_1(void *);
 extern int __get_user_2(void *);
 extern int __get_user_4(void *);
-extern int __get_user_8(void *);
 extern int __get_user_bad(void);
 
 #define __get_user_x(__r2,__p,__e,__s,__i...)				\
@@ -114,7 +113,7 @@ extern int __get_user_bad(void);
 #define get_user(x,p)							\
 	({								\
 		const register typeof(*(p)) __user *__p asm("r0") = (p);\
-		register typeof(*(p)) __r2 asm("r2");			\
+		register unsigned int __r2 asm("r2");			\
 		register int __e asm("r0");				\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
@@ -126,12 +125,9 @@ extern int __get_user_bad(void);
 		case 4:							\
 			__get_user_x(__r2, __p, __e, 4, "lr");		\
 			break;						\
-		case 8:							\
-			__get_user_x(__r2, __p, __e, 8, "lr");		\
-			break;						\
 		default: __e = __get_user_bad(); break;			\
 		}							\
-		x = __r2;						\
+		x = (typeof(*(p))) __r2;				\
 		__e;							\
 	})
 
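Illustrative usage (not from this patch): get_user() takes an lvalue and a user pointer and returns 0 or -EFAULT. After the change above the fetched value travels through an unsigned int register and is cast back to the pointer's type, and the 64-bit case now falls through to __get_user_bad() instead of a dedicated helper.

/* hypothetical caller, e.g. inside a syscall or ioctl handler */
static int read_user_flag(const int __user *uptr)
{
	int flag;

	if (get_user(flag, uptr))
		return -EFAULT;		/* faulted on the user address */
	return flag != 0;
}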
diff --git a/include/asm-parisc/irq.h b/include/asm-parisc/irq.h
index f876bdf22056..b0a30e2c9813 100644
--- a/include/asm-parisc/irq.h
+++ b/include/asm-parisc/irq.h
@@ -8,6 +8,7 @@
 #define _ASM_PARISC_IRQ_H
 
 #include <linux/config.h>
+#include <linux/cpumask.h>
 #include <asm/types.h>
 
 #define NO_IRQ		(-1)
@@ -49,10 +50,10 @@ extern int txn_alloc_irq(unsigned int nbits);
 extern int txn_claim_irq(int);
 extern unsigned int txn_alloc_data(unsigned int);
 extern unsigned long txn_alloc_addr(unsigned int);
+extern unsigned long txn_affinity_addr(unsigned int irq, int cpu);
 
 extern int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *, void *);
-
-extern int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *, void *);
+extern int cpu_check_affinity(unsigned int irq, cpumask_t *dest);
 
 /* soft power switch support (power.c) */
 extern struct tasklet_struct power_tasklet;
diff --git a/include/asm-parisc/smp.h b/include/asm-parisc/smp.h
index 9413f67a540b..dbdbd2e9fdf9 100644
--- a/include/asm-parisc/smp.h
+++ b/include/asm-parisc/smp.h
@@ -29,6 +29,7 @@ extern cpumask_t cpu_online_map;
 #define cpu_logical_map(cpu)	(cpu)
 
 extern void smp_send_reschedule(int cpu);
+extern void smp_send_all_nop(void);
 
 #endif /* !ASSEMBLY */
 
@@ -53,7 +54,11 @@ extern unsigned long cpu_present_mask;
 
 #define raw_smp_processor_id()	(current_thread_info()->cpu)
 
-#endif /* CONFIG_SMP */
+#else /* CONFIG_SMP */
+
+static inline void smp_send_all_nop(void) { return; }
+
+#endif
 
 #define NO_PROC_ID		0xFF		/* No processor magic marker */
 #define ANY_PROC_ID		0xFF		/* Any processor magic marker */
diff --git a/include/asm-parisc/spinlock.h b/include/asm-parisc/spinlock.h
index 7c3f406a746a..16c2ac075fc5 100644
--- a/include/asm-parisc/spinlock.h
+++ b/include/asm-parisc/spinlock.h
@@ -11,18 +11,25 @@ static inline int __raw_spin_is_locked(raw_spinlock_t *x)
 	return *a == 0;
 }
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
 #define __raw_spin_unlock_wait(x) \
 		do { cpu_relax(); } while (__raw_spin_is_locked(x))
 
-static inline void __raw_spin_lock(raw_spinlock_t *x)
+static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
+					 unsigned long flags)
 {
 	volatile unsigned int *a;
 
 	mb();
 	a = __ldcw_align(x);
 	while (__ldcw(a) == 0)
-		while (*a == 0);
+		while (*a == 0)
+			if (flags & PSW_SM_I) {
+				local_irq_enable();
+				cpu_relax();
+				local_irq_disable();
+			} else
+				cpu_relax();
 	mb();
 }
 
@@ -60,26 +67,20 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x)
 
 static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
 {
-	unsigned long flags;
-	local_irq_save(flags);
 	__raw_spin_lock(&rw->lock);
 
 	rw->counter++;
 
 	__raw_spin_unlock(&rw->lock);
-	local_irq_restore(flags);
 }
 
 static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
 {
-	unsigned long flags;
-	local_irq_save(flags);
 	__raw_spin_lock(&rw->lock);
 
 	rw->counter--;
 
 	__raw_spin_unlock(&rw->lock);
-	local_irq_restore(flags);
 }
 
 /* write_lock is less trivial.  We optimistically grab the lock and check
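The rewritten __raw_spin_lock_flags() briefly re-enables interrupts around cpu_relax() while it spins, but only if the caller's saved PSW (the flags argument) had the I bit set. The shape of that pattern, reduced to portable C with a hypothetical try_acquire() helper (sketch only — the real code spins on the LDCW semaphore word):

static void lock_with_saved_flags(volatile int *lock_word, unsigned long irqs_were_on)
{
	while (!try_acquire(lock_word)) {	/* try_acquire() is a stand-in, not a kernel API */
		if (irqs_were_on) {
			local_irq_enable();	/* service pending interrupts while waiting */
			cpu_relax();
			local_irq_disable();
		} else {
			cpu_relax();
		}
	}
}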
diff --git a/include/asm-parisc/tlbflush.h b/include/asm-parisc/tlbflush.h
index e97aa8d1eff5..c9ec39c6fc6c 100644
--- a/include/asm-parisc/tlbflush.h
+++ b/include/asm-parisc/tlbflush.h
@@ -12,21 +12,15 @@
  * N class systems, only one PxTLB inter processor broadcast can be
  * active at any one time on the Merced bus.  This tlb purge
  * synchronisation is fairly lightweight and harmless so we activate
- * it on all SMP systems not just the N class. */
-#ifdef CONFIG_SMP
+ * it on all SMP systems not just the N class.  We also need to have
+ * preemption disabled on uniprocessor machines, and spin_lock does that
+ * nicely.
+ */
 extern spinlock_t pa_tlb_lock;
 
 #define purge_tlb_start(x) spin_lock(&pa_tlb_lock)
 #define purge_tlb_end(x) spin_unlock(&pa_tlb_lock)
 
-#else
-
-#define purge_tlb_start(x) do { } while(0)
-#define purge_tlb_end(x) do { } while (0)
-
-#endif
-
-
 extern void flush_tlb_all(void);
 
 /*
@@ -88,7 +82,6 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 	if (npages >= 512)  /* 2MB of space: arbitrary, should be tuned */
 		flush_tlb_all();
 	else {
-		preempt_disable();
 		mtsp(vma->vm_mm->context,1);
 		purge_tlb_start();
 		if (split_tlb) {
@@ -102,7 +95,6 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 				pdtlb(start);
 				start += PAGE_SIZE;
 			}
-		preempt_enable();
 		}
 		purge_tlb_end();
 	}
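As the new comment says, purge_tlb_start() now always takes pa_tlb_lock, and spin_lock() itself disables preemption, which is why the explicit preempt_disable()/preempt_enable() pair could be dropped. Callers simply bracket the purge with the lock, e.g. (illustrative pattern mirroring flush_tlb_range() above):

	purge_tlb_start();	/* spin_lock(&pa_tlb_lock): also disables preemption */
	/* ... pdtlb()/pitlb() purges for the range ... */
	purge_tlb_end();	/* spin_unlock(&pa_tlb_lock) */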
diff --git a/include/asm-ppc/dma-mapping.h b/include/asm-powerpc/dma-mapping.h
index 6e9635114433..59a80163f75f 100644
--- a/include/asm-ppc/dma-mapping.h
+++ b/include/asm-powerpc/dma-mapping.h
@@ -1,15 +1,22 @@
 /*
- * This is based on both include/asm-sh/dma-mapping.h and
- * include/asm-ppc/pci.h
+ * Copyright (C) 2004 IBM
+ *
+ * Implements the generic device dma API for powerpc.
+ * the pci and vio busses
  */
-#ifndef __ASM_PPC_DMA_MAPPING_H
-#define __ASM_PPC_DMA_MAPPING_H
+#ifndef _ASM_DMA_MAPPING_H
+#define _ASM_DMA_MAPPING_H
 
 #include <linux/config.h>
+#include <linux/types.h>
+#include <linux/cache.h>
 /* need struct page definitions */
 #include <linux/mm.h>
 #include <asm/scatterlist.h>
 #include <asm/io.h>
+#include <asm/bug.h>
+
+#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
 
 #ifdef CONFIG_NOT_COHERENT_CACHE
 /*
@@ -24,22 +31,12 @@ extern void __dma_free_coherent(size_t size, void *vaddr);
 extern void __dma_sync(void *vaddr, size_t size, int direction);
 extern void __dma_sync_page(struct page *page, unsigned long offset,
 				 size_t size, int direction);
-#define dma_cache_inv(_start,_size) \
-	invalidate_dcache_range(_start, (_start + _size))
-#define dma_cache_wback(_start,_size) \
-	clean_dcache_range(_start, (_start + _size))
-#define dma_cache_wback_inv(_start,_size) \
-	flush_dcache_range(_start, (_start + _size))
 
 #else /* ! CONFIG_NOT_COHERENT_CACHE */
 /*
  * Cache coherent cores.
  */
 
-#define dma_cache_inv(_start,_size)		do { } while (0)
-#define dma_cache_wback(_start,_size)		do { } while (0)
-#define dma_cache_wback_inv(_start,_size)	do { } while (0)
-
 #define __dma_alloc_coherent(gfp, size, handle)	NULL
 #define __dma_free_coherent(size, addr)		do { } while (0)
 #define __dma_sync(addr, size, rw)		do { } while (0)
@@ -47,6 +44,30 @@ extern void __dma_sync_page(struct page *page, unsigned long offset,
 
 #endif /* ! CONFIG_NOT_COHERENT_CACHE */
 
+#ifdef CONFIG_PPC64
+
+extern int dma_supported(struct device *dev, u64 mask);
+extern int dma_set_mask(struct device *dev, u64 dma_mask);
+extern void *dma_alloc_coherent(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t flag);
+extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
+		dma_addr_t dma_handle);
+extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+		size_t size, enum dma_data_direction direction);
+extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+		size_t size, enum dma_data_direction direction);
+extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size,
+		enum dma_data_direction direction);
+extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+		size_t size, enum dma_data_direction direction);
+extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction direction);
+extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+		int nhwentries, enum dma_data_direction direction);
+
+#else /* CONFIG_PPC64 */
+
 #define dma_supported(dev, mask)	(1)
 
 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
@@ -144,29 +165,27 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 /* We don't do anything here. */
 #define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)
 
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-			size_t size,
-			enum dma_data_direction direction)
+#endif /* CONFIG_PPC64 */
+
+static inline void dma_sync_single_for_cpu(struct device *dev,
+		dma_addr_t dma_handle, size_t size,
+		enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
-
 	__dma_sync(bus_to_virt(dma_handle), size, direction);
 }
 
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
-			size_t size,
-			enum dma_data_direction direction)
+static inline void dma_sync_single_for_device(struct device *dev,
+		dma_addr_t dma_handle, size_t size,
+		enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
-
 	__dma_sync(bus_to_virt(dma_handle), size, direction);
 }
 
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sg, int nents,
 		enum dma_data_direction direction)
 {
 	int i;
 
@@ -176,9 +195,9 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
 		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
 }
 
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
+static inline void dma_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sg, int nents,
 		enum dma_data_direction direction)
 {
 	int i;
 
@@ -188,6 +207,15 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
 		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
 }
 
+static inline int dma_mapping_error(dma_addr_t dma_addr)
+{
+#ifdef CONFIG_PPC64
+	return (dma_addr == DMA_ERROR_CODE);
+#else
+	return 0;
+#endif
+}
+
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 #ifdef CONFIG_NOT_COHERENT_CACHE
@@ -198,40 +226,60 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
 
 static inline int dma_get_cache_alignment(void)
 {
+#ifdef CONFIG_PPC64
+	/* no easy way to get cache size on all processors, so return
+	 * the maximum possible, to be safe */
+	return (1 << L1_CACHE_SHIFT_MAX);
+#else
 	/*
 	 * Each processor family will define its own L1_CACHE_SHIFT,
 	 * L1_CACHE_BYTES wraps to this, so this is always safe.
 	 */
 	return L1_CACHE_BYTES;
+#endif
 }
 
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-		unsigned long offset, size_t size,
-		enum dma_data_direction direction)
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+		dma_addr_t dma_handle, unsigned long offset, size_t size,
+		enum dma_data_direction direction)
 {
 	/* just sync everything for now */
 	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
 }
 
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-		unsigned long offset, size_t size,
-		enum dma_data_direction direction)
+static inline void dma_sync_single_range_for_device(struct device *dev,
+		dma_addr_t dma_handle, unsigned long offset, size_t size,
+		enum dma_data_direction direction)
 {
 	/* just sync everything for now */
 	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
 }
 
 static inline void dma_cache_sync(void *vaddr, size_t size,
 		enum dma_data_direction direction)
 {
+	BUG_ON(direction == DMA_NONE);
 	__dma_sync(vaddr, size, (int)direction);
 }
 
-static inline int dma_mapping_error(dma_addr_t dma_addr)
-{
-	return 0;
-}
-
-#endif /* __ASM_PPC_DMA_MAPPING_H */
+/*
+ * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
+ */
+struct dma_mapping_ops {
+	void *		(*alloc_coherent)(struct device *dev, size_t size,
+				dma_addr_t *dma_handle, gfp_t flag);
+	void		(*free_coherent)(struct device *dev, size_t size,
+				void *vaddr, dma_addr_t dma_handle);
+	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
+				size_t size, enum dma_data_direction direction);
+	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
+				size_t size, enum dma_data_direction direction);
+	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
+				int nents, enum dma_data_direction direction);
+	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
+				int nents, enum dma_data_direction direction);
+	int		(*dma_supported)(struct device *dev, u64 mask);
+	int		(*dac_dma_supported)(struct device *dev, u64 mask);
+};
+
+#endif /* _ASM_DMA_MAPPING_H */
diff --git a/include/asm-ppc64/io.h b/include/asm-powerpc/io.h
index 77fc07c3c6bd..48938d84d055 100644
--- a/include/asm-ppc64/io.h
+++ b/include/asm-powerpc/io.h
@@ -1,5 +1,5 @@
-#ifndef _PPC64_IO_H
-#define _PPC64_IO_H
+#ifndef _ASM_POWERPC_IO_H
+#define _ASM_POWERPC_IO_H
 
 /*
  * This program is free software; you can redistribute it and/or
@@ -8,7 +8,10 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#include <linux/config.h>
+#ifndef CONFIG_PPC64
+#include <asm-ppc/io.h>
+#else
+
 #include <linux/compiler.h>
 #include <asm/page.h>
 #include <asm/byteorder.h>
@@ -455,4 +458,5 @@ extern int check_legacy_ioport(unsigned long base_port);
 
 #endif /* __KERNEL__ */
 
-#endif /* _PPC64_IO_H */
+#endif /* CONFIG_PPC64 */
+#endif /* _ASM_POWERPC_IO_H */
diff --git a/include/asm-ppc64/mmu.h b/include/asm-powerpc/mmu.h
index 1a7e0afa2dc6..c1b4bbabbe97 100644
--- a/include/asm-ppc64/mmu.h
+++ b/include/asm-powerpc/mmu.h
@@ -1,3 +1,10 @@
+#ifndef _ASM_POWERPC_MMU_H_
+#define _ASM_POWERPC_MMU_H_
+
+#ifndef CONFIG_PPC64
+#include <asm-ppc/mmu.h>
+#else
+
 /*
  * PowerPC memory management structures
  *
@@ -10,10 +17,6 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#ifndef _PPC64_MMU_H_
-#define _PPC64_MMU_H_
-
-#include <linux/config.h>
 #include <asm/asm-compat.h>
 #include <asm/page.h>
 
@@ -392,4 +395,5 @@ static inline unsigned long get_vsid(unsigned long context, unsigned long ea)
 
 #endif /* __ASSEMBLY */
 
-#endif /* _PPC64_MMU_H_ */
+#endif /* CONFIG_PPC64 */
+#endif /* _ASM_POWERPC_MMU_H_ */
diff --git a/include/asm-ppc64/mmu_context.h b/include/asm-powerpc/mmu_context.h
index 4f512e9fa6b8..ea6798c7d5fc 100644
--- a/include/asm-ppc64/mmu_context.h
+++ b/include/asm-powerpc/mmu_context.h
@@ -1,7 +1,10 @@
-#ifndef __PPC64_MMU_CONTEXT_H
-#define __PPC64_MMU_CONTEXT_H
+#ifndef __ASM_POWERPC_MMU_CONTEXT_H
+#define __ASM_POWERPC_MMU_CONTEXT_H
+
+#ifndef CONFIG_PPC64
+#include <asm-ppc/mmu_context.h>
+#else
 
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <asm/mmu.h>
@@ -82,4 +85,5 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
 	local_irq_restore(flags);
 }
 
-#endif /* __PPC64_MMU_CONTEXT_H */
+#endif /* CONFIG_PPC64 */
+#endif /* __ASM_POWERPC_MMU_CONTEXT_H */
diff --git a/include/asm-ppc64/mmzone.h b/include/asm-powerpc/mmzone.h
index 54958d6cae04..54958d6cae04 100644
--- a/include/asm-ppc64/mmzone.h
+++ b/include/asm-powerpc/mmzone.h
diff --git a/include/asm-ppc64/pci-bridge.h b/include/asm-powerpc/pci-bridge.h
index cf04327a597a..223ec7bd81da 100644
--- a/include/asm-ppc64/pci-bridge.h
+++ b/include/asm-powerpc/pci-bridge.h
@@ -1,8 +1,10 @@
-#ifdef __KERNEL__
-#ifndef _ASM_PCI_BRIDGE_H
-#define _ASM_PCI_BRIDGE_H
+#ifndef _ASM_POWERPC_PCI_BRIDGE_H
+#define _ASM_POWERPC_PCI_BRIDGE_H
+
+#ifndef CONFIG_PPC64
+#include <asm-ppc/pci-bridge.h>
+#else
 
-#include <linux/config.h>
 #include <linux/pci.h>
 #include <linux/list.h>
 
@@ -147,5 +149,5 @@ extern void pcibios_free_controller(struct pci_controller *phb);
 #define PCI_PROBE_NORMAL	0	/* Do normal PCI probing */
 #define PCI_PROBE_DEVTREE	1	/* Instantiate from device tree */
 
+#endif /* CONFIG_PPC64 */
 #endif
-#endif /* __KERNEL__ */
diff --git a/include/asm-ppc64/pci.h b/include/asm-powerpc/pci.h
index fafdf885a3cc..d5934a076bd0 100644
--- a/include/asm-ppc64/pci.h
+++ b/include/asm-powerpc/pci.h
@@ -1,5 +1,5 @@
-#ifndef __PPC64_PCI_H
-#define __PPC64_PCI_H
+#ifndef __ASM_POWERPC_PCI_H
+#define __ASM_POWERPC_PCI_H
 #ifdef __KERNEL__
 
 /*
@@ -18,6 +18,7 @@
 #include <asm/scatterlist.h>
 #include <asm/io.h>
 #include <asm/prom.h>
+#include <asm/pci-bridge.h>
 
 #include <asm-generic/pci-dma-compat.h>
 
@@ -26,11 +27,21 @@
 
 struct pci_dev;
 
-#ifdef CONFIG_PPC_ISERIES
+/* Values for the `which' argument to sys_pciconfig_iobase syscall.  */
+#define IOBASE_BRIDGE_NUMBER	0
+#define IOBASE_MEMORY		1
+#define IOBASE_IO		2
+#define IOBASE_ISA_IO		3
+#define IOBASE_ISA_MEM		4
+
+/*
+ * Set this to 1 if you want the kernel to re-assign all PCI
+ * bus numbers
+ */
+extern int pci_assign_all_buses;
+#define pcibios_assign_all_busses()	(pci_assign_all_buses)
+
 #define pcibios_scan_all_fns(a, b)	0
-#else
-extern int pcibios_scan_all_fns(struct pci_bus *bus, int devfn);
-#endif
 
 static inline void pcibios_set_master(struct pci_dev *dev)
 {
@@ -50,6 +61,7 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 	return channel ? 15 : 14;
 }
 
+#ifdef CONFIG_PPC64
 #define HAVE_ARCH_PCI_MWI 1
 static inline int pcibios_prep_mwi(struct pci_dev *dev)
 {
@@ -64,12 +76,10 @@ static inline int pcibios_prep_mwi(struct pci_dev *dev)
 	return 0;
 }
 
-extern unsigned int pcibios_assign_all_busses(void);
-
 extern struct dma_mapping_ops pci_dma_ops;
 
 /* For DAC DMA, we currently don't support it by default, but
- * we let the platform override this
+ * we let 64-bit platforms override this.
  */
 static inline int pci_dac_dma_supported(struct pci_dev *hwdev,u64 mask)
 {
@@ -102,6 +112,35 @@ extern int pci_domain_nr(struct pci_bus *bus);
 /* Decide whether to display the domain number in /proc */
 extern int pci_proc_domain(struct pci_bus *bus);
 
+#else /* 32-bit */
+
+#ifdef CONFIG_PCI
+static inline void pci_dma_burst_advice(struct pci_dev *pdev,
+					enum pci_dma_burst_strategy *strat,
+					unsigned long *strategy_parameter)
+{
+	*strat = PCI_DMA_BURST_INFINITY;
+	*strategy_parameter = ~0UL;
+}
+#endif
+
+/*
+ * At present there are very few 32-bit PPC machines that can have
+ * memory above the 4GB point, and we don't support that.
+ */
+#define pci_dac_dma_supported(pci_dev, mask)	(0)
+
+/* Return the index of the PCI controller for device PDEV. */
+#define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index
+
+/* Set the name of the bus as it appears in /proc/bus/pci */
+static inline int pci_proc_domain(struct pci_bus *bus)
+{
+	return 0;
+}
+
+#endif /* CONFIG_PPC64 */
+
 struct vm_area_struct;
 /* Map a range of PCI memory or I/O space for a device into user space */
 int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
@@ -110,6 +149,7 @@ int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
 /* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
 #define HAVE_PCI_MMAP	1
 
+#ifdef CONFIG_PPC64
 /* pci_unmap_{single,page} is not a nop, thus... */
 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
 	dma_addr_t ADDR_NAME;
@@ -124,22 +164,40 @@ int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
 #define pci_unmap_len_set(PTR, LEN_NAME, VAL)		\
 	(((PTR)->LEN_NAME) = (VAL))
 
-/* The PCI address space does equal the physical memory
- * address space.  The networking and block device layers use
+/* The PCI address space does not equal the physical memory address
+ * space (we have an IOMMU).  The IDE and SCSI device layers use
  * this boolean for bounce buffer decisions.
  */
 #define PCI_DMA_BUS_IS_PHYS	(0)
+
+#else /* 32-bit */
+
+/* The PCI address space does equal the physical memory
+ * address space (no IOMMU).  The IDE and SCSI device layers use
+ * this boolean for bounce buffer decisions.
+ */
+#define PCI_DMA_BUS_IS_PHYS     (1)
+
+/* pci_unmap_{page,single} is a nop so... */
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
+#define pci_unmap_addr(PTR, ADDR_NAME)		(0)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
+#define pci_unmap_len(PTR, LEN_NAME)		(0)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
+
+#endif /* CONFIG_PPC64 */
 
-extern void
-pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
+extern void pcibios_resource_to_bus(struct pci_dev *dev,
+			struct pci_bus_region *region,
 			struct resource *res);
 
-extern void
-pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+extern void pcibios_bus_to_resource(struct pci_dev *dev,
+			struct resource *res,
 			struct pci_bus_region *region);
 
-static inline struct resource *
-pcibios_select_root(struct pci_dev *pdev, struct resource *res)
+static inline struct resource *pcibios_select_root(struct pci_dev *pdev,
+			struct resource *res)
 {
 	struct resource *root = NULL;
 
@@ -151,14 +209,12 @@ pcibios_select_root(struct pci_dev *pdev, struct resource *res)
 	return root;
 }
 
-extern int
-unmap_bus_range(struct pci_bus *bus);
+extern int unmap_bus_range(struct pci_bus *bus);
 
-extern int
-remap_bus_range(struct pci_bus *bus);
+extern int remap_bus_range(struct pci_bus *bus);
 
-extern void
-pcibios_fixup_device_resources(struct pci_dev *dev, struct pci_bus *bus);
+extern void pcibios_fixup_device_resources(struct pci_dev *dev,
+			struct pci_bus *bus);
 
 extern struct pci_controller *init_phb_dynamic(struct device_node *dn);
 
@@ -180,14 +236,12 @@ extern pgprot_t pci_phys_mem_access_prot(struct file *file,
 				 unsigned long size,
 				 pgprot_t prot);
 
-#ifdef CONFIG_PPC_MULTIPLATFORM
+#if defined(CONFIG_PPC_MULTIPLATFORM) || defined(CONFIG_PPC32)
 #define HAVE_ARCH_PCI_RESOURCE_TO_USER
 extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
 				 const struct resource *rsrc,
 				 u64 *start, u64 *end);
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-
+#endif /* CONFIG_PPC_MULTIPLATFORM || CONFIG_PPC32 */
 
 #endif /* __KERNEL__ */
-
-#endif /* __PPC64_PCI_H */
+#endif /* __ASM_POWERPC_PCI_H */
diff --git a/include/asm-ppc64/pgalloc.h b/include/asm-powerpc/pgalloc.h
index dcf3622d1946..bfc2113b3630 100644
--- a/include/asm-ppc64/pgalloc.h
+++ b/include/asm-powerpc/pgalloc.h
@@ -1,5 +1,9 @@
-#ifndef _PPC64_PGALLOC_H
-#define _PPC64_PGALLOC_H
+#ifndef _ASM_POWERPC_PGALLOC_H
+#define _ASM_POWERPC_PGALLOC_H
+
+#ifndef CONFIG_PPC64
+#include <asm-ppc/pgalloc.h>
+#else
 
 #include <linux/mm.h>
 #include <linux/slab.h>
@@ -148,4 +152,5 @@ extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
 
 #define check_pgt_cache()	do { } while (0)
 
-#endif /* _PPC64_PGALLOC_H */
+#endif /* CONFIG_PPC64 */
+#endif /* _ASM_POWERPC_PGALLOC_H */
diff --git a/include/asm-ppc64/pgtable-4k.h b/include/asm-powerpc/pgtable-4k.h
index e9590c06ad92..e9590c06ad92 100644
--- a/include/asm-ppc64/pgtable-4k.h
+++ b/include/asm-powerpc/pgtable-4k.h
diff --git a/include/asm-ppc64/pgtable-64k.h b/include/asm-powerpc/pgtable-64k.h
index 154f1840ece4..154f1840ece4 100644
--- a/include/asm-ppc64/pgtable-64k.h
+++ b/include/asm-powerpc/pgtable-64k.h
diff --git a/include/asm-ppc64/pgtable.h b/include/asm-powerpc/pgtable.h
index a9783ba7fe98..0303f57366c1 100644
--- a/include/asm-ppc64/pgtable.h
+++ b/include/asm-powerpc/pgtable.h
@@ -1,5 +1,9 @@
-#ifndef _PPC64_PGTABLE_H
-#define _PPC64_PGTABLE_H
+#ifndef _ASM_POWERPC_PGTABLE_H
+#define _ASM_POWERPC_PGTABLE_H
+
+#ifndef CONFIG_PPC64
+#include <asm-ppc/pgtable.h>
+#else
 
 /*
  * This file contains the functions and defines necessary to modify and use
@@ -47,6 +51,13 @@ struct mm_struct;
 #define VMALLOC_END      (VMALLOC_START + VMALLOC_SIZE)
 
 /*
+ * Define the address range of the imalloc VM area.
+ */
+#define PHBS_IO_BASE	VMALLOC_END
+#define IMALLOC_BASE	(PHBS_IO_BASE + 0x80000000ul)	/* Reserve 2 gigs for PHBs */
+#define IMALLOC_END	(VMALLOC_START + PGTABLE_RANGE)
+
+/*
  * Common bits in a linux-style PTE.  These match the bits in the
  * (hardware-defined) PowerPC PTE as closely as possible. Additional
  * bits may be defined in pgtable-*.h
@@ -69,7 +80,7 @@ struct mm_struct;
 
 #define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY)
 
-/* __pgprot defined in asm-ppc64/page.h */
+/* __pgprot defined in asm-powerpc/page.h */
 #define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
 
 #define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER)
@@ -509,4 +520,5 @@ void pgtable_cache_init(void);
 
 #endif /* __ASSEMBLY__ */
 
-#endif /* _PPC64_PGTABLE_H */
+#endif /* CONFIG_PPC64 */
+#endif /* _ASM_POWERPC_PGTABLE_H */
diff --git a/include/asm-powerpc/ppc-pci.h b/include/asm-powerpc/ppc-pci.h
index 2e36e5a7f4f3..36cdc869e580 100644
--- a/include/asm-powerpc/ppc-pci.h
+++ b/include/asm-powerpc/ppc-pci.h
@@ -48,8 +48,6 @@ extern void pSeries_final_fixup(void);
 extern void pSeries_irq_bus_setup(struct pci_bus *bus);
 
 extern unsigned long pci_probe_only;
-extern unsigned long pci_assign_all_buses;
-extern int pci_read_irq_line(struct pci_dev *pci_dev);
 
 /* ---- EEH internal-use-only related routines ---- */
 #ifdef CONFIG_EEH
diff --git a/include/asm-ppc64/spinlock.h b/include/asm-powerpc/spinlock.h
index 7d84fb5e39f1..caa4b14e0e94 100644
--- a/include/asm-ppc64/spinlock.h
+++ b/include/asm-powerpc/spinlock.h
@@ -18,31 +18,41 @@
  *
  * (the type definitions are in asm/spinlock_types.h)
  */
-#include <linux/config.h>
+#ifdef CONFIG_PPC64
 #include <asm/paca.h>
 #include <asm/hvcall.h>
 #include <asm/iseries/hv_call.h>
+#endif
+#include <asm/asm-compat.h>
+#include <asm/synch.h>
 
 #define __raw_spin_is_locked(x)		((x)->slock != 0)
 
+#ifdef CONFIG_PPC64
+/* use 0x800000yy when locked, where yy == CPU number */
+#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
+#else
+#define LOCK_TOKEN	1
+#endif
+
 /*
  * This returns the old value in the lock, so we succeeded
  * in getting the lock if the return value is 0.
  */
 static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock)
 {
-	unsigned long tmp, tmp2;
+	unsigned long tmp, token;
 
+	token = LOCK_TOKEN;
 	__asm__ __volatile__(
-"	lwz		%1,%3(13)		# __spin_trylock\n\
-1:	lwarx		%0,0,%2\n\
+"1:	lwarx		%0,0,%2		# __spin_trylock\n\
 	cmpwi		0,%0,0\n\
 	bne-		2f\n\
 	stwcx.		%1,0,%2\n\
 	bne-		1b\n\
 	isync\n\
-2:"	: "=&r" (tmp), "=&r" (tmp2)
-	: "r" (&lock->slock), "i" (offsetof(struct paca_struct, lock_token))
+2:"	: "=&r" (tmp)
+	: "r" (token), "r" (&lock->slock)
 	: "cr0", "memory");
 
 	return tmp;
@@ -113,11 +123,17 @@ static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long
 
 static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-	__asm__ __volatile__("lwsync	# __raw_spin_unlock": : :"memory");
+	__asm__ __volatile__(SYNC_ON_SMP"	# __raw_spin_unlock"
+			: : :"memory");
 	lock->slock = 0;
 }
 
+#ifdef CONFIG_PPC64
 extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
+#else
+#define __raw_spin_unlock_wait(lock) \
+	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#endif
 
 /*
  * Read-write spinlocks, allowing multiple readers
@@ -133,6 +149,14 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
 #define __raw_read_can_lock(rw)		((rw)->lock >= 0)
 #define __raw_write_can_lock(rw)	(!(rw)->lock)
 
+#ifdef CONFIG_PPC64
+#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
+#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
+#else
+#define __DO_SIGN_EXTEND
+#define WRLOCK_TOKEN		(-1)
+#endif
+
 /*
  * This returns the old value in the lock + 1,
  * so we got a read lock if the return value is > 0.
@@ -142,11 +166,12 @@ static long __inline__ __read_trylock(raw_rwlock_t *rw)
 	long tmp;
 
 	__asm__ __volatile__(
-"1:	lwarx		%0,0,%1		# read_trylock\n\
-	extsw		%0,%0\n\
-	addic.		%0,%0,1\n\
-	ble-		2f\n\
-	stwcx.		%0,0,%1\n\
+"1:	lwarx		%0,0,%1		# read_trylock\n"
+	__DO_SIGN_EXTEND
+"	addic.		%0,%0,1\n\
+	ble-		2f\n"
+	PPC405_ERR77(0,%1)
+"	stwcx.		%0,0,%1\n\
 	bne-		1b\n\
 	isync\n\
 2:"	: "=&r" (tmp)
@@ -162,18 +187,19 @@ static long __inline__ __read_trylock(raw_rwlock_t *rw)
  */
 static __inline__ long __write_trylock(raw_rwlock_t *rw)
 {
-	long tmp, tmp2;
+	long tmp, token;
 
+	token = WRLOCK_TOKEN;
 	__asm__ __volatile__(
-"	lwz		%1,%3(13)	# write_trylock\n\
-1:	lwarx		%0,0,%2\n\
+"1:	lwarx		%0,0,%2	# write_trylock\n\
 	cmpwi		0,%0,0\n\
-	bne-		2f\n\
-	stwcx.		%1,0,%2\n\
+	bne-		2f\n"
+	PPC405_ERR77(0,%1)
+"	stwcx.		%1,0,%2\n\
 	bne-		1b\n\
 	isync\n\
-2:"	: "=&r" (tmp), "=&r" (tmp2)
-	: "r" (&rw->lock), "i" (offsetof(struct paca_struct, lock_token))
+2:"	: "=&r" (tmp)
+	: "r" (token), "r" (&rw->lock)
 	: "cr0", "memory");
 
 	return tmp;
@@ -224,8 +250,9 @@ static void __inline__ __raw_read_unlock(raw_rwlock_t *rw)
 	__asm__ __volatile__(
 	"eieio			# read_unlock\n\
 1:	lwarx		%0,0,%1\n\
-	addic		%0,%0,-1\n\
-	stwcx.		%0,0,%1\n\
+	addic		%0,%0,-1\n"
+	PPC405_ERR77(0,%1)
+"	stwcx.		%0,0,%1\n\
 	bne-		1b"
 	: "=&r"(tmp)
 	: "r"(&rw->lock)
@@ -234,7 +261,8 @@ static void __inline__ __raw_read_unlock(raw_rwlock_t *rw)
 
 static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
 {
-	__asm__ __volatile__("lwsync	# write_unlock": : :"memory");
+	__asm__ __volatile__(SYNC_ON_SMP"	# write_unlock"
+			: : :"memory");
 	rw->lock = 0;
 }
 
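A rough C rendering of what the trylock asm above does (sketch only — the real code depends on lwarx/stwcx. for atomicity, which plain C cannot express): the lock word is claimed by storing LOCK_TOKEN, which is 0x800000yy (yy = CPU number) on ppc64 and simply 1 on ppc32.

/* NOT the real implementation; the reservation semantics are elided */
static unsigned long spin_trylock_sketch(volatile unsigned int *slock, unsigned int token)
{
	unsigned int old = *slock;	/* lwarx: load-and-reserve */

	if (old == 0)
		*slock = token;		/* stwcx.: store only if still reserved */
	return old;			/* zero means the lock was taken */
}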
diff --git a/include/asm-ppc/io.h b/include/asm-ppc/io.h
index 2bfdf9c98459..84ac6e258eef 100644
--- a/include/asm-ppc/io.h
+++ b/include/asm-ppc/io.h
@@ -545,6 +545,23 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
 #include <asm/mpc8260_pci9.h>
 #endif
 
+#ifdef CONFIG_NOT_COHERENT_CACHE
+
+#define dma_cache_inv(_start,_size) \
+	invalidate_dcache_range(_start, (_start + _size))
+#define dma_cache_wback(_start,_size) \
+	clean_dcache_range(_start, (_start + _size))
+#define dma_cache_wback_inv(_start,_size) \
+	flush_dcache_range(_start, (_start + _size))
+
+#else
+
+#define dma_cache_inv(_start,_size)		do { } while (0)
+#define dma_cache_wback(_start,_size)		do { } while (0)
+#define dma_cache_wback_inv(_start,_size)	do { } while (0)
+
+#endif
+
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
  * access
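These dma_cache_* macros (moved here from the old dma-mapping.h) take a kernel virtual address and a length; they expand to real cache maintenance only on CONFIG_NOT_COHERENT_CACHE parts and to no-ops elsewhere. Illustrative call, with buf and len assumed to come from surrounding driver code:

	/* write the buffer back to memory before a device reads it via DMA */
	dma_cache_wback((unsigned long)buf, len);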
diff --git a/include/asm-ppc64/dma-mapping.h b/include/asm-ppc64/dma-mapping.h
deleted file mode 100644
index fb68fa23bea8..000000000000
--- a/include/asm-ppc64/dma-mapping.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/* Copyright (C) 2004 IBM
- *
- * Implements the generic device dma API for ppc64. Handles
- * the pci and vio busses
- */
-
-#ifndef _ASM_DMA_MAPPING_H
-#define _ASM_DMA_MAPPING_H
-
-#include <linux/types.h>
-#include <linux/cache.h>
-/* need struct page definitions */
-#include <linux/mm.h>
-#include <asm/scatterlist.h>
-#include <asm/bug.h>
-
-#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
-
-extern int dma_supported(struct device *dev, u64 mask);
-extern int dma_set_mask(struct device *dev, u64 dma_mask);
-extern void *dma_alloc_coherent(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t flag);
-extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-		dma_addr_t dma_handle);
-extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-		size_t size, enum dma_data_direction direction);
-extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-		size_t size, enum dma_data_direction direction);
-extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size,
-		enum dma_data_direction direction);
-extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-		size_t size, enum dma_data_direction direction);
-extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction direction);
-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-		int nhwentries, enum dma_data_direction direction);
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	/* nothing to do */
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	/* nothing to do */
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-		enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	/* nothing to do */
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-		enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	/* nothing to do */
-}
-
-static inline int dma_mapping_error(dma_addr_t dma_addr)
-{
-	return (dma_addr == DMA_ERROR_CODE);
-}
-
-/* Now for the API extensions over the pci_ one */
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d)	(1)
-
-static inline int
-dma_get_cache_alignment(void)
-{
-	/* no easy way to get cache size on all processors, so return
-	 * the maximum possible, to be safe */
-	return (1 << L1_CACHE_SHIFT_MAX);
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-		unsigned long offset, size_t size,
-		enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	/* nothing to do */
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-		unsigned long offset, size_t size,
-		enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	/* nothing to do */
-}
-
-static inline void
-dma_cache_sync(void *vaddr, size_t size,
-		enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	/* nothing to do */
-}
-
-/*
- * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
- */
-struct dma_mapping_ops {
-	void *		(*alloc_coherent)(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t flag);
-	void		(*free_coherent)(struct device *dev, size_t size,
-				void *vaddr, dma_addr_t dma_handle);
-	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
-				size_t size, enum dma_data_direction direction);
-	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
-				size_t size, enum dma_data_direction direction);
-	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction direction);
-	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction direction);
-	int		(*dma_supported)(struct device *dev, u64 mask);
-	int		(*dac_dma_supported)(struct device *dev, u64 mask);
-};
-
-#endif	/* _ASM_DMA_MAPPING_H */
diff --git a/include/asm-ppc64/imalloc.h b/include/asm-ppc64/imalloc.h
deleted file mode 100644
index 42adf7033a81..000000000000
--- a/include/asm-ppc64/imalloc.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef _PPC64_IMALLOC_H
-#define _PPC64_IMALLOC_H
-
-/*
- * Define the address range of the imalloc VM area.
- */
-#define PHBS_IO_BASE	VMALLOC_END
-#define IMALLOC_BASE	(PHBS_IO_BASE + 0x80000000ul)	/* Reserve 2 gigs for PHBs */
-#define IMALLOC_END	(VMALLOC_START + PGTABLE_RANGE)
-
-
-/* imalloc region types */
-#define IM_REGION_UNUSED	0x1
-#define IM_REGION_SUBSET	0x2
-#define IM_REGION_EXISTS	0x4
-#define IM_REGION_OVERLAP	0x8
-#define IM_REGION_SUPERSET	0x10
-
-extern struct vm_struct * im_get_free_area(unsigned long size);
-extern struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
-			int region_type);
-extern void im_free(void *addr);
-
-extern unsigned long ioremap_bot;
-
-#endif /* _PPC64_IMALLOC_H */
diff --git a/include/asm-ppc64/ptrace-common.h b/include/asm-ppc64/ptrace-common.h
deleted file mode 100644
index b1babb729673..000000000000
--- a/include/asm-ppc64/ptrace-common.h
+++ /dev/null
@@ -1,164 +0,0 @@
1/*
2 * linux/arch/ppc64/kernel/ptrace-common.h
3 *
4 * Copyright (c) 2002 Stephen Rothwell, IBM Coproration
5 * Extracted from ptrace.c and ptrace32.c
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file README.legal in the main directory of
9 * this archive for more details.
10 */
11
12#ifndef _PPC64_PTRACE_COMMON_H
13#define _PPC64_PTRACE_COMMON_H
14
15#include <linux/config.h>
16#include <asm/system.h>
17
18/*
19 * Set of msr bits that gdb can change on behalf of a process.
20 */
21#define MSR_DEBUGCHANGE (MSR_FE0 | MSR_SE | MSR_BE | MSR_FE1)
22
23/*
24 * Get contents of register REGNO in task TASK.
25 */
26static inline unsigned long get_reg(struct task_struct *task, int regno)
27{
28 unsigned long tmp = 0;
29
30 /*
31 * Put the correct FP bits in, they might be wrong as a result
32 * of our lazy FP restore.
33 */
34 if (regno == PT_MSR) {
35 tmp = ((unsigned long *)task->thread.regs)[PT_MSR];
36 tmp |= task->thread.fpexc_mode;
37 } else if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) {
38 tmp = ((unsigned long *)task->thread.regs)[regno];
39 }
40
41 return tmp;
42}
43
44/*
45 * Write contents of register REGNO in task TASK.
46 */
47static inline int put_reg(struct task_struct *task, int regno,
48 unsigned long data)
49{
50 if (regno < PT_SOFTE) {
51 if (regno == PT_MSR)
52 data = (data & MSR_DEBUGCHANGE)
53 | (task->thread.regs->msr & ~MSR_DEBUGCHANGE);
54 ((unsigned long *)task->thread.regs)[regno] = data;
55 return 0;
56 }
57 return -EIO;
58}
59
60static inline void set_single_step(struct task_struct *task)
61{
62 struct pt_regs *regs = task->thread.regs;
63 if (regs != NULL)
64 regs->msr |= MSR_SE;
65 set_ti_thread_flag(task->thread_info, TIF_SINGLESTEP);
66}
67
68static inline void clear_single_step(struct task_struct *task)
69{
70 struct pt_regs *regs = task->thread.regs;
71 if (regs != NULL)
72 regs->msr &= ~MSR_SE;
73 clear_ti_thread_flag(task->thread_info, TIF_SINGLESTEP);
74}
75
76#ifdef CONFIG_ALTIVEC
77/*
78 * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
79 * The transfer totals 34 quadword. Quadwords 0-31 contain the
80 * corresponding vector registers. Quadword 32 contains the vscr as the
81 * last word (offset 12) within that quadword. Quadword 33 contains the
82 * vrsave as the first word (offset 0) within the quadword.
83 *
84 * This definition of the VMX state is compatible with the current PPC32
85 * ptrace interface. This allows signal handling and ptrace to use the
86 * same structures. This also simplifies the implementation of a bi-arch
87 * (combined 32- and 64-bit) gdb.
88 */
89
90/*
91 * Get contents of AltiVec register state in task TASK
92 */
93static inline int get_vrregs(unsigned long __user *data,
94 struct task_struct *task)
95{
96 unsigned long regsize;
97
98 /* copy AltiVec registers VR[0] .. VR[31] */
99 regsize = 32 * sizeof(vector128);
100 if (copy_to_user(data, task->thread.vr, regsize))
101 return -EFAULT;
102 data += (regsize / sizeof(unsigned long));
103
104 /* copy VSCR */
105 regsize = 1 * sizeof(vector128);
106 if (copy_to_user(data, &task->thread.vscr, regsize))
107 return -EFAULT;
108 data += (regsize / sizeof(unsigned long));
109
110 /* copy VRSAVE */
111 if (put_user(task->thread.vrsave, (u32 __user *)data))
112 return -EFAULT;
113
114 return 0;
115}
116
117/*
118 * Write contents of AltiVec register state into task TASK.
119 */
120static inline int set_vrregs(struct task_struct *task,
121 unsigned long __user *data)
122{
123 unsigned long regsize;
124
125 /* copy AltiVec registers VR[0] .. VR[31] */
126 regsize = 32 * sizeof(vector128);
127 if (copy_from_user(task->thread.vr, data, regsize))
128 return -EFAULT;
129 data += (regsize / sizeof(unsigned long));
130
131 /* copy VSCR */
132 regsize = 1 * sizeof(vector128);
133 if (copy_from_user(&task->thread.vscr, data, regsize))
134 return -EFAULT;
135 data += (regsize / sizeof(unsigned long));
136
137 /* copy VRSAVE */
138 if (get_user(task->thread.vrsave, (u32 __user *)data))
139 return -EFAULT;
140
141 return 0;
142}
143#endif
144
145static inline int ptrace_set_debugreg(struct task_struct *task,
146 unsigned long addr, unsigned long data)
147{
148 /* We only support one DABR and no IABRS at the moment */
149 if (addr > 0)
150 return -EINVAL;
151
152 /* The bottom 3 bits are flags */
153 if ((data & ~0x7UL) >= TASK_SIZE)
154 return -EIO;
155
156 /* Ensure translation is on */
157 if (data && !(data & DABR_TRANSLATION))
158 return -EIO;
159
160 task->thread.dabr = data;
161 return 0;
162}
163
164#endif /* _PPC64_PTRACE_COMMON_H */
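The 34-quadword VMX transfer described in the comment above works out to the byte offsets below; a worked illustration assuming 16-byte quadwords, consistent with what get_vrregs()/set_vrregs() copy. The VMX_* names are made up for the sketch:

/* Offsets within the 544-byte (34 * 16) ptrace VMX image */
#define VMX_VR_BYTES        (32 * 16)            /* quadwords 0..31: vr0..vr31             */
#define VMX_VSCR_OFFSET     (VMX_VR_BYTES + 12)  /* quadword 32: vscr is its last word     */
#define VMX_VRSAVE_OFFSET   (VMX_VR_BYTES + 16)  /* quadword 33: vrsave is its first word  */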
diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h
index 24dc39651bc4..10f8b51cec8b 100644
--- a/include/asm-x86_64/msr.h
+++ b/include/asm-x86_64/msr.h
@@ -56,7 +56,7 @@
56 ".section __ex_table,\"a\"\n" \ 56 ".section __ex_table,\"a\"\n" \
57 " .align 8\n" \ 57 " .align 8\n" \
58 " .quad 1b,3b\n" \ 58 " .quad 1b,3b\n" \
59 ".previous":"=&bDS" (ret__), "=a"(a), "=d"(b)\ 59 ".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b))\
60 :"c"(msr), "i"(-EIO), "0"(0)); \ 60 :"c"(msr), "i"(-EIO), "0"(0)); \
61 ret__; }) 61 ret__; })
62 62
diff --git a/include/linux/cciss_ioctl.h b/include/linux/cciss_ioctl.h
index 424d5e622b43..6e27f42e3a57 100644
--- a/include/linux/cciss_ioctl.h
+++ b/include/linux/cciss_ioctl.h
@@ -10,8 +10,8 @@
10typedef struct _cciss_pci_info_struct 10typedef struct _cciss_pci_info_struct
11{ 11{
12 unsigned char bus; 12 unsigned char bus;
13 unsigned short domain;
14 unsigned char dev_fn; 13 unsigned char dev_fn;
14 unsigned short domain;
15 __u32 board_id; 15 __u32 board_id;
16} cciss_pci_info_struct; 16} cciss_pci_info_struct;
17 17
diff --git a/include/linux/hdreg.h b/include/linux/hdreg.h
index b5d660089de4..2b54eac738ea 100644
--- a/include/linux/hdreg.h
+++ b/include/linux/hdreg.h
@@ -80,10 +80,12 @@
80/* 80/*
81 * Define standard taskfile in/out register 81 * Define standard taskfile in/out register
82 */ 82 */
83#define IDE_TASKFILE_STD_OUT_FLAGS 0xFE
84#define IDE_TASKFILE_STD_IN_FLAGS 0xFE 83#define IDE_TASKFILE_STD_IN_FLAGS 0xFE
85#define IDE_HOB_STD_OUT_FLAGS 0x3C
86#define IDE_HOB_STD_IN_FLAGS 0x3C 84#define IDE_HOB_STD_IN_FLAGS 0x3C
85#ifndef __KERNEL__
86#define IDE_TASKFILE_STD_OUT_FLAGS 0xFE
87#define IDE_HOB_STD_OUT_FLAGS 0x3C
88#endif
87 89
88typedef unsigned char task_ioreg_t; 90typedef unsigned char task_ioreg_t;
89typedef unsigned long sata_ioreg_t; 91typedef unsigned long sata_ioreg_t;
diff --git a/include/linux/ide.h b/include/linux/ide.h
index ac8b25fa6506..a39c3c59789d 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -1089,9 +1089,11 @@ enum {
1089 1089
1090/* 1090/*
1091 * Subdrivers support. 1091 * Subdrivers support.
1092 *
1093 * The gendriver.owner field should be set to the module owner of this driver.
1094 * The gendriver.name field should be set to the name of this driver
1092 */ 1095 */
1093typedef struct ide_driver_s { 1096typedef struct ide_driver_s {
1094 struct module *owner;
1095 const char *version; 1097 const char *version;
1096 u8 media; 1098 u8 media;
1097 unsigned supports_dsc_overlap : 1; 1099 unsigned supports_dsc_overlap : 1;
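A hedged sketch of the convention the new comment describes: the subdriver identifies itself through the embedded device_driver rather than a separate owner field. The driver name is invented and the gen_driver member spelling is an assumption based on the comment's "gendriver":

/* Illustrative only, not a real subdriver */
static ide_driver_t example_ide_driver = {
        .version    = "1.0",
        .gen_driver = {
                .owner = THIS_MODULE,    /* module owner, as the comment asks */
                .name  = "example-ide",  /* driver name */
        },
};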
@@ -1199,37 +1201,11 @@ extern u64 ide_get_error_location(ide_drive_t *, char *);
1199 */ 1201 */
1200typedef enum { 1202typedef enum {
1201 ide_wait, /* insert rq at end of list, and wait for it */ 1203 ide_wait, /* insert rq at end of list, and wait for it */
1202 ide_next, /* insert rq immediately after current request */
1203 ide_preempt, /* insert rq in front of current request */ 1204 ide_preempt, /* insert rq in front of current request */
1204 ide_head_wait, /* insert rq in front of current request and wait for it */ 1205 ide_head_wait, /* insert rq in front of current request and wait for it */
1205 ide_end /* insert rq at end of list, but don't wait for it */ 1206 ide_end /* insert rq at end of list, but don't wait for it */
1206} ide_action_t; 1207} ide_action_t;
1207 1208
1208/*
1209 * This function issues a special IDE device request
1210 * onto the request queue.
1211 *
1212 * If action is ide_wait, then the rq is queued at the end of the
1213 * request queue, and the function sleeps until it has been processed.
1214 * This is for use when invoked from an ioctl handler.
1215 *
1216 * If action is ide_preempt, then the rq is queued at the head of
1217 * the request queue, displacing the currently-being-processed
1218 * request and this function returns immediately without waiting
1219 * for the new rq to be completed. This is VERY DANGEROUS, and is
1220 * intended for careful use by the ATAPI tape/cdrom driver code.
1221 *
1222 * If action is ide_next, then the rq is queued immediately after
1223 * the currently-being-processed-request (if any), and the function
1224 * returns without waiting for the new rq to be completed. As above,
1225 * This is VERY DANGEROUS, and is intended for careful use by the
1226 * ATAPI tape/cdrom driver code.
1227 *
1228 * If action is ide_end, then the rq is queued at the end of the
1229 * request queue, and the function returns immediately without waiting
1230 * for the new rq to be completed. This is again intended for careful
1231 * use by the ATAPI tape/cdrom driver code.
1232 */
1233extern int ide_do_drive_cmd(ide_drive_t *, struct request *, ide_action_t); 1209extern int ide_do_drive_cmd(ide_drive_t *, struct request *, ide_action_t);
1234 1210
1235/* 1211/*
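The ide_wait case from the removed comment, as a hedged usage sketch. ide_init_drive_cmd() is assumed to be the request-setup helper, and the command/buffer setup is elided:

static int example_ioctl_path(ide_drive_t *drive)
{
        struct request rq;

        ide_init_drive_cmd(&rq);          /* assumed helper that prepares the request */
        /* ... fill in rq.cmd bytes and the data buffer here ... */
        if (ide_do_drive_cmd(drive, &rq, ide_wait))
                return -EIO;              /* queued at the tail; we slept until it failed */
        return 0;
}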
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1013a42d10b1..0986d19be0b7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -940,7 +940,9 @@ unsigned long max_sane_readahead(unsigned long nr);
940 940
941/* Do stack extension */ 941/* Do stack extension */
942extern int expand_stack(struct vm_area_struct *vma, unsigned long address); 942extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
943#ifdef CONFIG_IA64
943extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); 944extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
945#endif
944 946
945/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ 947/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
946extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); 948extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
diff --git a/include/linux/netfilter_ipv4/ipt_sctp.h b/include/linux/netfilter_ipv4/ipt_sctp.h
index e93a9ec99fc2..80b3dbacd193 100644
--- a/include/linux/netfilter_ipv4/ipt_sctp.h
+++ b/include/linux/netfilter_ipv4/ipt_sctp.h
@@ -7,8 +7,6 @@
7 7
8#define IPT_SCTP_VALID_FLAGS 0x07 8#define IPT_SCTP_VALID_FLAGS 0x07
9 9
10#define ELEMCOUNT(x) (sizeof(x)/sizeof(x[0]))
11
12 10
13struct ipt_sctp_flag_info { 11struct ipt_sctp_flag_info {
14 u_int8_t chunktype; 12 u_int8_t chunktype;
@@ -59,21 +57,21 @@ struct ipt_sctp_info {
59#define SCTP_CHUNKMAP_RESET(chunkmap) \ 57#define SCTP_CHUNKMAP_RESET(chunkmap) \
60 do { \ 58 do { \
61 int i; \ 59 int i; \
62 for (i = 0; i < ELEMCOUNT(chunkmap); i++) \ 60 for (i = 0; i < ARRAY_SIZE(chunkmap); i++) \
63 chunkmap[i] = 0; \ 61 chunkmap[i] = 0; \
64 } while (0) 62 } while (0)
65 63
66#define SCTP_CHUNKMAP_SET_ALL(chunkmap) \ 64#define SCTP_CHUNKMAP_SET_ALL(chunkmap) \
67 do { \ 65 do { \
68 int i; \ 66 int i; \
69 for (i = 0; i < ELEMCOUNT(chunkmap); i++) \ 67 for (i = 0; i < ARRAY_SIZE(chunkmap); i++) \
70 chunkmap[i] = ~0; \ 68 chunkmap[i] = ~0; \
71 } while (0) 69 } while (0)
72 70
73#define SCTP_CHUNKMAP_COPY(destmap, srcmap) \ 71#define SCTP_CHUNKMAP_COPY(destmap, srcmap) \
74 do { \ 72 do { \
75 int i; \ 73 int i; \
76 for (i = 0; i < ELEMCOUNT(chunkmap); i++) \ 74 for (i = 0; i < ARRAY_SIZE(chunkmap); i++) \
77 destmap[i] = srcmap[i]; \ 75 destmap[i] = srcmap[i]; \
78 } while (0) 76 } while (0)
79 77
@@ -81,7 +79,7 @@ struct ipt_sctp_info {
81({ \ 79({ \
82 int i; \ 80 int i; \
83 int flag = 1; \ 81 int flag = 1; \
84 for (i = 0; i < ELEMCOUNT(chunkmap); i++) { \ 82 for (i = 0; i < ARRAY_SIZE(chunkmap); i++) { \
85 if (chunkmap[i]) { \ 83 if (chunkmap[i]) { \
86 flag = 0; \ 84 flag = 0; \
87 break; \ 85 break; \
@@ -94,7 +92,7 @@ struct ipt_sctp_info {
94({ \ 92({ \
95 int i; \ 93 int i; \
96 int flag = 1; \ 94 int flag = 1; \
97 for (i = 0; i < ELEMCOUNT(chunkmap); i++) { \ 95 for (i = 0; i < ARRAY_SIZE(chunkmap); i++) { \
98 if (chunkmap[i] != ~0) { \ 96 if (chunkmap[i] != ~0) { \
99 flag = 0; \ 97 flag = 0; \
100 break; \ 98 break; \
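ARRAY_SIZE is the generic element-count helper from <linux/kernel.h>, expanding to sizeof(x) / sizeof((x)[0]), which is exactly what the private ELEMCOUNT open-coded. A minimal sketch of the macros after the substitution; the chunkmap size below is chosen only for the example:

static void chunkmap_example(void)
{
        u_int32_t chunkmap[8];            /* illustrative size only */

        SCTP_CHUNKMAP_RESET(chunkmap);    /* clears all ARRAY_SIZE(chunkmap) words */
        SCTP_CHUNKMAP_SET_ALL(chunkmap);  /* sets every bit in the map */
}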
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 7b387faedb4d..1e737e269db9 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -620,6 +620,7 @@
620#define PCI_DEVICE_ID_SI_961 0x0961 620#define PCI_DEVICE_ID_SI_961 0x0961
621#define PCI_DEVICE_ID_SI_962 0x0962 621#define PCI_DEVICE_ID_SI_962 0x0962
622#define PCI_DEVICE_ID_SI_963 0x0963 622#define PCI_DEVICE_ID_SI_963 0x0963
623#define PCI_DEVICE_ID_SI_965 0x0965
623#define PCI_DEVICE_ID_SI_5511 0x5511 624#define PCI_DEVICE_ID_SI_5511 0x5511
624#define PCI_DEVICE_ID_SI_5513 0x5513 625#define PCI_DEVICE_ID_SI_5513 0x5513
625#define PCI_DEVICE_ID_SI_5518 0x5518 626#define PCI_DEVICE_ID_SI_5518 0x5518
@@ -1198,6 +1199,7 @@
1198#define PCI_DEVICE_ID_VIA_3269_0 0x0269 1199#define PCI_DEVICE_ID_VIA_3269_0 0x0269
1199#define PCI_DEVICE_ID_VIA_K8T800PRO_0 0x0282 1200#define PCI_DEVICE_ID_VIA_K8T800PRO_0 0x0282
1200#define PCI_DEVICE_ID_VIA_8363_0 0x0305 1201#define PCI_DEVICE_ID_VIA_8363_0 0x0305
1202#define PCI_DEVICE_ID_VIA_P4M800CE 0x0314
1201#define PCI_DEVICE_ID_VIA_8371_0 0x0391 1203#define PCI_DEVICE_ID_VIA_8371_0 0x0391
1202#define PCI_DEVICE_ID_VIA_8501_0 0x0501 1204#define PCI_DEVICE_ID_VIA_8501_0 0x0501
1203#define PCI_DEVICE_ID_VIA_82C561 0x0561 1205#define PCI_DEVICE_ID_VIA_82C561 0x0561
@@ -1234,6 +1236,7 @@
1234#define PCI_DEVICE_ID_VIA_8703_51_0 0x3148 1236#define PCI_DEVICE_ID_VIA_8703_51_0 0x3148
1235#define PCI_DEVICE_ID_VIA_8237_SATA 0x3149 1237#define PCI_DEVICE_ID_VIA_8237_SATA 0x3149
1236#define PCI_DEVICE_ID_VIA_XN266 0x3156 1238#define PCI_DEVICE_ID_VIA_XN266 0x3156
1239#define PCI_DEVICE_ID_VIA_6410 0x3164
1237#define PCI_DEVICE_ID_VIA_8754C_0 0x3168 1240#define PCI_DEVICE_ID_VIA_8754C_0 0x3168
1238#define PCI_DEVICE_ID_VIA_8235 0x3177 1241#define PCI_DEVICE_ID_VIA_8235 0x3177
1239#define PCI_DEVICE_ID_VIA_8385_0 0x3188 1242#define PCI_DEVICE_ID_VIA_8385_0 0x3188
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0a8ea8b35816..8c5d6001a923 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -206,6 +206,7 @@ enum {
206 * @nfct: Associated connection, if any 206 * @nfct: Associated connection, if any
207 * @ipvs_property: skbuff is owned by ipvs 207 * @ipvs_property: skbuff is owned by ipvs
208 * @nfctinfo: Relationship of this skb to the connection 208 * @nfctinfo: Relationship of this skb to the connection
209 * @nfct_reasm: netfilter conntrack re-assembly pointer
209 * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c 210 * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
210 * @tc_index: Traffic control index 211 * @tc_index: Traffic control index
211 * @tc_verd: traffic control verdict 212 * @tc_verd: traffic control verdict
@@ -264,16 +265,14 @@ struct sk_buff {
264 nohdr:1, 265 nohdr:1,
265 nfctinfo:3; 266 nfctinfo:3;
266 __u8 pkt_type:3, 267 __u8 pkt_type:3,
267 fclone:2; 268 fclone:2,
269 ipvs_property:1;
268 __be16 protocol; 270 __be16 protocol;
269 271
270 void (*destructor)(struct sk_buff *skb); 272 void (*destructor)(struct sk_buff *skb);
271#ifdef CONFIG_NETFILTER 273#ifdef CONFIG_NETFILTER
272 __u32 nfmark; 274 __u32 nfmark;
273 struct nf_conntrack *nfct; 275 struct nf_conntrack *nfct;
274#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
275 __u8 ipvs_property:1;
276#endif
277#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 276#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
278 struct sk_buff *nfct_reasm; 277 struct sk_buff *nfct_reasm;
279#endif 278#endif
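The ipvs_property move is plain bit-field packing: the flag now shares a byte with pkt_type and fclone unconditionally, so the #ifdef'd member can go away. A minimal standalone illustration, not the kernel struct itself:

/* Three fields packed into one unsigned char: 3 + 2 + 1 = 6 bits,
 * so the extra flag costs no additional storage. */
struct flags_byte {
        unsigned char pkt_type:3;
        unsigned char fclone:2;
        unsigned char ipvs_property:1;
};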
diff --git a/include/linux/uinput.h b/include/linux/uinput.h
index 84876077027f..0ff7ca68e5c5 100644
--- a/include/linux/uinput.h
+++ b/include/linux/uinput.h
@@ -34,8 +34,7 @@
34#define UINPUT_BUFFER_SIZE 16 34#define UINPUT_BUFFER_SIZE 16
35#define UINPUT_NUM_REQUESTS 16 35#define UINPUT_NUM_REQUESTS 16
36 36
37/* state flags => bit index for {set|clear|test}_bit ops */ 37enum uinput_state { UIST_NEW_DEVICE, UIST_SETUP_COMPLETE, UIST_CREATED };
38#define UIST_CREATED 0
39 38
40struct uinput_request { 39struct uinput_request {
41 int id; 40 int id;
@@ -52,11 +51,12 @@ struct uinput_request {
52 51
53struct uinput_device { 52struct uinput_device {
54 struct input_dev *dev; 53 struct input_dev *dev;
55 unsigned long state; 54 struct semaphore sem;
55 enum uinput_state state;
56 wait_queue_head_t waitq; 56 wait_queue_head_t waitq;
57 unsigned char ready, 57 unsigned char ready;
58 head, 58 unsigned char head;
59 tail; 59 unsigned char tail;
60 struct input_event buff[UINPUT_BUFFER_SIZE]; 60 struct input_event buff[UINPUT_BUFFER_SIZE];
61 61
62 struct uinput_request *requests[UINPUT_NUM_REQUESTS]; 62 struct uinput_request *requests[UINPUT_NUM_REQUESTS];
@@ -91,6 +91,7 @@ struct uinput_ff_erase {
91#define UI_SET_SNDBIT _IOW(UINPUT_IOCTL_BASE, 106, int) 91#define UI_SET_SNDBIT _IOW(UINPUT_IOCTL_BASE, 106, int)
92#define UI_SET_FFBIT _IOW(UINPUT_IOCTL_BASE, 107, int) 92#define UI_SET_FFBIT _IOW(UINPUT_IOCTL_BASE, 107, int)
93#define UI_SET_PHYS _IOW(UINPUT_IOCTL_BASE, 108, char*) 93#define UI_SET_PHYS _IOW(UINPUT_IOCTL_BASE, 108, char*)
94#define UI_SET_SWBIT _IOW(UINPUT_IOCTL_BASE, 109, int)
94 95
95#define UI_BEGIN_FF_UPLOAD _IOWR(UINPUT_IOCTL_BASE, 200, struct uinput_ff_upload) 96#define UI_BEGIN_FF_UPLOAD _IOWR(UINPUT_IOCTL_BASE, 200, struct uinput_ff_upload)
96#define UI_END_FF_UPLOAD _IOW(UINPUT_IOCTL_BASE, 201, struct uinput_ff_upload) 97#define UI_END_FF_UPLOAD _IOW(UINPUT_IOCTL_BASE, 201, struct uinput_ff_upload)
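A hedged userspace sketch of the new UI_SET_SWBIT ioctl. The device node path and the SW_LID code are assumptions, and full device creation (writing the uinput_user_dev record, UI_DEV_CREATE) is omitted:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/input.h>
#include <linux/uinput.h>

/* Advertise switch events, specifically a lid switch, before creating
 * the uinput device.  SW_LID is assumed to be available in <linux/input.h>. */
int open_lid_switch_device(void)
{
        int fd = open("/dev/uinput", O_WRONLY | O_NONBLOCK);
        if (fd < 0)
                return -1;
        ioctl(fd, UI_SET_EVBIT, EV_SW);    /* enable the switch event class */
        ioctl(fd, UI_SET_SWBIT, SW_LID);   /* enable the lid switch code    */
        return fd;                         /* caller continues with device setup */
}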
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 6addb4d464d6..0a2ad51cff82 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -237,6 +237,8 @@ extern struct ipv6_txoptions * ipv6_renew_options(struct sock *sk, struct ipv6_t
237 int newtype, 237 int newtype,
238 struct ipv6_opt_hdr __user *newopt, 238 struct ipv6_opt_hdr __user *newopt,
239 int newoptlen); 239 int newoptlen);
240struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
241 struct ipv6_txoptions *opt);
240 242
241extern int ip6_frag_nqueues; 243extern int ip6_frag_nqueues;
242extern atomic_t ip6_frag_mem; 244extern atomic_t ip6_frag_mem;