path: root/include
author     James Bottomley <jejb@mulgrave.il.steeleye.com>  2006-03-21 14:05:45 -0500
committer  James Bottomley <jejb@mulgrave.il.steeleye.com>  2006-03-21 14:05:45 -0500
commit     d04cdb64212eb5ae6a98026a97dda626e40e8e9a (patch)
tree       b6a7dbb21ccfceb915844e9a330b3d3dfcaf3c5b /include
parent     2f8600dff2b140096a7df781884e918a16aa90e0 (diff)
parent     ec1248e70edc5cf7b485efcc7b41e44e10f422e5 (diff)
Merge ../linux-2.6
Diffstat (limited to 'include')
-rw-r--r--  include/asm-arm/irq.h | 2
-rw-r--r--  include/asm-arm/system.h | 3
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 14
-rw-r--r--  include/asm-mips/byteorder.h | 18
-rw-r--r--  include/asm-mips/compat.h | 8
-rw-r--r--  include/asm-mips/cpu-features.h | 3
-rw-r--r--  include/asm-mips/cpu-info.h | 1
-rw-r--r--  include/asm-mips/hazards.h | 180
-rw-r--r--  include/asm-mips/io.h | 87
-rw-r--r--  include/asm-mips/mach-cobalt/cobalt.h | 2
-rw-r--r--  include/asm-mips/mach-generic/mangle-port.h | 36
-rw-r--r--  include/asm-mips/mach-ip27/mangle-port.h | 9
-rw-r--r--  include/asm-mips/mach-ip32/mangle-port.h | 9
-rw-r--r--  include/asm-mips/mach-mips/cpu-feature-overrides.h | 4
-rw-r--r--  include/asm-mips/mmu_context.h | 7
-rw-r--r--  include/asm-mips/pgtable-32.h | 2
-rw-r--r--  include/asm-mips/r4kcache.h | 4
-rw-r--r--  include/asm-mips/sibyte/sb1250.h | 2
-rw-r--r--  include/asm-mips/sibyte/sb1250_scd.h | 5
-rw-r--r--  include/asm-mips/signal.h | 20
-rw-r--r--  include/asm-mips/sn/klconfig.h | 2
-rw-r--r--  include/asm-mips/sn/mapped_kernel.h | 4
-rw-r--r--  include/asm-mips/sn/sn0/hubio.h | 12
-rw-r--r--  include/asm-mips/stackframe.h | 20
-rw-r--r--  include/asm-mips/system.h | 8
-rw-r--r--  include/asm-mips/thread_info.h | 2
-rw-r--r--  include/asm-powerpc/ppc-pci.h | 1
-rw-r--r--  include/asm-sparc/idprom.h | 26
-rw-r--r--  include/asm-sparc/oplib.h | 2
-rw-r--r--  include/asm-sparc/socket.h | 2
-rw-r--r--  include/asm-sparc/uaccess.h | 47
-rw-r--r--  include/asm-sparc64/a.out.h | 6
-rw-r--r--  include/asm-sparc64/asi.h | 18
-rw-r--r--  include/asm-sparc64/cpudata.h | 203
-rw-r--r--  include/asm-sparc64/elf.h | 22
-rw-r--r--  include/asm-sparc64/head.h | 15
-rw-r--r--  include/asm-sparc64/hypervisor.h | 2128
-rw-r--r--  include/asm-sparc64/idprom.h | 12
-rw-r--r--  include/asm-sparc64/intr_queue.h | 15
-rw-r--r--  include/asm-sparc64/irq.h | 4
-rw-r--r--  include/asm-sparc64/mmu.h | 36
-rw-r--r--  include/asm-sparc64/mmu_context.h | 162
-rw-r--r--  include/asm-sparc64/numnodes.h | 6
-rw-r--r--  include/asm-sparc64/oplib.h | 43
-rw-r--r--  include/asm-sparc64/page.h | 13
-rw-r--r--  include/asm-sparc64/pbm.h | 3
-rw-r--r--  include/asm-sparc64/pci.h | 56
-rw-r--r--  include/asm-sparc64/pgalloc.h | 166
-rw-r--r--  include/asm-sparc64/pgtable.h | 704
-rw-r--r--  include/asm-sparc64/pil.h | 4
-rw-r--r--  include/asm-sparc64/processor.h | 23
-rw-r--r--  include/asm-sparc64/pstate.h | 9
-rw-r--r--  include/asm-sparc64/scratchpad.h | 14
-rw-r--r--  include/asm-sparc64/smp.h | 30
-rw-r--r--  include/asm-sparc64/sparsemem.h | 12
-rw-r--r--  include/asm-sparc64/spitfire.h | 1
-rw-r--r--  include/asm-sparc64/system.h | 7
-rw-r--r--  include/asm-sparc64/thread_info.h | 9
-rw-r--r--  include/asm-sparc64/timex.h | 6
-rw-r--r--  include/asm-sparc64/tlbflush.h | 25
-rw-r--r--  include/asm-sparc64/tsb.h | 281
-rw-r--r--  include/asm-sparc64/ttable.h | 272
-rw-r--r--  include/asm-sparc64/uaccess.h | 46
-rw-r--r--  include/asm-sparc64/vdev.h | 16
-rw-r--r--  include/asm-sparc64/xor.h | 34
-rw-r--r--  include/linux/amba/clcd.h | 12
-rw-r--r--  include/linux/arcdevice.h | 9
-rw-r--r--  include/linux/ata.h | 22
-rw-r--r--  include/linux/blkdev.h | 10
-rw-r--r--  include/linux/cpu.h | 2
-rw-r--r--  include/linux/crypto.h | 10
-rw-r--r--  include/linux/dccp.h | 132
-rw-r--r--  include/linux/debugfs.h | 15
-rw-r--r--  include/linux/device.h | 2
-rw-r--r--  include/linux/dn.h | 44
-rw-r--r--  include/linux/dvb/audio.h | 13
-rw-r--r--  include/linux/dvb/video.h | 13
-rw-r--r--  include/linux/elevator.h | 10
-rw-r--r--  include/linux/fsl_devices.h | 27
-rw-r--r--  include/linux/icmpv6.h | 11
-rw-r--r--  include/linux/if.h | 29
-rw-r--r--  include/linux/if_ether.h | 1
-rw-r--r--  include/linux/in.h | 1
-rw-r--r--  include/linux/inetdevice.h | 1
-rw-r--r--  include/linux/ipv6.h | 14
-rw-r--r--  include/linux/ipv6_route.h | 10
-rw-r--r--  include/linux/irda.h | 1
-rw-r--r--  include/linux/kobj_map.h | 4
-rw-r--r--  include/linux/kobject.h | 4
-rw-r--r--  include/linux/libata.h | 180
-rw-r--r--  include/linux/list.h | 24
-rw-r--r--  include/linux/module.h | 10
-rw-r--r--  include/linux/mv643xx.h | 27
-rw-r--r--  include/linux/net.h | 4
-rw-r--r--  include/linux/netdevice.h | 41
-rw-r--r--  include/linux/netfilter.h | 9
-rw-r--r--  include/linux/netfilter/nfnetlink.h | 1
-rw-r--r--  include/linux/netfilter/nfnetlink_log.h | 6
-rw-r--r--  include/linux/netfilter/x_tables.h | 37
-rw-r--r--  include/linux/netfilter/xt_policy.h | 58
-rw-r--r--  include/linux/netfilter_bridge.h | 27
-rw-r--r--  include/linux/netfilter_ipv4/ip_conntrack.h | 2
-rw-r--r--  include/linux/netfilter_ipv4/ip_conntrack_h323.h | 30
-rw-r--r--  include/linux/netfilter_ipv4/ip_nat.h | 2
-rw-r--r--  include/linux/netfilter_ipv4/ipt_policy.h | 69
-rw-r--r--  include/linux/netfilter_ipv6/ip6t_policy.h | 69
-rw-r--r--  include/linux/netlink.h | 1
-rw-r--r--  include/linux/pci_ids.h | 7
-rw-r--r--  include/linux/rtnetlink.h | 23
-rw-r--r--  include/linux/security.h | 25
-rw-r--r--  include/linux/serial_core.h | 3
-rw-r--r--  include/linux/skbuff.h | 47
-rw-r--r--  include/linux/socket.h | 1
-rw-r--r--  include/linux/sunrpc/svcsock.h | 2
-rw-r--r--  include/linux/sysctl.h | 27
-rw-r--r--  include/linux/tcp.h | 6
-rw-r--r--  include/linux/usb.h | 2
-rw-r--r--  include/linux/usb_gadget.h | 7
-rw-r--r--  include/linux/videodev2.h | 84
-rw-r--r--  include/linux/xfrm.h | 30
-rw-r--r--  include/media/ir-common.h | 40
-rw-r--r--  include/media/saa7146.h | 21
-rw-r--r--  include/media/tuner-types.h | 3
-rw-r--r--  include/media/tuner.h | 6
-rw-r--r--  include/media/v4l2-common.h | 62
-rw-r--r--  include/media/video-buf-dvb.h | 2
-rw-r--r--  include/media/video-buf.h | 2
-rw-r--r--  include/net/af_unix.h | 3
-rw-r--r--  include/net/ax25.h | 9
-rw-r--r--  include/net/dn.h | 105
-rw-r--r--  include/net/dn_dev.h | 88
-rw-r--r--  include/net/dn_fib.h | 22
-rw-r--r--  include/net/dn_neigh.h | 4
-rw-r--r--  include/net/dn_nsp.h | 72
-rw-r--r--  include/net/dn_route.h | 12
-rw-r--r--  include/net/flow.h | 8
-rw-r--r--  include/net/ieee80211.h | 177
-rw-r--r--  include/net/ieee80211_crypt.h | 3
-rw-r--r--  include/net/if_inet6.h | 3
-rw-r--r--  include/net/inet_connection_sock.h | 26
-rw-r--r--  include/net/ip.h | 4
-rw-r--r--  include/net/ip6_route.h | 24
-rw-r--r--  include/net/ipv6.h | 22
-rw-r--r--  include/net/llc.h | 2
-rw-r--r--  include/net/ndisc.h | 2
-rw-r--r--  include/net/neighbour.h | 2
-rw-r--r--  include/net/netfilter/nf_conntrack.h | 56
-rw-r--r--  include/net/scm.h | 10
-rw-r--r--  include/net/sctp/structs.h | 10
-rw-r--r--  include/net/sock.h | 14
-rw-r--r--  include/net/tcp.h | 16
-rw-r--r--  include/net/xfrm.h | 62
-rw-r--r--  include/rdma/ib_fmr_pool.h | 2
-rw-r--r--  include/rdma/ib_mad.h | 48
-rw-r--r--  include/rdma/ib_user_verbs.h | 79
-rw-r--r--  include/rdma/ib_verbs.h | 38
-rw-r--r--  include/scsi/scsi_eh.h | 3
157 files changed, 5774 insertions, 1489 deletions
diff --git a/include/asm-arm/irq.h b/include/asm-arm/irq.h
index 7772432d3fd7..60b5105c9c93 100644
--- a/include/asm-arm/irq.h
+++ b/include/asm-arm/irq.h
@@ -27,7 +27,7 @@ extern void enable_irq(unsigned int);
27 27
28/* 28/*
29 * These correspond with the SA_TRIGGER_* defines, and therefore the 29 * These correspond with the SA_TRIGGER_* defines, and therefore the
30 * IRQRESOURCE_IRQ_* defines. 30 * IORESOURCE_IRQ_* defines.
31 */ 31 */
32#define __IRQT_RISEDGE (1 << 0) 32#define __IRQT_RISEDGE (1 << 0)
33#define __IRQT_FALEDGE (1 << 1) 33#define __IRQT_FALEDGE (1 << 1)
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index eb2de8c10515..ec91d1ff032a 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -415,6 +415,9 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
415 return ret; 415 return ret;
416} 416}
417 417
418extern void disable_hlt(void);
419extern void enable_hlt(void);
420
418#endif /* __ASSEMBLY__ */ 421#endif /* __ASSEMBLY__ */
419 422
420#define arch_align_stack(x) (x) 423#define arch_align_stack(x) (x)
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 35de20cf8fac..9d11550b4818 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -58,6 +58,13 @@
58 VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \ 58 VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
59 } \ 59 } \
60 \ 60 \
61 /* Kernel symbol table: GPL-future-only symbols */ \
62 __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
63 VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
64 *(__ksymtab_gpl_future) \
65 VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
66 } \
67 \
61 /* Kernel symbol table: Normal symbols */ \ 68 /* Kernel symbol table: Normal symbols */ \
62 __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \ 69 __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
63 VMLINUX_SYMBOL(__start___kcrctab) = .; \ 70 VMLINUX_SYMBOL(__start___kcrctab) = .; \
@@ -72,6 +79,13 @@
72 VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \ 79 VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
73 } \ 80 } \
74 \ 81 \
82 /* Kernel symbol table: GPL-future-only symbols */ \
83 __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
84 VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
85 *(__kcrctab_gpl_future) \
86 VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
87 } \
88 \
75 /* Kernel symbol table: strings */ \ 89 /* Kernel symbol table: strings */ \
76 __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \ 90 __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
77 *(__ksymtab_strings) \ 91 *(__ksymtab_strings) \
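
The two new output sections above exist to collect symbols exported with EXPORT_SYMBOL_GPL_FUTURE(), the macro the <linux/module.h> changes in this merge introduce (see module.h in the diffstat). A minimal, illustrative use — the exported function name is made up for the example:

#include <linux/module.h>

int example_do_work(int arg)
{
	return arg * 2;
}
/*
 * Usable by all modules today, but flagged as scheduled to become
 * GPL-only; the export entry lands in the __ksymtab_gpl_future
 * section gathered by the linker script hunks above.
 */
EXPORT_SYMBOL_GPL_FUTURE(example_do_work);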
diff --git a/include/asm-mips/byteorder.h b/include/asm-mips/byteorder.h
index 584f8128fffd..aefc02f16fd8 100644
--- a/include/asm-mips/byteorder.h
+++ b/include/asm-mips/byteorder.h
@@ -39,6 +39,24 @@ static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
39} 39}
40#define __arch__swab32(x) ___arch__swab32(x) 40#define __arch__swab32(x) ___arch__swab32(x)
41 41
42#ifdef CONFIG_CPU_MIPS64_R2
43
44static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x)
45{
46 __asm__(
47 " dsbh %0, %1 \n"
48 " dshd %0, %0 \n"
49 " drotr %0, %0, 32 \n"
50 : "=r" (x)
51 : "r" (x));
52
53 return x;
54}
55
56#define __arch__swab64(x) ___arch__swab64(x)
57
58#endif /* CONFIG_CPU_MIPS64_R2 */
59
42#endif /* CONFIG_CPU_MIPSR2 */ 60#endif /* CONFIG_CPU_MIPSR2 */
43 61
44#if !defined(__STRICT_ANSI__) || defined(__KERNEL__) 62#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
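
For reference, a plain C sketch (not part of the patch) of the byte reversal that an __arch__swab64() override such as the MIPS64 R2 version above is there to accelerate; this is the generic shift-and-mask equivalent:

#include <stdint.h>

/* Reverse the eight bytes of a 64-bit value. */
static inline uint64_t generic_swab64(uint64_t x)
{
	return  ((x & 0x00000000000000ffULL) << 56) |
		((x & 0x000000000000ff00ULL) << 40) |
		((x & 0x0000000000ff0000ULL) << 24) |
		((x & 0x00000000ff000000ULL) <<  8) |
		((x & 0x000000ff00000000ULL) >>  8) |
		((x & 0x0000ff0000000000ULL) >> 24) |
		((x & 0x00ff000000000000ULL) >> 40) |
		((x & 0xff00000000000000ULL) >> 56);
}

/* e.g. generic_swab64(0x0123456789abcdefULL) == 0xefcdab8967452301ULL */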
diff --git a/include/asm-mips/compat.h b/include/asm-mips/compat.h
index 35d2604fe69c..0012bd804d2d 100644
--- a/include/asm-mips/compat.h
+++ b/include/asm-mips/compat.h
@@ -128,17 +128,17 @@ typedef u32 compat_sigset_word;
128 */ 128 */
129typedef u32 compat_uptr_t; 129typedef u32 compat_uptr_t;
130 130
131static inline void *compat_ptr(compat_uptr_t uptr) 131static inline void __user *compat_ptr(compat_uptr_t uptr)
132{ 132{
133 return (void *)(long)uptr; 133 return (void __user *)(long)uptr;
134} 134}
135 135
136static inline void *compat_alloc_user_space(long len) 136static inline void __user *compat_alloc_user_space(long len)
137{ 137{
138 struct pt_regs *regs = (struct pt_regs *) 138 struct pt_regs *regs = (struct pt_regs *)
139 ((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1; 139 ((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1;
140 140
141 return (void *) (regs->regs[29] - len); 141 return (void __user *) (regs->regs[29] - len);
142} 142}
143#if defined (__MIPSEL__) 143#if defined (__MIPSEL__)
144#define __COMPAT_ENDIAN_SWAP__ 1 144#define __COMPAT_ENDIAN_SWAP__ 1
diff --git a/include/asm-mips/cpu-features.h b/include/asm-mips/cpu-features.h
index 78c9cc2735d5..3f2b6d9ac45e 100644
--- a/include/asm-mips/cpu-features.h
+++ b/include/asm-mips/cpu-features.h
@@ -96,6 +96,9 @@
96#ifndef cpu_has_ic_fills_f_dc 96#ifndef cpu_has_ic_fills_f_dc
97#define cpu_has_ic_fills_f_dc (cpu_data[0].icache.flags & MIPS_CACHE_IC_F_DC) 97#define cpu_has_ic_fills_f_dc (cpu_data[0].icache.flags & MIPS_CACHE_IC_F_DC)
98#endif 98#endif
99#ifndef cpu_has_pindexed_dcache
100#define cpu_has_pindexed_dcache (cpu_data[0].dcache.flags & MIPS_CACHE_PINDEX)
101#endif
99 102
100/* 103/*
101 * I-Cache snoops remote store. This only matters on SMP. Some multiprocessors 104 * I-Cache snoops remote store. This only matters on SMP. Some multiprocessors
diff --git a/include/asm-mips/cpu-info.h b/include/asm-mips/cpu-info.h
index d5cf519f8fcc..140be1c67da7 100644
--- a/include/asm-mips/cpu-info.h
+++ b/include/asm-mips/cpu-info.h
@@ -39,6 +39,7 @@ struct cache_desc {
39#define MIPS_CACHE_ALIASES 0x00000004 /* Cache could have aliases */ 39#define MIPS_CACHE_ALIASES 0x00000004 /* Cache could have aliases */
40#define MIPS_CACHE_IC_F_DC 0x00000008 /* Ic can refill from D-cache */ 40#define MIPS_CACHE_IC_F_DC 0x00000008 /* Ic can refill from D-cache */
41#define MIPS_IC_SNOOPS_REMOTE 0x00000010 /* Ic snoops remote stores */ 41#define MIPS_IC_SNOOPS_REMOTE 0x00000010 /* Ic snoops remote stores */
42#define MIPS_CACHE_PINDEX 0x00000020 /* Physically indexed cache */
42 43
43struct cpuinfo_mips { 44struct cpuinfo_mips {
44 unsigned long udelay_val; 45 unsigned long udelay_val;
diff --git a/include/asm-mips/hazards.h b/include/asm-mips/hazards.h
index 6111a0ce58c4..feb29a793888 100644
--- a/include/asm-mips/hazards.h
+++ b/include/asm-mips/hazards.h
@@ -3,7 +3,9 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 2003, 2004 Ralf Baechle 6 * Copyright (C) 2003, 2004 Ralf Baechle <ralf@linux-mips.org>
7 * Copyright (C) MIPS Technologies, Inc.
8 * written by Ralf Baechle <ralf@linux-mips.org>
7 */ 9 */
8#ifndef _ASM_HAZARDS_H 10#ifndef _ASM_HAZARDS_H
9#define _ASM_HAZARDS_H 11#define _ASM_HAZARDS_H
@@ -74,8 +76,7 @@
74#define irq_disable_hazard 76#define irq_disable_hazard
75 _ehb 77 _ehb
76 78
77#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000) || \ 79#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000)
78 defined(CONFIG_CPU_SB1)
79 80
80/* 81/*
81 * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer. 82 * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
@@ -99,13 +100,13 @@
99#else /* __ASSEMBLY__ */ 100#else /* __ASSEMBLY__ */
100 101
101__asm__( 102__asm__(
102 " .macro _ssnop \n\t" 103 " .macro _ssnop \n"
103 " sll $0, $0, 1 \n\t" 104 " sll $0, $0, 1 \n"
104 " .endm \n\t" 105 " .endm \n"
105 " \n\t" 106 " \n"
106 " .macro _ehb \n\t" 107 " .macro _ehb \n"
107 " sll $0, $0, 3 \n\t" 108 " sll $0, $0, 3 \n"
108 " .endm \n\t"); 109 " .endm \n");
109 110
110#ifdef CONFIG_CPU_RM9000 111#ifdef CONFIG_CPU_RM9000
111 112
@@ -117,17 +118,21 @@ __asm__(
117 118
118#define mtc0_tlbw_hazard() \ 119#define mtc0_tlbw_hazard() \
119 __asm__ __volatile__( \ 120 __asm__ __volatile__( \
120 ".set\tmips32\n\t" \ 121 " .set mips32 \n" \
121 "_ssnop; _ssnop; _ssnop; _ssnop\n\t" \ 122 " _ssnop \n" \
122 ".set\tmips0") 123 " _ssnop \n" \
124 " _ssnop \n" \
125 " _ssnop \n" \
126 " .set mips0 \n")
123 127
124#define tlbw_use_hazard() \ 128#define tlbw_use_hazard() \
125 __asm__ __volatile__( \ 129 __asm__ __volatile__( \
126 ".set\tmips32\n\t" \ 130 " .set mips32 \n" \
127 "_ssnop; _ssnop; _ssnop; _ssnop\n\t" \ 131 " _ssnop \n" \
128 ".set\tmips0") 132 " _ssnop \n" \
129 133 " _ssnop \n" \
130#define back_to_back_c0_hazard() do { } while (0) 134 " _ssnop \n" \
135 " .set mips0 \n")
131 136
132#else 137#else
133 138
@@ -136,15 +141,25 @@ __asm__(
136 */ 141 */
137#define mtc0_tlbw_hazard() \ 142#define mtc0_tlbw_hazard() \
138 __asm__ __volatile__( \ 143 __asm__ __volatile__( \
139 ".set noreorder\n\t" \ 144 " .set noreorder \n" \
140 "nop; nop; nop; nop; nop; nop;\n\t" \ 145 " nop \n" \
141 ".set reorder\n\t") 146 " nop \n" \
147 " nop \n" \
148 " nop \n" \
149 " nop \n" \
150 " nop \n" \
151 " .set reorder \n")
142 152
143#define tlbw_use_hazard() \ 153#define tlbw_use_hazard() \
144 __asm__ __volatile__( \ 154 __asm__ __volatile__( \
145 ".set noreorder\n\t" \ 155 " .set noreorder \n" \
146 "nop; nop; nop; nop; nop; nop;\n\t" \ 156 " nop \n" \
147 ".set reorder\n\t") 157 " nop \n" \
158 " nop \n" \
159 " nop \n" \
160 " nop \n" \
161 " nop \n" \
162 " .set reorder \n")
148 163
149#endif 164#endif
150 165
@@ -156,49 +171,26 @@ __asm__(
156 171
157#ifdef CONFIG_CPU_MIPSR2 172#ifdef CONFIG_CPU_MIPSR2
158 173
159__asm__( 174__asm__(" .macro irq_enable_hazard \n"
160 " .macro\tirq_enable_hazard \n\t" 175 " _ehb \n"
161 " _ehb \n\t" 176 " .endm \n"
162 " .endm \n\t" 177 " \n"
163 " \n\t" 178 " .macro irq_disable_hazard \n"
164 " .macro\tirq_disable_hazard \n\t" 179 " _ehb \n"
165 " _ehb \n\t" 180 " .endm \n");
166 " .endm \n\t"
167 " \n\t"
168 " .macro\tback_to_back_c0_hazard \n\t"
169 " _ehb \n\t"
170 " .endm");
171
172#define irq_enable_hazard() \
173 __asm__ __volatile__( \
174 "irq_enable_hazard")
175 181
176#define irq_disable_hazard() \ 182#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000)
177 __asm__ __volatile__( \
178 "irq_disable_hazard")
179
180#define back_to_back_c0_hazard() \
181 __asm__ __volatile__( \
182 "back_to_back_c0_hazard")
183
184#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000) || \
185 defined(CONFIG_CPU_SB1)
186 183
187/* 184/*
188 * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer. 185 * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
189 */ 186 */
190 187
191__asm__( 188__asm__(
192 " .macro\tirq_enable_hazard \n\t" 189 " .macro irq_enable_hazard \n"
193 " .endm \n\t" 190 " .endm \n"
194 " \n\t" 191 " \n"
195 " .macro\tirq_disable_hazard \n\t" 192 " .macro irq_disable_hazard \n"
196 " .endm"); 193 " .endm \n");
197
198#define irq_enable_hazard() do { } while (0)
199#define irq_disable_hazard() do { } while (0)
200
201#define back_to_back_c0_hazard() do { } while (0)
202 194
203#else 195#else
204 196
@@ -209,29 +201,63 @@ __asm__(
209 */ 201 */
210 202
211__asm__( 203__asm__(
212 " # \n\t" 204 " # \n"
213 " # There is a hazard but we do not care \n\t" 205 " # There is a hazard but we do not care \n"
214 " # \n\t" 206 " # \n"
215 " .macro\tirq_enable_hazard \n\t" 207 " .macro\tirq_enable_hazard \n"
216 " .endm \n\t" 208 " .endm \n"
217 " \n\t" 209 " \n"
218 " .macro\tirq_disable_hazard \n\t" 210 " .macro\tirq_disable_hazard \n"
219 " _ssnop; _ssnop; _ssnop \n\t" 211 " _ssnop \n"
220 " .endm"); 212 " _ssnop \n"
213 " _ssnop \n"
214 " .endm \n");
221 215
222#define irq_enable_hazard() do { } while (0) 216#endif
217
218#define irq_enable_hazard() \
219 __asm__ __volatile__("irq_enable_hazard")
223#define irq_disable_hazard() \ 220#define irq_disable_hazard() \
224 __asm__ __volatile__( \ 221 __asm__ __volatile__("irq_disable_hazard")
225 "irq_disable_hazard")
226 222
227#define back_to_back_c0_hazard() \ 223
228 __asm__ __volatile__( \ 224/*
229 " .set noreorder \n" \ 225 * Back-to-back hazards -
230 " nop; nop; nop \n" \ 226 *
231 " .set reorder \n") 227 * What is needed to separate a move to cp0 from a subsequent read from the
228 * same cp0 register?
229 */
230#ifdef CONFIG_CPU_MIPSR2
231
232__asm__(" .macro back_to_back_c0_hazard \n"
233 " _ehb \n"
234 " .endm \n");
235
236#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000) || \
237 defined(CONFIG_CPU_SB1)
238
239__asm__(" .macro back_to_back_c0_hazard \n"
240 " .endm \n");
241
242#else
243
244__asm__(" .macro back_to_back_c0_hazard \n"
245 " .set noreorder \n"
246 " _ssnop \n"
247 " _ssnop \n"
248 " _ssnop \n"
249 " .set reorder \n"
250 " .endm");
232 251
233#endif 252#endif
234 253
254#define back_to_back_c0_hazard() \
255 __asm__ __volatile__("back_to_back_c0_hazard")
256
257
258/*
259 * Instruction execution hazard
260 */
235#ifdef CONFIG_CPU_MIPSR2 261#ifdef CONFIG_CPU_MIPSR2
236/* 262/*
237 * gcc has a tradition of misscompiling the previous construct using the 263 * gcc has a tradition of misscompiling the previous construct using the
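
A rough usage sketch (not part of the patch) of where these barriers go: callers such as the r4k TLB code are expected to look roughly like this, with the write_c0_*() and tlb_write_indexed() helpers assumed from the MIPS cp0 register headers.

static void example_update_tlb_entry(unsigned long hi,
				     unsigned long lo0, unsigned long lo1)
{
	write_c0_entryhi(hi);
	write_c0_entrylo0(lo0);
	write_c0_entrylo1(lo1);

	mtc0_tlbw_hazard();	/* separate the mtc0s from the TLB write */
	tlb_write_indexed();
	tlbw_use_hazard();	/* separate the TLB write from its first use */
}

/*
 * back_to_back_c0_hazard() answers the question asked in the comment
 * above: it sits between a write to a cp0 register and an immediately
 * following read of the same register, e.g.
 *
 *	write_c0_compare(val);
 *	back_to_back_c0_hazard();
 *	val = read_c0_compare();
 */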
diff --git a/include/asm-mips/io.h b/include/asm-mips/io.h
index 8c011aa61afa..546a17e56a9b 100644
--- a/include/asm-mips/io.h
+++ b/include/asm-mips/io.h
@@ -4,7 +4,7 @@
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 1994, 1995 Waldorf GmbH 6 * Copyright (C) 1994, 1995 Waldorf GmbH
7 * Copyright (C) 1994 - 2000 Ralf Baechle 7 * Copyright (C) 1994 - 2000, 06 Ralf Baechle
8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
9 * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved. 9 * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
10 * Author: Maciej W. Rozycki <macro@mips.com> 10 * Author: Maciej W. Rozycki <macro@mips.com>
@@ -40,56 +40,13 @@
40 * hardware. An example use would be for flash memory that's used for 40 * hardware. An example use would be for flash memory that's used for
41 * execute in place. 41 * execute in place.
42 */ 42 */
43# define __raw_ioswabb(x) (x) 43# define __raw_ioswabb(a,x) (x)
44# define __raw_ioswabw(x) (x) 44# define __raw_ioswabw(a,x) (x)
45# define __raw_ioswabl(x) (x) 45# define __raw_ioswabl(a,x) (x)
46# define __raw_ioswabq(x) (x) 46# define __raw_ioswabq(a,x) (x)
47# define ____raw_ioswabq(x) (x) 47# define ____raw_ioswabq(a,x) (x)
48 48
49/* 49/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */
50 * Sane hardware offers swapping of PCI/ISA I/O space accesses in hardware;
51 * less sane hardware forces software to fiddle with this...
52 *
53 * Regardless, if the host bus endianness mismatches that of PCI/ISA, then
54 * you can't have the numerical value of data and byte addresses within
55 * multibyte quantities both preserved at the same time. Hence two
56 * variations of functions: non-prefixed ones that preserve the value
57 * and prefixed ones that preserve byte addresses. The latters are
58 * typically used for moving raw data between a peripheral and memory (cf.
59 * string I/O functions), hence the "__mem_" prefix.
60 */
61#if defined(CONFIG_SWAP_IO_SPACE)
62
63# define ioswabb(x) (x)
64# define __mem_ioswabb(x) (x)
65# ifdef CONFIG_SGI_IP22
66/*
67 * IP22 seems braindead enough to swap 16bits values in hardware, but
68 * not 32bits. Go figure... Can't tell without documentation.
69 */
70# define ioswabw(x) (x)
71# define __mem_ioswabw(x) le16_to_cpu(x)
72# else
73# define ioswabw(x) le16_to_cpu(x)
74# define __mem_ioswabw(x) (x)
75# endif
76# define ioswabl(x) le32_to_cpu(x)
77# define __mem_ioswabl(x) (x)
78# define ioswabq(x) le64_to_cpu(x)
79# define __mem_ioswabq(x) (x)
80
81#else
82
83# define ioswabb(x) (x)
84# define __mem_ioswabb(x) (x)
85# define ioswabw(x) (x)
86# define __mem_ioswabw(x) cpu_to_le16(x)
87# define ioswabl(x) (x)
88# define __mem_ioswabl(x) cpu_to_le32(x)
89# define ioswabq(x) (x)
90# define __mem_ioswabq(x) cpu_to_le32(x)
91
92#endif
93 50
94#define IO_SPACE_LIMIT 0xffff 51#define IO_SPACE_LIMIT 0xffff
95 52
@@ -103,8 +60,20 @@
103 */ 60 */
104extern const unsigned long mips_io_port_base; 61extern const unsigned long mips_io_port_base;
105 62
106#define set_io_port_base(base) \ 63/*
107 do { * (unsigned long *) &mips_io_port_base = (base); } while (0) 64 * Gcc will generate code to load the value of mips_io_port_base after each
65 * function call which may be fairly wasteful in some cases. So we don't
66 * play quite by the book. We tell gcc mips_io_port_base is a long variable
67 * which solves the code generation issue. Now we need to violate the
68 * aliasing rules a little to make initialization possible and finally we
69 * will need the barrier() to fight side effects of the aliasing chat.
70 * This trickery will eventually collapse under gcc's optimizer. Oh well.
71 */
72static inline void set_io_port_base(unsigned long base)
73{
74 * (unsigned long *) &mips_io_port_base = base;
75 barrier();
76}
108 77
109/* 78/*
110 * Thanks to James van Artsdalen for a better timing-fix than 79 * Thanks to James van Artsdalen for a better timing-fix than
@@ -334,7 +303,7 @@ static inline void pfx##write##bwlq(type val, \
334 \ 303 \
335 __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \ 304 __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
336 \ 305 \
337 __val = pfx##ioswab##bwlq(val); \ 306 __val = pfx##ioswab##bwlq(__mem, val); \
338 \ 307 \
339 if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \ 308 if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
340 *__mem = __val; \ 309 *__mem = __val; \
@@ -389,7 +358,7 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
389 BUG(); \ 358 BUG(); \
390 } \ 359 } \
391 \ 360 \
392 return pfx##ioswab##bwlq(__val); \ 361 return pfx##ioswab##bwlq(__mem, __val); \
393} 362}
394 363
395#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow) \ 364#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow) \
@@ -399,10 +368,9 @@ static inline void pfx##out##bwlq##p(type val, unsigned long port) \
399 volatile type *__addr; \ 368 volatile type *__addr; \
400 type __val; \ 369 type __val; \
401 \ 370 \
402 port = __swizzle_addr_##bwlq(port); \ 371 __addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
403 __addr = (void *)(mips_io_port_base + port); \
404 \ 372 \
405 __val = pfx##ioswab##bwlq(val); \ 373 __val = pfx##ioswab##bwlq(__addr, val); \
406 \ 374 \
407 /* Really, we want this to be atomic */ \ 375 /* Really, we want this to be atomic */ \
408 BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \ 376 BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
@@ -416,15 +384,14 @@ static inline type pfx##in##bwlq##p(unsigned long port) \
416 volatile type *__addr; \ 384 volatile type *__addr; \
417 type __val; \ 385 type __val; \
418 \ 386 \
419 port = __swizzle_addr_##bwlq(port); \ 387 __addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
420 __addr = (void *)(mips_io_port_base + port); \
421 \ 388 \
422 BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \ 389 BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
423 \ 390 \
424 __val = *__addr; \ 391 __val = *__addr; \
425 slow; \ 392 slow; \
426 \ 393 \
427 return pfx##ioswab##bwlq(__val); \ 394 return pfx##ioswab##bwlq(__addr, __val); \
428} 395}
429 396
430#define __BUILD_MEMORY_PFX(bus, bwlq, type) \ 397#define __BUILD_MEMORY_PFX(bus, bwlq, type) \
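
Two things change in the port accessors above: the address swizzle is now applied to the full bus address (mips_io_port_base + port), and every ioswab helper receives that address as a new first argument. A simplified sketch (not the literal macro expansion) of what the generated byte-sized output accessor ends up doing:

static inline void example_outb(u8 val, unsigned long port)
{
	volatile u8 *addr;

	/* swizzle the complete bus address, not just the port number */
	addr = (void *)__swizzle_addr_b(mips_io_port_base + port);

	/* the swap helper now also sees the target address */
	*addr = ioswabb(addr, val);
}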
diff --git a/include/asm-mips/mach-cobalt/cobalt.h b/include/asm-mips/mach-cobalt/cobalt.h
index 78e1df2095fb..b3c5ecbec03c 100644
--- a/include/asm-mips/mach-cobalt/cobalt.h
+++ b/include/asm-mips/mach-cobalt/cobalt.h
@@ -113,4 +113,6 @@ do { \
113# define COBALT_KEY_SELECT (1 << 7) 113# define COBALT_KEY_SELECT (1 << 7)
114# define COBALT_KEY_MASK 0xfe 114# define COBALT_KEY_MASK 0xfe
115 115
116#define COBALT_UART ((volatile unsigned char *) CKSEG1ADDR(0x1c800000))
117
116#endif /* __ASM_COBALT_H */ 118#endif /* __ASM_COBALT_H */
diff --git a/include/asm-mips/mach-generic/mangle-port.h b/include/asm-mips/mach-generic/mangle-port.h
index 4a98d83b8ec7..6e1b0c075de7 100644
--- a/include/asm-mips/mach-generic/mangle-port.h
+++ b/include/asm-mips/mach-generic/mangle-port.h
@@ -13,4 +13,40 @@
13#define __swizzle_addr_l(port) (port) 13#define __swizzle_addr_l(port) (port)
14#define __swizzle_addr_q(port) (port) 14#define __swizzle_addr_q(port) (port)
15 15
16/*
17 * Sane hardware offers swapping of PCI/ISA I/O space accesses in hardware;
18 * less sane hardware forces software to fiddle with this...
19 *
20 * Regardless, if the host bus endianness mismatches that of PCI/ISA, then
21 * you can't have the numerical value of data and byte addresses within
22 * multibyte quantities both preserved at the same time. Hence two
23 * variations of functions: non-prefixed ones that preserve the value
24 * and prefixed ones that preserve byte addresses. The latters are
25 * typically used for moving raw data between a peripheral and memory (cf.
26 * string I/O functions), hence the "__mem_" prefix.
27 */
28#if defined(CONFIG_SWAP_IO_SPACE)
29
30# define ioswabb(a,x) (x)
31# define __mem_ioswabb(a,x) (x)
32# define ioswabw(a,x) le16_to_cpu(x)
33# define __mem_ioswabw(a,x) (x)
34# define ioswabl(a,x) le32_to_cpu(x)
35# define __mem_ioswabl(a,x) (x)
36# define ioswabq(a,x) le64_to_cpu(x)
37# define __mem_ioswabq(a,x) (x)
38
39#else
40
41# define ioswabb(a,x) (x)
42# define __mem_ioswabb(a,x) (x)
43# define ioswabw(a,x) (x)
44# define __mem_ioswabw(a,x) cpu_to_le16(x)
45# define ioswabl(a,x) (x)
46# define __mem_ioswabl(a,x) cpu_to_le32(x)
47# define ioswabq(a,x) (x)
48# define __mem_ioswabq(a,x) cpu_to_le32(x)
49
50#endif
51
16#endif /* __ASM_MACH_GENERIC_MANGLE_PORT_H */ 52#endif /* __ASM_MACH_GENERIC_MANGLE_PORT_H */
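
A worked example (not part of the patch) of the value-preserving vs. byte-address-preserving distinction described in the comment above, assuming a big-endian CPU with CONFIG_SWAP_IO_SPACE and a 16-bit device register holding the little-endian value 0x1234, i.e. bytes 0x34, 0x12 on the bus:

/*
 * - readw()/inw() go through ioswabw(), here le16_to_cpu(), and
 *   return 0x1234: the numerical value the device meant is preserved.
 *
 * - the string accessors (readsw()/insw() and friends) go through
 *   __mem_ioswabw(), here the identity, so byte 0 of the register
 *   stays byte 0 in memory: byte addresses are preserved for raw
 *   data transfers.
 *
 * The new "a" (address) argument is unused in this generic header,
 * but lets a platform mangle-port.h choose the swap per address
 * instead of per build.
 */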
diff --git a/include/asm-mips/mach-ip27/mangle-port.h b/include/asm-mips/mach-ip27/mangle-port.h
index f76c44880451..d615312a451a 100644
--- a/include/asm-mips/mach-ip27/mangle-port.h
+++ b/include/asm-mips/mach-ip27/mangle-port.h
@@ -13,4 +13,13 @@
13#define __swizzle_addr_l(port) (port) 13#define __swizzle_addr_l(port) (port)
14#define __swizzle_addr_q(port) (port) 14#define __swizzle_addr_q(port) (port)
15 15
16# define ioswabb(a,x) (x)
17# define __mem_ioswabb(a,x) (x)
18# define ioswabw(a,x) (x)
19# define __mem_ioswabw(a,x) cpu_to_le16(x)
20# define ioswabl(a,x) (x)
21# define __mem_ioswabl(a,x) cpu_to_le32(x)
22# define ioswabq(a,x) (x)
23# define __mem_ioswabq(a,x) cpu_to_le32(x)
24
16#endif /* __ASM_MACH_IP27_MANGLE_PORT_H */ 25#endif /* __ASM_MACH_IP27_MANGLE_PORT_H */
diff --git a/include/asm-mips/mach-ip32/mangle-port.h b/include/asm-mips/mach-ip32/mangle-port.h
index 6e25b52ed8f2..81320eb55324 100644
--- a/include/asm-mips/mach-ip32/mangle-port.h
+++ b/include/asm-mips/mach-ip32/mangle-port.h
@@ -14,4 +14,13 @@
14#define __swizzle_addr_l(port) (port) 14#define __swizzle_addr_l(port) (port)
15#define __swizzle_addr_q(port) (port) 15#define __swizzle_addr_q(port) (port)
16 16
17# define ioswabb(a,x) (x)
18# define __mem_ioswabb(a,x) (x)
19# define ioswabw(a,x) (x)
20# define __mem_ioswabw(a,x) cpu_to_le16(x)
21# define ioswabl(a,x) (x)
22# define __mem_ioswabl(a,x) cpu_to_le32(x)
23# define ioswabq(a,x) (x)
24# define __mem_ioswabq(a,x) cpu_to_le32(x)
25
17#endif /* __ASM_MACH_IP32_MANGLE_PORT_H */ 26#endif /* __ASM_MACH_IP32_MANGLE_PORT_H */
diff --git a/include/asm-mips/mach-mips/cpu-feature-overrides.h b/include/asm-mips/mach-mips/cpu-feature-overrides.h
index 9f92aed17754..e06af6c86f86 100644
--- a/include/asm-mips/mach-mips/cpu-feature-overrides.h
+++ b/include/asm-mips/mach-mips/cpu-feature-overrides.h
@@ -29,7 +29,11 @@
29/* #define cpu_has_prefetch ? */ 29/* #define cpu_has_prefetch ? */
30#define cpu_has_mcheck 1 30#define cpu_has_mcheck 1
31/* #define cpu_has_ejtag ? */ 31/* #define cpu_has_ejtag ? */
32#ifdef CONFIG_CPU_HAS_LLSC
32#define cpu_has_llsc 1 33#define cpu_has_llsc 1
34#else
35#define cpu_has_llsc 0
36#endif
33/* #define cpu_has_vtag_icache ? */ 37/* #define cpu_has_vtag_icache ? */
34/* #define cpu_has_dc_aliases ? */ 38/* #define cpu_has_dc_aliases ? */
35/* #define cpu_has_ic_fills_f_dc ? */ 39/* #define cpu_has_ic_fills_f_dc ? */
diff --git a/include/asm-mips/mmu_context.h b/include/asm-mips/mmu_context.h
index 19cdf7642e66..61cf22588137 100644
--- a/include/asm-mips/mmu_context.h
+++ b/include/asm-mips/mmu_context.h
@@ -33,12 +33,7 @@ extern unsigned long pgd_current[];
33 write_c0_context((unsigned long) smp_processor_id() << 25); \ 33 write_c0_context((unsigned long) smp_processor_id() << 25); \
34 TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir) 34 TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
35#endif 35#endif
36#if defined(CONFIG_64BIT) && !defined(CONFIG_BUILD_ELF64) 36#ifdef CONFIG_64BIT
37#define TLBMISS_HANDLER_SETUP() \
38 write_c0_context((unsigned long) &pgd_current[smp_processor_id()] << 23); \
39 TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
40#endif
41#if defined(CONFIG_64BIT) && defined(CONFIG_BUILD_ELF64)
42#define TLBMISS_HANDLER_SETUP() \ 37#define TLBMISS_HANDLER_SETUP() \
43 write_c0_context((unsigned long) smp_processor_id() << 26); \ 38 write_c0_context((unsigned long) smp_processor_id() << 26); \
44 TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir) 39 TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
diff --git a/include/asm-mips/pgtable-32.h b/include/asm-mips/pgtable-32.h
index 0cff64ce0fb8..4d6bc45df594 100644
--- a/include/asm-mips/pgtable-32.h
+++ b/include/asm-mips/pgtable-32.h
@@ -206,7 +206,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
206 /* fixme */ 206 /* fixme */
207#define pte_to_pgoff(_pte) (((_pte).pte_high >> 6) + ((_pte).pte_high & 0x3f)) 207#define pte_to_pgoff(_pte) (((_pte).pte_high >> 6) + ((_pte).pte_high & 0x3f))
208#define pgoff_to_pte(off) \ 208#define pgoff_to_pte(off) \
209 ((pte_t){(((off) & 0x3f) + ((off) << 6) + _PAGE_FILE)}) 209 ((pte_t){(((off) & 0x3f) + ((off) << 6) + _PAGE_FILE)})
210 210
211#else 211#else
212#define pte_to_pgoff(_pte) \ 212#define pte_to_pgoff(_pte) \
diff --git a/include/asm-mips/r4kcache.h b/include/asm-mips/r4kcache.h
index 9632c27dad15..90c374700977 100644
--- a/include/asm-mips/r4kcache.h
+++ b/include/asm-mips/r4kcache.h
@@ -257,7 +257,8 @@ static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \
257 \ 257 \
258static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \ 258static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
259{ \ 259{ \
260 unsigned long start = page; \ 260 unsigned long indexmask = current_cpu_data.desc.waysize - 1; \
261 unsigned long start = INDEX_BASE + (page & indexmask); \
261 unsigned long end = start + PAGE_SIZE; \ 262 unsigned long end = start + PAGE_SIZE; \
262 unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \ 263 unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
263 unsigned long ws_end = current_cpu_data.desc.ways << \ 264 unsigned long ws_end = current_cpu_data.desc.ways << \
@@ -302,5 +303,6 @@ __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, )
302__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, ) 303__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, )
303/* blast_inv_dcache_range */ 304/* blast_inv_dcache_range */
304__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, ) 305__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, )
306__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, )
305 307
306#endif /* _ASM_R4KCACHE_H */ 308#endif /* _ASM_R4KCACHE_H */
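
A worked example (not part of the patch) of the new index computation, assuming a 32 kB, 2-way set-associative D-cache with 32-byte lines:

/*
 * waysize = 32 kB / 2 ways = 16 kB   ->   indexmask = 0x3fff
 *
 * Index-type cache ops select a line by index rather than by the full
 * virtual address, so the page address is reduced to its offset
 * within one way and rebased onto INDEX_BASE (a fixed CKSEG0 window):
 *
 *	start = INDEX_BASE + (page & 0x3fff);
 *
 * The existing loop then repeats the PAGE_SIZE sweep once per way by
 * stepping ws_inc = 1UL << waybit, rather than issuing the op on the
 * unmasked page address as before.
 */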
diff --git a/include/asm-mips/sibyte/sb1250.h b/include/asm-mips/sibyte/sb1250.h
index a474c29cd701..b09e16c93ca0 100644
--- a/include/asm-mips/sibyte/sb1250.h
+++ b/include/asm-mips/sibyte/sb1250.h
@@ -45,8 +45,8 @@ extern unsigned int soc_type;
45extern unsigned int periph_rev; 45extern unsigned int periph_rev;
46extern unsigned int zbbus_mhz; 46extern unsigned int zbbus_mhz;
47 47
48extern void sb1250_hpt_setup(void);
48extern void sb1250_time_init(void); 49extern void sb1250_time_init(void);
49extern unsigned long sb1250_gettimeoffset(void);
50extern void sb1250_mask_irq(int cpu, int irq); 50extern void sb1250_mask_irq(int cpu, int irq);
51extern void sb1250_unmask_irq(int cpu, int irq); 51extern void sb1250_unmask_irq(int cpu, int irq);
52extern void sb1250_smp_finish(void); 52extern void sb1250_smp_finish(void);
diff --git a/include/asm-mips/sibyte/sb1250_scd.h b/include/asm-mips/sibyte/sb1250_scd.h
index a667bc14a7cd..f4178bdcfcb0 100644
--- a/include/asm-mips/sibyte/sb1250_scd.h
+++ b/include/asm-mips/sibyte/sb1250_scd.h
@@ -359,14 +359,15 @@
359 */ 359 */
360 360
361#define V_SCD_TIMER_FREQ 1000000 361#define V_SCD_TIMER_FREQ 1000000
362#define V_SCD_TIMER_WIDTH 23
362 363
363#define S_SCD_TIMER_INIT 0 364#define S_SCD_TIMER_INIT 0
364#define M_SCD_TIMER_INIT _SB_MAKEMASK(20,S_SCD_TIMER_INIT) 365#define M_SCD_TIMER_INIT _SB_MAKEMASK(V_SCD_TIMER_WIDTH,S_SCD_TIMER_INIT)
365#define V_SCD_TIMER_INIT(x) _SB_MAKEVALUE(x,S_SCD_TIMER_INIT) 366#define V_SCD_TIMER_INIT(x) _SB_MAKEVALUE(x,S_SCD_TIMER_INIT)
366#define G_SCD_TIMER_INIT(x) _SB_GETVALUE(x,S_SCD_TIMER_INIT,M_SCD_TIMER_INIT) 367#define G_SCD_TIMER_INIT(x) _SB_GETVALUE(x,S_SCD_TIMER_INIT,M_SCD_TIMER_INIT)
367 368
368#define S_SCD_TIMER_CNT 0 369#define S_SCD_TIMER_CNT 0
369#define M_SCD_TIMER_CNT _SB_MAKEMASK(20,S_SCD_TIMER_CNT) 370#define M_SCD_TIMER_CNT _SB_MAKEMASK(V_SCD_TIMER_WIDTH,S_SCD_TIMER_CNT)
370#define V_SCD_TIMER_CNT(x) _SB_MAKEVALUE(x,S_SCD_TIMER_CNT) 371#define V_SCD_TIMER_CNT(x) _SB_MAKEVALUE(x,S_SCD_TIMER_CNT)
371#define G_SCD_TIMER_CNT(x) _SB_GETVALUE(x,S_SCD_TIMER_CNT,M_SCD_TIMER_CNT) 372#define G_SCD_TIMER_CNT(x) _SB_GETVALUE(x,S_SCD_TIMER_CNT,M_SCD_TIMER_CNT)
372 373
diff --git a/include/asm-mips/signal.h b/include/asm-mips/signal.h
index 6fe903e09c62..d8349e4b55ee 100644
--- a/include/asm-mips/signal.h
+++ b/include/asm-mips/signal.h
@@ -147,16 +147,34 @@ struct k_sigaction {
147 147
148/* IRIX compatible stack_t */ 148/* IRIX compatible stack_t */
149typedef struct sigaltstack { 149typedef struct sigaltstack {
150 void *ss_sp; 150 void __user *ss_sp;
151 size_t ss_size; 151 size_t ss_size;
152 int ss_flags; 152 int ss_flags;
153} stack_t; 153} stack_t;
154 154
155#ifdef __KERNEL__ 155#ifdef __KERNEL__
156#include <asm/sigcontext.h> 156#include <asm/sigcontext.h>
157#include <asm/siginfo.h>
157 158
158#define ptrace_signal_deliver(regs, cookie) do { } while (0) 159#define ptrace_signal_deliver(regs, cookie) do { } while (0)
159 160
161struct pt_regs;
162extern void do_signal(struct pt_regs *regs);
163extern void do_signal32(struct pt_regs *regs);
164
165extern int setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
166 int signr, sigset_t *set);
167extern int setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs,
168 int signr, sigset_t *set, siginfo_t *info);
169
170extern int setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
171 int signr, sigset_t *set);
172extern int setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
173 int signr, sigset_t *set, siginfo_t *info);
174
175extern int setup_rt_frame_n32(struct k_sigaction * ka, struct pt_regs *regs,
176 int signr, sigset_t *set, siginfo_t *info);
177
160#endif /* __KERNEL__ */ 178#endif /* __KERNEL__ */
161 179
162#endif /* _ASM_SIGNAL_H */ 180#endif /* _ASM_SIGNAL_H */
diff --git a/include/asm-mips/sn/klconfig.h b/include/asm-mips/sn/klconfig.h
index d028e28d6239..9709ff701d9b 100644
--- a/include/asm-mips/sn/klconfig.h
+++ b/include/asm-mips/sn/klconfig.h
@@ -99,7 +99,7 @@ typedef s32 klconf_off_t;
99#define ENABLE_BOARD 0x01 99#define ENABLE_BOARD 0x01
100#define FAILED_BOARD 0x02 100#define FAILED_BOARD 0x02
101#define DUPLICATE_BOARD 0x04 /* Boards like midplanes/routers which 101#define DUPLICATE_BOARD 0x04 /* Boards like midplanes/routers which
102 are discovered twice. Use one of them */ 102 are discovered twice. Use one of them */
103#define VISITED_BOARD 0x08 /* Used for compact hub numbering. */ 103#define VISITED_BOARD 0x08 /* Used for compact hub numbering. */
104#define LOCAL_MASTER_IO6 0x10 /* master io6 for that node */ 104#define LOCAL_MASTER_IO6 0x10 /* master io6 for that node */
105#define GLOBAL_MASTER_IO6 0x20 105#define GLOBAL_MASTER_IO6 0x20
diff --git a/include/asm-mips/sn/mapped_kernel.h b/include/asm-mips/sn/mapped_kernel.h
index 3a17846df849..59edb20f8ec5 100644
--- a/include/asm-mips/sn/mapped_kernel.h
+++ b/include/asm-mips/sn/mapped_kernel.h
@@ -23,11 +23,7 @@
23#include <linux/config.h> 23#include <linux/config.h>
24#include <asm/addrspace.h> 24#include <asm/addrspace.h>
25 25
26#ifdef CONFIG_BUILD_ELF64
27#define REP_BASE CAC_BASE 26#define REP_BASE CAC_BASE
28#else
29#define REP_BASE CKSEG0
30#endif
31 27
32#ifdef CONFIG_MAPPED_KERNEL 28#ifdef CONFIG_MAPPED_KERNEL
33 29
diff --git a/include/asm-mips/sn/sn0/hubio.h b/include/asm-mips/sn/sn0/hubio.h
index 80cf6a52ed3b..f314da21b970 100644
--- a/include/asm-mips/sn/sn0/hubio.h
+++ b/include/asm-mips/sn/sn0/hubio.h
@@ -229,7 +229,7 @@ typedef union hubii_ilcsr_u {
229 icsr_llp_en: 1, /* LLP enable bit */ 229 icsr_llp_en: 1, /* LLP enable bit */
230 icsr_rsvd2: 1, /* reserver */ 230 icsr_rsvd2: 1, /* reserver */
231 icsr_wrm_reset: 1, /* Warm reset bit */ 231 icsr_wrm_reset: 1, /* Warm reset bit */
232 icsr_rsvd1: 2, /* Data ready offset */ 232 icsr_rsvd1: 2, /* Data ready offset */
233 icsr_null_to: 6; /* Null timeout */ 233 icsr_null_to: 6; /* Null timeout */
234 234
235 } icsr_fields_s; 235 } icsr_fields_s;
@@ -274,9 +274,9 @@ typedef union io_perf_sel {
274 u64 perf_sel_reg; 274 u64 perf_sel_reg;
275 struct { 275 struct {
276 u64 perf_rsvd : 48, 276 u64 perf_rsvd : 48,
277 perf_icct : 8, 277 perf_icct : 8,
278 perf_ippr1 : 4, 278 perf_ippr1 : 4,
279 perf_ippr0 : 4; 279 perf_ippr0 : 4;
280 } perf_sel_bits; 280 } perf_sel_bits;
281} io_perf_sel_t; 281} io_perf_sel_t;
282 282
@@ -287,8 +287,8 @@ typedef union io_perf_cnt {
287 u64 perf_cnt; 287 u64 perf_cnt;
288 struct { 288 struct {
289 u64 perf_rsvd1 : 32, 289 u64 perf_rsvd1 : 32,
290 perf_rsvd2 : 12, 290 perf_rsvd2 : 12,
291 perf_cnt : 20; 291 perf_cnt : 20;
292 } perf_cnt_bits; 292 } perf_cnt_bits;
293} io_perf_cnt_t; 293} io_perf_cnt_t;
294 294
diff --git a/include/asm-mips/stackframe.h b/include/asm-mips/stackframe.h
index a8919dcc93c8..2acf3e844f00 100644
--- a/include/asm-mips/stackframe.h
+++ b/include/asm-mips/stackframe.h
@@ -63,17 +63,7 @@
63 addu k1, k0 63 addu k1, k0
64 LONG_L k1, %lo(kernelsp)(k1) 64 LONG_L k1, %lo(kernelsp)(k1)
65#endif 65#endif
66#if defined(CONFIG_64BIT) && !defined(CONFIG_BUILD_ELF64) 66#ifdef CONFIG_64BIT
67 MFC0 k1, CP0_CONTEXT
68 dsra k1, 23
69 lui k0, %hi(pgd_current)
70 addiu k0, %lo(pgd_current)
71 dsubu k1, k0
72 lui k0, %hi(kernelsp)
73 daddu k1, k0
74 LONG_L k1, %lo(kernelsp)(k1)
75#endif
76#if defined(CONFIG_64BIT) && defined(CONFIG_BUILD_ELF64)
77 MFC0 k1, CP0_CONTEXT 67 MFC0 k1, CP0_CONTEXT
78 lui k0, %highest(kernelsp) 68 lui k0, %highest(kernelsp)
79 dsrl k1, 23 69 dsrl k1, 23
@@ -91,11 +81,7 @@
91 mfc0 \temp, CP0_CONTEXT 81 mfc0 \temp, CP0_CONTEXT
92 srl \temp, 23 82 srl \temp, 23
93#endif 83#endif
94#if defined(CONFIG_64BIT) && !defined(CONFIG_BUILD_ELF64) 84#ifdef CONFIG_64BIT
95 lw \temp, TI_CPU(gp)
96 dsll \temp, 3
97#endif
98#if defined(CONFIG_64BIT) && defined(CONFIG_BUILD_ELF64)
99 MFC0 \temp, CP0_CONTEXT 85 MFC0 \temp, CP0_CONTEXT
100 dsrl \temp, 23 86 dsrl \temp, 23
101#endif 87#endif
@@ -103,7 +89,7 @@
103 .endm 89 .endm
104#else 90#else
105 .macro get_saved_sp /* Uniprocessor variation */ 91 .macro get_saved_sp /* Uniprocessor variation */
106#if defined(CONFIG_64BIT) && defined(CONFIG_BUILD_ELF64) 92#ifdef CONFIG_64BIT
107 lui k1, %highest(kernelsp) 93 lui k1, %highest(kernelsp)
108 daddiu k1, %higher(kernelsp) 94 daddiu k1, %higher(kernelsp)
109 dsll k1, k1, 16 95 dsll k1, k1, 16
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index ddae9bae31af..4097fac5ac3c 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -286,10 +286,10 @@ extern void __xchg_called_with_bad_pointer(void);
286static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) 286static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
287{ 287{
288 switch (size) { 288 switch (size) {
289 case 4: 289 case 4:
290 return __xchg_u32(ptr, x); 290 return __xchg_u32(ptr, x);
291 case 8: 291 case 8:
292 return __xchg_u64(ptr, x); 292 return __xchg_u64(ptr, x);
293 } 293 }
294 __xchg_called_with_bad_pointer(); 294 __xchg_called_with_bad_pointer();
295 return x; 295 return x;
diff --git a/include/asm-mips/thread_info.h b/include/asm-mips/thread_info.h
index fa193f861e71..f8d97dafd2f4 100644
--- a/include/asm-mips/thread_info.h
+++ b/include/asm-mips/thread_info.h
@@ -31,7 +31,7 @@ struct thread_info {
31 int preempt_count; /* 0 => preemptable, <0 => BUG */ 31 int preempt_count; /* 0 => preemptable, <0 => BUG */
32 32
33 mm_segment_t addr_limit; /* thread address space: 33 mm_segment_t addr_limit; /* thread address space:
34 0-0xBFFFFFFF for user-thead 34 0-0xBFFFFFFF for user-thead
35 0-0xFFFFFFFF for kernel-thread 35 0-0xFFFFFFFF for kernel-thread
36 */ 36 */
37 struct restart_block restart_block; 37 struct restart_block restart_block;
diff --git a/include/asm-powerpc/ppc-pci.h b/include/asm-powerpc/ppc-pci.h
index f80482c7231f..cf79bc7ebb55 100644
--- a/include/asm-powerpc/ppc-pci.h
+++ b/include/asm-powerpc/ppc-pci.h
@@ -38,6 +38,7 @@ void *traverse_pci_devices(struct device_node *start, traverse_func pre,
38 38
39void pci_devs_phb_init(void); 39void pci_devs_phb_init(void);
40void pci_devs_phb_init_dynamic(struct pci_controller *phb); 40void pci_devs_phb_init_dynamic(struct pci_controller *phb);
41int setup_phb(struct device_node *dev, struct pci_controller *phb);
41void __devinit scan_phb(struct pci_controller *hose); 42void __devinit scan_phb(struct pci_controller *hose);
42 43
43/* From rtas_pci.h */ 44/* From rtas_pci.h */
diff --git a/include/asm-sparc/idprom.h b/include/asm-sparc/idprom.h
index d856e640acd3..59083ed85232 100644
--- a/include/asm-sparc/idprom.h
+++ b/include/asm-sparc/idprom.h
@@ -7,27 +7,19 @@
7#ifndef _SPARC_IDPROM_H 7#ifndef _SPARC_IDPROM_H
8#define _SPARC_IDPROM_H 8#define _SPARC_IDPROM_H
9 9
10/* Offset into the EEPROM where the id PROM is located on the 4c */ 10#include <linux/types.h>
11#define IDPROM_OFFSET 0x7d8
12 11
13/* On sun4m; physical. */ 12struct idprom {
14/* MicroSPARC(-II) does not decode 31rd bit, but it works. */ 13 u8 id_format; /* Format identifier (always 0x01) */
15#define IDPROM_OFFSET_M 0xfd8 14 u8 id_machtype; /* Machine type */
16 15 u8 id_ethaddr[6]; /* Hardware ethernet address */
17struct idprom 16 s32 id_date; /* Date of manufacture */
18{ 17 u32 id_sernum:24; /* Unique serial number */
19 unsigned char id_format; /* Format identifier (always 0x01) */ 18 u8 id_cksum; /* Checksum - xor of the data bytes */
20 unsigned char id_machtype; /* Machine type */ 19 u8 reserved[16];
21 unsigned char id_ethaddr[6]; /* Hardware ethernet address */
22 long id_date; /* Date of manufacture */
23 unsigned int id_sernum:24; /* Unique serial number */
24 unsigned char id_cksum; /* Checksum - xor of the data bytes */
25 unsigned char reserved[16];
26}; 20};
27 21
28extern struct idprom *idprom; 22extern struct idprom *idprom;
29extern void idprom_init(void); 23extern void idprom_init(void);
30 24
31#define IDPROM_SIZE (sizeof(struct idprom))
32
33#endif /* !(_SPARC_IDPROM_H) */ 25#endif /* !(_SPARC_IDPROM_H) */
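
The id_cksum comment above says the checksum is the xor of the data bytes; a small sketch (not part of the patch) of a verification matching what the sparc idprom setup code does, folding together the first 15 bytes (everything before id_cksum):

#include <linux/types.h>
#include <asm/idprom.h>		/* struct idprom, as defined above */

static int example_idprom_cksum_ok(const struct idprom *ip)
{
	const u8 *p = (const u8 *)ip;
	u8 cksum = 0;
	int i;

	for (i = 0; i <= 0x0e; i++)	/* id_format .. id_sernum */
		cksum ^= p[i];

	return cksum == ip->id_cksum;
}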
diff --git a/include/asm-sparc/oplib.h b/include/asm-sparc/oplib.h
index d0d76b30eb4c..f283f8aaf6a9 100644
--- a/include/asm-sparc/oplib.h
+++ b/include/asm-sparc/oplib.h
@@ -165,6 +165,7 @@ enum prom_input_device {
165 PROMDEV_ITTYA, /* input from ttya */ 165 PROMDEV_ITTYA, /* input from ttya */
166 PROMDEV_ITTYB, /* input from ttyb */ 166 PROMDEV_ITTYB, /* input from ttyb */
167 PROMDEV_IRSC, /* input from rsc */ 167 PROMDEV_IRSC, /* input from rsc */
168 PROMDEV_IVCONS, /* input from virtual-console */
168 PROMDEV_I_UNK, 169 PROMDEV_I_UNK,
169}; 170};
170 171
@@ -177,6 +178,7 @@ enum prom_output_device {
177 PROMDEV_OTTYA, /* to ttya */ 178 PROMDEV_OTTYA, /* to ttya */
178 PROMDEV_OTTYB, /* to ttyb */ 179 PROMDEV_OTTYB, /* to ttyb */
179 PROMDEV_ORSC, /* to rsc */ 180 PROMDEV_ORSC, /* to rsc */
181 PROMDEV_OVCONS, /* to virtual-console */
180 PROMDEV_O_UNK, 182 PROMDEV_O_UNK,
181}; 183};
182 184
diff --git a/include/asm-sparc/socket.h b/include/asm-sparc/socket.h
index 09575b608adb..4e0ce3a35ea9 100644
--- a/include/asm-sparc/socket.h
+++ b/include/asm-sparc/socket.h
@@ -47,7 +47,7 @@
47#define SO_TIMESTAMP 0x001d 47#define SO_TIMESTAMP 0x001d
48#define SCM_TIMESTAMP SO_TIMESTAMP 48#define SCM_TIMESTAMP SO_TIMESTAMP
49 49
50#define SO_PEERSEC 0x100e 50#define SO_PEERSEC 0x001e
51 51
52/* Security levels - as per NRL IPv6 - don't actually do anything */ 52/* Security levels - as per NRL IPv6 - don't actually do anything */
53#define SO_SECURITY_AUTHENTICATION 0x5001 53#define SO_SECURITY_AUTHENTICATION 0x5001
diff --git a/include/asm-sparc/uaccess.h b/include/asm-sparc/uaccess.h
index f8f1ec1f06e6..3cf132e1aa25 100644
--- a/include/asm-sparc/uaccess.h
+++ b/include/asm-sparc/uaccess.h
@@ -120,17 +120,6 @@ case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
120default: __pu_ret = __put_user_bad(); break; \ 120default: __pu_ret = __put_user_bad(); break; \
121} } else { __pu_ret = -EFAULT; } __pu_ret; }) 121} } else { __pu_ret = -EFAULT; } __pu_ret; })
122 122
123#define __put_user_check_ret(x,addr,size,retval) ({ \
124register int __foo __asm__ ("l1"); \
125if (__access_ok(addr,size)) { \
126switch (size) { \
127case 1: __put_user_asm_ret(x,b,addr,retval,__foo); break; \
128case 2: __put_user_asm_ret(x,h,addr,retval,__foo); break; \
129case 4: __put_user_asm_ret(x,,addr,retval,__foo); break; \
130case 8: __put_user_asm_ret(x,d,addr,retval,__foo); break; \
131default: if (__put_user_bad()) return retval; break; \
132} } else return retval; })
133
134#define __put_user_nocheck(x,addr,size) ({ \ 123#define __put_user_nocheck(x,addr,size) ({ \
135register int __pu_ret; \ 124register int __pu_ret; \
136switch (size) { \ 125switch (size) { \
@@ -141,16 +130,6 @@ case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
141default: __pu_ret = __put_user_bad(); break; \ 130default: __pu_ret = __put_user_bad(); break; \
142} __pu_ret; }) 131} __pu_ret; })
143 132
144#define __put_user_nocheck_ret(x,addr,size,retval) ({ \
145register int __foo __asm__ ("l1"); \
146switch (size) { \
147case 1: __put_user_asm_ret(x,b,addr,retval,__foo); break; \
148case 2: __put_user_asm_ret(x,h,addr,retval,__foo); break; \
149case 4: __put_user_asm_ret(x,,addr,retval,__foo); break; \
150case 8: __put_user_asm_ret(x,d,addr,retval,__foo); break; \
151default: if (__put_user_bad()) return retval; break; \
152} })
153
154#define __put_user_asm(x,size,addr,ret) \ 133#define __put_user_asm(x,size,addr,ret) \
155__asm__ __volatile__( \ 134__asm__ __volatile__( \
156 "/* Put user asm, inline. */\n" \ 135 "/* Put user asm, inline. */\n" \
@@ -170,32 +149,6 @@ __asm__ __volatile__( \
170 : "=&r" (ret) : "r" (x), "m" (*__m(addr)), \ 149 : "=&r" (ret) : "r" (x), "m" (*__m(addr)), \
171 "i" (-EFAULT)) 150 "i" (-EFAULT))
172 151
173#define __put_user_asm_ret(x,size,addr,ret,foo) \
174if (__builtin_constant_p(ret) && ret == -EFAULT) \
175__asm__ __volatile__( \
176 "/* Put user asm ret, inline. */\n" \
177"1:\t" "st"#size " %1, %2\n\n\t" \
178 ".section __ex_table,#alloc\n\t" \
179 ".align 4\n\t" \
180 ".word 1b, __ret_efault\n\n\t" \
181 ".previous\n\n\t" \
182 : "=r" (foo) : "r" (x), "m" (*__m(addr))); \
183else \
184__asm__ __volatile( \
185 "/* Put user asm ret, inline. */\n" \
186"1:\t" "st"#size " %1, %2\n\n\t" \
187 ".section .fixup,#alloc,#execinstr\n\t" \
188 ".align 4\n" \
189"3:\n\t" \
190 "ret\n\t" \
191 " restore %%g0, %3, %%o0\n\t" \
192 ".previous\n\n\t" \
193 ".section __ex_table,#alloc\n\t" \
194 ".align 4\n\t" \
195 ".word 1b, 3b\n\n\t" \
196 ".previous\n\n\t" \
197 : "=r" (foo) : "r" (x), "m" (*__m(addr)), "i" (ret))
198
199extern int __put_user_bad(void); 152extern int __put_user_bad(void);
200 153
201#define __get_user_check(x,addr,size,type) ({ \ 154#define __get_user_check(x,addr,size,type) ({ \
diff --git a/include/asm-sparc64/a.out.h b/include/asm-sparc64/a.out.h
index 02af289e3f46..35cb5c9e0c92 100644
--- a/include/asm-sparc64/a.out.h
+++ b/include/asm-sparc64/a.out.h
@@ -95,7 +95,11 @@ struct relocation_info /* used when header.a_machtype == M_SPARC */
95 95
96#ifdef __KERNEL__ 96#ifdef __KERNEL__
97 97
98#define STACK_TOP (test_thread_flag(TIF_32BIT) ? 0xf0000000 : 0x80000000000L) 98#define STACK_TOP32 ((1UL << 32UL) - PAGE_SIZE)
99#define STACK_TOP64 (0x0000080000000000UL - (1UL << 32UL))
100
101#define STACK_TOP (test_thread_flag(TIF_32BIT) ? \
102 STACK_TOP32 : STACK_TOP64)
99 103
100#endif 104#endif
101 105
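
The arithmetic behind the new limits, spelled out here for reference (not part of the patch), with the usual 8 kB sparc64 PAGE_SIZE assumed:

/*
 * STACK_TOP32 = 2^32 - PAGE_SIZE
 *             = 0x0000000100000000 - 0x2000 = 0x00000000ffffe000
 *
 * STACK_TOP64 = 0x0000080000000000 - 2^32
 *             = 0x000007ff00000000
 *
 * So 32-bit tasks get (almost) the full 4 GB below the stack instead
 * of the old hard-coded 0xf0000000, and 64-bit tasks now leave a 4 GB
 * gap below the old 0x80000000000 top.
 */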
diff --git a/include/asm-sparc64/asi.h b/include/asm-sparc64/asi.h
index 534855660f2a..662a21107ae6 100644
--- a/include/asm-sparc64/asi.h
+++ b/include/asm-sparc64/asi.h
@@ -25,14 +25,27 @@
25 25
26/* SpitFire and later extended ASIs. The "(III)" marker designates 26/* SpitFire and later extended ASIs. The "(III)" marker designates
27 * UltraSparc-III and later specific ASIs. The "(CMT)" marker designates 27 * UltraSparc-III and later specific ASIs. The "(CMT)" marker designates
28 * Chip Multi Threading specific ASIs. 28 * Chip Multi Threading specific ASIs. "(NG)" designates Niagara specific
29 * ASIs, "(4V)" designates SUN4V specific ASIs.
29 */ 30 */
30#define ASI_PHYS_USE_EC 0x14 /* PADDR, E-cachable */ 31#define ASI_PHYS_USE_EC 0x14 /* PADDR, E-cachable */
31#define ASI_PHYS_BYPASS_EC_E 0x15 /* PADDR, E-bit */ 32#define ASI_PHYS_BYPASS_EC_E 0x15 /* PADDR, E-bit */
33#define ASI_BLK_AIUP_4V 0x16 /* (4V) Prim, user, block ld/st */
34#define ASI_BLK_AIUS_4V 0x17 /* (4V) Sec, user, block ld/st */
32#define ASI_PHYS_USE_EC_L 0x1c /* PADDR, E-cachable, little endian*/ 35#define ASI_PHYS_USE_EC_L 0x1c /* PADDR, E-cachable, little endian*/
33#define ASI_PHYS_BYPASS_EC_E_L 0x1d /* PADDR, E-bit, little endian */ 36#define ASI_PHYS_BYPASS_EC_E_L 0x1d /* PADDR, E-bit, little endian */
37#define ASI_BLK_AIUP_L_4V 0x1e /* (4V) Prim, user, block, l-endian*/
38#define ASI_BLK_AIUS_L_4V 0x1f /* (4V) Sec, user, block, l-endian */
39#define ASI_SCRATCHPAD 0x20 /* (4V) Scratch Pad Registers */
40#define ASI_MMU 0x21 /* (4V) MMU Context Registers */
41#define ASI_BLK_INIT_QUAD_LDD_AIUS 0x23 /* (NG) init-store, twin load,
42 * secondary, user
43 */
34#define ASI_NUCLEUS_QUAD_LDD 0x24 /* Cachable, qword load */ 44#define ASI_NUCLEUS_QUAD_LDD 0x24 /* Cachable, qword load */
45#define ASI_QUEUE 0x25 /* (4V) Interrupt Queue Registers */
46#define ASI_QUAD_LDD_PHYS_4V 0x26 /* (4V) Physical, qword load */
35#define ASI_NUCLEUS_QUAD_LDD_L 0x2c /* Cachable, qword load, l-endian */ 47#define ASI_NUCLEUS_QUAD_LDD_L 0x2c /* Cachable, qword load, l-endian */
48#define ASI_QUAD_LDD_PHYS_L_4V 0x2e /* (4V) Phys, qword load, l-endian */
36#define ASI_PCACHE_DATA_STATUS 0x30 /* (III) PCache data stat RAM diag */ 49#define ASI_PCACHE_DATA_STATUS 0x30 /* (III) PCache data stat RAM diag */
37#define ASI_PCACHE_DATA 0x31 /* (III) PCache data RAM diag */ 50#define ASI_PCACHE_DATA 0x31 /* (III) PCache data RAM diag */
38#define ASI_PCACHE_TAG 0x32 /* (III) PCache tag RAM diag */ 51#define ASI_PCACHE_TAG 0x32 /* (III) PCache tag RAM diag */
@@ -137,6 +150,9 @@
137#define ASI_FL16_SL 0xdb /* Secondary, 1 16-bit, fpu ld/st,L*/ 150#define ASI_FL16_SL 0xdb /* Secondary, 1 16-bit, fpu ld/st,L*/
138#define ASI_BLK_COMMIT_P 0xe0 /* Primary, blk store commit */ 151#define ASI_BLK_COMMIT_P 0xe0 /* Primary, blk store commit */
139#define ASI_BLK_COMMIT_S 0xe1 /* Secondary, blk store commit */ 152#define ASI_BLK_COMMIT_S 0xe1 /* Secondary, blk store commit */
153#define ASI_BLK_INIT_QUAD_LDD_P 0xe2 /* (NG) init-store, twin load,
154 * primary, implicit
155 */
140#define ASI_BLK_P 0xf0 /* Primary, blk ld/st */ 156#define ASI_BLK_P 0xf0 /* Primary, blk ld/st */
141#define ASI_BLK_S 0xf1 /* Secondary, blk ld/st */ 157#define ASI_BLK_S 0xf1 /* Secondary, blk ld/st */
142#define ASI_BLK_PL 0xf8 /* Primary, blk ld/st, little */ 158#define ASI_BLK_PL 0xf8 /* Primary, blk ld/st, little */
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index 74de79dca915..c66a81bbc84d 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -1,41 +1,224 @@
1/* cpudata.h: Per-cpu parameters. 1/* cpudata.h: Per-cpu parameters.
2 * 2 *
3 * Copyright (C) 2003, 2005 David S. Miller (davem@redhat.com) 3 * Copyright (C) 2003, 2005, 2006 David S. Miller (davem@davemloft.net)
4 */ 4 */
5 5
6#ifndef _SPARC64_CPUDATA_H 6#ifndef _SPARC64_CPUDATA_H
7#define _SPARC64_CPUDATA_H 7#define _SPARC64_CPUDATA_H
8 8
9#include <asm/hypervisor.h>
10#include <asm/asi.h>
11
12#ifndef __ASSEMBLY__
13
9#include <linux/percpu.h> 14#include <linux/percpu.h>
15#include <linux/threads.h>
10 16
11typedef struct { 17typedef struct {
12 /* Dcache line 1 */ 18 /* Dcache line 1 */
13 unsigned int __softirq_pending; /* must be 1st, see rtrap.S */ 19 unsigned int __softirq_pending; /* must be 1st, see rtrap.S */
14 unsigned int multiplier; 20 unsigned int multiplier;
15 unsigned int counter; 21 unsigned int counter;
16 unsigned int idle_volume; 22 unsigned int __pad1;
17 unsigned long clock_tick; /* %tick's per second */ 23 unsigned long clock_tick; /* %tick's per second */
18 unsigned long udelay_val; 24 unsigned long udelay_val;
19 25
20 /* Dcache line 2 */ 26 /* Dcache line 2, rarely used */
21 unsigned int pgcache_size;
22 unsigned int __pad1;
23 unsigned long *pte_cache[2];
24 unsigned long *pgd_cache;
25
26 /* Dcache line 3, rarely used */
27 unsigned int dcache_size; 27 unsigned int dcache_size;
28 unsigned int dcache_line_size; 28 unsigned int dcache_line_size;
29 unsigned int icache_size; 29 unsigned int icache_size;
30 unsigned int icache_line_size; 30 unsigned int icache_line_size;
31 unsigned int ecache_size; 31 unsigned int ecache_size;
32 unsigned int ecache_line_size; 32 unsigned int ecache_line_size;
33 unsigned int __pad2;
34 unsigned int __pad3; 33 unsigned int __pad3;
34 unsigned int __pad4;
35} cpuinfo_sparc; 35} cpuinfo_sparc;
36 36
37DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data); 37DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
38#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu)) 38#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
39#define local_cpu_data() __get_cpu_var(__cpu_data) 39#define local_cpu_data() __get_cpu_var(__cpu_data)
40 40
41/* Trap handling code needs to get at a few critical values upon
42 * trap entry and to process TSB misses. These cannot be in the
43 * per_cpu() area as we really need to lock them into the TLB and
44 * thus make them part of the main kernel image. As a result we
45 * try to make this as small as possible.
46 *
47 * This is padded out and aligned to 64-bytes to avoid false sharing
48 * on SMP.
49 */
50
51/* If you modify the size of this structure, please update
52 * TRAP_BLOCK_SZ_SHIFT below.
53 */
54struct thread_info;
55struct trap_per_cpu {
56/* D-cache line 1: Basic thread information, cpu and device mondo queues */
57 struct thread_info *thread;
58 unsigned long pgd_paddr;
59 unsigned long cpu_mondo_pa;
60 unsigned long dev_mondo_pa;
61
62/* D-cache line 2: Error Mondo Queue and kernel buffer pointers */
63 unsigned long resum_mondo_pa;
64 unsigned long resum_kernel_buf_pa;
65 unsigned long nonresum_mondo_pa;
66 unsigned long nonresum_kernel_buf_pa;
67
68/* Dcache lines 3, 4, 5, and 6: Hypervisor Fault Status */
69 struct hv_fault_status fault_info;
70
71/* Dcache line 7: Physical addresses of CPU send mondo block and CPU list. */
72 unsigned long cpu_mondo_block_pa;
73 unsigned long cpu_list_pa;
74 unsigned long __pad1[2];
75
76/* Dcache line 8: Unused, needed to keep trap_block a power-of-2 in size. */
77 unsigned long __pad2[4];
78} __attribute__((aligned(64)));
79extern struct trap_per_cpu trap_block[NR_CPUS];
80extern void init_cur_cpu_trap(struct thread_info *);
81extern void setup_tba(void);
82
83struct cpuid_patch_entry {
84 unsigned int addr;
85 unsigned int cheetah_safari[4];
86 unsigned int cheetah_jbus[4];
87 unsigned int starfire[4];
88 unsigned int sun4v[4];
89};
90extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
91
92struct sun4v_1insn_patch_entry {
93 unsigned int addr;
94 unsigned int insn;
95};
96extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch,
97 __sun4v_1insn_patch_end;
98
99struct sun4v_2insn_patch_entry {
100 unsigned int addr;
101 unsigned int insns[2];
102};
103extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
104 __sun4v_2insn_patch_end;
105
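(A hedged illustration of how boot code might consume one of these patch tables; the walker below is a sketch rather than the in-tree patching code, and the flush/barrier handling is simplified. Only the struct layout and the linker symbols come from this header.)

	static void sun4v_do_1insn_patch(void)
	{
		struct sun4v_1insn_patch_entry *p = &__sun4v_1insn_patch;

		while (p < &__sun4v_1insn_patch_end) {
			unsigned long addr = p->addr;

			/* Overwrite the tagged instruction with its sun4v variant. */
			*(unsigned int *) addr = p->insn;
			wmb();
			__asm__ __volatile__("flush	%0" : : "r" (addr));

			p++;
		}
	}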
106#endif /* !(__ASSEMBLY__) */
107
108#define TRAP_PER_CPU_THREAD 0x00
109#define TRAP_PER_CPU_PGD_PADDR 0x08
110#define TRAP_PER_CPU_CPU_MONDO_PA 0x10
111#define TRAP_PER_CPU_DEV_MONDO_PA 0x18
112#define TRAP_PER_CPU_RESUM_MONDO_PA 0x20
113#define TRAP_PER_CPU_RESUM_KBUF_PA 0x28
114#define TRAP_PER_CPU_NONRESUM_MONDO_PA 0x30
115#define TRAP_PER_CPU_NONRESUM_KBUF_PA 0x38
116#define TRAP_PER_CPU_FAULT_INFO 0x40
117#define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA 0xc0
118#define TRAP_PER_CPU_CPU_LIST_PA 0xc8
119
120#define TRAP_BLOCK_SZ_SHIFT 8
121
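(The byte offsets above mirror struct trap_per_cpu by hand for use from assembler. A hedged sanity-check sketch that C code could use to keep the two in sync; it is not part of this header, and BUILD_BUG_ON()/offsetof() are assumed from <linux/kernel.h> and <linux/stddef.h>.)

	static inline void trap_per_cpu_offsets_check(void)
	{
		BUILD_BUG_ON(offsetof(struct trap_per_cpu, thread)      != TRAP_PER_CPU_THREAD);
		BUILD_BUG_ON(offsetof(struct trap_per_cpu, pgd_paddr)   != TRAP_PER_CPU_PGD_PADDR);
		BUILD_BUG_ON(offsetof(struct trap_per_cpu, fault_info)  != TRAP_PER_CPU_FAULT_INFO);
		BUILD_BUG_ON(offsetof(struct trap_per_cpu, cpu_list_pa) != TRAP_PER_CPU_CPU_LIST_PA);
		/* TRAP_BLOCK_SZ_SHIFT must match the padded structure size. */
		BUILD_BUG_ON(sizeof(struct trap_per_cpu) != (1 << TRAP_BLOCK_SZ_SHIFT));
	}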
122#include <asm/scratchpad.h>
123
124#define __GET_CPUID(REG) \
125 /* Spitfire implementation (default). */ \
126661: ldxa [%g0] ASI_UPA_CONFIG, REG; \
127 srlx REG, 17, REG; \
128 and REG, 0x1f, REG; \
129 nop; \
130 .section .cpuid_patch, "ax"; \
131 /* Instruction location. */ \
132 .word 661b; \
133 /* Cheetah Safari implementation. */ \
134 ldxa [%g0] ASI_SAFARI_CONFIG, REG; \
135 srlx REG, 17, REG; \
136 and REG, 0x3ff, REG; \
137 nop; \
138 /* Cheetah JBUS implementation. */ \
139 ldxa [%g0] ASI_JBUS_CONFIG, REG; \
140 srlx REG, 17, REG; \
141 and REG, 0x1f, REG; \
142 nop; \
143 /* Starfire implementation. */ \
144 sethi %hi(0x1fff40000d0 >> 9), REG; \
145 sllx REG, 9, REG; \
146 or REG, 0xd0, REG; \
147 lduwa [REG] ASI_PHYS_BYPASS_EC_E, REG;\
148 /* sun4v implementation. */ \
149 mov SCRATCHPAD_CPUID, REG; \
150 ldxa [REG] ASI_SCRATCHPAD, REG; \
151 nop; \
152 nop; \
153 .previous;
154
155#ifdef CONFIG_SMP
156
157#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
158 __GET_CPUID(TMP) \
159 sethi %hi(trap_block), DEST; \
160 sllx TMP, TRAP_BLOCK_SZ_SHIFT, TMP; \
161 or DEST, %lo(trap_block), DEST; \
162 add DEST, TMP, DEST; \
163
164/* Clobbers TMP, loads current address space PGD physical address into DEST. */
165#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
166 TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
167 ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
168
169/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
170#define TRAP_LOAD_IRQ_WORK(DEST, TMP) \
171 __GET_CPUID(TMP) \
172 sethi %hi(__irq_work), DEST; \
173 sllx TMP, 6, TMP; \
174 or DEST, %lo(__irq_work), DEST; \
175 add DEST, TMP, DEST;
176
177/* Clobbers TMP, loads DEST with current thread info pointer. */
178#define TRAP_LOAD_THREAD_REG(DEST, TMP) \
179 TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
180 ldx [DEST + TRAP_PER_CPU_THREAD], DEST;
181
182/* Given the current thread info pointer in THR, load the per-cpu
183 * area base of the current processor into DEST. REG1, REG2, and REG3 are
184 * clobbered.
185 *
186 * You absolutely cannot use DEST as a temporary in this code. The
187 * reason is that traps can happen during execution, and return from
188 * trap will load the fully resolved DEST per-cpu base. This can corrupt
189 * the calculations done by the macro mid-stream.
190 */
191#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) \
192 ldub [THR + TI_CPU], REG1; \
193 sethi %hi(__per_cpu_shift), REG3; \
194 sethi %hi(__per_cpu_base), REG2; \
195 ldx [REG3 + %lo(__per_cpu_shift)], REG3; \
196 ldx [REG2 + %lo(__per_cpu_base)], REG2; \
197 sllx REG1, REG3, REG3; \
198 add REG3, REG2, DEST;
199
200#else
201
202#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
203 sethi %hi(trap_block), DEST; \
204 or DEST, %lo(trap_block), DEST; \
205
206/* Uniprocessor versions, we know the cpuid is zero. */
207#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
208 TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
209 ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
210
211#define TRAP_LOAD_IRQ_WORK(DEST, TMP) \
212 sethi %hi(__irq_work), DEST; \
213 or DEST, %lo(__irq_work), DEST;
214
215#define TRAP_LOAD_THREAD_REG(DEST, TMP) \
216 TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
217 ldx [DEST + TRAP_PER_CPU_THREAD], DEST;
218
219/* No per-cpu areas on uniprocessor, so no need to load DEST. */
220#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)
221
222#endif /* !(CONFIG_SMP) */
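(For reference, the SMP LOAD_PER_CPU_BASE sequence above computes, in rough C terms, with thr being the current thread_info pointer; shown only as an illustration:)

	dest = __per_cpu_base + ((unsigned long) thr->cpu << __per_cpu_shift);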
223
41#endif /* _SPARC64_CPUDATA_H */ 224#endif /* _SPARC64_CPUDATA_H */
diff --git a/include/asm-sparc64/elf.h b/include/asm-sparc64/elf.h
index 69539a8ab833..303d85e2f82e 100644
--- a/include/asm-sparc64/elf.h
+++ b/include/asm-sparc64/elf.h
@@ -10,6 +10,7 @@
10#ifdef __KERNEL__ 10#ifdef __KERNEL__
11#include <asm/processor.h> 11#include <asm/processor.h>
12#include <asm/uaccess.h> 12#include <asm/uaccess.h>
13#include <asm/spitfire.h>
13#endif 14#endif
14 15
15/* 16/*
@@ -68,6 +69,7 @@
68#define HWCAP_SPARC_MULDIV 8 69#define HWCAP_SPARC_MULDIV 8
69#define HWCAP_SPARC_V9 16 70#define HWCAP_SPARC_V9 16
70#define HWCAP_SPARC_ULTRA3 32 71#define HWCAP_SPARC_ULTRA3 32
72#define HWCAP_SPARC_BLKINIT 64
71 73
72/* 74/*
73 * These are used to set parameters in the core dumps. 75 * These are used to set parameters in the core dumps.
@@ -145,11 +147,21 @@ typedef struct {
145 instruction set this cpu supports. */ 147 instruction set this cpu supports. */
146 148
147/* On Ultra, we support all of the v8 capabilities. */ 149/* On Ultra, we support all of the v8 capabilities. */
148#define ELF_HWCAP ((HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | \ 150static inline unsigned int sparc64_elf_hwcap(void)
149 HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV | \ 151{
150 HWCAP_SPARC_V9) | \ 152 unsigned int cap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
151 ((tlb_type == cheetah || tlb_type == cheetah_plus) ? \ 153 HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV |
152 HWCAP_SPARC_ULTRA3 : 0)) 154 HWCAP_SPARC_V9);
155
156 if (tlb_type == cheetah || tlb_type == cheetah_plus)
157 cap |= HWCAP_SPARC_ULTRA3;
158 else if (tlb_type == hypervisor)
159 cap |= HWCAP_SPARC_BLKINIT;
160
161 return cap;
162}
163
164#define ELF_HWCAP sparc64_elf_hwcap()
153 165
154/* This yields a string that ld.so will use to load implementation 166/* This yields a string that ld.so will use to load implementation
155 specific libraries for optimization. This is more specific in 167 specific libraries for optimization. This is more specific in
diff --git a/include/asm-sparc64/head.h b/include/asm-sparc64/head.h
index 0abd3a674e8f..67960a751f4d 100644
--- a/include/asm-sparc64/head.h
+++ b/include/asm-sparc64/head.h
@@ -4,12 +4,21 @@
4 4
5#include <asm/pstate.h> 5#include <asm/pstate.h>
6 6
7 /* wrpr %g0, val, %gl */
8#define SET_GL(val) \
9 .word 0xa1902000 | val
10
11 /* rdpr %gl, %gN */
12#define GET_GL_GLOBAL(N) \
13 .word 0x81540000 | (N << 25)
14
7#define KERNBASE 0x400000 15#define KERNBASE 0x400000
8 16
9#define PTREGS_OFF (STACK_BIAS + STACKFRAME_SZ) 17#define PTREGS_OFF (STACK_BIAS + STACKFRAME_SZ)
10 18
11#define __CHEETAH_ID 0x003e0014 19#define __CHEETAH_ID 0x003e0014
12#define __JALAPENO_ID 0x003e0016 20#define __JALAPENO_ID 0x003e0016
21#define __SERRANO_ID 0x003e0022
13 22
14#define CHEETAH_MANUF 0x003e 23#define CHEETAH_MANUF 0x003e
15#define CHEETAH_IMPL 0x0014 /* Ultra-III */ 24#define CHEETAH_IMPL 0x0014 /* Ultra-III */
@@ -19,6 +28,12 @@
19#define PANTHER_IMPL 0x0019 /* Ultra-IV+ */ 28#define PANTHER_IMPL 0x0019 /* Ultra-IV+ */
20#define SERRANO_IMPL 0x0022 /* Ultra-IIIi+ */ 29#define SERRANO_IMPL 0x0022 /* Ultra-IIIi+ */
21 30
31#define BRANCH_IF_SUN4V(tmp1,label) \
32 sethi %hi(is_sun4v), %tmp1; \
33 lduw [%tmp1 + %lo(is_sun4v)], %tmp1; \
34 brnz,pn %tmp1, label; \
35 nop
36
22#define BRANCH_IF_CHEETAH_BASE(tmp1,tmp2,label) \ 37#define BRANCH_IF_CHEETAH_BASE(tmp1,tmp2,label) \
23 rdpr %ver, %tmp1; \ 38 rdpr %ver, %tmp1; \
24 sethi %hi(__CHEETAH_ID), %tmp2; \ 39 sethi %hi(__CHEETAH_ID), %tmp2; \
diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h
new file mode 100644
index 000000000000..612bf319753f
--- /dev/null
+++ b/include/asm-sparc64/hypervisor.h
@@ -0,0 +1,2128 @@
1#ifndef _SPARC64_HYPERVISOR_H
2#define _SPARC64_HYPERVISOR_H
3
4/* Sun4v hypervisor interfaces and defines.
5 *
6 * Hypervisor calls are made via traps to software traps number 0x80
7 * and above. Registers %o0 to %o5 serve as argument, status, and
8 * return value registers.
9 *
10 * There are two kinds of these traps. First there are the normal
11 * "fast traps" which use software trap 0x80 and encode the function
12 * to invoke by number in register %o5. Argument and return value
13 * handling is as follows:
14 *
15 * -----------------------------------------------
16 * | %o5 | function number | undefined |
17 * | %o0 | argument 0 | return status |
18 * | %o1 | argument 1 | return value 1 |
19 * | %o2 | argument 2 | return value 2 |
20 * | %o3 | argument 3 | return value 3 |
21 * | %o4 | argument 4 | return value 4 |
22 * -----------------------------------------------
23 *
24 * The second type are "hyper-fast traps" which encode the function
25 * number in the software trap number itself. So these use trap
26 * numbers > 0x80. The register usage for hyper-fast traps is as
27 * follows:
28 *
29 * -----------------------------------------------
30 * | %o0 | argument 0 | return status |
31 * | %o1 | argument 1 | return value 1 |
32 * | %o2 | argument 2 | return value 2 |
33 * | %o3 | argument 3 | return value 3 |
34 * | %o4 | argument 4 | return value 4 |
35 * -----------------------------------------------
36 *
37 * Registers providing explicit arguments to the hypervisor calls
38 * are volatile across the call. Upon return their values are
39 * undefined unless explicitly specified as containing a particular
40 * return value by the specific call. The return status is always
41 * returned in register %o0, zero indicates a successful execution of
42 * the hypervisor call and other values indicate an error status as
43 * defined below. So, for example, if a hyper-fast trap takes
44 * arguments 0, 1, and 2, then %o0, %o1, and %o2 are volatile across
45 * the call and %o3, %o4, and %o5 would be preserved.
46 *
47 * If the hypervisor trap is invalid, or the fast trap function number
48 * is invalid, HV_EBADTRAP will be returned in %o0. Also, all 64-bits
49 * of the argument and return values are significant.
50 */
51
52/* Trap numbers. */
53#define HV_FAST_TRAP 0x80
54#define HV_MMU_MAP_ADDR_TRAP 0x83
55#define HV_MMU_UNMAP_ADDR_TRAP 0x84
56#define HV_TTRACE_ADDENTRY_TRAP 0x85
57#define HV_CORE_TRAP 0xff
58
59/* Error codes. */
60#define HV_EOK 0 /* Successful return */
61#define HV_ENOCPU 1 /* Invalid CPU id */
62#define HV_ENORADDR 2 /* Invalid real address */
63#define HV_ENOINTR 3 /* Invalid interrupt id */
64#define HV_EBADPGSZ 4 /* Invalid pagesize encoding */
65#define HV_EBADTSB 5 /* Invalid TSB description */
66#define HV_EINVAL 6 /* Invalid argument */
67#define HV_EBADTRAP 7 /* Invalid function number */
68#define HV_EBADALIGN 8 /* Invalid address alignment */
69#define HV_EWOULDBLOCK 9 /* Cannot complete w/o blocking */
70#define HV_ENOACCESS 10 /* No access to resource */
71#define HV_EIO 11 /* I/O error */
72#define HV_ECPUERROR 12 /* CPU in error state */
73#define HV_ENOTSUPPORTED 13 /* Function not supported */
74#define HV_ENOMAP 14 /* No mapping found */
75#define HV_ETOOMANY 15 /* Too many items specified */
76
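(A hedged sketch of how C callers might fold these status codes into kernel errno values; the helper name and the particular mapping are illustrative, not defined by this header, and <linux/errno.h> is assumed.)

	static inline int hv_errno(unsigned long hv_err)
	{
		switch (hv_err) {
		case HV_EOK:		return 0;
		case HV_ENORADDR:
		case HV_ENOMAP:		return -EFAULT;	/* bad real address / no mapping */
		case HV_EWOULDBLOCK:	return -EAGAIN;	/* retry later */
		case HV_ENOTSUPPORTED:	return -ENOSYS;	/* service not implemented */
		default:		return -EINVAL;	/* all other error codes */
		}
	}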
77/* mach_exit()
78 * TRAP: HV_FAST_TRAP
79 * FUNCTION: HV_FAST_MACH_EXIT
80 * ARG0: exit code
81 * ERRORS: This service does not return.
82 *
83 * Stop all CPUs in the virtual domain and place them into the stopped
84 * state. The 64-bit exit code may be passed to a service entity as
85 * the domain's exit status. On systems without a service entity, the
86 * domain will undergo a reset, and the boot firmware will be
87 * reloaded.
88 *
89 * This function will never return to the guest that invokes it.
90 *
91 * Note: By convention an exit code of zero denotes a successful exit by
92 * the guest code. A non-zero exit code denotes a guest specific
93 * error indication.
94 *
95 */
96#define HV_FAST_MACH_EXIT 0x00
97
98/* Domain services. */
99
100/* mach_desc()
101 * TRAP: HV_FAST_TRAP
102 * FUNCTION: HV_FAST_MACH_DESC
103 * ARG0: buffer
104 * ARG1: length
105 * RET0: status
106 * RET1: length
107 * ERRORS: HV_EBADALIGN Buffer is badly aligned
 108 * HV_ENORADDR Buffer is at an illegal real address.
109 * HV_EINVAL Buffer length is too small for complete
110 * machine description.
111 *
112 * Copy the most current machine description into the buffer indicated
113 * by the real address in ARG0. The buffer provided must be 16 byte
114 * aligned. Upon success or HV_EINVAL, this service returns the
115 * actual size of the machine description in the RET1 return value.
116 *
117 * Note: A method of determining the appropriate buffer size for the
118 * machine description is to first call this service with a buffer
119 * length of 0 bytes.
120 */
121#define HV_FAST_MACH_DESC 0x01
122
 123/* mach_sir()
124 * TRAP: HV_FAST_TRAP
125 * FUNCTION: HV_FAST_MACH_SIR
126 * ERRORS: This service does not return.
127 *
128 * Perform a software initiated reset of the virtual machine domain.
129 * All CPUs are captured as soon as possible, all hardware devices are
130 * returned to the entry default state, and the domain is restarted at
131 * the SIR (trap type 0x04) real trap table (RTBA) entry point on one
132 * of the CPUs. The single CPU restarted is selected as determined by
133 * platform specific policy. Memory is preserved across this
134 * operation.
135 */
136#define HV_FAST_MACH_SIR 0x02
137
138/* mach_set_soft_state()
139 * TRAP: HV_FAST_TRAP
140 * FUNCTION: HV_FAST_MACH_SET_SOFT_STATE
141 * ARG0: software state
142 * ARG1: software state description pointer
143 * RET0: status
144 * ERRORS: EINVAL software state not valid or software state
145 * description is not NULL terminated
146 * ENORADDR software state description pointer is not a
147 * valid real address
 148 * EBADALIGN software state description is not correctly
149 * aligned
150 *
 151 * This allows the guest to report its soft state to the hypervisor. There
152 * are two primary components to this state. The first part states whether
 153 * the guest software is running or not. The second contains optional
154 * details specific to the software.
155 *
156 * The software state argument is defined below in HV_SOFT_STATE_*, and
157 * indicates whether the guest is operating normally or in a transitional
158 * state.
159 *
160 * The software state description argument is a real address of a data buffer
161 * of size 32-bytes aligned on a 32-byte boundary. It is treated as a NULL
162 * terminated 7-bit ASCII string of up to 31 characters not including the
163 * NULL termination.
164 */
165#define HV_FAST_MACH_SET_SOFT_STATE 0x03
166#define HV_SOFT_STATE_NORMAL 0x01
167#define HV_SOFT_STATE_TRANSITION 0x02
168
169/* mach_get_soft_state()
170 * TRAP: HV_FAST_TRAP
171 * FUNCTION: HV_FAST_MACH_GET_SOFT_STATE
172 * ARG0: software state description pointer
173 * RET0: status
174 * RET1: software state
175 * ERRORS: ENORADDR software state description pointer is not a
176 * valid real address
 177 * EBADALIGN software state description is not correctly
178 * aligned
179 *
180 * Retrieve the current value of the guest's software state. The rules
181 * for the software state pointer are the same as for mach_set_soft_state()
182 * above.
183 */
184#define HV_FAST_MACH_GET_SOFT_STATE 0x04
185
186/* CPU services.
187 *
188 * CPUs represent devices that can execute software threads. A single
189 * chip that contains multiple cores or strands is represented as
190 * multiple CPUs with unique CPU identifiers. CPUs are exported to
191 * OBP via the machine description (and to the OS via the OBP device
192 * tree). CPUs are always in one of three states: stopped, running,
193 * or error.
194 *
195 * A CPU ID is a pre-assigned 16-bit value that uniquely identifies a
196 * CPU within a logical domain. Operations that are to be performed
197 * on multiple CPUs specify them via a CPU list. A CPU list is an
198 * array in real memory, of which each 16-bit word is a CPU ID. CPU
199 * lists are passed through the API as two arguments. The first is
200 * the number of entries (16-bit words) in the CPU list, and the
201 * second is the (real address) pointer to the CPU ID list.
202 */
203
204/* cpu_start()
205 * TRAP: HV_FAST_TRAP
206 * FUNCTION: HV_FAST_CPU_START
207 * ARG0: CPU ID
208 * ARG1: PC
 209 * ARG2: RTBA
 210 * ARG3: target ARG0
211 * RET0: status
212 * ERRORS: ENOCPU Invalid CPU ID
213 * EINVAL Target CPU ID is not in the stopped state
214 * ENORADDR Invalid PC or RTBA real address
215 * EBADALIGN Unaligned PC or unaligned RTBA
216 * EWOULDBLOCK Starting resources are not available
217 *
218 * Start CPU with given CPU ID with PC in %pc and with a real trap
219 * base address value of RTBA. The indicated CPU must be in the
220 * stopped state. The supplied RTBA must be aligned on a 256 byte
221 * boundary. On successful completion, the specified CPU will be in
222 * the running state and will be supplied with "target ARG0" in %o0
223 * and RTBA in %tba.
224 */
225#define HV_FAST_CPU_START 0x10
226
227/* cpu_stop()
228 * TRAP: HV_FAST_TRAP
229 * FUNCTION: HV_FAST_CPU_STOP
230 * ARG0: CPU ID
231 * RET0: status
232 * ERRORS: ENOCPU Invalid CPU ID
233 * EINVAL Target CPU ID is the current cpu
234 * EINVAL Target CPU ID is not in the running state
235 * EWOULDBLOCK Stopping resources are not available
236 * ENOTSUPPORTED Not supported on this platform
237 *
238 * The specified CPU is stopped. The indicated CPU must be in the
239 * running state. On completion, it will be in the stopped state. It
240 * is not legal to stop the current CPU.
241 *
242 * Note: As this service cannot be used to stop the current cpu, this service
243 * may not be used to stop the last running CPU in a domain. To stop
244 * and exit a running domain, a guest must use the mach_exit() service.
245 */
246#define HV_FAST_CPU_STOP 0x11
247
248/* cpu_yield()
249 * TRAP: HV_FAST_TRAP
250 * FUNCTION: HV_FAST_CPU_YIELD
251 * RET0: status
252 * ERRORS: No possible error.
253 *
254 * Suspend execution on the current CPU. Execution will resume when
255 * an interrupt (device, %stick_compare, or cross-call) is targeted to
256 * the CPU. On some CPUs, this API may be used by the hypervisor to
257 * save power by disabling hardware strands.
258 */
259#define HV_FAST_CPU_YIELD 0x12
260
261#ifndef __ASSEMBLY__
262extern unsigned long sun4v_cpu_yield(void);
263#endif
264
265/* cpu_qconf()
266 * TRAP: HV_FAST_TRAP
267 * FUNCTION: HV_FAST_CPU_QCONF
268 * ARG0: queue
269 * ARG1: base real address
270 * ARG2: number of entries
271 * RET0: status
272 * ERRORS: ENORADDR Invalid base real address
273 * EINVAL Invalid queue or number of entries is less
274 * than 2 or too large.
275 * EBADALIGN Base real address is not correctly aligned
276 * for size.
277 *
278 * Configure the given queue to be placed at the given base real
279 * address, with the given number of entries. The number of entries
280 * must be a power of 2. The base real address must be aligned
281 * exactly to match the queue size. Each queue entry is 64 bytes
282 * long, so for example a 32 entry queue must be aligned on a 2048
283 * byte real address boundary.
284 *
285 * The specified queue is unconfigured if the number of entries is given
286 * as zero.
287 *
288 * For the current version of this API service, the argument queue is defined
289 * as follows:
290 *
291 * queue description
292 * ----- -------------------------
293 * 0x3c cpu mondo queue
294 * 0x3d device mondo queue
295 * 0x3e resumable error queue
296 * 0x3f non-resumable error queue
297 *
298 * Note: The maximum number of entries for each queue for a specific cpu may
299 * be determined from the machine description.
300 */
301#define HV_FAST_CPU_QCONF 0x14
302#define HV_CPU_QUEUE_CPU_MONDO 0x3c
303#define HV_CPU_QUEUE_DEVICE_MONDO 0x3d
304#define HV_CPU_QUEUE_RES_ERROR 0x3e
305#define HV_CPU_QUEUE_NONRES_ERROR 0x3f
306
307#ifndef __ASSEMBLY__
308extern unsigned long sun4v_cpu_qconf(unsigned long type,
309 unsigned long queue_paddr,
310 unsigned long num_queue_entries);
311#endif
312
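(As an illustration of the alignment rule above, a boot-time caller might register a 128-entry cpu mondo queue like this; queue_pa is a hypothetical, suitably aligned real address and the error handling is only a sketch.)

	unsigned long hv_err;

	/* 128 entries * 64 bytes = 8K, so queue_pa must be 8K aligned. */
	hv_err = sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO, queue_pa, 128);
	if (hv_err != HV_EOK)
		prom_printf("cpu_qconf(CPU_MONDO) failed, err=%lu\n", hv_err);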
313/* cpu_qinfo()
314 * TRAP: HV_FAST_TRAP
315 * FUNCTION: HV_FAST_CPU_QINFO
316 * ARG0: queue
317 * RET0: status
318 * RET1: base real address
 319 * RET2: number of entries
320 * ERRORS: EINVAL Invalid queue
321 *
322 * Return the configuration info for the given queue. The base real
323 * address and number of entries of the defined queue are returned.
324 * The queue argument values are the same as for cpu_qconf() above.
325 *
326 * If the specified queue is a valid queue number, but no queue has
327 * been defined, the number of entries will be set to zero and the
328 * base real address returned is undefined.
329 */
330#define HV_FAST_CPU_QINFO 0x15
331
332/* cpu_mondo_send()
333 * TRAP: HV_FAST_TRAP
334 * FUNCTION: HV_FAST_CPU_MONDO_SEND
335 * ARG0-1: CPU list
336 * ARG2: data real address
337 * RET0: status
338 * ERRORS: EBADALIGN Mondo data is not 64-byte aligned or CPU list
339 * is not 2-byte aligned.
340 * ENORADDR Invalid data mondo address, or invalid cpu list
341 * address.
342 * ENOCPU Invalid cpu in CPU list
343 * EWOULDBLOCK Some or all of the listed CPUs did not receive
344 * the mondo
345 * ECPUERROR One or more of the listed CPUs are in error
346 * state, use HV_FAST_CPU_STATE to see which ones
347 * EINVAL CPU list includes caller's CPU ID
348 *
349 * Send a mondo interrupt to the CPUs in the given CPU list with the
350 * 64-bytes at the given data real address. The data must be 64-byte
351 * aligned. The mondo data will be delivered to the cpu_mondo queues
352 * of the recipient CPUs.
353 *
354 * In all cases, error or not, the CPUs in the CPU list to which the
355 * mondo has been successfully delivered will be indicated by having
356 * their entry in CPU list updated with the value 0xffff.
357 */
358#define HV_FAST_CPU_MONDO_SEND 0x42
359
360#ifndef __ASSEMBLY__
361extern unsigned long sun4v_cpu_mondo_send(unsigned long cpu_count, unsigned long cpu_list_pa, unsigned long mondo_block_pa);
362#endif
363
364/* cpu_myid()
365 * TRAP: HV_FAST_TRAP
366 * FUNCTION: HV_FAST_CPU_MYID
367 * RET0: status
368 * RET1: CPU ID
369 * ERRORS: No errors defined.
370 *
 371 * Return the hypervisor ID handle for the current CPU. Used by a
 372 * virtual CPU to discover its own identity.
373 */
374#define HV_FAST_CPU_MYID 0x16
375
376/* cpu_state()
377 * TRAP: HV_FAST_TRAP
378 * FUNCTION: HV_FAST_CPU_STATE
379 * ARG0: CPU ID
380 * RET0: status
381 * RET1: state
382 * ERRORS: ENOCPU Invalid CPU ID
383 *
384 * Retrieve the current state of the CPU with the given CPU ID.
385 */
386#define HV_FAST_CPU_STATE 0x17
387#define HV_CPU_STATE_STOPPED 0x01
388#define HV_CPU_STATE_RUNNING 0x02
389#define HV_CPU_STATE_ERROR 0x03
390
391#ifndef __ASSEMBLY__
392extern long sun4v_cpu_state(unsigned long cpuid);
393#endif
394
395/* cpu_set_rtba()
396 * TRAP: HV_FAST_TRAP
397 * FUNCTION: HV_FAST_CPU_SET_RTBA
398 * ARG0: RTBA
399 * RET0: status
400 * RET1: previous RTBA
401 * ERRORS: ENORADDR Invalid RTBA real address
402 * EBADALIGN RTBA is incorrectly aligned for a trap table
403 *
404 * Set the real trap base address of the local cpu to the given RTBA.
405 * The supplied RTBA must be aligned on a 256 byte boundary. Upon
406 * success the previous value of the RTBA is returned in RET1.
407 *
408 * Note: This service does not affect %tba
409 */
410#define HV_FAST_CPU_SET_RTBA 0x18
411
 412/* cpu_get_rtba()
413 * TRAP: HV_FAST_TRAP
414 * FUNCTION: HV_FAST_CPU_GET_RTBA
415 * RET0: status
416 * RET1: previous RTBA
417 * ERRORS: No possible error.
418 *
419 * Returns the current value of RTBA in RET1.
420 */
421#define HV_FAST_CPU_GET_RTBA 0x19
422
423/* MMU services.
424 *
425 * Layout of a TSB description for mmu_tsb_ctx{,non}0() calls.
426 */
427#ifndef __ASSEMBLY__
428struct hv_tsb_descr {
429 unsigned short pgsz_idx;
430 unsigned short assoc;
431 unsigned int num_ttes; /* in TTEs */
432 unsigned int ctx_idx;
433 unsigned int pgsz_mask;
434 unsigned long tsb_base;
435 unsigned long resv;
436};
437#endif
438#define HV_TSB_DESCR_PGSZ_IDX_OFFSET 0x00
439#define HV_TSB_DESCR_ASSOC_OFFSET 0x02
440#define HV_TSB_DESCR_NUM_TTES_OFFSET 0x04
441#define HV_TSB_DESCR_CTX_IDX_OFFSET 0x08
442#define HV_TSB_DESCR_PGSZ_MASK_OFFSET 0x0c
443#define HV_TSB_DESCR_TSB_BASE_OFFSET 0x10
444#define HV_TSB_DESCR_RESV_OFFSET 0x18
445
446/* Page size bitmask. */
447#define HV_PGSZ_MASK_8K (1 << 0)
448#define HV_PGSZ_MASK_64K (1 << 1)
449#define HV_PGSZ_MASK_512K (1 << 2)
450#define HV_PGSZ_MASK_4MB (1 << 3)
451#define HV_PGSZ_MASK_32MB (1 << 4)
452#define HV_PGSZ_MASK_256MB (1 << 5)
453#define HV_PGSZ_MASK_2GB (1 << 6)
454#define HV_PGSZ_MASK_16GB (1 << 7)
455
456/* Page size index. The value given in the TSB descriptor must correspond
457 * to the smallest page size specified in the pgsz_mask page size bitmask.
458 */
459#define HV_PGSZ_IDX_8K 0
460#define HV_PGSZ_IDX_64K 1
461#define HV_PGSZ_IDX_512K 2
462#define HV_PGSZ_IDX_4MB 3
463#define HV_PGSZ_IDX_32MB 4
464#define HV_PGSZ_IDX_256MB 5
465#define HV_PGSZ_IDX_2GB 6
466#define HV_PGSZ_IDX_16GB 7
467
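(A hedged example of filling one descriptor for a direct-mapped, 8K-page TSB before handing it to mmu_tsb_ctx0(); tsb_real_addr and tsb_bytes are hypothetical, and only the struct and the HV_PGSZ_* constants come from this header.)

	struct hv_tsb_descr desc = {
		.pgsz_idx	= HV_PGSZ_IDX_8K,	/* must be smallest size in pgsz_mask */
		.assoc		= 1,			/* direct mapped */
		.num_ttes	= tsb_bytes / 16,	/* each TSB entry (tag + TTE) is 16 bytes */
		.ctx_idx	= 0,			/* context index 0 */
		.pgsz_mask	= HV_PGSZ_MASK_8K,
		.tsb_base	= tsb_real_addr,	/* real address of the TSB memory */
		.resv		= 0,
	};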
468/* MMU fault status area.
469 *
470 * MMU related faults have their status and fault address information
471 * placed into a memory region made available by privileged code. Each
472 * virtual processor must make a mmu_fault_area_conf() call to tell the
473 * hypervisor where that processor's fault status should be stored.
474 *
475 * The fault status block is a multiple of 64-bytes and must be aligned
476 * on a 64-byte boundary.
477 */
478#ifndef __ASSEMBLY__
479struct hv_fault_status {
480 unsigned long i_fault_type;
481 unsigned long i_fault_addr;
482 unsigned long i_fault_ctx;
483 unsigned long i_reserved[5];
484 unsigned long d_fault_type;
485 unsigned long d_fault_addr;
486 unsigned long d_fault_ctx;
487 unsigned long d_reserved[5];
488};
489#endif
490#define HV_FAULT_I_TYPE_OFFSET 0x00
491#define HV_FAULT_I_ADDR_OFFSET 0x08
492#define HV_FAULT_I_CTX_OFFSET 0x10
493#define HV_FAULT_D_TYPE_OFFSET 0x40
494#define HV_FAULT_D_ADDR_OFFSET 0x48
495#define HV_FAULT_D_CTX_OFFSET 0x50
496
497#define HV_FAULT_TYPE_FAST_MISS 1
498#define HV_FAULT_TYPE_FAST_PROT 2
499#define HV_FAULT_TYPE_MMU_MISS 3
500#define HV_FAULT_TYPE_INV_RA 4
501#define HV_FAULT_TYPE_PRIV_VIOL 5
502#define HV_FAULT_TYPE_PROT_VIOL 6
503#define HV_FAULT_TYPE_NFO 7
504#define HV_FAULT_TYPE_NFO_SEFF 8
505#define HV_FAULT_TYPE_INV_VA 9
506#define HV_FAULT_TYPE_INV_ASI 10
507#define HV_FAULT_TYPE_NC_ATOMIC 11
508#define HV_FAULT_TYPE_PRIV_ACT 12
509#define HV_FAULT_TYPE_RESV1 13
510#define HV_FAULT_TYPE_UNALIGNED 14
511#define HV_FAULT_TYPE_INV_PGSZ 15
512/* Values 16 --> -2 are reserved. */
513#define HV_FAULT_TYPE_MULTIPLE -1
514
515/* Flags argument for mmu_{map,unmap}_addr(), mmu_demap_{page,context,all}(),
516 * and mmu_{map,unmap}_perm_addr().
517 */
518#define HV_MMU_DMMU 0x01
519#define HV_MMU_IMMU 0x02
520#define HV_MMU_ALL (HV_MMU_DMMU | HV_MMU_IMMU)
521
522/* mmu_map_addr()
523 * TRAP: HV_MMU_MAP_ADDR_TRAP
524 * ARG0: virtual address
525 * ARG1: mmu context
526 * ARG2: TTE
527 * ARG3: flags (HV_MMU_{IMMU,DMMU})
528 * ERRORS: EINVAL Invalid virtual address, mmu context, or flags
529 * EBADPGSZ Invalid page size value
530 * ENORADDR Invalid real address in TTE
531 *
532 * Create a non-permanent mapping using the given TTE, virtual
533 * address, and mmu context. The flags argument determines which
534 * (data, or instruction, or both) TLB the mapping gets loaded into.
535 *
536 * The behavior is undefined if the valid bit is clear in the TTE.
537 *
538 * Note: This API call is for privileged code to specify temporary translation
539 * mappings without the need to create and manage a TSB.
540 */
541
542/* mmu_unmap_addr()
543 * TRAP: HV_MMU_UNMAP_ADDR_TRAP
544 * ARG0: virtual address
545 * ARG1: mmu context
546 * ARG2: flags (HV_MMU_{IMMU,DMMU})
547 * ERRORS: EINVAL Invalid virtual address, mmu context, or flags
548 *
549 * Demaps the given virtual address in the given mmu context on this
550 * CPU. This function is intended to be used to demap pages mapped
551 * with mmu_map_addr. This service is equivalent to invoking
552 * mmu_demap_page() with only the current CPU in the CPU list. The
553 * flags argument determines which (data, or instruction, or both) TLB
554 * the mapping gets unmapped from.
555 *
556 * Attempting to perform an unmap operation for a previously defined
557 * permanent mapping will have undefined results.
558 */
559
560/* mmu_tsb_ctx0()
561 * TRAP: HV_FAST_TRAP
562 * FUNCTION: HV_FAST_MMU_TSB_CTX0
563 * ARG0: number of TSB descriptions
564 * ARG1: TSB descriptions pointer
565 * RET0: status
566 * ERRORS: ENORADDR Invalid TSB descriptions pointer or
567 * TSB base within a descriptor
568 * EBADALIGN TSB descriptions pointer is not aligned
569 * to an 8-byte boundary, or TSB base
570 * within a descriptor is not aligned for
571 * the given TSB size
572 * EBADPGSZ Invalid page size in a TSB descriptor
573 * EBADTSB Invalid associativity or size in a TSB
574 * descriptor
575 * EINVAL Invalid number of TSB descriptions, or
576 * invalid context index in a TSB
577 * descriptor, or index page size not
578 * equal to smallest page size in page
579 * size bitmask field.
580 *
581 * Configures the TSBs for the current CPU for virtual addresses with
582 * context zero. The TSB descriptions pointer is a pointer to an
583 * array of the given number of TSB descriptions.
584 *
585 * Note: The maximum number of TSBs available to a virtual CPU is given by the
586 * mmu-max-#tsbs property of the cpu's corresponding "cpu" node in the
587 * machine description.
588 */
589#define HV_FAST_MMU_TSB_CTX0 0x20
590
591/* mmu_tsb_ctxnon0()
592 * TRAP: HV_FAST_TRAP
593 * FUNCTION: HV_FAST_MMU_TSB_CTXNON0
594 * ARG0: number of TSB descriptions
595 * ARG1: TSB descriptions pointer
596 * RET0: status
597 * ERRORS: Same as for mmu_tsb_ctx0() above.
598 *
599 * Configures the TSBs for the current CPU for virtual addresses with
600 * non-zero contexts. The TSB descriptions pointer is a pointer to an
601 * array of the given number of TSB descriptions.
602 *
603 * Note: A maximum of 16 TSBs may be specified in the TSB description list.
604 */
605#define HV_FAST_MMU_TSB_CTXNON0 0x21
606
607/* mmu_demap_page()
608 * TRAP: HV_FAST_TRAP
609 * FUNCTION: HV_FAST_MMU_DEMAP_PAGE
610 * ARG0: reserved, must be zero
611 * ARG1: reserved, must be zero
612 * ARG2: virtual address
613 * ARG3: mmu context
614 * ARG4: flags (HV_MMU_{IMMU,DMMU})
615 * RET0: status
 616 * ERRORS: EINVAL Invalid virtual address, context, or
617 * flags value
618 * ENOTSUPPORTED ARG0 or ARG1 is non-zero
619 *
620 * Demaps any page mapping of the given virtual address in the given
621 * mmu context for the current virtual CPU. Any virtually tagged
622 * caches are guaranteed to be kept consistent. The flags argument
623 * determines which TLB (instruction, or data, or both) participate in
624 * the operation.
625 *
626 * ARG0 and ARG1 are both reserved and must be set to zero.
627 */
628#define HV_FAST_MMU_DEMAP_PAGE 0x22
629
630/* mmu_demap_ctx()
631 * TRAP: HV_FAST_TRAP
632 * FUNCTION: HV_FAST_MMU_DEMAP_CTX
633 * ARG0: reserved, must be zero
634 * ARG1: reserved, must be zero
635 * ARG2: mmu context
636 * ARG3: flags (HV_MMU_{IMMU,DMMU})
637 * RET0: status
638 * ERRORS: EINVAL Invalid context or flags value
639 * ENOTSUPPORTED ARG0 or ARG1 is non-zero
640 *
641 * Demaps all non-permanent virtual page mappings previously specified
642 * for the given context for the current virtual CPU. Any virtual
643 * tagged caches are guaranteed to be kept consistent. The flags
644 * argument determines which TLB (instruction, or data, or both)
645 * participate in the operation.
646 *
647 * ARG0 and ARG1 are both reserved and must be set to zero.
648 */
649#define HV_FAST_MMU_DEMAP_CTX 0x23
650
651/* mmu_demap_all()
652 * TRAP: HV_FAST_TRAP
653 * FUNCTION: HV_FAST_MMU_DEMAP_ALL
654 * ARG0: reserved, must be zero
655 * ARG1: reserved, must be zero
656 * ARG2: flags (HV_MMU_{IMMU,DMMU})
657 * RET0: status
658 * ERRORS: EINVAL Invalid flags value
659 * ENOTSUPPORTED ARG0 or ARG1 is non-zero
660 *
661 * Demaps all non-permanent virtual page mappings previously specified
662 * for the current virtual CPU. Any virtual tagged caches are
663 * guaranteed to be kept consistent. The flags argument determines
664 * which TLB (instruction, or data, or both) participate in the
665 * operation.
666 *
667 * ARG0 and ARG1 are both reserved and must be set to zero.
668 */
669#define HV_FAST_MMU_DEMAP_ALL 0x24
670
671/* mmu_map_perm_addr()
672 * TRAP: HV_FAST_TRAP
673 * FUNCTION: HV_FAST_MMU_MAP_PERM_ADDR
674 * ARG0: virtual address
675 * ARG1: reserved, must be zero
676 * ARG2: TTE
677 * ARG3: flags (HV_MMU_{IMMU,DMMU})
678 * RET0: status
 679 * ERRORS: EINVAL Invalid virtual address or flags value
680 * EBADPGSZ Invalid page size value
681 * ENORADDR Invalid real address in TTE
682 * ETOOMANY Too many mappings (max of 8 reached)
683 *
684 * Create a permanent mapping using the given TTE and virtual address
685 * for context 0 on the calling virtual CPU. A maximum of 8 such
686 * permanent mappings may be specified by privileged code. Mappings
687 * may be removed with mmu_unmap_perm_addr().
688 *
689 * The behavior is undefined if a TTE with the valid bit clear is given.
690 *
691 * Note: This call is used to specify address space mappings for which
692 * privileged code does not expect to receive misses. For example,
693 * this mechanism can be used to map kernel nucleus code and data.
694 */
695#define HV_FAST_MMU_MAP_PERM_ADDR 0x25
696
697/* mmu_fault_area_conf()
698 * TRAP: HV_FAST_TRAP
699 * FUNCTION: HV_FAST_MMU_FAULT_AREA_CONF
700 * ARG0: real address
701 * RET0: status
702 * RET1: previous mmu fault area real address
703 * ERRORS: ENORADDR Invalid real address
704 * EBADALIGN Invalid alignment for fault area
705 *
706 * Configure the MMU fault status area for the calling CPU. A 64-byte
707 * aligned real address specifies where MMU fault status information
708 * is placed. The return value is the previously specified area, or 0
709 * for the first invocation. Specifying a fault area at real address
710 * 0 is not allowed.
711 */
712#define HV_FAST_MMU_FAULT_AREA_CONF 0x26
713
714/* mmu_enable()
715 * TRAP: HV_FAST_TRAP
716 * FUNCTION: HV_FAST_MMU_ENABLE
717 * ARG0: enable flag
718 * ARG1: return target address
719 * RET0: status
720 * ERRORS: ENORADDR Invalid real address when disabling
721 * translation.
722 * EBADALIGN The return target address is not
723 * aligned to an instruction.
 724 * EINVAL The enable flag requests the current
725 * operating mode (e.g. disable if already
726 * disabled)
727 *
728 * Enable or disable virtual address translation for the calling CPU
729 * within the virtual machine domain. If the enable flag is zero,
730 * translation is disabled, any non-zero value will enable
731 * translation.
732 *
733 * When this function returns, the newly selected translation mode
734 * will be active. If the mmu is being enabled, then the return
735 * target address is a virtual address else it is a real address.
736 *
737 * Upon successful completion, control will be returned to the given
738 * return target address (ie. the cpu will jump to that address). On
739 * failure, the previous mmu mode remains and the trap simply returns
740 * as normal with the appropriate error code in RET0.
741 */
742#define HV_FAST_MMU_ENABLE 0x27
743
744/* mmu_unmap_perm_addr()
745 * TRAP: HV_FAST_TRAP
746 * FUNCTION: HV_FAST_MMU_UNMAP_PERM_ADDR
747 * ARG0: virtual address
748 * ARG1: reserved, must be zero
749 * ARG2: flags (HV_MMU_{IMMU,DMMU})
750 * RET0: status
 751 * ERRORS: EINVAL Invalid virtual address or flags value
752 * ENOMAP Specified mapping was not found
753 *
754 * Demaps any permanent page mapping (established via
755 * mmu_map_perm_addr()) at the given virtual address for context 0 on
756 * the current virtual CPU. Any virtual tagged caches are guaranteed
757 * to be kept consistent.
758 */
759#define HV_FAST_MMU_UNMAP_PERM_ADDR 0x28
760
761/* mmu_tsb_ctx0_info()
762 * TRAP: HV_FAST_TRAP
763 * FUNCTION: HV_FAST_MMU_TSB_CTX0_INFO
764 * ARG0: max TSBs
765 * ARG1: buffer pointer
766 * RET0: status
767 * RET1: number of TSBs
768 * ERRORS: EINVAL Supplied buffer is too small
769 * EBADALIGN The buffer pointer is badly aligned
770 * ENORADDR Invalid real address for buffer pointer
771 *
 772 * Return the TSB configuration as previously defined by mmu_tsb_ctx0()
773 * into the provided buffer. The size of the buffer is given in ARG1
774 * in terms of the number of TSB description entries.
775 *
776 * Upon return, RET1 always contains the number of TSB descriptions
777 * previously configured. If zero TSBs were configured, EOK is
778 * returned with RET1 containing 0.
779 */
780#define HV_FAST_MMU_TSB_CTX0_INFO 0x29
781
782/* mmu_tsb_ctxnon0_info()
783 * TRAP: HV_FAST_TRAP
784 * FUNCTION: HV_FAST_MMU_TSB_CTXNON0_INFO
785 * ARG0: max TSBs
786 * ARG1: buffer pointer
787 * RET0: status
788 * RET1: number of TSBs
789 * ERRORS: EINVAL Supplied buffer is too small
790 * EBADALIGN The buffer pointer is badly aligned
791 * ENORADDR Invalid real address for buffer pointer
792 *
 793 * Return the TSB configuration as previously defined by
794 * mmu_tsb_ctxnon0() into the provided buffer. The size of the buffer
795 * is given in ARG1 in terms of the number of TSB description entries.
796 *
797 * Upon return, RET1 always contains the number of TSB descriptions
798 * previously configured. If zero TSBs were configured, EOK is
799 * returned with RET1 containing 0.
800 */
801#define HV_FAST_MMU_TSB_CTXNON0_INFO 0x2a
802
803/* mmu_fault_area_info()
804 * TRAP: HV_FAST_TRAP
805 * FUNCTION: HV_FAST_MMU_FAULT_AREA_INFO
806 * RET0: status
807 * RET1: fault area real address
808 * ERRORS: No errors defined.
809 *
810 * Return the currently defined MMU fault status area for the current
811 * CPU. The real address of the fault status area is returned in
812 * RET1, or 0 is returned in RET1 if no fault status area is defined.
813 *
814 * Note: mmu_fault_area_conf() may be called with the return value (RET1)
815 * from this service if there is a need to save and restore the fault
816 * area for a cpu.
817 */
818#define HV_FAST_MMU_FAULT_AREA_INFO 0x2b
819
820/* Cache and Memory services. */
821
822/* mem_scrub()
823 * TRAP: HV_FAST_TRAP
824 * FUNCTION: HV_FAST_MEM_SCRUB
825 * ARG0: real address
826 * ARG1: length
827 * RET0: status
828 * RET1: length scrubbed
829 * ERRORS: ENORADDR Invalid real address
830 * EBADALIGN Start address or length are not correctly
831 * aligned
832 * EINVAL Length is zero
833 *
834 * Zero the memory contents in the range real address to real address
835 * plus length minus 1. Also, valid ECC will be generated for that
836 * memory address range. Scrubbing is started at the given real
837 * address, but may not scrub the entire given length. The actual
838 * length scrubbed will be returned in RET1.
839 *
840 * The real address and length must be aligned on an 8K boundary, or
841 * contain the start address and length from a sun4v error report.
842 *
843 * Note: There are two uses for this function. The first use is to block clear
 844 * and initialize memory and the second is to scrub an uncorrectable
845 * error reported via a resumable or non-resumable trap. The second
846 * use requires the arguments to be equal to the real address and length
847 * provided in a sun4v memory error report.
848 */
849#define HV_FAST_MEM_SCRUB 0x31
850
851/* mem_sync()
852 * TRAP: HV_FAST_TRAP
853 * FUNCTION: HV_FAST_MEM_SYNC
854 * ARG0: real address
855 * ARG1: length
856 * RET0: status
857 * RET1: length synced
858 * ERRORS: ENORADDR Invalid real address
859 * EBADALIGN Start address or length are not correctly
860 * aligned
861 * EINVAL Length is zero
862 *
863 * Force the next access within the real address to real address plus
 864 * length minus 1 to be fetched from main system memory. Less than
865 * the given length may be synced, the actual amount synced is
866 * returned in RET1. The real address and length must be aligned on
867 * an 8K boundary.
868 */
869#define HV_FAST_MEM_SYNC 0x32
870
871/* Time of day services.
872 *
873 * The hypervisor maintains the time of day on a per-domain basis.
874 * Changing the time of day in one domain does not affect the time of
875 * day on any other domain.
876 *
877 * Time is described by a single unsigned 64-bit word which is the
878 * number of seconds since the UNIX Epoch (00:00:00 UTC, January 1,
879 * 1970).
880 */
881
882/* tod_get()
883 * TRAP: HV_FAST_TRAP
884 * FUNCTION: HV_FAST_TOD_GET
885 * RET0: status
886 * RET1: TOD
887 * ERRORS: EWOULDBLOCK TOD resource is temporarily unavailable
888 * ENOTSUPPORTED If TOD not supported on this platform
889 *
890 * Return the current time of day. May block if TOD access is
891 * temporarily not possible.
892 */
893#define HV_FAST_TOD_GET 0x50
894
895/* tod_set()
896 * TRAP: HV_FAST_TRAP
897 * FUNCTION: HV_FAST_TOD_SET
898 * ARG0: TOD
899 * RET0: status
900 * ERRORS: EWOULDBLOCK TOD resource is temporarily unavailable
901 * ENOTSUPPORTED If TOD not supported on this platform
902 *
903 * The current time of day is set to the value specified in ARG0. May
904 * block if TOD access is temporarily not possible.
905 */
906#define HV_FAST_TOD_SET 0x51
907
908/* Console services */
909
910/* con_getchar()
911 * TRAP: HV_FAST_TRAP
912 * FUNCTION: HV_FAST_CONS_GETCHAR
913 * RET0: status
914 * RET1: character
915 * ERRORS: EWOULDBLOCK No character available.
916 *
917 * Returns a character from the console device. If no character is
918 * available then an EWOULDBLOCK error is returned. If a character is
919 * available, then the returned status is EOK and the character value
920 * is in RET1.
921 *
922 * A virtual BREAK is represented by the 64-bit value -1.
923 *
924 * A virtual HUP signal is represented by the 64-bit value -2.
925 */
926#define HV_FAST_CONS_GETCHAR 0x60
927
928/* con_putchar()
929 * TRAP: HV_FAST_TRAP
930 * FUNCTION: HV_FAST_CONS_PUTCHAR
931 * ARG0: character
932 * RET0: status
933 * ERRORS: EINVAL Illegal character
934 * EWOULDBLOCK Output buffer currently full, would block
935 *
936 * Send a character to the console device. Only character values
937 * between 0 and 255 may be used. Values outside this range are
938 * invalid except for the 64-bit value -1 which is used to send a
939 * virtual BREAK.
940 */
941#define HV_FAST_CONS_PUTCHAR 0x61
942
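(A hypothetical polling write over this service; the sun4v_con_putchar() wrapper and the character variable c are assumed for illustration only, as this header does not declare such a wrapper, and udelay() comes from <linux/delay.h>.)

	/* sun4v_con_putchar() is a hypothetical C wrapper around HV_FAST_CONS_PUTCHAR. */
	while (sun4v_con_putchar(c) == HV_EWOULDBLOCK)
		udelay(1);	/* output buffer full, retry */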
943/* Trap trace services.
944 *
945 * The hypervisor provides a trap tracing capability for privileged
946 * code running on each virtual CPU. Privileged code provides a
947 * round-robin trap trace queue within which the hypervisor writes
 948 * 64-byte entries detailing hyperprivileged traps taken on behalf of
949 * privileged code. This is provided as a debugging capability for
950 * privileged code.
951 *
952 * The trap trace control structure is 64-bytes long and placed at the
953 * start (offset 0) of the trap trace buffer, and is described as
954 * follows:
955 */
956#ifndef __ASSEMBLY__
957struct hv_trap_trace_control {
958 unsigned long head_offset;
959 unsigned long tail_offset;
960 unsigned long __reserved[0x30 / sizeof(unsigned long)];
961};
962#endif
963#define HV_TRAP_TRACE_CTRL_HEAD_OFFSET 0x00
964#define HV_TRAP_TRACE_CTRL_TAIL_OFFSET 0x08
965
966/* The head offset is the offset of the most recently completed entry
967 * in the trap-trace buffer. The tail offset is the offset of the
968 * next entry to be written. The control structure is owned and
969 * modified by the hypervisor. A guest may not modify the control
970 * structure contents. Attempts to do so will result in undefined
971 * behavior for the guest.
972 *
 973 * Each trap trace buffer entry is laid out as follows:
974 */
975#ifndef __ASSEMBLY__
976struct hv_trap_trace_entry {
977 unsigned char type; /* Hypervisor or guest entry? */
978 unsigned char hpstate; /* Hyper-privileged state */
979 unsigned char tl; /* Trap level */
980 unsigned char gl; /* Global register level */
981 unsigned short tt; /* Trap type */
982 unsigned short tag; /* Extended trap identifier */
983 unsigned long tstate; /* Trap state */
984 unsigned long tick; /* Tick */
985 unsigned long tpc; /* Trap PC */
986 unsigned long f1; /* Entry specific */
987 unsigned long f2; /* Entry specific */
988 unsigned long f3; /* Entry specific */
989 unsigned long f4; /* Entry specific */
990};
991#endif
992#define HV_TRAP_TRACE_ENTRY_TYPE 0x00
993#define HV_TRAP_TRACE_ENTRY_HPSTATE 0x01
994#define HV_TRAP_TRACE_ENTRY_TL 0x02
995#define HV_TRAP_TRACE_ENTRY_GL 0x03
996#define HV_TRAP_TRACE_ENTRY_TT 0x04
997#define HV_TRAP_TRACE_ENTRY_TAG 0x06
998#define HV_TRAP_TRACE_ENTRY_TSTATE 0x08
999#define HV_TRAP_TRACE_ENTRY_TICK 0x10
1000#define HV_TRAP_TRACE_ENTRY_TPC 0x18
1001#define HV_TRAP_TRACE_ENTRY_F1 0x20
1002#define HV_TRAP_TRACE_ENTRY_F2 0x28
1003#define HV_TRAP_TRACE_ENTRY_F3 0x30
1004#define HV_TRAP_TRACE_ENTRY_F4 0x38
1005
1006/* The type field is encoded as follows. */
1007#define HV_TRAP_TYPE_UNDEF 0x00 /* Entry content undefined */
1008#define HV_TRAP_TYPE_HV 0x01 /* Hypervisor trap entry */
1009#define HV_TRAP_TYPE_GUEST 0xff /* Added via ttrace_addentry() */
1010
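(Given the control-structure semantics above, a guest-side reader might locate the most recently completed entry roughly as follows; ttrace_buf is a hypothetical pointer to the 64-byte-aligned trace buffer and this is a sketch only.)

	struct hv_trap_trace_control *ctrl = ttrace_buf;	/* control block sits at offset 0 */
	struct hv_trap_trace_entry *ent;

	/* head_offset is the byte offset of the most recently completed entry. */
	ent = (struct hv_trap_trace_entry *) ((char *) ttrace_buf + ctrl->head_offset);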
1011/* ttrace_buf_conf()
1012 * TRAP: HV_FAST_TRAP
1013 * FUNCTION: HV_FAST_TTRACE_BUF_CONF
1014 * ARG0: real address
1015 * ARG1: number of entries
1016 * RET0: status
1017 * RET1: number of entries
1018 * ERRORS: ENORADDR Invalid real address
1019 * EINVAL Size is too small
1020 * EBADALIGN Real address not aligned on 64-byte boundary
1021 *
1022 * Requests hypervisor trap tracing and declares a virtual CPU's trap
1023 * trace buffer to the hypervisor. The real address supplies the real
1024 * base address of the trap trace queue and must be 64-byte aligned.
1025 * Specifying a value of 0 for the number of entries disables trap
1026 * tracing for the calling virtual CPU. The buffer allocated must be
1027 * sized for a power of two number of 64-byte trap trace entries plus
1028 * an initial 64-byte control structure.
1029 *
1030 * This may be invoked any number of times so that a virtual CPU may
1031 * relocate a trap trace buffer or create "snapshots" of information.
1032 *
1033 * If the real address is illegal or badly aligned, then trap tracing
1034 * is disabled and an error is returned.
1035 *
1036 * Upon failure with EINVAL, this service call returns in RET1 the
1037 * minimum number of buffer entries required. Upon other failures
1038 * RET1 is undefined.
1039 */
1040#define HV_FAST_TTRACE_BUF_CONF 0x90
1041
1042/* ttrace_buf_info()
1043 * TRAP: HV_FAST_TRAP
1044 * FUNCTION: HV_FAST_TTRACE_BUF_INFO
1045 * RET0: status
1046 * RET1: real address
1047 * RET2: size
1048 * ERRORS: None defined.
1049 *
1050 * Returns the size and location of the previously declared trap-trace
1051 * buffer. In the event that no buffer was previously defined, or the
1052 * buffer is disabled, this call will return a size of zero bytes.
1053 */
1054#define HV_FAST_TTRACE_BUF_INFO 0x91
1055
1056/* ttrace_enable()
1057 * TRAP: HV_FAST_TRAP
1058 * FUNCTION: HV_FAST_TTRACE_ENABLE
1059 * ARG0: enable
1060 * RET0: status
1061 * RET1: previous enable state
1062 * ERRORS: EINVAL No trap trace buffer currently defined
1063 *
1064 * Enable or disable trap tracing, and return the previous enabled
1065 * state in RET1. Future systems may define various flags for the
 1066 * enable argument (ARG0); for the moment a guest should pass
 1067 * "(uint64_t) -1" to enable, and "(uint64_t) 0" to disable all
 1068 * tracing - which will ensure future compatibility.
1069 */
1070#define HV_FAST_TTRACE_ENABLE 0x92
1071
1072/* ttrace_freeze()
1073 * TRAP: HV_FAST_TRAP
1074 * FUNCTION: HV_FAST_TTRACE_FREEZE
1075 * ARG0: freeze
1076 * RET0: status
1077 * RET1: previous freeze state
1078 * ERRORS: EINVAL No trap trace buffer currently defined
1079 *
1080 * Freeze or unfreeze trap tracing, returning the previous freeze
1081 * state in RET1. A guest should pass a non-zero value to freeze and
1082 * a zero value to unfreeze all tracing. The returned previous state
1083 * is 0 for not frozen and 1 for frozen.
1084 */
1085#define HV_FAST_TTRACE_FREEZE 0x93
1086
1087/* ttrace_addentry()
1088 * TRAP: HV_TTRACE_ADDENTRY_TRAP
1089 * ARG0: tag (16-bits)
1090 * ARG1: data word 0
1091 * ARG2: data word 1
1092 * ARG3: data word 2
1093 * ARG4: data word 3
1094 * RET0: status
1095 * ERRORS: EINVAL No trap trace buffer currently defined
1096 *
1097 * Add an entry to the trap trace buffer. Upon return only ARG0/RET0
1098 * is modified - none of the other registers holding arguments are
1099 * volatile across this hypervisor service.
1100 */
1101
1102/* Core dump services.
1103 *
 1104 * Since the hypervisor virtualizes and thus obscures a lot of the
1105 * physical machine layout and state, traditional OS crash dumps can
1106 * be difficult to diagnose especially when the problem is a
1107 * configuration error of some sort.
1108 *
1109 * The dump services provide an opaque buffer into which the
 1110 * hypervisor can place its internal state in order to assist in
1111 * debugging such situations. The contents are opaque and extremely
1112 * platform and hypervisor implementation specific. The guest, during
1113 * a core dump, requests that the hypervisor update any information in
1114 * the dump buffer in preparation to being dumped as part of the
1115 * domain's memory image.
1116 */
1117
1118/* dump_buf_update()
1119 * TRAP: HV_FAST_TRAP
1120 * FUNCTION: HV_FAST_DUMP_BUF_UPDATE
1121 * ARG0: real address
1122 * ARG1: size
1123 * RET0: status
1124 * RET1: required size of dump buffer
1125 * ERRORS: ENORADDR Invalid real address
1126 * EBADALIGN Real address is not aligned on a 64-byte
1127 * boundary
1128 * EINVAL Size is non-zero but less than minimum size
1129 * required
1130 * ENOTSUPPORTED Operation not supported on current logical
1131 * domain
1132 *
1133 * Declare a domain dump buffer to the hypervisor. The real address
1134 * provided for the domain dump buffer must be 64-byte aligned. The
1135 * size specifies the size of the dump buffer and may be larger than
1136 * the minimum size specified in the machine description. The
1137 * hypervisor will fill the dump buffer with opaque data.
1138 *
1139 * Note: A guest may elect to include dump buffer contents as part of a crash
1140 * dump to assist with debugging. This function may be called any number
1141 * of times so that a guest may relocate a dump buffer, or create
1142 * "snapshots" of any dump-buffer information. Each call to
1143 * dump_buf_update() atomically declares the new dump buffer to the
1144 * hypervisor.
1145 *
1146 * A specified size of 0 unconfigures the dump buffer. If the real
1147 * address is illegal or badly aligned, then any currently active dump
1148 * buffer is disabled and an error is returned.
1149 *
1150 * In the event that the call fails with EINVAL, RET1 contains the
 1151 * minimum size required by the hypervisor for a valid dump buffer.
1152 */
1153#define HV_FAST_DUMP_BUF_UPDATE 0x94
1154
1155/* dump_buf_info()
1156 * TRAP: HV_FAST_TRAP
1157 * FUNCTION: HV_FAST_DUMP_BUF_INFO
1158 * RET0: status
1159 * RET1: real address of current dump buffer
1160 * RET2: size of current dump buffer
1161 * ERRORS: No errors defined.
1162 *
 1163 * Return the currently configured dump buffer description. A
1164 * returned size of 0 bytes indicates an undefined dump buffer. In
1165 * this case the return address in RET1 is undefined.
1166 */
1167#define HV_FAST_DUMP_BUF_INFO 0x95
1168
1169/* Device interrupt services.
1170 *
1171 * Device interrupts are allocated to system bus bridges by the hypervisor,
1172 * and described to OBP in the machine description. OBP then describes
1173 * these interrupts to the OS via properties in the device tree.
1174 *
1175 * Terminology:
1176 *
1177 * cpuid Unique opaque value which represents a target cpu.
1178 *
1179 * devhandle Device handle. It uniquely identifies a device, and
 1180 * consists of the lower 28-bits of the hi-cell of the
1181 * first entry of the device's "reg" property in the
1182 * OBP device tree.
1183 *
1184 * devino Device interrupt number. Specifies the relative
1185 * interrupt number within the device. The unique
1186 * combination of devhandle and devino are used to
1187 * identify a specific device interrupt.
1188 *
1189 * Note: The devino value is the same as the values in the
1190 * "interrupts" property or "interrupt-map" property
1191 * in the OBP device tree for that device.
1192 *
 1193 * sysino System interrupt number. A 64-bit unsigned integer
1194 * representing a unique interrupt within a virtual
1195 * machine.
1196 *
1197 * intr_state A flag representing the interrupt state for a given
1198 * sysino. The state values are defined below.
1199 *
1200 * intr_enabled A flag representing the 'enabled' state for a given
1201 * sysino. The enable values are defined below.
1202 */
1203
1204#define HV_INTR_STATE_IDLE 0 /* Nothing pending */
1205#define HV_INTR_STATE_RECEIVED 1 /* Interrupt received by hardware */
1206#define HV_INTR_STATE_DELIVERED 2 /* Interrupt delivered to queue */
1207
1208#define HV_INTR_DISABLED 0 /* sysino not enabled */
1209#define HV_INTR_ENABLED 1 /* sysino enabled */
1210
1211/* intr_devino_to_sysino()
1212 * TRAP: HV_FAST_TRAP
1213 * FUNCTION: HV_FAST_INTR_DEVINO2SYSINO
1214 * ARG0: devhandle
1215 * ARG1: devino
1216 * RET0: status
1217 * RET1: sysino
1218 * ERRORS: EINVAL Invalid devhandle/devino
1219 *
1220 * Converts a device specific interrupt number of the given
1221 * devhandle/devino into a system specific ino (sysino).
1222 */
1223#define HV_FAST_INTR_DEVINO2SYSINO 0xa0
1224
1225#ifndef __ASSEMBLY__
1226extern unsigned long sun4v_devino_to_sysino(unsigned long devhandle,
1227 unsigned long devino);
1228#endif
1229
1230/* intr_getenabled()
1231 * TRAP: HV_FAST_TRAP
1232 * FUNCTION: HV_FAST_INTR_GETENABLED
1233 * ARG0: sysino
1234 * RET0: status
1235 * RET1: intr_enabled (HV_INTR_{DISABLED,ENABLED})
1236 * ERRORS: EINVAL Invalid sysino
1237 *
1238 * Returns interrupt enabled state in RET1 for the interrupt defined
1239 * by the given sysino.
1240 */
1241#define HV_FAST_INTR_GETENABLED 0xa1
1242
1243#ifndef __ASSEMBLY__
1244extern unsigned long sun4v_intr_getenabled(unsigned long sysino);
1245#endif
1246
1247/* intr_setenabled()
1248 * TRAP: HV_FAST_TRAP
1249 * FUNCTION: HV_FAST_INTR_SETENABLED
1250 * ARG0: sysino
1251 * ARG1: intr_enabled (HV_INTR_{DISABLED,ENABLED})
1252 * RET0: status
1253 * ERRORS: EINVAL Invalid sysino or intr_enabled value
1254 *
1255 * Set the 'enabled' state of the interrupt sysino.
1256 */
1257#define HV_FAST_INTR_SETENABLED 0xa2
1258
1259#ifndef __ASSEMBLY__
1260extern unsigned long sun4v_intr_setenabled(unsigned long sysino, unsigned long intr_enabled);
1261#endif
1262
1263/* intr_getstate()
1264 * TRAP: HV_FAST_TRAP
1265 * FUNCTION: HV_FAST_INTR_GETSTATE
1266 * ARG0: sysino
1267 * RET0: status
1268 * RET1: intr_state (HV_INTR_STATE_*)
1269 * ERRORS: EINVAL Invalid sysino
1270 *
1271 * Returns current state of the interrupt defined by the given sysino.
1272 */
1273#define HV_FAST_INTR_GETSTATE 0xa3
1274
1275#ifndef __ASSEMBLY__
1276extern unsigned long sun4v_intr_getstate(unsigned long sysino);
1277#endif
1278
1279/* intr_setstate()
1280 * TRAP: HV_FAST_TRAP
1281 * FUNCTION: HV_FAST_INTR_SETSTATE
1282 * ARG0: sysino
1283 * ARG1: intr_state (HV_INTR_STATE_*)
1284 * RET0: status
1285 * ERRORS: EINVAL Invalid sysino or intr_state value
1286 *
1287 * Sets the current state of the interrupt described by the given sysino
1288 * value.
1289 *
1290 * Note: Setting the state to HV_INTR_STATE_IDLE clears any pending
1291 * interrupt for sysino.
1292 */
1293#define HV_FAST_INTR_SETSTATE 0xa4
1294
1295#ifndef __ASSEMBLY__
1296extern unsigned long sun4v_intr_setstate(unsigned long sysino, unsigned long intr_state);
1297#endif
1298
1299/* intr_gettarget()
1300 * TRAP: HV_FAST_TRAP
1301 * FUNCTION: HV_FAST_INTR_GETTARGET
1302 * ARG0: sysino
1303 * RET0: status
1304 * RET1: cpuid
1305 * ERRORS: EINVAL Invalid sysino
1306 *
1307 * Returns CPU that is the current target of the interrupt defined by
1308 * the given sysino. The CPU value returned is undefined if the target
1309 * has not been set via intr_settarget().
1310 */
1311#define HV_FAST_INTR_GETTARGET 0xa5
1312
1313#ifndef __ASSEMBLY__
1314extern unsigned long sun4v_intr_gettarget(unsigned long sysino);
1315#endif
1316
1317/* intr_settarget()
1318 * TRAP: HV_FAST_TRAP
1319 * FUNCTION: HV_FAST_INTR_SETTARGET
1320 * ARG0: sysino
1321 * ARG1: cpuid
1322 * RET0: status
1323 * ERRORS: EINVAL Invalid sysino
1324 * ENOCPU Invalid cpuid
1325 *
1326 * Set the target CPU for the interrupt defined by the given sysino.
1327 */
1328#define HV_FAST_INTR_SETTARGET 0xa6
1329
1330#ifndef __ASSEMBLY__
1331extern unsigned long sun4v_intr_settarget(unsigned long sysino, unsigned long cpuid);
1332#endif
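/* Illustrative sketch (not part of the hypervisor API): a typical sequence
 * for routing a device interrupt using the C wrappers declared above.  The
 * devhandle/devino pair is assumed to come from the OBP device tree as
 * described in the terminology section; error handling is simplified.
 */
#ifndef __ASSEMBLY__
static inline int example_route_device_irq(unsigned long devhandle,
					   unsigned long devino,
					   unsigned long cpuid)
{
	unsigned long sysino;

	/* Convert the device-relative interrupt number into a sysino. */
	sysino = sun4v_devino_to_sysino(devhandle, devino);

	/* Aim the interrupt at the chosen cpu, clear any stale state, then
	 * enable it.  A non-zero return value is a hypervisor error status.
	 */
	if (sun4v_intr_settarget(sysino, cpuid))
		return -1;
	if (sun4v_intr_setstate(sysino, HV_INTR_STATE_IDLE))
		return -1;
	if (sun4v_intr_setenabled(sysino, HV_INTR_ENABLED))
		return -1;
	return 0;
}
#endif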
1333
1334/* PCI IO services.
1335 *
1336 * See the terminology descriptions in the device interrupt services
1337 * section above as those apply here too. Here are terminology
1338 * definitions specific to these PCI IO services:
1339 *
1340 * tsbnum TSB number. Identifies which io-tsb is used.
1341 * For this version of the specification, tsbnum
1342 * must be zero.
1343 *
1344 * tsbindex TSB index. Identifies which entry in the TSB
1345 * is used. The first entry is zero.
1346 *
1347 * tsbid A 64-bit aligned data structure which contains
1348 * a tsbnum and a tsbindex. Bits 63:32 contain the
1349 * tsbnum and bits 31:00 contain the tsbindex.
1350 *
1351 * Use the HV_PCI_TSBID() macro to construct such
1352 * values.
1353 *
1354 * io_attributes IO attributes for IOMMU mappings. One or more
1355 * of the attribute bits are stored in a 64-bit
1356 * value. The values are defined below.
1357 *
1358 * r_addr 64-bit real address
1359 *
1360 * pci_device PCI device address. A PCI device address identifies
1361 * a specific device on a specific PCI bus segment.
1362 * A PCI device address is a 32-bit unsigned integer
1363 * with the following format:
1364 *
1365 * 00000000.bbbbbbbb.dddddfff.00000000
1366 *
1367 * Use the HV_PCI_DEVICE_BUILD() macro to construct
1368 * such values.
1369 *
1370 * pci_config_offset
1371 * PCI configuration space offset. For conventional
1372 * PCI a value between 0 and 255. For extended
1373 * configuration space, a value between 0 and 4095.
1374 *
1375 * Note: For PCI configuration space accesses, the offset
1376 * must be aligned to the access size.
1377 *
1378 * error_flag A return value which specifies if the action succeeded
1379 * or failed. 0 means no error, non-0 means some error
1380 * occurred while performing the service.
1381 *
1382 * io_sync_direction
1383 * Direction definition for pci_dma_sync(), defined
1384 * below in HV_PCI_SYNC_*.
1385 *
1386 * io_page_list A list of io_page_addresses, where an io_page_address
1387 * is a real address.
1388 *
1389 * io_page_list_p A pointer to an io_page_list.
1390 *
1391 * "size based byte swap" - Some functions do size based byte swapping
1392 * which allows sw to access pointers and
1393 * counters in native form when the processor
1394 * operates in a different endianness than the
1395 * IO bus. Size-based byte swapping converts a
1396 * multi-byte field between big-endian and
1397 * little-endian format.
1398 */
1399
1400#define HV_PCI_MAP_ATTR_READ 0x01
1401#define HV_PCI_MAP_ATTR_WRITE 0x02
1402
1403#define HV_PCI_DEVICE_BUILD(b,d,f) \
1404 ((((b) & 0xff) << 16) | \
1405 (((d) & 0x1f) << 11) | \
1406 (((f) & 0x07) << 8))
1407
1408#define HV_PCI_TSBID(__tsb_num, __tsb_index) \
1409 ((((u64)(__tsb_num)) << 32UL) | ((u64)(__tsb_index)))
1410
1411#define HV_PCI_SYNC_FOR_DEVICE 0x01
1412#define HV_PCI_SYNC_FOR_CPU 0x02
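/* Illustrative sketch: building the pci_device and tsbid arguments with the
 * helper macros above.  Bus 0x01, device 0x02, function 0x03 and entry 0x10
 * of io-tsb 0 are arbitrary values chosen only for this example.
 */
#ifndef __ASSEMBLY__
static inline void example_build_pci_args(unsigned long *pci_device,
					  unsigned long *tsbid)
{
	/* Packs into the 00000000.bbbbbbbb.dddddfff.00000000 layout. */
	*pci_device = HV_PCI_DEVICE_BUILD(0x01, 0x02, 0x03);

	/* tsbnum lives in bits 63:32, tsbindex in bits 31:00. */
	*tsbid = HV_PCI_TSBID(0, 0x10);
}
#endif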
1413
1414/* pci_iommu_map()
1415 * TRAP: HV_FAST_TRAP
1416 * FUNCTION: HV_FAST_PCI_IOMMU_MAP
1417 * ARG0: devhandle
1418 * ARG1: tsbid
1419 * ARG2: #ttes
1420 * ARG3: io_attributes
1421 * ARG4: io_page_list_p
1422 * RET0: status
1423 * RET1: #ttes mapped
1424 * ERRORS: EINVAL Invalid devhandle/tsbnum/tsbindex/io_attributes
1425 * EBADALIGN Improperly aligned real address
1426 * ENORADDR Invalid real address
1427 *
1428 * Create IOMMU mappings in the sun4v device defined by the given
1429 * devhandle. The mappings are created in the TSB defined by the
1430 * tsbnum component of the given tsbid. The first mapping is created
1431 * in the TSB index defined by the tsbindex component of the given tsbid.
1432 * The call creates up to #ttes mappings, the first one at tsbnum, tsbindex,
1433 * the second at tsbnum, tsbindex + 1, etc.
1434 *
1435 * All mappings are created with the attributes defined by the io_attributes
1436 * argument. The page mapping addresses are described in the io_page_list
1437 * defined by the given io_page_list_p, which is a pointer to the io_page_list.
1438 * The first entry in the io_page_list is the address for the first iotte, the
1439 * 2nd for the 2nd iotte, and so on.
1440 *
1441 * Each io_page_address in the io_page_list must be appropriately aligned.
1442 * #ttes must be greater than zero. For this version of the spec, the tsbnum
1443 * component of the given tsbid must be zero.
1444 *
1445 * Returns the actual number of mappings created, which may be less than
1446 * or equal to the argument #ttes. If the function returns a value which
1447 * is less than #ttes, the caller may continue to call the function with
1448 * updated tsbid, #ttes, and io_page_list_p arguments until all pages are
1449 * mapped.
1450 *
1451 * Note: This function does not imply an iotte cache flush. The guest must
1452 * demap an entry before re-mapping it.
1453 */
1454#define HV_FAST_PCI_IOMMU_MAP 0xb0
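/* Illustrative sketch of the retry loop described above.  The wrapper
 * pci_sun4v_iommu_map_hcall() is hypothetical: it is assumed to issue
 * HV_FAST_TRAP with function number HV_FAST_PCI_IOMMU_MAP and to return
 * the number of ttes actually mapped, or a negative value on error.
 */
#ifndef __ASSEMBLY__
extern long pci_sun4v_iommu_map_hcall(unsigned long devhandle,
				      unsigned long tsbid,
				      unsigned long num_ttes,
				      unsigned long io_attrs,
				      unsigned long io_page_list_pa);

static inline long example_iommu_map_all(unsigned long devhandle,
					 unsigned long tsbindex,
					 unsigned long num_ttes,
					 unsigned long io_attrs,
					 unsigned long io_page_list_pa)
{
	unsigned long done = 0;

	/* The service may map fewer entries than requested, so advance the
	 * tsbindex and the page list (8 bytes per io_page_address) and retry
	 * until every page is mapped.
	 */
	while (done < num_ttes) {
		long n = pci_sun4v_iommu_map_hcall(devhandle,
						   HV_PCI_TSBID(0, tsbindex + done),
						   num_ttes - done, io_attrs,
						   io_page_list_pa + done * 8UL);
		if (n <= 0)
			return -1;
		done += n;
	}
	return done;
}
#endif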
1455
1456/* pci_iommu_demap()
1457 * TRAP: HV_FAST_TRAP
1458 * FUNCTION: HV_FAST_PCI_IOMMU_DEMAP
1459 * ARG0: devhandle
1460 * ARG1: tsbid
1461 * ARG2: #ttes
1462 * RET0: status
1463 * RET1: #ttes demapped
1464 * ERRORS: EINVAL Invalid devhandle/tsbnum/tsbindex
1465 *
1466 * Demap and flush IOMMU mappings in the device defined by the given
1467 * devhandle. Demaps up to #ttes entries in the TSB defined by the tsbnum
1468 * component of the given tsbid, starting at the TSB index defined by the
1469 * tsbindex component of the given tsbid.
1470 *
1471 * For this version of the spec, the tsbnum of the given tsbid must be zero.
1472 * #ttes must be greater than zero.
1473 *
1474 * Returns the actual number of ttes demapped, which may be less than or equal
1475 * to the argument #ttes. If #ttes demapped is less than #ttes, the caller
1476 * may continue to call this function with updated tsbid and #ttes arguments
1477 * until all pages are demapped.
1478 *
1479 * Note: Entries do not have to be mapped to be demapped. A demap of an
1480 * unmapped page will flush the entry from the tte cache.
1481 */
1482#define HV_FAST_PCI_IOMMU_DEMAP 0xb1
1483
1484/* pci_iommu_getmap()
1485 * TRAP: HV_FAST_TRAP
1486 * FUNCTION: HV_FAST_PCI_IOMMU_GETMAP
1487 * ARG0: devhandle
1488 * ARG1: tsbid
1489 * RET0: status
1490 * RET1: io_attributes
1491 * RET2: real address
1492 * ERRORS: EINVAL Invalid devhandle/tsbnum/tsbindex
1493 * ENOMAP Mapping is not valid, no translation exists
1494 *
1495 * Read and return the mapping in the device described by the given devhandle
1496 * and tsbid. If successful, the io_attributes shall be returned in RET1
1497 * and the page address of the mapping shall be returned in RET2.
1498 *
1499 * For this version of the spec, the tsbnum component of the given tsbid
1500 * must be zero.
1501 */
1502#define HV_FAST_PCI_IOMMU_GETMAP 0xb2
1503
1504/* pci_iommu_getbypass()
1505 * TRAP: HV_FAST_TRAP
1506 * FUNCTION: HV_FAST_PCI_IOMMU_GETBYPASS
1507 * ARG0: devhandle
1508 * ARG1: real address
1509 * ARG2: io_attributes
1510 * RET0: status
1511 * RET1: io_addr
1512 * ERRORS: EINVAL Invalid devhandle/io_attributes
1513 * ENORADDR Invalid real address
1514 * ENOTSUPPORTED Function not supported in this implementation.
1515 *
1516 * Create a "special" mapping in the device described by the given devhandle,
1517 * for the given real address and attributes. Return the IO address in RET1
1518 * if successful.
1519 */
1520#define HV_FAST_PCI_IOMMU_GETBYPASS 0xb3
1521
1522/* pci_config_get()
1523 * TRAP: HV_FAST_TRAP
1524 * FUNCTION: HV_FAST_PCI_CONFIG_GET
1525 * ARG0: devhandle
1526 * ARG1: pci_device
1527 * ARG2: pci_config_offset
1528 * ARG3: size
1529 * RET0: status
1530 * RET1: error_flag
1531 * RET2: data
1532 * ERRORS: EINVAL Invalid devhandle/pci_device/offset/size
1533 * EBADALIGN pci_config_offset not size aligned
1534 * ENOACCESS Access to this offset is not permitted
1535 *
1536 * Read PCI configuration space for the adapter described by the given
1537 * devhandle. Read size (1, 2, or 4) bytes of data from the given
1538 * pci_device, at pci_config_offset from the beginning of the device's
1539 * configuration space. If there was no error, RET1 is set to zero and
1540 * RET2 is set to the data read. Insignificant bits in RET2 are not
1541 * guaranteed to have any specific value and therefore must be ignored.
1542 *
1543 * The data returned in RET2 is size based byte swapped.
1544 *
1545 * If an error occurs during the read, set RET1 to a non-zero value. The
1546 * given pci_config_offset must be 'size' aligned.
1547 */
1548#define HV_FAST_PCI_CONFIG_GET 0xb4
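/* Illustrative sketch: reading the 16-bit vendor ID at configuration offset
 * zero.  The wrapper pci_sun4v_config_get_hcall() is hypothetical; it is
 * assumed to issue HV_FAST_PCI_CONFIG_GET and to hand back error_flag and
 * data through pointer arguments while returning the hypervisor status.
 */
#ifndef __ASSEMBLY__
extern unsigned long pci_sun4v_config_get_hcall(unsigned long devhandle,
						unsigned long pci_device,
						unsigned long off,
						unsigned long size,
						unsigned long *error_flag,
						unsigned long *data);

static inline int example_read_vendor_id(unsigned long devhandle,
					 unsigned long pci_device,
					 unsigned short *vendor)
{
	unsigned long err, data;

	/* Offset 0 with size 2 satisfies the size-alignment requirement. */
	if (pci_sun4v_config_get_hcall(devhandle, pci_device, 0x00, 2,
				       &err, &data))
		return -1;		/* bad arguments, etc. */
	if (err)
		return -1;		/* the config access itself failed */
	*vendor = (unsigned short) data;	/* size based byte swapped */
	return 0;
}
#endif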
1549
1550/* pci_config_put()
1551 * TRAP: HV_FAST_TRAP
1552 * FUNCTION: HV_FAST_PCI_CONFIG_PUT
1553 * ARG0: devhandle
1554 * ARG1: pci_device
1555 * ARG2: pci_config_offset
1556 * ARG3: size
1557 * ARG4: data
1558 * RET0: status
1559 * RET1: error_flag
1560 * ERRORS: EINVAL Invalid devhandle/pci_device/offset/size
1561 * EBADALIGN pci_config_offset not size aligned
1562 * ENOACCESS Access to this offset is not permitted
1563 *
1564 * Write PCI configuration space for the adapter described by the given
1565 * devhandle. Write size (1, 2, or 4) bytes of data in a single operation,
1566 * at pci_config_offset from the beginning of the device's configuration
1567 * space. The data argument contains the data to be written to configuration
1568 * space. Prior to writing, the data is size based byte swapped.
1569 *
1570 * If an error occurs during the write access, do not generate an error
1571 * report, but set RET1 to a non-zero value. Otherwise RET1 is zero.
1572 * The given pci_config_offset must be 'size' aligned.
1573 *
1574 * This function is permitted to read from offset zero in the configuration
1575 * space described by the given pci_device if necessary to ensure that the
1576 * write access to config space completes.
1577 */
1578#define HV_FAST_PCI_CONFIG_PUT 0xb5
1579
1580/* pci_peek()
1581 * TRAP: HV_FAST_TRAP
1582 * FUNCTION: HV_FAST_PCI_PEEK
1583 * ARG0: devhandle
1584 * ARG1: real address
1585 * ARG2: size
1586 * RET0: status
1587 * RET1: error_flag
1588 * RET2: data
1589 * ERRORS: EINVAL Invalid devhandle or size
1590 * EBADALIGN Improperly aligned real address
1591 * ENORADDR Bad real address
1592 * ENOACCESS Guest access prohibited
1593 *
1594 * Attempt to read the IO address given by the given devhandle, real address,
1595 * and size. Size must be 1, 2, 4, or 8. The read is performed as a single
1596 * access operation using the given size. If an error occurs when reading
1597 * from the given location, do not generate an error report, but return a
1598 * non-zero value in RET1. If the read was successful, return zero in RET1
1599 * and return the actual data read in RET2. The data returned is size based
1600 * byte swapped.
1601 *
1602 * Non-significant bits in RET2 are not guaranteed to have any specific value
1603 * and therefore must be ignored. If RET1 is returned as non-zero, the data
1604 * value is not guaranteed to have any specific value and should be ignored.
1605 *
1606 * The caller must have permission to read from the given devhandle, real
1607 * address, which must be an IO address. The argument real address must be a
1608 * size aligned address.
1609 *
1610 * The hypervisor implementation of this function must block access to any
1611 * IO address that the guest does not have explicit permission to access.
1612 */
1613#define HV_FAST_PCI_PEEK 0xb6
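/* Illustrative sketch: probing a 4-byte IO location without risking a fatal
 * error report.  The wrapper pci_sun4v_peek_hcall() is hypothetical and is
 * assumed to issue HV_FAST_PCI_PEEK, returning the hypervisor status and
 * filling in error_flag and data.
 */
#ifndef __ASSEMBLY__
extern unsigned long pci_sun4v_peek_hcall(unsigned long devhandle,
					  unsigned long raddr,
					  unsigned long size,
					  unsigned long *error_flag,
					  unsigned long *data);

static inline int example_probe_io_word(unsigned long devhandle,
					unsigned long raddr,
					unsigned int *val)
{
	unsigned long err, data;

	/* raddr must be 4-byte aligned for a 4-byte access. */
	if (pci_sun4v_peek_hcall(devhandle, raddr, 4, &err, &data) || err)
		return -1;
	*val = (unsigned int) data;	/* size based byte swapped */
	return 0;
}
#endif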
1614
1615/* pci_poke()
1616 * TRAP: HV_FAST_TRAP
1617 * FUNCTION: HV_FAST_PCI_POKE
1618 * ARG0: devhandle
1619 * ARG1: real address
1620 * ARG2: size
1621 * ARG3: data
1622 * ARG4: pci_device
1623 * RET0: status
1624 * RET1: error_flag
1625 * ERRORS: EINVAL Invalid devhandle, size, or pci_device
1626 * EBADALIGN Improperly aligned real address
1627 * ENORADDR Bad real address
1628 * ENOACCESS Guest access prohibited
1629 * ENOTSUPPORTED Function is not supported by implementation
1630 *
1631 * Attempt to write data to the IO address given by the given devhandle,
1632 * real address, and size. Size must be 1, 2, 4, or 8. The write is
1633 * performed as a single access operation using the given size. Prior to
1634 * writing, the data is size based byte swapped.
1635 *
1636 * If an error occurs when writing to the given location, do not generate an
1637 * error report, but return a non-zero value in RET1. If the write was
1638 * successful, return zero in RET1.
1639 *
1640 * pci_device describes the configuration address of the device being
1641 * written to. The implementation may safely read from offset 0 within
1642 * the configuration space of the device described by devhandle and
1643 * pci_device in order to guarantee that the write portion of the operation
1644 * completes.
1645 *
1646 * Any error that occurs due to the read shall be reported using the normal
1647 * error reporting mechanisms; the read error is not suppressed.
1648 *
1649 * The caller must have permission to write to the given devhandle, real
1650 * address, which must be an IO address. The argument real address must be a
1651 * size aligned address. The caller must have permission to read from
1652 * the given devhandle, pci_device configuration space offset 0.
1653 *
1654 * The hypervisor implementation of this function must block access to any
1655 * IO address that the guest does not have explicit permission to access.
1656 */
1657#define HV_FAST_PCI_POKE 0xb7
1658
1659/* pci_dma_sync()
1660 * TRAP: HV_FAST_TRAP
1661 * FUNCTION: HV_FAST_PCI_DMA_SYNC
1662 * ARG0: devhandle
1663 * ARG1: real address
1664 * ARG2: size
1665 * ARG3: io_sync_direction
1666 * RET0: status
1667 * RET1: #synced
1668 * ERRORS: EINVAL Invalid devhandle or io_sync_direction
1669 * ENORADDR Bad real address
1670 *
1671 * Synchronize a memory region described by the given real address and size,
1672 * for the device defined by the given devhandle using the direction(s)
1673 * defined by the given io_sync_direction. The argument size is the size of
1674 * the memory region in bytes.
1675 *
1676 * Return the actual number of bytes synchronized in the return value #synced,
1677 * which may be less than or equal to the argument size. If the return
1678 * value #synced is less than size, the caller must continue to call this
1679 * function with updated real address and size arguments until the entire
1680 * memory region is synchronized.
1681 */
1682#define HV_FAST_PCI_DMA_SYNC 0xb8
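/* Illustrative sketch of the partial-completion loop described above.  The
 * wrapper pci_sun4v_dma_sync_hcall() is hypothetical and is assumed to issue
 * HV_FAST_PCI_DMA_SYNC, returning the number of bytes synchronized or a
 * negative value on error.
 */
#ifndef __ASSEMBLY__
extern long pci_sun4v_dma_sync_hcall(unsigned long devhandle,
				     unsigned long raddr,
				     unsigned long size,
				     unsigned long direction);

static inline int example_dma_sync_all(unsigned long devhandle,
				       unsigned long raddr,
				       unsigned long size,
				       unsigned long direction)
{
	/* The service may sync only part of the region per call, so keep
	 * advancing the real address until nothing is left.
	 */
	while (size > 0) {
		long n = pci_sun4v_dma_sync_hcall(devhandle, raddr, size,
						  direction);
		if (n <= 0)
			return -1;
		raddr += n;
		size -= n;
	}
	return 0;
}
#endif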
1683
1684/* PCI MSI services. */
1685
1686#define HV_MSITYPE_MSI32 0x00
1687#define HV_MSITYPE_MSI64 0x01
1688
1689#define HV_MSIQSTATE_IDLE 0x00
1690#define HV_MSIQSTATE_ERROR 0x01
1691
1692#define HV_MSIQ_INVALID 0x00
1693#define HV_MSIQ_VALID 0x01
1694
1695#define HV_MSISTATE_IDLE 0x00
1696#define HV_MSISTATE_DELIVERED 0x01
1697
1698#define HV_MSIVALID_INVALID 0x00
1699#define HV_MSIVALID_VALID 0x01
1700
1701#define HV_PCIE_MSGTYPE_PME_MSG 0x18
1702#define HV_PCIE_MSGTYPE_PME_ACK_MSG 0x1b
1703#define HV_PCIE_MSGTYPE_CORR_MSG 0x30
1704#define HV_PCIE_MSGTYPE_NONFATAL_MSG 0x31
1705#define HV_PCIE_MSGTYPE_FATAL_MSG 0x33
1706
1707#define HV_MSG_INVALID 0x00
1708#define HV_MSG_VALID 0x01
1709
1710/* pci_msiq_conf()
1711 * TRAP: HV_FAST_TRAP
1712 * FUNCTION: HV_FAST_PCI_MSIQ_CONF
1713 * ARG0: devhandle
1714 * ARG1: msiqid
1715 * ARG2: real address
1716 * ARG3: number of entries
1717 * RET0: status
1718 * ERRORS: EINVAL Invalid devhandle, msiqid or nentries
1719 * EBADALIGN Improperly aligned real address
1720 * ENORADDR Bad real address
1721 *
1722 * Configure the MSI queue given by the devhandle and msiqid arguments
1723 * to be placed at the given real address and to contain the given
1724 * number of entries. The real address must be aligned exactly to match
1725 * the queue size. Each queue entry is 64 bytes long, so e.g. a 32-entry
1726 * queue must be aligned on a 2048 byte real address boundary. The MSI-EQ
1727 * Head and Tail are initialized so that the MSI-EQ is 'empty'.
1728 *
1729 * Implementation Note: Certain implementations have fixed sized queues. In
1730 * that case, number of entries must contain the correct
1731 * value.
1732 */
1733#define HV_FAST_PCI_MSIQ_CONF 0xc0
1734
1735/* pci_msiq_info()
1736 * TRAP: HV_FAST_TRAP
1737 * FUNCTION: HV_FAST_PCI_MSIQ_INFO
1738 * ARG0: devhandle
1739 * ARG1: msiqid
1740 * RET0: status
1741 * RET1: real address
1742 * RET2: number of entries
1743 * ERRORS: EINVAL Invalid devhandle or msiqid
1744 *
1745 * Return the configuration information for the MSI queue described
1746 * by the given devhandle and msiqid. The base address of the queue
1747 * is returned in RET1 and the number of entries is returned in RET2.
1748 * If the queue is unconfigured, the real address is undefined and the
1749 * number of entries will be returned as zero.
1750 */
1751#define HV_FAST_PCI_MSIQ_INFO 0xc1
1752
1753/* pci_msiq_getvalid()
1754 * TRAP: HV_FAST_TRAP
1755 * FUNCTION: HV_FAST_PCI_MSIQ_GETVALID
1756 * ARG0: devhandle
1757 * ARG1: msiqid
1758 * RET0: status
1759 * RET1: msiqvalid (HV_MSIQ_VALID or HV_MSIQ_INVALID)
1760 * ERRORS: EINVAL Invalid devhandle or msiqid
1761 *
1762 * Get the valid state of the MSI-EQ described by the given devhandle and
1763 * msiqid.
1764 */
1765#define HV_FAST_PCI_MSIQ_GETVALID 0xc2
1766
1767/* pci_msiq_setvalid()
1768 * TRAP: HV_FAST_TRAP
1769 * FUNCTION: HV_FAST_PCI_MSIQ_SETVALID
1770 * ARG0: devhandle
1771 * ARG1: msiqid
1772 * ARG2: msiqvalid (HV_MSIQ_VALID or HV_MSIQ_INVALID)
1773 * RET0: status
1774 * ERRORS: EINVAL Invalid devhandle or msiqid or msiqvalid
1775 * value or MSI EQ is uninitialized
1776 *
1777 * Set the valid state of the MSI-EQ described by the given devhandle and
1778 * msiqid to the given msiqvalid.
1779 */
1780#define HV_FAST_PCI_MSIQ_SETVALID 0xc3
1781
1782/* pci_msiq_getstate()
1783 * TRAP: HV_FAST_TRAP
1784 * FUNCTION: HV_FAST_PCI_MSIQ_GETSTATE
1785 * ARG0: devhandle
1786 * ARG1: msiqid
1787 * RET0: status
1788 * RET1: msiqstate (HV_MSIQSTATE_IDLE or HV_MSIQSTATE_ERROR)
1789 * ERRORS: EINVAL Invalid devhandle or msiqid
1790 *
1791 * Get the state of the MSI-EQ described by the given devhandle and
1792 * msiqid.
1793 */
1794#define HV_FAST_PCI_MSIQ_GETSTATE 0xc4
1795
1796/* pci_msiq_setstate()
1797 * TRAP: HV_FAST_TRAP
1798 * FUNCTION: HV_FAST_PCI_MSIQ_SETSTATE
1799 * ARG0: devhandle
1800 * ARG1: msiqid
1801 * ARG2: msiqstate (HV_MSIQSTATE_IDLE or HV_MSIQSTATE_ERROR)
1802 * RET0: status
1803 * ERRORS: EINVAL Invalid devhandle or msiqid or msiqstate
1804 * value or MSI EQ is uninitialized
1805 *
1806 * Set the state of the MSI-EQ described by the given devhandle and
1807 * msiqid to the given msiqstate.
1808 */
1809#define HV_FAST_PCI_MSIQ_SETSTATE 0xc5
1810
1811/* pci_msiq_gethead()
1812 * TRAP: HV_FAST_TRAP
1813 * FUNCTION: HV_FAST_PCI_MSIQ_GETHEAD
1814 * ARG0: devhandle
1815 * ARG1: msiqid
1816 * RET0: status
1817 * RET1: msiqhead
1818 * ERRORS: EINVAL Invalid devhandle or msiqid
1819 *
1820 * Get the current MSI EQ queue head for the MSI-EQ described by the
1821 * given devhandle and msiqid.
1822 */
1823#define HV_FAST_PCI_MSIQ_GETHEAD 0xc6
1824
1825/* pci_msiq_sethead()
1826 * TRAP: HV_FAST_TRAP
1827 * FUNCTION: HV_FAST_PCI_MSIQ_SETHEAD
1828 * ARG0: devhandle
1829 * ARG1: msiqid
1830 * ARG2: msiqhead
1831 * RET0: status
1832 * ERRORS: EINVAL Invalid devhandle or msiqid or msiqhead,
1833 * or MSI EQ is uninitialized
1834 *
1835 * Set the current MSI EQ queue head for the MSI-EQ described by the
1836 * given devhandle and msiqid.
1837 */
1838#define HV_FAST_PCI_MSIQ_SETHEAD 0xc7
1839
1840/* pci_msiq_gettail()
1841 * TRAP: HV_FAST_TRAP
1842 * FUNCTION: HV_FAST_PCI_MSIQ_GETTAIL
1843 * ARG0: devhandle
1844 * ARG1: msiqid
1845 * RET0: status
1846 * RET1: msiqtail
1847 * ERRORS: EINVAL Invalid devhandle or msiqid
1848 *
1849 * Get the current MSI EQ queue tail for the MSI-EQ described by the
1850 * given devhandle and msiqid.
1851 */
1852#define HV_FAST_PCI_MSIQ_GETTAIL 0xc8
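/* Illustrative sketch of an MSI-EQ drain loop using the gethead/gettail/
 * sethead services above.  The *_hcall() wrappers are hypothetical and are
 * assumed to return the hypervisor status; head and tail are treated here
 * as byte offsets into the 64-byte-per-entry queue, and entry processing is
 * left abstract.
 */
#ifndef __ASSEMBLY__
extern unsigned long pci_sun4v_msiq_gethead_hcall(unsigned long devhandle,
						  unsigned long msiqid,
						  unsigned long *head);
extern unsigned long pci_sun4v_msiq_gettail_hcall(unsigned long devhandle,
						  unsigned long msiqid,
						  unsigned long *tail);
extern unsigned long pci_sun4v_msiq_sethead_hcall(unsigned long devhandle,
						  unsigned long msiqid,
						  unsigned long head);

static inline int example_msiq_drain(unsigned long devhandle,
				     unsigned long msiqid,
				     unsigned long q_size_bytes,
				     void (*handle_entry)(unsigned long off))
{
	unsigned long head, tail;

	if (pci_sun4v_msiq_gethead_hcall(devhandle, msiqid, &head) ||
	    pci_sun4v_msiq_gettail_hcall(devhandle, msiqid, &tail))
		return -1;

	while (head != tail) {
		handle_entry(head);		/* consume one 64-byte entry */
		head += 64;
		if (head >= q_size_bytes)
			head = 0;		/* wrap around */
	}

	/* Publish the new head back to the hypervisor. */
	return pci_sun4v_msiq_sethead_hcall(devhandle, msiqid, head) ? -1 : 0;
}
#endif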
1853
1854/* pci_msi_getvalid()
1855 * TRAP: HV_FAST_TRAP
1856 * FUNCTION: HV_FAST_PCI_MSI_GETVALID
1857 * ARG0: devhandle
1858 * ARG1: msinum
1859 * RET0: status
1860 * RET1: msivalidstate
1861 * ERRORS: EINVAL Invalid devhandle or msinum
1862 *
1863 * Get the current valid/enabled state for the MSI defined by the
1864 * given devhandle and msinum.
1865 */
1866#define HV_FAST_PCI_MSI_GETVALID 0xc9
1867
1868/* pci_msi_setvalid()
1869 * TRAP: HV_FAST_TRAP
1870 * FUNCTION: HV_FAST_PCI_MSI_SETVALID
1871 * ARG0: devhandle
1872 * ARG1: msinum
1873 * ARG2: msivalidstate
1874 * RET0: status
1875 * ERRORS: EINVAL Invalid devhandle or msinum or msivalidstate
1876 *
1877 * Set the current valid/enabled state for the MSI defined by the
1878 * given devhandle and msinum.
1879 */
1880#define HV_FAST_PCI_MSI_SETVALID 0xca
1881
1882/* pci_msi_getmsiq()
1883 * TRAP: HV_FAST_TRAP
1884 * FUNCTION: HV_FAST_PCI_MSI_GETMSIQ
1885 * ARG0: devhandle
1886 * ARG1: msinum
1887 * RET0: status
1888 * RET1: msiqid
1889 * ERRORS: EINVAL Invalid devhandle or msinum or MSI is unbound
1890 *
1891 * Get the MSI EQ that the MSI defined by the given devhandle and
1892 * msinum is bound to.
1893 */
1894#define HV_FAST_PCI_MSI_GETMSIQ 0xcb
1895
1896/* pci_msi_setmsiq()
1897 * TRAP: HV_FAST_TRAP
1898 * FUNCTION: HV_FAST_PCI_MSI_SETMSIQ
1899 * ARG0: devhandle
1900 * ARG1: msinum
1901 * ARG2: msitype
1902 * ARG3: msiqid
1903 * RET0: status
1904 * ERRORS: EINVAL Invalid devhandle or msinum or msiqid
1905 *
1906 * Set the MSI EQ that the MSI defined by the given devhandle and
1907 * msinum is bound to.
1908 */
1909#define HV_FAST_PCI_MSI_SETMSIQ 0xcc
1910
1911/* pci_msi_getstate()
1912 * TRAP: HV_FAST_TRAP
1913 * FUNCTION: HV_FAST_PCI_MSI_GETSTATE
1914 * ARG0: devhandle
1915 * ARG1: msinum
1916 * RET0: status
1917 * RET1: msistate
1918 * ERRORS: EINVAL Invalid devhandle or msinum
1919 *
1920 * Get the state of the MSI defined by the given devhandle and msinum.
1921 * If not initialized, return HV_MSISTATE_IDLE.
1922 */
1923#define HV_FAST_PCI_MSI_GETSTATE 0xcd
1924
1925/* pci_msi_setstate()
1926 * TRAP: HV_FAST_TRAP
1927 * FUNCTION: HV_FAST_PCI_MSI_SETSTATE
1928 * ARG0: devhandle
1929 * ARG1: msinum
1930 * ARG2: msistate
1931 * RET0: status
1932 * ERRORS: EINVAL Invalid devhandle or msinum or msistate
1933 *
1934 * Set the state of the MSI defined by the given devhandle and msinum.
1935 */
1936#define HV_FAST_PCI_MSI_SETSTATE 0xce
1937
1938/* pci_msg_getmsiq()
1939 * TRAP: HV_FAST_TRAP
1940 * FUNCTION: HV_FAST_PCI_MSG_GETMSIQ
1941 * ARG0: devhandle
1942 * ARG1: msgtype
1943 * RET0: status
1944 * RET1: msiqid
1945 * ERRORS: EINVAL Invalid devhandle or msgtype
1946 *
1947 * Get the MSI EQ of the MSG defined by the given devhandle and msgtype.
1948 */
1949#define HV_FAST_PCI_MSG_GETMSIQ 0xd0
1950
1951/* pci_msg_setmsiq()
1952 * TRAP: HV_FAST_TRAP
1953 * FUNCTION: HV_FAST_PCI_MSG_SETMSIQ
1954 * ARG0: devhandle
1955 * ARG1: msgtype
1956 * ARG2: msiqid
1957 * RET0: status
1958 * ERRORS: EINVAL Invalid devhandle, msgtype, or msiqid
1959 *
1960 * Set the MSI EQ of the MSG defined by the given devhandle and msgtype.
1961 */
1962#define HV_FAST_PCI_MSG_SETMSIQ 0xd1
1963
1964/* pci_msg_getvalid()
1965 * TRAP: HV_FAST_TRAP
1966 * FUNCTION: HV_FAST_PCI_MSG_GETVALID
1967 * ARG0: devhandle
1968 * ARG1: msgtype
1969 * RET0: status
1970 * RET1: msgvalidstate
1971 * ERRORS: EINVAL Invalid devhandle or msgtype
1972 *
1973 * Get the valid/enabled state of the MSG defined by the given
1974 * devhandle and msgtype.
1975 */
1976#define HV_FAST_PCI_MSG_GETVALID 0xd2
1977
1978/* pci_msg_setvalid()
1979 * TRAP: HV_FAST_TRAP
1980 * FUNCTION: HV_FAST_PCI_MSG_SETVALID
1981 * ARG0: devhandle
1982 * ARG1: msgtype
1983 * ARG2: msgvalidstate
1984 * RET0: status
1985 * ERRORS: EINVAL Invalid devhandle or msgtype or msgvalidstate
1986 *
1987 * Set the valid/enabled state of the MSG defined by the given
1988 * devhandle and msgtype.
1989 */
1990#define HV_FAST_PCI_MSG_SETVALID 0xd3
1991
1992/* Performance counter services. */
1993
1994#define HV_PERF_JBUS_PERF_CTRL_REG 0x00
1995#define HV_PERF_JBUS_PERF_CNT_REG 0x01
1996#define HV_PERF_DRAM_PERF_CTRL_REG_0 0x02
1997#define HV_PERF_DRAM_PERF_CNT_REG_0 0x03
1998#define HV_PERF_DRAM_PERF_CTRL_REG_1 0x04
1999#define HV_PERF_DRAM_PERF_CNT_REG_1 0x05
2000#define HV_PERF_DRAM_PERF_CTRL_REG_2 0x06
2001#define HV_PERF_DRAM_PERF_CNT_REG_2 0x07
2002#define HV_PERF_DRAM_PERF_CTRL_REG_3 0x08
2003#define HV_PERF_DRAM_PERF_CNT_REG_3 0x09
2004
2005/* get_perfreg()
2006 * TRAP: HV_FAST_TRAP
2007 * FUNCTION: HV_FAST_GET_PERFREG
2008 * ARG0: performance reg number
2009 * RET0: status
2010 * RET1: performance reg value
2011 * ERRORS: EINVAL Invalid performance register number
2012 * ENOACCESS No access allowed to performance counters
2013 *
2014 * Read the value of the given DRAM/JBUS performance counter/control register.
2015 */
2016#define HV_FAST_GET_PERFREG 0x100
2017
2018/* set_perfreg()
2019 * TRAP: HV_FAST_TRAP
2020 * FUNCTION: HV_FAST_SET_PERFREG
2021 * ARG0: performance reg number
2022 * ARG1: performance reg value
2023 * RET0: status
2024 * ERRORS: EINVAL Invalid performance register number
2025 * ENOACCESS No access allowed to performance counters
2026 *
2027 * Write the given performance reg value to the given DRAM/JBUS
2028 * performance counter/control register.
2029 */
2030#define HV_FAST_SET_PERFREG 0x101
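/* Illustrative sketch: clearing DRAM performance counter 0 and reading it
 * back.  The *_hcall() wrappers are hypothetical and are assumed to issue
 * HV_FAST_SET_PERFREG / HV_FAST_GET_PERFREG, returning the hypervisor
 * status (e.g. ENOACCESS for guests without permission).
 */
#ifndef __ASSEMBLY__
extern unsigned long sun4v_set_perfreg_hcall(unsigned long reg,
					     unsigned long val);
extern unsigned long sun4v_get_perfreg_hcall(unsigned long reg,
					     unsigned long *val);

static inline int example_reset_dram_counter0(unsigned long *val)
{
	if (sun4v_set_perfreg_hcall(HV_PERF_DRAM_PERF_CNT_REG_0, 0))
		return -1;
	if (sun4v_get_perfreg_hcall(HV_PERF_DRAM_PERF_CNT_REG_0, val))
		return -1;
	return 0;
}
#endif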
2031
2032/* MMU statistics services.
2033 *
2034 * The hypervisor maintains MMU statistics and privileged code provides
2035 * a buffer where these statistics can be collected. It is continually
2036 * updated once configured. The layout is as follows:
2037 */
2038#ifndef __ASSEMBLY__
2039struct hv_mmu_statistics {
2040 unsigned long immu_tsb_hits_ctx0_8k_tte;
2041 unsigned long immu_tsb_ticks_ctx0_8k_tte;
2042 unsigned long immu_tsb_hits_ctx0_64k_tte;
2043 unsigned long immu_tsb_ticks_ctx0_64k_tte;
2044 unsigned long __reserved1[2];
2045 unsigned long immu_tsb_hits_ctx0_4mb_tte;
2046 unsigned long immu_tsb_ticks_ctx0_4mb_tte;
2047 unsigned long __reserved2[2];
2048 unsigned long immu_tsb_hits_ctx0_256mb_tte;
2049 unsigned long immu_tsb_ticks_ctx0_256mb_tte;
2050 unsigned long __reserved3[4];
2051 unsigned long immu_tsb_hits_ctxnon0_8k_tte;
2052 unsigned long immu_tsb_ticks_ctxnon0_8k_tte;
2053 unsigned long immu_tsb_hits_ctxnon0_64k_tte;
2054 unsigned long immu_tsb_ticks_ctxnon0_64k_tte;
2055 unsigned long __reserved4[2];
2056 unsigned long immu_tsb_hits_ctxnon0_4mb_tte;
2057 unsigned long immu_tsb_ticks_ctxnon0_4mb_tte;
2058 unsigned long __reserved5[2];
2059 unsigned long immu_tsb_hits_ctxnon0_256mb_tte;
2060 unsigned long immu_tsb_ticks_ctxnon0_256mb_tte;
2061 unsigned long __reserved6[4];
2062 unsigned long dmmu_tsb_hits_ctx0_8k_tte;
2063 unsigned long dmmu_tsb_ticks_ctx0_8k_tte;
2064 unsigned long dmmu_tsb_hits_ctx0_64k_tte;
2065 unsigned long dmmu_tsb_ticks_ctx0_64k_tte;
2066 unsigned long __reserved7[2];
2067 unsigned long dmmu_tsb_hits_ctx0_4mb_tte;
2068 unsigned long dmmu_tsb_ticks_ctx0_4mb_tte;
2069 unsigned long __reserved8[2];
2070 unsigned long dmmu_tsb_hits_ctx0_256mb_tte;
2071 unsigned long dmmu_tsb_ticks_ctx0_256mb_tte;
2072 unsigned long __reserved9[4];
2073 unsigned long dmmu_tsb_hits_ctxnon0_8k_tte;
2074 unsigned long dmmu_tsb_ticks_ctxnon0_8k_tte;
2075 unsigned long dmmu_tsb_hits_ctxnon0_64k_tte;
2076 unsigned long dmmu_tsb_ticks_ctxnon0_64k_tte;
2077 unsigned long __reserved10[2];
2078 unsigned long dmmu_tsb_hits_ctxnon0_4mb_tte;
2079 unsigned long dmmu_tsb_ticks_ctxnon0_4mb_tte;
2080 unsigned long __reserved11[2];
2081 unsigned long dmmu_tsb_hits_ctxnon0_256mb_tte;
2082 unsigned long dmmu_tsb_ticks_ctxnon0_256mb_tte;
2083 unsigned long __reserved12[4];
2084};
2085#endif
2086
2087/* mmustat_conf()
2088 * TRAP: HV_FAST_TRAP
2089 * FUNCTION: HV_FAST_MMUSTAT_CONF
2090 * ARG0: real address
2091 * RET0: status
2092 * RET1: real address
2093 * ERRORS: ENORADDR Invalid real address
2094 * EBADALIGN Real address not aligned on 64-byte boundary
2095 * EBADTRAP API not supported on this processor
2096 *
2097 * Enable MMU statistic gathering using the buffer at the given real
2098 * address on the current virtual CPU. The new buffer real address
2099 * is given in ARG0, and the previously specified buffer real address
2100 * is returned in RET1, or is returned as zero for the first invocation.
2101 *
2102 * If the passed in real address argument is zero, this will disable
2103 * MMU statistic collection on the current virtual CPU. If an error is
2104 * returned then no statistics are collected.
2105 *
2106 * The buffer contents should be initialized to all zeros before being
2107 * given to the hypervisor or else the statistics will be meaningless.
2108 */
2109#define HV_FAST_MMUSTAT_CONF 0x102
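/* Illustrative sketch: handing a per-cpu statistics buffer to the
 * hypervisor.  The wrapper sun4v_mmustat_conf_hcall() is hypothetical and
 * is assumed to issue HV_FAST_MMUSTAT_CONF, returning the hypervisor status
 * and the previously configured buffer address through *prev_ra.  The
 * caller supplies the buffer's real (physical) address in 'ra'.
 */
#ifndef __ASSEMBLY__
extern unsigned long sun4v_mmustat_conf_hcall(unsigned long ra,
					      unsigned long *prev_ra);

static inline int example_enable_mmu_stats(struct hv_mmu_statistics *buf,
					   unsigned long ra)
{
	unsigned long *p = (unsigned long *) buf;
	unsigned long i, prev;

	/* The buffer must be zeroed before the hypervisor starts using it. */
	for (i = 0; i < sizeof(*buf) / sizeof(*p); i++)
		p[i] = 0;

	/* A misaligned address would fail with EBADALIGN. */
	if (ra & 0x3fUL)
		return -1;

	return sun4v_mmustat_conf_hcall(ra, &prev) ? -1 : 0;
}
#endif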
2110
2111/* mmustat_info()
2112 * TRAP: HV_FAST_TRAP
2113 * FUNCTION: HV_FAST_MMUSTAT_INFO
2114 * RET0: status
2115 * RET1: real address
2116 * ERRORS: EBADTRAP API not supported on this processor
2117 *
2118 * Return the current state and real address of the currently configured
2119 * MMU statistics buffer on the current virtual CPU.
2120 */
2121#define HV_FAST_MMUSTAT_INFO 0x103
2122
2123/* Function numbers for HV_CORE_TRAP. */
2124#define HV_CORE_VER 0x00
2125#define HV_CORE_PUTCHAR 0x01
2126#define HV_CORE_EXIT 0x02
2127
2128#endif /* !(_SPARC64_HYPERVISOR_H) */
diff --git a/include/asm-sparc64/idprom.h b/include/asm-sparc64/idprom.h
index 701483c5465d..77fbf987385f 100644
--- a/include/asm-sparc64/idprom.h
+++ b/include/asm-sparc64/idprom.h
@@ -9,15 +9,7 @@
9 9
10#include <linux/types.h> 10#include <linux/types.h>
11 11
12/* Offset into the EEPROM where the id PROM is located on the 4c */ 12struct idprom {
13#define IDPROM_OFFSET 0x7d8
14
15/* On sun4m; physical. */
16/* MicroSPARC(-II) does not decode 31rd bit, but it works. */
17#define IDPROM_OFFSET_M 0xfd8
18
19struct idprom
20{
21 u8 id_format; /* Format identifier (always 0x01) */ 13 u8 id_format; /* Format identifier (always 0x01) */
22 u8 id_machtype; /* Machine type */ 14 u8 id_machtype; /* Machine type */
23 u8 id_ethaddr[6]; /* Hardware ethernet address */ 15 u8 id_ethaddr[6]; /* Hardware ethernet address */
@@ -30,6 +22,4 @@ struct idprom
30extern struct idprom *idprom; 22extern struct idprom *idprom;
31extern void idprom_init(void); 23extern void idprom_init(void);
32 24
33#define IDPROM_SIZE (sizeof(struct idprom))
34
35#endif /* !(_SPARC_IDPROM_H) */ 25#endif /* !(_SPARC_IDPROM_H) */
diff --git a/include/asm-sparc64/intr_queue.h b/include/asm-sparc64/intr_queue.h
new file mode 100644
index 000000000000..206077dedc2a
--- /dev/null
+++ b/include/asm-sparc64/intr_queue.h
@@ -0,0 +1,15 @@
1#ifndef _SPARC64_INTR_QUEUE_H
2#define _SPARC64_INTR_QUEUE_H
3
4/* Sun4v interrupt queue registers, accessed via ASI_QUEUE. */
5
6#define INTRQ_CPU_MONDO_HEAD 0x3c0 /* CPU mondo head */
7#define INTRQ_CPU_MONDO_TAIL 0x3c8 /* CPU mondo tail */
8#define INTRQ_DEVICE_MONDO_HEAD 0x3d0 /* Device mondo head */
9#define INTRQ_DEVICE_MONDO_TAIL 0x3d8 /* Device mondo tail */
10#define INTRQ_RESUM_MONDO_HEAD 0x3e0 /* Resumable error mondo head */
11#define INTRQ_RESUM_MONDO_TAIL 0x3e8 /* Resumable error mondo tail */
12#define INTRQ_NONRESUM_MONDO_HEAD 0x3f0 /* Non-resumable error mondo head */
13#define INTRQ_NONRESUM_MONDO_TAIL 0x3f8 /* Non-resumable error mondo tail */
14
15#endif /* !(_SPARC64_INTR_QUEUE_H) */
diff --git a/include/asm-sparc64/irq.h b/include/asm-sparc64/irq.h
index 8b70edcb80dc..de33d6e1afb5 100644
--- a/include/asm-sparc64/irq.h
+++ b/include/asm-sparc64/irq.h
@@ -72,8 +72,11 @@ struct ino_bucket {
72#define IMAP_VALID 0x80000000 /* IRQ Enabled */ 72#define IMAP_VALID 0x80000000 /* IRQ Enabled */
73#define IMAP_TID_UPA 0x7c000000 /* UPA TargetID */ 73#define IMAP_TID_UPA 0x7c000000 /* UPA TargetID */
74#define IMAP_TID_JBUS 0x7c000000 /* JBUS TargetID */ 74#define IMAP_TID_JBUS 0x7c000000 /* JBUS TargetID */
75#define IMAP_TID_SHIFT 26
75#define IMAP_AID_SAFARI 0x7c000000 /* Safari AgentID */ 76#define IMAP_AID_SAFARI 0x7c000000 /* Safari AgentID */
77#define IMAP_AID_SHIFT 26
76#define IMAP_NID_SAFARI 0x03e00000 /* Safari NodeID */ 78#define IMAP_NID_SAFARI 0x03e00000 /* Safari NodeID */
79#define IMAP_NID_SHIFT 21
77#define IMAP_IGN 0x000007c0 /* IRQ Group Number */ 80#define IMAP_IGN 0x000007c0 /* IRQ Group Number */
78#define IMAP_INO 0x0000003f /* IRQ Number */ 81#define IMAP_INO 0x0000003f /* IRQ Number */
79#define IMAP_INR 0x000007ff /* Full interrupt number*/ 82#define IMAP_INR 0x000007ff /* Full interrupt number*/
@@ -111,6 +114,7 @@ extern void disable_irq(unsigned int);
111#define disable_irq_nosync disable_irq 114#define disable_irq_nosync disable_irq
112extern void enable_irq(unsigned int); 115extern void enable_irq(unsigned int);
113extern unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap); 116extern unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap);
117extern unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, int pil, unsigned char flags);
114extern unsigned int sbus_build_irq(void *sbus, unsigned int ino); 118extern unsigned int sbus_build_irq(void *sbus, unsigned int ino);
115 119
116static __inline__ void set_softint(unsigned long bits) 120static __inline__ void set_softint(unsigned long bits)
diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h
index 8627eed6e83d..230ba678d3b0 100644
--- a/include/asm-sparc64/mmu.h
+++ b/include/asm-sparc64/mmu.h
@@ -4,20 +4,9 @@
4#include <linux/config.h> 4#include <linux/config.h>
5#include <asm/page.h> 5#include <asm/page.h>
6#include <asm/const.h> 6#include <asm/const.h>
7#include <asm/hypervisor.h>
7 8
8/* 9#define CTX_NR_BITS 13
9 * For the 8k pagesize kernel, use only 10 hw context bits to optimize some
10 * shifts in the fast tlbmiss handlers, instead of all 13 bits (specifically
11 * for vpte offset calculation). For other pagesizes, this optimization in
12 * the tlbhandlers can not be done; but still, all 13 bits can not be used
13 * because the tlb handlers use "andcc" instruction which sign extends 13
14 * bit arguments.
15 */
16#if PAGE_SHIFT == 13
17#define CTX_NR_BITS 10
18#else
19#define CTX_NR_BITS 12
20#endif
21 10
22#define TAG_CONTEXT_BITS ((_AC(1,UL) << CTX_NR_BITS) - _AC(1,UL)) 11#define TAG_CONTEXT_BITS ((_AC(1,UL) << CTX_NR_BITS) - _AC(1,UL))
23 12
@@ -90,8 +79,27 @@
90 79
91#ifndef __ASSEMBLY__ 80#ifndef __ASSEMBLY__
92 81
82#define TSB_ENTRY_ALIGNMENT 16
83
84struct tsb {
85 unsigned long tag;
86 unsigned long pte;
87} __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));
88
89extern void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte);
90extern void tsb_flush(unsigned long ent, unsigned long tag);
91extern void tsb_init(struct tsb *tsb, unsigned long size);
92
93typedef struct { 93typedef struct {
94 unsigned long sparc64_ctx_val; 94 spinlock_t lock;
95 unsigned long sparc64_ctx_val;
96 struct tsb *tsb;
97 unsigned long tsb_rss_limit;
98 unsigned long tsb_nentries;
99 unsigned long tsb_reg_val;
100 unsigned long tsb_map_vaddr;
101 unsigned long tsb_map_pte;
102 struct hv_tsb_descr tsb_descr;
95} mm_context_t; 103} mm_context_t;
96 104
97#endif /* !__ASSEMBLY__ */ 105#endif /* !__ASSEMBLY__ */
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 57ee7b306189..e7974321d052 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -19,96 +19,98 @@ extern unsigned long tlb_context_cache;
19extern unsigned long mmu_context_bmap[]; 19extern unsigned long mmu_context_bmap[];
20 20
21extern void get_new_mmu_context(struct mm_struct *mm); 21extern void get_new_mmu_context(struct mm_struct *mm);
22#ifdef CONFIG_SMP
23extern void smp_new_mmu_context_version(void);
24#else
25#define smp_new_mmu_context_version() do { } while (0)
26#endif
27
28extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
29extern void destroy_context(struct mm_struct *mm);
30
31extern void __tsb_context_switch(unsigned long pgd_pa,
32 unsigned long tsb_reg,
33 unsigned long tsb_vaddr,
34 unsigned long tsb_pte,
35 unsigned long tsb_descr_pa);
36
37static inline void tsb_context_switch(struct mm_struct *mm)
38{
39 __tsb_context_switch(__pa(mm->pgd), mm->context.tsb_reg_val,
40 mm->context.tsb_map_vaddr,
41 mm->context.tsb_map_pte,
42 __pa(&mm->context.tsb_descr));
43}
22 44
23/* Initialize a new mmu context. This is invoked when a new 45extern void tsb_grow(struct mm_struct *mm, unsigned long mm_rss);
24 * address space instance (unique or shared) is instantiated. 46#ifdef CONFIG_SMP
25 * This just needs to set mm->context to an invalid context. 47extern void smp_tsb_sync(struct mm_struct *mm);
26 */ 48#else
27#define init_new_context(__tsk, __mm) \ 49#define smp_tsb_sync(__mm) do { } while (0)
28 (((__mm)->context.sparc64_ctx_val = 0UL), 0) 50#endif
29
30/* Destroy a dead context. This occurs when mmput drops the
31 * mm_users count to zero, the mmaps have been released, and
32 * all the page tables have been flushed. Our job is to destroy
33 * any remaining processor-specific state, and in the sparc64
34 * case this just means freeing up the mmu context ID held by
35 * this task if valid.
36 */
37#define destroy_context(__mm) \
38do { spin_lock(&ctx_alloc_lock); \
39 if (CTX_VALID((__mm)->context)) { \
40 unsigned long nr = CTX_NRBITS((__mm)->context); \
41 mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63)); \
42 } \
43 spin_unlock(&ctx_alloc_lock); \
44} while(0)
45
46/* Reload the two core values used by TLB miss handler
47 * processing on sparc64. They are:
48 * 1) The physical address of mm->pgd, when full page
49 * table walks are necessary, this is where the
50 * search begins.
51 * 2) A "PGD cache". For 32-bit tasks only pgd[0] is
52 * ever used since that maps the entire low 4GB
53 * completely. To speed up TLB miss processing we
54 * make this value available to the handlers. This
55 * decreases the amount of memory traffic incurred.
56 */
57#define reload_tlbmiss_state(__tsk, __mm) \
58do { \
59 register unsigned long paddr asm("o5"); \
60 register unsigned long pgd_cache asm("o4"); \
61 paddr = __pa((__mm)->pgd); \
62 pgd_cache = 0UL; \
63 if (task_thread_info(__tsk)->flags & _TIF_32BIT) \
64 pgd_cache = get_pgd_cache((__mm)->pgd); \
65 __asm__ __volatile__("wrpr %%g0, 0x494, %%pstate\n\t" \
66 "mov %3, %%g4\n\t" \
67 "mov %0, %%g7\n\t" \
68 "stxa %1, [%%g4] %2\n\t" \
69 "membar #Sync\n\t" \
70 "wrpr %%g0, 0x096, %%pstate" \
71 : /* no outputs */ \
72 : "r" (paddr), "r" (pgd_cache),\
73 "i" (ASI_DMMU), "i" (TSB_REG)); \
74} while(0)
75 51
76/* Set MMU context in the actual hardware. */ 52/* Set MMU context in the actual hardware. */
77#define load_secondary_context(__mm) \ 53#define load_secondary_context(__mm) \
78 __asm__ __volatile__("stxa %0, [%1] %2\n\t" \ 54 __asm__ __volatile__( \
79 "flush %%g6" \ 55 "\n661: stxa %0, [%1] %2\n" \
80 : /* No outputs */ \ 56 " .section .sun4v_1insn_patch, \"ax\"\n" \
81 : "r" (CTX_HWBITS((__mm)->context)), \ 57 " .word 661b\n" \
82 "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU)) 58 " stxa %0, [%1] %3\n" \
59 " .previous\n" \
60 " flush %%g6\n" \
61 : /* No outputs */ \
62 : "r" (CTX_HWBITS((__mm)->context)), \
63 "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU), "i" (ASI_MMU))
83 64
84extern void __flush_tlb_mm(unsigned long, unsigned long); 65extern void __flush_tlb_mm(unsigned long, unsigned long);
85 66
86/* Switch the current MM context. */ 67/* Switch the current MM context. Interrupts are disabled. */
87static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk) 68static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
88{ 69{
89 unsigned long ctx_valid; 70 unsigned long ctx_valid, flags;
90 int cpu; 71 int cpu;
91 72
92 /* Note: page_table_lock is used here to serialize switch_mm 73 spin_lock_irqsave(&mm->context.lock, flags);
93 * and activate_mm, and their calls to get_new_mmu_context.
94 * This use of page_table_lock is unrelated to its other uses.
95 */
96 spin_lock(&mm->page_table_lock);
97 ctx_valid = CTX_VALID(mm->context); 74 ctx_valid = CTX_VALID(mm->context);
98 if (!ctx_valid) 75 if (!ctx_valid)
99 get_new_mmu_context(mm); 76 get_new_mmu_context(mm);
100 spin_unlock(&mm->page_table_lock);
101 77
102 if (!ctx_valid || (old_mm != mm)) { 78 /* We have to be extremely careful here or else we will miss
103 load_secondary_context(mm); 79 * a TSB grow if we switch back and forth between a kernel
104 reload_tlbmiss_state(tsk, mm); 80 * thread and an address space which has its TSB size increased
105 } 81 * on another processor.
82 *
83 * It is possible to play some games in order to optimize the
84 * switch, but the safest thing to do is to unconditionally
85 * perform the secondary context load and the TSB context switch.
86 *
87 * For reference the bad case is, for address space "A":
88 *
89 * CPU 0 CPU 1
90 * run address space A
91 * set cpu0's bits in cpu_vm_mask
92 * switch to kernel thread, borrow
93 * address space A via entry_lazy_tlb
94 * run address space A
95 * set cpu1's bit in cpu_vm_mask
96 * flush_tlb_pending()
97 * reset cpu_vm_mask to just cpu1
98 * TSB grow
99 * run address space A
100 * context was valid, so skip
101 * TSB context switch
102 *
103 * At that point cpu0 continues to use a stale TSB, the one from
104 * before the TSB grow performed on cpu1. cpu1 did not cross-call
105 * cpu0 to update its TSB because at that point the cpu_vm_mask
106 * only had cpu1 set in it.
107 */
108 load_secondary_context(mm);
109 tsb_context_switch(mm);
106 110
107 /* Even if (mm == old_mm) we _must_ check 111 /* Any time a processor runs a context on an address space
108 * the cpu_vm_mask. If we do not we could 112 * for the first time, we must flush that context out of the
109 * corrupt the TLB state because of how 113 * local TLB.
110 * smp_flush_tlb_{page,range,mm} on sparc64
111 * and lazy tlb switches work. -DaveM
112 */ 114 */
113 cpu = smp_processor_id(); 115 cpu = smp_processor_id();
114 if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) { 116 if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
@@ -116,6 +118,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
116 __flush_tlb_mm(CTX_HWBITS(mm->context), 118 __flush_tlb_mm(CTX_HWBITS(mm->context),
117 SECONDARY_CONTEXT); 119 SECONDARY_CONTEXT);
118 } 120 }
121 spin_unlock_irqrestore(&mm->context.lock, flags);
119} 122}
120 123
121#define deactivate_mm(tsk,mm) do { } while (0) 124#define deactivate_mm(tsk,mm) do { } while (0)
@@ -123,23 +126,20 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
123/* Activate a new MM instance for the current task. */ 126/* Activate a new MM instance for the current task. */
124static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm) 127static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
125{ 128{
129 unsigned long flags;
126 int cpu; 130 int cpu;
127 131
128 /* Note: page_table_lock is used here to serialize switch_mm 132 spin_lock_irqsave(&mm->context.lock, flags);
129 * and activate_mm, and their calls to get_new_mmu_context.
130 * This use of page_table_lock is unrelated to its other uses.
131 */
132 spin_lock(&mm->page_table_lock);
133 if (!CTX_VALID(mm->context)) 133 if (!CTX_VALID(mm->context))
134 get_new_mmu_context(mm); 134 get_new_mmu_context(mm);
135 cpu = smp_processor_id(); 135 cpu = smp_processor_id();
136 if (!cpu_isset(cpu, mm->cpu_vm_mask)) 136 if (!cpu_isset(cpu, mm->cpu_vm_mask))
137 cpu_set(cpu, mm->cpu_vm_mask); 137 cpu_set(cpu, mm->cpu_vm_mask);
138 spin_unlock(&mm->page_table_lock);
139 138
140 load_secondary_context(mm); 139 load_secondary_context(mm);
141 __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT); 140 __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
142 reload_tlbmiss_state(current, mm); 141 tsb_context_switch(mm);
142 spin_unlock_irqrestore(&mm->context.lock, flags);
143} 143}
144 144
145#endif /* !(__ASSEMBLY__) */ 145#endif /* !(__ASSEMBLY__) */
diff --git a/include/asm-sparc64/numnodes.h b/include/asm-sparc64/numnodes.h
new file mode 100644
index 000000000000..017e7e74f5e7
--- /dev/null
+++ b/include/asm-sparc64/numnodes.h
@@ -0,0 +1,6 @@
1#ifndef _SPARC64_NUMNODES_H
2#define _SPARC64_NUMNODES_H
3
4#define NODES_SHIFT 0
5
6#endif /* !(_SPARC64_NUMNODES_H) */
diff --git a/include/asm-sparc64/oplib.h b/include/asm-sparc64/oplib.h
index 3c59b2693fb9..c754676e13ef 100644
--- a/include/asm-sparc64/oplib.h
+++ b/include/asm-sparc64/oplib.h
@@ -12,18 +12,8 @@
12#include <linux/config.h> 12#include <linux/config.h>
13#include <asm/openprom.h> 13#include <asm/openprom.h>
14 14
15/* Enumeration to describe the prom major version we have detected. */ 15/* OBP version string. */
16enum prom_major_version { 16extern char prom_version[];
17 PROM_V0, /* Original sun4c V0 prom */
18 PROM_V2, /* sun4c and early sun4m V2 prom */
19 PROM_V3, /* sun4m and later, up to sun4d/sun4e machines V3 */
20 PROM_P1275, /* IEEE compliant ISA based Sun PROM, only sun4u */
21 PROM_AP1000, /* actually no prom at all */
22};
23
24extern enum prom_major_version prom_vers;
25/* Revision, and firmware revision. */
26extern unsigned int prom_rev, prom_prev;
27 17
28/* Root node of the prom device tree, this stays constant after 18/* Root node of the prom device tree, this stays constant after
29 * initialization is complete. 19 * initialization is complete.
@@ -39,6 +29,9 @@ extern int prom_stdin, prom_stdout;
39extern int prom_chosen_node; 29extern int prom_chosen_node;
40 30
41/* Helper values and strings in arch/sparc64/kernel/head.S */ 31/* Helper values and strings in arch/sparc64/kernel/head.S */
32extern const char prom_peer_name[];
33extern const char prom_compatible_name[];
34extern const char prom_root_compatible[];
42extern const char prom_finddev_name[]; 35extern const char prom_finddev_name[];
43extern const char prom_chosen_path[]; 36extern const char prom_chosen_path[];
44extern const char prom_getprop_name[]; 37extern const char prom_getprop_name[];
@@ -130,15 +123,6 @@ extern void prom_setcallback(callback_func_t func_ptr);
130 */ 123 */
131extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size); 124extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size);
132 125
133/* Get the prom major version. */
134extern int prom_version(void);
135
136/* Get the prom plugin revision. */
137extern int prom_getrev(void);
138
139/* Get the prom firmware revision. */
140extern int prom_getprev(void);
141
142/* Character operations to/from the console.... */ 126/* Character operations to/from the console.... */
143 127
144/* Non-blocking get character from console. */ 128/* Non-blocking get character from console. */
@@ -164,6 +148,7 @@ enum prom_input_device {
164 PROMDEV_ITTYA, /* input from ttya */ 148 PROMDEV_ITTYA, /* input from ttya */
165 PROMDEV_ITTYB, /* input from ttyb */ 149 PROMDEV_ITTYB, /* input from ttyb */
166 PROMDEV_IRSC, /* input from rsc */ 150 PROMDEV_IRSC, /* input from rsc */
151 PROMDEV_IVCONS, /* input from virtual-console */
167 PROMDEV_I_UNK, 152 PROMDEV_I_UNK,
168}; 153};
169 154
@@ -176,6 +161,7 @@ enum prom_output_device {
176 PROMDEV_OTTYA, /* to ttya */ 161 PROMDEV_OTTYA, /* to ttya */
177 PROMDEV_OTTYB, /* to ttyb */ 162 PROMDEV_OTTYB, /* to ttyb */
178 PROMDEV_ORSC, /* to rsc */ 163 PROMDEV_ORSC, /* to rsc */
164 PROMDEV_OVCONS, /* to virtual-console */
179 PROMDEV_O_UNK, 165 PROMDEV_O_UNK,
180}; 166};
181 167
@@ -183,10 +169,18 @@ extern enum prom_output_device prom_query_output_device(void);
183 169
184/* Multiprocessor operations... */ 170/* Multiprocessor operations... */
185#ifdef CONFIG_SMP 171#ifdef CONFIG_SMP
186/* Start the CPU with the given device tree node, context table, and context 172/* Start the CPU with the given device tree node at the passed program
187 * at the passed program counter. 173 * counter with the given arg passed in via register %o0.
174 */
175extern void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg);
176
177/* Start the CPU with the given cpu ID at the passed program
178 * counter with the given arg passed in via register %o0.
188 */ 179 */
189extern void prom_startcpu(int cpunode, unsigned long pc, unsigned long o0); 180extern void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg);
181
182/* Stop the CPU with the given cpu ID. */
183extern void prom_stopcpu_cpuid(int cpuid);
190 184
191/* Stop the current CPU. */ 185/* Stop the current CPU. */
192extern void prom_stopself(void); 186extern void prom_stopself(void);
@@ -335,6 +329,7 @@ int cpu_find_by_mid(int mid, int *prom_node);
335 329
336/* Client interface level routines. */ 330/* Client interface level routines. */
337extern void prom_set_trap_table(unsigned long tba); 331extern void prom_set_trap_table(unsigned long tba);
332extern void prom_set_trap_table_sun4v(unsigned long tba, unsigned long mmfsa);
338 333
339extern long p1275_cmd(const char *, long, ...); 334extern long p1275_cmd(const char *, long, ...);
340 335
diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h
index 5426bb28a993..fcb2812265f4 100644
--- a/include/asm-sparc64/page.h
+++ b/include/asm-sparc64/page.h
@@ -104,10 +104,12 @@ typedef unsigned long pgprot_t;
104#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 104#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
105#define ARCH_HAS_SETCLEAR_HUGE_PTE 105#define ARCH_HAS_SETCLEAR_HUGE_PTE
106#define ARCH_HAS_HUGETLB_PREFAULT_HOOK 106#define ARCH_HAS_HUGETLB_PREFAULT_HOOK
107#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
107#endif 108#endif
108 109
109#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? \ 110#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? \
110 (_AC(0x0000000070000000,UL)) : (PAGE_OFFSET)) 111 (_AC(0x0000000070000000,UL)) : \
112 (_AC(0xfffff80000000000,UL) + (1UL << 32UL)))
111 113
112#endif /* !(__ASSEMBLY__) */ 114#endif /* !(__ASSEMBLY__) */
113 115
@@ -124,17 +126,10 @@ typedef unsigned long pgprot_t;
124#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET) 126#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
125#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) 127#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
126 128
127/* PFNs are real physical page numbers. However, mem_map only begins to record 129#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
128 * per-page information starting at pfn_base. This is to handle systems where
129 * the first physical page in the machine is at some huge physical address,
130 * such as 4GB. This is common on a partitioned E10000, for example.
131 */
132extern struct page *pfn_to_page(unsigned long pfn);
133extern unsigned long page_to_pfn(struct page *);
134 130
135#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr)>>PAGE_SHIFT) 131#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr)>>PAGE_SHIFT)
136 132
137#define pfn_valid(pfn) (((pfn)-(pfn_base)) < max_mapnr)
138#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) 133#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
139 134
140#define virt_to_phys __pa 135#define virt_to_phys __pa
diff --git a/include/asm-sparc64/pbm.h b/include/asm-sparc64/pbm.h
index dd35a2c7798a..1396f110939a 100644
--- a/include/asm-sparc64/pbm.h
+++ b/include/asm-sparc64/pbm.h
@@ -139,6 +139,9 @@ struct pci_pbm_info {
139 /* Opaque 32-bit system bus Port ID. */ 139 /* Opaque 32-bit system bus Port ID. */
140 u32 portid; 140 u32 portid;
141 141
142 /* Opaque 32-bit handle used for hypervisor calls. */
143 u32 devhandle;
144
142 /* Chipset version information. */ 145 /* Chipset version information. */
143 int chip_type; 146 int chip_type;
144#define PBM_CHIP_TYPE_SABRE 1 147#define PBM_CHIP_TYPE_SABRE 1
diff --git a/include/asm-sparc64/pci.h b/include/asm-sparc64/pci.h
index 89bd71b1c0d8..7c5a589ea437 100644
--- a/include/asm-sparc64/pci.h
+++ b/include/asm-sparc64/pci.h
@@ -41,10 +41,26 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
41 41
42struct pci_dev; 42struct pci_dev;
43 43
44struct pci_iommu_ops {
45 void *(*alloc_consistent)(struct pci_dev *, size_t, dma_addr_t *);
46 void (*free_consistent)(struct pci_dev *, size_t, void *, dma_addr_t);
47 dma_addr_t (*map_single)(struct pci_dev *, void *, size_t, int);
48 void (*unmap_single)(struct pci_dev *, dma_addr_t, size_t, int);
49 int (*map_sg)(struct pci_dev *, struct scatterlist *, int, int);
50 void (*unmap_sg)(struct pci_dev *, struct scatterlist *, int, int);
51 void (*dma_sync_single_for_cpu)(struct pci_dev *, dma_addr_t, size_t, int);
52 void (*dma_sync_sg_for_cpu)(struct pci_dev *, struct scatterlist *, int, int);
53};
54
55extern struct pci_iommu_ops *pci_iommu_ops;
56
44/* Allocate and map kernel buffer using consistent mode DMA for a device. 57/* Allocate and map kernel buffer using consistent mode DMA for a device.
45 * hwdev should be valid struct pci_dev pointer for PCI devices. 58 * hwdev should be valid struct pci_dev pointer for PCI devices.
46 */ 59 */
47extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle); 60static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
61{
62 return pci_iommu_ops->alloc_consistent(hwdev, size, dma_handle);
63}
48 64
49/* Free and unmap a consistent DMA buffer. 65/* Free and unmap a consistent DMA buffer.
50 * cpu_addr is what was returned from pci_alloc_consistent, 66 * cpu_addr is what was returned from pci_alloc_consistent,
@@ -54,7 +70,10 @@ extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t
54 * References to the memory and mappings associated with cpu_addr/dma_addr 70 * References to the memory and mappings associated with cpu_addr/dma_addr
55 * past this call are illegal. 71 * past this call are illegal.
56 */ 72 */
57extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle); 73static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
74{
75 return pci_iommu_ops->free_consistent(hwdev, size, vaddr, dma_handle);
76}
58 77
59/* Map a single buffer of the indicated size for DMA in streaming mode. 78/* Map a single buffer of the indicated size for DMA in streaming mode.
60 * The 32-bit bus address to use is returned. 79 * The 32-bit bus address to use is returned.
@@ -62,7 +81,10 @@ extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr,
62 * Once the device is given the dma address, the device owns this memory 81 * Once the device is given the dma address, the device owns this memory
63 * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed. 82 * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed.
64 */ 83 */
65extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction); 84static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
85{
86 return pci_iommu_ops->map_single(hwdev, ptr, size, direction);
87}
66 88
67/* Unmap a single streaming mode DMA translation. The dma_addr and size 89/* Unmap a single streaming mode DMA translation. The dma_addr and size
68 * must match what was provided for in a previous pci_map_single call. All 90 * must match what was provided for in a previous pci_map_single call. All
@@ -71,7 +93,10 @@ extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
71 * After this call, reads by the cpu to the buffer are guaranteed to see 93 * After this call, reads by the cpu to the buffer are guaranteed to see
72 * whatever the device wrote there. 94 * whatever the device wrote there.
73 */ 95 */
74extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction); 96static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction)
97{
98 pci_iommu_ops->unmap_single(hwdev, dma_addr, size, direction);
99}
75 100
76/* No highmem on sparc64, plus we have an IOMMU, so mapping pages is easy. */ 101/* No highmem on sparc64, plus we have an IOMMU, so mapping pages is easy. */
77#define pci_map_page(dev, page, off, size, dir) \ 102#define pci_map_page(dev, page, off, size, dir) \
@@ -107,15 +132,19 @@ extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t
107 * Device ownership issues as mentioned above for pci_map_single are 132 * Device ownership issues as mentioned above for pci_map_single are
108 * the same here. 133 * the same here.
109 */ 134 */
110extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, 135static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
111 int nents, int direction); 136{
137 return pci_iommu_ops->map_sg(hwdev, sg, nents, direction);
138}
112 139
113/* Unmap a set of streaming mode DMA translations. 140/* Unmap a set of streaming mode DMA translations.
114 * Again, cpu read rules concerning calls here are the same as for 141 * Again, cpu read rules concerning calls here are the same as for
115 * pci_unmap_single() above. 142 * pci_unmap_single() above.
116 */ 143 */
117extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, 144static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nhwents, int direction)
118 int nhwents, int direction); 145{
146 pci_iommu_ops->unmap_sg(hwdev, sg, nhwents, direction);
147}
119 148
120/* Make physical memory consistent for a single 149/* Make physical memory consistent for a single
121 * streaming mode DMA translation after a transfer. 150 * streaming mode DMA translation after a transfer.
@@ -127,8 +156,10 @@ extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
127 * must first perform a pci_dma_sync_for_device, and then the 156 * must first perform a pci_dma_sync_for_device, and then the
128 * device again owns the buffer. 157 * device again owns the buffer.
129 */ 158 */
130extern void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, 159static inline void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction)
131 size_t size, int direction); 160{
161 pci_iommu_ops->dma_sync_single_for_cpu(hwdev, dma_handle, size, direction);
162}
132 163
133static inline void 164static inline void
134pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, 165pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
@@ -144,7 +175,10 @@ pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
144 * The same as pci_dma_sync_single_* but for a scatter-gather list, 175 * The same as pci_dma_sync_single_* but for a scatter-gather list,
145 * same rules and usage. 176 * same rules and usage.
146 */ 177 */
147extern void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction); 178static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
179{
180 pci_iommu_ops->dma_sync_sg_for_cpu(hwdev, sg, nelems, direction);
181}
148 182
149static inline void 183static inline void
150pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, 184pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
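The pci.h hunk above replaces the exported pci_* DMA helpers with inline wrappers that indirect through a pci_iommu_ops vector, so the sun4u IOMMU path and the sun4v hypervisor-backed path can each install their own implementation at probe time. A minimal sketch of that installation step follows; the sun4v_* function names and the probe hook are hypothetical placeholders, not the names used by the real controller code.

/* Sketch only: a controller backend installing its DMA ops.
 * All sun4v_* names below are hypothetical placeholders.
 */
#include <linux/pci.h>
#include <linux/types.h>

struct scatterlist;

extern void *sun4v_alloc_consistent(struct pci_dev *, size_t, dma_addr_t *);
extern void sun4v_free_consistent(struct pci_dev *, size_t, void *, dma_addr_t);
extern dma_addr_t sun4v_map_single(struct pci_dev *, void *, size_t, int);
extern void sun4v_unmap_single(struct pci_dev *, dma_addr_t, size_t, int);
extern int sun4v_map_sg(struct pci_dev *, struct scatterlist *, int, int);
extern void sun4v_unmap_sg(struct pci_dev *, struct scatterlist *, int, int);
extern void sun4v_dma_sync_single_for_cpu(struct pci_dev *, dma_addr_t, size_t, int);
extern void sun4v_dma_sync_sg_for_cpu(struct pci_dev *, struct scatterlist *, int, int);

static struct pci_iommu_ops sun4v_iommu_ops = {
	.alloc_consistent	= sun4v_alloc_consistent,
	.free_consistent	= sun4v_free_consistent,
	.map_single		= sun4v_map_single,
	.unmap_single		= sun4v_unmap_single,
	.map_sg			= sun4v_map_sg,
	.unmap_sg		= sun4v_unmap_sg,
	.dma_sync_single_for_cpu = sun4v_dma_sync_single_for_cpu,
	.dma_sync_sg_for_cpu	= sun4v_dma_sync_sg_for_cpu,
};

static void sun4v_pci_controller_probe(void)
{
	/* From here on, every pci_map_single()/pci_map_sg() caller is
	 * routed through the sun4v implementations above.
	 */
	pci_iommu_ops = &sun4v_iommu_ops;
}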
diff --git a/include/asm-sparc64/pgalloc.h b/include/asm-sparc64/pgalloc.h
index a96067cca963..12e4a273bd43 100644
--- a/include/asm-sparc64/pgalloc.h
+++ b/include/asm-sparc64/pgalloc.h
@@ -6,6 +6,7 @@
6#include <linux/kernel.h> 6#include <linux/kernel.h>
7#include <linux/sched.h> 7#include <linux/sched.h>
8#include <linux/mm.h> 8#include <linux/mm.h>
9#include <linux/slab.h>
9 10
10#include <asm/spitfire.h> 11#include <asm/spitfire.h>
11#include <asm/cpudata.h> 12#include <asm/cpudata.h>
@@ -13,172 +14,59 @@
13#include <asm/page.h> 14#include <asm/page.h>
14 15
15/* Page table allocation/freeing. */ 16/* Page table allocation/freeing. */
16#ifdef CONFIG_SMP 17extern kmem_cache_t *pgtable_cache;
17/* Sliiiicck */
18#define pgt_quicklists local_cpu_data()
19#else
20extern struct pgtable_cache_struct {
21 unsigned long *pgd_cache;
22 unsigned long *pte_cache[2];
23 unsigned int pgcache_size;
24} pgt_quicklists;
25#endif
26#define pgd_quicklist (pgt_quicklists.pgd_cache)
27#define pmd_quicklist ((unsigned long *)0)
28#define pte_quicklist (pgt_quicklists.pte_cache)
29#define pgtable_cache_size (pgt_quicklists.pgcache_size)
30 18
31static __inline__ void free_pgd_fast(pgd_t *pgd) 19static inline pgd_t *pgd_alloc(struct mm_struct *mm)
32{ 20{
33 preempt_disable(); 21 return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
34 *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
35 pgd_quicklist = (unsigned long *) pgd;
36 pgtable_cache_size++;
37 preempt_enable();
38} 22}
39 23
40static __inline__ pgd_t *get_pgd_fast(void) 24static inline void pgd_free(pgd_t *pgd)
41{ 25{
42 unsigned long *ret; 26 kmem_cache_free(pgtable_cache, pgd);
43
44 preempt_disable();
45 if((ret = pgd_quicklist) != NULL) {
46 pgd_quicklist = (unsigned long *)(*ret);
47 ret[0] = 0;
48 pgtable_cache_size--;
49 preempt_enable();
50 } else {
51 preempt_enable();
52 ret = (unsigned long *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
53 if(ret)
54 memset(ret, 0, PAGE_SIZE);
55 }
56 return (pgd_t *)ret;
57} 27}
58 28
59static __inline__ void free_pgd_slow(pgd_t *pgd)
60{
61 free_page((unsigned long)pgd);
62}
63
64#ifdef DCACHE_ALIASING_POSSIBLE
65#define VPTE_COLOR(address) (((address) >> (PAGE_SHIFT + 10)) & 1UL)
66#define DCACHE_COLOR(address) (((address) >> PAGE_SHIFT) & 1UL)
67#else
68#define VPTE_COLOR(address) 0
69#define DCACHE_COLOR(address) 0
70#endif
71
72#define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD) 29#define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
73 30
74static __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address) 31static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
75{ 32{
76 unsigned long *ret; 33 return kmem_cache_alloc(pgtable_cache,
77 int color = 0; 34 GFP_KERNEL|__GFP_REPEAT);
78
79 preempt_disable();
80 if (pte_quicklist[color] == NULL)
81 color = 1;
82
83 if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
84 pte_quicklist[color] = (unsigned long *)(*ret);
85 ret[0] = 0;
86 pgtable_cache_size--;
87 }
88 preempt_enable();
89
90 return (pmd_t *)ret;
91} 35}
92 36
93static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) 37static inline void pmd_free(pmd_t *pmd)
94{ 38{
95 pmd_t *pmd; 39 kmem_cache_free(pgtable_cache, pmd);
96
97 pmd = pmd_alloc_one_fast(mm, address);
98 if (!pmd) {
99 pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
100 if (pmd)
101 memset(pmd, 0, PAGE_SIZE);
102 }
103 return pmd;
104} 40}
105 41
106static __inline__ void free_pmd_fast(pmd_t *pmd) 42static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
43 unsigned long address)
107{ 44{
108 unsigned long color = DCACHE_COLOR((unsigned long)pmd); 45 return kmem_cache_alloc(pgtable_cache,
109 46 GFP_KERNEL|__GFP_REPEAT);
110 preempt_disable();
111 *(unsigned long *)pmd = (unsigned long) pte_quicklist[color];
112 pte_quicklist[color] = (unsigned long *) pmd;
113 pgtable_cache_size++;
114 preempt_enable();
115} 47}
116 48
117static __inline__ void free_pmd_slow(pmd_t *pmd) 49static inline struct page *pte_alloc_one(struct mm_struct *mm,
50 unsigned long address)
118{ 51{
119 free_page((unsigned long)pmd); 52 return virt_to_page(pte_alloc_one_kernel(mm, address));
120} 53}
121 54
122#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE)
123#define pmd_populate(MM,PMD,PTE_PAGE) \
124 pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
125
126extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);
127
128static inline struct page *
129pte_alloc_one(struct mm_struct *mm, unsigned long addr)
130{
131 pte_t *pte = pte_alloc_one_kernel(mm, addr);
132
133 if (pte)
134 return virt_to_page(pte);
135
136 return NULL;
137}
138
139static __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
140{
141 unsigned long color = VPTE_COLOR(address);
142 unsigned long *ret;
143
144 preempt_disable();
145 if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
146 pte_quicklist[color] = (unsigned long *)(*ret);
147 ret[0] = 0;
148 pgtable_cache_size--;
149 }
150 preempt_enable();
151 return (pte_t *)ret;
152}
153
154static __inline__ void free_pte_fast(pte_t *pte)
155{
156 unsigned long color = DCACHE_COLOR((unsigned long)pte);
157
158 preempt_disable();
159 *(unsigned long *)pte = (unsigned long) pte_quicklist[color];
160 pte_quicklist[color] = (unsigned long *) pte;
161 pgtable_cache_size++;
162 preempt_enable();
163}
164
165static __inline__ void free_pte_slow(pte_t *pte)
166{
167 free_page((unsigned long)pte);
168}
169
170static inline void pte_free_kernel(pte_t *pte) 55static inline void pte_free_kernel(pte_t *pte)
171{ 56{
172 free_pte_fast(pte); 57 kmem_cache_free(pgtable_cache, pte);
173} 58}
174 59
175static inline void pte_free(struct page *ptepage) 60static inline void pte_free(struct page *ptepage)
176{ 61{
177 free_pte_fast(page_address(ptepage)); 62 pte_free_kernel(page_address(ptepage));
178} 63}
179 64
180#define pmd_free(pmd) free_pmd_fast(pmd) 65
181#define pgd_free(pgd) free_pgd_fast(pgd) 66#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE)
182#define pgd_alloc(mm) get_pgd_fast() 67#define pmd_populate(MM,PMD,PTE_PAGE) \
68 pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
69
70#define check_pgt_cache() do { } while (0)
183 71
184#endif /* _SPARC64_PGALLOC_H */ 72#endif /* _SPARC64_PGALLOC_H */
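pgalloc.h now hands out every page-table level from one kmem_cache (pgtable_cache) instead of the per-cpu quicklists it deletes. A minimal sketch of what the boot-time pgtable_cache_init() declared further down in pgtable.h could look like, assuming PAGE_SIZE objects, tables that must come back zeroed, and the 2.6-era kmem_cache_create()/constructor signatures:

/* Sketch only, under the assumptions stated above. */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/page.h>

kmem_cache_t *pgtable_cache;

static void pgtable_zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
{
	memset(addr, 0, PAGE_SIZE);	/* page tables must start out clear */
}

void pgtable_cache_init(void)
{
	pgtable_cache = kmem_cache_create("pgtable_cache",
					  PAGE_SIZE, PAGE_SIZE,
					  SLAB_HWCACHE_ALIGN,
					  pgtable_zero_ctor, NULL);
	if (!pgtable_cache)
		panic("pgtable_cache_init(): could not create pgtable_cache");
}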
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index f0a9b44d3eb5..ed4124edf837 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -25,7 +25,8 @@
25#include <asm/const.h> 25#include <asm/const.h>
26 26
27/* The kernel image occupies 0x4000000 to 0x1000000 (4MB --> 32MB). 27/* The kernel image occupies 0x4000000 to 0x1000000 (4MB --> 32MB).
28 * The page copy blockops can use 0x2000000 to 0x10000000. 28 * The page copy blockops can use 0x2000000 to 0x4000000.
29 * The TSB is mapped in the 0x4000000 to 0x6000000 range.
29 * The PROM resides in an area spanning 0xf0000000 to 0x100000000. 30 * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
30 * The vmalloc area spans 0x100000000 to 0x200000000. 31 * The vmalloc area spans 0x100000000 to 0x200000000.
31 * Since modules need to be in the lowest 32-bits of the address space, 32 * Since modules need to be in the lowest 32-bits of the address space,
@@ -34,6 +35,7 @@
34 * 0x400000000. 35 * 0x400000000.
35 */ 36 */
36#define TLBTEMP_BASE _AC(0x0000000002000000,UL) 37#define TLBTEMP_BASE _AC(0x0000000002000000,UL)
38#define TSBMAP_BASE _AC(0x0000000004000000,UL)
37#define MODULES_VADDR _AC(0x0000000010000000,UL) 39#define MODULES_VADDR _AC(0x0000000010000000,UL)
38#define MODULES_LEN _AC(0x00000000e0000000,UL) 40#define MODULES_LEN _AC(0x00000000e0000000,UL)
39#define MODULES_END _AC(0x00000000f0000000,UL) 41#define MODULES_END _AC(0x00000000f0000000,UL)
@@ -88,162 +90,538 @@
88 90
89#endif /* !(__ASSEMBLY__) */ 91#endif /* !(__ASSEMBLY__) */
90 92
91/* Spitfire/Cheetah TTE bits. */ 93/* PTE bits which are the same in SUN4U and SUN4V format. */
92#define _PAGE_VALID _AC(0x8000000000000000,UL) /* Valid TTE */ 94#define _PAGE_VALID _AC(0x8000000000000000,UL) /* Valid TTE */
93#define _PAGE_R _AC(0x8000000000000000,UL) /* Keep ref bit up to date*/ 95#define _PAGE_R _AC(0x8000000000000000,UL) /* Keep ref bit uptodate*/
94#define _PAGE_SZ4MB _AC(0x6000000000000000,UL) /* 4MB Page */ 96
95#define _PAGE_SZ512K _AC(0x4000000000000000,UL) /* 512K Page */ 97/* SUN4U pte bits... */
96#define _PAGE_SZ64K _AC(0x2000000000000000,UL) /* 64K Page */ 98#define _PAGE_SZ4MB_4U _AC(0x6000000000000000,UL) /* 4MB Page */
97#define _PAGE_SZ8K _AC(0x0000000000000000,UL) /* 8K Page */ 99#define _PAGE_SZ512K_4U _AC(0x4000000000000000,UL) /* 512K Page */
98#define _PAGE_NFO _AC(0x1000000000000000,UL) /* No Fault Only */ 100#define _PAGE_SZ64K_4U _AC(0x2000000000000000,UL) /* 64K Page */
99#define _PAGE_IE _AC(0x0800000000000000,UL) /* Invert Endianness */ 101#define _PAGE_SZ8K_4U _AC(0x0000000000000000,UL) /* 8K Page */
100#define _PAGE_SOFT2 _AC(0x07FC000000000000,UL) /* Software bits, set 2 */ 102#define _PAGE_NFO_4U _AC(0x1000000000000000,UL) /* No Fault Only */
101#define _PAGE_RES1 _AC(0x0002000000000000,UL) /* Reserved */ 103#define _PAGE_IE_4U _AC(0x0800000000000000,UL) /* Invert Endianness */
102#define _PAGE_SZ32MB _AC(0x0001000000000000,UL) /* (Panther) 32MB page */ 104#define _PAGE_SOFT2_4U _AC(0x07FC000000000000,UL) /* Software bits, set 2 */
103#define _PAGE_SZ256MB _AC(0x2001000000000000,UL) /* (Panther) 256MB page */ 105#define _PAGE_RES1_4U _AC(0x0002000000000000,UL) /* Reserved */
104#define _PAGE_SN _AC(0x0000800000000000,UL) /* (Cheetah) Snoop */ 106#define _PAGE_SZ32MB_4U _AC(0x0001000000000000,UL) /* (Panther) 32MB page */
105#define _PAGE_RES2 _AC(0x0000780000000000,UL) /* Reserved */ 107#define _PAGE_SZ256MB_4U _AC(0x2001000000000000,UL) /* (Panther) 256MB page */
106#define _PAGE_PADDR_SF _AC(0x000001FFFFFFE000,UL) /* (Spitfire) paddr[40:13]*/ 108#define _PAGE_SN_4U _AC(0x0000800000000000,UL) /* (Cheetah) Snoop */
107#define _PAGE_PADDR _AC(0x000007FFFFFFE000,UL) /* (Cheetah) paddr[42:13] */ 109#define _PAGE_RES2_4U _AC(0x0000780000000000,UL) /* Reserved */
108#define _PAGE_SOFT _AC(0x0000000000001F80,UL) /* Software bits */ 110#define _PAGE_PADDR_4U _AC(0x000007FFFFFFE000,UL) /* (Cheetah) pa[42:13] */
109#define _PAGE_L _AC(0x0000000000000040,UL) /* Locked TTE */ 111#define _PAGE_SOFT_4U _AC(0x0000000000001F80,UL) /* Software bits: */
110#define _PAGE_CP _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */ 112#define _PAGE_EXEC_4U _AC(0x0000000000001000,UL) /* Executable SW bit */
111#define _PAGE_CV _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */ 113#define _PAGE_MODIFIED_4U _AC(0x0000000000000800,UL) /* Modified (dirty) */
112#define _PAGE_E _AC(0x0000000000000008,UL) /* side-Effect */ 114#define _PAGE_FILE_4U _AC(0x0000000000000800,UL) /* Pagecache page */
113#define _PAGE_P _AC(0x0000000000000004,UL) /* Privileged Page */ 115#define _PAGE_ACCESSED_4U _AC(0x0000000000000400,UL) /* Accessed (ref'd) */
114#define _PAGE_W _AC(0x0000000000000002,UL) /* Writable */ 116#define _PAGE_READ_4U _AC(0x0000000000000200,UL) /* Readable SW Bit */
115#define _PAGE_G _AC(0x0000000000000001,UL) /* Global */ 117#define _PAGE_WRITE_4U _AC(0x0000000000000100,UL) /* Writable SW Bit */
116 118#define _PAGE_PRESENT_4U _AC(0x0000000000000080,UL) /* Present */
117/* Here are the SpitFire software bits we use in the TTE's. 119#define _PAGE_L_4U _AC(0x0000000000000040,UL) /* Locked TTE */
118 * 120#define _PAGE_CP_4U _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */
119 * WARNING: If you are going to try and start using some 121#define _PAGE_CV_4U _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */
120 * of the soft2 bits, you will need to make 122#define _PAGE_E_4U _AC(0x0000000000000008,UL) /* side-Effect */
121 * modifications to the swap entry implementation. 123#define _PAGE_P_4U _AC(0x0000000000000004,UL) /* Privileged Page */
122 * For example, one thing that could happen is that 124#define _PAGE_W_4U _AC(0x0000000000000002,UL) /* Writable */
123 * swp_entry_to_pte() would BUG_ON() if you tried 125
124 * to use one of the soft2 bits for _PAGE_FILE. 126/* SUN4V pte bits... */
125 * 127#define _PAGE_NFO_4V _AC(0x4000000000000000,UL) /* No Fault Only */
126 * Like other architectures, I have aliased _PAGE_FILE with 128#define _PAGE_SOFT2_4V _AC(0x3F00000000000000,UL) /* Software bits, set 2 */
127 * _PAGE_MODIFIED. This works because _PAGE_FILE is never 129#define _PAGE_MODIFIED_4V _AC(0x2000000000000000,UL) /* Modified (dirty) */
128 * interpreted that way unless _PAGE_PRESENT is clear. 130#define _PAGE_ACCESSED_4V _AC(0x1000000000000000,UL) /* Accessed (ref'd) */
129 */ 131#define _PAGE_READ_4V _AC(0x0800000000000000,UL) /* Readable SW Bit */
130#define _PAGE_EXEC _AC(0x0000000000001000,UL) /* Executable SW bit */ 132#define _PAGE_WRITE_4V _AC(0x0400000000000000,UL) /* Writable SW Bit */
131#define _PAGE_MODIFIED _AC(0x0000000000000800,UL) /* Modified (dirty) */ 133#define _PAGE_PADDR_4V _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13] */
132#define _PAGE_FILE _AC(0x0000000000000800,UL) /* Pagecache page */ 134#define _PAGE_IE_4V _AC(0x0000000000001000,UL) /* Invert Endianness */
133#define _PAGE_ACCESSED _AC(0x0000000000000400,UL) /* Accessed (ref'd) */ 135#define _PAGE_E_4V _AC(0x0000000000000800,UL) /* side-Effect */
134#define _PAGE_READ _AC(0x0000000000000200,UL) /* Readable SW Bit */ 136#define _PAGE_CP_4V _AC(0x0000000000000400,UL) /* Cacheable in P-Cache */
135#define _PAGE_WRITE _AC(0x0000000000000100,UL) /* Writable SW Bit */ 137#define _PAGE_CV_4V _AC(0x0000000000000200,UL) /* Cacheable in V-Cache */
136#define _PAGE_PRESENT _AC(0x0000000000000080,UL) /* Present */ 138#define _PAGE_P_4V _AC(0x0000000000000100,UL) /* Privileged Page */
139#define _PAGE_EXEC_4V _AC(0x0000000000000080,UL) /* Executable Page */
140#define _PAGE_W_4V _AC(0x0000000000000040,UL) /* Writable */
141#define _PAGE_SOFT_4V _AC(0x0000000000000030,UL) /* Software bits */
142#define _PAGE_FILE_4V _AC(0x0000000000000020,UL) /* Pagecache page */
143#define _PAGE_PRESENT_4V _AC(0x0000000000000010,UL) /* Present */
144#define _PAGE_RESV_4V _AC(0x0000000000000008,UL) /* Reserved */
145#define _PAGE_SZ16GB_4V _AC(0x0000000000000007,UL) /* 16GB Page */
146#define _PAGE_SZ2GB_4V _AC(0x0000000000000006,UL) /* 2GB Page */
147#define _PAGE_SZ256MB_4V _AC(0x0000000000000005,UL) /* 256MB Page */
148#define _PAGE_SZ32MB_4V _AC(0x0000000000000004,UL) /* 32MB Page */
149#define _PAGE_SZ4MB_4V _AC(0x0000000000000003,UL) /* 4MB Page */
150#define _PAGE_SZ512K_4V _AC(0x0000000000000002,UL) /* 512K Page */
151#define _PAGE_SZ64K_4V _AC(0x0000000000000001,UL) /* 64K Page */
152#define _PAGE_SZ8K_4V _AC(0x0000000000000000,UL) /* 8K Page */
137 153
138#if PAGE_SHIFT == 13 154#if PAGE_SHIFT == 13
139#define _PAGE_SZBITS _PAGE_SZ8K 155#define _PAGE_SZBITS_4U _PAGE_SZ8K_4U
156#define _PAGE_SZBITS_4V _PAGE_SZ8K_4V
140#elif PAGE_SHIFT == 16 157#elif PAGE_SHIFT == 16
141#define _PAGE_SZBITS _PAGE_SZ64K 158#define _PAGE_SZBITS_4U _PAGE_SZ64K_4U
159#define _PAGE_SZBITS_4V _PAGE_SZ64K_4V
142#elif PAGE_SHIFT == 19 160#elif PAGE_SHIFT == 19
143#define _PAGE_SZBITS _PAGE_SZ512K 161#define _PAGE_SZBITS_4U _PAGE_SZ512K_4U
162#define _PAGE_SZBITS_4V _PAGE_SZ512K_4V
144#elif PAGE_SHIFT == 22 163#elif PAGE_SHIFT == 22
145#define _PAGE_SZBITS _PAGE_SZ4MB 164#define _PAGE_SZBITS_4U _PAGE_SZ4MB_4U
165#define _PAGE_SZBITS_4V _PAGE_SZ4MB_4V
146#else 166#else
147#error Wrong PAGE_SHIFT specified 167#error Wrong PAGE_SHIFT specified
148#endif 168#endif
149 169
150#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB) 170#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
151#define _PAGE_SZHUGE _PAGE_SZ4MB 171#define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U
172#define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V
152#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K) 173#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
153#define _PAGE_SZHUGE _PAGE_SZ512K 174#define _PAGE_SZHUGE_4U _PAGE_SZ512K_4U
175#define _PAGE_SZHUGE_4V _PAGE_SZ512K_4V
154#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K) 176#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
155#define _PAGE_SZHUGE _PAGE_SZ64K 177#define _PAGE_SZHUGE_4U _PAGE_SZ64K_4U
178#define _PAGE_SZHUGE_4V _PAGE_SZ64K_4V
156#endif 179#endif
157 180
158#define _PAGE_CACHE (_PAGE_CP | _PAGE_CV) 181/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
182#define __P000 __pgprot(0)
183#define __P001 __pgprot(0)
184#define __P010 __pgprot(0)
185#define __P011 __pgprot(0)
186#define __P100 __pgprot(0)
187#define __P101 __pgprot(0)
188#define __P110 __pgprot(0)
189#define __P111 __pgprot(0)
190
191#define __S000 __pgprot(0)
192#define __S001 __pgprot(0)
193#define __S010 __pgprot(0)
194#define __S011 __pgprot(0)
195#define __S100 __pgprot(0)
196#define __S101 __pgprot(0)
197#define __S110 __pgprot(0)
198#define __S111 __pgprot(0)
159 199
160#define __DIRTY_BITS (_PAGE_MODIFIED | _PAGE_WRITE | _PAGE_W) 200#ifndef __ASSEMBLY__
161#define __ACCESS_BITS (_PAGE_ACCESSED | _PAGE_READ | _PAGE_R)
162#define __PRIV_BITS _PAGE_P
163 201
164#define PAGE_NONE __pgprot (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_CACHE) 202extern pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);
165 203
166/* Don't set the TTE _PAGE_W bit here, else the dirty bit never gets set. */ 204extern unsigned long pte_sz_bits(unsigned long size);
167#define PAGE_SHARED __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
168 __ACCESS_BITS | _PAGE_WRITE | _PAGE_EXEC)
169 205
170#define PAGE_COPY __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \ 206extern pgprot_t PAGE_KERNEL;
171 __ACCESS_BITS | _PAGE_EXEC) 207extern pgprot_t PAGE_KERNEL_LOCKED;
208extern pgprot_t PAGE_COPY;
209extern pgprot_t PAGE_SHARED;
172 210
 173#define PAGE_READONLY __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \ 211/* XXX This ugliness is for the atyfb driver's sparc mmap() support. XXX */
174 __ACCESS_BITS | _PAGE_EXEC) 212extern unsigned long _PAGE_IE;
213extern unsigned long _PAGE_E;
214extern unsigned long _PAGE_CACHE;
175 215
176#define PAGE_KERNEL __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \ 216extern unsigned long pg_iobits;
177 __PRIV_BITS | \ 217extern unsigned long _PAGE_ALL_SZ_BITS;
178 __ACCESS_BITS | __DIRTY_BITS | _PAGE_EXEC) 218extern unsigned long _PAGE_SZBITS;
179 219
180#define PAGE_SHARED_NOEXEC __pgprot (_PAGE_PRESENT | _PAGE_VALID | \ 220extern struct page *mem_map_zero;
181 _PAGE_CACHE | \ 221#define ZERO_PAGE(vaddr) (mem_map_zero)
182 __ACCESS_BITS | _PAGE_WRITE)
183 222
184#define PAGE_COPY_NOEXEC __pgprot (_PAGE_PRESENT | _PAGE_VALID | \ 223/* PFNs are real physical page numbers. However, mem_map only begins to record
185 _PAGE_CACHE | __ACCESS_BITS) 224 * per-page information starting at pfn_base. This is to handle systems where
225 * the first physical page in the machine is at some huge physical address,
226 * such as 4GB. This is common on a partitioned E10000, for example.
227 */
228static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
229{
230 unsigned long paddr = pfn << PAGE_SHIFT;
231 unsigned long sz_bits;
232
233 sz_bits = 0UL;
234 if (_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL) {
235 __asm__ __volatile__(
236 "\n661: sethi %uhi(%1), %0\n"
237 " sllx %0, 32, %0\n"
238 " .section .sun4v_2insn_patch, \"ax\"\n"
239 " .word 661b\n"
240 " mov %2, %0\n"
241 " nop\n"
242 " .previous\n"
243 : "=r" (sz_bits)
244 : "i" (_PAGE_SZBITS_4U), "i" (_PAGE_SZBITS_4V));
245 }
246 return __pte(paddr | sz_bits | pgprot_val(prot));
247}
248#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
186 249
187#define PAGE_READONLY_NOEXEC __pgprot (_PAGE_PRESENT | _PAGE_VALID | \ 250/* This one can be done with two shifts. */
188 _PAGE_CACHE | __ACCESS_BITS) 251static inline unsigned long pte_pfn(pte_t pte)
252{
253 unsigned long ret;
254
255 __asm__ __volatile__(
256 "\n661: sllx %1, %2, %0\n"
257 " srlx %0, %3, %0\n"
258 " .section .sun4v_2insn_patch, \"ax\"\n"
259 " .word 661b\n"
260 " sllx %1, %4, %0\n"
261 " srlx %0, %5, %0\n"
262 " .previous\n"
263 : "=r" (ret)
264 : "r" (pte_val(pte)),
265 "i" (21), "i" (21 + PAGE_SHIFT),
266 "i" (8), "i" (8 + PAGE_SHIFT));
267
268 return ret;
269}
270#define pte_page(x) pfn_to_page(pte_pfn(x))
189 271
190#define _PFN_MASK _PAGE_PADDR 272static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
273{
274 unsigned long mask, tmp;
275
276 /* SUN4U: 0x600307ffffffecb8 (negated == 0x9ffcf80000001347)
277 * SUN4V: 0x30ffffffffffee17 (negated == 0xcf000000000011e8)
278 *
279 * Even if we use negation tricks the result is still a 6
280 * instruction sequence, so don't try to play fancy and just
281 * do the most straightforward implementation.
282 *
283 * Note: We encode this into 3 sun4v 2-insn patch sequences.
284 */
191 285
192#define pg_iobits (_PAGE_VALID | _PAGE_PRESENT | __DIRTY_BITS | \ 286 __asm__ __volatile__(
193 __ACCESS_BITS | _PAGE_E) 287 "\n661: sethi %%uhi(%2), %1\n"
288 " sethi %%hi(%2), %0\n"
289 "\n662: or %1, %%ulo(%2), %1\n"
290 " or %0, %%lo(%2), %0\n"
291 "\n663: sllx %1, 32, %1\n"
292 " or %0, %1, %0\n"
293 " .section .sun4v_2insn_patch, \"ax\"\n"
294 " .word 661b\n"
295 " sethi %%uhi(%3), %1\n"
296 " sethi %%hi(%3), %0\n"
297 " .word 662b\n"
298 " or %1, %%ulo(%3), %1\n"
299 " or %0, %%lo(%3), %0\n"
300 " .word 663b\n"
301 " sllx %1, 32, %1\n"
302 " or %0, %1, %0\n"
303 " .previous\n"
304 : "=r" (mask), "=r" (tmp)
305 : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
306 _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_PRESENT_4U |
307 _PAGE_SZBITS_4U),
308 "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
309 _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_PRESENT_4V |
310 _PAGE_SZBITS_4V));
311
312 return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
313}
194 314
195#define __P000 PAGE_NONE 315static inline pte_t pgoff_to_pte(unsigned long off)
196#define __P001 PAGE_READONLY_NOEXEC 316{
197#define __P010 PAGE_COPY_NOEXEC 317 off <<= PAGE_SHIFT;
198#define __P011 PAGE_COPY_NOEXEC 318
199#define __P100 PAGE_READONLY 319 __asm__ __volatile__(
200#define __P101 PAGE_READONLY 320 "\n661: or %0, %2, %0\n"
201#define __P110 PAGE_COPY 321 " .section .sun4v_1insn_patch, \"ax\"\n"
202#define __P111 PAGE_COPY 322 " .word 661b\n"
323 " or %0, %3, %0\n"
324 " .previous\n"
325 : "=r" (off)
326 : "0" (off), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V));
327
328 return __pte(off);
329}
203 330
204#define __S000 PAGE_NONE 331static inline pgprot_t pgprot_noncached(pgprot_t prot)
205#define __S001 PAGE_READONLY_NOEXEC 332{
206#define __S010 PAGE_SHARED_NOEXEC 333 unsigned long val = pgprot_val(prot);
207#define __S011 PAGE_SHARED_NOEXEC 334
208#define __S100 PAGE_READONLY 335 __asm__ __volatile__(
209#define __S101 PAGE_READONLY 336 "\n661: andn %0, %2, %0\n"
210#define __S110 PAGE_SHARED 337 " or %0, %3, %0\n"
211#define __S111 PAGE_SHARED 338 " .section .sun4v_2insn_patch, \"ax\"\n"
339 " .word 661b\n"
340 " andn %0, %4, %0\n"
341 " or %0, %3, %0\n"
342 " .previous\n"
343 : "=r" (val)
344 : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U),
345 "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V));
346
347 return __pgprot(val);
348}
349/* Various pieces of code check for platform support by ifdef testing
350 * on "pgprot_noncached". That's broken and should be fixed, but for
351 * now...
352 */
353#define pgprot_noncached pgprot_noncached
212 354
213#ifndef __ASSEMBLY__ 355#ifdef CONFIG_HUGETLB_PAGE
356static inline pte_t pte_mkhuge(pte_t pte)
357{
358 unsigned long mask;
359
360 __asm__ __volatile__(
361 "\n661: sethi %%uhi(%1), %0\n"
362 " sllx %0, 32, %0\n"
363 " .section .sun4v_2insn_patch, \"ax\"\n"
364 " .word 661b\n"
365 " mov %2, %0\n"
366 " nop\n"
367 " .previous\n"
368 : "=r" (mask)
369 : "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));
370
371 return __pte(pte_val(pte) | mask);
372}
373#endif
214 374
215extern unsigned long phys_base; 375static inline pte_t pte_mkdirty(pte_t pte)
216extern unsigned long pfn_base; 376{
377 unsigned long val = pte_val(pte), tmp;
378
379 __asm__ __volatile__(
380 "\n661: or %0, %3, %0\n"
381 " nop\n"
382 "\n662: nop\n"
383 " nop\n"
384 " .section .sun4v_2insn_patch, \"ax\"\n"
385 " .word 661b\n"
386 " sethi %%uhi(%4), %1\n"
387 " sllx %1, 32, %1\n"
388 " .word 662b\n"
389 " or %1, %%lo(%4), %1\n"
390 " or %0, %1, %0\n"
391 " .previous\n"
392 : "=r" (val), "=r" (tmp)
393 : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
394 "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));
395
396 return __pte(val);
397}
217 398
218extern struct page *mem_map_zero; 399static inline pte_t pte_mkclean(pte_t pte)
219#define ZERO_PAGE(vaddr) (mem_map_zero) 400{
401 unsigned long val = pte_val(pte), tmp;
402
403 __asm__ __volatile__(
404 "\n661: andn %0, %3, %0\n"
405 " nop\n"
406 "\n662: nop\n"
407 " nop\n"
408 " .section .sun4v_2insn_patch, \"ax\"\n"
409 " .word 661b\n"
410 " sethi %%uhi(%4), %1\n"
411 " sllx %1, 32, %1\n"
412 " .word 662b\n"
413 " or %1, %%lo(%4), %1\n"
414 " andn %0, %1, %0\n"
415 " .previous\n"
416 : "=r" (val), "=r" (tmp)
417 : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
418 "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));
419
420 return __pte(val);
421}
220 422
221/* PFNs are real physical page numbers. However, mem_map only begins to record 423static inline pte_t pte_mkwrite(pte_t pte)
222 * per-page information starting at pfn_base. This is to handle systems where 424{
223 * the first physical page in the machine is at some huge physical address, 425 unsigned long val = pte_val(pte), mask;
224 * such as 4GB. This is common on a partitioned E10000, for example. 426
225 */ 427 __asm__ __volatile__(
428 "\n661: mov %1, %0\n"
429 " nop\n"
430 " .section .sun4v_2insn_patch, \"ax\"\n"
431 " .word 661b\n"
432 " sethi %%uhi(%2), %0\n"
433 " sllx %0, 32, %0\n"
434 " .previous\n"
435 : "=r" (mask)
436 : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));
437
438 return __pte(val | mask);
439}
226 440
227#define pfn_pte(pfn, prot) \ 441static inline pte_t pte_wrprotect(pte_t pte)
228 __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot) | _PAGE_SZBITS) 442{
229#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) 443 unsigned long val = pte_val(pte), tmp;
444
445 __asm__ __volatile__(
446 "\n661: andn %0, %3, %0\n"
447 " nop\n"
448 "\n662: nop\n"
449 " nop\n"
450 " .section .sun4v_2insn_patch, \"ax\"\n"
451 " .word 661b\n"
452 " sethi %%uhi(%4), %1\n"
453 " sllx %1, 32, %1\n"
454 " .word 662b\n"
455 " or %1, %%lo(%4), %1\n"
456 " andn %0, %1, %0\n"
457 " .previous\n"
458 : "=r" (val), "=r" (tmp)
459 : "0" (val), "i" (_PAGE_WRITE_4U | _PAGE_W_4U),
460 "i" (_PAGE_WRITE_4V | _PAGE_W_4V));
461
462 return __pte(val);
463}
464
465static inline pte_t pte_mkold(pte_t pte)
466{
467 unsigned long mask;
468
469 __asm__ __volatile__(
470 "\n661: mov %1, %0\n"
471 " nop\n"
472 " .section .sun4v_2insn_patch, \"ax\"\n"
473 " .word 661b\n"
474 " sethi %%uhi(%2), %0\n"
475 " sllx %0, 32, %0\n"
476 " .previous\n"
477 : "=r" (mask)
478 : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
479
480 mask |= _PAGE_R;
481
482 return __pte(pte_val(pte) & ~mask);
483}
484
485static inline pte_t pte_mkyoung(pte_t pte)
486{
487 unsigned long mask;
488
489 __asm__ __volatile__(
490 "\n661: mov %1, %0\n"
491 " nop\n"
492 " .section .sun4v_2insn_patch, \"ax\"\n"
493 " .word 661b\n"
494 " sethi %%uhi(%2), %0\n"
495 " sllx %0, 32, %0\n"
496 " .previous\n"
497 : "=r" (mask)
498 : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
499
500 mask |= _PAGE_R;
501
502 return __pte(pte_val(pte) | mask);
503}
230 504
231#define pte_pfn(x) ((pte_val(x) & _PAGE_PADDR)>>PAGE_SHIFT) 505static inline unsigned long pte_young(pte_t pte)
232#define pte_page(x) pfn_to_page(pte_pfn(x)) 506{
507 unsigned long mask;
508
509 __asm__ __volatile__(
510 "\n661: mov %1, %0\n"
511 " nop\n"
512 " .section .sun4v_2insn_patch, \"ax\"\n"
513 " .word 661b\n"
514 " sethi %%uhi(%2), %0\n"
515 " sllx %0, 32, %0\n"
516 " .previous\n"
517 : "=r" (mask)
518 : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
519
520 return (pte_val(pte) & mask);
521}
522
523static inline unsigned long pte_dirty(pte_t pte)
524{
525 unsigned long mask;
526
527 __asm__ __volatile__(
528 "\n661: mov %1, %0\n"
529 " nop\n"
530 " .section .sun4v_2insn_patch, \"ax\"\n"
531 " .word 661b\n"
532 " sethi %%uhi(%2), %0\n"
533 " sllx %0, 32, %0\n"
534 " .previous\n"
535 : "=r" (mask)
536 : "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V));
537
538 return (pte_val(pte) & mask);
539}
233 540
234static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot) 541static inline unsigned long pte_write(pte_t pte)
235{ 542{
236 pte_t __pte; 543 unsigned long mask;
237 const unsigned long preserve_mask = (_PFN_MASK | 544
238 _PAGE_MODIFIED | _PAGE_ACCESSED | 545 __asm__ __volatile__(
239 _PAGE_CACHE | _PAGE_E | 546 "\n661: mov %1, %0\n"
240 _PAGE_PRESENT | _PAGE_SZBITS); 547 " nop\n"
548 " .section .sun4v_2insn_patch, \"ax\"\n"
549 " .word 661b\n"
550 " sethi %%uhi(%2), %0\n"
551 " sllx %0, 32, %0\n"
552 " .previous\n"
553 : "=r" (mask)
554 : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));
555
556 return (pte_val(pte) & mask);
557}
241 558
242 pte_val(__pte) = (pte_val(orig_pte) & preserve_mask) | 559static inline unsigned long pte_exec(pte_t pte)
243 (pgprot_val(new_prot) & ~preserve_mask); 560{
561 unsigned long mask;
562
563 __asm__ __volatile__(
564 "\n661: sethi %%hi(%1), %0\n"
565 " .section .sun4v_1insn_patch, \"ax\"\n"
566 " .word 661b\n"
567 " mov %2, %0\n"
568 " .previous\n"
569 : "=r" (mask)
570 : "i" (_PAGE_EXEC_4U), "i" (_PAGE_EXEC_4V));
571
572 return (pte_val(pte) & mask);
573}
244 574
245 return __pte; 575static inline unsigned long pte_read(pte_t pte)
576{
577 unsigned long mask;
578
579 __asm__ __volatile__(
580 "\n661: mov %1, %0\n"
581 " nop\n"
582 " .section .sun4v_2insn_patch, \"ax\"\n"
583 " .word 661b\n"
584 " sethi %%uhi(%2), %0\n"
585 " sllx %0, 32, %0\n"
586 " .previous\n"
587 : "=r" (mask)
588 : "i" (_PAGE_READ_4U), "i" (_PAGE_READ_4V));
589
590 return (pte_val(pte) & mask);
246} 591}
592
593static inline unsigned long pte_file(pte_t pte)
594{
595 unsigned long val = pte_val(pte);
596
597 __asm__ __volatile__(
598 "\n661: and %0, %2, %0\n"
599 " .section .sun4v_1insn_patch, \"ax\"\n"
600 " .word 661b\n"
601 " and %0, %3, %0\n"
602 " .previous\n"
603 : "=r" (val)
604 : "0" (val), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V));
605
606 return val;
607}
608
609static inline unsigned long pte_present(pte_t pte)
610{
611 unsigned long val = pte_val(pte);
612
613 __asm__ __volatile__(
614 "\n661: and %0, %2, %0\n"
615 " .section .sun4v_1insn_patch, \"ax\"\n"
616 " .word 661b\n"
617 " and %0, %3, %0\n"
618 " .previous\n"
619 : "=r" (val)
620 : "0" (val), "i" (_PAGE_PRESENT_4U), "i" (_PAGE_PRESENT_4V));
621
622 return val;
623}
624
247#define pmd_set(pmdp, ptep) \ 625#define pmd_set(pmdp, ptep) \
248 (pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL)) 626 (pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL))
249#define pud_set(pudp, pmdp) \ 627#define pud_set(pudp, pmdp) \
@@ -253,8 +631,6 @@ static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
253#define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd)) 631#define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd))
254#define pud_page(pud) \ 632#define pud_page(pud) \
255 ((unsigned long) __va((((unsigned long)pud_val(pud))<<11UL))) 633 ((unsigned long) __va((((unsigned long)pud_val(pud))<<11UL)))
256#define pte_none(pte) (!pte_val(pte))
257#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
258#define pmd_none(pmd) (!pmd_val(pmd)) 634#define pmd_none(pmd) (!pmd_val(pmd))
259#define pmd_bad(pmd) (0) 635#define pmd_bad(pmd) (0)
260#define pmd_present(pmd) (pmd_val(pmd) != 0U) 636#define pmd_present(pmd) (pmd_val(pmd) != 0U)
@@ -264,30 +640,8 @@ static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
264#define pud_present(pud) (pud_val(pud) != 0U) 640#define pud_present(pud) (pud_val(pud) != 0U)
265#define pud_clear(pudp) (pud_val(*(pudp)) = 0U) 641#define pud_clear(pudp) (pud_val(*(pudp)) = 0U)
266 642
267/* The following only work if pte_present() is true. 643/* Same in both SUN4V and SUN4U. */
268 * Undefined behaviour if not.. 644#define pte_none(pte) (!pte_val(pte))
269 */
270#define pte_read(pte) (pte_val(pte) & _PAGE_READ)
271#define pte_exec(pte) (pte_val(pte) & _PAGE_EXEC)
272#define pte_write(pte) (pte_val(pte) & _PAGE_WRITE)
273#define pte_dirty(pte) (pte_val(pte) & _PAGE_MODIFIED)
274#define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED)
275#define pte_wrprotect(pte) (__pte(pte_val(pte) & ~(_PAGE_WRITE|_PAGE_W)))
276#define pte_rdprotect(pte) \
277 (__pte(((pte_val(pte)<<1UL)>>1UL) & ~_PAGE_READ))
278#define pte_mkclean(pte) \
279 (__pte(pte_val(pte) & ~(_PAGE_MODIFIED|_PAGE_W)))
280#define pte_mkold(pte) \
281 (__pte(((pte_val(pte)<<1UL)>>1UL) & ~_PAGE_ACCESSED))
282
283/* Permanent address of a page. */
284#define __page_address(page) page_address(page)
285
286/* Be very careful when you change these three, they are delicate. */
287#define pte_mkyoung(pte) (__pte(pte_val(pte) | _PAGE_ACCESSED | _PAGE_R))
288#define pte_mkwrite(pte) (__pte(pte_val(pte) | _PAGE_WRITE))
289#define pte_mkdirty(pte) (__pte(pte_val(pte) | _PAGE_MODIFIED | _PAGE_W))
290#define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_SZHUGE))
291 645
292/* to find an entry in a page-table-directory. */ 646/* to find an entry in a page-table-directory. */
293#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) 647#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
@@ -296,11 +650,6 @@ static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
296/* to find an entry in a kernel page-table-directory */ 650/* to find an entry in a kernel page-table-directory */
297#define pgd_offset_k(address) pgd_offset(&init_mm, address) 651#define pgd_offset_k(address) pgd_offset(&init_mm, address)
298 652
299/* extract the pgd cache used for optimizing the tlb miss
300 * slow path when executing 32-bit compat processes
301 */
302#define get_pgd_cache(pgd) ((unsigned long) pgd_val(*pgd) << 11)
303
304/* Find an entry in the second-level page table.. */ 653/* Find an entry in the second-level page table.. */
305#define pmd_offset(pudp, address) \ 654#define pmd_offset(pudp, address) \
306 ((pmd_t *) pud_page(*(pudp)) + \ 655 ((pmd_t *) pud_page(*(pudp)) + \
@@ -327,6 +676,9 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *p
327 676
328 /* It is more efficient to let flush_tlb_kernel_range() 677 /* It is more efficient to let flush_tlb_kernel_range()
329 * handle init_mm tlb flushes. 678 * handle init_mm tlb flushes.
679 *
680 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
681 * and SUN4V pte layout, so this inline test is fine.
330 */ 682 */
331 if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID)) 683 if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID))
332 tlb_batch_add(mm, addr, ptep, orig); 684 tlb_batch_add(mm, addr, ptep, orig);
@@ -361,42 +713,23 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
361#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) 713#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
362 714
363/* File offset in PTE support. */ 715/* File offset in PTE support. */
364#define pte_file(pte) (pte_val(pte) & _PAGE_FILE) 716extern unsigned long pte_file(pte_t);
365#define pte_to_pgoff(pte) (pte_val(pte) >> PAGE_SHIFT) 717#define pte_to_pgoff(pte) (pte_val(pte) >> PAGE_SHIFT)
366#define pgoff_to_pte(off) (__pte(((off) << PAGE_SHIFT) | _PAGE_FILE)) 718extern pte_t pgoff_to_pte(unsigned long);
367#define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL) 719#define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL)
368 720
369extern unsigned long prom_virt_to_phys(unsigned long, int *); 721extern unsigned long prom_virt_to_phys(unsigned long, int *);
370 722
371static __inline__ unsigned long 723extern unsigned long sun4u_get_pte(unsigned long);
372sun4u_get_pte (unsigned long addr)
373{
374 pgd_t *pgdp;
375 pud_t *pudp;
376 pmd_t *pmdp;
377 pte_t *ptep;
378
379 if (addr >= PAGE_OFFSET)
380 return addr & _PAGE_PADDR;
381 if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS))
382 return prom_virt_to_phys(addr, NULL);
383 pgdp = pgd_offset_k(addr);
384 pudp = pud_offset(pgdp, addr);
385 pmdp = pmd_offset(pudp, addr);
386 ptep = pte_offset_kernel(pmdp, addr);
387 return pte_val(*ptep) & _PAGE_PADDR;
388}
389 724
390static __inline__ unsigned long 725static inline unsigned long __get_phys(unsigned long addr)
391__get_phys (unsigned long addr)
392{ 726{
393 return sun4u_get_pte (addr); 727 return sun4u_get_pte(addr);
394} 728}
395 729
396static __inline__ int 730static inline int __get_iospace(unsigned long addr)
397__get_iospace (unsigned long addr)
398{ 731{
399 return ((sun4u_get_pte (addr) & 0xf0000000) >> 28); 732 return ((sun4u_get_pte(addr) & 0xf0000000) >> 28);
400} 733}
401 734
402extern unsigned long *sparc64_valid_addr_bitmap; 735extern unsigned long *sparc64_valid_addr_bitmap;
@@ -409,11 +742,6 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
409 unsigned long pfn, 742 unsigned long pfn,
410 unsigned long size, pgprot_t prot); 743 unsigned long size, pgprot_t prot);
411 744
412/* Clear virtual and physical cachability, set side-effect bit. */
413#define pgprot_noncached(prot) \
414 (__pgprot((pgprot_val(prot) & ~(_PAGE_CP | _PAGE_CV)) | \
415 _PAGE_E))
416
417/* 745/*
418 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in 746 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
419 * its high 4 bits. These macros/functions put it there or get it from there. 747 * its high 4 bits. These macros/functions put it there or get it from there.
@@ -424,8 +752,11 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
424 752
425#include <asm-generic/pgtable.h> 753#include <asm-generic/pgtable.h>
426 754
427/* We provide our own get_unmapped_area to cope with VA holes for userland */ 755/* We provide our own get_unmapped_area to cope with VA holes and
756 * SHM area cache aliasing for userland.
757 */
428#define HAVE_ARCH_UNMAPPED_AREA 758#define HAVE_ARCH_UNMAPPED_AREA
759#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
429 760
430/* We provide a special get_unmapped_area for framebuffer mmaps to try and use 761/* We provide a special get_unmapped_area for framebuffer mmaps to try and use
 431 * the largest alignment possible such that larger PTEs can be used. 762 * the largest alignment possible such that larger PTEs can be used.
@@ -435,12 +766,9 @@ extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
435 unsigned long); 766 unsigned long);
436#define HAVE_ARCH_FB_UNMAPPED_AREA 767#define HAVE_ARCH_FB_UNMAPPED_AREA
437 768
438/* 769extern void pgtable_cache_init(void);
439 * No page table caches to initialise 770extern void sun4v_register_fault_status(void);
440 */ 771extern void sun4v_ktsb_register(void);
441#define pgtable_cache_init() do { } while (0)
442
443extern void check_pgt_cache(void);
444 772
445#endif /* !(__ASSEMBLY__) */ 773#endif /* !(__ASSEMBLY__) */
446 774
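Almost every pte_*() helper introduced above hides a boot-time alternative: the instructions are assembled for sun4u, and a .sun4v_1insn_patch / .sun4v_2insn_patch section entry records where they live plus the sun4v replacement words, so the kernel can rewrite itself in place once it knows it is running under the hypervisor. A minimal sketch of that patch walk, assuming the entry layout implied by the ".word 661b" records in the inline asm (a 32-bit instruction address followed by the replacement instructions) and a hypothetical helper name:

/* Sketch only: applying the 2-instruction sun4v patches at boot. */
struct sun4v_2insn_patch_entry {
	unsigned int	addr;		/* address of instruction 661:		*/
	unsigned int	insns[2];	/* sun4v replacement instructions	*/
};

extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
				      __sun4v_2insn_patch_end;

static void sun4v_patch_2insn(void)	/* hypothetical name */
{
	struct sun4v_2insn_patch_entry *p = &__sun4v_2insn_patch;

	while (p < &__sun4v_2insn_patch_end) {
		unsigned long addr = p->addr;

		*(unsigned int *) (addr + 0x0) = p->insns[0];
		*(unsigned int *) (addr + 0x4) = p->insns[1];
		/* The real code would also flush the modified words out
		 * of the I-cache before they are next executed.
		 */
		p++;
	}
}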
diff --git a/include/asm-sparc64/pil.h b/include/asm-sparc64/pil.h
index 8f87750c3517..79f827eb3f5d 100644
--- a/include/asm-sparc64/pil.h
+++ b/include/asm-sparc64/pil.h
@@ -16,11 +16,13 @@
16#define PIL_SMP_CALL_FUNC 1 16#define PIL_SMP_CALL_FUNC 1
17#define PIL_SMP_RECEIVE_SIGNAL 2 17#define PIL_SMP_RECEIVE_SIGNAL 2
18#define PIL_SMP_CAPTURE 3 18#define PIL_SMP_CAPTURE 3
19#define PIL_SMP_CTX_NEW_VERSION 4
19 20
20#ifndef __ASSEMBLY__ 21#ifndef __ASSEMBLY__
21#define PIL_RESERVED(PIL) ((PIL) == PIL_SMP_CALL_FUNC || \ 22#define PIL_RESERVED(PIL) ((PIL) == PIL_SMP_CALL_FUNC || \
22 (PIL) == PIL_SMP_RECEIVE_SIGNAL || \ 23 (PIL) == PIL_SMP_RECEIVE_SIGNAL || \
23 (PIL) == PIL_SMP_CAPTURE) 24 (PIL) == PIL_SMP_CAPTURE || \
25 (PIL) == PIL_SMP_CTX_NEW_VERSION)
24#endif 26#endif
25 27
26#endif /* !(_SPARC64_PIL_H) */ 28#endif /* !(_SPARC64_PIL_H) */
diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h
index cd8d9b4c8658..c6896b88283e 100644
--- a/include/asm-sparc64/processor.h
+++ b/include/asm-sparc64/processor.h
@@ -28,6 +28,8 @@
28 * User lives in his very own context, and cannot reference us. Note 28 * User lives in his very own context, and cannot reference us. Note
29 * that TASK_SIZE is a misnomer, it really gives maximum user virtual 29 * that TASK_SIZE is a misnomer, it really gives maximum user virtual
30 * address that the kernel will allocate out. 30 * address that the kernel will allocate out.
31 *
32 * XXX No longer using virtual page tables, kill this upper limit...
31 */ 33 */
32#define VA_BITS 44 34#define VA_BITS 44
33#ifndef __ASSEMBLY__ 35#ifndef __ASSEMBLY__
@@ -37,18 +39,6 @@
37#endif 39#endif
38#define TASK_SIZE ((unsigned long)-VPTE_SIZE) 40#define TASK_SIZE ((unsigned long)-VPTE_SIZE)
39 41
40/*
41 * The vpte base must be able to hold the entire vpte, half
42 * of which lives above, and half below, the base. And it
43 * is placed as close to the highest address range as possible.
44 */
45#define VPTE_BASE_SPITFIRE (-(VPTE_SIZE/2))
46#if 1
47#define VPTE_BASE_CHEETAH VPTE_BASE_SPITFIRE
48#else
49#define VPTE_BASE_CHEETAH 0xffe0000000000000
50#endif
51
52#ifndef __ASSEMBLY__ 42#ifndef __ASSEMBLY__
53 43
54typedef struct { 44typedef struct {
@@ -101,7 +91,8 @@ extern unsigned long thread_saved_pc(struct task_struct *);
101/* Do necessary setup to start up a newly executed thread. */ 91/* Do necessary setup to start up a newly executed thread. */
102#define start_thread(regs, pc, sp) \ 92#define start_thread(regs, pc, sp) \
103do { \ 93do { \
104 regs->tstate = (regs->tstate & (TSTATE_CWP)) | (TSTATE_INITIAL_MM|TSTATE_IE) | (ASI_PNF << 24); \ 94 unsigned long __asi = ASI_PNF; \
95 regs->tstate = (regs->tstate & (TSTATE_CWP)) | (TSTATE_INITIAL_MM|TSTATE_IE) | (__asi << 24UL); \
105 regs->tpc = ((pc & (~3)) - 4); \ 96 regs->tpc = ((pc & (~3)) - 4); \
106 regs->tnpc = regs->tpc + 4; \ 97 regs->tnpc = regs->tpc + 4; \
107 regs->y = 0; \ 98 regs->y = 0; \
@@ -138,10 +129,10 @@ do { \
138 129
139#define start_thread32(regs, pc, sp) \ 130#define start_thread32(regs, pc, sp) \
140do { \ 131do { \
132 unsigned long __asi = ASI_PNF; \
141 pc &= 0x00000000ffffffffUL; \ 133 pc &= 0x00000000ffffffffUL; \
142 sp &= 0x00000000ffffffffUL; \ 134 sp &= 0x00000000ffffffffUL; \
143\ 135 regs->tstate = (regs->tstate & (TSTATE_CWP))|(TSTATE_INITIAL_MM|TSTATE_IE|TSTATE_AM) | (__asi << 24UL); \
144 regs->tstate = (regs->tstate & (TSTATE_CWP))|(TSTATE_INITIAL_MM|TSTATE_IE|TSTATE_AM); \
145 regs->tpc = ((pc & (~3)) - 4); \ 136 regs->tpc = ((pc & (~3)) - 4); \
146 regs->tnpc = regs->tpc + 4; \ 137 regs->tnpc = regs->tpc + 4; \
147 regs->y = 0; \ 138 regs->y = 0; \
@@ -226,6 +217,8 @@ static inline void prefetchw(const void *x)
226 217
227#define spin_lock_prefetch(x) prefetchw(x) 218#define spin_lock_prefetch(x) prefetchw(x)
228 219
220#define HAVE_ARCH_PICK_MMAP_LAYOUT
221
229#endif /* !(__ASSEMBLY__) */ 222#endif /* !(__ASSEMBLY__) */
230 223
231#endif /* !(__ASM_SPARC64_PROCESSOR_H) */ 224#endif /* !(__ASM_SPARC64_PROCESSOR_H) */
diff --git a/include/asm-sparc64/pstate.h b/include/asm-sparc64/pstate.h
index 29fb74aa805d..49a7924a89ab 100644
--- a/include/asm-sparc64/pstate.h
+++ b/include/asm-sparc64/pstate.h
@@ -28,11 +28,12 @@
28 28
29/* The V9 TSTATE Register (with SpitFire and Linux extensions). 29/* The V9 TSTATE Register (with SpitFire and Linux extensions).
30 * 30 *
31 * --------------------------------------------------------------- 31 * ---------------------------------------------------------------------
32 * | Resv | CCR | ASI | %pil | PSTATE | Resv | CWP | 32 * | Resv | GL | CCR | ASI | %pil | PSTATE | Resv | CWP |
33 * --------------------------------------------------------------- 33 * ---------------------------------------------------------------------
34 * 63 40 39 32 31 24 23 20 19 8 7 5 4 0 34 * 63 43 42 40 39 32 31 24 23 20 19 8 7 5 4 0
35 */ 35 */
36#define TSTATE_GL _AC(0x0000070000000000,UL) /* Global reg level */
36#define TSTATE_CCR _AC(0x000000ff00000000,UL) /* Condition Codes. */ 37#define TSTATE_CCR _AC(0x000000ff00000000,UL) /* Condition Codes. */
37#define TSTATE_XCC _AC(0x000000f000000000,UL) /* Condition Codes. */ 38#define TSTATE_XCC _AC(0x000000f000000000,UL) /* Condition Codes. */
38#define TSTATE_XNEG _AC(0x0000008000000000,UL) /* %xcc Negative. */ 39#define TSTATE_XNEG _AC(0x0000008000000000,UL) /* %xcc Negative. */
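The widened diagram adds the sun4v GL (global register level) field in TSTATE bits 42:40, directly above CCR in 39:32. A small sketch of decoding a saved %tstate value with the masks defined here; the shift counts follow straight from the bit positions in the diagram.

#include <asm/pstate.h>

/* Sketch only: field extraction from a saved %tstate. */
static inline unsigned long tstate_gl(unsigned long tstate)
{
	return (tstate & TSTATE_GL) >> 40;	/* bits 42:40 */
}

static inline unsigned long tstate_ccr(unsigned long tstate)
{
	return (tstate & TSTATE_CCR) >> 32;	/* bits 39:32 */
}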
diff --git a/include/asm-sparc64/scratchpad.h b/include/asm-sparc64/scratchpad.h
new file mode 100644
index 000000000000..5e8b01fb3343
--- /dev/null
+++ b/include/asm-sparc64/scratchpad.h
@@ -0,0 +1,14 @@
1#ifndef _SPARC64_SCRATCHPAD_H
2#define _SPARC64_SCRATCHPAD_H
3
4/* Sun4v scratchpad registers, accessed via ASI_SCRATCHPAD. */
5
6#define SCRATCHPAD_MMU_MISS 0x00 /* Shared with OBP - set by OBP */
7#define SCRATCHPAD_CPUID 0x08 /* Shared with OBP - set by hypervisor */
8#define SCRATCHPAD_UTSBREG1 0x10
9#define SCRATCHPAD_UTSBREG2 0x18
10 /* 0x20 and 0x28, hypervisor only... */
11#define SCRATCHPAD_UNUSED1 0x30
12#define SCRATCHPAD_UNUSED2 0x38 /* Reserved for OBP */
13
14#endif /* !(_SPARC64_SCRATCHPAD_H) */
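The scratchpad slots are per-cpu registers reached with alternate-space loads and stores rather than ordinary memory accesses. A minimal sketch of reading the CPUID slot; it assumes asm/asi.h (also extended in this merge) provides an ASI_SCRATCHPAD definition for the scratchpad address space.

#include <asm/scratchpad.h>
#include <asm/asi.h>			/* assumed to define ASI_SCRATCHPAD */

/* Sketch only: read the hypervisor-maintained cpu id from the
 * per-cpu scratchpad with an alternate-space load.
 */
static inline unsigned long scratchpad_read_cpuid(void)
{
	unsigned long cpuid;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=r" (cpuid)
			     : "r" (SCRATCHPAD_CPUID),
			       "i" (ASI_SCRATCHPAD));
	return cpuid;
}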
diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h
index 473edb2603ec..89d86ecaab24 100644
--- a/include/asm-sparc64/smp.h
+++ b/include/asm-sparc64/smp.h
@@ -33,37 +33,13 @@
33extern cpumask_t phys_cpu_present_map; 33extern cpumask_t phys_cpu_present_map;
34#define cpu_possible_map phys_cpu_present_map 34#define cpu_possible_map phys_cpu_present_map
35 35
36extern cpumask_t cpu_sibling_map[NR_CPUS];
37
36/* 38/*
37 * General functions that each host system must provide. 39 * General functions that each host system must provide.
38 */ 40 */
39 41
40static __inline__ int hard_smp_processor_id(void) 42extern int hard_smp_processor_id(void);
41{
42 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
43 unsigned long cfg, ver;
44 __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
45 if ((ver >> 32) == 0x003e0016) {
46 __asm__ __volatile__("ldxa [%%g0] %1, %0"
47 : "=r" (cfg)
48 : "i" (ASI_JBUS_CONFIG));
49 return ((cfg >> 17) & 0x1f);
50 } else {
51 __asm__ __volatile__("ldxa [%%g0] %1, %0"
52 : "=r" (cfg)
53 : "i" (ASI_SAFARI_CONFIG));
54 return ((cfg >> 17) & 0x3ff);
55 }
56 } else if (this_is_starfire != 0) {
57 return starfire_hard_smp_processor_id();
58 } else {
59 unsigned long upaconfig;
60 __asm__ __volatile__("ldxa [%%g0] %1, %0"
61 : "=r" (upaconfig)
62 : "i" (ASI_UPA_CONFIG));
63 return ((upaconfig >> 17) & 0x1f);
64 }
65}
66
67#define raw_smp_processor_id() (current_thread_info()->cpu) 43#define raw_smp_processor_id() (current_thread_info()->cpu)
68 44
69extern void smp_setup_cpu_possible_map(void); 45extern void smp_setup_cpu_possible_map(void);
diff --git a/include/asm-sparc64/sparsemem.h b/include/asm-sparc64/sparsemem.h
new file mode 100644
index 000000000000..ed5c9d8541e2
--- /dev/null
+++ b/include/asm-sparc64/sparsemem.h
@@ -0,0 +1,12 @@
1#ifndef _SPARC64_SPARSEMEM_H
2#define _SPARC64_SPARSEMEM_H
3
4#ifdef __KERNEL__
5
6#define SECTION_SIZE_BITS 26
7#define MAX_PHYSADDR_BITS 42
8#define MAX_PHYSMEM_BITS 42
9
10#endif /* !(__KERNEL__) */
11
12#endif /* !(_SPARC64_SPARSEMEM_H) */
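With 26 section-size bits in a 42-bit physical address space, each sparsemem section covers 2^26 bytes (64 MB) and at most 2^(42-26) = 65536 sections are needed. The constants below just restate that arithmetic; the macro names are illustrative only.

#include <asm/sparsemem.h>

/* Sketch only: the sizes implied by the constants above. */
#define EXAMPLE_SECTION_SIZE	(1UL << SECTION_SIZE_BITS)				/* 0x4000000 = 64 MB */
#define EXAMPLE_NR_SECTIONS	(1UL << (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS))		/* 65536 */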
diff --git a/include/asm-sparc64/spitfire.h b/include/asm-sparc64/spitfire.h
index 962638c9d122..23ad8a7987ad 100644
--- a/include/asm-sparc64/spitfire.h
+++ b/include/asm-sparc64/spitfire.h
@@ -44,6 +44,7 @@ enum ultra_tlb_layout {
44 spitfire = 0, 44 spitfire = 0,
45 cheetah = 1, 45 cheetah = 1,
46 cheetah_plus = 2, 46 cheetah_plus = 2,
47 hypervisor = 3,
47}; 48};
48 49
49extern enum ultra_tlb_layout tlb_type; 50extern enum ultra_tlb_layout tlb_type;
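Adding hypervisor to enum ultra_tlb_layout lets the rest of the arch code key sun4v behaviour off the same tlb_type it already uses to tell Spitfire from Cheetah. A minimal sketch of the usual run-time dispatch; both helper names are hypothetical placeholders.

#include <asm/spitfire.h>

extern void example_sun4v_mmu_demap(unsigned long ctx, unsigned long va);	/* hypothetical */
extern void example_sun4u_tlb_demap(unsigned long ctx, unsigned long va);	/* hypothetical */

/* Sketch only: dispatch on tlb_type. */
static void example_flush_one_mapping(unsigned long ctx, unsigned long va)
{
	if (tlb_type == hypervisor)
		example_sun4v_mmu_demap(ctx, va);	/* hypervisor service	*/
	else
		example_sun4u_tlb_demap(ctx, va);	/* direct ASI access	*/
}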
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index af254e581834..a18ec87a52c1 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -209,9 +209,10 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
209 /* so that ASI is only written if it changes, think again. */ \ 209 /* so that ASI is only written if it changes, think again. */ \
210 __asm__ __volatile__("wr %%g0, %0, %%asi" \ 210 __asm__ __volatile__("wr %%g0, %0, %%asi" \
211 : : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\ 211 : : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\
212 trap_block[current_thread_info()->cpu].thread = \
213 task_thread_info(next); \
212 __asm__ __volatile__( \ 214 __asm__ __volatile__( \
213 "mov %%g4, %%g7\n\t" \ 215 "mov %%g4, %%g7\n\t" \
214 "wrpr %%g0, 0x95, %%pstate\n\t" \
215 "stx %%i6, [%%sp + 2047 + 0x70]\n\t" \ 216 "stx %%i6, [%%sp + 2047 + 0x70]\n\t" \
216 "stx %%i7, [%%sp + 2047 + 0x78]\n\t" \ 217 "stx %%i7, [%%sp + 2047 + 0x78]\n\t" \
217 "rdpr %%wstate, %%o5\n\t" \ 218 "rdpr %%wstate, %%o5\n\t" \
@@ -225,14 +226,10 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
225 "ldx [%%g6 + %3], %%o6\n\t" \ 226 "ldx [%%g6 + %3], %%o6\n\t" \
226 "ldub [%%g6 + %2], %%o5\n\t" \ 227 "ldub [%%g6 + %2], %%o5\n\t" \
227 "ldub [%%g6 + %4], %%o7\n\t" \ 228 "ldub [%%g6 + %4], %%o7\n\t" \
228 "mov %%g6, %%l2\n\t" \
229 "wrpr %%o5, 0x0, %%wstate\n\t" \ 229 "wrpr %%o5, 0x0, %%wstate\n\t" \
230 "ldx [%%sp + 2047 + 0x70], %%i6\n\t" \ 230 "ldx [%%sp + 2047 + 0x70], %%i6\n\t" \
231 "ldx [%%sp + 2047 + 0x78], %%i7\n\t" \ 231 "ldx [%%sp + 2047 + 0x78], %%i7\n\t" \
232 "wrpr %%g0, 0x94, %%pstate\n\t" \
233 "mov %%l2, %%g6\n\t" \
234 "ldx [%%g6 + %6], %%g4\n\t" \ 232 "ldx [%%g6 + %6], %%g4\n\t" \
235 "wrpr %%g0, 0x96, %%pstate\n\t" \
236 "brz,pt %%o7, 1f\n\t" \ 233 "brz,pt %%o7, 1f\n\t" \
237 " mov %%g7, %0\n\t" \ 234 " mov %%g7, %0\n\t" \
238 "b,a ret_from_syscall\n\t" \ 235 "b,a ret_from_syscall\n\t" \
diff --git a/include/asm-sparc64/thread_info.h b/include/asm-sparc64/thread_info.h
index ac9d068aab4f..2ebf7f27bf91 100644
--- a/include/asm-sparc64/thread_info.h
+++ b/include/asm-sparc64/thread_info.h
@@ -64,8 +64,6 @@ struct thread_info {
64 __u64 kernel_cntd0, kernel_cntd1; 64 __u64 kernel_cntd0, kernel_cntd1;
65 __u64 pcr_reg; 65 __u64 pcr_reg;
66 66
67 __u64 cee_stuff;
68
69 struct restart_block restart_block; 67 struct restart_block restart_block;
70 68
71 struct pt_regs *kern_una_regs; 69 struct pt_regs *kern_una_regs;
@@ -104,10 +102,9 @@ struct thread_info {
104#define TI_KERN_CNTD0 0x00000480 102#define TI_KERN_CNTD0 0x00000480
105#define TI_KERN_CNTD1 0x00000488 103#define TI_KERN_CNTD1 0x00000488
106#define TI_PCR 0x00000490 104#define TI_PCR 0x00000490
107#define TI_CEE_STUFF 0x00000498 105#define TI_RESTART_BLOCK 0x00000498
108#define TI_RESTART_BLOCK 0x000004a0 106#define TI_KUNA_REGS 0x000004c0
109#define TI_KUNA_REGS 0x000004c8 107#define TI_KUNA_INSN 0x000004c8
110#define TI_KUNA_INSN 0x000004d0
111#define TI_FPREGS 0x00000500 108#define TI_FPREGS 0x00000500
112 109
113/* We embed this in the uppermost byte of thread_info->flags */ 110/* We embed this in the uppermost byte of thread_info->flags */
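Dropping the 8-byte cee_stuff field shifts every later hand-maintained offset down by 8 (restart_block moves from 0x4a0 to 0x498, kern_una_regs from 0x4c8 to 0x4c0). These TI_* constants must always match the struct layout; a hedged sketch of the kind of compile-time check that keeps them honest (the function name is illustrative, and the constants are assumed to equal the corresponding struct offsets):

#include <linux/kernel.h>
#include <linux/stddef.h>
#include <asm/thread_info.h>

/* Sketch only: fail the build if a TI_* constant drifts from the struct. */
static inline void example_check_thread_info_offsets(void)
{
	BUILD_BUG_ON(TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block));
	BUILD_BUG_ON(TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs));
}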
diff --git a/include/asm-sparc64/timex.h b/include/asm-sparc64/timex.h
index 9e8d4175bcb2..2a5e4ebaad80 100644
--- a/include/asm-sparc64/timex.h
+++ b/include/asm-sparc64/timex.h
@@ -14,4 +14,10 @@
14typedef unsigned long cycles_t; 14typedef unsigned long cycles_t;
15#define get_cycles() tick_ops->get_tick() 15#define get_cycles() tick_ops->get_tick()
16 16
17#define ARCH_HAS_READ_CURRENT_TIMER 1
18#define read_current_timer(timer_val_p) \
19({ *timer_val_p = tick_ops->get_tick(); \
20 0; \
21})
22
17#endif 23#endif
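read_current_timer() is a statement expression: it stores the current %tick value through tick_ops and evaluates to 0, the generic interface's "success" return. A small usage sketch measuring an interval in cycles; the function name is illustrative.

#include <asm/timex.h>

/* Sketch only: cycle-count an arbitrary callback. */
static unsigned long example_cycles_for(void (*fn)(void))
{
	unsigned long start, end;

	read_current_timer(&start);
	fn();
	read_current_timer(&end);

	return end - start;
}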
diff --git a/include/asm-sparc64/tlbflush.h b/include/asm-sparc64/tlbflush.h
index 3ef9909ac3ac..9ad5d9c51d42 100644
--- a/include/asm-sparc64/tlbflush.h
+++ b/include/asm-sparc64/tlbflush.h
@@ -5,6 +5,11 @@
5#include <linux/mm.h> 5#include <linux/mm.h>
6#include <asm/mmu_context.h> 6#include <asm/mmu_context.h>
7 7
8/* TSB flush operations. */
9struct mmu_gather;
10extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
11extern void flush_tsb_user(struct mmu_gather *mp);
12
8/* TLB flush operations. */ 13/* TLB flush operations. */
9 14
10extern void flush_tlb_pending(void); 15extern void flush_tlb_pending(void);
@@ -14,28 +19,36 @@ extern void flush_tlb_pending(void);
14#define flush_tlb_page(vma,addr) flush_tlb_pending() 19#define flush_tlb_page(vma,addr) flush_tlb_pending()
15#define flush_tlb_mm(mm) flush_tlb_pending() 20#define flush_tlb_mm(mm) flush_tlb_pending()
16 21
22/* Local cpu only. */
17extern void __flush_tlb_all(void); 23extern void __flush_tlb_all(void);
24
18extern void __flush_tlb_page(unsigned long context, unsigned long page, unsigned long r); 25extern void __flush_tlb_page(unsigned long context, unsigned long page, unsigned long r);
19 26
20extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end); 27extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
21 28
22#ifndef CONFIG_SMP 29#ifndef CONFIG_SMP
23 30
24#define flush_tlb_all() __flush_tlb_all()
25#define flush_tlb_kernel_range(start,end) \ 31#define flush_tlb_kernel_range(start,end) \
26 __flush_tlb_kernel_range(start,end) 32do { flush_tsb_kernel_range(start,end); \
33 __flush_tlb_kernel_range(start,end); \
34} while (0)
27 35
28#else /* CONFIG_SMP */ 36#else /* CONFIG_SMP */
29 37
30extern void smp_flush_tlb_all(void);
31extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end); 38extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
32 39
33#define flush_tlb_all() smp_flush_tlb_all()
34#define flush_tlb_kernel_range(start, end) \ 40#define flush_tlb_kernel_range(start, end) \
35 smp_flush_tlb_kernel_range(start, end) 41do { flush_tsb_kernel_range(start,end); \
42 smp_flush_tlb_kernel_range(start, end); \
43} while (0)
36 44
37#endif /* ! CONFIG_SMP */ 45#endif /* ! CONFIG_SMP */
38 46
39extern void flush_tlb_pgtables(struct mm_struct *, unsigned long, unsigned long); 47static inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
48{
49 /* We don't use virtual page tables for TLB miss processing
50 * any more. Nowadays we use the TSB.
51 */
52}
40 53
41#endif /* _SPARC64_TLBFLUSH_H */ 54#endif /* _SPARC64_TLBFLUSH_H */
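
Note the ordering the reworked flush_tlb_kernel_range() wrappers encode: the software TSB entries for the range are dropped first, then the hardware TLB is flushed. A hedged restatement of the !CONFIG_SMP expansion (this paraphrases the macro above, it is not new code):

    /* flush_tlb_kernel_range(start, end) on !SMP amounts to: */
    flush_tsb_kernel_range(start, end);     /* invalidate matching kernel TSB slots */
    __flush_tlb_kernel_range(start, end);   /* then shoot down the TLB on this cpu */
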
diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h
new file mode 100644
index 000000000000..e82612cd9f33
--- /dev/null
+++ b/include/asm-sparc64/tsb.h
@@ -0,0 +1,281 @@
1#ifndef _SPARC64_TSB_H
2#define _SPARC64_TSB_H
3
4/* The sparc64 TSB is similar to the powerpc hashtables. It's a
5 * power-of-2 sized table of TAG/PTE pairs. The cpu precomputes
6 * pointers into this table for 8K and 64K page sizes, and also a
7 * comparison TAG based upon the virtual address and context which
8 * faults.
9 *
10 * TLB miss trap handler software does the actual lookup via something
11 * of the form:
12 *
13 * ldxa [%g0] ASI_{D,I}MMU_TSB_8KB_PTR, %g1
14 * ldxa [%g0] ASI_{D,I}MMU, %g6
15 * sllx %g6, 22, %g6
16 * srlx %g6, 22, %g6
17 * ldda [%g1] ASI_NUCLEUS_QUAD_LDD, %g4
18 * cmp %g4, %g6
19 * bne,pn %xcc, tsb_miss_{d,i}tlb
20 * mov FAULT_CODE_{D,I}TLB, %g3
21 * stxa %g5, [%g0] ASI_{D,I}TLB_DATA_IN
22 * retry
23 *
24 *
25 * Each 16-byte slot of the TSB is the 8-byte tag and then the 8-byte
26 * PTE. The TAG is of the same layout as the TLB TAG TARGET mmu
27 * register which is:
28 *
29 * -------------------------------------------------
30 * | - | CONTEXT | - | VADDR bits 63:22 |
31 * -------------------------------------------------
32 * 63 61 60 48 47 42 41 0
33 *
 34 * But actually, since we use per-mm TSBs, we zero out the CONTEXT
35 * field.
36 *
37 * Like the powerpc hashtables we need to use locking in order to
38 * synchronize while we update the entries. PTE updates need locking
39 * as well.
40 *
 41 * We need to carefully choose the lock bit for the TSB entry. We
42 * choose to use bit 47 in the tag. Also, since we never map anything
43 * at page zero in context zero, we use zero as an invalid tag entry.
44 * When the lock bit is set, this forces a tag comparison failure.
45 */
46
47#define TSB_TAG_LOCK_BIT 47
48#define TSB_TAG_LOCK_HIGH (1 << (TSB_TAG_LOCK_BIT - 32))
49
50#define TSB_TAG_INVALID_BIT 46
51#define TSB_TAG_INVALID_HIGH (1 << (TSB_TAG_INVALID_BIT - 32))
52
53#define TSB_MEMBAR membar #StoreStore
54
55/* Some cpus support physical address quad loads. We want to use
56 * those if possible so we don't need to hard-lock the TSB mapping
57 * into the TLB. We encode some instruction patching in order to
58 * support this.
59 *
60 * The kernel TSB is locked into the TLB by virtue of being in the
61 * kernel image, so we don't play these games for swapper_tsb access.
62 */
63#ifndef __ASSEMBLY__
64struct tsb_ldquad_phys_patch_entry {
65 unsigned int addr;
66 unsigned int sun4u_insn;
67 unsigned int sun4v_insn;
68};
69extern struct tsb_ldquad_phys_patch_entry __tsb_ldquad_phys_patch,
70 __tsb_ldquad_phys_patch_end;
71
72struct tsb_phys_patch_entry {
73 unsigned int addr;
74 unsigned int insn;
75};
76extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
77#endif
78#define TSB_LOAD_QUAD(TSB, REG) \
79661: ldda [TSB] ASI_NUCLEUS_QUAD_LDD, REG; \
80 .section .tsb_ldquad_phys_patch, "ax"; \
81 .word 661b; \
82 ldda [TSB] ASI_QUAD_LDD_PHYS, REG; \
83 ldda [TSB] ASI_QUAD_LDD_PHYS_4V, REG; \
84 .previous
85
86#define TSB_LOAD_TAG_HIGH(TSB, REG) \
87661: lduwa [TSB] ASI_N, REG; \
88 .section .tsb_phys_patch, "ax"; \
89 .word 661b; \
90 lduwa [TSB] ASI_PHYS_USE_EC, REG; \
91 .previous
92
93#define TSB_LOAD_TAG(TSB, REG) \
94661: ldxa [TSB] ASI_N, REG; \
95 .section .tsb_phys_patch, "ax"; \
96 .word 661b; \
97 ldxa [TSB] ASI_PHYS_USE_EC, REG; \
98 .previous
99
100#define TSB_CAS_TAG_HIGH(TSB, REG1, REG2) \
101661: casa [TSB] ASI_N, REG1, REG2; \
102 .section .tsb_phys_patch, "ax"; \
103 .word 661b; \
104 casa [TSB] ASI_PHYS_USE_EC, REG1, REG2; \
105 .previous
106
107#define TSB_CAS_TAG(TSB, REG1, REG2) \
108661: casxa [TSB] ASI_N, REG1, REG2; \
109 .section .tsb_phys_patch, "ax"; \
110 .word 661b; \
111 casxa [TSB] ASI_PHYS_USE_EC, REG1, REG2; \
112 .previous
113
114#define TSB_STORE(ADDR, VAL) \
115661: stxa VAL, [ADDR] ASI_N; \
116 .section .tsb_phys_patch, "ax"; \
117 .word 661b; \
118 stxa VAL, [ADDR] ASI_PHYS_USE_EC; \
119 .previous
120
121#define TSB_LOCK_TAG(TSB, REG1, REG2) \
12299: TSB_LOAD_TAG_HIGH(TSB, REG1); \
123 sethi %hi(TSB_TAG_LOCK_HIGH), REG2;\
124 andcc REG1, REG2, %g0; \
125 bne,pn %icc, 99b; \
126 nop; \
127 TSB_CAS_TAG_HIGH(TSB, REG1, REG2); \
128 cmp REG1, REG2; \
129 bne,pn %icc, 99b; \
130 nop; \
131 TSB_MEMBAR
132
133#define TSB_WRITE(TSB, TTE, TAG) \
134 add TSB, 0x8, TSB; \
135 TSB_STORE(TSB, TTE); \
136 sub TSB, 0x8, TSB; \
137 TSB_MEMBAR; \
138 TSB_STORE(TSB, TAG);
139
140#define KTSB_LOAD_QUAD(TSB, REG) \
141 ldda [TSB] ASI_NUCLEUS_QUAD_LDD, REG;
142
143#define KTSB_STORE(ADDR, VAL) \
144 stxa VAL, [ADDR] ASI_N;
145
146#define KTSB_LOCK_TAG(TSB, REG1, REG2) \
14799: lduwa [TSB] ASI_N, REG1; \
148 sethi %hi(TSB_TAG_LOCK_HIGH), REG2;\
149 andcc REG1, REG2, %g0; \
150 bne,pn %icc, 99b; \
151 nop; \
152 casa [TSB] ASI_N, REG1, REG2;\
153 cmp REG1, REG2; \
154 bne,pn %icc, 99b; \
155 nop; \
156 TSB_MEMBAR
157
158#define KTSB_WRITE(TSB, TTE, TAG) \
159 add TSB, 0x8, TSB; \
160 stxa TTE, [TSB] ASI_N; \
161 sub TSB, 0x8, TSB; \
162 TSB_MEMBAR; \
163 stxa TAG, [TSB] ASI_N;
164
165 /* Do a kernel page table walk. Leaves physical PTE pointer in
166 * REG1. Jumps to FAIL_LABEL on early page table walk termination.
167 * VADDR will not be clobbered, but REG2 will.
168 */
169#define KERN_PGTABLE_WALK(VADDR, REG1, REG2, FAIL_LABEL) \
170 sethi %hi(swapper_pg_dir), REG1; \
171 or REG1, %lo(swapper_pg_dir), REG1; \
172 sllx VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \
173 srlx REG2, 64 - PAGE_SHIFT, REG2; \
174 andn REG2, 0x3, REG2; \
175 lduw [REG1 + REG2], REG1; \
176 brz,pn REG1, FAIL_LABEL; \
177 sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
178 srlx REG2, 64 - PAGE_SHIFT, REG2; \
179 sllx REG1, 11, REG1; \
180 andn REG2, 0x3, REG2; \
181 lduwa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
182 brz,pn REG1, FAIL_LABEL; \
183 sllx VADDR, 64 - PMD_SHIFT, REG2; \
184 srlx REG2, 64 - PAGE_SHIFT, REG2; \
185 sllx REG1, 11, REG1; \
186 andn REG2, 0x7, REG2; \
187 add REG1, REG2, REG1;
188
189 /* Do a user page table walk in MMU globals. Leaves physical PTE
190 * pointer in REG1. Jumps to FAIL_LABEL on early page table walk
191 * termination. Physical base of page tables is in PHYS_PGD which
192 * will not be modified.
193 *
194 * VADDR will not be clobbered, but REG1 and REG2 will.
195 */
196#define USER_PGTABLE_WALK_TL1(VADDR, PHYS_PGD, REG1, REG2, FAIL_LABEL) \
197 sllx VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \
198 srlx REG2, 64 - PAGE_SHIFT, REG2; \
199 andn REG2, 0x3, REG2; \
200 lduwa [PHYS_PGD + REG2] ASI_PHYS_USE_EC, REG1; \
201 brz,pn REG1, FAIL_LABEL; \
202 sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
203 srlx REG2, 64 - PAGE_SHIFT, REG2; \
204 sllx REG1, 11, REG1; \
205 andn REG2, 0x3, REG2; \
206 lduwa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
207 brz,pn REG1, FAIL_LABEL; \
208 sllx VADDR, 64 - PMD_SHIFT, REG2; \
209 srlx REG2, 64 - PAGE_SHIFT, REG2; \
210 sllx REG1, 11, REG1; \
211 andn REG2, 0x7, REG2; \
212 add REG1, REG2, REG1;
213
214/* Look up an OBP mapping on VADDR in the prom_trans[] table at TL>0.
215 * If no entry is found, FAIL_LABEL will be branched to. On success
216 * the resulting PTE value will be left in REG1. VADDR is preserved
217 * by this routine.
218 */
219#define OBP_TRANS_LOOKUP(VADDR, REG1, REG2, REG3, FAIL_LABEL) \
220 sethi %hi(prom_trans), REG1; \
221 or REG1, %lo(prom_trans), REG1; \
22297: ldx [REG1 + 0x00], REG2; \
223 brz,pn REG2, FAIL_LABEL; \
224 nop; \
225 ldx [REG1 + 0x08], REG3; \
226 add REG2, REG3, REG3; \
227 cmp REG2, VADDR; \
228 bgu,pt %xcc, 98f; \
229 cmp VADDR, REG3; \
230 bgeu,pt %xcc, 98f; \
231 ldx [REG1 + 0x10], REG3; \
232 sub VADDR, REG2, REG2; \
233 ba,pt %xcc, 99f; \
234 add REG3, REG2, REG1; \
23598: ba,pt %xcc, 97b; \
236 add REG1, (3 * 8), REG1; \
23799:
238
 239 /* We use a 32K TSB for the whole kernel; this lets us
240 * handle about 16MB of modules and vmalloc mappings without
241 * incurring many hash conflicts.
242 */
243#define KERNEL_TSB_SIZE_BYTES (32 * 1024)
244#define KERNEL_TSB_NENTRIES \
245 (KERNEL_TSB_SIZE_BYTES / 16)
246#define KERNEL_TSB4M_NENTRIES 4096
247
248 /* Do a kernel TSB lookup at tl>0 on VADDR+TAG, branch to OK_LABEL
249 * on TSB hit. REG1, REG2, REG3, and REG4 are used as temporaries
250 * and the found TTE will be left in REG1. REG3 and REG4 must
251 * be an even/odd pair of registers.
252 *
253 * VADDR and TAG will be preserved and not clobbered by this macro.
254 */
255#define KERN_TSB_LOOKUP_TL1(VADDR, TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
256 sethi %hi(swapper_tsb), REG1; \
257 or REG1, %lo(swapper_tsb), REG1; \
258 srlx VADDR, PAGE_SHIFT, REG2; \
259 and REG2, (KERNEL_TSB_NENTRIES - 1), REG2; \
260 sllx REG2, 4, REG2; \
261 add REG1, REG2, REG2; \
262 KTSB_LOAD_QUAD(REG2, REG3); \
263 cmp REG3, TAG; \
264 be,a,pt %xcc, OK_LABEL; \
265 mov REG4, REG1;
266
 267 /* This version uses a trick: the TAG is already (VADDR >> 22), so
268 * we can make use of that for the index computation.
269 */
270#define KERN_TSB4M_LOOKUP_TL1(TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
271 sethi %hi(swapper_4m_tsb), REG1; \
272 or REG1, %lo(swapper_4m_tsb), REG1; \
273 and TAG, (KERNEL_TSB_NENTRIES - 1), REG2; \
274 sllx REG2, 4, REG2; \
275 add REG1, REG2, REG2; \
276 KTSB_LOAD_QUAD(REG2, REG3); \
277 cmp REG3, TAG; \
278 be,a,pt %xcc, OK_LABEL; \
279 mov REG4, REG1;
280
281#endif /* !(_SPARC64_TSB_H) */
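
For readers following the assembly above, a hedged C model of what KERN_TSB_LOOKUP_TL1 computes for the 8K kernel TSB (struct tsb_entry and the typed swapper_tsb declaration are illustrative only; the real code works on addresses held in registers at TL>0):

    struct tsb_entry { unsigned long tag, pte; };   /* one 16-byte TSB slot */

    static unsigned long ktsb_lookup(unsigned long vaddr)
    {
            extern struct tsb_entry swapper_tsb[];  /* illustrative typing */
            unsigned long idx = (vaddr >> PAGE_SHIFT) & (KERNEL_TSB_NENTRIES - 1);
            unsigned long tag = vaddr >> 22;        /* matches the TAG TARGET layout */

            if (swapper_tsb[idx].tag == tag)
                    return swapper_tsb[idx].pte;    /* hit: TTE ready for the TLB */
            return 0;                               /* miss: fall back to page tables */
    }
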
diff --git a/include/asm-sparc64/ttable.h b/include/asm-sparc64/ttable.h
index 2784f80094c3..2d5e3c464df5 100644
--- a/include/asm-sparc64/ttable.h
+++ b/include/asm-sparc64/ttable.h
@@ -93,7 +93,7 @@
93 93
94#define SYSCALL_TRAP(routine, systbl) \ 94#define SYSCALL_TRAP(routine, systbl) \
95 sethi %hi(109f), %g7; \ 95 sethi %hi(109f), %g7; \
96 ba,pt %xcc, scetrap; \ 96 ba,pt %xcc, etrap; \
97109: or %g7, %lo(109b), %g7; \ 97109: or %g7, %lo(109b), %g7; \
98 sethi %hi(systbl), %l7; \ 98 sethi %hi(systbl), %l7; \
99 ba,pt %xcc, routine; \ 99 ba,pt %xcc, routine; \
@@ -109,14 +109,14 @@
109 nop;nop;nop; 109 nop;nop;nop;
110 110
111#define TRAP_UTRAP(handler,lvl) \ 111#define TRAP_UTRAP(handler,lvl) \
112 ldx [%g6 + TI_UTRAPS], %g1; \ 112 mov handler, %g3; \
113 sethi %hi(109f), %g7; \ 113 ba,pt %xcc, utrap_trap; \
114 brz,pn %g1, utrap; \ 114 mov lvl, %g4; \
115 or %g7, %lo(109f), %g7; \ 115 nop; \
116 ba,pt %xcc, utrap; \ 116 nop; \
117109: ldx [%g1 + handler*8], %g1; \ 117 nop; \
118 ba,pt %xcc, utrap_ill; \ 118 nop; \
119 mov lvl, %o1; 119 nop;
120 120
121#ifdef CONFIG_SUNOS_EMUL 121#ifdef CONFIG_SUNOS_EMUL
122#define SUNOS_SYSCALL_TRAP SYSCALL_TRAP(linux_sparc_syscall32, sunos_sys_table) 122#define SUNOS_SYSCALL_TRAP SYSCALL_TRAP(linux_sparc_syscall32, sunos_sys_table)
@@ -136,8 +136,6 @@
136#else 136#else
137#define SOLARIS_SYSCALL_TRAP TRAP(solaris_syscall) 137#define SOLARIS_SYSCALL_TRAP TRAP(solaris_syscall)
138#endif 138#endif
139/* FIXME: Write these actually */
140#define NETBSD_SYSCALL_TRAP TRAP(netbsd_syscall)
141#define BREAKPOINT_TRAP TRAP(breakpoint_trap) 139#define BREAKPOINT_TRAP TRAP(breakpoint_trap)
142 140
143#define TRAP_IRQ(routine, level) \ 141#define TRAP_IRQ(routine, level) \
@@ -182,6 +180,26 @@
182#define KPROBES_TRAP(lvl) TRAP_ARG(bad_trap, lvl) 180#define KPROBES_TRAP(lvl) TRAP_ARG(bad_trap, lvl)
183#endif 181#endif
184 182
183#define SUN4V_ITSB_MISS \
184 ldxa [%g0] ASI_SCRATCHPAD, %g2; \
185 ldx [%g2 + HV_FAULT_I_ADDR_OFFSET], %g4; \
186 ldx [%g2 + HV_FAULT_I_CTX_OFFSET], %g5; \
187 srlx %g4, 22, %g6; \
188 ba,pt %xcc, sun4v_itsb_miss; \
189 nop; \
190 nop; \
191 nop;
192
193#define SUN4V_DTSB_MISS \
194 ldxa [%g0] ASI_SCRATCHPAD, %g2; \
195 ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4; \
196 ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5; \
197 srlx %g4, 22, %g6; \
198 ba,pt %xcc, sun4v_dtsb_miss; \
199 nop; \
200 nop; \
201 nop;
202
185/* Before touching these macros, you owe it to yourself to go and 203/* Before touching these macros, you owe it to yourself to go and
186 * see how arch/sparc64/kernel/winfixup.S works... -DaveM 204 * see how arch/sparc64/kernel/winfixup.S works... -DaveM
187 * 205 *
@@ -221,6 +239,31 @@
221 saved; retry; nop; nop; nop; nop; nop; nop; \ 239 saved; retry; nop; nop; nop; nop; nop; nop; \
222 nop; nop; nop; nop; nop; nop; nop; nop; 240 nop; nop; nop; nop; nop; nop; nop; nop;
223 241
242#define SPILL_0_NORMAL_ETRAP \
243etrap_kernel_spill: \
244 stx %l0, [%sp + STACK_BIAS + 0x00]; \
245 stx %l1, [%sp + STACK_BIAS + 0x08]; \
246 stx %l2, [%sp + STACK_BIAS + 0x10]; \
247 stx %l3, [%sp + STACK_BIAS + 0x18]; \
248 stx %l4, [%sp + STACK_BIAS + 0x20]; \
249 stx %l5, [%sp + STACK_BIAS + 0x28]; \
250 stx %l6, [%sp + STACK_BIAS + 0x30]; \
251 stx %l7, [%sp + STACK_BIAS + 0x38]; \
252 stx %i0, [%sp + STACK_BIAS + 0x40]; \
253 stx %i1, [%sp + STACK_BIAS + 0x48]; \
254 stx %i2, [%sp + STACK_BIAS + 0x50]; \
255 stx %i3, [%sp + STACK_BIAS + 0x58]; \
256 stx %i4, [%sp + STACK_BIAS + 0x60]; \
257 stx %i5, [%sp + STACK_BIAS + 0x68]; \
258 stx %i6, [%sp + STACK_BIAS + 0x70]; \
259 stx %i7, [%sp + STACK_BIAS + 0x78]; \
260 saved; \
261 sub %g1, 2, %g1; \
262 ba,pt %xcc, etrap_save; \
263 wrpr %g1, %cwp; \
264 nop; nop; nop; nop; nop; nop; nop; nop; \
265 nop; nop; nop; nop;
266
224/* Normal 64bit spill */ 267/* Normal 64bit spill */
225#define SPILL_1_GENERIC(ASI) \ 268#define SPILL_1_GENERIC(ASI) \
226 add %sp, STACK_BIAS + 0x00, %g1; \ 269 add %sp, STACK_BIAS + 0x00, %g1; \
@@ -254,6 +297,67 @@
254 b,a,pt %xcc, spill_fixup_mna; \ 297 b,a,pt %xcc, spill_fixup_mna; \
255 b,a,pt %xcc, spill_fixup; 298 b,a,pt %xcc, spill_fixup;
256 299
300#define SPILL_1_GENERIC_ETRAP \
301etrap_user_spill_64bit: \
302 stxa %l0, [%sp + STACK_BIAS + 0x00] %asi; \
303 stxa %l1, [%sp + STACK_BIAS + 0x08] %asi; \
304 stxa %l2, [%sp + STACK_BIAS + 0x10] %asi; \
305 stxa %l3, [%sp + STACK_BIAS + 0x18] %asi; \
306 stxa %l4, [%sp + STACK_BIAS + 0x20] %asi; \
307 stxa %l5, [%sp + STACK_BIAS + 0x28] %asi; \
308 stxa %l6, [%sp + STACK_BIAS + 0x30] %asi; \
309 stxa %l7, [%sp + STACK_BIAS + 0x38] %asi; \
310 stxa %i0, [%sp + STACK_BIAS + 0x40] %asi; \
311 stxa %i1, [%sp + STACK_BIAS + 0x48] %asi; \
312 stxa %i2, [%sp + STACK_BIAS + 0x50] %asi; \
313 stxa %i3, [%sp + STACK_BIAS + 0x58] %asi; \
314 stxa %i4, [%sp + STACK_BIAS + 0x60] %asi; \
315 stxa %i5, [%sp + STACK_BIAS + 0x68] %asi; \
316 stxa %i6, [%sp + STACK_BIAS + 0x70] %asi; \
317 stxa %i7, [%sp + STACK_BIAS + 0x78] %asi; \
318 saved; \
319 sub %g1, 2, %g1; \
320 ba,pt %xcc, etrap_save; \
321 wrpr %g1, %cwp; \
322 nop; nop; nop; nop; nop; \
323 nop; nop; nop; nop; \
324 ba,a,pt %xcc, etrap_spill_fixup_64bit; \
325 ba,a,pt %xcc, etrap_spill_fixup_64bit; \
326 ba,a,pt %xcc, etrap_spill_fixup_64bit;
327
328#define SPILL_1_GENERIC_ETRAP_FIXUP \
329etrap_spill_fixup_64bit: \
330 ldub [%g6 + TI_WSAVED], %g1; \
331 sll %g1, 3, %g3; \
332 add %g6, %g3, %g3; \
333 stx %sp, [%g3 + TI_RWIN_SPTRS]; \
334 sll %g1, 7, %g3; \
335 add %g6, %g3, %g3; \
336 stx %l0, [%g3 + TI_REG_WINDOW + 0x00]; \
337 stx %l1, [%g3 + TI_REG_WINDOW + 0x08]; \
338 stx %l2, [%g3 + TI_REG_WINDOW + 0x10]; \
339 stx %l3, [%g3 + TI_REG_WINDOW + 0x18]; \
340 stx %l4, [%g3 + TI_REG_WINDOW + 0x20]; \
341 stx %l5, [%g3 + TI_REG_WINDOW + 0x28]; \
342 stx %l6, [%g3 + TI_REG_WINDOW + 0x30]; \
343 stx %l7, [%g3 + TI_REG_WINDOW + 0x38]; \
344 stx %i0, [%g3 + TI_REG_WINDOW + 0x40]; \
345 stx %i1, [%g3 + TI_REG_WINDOW + 0x48]; \
346 stx %i2, [%g3 + TI_REG_WINDOW + 0x50]; \
347 stx %i3, [%g3 + TI_REG_WINDOW + 0x58]; \
348 stx %i4, [%g3 + TI_REG_WINDOW + 0x60]; \
349 stx %i5, [%g3 + TI_REG_WINDOW + 0x68]; \
350 stx %i6, [%g3 + TI_REG_WINDOW + 0x70]; \
351 stx %i7, [%g3 + TI_REG_WINDOW + 0x78]; \
352 add %g1, 1, %g1; \
353 stb %g1, [%g6 + TI_WSAVED]; \
354 saved; \
355 rdpr %cwp, %g1; \
356 sub %g1, 2, %g1; \
357 ba,pt %xcc, etrap_save; \
358 wrpr %g1, %cwp; \
359 nop; nop; nop
360
257/* Normal 32bit spill */ 361/* Normal 32bit spill */
258#define SPILL_2_GENERIC(ASI) \ 362#define SPILL_2_GENERIC(ASI) \
259 srl %sp, 0, %sp; \ 363 srl %sp, 0, %sp; \
@@ -287,6 +391,68 @@
287 b,a,pt %xcc, spill_fixup_mna; \ 391 b,a,pt %xcc, spill_fixup_mna; \
288 b,a,pt %xcc, spill_fixup; 392 b,a,pt %xcc, spill_fixup;
289 393
394#define SPILL_2_GENERIC_ETRAP \
395etrap_user_spill_32bit: \
396 srl %sp, 0, %sp; \
397 stwa %l0, [%sp + 0x00] %asi; \
398 stwa %l1, [%sp + 0x04] %asi; \
399 stwa %l2, [%sp + 0x08] %asi; \
400 stwa %l3, [%sp + 0x0c] %asi; \
401 stwa %l4, [%sp + 0x10] %asi; \
402 stwa %l5, [%sp + 0x14] %asi; \
403 stwa %l6, [%sp + 0x18] %asi; \
404 stwa %l7, [%sp + 0x1c] %asi; \
405 stwa %i0, [%sp + 0x20] %asi; \
406 stwa %i1, [%sp + 0x24] %asi; \
407 stwa %i2, [%sp + 0x28] %asi; \
408 stwa %i3, [%sp + 0x2c] %asi; \
409 stwa %i4, [%sp + 0x30] %asi; \
410 stwa %i5, [%sp + 0x34] %asi; \
411 stwa %i6, [%sp + 0x38] %asi; \
412 stwa %i7, [%sp + 0x3c] %asi; \
413 saved; \
414 sub %g1, 2, %g1; \
415 ba,pt %xcc, etrap_save; \
416 wrpr %g1, %cwp; \
417 nop; nop; nop; nop; \
418 nop; nop; nop; nop; \
419 ba,a,pt %xcc, etrap_spill_fixup_32bit; \
420 ba,a,pt %xcc, etrap_spill_fixup_32bit; \
421 ba,a,pt %xcc, etrap_spill_fixup_32bit;
422
423#define SPILL_2_GENERIC_ETRAP_FIXUP \
424etrap_spill_fixup_32bit: \
425 ldub [%g6 + TI_WSAVED], %g1; \
426 sll %g1, 3, %g3; \
427 add %g6, %g3, %g3; \
428 stx %sp, [%g3 + TI_RWIN_SPTRS]; \
429 sll %g1, 7, %g3; \
430 add %g6, %g3, %g3; \
431 stw %l0, [%g3 + TI_REG_WINDOW + 0x00]; \
432 stw %l1, [%g3 + TI_REG_WINDOW + 0x04]; \
433 stw %l2, [%g3 + TI_REG_WINDOW + 0x08]; \
434 stw %l3, [%g3 + TI_REG_WINDOW + 0x0c]; \
435 stw %l4, [%g3 + TI_REG_WINDOW + 0x10]; \
436 stw %l5, [%g3 + TI_REG_WINDOW + 0x14]; \
437 stw %l6, [%g3 + TI_REG_WINDOW + 0x18]; \
438 stw %l7, [%g3 + TI_REG_WINDOW + 0x1c]; \
439 stw %i0, [%g3 + TI_REG_WINDOW + 0x20]; \
440 stw %i1, [%g3 + TI_REG_WINDOW + 0x24]; \
441 stw %i2, [%g3 + TI_REG_WINDOW + 0x28]; \
442 stw %i3, [%g3 + TI_REG_WINDOW + 0x2c]; \
443 stw %i4, [%g3 + TI_REG_WINDOW + 0x30]; \
444 stw %i5, [%g3 + TI_REG_WINDOW + 0x34]; \
445 stw %i6, [%g3 + TI_REG_WINDOW + 0x38]; \
446 stw %i7, [%g3 + TI_REG_WINDOW + 0x3c]; \
447 add %g1, 1, %g1; \
448 stb %g1, [%g6 + TI_WSAVED]; \
449 saved; \
450 rdpr %cwp, %g1; \
451 sub %g1, 2, %g1; \
452 ba,pt %xcc, etrap_save; \
453 wrpr %g1, %cwp; \
454 nop; nop; nop
455
290#define SPILL_1_NORMAL SPILL_1_GENERIC(ASI_AIUP) 456#define SPILL_1_NORMAL SPILL_1_GENERIC(ASI_AIUP)
291#define SPILL_2_NORMAL SPILL_2_GENERIC(ASI_AIUP) 457#define SPILL_2_NORMAL SPILL_2_GENERIC(ASI_AIUP)
292#define SPILL_3_NORMAL SPILL_0_NORMAL 458#define SPILL_3_NORMAL SPILL_0_NORMAL
@@ -325,6 +491,35 @@
325 restored; retry; nop; nop; nop; nop; nop; nop; \ 491 restored; retry; nop; nop; nop; nop; nop; nop; \
326 nop; nop; nop; nop; nop; nop; nop; nop; 492 nop; nop; nop; nop; nop; nop; nop; nop;
327 493
494#define FILL_0_NORMAL_RTRAP \
495kern_rtt_fill: \
496 rdpr %cwp, %g1; \
497 sub %g1, 1, %g1; \
498 wrpr %g1, %cwp; \
499 ldx [%sp + STACK_BIAS + 0x00], %l0; \
500 ldx [%sp + STACK_BIAS + 0x08], %l1; \
501 ldx [%sp + STACK_BIAS + 0x10], %l2; \
502 ldx [%sp + STACK_BIAS + 0x18], %l3; \
503 ldx [%sp + STACK_BIAS + 0x20], %l4; \
504 ldx [%sp + STACK_BIAS + 0x28], %l5; \
505 ldx [%sp + STACK_BIAS + 0x30], %l6; \
506 ldx [%sp + STACK_BIAS + 0x38], %l7; \
507 ldx [%sp + STACK_BIAS + 0x40], %i0; \
508 ldx [%sp + STACK_BIAS + 0x48], %i1; \
509 ldx [%sp + STACK_BIAS + 0x50], %i2; \
510 ldx [%sp + STACK_BIAS + 0x58], %i3; \
511 ldx [%sp + STACK_BIAS + 0x60], %i4; \
512 ldx [%sp + STACK_BIAS + 0x68], %i5; \
513 ldx [%sp + STACK_BIAS + 0x70], %i6; \
514 ldx [%sp + STACK_BIAS + 0x78], %i7; \
515 restored; \
516 add %g1, 1, %g1; \
517 ba,pt %xcc, kern_rtt_restore; \
518 wrpr %g1, %cwp; \
519 nop; nop; nop; nop; nop; \
520 nop; nop; nop; nop;
521
522
328/* Normal 64bit fill */ 523/* Normal 64bit fill */
329#define FILL_1_GENERIC(ASI) \ 524#define FILL_1_GENERIC(ASI) \
330 add %sp, STACK_BIAS + 0x00, %g1; \ 525 add %sp, STACK_BIAS + 0x00, %g1; \
@@ -356,6 +551,33 @@
356 b,a,pt %xcc, fill_fixup_mna; \ 551 b,a,pt %xcc, fill_fixup_mna; \
357 b,a,pt %xcc, fill_fixup; 552 b,a,pt %xcc, fill_fixup;
358 553
554#define FILL_1_GENERIC_RTRAP \
555user_rtt_fill_64bit: \
556 ldxa [%sp + STACK_BIAS + 0x00] %asi, %l0; \
557 ldxa [%sp + STACK_BIAS + 0x08] %asi, %l1; \
558 ldxa [%sp + STACK_BIAS + 0x10] %asi, %l2; \
559 ldxa [%sp + STACK_BIAS + 0x18] %asi, %l3; \
560 ldxa [%sp + STACK_BIAS + 0x20] %asi, %l4; \
561 ldxa [%sp + STACK_BIAS + 0x28] %asi, %l5; \
562 ldxa [%sp + STACK_BIAS + 0x30] %asi, %l6; \
563 ldxa [%sp + STACK_BIAS + 0x38] %asi, %l7; \
564 ldxa [%sp + STACK_BIAS + 0x40] %asi, %i0; \
565 ldxa [%sp + STACK_BIAS + 0x48] %asi, %i1; \
566 ldxa [%sp + STACK_BIAS + 0x50] %asi, %i2; \
567 ldxa [%sp + STACK_BIAS + 0x58] %asi, %i3; \
568 ldxa [%sp + STACK_BIAS + 0x60] %asi, %i4; \
569 ldxa [%sp + STACK_BIAS + 0x68] %asi, %i5; \
570 ldxa [%sp + STACK_BIAS + 0x70] %asi, %i6; \
571 ldxa [%sp + STACK_BIAS + 0x78] %asi, %i7; \
572 ba,pt %xcc, user_rtt_pre_restore; \
573 restored; \
574 nop; nop; nop; nop; nop; nop; \
575 nop; nop; nop; nop; nop; \
576 ba,a,pt %xcc, user_rtt_fill_fixup; \
577 ba,a,pt %xcc, user_rtt_fill_fixup; \
578 ba,a,pt %xcc, user_rtt_fill_fixup;
579
580
359/* Normal 32bit fill */ 581/* Normal 32bit fill */
360#define FILL_2_GENERIC(ASI) \ 582#define FILL_2_GENERIC(ASI) \
361 srl %sp, 0, %sp; \ 583 srl %sp, 0, %sp; \
@@ -387,6 +609,34 @@
387 b,a,pt %xcc, fill_fixup_mna; \ 609 b,a,pt %xcc, fill_fixup_mna; \
388 b,a,pt %xcc, fill_fixup; 610 b,a,pt %xcc, fill_fixup;
389 611
612#define FILL_2_GENERIC_RTRAP \
613user_rtt_fill_32bit: \
614 srl %sp, 0, %sp; \
615 lduwa [%sp + 0x00] %asi, %l0; \
616 lduwa [%sp + 0x04] %asi, %l1; \
617 lduwa [%sp + 0x08] %asi, %l2; \
618 lduwa [%sp + 0x0c] %asi, %l3; \
619 lduwa [%sp + 0x10] %asi, %l4; \
620 lduwa [%sp + 0x14] %asi, %l5; \
621 lduwa [%sp + 0x18] %asi, %l6; \
622 lduwa [%sp + 0x1c] %asi, %l7; \
623 lduwa [%sp + 0x20] %asi, %i0; \
624 lduwa [%sp + 0x24] %asi, %i1; \
625 lduwa [%sp + 0x28] %asi, %i2; \
626 lduwa [%sp + 0x2c] %asi, %i3; \
627 lduwa [%sp + 0x30] %asi, %i4; \
628 lduwa [%sp + 0x34] %asi, %i5; \
629 lduwa [%sp + 0x38] %asi, %i6; \
630 lduwa [%sp + 0x3c] %asi, %i7; \
631 ba,pt %xcc, user_rtt_pre_restore; \
632 restored; \
633 nop; nop; nop; nop; nop; \
634 nop; nop; nop; nop; nop; \
635 ba,a,pt %xcc, user_rtt_fill_fixup; \
636 ba,a,pt %xcc, user_rtt_fill_fixup; \
637 ba,a,pt %xcc, user_rtt_fill_fixup;
638
639
390#define FILL_1_NORMAL FILL_1_GENERIC(ASI_AIUP) 640#define FILL_1_NORMAL FILL_1_GENERIC(ASI_AIUP)
391#define FILL_2_NORMAL FILL_2_GENERIC(ASI_AIUP) 641#define FILL_2_NORMAL FILL_2_GENERIC(ASI_AIUP)
392#define FILL_3_NORMAL FILL_0_NORMAL 642#define FILL_3_NORMAL FILL_0_NORMAL
diff --git a/include/asm-sparc64/uaccess.h b/include/asm-sparc64/uaccess.h
index c91d1e38eac6..afe236ba555b 100644
--- a/include/asm-sparc64/uaccess.h
+++ b/include/asm-sparc64/uaccess.h
@@ -114,16 +114,6 @@ case 8: __put_user_asm(data,x,addr,__pu_ret); break; \
114default: __pu_ret = __put_user_bad(); break; \ 114default: __pu_ret = __put_user_bad(); break; \
115} __pu_ret; }) 115} __pu_ret; })
116 116
117#define __put_user_nocheck_ret(data,addr,size,retval) ({ \
118register int __foo __asm__ ("l1"); \
119switch (size) { \
120case 1: __put_user_asm_ret(data,b,addr,retval,__foo); break; \
121case 2: __put_user_asm_ret(data,h,addr,retval,__foo); break; \
122case 4: __put_user_asm_ret(data,w,addr,retval,__foo); break; \
123case 8: __put_user_asm_ret(data,x,addr,retval,__foo); break; \
124default: if (__put_user_bad()) return retval; break; \
125} })
126
127#define __put_user_asm(x,size,addr,ret) \ 117#define __put_user_asm(x,size,addr,ret) \
128__asm__ __volatile__( \ 118__asm__ __volatile__( \
129 "/* Put user asm, inline. */\n" \ 119 "/* Put user asm, inline. */\n" \
@@ -143,33 +133,6 @@ __asm__ __volatile__( \
143 : "=r" (ret) : "r" (x), "r" (__m(addr)), \ 133 : "=r" (ret) : "r" (x), "r" (__m(addr)), \
144 "i" (-EFAULT)) 134 "i" (-EFAULT))
145 135
146#define __put_user_asm_ret(x,size,addr,ret,foo) \
147if (__builtin_constant_p(ret) && ret == -EFAULT) \
148__asm__ __volatile__( \
149 "/* Put user asm ret, inline. */\n" \
150"1:\t" "st"#size "a %1, [%2] %%asi\n\n\t" \
151 ".section __ex_table,\"a\"\n\t" \
152 ".align 4\n\t" \
153 ".word 1b, __ret_efault\n\n\t" \
154 ".previous\n\n\t" \
155 : "=r" (foo) : "r" (x), "r" (__m(addr))); \
156else \
157__asm__ __volatile__( \
158 "/* Put user asm ret, inline. */\n" \
159"1:\t" "st"#size "a %1, [%2] %%asi\n\n\t" \
160 ".section .fixup,#alloc,#execinstr\n\t" \
161 ".align 4\n" \
162"3:\n\t" \
163 "ret\n\t" \
164 " restore %%g0, %3, %%o0\n\n\t" \
165 ".previous\n\t" \
166 ".section __ex_table,\"a\"\n\t" \
167 ".align 4\n\t" \
168 ".word 1b, 3b\n\n\t" \
169 ".previous\n\n\t" \
170 : "=r" (foo) : "r" (x), "r" (__m(addr)), \
171 "i" (ret))
172
173extern int __put_user_bad(void); 136extern int __put_user_bad(void);
174 137
175#define __get_user_nocheck(data,addr,size,type) ({ \ 138#define __get_user_nocheck(data,addr,size,type) ({ \
@@ -289,14 +252,7 @@ copy_in_user(void __user *to, void __user *from, unsigned long size)
289} 252}
290#define __copy_in_user copy_in_user 253#define __copy_in_user copy_in_user
291 254
292extern unsigned long __must_check __bzero_noasi(void __user *, unsigned long); 255extern unsigned long __must_check __clear_user(void __user *, unsigned long);
293
294static inline unsigned long __must_check
295__clear_user(void __user *addr, unsigned long size)
296{
297
298 return __bzero_noasi(addr, size);
299}
300 256
301#define clear_user __clear_user 257#define clear_user __clear_user
302 258
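
With __bzero_noasi gone, __clear_user() is now the primitive itself. As elsewhere in the kernel, it returns the number of bytes that could not be zeroed; a hedged usage sketch (ubuf and len are illustrative):

    unsigned long left = clear_user(ubuf, len);

    if (left)               /* 'left' trailing bytes were not writable */
            return -EFAULT;
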
diff --git a/include/asm-sparc64/vdev.h b/include/asm-sparc64/vdev.h
new file mode 100644
index 000000000000..996e6be7b976
--- /dev/null
+++ b/include/asm-sparc64/vdev.h
@@ -0,0 +1,16 @@
1/* vdev.h: SUN4V virtual device interfaces and defines.
2 *
3 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
4 */
5
6#ifndef _SPARC64_VDEV_H
7#define _SPARC64_VDEV_H
8
9#include <linux/types.h>
10
11extern u32 sun4v_vdev_devhandle;
12extern int sun4v_vdev_root;
13
14extern unsigned int sun4v_vdev_device_interrupt(unsigned int);
15
16#endif /* !(_SPARC64_VDEV_H) */
diff --git a/include/asm-sparc64/xor.h b/include/asm-sparc64/xor.h
index 8b3a7e4b6062..8ce3f1813e28 100644
--- a/include/asm-sparc64/xor.h
+++ b/include/asm-sparc64/xor.h
@@ -2,9 +2,11 @@
2 * include/asm-sparc64/xor.h 2 * include/asm-sparc64/xor.h
3 * 3 *
4 * High speed xor_block operation for RAID4/5 utilizing the 4 * High speed xor_block operation for RAID4/5 utilizing the
5 * UltraSparc Visual Instruction Set. 5 * UltraSparc Visual Instruction Set and Niagara block-init
6 * twin-load instructions.
6 * 7 *
7 * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz) 8 * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
9 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
8 * 10 *
9 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by 12 * it under the terms of the GNU General Public License as published by
@@ -16,8 +18,7 @@
16 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */ 19 */
18 20
19#include <asm/pstate.h> 21#include <asm/spitfire.h>
20#include <asm/asi.h>
21 22
22extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *); 23extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
23extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *, 24extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
@@ -37,4 +38,29 @@ static struct xor_block_template xor_block_VIS = {
37 .do_5 = xor_vis_5, 38 .do_5 = xor_vis_5,
38}; 39};
39 40
40#define XOR_TRY_TEMPLATES xor_speed(&xor_block_VIS) 41extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *);
42extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *,
43 unsigned long *);
44extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *,
45 unsigned long *, unsigned long *);
46extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *,
47 unsigned long *, unsigned long *, unsigned long *);
48
49static struct xor_block_template xor_block_niagara = {
50 .name = "Niagara",
51 .do_2 = xor_niagara_2,
52 .do_3 = xor_niagara_3,
53 .do_4 = xor_niagara_4,
54 .do_5 = xor_niagara_5,
55};
56
57#undef XOR_TRY_TEMPLATES
58#define XOR_TRY_TEMPLATES \
59 do { \
60 xor_speed(&xor_block_VIS); \
61 xor_speed(&xor_block_niagara); \
62 } while (0)
63
64/* For VIS for everything except Niagara. */
65#define XOR_SELECT_TEMPLATE(FASTEST) \
66 (tlb_type == hypervisor ? &xor_block_niagara : &xor_block_VIS)
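
XOR_SELECT_TEMPLATE short-circuits the usual xor_speed() benchmarking: on sun4v (tlb_type == hypervisor) the Niagara block-init routines are always chosen, otherwise VIS. A hedged sketch of driving whichever template was selected (fastest, p1 and p2 are illustrative; do_2 XORs the second buffer into the first over the given byte count):

    struct xor_block_template *fastest = XOR_SELECT_TEMPLATE(NULL);

    /* XOR two page-sized buffers of unsigned longs: p1 ^= p2 */
    fastest->do_2(4096, p1, p2);
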
diff --git a/include/linux/amba/clcd.h b/include/linux/amba/clcd.h
index 6b8d73dc1ab0..9cf64b1b688b 100644
--- a/include/linux/amba/clcd.h
+++ b/include/linux/amba/clcd.h
@@ -54,6 +54,7 @@
54#define CNTL_LCDBPP4 (2 << 1) 54#define CNTL_LCDBPP4 (2 << 1)
55#define CNTL_LCDBPP8 (3 << 1) 55#define CNTL_LCDBPP8 (3 << 1)
56#define CNTL_LCDBPP16 (4 << 1) 56#define CNTL_LCDBPP16 (4 << 1)
57#define CNTL_LCDBPP16_565 (6 << 1)
57#define CNTL_LCDBPP24 (5 << 1) 58#define CNTL_LCDBPP24 (5 << 1)
58#define CNTL_LCDBW (1 << 4) 59#define CNTL_LCDBW (1 << 4)
59#define CNTL_LCDTFT (1 << 5) 60#define CNTL_LCDTFT (1 << 5)
@@ -209,7 +210,16 @@ static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs)
209 val |= CNTL_LCDBPP8; 210 val |= CNTL_LCDBPP8;
210 break; 211 break;
211 case 16: 212 case 16:
212 val |= CNTL_LCDBPP16; 213 /*
214 * PL110 cannot choose between 5551 and 565 modes in
215 * its control register
216 */
217 if ((fb->dev->periphid & 0x000fffff) == 0x00041110)
218 val |= CNTL_LCDBPP16;
219 else if (fb->fb.var.green.length == 5)
220 val |= CNTL_LCDBPP16;
221 else
222 val |= CNTL_LCDBPP16_565;
213 break; 223 break;
214 case 32: 224 case 32:
215 val |= CNTL_LCDBPP24; 225 val |= CNTL_LCDBPP24;
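
The 16bpp decision above keys off fb_var_screeninfo: a PL110 (periphid 0x41110) only supports the 5551 layout, while on other parts a green channel width other than 5 selects the new CNTL_LCDBPP16_565 mode. A hedged example of the var settings a driver would hand in to get RGB565 on a PL111 (the field values are illustrative):

    fb->fb.var.bits_per_pixel = 16;
    fb->fb.var.red.length     = 5;
    fb->fb.var.green.length   = 6;   /* != 5, so clcdfb_decode() picks CNTL_LCDBPP16_565 */
    fb->fb.var.blue.length    = 5;
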
diff --git a/include/linux/arcdevice.h b/include/linux/arcdevice.h
index 7198f129e135..231ba090ae34 100644
--- a/include/linux/arcdevice.h
+++ b/include/linux/arcdevice.h
@@ -206,7 +206,6 @@ struct ArcProto {
206 206
207extern struct ArcProto *arc_proto_map[256], *arc_proto_default, 207extern struct ArcProto *arc_proto_map[256], *arc_proto_default,
208 *arc_bcast_proto, *arc_raw_proto; 208 *arc_bcast_proto, *arc_raw_proto;
209extern struct ArcProto arc_proto_null;
210 209
211 210
212/* 211/*
@@ -334,17 +333,9 @@ void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc);
334#define arcnet_dump_skb(dev,skb,desc) ; 333#define arcnet_dump_skb(dev,skb,desc) ;
335#endif 334#endif
336 335
337#if (ARCNET_DEBUG_MAX & D_RX) || (ARCNET_DEBUG_MAX & D_TX)
338void arcnet_dump_packet(struct net_device *dev, int bufnum, char *desc,
339 int take_arcnet_lock);
340#else
341#define arcnet_dump_packet(dev, bufnum, desc,take_arcnet_lock) ;
342#endif
343
344void arcnet_unregister_proto(struct ArcProto *proto); 336void arcnet_unregister_proto(struct ArcProto *proto);
345irqreturn_t arcnet_interrupt(int irq, void *dev_id, struct pt_regs *regs); 337irqreturn_t arcnet_interrupt(int irq, void *dev_id, struct pt_regs *regs);
346struct net_device *alloc_arcdev(char *name); 338struct net_device *alloc_arcdev(char *name);
347void arcnet_rx(struct net_device *dev, int bufnum);
348 339
349#endif /* __KERNEL__ */ 340#endif /* __KERNEL__ */
350#endif /* _LINUX_ARCDEVICE_H */ 341#endif /* _LINUX_ARCDEVICE_H */
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 94f77cce27fa..b02a16c435e7 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -267,6 +267,16 @@ struct ata_taskfile {
267 ((u64) (id)[(n) + 1] << 16) | \ 267 ((u64) (id)[(n) + 1] << 16) | \
268 ((u64) (id)[(n) + 0]) ) 268 ((u64) (id)[(n) + 0]) )
269 269
270static inline unsigned int ata_id_major_version(const u16 *id)
271{
272 unsigned int mver;
273
274 for (mver = 14; mver >= 1; mver--)
275 if (id[ATA_ID_MAJOR_VER] & (1 << mver))
276 break;
277 return mver;
278}
279
270static inline int ata_id_current_chs_valid(const u16 *id) 280static inline int ata_id_current_chs_valid(const u16 *id)
271{ 281{
272 /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command 282 /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command
@@ -302,4 +312,16 @@ static inline int ata_ok(u8 status)
302 == ATA_DRDY); 312 == ATA_DRDY);
303} 313}
304 314
315static inline int lba_28_ok(u64 block, u32 n_block)
316{
317 /* check the ending block number */
318 return ((block + n_block - 1) < ((u64)1 << 28)) && (n_block <= 256);
319}
320
321static inline int lba_48_ok(u64 block, u32 n_block)
322{
323 /* check the ending block number */
324 return ((block + n_block - 1) < ((u64)1 << 48)) && (n_block <= 65536);
325}
326
305#endif /* __LINUX_ATA_H__ */ 327#endif /* __LINUX_ATA_H__ */
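
The new lba_28_ok()/lba_48_ok() helpers bound both the ending block and the sector count, which lets command setup choose an addressing mode up front. A hedged sketch (block, n_block and use_lba48 are illustrative):

    int use_lba48;

    if (lba_28_ok(block, n_block))
            use_lba48 = 0;          /* fits in 28 bits, at most 256 sectors */
    else if (lba_48_ok(block, n_block))
            use_lba48 = 1;          /* needs the extended (48-bit) taskfile */
    else
            return -ERANGE;         /* beyond even LBA48 limits */
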
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 860e7a485a5f..56bb6a4e15f3 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -58,7 +58,7 @@ struct cfq_io_context {
58 * circular list of cfq_io_contexts belonging to a process io context 58 * circular list of cfq_io_contexts belonging to a process io context
59 */ 59 */
60 struct list_head list; 60 struct list_head list;
61 struct cfq_queue *cfqq; 61 struct cfq_queue *cfqq[2];
62 void *key; 62 void *key;
63 63
64 struct io_context *ioc; 64 struct io_context *ioc;
@@ -69,6 +69,8 @@ struct cfq_io_context {
69 unsigned long ttime_samples; 69 unsigned long ttime_samples;
70 unsigned long ttime_mean; 70 unsigned long ttime_mean;
71 71
72 struct list_head queue_list;
73
72 void (*dtor)(struct cfq_io_context *); 74 void (*dtor)(struct cfq_io_context *);
73 void (*exit)(struct cfq_io_context *); 75 void (*exit)(struct cfq_io_context *);
74}; 76};
@@ -404,8 +406,6 @@ struct request_queue
404 406
405 struct blk_queue_tag *queue_tags; 407 struct blk_queue_tag *queue_tags;
406 408
407 atomic_t refcnt;
408
409 unsigned int nr_sorted; 409 unsigned int nr_sorted;
410 unsigned int in_flight; 410 unsigned int in_flight;
411 411
@@ -424,6 +424,8 @@ struct request_queue
424 struct request pre_flush_rq, bar_rq, post_flush_rq; 424 struct request pre_flush_rq, bar_rq, post_flush_rq;
425 struct request *orig_bar_rq; 425 struct request *orig_bar_rq;
426 unsigned int bi_size; 426 unsigned int bi_size;
427
428 struct mutex sysfs_lock;
427}; 429};
428 430
429#define RQ_INACTIVE (-1) 431#define RQ_INACTIVE (-1)
@@ -725,7 +727,7 @@ extern long nr_blockdev_pages(void);
725int blk_get_queue(request_queue_t *); 727int blk_get_queue(request_queue_t *);
726request_queue_t *blk_alloc_queue(gfp_t); 728request_queue_t *blk_alloc_queue(gfp_t);
727request_queue_t *blk_alloc_queue_node(gfp_t, int); 729request_queue_t *blk_alloc_queue_node(gfp_t, int);
728#define blk_put_queue(q) blk_cleanup_queue((q)) 730extern void blk_put_queue(request_queue_t *);
729 731
730/* 732/*
731 * tag stuff 733 * tag stuff
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 0ed1d4853c69..d612b89dce33 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -32,7 +32,7 @@ struct cpu {
32}; 32};
33 33
34extern int register_cpu(struct cpu *, int, struct node *); 34extern int register_cpu(struct cpu *, int, struct node *);
35extern struct sys_device *get_cpu_sysdev(int cpu); 35extern struct sys_device *get_cpu_sysdev(unsigned cpu);
36#ifdef CONFIG_HOTPLUG_CPU 36#ifdef CONFIG_HOTPLUG_CPU
37extern void unregister_cpu(struct cpu *, struct node *); 37extern void unregister_cpu(struct cpu *, struct node *);
38#endif 38#endif
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index d88bf8aa8b47..0ab1bc1152ca 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -229,6 +229,8 @@ struct crypto_tfm {
229 } crt_u; 229 } crt_u;
230 230
231 struct crypto_alg *__crt_alg; 231 struct crypto_alg *__crt_alg;
232
233 char __crt_ctx[] __attribute__ ((__aligned__));
232}; 234};
233 235
234/* 236/*
@@ -301,7 +303,13 @@ static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
301 303
302static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm) 304static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
303{ 305{
304 return (void *)&tfm[1]; 306 return tfm->__crt_ctx;
307}
308
309static inline unsigned int crypto_tfm_ctx_alignment(void)
310{
311 struct crypto_tfm *tfm;
312 return __alignof__(tfm->__crt_ctx);
305} 313}
306 314
307/* 315/*
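
With the context moved into the __crt_ctx flexible array member, crypto_tfm_ctx() no longer relies on pointer arithmetic past the struct, and crypto_tfm_ctx_alignment() exposes its guaranteed alignment. A hedged sketch of how an algorithm implementation reaches its per-transform state (struct my_cipher_ctx and my_init_ctx are illustrative):

    struct my_cipher_ctx {            /* illustrative per-tfm state */
            u32 round_keys[32];
    };

    static void my_init_ctx(struct crypto_tfm *tfm)
    {
            struct my_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
            /* ctx points at tfm->__crt_ctx, aligned per crypto_tfm_ctx_alignment() */
            ctx->round_keys[0] = 0;
    }
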
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 088529f54965..676333b9fad0 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -18,7 +18,7 @@
18 * @dccph_seq - sequence number high or low order 24 bits, depends on dccph_x 18 * @dccph_seq - sequence number high or low order 24 bits, depends on dccph_x
19 */ 19 */
20struct dccp_hdr { 20struct dccp_hdr {
21 __u16 dccph_sport, 21 __be16 dccph_sport,
22 dccph_dport; 22 dccph_dport;
23 __u8 dccph_doff; 23 __u8 dccph_doff;
24#if defined(__LITTLE_ENDIAN_BITFIELD) 24#if defined(__LITTLE_ENDIAN_BITFIELD)
@@ -32,18 +32,18 @@ struct dccp_hdr {
32#endif 32#endif
33 __u16 dccph_checksum; 33 __u16 dccph_checksum;
34#if defined(__LITTLE_ENDIAN_BITFIELD) 34#if defined(__LITTLE_ENDIAN_BITFIELD)
35 __u32 dccph_x:1, 35 __u8 dccph_x:1,
36 dccph_type:4, 36 dccph_type:4,
37 dccph_reserved:3, 37 dccph_reserved:3;
38 dccph_seq:24;
39#elif defined(__BIG_ENDIAN_BITFIELD) 38#elif defined(__BIG_ENDIAN_BITFIELD)
40 __u32 dccph_reserved:3, 39 __u8 dccph_reserved:3,
41 dccph_type:4, 40 dccph_type:4,
42 dccph_x:1, 41 dccph_x:1;
43 dccph_seq:24;
44#else 42#else
45#error "Adjust your <asm/byteorder.h> defines" 43#error "Adjust your <asm/byteorder.h> defines"
46#endif 44#endif
45 __u8 dccph_seq2;
46 __be16 dccph_seq;
47}; 47};
48 48
49/** 49/**
@@ -52,7 +52,7 @@ struct dccp_hdr {
52 * @dccph_seq_low - low 24 bits of a 48 bit seq packet 52 * @dccph_seq_low - low 24 bits of a 48 bit seq packet
53 */ 53 */
54struct dccp_hdr_ext { 54struct dccp_hdr_ext {
55 __u32 dccph_seq_low; 55 __be32 dccph_seq_low;
56}; 56};
57 57
58/** 58/**
@@ -62,7 +62,7 @@ struct dccp_hdr_ext {
62 * @dccph_req_options - list of options (must be a multiple of 32 bits 62 * @dccph_req_options - list of options (must be a multiple of 32 bits
63 */ 63 */
64struct dccp_hdr_request { 64struct dccp_hdr_request {
65 __u32 dccph_req_service; 65 __be32 dccph_req_service;
66}; 66};
67/** 67/**
68 * struct dccp_hdr_ack_bits - acknowledgment bits common to most packets 68 * struct dccp_hdr_ack_bits - acknowledgment bits common to most packets
@@ -71,9 +71,9 @@ struct dccp_hdr_request {
71 * @dccph_resp_ack_nr_low - 48 bit ack number low order bits, contains GSR 71 * @dccph_resp_ack_nr_low - 48 bit ack number low order bits, contains GSR
72 */ 72 */
73struct dccp_hdr_ack_bits { 73struct dccp_hdr_ack_bits {
74 __u32 dccph_reserved1:8, 74 __be16 dccph_reserved1;
75 dccph_ack_nr_high:24; 75 __be16 dccph_ack_nr_high;
76 __u32 dccph_ack_nr_low; 76 __be32 dccph_ack_nr_low;
77}; 77};
78/** 78/**
 79 * struct dccp_hdr_response - Connection initiation response header 79 * struct dccp_hdr_response - Connection initiation response header
@@ -85,7 +85,7 @@ struct dccp_hdr_ack_bits {
85 */ 85 */
86struct dccp_hdr_response { 86struct dccp_hdr_response {
87 struct dccp_hdr_ack_bits dccph_resp_ack; 87 struct dccp_hdr_ack_bits dccph_resp_ack;
88 __u32 dccph_resp_service; 88 __be32 dccph_resp_service;
89}; 89};
90 90
91/** 91/**
@@ -154,6 +154,10 @@ enum {
154 DCCPO_MANDATORY = 1, 154 DCCPO_MANDATORY = 1,
155 DCCPO_MIN_RESERVED = 3, 155 DCCPO_MIN_RESERVED = 3,
156 DCCPO_MAX_RESERVED = 31, 156 DCCPO_MAX_RESERVED = 31,
157 DCCPO_CHANGE_L = 32,
158 DCCPO_CONFIRM_L = 33,
159 DCCPO_CHANGE_R = 34,
160 DCCPO_CONFIRM_R = 35,
157 DCCPO_NDP_COUNT = 37, 161 DCCPO_NDP_COUNT = 37,
158 DCCPO_ACK_VECTOR_0 = 38, 162 DCCPO_ACK_VECTOR_0 = 38,
159 DCCPO_ACK_VECTOR_1 = 39, 163 DCCPO_ACK_VECTOR_1 = 39,
@@ -168,7 +172,9 @@ enum {
168/* DCCP features */ 172/* DCCP features */
169enum { 173enum {
170 DCCPF_RESERVED = 0, 174 DCCPF_RESERVED = 0,
175 DCCPF_CCID = 1,
171 DCCPF_SEQUENCE_WINDOW = 3, 176 DCCPF_SEQUENCE_WINDOW = 3,
177 DCCPF_ACK_RATIO = 5,
172 DCCPF_SEND_ACK_VECTOR = 6, 178 DCCPF_SEND_ACK_VECTOR = 6,
173 DCCPF_SEND_NDP_COUNT = 7, 179 DCCPF_SEND_NDP_COUNT = 7,
174 /* 10-127 reserved */ 180 /* 10-127 reserved */
@@ -176,9 +182,18 @@ enum {
176 DCCPF_MAX_CCID_SPECIFIC = 255, 182 DCCPF_MAX_CCID_SPECIFIC = 255,
177}; 183};
178 184
 185/* this structure is the argument to DCCP_SOCKOPT_CHANGE_X */
186struct dccp_so_feat {
187 __u8 dccpsf_feat;
188 __u8 *dccpsf_val;
189 __u8 dccpsf_len;
190};
191
179/* DCCP socket options */ 192/* DCCP socket options */
180#define DCCP_SOCKOPT_PACKET_SIZE 1 193#define DCCP_SOCKOPT_PACKET_SIZE 1
181#define DCCP_SOCKOPT_SERVICE 2 194#define DCCP_SOCKOPT_SERVICE 2
195#define DCCP_SOCKOPT_CHANGE_L 3
196#define DCCP_SOCKOPT_CHANGE_R 4
182#define DCCP_SOCKOPT_CCID_RX_INFO 128 197#define DCCP_SOCKOPT_CCID_RX_INFO 128
183#define DCCP_SOCKOPT_CCID_TX_INFO 192 198#define DCCP_SOCKOPT_CCID_TX_INFO 192
184 199
@@ -254,16 +269,12 @@ static inline unsigned int dccp_basic_hdr_len(const struct sk_buff *skb)
254static inline __u64 dccp_hdr_seq(const struct sk_buff *skb) 269static inline __u64 dccp_hdr_seq(const struct sk_buff *skb)
255{ 270{
256 const struct dccp_hdr *dh = dccp_hdr(skb); 271 const struct dccp_hdr *dh = dccp_hdr(skb);
257#if defined(__LITTLE_ENDIAN_BITFIELD) 272 __u64 seq_nr = ntohs(dh->dccph_seq);
258 __u64 seq_nr = ntohl(dh->dccph_seq << 8);
259#elif defined(__BIG_ENDIAN_BITFIELD)
260 __u64 seq_nr = ntohl(dh->dccph_seq);
261#else
262#error "Adjust your <asm/byteorder.h> defines"
263#endif
264 273
265 if (dh->dccph_x != 0) 274 if (dh->dccph_x != 0)
266 seq_nr = (seq_nr << 32) + ntohl(dccp_hdrx(skb)->dccph_seq_low); 275 seq_nr = (seq_nr << 32) + ntohl(dccp_hdrx(skb)->dccph_seq_low);
276 else
277 seq_nr += (u32)dh->dccph_seq2 << 16;
267 278
268 return seq_nr; 279 return seq_nr;
269} 280}
@@ -281,13 +292,7 @@ static inline struct dccp_hdr_ack_bits *dccp_hdr_ack_bits(const struct sk_buff *
281static inline u64 dccp_hdr_ack_seq(const struct sk_buff *skb) 292static inline u64 dccp_hdr_ack_seq(const struct sk_buff *skb)
282{ 293{
283 const struct dccp_hdr_ack_bits *dhack = dccp_hdr_ack_bits(skb); 294 const struct dccp_hdr_ack_bits *dhack = dccp_hdr_ack_bits(skb);
284#if defined(__LITTLE_ENDIAN_BITFIELD) 295 return ((u64)ntohs(dhack->dccph_ack_nr_high) << 32) + ntohl(dhack->dccph_ack_nr_low);
285 return (((u64)ntohl(dhack->dccph_ack_nr_high << 8)) << 32) + ntohl(dhack->dccph_ack_nr_low);
286#elif defined(__BIG_ENDIAN_BITFIELD)
287 return (((u64)ntohl(dhack->dccph_ack_nr_high)) << 32) + ntohl(dhack->dccph_ack_nr_low);
288#else
289#error "Adjust your <asm/byteorder.h> defines"
290#endif
291} 296}
292 297
293static inline struct dccp_hdr_response *dccp_hdr_response(struct sk_buff *skb) 298static inline struct dccp_hdr_response *dccp_hdr_response(struct sk_buff *skb)
@@ -314,38 +319,60 @@ static inline unsigned int dccp_hdr_len(const struct sk_buff *skb)
314 319
315/* initial values for each feature */ 320/* initial values for each feature */
316#define DCCPF_INITIAL_SEQUENCE_WINDOW 100 321#define DCCPF_INITIAL_SEQUENCE_WINDOW 100
317/* FIXME: for now we're using CCID 3 (TFRC) */ 322#define DCCPF_INITIAL_ACK_RATIO 2
318#define DCCPF_INITIAL_CCID 3 323#define DCCPF_INITIAL_CCID 2
319#define DCCPF_INITIAL_SEND_ACK_VECTOR 0 324#define DCCPF_INITIAL_SEND_ACK_VECTOR 1
 320/* FIXME: for now we default to 1 but it should really be 0 */ 325/* FIXME: for now we default to 1 but it should really be 0 */
321#define DCCPF_INITIAL_SEND_NDP_COUNT 1 326#define DCCPF_INITIAL_SEND_NDP_COUNT 1
322 327
323#define DCCP_NDP_LIMIT 0xFFFFFF 328#define DCCP_NDP_LIMIT 0xFFFFFF
324 329
325/** 330/**
326 * struct dccp_options - option values for a DCCP connection 331 * struct dccp_minisock - Minimal DCCP connection representation
327 * @dccpo_sequence_window - Sequence Window Feature (section 7.5.2) 332 *
328 * @dccpo_ccid - Congestion Control Id (CCID) (section 10) 333 * Will be used to pass the state from dccp_request_sock to dccp_sock.
329 * @dccpo_send_ack_vector - Send Ack Vector Feature (section 11.5) 334 *
330 * @dccpo_send_ndp_count - Send NDP Count Feature (7.7.2) 335 * @dccpms_sequence_window - Sequence Window Feature (section 7.5.2)
336 * @dccpms_ccid - Congestion Control Id (CCID) (section 10)
337 * @dccpms_send_ack_vector - Send Ack Vector Feature (section 11.5)
338 * @dccpms_send_ndp_count - Send NDP Count Feature (7.7.2)
331 */ 339 */
332struct dccp_options { 340struct dccp_minisock {
333 __u64 dccpo_sequence_window; 341 __u64 dccpms_sequence_window;
334 __u8 dccpo_rx_ccid; 342 __u8 dccpms_rx_ccid;
335 __u8 dccpo_tx_ccid; 343 __u8 dccpms_tx_ccid;
336 __u8 dccpo_send_ack_vector; 344 __u8 dccpms_send_ack_vector;
337 __u8 dccpo_send_ndp_count; 345 __u8 dccpms_send_ndp_count;
346 __u8 dccpms_ack_ratio;
347 struct list_head dccpms_pending;
348 struct list_head dccpms_conf;
349};
350
351struct dccp_opt_conf {
352 __u8 *dccpoc_val;
353 __u8 dccpoc_len;
354};
355
356struct dccp_opt_pend {
357 struct list_head dccpop_node;
358 __u8 dccpop_type;
359 __u8 dccpop_feat;
360 __u8 *dccpop_val;
361 __u8 dccpop_len;
362 int dccpop_conf;
363 struct dccp_opt_conf *dccpop_sc;
338}; 364};
339 365
340extern void __dccp_options_init(struct dccp_options *dccpo); 366extern void __dccp_minisock_init(struct dccp_minisock *dmsk);
341extern void dccp_options_init(struct dccp_options *dccpo); 367extern void dccp_minisock_init(struct dccp_minisock *dmsk);
368
342extern int dccp_parse_options(struct sock *sk, struct sk_buff *skb); 369extern int dccp_parse_options(struct sock *sk, struct sk_buff *skb);
343 370
344struct dccp_request_sock { 371struct dccp_request_sock {
345 struct inet_request_sock dreq_inet_rsk; 372 struct inet_request_sock dreq_inet_rsk;
346 __u64 dreq_iss; 373 __u64 dreq_iss;
347 __u64 dreq_isr; 374 __u64 dreq_isr;
348 __u32 dreq_service; 375 __be32 dreq_service;
349}; 376};
350 377
351static inline struct dccp_request_sock *dccp_rsk(const struct request_sock *req) 378static inline struct dccp_request_sock *dccp_rsk(const struct request_sock *req)
@@ -373,13 +400,13 @@ enum dccp_role {
373 400
374struct dccp_service_list { 401struct dccp_service_list {
375 __u32 dccpsl_nr; 402 __u32 dccpsl_nr;
376 __u32 dccpsl_list[0]; 403 __be32 dccpsl_list[0];
377}; 404};
378 405
379#define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1) 406#define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1)
380 407
381static inline int dccp_list_has_service(const struct dccp_service_list *sl, 408static inline int dccp_list_has_service(const struct dccp_service_list *sl,
382 const u32 service) 409 const __be32 service)
383{ 410{
384 if (likely(sl != NULL)) { 411 if (likely(sl != NULL)) {
385 u32 i = sl->dccpsl_nr; 412 u32 i = sl->dccpsl_nr;
@@ -425,17 +452,17 @@ struct dccp_sock {
425 __u64 dccps_gss; 452 __u64 dccps_gss;
426 __u64 dccps_gsr; 453 __u64 dccps_gsr;
427 __u64 dccps_gar; 454 __u64 dccps_gar;
428 __u32 dccps_service; 455 __be32 dccps_service;
429 struct dccp_service_list *dccps_service_list; 456 struct dccp_service_list *dccps_service_list;
430 struct timeval dccps_timestamp_time; 457 struct timeval dccps_timestamp_time;
431 __u32 dccps_timestamp_echo; 458 __u32 dccps_timestamp_echo;
432 __u32 dccps_packet_size; 459 __u32 dccps_packet_size;
460 __u16 dccps_l_ack_ratio;
461 __u16 dccps_r_ack_ratio;
433 unsigned long dccps_ndp_count; 462 unsigned long dccps_ndp_count;
434 __u32 dccps_mss_cache; 463 __u32 dccps_mss_cache;
435 struct dccp_options dccps_options; 464 struct dccp_minisock dccps_minisock;
436 struct dccp_ackvec *dccps_hc_rx_ackvec; 465 struct dccp_ackvec *dccps_hc_rx_ackvec;
437 void *dccps_hc_rx_ccid_private;
438 void *dccps_hc_tx_ccid_private;
439 struct ccid *dccps_hc_rx_ccid; 466 struct ccid *dccps_hc_rx_ccid;
440 struct ccid *dccps_hc_tx_ccid; 467 struct ccid *dccps_hc_tx_ccid;
441 struct dccp_options_received dccps_options_received; 468 struct dccp_options_received dccps_options_received;
@@ -450,6 +477,11 @@ static inline struct dccp_sock *dccp_sk(const struct sock *sk)
450 return (struct dccp_sock *)sk; 477 return (struct dccp_sock *)sk;
451} 478}
452 479
480static inline struct dccp_minisock *dccp_msk(const struct sock *sk)
481{
482 return (struct dccp_minisock *)&dccp_sk(sk)->dccps_minisock;
483}
484
453static inline int dccp_service_not_initialized(const struct sock *sk) 485static inline int dccp_service_not_initialized(const struct sock *sk)
454{ 486{
455 return dccp_sk(sk)->dccps_service == DCCP_SERVICE_INVALID_VALUE; 487 return dccp_sk(sk)->dccps_service == DCCP_SERVICE_INVALID_VALUE;
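
The new DCCP_SOCKOPT_CHANGE_L/_R options take a struct dccp_so_feat describing the feature to negotiate. A hedged userspace sketch, assuming the usual SOL_DCCP socket level and an already-created DCCP socket fd (the value chosen is illustrative):

    __u8 val = 2;
    struct dccp_so_feat feat = {
            .dccpsf_feat = DCCPF_ACK_RATIO,
            .dccpsf_val  = &val,
            .dccpsf_len  = 1,
    };

    /* ask to change the local Ack Ratio feature */
    setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_CHANGE_L, &feat, sizeof(feat));
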
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index a5fa6a6eede8..4b0428e335be 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -21,6 +21,11 @@
21 21
22struct file_operations; 22struct file_operations;
23 23
24struct debugfs_blob_wrapper {
25 void *data;
26 unsigned long size;
27};
28
24#if defined(CONFIG_DEBUG_FS) 29#if defined(CONFIG_DEBUG_FS)
25struct dentry *debugfs_create_file(const char *name, mode_t mode, 30struct dentry *debugfs_create_file(const char *name, mode_t mode,
26 struct dentry *parent, void *data, 31 struct dentry *parent, void *data,
@@ -39,6 +44,9 @@ struct dentry *debugfs_create_u32(const char *name, mode_t mode,
39struct dentry *debugfs_create_bool(const char *name, mode_t mode, 44struct dentry *debugfs_create_bool(const char *name, mode_t mode,
40 struct dentry *parent, u32 *value); 45 struct dentry *parent, u32 *value);
41 46
47struct dentry *debugfs_create_blob(const char *name, mode_t mode,
48 struct dentry *parent,
49 struct debugfs_blob_wrapper *blob);
42#else 50#else
43 51
44#include <linux/err.h> 52#include <linux/err.h>
@@ -94,6 +102,13 @@ static inline struct dentry *debugfs_create_bool(const char *name, mode_t mode,
94 return ERR_PTR(-ENODEV); 102 return ERR_PTR(-ENODEV);
95} 103}
96 104
105static inline struct dentry *debugfs_create_blob(const char *name, mode_t mode,
106 struct dentry *parent,
107 struct debugfs_blob_wrapper *blob)
108{
109 return ERR_PTR(-ENODEV);
110}
111
97#endif 112#endif
98 113
99#endif 114#endif
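
debugfs_create_blob() exposes an arbitrary read-only memory region through a single debugfs file. A hedged example of wiring one up (fw_version, fw_blob and dir are illustrative; dir would come from an earlier debugfs_create_dir() call):

    static char fw_version[32] = "1.2.3";
    static struct debugfs_blob_wrapper fw_blob = {
            .data = fw_version,
            .size = sizeof(fw_version),
    };

    /* in init code: */
    debugfs_create_blob("fw_version", 0444, dir, &fw_blob);
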
diff --git a/include/linux/device.h b/include/linux/device.h
index e8ac5bcfbec7..6d2345d86082 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -425,6 +425,8 @@ extern void firmware_unregister(struct subsystem *);
425 dev_printk(KERN_INFO , dev , format , ## arg) 425 dev_printk(KERN_INFO , dev , format , ## arg)
426#define dev_warn(dev, format, arg...) \ 426#define dev_warn(dev, format, arg...) \
427 dev_printk(KERN_WARNING , dev , format , ## arg) 427 dev_printk(KERN_WARNING , dev , format , ## arg)
428#define dev_notice(dev, format, arg...) \
429 dev_printk(KERN_NOTICE , dev , format , ## arg)
428 430
429/* Create alias, so I can be autoloaded. */ 431/* Create alias, so I can be autoloaded. */
430#define MODULE_ALIAS_CHARDEV(major,minor) \ 432#define MODULE_ALIAS_CHARDEV(major,minor) \
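
dev_notice() fills the KERN_NOTICE gap in the dev_printk() wrappers and is used exactly like dev_info()/dev_warn(); for example (the device pointer and message are illustrative):

    dev_notice(&pdev->dev, "link is up, renegotiated to %d Mbps\n", speed);
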
diff --git a/include/linux/dn.h b/include/linux/dn.h
index 782cae49e64c..10b6a6fd5837 100644
--- a/include/linux/dn.h
+++ b/include/linux/dn.h
@@ -71,17 +71,17 @@
71 71
72struct dn_naddr 72struct dn_naddr
73{ 73{
74 unsigned short a_len; 74 __le16 a_len;
75 unsigned char a_addr[DN_MAXADDL]; 75 __u8 a_addr[DN_MAXADDL]; /* Two bytes little endian */
76}; 76};
77 77
78struct sockaddr_dn 78struct sockaddr_dn
79{ 79{
80 unsigned short sdn_family; 80 __u16 sdn_family;
81 unsigned char sdn_flags; 81 __u8 sdn_flags;
82 unsigned char sdn_objnum; 82 __u8 sdn_objnum;
83 unsigned short sdn_objnamel; 83 __le16 sdn_objnamel;
84 unsigned char sdn_objname[DN_MAXOBJL]; 84 __u8 sdn_objname[DN_MAXOBJL];
85 struct dn_naddr sdn_add; 85 struct dn_naddr sdn_add;
86}; 86};
87#define sdn_nodeaddrl sdn_add.a_len /* Node address length */ 87#define sdn_nodeaddrl sdn_add.a_len /* Node address length */
@@ -93,38 +93,38 @@ struct sockaddr_dn
93 * DECnet set/get DSO_CONDATA, DSO_DISDATA (optional data) structure 93 * DECnet set/get DSO_CONDATA, DSO_DISDATA (optional data) structure
94 */ 94 */
95struct optdata_dn { 95struct optdata_dn {
96 unsigned short opt_status; /* Extended status return */ 96 __le16 opt_status; /* Extended status return */
97#define opt_sts opt_status 97#define opt_sts opt_status
98 unsigned short opt_optl; /* Length of user data */ 98 __le16 opt_optl; /* Length of user data */
99 unsigned char opt_data[16]; /* User data */ 99 __u8 opt_data[16]; /* User data */
100}; 100};
101 101
102struct accessdata_dn 102struct accessdata_dn
103{ 103{
104 unsigned char acc_accl; 104 __u8 acc_accl;
105 unsigned char acc_acc[DN_MAXACCL]; 105 __u8 acc_acc[DN_MAXACCL];
106 unsigned char acc_passl; 106 __u8 acc_passl;
107 unsigned char acc_pass[DN_MAXACCL]; 107 __u8 acc_pass[DN_MAXACCL];
108 unsigned char acc_userl; 108 __u8 acc_userl;
109 unsigned char acc_user[DN_MAXACCL]; 109 __u8 acc_user[DN_MAXACCL];
110}; 110};
111 111
112/* 112/*
113 * DECnet logical link information structure 113 * DECnet logical link information structure
114 */ 114 */
115struct linkinfo_dn { 115struct linkinfo_dn {
116 unsigned short idn_segsize; /* Segment size for link */ 116 __le16 idn_segsize; /* Segment size for link */
117 unsigned char idn_linkstate; /* Logical link state */ 117 __u8 idn_linkstate; /* Logical link state */
118}; 118};
119 119
120/* 120/*
121 * Ethernet address format (for DECnet) 121 * Ethernet address format (for DECnet)
122 */ 122 */
123union etheraddress { 123union etheraddress {
124 unsigned char dne_addr[6]; /* Full ethernet address */ 124 __u8 dne_addr[6]; /* Full ethernet address */
125 struct { 125 struct {
126 unsigned char dne_hiord[4]; /* DECnet HIORD prefix */ 126 __u8 dne_hiord[4]; /* DECnet HIORD prefix */
127 unsigned char dne_nodeaddr[2]; /* DECnet node address */ 127 __u8 dne_nodeaddr[2]; /* DECnet node address */
128 } dne_remote; 128 } dne_remote;
129}; 129};
130 130
@@ -133,7 +133,7 @@ union etheraddress {
133 * DECnet physical socket address format 133 * DECnet physical socket address format
134 */ 134 */
135struct dn_addr { 135struct dn_addr {
136 unsigned short dna_family; /* AF_DECnet */ 136 __le16 dna_family; /* AF_DECnet */
137 union etheraddress dna_netaddr; /* DECnet ethernet address */ 137 union etheraddress dna_netaddr; /* DECnet ethernet address */
138}; 138};
139 139
diff --git a/include/linux/dvb/audio.h b/include/linux/dvb/audio.h
index 2b8797084685..0874a67c6b92 100644
--- a/include/linux/dvb/audio.h
+++ b/include/linux/dvb/audio.h
@@ -121,4 +121,17 @@ typedef uint16_t audio_attributes_t;
121#define AUDIO_SET_ATTRIBUTES _IOW('o', 17, audio_attributes_t) 121#define AUDIO_SET_ATTRIBUTES _IOW('o', 17, audio_attributes_t)
122#define AUDIO_SET_KARAOKE _IOW('o', 18, audio_karaoke_t) 122#define AUDIO_SET_KARAOKE _IOW('o', 18, audio_karaoke_t)
123 123
124/**
125 * AUDIO_GET_PTS
126 *
127 * Read the 33 bit presentation time stamp as defined
128 * in ITU T-REC-H.222.0 / ISO/IEC 13818-1.
129 *
130 * The PTS should belong to the currently played
131 * frame if possible, but may also be a value close to it
132 * like the PTS of the last decoded frame or the last PTS
133 * extracted by the PES parser.
134 */
135#define AUDIO_GET_PTS _IOR('o', 19, __u64)
136
124#endif /* _DVBAUDIO_H_ */ 137#endif /* _DVBAUDIO_H_ */
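
A hedged userspace sketch of the new ioctl, with fd an open DVB audio device node; per ISO/IEC 13818-1 the 33-bit PTS counts in 90 kHz units:

    __u64 pts;

    if (ioctl(fd, AUDIO_GET_PTS, &pts) == 0)
            printf("current PTS: %llu (90 kHz ticks)\n", (unsigned long long)pts);
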
diff --git a/include/linux/dvb/video.h b/include/linux/dvb/video.h
index b81e58b2ebf8..faebfda397ff 100644
--- a/include/linux/dvb/video.h
+++ b/include/linux/dvb/video.h
@@ -200,4 +200,17 @@ typedef uint16_t video_attributes_t;
200#define VIDEO_GET_SIZE _IOR('o', 55, video_size_t) 200#define VIDEO_GET_SIZE _IOR('o', 55, video_size_t)
201#define VIDEO_GET_FRAME_RATE _IOR('o', 56, unsigned int) 201#define VIDEO_GET_FRAME_RATE _IOR('o', 56, unsigned int)
202 202
203/**
204 * VIDEO_GET_PTS
205 *
206 * Read the 33 bit presentation time stamp as defined
207 * in ITU T-REC-H.222.0 / ISO/IEC 13818-1.
208 *
209 * The PTS should belong to the currently played
210 * frame if possible, but may also be a value close to it
211 * like the PTS of the last decoded frame or the last PTS
212 * extracted by the PES parser.
213 */
214#define VIDEO_GET_PTS _IOR('o', 57, __u64)
215
203#endif /*_DVBVIDEO_H_*/ 216#endif /*_DVBVIDEO_H_*/
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 18cf1f3e1184..ad133fcfb239 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -48,10 +48,17 @@ struct elevator_ops
48 48
49 elevator_init_fn *elevator_init_fn; 49 elevator_init_fn *elevator_init_fn;
50 elevator_exit_fn *elevator_exit_fn; 50 elevator_exit_fn *elevator_exit_fn;
51 void (*trim)(struct io_context *);
51}; 52};
52 53
53#define ELV_NAME_MAX (16) 54#define ELV_NAME_MAX (16)
54 55
56struct elv_fs_entry {
57 struct attribute attr;
58 ssize_t (*show)(elevator_t *, char *);
59 ssize_t (*store)(elevator_t *, const char *, size_t);
60};
61
55/* 62/*
56 * identifies an elevator type, such as AS or deadline 63 * identifies an elevator type, such as AS or deadline
57 */ 64 */
@@ -60,7 +67,7 @@ struct elevator_type
60 struct list_head list; 67 struct list_head list;
61 struct elevator_ops ops; 68 struct elevator_ops ops;
62 struct elevator_type *elevator_type; 69 struct elevator_type *elevator_type;
63 struct kobj_type *elevator_ktype; 70 struct elv_fs_entry *elevator_attrs;
64 char elevator_name[ELV_NAME_MAX]; 71 char elevator_name[ELV_NAME_MAX];
65 struct module *elevator_owner; 72 struct module *elevator_owner;
66}; 73};
@@ -74,6 +81,7 @@ struct elevator_queue
74 void *elevator_data; 81 void *elevator_data;
75 struct kobject kobj; 82 struct kobject kobj;
76 struct elevator_type *elevator_type; 83 struct elevator_type *elevator_type;
84 struct mutex sysfs_lock;
77}; 85};
78 86
79/* 87/*
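A minimal sketch of how an I/O scheduler could expose a tunable through the new elevator_attrs hook; the attribute name, placeholder values and the empty terminating entry are assumptions, not taken from the header.

static ssize_t example_show(elevator_t *e, char *page)
{
	return sprintf(page, "%d\n", 0);	/* placeholder value */
}

static ssize_t example_store(elevator_t *e, const char *page, size_t count)
{
	return count;				/* accept and ignore input */
}

static struct elv_fs_entry example_attrs[] = {
	{
		.attr  = { .name = "example_tunable", .mode = 0644 },
		.show  = example_show,
		.store = example_store,
	},
	{ }	/* assumed sentinel terminating the array */
};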
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index a9f1cfd096ff..a3a0e078f79d 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -83,5 +83,32 @@ struct fsl_i2c_platform_data {
83#define FSL_I2C_DEV_SEPARATE_DFSRR 0x00000001 83#define FSL_I2C_DEV_SEPARATE_DFSRR 0x00000001
84#define FSL_I2C_DEV_CLOCK_5200 0x00000002 84#define FSL_I2C_DEV_CLOCK_5200 0x00000002
85 85
86
87enum fsl_usb2_operating_modes {
88 FSL_USB2_MPH_HOST,
89 FSL_USB2_DR_HOST,
90 FSL_USB2_DR_DEVICE,
91 FSL_USB2_DR_OTG,
92};
93
94enum fsl_usb2_phy_modes {
95 FSL_USB2_PHY_NONE,
96 FSL_USB2_PHY_ULPI,
97 FSL_USB2_PHY_UTMI,
98 FSL_USB2_PHY_UTMI_WIDE,
99 FSL_USB2_PHY_SERIAL,
100};
101
102struct fsl_usb2_platform_data {
103 /* board specific information */
104 enum fsl_usb2_operating_modes operating_mode;
105 enum fsl_usb2_phy_modes phy_mode;
106 unsigned int port_enables;
107};
108
109/* Flags in fsl_usb2_mph_platform_data */
110#define FSL_USB2_PORT0_ENABLED 0x00000001
111#define FSL_USB2_PORT1_ENABLED 0x00000002
112
86#endif /* _FSL_DEVICE_H_ */ 113#endif /* _FSL_DEVICE_H_ */
87#endif /* __KERNEL__ */ 114#endif /* __KERNEL__ */
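A hypothetical board-support snippet populating the new USB platform data; all values are illustrative and the platform-device registration step is omitted.

#include <linux/fsl_devices.h>

static struct fsl_usb2_platform_data example_usb_pdata = {
	.operating_mode = FSL_USB2_DR_HOST,	/* dual-role block used as a host */
	.phy_mode       = FSL_USB2_PHY_ULPI,
	.port_enables   = FSL_USB2_PORT0_ENABLED,
};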
diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
index 0cf6c8b12caf..c771a7db9871 100644
--- a/include/linux/icmpv6.h
+++ b/include/linux/icmpv6.h
@@ -40,14 +40,16 @@ struct icmp6hdr {
40 struct icmpv6_nd_ra { 40 struct icmpv6_nd_ra {
41 __u8 hop_limit; 41 __u8 hop_limit;
42#if defined(__LITTLE_ENDIAN_BITFIELD) 42#if defined(__LITTLE_ENDIAN_BITFIELD)
43 __u8 reserved:6, 43 __u8 reserved:4,
44 router_pref:2,
44 other:1, 45 other:1,
45 managed:1; 46 managed:1;
46 47
47#elif defined(__BIG_ENDIAN_BITFIELD) 48#elif defined(__BIG_ENDIAN_BITFIELD)
48 __u8 managed:1, 49 __u8 managed:1,
49 other:1, 50 other:1,
50 reserved:6; 51 router_pref:2,
52 reserved:4;
51#else 53#else
52#error "Please fix <asm/byteorder.h>" 54#error "Please fix <asm/byteorder.h>"
53#endif 55#endif
@@ -70,8 +72,13 @@ struct icmp6hdr {
70#define icmp6_addrconf_managed icmp6_dataun.u_nd_ra.managed 72#define icmp6_addrconf_managed icmp6_dataun.u_nd_ra.managed
71#define icmp6_addrconf_other icmp6_dataun.u_nd_ra.other 73#define icmp6_addrconf_other icmp6_dataun.u_nd_ra.other
72#define icmp6_rt_lifetime icmp6_dataun.u_nd_ra.rt_lifetime 74#define icmp6_rt_lifetime icmp6_dataun.u_nd_ra.rt_lifetime
75#define icmp6_router_pref icmp6_dataun.u_nd_ra.router_pref
73}; 76};
74 77
78#define ICMPV6_ROUTER_PREF_LOW 0x3
79#define ICMPV6_ROUTER_PREF_MEDIUM 0x0
80#define ICMPV6_ROUTER_PREF_HIGH 0x1
81#define ICMPV6_ROUTER_PREF_INVALID 0x2
75 82
76#define ICMPV6_DEST_UNREACH 1 83#define ICMPV6_DEST_UNREACH 1
77#define ICMPV6_PKT_TOOBIG 2 84#define ICMPV6_PKT_TOOBIG 2
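A minimal sketch of consuming the new router_pref bits from a received router advertisement; the helper name is illustrative.

static __u8 example_ra_pref(const struct icmp6hdr *hdr)
{
	__u8 pref = hdr->icmp6_router_pref;

	/* the reserved encoding is treated as the default (medium) preference */
	if (pref == ICMPV6_ROUTER_PREF_INVALID)
		pref = ICMPV6_ROUTER_PREF_MEDIUM;
	return pref;
}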
diff --git a/include/linux/if.h b/include/linux/if.h
index ce627d9092ef..374e20ad8b0d 100644
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -33,7 +33,7 @@
33#define IFF_LOOPBACK 0x8 /* is a loopback net */ 33#define IFF_LOOPBACK 0x8 /* is a loopback net */
34#define IFF_POINTOPOINT 0x10 /* interface has p-p link */ 34#define IFF_POINTOPOINT 0x10 /* interface has p-p link */
35#define IFF_NOTRAILERS 0x20 /* avoid use of trailers */ 35#define IFF_NOTRAILERS 0x20 /* avoid use of trailers */
36#define IFF_RUNNING 0x40 /* interface running and carrier ok */ 36#define IFF_RUNNING 0x40 /* interface RFC2863 OPER_UP */
37#define IFF_NOARP 0x80 /* no ARP protocol */ 37#define IFF_NOARP 0x80 /* no ARP protocol */
38#define IFF_PROMISC 0x100 /* receive all packets */ 38#define IFF_PROMISC 0x100 /* receive all packets */
39#define IFF_ALLMULTI 0x200 /* receive all multicast packets*/ 39#define IFF_ALLMULTI 0x200 /* receive all multicast packets*/
@@ -43,15 +43,22 @@
43 43
44#define IFF_MULTICAST 0x1000 /* Supports multicast */ 44#define IFF_MULTICAST 0x1000 /* Supports multicast */
45 45
46#define IFF_VOLATILE (IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|IFF_MASTER|IFF_SLAVE|IFF_RUNNING)
47
48#define IFF_PORTSEL 0x2000 /* can set media type */ 46#define IFF_PORTSEL 0x2000 /* can set media type */
49#define IFF_AUTOMEDIA 0x4000 /* auto media select active */ 47#define IFF_AUTOMEDIA 0x4000 /* auto media select active */
50#define IFF_DYNAMIC 0x8000 /* dialup device with changing addresses*/ 48#define IFF_DYNAMIC 0x8000 /* dialup device with changing addresses*/
51 49
50#define IFF_LOWER_UP 0x10000 /* driver signals L1 up */
51#define IFF_DORMANT 0x20000 /* driver signals dormant */
52
53#define IFF_VOLATILE (IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|\
54 IFF_MASTER|IFF_SLAVE|IFF_RUNNING|IFF_LOWER_UP|IFF_DORMANT)
55
52/* Private (from user) interface flags (netdevice->priv_flags). */ 56/* Private (from user) interface flags (netdevice->priv_flags). */
53#define IFF_802_1Q_VLAN 0x1 /* 802.1Q VLAN device. */ 57#define IFF_802_1Q_VLAN 0x1 /* 802.1Q VLAN device. */
54#define IFF_EBRIDGE 0x2 /* Ethernet bridging device. */ 58#define IFF_EBRIDGE 0x2 /* Ethernet bridging device. */
59#define IFF_SLAVE_INACTIVE 0x4 /* bonding slave not the curr. active */
60#define IFF_MASTER_8023AD 0x8 /* bonding master, 802.3ad. */
61#define IFF_MASTER_ALB 0x10 /* bonding master, balance-alb. */
55 62
56#define IF_GET_IFACE 0x0001 /* for querying only */ 63#define IF_GET_IFACE 0x0001 /* for querying only */
57#define IF_GET_PROTO 0x0002 64#define IF_GET_PROTO 0x0002
@@ -80,6 +87,22 @@
80#define IF_PROTO_FR_ETH_PVC 0x200B 87#define IF_PROTO_FR_ETH_PVC 0x200B
81#define IF_PROTO_RAW 0x200C /* RAW Socket */ 88#define IF_PROTO_RAW 0x200C /* RAW Socket */
82 89
90/* RFC 2863 operational status */
91enum {
92 IF_OPER_UNKNOWN,
93 IF_OPER_NOTPRESENT,
94 IF_OPER_DOWN,
95 IF_OPER_LOWERLAYERDOWN,
96 IF_OPER_TESTING,
97 IF_OPER_DORMANT,
98 IF_OPER_UP,
99};
100
101/* link modes */
102enum {
103 IF_LINK_MODE_DEFAULT,
104 IF_LINK_MODE_DORMANT, /* limit upward transition to dormant */
105};
83 106
84/* 107/*
85 * Device mapping structure. I'd just gone off and designed a 108 * Device mapping structure. I'd just gone off and designed a
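An illustrative helper mapping the new RFC 2863 operstate values to strings, e.g. for debugging output; it is not part of the header itself.

static const char *example_oper_name(int operstate)
{
	switch (operstate) {
	case IF_OPER_UP:             return "up";
	case IF_OPER_DORMANT:        return "dormant";
	case IF_OPER_LOWERLAYERDOWN: return "lowerlayerdown";
	case IF_OPER_DOWN:           return "down";
	case IF_OPER_NOTPRESENT:     return "notpresent";
	case IF_OPER_TESTING:        return "testing";
	default:                     return "unknown";
	}
}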
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index 7a92c1ce1457..ab08f35cbc35 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -61,6 +61,7 @@
61#define ETH_P_8021Q 0x8100 /* 802.1Q VLAN Extended Header */ 61#define ETH_P_8021Q 0x8100 /* 802.1Q VLAN Extended Header */
62#define ETH_P_IPX 0x8137 /* IPX over DIX */ 62#define ETH_P_IPX 0x8137 /* IPX over DIX */
63#define ETH_P_IPV6 0x86DD /* IPv6 over bluebook */ 63#define ETH_P_IPV6 0x86DD /* IPv6 over bluebook */
64#define ETH_P_SLOW 0x8809 /* Slow Protocol. See 802.3ad 43B */
64#define ETH_P_WCCP 0x883E /* Web-cache coordination protocol 65#define ETH_P_WCCP 0x883E /* Web-cache coordination protocol
65 * defined in draft-wilson-wrec-wccp-v2-00.txt */ 66 * defined in draft-wilson-wrec-wccp-v2-00.txt */
66#define ETH_P_PPP_DISC 0x8863 /* PPPoE discovery messages */ 67#define ETH_P_PPP_DISC 0x8863 /* PPPoE discovery messages */
diff --git a/include/linux/in.h b/include/linux/in.h
index ba355384016a..94f557fa4636 100644
--- a/include/linux/in.h
+++ b/include/linux/in.h
@@ -72,6 +72,7 @@ struct in_addr {
72#define IP_FREEBIND 15 72#define IP_FREEBIND 15
73#define IP_IPSEC_POLICY 16 73#define IP_IPSEC_POLICY 16
74#define IP_XFRM_POLICY 17 74#define IP_XFRM_POLICY 17
75#define IP_PASSSEC 18
75 76
76/* BSD compatibility */ 77/* BSD compatibility */
77#define IP_RECVRETOPTS IP_RETOPTS 78#define IP_RECVRETOPTS IP_RETOPTS
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index fd7af86151b1..92297ff24e85 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -25,6 +25,7 @@ struct ipv4_devconf
25 int arp_filter; 25 int arp_filter;
26 int arp_announce; 26 int arp_announce;
27 int arp_ignore; 27 int arp_ignore;
28 int arp_accept;
28 int medium_id; 29 int medium_id;
29 int no_xfrm; 30 int no_xfrm;
30 int no_policy; 31 int no_policy;
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 9c8f4c9ed429..1263d8cb3c18 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -145,6 +145,15 @@ struct ipv6_devconf {
145 __s32 max_desync_factor; 145 __s32 max_desync_factor;
146#endif 146#endif
147 __s32 max_addresses; 147 __s32 max_addresses;
148 __s32 accept_ra_defrtr;
149 __s32 accept_ra_pinfo;
150#ifdef CONFIG_IPV6_ROUTER_PREF
151 __s32 accept_ra_rtr_pref;
152 __s32 rtr_probe_interval;
153#ifdef CONFIG_IPV6_ROUTE_INFO
154 __s32 accept_ra_rt_info_max_plen;
155#endif
156#endif
148 void *sysctl; 157 void *sysctl;
149}; 158};
150 159
@@ -167,6 +176,11 @@ enum {
167 DEVCONF_MAX_DESYNC_FACTOR, 176 DEVCONF_MAX_DESYNC_FACTOR,
168 DEVCONF_MAX_ADDRESSES, 177 DEVCONF_MAX_ADDRESSES,
169 DEVCONF_FORCE_MLD_VERSION, 178 DEVCONF_FORCE_MLD_VERSION,
179 DEVCONF_ACCEPT_RA_DEFRTR,
180 DEVCONF_ACCEPT_RA_PINFO,
181 DEVCONF_ACCEPT_RA_RTR_PREF,
182 DEVCONF_RTR_PROBE_INTERVAL,
183 DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN,
170 DEVCONF_MAX 184 DEVCONF_MAX
171}; 185};
172 186
diff --git a/include/linux/ipv6_route.h b/include/linux/ipv6_route.h
index d7c41d1d706a..b323ff577967 100644
--- a/include/linux/ipv6_route.h
+++ b/include/linux/ipv6_route.h
@@ -23,12 +23,22 @@
23#define RTF_NONEXTHOP 0x00200000 /* route with no nexthop */ 23#define RTF_NONEXTHOP 0x00200000 /* route with no nexthop */
24#define RTF_EXPIRES 0x00400000 24#define RTF_EXPIRES 0x00400000
25 25
26#define RTF_ROUTEINFO 0x00800000 /* route information - RA */
27
26#define RTF_CACHE 0x01000000 /* cache entry */ 28#define RTF_CACHE 0x01000000 /* cache entry */
27#define RTF_FLOW 0x02000000 /* flow significant route */ 29#define RTF_FLOW 0x02000000 /* flow significant route */
28#define RTF_POLICY 0x04000000 /* policy route */ 30#define RTF_POLICY 0x04000000 /* policy route */
29 31
32#define RTF_PREF(pref) ((pref) << 27)
33#define RTF_PREF_MASK 0x18000000
34
30#define RTF_LOCAL 0x80000000 35#define RTF_LOCAL 0x80000000
31 36
37#ifdef __KERNEL__
38#define IPV6_EXTRACT_PREF(flag) (((flag) & RTF_PREF_MASK) >> 27)
39#define IPV6_DECODE_PREF(pref) ((pref) ^ 2) /* 1:low,2:med,3:high */
40#endif
41
32struct in6_rtmsg { 42struct in6_rtmsg {
33 struct in6_addr rtmsg_dst; 43 struct in6_addr rtmsg_dst;
34 struct in6_addr rtmsg_src; 44 struct in6_addr rtmsg_src;
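A short sketch of how the 2-bit route preference travels through the new macros; the surrounding functions are made up, and the EXTRACT/DECODE helpers are kernel-only per the #ifdef above.

static __u32 example_flags_for_pref(unsigned int pref)
{
	/* pack the preference into bits 27-28 of the route flags */
	return RTF_ROUTEINFO | RTF_PREF(pref);
}

static unsigned int example_pref_from_flags(__u32 flags)
{
	/* unpack and remap to the 1:low, 2:medium, 3:high scale noted above */
	return IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(flags));
}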
diff --git a/include/linux/irda.h b/include/linux/irda.h
index 95dee174cdc5..09d8f105a5a8 100644
--- a/include/linux/irda.h
+++ b/include/linux/irda.h
@@ -76,6 +76,7 @@ typedef enum {
76 IRDA_MCP2120_DONGLE = 9, 76 IRDA_MCP2120_DONGLE = 9,
77 IRDA_ACT200L_DONGLE = 10, 77 IRDA_ACT200L_DONGLE = 10,
78 IRDA_MA600_DONGLE = 11, 78 IRDA_MA600_DONGLE = 11,
79 IRDA_TOIM3232_DONGLE = 12,
79} IRDA_DONGLE; 80} IRDA_DONGLE;
80 81
81/* Protocol types to be used for SOCK_DGRAM */ 82/* Protocol types to be used for SOCK_DGRAM */
diff --git a/include/linux/kobj_map.h b/include/linux/kobj_map.h
index cbe7d8008042..bafe178a381f 100644
--- a/include/linux/kobj_map.h
+++ b/include/linux/kobj_map.h
@@ -1,6 +1,6 @@
1#ifdef __KERNEL__ 1#ifdef __KERNEL__
2 2
3#include <asm/semaphore.h> 3#include <linux/mutex.h>
4 4
5typedef struct kobject *kobj_probe_t(dev_t, int *, void *); 5typedef struct kobject *kobj_probe_t(dev_t, int *, void *);
6struct kobj_map; 6struct kobj_map;
@@ -9,6 +9,6 @@ int kobj_map(struct kobj_map *, dev_t, unsigned long, struct module *,
9 kobj_probe_t *, int (*)(dev_t, void *), void *); 9 kobj_probe_t *, int (*)(dev_t, void *), void *);
10void kobj_unmap(struct kobj_map *, dev_t, unsigned long); 10void kobj_unmap(struct kobj_map *, dev_t, unsigned long);
11struct kobject *kobj_lookup(struct kobj_map *, dev_t, int *); 11struct kobject *kobj_lookup(struct kobj_map *, dev_t, int *);
12struct kobj_map *kobj_map_init(kobj_probe_t *, struct semaphore *); 12struct kobj_map *kobj_map_init(kobj_probe_t *, struct mutex *);
13 13
14#endif 14#endif
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index c374b5fa8d3b..4cb1214ec290 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -80,6 +80,8 @@ extern void kobject_unregister(struct kobject *);
80extern struct kobject * kobject_get(struct kobject *); 80extern struct kobject * kobject_get(struct kobject *);
81extern void kobject_put(struct kobject *); 81extern void kobject_put(struct kobject *);
82 82
83extern struct kobject *kobject_add_dir(struct kobject *, const char *);
84
83extern char * kobject_get_path(struct kobject *, gfp_t); 85extern char * kobject_get_path(struct kobject *, gfp_t);
84 86
85struct kobj_type { 87struct kobj_type {
@@ -255,7 +257,7 @@ struct subsys_attribute {
255extern int subsys_create_file(struct subsystem * , struct subsys_attribute *); 257extern int subsys_create_file(struct subsystem * , struct subsys_attribute *);
256extern void subsys_remove_file(struct subsystem * , struct subsys_attribute *); 258extern void subsys_remove_file(struct subsystem * , struct subsys_attribute *);
257 259
258#if defined(CONFIG_HOTPLUG) & defined(CONFIG_NET) 260#if defined(CONFIG_HOTPLUG) && defined(CONFIG_NET)
259void kobject_uevent(struct kobject *kobj, enum kobject_action action); 261void kobject_uevent(struct kobject *kobj, enum kobject_action action);
260 262
261int add_uevent_var(char **envp, int num_envp, int *cur_index, 263int add_uevent_var(char **envp, int num_envp, int *cur_index,
diff --git a/include/linux/libata.h b/include/linux/libata.h
index c91be5e64ede..239408ecfddf 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -35,7 +35,8 @@
35#include <linux/workqueue.h> 35#include <linux/workqueue.h>
36 36
37/* 37/*
38 * compile-time options 38 * compile-time options: to be removed as soon as all the drivers are
39 * converted to the new debugging mechanism
39 */ 40 */
40#undef ATA_DEBUG /* debugging output */ 41#undef ATA_DEBUG /* debugging output */
41#undef ATA_VERBOSE_DEBUG /* yet more debugging output */ 42#undef ATA_VERBOSE_DEBUG /* yet more debugging output */
@@ -61,15 +62,37 @@
61 62
62#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) 63#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
63 64
64#ifdef ATA_NDEBUG 65/* NEW: debug levels */
65#define assert(expr) 66#define HAVE_LIBATA_MSG 1
66#else 67
67#define assert(expr) \ 68enum {
68 if(unlikely(!(expr))) { \ 69 ATA_MSG_DRV = 0x0001,
69 printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \ 70 ATA_MSG_INFO = 0x0002,
70 #expr,__FILE__,__FUNCTION__,__LINE__); \ 71 ATA_MSG_PROBE = 0x0004,
71 } 72 ATA_MSG_WARN = 0x0008,
72#endif 73 ATA_MSG_MALLOC = 0x0010,
74 ATA_MSG_CTL = 0x0020,
75 ATA_MSG_INTR = 0x0040,
76 ATA_MSG_ERR = 0x0080,
77};
78
79#define ata_msg_drv(p) ((p)->msg_enable & ATA_MSG_DRV)
80#define ata_msg_info(p) ((p)->msg_enable & ATA_MSG_INFO)
81#define ata_msg_probe(p) ((p)->msg_enable & ATA_MSG_PROBE)
82#define ata_msg_warn(p) ((p)->msg_enable & ATA_MSG_WARN)
83#define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC)
84#define ata_msg_ctl(p) ((p)->msg_enable & ATA_MSG_CTL)
85#define ata_msg_intr(p) ((p)->msg_enable & ATA_MSG_INTR)
86#define ata_msg_err(p) ((p)->msg_enable & ATA_MSG_ERR)
87
88static inline u32 ata_msg_init(int dval, int default_msg_enable_bits)
89{
90 if (dval < 0 || dval >= (sizeof(u32) * 8))
91 return default_msg_enable_bits; /* should be 0x1 - only driver info msgs */
92 if (!dval)
93 return 0;
94 return (1 << dval) - 1;
95}
73 96
74/* defines only for the constants which don't work well as enums */ 97/* defines only for the constants which don't work well as enums */
75#define ATA_TAG_POISON 0xfafbfcfdU 98#define ATA_TAG_POISON 0xfafbfcfdU
@@ -99,8 +122,7 @@ enum {
99 /* struct ata_device stuff */ 122 /* struct ata_device stuff */
100 ATA_DFLAG_LBA48 = (1 << 0), /* device supports LBA48 */ 123 ATA_DFLAG_LBA48 = (1 << 0), /* device supports LBA48 */
101 ATA_DFLAG_PIO = (1 << 1), /* device currently in PIO mode */ 124 ATA_DFLAG_PIO = (1 << 1), /* device currently in PIO mode */
102 ATA_DFLAG_LOCK_SECTORS = (1 << 2), /* don't adjust max_sectors */ 125 ATA_DFLAG_LBA = (1 << 2), /* device supports LBA */
103 ATA_DFLAG_LBA = (1 << 3), /* device supports LBA */
104 126
105 ATA_DEV_UNKNOWN = 0, /* unknown device */ 127 ATA_DEV_UNKNOWN = 0, /* unknown device */
106 ATA_DEV_ATA = 1, /* ATA device */ 128 ATA_DEV_ATA = 1, /* ATA device */
@@ -115,9 +137,9 @@ enum {
115 ATA_FLAG_PORT_DISABLED = (1 << 2), /* port is disabled, ignore it */ 137 ATA_FLAG_PORT_DISABLED = (1 << 2), /* port is disabled, ignore it */
116 ATA_FLAG_SATA = (1 << 3), 138 ATA_FLAG_SATA = (1 << 3),
117 ATA_FLAG_NO_LEGACY = (1 << 4), /* no legacy mode check */ 139 ATA_FLAG_NO_LEGACY = (1 << 4), /* no legacy mode check */
118 ATA_FLAG_SRST = (1 << 5), /* use ATA SRST, not E.D.D. */ 140 ATA_FLAG_SRST = (1 << 5), /* (obsolete) use ATA SRST, not E.D.D. */
119 ATA_FLAG_MMIO = (1 << 6), /* use MMIO, not PIO */ 141 ATA_FLAG_MMIO = (1 << 6), /* use MMIO, not PIO */
120 ATA_FLAG_SATA_RESET = (1 << 7), /* use COMRESET */ 142 ATA_FLAG_SATA_RESET = (1 << 7), /* (obsolete) use COMRESET */
121 ATA_FLAG_PIO_DMA = (1 << 8), /* PIO cmds via DMA */ 143 ATA_FLAG_PIO_DMA = (1 << 8), /* PIO cmds via DMA */
122 ATA_FLAG_NOINTR = (1 << 9), /* FIXME: Remove this once 144 ATA_FLAG_NOINTR = (1 << 9), /* FIXME: Remove this once
123 * proper HSM is in place. */ 145 * proper HSM is in place. */
@@ -129,10 +151,14 @@ enum {
129 ATA_FLAG_PIO_LBA48 = (1 << 13), /* Host DMA engine is LBA28 only */ 151 ATA_FLAG_PIO_LBA48 = (1 << 13), /* Host DMA engine is LBA28 only */
130 ATA_FLAG_IRQ_MASK = (1 << 14), /* Mask IRQ in PIO xfers */ 152 ATA_FLAG_IRQ_MASK = (1 << 14), /* Mask IRQ in PIO xfers */
131 153
154 ATA_FLAG_FLUSH_PORT_TASK = (1 << 15), /* Flush port task */
155 ATA_FLAG_IN_EH = (1 << 16), /* EH in progress */
156
132 ATA_QCFLAG_ACTIVE = (1 << 1), /* cmd not yet ack'd to scsi layer */ 157 ATA_QCFLAG_ACTIVE = (1 << 1), /* cmd not yet ack'd to scsi layer */
133 ATA_QCFLAG_SG = (1 << 3), /* have s/g table? */ 158 ATA_QCFLAG_SG = (1 << 3), /* have s/g table? */
134 ATA_QCFLAG_SINGLE = (1 << 4), /* no s/g, just a single buffer */ 159 ATA_QCFLAG_SINGLE = (1 << 4), /* no s/g, just a single buffer */
135 ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE, 160 ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
161 ATA_QCFLAG_EH_SCHEDULED = (1 << 5), /* EH scheduled */
136 162
137 /* various lengths of time */ 163 /* various lengths of time */
138 ATA_TMOUT_EDD = 5 * HZ, /* heuristic */ 164 ATA_TMOUT_EDD = 5 * HZ, /* heuristic */
@@ -162,11 +188,19 @@ enum {
162 PORT_DISABLED = 2, 188 PORT_DISABLED = 2,
163 189
164 /* encoding various smaller bitmaps into a single 190 /* encoding various smaller bitmaps into a single
165 * unsigned long bitmap 191 * unsigned int bitmap
166 */ 192 */
167 ATA_SHIFT_UDMA = 0, 193 ATA_BITS_PIO = 5,
168 ATA_SHIFT_MWDMA = 8, 194 ATA_BITS_MWDMA = 3,
169 ATA_SHIFT_PIO = 11, 195 ATA_BITS_UDMA = 8,
196
197 ATA_SHIFT_PIO = 0,
198 ATA_SHIFT_MWDMA = ATA_SHIFT_PIO + ATA_BITS_PIO,
199 ATA_SHIFT_UDMA = ATA_SHIFT_MWDMA + ATA_BITS_MWDMA,
200
201 ATA_MASK_PIO = ((1 << ATA_BITS_PIO) - 1) << ATA_SHIFT_PIO,
202 ATA_MASK_MWDMA = ((1 << ATA_BITS_MWDMA) - 1) << ATA_SHIFT_MWDMA,
203 ATA_MASK_UDMA = ((1 << ATA_BITS_UDMA) - 1) << ATA_SHIFT_UDMA,
170 204
171 /* size of buffer to pad xfers ending on unaligned boundaries */ 205 /* size of buffer to pad xfers ending on unaligned boundaries */
172 ATA_DMA_PAD_SZ = 4, 206 ATA_DMA_PAD_SZ = 4,
@@ -189,10 +223,15 @@ enum hsm_task_states {
189}; 223};
190 224
191enum ata_completion_errors { 225enum ata_completion_errors {
192 AC_ERR_OTHER = (1 << 0), 226 AC_ERR_DEV = (1 << 0), /* device reported error */
193 AC_ERR_DEV = (1 << 1), 227 AC_ERR_HSM = (1 << 1), /* host state machine violation */
194 AC_ERR_ATA_BUS = (1 << 2), 228 AC_ERR_TIMEOUT = (1 << 2), /* timeout */
195 AC_ERR_HOST_BUS = (1 << 3), 229 AC_ERR_MEDIA = (1 << 3), /* media error */
230 AC_ERR_ATA_BUS = (1 << 4), /* ATA bus error */
231 AC_ERR_HOST_BUS = (1 << 5), /* host bus error */
232 AC_ERR_SYSTEM = (1 << 6), /* system error */
233 AC_ERR_INVALID = (1 << 7), /* invalid argument */
234 AC_ERR_OTHER = (1 << 8), /* unknown */
196}; 235};
197 236
198/* forward declarations */ 237/* forward declarations */
@@ -202,7 +241,10 @@ struct ata_port;
202struct ata_queued_cmd; 241struct ata_queued_cmd;
203 242
204/* typedefs */ 243/* typedefs */
205typedef int (*ata_qc_cb_t) (struct ata_queued_cmd *qc); 244typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc);
245typedef void (*ata_probeinit_fn_t)(struct ata_port *);
246typedef int (*ata_reset_fn_t)(struct ata_port *, int, unsigned int *);
247typedef void (*ata_postreset_fn_t)(struct ata_port *ap, unsigned int *);
206 248
207struct ata_ioports { 249struct ata_ioports {
208 unsigned long cmd_addr; 250 unsigned long cmd_addr;
@@ -305,7 +347,7 @@ struct ata_device {
305 unsigned long flags; /* ATA_DFLAG_xxx */ 347 unsigned long flags; /* ATA_DFLAG_xxx */
306 unsigned int class; /* ATA_DEV_xxx */ 348 unsigned int class; /* ATA_DEV_xxx */
307 unsigned int devno; /* 0 or 1 */ 349 unsigned int devno; /* 0 or 1 */
308 u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */ 350 u16 *id; /* IDENTIFY xxx DEVICE data */
309 u8 pio_mode; 351 u8 pio_mode;
310 u8 dma_mode; 352 u8 dma_mode;
311 u8 xfer_mode; 353 u8 xfer_mode;
@@ -313,6 +355,8 @@ struct ata_device {
313 355
314 unsigned int multi_count; /* sectors count for 356 unsigned int multi_count; /* sectors count for
315 READ/WRITE MULTIPLE */ 357 READ/WRITE MULTIPLE */
358 unsigned int max_sectors; /* per-device max sectors */
359 unsigned int cdb_len;
316 360
317 /* for CHS addressing */ 361 /* for CHS addressing */
318 u16 cylinders; /* Number of cylinders */ 362 u16 cylinders; /* Number of cylinders */
@@ -342,7 +386,6 @@ struct ata_port {
342 unsigned int mwdma_mask; 386 unsigned int mwdma_mask;
343 unsigned int udma_mask; 387 unsigned int udma_mask;
344 unsigned int cbl; /* cable type; ATA_CBL_xxx */ 388 unsigned int cbl; /* cable type; ATA_CBL_xxx */
345 unsigned int cdb_len;
346 389
347 struct ata_device device[ATA_MAX_DEVICES]; 390 struct ata_device device[ATA_MAX_DEVICES];
348 391
@@ -353,12 +396,14 @@ struct ata_port {
353 struct ata_host_stats stats; 396 struct ata_host_stats stats;
354 struct ata_host_set *host_set; 397 struct ata_host_set *host_set;
355 398
356 struct work_struct packet_task; 399 struct work_struct port_task;
357 400
358 struct work_struct pio_task;
359 unsigned int hsm_task_state; 401 unsigned int hsm_task_state;
360 unsigned long pio_task_timeout; 402 unsigned long pio_task_timeout;
361 403
404 u32 msg_enable;
405 struct list_head eh_done_q;
406
362 void *private_data; 407 void *private_data;
363}; 408};
364 409
@@ -378,7 +423,9 @@ struct ata_port_operations {
378 u8 (*check_altstatus)(struct ata_port *ap); 423 u8 (*check_altstatus)(struct ata_port *ap);
379 void (*dev_select)(struct ata_port *ap, unsigned int device); 424 void (*dev_select)(struct ata_port *ap, unsigned int device);
380 425
381 void (*phy_reset) (struct ata_port *ap); 426 void (*phy_reset) (struct ata_port *ap); /* obsolete */
427 int (*probe_reset) (struct ata_port *ap, unsigned int *classes);
428
382 void (*post_set_mode) (struct ata_port *ap); 429 void (*post_set_mode) (struct ata_port *ap);
383 430
384 int (*check_atapi_dma) (struct ata_queued_cmd *qc); 431 int (*check_atapi_dma) (struct ata_queued_cmd *qc);
@@ -387,7 +434,7 @@ struct ata_port_operations {
387 void (*bmdma_start) (struct ata_queued_cmd *qc); 434 void (*bmdma_start) (struct ata_queued_cmd *qc);
388 435
389 void (*qc_prep) (struct ata_queued_cmd *qc); 436 void (*qc_prep) (struct ata_queued_cmd *qc);
390 int (*qc_issue) (struct ata_queued_cmd *qc); 437 unsigned int (*qc_issue) (struct ata_queued_cmd *qc);
391 438
392 void (*eng_timeout) (struct ata_port *ap); 439 void (*eng_timeout) (struct ata_port *ap);
393 440
@@ -435,6 +482,18 @@ extern void ata_port_probe(struct ata_port *);
435extern void __sata_phy_reset(struct ata_port *ap); 482extern void __sata_phy_reset(struct ata_port *ap);
436extern void sata_phy_reset(struct ata_port *ap); 483extern void sata_phy_reset(struct ata_port *ap);
437extern void ata_bus_reset(struct ata_port *ap); 484extern void ata_bus_reset(struct ata_port *ap);
485extern int ata_drive_probe_reset(struct ata_port *ap,
486 ata_probeinit_fn_t probeinit,
487 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
488 ata_postreset_fn_t postreset, unsigned int *classes);
489extern void ata_std_probeinit(struct ata_port *ap);
490extern int ata_std_softreset(struct ata_port *ap, int verbose,
491 unsigned int *classes);
492extern int sata_std_hardreset(struct ata_port *ap, int verbose,
493 unsigned int *class);
494extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes);
495extern int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
496 int post_reset);
438extern void ata_port_disable(struct ata_port *); 497extern void ata_port_disable(struct ata_port *);
439extern void ata_std_ports(struct ata_ioports *ioaddr); 498extern void ata_std_ports(struct ata_ioports *ioaddr);
440#ifdef CONFIG_PCI 499#ifdef CONFIG_PCI
@@ -449,7 +508,10 @@ extern void ata_host_set_remove(struct ata_host_set *host_set);
449extern int ata_scsi_detect(struct scsi_host_template *sht); 508extern int ata_scsi_detect(struct scsi_host_template *sht);
450extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); 509extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
451extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)); 510extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
511extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
452extern int ata_scsi_error(struct Scsi_Host *host); 512extern int ata_scsi_error(struct Scsi_Host *host);
513extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
514extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
453extern int ata_scsi_release(struct Scsi_Host *host); 515extern int ata_scsi_release(struct Scsi_Host *host);
454extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc); 516extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
455extern int ata_scsi_device_resume(struct scsi_device *); 517extern int ata_scsi_device_resume(struct scsi_device *);
@@ -457,6 +519,11 @@ extern int ata_scsi_device_suspend(struct scsi_device *);
457extern int ata_device_resume(struct ata_port *, struct ata_device *); 519extern int ata_device_resume(struct ata_port *, struct ata_device *);
458extern int ata_device_suspend(struct ata_port *, struct ata_device *); 520extern int ata_device_suspend(struct ata_port *, struct ata_device *);
459extern int ata_ratelimit(void); 521extern int ata_ratelimit(void);
522extern unsigned int ata_busy_sleep(struct ata_port *ap,
523 unsigned long timeout_pat,
524 unsigned long timeout);
525extern void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *),
526 void *data, unsigned long delay);
460 527
461/* 528/*
462 * Default driver ops implementations 529 * Default driver ops implementations
@@ -470,26 +537,28 @@ extern void ata_std_dev_select (struct ata_port *ap, unsigned int device);
470extern u8 ata_check_status(struct ata_port *ap); 537extern u8 ata_check_status(struct ata_port *ap);
471extern u8 ata_altstatus(struct ata_port *ap); 538extern u8 ata_altstatus(struct ata_port *ap);
472extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf); 539extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
540extern int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes);
473extern int ata_port_start (struct ata_port *ap); 541extern int ata_port_start (struct ata_port *ap);
474extern void ata_port_stop (struct ata_port *ap); 542extern void ata_port_stop (struct ata_port *ap);
475extern void ata_host_stop (struct ata_host_set *host_set); 543extern void ata_host_stop (struct ata_host_set *host_set);
476extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs); 544extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
477extern void ata_qc_prep(struct ata_queued_cmd *qc); 545extern void ata_qc_prep(struct ata_queued_cmd *qc);
478extern int ata_qc_issue_prot(struct ata_queued_cmd *qc); 546extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
479extern void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, 547extern void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf,
480 unsigned int buflen); 548 unsigned int buflen);
481extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, 549extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
482 unsigned int n_elem); 550 unsigned int n_elem);
483extern unsigned int ata_dev_classify(const struct ata_taskfile *tf); 551extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
484extern void ata_dev_id_string(const u16 *id, unsigned char *s, 552extern void ata_id_string(const u16 *id, unsigned char *s,
485 unsigned int ofs, unsigned int len); 553 unsigned int ofs, unsigned int len);
486extern void ata_dev_config(struct ata_port *ap, unsigned int i); 554extern void ata_id_c_string(const u16 *id, unsigned char *s,
555 unsigned int ofs, unsigned int len);
487extern void ata_bmdma_setup (struct ata_queued_cmd *qc); 556extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
488extern void ata_bmdma_start (struct ata_queued_cmd *qc); 557extern void ata_bmdma_start (struct ata_queued_cmd *qc);
489extern void ata_bmdma_stop(struct ata_queued_cmd *qc); 558extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
490extern u8 ata_bmdma_status(struct ata_port *ap); 559extern u8 ata_bmdma_status(struct ata_port *ap);
491extern void ata_bmdma_irq_clear(struct ata_port *ap); 560extern void ata_bmdma_irq_clear(struct ata_port *ap);
492extern void ata_qc_complete(struct ata_queued_cmd *qc); 561extern void __ata_qc_complete(struct ata_queued_cmd *qc);
493extern void ata_eng_timeout(struct ata_port *ap); 562extern void ata_eng_timeout(struct ata_port *ap);
494extern void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev, 563extern void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev,
495 struct scsi_cmnd *cmd, 564 struct scsi_cmnd *cmd,
@@ -586,10 +655,14 @@ static inline unsigned int ata_tag_valid(unsigned int tag)
586 return (tag < ATA_MAX_QUEUE) ? 1 : 0; 655 return (tag < ATA_MAX_QUEUE) ? 1 : 0;
587} 656}
588 657
658static inline unsigned int ata_class_present(unsigned int class)
659{
660 return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI;
661}
662
589static inline unsigned int ata_dev_present(const struct ata_device *dev) 663static inline unsigned int ata_dev_present(const struct ata_device *dev)
590{ 664{
591 return ((dev->class == ATA_DEV_ATA) || 665 return ata_class_present(dev->class);
592 (dev->class == ATA_DEV_ATAPI));
593} 666}
594 667
595static inline u8 ata_chk_status(struct ata_port *ap) 668static inline u8 ata_chk_status(struct ata_port *ap)
@@ -657,9 +730,9 @@ static inline u8 ata_wait_idle(struct ata_port *ap)
657 730
658 if (status & (ATA_BUSY | ATA_DRQ)) { 731 if (status & (ATA_BUSY | ATA_DRQ)) {
659 unsigned long l = ap->ioaddr.status_addr; 732 unsigned long l = ap->ioaddr.status_addr;
660 printk(KERN_WARNING 733 if (ata_msg_warn(ap))
661 "ATA: abnormal status 0x%X on port 0x%lX\n", 734 printk(KERN_WARNING "ATA: abnormal status 0x%X on port 0x%lX\n",
662 status, l); 735 status, l);
663 } 736 }
664 737
665 return status; 738 return status;
@@ -701,6 +774,24 @@ static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
701 ata_tf_init(qc->ap, &qc->tf, qc->dev->devno); 774 ata_tf_init(qc->ap, &qc->tf, qc->dev->devno);
702} 775}
703 776
777/**
778 * ata_qc_complete - Complete an active ATA command
779 * @qc: Command to complete
781 *
782 * Indicate to the mid and upper layers that an ATA
783 * command has completed, with either an ok or not-ok status.
784 *
785 * LOCKING:
786 * spin_lock_irqsave(host_set lock)
787 */
788static inline void ata_qc_complete(struct ata_queued_cmd *qc)
789{
790 if (unlikely(qc->flags & ATA_QCFLAG_EH_SCHEDULED))
791 return;
792
793 __ata_qc_complete(qc);
794}
704 795
705/** 796/**
706 * ata_irq_on - Enable interrupts on a port. 797 * ata_irq_on - Enable interrupts on a port.
@@ -751,7 +842,8 @@ static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
751 842
752 status = ata_busy_wait(ap, bits, 1000); 843 status = ata_busy_wait(ap, bits, 1000);
753 if (status & bits) 844 if (status & bits)
754 DPRINTK("abnormal status 0x%X\n", status); 845 if (ata_msg_err(ap))
846 printk(KERN_ERR "abnormal status 0x%X\n", status);
755 847
756 /* get controller status; clear intr, err bits */ 848 /* get controller status; clear intr, err bits */
757 if (ap->flags & ATA_FLAG_MMIO) { 849 if (ap->flags & ATA_FLAG_MMIO) {
@@ -769,8 +861,10 @@ static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
769 post_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); 861 post_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
770 } 862 }
771 863
772 VPRINTK("irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n", 864 if (ata_msg_intr(ap))
773 host_stat, post_stat, status); 865 printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
866 __FUNCTION__,
867 host_stat, post_stat, status);
774 868
775 return status; 869 return status;
776} 870}
@@ -807,7 +901,7 @@ static inline int ata_try_flush_cache(const struct ata_device *dev)
807static inline unsigned int ac_err_mask(u8 status) 901static inline unsigned int ac_err_mask(u8 status)
808{ 902{
809 if (status & ATA_BUSY) 903 if (status & ATA_BUSY)
810 return AC_ERR_ATA_BUS; 904 return AC_ERR_HSM;
811 if (status & (ATA_ERR | ATA_DF)) 905 if (status & (ATA_ERR | ATA_DF))
812 return AC_ERR_DEV; 906 return AC_ERR_DEV;
813 return 0; 907 return 0;
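A sketch of how a libata low-level driver might wire up the new per-port message levels introduced above; the module parameter and its default are assumptions.

static int msg_enable = -1;	/* -1 means "use the driver default" */

static void example_setup_msgs(struct ata_port *ap)
{
	/* ata_msg_init() turns a debug level into an enable bitmap;
	 * ATA_MSG_DRV keeps only basic driver messages by default */
	ap->msg_enable = ata_msg_init(msg_enable, ATA_MSG_DRV);

	if (ata_msg_probe(ap))
		printk(KERN_INFO "probe-level libata messages enabled\n");
}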
diff --git a/include/linux/list.h b/include/linux/list.h
index 47208bd99f9e..67258b47e9ca 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -411,6 +411,17 @@ static inline void list_splice_init(struct list_head *list,
411 pos = list_entry(pos->member.next, typeof(*pos), member)) 411 pos = list_entry(pos->member.next, typeof(*pos), member))
412 412
413/** 413/**
414 * list_for_each_entry_from - iterate over list of given type
415 * continuing from existing point
416 * @pos: the type * to use as a loop counter.
417 * @head: the head for your list.
418 * @member: the name of the list_struct within the struct.
419 */
420#define list_for_each_entry_from(pos, head, member) \
421 for (; prefetch(pos->member.next), &pos->member != (head); \
422 pos = list_entry(pos->member.next, typeof(*pos), member))
423
424/**
414 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry 425 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
415 * @pos: the type * to use as a loop counter. 426 * @pos: the type * to use as a loop counter.
416 * @n: another type * to use as temporary storage 427 * @n: another type * to use as temporary storage
@@ -438,6 +449,19 @@ static inline void list_splice_init(struct list_head *list,
438 pos = n, n = list_entry(n->member.next, typeof(*n), member)) 449 pos = n, n = list_entry(n->member.next, typeof(*n), member))
439 450
440/** 451/**
452 * list_for_each_entry_safe_from - iterate over list of given type
453 * from existing point safe against removal of list entry
454 * @pos: the type * to use as a loop counter.
455 * @n: another type * to use as temporary storage
456 * @head: the head for your list.
457 * @member: the name of the list_struct within the struct.
458 */
459#define list_for_each_entry_safe_from(pos, n, head, member) \
460 for (n = list_entry(pos->member.next, typeof(*pos), member); \
461 &pos->member != (head); \
462 pos = n, n = list_entry(n->member.next, typeof(*n), member))
463
464/**
441 * list_for_each_entry_safe_reverse - iterate backwards over list of given type safe against 465 * list_for_each_entry_safe_reverse - iterate backwards over list of given type safe against
442 * removal of list entry 466 * removal of list entry
443 * @pos: the type * to use as a loop counter. 467 * @pos: the type * to use as a loop counter.
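A small illustrative use of the new list_for_each_entry_from iterator, resuming a walk at a previously found element rather than at the list head; the struct and names are invented for the example.

struct example_item {
	struct list_head node;
	int flagged;
};

static void example_flag_from(struct example_item *pos, struct list_head *head)
{
	/* 'pos' itself is visited first, then the rest of the list */
	list_for_each_entry_from(pos, head, node)
		pos->flagged = 1;
}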
diff --git a/include/linux/module.h b/include/linux/module.h
index 84d75f3a8aca..70bd843c71cb 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -198,6 +198,9 @@ void *__symbol_get_gpl(const char *symbol);
198#define EXPORT_SYMBOL_GPL(sym) \ 198#define EXPORT_SYMBOL_GPL(sym) \
199 __EXPORT_SYMBOL(sym, "_gpl") 199 __EXPORT_SYMBOL(sym, "_gpl")
200 200
201#define EXPORT_SYMBOL_GPL_FUTURE(sym) \
202 __EXPORT_SYMBOL(sym, "_gpl_future")
203
201#endif 204#endif
202 205
203struct module_ref 206struct module_ref
@@ -242,6 +245,7 @@ struct module
242 /* Sysfs stuff. */ 245 /* Sysfs stuff. */
243 struct module_kobject mkobj; 246 struct module_kobject mkobj;
244 struct module_param_attrs *param_attrs; 247 struct module_param_attrs *param_attrs;
248 struct module_attribute *modinfo_attrs;
245 const char *version; 249 const char *version;
246 const char *srcversion; 250 const char *srcversion;
247 251
@@ -255,6 +259,11 @@ struct module
255 unsigned int num_gpl_syms; 259 unsigned int num_gpl_syms;
256 const unsigned long *gpl_crcs; 260 const unsigned long *gpl_crcs;
257 261
262 /* symbols that will be GPL-only in the near future. */
263 const struct kernel_symbol *gpl_future_syms;
264 unsigned int num_gpl_future_syms;
265 const unsigned long *gpl_future_crcs;
266
258 /* Exception table */ 267 /* Exception table */
259 unsigned int num_exentries; 268 unsigned int num_exentries;
260 const struct exception_table_entry *extable; 269 const struct exception_table_entry *extable;
@@ -441,6 +450,7 @@ void module_remove_driver(struct device_driver *);
441#else /* !CONFIG_MODULES... */ 450#else /* !CONFIG_MODULES... */
442#define EXPORT_SYMBOL(sym) 451#define EXPORT_SYMBOL(sym)
443#define EXPORT_SYMBOL_GPL(sym) 452#define EXPORT_SYMBOL_GPL(sym)
453#define EXPORT_SYMBOL_GPL_FUTURE(sym)
444 454
445/* Given an address, look for it in the exception tables. */ 455/* Given an address, look for it in the exception tables. */
446static inline const struct exception_table_entry * 456static inline const struct exception_table_entry *
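A brief sketch of the new export class in use; the exported function is hypothetical.

int example_core_helper(void)
{
	return 0;
}
/* available to all modules today, but flagged as becoming GPL-only */
EXPORT_SYMBOL_GPL_FUTURE(example_core_helper);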
diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h
index 0b08cd692201..955d3069d727 100644
--- a/include/linux/mv643xx.h
+++ b/include/linux/mv643xx.h
@@ -1214,6 +1214,7 @@ struct mv64xxx_i2c_pdata {
1214#define MV643XX_ETH_FORCE_BP_MODE_NO_JAM 0 1214#define MV643XX_ETH_FORCE_BP_MODE_NO_JAM 0
1215#define MV643XX_ETH_FORCE_BP_MODE_JAM_TX (1<<7) 1215#define MV643XX_ETH_FORCE_BP_MODE_JAM_TX (1<<7)
1216#define MV643XX_ETH_FORCE_BP_MODE_JAM_TX_ON_RX_ERR (1<<8) 1216#define MV643XX_ETH_FORCE_BP_MODE_JAM_TX_ON_RX_ERR (1<<8)
1217#define MV643XX_ETH_SERIAL_PORT_CONTROL_RESERVED (1<<9)
1217#define MV643XX_ETH_FORCE_LINK_FAIL 0 1218#define MV643XX_ETH_FORCE_LINK_FAIL 0
1218#define MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL (1<<10) 1219#define MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL (1<<10)
1219#define MV643XX_ETH_RETRANSMIT_16_ATTEMPTS 0 1220#define MV643XX_ETH_RETRANSMIT_16_ATTEMPTS 0
@@ -1243,6 +1244,8 @@ struct mv64xxx_i2c_pdata {
1243#define MV643XX_ETH_SET_MII_SPEED_TO_10 0 1244#define MV643XX_ETH_SET_MII_SPEED_TO_10 0
1244#define MV643XX_ETH_SET_MII_SPEED_TO_100 (1<<24) 1245#define MV643XX_ETH_SET_MII_SPEED_TO_100 (1<<24)
1245 1246
1247#define MV643XX_ETH_MAX_RX_PACKET_MASK (0x7<<17)
1248
1246#define MV643XX_ETH_PORT_SERIAL_CONTROL_DEFAULT_VALUE \ 1249#define MV643XX_ETH_PORT_SERIAL_CONTROL_DEFAULT_VALUE \
1247 MV643XX_ETH_DO_NOT_FORCE_LINK_PASS | \ 1250 MV643XX_ETH_DO_NOT_FORCE_LINK_PASS | \
1248 MV643XX_ETH_ENABLE_AUTO_NEG_FOR_DUPLX | \ 1251 MV643XX_ETH_ENABLE_AUTO_NEG_FOR_DUPLX | \
@@ -1285,23 +1288,15 @@ struct mv64xxx_i2c_pdata {
1285#define MV643XX_ETH_NAME "mv643xx_eth" 1288#define MV643XX_ETH_NAME "mv643xx_eth"
1286 1289
1287struct mv643xx_eth_platform_data { 1290struct mv643xx_eth_platform_data {
1288 /*
1289 * Non-values for mac_addr, phy_addr, port_config, etc.
1290 * override the default value. Setting the corresponding
1291 * force_* field, causes the default value to be overridden
1292 * even when zero.
1293 */
1294 unsigned int force_phy_addr:1;
1295 unsigned int force_port_config:1;
1296 unsigned int force_port_config_extend:1;
1297 unsigned int force_port_sdma_config:1;
1298 unsigned int force_port_serial_control:1;
1299 int phy_addr;
1300 char *mac_addr; /* pointer to mac address */ 1291 char *mac_addr; /* pointer to mac address */
1301 u32 port_config; 1292 u16 force_phy_addr; /* force override if phy_addr == 0 */
1302 u32 port_config_extend; 1293 u16 phy_addr;
1303 u32 port_sdma_config; 1294
1304 u32 port_serial_control; 1295 /* If speed is 0, then speed and duplex are autonegotiated. */
1296 int speed; /* 0, SPEED_10, SPEED_100, SPEED_1000 */
1297 int duplex; /* DUPLEX_HALF or DUPLEX_FULL */
1298
1299 /* non-zero values of the following fields override defaults */
1305 u32 tx_queue_size; 1300 u32 tx_queue_size;
1306 u32 rx_queue_size; 1301 u32 rx_queue_size;
1307 u32 tx_sram_addr; 1302 u32 tx_sram_addr;
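Hypothetical board code filling the reworked mv643xx_eth platform data; leaving speed at 0 requests autonegotiation as the comment above states, and every value here is illustrative.

static char example_mac[6] = { 0x00, 0x50, 0x43, 0x00, 0x00, 0x01 };

static struct mv643xx_eth_platform_data example_eth_pdata = {
	.mac_addr      = example_mac,
	.phy_addr      = 8,
	.speed         = 0,		/* autonegotiate speed and duplex */
	.duplex        = 0,
	.tx_queue_size = 0,		/* 0 keeps the driver default */
};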
diff --git a/include/linux/net.h b/include/linux/net.h
index 28195a2d8ff0..152fa6551fd8 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -149,6 +149,10 @@ struct proto_ops {
149 int optname, char __user *optval, int optlen); 149 int optname, char __user *optval, int optlen);
150 int (*getsockopt)(struct socket *sock, int level, 150 int (*getsockopt)(struct socket *sock, int level,
151 int optname, char __user *optval, int __user *optlen); 151 int optname, char __user *optval, int __user *optlen);
152 int (*compat_setsockopt)(struct socket *sock, int level,
153 int optname, char __user *optval, int optlen);
154 int (*compat_getsockopt)(struct socket *sock, int level,
155 int optname, char __user *optval, int __user *optlen);
152 int (*sendmsg) (struct kiocb *iocb, struct socket *sock, 156 int (*sendmsg) (struct kiocb *iocb, struct socket *sock,
153 struct msghdr *m, size_t total_len); 157 struct msghdr *m, size_t total_len);
154 int (*recvmsg) (struct kiocb *iocb, struct socket *sock, 158 int (*recvmsg) (struct kiocb *iocb, struct socket *sock,
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 7fda03d338d1..950dc55e5192 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -230,7 +230,8 @@ enum netdev_state_t
230 __LINK_STATE_SCHED, 230 __LINK_STATE_SCHED,
231 __LINK_STATE_NOCARRIER, 231 __LINK_STATE_NOCARRIER,
232 __LINK_STATE_RX_SCHED, 232 __LINK_STATE_RX_SCHED,
233 __LINK_STATE_LINKWATCH_PENDING 233 __LINK_STATE_LINKWATCH_PENDING,
234 __LINK_STATE_DORMANT,
234}; 235};
235 236
236 237
@@ -335,11 +336,14 @@ struct net_device
335 */ 336 */
336 337
337 338
338 unsigned short flags; /* interface flags (a la BSD) */ 339 unsigned int flags; /* interface flags (a la BSD) */
339 unsigned short gflags; 340 unsigned short gflags;
340 unsigned short priv_flags; /* Like 'flags' but invisible to userspace. */ 341 unsigned short priv_flags; /* Like 'flags' but invisible to userspace. */
341 unsigned short padded; /* How much padding added by alloc_netdev() */ 342 unsigned short padded; /* How much padding added by alloc_netdev() */
342 343
344 unsigned char operstate; /* RFC2863 operstate */
345 unsigned char link_mode; /* mapping policy to operstate */
346
343 unsigned mtu; /* interface MTU value */ 347 unsigned mtu; /* interface MTU value */
344 unsigned short type; /* interface hardware type */ 348 unsigned short type; /* interface hardware type */
345 unsigned short hard_header_len; /* hardware hdr length */ 349 unsigned short hard_header_len; /* hardware hdr length */
@@ -708,12 +712,18 @@ static inline void dev_put(struct net_device *dev)
708 atomic_dec(&dev->refcnt); 712 atomic_dec(&dev->refcnt);
709} 713}
710 714
711#define __dev_put(dev) atomic_dec(&(dev)->refcnt) 715static inline void dev_hold(struct net_device *dev)
712#define dev_hold(dev) atomic_inc(&(dev)->refcnt) 716{
717 atomic_inc(&dev->refcnt);
718}
713 719
714/* Carrier loss detection, dial on demand. The functions netif_carrier_on 720/* Carrier loss detection, dial on demand. The functions netif_carrier_on
715 * and _off may be called from IRQ context, but it is caller 721 * and _off may be called from IRQ context, but it is caller
716 * who is responsible for serialization of these calls. 722 * who is responsible for serialization of these calls.
723 *
 724 * The name "carrier" is inappropriate; these functions should really be
 725 * called netif_lowerlayer_*() because they represent the state of any
 726 * kind of lower layer, not just hardware media.
717 */ 727 */
718 728
719extern void linkwatch_fire_event(struct net_device *dev); 729extern void linkwatch_fire_event(struct net_device *dev);
@@ -729,6 +739,29 @@ extern void netif_carrier_on(struct net_device *dev);
729 739
730extern void netif_carrier_off(struct net_device *dev); 740extern void netif_carrier_off(struct net_device *dev);
731 741
742static inline void netif_dormant_on(struct net_device *dev)
743{
744 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
745 linkwatch_fire_event(dev);
746}
747
748static inline void netif_dormant_off(struct net_device *dev)
749{
750 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
751 linkwatch_fire_event(dev);
752}
753
754static inline int netif_dormant(const struct net_device *dev)
755{
756 return test_bit(__LINK_STATE_DORMANT, &dev->state);
757}
758
759
760static inline int netif_oper_up(const struct net_device *dev) {
761 return (dev->operstate == IF_OPER_UP ||
762 dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
763}
764
732/* Hot-plugging. */ 765/* Hot-plugging. */
733static inline int netif_device_present(struct net_device *dev) 766static inline int netif_device_present(struct net_device *dev)
734{ 767{
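A sketch of a driver reporting an RFC 2863 dormant state, for instance while 802.1X authentication is pending; the trigger and names are illustrative.

static void example_auth_change(struct net_device *dev, int authenticated)
{
	/* IFF_DORMANT keeps operstate at IF_OPER_DORMANT until the upper
	 * protocol completes; clearing it lets the link go IF_OPER_UP */
	if (authenticated)
		netif_dormant_off(dev);
	else
		netif_dormant_on(dev);
}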
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 468896939843..412e52ca9720 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -80,10 +80,14 @@ struct nf_sockopt_ops
80 int set_optmin; 80 int set_optmin;
81 int set_optmax; 81 int set_optmax;
82 int (*set)(struct sock *sk, int optval, void __user *user, unsigned int len); 82 int (*set)(struct sock *sk, int optval, void __user *user, unsigned int len);
83 int (*compat_set)(struct sock *sk, int optval,
84 void __user *user, unsigned int len);
83 85
84 int get_optmin; 86 int get_optmin;
85 int get_optmax; 87 int get_optmax;
86 int (*get)(struct sock *sk, int optval, void __user *user, int *len); 88 int (*get)(struct sock *sk, int optval, void __user *user, int *len);
89 int (*compat_get)(struct sock *sk, int optval,
90 void __user *user, int *len);
87 91
88 /* Number of users inside set() or get(). */ 92 /* Number of users inside set() or get(). */
89 unsigned int use; 93 unsigned int use;
@@ -246,6 +250,11 @@ int nf_setsockopt(struct sock *sk, int pf, int optval, char __user *opt,
246int nf_getsockopt(struct sock *sk, int pf, int optval, char __user *opt, 250int nf_getsockopt(struct sock *sk, int pf, int optval, char __user *opt,
247 int *len); 251 int *len);
248 252
253int compat_nf_setsockopt(struct sock *sk, int pf, int optval,
254 char __user *opt, int len);
255int compat_nf_getsockopt(struct sock *sk, int pf, int optval,
256 char __user *opt, int *len);
257
249/* Packet queuing */ 258/* Packet queuing */
250struct nf_queue_handler { 259struct nf_queue_handler {
251 int (*outfn)(struct sk_buff *skb, struct nf_info *info, 260 int (*outfn)(struct sk_buff *skb, struct nf_info *info,
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 934a2479f160..9f5b12cf489b 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -164,6 +164,7 @@ extern void nfattr_parse(struct nfattr *tb[], int maxattr,
164 __res; \ 164 __res; \
165}) 165})
166 166
167extern int nfnetlink_has_listeners(unsigned int group);
167extern int nfnetlink_send(struct sk_buff *skb, u32 pid, unsigned group, 168extern int nfnetlink_send(struct sk_buff *skb, u32 pid, unsigned group,
168 int echo); 169 int echo);
169extern int nfnetlink_unicast(struct sk_buff *skb, u_int32_t pid, int flags); 170extern int nfnetlink_unicast(struct sk_buff *skb, u_int32_t pid, int flags);
diff --git a/include/linux/netfilter/nfnetlink_log.h b/include/linux/netfilter/nfnetlink_log.h
index b04b03880595..a7497c7436df 100644
--- a/include/linux/netfilter/nfnetlink_log.h
+++ b/include/linux/netfilter/nfnetlink_log.h
@@ -47,6 +47,8 @@ enum nfulnl_attr_type {
47 NFULA_PAYLOAD, /* opaque data payload */ 47 NFULA_PAYLOAD, /* opaque data payload */
48 NFULA_PREFIX, /* string prefix */ 48 NFULA_PREFIX, /* string prefix */
49 NFULA_UID, /* user id of socket */ 49 NFULA_UID, /* user id of socket */
50 NFULA_SEQ, /* instance-local sequence number */
51 NFULA_SEQ_GLOBAL, /* global sequence number */
50 52
51 __NFULA_MAX 53 __NFULA_MAX
52}; 54};
@@ -77,6 +79,7 @@ enum nfulnl_attr_config {
77 NFULA_CFG_NLBUFSIZ, /* u_int32_t buffer size */ 79 NFULA_CFG_NLBUFSIZ, /* u_int32_t buffer size */
78 NFULA_CFG_TIMEOUT, /* u_int32_t in 1/100 s */ 80 NFULA_CFG_TIMEOUT, /* u_int32_t in 1/100 s */
79 NFULA_CFG_QTHRESH, /* u_int32_t */ 81 NFULA_CFG_QTHRESH, /* u_int32_t */
82 NFULA_CFG_FLAGS, /* u_int16_t */
80 __NFULA_CFG_MAX 83 __NFULA_CFG_MAX
81}; 84};
82#define NFULA_CFG_MAX (__NFULA_CFG_MAX -1) 85#define NFULA_CFG_MAX (__NFULA_CFG_MAX -1)
@@ -85,4 +88,7 @@ enum nfulnl_attr_config {
85#define NFULNL_COPY_META 0x01 88#define NFULNL_COPY_META 0x01
86#define NFULNL_COPY_PACKET 0x02 89#define NFULNL_COPY_PACKET 0x02
87 90
91#define NFULNL_CFG_F_SEQ 0x0001
92#define NFULNL_CFG_F_SEQ_GLOBAL 0x0002
93
88#endif /* _NFNETLINK_LOG_H */ 94#endif /* _NFNETLINK_LOG_H */
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 6500d4e59d46..46a0f974f87c 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -92,8 +92,6 @@ struct xt_match
92 92
93 const char name[XT_FUNCTION_MAXNAMELEN-1]; 93 const char name[XT_FUNCTION_MAXNAMELEN-1];
94 94
95 u_int8_t revision;
96
97 /* Return true or false: return FALSE and set *hotdrop = 1 to 95 /* Return true or false: return FALSE and set *hotdrop = 1 to
98 force immediate packet drop. */ 96 force immediate packet drop. */
99 /* Arguments changed since 2.6.9, as this must now handle 97 /* Arguments changed since 2.6.9, as this must now handle
@@ -102,6 +100,7 @@ struct xt_match
102 int (*match)(const struct sk_buff *skb, 100 int (*match)(const struct sk_buff *skb,
103 const struct net_device *in, 101 const struct net_device *in,
104 const struct net_device *out, 102 const struct net_device *out,
103 const struct xt_match *match,
105 const void *matchinfo, 104 const void *matchinfo,
106 int offset, 105 int offset,
107 unsigned int protoff, 106 unsigned int protoff,
@@ -111,15 +110,25 @@ struct xt_match
111 /* Should return true or false. */ 110 /* Should return true or false. */
112 int (*checkentry)(const char *tablename, 111 int (*checkentry)(const char *tablename,
113 const void *ip, 112 const void *ip,
113 const struct xt_match *match,
114 void *matchinfo, 114 void *matchinfo,
115 unsigned int matchinfosize, 115 unsigned int matchinfosize,
116 unsigned int hook_mask); 116 unsigned int hook_mask);
117 117
118 /* Called when entry of this type deleted. */ 118 /* Called when entry of this type deleted. */
119 void (*destroy)(void *matchinfo, unsigned int matchinfosize); 119 void (*destroy)(const struct xt_match *match, void *matchinfo,
120 unsigned int matchinfosize);
120 121
121 /* Set this to THIS_MODULE if you are a module, otherwise NULL */ 122 /* Set this to THIS_MODULE if you are a module, otherwise NULL */
122 struct module *me; 123 struct module *me;
124
125 char *table;
126 unsigned int matchsize;
127 unsigned int hooks;
128 unsigned short proto;
129
130 unsigned short family;
131 u_int8_t revision;
123}; 132};
124 133
125/* Registration hooks for targets. */ 134/* Registration hooks for targets. */
@@ -129,8 +138,6 @@ struct xt_target
129 138
130 const char name[XT_FUNCTION_MAXNAMELEN-1]; 139 const char name[XT_FUNCTION_MAXNAMELEN-1];
131 140
132 u_int8_t revision;
133
134 /* Returns verdict. Argument order changed since 2.6.9, as this 141 /* Returns verdict. Argument order changed since 2.6.9, as this
135 must now handle non-linear skbs, using skb_copy_bits and 142 must now handle non-linear skbs, using skb_copy_bits and
136 skb_ip_make_writable. */ 143 skb_ip_make_writable. */
@@ -138,6 +145,7 @@ struct xt_target
138 const struct net_device *in, 145 const struct net_device *in,
139 const struct net_device *out, 146 const struct net_device *out,
140 unsigned int hooknum, 147 unsigned int hooknum,
148 const struct xt_target *target,
141 const void *targinfo, 149 const void *targinfo,
142 void *userdata); 150 void *userdata);
143 151
@@ -147,15 +155,25 @@ struct xt_target
147 /* Should return true or false. */ 155 /* Should return true or false. */
148 int (*checkentry)(const char *tablename, 156 int (*checkentry)(const char *tablename,
149 const void *entry, 157 const void *entry,
158 const struct xt_target *target,
150 void *targinfo, 159 void *targinfo,
151 unsigned int targinfosize, 160 unsigned int targinfosize,
152 unsigned int hook_mask); 161 unsigned int hook_mask);
153 162
154 /* Called when entry of this type deleted. */ 163 /* Called when entry of this type deleted. */
155 void (*destroy)(void *targinfo, unsigned int targinfosize); 164 void (*destroy)(const struct xt_target *target, void *targinfo,
165 unsigned int targinfosize);
156 166
157 /* Set this to THIS_MODULE if you are a module, otherwise NULL */ 167 /* Set this to THIS_MODULE if you are a module, otherwise NULL */
158 struct module *me; 168 struct module *me;
169
170 char *table;
171 unsigned int targetsize;
172 unsigned int hooks;
173 unsigned short proto;
174
175 unsigned short family;
176 u_int8_t revision;
159}; 177};
160 178
161/* Furniture shopping... */ 179/* Furniture shopping... */
@@ -207,6 +225,13 @@ extern void xt_unregister_target(int af, struct xt_target *target);
207extern int xt_register_match(int af, struct xt_match *target); 225extern int xt_register_match(int af, struct xt_match *target);
208extern void xt_unregister_match(int af, struct xt_match *target); 226extern void xt_unregister_match(int af, struct xt_match *target);
209 227
228extern int xt_check_match(const struct xt_match *match, unsigned short family,
229 unsigned int size, const char *table, unsigned int hook,
230 unsigned short proto, int inv_proto);
231extern int xt_check_target(const struct xt_target *target, unsigned short family,
232 unsigned int size, const char *table, unsigned int hook,
233 unsigned short proto, int inv_proto);
234
210extern int xt_register_table(struct xt_table *table, 235extern int xt_register_table(struct xt_table *table,
211 struct xt_table_info *bootstrap, 236 struct xt_table_info *bootstrap,
212 struct xt_table_info *newinfo); 237 struct xt_table_info *newinfo);
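[Editor's sketch, not part of the commit] A rough illustration of how a match module might look against the reworked struct xt_match above: the xt_match pointer is now passed to match(), checkentry() and destroy(), and the new table/matchsize/family fields are filled in at registration. All example_* names and the trivial length test are invented.

        #include <linux/module.h>
        #include <linux/socket.h>
        #include <linux/skbuff.h>
        #include <linux/netfilter/x_tables.h>

        struct example_mtinfo {                 /* made-up private match data */
                u_int32_t min_len;
        };

        static int example_match(const struct sk_buff *skb,
                                 const struct net_device *in,
                                 const struct net_device *out,
                                 const struct xt_match *match,
                                 const void *matchinfo,
                                 int offset, unsigned int protoff, int *hotdrop)
        {
                const struct example_mtinfo *info = matchinfo;

                return skb->len >= info->min_len;       /* illustrative test only */
        }

        static int example_checkentry(const char *tablename, const void *ip,
                                      const struct xt_match *match,
                                      void *matchinfo, unsigned int matchinfosize,
                                      unsigned int hook_mask)
        {
                return 1;                       /* "true": nothing to validate here */
        }

        static void example_destroy(const struct xt_match *match,
                                    void *matchinfo, unsigned int matchinfosize)
        {
                /* nothing allocated in this sketch */
        }

        static struct xt_match example_mt = {
                .name       = "example",
                .family     = AF_INET,          /* new field: address family */
                .match      = example_match,
                .checkentry = example_checkentry,
                .destroy    = example_destroy,
                .matchsize  = sizeof(struct example_mtinfo),
                .me         = THIS_MODULE,
        };

        static int __init example_mt_init(void)
        {
                return xt_register_match(AF_INET, &example_mt);
        }

        static void __exit example_mt_exit(void)
        {
                xt_unregister_match(AF_INET, &example_mt);
        }

        module_init(example_mt_init);
        module_exit(example_mt_exit);
        MODULE_LICENSE("GPL");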
diff --git a/include/linux/netfilter/xt_policy.h b/include/linux/netfilter/xt_policy.h
new file mode 100644
index 000000000000..a8132ec076fb
--- /dev/null
+++ b/include/linux/netfilter/xt_policy.h
@@ -0,0 +1,58 @@
1#ifndef _XT_POLICY_H
2#define _XT_POLICY_H
3
4#define XT_POLICY_MAX_ELEM 4
5
6enum xt_policy_flags
7{
8 XT_POLICY_MATCH_IN = 0x1,
9 XT_POLICY_MATCH_OUT = 0x2,
10 XT_POLICY_MATCH_NONE = 0x4,
11 XT_POLICY_MATCH_STRICT = 0x8,
12};
13
14enum xt_policy_modes
15{
16 XT_POLICY_MODE_TRANSPORT,
17 XT_POLICY_MODE_TUNNEL
18};
19
20struct xt_policy_spec
21{
22 u_int8_t saddr:1,
23 daddr:1,
24 proto:1,
25 mode:1,
26 spi:1,
27 reqid:1;
28};
29
30union xt_policy_addr
31{
32 struct in_addr a4;
33 struct in6_addr a6;
34};
35
36struct xt_policy_elem
37{
38 union xt_policy_addr saddr;
39 union xt_policy_addr smask;
40 union xt_policy_addr daddr;
41 union xt_policy_addr dmask;
42 u_int32_t spi;
43 u_int32_t reqid;
44 u_int8_t proto;
45 u_int8_t mode;
46
47 struct xt_policy_spec match;
48 struct xt_policy_spec invert;
49};
50
51struct xt_policy_info
52{
53 struct xt_policy_elem pol[XT_POLICY_MAX_ELEM];
54 u_int16_t flags;
55 u_int16_t len;
56};
57
58#endif /* _XT_POLICY_H */
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index de4d397865ce..a75b84bb9a88 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -47,22 +47,6 @@ enum nf_br_hook_priorities {
47#define BRNF_BRIDGED 0x08 47#define BRNF_BRIDGED 0x08
48#define BRNF_NF_BRIDGE_PREROUTING 0x10 48#define BRNF_NF_BRIDGE_PREROUTING 0x10
49 49
50static inline
51struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
52{
53 struct nf_bridge_info **nf_bridge = &(skb->nf_bridge);
54
55 if ((*nf_bridge = kmalloc(sizeof(**nf_bridge), GFP_ATOMIC)) != NULL) {
56 atomic_set(&(*nf_bridge)->use, 1);
57 (*nf_bridge)->mask = 0;
58 (*nf_bridge)->physindev = (*nf_bridge)->physoutdev = NULL;
59#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
60 (*nf_bridge)->netoutdev = NULL;
61#endif
62 }
63
64 return *nf_bridge;
65}
66 50
67/* Only used in br_forward.c */ 51/* Only used in br_forward.c */
68static inline 52static inline
@@ -77,17 +61,6 @@ void nf_bridge_maybe_copy_header(struct sk_buff *skb)
77 } 61 }
78} 62}
79 63
80static inline
81void nf_bridge_save_header(struct sk_buff *skb)
82{
83 int header_size = 16;
84
85 if (skb->protocol == __constant_htons(ETH_P_8021Q))
86 header_size = 18;
87
88 memcpy(skb->nf_bridge->data, skb->data - header_size, header_size);
89}
90
91/* This is called by the IP fragmenting code and it ensures there is 64/* This is called by the IP fragmenting code and it ensures there is
92 * enough room for the encapsulating header (if there is one). */ 65 * enough room for the encapsulating header (if there is one). */
93static inline 66static inline
diff --git a/include/linux/netfilter_ipv4/ip_conntrack.h b/include/linux/netfilter_ipv4/ip_conntrack.h
index 215765f043e6..f32d75c4f4cf 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack.h
@@ -29,6 +29,7 @@ union ip_conntrack_expect_proto {
29}; 29};
30 30
31/* Add protocol helper include file here */ 31/* Add protocol helper include file here */
32#include <linux/netfilter_ipv4/ip_conntrack_h323.h>
32#include <linux/netfilter_ipv4/ip_conntrack_pptp.h> 33#include <linux/netfilter_ipv4/ip_conntrack_pptp.h>
33#include <linux/netfilter_ipv4/ip_conntrack_amanda.h> 34#include <linux/netfilter_ipv4/ip_conntrack_amanda.h>
34#include <linux/netfilter_ipv4/ip_conntrack_ftp.h> 35#include <linux/netfilter_ipv4/ip_conntrack_ftp.h>
@@ -37,6 +38,7 @@ union ip_conntrack_expect_proto {
37/* per conntrack: application helper private data */ 38/* per conntrack: application helper private data */
38union ip_conntrack_help { 39union ip_conntrack_help {
39 /* insert conntrack helper private data (master) here */ 40 /* insert conntrack helper private data (master) here */
41 struct ip_ct_h323_master ct_h323_info;
40 struct ip_ct_pptp_master ct_pptp_info; 42 struct ip_ct_pptp_master ct_pptp_info;
41 struct ip_ct_ftp_master ct_ftp_info; 43 struct ip_ct_ftp_master ct_ftp_info;
42 struct ip_ct_irc_master ct_irc_info; 44 struct ip_ct_irc_master ct_irc_info;
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_h323.h b/include/linux/netfilter_ipv4/ip_conntrack_h323.h
new file mode 100644
index 000000000000..0987cea53840
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_conntrack_h323.h
@@ -0,0 +1,30 @@
1#ifndef _IP_CONNTRACK_H323_H
2#define _IP_CONNTRACK_H323_H
3
4#ifdef __KERNEL__
5
6#define RAS_PORT 1719
7#define Q931_PORT 1720
8#define H323_RTP_CHANNEL_MAX 4 /* Audio, video, FAX and other */
9
10/* This structure exists only once per master */
11struct ip_ct_h323_master {
12
13 /* Original and NATed Q.931 or H.245 signal ports */
14 u_int16_t sig_port[IP_CT_DIR_MAX];
15
16 /* Original and NATed RTP ports */
17 u_int16_t rtp_port[H323_RTP_CHANNEL_MAX][IP_CT_DIR_MAX];
18
19 union {
20 /* RAS connection timeout */
21 u_int32_t timeout;
22
23 /* Next TPKT length (for separate TPKT header and data) */
24 u_int16_t tpkt_len[IP_CT_DIR_MAX];
25 };
26};
27
28#endif
29
30#endif
diff --git a/include/linux/netfilter_ipv4/ip_nat.h b/include/linux/netfilter_ipv4/ip_nat.h
index 41a107de17cf..e9f5ed1d9f68 100644
--- a/include/linux/netfilter_ipv4/ip_nat.h
+++ b/include/linux/netfilter_ipv4/ip_nat.h
@@ -23,7 +23,7 @@ struct ip_nat_seq {
23 * modification (if any) */ 23 * modification (if any) */
24 u_int32_t correction_pos; 24 u_int32_t correction_pos;
25 /* sequence number offset before and after last modification */ 25 /* sequence number offset before and after last modification */
26 int32_t offset_before, offset_after; 26 int16_t offset_before, offset_after;
27}; 27};
28 28
29/* Single range specification. */ 29/* Single range specification. */
diff --git a/include/linux/netfilter_ipv4/ipt_policy.h b/include/linux/netfilter_ipv4/ipt_policy.h
index a3f6eff39d33..b9478a255301 100644
--- a/include/linux/netfilter_ipv4/ipt_policy.h
+++ b/include/linux/netfilter_ipv4/ipt_policy.h
@@ -1,58 +1,21 @@
1#ifndef _IPT_POLICY_H 1#ifndef _IPT_POLICY_H
2#define _IPT_POLICY_H 2#define _IPT_POLICY_H
3 3
4#define IPT_POLICY_MAX_ELEM 4 4#define IPT_POLICY_MAX_ELEM XT_POLICY_MAX_ELEM
5 5
6enum ipt_policy_flags 6/* ipt_policy_flags */
7{ 7#define IPT_POLICY_MATCH_IN XT_POLICY_MATCH_IN
8 IPT_POLICY_MATCH_IN = 0x1, 8#define IPT_POLICY_MATCH_OUT XT_POLICY_MATCH_OUT
9 IPT_POLICY_MATCH_OUT = 0x2, 9#define IPT_POLICY_MATCH_NONE XT_POLICY_MATCH_NONE
10 IPT_POLICY_MATCH_NONE = 0x4, 10#define IPT_POLICY_MATCH_STRICT XT_POLICY_MATCH_STRICT
11 IPT_POLICY_MATCH_STRICT = 0x8, 11
12}; 12/* ipt_policy_modes */
13 13#define IPT_POLICY_MODE_TRANSPORT XT_POLICY_MODE_TRANSPORT
14enum ipt_policy_modes 14#define IPT_POLICY_MODE_TUNNEL XT_POLICY_MODE_TUNNEL
15{ 15
16 IPT_POLICY_MODE_TRANSPORT, 16#define ipt_policy_spec xt_policy_spec
17 IPT_POLICY_MODE_TUNNEL 17#define ipt_policy_addr xt_policy_addr
18}; 18#define ipt_policy_elem xt_policy_elem
19 19#define ipt_policy_info xt_policy_info
20struct ipt_policy_spec
21{
22 u_int8_t saddr:1,
23 daddr:1,
24 proto:1,
25 mode:1,
26 spi:1,
27 reqid:1;
28};
29
30union ipt_policy_addr
31{
32 struct in_addr a4;
33 struct in6_addr a6;
34};
35
36struct ipt_policy_elem
37{
38 union ipt_policy_addr saddr;
39 union ipt_policy_addr smask;
40 union ipt_policy_addr daddr;
41 union ipt_policy_addr dmask;
42 u_int32_t spi;
43 u_int32_t reqid;
44 u_int8_t proto;
45 u_int8_t mode;
46
47 struct ipt_policy_spec match;
48 struct ipt_policy_spec invert;
49};
50
51struct ipt_policy_info
52{
53 struct ipt_policy_elem pol[IPT_POLICY_MAX_ELEM];
54 u_int16_t flags;
55 u_int16_t len;
56};
57 20
58#endif /* _IPT_POLICY_H */ 21#endif /* _IPT_POLICY_H */
diff --git a/include/linux/netfilter_ipv6/ip6t_policy.h b/include/linux/netfilter_ipv6/ip6t_policy.h
index 671bd818300f..6bab3163d2fb 100644
--- a/include/linux/netfilter_ipv6/ip6t_policy.h
+++ b/include/linux/netfilter_ipv6/ip6t_policy.h
@@ -1,58 +1,21 @@
1#ifndef _IP6T_POLICY_H 1#ifndef _IP6T_POLICY_H
2#define _IP6T_POLICY_H 2#define _IP6T_POLICY_H
3 3
4#define IP6T_POLICY_MAX_ELEM 4 4#define IP6T_POLICY_MAX_ELEM XT_POLICY_MAX_ELEM
5 5
6enum ip6t_policy_flags 6/* ip6t_policy_flags */
7{ 7#define IP6T_POLICY_MATCH_IN XT_POLICY_MATCH_IN
8 IP6T_POLICY_MATCH_IN = 0x1, 8#define IP6T_POLICY_MATCH_OUT XT_POLICY_MATCH_OUT
9 IP6T_POLICY_MATCH_OUT = 0x2, 9#define IP6T_POLICY_MATCH_NONE XT_POLICY_MATCH_NONE
10 IP6T_POLICY_MATCH_NONE = 0x4, 10#define IP6T_POLICY_MATCH_STRICT XT_POLICY_MATCH_STRICT
11 IP6T_POLICY_MATCH_STRICT = 0x8, 11
12}; 12/* ip6t_policy_modes */
13 13#define IP6T_POLICY_MODE_TRANSPORT XT_POLICY_MODE_TRANSPORT
14enum ip6t_policy_modes 14#define IP6T_POLICY_MODE_TUNNEL XT_POLICY_MODE_TUNNEL
15{ 15
16 IP6T_POLICY_MODE_TRANSPORT, 16#define ip6t_policy_spec xt_policy_spec
17 IP6T_POLICY_MODE_TUNNEL 17#define ip6t_policy_addr xt_policy_addr
18}; 18#define ip6t_policy_elem xt_policy_elem
19 19#define ip6t_policy_info xt_policy_info
20struct ip6t_policy_spec
21{
22 u_int8_t saddr:1,
23 daddr:1,
24 proto:1,
25 mode:1,
26 spi:1,
27 reqid:1;
28};
29
30union ip6t_policy_addr
31{
32 struct in_addr a4;
33 struct in6_addr a6;
34};
35
36struct ip6t_policy_elem
37{
38 union ip6t_policy_addr saddr;
39 union ip6t_policy_addr smask;
40 union ip6t_policy_addr daddr;
41 union ip6t_policy_addr dmask;
42 u_int32_t spi;
43 u_int32_t reqid;
44 u_int8_t proto;
45 u_int8_t mode;
46
47 struct ip6t_policy_spec match;
48 struct ip6t_policy_spec invert;
49};
50
51struct ip6t_policy_info
52{
53 struct ip6t_policy_elem pol[IP6T_POLICY_MAX_ELEM];
54 u_int16_t flags;
55 u_int16_t len;
56};
57 20
58#endif /* _IP6T_POLICY_H */ 21#endif /* _IP6T_POLICY_H */
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index c256ebe2a7b4..f8f3d1c927f8 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -151,6 +151,7 @@ struct netlink_skb_parms
151 151
152extern struct sock *netlink_kernel_create(int unit, unsigned int groups, void (*input)(struct sock *sk, int len), struct module *module); 152extern struct sock *netlink_kernel_create(int unit, unsigned int groups, void (*input)(struct sock *sk, int len), struct module *module);
153extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err); 153extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
154extern int netlink_has_listeners(struct sock *sk, unsigned int group);
154extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock); 155extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock);
155extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 pid, 156extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 pid,
156 __u32 group, gfp_t allocation); 157 __u32 group, gfp_t allocation);
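[Editor's sketch, not part of the commit] The new netlink_has_listeners() lets a sender skip building a message when nobody is subscribed to the multicast group. A hedged usage sketch; the example_notify name, the empty payload and the group number handling are invented.

        static void example_notify(struct sock *nlsk, unsigned int group)
        {
                struct sk_buff *skb;

                if (!netlink_has_listeners(nlsk, group))
                        return;                 /* no subscribers: save the allocation */

                skb = alloc_skb(NLMSG_SPACE(0), GFP_ATOMIC);
                if (skb == NULL)
                        return;
                /* ... fill in an nlmsghdr and payload here ... */
                netlink_broadcast(nlsk, skb, 0, group, GFP_ATOMIC);
        }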
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index a6fdbe13a245..ec3c32932620 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1367,6 +1367,7 @@
1367#define PCI_DEVICE_ID_SERVERWORKS_HE 0x0008 1367#define PCI_DEVICE_ID_SERVERWORKS_HE 0x0008
1368#define PCI_DEVICE_ID_SERVERWORKS_LE 0x0009 1368#define PCI_DEVICE_ID_SERVERWORKS_LE 0x0009
1369#define PCI_DEVICE_ID_SERVERWORKS_GCNB_LE 0x0017 1369#define PCI_DEVICE_ID_SERVERWORKS_GCNB_LE 0x0017
1370#define PCI_DEVICE_ID_SERVERWORKS_EPB 0x0103
1370#define PCI_DEVICE_ID_SERVERWORKS_OSB4 0x0200 1371#define PCI_DEVICE_ID_SERVERWORKS_OSB4 0x0200
1371#define PCI_DEVICE_ID_SERVERWORKS_CSB5 0x0201 1372#define PCI_DEVICE_ID_SERVERWORKS_CSB5 0x0201
1372#define PCI_DEVICE_ID_SERVERWORKS_CSB6 0x0203 1373#define PCI_DEVICE_ID_SERVERWORKS_CSB6 0x0203
@@ -1858,16 +1859,22 @@
1858#define PCI_DEVICE_ID_TIGON3_5705M 0x165d 1859#define PCI_DEVICE_ID_TIGON3_5705M 0x165d
1859#define PCI_DEVICE_ID_TIGON3_5705M_2 0x165e 1860#define PCI_DEVICE_ID_TIGON3_5705M_2 0x165e
1860#define PCI_DEVICE_ID_TIGON3_5714 0x1668 1861#define PCI_DEVICE_ID_TIGON3_5714 0x1668
1862#define PCI_DEVICE_ID_TIGON3_5714S 0x1669
1861#define PCI_DEVICE_ID_TIGON3_5780 0x166a 1863#define PCI_DEVICE_ID_TIGON3_5780 0x166a
1862#define PCI_DEVICE_ID_TIGON3_5780S 0x166b 1864#define PCI_DEVICE_ID_TIGON3_5780S 0x166b
1863#define PCI_DEVICE_ID_TIGON3_5705F 0x166e 1865#define PCI_DEVICE_ID_TIGON3_5705F 0x166e
1866#define PCI_DEVICE_ID_TIGON3_5754M 0x1672
1864#define PCI_DEVICE_ID_TIGON3_5750 0x1676 1867#define PCI_DEVICE_ID_TIGON3_5750 0x1676
1865#define PCI_DEVICE_ID_TIGON3_5751 0x1677 1868#define PCI_DEVICE_ID_TIGON3_5751 0x1677
1866#define PCI_DEVICE_ID_TIGON3_5715 0x1678 1869#define PCI_DEVICE_ID_TIGON3_5715 0x1678
1870#define PCI_DEVICE_ID_TIGON3_5715S 0x1679
1871#define PCI_DEVICE_ID_TIGON3_5754 0x167a
1867#define PCI_DEVICE_ID_TIGON3_5750M 0x167c 1872#define PCI_DEVICE_ID_TIGON3_5750M 0x167c
1868#define PCI_DEVICE_ID_TIGON3_5751M 0x167d 1873#define PCI_DEVICE_ID_TIGON3_5751M 0x167d
1869#define PCI_DEVICE_ID_TIGON3_5751F 0x167e 1874#define PCI_DEVICE_ID_TIGON3_5751F 0x167e
1875#define PCI_DEVICE_ID_TIGON3_5787M 0x1693
1870#define PCI_DEVICE_ID_TIGON3_5782 0x1696 1876#define PCI_DEVICE_ID_TIGON3_5782 0x1696
1877#define PCI_DEVICE_ID_TIGON3_5787 0x169b
1871#define PCI_DEVICE_ID_TIGON3_5788 0x169c 1878#define PCI_DEVICE_ID_TIGON3_5788 0x169c
1872#define PCI_DEVICE_ID_TIGON3_5789 0x169d 1879#define PCI_DEVICE_ID_TIGON3_5789 0x169d
1873#define PCI_DEVICE_ID_TIGON3_5702X 0x16a6 1880#define PCI_DEVICE_ID_TIGON3_5702X 0x16a6
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index d50482ba27fe..d572d5376319 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -199,6 +199,7 @@ enum
199#define RTPROT_BIRD 12 /* BIRD */ 199#define RTPROT_BIRD 12 /* BIRD */
200#define RTPROT_DNROUTED 13 /* DECnet routing daemon */ 200#define RTPROT_DNROUTED 13 /* DECnet routing daemon */
201#define RTPROT_XORP 14 /* XORP */ 201#define RTPROT_XORP 14 /* XORP */
202#define RTPROT_NTK 15 /* Netsukuku */
202 203
203/* rtm_scope 204/* rtm_scope
204 205
@@ -733,6 +734,8 @@ enum
733#define IFLA_MAP IFLA_MAP 734#define IFLA_MAP IFLA_MAP
734 IFLA_WEIGHT, 735 IFLA_WEIGHT,
735#define IFLA_WEIGHT IFLA_WEIGHT 736#define IFLA_WEIGHT IFLA_WEIGHT
737 IFLA_OPERSTATE,
738 IFLA_LINKMODE,
736 __IFLA_MAX 739 __IFLA_MAX
737}; 740};
738 741
@@ -905,6 +908,7 @@ struct tcamsg
905#ifdef __KERNEL__ 908#ifdef __KERNEL__
906 909
907#include <linux/config.h> 910#include <linux/config.h>
911#include <linux/mutex.h>
908 912
909extern size_t rtattr_strlcpy(char *dest, const struct rtattr *rta, size_t size); 913extern size_t rtattr_strlcpy(char *dest, const struct rtattr *rta, size_t size);
910static __inline__ int rtattr_strcmp(const struct rtattr *rta, const char *str) 914static __inline__ int rtattr_strcmp(const struct rtattr *rta, const char *str)
@@ -1036,24 +1040,17 @@ __rta_reserve(struct sk_buff *skb, int attrtype, int attrlen)
1036 1040
1037extern void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change); 1041extern void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change);
1038 1042
1039extern struct semaphore rtnl_sem; 1043/* RTNL is used as a global lock for all changes to network configuration */
1040
1041#define rtnl_shlock() down(&rtnl_sem)
1042#define rtnl_shlock_nowait() down_trylock(&rtnl_sem)
1043
1044#define rtnl_shunlock() do { up(&rtnl_sem); \
1045 if (rtnl && rtnl->sk_receive_queue.qlen) \
1046 rtnl->sk_data_ready(rtnl, 0); \
1047 } while(0)
1048
1049extern void rtnl_lock(void); 1044extern void rtnl_lock(void);
1050extern int rtnl_lock_interruptible(void);
1051extern void rtnl_unlock(void); 1045extern void rtnl_unlock(void);
1046extern int rtnl_trylock(void);
1047
1052extern void rtnetlink_init(void); 1048extern void rtnetlink_init(void);
1049extern void __rtnl_unlock(void);
1053 1050
1054#define ASSERT_RTNL() do { \ 1051#define ASSERT_RTNL() do { \
1055 if (unlikely(down_trylock(&rtnl_sem) == 0)) { \ 1052 if (unlikely(rtnl_trylock())) { \
1056 up(&rtnl_sem); \ 1053 rtnl_unlock(); \
1057 printk(KERN_ERR "RTNL: assertion failed at %s (%d)\n", \ 1054 printk(KERN_ERR "RTNL: assertion failed at %s (%d)\n", \
1058 __FILE__, __LINE__); \ 1055 __FILE__, __LINE__); \
1059 dump_stack(); \ 1056 dump_stack(); \
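[Editor's sketch, not part of the commit] With the rtnl_sem macros gone, configuration paths take the RTNL through the functions declared above; ASSERT_RTNL() is now built on rtnl_trylock(), which returns non-zero when it acquires the lock. The example_* functions below are invented.

        #include <linux/rtnetlink.h>
        #include <linux/netdevice.h>

        static int example_configure(struct net_device *dev)
        {
                rtnl_lock();            /* global lock for network configuration */
                ASSERT_RTNL();          /* would warn if the RTNL were not held */
                /* ... change device state here ... */
                rtnl_unlock();
                return 0;
        }

        static int example_try_configure(struct net_device *dev)
        {
                if (!rtnl_trylock())    /* non-blocking variant, 0 means contended */
                        return -EAGAIN;
                /* ... */
                rtnl_unlock();
                return 0;
        }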
diff --git a/include/linux/security.h b/include/linux/security.h
index 7cbef482e13a..b18eb8cfa639 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1286,7 +1286,8 @@ struct security_operations {
1286 int (*socket_setsockopt) (struct socket * sock, int level, int optname); 1286 int (*socket_setsockopt) (struct socket * sock, int level, int optname);
1287 int (*socket_shutdown) (struct socket * sock, int how); 1287 int (*socket_shutdown) (struct socket * sock, int how);
1288 int (*socket_sock_rcv_skb) (struct sock * sk, struct sk_buff * skb); 1288 int (*socket_sock_rcv_skb) (struct sock * sk, struct sk_buff * skb);
1289 int (*socket_getpeersec) (struct socket *sock, char __user *optval, int __user *optlen, unsigned len); 1289 int (*socket_getpeersec_stream) (struct socket *sock, char __user *optval, int __user *optlen, unsigned len);
1290 int (*socket_getpeersec_dgram) (struct sk_buff *skb, char **secdata, u32 *seclen);
1290 int (*sk_alloc_security) (struct sock *sk, int family, gfp_t priority); 1291 int (*sk_alloc_security) (struct sock *sk, int family, gfp_t priority);
1291 void (*sk_free_security) (struct sock *sk); 1292 void (*sk_free_security) (struct sock *sk);
1292 unsigned int (*sk_getsid) (struct sock *sk, struct flowi *fl, u8 dir); 1293 unsigned int (*sk_getsid) (struct sock *sk, struct flowi *fl, u8 dir);
@@ -2741,10 +2742,16 @@ static inline int security_sock_rcv_skb (struct sock * sk,
2741 return security_ops->socket_sock_rcv_skb (sk, skb); 2742 return security_ops->socket_sock_rcv_skb (sk, skb);
2742} 2743}
2743 2744
2744static inline int security_socket_getpeersec(struct socket *sock, char __user *optval, 2745static inline int security_socket_getpeersec_stream(struct socket *sock, char __user *optval,
2745 int __user *optlen, unsigned len) 2746 int __user *optlen, unsigned len)
2746{ 2747{
2747 return security_ops->socket_getpeersec(sock, optval, optlen, len); 2748 return security_ops->socket_getpeersec_stream(sock, optval, optlen, len);
2749}
2750
2751static inline int security_socket_getpeersec_dgram(struct sk_buff *skb, char **secdata,
2752 u32 *seclen)
2753{
2754 return security_ops->socket_getpeersec_dgram(skb, secdata, seclen);
2748} 2755}
2749 2756
2750static inline int security_sk_alloc(struct sock *sk, int family, gfp_t priority) 2757static inline int security_sk_alloc(struct sock *sk, int family, gfp_t priority)
@@ -2863,8 +2870,14 @@ static inline int security_sock_rcv_skb (struct sock * sk,
2863 return 0; 2870 return 0;
2864} 2871}
2865 2872
2866static inline int security_socket_getpeersec(struct socket *sock, char __user *optval, 2873static inline int security_socket_getpeersec_stream(struct socket *sock, char __user *optval,
2867 int __user *optlen, unsigned len) 2874 int __user *optlen, unsigned len)
2875{
2876 return -ENOPROTOOPT;
2877}
2878
2879static inline int security_socket_getpeersec_dgram(struct sk_buff *skb, char **secdata,
2880 u32 *seclen)
2868{ 2881{
2869 return -ENOPROTOOPT; 2882 return -ENOPROTOOPT;
2870} 2883}
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 4041122dabfc..57abcea1cb5d 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -127,6 +127,9 @@
127/* Hilscher netx */ 127/* Hilscher netx */
128#define PORT_NETX 71 128#define PORT_NETX 71
129 129
130/* SUN4V Hypervisor Console */
131#define PORT_SUNHV 72
132
130#ifdef __KERNEL__ 133#ifdef __KERNEL__
131 134
132#include <linux/config.h> 135#include <linux/config.h>
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ad7cc22bd424..613b9513f8b9 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -270,7 +270,6 @@ struct sk_buff {
270 270
271 void (*destructor)(struct sk_buff *skb); 271 void (*destructor)(struct sk_buff *skb);
272#ifdef CONFIG_NETFILTER 272#ifdef CONFIG_NETFILTER
273 __u32 nfmark;
274 struct nf_conntrack *nfct; 273 struct nf_conntrack *nfct;
275#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 274#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
276 struct sk_buff *nfct_reasm; 275 struct sk_buff *nfct_reasm;
@@ -278,6 +277,7 @@ struct sk_buff {
278#ifdef CONFIG_BRIDGE_NETFILTER 277#ifdef CONFIG_BRIDGE_NETFILTER
279 struct nf_bridge_info *nf_bridge; 278 struct nf_bridge_info *nf_bridge;
280#endif 279#endif
280 __u32 nfmark;
281#endif /* CONFIG_NETFILTER */ 281#endif /* CONFIG_NETFILTER */
282#ifdef CONFIG_NET_SCHED 282#ifdef CONFIG_NET_SCHED
283 __u16 tc_index; /* traffic control index */ 283 __u16 tc_index; /* traffic control index */
@@ -304,6 +304,7 @@ struct sk_buff {
304 304
305#include <asm/system.h> 305#include <asm/system.h>
306 306
307extern void kfree_skb(struct sk_buff *skb);
307extern void __kfree_skb(struct sk_buff *skb); 308extern void __kfree_skb(struct sk_buff *skb);
308extern struct sk_buff *__alloc_skb(unsigned int size, 309extern struct sk_buff *__alloc_skb(unsigned int size,
309 gfp_t priority, int fclone); 310 gfp_t priority, int fclone);
@@ -404,22 +405,6 @@ static inline struct sk_buff *skb_get(struct sk_buff *skb)
404 */ 405 */
405 406
406/** 407/**
407 * kfree_skb - free an sk_buff
408 * @skb: buffer to free
409 *
410 * Drop a reference to the buffer and free it if the usage count has
411 * hit zero.
412 */
413static inline void kfree_skb(struct sk_buff *skb)
414{
415 if (likely(atomic_read(&skb->users) == 1))
416 smp_rmb();
417 else if (likely(!atomic_dec_and_test(&skb->users)))
418 return;
419 __kfree_skb(skb);
420}
421
422/**
423 * skb_cloned - is the buffer a clone 408 * skb_cloned - is the buffer a clone
424 * @skb: buffer to check 409 * @skb: buffer to check
425 * 410 *
@@ -1174,12 +1159,14 @@ static inline int skb_linearize(struct sk_buff *skb, gfp_t gfp)
1174 */ 1159 */
1175 1160
1176static inline void skb_postpull_rcsum(struct sk_buff *skb, 1161static inline void skb_postpull_rcsum(struct sk_buff *skb,
1177 const void *start, int len) 1162 const void *start, unsigned int len)
1178{ 1163{
1179 if (skb->ip_summed == CHECKSUM_HW) 1164 if (skb->ip_summed == CHECKSUM_HW)
1180 skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); 1165 skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
1181} 1166}
1182 1167
1168unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
1169
1183/** 1170/**
1184 * pskb_trim_rcsum - trim received skb and update checksum 1171 * pskb_trim_rcsum - trim received skb and update checksum
1185 * @skb: buffer to trim 1172 * @skb: buffer to trim
@@ -1351,16 +1338,6 @@ static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
1351 kfree_skb(skb); 1338 kfree_skb(skb);
1352} 1339}
1353#endif 1340#endif
1354static inline void nf_reset(struct sk_buff *skb)
1355{
1356 nf_conntrack_put(skb->nfct);
1357 skb->nfct = NULL;
1358#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
1359 nf_conntrack_put_reasm(skb->nfct_reasm);
1360 skb->nfct_reasm = NULL;
1361#endif
1362}
1363
1364#ifdef CONFIG_BRIDGE_NETFILTER 1341#ifdef CONFIG_BRIDGE_NETFILTER
1365static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge) 1342static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
1366{ 1343{
@@ -1373,6 +1350,20 @@ static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
1373 atomic_inc(&nf_bridge->use); 1350 atomic_inc(&nf_bridge->use);
1374} 1351}
1375#endif /* CONFIG_BRIDGE_NETFILTER */ 1352#endif /* CONFIG_BRIDGE_NETFILTER */
1353static inline void nf_reset(struct sk_buff *skb)
1354{
1355 nf_conntrack_put(skb->nfct);
1356 skb->nfct = NULL;
1357#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
1358 nf_conntrack_put_reasm(skb->nfct_reasm);
1359 skb->nfct_reasm = NULL;
1360#endif
1361#ifdef CONFIG_BRIDGE_NETFILTER
1362 nf_bridge_put(skb->nf_bridge);
1363 skb->nf_bridge = NULL;
1364#endif
1365}
1366
1376#else /* CONFIG_NETFILTER */ 1367#else /* CONFIG_NETFILTER */
1377static inline void nf_reset(struct sk_buff *skb) {} 1368static inline void nf_reset(struct sk_buff *skb) {}
1378#endif /* CONFIG_NETFILTER */ 1369#endif /* CONFIG_NETFILTER */
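[Editor's sketch, not part of the commit] The new out-of-line skb_pull_rcsum() pulls data while keeping a CHECKSUM_HW checksum consistent, and nf_reset() now also drops any nf_bridge state. A hedged decapsulation sketch; EXAMPLE_HDR_LEN and example_decap are invented.

        #define EXAMPLE_HDR_LEN 8               /* made-up encapsulation header size */

        static int example_decap(struct sk_buff *skb)
        {
                if (!pskb_may_pull(skb, EXAMPLE_HDR_LEN))
                        return -EINVAL;

                skb_pull_rcsum(skb, EXAMPLE_HDR_LEN);   /* adjusts skb->csum if CHECKSUM_HW */
                nf_reset(skb);                          /* also releases nf_bridge state now */
                return 0;
        }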
diff --git a/include/linux/socket.h b/include/linux/socket.h
index b02dda4ee83d..9ab2ddd80221 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -150,6 +150,7 @@ __KINLINE struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr *__
150 150
151#define SCM_RIGHTS 0x01 /* rw: access rights (array of int) */ 151#define SCM_RIGHTS 0x01 /* rw: access rights (array of int) */
152#define SCM_CREDENTIALS 0x02 /* rw: struct ucred */ 152#define SCM_CREDENTIALS 0x02 /* rw: struct ucred */
153#define SCM_SECURITY 0x03 /* rw: security label */
153 154
154struct ucred { 155struct ucred {
155 __u32 pid; 156 __u32 pid;
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index d33c6face032..b4acb3d37c3f 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -36,7 +36,7 @@ struct svc_sock {
36 36
37 struct list_head sk_deferred; /* deferred requests that need to 37 struct list_head sk_deferred; /* deferred requests that need to
38 * be revisted */ 38 * be revisted */
39 struct semaphore sk_sem; /* to serialize sending data */ 39 struct mutex sk_mutex; /* to serialize sending data */
40 40
41 int (*sk_recvfrom)(struct svc_rqst *rqstp); 41 int (*sk_recvfrom)(struct svc_rqst *rqstp);
42 int (*sk_sendto)(struct svc_rqst *rqstp); 42 int (*sk_sendto)(struct svc_rqst *rqstp);
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index bac61db26456..76eaeff76f82 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -211,6 +211,7 @@ enum
211 NET_SCTP=17, 211 NET_SCTP=17,
212 NET_LLC=18, 212 NET_LLC=18,
213 NET_NETFILTER=19, 213 NET_NETFILTER=19,
214 NET_DCCP=20,
214}; 215};
215 216
216/* /proc/sys/kernel/random */ 217/* /proc/sys/kernel/random */
@@ -261,6 +262,8 @@ enum
261 NET_CORE_DEV_WEIGHT=17, 262 NET_CORE_DEV_WEIGHT=17,
262 NET_CORE_SOMAXCONN=18, 263 NET_CORE_SOMAXCONN=18,
263 NET_CORE_BUDGET=19, 264 NET_CORE_BUDGET=19,
265 NET_CORE_AEVENT_ETIME=20,
266 NET_CORE_AEVENT_RSEQTH=21,
264}; 267};
265 268
266/* /proc/sys/net/ethernet */ 269/* /proc/sys/net/ethernet */
@@ -397,6 +400,9 @@ enum
397 NET_TCP_CONG_CONTROL=110, 400 NET_TCP_CONG_CONTROL=110,
398 NET_TCP_ABC=111, 401 NET_TCP_ABC=111,
399 NET_IPV4_IPFRAG_MAX_DIST=112, 402 NET_IPV4_IPFRAG_MAX_DIST=112,
403 NET_TCP_MTU_PROBING=113,
404 NET_TCP_BASE_MSS=114,
405 NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS=115,
400}; 406};
401 407
402enum { 408enum {
@@ -451,6 +457,7 @@ enum
451 NET_IPV4_CONF_ARP_ANNOUNCE=18, 457 NET_IPV4_CONF_ARP_ANNOUNCE=18,
452 NET_IPV4_CONF_ARP_IGNORE=19, 458 NET_IPV4_CONF_ARP_IGNORE=19,
453 NET_IPV4_CONF_PROMOTE_SECONDARIES=20, 459 NET_IPV4_CONF_PROMOTE_SECONDARIES=20,
460 NET_IPV4_CONF_ARP_ACCEPT=21,
454 __NET_IPV4_CONF_MAX 461 __NET_IPV4_CONF_MAX
455}; 462};
456 463
@@ -531,6 +538,11 @@ enum {
531 NET_IPV6_MAX_DESYNC_FACTOR=15, 538 NET_IPV6_MAX_DESYNC_FACTOR=15,
532 NET_IPV6_MAX_ADDRESSES=16, 539 NET_IPV6_MAX_ADDRESSES=16,
533 NET_IPV6_FORCE_MLD_VERSION=17, 540 NET_IPV6_FORCE_MLD_VERSION=17,
541 NET_IPV6_ACCEPT_RA_DEFRTR=18,
542 NET_IPV6_ACCEPT_RA_PINFO=19,
543 NET_IPV6_ACCEPT_RA_RTR_PREF=20,
544 NET_IPV6_RTR_PROBE_INTERVAL=21,
545 NET_IPV6_ACCEPT_RA_RT_INFO_MAX_PLEN=22,
534 __NET_IPV6_MAX 546 __NET_IPV6_MAX
535}; 547};
536 548
@@ -562,6 +574,21 @@ enum {
562 __NET_NEIGH_MAX 574 __NET_NEIGH_MAX
563}; 575};
564 576
577/* /proc/sys/net/dccp */
578enum {
579 NET_DCCP_DEFAULT=1,
580};
581
582/* /proc/sys/net/dccp/default */
583enum {
584 NET_DCCP_DEFAULT_SEQ_WINDOW = 1,
585 NET_DCCP_DEFAULT_RX_CCID = 2,
586 NET_DCCP_DEFAULT_TX_CCID = 3,
587 NET_DCCP_DEFAULT_ACK_RATIO = 4,
588 NET_DCCP_DEFAULT_SEND_ACKVEC = 5,
589 NET_DCCP_DEFAULT_SEND_NDP = 6,
590};
591
565/* /proc/sys/net/ipx */ 592/* /proc/sys/net/ipx */
566enum { 593enum {
567 NET_IPX_PPROP_BROADCASTING=1, 594 NET_IPX_PPROP_BROADCASTING=1,
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index f2bb2396853f..542d39596bd8 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -343,6 +343,12 @@ struct tcp_sock {
343 __u32 seq; 343 __u32 seq;
344 __u32 time; 344 __u32 time;
345 } rcvq_space; 345 } rcvq_space;
346
347/* TCP-specific MTU probe information. */
348 struct {
349 __u32 probe_seq_start;
350 __u32 probe_seq_end;
351 } mtu_probe;
346}; 352};
347 353
348static inline struct tcp_sock *tcp_sk(const struct sock *sk) 354static inline struct tcp_sock *tcp_sk(const struct sock *sk)
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 827cc6de5f5c..130d125fda12 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -1018,8 +1018,6 @@ extern int usb_get_descriptor(struct usb_device *dev, unsigned char desctype,
1018 unsigned char descindex, void *buf, int size); 1018 unsigned char descindex, void *buf, int size);
1019extern int usb_get_status(struct usb_device *dev, 1019extern int usb_get_status(struct usb_device *dev,
1020 int type, int target, void *data); 1020 int type, int target, void *data);
1021extern int usb_get_string(struct usb_device *dev,
1022 unsigned short langid, unsigned char index, void *buf, int size);
1023extern int usb_string(struct usb_device *dev, int index, 1021extern int usb_string(struct usb_device *dev, int index,
1024 char *buf, size_t size); 1022 char *buf, size_t size);
1025 1023
diff --git a/include/linux/usb_gadget.h b/include/linux/usb_gadget.h
index ff81117eb733..1d78870ed8af 100644
--- a/include/linux/usb_gadget.h
+++ b/include/linux/usb_gadget.h
@@ -801,7 +801,9 @@ struct usb_gadget_driver {
801 * Call this in your gadget driver's module initialization function, 801 * Call this in your gadget driver's module initialization function,
802 * to tell the underlying usb controller driver about your driver. 802 * to tell the underlying usb controller driver about your driver.
803 * The driver's bind() function will be called to bind it to a 803 * The driver's bind() function will be called to bind it to a
804 * gadget. This function must be called in a context that can sleep. 804 * gadget before this registration call returns. It's expected that
805 * the bind() functions will be in init sections.
806 * This function must be called in a context that can sleep.
805 */ 807 */
806int usb_gadget_register_driver (struct usb_gadget_driver *driver); 808int usb_gadget_register_driver (struct usb_gadget_driver *driver);
807 809
@@ -814,7 +816,8 @@ int usb_gadget_register_driver (struct usb_gadget_driver *driver);
814 * going away. If the controller is connected to a USB host, 816 * going away. If the controller is connected to a USB host,
815 * it will first disconnect(). The driver is also requested 817 * it will first disconnect(). The driver is also requested
816 * to unbind() and clean up any device state, before this procedure 818 * to unbind() and clean up any device state, before this procedure
817 * finally returns. 819 * finally returns. It's expected that the unbind() functions
820 * will in in exit sections, so may not be linked in some kernels.
818 * This function must be called in a context that can sleep. 821 * This function must be called in a context that can sleep.
819 */ 822 */
820int usb_gadget_unregister_driver (struct usb_gadget_driver *driver); 823int usb_gadget_unregister_driver (struct usb_gadget_driver *driver);
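[Editor's sketch, not part of the commit] Per the updated comments above, bind() is expected to live in an init section and unbind() in an exit section (so it may not be linked into every kernel). A skeleton gadget driver under those assumptions; all example_* names and the elided members are invented.

        #include <linux/module.h>
        #include <linux/init.h>
        #include <linux/usb_gadget.h>

        static int __init example_bind(struct usb_gadget *gadget)
        {
                /* allocate endpoints, descriptors, device state ... */
                return 0;
        }

        static void example_unbind(struct usb_gadget *gadget)
        {
                /* free per-gadget state; wrapped in __exit_p() below */
        }

        static struct usb_gadget_driver example_driver = {
                .function = "example",
                .bind     = example_bind,
                .unbind   = __exit_p(example_unbind),
                /* .speed, .setup, .disconnect, .driver elided */
        };

        static int __init example_init(void)
        {
                return usb_gadget_register_driver(&example_driver);
        }
        module_init(example_init);

        static void __exit example_exit(void)
        {
                usb_gadget_unregister_driver(&example_driver);
        }
        module_exit(example_exit);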
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 5208b12d5550..724cfbf54b8a 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -17,11 +17,12 @@
17#include <linux/time.h> /* need struct timeval */ 17#include <linux/time.h> /* need struct timeval */
18#include <linux/poll.h> 18#include <linux/poll.h>
19#include <linux/device.h> 19#include <linux/device.h>
20#include <linux/mutex.h>
20#endif 21#endif
21#include <linux/compiler.h> /* need __user */ 22#include <linux/compiler.h> /* need __user */
22 23
23 24
24#define OBSOLETE_OWNER 1 /* It will be removed for 2.6.15 */ 25#define OBSOLETE_OWNER 1 /* It will be removed for 2.6.17 */
25#define HAVE_V4L2 1 26#define HAVE_V4L2 1
26 27
27/* 28/*
@@ -48,6 +49,16 @@
48 49
49#ifdef __KERNEL__ 50#ifdef __KERNEL__
50 51
52/* Minor device allocation */
53#define MINOR_VFL_TYPE_GRABBER_MIN 0
54#define MINOR_VFL_TYPE_GRABBER_MAX 63
55#define MINOR_VFL_TYPE_RADIO_MIN 64
56#define MINOR_VFL_TYPE_RADIO_MAX 127
57#define MINOR_VFL_TYPE_VTX_MIN 192
58#define MINOR_VFL_TYPE_VTX_MAX 223
59#define MINOR_VFL_TYPE_VBI_MIN 224
60#define MINOR_VFL_TYPE_VBI_MAX 255
61
51#define VFL_TYPE_GRABBER 0 62#define VFL_TYPE_GRABBER 0
52#define VFL_TYPE_VBI 1 63#define VFL_TYPE_VBI 1
53#define VFL_TYPE_RADIO 2 64#define VFL_TYPE_RADIO 2
@@ -80,7 +91,7 @@ struct video_device
80 91
81 /* for videodev.c intenal usage -- please don't touch */ 92 /* for videodev.c intenal usage -- please don't touch */
82 int users; /* video_exclusive_{open|close} ... */ 93 int users; /* video_exclusive_{open|close} ... */
83 struct semaphore lock; /* ... helper function uses these */ 94 struct mutex lock; /* ... helper function uses these */
84 char devfs_name[64]; /* devfs */ 95 char devfs_name[64]; /* devfs */
85 struct class_device class_dev; /* sysfs */ 96 struct class_device class_dev; /* sysfs */
86}; 97};
@@ -952,13 +963,68 @@ struct v4l2_sliced_vbi_format
952 __u32 reserved[2]; /* must be zero */ 963 __u32 reserved[2]; /* must be zero */
953}; 964};
954 965
955#define V4L2_SLICED_TELETEXT_B (0x0001) 966/* Teletext World System Teletext
956#define V4L2_SLICED_VPS (0x0400) 967 (WST), defined on ITU-R BT.653-2 */
957#define V4L2_SLICED_CAPTION_525 (0x1000) 968#define V4L2_SLICED_TELETEXT_PAL_B (0x000001)
958#define V4L2_SLICED_WSS_625 (0x4000) 969#define V4L2_SLICED_TELETEXT_PAL_C (0x000002)
959 970#define V4L2_SLICED_TELETEXT_NTSC_B (0x000010)
960#define V4L2_SLICED_VBI_525 (V4L2_SLICED_CAPTION_525) 971#define V4L2_SLICED_TELETEXT_SECAM (0x000020)
961#define V4L2_SLICED_VBI_625 (V4L2_SLICED_TELETEXT_B | V4L2_SLICED_VPS | V4L2_SLICED_WSS_625) 972
973/* Teletext North American Broadcast Teletext Specification
974 (NABTS), defined on ITU-R BT.653-2 */
975#define V4L2_SLICED_TELETEXT_NTSC_C (0x000040)
976#define V4L2_SLICED_TELETEXT_NTSC_D (0x000080)
977
978/* Video Program System, defined on ETS 300 231*/
979#define V4L2_SLICED_VPS (0x000400)
980
981/* Closed Caption, defined on EIA-608 */
982#define V4L2_SLICED_CAPTION_525 (0x001000)
983#define V4L2_SLICED_CAPTION_625 (0x002000)
984
985/* Wide Screen System, defined on ITU-R BT1119.1 */
986#define V4L2_SLICED_WSS_625 (0x004000)
987
988/* Wide Screen System, defined on IEC 61880 */
989#define V4L2_SLICED_WSS_525 (0x008000)
990
991/* Vertical Interval Timecode (VITC), defined on SMPTE 12M */
992#define V4l2_SLICED_VITC_625 (0x010000)
993#define V4l2_SLICED_VITC_525 (0x020000)
994
995#define V4L2_SLICED_TELETEXT_B (V4L2_SLICED_TELETEXT_PAL_B |\
996 V4L2_SLICED_TELETEXT_NTSC_B)
997
998#define V4L2_SLICED_TELETEXT (V4L2_SLICED_TELETEXT_PAL_B |\
999 V4L2_SLICED_TELETEXT_PAL_C |\
1000 V4L2_SLICED_TELETEXT_SECAM |\
1001 V4L2_SLICED_TELETEXT_NTSC_B |\
1002 V4L2_SLICED_TELETEXT_NTSC_C |\
1003 V4L2_SLICED_TELETEXT_NTSC_D)
1004
1005#define V4L2_SLICED_CAPTION (V4L2_SLICED_CAPTION_525 |\
1006 V4L2_SLICED_CAPTION_625)
1007
1008#define V4L2_SLICED_WSS (V4L2_SLICED_WSS_525 |\
1009 V4L2_SLICED_WSS_625)
1010
1011#define V4L2_SLICED_VITC (V4L2_SLICED_VITC_525 |\
1012 V4L2_SLICED_VITC_625)
1013
1014#define V4L2_SLICED_VBI_525 (V4L2_SLICED_TELETEXT_NTSC_B |\
1015 V4L2_SLICED_TELETEXT_NTSC_C |\
1016 V4L2_SLICED_TELETEXT_NTSC_D |\
1017 V4L2_SLICED_CAPTION_525 |\
1018 V4L2_SLICED_WSS_525 |\
1019 V4l2_SLICED_VITC_525)
1020
1021#define V4L2_SLICED_VBI_625 (V4L2_SLICED_TELETEXT_PAL_B |\
1022 V4L2_SLICED_TELETEXT_PAL_C |\
1023 V4L2_SLICED_TELETEXT_SECAM |\
1024 V4L2_SLICED_VPS |\
1025 V4L2_SLICED_CAPTION_625 |\
1026 V4L2_SLICED_WSS_625 |\
1027 V4l2_SLICED_VITC_625)
962 1028
963struct v4l2_sliced_vbi_cap 1029struct v4l2_sliced_vbi_cap
964{ 1030{
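[Editor's sketch, not part of the commit] The expanded service bits above are combined into a service_set when configuring sliced VBI capture. A user-space sketch, assuming fd is already open on a device that supports sliced VBI; the driver will clear any services it cannot deliver.

        #include <string.h>
        #include <sys/ioctl.h>
        #include <linux/videodev2.h>

        static int example_setup_sliced_vbi(int fd)
        {
                struct v4l2_format fmt;

                memset(&fmt, 0, sizeof(fmt));
                fmt.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
                /* request PAL/B teletext and 625-line widescreen signalling */
                fmt.fmt.sliced.service_set = V4L2_SLICED_TELETEXT_PAL_B |
                                             V4L2_SLICED_WSS_625;
                return ioctl(fd, VIDIOC_S_FMT, &fmt);
        }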
diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
index 82fbb758e28f..6b42cc474c01 100644
--- a/include/linux/xfrm.h
+++ b/include/linux/xfrm.h
@@ -156,6 +156,10 @@ enum {
156 XFRM_MSG_FLUSHPOLICY, 156 XFRM_MSG_FLUSHPOLICY,
157#define XFRM_MSG_FLUSHPOLICY XFRM_MSG_FLUSHPOLICY 157#define XFRM_MSG_FLUSHPOLICY XFRM_MSG_FLUSHPOLICY
158 158
159 XFRM_MSG_NEWAE,
160#define XFRM_MSG_NEWAE XFRM_MSG_NEWAE
161 XFRM_MSG_GETAE,
162#define XFRM_MSG_GETAE XFRM_MSG_GETAE
159 __XFRM_MSG_MAX 163 __XFRM_MSG_MAX
160}; 164};
161#define XFRM_MSG_MAX (__XFRM_MSG_MAX - 1) 165#define XFRM_MSG_MAX (__XFRM_MSG_MAX - 1)
@@ -194,6 +198,21 @@ struct xfrm_encap_tmpl {
194 xfrm_address_t encap_oa; 198 xfrm_address_t encap_oa;
195}; 199};
196 200
201/* AEVENT flags */
202enum xfrm_ae_ftype_t {
203 XFRM_AE_UNSPEC,
204 XFRM_AE_RTHR=1, /* replay threshold*/
205 XFRM_AE_RVAL=2, /* replay value */
206 XFRM_AE_LVAL=4, /* lifetime value */
207 XFRM_AE_ETHR=8, /* expiry timer threshold */
208 XFRM_AE_CR=16, /* Event cause is replay update */
209 XFRM_AE_CE=32, /* Event cause is timer expiry */
210 XFRM_AE_CU=64, /* Event cause is policy update */
211 __XFRM_AE_MAX
212
213#define XFRM_AE_MAX (__XFRM_AE_MAX - 1)
214};
215
197/* Netlink message attributes. */ 216/* Netlink message attributes. */
198enum xfrm_attr_type_t { 217enum xfrm_attr_type_t {
199 XFRMA_UNSPEC, 218 XFRMA_UNSPEC,
@@ -205,6 +224,10 @@ enum xfrm_attr_type_t {
205 XFRMA_SA, 224 XFRMA_SA,
206 XFRMA_POLICY, 225 XFRMA_POLICY,
207 XFRMA_SEC_CTX, /* struct xfrm_sec_ctx */ 226 XFRMA_SEC_CTX, /* struct xfrm_sec_ctx */
227 XFRMA_LTIME_VAL,
228 XFRMA_REPLAY_VAL,
229 XFRMA_REPLAY_THRESH,
230 XFRMA_ETIMER_THRESH,
208 __XFRMA_MAX 231 __XFRMA_MAX
209 232
210#define XFRMA_MAX (__XFRMA_MAX - 1) 233#define XFRMA_MAX (__XFRMA_MAX - 1)
@@ -235,6 +258,11 @@ struct xfrm_usersa_id {
235 __u8 proto; 258 __u8 proto;
236}; 259};
237 260
261struct xfrm_aevent_id {
262 struct xfrm_usersa_id sa_id;
263 __u32 flags;
264};
265
238struct xfrm_userspi_info { 266struct xfrm_userspi_info {
239 struct xfrm_usersa_info info; 267 struct xfrm_usersa_info info;
240 __u32 min; 268 __u32 min;
@@ -306,6 +334,8 @@ enum xfrm_nlgroups {
306#define XFRMNLGRP_SA XFRMNLGRP_SA 334#define XFRMNLGRP_SA XFRMNLGRP_SA
307 XFRMNLGRP_POLICY, 335 XFRMNLGRP_POLICY,
308#define XFRMNLGRP_POLICY XFRMNLGRP_POLICY 336#define XFRMNLGRP_POLICY XFRMNLGRP_POLICY
337 XFRMNLGRP_AEVENTS,
338#define XFRMNLGRP_AEVENTS XFRMNLGRP_AEVENTS
309 __XFRMNLGRP_MAX 339 __XFRMNLGRP_MAX
310}; 340};
311#define XFRMNLGRP_MAX (__XFRMNLGRP_MAX - 1) 341#define XFRMNLGRP_MAX (__XFRMNLGRP_MAX - 1)
diff --git a/include/media/ir-common.h b/include/media/ir-common.h
index ad3e9bb670c3..302d5b3946e7 100644
--- a/include/media/ir-common.h
+++ b/include/media/ir-common.h
@@ -47,13 +47,6 @@ struct ir_input_state {
47 int keypressed; /* current state */ 47 int keypressed; /* current state */
48}; 48};
49 49
50extern IR_KEYTAB_TYPE ir_codes_rc5_tv[IR_KEYTAB_SIZE];
51extern IR_KEYTAB_TYPE ir_codes_winfast[IR_KEYTAB_SIZE];
52extern IR_KEYTAB_TYPE ir_codes_pinnacle[IR_KEYTAB_SIZE];
53extern IR_KEYTAB_TYPE ir_codes_empty[IR_KEYTAB_SIZE];
54extern IR_KEYTAB_TYPE ir_codes_hauppauge_new[IR_KEYTAB_SIZE];
55extern IR_KEYTAB_TYPE ir_codes_pixelview[IR_KEYTAB_SIZE];
56
57void ir_input_init(struct input_dev *dev, struct ir_input_state *ir, 50void ir_input_init(struct input_dev *dev, struct ir_input_state *ir,
58 int ir_type, IR_KEYTAB_TYPE *ir_codes); 51 int ir_type, IR_KEYTAB_TYPE *ir_codes);
59void ir_input_nokey(struct input_dev *dev, struct ir_input_state *ir); 52void ir_input_nokey(struct input_dev *dev, struct ir_input_state *ir);
@@ -64,6 +57,39 @@ int ir_dump_samples(u32 *samples, int count);
64int ir_decode_biphase(u32 *samples, int count, int low, int high); 57int ir_decode_biphase(u32 *samples, int count, int low, int high);
65int ir_decode_pulsedistance(u32 *samples, int count, int low, int high); 58int ir_decode_pulsedistance(u32 *samples, int count, int low, int high);
66 59
60/* Keymaps to be used by other modules */
61
62extern IR_KEYTAB_TYPE ir_codes_empty[IR_KEYTAB_SIZE];
63extern IR_KEYTAB_TYPE ir_codes_avermedia[IR_KEYTAB_SIZE];
64extern IR_KEYTAB_TYPE ir_codes_avermedia_dvbt[IR_KEYTAB_SIZE];
65extern IR_KEYTAB_TYPE ir_codes_apac_viewcomp[IR_KEYTAB_SIZE];
66extern IR_KEYTAB_TYPE ir_codes_pixelview[IR_KEYTAB_SIZE];
67extern IR_KEYTAB_TYPE ir_codes_nebula[IR_KEYTAB_SIZE];
68extern IR_KEYTAB_TYPE ir_codes_dntv_live_dvb_t[IR_KEYTAB_SIZE];
69extern IR_KEYTAB_TYPE ir_codes_iodata_bctv7e[IR_KEYTAB_SIZE];
70extern IR_KEYTAB_TYPE ir_codes_adstech_dvb_t_pci[IR_KEYTAB_SIZE];
71extern IR_KEYTAB_TYPE ir_codes_msi_tvanywhere[IR_KEYTAB_SIZE];
72extern IR_KEYTAB_TYPE ir_codes_cinergy_1400[IR_KEYTAB_SIZE];
73extern IR_KEYTAB_TYPE ir_codes_avertv_303[IR_KEYTAB_SIZE];
74extern IR_KEYTAB_TYPE ir_codes_dntv_live_dvbt_pro[IR_KEYTAB_SIZE];
75extern IR_KEYTAB_TYPE ir_codes_em_terratec[IR_KEYTAB_SIZE];
76extern IR_KEYTAB_TYPE ir_codes_em_pinnacle_usb[IR_KEYTAB_SIZE];
77extern IR_KEYTAB_TYPE ir_codes_flyvideo[IR_KEYTAB_SIZE];
78extern IR_KEYTAB_TYPE ir_codes_flydvb[IR_KEYTAB_SIZE];
79extern IR_KEYTAB_TYPE ir_codes_cinergy[IR_KEYTAB_SIZE];
80extern IR_KEYTAB_TYPE ir_codes_eztv[IR_KEYTAB_SIZE];
81extern IR_KEYTAB_TYPE ir_codes_avermedia[IR_KEYTAB_SIZE];
82extern IR_KEYTAB_TYPE ir_codes_videomate_tv_pvr[IR_KEYTAB_SIZE];
83extern IR_KEYTAB_TYPE ir_codes_manli[IR_KEYTAB_SIZE];
84extern IR_KEYTAB_TYPE ir_codes_gotview7135[IR_KEYTAB_SIZE];
85extern IR_KEYTAB_TYPE ir_codes_purpletv[IR_KEYTAB_SIZE];
86extern IR_KEYTAB_TYPE ir_codes_pctv_sedna[IR_KEYTAB_SIZE];
87extern IR_KEYTAB_TYPE ir_codes_pv951[IR_KEYTAB_SIZE];
88extern IR_KEYTAB_TYPE ir_codes_rc5_tv[IR_KEYTAB_SIZE];
89extern IR_KEYTAB_TYPE ir_codes_winfast[IR_KEYTAB_SIZE];
90extern IR_KEYTAB_TYPE ir_codes_pinnacle[IR_KEYTAB_SIZE];
91extern IR_KEYTAB_TYPE ir_codes_hauppauge_new[IR_KEYTAB_SIZE];
92
67#endif 93#endif
68 94
69/* 95/*
diff --git a/include/media/saa7146.h b/include/media/saa7146.h
index 2bc634fcb7bb..fee579f10b32 100644
--- a/include/media/saa7146.h
+++ b/include/media/saa7146.h
@@ -11,6 +11,8 @@
11#include <linux/i2c.h> /* for i2c subsystem */ 11#include <linux/i2c.h> /* for i2c subsystem */
12#include <asm/io.h> /* for accessing devices */ 12#include <asm/io.h> /* for accessing devices */
13#include <linux/stringify.h> 13#include <linux/stringify.h>
14#include <linux/mutex.h>
15
14#include <linux/vmalloc.h> /* for vmalloc() */ 16#include <linux/vmalloc.h> /* for vmalloc() */
15#include <linux/mm.h> /* for vmalloc_to_page() */ 17#include <linux/mm.h> /* for vmalloc_to_page() */
16 18
@@ -112,7 +114,7 @@ struct saa7146_dev
112 114
113 /* different device locks */ 115 /* different device locks */
114 spinlock_t slock; 116 spinlock_t slock;
115 struct semaphore lock; 117 struct mutex lock;
116 118
117 unsigned char __iomem *mem; /* pointer to mapped IO memory */ 119 unsigned char __iomem *mem; /* pointer to mapped IO memory */
118 int revision; /* chip revision; needed for bug-workarounds*/ 120 int revision; /* chip revision; needed for bug-workarounds*/
@@ -133,15 +135,16 @@ struct saa7146_dev
133 void (*vv_callback)(struct saa7146_dev *dev, unsigned long status); 135 void (*vv_callback)(struct saa7146_dev *dev, unsigned long status);
134 136
135 /* i2c-stuff */ 137 /* i2c-stuff */
136 struct semaphore i2c_lock; 138 struct mutex i2c_lock;
137 u32 i2c_bitrate; 139
138 struct saa7146_dma d_i2c; /* pointer to i2c memory */ 140 u32 i2c_bitrate;
139 wait_queue_head_t i2c_wq; 141 struct saa7146_dma d_i2c; /* pointer to i2c memory */
140 int i2c_op; 142 wait_queue_head_t i2c_wq;
143 int i2c_op;
141 144
142 /* memories */ 145 /* memories */
143 struct saa7146_dma d_rps0; 146 struct saa7146_dma d_rps0;
144 struct saa7146_dma d_rps1; 147 struct saa7146_dma d_rps1;
145}; 148};
146 149
147/* from saa7146_i2c.c */ 150/* from saa7146_i2c.c */
@@ -150,7 +153,7 @@ int saa7146_i2c_transfer(struct saa7146_dev *saa, const struct i2c_msg *msgs, in
150 153
151/* from saa7146_core.c */ 154/* from saa7146_core.c */
152extern struct list_head saa7146_devices; 155extern struct list_head saa7146_devices;
153extern struct semaphore saa7146_devices_lock; 156extern struct mutex saa7146_devices_lock;
154int saa7146_register_extension(struct saa7146_extension*); 157int saa7146_register_extension(struct saa7146_extension*);
155int saa7146_unregister_extension(struct saa7146_extension*); 158int saa7146_unregister_extension(struct saa7146_extension*);
156struct saa7146_format* format_by_fourcc(struct saa7146_dev *dev, int fourcc); 159struct saa7146_format* format_by_fourcc(struct saa7146_dev *dev, int fourcc);
diff --git a/include/media/tuner-types.h b/include/media/tuner-types.h
index 15821ab14a9e..ad9c171bfa07 100644
--- a/include/media/tuner-types.h
+++ b/include/media/tuner-types.h
@@ -14,6 +14,7 @@ enum param_type {
14 14
15struct tuner_range { 15struct tuner_range {
16 unsigned short limit; 16 unsigned short limit;
17 unsigned char config;
17 unsigned char cb; 18 unsigned char cb;
18}; 19};
19 20
@@ -38,7 +39,6 @@ struct tuner_params {
38 * static unless the control byte was sent first. 39 * static unless the control byte was sent first.
39 */ 40 */
40 unsigned int cb_first_if_lower_freq:1; 41 unsigned int cb_first_if_lower_freq:1;
41 unsigned char config; /* to be moved into struct tuner_range for dvb-pll merge */
42 42
43 unsigned int count; 43 unsigned int count;
44 struct tuner_range *ranges; 44 struct tuner_range *ranges;
@@ -46,6 +46,7 @@ struct tuner_params {
46 46
47struct tunertype { 47struct tunertype {
48 char *name; 48 char *name;
49 unsigned int count;
49 struct tuner_params *params; 50 struct tuner_params *params;
50}; 51};
51 52
diff --git a/include/media/tuner.h b/include/media/tuner.h
index a5beeac495c7..017fed7d5e4d 100644
--- a/include/media/tuner.h
+++ b/include/media/tuner.h
@@ -110,12 +110,15 @@
110 110
111#define TUNER_LG_TDVS_H062F 64 /* DViCO FusionHDTV 5 */ 111#define TUNER_LG_TDVS_H062F 64 /* DViCO FusionHDTV 5 */
112#define TUNER_YMEC_TVF66T5_B_DFF 65 /* Acorp Y878F */ 112#define TUNER_YMEC_TVF66T5_B_DFF 65 /* Acorp Y878F */
113#define TUNER_LG_NTSC_TALN_MINI 66 113#define TUNER_LG_TALN 66
114#define TUNER_PHILIPS_TD1316 67 114#define TUNER_PHILIPS_TD1316 67
115 115
116#define TUNER_PHILIPS_TUV1236D 68 /* ATI HDTV Wonder */ 116#define TUNER_PHILIPS_TUV1236D 68 /* ATI HDTV Wonder */
117#define TUNER_TNF_5335MF 69 /* Sabrent Bt848 */ 117#define TUNER_TNF_5335MF 69 /* Sabrent Bt848 */
118#define TUNER_SAMSUNG_TCPN_2121P30A 70 /* Hauppauge PVR-500MCE NTSC */ 118#define TUNER_SAMSUNG_TCPN_2121P30A 70 /* Hauppauge PVR-500MCE NTSC */
119#define TUNER_XCEIVE_XC3028 71
120
121#define TUNER_THOMSON_FE6600 72 /* DViCO FusionHDTV DVB-T Hybrid */
119 122
120/* tv card specific */ 123/* tv card specific */
121#define TDA9887_PRESENT (1<<0) 124#define TDA9887_PRESENT (1<<0)
@@ -209,6 +212,7 @@ struct tuner {
209extern unsigned const int tuner_count; 212extern unsigned const int tuner_count;
210 213
211extern int microtune_init(struct i2c_client *c); 214extern int microtune_init(struct i2c_client *c);
215extern int xc3028_init(struct i2c_client *c);
212extern int tda8290_init(struct i2c_client *c); 216extern int tda8290_init(struct i2c_client *c);
213extern int tda8290_probe(struct i2c_client *c); 217extern int tda8290_probe(struct i2c_client *c);
214extern int tea5767_tuner_init(struct i2c_client *c); 218extern int tea5767_tuner_init(struct i2c_client *c);
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index d4030a7e16e0..2360453e7496 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -58,6 +58,9 @@
58/* Prints the ioctl in a human-readable format */ 58/* Prints the ioctl in a human-readable format */
59extern void v4l_printk_ioctl(unsigned int cmd); 59extern void v4l_printk_ioctl(unsigned int cmd);
60 60
61/* Prints the ioctl and arg in a human-readable format */
62extern void v4l_printk_ioctl_arg(char *s,unsigned int cmd, void *arg);
63
61/* Use this macro for non-I2C drivers. Pass the driver name as the first arg. */ 64/* Use this macro for non-I2C drivers. Pass the driver name as the first arg. */
62#define v4l_print_ioctl(name, cmd) \ 65#define v4l_print_ioctl(name, cmd) \
63 do { \ 66 do { \
@@ -100,6 +103,7 @@ enum v4l2_chip_ident {
100 V4L2_IDENT_UNKNOWN = 0, 103 V4L2_IDENT_UNKNOWN = 0,
101 104
102 /* module saa7115: reserved range 100-149 */ 105 /* module saa7115: reserved range 100-149 */
106 V4L2_IDENT_SAA7113 = 103,
103 V4L2_IDENT_SAA7114 = 104, 107 V4L2_IDENT_SAA7114 = 104,
104 V4L2_IDENT_SAA7115 = 105, 108 V4L2_IDENT_SAA7115 = 105,
105 109
@@ -115,12 +119,15 @@ enum v4l2_chip_ident {
115}; 119};
116 120
117/* audio ioctls */ 121/* audio ioctls */
118/* v4l device was opened in Radio mode */ 122
123/* v4l device was opened in Radio mode, to be replaced by VIDIOC_INT_S_TUNER_MODE */
119#define AUDC_SET_RADIO _IO('d',88) 124#define AUDC_SET_RADIO _IO('d',88)
120/* select from TV,radio,extern,MUTE */ 125
126/* select from TV,radio,extern,MUTE, to be replaced with VIDIOC_INT_S_AUDIO_ROUTING */
121#define AUDC_SET_INPUT _IOW('d',89,int) 127#define AUDC_SET_INPUT _IOW('d',89,int)
122 128
123/* msp3400 ioctl: will be removed in the near future */ 129/* msp3400 ioctl: will be removed in the near future, to be replaced by
130 VIDIOC_INT_S_AUDIO_ROUTING. */
124struct msp_matrix { 131struct msp_matrix {
125 int input; 132 int input;
126 int output; 133 int output;
@@ -128,12 +135,25 @@ struct msp_matrix {
128#define MSP_SET_MATRIX _IOW('m',17,struct msp_matrix) 135#define MSP_SET_MATRIX _IOW('m',17,struct msp_matrix)
129 136
130/* tuner ioctls */ 137/* tuner ioctls */
138
131/* Sets tuner type and its I2C addr */ 139/* Sets tuner type and its I2C addr */
132#define TUNER_SET_TYPE_ADDR _IOW('d',90,int) 140#define TUNER_SET_TYPE_ADDR _IOW('d', 90, int)
133/* Puts tuner on powersaving state, disabling it, except for i2c */ 141
134#define TUNER_SET_STANDBY _IOW('d',91,int) 142/* Puts tuner on powersaving state, disabling it, except for i2c. To be replaced
143 by VIDIOC_INT_S_STANDBY. */
144#define TUNER_SET_STANDBY _IOW('d', 91, int)
145
135/* Sets tda9887 specific stuff, like port1, port2 and qss */ 146/* Sets tda9887 specific stuff, like port1, port2 and qss */
136#define TDA9887_SET_CONFIG _IOW('d',92,int) 147#define TDA9887_SET_CONFIG _IOW('d', 92, int)
148
149/* Switch the tuner to a specific tuner mode. Replacement of AUDC_SET_RADIO */
150#define VIDIOC_INT_S_TUNER_MODE _IOW('d', 93, enum v4l2_tuner_type)
151
152/* Generic standby command. Passing -1 (all bits set to 1) will put the whole
153 chip into standby mode, value 0 will make the chip fully active. Specific
154 bits can be used by certain chips to enable/disable specific subsystems.
155 Replacement of TUNER_SET_STANDBY. */
156#define VIDIOC_INT_S_STANDBY _IOW('d', 94, u32)
137 157
138/* only implemented if CONFIG_VIDEO_ADV_DEBUG is defined */ 158/* only implemented if CONFIG_VIDEO_ADV_DEBUG is defined */
139#define VIDIOC_INT_S_REGISTER _IOR ('d', 100, struct v4l2_register) 159#define VIDIOC_INT_S_REGISTER _IOR ('d', 100, struct v4l2_register)
@@ -160,7 +180,8 @@ struct msp_matrix {
160 180
161/* Used to generate VBI signals on a video signal. v4l2_sliced_vbi_data is 181/* Used to generate VBI signals on a video signal. v4l2_sliced_vbi_data is
162 filled with the data packets that should be output. Note that if you set 182 filled with the data packets that should be output. Note that if you set
163 the line field to 0, then that VBI signal is disabled. */ 183 the line field to 0, then that VBI signal is disabled. If no
184 valid VBI data was found, then the type field is set to 0 on return. */
164#define VIDIOC_INT_S_VBI_DATA _IOW ('d', 105, struct v4l2_sliced_vbi_data) 185#define VIDIOC_INT_S_VBI_DATA _IOW ('d', 105, struct v4l2_sliced_vbi_data)
165 186
166/* Used to obtain the sliced VBI packet from a readback register. Not all 187/* Used to obtain the sliced VBI packet from a readback register. Not all
@@ -168,11 +189,11 @@ struct msp_matrix {
168 register contains invalid or erroneous data -EIO is returned. Note that 189 register contains invalid or erroneous data -EIO is returned. Note that
169 you must fill in the 'id' member and the 'field' member (to determine 190 you must fill in the 'id' member and the 'field' member (to determine
170 whether CC data from the first or second field should be obtained). */ 191 whether CC data from the first or second field should be obtained). */
171#define VIDIOC_INT_G_VBI_DATA _IOWR('d', 106, struct v4l2_sliced_vbi_data *) 192#define VIDIOC_INT_G_VBI_DATA _IOWR('d', 106, struct v4l2_sliced_vbi_data)
172 193
173/* Returns the chip identifier or V4L2_IDENT_UNKNOWN if no identification can 194/* Returns the chip identifier or V4L2_IDENT_UNKNOWN if no identification can
174 be made. */ 195 be made. */
175#define VIDIOC_INT_G_CHIP_IDENT _IOR ('d', 107, enum v4l2_chip_ident *) 196#define VIDIOC_INT_G_CHIP_IDENT _IOR ('d', 107, enum v4l2_chip_ident)
176 197
177/* Sets I2S speed in bps. This is used to provide a standard way to select I2S 198/* Sets I2S speed in bps. This is used to provide a standard way to select I2S
178 clock used by driving digital audio streams at some board designs. 199 clock used by driving digital audio streams at some board designs.
@@ -180,4 +201,25 @@ struct msp_matrix {
180 If the frequency is not supported, then -EINVAL is returned. */ 201 If the frequency is not supported, then -EINVAL is returned. */
181#define VIDIOC_INT_I2S_CLOCK_FREQ _IOW ('d', 108, u32) 202#define VIDIOC_INT_I2S_CLOCK_FREQ _IOW ('d', 108, u32)
182 203
204/* Routing definition, device dependent. It specifies which inputs (if any)
205 should be routed to which outputs (if any). */
206struct v4l2_routing {
207 u32 input;
208 u32 output;
209};
210
211/* These internal commands should be used to define the inputs and outputs
212 of an audio/video chip. They will replace AUDC_SET_INPUT.
213 The v4l2 API commands VIDIOC_S/G_INPUT, VIDIOC_S/G_OUTPUT,
214 VIDIOC_S/G_AUDIO and VIDIOC_S/G_AUDOUT are meant to be used by the
215 user. Internally these commands should be used to switch inputs/outputs
216 because only the driver knows how to map a 'Television' input to the precise
217 input/output routing of an A/D converter, or a DSP, or a video digitizer.
218 These four commands should only be sent directly to an i2c device, they
219 should not be broadcast as the routing is very device specific. */
220#define VIDIOC_INT_S_AUDIO_ROUTING _IOW ('d', 109, struct v4l2_routing)
221#define VIDIOC_INT_G_AUDIO_ROUTING _IOR ('d', 110, struct v4l2_routing)
222#define VIDIOC_INT_S_VIDEO_ROUTING _IOW ('d', 111, struct v4l2_routing)
223#define VIDIOC_INT_G_VIDEO_ROUTING _IOR ('d', 112, struct v4l2_routing)
224
183#endif /* V4L2_COMMON_H_ */ 225#endif /* V4L2_COMMON_H_ */
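[Editor's sketch, not part of the commit] The routing ioctls above are meant to be sent by a bridge driver directly to one i2c sub-device, never broadcast. A hedged sketch of selecting a decoder input with struct v4l2_routing; the per-client command() dispatch and the input/output numbers are assumptions and entirely chip specific.

        static void example_select_composite(struct i2c_client *decoder)
        {
                struct v4l2_routing route = {
                        .input  = 2,    /* chip-specific input pin, made up here */
                        .output = 0,    /* default output */
                };

                if (decoder->driver && decoder->driver->command)
                        decoder->driver->command(decoder,
                                                 VIDIOC_INT_S_VIDEO_ROUTING,
                                                 &route);
        }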
diff --git a/include/media/video-buf-dvb.h b/include/media/video-buf-dvb.h
index ad0a07a3a895..b78d90fe629f 100644
--- a/include/media/video-buf-dvb.h
+++ b/include/media/video-buf-dvb.h
@@ -11,7 +11,7 @@ struct videobuf_dvb {
11 struct videobuf_queue dvbq; 11 struct videobuf_queue dvbq;
12 12
13 /* video-buf-dvb state info */ 13 /* video-buf-dvb state info */
14 struct semaphore lock; 14 struct mutex lock;
15 struct task_struct *thread; 15 struct task_struct *thread;
16 int nfeeds; 16 int nfeeds;
17 17
diff --git a/include/media/video-buf.h b/include/media/video-buf.h
index 8ecfd78e0027..d90dec5484ee 100644
--- a/include/media/video-buf.h
+++ b/include/media/video-buf.h
@@ -177,7 +177,7 @@ struct videobuf_queue_ops {
177}; 177};
178 178
179struct videobuf_queue { 179struct videobuf_queue {
180 struct semaphore lock; 180 struct mutex lock;
181 spinlock_t *irqlock; 181 spinlock_t *irqlock;
182 struct pci_dev *pci; 182 struct pci_dev *pci;
183 183
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index bfc1779fc753..427dac94bc7e 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -4,6 +4,7 @@
4#include <linux/config.h> 4#include <linux/config.h>
5#include <linux/socket.h> 5#include <linux/socket.h>
6#include <linux/un.h> 6#include <linux/un.h>
7#include <linux/mutex.h>
7#include <net/sock.h> 8#include <net/sock.h>
8 9
9extern void unix_inflight(struct file *fp); 10extern void unix_inflight(struct file *fp);
@@ -71,7 +72,7 @@ struct unix_sock {
71 struct unix_address *addr; 72 struct unix_address *addr;
72 struct dentry *dentry; 73 struct dentry *dentry;
73 struct vfsmount *mnt; 74 struct vfsmount *mnt;
74 struct semaphore readsem; 75 struct mutex readlock;
75 struct sock *peer; 76 struct sock *peer;
76 struct sock *other; 77 struct sock *other;
77 struct sock *gc_tree; 78 struct sock *gc_tree;
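
The readsem -> readlock change is one of several semaphore-to-mutex conversions in this merge (videobuf above, xfrm_cfg_mutex further down). The C-side conversion is mechanical; a sketch, with an invented helper name, of what a user of the field looks like after the switch:

    #include <linux/mutex.h>

    /* Before: init_MUTEX(&u->readsem); down(&u->readsem); ... up(&u->readsem);
     * After, matching the new field above: */
    static void sketch_unix_read_section(struct unix_sock *u)
    {
            mutex_lock(&u->readlock);       /* initialised with mutex_init()
                                             * at socket creation time */
            /* ... dequeue from the receive queue ... */
            mutex_unlock(&u->readlock);
    }
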
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 2250a18b0cbb..d052b221dbcd 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -110,8 +110,15 @@ enum {
110enum { 110enum {
111 AX25_PROTO_STD_SIMPLEX, 111 AX25_PROTO_STD_SIMPLEX,
112 AX25_PROTO_STD_DUPLEX, 112 AX25_PROTO_STD_DUPLEX,
113#ifdef CONFIG_AX25_DAMA_SLAVE
113 AX25_PROTO_DAMA_SLAVE, 114 AX25_PROTO_DAMA_SLAVE,
114 AX25_PROTO_DAMA_MASTER 115#ifdef CONFIG_AX25_DAMA_MASTER
116 AX25_PROTO_DAMA_MASTER,
117#define AX25_PROTO_MAX AX25_PROTO_DAMA_MASTER
118#endif
119#endif
120 __AX25_PROTO_MAX,
 121 AX25_PROTO_MAX = __AX25_PROTO_MAX - 1
115}; 122};
116 123
117enum { 124enum {
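
The reworked enum keeps AX25_PROTO_MAX meaningful whichever DAMA options are configured: the __AX25_PROTO_MAX sentinel always follows the last compiled-in value (or the #define overrides it when DAMA master is enabled). A sketch of the range check this enables; the helper is illustrative:

    /* Valid protocol values are 0 .. AX25_PROTO_MAX inclusive,
     * regardless of the CONFIG_AX25_DAMA_* settings. */
    static int sketch_ax25_proto_valid(unsigned int proto)
    {
            return proto <= AX25_PROTO_MAX;
    }
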
diff --git a/include/net/dn.h b/include/net/dn.h
index a4b6168e1e25..465b78302782 100644
--- a/include/net/dn.h
+++ b/include/net/dn.h
@@ -6,10 +6,8 @@
6#include <net/tcp.h> 6#include <net/tcp.h>
7#include <asm/byteorder.h> 7#include <asm/byteorder.h>
8 8
9typedef unsigned short dn_address; 9#define dn_ntohs(x) le16_to_cpu(x)
10 10#define dn_htons(x) cpu_to_le16(x)
11#define dn_ntohs(x) le16_to_cpu((unsigned short)(x))
12#define dn_htons(x) cpu_to_le16((unsigned short)(x))
13 11
14struct dn_scp /* Session Control Port */ 12struct dn_scp /* Session Control Port */
15{ 13{
@@ -31,36 +29,36 @@ struct dn_scp /* Session Control Port */
31#define DN_CL 15 /* Closed */ 29#define DN_CL 15 /* Closed */
32#define DN_CN 16 /* Closed Notification */ 30#define DN_CN 16 /* Closed Notification */
33 31
34 unsigned short addrloc; 32 __le16 addrloc;
35 unsigned short addrrem; 33 __le16 addrrem;
36 unsigned short numdat; 34 __u16 numdat;
37 unsigned short numoth; 35 __u16 numoth;
38 unsigned short numoth_rcv; 36 __u16 numoth_rcv;
39 unsigned short numdat_rcv; 37 __u16 numdat_rcv;
40 unsigned short ackxmt_dat; 38 __u16 ackxmt_dat;
41 unsigned short ackxmt_oth; 39 __u16 ackxmt_oth;
42 unsigned short ackrcv_dat; 40 __u16 ackrcv_dat;
43 unsigned short ackrcv_oth; 41 __u16 ackrcv_oth;
44 unsigned char flowrem_sw; 42 __u8 flowrem_sw;
45 unsigned char flowloc_sw; 43 __u8 flowloc_sw;
46#define DN_SEND 2 44#define DN_SEND 2
47#define DN_DONTSEND 1 45#define DN_DONTSEND 1
48#define DN_NOCHANGE 0 46#define DN_NOCHANGE 0
49 unsigned short flowrem_dat; 47 __u16 flowrem_dat;
50 unsigned short flowrem_oth; 48 __u16 flowrem_oth;
51 unsigned short flowloc_dat; 49 __u16 flowloc_dat;
52 unsigned short flowloc_oth; 50 __u16 flowloc_oth;
53 unsigned char services_rem; 51 __u8 services_rem;
54 unsigned char services_loc; 52 __u8 services_loc;
55 unsigned char info_rem; 53 __u8 info_rem;
56 unsigned char info_loc; 54 __u8 info_loc;
57 55
58 unsigned short segsize_rem; 56 __u16 segsize_rem;
59 unsigned short segsize_loc; 57 __u16 segsize_loc;
60 58
61 unsigned char nonagle; 59 __u8 nonagle;
62 unsigned char multi_ireq; 60 __u8 multi_ireq;
63 unsigned char accept_mode; 61 __u8 accept_mode;
64 unsigned long seg_total; /* Running total of current segment */ 62 unsigned long seg_total; /* Running total of current segment */
65 63
66 struct optdata_dn conndata_in; 64 struct optdata_dn conndata_in;
@@ -160,40 +158,41 @@ static inline struct dn_scp *DN_SK(struct sock *sk)
160 */ 158 */
161#define DN_SKB_CB(skb) ((struct dn_skb_cb *)(skb)->cb) 159#define DN_SKB_CB(skb) ((struct dn_skb_cb *)(skb)->cb)
162struct dn_skb_cb { 160struct dn_skb_cb {
163 unsigned short dst; 161 __le16 dst;
164 unsigned short src; 162 __le16 src;
165 unsigned short hops; 163 __u16 hops;
166 unsigned short dst_port; 164 __le16 dst_port;
167 unsigned short src_port; 165 __le16 src_port;
168 unsigned char services; 166 __u8 services;
169 unsigned char info; 167 __u8 info;
170 unsigned char rt_flags; 168 __u8 rt_flags;
171 unsigned char nsp_flags; 169 __u8 nsp_flags;
172 unsigned short segsize; 170 __u16 segsize;
173 unsigned short segnum; 171 __u16 segnum;
174 unsigned short xmit_count; 172 __u16 xmit_count;
175 unsigned long stamp; 173 unsigned long stamp;
176 int iif; 174 int iif;
177}; 175};
178 176
179static inline dn_address dn_eth2dn(unsigned char *ethaddr) 177static inline __le16 dn_eth2dn(unsigned char *ethaddr)
180{ 178{
181 return ethaddr[4] | (ethaddr[5] << 8); 179 return dn_htons(ethaddr[4] | (ethaddr[5] << 8));
182} 180}
183 181
184static inline dn_address dn_saddr2dn(struct sockaddr_dn *saddr) 182static inline __le16 dn_saddr2dn(struct sockaddr_dn *saddr)
185{ 183{
186 return *(dn_address *)saddr->sdn_nodeaddr; 184 return *(__le16 *)saddr->sdn_nodeaddr;
187} 185}
188 186
189static inline void dn_dn2eth(unsigned char *ethaddr, dn_address addr) 187static inline void dn_dn2eth(unsigned char *ethaddr, __le16 addr)
190{ 188{
189 __u16 a = dn_ntohs(addr);
191 ethaddr[0] = 0xAA; 190 ethaddr[0] = 0xAA;
192 ethaddr[1] = 0x00; 191 ethaddr[1] = 0x00;
193 ethaddr[2] = 0x04; 192 ethaddr[2] = 0x04;
194 ethaddr[3] = 0x00; 193 ethaddr[3] = 0x00;
195 ethaddr[4] = (unsigned char)(addr & 0xff); 194 ethaddr[4] = (__u8)(a & 0xff);
196 ethaddr[5] = (unsigned char)(addr >> 8); 195 ethaddr[5] = (__u8)(a >> 8);
197} 196}
198 197
199static inline void dn_sk_ports_copy(struct flowi *fl, struct dn_scp *scp) 198static inline void dn_sk_ports_copy(struct flowi *fl, struct dn_scp *scp)
@@ -202,7 +201,7 @@ static inline void dn_sk_ports_copy(struct flowi *fl, struct dn_scp *scp)
202 fl->uli_u.dnports.dport = scp->addrrem; 201 fl->uli_u.dnports.dport = scp->addrrem;
203 fl->uli_u.dnports.objnum = scp->addr.sdn_objnum; 202 fl->uli_u.dnports.objnum = scp->addr.sdn_objnum;
204 if (fl->uli_u.dnports.objnum == 0) { 203 if (fl->uli_u.dnports.objnum == 0) {
205 fl->uli_u.dnports.objnamel = scp->addr.sdn_objnamel; 204 fl->uli_u.dnports.objnamel = (__u8)dn_ntohs(scp->addr.sdn_objnamel);
206 memcpy(fl->uli_u.dnports.objname, scp->addr.sdn_objname, 16); 205 memcpy(fl->uli_u.dnports.objname, scp->addr.sdn_objname, 16);
207 } 206 }
208} 207}
@@ -217,7 +216,7 @@ extern unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu);
217extern struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr); 216extern struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr);
218extern struct sock *dn_find_by_skb(struct sk_buff *skb); 217extern struct sock *dn_find_by_skb(struct sk_buff *skb);
219#define DN_ASCBUF_LEN 9 218#define DN_ASCBUF_LEN 9
220extern char *dn_addr2asc(dn_address, char *); 219extern char *dn_addr2asc(__u16, char *);
221extern int dn_destroy_timer(struct sock *sk); 220extern int dn_destroy_timer(struct sock *sk);
222 221
223extern int dn_sockaddr2username(struct sockaddr_dn *addr, unsigned char *buf, unsigned char type); 222extern int dn_sockaddr2username(struct sockaddr_dn *addr, unsigned char *buf, unsigned char type);
@@ -226,7 +225,7 @@ extern int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn
226extern void dn_start_slow_timer(struct sock *sk); 225extern void dn_start_slow_timer(struct sock *sk);
227extern void dn_stop_slow_timer(struct sock *sk); 226extern void dn_stop_slow_timer(struct sock *sk);
228 227
229extern dn_address decnet_address; 228extern __le16 decnet_address;
230extern int decnet_debug_level; 229extern int decnet_debug_level;
231extern int decnet_time_wait; 230extern int decnet_time_wait;
232extern int decnet_dn_count; 231extern int decnet_dn_count;
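
Most of this file's churn is one annotation change applied everywhere: on-wire 16-bit DECnet values become __le16, host-order counters become __u16, and dn_htons()/dn_ntohs() (now plain cpu_to_le16/le16_to_cpu wrappers) mark every crossing between the two. A small sketch of the idiom, with example address values, that sparse can type-check once endian checking is enabled:

    #include <linux/types.h>
    #include <net/dn.h>

    static void sketch_dn_addr_usage(void)
    {
            __u16 area = 1, node = 100;                    /* example: node 1.100 */
            __le16 wire = dn_htons((area << 10) | node);   /* DECnet wire order   */
            __u16 host = dn_ntohs(wire);                   /* back to host order  */

            /* Passing 'wire' where a __u16 is expected (or vice versa) now
             * draws a sparse warning instead of a silent byte-order bug. */
            (void)host;
    }
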
diff --git a/include/net/dn_dev.h b/include/net/dn_dev.h
index 5a86e78081bf..cee46821dc53 100644
--- a/include/net/dn_dev.h
+++ b/include/net/dn_dev.h
@@ -7,11 +7,11 @@ struct dn_dev;
7struct dn_ifaddr { 7struct dn_ifaddr {
8 struct dn_ifaddr *ifa_next; 8 struct dn_ifaddr *ifa_next;
9 struct dn_dev *ifa_dev; 9 struct dn_dev *ifa_dev;
10 dn_address ifa_local; 10 __le16 ifa_local;
11 dn_address ifa_address; 11 __le16 ifa_address;
12 unsigned char ifa_flags; 12 __u8 ifa_flags;
13 unsigned char ifa_scope; 13 __u8 ifa_scope;
14 char ifa_label[IFNAMSIZ]; 14 char ifa_label[IFNAMSIZ];
15}; 15};
16 16
17#define DN_DEV_S_RU 0 /* Run - working normally */ 17#define DN_DEV_S_RU 0 /* Run - working normally */
@@ -91,7 +91,7 @@ struct dn_dev {
91 struct timer_list timer; 91 struct timer_list timer;
92 unsigned long t3; 92 unsigned long t3;
93 struct neigh_parms *neigh_parms; 93 struct neigh_parms *neigh_parms;
94 unsigned char addr[ETH_ALEN]; 94 __u8 addr[ETH_ALEN];
95 struct neighbour *router; /* Default router on circuit */ 95 struct neighbour *router; /* Default router on circuit */
96 struct neighbour *peer; /* Peer on pointopoint links */ 96 struct neighbour *peer; /* Peer on pointopoint links */
97 unsigned long uptime; /* Time device went up in jiffies */ 97 unsigned long uptime; /* Time device went up in jiffies */
@@ -99,56 +99,56 @@ struct dn_dev {
99 99
100struct dn_short_packet 100struct dn_short_packet
101{ 101{
102 unsigned char msgflg; 102 __u8 msgflg;
103 unsigned short dstnode; 103 __le16 dstnode;
104 unsigned short srcnode; 104 __le16 srcnode;
105 unsigned char forward; 105 __u8 forward;
106} __attribute__((packed)); 106} __attribute__((packed));
107 107
108struct dn_long_packet 108struct dn_long_packet
109{ 109{
110 unsigned char msgflg; 110 __u8 msgflg;
111 unsigned char d_area; 111 __u8 d_area;
112 unsigned char d_subarea; 112 __u8 d_subarea;
113 unsigned char d_id[6]; 113 __u8 d_id[6];
114 unsigned char s_area; 114 __u8 s_area;
115 unsigned char s_subarea; 115 __u8 s_subarea;
116 unsigned char s_id[6]; 116 __u8 s_id[6];
117 unsigned char nl2; 117 __u8 nl2;
118 unsigned char visit_ct; 118 __u8 visit_ct;
119 unsigned char s_class; 119 __u8 s_class;
120 unsigned char pt; 120 __u8 pt;
121} __attribute__((packed)); 121} __attribute__((packed));
122 122
123/*------------------------- DRP - Routing messages ---------------------*/ 123/*------------------------- DRP - Routing messages ---------------------*/
124 124
125struct endnode_hello_message 125struct endnode_hello_message
126{ 126{
127 unsigned char msgflg; 127 __u8 msgflg;
128 unsigned char tiver[3]; 128 __u8 tiver[3];
129 unsigned char id[6]; 129 __u8 id[6];
130 unsigned char iinfo; 130 __u8 iinfo;
131 unsigned short blksize; 131 __le16 blksize;
132 unsigned char area; 132 __u8 area;
133 unsigned char seed[8]; 133 __u8 seed[8];
134 unsigned char neighbor[6]; 134 __u8 neighbor[6];
135 unsigned short timer; 135 __le16 timer;
136 unsigned char mpd; 136 __u8 mpd;
137 unsigned char datalen; 137 __u8 datalen;
138 unsigned char data[2]; 138 __u8 data[2];
139} __attribute__((packed)); 139} __attribute__((packed));
140 140
141struct rtnode_hello_message 141struct rtnode_hello_message
142{ 142{
143 unsigned char msgflg; 143 __u8 msgflg;
144 unsigned char tiver[3]; 144 __u8 tiver[3];
145 unsigned char id[6]; 145 __u8 id[6];
146 unsigned char iinfo; 146 __u8 iinfo;
147 unsigned short blksize; 147 __le16 blksize;
148 unsigned char priority; 148 __u8 priority;
149 unsigned char area; 149 __u8 area;
150 unsigned short timer; 150 __le16 timer;
151 unsigned char mpd; 151 __u8 mpd;
152} __attribute__((packed)); 152} __attribute__((packed));
153 153
154 154
@@ -169,12 +169,12 @@ extern void dn_dev_down(struct net_device *);
169 169
170extern int dn_dev_set_default(struct net_device *dev, int force); 170extern int dn_dev_set_default(struct net_device *dev, int force);
171extern struct net_device *dn_dev_get_default(void); 171extern struct net_device *dn_dev_get_default(void);
172extern int dn_dev_bind_default(dn_address *addr); 172extern int dn_dev_bind_default(__le16 *addr);
173 173
174extern int register_dnaddr_notifier(struct notifier_block *nb); 174extern int register_dnaddr_notifier(struct notifier_block *nb);
175extern int unregister_dnaddr_notifier(struct notifier_block *nb); 175extern int unregister_dnaddr_notifier(struct notifier_block *nb);
176 176
177static inline int dn_dev_islocal(struct net_device *dev, dn_address addr) 177static inline int dn_dev_islocal(struct net_device *dev, __le16 addr)
178{ 178{
179 struct dn_dev *dn_db = dev->dn_ptr; 179 struct dn_dev *dn_db = dev->dn_ptr;
180 struct dn_ifaddr *ifa; 180 struct dn_ifaddr *ifa;
diff --git a/include/net/dn_fib.h b/include/net/dn_fib.h
index cd3c96d9601b..a15dcf0d5c1e 100644
--- a/include/net/dn_fib.h
+++ b/include/net/dn_fib.h
@@ -37,7 +37,7 @@ struct dn_fib_nh {
37 int nh_weight; 37 int nh_weight;
38 int nh_power; 38 int nh_power;
39 int nh_oif; 39 int nh_oif;
40 u32 nh_gw; 40 __le16 nh_gw;
41}; 41};
42 42
43struct dn_fib_info { 43struct dn_fib_info {
@@ -48,7 +48,7 @@ struct dn_fib_info {
48 int fib_dead; 48 int fib_dead;
49 unsigned fib_flags; 49 unsigned fib_flags;
50 int fib_protocol; 50 int fib_protocol;
51 dn_address fib_prefsrc; 51 __le16 fib_prefsrc;
52 __u32 fib_priority; 52 __u32 fib_priority;
53 __u32 fib_metrics[RTAX_MAX]; 53 __u32 fib_metrics[RTAX_MAX];
54#define dn_fib_mtu fib_metrics[RTAX_MTU-1] 54#define dn_fib_mtu fib_metrics[RTAX_MTU-1]
@@ -71,15 +71,15 @@ struct dn_fib_info {
71#define DN_FIB_RES_OIF(res) (DN_FIB_RES_NH(res).nh_oif) 71#define DN_FIB_RES_OIF(res) (DN_FIB_RES_NH(res).nh_oif)
72 72
73typedef struct { 73typedef struct {
74 u16 datum; 74 __le16 datum;
75} dn_fib_key_t; 75} dn_fib_key_t;
76 76
77typedef struct { 77typedef struct {
78 u16 datum; 78 __le16 datum;
79} dn_fib_hash_t; 79} dn_fib_hash_t;
80 80
81typedef struct { 81typedef struct {
82 u16 datum; 82 __u16 datum;
83} dn_fib_idx_t; 83} dn_fib_idx_t;
84 84
85struct dn_fib_node { 85struct dn_fib_node {
@@ -126,11 +126,11 @@ extern int dn_fib_semantic_match(int type, struct dn_fib_info *fi,
126 const struct flowi *fl, 126 const struct flowi *fl,
127 struct dn_fib_res *res); 127 struct dn_fib_res *res);
128extern void dn_fib_release_info(struct dn_fib_info *fi); 128extern void dn_fib_release_info(struct dn_fib_info *fi);
129extern u16 dn_fib_get_attr16(struct rtattr *attr, int attrlen, int type); 129extern __le16 dn_fib_get_attr16(struct rtattr *attr, int attrlen, int type);
130extern void dn_fib_flush(void); 130extern void dn_fib_flush(void);
131extern void dn_fib_select_multipath(const struct flowi *fl, 131extern void dn_fib_select_multipath(const struct flowi *fl,
132 struct dn_fib_res *res); 132 struct dn_fib_res *res);
133extern int dn_fib_sync_down(dn_address local, struct net_device *dev, 133extern int dn_fib_sync_down(__le16 local, struct net_device *dev,
134 int force); 134 int force);
135extern int dn_fib_sync_up(struct net_device *dev); 135extern int dn_fib_sync_up(struct net_device *dev);
136 136
@@ -148,8 +148,8 @@ extern void dn_fib_table_cleanup(void);
148extern void dn_fib_rules_init(void); 148extern void dn_fib_rules_init(void);
149extern void dn_fib_rules_cleanup(void); 149extern void dn_fib_rules_cleanup(void);
150extern void dn_fib_rule_put(struct dn_fib_rule *); 150extern void dn_fib_rule_put(struct dn_fib_rule *);
151extern __u16 dn_fib_rules_policy(__u16 saddr, struct dn_fib_res *res, unsigned *flags); 151extern __le16 dn_fib_rules_policy(__le16 saddr, struct dn_fib_res *res, unsigned *flags);
152extern unsigned dnet_addr_type(__u16 addr); 152extern unsigned dnet_addr_type(__le16 addr);
153extern int dn_fib_lookup(const struct flowi *fl, struct dn_fib_res *res); 153extern int dn_fib_lookup(const struct flowi *fl, struct dn_fib_res *res);
154 154
155/* 155/*
@@ -194,10 +194,10 @@ extern struct dn_fib_table *dn_fib_tables[];
194 194
195#endif /* CONFIG_DECNET_ROUTER */ 195#endif /* CONFIG_DECNET_ROUTER */
196 196
197static inline u16 dnet_make_mask(int n) 197static inline __le16 dnet_make_mask(int n)
198{ 198{
199 if (n) 199 if (n)
200 return htons(~((1<<(16-n))-1)); 200 return dn_htons(~((1<<(16-n))-1));
201 return 0; 201 return 0;
202} 202}
203 203
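
The dnet_make_mask() change swaps htons() for dn_htons(), keeping the result in DECnet (little-endian) wire order to match the new __le16 types. For reference, one worked value:

    /* dnet_make_mask(10):
     *   1 << (16 - 10)        = 0x0040
     *   0x0040 - 1            = 0x003f
     *   ~0x003f (low 16 bits) = 0xffc0   top 10 bits set, host order
     *   dn_htons(0xffc0)      = cpu_to_le16(0xffc0) -> bytes c0 ff on the wire
     */
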
diff --git a/include/net/dn_neigh.h b/include/net/dn_neigh.h
index 4b1eb038d637..4cb4ae7fb81f 100644
--- a/include/net/dn_neigh.h
+++ b/include/net/dn_neigh.h
@@ -7,13 +7,13 @@
7 */ 7 */
8struct dn_neigh { 8struct dn_neigh {
9 struct neighbour n; 9 struct neighbour n;
10 dn_address addr; 10 __le16 addr;
11 unsigned long flags; 11 unsigned long flags;
12#define DN_NDFLAG_R1 0x0001 /* Router L1 */ 12#define DN_NDFLAG_R1 0x0001 /* Router L1 */
13#define DN_NDFLAG_R2 0x0002 /* Router L2 */ 13#define DN_NDFLAG_R2 0x0002 /* Router L2 */
14#define DN_NDFLAG_P3 0x0004 /* Phase III Node */ 14#define DN_NDFLAG_P3 0x0004 /* Phase III Node */
15 unsigned long blksize; 15 unsigned long blksize;
16 unsigned char priority; 16 __u8 priority;
17}; 17};
18 18
19extern void dn_neigh_init(void); 19extern void dn_neigh_init(void);
diff --git a/include/net/dn_nsp.h b/include/net/dn_nsp.h
index e6182b86262b..96e816b6974d 100644
--- a/include/net/dn_nsp.h
+++ b/include/net/dn_nsp.h
@@ -72,77 +72,77 @@ extern struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int nobl
72 72
73struct nsp_data_seg_msg 73struct nsp_data_seg_msg
74{ 74{
75 unsigned char msgflg; 75 __u8 msgflg;
76 unsigned short dstaddr; 76 __le16 dstaddr;
77 unsigned short srcaddr; 77 __le16 srcaddr;
78} __attribute__((packed)); 78} __attribute__((packed));
79 79
80struct nsp_data_opt_msg 80struct nsp_data_opt_msg
81{ 81{
82 unsigned short acknum; 82 __le16 acknum;
83 unsigned short segnum; 83 __le16 segnum;
84 unsigned short lsflgs; 84 __le16 lsflgs;
85} __attribute__((packed)); 85} __attribute__((packed));
86 86
87struct nsp_data_opt_msg1 87struct nsp_data_opt_msg1
88{ 88{
89 unsigned short acknum; 89 __le16 acknum;
90 unsigned short segnum; 90 __le16 segnum;
91} __attribute__((packed)); 91} __attribute__((packed));
92 92
93 93
94/* Acknowledgment Message (data/other data) */ 94/* Acknowledgment Message (data/other data) */
95struct nsp_data_ack_msg 95struct nsp_data_ack_msg
96{ 96{
97 unsigned char msgflg; 97 __u8 msgflg;
98 unsigned short dstaddr; 98 __le16 dstaddr;
99 unsigned short srcaddr; 99 __le16 srcaddr;
100 unsigned short acknum; 100 __le16 acknum;
101} __attribute__((packed)); 101} __attribute__((packed));
102 102
103/* Connect Acknowledgment Message */ 103/* Connect Acknowledgment Message */
104struct nsp_conn_ack_msg 104struct nsp_conn_ack_msg
105{ 105{
106 unsigned char msgflg; 106 __u8 msgflg;
107 unsigned short dstaddr; 107 __le16 dstaddr;
108} __attribute__((packed)); 108} __attribute__((packed));
109 109
110 110
111/* Connect Initiate/Retransmit Initiate/Connect Confirm */ 111/* Connect Initiate/Retransmit Initiate/Connect Confirm */
112struct nsp_conn_init_msg 112struct nsp_conn_init_msg
113{ 113{
114 unsigned char msgflg; 114 __u8 msgflg;
115#define NSP_CI 0x18 /* Connect Initiate */ 115#define NSP_CI 0x18 /* Connect Initiate */
116#define NSP_RCI 0x68 /* Retrans. Conn Init */ 116#define NSP_RCI 0x68 /* Retrans. Conn Init */
117 unsigned short dstaddr; 117 __le16 dstaddr;
118 unsigned short srcaddr; 118 __le16 srcaddr;
119 unsigned char services; 119 __u8 services;
120#define NSP_FC_NONE 0x00 /* Flow Control None */ 120#define NSP_FC_NONE 0x00 /* Flow Control None */
121#define NSP_FC_SRC 0x04 /* Seg Req. Count */ 121#define NSP_FC_SRC 0x04 /* Seg Req. Count */
122#define NSP_FC_SCMC 0x08 /* Sess. Control Mess */ 122#define NSP_FC_SCMC 0x08 /* Sess. Control Mess */
123#define NSP_FC_MASK 0x0c /* FC type mask */ 123#define NSP_FC_MASK 0x0c /* FC type mask */
124 unsigned char info; 124 __u8 info;
125 unsigned short segsize; 125 __le16 segsize;
126} __attribute__((packed)); 126} __attribute__((packed));
127 127
128/* Disconnect Initiate/Disconnect Confirm */ 128/* Disconnect Initiate/Disconnect Confirm */
129struct nsp_disconn_init_msg 129struct nsp_disconn_init_msg
130{ 130{
131 unsigned char msgflg; 131 __u8 msgflg;
132 unsigned short dstaddr; 132 __le16 dstaddr;
133 unsigned short srcaddr; 133 __le16 srcaddr;
134 unsigned short reason; 134 __le16 reason;
135} __attribute__((packed)); 135} __attribute__((packed));
136 136
137 137
138 138
139struct srcobj_fmt 139struct srcobj_fmt
140{ 140{
141 char format; 141 __u8 format;
142 unsigned char task; 142 __u8 task;
143 unsigned short grpcode; 143 __le16 grpcode;
144 unsigned short usrcode; 144 __le16 usrcode;
145 char dlen; 145 __u8 dlen;
146} __attribute__((packed)); 146} __attribute__((packed));
147 147
148/* 148/*
@@ -150,7 +150,7 @@ struct srcobj_fmt
150 * numbers used in NSP. Similar in operation to the functions 150 * numbers used in NSP. Similar in operation to the functions
151 * of the same name in TCP. 151 * of the same name in TCP.
152 */ 152 */
153static __inline__ int dn_before(unsigned short seq1, unsigned short seq2) 153static __inline__ int dn_before(__u16 seq1, __u16 seq2)
154{ 154{
155 seq1 &= 0x0fff; 155 seq1 &= 0x0fff;
156 seq2 &= 0x0fff; 156 seq2 &= 0x0fff;
@@ -159,7 +159,7 @@ static __inline__ int dn_before(unsigned short seq1, unsigned short seq2)
159} 159}
160 160
161 161
162static __inline__ int dn_after(unsigned short seq1, unsigned short seq2) 162static __inline__ int dn_after(__u16 seq1, __u16 seq2)
163{ 163{
164 seq1 &= 0x0fff; 164 seq1 &= 0x0fff;
165 seq2 &= 0x0fff; 165 seq2 &= 0x0fff;
@@ -167,23 +167,23 @@ static __inline__ int dn_after(unsigned short seq1, unsigned short seq2)
167 return (int)((seq2 - seq1) & 0x0fff) > 2048; 167 return (int)((seq2 - seq1) & 0x0fff) > 2048;
168} 168}
169 169
170static __inline__ int dn_equal(unsigned short seq1, unsigned short seq2) 170static __inline__ int dn_equal(__u16 seq1, __u16 seq2)
171{ 171{
172 return ((seq1 ^ seq2) & 0x0fff) == 0; 172 return ((seq1 ^ seq2) & 0x0fff) == 0;
173} 173}
174 174
175static __inline__ int dn_before_or_equal(unsigned short seq1, unsigned short seq2) 175static __inline__ int dn_before_or_equal(__u16 seq1, __u16 seq2)
176{ 176{
177 return (dn_before(seq1, seq2) || dn_equal(seq1, seq2)); 177 return (dn_before(seq1, seq2) || dn_equal(seq1, seq2));
178} 178}
179 179
180static __inline__ void seq_add(unsigned short *seq, unsigned short off) 180static __inline__ void seq_add(__u16 *seq, __u16 off)
181{ 181{
182 (*seq) += off; 182 (*seq) += off;
183 (*seq) &= 0x0fff; 183 (*seq) &= 0x0fff;
184} 184}
185 185
186static __inline__ int seq_next(unsigned short seq1, unsigned short seq2) 186static __inline__ int seq_next(__u16 seq1, __u16 seq2)
187{ 187{
188 return dn_equal(seq1 + 1, seq2); 188 return dn_equal(seq1 + 1, seq2);
189} 189}
@@ -191,7 +191,7 @@ static __inline__ int seq_next(unsigned short seq1, unsigned short seq2)
191/* 191/*
192 * Can we delay the ack ? 192 * Can we delay the ack ?
193 */ 193 */
194static __inline__ int sendack(unsigned short seq) 194static __inline__ int sendack(__u16 seq)
195{ 195{
196 return (int)((seq & 0x1000) ? 0 : 1); 196 return (int)((seq & 0x1000) ? 0 : 1);
197} 197}
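
The sequence helpers above now take __u16 explicitly, but the arithmetic is unchanged: NSP sequence numbers live in a 12-bit space and are compared modulo 4096. A few worked values, based only on the bodies visible in this hunk:

    /* dn_after(5, 4090):   ((4090 - 5) & 0x0fff) = 4085 > 2048  -> true,
     *                      i.e. 5 has wrapped past 4090.
     * dn_equal(0x1005, 5): ((0x1005 ^ 5) & 0x0fff) == 0         -> true,
     *                      bits above the low 12 are ignored.
     * sendack(0x1005):     (0x1005 & 0x1000) is set             -> 0,
     *                      the ack for this segment may not be delayed.
     */
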
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
index 5122da3f2eb3..76f957e258b0 100644
--- a/include/net/dn_route.h
+++ b/include/net/dn_route.h
@@ -71,12 +71,12 @@ struct dn_route {
71 struct dn_route *rt_next; 71 struct dn_route *rt_next;
72 } u; 72 } u;
73 73
74 __u16 rt_saddr; 74 __le16 rt_saddr;
75 __u16 rt_daddr; 75 __le16 rt_daddr;
76 __u16 rt_gateway; 76 __le16 rt_gateway;
77 __u16 rt_local_src; /* Source used for forwarding packets */ 77 __le16 rt_local_src; /* Source used for forwarding packets */
78 __u16 rt_src_map; 78 __le16 rt_src_map;
79 __u16 rt_dst_map; 79 __le16 rt_dst_map;
80 80
81 unsigned rt_flags; 81 unsigned rt_flags;
82 unsigned rt_type; 82 unsigned rt_type;
diff --git a/include/net/flow.h b/include/net/flow.h
index ec7eb86eb203..04d89f763451 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -30,8 +30,8 @@ struct flowi {
30 } ip6_u; 30 } ip6_u;
31 31
32 struct { 32 struct {
33 __u16 daddr; 33 __le16 daddr;
34 __u16 saddr; 34 __le16 saddr;
35 __u32 fwmark; 35 __u32 fwmark;
36 __u8 scope; 36 __u8 scope;
37 } dn_u; 37 } dn_u;
@@ -64,8 +64,8 @@ struct flowi {
64 } icmpt; 64 } icmpt;
65 65
66 struct { 66 struct {
67 __u16 sport; 67 __le16 sport;
68 __u16 dport; 68 __le16 dport;
69 __u8 objnum; 69 __u8 objnum;
70 __u8 objnamel; /* Not 16 bits since max val is 16 */ 70 __u8 objnamel; /* Not 16 bits since max val is 16 */
71 __u8 objname[16]; /* Not zero terminated */ 71 __u8 objname[16]; /* Not zero terminated */
diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h
index 9a92aef8b0b2..4725ff861c57 100644
--- a/include/net/ieee80211.h
+++ b/include/net/ieee80211.h
@@ -220,6 +220,7 @@ struct ieee80211_snap_hdr {
220/* Authentication algorithms */ 220/* Authentication algorithms */
221#define WLAN_AUTH_OPEN 0 221#define WLAN_AUTH_OPEN 0
222#define WLAN_AUTH_SHARED_KEY 1 222#define WLAN_AUTH_SHARED_KEY 1
223#define WLAN_AUTH_LEAP 2
223 224
224#define WLAN_AUTH_CHALLENGE_LEN 128 225#define WLAN_AUTH_CHALLENGE_LEN 128
225 226
@@ -299,6 +300,23 @@ enum ieee80211_reasoncode {
299 WLAN_REASON_CIPHER_SUITE_REJECTED = 24, 300 WLAN_REASON_CIPHER_SUITE_REJECTED = 24,
300}; 301};
301 302
303/* Action categories - 802.11h */
304enum ieee80211_actioncategories {
305 WLAN_ACTION_SPECTRUM_MGMT = 0,
306 /* Reserved 1-127 */
307 /* Error 128-255 */
308};
309
310/* Action details - 802.11h */
311enum ieee80211_actiondetails {
312 WLAN_ACTION_CATEGORY_MEASURE_REQUEST = 0,
313 WLAN_ACTION_CATEGORY_MEASURE_REPORT = 1,
314 WLAN_ACTION_CATEGORY_TPC_REQUEST = 2,
315 WLAN_ACTION_CATEGORY_TPC_REPORT = 3,
316 WLAN_ACTION_CATEGORY_CHANNEL_SWITCH = 4,
317 /* 5 - 255 Reserved */
318};
319
302#define IEEE80211_STATMASK_SIGNAL (1<<0) 320#define IEEE80211_STATMASK_SIGNAL (1<<0)
303#define IEEE80211_STATMASK_RSSI (1<<1) 321#define IEEE80211_STATMASK_RSSI (1<<1)
304#define IEEE80211_STATMASK_NOISE (1<<2) 322#define IEEE80211_STATMASK_NOISE (1<<2)
@@ -377,6 +395,8 @@ struct ieee80211_rx_stats {
377 u8 mask; 395 u8 mask;
378 u8 freq; 396 u8 freq;
379 u16 len; 397 u16 len;
398 u64 tsf;
399 u32 beacon_time;
380}; 400};
381 401
382/* IEEE 802.11 requires that STA supports concurrent reception of at least 402/* IEEE 802.11 requires that STA supports concurrent reception of at least
@@ -608,6 +628,28 @@ struct ieee80211_auth {
608 struct ieee80211_info_element info_element[0]; 628 struct ieee80211_info_element info_element[0];
609} __attribute__ ((packed)); 629} __attribute__ ((packed));
610 630
631struct ieee80211_channel_switch {
632 u8 id;
633 u8 len;
634 u8 mode;
635 u8 channel;
636 u8 count;
637} __attribute__ ((packed));
638
639struct ieee80211_action {
640 struct ieee80211_hdr_3addr header;
641 u8 category;
642 u8 action;
643 union {
644 struct ieee80211_action_exchange {
645 u8 token;
646 struct ieee80211_info_element info_element[0];
647 } exchange;
648 struct ieee80211_channel_switch channel_switch;
649
650 } format;
651} __attribute__ ((packed));
652
611struct ieee80211_disassoc { 653struct ieee80211_disassoc {
612 struct ieee80211_hdr_3addr header; 654 struct ieee80211_hdr_3addr header;
613 __le16 reason; 655 __le16 reason;
@@ -692,7 +734,15 @@ struct ieee80211_txb {
692/* QoS structure */ 734/* QoS structure */
693#define NETWORK_HAS_QOS_PARAMETERS (1<<3) 735#define NETWORK_HAS_QOS_PARAMETERS (1<<3)
694#define NETWORK_HAS_QOS_INFORMATION (1<<4) 736#define NETWORK_HAS_QOS_INFORMATION (1<<4)
695#define NETWORK_HAS_QOS_MASK (NETWORK_HAS_QOS_PARAMETERS | NETWORK_HAS_QOS_INFORMATION) 737#define NETWORK_HAS_QOS_MASK (NETWORK_HAS_QOS_PARAMETERS | \
738 NETWORK_HAS_QOS_INFORMATION)
739
740/* 802.11h */
741#define NETWORK_HAS_POWER_CONSTRAINT (1<<5)
742#define NETWORK_HAS_CSA (1<<6)
743#define NETWORK_HAS_QUIET (1<<7)
744#define NETWORK_HAS_IBSS_DFS (1<<8)
745#define NETWORK_HAS_TPC_REPORT (1<<9)
696 746
697#define QOS_QUEUE_NUM 4 747#define QOS_QUEUE_NUM 4
698#define QOS_OUI_LEN 3 748#define QOS_OUI_LEN 3
@@ -748,6 +798,91 @@ struct ieee80211_tim_parameters {
748 798
749/*******************************************************/ 799/*******************************************************/
750 800
801enum { /* ieee80211_basic_report.map */
802 IEEE80211_BASIC_MAP_BSS = (1 << 0),
803 IEEE80211_BASIC_MAP_OFDM = (1 << 1),
804 IEEE80211_BASIC_MAP_UNIDENTIFIED = (1 << 2),
805 IEEE80211_BASIC_MAP_RADAR = (1 << 3),
806 IEEE80211_BASIC_MAP_UNMEASURED = (1 << 4),
807 /* Bits 5-7 are reserved */
808
809};
810struct ieee80211_basic_report {
811 u8 channel;
812 __le64 start_time;
813 __le16 duration;
814 u8 map;
815} __attribute__ ((packed));
816
817enum { /* ieee80211_measurement_request.mode */
818 /* Bit 0 is reserved */
819 IEEE80211_MEASUREMENT_ENABLE = (1 << 1),
820 IEEE80211_MEASUREMENT_REQUEST = (1 << 2),
821 IEEE80211_MEASUREMENT_REPORT = (1 << 3),
822 /* Bits 4-7 are reserved */
823};
824
825enum {
826 IEEE80211_REPORT_BASIC = 0, /* required */
827 IEEE80211_REPORT_CCA = 1, /* optional */
828 IEEE80211_REPORT_RPI = 2, /* optional */
829 /* 3-255 reserved */
830};
831
832struct ieee80211_measurement_params {
833 u8 channel;
834 __le64 start_time;
835 __le16 duration;
836} __attribute__ ((packed));
837
838struct ieee80211_measurement_request {
839 struct ieee80211_info_element ie;
840 u8 token;
841 u8 mode;
842 u8 type;
843 struct ieee80211_measurement_params params[0];
844} __attribute__ ((packed));
845
846struct ieee80211_measurement_report {
847 struct ieee80211_info_element ie;
848 u8 token;
849 u8 mode;
850 u8 type;
851 union {
852 struct ieee80211_basic_report basic[0];
853 } u;
854} __attribute__ ((packed));
855
856struct ieee80211_tpc_report {
857 u8 transmit_power;
858 u8 link_margin;
859} __attribute__ ((packed));
860
861struct ieee80211_channel_map {
862 u8 channel;
863 u8 map;
864} __attribute__ ((packed));
865
866struct ieee80211_ibss_dfs {
867 struct ieee80211_info_element ie;
868 u8 owner[ETH_ALEN];
869 u8 recovery_interval;
870 struct ieee80211_channel_map channel_map[0];
871};
872
873struct ieee80211_csa {
874 u8 mode;
875 u8 channel;
876 u8 count;
877} __attribute__ ((packed));
878
879struct ieee80211_quiet {
880 u8 count;
881 u8 period;
882 u8 duration;
883 u8 offset;
884} __attribute__ ((packed));
885
751struct ieee80211_network { 886struct ieee80211_network {
752 /* These entries are used to identify a unique network */ 887 /* These entries are used to identify a unique network */
753 u8 bssid[ETH_ALEN]; 888 u8 bssid[ETH_ALEN];
@@ -767,7 +902,7 @@ struct ieee80211_network {
767 u8 rates_ex_len; 902 u8 rates_ex_len;
768 unsigned long last_scanned; 903 unsigned long last_scanned;
769 u8 mode; 904 u8 mode;
770 u8 flags; 905 u32 flags;
771 u32 last_associate; 906 u32 last_associate;
772 u32 time_stamp[2]; 907 u32 time_stamp[2];
773 u16 beacon_interval; 908 u16 beacon_interval;
@@ -779,6 +914,25 @@ struct ieee80211_network {
779 u8 rsn_ie[MAX_WPA_IE_LEN]; 914 u8 rsn_ie[MAX_WPA_IE_LEN];
780 size_t rsn_ie_len; 915 size_t rsn_ie_len;
781 struct ieee80211_tim_parameters tim; 916 struct ieee80211_tim_parameters tim;
917
918 /* 802.11h info */
919
920 /* Power Constraint - mandatory if spctrm mgmt required */
921 u8 power_constraint;
922
923 /* TPC Report - mandatory if spctrm mgmt required */
924 struct ieee80211_tpc_report tpc_report;
925
926 /* IBSS DFS - mandatory if spctrm mgmt required and IBSS
927 * NOTE: This is variable length and so must be allocated dynamically */
928 struct ieee80211_ibss_dfs *ibss_dfs;
929
930 /* Channel Switch Announcement - optional if spctrm mgmt required */
931 struct ieee80211_csa csa;
932
933 /* Quiet - optional if spctrm mgmt required */
934 struct ieee80211_quiet quiet;
935
782 struct list_head list; 936 struct list_head list;
783}; 937};
784 938
@@ -924,7 +1078,10 @@ struct ieee80211_device {
924 int (*handle_auth) (struct net_device * dev, 1078 int (*handle_auth) (struct net_device * dev,
925 struct ieee80211_auth * auth); 1079 struct ieee80211_auth * auth);
926 int (*handle_deauth) (struct net_device * dev, 1080 int (*handle_deauth) (struct net_device * dev,
927 struct ieee80211_auth * auth); 1081 struct ieee80211_deauth * auth);
1082 int (*handle_action) (struct net_device * dev,
1083 struct ieee80211_action * action,
1084 struct ieee80211_rx_stats * stats);
928 int (*handle_disassoc) (struct net_device * dev, 1085 int (*handle_disassoc) (struct net_device * dev,
929 struct ieee80211_disassoc * assoc); 1086 struct ieee80211_disassoc * assoc);
930 int (*handle_beacon) (struct net_device * dev, 1087 int (*handle_beacon) (struct net_device * dev,
@@ -1093,6 +1250,7 @@ extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
1093extern void ieee80211_rx_mgt(struct ieee80211_device *ieee, 1250extern void ieee80211_rx_mgt(struct ieee80211_device *ieee,
1094 struct ieee80211_hdr_4addr *header, 1251 struct ieee80211_hdr_4addr *header,
1095 struct ieee80211_rx_stats *stats); 1252 struct ieee80211_rx_stats *stats);
1253extern void ieee80211_network_reset(struct ieee80211_network *network);
1096 1254
1097/* ieee80211_geo.c */ 1255/* ieee80211_geo.c */
1098extern const struct ieee80211_geo *ieee80211_get_geo(struct ieee80211_device 1256extern const struct ieee80211_geo *ieee80211_get_geo(struct ieee80211_device
@@ -1105,6 +1263,11 @@ extern int ieee80211_is_valid_channel(struct ieee80211_device *ieee,
1105extern int ieee80211_channel_to_index(struct ieee80211_device *ieee, 1263extern int ieee80211_channel_to_index(struct ieee80211_device *ieee,
1106 u8 channel); 1264 u8 channel);
1107extern u8 ieee80211_freq_to_channel(struct ieee80211_device *ieee, u32 freq); 1265extern u8 ieee80211_freq_to_channel(struct ieee80211_device *ieee, u32 freq);
1266extern u8 ieee80211_get_channel_flags(struct ieee80211_device *ieee,
1267 u8 channel);
1268extern const struct ieee80211_channel *ieee80211_get_channel(struct
1269 ieee80211_device
1270 *ieee, u8 channel);
1108 1271
1109/* ieee80211_wx.c */ 1272/* ieee80211_wx.c */
1110extern int ieee80211_wx_get_scan(struct ieee80211_device *ieee, 1273extern int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
@@ -1122,6 +1285,14 @@ extern int ieee80211_wx_set_encodeext(struct ieee80211_device *ieee,
1122extern int ieee80211_wx_get_encodeext(struct ieee80211_device *ieee, 1285extern int ieee80211_wx_get_encodeext(struct ieee80211_device *ieee,
1123 struct iw_request_info *info, 1286 struct iw_request_info *info,
1124 union iwreq_data *wrqu, char *extra); 1287 union iwreq_data *wrqu, char *extra);
1288extern int ieee80211_wx_set_auth(struct net_device *dev,
1289 struct iw_request_info *info,
1290 union iwreq_data *wrqu,
1291 char *extra);
1292extern int ieee80211_wx_get_auth(struct net_device *dev,
1293 struct iw_request_info *info,
1294 union iwreq_data *wrqu,
1295 char *extra);
1125 1296
1126static inline void ieee80211_increment_scans(struct ieee80211_device *ieee) 1297static inline void ieee80211_increment_scans(struct ieee80211_device *ieee)
1127{ 1298{
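
The new handle_action() callback receives the 802.11h action frame defined earlier in this file. A sketch of how a driver might dispatch on it, using only the enums and fields added above; the function itself is illustrative, not part of the patch:

    static int sketch_handle_action(struct net_device *dev,
                                    struct ieee80211_action *action,
                                    struct ieee80211_rx_stats *stats)
    {
            if (action->category != WLAN_ACTION_SPECTRUM_MGMT)
                    return 0;               /* only 802.11h is defined so far */

            switch (action->action) {
            case WLAN_ACTION_CATEGORY_CHANNEL_SWITCH:
                    /* action->format.channel_switch: mode/channel/count */
                    break;
            case WLAN_ACTION_CATEGORY_MEASURE_REQUEST:
            case WLAN_ACTION_CATEGORY_MEASURE_REPORT:
                    /* action->format.exchange: token plus info elements */
                    break;
            default:
                    break;
            }
            return 0;
    }
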
diff --git a/include/net/ieee80211_crypt.h b/include/net/ieee80211_crypt.h
index cd82c3e998e4..eb476414fd72 100644
--- a/include/net/ieee80211_crypt.h
+++ b/include/net/ieee80211_crypt.h
@@ -47,7 +47,8 @@ struct ieee80211_crypto_ops {
47 /* deinitialize crypto context and free allocated private data */ 47 /* deinitialize crypto context and free allocated private data */
48 void (*deinit) (void *priv); 48 void (*deinit) (void *priv);
49 49
50 int (*build_iv) (struct sk_buff * skb, int hdr_len, void *priv); 50 int (*build_iv) (struct sk_buff * skb, int hdr_len,
51 u8 *key, int keylen, void *priv);
51 52
52 /* encrypt/decrypt return < 0 on error or >= 0 on success. The return 53 /* encrypt/decrypt return < 0 on error or >= 0 on success. The return
53 * value from decrypt_mpdu is passed as the keyidx value for 54 * value from decrypt_mpdu is passed as the keyidx value for
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index eb8afe3499a9..e459e1a0ae4a 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -180,11 +180,8 @@ struct inet6_dev
180 180
181#ifdef CONFIG_IPV6_PRIVACY 181#ifdef CONFIG_IPV6_PRIVACY
182 u8 rndid[8]; 182 u8 rndid[8];
183 u8 entropy[8];
184 struct timer_list regen_timer; 183 struct timer_list regen_timer;
185 struct inet6_ifaddr *tempaddr_list; 184 struct inet6_ifaddr *tempaddr_list;
186 __u8 work_eui64[8];
187 __u8 work_digest[16];
188#endif 185#endif
189 186
190 struct neigh_parms *nd_parms; 187 struct neigh_parms *nd_parms;
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index fa587c94e9d0..9bf73fe50948 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -50,6 +50,12 @@ struct inet_connection_sock_af_ops {
50 char __user *optval, int optlen); 50 char __user *optval, int optlen);
51 int (*getsockopt)(struct sock *sk, int level, int optname, 51 int (*getsockopt)(struct sock *sk, int level, int optname,
52 char __user *optval, int __user *optlen); 52 char __user *optval, int __user *optlen);
53 int (*compat_setsockopt)(struct sock *sk,
54 int level, int optname,
55 char __user *optval, int optlen);
56 int (*compat_getsockopt)(struct sock *sk,
57 int level, int optname,
58 char __user *optval, int __user *optlen);
53 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *); 59 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
54 int sockaddr_len; 60 int sockaddr_len;
55}; 61};
@@ -72,6 +78,7 @@ struct inet_connection_sock_af_ops {
72 * @icsk_probes_out: unanswered 0 window probes 78 * @icsk_probes_out: unanswered 0 window probes
73 * @icsk_ext_hdr_len: Network protocol overhead (IP/IPv6 options) 79 * @icsk_ext_hdr_len: Network protocol overhead (IP/IPv6 options)
74 * @icsk_ack: Delayed ACK control data 80 * @icsk_ack: Delayed ACK control data
 81 * @icsk_mtup: MTU probing control data
75 */ 82 */
76struct inet_connection_sock { 83struct inet_connection_sock {
77 /* inet_sock has to be the first member! */ 84 /* inet_sock has to be the first member! */
@@ -104,6 +111,16 @@ struct inet_connection_sock {
104 __u16 last_seg_size; /* Size of last incoming segment */ 111 __u16 last_seg_size; /* Size of last incoming segment */
105 __u16 rcv_mss; /* MSS used for delayed ACK decisions */ 112 __u16 rcv_mss; /* MSS used for delayed ACK decisions */
106 } icsk_ack; 113 } icsk_ack;
114 struct {
115 int enabled;
116
117 /* Range of MTUs to search */
118 int search_high;
119 int search_low;
120
121 /* Information on the current probe. */
122 int probe_size;
123 } icsk_mtup;
107 u32 icsk_ca_priv[16]; 124 u32 icsk_ca_priv[16];
108#define ICSK_CA_PRIV_SIZE (16 * sizeof(u32)) 125#define ICSK_CA_PRIV_SIZE (16 * sizeof(u32))
109}; 126};
@@ -310,4 +327,13 @@ extern void inet_csk_listen_stop(struct sock *sk);
310 327
311extern void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr); 328extern void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
312 329
330extern int inet_csk_ctl_sock_create(struct socket **sock,
331 unsigned short family,
332 unsigned short type,
333 unsigned char protocol);
334
335extern int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
336 char __user *optval, int __user *optlen);
337extern int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
338 char __user *optval, int optlen);
313#endif /* _INET_CONNECTION_SOCK_H */ 339#endif /* _INET_CONNECTION_SOCK_H */
diff --git a/include/net/ip.h b/include/net/ip.h
index fab3d5b3ab1c..8fe6156ca9b0 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -356,6 +356,10 @@ extern void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
356extern int ip_cmsg_send(struct msghdr *msg, struct ipcm_cookie *ipc); 356extern int ip_cmsg_send(struct msghdr *msg, struct ipcm_cookie *ipc);
357extern int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, int optlen); 357extern int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, int optlen);
358extern int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); 358extern int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen);
359extern int compat_ip_setsockopt(struct sock *sk, int level,
360 int optname, char __user *optval, int optlen);
361extern int compat_ip_getsockopt(struct sock *sk, int level,
362 int optname, char __user *optval, int __user *optlen);
359extern int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *)); 363extern int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *));
360 364
361extern int ip_recv_error(struct sock *sk, struct msghdr *msg, int len); 365extern int ip_recv_error(struct sock *sk, struct msghdr *msg, int len);
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 1f2e428ca364..a398ae5e30f9 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -7,6 +7,23 @@
7#define IP6_RT_PRIO_KERN 512 7#define IP6_RT_PRIO_KERN 512
8#define IP6_RT_FLOW_MASK 0x00ff 8#define IP6_RT_FLOW_MASK 0x00ff
9 9
10struct route_info {
11 __u8 type;
12 __u8 length;
13 __u8 prefix_len;
14#if defined(__BIG_ENDIAN_BITFIELD)
15 __u8 reserved_h:3,
16 route_pref:2,
17 reserved_l:3;
18#elif defined(__LITTLE_ENDIAN_BITFIELD)
19 __u8 reserved_l:3,
20 route_pref:2,
21 reserved_h:3;
22#endif
23 __u32 lifetime;
24 __u8 prefix[0]; /* 0,8 or 16 */
25};
26
10#ifdef __KERNEL__ 27#ifdef __KERNEL__
11 28
12#include <net/flow.h> 29#include <net/flow.h>
@@ -87,11 +104,14 @@ extern struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
87extern struct rt6_info * rt6_get_dflt_router(struct in6_addr *addr, 104extern struct rt6_info * rt6_get_dflt_router(struct in6_addr *addr,
88 struct net_device *dev); 105 struct net_device *dev);
89extern struct rt6_info * rt6_add_dflt_router(struct in6_addr *gwaddr, 106extern struct rt6_info * rt6_add_dflt_router(struct in6_addr *gwaddr,
90 struct net_device *dev); 107 struct net_device *dev,
108 unsigned int pref);
91 109
92extern void rt6_purge_dflt_routers(void); 110extern void rt6_purge_dflt_routers(void);
93 111
94extern void rt6_reset_dflt_pointer(struct rt6_info *rt); 112extern int rt6_route_rcv(struct net_device *dev,
113 u8 *opt, int len,
114 struct in6_addr *gwaddr);
95 115
96extern void rt6_redirect(struct in6_addr *dest, 116extern void rt6_redirect(struct in6_addr *dest,
97 struct in6_addr *saddr, 117 struct in6_addr *saddr,
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 3b1d963d396c..6d6f0634ae41 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -282,6 +282,18 @@ static inline int ipv6_addr_cmp(const struct in6_addr *a1, const struct in6_addr
282 return memcmp((const void *) a1, (const void *) a2, sizeof(struct in6_addr)); 282 return memcmp((const void *) a1, (const void *) a2, sizeof(struct in6_addr));
283} 283}
284 284
285static inline int
286ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m,
287 const struct in6_addr *a2)
288{
289 unsigned int i;
290
291 for (i = 0; i < 4; i++)
292 if ((a1->s6_addr32[i] ^ a2->s6_addr32[i]) & m->s6_addr32[i])
293 return 1;
294 return 0;
295}
296
285static inline void ipv6_addr_copy(struct in6_addr *a1, const struct in6_addr *a2) 297static inline void ipv6_addr_copy(struct in6_addr *a1, const struct in6_addr *a2)
286{ 298{
287 memcpy((void *) a1, (const void *) a2, sizeof(struct in6_addr)); 299 memcpy((void *) a1, (const void *) a2, sizeof(struct in6_addr));
@@ -508,6 +520,16 @@ extern int ipv6_getsockopt(struct sock *sk, int level,
508 int optname, 520 int optname,
509 char __user *optval, 521 char __user *optval,
510 int __user *optlen); 522 int __user *optlen);
523extern int compat_ipv6_setsockopt(struct sock *sk,
524 int level,
525 int optname,
526 char __user *optval,
527 int optlen);
528extern int compat_ipv6_getsockopt(struct sock *sk,
529 int level,
530 int optname,
531 char __user *optval,
532 int __user *optlen);
511 533
512extern void ipv6_packet_init(void); 534extern void ipv6_packet_init(void);
513 535
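
ipv6_masked_addr_cmp() XORs the two addresses word by word and keeps only the bits selected by the mask, so a zero return means "equal under the mask". A minimal sketch of the typical prefix-match use; the wrapper name is illustrative:

    #include <net/ipv6.h>

    /* Returns non-zero when addr lies inside prefix/mask. */
    static int sketch_addr_in_prefix(const struct in6_addr *addr,
                                     const struct in6_addr *prefix,
                                     const struct in6_addr *mask)
    {
            return ipv6_masked_addr_cmp(addr, mask, prefix) == 0;
    }
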
diff --git a/include/net/llc.h b/include/net/llc.h
index 1adb2ef3f6f7..f5024583fc8b 100644
--- a/include/net/llc.h
+++ b/include/net/llc.h
@@ -71,7 +71,7 @@ extern int llc_rcv(struct sk_buff *skb, struct net_device *dev,
71 struct packet_type *pt, struct net_device *orig_dev); 71 struct packet_type *pt, struct net_device *orig_dev);
72 72
73extern int llc_mac_hdr_init(struct sk_buff *skb, 73extern int llc_mac_hdr_init(struct sk_buff *skb,
74 unsigned char *sa, unsigned char *da); 74 const unsigned char *sa, const unsigned char *da);
75 75
76extern void llc_add_pack(int type, void (*handler)(struct llc_sap *sap, 76extern void llc_add_pack(int type, void (*handler)(struct llc_sap *sap,
77 struct sk_buff *skb)); 77 struct sk_buff *skb));
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index bbac87eeb422..91fa271a0064 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -22,6 +22,8 @@ enum {
22 ND_OPT_PREFIX_INFO = 3, /* RFC2461 */ 22 ND_OPT_PREFIX_INFO = 3, /* RFC2461 */
23 ND_OPT_REDIRECT_HDR = 4, /* RFC2461 */ 23 ND_OPT_REDIRECT_HDR = 4, /* RFC2461 */
24 ND_OPT_MTU = 5, /* RFC2461 */ 24 ND_OPT_MTU = 5, /* RFC2461 */
25 __ND_OPT_ARRAY_MAX,
26 ND_OPT_ROUTE_INFO = 24, /* RFC4191 */
25 __ND_OPT_MAX 27 __ND_OPT_MAX
26}; 28};
27 29
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 6fa9ae190741..b0666d66293f 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -68,6 +68,7 @@ struct neigh_parms
68 struct net_device *dev; 68 struct net_device *dev;
69 struct neigh_parms *next; 69 struct neigh_parms *next;
70 int (*neigh_setup)(struct neighbour *); 70 int (*neigh_setup)(struct neighbour *);
71 void (*neigh_destructor)(struct neighbour *);
71 struct neigh_table *tbl; 72 struct neigh_table *tbl;
72 73
73 void *sysctl_table; 74 void *sysctl_table;
@@ -145,7 +146,6 @@ struct neighbour
145struct neigh_ops 146struct neigh_ops
146{ 147{
147 int family; 148 int family;
148 void (*destructor)(struct neighbour *);
149 void (*solicit)(struct neighbour *, struct sk_buff*); 149 void (*solicit)(struct neighbour *, struct sk_buff*);
150 void (*error_report)(struct neighbour *, struct sk_buff*); 150 void (*error_report)(struct neighbour *, struct sk_buff*);
151 int (*output)(struct sk_buff*); 151 int (*output)(struct sk_buff*);
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 6d075ca16e6e..2743c156caa0 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -67,6 +67,18 @@ do { \
67 67
68struct nf_conntrack_helper; 68struct nf_conntrack_helper;
69 69
70/* nf_conn feature for connections that have a helper */
71struct nf_conn_help {
72 /* Helper. if any */
73 struct nf_conntrack_helper *helper;
74
75 union nf_conntrack_help help;
76
77 /* Current number of expected connections */
78 unsigned int expecting;
79};
80
81
70#include <net/netfilter/ipv4/nf_conntrack_ipv4.h> 82#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
71struct nf_conn 83struct nf_conn
72{ 84{
@@ -81,6 +93,9 @@ struct nf_conn
81 /* Have we seen traffic both ways yet? (bitset) */ 93 /* Have we seen traffic both ways yet? (bitset) */
82 unsigned long status; 94 unsigned long status;
83 95
96 /* If we were expected by an expectation, this will be it */
97 struct nf_conn *master;
98
84 /* Timer function; drops refcnt when it goes off. */ 99 /* Timer function; drops refcnt when it goes off. */
85 struct timer_list timeout; 100 struct timer_list timeout;
86 101
@@ -88,38 +103,22 @@ struct nf_conn
88 /* Accounting Information (same cache line as other written members) */ 103 /* Accounting Information (same cache line as other written members) */
89 struct ip_conntrack_counter counters[IP_CT_DIR_MAX]; 104 struct ip_conntrack_counter counters[IP_CT_DIR_MAX];
90#endif 105#endif
91 /* If we were expected by an expectation, this will be it */
92 struct nf_conn *master;
93
94 /* Current number of expected connections */
95 unsigned int expecting;
96 106
97 /* Unique ID that identifies this conntrack*/ 107 /* Unique ID that identifies this conntrack*/
98 unsigned int id; 108 unsigned int id;
99 109
100 /* Helper. if any */
101 struct nf_conntrack_helper *helper;
102
103 /* features - nat, helper, ... used by allocating system */ 110 /* features - nat, helper, ... used by allocating system */
104 u_int32_t features; 111 u_int32_t features;
105 112
106 /* Storage reserved for other modules: */
107
108 union nf_conntrack_proto proto;
109
110#if defined(CONFIG_NF_CONNTRACK_MARK) 113#if defined(CONFIG_NF_CONNTRACK_MARK)
111 u_int32_t mark; 114 u_int32_t mark;
112#endif 115#endif
113 116
114 /* These members are dynamically allocated. */ 117 /* Storage reserved for other modules: */
115 118 union nf_conntrack_proto proto;
116 union nf_conntrack_help *help;
117 119
118 /* Layer 3 dependent members. (ex: NAT) */ 120 /* features dynamically at the end: helper, nat (both optional) */
119 union { 121 char data[0];
120 struct nf_conntrack_ipv4 *ipv4;
121 } l3proto;
122 void *data[0];
123}; 122};
124 123
125struct nf_conntrack_expect 124struct nf_conntrack_expect
@@ -373,10 +372,23 @@ nf_conntrack_expect_event(enum ip_conntrack_expect_events event,
373#define NF_CT_F_NUM 4 372#define NF_CT_F_NUM 4
374 373
375extern int 374extern int
376nf_conntrack_register_cache(u_int32_t features, const char *name, size_t size, 375nf_conntrack_register_cache(u_int32_t features, const char *name, size_t size);
377 int (*init_conntrack)(struct nf_conn *, u_int32_t));
378extern void 376extern void
379nf_conntrack_unregister_cache(u_int32_t features); 377nf_conntrack_unregister_cache(u_int32_t features);
380 378
379/* valid combinations:
380 * basic: nf_conn, nf_conn .. nf_conn_help
381 * nat: nf_conn .. nf_conn_nat, nf_conn .. nf_conn_nat, nf_conn help
382 */
383static inline struct nf_conn_help *nfct_help(const struct nf_conn *ct)
384{
385 unsigned int offset = sizeof(struct nf_conn);
386
387 if (!(ct->features & NF_CT_F_HELP))
388 return NULL;
389
390 return (struct nf_conn_help *) ((void *)ct + offset);
391}
392
381#endif /* __KERNEL__ */ 393#endif /* __KERNEL__ */
382#endif /* _NF_CONNTRACK_H */ 394#endif /* _NF_CONNTRACK_H */
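
With helper state moved out of struct nf_conn, nfct_help() is the only sanctioned way to reach it: the nf_conn_help block sits immediately after the base structure and exists only when the conntrack was allocated with the NF_CT_F_HELP feature. A sketch of a caller:

    static void sketch_use_helper(struct nf_conn *ct)
    {
            struct nf_conn_help *help = nfct_help(ct);

            if (help == NULL || help->helper == NULL)
                    return;         /* no helper attached to this conntrack */

            /* Helper-specific state lives in help->help, and the number of
             * outstanding expectations in help->expecting. */
            help->expecting++;
    }
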
diff --git a/include/net/scm.h b/include/net/scm.h
index c3fa3d5ab606..540619cb7160 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -37,10 +37,12 @@ static __inline__ void scm_destroy(struct scm_cookie *scm)
37static __inline__ int scm_send(struct socket *sock, struct msghdr *msg, 37static __inline__ int scm_send(struct socket *sock, struct msghdr *msg,
38 struct scm_cookie *scm) 38 struct scm_cookie *scm)
39{ 39{
40 memset(scm, 0, sizeof(*scm)); 40 struct task_struct *p = current;
41 scm->creds.uid = current->uid; 41 scm->creds.uid = p->uid;
42 scm->creds.gid = current->gid; 42 scm->creds.gid = p->gid;
43 scm->creds.pid = current->tgid; 43 scm->creds.pid = p->tgid;
44 scm->fp = NULL;
45 scm->seq = 0;
44 if (msg->msg_controllen <= 0) 46 if (msg->msg_controllen <= 0)
45 return 0; 47 return 0;
46 return __scm_send(sock, msg, scm); 48 return __scm_send(sock, msg, scm);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 072f407848a6..eba99f375517 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -514,6 +514,16 @@ struct sctp_af {
514 int optname, 514 int optname,
515 char __user *optval, 515 char __user *optval,
516 int __user *optlen); 516 int __user *optlen);
517 int (*compat_setsockopt) (struct sock *sk,
518 int level,
519 int optname,
520 char __user *optval,
521 int optlen);
522 int (*compat_getsockopt) (struct sock *sk,
523 int level,
524 int optname,
525 char __user *optval,
526 int __user *optlen);
517 struct dst_entry *(*get_dst) (struct sctp_association *asoc, 527 struct dst_entry *(*get_dst) (struct sctp_association *asoc,
518 union sctp_addr *daddr, 528 union sctp_addr *daddr,
519 union sctp_addr *saddr); 529 union sctp_addr *saddr);
diff --git a/include/net/sock.h b/include/net/sock.h
index 30758035d616..ec226f31dc2a 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -478,9 +478,9 @@ static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
478 rc = __condition; \ 478 rc = __condition; \
479 if (!rc) { \ 479 if (!rc) { \
480 *(__timeo) = schedule_timeout(*(__timeo)); \ 480 *(__timeo) = schedule_timeout(*(__timeo)); \
481 rc = __condition; \
482 } \ 481 } \
483 lock_sock(__sk); \ 482 lock_sock(__sk); \
483 rc = __condition; \
484 rc; \ 484 rc; \
485}) 485})
486 486
@@ -520,6 +520,14 @@ struct proto {
520 int (*getsockopt)(struct sock *sk, int level, 520 int (*getsockopt)(struct sock *sk, int level,
521 int optname, char __user *optval, 521 int optname, char __user *optval,
522 int __user *option); 522 int __user *option);
523 int (*compat_setsockopt)(struct sock *sk,
524 int level,
525 int optname, char __user *optval,
526 int optlen);
527 int (*compat_getsockopt)(struct sock *sk,
528 int level,
529 int optname, char __user *optval,
530 int __user *option);
523 int (*sendmsg)(struct kiocb *iocb, struct sock *sk, 531 int (*sendmsg)(struct kiocb *iocb, struct sock *sk,
524 struct msghdr *msg, size_t len); 532 struct msghdr *msg, size_t len);
525 int (*recvmsg)(struct kiocb *iocb, struct sock *sk, 533 int (*recvmsg)(struct kiocb *iocb, struct sock *sk,
@@ -816,6 +824,10 @@ extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
816 struct msghdr *msg, size_t size, int flags); 824 struct msghdr *msg, size_t size, int flags);
817extern int sock_common_setsockopt(struct socket *sock, int level, int optname, 825extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
818 char __user *optval, int optlen); 826 char __user *optval, int optlen);
827extern int compat_sock_common_getsockopt(struct socket *sock, int level,
828 int optname, char __user *optval, int __user *optlen);
829extern int compat_sock_common_setsockopt(struct socket *sock, int level,
830 int optname, char __user *optval, int optlen);
819 831
820extern void sk_common_release(struct sock *sk); 832extern void sk_common_release(struct sock *sk);
821 833
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 77f21c65bbca..9418f4d1afbb 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -60,6 +60,9 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
60/* Minimal RCV_MSS. */ 60/* Minimal RCV_MSS. */
61#define TCP_MIN_RCVMSS 536U 61#define TCP_MIN_RCVMSS 536U
62 62
63/* The least MTU to use for probing */
64#define TCP_BASE_MSS 512
65
63/* After receiving this amount of duplicate ACKs fast retransmit starts. */ 66/* After receiving this amount of duplicate ACKs fast retransmit starts. */
64#define TCP_FASTRETRANS_THRESH 3 67#define TCP_FASTRETRANS_THRESH 3
65 68
@@ -219,6 +222,9 @@ extern int sysctl_tcp_nometrics_save;
219extern int sysctl_tcp_moderate_rcvbuf; 222extern int sysctl_tcp_moderate_rcvbuf;
220extern int sysctl_tcp_tso_win_divisor; 223extern int sysctl_tcp_tso_win_divisor;
221extern int sysctl_tcp_abc; 224extern int sysctl_tcp_abc;
225extern int sysctl_tcp_mtu_probing;
226extern int sysctl_tcp_base_mss;
227extern int sysctl_tcp_workaround_signed_windows;
222 228
223extern atomic_t tcp_memory_allocated; 229extern atomic_t tcp_memory_allocated;
224extern atomic_t tcp_sockets_allocated; 230extern atomic_t tcp_sockets_allocated;
@@ -347,6 +353,12 @@ extern int tcp_getsockopt(struct sock *sk, int level,
347extern int tcp_setsockopt(struct sock *sk, int level, 353extern int tcp_setsockopt(struct sock *sk, int level,
348 int optname, char __user *optval, 354 int optname, char __user *optval,
349 int optlen); 355 int optlen);
356extern int compat_tcp_getsockopt(struct sock *sk,
357 int level, int optname,
358 char __user *optval, int __user *optlen);
359extern int compat_tcp_setsockopt(struct sock *sk,
360 int level, int optname,
361 char __user *optval, int optlen);
350extern void tcp_set_keepalive(struct sock *sk, int val); 362extern void tcp_set_keepalive(struct sock *sk, int val);
351extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, 363extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk,
352 struct msghdr *msg, 364 struct msghdr *msg,
@@ -447,6 +459,10 @@ extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
447 459
448extern void tcp_initialize_rcv_mss(struct sock *sk); 460extern void tcp_initialize_rcv_mss(struct sock *sk);
449 461
462extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
463extern int tcp_mss_to_mtu(struct sock *sk, int mss);
464extern void tcp_mtup_init(struct sock *sk);
465
450static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd) 466static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
451{ 467{
452 tp->pred_flags = htonl((tp->tcp_header_len << 26) | 468 tp->pred_flags = htonl((tp->tcp_header_len << 26) |
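
The new tcp_mtu_to_mss()/tcp_mss_to_mtu() helpers, together with the icsk_mtup block added in inet_connection_sock.h, form the interface for MTU probing (packetization-layer path MTU discovery). The exact header accounting lives in tcp_output.c, but the rough relationship, assuming plain IPv4 with no options, is:

    /* MSS = MTU - IP header - TCP header
     *     = 1500 - 20 - 20 = 1460            (standard Ethernet)
     * so a probe of TCP_BASE_MSS (512) corresponds to an MTU of about 552,
     * and icsk_mtup.search_low/search_high bound the MTUs still worth probing.
     */
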
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 8d362c49b8a9..61b7504fc2ba 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -11,6 +11,7 @@
11#include <linux/crypto.h> 11#include <linux/crypto.h>
12#include <linux/pfkeyv2.h> 12#include <linux/pfkeyv2.h>
13#include <linux/in6.h> 13#include <linux/in6.h>
14#include <linux/mutex.h>
14 15
15#include <net/sock.h> 16#include <net/sock.h>
16#include <net/dst.h> 17#include <net/dst.h>
@@ -20,7 +21,11 @@
20 21
21#define XFRM_ALIGN8(len) (((len) + 7) & ~7) 22#define XFRM_ALIGN8(len) (((len) + 7) & ~7)
22 23
23extern struct semaphore xfrm_cfg_sem; 24extern struct sock *xfrm_nl;
25extern u32 sysctl_xfrm_aevent_etime;
26extern u32 sysctl_xfrm_aevent_rseqth;
27
28extern struct mutex xfrm_cfg_mutex;
24 29
25/* Organization of SPD aka "XFRM rules" 30/* Organization of SPD aka "XFRM rules"
26 ------------------------------------ 31 ------------------------------------
@@ -135,6 +140,16 @@ struct xfrm_state
135 /* State for replay detection */ 140 /* State for replay detection */
136 struct xfrm_replay_state replay; 141 struct xfrm_replay_state replay;
137 142
143 /* Replay detection state at the time we sent the last notification */
144 struct xfrm_replay_state preplay;
145
146 /* Replay detection notification settings */
147 u32 replay_maxage;
148 u32 replay_maxdiff;
149
150 /* Replay detection notification timer */
151 struct timer_list rtimer;
152
138 /* Statistics */ 153 /* Statistics */
139 struct xfrm_stats stats; 154 struct xfrm_stats stats;
140 155
@@ -169,6 +184,7 @@ struct km_event
169 u32 hard; 184 u32 hard;
170 u32 proto; 185 u32 proto;
171 u32 byid; 186 u32 byid;
187 u32 aevent;
172 } data; 188 } data;
173 189
174 u32 seq; 190 u32 seq;
@@ -199,10 +215,13 @@ extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
199extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo); 215extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
200extern void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c); 216extern void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c);
201extern void km_state_notify(struct xfrm_state *x, struct km_event *c); 217extern void km_state_notify(struct xfrm_state *x, struct km_event *c);
202
203#define XFRM_ACQ_EXPIRES 30 218#define XFRM_ACQ_EXPIRES 30
204 219
205struct xfrm_tmpl; 220struct xfrm_tmpl;
221extern int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
222extern void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
223extern int __xfrm_state_delete(struct xfrm_state *x);
224
206struct xfrm_state_afinfo { 225struct xfrm_state_afinfo {
207 unsigned short family; 226 unsigned short family;
208 rwlock_t lock; 227 rwlock_t lock;
@@ -305,7 +324,21 @@ struct xfrm_policy
305 struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH]; 324 struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
306}; 325};
307 326
308#define XFRM_KM_TIMEOUT 30 327#define XFRM_KM_TIMEOUT 30
328/* which seqno */
329#define XFRM_REPLAY_SEQ 1
330#define XFRM_REPLAY_OSEQ 2
331#define XFRM_REPLAY_SEQ_MASK 3
332/* what happened */
333#define XFRM_REPLAY_UPDATE XFRM_AE_CR
334#define XFRM_REPLAY_TIMEOUT XFRM_AE_CE
335
336/* default aevent timeout in units of 100ms */
337#define XFRM_AE_ETIME 10
338/* Async Event timer multiplier */
339#define XFRM_AE_ETH_M 10
340/* default seq threshold size */
341#define XFRM_AE_SEQT_SIZE 2
309 342
310struct xfrm_mgr 343struct xfrm_mgr
311{ 344{
@@ -865,6 +898,7 @@ extern int xfrm_state_delete(struct xfrm_state *x);
865extern void xfrm_state_flush(u8 proto); 898extern void xfrm_state_flush(u8 proto);
866extern int xfrm_replay_check(struct xfrm_state *x, u32 seq); 899extern int xfrm_replay_check(struct xfrm_state *x, u32 seq);
867extern void xfrm_replay_advance(struct xfrm_state *x, u32 seq); 900extern void xfrm_replay_advance(struct xfrm_state *x, u32 seq);
901extern void xfrm_replay_notify(struct xfrm_state *x, int event);
868extern int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb); 902extern int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb);
869extern int xfrm_state_mtu(struct xfrm_state *x, int mtu); 903extern int xfrm_state_mtu(struct xfrm_state *x, int mtu);
870extern int xfrm_init_state(struct xfrm_state *x); 904extern int xfrm_init_state(struct xfrm_state *x);
@@ -924,7 +958,7 @@ extern void xfrm_init_pmtu(struct dst_entry *dst);
924 958
925extern wait_queue_head_t km_waitq; 959extern wait_queue_head_t km_waitq;
926extern int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport); 960extern int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport);
927extern void km_policy_expired(struct xfrm_policy *pol, int dir, int hard); 961extern void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid);
928 962
929extern void xfrm_input_init(void); 963extern void xfrm_input_init(void);
930extern int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, u32 *spi, u32 *seq); 964extern int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, u32 *spi, u32 *seq);
@@ -965,4 +999,24 @@ static inline int xfrm_policy_id2dir(u32 index)
965 return index & 7; 999 return index & 7;
966} 1000}
967 1001
1002static inline int xfrm_aevent_is_on(void)
1003{
1004 struct sock *nlsk;
1005 int ret = 0;
1006
1007 rcu_read_lock();
1008 nlsk = rcu_dereference(xfrm_nl);
1009 if (nlsk)
1010 ret = netlink_has_listeners(nlsk, XFRMNLGRP_AEVENTS);
1011 rcu_read_unlock();
1012 return ret;
1013}
1014
1015static inline void xfrm_aevent_doreplay(struct xfrm_state *x)
1016{
1017 if (xfrm_aevent_is_on())
1018 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
1019}
1020
1021
968#endif /* _NET_XFRM_H */ 1022#endif /* _NET_XFRM_H */
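The xfrm.h changes above replace xfrm_cfg_sem with xfrm_cfg_mutex and add asynchronous replay-event notification: per-state preplay/replay_maxage/replay_maxdiff fields, the XFRM_AE_* defaults, and the xfrm_aevent_is_on()/xfrm_aevent_doreplay() inlines, which only emit an event when a listener is subscribed to XFRMNLGRP_AEVENTS. A hedged sketch of the intended call pattern on the receive path, using only helpers declared in this header (locking and the actual input path are elided, and the return convention of xfrm_replay_check() is an assumption here):

static void example_xfrm_input_replay(struct xfrm_state *x, u32 seq)
{
	/* Assumed: non-zero means the sequence number fails the replay check. */
	if (xfrm_replay_check(x, seq))
		return;

	/* Accept the packet and advance the replay window ... */
	xfrm_replay_advance(x, seq);

	/* ... then, if anyone listens on XFRMNLGRP_AEVENTS, emit an
	 * XFRM_REPLAY_UPDATE notification via xfrm_replay_notify(). */
	xfrm_aevent_doreplay(x);
}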
diff --git a/include/rdma/ib_fmr_pool.h b/include/rdma/ib_fmr_pool.h
index 86b7e93f198b..4ace54cd0cce 100644
--- a/include/rdma/ib_fmr_pool.h
+++ b/include/rdma/ib_fmr_pool.h
@@ -43,6 +43,7 @@ struct ib_fmr_pool;
43/** 43/**
44 * struct ib_fmr_pool_param - Parameters for creating FMR pool 44 * struct ib_fmr_pool_param - Parameters for creating FMR pool
45 * @max_pages_per_fmr:Maximum number of pages per map request. 45 * @max_pages_per_fmr:Maximum number of pages per map request.
46 * @page_shift: Log2 of sizeof "pages" mapped by this fmr
46 * @access:Access flags for FMRs in pool. 47 * @access:Access flags for FMRs in pool.
47 * @pool_size:Number of FMRs to allocate for pool. 48 * @pool_size:Number of FMRs to allocate for pool.
48 * @dirty_watermark:Flush is triggered when @dirty_watermark dirty 49 * @dirty_watermark:Flush is triggered when @dirty_watermark dirty
@@ -55,6 +56,7 @@ struct ib_fmr_pool;
55 */ 56 */
56struct ib_fmr_pool_param { 57struct ib_fmr_pool_param {
57 int max_pages_per_fmr; 58 int max_pages_per_fmr;
59 int page_shift;
58 enum ib_access_flags access; 60 enum ib_access_flags access;
59 int pool_size; 61 int pool_size;
60 int dirty_watermark; 62 int dirty_watermark;
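The new page_shift member makes the FMR pool's page size explicit: a pool now maps pages of 1 << page_shift bytes. A minimal, hedged sketch of filling the parameter block (the numeric values are arbitrary, and the remaining members of the full structure are left at zero):

struct ib_fmr_pool_param example_params = {
	.max_pages_per_fmr = 64,
	.page_shift	   = PAGE_SHIFT,	/* new: log2 of the mapped page size */
	.access		   = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
	.pool_size	   = 1024,
	.dirty_watermark   = 32,
};
/* pool = ib_create_fmr_pool(pd, &example_params); */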
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 2c133506742b..51ab8eddb295 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -33,7 +33,7 @@
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * SOFTWARE. 34 * SOFTWARE.
35 * 35 *
36 * $Id: ib_mad.h 2775 2005-07-02 13:42:12Z halr $ 36 * $Id: ib_mad.h 5596 2006-03-03 01:00:07Z sean.hefty $
37 */ 37 */
38 38
39#if !defined( IB_MAD_H ) 39#if !defined( IB_MAD_H )
@@ -208,15 +208,23 @@ struct ib_class_port_info
208/** 208/**
209 * ib_mad_send_buf - MAD data buffer and work request for sends. 209 * ib_mad_send_buf - MAD data buffer and work request for sends.
210 * @next: A pointer used to chain together MADs for posting. 210 * @next: A pointer used to chain together MADs for posting.
211 * @mad: References an allocated MAD data buffer. 211 * @mad: References an allocated MAD data buffer for MADs that do not have
212 * RMPP active. For MADs using RMPP, references the common and management
213 * class specific headers.
212 * @mad_agent: MAD agent that allocated the buffer. 214 * @mad_agent: MAD agent that allocated the buffer.
213 * @ah: The address handle to use when sending the MAD. 215 * @ah: The address handle to use when sending the MAD.
214 * @context: User-controlled context fields. 216 * @context: User-controlled context fields.
217 * @hdr_len: Indicates the size of the data header of the MAD. This length
218 * includes the common MAD, RMPP, and class specific headers.
219 * @data_len: Indicates the total size of user-transferred data.
220 * @seg_count: The number of RMPP segments allocated for this send.
221 * @seg_size: Size of each RMPP segment.
215 * @timeout_ms: Time to wait for a response. 222 * @timeout_ms: Time to wait for a response.
216 * @retries: Number of times to retry a request for a response. 223 * @retries: Number of times to retry a request for a response.
217 * 224 *
218 * Users are responsible for initializing the MAD buffer itself, with the 225 * Users are responsible for initializing the MAD buffer itself, with the
219 * exception of specifying the payload length field in any RMPP MAD. 226 * exception of any RMPP header. Additional segment buffer space allocated
227 * beyond data_len is padding.
220 */ 228 */
221struct ib_mad_send_buf { 229struct ib_mad_send_buf {
222 struct ib_mad_send_buf *next; 230 struct ib_mad_send_buf *next;
@@ -224,6 +232,10 @@ struct ib_mad_send_buf {
224 struct ib_mad_agent *mad_agent; 232 struct ib_mad_agent *mad_agent;
225 struct ib_ah *ah; 233 struct ib_ah *ah;
226 void *context[2]; 234 void *context[2];
235 int hdr_len;
236 int data_len;
237 int seg_count;
238 int seg_size;
227 int timeout_ms; 239 int timeout_ms;
228 int retries; 240 int retries;
229}; 241};
@@ -299,7 +311,7 @@ typedef void (*ib_mad_snoop_handler)(struct ib_mad_agent *mad_agent,
299 * @mad_recv_wc: Received work completion information on the received MAD. 311 * @mad_recv_wc: Received work completion information on the received MAD.
300 * 312 *
301 * MADs received in response to a send request operation will be handed to 313 * MADs received in response to a send request operation will be handed to
302 * the user after the send operation completes. All data buffers given 314 * the user before the send operation completes. All data buffers given
303 * to registered agents through this routine are owned by the receiving 315 * to registered agents through this routine are owned by the receiving
304 * client, except for snooping agents. Clients snooping MADs should not 316 * client, except for snooping agents. Clients snooping MADs should not
305 * modify the data referenced by @mad_recv_wc. 317 * modify the data referenced by @mad_recv_wc.
@@ -485,17 +497,6 @@ int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent);
485int ib_post_send_mad(struct ib_mad_send_buf *send_buf, 497int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
486 struct ib_mad_send_buf **bad_send_buf); 498 struct ib_mad_send_buf **bad_send_buf);
487 499
488/**
489 * ib_coalesce_recv_mad - Coalesces received MAD data into a single buffer.
490 * @mad_recv_wc: Work completion information for a received MAD.
491 * @buf: User-provided data buffer to receive the coalesced buffers. The
492 * referenced buffer should be at least the size of the mad_len specified
493 * by @mad_recv_wc.
494 *
495 * This call copies a chain of received MAD segments into a single data buffer,
496 * removing duplicated headers.
497 */
498void ib_coalesce_recv_mad(struct ib_mad_recv_wc *mad_recv_wc, void *buf);
499 500
500/** 501/**
501 * ib_free_recv_mad - Returns data buffers used to receive a MAD. 502 * ib_free_recv_mad - Returns data buffers used to receive a MAD.
@@ -590,9 +591,10 @@ int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
590 * with an initialized work request structure. Users may modify the returned 591 * with an initialized work request structure. Users may modify the returned
591 * MAD data buffer before posting the send. 592 * MAD data buffer before posting the send.
592 * 593 *
593 * The returned data buffer will be cleared. Users are responsible for 594 * The returned MAD header, class specific headers, and any padding will be
594 * initializing the common MAD and any class specific headers. If @rmpp_active 595 * cleared. Users are responsible for initializing the common MAD header,
595 * is set, the RMPP header will be initialized for sending. 596 * any class specific header, and MAD data area.
597 * If @rmpp_active is set, the RMPP header will be initialized for sending.
596 */ 598 */
597struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent, 599struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
598 u32 remote_qpn, u16 pkey_index, 600 u32 remote_qpn, u16 pkey_index,
@@ -601,6 +603,16 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
601 gfp_t gfp_mask); 603 gfp_t gfp_mask);
602 604
603/** 605/**
606 * ib_get_rmpp_segment - returns the data buffer for a given RMPP segment.
607 * @send_buf: Previously allocated send data buffer.
608 * @seg_num: number of segment to return
609 *
610 * This routine returns a pointer to the data buffer of an RMPP MAD.
611 * Users must provide synchronization to @send_buf around this call.
612 */
613void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num);
614
615/**
604 * ib_free_send_mad - Returns data buffers used to send a MAD. 616 * ib_free_send_mad - Returns data buffers used to send a MAD.
605 * @send_buf: Previously allocated send data buffer. 617 * @send_buf: Previously allocated send data buffer.
606 */ 618 */
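Together with the new seg_count/seg_size fields in ib_mad_send_buf, ib_get_rmpp_segment() replaces the removed ib_coalesce_recv_mad() model: rather than copying RMPP segments into one flat buffer, callers address each segment in place. A hedged sketch of walking a send buffer's segments (segment numbers are assumed to start at 1, following RMPP convention; callers must serialize access to send_buf themselves):

static void example_touch_rmpp_segments(struct ib_mad_send_buf *send_buf)
{
	int i;

	for (i = 1; i <= send_buf->seg_count; i++) {
		void *seg = ib_get_rmpp_segment(send_buf, i);

		if (!seg)
			break;
		/* Fill or inspect send_buf->seg_size bytes at seg; the
		 * last segment may contain padding beyond data_len. */
		memset(seg, 0, send_buf->seg_size);
	}
}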
diff --git a/include/rdma/ib_user_verbs.h b/include/rdma/ib_user_verbs.h
index 5ff1490c08db..338ed4333063 100644
--- a/include/rdma/ib_user_verbs.h
+++ b/include/rdma/ib_user_verbs.h
@@ -1,7 +1,8 @@
1/* 1/*
2 * Copyright (c) 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Cisco Systems. All rights reserved. 3 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 PathScale, Inc. All rights reserved. 4 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
5 * Copyright (c) 2006 Mellanox Technologies. All rights reserved.
5 * 6 *
6 * This software is available to you under a choice of one of two 7 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU 8 * licenses. You may choose to be licensed under the terms of the GNU
@@ -43,7 +44,7 @@
43 * Increment this value if any changes that break userspace ABI 44 * Increment this value if any changes that break userspace ABI
44 * compatibility are made. 45 * compatibility are made.
45 */ 46 */
46#define IB_USER_VERBS_ABI_VERSION 4 47#define IB_USER_VERBS_ABI_VERSION 6
47 48
48enum { 49enum {
49 IB_USER_VERBS_CMD_GET_CONTEXT, 50 IB_USER_VERBS_CMD_GET_CONTEXT,
@@ -265,6 +266,17 @@ struct ib_uverbs_create_cq_resp {
265 __u32 cqe; 266 __u32 cqe;
266}; 267};
267 268
269struct ib_uverbs_resize_cq {
270 __u64 response;
271 __u32 cq_handle;
272 __u32 cqe;
273 __u64 driver_data[0];
274};
275
276struct ib_uverbs_resize_cq_resp {
277 __u32 cqe;
278};
279
268struct ib_uverbs_poll_cq { 280struct ib_uverbs_poll_cq {
269 __u64 response; 281 __u64 response;
270 __u32 cq_handle; 282 __u32 cq_handle;
@@ -338,6 +350,7 @@ struct ib_uverbs_create_qp_resp {
338 __u32 max_send_sge; 350 __u32 max_send_sge;
339 __u32 max_recv_sge; 351 __u32 max_recv_sge;
340 __u32 max_inline_data; 352 __u32 max_inline_data;
353 __u32 reserved;
341}; 354};
342 355
343/* 356/*
@@ -359,6 +372,47 @@ struct ib_uverbs_qp_dest {
359 __u8 port_num; 372 __u8 port_num;
360}; 373};
361 374
375struct ib_uverbs_query_qp {
376 __u64 response;
377 __u32 qp_handle;
378 __u32 attr_mask;
379 __u64 driver_data[0];
380};
381
382struct ib_uverbs_query_qp_resp {
383 struct ib_uverbs_qp_dest dest;
384 struct ib_uverbs_qp_dest alt_dest;
385 __u32 max_send_wr;
386 __u32 max_recv_wr;
387 __u32 max_send_sge;
388 __u32 max_recv_sge;
389 __u32 max_inline_data;
390 __u32 qkey;
391 __u32 rq_psn;
392 __u32 sq_psn;
393 __u32 dest_qp_num;
394 __u32 qp_access_flags;
395 __u16 pkey_index;
396 __u16 alt_pkey_index;
397 __u8 qp_state;
398 __u8 cur_qp_state;
399 __u8 path_mtu;
400 __u8 path_mig_state;
401 __u8 en_sqd_async_notify;
402 __u8 max_rd_atomic;
403 __u8 max_dest_rd_atomic;
404 __u8 min_rnr_timer;
405 __u8 port_num;
406 __u8 timeout;
407 __u8 retry_cnt;
408 __u8 rnr_retry;
409 __u8 alt_port_num;
410 __u8 alt_timeout;
411 __u8 sq_sig_all;
412 __u8 reserved[5];
413 __u64 driver_data[0];
414};
415
362struct ib_uverbs_modify_qp { 416struct ib_uverbs_modify_qp {
363 struct ib_uverbs_qp_dest dest; 417 struct ib_uverbs_qp_dest dest;
364 struct ib_uverbs_qp_dest alt_dest; 418 struct ib_uverbs_qp_dest alt_dest;
@@ -415,7 +469,7 @@ struct ib_uverbs_sge {
415}; 469};
416 470
417struct ib_uverbs_send_wr { 471struct ib_uverbs_send_wr {
418 __u64 wr_id; 472 __u64 wr_id;
419 __u32 num_sge; 473 __u32 num_sge;
420 __u32 opcode; 474 __u32 opcode;
421 __u32 send_flags; 475 __u32 send_flags;
@@ -489,7 +543,7 @@ struct ib_uverbs_post_srq_recv_resp {
489 543
490struct ib_uverbs_global_route { 544struct ib_uverbs_global_route {
491 __u8 dgid[16]; 545 __u8 dgid[16];
492 __u32 flow_label; 546 __u32 flow_label;
493 __u8 sgid_index; 547 __u8 sgid_index;
494 __u8 hop_limit; 548 __u8 hop_limit;
495 __u8 traffic_class; 549 __u8 traffic_class;
@@ -551,6 +605,9 @@ struct ib_uverbs_create_srq {
551 605
552struct ib_uverbs_create_srq_resp { 606struct ib_uverbs_create_srq_resp {
553 __u32 srq_handle; 607 __u32 srq_handle;
608 __u32 max_wr;
609 __u32 max_sge;
610 __u32 reserved;
554}; 611};
555 612
556struct ib_uverbs_modify_srq { 613struct ib_uverbs_modify_srq {
@@ -561,6 +618,20 @@ struct ib_uverbs_modify_srq {
561 __u64 driver_data[0]; 618 __u64 driver_data[0];
562}; 619};
563 620
621struct ib_uverbs_query_srq {
622 __u64 response;
623 __u32 srq_handle;
624 __u32 reserved;
625 __u64 driver_data[0];
626};
627
628struct ib_uverbs_query_srq_resp {
629 __u32 max_wr;
630 __u32 max_sge;
631 __u32 srq_limit;
632 __u32 reserved;
633};
634
564struct ib_uverbs_destroy_srq { 635struct ib_uverbs_destroy_srq {
565 __u64 response; 636 __u64 response;
566 __u32 srq_handle; 637 __u32 srq_handle;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 22fc886b9695..c1ad6273ac6c 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -5,7 +5,7 @@
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 5 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved. 6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
8 * Copyright (c) 2005 Cisco Systems. All rights reserved. 8 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
9 * 9 *
10 * This software is available to you under a choice of one of two 10 * This software is available to you under a choice of one of two
11 * licenses. You may choose to be licensed under the terms of the GNU 11 * licenses. You may choose to be licensed under the terms of the GNU
@@ -222,11 +222,13 @@ struct ib_port_attr {
222}; 222};
223 223
224enum ib_device_modify_flags { 224enum ib_device_modify_flags {
225 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 225 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
226 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
226}; 227};
227 228
228struct ib_device_modify { 229struct ib_device_modify {
229 u64 sys_image_guid; 230 u64 sys_image_guid;
231 char node_desc[64];
230}; 232};
231 233
232enum ib_port_modify_flags { 234enum ib_port_modify_flags {
@@ -649,7 +651,7 @@ struct ib_mw_bind {
649struct ib_fmr_attr { 651struct ib_fmr_attr {
650 int max_pages; 652 int max_pages;
651 int max_maps; 653 int max_maps;
652 u8 page_size; 654 u8 page_shift;
653}; 655};
654 656
655struct ib_ucontext { 657struct ib_ucontext {
@@ -880,7 +882,8 @@ struct ib_device {
880 struct ib_ucontext *context, 882 struct ib_ucontext *context,
881 struct ib_udata *udata); 883 struct ib_udata *udata);
882 int (*destroy_cq)(struct ib_cq *cq); 884 int (*destroy_cq)(struct ib_cq *cq);
883 int (*resize_cq)(struct ib_cq *cq, int cqe); 885 int (*resize_cq)(struct ib_cq *cq, int cqe,
886 struct ib_udata *udata);
884 int (*poll_cq)(struct ib_cq *cq, int num_entries, 887 int (*poll_cq)(struct ib_cq *cq, int num_entries,
885 struct ib_wc *wc); 888 struct ib_wc *wc);
886 int (*peek_cq)(struct ib_cq *cq, int wc_cnt); 889 int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
@@ -950,6 +953,7 @@ struct ib_device {
950 u64 uverbs_cmd_mask; 953 u64 uverbs_cmd_mask;
951 int uverbs_abi_ver; 954 int uverbs_abi_ver;
952 955
956 char node_desc[64];
953 __be64 node_guid; 957 __be64 node_guid;
954 u8 node_type; 958 u8 node_type;
955 u8 phys_port_cnt; 959 u8 phys_port_cnt;
@@ -986,6 +990,24 @@ static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len
986 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0; 990 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
987} 991}
988 992
993/**
994 * ib_modify_qp_is_ok - Check that the supplied attribute mask
995 * contains all required attributes and no attributes not allowed for
996 * the given QP state transition.
997 * @cur_state: Current QP state
998 * @next_state: Next QP state
999 * @type: QP type
1000 * @mask: Mask of supplied QP attributes
1001 *
1002 * This function is a helper function that a low-level driver's
1003 * modify_qp method can use to validate the consumer's input. It
1004 * checks that cur_state and next_state are valid QP states, that a
1005 * transition from cur_state to next_state is allowed by the IB spec,
1006 * and that the attribute mask supplied is allowed for the transition.
1007 */
1008int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
1009 enum ib_qp_type type, enum ib_qp_attr_mask mask);
1010
989int ib_register_event_handler (struct ib_event_handler *event_handler); 1011int ib_register_event_handler (struct ib_event_handler *event_handler);
990int ib_unregister_event_handler(struct ib_event_handler *event_handler); 1012int ib_unregister_event_handler(struct ib_event_handler *event_handler);
991void ib_dispatch_event(struct ib_event *event); 1013void ib_dispatch_event(struct ib_event *event);
@@ -1078,7 +1100,9 @@ int ib_destroy_ah(struct ib_ah *ah);
1078 * ib_create_srq - Creates a SRQ associated with the specified protection 1100 * ib_create_srq - Creates a SRQ associated with the specified protection
1079 * domain. 1101 * domain.
1080 * @pd: The protection domain associated with the SRQ. 1102 * @pd: The protection domain associated with the SRQ.
1081 * @srq_init_attr: A list of initial attributes required to create the SRQ. 1103 * @srq_init_attr: A list of initial attributes required to create the
1104 * SRQ. If SRQ creation succeeds, then the attributes are updated to
1105 * the actual capabilities of the created SRQ.
1082 * 1106 *
1083 * srq_attr->max_wr and srq_attr->max_sge are read the determine the 1107 * srq_attr->max_wr and srq_attr->max_sge are read the determine the
1084 * requested size of the SRQ, and set to the actual values allocated 1108 * requested size of the SRQ, and set to the actual values allocated
@@ -1137,7 +1161,9 @@ static inline int ib_post_srq_recv(struct ib_srq *srq,
1137 * ib_create_qp - Creates a QP associated with the specified protection 1161 * ib_create_qp - Creates a QP associated with the specified protection
1138 * domain. 1162 * domain.
1139 * @pd: The protection domain associated with the QP. 1163 * @pd: The protection domain associated with the QP.
1140 * @qp_init_attr: A list of initial attributes required to create the QP. 1164 * @qp_init_attr: A list of initial attributes required to create the
1165 * QP. If QP creation succeeds, then the attributes are updated to
1166 * the actual capabilities of the created QP.
1141 */ 1167 */
1142struct ib_qp *ib_create_qp(struct ib_pd *pd, 1168struct ib_qp *ib_create_qp(struct ib_pd *pd,
1143 struct ib_qp_init_attr *qp_init_attr); 1169 struct ib_qp_init_attr *qp_init_attr);
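ib_modify_qp_is_ok() gives low-level drivers a single place to validate a QP state transition and its attribute mask against the IB spec before touching hardware. A hedged sketch of how a driver's modify_qp method might use it (how the driver tracks the current QP state is device-specific; the cur_state parameter below merely stands in for that):

static int example_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			     int attr_mask, enum ib_qp_state cur_state)
{
	enum ib_qp_state new_state =
		(attr_mask & IB_QP_STATE) ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
		return -EINVAL;

	/* ... program the transition into the hardware ... */
	return 0;
}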
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index fabd879c2f2e..d160880b2a87 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -35,6 +35,9 @@ static inline int scsi_sense_valid(struct scsi_sense_hdr *sshdr)
35} 35}
36 36
37 37
38extern void scsi_eh_finish_cmd(struct scsi_cmnd *scmd,
39 struct list_head *done_q);
40extern void scsi_eh_flush_done_q(struct list_head *done_q);
38extern void scsi_report_bus_reset(struct Scsi_Host *, int); 41extern void scsi_report_bus_reset(struct Scsi_Host *, int);
39extern void scsi_report_device_reset(struct Scsi_Host *, int, int); 42extern void scsi_report_device_reset(struct Scsi_Host *, int, int);
40extern int scsi_block_when_processing_errors(struct scsi_device *); 43extern int scsi_block_when_processing_errors(struct scsi_device *);
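The two helpers exported above formalize the pattern used by SCSI error-handling strategy routines: commands whose recovery is finished are parked on a local list with scsi_eh_finish_cmd(), and the whole list is handed back to the midlayer in one go with scsi_eh_flush_done_q(), which retries or completes each command. A hedged sketch of that shape (the actual recovery work is elided):

static void example_eh_strategy(struct scsi_cmnd *scmd)
{
	LIST_HEAD(done_q);

	/* ... attempt to recover scmd (abort, device or bus reset, ...) ... */

	/* Recovery is finished for this command, one way or the other. */
	scsi_eh_finish_cmd(scmd, &done_q);

	/* Retry or complete everything collected on done_q. */
	scsi_eh_flush_done_q(&done_q);
}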