path: root/include
Diffstat (limited to 'include')
-rw-r--r--  include/asm-alpha/system.h  29
-rw-r--r--  include/asm-arm/arch-ixp4xx/timex.h  6
-rw-r--r--  include/asm-arm/arch-s3c2410/usb-control.h  3
-rw-r--r--  include/asm-arm/arch-sa1100/mcp.h  21
-rw-r--r--  include/asm-arm/bug.h  2
-rw-r--r--  include/asm-arm/cpu-multi32.h  2
-rw-r--r--  include/asm-arm/cpu-single.h  2
-rw-r--r--  include/asm-arm/hardware/gic.h  41
-rw-r--r--  include/asm-arm/pgtable.h  14
-rw-r--r--  include/asm-arm/unistd.h  5
-rw-r--r--  include/asm-i386/pci.h  4
-rw-r--r--  include/asm-i386/processor.h  2
-rw-r--r--  include/asm-ia64/io.h  8
-rw-r--r--  include/asm-ia64/iosapic.h  4
-rw-r--r--  include/asm-m32r/smp.h  2
-rw-r--r--  include/asm-m68k/page.h  6
-rw-r--r--  include/asm-ppc/ibm44x.h  4
-rw-r--r--  include/asm-ppc/ppc4xx_dma.h  2
-rw-r--r--  include/asm-ppc/time.h  2
-rw-r--r--  include/asm-ppc64/bug.h  7
-rw-r--r--  include/asm-ppc64/iSeries/LparMap.h  9
-rw-r--r--  include/asm-s390/uaccess.h  21
-rw-r--r--  include/asm-sh/unistd.h  10
-rw-r--r--  include/asm-sh64/unistd.h  7
-rw-r--r--  include/asm-sparc/processor.h  1
-rw-r--r--  include/asm-sparc/segment.h  6
-rw-r--r--  include/asm-sparc/system.h  1
-rw-r--r--  include/asm-sparc64/atomic.h  8
-rw-r--r--  include/asm-sparc64/bitops.h  4
-rw-r--r--  include/asm-sparc64/processor.h  1
-rw-r--r--  include/asm-sparc64/segment.h  6
-rw-r--r--  include/asm-sparc64/sfafsr.h  82
-rw-r--r--  include/asm-sparc64/spinlock.h  42
-rw-r--r--  include/asm-sparc64/system.h  17
-rw-r--r--  include/asm-sparc64/thread_info.h  5
-rw-r--r--  include/asm-um/page.h  4
-rw-r--r--  include/asm-x86_64/e820.h  2
-rw-r--r--  include/asm-x86_64/pci.h  4
-rw-r--r--  include/asm-x86_64/processor.h  2
-rw-r--r--  include/linux/8250_pci.h  39
-rw-r--r--  include/linux/ata.h  45
-rw-r--r--  include/linux/ethtool.h  4
-rw-r--r--  include/linux/fs.h  8
-rw-r--r--  include/linux/fsnotify.h  6
-rw-r--r--  include/linux/inotify.h  4
-rw-r--r--  include/linux/libata.h  49
-rw-r--r--  include/linux/mii.h  9
-rw-r--r--  include/linux/mmc/host.h  4
-rw-r--r--  include/linux/mod_devicetable.h  10
-rw-r--r--  include/linux/netpoll.h  20
-rw-r--r--  include/linux/nfs_fs.h  42
-rw-r--r--  include/linux/pci.h  3
-rw-r--r--  include/linux/pci_ids.h  12
-rw-r--r--  include/linux/phy.h  377
-rw-r--r--  include/linux/serialP.h  40
-rw-r--r--  include/linux/skbuff.h  2
-rw-r--r--  include/linux/sunrpc/xdr.h  1
-rw-r--r--  include/net/ax25.h  18
-rw-r--r--  include/net/sock.h  5
-rw-r--r--  include/rdma/ib_cache.h  105
-rw-r--r--  include/rdma/ib_cm.h  568
-rw-r--r--  include/rdma/ib_fmr_pool.h  93
-rw-r--r--  include/rdma/ib_mad.h  579
-rw-r--r--  include/rdma/ib_pack.h  245
-rw-r--r--  include/rdma/ib_sa.h  373
-rw-r--r--  include/rdma/ib_smi.h  94
-rw-r--r--  include/rdma/ib_user_cm.h  328
-rw-r--r--  include/rdma/ib_user_mad.h  137
-rw-r--r--  include/rdma/ib_user_verbs.h  422
-rw-r--r--  include/rdma/ib_verbs.h  1461
-rw-r--r--  include/scsi/scsi_transport.h  8
-rw-r--r--  include/sound/core.h  2
72 files changed, 5266 insertions, 245 deletions
diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h
index c08ce970ff8c..bdb4d66418f1 100644
--- a/include/asm-alpha/system.h
+++ b/include/asm-alpha/system.h
@@ -443,22 +443,19 @@ __xchg_u64(volatile long *m, unsigned long val)
443 if something tries to do an invalid xchg(). */ 443 if something tries to do an invalid xchg(). */
444extern void __xchg_called_with_bad_pointer(void); 444extern void __xchg_called_with_bad_pointer(void);
445 445
446static inline unsigned long 446#define __xchg(ptr, x, size) \
447__xchg(volatile void *ptr, unsigned long x, int size) 447({ \
448{ 448 unsigned long __xchg__res; \
449 switch (size) { 449 volatile void *__xchg__ptr = (ptr); \
450 case 1: 450 switch (size) { \
451 return __xchg_u8(ptr, x); 451 case 1: __xchg__res = __xchg_u8(__xchg__ptr, x); break; \
452 case 2: 452 case 2: __xchg__res = __xchg_u16(__xchg__ptr, x); break; \
453 return __xchg_u16(ptr, x); 453 case 4: __xchg__res = __xchg_u32(__xchg__ptr, x); break; \
454 case 4: 454 case 8: __xchg__res = __xchg_u64(__xchg__ptr, x); break; \
455 return __xchg_u32(ptr, x); 455 default: __xchg_called_with_bad_pointer(); __xchg__res = x; \
456 case 8: 456 } \
457 return __xchg_u64(ptr, x); 457 __xchg__res; \
458 } 458})
459 __xchg_called_with_bad_pointer();
460 return x;
461}
462 459
463#define xchg(ptr,x) \ 460#define xchg(ptr,x) \
464 ({ \ 461 ({ \
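The rewrite above keeps the kernel's usual link-time size check: __xchg_called_with_bad_pointer() is declared but never defined, so an xchg() on a size the switch does not handle only links if the compiler can prove the default branch dead. A minimal user-space sketch of the same trick (assumptions: GCC, with __sync_lock_test_and_set standing in for the Alpha assembly helpers; this is not the kernel macro itself):

/* Hedged sketch of the link-error size dispatch. */
extern void __xchg_called_with_bad_pointer(void);   /* intentionally never defined */

#define my_xchg(ptr, x) ({                                              \
        __typeof__(*(ptr)) __ret;                                       \
        switch (sizeof(*(ptr))) {                                       \
        case 4:                                                         \
        case 8:                                                         \
                __ret = __sync_lock_test_and_set((ptr), (x));           \
                break;                                                  \
        default:                                                        \
                __xchg_called_with_bad_pointer();                       \
                __ret = (x);                                            \
        }                                                               \
        __ret;                                                          \
})

Using my_xchg() on, say, a 2-byte object then fails at link time with an undefined reference instead of silently doing the wrong thing.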
diff --git a/include/asm-arm/arch-ixp4xx/timex.h b/include/asm-arm/arch-ixp4xx/timex.h
index 38c9d77d3727..3745e35cc030 100644
--- a/include/asm-arm/arch-ixp4xx/timex.h
+++ b/include/asm-arm/arch-ixp4xx/timex.h
@@ -7,7 +7,9 @@
7 7
8/* 8/*
9 * We use IXP425 General purpose timer for our timer needs, it runs at 9 * We use IXP425 General purpose timer for our timer needs, it runs at
10 * 66.66... MHz 10 * 66.66... MHz. We do a convulted calculation of CLOCK_TICK_RATE b/c the
11 * timer register ignores the bottom 2 bits of the LATCH value.
11 */ 12 */
12#define CLOCK_TICK_RATE (66666666) 13#define FREQ 66666666
14#define CLOCK_TICK_RATE (((FREQ / HZ & ~IXP4XX_OST_RELOAD_MASK) + 1) * HZ)
13 15
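The new comment is worth unpacking: the OS timer reload register ignores the low two bits of LATCH, so CLOCK_TICK_RATE is defined as the rate the hardware will actually deliver rather than the raw 66.66 MHz. A worked example, assuming HZ = 100 and IXP4XX_OST_RELOAD_MASK = 0x3 (neither value appears in this hunk):

/* Stand-alone arithmetic check of the macro above; assumed values noted. */
#include <stdio.h>

#define HZ                      100             /* assumption */
#define IXP4XX_OST_RELOAD_MASK  0x3             /* assumption: bottom 2 bits */
#define FREQ                    66666666
#define CLOCK_TICK_RATE (((FREQ / HZ & ~IXP4XX_OST_RELOAD_MASK) + 1) * HZ)

int main(void)
{
        printf("raw per-tick count = %d\n", FREQ / HZ);            /* 666666 */
        printf("CLOCK_TICK_RATE    = %d\n", (int)CLOCK_TICK_RATE); /* 66666500 */
        return 0;
}

The per-tick count 666666 has its low two bits cleared to 666664, then +1 and *HZ give 66666500, which is the rate the rest of the timekeeping code should assume.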
diff --git a/include/asm-arm/arch-s3c2410/usb-control.h b/include/asm-arm/arch-s3c2410/usb-control.h
index 1cc85a096b23..bd43b566db3e 100644
--- a/include/asm-arm/arch-s3c2410/usb-control.h
+++ b/include/asm-arm/arch-s3c2410/usb-control.h
@@ -12,6 +12,7 @@
12 * Changelog: 12 * Changelog:
13 * 11-Sep-2004 BJD Created file 13 * 11-Sep-2004 BJD Created file
14 * 21-Sep-2004 BJD Updated port info 14 * 21-Sep-2004 BJD Updated port info
15 * 09-Aug-2005 BJD Renamed s3c2410_report_oc s3c2410_usb_report_oc
15*/ 16*/
16 17
17#ifndef __ASM_ARCH_USBCONTROL_H 18#ifndef __ASM_ARCH_USBCONTROL_H
@@ -35,7 +36,7 @@ struct s3c2410_hcd_info {
35 void (*report_oc)(struct s3c2410_hcd_info *, int ports); 36 void (*report_oc)(struct s3c2410_hcd_info *, int ports);
36}; 37};
37 38
38static void inline s3c2410_report_oc(struct s3c2410_hcd_info *info, int ports) 39static void inline s3c2410_usb_report_oc(struct s3c2410_hcd_info *info, int ports)
39{ 40{
40 if (info->report_oc != NULL) { 41 if (info->report_oc != NULL) {
41 (info->report_oc)(info, ports); 42 (info->report_oc)(info, ports);
diff --git a/include/asm-arm/arch-sa1100/mcp.h b/include/asm-arm/arch-sa1100/mcp.h
new file mode 100644
index 000000000000..f58a22755c61
--- /dev/null
+++ b/include/asm-arm/arch-sa1100/mcp.h
@@ -0,0 +1,21 @@
1/*
2 * linux/include/asm-arm/arch-sa1100/mcp.h
3 *
4 * Copyright (C) 2005 Russell King.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#ifndef __ASM_ARM_ARCH_MCP_H
11#define __ASM_ARM_ARCH_MCP_H
12
13#include <linux/types.h>
14
15struct mcp_plat_data {
16 u32 mccr0;
17 u32 mccr1;
18 unsigned int sclk_rate;
19};
20
21#endif
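A board port supplies this structure as platform data for the SA11x0 MCP device. The fragment below is an illustrative sketch only; the values, the device name and the registration step are assumptions, not part of this patch:

#include <asm/arch/mcp.h>

/* Hypothetical board support file. */
static struct mcp_plat_data frobboard_mcp_data = {
        .mccr0          = 0,            /* board-specific MCCR0 bits */
        .mccr1          = 0,            /* board-specific MCCR1 bits */
        .sclk_rate      = 11981000,     /* example codec serial clock, ~11.98 MHz */
};

/* The board's init code would then attach this as dev.platform_data of the
 * "sa11x0-mcp" platform device so the MCP driver can read it at probe time. */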
diff --git a/include/asm-arm/bug.h b/include/asm-arm/bug.h
index 24d11672eb60..7fb02138f585 100644
--- a/include/asm-arm/bug.h
+++ b/include/asm-arm/bug.h
@@ -5,7 +5,7 @@
5 5
6#ifdef CONFIG_BUG 6#ifdef CONFIG_BUG
7#ifdef CONFIG_DEBUG_BUGVERBOSE 7#ifdef CONFIG_DEBUG_BUGVERBOSE
8extern volatile void __bug(const char *file, int line, void *data); 8extern void __bug(const char *file, int line, void *data) __attribute__((noreturn));
9 9
10/* give file/line information */ 10/* give file/line information */
11#define BUG() __bug(__FILE__, __LINE__, NULL) 11#define BUG() __bug(__FILE__, __LINE__, NULL)
diff --git a/include/asm-arm/cpu-multi32.h b/include/asm-arm/cpu-multi32.h
index ff48022e4720..4679f63688e9 100644
--- a/include/asm-arm/cpu-multi32.h
+++ b/include/asm-arm/cpu-multi32.h
@@ -31,7 +31,7 @@ extern struct processor {
31 /* 31 /*
32 * Special stuff for a reset 32 * Special stuff for a reset
33 */ 33 */
34 volatile void (*reset)(unsigned long addr); 34 void (*reset)(unsigned long addr) __attribute__((noreturn));
35 /* 35 /*
36 * Idle the processor 36 * Idle the processor
37 */ 37 */
diff --git a/include/asm-arm/cpu-single.h b/include/asm-arm/cpu-single.h
index b5ec5d54665d..6723e67244fa 100644
--- a/include/asm-arm/cpu-single.h
+++ b/include/asm-arm/cpu-single.h
@@ -41,4 +41,4 @@ extern int cpu_do_idle(void);
41extern void cpu_dcache_clean_area(void *, int); 41extern void cpu_dcache_clean_area(void *, int);
42extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); 42extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
43extern void cpu_set_pte(pte_t *ptep, pte_t pte); 43extern void cpu_set_pte(pte_t *ptep, pte_t pte);
44extern volatile void cpu_reset(unsigned long addr); 44extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
diff --git a/include/asm-arm/hardware/gic.h b/include/asm-arm/hardware/gic.h
new file mode 100644
index 000000000000..3fa5eb70f64e
--- /dev/null
+++ b/include/asm-arm/hardware/gic.h
@@ -0,0 +1,41 @@
1/*
2 * linux/include/asm-arm/hardware/gic.h
3 *
4 * Copyright (C) 2002 ARM Limited, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#ifndef __ASM_ARM_HARDWARE_GIC_H
11#define __ASM_ARM_HARDWARE_GIC_H
12
13#include <linux/compiler.h>
14
15#define GIC_CPU_CTRL 0x00
16#define GIC_CPU_PRIMASK 0x04
17#define GIC_CPU_BINPOINT 0x08
18#define GIC_CPU_INTACK 0x0c
19#define GIC_CPU_EOI 0x10
20#define GIC_CPU_RUNNINGPRI 0x14
21#define GIC_CPU_HIGHPRI 0x18
22
23#define GIC_DIST_CTRL 0x000
24#define GIC_DIST_CTR 0x004
25#define GIC_DIST_ENABLE_SET 0x100
26#define GIC_DIST_ENABLE_CLEAR 0x180
27#define GIC_DIST_PENDING_SET 0x200
28#define GIC_DIST_PENDING_CLEAR 0x280
29#define GIC_DIST_ACTIVE_BIT 0x300
30#define GIC_DIST_PRI 0x400
31#define GIC_DIST_TARGET 0x800
32#define GIC_DIST_CONFIG 0xc00
33#define GIC_DIST_SOFTINT 0xf00
34
35#ifndef __ASSEMBLY__
36void gic_dist_init(void __iomem *base);
37void gic_cpu_init(void __iomem *base);
38void gic_raise_softirq(cpumask_t cpumask, unsigned int irq);
39#endif
40
41#endif
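The three declarations at the bottom are the whole programming interface: a machine port initialises the distributor once, then each CPU interface. A hedged sketch of a typical call site (the base addresses and the static I/O mapping are assumptions, not part of this file):

#include <asm/hardware/gic.h>

/* Hypothetical machine init; the virtual addresses would normally come from
 * the platform's static I/O mapping rather than literal constants. */
#define FROB_GIC_DIST_VBASE     ((void __iomem *)0xfe001000)
#define FROB_GIC_CPU_VBASE      ((void __iomem *)0xfe000100)

static void __init frob_init_irq(void)
{
        gic_dist_init(FROB_GIC_DIST_VBASE);     /* global enables, priorities, targets */
        gic_cpu_init(FROB_GIC_CPU_VBASE);       /* per-CPU interface: priority mask, ack */
}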
diff --git a/include/asm-arm/pgtable.h b/include/asm-arm/pgtable.h
index a9892eb42a23..478c49b56e18 100644
--- a/include/asm-arm/pgtable.h
+++ b/include/asm-arm/pgtable.h
@@ -188,12 +188,18 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
188/* 188/*
189 * - extended small page/tiny page 189 * - extended small page/tiny page
190 */ 190 */
191#define PTE_EXT_XN (1 << 0) /* v6 */
191#define PTE_EXT_AP_MASK (3 << 4) 192#define PTE_EXT_AP_MASK (3 << 4)
193#define PTE_EXT_AP0 (1 << 4)
194#define PTE_EXT_AP1 (2 << 4)
192#define PTE_EXT_AP_UNO_SRO (0 << 4) 195#define PTE_EXT_AP_UNO_SRO (0 << 4)
193#define PTE_EXT_AP_UNO_SRW (1 << 4) 196#define PTE_EXT_AP_UNO_SRW (PTE_EXT_AP0)
194#define PTE_EXT_AP_URO_SRW (2 << 4) 197#define PTE_EXT_AP_URO_SRW (PTE_EXT_AP1)
195#define PTE_EXT_AP_URW_SRW (3 << 4) 198#define PTE_EXT_AP_URW_SRW (PTE_EXT_AP1|PTE_EXT_AP0)
196#define PTE_EXT_TEX(x) ((x) << 6) /* v5 */ 199#define PTE_EXT_TEX(x) ((x) << 6) /* v5 */
200#define PTE_EXT_APX (1 << 9) /* v6 */
201#define PTE_EXT_SHARED (1 << 10) /* v6 */
202#define PTE_EXT_NG (1 << 11) /* v6 */
197 203
198/* 204/*
199 * - small page 205 * - small page
@@ -224,6 +230,8 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
224#define L_PTE_WRITE (1 << 5) 230#define L_PTE_WRITE (1 << 5)
225#define L_PTE_EXEC (1 << 6) 231#define L_PTE_EXEC (1 << 6)
226#define L_PTE_DIRTY (1 << 7) 232#define L_PTE_DIRTY (1 << 7)
233#define L_PTE_SHARED (1 << 10) /* shared between CPUs (v6) */
234#define L_PTE_ASID (1 << 11) /* non-global (use ASID, v6) */
227 235
228#ifndef __ASSEMBLY__ 236#ifndef __ASSEMBLY__
229 237
diff --git a/include/asm-arm/unistd.h b/include/asm-arm/unistd.h
index ace27480886e..abb36e54c966 100644
--- a/include/asm-arm/unistd.h
+++ b/include/asm-arm/unistd.h
@@ -350,6 +350,11 @@
350#endif 350#endif
351 351
352#define __NR_vserver (__NR_SYSCALL_BASE+313) 352#define __NR_vserver (__NR_SYSCALL_BASE+313)
353#define __NR_ioprio_set (__NR_SYSCALL_BASE+314)
354#define __NR_ioprio_get (__NR_SYSCALL_BASE+315)
355#define __NR_inotify_init (__NR_SYSCALL_BASE+316)
356#define __NR_inotify_add_watch (__NR_SYSCALL_BASE+317)
357#define __NR_inotify_rm_watch (__NR_SYSCALL_BASE+318)
353 358
354/* 359/*
355 * The following SWIs are ARM private. 360 * The following SWIs are ARM private.
diff --git a/include/asm-i386/pci.h b/include/asm-i386/pci.h
index 2cbab30734d6..78c85985aee3 100644
--- a/include/asm-i386/pci.h
+++ b/include/asm-i386/pci.h
@@ -18,9 +18,11 @@ extern unsigned int pcibios_assign_all_busses(void);
18#define pcibios_scan_all_fns(a, b) 0 18#define pcibios_scan_all_fns(a, b) 0
19 19
20extern unsigned long pci_mem_start; 20extern unsigned long pci_mem_start;
21#define PCIBIOS_MIN_IO 0x4000 21#define PCIBIOS_MIN_IO 0x1000
22#define PCIBIOS_MIN_MEM (pci_mem_start) 22#define PCIBIOS_MIN_MEM (pci_mem_start)
23 23
24#define PCIBIOS_MIN_CARDBUS_IO 0x4000
25
24void pcibios_config_init(void); 26void pcibios_config_init(void);
25struct pci_bus * pcibios_scan_root(int bus); 27struct pci_bus * pcibios_scan_root(int bus);
26 28
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 5d06e6bd6ba0..d0d8b0160090 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -29,7 +29,7 @@ struct desc_struct {
29}; 29};
30 30
31#define desc_empty(desc) \ 31#define desc_empty(desc) \
32 (!((desc)->a + (desc)->b)) 32 (!((desc)->a | (desc)->b))
33 33
34#define desc_equal(desc1, desc2) \ 34#define desc_equal(desc1, desc2) \
35 (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b)) 35 (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
diff --git a/include/asm-ia64/io.h b/include/asm-ia64/io.h
index 491e9d1fc538..54e7637a326c 100644
--- a/include/asm-ia64/io.h
+++ b/include/asm-ia64/io.h
@@ -120,14 +120,6 @@ static inline void ___ia64_mmiowb(void)
120 ia64_mfa(); 120 ia64_mfa();
121} 121}
122 122
123static inline const unsigned long
124__ia64_get_io_port_base (void)
125{
126 extern unsigned long ia64_iobase;
127
128 return ia64_iobase;
129}
130
131static inline void* 123static inline void*
132__ia64_mk_io_addr (unsigned long port) 124__ia64_mk_io_addr (unsigned long port)
133{ 125{
diff --git a/include/asm-ia64/iosapic.h b/include/asm-ia64/iosapic.h
index 1093f35b3b90..a429fe225b07 100644
--- a/include/asm-ia64/iosapic.h
+++ b/include/asm-ia64/iosapic.h
@@ -75,6 +75,8 @@ extern int __devinit iosapic_init (unsigned long address,
75 unsigned int gsi_base); 75 unsigned int gsi_base);
76#ifdef CONFIG_HOTPLUG 76#ifdef CONFIG_HOTPLUG
77extern int iosapic_remove (unsigned int gsi_base); 77extern int iosapic_remove (unsigned int gsi_base);
78#else
79#define iosapic_remove(gsi_base) (-EINVAL)
78#endif /* CONFIG_HOTPLUG */ 80#endif /* CONFIG_HOTPLUG */
79extern int gsi_to_vector (unsigned int gsi); 81extern int gsi_to_vector (unsigned int gsi);
80extern int gsi_to_irq (unsigned int gsi); 82extern int gsi_to_irq (unsigned int gsi);
@@ -102,9 +104,7 @@ extern void __devinit map_iosapic_to_node (unsigned int, int);
102#else 104#else
103#define iosapic_system_init(pcat_compat) do { } while (0) 105#define iosapic_system_init(pcat_compat) do { } while (0)
104#define iosapic_init(address,gsi_base) (-EINVAL) 106#define iosapic_init(address,gsi_base) (-EINVAL)
105#ifdef CONFIG_HOTPLUG
106#define iosapic_remove(gsi_base) (-ENODEV) 107#define iosapic_remove(gsi_base) (-ENODEV)
107#endif /* CONFIG_HOTPLUG */
108#define iosapic_register_intr(gsi,polarity,trigger) (gsi) 108#define iosapic_register_intr(gsi,polarity,trigger) (gsi)
109#define iosapic_unregister_intr(irq) do { } while (0) 109#define iosapic_unregister_intr(irq) do { } while (0)
110#define iosapic_override_isa_irq(isa_irq,gsi,polarity,trigger) do { } while (0) 110#define iosapic_override_isa_irq(isa_irq,gsi,polarity,trigger) do { } while (0)
diff --git a/include/asm-m32r/smp.h b/include/asm-m32r/smp.h
index b9a20cdad65f..7885b7df84a2 100644
--- a/include/asm-m32r/smp.h
+++ b/include/asm-m32r/smp.h
@@ -61,9 +61,7 @@ extern physid_mask_t phys_cpu_present_map;
61 * Some lowlevel functions might want to know about 61 * Some lowlevel functions might want to know about
62 * the real CPU ID <-> CPU # mapping. 62 * the real CPU ID <-> CPU # mapping.
63 */ 63 */
64extern volatile int physid_2_cpu[NR_CPUS];
65extern volatile int cpu_2_physid[NR_CPUS]; 64extern volatile int cpu_2_physid[NR_CPUS];
66#define physid_to_cpu(physid) physid_2_cpu[physid]
67#define cpu_to_physid(cpu_id) cpu_2_physid[cpu_id] 65#define cpu_to_physid(cpu_id) cpu_2_physid[cpu_id]
68 66
69#define raw_smp_processor_id() (current_thread_info()->cpu) 67#define raw_smp_processor_id() (current_thread_info()->cpu)
diff --git a/include/asm-m68k/page.h b/include/asm-m68k/page.h
index 99a516709210..206313e2a817 100644
--- a/include/asm-m68k/page.h
+++ b/include/asm-m68k/page.h
@@ -138,13 +138,13 @@ extern unsigned long m68k_memoffset;
138#define __pa(vaddr) ((unsigned long)(vaddr)+m68k_memoffset) 138#define __pa(vaddr) ((unsigned long)(vaddr)+m68k_memoffset)
139#define __va(paddr) ((void *)((unsigned long)(paddr)-m68k_memoffset)) 139#define __va(paddr) ((void *)((unsigned long)(paddr)-m68k_memoffset))
140#else 140#else
141#define __pa(vaddr) virt_to_phys((void *)vaddr) 141#define __pa(vaddr) virt_to_phys((void *)(vaddr))
142#define __va(paddr) phys_to_virt((unsigned long)paddr) 142#define __va(paddr) phys_to_virt((unsigned long)(paddr))
143#endif 143#endif
144 144
145#else /* !CONFIG_SUN3 */ 145#else /* !CONFIG_SUN3 */
146/* This #define is a horrible hack to suppress lots of warnings. --m */ 146/* This #define is a horrible hack to suppress lots of warnings. --m */
147#define __pa(x) ___pa((unsigned long)x) 147#define __pa(x) ___pa((unsigned long)(x))
148static inline unsigned long ___pa(unsigned long x) 148static inline unsigned long ___pa(unsigned long x)
149{ 149{
150 if(x == 0) 150 if(x == 0)
diff --git a/include/asm-ppc/ibm44x.h b/include/asm-ppc/ibm44x.h
index 21e41c9b7267..e5374be86aef 100644
--- a/include/asm-ppc/ibm44x.h
+++ b/include/asm-ppc/ibm44x.h
@@ -423,11 +423,7 @@
423#define MQ0_CONFIG_SIZE_2G 0x0000c000 423#define MQ0_CONFIG_SIZE_2G 0x0000c000
424 424
425/* Internal SRAM Controller 440GX/440SP */ 425/* Internal SRAM Controller 440GX/440SP */
426#ifdef CONFIG_440SP
427#define DCRN_SRAM0_BASE 0x100
428#else /* 440GX */
429#define DCRN_SRAM0_BASE 0x000 426#define DCRN_SRAM0_BASE 0x000
430#endif
431 427
432#define DCRN_SRAM0_SB0CR (DCRN_SRAM0_BASE + 0x020) 428#define DCRN_SRAM0_SB0CR (DCRN_SRAM0_BASE + 0x020)
433#define DCRN_SRAM0_SB1CR (DCRN_SRAM0_BASE + 0x021) 429#define DCRN_SRAM0_SB1CR (DCRN_SRAM0_BASE + 0x021)
diff --git a/include/asm-ppc/ppc4xx_dma.h b/include/asm-ppc/ppc4xx_dma.h
index 8636cdbf6f8f..a415001165fa 100644
--- a/include/asm-ppc/ppc4xx_dma.h
+++ b/include/asm-ppc/ppc4xx_dma.h
@@ -285,7 +285,7 @@ typedef uint32_t sgl_handle_t;
285 285
286#define GET_DMA_POLARITY(chan) (DMAReq_ActiveLow(chan) | DMAAck_ActiveLow(chan) | EOT_ActiveLow(chan)) 286#define GET_DMA_POLARITY(chan) (DMAReq_ActiveLow(chan) | DMAAck_ActiveLow(chan) | EOT_ActiveLow(chan))
287 287
288#elif defined(CONFIG_STBXXX_DMA) /* stb03xxx */ 288#elif defined(CONFIG_STB03xxx) /* stb03xxx */
289 289
290#define DMA_PPC4xx_SIZE 4096 290#define DMA_PPC4xx_SIZE 4096
291 291
diff --git a/include/asm-ppc/time.h b/include/asm-ppc/time.h
index ce09b47fa819..321fb75b5f22 100644
--- a/include/asm-ppc/time.h
+++ b/include/asm-ppc/time.h
@@ -58,7 +58,7 @@ static __inline__ void set_dec(unsigned int val)
58/* Accessor functions for the timebase (RTC on 601) registers. */ 58/* Accessor functions for the timebase (RTC on 601) registers. */
59/* If one day CONFIG_POWER is added just define __USE_RTC as 1 */ 59/* If one day CONFIG_POWER is added just define __USE_RTC as 1 */
60#ifdef CONFIG_6xx 60#ifdef CONFIG_6xx
61extern __inline__ int const __USE_RTC(void) { 61extern __inline__ int __attribute_pure__ __USE_RTC(void) {
62 return (mfspr(SPRN_PVR)>>16) == 1; 62 return (mfspr(SPRN_PVR)>>16) == 1;
63} 63}
64#else 64#else
diff --git a/include/asm-ppc64/bug.h b/include/asm-ppc64/bug.h
index 169868fa307d..160178278861 100644
--- a/include/asm-ppc64/bug.h
+++ b/include/asm-ppc64/bug.h
@@ -43,8 +43,8 @@ struct bug_entry *find_bug(unsigned long bugaddr);
43 ".section __bug_table,\"a\"\n\t" \ 43 ".section __bug_table,\"a\"\n\t" \
44 " .llong 1b,%1,%2,%3\n" \ 44 " .llong 1b,%1,%2,%3\n" \
45 ".previous" \ 45 ".previous" \
46 : : "r" (x), "i" (__LINE__), "i" (__FILE__), \ 46 : : "r" ((long long)(x)), "i" (__LINE__), \
47 "i" (__FUNCTION__)); \ 47 "i" (__FILE__), "i" (__FUNCTION__)); \
48} while (0) 48} while (0)
49 49
50#define WARN_ON(x) do { \ 50#define WARN_ON(x) do { \
@@ -53,7 +53,8 @@ struct bug_entry *find_bug(unsigned long bugaddr);
53 ".section __bug_table,\"a\"\n\t" \ 53 ".section __bug_table,\"a\"\n\t" \
54 " .llong 1b,%1,%2,%3\n" \ 54 " .llong 1b,%1,%2,%3\n" \
55 ".previous" \ 55 ".previous" \
56 : : "r" (x), "i" (__LINE__ + BUG_WARNING_TRAP), \ 56 : : "r" ((long long)(x)), \
57 "i" (__LINE__ + BUG_WARNING_TRAP), \
57 "i" (__FILE__), "i" (__FUNCTION__)); \ 58 "i" (__FILE__), "i" (__FUNCTION__)); \
58} while (0) 59} while (0)
59 60
diff --git a/include/asm-ppc64/iSeries/LparMap.h b/include/asm-ppc64/iSeries/LparMap.h
index 5c32e38c1c01..a6840b186d03 100644
--- a/include/asm-ppc64/iSeries/LparMap.h
+++ b/include/asm-ppc64/iSeries/LparMap.h
@@ -19,6 +19,8 @@
19#ifndef _LPARMAP_H 19#ifndef _LPARMAP_H
20#define _LPARMAP_H 20#define _LPARMAP_H
21 21
22#ifndef __ASSEMBLY__
23
22#include <asm/types.h> 24#include <asm/types.h>
23 25
24/* 26/*
@@ -71,6 +73,11 @@ struct LparMap {
71 } xRanges[HvRangesToMap]; 73 } xRanges[HvRangesToMap];
72}; 74};
73 75
74extern struct LparMap xLparMap; 76extern const struct LparMap xLparMap;
77
78#endif /* __ASSEMBLY__ */
79
80/* the fixed address where the LparMap exists */
81#define LPARMAP_PHYS 0x7000
75 82
76#endif /* _LPARMAP_H */ 83#endif /* _LPARMAP_H */
diff --git a/include/asm-s390/uaccess.h b/include/asm-s390/uaccess.h
index a7f43a251f81..3e3bfe6a8fa8 100644
--- a/include/asm-s390/uaccess.h
+++ b/include/asm-s390/uaccess.h
@@ -149,11 +149,11 @@ struct exception_table_entry
149}) 149})
150#endif 150#endif
151 151
152#ifndef __CHECKER__
153#define __put_user(x, ptr) \ 152#define __put_user(x, ptr) \
154({ \ 153({ \
155 __typeof__(*(ptr)) __x = (x); \ 154 __typeof__(*(ptr)) __x = (x); \
156 int __pu_err; \ 155 int __pu_err; \
156 __chk_user_ptr(ptr); \
157 switch (sizeof (*(ptr))) { \ 157 switch (sizeof (*(ptr))) { \
158 case 1: \ 158 case 1: \
159 case 2: \ 159 case 2: \
@@ -167,14 +167,6 @@ struct exception_table_entry
167 } \ 167 } \
168 __pu_err; \ 168 __pu_err; \
169}) 169})
170#else
171#define __put_user(x, ptr) \
172({ \
173 void __user *p; \
174 p = (ptr); \
175 0; \
176})
177#endif
178 170
179#define put_user(x, ptr) \ 171#define put_user(x, ptr) \
180({ \ 172({ \
@@ -213,11 +205,11 @@ extern int __put_user_bad(void) __attribute__((noreturn));
213}) 205})
214#endif 206#endif
215 207
216#ifndef __CHECKER__
217#define __get_user(x, ptr) \ 208#define __get_user(x, ptr) \
218({ \ 209({ \
219 __typeof__(*(ptr)) __x; \ 210 __typeof__(*(ptr)) __x; \
220 int __gu_err; \ 211 int __gu_err; \
212 __chk_user_ptr(ptr); \
221 switch (sizeof(*(ptr))) { \ 213 switch (sizeof(*(ptr))) { \
222 case 1: \ 214 case 1: \
223 case 2: \ 215 case 2: \
@@ -232,15 +224,6 @@ extern int __put_user_bad(void) __attribute__((noreturn));
232 (x) = __x; \ 224 (x) = __x; \
233 __gu_err; \ 225 __gu_err; \
234}) 226})
235#else
236#define __get_user(x, ptr) \
237({ \
238 void __user *p; \
239 p = (ptr); \
240 0; \
241})
242#endif
243
244 227
245#define get_user(x, ptr) \ 228#define get_user(x, ptr) \
246({ \ 229({ \
diff --git a/include/asm-sh/unistd.h b/include/asm-sh/unistd.h
index 245447081f0d..ea89e8f223ea 100644
--- a/include/asm-sh/unistd.h
+++ b/include/asm-sh/unistd.h
@@ -295,8 +295,14 @@
295#define __NR_add_key 285 295#define __NR_add_key 285
296#define __NR_request_key 286 296#define __NR_request_key 286
297#define __NR_keyctl 287 297#define __NR_keyctl 287
298#define __NR_ioprio_set 288
299#define __NR_ioprio_get 289
300#define __NR_inotify_init 290
301#define __NR_inotify_add_watch 291
302#define __NR_inotify_rm_watch 292
298 303
299#define NR_syscalls 288 304
305#define NR_syscalls 293
300 306
301/* user-visible error numbers are in the range -1 - -124: see <asm-sh/errno.h> */ 307/* user-visible error numbers are in the range -1 - -124: see <asm-sh/errno.h> */
302 308
@@ -406,7 +412,7 @@ register long __sc6 __asm__ ("r6") = (long) arg3; \
406register long __sc7 __asm__ ("r7") = (long) arg4; \ 412register long __sc7 __asm__ ("r7") = (long) arg4; \
407register long __sc0 __asm__ ("r0") = (long) arg5; \ 413register long __sc0 __asm__ ("r0") = (long) arg5; \
408register long __sc1 __asm__ ("r1") = (long) arg6; \ 414register long __sc1 __asm__ ("r1") = (long) arg6; \
409__asm__ __volatile__ ("trapa #0x15" \ 415__asm__ __volatile__ ("trapa #0x16" \
410 : "=z" (__sc0) \ 416 : "=z" (__sc0) \
411 : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6), "r" (__sc7), \ 417 : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6), "r" (__sc7), \
412 "r" (__sc3), "r" (__sc1) \ 418 "r" (__sc3), "r" (__sc1) \
diff --git a/include/asm-sh64/unistd.h b/include/asm-sh64/unistd.h
index 95f0b130405c..2a1cfa404ea4 100644
--- a/include/asm-sh64/unistd.h
+++ b/include/asm-sh64/unistd.h
@@ -338,8 +338,13 @@
338#define __NR_add_key 313 338#define __NR_add_key 313
339#define __NR_request_key 314 339#define __NR_request_key 314
340#define __NR_keyctl 315 340#define __NR_keyctl 315
341#define __NR_ioprio_set 316
342#define __NR_ioprio_get 317
343#define __NR_inotify_init 318
344#define __NR_inotify_add_watch 319
345#define __NR_inotify_rm_watch 320
341 346
342#define NR_syscalls 316 347#define NR_syscalls 321
343 348
344/* user-visible error numbers are in the range -1 - -125: see <asm-sh64/errno.h> */ 349/* user-visible error numbers are in the range -1 - -125: see <asm-sh64/errno.h> */
345 350
diff --git a/include/asm-sparc/processor.h b/include/asm-sparc/processor.h
index 32c9699367cf..5a7a1a8d29ac 100644
--- a/include/asm-sparc/processor.h
+++ b/include/asm-sparc/processor.h
@@ -19,7 +19,6 @@
19#include <asm/ptrace.h> 19#include <asm/ptrace.h>
20#include <asm/head.h> 20#include <asm/head.h>
21#include <asm/signal.h> 21#include <asm/signal.h>
22#include <asm/segment.h>
23#include <asm/btfixup.h> 22#include <asm/btfixup.h>
24#include <asm/page.h> 23#include <asm/page.h>
25 24
diff --git a/include/asm-sparc/segment.h b/include/asm-sparc/segment.h
deleted file mode 100644
index a1b7ffc9eec9..000000000000
--- a/include/asm-sparc/segment.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __SPARC_SEGMENT_H
2#define __SPARC_SEGMENT_H
3
4/* Only here because we have some old header files that expect it.. */
5
6#endif
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h
index 898562ebe94c..3557781a4bfd 100644
--- a/include/asm-sparc/system.h
+++ b/include/asm-sparc/system.h
@@ -9,7 +9,6 @@
9#include <linux/threads.h> /* NR_CPUS */ 9#include <linux/threads.h> /* NR_CPUS */
10#include <linux/thread_info.h> 10#include <linux/thread_info.h>
11 11
12#include <asm/segment.h>
13#include <asm/page.h> 12#include <asm/page.h>
14#include <asm/psr.h> 13#include <asm/psr.h>
15#include <asm/ptrace.h> 14#include <asm/ptrace.h>
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h
index d80f3379669b..e175afcf2cde 100644
--- a/include/asm-sparc64/atomic.h
+++ b/include/asm-sparc64/atomic.h
@@ -72,10 +72,10 @@ extern int atomic64_sub_ret(int, atomic64_t *);
72 72
73/* Atomic operations are already serializing */ 73/* Atomic operations are already serializing */
74#ifdef CONFIG_SMP 74#ifdef CONFIG_SMP
75#define smp_mb__before_atomic_dec() membar("#StoreLoad | #LoadLoad") 75#define smp_mb__before_atomic_dec() membar_storeload_loadload();
76#define smp_mb__after_atomic_dec() membar("#StoreLoad | #StoreStore") 76#define smp_mb__after_atomic_dec() membar_storeload_storestore();
77#define smp_mb__before_atomic_inc() membar("#StoreLoad | #LoadLoad") 77#define smp_mb__before_atomic_inc() membar_storeload_loadload();
78#define smp_mb__after_atomic_inc() membar("#StoreLoad | #StoreStore") 78#define smp_mb__after_atomic_inc() membar_storeload_storestore();
79#else 79#else
80#define smp_mb__before_atomic_dec() barrier() 80#define smp_mb__before_atomic_dec() barrier()
81#define smp_mb__after_atomic_dec() barrier() 81#define smp_mb__after_atomic_dec() barrier()
diff --git a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h
index 9c5e71970287..6388b8376c50 100644
--- a/include/asm-sparc64/bitops.h
+++ b/include/asm-sparc64/bitops.h
@@ -72,8 +72,8 @@ static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
72} 72}
73 73
74#ifdef CONFIG_SMP 74#ifdef CONFIG_SMP
75#define smp_mb__before_clear_bit() membar("#StoreLoad | #LoadLoad") 75#define smp_mb__before_clear_bit() membar_storeload_loadload()
76#define smp_mb__after_clear_bit() membar("#StoreLoad | #StoreStore") 76#define smp_mb__after_clear_bit() membar_storeload_storestore()
77#else 77#else
78#define smp_mb__before_clear_bit() barrier() 78#define smp_mb__before_clear_bit() barrier()
79#define smp_mb__after_clear_bit() barrier() 79#define smp_mb__after_clear_bit() barrier()
diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h
index d0bee2413560..3169f3e2237e 100644
--- a/include/asm-sparc64/processor.h
+++ b/include/asm-sparc64/processor.h
@@ -18,7 +18,6 @@
18#include <asm/a.out.h> 18#include <asm/a.out.h>
19#include <asm/pstate.h> 19#include <asm/pstate.h>
20#include <asm/ptrace.h> 20#include <asm/ptrace.h>
21#include <asm/segment.h>
22#include <asm/page.h> 21#include <asm/page.h>
23 22
24/* The sparc has no problems with write protection */ 23/* The sparc has no problems with write protection */
diff --git a/include/asm-sparc64/segment.h b/include/asm-sparc64/segment.h
deleted file mode 100644
index b03e709fc945..000000000000
--- a/include/asm-sparc64/segment.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __SPARC64_SEGMENT_H
2#define __SPARC64_SEGMENT_H
3
4/* Only here because we have some old header files that expect it.. */
5
6#endif
diff --git a/include/asm-sparc64/sfafsr.h b/include/asm-sparc64/sfafsr.h
new file mode 100644
index 000000000000..2f792c20b53c
--- /dev/null
+++ b/include/asm-sparc64/sfafsr.h
@@ -0,0 +1,82 @@
1#ifndef _SPARC64_SFAFSR_H
2#define _SPARC64_SFAFSR_H
3
4#include <asm/const.h>
5
6/* Spitfire Asynchronous Fault Status register, ASI=0x4C VA<63:0>=0x0 */
7
8#define SFAFSR_ME (_AC(1,UL) << SFAFSR_ME_SHIFT)
9#define SFAFSR_ME_SHIFT 32
10#define SFAFSR_PRIV (_AC(1,UL) << SFAFSR_PRIV_SHIFT)
11#define SFAFSR_PRIV_SHIFT 31
12#define SFAFSR_ISAP (_AC(1,UL) << SFAFSR_ISAP_SHIFT)
13#define SFAFSR_ISAP_SHIFT 30
14#define SFAFSR_ETP (_AC(1,UL) << SFAFSR_ETP_SHIFT)
15#define SFAFSR_ETP_SHIFT 29
16#define SFAFSR_IVUE (_AC(1,UL) << SFAFSR_IVUE_SHIFT)
17#define SFAFSR_IVUE_SHIFT 28
18#define SFAFSR_TO (_AC(1,UL) << SFAFSR_TO_SHIFT)
19#define SFAFSR_TO_SHIFT 27
20#define SFAFSR_BERR (_AC(1,UL) << SFAFSR_BERR_SHIFT)
21#define SFAFSR_BERR_SHIFT 26
22#define SFAFSR_LDP (_AC(1,UL) << SFAFSR_LDP_SHIFT)
23#define SFAFSR_LDP_SHIFT 25
24#define SFAFSR_CP (_AC(1,UL) << SFAFSR_CP_SHIFT)
25#define SFAFSR_CP_SHIFT 24
26#define SFAFSR_WP (_AC(1,UL) << SFAFSR_WP_SHIFT)
27#define SFAFSR_WP_SHIFT 23
28#define SFAFSR_EDP (_AC(1,UL) << SFAFSR_EDP_SHIFT)
29#define SFAFSR_EDP_SHIFT 22
30#define SFAFSR_UE (_AC(1,UL) << SFAFSR_UE_SHIFT)
31#define SFAFSR_UE_SHIFT 21
32#define SFAFSR_CE (_AC(1,UL) << SFAFSR_CE_SHIFT)
33#define SFAFSR_CE_SHIFT 20
34#define SFAFSR_ETS (_AC(0xf,UL) << SFAFSR_ETS_SHIFT)
35#define SFAFSR_ETS_SHIFT 16
36#define SFAFSR_PSYND (_AC(0xffff,UL) << SFAFSR_PSYND_SHIFT)
37#define SFAFSR_PSYND_SHIFT 0
38
39/* UDB Error Register, ASI=0x7f VA<63:0>=0x0(High),0x18(Low) for read
40 * ASI=0x77 VA<63:0>=0x0(High),0x18(Low) for write
41 */
42
43#define UDBE_UE (_AC(1,UL) << 9)
44#define UDBE_CE (_AC(1,UL) << 8)
45#define UDBE_E_SYNDR (_AC(0xff,UL) << 0)
46
47/* The trap handlers for asynchronous errors encode the AFSR and
48 * other pieces of information into a 64-bit argument for C code
49 * encoded as follows:
50 *
51 * -----------------------------------------------
52 * | UDB_H | UDB_L | TL>1 | TT | AFSR |
53 * -----------------------------------------------
54 * 63 54 53 44 42 41 33 32 0
55 *
56 * The AFAR is passed in unchanged.
57 */
58#define SFSTAT_UDBH_MASK (_AC(0x3ff,UL) << SFSTAT_UDBH_SHIFT)
59#define SFSTAT_UDBH_SHIFT 54
60#define SFSTAT_UDBL_MASK (_AC(0x3ff,UL) << SFSTAT_UDBH_SHIFT)
61#define SFSTAT_UDBL_SHIFT 44
62#define SFSTAT_TL_GT_ONE (_AC(1,UL) << SFSTAT_TL_GT_ONE_SHIFT)
63#define SFSTAT_TL_GT_ONE_SHIFT 42
64#define SFSTAT_TRAP_TYPE (_AC(0x1FF,UL) << SFSTAT_TRAP_TYPE_SHIFT)
65#define SFSTAT_TRAP_TYPE_SHIFT 33
66#define SFSTAT_AFSR_MASK (_AC(0x1ffffffff,UL) << SFSTAT_AFSR_SHIFT)
67#define SFSTAT_AFSR_SHIFT 0
68
69/* ESTATE Error Enable Register, ASI=0x4b VA<63:0>=0x0 */
70#define ESTATE_ERR_CE 0x1 /* Correctable errors */
71#define ESTATE_ERR_NCE 0x2 /* TO, BERR, LDP, ETP, EDP, WP, UE, IVUE */
72#define ESTATE_ERR_ISAP 0x4 /* System address parity error */
73#define ESTATE_ERR_ALL (ESTATE_ERR_CE | \
74 ESTATE_ERR_NCE | \
75 ESTATE_ERR_ISAP)
76
77/* The various trap types that report using the above state. */
78#define TRAP_TYPE_IAE 0x09 /* Instruction Access Error */
79#define TRAP_TYPE_DAE 0x32 /* Data Access Error */
80#define TRAP_TYPE_CEE 0x63 /* Correctable ECC Error */
81
82#endif /* _SPARC64_SFAFSR_H */
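The packed-word layout documented in the comment block above (UDB_H, UDB_L, TL>1, TT, AFSR) is consumed by C code using the SFSTAT_* masks. The following decoding sketch is illustrative and not taken from the kernel's actual trap handlers:

/* Hedged example: classify a correctable ECC report from the packed word. */
static int sfstat_is_correctable(unsigned long sfsr)
{
        unsigned long tt   = (sfsr & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
        unsigned long afsr = (sfsr & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
        unsigned long udbh = (sfsr & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;

        /* The high-UDB field carries the UDBE_* bits; the low-UDB field is
         * extracted the same way from its own shift. */
        return tt == TRAP_TYPE_CEE &&
               ((afsr & SFAFSR_CE) || (udbh & UDBE_CE));
}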
diff --git a/include/asm-sparc64/spinlock.h b/include/asm-sparc64/spinlock.h
index 9cb93a5c2b4f..a02c4370eb42 100644
--- a/include/asm-sparc64/spinlock.h
+++ b/include/asm-sparc64/spinlock.h
@@ -43,7 +43,7 @@ typedef struct {
43#define spin_is_locked(lp) ((lp)->lock != 0) 43#define spin_is_locked(lp) ((lp)->lock != 0)
44 44
45#define spin_unlock_wait(lp) \ 45#define spin_unlock_wait(lp) \
46do { membar("#LoadLoad"); \ 46do { rmb(); \
47} while((lp)->lock) 47} while((lp)->lock)
48 48
49static inline void _raw_spin_lock(spinlock_t *lock) 49static inline void _raw_spin_lock(spinlock_t *lock)
@@ -129,15 +129,18 @@ typedef struct {
129#define spin_is_locked(__lock) ((__lock)->lock != 0) 129#define spin_is_locked(__lock) ((__lock)->lock != 0)
130#define spin_unlock_wait(__lock) \ 130#define spin_unlock_wait(__lock) \
131do { \ 131do { \
132 membar("#LoadLoad"); \ 132 rmb(); \
133} while((__lock)->lock) 133} while((__lock)->lock)
134 134
135extern void _do_spin_lock (spinlock_t *lock, char *str); 135extern void _do_spin_lock(spinlock_t *lock, char *str, unsigned long caller);
136extern void _do_spin_unlock (spinlock_t *lock); 136extern void _do_spin_unlock(spinlock_t *lock);
137extern int _do_spin_trylock (spinlock_t *lock); 137extern int _do_spin_trylock(spinlock_t *lock, unsigned long caller);
138 138
139#define _raw_spin_trylock(lp) _do_spin_trylock(lp) 139#define _raw_spin_trylock(lp) \
140#define _raw_spin_lock(lock) _do_spin_lock(lock, "spin_lock") 140 _do_spin_trylock(lp, (unsigned long) __builtin_return_address(0))
141#define _raw_spin_lock(lock) \
142 _do_spin_lock(lock, "spin_lock", \
143 (unsigned long) __builtin_return_address(0))
141#define _raw_spin_unlock(lock) _do_spin_unlock(lock) 144#define _raw_spin_unlock(lock) _do_spin_unlock(lock)
142#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) 145#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
143 146
@@ -279,37 +282,41 @@ typedef struct {
279#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0, 0xff, { } } 282#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0, 0xff, { } }
280#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0) 283#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
281 284
282extern void _do_read_lock(rwlock_t *rw, char *str); 285extern void _do_read_lock(rwlock_t *rw, char *str, unsigned long caller);
283extern void _do_read_unlock(rwlock_t *rw, char *str); 286extern void _do_read_unlock(rwlock_t *rw, char *str, unsigned long caller);
284extern void _do_write_lock(rwlock_t *rw, char *str); 287extern void _do_write_lock(rwlock_t *rw, char *str, unsigned long caller);
285extern void _do_write_unlock(rwlock_t *rw); 288extern void _do_write_unlock(rwlock_t *rw, unsigned long caller);
286extern int _do_write_trylock(rwlock_t *rw, char *str); 289extern int _do_write_trylock(rwlock_t *rw, char *str, unsigned long caller);
287 290
288#define _raw_read_lock(lock) \ 291#define _raw_read_lock(lock) \
289do { unsigned long flags; \ 292do { unsigned long flags; \
290 local_irq_save(flags); \ 293 local_irq_save(flags); \
291 _do_read_lock(lock, "read_lock"); \ 294 _do_read_lock(lock, "read_lock", \
295 (unsigned long) __builtin_return_address(0)); \
292 local_irq_restore(flags); \ 296 local_irq_restore(flags); \
293} while(0) 297} while(0)
294 298
295#define _raw_read_unlock(lock) \ 299#define _raw_read_unlock(lock) \
296do { unsigned long flags; \ 300do { unsigned long flags; \
297 local_irq_save(flags); \ 301 local_irq_save(flags); \
298 _do_read_unlock(lock, "read_unlock"); \ 302 _do_read_unlock(lock, "read_unlock", \
303 (unsigned long) __builtin_return_address(0)); \
299 local_irq_restore(flags); \ 304 local_irq_restore(flags); \
300} while(0) 305} while(0)
301 306
302#define _raw_write_lock(lock) \ 307#define _raw_write_lock(lock) \
303do { unsigned long flags; \ 308do { unsigned long flags; \
304 local_irq_save(flags); \ 309 local_irq_save(flags); \
305 _do_write_lock(lock, "write_lock"); \ 310 _do_write_lock(lock, "write_lock", \
311 (unsigned long) __builtin_return_address(0)); \
306 local_irq_restore(flags); \ 312 local_irq_restore(flags); \
307} while(0) 313} while(0)
308 314
309#define _raw_write_unlock(lock) \ 315#define _raw_write_unlock(lock) \
310do { unsigned long flags; \ 316do { unsigned long flags; \
311 local_irq_save(flags); \ 317 local_irq_save(flags); \
312 _do_write_unlock(lock); \ 318 _do_write_unlock(lock, \
319 (unsigned long) __builtin_return_address(0)); \
313 local_irq_restore(flags); \ 320 local_irq_restore(flags); \
314} while(0) 321} while(0)
315 322
@@ -317,7 +324,8 @@ do { unsigned long flags; \
317({ unsigned long flags; \ 324({ unsigned long flags; \
318 int val; \ 325 int val; \
319 local_irq_save(flags); \ 326 local_irq_save(flags); \
320 val = _do_write_trylock(lock, "write_trylock"); \ 327 val = _do_write_trylock(lock, "write_trylock", \
328 (unsigned long) __builtin_return_address(0)); \
321 local_irq_restore(flags); \ 329 local_irq_restore(flags); \
322 val; \ 330 val; \
323}) 331})
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index ee4bdfc6b88f..5e94c05dc2fc 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -28,6 +28,14 @@ enum sparc_cpu {
28#define ARCH_SUN4C_SUN4 0 28#define ARCH_SUN4C_SUN4 0
29#define ARCH_SUN4 0 29#define ARCH_SUN4 0
30 30
31extern void mb(void);
32extern void rmb(void);
33extern void wmb(void);
34extern void membar_storeload(void);
35extern void membar_storeload_storestore(void);
36extern void membar_storeload_loadload(void);
37extern void membar_storestore_loadstore(void);
38
31#endif 39#endif
32 40
33#define setipl(__new_ipl) \ 41#define setipl(__new_ipl) \
@@ -78,16 +86,11 @@ enum sparc_cpu {
78 86
79#define nop() __asm__ __volatile__ ("nop") 87#define nop() __asm__ __volatile__ ("nop")
80 88
81#define membar(type) __asm__ __volatile__ ("membar " type : : : "memory")
82#define mb() \
83 membar("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
84#define rmb() membar("#LoadLoad")
85#define wmb() membar("#StoreStore")
86#define read_barrier_depends() do { } while(0) 89#define read_barrier_depends() do { } while(0)
87#define set_mb(__var, __value) \ 90#define set_mb(__var, __value) \
88 do { __var = __value; membar("#StoreLoad | #StoreStore"); } while(0) 91 do { __var = __value; membar_storeload_storestore(); } while(0)
89#define set_wmb(__var, __value) \ 92#define set_wmb(__var, __value) \
90 do { __var = __value; membar("#StoreStore"); } while(0) 93 do { __var = __value; wmb(); } while(0)
91 94
92#ifdef CONFIG_SMP 95#ifdef CONFIG_SMP
93#define smp_mb() mb() 96#define smp_mb() mb()
diff --git a/include/asm-sparc64/thread_info.h b/include/asm-sparc64/thread_info.h
index 352d9943661a..c94d8b3991bd 100644
--- a/include/asm-sparc64/thread_info.h
+++ b/include/asm-sparc64/thread_info.h
@@ -68,6 +68,9 @@ struct thread_info {
68 68
69 struct restart_block restart_block; 69 struct restart_block restart_block;
70 70
71 struct pt_regs *kern_una_regs;
72 unsigned int kern_una_insn;
73
71 unsigned long fpregs[0] __attribute__ ((aligned(64))); 74 unsigned long fpregs[0] __attribute__ ((aligned(64)));
72}; 75};
73 76
@@ -103,6 +106,8 @@ struct thread_info {
103#define TI_PCR 0x00000490 106#define TI_PCR 0x00000490
104#define TI_CEE_STUFF 0x00000498 107#define TI_CEE_STUFF 0x00000498
105#define TI_RESTART_BLOCK 0x000004a0 108#define TI_RESTART_BLOCK 0x000004a0
109#define TI_KUNA_REGS 0x000004c8
110#define TI_KUNA_INSN 0x000004d0
106#define TI_FPREGS 0x00000500 111#define TI_FPREGS 0x00000500
107 112
108/* We embed this in the uppermost byte of thread_info->flags */ 113/* We embed this in the uppermost byte of thread_info->flags */
diff --git a/include/asm-um/page.h b/include/asm-um/page.h
index 5afee8a8cdf3..f58aedadeb4e 100644
--- a/include/asm-um/page.h
+++ b/include/asm-um/page.h
@@ -104,8 +104,8 @@ extern void *to_virt(unsigned long phys);
104 * casting is the right thing, but 32-bit UML can't have 64-bit virtual 104 * casting is the right thing, but 32-bit UML can't have 64-bit virtual
105 * addresses 105 * addresses
106 */ 106 */
107#define __pa(virt) to_phys((void *) (unsigned long) virt) 107#define __pa(virt) to_phys((void *) (unsigned long) (virt))
108#define __va(phys) to_virt((unsigned long) phys) 108#define __va(phys) to_virt((unsigned long) (phys))
109 109
110#define page_to_pfn(page) ((page) - mem_map) 110#define page_to_pfn(page) ((page) - mem_map)
111#define pfn_to_page(pfn) (mem_map + (pfn)) 111#define pfn_to_page(pfn) (mem_map + (pfn))
diff --git a/include/asm-x86_64/e820.h b/include/asm-x86_64/e820.h
index 8e94edf0b984..e682edc24a68 100644
--- a/include/asm-x86_64/e820.h
+++ b/include/asm-x86_64/e820.h
@@ -51,6 +51,8 @@ extern int e820_mapped(unsigned long start, unsigned long end, unsigned type);
51 51
52extern void e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end); 52extern void e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end);
53extern void e820_setup_gap(void); 53extern void e820_setup_gap(void);
54extern unsigned long e820_hole_size(unsigned long start_pfn,
55 unsigned long end_pfn);
54 56
55extern void __init parse_memopt(char *p, char **end); 57extern void __init parse_memopt(char *p, char **end);
56 58
diff --git a/include/asm-x86_64/pci.h b/include/asm-x86_64/pci.h
index 9c4527eb55e2..eeb3088a1c9e 100644
--- a/include/asm-x86_64/pci.h
+++ b/include/asm-x86_64/pci.h
@@ -22,9 +22,11 @@ extern unsigned int pcibios_assign_all_busses(void);
22extern int no_iommu, force_iommu; 22extern int no_iommu, force_iommu;
23 23
24extern unsigned long pci_mem_start; 24extern unsigned long pci_mem_start;
25#define PCIBIOS_MIN_IO 0x4000 25#define PCIBIOS_MIN_IO 0x1000
26#define PCIBIOS_MIN_MEM (pci_mem_start) 26#define PCIBIOS_MIN_MEM (pci_mem_start)
27 27
28#define PCIBIOS_MIN_CARDBUS_IO 0x4000
29
28void pcibios_config_init(void); 30void pcibios_config_init(void);
29struct pci_bus * pcibios_scan_root(int bus); 31struct pci_bus * pcibios_scan_root(int bus);
30extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value); 32extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 106f666517bb..85549e656eeb 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -32,7 +32,7 @@
32#define ID_MASK 0x00200000 32#define ID_MASK 0x00200000
33 33
34#define desc_empty(desc) \ 34#define desc_empty(desc) \
35 (!((desc)->a + (desc)->b)) 35 (!((desc)->a | (desc)->b))
36 36
37#define desc_equal(desc1, desc2) \ 37#define desc_equal(desc1, desc2) \
38 (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b)) 38 (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
diff --git a/include/linux/8250_pci.h b/include/linux/8250_pci.h
index 5f3ab21b339b..3209dd46ea7d 100644
--- a/include/linux/8250_pci.h
+++ b/include/linux/8250_pci.h
@@ -1,2 +1,37 @@
1int pci_siig10x_fn(struct pci_dev *dev, int enable); 1/*
2int pci_siig20x_fn(struct pci_dev *dev, int enable); 2 * Definitions for PCI support.
3 */
4#define FL_BASE_MASK 0x0007
5#define FL_BASE0 0x0000
6#define FL_BASE1 0x0001
7#define FL_BASE2 0x0002
8#define FL_BASE3 0x0003
9#define FL_BASE4 0x0004
10#define FL_GET_BASE(x) (x & FL_BASE_MASK)
11
12/* Use successive BARs (PCI base address registers),
13 else use offset into some specified BAR */
14#define FL_BASE_BARS 0x0008
15
16/* do not assign an irq */
17#define FL_NOIRQ 0x0080
18
19/* Use the Base address register size to cap number of ports */
20#define FL_REGION_SZ_CAP 0x0100
21
22struct pciserial_board {
23 unsigned int flags;
24 unsigned int num_ports;
25 unsigned int base_baud;
26 unsigned int uart_offset;
27 unsigned int reg_shift;
28 unsigned int first_offset;
29};
30
31struct serial_private;
32
33struct serial_private *
34pciserial_init_ports(struct pci_dev *dev, struct pciserial_board *board);
35void pciserial_remove_ports(struct serial_private *priv);
36void pciserial_suspend_ports(struct serial_private *priv);
37void pciserial_resume_ports(struct serial_private *priv);
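The new pciserial_* entry points let a PCI serial driver hand port setup and teardown to 8250_pci. The probe/remove sketch below is illustrative; the board parameters and the ERR_PTR return convention are assumptions, and device enabling plus error paths are trimmed:

/* Hypothetical user of the interface declared above. */
static struct pciserial_board frob_board = {
        .flags          = FL_BASE0,
        .num_ports      = 2,
        .base_baud      = 115200,
        .uart_offset    = 8,
};

static int __devinit frob_serial_probe(struct pci_dev *dev,
                                       const struct pci_device_id *id)
{
        struct serial_private *priv;

        /* pci_enable_device() and friends omitted for brevity. */
        priv = pciserial_init_ports(dev, &frob_board);
        if (IS_ERR(priv))
                return PTR_ERR(priv);           /* assumes ERR_PTR on failure */
        pci_set_drvdata(dev, priv);
        return 0;
}

static void __devexit frob_serial_remove(struct pci_dev *dev)
{
        pciserial_remove_ports(pci_get_drvdata(dev));
}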
diff --git a/include/linux/ata.h b/include/linux/ata.h
index ca5fcadf9981..a5b74efab067 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -1,24 +1,29 @@
1 1
2/* 2/*
3 Copyright 2003-2004 Red Hat, Inc. All rights reserved. 3 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
4 Copyright 2003-2004 Jeff Garzik 4 * Copyright 2003-2004 Jeff Garzik
5 5 *
6 The contents of this file are subject to the Open 6 *
7 Software License version 1.1 that can be found at 7 * This program is free software; you can redistribute it and/or modify
8 http://www.opensource.org/licenses/osl-1.1.txt and is included herein 8 * it under the terms of the GNU General Public License as published by
9 by reference. 9 * the Free Software Foundation; either version 2, or (at your option)
10 10 * any later version.
11 Alternatively, the contents of this file may be used under the terms 11 *
12 of the GNU General Public License version 2 (the "GPL") as distributed 12 * This program is distributed in the hope that it will be useful,
13 in the kernel source COPYING file, in which case the provisions of 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 the GPL are applicable instead of the above. If you wish to allow 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 the use of your version of this file only under the terms of the 15 * GNU General Public License for more details.
16 GPL and not to allow others to use your version of this file under 16 *
17 the OSL, indicate your decision by deleting the provisions above and 17 * You should have received a copy of the GNU General Public License
18 replace them with the notice and other provisions required by the GPL. 18 * along with this program; see the file COPYING. If not, write to
19 If you do not delete the provisions above, a recipient may use your 19 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
20 version of this file under either the OSL or the GPL. 20 *
21 21 *
22 * libata documentation is available via 'make {ps|pdf}docs',
23 * as Documentation/DocBook/libata.*
24 *
25 * Hardware documentation available from http://www.t13.org/
26 *
22 */ 27 */
23 28
24#ifndef __LINUX_ATA_H__ 29#ifndef __LINUX_ATA_H__
@@ -108,6 +113,8 @@ enum {
108 113
109 /* ATA device commands */ 114 /* ATA device commands */
110 ATA_CMD_CHK_POWER = 0xE5, /* check power mode */ 115 ATA_CMD_CHK_POWER = 0xE5, /* check power mode */
116 ATA_CMD_STANDBY = 0xE2, /* place in standby power mode */
117 ATA_CMD_IDLE = 0xE3, /* place in idle power mode */
111 ATA_CMD_EDD = 0x90, /* execute device diagnostic */ 118 ATA_CMD_EDD = 0x90, /* execute device diagnostic */
112 ATA_CMD_FLUSH = 0xE7, 119 ATA_CMD_FLUSH = 0xE7,
113 ATA_CMD_FLUSH_EXT = 0xEA, 120 ATA_CMD_FLUSH_EXT = 0xEA,
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index a0ab26aab450..d7021c391b2b 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -408,6 +408,8 @@ struct ethtool_ops {
408#define SUPPORTED_FIBRE (1 << 10) 408#define SUPPORTED_FIBRE (1 << 10)
409#define SUPPORTED_BNC (1 << 11) 409#define SUPPORTED_BNC (1 << 11)
410#define SUPPORTED_10000baseT_Full (1 << 12) 410#define SUPPORTED_10000baseT_Full (1 << 12)
411#define SUPPORTED_Pause (1 << 13)
412#define SUPPORTED_Asym_Pause (1 << 14)
411 413
412/* Indicates what features are advertised by the interface. */ 414/* Indicates what features are advertised by the interface. */
413#define ADVERTISED_10baseT_Half (1 << 0) 415#define ADVERTISED_10baseT_Half (1 << 0)
@@ -423,6 +425,8 @@ struct ethtool_ops {
423#define ADVERTISED_FIBRE (1 << 10) 425#define ADVERTISED_FIBRE (1 << 10)
424#define ADVERTISED_BNC (1 << 11) 426#define ADVERTISED_BNC (1 << 11)
425#define ADVERTISED_10000baseT_Full (1 << 12) 427#define ADVERTISED_10000baseT_Full (1 << 12)
428#define ADVERTISED_Pause (1 << 13)
429#define ADVERTISED_Asym_Pause (1 << 14)
426 430
427/* The following are all involved in forcing a particular link 431/* The following are all involved in forcing a particular link
428 * mode for the device for setting things. When getting the 432 * mode for the device for setting things. When getting the
diff --git a/include/linux/fs.h b/include/linux/fs.h
index f9adf75fd9b4..67e6732d4fdc 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -993,8 +993,8 @@ struct inode_operations {
993 int (*rename) (struct inode *, struct dentry *, 993 int (*rename) (struct inode *, struct dentry *,
994 struct inode *, struct dentry *); 994 struct inode *, struct dentry *);
995 int (*readlink) (struct dentry *, char __user *,int); 995 int (*readlink) (struct dentry *, char __user *,int);
996 int (*follow_link) (struct dentry *, struct nameidata *); 996 void * (*follow_link) (struct dentry *, struct nameidata *);
997 void (*put_link) (struct dentry *, struct nameidata *); 997 void (*put_link) (struct dentry *, struct nameidata *, void *);
998 void (*truncate) (struct inode *); 998 void (*truncate) (struct inode *);
999 int (*permission) (struct inode *, int, struct nameidata *); 999 int (*permission) (struct inode *, int, struct nameidata *);
1000 int (*setattr) (struct dentry *, struct iattr *); 1000 int (*setattr) (struct dentry *, struct iattr *);
@@ -1602,8 +1602,8 @@ extern struct file_operations generic_ro_fops;
1602extern int vfs_readlink(struct dentry *, char __user *, int, const char *); 1602extern int vfs_readlink(struct dentry *, char __user *, int, const char *);
1603extern int vfs_follow_link(struct nameidata *, const char *); 1603extern int vfs_follow_link(struct nameidata *, const char *);
1604extern int page_readlink(struct dentry *, char __user *, int); 1604extern int page_readlink(struct dentry *, char __user *, int);
1605extern int page_follow_link_light(struct dentry *, struct nameidata *); 1605extern void *page_follow_link_light(struct dentry *, struct nameidata *);
1606extern void page_put_link(struct dentry *, struct nameidata *); 1606extern void page_put_link(struct dentry *, struct nameidata *, void *);
1607extern int page_symlink(struct inode *inode, const char *symname, int len); 1607extern int page_symlink(struct inode *inode, const char *symname, int len);
1608extern struct inode_operations page_symlink_inode_operations; 1608extern struct inode_operations page_symlink_inode_operations;
1609extern int generic_readlink(struct dentry *, char __user *, int); 1609extern int generic_readlink(struct dentry *, char __user *, int);
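The signature change threads an opaque cookie from ->follow_link() to ->put_link(), so a filesystem can free whatever it pinned for nd_set_link(). A hedged sketch of the pattern (frobfs_read_link() is a hypothetical helper, not a real API):

static void *frobfs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
        char *link = frobfs_read_link(dentry);  /* hypothetical: kmalloc'd target */

        if (IS_ERR(link)) {
                nd_set_link(nd, (char *)link);  /* propagate the error pointer */
                return NULL;
        }
        nd_set_link(nd, link);
        return link;                            /* handed back as the cookie */
}

static void frobfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
        kfree(cookie);                          /* kfree(NULL) is a no-op */
}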
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 602c305c8585..03b8e7932b83 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -21,7 +21,7 @@
21 */ 21 */
22static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, 22static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
23 const char *old_name, const char *new_name, 23 const char *old_name, const char *new_name,
24 int isdir, struct inode *target) 24 int isdir, struct inode *target, struct inode *source)
25{ 25{
26 u32 cookie = inotify_get_cookie(); 26 u32 cookie = inotify_get_cookie();
27 27
@@ -41,6 +41,10 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
41 inotify_inode_queue_event(target, IN_DELETE_SELF, 0, NULL); 41 inotify_inode_queue_event(target, IN_DELETE_SELF, 0, NULL);
42 inotify_inode_is_dead(target); 42 inotify_inode_is_dead(target);
43 } 43 }
44
45 if (source) {
46 inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL);
47 }
44} 48}
45 49
46/* 50/*
diff --git a/include/linux/inotify.h b/include/linux/inotify.h
index a40c2bf0408e..93bb3afe646b 100644
--- a/include/linux/inotify.h
+++ b/include/linux/inotify.h
@@ -35,6 +35,7 @@ struct inotify_event {
35#define IN_CREATE 0x00000100 /* Subfile was created */ 35#define IN_CREATE 0x00000100 /* Subfile was created */
36#define IN_DELETE 0x00000200 /* Subfile was deleted */ 36#define IN_DELETE 0x00000200 /* Subfile was deleted */
37#define IN_DELETE_SELF 0x00000400 /* Self was deleted */ 37#define IN_DELETE_SELF 0x00000400 /* Self was deleted */
38#define IN_MOVE_SELF 0x00000800 /* Self was moved */
38 39
39/* the following are legal events. they are sent as needed to any watch */ 40/* the following are legal events. they are sent as needed to any watch */
40#define IN_UNMOUNT 0x00002000 /* Backing fs was unmounted */ 41#define IN_UNMOUNT 0x00002000 /* Backing fs was unmounted */
@@ -56,7 +57,8 @@ struct inotify_event {
56 */ 57 */
57#define IN_ALL_EVENTS (IN_ACCESS | IN_MODIFY | IN_ATTRIB | IN_CLOSE_WRITE | \ 58#define IN_ALL_EVENTS (IN_ACCESS | IN_MODIFY | IN_ATTRIB | IN_CLOSE_WRITE | \
58 IN_CLOSE_NOWRITE | IN_OPEN | IN_MOVED_FROM | \ 59 IN_CLOSE_NOWRITE | IN_OPEN | IN_MOVED_FROM | \
59 IN_MOVED_TO | IN_DELETE | IN_CREATE | IN_DELETE_SELF) 60 IN_MOVED_TO | IN_DELETE | IN_CREATE | IN_DELETE_SELF | \
61 IN_MOVE_SELF)
60 62
61#ifdef __KERNEL__ 63#ifdef __KERNEL__
62 64
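Together with the fsnotify_move() change above, IN_MOVE_SELF lets a watcher learn that the object it is watching (not just a child of it) was renamed. A small user-space sketch follows; it assumes the __NR_inotify_* numbers exist for the build architecture, uses raw syscall() because libc wrappers for inotify did not exist yet, and omits error handling:

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/inotify.h>

int main(void)
{
        char buf[4096];
        int fd = syscall(__NR_inotify_init);
        int wd = syscall(__NR_inotify_add_watch, fd, "/tmp/watched", IN_MOVE_SELF);
        ssize_t len = read(fd, buf, sizeof(buf));       /* blocks until an event */

        if (len >= (ssize_t)sizeof(struct inotify_event)) {
                struct inotify_event *ev = (struct inotify_event *)buf;
                if (ev->wd == wd && (ev->mask & IN_MOVE_SELF))
                        printf("/tmp/watched was moved or renamed\n");
        }
        return 0;
}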
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 6cd9ba63563b..fc05a9899288 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1,23 +1,26 @@
1/* 1/*
2 Copyright 2003-2004 Red Hat, Inc. All rights reserved. 2 * Copyright 2003-2005 Red Hat, Inc. All rights reserved.
3 Copyright 2003-2004 Jeff Garzik 3 * Copyright 2003-2005 Jeff Garzik
4 4 *
5 The contents of this file are subject to the Open 5 *
6 Software License version 1.1 that can be found at 6 * This program is free software; you can redistribute it and/or modify
7 http://www.opensource.org/licenses/osl-1.1.txt and is included herein 7 * it under the terms of the GNU General Public License as published by
8 by reference. 8 * the Free Software Foundation; either version 2, or (at your option)
9 9 * any later version.
10 Alternatively, the contents of this file may be used under the terms 10 *
11 of the GNU General Public License version 2 (the "GPL") as distributed 11 * This program is distributed in the hope that it will be useful,
12 in the kernel source COPYING file, in which case the provisions of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 the GPL are applicable instead of the above. If you wish to allow 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 the use of your version of this file only under the terms of the 14 * GNU General Public License for more details.
15 GPL and not to allow others to use your version of this file under 15 *
16 the OSL, indicate your decision by deleting the provisions above and 16 * You should have received a copy of the GNU General Public License
17 replace them with the notice and other provisions required by the GPL. 17 * along with this program; see the file COPYING. If not, write to
18 If you do not delete the provisions above, a recipient may use your 18 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
19 version of this file under either the OSL or the GPL. 19 *
20 20 *
21 * libata documentation is available via 'make {ps|pdf}docs',
22 * as Documentation/DocBook/libata.*
23 *
21 */ 24 */
22 25
23#ifndef __LINUX_LIBATA_H__ 26#ifndef __LINUX_LIBATA_H__
@@ -113,6 +116,8 @@ enum {
113 ATA_FLAG_MMIO = (1 << 6), /* use MMIO, not PIO */ 116 ATA_FLAG_MMIO = (1 << 6), /* use MMIO, not PIO */
114 ATA_FLAG_SATA_RESET = (1 << 7), /* use COMRESET */ 117 ATA_FLAG_SATA_RESET = (1 << 7), /* use COMRESET */
115 ATA_FLAG_PIO_DMA = (1 << 8), /* PIO cmds via DMA */ 118 ATA_FLAG_PIO_DMA = (1 << 8), /* PIO cmds via DMA */
119 ATA_FLAG_NOINTR = (1 << 9), /* FIXME: Remove this once
120 * proper HSM is in place. */
116 121
117 ATA_QCFLAG_ACTIVE = (1 << 1), /* cmd not yet ack'd to scsi layer */ 122 ATA_QCFLAG_ACTIVE = (1 << 1), /* cmd not yet ack'd to scsi layer */
118 ATA_QCFLAG_SG = (1 << 3), /* have s/g table? */ 123 ATA_QCFLAG_SG = (1 << 3), /* have s/g table? */
@@ -363,7 +368,7 @@ struct ata_port_operations {
363 368
364 void (*host_stop) (struct ata_host_set *host_set); 369 void (*host_stop) (struct ata_host_set *host_set);
365 370
366 void (*bmdma_stop) (struct ata_port *ap); 371 void (*bmdma_stop) (struct ata_queued_cmd *qc);
367 u8 (*bmdma_status) (struct ata_port *ap); 372 u8 (*bmdma_status) (struct ata_port *ap);
368}; 373};
369 374
@@ -424,7 +429,7 @@ extern void ata_dev_id_string(u16 *id, unsigned char *s,
424extern void ata_dev_config(struct ata_port *ap, unsigned int i); 429extern void ata_dev_config(struct ata_port *ap, unsigned int i);
425extern void ata_bmdma_setup (struct ata_queued_cmd *qc); 430extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
426extern void ata_bmdma_start (struct ata_queued_cmd *qc); 431extern void ata_bmdma_start (struct ata_queued_cmd *qc);
427extern void ata_bmdma_stop(struct ata_port *ap); 432extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
428extern u8 ata_bmdma_status(struct ata_port *ap); 433extern u8 ata_bmdma_status(struct ata_port *ap);
429extern void ata_bmdma_irq_clear(struct ata_port *ap); 434extern void ata_bmdma_irq_clear(struct ata_port *ap);
430extern void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat); 435extern void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat);
@@ -644,7 +649,7 @@ static inline void scr_write(struct ata_port *ap, unsigned int reg, u32 val)
644 ap->ops->scr_write(ap, reg, val); 649 ap->ops->scr_write(ap, reg, val);
645} 650}
646 651
647static inline void scr_write_flush(struct ata_port *ap, unsigned int reg, 652static inline void scr_write_flush(struct ata_port *ap, unsigned int reg,
648 u32 val) 653 u32 val)
649{ 654{
650 ap->ops->scr_write(ap, reg, val); 655 ap->ops->scr_write(ap, reg, val);
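The bmdma_stop() hook now takes the queued command instead of the port, so drivers reach the port through qc->ap. A minimal sketch of an adapted callback, assuming a hypothetical driver that only wraps the generic helpers declared above:

#include <linux/libata.h>

static void my_bmdma_stop(struct ata_queued_cmd *qc)	/* hypothetical driver hook */
{
	struct ata_port *ap = qc->ap;	/* the port is now reached via the qc */

	ata_bmdma_irq_clear(ap);	/* controller-specific work is keyed off ap */
	ata_bmdma_stop(qc);		/* the generic helper takes the qc as well */
}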
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 374b615ea9ea..9b8d0476988a 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -22,6 +22,7 @@
22#define MII_EXPANSION 0x06 /* Expansion register */ 22#define MII_EXPANSION 0x06 /* Expansion register */
23#define MII_CTRL1000 0x09 /* 1000BASE-T control */ 23#define MII_CTRL1000 0x09 /* 1000BASE-T control */
24#define MII_STAT1000 0x0a /* 1000BASE-T status */ 24#define MII_STAT1000 0x0a /* 1000BASE-T status */
25#define MII_ESTATUS 0x0f /* Extended Status */
25#define MII_DCOUNTER 0x12 /* Disconnect counter */ 26#define MII_DCOUNTER 0x12 /* Disconnect counter */
26#define MII_FCSCOUNTER 0x13 /* False carrier counter */ 27#define MII_FCSCOUNTER 0x13 /* False carrier counter */
27#define MII_NWAYTEST 0x14 /* N-way auto-neg test reg */ 28#define MII_NWAYTEST 0x14 /* N-way auto-neg test reg */
@@ -54,7 +55,10 @@
54#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */ 55#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */
55#define BMSR_RFAULT 0x0010 /* Remote fault detected */ 56#define BMSR_RFAULT 0x0010 /* Remote fault detected */
56#define BMSR_ANEGCOMPLETE 0x0020 /* Auto-negotiation complete */ 57#define BMSR_ANEGCOMPLETE 0x0020 /* Auto-negotiation complete */
57#define BMSR_RESV 0x07c0 /* Unused... */ 58#define BMSR_RESV 0x00c0 /* Unused... */
59#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */
60#define BMSR_100FULL2 0x0200 /* Can do 100BASE-T2 HDX */
61#define BMSR_100HALF2 0x0400 /* Can do 100BASE-T2 FDX */
58#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */ 62#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */
59#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */ 63#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */
60#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */ 64#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */
@@ -114,6 +118,9 @@
114#define EXPANSION_MFAULTS 0x0010 /* Multiple faults detected */ 118#define EXPANSION_MFAULTS 0x0010 /* Multiple faults detected */
115#define EXPANSION_RESV 0xffe0 /* Unused... */ 119#define EXPANSION_RESV 0xffe0 /* Unused... */
116 120
121#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */
122#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */
123
117/* N-way test register. */ 124/* N-way test register. */
118#define NWAYTEST_RESV1 0x00ff /* Unused... */ 125#define NWAYTEST_RESV1 0x00ff /* Unused... */
119#define NWAYTEST_LOOPBACK 0x0100 /* Enable loopback for N-way */ 126#define NWAYTEST_LOOPBACK 0x0100 /* Enable loopback for N-way */
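A hedged sketch of how a driver could use the new extended-status definitions to detect gigabit ability; mii_read() stands in for whatever MDIO read routine the driver provides and is not an API from this header:

#include <linux/netdevice.h>
#include <linux/mii.h>

extern int mii_read(struct net_device *dev, int phy_id, int reg);	/* hypothetical accessor */

static int phy_supports_gigabit(struct net_device *dev, int phy_id)
{
	int bmsr = mii_read(dev, phy_id, MII_BMSR);	/* basic status */

	if (!(bmsr & BMSR_ESTATEN))	/* no extended status in register 15 */
		return 0;

	/* MII_ESTATUS reports 1000BASE-T half/full ability */
	return mii_read(dev, phy_id, MII_ESTATUS) &
	       (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
}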
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index f90f674eb3b0..9a0893f3249e 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -63,11 +63,12 @@ struct device;
63 63
64struct mmc_host { 64struct mmc_host {
65 struct device *dev; 65 struct device *dev;
66 struct class_device class_dev;
67 int index;
66 struct mmc_host_ops *ops; 68 struct mmc_host_ops *ops;
67 unsigned int f_min; 69 unsigned int f_min;
68 unsigned int f_max; 70 unsigned int f_max;
69 u32 ocr_avail; 71 u32 ocr_avail;
70 char host_name[8];
71 72
72 /* host specific block data */ 73 /* host specific block data */
73 unsigned int max_seg_size; /* see blk_queue_max_segment_size */ 74 unsigned int max_seg_size; /* see blk_queue_max_segment_size */
@@ -97,6 +98,7 @@ extern void mmc_free_host(struct mmc_host *);
97 98
98#define mmc_priv(x) ((void *)((x) + 1)) 99#define mmc_priv(x) ((void *)((x) + 1))
99#define mmc_dev(x) ((x)->dev) 100#define mmc_dev(x) ((x)->dev)
101#define mmc_hostname(x) ((x)->class_dev.class_id)
100 102
101extern int mmc_suspend_host(struct mmc_host *, pm_message_t); 103extern int mmc_suspend_host(struct mmc_host *, pm_message_t);
102extern int mmc_resume_host(struct mmc_host *); 104extern int mmc_resume_host(struct mmc_host *);
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index dce53ac1625d..97bbccdbcca3 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -33,7 +33,8 @@ struct ieee1394_device_id {
33 __u32 model_id; 33 __u32 model_id;
34 __u32 specifier_id; 34 __u32 specifier_id;
35 __u32 version; 35 __u32 version;
36 kernel_ulong_t driver_data; 36 kernel_ulong_t driver_data
37 __attribute__((aligned(sizeof(kernel_ulong_t))));
37}; 38};
38 39
39 40
@@ -182,7 +183,11 @@ struct of_device_id
182 char name[32]; 183 char name[32];
183 char type[32]; 184 char type[32];
184 char compatible[128]; 185 char compatible[128];
186#if __KERNEL__
185 void *data; 187 void *data;
188#else
189 kernel_ulong_t data;
190#endif
186}; 191};
187 192
188 193
@@ -208,7 +213,8 @@ struct pcmcia_device_id {
208#ifdef __KERNEL__ 213#ifdef __KERNEL__
209 const char * prod_id[4]; 214 const char * prod_id[4];
210#else 215#else
211 kernel_ulong_t prod_id[4]; 216 kernel_ulong_t prod_id[4]
217 __attribute__((aligned(sizeof(kernel_ulong_t))));
212#endif 218#endif
213 219
214 /* not matched against */ 220 /* not matched against */
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index bcd0ac33f592..5ade54a78dbb 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -9,6 +9,7 @@
9 9
10#include <linux/netdevice.h> 10#include <linux/netdevice.h>
11#include <linux/interrupt.h> 11#include <linux/interrupt.h>
12#include <linux/rcupdate.h>
12#include <linux/list.h> 13#include <linux/list.h>
13 14
14struct netpoll; 15struct netpoll;
@@ -26,6 +27,7 @@ struct netpoll {
26struct netpoll_info { 27struct netpoll_info {
27 spinlock_t poll_lock; 28 spinlock_t poll_lock;
28 int poll_owner; 29 int poll_owner;
30 int tries;
29 int rx_flags; 31 int rx_flags;
30 spinlock_t rx_lock; 32 spinlock_t rx_lock;
31 struct netpoll *rx_np; /* netpoll that registered an rx_hook */ 33 struct netpoll *rx_np; /* netpoll that registered an rx_hook */
@@ -60,25 +62,31 @@ static inline int netpoll_rx(struct sk_buff *skb)
60 return ret; 62 return ret;
61} 63}
62 64
63static inline void netpoll_poll_lock(struct net_device *dev) 65static inline void *netpoll_poll_lock(struct net_device *dev)
64{ 66{
67 rcu_read_lock(); /* deal with race on ->npinfo */
65 if (dev->npinfo) { 68 if (dev->npinfo) {
66 spin_lock(&dev->npinfo->poll_lock); 69 spin_lock(&dev->npinfo->poll_lock);
67 dev->npinfo->poll_owner = smp_processor_id(); 70 dev->npinfo->poll_owner = smp_processor_id();
71 return dev->npinfo;
68 } 72 }
73 return NULL;
69} 74}
70 75
71static inline void netpoll_poll_unlock(struct net_device *dev) 76static inline void netpoll_poll_unlock(void *have)
72{ 77{
73 if (dev->npinfo) { 78 struct netpoll_info *npi = have;
74 dev->npinfo->poll_owner = -1; 79
75 spin_unlock(&dev->npinfo->poll_lock); 80 if (npi) {
81 npi->poll_owner = -1;
82 spin_unlock(&npi->poll_lock);
76 } 83 }
84 rcu_read_unlock();
77} 85}
78 86
79#else 87#else
80#define netpoll_rx(a) 0 88#define netpoll_rx(a) 0
81#define netpoll_poll_lock(a) 89#define netpoll_poll_lock(a) 0
82#define netpoll_poll_unlock(a) 90#define netpoll_poll_unlock(a)
83#endif 91#endif
84 92
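netpoll_poll_lock() now returns an opaque cookie (the npinfo pointer, or NULL) that is handed back to netpoll_poll_unlock(), and the pair brackets an RCU read-side section. A sketch of the new calling convention from a driver's poll routine; my_poll() and do_rx_work() are hypothetical:

#include <linux/netpoll.h>

static int do_rx_work(struct net_device *dev, int *budget);	/* hypothetical RX worker */

static int my_poll(struct net_device *dev, int *budget)
{
	void *have = netpoll_poll_lock(dev);	/* may return NULL */
	int done = do_rx_work(dev, budget);

	netpoll_poll_unlock(have);		/* pass the cookie back, not dev */
	return done;
}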
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 8ea249110fb0..9a6047ff1b25 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -112,7 +112,8 @@ struct nfs_inode {
112 /* 112 /*
113 * Various flags 113 * Various flags
114 */ 114 */
115 unsigned int flags; 115 unsigned long flags; /* atomic bit ops */
116 unsigned long cache_validity; /* bit mask */
116 117
117 /* 118 /*
118 * read_cache_jiffies is when we started read-caching this inode, 119 * read_cache_jiffies is when we started read-caching this inode,
@@ -174,8 +175,6 @@ struct nfs_inode {
174 /* Open contexts for shared mmap writes */ 175 /* Open contexts for shared mmap writes */
175 struct list_head open_files; 176 struct list_head open_files;
176 177
177 wait_queue_head_t nfs_i_wait;
178
179#ifdef CONFIG_NFS_V4 178#ifdef CONFIG_NFS_V4
180 struct nfs4_cached_acl *nfs4_acl; 179 struct nfs4_cached_acl *nfs4_acl;
181 /* NFSv4 state */ 180 /* NFSv4 state */
@@ -188,17 +187,21 @@ struct nfs_inode {
188}; 187};
189 188
190/* 189/*
191 * Legal inode flag values 190 * Cache validity bit flags
192 */ 191 */
193#define NFS_INO_STALE 0x0001 /* possible stale inode */ 192#define NFS_INO_INVALID_ATTR 0x0001 /* cached attrs are invalid */
194#define NFS_INO_ADVISE_RDPLUS 0x0002 /* advise readdirplus */ 193#define NFS_INO_INVALID_DATA 0x0002 /* cached data is invalid */
195#define NFS_INO_REVALIDATING 0x0004 /* revalidating attrs */ 194#define NFS_INO_INVALID_ATIME 0x0004 /* cached atime is invalid */
196#define NFS_INO_INVALID_ATTR 0x0008 /* cached attrs are invalid */ 195#define NFS_INO_INVALID_ACCESS 0x0008 /* cached access cred invalid */
197#define NFS_INO_INVALID_DATA 0x0010 /* cached data is invalid */ 196#define NFS_INO_INVALID_ACL 0x0010 /* cached acls are invalid */
198#define NFS_INO_INVALID_ATIME 0x0020 /* cached atime is invalid */ 197#define NFS_INO_REVAL_PAGECACHE 0x0020 /* must revalidate pagecache */
199#define NFS_INO_INVALID_ACCESS 0x0040 /* cached access cred invalid */ 198
200#define NFS_INO_INVALID_ACL 0x0080 /* cached acls are invalid */ 199/*
201#define NFS_INO_REVAL_PAGECACHE 0x1000 /* must revalidate pagecache */ 200 * Bit offsets in flags field
201 */
202#define NFS_INO_REVALIDATING (0) /* revalidating attrs */
203#define NFS_INO_ADVISE_RDPLUS (1) /* advise readdirplus */
204#define NFS_INO_STALE (2) /* possible stale inode */
202 205
203static inline struct nfs_inode *NFS_I(struct inode *inode) 206static inline struct nfs_inode *NFS_I(struct inode *inode)
204{ 207{
@@ -224,8 +227,7 @@ static inline struct nfs_inode *NFS_I(struct inode *inode)
224#define NFS_ATTRTIMEO_UPDATE(inode) (NFS_I(inode)->attrtimeo_timestamp) 227#define NFS_ATTRTIMEO_UPDATE(inode) (NFS_I(inode)->attrtimeo_timestamp)
225 228
226#define NFS_FLAGS(inode) (NFS_I(inode)->flags) 229#define NFS_FLAGS(inode) (NFS_I(inode)->flags)
227#define NFS_REVALIDATING(inode) (NFS_FLAGS(inode) & NFS_INO_REVALIDATING) 230#define NFS_STALE(inode) (test_bit(NFS_INO_STALE, &NFS_FLAGS(inode)))
228#define NFS_STALE(inode) (NFS_FLAGS(inode) & NFS_INO_STALE)
229 231
230#define NFS_FILEID(inode) (NFS_I(inode)->fileid) 232#define NFS_FILEID(inode) (NFS_I(inode)->fileid)
231 233
@@ -236,8 +238,11 @@ static inline int nfs_caches_unstable(struct inode *inode)
236 238
237static inline void NFS_CACHEINV(struct inode *inode) 239static inline void NFS_CACHEINV(struct inode *inode)
238{ 240{
239 if (!nfs_caches_unstable(inode)) 241 if (!nfs_caches_unstable(inode)) {
240 NFS_FLAGS(inode) |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS; 242 spin_lock(&inode->i_lock);
243 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS;
244 spin_unlock(&inode->i_lock);
245 }
241} 246}
242 247
243static inline int nfs_server_capable(struct inode *inode, int cap) 248static inline int nfs_server_capable(struct inode *inode, int cap)
@@ -247,7 +252,7 @@ static inline int nfs_server_capable(struct inode *inode, int cap)
247 252
248static inline int NFS_USE_READDIRPLUS(struct inode *inode) 253static inline int NFS_USE_READDIRPLUS(struct inode *inode)
249{ 254{
250 return NFS_FLAGS(inode) & NFS_INO_ADVISE_RDPLUS; 255 return test_bit(NFS_INO_ADVISE_RDPLUS, &NFS_FLAGS(inode));
251} 256}
252 257
253/** 258/**
@@ -292,6 +297,7 @@ extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode);
292extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *); 297extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *);
293extern void nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping); 298extern void nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping);
294extern int nfs_setattr(struct dentry *, struct iattr *); 299extern int nfs_setattr(struct dentry *, struct iattr *);
300extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr);
295extern void nfs_begin_attr_update(struct inode *); 301extern void nfs_begin_attr_update(struct inode *);
296extern void nfs_end_attr_update(struct inode *); 302extern void nfs_end_attr_update(struct inode *);
297extern void nfs_begin_data_update(struct inode *); 303extern void nfs_begin_data_update(struct inode *);
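The nfs_inode now carries two fields where there was one: cache_validity keeps the NFS_INO_INVALID_* mask and, as NFS_CACHEINV() above shows, is updated under inode->i_lock, while flags holds bit numbers for the atomic bitops. A sketch of the resulting caller pattern (nfs_mark_stale_and_invalid() is illustrative, not part of the header):

#include <linux/nfs_fs.h>

static void nfs_mark_stale_and_invalid(struct inode *inode)
{
	/* bit-number flags are manipulated with the atomic bitops */
	set_bit(NFS_INO_STALE, &NFS_FLAGS(inode));

	/* the validity mask stays a plain bitmask, guarded by i_lock */
	spin_lock(&inode->i_lock);
	NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
	spin_unlock(&inode->i_lock);
}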
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 8621cf42b46f..bc4c40000c0d 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -556,7 +556,8 @@ struct pci_dev {
556 /* keep track of device state */ 556 /* keep track of device state */
557 unsigned int is_enabled:1; /* pci_enable_device has been called */ 557 unsigned int is_enabled:1; /* pci_enable_device has been called */
558 unsigned int is_busmaster:1; /* device is busmaster */ 558 unsigned int is_busmaster:1; /* device is busmaster */
559 559 unsigned int no_msi:1; /* device may not use msi */
560
560 u32 saved_config_space[16]; /* config space saved at suspend time */ 561 u32 saved_config_space[16]; /* config space saved at suspend time */
561 struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */ 562 struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */
562 int rom_attr_enabled; /* has display of the rom attribute been enabled? */ 563 int rom_attr_enabled; /* has display of the rom attribute been enabled? */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index bc4cc10fabe9..499a5325f67f 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -881,7 +881,7 @@
881#define PCI_DEVICE_ID_APPLE_UNI_N_PCI15 0x002e 881#define PCI_DEVICE_ID_APPLE_UNI_N_PCI15 0x002e
882#define PCI_DEVICE_ID_APPLE_UNI_N_FW2 0x0030 882#define PCI_DEVICE_ID_APPLE_UNI_N_FW2 0x0030
883#define PCI_DEVICE_ID_APPLE_UNI_N_GMAC2 0x0032 883#define PCI_DEVICE_ID_APPLE_UNI_N_GMAC2 0x0032
884#define PCI_DEVIEC_ID_APPLE_UNI_N_ATA 0x0033 884#define PCI_DEVICE_ID_APPLE_UNI_N_ATA 0x0033
885#define PCI_DEVICE_ID_APPLE_UNI_N_AGP2 0x0034 885#define PCI_DEVICE_ID_APPLE_UNI_N_AGP2 0x0034
886#define PCI_DEVICE_ID_APPLE_IPID_ATA100 0x003b 886#define PCI_DEVICE_ID_APPLE_IPID_ATA100 0x003b
887#define PCI_DEVICE_ID_APPLE_KEYLARGO_I 0x003e 887#define PCI_DEVICE_ID_APPLE_KEYLARGO_I 0x003e
@@ -1249,6 +1249,7 @@
1249#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA 0x0266 1249#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA 0x0266
1250#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2 0x0267 1250#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2 0x0267
1251#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE 0x036E 1251#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE 0x036E
1252#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA 0x036F
1252#define PCI_DEVICE_ID_NVIDIA_NVENET_12 0x0268 1253#define PCI_DEVICE_ID_NVIDIA_NVENET_12 0x0268
1253#define PCI_DEVICE_ID_NVIDIA_NVENET_13 0x0269 1254#define PCI_DEVICE_ID_NVIDIA_NVENET_13 0x0269
1254#define PCI_DEVICE_ID_NVIDIA_MCP51_AUDIO 0x026B 1255#define PCI_DEVICE_ID_NVIDIA_MCP51_AUDIO 0x026B
@@ -1580,6 +1581,7 @@
1580#define PCI_DEVICE_ID_SERVERWORKS_OSB4IDE 0x0211 1581#define PCI_DEVICE_ID_SERVERWORKS_OSB4IDE 0x0211
1581#define PCI_DEVICE_ID_SERVERWORKS_CSB5IDE 0x0212 1582#define PCI_DEVICE_ID_SERVERWORKS_CSB5IDE 0x0212
1582#define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE 0x0213 1583#define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE 0x0213
1584#define PCI_DEVICE_ID_SERVERWORKS_HT1000IDE 0x0214
1583#define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2 0x0217 1585#define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2 0x0217
1584#define PCI_DEVICE_ID_SERVERWORKS_OSB4USB 0x0220 1586#define PCI_DEVICE_ID_SERVERWORKS_OSB4USB 0x0220
1585#define PCI_DEVICE_ID_SERVERWORKS_CSB5USB PCI_DEVICE_ID_SERVERWORKS_OSB4USB 1587#define PCI_DEVICE_ID_SERVERWORKS_CSB5USB PCI_DEVICE_ID_SERVERWORKS_OSB4USB
@@ -2184,6 +2186,9 @@
2184#define PCI_VENDOR_ID_SIBYTE 0x166d 2186#define PCI_VENDOR_ID_SIBYTE 0x166d
2185#define PCI_DEVICE_ID_BCM1250_HT 0x0002 2187#define PCI_DEVICE_ID_BCM1250_HT 0x0002
2186 2188
2189#define PCI_VENDOR_ID_NETCELL 0x169c
2190#define PCI_DEVICE_ID_REVOLUTION 0x0044
2191
2187#define PCI_VENDOR_ID_LINKSYS 0x1737 2192#define PCI_VENDOR_ID_LINKSYS 0x1737
2188#define PCI_DEVICE_ID_LINKSYS_EG1032 0x1032 2193#define PCI_DEVICE_ID_LINKSYS_EG1032 0x1032
2189#define PCI_DEVICE_ID_LINKSYS_EG1064 0x1064 2194#define PCI_DEVICE_ID_LINKSYS_EG1064 0x1064
@@ -2281,6 +2286,11 @@
2281#define PCI_VENDOR_ID_INTEL 0x8086 2286#define PCI_VENDOR_ID_INTEL 0x8086
2282#define PCI_DEVICE_ID_INTEL_EESSC 0x0008 2287#define PCI_DEVICE_ID_INTEL_EESSC 0x0008
2283#define PCI_DEVICE_ID_INTEL_21145 0x0039 2288#define PCI_DEVICE_ID_INTEL_21145 0x0039
2289#define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320
2290#define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321
2291#define PCI_DEVICE_ID_INTEL_PXH_0 0x0329
2292#define PCI_DEVICE_ID_INTEL_PXH_1 0x032A
2293#define PCI_DEVICE_ID_INTEL_PXHV 0x032C
2284#define PCI_DEVICE_ID_INTEL_82375 0x0482 2294#define PCI_DEVICE_ID_INTEL_82375 0x0482
2285#define PCI_DEVICE_ID_INTEL_82424 0x0483 2295#define PCI_DEVICE_ID_INTEL_82424 0x0483
2286#define PCI_DEVICE_ID_INTEL_82378 0x0484 2296#define PCI_DEVICE_ID_INTEL_82378 0x0484
diff --git a/include/linux/phy.h b/include/linux/phy.h
new file mode 100644
index 000000000000..72cb67b66e0c
--- /dev/null
+++ b/include/linux/phy.h
@@ -0,0 +1,377 @@
1/*
2 * include/linux/phy.h
3 *
4 * Framework and drivers for configuring and reading different PHYs
5 * Based on code in sungem_phy.c and gianfar_phy.c
6 *
7 * Author: Andy Fleming
8 *
9 * Copyright (c) 2004 Freescale Semiconductor, Inc.
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 */
17
18#ifndef __PHY_H
19#define __PHY_H
20
21#include <linux/spinlock.h>
22#include <linux/device.h>
23
24#define PHY_BASIC_FEATURES (SUPPORTED_10baseT_Half | \
25 SUPPORTED_10baseT_Full | \
26 SUPPORTED_100baseT_Half | \
27 SUPPORTED_100baseT_Full | \
28 SUPPORTED_Autoneg | \
29 SUPPORTED_TP | \
30 SUPPORTED_MII)
31
32#define PHY_GBIT_FEATURES (PHY_BASIC_FEATURES | \
33 SUPPORTED_1000baseT_Half | \
34 SUPPORTED_1000baseT_Full)
35
36/* Set phydev->irq to PHY_POLL if interrupts are not supported,
37 * or not desired for this PHY. Set to PHY_IGNORE_INTERRUPT if
38 * the attached driver handles the interrupt
39 */
40#define PHY_POLL -1
41#define PHY_IGNORE_INTERRUPT -2
42
43#define PHY_HAS_INTERRUPT 0x00000001
44#define PHY_HAS_MAGICANEG 0x00000002
45
46#define MII_BUS_MAX 4
47
48
49#define PHY_INIT_TIMEOUT 100000
50#define PHY_STATE_TIME 1
51#define PHY_FORCE_TIMEOUT 10
52#define PHY_AN_TIMEOUT 10
53
54#define PHY_MAX_ADDR 32
55
56/* The Bus class for PHYs. Devices which provide access to
57 * PHYs should register using this structure */
58struct mii_bus {
59 const char *name;
60 int id;
61 void *priv;
62 int (*read)(struct mii_bus *bus, int phy_id, int regnum);
63 int (*write)(struct mii_bus *bus, int phy_id, int regnum, u16 val);
64 int (*reset)(struct mii_bus *bus);
65
66 /* A lock to ensure that only one thing can read/write
67 * the MDIO bus at a time */
68 spinlock_t mdio_lock;
69
70 struct device *dev;
71
72 /* list of all PHYs on bus */
73 struct phy_device *phy_map[PHY_MAX_ADDR];
74
75 /* Pointer to an array of interrupts, each PHY's
76 * interrupt at the index matching its address */
77 int *irq;
78};
79
80#define PHY_INTERRUPT_DISABLED 0x0
81#define PHY_INTERRUPT_ENABLED 0x80000000
82
83/* PHY state machine states:
84 *
85 * DOWN: PHY device and driver are not ready for anything. probe
86 * should be called if and only if the PHY is in this state,
87 * given that the PHY device exists.
88 * - PHY driver probe function will, depending on the PHY, set
89 * the state to STARTING or READY
90 *
91 * STARTING: PHY device is coming up, and the ethernet driver is
92 * not ready. PHY drivers may set this in the probe function.
93 * If they do, they are responsible for making sure the state is
94 * eventually set to indicate whether the PHY is UP or READY,
95 * depending on the state when the PHY is done starting up.
96 * - PHY driver will set the state to READY
97 * - start will set the state to PENDING
98 *
99 * READY: PHY is ready to send and receive packets, but the
100 * controller is not. By default, PHYs which do not implement
101 * probe will be set to this state by phy_probe(). If the PHY
102 * driver knows the PHY is ready, and the PHY state is STARTING,
 103 * then it sets the state to READY.
104 * - start will set the state to UP
105 *
106 * PENDING: PHY device is coming up, but the ethernet driver is
107 * ready. phy_start will set this state if the PHY state is
108 * STARTING.
109 * - PHY driver will set the state to UP when the PHY is ready
110 *
111 * UP: The PHY and attached device are ready to do work.
112 * Interrupts should be started here.
113 * - timer moves to AN
114 *
115 * AN: The PHY is currently negotiating the link state. Link is
116 * therefore down for now. phy_timer will set this state when it
117 * detects the state is UP. config_aneg will set this state
118 * whenever called with phydev->autoneg set to AUTONEG_ENABLE.
119 * - If autonegotiation finishes, but there's no link, it sets
120 * the state to NOLINK.
121 * - If aneg finishes with link, it sets the state to RUNNING,
122 * and calls adjust_link
123 * - If autonegotiation did not finish after an arbitrary amount
124 * of time, autonegotiation should be tried again if the PHY
125 * supports "magic" autonegotiation (back to AN)
126 * - If it didn't finish, and no magic_aneg, move to FORCING.
127 *
128 * NOLINK: PHY is up, but not currently plugged in.
129 * - If the timer notes that the link comes back, we move to RUNNING
130 * - config_aneg moves to AN
131 * - phy_stop moves to HALTED
132 *
133 * FORCING: PHY is being configured with forced settings
134 * - if link is up, move to RUNNING
135 * - If link is down, we drop to the next highest setting, and
136 * retry (FORCING) after a timeout
137 * - phy_stop moves to HALTED
138 *
139 * RUNNING: PHY is currently up, running, and possibly sending
140 * and/or receiving packets
141 * - timer will set CHANGELINK if we're polling (this ensures the
142 * link state is polled every other cycle of this state machine,
143 * which makes it every other second)
144 * - irq will set CHANGELINK
145 * - config_aneg will set AN
146 * - phy_stop moves to HALTED
147 *
148 * CHANGELINK: PHY experienced a change in link state
149 * - timer moves to RUNNING if link
150 * - timer moves to NOLINK if the link is down
151 * - phy_stop moves to HALTED
152 *
153 * HALTED: PHY is up, but no polling or interrupts are done. Or
154 * PHY is in an error state.
155 *
156 * - phy_start moves to RESUMING
157 *
158 * RESUMING: PHY was halted, but now wants to run again.
159 * - If we are forcing, or aneg is done, timer moves to RUNNING
160 * - If aneg is not done, timer moves to AN
161 * - phy_stop moves to HALTED
162 */
163enum phy_state {
164 PHY_DOWN=0,
165 PHY_STARTING,
166 PHY_READY,
167 PHY_PENDING,
168 PHY_UP,
169 PHY_AN,
170 PHY_RUNNING,
171 PHY_NOLINK,
172 PHY_FORCING,
173 PHY_CHANGELINK,
174 PHY_HALTED,
175 PHY_RESUMING
176};
177
178/* phy_device: An instance of a PHY
179 *
180 * drv: Pointer to the driver for this PHY instance
181 * bus: Pointer to the bus this PHY is on
182 * dev: driver model device structure for this PHY
183 * phy_id: UID for this device found during discovery
184 * state: state of the PHY for management purposes
185 * dev_flags: Device-specific flags used by the PHY driver.
186 * addr: Bus address of PHY
187 * link_timeout: The number of timer firings to wait before the
188 * giving up on the current attempt at acquiring a link
189 * irq: IRQ number of the PHY's interrupt (-1 if none)
190 * phy_timer: The timer for handling the state machine
191 * phy_queue: A work_queue for the interrupt
192 * attached_dev: The attached enet driver's device instance ptr
193 * adjust_link: Callback for the enet controller to respond to
194 * changes in the link state.
195 * adjust_state: Callback for the enet driver to respond to
196 * changes in the state machine.
197 *
198 * speed, duplex, pause, supported, advertising, and
199 * autoneg are used like in mii_if_info
200 *
201 * interrupts currently only supports enabled or disabled,
202 * but could be changed in the future to support enabling
203 * and disabling specific interrupts
204 *
205 * Contains some infrastructure for polling and interrupt
206 * handling, as well as handling shifts in PHY hardware state
207 */
208struct phy_device {
209 /* Information about the PHY type */
210 /* And management functions */
211 struct phy_driver *drv;
212
213 struct mii_bus *bus;
214
215 struct device dev;
216
217 u32 phy_id;
218
219 enum phy_state state;
220
221 u32 dev_flags;
222
 223 /* Bus address of the PHY (0-31) */
224 int addr;
225
226 /* forced speed & duplex (no autoneg)
227 * partner speed & duplex & pause (autoneg)
228 */
229 int speed;
230 int duplex;
231 int pause;
232 int asym_pause;
233
234 /* The most recently read link state */
235 int link;
236
237 /* Enabled Interrupts */
238 u32 interrupts;
239
240 /* Union of PHY and Attached devices' supported modes */
241 /* See mii.h for more info */
242 u32 supported;
243 u32 advertising;
244
245 int autoneg;
246
247 int link_timeout;
248
249 /* Interrupt number for this PHY
250 * -1 means no interrupt */
251 int irq;
252
253 /* private data pointer */
254 /* For use by PHYs to maintain extra state */
255 void *priv;
256
257 /* Interrupt and Polling infrastructure */
258 struct work_struct phy_queue;
259 struct timer_list phy_timer;
260
261 spinlock_t lock;
262
263 struct net_device *attached_dev;
264
265 void (*adjust_link)(struct net_device *dev);
266
267 void (*adjust_state)(struct net_device *dev);
268};
269#define to_phy_device(d) container_of(d, struct phy_device, dev)
270
271/* struct phy_driver: Driver structure for a particular PHY type
272 *
273 * phy_id: The result of reading the UID registers of this PHY
274 * type, and ANDing them with the phy_id_mask. This driver
275 * only works for PHYs with IDs which match this field
276 * name: The friendly name of this PHY type
277 * phy_id_mask: Defines the important bits of the phy_id
278 * features: A list of features (speed, duplex, etc) supported
279 * by this PHY
280 * flags: A bitfield defining certain other features this PHY
281 * supports (like interrupts)
282 *
283 * The drivers must implement config_aneg and read_status. All
284 * other functions are optional. Note that none of these
285 * functions should be called from interrupt time. The goal is
286 * for the bus read/write functions to be able to block when the
287 * bus transaction is happening, and be freed up by an interrupt
288 * (The MPC85xx has this ability, though it is not currently
289 * supported in the driver).
290 */
291struct phy_driver {
292 u32 phy_id;
293 char *name;
294 unsigned int phy_id_mask;
295 u32 features;
296 u32 flags;
297
298 /* Called to initialize the PHY,
299 * including after a reset */
300 int (*config_init)(struct phy_device *phydev);
301
302 /* Called during discovery. Used to set
303 * up device-specific structures, if any */
304 int (*probe)(struct phy_device *phydev);
305
306 /* PHY Power Management */
307 int (*suspend)(struct phy_device *phydev);
308 int (*resume)(struct phy_device *phydev);
309
310 /* Configures the advertisement and resets
311 * autonegotiation if phydev->autoneg is on,
312 * forces the speed to the current settings in phydev
313 * if phydev->autoneg is off */
314 int (*config_aneg)(struct phy_device *phydev);
315
316 /* Determines the negotiated speed and duplex */
317 int (*read_status)(struct phy_device *phydev);
318
319 /* Clears any pending interrupts */
320 int (*ack_interrupt)(struct phy_device *phydev);
321
322 /* Enables or disables interrupts */
323 int (*config_intr)(struct phy_device *phydev);
324
325 /* Clears up any memory if needed */
326 void (*remove)(struct phy_device *phydev);
327
328 struct device_driver driver;
329};
330#define to_phy_driver(d) container_of(d, struct phy_driver, driver)
331
332int phy_read(struct phy_device *phydev, u16 regnum);
333int phy_write(struct phy_device *phydev, u16 regnum, u16 val);
334struct phy_device* get_phy_device(struct mii_bus *bus, int addr);
335int phy_clear_interrupt(struct phy_device *phydev);
336int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
337struct phy_device * phy_attach(struct net_device *dev,
338 const char *phy_id, u32 flags);
339struct phy_device * phy_connect(struct net_device *dev, const char *phy_id,
340 void (*handler)(struct net_device *), u32 flags);
341void phy_disconnect(struct phy_device *phydev);
342void phy_detach(struct phy_device *phydev);
343void phy_start(struct phy_device *phydev);
344void phy_stop(struct phy_device *phydev);
345int phy_start_aneg(struct phy_device *phydev);
346
347int mdiobus_register(struct mii_bus *bus);
348void mdiobus_unregister(struct mii_bus *bus);
349void phy_sanitize_settings(struct phy_device *phydev);
350int phy_stop_interrupts(struct phy_device *phydev);
351
352static inline int phy_read_status(struct phy_device *phydev) {
353 return phydev->drv->read_status(phydev);
354}
355
356int genphy_config_advert(struct phy_device *phydev);
357int genphy_setup_forced(struct phy_device *phydev);
358int genphy_restart_aneg(struct phy_device *phydev);
359int genphy_config_aneg(struct phy_device *phydev);
360int genphy_update_link(struct phy_device *phydev);
361int genphy_read_status(struct phy_device *phydev);
362void phy_driver_unregister(struct phy_driver *drv);
363int phy_driver_register(struct phy_driver *new_driver);
364void phy_prepare_link(struct phy_device *phydev,
365 void (*adjust_link)(struct net_device *));
366void phy_start_machine(struct phy_device *phydev,
367 void (*handler)(struct net_device *));
368void phy_stop_machine(struct phy_device *phydev);
369int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
370int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
371int phy_mii_ioctl(struct phy_device *phydev,
372 struct mii_ioctl_data *mii_data, int cmd);
373int phy_start_interrupts(struct phy_device *phydev);
374void phy_print_status(struct phy_device *phydev);
375
376extern struct bus_type mdio_bus_type;
377#endif /* __PHY_H */
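Taken together, the API above gives a MAC driver a short bring-up sequence: connect to the PHY with a link-change callback, then start the state machine. A hedged sketch of that sequence; my_adjust_link(), my_open() and the "0:01" bus id string are illustrative, not defined by this header:

#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

static struct phy_device *my_phydev;	/* illustrative driver state */

static void my_adjust_link(struct net_device *dev)
{
	/* react to my_phydev->link, ->speed and ->duplex changes here */
}

static int my_open(struct net_device *dev)
{
	/* "0:01" would name PHY address 1 on mii_bus 0 (format is illustrative) */
	my_phydev = phy_connect(dev, "0:01", &my_adjust_link, 0);
	if (IS_ERR(my_phydev))
		return PTR_ERR(my_phydev);

	phy_start(my_phydev);	/* moves the state machine toward UP/AN */
	return 0;
}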
diff --git a/include/linux/serialP.h b/include/linux/serialP.h
index 2b2f35a64d75..2b9e6b9554d5 100644
--- a/include/linux/serialP.h
+++ b/include/linux/serialP.h
@@ -140,44 +140,4 @@ struct rs_multiport_struct {
140#define ALPHA_KLUDGE_MCR 0 140#define ALPHA_KLUDGE_MCR 0
141#endif 141#endif
142 142
143/*
144 * Definitions for PCI support.
145 */
146#define SPCI_FL_BASE_MASK 0x0007
147#define SPCI_FL_BASE0 0x0000
148#define SPCI_FL_BASE1 0x0001
149#define SPCI_FL_BASE2 0x0002
150#define SPCI_FL_BASE3 0x0003
151#define SPCI_FL_BASE4 0x0004
152#define SPCI_FL_GET_BASE(x) (x & SPCI_FL_BASE_MASK)
153
154#define SPCI_FL_IRQ_MASK (0x0007 << 4)
155#define SPCI_FL_IRQBASE0 (0x0000 << 4)
156#define SPCI_FL_IRQBASE1 (0x0001 << 4)
157#define SPCI_FL_IRQBASE2 (0x0002 << 4)
158#define SPCI_FL_IRQBASE3 (0x0003 << 4)
159#define SPCI_FL_IRQBASE4 (0x0004 << 4)
160#define SPCI_FL_GET_IRQBASE(x) ((x & SPCI_FL_IRQ_MASK) >> 4)
161
162/* Use successive BARs (PCI base address registers),
163 else use offset into some specified BAR */
164#define SPCI_FL_BASE_TABLE 0x0100
165
166/* Use successive entries in the irq resource table */
167#define SPCI_FL_IRQ_TABLE 0x0200
168
169/* Use the irq resource table instead of dev->irq */
170#define SPCI_FL_IRQRESOURCE 0x0400
171
172/* Use the Base address register size to cap number of ports */
173#define SPCI_FL_REGION_SZ_CAP 0x0800
174
175/* Do not use irq sharing for this device */
176#define SPCI_FL_NO_SHIRQ 0x1000
177
178/* This is a PNP device */
179#define SPCI_FL_ISPNP 0x2000
180
181#define SPCI_FL_PNPDEFAULT (SPCI_FL_IRQRESOURCE|SPCI_FL_ISPNP)
182
183#endif /* _LINUX_SERIAL_H */ 143#endif /* _LINUX_SERIAL_H */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0061c9470482..948527e42a60 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -255,7 +255,7 @@ struct sk_buff {
255 nohdr:1; 255 nohdr:1;
256 /* 3 bits spare */ 256 /* 3 bits spare */
257 __u8 pkt_type; 257 __u8 pkt_type;
258 __u16 protocol; 258 __be16 protocol;
259 259
260 void (*destructor)(struct sk_buff *skb); 260 void (*destructor)(struct sk_buff *skb);
261#ifdef CONFIG_NETFILTER 261#ifdef CONFIG_NETFILTER
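Turning skb->protocol into a __be16 lets sparse catch byte-order mistakes; comparisons should use network-order constants, as in this small sketch (skb_is_ipv4() is illustrative):

#include <linux/skbuff.h>
#include <linux/if_ether.h>

static int skb_is_ipv4(const struct sk_buff *skb)
{
	/* htons(ETH_P_IP) is __be16, matching the field's new type */
	return skb->protocol == htons(ETH_P_IP);
}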
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 34ec3e8d99b3..23448d0fb5bc 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -177,6 +177,7 @@ typedef int (*xdr_xcode_elem_t)(struct xdr_array2_desc *desc, void *elem);
177struct xdr_array2_desc { 177struct xdr_array2_desc {
178 unsigned int elem_size; 178 unsigned int elem_size;
179 unsigned int array_len; 179 unsigned int array_len;
180 unsigned int array_maxlen;
180 xdr_xcode_elem_t xcode; 181 xdr_xcode_elem_t xcode;
181}; 182};
182 183
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 828a3a93dda1..3696f988a9f1 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -139,11 +139,25 @@ enum {
139#define AX25_DEF_DS_TIMEOUT (3 * 60 * HZ) /* DAMA timeout 3 minutes */ 139#define AX25_DEF_DS_TIMEOUT (3 * 60 * HZ) /* DAMA timeout 3 minutes */
140 140
141typedef struct ax25_uid_assoc { 141typedef struct ax25_uid_assoc {
142 struct ax25_uid_assoc *next; 142 struct hlist_node uid_node;
143 atomic_t refcount;
143 uid_t uid; 144 uid_t uid;
144 ax25_address call; 145 ax25_address call;
145} ax25_uid_assoc; 146} ax25_uid_assoc;
146 147
148#define ax25_uid_for_each(__ax25, node, list) \
149 hlist_for_each_entry(__ax25, node, list, uid_node)
150
151#define ax25_uid_hold(ax25) \
152 atomic_inc(&((ax25)->refcount))
153
154static inline void ax25_uid_put(ax25_uid_assoc *assoc)
155{
156 if (atomic_dec_and_test(&assoc->refcount)) {
157 kfree(assoc);
158 }
159}
160
147typedef struct { 161typedef struct {
148 ax25_address calls[AX25_MAX_DIGIS]; 162 ax25_address calls[AX25_MAX_DIGIS];
149 unsigned char repeated[AX25_MAX_DIGIS]; 163 unsigned char repeated[AX25_MAX_DIGIS];
@@ -376,7 +390,7 @@ extern unsigned long ax25_display_timer(struct timer_list *);
376 390
377/* ax25_uid.c */ 391/* ax25_uid.c */
378extern int ax25_uid_policy; 392extern int ax25_uid_policy;
379extern ax25_address *ax25_findbyuid(uid_t); 393extern ax25_uid_assoc *ax25_findbyuid(uid_t);
380extern int ax25_uid_ioctl(int, struct sockaddr_ax25 *); 394extern int ax25_uid_ioctl(int, struct sockaddr_ax25 *);
381extern struct file_operations ax25_uid_fops; 395extern struct file_operations ax25_uid_fops;
382extern void ax25_uid_free(void); 396extern void ax25_uid_free(void);
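ax25_findbyuid() now hands back a refcounted ax25_uid_assoc rather than a bare address, so callers use the entry and then drop it with ax25_uid_put(). A sketch of the adjusted caller pattern, assuming the lookup returns with a reference already held (copy_call_for_uid() is illustrative):

#include <linux/errno.h>
#include <net/ax25.h>

static int copy_call_for_uid(uid_t uid, ax25_address *dest)
{
	ax25_uid_assoc *user = ax25_findbyuid(uid);

	if (user == NULL)
		return -EPERM;

	*dest = user->call;	/* use the association... */
	ax25_uid_put(user);	/* ...then drop the reference */
	return 0;
}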
diff --git a/include/net/sock.h b/include/net/sock.h
index a1042d08becd..e9b1dbab90d0 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -384,6 +384,11 @@ enum sock_flags {
384 SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */ 384 SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
385}; 385};
386 386
387static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
388{
389 nsk->sk_flags = osk->sk_flags;
390}
391
387static inline void sock_set_flag(struct sock *sk, enum sock_flags flag) 392static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
388{ 393{
389 __set_bit(flag, &sk->sk_flags); 394 __set_bit(flag, &sk->sk_flags);
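sock_copy_flags() gives protocol accept and clone paths one call to propagate the parent's flag word to a child socket. A minimal sketch of its intended use (my_clone_flags() is illustrative):

#include <net/sock.h>

static void my_clone_flags(struct sock *parent, struct sock *child)
{
	sock_copy_flags(child, parent);	/* argument order is (nsk, osk) */
}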
diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h
new file mode 100644
index 000000000000..5bf9834f7dca
--- /dev/null
+++ b/include/rdma/ib_cache.h
@@ -0,0 +1,105 @@
1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Intel Corporation. All rights reserved.
4 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 *
34 * $Id: ib_cache.h 1349 2004-12-16 21:09:43Z roland $
35 */
36
37#ifndef _IB_CACHE_H
38#define _IB_CACHE_H
39
40#include <rdma/ib_verbs.h>
41
42/**
43 * ib_get_cached_gid - Returns a cached GID table entry
44 * @device: The device to query.
45 * @port_num: The port number of the device to query.
46 * @index: The index into the cached GID table to query.
47 * @gid: The GID value found at the specified index.
48 *
49 * ib_get_cached_gid() fetches the specified GID table entry stored in
50 * the local software cache.
51 */
52int ib_get_cached_gid(struct ib_device *device,
53 u8 port_num,
54 int index,
55 union ib_gid *gid);
56
57/**
58 * ib_find_cached_gid - Returns the port number and GID table index where
59 * a specified GID value occurs.
60 * @device: The device to query.
61 * @gid: The GID value to search for.
62 * @port_num: The port number of the device where the GID value was found.
63 * @index: The index into the cached GID table where the GID was found. This
64 * parameter may be NULL.
65 *
66 * ib_find_cached_gid() searches for the specified GID value in
67 * the local software cache.
68 */
69int ib_find_cached_gid(struct ib_device *device,
70 union ib_gid *gid,
71 u8 *port_num,
72 u16 *index);
73
74/**
75 * ib_get_cached_pkey - Returns a cached PKey table entry
76 * @device: The device to query.
77 * @port_num: The port number of the device to query.
78 * @index: The index into the cached PKey table to query.
79 * @pkey: The PKey value found at the specified index.
80 *
81 * ib_get_cached_pkey() fetches the specified PKey table entry stored in
82 * the local software cache.
83 */
84int ib_get_cached_pkey(struct ib_device *device_handle,
85 u8 port_num,
86 int index,
87 u16 *pkey);
88
89/**
90 * ib_find_cached_pkey - Returns the PKey table index where a specified
91 * PKey value occurs.
92 * @device: The device to query.
93 * @port_num: The port number of the device to search for the PKey.
94 * @pkey: The PKey value to search for.
95 * @index: The index into the cached PKey table where the PKey was found.
96 *
97 * ib_find_cached_pkey() searches the specified PKey table in
98 * the local software cache.
99 */
100int ib_find_cached_pkey(struct ib_device *device,
101 u8 port_num,
102 u16 pkey,
103 u16 *index);
104
105#endif /* _IB_CACHE_H */
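These helpers let consumers skip a MAD query for common lookups. A hedged sketch that pulls index 0 of a port's GID and P_Key tables from the cache (get_port_defaults() is illustrative):

#include <rdma/ib_cache.h>

static int get_port_defaults(struct ib_device *device, u8 port_num,
			     union ib_gid *gid, u16 *pkey)
{
	int ret = ib_get_cached_gid(device, port_num, 0, gid);

	if (ret)
		return ret;

	return ib_get_cached_pkey(device, port_num, 0, pkey);
}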
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
new file mode 100644
index 000000000000..77fe9039209b
--- /dev/null
+++ b/include/rdma/ib_cm.h
@@ -0,0 +1,568 @@
1/*
2 * Copyright (c) 2004 Intel Corporation. All rights reserved.
3 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
4 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
5 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 *
35 * $Id: ib_cm.h 2730 2005-06-28 16:43:03Z sean.hefty $
36 */
37#if !defined(IB_CM_H)
38#define IB_CM_H
39
40#include <rdma/ib_mad.h>
41#include <rdma/ib_sa.h>
42
43enum ib_cm_state {
44 IB_CM_IDLE,
45 IB_CM_LISTEN,
46 IB_CM_REQ_SENT,
47 IB_CM_REQ_RCVD,
48 IB_CM_MRA_REQ_SENT,
49 IB_CM_MRA_REQ_RCVD,
50 IB_CM_REP_SENT,
51 IB_CM_REP_RCVD,
52 IB_CM_MRA_REP_SENT,
53 IB_CM_MRA_REP_RCVD,
54 IB_CM_ESTABLISHED,
55 IB_CM_DREQ_SENT,
56 IB_CM_DREQ_RCVD,
57 IB_CM_TIMEWAIT,
58 IB_CM_SIDR_REQ_SENT,
59 IB_CM_SIDR_REQ_RCVD
60};
61
62enum ib_cm_lap_state {
63 IB_CM_LAP_IDLE,
64 IB_CM_LAP_SENT,
65 IB_CM_LAP_RCVD,
66 IB_CM_MRA_LAP_SENT,
67 IB_CM_MRA_LAP_RCVD,
68};
69
70enum ib_cm_event_type {
71 IB_CM_REQ_ERROR,
72 IB_CM_REQ_RECEIVED,
73 IB_CM_REP_ERROR,
74 IB_CM_REP_RECEIVED,
75 IB_CM_RTU_RECEIVED,
76 IB_CM_USER_ESTABLISHED,
77 IB_CM_DREQ_ERROR,
78 IB_CM_DREQ_RECEIVED,
79 IB_CM_DREP_RECEIVED,
80 IB_CM_TIMEWAIT_EXIT,
81 IB_CM_MRA_RECEIVED,
82 IB_CM_REJ_RECEIVED,
83 IB_CM_LAP_ERROR,
84 IB_CM_LAP_RECEIVED,
85 IB_CM_APR_RECEIVED,
86 IB_CM_SIDR_REQ_ERROR,
87 IB_CM_SIDR_REQ_RECEIVED,
88 IB_CM_SIDR_REP_RECEIVED
89};
90
91enum ib_cm_data_size {
92 IB_CM_REQ_PRIVATE_DATA_SIZE = 92,
93 IB_CM_MRA_PRIVATE_DATA_SIZE = 222,
94 IB_CM_REJ_PRIVATE_DATA_SIZE = 148,
95 IB_CM_REP_PRIVATE_DATA_SIZE = 196,
96 IB_CM_RTU_PRIVATE_DATA_SIZE = 224,
97 IB_CM_DREQ_PRIVATE_DATA_SIZE = 220,
98 IB_CM_DREP_PRIVATE_DATA_SIZE = 224,
99 IB_CM_REJ_ARI_LENGTH = 72,
100 IB_CM_LAP_PRIVATE_DATA_SIZE = 168,
101 IB_CM_APR_PRIVATE_DATA_SIZE = 148,
102 IB_CM_APR_INFO_LENGTH = 72,
103 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE = 216,
104 IB_CM_SIDR_REP_PRIVATE_DATA_SIZE = 136,
105 IB_CM_SIDR_REP_INFO_LENGTH = 72
106};
107
108struct ib_cm_id;
109
110struct ib_cm_req_event_param {
111 struct ib_cm_id *listen_id;
112 struct ib_device *device;
113 u8 port;
114
115 struct ib_sa_path_rec *primary_path;
116 struct ib_sa_path_rec *alternate_path;
117
118 __be64 remote_ca_guid;
119 u32 remote_qkey;
120 u32 remote_qpn;
121 enum ib_qp_type qp_type;
122
123 u32 starting_psn;
124 u8 responder_resources;
125 u8 initiator_depth;
126 unsigned int local_cm_response_timeout:5;
127 unsigned int flow_control:1;
128 unsigned int remote_cm_response_timeout:5;
129 unsigned int retry_count:3;
130 unsigned int rnr_retry_count:3;
131 unsigned int srq:1;
132};
133
134struct ib_cm_rep_event_param {
135 __be64 remote_ca_guid;
136 u32 remote_qkey;
137 u32 remote_qpn;
138 u32 starting_psn;
139 u8 responder_resources;
140 u8 initiator_depth;
141 unsigned int target_ack_delay:5;
142 unsigned int failover_accepted:2;
143 unsigned int flow_control:1;
144 unsigned int rnr_retry_count:3;
145 unsigned int srq:1;
146};
147
148enum ib_cm_rej_reason {
149 IB_CM_REJ_NO_QP = 1,
150 IB_CM_REJ_NO_EEC = 2,
151 IB_CM_REJ_NO_RESOURCES = 3,
152 IB_CM_REJ_TIMEOUT = 4,
153 IB_CM_REJ_UNSUPPORTED = 5,
154 IB_CM_REJ_INVALID_COMM_ID = 6,
155 IB_CM_REJ_INVALID_COMM_INSTANCE = 7,
156 IB_CM_REJ_INVALID_SERVICE_ID = 8,
157 IB_CM_REJ_INVALID_TRANSPORT_TYPE = 9,
158 IB_CM_REJ_STALE_CONN = 10,
159 IB_CM_REJ_RDC_NOT_EXIST = 11,
160 IB_CM_REJ_INVALID_GID = 12,
161 IB_CM_REJ_INVALID_LID = 13,
162 IB_CM_REJ_INVALID_SL = 14,
163 IB_CM_REJ_INVALID_TRAFFIC_CLASS = 15,
164 IB_CM_REJ_INVALID_HOP_LIMIT = 16,
165 IB_CM_REJ_INVALID_PACKET_RATE = 17,
166 IB_CM_REJ_INVALID_ALT_GID = 18,
167 IB_CM_REJ_INVALID_ALT_LID = 19,
168 IB_CM_REJ_INVALID_ALT_SL = 20,
169 IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS = 21,
170 IB_CM_REJ_INVALID_ALT_HOP_LIMIT = 22,
171 IB_CM_REJ_INVALID_ALT_PACKET_RATE = 23,
172 IB_CM_REJ_PORT_CM_REDIRECT = 24,
173 IB_CM_REJ_PORT_REDIRECT = 25,
174 IB_CM_REJ_INVALID_MTU = 26,
175 IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES = 27,
176 IB_CM_REJ_CONSUMER_DEFINED = 28,
177 IB_CM_REJ_INVALID_RNR_RETRY = 29,
178 IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID = 30,
179 IB_CM_REJ_INVALID_CLASS_VERSION = 31,
180 IB_CM_REJ_INVALID_FLOW_LABEL = 32,
181 IB_CM_REJ_INVALID_ALT_FLOW_LABEL = 33
182};
183
184struct ib_cm_rej_event_param {
185 enum ib_cm_rej_reason reason;
186 void *ari;
187 u8 ari_length;
188};
189
190struct ib_cm_mra_event_param {
191 u8 service_timeout;
192};
193
194struct ib_cm_lap_event_param {
195 struct ib_sa_path_rec *alternate_path;
196};
197
198enum ib_cm_apr_status {
199 IB_CM_APR_SUCCESS,
200 IB_CM_APR_INVALID_COMM_ID,
201 IB_CM_APR_UNSUPPORTED,
202 IB_CM_APR_REJECT,
203 IB_CM_APR_REDIRECT,
204 IB_CM_APR_IS_CURRENT,
205 IB_CM_APR_INVALID_QPN_EECN,
206 IB_CM_APR_INVALID_LID,
207 IB_CM_APR_INVALID_GID,
208 IB_CM_APR_INVALID_FLOW_LABEL,
209 IB_CM_APR_INVALID_TCLASS,
210 IB_CM_APR_INVALID_HOP_LIMIT,
211 IB_CM_APR_INVALID_PACKET_RATE,
212 IB_CM_APR_INVALID_SL
213};
214
215struct ib_cm_apr_event_param {
216 enum ib_cm_apr_status ap_status;
217 void *apr_info;
218 u8 info_len;
219};
220
221struct ib_cm_sidr_req_event_param {
222 struct ib_cm_id *listen_id;
223 struct ib_device *device;
224 u8 port;
225 u16 pkey;
226};
227
228enum ib_cm_sidr_status {
229 IB_SIDR_SUCCESS,
230 IB_SIDR_UNSUPPORTED,
231 IB_SIDR_REJECT,
232 IB_SIDR_NO_QP,
233 IB_SIDR_REDIRECT,
234 IB_SIDR_UNSUPPORTED_VERSION
235};
236
237struct ib_cm_sidr_rep_event_param {
238 enum ib_cm_sidr_status status;
239 u32 qkey;
240 u32 qpn;
241 void *info;
242 u8 info_len;
243
244};
245
246struct ib_cm_event {
247 enum ib_cm_event_type event;
248 union {
249 struct ib_cm_req_event_param req_rcvd;
250 struct ib_cm_rep_event_param rep_rcvd;
251 /* No data for RTU received events. */
252 struct ib_cm_rej_event_param rej_rcvd;
253 struct ib_cm_mra_event_param mra_rcvd;
254 struct ib_cm_lap_event_param lap_rcvd;
255 struct ib_cm_apr_event_param apr_rcvd;
256 /* No data for DREQ/DREP received events. */
257 struct ib_cm_sidr_req_event_param sidr_req_rcvd;
258 struct ib_cm_sidr_rep_event_param sidr_rep_rcvd;
259 enum ib_wc_status send_status;
260 } param;
261
262 void *private_data;
263};
264
265/**
266 * ib_cm_handler - User-defined callback to process communication events.
267 * @cm_id: Communication identifier associated with the reported event.
268 * @event: Information about the communication event.
269 *
270 * IB_CM_REQ_RECEIVED and IB_CM_SIDR_REQ_RECEIVED communication events
271 * generated as a result of listen requests result in the allocation of a
272 * new @cm_id. The new @cm_id is returned to the user through this callback.
273 * Clients are responsible for destroying the new @cm_id. For peer-to-peer
274 * IB_CM_REQ_RECEIVED and all other events, the returned @cm_id corresponds
275 * to a user's existing communication identifier.
276 *
277 * Users may not call ib_destroy_cm_id while in the context of this callback;
278 * however, returning a non-zero value instructs the communication manager to
279 * destroy the @cm_id after the callback completes.
280 */
281typedef int (*ib_cm_handler)(struct ib_cm_id *cm_id,
282 struct ib_cm_event *event);
283
284struct ib_cm_id {
285 ib_cm_handler cm_handler;
286 void *context;
287 __be64 service_id;
288 __be64 service_mask;
289 enum ib_cm_state state; /* internal CM/debug use */
290 enum ib_cm_lap_state lap_state; /* internal CM/debug use */
291 __be32 local_id;
292 __be32 remote_id;
293};
294
295/**
296 * ib_create_cm_id - Allocate a communication identifier.
297 * @cm_handler: Callback invoked to notify the user of CM events.
298 * @context: User specified context associated with the communication
299 * identifier.
300 *
301 * Communication identifiers are used to track connection states, service
302 * ID resolution requests, and listen requests.
303 */
304struct ib_cm_id *ib_create_cm_id(ib_cm_handler cm_handler,
305 void *context);
306
307/**
308 * ib_destroy_cm_id - Destroy a connection identifier.
309 * @cm_id: Connection identifier to destroy.
310 *
311 * This call blocks until the connection identifier is destroyed.
312 */
313void ib_destroy_cm_id(struct ib_cm_id *cm_id);
314
315#define IB_SERVICE_ID_AGN_MASK __constant_cpu_to_be64(0xFF00000000000000ULL)
316#define IB_CM_ASSIGN_SERVICE_ID __constant_cpu_to_be64(0x0200000000000000ULL)
317
318/**
319 * ib_cm_listen - Initiates listening on the specified service ID for
320 * connection and service ID resolution requests.
321 * @cm_id: Connection identifier associated with the listen request.
322 * @service_id: Service identifier matched against incoming connection
323 * and service ID resolution requests. The service ID should be specified
 324 * in network byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
325 * assign a service ID to the caller.
326 * @service_mask: Mask applied to service ID used to listen across a
327 * range of service IDs. If set to 0, the service ID is matched
328 * exactly. This parameter is ignored if %service_id is set to
329 * IB_CM_ASSIGN_SERVICE_ID.
330 */
331int ib_cm_listen(struct ib_cm_id *cm_id,
332 __be64 service_id,
333 __be64 service_mask);
334
335struct ib_cm_req_param {
336 struct ib_sa_path_rec *primary_path;
337 struct ib_sa_path_rec *alternate_path;
338 __be64 service_id;
339 u32 qp_num;
340 enum ib_qp_type qp_type;
341 u32 starting_psn;
342 const void *private_data;
343 u8 private_data_len;
344 u8 peer_to_peer;
345 u8 responder_resources;
346 u8 initiator_depth;
347 u8 remote_cm_response_timeout;
348 u8 flow_control;
349 u8 local_cm_response_timeout;
350 u8 retry_count;
351 u8 rnr_retry_count;
352 u8 max_cm_retries;
353 u8 srq;
354};
355
356/**
357 * ib_send_cm_req - Sends a connection request to the remote node.
358 * @cm_id: Connection identifier that will be associated with the
359 * connection request.
360 * @param: Connection request information needed to establish the
361 * connection.
362 */
363int ib_send_cm_req(struct ib_cm_id *cm_id,
364 struct ib_cm_req_param *param);
365
366struct ib_cm_rep_param {
367 u32 qp_num;
368 u32 starting_psn;
369 const void *private_data;
370 u8 private_data_len;
371 u8 responder_resources;
372 u8 initiator_depth;
373 u8 target_ack_delay;
374 u8 failover_accepted;
375 u8 flow_control;
376 u8 rnr_retry_count;
377 u8 srq;
378};
379
380/**
381 * ib_send_cm_rep - Sends a connection reply in response to a connection
382 * request.
383 * @cm_id: Connection identifier that will be associated with the
384 * connection request.
385 * @param: Connection reply information needed to establish the
386 * connection.
387 */
388int ib_send_cm_rep(struct ib_cm_id *cm_id,
389 struct ib_cm_rep_param *param);
390
391/**
392 * ib_send_cm_rtu - Sends a connection ready to use message in response
393 * to a connection reply message.
394 * @cm_id: Connection identifier associated with the connection request.
395 * @private_data: Optional user-defined private data sent with the
396 * ready to use message.
397 * @private_data_len: Size of the private data buffer, in bytes.
398 */
399int ib_send_cm_rtu(struct ib_cm_id *cm_id,
400 const void *private_data,
401 u8 private_data_len);
402
403/**
404 * ib_send_cm_dreq - Sends a disconnection request for an existing
405 * connection.
406 * @cm_id: Connection identifier associated with the connection being
407 * released.
408 * @private_data: Optional user-defined private data sent with the
409 * disconnection request message.
410 * @private_data_len: Size of the private data buffer, in bytes.
411 */
412int ib_send_cm_dreq(struct ib_cm_id *cm_id,
413 const void *private_data,
414 u8 private_data_len);
415
416/**
417 * ib_send_cm_drep - Sends a disconnection reply to a disconnection request.
418 * @cm_id: Connection identifier associated with the connection being
419 * released.
420 * @private_data: Optional user-defined private data sent with the
421 * disconnection reply message.
422 * @private_data_len: Size of the private data buffer, in bytes.
423 *
424 * If the cm_id is in the correct state, the CM will transition the connection
425 * to the timewait state, even if an error occurs sending the DREP message.
426 */
427int ib_send_cm_drep(struct ib_cm_id *cm_id,
428 const void *private_data,
429 u8 private_data_len);
430
431/**
432 * ib_cm_establish - Forces a connection state to established.
433 * @cm_id: Connection identifier to transition to established.
434 *
435 * This routine should be invoked by users who receive messages on a
436 * connected QP before an RTU has been received.
437 */
438int ib_cm_establish(struct ib_cm_id *cm_id);
439
440/**
441 * ib_send_cm_rej - Sends a connection rejection message to the
442 * remote node.
443 * @cm_id: Connection identifier associated with the connection being
444 * rejected.
445 * @reason: Reason for the connection request rejection.
446 * @ari: Optional additional rejection information.
447 * @ari_length: Size of the additional rejection information, in bytes.
448 * @private_data: Optional user-defined private data sent with the
449 * rejection message.
450 * @private_data_len: Size of the private data buffer, in bytes.
451 */
452int ib_send_cm_rej(struct ib_cm_id *cm_id,
453 enum ib_cm_rej_reason reason,
454 void *ari,
455 u8 ari_length,
456 const void *private_data,
457 u8 private_data_len);
458
459/**
460 * ib_send_cm_mra - Sends a message receipt acknowledgement to a connection
461 * message.
462 * @cm_id: Connection identifier associated with the connection message.
463 * @service_timeout: The maximum time required for the sender to reply to
 464 * the connection message.
465 * @private_data: Optional user-defined private data sent with the
466 * message receipt acknowledgement.
467 * @private_data_len: Size of the private data buffer, in bytes.
468 */
469int ib_send_cm_mra(struct ib_cm_id *cm_id,
470 u8 service_timeout,
471 const void *private_data,
472 u8 private_data_len);
473
474/**
475 * ib_send_cm_lap - Sends a load alternate path request.
476 * @cm_id: Connection identifier associated with the load alternate path
477 * message.
478 * @alternate_path: A path record that identifies the alternate path to
479 * load.
480 * @private_data: Optional user-defined private data sent with the
481 * load alternate path message.
482 * @private_data_len: Size of the private data buffer, in bytes.
483 */
484int ib_send_cm_lap(struct ib_cm_id *cm_id,
485 struct ib_sa_path_rec *alternate_path,
486 const void *private_data,
487 u8 private_data_len);
488
489/**
490 * ib_cm_init_qp_attr - Initializes the QP attributes for use in transitioning
491 * to a specified QP state.
492 * @cm_id: Communication identifier associated with the QP attributes to
493 * initialize.
494 * @qp_attr: On input, specifies the desired QP state. On output, the
495 * mandatory and desired optional attributes will be set in order to
496 * modify the QP to the specified state.
497 * @qp_attr_mask: The QP attribute mask that may be used to transition the
498 * QP to the specified state.
499 *
500 * Users must set the @qp_attr->qp_state to the desired QP state. This call
501 * will set all required attributes for the given transition, along with
502 * known optional attributes. Users may override the attributes returned from
503 * this call before calling ib_modify_qp.
504 */
505int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
506 struct ib_qp_attr *qp_attr,
507 int *qp_attr_mask);
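
For illustration only (this sketch is not part of the patch), a caller might combine ib_cm_init_qp_attr() with ib_modify_qp() from <rdma/ib_verbs.h> to walk a connected QP to RTS; error handling is simplified:

/*
 * Hypothetical sketch, not from this patch: transition a connected QP
 * through RTR and RTS using the attributes suggested by the CM.
 */
static int example_cm_move_qp_to_rts(struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask;
	int ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (!ret)
		ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (!ret)
		ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	return ret;
}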
508
509/**
510 * ib_send_cm_apr - Sends an alternate path response message in response to
511 * a load alternate path request.
512 * @cm_id: Connection identifier associated with the alternate path response.
513 * @status: Reply status sent with the alternate path response.
514 * @info: Optional additional information sent with the alternate path
515 * response.
516 * @info_length: Size of the additional information, in bytes.
517 * @private_data: Optional user-defined private data sent with the
518 * alternate path response message.
519 * @private_data_len: Size of the private data buffer, in bytes.
520 */
521int ib_send_cm_apr(struct ib_cm_id *cm_id,
522 enum ib_cm_apr_status status,
523 void *info,
524 u8 info_length,
525 const void *private_data,
526 u8 private_data_len);
527
528struct ib_cm_sidr_req_param {
529 struct ib_sa_path_rec *path;
530 __be64 service_id;
531 int timeout_ms;
532 const void *private_data;
533 u8 private_data_len;
534 u8 max_cm_retries;
535 u16 pkey;
536};
537
538/**
539 * ib_send_cm_sidr_req - Sends a service ID resolution request to the
540 * remote node.
541 * @cm_id: Communication identifier that will be associated with the
542 * service ID resolution request.
543 * @param: Service ID resolution request information.
544 */
545int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
546 struct ib_cm_sidr_req_param *param);
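
As a hypothetical usage sketch (not part of the patch), a client resolving a service ID over an already-resolved path might fill in the request as follows; the timeout and retry values are illustrative only:

static int example_send_sidr_req(struct ib_cm_id *cm_id,
				 struct ib_sa_path_rec *path,
				 __be64 service_id)
{
	struct ib_cm_sidr_req_param param = {
		.path           = path,
		.service_id     = service_id,
		.timeout_ms     = 1000,	/* illustrative */
		.max_cm_retries = 3	/* illustrative */
	};

	return ib_send_cm_sidr_req(cm_id, &param);
}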
547
548struct ib_cm_sidr_rep_param {
549 u32 qp_num;
550 u32 qkey;
551 enum ib_cm_sidr_status status;
552 const void *info;
553 u8 info_length;
554 const void *private_data;
555 u8 private_data_len;
556};
557
558/**
559 * ib_send_cm_sidr_rep - Sends a service ID resolution reply to the
560 * remote node.
561 * @cm_id: Communication identifier associated with the received service ID
562 * resolution request.
563 * @param: Service ID resolution reply information.
564 */
565int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
566 struct ib_cm_sidr_rep_param *param);
567
568#endif /* IB_CM_H */
diff --git a/include/rdma/ib_fmr_pool.h b/include/rdma/ib_fmr_pool.h
new file mode 100644
index 000000000000..86b7e93f198b
--- /dev/null
+++ b/include/rdma/ib_fmr_pool.h
@@ -0,0 +1,93 @@
1/*
2 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 * $Id: ib_fmr_pool.h 2730 2005-06-28 16:43:03Z sean.hefty $
34 */
35
36#if !defined(IB_FMR_POOL_H)
37#define IB_FMR_POOL_H
38
39#include <rdma/ib_verbs.h>
40
41struct ib_fmr_pool;
42
43/**
44 * struct ib_fmr_pool_param - Parameters for creating FMR pool
45 * @max_pages_per_fmr:Maximum number of pages per map request.
46 * @access:Access flags for FMRs in pool.
47 * @pool_size:Number of FMRs to allocate for pool.
48 * @dirty_watermark:Flush is triggered when @dirty_watermark dirty
49 * FMRs are present.
50 * @flush_function:Callback called when unmapped FMRs are flushed and
51 * more FMRs are possibly available for mapping.
52 * @flush_arg:Context passed to user's flush function.
53 * @cache:If set, FMRs may be reused after unmapping for identical map
54 * requests.
55 */
56struct ib_fmr_pool_param {
57 int max_pages_per_fmr;
58 enum ib_access_flags access;
59 int pool_size;
60 int dirty_watermark;
61 void (*flush_function)(struct ib_fmr_pool *pool,
62 void * arg);
63 void *flush_arg;
64 unsigned cache:1;
65};
66
67struct ib_pool_fmr {
68 struct ib_fmr *fmr;
69 struct ib_fmr_pool *pool;
70 struct list_head list;
71 struct hlist_node cache_node;
72 int ref_count;
73 int remap_count;
74 u64 io_virtual_address;
75 int page_list_len;
76 u64 page_list[0];
77};
78
79struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
80 struct ib_fmr_pool_param *params);
81
82void ib_destroy_fmr_pool(struct ib_fmr_pool *pool);
83
84int ib_flush_fmr_pool(struct ib_fmr_pool *pool);
85
86struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
87 u64 *page_list,
88 int list_len,
89 u64 *io_virtual_address);
90
91int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr);
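
A hypothetical caller (not part of the patch) could create a pool once at setup time and map page lists on its data path roughly as below; the sizes are illustrative, and ib_create_fmr_pool() is assumed to return an ERR_PTR() value on failure:

static struct ib_fmr_pool *example_create_pool(struct ib_pd *pd)
{
	struct ib_fmr_pool_param params = {
		.max_pages_per_fmr = 64,	/* illustrative */
		.access            = IB_ACCESS_LOCAL_WRITE |
				     IB_ACCESS_REMOTE_READ,
		.pool_size         = 32,	/* illustrative */
		.dirty_watermark   = 8,		/* illustrative */
		.cache             = 1
	};

	/* Destroy with ib_destroy_fmr_pool() at teardown time. */
	return ib_create_fmr_pool(pd, &params);
}

static struct ib_pool_fmr *example_map_pages(struct ib_fmr_pool *pool,
					     u64 *page_list, int npages,
					     u64 io_addr)
{
	/* io_addr is the requested I/O virtual address, passed by reference
	 * to match the declaration above; unmap with ib_fmr_pool_unmap(). */
	return ib_fmr_pool_map_phys(pool, page_list, npages, &io_addr);
}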
92
93#endif /* IB_FMR_POOL_H */
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
new file mode 100644
index 000000000000..fc6b1c18ffc6
--- /dev/null
+++ b/include/rdma/ib_mad.h
@@ -0,0 +1,579 @@
1/*
2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
7 *
8 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU
10 * General Public License (GPL) Version 2, available from the file
11 * COPYING in the main directory of this source tree, or the
12 * OpenIB.org BSD license below:
13 *
14 * Redistribution and use in source and binary forms, with or
15 * without modification, are permitted provided that the following
16 * conditions are met:
17 *
18 * - Redistributions of source code must retain the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer.
21 *
22 * - Redistributions in binary form must reproduce the above
23 * copyright notice, this list of conditions and the following
24 * disclaimer in the documentation and/or other materials
25 * provided with the distribution.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * SOFTWARE.
35 *
36 * $Id: ib_mad.h 2775 2005-07-02 13:42:12Z halr $
37 */
38
39#if !defined( IB_MAD_H )
40#define IB_MAD_H
41
42#include <linux/pci.h>
43
44#include <rdma/ib_verbs.h>
45
46/* Management base version */
47#define IB_MGMT_BASE_VERSION 1
48
49/* Management classes */
50#define IB_MGMT_CLASS_SUBN_LID_ROUTED 0x01
51#define IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE 0x81
52#define IB_MGMT_CLASS_SUBN_ADM 0x03
53#define IB_MGMT_CLASS_PERF_MGMT 0x04
54#define IB_MGMT_CLASS_BM 0x05
55#define IB_MGMT_CLASS_DEVICE_MGMT 0x06
56#define IB_MGMT_CLASS_CM 0x07
57#define IB_MGMT_CLASS_SNMP 0x08
58#define IB_MGMT_CLASS_VENDOR_RANGE2_START 0x30
59#define IB_MGMT_CLASS_VENDOR_RANGE2_END 0x4F
60
61#define IB_OPENIB_OUI (0x001405)
62
63/* Management methods */
64#define IB_MGMT_METHOD_GET 0x01
65#define IB_MGMT_METHOD_SET 0x02
66#define IB_MGMT_METHOD_GET_RESP 0x81
67#define IB_MGMT_METHOD_SEND 0x03
68#define IB_MGMT_METHOD_TRAP 0x05
69#define IB_MGMT_METHOD_REPORT 0x06
70#define IB_MGMT_METHOD_REPORT_RESP 0x86
71#define IB_MGMT_METHOD_TRAP_REPRESS 0x07
72
73#define IB_MGMT_METHOD_RESP 0x80
74
75#define IB_MGMT_MAX_METHODS 128
76
77/* RMPP information */
78#define IB_MGMT_RMPP_VERSION 1
79
80#define IB_MGMT_RMPP_TYPE_DATA 1
81#define IB_MGMT_RMPP_TYPE_ACK 2
82#define IB_MGMT_RMPP_TYPE_STOP 3
83#define IB_MGMT_RMPP_TYPE_ABORT 4
84
85#define IB_MGMT_RMPP_FLAG_ACTIVE 1
86#define IB_MGMT_RMPP_FLAG_FIRST (1<<1)
87#define IB_MGMT_RMPP_FLAG_LAST (1<<2)
88
89#define IB_MGMT_RMPP_NO_RESPTIME 0x1F
90
91#define IB_MGMT_RMPP_STATUS_SUCCESS 0
92#define IB_MGMT_RMPP_STATUS_RESX 1
93#define IB_MGMT_RMPP_STATUS_ABORT_MIN 118
94#define IB_MGMT_RMPP_STATUS_T2L 118
95#define IB_MGMT_RMPP_STATUS_BAD_LEN 119
96#define IB_MGMT_RMPP_STATUS_BAD_SEG 120
97#define IB_MGMT_RMPP_STATUS_BADT 121
98#define IB_MGMT_RMPP_STATUS_W2S 122
99#define IB_MGMT_RMPP_STATUS_S2B 123
100#define IB_MGMT_RMPP_STATUS_BAD_STATUS 124
101#define IB_MGMT_RMPP_STATUS_UNV 125
102#define IB_MGMT_RMPP_STATUS_TMR 126
103#define IB_MGMT_RMPP_STATUS_UNSPEC 127
104#define IB_MGMT_RMPP_STATUS_ABORT_MAX 127
105
106#define IB_QP0 0
107#define IB_QP1 __constant_htonl(1)
108#define IB_QP1_QKEY 0x80010000
109#define IB_QP_SET_QKEY 0x80000000
110
111struct ib_mad_hdr {
112 u8 base_version;
113 u8 mgmt_class;
114 u8 class_version;
115 u8 method;
116 __be16 status;
117 __be16 class_specific;
118 __be64 tid;
119 __be16 attr_id;
120 __be16 resv;
121 __be32 attr_mod;
122};
123
124struct ib_rmpp_hdr {
125 u8 rmpp_version;
126 u8 rmpp_type;
127 u8 rmpp_rtime_flags;
128 u8 rmpp_status;
129 __be32 seg_num;
130 __be32 paylen_newwin;
131};
132
133typedef u64 __bitwise ib_sa_comp_mask;
134
135#define IB_SA_COMP_MASK(n) ((__force ib_sa_comp_mask) cpu_to_be64(1ull << n))
136
137/*
138 * ib_sa_hdr and ib_sa_mad structures must be packed because they have
139 * 64-bit fields that are only 32-bit aligned. 64-bit architectures will
140 * lay them out wrong otherwise. (And unfortunately they are sent on
141 * the wire so we can't change the layout)
142 */
143struct ib_sa_hdr {
144 __be64 sm_key;
145 __be16 attr_offset;
146 __be16 reserved;
147 ib_sa_comp_mask comp_mask;
148} __attribute__ ((packed));
149
150struct ib_mad {
151 struct ib_mad_hdr mad_hdr;
152 u8 data[232];
153};
154
155struct ib_rmpp_mad {
156 struct ib_mad_hdr mad_hdr;
157 struct ib_rmpp_hdr rmpp_hdr;
158 u8 data[220];
159};
160
161struct ib_sa_mad {
162 struct ib_mad_hdr mad_hdr;
163 struct ib_rmpp_hdr rmpp_hdr;
164 struct ib_sa_hdr sa_hdr;
165 u8 data[200];
166} __attribute__ ((packed));
167
168struct ib_vendor_mad {
169 struct ib_mad_hdr mad_hdr;
170 struct ib_rmpp_hdr rmpp_hdr;
171 u8 reserved;
172 u8 oui[3];
173 u8 data[216];
174};
175
176/**
177 * ib_mad_send_buf - MAD data buffer and work request for sends.
178 * @mad: References an allocated MAD data buffer. The size of the data
179 * buffer is specified in the @send_wr.length field.
180 * @mapping: DMA mapping information.
181 * @mad_agent: MAD agent that allocated the buffer.
182 * @context: User-controlled context fields.
183 * @send_wr: An initialized work request structure used when sending the MAD.
184 * The wr_id field of the work request is initialized to reference this
185 * data structure.
186 * @sge: A scatter-gather list referenced by the work request.
187 *
188 * Users are responsible for initializing the MAD buffer itself, with the
189 * exception of specifying the payload length field in any RMPP MAD.
190 */
191struct ib_mad_send_buf {
192 struct ib_mad *mad;
193 DECLARE_PCI_UNMAP_ADDR(mapping)
194 struct ib_mad_agent *mad_agent;
195 void *context[2];
196 struct ib_send_wr send_wr;
197 struct ib_sge sge;
198};
199
200/**
201 * ib_get_rmpp_resptime - Returns the RMPP response time.
202 * @rmpp_hdr: An RMPP header.
203 */
204static inline u8 ib_get_rmpp_resptime(struct ib_rmpp_hdr *rmpp_hdr)
205{
206 return rmpp_hdr->rmpp_rtime_flags >> 3;
207}
208
209/**
210 * ib_get_rmpp_flags - Returns the RMPP flags.
211 * @rmpp_hdr: An RMPP header.
212 */
213static inline u8 ib_get_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr)
214{
215 return rmpp_hdr->rmpp_rtime_flags & 0x7;
216}
217
218/**
219 * ib_set_rmpp_resptime - Sets the response time in an RMPP header.
220 * @rmpp_hdr: An RMPP header.
221 * @rtime: The response time to set.
222 */
223static inline void ib_set_rmpp_resptime(struct ib_rmpp_hdr *rmpp_hdr, u8 rtime)
224{
225 rmpp_hdr->rmpp_rtime_flags = ib_get_rmpp_flags(rmpp_hdr) | (rtime << 3);
226}
227
228/**
229 * ib_set_rmpp_flags - Sets the flags in an RMPP header.
230 * @rmpp_hdr: An RMPP header.
231 * @flags: The flags to set.
232 */
233static inline void ib_set_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr, u8 flags)
234{
235	rmpp_hdr->rmpp_rtime_flags = (rmpp_hdr->rmpp_rtime_flags & 0xF8) |
236 (flags & 0x7);
237}
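
For example (hypothetical, not part of the patch), a sender preparing the first data segment of an RMPP transfer could combine the helpers above like this:

static void example_init_first_rmpp_seg(struct ib_rmpp_hdr *rmpp_hdr)
{
	rmpp_hdr->rmpp_version = IB_MGMT_RMPP_VERSION;
	rmpp_hdr->rmpp_type    = IB_MGMT_RMPP_TYPE_DATA;
	rmpp_hdr->seg_num      = cpu_to_be32(1);
	ib_set_rmpp_flags(rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE |
				    IB_MGMT_RMPP_FLAG_FIRST);
	ib_set_rmpp_resptime(rmpp_hdr, IB_MGMT_RMPP_NO_RESPTIME);
}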
238
239struct ib_mad_agent;
240struct ib_mad_send_wc;
241struct ib_mad_recv_wc;
242
243/**
244 * ib_mad_send_handler - callback handler for a sent MAD.
245 * @mad_agent: MAD agent that sent the MAD.
246 * @mad_send_wc: Send work completion information on the sent MAD.
247 */
248typedef void (*ib_mad_send_handler)(struct ib_mad_agent *mad_agent,
249 struct ib_mad_send_wc *mad_send_wc);
250
251/**
252 * ib_mad_snoop_handler - Callback handler for snooping sent MADs.
253 * @mad_agent: MAD agent that snooped the MAD.
254 * @send_wr: Work request information on the sent MAD.
255 * @mad_send_wc: Work completion information on the sent MAD. Valid
256 * only for snooping that occurs on a send completion.
257 *
258 * Clients snooping MADs should not modify data referenced by the @send_wr
259 * or @mad_send_wc.
260 */
261typedef void (*ib_mad_snoop_handler)(struct ib_mad_agent *mad_agent,
262 struct ib_send_wr *send_wr,
263 struct ib_mad_send_wc *mad_send_wc);
264
265/**
266 * ib_mad_recv_handler - callback handler for a received MAD.
267 * @mad_agent: MAD agent requesting the received MAD.
268 * @mad_recv_wc: Received work completion information on the received MAD.
269 *
270 * MADs received in response to a send request operation will be handed to
271 * the user after the send operation completes. All data buffers given
272 * to registered agents through this routine are owned by the receiving
273 * client, except for snooping agents. Clients snooping MADs should not
274 * modify the data referenced by @mad_recv_wc.
275 */
276typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent,
277 struct ib_mad_recv_wc *mad_recv_wc);
278
279/**
280 * ib_mad_agent - Used to track MAD registration with the access layer.
281 * @device: Reference to device registration is on.
282 * @qp: Reference to QP used for sending and receiving MADs.
283 * @mr: Memory region for system memory usable for DMA.
284 * @recv_handler: Callback handler for a received MAD.
285 * @send_handler: Callback handler for a sent MAD.
286 * @snoop_handler: Callback handler for snooped sent MADs.
287 * @context: User-specified context associated with this registration.
288 * @hi_tid: Access layer assigned transaction ID for this client.
289 * Unsolicited MADs sent by this client will have the upper 32-bits
290 * of their TID set to this value.
291 * @port_num: Port number on which QP is registered
292 * @rmpp_version: If set, indicates the RMPP version used by this agent.
293 */
294struct ib_mad_agent {
295 struct ib_device *device;
296 struct ib_qp *qp;
297 struct ib_mr *mr;
298 ib_mad_recv_handler recv_handler;
299 ib_mad_send_handler send_handler;
300 ib_mad_snoop_handler snoop_handler;
301 void *context;
302 u32 hi_tid;
303 u8 port_num;
304 u8 rmpp_version;
305};
306
307/**
308 * ib_mad_send_wc - MAD send completion information.
309 * @wr_id: Work request identifier associated with the send MAD request.
310 * @status: Completion status.
311 * @vendor_err: Optional vendor error information returned with a failed
312 * request.
313 */
314struct ib_mad_send_wc {
315 u64 wr_id;
316 enum ib_wc_status status;
317 u32 vendor_err;
318};
319
320/**
321 * ib_mad_recv_buf - received MAD buffer information.
322 * @list: Reference to next data buffer for a received RMPP MAD.
323 * @grh: References a data buffer containing the global route header.
324 * The data referenced by this buffer is only valid if the GRH is
325 * valid.
326 * @mad: References the start of the received MAD.
327 */
328struct ib_mad_recv_buf {
329 struct list_head list;
330 struct ib_grh *grh;
331 struct ib_mad *mad;
332};
333
334/**
335 * ib_mad_recv_wc - received MAD information.
336 * @wc: Completion information for the received data.
337 * @recv_buf: Specifies the location of the received data buffer(s).
338 * @rmpp_list: Specifies a list of RMPP reassembled received MAD buffers.
339 * @mad_len: The length of the received MAD, without duplicated headers.
340 *
341 * For a received response, the wr_id field of the wc is set to the wr_id
342 * for the corresponding send request.
343 */
344struct ib_mad_recv_wc {
345 struct ib_wc *wc;
346 struct ib_mad_recv_buf recv_buf;
347 struct list_head rmpp_list;
348 int mad_len;
349};
350
351/**
352 * ib_mad_reg_req - MAD registration request
353 * @mgmt_class: Indicates which management class of MADs should be received
354 * by the caller. This field is only required if the user wishes to
355 * receive unsolicited MADs, otherwise it should be 0.
356 * @mgmt_class_version: Indicates which version of MADs for the given
357 * management class to receive.
358 * @oui: Indicates IEEE OUI when mgmt_class is a vendor class
359 * in the range from 0x30 to 0x4f. Otherwise not used.
360 * @method_mask: The caller will receive unsolicited MADs for any method
361 * where @method_mask = 1.
362 */
363struct ib_mad_reg_req {
364 u8 mgmt_class;
365 u8 mgmt_class_version;
366 u8 oui[3];
367 DECLARE_BITMAP(method_mask, IB_MGMT_MAX_METHODS);
368};
369
370/**
371 * ib_register_mad_agent - Register to send/receive MADs.
372 * @device: The device to register with.
373 * @port_num: The port on the specified device to use.
374 * @qp_type: Specifies which QP to access. Must be either
375 * IB_QPT_SMI or IB_QPT_GSI.
376 * @mad_reg_req: Specifies which unsolicited MADs should be received
377 * by the caller. This parameter may be NULL if the caller only
378 * wishes to receive solicited responses.
379 * @rmpp_version: If set, indicates that the client will send
380 * and receive MADs that contain the RMPP header for the given version.
381 * If set to 0, indicates that RMPP is not used by this client.
382 * @send_handler: The completion callback routine invoked after a send
383 * request has completed.
384 * @recv_handler: The completion callback routine invoked for a received
385 * MAD.
386 * @context: User specified context associated with the registration.
387 */
388struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
389 u8 port_num,
390 enum ib_qp_type qp_type,
391 struct ib_mad_reg_req *mad_reg_req,
392 u8 rmpp_version,
393 ib_mad_send_handler send_handler,
394 ib_mad_recv_handler recv_handler,
395 void *context);
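
A minimal hypothetical registration (not part of the patch) that receives unsolicited Performance Management GET/SET MADs on port 1 might look like the following; the stub handlers are placeholders:

static void example_send_handler(struct ib_mad_agent *agent,
				 struct ib_mad_send_wc *wc)
{
	/* send completion handling would go here */
}

static void example_recv_handler(struct ib_mad_agent *agent,
				 struct ib_mad_recv_wc *wc)
{
	/* ... process the MAD, then return the receive buffers ... */
	ib_free_recv_mad(wc);
}

static struct ib_mad_agent *example_register(struct ib_device *device)
{
	struct ib_mad_reg_req reg_req;

	memset(&reg_req, 0, sizeof reg_req);
	reg_req.mgmt_class         = IB_MGMT_CLASS_PERF_MGMT;
	reg_req.mgmt_class_version = 1;
	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
	set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);

	return ib_register_mad_agent(device, 1, IB_QPT_GSI, &reg_req, 0,
				     example_send_handler,
				     example_recv_handler, NULL);
}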
396
397enum ib_mad_snoop_flags {
398 /*IB_MAD_SNOOP_POSTED_SENDS = 1,*/
399 /*IB_MAD_SNOOP_RMPP_SENDS = (1<<1),*/
400 IB_MAD_SNOOP_SEND_COMPLETIONS = (1<<2),
401 /*IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS = (1<<3),*/
402 IB_MAD_SNOOP_RECVS = (1<<4)
403 /*IB_MAD_SNOOP_RMPP_RECVS = (1<<5),*/
404 /*IB_MAD_SNOOP_REDIRECTED_QPS = (1<<6)*/
405};
406
407/**
408 * ib_register_mad_snoop - Register to snoop sent and received MADs.
409 * @device: The device to register with.
410 * @port_num: The port on the specified device to use.
411 * @qp_type: Specifies which QP traffic to snoop. Must be either
412 * IB_QPT_SMI or IB_QPT_GSI.
413 * @mad_snoop_flags: Specifies the points at which snooping occurs.
414 * @send_handler: The callback routine invoked for a snooped send.
415 * @recv_handler: The callback routine invoked for a snooped receive.
416 * @context: User specified context associated with the registration.
417 */
418struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
419 u8 port_num,
420 enum ib_qp_type qp_type,
421 int mad_snoop_flags,
422 ib_mad_snoop_handler snoop_handler,
423 ib_mad_recv_handler recv_handler,
424 void *context);
425
426/**
427 * ib_unregister_mad_agent - Unregisters a client from using MAD services.
428 * @mad_agent: Corresponding MAD registration request to deregister.
429 *
430 * After invoking this routine, MAD services are no longer usable by the
431 * client on the associated QP.
432 */
433int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent);
434
435/**
436 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
437 * with the registered client.
438 * @mad_agent: Specifies the associated registration to post the send to.
439 * @send_wr: Specifies the information needed to send the MAD(s).
440 * @bad_send_wr: Specifies the MAD on which an error was encountered.
441 *
442 * Sent MADs are not guaranteed to complete in the order that they were posted.
443 *
444 * If the MAD requires RMPP, the data buffer should contain a single copy
445 * of the common MAD, RMPP, and class specific headers, followed by the class
446 * defined data. If the class defined data would not divide evenly into
447 * RMPP segments, then space must be allocated at the end of the referenced
448 * buffer for any required padding. To indicate the amount of class defined
449 * data being transferred, the paylen_newwin field in the RMPP header should
450 * be set to the size of the class specific header plus the amount of class
451 * defined data being transferred. The paylen_newwin field should be
452 * specified in network-byte order.
453 */
454int ib_post_send_mad(struct ib_mad_agent *mad_agent,
455 struct ib_send_wr *send_wr,
456 struct ib_send_wr **bad_send_wr);
457
458/**
459 * ib_coalesce_recv_mad - Coalesces received MAD data into a single buffer.
460 * @mad_recv_wc: Work completion information for a received MAD.
461 * @buf: User-provided data buffer to receive the coalesced buffers. The
462 * referenced buffer should be at least the size of the mad_len specified
463 * by @mad_recv_wc.
464 *
465 * This call copies a chain of received MAD segments into a single data buffer,
466 * removing duplicated headers.
467 */
468void ib_coalesce_recv_mad(struct ib_mad_recv_wc *mad_recv_wc, void *buf);
469
470/**
471 * ib_free_recv_mad - Returns data buffers used to receive a MAD.
472 * @mad_recv_wc: Work completion information for a received MAD.
473 *
474 * Clients receiving MADs through their ib_mad_recv_handler must call this
475 * routine to return the work completion buffers to the access layer.
476 */
477void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc);
478
479/**
480 * ib_cancel_mad - Cancels an outstanding send MAD operation.
481 * @mad_agent: Specifies the registration associated with sent MAD.
482 * @wr_id: Indicates the work request identifier of the MAD to cancel.
483 *
484 * MADs will be returned to the user through the corresponding
485 * ib_mad_send_handler.
486 */
487void ib_cancel_mad(struct ib_mad_agent *mad_agent, u64 wr_id);
488
489/**
490 * ib_modify_mad - Modifies an outstanding send MAD operation.
491 * @mad_agent: Specifies the registration associated with sent MAD.
492 * @wr_id: Indicates the work request identifier of the MAD to modify.
493 * @timeout_ms: New timeout value for sent MAD.
494 *
495 * This call will reset the timeout value for a sent MAD to the specified
496 * value.
497 */
498int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms);
499
500/**
501 * ib_redirect_mad_qp - Registers a QP for MAD services.
502 * @qp: Reference to a QP that requires MAD services.
503 * @rmpp_version: If set, indicates that the client will send
504 * and receive MADs that contain the RMPP header for the given version.
505 * If set to 0, indicates that RMPP is not used by this client.
506 * @send_handler: The completion callback routine invoked after a send
507 * request has completed.
508 * @recv_handler: The completion callback routine invoked for a received
509 * MAD.
510 * @context: User specified context associated with the registration.
511 *
512 * Use of this call allows clients to use MAD services, such as RMPP,
513 * on user-owned QPs. After calling this routine, users may send
514 * MADs on the specified QP by calling ib_post_send_mad.
515 */
516struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
517 u8 rmpp_version,
518 ib_mad_send_handler send_handler,
519 ib_mad_recv_handler recv_handler,
520 void *context);
521
522/**
523 * ib_process_mad_wc - Processes a work completion associated with a
524 * MAD sent or received on a redirected QP.
525 * @mad_agent: Specifies the registered MAD service using the redirected QP.
526 * @wc: References a work completion associated with a sent or received
527 * MAD segment.
528 *
529 * This routine is used to complete or continue processing on a MAD request.
530 * If the work completion is associated with a send operation, calling
531 * this routine is required to continue an RMPP transfer or to wait for a
532 * corresponding response, if it is a request. If the work completion is
533 * associated with a receive operation, calling this routine is required to
534 * process an inbound or outbound RMPP transfer, or to match a response MAD
535 * with its corresponding request.
536 */
537int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
538 struct ib_wc *wc);
539
540/**
541 * ib_create_send_mad - Allocate and initialize a data buffer and work request
542 * for sending a MAD.
543 * @mad_agent: Specifies the registered MAD service to associate with the MAD.
544 * @remote_qpn: Specifies the QPN of the receiving node.
545 * @pkey_index: Specifies which PKey the MAD will be sent using. This field
546 * is valid only if the remote_qpn is QP 1.
547 * @ah: References the address handle used to transfer to the remote node.
548 * @rmpp_active: Indicates if the send will enable RMPP.
549 * @hdr_len: Indicates the size of the data header of the MAD. This length
550 * should include the common MAD header, RMPP header, plus any class
551 * specific header.
552 * @data_len: Indicates the size of any user-transferred data. The call will
553 * automatically adjust the allocated buffer size to account for any
554 * additional padding that may be necessary.
555 * @gfp_mask: GFP mask used for the memory allocation.
556 *
557 * This is a helper routine that may be used to allocate a MAD. Users are
558 * not required to allocate outbound MADs using this call. The returned
559 * MAD send buffer will reference a data buffer usable for sending a MAD, along
560 * with an initialized work request structure. Users may modify the returned
561 * MAD data buffer or work request before posting the send.
562 *
563 * The returned data buffer will be cleared. Users are responsible for
564 * initializing the common MAD and any class specific headers. If @rmpp_active
565 * is set, the RMPP header will be initialized for sending.
566 */
567struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
568 u32 remote_qpn, u16 pkey_index,
569 struct ib_ah *ah, int rmpp_active,
570 int hdr_len, int data_len,
571 unsigned int __nocast gfp_mask);
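
As a hypothetical sketch (not part of the patch), allocating and posting a single non-RMPP MAD with this helper could look like the following; ib_create_send_mad() is assumed to return an ERR_PTR() value on failure, and class-specific setup is omitted:

static int example_post_mad(struct ib_mad_agent *agent, struct ib_ah *ah,
			    u32 remote_qpn)
{
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_wr;
	int ret;

	msg = ib_create_send_mad(agent, remote_qpn, 0, ah, 0,
				 sizeof(struct ib_mad_hdr),
				 sizeof(struct ib_mad) -
				 sizeof(struct ib_mad_hdr),
				 GFP_KERNEL);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
	msg->mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_PERF_MGMT;
	msg->mad->mad_hdr.class_version = 1;
	msg->mad->mad_hdr.method        = IB_MGMT_METHOD_GET;
	/* tid, attr_id, etc. would be filled in here */

	ret = ib_post_send_mad(agent, &msg->send_wr, &bad_wr);
	if (ret)
		ib_free_send_mad(msg);
	return ret;
}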
572
573/**
574 * ib_free_send_mad - Returns data buffers used to send a MAD.
575 * @send_buf: Previously allocated send data buffer.
576 */
577void ib_free_send_mad(struct ib_mad_send_buf *send_buf);
578
579#endif /* IB_MAD_H */
diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
new file mode 100644
index 000000000000..f926020d6331
--- /dev/null
+++ b/include/rdma/ib_pack.h
@@ -0,0 +1,245 @@
1/*
2 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 * $Id: ib_pack.h 1349 2004-12-16 21:09:43Z roland $
33 */
34
35#ifndef IB_PACK_H
36#define IB_PACK_H
37
38#include <rdma/ib_verbs.h>
39
40enum {
41 IB_LRH_BYTES = 8,
42 IB_GRH_BYTES = 40,
43 IB_BTH_BYTES = 12,
44 IB_DETH_BYTES = 8
45};
46
47struct ib_field {
48 size_t struct_offset_bytes;
49 size_t struct_size_bytes;
50 int offset_words;
51 int offset_bits;
52 int size_bits;
53 char *field_name;
54};
55
56#define RESERVED \
57 .field_name = "reserved"
58
59/*
60 * This macro cleans up the definitions of constants for BTH opcodes.
61 * It is used to define constants such as IB_OPCODE_UD_SEND_ONLY,
62 * which becomes IB_OPCODE_UD + IB_OPCODE_SEND_ONLY, and this gives
63 * the correct value.
64 *
65 * In short, user code should use the constants defined using the
66 * macro rather than worrying about adding together other constants.
67*/
68#define IB_OPCODE(transport, op) \
69 IB_OPCODE_ ## transport ## _ ## op = \
70 IB_OPCODE_ ## transport + IB_OPCODE_ ## op
71
72enum {
73 /* transport types -- just used to define real constants */
74 IB_OPCODE_RC = 0x00,
75 IB_OPCODE_UC = 0x20,
76 IB_OPCODE_RD = 0x40,
77 IB_OPCODE_UD = 0x60,
78
79 /* operations -- just used to define real constants */
80 IB_OPCODE_SEND_FIRST = 0x00,
81 IB_OPCODE_SEND_MIDDLE = 0x01,
82 IB_OPCODE_SEND_LAST = 0x02,
83 IB_OPCODE_SEND_LAST_WITH_IMMEDIATE = 0x03,
84 IB_OPCODE_SEND_ONLY = 0x04,
85 IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE = 0x05,
86 IB_OPCODE_RDMA_WRITE_FIRST = 0x06,
87 IB_OPCODE_RDMA_WRITE_MIDDLE = 0x07,
88 IB_OPCODE_RDMA_WRITE_LAST = 0x08,
89 IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE = 0x09,
90 IB_OPCODE_RDMA_WRITE_ONLY = 0x0a,
91 IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE = 0x0b,
92 IB_OPCODE_RDMA_READ_REQUEST = 0x0c,
93 IB_OPCODE_RDMA_READ_RESPONSE_FIRST = 0x0d,
94 IB_OPCODE_RDMA_READ_RESPONSE_MIDDLE = 0x0e,
95 IB_OPCODE_RDMA_READ_RESPONSE_LAST = 0x0f,
96 IB_OPCODE_RDMA_READ_RESPONSE_ONLY = 0x10,
97 IB_OPCODE_ACKNOWLEDGE = 0x11,
98 IB_OPCODE_ATOMIC_ACKNOWLEDGE = 0x12,
99 IB_OPCODE_COMPARE_SWAP = 0x13,
100 IB_OPCODE_FETCH_ADD = 0x14,
101
102	/* real constants follow -- see the comment above about the IB_OPCODE()
103 macro for more details */
104
105 /* RC */
106 IB_OPCODE(RC, SEND_FIRST),
107 IB_OPCODE(RC, SEND_MIDDLE),
108 IB_OPCODE(RC, SEND_LAST),
109 IB_OPCODE(RC, SEND_LAST_WITH_IMMEDIATE),
110 IB_OPCODE(RC, SEND_ONLY),
111 IB_OPCODE(RC, SEND_ONLY_WITH_IMMEDIATE),
112 IB_OPCODE(RC, RDMA_WRITE_FIRST),
113 IB_OPCODE(RC, RDMA_WRITE_MIDDLE),
114 IB_OPCODE(RC, RDMA_WRITE_LAST),
115 IB_OPCODE(RC, RDMA_WRITE_LAST_WITH_IMMEDIATE),
116 IB_OPCODE(RC, RDMA_WRITE_ONLY),
117 IB_OPCODE(RC, RDMA_WRITE_ONLY_WITH_IMMEDIATE),
118 IB_OPCODE(RC, RDMA_READ_REQUEST),
119 IB_OPCODE(RC, RDMA_READ_RESPONSE_FIRST),
120 IB_OPCODE(RC, RDMA_READ_RESPONSE_MIDDLE),
121 IB_OPCODE(RC, RDMA_READ_RESPONSE_LAST),
122 IB_OPCODE(RC, RDMA_READ_RESPONSE_ONLY),
123 IB_OPCODE(RC, ACKNOWLEDGE),
124 IB_OPCODE(RC, ATOMIC_ACKNOWLEDGE),
125 IB_OPCODE(RC, COMPARE_SWAP),
126 IB_OPCODE(RC, FETCH_ADD),
127
128 /* UC */
129 IB_OPCODE(UC, SEND_FIRST),
130 IB_OPCODE(UC, SEND_MIDDLE),
131 IB_OPCODE(UC, SEND_LAST),
132 IB_OPCODE(UC, SEND_LAST_WITH_IMMEDIATE),
133 IB_OPCODE(UC, SEND_ONLY),
134 IB_OPCODE(UC, SEND_ONLY_WITH_IMMEDIATE),
135 IB_OPCODE(UC, RDMA_WRITE_FIRST),
136 IB_OPCODE(UC, RDMA_WRITE_MIDDLE),
137 IB_OPCODE(UC, RDMA_WRITE_LAST),
138 IB_OPCODE(UC, RDMA_WRITE_LAST_WITH_IMMEDIATE),
139 IB_OPCODE(UC, RDMA_WRITE_ONLY),
140 IB_OPCODE(UC, RDMA_WRITE_ONLY_WITH_IMMEDIATE),
141
142 /* RD */
143 IB_OPCODE(RD, SEND_FIRST),
144 IB_OPCODE(RD, SEND_MIDDLE),
145 IB_OPCODE(RD, SEND_LAST),
146 IB_OPCODE(RD, SEND_LAST_WITH_IMMEDIATE),
147 IB_OPCODE(RD, SEND_ONLY),
148 IB_OPCODE(RD, SEND_ONLY_WITH_IMMEDIATE),
149 IB_OPCODE(RD, RDMA_WRITE_FIRST),
150 IB_OPCODE(RD, RDMA_WRITE_MIDDLE),
151 IB_OPCODE(RD, RDMA_WRITE_LAST),
152 IB_OPCODE(RD, RDMA_WRITE_LAST_WITH_IMMEDIATE),
153 IB_OPCODE(RD, RDMA_WRITE_ONLY),
154 IB_OPCODE(RD, RDMA_WRITE_ONLY_WITH_IMMEDIATE),
155 IB_OPCODE(RD, RDMA_READ_REQUEST),
156 IB_OPCODE(RD, RDMA_READ_RESPONSE_FIRST),
157 IB_OPCODE(RD, RDMA_READ_RESPONSE_MIDDLE),
158 IB_OPCODE(RD, RDMA_READ_RESPONSE_LAST),
159 IB_OPCODE(RD, RDMA_READ_RESPONSE_ONLY),
160 IB_OPCODE(RD, ACKNOWLEDGE),
161 IB_OPCODE(RD, ATOMIC_ACKNOWLEDGE),
162 IB_OPCODE(RD, COMPARE_SWAP),
163 IB_OPCODE(RD, FETCH_ADD),
164
165 /* UD */
166 IB_OPCODE(UD, SEND_ONLY),
167 IB_OPCODE(UD, SEND_ONLY_WITH_IMMEDIATE)
168};
169
170enum {
171 IB_LNH_RAW = 0,
172 IB_LNH_IP = 1,
173 IB_LNH_IBA_LOCAL = 2,
174 IB_LNH_IBA_GLOBAL = 3
175};
176
177struct ib_unpacked_lrh {
178 u8 virtual_lane;
179 u8 link_version;
180 u8 service_level;
181 u8 link_next_header;
182 __be16 destination_lid;
183 __be16 packet_length;
184 __be16 source_lid;
185};
186
187struct ib_unpacked_grh {
188 u8 ip_version;
189 u8 traffic_class;
190 __be32 flow_label;
191 __be16 payload_length;
192 u8 next_header;
193 u8 hop_limit;
194 union ib_gid source_gid;
195 union ib_gid destination_gid;
196};
197
198struct ib_unpacked_bth {
199 u8 opcode;
200 u8 solicited_event;
201 u8 mig_req;
202 u8 pad_count;
203 u8 transport_header_version;
204 __be16 pkey;
205 __be32 destination_qpn;
206 u8 ack_req;
207 __be32 psn;
208};
209
210struct ib_unpacked_deth {
211 __be32 qkey;
212 __be32 source_qpn;
213};
214
215struct ib_ud_header {
216 struct ib_unpacked_lrh lrh;
217 int grh_present;
218 struct ib_unpacked_grh grh;
219 struct ib_unpacked_bth bth;
220 struct ib_unpacked_deth deth;
221 int immediate_present;
222 __be32 immediate_data;
223};
224
225void ib_pack(const struct ib_field *desc,
226 int desc_len,
227 void *structure,
228 void *buf);
229
230void ib_unpack(const struct ib_field *desc,
231 int desc_len,
232 void *buf,
233 void *structure);
234
235void ib_ud_header_init(int payload_bytes,
236 int grh_present,
237 struct ib_ud_header *header);
238
239int ib_ud_header_pack(struct ib_ud_header *header,
240 void *buf);
241
242int ib_ud_header_unpack(void *buf,
243 struct ib_ud_header *header);
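
A hypothetical sketch (not part of the patch) of building and packing a UD header without a GRH; ib_ud_header_pack() is assumed to return the packed length in bytes, and fields such as the source QPN and SL are omitted for brevity:

static int example_pack_ud_header(int payload_len, u16 dlid, u16 slid,
				  u32 dest_qpn, u32 qkey, void *buf)
{
	struct ib_ud_header header;

	ib_ud_header_init(payload_len, 0 /* no GRH */, &header);

	header.lrh.destination_lid = cpu_to_be16(dlid);
	header.lrh.source_lid      = cpu_to_be16(slid);
	header.bth.destination_qpn = cpu_to_be32(dest_qpn);
	header.deth.qkey           = cpu_to_be32(qkey);
	header.immediate_present   = 0;

	return ib_ud_header_pack(&header, buf);
}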
244
245#endif /* IB_PACK_H */
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
new file mode 100644
index 000000000000..c022edfc49da
--- /dev/null
+++ b/include/rdma/ib_sa.h
@@ -0,0 +1,373 @@
1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 * $Id: ib_sa.h 2811 2005-07-06 18:11:43Z halr $
34 */
35
36#ifndef IB_SA_H
37#define IB_SA_H
38
39#include <linux/compiler.h>
40
41#include <rdma/ib_verbs.h>
42#include <rdma/ib_mad.h>
43
44enum {
45 IB_SA_CLASS_VERSION = 2, /* IB spec version 1.1/1.2 */
46
47 IB_SA_METHOD_GET_TABLE = 0x12,
48 IB_SA_METHOD_GET_TABLE_RESP = 0x92,
49 IB_SA_METHOD_DELETE = 0x15
50};
51
52enum ib_sa_selector {
53 IB_SA_GTE = 0,
54 IB_SA_LTE = 1,
55 IB_SA_EQ = 2,
56 /*
57 * The meaning of "best" depends on the attribute: for
58 * example, for MTU best will return the largest available
59 * MTU, while for packet life time, best will return the
60 * smallest available life time.
61 */
62 IB_SA_BEST = 3
63};
64
65enum ib_sa_rate {
66 IB_SA_RATE_2_5_GBPS = 2,
67 IB_SA_RATE_5_GBPS = 5,
68 IB_SA_RATE_10_GBPS = 3,
69 IB_SA_RATE_20_GBPS = 6,
70 IB_SA_RATE_30_GBPS = 4,
71 IB_SA_RATE_40_GBPS = 7,
72 IB_SA_RATE_60_GBPS = 8,
73 IB_SA_RATE_80_GBPS = 9,
74 IB_SA_RATE_120_GBPS = 10
75};
76
77static inline int ib_sa_rate_enum_to_int(enum ib_sa_rate rate)
78{
79 switch (rate) {
80 case IB_SA_RATE_2_5_GBPS: return 1;
81 case IB_SA_RATE_5_GBPS: return 2;
82 case IB_SA_RATE_10_GBPS: return 4;
83 case IB_SA_RATE_20_GBPS: return 8;
84 case IB_SA_RATE_30_GBPS: return 12;
85 case IB_SA_RATE_40_GBPS: return 16;
86 case IB_SA_RATE_60_GBPS: return 24;
87 case IB_SA_RATE_80_GBPS: return 32;
88 case IB_SA_RATE_120_GBPS: return 48;
89 default: return -1;
90 }
91}
92
93/*
94 * Structures for SA records are named "struct ib_sa_xxx_rec." No
95 * attempt is made to pack structures to match the physical layout of
96 * SA records in SA MADs; all packing and unpacking is handled by the
97 * SA query code.
98 *
99 * For a record with structure ib_sa_xxx_rec, the naming convention
100 * for the component mask value for field yyy is IB_SA_XXX_REC_YYY (we
101 * never use different abbreviations or otherwise change the spelling
102 * of xxx/yyy between ib_sa_xxx_rec.yyy and IB_SA_XXX_REC_YYY).
103 *
104 * Reserved rows are indicated with comments to help maintainability.
105 */
106
107/* reserved: 0 */
108/* reserved: 1 */
109#define IB_SA_PATH_REC_DGID IB_SA_COMP_MASK( 2)
110#define IB_SA_PATH_REC_SGID IB_SA_COMP_MASK( 3)
111#define IB_SA_PATH_REC_DLID IB_SA_COMP_MASK( 4)
112#define IB_SA_PATH_REC_SLID IB_SA_COMP_MASK( 5)
113#define IB_SA_PATH_REC_RAW_TRAFFIC IB_SA_COMP_MASK( 6)
114/* reserved: 7 */
115#define IB_SA_PATH_REC_FLOW_LABEL IB_SA_COMP_MASK( 8)
116#define IB_SA_PATH_REC_HOP_LIMIT IB_SA_COMP_MASK( 9)
117#define IB_SA_PATH_REC_TRAFFIC_CLASS IB_SA_COMP_MASK(10)
118#define IB_SA_PATH_REC_REVERSIBLE IB_SA_COMP_MASK(11)
119#define IB_SA_PATH_REC_NUMB_PATH IB_SA_COMP_MASK(12)
120#define IB_SA_PATH_REC_PKEY IB_SA_COMP_MASK(13)
121/* reserved: 14 */
122#define IB_SA_PATH_REC_SL IB_SA_COMP_MASK(15)
123#define IB_SA_PATH_REC_MTU_SELECTOR IB_SA_COMP_MASK(16)
124#define IB_SA_PATH_REC_MTU IB_SA_COMP_MASK(17)
125#define IB_SA_PATH_REC_RATE_SELECTOR IB_SA_COMP_MASK(18)
126#define IB_SA_PATH_REC_RATE IB_SA_COMP_MASK(19)
127#define IB_SA_PATH_REC_PACKET_LIFE_TIME_SELECTOR IB_SA_COMP_MASK(20)
128#define IB_SA_PATH_REC_PACKET_LIFE_TIME IB_SA_COMP_MASK(21)
129#define IB_SA_PATH_REC_PREFERENCE IB_SA_COMP_MASK(22)
130
131struct ib_sa_path_rec {
132 /* reserved */
133 /* reserved */
134 union ib_gid dgid;
135 union ib_gid sgid;
136 __be16 dlid;
137 __be16 slid;
138 int raw_traffic;
139 /* reserved */
140 __be32 flow_label;
141 u8 hop_limit;
142 u8 traffic_class;
143 int reversible;
144 u8 numb_path;
145 __be16 pkey;
146 /* reserved */
147 u8 sl;
148 u8 mtu_selector;
149 u8 mtu;
150 u8 rate_selector;
151 u8 rate;
152 u8 packet_life_time_selector;
153 u8 packet_life_time;
154 u8 preference;
155};
156
157#define IB_SA_MCMEMBER_REC_MGID IB_SA_COMP_MASK( 0)
158#define IB_SA_MCMEMBER_REC_PORT_GID IB_SA_COMP_MASK( 1)
159#define IB_SA_MCMEMBER_REC_QKEY IB_SA_COMP_MASK( 2)
160#define IB_SA_MCMEMBER_REC_MLID IB_SA_COMP_MASK( 3)
161#define IB_SA_MCMEMBER_REC_MTU_SELECTOR IB_SA_COMP_MASK( 4)
162#define IB_SA_MCMEMBER_REC_MTU IB_SA_COMP_MASK( 5)
163#define IB_SA_MCMEMBER_REC_TRAFFIC_CLASS IB_SA_COMP_MASK( 6)
164#define IB_SA_MCMEMBER_REC_PKEY IB_SA_COMP_MASK( 7)
165#define IB_SA_MCMEMBER_REC_RATE_SELECTOR IB_SA_COMP_MASK( 8)
166#define IB_SA_MCMEMBER_REC_RATE IB_SA_COMP_MASK( 9)
167#define IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR IB_SA_COMP_MASK(10)
168#define IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME IB_SA_COMP_MASK(11)
169#define IB_SA_MCMEMBER_REC_SL IB_SA_COMP_MASK(12)
170#define IB_SA_MCMEMBER_REC_FLOW_LABEL IB_SA_COMP_MASK(13)
171#define IB_SA_MCMEMBER_REC_HOP_LIMIT IB_SA_COMP_MASK(14)
172#define IB_SA_MCMEMBER_REC_SCOPE IB_SA_COMP_MASK(15)
173#define IB_SA_MCMEMBER_REC_JOIN_STATE IB_SA_COMP_MASK(16)
174#define IB_SA_MCMEMBER_REC_PROXY_JOIN IB_SA_COMP_MASK(17)
175
176struct ib_sa_mcmember_rec {
177 union ib_gid mgid;
178 union ib_gid port_gid;
179 __be32 qkey;
180 __be16 mlid;
181 u8 mtu_selector;
182 u8 mtu;
183 u8 traffic_class;
184 __be16 pkey;
185 u8 rate_selector;
186 u8 rate;
187 u8 packet_life_time_selector;
188 u8 packet_life_time;
189 u8 sl;
190 __be32 flow_label;
191 u8 hop_limit;
192 u8 scope;
193 u8 join_state;
194 int proxy_join;
195};
196
197/* Service Record Component Mask Sec 15.2.5.14 Ver 1.1 */
198#define IB_SA_SERVICE_REC_SERVICE_ID IB_SA_COMP_MASK( 0)
199#define IB_SA_SERVICE_REC_SERVICE_GID IB_SA_COMP_MASK( 1)
200#define IB_SA_SERVICE_REC_SERVICE_PKEY IB_SA_COMP_MASK( 2)
201/* reserved: 3 */
202#define IB_SA_SERVICE_REC_SERVICE_LEASE IB_SA_COMP_MASK( 4)
203#define IB_SA_SERVICE_REC_SERVICE_KEY IB_SA_COMP_MASK( 5)
204#define IB_SA_SERVICE_REC_SERVICE_NAME IB_SA_COMP_MASK( 6)
205#define IB_SA_SERVICE_REC_SERVICE_DATA8_0 IB_SA_COMP_MASK( 7)
206#define IB_SA_SERVICE_REC_SERVICE_DATA8_1 IB_SA_COMP_MASK( 8)
207#define IB_SA_SERVICE_REC_SERVICE_DATA8_2 IB_SA_COMP_MASK( 9)
208#define IB_SA_SERVICE_REC_SERVICE_DATA8_3 IB_SA_COMP_MASK(10)
209#define IB_SA_SERVICE_REC_SERVICE_DATA8_4 IB_SA_COMP_MASK(11)
210#define IB_SA_SERVICE_REC_SERVICE_DATA8_5 IB_SA_COMP_MASK(12)
211#define IB_SA_SERVICE_REC_SERVICE_DATA8_6 IB_SA_COMP_MASK(13)
212#define IB_SA_SERVICE_REC_SERVICE_DATA8_7 IB_SA_COMP_MASK(14)
213#define IB_SA_SERVICE_REC_SERVICE_DATA8_8 IB_SA_COMP_MASK(15)
214#define IB_SA_SERVICE_REC_SERVICE_DATA8_9 IB_SA_COMP_MASK(16)
215#define IB_SA_SERVICE_REC_SERVICE_DATA8_10 IB_SA_COMP_MASK(17)
216#define IB_SA_SERVICE_REC_SERVICE_DATA8_11 IB_SA_COMP_MASK(18)
217#define IB_SA_SERVICE_REC_SERVICE_DATA8_12 IB_SA_COMP_MASK(19)
218#define IB_SA_SERVICE_REC_SERVICE_DATA8_13 IB_SA_COMP_MASK(20)
219#define IB_SA_SERVICE_REC_SERVICE_DATA8_14 IB_SA_COMP_MASK(21)
220#define IB_SA_SERVICE_REC_SERVICE_DATA8_15 IB_SA_COMP_MASK(22)
221#define IB_SA_SERVICE_REC_SERVICE_DATA16_0 IB_SA_COMP_MASK(23)
222#define IB_SA_SERVICE_REC_SERVICE_DATA16_1 IB_SA_COMP_MASK(24)
223#define IB_SA_SERVICE_REC_SERVICE_DATA16_2 IB_SA_COMP_MASK(25)
224#define IB_SA_SERVICE_REC_SERVICE_DATA16_3 IB_SA_COMP_MASK(26)
225#define IB_SA_SERVICE_REC_SERVICE_DATA16_4 IB_SA_COMP_MASK(27)
226#define IB_SA_SERVICE_REC_SERVICE_DATA16_5 IB_SA_COMP_MASK(28)
227#define IB_SA_SERVICE_REC_SERVICE_DATA16_6 IB_SA_COMP_MASK(29)
228#define IB_SA_SERVICE_REC_SERVICE_DATA16_7 IB_SA_COMP_MASK(30)
229#define IB_SA_SERVICE_REC_SERVICE_DATA32_0 IB_SA_COMP_MASK(31)
230#define IB_SA_SERVICE_REC_SERVICE_DATA32_1 IB_SA_COMP_MASK(32)
231#define IB_SA_SERVICE_REC_SERVICE_DATA32_2 IB_SA_COMP_MASK(33)
232#define IB_SA_SERVICE_REC_SERVICE_DATA32_3 IB_SA_COMP_MASK(34)
233#define IB_SA_SERVICE_REC_SERVICE_DATA64_0 IB_SA_COMP_MASK(35)
234#define IB_SA_SERVICE_REC_SERVICE_DATA64_1 IB_SA_COMP_MASK(36)
235
236#define IB_DEFAULT_SERVICE_LEASE 0xFFFFFFFF
237
238struct ib_sa_service_rec {
239 u64 id;
240 union ib_gid gid;
241 __be16 pkey;
242 /* reserved */
243 u32 lease;
244 u8 key[16];
245 u8 name[64];
246 u8 data8[16];
247 u16 data16[8];
248 u32 data32[4];
249 u64 data64[2];
250};
251
252struct ib_sa_query;
253
254void ib_sa_cancel_query(int id, struct ib_sa_query *query);
255
256int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
257 struct ib_sa_path_rec *rec,
258 ib_sa_comp_mask comp_mask,
259 int timeout_ms, unsigned int __nocast gfp_mask,
260 void (*callback)(int status,
261 struct ib_sa_path_rec *resp,
262 void *context),
263 void *context,
264 struct ib_sa_query **query);
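
As an illustrative sketch (not part of the patch), an asynchronous path record query between two GIDs might be issued like this; the timeout is arbitrary and the callback simply copies the result:

static void example_path_callback(int status, struct ib_sa_path_rec *resp,
				  void *context)
{
	if (!status)
		*(struct ib_sa_path_rec *) context = *resp;
}

static int example_path_query(struct ib_device *device, u8 port_num,
			      union ib_gid *sgid, union ib_gid *dgid,
			      __be16 pkey, struct ib_sa_path_rec *result,
			      struct ib_sa_query **query)
{
	struct ib_sa_path_rec rec = {
		.sgid      = *sgid,
		.dgid      = *dgid,
		.pkey      = pkey,
		.numb_path = 1
	};

	return ib_sa_path_rec_get(device, port_num, &rec,
				  IB_SA_PATH_REC_SGID | IB_SA_PATH_REC_DGID |
				  IB_SA_PATH_REC_PKEY |
				  IB_SA_PATH_REC_NUMB_PATH,
				  1000 /* ms, illustrative */, GFP_KERNEL,
				  example_path_callback, result, query);
}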
265
266int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
267 u8 method,
268 struct ib_sa_mcmember_rec *rec,
269 ib_sa_comp_mask comp_mask,
270 int timeout_ms, unsigned int __nocast gfp_mask,
271 void (*callback)(int status,
272 struct ib_sa_mcmember_rec *resp,
273 void *context),
274 void *context,
275 struct ib_sa_query **query);
276
277int ib_sa_service_rec_query(struct ib_device *device, u8 port_num,
278 u8 method,
279 struct ib_sa_service_rec *rec,
280 ib_sa_comp_mask comp_mask,
281 int timeout_ms, unsigned int __nocast gfp_mask,
282 void (*callback)(int status,
283 struct ib_sa_service_rec *resp,
284 void *context),
285 void *context,
286 struct ib_sa_query **sa_query);
287
288/**
289 * ib_sa_mcmember_rec_set - Start an MCMember set query
290 * @device:device to send query on
291 * @port_num: port number to send query on
292 * @rec:MCMember Record to send in query
293 * @comp_mask:component mask to send in query
294 * @timeout_ms:time to wait for response
295 * @gfp_mask:GFP mask to use for internal allocations
296 * @callback:function called when query completes, times out or is
297 * canceled
298 * @context:opaque user context passed to callback
299 * @sa_query:query context, used to cancel query
300 *
301 * Send an MCMember Set query to the SA (e.g. to join a multicast
302 * group). The callback function will be called when the query
303 * completes (or fails); status is 0 for a successful response, -EINTR
304 * if the query is canceled, -ETIMEDOUT if the query timed out, or
305 * -EIO if an error occurred sending the query. The resp parameter of
306 * the callback is only valid if status is 0.
307 *
308 * If the return value of ib_sa_mcmember_rec_set() is negative, it is
309 * an error code. Otherwise it is a query ID that can be used to
310 * cancel the query.
311 */
312static inline int
313ib_sa_mcmember_rec_set(struct ib_device *device, u8 port_num,
314 struct ib_sa_mcmember_rec *rec,
315 ib_sa_comp_mask comp_mask,
316 int timeout_ms, unsigned int __nocast gfp_mask,
317 void (*callback)(int status,
318 struct ib_sa_mcmember_rec *resp,
319 void *context),
320 void *context,
321 struct ib_sa_query **query)
322{
323 return ib_sa_mcmember_rec_query(device, port_num,
324 IB_MGMT_METHOD_SET,
325 rec, comp_mask,
326 timeout_ms, gfp_mask, callback,
327 context, query);
328}
329
330/**
331 * ib_sa_mcmember_rec_delete - Start an MCMember delete query
332 * @device:device to send query on
333 * @port_num: port number to send query on
334 * @rec:MCMember Record to send in query
335 * @comp_mask:component mask to send in query
336 * @timeout_ms:time to wait for response
337 * @gfp_mask:GFP mask to use for internal allocations
338 * @callback:function called when query completes, times out or is
339 * canceled
340 * @context:opaque user context passed to callback
341 * @sa_query:query context, used to cancel query
342 *
343 * Send an MCMember Delete query to the SA (e.g. to leave a multicast
344 * group). The callback function will be called when the query
345 * completes (or fails); status is 0 for a successful response, -EINTR
346 * if the query is canceled, -ETIMEDOUT if the query timed out, or
347 * -EIO if an error occurred sending the query. The resp parameter of
348 * the callback is only valid if status is 0.
349 *
350 * If the return value of ib_sa_mcmember_rec_delete() is negative, it
351 * is an error code. Otherwise it is a query ID that can be used to
352 * cancel the query.
353 */
354static inline int
355ib_sa_mcmember_rec_delete(struct ib_device *device, u8 port_num,
356 struct ib_sa_mcmember_rec *rec,
357 ib_sa_comp_mask comp_mask,
358 int timeout_ms, unsigned int __nocast gfp_mask,
359 void (*callback)(int status,
360 struct ib_sa_mcmember_rec *resp,
361 void *context),
362 void *context,
363 struct ib_sa_query **query)
364{
365 return ib_sa_mcmember_rec_query(device, port_num,
366 IB_SA_METHOD_DELETE,
367 rec, comp_mask,
368 timeout_ms, gfp_mask, callback,
369 context, query);
370}
371
372
373#endif /* IB_SA_H */
diff --git a/include/rdma/ib_smi.h b/include/rdma/ib_smi.h
new file mode 100644
index 000000000000..87f60737f695
--- /dev/null
+++ b/include/rdma/ib_smi.h
@@ -0,0 +1,94 @@
1/*
2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
7 *
8 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU
10 * General Public License (GPL) Version 2, available from the file
11 * COPYING in the main directory of this source tree, or the
12 * OpenIB.org BSD license below:
13 *
14 * Redistribution and use in source and binary forms, with or
15 * without modification, are permitted provided that the following
16 * conditions are met:
17 *
18 * - Redistributions of source code must retain the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer.
21 *
22 * - Redistributions in binary form must reproduce the above
23 * copyright notice, this list of conditions and the following
24 * disclaimer in the documentation and/or other materials
25 * provided with the distribution.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * SOFTWARE.
35 *
36 * $Id: ib_smi.h 1389 2004-12-27 22:56:47Z roland $
37 */
38
39#if !defined( IB_SMI_H )
40#define IB_SMI_H
41
42#include <rdma/ib_mad.h>
43
44#define IB_SMP_DATA_SIZE 64
45#define IB_SMP_MAX_PATH_HOPS 64
46
47struct ib_smp {
48 u8 base_version;
49 u8 mgmt_class;
50 u8 class_version;
51 u8 method;
52 __be16 status;
53 u8 hop_ptr;
54 u8 hop_cnt;
55 __be64 tid;
56 __be16 attr_id;
57 __be16 resv;
58 __be32 attr_mod;
59 __be64 mkey;
60 __be16 dr_slid;
61 __be16 dr_dlid;
62 u8 reserved[28];
63 u8 data[IB_SMP_DATA_SIZE];
64 u8 initial_path[IB_SMP_MAX_PATH_HOPS];
65 u8 return_path[IB_SMP_MAX_PATH_HOPS];
66} __attribute__ ((packed));
67
68#define IB_SMP_DIRECTION __constant_htons(0x8000)
69
70/* Subnet management attributes */
71#define IB_SMP_ATTR_NOTICE __constant_htons(0x0002)
72#define IB_SMP_ATTR_NODE_DESC __constant_htons(0x0010)
73#define IB_SMP_ATTR_NODE_INFO __constant_htons(0x0011)
74#define IB_SMP_ATTR_SWITCH_INFO __constant_htons(0x0012)
75#define IB_SMP_ATTR_GUID_INFO __constant_htons(0x0014)
76#define IB_SMP_ATTR_PORT_INFO __constant_htons(0x0015)
77#define IB_SMP_ATTR_PKEY_TABLE __constant_htons(0x0016)
78#define IB_SMP_ATTR_SL_TO_VL_TABLE __constant_htons(0x0017)
79#define IB_SMP_ATTR_VL_ARB_TABLE __constant_htons(0x0018)
80#define IB_SMP_ATTR_LINEAR_FORWARD_TABLE __constant_htons(0x0019)
81#define IB_SMP_ATTR_RANDOM_FORWARD_TABLE __constant_htons(0x001A)
82#define IB_SMP_ATTR_MCAST_FORWARD_TABLE __constant_htons(0x001B)
83#define IB_SMP_ATTR_SM_INFO __constant_htons(0x0020)
84#define IB_SMP_ATTR_VENDOR_DIAG __constant_htons(0x0030)
85#define IB_SMP_ATTR_LED_INFO __constant_htons(0x0031)
86#define IB_SMP_ATTR_VENDOR_MASK __constant_htons(0xFF00)
87
88static inline u8
89ib_get_smp_direction(struct ib_smp *smp)
90{
91 return ((smp->status & IB_SMP_DIRECTION) == IB_SMP_DIRECTION);
92}
93
94#endif /* IB_SMI_H */
diff --git a/include/rdma/ib_user_cm.h b/include/rdma/ib_user_cm.h
new file mode 100644
index 000000000000..72182d16778b
--- /dev/null
+++ b/include/rdma/ib_user_cm.h
@@ -0,0 +1,328 @@
1/*
2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 * $Id: ib_user_cm.h 2576 2005-06-09 17:00:30Z libor $
33 */
34
35#ifndef IB_USER_CM_H
36#define IB_USER_CM_H
37
38#include <linux/types.h>
39
40#define IB_USER_CM_ABI_VERSION 1
41
42enum {
43 IB_USER_CM_CMD_CREATE_ID,
44 IB_USER_CM_CMD_DESTROY_ID,
45 IB_USER_CM_CMD_ATTR_ID,
46
47 IB_USER_CM_CMD_LISTEN,
48 IB_USER_CM_CMD_ESTABLISH,
49
50 IB_USER_CM_CMD_SEND_REQ,
51 IB_USER_CM_CMD_SEND_REP,
52 IB_USER_CM_CMD_SEND_RTU,
53 IB_USER_CM_CMD_SEND_DREQ,
54 IB_USER_CM_CMD_SEND_DREP,
55 IB_USER_CM_CMD_SEND_REJ,
56 IB_USER_CM_CMD_SEND_MRA,
57 IB_USER_CM_CMD_SEND_LAP,
58 IB_USER_CM_CMD_SEND_APR,
59 IB_USER_CM_CMD_SEND_SIDR_REQ,
60 IB_USER_CM_CMD_SEND_SIDR_REP,
61
62 IB_USER_CM_CMD_EVENT,
63};
64/*
65 * command ABI structures.
66 */
67struct ib_ucm_cmd_hdr {
68 __u32 cmd;
69 __u16 in;
70 __u16 out;
71};
72
73struct ib_ucm_create_id {
74 __u64 response;
75};
76
77struct ib_ucm_create_id_resp {
78 __u32 id;
79};
80
81struct ib_ucm_destroy_id {
82 __u32 id;
83};
84
85struct ib_ucm_attr_id {
86 __u64 response;
87 __u32 id;
88};
89
90struct ib_ucm_attr_id_resp {
91 __be64 service_id;
92 __be64 service_mask;
93 __be32 local_id;
94 __be32 remote_id;
95};
96
97struct ib_ucm_listen {
98 __be64 service_id;
99 __be64 service_mask;
100 __u32 id;
101};
102
103struct ib_ucm_establish {
104 __u32 id;
105};
106
107struct ib_ucm_private_data {
108 __u64 data;
109 __u32 id;
110 __u8 len;
111 __u8 reserved[3];
112};
113
114struct ib_ucm_path_rec {
115 __u8 dgid[16];
116 __u8 sgid[16];
117 __be16 dlid;
118 __be16 slid;
119 __u32 raw_traffic;
120 __be32 flow_label;
121 __u32 reversible;
122 __u32 mtu;
123 __be16 pkey;
124 __u8 hop_limit;
125 __u8 traffic_class;
126 __u8 numb_path;
127 __u8 sl;
128 __u8 mtu_selector;
129 __u8 rate_selector;
130 __u8 rate;
131 __u8 packet_life_time_selector;
132 __u8 packet_life_time;
133 __u8 preference;
134};
135
136struct ib_ucm_req {
137 __u32 id;
138 __u32 qpn;
139 __u32 qp_type;
140 __u32 psn;
141 __be64 sid;
142 __u64 data;
143 __u64 primary_path;
144 __u64 alternate_path;
145 __u8 len;
146 __u8 peer_to_peer;
147 __u8 responder_resources;
148 __u8 initiator_depth;
149 __u8 remote_cm_response_timeout;
150 __u8 flow_control;
151 __u8 local_cm_response_timeout;
152 __u8 retry_count;
153 __u8 rnr_retry_count;
154 __u8 max_cm_retries;
155 __u8 srq;
156 __u8 reserved[1];
157};
158
159struct ib_ucm_rep {
160 __u64 data;
161 __u32 id;
162 __u32 qpn;
163 __u32 psn;
164 __u8 len;
165 __u8 responder_resources;
166 __u8 initiator_depth;
167 __u8 target_ack_delay;
168 __u8 failover_accepted;
169 __u8 flow_control;
170 __u8 rnr_retry_count;
171 __u8 srq;
172};
173
174struct ib_ucm_info {
175 __u32 id;
176 __u32 status;
177 __u64 info;
178 __u64 data;
179 __u8 info_len;
180 __u8 data_len;
181 __u8 reserved[2];
182};
183
184struct ib_ucm_mra {
185 __u64 data;
186 __u32 id;
187 __u8 len;
188 __u8 timeout;
189 __u8 reserved[2];
190};
191
192struct ib_ucm_lap {
193 __u64 path;
194 __u64 data;
195 __u32 id;
196 __u8 len;
197 __u8 reserved[3];
198};
199
200struct ib_ucm_sidr_req {
201 __u32 id;
202 __u32 timeout;
203 __be64 sid;
204 __u64 data;
205 __u64 path;
206 __u16 pkey;
207 __u8 len;
208 __u8 max_cm_retries;
209};
210
211struct ib_ucm_sidr_rep {
212 __u32 id;
213 __u32 qpn;
214 __u32 qkey;
215 __u32 status;
216 __u64 info;
217 __u64 data;
218 __u8 info_len;
219 __u8 data_len;
220 __u8 reserved[2];
221};
222/*
223 * event notification ABI structures.
224 */
225struct ib_ucm_event_get {
226 __u64 response;
227 __u64 data;
228 __u64 info;
229 __u8 data_len;
230 __u8 info_len;
231 __u8 reserved[2];
232};
233
234struct ib_ucm_req_event_resp {
235 __u32 listen_id;
236 /* device */
237 /* port */
238 struct ib_ucm_path_rec primary_path;
239 struct ib_ucm_path_rec alternate_path;
240 __be64 remote_ca_guid;
241 __u32 remote_qkey;
242 __u32 remote_qpn;
243 __u32 qp_type;
244 __u32 starting_psn;
245 __u8 responder_resources;
246 __u8 initiator_depth;
247 __u8 local_cm_response_timeout;
248 __u8 flow_control;
249 __u8 remote_cm_response_timeout;
250 __u8 retry_count;
251 __u8 rnr_retry_count;
252 __u8 srq;
253};
254
255struct ib_ucm_rep_event_resp {
256 __be64 remote_ca_guid;
257 __u32 remote_qkey;
258 __u32 remote_qpn;
259 __u32 starting_psn;
260 __u8 responder_resources;
261 __u8 initiator_depth;
262 __u8 target_ack_delay;
263 __u8 failover_accepted;
264 __u8 flow_control;
265 __u8 rnr_retry_count;
266 __u8 srq;
267 __u8 reserved[1];
268};
269
270struct ib_ucm_rej_event_resp {
271 __u32 reason;
272 /* ari in ib_ucm_event_get info field. */
273};
274
275struct ib_ucm_mra_event_resp {
276 __u8 timeout;
277 __u8 reserved[3];
278};
279
280struct ib_ucm_lap_event_resp {
281 struct ib_ucm_path_rec path;
282};
283
284struct ib_ucm_apr_event_resp {
285 __u32 status;
286 /* apr info in ib_ucm_event_get info field. */
287};
288
289struct ib_ucm_sidr_req_event_resp {
290 __u32 listen_id;
291 /* device */
292 /* port */
293 __u16 pkey;
294 __u8 reserved[2];
295};
296
297struct ib_ucm_sidr_rep_event_resp {
298 __u32 status;
299 __u32 qkey;
300 __u32 qpn;
301 /* info in ib_ucm_event_get info field. */
302};
303
304#define IB_UCM_PRES_DATA 0x01
305#define IB_UCM_PRES_INFO 0x02
306#define IB_UCM_PRES_PRIMARY 0x04
307#define IB_UCM_PRES_ALTERNATE 0x08
308
309struct ib_ucm_event_resp {
310 __u32 id;
311 __u32 event;
312 __u32 present;
313 union {
314 struct ib_ucm_req_event_resp req_resp;
315 struct ib_ucm_rep_event_resp rep_resp;
316 struct ib_ucm_rej_event_resp rej_resp;
317 struct ib_ucm_mra_event_resp mra_resp;
318 struct ib_ucm_lap_event_resp lap_resp;
319 struct ib_ucm_apr_event_resp apr_resp;
320
321 struct ib_ucm_sidr_req_event_resp sidr_req_resp;
322 struct ib_ucm_sidr_rep_event_resp sidr_rep_resp;
323
324 __u32 send_status;
325 } u;
326};
327
328#endif /* IB_USER_CM_H */
diff --git a/include/rdma/ib_user_mad.h b/include/rdma/ib_user_mad.h
new file mode 100644
index 000000000000..44537aa32e62
--- /dev/null
+++ b/include/rdma/ib_user_mad.h
@@ -0,0 +1,137 @@
1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 * $Id: ib_user_mad.h 2814 2005-07-06 19:14:09Z halr $
34 */
35
36#ifndef IB_USER_MAD_H
37#define IB_USER_MAD_H
38
39#include <linux/types.h>
40#include <linux/ioctl.h>
41
42/*
43 * Increment this value if any changes that break userspace ABI
44 * compatibility are made.
45 */
46#define IB_USER_MAD_ABI_VERSION 5
47
48/*
49 * Make sure that all structs defined in this file remain laid out so
50 * that they pack the same way on 32-bit and 64-bit architectures (to
51 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
52 */
53
54/**
55 * ib_user_mad_hdr - MAD packet header
56 * @id - ID of agent MAD received with/to be sent with
57 * @status - 0 on successful receive, ETIMEDOUT if no response
58 * received (transaction ID in data[] will be set to TID of original
59 * request) (ignored on send)
60 * @timeout_ms - Milliseconds to wait for response (unset on receive)
61 * @retries - Number of automatic retries to attempt
62 * @qpn - Remote QP number received from/to be sent to
63 * @qkey - Remote Q_Key to be sent with (unset on receive)
64 * @lid - Remote lid received from/to be sent to
65 * @sl - Service level received with/to be sent with
66 * @path_bits - Local path bits received with/to be sent with
67 * @grh_present - If set, GRH was received/should be sent
68 * @gid_index - Local GID index to send with (unset on receive)
69 * @hop_limit - Hop limit in GRH
70 * @traffic_class - Traffic class in GRH
71 * @gid - Remote GID in GRH
72 * @flow_label - Flow label in GRH
73 */
74struct ib_user_mad_hdr {
75 __u32 id;
76 __u32 status;
77 __u32 timeout_ms;
78 __u32 retries;
79 __u32 length;
80 __be32 qpn;
81 __be32 qkey;
82 __be16 lid;
83 __u8 sl;
84 __u8 path_bits;
85 __u8 grh_present;
86 __u8 gid_index;
87 __u8 hop_limit;
88 __u8 traffic_class;
89 __u8 gid[16];
90 __be32 flow_label;
91};
92
93/**
94 * ib_user_mad - MAD packet
95 * @hdr - MAD packet header
96 * @data - Contents of MAD
97 *
98 */
99struct ib_user_mad {
100 struct ib_user_mad_hdr hdr;
101 __u8 data[0];
102};
103
104/**
105 * ib_user_mad_reg_req - MAD registration request
106 * @id - Set by the kernel; used to identify agent in future requests.
107 * @qpn - Queue pair number; must be 0 or 1.
108 * @method_mask - The caller will receive unsolicited MADs for any method
109 * where @method_mask = 1.
110 * @mgmt_class - Indicates which management class of MADs should be received
111 * by the caller. This field is only required if the user wishes to
112 * receive unsolicited MADs, otherwise it should be 0.
113 * @mgmt_class_version - Indicates which version of MADs for the given
114 * management class to receive.
115 * @oui: Indicates IEEE OUI when mgmt_class is a vendor class
116 * in the range from 0x30 to 0x4f. Otherwise not used.
117 * @rmpp_version: If set, indicates the RMPP version used.
118 *
119 */
120struct ib_user_mad_reg_req {
121 __u32 id;
122 __u32 method_mask[4];
123 __u8 qpn;
124 __u8 mgmt_class;
125 __u8 mgmt_class_version;
126 __u8 oui[3];
127 __u8 rmpp_version;
128};
129
130#define IB_IOCTL_MAGIC 0x1b
131
132#define IB_USER_MAD_REGISTER_AGENT _IOWR(IB_IOCTL_MAGIC, 1, \
133 struct ib_user_mad_reg_req)
134
135#define IB_USER_MAD_UNREGISTER_AGENT _IOW(IB_IOCTL_MAGIC, 2, __u32)
136
137#endif /* IB_USER_MAD_H */
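
The registration request and ioctls above are enough for a small userspace sketch that binds a MAD agent to QP1 for one management class. The ioctl numbers and structure layout come from this header; the device node name, the header's visibility to userspace, and the convention that each bit in method_mask corresponds to a method number are assumptions.

/* Hypothetical userspace sketch: register, then unregister, a MAD agent. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <rdma/ib_user_mad.h>	/* assumed reachable by userspace builds */

int main(void)
{
	struct ib_user_mad_reg_req req;
	int fd = open("/dev/infiniband/umad0", O_RDWR);	/* path assumed */

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof req);
	req.qpn                = 1;		/* GSI */
	req.mgmt_class         = 0x03;		/* e.g. subnet administration class */
	req.mgmt_class_version = 2;
	req.method_mask[0]     = 1 << 0x05;	/* assumed: bit index == method, 0x05 = Trap */

	if (ioctl(fd, IB_USER_MAD_REGISTER_AGENT, &req) < 0) {
		perror("IB_USER_MAD_REGISTER_AGENT");
		close(fd);
		return 1;
	}
	printf("agent id %u\n", req.id);

	ioctl(fd, IB_USER_MAD_UNREGISTER_AGENT, &req.id);
	close(fd);
	return 0;
}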
diff --git a/include/rdma/ib_user_verbs.h b/include/rdma/ib_user_verbs.h
new file mode 100644
index 000000000000..7ebb01c8f996
--- /dev/null
+++ b/include/rdma/ib_user_verbs.h
@@ -0,0 +1,422 @@
1/*
2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 * $Id: ib_user_verbs.h 2708 2005-06-24 17:27:21Z roland $
34 */
35
36#ifndef IB_USER_VERBS_H
37#define IB_USER_VERBS_H
38
39#include <linux/types.h>
40
41/*
42 * Increment this value if any changes that break userspace ABI
43 * compatibility are made.
44 */
45#define IB_USER_VERBS_ABI_VERSION 1
46
47enum {
48 IB_USER_VERBS_CMD_QUERY_PARAMS,
49 IB_USER_VERBS_CMD_GET_CONTEXT,
50 IB_USER_VERBS_CMD_QUERY_DEVICE,
51 IB_USER_VERBS_CMD_QUERY_PORT,
52 IB_USER_VERBS_CMD_QUERY_GID,
53 IB_USER_VERBS_CMD_QUERY_PKEY,
54 IB_USER_VERBS_CMD_ALLOC_PD,
55 IB_USER_VERBS_CMD_DEALLOC_PD,
56 IB_USER_VERBS_CMD_CREATE_AH,
57 IB_USER_VERBS_CMD_MODIFY_AH,
58 IB_USER_VERBS_CMD_QUERY_AH,
59 IB_USER_VERBS_CMD_DESTROY_AH,
60 IB_USER_VERBS_CMD_REG_MR,
61 IB_USER_VERBS_CMD_REG_SMR,
62 IB_USER_VERBS_CMD_REREG_MR,
63 IB_USER_VERBS_CMD_QUERY_MR,
64 IB_USER_VERBS_CMD_DEREG_MR,
65 IB_USER_VERBS_CMD_ALLOC_MW,
66 IB_USER_VERBS_CMD_BIND_MW,
67 IB_USER_VERBS_CMD_DEALLOC_MW,
68 IB_USER_VERBS_CMD_CREATE_CQ,
69 IB_USER_VERBS_CMD_RESIZE_CQ,
70 IB_USER_VERBS_CMD_DESTROY_CQ,
71 IB_USER_VERBS_CMD_POLL_CQ,
72 IB_USER_VERBS_CMD_PEEK_CQ,
73 IB_USER_VERBS_CMD_REQ_NOTIFY_CQ,
74 IB_USER_VERBS_CMD_CREATE_QP,
75 IB_USER_VERBS_CMD_QUERY_QP,
76 IB_USER_VERBS_CMD_MODIFY_QP,
77 IB_USER_VERBS_CMD_DESTROY_QP,
78 IB_USER_VERBS_CMD_POST_SEND,
79 IB_USER_VERBS_CMD_POST_RECV,
80 IB_USER_VERBS_CMD_ATTACH_MCAST,
81 IB_USER_VERBS_CMD_DETACH_MCAST,
82 IB_USER_VERBS_CMD_CREATE_SRQ,
83 IB_USER_VERBS_CMD_MODIFY_SRQ,
84 IB_USER_VERBS_CMD_QUERY_SRQ,
85 IB_USER_VERBS_CMD_DESTROY_SRQ,
86 IB_USER_VERBS_CMD_POST_SRQ_RECV
87};
88
89/*
90 * Make sure that all structs defined in this file remain laid out so
91 * that they pack the same way on 32-bit and 64-bit architectures (to
92 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
93 * In particular do not use pointer types -- pass pointers in __u64
94 * instead.
95 */
96
97struct ib_uverbs_async_event_desc {
98 __u64 element;
99 __u32 event_type; /* enum ib_event_type */
100 __u32 reserved;
101};
102
103struct ib_uverbs_comp_event_desc {
104 __u64 cq_handle;
105};
106
107/*
108 * All commands from userspace should start with a __u32 command field
109 * followed by __u16 in_words and out_words fields (which give the
110 * length of the command block and response buffer if any in 32-bit
111 * words). The kernel driver will read these fields first and read
112 * the rest of the command struct based on these values.
113 */
114
115struct ib_uverbs_cmd_hdr {
116 __u32 command;
117 __u16 in_words;
118 __u16 out_words;
119};
120
121/*
122 * No driver_data for "query params" command, since this is intended
123 * to be a core function with no possible device dependence.
124 */
125struct ib_uverbs_query_params {
126 __u64 response;
127};
128
129struct ib_uverbs_query_params_resp {
130 __u32 num_cq_events;
131};
132
133struct ib_uverbs_get_context {
134 __u64 response;
135 __u64 cq_fd_tab;
136 __u64 driver_data[0];
137};
138
139struct ib_uverbs_get_context_resp {
140 __u32 async_fd;
141 __u32 reserved;
142};
143
144struct ib_uverbs_query_device {
145 __u64 response;
146 __u64 driver_data[0];
147};
148
149struct ib_uverbs_query_device_resp {
150 __u64 fw_ver;
151 __be64 node_guid;
152 __be64 sys_image_guid;
153 __u64 max_mr_size;
154 __u64 page_size_cap;
155 __u32 vendor_id;
156 __u32 vendor_part_id;
157 __u32 hw_ver;
158 __u32 max_qp;
159 __u32 max_qp_wr;
160 __u32 device_cap_flags;
161 __u32 max_sge;
162 __u32 max_sge_rd;
163 __u32 max_cq;
164 __u32 max_cqe;
165 __u32 max_mr;
166 __u32 max_pd;
167 __u32 max_qp_rd_atom;
168 __u32 max_ee_rd_atom;
169 __u32 max_res_rd_atom;
170 __u32 max_qp_init_rd_atom;
171 __u32 max_ee_init_rd_atom;
172 __u32 atomic_cap;
173 __u32 max_ee;
174 __u32 max_rdd;
175 __u32 max_mw;
176 __u32 max_raw_ipv6_qp;
177 __u32 max_raw_ethy_qp;
178 __u32 max_mcast_grp;
179 __u32 max_mcast_qp_attach;
180 __u32 max_total_mcast_qp_attach;
181 __u32 max_ah;
182 __u32 max_fmr;
183 __u32 max_map_per_fmr;
184 __u32 max_srq;
185 __u32 max_srq_wr;
186 __u32 max_srq_sge;
187 __u16 max_pkeys;
188 __u8 local_ca_ack_delay;
189 __u8 phys_port_cnt;
190 __u8 reserved[4];
191};
192
193struct ib_uverbs_query_port {
194 __u64 response;
195 __u8 port_num;
196 __u8 reserved[7];
197 __u64 driver_data[0];
198};
199
200struct ib_uverbs_query_port_resp {
201 __u32 port_cap_flags;
202 __u32 max_msg_sz;
203 __u32 bad_pkey_cntr;
204 __u32 qkey_viol_cntr;
205 __u32 gid_tbl_len;
206 __u16 pkey_tbl_len;
207 __u16 lid;
208 __u16 sm_lid;
209 __u8 state;
210 __u8 max_mtu;
211 __u8 active_mtu;
212 __u8 lmc;
213 __u8 max_vl_num;
214 __u8 sm_sl;
215 __u8 subnet_timeout;
216 __u8 init_type_reply;
217 __u8 active_width;
218 __u8 active_speed;
219 __u8 phys_state;
220 __u8 reserved[3];
221};
222
223struct ib_uverbs_query_gid {
224 __u64 response;
225 __u8 port_num;
226 __u8 index;
227 __u8 reserved[6];
228 __u64 driver_data[0];
229};
230
231struct ib_uverbs_query_gid_resp {
232 __u8 gid[16];
233};
234
235struct ib_uverbs_query_pkey {
236 __u64 response;
237 __u8 port_num;
238 __u8 index;
239 __u8 reserved[6];
240 __u64 driver_data[0];
241};
242
243struct ib_uverbs_query_pkey_resp {
244 __u16 pkey;
245 __u16 reserved;
246};
247
248struct ib_uverbs_alloc_pd {
249 __u64 response;
250 __u64 driver_data[0];
251};
252
253struct ib_uverbs_alloc_pd_resp {
254 __u32 pd_handle;
255};
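
Tying the command-header convention described earlier in this file to a concrete command, a userspace sketch for IB_USER_VERBS_CMD_ALLOC_PD might look like this. The layouts and command value come from this header; the assumptions are that the header is usable from userspace, that fd refers to an already-open uverbs device, and that in_words counts header plus payload in 32-bit words.

/* Hypothetical userspace sketch: allocate a protection domain. */
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <rdma/ib_user_verbs.h>	/* assumed reachable by userspace builds */

static int uverbs_alloc_pd(int fd, uint32_t *pd_handle)
{
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_uverbs_alloc_pd cmd;
	struct ib_uverbs_cmd_hdr hdr;
	char buf[sizeof hdr + sizeof cmd];

	memset(&resp, 0, sizeof resp);
	hdr.command   = IB_USER_VERBS_CMD_ALLOC_PD;
	hdr.in_words  = sizeof buf  / 4;	/* assumed: header included */
	hdr.out_words = sizeof resp / 4;
	cmd.response  = (uintptr_t) &resp;	/* kernel writes pd_handle here */

	memcpy(buf, &hdr, sizeof hdr);
	memcpy(buf + sizeof hdr, &cmd, sizeof cmd);
	if (write(fd, buf, sizeof buf) != sizeof buf)
		return -1;

	*pd_handle = resp.pd_handle;
	return 0;
}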
256
257struct ib_uverbs_dealloc_pd {
258 __u32 pd_handle;
259};
260
261struct ib_uverbs_reg_mr {
262 __u64 response;
263 __u64 start;
264 __u64 length;
265 __u64 hca_va;
266 __u32 pd_handle;
267 __u32 access_flags;
268 __u64 driver_data[0];
269};
270
271struct ib_uverbs_reg_mr_resp {
272 __u32 mr_handle;
273 __u32 lkey;
274 __u32 rkey;
275};
276
277struct ib_uverbs_dereg_mr {
278 __u32 mr_handle;
279};
280
281struct ib_uverbs_create_cq {
282 __u64 response;
283 __u64 user_handle;
284 __u32 cqe;
285 __u32 event_handler;
286 __u64 driver_data[0];
287};
288
289struct ib_uverbs_create_cq_resp {
290 __u32 cq_handle;
291 __u32 cqe;
292};
293
294struct ib_uverbs_destroy_cq {
295 __u32 cq_handle;
296};
297
298struct ib_uverbs_create_qp {
299 __u64 response;
300 __u64 user_handle;
301 __u32 pd_handle;
302 __u32 send_cq_handle;
303 __u32 recv_cq_handle;
304 __u32 srq_handle;
305 __u32 max_send_wr;
306 __u32 max_recv_wr;
307 __u32 max_send_sge;
308 __u32 max_recv_sge;
309 __u32 max_inline_data;
310 __u8 sq_sig_all;
311 __u8 qp_type;
312 __u8 is_srq;
313 __u8 reserved;
314 __u64 driver_data[0];
315};
316
317struct ib_uverbs_create_qp_resp {
318 __u32 qp_handle;
319 __u32 qpn;
320};
321
322/*
323 * This struct needs to remain a multiple of 8 bytes to keep the
324 * alignment of the modify QP parameters.
325 */
326struct ib_uverbs_qp_dest {
327 __u8 dgid[16];
328 __u32 flow_label;
329 __u16 dlid;
330 __u16 reserved;
331 __u8 sgid_index;
332 __u8 hop_limit;
333 __u8 traffic_class;
334 __u8 sl;
335 __u8 src_path_bits;
336 __u8 static_rate;
337 __u8 is_global;
338 __u8 port_num;
339};
340
341struct ib_uverbs_modify_qp {
342 struct ib_uverbs_qp_dest dest;
343 struct ib_uverbs_qp_dest alt_dest;
344 __u32 qp_handle;
345 __u32 attr_mask;
346 __u32 qkey;
347 __u32 rq_psn;
348 __u32 sq_psn;
349 __u32 dest_qp_num;
350 __u32 qp_access_flags;
351 __u16 pkey_index;
352 __u16 alt_pkey_index;
353 __u8 qp_state;
354 __u8 cur_qp_state;
355 __u8 path_mtu;
356 __u8 path_mig_state;
357 __u8 en_sqd_async_notify;
358 __u8 max_rd_atomic;
359 __u8 max_dest_rd_atomic;
360 __u8 min_rnr_timer;
361 __u8 port_num;
362 __u8 timeout;
363 __u8 retry_cnt;
364 __u8 rnr_retry;
365 __u8 alt_port_num;
366 __u8 alt_timeout;
367 __u8 reserved[2];
368 __u64 driver_data[0];
369};
370
371struct ib_uverbs_modify_qp_resp {
372};
373
374struct ib_uverbs_destroy_qp {
375 __u32 qp_handle;
376};
377
378struct ib_uverbs_attach_mcast {
379 __u8 gid[16];
380 __u32 qp_handle;
381 __u16 mlid;
382 __u16 reserved;
383 __u64 driver_data[0];
384};
385
386struct ib_uverbs_detach_mcast {
387 __u8 gid[16];
388 __u32 qp_handle;
389 __u16 mlid;
390 __u16 reserved;
391 __u64 driver_data[0];
392};
393
394struct ib_uverbs_create_srq {
395 __u64 response;
396 __u64 user_handle;
397 __u32 pd_handle;
398 __u32 max_wr;
399 __u32 max_sge;
400 __u32 srq_limit;
401 __u64 driver_data[0];
402};
403
404struct ib_uverbs_create_srq_resp {
405 __u32 srq_handle;
406};
407
408struct ib_uverbs_modify_srq {
409 __u32 srq_handle;
410 __u32 attr_mask;
411 __u32 max_wr;
412 __u32 max_sge;
413 __u32 srq_limit;
414 __u32 reserved;
415 __u64 driver_data[0];
416};
417
418struct ib_uverbs_destroy_srq {
419 __u32 srq_handle;
420};
421
422#endif /* IB_USER_VERBS_H */
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
new file mode 100644
index 000000000000..e16cf94870f2
--- /dev/null
+++ b/include/rdma/ib_verbs.h
@@ -0,0 +1,1461 @@
1/*
2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
8 * Copyright (c) 2005 Cisco Systems. All rights reserved.
9 *
10 * This software is available to you under a choice of one of two
11 * licenses. You may choose to be licensed under the terms of the GNU
12 * General Public License (GPL) Version 2, available from the file
13 * COPYING in the main directory of this source tree, or the
14 * OpenIB.org BSD license below:
15 *
16 * Redistribution and use in source and binary forms, with or
17 * without modification, are permitted provided that the following
18 * conditions are met:
19 *
20 * - Redistributions of source code must retain the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer.
23 *
24 * - Redistributions in binary form must reproduce the above
25 * copyright notice, this list of conditions and the following
26 * disclaimer in the documentation and/or other materials
27 * provided with the distribution.
28 *
29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 * SOFTWARE.
37 *
38 * $Id: ib_verbs.h 1349 2004-12-16 21:09:43Z roland $
39 */
40
41#if !defined(IB_VERBS_H)
42#define IB_VERBS_H
43
44#include <linux/types.h>
45#include <linux/device.h>
46
47#include <asm/atomic.h>
48#include <asm/scatterlist.h>
49#include <asm/uaccess.h>
50
51union ib_gid {
52 u8 raw[16];
53 struct {
54 __be64 subnet_prefix;
55 __be64 interface_id;
56 } global;
57};
58
59enum ib_node_type {
60 IB_NODE_CA = 1,
61 IB_NODE_SWITCH,
62 IB_NODE_ROUTER
63};
64
65enum ib_device_cap_flags {
66 IB_DEVICE_RESIZE_MAX_WR = 1,
67 IB_DEVICE_BAD_PKEY_CNTR = (1<<1),
68 IB_DEVICE_BAD_QKEY_CNTR = (1<<2),
69 IB_DEVICE_RAW_MULTI = (1<<3),
70 IB_DEVICE_AUTO_PATH_MIG = (1<<4),
71 IB_DEVICE_CHANGE_PHY_PORT = (1<<5),
72 IB_DEVICE_UD_AV_PORT_ENFORCE = (1<<6),
73 IB_DEVICE_CURR_QP_STATE_MOD = (1<<7),
74 IB_DEVICE_SHUTDOWN_PORT = (1<<8),
75 IB_DEVICE_INIT_TYPE = (1<<9),
76 IB_DEVICE_PORT_ACTIVE_EVENT = (1<<10),
77 IB_DEVICE_SYS_IMAGE_GUID = (1<<11),
78 IB_DEVICE_RC_RNR_NAK_GEN = (1<<12),
79 IB_DEVICE_SRQ_RESIZE = (1<<13),
80 IB_DEVICE_N_NOTIFY_CQ = (1<<14),
81};
82
83enum ib_atomic_cap {
84 IB_ATOMIC_NONE,
85 IB_ATOMIC_HCA,
86 IB_ATOMIC_GLOB
87};
88
89struct ib_device_attr {
90 u64 fw_ver;
91 __be64 node_guid;
92 __be64 sys_image_guid;
93 u64 max_mr_size;
94 u64 page_size_cap;
95 u32 vendor_id;
96 u32 vendor_part_id;
97 u32 hw_ver;
98 int max_qp;
99 int max_qp_wr;
100 int device_cap_flags;
101 int max_sge;
102 int max_sge_rd;
103 int max_cq;
104 int max_cqe;
105 int max_mr;
106 int max_pd;
107 int max_qp_rd_atom;
108 int max_ee_rd_atom;
109 int max_res_rd_atom;
110 int max_qp_init_rd_atom;
111 int max_ee_init_rd_atom;
112 enum ib_atomic_cap atomic_cap;
113 int max_ee;
114 int max_rdd;
115 int max_mw;
116 int max_raw_ipv6_qp;
117 int max_raw_ethy_qp;
118 int max_mcast_grp;
119 int max_mcast_qp_attach;
120 int max_total_mcast_qp_attach;
121 int max_ah;
122 int max_fmr;
123 int max_map_per_fmr;
124 int max_srq;
125 int max_srq_wr;
126 int max_srq_sge;
127 u16 max_pkeys;
128 u8 local_ca_ack_delay;
129};
130
131enum ib_mtu {
132 IB_MTU_256 = 1,
133 IB_MTU_512 = 2,
134 IB_MTU_1024 = 3,
135 IB_MTU_2048 = 4,
136 IB_MTU_4096 = 5
137};
138
139static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
140{
141 switch (mtu) {
142 case IB_MTU_256: return 256;
143 case IB_MTU_512: return 512;
144 case IB_MTU_1024: return 1024;
145 case IB_MTU_2048: return 2048;
146 case IB_MTU_4096: return 4096;
147 default: return -1;
148 }
149}
150
151enum ib_port_state {
152 IB_PORT_NOP = 0,
153 IB_PORT_DOWN = 1,
154 IB_PORT_INIT = 2,
155 IB_PORT_ARMED = 3,
156 IB_PORT_ACTIVE = 4,
157 IB_PORT_ACTIVE_DEFER = 5
158};
159
160enum ib_port_cap_flags {
161 IB_PORT_SM = 1 << 1,
162 IB_PORT_NOTICE_SUP = 1 << 2,
163 IB_PORT_TRAP_SUP = 1 << 3,
164 IB_PORT_OPT_IPD_SUP = 1 << 4,
165 IB_PORT_AUTO_MIGR_SUP = 1 << 5,
166 IB_PORT_SL_MAP_SUP = 1 << 6,
167 IB_PORT_MKEY_NVRAM = 1 << 7,
168 IB_PORT_PKEY_NVRAM = 1 << 8,
169 IB_PORT_LED_INFO_SUP = 1 << 9,
170 IB_PORT_SM_DISABLED = 1 << 10,
171 IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
172 IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
173 IB_PORT_CM_SUP = 1 << 16,
174 IB_PORT_SNMP_TUNNEL_SUP = 1 << 17,
175 IB_PORT_REINIT_SUP = 1 << 18,
176 IB_PORT_DEVICE_MGMT_SUP = 1 << 19,
177 IB_PORT_VENDOR_CLASS_SUP = 1 << 20,
178 IB_PORT_DR_NOTICE_SUP = 1 << 21,
179 IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
180 IB_PORT_BOOT_MGMT_SUP = 1 << 23,
181 IB_PORT_LINK_LATENCY_SUP = 1 << 24,
182 IB_PORT_CLIENT_REG_SUP = 1 << 25
183};
184
185enum ib_port_width {
186 IB_WIDTH_1X = 1,
187 IB_WIDTH_4X = 2,
188 IB_WIDTH_8X = 4,
189 IB_WIDTH_12X = 8
190};
191
192static inline int ib_width_enum_to_int(enum ib_port_width width)
193{
194 switch (width) {
195 case IB_WIDTH_1X: return 1;
196 case IB_WIDTH_4X: return 4;
197 case IB_WIDTH_8X: return 8;
198 case IB_WIDTH_12X: return 12;
199 default: return -1;
200 }
201}
202
203struct ib_port_attr {
204 enum ib_port_state state;
205 enum ib_mtu max_mtu;
206 enum ib_mtu active_mtu;
207 int gid_tbl_len;
208 u32 port_cap_flags;
209 u32 max_msg_sz;
210 u32 bad_pkey_cntr;
211 u32 qkey_viol_cntr;
212 u16 pkey_tbl_len;
213 u16 lid;
214 u16 sm_lid;
215 u8 lmc;
216 u8 max_vl_num;
217 u8 sm_sl;
218 u8 subnet_timeout;
219 u8 init_type_reply;
220 u8 active_width;
221 u8 active_speed;
222 u8 phys_state;
223};
224
225enum ib_device_modify_flags {
226 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1
227};
228
229struct ib_device_modify {
230 u64 sys_image_guid;
231};
232
233enum ib_port_modify_flags {
234 IB_PORT_SHUTDOWN = 1,
235 IB_PORT_INIT_TYPE = (1<<2),
236 IB_PORT_RESET_QKEY_CNTR = (1<<3)
237};
238
239struct ib_port_modify {
240 u32 set_port_cap_mask;
241 u32 clr_port_cap_mask;
242 u8 init_type;
243};
244
245enum ib_event_type {
246 IB_EVENT_CQ_ERR,
247 IB_EVENT_QP_FATAL,
248 IB_EVENT_QP_REQ_ERR,
249 IB_EVENT_QP_ACCESS_ERR,
250 IB_EVENT_COMM_EST,
251 IB_EVENT_SQ_DRAINED,
252 IB_EVENT_PATH_MIG,
253 IB_EVENT_PATH_MIG_ERR,
254 IB_EVENT_DEVICE_FATAL,
255 IB_EVENT_PORT_ACTIVE,
256 IB_EVENT_PORT_ERR,
257 IB_EVENT_LID_CHANGE,
258 IB_EVENT_PKEY_CHANGE,
259 IB_EVENT_SM_CHANGE,
260 IB_EVENT_SRQ_ERR,
261 IB_EVENT_SRQ_LIMIT_REACHED,
262 IB_EVENT_QP_LAST_WQE_REACHED
263};
264
265struct ib_event {
266 struct ib_device *device;
267 union {
268 struct ib_cq *cq;
269 struct ib_qp *qp;
270 struct ib_srq *srq;
271 u8 port_num;
272 } element;
273 enum ib_event_type event;
274};
275
276struct ib_event_handler {
277 struct ib_device *device;
278 void (*handler)(struct ib_event_handler *, struct ib_event *);
279 struct list_head list;
280};
281
282#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \
283 do { \
284 (_ptr)->device = _device; \
285 (_ptr)->handler = _handler; \
286 INIT_LIST_HEAD(&(_ptr)->list); \
287 } while (0)
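
A minimal sketch of how a kernel consumer might use INIT_IB_EVENT_HANDLER together with ib_register_event_handler() (declared further down in this file); the callback body and names are illustrative only.

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

static struct ib_event_handler my_event_handler;

/* Illustrative callback: log port state changes for the watched device. */
static void my_handle_event(struct ib_event_handler *handler,
			    struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_PORT_ERR)
		printk(KERN_INFO "%s: port %d changed state (event %d)\n",
		       event->device->name, event->element.port_num,
		       event->event);
}

/* Typically called from an ib_client add() callback with the new device. */
static int my_watch_device(struct ib_device *device)
{
	INIT_IB_EVENT_HANDLER(&my_event_handler, device, my_handle_event);
	return ib_register_event_handler(&my_event_handler);
}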
288
289struct ib_global_route {
290 union ib_gid dgid;
291 u32 flow_label;
292 u8 sgid_index;
293 u8 hop_limit;
294 u8 traffic_class;
295};
296
297struct ib_grh {
298 __be32 version_tclass_flow;
299 __be16 paylen;
300 u8 next_hdr;
301 u8 hop_limit;
302 union ib_gid sgid;
303 union ib_gid dgid;
304};
305
306enum {
307 IB_MULTICAST_QPN = 0xffffff
308};
309
310#define IB_LID_PERMISSIVE __constant_htons(0xFFFF)
311
312enum ib_ah_flags {
313 IB_AH_GRH = 1
314};
315
316struct ib_ah_attr {
317 struct ib_global_route grh;
318 u16 dlid;
319 u8 sl;
320 u8 src_path_bits;
321 u8 static_rate;
322 u8 ah_flags;
323 u8 port_num;
324};
325
326enum ib_wc_status {
327 IB_WC_SUCCESS,
328 IB_WC_LOC_LEN_ERR,
329 IB_WC_LOC_QP_OP_ERR,
330 IB_WC_LOC_EEC_OP_ERR,
331 IB_WC_LOC_PROT_ERR,
332 IB_WC_WR_FLUSH_ERR,
333 IB_WC_MW_BIND_ERR,
334 IB_WC_BAD_RESP_ERR,
335 IB_WC_LOC_ACCESS_ERR,
336 IB_WC_REM_INV_REQ_ERR,
337 IB_WC_REM_ACCESS_ERR,
338 IB_WC_REM_OP_ERR,
339 IB_WC_RETRY_EXC_ERR,
340 IB_WC_RNR_RETRY_EXC_ERR,
341 IB_WC_LOC_RDD_VIOL_ERR,
342 IB_WC_REM_INV_RD_REQ_ERR,
343 IB_WC_REM_ABORT_ERR,
344 IB_WC_INV_EECN_ERR,
345 IB_WC_INV_EEC_STATE_ERR,
346 IB_WC_FATAL_ERR,
347 IB_WC_RESP_TIMEOUT_ERR,
348 IB_WC_GENERAL_ERR
349};
350
351enum ib_wc_opcode {
352 IB_WC_SEND,
353 IB_WC_RDMA_WRITE,
354 IB_WC_RDMA_READ,
355 IB_WC_COMP_SWAP,
356 IB_WC_FETCH_ADD,
357 IB_WC_BIND_MW,
358/*
359 * Set value of IB_WC_RECV so consumers can test if a completion is a
360 * receive by testing (opcode & IB_WC_RECV).
361 */
362 IB_WC_RECV = 1 << 7,
363 IB_WC_RECV_RDMA_WITH_IMM
364};
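
The bit-7 placement called out in the comment above lets consumers classify completions without enumerating every receive opcode; a trivial sketch:

#include <rdma/ib_verbs.h>

/* Sketch: true for IB_WC_RECV and IB_WC_RECV_RDMA_WITH_IMM completions. */
static inline int wc_is_receive(const struct ib_wc *wc)
{
	return (wc->opcode & IB_WC_RECV) != 0;
}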
365
366enum ib_wc_flags {
367 IB_WC_GRH = 1,
368 IB_WC_WITH_IMM = (1<<1)
369};
370
371struct ib_wc {
372 u64 wr_id;
373 enum ib_wc_status status;
374 enum ib_wc_opcode opcode;
375 u32 vendor_err;
376 u32 byte_len;
377 __be32 imm_data;
378 u32 qp_num;
379 u32 src_qp;
380 int wc_flags;
381 u16 pkey_index;
382 u16 slid;
383 u8 sl;
384 u8 dlid_path_bits;
385 u8 port_num; /* valid only for DR SMPs on switches */
386};
387
388enum ib_cq_notify {
389 IB_CQ_SOLICITED,
390 IB_CQ_NEXT_COMP
391};
392
393enum ib_srq_attr_mask {
394 IB_SRQ_MAX_WR = 1 << 0,
395 IB_SRQ_LIMIT = 1 << 1,
396};
397
398struct ib_srq_attr {
399 u32 max_wr;
400 u32 max_sge;
401 u32 srq_limit;
402};
403
404struct ib_srq_init_attr {
405 void (*event_handler)(struct ib_event *, void *);
406 void *srq_context;
407 struct ib_srq_attr attr;
408};
409
410struct ib_qp_cap {
411 u32 max_send_wr;
412 u32 max_recv_wr;
413 u32 max_send_sge;
414 u32 max_recv_sge;
415 u32 max_inline_data;
416};
417
418enum ib_sig_type {
419 IB_SIGNAL_ALL_WR,
420 IB_SIGNAL_REQ_WR
421};
422
423enum ib_qp_type {
424 /*
425 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
426 * here (and in that order) since the MAD layer uses them as
427 * indices into a 2-entry table.
428 */
429 IB_QPT_SMI,
430 IB_QPT_GSI,
431
432 IB_QPT_RC,
433 IB_QPT_UC,
434 IB_QPT_UD,
435 IB_QPT_RAW_IPV6,
436 IB_QPT_RAW_ETY
437};
438
439struct ib_qp_init_attr {
440 void (*event_handler)(struct ib_event *, void *);
441 void *qp_context;
442 struct ib_cq *send_cq;
443 struct ib_cq *recv_cq;
444 struct ib_srq *srq;
445 struct ib_qp_cap cap;
446 enum ib_sig_type sq_sig_type;
447 enum ib_qp_type qp_type;
448 u8 port_num; /* special QP types only */
449};
450
451enum ib_rnr_timeout {
452 IB_RNR_TIMER_655_36 = 0,
453 IB_RNR_TIMER_000_01 = 1,
454 IB_RNR_TIMER_000_02 = 2,
455 IB_RNR_TIMER_000_03 = 3,
456 IB_RNR_TIMER_000_04 = 4,
457 IB_RNR_TIMER_000_06 = 5,
458 IB_RNR_TIMER_000_08 = 6,
459 IB_RNR_TIMER_000_12 = 7,
460 IB_RNR_TIMER_000_16 = 8,
461 IB_RNR_TIMER_000_24 = 9,
462 IB_RNR_TIMER_000_32 = 10,
463 IB_RNR_TIMER_000_48 = 11,
464 IB_RNR_TIMER_000_64 = 12,
465 IB_RNR_TIMER_000_96 = 13,
466 IB_RNR_TIMER_001_28 = 14,
467 IB_RNR_TIMER_001_92 = 15,
468 IB_RNR_TIMER_002_56 = 16,
469 IB_RNR_TIMER_003_84 = 17,
470 IB_RNR_TIMER_005_12 = 18,
471 IB_RNR_TIMER_007_68 = 19,
472 IB_RNR_TIMER_010_24 = 20,
473 IB_RNR_TIMER_015_36 = 21,
474 IB_RNR_TIMER_020_48 = 22,
475 IB_RNR_TIMER_030_72 = 23,
476 IB_RNR_TIMER_040_96 = 24,
477 IB_RNR_TIMER_061_44 = 25,
478 IB_RNR_TIMER_081_92 = 26,
479 IB_RNR_TIMER_122_88 = 27,
480 IB_RNR_TIMER_163_84 = 28,
481 IB_RNR_TIMER_245_76 = 29,
482 IB_RNR_TIMER_327_68 = 30,
483 IB_RNR_TIMER_491_52 = 31
484};
485
486enum ib_qp_attr_mask {
487 IB_QP_STATE = 1,
488 IB_QP_CUR_STATE = (1<<1),
489 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
490 IB_QP_ACCESS_FLAGS = (1<<3),
491 IB_QP_PKEY_INDEX = (1<<4),
492 IB_QP_PORT = (1<<5),
493 IB_QP_QKEY = (1<<6),
494 IB_QP_AV = (1<<7),
495 IB_QP_PATH_MTU = (1<<8),
496 IB_QP_TIMEOUT = (1<<9),
497 IB_QP_RETRY_CNT = (1<<10),
498 IB_QP_RNR_RETRY = (1<<11),
499 IB_QP_RQ_PSN = (1<<12),
500 IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
501 IB_QP_ALT_PATH = (1<<14),
502 IB_QP_MIN_RNR_TIMER = (1<<15),
503 IB_QP_SQ_PSN = (1<<16),
504 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
505 IB_QP_PATH_MIG_STATE = (1<<18),
506 IB_QP_CAP = (1<<19),
507 IB_QP_DEST_QPN = (1<<20)
508};
509
510enum ib_qp_state {
511 IB_QPS_RESET,
512 IB_QPS_INIT,
513 IB_QPS_RTR,
514 IB_QPS_RTS,
515 IB_QPS_SQD,
516 IB_QPS_SQE,
517 IB_QPS_ERR
518};
519
520enum ib_mig_state {
521 IB_MIG_MIGRATED,
522 IB_MIG_REARM,
523 IB_MIG_ARMED
524};
525
526struct ib_qp_attr {
527 enum ib_qp_state qp_state;
528 enum ib_qp_state cur_qp_state;
529 enum ib_mtu path_mtu;
530 enum ib_mig_state path_mig_state;
531 u32 qkey;
532 u32 rq_psn;
533 u32 sq_psn;
534 u32 dest_qp_num;
535 int qp_access_flags;
536 struct ib_qp_cap cap;
537 struct ib_ah_attr ah_attr;
538 struct ib_ah_attr alt_ah_attr;
539 u16 pkey_index;
540 u16 alt_pkey_index;
541 u8 en_sqd_async_notify;
542 u8 sq_draining;
543 u8 max_rd_atomic;
544 u8 max_dest_rd_atomic;
545 u8 min_rnr_timer;
546 u8 port_num;
547 u8 timeout;
548 u8 retry_cnt;
549 u8 rnr_retry;
550 u8 alt_port_num;
551 u8 alt_timeout;
552};
553
554enum ib_wr_opcode {
555 IB_WR_RDMA_WRITE,
556 IB_WR_RDMA_WRITE_WITH_IMM,
557 IB_WR_SEND,
558 IB_WR_SEND_WITH_IMM,
559 IB_WR_RDMA_READ,
560 IB_WR_ATOMIC_CMP_AND_SWP,
561 IB_WR_ATOMIC_FETCH_AND_ADD
562};
563
564enum ib_send_flags {
565 IB_SEND_FENCE = 1,
566 IB_SEND_SIGNALED = (1<<1),
567 IB_SEND_SOLICITED = (1<<2),
568 IB_SEND_INLINE = (1<<3)
569};
570
571struct ib_sge {
572 u64 addr;
573 u32 length;
574 u32 lkey;
575};
576
577struct ib_send_wr {
578 struct ib_send_wr *next;
579 u64 wr_id;
580 struct ib_sge *sg_list;
581 int num_sge;
582 enum ib_wr_opcode opcode;
583 int send_flags;
584 __be32 imm_data;
585 union {
586 struct {
587 u64 remote_addr;
588 u32 rkey;
589 } rdma;
590 struct {
591 u64 remote_addr;
592 u64 compare_add;
593 u64 swap;
594 u32 rkey;
595 } atomic;
596 struct {
597 struct ib_ah *ah;
598 struct ib_mad_hdr *mad_hdr;
599 u32 remote_qpn;
600 u32 remote_qkey;
601 int timeout_ms; /* valid for MADs only */
602 int retries; /* valid for MADs only */
603 u16 pkey_index; /* valid for GSI only */
604 u8 port_num; /* valid for DR SMPs on switch only */
605 } ud;
606 } wr;
607};
608
609struct ib_recv_wr {
610 struct ib_recv_wr *next;
611 u64 wr_id;
612 struct ib_sge *sg_list;
613 int num_sge;
614};
615
616enum ib_access_flags {
617 IB_ACCESS_LOCAL_WRITE = 1,
618 IB_ACCESS_REMOTE_WRITE = (1<<1),
619 IB_ACCESS_REMOTE_READ = (1<<2),
620 IB_ACCESS_REMOTE_ATOMIC = (1<<3),
621 IB_ACCESS_MW_BIND = (1<<4)
622};
623
624struct ib_phys_buf {
625 u64 addr;
626 u64 size;
627};
628
629struct ib_mr_attr {
630 struct ib_pd *pd;
631 u64 device_virt_addr;
632 u64 size;
633 int mr_access_flags;
634 u32 lkey;
635 u32 rkey;
636};
637
638enum ib_mr_rereg_flags {
639 IB_MR_REREG_TRANS = 1,
640 IB_MR_REREG_PD = (1<<1),
641 IB_MR_REREG_ACCESS = (1<<2)
642};
643
644struct ib_mw_bind {
645 struct ib_mr *mr;
646 u64 wr_id;
647 u64 addr;
648 u32 length;
649 int send_flags;
650 int mw_access_flags;
651};
652
653struct ib_fmr_attr {
654 int max_pages;
655 int max_maps;
656 u8 page_size;
657};
658
659struct ib_ucontext {
660 struct ib_device *device;
661 struct list_head pd_list;
662 struct list_head mr_list;
663 struct list_head mw_list;
664 struct list_head cq_list;
665 struct list_head qp_list;
666 struct list_head srq_list;
667 struct list_head ah_list;
668 spinlock_t lock;
669};
670
671struct ib_uobject {
672 u64 user_handle; /* handle given to us by userspace */
673 struct ib_ucontext *context; /* associated user context */
674 struct list_head list; /* link to context's list */
675 u32 id; /* index into kernel idr */
676};
677
678struct ib_umem {
679 unsigned long user_base;
680 unsigned long virt_base;
681 size_t length;
682 int offset;
683 int page_size;
684 int writable;
685 struct list_head chunk_list;
686};
687
688struct ib_umem_chunk {
689 struct list_head list;
690 int nents;
691 int nmap;
692 struct scatterlist page_list[0];
693};
694
695struct ib_udata {
696 void __user *inbuf;
697 void __user *outbuf;
698 size_t inlen;
699 size_t outlen;
700};
701
702#define IB_UMEM_MAX_PAGE_CHUNK \
703 ((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) / \
704 ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] - \
705 (void *) &((struct ib_umem_chunk *) 0)->page_list[0]))
706
707struct ib_umem_object {
708 struct ib_uobject uobject;
709 struct ib_umem umem;
710};
711
712struct ib_pd {
713 struct ib_device *device;
714 struct ib_uobject *uobject;
715 atomic_t usecnt; /* count all resources */
716};
717
718struct ib_ah {
719 struct ib_device *device;
720 struct ib_pd *pd;
721 struct ib_uobject *uobject;
722};
723
724typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
725
726struct ib_cq {
727 struct ib_device *device;
728 struct ib_uobject *uobject;
729 ib_comp_handler comp_handler;
730 void (*event_handler)(struct ib_event *, void *);
731 void * cq_context;
732 int cqe;
733 atomic_t usecnt; /* count number of work queues */
734};
735
736struct ib_srq {
737 struct ib_device *device;
738 struct ib_pd *pd;
739 struct ib_uobject *uobject;
740 void (*event_handler)(struct ib_event *, void *);
741 void *srq_context;
742 atomic_t usecnt;
743};
744
745struct ib_qp {
746 struct ib_device *device;
747 struct ib_pd *pd;
748 struct ib_cq *send_cq;
749 struct ib_cq *recv_cq;
750 struct ib_srq *srq;
751 struct ib_uobject *uobject;
752 void (*event_handler)(struct ib_event *, void *);
753 void *qp_context;
754 u32 qp_num;
755 enum ib_qp_type qp_type;
756};
757
758struct ib_mr {
759 struct ib_device *device;
760 struct ib_pd *pd;
761 struct ib_uobject *uobject;
762 u32 lkey;
763 u32 rkey;
764 atomic_t usecnt; /* count number of MWs */
765};
766
767struct ib_mw {
768 struct ib_device *device;
769 struct ib_pd *pd;
770 struct ib_uobject *uobject;
771 u32 rkey;
772};
773
774struct ib_fmr {
775 struct ib_device *device;
776 struct ib_pd *pd;
777 struct list_head list;
778 u32 lkey;
779 u32 rkey;
780};
781
782struct ib_mad;
783struct ib_grh;
784
785enum ib_process_mad_flags {
786 IB_MAD_IGNORE_MKEY = 1,
787 IB_MAD_IGNORE_BKEY = 2,
788 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
789};
790
791enum ib_mad_result {
792 IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */
793 IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */
794 IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */
795 IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */
796};
797
798#define IB_DEVICE_NAME_MAX 64
799
800struct ib_cache {
801 rwlock_t lock;
802 struct ib_event_handler event_handler;
803 struct ib_pkey_cache **pkey_cache;
804 struct ib_gid_cache **gid_cache;
805};
806
807struct ib_device {
808 struct device *dma_device;
809
810 char name[IB_DEVICE_NAME_MAX];
811
812 struct list_head event_handler_list;
813 spinlock_t event_handler_lock;
814
815 struct list_head core_list;
816 struct list_head client_data_list;
817 spinlock_t client_data_lock;
818
819 struct ib_cache cache;
820
821 u32 flags;
822
823 int (*query_device)(struct ib_device *device,
824 struct ib_device_attr *device_attr);
825 int (*query_port)(struct ib_device *device,
826 u8 port_num,
827 struct ib_port_attr *port_attr);
828 int (*query_gid)(struct ib_device *device,
829 u8 port_num, int index,
830 union ib_gid *gid);
831 int (*query_pkey)(struct ib_device *device,
832 u8 port_num, u16 index, u16 *pkey);
833 int (*modify_device)(struct ib_device *device,
834 int device_modify_mask,
835 struct ib_device_modify *device_modify);
836 int (*modify_port)(struct ib_device *device,
837 u8 port_num, int port_modify_mask,
838 struct ib_port_modify *port_modify);
839 struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device,
840 struct ib_udata *udata);
841 int (*dealloc_ucontext)(struct ib_ucontext *context);
842 int (*mmap)(struct ib_ucontext *context,
843 struct vm_area_struct *vma);
844 struct ib_pd * (*alloc_pd)(struct ib_device *device,
845 struct ib_ucontext *context,
846 struct ib_udata *udata);
847 int (*dealloc_pd)(struct ib_pd *pd);
848 struct ib_ah * (*create_ah)(struct ib_pd *pd,
849 struct ib_ah_attr *ah_attr);
850 int (*modify_ah)(struct ib_ah *ah,
851 struct ib_ah_attr *ah_attr);
852 int (*query_ah)(struct ib_ah *ah,
853 struct ib_ah_attr *ah_attr);
854 int (*destroy_ah)(struct ib_ah *ah);
855 struct ib_srq * (*create_srq)(struct ib_pd *pd,
856 struct ib_srq_init_attr *srq_init_attr,
857 struct ib_udata *udata);
858 int (*modify_srq)(struct ib_srq *srq,
859 struct ib_srq_attr *srq_attr,
860 enum ib_srq_attr_mask srq_attr_mask);
861 int (*query_srq)(struct ib_srq *srq,
862 struct ib_srq_attr *srq_attr);
863 int (*destroy_srq)(struct ib_srq *srq);
864 int (*post_srq_recv)(struct ib_srq *srq,
865 struct ib_recv_wr *recv_wr,
866 struct ib_recv_wr **bad_recv_wr);
867 struct ib_qp * (*create_qp)(struct ib_pd *pd,
868 struct ib_qp_init_attr *qp_init_attr,
869 struct ib_udata *udata);
870 int (*modify_qp)(struct ib_qp *qp,
871 struct ib_qp_attr *qp_attr,
872 int qp_attr_mask);
873 int (*query_qp)(struct ib_qp *qp,
874 struct ib_qp_attr *qp_attr,
875 int qp_attr_mask,
876 struct ib_qp_init_attr *qp_init_attr);
877 int (*destroy_qp)(struct ib_qp *qp);
878 int (*post_send)(struct ib_qp *qp,
879 struct ib_send_wr *send_wr,
880 struct ib_send_wr **bad_send_wr);
881 int (*post_recv)(struct ib_qp *qp,
882 struct ib_recv_wr *recv_wr,
883 struct ib_recv_wr **bad_recv_wr);
884 struct ib_cq * (*create_cq)(struct ib_device *device, int cqe,
885 struct ib_ucontext *context,
886 struct ib_udata *udata);
887 int (*destroy_cq)(struct ib_cq *cq);
888 int (*resize_cq)(struct ib_cq *cq, int *cqe);
889 int (*poll_cq)(struct ib_cq *cq, int num_entries,
890 struct ib_wc *wc);
891 int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
892 int (*req_notify_cq)(struct ib_cq *cq,
893 enum ib_cq_notify cq_notify);
894 int (*req_ncomp_notif)(struct ib_cq *cq,
895 int wc_cnt);
896 struct ib_mr * (*get_dma_mr)(struct ib_pd *pd,
897 int mr_access_flags);
898 struct ib_mr * (*reg_phys_mr)(struct ib_pd *pd,
899 struct ib_phys_buf *phys_buf_array,
900 int num_phys_buf,
901 int mr_access_flags,
902 u64 *iova_start);
903 struct ib_mr * (*reg_user_mr)(struct ib_pd *pd,
904 struct ib_umem *region,
905 int mr_access_flags,
906 struct ib_udata *udata);
907 int (*query_mr)(struct ib_mr *mr,
908 struct ib_mr_attr *mr_attr);
909 int (*dereg_mr)(struct ib_mr *mr);
910 int (*rereg_phys_mr)(struct ib_mr *mr,
911 int mr_rereg_mask,
912 struct ib_pd *pd,
913 struct ib_phys_buf *phys_buf_array,
914 int num_phys_buf,
915 int mr_access_flags,
916 u64 *iova_start);
917 struct ib_mw * (*alloc_mw)(struct ib_pd *pd);
918 int (*bind_mw)(struct ib_qp *qp,
919 struct ib_mw *mw,
920 struct ib_mw_bind *mw_bind);
921 int (*dealloc_mw)(struct ib_mw *mw);
922 struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd,
923 int mr_access_flags,
924 struct ib_fmr_attr *fmr_attr);
925 int (*map_phys_fmr)(struct ib_fmr *fmr,
926 u64 *page_list, int list_len,
927 u64 iova);
928 int (*unmap_fmr)(struct list_head *fmr_list);
929 int (*dealloc_fmr)(struct ib_fmr *fmr);
930 int (*attach_mcast)(struct ib_qp *qp,
931 union ib_gid *gid,
932 u16 lid);
933 int (*detach_mcast)(struct ib_qp *qp,
934 union ib_gid *gid,
935 u16 lid);
936 int (*process_mad)(struct ib_device *device,
937 int process_mad_flags,
938 u8 port_num,
939 struct ib_wc *in_wc,
940 struct ib_grh *in_grh,
941 struct ib_mad *in_mad,
942 struct ib_mad *out_mad);
943
944 struct module *owner;
945 struct class_device class_dev;
946 struct kobject ports_parent;
947 struct list_head port_list;
948
949 enum {
950 IB_DEV_UNINITIALIZED,
951 IB_DEV_REGISTERED,
952 IB_DEV_UNREGISTERED
953 } reg_state;
954
955 u8 node_type;
956 u8 phys_port_cnt;
957};
958
959struct ib_client {
960 char *name;
961 void (*add) (struct ib_device *);
962 void (*remove)(struct ib_device *);
963
964 struct list_head list;
965};
966
967struct ib_device *ib_alloc_device(size_t size);
968void ib_dealloc_device(struct ib_device *device);
969
970int ib_register_device (struct ib_device *device);
971void ib_unregister_device(struct ib_device *device);
972
973int ib_register_client (struct ib_client *client);
974void ib_unregister_client(struct ib_client *client);
975
976void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
977void ib_set_client_data(struct ib_device *device, struct ib_client *client,
978 void *data);
979
980static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
981{
982 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
983}
984
985static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
986{
987 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
988}
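
A hedged sketch of how a low-level driver could use ib_copy_to_udata() from its alloc_pd method to return a hardware PD number to userspace; the driver structures, field names, and the value 42 are invented for illustration.

#include <linux/err.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

struct my_pd {				/* hypothetical driver-private PD */
	struct ib_pd ibpd;
	u32          pdn;
};

struct my_alloc_pd_resp {		/* hypothetical response layout */
	__u32 pdn;
	__u32 reserved;
};

static struct ib_pd *my_alloc_pd(struct ib_device *ibdev,
				 struct ib_ucontext *context,
				 struct ib_udata *udata)
{
	struct my_pd *pd = kmalloc(sizeof *pd, GFP_KERNEL);

	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->pdn = 42;			/* would come from the hardware */

	if (context) {			/* userspace caller: report the PD number */
		struct my_alloc_pd_resp resp = { .pdn = pd->pdn };

		if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}
	}
	return &pd->ibpd;
}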
989
990int ib_register_event_handler (struct ib_event_handler *event_handler);
991int ib_unregister_event_handler(struct ib_event_handler *event_handler);
992void ib_dispatch_event(struct ib_event *event);
993
994int ib_query_device(struct ib_device *device,
995 struct ib_device_attr *device_attr);
996
997int ib_query_port(struct ib_device *device,
998 u8 port_num, struct ib_port_attr *port_attr);
999
1000int ib_query_gid(struct ib_device *device,
1001 u8 port_num, int index, union ib_gid *gid);
1002
1003int ib_query_pkey(struct ib_device *device,
1004 u8 port_num, u16 index, u16 *pkey);
1005
1006int ib_modify_device(struct ib_device *device,
1007 int device_modify_mask,
1008 struct ib_device_modify *device_modify);
1009
1010int ib_modify_port(struct ib_device *device,
1011 u8 port_num, int port_modify_mask,
1012 struct ib_port_modify *port_modify);
1013
1014/**
1015 * ib_alloc_pd - Allocates an unused protection domain.
1016 * @device: The device on which to allocate the protection domain.
1017 *
1018 * A protection domain object provides an association between QPs, shared
1019 * receive queues, address handles, memory regions, and memory windows.
1020 */
1021struct ib_pd *ib_alloc_pd(struct ib_device *device);
1022
1023/**
1024 * ib_dealloc_pd - Deallocates a protection domain.
1025 * @pd: The protection domain to deallocate.
1026 */
1027int ib_dealloc_pd(struct ib_pd *pd);
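
For kernel consumers the call sequence is short; a minimal sketch under the usual ERR_PTR error convention (assumed here, not spelled out by this header):

#include <linux/err.h>
#include <rdma/ib_verbs.h>

static int my_setup_pd(struct ib_device *device, struct ib_pd **pd_out)
{
	struct ib_pd *pd = ib_alloc_pd(device);

	if (IS_ERR(pd))
		return PTR_ERR(pd);

	*pd_out = pd;
	return 0;		/* tear down later with ib_dealloc_pd(pd) */
}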
1028
1029/**
1030 * ib_create_ah - Creates an address handle for the given address vector.
1031 * @pd: The protection domain associated with the address handle.
1032 * @ah_attr: The attributes of the address vector.
1033 *
1034 * The address handle is used to reference a local or global destination
1035 * in all UD QP post sends.
1036 */
1037struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
1038
1039/**
1040 * ib_create_ah_from_wc - Creates an address handle associated with the
1041 * sender of the specified work completion.
1042 * @pd: The protection domain associated with the address handle.
1043 * @wc: Work completion information associated with a received message.
1044 * @grh: References the received global route header. This parameter is
1045 * ignored unless the work completion indicates that the GRH is valid.
1046 * @port_num: The outbound port number to associate with the address.
1047 *
1048 * The address handle is used to reference a local or global destination
1049 * in all UD QP post sends.
1050 */
1051struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
1052 struct ib_grh *grh, u8 port_num);
1053
1054/**
1055 * ib_modify_ah - Modifies the address vector associated with an address
1056 * handle.
1057 * @ah: The address handle to modify.
1058 * @ah_attr: The new address vector attributes to associate with the
1059 * address handle.
1060 */
1061int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1062
1063/**
1064 * ib_query_ah - Queries the address vector associated with an address
1065 * handle.
1066 * @ah: The address handle to query.
1067 * @ah_attr: The address vector attributes associated with the address
1068 * handle.
1069 */
1070int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1071
1072/**
1073 * ib_destroy_ah - Destroys an address handle.
1074 * @ah: The address handle to destroy.
1075 */
1076int ib_destroy_ah(struct ib_ah *ah);
1077
1078/**
1079 * ib_create_srq - Creates a SRQ associated with the specified protection
1080 * domain.
1081 * @pd: The protection domain associated with the SRQ.
1082 * @srq_init_attr: A list of initial attributes required to create the SRQ.
1083 *
1084 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
1085 * requested size of the SRQ, and set to the actual values allocated
1086 * on return. If ib_create_srq() succeeds, then max_wr and max_sge
1087 * will always be at least as large as the requested values.
1088 */
1089struct ib_srq *ib_create_srq(struct ib_pd *pd,
1090 struct ib_srq_init_attr *srq_init_attr);
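
A sketch of the read-back behaviour described above: the requested max_wr/max_sge go in through srq_init_attr and the granted values come back in the same structure. The sizes and the ERR_PTR convention are assumptions for illustration.

#include <linux/err.h>
#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

static struct ib_srq *my_create_srq(struct ib_pd *pd)
{
	struct ib_srq_init_attr init_attr = {
		.attr = {
			.max_wr    = 256,	/* requested depth */
			.max_sge   = 1,
			.srq_limit = 0,		/* no limit event armed yet */
		},
	};
	struct ib_srq *srq = ib_create_srq(pd, &init_attr);

	if (!IS_ERR(srq))
		printk(KERN_INFO "SRQ granted %u WRs, %u SGEs\n",
		       init_attr.attr.max_wr, init_attr.attr.max_sge);
	return srq;
}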
1091
1092/**
1093 * ib_modify_srq - Modifies the attributes for the specified SRQ.
1094 * @srq: The SRQ to modify.
1095 * @srq_attr: On input, specifies the SRQ attributes to modify. On output,
1096 * the current values of selected SRQ attributes are returned.
1097 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
1098 * are being modified.
1099 *
1100 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
1101 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
1102 * the number of receives queued drops below the limit.
1103 */
1104int ib_modify_srq(struct ib_srq *srq,
1105 struct ib_srq_attr *srq_attr,
1106 enum ib_srq_attr_mask srq_attr_mask);
1107
1108/**
1109 * ib_query_srq - Returns the attribute list and current values for the
1110 * specified SRQ.
1111 * @srq: The SRQ to query.
1112 * @srq_attr: The attributes of the specified SRQ.
1113 */
1114int ib_query_srq(struct ib_srq *srq,
1115 struct ib_srq_attr *srq_attr);
1116
1117/**
1118 * ib_destroy_srq - Destroys the specified SRQ.
1119 * @srq: The SRQ to destroy.
1120 */
1121int ib_destroy_srq(struct ib_srq *srq);
1122
1123/**
1124 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
1125 * @srq: The SRQ to post the work request on.
1126 * @recv_wr: A list of work requests to post on the receive queue.
1127 * @bad_recv_wr: On an immediate failure, this parameter will reference
1128 * the work request that failed to be posted on the SRQ.
1129 */
1130static inline int ib_post_srq_recv(struct ib_srq *srq,
1131 struct ib_recv_wr *recv_wr,
1132 struct ib_recv_wr **bad_recv_wr)
1133{
1134 return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
1135}
1136
1137/**
1138 * ib_create_qp - Creates a QP associated with the specified protection
1139 * domain.
1140 * @pd: The protection domain associated with the QP.
1141 * @qp_init_attr: A list of initial attributes required to create the QP.
1142 */
1143struct ib_qp *ib_create_qp(struct ib_pd *pd,
1144 struct ib_qp_init_attr *qp_init_attr);
1145
1146/**
1147 * ib_modify_qp - Modifies the attributes for the specified QP and then
1148 * transitions the QP to the given state.
1149 * @qp: The QP to modify.
1150 * @qp_attr: On input, specifies the QP attributes to modify. On output,
1151 * the current values of selected QP attributes are returned.
1152 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
1153 * are being modified.
1154 */
1155int ib_modify_qp(struct ib_qp *qp,
1156 struct ib_qp_attr *qp_attr,
1157 int qp_attr_mask);
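
As an example of the attribute-mask usage, the customary first transition of an RC QP from RESET to INIT sets the state plus the port, P_Key index, and access flags in one call; the specific values below are illustrative.

#include <rdma/ib_verbs.h>

static int my_qp_to_init(struct ib_qp *qp, u8 port_num, u16 pkey_index)
{
	struct ib_qp_attr attr = {
		.qp_state        = IB_QPS_INIT,
		.pkey_index      = pkey_index,
		.port_num        = port_num,
		.qp_access_flags = IB_ACCESS_REMOTE_READ |
				   IB_ACCESS_REMOTE_WRITE,
	};

	return ib_modify_qp(qp, &attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_PORT  | IB_QP_ACCESS_FLAGS);
}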
1158
1159/**
1160 * ib_query_qp - Returns the attribute list and current values for the
1161 * specified QP.
1162 * @qp: The QP to query.
1163 * @qp_attr: The attributes of the specified QP.
1164 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
1165 * @qp_init_attr: Additional attributes of the selected QP.
1166 *
1167 * The qp_attr_mask may be used to limit the query to gathering only the
1168 * selected attributes.
1169 */
1170int ib_query_qp(struct ib_qp *qp,
1171 struct ib_qp_attr *qp_attr,
1172 int qp_attr_mask,
1173 struct ib_qp_init_attr *qp_init_attr);
1174
1175/**
1176 * ib_destroy_qp - Destroys the specified QP.
1177 * @qp: The QP to destroy.
1178 */
1179int ib_destroy_qp(struct ib_qp *qp);
1180
1181/**
1182 * ib_post_send - Posts a list of work requests to the send queue of
1183 * the specified QP.
1184 * @qp: The QP to post the work request on.
1185 * @send_wr: A list of work requests to post on the send queue.
1186 * @bad_send_wr: On an immediate failure, this parameter will reference
1187 * the work request that failed to be posted on the QP.
1188 */
1189static inline int ib_post_send(struct ib_qp *qp,
1190 struct ib_send_wr *send_wr,
1191 struct ib_send_wr **bad_send_wr)
1192{
1193 return qp->device->post_send(qp, send_wr, bad_send_wr);
1194}
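
A sketch of posting a single signalled SEND of one scatter/gather element; the DMA address and lkey are assumed to come from an already registered memory region.

#include <rdma/ib_verbs.h>

static int my_post_one_send(struct ib_qp *qp, u64 dma_addr, u32 len,
			    u32 lkey, u64 wr_id)
{
	struct ib_sge sge = {
		.addr   = dma_addr,	/* within a registered MR */
		.length = len,
		.lkey   = lkey,
	};
	struct ib_send_wr wr = {
		.wr_id      = wr_id,
		.sg_list    = &sge,
		.num_sge    = 1,
		.opcode     = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
	};
	struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr, &bad_wr);
}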
1195
1196/**
1197 * ib_post_recv - Posts a list of work requests to the receive queue of
1198 * the specified QP.
1199 * @qp: The QP to post the work request on.
1200 * @recv_wr: A list of work requests to post on the receive queue.
1201 * @bad_recv_wr: On an immediate failure, this parameter will reference
1202 * the work request that failed to be posted on the QP.
1203 */
1204static inline int ib_post_recv(struct ib_qp *qp,
1205 struct ib_recv_wr *recv_wr,
1206 struct ib_recv_wr **bad_recv_wr)
1207{
1208 return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
1209}
1210
1211/**
1212 * ib_create_cq - Creates a CQ on the specified device.
1213 * @device: The device on which to create the CQ.
1214 * @comp_handler: A user-specified callback that is invoked when a
1215 * completion event occurs on the CQ.
1216 * @event_handler: A user-specified callback that is invoked when an
1217 * asynchronous event not associated with a completion occurs on the CQ.
1218 * @cq_context: Context associated with the CQ returned to the user via
1219 * the associated completion and event handlers.
1220 * @cqe: The minimum size of the CQ.
1221 *
1222 * Users can examine the cq structure to determine the actual CQ size.
1223 */
1224struct ib_cq *ib_create_cq(struct ib_device *device,
1225 ib_comp_handler comp_handler,
1226 void (*event_handler)(struct ib_event *, void *),
1227 void *cq_context, int cqe);
1228
1229/**
1230 * ib_resize_cq - Modifies the capacity of the CQ.
1231 * @cq: The CQ to resize.
1232 * @cqe: The minimum size of the CQ.
1233 *
1234 * Users can examine the cq structure to determine the actual CQ size.
1235 */
1236int ib_resize_cq(struct ib_cq *cq, int cqe);
1237
1238/**
1239 * ib_destroy_cq - Destroys the specified CQ.
1240 * @cq: The CQ to destroy.
1241 */
1242int ib_destroy_cq(struct ib_cq *cq);
1243
1244/**
1245 * ib_poll_cq - poll a CQ for completion(s)
1246 * @cq:the CQ being polled
1247 * @num_entries:maximum number of completions to return
1248 * @wc:array of at least @num_entries &struct ib_wc where completions
1249 * will be returned
1250 *
1251 * Poll a CQ for (possibly multiple) completions. If the return value
1252 * is < 0, an error occurred. If the return value is >= 0, it is the
1253 * number of completions returned. If the return value is
1254 * non-negative and < num_entries, then the CQ was emptied.
1255 */
1256static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
1257 struct ib_wc *wc)
1258{
1259 return cq->device->poll_cq(cq, num_entries, wc);
1260}
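
A sketch of the usual completion-handler pattern: re-arm the CQ with ib_req_notify_cq() (declared just below) and then drain it with ib_poll_cq(), so that completions arriving during the drain raise another callback. The handler body is illustrative.

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

static void my_cq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct ib_wc wc;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);	/* re-arm before draining */

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status != IB_WC_SUCCESS) {
			printk(KERN_ERR "wr_id %llu failed: status %d\n",
			       (unsigned long long) wc.wr_id, wc.status);
			continue;
		}
		if (wc.opcode & IB_WC_RECV) {
			/* handle a receive completion */
		} else {
			/* handle a send/RDMA completion */
		}
	}
}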
1261
1262/**
1263 * ib_peek_cq - Returns the number of unreaped completions currently
1264 * on the specified CQ.
1265 * @cq: The CQ to peek.
1266 * @wc_cnt: A minimum number of unreaped completions to check for.
1267 *
1268 * If the number of unreaped completions is greater than or equal to wc_cnt,
1269 * this function returns wc_cnt, otherwise, it returns the actual number of
1270 * unreaped completions.
1271 */
1272int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
1273
1274/**
1275 * ib_req_notify_cq - Request completion notification on a CQ.
1276 * @cq: The CQ to generate an event for.
1277 * @cq_notify: If set to %IB_CQ_SOLICITED, completion notification will
1278 * occur on the next solicited event. If set to %IB_CQ_NEXT_COMP,
1279 * notification will occur on the next completion.
1280 */
1281static inline int ib_req_notify_cq(struct ib_cq *cq,
1282 enum ib_cq_notify cq_notify)
1283{
1284 return cq->device->req_notify_cq(cq, cq_notify);
1285}
1286
1287/**
1288 * ib_req_ncomp_notif - Request completion notification when there are
1289 * at least the specified number of unreaped completions on the CQ.
1290 * @cq: The CQ to generate an event for.
1291 * @wc_cnt: The number of unreaped completions that should be on the
1292 * CQ before an event is generated.
1293 */
1294static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
1295{
1296 return cq->device->req_ncomp_notif ?
1297 cq->device->req_ncomp_notif(cq, wc_cnt) :
1298 -ENOSYS;
1299}
1300
1301/**
1302 * ib_get_dma_mr - Returns a memory region for system memory that is
1303 * usable for DMA.
1304 * @pd: The protection domain associated with the memory region.
1305 * @mr_access_flags: Specifies the memory access rights.
1306 */
1307struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
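/*
 * Illustrative sketch: obtaining a DMA MR for local receives.  The
 * access flags chosen here are an assumption for the example; mr->lkey
 * is then used in scatter/gather entries, and ib_dereg_mr() releases
 * the region when it is no longer needed.
 */
static struct ib_mr *example_dma_mr(struct ib_pd *pd)
{
	return ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
}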
1308
1309/**
1310 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
1311 * by an HCA.
1312 * @pd: The protection domain assigned to the registered region.
1313 * @phys_buf_array: Specifies a list of physical buffers to use in the
1314 * memory region.
1315 * @num_phys_buf: Specifies the size of the phys_buf_array.
1316 * @mr_access_flags: Specifies the memory access rights.
1317 * @iova_start: The offset of the region's starting I/O virtual address.
1318 */
1319struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
1320 struct ib_phys_buf *phys_buf_array,
1321 int num_phys_buf,
1322 int mr_access_flags,
1323 u64 *iova_start);
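/*
 * Illustrative sketch: registering one physically contiguous buffer.
 * The DMA address, length and access rights are assumptions for the
 * example; the bus address is reused here as the starting IOVA.
 */
static struct ib_mr *example_reg_one_buf(struct ib_pd *pd,
					 u64 dma_addr, u64 length)
{
	struct ib_phys_buf buf = {
		.addr = dma_addr,
		.size = length,
	};
	u64 iova = dma_addr;

	return ib_reg_phys_mr(pd, &buf, 1,
			      IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
			      &iova);
}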
1324
1325/**
1326 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
1327 * Conceptually, this call performs the functions of deregistering the memory
1328 * region and then registering a physical memory region in its place. Where
1329 * possible, resources are reused rather than deallocated and reallocated.
1330 * @mr: The memory region to modify.
1331 * @mr_rereg_mask: A bit-mask used to indicate which of the following
1332 * properties of the memory region are being modified.
1333 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
1334 * the new protection domain to associate with the memory region,
1335 * otherwise, this parameter is ignored.
1336 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
1337 * field specifies a list of physical buffers to use in the new
1338 * translation, otherwise, this parameter is ignored.
1339 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
1340 * field specifies the size of the phys_buf_array, otherwise, this
1341 * parameter is ignored.
1342 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
1343 * field specifies the new memory access rights, otherwise, this
1344 * parameter is ignored.
1345 * @iova_start: The offset of the region's starting I/O virtual address.
1346 */
1347int ib_rereg_phys_mr(struct ib_mr *mr,
1348 int mr_rereg_mask,
1349 struct ib_pd *pd,
1350 struct ib_phys_buf *phys_buf_array,
1351 int num_phys_buf,
1352 int mr_access_flags,
1353 u64 *iova_start);
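/*
 * Illustrative sketch: changing only the access rights of an existing
 * region.  Because only IB_MR_REREG_ACCESS is set in the mask, the PD
 * and translation arguments are ignored per the comment above and are
 * passed as NULL/0 here; the flag combination is an assumption for the
 * example.
 */
static int example_make_region_remote_readable(struct ib_mr *mr)
{
	return ib_rereg_phys_mr(mr, IB_MR_REREG_ACCESS, NULL, NULL, 0,
				IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
				NULL);
}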
1354
1355/**
1356 * ib_query_mr - Retrieves information about a specific memory region.
1357 * @mr: The memory region to retrieve information about.
1358 * @mr_attr: The attributes of the specified memory region.
1359 */
1360int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
1361
1362/**
1363 * ib_dereg_mr - Deregisters a memory region and removes it from the
1364 * HCA translation table.
1365 * @mr: The memory region to deregister.
1366 */
1367int ib_dereg_mr(struct ib_mr *mr);
1368
1369/**
1370 * ib_alloc_mw - Allocates a memory window.
1371 * @pd: The protection domain associated with the memory window.
1372 */
1373struct ib_mw *ib_alloc_mw(struct ib_pd *pd);
1374
1375/**
1376 * ib_bind_mw - Posts a work request to the send queue of the specified
1377 * QP, which binds the memory window to the given address range and
1378 * remote access attributes.
1379 * @qp: QP to post the bind work request on.
1380 * @mw: The memory window to bind.
1381 * @mw_bind: Specifies information about the memory window, including
1382 * its address range, remote access rights, and associated memory region.
1383 */
1384static inline int ib_bind_mw(struct ib_qp *qp,
1385 struct ib_mw *mw,
1386 struct ib_mw_bind *mw_bind)
1387{
1388 /* XXX reference counting in corresponding MR? */
1389 return mw->device->bind_mw ?
1390 mw->device->bind_mw(qp, mw, mw_bind) :
1391 -ENOSYS;
1392}
1393
1394/**
1395 * ib_dealloc_mw - Deallocates a memory window.
1396 * @mw: The memory window to deallocate.
1397 */
1398int ib_dealloc_mw(struct ib_mw *mw);
1399
1400/**
1401 * ib_alloc_fmr - Allocates an unmapped fast memory region.
1402 * @pd: The protection domain associated with the unmapped region.
1403 * @mr_access_flags: Specifies the memory access rights.
1404 * @fmr_attr: Attributes of the unmapped region.
1405 *
1406 * A fast memory region must be mapped before it can be used as part of
1407 * a work request.
1408 */
1409struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
1410 int mr_access_flags,
1411 struct ib_fmr_attr *fmr_attr);
1412
1413/**
1414 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
1415 * @fmr: The fast memory region to associate with the pages.
1416 * @page_list: An array of physical pages to map to the fast memory region.
1417 * @list_len: The number of pages in page_list.
1418 * @iova: The I/O virtual address to use with the mapped region.
1419 */
1420static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
1421 u64 *page_list, int list_len,
1422 u64 iova)
1423{
1424 return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
1425}
1426
1427/**
1428 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
1429 * @fmr_list: A linked list of fast memory regions to unmap.
1430 */
1431int ib_unmap_fmr(struct list_head *fmr_list);
1432
1433/**
1434 * ib_dealloc_fmr - Deallocates a fast memory region.
1435 * @fmr: The fast memory region to deallocate.
1436 */
1437int ib_dealloc_fmr(struct ib_fmr *fmr);
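/*
 * Illustrative sketch: the alloc/map/unmap/dealloc cycle of a fast
 * memory region.  The page array, its length, the IOVA and the access
 * rights are assumptions for the example, and fmr_attr is assumed to
 * be filled in by the caller; the list_head embedded in struct ib_fmr
 * is used to chain regions for ib_unmap_fmr().
 */
#include <linux/err.h>
#include <linux/list.h>
#include <rdma/ib_verbs.h>

static int example_fmr_cycle(struct ib_pd *pd, struct ib_fmr_attr *fmr_attr,
			     u64 *pages, int npages, u64 iova)
{
	LIST_HEAD(fmr_list);
	struct ib_fmr *fmr;
	int ret;

	fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE, fmr_attr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	/* the region must be mapped before it is used in a work request */
	ret = ib_map_phys_fmr(fmr, pages, npages, iova);
	if (ret)
		goto out;

	/* ... post work requests that reference the mapped region ... */

	/* unmapping takes a list so several FMRs can be flushed at once */
	list_add(&fmr->list, &fmr_list);
	ret = ib_unmap_fmr(&fmr_list);
out:
	ib_dealloc_fmr(fmr);
	return ret;
}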
1438
1439/**
1440 * ib_attach_mcast - Attaches the specified QP to a multicast group.
1441 * @qp: QP to attach to the multicast group. The QP must be type
1442 * IB_QPT_UD.
1443 * @gid: Multicast group GID.
1444 * @lid: Multicast group LID in host byte order.
1445 *
1446 * In order to send and receive multicast packets, subnet
1447 * administration must have created the multicast group and configured
1448 * the fabric appropriately. The port associated with the specified
1449 * QP must also be a member of the multicast group.
1450 */
1451int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
1452
1453/**
1454 * ib_detach_mcast - Detaches the specified QP from a multicast group.
1455 * @qp: QP to detach from the multicast group.
1456 * @gid: Multicast group GID.
1457 * @lid: Multicast group LID in host byte order.
1458 */
1459int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
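/*
 * Illustrative sketch: joining and leaving a multicast group on a UD
 * QP.  The GID and LID are assumed to have been obtained from subnet
 * administration beforehand, as the comments above require.
 */
static int example_mcast_round_trip(struct ib_qp *qp,
				    union ib_gid *mgid, u16 mlid)
{
	int ret = ib_attach_mcast(qp, mgid, mlid);

	if (ret)
		return ret;

	/* ... send and receive multicast traffic on the UD QP ... */

	return ib_detach_mcast(qp, mgid, mlid);
}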
1460
1461#endif /* IB_VERBS_H */
diff --git a/include/scsi/scsi_transport.h b/include/scsi/scsi_transport.h
index a4f1837a33b1..f6e0bb484c63 100644
--- a/include/scsi/scsi_transport.h
+++ b/include/scsi/scsi_transport.h
@@ -29,6 +29,14 @@ struct scsi_transport_template {
29 struct transport_container target_attrs; 29 struct transport_container target_attrs;
30 struct transport_container device_attrs; 30 struct transport_container device_attrs;
31 31
32 /*
33 * If set, call target_parent prior to allocating a scsi_target,
34 * so we get the appropriate parent for the target. This function
35 * is required for transports like FC and iSCSI that do not put the
36 * scsi_target under scsi_host.
37 */
38 struct device *(*target_parent)(struct Scsi_Host *, int, uint);
39
32 /* The size of the specific transport attribute structure (a 40 /* The size of the specific transport attribute structure (a
33 * space of this size will be left at the end of the 41 * space of this size will be left at the end of the
34 * scsi_* structure */ 42 * scsi_* structure */
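/*
 * Illustrative sketch (not from this patch): a transport supplying
 * target_parent so that new scsi_targets are parented under its own
 * remote-port device rather than the scsi_host.  The lookup helper
 * example_find_rport_dev() is hypothetical.
 */
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>

static struct device *example_find_rport_dev(struct Scsi_Host *shost,
					     int channel, uint id);

static struct device *example_target_parent(struct Scsi_Host *shost,
					    int channel, uint id)
{
	return example_find_rport_dev(shost, channel, id);
}

static struct scsi_transport_template example_template = {
	.target_parent = example_target_parent,
};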
diff --git a/include/sound/core.h b/include/sound/core.h
index 38b357fc8958..f72b3ef515e2 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -360,11 +360,13 @@ int snd_device_free_all(snd_card_t *card, snd_device_cmd_t cmd);
360 360
361/* isadma.c */ 361/* isadma.c */
362 362
363#ifdef CONFIG_ISA_DMA_API
363#define DMA_MODE_NO_ENABLE 0x0100 364#define DMA_MODE_NO_ENABLE 0x0100
364 365
365void snd_dma_program(unsigned long dma, unsigned long addr, unsigned int size, unsigned short mode); 366void snd_dma_program(unsigned long dma, unsigned long addr, unsigned int size, unsigned short mode);
366void snd_dma_disable(unsigned long dma); 367void snd_dma_disable(unsigned long dma);
367unsigned int snd_dma_pointer(unsigned long dma, unsigned int size); 368unsigned int snd_dma_pointer(unsigned long dma, unsigned int size);
369#endif
368 370
369/* misc.c */ 371/* misc.c */
370 372