122 files changed, 2708 insertions, 15183 deletions
diff --git a/CREDITS b/CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -3738,7 +3738,7 @@ S: 93149 Nittenau
3738 | S: Germany | 3738 | S: Germany |
3739 | 3739 | ||
3740 | N: Gertjan van Wingerde | 3740 | N: Gertjan van Wingerde |
3741 | E: gwingerde@home.nl | 3741 | E: gwingerde@gmail.com |
3742 | D: Ralink rt2x00 WLAN driver | 3742 | D: Ralink rt2x00 WLAN driver |
3743 | D: Minix V2 file-system | 3743 | D: Minix V2 file-system |
3744 | D: Misc fixes | 3744 | D: Misc fixes |
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -566,6 +566,9 @@ KBUILD_CFLAGS += $(call cc-option,-Wdeclaration-after-statement,)
566 | # disable pointer signed / unsigned warnings in gcc 4.0 | 566 | # disable pointer signed / unsigned warnings in gcc 4.0 |
567 | KBUILD_CFLAGS += $(call cc-option,-Wno-pointer-sign,) | 567 | KBUILD_CFLAGS += $(call cc-option,-Wno-pointer-sign,) |
568 | 568 | ||
569 | # disable invalid "can't wrap" optimizations for signed / pointers | ||
570 | KBUILD_CFLAGS += $(call cc-option,-fwrapv) | ||
571 | |||
569 | # Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments | 572 | # Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments |
570 | # But warn user when we do so | 573 | # But warn user when we do so |
571 | warn-assign = \ | 574 | warn-assign = \ |
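
A brief aside on the -fwrapv hunk above (illustrative, not part of this commit): the flag tells gcc that signed integer overflow wraps in two's complement, which keeps overflow checks like the following from being optimized away as "impossible".

/*
 * Minimal sketch, not from the kernel tree: without -fwrapv, gcc may
 * assume signed overflow never happens and fold this test to a constant
 * 0; with -fwrapv the addition is defined to wrap, so the test works.
 */
#include <limits.h>

int add_would_overflow(int a)
{
	return a + 1 < a;	/* true only for a == INT_MAX when wrapping is defined */
}
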
diff --git a/arch/m68k/include/asm/param.h b/arch/m68k/include/asm/param.h
index 40d1112a4588..85c41b75aa78 100644
--- a/arch/m68k/include/asm/param.h
+++ b/arch/m68k/include/asm/param.h
@@ -1,5 +1,26 @@
1 | #ifndef _M68K_PARAM_H | ||
2 | #define _M68K_PARAM_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | # define HZ CONFIG_HZ /* Internal kernel timer frequency */ | ||
6 | # define USER_HZ 100 /* .. some user interfaces are in "ticks" */ | ||
7 | # define CLOCKS_PER_SEC (USER_HZ) /* like times() */ | ||
8 | #endif | ||
9 | |||
10 | #ifndef HZ | ||
11 | #define HZ 100 | ||
12 | #endif | ||
13 | |||
1 | #ifdef __uClinux__ | 14 | #ifdef __uClinux__ |
2 | #include "param_no.h" | 15 | #define EXEC_PAGESIZE 4096 |
3 | #else | 16 | #else |
4 | #include "param_mm.h" | 17 | #define EXEC_PAGESIZE 8192 |
18 | #endif | ||
19 | |||
20 | #ifndef NOGROUP | ||
21 | #define NOGROUP (-1) | ||
5 | #endif | 22 | #endif |
23 | |||
24 | #define MAXHOSTNAMELEN 64 /* max length of hostname */ | ||
25 | |||
26 | #endif /* _M68K_PARAM_H */ | ||
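
As a small illustration of the HZ/USER_HZ split defined above (a hypothetical helper, not part of this commit): kernel-internal jiffies run at HZ, while interfaces such as times() report ticks in USER_HZ.

/*
 * Hypothetical sketch: convert internal jiffies to userspace-visible
 * ticks.  Assumes HZ is an exact multiple of USER_HZ, which holds for
 * the common CONFIG_HZ=100 m68k configurations.
 */
static inline unsigned long example_jiffies_to_user_ticks(unsigned long j)
{
	return j / (HZ / USER_HZ);
}
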
diff --git a/arch/m68k/include/asm/param_mm.h b/arch/m68k/include/asm/param_mm.h
deleted file mode 100644
index 536a27888358..000000000000
--- a/arch/m68k/include/asm/param_mm.h
+++ /dev/null
@@ -1,22 +0,0 @@
1 | #ifndef _M68K_PARAM_H | ||
2 | #define _M68K_PARAM_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | # define HZ CONFIG_HZ /* Internal kernel timer frequency */ | ||
6 | # define USER_HZ 100 /* .. some user interfaces are in "ticks" */ | ||
7 | # define CLOCKS_PER_SEC (USER_HZ) /* like times() */ | ||
8 | #endif | ||
9 | |||
10 | #ifndef HZ | ||
11 | #define HZ 100 | ||
12 | #endif | ||
13 | |||
14 | #define EXEC_PAGESIZE 8192 | ||
15 | |||
16 | #ifndef NOGROUP | ||
17 | #define NOGROUP (-1) | ||
18 | #endif | ||
19 | |||
20 | #define MAXHOSTNAMELEN 64 /* max length of hostname */ | ||
21 | |||
22 | #endif /* _M68K_PARAM_H */ | ||
diff --git a/arch/m68k/include/asm/param_no.h b/arch/m68k/include/asm/param_no.h
deleted file mode 100644
index 6044397adb64..000000000000
--- a/arch/m68k/include/asm/param_no.h
+++ /dev/null
@@ -1,22 +0,0 @@
1 | #ifndef _M68KNOMMU_PARAM_H | ||
2 | #define _M68KNOMMU_PARAM_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | #define HZ CONFIG_HZ | ||
6 | #define USER_HZ HZ | ||
7 | #define CLOCKS_PER_SEC (USER_HZ) | ||
8 | #endif | ||
9 | |||
10 | #ifndef HZ | ||
11 | #define HZ 100 | ||
12 | #endif | ||
13 | |||
14 | #define EXEC_PAGESIZE 4096 | ||
15 | |||
16 | #ifndef NOGROUP | ||
17 | #define NOGROUP (-1) | ||
18 | #endif | ||
19 | |||
20 | #define MAXHOSTNAMELEN 64 /* max length of hostname */ | ||
21 | |||
22 | #endif /* _M68KNOMMU_PARAM_H */ | ||
diff --git a/arch/m68k/include/asm/ptrace.h b/arch/m68k/include/asm/ptrace.h
index e83cd2f66101..8c9194b98548 100644
--- a/arch/m68k/include/asm/ptrace.h
+++ b/arch/m68k/include/asm/ptrace.h
@@ -1,5 +1,87 @@
1 | #ifdef __uClinux__ | 1 | #ifndef _M68K_PTRACE_H |
2 | #include "ptrace_no.h" | 2 | #define _M68K_PTRACE_H |
3 | |||
4 | #define PT_D1 0 | ||
5 | #define PT_D2 1 | ||
6 | #define PT_D3 2 | ||
7 | #define PT_D4 3 | ||
8 | #define PT_D5 4 | ||
9 | #define PT_D6 5 | ||
10 | #define PT_D7 6 | ||
11 | #define PT_A0 7 | ||
12 | #define PT_A1 8 | ||
13 | #define PT_A2 9 | ||
14 | #define PT_A3 10 | ||
15 | #define PT_A4 11 | ||
16 | #define PT_A5 12 | ||
17 | #define PT_A6 13 | ||
18 | #define PT_D0 14 | ||
19 | #define PT_USP 15 | ||
20 | #define PT_ORIG_D0 16 | ||
21 | #define PT_SR 17 | ||
22 | #define PT_PC 18 | ||
23 | |||
24 | #ifndef __ASSEMBLY__ | ||
25 | |||
26 | /* this struct defines the way the registers are stored on the | ||
27 | stack during a system call. */ | ||
28 | |||
29 | struct pt_regs { | ||
30 | long d1; | ||
31 | long d2; | ||
32 | long d3; | ||
33 | long d4; | ||
34 | long d5; | ||
35 | long a0; | ||
36 | long a1; | ||
37 | long a2; | ||
38 | long d0; | ||
39 | long orig_d0; | ||
40 | long stkadj; | ||
41 | #ifdef CONFIG_COLDFIRE | ||
42 | unsigned format : 4; /* frame format specifier */ | ||
43 | unsigned vector : 12; /* vector offset */ | ||
44 | unsigned short sr; | ||
45 | unsigned long pc; | ||
3 | #else | 46 | #else |
4 | #include "ptrace_mm.h" | 47 | unsigned short sr; |
48 | unsigned long pc; | ||
49 | unsigned format : 4; /* frame format specifier */ | ||
50 | unsigned vector : 12; /* vector offset */ | ||
5 | #endif | 51 | #endif |
52 | }; | ||
53 | |||
54 | /* | ||
55 | * This is the extended stack used by signal handlers and the context | ||
56 | * switcher: it's pushed after the normal "struct pt_regs". | ||
57 | */ | ||
58 | struct switch_stack { | ||
59 | unsigned long d6; | ||
60 | unsigned long d7; | ||
61 | unsigned long a3; | ||
62 | unsigned long a4; | ||
63 | unsigned long a5; | ||
64 | unsigned long a6; | ||
65 | unsigned long retpc; | ||
66 | }; | ||
67 | |||
68 | /* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ | ||
69 | #define PTRACE_GETREGS 12 | ||
70 | #define PTRACE_SETREGS 13 | ||
71 | #define PTRACE_GETFPREGS 14 | ||
72 | #define PTRACE_SETFPREGS 15 | ||
73 | |||
74 | #ifdef __KERNEL__ | ||
75 | |||
76 | #ifndef PS_S | ||
77 | #define PS_S (0x2000) | ||
78 | #define PS_M (0x1000) | ||
79 | #endif | ||
80 | |||
81 | #define user_mode(regs) (!((regs)->sr & PS_S)) | ||
82 | #define instruction_pointer(regs) ((regs)->pc) | ||
83 | #define profile_pc(regs) instruction_pointer(regs) | ||
84 | extern void show_regs(struct pt_regs *); | ||
85 | #endif /* __KERNEL__ */ | ||
86 | #endif /* __ASSEMBLY__ */ | ||
87 | #endif /* _M68K_PTRACE_H */ | ||
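
For orientation, a hypothetical sketch (not from this commit) of how the helpers defined above are typically used in exception-handling code:

/*
 * Hypothetical example: user_mode() checks the supervisor bit (PS_S) in
 * the saved status register; instruction_pointer() returns the saved PC.
 */
#include <linux/kernel.h>
#include <asm/ptrace.h>

static void example_report_exception(struct pt_regs *regs)
{
	if (user_mode(regs))
		printk(KERN_INFO "user fault at PC %#lx\n",
		       instruction_pointer(regs));
	else
		show_regs(regs);	/* dump the whole register frame */
}
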
diff --git a/arch/m68k/include/asm/ptrace_mm.h b/arch/m68k/include/asm/ptrace_mm.h
deleted file mode 100644
index 57e763d79bf4..000000000000
--- a/arch/m68k/include/asm/ptrace_mm.h
+++ /dev/null
@@ -1,80 +0,0 @@
1 | #ifndef _M68K_PTRACE_H | ||
2 | #define _M68K_PTRACE_H | ||
3 | |||
4 | #define PT_D1 0 | ||
5 | #define PT_D2 1 | ||
6 | #define PT_D3 2 | ||
7 | #define PT_D4 3 | ||
8 | #define PT_D5 4 | ||
9 | #define PT_D6 5 | ||
10 | #define PT_D7 6 | ||
11 | #define PT_A0 7 | ||
12 | #define PT_A1 8 | ||
13 | #define PT_A2 9 | ||
14 | #define PT_A3 10 | ||
15 | #define PT_A4 11 | ||
16 | #define PT_A5 12 | ||
17 | #define PT_A6 13 | ||
18 | #define PT_D0 14 | ||
19 | #define PT_USP 15 | ||
20 | #define PT_ORIG_D0 16 | ||
21 | #define PT_SR 17 | ||
22 | #define PT_PC 18 | ||
23 | |||
24 | #ifndef __ASSEMBLY__ | ||
25 | |||
26 | /* this struct defines the way the registers are stored on the | ||
27 | stack during a system call. */ | ||
28 | |||
29 | struct pt_regs { | ||
30 | long d1; | ||
31 | long d2; | ||
32 | long d3; | ||
33 | long d4; | ||
34 | long d5; | ||
35 | long a0; | ||
36 | long a1; | ||
37 | long a2; | ||
38 | long d0; | ||
39 | long orig_d0; | ||
40 | long stkadj; | ||
41 | unsigned short sr; | ||
42 | unsigned long pc; | ||
43 | unsigned format : 4; /* frame format specifier */ | ||
44 | unsigned vector : 12; /* vector offset */ | ||
45 | }; | ||
46 | |||
47 | /* | ||
48 | * This is the extended stack used by signal handlers and the context | ||
49 | * switcher: it's pushed after the normal "struct pt_regs". | ||
50 | */ | ||
51 | struct switch_stack { | ||
52 | unsigned long d6; | ||
53 | unsigned long d7; | ||
54 | unsigned long a3; | ||
55 | unsigned long a4; | ||
56 | unsigned long a5; | ||
57 | unsigned long a6; | ||
58 | unsigned long retpc; | ||
59 | }; | ||
60 | |||
61 | /* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ | ||
62 | #define PTRACE_GETREGS 12 | ||
63 | #define PTRACE_SETREGS 13 | ||
64 | #define PTRACE_GETFPREGS 14 | ||
65 | #define PTRACE_SETFPREGS 15 | ||
66 | |||
67 | #ifdef __KERNEL__ | ||
68 | |||
69 | #ifndef PS_S | ||
70 | #define PS_S (0x2000) | ||
71 | #define PS_M (0x1000) | ||
72 | #endif | ||
73 | |||
74 | #define user_mode(regs) (!((regs)->sr & PS_S)) | ||
75 | #define instruction_pointer(regs) ((regs)->pc) | ||
76 | #define profile_pc(regs) instruction_pointer(regs) | ||
77 | extern void show_regs(struct pt_regs *); | ||
78 | #endif /* __KERNEL__ */ | ||
79 | #endif /* __ASSEMBLY__ */ | ||
80 | #endif /* _M68K_PTRACE_H */ | ||
diff --git a/arch/m68k/include/asm/ptrace_no.h b/arch/m68k/include/asm/ptrace_no.h
deleted file mode 100644
index 8c9194b98548..000000000000
--- a/arch/m68k/include/asm/ptrace_no.h
+++ /dev/null
@@ -1,87 +0,0 @@
1 | #ifndef _M68K_PTRACE_H | ||
2 | #define _M68K_PTRACE_H | ||
3 | |||
4 | #define PT_D1 0 | ||
5 | #define PT_D2 1 | ||
6 | #define PT_D3 2 | ||
7 | #define PT_D4 3 | ||
8 | #define PT_D5 4 | ||
9 | #define PT_D6 5 | ||
10 | #define PT_D7 6 | ||
11 | #define PT_A0 7 | ||
12 | #define PT_A1 8 | ||
13 | #define PT_A2 9 | ||
14 | #define PT_A3 10 | ||
15 | #define PT_A4 11 | ||
16 | #define PT_A5 12 | ||
17 | #define PT_A6 13 | ||
18 | #define PT_D0 14 | ||
19 | #define PT_USP 15 | ||
20 | #define PT_ORIG_D0 16 | ||
21 | #define PT_SR 17 | ||
22 | #define PT_PC 18 | ||
23 | |||
24 | #ifndef __ASSEMBLY__ | ||
25 | |||
26 | /* this struct defines the way the registers are stored on the | ||
27 | stack during a system call. */ | ||
28 | |||
29 | struct pt_regs { | ||
30 | long d1; | ||
31 | long d2; | ||
32 | long d3; | ||
33 | long d4; | ||
34 | long d5; | ||
35 | long a0; | ||
36 | long a1; | ||
37 | long a2; | ||
38 | long d0; | ||
39 | long orig_d0; | ||
40 | long stkadj; | ||
41 | #ifdef CONFIG_COLDFIRE | ||
42 | unsigned format : 4; /* frame format specifier */ | ||
43 | unsigned vector : 12; /* vector offset */ | ||
44 | unsigned short sr; | ||
45 | unsigned long pc; | ||
46 | #else | ||
47 | unsigned short sr; | ||
48 | unsigned long pc; | ||
49 | unsigned format : 4; /* frame format specifier */ | ||
50 | unsigned vector : 12; /* vector offset */ | ||
51 | #endif | ||
52 | }; | ||
53 | |||
54 | /* | ||
55 | * This is the extended stack used by signal handlers and the context | ||
56 | * switcher: it's pushed after the normal "struct pt_regs". | ||
57 | */ | ||
58 | struct switch_stack { | ||
59 | unsigned long d6; | ||
60 | unsigned long d7; | ||
61 | unsigned long a3; | ||
62 | unsigned long a4; | ||
63 | unsigned long a5; | ||
64 | unsigned long a6; | ||
65 | unsigned long retpc; | ||
66 | }; | ||
67 | |||
68 | /* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ | ||
69 | #define PTRACE_GETREGS 12 | ||
70 | #define PTRACE_SETREGS 13 | ||
71 | #define PTRACE_GETFPREGS 14 | ||
72 | #define PTRACE_SETFPREGS 15 | ||
73 | |||
74 | #ifdef __KERNEL__ | ||
75 | |||
76 | #ifndef PS_S | ||
77 | #define PS_S (0x2000) | ||
78 | #define PS_M (0x1000) | ||
79 | #endif | ||
80 | |||
81 | #define user_mode(regs) (!((regs)->sr & PS_S)) | ||
82 | #define instruction_pointer(regs) ((regs)->pc) | ||
83 | #define profile_pc(regs) instruction_pointer(regs) | ||
84 | extern void show_regs(struct pt_regs *); | ||
85 | #endif /* __KERNEL__ */ | ||
86 | #endif /* __ASSEMBLY__ */ | ||
87 | #endif /* _M68K_PTRACE_H */ | ||
diff --git a/arch/m68k/include/asm/setup.h b/arch/m68k/include/asm/setup.h
index 842f86f75ccd..4dfb3952b375 100644
--- a/arch/m68k/include/asm/setup.h
+++ b/arch/m68k/include/asm/setup.h
@@ -1,5 +1,376 @@
1 | #ifdef __uClinux__ | 1 | /* |
2 | #include "setup_no.h" | 2 | ** asm/setup.h -- Definition of the Linux/m68k setup information |
3 | ** | ||
4 | ** Copyright 1992 by Greg Harp | ||
5 | ** | ||
6 | ** This file is subject to the terms and conditions of the GNU General Public | ||
7 | ** License. See the file COPYING in the main directory of this archive | ||
8 | ** for more details. | ||
9 | ** | ||
10 | ** Created 09/29/92 by Greg Harp | ||
11 | ** | ||
12 | ** 5/2/94 Roman Hodek: | ||
13 | ** Added bi_atari part of the machine dependent union bi_un; for now it | ||
14 | ** contains just a model field to distinguish between TT and Falcon. | ||
15 | ** 26/7/96 Roman Zippel: | ||
16 | ** Renamed to setup.h; added some useful macros to allow gcc some | ||
17 | ** optimizations if possible. | ||
18 | ** 5/10/96 Geert Uytterhoeven: | ||
19 | ** Redesign of the boot information structure; moved boot information | ||
20 | ** structure to bootinfo.h | ||
21 | */ | ||
22 | |||
23 | #ifndef _M68K_SETUP_H | ||
24 | #define _M68K_SETUP_H | ||
25 | |||
26 | |||
27 | |||
28 | /* | ||
29 | * Linux/m68k Architectures | ||
30 | */ | ||
31 | |||
32 | #define MACH_AMIGA 1 | ||
33 | #define MACH_ATARI 2 | ||
34 | #define MACH_MAC 3 | ||
35 | #define MACH_APOLLO 4 | ||
36 | #define MACH_SUN3 5 | ||
37 | #define MACH_MVME147 6 | ||
38 | #define MACH_MVME16x 7 | ||
39 | #define MACH_BVME6000 8 | ||
40 | #define MACH_HP300 9 | ||
41 | #define MACH_Q40 10 | ||
42 | #define MACH_SUN3X 11 | ||
43 | |||
44 | #define COMMAND_LINE_SIZE 256 | ||
45 | |||
46 | #ifdef __KERNEL__ | ||
47 | |||
48 | #define CL_SIZE COMMAND_LINE_SIZE | ||
49 | |||
50 | #ifndef __ASSEMBLY__ | ||
51 | extern unsigned long m68k_machtype; | ||
52 | #endif /* !__ASSEMBLY__ */ | ||
53 | |||
54 | #if !defined(CONFIG_AMIGA) | ||
55 | # define MACH_IS_AMIGA (0) | ||
56 | #elif defined(CONFIG_ATARI) || defined(CONFIG_MAC) || defined(CONFIG_APOLLO) \ | ||
57 | || defined(CONFIG_MVME16x) || defined(CONFIG_BVME6000) \ | ||
58 | || defined(CONFIG_HP300) || defined(CONFIG_Q40) \ | ||
59 | || defined(CONFIG_SUN3X) || defined(CONFIG_MVME147) | ||
60 | # define MACH_IS_AMIGA (m68k_machtype == MACH_AMIGA) | ||
3 | #else | 61 | #else |
4 | #include "setup_mm.h" | 62 | # define MACH_AMIGA_ONLY |
63 | # define MACH_IS_AMIGA (1) | ||
64 | # define MACH_TYPE (MACH_AMIGA) | ||
5 | #endif | 65 | #endif |
66 | |||
67 | #if !defined(CONFIG_ATARI) | ||
68 | # define MACH_IS_ATARI (0) | ||
69 | #elif defined(CONFIG_AMIGA) || defined(CONFIG_MAC) || defined(CONFIG_APOLLO) \ | ||
70 | || defined(CONFIG_MVME16x) || defined(CONFIG_BVME6000) \ | ||
71 | || defined(CONFIG_HP300) || defined(CONFIG_Q40) \ | ||
72 | || defined(CONFIG_SUN3X) || defined(CONFIG_MVME147) | ||
73 | # define MACH_IS_ATARI (m68k_machtype == MACH_ATARI) | ||
74 | #else | ||
75 | # define MACH_ATARI_ONLY | ||
76 | # define MACH_IS_ATARI (1) | ||
77 | # define MACH_TYPE (MACH_ATARI) | ||
78 | #endif | ||
79 | |||
80 | #if !defined(CONFIG_MAC) | ||
81 | # define MACH_IS_MAC (0) | ||
82 | #elif defined(CONFIG_AMIGA) || defined(CONFIG_ATARI) || defined(CONFIG_APOLLO) \ | ||
83 | || defined(CONFIG_MVME16x) || defined(CONFIG_BVME6000) \ | ||
84 | || defined(CONFIG_HP300) || defined(CONFIG_Q40) \ | ||
85 | || defined(CONFIG_SUN3X) || defined(CONFIG_MVME147) | ||
86 | # define MACH_IS_MAC (m68k_machtype == MACH_MAC) | ||
87 | #else | ||
88 | # define MACH_MAC_ONLY | ||
89 | # define MACH_IS_MAC (1) | ||
90 | # define MACH_TYPE (MACH_MAC) | ||
91 | #endif | ||
92 | |||
93 | #if defined(CONFIG_SUN3) | ||
94 | #define MACH_IS_SUN3 (1) | ||
95 | #define MACH_SUN3_ONLY (1) | ||
96 | #define MACH_TYPE (MACH_SUN3) | ||
97 | #else | ||
98 | #define MACH_IS_SUN3 (0) | ||
99 | #endif | ||
100 | |||
101 | #if !defined (CONFIG_APOLLO) | ||
102 | # define MACH_IS_APOLLO (0) | ||
103 | #elif defined(CONFIG_AMIGA) || defined(CONFIG_MAC) || defined(CONFIG_ATARI) \ | ||
104 | || defined(CONFIG_MVME16x) || defined(CONFIG_BVME6000) \ | ||
105 | || defined(CONFIG_HP300) || defined(CONFIG_Q40) \ | ||
106 | || defined(CONFIG_SUN3X) || defined(CONFIG_MVME147) | ||
107 | # define MACH_IS_APOLLO (m68k_machtype == MACH_APOLLO) | ||
108 | #else | ||
109 | # define MACH_APOLLO_ONLY | ||
110 | # define MACH_IS_APOLLO (1) | ||
111 | # define MACH_TYPE (MACH_APOLLO) | ||
112 | #endif | ||
113 | |||
114 | #if !defined (CONFIG_MVME147) | ||
115 | # define MACH_IS_MVME147 (0) | ||
116 | #elif defined(CONFIG_AMIGA) || defined(CONFIG_MAC) || defined(CONFIG_ATARI) \ | ||
117 | || defined(CONFIG_APOLLO) || defined(CONFIG_BVME6000) \ | ||
118 | || defined(CONFIG_HP300) || defined(CONFIG_Q40) \ | ||
119 | || defined(CONFIG_SUN3X) || defined(CONFIG_MVME16x) | ||
120 | # define MACH_IS_MVME147 (m68k_machtype == MACH_MVME147) | ||
121 | #else | ||
122 | # define MACH_MVME147_ONLY | ||
123 | # define MACH_IS_MVME147 (1) | ||
124 | # define MACH_TYPE (MACH_MVME147) | ||
125 | #endif | ||
126 | |||
127 | #if !defined (CONFIG_MVME16x) | ||
128 | # define MACH_IS_MVME16x (0) | ||
129 | #elif defined(CONFIG_AMIGA) || defined(CONFIG_MAC) || defined(CONFIG_ATARI) \ | ||
130 | || defined(CONFIG_APOLLO) || defined(CONFIG_BVME6000) \ | ||
131 | || defined(CONFIG_HP300) || defined(CONFIG_Q40) \ | ||
132 | || defined(CONFIG_SUN3X) || defined(CONFIG_MVME147) | ||
133 | # define MACH_IS_MVME16x (m68k_machtype == MACH_MVME16x) | ||
134 | #else | ||
135 | # define MACH_MVME16x_ONLY | ||
136 | # define MACH_IS_MVME16x (1) | ||
137 | # define MACH_TYPE (MACH_MVME16x) | ||
138 | #endif | ||
139 | |||
140 | #if !defined (CONFIG_BVME6000) | ||
141 | # define MACH_IS_BVME6000 (0) | ||
142 | #elif defined(CONFIG_AMIGA) || defined(CONFIG_MAC) || defined(CONFIG_ATARI) \ | ||
143 | || defined(CONFIG_APOLLO) || defined(CONFIG_MVME16x) \ | ||
144 | || defined(CONFIG_HP300) || defined(CONFIG_Q40) \ | ||
145 | || defined(CONFIG_SUN3X) || defined(CONFIG_MVME147) | ||
146 | # define MACH_IS_BVME6000 (m68k_machtype == MACH_BVME6000) | ||
147 | #else | ||
148 | # define MACH_BVME6000_ONLY | ||
149 | # define MACH_IS_BVME6000 (1) | ||
150 | # define MACH_TYPE (MACH_BVME6000) | ||
151 | #endif | ||
152 | |||
153 | #if !defined (CONFIG_HP300) | ||
154 | # define MACH_IS_HP300 (0) | ||
155 | #elif defined(CONFIG_AMIGA) || defined(CONFIG_MAC) || defined(CONFIG_ATARI) \ | ||
156 | || defined(CONFIG_APOLLO) || defined(CONFIG_MVME16x) \ | ||
157 | || defined(CONFIG_BVME6000) || defined(CONFIG_Q40) \ | ||
158 | || defined(CONFIG_SUN3X) || defined(CONFIG_MVME147) | ||
159 | # define MACH_IS_HP300 (m68k_machtype == MACH_HP300) | ||
160 | #else | ||
161 | # define MACH_HP300_ONLY | ||
162 | # define MACH_IS_HP300 (1) | ||
163 | # define MACH_TYPE (MACH_HP300) | ||
164 | #endif | ||
165 | |||
166 | #if !defined (CONFIG_Q40) | ||
167 | # define MACH_IS_Q40 (0) | ||
168 | #elif defined(CONFIG_AMIGA) || defined(CONFIG_MAC) || defined(CONFIG_ATARI) \ | ||
169 | || defined(CONFIG_APOLLO) || defined(CONFIG_MVME16x) \ | ||
170 | || defined(CONFIG_BVME6000) || defined(CONFIG_HP300) \ | ||
171 | || defined(CONFIG_SUN3X) || defined(CONFIG_MVME147) | ||
172 | # define MACH_IS_Q40 (m68k_machtype == MACH_Q40) | ||
173 | #else | ||
174 | # define MACH_Q40_ONLY | ||
175 | # define MACH_IS_Q40 (1) | ||
176 | # define MACH_TYPE (MACH_Q40) | ||
177 | #endif | ||
178 | |||
179 | #if !defined (CONFIG_SUN3X) | ||
180 | # define MACH_IS_SUN3X (0) | ||
181 | #elif defined(CONFIG_AMIGA) || defined(CONFIG_MAC) || defined(CONFIG_ATARI) \ | ||
182 | || defined(CONFIG_APOLLO) || defined(CONFIG_MVME16x) \ | ||
183 | || defined(CONFIG_BVME6000) || defined(CONFIG_HP300) \ | ||
184 | || defined(CONFIG_Q40) || defined(CONFIG_MVME147) | ||
185 | # define MACH_IS_SUN3X (m68k_machtype == MACH_SUN3X) | ||
186 | #else | ||
187 | # define CONFIG_SUN3X_ONLY | ||
188 | # define MACH_IS_SUN3X (1) | ||
189 | # define MACH_TYPE (MACH_SUN3X) | ||
190 | #endif | ||
191 | |||
192 | #ifndef MACH_TYPE | ||
193 | # define MACH_TYPE (m68k_machtype) | ||
194 | #endif | ||
195 | |||
196 | #endif /* __KERNEL__ */ | ||
197 | |||
198 | |||
199 | /* | ||
200 | * CPU, FPU and MMU types | ||
201 | * | ||
202 | * Note: we may rely on the following equalities: | ||
203 | * | ||
204 | * CPU_68020 == MMU_68851 | ||
205 | * CPU_68030 == MMU_68030 | ||
206 | * CPU_68040 == FPU_68040 == MMU_68040 | ||
207 | * CPU_68060 == FPU_68060 == MMU_68060 | ||
208 | */ | ||
209 | |||
210 | #define CPUB_68020 0 | ||
211 | #define CPUB_68030 1 | ||
212 | #define CPUB_68040 2 | ||
213 | #define CPUB_68060 3 | ||
214 | |||
215 | #define CPU_68020 (1<<CPUB_68020) | ||
216 | #define CPU_68030 (1<<CPUB_68030) | ||
217 | #define CPU_68040 (1<<CPUB_68040) | ||
218 | #define CPU_68060 (1<<CPUB_68060) | ||
219 | |||
220 | #define FPUB_68881 0 | ||
221 | #define FPUB_68882 1 | ||
222 | #define FPUB_68040 2 /* Internal FPU */ | ||
223 | #define FPUB_68060 3 /* Internal FPU */ | ||
224 | #define FPUB_SUNFPA 4 /* Sun-3 FPA */ | ||
225 | |||
226 | #define FPU_68881 (1<<FPUB_68881) | ||
227 | #define FPU_68882 (1<<FPUB_68882) | ||
228 | #define FPU_68040 (1<<FPUB_68040) | ||
229 | #define FPU_68060 (1<<FPUB_68060) | ||
230 | #define FPU_SUNFPA (1<<FPUB_SUNFPA) | ||
231 | |||
232 | #define MMUB_68851 0 | ||
233 | #define MMUB_68030 1 /* Internal MMU */ | ||
234 | #define MMUB_68040 2 /* Internal MMU */ | ||
235 | #define MMUB_68060 3 /* Internal MMU */ | ||
236 | #define MMUB_APOLLO 4 /* Custom Apollo */ | ||
237 | #define MMUB_SUN3 5 /* Custom Sun-3 */ | ||
238 | |||
239 | #define MMU_68851 (1<<MMUB_68851) | ||
240 | #define MMU_68030 (1<<MMUB_68030) | ||
241 | #define MMU_68040 (1<<MMUB_68040) | ||
242 | #define MMU_68060 (1<<MMUB_68060) | ||
243 | #define MMU_SUN3 (1<<MMUB_SUN3) | ||
244 | #define MMU_APOLLO (1<<MMUB_APOLLO) | ||
245 | |||
246 | #ifdef __KERNEL__ | ||
247 | |||
248 | #ifndef __ASSEMBLY__ | ||
249 | extern unsigned long m68k_cputype; | ||
250 | extern unsigned long m68k_fputype; | ||
251 | extern unsigned long m68k_mmutype; | ||
252 | #ifdef CONFIG_VME | ||
253 | extern unsigned long vme_brdtype; | ||
254 | #endif | ||
255 | |||
256 | /* | ||
257 | * m68k_is040or060 is != 0 for a '040 or higher; | ||
258 | * used numbers are 4 for 68040 and 6 for 68060. | ||
259 | */ | ||
260 | |||
261 | extern int m68k_is040or060; | ||
262 | #endif /* !__ASSEMBLY__ */ | ||
263 | |||
264 | #if !defined(CONFIG_M68020) | ||
265 | # define CPU_IS_020 (0) | ||
266 | # define MMU_IS_851 (0) | ||
267 | # define MMU_IS_SUN3 (0) | ||
268 | #elif defined(CONFIG_M68030) || defined(CONFIG_M68040) || defined(CONFIG_M68060) | ||
269 | # define CPU_IS_020 (m68k_cputype & CPU_68020) | ||
270 | # define MMU_IS_851 (m68k_mmutype & MMU_68851) | ||
271 | # define MMU_IS_SUN3 (0) /* Sun3 not supported with other CPU enabled */ | ||
272 | #else | ||
273 | # define CPU_M68020_ONLY | ||
274 | # define CPU_IS_020 (1) | ||
275 | #ifdef MACH_SUN3_ONLY | ||
276 | # define MMU_IS_SUN3 (1) | ||
277 | # define MMU_IS_851 (0) | ||
278 | #else | ||
279 | # define MMU_IS_SUN3 (0) | ||
280 | # define MMU_IS_851 (1) | ||
281 | #endif | ||
282 | #endif | ||
283 | |||
284 | #if !defined(CONFIG_M68030) | ||
285 | # define CPU_IS_030 (0) | ||
286 | # define MMU_IS_030 (0) | ||
287 | #elif defined(CONFIG_M68020) || defined(CONFIG_M68040) || defined(CONFIG_M68060) | ||
288 | # define CPU_IS_030 (m68k_cputype & CPU_68030) | ||
289 | # define MMU_IS_030 (m68k_mmutype & MMU_68030) | ||
290 | #else | ||
291 | # define CPU_M68030_ONLY | ||
292 | # define CPU_IS_030 (1) | ||
293 | # define MMU_IS_030 (1) | ||
294 | #endif | ||
295 | |||
296 | #if !defined(CONFIG_M68040) | ||
297 | # define CPU_IS_040 (0) | ||
298 | # define MMU_IS_040 (0) | ||
299 | #elif defined(CONFIG_M68020) || defined(CONFIG_M68030) || defined(CONFIG_M68060) | ||
300 | # define CPU_IS_040 (m68k_cputype & CPU_68040) | ||
301 | # define MMU_IS_040 (m68k_mmutype & MMU_68040) | ||
302 | #else | ||
303 | # define CPU_M68040_ONLY | ||
304 | # define CPU_IS_040 (1) | ||
305 | # define MMU_IS_040 (1) | ||
306 | #endif | ||
307 | |||
308 | #if !defined(CONFIG_M68060) | ||
309 | # define CPU_IS_060 (0) | ||
310 | # define MMU_IS_060 (0) | ||
311 | #elif defined(CONFIG_M68020) || defined(CONFIG_M68030) || defined(CONFIG_M68040) | ||
312 | # define CPU_IS_060 (m68k_cputype & CPU_68060) | ||
313 | # define MMU_IS_060 (m68k_mmutype & MMU_68060) | ||
314 | #else | ||
315 | # define CPU_M68060_ONLY | ||
316 | # define CPU_IS_060 (1) | ||
317 | # define MMU_IS_060 (1) | ||
318 | #endif | ||
319 | |||
320 | #if !defined(CONFIG_M68020) && !defined(CONFIG_M68030) | ||
321 | # define CPU_IS_020_OR_030 (0) | ||
322 | #else | ||
323 | # define CPU_M68020_OR_M68030 | ||
324 | # if defined(CONFIG_M68040) || defined(CONFIG_M68060) | ||
325 | # define CPU_IS_020_OR_030 (!m68k_is040or060) | ||
326 | # else | ||
327 | # define CPU_M68020_OR_M68030_ONLY | ||
328 | # define CPU_IS_020_OR_030 (1) | ||
329 | # endif | ||
330 | #endif | ||
331 | |||
332 | #if !defined(CONFIG_M68040) && !defined(CONFIG_M68060) | ||
333 | # define CPU_IS_040_OR_060 (0) | ||
334 | #else | ||
335 | # define CPU_M68040_OR_M68060 | ||
336 | # if defined(CONFIG_M68020) || defined(CONFIG_M68030) | ||
337 | # define CPU_IS_040_OR_060 (m68k_is040or060) | ||
338 | # else | ||
339 | # define CPU_M68040_OR_M68060_ONLY | ||
340 | # define CPU_IS_040_OR_060 (1) | ||
341 | # endif | ||
342 | #endif | ||
343 | |||
344 | #define CPU_TYPE (m68k_cputype) | ||
345 | |||
346 | #ifdef CONFIG_M68KFPU_EMU | ||
347 | # ifdef CONFIG_M68KFPU_EMU_ONLY | ||
348 | # define FPU_IS_EMU (1) | ||
349 | # else | ||
350 | # define FPU_IS_EMU (!m68k_fputype) | ||
351 | # endif | ||
352 | #else | ||
353 | # define FPU_IS_EMU (0) | ||
354 | #endif | ||
355 | |||
356 | |||
357 | /* | ||
358 | * Miscellaneous | ||
359 | */ | ||
360 | |||
361 | #define NUM_MEMINFO 4 | ||
362 | |||
363 | #ifndef __ASSEMBLY__ | ||
364 | struct mem_info { | ||
365 | unsigned long addr; /* physical address of memory chunk */ | ||
366 | unsigned long size; /* length of memory chunk (in bytes) */ | ||
367 | }; | ||
368 | |||
369 | extern int m68k_num_memory; /* # of memory blocks found (and used) */ | ||
370 | extern int m68k_realnum_memory; /* real # of memory blocks found */ | ||
371 | extern struct mem_info m68k_memory[NUM_MEMINFO];/* memory description */ | ||
372 | #endif | ||
373 | |||
374 | #endif /* __KERNEL__ */ | ||
375 | |||
376 | #endif /* _M68K_SETUP_H */ | ||
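
To illustrate the MACH_IS_* pattern established above (a hypothetical probe function, not part of this commit): on single-platform kernels the macro is a compile-time constant, while on multi-platform kernels it compares m68k_machtype at run time.

/*
 * Hypothetical sketch: platform guard in an initcall.  On an Atari-only
 * kernel MACH_IS_ATARI is the constant (1), so the guard is compiled
 * away; on a multi-platform kernel it tests m68k_machtype == MACH_ATARI.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <asm/setup.h>

static int __init example_board_init(void)
{
	if (!MACH_IS_ATARI)
		return -ENODEV;
	/* Atari-specific setup would go here. */
	return 0;
}
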
diff --git a/arch/m68k/include/asm/setup_mm.h b/arch/m68k/include/asm/setup_mm.h
deleted file mode 100644
index 4dfb3952b375..000000000000
--- a/arch/m68k/include/asm/setup_mm.h
+++ /dev/null
@@ -1,376 +0,0 @@
1 | /* | ||
2 | ** asm/setup.h -- Definition of the Linux/m68k setup information | ||
3 | ** | ||
4 | ** Copyright 1992 by Greg Harp | ||
5 | ** | ||
6 | ** This file is subject to the terms and conditions of the GNU General Public | ||
7 | ** License. See the file COPYING in the main directory of this archive | ||
8 | ** for more details. | ||
9 | ** | ||
10 | ** Created 09/29/92 by Greg Harp | ||
11 | ** | ||
12 | ** 5/2/94 Roman Hodek: | ||
13 | ** Added bi_atari part of the machine dependent union bi_un; for now it | ||
14 | ** contains just a model field to distinguish between TT and Falcon. | ||
15 | ** 26/7/96 Roman Zippel: | ||
16 | ** Renamed to setup.h; added some useful macros to allow gcc some | ||
17 | ** optimizations if possible. | ||
18 | ** 5/10/96 Geert Uytterhoeven: | ||
19 | ** Redesign of the boot information structure; moved boot information | ||
20 | ** structure to bootinfo.h | ||
21 | */ | ||
22 | |||
23 | #ifndef _M68K_SETUP_H | ||
24 | #define _M68K_SETUP_H | ||
25 | |||
26 | |||
27 | |||
28 | /* | ||
29 | * Linux/m68k Architectures | ||
30 | */ | ||
31 | |||
32 | #define MACH_AMIGA 1 | ||
33 | #define MACH_ATARI 2 | ||
34 | #define MACH_MAC 3 | ||
35 | #define MACH_APOLLO 4 | ||
36 | #define MACH_SUN3 5 | ||
37 | #define MACH_MVME147 6 | ||
38 | #define MACH_MVME16x 7 | ||
39 | #define MACH_BVME6000 8 | ||
40 | #define MACH_HP300 9 | ||
41 | #define MACH_Q40 10 | ||
42 | #define MACH_SUN3X 11 | ||
43 | |||
44 | #define COMMAND_LINE_SIZE 256 | ||
45 | |||
46 | #ifdef __KERNEL__ | ||
47 | |||
48 | #define CL_SIZE COMMAND_LINE_SIZE | ||
49 | |||
50 | #ifndef __ASSEMBLY__ | ||
51 | extern unsigned long m68k_machtype; | ||
52 | #endif /* !__ASSEMBLY__ */ | ||
53 | |||
54 | #if !defined(CONFIG_AMIGA) | ||
55 | # define MACH_IS_AMIGA (0) | ||
56 | #elif defined(CONFIG_ATARI) || defined(CONFIG_MAC) || defined(CONFIG_APOLLO) \ | ||
57 | || defined(CONFIG_MVME16x) || defined(CONFIG_BVME6000) \ | ||
58 | || defined(CONFIG_HP300) || defined(CONFIG_Q40) \ | ||
59 | || defined(CONFIG_SUN3X) || defined(CONFIG_MVME147) | ||
60 | # define MACH_IS_AMIGA (m68k_machtype == MACH_AMIGA) | ||
61 | #else | ||
62 | # define MACH_AMIGA_ONLY | ||
63 | # define MACH_IS_AMIGA (1) | ||
64 | # define MACH_TYPE (MACH_AMIGA) | ||
65 | #endif | ||
66 | |||
67 | #if !defined(CONFIG_ATARI) | ||
68 | # define MACH_IS_ATARI (0) | ||
69 | #elif defined(CONFIG_AMIGA) || defined(CONFIG_MAC) || defined(CONFIG_APOLLO) \ | ||
70 | || defined(CONFIG_MVME16x) || defined(CONFIG_BVME6000) \ | ||
71 | || defined(CONFIG_HP300) || defined(CONFIG_Q40) \ | ||
72 | || defined(CONFIG_SUN3X) || defined(CONFIG_MVME147) | ||
73 | # define MACH_IS_ATARI (m68k_machtype == MACH_ATARI) | ||
74 | #else | ||
75 | # define MACH_ATARI_ONLY | ||
76 | # define MACH_IS_ATARI (1) | ||
77 | # define MACH_TYPE (MACH_ATARI) | ||
78 | #endif | ||
79 | |||
80 | #if !defined(CONFIG_MAC) | ||
81 | # define MACH_IS_MAC (0) | ||
82 | #elif defined(CONFIG_AMIGA) || defined(CONFIG_ATARI) || defined(CONFIG_APOLLO) \ | ||
83 | || defined(CONFIG_MVME16x) || defined(CONFIG_BVME6000) \ | ||
84 | || defined(CONFIG_HP300) || defined(CONFIG_Q40) \ | ||
85 | || defined(CONFIG_SUN3X) || defined(CONFIG_MVME147) | ||
86 | # define MACH_IS_MAC (m68k_machtype == MACH_MAC) | ||
87 | #else | ||
88 | # define MACH_MAC_ONLY | ||
89 | # define MACH_IS_MAC (1) | ||
90 | # define MACH_TYPE (MACH_MAC) | ||
91 | #endif | ||
92 | |||
93 | #if defined(CONFIG_SUN3) | ||
94 | #define MACH_IS_SUN3 (1) | ||
95 | #define MACH_SUN3_ONLY (1) | ||
96 | #define MACH_TYPE (MACH_SUN3) | ||
97 | #else | ||
98 | #define MACH_IS_SUN3 (0) | ||
99 | #endif | ||
100 | |||
101 | #if !defined (CONFIG_APOLLO) | ||
102 | # define MACH_IS_APOLLO (0) | ||
103 | #elif defined(CONFIG_AMIGA) || defined(CONFIG_MAC) || defined(CONFIG_ATARI) \ | ||
104 | || defined(CONFIG_MVME16x) || defined(CONFIG_BVME6000) \ | ||
105 | || defined(CONFIG_HP300) || defined(CONFIG_Q40) \ | ||
106 | || defined(CONFIG_SUN3X) || defined(CONFIG_MVME147) | ||
107 | # define MACH_IS_APOLLO (m68k_machtype == MACH_APOLLO) | ||
108 | #else | ||
109 | # define MACH_APOLLO_ONLY | ||
110 | # define MACH_IS_APOLLO (1) | ||
111 | # define MACH_TYPE (MACH_APOLLO) | ||
112 | #endif | ||
113 | |||
114 | #if !defined (CONFIG_MVME147) | ||
115 | # define MACH_IS_MVME147 (0) | ||
116 | #elif defined(CONFIG_AMIGA) || defined(CONFIG_MAC) || defined(CONFIG_ATARI) \ | ||
117 | || defined(CONFIG_APOLLO) || defined(CONFIG_BVME6000) \ | ||
118 | || defined(CONFIG_HP300) || defined(CONFIG_Q40) \ | ||
119 | || defined(CONFIG_SUN3X) || defined(CONFIG_MVME16x) | ||
120 | # define MACH_IS_MVME147 (m68k_machtype == MACH_MVME147) | ||
121 | #else | ||
122 | # define MACH_MVME147_ONLY | ||
123 | # define MACH_IS_MVME147 (1) | ||
124 | # define MACH_TYPE (MACH_MVME147) | ||
125 | #endif | ||
126 | |||
127 | #if !defined (CONFIG_MVME16x) | ||
128 | # define MACH_IS_MVME16x (0) | ||
129 | #elif defined(CONFIG_AMIGA) || defined(CONFIG_MAC) || defined(CONFIG_ATARI) \ | ||
130 | || defined(CONFIG_APOLLO) || defined(CONFIG_BVME6000) \ | ||
131 | || defined(CONFIG_HP300) || defined(CONFIG_Q40) \ | ||
132 | || defined(CONFIG_SUN3X) || defined(CONFIG_MVME147) | ||
133 | # define MACH_IS_MVME16x (m68k_machtype == MACH_MVME16x) | ||
134 | #else | ||
135 | # define MACH_MVME16x_ONLY | ||
136 | # define MACH_IS_MVME16x (1) | ||
137 | # define MACH_TYPE (MACH_MVME16x) | ||
138 | #endif | ||
139 | |||
140 | #if !defined (CONFIG_BVME6000) | ||
141 | # define MACH_IS_BVME6000 (0) | ||
142 | #elif defined(CONFIG_AMIGA) || defined(CONFIG_MAC) || defined(CONFIG_ATARI) \ | ||
143 | || defined(CONFIG_APOLLO) || defined(CONFIG_MVME16x) \ | ||
144 | || defined(CONFIG_HP300) || defined(CONFIG_Q40) \ | ||
145 | || defined(CONFIG_SUN3X) || defined(CONFIG_MVME147) | ||
146 | # define MACH_IS_BVME6000 (m68k_machtype == MACH_BVME6000) | ||
147 | #else | ||
148 | # define MACH_BVME6000_ONLY | ||
149 | # define MACH_IS_BVME6000 (1) | ||
150 | # define MACH_TYPE (MACH_BVME6000) | ||
151 | #endif | ||
152 | |||
153 | #if !defined (CONFIG_HP300) | ||
154 | # define MACH_IS_HP300 (0) | ||
155 | #elif defined(CONFIG_AMIGA) || defined(CONFIG_MAC) || defined(CONFIG_ATARI) \ | ||
156 | || defined(CONFIG_APOLLO) || defined(CONFIG_MVME16x) \ | ||
157 | || defined(CONFIG_BVME6000) || defined(CONFIG_Q40) \ | ||
158 | || defined(CONFIG_SUN3X) || defined(CONFIG_MVME147) | ||
159 | # define MACH_IS_HP300 (m68k_machtype == MACH_HP300) | ||
160 | #else | ||
161 | # define MACH_HP300_ONLY | ||
162 | # define MACH_IS_HP300 (1) | ||
163 | # define MACH_TYPE (MACH_HP300) | ||
164 | #endif | ||
165 | |||
166 | #if !defined (CONFIG_Q40) | ||
167 | # define MACH_IS_Q40 (0) | ||
168 | #elif defined(CONFIG_AMIGA) || defined(CONFIG_MAC) || defined(CONFIG_ATARI) \ | ||
169 | || defined(CONFIG_APOLLO) || defined(CONFIG_MVME16x) \ | ||
170 | || defined(CONFIG_BVME6000) || defined(CONFIG_HP300) \ | ||
171 | || defined(CONFIG_SUN3X) || defined(CONFIG_MVME147) | ||
172 | # define MACH_IS_Q40 (m68k_machtype == MACH_Q40) | ||
173 | #else | ||
174 | # define MACH_Q40_ONLY | ||
175 | # define MACH_IS_Q40 (1) | ||
176 | # define MACH_TYPE (MACH_Q40) | ||
177 | #endif | ||
178 | |||
179 | #if !defined (CONFIG_SUN3X) | ||
180 | # define MACH_IS_SUN3X (0) | ||
181 | #elif defined(CONFIG_AMIGA) || defined(CONFIG_MAC) || defined(CONFIG_ATARI) \ | ||
182 | || defined(CONFIG_APOLLO) || defined(CONFIG_MVME16x) \ | ||
183 | || defined(CONFIG_BVME6000) || defined(CONFIG_HP300) \ | ||
184 | || defined(CONFIG_Q40) || defined(CONFIG_MVME147) | ||
185 | # define MACH_IS_SUN3X (m68k_machtype == MACH_SUN3X) | ||
186 | #else | ||
187 | # define CONFIG_SUN3X_ONLY | ||
188 | # define MACH_IS_SUN3X (1) | ||
189 | # define MACH_TYPE (MACH_SUN3X) | ||
190 | #endif | ||
191 | |||
192 | #ifndef MACH_TYPE | ||
193 | # define MACH_TYPE (m68k_machtype) | ||
194 | #endif | ||
195 | |||
196 | #endif /* __KERNEL__ */ | ||
197 | |||
198 | |||
199 | /* | ||
200 | * CPU, FPU and MMU types | ||
201 | * | ||
202 | * Note: we may rely on the following equalities: | ||
203 | * | ||
204 | * CPU_68020 == MMU_68851 | ||
205 | * CPU_68030 == MMU_68030 | ||
206 | * CPU_68040 == FPU_68040 == MMU_68040 | ||
207 | * CPU_68060 == FPU_68060 == MMU_68060 | ||
208 | */ | ||
209 | |||
210 | #define CPUB_68020 0 | ||
211 | #define CPUB_68030 1 | ||
212 | #define CPUB_68040 2 | ||
213 | #define CPUB_68060 3 | ||
214 | |||
215 | #define CPU_68020 (1<<CPUB_68020) | ||
216 | #define CPU_68030 (1<<CPUB_68030) | ||
217 | #define CPU_68040 (1<<CPUB_68040) | ||
218 | #define CPU_68060 (1<<CPUB_68060) | ||
219 | |||
220 | #define FPUB_68881 0 | ||
221 | #define FPUB_68882 1 | ||
222 | #define FPUB_68040 2 /* Internal FPU */ | ||
223 | #define FPUB_68060 3 /* Internal FPU */ | ||
224 | #define FPUB_SUNFPA 4 /* Sun-3 FPA */ | ||
225 | |||
226 | #define FPU_68881 (1<<FPUB_68881) | ||
227 | #define FPU_68882 (1<<FPUB_68882) | ||
228 | #define FPU_68040 (1<<FPUB_68040) | ||
229 | #define FPU_68060 (1<<FPUB_68060) | ||
230 | #define FPU_SUNFPA (1<<FPUB_SUNFPA) | ||
231 | |||
232 | #define MMUB_68851 0 | ||
233 | #define MMUB_68030 1 /* Internal MMU */ | ||
234 | #define MMUB_68040 2 /* Internal MMU */ | ||
235 | #define MMUB_68060 3 /* Internal MMU */ | ||
236 | #define MMUB_APOLLO 4 /* Custom Apollo */ | ||
237 | #define MMUB_SUN3 5 /* Custom Sun-3 */ | ||
238 | |||
239 | #define MMU_68851 (1<<MMUB_68851) | ||
240 | #define MMU_68030 (1<<MMUB_68030) | ||
241 | #define MMU_68040 (1<<MMUB_68040) | ||
242 | #define MMU_68060 (1<<MMUB_68060) | ||
243 | #define MMU_SUN3 (1<<MMUB_SUN3) | ||
244 | #define MMU_APOLLO (1<<MMUB_APOLLO) | ||
245 | |||
246 | #ifdef __KERNEL__ | ||
247 | |||
248 | #ifndef __ASSEMBLY__ | ||
249 | extern unsigned long m68k_cputype; | ||
250 | extern unsigned long m68k_fputype; | ||
251 | extern unsigned long m68k_mmutype; | ||
252 | #ifdef CONFIG_VME | ||
253 | extern unsigned long vme_brdtype; | ||
254 | #endif | ||
255 | |||
256 | /* | ||
257 | * m68k_is040or060 is != 0 for a '040 or higher; | ||
258 | * used numbers are 4 for 68040 and 6 for 68060. | ||
259 | */ | ||
260 | |||
261 | extern int m68k_is040or060; | ||
262 | #endif /* !__ASSEMBLY__ */ | ||
263 | |||
264 | #if !defined(CONFIG_M68020) | ||
265 | # define CPU_IS_020 (0) | ||
266 | # define MMU_IS_851 (0) | ||
267 | # define MMU_IS_SUN3 (0) | ||
268 | #elif defined(CONFIG_M68030) || defined(CONFIG_M68040) || defined(CONFIG_M68060) | ||
269 | # define CPU_IS_020 (m68k_cputype & CPU_68020) | ||
270 | # define MMU_IS_851 (m68k_mmutype & MMU_68851) | ||
271 | # define MMU_IS_SUN3 (0) /* Sun3 not supported with other CPU enabled */ | ||
272 | #else | ||
273 | # define CPU_M68020_ONLY | ||
274 | # define CPU_IS_020 (1) | ||
275 | #ifdef MACH_SUN3_ONLY | ||
276 | # define MMU_IS_SUN3 (1) | ||
277 | # define MMU_IS_851 (0) | ||
278 | #else | ||
279 | # define MMU_IS_SUN3 (0) | ||
280 | # define MMU_IS_851 (1) | ||
281 | #endif | ||
282 | #endif | ||
283 | |||
284 | #if !defined(CONFIG_M68030) | ||
285 | # define CPU_IS_030 (0) | ||
286 | # define MMU_IS_030 (0) | ||
287 | #elif defined(CONFIG_M68020) || defined(CONFIG_M68040) || defined(CONFIG_M68060) | ||
288 | # define CPU_IS_030 (m68k_cputype & CPU_68030) | ||
289 | # define MMU_IS_030 (m68k_mmutype & MMU_68030) | ||
290 | #else | ||
291 | # define CPU_M68030_ONLY | ||
292 | # define CPU_IS_030 (1) | ||
293 | # define MMU_IS_030 (1) | ||
294 | #endif | ||
295 | |||
296 | #if !defined(CONFIG_M68040) | ||
297 | # define CPU_IS_040 (0) | ||
298 | # define MMU_IS_040 (0) | ||
299 | #elif defined(CONFIG_M68020) || defined(CONFIG_M68030) || defined(CONFIG_M68060) | ||
300 | # define CPU_IS_040 (m68k_cputype & CPU_68040) | ||
301 | # define MMU_IS_040 (m68k_mmutype & MMU_68040) | ||
302 | #else | ||
303 | # define CPU_M68040_ONLY | ||
304 | # define CPU_IS_040 (1) | ||
305 | # define MMU_IS_040 (1) | ||
306 | #endif | ||
307 | |||
308 | #if !defined(CONFIG_M68060) | ||
309 | # define CPU_IS_060 (0) | ||
310 | # define MMU_IS_060 (0) | ||
311 | #elif defined(CONFIG_M68020) || defined(CONFIG_M68030) || defined(CONFIG_M68040) | ||
312 | # define CPU_IS_060 (m68k_cputype & CPU_68060) | ||
313 | # define MMU_IS_060 (m68k_mmutype & MMU_68060) | ||
314 | #else | ||
315 | # define CPU_M68060_ONLY | ||
316 | # define CPU_IS_060 (1) | ||
317 | # define MMU_IS_060 (1) | ||
318 | #endif | ||
319 | |||
320 | #if !defined(CONFIG_M68020) && !defined(CONFIG_M68030) | ||
321 | # define CPU_IS_020_OR_030 (0) | ||
322 | #else | ||
323 | # define CPU_M68020_OR_M68030 | ||
324 | # if defined(CONFIG_M68040) || defined(CONFIG_M68060) | ||
325 | # define CPU_IS_020_OR_030 (!m68k_is040or060) | ||
326 | # else | ||
327 | # define CPU_M68020_OR_M68030_ONLY | ||
328 | # define CPU_IS_020_OR_030 (1) | ||
329 | # endif | ||
330 | #endif | ||
331 | |||
332 | #if !defined(CONFIG_M68040) && !defined(CONFIG_M68060) | ||
333 | # define CPU_IS_040_OR_060 (0) | ||
334 | #else | ||
335 | # define CPU_M68040_OR_M68060 | ||
336 | # if defined(CONFIG_M68020) || defined(CONFIG_M68030) | ||
337 | # define CPU_IS_040_OR_060 (m68k_is040or060) | ||
338 | # else | ||
339 | # define CPU_M68040_OR_M68060_ONLY | ||
340 | # define CPU_IS_040_OR_060 (1) | ||
341 | # endif | ||
342 | #endif | ||
343 | |||
344 | #define CPU_TYPE (m68k_cputype) | ||
345 | |||
346 | #ifdef CONFIG_M68KFPU_EMU | ||
347 | # ifdef CONFIG_M68KFPU_EMU_ONLY | ||
348 | # define FPU_IS_EMU (1) | ||
349 | # else | ||
350 | # define FPU_IS_EMU (!m68k_fputype) | ||
351 | # endif | ||
352 | #else | ||
353 | # define FPU_IS_EMU (0) | ||
354 | #endif | ||
355 | |||
356 | |||
357 | /* | ||
358 | * Miscellaneous | ||
359 | */ | ||
360 | |||
361 | #define NUM_MEMINFO 4 | ||
362 | |||
363 | #ifndef __ASSEMBLY__ | ||
364 | struct mem_info { | ||
365 | unsigned long addr; /* physical address of memory chunk */ | ||
366 | unsigned long size; /* length of memory chunk (in bytes) */ | ||
367 | }; | ||
368 | |||
369 | extern int m68k_num_memory; /* # of memory blocks found (and used) */ | ||
370 | extern int m68k_realnum_memory; /* real # of memory blocks found */ | ||
371 | extern struct mem_info m68k_memory[NUM_MEMINFO];/* memory description */ | ||
372 | #endif | ||
373 | |||
374 | #endif /* __KERNEL__ */ | ||
375 | |||
376 | #endif /* _M68K_SETUP_H */ | ||
diff --git a/arch/m68k/include/asm/setup_no.h b/arch/m68k/include/asm/setup_no.h
deleted file mode 100644
index 45d286ce9398..000000000000
--- a/arch/m68k/include/asm/setup_no.h
+++ /dev/null
@@ -1,10 +0,0 @@
1 | #ifdef __KERNEL__ | ||
2 | |||
3 | #include <asm/setup_mm.h> | ||
4 | |||
5 | /* We have a bigger command line buffer. */ | ||
6 | #undef COMMAND_LINE_SIZE | ||
7 | |||
8 | #endif /* __KERNEL__ */ | ||
9 | |||
10 | #define COMMAND_LINE_SIZE 512 | ||
diff --git a/arch/m68k/include/asm/sigcontext.h b/arch/m68k/include/asm/sigcontext.h
index bff6d40345a9..523db2a51cf3 100644
--- a/arch/m68k/include/asm/sigcontext.h
+++ b/arch/m68k/include/asm/sigcontext.h
@@ -1,5 +1,24 @@
1 | #ifndef _ASM_M68k_SIGCONTEXT_H | ||
2 | #define _ASM_M68k_SIGCONTEXT_H | ||
3 | |||
4 | struct sigcontext { | ||
5 | unsigned long sc_mask; /* old sigmask */ | ||
6 | unsigned long sc_usp; /* old user stack pointer */ | ||
7 | unsigned long sc_d0; | ||
8 | unsigned long sc_d1; | ||
9 | unsigned long sc_a0; | ||
10 | unsigned long sc_a1; | ||
1 | #ifdef __uClinux__ | 11 | #ifdef __uClinux__ |
2 | #include "sigcontext_no.h" | 12 | unsigned long sc_a5; |
3 | #else | 13 | #endif |
4 | #include "sigcontext_mm.h" | 14 | unsigned short sc_sr; |
15 | unsigned long sc_pc; | ||
16 | unsigned short sc_formatvec; | ||
17 | #ifndef __uClinux__ | ||
18 | unsigned long sc_fpregs[2*3]; /* room for two fp registers */ | ||
19 | unsigned long sc_fpcntl[3]; | ||
20 | unsigned char sc_fpstate[216]; | ||
21 | #endif | ||
22 | }; | ||
23 | |||
5 | #endif | 24 | #endif |
diff --git a/arch/m68k/include/asm/sigcontext_mm.h b/arch/m68k/include/asm/sigcontext_mm.h
deleted file mode 100644
index 64fbe34cf26f..000000000000
--- a/arch/m68k/include/asm/sigcontext_mm.h
+++ /dev/null
@@ -1,19 +0,0 @@
1 | #ifndef _ASM_M68k_SIGCONTEXT_H | ||
2 | #define _ASM_M68k_SIGCONTEXT_H | ||
3 | |||
4 | struct sigcontext { | ||
5 | unsigned long sc_mask; /* old sigmask */ | ||
6 | unsigned long sc_usp; /* old user stack pointer */ | ||
7 | unsigned long sc_d0; | ||
8 | unsigned long sc_d1; | ||
9 | unsigned long sc_a0; | ||
10 | unsigned long sc_a1; | ||
11 | unsigned short sc_sr; | ||
12 | unsigned long sc_pc; | ||
13 | unsigned short sc_formatvec; | ||
14 | unsigned long sc_fpregs[2*3]; /* room for two fp registers */ | ||
15 | unsigned long sc_fpcntl[3]; | ||
16 | unsigned char sc_fpstate[216]; | ||
17 | }; | ||
18 | |||
19 | #endif | ||
diff --git a/arch/m68k/include/asm/sigcontext_no.h b/arch/m68k/include/asm/sigcontext_no.h
deleted file mode 100644
index 36c293fc133d..000000000000
--- a/arch/m68k/include/asm/sigcontext_no.h
+++ /dev/null
@@ -1,17 +0,0 @@
1 | #ifndef _ASM_M68KNOMMU_SIGCONTEXT_H | ||
2 | #define _ASM_M68KNOMMU_SIGCONTEXT_H | ||
3 | |||
4 | struct sigcontext { | ||
5 | unsigned long sc_mask; /* old sigmask */ | ||
6 | unsigned long sc_usp; /* old user stack pointer */ | ||
7 | unsigned long sc_d0; | ||
8 | unsigned long sc_d1; | ||
9 | unsigned long sc_a0; | ||
10 | unsigned long sc_a1; | ||
11 | unsigned long sc_a5; | ||
12 | unsigned short sc_sr; | ||
13 | unsigned long sc_pc; | ||
14 | unsigned short sc_formatvec; | ||
15 | }; | ||
16 | |||
17 | #endif | ||
diff --git a/arch/m68k/include/asm/siginfo.h b/arch/m68k/include/asm/siginfo.h
index 61219d7affc8..ca7dde8fd223 100644
--- a/arch/m68k/include/asm/siginfo.h
+++ b/arch/m68k/include/asm/siginfo.h
@@ -1,5 +1,97 @@
1 | #ifdef __uClinux__ | 1 | #ifndef _M68K_SIGINFO_H |
2 | #include "siginfo_no.h" | 2 | #define _M68K_SIGINFO_H |
3 | |||
4 | #ifndef __uClinux__ | ||
5 | #define HAVE_ARCH_SIGINFO_T | ||
6 | #define HAVE_ARCH_COPY_SIGINFO | ||
7 | #endif | ||
8 | |||
9 | #include <asm-generic/siginfo.h> | ||
10 | |||
11 | #ifndef __uClinux__ | ||
12 | |||
13 | typedef struct siginfo { | ||
14 | int si_signo; | ||
15 | int si_errno; | ||
16 | int si_code; | ||
17 | |||
18 | union { | ||
19 | int _pad[SI_PAD_SIZE]; | ||
20 | |||
21 | /* kill() */ | ||
22 | struct { | ||
23 | __kernel_pid_t _pid; /* sender's pid */ | ||
24 | __kernel_uid_t _uid; /* backwards compatibility */ | ||
25 | __kernel_uid32_t _uid32; /* sender's uid */ | ||
26 | } _kill; | ||
27 | |||
28 | /* POSIX.1b timers */ | ||
29 | struct { | ||
30 | timer_t _tid; /* timer id */ | ||
31 | int _overrun; /* overrun count */ | ||
32 | char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)]; | ||
33 | sigval_t _sigval; /* same as below */ | ||
34 | int _sys_private; /* not to be passed to user */ | ||
35 | } _timer; | ||
36 | |||
37 | /* POSIX.1b signals */ | ||
38 | struct { | ||
39 | __kernel_pid_t _pid; /* sender's pid */ | ||
40 | __kernel_uid_t _uid; /* backwards compatibility */ | ||
41 | sigval_t _sigval; | ||
42 | __kernel_uid32_t _uid32; /* sender's uid */ | ||
43 | } _rt; | ||
44 | |||
45 | /* SIGCHLD */ | ||
46 | struct { | ||
47 | __kernel_pid_t _pid; /* which child */ | ||
48 | __kernel_uid_t _uid; /* backwards compatibility */ | ||
49 | int _status; /* exit code */ | ||
50 | clock_t _utime; | ||
51 | clock_t _stime; | ||
52 | __kernel_uid32_t _uid32; /* sender's uid */ | ||
53 | } _sigchld; | ||
54 | |||
55 | /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */ | ||
56 | struct { | ||
57 | void *_addr; /* faulting insn/memory ref. */ | ||
58 | } _sigfault; | ||
59 | |||
60 | /* SIGPOLL */ | ||
61 | struct { | ||
62 | int _band; /* POLL_IN, POLL_OUT, POLL_MSG */ | ||
63 | int _fd; | ||
64 | } _sigpoll; | ||
65 | } _sifields; | ||
66 | } siginfo_t; | ||
67 | |||
68 | #define UID16_SIGINFO_COMPAT_NEEDED | ||
69 | |||
70 | /* | ||
71 | * How these fields are to be accessed. | ||
72 | */ | ||
73 | #undef si_uid | ||
74 | #ifdef __KERNEL__ | ||
75 | #define si_uid _sifields._kill._uid32 | ||
76 | #define si_uid16 _sifields._kill._uid | ||
3 | #else | 77 | #else |
4 | #include "siginfo_mm.h" | 78 | #define si_uid _sifields._kill._uid |
79 | #endif | ||
80 | |||
81 | #ifdef __KERNEL__ | ||
82 | |||
83 | #include <linux/string.h> | ||
84 | |||
85 | static inline void copy_siginfo(struct siginfo *to, struct siginfo *from) | ||
86 | { | ||
87 | if (from->si_code < 0) | ||
88 | memcpy(to, from, sizeof(*to)); | ||
89 | else | ||
90 | /* _sigchld is currently the largest know union member */ | ||
91 | memcpy(to, from, 3*sizeof(int) + sizeof(from->_sifields._sigchld)); | ||
92 | } | ||
93 | |||
94 | #endif /* __KERNEL__ */ | ||
95 | #endif /* !__uClinux__ */ | ||
96 | |||
5 | #endif | 97 | #endif |
diff --git a/arch/m68k/include/asm/siginfo_mm.h b/arch/m68k/include/asm/siginfo_mm.h
deleted file mode 100644
index 05a8d6d90b58..000000000000
--- a/arch/m68k/include/asm/siginfo_mm.h
+++ /dev/null
@@ -1,92 +0,0 @@
1 | #ifndef _M68K_SIGINFO_H | ||
2 | #define _M68K_SIGINFO_H | ||
3 | |||
4 | #define HAVE_ARCH_SIGINFO_T | ||
5 | #define HAVE_ARCH_COPY_SIGINFO | ||
6 | |||
7 | #include <asm-generic/siginfo.h> | ||
8 | |||
9 | typedef struct siginfo { | ||
10 | int si_signo; | ||
11 | int si_errno; | ||
12 | int si_code; | ||
13 | |||
14 | union { | ||
15 | int _pad[SI_PAD_SIZE]; | ||
16 | |||
17 | /* kill() */ | ||
18 | struct { | ||
19 | __kernel_pid_t _pid; /* sender's pid */ | ||
20 | __kernel_uid_t _uid; /* backwards compatibility */ | ||
21 | __kernel_uid32_t _uid32; /* sender's uid */ | ||
22 | } _kill; | ||
23 | |||
24 | /* POSIX.1b timers */ | ||
25 | struct { | ||
26 | timer_t _tid; /* timer id */ | ||
27 | int _overrun; /* overrun count */ | ||
28 | char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)]; | ||
29 | sigval_t _sigval; /* same as below */ | ||
30 | int _sys_private; /* not to be passed to user */ | ||
31 | } _timer; | ||
32 | |||
33 | /* POSIX.1b signals */ | ||
34 | struct { | ||
35 | __kernel_pid_t _pid; /* sender's pid */ | ||
36 | __kernel_uid_t _uid; /* backwards compatibility */ | ||
37 | sigval_t _sigval; | ||
38 | __kernel_uid32_t _uid32; /* sender's uid */ | ||
39 | } _rt; | ||
40 | |||
41 | /* SIGCHLD */ | ||
42 | struct { | ||
43 | __kernel_pid_t _pid; /* which child */ | ||
44 | __kernel_uid_t _uid; /* backwards compatibility */ | ||
45 | int _status; /* exit code */ | ||
46 | clock_t _utime; | ||
47 | clock_t _stime; | ||
48 | __kernel_uid32_t _uid32; /* sender's uid */ | ||
49 | } _sigchld; | ||
50 | |||
51 | /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */ | ||
52 | struct { | ||
53 | void *_addr; /* faulting insn/memory ref. */ | ||
54 | } _sigfault; | ||
55 | |||
56 | /* SIGPOLL */ | ||
57 | struct { | ||
58 | int _band; /* POLL_IN, POLL_OUT, POLL_MSG */ | ||
59 | int _fd; | ||
60 | } _sigpoll; | ||
61 | } _sifields; | ||
62 | } siginfo_t; | ||
63 | |||
64 | #define UID16_SIGINFO_COMPAT_NEEDED | ||
65 | |||
66 | /* | ||
67 | * How these fields are to be accessed. | ||
68 | */ | ||
69 | #undef si_uid | ||
70 | #ifdef __KERNEL__ | ||
71 | #define si_uid _sifields._kill._uid32 | ||
72 | #define si_uid16 _sifields._kill._uid | ||
73 | #else | ||
74 | #define si_uid _sifields._kill._uid | ||
75 | #endif | ||
76 | |||
77 | #ifdef __KERNEL__ | ||
78 | |||
79 | #include <linux/string.h> | ||
80 | |||
81 | static inline void copy_siginfo(struct siginfo *to, struct siginfo *from) | ||
82 | { | ||
83 | if (from->si_code < 0) | ||
84 | memcpy(to, from, sizeof(*to)); | ||
85 | else | ||
86 | /* _sigchld is currently the largest know union member */ | ||
87 | memcpy(to, from, 3*sizeof(int) + sizeof(from->_sifields._sigchld)); | ||
88 | } | ||
89 | |||
90 | #endif /* __KERNEL__ */ | ||
91 | |||
92 | #endif | ||
diff --git a/arch/m68k/include/asm/siginfo_no.h b/arch/m68k/include/asm/siginfo_no.h
deleted file mode 100644
index b18e5f4064ae..000000000000
--- a/arch/m68k/include/asm/siginfo_no.h
+++ /dev/null
@@ -1,6 +0,0 @@
1 | #ifndef _M68KNOMMU_SIGINFO_H | ||
2 | #define _M68KNOMMU_SIGINFO_H | ||
3 | |||
4 | #include <asm-generic/siginfo.h> | ||
5 | |||
6 | #endif | ||
diff --git a/arch/m68k/include/asm/signal.h b/arch/m68k/include/asm/signal.h
index 3c19988bd93c..08788fdefde0 100644
--- a/arch/m68k/include/asm/signal.h
+++ b/arch/m68k/include/asm/signal.h
@@ -1,5 +1,213 @@
1 | #ifdef __uClinux__ | 1 | #ifndef _M68K_SIGNAL_H |
2 | #include "signal_no.h" | 2 | #define _M68K_SIGNAL_H |
3 | |||
4 | #include <linux/types.h> | ||
5 | |||
6 | /* Avoid too many header ordering problems. */ | ||
7 | struct siginfo; | ||
8 | |||
9 | #ifdef __KERNEL__ | ||
10 | /* Most things should be clean enough to redefine this at will, if care | ||
11 | is taken to make libc match. */ | ||
12 | |||
13 | #define _NSIG 64 | ||
14 | #define _NSIG_BPW 32 | ||
15 | #define _NSIG_WORDS (_NSIG / _NSIG_BPW) | ||
16 | |||
17 | typedef unsigned long old_sigset_t; /* at least 32 bits */ | ||
18 | |||
19 | typedef struct { | ||
20 | unsigned long sig[_NSIG_WORDS]; | ||
21 | } sigset_t; | ||
22 | |||
3 | #else | 23 | #else |
4 | #include "signal_mm.h" | 24 | /* Here we must cater to libcs that poke about in kernel headers. */ |
5 | #endif | 25 | |
26 | #define NSIG 32 | ||
27 | typedef unsigned long sigset_t; | ||
28 | |||
29 | #endif /* __KERNEL__ */ | ||
30 | |||
31 | #define SIGHUP 1 | ||
32 | #define SIGINT 2 | ||
33 | #define SIGQUIT 3 | ||
34 | #define SIGILL 4 | ||
35 | #define SIGTRAP 5 | ||
36 | #define SIGABRT 6 | ||
37 | #define SIGIOT 6 | ||
38 | #define SIGBUS 7 | ||
39 | #define SIGFPE 8 | ||
40 | #define SIGKILL 9 | ||
41 | #define SIGUSR1 10 | ||
42 | #define SIGSEGV 11 | ||
43 | #define SIGUSR2 12 | ||
44 | #define SIGPIPE 13 | ||
45 | #define SIGALRM 14 | ||
46 | #define SIGTERM 15 | ||
47 | #define SIGSTKFLT 16 | ||
48 | #define SIGCHLD 17 | ||
49 | #define SIGCONT 18 | ||
50 | #define SIGSTOP 19 | ||
51 | #define SIGTSTP 20 | ||
52 | #define SIGTTIN 21 | ||
53 | #define SIGTTOU 22 | ||
54 | #define SIGURG 23 | ||
55 | #define SIGXCPU 24 | ||
56 | #define SIGXFSZ 25 | ||
57 | #define SIGVTALRM 26 | ||
58 | #define SIGPROF 27 | ||
59 | #define SIGWINCH 28 | ||
60 | #define SIGIO 29 | ||
61 | #define SIGPOLL SIGIO | ||
62 | /* | ||
63 | #define SIGLOST 29 | ||
64 | */ | ||
65 | #define SIGPWR 30 | ||
66 | #define SIGSYS 31 | ||
67 | #define SIGUNUSED 31 | ||
68 | |||
69 | /* These should not be considered constants from userland. */ | ||
70 | #define SIGRTMIN 32 | ||
71 | #define SIGRTMAX _NSIG | ||
72 | |||
73 | /* | ||
74 | * SA_FLAGS values: | ||
75 | * | ||
76 | * SA_ONSTACK indicates that a registered stack_t will be used. | ||
77 | * SA_RESTART flag to get restarting signals (which were the default long ago) | ||
78 | * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. | ||
79 | * SA_RESETHAND clears the handler when the signal is delivered. | ||
80 | * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. | ||
81 | * SA_NODEFER prevents the current signal from being masked in the handler. | ||
82 | * | ||
83 | * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single | ||
84 | * Unix names RESETHAND and NODEFER respectively. | ||
85 | */ | ||
86 | #define SA_NOCLDSTOP 0x00000001 | ||
87 | #define SA_NOCLDWAIT 0x00000002 | ||
88 | #define SA_SIGINFO 0x00000004 | ||
89 | #define SA_ONSTACK 0x08000000 | ||
90 | #define SA_RESTART 0x10000000 | ||
91 | #define SA_NODEFER 0x40000000 | ||
92 | #define SA_RESETHAND 0x80000000 | ||
93 | |||
94 | #define SA_NOMASK SA_NODEFER | ||
95 | #define SA_ONESHOT SA_RESETHAND | ||
96 | |||
97 | /* | ||
98 | * sigaltstack controls | ||
99 | */ | ||
100 | #define SS_ONSTACK 1 | ||
101 | #define SS_DISABLE 2 | ||
102 | |||
103 | #define MINSIGSTKSZ 2048 | ||
104 | #define SIGSTKSZ 8192 | ||
105 | |||
106 | #include <asm-generic/signal.h> | ||
107 | |||
108 | #ifdef __KERNEL__ | ||
109 | struct old_sigaction { | ||
110 | __sighandler_t sa_handler; | ||
111 | old_sigset_t sa_mask; | ||
112 | unsigned long sa_flags; | ||
113 | __sigrestore_t sa_restorer; | ||
114 | }; | ||
115 | |||
116 | struct sigaction { | ||
117 | __sighandler_t sa_handler; | ||
118 | unsigned long sa_flags; | ||
119 | __sigrestore_t sa_restorer; | ||
120 | sigset_t sa_mask; /* mask last for extensibility */ | ||
121 | }; | ||
122 | |||
123 | struct k_sigaction { | ||
124 | struct sigaction sa; | ||
125 | }; | ||
126 | #else | ||
127 | /* Here we must cater to libcs that poke about in kernel headers. */ | ||
128 | |||
129 | struct sigaction { | ||
130 | union { | ||
131 | __sighandler_t _sa_handler; | ||
132 | void (*_sa_sigaction)(int, struct siginfo *, void *); | ||
133 | } _u; | ||
134 | sigset_t sa_mask; | ||
135 | unsigned long sa_flags; | ||
136 | void (*sa_restorer)(void); | ||
137 | }; | ||
138 | |||
139 | #define sa_handler _u._sa_handler | ||
140 | #define sa_sigaction _u._sa_sigaction | ||
141 | |||
142 | #endif /* __KERNEL__ */ | ||
143 | |||
144 | typedef struct sigaltstack { | ||
145 | void __user *ss_sp; | ||
146 | int ss_flags; | ||
147 | size_t ss_size; | ||
148 | } stack_t; | ||
149 | |||
150 | #ifdef __KERNEL__ | ||
151 | #include <asm/sigcontext.h> | ||
152 | |||
153 | #ifndef __uClinux__ | ||
154 | #define __HAVE_ARCH_SIG_BITOPS | ||
155 | |||
156 | static inline void sigaddset(sigset_t *set, int _sig) | ||
157 | { | ||
158 | asm ("bfset %0{%1,#1}" | ||
159 | : "+od" (*set) | ||
160 | : "id" ((_sig - 1) ^ 31) | ||
161 | : "cc"); | ||
162 | } | ||
163 | |||
164 | static inline void sigdelset(sigset_t *set, int _sig) | ||
165 | { | ||
166 | asm ("bfclr %0{%1,#1}" | ||
167 | : "+od" (*set) | ||
168 | : "id" ((_sig - 1) ^ 31) | ||
169 | : "cc"); | ||
170 | } | ||
171 | |||
172 | static inline int __const_sigismember(sigset_t *set, int _sig) | ||
173 | { | ||
174 | unsigned long sig = _sig - 1; | ||
175 | return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW)); | ||
176 | } | ||
177 | |||
178 | static inline int __gen_sigismember(sigset_t *set, int _sig) | ||
179 | { | ||
180 | int ret; | ||
181 | asm ("bfextu %1{%2,#1},%0" | ||
182 | : "=d" (ret) | ||
183 | : "od" (*set), "id" ((_sig-1) ^ 31) | ||
184 | : "cc"); | ||
185 | return ret; | ||
186 | } | ||
187 | |||
188 | #define sigismember(set,sig) \ | ||
189 | (__builtin_constant_p(sig) ? \ | ||
190 | __const_sigismember(set,sig) : \ | ||
191 | __gen_sigismember(set,sig)) | ||
192 | |||
193 | static inline int sigfindinword(unsigned long word) | ||
194 | { | ||
195 | asm ("bfffo %1{#0,#0},%0" | ||
196 | : "=d" (word) | ||
197 | : "d" (word & -word) | ||
198 | : "cc"); | ||
199 | return word ^ 31; | ||
200 | } | ||
201 | |||
202 | struct pt_regs; | ||
203 | extern void ptrace_signal_deliver(struct pt_regs *regs, void *cookie); | ||
204 | |||
205 | #else | ||
206 | |||
207 | #undef __HAVE_ARCH_SIG_BITOPS | ||
208 | #define ptrace_signal_deliver(regs, cookie) do { } while (0) | ||
209 | |||
210 | #endif /* __uClinux__ */ | ||
211 | #endif /* __KERNEL__ */ | ||
212 | |||
213 | #endif /* _M68K_SIGNAL_H */ | ||
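A note on the merged signal.h above: the 680x0 bit-field instructions (bfset, bfclr, bfextu) are kept for the sigset helpers, and only uClinux builds drop __HAVE_ARCH_SIG_BITOPS and fall back to the asm-generic versions. sigismember() dispatches on __builtin_constant_p(): a compile-time-constant signal number goes to the plain-C __const_sigismember() so the test can be folded by the compiler, anything else goes to the bfextu variant. The sketch below is a portable, self-contained rendering of what those helpers compute; the my_ names are illustrative, not kernel API, and the "(_sig - 1) ^ 31" offset in the asm exists because the bit-field instructions count bit 0 from the most significant end of the word.

#include <stdio.h>

#define MY_NSIG      64
#define MY_NSIG_BPW  32

typedef struct { unsigned long sig[MY_NSIG / MY_NSIG_BPW]; } my_sigset_t;

/* Signal numbers are 1-based: signal n lives at bit (n-1) % 32 of word
 * (n-1) / 32.  The kernel's bfset/bfclr/bfextu asm computes the same
 * thing; its "(_sig - 1) ^ 31" offset keeps the word index (the XOR
 * only touches the low five bits) and mirrors the bit position inside
 * the word, because the m68k bit-field instructions number bit 0 at
 * the most significant end. */
static void my_sigaddset(my_sigset_t *set, int sig)
{
	unsigned long n = sig - 1;
	set->sig[n / MY_NSIG_BPW] |= 1UL << (n % MY_NSIG_BPW);
}

static int my_sigismember(const my_sigset_t *set, int sig)
{
	unsigned long n = sig - 1;
	return 1 & (set->sig[n / MY_NSIG_BPW] >> (n % MY_NSIG_BPW));
}

int main(void)
{
	my_sigset_t set = { { 0 } };
	my_sigaddset(&set, 10);		/* SIGUSR1 */
	my_sigaddset(&set, 33);		/* first signal past SIGRTMIN */
	printf("%d %d %d\n",
	       my_sigismember(&set, 10),
	       my_sigismember(&set, 33),
	       my_sigismember(&set, 11));	/* prints: 1 1 0 */
	return 0;
}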
diff --git a/arch/m68k/include/asm/signal_mm.h b/arch/m68k/include/asm/signal_mm.h deleted file mode 100644 index 3db8a81942f1..000000000000 --- a/arch/m68k/include/asm/signal_mm.h +++ /dev/null | |||
@@ -1,206 +0,0 @@ | |||
1 | #ifndef _M68K_SIGNAL_H | ||
2 | #define _M68K_SIGNAL_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | |||
6 | /* Avoid too many header ordering problems. */ | ||
7 | struct siginfo; | ||
8 | |||
9 | #ifdef __KERNEL__ | ||
10 | /* Most things should be clean enough to redefine this at will, if care | ||
11 | is taken to make libc match. */ | ||
12 | |||
13 | #define _NSIG 64 | ||
14 | #define _NSIG_BPW 32 | ||
15 | #define _NSIG_WORDS (_NSIG / _NSIG_BPW) | ||
16 | |||
17 | typedef unsigned long old_sigset_t; /* at least 32 bits */ | ||
18 | |||
19 | typedef struct { | ||
20 | unsigned long sig[_NSIG_WORDS]; | ||
21 | } sigset_t; | ||
22 | |||
23 | #else | ||
24 | /* Here we must cater to libcs that poke about in kernel headers. */ | ||
25 | |||
26 | #define NSIG 32 | ||
27 | typedef unsigned long sigset_t; | ||
28 | |||
29 | #endif /* __KERNEL__ */ | ||
30 | |||
31 | #define SIGHUP 1 | ||
32 | #define SIGINT 2 | ||
33 | #define SIGQUIT 3 | ||
34 | #define SIGILL 4 | ||
35 | #define SIGTRAP 5 | ||
36 | #define SIGABRT 6 | ||
37 | #define SIGIOT 6 | ||
38 | #define SIGBUS 7 | ||
39 | #define SIGFPE 8 | ||
40 | #define SIGKILL 9 | ||
41 | #define SIGUSR1 10 | ||
42 | #define SIGSEGV 11 | ||
43 | #define SIGUSR2 12 | ||
44 | #define SIGPIPE 13 | ||
45 | #define SIGALRM 14 | ||
46 | #define SIGTERM 15 | ||
47 | #define SIGSTKFLT 16 | ||
48 | #define SIGCHLD 17 | ||
49 | #define SIGCONT 18 | ||
50 | #define SIGSTOP 19 | ||
51 | #define SIGTSTP 20 | ||
52 | #define SIGTTIN 21 | ||
53 | #define SIGTTOU 22 | ||
54 | #define SIGURG 23 | ||
55 | #define SIGXCPU 24 | ||
56 | #define SIGXFSZ 25 | ||
57 | #define SIGVTALRM 26 | ||
58 | #define SIGPROF 27 | ||
59 | #define SIGWINCH 28 | ||
60 | #define SIGIO 29 | ||
61 | #define SIGPOLL SIGIO | ||
62 | /* | ||
63 | #define SIGLOST 29 | ||
64 | */ | ||
65 | #define SIGPWR 30 | ||
66 | #define SIGSYS 31 | ||
67 | #define SIGUNUSED 31 | ||
68 | |||
69 | /* These should not be considered constants from userland. */ | ||
70 | #define SIGRTMIN 32 | ||
71 | #define SIGRTMAX _NSIG | ||
72 | |||
73 | /* | ||
74 | * SA_FLAGS values: | ||
75 | * | ||
76 | * SA_ONSTACK indicates that a registered stack_t will be used. | ||
77 | * SA_RESTART flag to get restarting signals (which were the default long ago) | ||
78 | * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. | ||
79 | * SA_RESETHAND clears the handler when the signal is delivered. | ||
80 | * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. | ||
81 | * SA_NODEFER prevents the current signal from being masked in the handler. | ||
82 | * | ||
83 | * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single | ||
84 | * Unix names RESETHAND and NODEFER respectively. | ||
85 | */ | ||
86 | #define SA_NOCLDSTOP 0x00000001 | ||
87 | #define SA_NOCLDWAIT 0x00000002 | ||
88 | #define SA_SIGINFO 0x00000004 | ||
89 | #define SA_ONSTACK 0x08000000 | ||
90 | #define SA_RESTART 0x10000000 | ||
91 | #define SA_NODEFER 0x40000000 | ||
92 | #define SA_RESETHAND 0x80000000 | ||
93 | |||
94 | #define SA_NOMASK SA_NODEFER | ||
95 | #define SA_ONESHOT SA_RESETHAND | ||
96 | |||
97 | /* | ||
98 | * sigaltstack controls | ||
99 | */ | ||
100 | #define SS_ONSTACK 1 | ||
101 | #define SS_DISABLE 2 | ||
102 | |||
103 | #define MINSIGSTKSZ 2048 | ||
104 | #define SIGSTKSZ 8192 | ||
105 | |||
106 | #include <asm-generic/signal.h> | ||
107 | |||
108 | #ifdef __KERNEL__ | ||
109 | struct old_sigaction { | ||
110 | __sighandler_t sa_handler; | ||
111 | old_sigset_t sa_mask; | ||
112 | unsigned long sa_flags; | ||
113 | __sigrestore_t sa_restorer; | ||
114 | }; | ||
115 | |||
116 | struct sigaction { | ||
117 | __sighandler_t sa_handler; | ||
118 | unsigned long sa_flags; | ||
119 | __sigrestore_t sa_restorer; | ||
120 | sigset_t sa_mask; /* mask last for extensibility */ | ||
121 | }; | ||
122 | |||
123 | struct k_sigaction { | ||
124 | struct sigaction sa; | ||
125 | }; | ||
126 | #else | ||
127 | /* Here we must cater to libcs that poke about in kernel headers. */ | ||
128 | |||
129 | struct sigaction { | ||
130 | union { | ||
131 | __sighandler_t _sa_handler; | ||
132 | void (*_sa_sigaction)(int, struct siginfo *, void *); | ||
133 | } _u; | ||
134 | sigset_t sa_mask; | ||
135 | unsigned long sa_flags; | ||
136 | void (*sa_restorer)(void); | ||
137 | }; | ||
138 | |||
139 | #define sa_handler _u._sa_handler | ||
140 | #define sa_sigaction _u._sa_sigaction | ||
141 | |||
142 | #endif /* __KERNEL__ */ | ||
143 | |||
144 | typedef struct sigaltstack { | ||
145 | void __user *ss_sp; | ||
146 | int ss_flags; | ||
147 | size_t ss_size; | ||
148 | } stack_t; | ||
149 | |||
150 | #ifdef __KERNEL__ | ||
151 | #include <asm/sigcontext.h> | ||
152 | |||
153 | #define __HAVE_ARCH_SIG_BITOPS | ||
154 | |||
155 | static inline void sigaddset(sigset_t *set, int _sig) | ||
156 | { | ||
157 | asm ("bfset %0{%1,#1}" | ||
158 | : "+od" (*set) | ||
159 | : "id" ((_sig - 1) ^ 31) | ||
160 | : "cc"); | ||
161 | } | ||
162 | |||
163 | static inline void sigdelset(sigset_t *set, int _sig) | ||
164 | { | ||
165 | asm ("bfclr %0{%1,#1}" | ||
166 | : "+od" (*set) | ||
167 | : "id" ((_sig - 1) ^ 31) | ||
168 | : "cc"); | ||
169 | } | ||
170 | |||
171 | static inline int __const_sigismember(sigset_t *set, int _sig) | ||
172 | { | ||
173 | unsigned long sig = _sig - 1; | ||
174 | return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW)); | ||
175 | } | ||
176 | |||
177 | static inline int __gen_sigismember(sigset_t *set, int _sig) | ||
178 | { | ||
179 | int ret; | ||
180 | asm ("bfextu %1{%2,#1},%0" | ||
181 | : "=d" (ret) | ||
182 | : "od" (*set), "id" ((_sig-1) ^ 31) | ||
183 | : "cc"); | ||
184 | return ret; | ||
185 | } | ||
186 | |||
187 | #define sigismember(set,sig) \ | ||
188 | (__builtin_constant_p(sig) ? \ | ||
189 | __const_sigismember(set,sig) : \ | ||
190 | __gen_sigismember(set,sig)) | ||
191 | |||
192 | static inline int sigfindinword(unsigned long word) | ||
193 | { | ||
194 | asm ("bfffo %1{#0,#0},%0" | ||
195 | : "=d" (word) | ||
196 | : "d" (word & -word) | ||
197 | : "cc"); | ||
198 | return word ^ 31; | ||
199 | } | ||
200 | |||
201 | struct pt_regs; | ||
202 | extern void ptrace_signal_deliver(struct pt_regs *regs, void *cookie); | ||
203 | |||
204 | #endif /* __KERNEL__ */ | ||
205 | |||
206 | #endif /* _M68K_SIGNAL_H */ | ||
diff --git a/arch/m68k/include/asm/signal_no.h b/arch/m68k/include/asm/signal_no.h deleted file mode 100644 index 216c08be54a0..000000000000 --- a/arch/m68k/include/asm/signal_no.h +++ /dev/null | |||
@@ -1,159 +0,0 @@ | |||
1 | #ifndef _M68KNOMMU_SIGNAL_H | ||
2 | #define _M68KNOMMU_SIGNAL_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | |||
6 | /* Avoid too many header ordering problems. */ | ||
7 | struct siginfo; | ||
8 | |||
9 | #ifdef __KERNEL__ | ||
10 | /* Most things should be clean enough to redefine this at will, if care | ||
11 | is taken to make libc match. */ | ||
12 | |||
13 | #define _NSIG 64 | ||
14 | #define _NSIG_BPW 32 | ||
15 | #define _NSIG_WORDS (_NSIG / _NSIG_BPW) | ||
16 | |||
17 | typedef unsigned long old_sigset_t; /* at least 32 bits */ | ||
18 | |||
19 | typedef struct { | ||
20 | unsigned long sig[_NSIG_WORDS]; | ||
21 | } sigset_t; | ||
22 | |||
23 | #else | ||
24 | /* Here we must cater to libcs that poke about in kernel headers. */ | ||
25 | |||
26 | #define NSIG 32 | ||
27 | typedef unsigned long sigset_t; | ||
28 | |||
29 | #endif /* __KERNEL__ */ | ||
30 | |||
31 | #define SIGHUP 1 | ||
32 | #define SIGINT 2 | ||
33 | #define SIGQUIT 3 | ||
34 | #define SIGILL 4 | ||
35 | #define SIGTRAP 5 | ||
36 | #define SIGABRT 6 | ||
37 | #define SIGIOT 6 | ||
38 | #define SIGBUS 7 | ||
39 | #define SIGFPE 8 | ||
40 | #define SIGKILL 9 | ||
41 | #define SIGUSR1 10 | ||
42 | #define SIGSEGV 11 | ||
43 | #define SIGUSR2 12 | ||
44 | #define SIGPIPE 13 | ||
45 | #define SIGALRM 14 | ||
46 | #define SIGTERM 15 | ||
47 | #define SIGSTKFLT 16 | ||
48 | #define SIGCHLD 17 | ||
49 | #define SIGCONT 18 | ||
50 | #define SIGSTOP 19 | ||
51 | #define SIGTSTP 20 | ||
52 | #define SIGTTIN 21 | ||
53 | #define SIGTTOU 22 | ||
54 | #define SIGURG 23 | ||
55 | #define SIGXCPU 24 | ||
56 | #define SIGXFSZ 25 | ||
57 | #define SIGVTALRM 26 | ||
58 | #define SIGPROF 27 | ||
59 | #define SIGWINCH 28 | ||
60 | #define SIGIO 29 | ||
61 | #define SIGPOLL SIGIO | ||
62 | /* | ||
63 | #define SIGLOST 29 | ||
64 | */ | ||
65 | #define SIGPWR 30 | ||
66 | #define SIGSYS 31 | ||
67 | #define SIGUNUSED 31 | ||
68 | |||
69 | /* These should not be considered constants from userland. */ | ||
70 | #define SIGRTMIN 32 | ||
71 | #define SIGRTMAX _NSIG | ||
72 | |||
73 | /* | ||
74 | * SA_FLAGS values: | ||
75 | * | ||
76 | * SA_ONSTACK indicates that a registered stack_t will be used. | ||
77 | * SA_RESTART flag to get restarting signals (which were the default long ago) | ||
78 | * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. | ||
79 | * SA_RESETHAND clears the handler when the signal is delivered. | ||
80 | * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. | ||
81 | * SA_NODEFER prevents the current signal from being masked in the handler. | ||
82 | * | ||
83 | * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single | ||
84 | * Unix names RESETHAND and NODEFER respectively. | ||
85 | */ | ||
86 | #define SA_NOCLDSTOP 0x00000001 | ||
87 | #define SA_NOCLDWAIT 0x00000002 | ||
88 | #define SA_SIGINFO 0x00000004 | ||
89 | #define SA_ONSTACK 0x08000000 | ||
90 | #define SA_RESTART 0x10000000 | ||
91 | #define SA_NODEFER 0x40000000 | ||
92 | #define SA_RESETHAND 0x80000000 | ||
93 | |||
94 | #define SA_NOMASK SA_NODEFER | ||
95 | #define SA_ONESHOT SA_RESETHAND | ||
96 | |||
97 | /* | ||
98 | * sigaltstack controls | ||
99 | */ | ||
100 | #define SS_ONSTACK 1 | ||
101 | #define SS_DISABLE 2 | ||
102 | |||
103 | #define MINSIGSTKSZ 2048 | ||
104 | #define SIGSTKSZ 8192 | ||
105 | |||
106 | #include <asm-generic/signal.h> | ||
107 | |||
108 | #ifdef __KERNEL__ | ||
109 | struct old_sigaction { | ||
110 | __sighandler_t sa_handler; | ||
111 | old_sigset_t sa_mask; | ||
112 | unsigned long sa_flags; | ||
113 | void (*sa_restorer)(void); | ||
114 | }; | ||
115 | |||
116 | struct sigaction { | ||
117 | __sighandler_t sa_handler; | ||
118 | unsigned long sa_flags; | ||
119 | void (*sa_restorer)(void); | ||
120 | sigset_t sa_mask; /* mask last for extensibility */ | ||
121 | }; | ||
122 | |||
123 | struct k_sigaction { | ||
124 | struct sigaction sa; | ||
125 | }; | ||
126 | #else | ||
127 | /* Here we must cater to libcs that poke about in kernel headers. */ | ||
128 | |||
129 | struct sigaction { | ||
130 | union { | ||
131 | __sighandler_t _sa_handler; | ||
132 | void (*_sa_sigaction)(int, struct siginfo *, void *); | ||
133 | } _u; | ||
134 | sigset_t sa_mask; | ||
135 | unsigned long sa_flags; | ||
136 | void (*sa_restorer)(void); | ||
137 | }; | ||
138 | |||
139 | #define sa_handler _u._sa_handler | ||
140 | #define sa_sigaction _u._sa_sigaction | ||
141 | |||
142 | #endif /* __KERNEL__ */ | ||
143 | |||
144 | typedef struct sigaltstack { | ||
145 | void *ss_sp; | ||
146 | int ss_flags; | ||
147 | size_t ss_size; | ||
148 | } stack_t; | ||
149 | |||
150 | #ifdef __KERNEL__ | ||
151 | |||
152 | #include <asm/sigcontext.h> | ||
153 | #undef __HAVE_ARCH_SIG_BITOPS | ||
154 | |||
155 | #define ptrace_signal_deliver(regs, cookie) do { } while (0) | ||
156 | |||
157 | #endif /* __KERNEL__ */ | ||
158 | |||
159 | #endif /* _M68KNOMMU_SIGNAL_H */ | ||
diff --git a/arch/m68k/include/asm/swab.h b/arch/m68k/include/asm/swab.h index 7d7dde1c73ec..9e3054ea59e9 100644 --- a/arch/m68k/include/asm/swab.h +++ b/arch/m68k/include/asm/swab.h | |||
@@ -1,5 +1,27 @@ | |||
1 | #ifdef __uClinux__ | 1 | #ifndef _M68K_SWAB_H |
2 | #include "swab_no.h" | 2 | #define _M68K_SWAB_H |
3 | #else | 3 | |
4 | #include "swab_mm.h" | 4 | #include <asm/types.h> |
5 | #include <linux/compiler.h> | ||
6 | |||
7 | #define __SWAB_64_THRU_32__ | ||
8 | |||
9 | #if defined (__mcfisaaplus__) || defined (__mcfisac__) | ||
10 | static inline __attribute_const__ __u32 __arch_swab32(__u32 val) | ||
11 | { | ||
12 | __asm__("byterev %0" : "=d" (val) : "0" (val)); | ||
13 | return val; | ||
14 | } | ||
15 | |||
16 | #define __arch_swab32 __arch_swab32 | ||
17 | #elif !defined(__uClinux__) | ||
18 | |||
19 | static inline __attribute_const__ __u32 __arch_swab32(__u32 val) | ||
20 | { | ||
21 | __asm__("rolw #8,%0; swap %0; rolw #8,%0" : "=d" (val) : "0" (val)); | ||
22 | return val; | ||
23 | } | ||
24 | #define __arch_swab32 __arch_swab32 | ||
5 | #endif | 25 | #endif |
26 | |||
27 | #endif /* _M68K_SWAB_H */ | ||
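A note on the merged swab.h above: ColdFire parts with ISA A+ or ISA C get the single byterev instruction, classic 680x0 (the !__uClinux__ branch) gets the rolw #8 / swap / rolw #8 sequence (swap the bytes of the low word, exchange the 16-bit halves, swap the bytes of the new low word, i.e. a full 32-bit byte reversal), and the remaining ColdFire parts define no __arch_swab32 at all and take the generic C path. The sketch below is an illustrative, stand-alone approximation of that generic path and of the composition __SWAB_64_THRU_32__ asks for; the function names are made up for the example.

#include <stdio.h>
#include <stdint.h>

/* Roughly what the generic shift-and-mask fallback computes when no
 * __arch_swab32 is provided by the architecture. */
static inline uint32_t fallback_swab32(uint32_t x)
{
	return ((x & 0x000000ffU) << 24) |
	       ((x & 0x0000ff00U) <<  8) |
	       ((x & 0x00ff0000U) >>  8) |
	       ((x & 0xff000000U) >> 24);
}

/* __SWAB_64_THRU_32__ asks the generic code to build the 64-bit swap
 * from two 32-bit swaps: exchange the halves, byte-swap each half. */
static inline uint64_t swab64_thru_32(uint64_t x)
{
	uint32_t hi = (uint32_t)(x >> 32);
	uint32_t lo = (uint32_t)x;
	return ((uint64_t)fallback_swab32(lo) << 32) | fallback_swab32(hi);
}

int main(void)
{
	printf("%08x\n", (unsigned)fallback_swab32(0x12345678));	/* 78563412 */
	printf("%016llx\n",
	       (unsigned long long)swab64_thru_32(0x0102030405060708ULL));
	/* 0807060504030201 */
	return 0;
}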
diff --git a/arch/m68k/include/asm/swab_mm.h b/arch/m68k/include/asm/swab_mm.h deleted file mode 100644 index 7221e3066825..000000000000 --- a/arch/m68k/include/asm/swab_mm.h +++ /dev/null | |||
@@ -1,16 +0,0 @@ | |||
1 | #ifndef _M68K_SWAB_H | ||
2 | #define _M68K_SWAB_H | ||
3 | |||
4 | #include <asm/types.h> | ||
5 | #include <linux/compiler.h> | ||
6 | |||
7 | #define __SWAB_64_THRU_32__ | ||
8 | |||
9 | static inline __attribute_const__ __u32 __arch_swab32(__u32 val) | ||
10 | { | ||
11 | __asm__("rolw #8,%0; swap %0; rolw #8,%0" : "=d" (val) : "0" (val)); | ||
12 | return val; | ||
13 | } | ||
14 | #define __arch_swab32 __arch_swab32 | ||
15 | |||
16 | #endif /* _M68K_SWAB_H */ | ||
diff --git a/arch/m68k/include/asm/swab_no.h b/arch/m68k/include/asm/swab_no.h deleted file mode 100644 index e582257db300..000000000000 --- a/arch/m68k/include/asm/swab_no.h +++ /dev/null | |||
@@ -1,24 +0,0 @@ | |||
1 | #ifndef _M68KNOMMU_SWAB_H | ||
2 | #define _M68KNOMMU_SWAB_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | |||
6 | #if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__) | ||
7 | # define __SWAB_64_THRU_32__ | ||
8 | #endif | ||
9 | |||
10 | #if defined (__mcfisaaplus__) || defined (__mcfisac__) | ||
11 | static inline __attribute_const__ __u32 __arch_swab32(__u32 val) | ||
12 | { | ||
13 | asm( | ||
14 | "byterev %0" | ||
15 | : "=d" (val) | ||
16 | : "0" (val) | ||
17 | ); | ||
18 | return val; | ||
19 | } | ||
20 | |||
21 | #define __arch_swab32 __arch_swab32 | ||
22 | #endif | ||
23 | |||
24 | #endif /* _M68KNOMMU_SWAB_H */ | ||
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index df1d9d4cb1fd..3c19027331fa 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h | |||
@@ -1,5 +1,372 @@ | |||
1 | #ifdef __uClinux__ | 1 | #ifndef _ASM_M68K_UNISTD_H_ |
2 | #include "unistd_no.h" | 2 | #define _ASM_M68K_UNISTD_H_ |
3 | #else | 3 | |
4 | #include "unistd_mm.h" | 4 | /* |
5 | #endif | 5 | * This file contains the system call numbers. |
6 | */ | ||
7 | |||
8 | #define __NR_restart_syscall 0 | ||
9 | #define __NR_exit 1 | ||
10 | #define __NR_fork 2 | ||
11 | #define __NR_read 3 | ||
12 | #define __NR_write 4 | ||
13 | #define __NR_open 5 | ||
14 | #define __NR_close 6 | ||
15 | #define __NR_waitpid 7 | ||
16 | #define __NR_creat 8 | ||
17 | #define __NR_link 9 | ||
18 | #define __NR_unlink 10 | ||
19 | #define __NR_execve 11 | ||
20 | #define __NR_chdir 12 | ||
21 | #define __NR_time 13 | ||
22 | #define __NR_mknod 14 | ||
23 | #define __NR_chmod 15 | ||
24 | #define __NR_chown 16 | ||
25 | #define __NR_break 17 | ||
26 | #define __NR_oldstat 18 | ||
27 | #define __NR_lseek 19 | ||
28 | #define __NR_getpid 20 | ||
29 | #define __NR_mount 21 | ||
30 | #define __NR_umount 22 | ||
31 | #define __NR_setuid 23 | ||
32 | #define __NR_getuid 24 | ||
33 | #define __NR_stime 25 | ||
34 | #define __NR_ptrace 26 | ||
35 | #define __NR_alarm 27 | ||
36 | #define __NR_oldfstat 28 | ||
37 | #define __NR_pause 29 | ||
38 | #define __NR_utime 30 | ||
39 | #define __NR_stty 31 | ||
40 | #define __NR_gtty 32 | ||
41 | #define __NR_access 33 | ||
42 | #define __NR_nice 34 | ||
43 | #define __NR_ftime 35 | ||
44 | #define __NR_sync 36 | ||
45 | #define __NR_kill 37 | ||
46 | #define __NR_rename 38 | ||
47 | #define __NR_mkdir 39 | ||
48 | #define __NR_rmdir 40 | ||
49 | #define __NR_dup 41 | ||
50 | #define __NR_pipe 42 | ||
51 | #define __NR_times 43 | ||
52 | #define __NR_prof 44 | ||
53 | #define __NR_brk 45 | ||
54 | #define __NR_setgid 46 | ||
55 | #define __NR_getgid 47 | ||
56 | #define __NR_signal 48 | ||
57 | #define __NR_geteuid 49 | ||
58 | #define __NR_getegid 50 | ||
59 | #define __NR_acct 51 | ||
60 | #define __NR_umount2 52 | ||
61 | #define __NR_lock 53 | ||
62 | #define __NR_ioctl 54 | ||
63 | #define __NR_fcntl 55 | ||
64 | #define __NR_mpx 56 | ||
65 | #define __NR_setpgid 57 | ||
66 | #define __NR_ulimit 58 | ||
67 | #define __NR_oldolduname 59 | ||
68 | #define __NR_umask 60 | ||
69 | #define __NR_chroot 61 | ||
70 | #define __NR_ustat 62 | ||
71 | #define __NR_dup2 63 | ||
72 | #define __NR_getppid 64 | ||
73 | #define __NR_getpgrp 65 | ||
74 | #define __NR_setsid 66 | ||
75 | #define __NR_sigaction 67 | ||
76 | #define __NR_sgetmask 68 | ||
77 | #define __NR_ssetmask 69 | ||
78 | #define __NR_setreuid 70 | ||
79 | #define __NR_setregid 71 | ||
80 | #define __NR_sigsuspend 72 | ||
81 | #define __NR_sigpending 73 | ||
82 | #define __NR_sethostname 74 | ||
83 | #define __NR_setrlimit 75 | ||
84 | #define __NR_getrlimit 76 | ||
85 | #define __NR_getrusage 77 | ||
86 | #define __NR_gettimeofday 78 | ||
87 | #define __NR_settimeofday 79 | ||
88 | #define __NR_getgroups 80 | ||
89 | #define __NR_setgroups 81 | ||
90 | #define __NR_select 82 | ||
91 | #define __NR_symlink 83 | ||
92 | #define __NR_oldlstat 84 | ||
93 | #define __NR_readlink 85 | ||
94 | #define __NR_uselib 86 | ||
95 | #define __NR_swapon 87 | ||
96 | #define __NR_reboot 88 | ||
97 | #define __NR_readdir 89 | ||
98 | #define __NR_mmap 90 | ||
99 | #define __NR_munmap 91 | ||
100 | #define __NR_truncate 92 | ||
101 | #define __NR_ftruncate 93 | ||
102 | #define __NR_fchmod 94 | ||
103 | #define __NR_fchown 95 | ||
104 | #define __NR_getpriority 96 | ||
105 | #define __NR_setpriority 97 | ||
106 | #define __NR_profil 98 | ||
107 | #define __NR_statfs 99 | ||
108 | #define __NR_fstatfs 100 | ||
109 | #define __NR_ioperm 101 | ||
110 | #define __NR_socketcall 102 | ||
111 | #define __NR_syslog 103 | ||
112 | #define __NR_setitimer 104 | ||
113 | #define __NR_getitimer 105 | ||
114 | #define __NR_stat 106 | ||
115 | #define __NR_lstat 107 | ||
116 | #define __NR_fstat 108 | ||
117 | #define __NR_olduname 109 | ||
118 | #define __NR_iopl /* 110 */ not supported | ||
119 | #define __NR_vhangup 111 | ||
120 | #define __NR_idle /* 112 */ Obsolete | ||
121 | #define __NR_vm86 /* 113 */ not supported | ||
122 | #define __NR_wait4 114 | ||
123 | #define __NR_swapoff 115 | ||
124 | #define __NR_sysinfo 116 | ||
125 | #define __NR_ipc 117 | ||
126 | #define __NR_fsync 118 | ||
127 | #define __NR_sigreturn 119 | ||
128 | #define __NR_clone 120 | ||
129 | #define __NR_setdomainname 121 | ||
130 | #define __NR_uname 122 | ||
131 | #define __NR_cacheflush 123 | ||
132 | #define __NR_adjtimex 124 | ||
133 | #define __NR_mprotect 125 | ||
134 | #define __NR_sigprocmask 126 | ||
135 | #define __NR_create_module 127 | ||
136 | #define __NR_init_module 128 | ||
137 | #define __NR_delete_module 129 | ||
138 | #define __NR_get_kernel_syms 130 | ||
139 | #define __NR_quotactl 131 | ||
140 | #define __NR_getpgid 132 | ||
141 | #define __NR_fchdir 133 | ||
142 | #define __NR_bdflush 134 | ||
143 | #define __NR_sysfs 135 | ||
144 | #define __NR_personality 136 | ||
145 | #define __NR_afs_syscall 137 /* Syscall for Andrew File System */ | ||
146 | #define __NR_setfsuid 138 | ||
147 | #define __NR_setfsgid 139 | ||
148 | #define __NR__llseek 140 | ||
149 | #define __NR_getdents 141 | ||
150 | #define __NR__newselect 142 | ||
151 | #define __NR_flock 143 | ||
152 | #define __NR_msync 144 | ||
153 | #define __NR_readv 145 | ||
154 | #define __NR_writev 146 | ||
155 | #define __NR_getsid 147 | ||
156 | #define __NR_fdatasync 148 | ||
157 | #define __NR__sysctl 149 | ||
158 | #define __NR_mlock 150 | ||
159 | #define __NR_munlock 151 | ||
160 | #define __NR_mlockall 152 | ||
161 | #define __NR_munlockall 153 | ||
162 | #define __NR_sched_setparam 154 | ||
163 | #define __NR_sched_getparam 155 | ||
164 | #define __NR_sched_setscheduler 156 | ||
165 | #define __NR_sched_getscheduler 157 | ||
166 | #define __NR_sched_yield 158 | ||
167 | #define __NR_sched_get_priority_max 159 | ||
168 | #define __NR_sched_get_priority_min 160 | ||
169 | #define __NR_sched_rr_get_interval 161 | ||
170 | #define __NR_nanosleep 162 | ||
171 | #define __NR_mremap 163 | ||
172 | #define __NR_setresuid 164 | ||
173 | #define __NR_getresuid 165 | ||
174 | #define __NR_getpagesize 166 | ||
175 | #define __NR_query_module 167 | ||
176 | #define __NR_poll 168 | ||
177 | #define __NR_nfsservctl 169 | ||
178 | #define __NR_setresgid 170 | ||
179 | #define __NR_getresgid 171 | ||
180 | #define __NR_prctl 172 | ||
181 | #define __NR_rt_sigreturn 173 | ||
182 | #define __NR_rt_sigaction 174 | ||
183 | #define __NR_rt_sigprocmask 175 | ||
184 | #define __NR_rt_sigpending 176 | ||
185 | #define __NR_rt_sigtimedwait 177 | ||
186 | #define __NR_rt_sigqueueinfo 178 | ||
187 | #define __NR_rt_sigsuspend 179 | ||
188 | #define __NR_pread64 180 | ||
189 | #define __NR_pwrite64 181 | ||
190 | #define __NR_lchown 182 | ||
191 | #define __NR_getcwd 183 | ||
192 | #define __NR_capget 184 | ||
193 | #define __NR_capset 185 | ||
194 | #define __NR_sigaltstack 186 | ||
195 | #define __NR_sendfile 187 | ||
196 | #define __NR_getpmsg 188 /* some people actually want streams */ | ||
197 | #define __NR_putpmsg 189 /* some people actually want streams */ | ||
198 | #define __NR_vfork 190 | ||
199 | #define __NR_ugetrlimit 191 | ||
200 | #define __NR_mmap2 192 | ||
201 | #define __NR_truncate64 193 | ||
202 | #define __NR_ftruncate64 194 | ||
203 | #define __NR_stat64 195 | ||
204 | #define __NR_lstat64 196 | ||
205 | #define __NR_fstat64 197 | ||
206 | #define __NR_chown32 198 | ||
207 | #define __NR_getuid32 199 | ||
208 | #define __NR_getgid32 200 | ||
209 | #define __NR_geteuid32 201 | ||
210 | #define __NR_getegid32 202 | ||
211 | #define __NR_setreuid32 203 | ||
212 | #define __NR_setregid32 204 | ||
213 | #define __NR_getgroups32 205 | ||
214 | #define __NR_setgroups32 206 | ||
215 | #define __NR_fchown32 207 | ||
216 | #define __NR_setresuid32 208 | ||
217 | #define __NR_getresuid32 209 | ||
218 | #define __NR_setresgid32 210 | ||
219 | #define __NR_getresgid32 211 | ||
220 | #define __NR_lchown32 212 | ||
221 | #define __NR_setuid32 213 | ||
222 | #define __NR_setgid32 214 | ||
223 | #define __NR_setfsuid32 215 | ||
224 | #define __NR_setfsgid32 216 | ||
225 | #define __NR_pivot_root 217 | ||
226 | #define __NR_getdents64 220 | ||
227 | #define __NR_gettid 221 | ||
228 | #define __NR_tkill 222 | ||
229 | #define __NR_setxattr 223 | ||
230 | #define __NR_lsetxattr 224 | ||
231 | #define __NR_fsetxattr 225 | ||
232 | #define __NR_getxattr 226 | ||
233 | #define __NR_lgetxattr 227 | ||
234 | #define __NR_fgetxattr 228 | ||
235 | #define __NR_listxattr 229 | ||
236 | #define __NR_llistxattr 230 | ||
237 | #define __NR_flistxattr 231 | ||
238 | #define __NR_removexattr 232 | ||
239 | #define __NR_lremovexattr 233 | ||
240 | #define __NR_fremovexattr 234 | ||
241 | #define __NR_futex 235 | ||
242 | #define __NR_sendfile64 236 | ||
243 | #define __NR_mincore 237 | ||
244 | #define __NR_madvise 238 | ||
245 | #define __NR_fcntl64 239 | ||
246 | #define __NR_readahead 240 | ||
247 | #define __NR_io_setup 241 | ||
248 | #define __NR_io_destroy 242 | ||
249 | #define __NR_io_getevents 243 | ||
250 | #define __NR_io_submit 244 | ||
251 | #define __NR_io_cancel 245 | ||
252 | #define __NR_fadvise64 246 | ||
253 | #define __NR_exit_group 247 | ||
254 | #define __NR_lookup_dcookie 248 | ||
255 | #define __NR_epoll_create 249 | ||
256 | #define __NR_epoll_ctl 250 | ||
257 | #define __NR_epoll_wait 251 | ||
258 | #define __NR_remap_file_pages 252 | ||
259 | #define __NR_set_tid_address 253 | ||
260 | #define __NR_timer_create 254 | ||
261 | #define __NR_timer_settime 255 | ||
262 | #define __NR_timer_gettime 256 | ||
263 | #define __NR_timer_getoverrun 257 | ||
264 | #define __NR_timer_delete 258 | ||
265 | #define __NR_clock_settime 259 | ||
266 | #define __NR_clock_gettime 260 | ||
267 | #define __NR_clock_getres 261 | ||
268 | #define __NR_clock_nanosleep 262 | ||
269 | #define __NR_statfs64 263 | ||
270 | #define __NR_fstatfs64 264 | ||
271 | #define __NR_tgkill 265 | ||
272 | #define __NR_utimes 266 | ||
273 | #define __NR_fadvise64_64 267 | ||
274 | #define __NR_mbind 268 | ||
275 | #define __NR_get_mempolicy 269 | ||
276 | #define __NR_set_mempolicy 270 | ||
277 | #define __NR_mq_open 271 | ||
278 | #define __NR_mq_unlink 272 | ||
279 | #define __NR_mq_timedsend 273 | ||
280 | #define __NR_mq_timedreceive 274 | ||
281 | #define __NR_mq_notify 275 | ||
282 | #define __NR_mq_getsetattr 276 | ||
283 | #define __NR_waitid 277 | ||
284 | #define __NR_vserver 278 | ||
285 | #define __NR_add_key 279 | ||
286 | #define __NR_request_key 280 | ||
287 | #define __NR_keyctl 281 | ||
288 | #define __NR_ioprio_set 282 | ||
289 | #define __NR_ioprio_get 283 | ||
290 | #define __NR_inotify_init 284 | ||
291 | #define __NR_inotify_add_watch 285 | ||
292 | #define __NR_inotify_rm_watch 286 | ||
293 | #define __NR_migrate_pages 287 | ||
294 | #define __NR_openat 288 | ||
295 | #define __NR_mkdirat 289 | ||
296 | #define __NR_mknodat 290 | ||
297 | #define __NR_fchownat 291 | ||
298 | #define __NR_futimesat 292 | ||
299 | #define __NR_fstatat64 293 | ||
300 | #define __NR_unlinkat 294 | ||
301 | #define __NR_renameat 295 | ||
302 | #define __NR_linkat 296 | ||
303 | #define __NR_symlinkat 297 | ||
304 | #define __NR_readlinkat 298 | ||
305 | #define __NR_fchmodat 299 | ||
306 | #define __NR_faccessat 300 | ||
307 | #define __NR_pselect6 301 | ||
308 | #define __NR_ppoll 302 | ||
309 | #define __NR_unshare 303 | ||
310 | #define __NR_set_robust_list 304 | ||
311 | #define __NR_get_robust_list 305 | ||
312 | #define __NR_splice 306 | ||
313 | #define __NR_sync_file_range 307 | ||
314 | #define __NR_tee 308 | ||
315 | #define __NR_vmsplice 309 | ||
316 | #define __NR_move_pages 310 | ||
317 | #define __NR_sched_setaffinity 311 | ||
318 | #define __NR_sched_getaffinity 312 | ||
319 | #define __NR_kexec_load 313 | ||
320 | #define __NR_getcpu 314 | ||
321 | #define __NR_epoll_pwait 315 | ||
322 | #define __NR_utimensat 316 | ||
323 | #define __NR_signalfd 317 | ||
324 | #define __NR_timerfd_create 318 | ||
325 | #define __NR_eventfd 319 | ||
326 | #define __NR_fallocate 320 | ||
327 | #define __NR_timerfd_settime 321 | ||
328 | #define __NR_timerfd_gettime 322 | ||
329 | #define __NR_signalfd4 323 | ||
330 | #define __NR_eventfd2 324 | ||
331 | #define __NR_epoll_create1 325 | ||
332 | #define __NR_dup3 326 | ||
333 | #define __NR_pipe2 327 | ||
334 | #define __NR_inotify_init1 328 | ||
335 | |||
336 | #ifdef __KERNEL__ | ||
337 | |||
338 | #define NR_syscalls 329 | ||
339 | |||
340 | #define __ARCH_WANT_IPC_PARSE_VERSION | ||
341 | #define __ARCH_WANT_OLD_READDIR | ||
342 | #define __ARCH_WANT_OLD_STAT | ||
343 | #define __ARCH_WANT_STAT64 | ||
344 | #define __ARCH_WANT_SYS_ALARM | ||
345 | #define __ARCH_WANT_SYS_GETHOSTNAME | ||
346 | #define __ARCH_WANT_SYS_PAUSE | ||
347 | #define __ARCH_WANT_SYS_SGETMASK | ||
348 | #define __ARCH_WANT_SYS_SIGNAL | ||
349 | #define __ARCH_WANT_SYS_TIME | ||
350 | #define __ARCH_WANT_SYS_UTIME | ||
351 | #define __ARCH_WANT_SYS_WAITPID | ||
352 | #define __ARCH_WANT_SYS_SOCKETCALL | ||
353 | #define __ARCH_WANT_SYS_FADVISE64 | ||
354 | #define __ARCH_WANT_SYS_GETPGRP | ||
355 | #define __ARCH_WANT_SYS_LLSEEK | ||
356 | #define __ARCH_WANT_SYS_NICE | ||
357 | #define __ARCH_WANT_SYS_OLD_GETRLIMIT | ||
358 | #define __ARCH_WANT_SYS_OLDUMOUNT | ||
359 | #define __ARCH_WANT_SYS_SIGPENDING | ||
360 | #define __ARCH_WANT_SYS_SIGPROCMASK | ||
361 | #define __ARCH_WANT_SYS_RT_SIGACTION | ||
362 | |||
363 | /* | ||
364 | * "Conditional" syscalls | ||
365 | * | ||
366 | * What we want is __attribute__((weak,alias("sys_ni_syscall"))), | ||
367 | * but it doesn't work on all toolchains, so we just do it by hand | ||
368 | */ | ||
369 | #define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") | ||
370 | |||
371 | #endif /* __KERNEL__ */ | ||
372 | #endif /* _ASM_M68K_UNISTD_H_ */ | ||
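A note on the merged unistd.h above: NR_syscalls is 329 because the table is indexed 0 through 328 (__NR_inotify_init1). The cond_syscall() macro at the end implements the weak-alias trick its comment describes: a syscall that may be configured out still gets a symbol, defined weakly as an alias of sys_ni_syscall, so a real (strong) definition elsewhere overrides it at link time; the raw assembler form is used because the attribute form did not work on all toolchains. The user-space sketch below illustrates the idea under stated assumptions (GCC on an ELF target, made-up sys_demo_* names); it is not kernel code.

#include <stdio.h>

/* The "not implemented" stub every conditional syscall falls back to. */
long sys_ni_syscall(void)
{
	return -38;		/* -ENOSYS on Linux */
}

/* The attribute form the kernel comment says it would prefer ... */
long sys_demo_optional(void) __attribute__((weak, alias("sys_ni_syscall")));

/* ... and the hand-rolled assembler form cond_syscall() actually emits. */
asm(".weak sys_demo_other\n\t.set sys_demo_other, sys_ni_syscall");
long sys_demo_other(void);

int main(void)
{
	/* With no strong definition linked in, both resolve to the stub. */
	printf("%ld %ld\n", sys_demo_optional(), sys_demo_other());
	return 0;
}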
diff --git a/arch/m68k/include/asm/unistd_mm.h b/arch/m68k/include/asm/unistd_mm.h deleted file mode 100644 index 3c19027331fa..000000000000 --- a/arch/m68k/include/asm/unistd_mm.h +++ /dev/null | |||
@@ -1,372 +0,0 @@ | |||
1 | #ifndef _ASM_M68K_UNISTD_H_ | ||
2 | #define _ASM_M68K_UNISTD_H_ | ||
3 | |||
4 | /* | ||
5 | * This file contains the system call numbers. | ||
6 | */ | ||
7 | |||
8 | #define __NR_restart_syscall 0 | ||
9 | #define __NR_exit 1 | ||
10 | #define __NR_fork 2 | ||
11 | #define __NR_read 3 | ||
12 | #define __NR_write 4 | ||
13 | #define __NR_open 5 | ||
14 | #define __NR_close 6 | ||
15 | #define __NR_waitpid 7 | ||
16 | #define __NR_creat 8 | ||
17 | #define __NR_link 9 | ||
18 | #define __NR_unlink 10 | ||
19 | #define __NR_execve 11 | ||
20 | #define __NR_chdir 12 | ||
21 | #define __NR_time 13 | ||
22 | #define __NR_mknod 14 | ||
23 | #define __NR_chmod 15 | ||
24 | #define __NR_chown 16 | ||
25 | #define __NR_break 17 | ||
26 | #define __NR_oldstat 18 | ||
27 | #define __NR_lseek 19 | ||
28 | #define __NR_getpid 20 | ||
29 | #define __NR_mount 21 | ||
30 | #define __NR_umount 22 | ||
31 | #define __NR_setuid 23 | ||
32 | #define __NR_getuid 24 | ||
33 | #define __NR_stime 25 | ||
34 | #define __NR_ptrace 26 | ||
35 | #define __NR_alarm 27 | ||
36 | #define __NR_oldfstat 28 | ||
37 | #define __NR_pause 29 | ||
38 | #define __NR_utime 30 | ||
39 | #define __NR_stty 31 | ||
40 | #define __NR_gtty 32 | ||
41 | #define __NR_access 33 | ||
42 | #define __NR_nice 34 | ||
43 | #define __NR_ftime 35 | ||
44 | #define __NR_sync 36 | ||
45 | #define __NR_kill 37 | ||
46 | #define __NR_rename 38 | ||
47 | #define __NR_mkdir 39 | ||
48 | #define __NR_rmdir 40 | ||
49 | #define __NR_dup 41 | ||
50 | #define __NR_pipe 42 | ||
51 | #define __NR_times 43 | ||
52 | #define __NR_prof 44 | ||
53 | #define __NR_brk 45 | ||
54 | #define __NR_setgid 46 | ||
55 | #define __NR_getgid 47 | ||
56 | #define __NR_signal 48 | ||
57 | #define __NR_geteuid 49 | ||
58 | #define __NR_getegid 50 | ||
59 | #define __NR_acct 51 | ||
60 | #define __NR_umount2 52 | ||
61 | #define __NR_lock 53 | ||
62 | #define __NR_ioctl 54 | ||
63 | #define __NR_fcntl 55 | ||
64 | #define __NR_mpx 56 | ||
65 | #define __NR_setpgid 57 | ||
66 | #define __NR_ulimit 58 | ||
67 | #define __NR_oldolduname 59 | ||
68 | #define __NR_umask 60 | ||
69 | #define __NR_chroot 61 | ||
70 | #define __NR_ustat 62 | ||
71 | #define __NR_dup2 63 | ||
72 | #define __NR_getppid 64 | ||
73 | #define __NR_getpgrp 65 | ||
74 | #define __NR_setsid 66 | ||
75 | #define __NR_sigaction 67 | ||
76 | #define __NR_sgetmask 68 | ||
77 | #define __NR_ssetmask 69 | ||
78 | #define __NR_setreuid 70 | ||
79 | #define __NR_setregid 71 | ||
80 | #define __NR_sigsuspend 72 | ||
81 | #define __NR_sigpending 73 | ||
82 | #define __NR_sethostname 74 | ||
83 | #define __NR_setrlimit 75 | ||
84 | #define __NR_getrlimit 76 | ||
85 | #define __NR_getrusage 77 | ||
86 | #define __NR_gettimeofday 78 | ||
87 | #define __NR_settimeofday 79 | ||
88 | #define __NR_getgroups 80 | ||
89 | #define __NR_setgroups 81 | ||
90 | #define __NR_select 82 | ||
91 | #define __NR_symlink 83 | ||
92 | #define __NR_oldlstat 84 | ||
93 | #define __NR_readlink 85 | ||
94 | #define __NR_uselib 86 | ||
95 | #define __NR_swapon 87 | ||
96 | #define __NR_reboot 88 | ||
97 | #define __NR_readdir 89 | ||
98 | #define __NR_mmap 90 | ||
99 | #define __NR_munmap 91 | ||
100 | #define __NR_truncate 92 | ||
101 | #define __NR_ftruncate 93 | ||
102 | #define __NR_fchmod 94 | ||
103 | #define __NR_fchown 95 | ||
104 | #define __NR_getpriority 96 | ||
105 | #define __NR_setpriority 97 | ||
106 | #define __NR_profil 98 | ||
107 | #define __NR_statfs 99 | ||
108 | #define __NR_fstatfs 100 | ||
109 | #define __NR_ioperm 101 | ||
110 | #define __NR_socketcall 102 | ||
111 | #define __NR_syslog 103 | ||
112 | #define __NR_setitimer 104 | ||
113 | #define __NR_getitimer 105 | ||
114 | #define __NR_stat 106 | ||
115 | #define __NR_lstat 107 | ||
116 | #define __NR_fstat 108 | ||
117 | #define __NR_olduname 109 | ||
118 | #define __NR_iopl /* 110 */ not supported | ||
119 | #define __NR_vhangup 111 | ||
120 | #define __NR_idle /* 112 */ Obsolete | ||
121 | #define __NR_vm86 /* 113 */ not supported | ||
122 | #define __NR_wait4 114 | ||
123 | #define __NR_swapoff 115 | ||
124 | #define __NR_sysinfo 116 | ||
125 | #define __NR_ipc 117 | ||
126 | #define __NR_fsync 118 | ||
127 | #define __NR_sigreturn 119 | ||
128 | #define __NR_clone 120 | ||
129 | #define __NR_setdomainname 121 | ||
130 | #define __NR_uname 122 | ||
131 | #define __NR_cacheflush 123 | ||
132 | #define __NR_adjtimex 124 | ||
133 | #define __NR_mprotect 125 | ||
134 | #define __NR_sigprocmask 126 | ||
135 | #define __NR_create_module 127 | ||
136 | #define __NR_init_module 128 | ||
137 | #define __NR_delete_module 129 | ||
138 | #define __NR_get_kernel_syms 130 | ||
139 | #define __NR_quotactl 131 | ||
140 | #define __NR_getpgid 132 | ||
141 | #define __NR_fchdir 133 | ||
142 | #define __NR_bdflush 134 | ||
143 | #define __NR_sysfs 135 | ||
144 | #define __NR_personality 136 | ||
145 | #define __NR_afs_syscall 137 /* Syscall for Andrew File System */ | ||
146 | #define __NR_setfsuid 138 | ||
147 | #define __NR_setfsgid 139 | ||
148 | #define __NR__llseek 140 | ||
149 | #define __NR_getdents 141 | ||
150 | #define __NR__newselect 142 | ||
151 | #define __NR_flock 143 | ||
152 | #define __NR_msync 144 | ||
153 | #define __NR_readv 145 | ||
154 | #define __NR_writev 146 | ||
155 | #define __NR_getsid 147 | ||
156 | #define __NR_fdatasync 148 | ||
157 | #define __NR__sysctl 149 | ||
158 | #define __NR_mlock 150 | ||
159 | #define __NR_munlock 151 | ||
160 | #define __NR_mlockall 152 | ||
161 | #define __NR_munlockall 153 | ||
162 | #define __NR_sched_setparam 154 | ||
163 | #define __NR_sched_getparam 155 | ||
164 | #define __NR_sched_setscheduler 156 | ||
165 | #define __NR_sched_getscheduler 157 | ||
166 | #define __NR_sched_yield 158 | ||
167 | #define __NR_sched_get_priority_max 159 | ||
168 | #define __NR_sched_get_priority_min 160 | ||
169 | #define __NR_sched_rr_get_interval 161 | ||
170 | #define __NR_nanosleep 162 | ||
171 | #define __NR_mremap 163 | ||
172 | #define __NR_setresuid 164 | ||
173 | #define __NR_getresuid 165 | ||
174 | #define __NR_getpagesize 166 | ||
175 | #define __NR_query_module 167 | ||
176 | #define __NR_poll 168 | ||
177 | #define __NR_nfsservctl 169 | ||
178 | #define __NR_setresgid 170 | ||
179 | #define __NR_getresgid 171 | ||
180 | #define __NR_prctl 172 | ||
181 | #define __NR_rt_sigreturn 173 | ||
182 | #define __NR_rt_sigaction 174 | ||
183 | #define __NR_rt_sigprocmask 175 | ||
184 | #define __NR_rt_sigpending 176 | ||
185 | #define __NR_rt_sigtimedwait 177 | ||
186 | #define __NR_rt_sigqueueinfo 178 | ||
187 | #define __NR_rt_sigsuspend 179 | ||
188 | #define __NR_pread64 180 | ||
189 | #define __NR_pwrite64 181 | ||
190 | #define __NR_lchown 182 | ||
191 | #define __NR_getcwd 183 | ||
192 | #define __NR_capget 184 | ||
193 | #define __NR_capset 185 | ||
194 | #define __NR_sigaltstack 186 | ||
195 | #define __NR_sendfile 187 | ||
196 | #define __NR_getpmsg 188 /* some people actually want streams */ | ||
197 | #define __NR_putpmsg 189 /* some people actually want streams */ | ||
198 | #define __NR_vfork 190 | ||
199 | #define __NR_ugetrlimit 191 | ||
200 | #define __NR_mmap2 192 | ||
201 | #define __NR_truncate64 193 | ||
202 | #define __NR_ftruncate64 194 | ||
203 | #define __NR_stat64 195 | ||
204 | #define __NR_lstat64 196 | ||
205 | #define __NR_fstat64 197 | ||
206 | #define __NR_chown32 198 | ||
207 | #define __NR_getuid32 199 | ||
208 | #define __NR_getgid32 200 | ||
209 | #define __NR_geteuid32 201 | ||
210 | #define __NR_getegid32 202 | ||
211 | #define __NR_setreuid32 203 | ||
212 | #define __NR_setregid32 204 | ||
213 | #define __NR_getgroups32 205 | ||
214 | #define __NR_setgroups32 206 | ||
215 | #define __NR_fchown32 207 | ||
216 | #define __NR_setresuid32 208 | ||
217 | #define __NR_getresuid32 209 | ||
218 | #define __NR_setresgid32 210 | ||
219 | #define __NR_getresgid32 211 | ||
220 | #define __NR_lchown32 212 | ||
221 | #define __NR_setuid32 213 | ||
222 | #define __NR_setgid32 214 | ||
223 | #define __NR_setfsuid32 215 | ||
224 | #define __NR_setfsgid32 216 | ||
225 | #define __NR_pivot_root 217 | ||
226 | #define __NR_getdents64 220 | ||
227 | #define __NR_gettid 221 | ||
228 | #define __NR_tkill 222 | ||
229 | #define __NR_setxattr 223 | ||
230 | #define __NR_lsetxattr 224 | ||
231 | #define __NR_fsetxattr 225 | ||
232 | #define __NR_getxattr 226 | ||
233 | #define __NR_lgetxattr 227 | ||
234 | #define __NR_fgetxattr 228 | ||
235 | #define __NR_listxattr 229 | ||
236 | #define __NR_llistxattr 230 | ||
237 | #define __NR_flistxattr 231 | ||
238 | #define __NR_removexattr 232 | ||
239 | #define __NR_lremovexattr 233 | ||
240 | #define __NR_fremovexattr 234 | ||
241 | #define __NR_futex 235 | ||
242 | #define __NR_sendfile64 236 | ||
243 | #define __NR_mincore 237 | ||
244 | #define __NR_madvise 238 | ||
245 | #define __NR_fcntl64 239 | ||
246 | #define __NR_readahead 240 | ||
247 | #define __NR_io_setup 241 | ||
248 | #define __NR_io_destroy 242 | ||
249 | #define __NR_io_getevents 243 | ||
250 | #define __NR_io_submit 244 | ||
251 | #define __NR_io_cancel 245 | ||
252 | #define __NR_fadvise64 246 | ||
253 | #define __NR_exit_group 247 | ||
254 | #define __NR_lookup_dcookie 248 | ||
255 | #define __NR_epoll_create 249 | ||
256 | #define __NR_epoll_ctl 250 | ||
257 | #define __NR_epoll_wait 251 | ||
258 | #define __NR_remap_file_pages 252 | ||
259 | #define __NR_set_tid_address 253 | ||
260 | #define __NR_timer_create 254 | ||
261 | #define __NR_timer_settime 255 | ||
262 | #define __NR_timer_gettime 256 | ||
263 | #define __NR_timer_getoverrun 257 | ||
264 | #define __NR_timer_delete 258 | ||
265 | #define __NR_clock_settime 259 | ||
266 | #define __NR_clock_gettime 260 | ||
267 | #define __NR_clock_getres 261 | ||
268 | #define __NR_clock_nanosleep 262 | ||
269 | #define __NR_statfs64 263 | ||
270 | #define __NR_fstatfs64 264 | ||
271 | #define __NR_tgkill 265 | ||
272 | #define __NR_utimes 266 | ||
273 | #define __NR_fadvise64_64 267 | ||
274 | #define __NR_mbind 268 | ||
275 | #define __NR_get_mempolicy 269 | ||
276 | #define __NR_set_mempolicy 270 | ||
277 | #define __NR_mq_open 271 | ||
278 | #define __NR_mq_unlink 272 | ||
279 | #define __NR_mq_timedsend 273 | ||
280 | #define __NR_mq_timedreceive 274 | ||
281 | #define __NR_mq_notify 275 | ||
282 | #define __NR_mq_getsetattr 276 | ||
283 | #define __NR_waitid 277 | ||
284 | #define __NR_vserver 278 | ||
285 | #define __NR_add_key 279 | ||
286 | #define __NR_request_key 280 | ||
287 | #define __NR_keyctl 281 | ||
288 | #define __NR_ioprio_set 282 | ||
289 | #define __NR_ioprio_get 283 | ||
290 | #define __NR_inotify_init 284 | ||
291 | #define __NR_inotify_add_watch 285 | ||
292 | #define __NR_inotify_rm_watch 286 | ||
293 | #define __NR_migrate_pages 287 | ||
294 | #define __NR_openat 288 | ||
295 | #define __NR_mkdirat 289 | ||
296 | #define __NR_mknodat 290 | ||
297 | #define __NR_fchownat 291 | ||
298 | #define __NR_futimesat 292 | ||
299 | #define __NR_fstatat64 293 | ||
300 | #define __NR_unlinkat 294 | ||
301 | #define __NR_renameat 295 | ||
302 | #define __NR_linkat 296 | ||
303 | #define __NR_symlinkat 297 | ||
304 | #define __NR_readlinkat 298 | ||
305 | #define __NR_fchmodat 299 | ||
306 | #define __NR_faccessat 300 | ||
307 | #define __NR_pselect6 301 | ||
308 | #define __NR_ppoll 302 | ||
309 | #define __NR_unshare 303 | ||
310 | #define __NR_set_robust_list 304 | ||
311 | #define __NR_get_robust_list 305 | ||
312 | #define __NR_splice 306 | ||
313 | #define __NR_sync_file_range 307 | ||
314 | #define __NR_tee 308 | ||
315 | #define __NR_vmsplice 309 | ||
316 | #define __NR_move_pages 310 | ||
317 | #define __NR_sched_setaffinity 311 | ||
318 | #define __NR_sched_getaffinity 312 | ||
319 | #define __NR_kexec_load 313 | ||
320 | #define __NR_getcpu 314 | ||
321 | #define __NR_epoll_pwait 315 | ||
322 | #define __NR_utimensat 316 | ||
323 | #define __NR_signalfd 317 | ||
324 | #define __NR_timerfd_create 318 | ||
325 | #define __NR_eventfd 319 | ||
326 | #define __NR_fallocate 320 | ||
327 | #define __NR_timerfd_settime 321 | ||
328 | #define __NR_timerfd_gettime 322 | ||
329 | #define __NR_signalfd4 323 | ||
330 | #define __NR_eventfd2 324 | ||
331 | #define __NR_epoll_create1 325 | ||
332 | #define __NR_dup3 326 | ||
333 | #define __NR_pipe2 327 | ||
334 | #define __NR_inotify_init1 328 | ||
335 | |||
336 | #ifdef __KERNEL__ | ||
337 | |||
338 | #define NR_syscalls 329 | ||
339 | |||
340 | #define __ARCH_WANT_IPC_PARSE_VERSION | ||
341 | #define __ARCH_WANT_OLD_READDIR | ||
342 | #define __ARCH_WANT_OLD_STAT | ||
343 | #define __ARCH_WANT_STAT64 | ||
344 | #define __ARCH_WANT_SYS_ALARM | ||
345 | #define __ARCH_WANT_SYS_GETHOSTNAME | ||
346 | #define __ARCH_WANT_SYS_PAUSE | ||
347 | #define __ARCH_WANT_SYS_SGETMASK | ||
348 | #define __ARCH_WANT_SYS_SIGNAL | ||
349 | #define __ARCH_WANT_SYS_TIME | ||
350 | #define __ARCH_WANT_SYS_UTIME | ||
351 | #define __ARCH_WANT_SYS_WAITPID | ||
352 | #define __ARCH_WANT_SYS_SOCKETCALL | ||
353 | #define __ARCH_WANT_SYS_FADVISE64 | ||
354 | #define __ARCH_WANT_SYS_GETPGRP | ||
355 | #define __ARCH_WANT_SYS_LLSEEK | ||
356 | #define __ARCH_WANT_SYS_NICE | ||
357 | #define __ARCH_WANT_SYS_OLD_GETRLIMIT | ||
358 | #define __ARCH_WANT_SYS_OLDUMOUNT | ||
359 | #define __ARCH_WANT_SYS_SIGPENDING | ||
360 | #define __ARCH_WANT_SYS_SIGPROCMASK | ||
361 | #define __ARCH_WANT_SYS_RT_SIGACTION | ||
362 | |||
363 | /* | ||
364 | * "Conditional" syscalls | ||
365 | * | ||
366 | * What we want is __attribute__((weak,alias("sys_ni_syscall"))), | ||
367 | * but it doesn't work on all toolchains, so we just do it by hand | ||
368 | */ | ||
369 | #define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") | ||
370 | |||
371 | #endif /* __KERNEL__ */ | ||
372 | #endif /* _ASM_M68K_UNISTD_H_ */ | ||
diff --git a/arch/m68k/include/asm/unistd_no.h b/arch/m68k/include/asm/unistd_no.h deleted file mode 100644 index b034a2f7b444..000000000000 --- a/arch/m68k/include/asm/unistd_no.h +++ /dev/null | |||
@@ -1,372 +0,0 @@ | |||
1 | #ifndef _ASM_M68K_UNISTD_H_ | ||
2 | #define _ASM_M68K_UNISTD_H_ | ||
3 | |||
4 | /* | ||
5 | * This file contains the system call numbers. | ||
6 | */ | ||
7 | |||
8 | #define __NR_restart_syscall 0 | ||
9 | #define __NR_exit 1 | ||
10 | #define __NR_fork 2 | ||
11 | #define __NR_read 3 | ||
12 | #define __NR_write 4 | ||
13 | #define __NR_open 5 | ||
14 | #define __NR_close 6 | ||
15 | #define __NR_waitpid 7 | ||
16 | #define __NR_creat 8 | ||
17 | #define __NR_link 9 | ||
18 | #define __NR_unlink 10 | ||
19 | #define __NR_execve 11 | ||
20 | #define __NR_chdir 12 | ||
21 | #define __NR_time 13 | ||
22 | #define __NR_mknod 14 | ||
23 | #define __NR_chmod 15 | ||
24 | #define __NR_chown 16 | ||
25 | #define __NR_break 17 | ||
26 | #define __NR_oldstat 18 | ||
27 | #define __NR_lseek 19 | ||
28 | #define __NR_getpid 20 | ||
29 | #define __NR_mount 21 | ||
30 | #define __NR_umount 22 | ||
31 | #define __NR_setuid 23 | ||
32 | #define __NR_getuid 24 | ||
33 | #define __NR_stime 25 | ||
34 | #define __NR_ptrace 26 | ||
35 | #define __NR_alarm 27 | ||
36 | #define __NR_oldfstat 28 | ||
37 | #define __NR_pause 29 | ||
38 | #define __NR_utime 30 | ||
39 | #define __NR_stty 31 | ||
40 | #define __NR_gtty 32 | ||
41 | #define __NR_access 33 | ||
42 | #define __NR_nice 34 | ||
43 | #define __NR_ftime 35 | ||
44 | #define __NR_sync 36 | ||
45 | #define __NR_kill 37 | ||
46 | #define __NR_rename 38 | ||
47 | #define __NR_mkdir 39 | ||
48 | #define __NR_rmdir 40 | ||
49 | #define __NR_dup 41 | ||
50 | #define __NR_pipe 42 | ||
51 | #define __NR_times 43 | ||
52 | #define __NR_prof 44 | ||
53 | #define __NR_brk 45 | ||
54 | #define __NR_setgid 46 | ||
55 | #define __NR_getgid 47 | ||
56 | #define __NR_signal 48 | ||
57 | #define __NR_geteuid 49 | ||
58 | #define __NR_getegid 50 | ||
59 | #define __NR_acct 51 | ||
60 | #define __NR_umount2 52 | ||
61 | #define __NR_lock 53 | ||
62 | #define __NR_ioctl 54 | ||
63 | #define __NR_fcntl 55 | ||
64 | #define __NR_mpx 56 | ||
65 | #define __NR_setpgid 57 | ||
66 | #define __NR_ulimit 58 | ||
67 | #define __NR_oldolduname 59 | ||
68 | #define __NR_umask 60 | ||
69 | #define __NR_chroot 61 | ||
70 | #define __NR_ustat 62 | ||
71 | #define __NR_dup2 63 | ||
72 | #define __NR_getppid 64 | ||
73 | #define __NR_getpgrp 65 | ||
74 | #define __NR_setsid 66 | ||
75 | #define __NR_sigaction 67 | ||
76 | #define __NR_sgetmask 68 | ||
77 | #define __NR_ssetmask 69 | ||
78 | #define __NR_setreuid 70 | ||
79 | #define __NR_setregid 71 | ||
80 | #define __NR_sigsuspend 72 | ||
81 | #define __NR_sigpending 73 | ||
82 | #define __NR_sethostname 74 | ||
83 | #define __NR_setrlimit 75 | ||
84 | #define __NR_getrlimit 76 | ||
85 | #define __NR_getrusage 77 | ||
86 | #define __NR_gettimeofday 78 | ||
87 | #define __NR_settimeofday 79 | ||
88 | #define __NR_getgroups 80 | ||
89 | #define __NR_setgroups 81 | ||
90 | #define __NR_select 82 | ||
91 | #define __NR_symlink 83 | ||
92 | #define __NR_oldlstat 84 | ||
93 | #define __NR_readlink 85 | ||
94 | #define __NR_uselib 86 | ||
95 | #define __NR_swapon 87 | ||
96 | #define __NR_reboot 88 | ||
97 | #define __NR_readdir 89 | ||
98 | #define __NR_mmap 90 | ||
99 | #define __NR_munmap 91 | ||
100 | #define __NR_truncate 92 | ||
101 | #define __NR_ftruncate 93 | ||
102 | #define __NR_fchmod 94 | ||
103 | #define __NR_fchown 95 | ||
104 | #define __NR_getpriority 96 | ||
105 | #define __NR_setpriority 97 | ||
106 | #define __NR_profil 98 | ||
107 | #define __NR_statfs 99 | ||
108 | #define __NR_fstatfs 100 | ||
109 | #define __NR_ioperm 101 | ||
110 | #define __NR_socketcall 102 | ||
111 | #define __NR_syslog 103 | ||
112 | #define __NR_setitimer 104 | ||
113 | #define __NR_getitimer 105 | ||
114 | #define __NR_stat 106 | ||
115 | #define __NR_lstat 107 | ||
116 | #define __NR_fstat 108 | ||
117 | #define __NR_olduname 109 | ||
118 | #define __NR_iopl /* 110 */ not supported | ||
119 | #define __NR_vhangup 111 | ||
120 | #define __NR_idle /* 112 */ Obsolete | ||
121 | #define __NR_vm86 /* 113 */ not supported | ||
122 | #define __NR_wait4 114 | ||
123 | #define __NR_swapoff 115 | ||
124 | #define __NR_sysinfo 116 | ||
125 | #define __NR_ipc 117 | ||
126 | #define __NR_fsync 118 | ||
127 | #define __NR_sigreturn 119 | ||
128 | #define __NR_clone 120 | ||
129 | #define __NR_setdomainname 121 | ||
130 | #define __NR_uname 122 | ||
131 | #define __NR_cacheflush 123 | ||
132 | #define __NR_adjtimex 124 | ||
133 | #define __NR_mprotect 125 | ||
134 | #define __NR_sigprocmask 126 | ||
135 | #define __NR_create_module 127 | ||
136 | #define __NR_init_module 128 | ||
137 | #define __NR_delete_module 129 | ||
138 | #define __NR_get_kernel_syms 130 | ||
139 | #define __NR_quotactl 131 | ||
140 | #define __NR_getpgid 132 | ||
141 | #define __NR_fchdir 133 | ||
142 | #define __NR_bdflush 134 | ||
143 | #define __NR_sysfs 135 | ||
144 | #define __NR_personality 136 | ||
145 | #define __NR_afs_syscall 137 /* Syscall for Andrew File System */ | ||
146 | #define __NR_setfsuid 138 | ||
147 | #define __NR_setfsgid 139 | ||
148 | #define __NR__llseek 140 | ||
149 | #define __NR_getdents 141 | ||
150 | #define __NR__newselect 142 | ||
151 | #define __NR_flock 143 | ||
152 | #define __NR_msync 144 | ||
153 | #define __NR_readv 145 | ||
154 | #define __NR_writev 146 | ||
155 | #define __NR_getsid 147 | ||
156 | #define __NR_fdatasync 148 | ||
157 | #define __NR__sysctl 149 | ||
158 | #define __NR_mlock 150 | ||
159 | #define __NR_munlock 151 | ||
160 | #define __NR_mlockall 152 | ||
161 | #define __NR_munlockall 153 | ||
162 | #define __NR_sched_setparam 154 | ||
163 | #define __NR_sched_getparam 155 | ||
164 | #define __NR_sched_setscheduler 156 | ||
165 | #define __NR_sched_getscheduler 157 | ||
166 | #define __NR_sched_yield 158 | ||
167 | #define __NR_sched_get_priority_max 159 | ||
168 | #define __NR_sched_get_priority_min 160 | ||
169 | #define __NR_sched_rr_get_interval 161 | ||
170 | #define __NR_nanosleep 162 | ||
171 | #define __NR_mremap 163 | ||
172 | #define __NR_setresuid 164 | ||
173 | #define __NR_getresuid 165 | ||
174 | #define __NR_getpagesize 166 | ||
175 | #define __NR_query_module 167 | ||
176 | #define __NR_poll 168 | ||
177 | #define __NR_nfsservctl 169 | ||
178 | #define __NR_setresgid 170 | ||
179 | #define __NR_getresgid 171 | ||
180 | #define __NR_prctl 172 | ||
181 | #define __NR_rt_sigreturn 173 | ||
182 | #define __NR_rt_sigaction 174 | ||
183 | #define __NR_rt_sigprocmask 175 | ||
184 | #define __NR_rt_sigpending 176 | ||
185 | #define __NR_rt_sigtimedwait 177 | ||
186 | #define __NR_rt_sigqueueinfo 178 | ||
187 | #define __NR_rt_sigsuspend 179 | ||
188 | #define __NR_pread64 180 | ||
189 | #define __NR_pwrite64 181 | ||
190 | #define __NR_lchown 182 | ||
191 | #define __NR_getcwd 183 | ||
192 | #define __NR_capget 184 | ||
193 | #define __NR_capset 185 | ||
194 | #define __NR_sigaltstack 186 | ||
195 | #define __NR_sendfile 187 | ||
196 | #define __NR_getpmsg 188 /* some people actually want streams */ | ||
197 | #define __NR_putpmsg 189 /* some people actually want streams */ | ||
198 | #define __NR_vfork 190 | ||
199 | #define __NR_ugetrlimit 191 | ||
200 | #define __NR_mmap2 192 | ||
201 | #define __NR_truncate64 193 | ||
202 | #define __NR_ftruncate64 194 | ||
203 | #define __NR_stat64 195 | ||
204 | #define __NR_lstat64 196 | ||
205 | #define __NR_fstat64 197 | ||
206 | #define __NR_chown32 198 | ||
207 | #define __NR_getuid32 199 | ||
208 | #define __NR_getgid32 200 | ||
209 | #define __NR_geteuid32 201 | ||
210 | #define __NR_getegid32 202 | ||
211 | #define __NR_setreuid32 203 | ||
212 | #define __NR_setregid32 204 | ||
213 | #define __NR_getgroups32 205 | ||
214 | #define __NR_setgroups32 206 | ||
215 | #define __NR_fchown32 207 | ||
216 | #define __NR_setresuid32 208 | ||
217 | #define __NR_getresuid32 209 | ||
218 | #define __NR_setresgid32 210 | ||
219 | #define __NR_getresgid32 211 | ||
220 | #define __NR_lchown32 212 | ||
221 | #define __NR_setuid32 213 | ||
222 | #define __NR_setgid32 214 | ||
223 | #define __NR_setfsuid32 215 | ||
224 | #define __NR_setfsgid32 216 | ||
225 | #define __NR_pivot_root 217 | ||
226 | #define __NR_getdents64 220 | ||
227 | #define __NR_gettid 221 | ||
228 | #define __NR_tkill 222 | ||
229 | #define __NR_setxattr 223 | ||
230 | #define __NR_lsetxattr 224 | ||
231 | #define __NR_fsetxattr 225 | ||
232 | #define __NR_getxattr 226 | ||
233 | #define __NR_lgetxattr 227 | ||
234 | #define __NR_fgetxattr 228 | ||
235 | #define __NR_listxattr 229 | ||
236 | #define __NR_llistxattr 230 | ||
237 | #define __NR_flistxattr 231 | ||
238 | #define __NR_removexattr 232 | ||
239 | #define __NR_lremovexattr 233 | ||
240 | #define __NR_fremovexattr 234 | ||
241 | #define __NR_futex 235 | ||
242 | #define __NR_sendfile64 236 | ||
243 | #define __NR_mincore 237 | ||
244 | #define __NR_madvise 238 | ||
245 | #define __NR_fcntl64 239 | ||
246 | #define __NR_readahead 240 | ||
247 | #define __NR_io_setup 241 | ||
248 | #define __NR_io_destroy 242 | ||
249 | #define __NR_io_getevents 243 | ||
250 | #define __NR_io_submit 244 | ||
251 | #define __NR_io_cancel 245 | ||
252 | #define __NR_fadvise64 246 | ||
253 | #define __NR_exit_group 247 | ||
254 | #define __NR_lookup_dcookie 248 | ||
255 | #define __NR_epoll_create 249 | ||
256 | #define __NR_epoll_ctl 250 | ||
257 | #define __NR_epoll_wait 251 | ||
258 | #define __NR_remap_file_pages 252 | ||
259 | #define __NR_set_tid_address 253 | ||
260 | #define __NR_timer_create 254 | ||
261 | #define __NR_timer_settime 255 | ||
262 | #define __NR_timer_gettime 256 | ||
263 | #define __NR_timer_getoverrun 257 | ||
264 | #define __NR_timer_delete 258 | ||
265 | #define __NR_clock_settime 259 | ||
266 | #define __NR_clock_gettime 260 | ||
267 | #define __NR_clock_getres 261 | ||
268 | #define __NR_clock_nanosleep 262 | ||
269 | #define __NR_statfs64 263 | ||
270 | #define __NR_fstatfs64 264 | ||
271 | #define __NR_tgkill 265 | ||
272 | #define __NR_utimes 266 | ||
273 | #define __NR_fadvise64_64 267 | ||
274 | #define __NR_mbind 268 | ||
275 | #define __NR_get_mempolicy 269 | ||
276 | #define __NR_set_mempolicy 270 | ||
277 | #define __NR_mq_open 271 | ||
278 | #define __NR_mq_unlink 272 | ||
279 | #define __NR_mq_timedsend 273 | ||
280 | #define __NR_mq_timedreceive 274 | ||
281 | #define __NR_mq_notify 275 | ||
282 | #define __NR_mq_getsetattr 276 | ||
283 | #define __NR_waitid 277 | ||
284 | #define __NR_vserver 278 | ||
285 | #define __NR_add_key 279 | ||
286 | #define __NR_request_key 280 | ||
287 | #define __NR_keyctl 281 | ||
288 | #define __NR_ioprio_set 282 | ||
289 | #define __NR_ioprio_get 283 | ||
290 | #define __NR_inotify_init 284 | ||
291 | #define __NR_inotify_add_watch 285 | ||
292 | #define __NR_inotify_rm_watch 286 | ||
293 | #define __NR_migrate_pages 287 | ||
294 | #define __NR_openat 288 | ||
295 | #define __NR_mkdirat 289 | ||
296 | #define __NR_mknodat 290 | ||
297 | #define __NR_fchownat 291 | ||
298 | #define __NR_futimesat 292 | ||
299 | #define __NR_fstatat64 293 | ||
300 | #define __NR_unlinkat 294 | ||
301 | #define __NR_renameat 295 | ||
302 | #define __NR_linkat 296 | ||
303 | #define __NR_symlinkat 297 | ||
304 | #define __NR_readlinkat 298 | ||
305 | #define __NR_fchmodat 299 | ||
306 | #define __NR_faccessat 300 | ||
307 | #define __NR_pselect6 301 | ||
308 | #define __NR_ppoll 302 | ||
309 | #define __NR_unshare 303 | ||
310 | #define __NR_set_robust_list 304 | ||
311 | #define __NR_get_robust_list 305 | ||
312 | #define __NR_splice 306 | ||
313 | #define __NR_sync_file_range 307 | ||
314 | #define __NR_tee 308 | ||
315 | #define __NR_vmsplice 309 | ||
316 | #define __NR_move_pages 310 | ||
317 | #define __NR_sched_setaffinity 311 | ||
318 | #define __NR_sched_getaffinity 312 | ||
319 | #define __NR_kexec_load 313 | ||
320 | #define __NR_getcpu 314 | ||
321 | #define __NR_epoll_pwait 315 | ||
322 | #define __NR_utimensat 316 | ||
323 | #define __NR_signalfd 317 | ||
324 | #define __NR_timerfd_create 318 | ||
325 | #define __NR_eventfd 319 | ||
326 | #define __NR_fallocate 320 | ||
327 | #define __NR_timerfd_settime 321 | ||
328 | #define __NR_timerfd_gettime 322 | ||
329 | #define __NR_signalfd4 323 | ||
330 | #define __NR_eventfd2 324 | ||
331 | #define __NR_epoll_create1 325 | ||
332 | #define __NR_dup3 326 | ||
333 | #define __NR_pipe2 327 | ||
334 | #define __NR_inotify_init1 328 | ||
335 | |||
336 | #ifdef __KERNEL__ | ||
337 | |||
338 | #define NR_syscalls 329 | ||
339 | |||
340 | #define __ARCH_WANT_IPC_PARSE_VERSION | ||
341 | #define __ARCH_WANT_OLD_READDIR | ||
342 | #define __ARCH_WANT_OLD_STAT | ||
343 | #define __ARCH_WANT_STAT64 | ||
344 | #define __ARCH_WANT_SYS_ALARM | ||
345 | #define __ARCH_WANT_SYS_GETHOSTNAME | ||
346 | #define __ARCH_WANT_SYS_PAUSE | ||
347 | #define __ARCH_WANT_SYS_SGETMASK | ||
348 | #define __ARCH_WANT_SYS_SIGNAL | ||
349 | #define __ARCH_WANT_SYS_TIME | ||
350 | #define __ARCH_WANT_SYS_UTIME | ||
351 | #define __ARCH_WANT_SYS_WAITPID | ||
352 | #define __ARCH_WANT_SYS_SOCKETCALL | ||
353 | #define __ARCH_WANT_SYS_FADVISE64 | ||
354 | #define __ARCH_WANT_SYS_GETPGRP | ||
355 | #define __ARCH_WANT_SYS_LLSEEK | ||
356 | #define __ARCH_WANT_SYS_NICE | ||
357 | #define __ARCH_WANT_SYS_OLD_GETRLIMIT | ||
358 | #define __ARCH_WANT_SYS_OLDUMOUNT | ||
359 | #define __ARCH_WANT_SYS_SIGPENDING | ||
360 | #define __ARCH_WANT_SYS_SIGPROCMASK | ||
361 | #define __ARCH_WANT_SYS_RT_SIGACTION | ||
362 | |||
363 | /* | ||
364 | * "Conditional" syscalls | ||
365 | * | ||
366 | * What we want is __attribute__((weak,alias("sys_ni_syscall"))), | ||
367 | * but it doesn't work on all toolchains, so we just do it by hand | ||
368 | */ | ||
369 | #define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") | ||
370 | |||
371 | #endif /* __KERNEL__ */ | ||
372 | #endif /* _ASM_M68K_UNISTD_H_ */ | ||
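The cond_syscall() macro a few lines above is the interesting part of this hunk's tail: it emits assembler that marks the syscall symbol weak and aliases it to sys_ni_syscall, so any optional syscall that is not built in quietly resolves to the "not implemented" stub. The following userspace sketch shows the attribute form that the comment says some toolchains cannot handle; it assumes GCC or Clang on an ELF target, and fallback_impl()/optional_call() are made-up names standing in for sys_ni_syscall() and a conditional syscall.

    #include <stdio.h>

    /* Stand-in for sys_ni_syscall(): the default "not implemented"
     * answer (-ENOSYS is 38 on most Linux architectures). */
    long fallback_impl(void)
    {
            return -38;
    }

    /* Weak alias: if no strong definition of optional_call() is linked
     * in, every call to it lands in fallback_impl() instead. */
    long optional_call(void) __attribute__((weak, alias("fallback_impl")));

    int main(void)
    {
            printf("optional_call() = %ld\n", optional_call());
            return 0;
    }

The kernel spells the same thing out as raw .weak/.set directives precisely because, as its comment notes, the attribute form does not work on every toolchain.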
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig index b6eee7c93cdd..ac14f5245d2a 100644 --- a/arch/powerpc/configs/ps3_defconfig +++ b/arch/powerpc/configs/ps3_defconfig | |||
@@ -1,7 +1,7 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.27-rc3 | 3 | # Linux kernel version: 2.6.29-rc8 |
4 | # Wed Aug 20 08:16:53 2008 | 4 | # Fri Mar 13 09:28:45 2009 |
5 | # | 5 | # |
6 | CONFIG_PPC64=y | 6 | CONFIG_PPC64=y |
7 | 7 | ||
@@ -16,13 +16,14 @@ CONFIG_PPC_FPU=y | |||
16 | CONFIG_ALTIVEC=y | 16 | CONFIG_ALTIVEC=y |
17 | # CONFIG_VSX is not set | 17 | # CONFIG_VSX is not set |
18 | CONFIG_PPC_STD_MMU=y | 18 | CONFIG_PPC_STD_MMU=y |
19 | CONFIG_PPC_STD_MMU_64=y | ||
19 | CONFIG_PPC_MM_SLICES=y | 20 | CONFIG_PPC_MM_SLICES=y |
20 | CONFIG_VIRT_CPU_ACCOUNTING=y | 21 | CONFIG_VIRT_CPU_ACCOUNTING=y |
21 | CONFIG_SMP=y | 22 | CONFIG_SMP=y |
22 | CONFIG_NR_CPUS=2 | 23 | CONFIG_NR_CPUS=2 |
23 | CONFIG_64BIT=y | 24 | CONFIG_64BIT=y |
24 | CONFIG_WORD_SIZE=64 | 25 | CONFIG_WORD_SIZE=64 |
25 | CONFIG_PPC_MERGE=y | 26 | CONFIG_ARCH_PHYS_ADDR_T_64BIT=y |
26 | CONFIG_MMU=y | 27 | CONFIG_MMU=y |
27 | CONFIG_GENERIC_CMOS_UPDATE=y | 28 | CONFIG_GENERIC_CMOS_UPDATE=y |
28 | CONFIG_GENERIC_TIME=y | 29 | CONFIG_GENERIC_TIME=y |
@@ -46,7 +47,7 @@ CONFIG_PPC=y | |||
46 | CONFIG_EARLY_PRINTK=y | 47 | CONFIG_EARLY_PRINTK=y |
47 | CONFIG_COMPAT=y | 48 | CONFIG_COMPAT=y |
48 | CONFIG_SYSVIPC_COMPAT=y | 49 | CONFIG_SYSVIPC_COMPAT=y |
49 | CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y | 50 | CONFIG_SCHED_OMIT_FRAME_POINTER=y |
50 | CONFIG_ARCH_MAY_HAVE_PC_FDC=y | 51 | CONFIG_ARCH_MAY_HAVE_PC_FDC=y |
51 | CONFIG_PPC_OF=y | 52 | CONFIG_PPC_OF=y |
52 | CONFIG_OF=y | 53 | CONFIG_OF=y |
@@ -74,10 +75,19 @@ CONFIG_POSIX_MQUEUE=y | |||
74 | # CONFIG_BSD_PROCESS_ACCT is not set | 75 | # CONFIG_BSD_PROCESS_ACCT is not set |
75 | # CONFIG_TASKSTATS is not set | 76 | # CONFIG_TASKSTATS is not set |
76 | # CONFIG_AUDIT is not set | 77 | # CONFIG_AUDIT is not set |
78 | |||
79 | # | ||
80 | # RCU Subsystem | ||
81 | # | ||
82 | CONFIG_CLASSIC_RCU=y | ||
83 | # CONFIG_TREE_RCU is not set | ||
84 | # CONFIG_PREEMPT_RCU is not set | ||
85 | # CONFIG_TREE_RCU_TRACE is not set | ||
86 | # CONFIG_PREEMPT_RCU_TRACE is not set | ||
77 | # CONFIG_IKCONFIG is not set | 87 | # CONFIG_IKCONFIG is not set |
78 | CONFIG_LOG_BUF_SHIFT=17 | 88 | CONFIG_LOG_BUF_SHIFT=17 |
79 | # CONFIG_CGROUPS is not set | ||
80 | # CONFIG_GROUP_SCHED is not set | 89 | # CONFIG_GROUP_SCHED is not set |
90 | # CONFIG_CGROUPS is not set | ||
81 | CONFIG_SYSFS_DEPRECATED=y | 91 | CONFIG_SYSFS_DEPRECATED=y |
82 | CONFIG_SYSFS_DEPRECATED_V2=y | 92 | CONFIG_SYSFS_DEPRECATED_V2=y |
83 | # CONFIG_RELAY is not set | 93 | # CONFIG_RELAY is not set |
@@ -86,11 +96,13 @@ CONFIG_NAMESPACES=y | |||
86 | # CONFIG_IPC_NS is not set | 96 | # CONFIG_IPC_NS is not set |
87 | # CONFIG_USER_NS is not set | 97 | # CONFIG_USER_NS is not set |
88 | # CONFIG_PID_NS is not set | 98 | # CONFIG_PID_NS is not set |
99 | # CONFIG_NET_NS is not set | ||
89 | CONFIG_BLK_DEV_INITRD=y | 100 | CONFIG_BLK_DEV_INITRD=y |
90 | CONFIG_INITRAMFS_SOURCE="" | 101 | CONFIG_INITRAMFS_SOURCE="" |
91 | CONFIG_CC_OPTIMIZE_FOR_SIZE=y | 102 | CONFIG_CC_OPTIMIZE_FOR_SIZE=y |
92 | CONFIG_SYSCTL=y | 103 | CONFIG_SYSCTL=y |
93 | # CONFIG_EMBEDDED is not set | 104 | CONFIG_ANON_INODES=y |
105 | CONFIG_EMBEDDED=y | ||
94 | CONFIG_SYSCTL_SYSCALL=y | 106 | CONFIG_SYSCTL_SYSCALL=y |
95 | CONFIG_KALLSYMS=y | 107 | CONFIG_KALLSYMS=y |
96 | CONFIG_KALLSYMS_ALL=y | 108 | CONFIG_KALLSYMS_ALL=y |
@@ -99,37 +111,36 @@ CONFIG_HOTPLUG=y | |||
99 | CONFIG_PRINTK=y | 111 | CONFIG_PRINTK=y |
100 | CONFIG_BUG=y | 112 | CONFIG_BUG=y |
101 | CONFIG_ELF_CORE=y | 113 | CONFIG_ELF_CORE=y |
102 | # CONFIG_COMPAT_BRK is not set | ||
103 | CONFIG_BASE_FULL=y | 114 | CONFIG_BASE_FULL=y |
104 | CONFIG_FUTEX=y | 115 | CONFIG_FUTEX=y |
105 | CONFIG_ANON_INODES=y | ||
106 | CONFIG_EPOLL=y | 116 | CONFIG_EPOLL=y |
107 | CONFIG_SIGNALFD=y | 117 | CONFIG_SIGNALFD=y |
108 | CONFIG_TIMERFD=y | 118 | CONFIG_TIMERFD=y |
109 | CONFIG_EVENTFD=y | 119 | CONFIG_EVENTFD=y |
110 | CONFIG_SHMEM=y | 120 | CONFIG_SHMEM=y |
121 | CONFIG_AIO=y | ||
111 | CONFIG_VM_EVENT_COUNTERS=y | 122 | CONFIG_VM_EVENT_COUNTERS=y |
123 | # CONFIG_COMPAT_BRK is not set | ||
112 | CONFIG_SLAB=y | 124 | CONFIG_SLAB=y |
113 | # CONFIG_SLUB is not set | 125 | # CONFIG_SLUB is not set |
114 | # CONFIG_SLOB is not set | 126 | # CONFIG_SLOB is not set |
115 | CONFIG_PROFILING=y | 127 | CONFIG_PROFILING=y |
116 | # CONFIG_MARKERS is not set | 128 | CONFIG_TRACEPOINTS=y |
129 | CONFIG_MARKERS=y | ||
117 | CONFIG_OPROFILE=m | 130 | CONFIG_OPROFILE=m |
118 | CONFIG_HAVE_OPROFILE=y | 131 | CONFIG_HAVE_OPROFILE=y |
119 | # CONFIG_KPROBES is not set | 132 | # CONFIG_KPROBES is not set |
120 | CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y | 133 | CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y |
134 | CONFIG_HAVE_SYSCALL_WRAPPERS=y | ||
121 | CONFIG_HAVE_IOREMAP_PROT=y | 135 | CONFIG_HAVE_IOREMAP_PROT=y |
122 | CONFIG_HAVE_KPROBES=y | 136 | CONFIG_HAVE_KPROBES=y |
123 | CONFIG_HAVE_KRETPROBES=y | 137 | CONFIG_HAVE_KRETPROBES=y |
124 | CONFIG_HAVE_ARCH_TRACEHOOK=y | 138 | CONFIG_HAVE_ARCH_TRACEHOOK=y |
125 | CONFIG_HAVE_DMA_ATTRS=y | 139 | CONFIG_HAVE_DMA_ATTRS=y |
126 | CONFIG_USE_GENERIC_SMP_HELPERS=y | 140 | CONFIG_USE_GENERIC_SMP_HELPERS=y |
127 | # CONFIG_HAVE_CLK is not set | ||
128 | CONFIG_PROC_PAGE_MONITOR=y | ||
129 | # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set | 141 | # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set |
130 | CONFIG_SLABINFO=y | 142 | CONFIG_SLABINFO=y |
131 | CONFIG_RT_MUTEXES=y | 143 | CONFIG_RT_MUTEXES=y |
132 | # CONFIG_TINY_SHMEM is not set | ||
133 | CONFIG_BASE_SMALL=0 | 144 | CONFIG_BASE_SMALL=0 |
134 | CONFIG_MODULES=y | 145 | CONFIG_MODULES=y |
135 | # CONFIG_MODULE_FORCE_LOAD is not set | 146 | # CONFIG_MODULE_FORCE_LOAD is not set |
@@ -137,7 +148,6 @@ CONFIG_MODULE_UNLOAD=y | |||
137 | # CONFIG_MODULE_FORCE_UNLOAD is not set | 148 | # CONFIG_MODULE_FORCE_UNLOAD is not set |
138 | # CONFIG_MODVERSIONS is not set | 149 | # CONFIG_MODVERSIONS is not set |
139 | # CONFIG_MODULE_SRCVERSION_ALL is not set | 150 | # CONFIG_MODULE_SRCVERSION_ALL is not set |
140 | CONFIG_KMOD=y | ||
141 | CONFIG_STOP_MACHINE=y | 151 | CONFIG_STOP_MACHINE=y |
142 | CONFIG_BLOCK=y | 152 | CONFIG_BLOCK=y |
143 | # CONFIG_BLK_DEV_IO_TRACE is not set | 153 | # CONFIG_BLK_DEV_IO_TRACE is not set |
@@ -157,7 +167,7 @@ CONFIG_DEFAULT_AS=y | |||
157 | # CONFIG_DEFAULT_CFQ is not set | 167 | # CONFIG_DEFAULT_CFQ is not set |
158 | # CONFIG_DEFAULT_NOOP is not set | 168 | # CONFIG_DEFAULT_NOOP is not set |
159 | CONFIG_DEFAULT_IOSCHED="anticipatory" | 169 | CONFIG_DEFAULT_IOSCHED="anticipatory" |
160 | CONFIG_CLASSIC_RCU=y | 170 | # CONFIG_FREEZER is not set |
161 | 171 | ||
162 | # | 172 | # |
163 | # Platform support | 173 | # Platform support |
@@ -183,18 +193,20 @@ CONFIG_PS3_STORAGE=y | |||
183 | CONFIG_PS3_DISK=y | 193 | CONFIG_PS3_DISK=y |
184 | CONFIG_PS3_ROM=y | 194 | CONFIG_PS3_ROM=y |
185 | CONFIG_PS3_FLASH=y | 195 | CONFIG_PS3_FLASH=y |
186 | CONFIG_OPROFILE_PS3=y | 196 | CONFIG_PS3_VRAM=m |
187 | CONFIG_PS3_LPM=m | 197 | CONFIG_PS3_LPM=m |
188 | CONFIG_PPC_CELL=y | 198 | CONFIG_PPC_CELL=y |
189 | # CONFIG_PPC_CELL_NATIVE is not set | 199 | # CONFIG_PPC_CELL_NATIVE is not set |
190 | # CONFIG_PPC_IBM_CELL_BLADE is not set | 200 | # CONFIG_PPC_IBM_CELL_BLADE is not set |
191 | # CONFIG_PPC_CELLEB is not set | 201 | # CONFIG_PPC_CELLEB is not set |
202 | # CONFIG_PPC_CELL_QPACE is not set | ||
192 | 203 | ||
193 | # | 204 | # |
194 | # Cell Broadband Engine options | 205 | # Cell Broadband Engine options |
195 | # | 206 | # |
196 | CONFIG_SPU_FS=y | 207 | CONFIG_SPU_FS=y |
197 | CONFIG_SPU_FS_64K_LS=y | 208 | CONFIG_SPU_FS_64K_LS=y |
209 | # CONFIG_SPU_TRACE is not set | ||
198 | CONFIG_SPU_BASE=y | 210 | CONFIG_SPU_BASE=y |
199 | # CONFIG_PQ2ADS is not set | 211 | # CONFIG_PQ2ADS is not set |
200 | # CONFIG_IPIC is not set | 212 | # CONFIG_IPIC is not set |
@@ -210,6 +222,7 @@ CONFIG_SPU_BASE=y | |||
210 | # CONFIG_GENERIC_IOMAP is not set | 222 | # CONFIG_GENERIC_IOMAP is not set |
211 | # CONFIG_CPU_FREQ is not set | 223 | # CONFIG_CPU_FREQ is not set |
212 | # CONFIG_FSL_ULI1575 is not set | 224 | # CONFIG_FSL_ULI1575 is not set |
225 | # CONFIG_SIMPLE_GPIO is not set | ||
213 | 226 | ||
214 | # | 227 | # |
215 | # Kernel options | 228 | # Kernel options |
@@ -229,6 +242,8 @@ CONFIG_PREEMPT_NONE=y | |||
229 | # CONFIG_PREEMPT is not set | 242 | # CONFIG_PREEMPT is not set |
230 | CONFIG_BINFMT_ELF=y | 243 | CONFIG_BINFMT_ELF=y |
231 | CONFIG_COMPAT_BINFMT_ELF=y | 244 | CONFIG_COMPAT_BINFMT_ELF=y |
245 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | ||
246 | # CONFIG_HAVE_AOUT is not set | ||
232 | CONFIG_BINFMT_MISC=y | 247 | CONFIG_BINFMT_MISC=y |
233 | CONFIG_HUGETLB_PAGE_SIZE_VARIABLE=y | 248 | CONFIG_HUGETLB_PAGE_SIZE_VARIABLE=y |
234 | # CONFIG_IOMMU_VMERGE is not set | 249 | # CONFIG_IOMMU_VMERGE is not set |
@@ -251,7 +266,6 @@ CONFIG_SELECT_MEMORY_MODEL=y | |||
251 | CONFIG_SPARSEMEM_MANUAL=y | 266 | CONFIG_SPARSEMEM_MANUAL=y |
252 | CONFIG_SPARSEMEM=y | 267 | CONFIG_SPARSEMEM=y |
253 | CONFIG_HAVE_MEMORY_PRESENT=y | 268 | CONFIG_HAVE_MEMORY_PRESENT=y |
254 | # CONFIG_SPARSEMEM_STATIC is not set | ||
255 | CONFIG_SPARSEMEM_EXTREME=y | 269 | CONFIG_SPARSEMEM_EXTREME=y |
256 | CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y | 270 | CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y |
257 | # CONFIG_SPARSEMEM_VMEMMAP is not set | 271 | # CONFIG_SPARSEMEM_VMEMMAP is not set |
@@ -261,11 +275,14 @@ CONFIG_MEMORY_HOTPLUG_SPARSE=y | |||
261 | CONFIG_PAGEFLAGS_EXTENDED=y | 275 | CONFIG_PAGEFLAGS_EXTENDED=y |
262 | CONFIG_SPLIT_PTLOCK_CPUS=4 | 276 | CONFIG_SPLIT_PTLOCK_CPUS=4 |
263 | CONFIG_MIGRATION=y | 277 | CONFIG_MIGRATION=y |
264 | CONFIG_RESOURCES_64BIT=y | 278 | CONFIG_PHYS_ADDR_T_64BIT=y |
265 | CONFIG_ZONE_DMA_FLAG=1 | 279 | CONFIG_ZONE_DMA_FLAG=1 |
266 | CONFIG_BOUNCE=y | 280 | CONFIG_BOUNCE=y |
281 | CONFIG_UNEVICTABLE_LRU=y | ||
267 | CONFIG_ARCH_MEMORY_PROBE=y | 282 | CONFIG_ARCH_MEMORY_PROBE=y |
268 | CONFIG_PPC_HAS_HASH_64K=y | 283 | CONFIG_PPC_HAS_HASH_64K=y |
284 | CONFIG_PPC_4K_PAGES=y | ||
285 | # CONFIG_PPC_16K_PAGES is not set | ||
269 | # CONFIG_PPC_64K_PAGES is not set | 286 | # CONFIG_PPC_64K_PAGES is not set |
270 | CONFIG_FORCE_MAX_ZONEORDER=13 | 287 | CONFIG_FORCE_MAX_ZONEORDER=13 |
271 | CONFIG_SCHED_SMT=y | 288 | CONFIG_SCHED_SMT=y |
@@ -299,6 +316,7 @@ CONFIG_NET=y | |||
299 | # | 316 | # |
300 | # Networking options | 317 | # Networking options |
301 | # | 318 | # |
319 | CONFIG_COMPAT_NET_DEV_OPS=y | ||
302 | CONFIG_PACKET=y | 320 | CONFIG_PACKET=y |
303 | CONFIG_PACKET_MMAP=y | 321 | CONFIG_PACKET_MMAP=y |
304 | CONFIG_UNIX=y | 322 | CONFIG_UNIX=y |
@@ -361,6 +379,7 @@ CONFIG_IPV6_NDISC_NODETYPE=y | |||
361 | # CONFIG_TIPC is not set | 379 | # CONFIG_TIPC is not set |
362 | # CONFIG_ATM is not set | 380 | # CONFIG_ATM is not set |
363 | # CONFIG_BRIDGE is not set | 381 | # CONFIG_BRIDGE is not set |
382 | # CONFIG_NET_DSA is not set | ||
364 | # CONFIG_VLAN_8021Q is not set | 383 | # CONFIG_VLAN_8021Q is not set |
365 | # CONFIG_DECNET is not set | 384 | # CONFIG_DECNET is not set |
366 | # CONFIG_LLC2 is not set | 385 | # CONFIG_LLC2 is not set |
@@ -371,6 +390,7 @@ CONFIG_IPV6_NDISC_NODETYPE=y | |||
371 | # CONFIG_ECONET is not set | 390 | # CONFIG_ECONET is not set |
372 | # CONFIG_WAN_ROUTER is not set | 391 | # CONFIG_WAN_ROUTER is not set |
373 | # CONFIG_NET_SCHED is not set | 392 | # CONFIG_NET_SCHED is not set |
393 | # CONFIG_DCB is not set | ||
374 | 394 | ||
375 | # | 395 | # |
376 | # Network testing | 396 | # Network testing |
@@ -392,39 +412,37 @@ CONFIG_BT_HIDP=m | |||
392 | # | 412 | # |
393 | # Bluetooth device drivers | 413 | # Bluetooth device drivers |
394 | # | 414 | # |
395 | CONFIG_BT_HCIUSB=m | 415 | CONFIG_BT_HCIBTUSB=m |
396 | CONFIG_BT_HCIUSB_SCO=y | ||
397 | # CONFIG_BT_HCIUART is not set | 416 | # CONFIG_BT_HCIUART is not set |
398 | # CONFIG_BT_HCIBCM203X is not set | 417 | # CONFIG_BT_HCIBCM203X is not set |
399 | # CONFIG_BT_HCIBPA10X is not set | 418 | # CONFIG_BT_HCIBPA10X is not set |
400 | # CONFIG_BT_HCIBFUSB is not set | 419 | # CONFIG_BT_HCIBFUSB is not set |
401 | # CONFIG_BT_HCIVHCI is not set | 420 | # CONFIG_BT_HCIVHCI is not set |
402 | # CONFIG_AF_RXRPC is not set | 421 | # CONFIG_AF_RXRPC is not set |
403 | 422 | # CONFIG_PHONET is not set | |
404 | # | 423 | CONFIG_WIRELESS=y |
405 | # Wireless | ||
406 | # | ||
407 | CONFIG_CFG80211=m | 424 | CONFIG_CFG80211=m |
425 | # CONFIG_CFG80211_REG_DEBUG is not set | ||
408 | CONFIG_NL80211=y | 426 | CONFIG_NL80211=y |
427 | # CONFIG_WIRELESS_OLD_REGULATORY is not set | ||
409 | CONFIG_WIRELESS_EXT=y | 428 | CONFIG_WIRELESS_EXT=y |
410 | # CONFIG_WIRELESS_EXT_SYSFS is not set | 429 | # CONFIG_WIRELESS_EXT_SYSFS is not set |
430 | # CONFIG_LIB80211 is not set | ||
411 | CONFIG_MAC80211=m | 431 | CONFIG_MAC80211=m |
412 | 432 | ||
413 | # | 433 | # |
414 | # Rate control algorithm selection | 434 | # Rate control algorithm selection |
415 | # | 435 | # |
416 | CONFIG_MAC80211_RC_PID=y | 436 | CONFIG_MAC80211_RC_PID=y |
437 | # CONFIG_MAC80211_RC_MINSTREL is not set | ||
417 | CONFIG_MAC80211_RC_DEFAULT_PID=y | 438 | CONFIG_MAC80211_RC_DEFAULT_PID=y |
439 | # CONFIG_MAC80211_RC_DEFAULT_MINSTREL is not set | ||
418 | CONFIG_MAC80211_RC_DEFAULT="pid" | 440 | CONFIG_MAC80211_RC_DEFAULT="pid" |
419 | # CONFIG_MAC80211_MESH is not set | 441 | # CONFIG_MAC80211_MESH is not set |
420 | # CONFIG_MAC80211_LEDS is not set | 442 | # CONFIG_MAC80211_LEDS is not set |
421 | # CONFIG_MAC80211_DEBUGFS is not set | 443 | # CONFIG_MAC80211_DEBUGFS is not set |
422 | # CONFIG_MAC80211_DEBUG_MENU is not set | 444 | # CONFIG_MAC80211_DEBUG_MENU is not set |
423 | CONFIG_IEEE80211=m | 445 | # CONFIG_WIMAX is not set |
424 | # CONFIG_IEEE80211_DEBUG is not set | ||
425 | CONFIG_IEEE80211_CRYPT_WEP=m | ||
426 | CONFIG_IEEE80211_CRYPT_CCMP=m | ||
427 | CONFIG_IEEE80211_CRYPT_TKIP=m | ||
428 | # CONFIG_RFKILL is not set | 446 | # CONFIG_RFKILL is not set |
429 | # CONFIG_NET_9P is not set | 447 | # CONFIG_NET_9P is not set |
430 | 448 | ||
@@ -450,6 +468,7 @@ CONFIG_MTD_DEBUG=y | |||
450 | CONFIG_MTD_DEBUG_VERBOSE=0 | 468 | CONFIG_MTD_DEBUG_VERBOSE=0 |
451 | # CONFIG_MTD_CONCAT is not set | 469 | # CONFIG_MTD_CONCAT is not set |
452 | # CONFIG_MTD_PARTITIONS is not set | 470 | # CONFIG_MTD_PARTITIONS is not set |
471 | # CONFIG_MTD_TESTS is not set | ||
453 | 472 | ||
454 | # | 473 | # |
455 | # User Modules And Translation Layers | 474 | # User Modules And Translation Layers |
@@ -494,7 +513,6 @@ CONFIG_MTD_CFI_I2=y | |||
494 | # | 513 | # |
495 | # CONFIG_MTD_SLRAM is not set | 514 | # CONFIG_MTD_SLRAM is not set |
496 | # CONFIG_MTD_PHRAM is not set | 515 | # CONFIG_MTD_PHRAM is not set |
497 | CONFIG_MTD_PS3VRAM=y | ||
498 | # CONFIG_MTD_MTDRAM is not set | 516 | # CONFIG_MTD_MTDRAM is not set |
499 | # CONFIG_MTD_BLOCK2MTD is not set | 517 | # CONFIG_MTD_BLOCK2MTD is not set |
500 | 518 | ||
@@ -508,6 +526,11 @@ CONFIG_MTD_PS3VRAM=y | |||
508 | # CONFIG_MTD_ONENAND is not set | 526 | # CONFIG_MTD_ONENAND is not set |
509 | 527 | ||
510 | # | 528 | # |
529 | # LPDDR flash memory drivers | ||
530 | # | ||
531 | # CONFIG_MTD_LPDDR is not set | ||
532 | |||
533 | # | ||
511 | # UBI - Unsorted block images | 534 | # UBI - Unsorted block images |
512 | # | 535 | # |
513 | # CONFIG_MTD_UBI is not set | 536 | # CONFIG_MTD_UBI is not set |
@@ -528,8 +551,13 @@ CONFIG_BLK_DEV_RAM_SIZE=65535 | |||
528 | # CONFIG_ATA_OVER_ETH is not set | 551 | # CONFIG_ATA_OVER_ETH is not set |
529 | # CONFIG_BLK_DEV_HD is not set | 552 | # CONFIG_BLK_DEV_HD is not set |
530 | CONFIG_MISC_DEVICES=y | 553 | CONFIG_MISC_DEVICES=y |
531 | # CONFIG_EEPROM_93CX6 is not set | ||
532 | # CONFIG_ENCLOSURE_SERVICES is not set | 554 | # CONFIG_ENCLOSURE_SERVICES is not set |
555 | # CONFIG_C2PORT is not set | ||
556 | |||
557 | # | ||
558 | # EEPROM support | ||
559 | # | ||
560 | # CONFIG_EEPROM_93CX6 is not set | ||
533 | CONFIG_HAVE_IDE=y | 561 | CONFIG_HAVE_IDE=y |
534 | # CONFIG_IDE is not set | 562 | # CONFIG_IDE is not set |
535 | 563 | ||
@@ -575,7 +603,17 @@ CONFIG_SCSI_WAIT_SCAN=m | |||
575 | # CONFIG_SCSI_LOWLEVEL is not set | 603 | # CONFIG_SCSI_LOWLEVEL is not set |
576 | # CONFIG_SCSI_DH is not set | 604 | # CONFIG_SCSI_DH is not set |
577 | # CONFIG_ATA is not set | 605 | # CONFIG_ATA is not set |
578 | # CONFIG_MD is not set | 606 | CONFIG_MD=y |
607 | # CONFIG_BLK_DEV_MD is not set | ||
608 | CONFIG_BLK_DEV_DM=m | ||
609 | # CONFIG_DM_DEBUG is not set | ||
610 | # CONFIG_DM_CRYPT is not set | ||
611 | # CONFIG_DM_SNAPSHOT is not set | ||
612 | # CONFIG_DM_MIRROR is not set | ||
613 | # CONFIG_DM_ZERO is not set | ||
614 | # CONFIG_DM_MULTIPATH is not set | ||
615 | # CONFIG_DM_DELAY is not set | ||
616 | # CONFIG_DM_UEVENT is not set | ||
579 | # CONFIG_MACINTOSH_DRIVERS is not set | 617 | # CONFIG_MACINTOSH_DRIVERS is not set |
580 | CONFIG_NETDEVICES=y | 618 | CONFIG_NETDEVICES=y |
581 | # CONFIG_DUMMY is not set | 619 | # CONFIG_DUMMY is not set |
@@ -591,6 +629,9 @@ CONFIG_MII=m | |||
591 | # CONFIG_IBM_NEW_EMAC_RGMII is not set | 629 | # CONFIG_IBM_NEW_EMAC_RGMII is not set |
592 | # CONFIG_IBM_NEW_EMAC_TAH is not set | 630 | # CONFIG_IBM_NEW_EMAC_TAH is not set |
593 | # CONFIG_IBM_NEW_EMAC_EMAC4 is not set | 631 | # CONFIG_IBM_NEW_EMAC_EMAC4 is not set |
632 | # CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set | ||
633 | # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set | ||
634 | # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set | ||
594 | # CONFIG_B44 is not set | 635 | # CONFIG_B44 is not set |
595 | CONFIG_NETDEV_1000=y | 636 | CONFIG_NETDEV_1000=y |
596 | CONFIG_GELIC_NET=y | 637 | CONFIG_GELIC_NET=y |
@@ -604,6 +645,7 @@ CONFIG_GELIC_WIRELESS_OLD_PSK_INTERFACE=y | |||
604 | # CONFIG_WLAN_PRE80211 is not set | 645 | # CONFIG_WLAN_PRE80211 is not set |
605 | CONFIG_WLAN_80211=y | 646 | CONFIG_WLAN_80211=y |
606 | # CONFIG_LIBERTAS is not set | 647 | # CONFIG_LIBERTAS is not set |
648 | # CONFIG_LIBERTAS_THINFIRM is not set | ||
607 | # CONFIG_USB_ZD1201 is not set | 649 | # CONFIG_USB_ZD1201 is not set |
608 | # CONFIG_USB_NET_RNDIS_WLAN is not set | 650 | # CONFIG_USB_NET_RNDIS_WLAN is not set |
609 | # CONFIG_RTL8187 is not set | 651 | # CONFIG_RTL8187 is not set |
@@ -615,13 +657,11 @@ CONFIG_WLAN_80211=y | |||
615 | # CONFIG_B43LEGACY is not set | 657 | # CONFIG_B43LEGACY is not set |
616 | CONFIG_ZD1211RW=m | 658 | CONFIG_ZD1211RW=m |
617 | # CONFIG_ZD1211RW_DEBUG is not set | 659 | # CONFIG_ZD1211RW_DEBUG is not set |
618 | CONFIG_RT2X00=m | 660 | # CONFIG_RT2X00 is not set |
619 | CONFIG_RT2X00_LIB=m | 661 | |
620 | CONFIG_RT2X00_LIB_USB=m | 662 | # |
621 | CONFIG_RT2X00_LIB_FIRMWARE=y | 663 | # Enable WiMAX (Networking options) to see the WiMAX drivers |
622 | # CONFIG_RT2500USB is not set | 664 | # |
623 | CONFIG_RT73USB=m | ||
624 | # CONFIG_RT2X00_DEBUG is not set | ||
625 | 665 | ||
626 | # | 666 | # |
627 | # USB Network Adapters | 667 | # USB Network Adapters |
@@ -634,6 +674,7 @@ CONFIG_USB_USBNET=m | |||
634 | CONFIG_USB_NET_AX8817X=m | 674 | CONFIG_USB_NET_AX8817X=m |
635 | # CONFIG_USB_NET_CDCETHER is not set | 675 | # CONFIG_USB_NET_CDCETHER is not set |
636 | # CONFIG_USB_NET_DM9601 is not set | 676 | # CONFIG_USB_NET_DM9601 is not set |
677 | # CONFIG_USB_NET_SMSC95XX is not set | ||
637 | # CONFIG_USB_NET_GL620A is not set | 678 | # CONFIG_USB_NET_GL620A is not set |
638 | # CONFIG_USB_NET_NET1080 is not set | 679 | # CONFIG_USB_NET_NET1080 is not set |
639 | # CONFIG_USB_NET_PLUSB is not set | 680 | # CONFIG_USB_NET_PLUSB is not set |
@@ -664,7 +705,7 @@ CONFIG_SLHC=m | |||
664 | # Input device support | 705 | # Input device support |
665 | # | 706 | # |
666 | CONFIG_INPUT=y | 707 | CONFIG_INPUT=y |
667 | # CONFIG_INPUT_FF_MEMLESS is not set | 708 | CONFIG_INPUT_FF_MEMLESS=m |
668 | # CONFIG_INPUT_POLLDEV is not set | 709 | # CONFIG_INPUT_POLLDEV is not set |
669 | 710 | ||
670 | # | 711 | # |
@@ -735,8 +776,10 @@ CONFIG_DEVKMEM=y | |||
735 | # Non-8250 serial port support | 776 | # Non-8250 serial port support |
736 | # | 777 | # |
737 | CONFIG_UNIX98_PTYS=y | 778 | CONFIG_UNIX98_PTYS=y |
779 | # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set | ||
738 | CONFIG_LEGACY_PTYS=y | 780 | CONFIG_LEGACY_PTYS=y |
739 | CONFIG_LEGACY_PTY_COUNT=16 | 781 | CONFIG_LEGACY_PTY_COUNT=16 |
782 | # CONFIG_HVC_UDBG is not set | ||
740 | # CONFIG_IPMI_HANDLER is not set | 783 | # CONFIG_IPMI_HANDLER is not set |
741 | # CONFIG_HW_RANDOM is not set | 784 | # CONFIG_HW_RANDOM is not set |
742 | # CONFIG_R3964 is not set | 785 | # CONFIG_R3964 is not set |
@@ -753,11 +796,11 @@ CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y | |||
753 | # CONFIG_THERMAL is not set | 796 | # CONFIG_THERMAL is not set |
754 | # CONFIG_THERMAL_HWMON is not set | 797 | # CONFIG_THERMAL_HWMON is not set |
755 | # CONFIG_WATCHDOG is not set | 798 | # CONFIG_WATCHDOG is not set |
799 | CONFIG_SSB_POSSIBLE=y | ||
756 | 800 | ||
757 | # | 801 | # |
758 | # Sonics Silicon Backplane | 802 | # Sonics Silicon Backplane |
759 | # | 803 | # |
760 | CONFIG_SSB_POSSIBLE=y | ||
761 | # CONFIG_SSB is not set | 804 | # CONFIG_SSB is not set |
762 | 805 | ||
763 | # | 806 | # |
@@ -767,6 +810,7 @@ CONFIG_SSB_POSSIBLE=y | |||
767 | # CONFIG_MFD_SM501 is not set | 810 | # CONFIG_MFD_SM501 is not set |
768 | # CONFIG_HTC_PASIC3 is not set | 811 | # CONFIG_HTC_PASIC3 is not set |
769 | # CONFIG_MFD_TMIO is not set | 812 | # CONFIG_MFD_TMIO is not set |
813 | # CONFIG_REGULATOR is not set | ||
770 | 814 | ||
771 | # | 815 | # |
772 | # Multimedia devices | 816 | # Multimedia devices |
@@ -792,6 +836,7 @@ CONFIG_VIDEO_OUTPUT_CONTROL=m | |||
792 | CONFIG_FB=y | 836 | CONFIG_FB=y |
793 | # CONFIG_FIRMWARE_EDID is not set | 837 | # CONFIG_FIRMWARE_EDID is not set |
794 | # CONFIG_FB_DDC is not set | 838 | # CONFIG_FB_DDC is not set |
839 | # CONFIG_FB_BOOT_VESA_SUPPORT is not set | ||
795 | # CONFIG_FB_CFB_FILLRECT is not set | 840 | # CONFIG_FB_CFB_FILLRECT is not set |
796 | # CONFIG_FB_CFB_COPYAREA is not set | 841 | # CONFIG_FB_CFB_COPYAREA is not set |
797 | # CONFIG_FB_CFB_IMAGEBLIT is not set | 842 | # CONFIG_FB_CFB_IMAGEBLIT is not set |
@@ -817,6 +862,8 @@ CONFIG_FB_SYS_FOPS=y | |||
817 | CONFIG_FB_PS3=y | 862 | CONFIG_FB_PS3=y |
818 | CONFIG_FB_PS3_DEFAULT_SIZE_M=9 | 863 | CONFIG_FB_PS3_DEFAULT_SIZE_M=9 |
819 | # CONFIG_FB_VIRTUAL is not set | 864 | # CONFIG_FB_VIRTUAL is not set |
865 | # CONFIG_FB_METRONOME is not set | ||
866 | # CONFIG_FB_MB862XX is not set | ||
820 | # CONFIG_BACKLIGHT_LCD_SUPPORT is not set | 867 | # CONFIG_BACKLIGHT_LCD_SUPPORT is not set |
821 | 868 | ||
822 | # | 869 | # |
@@ -841,6 +888,7 @@ CONFIG_FB_LOGO_EXTRA=y | |||
841 | # CONFIG_LOGO_LINUX_VGA16 is not set | 888 | # CONFIG_LOGO_LINUX_VGA16 is not set |
842 | CONFIG_LOGO_LINUX_CLUT224=y | 889 | CONFIG_LOGO_LINUX_CLUT224=y |
843 | CONFIG_SOUND=m | 890 | CONFIG_SOUND=m |
891 | # CONFIG_SOUND_OSS_CORE is not set | ||
844 | CONFIG_SND=m | 892 | CONFIG_SND=m |
845 | CONFIG_SND_TIMER=m | 893 | CONFIG_SND_TIMER=m |
846 | CONFIG_SND_PCM=m | 894 | CONFIG_SND_PCM=m |
@@ -849,6 +897,7 @@ CONFIG_SND_RAWMIDI=m | |||
849 | # CONFIG_SND_SEQUENCER is not set | 897 | # CONFIG_SND_SEQUENCER is not set |
850 | # CONFIG_SND_MIXER_OSS is not set | 898 | # CONFIG_SND_MIXER_OSS is not set |
851 | # CONFIG_SND_PCM_OSS is not set | 899 | # CONFIG_SND_PCM_OSS is not set |
900 | # CONFIG_SND_HRTIMER is not set | ||
852 | # CONFIG_SND_DYNAMIC_MINORS is not set | 901 | # CONFIG_SND_DYNAMIC_MINORS is not set |
853 | CONFIG_SND_SUPPORT_OLD_API=y | 902 | CONFIG_SND_SUPPORT_OLD_API=y |
854 | CONFIG_SND_VERBOSE_PROCFS=y | 903 | CONFIG_SND_VERBOSE_PROCFS=y |
@@ -873,15 +922,40 @@ CONFIG_HIDRAW=y | |||
873 | # USB Input Devices | 922 | # USB Input Devices |
874 | # | 923 | # |
875 | CONFIG_USB_HID=m | 924 | CONFIG_USB_HID=m |
876 | # CONFIG_USB_HIDINPUT_POWERBOOK is not set | 925 | # CONFIG_HID_PID is not set |
877 | # CONFIG_HID_FF is not set | 926 | CONFIG_USB_HIDDEV=y |
878 | # CONFIG_USB_HIDDEV is not set | ||
879 | 927 | ||
880 | # | 928 | # |
881 | # USB HID Boot Protocol drivers | 929 | # USB HID Boot Protocol drivers |
882 | # | 930 | # |
883 | # CONFIG_USB_KBD is not set | 931 | # CONFIG_USB_KBD is not set |
884 | # CONFIG_USB_MOUSE is not set | 932 | # CONFIG_USB_MOUSE is not set |
933 | |||
934 | # | ||
935 | # Special HID drivers | ||
936 | # | ||
937 | # CONFIG_HID_COMPAT is not set | ||
938 | # CONFIG_HID_A4TECH is not set | ||
939 | # CONFIG_HID_APPLE is not set | ||
940 | # CONFIG_HID_BELKIN is not set | ||
941 | # CONFIG_HID_CHERRY is not set | ||
942 | # CONFIG_HID_CHICONY is not set | ||
943 | # CONFIG_HID_CYPRESS is not set | ||
944 | # CONFIG_HID_EZKEY is not set | ||
945 | # CONFIG_HID_GYRATION is not set | ||
946 | # CONFIG_HID_LOGITECH is not set | ||
947 | # CONFIG_HID_MICROSOFT is not set | ||
948 | # CONFIG_HID_MONTEREY is not set | ||
949 | # CONFIG_HID_NTRIG is not set | ||
950 | # CONFIG_HID_PANTHERLORD is not set | ||
951 | # CONFIG_HID_PETALYNX is not set | ||
952 | # CONFIG_HID_SAMSUNG is not set | ||
953 | # CONFIG_HID_SONY is not set | ||
954 | # CONFIG_HID_SUNPLUS is not set | ||
955 | # CONFIG_GREENASIA_FF is not set | ||
956 | # CONFIG_HID_TOPSEED is not set | ||
957 | # CONFIG_THRUSTMASTER_FF is not set | ||
958 | # CONFIG_ZEROPLUS_FF is not set | ||
885 | CONFIG_USB_SUPPORT=y | 959 | CONFIG_USB_SUPPORT=y |
886 | CONFIG_USB_ARCH_HAS_HCD=y | 960 | CONFIG_USB_ARCH_HAS_HCD=y |
887 | CONFIG_USB_ARCH_HAS_OHCI=y | 961 | CONFIG_USB_ARCH_HAS_OHCI=y |
@@ -898,7 +972,11 @@ CONFIG_USB_DEVICEFS=y | |||
898 | # CONFIG_USB_DYNAMIC_MINORS is not set | 972 | # CONFIG_USB_DYNAMIC_MINORS is not set |
899 | CONFIG_USB_SUSPEND=y | 973 | CONFIG_USB_SUSPEND=y |
900 | # CONFIG_USB_OTG is not set | 974 | # CONFIG_USB_OTG is not set |
901 | CONFIG_USB_MON=y | 975 | # CONFIG_USB_OTG_WHITELIST is not set |
976 | # CONFIG_USB_OTG_BLACKLIST_HUB is not set | ||
977 | CONFIG_USB_MON=m | ||
978 | # CONFIG_USB_WUSB is not set | ||
979 | # CONFIG_USB_WUSB_CBAF is not set | ||
902 | 980 | ||
903 | # | 981 | # |
904 | # USB Host Controller Drivers | 982 | # USB Host Controller Drivers |
@@ -909,6 +987,7 @@ CONFIG_USB_EHCI_HCD=m | |||
909 | # CONFIG_USB_EHCI_TT_NEWSCHED is not set | 987 | # CONFIG_USB_EHCI_TT_NEWSCHED is not set |
910 | CONFIG_USB_EHCI_BIG_ENDIAN_MMIO=y | 988 | CONFIG_USB_EHCI_BIG_ENDIAN_MMIO=y |
911 | # CONFIG_USB_EHCI_HCD_PPC_OF is not set | 989 | # CONFIG_USB_EHCI_HCD_PPC_OF is not set |
990 | # CONFIG_USB_OXU210HP_HCD is not set | ||
912 | # CONFIG_USB_ISP116X_HCD is not set | 991 | # CONFIG_USB_ISP116X_HCD is not set |
913 | # CONFIG_USB_ISP1760_HCD is not set | 992 | # CONFIG_USB_ISP1760_HCD is not set |
914 | CONFIG_USB_OHCI_HCD=m | 993 | CONFIG_USB_OHCI_HCD=m |
@@ -918,6 +997,7 @@ CONFIG_USB_OHCI_BIG_ENDIAN_MMIO=y | |||
918 | CONFIG_USB_OHCI_LITTLE_ENDIAN=y | 997 | CONFIG_USB_OHCI_LITTLE_ENDIAN=y |
919 | # CONFIG_USB_SL811_HCD is not set | 998 | # CONFIG_USB_SL811_HCD is not set |
920 | # CONFIG_USB_R8A66597_HCD is not set | 999 | # CONFIG_USB_R8A66597_HCD is not set |
1000 | # CONFIG_USB_HWA_HCD is not set | ||
921 | 1001 | ||
922 | # | 1002 | # |
923 | # Enable Host or Gadget support to see Inventra options | 1003 | # Enable Host or Gadget support to see Inventra options |
@@ -929,20 +1009,20 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y | |||
929 | # CONFIG_USB_ACM is not set | 1009 | # CONFIG_USB_ACM is not set |
930 | # CONFIG_USB_PRINTER is not set | 1010 | # CONFIG_USB_PRINTER is not set |
931 | # CONFIG_USB_WDM is not set | 1011 | # CONFIG_USB_WDM is not set |
1012 | # CONFIG_USB_TMC is not set | ||
932 | 1013 | ||
933 | # | 1014 | # |
934 | # NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' | 1015 | # NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed; |
935 | # | 1016 | # |
936 | 1017 | ||
937 | # | 1018 | # |
938 | # may also be needed; see USB_STORAGE Help for more information | 1019 | # see USB_STORAGE Help for more information |
939 | # | 1020 | # |
940 | CONFIG_USB_STORAGE=m | 1021 | CONFIG_USB_STORAGE=m |
941 | # CONFIG_USB_STORAGE_DEBUG is not set | 1022 | # CONFIG_USB_STORAGE_DEBUG is not set |
942 | # CONFIG_USB_STORAGE_DATAFAB is not set | 1023 | # CONFIG_USB_STORAGE_DATAFAB is not set |
943 | # CONFIG_USB_STORAGE_FREECOM is not set | 1024 | # CONFIG_USB_STORAGE_FREECOM is not set |
944 | # CONFIG_USB_STORAGE_ISD200 is not set | 1025 | # CONFIG_USB_STORAGE_ISD200 is not set |
945 | # CONFIG_USB_STORAGE_DPCM is not set | ||
946 | # CONFIG_USB_STORAGE_USBAT is not set | 1026 | # CONFIG_USB_STORAGE_USBAT is not set |
947 | # CONFIG_USB_STORAGE_SDDR09 is not set | 1027 | # CONFIG_USB_STORAGE_SDDR09 is not set |
948 | # CONFIG_USB_STORAGE_SDDR55 is not set | 1028 | # CONFIG_USB_STORAGE_SDDR55 is not set |
@@ -950,7 +1030,6 @@ CONFIG_USB_STORAGE=m | |||
950 | # CONFIG_USB_STORAGE_ALAUDA is not set | 1030 | # CONFIG_USB_STORAGE_ALAUDA is not set |
951 | # CONFIG_USB_STORAGE_ONETOUCH is not set | 1031 | # CONFIG_USB_STORAGE_ONETOUCH is not set |
952 | # CONFIG_USB_STORAGE_KARMA is not set | 1032 | # CONFIG_USB_STORAGE_KARMA is not set |
953 | # CONFIG_USB_STORAGE_SIERRA is not set | ||
954 | # CONFIG_USB_STORAGE_CYPRESS_ATACB is not set | 1033 | # CONFIG_USB_STORAGE_CYPRESS_ATACB is not set |
955 | # CONFIG_USB_LIBUSUAL is not set | 1034 | # CONFIG_USB_LIBUSUAL is not set |
956 | 1035 | ||
@@ -971,6 +1050,7 @@ CONFIG_USB_STORAGE=m | |||
971 | # CONFIG_USB_EMI62 is not set | 1050 | # CONFIG_USB_EMI62 is not set |
972 | # CONFIG_USB_EMI26 is not set | 1051 | # CONFIG_USB_EMI26 is not set |
973 | # CONFIG_USB_ADUTUX is not set | 1052 | # CONFIG_USB_ADUTUX is not set |
1053 | # CONFIG_USB_SEVSEG is not set | ||
974 | # CONFIG_USB_RIO500 is not set | 1054 | # CONFIG_USB_RIO500 is not set |
975 | # CONFIG_USB_LEGOTOWER is not set | 1055 | # CONFIG_USB_LEGOTOWER is not set |
976 | # CONFIG_USB_LCD is not set | 1056 | # CONFIG_USB_LCD is not set |
@@ -988,7 +1068,12 @@ CONFIG_USB_STORAGE=m | |||
988 | # CONFIG_USB_IOWARRIOR is not set | 1068 | # CONFIG_USB_IOWARRIOR is not set |
989 | # CONFIG_USB_TEST is not set | 1069 | # CONFIG_USB_TEST is not set |
990 | # CONFIG_USB_ISIGHTFW is not set | 1070 | # CONFIG_USB_ISIGHTFW is not set |
1071 | # CONFIG_USB_VST is not set | ||
991 | # CONFIG_USB_GADGET is not set | 1072 | # CONFIG_USB_GADGET is not set |
1073 | |||
1074 | # | ||
1075 | # OTG and related infrastructure | ||
1076 | # | ||
992 | # CONFIG_MMC is not set | 1077 | # CONFIG_MMC is not set |
993 | # CONFIG_MEMSTICK is not set | 1078 | # CONFIG_MEMSTICK is not set |
994 | # CONFIG_NEW_LEDS is not set | 1079 | # CONFIG_NEW_LEDS is not set |
@@ -1014,12 +1099,15 @@ CONFIG_RTC_INTF_DEV=y | |||
1014 | # Platform RTC drivers | 1099 | # Platform RTC drivers |
1015 | # | 1100 | # |
1016 | # CONFIG_RTC_DRV_CMOS is not set | 1101 | # CONFIG_RTC_DRV_CMOS is not set |
1102 | # CONFIG_RTC_DRV_DS1286 is not set | ||
1017 | # CONFIG_RTC_DRV_DS1511 is not set | 1103 | # CONFIG_RTC_DRV_DS1511 is not set |
1018 | # CONFIG_RTC_DRV_DS1553 is not set | 1104 | # CONFIG_RTC_DRV_DS1553 is not set |
1019 | # CONFIG_RTC_DRV_DS1742 is not set | 1105 | # CONFIG_RTC_DRV_DS1742 is not set |
1020 | # CONFIG_RTC_DRV_STK17TA8 is not set | 1106 | # CONFIG_RTC_DRV_STK17TA8 is not set |
1021 | # CONFIG_RTC_DRV_M48T86 is not set | 1107 | # CONFIG_RTC_DRV_M48T86 is not set |
1108 | # CONFIG_RTC_DRV_M48T35 is not set | ||
1022 | # CONFIG_RTC_DRV_M48T59 is not set | 1109 | # CONFIG_RTC_DRV_M48T59 is not set |
1110 | # CONFIG_RTC_DRV_BQ4802 is not set | ||
1023 | # CONFIG_RTC_DRV_V3020 is not set | 1111 | # CONFIG_RTC_DRV_V3020 is not set |
1024 | 1112 | ||
1025 | # | 1113 | # |
@@ -1028,6 +1116,7 @@ CONFIG_RTC_INTF_DEV=y | |||
1028 | CONFIG_RTC_DRV_PPC=m | 1116 | CONFIG_RTC_DRV_PPC=m |
1029 | # CONFIG_DMADEVICES is not set | 1117 | # CONFIG_DMADEVICES is not set |
1030 | # CONFIG_UIO is not set | 1118 | # CONFIG_UIO is not set |
1119 | # CONFIG_STAGING is not set | ||
1031 | 1120 | ||
1032 | # | 1121 | # |
1033 | # File systems | 1122 | # File systems |
@@ -1035,26 +1124,35 @@ CONFIG_RTC_DRV_PPC=m | |||
1035 | CONFIG_EXT2_FS=m | 1124 | CONFIG_EXT2_FS=m |
1036 | # CONFIG_EXT2_FS_XATTR is not set | 1125 | # CONFIG_EXT2_FS_XATTR is not set |
1037 | # CONFIG_EXT2_FS_XIP is not set | 1126 | # CONFIG_EXT2_FS_XIP is not set |
1038 | CONFIG_EXT3_FS=y | 1127 | CONFIG_EXT3_FS=m |
1039 | CONFIG_EXT3_FS_XATTR=y | 1128 | CONFIG_EXT3_FS_XATTR=y |
1040 | # CONFIG_EXT3_FS_POSIX_ACL is not set | 1129 | # CONFIG_EXT3_FS_POSIX_ACL is not set |
1041 | # CONFIG_EXT3_FS_SECURITY is not set | 1130 | # CONFIG_EXT3_FS_SECURITY is not set |
1042 | # CONFIG_EXT4DEV_FS is not set | 1131 | CONFIG_EXT4_FS=y |
1043 | CONFIG_JBD=y | 1132 | # CONFIG_EXT4DEV_COMPAT is not set |
1133 | CONFIG_EXT4_FS_XATTR=y | ||
1134 | # CONFIG_EXT4_FS_POSIX_ACL is not set | ||
1135 | # CONFIG_EXT4_FS_SECURITY is not set | ||
1136 | CONFIG_JBD=m | ||
1044 | # CONFIG_JBD_DEBUG is not set | 1137 | # CONFIG_JBD_DEBUG is not set |
1138 | CONFIG_JBD2=y | ||
1139 | # CONFIG_JBD2_DEBUG is not set | ||
1045 | CONFIG_FS_MBCACHE=y | 1140 | CONFIG_FS_MBCACHE=y |
1046 | # CONFIG_REISERFS_FS is not set | 1141 | # CONFIG_REISERFS_FS is not set |
1047 | # CONFIG_JFS_FS is not set | 1142 | # CONFIG_JFS_FS is not set |
1048 | # CONFIG_FS_POSIX_ACL is not set | 1143 | # CONFIG_FS_POSIX_ACL is not set |
1144 | CONFIG_FILE_LOCKING=y | ||
1049 | # CONFIG_XFS_FS is not set | 1145 | # CONFIG_XFS_FS is not set |
1050 | # CONFIG_GFS2_FS is not set | 1146 | # CONFIG_GFS2_FS is not set |
1051 | # CONFIG_OCFS2_FS is not set | 1147 | # CONFIG_OCFS2_FS is not set |
1148 | # CONFIG_BTRFS_FS is not set | ||
1052 | CONFIG_DNOTIFY=y | 1149 | CONFIG_DNOTIFY=y |
1053 | CONFIG_INOTIFY=y | 1150 | CONFIG_INOTIFY=y |
1054 | CONFIG_INOTIFY_USER=y | 1151 | CONFIG_INOTIFY_USER=y |
1055 | CONFIG_QUOTA=y | 1152 | CONFIG_QUOTA=y |
1056 | # CONFIG_QUOTA_NETLINK_INTERFACE is not set | 1153 | # CONFIG_QUOTA_NETLINK_INTERFACE is not set |
1057 | CONFIG_PRINT_QUOTA_WARNING=y | 1154 | CONFIG_PRINT_QUOTA_WARNING=y |
1155 | CONFIG_QUOTA_TREE=y | ||
1058 | # CONFIG_QFMT_V1 is not set | 1156 | # CONFIG_QFMT_V1 is not set |
1059 | CONFIG_QFMT_V2=y | 1157 | CONFIG_QFMT_V2=y |
1060 | CONFIG_QUOTACTL=y | 1158 | CONFIG_QUOTACTL=y |
@@ -1087,16 +1185,14 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" | |||
1087 | CONFIG_PROC_FS=y | 1185 | CONFIG_PROC_FS=y |
1088 | CONFIG_PROC_KCORE=y | 1186 | CONFIG_PROC_KCORE=y |
1089 | CONFIG_PROC_SYSCTL=y | 1187 | CONFIG_PROC_SYSCTL=y |
1188 | CONFIG_PROC_PAGE_MONITOR=y | ||
1090 | CONFIG_SYSFS=y | 1189 | CONFIG_SYSFS=y |
1091 | CONFIG_TMPFS=y | 1190 | CONFIG_TMPFS=y |
1092 | # CONFIG_TMPFS_POSIX_ACL is not set | 1191 | # CONFIG_TMPFS_POSIX_ACL is not set |
1093 | CONFIG_HUGETLBFS=y | 1192 | CONFIG_HUGETLBFS=y |
1094 | CONFIG_HUGETLB_PAGE=y | 1193 | CONFIG_HUGETLB_PAGE=y |
1095 | # CONFIG_CONFIGFS_FS is not set | 1194 | # CONFIG_CONFIGFS_FS is not set |
1096 | 1195 | CONFIG_MISC_FILESYSTEMS=y | |
1097 | # | ||
1098 | # Miscellaneous filesystems | ||
1099 | # | ||
1100 | # CONFIG_ADFS_FS is not set | 1196 | # CONFIG_ADFS_FS is not set |
1101 | # CONFIG_AFFS_FS is not set | 1197 | # CONFIG_AFFS_FS is not set |
1102 | # CONFIG_HFS_FS is not set | 1198 | # CONFIG_HFS_FS is not set |
@@ -1106,6 +1202,7 @@ CONFIG_HUGETLB_PAGE=y | |||
1106 | # CONFIG_EFS_FS is not set | 1202 | # CONFIG_EFS_FS is not set |
1107 | # CONFIG_JFFS2_FS is not set | 1203 | # CONFIG_JFFS2_FS is not set |
1108 | # CONFIG_CRAMFS is not set | 1204 | # CONFIG_CRAMFS is not set |
1205 | # CONFIG_SQUASHFS is not set | ||
1109 | # CONFIG_VXFS_FS is not set | 1206 | # CONFIG_VXFS_FS is not set |
1110 | # CONFIG_MINIX_FS is not set | 1207 | # CONFIG_MINIX_FS is not set |
1111 | # CONFIG_OMFS_FS is not set | 1208 | # CONFIG_OMFS_FS is not set |
@@ -1126,6 +1223,7 @@ CONFIG_LOCKD_V4=y | |||
1126 | CONFIG_NFS_COMMON=y | 1223 | CONFIG_NFS_COMMON=y |
1127 | CONFIG_SUNRPC=y | 1224 | CONFIG_SUNRPC=y |
1128 | CONFIG_SUNRPC_GSS=y | 1225 | CONFIG_SUNRPC_GSS=y |
1226 | # CONFIG_SUNRPC_REGISTER_V4 is not set | ||
1129 | CONFIG_RPCSEC_GSS_KRB5=y | 1227 | CONFIG_RPCSEC_GSS_KRB5=y |
1130 | # CONFIG_RPCSEC_GSS_SPKM3 is not set | 1228 | # CONFIG_RPCSEC_GSS_SPKM3 is not set |
1131 | # CONFIG_SMB_FS is not set | 1229 | # CONFIG_SMB_FS is not set |
@@ -1190,9 +1288,9 @@ CONFIG_NLS_ISO8859_1=y | |||
1190 | # Library routines | 1288 | # Library routines |
1191 | # | 1289 | # |
1192 | CONFIG_BITREVERSE=y | 1290 | CONFIG_BITREVERSE=y |
1193 | # CONFIG_GENERIC_FIND_FIRST_BIT is not set | 1291 | CONFIG_GENERIC_FIND_LAST_BIT=y |
1194 | CONFIG_CRC_CCITT=m | 1292 | CONFIG_CRC_CCITT=m |
1195 | # CONFIG_CRC16 is not set | 1293 | CONFIG_CRC16=y |
1196 | CONFIG_CRC_T10DIF=y | 1294 | CONFIG_CRC_T10DIF=y |
1197 | CONFIG_CRC_ITU_T=m | 1295 | CONFIG_CRC_ITU_T=m |
1198 | CONFIG_CRC32=y | 1296 | CONFIG_CRC32=y |
@@ -1250,27 +1348,44 @@ CONFIG_DEBUG_WRITECOUNT=y | |||
1250 | CONFIG_DEBUG_MEMORY_INIT=y | 1348 | CONFIG_DEBUG_MEMORY_INIT=y |
1251 | CONFIG_DEBUG_LIST=y | 1349 | CONFIG_DEBUG_LIST=y |
1252 | # CONFIG_DEBUG_SG is not set | 1350 | # CONFIG_DEBUG_SG is not set |
1253 | CONFIG_FRAME_POINTER=y | 1351 | # CONFIG_DEBUG_NOTIFIERS is not set |
1254 | # CONFIG_BOOT_PRINTK_DELAY is not set | 1352 | # CONFIG_BOOT_PRINTK_DELAY is not set |
1255 | # CONFIG_RCU_TORTURE_TEST is not set | 1353 | # CONFIG_RCU_TORTURE_TEST is not set |
1354 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | ||
1256 | # CONFIG_BACKTRACE_SELF_TEST is not set | 1355 | # CONFIG_BACKTRACE_SELF_TEST is not set |
1356 | # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set | ||
1257 | # CONFIG_FAULT_INJECTION is not set | 1357 | # CONFIG_FAULT_INJECTION is not set |
1258 | # CONFIG_LATENCYTOP is not set | 1358 | # CONFIG_LATENCYTOP is not set |
1259 | CONFIG_SYSCTL_SYSCALL_CHECK=y | 1359 | CONFIG_SYSCTL_SYSCALL_CHECK=y |
1260 | CONFIG_HAVE_FTRACE=y | 1360 | CONFIG_NOP_TRACER=y |
1361 | CONFIG_HAVE_FUNCTION_TRACER=y | ||
1261 | CONFIG_HAVE_DYNAMIC_FTRACE=y | 1362 | CONFIG_HAVE_DYNAMIC_FTRACE=y |
1262 | # CONFIG_FTRACE is not set | 1363 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y |
1364 | CONFIG_RING_BUFFER=y | ||
1365 | CONFIG_TRACING=y | ||
1366 | |||
1367 | # | ||
1368 | # Tracers | ||
1369 | # | ||
1370 | # CONFIG_FUNCTION_TRACER is not set | ||
1263 | # CONFIG_IRQSOFF_TRACER is not set | 1371 | # CONFIG_IRQSOFF_TRACER is not set |
1264 | # CONFIG_SCHED_TRACER is not set | 1372 | # CONFIG_SCHED_TRACER is not set |
1265 | # CONFIG_CONTEXT_SWITCH_TRACER is not set | 1373 | # CONFIG_CONTEXT_SWITCH_TRACER is not set |
1374 | # CONFIG_BOOT_TRACER is not set | ||
1375 | # CONFIG_TRACE_BRANCH_PROFILING is not set | ||
1376 | # CONFIG_STACK_TRACER is not set | ||
1377 | # CONFIG_FTRACE_STARTUP_TEST is not set | ||
1378 | # CONFIG_DYNAMIC_PRINTK_DEBUG is not set | ||
1266 | # CONFIG_SAMPLES is not set | 1379 | # CONFIG_SAMPLES is not set |
1267 | CONFIG_HAVE_ARCH_KGDB=y | 1380 | CONFIG_HAVE_ARCH_KGDB=y |
1268 | # CONFIG_KGDB is not set | 1381 | # CONFIG_KGDB is not set |
1382 | CONFIG_PRINT_STACK_DEPTH=64 | ||
1269 | CONFIG_DEBUG_STACKOVERFLOW=y | 1383 | CONFIG_DEBUG_STACKOVERFLOW=y |
1270 | # CONFIG_DEBUG_STACK_USAGE is not set | 1384 | # CONFIG_DEBUG_STACK_USAGE is not set |
1271 | # CONFIG_DEBUG_PAGEALLOC is not set | 1385 | # CONFIG_DEBUG_PAGEALLOC is not set |
1272 | # CONFIG_CODE_PATCHING_SELFTEST is not set | 1386 | # CONFIG_CODE_PATCHING_SELFTEST is not set |
1273 | # CONFIG_FTR_FIXUP_SELFTEST is not set | 1387 | # CONFIG_FTR_FIXUP_SELFTEST is not set |
1388 | # CONFIG_MSI_BITMAP_SELFTEST is not set | ||
1274 | # CONFIG_XMON is not set | 1389 | # CONFIG_XMON is not set |
1275 | CONFIG_IRQSTACKS=y | 1390 | CONFIG_IRQSTACKS=y |
1276 | # CONFIG_VIRQ_DEBUG is not set | 1391 | # CONFIG_VIRQ_DEBUG is not set |
@@ -1282,16 +1397,26 @@ CONFIG_IRQSTACKS=y | |||
1282 | # | 1397 | # |
1283 | # CONFIG_KEYS is not set | 1398 | # CONFIG_KEYS is not set |
1284 | # CONFIG_SECURITY is not set | 1399 | # CONFIG_SECURITY is not set |
1400 | # CONFIG_SECURITYFS is not set | ||
1285 | # CONFIG_SECURITY_FILE_CAPABILITIES is not set | 1401 | # CONFIG_SECURITY_FILE_CAPABILITIES is not set |
1286 | CONFIG_CRYPTO=y | 1402 | CONFIG_CRYPTO=y |
1287 | 1403 | ||
1288 | # | 1404 | # |
1289 | # Crypto core or helper | 1405 | # Crypto core or helper |
1290 | # | 1406 | # |
1407 | # CONFIG_CRYPTO_FIPS is not set | ||
1291 | CONFIG_CRYPTO_ALGAPI=y | 1408 | CONFIG_CRYPTO_ALGAPI=y |
1409 | CONFIG_CRYPTO_ALGAPI2=y | ||
1292 | CONFIG_CRYPTO_AEAD=m | 1410 | CONFIG_CRYPTO_AEAD=m |
1411 | CONFIG_CRYPTO_AEAD2=y | ||
1293 | CONFIG_CRYPTO_BLKCIPHER=y | 1412 | CONFIG_CRYPTO_BLKCIPHER=y |
1413 | CONFIG_CRYPTO_BLKCIPHER2=y | ||
1414 | CONFIG_CRYPTO_HASH=y | ||
1415 | CONFIG_CRYPTO_HASH2=y | ||
1416 | CONFIG_CRYPTO_RNG=m | ||
1417 | CONFIG_CRYPTO_RNG2=y | ||
1294 | CONFIG_CRYPTO_MANAGER=y | 1418 | CONFIG_CRYPTO_MANAGER=y |
1419 | CONFIG_CRYPTO_MANAGER2=y | ||
1295 | CONFIG_CRYPTO_GF128MUL=m | 1420 | CONFIG_CRYPTO_GF128MUL=m |
1296 | # CONFIG_CRYPTO_NULL is not set | 1421 | # CONFIG_CRYPTO_NULL is not set |
1297 | # CONFIG_CRYPTO_CRYPTD is not set | 1422 | # CONFIG_CRYPTO_CRYPTD is not set |
@@ -1363,6 +1488,11 @@ CONFIG_CRYPTO_SALSA20=m | |||
1363 | # | 1488 | # |
1364 | # CONFIG_CRYPTO_DEFLATE is not set | 1489 | # CONFIG_CRYPTO_DEFLATE is not set |
1365 | CONFIG_CRYPTO_LZO=m | 1490 | CONFIG_CRYPTO_LZO=m |
1491 | |||
1492 | # | ||
1493 | # Random Number Generation | ||
1494 | # | ||
1495 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | ||
1366 | CONFIG_CRYPTO_HW=y | 1496 | CONFIG_CRYPTO_HW=y |
1367 | # CONFIG_PPC_CLOCK is not set | 1497 | # CONFIG_PPC_CLOCK is not set |
1368 | # CONFIG_VIRTUALIZATION is not set | 1498 | # CONFIG_VIRTUALIZATION is not set |
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 4911104791c3..21172badd708 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h | |||
@@ -241,9 +241,11 @@ extern const char *powerpc_base_platform; | |||
241 | /* We need to mark all pages as being coherent if we're SMP or we have a | 241 | /* We need to mark all pages as being coherent if we're SMP or we have a |
242 | * 74[45]x and an MPC107 host bridge. Also 83xx and PowerQUICC II | 242 | * 74[45]x and an MPC107 host bridge. Also 83xx and PowerQUICC II |
243 | * require it for PCI "streaming/prefetch" to work properly. | 243 | * require it for PCI "streaming/prefetch" to work properly. |
244 | * This is also required by 52xx family. | ||
244 | */ | 245 | */ |
245 | #if defined(CONFIG_SMP) || defined(CONFIG_MPC10X_BRIDGE) \ | 246 | #if defined(CONFIG_SMP) || defined(CONFIG_MPC10X_BRIDGE) \ |
246 | || defined(CONFIG_PPC_83xx) || defined(CONFIG_8260) | 247 | || defined(CONFIG_PPC_83xx) || defined(CONFIG_8260) \ |
248 | || defined(CONFIG_PPC_MPC52xx) | ||
247 | #define CPU_FTR_COMMON CPU_FTR_NEED_COHERENT | 249 | #define CPU_FTR_COMMON CPU_FTR_NEED_COHERENT |
248 | #else | 250 | #else |
249 | #define CPU_FTR_COMMON 0 | 251 | #define CPU_FTR_COMMON 0 |
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index a1c4cfd25ded..7db2e42d97a2 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S | |||
@@ -511,7 +511,7 @@ InstructionTLBMiss: | |||
511 | and r1,r1,r2 /* writable if _RW and _DIRTY */ | 511 | and r1,r1,r2 /* writable if _RW and _DIRTY */ |
512 | rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ | 512 | rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ |
513 | rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */ | 513 | rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */ |
514 | ori r1,r1,0xe14 /* clear out reserved bits and M */ | 514 | ori r1,r1,0xe04 /* clear out reserved bits */ |
515 | andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */ | 515 | andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */ |
516 | mtspr SPRN_RPA,r1 | 516 | mtspr SPRN_RPA,r1 |
517 | mfspr r3,SPRN_IMISS | 517 | mfspr r3,SPRN_IMISS |
@@ -585,7 +585,7 @@ DataLoadTLBMiss: | |||
585 | and r1,r1,r2 /* writable if _RW and _DIRTY */ | 585 | and r1,r1,r2 /* writable if _RW and _DIRTY */ |
586 | rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ | 586 | rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ |
587 | rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */ | 587 | rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */ |
588 | ori r1,r1,0xe14 /* clear out reserved bits and M */ | 588 | ori r1,r1,0xe04 /* clear out reserved bits */ |
589 | andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */ | 589 | andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */ |
590 | mtspr SPRN_RPA,r1 | 590 | mtspr SPRN_RPA,r1 |
591 | mfspr r3,SPRN_DMISS | 591 | mfspr r3,SPRN_DMISS |
@@ -653,7 +653,7 @@ DataStoreTLBMiss: | |||
653 | stw r3,0(r2) /* update PTE (accessed/dirty bits) */ | 653 | stw r3,0(r2) /* update PTE (accessed/dirty bits) */ |
654 | /* Convert linux-style PTE to low word of PPC-style PTE */ | 654 | /* Convert linux-style PTE to low word of PPC-style PTE */ |
655 | rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ | 655 | rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ |
656 | li r1,0xe15 /* clear out reserved bits and M */ | 656 | li r1,0xe05 /* clear out reserved bits & PP lsb */ |
657 | andc r1,r3,r1 /* PP = user? 2: 0 */ | 657 | andc r1,r3,r1 /* PP = user? 2: 0 */ |
658 | mtspr SPRN_RPA,r1 | 658 | mtspr SPRN_RPA,r1 |
659 | mfspr r3,SPRN_DMISS | 659 | mfspr r3,SPRN_DMISS |
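The only functional change in the three TLB-miss hunks above is the RPA clear mask: 0xe14 and 0xe15 become 0xe04 and 0xe05, so bit 0x10 is no longer forced to zero. Going by the in-line comments, that bit is M, the memory-coherence bit of WIMG in the low word of the hash PTE (stated here as an interpretation, not quoted from the headers), which lines up with the cputable.h hunk earlier that adds the MPC52xx family to the CPUs requiring CPU_FTR_NEED_COHERENT. A trivial check of the mask arithmetic:

    #include <stdio.h>

    int main(void)
    {
            unsigned int old_mask = 0xe14;  /* "clear out reserved bits and M" */
            unsigned int new_mask = 0xe04;  /* "clear out reserved bits"       */

            /* The two masks differ in exactly one bit. */
            printf("old ^ new = %#x\n", old_mask ^ new_mask);      /* 0x10 */
            return 0;
    }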
diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig index 920cf7a454b1..740ef56a1550 100644 --- a/arch/powerpc/platforms/ps3/Kconfig +++ b/arch/powerpc/platforms/ps3/Kconfig | |||
@@ -128,6 +128,13 @@ config PS3_FLASH | |||
128 | be disabled on the kernel command line using "ps3flash=off", to | 128 | be disabled on the kernel command line using "ps3flash=off", to |
129 | not allocate this fixed buffer. | 129 | not allocate this fixed buffer. |
130 | 130 | ||
131 | config PS3_VRAM | ||
132 | tristate "PS3 Video RAM Storage Driver" | ||
133 | depends on FB_PS3=y && BLOCK && m | ||
134 | help | ||
135 | This driver allows you to use excess PS3 video RAM as volatile | ||
136 | storage or system swap. | ||
137 | |||
131 | config PS3_LPM | 138 | config PS3_LPM |
132 | tristate "PS3 Logical Performance Monitor support" | 139 | tristate "PS3 Logical Performance Monitor support" |
133 | depends on PPC_PS3 | 140 | depends on PPC_PS3 |
diff --git a/arch/s390/include/asm/mman.h b/arch/s390/include/asm/mman.h index 7839767d837e..da01432e8f44 100644 --- a/arch/s390/include/asm/mman.h +++ b/arch/s390/include/asm/mman.h | |||
@@ -22,4 +22,9 @@ | |||
22 | #define MCL_CURRENT 1 /* lock all current mappings */ | 22 | #define MCL_CURRENT 1 /* lock all current mappings */ |
23 | #define MCL_FUTURE 2 /* lock all future mappings */ | 23 | #define MCL_FUTURE 2 /* lock all future mappings */ |
24 | 24 | ||
25 | #if defined(__KERNEL__) && !defined(__ASSEMBLY__) && defined(CONFIG_64BIT) | ||
26 | int s390_mmap_check(unsigned long addr, unsigned long len); | ||
27 | #define arch_mmap_check(addr,len,flags) s390_mmap_check(addr,len) | ||
28 | #endif | ||
29 | |||
25 | #endif /* __S390_MMAN_H__ */ | 30 | #endif /* __S390_MMAN_H__ */ |
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 066b99502e09..db4523fe38ac 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h | |||
@@ -61,7 +61,7 @@ extern void print_cpu_info(struct cpuinfo_S390 *); | |||
61 | extern int get_cpu_capability(unsigned int *); | 61 | extern int get_cpu_capability(unsigned int *); |
62 | 62 | ||
63 | /* | 63 | /* |
64 | * User space process size: 2GB for 31 bit, 4TB for 64 bit. | 64 | * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit. |
65 | */ | 65 | */ |
66 | #ifndef __s390x__ | 66 | #ifndef __s390x__ |
67 | 67 | ||
@@ -70,8 +70,7 @@ extern int get_cpu_capability(unsigned int *); | |||
70 | 70 | ||
71 | #else /* __s390x__ */ | 71 | #else /* __s390x__ */ |
72 | 72 | ||
73 | #define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk,TIF_31BIT) ? \ | 73 | #define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit) |
74 | (1UL << 31) : (1UL << 53)) | ||
75 | #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \ | 74 | #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \ |
76 | (1UL << 30) : (1UL << 41)) | 75 | (1UL << 30) : (1UL << 41)) |
77 | #define TASK_SIZE TASK_SIZE_OF(current) | 76 | #define TASK_SIZE TASK_SIZE_OF(current) |
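For scale, the limits behind the reworded comment: TASK_SIZE now follows the mm's asce_limit, which starts at the three-level page table limit (the "4TB") and is raised on demand to the four-level limit (the "8PT", i.e. 8 petabytes) by crst_table_upgrade() in the arch/s390/mm/mmap.c hunks further down. The role of the three-level limit is stated here as general s390 background, not something spelled out in these hunks. A quick check of the two powers of two:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long three_level = 1ULL << 42;    /* 4 TB */
            unsigned long long four_level  = 1ULL << 53;    /* 8 PB */

            printf("2^42 = %llu bytes = %llu TB\n", three_level, three_level >> 40);
            printf("2^53 = %llu bytes = %llu PB\n", four_level,  four_level  >> 50);
            return 0;
    }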
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h index c93eb50e1d09..c979c3b56ab0 100644 --- a/arch/s390/include/asm/topology.h +++ b/arch/s390/include/asm/topology.h | |||
@@ -30,6 +30,8 @@ static inline void s390_init_cpu_topology(void) | |||
30 | }; | 30 | }; |
31 | #endif | 31 | #endif |
32 | 32 | ||
33 | #define SD_MC_INIT SD_CPU_INIT | ||
34 | |||
33 | #include <asm-generic/topology.h> | 35 | #include <asm-generic/topology.h> |
34 | 36 | ||
35 | #endif /* _ASM_S390_TOPOLOGY_H */ | 37 | #endif /* _ASM_S390_TOPOLOGY_H */ |
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index 397d131a345f..80641224a095 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S | |||
@@ -5,6 +5,8 @@ | |||
5 | * | 5 | * |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <asm/asm-offsets.h> | ||
9 | |||
8 | #ifndef CONFIG_64BIT | 10 | #ifndef CONFIG_64BIT |
9 | .globl _mcount | 11 | .globl _mcount |
10 | _mcount: | 12 | _mcount: |
@@ -14,7 +16,7 @@ _mcount: | |||
14 | ahi %r15,-96 | 16 | ahi %r15,-96 |
15 | l %r3,100(%r15) | 17 | l %r3,100(%r15) |
16 | la %r2,0(%r14) | 18 | la %r2,0(%r14) |
17 | st %r1,0(%r15) | 19 | st %r1,__SF_BACKCHAIN(%r15) |
18 | la %r3,0(%r3) | 20 | la %r3,0(%r3) |
19 | bras %r14,0f | 21 | bras %r14,0f |
20 | .long ftrace_trace_function | 22 | .long ftrace_trace_function |
@@ -38,7 +40,7 @@ _mcount: | |||
38 | stg %r14,112(%r15) | 40 | stg %r14,112(%r15) |
39 | lgr %r1,%r15 | 41 | lgr %r1,%r15 |
40 | aghi %r15,-160 | 42 | aghi %r15,-160 |
41 | stg %r1,0(%r15) | 43 | stg %r1,__SF_BACKCHAIN(%r15) |
42 | lgr %r2,%r14 | 44 | lgr %r2,%r14 |
43 | lg %r3,168(%r15) | 45 | lg %r3,168(%r15) |
44 | larl %r14,ftrace_trace_function | 46 | larl %r14,ftrace_trace_function |
diff --git a/arch/s390/lib/div64.c b/arch/s390/lib/div64.c index a5f8300bf3ee..d9e62c0b576a 100644 --- a/arch/s390/lib/div64.c +++ b/arch/s390/lib/div64.c | |||
@@ -61,7 +61,7 @@ static uint32_t __div64_31(uint64_t *n, uint32_t base) | |||
61 | " clr %0,%3\n" | 61 | " clr %0,%3\n" |
62 | " jl 0f\n" | 62 | " jl 0f\n" |
63 | " slr %0,%3\n" | 63 | " slr %0,%3\n" |
64 | " alr %1,%2\n" | 64 | " ahi %1,1\n" |
65 | "0:\n" | 65 | "0:\n" |
66 | : "+d" (reg2), "+d" (reg3), "=d" (tmp) | 66 | : "+d" (reg2), "+d" (reg3), "=d" (tmp) |
67 | : "d" (base), "2" (1UL) : "cc" ); | 67 | : "d" (base), "2" (1UL) : "cc" ); |
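The one-instruction fix above sits in the correction step of a 64-by-32 restoring division: the clr compares the partial remainder with the divisor, and when the remainder is not smaller the slr subtracts the divisor and the just-shifted quotient bit has to be set, which is an increment by one (the new ahi %1,1) rather than an add of some register. A plain C model of that shift, compare, subtract, increment loop; the function name is made up and this is only an illustration of the algorithm, not a transcription of the s390 assembly:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t div64_31_model(uint64_t *n, uint32_t base)
    {
            uint64_t rem = 0, quot = 0;
            int i;

            for (i = 63; i >= 0; i--) {
                    rem = (rem << 1) | ((*n >> i) & 1);  /* shift in next bit */
                    quot <<= 1;
                    if (rem >= base) {                   /* clr: compare      */
                            rem -= base;                 /* slr: subtract     */
                            quot += 1;                   /* ahi: set the bit  */
                    }
            }
            *n = quot;
            return (uint32_t)rem;
    }

    int main(void)
    {
            uint64_t n = 5ULL * 1000000007ULL + 3;
            uint32_t r = div64_31_model(&n, 1000000007U);

            printf("quotient = %llu, remainder = %u\n",
                   (unsigned long long)n, r);            /* 5 and 3 */
            return 0;
    }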
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index d66215b0fde9..b0b84c35b0ad 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c | |||
@@ -119,8 +119,6 @@ retry: | |||
119 | goto fault; | 119 | goto fault; |
120 | 120 | ||
121 | pfn = pte_pfn(*pte); | 121 | pfn = pte_pfn(*pte); |
122 | if (!pfn_valid(pfn)) | ||
123 | goto out; | ||
124 | 122 | ||
125 | offset = uaddr & (PAGE_SIZE - 1); | 123 | offset = uaddr & (PAGE_SIZE - 1); |
126 | size = min(n - done, PAGE_SIZE - offset); | 124 | size = min(n - done, PAGE_SIZE - offset); |
@@ -135,7 +133,6 @@ retry: | |||
135 | done += size; | 133 | done += size; |
136 | uaddr += size; | 134 | uaddr += size; |
137 | } while (done < n); | 135 | } while (done < n); |
138 | out: | ||
139 | spin_unlock(&mm->page_table_lock); | 136 | spin_unlock(&mm->page_table_lock); |
140 | return n - done; | 137 | return n - done; |
141 | fault: | 138 | fault: |
@@ -163,9 +160,6 @@ retry: | |||
163 | goto fault; | 160 | goto fault; |
164 | 161 | ||
165 | pfn = pte_pfn(*pte); | 162 | pfn = pte_pfn(*pte); |
166 | if (!pfn_valid(pfn)) | ||
167 | goto out; | ||
168 | |||
169 | ret = (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1)); | 163 | ret = (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1)); |
170 | out: | 164 | out: |
171 | return ret; | 165 | return ret; |
@@ -244,11 +238,6 @@ retry: | |||
244 | goto fault; | 238 | goto fault; |
245 | 239 | ||
246 | pfn = pte_pfn(*pte); | 240 | pfn = pte_pfn(*pte); |
247 | if (!pfn_valid(pfn)) { | ||
248 | done = -1; | ||
249 | goto out; | ||
250 | } | ||
251 | |||
252 | offset = uaddr & (PAGE_SIZE-1); | 241 | offset = uaddr & (PAGE_SIZE-1); |
253 | addr = (char *)(pfn << PAGE_SHIFT) + offset; | 242 | addr = (char *)(pfn << PAGE_SHIFT) + offset; |
254 | len = min(count - done, PAGE_SIZE - offset); | 243 | len = min(count - done, PAGE_SIZE - offset); |
@@ -256,7 +245,6 @@ retry: | |||
256 | done += len_str; | 245 | done += len_str; |
257 | uaddr += len_str; | 246 | uaddr += len_str; |
258 | } while ((len_str == len) && (done < count)); | 247 | } while ((len_str == len) && (done < count)); |
259 | out: | ||
260 | spin_unlock(&mm->page_table_lock); | 248 | spin_unlock(&mm->page_table_lock); |
261 | return done + 1; | 249 | return done + 1; |
262 | fault: | 250 | fault: |
@@ -325,12 +313,7 @@ retry: | |||
325 | } | 313 | } |
326 | 314 | ||
327 | pfn_from = pte_pfn(*pte_from); | 315 | pfn_from = pte_pfn(*pte_from); |
328 | if (!pfn_valid(pfn_from)) | ||
329 | goto out; | ||
330 | pfn_to = pte_pfn(*pte_to); | 316 | pfn_to = pte_pfn(*pte_to); |
331 | if (!pfn_valid(pfn_to)) | ||
332 | goto out; | ||
333 | |||
334 | offset_from = uaddr_from & (PAGE_SIZE-1); | 317 | offset_from = uaddr_from & (PAGE_SIZE-1); |
335 | offset_to = uaddr_from & (PAGE_SIZE-1); | 318 | offset_to = uaddr_from & (PAGE_SIZE-1); |
336 | offset_max = max(offset_from, offset_to); | 319 | offset_max = max(offset_from, offset_to); |
@@ -342,7 +325,6 @@ retry: | |||
342 | uaddr_from += size; | 325 | uaddr_from += size; |
343 | uaddr_to += size; | 326 | uaddr_to += size; |
344 | } while (done < n); | 327 | } while (done < n); |
345 | out: | ||
346 | spin_unlock(&mm->page_table_lock); | 328 | spin_unlock(&mm->page_table_lock); |
347 | return n - done; | 329 | return n - done; |
348 | fault: | 330 | fault: |
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index 5932a824547a..e008d236cc15 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c | |||
@@ -35,7 +35,7 @@ | |||
35 | * Leave an at least ~128 MB hole. | 35 | * Leave an at least ~128 MB hole. |
36 | */ | 36 | */ |
37 | #define MIN_GAP (128*1024*1024) | 37 | #define MIN_GAP (128*1024*1024) |
38 | #define MAX_GAP (TASK_SIZE/6*5) | 38 | #define MAX_GAP (STACK_TOP/6*5) |
39 | 39 | ||
40 | static inline unsigned long mmap_base(void) | 40 | static inline unsigned long mmap_base(void) |
41 | { | 41 | { |
@@ -46,7 +46,7 @@ static inline unsigned long mmap_base(void) | |||
46 | else if (gap > MAX_GAP) | 46 | else if (gap > MAX_GAP) |
47 | gap = MAX_GAP; | 47 | gap = MAX_GAP; |
48 | 48 | ||
49 | return TASK_SIZE - (gap & PAGE_MASK); | 49 | return STACK_TOP - (gap & PAGE_MASK); |
50 | } | 50 | } |
51 | 51 | ||
52 | static inline int mmap_is_legacy(void) | 52 | static inline int mmap_is_legacy(void) |
@@ -89,42 +89,58 @@ EXPORT_SYMBOL_GPL(arch_pick_mmap_layout); | |||
89 | 89 | ||
90 | #else | 90 | #else |
91 | 91 | ||
92 | int s390_mmap_check(unsigned long addr, unsigned long len) | ||
93 | { | ||
94 | if (!test_thread_flag(TIF_31BIT) && | ||
95 | len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) | ||
96 | return crst_table_upgrade(current->mm, 1UL << 53); | ||
97 | return 0; | ||
98 | } | ||
99 | |||
92 | static unsigned long | 100 | static unsigned long |
93 | s390_get_unmapped_area(struct file *filp, unsigned long addr, | 101 | s390_get_unmapped_area(struct file *filp, unsigned long addr, |
94 | unsigned long len, unsigned long pgoff, unsigned long flags) | 102 | unsigned long len, unsigned long pgoff, unsigned long flags) |
95 | { | 103 | { |
96 | struct mm_struct *mm = current->mm; | 104 | struct mm_struct *mm = current->mm; |
105 | unsigned long area; | ||
97 | int rc; | 106 | int rc; |
98 | 107 | ||
99 | addr = arch_get_unmapped_area(filp, addr, len, pgoff, flags); | 108 | area = arch_get_unmapped_area(filp, addr, len, pgoff, flags); |
100 | if (addr & ~PAGE_MASK) | 109 | if (!(area & ~PAGE_MASK)) |
101 | return addr; | 110 | return area; |
102 | if (unlikely(mm->context.asce_limit < addr + len)) { | 111 | if (area == -ENOMEM && |
103 | rc = crst_table_upgrade(mm, addr + len); | 112 | !test_thread_flag(TIF_31BIT) && TASK_SIZE < (1UL << 53)) { |
113 | /* Upgrade the page table to 4 levels and retry. */ | ||
114 | rc = crst_table_upgrade(mm, 1UL << 53); | ||
104 | if (rc) | 115 | if (rc) |
105 | return (unsigned long) rc; | 116 | return (unsigned long) rc; |
117 | area = arch_get_unmapped_area(filp, addr, len, pgoff, flags); | ||
106 | } | 118 | } |
107 | return addr; | 119 | return area; |
108 | } | 120 | } |
109 | 121 | ||
110 | static unsigned long | 122 | static unsigned long |
111 | s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | 123 | s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr, |
112 | const unsigned long len, const unsigned long pgoff, | 124 | const unsigned long len, const unsigned long pgoff, |
113 | const unsigned long flags) | 125 | const unsigned long flags) |
114 | { | 126 | { |
115 | struct mm_struct *mm = current->mm; | 127 | struct mm_struct *mm = current->mm; |
116 | unsigned long addr = addr0; | 128 | unsigned long area; |
117 | int rc; | 129 | int rc; |
118 | 130 | ||
119 | addr = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags); | 131 | area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags); |
120 | if (addr & ~PAGE_MASK) | 132 | if (!(area & ~PAGE_MASK)) |
121 | return addr; | 133 | return area; |
122 | if (unlikely(mm->context.asce_limit < addr + len)) { | 134 | if (area == -ENOMEM && |
123 | rc = crst_table_upgrade(mm, addr + len); | 135 | !test_thread_flag(TIF_31BIT) && TASK_SIZE < (1UL << 53)) { |
136 | /* Upgrade the page table to 4 levels and retry. */ | ||
137 | rc = crst_table_upgrade(mm, 1UL << 53); | ||
124 | if (rc) | 138 | if (rc) |
125 | return (unsigned long) rc; | 139 | return (unsigned long) rc; |
140 | area = arch_get_unmapped_area_topdown(filp, addr, len, | ||
141 | pgoff, flags); | ||
126 | } | 142 | } |
127 | return addr; | 143 | return area; |
128 | } | 144 | } |
129 | /* | 145 | /* |
130 | * This function, called very early during the creation of a new | 146 | * This function, called very early during the creation of a new |
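The side-by-side columns above interleave the old and new versions of s390_get_unmapped_area(); read on its own, the new allocation path is simply "try the generic allocator, and on -ENOMEM upgrade the page table to 4 levels and retry once". Below is a condensed restatement in plain C for readability only (bottom-up variant, same helpers as in the hunk); it adds nothing beyond what the hunk already introduces.

static unsigned long
s390_get_unmapped_area(struct file *filp, unsigned long addr,
		       unsigned long len, unsigned long pgoff,
		       unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	/* First attempt with the generic allocator. */
	area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;		/* page aligned, i.e. success */
	/* Only -ENOMEM on a 64-bit task justifies a page table upgrade. */
	if (area == -ENOMEM &&
	    !test_thread_flag(TIF_31BIT) && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry once. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	}
	return area;
}

The topdown variant at the end of the hunk follows the same pattern with arch_get_unmapped_area_topdown().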
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 0767827540b1..6b6ddc4ea02b 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c | |||
@@ -117,6 +117,7 @@ repeat: | |||
117 | crst_table_init(table, entry); | 117 | crst_table_init(table, entry); |
118 | pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd); | 118 | pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd); |
119 | mm->pgd = (pgd_t *) table; | 119 | mm->pgd = (pgd_t *) table; |
120 | mm->task_size = mm->context.asce_limit; | ||
120 | table = NULL; | 121 | table = NULL; |
121 | } | 122 | } |
122 | spin_unlock(&mm->page_table_lock); | 123 | spin_unlock(&mm->page_table_lock); |
@@ -154,6 +155,7 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit) | |||
154 | BUG(); | 155 | BUG(); |
155 | } | 156 | } |
156 | mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN); | 157 | mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN); |
158 | mm->task_size = mm->context.asce_limit; | ||
157 | crst_table_free(mm, (unsigned long *) pgd); | 159 | crst_table_free(mm, (unsigned long *) pgd); |
158 | } | 160 | } |
159 | update_mm(mm, current); | 161 | update_mm(mm, current); |
diff --git a/drivers/block/Makefile b/drivers/block/Makefile index 204332b29578..87e120e0a79c 100644 --- a/drivers/block/Makefile +++ b/drivers/block/Makefile | |||
@@ -9,6 +9,7 @@ obj-$(CONFIG_MAC_FLOPPY) += swim3.o | |||
9 | obj-$(CONFIG_BLK_DEV_FD) += floppy.o | 9 | obj-$(CONFIG_BLK_DEV_FD) += floppy.o |
10 | obj-$(CONFIG_AMIGA_FLOPPY) += amiflop.o | 10 | obj-$(CONFIG_AMIGA_FLOPPY) += amiflop.o |
11 | obj-$(CONFIG_PS3_DISK) += ps3disk.o | 11 | obj-$(CONFIG_PS3_DISK) += ps3disk.o |
12 | obj-$(CONFIG_PS3_VRAM) += ps3vram.o | ||
12 | obj-$(CONFIG_ATARI_FLOPPY) += ataflop.o | 13 | obj-$(CONFIG_ATARI_FLOPPY) += ataflop.o |
13 | obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o | 14 | obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o |
14 | obj-$(CONFIG_BLK_DEV_RAM) += brd.o | 15 | obj-$(CONFIG_BLK_DEV_RAM) += brd.o |
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c new file mode 100644 index 000000000000..393ed6760d78 --- /dev/null +++ b/drivers/block/ps3vram.c | |||
@@ -0,0 +1,865 @@ | |||
1 | /* | ||
2 | * ps3vram - Use extra PS3 video ram as a block device. | ||
3 | * | ||
4 | * Copyright 2009 Sony Corporation | ||
5 | * | ||
6 | * Based on the MTD ps3vram driver, which is | ||
7 | * Copyright (c) 2007-2008 Jim Paris <jim@jtan.com> | ||
8 | * RSX DMA support added by Vivien Chappelier <vivien.chappelier@free.fr> | ||
9 | */ | ||
10 | |||
11 | #include <linux/blkdev.h> | ||
12 | #include <linux/delay.h> | ||
13 | #include <linux/proc_fs.h> | ||
14 | #include <linux/seq_file.h> | ||
15 | |||
16 | #include <asm/firmware.h> | ||
17 | #include <asm/lv1call.h> | ||
18 | #include <asm/ps3.h> | ||
19 | |||
20 | |||
21 | #define DEVICE_NAME "ps3vram" | ||
22 | |||
23 | |||
24 | #define XDR_BUF_SIZE (2 * 1024 * 1024) /* XDR buffer (must be 1MiB aligned) */ | ||
25 | #define XDR_IOIF 0x0c000000 | ||
26 | |||
27 | #define FIFO_BASE XDR_IOIF | ||
28 | #define FIFO_SIZE (64 * 1024) | ||
29 | |||
30 | #define DMA_PAGE_SIZE (4 * 1024) | ||
31 | |||
32 | #define CACHE_PAGE_SIZE (256 * 1024) | ||
33 | #define CACHE_PAGE_COUNT ((XDR_BUF_SIZE - FIFO_SIZE) / CACHE_PAGE_SIZE) | ||
34 | |||
35 | #define CACHE_OFFSET CACHE_PAGE_SIZE | ||
36 | #define FIFO_OFFSET 0 | ||
37 | |||
38 | #define CTRL_PUT 0x10 | ||
39 | #define CTRL_GET 0x11 | ||
40 | #define CTRL_TOP 0x15 | ||
41 | |||
42 | #define UPLOAD_SUBCH 1 | ||
43 | #define DOWNLOAD_SUBCH 2 | ||
44 | |||
45 | #define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN 0x0000030c | ||
46 | #define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY 0x00000104 | ||
47 | |||
48 | #define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT 0x601 | ||
49 | |||
50 | #define CACHE_PAGE_PRESENT 1 | ||
51 | #define CACHE_PAGE_DIRTY 2 | ||
52 | |||
53 | struct ps3vram_tag { | ||
54 | unsigned int address; | ||
55 | unsigned int flags; | ||
56 | }; | ||
57 | |||
58 | struct ps3vram_cache { | ||
59 | unsigned int page_count; | ||
60 | unsigned int page_size; | ||
61 | struct ps3vram_tag *tags; | ||
62 | unsigned int hit; | ||
63 | unsigned int miss; | ||
64 | }; | ||
65 | |||
66 | struct ps3vram_priv { | ||
67 | struct request_queue *queue; | ||
68 | struct gendisk *gendisk; | ||
69 | |||
70 | u64 size; | ||
71 | |||
72 | u64 memory_handle; | ||
73 | u64 context_handle; | ||
74 | u32 *ctrl; | ||
75 | u32 *reports; | ||
76 | u8 __iomem *ddr_base; | ||
77 | u8 *xdr_buf; | ||
78 | |||
79 | u32 *fifo_base; | ||
80 | u32 *fifo_ptr; | ||
81 | |||
82 | struct ps3vram_cache cache; | ||
83 | |||
84 | /* Used to serialize cache/DMA operations */ | ||
85 | struct mutex lock; | ||
86 | }; | ||
87 | |||
88 | |||
89 | static int ps3vram_major; | ||
90 | |||
91 | |||
92 | static struct block_device_operations ps3vram_fops = { | ||
93 | .owner = THIS_MODULE, | ||
94 | }; | ||
95 | |||
96 | |||
97 | #define DMA_NOTIFIER_HANDLE_BASE 0x66604200 /* first DMA notifier handle */ | ||
98 | #define DMA_NOTIFIER_OFFSET_BASE 0x1000 /* first DMA notifier offset */ | ||
99 | #define DMA_NOTIFIER_SIZE 0x40 | ||
100 | #define NOTIFIER 7 /* notifier used for completion report */ | ||
101 | |||
102 | static char *size = "256M"; | ||
103 | module_param(size, charp, 0); | ||
104 | MODULE_PARM_DESC(size, "memory size"); | ||
105 | |||
106 | static u32 *ps3vram_get_notifier(u32 *reports, int notifier) | ||
107 | { | ||
108 | return (void *)reports + DMA_NOTIFIER_OFFSET_BASE + | ||
109 | DMA_NOTIFIER_SIZE * notifier; | ||
110 | } | ||
111 | |||
112 | static void ps3vram_notifier_reset(struct ps3_system_bus_device *dev) | ||
113 | { | ||
114 | struct ps3vram_priv *priv = dev->core.driver_data; | ||
115 | u32 *notify = ps3vram_get_notifier(priv->reports, NOTIFIER); | ||
116 | int i; | ||
117 | |||
118 | for (i = 0; i < 4; i++) | ||
119 | notify[i] = 0xffffffff; | ||
120 | } | ||
121 | |||
122 | static int ps3vram_notifier_wait(struct ps3_system_bus_device *dev, | ||
123 | unsigned int timeout_ms) | ||
124 | { | ||
125 | struct ps3vram_priv *priv = dev->core.driver_data; | ||
126 | u32 *notify = ps3vram_get_notifier(priv->reports, NOTIFIER); | ||
127 | unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); | ||
128 | |||
129 | do { | ||
130 | if (!notify[3]) | ||
131 | return 0; | ||
132 | msleep(1); | ||
133 | } while (time_before(jiffies, timeout)); | ||
134 | |||
135 | return -ETIMEDOUT; | ||
136 | } | ||
137 | |||
138 | static void ps3vram_init_ring(struct ps3_system_bus_device *dev) | ||
139 | { | ||
140 | struct ps3vram_priv *priv = dev->core.driver_data; | ||
141 | |||
142 | priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET; | ||
143 | priv->ctrl[CTRL_GET] = FIFO_BASE + FIFO_OFFSET; | ||
144 | } | ||
145 | |||
146 | static int ps3vram_wait_ring(struct ps3_system_bus_device *dev, | ||
147 | unsigned int timeout_ms) | ||
148 | { | ||
149 | struct ps3vram_priv *priv = dev->core.driver_data; | ||
150 | unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); | ||
151 | |||
152 | do { | ||
153 | if (priv->ctrl[CTRL_PUT] == priv->ctrl[CTRL_GET]) | ||
154 | return 0; | ||
155 | msleep(1); | ||
156 | } while (time_before(jiffies, timeout)); | ||
157 | |||
158 | dev_warn(&dev->core, "FIFO timeout (%08x/%08x/%08x)\n", | ||
159 | priv->ctrl[CTRL_PUT], priv->ctrl[CTRL_GET], | ||
160 | priv->ctrl[CTRL_TOP]); | ||
161 | |||
162 | return -ETIMEDOUT; | ||
163 | } | ||
164 | |||
165 | static void ps3vram_out_ring(struct ps3vram_priv *priv, u32 data) | ||
166 | { | ||
167 | *(priv->fifo_ptr)++ = data; | ||
168 | } | ||
169 | |||
170 | static void ps3vram_begin_ring(struct ps3vram_priv *priv, u32 chan, u32 tag, | ||
171 | u32 size) | ||
172 | { | ||
173 | ps3vram_out_ring(priv, (size << 18) | (chan << 13) | tag); | ||
174 | } | ||
175 | |||
176 | static void ps3vram_rewind_ring(struct ps3_system_bus_device *dev) | ||
177 | { | ||
178 | struct ps3vram_priv *priv = dev->core.driver_data; | ||
179 | int status; | ||
180 | |||
181 | ps3vram_out_ring(priv, 0x20000000 | (FIFO_BASE + FIFO_OFFSET)); | ||
182 | |||
183 | priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET; | ||
184 | |||
185 | /* asking the HV for a blit will kick the FIFO */ | ||
186 | status = lv1_gpu_context_attribute(priv->context_handle, | ||
187 | L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT, 0, | ||
188 | 0, 0, 0); | ||
189 | if (status) | ||
190 | dev_err(&dev->core, | ||
191 | "%s: lv1_gpu_context_attribute failed %d\n", __func__, | ||
192 | status); | ||
193 | |||
194 | priv->fifo_ptr = priv->fifo_base; | ||
195 | } | ||
196 | |||
197 | static void ps3vram_fire_ring(struct ps3_system_bus_device *dev) | ||
198 | { | ||
199 | struct ps3vram_priv *priv = dev->core.driver_data; | ||
200 | int status; | ||
201 | |||
202 | mutex_lock(&ps3_gpu_mutex); | ||
203 | |||
204 | priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET + | ||
205 | (priv->fifo_ptr - priv->fifo_base) * sizeof(u32); | ||
206 | |||
207 | /* asking the HV for a blit will kick the FIFO */ | ||
208 | status = lv1_gpu_context_attribute(priv->context_handle, | ||
209 | L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT, 0, | ||
210 | 0, 0, 0); | ||
211 | if (status) | ||
212 | dev_err(&dev->core, | ||
213 | "%s: lv1_gpu_context_attribute failed %d\n", __func__, | ||
214 | status); | ||
215 | |||
216 | if ((priv->fifo_ptr - priv->fifo_base) * sizeof(u32) > | ||
217 | FIFO_SIZE - 1024) { | ||
218 | dev_dbg(&dev->core, "FIFO full, rewinding\n"); | ||
219 | ps3vram_wait_ring(dev, 200); | ||
220 | ps3vram_rewind_ring(dev); | ||
221 | } | ||
222 | |||
223 | mutex_unlock(&ps3_gpu_mutex); | ||
224 | } | ||
225 | |||
226 | static void ps3vram_bind(struct ps3_system_bus_device *dev) | ||
227 | { | ||
228 | struct ps3vram_priv *priv = dev->core.driver_data; | ||
229 | |||
230 | ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0, 1); | ||
231 | ps3vram_out_ring(priv, 0x31337303); | ||
232 | ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0x180, 3); | ||
233 | ps3vram_out_ring(priv, DMA_NOTIFIER_HANDLE_BASE + NOTIFIER); | ||
234 | ps3vram_out_ring(priv, 0xfeed0001); /* DMA system RAM instance */ | ||
235 | ps3vram_out_ring(priv, 0xfeed0000); /* DMA video RAM instance */ | ||
236 | |||
237 | ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0, 1); | ||
238 | ps3vram_out_ring(priv, 0x3137c0de); | ||
239 | ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0x180, 3); | ||
240 | ps3vram_out_ring(priv, DMA_NOTIFIER_HANDLE_BASE + NOTIFIER); | ||
241 | ps3vram_out_ring(priv, 0xfeed0000); /* DMA video RAM instance */ | ||
242 | ps3vram_out_ring(priv, 0xfeed0001); /* DMA system RAM instance */ | ||
243 | |||
244 | ps3vram_fire_ring(dev); | ||
245 | } | ||
246 | |||
247 | static int ps3vram_upload(struct ps3_system_bus_device *dev, | ||
248 | unsigned int src_offset, unsigned int dst_offset, | ||
249 | int len, int count) | ||
250 | { | ||
251 | struct ps3vram_priv *priv = dev->core.driver_data; | ||
252 | |||
253 | ps3vram_begin_ring(priv, UPLOAD_SUBCH, | ||
254 | NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); | ||
255 | ps3vram_out_ring(priv, XDR_IOIF + src_offset); | ||
256 | ps3vram_out_ring(priv, dst_offset); | ||
257 | ps3vram_out_ring(priv, len); | ||
258 | ps3vram_out_ring(priv, len); | ||
259 | ps3vram_out_ring(priv, len); | ||
260 | ps3vram_out_ring(priv, count); | ||
261 | ps3vram_out_ring(priv, (1 << 8) | 1); | ||
262 | ps3vram_out_ring(priv, 0); | ||
263 | |||
264 | ps3vram_notifier_reset(dev); | ||
265 | ps3vram_begin_ring(priv, UPLOAD_SUBCH, | ||
266 | NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY, 1); | ||
267 | ps3vram_out_ring(priv, 0); | ||
268 | ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0x100, 1); | ||
269 | ps3vram_out_ring(priv, 0); | ||
270 | ps3vram_fire_ring(dev); | ||
271 | if (ps3vram_notifier_wait(dev, 200) < 0) { | ||
272 | dev_warn(&dev->core, "%s: Notifier timeout\n", __func__); | ||
273 | return -1; | ||
274 | } | ||
275 | |||
276 | return 0; | ||
277 | } | ||
278 | |||
279 | static int ps3vram_download(struct ps3_system_bus_device *dev, | ||
280 | unsigned int src_offset, unsigned int dst_offset, | ||
281 | int len, int count) | ||
282 | { | ||
283 | struct ps3vram_priv *priv = dev->core.driver_data; | ||
284 | |||
285 | ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, | ||
286 | NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); | ||
287 | ps3vram_out_ring(priv, src_offset); | ||
288 | ps3vram_out_ring(priv, XDR_IOIF + dst_offset); | ||
289 | ps3vram_out_ring(priv, len); | ||
290 | ps3vram_out_ring(priv, len); | ||
291 | ps3vram_out_ring(priv, len); | ||
292 | ps3vram_out_ring(priv, count); | ||
293 | ps3vram_out_ring(priv, (1 << 8) | 1); | ||
294 | ps3vram_out_ring(priv, 0); | ||
295 | |||
296 | ps3vram_notifier_reset(dev); | ||
297 | ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, | ||
298 | NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY, 1); | ||
299 | ps3vram_out_ring(priv, 0); | ||
300 | ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0x100, 1); | ||
301 | ps3vram_out_ring(priv, 0); | ||
302 | ps3vram_fire_ring(dev); | ||
303 | if (ps3vram_notifier_wait(dev, 200) < 0) { | ||
304 | dev_warn(&dev->core, "%s: Notifier timeout\n", __func__); | ||
305 | return -1; | ||
306 | } | ||
307 | |||
308 | return 0; | ||
309 | } | ||
310 | |||
311 | static void ps3vram_cache_evict(struct ps3_system_bus_device *dev, int entry) | ||
312 | { | ||
313 | struct ps3vram_priv *priv = dev->core.driver_data; | ||
314 | struct ps3vram_cache *cache = &priv->cache; | ||
315 | |||
316 | if (!(cache->tags[entry].flags & CACHE_PAGE_DIRTY)) | ||
317 | return; | ||
318 | |||
319 | dev_dbg(&dev->core, "Flushing %d: 0x%08x\n", entry, | ||
320 | cache->tags[entry].address); | ||
321 | if (ps3vram_upload(dev, CACHE_OFFSET + entry * cache->page_size, | ||
322 | cache->tags[entry].address, DMA_PAGE_SIZE, | ||
323 | cache->page_size / DMA_PAGE_SIZE) < 0) { | ||
324 | dev_err(&dev->core, | ||
325 | "Failed to upload from 0x%x to " "0x%x size 0x%x\n", | ||
326 | entry * cache->page_size, cache->tags[entry].address, | ||
327 | cache->page_size); | ||
328 | } | ||
329 | cache->tags[entry].flags &= ~CACHE_PAGE_DIRTY; | ||
330 | } | ||
331 | |||
332 | static void ps3vram_cache_load(struct ps3_system_bus_device *dev, int entry, | ||
333 | unsigned int address) | ||
334 | { | ||
335 | struct ps3vram_priv *priv = dev->core.driver_data; | ||
336 | struct ps3vram_cache *cache = &priv->cache; | ||
337 | |||
338 | dev_dbg(&dev->core, "Fetching %d: 0x%08x\n", entry, address); | ||
339 | if (ps3vram_download(dev, address, | ||
340 | CACHE_OFFSET + entry * cache->page_size, | ||
341 | DMA_PAGE_SIZE, | ||
342 | cache->page_size / DMA_PAGE_SIZE) < 0) { | ||
343 | dev_err(&dev->core, | ||
344 | "Failed to download from 0x%x to 0x%x size 0x%x\n", | ||
345 | address, entry * cache->page_size, cache->page_size); | ||
346 | } | ||
347 | |||
348 | cache->tags[entry].address = address; | ||
349 | cache->tags[entry].flags |= CACHE_PAGE_PRESENT; | ||
350 | } | ||
351 | |||
352 | |||
353 | static void ps3vram_cache_flush(struct ps3_system_bus_device *dev) | ||
354 | { | ||
355 | struct ps3vram_priv *priv = dev->core.driver_data; | ||
356 | struct ps3vram_cache *cache = &priv->cache; | ||
357 | int i; | ||
358 | |||
359 | dev_dbg(&dev->core, "FLUSH\n"); | ||
360 | for (i = 0; i < cache->page_count; i++) { | ||
361 | ps3vram_cache_evict(dev, i); | ||
362 | cache->tags[i].flags = 0; | ||
363 | } | ||
364 | } | ||
365 | |||
366 | static unsigned int ps3vram_cache_match(struct ps3_system_bus_device *dev, | ||
367 | loff_t address) | ||
368 | { | ||
369 | struct ps3vram_priv *priv = dev->core.driver_data; | ||
370 | struct ps3vram_cache *cache = &priv->cache; | ||
371 | unsigned int base; | ||
372 | unsigned int offset; | ||
373 | int i; | ||
374 | static int counter; | ||
375 | |||
376 | offset = (unsigned int) (address & (cache->page_size - 1)); | ||
377 | base = (unsigned int) (address - offset); | ||
378 | |||
379 | /* fully associative check */ | ||
380 | for (i = 0; i < cache->page_count; i++) { | ||
381 | if ((cache->tags[i].flags & CACHE_PAGE_PRESENT) && | ||
382 | cache->tags[i].address == base) { | ||
383 | cache->hit++; | ||
384 | dev_dbg(&dev->core, "Found entry %d: 0x%08x\n", i, | ||
385 | cache->tags[i].address); | ||
386 | return i; | ||
387 | } | ||
388 | } | ||
389 | |||
390 | /* choose a random entry */ | ||
391 | i = (jiffies + (counter++)) % cache->page_count; | ||
392 | dev_dbg(&dev->core, "Using entry %d\n", i); | ||
393 | |||
394 | ps3vram_cache_evict(dev, i); | ||
395 | ps3vram_cache_load(dev, i, base); | ||
396 | |||
397 | cache->miss++; | ||
398 | return i; | ||
399 | } | ||
400 | |||
401 | static int ps3vram_cache_init(struct ps3_system_bus_device *dev) | ||
402 | { | ||
403 | struct ps3vram_priv *priv = dev->core.driver_data; | ||
404 | |||
405 | priv->cache.page_count = CACHE_PAGE_COUNT; | ||
406 | priv->cache.page_size = CACHE_PAGE_SIZE; | ||
407 | priv->cache.tags = kzalloc(sizeof(struct ps3vram_tag) * | ||
408 | CACHE_PAGE_COUNT, GFP_KERNEL); | ||
409 | if (priv->cache.tags == NULL) { | ||
410 | dev_err(&dev->core, "Could not allocate cache tags\n"); | ||
411 | return -ENOMEM; | ||
412 | } | ||
413 | |||
414 | dev_info(&dev->core, "Created ram cache: %d entries, %d KiB each\n", | ||
415 | CACHE_PAGE_COUNT, CACHE_PAGE_SIZE / 1024); | ||
416 | |||
417 | return 0; | ||
418 | } | ||
419 | |||
420 | static void ps3vram_cache_cleanup(struct ps3_system_bus_device *dev) | ||
421 | { | ||
422 | struct ps3vram_priv *priv = dev->core.driver_data; | ||
423 | |||
424 | ps3vram_cache_flush(dev); | ||
425 | kfree(priv->cache.tags); | ||
426 | } | ||
427 | |||
428 | static int ps3vram_read(struct ps3_system_bus_device *dev, loff_t from, | ||
429 | size_t len, size_t *retlen, u_char *buf) | ||
430 | { | ||
431 | struct ps3vram_priv *priv = dev->core.driver_data; | ||
432 | unsigned int cached, count; | ||
433 | |||
434 | dev_dbg(&dev->core, "%s: from=0x%08x len=0x%zx\n", __func__, | ||
435 | (unsigned int)from, len); | ||
436 | |||
437 | if (from >= priv->size) | ||
438 | return -EIO; | ||
439 | |||
440 | if (len > priv->size - from) | ||
441 | len = priv->size - from; | ||
442 | |||
443 | /* Copy from vram to buf */ | ||
444 | count = len; | ||
445 | while (count) { | ||
446 | unsigned int offset, avail; | ||
447 | unsigned int entry; | ||
448 | |||
449 | offset = (unsigned int) (from & (priv->cache.page_size - 1)); | ||
450 | avail = priv->cache.page_size - offset; | ||
451 | |||
452 | mutex_lock(&priv->lock); | ||
453 | |||
454 | entry = ps3vram_cache_match(dev, from); | ||
455 | cached = CACHE_OFFSET + entry * priv->cache.page_size + offset; | ||
456 | |||
457 | dev_dbg(&dev->core, "%s: from=%08x cached=%08x offset=%08x " | ||
458 | "avail=%08x count=%08x\n", __func__, | ||
459 | (unsigned int)from, cached, offset, avail, count); | ||
460 | |||
461 | if (avail > count) | ||
462 | avail = count; | ||
463 | memcpy(buf, priv->xdr_buf + cached, avail); | ||
464 | |||
465 | mutex_unlock(&priv->lock); | ||
466 | |||
467 | buf += avail; | ||
468 | count -= avail; | ||
469 | from += avail; | ||
470 | } | ||
471 | |||
472 | *retlen = len; | ||
473 | return 0; | ||
474 | } | ||
475 | |||
476 | static int ps3vram_write(struct ps3_system_bus_device *dev, loff_t to, | ||
477 | size_t len, size_t *retlen, const u_char *buf) | ||
478 | { | ||
479 | struct ps3vram_priv *priv = dev->core.driver_data; | ||
480 | unsigned int cached, count; | ||
481 | |||
482 | if (to >= priv->size) | ||
483 | return -EIO; | ||
484 | |||
485 | if (len > priv->size - to) | ||
486 | len = priv->size - to; | ||
487 | |||
488 | /* Copy from buf to vram */ | ||
489 | count = len; | ||
490 | while (count) { | ||
491 | unsigned int offset, avail; | ||
492 | unsigned int entry; | ||
493 | |||
494 | offset = (unsigned int) (to & (priv->cache.page_size - 1)); | ||
495 | avail = priv->cache.page_size - offset; | ||
496 | |||
497 | mutex_lock(&priv->lock); | ||
498 | |||
499 | entry = ps3vram_cache_match(dev, to); | ||
500 | cached = CACHE_OFFSET + entry * priv->cache.page_size + offset; | ||
501 | |||
502 | dev_dbg(&dev->core, "%s: to=%08x cached=%08x offset=%08x " | ||
503 | "avail=%08x count=%08x\n", __func__, (unsigned int)to, | ||
504 | cached, offset, avail, count); | ||
505 | |||
506 | if (avail > count) | ||
507 | avail = count; | ||
508 | memcpy(priv->xdr_buf + cached, buf, avail); | ||
509 | |||
510 | priv->cache.tags[entry].flags |= CACHE_PAGE_DIRTY; | ||
511 | |||
512 | mutex_unlock(&priv->lock); | ||
513 | |||
514 | buf += avail; | ||
515 | count -= avail; | ||
516 | to += avail; | ||
517 | } | ||
518 | |||
519 | *retlen = len; | ||
520 | return 0; | ||
521 | } | ||
522 | |||
523 | static int ps3vram_proc_show(struct seq_file *m, void *v) | ||
524 | { | ||
525 | struct ps3vram_priv *priv = m->private; | ||
526 | |||
527 | seq_printf(m, "hit:%u\nmiss:%u\n", priv->cache.hit, priv->cache.miss); | ||
528 | return 0; | ||
529 | } | ||
530 | |||
531 | static int ps3vram_proc_open(struct inode *inode, struct file *file) | ||
532 | { | ||
533 | return single_open(file, ps3vram_proc_show, PDE(inode)->data); | ||
534 | } | ||
535 | |||
536 | static const struct file_operations ps3vram_proc_fops = { | ||
537 | .owner = THIS_MODULE, | ||
538 | .open = ps3vram_proc_open, | ||
539 | .read = seq_read, | ||
540 | .llseek = seq_lseek, | ||
541 | .release = single_release, | ||
542 | }; | ||
543 | |||
544 | static void __devinit ps3vram_proc_init(struct ps3_system_bus_device *dev) | ||
545 | { | ||
546 | struct ps3vram_priv *priv = dev->core.driver_data; | ||
547 | struct proc_dir_entry *pde; | ||
548 | |||
549 | pde = proc_create(DEVICE_NAME, 0444, NULL, &ps3vram_proc_fops); | ||
550 | if (!pde) { | ||
551 | dev_warn(&dev->core, "failed to create /proc entry\n"); | ||
552 | return; | ||
553 | } | ||
554 | |||
555 | pde->owner = THIS_MODULE; | ||
556 | pde->data = priv; | ||
557 | } | ||
558 | |||
559 | static int ps3vram_make_request(struct request_queue *q, struct bio *bio) | ||
560 | { | ||
561 | struct ps3_system_bus_device *dev = q->queuedata; | ||
562 | int write = bio_data_dir(bio) == WRITE; | ||
563 | const char *op = write ? "write" : "read"; | ||
564 | loff_t offset = bio->bi_sector << 9; | ||
565 | int error = 0; | ||
566 | struct bio_vec *bvec; | ||
567 | unsigned int i; | ||
568 | |||
569 | dev_dbg(&dev->core, "%s\n", __func__); | ||
570 | |||
571 | bio_for_each_segment(bvec, bio, i) { | ||
572 | /* PS3 is ppc64, so we don't handle highmem */ | ||
573 | char *ptr = page_address(bvec->bv_page) + bvec->bv_offset; | ||
574 | size_t len = bvec->bv_len, retlen; | ||
575 | |||
576 | dev_dbg(&dev->core, " %s %zu bytes at offset %llu\n", op, | ||
577 | len, offset); | ||
578 | if (write) | ||
579 | error = ps3vram_write(dev, offset, len, &retlen, ptr); | ||
580 | else | ||
581 | error = ps3vram_read(dev, offset, len, &retlen, ptr); | ||
582 | |||
583 | if (error) { | ||
584 | dev_err(&dev->core, "%s failed\n", op); | ||
585 | goto out; | ||
586 | } | ||
587 | |||
588 | if (retlen != len) { | ||
589 | dev_err(&dev->core, "Short %s\n", op); | ||
590 | goto out; | ||
591 | } | ||
592 | |||
593 | offset += len; | ||
594 | } | ||
595 | |||
596 | dev_dbg(&dev->core, "%s completed\n", op); | ||
597 | |||
598 | out: | ||
599 | bio_endio(bio, error); | ||
600 | return 0; | ||
601 | } | ||
602 | |||
603 | static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev) | ||
604 | { | ||
605 | struct ps3vram_priv *priv; | ||
606 | int error, status; | ||
607 | struct request_queue *queue; | ||
608 | struct gendisk *gendisk; | ||
609 | u64 ddr_lpar, ctrl_lpar, info_lpar, reports_lpar, ddr_size, | ||
610 | reports_size; | ||
611 | char *rest; | ||
612 | |||
613 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
614 | if (!priv) { | ||
615 | error = -ENOMEM; | ||
616 | goto fail; | ||
617 | } | ||
618 | |||
619 | mutex_init(&priv->lock); | ||
620 | dev->core.driver_data = priv; | ||
621 | |||
622 | priv = dev->core.driver_data; | ||
623 | |||
624 | /* Allocate XDR buffer (1MiB aligned) */ | ||
625 | priv->xdr_buf = (void *)__get_free_pages(GFP_KERNEL, | ||
626 | get_order(XDR_BUF_SIZE)); | ||
627 | if (priv->xdr_buf == NULL) { | ||
628 | dev_err(&dev->core, "Could not allocate XDR buffer\n"); | ||
629 | error = -ENOMEM; | ||
630 | goto fail_free_priv; | ||
631 | } | ||
632 | |||
633 | /* Put FIFO at beginning of XDR buffer */ | ||
634 | priv->fifo_base = (u32 *) (priv->xdr_buf + FIFO_OFFSET); | ||
635 | priv->fifo_ptr = priv->fifo_base; | ||
636 | |||
637 | /* XXX: Need to open GPU, in case ps3fb or snd_ps3 aren't loaded */ | ||
638 | if (ps3_open_hv_device(dev)) { | ||
639 | dev_err(&dev->core, "ps3_open_hv_device failed\n"); | ||
640 | error = -EAGAIN; | ||
641 | goto out_free_xdr_buf; | ||
642 | } | ||
643 | |||
644 | /* Request memory */ | ||
645 | status = -1; | ||
646 | ddr_size = ALIGN(memparse(size, &rest), 1024*1024); | ||
647 | if (!ddr_size) { | ||
648 | dev_err(&dev->core, "Specified size is too small\n"); | ||
649 | error = -EINVAL; | ||
650 | goto out_close_gpu; | ||
651 | } | ||
652 | |||
653 | while (ddr_size > 0) { | ||
654 | status = lv1_gpu_memory_allocate(ddr_size, 0, 0, 0, 0, | ||
655 | &priv->memory_handle, | ||
656 | &ddr_lpar); | ||
657 | if (!status) | ||
658 | break; | ||
659 | ddr_size -= 1024*1024; | ||
660 | } | ||
661 | if (status) { | ||
662 | dev_err(&dev->core, "lv1_gpu_memory_allocate failed %d\n", | ||
663 | status); | ||
664 | error = -ENOMEM; | ||
665 | goto out_close_gpu; | ||
666 | } | ||
667 | |||
668 | /* Request context */ | ||
669 | status = lv1_gpu_context_allocate(priv->memory_handle, 0, | ||
670 | &priv->context_handle, &ctrl_lpar, | ||
671 | &info_lpar, &reports_lpar, | ||
672 | &reports_size); | ||
673 | if (status) { | ||
674 | dev_err(&dev->core, "lv1_gpu_context_allocate failed %d\n", | ||
675 | status); | ||
676 | error = -ENOMEM; | ||
677 | goto out_free_memory; | ||
678 | } | ||
679 | |||
680 | /* Map XDR buffer to RSX */ | ||
681 | status = lv1_gpu_context_iomap(priv->context_handle, XDR_IOIF, | ||
682 | ps3_mm_phys_to_lpar(__pa(priv->xdr_buf)), | ||
683 | XDR_BUF_SIZE, 0); | ||
684 | if (status) { | ||
685 | dev_err(&dev->core, "lv1_gpu_context_iomap failed %d\n", | ||
686 | status); | ||
687 | error = -ENOMEM; | ||
688 | goto out_free_context; | ||
689 | } | ||
690 | |||
691 | priv->ddr_base = ioremap_flags(ddr_lpar, ddr_size, _PAGE_NO_CACHE); | ||
692 | |||
693 | if (!priv->ddr_base) { | ||
694 | dev_err(&dev->core, "ioremap DDR failed\n"); | ||
695 | error = -ENOMEM; | ||
696 | goto out_free_context; | ||
697 | } | ||
698 | |||
699 | priv->ctrl = ioremap(ctrl_lpar, 64 * 1024); | ||
700 | if (!priv->ctrl) { | ||
701 | dev_err(&dev->core, "ioremap CTRL failed\n"); | ||
702 | error = -ENOMEM; | ||
703 | goto out_unmap_vram; | ||
704 | } | ||
705 | |||
706 | priv->reports = ioremap(reports_lpar, reports_size); | ||
707 | if (!priv->reports) { | ||
708 | dev_err(&dev->core, "ioremap REPORTS failed\n"); | ||
709 | error = -ENOMEM; | ||
710 | goto out_unmap_ctrl; | ||
711 | } | ||
712 | |||
713 | mutex_lock(&ps3_gpu_mutex); | ||
714 | ps3vram_init_ring(dev); | ||
715 | mutex_unlock(&ps3_gpu_mutex); | ||
716 | |||
717 | priv->size = ddr_size; | ||
718 | |||
719 | ps3vram_bind(dev); | ||
720 | |||
721 | mutex_lock(&ps3_gpu_mutex); | ||
722 | error = ps3vram_wait_ring(dev, 100); | ||
723 | mutex_unlock(&ps3_gpu_mutex); | ||
724 | if (error < 0) { | ||
725 | dev_err(&dev->core, "Failed to initialize channels\n"); | ||
726 | error = -ETIMEDOUT; | ||
727 | goto out_unmap_reports; | ||
728 | } | ||
729 | |||
730 | ps3vram_cache_init(dev); | ||
731 | ps3vram_proc_init(dev); | ||
732 | |||
733 | queue = blk_alloc_queue(GFP_KERNEL); | ||
734 | if (!queue) { | ||
735 | dev_err(&dev->core, "blk_alloc_queue failed\n"); | ||
736 | error = -ENOMEM; | ||
737 | goto out_cache_cleanup; | ||
738 | } | ||
739 | |||
740 | priv->queue = queue; | ||
741 | queue->queuedata = dev; | ||
742 | blk_queue_make_request(queue, ps3vram_make_request); | ||
743 | blk_queue_max_phys_segments(queue, MAX_PHYS_SEGMENTS); | ||
744 | blk_queue_max_hw_segments(queue, MAX_HW_SEGMENTS); | ||
745 | blk_queue_max_segment_size(queue, MAX_SEGMENT_SIZE); | ||
746 | blk_queue_max_sectors(queue, SAFE_MAX_SECTORS); | ||
747 | |||
748 | gendisk = alloc_disk(1); | ||
749 | if (!gendisk) { | ||
750 | dev_err(&dev->core, "alloc_disk failed\n"); | ||
751 | error = -ENOMEM; | ||
752 | goto fail_cleanup_queue; | ||
753 | } | ||
754 | |||
755 | priv->gendisk = gendisk; | ||
756 | gendisk->major = ps3vram_major; | ||
757 | gendisk->first_minor = 0; | ||
758 | gendisk->fops = &ps3vram_fops; | ||
759 | gendisk->queue = queue; | ||
760 | gendisk->private_data = dev; | ||
761 | gendisk->driverfs_dev = &dev->core; | ||
762 | strlcpy(gendisk->disk_name, DEVICE_NAME, sizeof(gendisk->disk_name)); | ||
763 | set_capacity(gendisk, priv->size >> 9); | ||
764 | |||
765 | dev_info(&dev->core, "%s: Using %lu MiB of GPU memory\n", | ||
766 | gendisk->disk_name, get_capacity(gendisk) >> 11); | ||
767 | |||
768 | add_disk(gendisk); | ||
769 | return 0; | ||
770 | |||
771 | fail_cleanup_queue: | ||
772 | blk_cleanup_queue(queue); | ||
773 | out_cache_cleanup: | ||
774 | remove_proc_entry(DEVICE_NAME, NULL); | ||
775 | ps3vram_cache_cleanup(dev); | ||
776 | out_unmap_reports: | ||
777 | iounmap(priv->reports); | ||
778 | out_unmap_ctrl: | ||
779 | iounmap(priv->ctrl); | ||
780 | out_unmap_vram: | ||
781 | iounmap(priv->ddr_base); | ||
782 | out_free_context: | ||
783 | lv1_gpu_context_free(priv->context_handle); | ||
784 | out_free_memory: | ||
785 | lv1_gpu_memory_free(priv->memory_handle); | ||
786 | out_close_gpu: | ||
787 | ps3_close_hv_device(dev); | ||
788 | out_free_xdr_buf: | ||
789 | free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE)); | ||
790 | fail_free_priv: | ||
791 | kfree(priv); | ||
792 | dev->core.driver_data = NULL; | ||
793 | fail: | ||
794 | return error; | ||
795 | } | ||
796 | |||
797 | static int ps3vram_remove(struct ps3_system_bus_device *dev) | ||
798 | { | ||
799 | struct ps3vram_priv *priv = dev->core.driver_data; | ||
800 | |||
801 | del_gendisk(priv->gendisk); | ||
802 | put_disk(priv->gendisk); | ||
803 | blk_cleanup_queue(priv->queue); | ||
804 | remove_proc_entry(DEVICE_NAME, NULL); | ||
805 | ps3vram_cache_cleanup(dev); | ||
806 | iounmap(priv->reports); | ||
807 | iounmap(priv->ctrl); | ||
808 | iounmap(priv->ddr_base); | ||
809 | lv1_gpu_context_free(priv->context_handle); | ||
810 | lv1_gpu_memory_free(priv->memory_handle); | ||
811 | ps3_close_hv_device(dev); | ||
812 | free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE)); | ||
813 | kfree(priv); | ||
814 | dev->core.driver_data = NULL; | ||
815 | return 0; | ||
816 | } | ||
817 | |||
818 | static struct ps3_system_bus_driver ps3vram = { | ||
819 | .match_id = PS3_MATCH_ID_GPU, | ||
820 | .match_sub_id = PS3_MATCH_SUB_ID_GPU_RAMDISK, | ||
821 | .core.name = DEVICE_NAME, | ||
822 | .core.owner = THIS_MODULE, | ||
823 | .probe = ps3vram_probe, | ||
824 | .remove = ps3vram_remove, | ||
825 | .shutdown = ps3vram_remove, | ||
826 | }; | ||
827 | |||
828 | |||
829 | static int __init ps3vram_init(void) | ||
830 | { | ||
831 | int error; | ||
832 | |||
833 | if (!firmware_has_feature(FW_FEATURE_PS3_LV1)) | ||
834 | return -ENODEV; | ||
835 | |||
836 | error = register_blkdev(0, DEVICE_NAME); | ||
837 | if (error <= 0) { | ||
838 | pr_err("%s: register_blkdev failed %d\n", DEVICE_NAME, error); | ||
839 | return error; | ||
840 | } | ||
841 | ps3vram_major = error; | ||
842 | |||
843 | pr_info("%s: registered block device major %d\n", DEVICE_NAME, | ||
844 | ps3vram_major); | ||
845 | |||
846 | error = ps3_system_bus_driver_register(&ps3vram); | ||
847 | if (error) | ||
848 | unregister_blkdev(ps3vram_major, DEVICE_NAME); | ||
849 | |||
850 | return error; | ||
851 | } | ||
852 | |||
853 | static void __exit ps3vram_exit(void) | ||
854 | { | ||
855 | ps3_system_bus_driver_unregister(&ps3vram); | ||
856 | unregister_blkdev(ps3vram_major, DEVICE_NAME); | ||
857 | } | ||
858 | |||
859 | module_init(ps3vram_init); | ||
860 | module_exit(ps3vram_exit); | ||
861 | |||
862 | MODULE_LICENSE("GPL"); | ||
863 | MODULE_DESCRIPTION("PS3 Video RAM Storage Driver"); | ||
864 | MODULE_AUTHOR("Sony Corporation"); | ||
865 | MODULE_ALIAS(PS3_MODULE_ALIAS_GPU_RAMDISK); | ||
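For orientation, the constants at the top of this new file carve the 2 MiB XDR buffer into a 64 KiB command FIFO at offset 0 and seven fully associative 256 KiB cache pages starting at offset 256 KiB. The standalone userspace sketch below is not part of the driver; it only spells out that arithmetic using the same constants.

#include <stdio.h>

/* Constants copied from the driver above. */
#define XDR_BUF_SIZE     (2 * 1024 * 1024)	/* 2 MiB, must be 1 MiB aligned */
#define FIFO_SIZE        (64 * 1024)		/* command FIFO, at offset 0 */
#define CACHE_PAGE_SIZE  (256 * 1024)
#define CACHE_PAGE_COUNT ((XDR_BUF_SIZE - FIFO_SIZE) / CACHE_PAGE_SIZE)
#define CACHE_OFFSET     CACHE_PAGE_SIZE	/* cache pages start at 256 KiB */

int main(void)
{
	/* (2 MiB - 64 KiB) / 256 KiB = 7, by integer division. */
	printf("%d cache pages of %d KiB, first page at offset %d KiB\n",
	       CACHE_PAGE_COUNT, CACHE_PAGE_SIZE / 1024, CACHE_OFFSET / 1024);
	return 0;
}

With seven resident pages, ps3vram_cache_match() evicts a pseudo-randomly chosen entry once all of them are in use, writing it back first only if it is marked CACHE_PAGE_DIRTY.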
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 4940e4d70c2d..1f5b5d4c3c34 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c | |||
@@ -306,7 +306,7 @@ static int hiddev_open(struct inode *inode, struct file *file) | |||
306 | return 0; | 306 | return 0; |
307 | bail: | 307 | bail: |
308 | file->private_data = NULL; | 308 | file->private_data = NULL; |
309 | kfree(list->hiddev); | 309 | kfree(list); |
310 | return res; | 310 | return res; |
311 | } | 311 | } |
312 | 312 | ||
@@ -323,7 +323,7 @@ static ssize_t hiddev_write(struct file * file, const char __user * buffer, size | |||
323 | */ | 323 | */ |
324 | static ssize_t hiddev_read(struct file * file, char __user * buffer, size_t count, loff_t *ppos) | 324 | static ssize_t hiddev_read(struct file * file, char __user * buffer, size_t count, loff_t *ppos) |
325 | { | 325 | { |
326 | DECLARE_WAITQUEUE(wait, current); | 326 | DEFINE_WAIT(wait); |
327 | struct hiddev_list *list = file->private_data; | 327 | struct hiddev_list *list = file->private_data; |
328 | int event_size; | 328 | int event_size; |
329 | int retval; | 329 | int retval; |
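The hiddev hunk above replaces DECLARE_WAITQUEUE() with DEFINE_WAIT() in hiddev_read(). DEFINE_WAIT() is normally paired with prepare_to_wait()/finish_wait() rather than add_wait_queue()/remove_wait_queue(); as a reminder of that idiom only, here is a generic sketch in which waitq and done are hypothetical placeholders, not the hiddev code itself.

#include <linux/sched.h>
#include <linux/wait.h>

/*
 * Generic sleep-until-condition idiom for DEFINE_WAIT(); 'waitq' and
 * 'done' stand in for whatever the driver actually waits on.
 */
static int wait_until_done(wait_queue_head_t *waitq, int *done)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	while (!*done) {
		/* Queue the waiter and set the task state in one step. */
		prepare_to_wait(waitq, &wait, TASK_INTERRUPTIBLE);
		if (*done)
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	finish_wait(waitq, &wait);
	return ret;
}

prepare_to_wait() re-queues the waiter and sets the task state on every loop iteration, which closes the race between testing the condition and going to sleep.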
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig index bc33200535fc..6fde0a2e3567 100644 --- a/drivers/mtd/devices/Kconfig +++ b/drivers/mtd/devices/Kconfig | |||
@@ -120,13 +120,6 @@ config MTD_PHRAM | |||
120 | doesn't have access to, memory beyond the mem=xxx limit, nvram, | 120 | doesn't have access to, memory beyond the mem=xxx limit, nvram, |
121 | memory on the video card, etc... | 121 | memory on the video card, etc... |
122 | 122 | ||
123 | config MTD_PS3VRAM | ||
124 | tristate "PS3 video RAM" | ||
125 | depends on FB_PS3 | ||
126 | help | ||
127 | This driver allows you to use excess PS3 video RAM as volatile | ||
128 | storage or system swap. | ||
129 | |||
130 | config MTD_LART | 123 | config MTD_LART |
131 | tristate "28F160xx flash driver for LART" | 124 | tristate "28F160xx flash driver for LART" |
132 | depends on SA1100_LART | 125 | depends on SA1100_LART |
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile index e51521df4e40..0993d5cf3923 100644 --- a/drivers/mtd/devices/Makefile +++ b/drivers/mtd/devices/Makefile | |||
@@ -16,4 +16,3 @@ obj-$(CONFIG_MTD_LART) += lart.o | |||
16 | obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o | 16 | obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o |
17 | obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o | 17 | obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o |
18 | obj-$(CONFIG_MTD_M25P80) += m25p80.o | 18 | obj-$(CONFIG_MTD_M25P80) += m25p80.o |
19 | obj-$(CONFIG_MTD_PS3VRAM) += ps3vram.o | ||
diff --git a/drivers/mtd/devices/ps3vram.c b/drivers/mtd/devices/ps3vram.c deleted file mode 100644 index d21e9beb7ed2..000000000000 --- a/drivers/mtd/devices/ps3vram.c +++ /dev/null | |||
@@ -1,768 +0,0 @@ | |||
1 | /** | ||
2 | * ps3vram - Use extra PS3 video ram as MTD block device. | ||
3 | * | ||
4 | * Copyright (c) 2007-2008 Jim Paris <jim@jtan.com> | ||
5 | * Added support RSX DMA Vivien Chappelier <vivien.chappelier@free.fr> | ||
6 | */ | ||
7 | |||
8 | #include <linux/io.h> | ||
9 | #include <linux/mm.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/list.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/moduleparam.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/version.h> | ||
17 | #include <linux/gfp.h> | ||
18 | #include <linux/delay.h> | ||
19 | #include <linux/mtd/mtd.h> | ||
20 | |||
21 | #include <asm/lv1call.h> | ||
22 | #include <asm/ps3.h> | ||
23 | |||
24 | #define DEVICE_NAME "ps3vram" | ||
25 | |||
26 | #define XDR_BUF_SIZE (2 * 1024 * 1024) /* XDR buffer (must be 1MiB aligned) */ | ||
27 | #define XDR_IOIF 0x0c000000 | ||
28 | |||
29 | #define FIFO_BASE XDR_IOIF | ||
30 | #define FIFO_SIZE (64 * 1024) | ||
31 | |||
32 | #define DMA_PAGE_SIZE (4 * 1024) | ||
33 | |||
34 | #define CACHE_PAGE_SIZE (256 * 1024) | ||
35 | #define CACHE_PAGE_COUNT ((XDR_BUF_SIZE - FIFO_SIZE) / CACHE_PAGE_SIZE) | ||
36 | |||
37 | #define CACHE_OFFSET CACHE_PAGE_SIZE | ||
38 | #define FIFO_OFFSET 0 | ||
39 | |||
40 | #define CTRL_PUT 0x10 | ||
41 | #define CTRL_GET 0x11 | ||
42 | #define CTRL_TOP 0x15 | ||
43 | |||
44 | #define UPLOAD_SUBCH 1 | ||
45 | #define DOWNLOAD_SUBCH 2 | ||
46 | |||
47 | #define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN 0x0000030c | ||
48 | #define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY 0x00000104 | ||
49 | |||
50 | #define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT 0x601 | ||
51 | |||
52 | struct mtd_info ps3vram_mtd; | ||
53 | |||
54 | #define CACHE_PAGE_PRESENT 1 | ||
55 | #define CACHE_PAGE_DIRTY 2 | ||
56 | |||
57 | struct ps3vram_tag { | ||
58 | unsigned int address; | ||
59 | unsigned int flags; | ||
60 | }; | ||
61 | |||
62 | struct ps3vram_cache { | ||
63 | unsigned int page_count; | ||
64 | unsigned int page_size; | ||
65 | struct ps3vram_tag *tags; | ||
66 | }; | ||
67 | |||
68 | struct ps3vram_priv { | ||
69 | u64 memory_handle; | ||
70 | u64 context_handle; | ||
71 | u32 *ctrl; | ||
72 | u32 *reports; | ||
73 | u8 __iomem *ddr_base; | ||
74 | u8 *xdr_buf; | ||
75 | |||
76 | u32 *fifo_base; | ||
77 | u32 *fifo_ptr; | ||
78 | |||
79 | struct device *dev; | ||
80 | struct ps3vram_cache cache; | ||
81 | |||
82 | /* Used to serialize cache/DMA operations */ | ||
83 | struct mutex lock; | ||
84 | }; | ||
85 | |||
86 | #define DMA_NOTIFIER_HANDLE_BASE 0x66604200 /* first DMA notifier handle */ | ||
87 | #define DMA_NOTIFIER_OFFSET_BASE 0x1000 /* first DMA notifier offset */ | ||
88 | #define DMA_NOTIFIER_SIZE 0x40 | ||
89 | #define NOTIFIER 7 /* notifier used for completion report */ | ||
90 | |||
91 | /* A trailing '-' means to subtract off ps3fb_videomemory.size */ | ||
92 | char *size = "256M-"; | ||
93 | module_param(size, charp, 0); | ||
94 | MODULE_PARM_DESC(size, "memory size"); | ||
95 | |||
96 | static u32 *ps3vram_get_notifier(u32 *reports, int notifier) | ||
97 | { | ||
98 | return (void *) reports + | ||
99 | DMA_NOTIFIER_OFFSET_BASE + | ||
100 | DMA_NOTIFIER_SIZE * notifier; | ||
101 | } | ||
102 | |||
103 | static void ps3vram_notifier_reset(struct mtd_info *mtd) | ||
104 | { | ||
105 | int i; | ||
106 | |||
107 | struct ps3vram_priv *priv = mtd->priv; | ||
108 | u32 *notify = ps3vram_get_notifier(priv->reports, NOTIFIER); | ||
109 | for (i = 0; i < 4; i++) | ||
110 | notify[i] = 0xffffffff; | ||
111 | } | ||
112 | |||
113 | static int ps3vram_notifier_wait(struct mtd_info *mtd, unsigned int timeout_ms) | ||
114 | { | ||
115 | struct ps3vram_priv *priv = mtd->priv; | ||
116 | u32 *notify = ps3vram_get_notifier(priv->reports, NOTIFIER); | ||
117 | unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); | ||
118 | |||
119 | do { | ||
120 | if (!notify[3]) | ||
121 | return 0; | ||
122 | msleep(1); | ||
123 | } while (time_before(jiffies, timeout)); | ||
124 | |||
125 | return -ETIMEDOUT; | ||
126 | } | ||
127 | |||
128 | static void ps3vram_init_ring(struct mtd_info *mtd) | ||
129 | { | ||
130 | struct ps3vram_priv *priv = mtd->priv; | ||
131 | |||
132 | priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET; | ||
133 | priv->ctrl[CTRL_GET] = FIFO_BASE + FIFO_OFFSET; | ||
134 | } | ||
135 | |||
136 | static int ps3vram_wait_ring(struct mtd_info *mtd, unsigned int timeout_ms) | ||
137 | { | ||
138 | struct ps3vram_priv *priv = mtd->priv; | ||
139 | unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); | ||
140 | |||
141 | do { | ||
142 | if (priv->ctrl[CTRL_PUT] == priv->ctrl[CTRL_GET]) | ||
143 | return 0; | ||
144 | msleep(1); | ||
145 | } while (time_before(jiffies, timeout)); | ||
146 | |||
147 | dev_dbg(priv->dev, "%s:%d: FIFO timeout (%08x/%08x/%08x)\n", __func__, | ||
148 | __LINE__, priv->ctrl[CTRL_PUT], priv->ctrl[CTRL_GET], | ||
149 | priv->ctrl[CTRL_TOP]); | ||
150 | |||
151 | return -ETIMEDOUT; | ||
152 | } | ||
153 | |||
154 | static void ps3vram_out_ring(struct ps3vram_priv *priv, u32 data) | ||
155 | { | ||
156 | *(priv->fifo_ptr)++ = data; | ||
157 | } | ||
158 | |||
159 | static void ps3vram_begin_ring(struct ps3vram_priv *priv, u32 chan, | ||
160 | u32 tag, u32 size) | ||
161 | { | ||
162 | ps3vram_out_ring(priv, (size << 18) | (chan << 13) | tag); | ||
163 | } | ||
164 | |||
165 | static void ps3vram_rewind_ring(struct mtd_info *mtd) | ||
166 | { | ||
167 | struct ps3vram_priv *priv = mtd->priv; | ||
168 | u64 status; | ||
169 | |||
170 | ps3vram_out_ring(priv, 0x20000000 | (FIFO_BASE + FIFO_OFFSET)); | ||
171 | |||
172 | priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET; | ||
173 | |||
174 | /* asking the HV for a blit will kick the fifo */ | ||
175 | status = lv1_gpu_context_attribute(priv->context_handle, | ||
176 | L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT, | ||
177 | 0, 0, 0, 0); | ||
178 | if (status) | ||
179 | dev_err(priv->dev, "%s:%d: lv1_gpu_context_attribute failed\n", | ||
180 | __func__, __LINE__); | ||
181 | |||
182 | priv->fifo_ptr = priv->fifo_base; | ||
183 | } | ||
184 | |||
185 | static void ps3vram_fire_ring(struct mtd_info *mtd) | ||
186 | { | ||
187 | struct ps3vram_priv *priv = mtd->priv; | ||
188 | u64 status; | ||
189 | |||
190 | mutex_lock(&ps3_gpu_mutex); | ||
191 | |||
192 | priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET + | ||
193 | (priv->fifo_ptr - priv->fifo_base) * sizeof(u32); | ||
194 | |||
195 | /* asking the HV for a blit will kick the fifo */ | ||
196 | status = lv1_gpu_context_attribute(priv->context_handle, | ||
197 | L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT, | ||
198 | 0, 0, 0, 0); | ||
199 | if (status) | ||
200 | dev_err(priv->dev, "%s:%d: lv1_gpu_context_attribute failed\n", | ||
201 | __func__, __LINE__); | ||
202 | |||
203 | if ((priv->fifo_ptr - priv->fifo_base) * sizeof(u32) > | ||
204 | FIFO_SIZE - 1024) { | ||
205 | dev_dbg(priv->dev, "%s:%d: fifo full, rewinding\n", __func__, | ||
206 | __LINE__); | ||
207 | ps3vram_wait_ring(mtd, 200); | ||
208 | ps3vram_rewind_ring(mtd); | ||
209 | } | ||
210 | |||
211 | mutex_unlock(&ps3_gpu_mutex); | ||
212 | } | ||
213 | |||
214 | static void ps3vram_bind(struct mtd_info *mtd) | ||
215 | { | ||
216 | struct ps3vram_priv *priv = mtd->priv; | ||
217 | |||
218 | ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0, 1); | ||
219 | ps3vram_out_ring(priv, 0x31337303); | ||
220 | ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0x180, 3); | ||
221 | ps3vram_out_ring(priv, DMA_NOTIFIER_HANDLE_BASE + NOTIFIER); | ||
222 | ps3vram_out_ring(priv, 0xfeed0001); /* DMA system RAM instance */ | ||
223 | ps3vram_out_ring(priv, 0xfeed0000); /* DMA video RAM instance */ | ||
224 | |||
225 | ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0, 1); | ||
226 | ps3vram_out_ring(priv, 0x3137c0de); | ||
227 | ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0x180, 3); | ||
228 | ps3vram_out_ring(priv, DMA_NOTIFIER_HANDLE_BASE + NOTIFIER); | ||
229 | ps3vram_out_ring(priv, 0xfeed0000); /* DMA video RAM instance */ | ||
230 | ps3vram_out_ring(priv, 0xfeed0001); /* DMA system RAM instance */ | ||
231 | |||
232 | ps3vram_fire_ring(mtd); | ||
233 | } | ||
234 | |||
235 | static int ps3vram_upload(struct mtd_info *mtd, unsigned int src_offset, | ||
236 | unsigned int dst_offset, int len, int count) | ||
237 | { | ||
238 | struct ps3vram_priv *priv = mtd->priv; | ||
239 | |||
240 | ps3vram_begin_ring(priv, UPLOAD_SUBCH, | ||
241 | NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); | ||
242 | ps3vram_out_ring(priv, XDR_IOIF + src_offset); | ||
243 | ps3vram_out_ring(priv, dst_offset); | ||
244 | ps3vram_out_ring(priv, len); | ||
245 | ps3vram_out_ring(priv, len); | ||
246 | ps3vram_out_ring(priv, len); | ||
247 | ps3vram_out_ring(priv, count); | ||
248 | ps3vram_out_ring(priv, (1 << 8) | 1); | ||
249 | ps3vram_out_ring(priv, 0); | ||
250 | |||
251 | ps3vram_notifier_reset(mtd); | ||
252 | ps3vram_begin_ring(priv, UPLOAD_SUBCH, | ||
253 | NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY, 1); | ||
254 | ps3vram_out_ring(priv, 0); | ||
255 | ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0x100, 1); | ||
256 | ps3vram_out_ring(priv, 0); | ||
257 | ps3vram_fire_ring(mtd); | ||
258 | if (ps3vram_notifier_wait(mtd, 200) < 0) { | ||
259 | dev_dbg(priv->dev, "%s:%d: notifier timeout\n", __func__, | ||
260 | __LINE__); | ||
261 | return -1; | ||
262 | } | ||
263 | |||
264 | return 0; | ||
265 | } | ||
266 | |||
267 | static int ps3vram_download(struct mtd_info *mtd, unsigned int src_offset, | ||
268 | unsigned int dst_offset, int len, int count) | ||
269 | { | ||
270 | struct ps3vram_priv *priv = mtd->priv; | ||
271 | |||
272 | ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, | ||
273 | NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); | ||
274 | ps3vram_out_ring(priv, src_offset); | ||
275 | ps3vram_out_ring(priv, XDR_IOIF + dst_offset); | ||
276 | ps3vram_out_ring(priv, len); | ||
277 | ps3vram_out_ring(priv, len); | ||
278 | ps3vram_out_ring(priv, len); | ||
279 | ps3vram_out_ring(priv, count); | ||
280 | ps3vram_out_ring(priv, (1 << 8) | 1); | ||
281 | ps3vram_out_ring(priv, 0); | ||
282 | |||
283 | ps3vram_notifier_reset(mtd); | ||
284 | ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, | ||
285 | NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY, 1); | ||
286 | ps3vram_out_ring(priv, 0); | ||
287 | ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0x100, 1); | ||
288 | ps3vram_out_ring(priv, 0); | ||
289 | ps3vram_fire_ring(mtd); | ||
290 | if (ps3vram_notifier_wait(mtd, 200) < 0) { | ||
291 | dev_dbg(priv->dev, "%s:%d: notifier timeout\n", __func__, | ||
292 | __LINE__); | ||
293 | return -1; | ||
294 | } | ||
295 | |||
296 | return 0; | ||
297 | } | ||
298 | |||
299 | static void ps3vram_cache_evict(struct mtd_info *mtd, int entry) | ||
300 | { | ||
301 | struct ps3vram_priv *priv = mtd->priv; | ||
302 | struct ps3vram_cache *cache = &priv->cache; | ||
303 | |||
304 | if (cache->tags[entry].flags & CACHE_PAGE_DIRTY) { | ||
305 | dev_dbg(priv->dev, "%s:%d: flushing %d : 0x%08x\n", __func__, | ||
306 | __LINE__, entry, cache->tags[entry].address); | ||
307 | if (ps3vram_upload(mtd, | ||
308 | CACHE_OFFSET + entry * cache->page_size, | ||
309 | cache->tags[entry].address, | ||
310 | DMA_PAGE_SIZE, | ||
311 | cache->page_size / DMA_PAGE_SIZE) < 0) { | ||
312 | dev_dbg(priv->dev, "%s:%d: failed to upload from " | ||
313 | "0x%x to 0x%x size 0x%x\n", __func__, __LINE__, | ||
314 | entry * cache->page_size, | ||
315 | cache->tags[entry].address, cache->page_size); | ||
316 | } | ||
317 | cache->tags[entry].flags &= ~CACHE_PAGE_DIRTY; | ||
318 | } | ||
319 | } | ||
320 | |||
321 | static void ps3vram_cache_load(struct mtd_info *mtd, int entry, | ||
322 | unsigned int address) | ||
323 | { | ||
324 | struct ps3vram_priv *priv = mtd->priv; | ||
325 | struct ps3vram_cache *cache = &priv->cache; | ||
326 | |||
327 | dev_dbg(priv->dev, "%s:%d: fetching %d : 0x%08x\n", __func__, __LINE__, | ||
328 | entry, address); | ||
329 | if (ps3vram_download(mtd, | ||
330 | address, | ||
331 | CACHE_OFFSET + entry * cache->page_size, | ||
332 | DMA_PAGE_SIZE, | ||
333 | cache->page_size / DMA_PAGE_SIZE) < 0) { | ||
334 | dev_err(priv->dev, "%s:%d: failed to download from " | ||
335 | "0x%x to 0x%x size 0x%x\n", __func__, __LINE__, address, | ||
336 | entry * cache->page_size, cache->page_size); | ||
337 | } | ||
338 | |||
339 | cache->tags[entry].address = address; | ||
340 | cache->tags[entry].flags |= CACHE_PAGE_PRESENT; | ||
341 | } | ||
342 | |||
343 | |||
344 | static void ps3vram_cache_flush(struct mtd_info *mtd) | ||
345 | { | ||
346 | struct ps3vram_priv *priv = mtd->priv; | ||
347 | struct ps3vram_cache *cache = &priv->cache; | ||
348 | int i; | ||
349 | |||
350 | dev_dbg(priv->dev, "%s:%d: FLUSH\n", __func__, __LINE__); | ||
351 | for (i = 0; i < cache->page_count; i++) { | ||
352 | ps3vram_cache_evict(mtd, i); | ||
353 | cache->tags[i].flags = 0; | ||
354 | } | ||
355 | } | ||
356 | |||
357 | static unsigned int ps3vram_cache_match(struct mtd_info *mtd, loff_t address) | ||
358 | { | ||
359 | struct ps3vram_priv *priv = mtd->priv; | ||
360 | struct ps3vram_cache *cache = &priv->cache; | ||
361 | unsigned int base; | ||
362 | unsigned int offset; | ||
363 | int i; | ||
364 | static int counter; | ||
365 | |||
366 | offset = (unsigned int) (address & (cache->page_size - 1)); | ||
367 | base = (unsigned int) (address - offset); | ||
368 | |||
369 | /* fully associative check */ | ||
370 | for (i = 0; i < cache->page_count; i++) { | ||
371 | if ((cache->tags[i].flags & CACHE_PAGE_PRESENT) && | ||
372 | cache->tags[i].address == base) { | ||
373 | dev_dbg(priv->dev, "%s:%d: found entry %d : 0x%08x\n", | ||
374 | __func__, __LINE__, i, cache->tags[i].address); | ||
375 | return i; | ||
376 | } | ||
377 | } | ||
378 | |||
379 | /* choose a random entry */ | ||
380 | i = (jiffies + (counter++)) % cache->page_count; | ||
381 | dev_dbg(priv->dev, "%s:%d: using entry %d\n", __func__, __LINE__, i); | ||
382 | |||
383 | ps3vram_cache_evict(mtd, i); | ||
384 | ps3vram_cache_load(mtd, i, base); | ||
385 | |||
386 | return i; | ||
387 | } | ||
388 | |||
389 | static int ps3vram_cache_init(struct mtd_info *mtd) | ||
390 | { | ||
391 | struct ps3vram_priv *priv = mtd->priv; | ||
392 | |||
393 | priv->cache.page_count = CACHE_PAGE_COUNT; | ||
394 | priv->cache.page_size = CACHE_PAGE_SIZE; | ||
395 | priv->cache.tags = kzalloc(sizeof(struct ps3vram_tag) * | ||
396 | CACHE_PAGE_COUNT, GFP_KERNEL); | ||
397 | if (priv->cache.tags == NULL) { | ||
398 | dev_err(priv->dev, "%s:%d: could not allocate cache tags\n", | ||
399 | __func__, __LINE__); | ||
400 | return -ENOMEM; | ||
401 | } | ||
402 | |||
403 | dev_info(priv->dev, "created ram cache: %d entries, %d KiB each\n", | ||
404 | CACHE_PAGE_COUNT, CACHE_PAGE_SIZE / 1024); | ||
405 | |||
406 | return 0; | ||
407 | } | ||
408 | |||
409 | static void ps3vram_cache_cleanup(struct mtd_info *mtd) | ||
410 | { | ||
411 | struct ps3vram_priv *priv = mtd->priv; | ||
412 | |||
413 | ps3vram_cache_flush(mtd); | ||
414 | kfree(priv->cache.tags); | ||
415 | } | ||
416 | |||
417 | static int ps3vram_erase(struct mtd_info *mtd, struct erase_info *instr) | ||
418 | { | ||
419 | struct ps3vram_priv *priv = mtd->priv; | ||
420 | |||
421 | if (instr->addr + instr->len > mtd->size) | ||
422 | return -EINVAL; | ||
423 | |||
424 | mutex_lock(&priv->lock); | ||
425 | |||
426 | ps3vram_cache_flush(mtd); | ||
427 | |||
428 | /* Set bytes to 0xFF */ | ||
429 | memset_io(priv->ddr_base + instr->addr, 0xFF, instr->len); | ||
430 | |||
431 | mutex_unlock(&priv->lock); | ||
432 | |||
433 | instr->state = MTD_ERASE_DONE; | ||
434 | mtd_erase_callback(instr); | ||
435 | |||
436 | return 0; | ||
437 | } | ||
438 | |||
439 | static int ps3vram_read(struct mtd_info *mtd, loff_t from, size_t len, | ||
440 | size_t *retlen, u_char *buf) | ||
441 | { | ||
442 | struct ps3vram_priv *priv = mtd->priv; | ||
443 | unsigned int cached, count; | ||
444 | |||
445 | dev_dbg(priv->dev, "%s:%d: from=0x%08x len=0x%zx\n", __func__, __LINE__, | ||
446 | (unsigned int)from, len); | ||
447 | |||
448 | if (from >= mtd->size) | ||
449 | return -EINVAL; | ||
450 | |||
451 | if (len > mtd->size - from) | ||
452 | len = mtd->size - from; | ||
453 | |||
454 | /* Copy from vram to buf */ | ||
455 | count = len; | ||
456 | while (count) { | ||
457 | unsigned int offset, avail; | ||
458 | unsigned int entry; | ||
459 | |||
460 | offset = (unsigned int) (from & (priv->cache.page_size - 1)); | ||
461 | avail = priv->cache.page_size - offset; | ||
462 | |||
463 | mutex_lock(&priv->lock); | ||
464 | |||
465 | entry = ps3vram_cache_match(mtd, from); | ||
466 | cached = CACHE_OFFSET + entry * priv->cache.page_size + offset; | ||
467 | |||
468 | dev_dbg(priv->dev, "%s:%d: from=%08x cached=%08x offset=%08x " | ||
469 | "avail=%08x count=%08x\n", __func__, __LINE__, | ||
470 | (unsigned int)from, cached, offset, avail, count); | ||
471 | |||
472 | if (avail > count) | ||
473 | avail = count; | ||
474 | memcpy(buf, priv->xdr_buf + cached, avail); | ||
475 | |||
476 | mutex_unlock(&priv->lock); | ||
477 | |||
478 | buf += avail; | ||
479 | count -= avail; | ||
480 | from += avail; | ||
481 | } | ||
482 | |||
483 | *retlen = len; | ||
484 | return 0; | ||
485 | } | ||
486 | |||
487 | static int ps3vram_write(struct mtd_info *mtd, loff_t to, size_t len, | ||
488 | size_t *retlen, const u_char *buf) | ||
489 | { | ||
490 | struct ps3vram_priv *priv = mtd->priv; | ||
491 | unsigned int cached, count; | ||
492 | |||
493 | if (to >= mtd->size) | ||
494 | return -EINVAL; | ||
495 | |||
496 | if (len > mtd->size - to) | ||
497 | len = mtd->size - to; | ||
498 | |||
499 | /* Copy from buf to vram */ | ||
500 | count = len; | ||
501 | while (count) { | ||
502 | unsigned int offset, avail; | ||
503 | unsigned int entry; | ||
504 | |||
505 | offset = (unsigned int) (to & (priv->cache.page_size - 1)); | ||
506 | avail = priv->cache.page_size - offset; | ||
507 | |||
508 | mutex_lock(&priv->lock); | ||
509 | |||
510 | entry = ps3vram_cache_match(mtd, to); | ||
511 | cached = CACHE_OFFSET + entry * priv->cache.page_size + offset; | ||
512 | |||
513 | dev_dbg(priv->dev, "%s:%d: to=%08x cached=%08x offset=%08x " | ||
514 | "avail=%08x count=%08x\n", __func__, __LINE__, | ||
515 | (unsigned int)to, cached, offset, avail, count); | ||
516 | |||
517 | if (avail > count) | ||
518 | avail = count; | ||
519 | memcpy(priv->xdr_buf + cached, buf, avail); | ||
520 | |||
521 | priv->cache.tags[entry].flags |= CACHE_PAGE_DIRTY; | ||
522 | |||
523 | mutex_unlock(&priv->lock); | ||
524 | |||
525 | buf += avail; | ||
526 | count -= avail; | ||
527 | to += avail; | ||
528 | } | ||
529 | |||
530 | *retlen = len; | ||
531 | return 0; | ||
532 | } | ||
533 | |||
534 | static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev) | ||
535 | { | ||
536 | struct ps3vram_priv *priv; | ||
537 | int status; | ||
538 | u64 ddr_lpar; | ||
539 | u64 ctrl_lpar; | ||
540 | u64 info_lpar; | ||
541 | u64 reports_lpar; | ||
542 | u64 ddr_size; | ||
543 | u64 reports_size; | ||
544 | int ret = -ENOMEM; | ||
545 | char *rest; | ||
546 | |||
547 | ret = -EIO; | ||
548 | ps3vram_mtd.priv = kzalloc(sizeof(struct ps3vram_priv), GFP_KERNEL); | ||
549 | if (!ps3vram_mtd.priv) | ||
550 | goto out; | ||
551 | priv = ps3vram_mtd.priv; | ||
552 | |||
553 | mutex_init(&priv->lock); | ||
554 | priv->dev = &dev->core; | ||
555 | |||
556 | /* Allocate XDR buffer (1MiB aligned) */ | ||
557 | priv->xdr_buf = (void *)__get_free_pages(GFP_KERNEL, | ||
558 | get_order(XDR_BUF_SIZE)); | ||
559 | if (priv->xdr_buf == NULL) { | ||
560 | dev_dbg(&dev->core, "%s:%d: could not allocate XDR buffer\n", | ||
561 | __func__, __LINE__); | ||
562 | ret = -ENOMEM; | ||
563 | goto out_free_priv; | ||
564 | } | ||
565 | |||
566 | /* Put FIFO at beginning of XDR buffer */ | ||
567 | priv->fifo_base = (u32 *) (priv->xdr_buf + FIFO_OFFSET); | ||
568 | priv->fifo_ptr = priv->fifo_base; | ||
569 | |||
570 | /* XXX: Need to open GPU, in case ps3fb or snd_ps3 aren't loaded */ | ||
571 | if (ps3_open_hv_device(dev)) { | ||
572 | dev_err(&dev->core, "%s:%d: ps3_open_hv_device failed\n", | ||
573 | __func__, __LINE__); | ||
574 | ret = -EAGAIN; | ||
575 | goto out_free_xdr_buf; | ||
576 | } | ||
577 | |||
578 | /* Request memory */ | ||
579 | status = -1; | ||
580 | ddr_size = memparse(size, &rest); | ||
581 | if (*rest == '-') | ||
582 | ddr_size -= ps3fb_videomemory.size; | ||
583 | ddr_size = ALIGN(ddr_size, 1024*1024); | ||
584 | if (ddr_size <= 0) { | ||
585 | dev_err(&dev->core, "%s:%d: specified size is too small\n", | ||
586 | __func__, __LINE__); | ||
587 | ret = -EINVAL; | ||
588 | goto out_close_gpu; | ||
589 | } | ||
590 | |||
591 | while (ddr_size > 0) { | ||
592 | status = lv1_gpu_memory_allocate(ddr_size, 0, 0, 0, 0, | ||
593 | &priv->memory_handle, | ||
594 | &ddr_lpar); | ||
595 | if (!status) | ||
596 | break; | ||
597 | ddr_size -= 1024*1024; | ||
598 | } | ||
599 | if (status || ddr_size <= 0) { | ||
600 | dev_err(&dev->core, "%s:%d: lv1_gpu_memory_allocate failed\n", | ||
601 | __func__, __LINE__); | ||
602 | ret = -ENOMEM; | ||
603 | goto out_free_xdr_buf; | ||
604 | } | ||
605 | |||
606 | /* Request context */ | ||
607 | status = lv1_gpu_context_allocate(priv->memory_handle, | ||
608 | 0, | ||
609 | &priv->context_handle, | ||
610 | &ctrl_lpar, | ||
611 | &info_lpar, | ||
612 | &reports_lpar, | ||
613 | &reports_size); | ||
614 | if (status) { | ||
615 | dev_err(&dev->core, "%s:%d: lv1_gpu_context_allocate failed\n", | ||
616 | __func__, __LINE__); | ||
617 | ret = -ENOMEM; | ||
618 | goto out_free_memory; | ||
619 | } | ||
620 | |||
621 | /* Map XDR buffer to RSX */ | ||
622 | status = lv1_gpu_context_iomap(priv->context_handle, XDR_IOIF, | ||
623 | ps3_mm_phys_to_lpar(__pa(priv->xdr_buf)), | ||
624 | XDR_BUF_SIZE, 0); | ||
625 | if (status) { | ||
626 | dev_err(&dev->core, "%s:%d: lv1_gpu_context_iomap failed\n", | ||
627 | __func__, __LINE__); | ||
628 | ret = -ENOMEM; | ||
629 | goto out_free_context; | ||
630 | } | ||
631 | |||
632 | priv->ddr_base = ioremap_flags(ddr_lpar, ddr_size, _PAGE_NO_CACHE); | ||
633 | |||
634 | if (!priv->ddr_base) { | ||
635 | dev_err(&dev->core, "%s:%d: ioremap failed\n", __func__, | ||
636 | __LINE__); | ||
637 | ret = -ENOMEM; | ||
638 | goto out_free_context; | ||
639 | } | ||
640 | |||
641 | priv->ctrl = ioremap(ctrl_lpar, 64 * 1024); | ||
642 | if (!priv->ctrl) { | ||
643 | dev_err(&dev->core, "%s:%d: ioremap failed\n", __func__, | ||
644 | __LINE__); | ||
645 | ret = -ENOMEM; | ||
646 | goto out_unmap_vram; | ||
647 | } | ||
648 | |||
649 | priv->reports = ioremap(reports_lpar, reports_size); | ||
650 | if (!priv->reports) { | ||
651 | dev_err(&dev->core, "%s:%d: ioremap failed\n", __func__, | ||
652 | __LINE__); | ||
653 | ret = -ENOMEM; | ||
654 | goto out_unmap_ctrl; | ||
655 | } | ||
656 | |||
657 | mutex_lock(&ps3_gpu_mutex); | ||
658 | ps3vram_init_ring(&ps3vram_mtd); | ||
659 | mutex_unlock(&ps3_gpu_mutex); | ||
660 | |||
661 | ps3vram_mtd.name = "ps3vram"; | ||
662 | ps3vram_mtd.size = ddr_size; | ||
663 | ps3vram_mtd.flags = MTD_CAP_RAM; | ||
664 | ps3vram_mtd.erase = ps3vram_erase; | ||
665 | ps3vram_mtd.point = NULL; | ||
666 | ps3vram_mtd.unpoint = NULL; | ||
667 | ps3vram_mtd.read = ps3vram_read; | ||
668 | ps3vram_mtd.write = ps3vram_write; | ||
669 | ps3vram_mtd.owner = THIS_MODULE; | ||
670 | ps3vram_mtd.type = MTD_RAM; | ||
671 | ps3vram_mtd.erasesize = CACHE_PAGE_SIZE; | ||
672 | ps3vram_mtd.writesize = 1; | ||
673 | |||
674 | ps3vram_bind(&ps3vram_mtd); | ||
675 | |||
676 | mutex_lock(&ps3_gpu_mutex); | ||
677 | ret = ps3vram_wait_ring(&ps3vram_mtd, 100); | ||
678 | mutex_unlock(&ps3_gpu_mutex); | ||
679 | if (ret < 0) { | ||
680 | dev_err(&dev->core, "%s:%d: failed to initialize channels\n", | ||
681 | __func__, __LINE__); | ||
682 | ret = -ETIMEDOUT; | ||
683 | goto out_unmap_reports; | ||
684 | } | ||
685 | |||
686 | ps3vram_cache_init(&ps3vram_mtd); | ||
687 | |||
688 | if (add_mtd_device(&ps3vram_mtd)) { | ||
689 | dev_err(&dev->core, "%s:%d: add_mtd_device failed\n", | ||
690 | __func__, __LINE__); | ||
691 | ret = -EAGAIN; | ||
692 | goto out_cache_cleanup; | ||
693 | } | ||
694 | |||
695 | dev_info(&dev->core, "reserved %u MiB of gpu memory\n", | ||
696 | (unsigned int)(ddr_size / 1024 / 1024)); | ||
697 | |||
698 | return 0; | ||
699 | |||
700 | out_cache_cleanup: | ||
701 | ps3vram_cache_cleanup(&ps3vram_mtd); | ||
702 | out_unmap_reports: | ||
703 | iounmap(priv->reports); | ||
704 | out_unmap_ctrl: | ||
705 | iounmap(priv->ctrl); | ||
706 | out_unmap_vram: | ||
707 | iounmap(priv->ddr_base); | ||
708 | out_free_context: | ||
709 | lv1_gpu_context_free(priv->context_handle); | ||
710 | out_free_memory: | ||
711 | lv1_gpu_memory_free(priv->memory_handle); | ||
712 | out_close_gpu: | ||
713 | ps3_close_hv_device(dev); | ||
714 | out_free_xdr_buf: | ||
715 | free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE)); | ||
716 | out_free_priv: | ||
717 | kfree(ps3vram_mtd.priv); | ||
718 | ps3vram_mtd.priv = NULL; | ||
719 | out: | ||
720 | return ret; | ||
721 | } | ||
722 | |||
723 | static int ps3vram_shutdown(struct ps3_system_bus_device *dev) | ||
724 | { | ||
725 | struct ps3vram_priv *priv; | ||
726 | |||
727 | priv = ps3vram_mtd.priv; | ||
728 | |||
729 | del_mtd_device(&ps3vram_mtd); | ||
730 | ps3vram_cache_cleanup(&ps3vram_mtd); | ||
731 | iounmap(priv->reports); | ||
732 | iounmap(priv->ctrl); | ||
733 | iounmap(priv->ddr_base); | ||
734 | lv1_gpu_context_free(priv->context_handle); | ||
735 | lv1_gpu_memory_free(priv->memory_handle); | ||
736 | ps3_close_hv_device(dev); | ||
737 | free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE)); | ||
738 | kfree(priv); | ||
739 | return 0; | ||
740 | } | ||
741 | |||
742 | static struct ps3_system_bus_driver ps3vram_driver = { | ||
743 | .match_id = PS3_MATCH_ID_GPU, | ||
744 | .match_sub_id = PS3_MATCH_SUB_ID_GPU_RAMDISK, | ||
745 | .core.name = DEVICE_NAME, | ||
746 | .core.owner = THIS_MODULE, | ||
747 | .probe = ps3vram_probe, | ||
748 | .remove = ps3vram_shutdown, | ||
749 | .shutdown = ps3vram_shutdown, | ||
750 | }; | ||
751 | |||
752 | static int __init ps3vram_init(void) | ||
753 | { | ||
754 | return ps3_system_bus_driver_register(&ps3vram_driver); | ||
755 | } | ||
756 | |||
757 | static void __exit ps3vram_exit(void) | ||
758 | { | ||
759 | ps3_system_bus_driver_unregister(&ps3vram_driver); | ||
760 | } | ||
761 | |||
762 | module_init(ps3vram_init); | ||
763 | module_exit(ps3vram_exit); | ||
764 | |||
765 | MODULE_LICENSE("GPL"); | ||
766 | MODULE_AUTHOR("Jim Paris <jim@jtan.com>"); | ||
767 | MODULE_DESCRIPTION("MTD driver for PS3 video RAM"); | ||
768 | MODULE_ALIAS(PS3_MODULE_ALIAS_GPU_RAMDISK); | ||
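Once add_mtd_device() succeeds, the reserved video RAM is reachable through the standard MTD character interface. The user-space sketch below is illustrative only: it assumes the region registers as /dev/mtd0 (the index depends on what other MTD devices are present), queries the geometry, and reads a few bytes, exercising the ps3vram_read() path shown earlier.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/mtd-user.h>

int main(void)
{
	unsigned char buf[16];
	struct mtd_info_user info;
	int fd = open("/dev/mtd0", O_RDONLY);	/* device index is an assumption */

	if (fd < 0) {
		perror("open /dev/mtd0");
		return 1;
	}

	/* Reports the size/erasesize/writesize values set up in ps3vram_probe(). */
	if (ioctl(fd, MEMGETINFO, &info) == 0)
		printf("size=%u erasesize=%u writesize=%u\n",
		       info.size, info.erasesize, info.writesize);

	if (read(fd, buf, sizeof(buf)) == (ssize_t)sizeof(buf))
		printf("first byte: 0x%02x\n", buf[0]);

	close(fd);
	return 0;
}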
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index ce6badded47a..211af86a6c55 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig | |||
@@ -73,8 +73,6 @@ source "drivers/staging/rt2860/Kconfig" | |||
73 | 73 | ||
74 | source "drivers/staging/rt2870/Kconfig" | 74 | source "drivers/staging/rt2870/Kconfig" |
75 | 75 | ||
76 | source "drivers/staging/benet/Kconfig" | ||
77 | |||
78 | source "drivers/staging/comedi/Kconfig" | 76 | source "drivers/staging/comedi/Kconfig" |
79 | 77 | ||
80 | source "drivers/staging/asus_oled/Kconfig" | 78 | source "drivers/staging/asus_oled/Kconfig" |
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 9ddcc2bb3365..47a56f5ffabc 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile | |||
@@ -19,7 +19,6 @@ obj-$(CONFIG_AGNX) += agnx/ | |||
19 | obj-$(CONFIG_OTUS) += otus/ | 19 | obj-$(CONFIG_OTUS) += otus/ |
20 | obj-$(CONFIG_RT2860) += rt2860/ | 20 | obj-$(CONFIG_RT2860) += rt2860/ |
21 | obj-$(CONFIG_RT2870) += rt2870/ | 21 | obj-$(CONFIG_RT2870) += rt2870/ |
22 | obj-$(CONFIG_BENET) += benet/ | ||
23 | obj-$(CONFIG_COMEDI) += comedi/ | 22 | obj-$(CONFIG_COMEDI) += comedi/ |
24 | obj-$(CONFIG_ASUS_OLED) += asus_oled/ | 23 | obj-$(CONFIG_ASUS_OLED) += asus_oled/ |
25 | obj-$(CONFIG_PANEL) += panel/ | 24 | obj-$(CONFIG_PANEL) += panel/ |
diff --git a/drivers/staging/benet/Kconfig b/drivers/staging/benet/Kconfig deleted file mode 100644 index f6806074f998..000000000000 --- a/drivers/staging/benet/Kconfig +++ /dev/null | |||
@@ -1,7 +0,0 @@ | |||
1 | config BENET | ||
2 | tristate "ServerEngines 10Gb NIC - BladeEngine" | ||
3 | depends on PCI && INET | ||
4 | select INET_LRO | ||
5 | help | ||
6 | This driver implements the NIC functionality for ServerEngines | ||
7 | 10Gb network adapter BladeEngine (EC 3210). | ||
diff --git a/drivers/staging/benet/MAINTAINERS b/drivers/staging/benet/MAINTAINERS deleted file mode 100644 index d5ce340218b3..000000000000 --- a/drivers/staging/benet/MAINTAINERS +++ /dev/null | |||
@@ -1,6 +0,0 @@ | |||
1 | SERVER ENGINES 10Gbe NIC - BLADE-ENGINE | ||
2 | P: Subbu Seetharaman | ||
3 | M: subbus@serverengines.com | ||
4 | L: netdev@vger.kernel.org | ||
5 | W: http://www.serverengines.com | ||
6 | S: Supported | ||
diff --git a/drivers/staging/benet/Makefile b/drivers/staging/benet/Makefile deleted file mode 100644 index 460b923b99bd..000000000000 --- a/drivers/staging/benet/Makefile +++ /dev/null | |||
@@ -1,14 +0,0 @@ | |||
1 | # | ||
2 | # Makefile to build the network driver for ServerEngine's BladeEngine | ||
3 | # | ||
4 | obj-$(CONFIG_BENET) += benet.o | ||
5 | |||
6 | benet-y := be_init.o \ | ||
7 | be_int.o \ | ||
8 | be_netif.o \ | ||
9 | be_ethtool.o \ | ||
10 | funcobj.o \ | ||
11 | cq.o \ | ||
12 | eq.o \ | ||
13 | mpu.o \ | ||
14 | eth.o | ||
diff --git a/drivers/staging/benet/TODO b/drivers/staging/benet/TODO deleted file mode 100644 index a51dfb59a62f..000000000000 --- a/drivers/staging/benet/TODO +++ /dev/null | |||
@@ -1,6 +0,0 @@ | |||
1 | TODO: | ||
2 | - remove wrappers around common iowrite functions | ||
3 | - full netdev audit of common problems/issues | ||
4 | |||
5 | Please send all patches and questions to Subbu Seetharaman | ||
6 | <subbus@serverengines.com> and Greg Kroah-Hartman <greg@kroah.com> | ||
diff --git a/drivers/staging/benet/asyncmesg.h b/drivers/staging/benet/asyncmesg.h deleted file mode 100644 index d1e779adb848..000000000000 --- a/drivers/staging/benet/asyncmesg.h +++ /dev/null | |||
@@ -1,82 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | /* | ||
18 | * Autogenerated by srcgen version: 0127 | ||
19 | */ | ||
20 | #ifndef __asyncmesg_amap_h__ | ||
21 | #define __asyncmesg_amap_h__ | ||
22 | #include "fwcmd_common.h" | ||
23 | |||
24 | /* --- ASYNC_EVENT_CODES --- */ | ||
25 | #define ASYNC_EVENT_CODE_LINK_STATE (1) | ||
26 | #define ASYNC_EVENT_CODE_ISCSI (2) | ||
27 | |||
28 | /* --- ASYNC_LINK_STATES --- */ | ||
29 | #define ASYNC_EVENT_LINK_DOWN (0) /* Link Down on a port */ | ||
30 | #define ASYNC_EVENT_LINK_UP (1) /* Link Up on a port */ | ||
31 | |||
32 | /* | ||
33 | * The last 4 bytes of the async events have this common format. It allows | ||
34 | * the driver to distinguish MCC_CQ_ENTRY structs from | ||
35 | * asynchronous events. Both arrive on the same completion queue. This | ||
36 | * structure also contains the common fields used to decode the async event. | ||
37 | */ | ||
38 | struct BE_ASYNC_EVENT_TRAILER_AMAP { | ||
39 | u8 rsvd0[8]; /* DWORD 0 */ | ||
40 | u8 event_code[8]; /* DWORD 0 */ | ||
41 | u8 event_type[8]; /* DWORD 0 */ | ||
42 | u8 rsvd1[6]; /* DWORD 0 */ | ||
43 | u8 async_event; /* DWORD 0 */ | ||
44 | u8 valid; /* DWORD 0 */ | ||
45 | } __packed; | ||
46 | struct ASYNC_EVENT_TRAILER_AMAP { | ||
47 | u32 dw[1]; | ||
48 | }; | ||
49 | |||
50 | /* | ||
51 | * Applicable in Initiator, Target and NIC modes. | ||
52 | * A link state async event is seen by all device drivers as soon as they | ||
53 | * create an MCC ring. Thereafter, anytime the link status changes the | ||
54 | * drivers will receive a link state async event. Notifications continue to | ||
55 | * be sent until a driver destroys its MCC ring. A link down event is | ||
56 | * reported when either port loses link. A link up event is reported | ||
57 | * when either port regains link. When BE's failover mechanism is enabled, a | ||
58 | * link down on the active port causes traffic to be diverted to the standby | ||
59 | * port by the BE's ARM firmware (assuming the standby port has link). In | ||
60 | * this case, the standby port assumes the active status. Note: when link is | ||
61 | * restored on the failed port, traffic continues on the currently active | ||
62 | * port. The ARM firmware does not attempt to 'fail back' traffic to | ||
63 | * the restored port. | ||
64 | */ | ||
65 | struct BE_ASYNC_EVENT_LINK_STATE_AMAP { | ||
66 | u8 port0_link_status[8]; | ||
67 | u8 port1_link_status[8]; | ||
68 | u8 active_port[8]; | ||
69 | u8 rsvd0[8]; /* DWORD 0 */ | ||
70 | u8 port0_duplex[8]; | ||
71 | u8 port0_speed[8]; | ||
72 | u8 port1_duplex[8]; | ||
73 | u8 port1_speed[8]; | ||
74 | u8 port0_fault[8]; | ||
75 | u8 port1_fault[8]; | ||
76 | u8 rsvd1[2][8]; /* DWORD 2 */ | ||
77 | struct BE_ASYNC_EVENT_TRAILER_AMAP trailer; | ||
78 | } __packed; | ||
79 | struct ASYNC_EVENT_LINK_STATE_AMAP { | ||
80 | u32 dw[4]; | ||
81 | }; | ||
82 | #endif /* __asyncmesg_amap_h__ */ | ||
diff --git a/drivers/staging/benet/be_cm.h b/drivers/staging/benet/be_cm.h deleted file mode 100644 index b7a1dfd20c36..000000000000 --- a/drivers/staging/benet/be_cm.h +++ /dev/null | |||
@@ -1,134 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | /* | ||
18 | * Autogenerated by srcgen version: 0127 | ||
19 | */ | ||
20 | #ifndef __be_cm_amap_h__ | ||
21 | #define __be_cm_amap_h__ | ||
22 | #include "be_common.h" | ||
23 | #include "etx_context.h" | ||
24 | #include "mpu_context.h" | ||
25 | |||
26 | /* | ||
27 | * --- CEV_WATERMARK_ENUM --- | ||
28 | * CQ/EQ Watermark Encodings. Encoded as number of free entries in | ||
29 | * Queue when Watermark is reached. | ||
30 | */ | ||
31 | #define CEV_WMARK_0 (0) /* Watermark when Queue full */ | ||
32 | #define CEV_WMARK_16 (1) /* Watermark at 16 free entries */ | ||
33 | #define CEV_WMARK_32 (2) /* Watermark at 32 free entries */ | ||
34 | #define CEV_WMARK_48 (3) /* Watermark at 48 free entries */ | ||
35 | #define CEV_WMARK_64 (4) /* Watermark at 64 free entries */ | ||
36 | #define CEV_WMARK_80 (5) /* Watermark at 80 free entries */ | ||
37 | #define CEV_WMARK_96 (6) /* Watermark at 96 free entries */ | ||
38 | #define CEV_WMARK_112 (7) /* Watermark at 112 free entries */ | ||
39 | #define CEV_WMARK_128 (8) /* Watermark at 128 free entries */ | ||
40 | #define CEV_WMARK_144 (9) /* Watermark at 144 free entries */ | ||
41 | #define CEV_WMARK_160 (10) /* Watermark at 160 free entries */ | ||
42 | #define CEV_WMARK_176 (11) /* Watermark at 176 free entries */ | ||
43 | #define CEV_WMARK_192 (12) /* Watermark at 192 free entries */ | ||
44 | #define CEV_WMARK_208 (13) /* Watermark at 208 free entries */ | ||
45 | #define CEV_WMARK_224 (14) /* Watermark at 224 free entries */ | ||
46 | #define CEV_WMARK_240 (15) /* Watermark at 240 free entries */ | ||
47 | |||
48 | /* | ||
49 | * --- CQ_CNT_ENUM --- | ||
50 | * Completion Queue Count Encodings. | ||
51 | */ | ||
52 | #define CEV_CQ_CNT_256 (0) /* CQ has 256 entries */ | ||
53 | #define CEV_CQ_CNT_512 (1) /* CQ has 512 entries */ | ||
54 | #define CEV_CQ_CNT_1024 (2) /* CQ has 1024 entries */ | ||
55 | |||
56 | /* | ||
57 | * --- EQ_CNT_ENUM --- | ||
58 | * Event Queue Count Encodings. | ||
59 | */ | ||
60 | #define CEV_EQ_CNT_256 (0) /* EQ has 256 entries (16-byte EQEs only) */ | ||
61 | #define CEV_EQ_CNT_512 (1) /* EQ has 512 entries (16-byte EQEs only) */ | ||
62 | #define CEV_EQ_CNT_1024 (2) /* EQ has 1024 entries (4-byte or */ | ||
63 | /* 16-byte EQEs only) */ | ||
64 | #define CEV_EQ_CNT_2048 (3) /* EQ has 2048 entries (4-byte or */ | ||
65 | /* 16-byte EQEs only) */ | ||
66 | #define CEV_EQ_CNT_4096 (4) /* EQ has 4096 entries (4-byte EQEs only) */ | ||
67 | |||
68 | /* | ||
69 | * --- EQ_SIZE_ENUM --- | ||
70 | * Event Queue Entry Size Encoding. | ||
71 | */ | ||
72 | #define CEV_EQ_SIZE_4 (0) /* EQE is 4 bytes */ | ||
73 | #define CEV_EQ_SIZE_16 (1) /* EQE is 16 bytes */ | ||
74 | |||
75 | /* | ||
76 | * Completion Queue Context Table Entry. Contains the state of a CQ. | ||
77 | * Located in RAM within the CEV block. | ||
78 | */ | ||
79 | struct BE_CQ_CONTEXT_AMAP { | ||
80 | u8 Cidx[11]; /* DWORD 0 */ | ||
81 | u8 Watermark[4]; /* DWORD 0 */ | ||
82 | u8 NoDelay; /* DWORD 0 */ | ||
83 | u8 EPIdx[11]; /* DWORD 0 */ | ||
84 | u8 Count[2]; /* DWORD 0 */ | ||
85 | u8 valid; /* DWORD 0 */ | ||
86 | u8 SolEvent; /* DWORD 0 */ | ||
87 | u8 Eventable; /* DWORD 0 */ | ||
88 | u8 Pidx[11]; /* DWORD 1 */ | ||
89 | u8 PD[10]; /* DWORD 1 */ | ||
90 | u8 EQID[7]; /* DWORD 1 */ | ||
91 | u8 Func; /* DWORD 1 */ | ||
92 | u8 WME; /* DWORD 1 */ | ||
93 | u8 Stalled; /* DWORD 1 */ | ||
94 | u8 Armed; /* DWORD 1 */ | ||
95 | } __packed; | ||
96 | struct CQ_CONTEXT_AMAP { | ||
97 | u32 dw[2]; | ||
98 | }; | ||
99 | |||
100 | /* | ||
101 | * Event Queue Context Table Entry. Contains the state of an EQ. | ||
102 | * Located in RAM in the CEV block. | ||
103 | */ | ||
104 | struct BE_EQ_CONTEXT_AMAP { | ||
105 | u8 Cidx[13]; /* DWORD 0 */ | ||
106 | u8 rsvd0[2]; /* DWORD 0 */ | ||
107 | u8 Func; /* DWORD 0 */ | ||
108 | u8 EPIdx[13]; /* DWORD 0 */ | ||
109 | u8 valid; /* DWORD 0 */ | ||
110 | u8 rsvd1; /* DWORD 0 */ | ||
111 | u8 Size; /* DWORD 0 */ | ||
112 | u8 Pidx[13]; /* DWORD 1 */ | ||
113 | u8 rsvd2[3]; /* DWORD 1 */ | ||
114 | u8 PD[10]; /* DWORD 1 */ | ||
115 | u8 Count[3]; /* DWORD 1 */ | ||
116 | u8 SolEvent; /* DWORD 1 */ | ||
117 | u8 Stalled; /* DWORD 1 */ | ||
118 | u8 Armed; /* DWORD 1 */ | ||
119 | u8 Watermark[4]; /* DWORD 2 */ | ||
120 | u8 WME; /* DWORD 2 */ | ||
121 | u8 rsvd3[3]; /* DWORD 2 */ | ||
122 | u8 EventVect[6]; /* DWORD 2 */ | ||
123 | u8 rsvd4[2]; /* DWORD 2 */ | ||
124 | u8 Delay[8]; /* DWORD 2 */ | ||
125 | u8 rsvd5[6]; /* DWORD 2 */ | ||
126 | u8 TMR; /* DWORD 2 */ | ||
127 | u8 rsvd6; /* DWORD 2 */ | ||
128 | u8 rsvd7[32]; /* DWORD 3 */ | ||
129 | } __packed; | ||
130 | struct EQ_CONTEXT_AMAP { | ||
131 | u32 dw[4]; | ||
132 | }; | ||
133 | |||
134 | #endif /* __be_cm_amap_h__ */ | ||
diff --git a/drivers/staging/benet/be_common.h b/drivers/staging/benet/be_common.h deleted file mode 100644 index 7e63dc5e3348..000000000000 --- a/drivers/staging/benet/be_common.h +++ /dev/null | |||
@@ -1,53 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | /* | ||
18 | * Autogenerated by srcgen version: 0127 | ||
19 | */ | ||
20 | #ifndef __be_common_amap_h__ | ||
21 | #define __be_common_amap_h__ | ||
22 | |||
23 | /* Physical Address. */ | ||
24 | struct BE_PHYS_ADDR_AMAP { | ||
25 | u8 lo[32]; /* DWORD 0 */ | ||
26 | u8 hi[32]; /* DWORD 1 */ | ||
27 | } __packed; | ||
28 | struct PHYS_ADDR_AMAP { | ||
29 | u32 dw[2]; | ||
30 | }; | ||
31 | |||
32 | /* Virtual Address. */ | ||
33 | struct BE_VIRT_ADDR_AMAP { | ||
34 | u8 lo[32]; /* DWORD 0 */ | ||
35 | u8 hi[32]; /* DWORD 1 */ | ||
36 | } __packed; | ||
37 | struct VIRT_ADDR_AMAP { | ||
38 | u32 dw[2]; | ||
39 | }; | ||
40 | |||
41 | /* Scatter gather element. */ | ||
42 | struct BE_SGE_AMAP { | ||
43 | u8 addr_hi[32]; /* DWORD 0 */ | ||
44 | u8 addr_lo[32]; /* DWORD 1 */ | ||
45 | u8 rsvd0[32]; /* DWORD 2 */ | ||
46 | u8 len[16]; /* DWORD 3 */ | ||
47 | u8 rsvd1[16]; /* DWORD 3 */ | ||
48 | } __packed; | ||
49 | struct SGE_AMAP { | ||
50 | u32 dw[4]; | ||
51 | }; | ||
52 | |||
53 | #endif /* __be_common_amap_h__ */ | ||
diff --git a/drivers/staging/benet/be_ethtool.c b/drivers/staging/benet/be_ethtool.c deleted file mode 100644 index 027af85707aa..000000000000 --- a/drivers/staging/benet/be_ethtool.c +++ /dev/null | |||
@@ -1,348 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | /* | ||
18 | * be_ethtool.c | ||
19 | * | ||
20 | * This file contains various functions that ethtool can use | ||
21 | * to talk to the driver and the BE H/W. | ||
22 | */ | ||
23 | |||
24 | #include "benet.h" | ||
25 | |||
26 | #include <linux/ethtool.h> | ||
27 | |||
28 | static const char benet_gstrings_stats[][ETH_GSTRING_LEN] = { | ||
29 | /* net_device_stats */ | ||
30 | "rx_packets", | ||
31 | "tx_packets", | ||
32 | "rx_bytes", | ||
33 | "tx_bytes", | ||
34 | "rx_errors", | ||
35 | "tx_errors", | ||
36 | "rx_dropped", | ||
37 | "tx_dropped", | ||
38 | "multicast", | ||
39 | "collisions", | ||
40 | "rx_length_errors", | ||
41 | "rx_over_errors", | ||
42 | "rx_crc_errors", | ||
43 | "rx_frame_errors", | ||
44 | "rx_fifo_errors", | ||
45 | "rx_missed_errors", | ||
46 | "tx_aborted_errors", | ||
47 | "tx_carrier_errors", | ||
48 | "tx_fifo_errors", | ||
49 | "tx_heartbeat_errors", | ||
50 | "tx_window_errors", | ||
51 | "rx_compressed", | ||
52 | "tc_compressed", | ||
53 | /* BE driver Stats */ | ||
54 | "bes_tx_reqs", | ||
55 | "bes_tx_fails", | ||
56 | "bes_fwd_reqs", | ||
57 | "bes_tx_wrbs", | ||
58 | "bes_interrupts", | ||
59 | "bes_events", | ||
60 | "bes_tx_events", | ||
61 | "bes_rx_events", | ||
62 | "bes_tx_compl", | ||
63 | "bes_rx_compl", | ||
64 | "bes_ethrx_post_fail", | ||
65 | "bes_802_3_dropped_frames", | ||
66 | "bes_802_3_malformed_frames", | ||
67 | "bes_rx_misc_pkts", | ||
68 | "bes_eth_tx_rate", | ||
69 | "bes_eth_rx_rate", | ||
70 | "Num Packets collected", | ||
71 | "Num Times Flushed", | ||
72 | }; | ||
73 | |||
74 | #define NET_DEV_STATS_LEN \ | ||
75 | (sizeof(struct net_device_stats)/sizeof(unsigned long)) | ||
76 | |||
77 | #define BENET_STATS_LEN ARRAY_SIZE(benet_gstrings_stats) | ||
78 | |||
79 | static void | ||
80 | be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) | ||
81 | { | ||
82 | struct be_net_object *pnob = netdev_priv(netdev); | ||
83 | struct be_adapter *adapter = pnob->adapter; | ||
84 | |||
85 | strncpy(drvinfo->driver, be_driver_name, 32); | ||
86 | strncpy(drvinfo->version, be_drvr_ver, 32); | ||
87 | strncpy(drvinfo->fw_version, be_fw_ver, 32); | ||
88 | strcpy(drvinfo->bus_info, pci_name(adapter->pdev)); | ||
89 | drvinfo->testinfo_len = 0; | ||
90 | drvinfo->regdump_len = 0; | ||
91 | drvinfo->eedump_len = 0; | ||
92 | } | ||
93 | |||
94 | static int | ||
95 | be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) | ||
96 | { | ||
97 | struct be_net_object *pnob = netdev_priv(netdev); | ||
98 | struct be_adapter *adapter = pnob->adapter; | ||
99 | |||
100 | coalesce->rx_max_coalesced_frames = adapter->max_rx_coal; | ||
101 | |||
102 | coalesce->rx_coalesce_usecs = adapter->cur_eqd; | ||
103 | coalesce->rx_coalesce_usecs_high = adapter->max_eqd; | ||
104 | coalesce->rx_coalesce_usecs_low = adapter->min_eqd; | ||
105 | |||
106 | coalesce->tx_coalesce_usecs = adapter->cur_eqd; | ||
107 | coalesce->tx_coalesce_usecs_high = adapter->max_eqd; | ||
108 | coalesce->tx_coalesce_usecs_low = adapter->min_eqd; | ||
109 | |||
110 | coalesce->use_adaptive_rx_coalesce = adapter->enable_aic; | ||
111 | coalesce->use_adaptive_tx_coalesce = adapter->enable_aic; | ||
112 | |||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | /* | ||
117 | * This routine is used to set the interrupt coalescing delay *as well as* | ||
118 | * the number of pkts to coalesce for LRO. | ||
119 | */ | ||
120 | static int | ||
121 | be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) | ||
122 | { | ||
123 | struct be_net_object *pnob = netdev_priv(netdev); | ||
124 | struct be_adapter *adapter = pnob->adapter; | ||
125 | struct be_eq_object *eq_objectp; | ||
126 | u32 max, min, cur; | ||
127 | int status; | ||
128 | |||
129 | adapter->max_rx_coal = coalesce->rx_max_coalesced_frames; | ||
130 | if (adapter->max_rx_coal >= BE_LRO_MAX_PKTS) | ||
131 | adapter->max_rx_coal = BE_LRO_MAX_PKTS; | ||
132 | |||
133 | if (adapter->enable_aic == 0 && | ||
134 | coalesce->use_adaptive_rx_coalesce == 1) { | ||
135 | /* if AIC is being turned on now, start with an EQD of 0 */ | ||
136 | adapter->cur_eqd = 0; | ||
137 | } | ||
138 | adapter->enable_aic = coalesce->use_adaptive_rx_coalesce; | ||
139 | |||
140 | /* round off to nearest multiple of 8 */ | ||
141 | max = (((coalesce->rx_coalesce_usecs_high + 4) >> 3) << 3); | ||
142 | min = (((coalesce->rx_coalesce_usecs_low + 4) >> 3) << 3); | ||
143 | cur = (((coalesce->rx_coalesce_usecs + 4) >> 3) << 3); | ||
144 | |||
145 | if (adapter->enable_aic) { | ||
146 | /* accept low and high if AIC is enabled */ | ||
147 | if (max > MAX_EQD) | ||
148 | max = MAX_EQD; | ||
149 | if (min > max) | ||
150 | min = max; | ||
151 | adapter->max_eqd = max; | ||
152 | adapter->min_eqd = min; | ||
153 | if (adapter->cur_eqd > max) | ||
154 | adapter->cur_eqd = max; | ||
155 | if (adapter->cur_eqd < min) | ||
156 | adapter->cur_eqd = min; | ||
157 | } else { | ||
158 | /* accept specified coalesce_usecs only if AIC is disabled */ | ||
159 | if (cur > MAX_EQD) | ||
160 | cur = MAX_EQD; | ||
161 | eq_objectp = &pnob->event_q_obj; | ||
162 | status = | ||
163 | be_eq_modify_delay(&pnob->fn_obj, 1, &eq_objectp, &cur, | ||
164 | NULL, NULL, NULL); | ||
165 | if (status == BE_SUCCESS) | ||
166 | adapter->cur_eqd = cur; | ||
167 | } | ||
168 | return 0; | ||
169 | } | ||
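The (((x + 4) >> 3) << 3) expressions in be_set_coalesce() above round the requested microsecond values to the nearest multiple of 8, matching the 8 usec EQ delay granularity noted later in be_init.c. A minimal stand-alone check of that bit trick follows; the sample inputs are arbitrary.

#include <stdio.h>

/* Round to the nearest multiple of 8: add half a step, then clear the low 3 bits. */
static unsigned int round_to_8(unsigned int usecs)
{
	return ((usecs + 4) >> 3) << 3;
}

int main(void)
{
	unsigned int samples[] = { 0, 3, 4, 11, 12, 100 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%u -> %u\n", samples[i], round_to_8(samples[i]));
	/* Prints: 0 -> 0, 3 -> 0, 4 -> 8, 11 -> 8, 12 -> 16, 100 -> 104 */
	return 0;
}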
170 | |||
171 | static u32 be_get_rx_csum(struct net_device *netdev) | ||
172 | { | ||
173 | struct be_net_object *pnob = netdev_priv(netdev); | ||
174 | struct be_adapter *adapter = pnob->adapter; | ||
175 | return adapter->rx_csum; | ||
176 | } | ||
177 | |||
178 | static int be_set_rx_csum(struct net_device *netdev, uint32_t data) | ||
179 | { | ||
180 | struct be_net_object *pnob = netdev_priv(netdev); | ||
181 | struct be_adapter *adapter = pnob->adapter; | ||
182 | |||
183 | if (data) | ||
184 | adapter->rx_csum = 1; | ||
185 | else | ||
186 | adapter->rx_csum = 0; | ||
187 | |||
188 | return 0; | ||
189 | } | ||
190 | |||
191 | static void | ||
192 | be_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data) | ||
193 | { | ||
194 | switch (stringset) { | ||
195 | case ETH_SS_STATS: | ||
196 | memcpy(data, *benet_gstrings_stats, | ||
197 | sizeof(benet_gstrings_stats)); | ||
198 | break; | ||
199 | } | ||
200 | } | ||
201 | |||
202 | static int be_get_stats_count(struct net_device *netdev) | ||
203 | { | ||
204 | return BENET_STATS_LEN; | ||
205 | } | ||
206 | |||
207 | static void | ||
208 | be_get_ethtool_stats(struct net_device *netdev, | ||
209 | struct ethtool_stats *stats, uint64_t *data) | ||
210 | { | ||
211 | struct be_net_object *pnob = netdev_priv(netdev); | ||
212 | struct be_adapter *adapter = pnob->adapter; | ||
213 | int i; | ||
214 | |||
215 | benet_get_stats(netdev); | ||
216 | |||
217 | for (i = 0; i < NET_DEV_STATS_LEN; i++) | ||
218 | data[i] = ((unsigned long *)&adapter->benet_stats)[i]; | ||
219 | |||
220 | data[i++] = adapter->be_stat.bes_tx_reqs; | ||
221 | data[i++] = adapter->be_stat.bes_tx_fails; | ||
222 | data[i++] = adapter->be_stat.bes_fwd_reqs; | ||
223 | data[i++] = adapter->be_stat.bes_tx_wrbs; | ||
224 | |||
225 | data[i++] = adapter->be_stat.bes_ints; | ||
226 | data[i++] = adapter->be_stat.bes_events; | ||
227 | data[i++] = adapter->be_stat.bes_tx_events; | ||
228 | data[i++] = adapter->be_stat.bes_rx_events; | ||
229 | data[i++] = adapter->be_stat.bes_tx_compl; | ||
230 | data[i++] = adapter->be_stat.bes_rx_compl; | ||
231 | data[i++] = adapter->be_stat.bes_ethrx_post_fail; | ||
232 | data[i++] = adapter->be_stat.bes_802_3_dropped_frames; | ||
233 | data[i++] = adapter->be_stat.bes_802_3_malformed_frames; | ||
234 | data[i++] = adapter->be_stat.bes_rx_misc_pkts; | ||
235 | data[i++] = adapter->be_stat.bes_eth_tx_rate; | ||
236 | data[i++] = adapter->be_stat.bes_eth_rx_rate; | ||
237 | data[i++] = adapter->be_stat.bes_rx_coal; | ||
238 | data[i++] = adapter->be_stat.bes_rx_flush; | ||
239 | |||
240 | } | ||
241 | |||
242 | static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | ||
243 | { | ||
244 | ecmd->speed = SPEED_10000; | ||
245 | ecmd->duplex = DUPLEX_FULL; | ||
246 | ecmd->autoneg = AUTONEG_DISABLE; | ||
247 | return 0; | ||
248 | } | ||
249 | |||
250 | /* Get the Ring parameters from the pnob */ | ||
251 | static void | ||
252 | be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) | ||
253 | { | ||
254 | struct be_net_object *pnob = netdev_priv(netdev); | ||
255 | |||
256 | /* Preset maximums */ | ||
257 | ring->rx_max_pending = pnob->rx_q_len; | ||
258 | ring->rx_mini_max_pending = ring->rx_mini_max_pending; | ||
259 | ring->rx_jumbo_max_pending = ring->rx_jumbo_max_pending; | ||
260 | ring->tx_max_pending = pnob->tx_q_len; | ||
261 | |||
262 | /* Current hardware Settings */ | ||
263 | ring->rx_pending = atomic_read(&pnob->rx_q_posted); | ||
264 | ring->rx_mini_pending = ring->rx_mini_pending; | ||
265 | ring->rx_jumbo_pending = ring->rx_jumbo_pending; | ||
266 | ring->tx_pending = atomic_read(&pnob->tx_q_used); | ||
267 | |||
268 | } | ||
269 | |||
270 | static void | ||
271 | be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd) | ||
272 | { | ||
273 | struct be_net_object *pnob = netdev_priv(netdev); | ||
274 | bool rxfc, txfc; | ||
275 | int status; | ||
276 | |||
277 | status = be_eth_get_flow_control(&pnob->fn_obj, &txfc, &rxfc); | ||
278 | if (status != BE_SUCCESS) { | ||
279 | dev_info(&netdev->dev, "Unable to get pause frame settings\n"); | ||
280 | /* return defaults */ | ||
281 | ecmd->rx_pause = 1; | ||
282 | ecmd->tx_pause = 0; | ||
283 | ecmd->autoneg = AUTONEG_ENABLE; | ||
284 | return; | ||
285 | } | ||
286 | |||
287 | if (txfc == true) | ||
288 | ecmd->tx_pause = 1; | ||
289 | else | ||
290 | ecmd->tx_pause = 0; | ||
291 | |||
292 | if (rxfc == true) | ||
293 | ecmd->rx_pause = 1; | ||
294 | else | ||
295 | ecmd->rx_pause = 0; | ||
296 | |||
297 | ecmd->autoneg = AUTONEG_ENABLE; | ||
298 | } | ||
299 | |||
300 | static int | ||
301 | be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd) | ||
302 | { | ||
303 | struct be_net_object *pnob = netdev_priv(netdev); | ||
304 | bool txfc, rxfc; | ||
305 | int status; | ||
306 | |||
307 | if (ecmd->autoneg != AUTONEG_ENABLE) | ||
308 | return -EINVAL; | ||
309 | |||
310 | if (ecmd->tx_pause) | ||
311 | txfc = true; | ||
312 | else | ||
313 | txfc = false; | ||
314 | |||
315 | if (ecmd->rx_pause) | ||
316 | rxfc = true; | ||
317 | else | ||
318 | rxfc = false; | ||
319 | |||
320 | status = be_eth_set_flow_control(&pnob->fn_obj, txfc, rxfc); | ||
321 | if (status != BE_SUCCESS) { | ||
322 | dev_info(&netdev->dev, "Unable to set pause frame settings\n"); | ||
323 | return -1; | ||
324 | } | ||
325 | return 0; | ||
326 | } | ||
327 | |||
328 | struct ethtool_ops be_ethtool_ops = { | ||
329 | .get_settings = be_get_settings, | ||
330 | .get_drvinfo = be_get_drvinfo, | ||
331 | .get_link = ethtool_op_get_link, | ||
332 | .get_coalesce = be_get_coalesce, | ||
333 | .set_coalesce = be_set_coalesce, | ||
334 | .get_ringparam = be_get_ringparam, | ||
335 | .get_pauseparam = be_get_pauseparam, | ||
336 | .set_pauseparam = be_set_pauseparam, | ||
337 | .get_rx_csum = be_get_rx_csum, | ||
338 | .set_rx_csum = be_set_rx_csum, | ||
339 | .get_tx_csum = ethtool_op_get_tx_csum, | ||
340 | .set_tx_csum = ethtool_op_set_tx_csum, | ||
341 | .get_sg = ethtool_op_get_sg, | ||
342 | .set_sg = ethtool_op_set_sg, | ||
343 | .get_tso = ethtool_op_get_tso, | ||
344 | .set_tso = ethtool_op_set_tso, | ||
345 | .get_strings = be_get_strings, | ||
346 | .get_stats_count = be_get_stats_count, | ||
347 | .get_ethtool_stats = be_get_ethtool_stats, | ||
348 | }; | ||
diff --git a/drivers/staging/benet/be_init.c b/drivers/staging/benet/be_init.c deleted file mode 100644 index 12a026c3f9e1..000000000000 --- a/drivers/staging/benet/be_init.c +++ /dev/null | |||
@@ -1,1382 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | #include <linux/etherdevice.h> | ||
18 | #include "benet.h" | ||
19 | |||
20 | #define DRVR_VERSION "1.0.728" | ||
21 | |||
22 | static const struct pci_device_id be_device_id_table[] = { | ||
23 | {PCI_DEVICE(0x19a2, 0x0201)}, | ||
24 | {0} | ||
25 | }; | ||
26 | |||
27 | MODULE_DEVICE_TABLE(pci, be_device_id_table); | ||
28 | |||
29 | MODULE_VERSION(DRVR_VERSION); | ||
30 | |||
31 | #define DRV_DESCRIPTION "ServerEngines BladeEngine Network Driver Version " | ||
32 | |||
33 | MODULE_DESCRIPTION(DRV_DESCRIPTION DRVR_VERSION); | ||
34 | MODULE_AUTHOR("ServerEngines"); | ||
35 | MODULE_LICENSE("GPL"); | ||
36 | |||
37 | static unsigned int msix = 1; | ||
38 | module_param(msix, uint, S_IRUGO); | ||
39 | MODULE_PARM_DESC(msix, "Use MSI-x interrupts"); | ||
40 | |||
41 | static unsigned int rxbuf_size = 2048; /* Default RX frag size */ | ||
42 | module_param(rxbuf_size, uint, S_IRUGO); | ||
43 | MODULE_PARM_DESC(rxbuf_size, "Size of buffers to hold Rx data"); | ||
44 | |||
45 | const char be_drvr_ver[] = DRVR_VERSION; | ||
46 | char be_fw_ver[32]; /* F/W version filled in by be_probe */ | ||
47 | char be_driver_name[] = "benet"; | ||
48 | |||
49 | /* | ||
50 | * Number of entries in each queue. | ||
51 | */ | ||
52 | #define EVENT_Q_LEN 1024 | ||
53 | #define ETH_TXQ_LEN 2048 | ||
54 | #define ETH_TXCQ_LEN 1024 | ||
55 | #define ETH_RXQ_LEN 1024 /* Does not support any other value */ | ||
56 | #define ETH_UC_RXCQ_LEN 1024 | ||
57 | #define ETH_BC_RXCQ_LEN 256 | ||
58 | #define MCC_Q_LEN 64 /* total size not to exceed 8 pages */ | ||
59 | #define MCC_CQ_LEN 256 | ||
60 | |||
61 | /* Bit mask describing events of interest to be traced */ | ||
62 | unsigned int trace_level; | ||
63 | |||
64 | static int | ||
65 | init_pci_be_function(struct be_adapter *adapter, struct pci_dev *pdev) | ||
66 | { | ||
67 | u64 pa; | ||
68 | |||
69 | /* CSR */ | ||
70 | pa = pci_resource_start(pdev, 2); | ||
71 | adapter->csr_va = ioremap_nocache(pa, pci_resource_len(pdev, 2)); | ||
72 | if (adapter->csr_va == NULL) | ||
73 | return -ENOMEM; | ||
74 | |||
75 | /* Door Bell */ | ||
76 | pa = pci_resource_start(pdev, 4); | ||
77 | adapter->db_va = ioremap_nocache(pa, (128 * 1024)); | ||
78 | if (adapter->db_va == NULL) { | ||
79 | iounmap(adapter->csr_va); | ||
80 | return -ENOMEM; | ||
81 | } | ||
82 | |||
83 | /* PCI */ | ||
84 | pa = pci_resource_start(pdev, 1); | ||
85 | adapter->pci_va = ioremap_nocache(pa, pci_resource_len(pdev, 1)); | ||
86 | if (adapter->pci_va == NULL) { | ||
87 | iounmap(adapter->csr_va); | ||
88 | iounmap(adapter->db_va); | ||
89 | return -ENOMEM; | ||
90 | } | ||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | /* | ||
95 | This function enables the interrupt corresponding to the Event | ||
96 | queue ID for the given NetObject | ||
97 | */ | ||
98 | void be_enable_eq_intr(struct be_net_object *pnob) | ||
99 | { | ||
100 | struct CQ_DB_AMAP cqdb; | ||
101 | cqdb.dw[0] = 0; | ||
102 | AMAP_SET_BITS_PTR(CQ_DB, event, &cqdb, 1); | ||
103 | AMAP_SET_BITS_PTR(CQ_DB, rearm, &cqdb, 1); | ||
104 | AMAP_SET_BITS_PTR(CQ_DB, num_popped, &cqdb, 0); | ||
105 | AMAP_SET_BITS_PTR(CQ_DB, qid, &cqdb, pnob->event_q_id); | ||
106 | PD_WRITE(&pnob->fn_obj, cq_db, cqdb.dw[0]); | ||
107 | } | ||
108 | |||
109 | /* | ||
110 | This function disables the interrupt corresponding to the Event | ||
111 | queue ID for the given NetObject | ||
112 | */ | ||
113 | void be_disable_eq_intr(struct be_net_object *pnob) | ||
114 | { | ||
115 | struct CQ_DB_AMAP cqdb; | ||
116 | cqdb.dw[0] = 0; | ||
117 | AMAP_SET_BITS_PTR(CQ_DB, event, &cqdb, 1); | ||
118 | AMAP_SET_BITS_PTR(CQ_DB, rearm, &cqdb, 0); | ||
119 | AMAP_SET_BITS_PTR(CQ_DB, num_popped, &cqdb, 0); | ||
120 | AMAP_SET_BITS_PTR(CQ_DB, qid, &cqdb, pnob->event_q_id); | ||
121 | PD_WRITE(&pnob->fn_obj, cq_db, cqdb.dw[0]); | ||
122 | } | ||
123 | |||
124 | /* | ||
125 | This function enables the interrupt from the network function | ||
126 | of the BladeEngine. Use the function be_enable_eq_intr() | ||
127 | to enable the interrupt from the event queue of only one specific | ||
128 | NetObject | ||
129 | */ | ||
130 | void be_enable_intr(struct be_net_object *pnob) | ||
131 | { | ||
132 | struct PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP ctrl; | ||
133 | u32 host_intr; | ||
134 | |||
135 | ctrl.dw[0] = PCICFG1_READ(&pnob->fn_obj, host_timer_int_ctrl); | ||
136 | host_intr = AMAP_GET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR, | ||
137 | hostintr, ctrl.dw); | ||
138 | if (!host_intr) { | ||
139 | AMAP_SET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR, | ||
140 | hostintr, ctrl.dw, 1); | ||
141 | PCICFG1_WRITE(&pnob->fn_obj, host_timer_int_ctrl, | ||
142 | ctrl.dw[0]); | ||
143 | } | ||
144 | } | ||
145 | |||
146 | /* | ||
147 | This function disables the interrupt from the network function of | ||
148 | the BladeEngine. Use the function be_disable_eq_intr() to | ||
149 | disable the interrupt from the event queue of only one specific NetObject | ||
150 | */ | ||
151 | void be_disable_intr(struct be_net_object *pnob) | ||
152 | { | ||
153 | |||
154 | struct PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP ctrl; | ||
155 | u32 host_intr; | ||
156 | ctrl.dw[0] = PCICFG1_READ(&pnob->fn_obj, host_timer_int_ctrl); | ||
157 | host_intr = AMAP_GET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR, | ||
158 | hostintr, ctrl.dw); | ||
159 | if (host_intr) { | ||
160 | AMAP_SET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR, hostintr, | ||
161 | ctrl.dw, 0); | ||
162 | PCICFG1_WRITE(&pnob->fn_obj, host_timer_int_ctrl, | ||
163 | ctrl.dw[0]); | ||
164 | } | ||
165 | } | ||
166 | |||
167 | static int be_enable_msix(struct be_adapter *adapter) | ||
168 | { | ||
169 | int i, ret; | ||
170 | |||
171 | if (!msix) | ||
172 | return -1; | ||
173 | |||
174 | for (i = 0; i < BE_MAX_REQ_MSIX_VECTORS; i++) | ||
175 | adapter->msix_entries[i].entry = i; | ||
176 | |||
177 | ret = pci_enable_msix(adapter->pdev, adapter->msix_entries, | ||
178 | BE_MAX_REQ_MSIX_VECTORS); | ||
179 | |||
180 | if (ret == 0) | ||
181 | adapter->msix_enabled = 1; | ||
182 | return ret; | ||
183 | } | ||
184 | |||
185 | static int be_register_isr(struct be_adapter *adapter, | ||
186 | struct be_net_object *pnob) | ||
187 | { | ||
188 | struct net_device *netdev = pnob->netdev; | ||
189 | int intx = 0, r; | ||
190 | |||
191 | netdev->irq = adapter->pdev->irq; | ||
192 | r = be_enable_msix(adapter); | ||
193 | |||
194 | if (r == 0) { | ||
195 | r = request_irq(adapter->msix_entries[0].vector, | ||
196 | be_int, IRQF_SHARED, netdev->name, netdev); | ||
197 | if (r) { | ||
198 | printk(KERN_WARNING | ||
199 | "MSIX Request IRQ failed - Errno %d\n", r); | ||
200 | intx = 1; | ||
201 | pci_disable_msix(adapter->pdev); | ||
202 | adapter->msix_enabled = 0; | ||
203 | } | ||
204 | } else { | ||
205 | intx = 1; | ||
206 | } | ||
207 | |||
208 | if (intx) { | ||
209 | r = request_irq(netdev->irq, be_int, IRQF_SHARED, | ||
210 | netdev->name, netdev); | ||
211 | if (r) { | ||
212 | printk(KERN_WARNING | ||
213 | "INTx Request IRQ failed - Errno %d\n", r); | ||
214 | return -1; | ||
215 | } | ||
216 | } | ||
217 | adapter->isr_registered = 1; | ||
218 | return 0; | ||
219 | } | ||
220 | |||
221 | static void be_unregister_isr(struct be_adapter *adapter) | ||
222 | { | ||
223 | struct net_device *netdev = adapter->netdevp; | ||
224 | if (adapter->isr_registered) { | ||
225 | if (adapter->msix_enabled) { | ||
226 | free_irq(adapter->msix_entries[0].vector, netdev); | ||
227 | pci_disable_msix(adapter->pdev); | ||
228 | adapter->msix_enabled = 0; | ||
229 | } else { | ||
230 | free_irq(netdev->irq, netdev); | ||
231 | } | ||
232 | adapter->isr_registered = 0; | ||
233 | } | ||
234 | } | ||
235 | |||
236 | /* | ||
237 | This function processes the Flush Completions that are issued by the | ||
238 | ARM F/W, when a Recv Ring is destroyed. A flush completion is | ||
239 | identified when an Rx Compl descriptor has the tcpcksum and udpcksum | ||
240 | set and the pktsize is 32. These completions are received on the | ||
241 | Rx Completion Queue. | ||
242 | */ | ||
243 | static u32 be_process_rx_flush_cmpl(struct be_net_object *pnob) | ||
244 | { | ||
245 | struct ETH_RX_COMPL_AMAP *rxcp; | ||
246 | unsigned int i = 0; | ||
247 | while ((rxcp = be_get_rx_cmpl(pnob)) != NULL) { | ||
248 | be_notify_cmpl(pnob, 1, pnob->rx_cq_id, 1); | ||
249 | i++; | ||
250 | } | ||
251 | return i; | ||
252 | } | ||
253 | |||
254 | static void be_tx_q_clean(struct be_net_object *pnob) | ||
255 | { | ||
256 | while (atomic_read(&pnob->tx_q_used)) | ||
257 | process_one_tx_compl(pnob, tx_compl_lastwrb_idx_get(pnob)); | ||
258 | } | ||
259 | |||
260 | static void be_rx_q_clean(struct be_net_object *pnob) | ||
261 | { | ||
262 | if (pnob->rx_ctxt) { | ||
263 | int i; | ||
264 | struct be_rx_page_info *rx_page_info; | ||
265 | for (i = 0; i < pnob->rx_q_len; i++) { | ||
266 | rx_page_info = &(pnob->rx_page_info[i]); | ||
267 | if (!pnob->rx_pg_shared || rx_page_info->page_offset) { | ||
268 | pci_unmap_page(pnob->adapter->pdev, | ||
269 | pci_unmap_addr(rx_page_info, bus), | ||
270 | pnob->rx_buf_size, | ||
271 | PCI_DMA_FROMDEVICE); | ||
272 | } | ||
273 | if (rx_page_info->page) | ||
274 | put_page(rx_page_info->page); | ||
275 | memset(rx_page_info, 0, sizeof(struct be_rx_page_info)); | ||
276 | } | ||
277 | pnob->rx_pg_info_hd = 0; | ||
278 | } | ||
279 | } | ||
280 | |||
281 | static void be_destroy_netobj(struct be_net_object *pnob) | ||
282 | { | ||
283 | int status; | ||
284 | |||
285 | if (pnob->tx_q_created) { | ||
286 | status = be_eth_sq_destroy(&pnob->tx_q_obj); | ||
287 | pnob->tx_q_created = 0; | ||
288 | } | ||
289 | |||
290 | if (pnob->rx_q_created) { | ||
291 | status = be_eth_rq_destroy(&pnob->rx_q_obj); | ||
292 | if (status != 0) { | ||
293 | status = be_eth_rq_destroy_options(&pnob->rx_q_obj, 0, | ||
294 | NULL, NULL); | ||
295 | BUG_ON(status); | ||
296 | } | ||
297 | pnob->rx_q_created = 0; | ||
298 | } | ||
299 | |||
300 | be_process_rx_flush_cmpl(pnob); | ||
301 | |||
302 | if (pnob->tx_cq_created) { | ||
303 | status = be_cq_destroy(&pnob->tx_cq_obj); | ||
304 | pnob->tx_cq_created = 0; | ||
305 | } | ||
306 | |||
307 | if (pnob->rx_cq_created) { | ||
308 | status = be_cq_destroy(&pnob->rx_cq_obj); | ||
309 | pnob->rx_cq_created = 0; | ||
310 | } | ||
311 | |||
312 | if (pnob->mcc_q_created) { | ||
313 | status = be_mcc_ring_destroy(&pnob->mcc_q_obj); | ||
314 | pnob->mcc_q_created = 0; | ||
315 | } | ||
316 | if (pnob->mcc_cq_created) { | ||
317 | status = be_cq_destroy(&pnob->mcc_cq_obj); | ||
318 | pnob->mcc_cq_created = 0; | ||
319 | } | ||
320 | |||
321 | if (pnob->event_q_created) { | ||
322 | status = be_eq_destroy(&pnob->event_q_obj); | ||
323 | pnob->event_q_created = 0; | ||
324 | } | ||
325 | be_function_cleanup(&pnob->fn_obj); | ||
326 | } | ||
327 | |||
328 | /* | ||
329 | * free all resources associated with a pnob | ||
330 | * Called at the time of module cleanup as well as on any error during | ||
331 | * module init. Some resources may be partially allocated in a NetObj. | ||
332 | */ | ||
333 | static void netobject_cleanup(struct be_adapter *adapter, | ||
334 | struct be_net_object *pnob) | ||
335 | { | ||
336 | struct net_device *netdev = adapter->netdevp; | ||
337 | |||
338 | if (netif_running(netdev)) { | ||
339 | netif_stop_queue(netdev); | ||
340 | be_wait_nic_tx_cmplx_cmpl(pnob); | ||
341 | be_disable_eq_intr(pnob); | ||
342 | } | ||
343 | |||
344 | be_unregister_isr(adapter); | ||
345 | |||
346 | if (adapter->tasklet_started) { | ||
347 | tasklet_kill(&(adapter->sts_handler)); | ||
348 | adapter->tasklet_started = 0; | ||
349 | } | ||
350 | if (pnob->fn_obj_created) | ||
351 | be_disable_intr(pnob); | ||
352 | |||
353 | if (adapter->dev_state != BE_DEV_STATE_NONE) | ||
354 | unregister_netdev(netdev); | ||
355 | |||
356 | if (pnob->fn_obj_created) | ||
357 | be_destroy_netobj(pnob); | ||
358 | |||
359 | adapter->net_obj = NULL; | ||
360 | adapter->netdevp = NULL; | ||
361 | |||
362 | be_rx_q_clean(pnob); | ||
363 | if (pnob->rx_ctxt) { | ||
364 | kfree(pnob->rx_page_info); | ||
365 | kfree(pnob->rx_ctxt); | ||
366 | } | ||
367 | |||
368 | be_tx_q_clean(pnob); | ||
369 | kfree(pnob->tx_ctxt); | ||
370 | |||
371 | if (pnob->mcc_q) | ||
372 | pci_free_consistent(adapter->pdev, pnob->mcc_q_size, | ||
373 | pnob->mcc_q, pnob->mcc_q_bus); | ||
374 | |||
375 | if (pnob->mcc_wrb_ctxt) | ||
376 | free_pages((unsigned long)pnob->mcc_wrb_ctxt, | ||
377 | get_order(pnob->mcc_wrb_ctxt_size)); | ||
378 | |||
379 | if (pnob->mcc_cq) | ||
380 | pci_free_consistent(adapter->pdev, pnob->mcc_cq_size, | ||
381 | pnob->mcc_cq, pnob->mcc_cq_bus); | ||
382 | |||
383 | if (pnob->event_q) | ||
384 | pci_free_consistent(adapter->pdev, pnob->event_q_size, | ||
385 | pnob->event_q, pnob->event_q_bus); | ||
386 | |||
387 | if (pnob->tx_cq) | ||
388 | pci_free_consistent(adapter->pdev, pnob->tx_cq_size, | ||
389 | pnob->tx_cq, pnob->tx_cq_bus); | ||
390 | |||
391 | if (pnob->tx_q) | ||
392 | pci_free_consistent(adapter->pdev, pnob->tx_q_size, | ||
393 | pnob->tx_q, pnob->tx_q_bus); | ||
394 | |||
395 | if (pnob->rx_q) | ||
396 | pci_free_consistent(adapter->pdev, pnob->rx_q_size, | ||
397 | pnob->rx_q, pnob->rx_q_bus); | ||
398 | |||
399 | if (pnob->rx_cq) | ||
400 | pci_free_consistent(adapter->pdev, pnob->rx_cq_size, | ||
401 | pnob->rx_cq, pnob->rx_cq_bus); | ||
402 | |||
403 | |||
404 | if (pnob->mb_ptr) | ||
405 | pci_free_consistent(adapter->pdev, pnob->mb_size, pnob->mb_ptr, | ||
406 | pnob->mb_bus); | ||
407 | |||
408 | free_netdev(netdev); | ||
409 | } | ||
410 | |||
411 | |||
412 | static int be_nob_ring_alloc(struct be_adapter *adapter, | ||
413 | struct be_net_object *pnob) | ||
414 | { | ||
415 | u32 size; | ||
416 | |||
417 | /* Mailbox ring descriptor (rd); the mailbox pointer needs to be 16-byte aligned */ | ||
418 | pnob->mb_size = sizeof(struct MCC_MAILBOX_AMAP) + 16; | ||
419 | pnob->mb_ptr = pci_alloc_consistent(adapter->pdev, pnob->mb_size, | ||
420 | &pnob->mb_bus); | ||
421 | if (!pnob->mb_bus) | ||
422 | return -1; | ||
423 | memset(pnob->mb_ptr, 0, pnob->mb_size); | ||
424 | pnob->mb_rd.va = PTR_ALIGN(pnob->mb_ptr, 16); | ||
425 | pnob->mb_rd.pa = PTR_ALIGN(pnob->mb_bus, 16); | ||
426 | pnob->mb_rd.length = sizeof(struct MCC_MAILBOX_AMAP); | ||
427 | /* | ||
428 | * Event queue | ||
429 | */ | ||
430 | pnob->event_q_len = EVENT_Q_LEN; | ||
431 | pnob->event_q_size = pnob->event_q_len * sizeof(struct EQ_ENTRY_AMAP); | ||
432 | pnob->event_q = pci_alloc_consistent(adapter->pdev, pnob->event_q_size, | ||
433 | &pnob->event_q_bus); | ||
434 | if (!pnob->event_q_bus) | ||
435 | return -1; | ||
436 | memset(pnob->event_q, 0, pnob->event_q_size); | ||
437 | /* | ||
438 | * Eth TX queue | ||
439 | */ | ||
440 | pnob->tx_q_len = ETH_TXQ_LEN; | ||
441 | pnob->tx_q_port = 0; | ||
442 | pnob->tx_q_size = pnob->tx_q_len * sizeof(struct ETH_WRB_AMAP); | ||
443 | pnob->tx_q = pci_alloc_consistent(adapter->pdev, pnob->tx_q_size, | ||
444 | &pnob->tx_q_bus); | ||
445 | if (!pnob->tx_q_bus) | ||
446 | return -1; | ||
447 | memset(pnob->tx_q, 0, pnob->tx_q_size); | ||
448 | /* | ||
449 | * Eth TX Compl queue | ||
450 | */ | ||
451 | pnob->txcq_len = ETH_TXCQ_LEN; | ||
452 | pnob->tx_cq_size = pnob->txcq_len * sizeof(struct ETH_TX_COMPL_AMAP); | ||
453 | pnob->tx_cq = pci_alloc_consistent(adapter->pdev, pnob->tx_cq_size, | ||
454 | &pnob->tx_cq_bus); | ||
455 | if (!pnob->tx_cq_bus) | ||
456 | return -1; | ||
457 | memset(pnob->tx_cq, 0, pnob->tx_cq_size); | ||
458 | /* | ||
459 | * Eth RX queue | ||
460 | */ | ||
461 | pnob->rx_q_len = ETH_RXQ_LEN; | ||
462 | pnob->rx_q_size = pnob->rx_q_len * sizeof(struct ETH_RX_D_AMAP); | ||
463 | pnob->rx_q = pci_alloc_consistent(adapter->pdev, pnob->rx_q_size, | ||
464 | &pnob->rx_q_bus); | ||
465 | if (!pnob->rx_q_bus) | ||
466 | return -1; | ||
467 | memset(pnob->rx_q, 0, pnob->rx_q_size); | ||
468 | /* | ||
469 | * Eth Unicast RX Compl queue | ||
470 | */ | ||
471 | pnob->rx_cq_len = ETH_UC_RXCQ_LEN; | ||
472 | pnob->rx_cq_size = pnob->rx_cq_len * | ||
473 | sizeof(struct ETH_RX_COMPL_AMAP); | ||
474 | pnob->rx_cq = pci_alloc_consistent(adapter->pdev, pnob->rx_cq_size, | ||
475 | &pnob->rx_cq_bus); | ||
476 | if (!pnob->rx_cq_bus) | ||
477 | return -1; | ||
478 | memset(pnob->rx_cq, 0, pnob->rx_cq_size); | ||
479 | |||
480 | /* TX resources */ | ||
481 | size = pnob->tx_q_len * sizeof(void **); | ||
482 | pnob->tx_ctxt = kzalloc(size, GFP_KERNEL); | ||
483 | if (pnob->tx_ctxt == NULL) | ||
484 | return -1; | ||
485 | |||
486 | /* RX resources */ | ||
487 | size = pnob->rx_q_len * sizeof(void *); | ||
488 | pnob->rx_ctxt = kzalloc(size, GFP_KERNEL); | ||
489 | if (pnob->rx_ctxt == NULL) | ||
490 | return -1; | ||
491 | |||
492 | size = (pnob->rx_q_len * sizeof(struct be_rx_page_info)); | ||
493 | pnob->rx_page_info = kzalloc(size, GFP_KERNEL); | ||
494 | if (pnob->rx_page_info == NULL) | ||
495 | return -1; | ||
496 | |||
497 | adapter->eth_statsp = kzalloc(sizeof(struct FWCMD_ETH_GET_STATISTICS), | ||
498 | GFP_KERNEL); | ||
499 | if (adapter->eth_statsp == NULL) | ||
500 | return -1; | ||
501 | pnob->rx_buf_size = rxbuf_size; | ||
502 | return 0; | ||
503 | } | ||
504 | |||
505 | /* | ||
506 | This function initializes the be_net_object for subsequent | ||
507 | network operations. | ||
508 | |||
509 | Before calling this function, the driver must have allocated | ||
510 | space for the NetObject structure, initialized the structure, | ||
511 | allocated DMAable memory for all the network queues that form | ||
512 | part of the NetObject and populated the start address (virtual) | ||
513 | and number of entries allocated for each queue in the NetObject structure. | ||
514 | |||
515 | The driver must also have allocated memory to hold the | ||
516 | mailbox structure (MCC_MAILBOX) and post the physical address, | ||
517 | virtual addresses and the size of the mailbox memory in the | ||
518 | NetObj.mb_rd. This structure is used by BECLIB for | ||
519 | initial communication with the embedded MCC processor. BECLIB | ||
520 | uses the mailbox until MCC rings are created for more efficient | ||
521 | communication with the MCC processor. | ||
522 | |||
523 | * If the driver wants to create multiple network interfaces for more | ||
524 | * than one protection domain, it can call be_create_netobj() | ||
525 | * multiple times, once for each protection domain. A maximum of | ||
526 | * 32 protection domains is supported. | ||
527 | |||
528 | */ | ||
529 | static int | ||
530 | be_create_netobj(struct be_net_object *pnob, u8 __iomem *csr_va, | ||
531 | u8 __iomem *db_va, u8 __iomem *pci_va) | ||
532 | { | ||
533 | int status = 0; | ||
534 | bool eventable = false, tx_no_delay = false, rx_no_delay = false; | ||
535 | struct be_eq_object *eq_objectp = NULL; | ||
536 | struct be_function_object *pfob = &pnob->fn_obj; | ||
537 | struct ring_desc rd; | ||
538 | u32 set_rxbuf_size; | ||
539 | u32 tx_cmpl_wm = CEV_WMARK_96; /* 0xffffffff to disable */ | ||
540 | u32 rx_cmpl_wm = CEV_WMARK_160; /* 0xffffffff to disable */ | ||
541 | u32 eq_delay = 0; /* delay in 8usec units. 0xffffffff to disable */ | ||
542 | |||
543 | memset(&rd, 0, sizeof(struct ring_desc)); | ||
544 | |||
545 | status = be_function_object_create(csr_va, db_va, pci_va, | ||
546 | BE_FUNCTION_TYPE_NETWORK, &pnob->mb_rd, pfob); | ||
547 | if (status != BE_SUCCESS) | ||
548 | return status; | ||
549 | pnob->fn_obj_created = true; | ||
550 | |||
551 | if (tx_cmpl_wm == 0xffffffff) | ||
552 | tx_no_delay = true; | ||
553 | if (rx_cmpl_wm == 0xffffffff) | ||
554 | rx_no_delay = true; | ||
555 | /* | ||
556 | * now create the necessary rings | ||
557 | * Event Queue first. | ||
558 | */ | ||
559 | if (pnob->event_q_len) { | ||
560 | rd.va = pnob->event_q; | ||
561 | rd.pa = pnob->event_q_bus; | ||
562 | rd.length = pnob->event_q_size; | ||
563 | |||
564 | status = be_eq_create(pfob, &rd, 4, pnob->event_q_len, | ||
565 | (u32) -1, /* CEV_WMARK_* or -1 */ | ||
566 | eq_delay, /* in 8us units, or -1 */ | ||
567 | &pnob->event_q_obj); | ||
568 | if (status != BE_SUCCESS) | ||
569 | goto error_ret; | ||
570 | pnob->event_q_id = pnob->event_q_obj.eq_id; | ||
571 | pnob->event_q_created = 1; | ||
572 | eventable = true; | ||
573 | eq_objectp = &pnob->event_q_obj; | ||
574 | } | ||
575 | /* | ||
576 | * Now Eth Tx Compl. queue. | ||
577 | */ | ||
578 | if (pnob->txcq_len) { | ||
579 | rd.va = pnob->tx_cq; | ||
580 | rd.pa = pnob->tx_cq_bus; | ||
581 | rd.length = pnob->tx_cq_size; | ||
582 | |||
583 | status = be_cq_create(pfob, &rd, | ||
584 | pnob->txcq_len * sizeof(struct ETH_TX_COMPL_AMAP), | ||
585 | false, /* solicited events */ | ||
586 | tx_no_delay, /* nodelay */ | ||
587 | tx_cmpl_wm, /* Watermark encodings */ | ||
588 | eq_objectp, &pnob->tx_cq_obj); | ||
589 | if (status != BE_SUCCESS) | ||
590 | goto error_ret; | ||
591 | |||
592 | pnob->tx_cq_id = pnob->tx_cq_obj.cq_id; | ||
593 | pnob->tx_cq_created = 1; | ||
594 | } | ||
595 | /* | ||
596 | * Eth Tx queue | ||
597 | */ | ||
598 | if (pnob->tx_q_len) { | ||
599 | struct be_eth_sq_parameters ex_params = { 0 }; | ||
600 | u32 type; | ||
601 | |||
602 | if (pnob->tx_q_port) { | ||
603 | /* TXQ to be bound to a specific port */ | ||
604 | type = BE_ETH_TX_RING_TYPE_BOUND; | ||
605 | ex_params.port = pnob->tx_q_port - 1; | ||
606 | } else | ||
607 | type = BE_ETH_TX_RING_TYPE_STANDARD; | ||
608 | |||
609 | rd.va = pnob->tx_q; | ||
610 | rd.pa = pnob->tx_q_bus; | ||
611 | rd.length = pnob->tx_q_size; | ||
612 | |||
613 | status = be_eth_sq_create_ex(pfob, &rd, | ||
614 | pnob->tx_q_len * sizeof(struct ETH_WRB_AMAP), | ||
615 | type, 2, &pnob->tx_cq_obj, | ||
616 | &ex_params, &pnob->tx_q_obj); | ||
617 | |||
618 | if (status != BE_SUCCESS) | ||
619 | goto error_ret; | ||
620 | |||
621 | pnob->tx_q_id = pnob->tx_q_obj.bid; | ||
622 | pnob->tx_q_created = 1; | ||
623 | } | ||
624 | /* | ||
625 | * Now Eth Rx compl. queue. Always needed. | ||
626 | */ | ||
627 | rd.va = pnob->rx_cq; | ||
628 | rd.pa = pnob->rx_cq_bus; | ||
629 | rd.length = pnob->rx_cq_size; | ||
630 | |||
631 | status = be_cq_create(pfob, &rd, | ||
632 | pnob->rx_cq_len * sizeof(struct ETH_RX_COMPL_AMAP), | ||
633 | false, /* solicited events */ | ||
634 | rx_no_delay, /* nodelay */ | ||
635 | rx_cmpl_wm, /* Watermark encodings */ | ||
636 | eq_objectp, &pnob->rx_cq_obj); | ||
637 | if (status != BE_SUCCESS) | ||
638 | goto error_ret; | ||
639 | |||
640 | pnob->rx_cq_id = pnob->rx_cq_obj.cq_id; | ||
641 | pnob->rx_cq_created = 1; | ||
642 | |||
643 | status = be_eth_rq_set_frag_size(pfob, pnob->rx_buf_size, | ||
644 | (u32 *) &set_rxbuf_size); | ||
645 | if (status != BE_SUCCESS) { | ||
646 | be_eth_rq_get_frag_size(pfob, (u32 *) &pnob->rx_buf_size); | ||
647 | if ((pnob->rx_buf_size != 2048) && (pnob->rx_buf_size != 4096) | ||
648 | && (pnob->rx_buf_size != 8192)) | ||
649 | goto error_ret; | ||
650 | } else { | ||
651 | if (pnob->rx_buf_size != set_rxbuf_size) | ||
652 | pnob->rx_buf_size = set_rxbuf_size; | ||
653 | } | ||
654 | /* | ||
655 | * Eth RX queue. be_eth_rq_create() always assumes a size of 2 pages | ||
656 | */ | ||
657 | rd.va = pnob->rx_q; | ||
658 | rd.pa = pnob->rx_q_bus; | ||
659 | rd.length = pnob->rx_q_size; | ||
660 | |||
661 | status = be_eth_rq_create(pfob, &rd, &pnob->rx_cq_obj, | ||
662 | &pnob->rx_cq_obj, &pnob->rx_q_obj); | ||
663 | |||
664 | if (status != BE_SUCCESS) | ||
665 | goto error_ret; | ||
666 | |||
667 | pnob->rx_q_id = pnob->rx_q_obj.rid; | ||
668 | pnob->rx_q_created = 1; | ||
669 | |||
670 | return BE_SUCCESS; /* All required queues created. */ | ||
671 | |||
672 | error_ret: | ||
673 | be_destroy_netobj(pnob); | ||
674 | return status; | ||
675 | } | ||
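
The comment above notes that be_create_netobj() may be called once per protection domain, up to 32 domains. A minimal sketch of that usage, assuming the caller has already allocated and mapped one struct be_net_object per domain (the helper name, array and count below are hypothetical and not part of this driver):

        /* Hypothetical sketch: bring up one NetObject per protection domain. */
        static int be_create_all_netobjs(struct be_net_object **pnobs, int num_pd,
                                         u8 __iomem *csr_va, u8 __iomem *db_va,
                                         u8 __iomem *pci_va)
        {
                int i, status;

                if (num_pd > 32)        /* at most 32 protection domains supported */
                        return -EINVAL;

                for (i = 0; i < num_pd; i++) {
                        status = be_create_netobj(pnobs[i], csr_va, db_va, pci_va);
                        if (status != BE_SUCCESS)
                                return status;  /* caller unwinds objects created so far */
                }
                return BE_SUCCESS;
        }
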
676 | |||
677 | static int be_nob_ring_init(struct be_adapter *adapter, | ||
678 | struct be_net_object *pnob) | ||
679 | { | ||
680 | int status; | ||
681 | |||
682 | pnob->event_q_tl = 0; | ||
683 | |||
684 | pnob->tx_q_hd = 0; | ||
685 | pnob->tx_q_tl = 0; | ||
686 | |||
687 | pnob->tx_cq_tl = 0; | ||
688 | |||
689 | pnob->rx_cq_tl = 0; | ||
690 | |||
691 | memset(pnob->event_q, 0, pnob->event_q_size); | ||
692 | memset(pnob->tx_cq, 0, pnob->tx_cq_size); | ||
693 | memset(pnob->tx_ctxt, 0, pnob->tx_q_len * sizeof(void **)); | ||
694 | memset(pnob->rx_ctxt, 0, pnob->rx_q_len * sizeof(void *)); | ||
695 | pnob->rx_pg_info_hd = 0; | ||
696 | pnob->rx_q_hd = 0; | ||
697 | atomic_set(&pnob->rx_q_posted, 0); | ||
698 | |||
699 | status = be_create_netobj(pnob, adapter->csr_va, adapter->db_va, | ||
700 | adapter->pci_va); | ||
701 | if (status != BE_SUCCESS) | ||
702 | return -1; | ||
703 | |||
704 | be_post_eth_rx_buffs(pnob); | ||
705 | return 0; | ||
706 | } | ||
707 | |||
708 | /* This function handles async callback for link status */ | ||
709 | static void | ||
710 | be_link_status_async_callback(void *context, u32 event_code, void *event) | ||
711 | { | ||
712 | struct ASYNC_EVENT_LINK_STATE_AMAP *link_status = event; | ||
713 | struct be_adapter *adapter = context; | ||
714 | bool link_enable = false; | ||
715 | struct be_net_object *pnob; | ||
716 | struct ASYNC_EVENT_TRAILER_AMAP *async_trailer; | ||
717 | struct net_device *netdev; | ||
718 | u32 async_event_code, async_event_type, active_port; | ||
719 | u32 port0_link_status, port1_link_status, port0_duplex, port1_duplex; | ||
720 | u32 port0_speed, port1_speed; | ||
721 | |||
722 | if (event_code != ASYNC_EVENT_CODE_LINK_STATE) { | ||
723 | /* Not our event to handle */ | ||
724 | return; | ||
725 | } | ||
726 | async_trailer = (struct ASYNC_EVENT_TRAILER_AMAP *) | ||
727 | ((u8 *) event + sizeof(struct MCC_CQ_ENTRY_AMAP) - | ||
728 | sizeof(struct ASYNC_EVENT_TRAILER_AMAP)); | ||
729 | |||
730 | async_event_code = AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER, event_code, | ||
731 | async_trailer); | ||
732 | BUG_ON(async_event_code != ASYNC_EVENT_CODE_LINK_STATE); | ||
733 | |||
734 | pnob = adapter->net_obj; | ||
735 | netdev = pnob->netdev; | ||
736 | |||
737 | /* Determine if this event is a switch VLD or a physical link event */ | ||
738 | async_event_type = AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER, event_type, | ||
739 | async_trailer); | ||
740 | active_port = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE, | ||
741 | active_port, link_status); | ||
742 | port0_link_status = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE, | ||
743 | port0_link_status, link_status); | ||
744 | port1_link_status = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE, | ||
745 | port1_link_status, link_status); | ||
746 | port0_duplex = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE, | ||
747 | port0_duplex, link_status); | ||
748 | port1_duplex = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE, | ||
749 | port1_duplex, link_status); | ||
750 | port0_speed = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE, | ||
751 | port0_speed, link_status); | ||
752 | port1_speed = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE, | ||
753 | port1_speed, link_status); | ||
754 | if (async_event_type == NTWK_LINK_TYPE_VIRTUAL) { | ||
755 | adapter->be_stat.bes_link_change_virtual++; | ||
756 | if (adapter->be_link_sts->active_port != active_port) { | ||
757 | dev_notice(&netdev->dev, | ||
758 | "Active port changed due to VLD on switch\n"); | ||
759 | } else { | ||
760 | dev_notice(&netdev->dev, "Link status update\n"); | ||
761 | } | ||
762 | |||
763 | } else { | ||
764 | adapter->be_stat.bes_link_change_physical++; | ||
765 | if (adapter->be_link_sts->active_port != active_port) { | ||
766 | dev_notice(&netdev->dev, | ||
767 | "Active port changed due to port link" | ||
768 | " status change\n"); | ||
769 | } else { | ||
770 | dev_notice(&netdev->dev, "Link status update\n"); | ||
771 | } | ||
772 | } | ||
773 | |||
774 | memset(adapter->be_link_sts, 0, sizeof(*adapter->be_link_sts)); | ||
775 | |||
776 | if ((port0_link_status == ASYNC_EVENT_LINK_UP) || | ||
777 | (port1_link_status == ASYNC_EVENT_LINK_UP)) { | ||
778 | if ((adapter->port0_link_sts == BE_PORT_LINK_DOWN) && | ||
779 | (adapter->port1_link_sts == BE_PORT_LINK_DOWN)) { | ||
780 | /* Earlier both the ports were down, so the link is now up */ | ||
781 | link_enable = true; | ||
782 | } | ||
783 | |||
784 | if (port0_link_status == ASYNC_EVENT_LINK_UP) { | ||
785 | adapter->port0_link_sts = BE_PORT_LINK_UP; | ||
786 | adapter->be_link_sts->mac0_duplex = port0_duplex; | ||
787 | adapter->be_link_sts->mac0_speed = port0_speed; | ||
788 | if (active_port == NTWK_PORT_A) | ||
789 | adapter->be_link_sts->active_port = 0; | ||
790 | } else | ||
791 | adapter->port0_link_sts = BE_PORT_LINK_DOWN; | ||
792 | |||
793 | if (port1_link_status == ASYNC_EVENT_LINK_UP) { | ||
794 | adapter->port1_link_sts = BE_PORT_LINK_UP; | ||
795 | adapter->be_link_sts->mac1_duplex = port1_duplex; | ||
796 | adapter->be_link_sts->mac1_speed = port1_speed; | ||
797 | if (active_port == NTWK_PORT_B) | ||
798 | adapter->be_link_sts->active_port = 1; | ||
799 | } else | ||
800 | adapter->port1_link_sts = BE_PORT_LINK_DOWN; | ||
801 | |||
802 | printk(KERN_INFO "Link Properties for %s:\n", netdev->name); | ||
803 | dev_info(&netdev->dev, "Link Properties:\n"); | ||
804 | be_print_link_info(adapter->be_link_sts); | ||
805 | |||
806 | if (!link_enable) | ||
807 | return; | ||
808 | /* | ||
809 | * Both ports were down previously, but at least one of | ||
810 | * them has come up now. If this netdevice's carrier is not up, | ||
811 | * then indicate link up to the stack. | ||
812 | */ | ||
813 | if (!netif_carrier_ok(netdev)) { | ||
814 | netif_start_queue(netdev); | ||
815 | netif_carrier_on(netdev); | ||
816 | } | ||
817 | return; | ||
818 | } | ||
819 | |||
820 | /* Now both the ports are down. Tell the stack about it */ | ||
821 | dev_info(&netdev->dev, "Both ports are down\n"); | ||
822 | adapter->port0_link_sts = BE_PORT_LINK_DOWN; | ||
823 | adapter->port1_link_sts = BE_PORT_LINK_DOWN; | ||
824 | if (netif_carrier_ok(netdev)) { | ||
825 | netif_carrier_off(netdev); | ||
826 | netif_stop_queue(netdev); | ||
827 | } | ||
828 | return; | ||
829 | } | ||
830 | |||
831 | static int be_mcc_create(struct be_adapter *adapter) | ||
832 | { | ||
833 | struct be_net_object *pnob; | ||
834 | |||
835 | pnob = adapter->net_obj; | ||
836 | /* | ||
837 | * Create the MCC ring so that all further communication with | ||
838 | * MCC can go through the ring. We do this at the end since | ||
839 | * we do not want to be dealing with interrupts until the | ||
840 | * initialization is complete. | ||
841 | */ | ||
842 | pnob->mcc_q_len = MCC_Q_LEN; | ||
843 | pnob->mcc_q_size = pnob->mcc_q_len * sizeof(struct MCC_WRB_AMAP); | ||
844 | pnob->mcc_q = pci_alloc_consistent(adapter->pdev, pnob->mcc_q_size, | ||
845 | &pnob->mcc_q_bus); | ||
846 | if (!pnob->mcc_q_bus) | ||
847 | return -1; | ||
848 | /* | ||
849 | * space for MCC WRB context | ||
850 | */ | ||
851 | pnob->mcc_wrb_ctxtLen = MCC_Q_LEN; | ||
852 | pnob->mcc_wrb_ctxt_size = pnob->mcc_wrb_ctxtLen * | ||
853 | sizeof(struct be_mcc_wrb_context); | ||
854 | pnob->mcc_wrb_ctxt = (void *)__get_free_pages(GFP_KERNEL, | ||
855 | get_order(pnob->mcc_wrb_ctxt_size)); | ||
856 | if (pnob->mcc_wrb_ctxt == NULL) | ||
857 | return -1; | ||
858 | /* | ||
859 | * Space for MCC compl. ring | ||
860 | */ | ||
861 | pnob->mcc_cq_len = MCC_CQ_LEN; | ||
862 | pnob->mcc_cq_size = pnob->mcc_cq_len * sizeof(struct MCC_CQ_ENTRY_AMAP); | ||
863 | pnob->mcc_cq = pci_alloc_consistent(adapter->pdev, pnob->mcc_cq_size, | ||
864 | &pnob->mcc_cq_bus); | ||
865 | if (!pnob->mcc_cq_bus) | ||
866 | return -1; | ||
867 | return 0; | ||
868 | } | ||
869 | |||
870 | /* | ||
871 | This function creates the MCC request and completion ring required | ||
872 | for communicating with the ARM processor. The caller must have | ||
873 | allocated required amount of memory for the MCC ring and MCC | ||
874 | completion ring and posted the virtual address and number of | ||
875 | entries in the corresponding members (mcc_q and mcc_cq) in the | ||
876 | NetObject structure. | ||
877 | |||
878 | When this call is completed, all further communication with | ||
879 | ARM will switch from mailbox to this ring. | ||
880 | |||
881 | pnob - Pointer to the NetObject structure. This NetObject should | ||
882 | have been created using a previous call to be_create_netobj() | ||
883 | */ | ||
884 | int be_create_mcc_rings(struct be_net_object *pnob) | ||
885 | { | ||
886 | int status = 0; | ||
887 | struct ring_desc rd; | ||
888 | struct be_function_object *pfob = &pnob->fn_obj; | ||
889 | |||
890 | memset(&rd, 0, sizeof(struct ring_desc)); | ||
891 | if (pnob->mcc_cq_len) { | ||
892 | rd.va = pnob->mcc_cq; | ||
893 | rd.pa = pnob->mcc_cq_bus; | ||
894 | rd.length = pnob->mcc_cq_size; | ||
895 | |||
896 | status = be_cq_create(pfob, &rd, | ||
897 | pnob->mcc_cq_len * sizeof(struct MCC_CQ_ENTRY_AMAP), | ||
898 | false, /* solicited events */ | ||
899 | true, /* nodelay */ | ||
900 | 0, /* 0 Watermark since Nodelay is true */ | ||
901 | &pnob->event_q_obj, | ||
902 | &pnob->mcc_cq_obj); | ||
903 | |||
904 | if (status != BE_SUCCESS) | ||
905 | return status; | ||
906 | |||
907 | pnob->mcc_cq_id = pnob->mcc_cq_obj.cq_id; | ||
908 | pnob->mcc_cq_created = 1; | ||
909 | } | ||
910 | if (pnob->mcc_q_len) { | ||
911 | rd.va = pnob->mcc_q; | ||
912 | rd.pa = pnob->mcc_q_bus; | ||
913 | rd.length = pnob->mcc_q_size; | ||
914 | |||
915 | status = be_mcc_ring_create(pfob, &rd, | ||
916 | pnob->mcc_q_len * sizeof(struct MCC_WRB_AMAP), | ||
917 | pnob->mcc_wrb_ctxt, pnob->mcc_wrb_ctxtLen, | ||
918 | &pnob->mcc_cq_obj, &pnob->mcc_q_obj); | ||
919 | |||
920 | if (status != BE_SUCCESS) | ||
921 | return status; | ||
922 | |||
923 | pnob->mcc_q_created = 1; | ||
924 | } | ||
925 | return BE_SUCCESS; | ||
926 | } | ||
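
As the comment above be_create_mcc_rings() describes, the driver talks to the firmware through the bootstrap mailbox (set up in be_create_netobj() via be_function_object_create()) until the MCC rings exist. A condensed, hypothetical helper showing the bring-up order this file actually uses in be_probe():

        /* Hypothetical condensation of the MCC bring-up done in be_probe() */
        static int be_bring_up_mcc(struct be_adapter *adapter)
        {
                int status;

                /* allocate the MCC WRB ring, completion ring and WRB contexts */
                status = be_mcc_create(adapter);
                if (status < 0)
                        return status;

                /* zero the rings and create them in hardware via be_create_mcc_rings() */
                status = be_mcc_init(adapter);
                if (status < 0)
                        return status;

                /* from here on, commands and async events use the MCC ring,
                 * not the bootstrap mailbox
                 */
                return 0;
        }
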
927 | |||
928 | static int be_mcc_init(struct be_adapter *adapter) | ||
929 | { | ||
930 | u32 r; | ||
931 | struct be_net_object *pnob; | ||
932 | |||
933 | pnob = adapter->net_obj; | ||
934 | memset(pnob->mcc_q, 0, pnob->mcc_q_size); | ||
935 | pnob->mcc_q_hd = 0; | ||
936 | |||
937 | memset(pnob->mcc_wrb_ctxt, 0, pnob->mcc_wrb_ctxt_size); | ||
938 | |||
939 | memset(pnob->mcc_cq, 0, pnob->mcc_cq_size); | ||
940 | pnob->mcc_cq_tl = 0; | ||
941 | |||
942 | r = be_create_mcc_rings(adapter->net_obj); | ||
943 | if (r != BE_SUCCESS) | ||
944 | return -1; | ||
945 | |||
946 | return 0; | ||
947 | } | ||
948 | |||
949 | static void be_remove(struct pci_dev *pdev) | ||
950 | { | ||
951 | struct be_net_object *pnob; | ||
952 | struct be_adapter *adapter; | ||
953 | |||
954 | adapter = pci_get_drvdata(pdev); | ||
955 | if (!adapter) | ||
956 | return; | ||
957 | |||
958 | pci_set_drvdata(pdev, NULL); | ||
959 | pnob = (struct be_net_object *)adapter->net_obj; | ||
960 | |||
961 | flush_scheduled_work(); | ||
962 | |||
963 | if (pnob) { | ||
964 | /* Unregister async callback function for link status updates */ | ||
965 | if (pnob->mcc_q_created) | ||
966 | be_mcc_add_async_event_callback(&pnob->mcc_q_obj, | ||
967 | NULL, NULL); | ||
968 | netobject_cleanup(adapter, pnob); | ||
969 | } | ||
970 | |||
971 | if (adapter->csr_va) | ||
972 | iounmap(adapter->csr_va); | ||
973 | if (adapter->db_va) | ||
974 | iounmap(adapter->db_va); | ||
975 | if (adapter->pci_va) | ||
976 | iounmap(adapter->pci_va); | ||
977 | |||
978 | pci_release_regions(adapter->pdev); | ||
979 | pci_disable_device(adapter->pdev); | ||
980 | |||
981 | kfree(adapter->be_link_sts); | ||
982 | kfree(adapter->eth_statsp); | ||
983 | |||
984 | if (adapter->timer_ctxt.get_stats_timer.function) | ||
985 | del_timer_sync(&adapter->timer_ctxt.get_stats_timer); | ||
986 | kfree(adapter); | ||
987 | } | ||
988 | |||
989 | /* | ||
990 | * This function is called by the PCI sub-system when it finds a PCI | ||
991 | * device with dev/vendor IDs that match one of our devices. | ||
992 | * All of the driver initialization is done in this function. | ||
993 | */ | ||
994 | static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) | ||
995 | { | ||
996 | int status = 0; | ||
997 | struct be_adapter *adapter; | ||
998 | struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD get_fwv; | ||
999 | struct be_net_object *pnob; | ||
1000 | struct net_device *netdev; | ||
1001 | |||
1002 | status = pci_enable_device(pdev); | ||
1003 | if (status) | ||
1004 | goto error; | ||
1005 | |||
1006 | status = pci_request_regions(pdev, be_driver_name); | ||
1007 | if (status) | ||
1008 | goto error_pci_req; | ||
1009 | |||
1010 | pci_set_master(pdev); | ||
1011 | adapter = kzalloc(sizeof(struct be_adapter), GFP_KERNEL); | ||
1012 | if (adapter == NULL) { | ||
1013 | status = -ENOMEM; | ||
1014 | goto error_adapter; | ||
1015 | } | ||
1016 | adapter->dev_state = BE_DEV_STATE_NONE; | ||
1017 | adapter->pdev = pdev; | ||
1018 | pci_set_drvdata(pdev, adapter); | ||
1019 | |||
1020 | adapter->enable_aic = 1; | ||
1021 | adapter->max_eqd = MAX_EQD; | ||
1022 | adapter->min_eqd = 0; | ||
1023 | adapter->cur_eqd = 0; | ||
1024 | |||
1025 | status = pci_set_dma_mask(pdev, DMA_64BIT_MASK); | ||
1026 | if (!status) { | ||
1027 | adapter->dma_64bit_cap = true; | ||
1028 | } else { | ||
1029 | adapter->dma_64bit_cap = false; | ||
1030 | status = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | ||
1031 | if (status != 0) { | ||
1032 | printk(KERN_ERR "Could not set PCI DMA Mask\n"); | ||
1033 | goto cleanup; | ||
1034 | } | ||
1035 | } | ||
1036 | |||
1037 | status = init_pci_be_function(adapter, pdev); | ||
1038 | if (status != 0) { | ||
1039 | printk(KERN_ERR "Failed to map PCI BARS\n"); | ||
1040 | status = -ENOMEM; | ||
1041 | goto cleanup; | ||
1042 | } | ||
1043 | |||
1044 | be_trace_set_level(DL_ALWAYS | DL_ERR); | ||
1045 | |||
1046 | adapter->be_link_sts = kmalloc(sizeof(struct BE_LINK_STATUS), | ||
1047 | GFP_KERNEL); | ||
1048 | if (adapter->be_link_sts == NULL) { | ||
1049 | printk(KERN_ERR "Memory allocation for link status " | ||
1050 | "buffer failed\n"); | ||
1051 | goto cleanup; | ||
1052 | } | ||
1053 | spin_lock_init(&adapter->txq_lock); | ||
1054 | |||
1055 | netdev = alloc_etherdev(sizeof(struct be_net_object)); | ||
1056 | if (netdev == NULL) { | ||
1057 | status = -ENOMEM; | ||
1058 | goto cleanup; | ||
1059 | } | ||
1060 | pnob = netdev_priv(netdev); | ||
1061 | adapter->net_obj = pnob; | ||
1062 | adapter->netdevp = netdev; | ||
1063 | pnob->adapter = adapter; | ||
1064 | pnob->netdev = netdev; | ||
1065 | |||
1066 | status = be_nob_ring_alloc(adapter, pnob); | ||
1067 | if (status != 0) | ||
1068 | goto cleanup; | ||
1069 | |||
1070 | status = be_nob_ring_init(adapter, pnob); | ||
1071 | if (status != 0) | ||
1072 | goto cleanup; | ||
1073 | |||
1074 | be_rxf_mac_address_read_write(&pnob->fn_obj, false, false, false, | ||
1075 | false, false, netdev->dev_addr, NULL, NULL); | ||
1076 | |||
1077 | netdev->init = &benet_init; | ||
1078 | netif_carrier_off(netdev); | ||
1079 | netif_stop_queue(netdev); | ||
1080 | |||
1081 | SET_NETDEV_DEV(netdev, &(adapter->pdev->dev)); | ||
1082 | |||
1083 | netif_napi_add(netdev, &pnob->napi, be_poll, 64); | ||
1084 | |||
1085 | /* if the rx_frag size is 2K, one page is shared as two RX frags */ | ||
1086 | pnob->rx_pg_shared = | ||
1087 | (pnob->rx_buf_size <= PAGE_SIZE / 2) ? true : false; | ||
1088 | if (pnob->rx_buf_size != rxbuf_size) { | ||
1089 | printk(KERN_WARNING | ||
1090 | "Could not set Rx buffer size to %d. Using %d\n", | ||
1091 | rxbuf_size, pnob->rx_buf_size); | ||
1092 | rxbuf_size = pnob->rx_buf_size; | ||
1093 | } | ||
1094 | |||
1095 | tasklet_init(&(adapter->sts_handler), be_process_intr, | ||
1096 | (unsigned long)adapter); | ||
1097 | adapter->tasklet_started = 1; | ||
1098 | spin_lock_init(&(adapter->int_lock)); | ||
1099 | |||
1100 | status = be_register_isr(adapter, pnob); | ||
1101 | if (status != 0) | ||
1102 | goto cleanup; | ||
1103 | |||
1104 | adapter->rx_csum = 1; | ||
1105 | adapter->max_rx_coal = BE_LRO_MAX_PKTS; | ||
1106 | |||
1107 | memset(&get_fwv, 0, | ||
1108 | sizeof(struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD)); | ||
1109 | printk(KERN_INFO "BladeEngine Driver version:%s. " | ||
1110 | "Copyright ServerEngines, Corporation 2005 - 2008\n", | ||
1111 | be_drvr_ver); | ||
1112 | status = be_function_get_fw_version(&pnob->fn_obj, &get_fwv, NULL, | ||
1113 | NULL); | ||
1114 | if (status == BE_SUCCESS) { | ||
1115 | strncpy(be_fw_ver, get_fwv.firmware_version_string, 32); | ||
1116 | printk(KERN_INFO "BladeEngine Firmware Version:%s\n", | ||
1117 | get_fwv.firmware_version_string); | ||
1118 | } else { | ||
1119 | printk(KERN_WARNING "Unable to get BE Firmware Version\n"); | ||
1120 | } | ||
1121 | |||
1122 | sema_init(&adapter->get_eth_stat_sem, 0); | ||
1123 | init_timer(&adapter->timer_ctxt.get_stats_timer); | ||
1124 | atomic_set(&adapter->timer_ctxt.get_stat_flag, 0); | ||
1125 | adapter->timer_ctxt.get_stats_timer.function = | ||
1126 | &be_get_stats_timer_handler; | ||
1127 | |||
1128 | status = be_mcc_create(adapter); | ||
1129 | if (status < 0) | ||
1130 | goto cleanup; | ||
1131 | status = be_mcc_init(adapter); | ||
1132 | if (status < 0) | ||
1133 | goto cleanup; | ||
1134 | |||
1135 | |||
1136 | status = be_mcc_add_async_event_callback(&adapter->net_obj->mcc_q_obj, | ||
1137 | be_link_status_async_callback, (void *)adapter); | ||
1138 | if (status != BE_SUCCESS) { | ||
1139 | printk(KERN_WARNING "add_async_event_callback failed"); | ||
1140 | printk(KERN_WARNING | ||
1141 | "Link status changes may not be reflected\n"); | ||
1142 | } | ||
1143 | |||
1144 | status = register_netdev(netdev); | ||
1145 | if (status != 0) | ||
1146 | goto cleanup; | ||
1147 | be_update_link_status(adapter); | ||
1148 | adapter->dev_state = BE_DEV_STATE_INIT; | ||
1149 | return 0; | ||
1150 | |||
1151 | cleanup: | ||
1152 | be_remove(pdev); | ||
1153 | return status; | ||
1154 | error_adapter: | ||
1155 | pci_release_regions(pdev); | ||
1156 | error_pci_req: | ||
1157 | pci_disable_device(pdev); | ||
1158 | error: | ||
1159 | printk(KERN_ERR "BladeEngine initialization failed\n"); | ||
1160 | return status; | ||
1161 | } | ||
1162 | |||
1163 | /* | ||
1164 | * Get the current link status and print the status on console | ||
1165 | */ | ||
1166 | void be_update_link_status(struct be_adapter *adapter) | ||
1167 | { | ||
1168 | int status; | ||
1169 | struct be_net_object *pnob = adapter->net_obj; | ||
1170 | |||
1171 | status = be_rxf_link_status(&pnob->fn_obj, adapter->be_link_sts, NULL, | ||
1172 | NULL, NULL); | ||
1173 | if (status == BE_SUCCESS) { | ||
1174 | if (adapter->be_link_sts->mac0_speed && | ||
1175 | adapter->be_link_sts->mac0_duplex) | ||
1176 | adapter->port0_link_sts = BE_PORT_LINK_UP; | ||
1177 | else | ||
1178 | adapter->port0_link_sts = BE_PORT_LINK_DOWN; | ||
1179 | |||
1180 | if (adapter->be_link_sts->mac1_speed && | ||
1181 | adapter->be_link_sts->mac1_duplex) | ||
1182 | adapter->port1_link_sts = BE_PORT_LINK_UP; | ||
1183 | else | ||
1184 | adapter->port1_link_sts = BE_PORT_LINK_DOWN; | ||
1185 | |||
1186 | dev_info(&pnob->netdev->dev, "Link Properties:\n"); | ||
1187 | be_print_link_info(adapter->be_link_sts); | ||
1188 | return; | ||
1189 | } | ||
1190 | dev_info(&pnob->netdev->dev, "Could not get link status\n"); | ||
1191 | return; | ||
1192 | } | ||
1193 | |||
1194 | |||
1195 | #ifdef CONFIG_PM | ||
1196 | static void | ||
1197 | be_pm_cleanup(struct be_adapter *adapter, | ||
1198 | struct be_net_object *pnob, struct net_device *netdev) | ||
1199 | { | ||
1200 | netif_carrier_off(netdev); | ||
1201 | netif_stop_queue(netdev); | ||
1202 | |||
1203 | be_wait_nic_tx_cmplx_cmpl(pnob); | ||
1204 | be_disable_eq_intr(pnob); | ||
1205 | |||
1206 | if (adapter->tasklet_started) { | ||
1207 | tasklet_kill(&adapter->sts_handler); | ||
1208 | adapter->tasklet_started = 0; | ||
1209 | } | ||
1210 | |||
1211 | be_unregister_isr(adapter); | ||
1212 | be_disable_intr(pnob); | ||
1213 | |||
1214 | be_tx_q_clean(pnob); | ||
1215 | be_rx_q_clean(pnob); | ||
1216 | |||
1217 | be_destroy_netobj(pnob); | ||
1218 | } | ||
1219 | |||
1220 | static int be_suspend(struct pci_dev *pdev, pm_message_t state) | ||
1221 | { | ||
1222 | struct be_adapter *adapter = pci_get_drvdata(pdev); | ||
1223 | struct net_device *netdev = adapter->netdevp; | ||
1224 | struct be_net_object *pnob = netdev_priv(netdev); | ||
1225 | |||
1226 | adapter->dev_pm_state = adapter->dev_state; | ||
1227 | adapter->dev_state = BE_DEV_STATE_SUSPEND; | ||
1228 | |||
1229 | netif_device_detach(netdev); | ||
1230 | if (netif_running(netdev)) | ||
1231 | be_pm_cleanup(adapter, pnob, netdev); | ||
1232 | |||
1233 | pci_enable_wake(pdev, 3, 1); | ||
1234 | pci_enable_wake(pdev, 4, 1); /* D3 Cold = 4 */ | ||
1235 | pci_save_state(pdev); | ||
1236 | pci_disable_device(pdev); | ||
1237 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | ||
1238 | return 0; | ||
1239 | } | ||
1240 | |||
1241 | static void be_up(struct be_adapter *adapter) | ||
1242 | { | ||
1243 | struct be_net_object *pnob = adapter->net_obj; | ||
1244 | |||
1245 | if (pnob->num_vlans != 0) | ||
1246 | be_rxf_vlan_config(&pnob->fn_obj, false, pnob->num_vlans, | ||
1247 | pnob->vlan_tag, NULL, NULL, NULL); | ||
1248 | |||
1249 | } | ||
1250 | |||
1251 | static int be_resume(struct pci_dev *pdev) | ||
1252 | { | ||
1253 | int status = 0; | ||
1254 | struct be_adapter *adapter = pci_get_drvdata(pdev); | ||
1255 | struct net_device *netdev = adapter->netdevp; | ||
1256 | struct be_net_object *pnob = netdev_priv(netdev); | ||
1257 | |||
1258 | netif_device_detach(netdev); | ||
1259 | |||
1260 | status = pci_enable_device(pdev); | ||
1261 | if (status) | ||
1262 | return status; | ||
1263 | |||
1264 | pci_set_power_state(pdev, 0); | ||
1265 | pci_restore_state(pdev); | ||
1266 | pci_enable_wake(pdev, 3, 0); | ||
1267 | pci_enable_wake(pdev, 4, 0); /* 4 is D3 cold */ | ||
1268 | |||
1269 | netif_carrier_on(netdev); | ||
1270 | netif_start_queue(netdev); | ||
1271 | |||
1272 | if (netif_running(netdev)) { | ||
1273 | be_rxf_mac_address_read_write(&pnob->fn_obj, false, false, | ||
1274 | false, true, false, netdev->dev_addr, NULL, NULL); | ||
1275 | |||
1276 | status = be_nob_ring_init(adapter, pnob); | ||
1277 | if (status < 0) | ||
1278 | return status; | ||
1279 | |||
1280 | tasklet_init(&(adapter->sts_handler), be_process_intr, | ||
1281 | (unsigned long)adapter); | ||
1282 | adapter->tasklet_started = 1; | ||
1283 | |||
1284 | if (be_register_isr(adapter, pnob) != 0) { | ||
1285 | printk(KERN_ERR "be_register_isr failed\n"); | ||
1286 | return status; | ||
1287 | } | ||
1288 | |||
1289 | |||
1290 | status = be_mcc_init(adapter); | ||
1291 | if (status < 0) { | ||
1292 | printk(KERN_ERR "be_mcc_init failed\n"); | ||
1293 | return status; | ||
1294 | } | ||
1295 | be_update_link_status(adapter); | ||
1296 | /* | ||
1297 | * Register async call back function to handle link | ||
1298 | * status updates | ||
1299 | */ | ||
1300 | status = be_mcc_add_async_event_callback( | ||
1301 | &adapter->net_obj->mcc_q_obj, | ||
1302 | be_link_status_async_callback, (void *)adapter); | ||
1303 | if (status != BE_SUCCESS) { | ||
1304 | printk(KERN_WARNING "add_async_event_callback failed"); | ||
1305 | printk(KERN_WARNING | ||
1306 | "Link status changes may not be reflected\n"); | ||
1307 | } | ||
1308 | be_enable_intr(pnob); | ||
1309 | be_enable_eq_intr(pnob); | ||
1310 | be_up(adapter); | ||
1311 | } | ||
1312 | netif_device_attach(netdev); | ||
1313 | adapter->dev_state = adapter->dev_pm_state; | ||
1314 | return 0; | ||
1315 | |||
1316 | } | ||
1317 | |||
1318 | #endif | ||
1319 | |||
1320 | /* Wait until no more pending transmits */ | ||
1321 | void be_wait_nic_tx_cmplx_cmpl(struct be_net_object *pnob) | ||
1322 | { | ||
1323 | int i; | ||
1324 | |||
1325 | /* Wait for 20us * 50000 (= 1s) and no more */ | ||
1326 | i = 0; | ||
1327 | while ((pnob->tx_q_tl != pnob->tx_q_hd) && (i < 50000)) { | ||
1328 | ++i; | ||
1329 | udelay(20); | ||
1330 | } | ||
1331 | |||
1332 | /* Check for no more pending transmits */ | ||
1333 | if (i >= 50000) { | ||
1334 | printk(KERN_WARNING | ||
1335 | "Did not receive completions for all TX requests\n"); | ||
1336 | } | ||
1337 | } | ||
1338 | |||
1339 | static struct pci_driver be_driver = { | ||
1340 | .name = be_driver_name, | ||
1341 | .id_table = be_device_id_table, | ||
1342 | .probe = be_probe, | ||
1343 | #ifdef CONFIG_PM | ||
1344 | .suspend = be_suspend, | ||
1345 | .resume = be_resume, | ||
1346 | #endif | ||
1347 | .remove = be_remove | ||
1348 | }; | ||
1349 | |||
1350 | /* | ||
1351 | * Module init entry point. Registers our device and returns. | ||
1352 | * Our probe will be called if the device is found. | ||
1353 | */ | ||
1354 | static int __init be_init_module(void) | ||
1355 | { | ||
1356 | int ret; | ||
1357 | |||
1358 | if (rxbuf_size != 8192 && rxbuf_size != 4096 && rxbuf_size != 2048) { | ||
1359 | printk(KERN_WARNING | ||
1360 | "Unsupported receive buffer size (%d) requested\n", | ||
1361 | rxbuf_size); | ||
1362 | printk(KERN_WARNING | ||
1363 | "Must be 2048, 4096 or 8192. Defaulting to 2048\n"); | ||
1364 | rxbuf_size = 2048; | ||
1365 | } | ||
1366 | |||
1367 | ret = pci_register_driver(&be_driver); | ||
1368 | |||
1369 | return ret; | ||
1370 | } | ||
1371 | |||
1372 | module_init(be_init_module); | ||
1373 | |||
1374 | /* | ||
1375 | * be_exit_module - Driver Exit Cleanup Routine | ||
1376 | */ | ||
1377 | static void __exit be_exit_module(void) | ||
1378 | { | ||
1379 | pci_unregister_driver(&be_driver); | ||
1380 | } | ||
1381 | |||
1382 | module_exit(be_exit_module); | ||
diff --git a/drivers/staging/benet/be_int.c b/drivers/staging/benet/be_int.c deleted file mode 100644 index cba95d09a8b6..000000000000 --- a/drivers/staging/benet/be_int.c +++ /dev/null | |||
@@ -1,863 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | #include <linux/if_vlan.h> | ||
18 | #include <linux/inet_lro.h> | ||
19 | |||
20 | #include "benet.h" | ||
21 | |||
22 | /* number of bytes of RX frame that are copied to skb->data */ | ||
23 | #define BE_HDR_LEN 64 | ||
24 | |||
25 | #define NETIF_RX(skb) netif_receive_skb(skb) | ||
26 | #define VLAN_ACCEL_RX(skb, pnob, vt) \ | ||
27 | vlan_hwaccel_rx(skb, pnob->vlan_grp, vt) | ||
28 | |||
29 | /* | ||
30 | This function notifies BladeEngine of the number of completion | ||
31 | entries processed from the specified completion queue by writing | ||
32 | the number of popped entries to the door bell. | ||
33 | |||
34 | pnob - Pointer to the NetObject structure | ||
35 | n - Number of completion entries processed | ||
36 | cq_id - Queue ID of the completion queue for which notification | ||
37 | is being done. | ||
38 | re_arm - 1 - rearm the completion ring to generate an event. | ||
39 | - 0 - don't rearm the completion ring to generate an event | ||
40 | */ | ||
41 | void be_notify_cmpl(struct be_net_object *pnob, int n, int cq_id, int re_arm) | ||
42 | { | ||
43 | struct CQ_DB_AMAP cqdb; | ||
44 | |||
45 | cqdb.dw[0] = 0; | ||
46 | AMAP_SET_BITS_PTR(CQ_DB, qid, &cqdb, cq_id); | ||
47 | AMAP_SET_BITS_PTR(CQ_DB, rearm, &cqdb, re_arm); | ||
48 | AMAP_SET_BITS_PTR(CQ_DB, num_popped, &cqdb, n); | ||
49 | PD_WRITE(&pnob->fn_obj, cq_db, cqdb.dw[0]); | ||
50 | } | ||
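
For reference, this is how the completion paths later in this file use the helper; be_poll(), for instance, reports its popped entries and re-arms the RX completion queue in one call:

        /* from be_poll(): report work_done popped entries and re-arm the RX CQ */
        be_notify_cmpl(pnob, work_done, pnob->rx_cq_id, 1);
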
51 | |||
52 | /* | ||
53 | * adds additional receive frags indicated by BE starting from given | ||
54 | * frag index (fi) to specified skb's frag list | ||
55 | */ | ||
56 | static void | ||
57 | add_skb_frags(struct be_net_object *pnob, struct sk_buff *skb, | ||
58 | u32 nresid, u32 fi) | ||
59 | { | ||
60 | struct be_adapter *adapter = pnob->adapter; | ||
61 | u32 sk_frag_idx, n; | ||
62 | struct be_rx_page_info *rx_page_info; | ||
63 | u32 frag_sz = pnob->rx_buf_size; | ||
64 | |||
65 | sk_frag_idx = skb_shinfo(skb)->nr_frags; | ||
66 | while (nresid) { | ||
67 | index_inc(&fi, pnob->rx_q_len); | ||
68 | |||
69 | rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi]; | ||
70 | pnob->rx_ctxt[fi] = NULL; | ||
71 | if ((rx_page_info->page_offset) || | ||
72 | (pnob->rx_pg_shared == false)) { | ||
73 | pci_unmap_page(adapter->pdev, | ||
74 | pci_unmap_addr(rx_page_info, bus), | ||
75 | frag_sz, PCI_DMA_FROMDEVICE); | ||
76 | } | ||
77 | |||
78 | n = min(nresid, frag_sz); | ||
79 | skb_shinfo(skb)->frags[sk_frag_idx].page = rx_page_info->page; | ||
80 | skb_shinfo(skb)->frags[sk_frag_idx].page_offset | ||
81 | = rx_page_info->page_offset; | ||
82 | skb_shinfo(skb)->frags[sk_frag_idx].size = n; | ||
83 | |||
84 | sk_frag_idx++; | ||
85 | skb->len += n; | ||
86 | skb->data_len += n; | ||
87 | skb_shinfo(skb)->nr_frags++; | ||
88 | nresid -= n; | ||
89 | |||
90 | memset(rx_page_info, 0, sizeof(struct be_rx_page_info)); | ||
91 | atomic_dec(&pnob->rx_q_posted); | ||
92 | } | ||
93 | } | ||
94 | |||
95 | /* | ||
96 | * This function processes incoming nic packets over various Rx queues. | ||
97 | * This function takes the adapter, the current Rx status descriptor | ||
98 | * entry and the Rx completion queue ID as argument. | ||
99 | */ | ||
100 | static inline int process_nic_rx_completion(struct be_net_object *pnob, | ||
101 | struct ETH_RX_COMPL_AMAP *rxcp) | ||
102 | { | ||
103 | struct be_adapter *adapter = pnob->adapter; | ||
104 | struct sk_buff *skb; | ||
105 | int udpcksm, tcpcksm; | ||
106 | int n; | ||
107 | u32 nresid, fi; | ||
108 | u32 frag_sz = pnob->rx_buf_size; | ||
109 | u8 *va; | ||
110 | struct be_rx_page_info *rx_page_info; | ||
111 | u32 numfrags, vtp, vtm, vlan_tag, pktsize; | ||
112 | |||
113 | fi = AMAP_GET_BITS_PTR(ETH_RX_COMPL, fragndx, rxcp); | ||
114 | BUG_ON(fi >= (int)pnob->rx_q_len); | ||
115 | BUG_ON(fi < 0); | ||
116 | |||
117 | rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi]; | ||
118 | BUG_ON(!rx_page_info->page); | ||
119 | pnob->rx_ctxt[fi] = NULL; | ||
120 | |||
121 | /* | ||
122 | * If one page is used per fragment or if this is the second half | ||
123 | * of the page, unmap the page here | ||
124 | */ | ||
125 | if ((rx_page_info->page_offset) || (pnob->rx_pg_shared == false)) { | ||
126 | pci_unmap_page(adapter->pdev, | ||
127 | pci_unmap_addr(rx_page_info, bus), frag_sz, | ||
128 | PCI_DMA_FROMDEVICE); | ||
129 | } | ||
130 | |||
131 | atomic_dec(&pnob->rx_q_posted); | ||
132 | udpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, udpcksm, rxcp); | ||
133 | tcpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, tcpcksm, rxcp); | ||
134 | pktsize = AMAP_GET_BITS_PTR(ETH_RX_COMPL, pktsize, rxcp); | ||
135 | /* | ||
136 | * get rid of RX flush completions first. | ||
137 | */ | ||
138 | if ((tcpcksm) && (udpcksm) && (pktsize == 32)) { | ||
139 | put_page(rx_page_info->page); | ||
140 | memset(rx_page_info, 0, sizeof(struct be_rx_page_info)); | ||
141 | return 0; | ||
142 | } | ||
143 | skb = netdev_alloc_skb(pnob->netdev, BE_HDR_LEN + NET_IP_ALIGN); | ||
144 | if (skb == NULL) { | ||
145 | dev_info(&pnob->netdev->dev, "alloc_skb() failed\n"); | ||
146 | put_page(rx_page_info->page); | ||
147 | memset(rx_page_info, 0, sizeof(struct be_rx_page_info)); | ||
148 | goto free_frags; | ||
149 | } | ||
150 | skb_reserve(skb, NET_IP_ALIGN); | ||
151 | |||
152 | skb->dev = pnob->netdev; | ||
153 | |||
154 | n = min(pktsize, frag_sz); | ||
155 | |||
156 | va = page_address(rx_page_info->page) + rx_page_info->page_offset; | ||
157 | prefetch(va); | ||
158 | |||
159 | skb->len = n; | ||
160 | skb->data_len = n; | ||
161 | if (n <= BE_HDR_LEN) { | ||
162 | memcpy(skb->data, va, n); | ||
163 | put_page(rx_page_info->page); | ||
164 | skb->data_len -= n; | ||
165 | skb->tail += n; | ||
166 | } else { | ||
167 | |||
168 | /* Setup the SKB with page buffer information */ | ||
169 | skb_shinfo(skb)->frags[0].page = rx_page_info->page; | ||
170 | skb_shinfo(skb)->nr_frags++; | ||
171 | |||
172 | /* Copy the header into the skb_data */ | ||
173 | memcpy(skb->data, va, BE_HDR_LEN); | ||
174 | skb_shinfo(skb)->frags[0].page_offset = | ||
175 | rx_page_info->page_offset + BE_HDR_LEN; | ||
176 | skb_shinfo(skb)->frags[0].size = n - BE_HDR_LEN; | ||
177 | skb->data_len -= BE_HDR_LEN; | ||
178 | skb->tail += BE_HDR_LEN; | ||
179 | } | ||
180 | memset(rx_page_info, 0, sizeof(struct be_rx_page_info)); | ||
181 | nresid = pktsize - n; | ||
182 | |||
183 | skb->protocol = eth_type_trans(skb, pnob->netdev); | ||
184 | |||
185 | if ((tcpcksm || udpcksm) && adapter->rx_csum) | ||
186 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
187 | else | ||
188 | skb->ip_summed = CHECKSUM_NONE; | ||
189 | /* | ||
190 | * if we have more bytes left, the frame has been | ||
191 | * given to us in multiple fragments. This happens | ||
192 | * with Jumbo frames. Add the remaining fragments to | ||
193 | * skb->frags[] array. | ||
194 | */ | ||
195 | if (nresid) | ||
196 | add_skb_frags(pnob, skb, nresid, fi); | ||
197 | |||
198 | /* update the true size of the skb. */ | ||
199 | skb->truesize = skb->len + sizeof(struct sk_buff); | ||
200 | |||
201 | /* | ||
202 | * If this is an 802.3 frame or 802.2 LLC frame | ||
203 | * (i.e. it contains a length field in the MAC header) | ||
204 | * and the frame length is greater than 64 bytes | ||
205 | */ | ||
206 | if (((skb->protocol == ntohs(ETH_P_802_2)) || | ||
207 | (skb->protocol == ntohs(ETH_P_802_3))) | ||
208 | && (pktsize > BE_HDR_LEN)) { | ||
209 | /* | ||
210 | * If the length given in the MAC header is less than the | ||
211 | * frame size, the frame is erroneous; drop it | ||
212 | */ | ||
213 | if ((ntohs(*(u16 *) (va + 12)) + ETH_HLEN) < pktsize) { | ||
214 | /* Increment Non Ether type II frames dropped */ | ||
215 | adapter->be_stat.bes_802_3_dropped_frames++; | ||
216 | |||
217 | kfree_skb(skb); | ||
218 | return 0; | ||
219 | } | ||
220 | /* | ||
221 | * else if the length given in the MAC header is greater than | ||
222 | * the frame size, we should not be seeing this sort of frame; | ||
223 | * count it as malformed and pass it on to the stack | ||
224 | */ | ||
225 | else if ((ntohs(*(u16 *) (va + 12)) + ETH_HLEN) > pktsize) { | ||
226 | /* Increment Non Ether type II frames malformed */ | ||
227 | adapter->be_stat.bes_802_3_malformed_frames++; | ||
228 | } | ||
229 | } | ||
230 | |||
231 | vtp = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtp, rxcp); | ||
232 | vtm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtm, rxcp); | ||
233 | if (vtp && vtm) { | ||
234 | /* Vlan tag present in pkt and BE found | ||
235 | * that the tag matched an entry in VLAN table | ||
236 | */ | ||
237 | if (!pnob->vlan_grp || pnob->num_vlans == 0) { | ||
238 | /* But we have no VLANs configured. | ||
239 | * This should never happen. Drop the packet. | ||
240 | */ | ||
241 | dev_info(&pnob->netdev->dev, | ||
242 | "BladeEngine: Unexpected vlan tagged packet\n"); | ||
243 | kfree_skb(skb); | ||
244 | return 0; | ||
245 | } | ||
246 | /* pass the VLAN packet to stack */ | ||
247 | vlan_tag = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vlan_tag, rxcp); | ||
248 | VLAN_ACCEL_RX(skb, pnob, be16_to_cpu(vlan_tag)); | ||
249 | |||
250 | } else { | ||
251 | NETIF_RX(skb); | ||
252 | } | ||
253 | return 0; | ||
254 | |||
255 | free_frags: | ||
256 | /* free all frags associated with the current rxcp */ | ||
257 | numfrags = AMAP_GET_BITS_PTR(ETH_RX_COMPL, numfrags, rxcp); | ||
258 | while (numfrags-- > 1) { | ||
259 | index_inc(&fi, pnob->rx_q_len); | ||
260 | |||
261 | rx_page_info = (struct be_rx_page_info *) | ||
262 | pnob->rx_ctxt[fi]; | ||
263 | pnob->rx_ctxt[fi] = (void *)NULL; | ||
264 | if (rx_page_info->page_offset || !pnob->rx_pg_shared) { | ||
265 | pci_unmap_page(adapter->pdev, | ||
266 | pci_unmap_addr(rx_page_info, bus), | ||
267 | frag_sz, PCI_DMA_FROMDEVICE); | ||
268 | } | ||
269 | |||
270 | put_page(rx_page_info->page); | ||
271 | memset(rx_page_info, 0, sizeof(struct be_rx_page_info)); | ||
272 | atomic_dec(&pnob->rx_q_posted); | ||
273 | } | ||
274 | return -ENOMEM; | ||
275 | } | ||
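
To make the header/fragment split above concrete: for a normal 1500-byte frame with 2048-byte fragments, n = min(1500, 2048) = 1500; the first BE_HDR_LEN (64) bytes are copied into the skb's linear data area, the remaining 1436 bytes stay in the page and become frags[0], and nresid = 1500 - 1500 = 0, so no extra fragments are chained. For a jumbo frame larger than one fragment, nresid is non-zero and add_skb_frags() attaches the remaining receive fragments to the same skb.
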
276 | |||
277 | static void process_nic_rx_completion_lro(struct be_net_object *pnob, | ||
278 | struct ETH_RX_COMPL_AMAP *rxcp) | ||
279 | { | ||
280 | struct be_adapter *adapter = pnob->adapter; | ||
281 | struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME]; | ||
282 | unsigned int udpcksm, tcpcksm; | ||
283 | u32 numfrags, vlanf, vtm, vlan_tag, nresid; | ||
284 | u16 vlant; | ||
285 | unsigned int fi, idx, n; | ||
286 | struct be_rx_page_info *rx_page_info; | ||
287 | u32 frag_sz = pnob->rx_buf_size, pktsize; | ||
288 | bool rx_coal = (adapter->max_rx_coal > 1); | ||
289 | u8 err, *va; | ||
290 | __wsum csum = 0; | ||
291 | |||
292 | if (AMAP_GET_BITS_PTR(ETH_RX_COMPL, ipsec, rxcp)) { | ||
293 | /* Drop the pkt and move to the next completion. */ | ||
294 | adapter->be_stat.bes_rx_misc_pkts++; | ||
295 | return; | ||
296 | } | ||
297 | err = AMAP_GET_BITS_PTR(ETH_RX_COMPL, err, rxcp); | ||
298 | if (err || !rx_coal) { | ||
299 | /* We won't coalesce Rx pkts if the err bit set. | ||
300 | * take the path of normal completion processing */ | ||
301 | process_nic_rx_completion(pnob, rxcp); | ||
302 | return; | ||
303 | } | ||
304 | |||
305 | fi = AMAP_GET_BITS_PTR(ETH_RX_COMPL, fragndx, rxcp); | ||
306 | BUG_ON(fi >= (int)pnob->rx_q_len); | ||
307 | BUG_ON(fi < 0); | ||
308 | rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi]; | ||
309 | BUG_ON(!rx_page_info->page); | ||
310 | pnob->rx_ctxt[fi] = (void *)NULL; | ||
311 | /* If one page is used per fragment or if this is the | ||
312 | * second half of the page, unmap the page here | ||
313 | */ | ||
314 | if (rx_page_info->page_offset || !pnob->rx_pg_shared) { | ||
315 | pci_unmap_page(adapter->pdev, | ||
316 | pci_unmap_addr(rx_page_info, bus), | ||
317 | frag_sz, PCI_DMA_FROMDEVICE); | ||
318 | } | ||
319 | |||
320 | numfrags = AMAP_GET_BITS_PTR(ETH_RX_COMPL, numfrags, rxcp); | ||
321 | udpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, udpcksm, rxcp); | ||
322 | tcpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, tcpcksm, rxcp); | ||
323 | vlan_tag = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vlan_tag, rxcp); | ||
324 | vlant = be16_to_cpu(vlan_tag); | ||
325 | vlanf = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtp, rxcp); | ||
326 | vtm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtm, rxcp); | ||
327 | pktsize = AMAP_GET_BITS_PTR(ETH_RX_COMPL, pktsize, rxcp); | ||
328 | |||
329 | atomic_dec(&pnob->rx_q_posted); | ||
330 | |||
331 | if (tcpcksm && udpcksm && pktsize == 32) { | ||
332 | /* flush completion entries */ | ||
333 | put_page(rx_page_info->page); | ||
334 | memset(rx_page_info, 0, sizeof(struct be_rx_page_info)); | ||
335 | return; | ||
336 | } | ||
337 | /* Only one of udpcksum and tcpcksum can be set */ | ||
338 | BUG_ON(udpcksm && tcpcksm); | ||
339 | |||
340 | /* jumbo frames could come in multiple fragments */ | ||
341 | BUG_ON(numfrags != ((pktsize + (frag_sz - 1)) / frag_sz)); | ||
342 | n = min(pktsize, frag_sz); | ||
343 | nresid = pktsize - n; /* will be useful for jumbo pkts */ | ||
344 | idx = 0; | ||
345 | |||
346 | va = page_address(rx_page_info->page) + rx_page_info->page_offset; | ||
347 | prefetch(va); | ||
348 | rx_frags[idx].page = rx_page_info->page; | ||
349 | rx_frags[idx].page_offset = (rx_page_info->page_offset); | ||
350 | rx_frags[idx].size = n; | ||
351 | memset(rx_page_info, 0, sizeof(struct be_rx_page_info)); | ||
352 | |||
353 | /* If we got multiple fragments, we have more data. */ | ||
354 | while (nresid) { | ||
355 | idx++; | ||
356 | index_inc(&fi, pnob->rx_q_len); | ||
357 | |||
358 | rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi]; | ||
359 | pnob->rx_ctxt[fi] = (void *)NULL; | ||
360 | if (rx_page_info->page_offset || !pnob->rx_pg_shared) { | ||
361 | pci_unmap_page(adapter->pdev, | ||
362 | pci_unmap_addr(rx_page_info, bus), | ||
363 | frag_sz, PCI_DMA_FROMDEVICE); | ||
364 | } | ||
365 | |||
366 | n = min(nresid, frag_sz); | ||
367 | rx_frags[idx].page = rx_page_info->page; | ||
368 | rx_frags[idx].page_offset = (rx_page_info->page_offset); | ||
369 | rx_frags[idx].size = n; | ||
370 | |||
371 | nresid -= n; | ||
372 | memset(rx_page_info, 0, sizeof(struct be_rx_page_info)); | ||
373 | atomic_dec(&pnob->rx_q_posted); | ||
374 | } | ||
375 | |||
376 | if (likely(!(vlanf && vtm))) { | ||
377 | lro_receive_frags(&pnob->lro_mgr, rx_frags, | ||
378 | pktsize, pktsize, | ||
379 | (void *)(unsigned long)csum, csum); | ||
380 | } else { | ||
381 | /* Vlan tag present in pkt and BE found | ||
382 | * that the tag matched an entry in VLAN table | ||
383 | */ | ||
384 | if (unlikely(!pnob->vlan_grp || pnob->num_vlans == 0)) { | ||
385 | /* But we have no VLANs configured. | ||
386 | * This should never happen. Drop the packet. | ||
387 | */ | ||
388 | dev_info(&pnob->netdev->dev, | ||
389 | "BladeEngine: Unexpected vlan tagged packet\n"); | ||
390 | return; | ||
391 | } | ||
392 | /* pass the VLAN packet to stack */ | ||
393 | lro_vlan_hwaccel_receive_frags(&pnob->lro_mgr, | ||
394 | rx_frags, pktsize, pktsize, | ||
395 | pnob->vlan_grp, vlant, | ||
396 | (void *)(unsigned long)csum, | ||
397 | csum); | ||
398 | } | ||
399 | |||
400 | adapter->be_stat.bes_rx_coal++; | ||
401 | } | ||
402 | |||
403 | struct ETH_RX_COMPL_AMAP *be_get_rx_cmpl(struct be_net_object *pnob) | ||
404 | { | ||
405 | struct ETH_RX_COMPL_AMAP *rxcp = &pnob->rx_cq[pnob->rx_cq_tl]; | ||
406 | u32 valid, ct; | ||
407 | |||
408 | valid = AMAP_GET_BITS_PTR(ETH_RX_COMPL, valid, rxcp); | ||
409 | if (valid == 0) | ||
410 | return NULL; | ||
411 | |||
412 | ct = AMAP_GET_BITS_PTR(ETH_RX_COMPL, ct, rxcp); | ||
413 | if (ct != 0) { | ||
414 | /* Invalid chute number; treat as an error */ | ||
415 | AMAP_SET_BITS_PTR(ETH_RX_COMPL, err, rxcp, 1); | ||
416 | } | ||
417 | |||
418 | be_adv_rxcq_tl(pnob); | ||
419 | AMAP_SET_BITS_PTR(ETH_RX_COMPL, valid, rxcp, 0); | ||
420 | return rxcp; | ||
421 | } | ||
422 | |||
423 | static void update_rx_rate(struct be_adapter *adapter) | ||
424 | { | ||
425 | /* update the rate once in two seconds */ | ||
426 | if ((jiffies - adapter->eth_rx_jiffies) > 2 * (HZ)) { | ||
427 | u32 r; | ||
428 | r = adapter->eth_rx_bytes / | ||
429 | ((jiffies - adapter->eth_rx_jiffies) / (HZ)); | ||
430 | r = (r / 1000000); /* MB/Sec */ | ||
431 | |||
432 | /* Mega Bits/Sec */ | ||
433 | adapter->be_stat.bes_eth_rx_rate = (r * 8); | ||
434 | adapter->eth_rx_jiffies = jiffies; | ||
435 | adapter->eth_rx_bytes = 0; | ||
436 | } | ||
437 | } | ||
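
As a worked example of the arithmetic above: 500,000,000 bytes received over a 2-second window gives r = 250,000,000 bytes/sec; the divide by 1,000,000 turns that into 250 (decimal megabytes per second), and bes_eth_rx_rate is stored as 250 * 8 = 2000, i.e. roughly 2 Gbit/s.
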
438 | |||
439 | static int process_rx_completions(struct be_net_object *pnob, int max_work) | ||
440 | { | ||
441 | struct be_adapter *adapter = pnob->adapter; | ||
442 | struct ETH_RX_COMPL_AMAP *rxcp; | ||
443 | u32 nc = 0; | ||
444 | unsigned int pktsize; | ||
445 | |||
446 | while (max_work && (rxcp = be_get_rx_cmpl(pnob))) { | ||
447 | prefetch(rxcp); | ||
448 | pktsize = AMAP_GET_BITS_PTR(ETH_RX_COMPL, pktsize, rxcp); | ||
449 | process_nic_rx_completion_lro(pnob, rxcp); | ||
450 | adapter->eth_rx_bytes += pktsize; | ||
451 | update_rx_rate(adapter); | ||
452 | nc++; | ||
453 | max_work--; | ||
454 | adapter->be_stat.bes_rx_compl++; | ||
455 | } | ||
456 | if (likely(adapter->max_rx_coal > 1)) { | ||
457 | adapter->be_stat.bes_rx_flush++; | ||
458 | lro_flush_all(&pnob->lro_mgr); | ||
459 | } | ||
460 | |||
461 | /* Refill the queue */ | ||
462 | if (atomic_read(&pnob->rx_q_posted) < 900) | ||
463 | be_post_eth_rx_buffs(pnob); | ||
464 | |||
465 | return nc; | ||
466 | } | ||
467 | |||
468 | static struct ETH_TX_COMPL_AMAP *be_get_tx_cmpl(struct be_net_object *pnob) | ||
469 | { | ||
470 | struct ETH_TX_COMPL_AMAP *txcp = &pnob->tx_cq[pnob->tx_cq_tl]; | ||
471 | u32 valid; | ||
472 | |||
473 | valid = AMAP_GET_BITS_PTR(ETH_TX_COMPL, valid, txcp); | ||
474 | if (valid == 0) | ||
475 | return NULL; | ||
476 | |||
477 | AMAP_SET_BITS_PTR(ETH_TX_COMPL, valid, txcp, 0); | ||
478 | be_adv_txcq_tl(pnob); | ||
479 | return txcp; | ||
480 | |||
481 | } | ||
482 | |||
483 | void process_one_tx_compl(struct be_net_object *pnob, u32 end_idx) | ||
484 | { | ||
485 | struct be_adapter *adapter = pnob->adapter; | ||
486 | int cur_index, tx_wrbs_completed = 0; | ||
487 | struct sk_buff *skb; | ||
488 | u64 busaddr, pa, pa_lo, pa_hi; | ||
489 | struct ETH_WRB_AMAP *wrb; | ||
490 | u32 frag_len, last_index, j; | ||
491 | |||
492 | last_index = tx_compl_lastwrb_idx_get(pnob); | ||
493 | BUG_ON(last_index != end_idx); | ||
494 | pnob->tx_ctxt[pnob->tx_q_tl] = NULL; | ||
495 | do { | ||
496 | cur_index = pnob->tx_q_tl; | ||
497 | wrb = &pnob->tx_q[cur_index]; | ||
498 | pa_hi = AMAP_GET_BITS_PTR(ETH_WRB, frag_pa_hi, wrb); | ||
499 | pa_lo = AMAP_GET_BITS_PTR(ETH_WRB, frag_pa_lo, wrb); | ||
500 | frag_len = AMAP_GET_BITS_PTR(ETH_WRB, frag_len, wrb); | ||
501 | busaddr = (pa_hi << 32) | pa_lo; | ||
502 | if (busaddr != 0) { | ||
503 | pa = le64_to_cpu(busaddr); | ||
504 | pci_unmap_single(adapter->pdev, pa, | ||
505 | frag_len, PCI_DMA_TODEVICE); | ||
506 | } | ||
507 | if (cur_index == last_index) { | ||
508 | skb = (struct sk_buff *)pnob->tx_ctxt[cur_index]; | ||
509 | BUG_ON(!skb); | ||
510 | for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) { | ||
511 | struct skb_frag_struct *frag; | ||
512 | frag = &skb_shinfo(skb)->frags[j]; | ||
513 | pci_unmap_page(adapter->pdev, | ||
514 | (ulong) frag->page, frag->size, | ||
515 | PCI_DMA_TODEVICE); | ||
516 | } | ||
517 | kfree_skb(skb); | ||
518 | pnob->tx_ctxt[cur_index] = NULL; | ||
519 | } else { | ||
520 | BUG_ON(pnob->tx_ctxt[cur_index]); | ||
521 | } | ||
522 | tx_wrbs_completed++; | ||
523 | be_adv_txq_tl(pnob); | ||
524 | } while (cur_index != last_index); | ||
525 | atomic_sub(tx_wrbs_completed, &pnob->tx_q_used); | ||
526 | } | ||
527 | |||
528 | /* there is no need to take an SMP lock here since currently | ||
529 | * we have only one instance of the tasklet that does completion | ||
530 | * processing. | ||
531 | */ | ||
532 | static void process_nic_tx_completions(struct be_net_object *pnob) | ||
533 | { | ||
534 | struct be_adapter *adapter = pnob->adapter; | ||
535 | struct ETH_TX_COMPL_AMAP *txcp; | ||
536 | struct net_device *netdev = pnob->netdev; | ||
537 | u32 end_idx, num_processed = 0; | ||
538 | |||
539 | adapter->be_stat.bes_tx_events++; | ||
540 | |||
541 | while ((txcp = be_get_tx_cmpl(pnob))) { | ||
542 | end_idx = AMAP_GET_BITS_PTR(ETH_TX_COMPL, wrb_index, txcp); | ||
543 | process_one_tx_compl(pnob, end_idx); | ||
544 | num_processed++; | ||
545 | adapter->be_stat.bes_tx_compl++; | ||
546 | } | ||
547 | be_notify_cmpl(pnob, num_processed, pnob->tx_cq_id, 1); | ||
548 | /* | ||
549 | * We got Tx completions and have usable WRBs. | ||
550 | * If the netdev's queue has been stopped | ||
551 | * because we had run out of WRBs, wake it now. | ||
552 | */ | ||
553 | spin_lock(&adapter->txq_lock); | ||
554 | if (netif_queue_stopped(netdev) | ||
555 | && atomic_read(&pnob->tx_q_used) < pnob->tx_q_len / 2) { | ||
556 | netif_wake_queue(netdev); | ||
557 | } | ||
558 | spin_unlock(&adapter->txq_lock); | ||
559 | } | ||
560 | |||
561 | static u32 post_rx_buffs(struct be_net_object *pnob, struct list_head *rxbl) | ||
562 | { | ||
563 | u32 nposted = 0; | ||
564 | struct ETH_RX_D_AMAP *rxd = NULL; | ||
565 | struct be_recv_buffer *rxbp; | ||
566 | void **rx_ctxp; | ||
567 | struct RQ_DB_AMAP rqdb; | ||
568 | |||
569 | rx_ctxp = pnob->rx_ctxt; | ||
570 | |||
571 | while (!list_empty(rxbl) && | ||
572 | (rx_ctxp[pnob->rx_q_hd] == NULL) && nposted < 255) { | ||
573 | |||
574 | rxbp = list_first_entry(rxbl, struct be_recv_buffer, rxb_list); | ||
575 | list_del(&rxbp->rxb_list); | ||
576 | rxd = pnob->rx_q + pnob->rx_q_hd; | ||
577 | AMAP_SET_BITS_PTR(ETH_RX_D, fragpa_lo, rxd, rxbp->rxb_pa_lo); | ||
578 | AMAP_SET_BITS_PTR(ETH_RX_D, fragpa_hi, rxd, rxbp->rxb_pa_hi); | ||
579 | |||
580 | rx_ctxp[pnob->rx_q_hd] = rxbp->rxb_ctxt; | ||
581 | be_adv_rxq_hd(pnob); | ||
582 | nposted++; | ||
583 | } | ||
584 | |||
585 | if (nposted) { | ||
586 | /* Now press the door bell to notify BladeEngine. */ | ||
587 | rqdb.dw[0] = 0; | ||
588 | AMAP_SET_BITS_PTR(RQ_DB, numPosted, &rqdb, nposted); | ||
589 | AMAP_SET_BITS_PTR(RQ_DB, rq, &rqdb, pnob->rx_q_id); | ||
590 | PD_WRITE(&pnob->fn_obj, erx_rq_db, rqdb.dw[0]); | ||
591 | } | ||
592 | atomic_add(nposted, &pnob->rx_q_posted); | ||
593 | return nposted; | ||
594 | } | ||
595 | |||
596 | void be_post_eth_rx_buffs(struct be_net_object *pnob) | ||
597 | { | ||
598 | struct be_adapter *adapter = pnob->adapter; | ||
599 | u32 num_bufs, r; | ||
600 | u64 busaddr = 0, tmp_pa; | ||
601 | u32 max_bufs, pg_hd; | ||
602 | u32 frag_size; | ||
603 | struct be_recv_buffer *rxbp; | ||
604 | struct list_head rxbl; | ||
605 | struct be_rx_page_info *rx_page_info; | ||
606 | struct page *page = NULL; | ||
607 | u32 page_order = 0; | ||
608 | gfp_t alloc_flags = GFP_ATOMIC; | ||
609 | |||
610 | BUG_ON(!adapter); | ||
611 | |||
612 | max_bufs = 64; /* should be even # <= 255. */ | ||
613 | |||
614 | frag_size = pnob->rx_buf_size; | ||
615 | page_order = get_order(frag_size); | ||
616 | |||
617 | if (frag_size == 8192) | ||
618 | alloc_flags |= (gfp_t) __GFP_COMP; | ||
619 | /* | ||
620 | * Form a linked list of RECV_BUFFER structures to be posted. | ||
621 | * We will post an even number of buffers so that pages can be | ||
622 | * shared. | ||
623 | */ | ||
624 | INIT_LIST_HEAD(&rxbl); | ||
625 | |||
626 | for (num_bufs = 0; num_bufs < max_bufs && | ||
627 | !pnob->rx_page_info[pnob->rx_pg_info_hd].page; ++num_bufs) { | ||
628 | |||
629 | rxbp = &pnob->eth_rx_bufs[num_bufs]; | ||
630 | pg_hd = pnob->rx_pg_info_hd; | ||
631 | rx_page_info = &pnob->rx_page_info[pg_hd]; | ||
632 | |||
633 | if (!page) { | ||
634 | page = alloc_pages(alloc_flags, page_order); | ||
635 | if (unlikely(page == NULL)) { | ||
636 | adapter->be_stat.bes_ethrx_post_fail++; | ||
637 | pnob->rxbuf_post_fail++; | ||
638 | break; | ||
639 | } | ||
640 | pnob->rxbuf_post_fail = 0; | ||
641 | busaddr = pci_map_page(adapter->pdev, page, 0, | ||
642 | frag_size, PCI_DMA_FROMDEVICE); | ||
643 | rx_page_info->page_offset = 0; | ||
644 | rx_page_info->page = page; | ||
645 | /* | ||
646 | * If the page is not being shared between two skbs, | ||
647 | * alloc a new one on the next iteration | ||
648 | */ | ||
649 | if (pnob->rx_pg_shared == false) | ||
650 | page = NULL; | ||
651 | } else { | ||
652 | get_page(page); | ||
653 | rx_page_info->page_offset += frag_size; | ||
654 | rx_page_info->page = page; | ||
655 | /* | ||
656 | * We are finished with the allocated page; | ||
657 | * alloc a new one on the next iteration | ||
658 | */ | ||
659 | page = NULL; | ||
660 | } | ||
661 | rxbp->rxb_ctxt = (void *)rx_page_info; | ||
662 | index_inc(&pnob->rx_pg_info_hd, pnob->rx_q_len); | ||
663 | |||
664 | pci_unmap_addr_set(rx_page_info, bus, busaddr); | ||
665 | tmp_pa = busaddr + rx_page_info->page_offset; | ||
666 | rxbp->rxb_pa_lo = (tmp_pa & 0xFFFFFFFF); | ||
667 | rxbp->rxb_pa_hi = (tmp_pa >> 32); | ||
668 | rxbp->rxb_len = frag_size; | ||
669 | list_add_tail(&rxbp->rxb_list, &rxbl); | ||
670 | } /* End of for */ | ||
671 | |||
672 | r = post_rx_buffs(pnob, &rxbl); | ||
673 | BUG_ON(r != num_bufs); | ||
674 | return; | ||
675 | } | ||
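
Concretely, with the default 2048-byte receive fragments on a system with 4 KB pages (rx_pg_shared is true because rx_buf_size <= PAGE_SIZE / 2), each allocated page is posted as two fragments: the first at page_offset 0 and the second, on the next loop iteration, at page_offset 2048 after a get_page(). The completion path unmaps the page only for the fragment with a non-zero offset (or whenever rx_pg_shared is false), which is why process_nic_rx_completion() tests page_offset before calling pci_unmap_page(). With 8192-byte fragments a compound page (__GFP_COMP) is allocated for every fragment and no sharing takes place.
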
676 | |||
677 | /* | ||
678 | * Interrupt service for network function. We just schedule the | ||
679 | * tasklet which does all completion processing. | ||
680 | */ | ||
681 | irqreturn_t be_int(int irq, void *dev) | ||
682 | { | ||
683 | struct net_device *netdev = dev; | ||
684 | struct be_net_object *pnob = netdev_priv(netdev); | ||
685 | struct be_adapter *adapter = pnob->adapter; | ||
686 | u32 isr; | ||
687 | |||
688 | isr = CSR_READ(&pnob->fn_obj, cev.isr1); | ||
689 | if (unlikely(!isr)) | ||
690 | return IRQ_NONE; | ||
691 | |||
692 | spin_lock(&adapter->int_lock); | ||
693 | adapter->isr |= isr; | ||
694 | spin_unlock(&adapter->int_lock); | ||
695 | |||
696 | adapter->be_stat.bes_ints++; | ||
697 | |||
698 | tasklet_schedule(&adapter->sts_handler); | ||
699 | return IRQ_HANDLED; | ||
700 | } | ||
701 | |||
702 | /* | ||
703 | * Poll function called by NAPI with a work budget. | ||
704 | * We process as many UC, BC and MC receive completions | ||
705 | * as the budget allows and return the actual number of | ||
706 | * RX statuses processed. | ||
707 | */ | ||
708 | int be_poll(struct napi_struct *napi, int budget) | ||
709 | { | ||
710 | struct be_net_object *pnob = | ||
711 | container_of(napi, struct be_net_object, napi); | ||
712 | u32 work_done; | ||
713 | |||
714 | pnob->adapter->be_stat.bes_polls++; | ||
715 | work_done = process_rx_completions(pnob, budget); | ||
716 | BUG_ON(work_done > budget); | ||
717 | |||
718 | /* All consumed */ | ||
719 | if (work_done < budget) { | ||
720 | netif_rx_complete(napi); | ||
721 | /* enable intr */ | ||
722 | be_notify_cmpl(pnob, work_done, pnob->rx_cq_id, 1); | ||
723 | } else { | ||
724 | /* More to be consumed; continue with interrupts disabled */ | ||
725 | be_notify_cmpl(pnob, work_done, pnob->rx_cq_id, 0); | ||
726 | } | ||
727 | return work_done; | ||
728 | } | ||
729 | |||
730 | static struct EQ_ENTRY_AMAP *get_event(struct be_net_object *pnob) | ||
731 | { | ||
732 | struct EQ_ENTRY_AMAP *eqp = &(pnob->event_q[pnob->event_q_tl]); | ||
733 | if (!AMAP_GET_BITS_PTR(EQ_ENTRY, Valid, eqp)) | ||
734 | return NULL; | ||
735 | be_adv_eq_tl(pnob); | ||
736 | return eqp; | ||
737 | } | ||
738 | |||
739 | /* | ||
740 | * Processes all valid events in the event ring associated with given | ||
741 | * NetObject. Also, notifies BE the number of events processed. | ||
742 | */ | ||
743 | static inline u32 process_events(struct be_net_object *pnob) | ||
744 | { | ||
745 | struct be_adapter *adapter = pnob->adapter; | ||
746 | struct EQ_ENTRY_AMAP *eqp; | ||
747 | u32 rid, num_events = 0; | ||
748 | struct net_device *netdev = pnob->netdev; | ||
749 | |||
750 | while ((eqp = get_event(pnob)) != NULL) { | ||
751 | adapter->be_stat.bes_events++; | ||
752 | rid = AMAP_GET_BITS_PTR(EQ_ENTRY, ResourceID, eqp); | ||
753 | if (rid == pnob->rx_cq_id) { | ||
754 | adapter->be_stat.bes_rx_events++; | ||
755 | netif_rx_schedule(&pnob->napi); | ||
756 | } else if (rid == pnob->tx_cq_id) { | ||
757 | process_nic_tx_completions(pnob); | ||
758 | } else if (rid == pnob->mcc_cq_id) { | ||
759 | be_mcc_process_cq(&pnob->mcc_q_obj, 1); | ||
760 | } else { | ||
761 | dev_info(&netdev->dev, | ||
762 | "Invalid EQ ResourceID %d\n", rid); | ||
763 | } | ||
764 | AMAP_SET_BITS_PTR(EQ_ENTRY, Valid, eqp, 0); | ||
765 | AMAP_SET_BITS_PTR(EQ_ENTRY, ResourceID, eqp, 0); | ||
766 | num_events++; | ||
767 | } | ||
768 | return num_events; | ||
769 | } | ||
770 | |||
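| /* Adaptive interrupt coalescing: once a second, step the EQ delay up or | ||
| * down by 8 usec to keep the interrupt rate between IPS_LO_WM and IPS_HI_WM. | ||
| */ | ||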
771 | static void update_eqd(struct be_adapter *adapter, struct be_net_object *pnob) | ||
772 | { | ||
773 | int status; | ||
774 | struct be_eq_object *eq_objectp; | ||
775 | |||
776 | /* update once a second */ | ||
777 | if ((jiffies - adapter->ips_jiffies) > 1 * (HZ)) { | ||
778 | /* One second elapsed since last update */ | ||
779 | u32 r, new_eqd = -1; | ||
780 | r = adapter->be_stat.bes_ints - adapter->be_stat.bes_prev_ints; | ||
781 | r = r / ((jiffies - adapter->ips_jiffies) / (HZ)); | ||
782 | adapter->be_stat.bes_ips = r; | ||
783 | adapter->ips_jiffies = jiffies; | ||
784 | adapter->be_stat.bes_prev_ints = adapter->be_stat.bes_ints; | ||
785 | if (r > IPS_HI_WM && adapter->cur_eqd < adapter->max_eqd) | ||
786 | new_eqd = (adapter->cur_eqd + 8); | ||
787 | if (r < IPS_LO_WM && adapter->cur_eqd > adapter->min_eqd) | ||
788 | new_eqd = (adapter->cur_eqd - 8); | ||
789 | if (adapter->enable_aic && new_eqd != -1) { | ||
790 | eq_objectp = &pnob->event_q_obj; | ||
791 | status = be_eq_modify_delay(&pnob->fn_obj, 1, | ||
792 | &eq_objectp, &new_eqd, NULL, | ||
793 | NULL, NULL); | ||
794 | if (status == BE_SUCCESS) | ||
795 | adapter->cur_eqd = new_eqd; | ||
796 | } | ||
797 | } | ||
798 | } | ||
799 | |||
800 | /* | ||
801 | This function notifies BladeEngine of how many events were processed | ||
802 | from the event queue by ringing the corresponding doorbell and | ||
803 | optionally re-arms the event queue. | ||
804 | n - number of events processed | ||
805 | re_arm - 1 - re-arm the EQ, 0 - do not re-arm the EQ | ||
806 | |||
807 | */ | ||
808 | static void be_notify_event(struct be_net_object *pnob, int n, int re_arm) | ||
809 | { | ||
810 | struct CQ_DB_AMAP eqdb; | ||
811 | eqdb.dw[0] = 0; | ||
812 | |||
813 | AMAP_SET_BITS_PTR(CQ_DB, qid, &eqdb, pnob->event_q_id); | ||
814 | AMAP_SET_BITS_PTR(CQ_DB, rearm, &eqdb, re_arm); | ||
815 | AMAP_SET_BITS_PTR(CQ_DB, event, &eqdb, 1); | ||
816 | AMAP_SET_BITS_PTR(CQ_DB, num_popped, &eqdb, n); | ||
817 | /* | ||
818 | * Under some situations we see an interrupt but no valid | ||
819 | * EQ entry. To keep going, we need to ring the DB even if | ||
820 | * num_popped is 0. | ||
821 | */ | ||
822 | PD_WRITE(&pnob->fn_obj, cq_db, eqdb.dw[0]); | ||
823 | return; | ||
824 | } | ||
825 | |||
826 | /* | ||
827 | * Called from the tasklet scheduled by ISR. All real interrupt processing | ||
828 | * is done here. | ||
829 | */ | ||
830 | void be_process_intr(unsigned long context) | ||
831 | { | ||
832 | struct be_adapter *adapter = (struct be_adapter *)context; | ||
833 | struct be_net_object *pnob = adapter->net_obj; | ||
834 | u32 isr, n; | ||
835 | ulong flags = 0; | ||
836 | |||
837 | isr = adapter->isr; | ||
838 | |||
839 | /* | ||
840 | * We create only one NIC event queue in Linux, so events are | ||
841 | * expected only in the first event queue. | ||
842 | */ | ||
843 | BUG_ON(isr & 0xfffffffe); | ||
844 | if ((isr & 1) == 0) | ||
845 | return; /* not our interrupt */ | ||
846 | n = process_events(pnob); | ||
847 | /* | ||
848 | * Clear the event bit. adapter->isr is set by | ||
849 | * hard interrupt. Prevent race with lock. | ||
850 | */ | ||
851 | spin_lock_irqsave(&adapter->int_lock, flags); | ||
852 | adapter->isr &= ~1; | ||
853 | spin_unlock_irqrestore(&adapter->int_lock, flags); | ||
854 | be_notify_event(pnob, n, 1); | ||
855 | /* | ||
856 | * If previous allocation attempts had failed and | ||
857 | * BE has used up all posted buffers, post RX buffers here | ||
858 | */ | ||
859 | if (pnob->rxbuf_post_fail && atomic_read(&pnob->rx_q_posted) == 0) | ||
860 | be_post_eth_rx_buffs(pnob); | ||
861 | update_eqd(adapter, pnob); | ||
862 | return; | ||
863 | } | ||
diff --git a/drivers/staging/benet/be_netif.c b/drivers/staging/benet/be_netif.c deleted file mode 100644 index 2b8daf63dc7d..000000000000 --- a/drivers/staging/benet/be_netif.c +++ /dev/null | |||
@@ -1,705 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | /* | ||
18 | * be_netif.c | ||
19 | * | ||
20 | * This file contains the driver entry points seen by the TCP/IP stack. | ||
21 | */ | ||
22 | |||
23 | #include <linux/if_vlan.h> | ||
24 | #include <linux/in.h> | ||
25 | #include "benet.h" | ||
26 | #include <linux/ip.h> | ||
27 | #include <linux/inet_lro.h> | ||
28 | |||
29 | /* Strings to print Link properties */ | ||
30 | static const char *link_speed[] = { | ||
31 | "Invalid link Speed Value", | ||
32 | "10 Mbps", | ||
33 | "100 Mbps", | ||
34 | "1 Gbps", | ||
35 | "10 Gbps" | ||
36 | }; | ||
37 | |||
38 | static const char *link_duplex[] = { | ||
39 | "Invalid Duplex Value", | ||
40 | "Half Duplex", | ||
41 | "Full Duplex" | ||
42 | }; | ||
43 | |||
44 | static const char *link_state[] = { | ||
45 | "", | ||
46 | "(active)" | ||
47 | }; | ||
48 | |||
49 | void be_print_link_info(struct BE_LINK_STATUS *lnk_status) | ||
50 | { | ||
51 | u16 si, di, ai; | ||
52 | |||
53 | /* Port 0 */ | ||
54 | if (lnk_status->mac0_speed && lnk_status->mac0_duplex) { | ||
55 | /* Port is up and running */ | ||
56 | si = (lnk_status->mac0_speed < 5) ? lnk_status->mac0_speed : 0; | ||
57 | di = (lnk_status->mac0_duplex < 3) ? | ||
58 | lnk_status->mac0_duplex : 0; | ||
59 | ai = (lnk_status->active_port == 0) ? 1 : 0; | ||
60 | printk(KERN_INFO "PortNo. 0: Speed - %s %s %s\n", | ||
61 | link_speed[si], link_duplex[di], link_state[ai]); | ||
62 | } else | ||
63 | printk(KERN_INFO "PortNo. 0: Down\n"); | ||
64 | |||
65 | /* Port 1 */ | ||
66 | if (lnk_status->mac1_speed && lnk_status->mac1_duplex) { | ||
67 | /* Port is up and running */ | ||
68 | si = (lnk_status->mac1_speed < 5) ? lnk_status->mac1_speed : 0; | ||
69 | di = (lnk_status->mac1_duplex < 3) ? | ||
70 | lnk_status->mac1_duplex : 0; | ||
71 | ai = (lnk_status->active_port == 1) ? 1 : 0; | ||
72 | printk(KERN_INFO "PortNo. 1: Speed - %s %s %s\n", | ||
73 | link_speed[si], link_duplex[di], link_state[ai]); | ||
74 | } else | ||
75 | printk(KERN_INFO "PortNo. 1: Down\n"); | ||
76 | |||
77 | return; | ||
78 | } | ||
79 | |||
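| /* LRO callback: locate the MAC, IP and TCP headers in a page fragment. | ||
| * Returns 0 for IPv4/TCP frames (optionally VLAN tagged), -1 otherwise. | ||
| */ | ||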
80 | static int | ||
81 | be_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr, | ||
82 | void **ip_hdr, void **tcpudp_hdr, | ||
83 | u64 *hdr_flags, void *priv) | ||
84 | { | ||
85 | struct ethhdr *eh; | ||
86 | struct vlan_ethhdr *veh; | ||
87 | struct iphdr *iph; | ||
88 | u8 *va = page_address(frag->page) + frag->page_offset; | ||
89 | unsigned long ll_hlen; | ||
90 | |||
91 | /* find the mac header, abort if not IPv4 */ | ||
92 | |||
93 | prefetch(va); | ||
94 | eh = (struct ethhdr *)va; | ||
95 | *mac_hdr = eh; | ||
96 | ll_hlen = ETH_HLEN; | ||
97 | if (eh->h_proto != htons(ETH_P_IP)) { | ||
98 | if (eh->h_proto == htons(ETH_P_8021Q)) { | ||
99 | veh = (struct vlan_ethhdr *)va; | ||
100 | if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP)) | ||
101 | return -1; | ||
102 | |||
103 | ll_hlen += VLAN_HLEN; | ||
104 | |||
105 | } else { | ||
106 | return -1; | ||
107 | } | ||
108 | } | ||
109 | *hdr_flags = LRO_IPV4; | ||
110 | |||
111 | iph = (struct iphdr *)(va + ll_hlen); | ||
112 | *ip_hdr = iph; | ||
113 | if (iph->protocol != IPPROTO_TCP) | ||
114 | return -1; | ||
115 | *hdr_flags |= LRO_TCP; | ||
116 | *tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2); | ||
117 | |||
118 | return 0; | ||
119 | } | ||
120 | |||
121 | static int benet_open(struct net_device *netdev) | ||
122 | { | ||
123 | struct be_net_object *pnob = netdev_priv(netdev); | ||
124 | struct be_adapter *adapter = pnob->adapter; | ||
125 | struct net_lro_mgr *lro_mgr; | ||
126 | |||
127 | if (adapter->dev_state < BE_DEV_STATE_INIT) | ||
128 | return -EAGAIN; | ||
129 | |||
130 | lro_mgr = &pnob->lro_mgr; | ||
131 | lro_mgr->dev = netdev; | ||
132 | |||
133 | lro_mgr->features = LRO_F_NAPI; | ||
134 | lro_mgr->ip_summed = CHECKSUM_UNNECESSARY; | ||
135 | lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY; | ||
136 | lro_mgr->max_desc = BE_MAX_LRO_DESCRIPTORS; | ||
137 | lro_mgr->lro_arr = pnob->lro_desc; | ||
138 | lro_mgr->get_frag_header = be_get_frag_header; | ||
139 | lro_mgr->max_aggr = adapter->max_rx_coal; | ||
140 | lro_mgr->frag_align_pad = 2; | ||
141 | if (lro_mgr->max_aggr > MAX_SKB_FRAGS) | ||
142 | lro_mgr->max_aggr = MAX_SKB_FRAGS; | ||
143 | |||
144 | adapter->max_rx_coal = BE_LRO_MAX_PKTS; | ||
145 | |||
146 | be_update_link_status(adapter); | ||
147 | |||
148 | /* | ||
149 | * Set the carrier on only if the physical link is up. | ||
150 | * Either port's link being up signifies this. | ||
151 | */ | ||
152 | if ((adapter->port0_link_sts == BE_PORT_LINK_UP) || | ||
153 | (adapter->port1_link_sts == BE_PORT_LINK_UP)) { | ||
154 | netif_start_queue(netdev); | ||
155 | netif_carrier_on(netdev); | ||
156 | } | ||
157 | |||
158 | adapter->dev_state = BE_DEV_STATE_OPEN; | ||
159 | napi_enable(&pnob->napi); | ||
160 | be_enable_intr(pnob); | ||
161 | be_enable_eq_intr(pnob); | ||
162 | /* | ||
163 | * The RX completion queue may be in a disarmed state. Arm it. | ||
164 | */ | ||
165 | be_notify_cmpl(pnob, 0, pnob->rx_cq_id, 1); | ||
166 | |||
167 | return 0; | ||
168 | } | ||
169 | |||
170 | static int benet_close(struct net_device *netdev) | ||
171 | { | ||
172 | struct be_net_object *pnob = netdev_priv(netdev); | ||
173 | struct be_adapter *adapter = pnob->adapter; | ||
174 | |||
175 | netif_stop_queue(netdev); | ||
176 | synchronize_irq(netdev->irq); | ||
177 | |||
178 | be_wait_nic_tx_cmplx_cmpl(pnob); | ||
179 | adapter->dev_state = BE_DEV_STATE_INIT; | ||
180 | netif_carrier_off(netdev); | ||
181 | |||
182 | adapter->port0_link_sts = BE_PORT_LINK_DOWN; | ||
183 | adapter->port1_link_sts = BE_PORT_LINK_DOWN; | ||
184 | be_disable_intr(pnob); | ||
185 | be_disable_eq_intr(pnob); | ||
186 | napi_disable(&pnob->napi); | ||
187 | |||
188 | return 0; | ||
189 | } | ||
190 | |||
191 | /* | ||
192 | * Set a MAC address for BE. | ||
193 | * Takes netdev and a void pointer as arguments. | ||
194 | * The pointer holds the new address to be used. | ||
195 | */ | ||
196 | static int benet_set_mac_addr(struct net_device *netdev, void *p) | ||
197 | { | ||
198 | struct sockaddr *addr = p; | ||
199 | struct be_net_object *pnob = netdev_priv(netdev); | ||
200 | |||
201 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | ||
202 | be_rxf_mac_address_read_write(&pnob->fn_obj, 0, 0, false, true, false, | ||
203 | netdev->dev_addr, NULL, NULL); | ||
204 | /* | ||
205 | * Since we are doing Active-Passive failover, both | ||
206 | * ports should have matching MAC addresses at all times. | ||
207 | */ | ||
208 | be_rxf_mac_address_read_write(&pnob->fn_obj, 1, 0, false, true, false, | ||
209 | netdev->dev_addr, NULL, NULL); | ||
210 | |||
211 | return 0; | ||
212 | } | ||
213 | |||
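| /* Timer handler: if the statistics FWCMD has not completed in time, | ||
| * release the waiter so that a stats request cannot hang forever. | ||
| */ | ||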
214 | void be_get_stats_timer_handler(unsigned long context) | ||
215 | { | ||
216 | struct be_timer_ctxt *ctxt = (struct be_timer_ctxt *)context; | ||
217 | |||
218 | if (atomic_read(&ctxt->get_stat_flag)) { | ||
219 | atomic_dec(&ctxt->get_stat_flag); | ||
220 | up((void *)ctxt->get_stat_sem_addr); | ||
221 | } | ||
222 | del_timer(&ctxt->get_stats_timer); | ||
223 | return; | ||
224 | } | ||
225 | |||
226 | void be_get_stat_cb(void *context, int status, | ||
227 | struct MCC_WRB_AMAP *optional_wrb) | ||
228 | { | ||
229 | struct be_timer_ctxt *ctxt = (struct be_timer_ctxt *)context; | ||
230 | /* | ||
231 | * Just up the semaphore if get_stat_flag | ||
232 | * reads 1, so that the waiter can continue. | ||
233 | * If it is 0, it was already handled by the timer handler. | ||
234 | */ | ||
235 | del_timer(&ctxt->get_stats_timer); | ||
236 | if (atomic_read(&ctxt->get_stat_flag)) { | ||
237 | atomic_dec(&ctxt->get_stat_flag); | ||
238 | up((void *)ctxt->get_stat_sem_addr); | ||
239 | } | ||
240 | } | ||
241 | |||
242 | struct net_device_stats *benet_get_stats(struct net_device *dev) | ||
243 | { | ||
244 | struct be_net_object *pnob = netdev_priv(dev); | ||
245 | struct be_adapter *adapter = pnob->adapter; | ||
246 | u64 pa; | ||
247 | struct be_timer_ctxt *ctxt = &adapter->timer_ctxt; | ||
248 | |||
249 | if (adapter->dev_state != BE_DEV_STATE_OPEN) { | ||
250 | /* Return previously read stats */ | ||
251 | return &(adapter->benet_stats); | ||
252 | } | ||
253 | /* Get Physical Addr */ | ||
254 | pa = pci_map_single(adapter->pdev, adapter->eth_statsp, | ||
255 | sizeof(struct FWCMD_ETH_GET_STATISTICS), | ||
256 | PCI_DMA_FROMDEVICE); | ||
257 | ctxt->get_stat_sem_addr = (unsigned long)&adapter->get_eth_stat_sem; | ||
258 | atomic_inc(&ctxt->get_stat_flag); | ||
259 | |||
260 | be_rxf_query_eth_statistics(&pnob->fn_obj, adapter->eth_statsp, | ||
261 | cpu_to_le64(pa), be_get_stat_cb, ctxt, | ||
262 | NULL); | ||
263 | |||
264 | ctxt->get_stats_timer.data = (unsigned long)ctxt; | ||
265 | mod_timer(&ctxt->get_stats_timer, (jiffies + (HZ * 2))); | ||
266 | down((void *)ctxt->get_stat_sem_addr); /* callback will unblock us */ | ||
267 | |||
268 | /* Adding port0 and port1 stats. */ | ||
269 | adapter->benet_stats.rx_packets = | ||
270 | adapter->eth_statsp->params.response.p0recvdtotalframes + | ||
271 | adapter->eth_statsp->params.response.p1recvdtotalframes; | ||
272 | adapter->benet_stats.tx_packets = | ||
273 | adapter->eth_statsp->params.response.p0xmitunicastframes + | ||
274 | adapter->eth_statsp->params.response.p1xmitunicastframes; | ||
275 | adapter->benet_stats.tx_bytes = | ||
276 | adapter->eth_statsp->params.response.p0xmitbyteslsd + | ||
277 | adapter->eth_statsp->params.response.p1xmitbyteslsd; | ||
278 | adapter->benet_stats.rx_errors = | ||
279 | adapter->eth_statsp->params.response.p0crcerrors + | ||
280 | adapter->eth_statsp->params.response.p1crcerrors; | ||
281 | adapter->benet_stats.rx_errors += | ||
282 | adapter->eth_statsp->params.response.p0alignmentsymerrs + | ||
283 | adapter->eth_statsp->params.response.p1alignmentsymerrs; | ||
284 | adapter->benet_stats.rx_errors += | ||
285 | adapter->eth_statsp->params.response.p0inrangelenerrors + | ||
286 | adapter->eth_statsp->params.response.p1inrangelenerrors; | ||
287 | adapter->benet_stats.rx_bytes = | ||
288 | adapter->eth_statsp->params.response.p0recvdtotalbytesLSD + | ||
289 | adapter->eth_statsp->params.response.p1recvdtotalbytesLSD; | ||
290 | adapter->benet_stats.rx_crc_errors = | ||
291 | adapter->eth_statsp->params.response.p0crcerrors + | ||
292 | adapter->eth_statsp->params.response.p1crcerrors; | ||
293 | |||
294 | adapter->benet_stats.tx_packets += | ||
295 | adapter->eth_statsp->params.response.p0xmitmulticastframes + | ||
296 | adapter->eth_statsp->params.response.p1xmitmulticastframes; | ||
297 | adapter->benet_stats.tx_packets += | ||
298 | adapter->eth_statsp->params.response.p0xmitbroadcastframes + | ||
299 | adapter->eth_statsp->params.response.p1xmitbroadcastframes; | ||
300 | adapter->benet_stats.tx_errors = 0; | ||
301 | |||
302 | adapter->benet_stats.multicast = | ||
303 | adapter->eth_statsp->params.response.p0xmitmulticastframes + | ||
304 | adapter->eth_statsp->params.response.p1xmitmulticastframes; | ||
305 | |||
306 | adapter->benet_stats.rx_fifo_errors = | ||
307 | adapter->eth_statsp->params.response.p0rxfifooverflowdropped + | ||
308 | adapter->eth_statsp->params.response.p1rxfifooverflowdropped; | ||
309 | adapter->benet_stats.rx_frame_errors = | ||
310 | adapter->eth_statsp->params.response.p0alignmentsymerrs + | ||
311 | adapter->eth_statsp->params.response.p1alignmentsymerrs; | ||
312 | adapter->benet_stats.rx_length_errors = | ||
313 | adapter->eth_statsp->params.response.p0inrangelenerrors + | ||
314 | adapter->eth_statsp->params.response.p1inrangelenerrors; | ||
315 | adapter->benet_stats.rx_length_errors += | ||
316 | adapter->eth_statsp->params.response.p0outrangeerrors + | ||
317 | adapter->eth_statsp->params.response.p1outrangeerrors; | ||
318 | adapter->benet_stats.rx_length_errors += | ||
319 | adapter->eth_statsp->params.response.p0frametoolongerrors + | ||
320 | adapter->eth_statsp->params.response.p1frametoolongerrors; | ||
321 | |||
322 | pci_unmap_single(adapter->pdev, pa, | ||
323 | sizeof(struct FWCMD_ETH_GET_STATISTICS), | ||
324 | PCI_DMA_FROMDEVICE); | ||
325 | return &(adapter->benet_stats); | ||
326 | |||
327 | } | ||
328 | |||
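| /* Ring the TX send queue doorbell for nposted new WRBs, in chunks of | ||
| * at most CSR_ETH_MAX_SQPOSTS per doorbell write. | ||
| */ | ||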
329 | static void be_start_tx(struct be_net_object *pnob, u32 nposted) | ||
330 | { | ||
331 | #define CSR_ETH_MAX_SQPOSTS 255 | ||
332 | struct SQ_DB_AMAP sqdb; | ||
333 | |||
334 | sqdb.dw[0] = 0; | ||
335 | |||
336 | AMAP_SET_BITS_PTR(SQ_DB, cid, &sqdb, pnob->tx_q_id); | ||
337 | while (nposted) { | ||
338 | if (nposted > CSR_ETH_MAX_SQPOSTS) { | ||
339 | AMAP_SET_BITS_PTR(SQ_DB, numPosted, &sqdb, | ||
340 | CSR_ETH_MAX_SQPOSTS); | ||
341 | nposted -= CSR_ETH_MAX_SQPOSTS; | ||
342 | } else { | ||
343 | AMAP_SET_BITS_PTR(SQ_DB, numPosted, &sqdb, nposted); | ||
344 | nposted = 0; | ||
345 | } | ||
346 | PD_WRITE(&pnob->fn_obj, etx_sq_db, sqdb.dw[0]); | ||
347 | } | ||
348 | |||
349 | return; | ||
350 | } | ||
351 | |||
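| /* Recompute the TX rate in Mbits/s from the bytes sent since the last | ||
| * sample, at most once every two seconds. | ||
| */ | ||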
352 | static void update_tx_rate(struct be_adapter *adapter) | ||
353 | { | ||
354 | /* update the rate once in two seconds */ | ||
355 | if ((jiffies - adapter->eth_tx_jiffies) > 2 * (HZ)) { | ||
356 | u32 r; | ||
357 | r = adapter->eth_tx_bytes / | ||
358 | ((jiffies - adapter->eth_tx_jiffies) / (HZ)); | ||
359 | r = (r / 1000000); /* M bytes/s */ | ||
360 | adapter->be_stat.bes_eth_tx_rate = (r * 8); /* M bits/s */ | ||
361 | adapter->eth_tx_jiffies = jiffies; | ||
362 | adapter->eth_tx_bytes = 0; | ||
363 | } | ||
364 | } | ||
365 | |||
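| /* Count the WRBs needed for an skb: one for the linear data (if any) | ||
| * plus one per page fragment, walking the entire frag_list. | ||
| */ | ||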
366 | static int wrb_cnt_in_skb(struct sk_buff *skb) | ||
367 | { | ||
368 | int cnt = 0; | ||
369 | while (skb) { | ||
370 | if (skb->len > skb->data_len) | ||
371 | cnt++; | ||
372 | cnt += skb_shinfo(skb)->nr_frags; | ||
373 | skb = skb_shinfo(skb)->frag_list; | ||
374 | } | ||
375 | BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT); | ||
376 | return cnt; | ||
377 | } | ||
378 | |||
379 | static void wrb_fill(struct ETH_WRB_AMAP *wrb, u64 addr, int len) | ||
380 | { | ||
381 | AMAP_SET_BITS_PTR(ETH_WRB, frag_pa_hi, wrb, addr >> 32); | ||
382 | AMAP_SET_BITS_PTR(ETH_WRB, frag_pa_lo, wrb, addr & 0xFFFFFFFF); | ||
383 | AMAP_SET_BITS_PTR(ETH_WRB, frag_len, wrb, len); | ||
384 | } | ||
385 | |||
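| /* Fill the per-packet WRB attributes: CRC, LSO/MSS, TCP/UDP checksum | ||
| * offload and VLAN tag, based on the skb and the VLAN group state. | ||
| */ | ||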
386 | static void wrb_fill_extra(struct ETH_WRB_AMAP *wrb, struct sk_buff *skb, | ||
387 | struct be_net_object *pnob) | ||
388 | { | ||
389 | wrb->dw[2] = 0; | ||
390 | wrb->dw[3] = 0; | ||
391 | AMAP_SET_BITS_PTR(ETH_WRB, crc, wrb, 1); | ||
392 | if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) { | ||
393 | AMAP_SET_BITS_PTR(ETH_WRB, lso, wrb, 1); | ||
394 | AMAP_SET_BITS_PTR(ETH_WRB, lso_mss, wrb, | ||
395 | skb_shinfo(skb)->gso_size); | ||
396 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
397 | u8 proto = ((struct iphdr *)ip_hdr(skb))->protocol; | ||
398 | if (proto == IPPROTO_TCP) | ||
399 | AMAP_SET_BITS_PTR(ETH_WRB, tcpcs, wrb, 1); | ||
400 | else if (proto == IPPROTO_UDP) | ||
401 | AMAP_SET_BITS_PTR(ETH_WRB, udpcs, wrb, 1); | ||
402 | } | ||
403 | if (pnob->vlan_grp && vlan_tx_tag_present(skb)) { | ||
404 | AMAP_SET_BITS_PTR(ETH_WRB, vlan, wrb, 1); | ||
405 | AMAP_SET_BITS_PTR(ETH_WRB, vlan_tag, wrb, vlan_tx_tag_get(skb)); | ||
406 | } | ||
407 | } | ||
408 | |||
409 | static inline void wrb_copy_extra(struct ETH_WRB_AMAP *to, | ||
410 | struct ETH_WRB_AMAP *from) | ||
411 | { | ||
412 | |||
413 | to->dw[2] = from->dw[2]; | ||
414 | to->dw[3] = from->dw[3]; | ||
415 | } | ||
416 | |||
417 | /* Returns the actual count of wrbs used including a possible dummy */ | ||
418 | static int copy_skb_to_txq(struct be_net_object *pnob, struct sk_buff *skb, | ||
419 | u32 wrb_cnt, u32 *copied) | ||
420 | { | ||
421 | u64 busaddr; | ||
422 | struct ETH_WRB_AMAP *wrb = NULL, *first = NULL; | ||
423 | u32 i; | ||
424 | bool dummy = true; | ||
425 | struct pci_dev *pdev = pnob->adapter->pdev; | ||
426 | |||
427 | if (wrb_cnt & 1) | ||
428 | wrb_cnt++; | ||
429 | else | ||
430 | dummy = false; | ||
431 | |||
432 | atomic_add(wrb_cnt, &pnob->tx_q_used); | ||
433 | |||
434 | while (skb) { | ||
435 | if (skb->len > skb->data_len) { | ||
436 | int len = skb->len - skb->data_len; | ||
437 | busaddr = pci_map_single(pdev, skb->data, len, | ||
438 | PCI_DMA_TODEVICE); | ||
439 | busaddr = cpu_to_le64(busaddr); | ||
440 | wrb = &pnob->tx_q[pnob->tx_q_hd]; | ||
441 | if (first == NULL) { | ||
442 | wrb_fill_extra(wrb, skb, pnob); | ||
443 | first = wrb; | ||
444 | } else { | ||
445 | wrb_copy_extra(wrb, first); | ||
446 | } | ||
447 | wrb_fill(wrb, busaddr, len); | ||
448 | be_adv_txq_hd(pnob); | ||
449 | *copied += len; | ||
450 | } | ||
451 | |||
452 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | ||
453 | struct skb_frag_struct *frag = | ||
454 | &skb_shinfo(skb)->frags[i]; | ||
455 | busaddr = pci_map_page(pdev, frag->page, | ||
456 | frag->page_offset, frag->size, | ||
457 | PCI_DMA_TODEVICE); | ||
458 | busaddr = cpu_to_le64(busaddr); | ||
459 | wrb = &pnob->tx_q[pnob->tx_q_hd]; | ||
460 | if (first == NULL) { | ||
461 | wrb_fill_extra(wrb, skb, pnob); | ||
462 | first = wrb; | ||
463 | } else { | ||
464 | wrb_copy_extra(wrb, first); | ||
465 | } | ||
466 | wrb_fill(wrb, busaddr, frag->size); | ||
467 | be_adv_txq_hd(pnob); | ||
468 | *copied += frag->size; | ||
469 | } | ||
470 | skb = skb_shinfo(skb)->frag_list; | ||
471 | } | ||
472 | |||
473 | if (dummy) { | ||
474 | wrb = &pnob->tx_q[pnob->tx_q_hd]; | ||
475 | BUG_ON(first == NULL); | ||
476 | wrb_copy_extra(wrb, first); | ||
477 | wrb_fill(wrb, 0, 0); | ||
478 | be_adv_txq_hd(pnob); | ||
479 | } | ||
480 | AMAP_SET_BITS_PTR(ETH_WRB, complete, wrb, 1); | ||
481 | AMAP_SET_BITS_PTR(ETH_WRB, last, wrb, 1); | ||
482 | return wrb_cnt; | ||
483 | } | ||
484 | |||
485 | /* For each skb transmitted, tx_ctxt stores the number of WRBs at the | ||
486 | * start index and the skb pointer at the end index. | ||
487 | */ | ||
488 | static inline void be_tx_wrb_info_remember(struct be_net_object *pnob, | ||
489 | struct sk_buff *skb, int wrb_cnt, | ||
490 | u32 start) | ||
491 | { | ||
492 | *(u32 *) (&pnob->tx_ctxt[start]) = wrb_cnt; | ||
493 | index_adv(&start, wrb_cnt - 1, pnob->tx_q_len); | ||
494 | pnob->tx_ctxt[start] = skb; | ||
495 | } | ||
496 | |||
497 | static int benet_xmit(struct sk_buff *skb, struct net_device *netdev) | ||
498 | { | ||
499 | struct be_net_object *pnob = netdev_priv(netdev); | ||
500 | struct be_adapter *adapter = pnob->adapter; | ||
501 | u32 wrb_cnt, copied = 0; | ||
502 | u32 start = pnob->tx_q_hd; | ||
503 | |||
504 | adapter->be_stat.bes_tx_reqs++; | ||
505 | |||
506 | wrb_cnt = wrb_cnt_in_skb(skb); | ||
507 | spin_lock_bh(&adapter->txq_lock); | ||
508 | if ((pnob->tx_q_len - 2 - atomic_read(&pnob->tx_q_used)) <= wrb_cnt) { | ||
509 | netif_stop_queue(pnob->netdev); | ||
510 | spin_unlock_bh(&adapter->txq_lock); | ||
511 | adapter->be_stat.bes_tx_fails++; | ||
512 | return NETDEV_TX_BUSY; | ||
513 | } | ||
514 | spin_unlock_bh(&adapter->txq_lock); | ||
515 | |||
516 | wrb_cnt = copy_skb_to_txq(pnob, skb, wrb_cnt, &copied); | ||
517 | be_tx_wrb_info_remember(pnob, skb, wrb_cnt, start); | ||
518 | |||
519 | be_start_tx(pnob, wrb_cnt); | ||
520 | |||
521 | adapter->eth_tx_bytes += copied; | ||
522 | adapter->be_stat.bes_tx_wrbs += wrb_cnt; | ||
523 | update_tx_rate(adapter); | ||
524 | netdev->trans_start = jiffies; | ||
525 | |||
526 | return NETDEV_TX_OK; | ||
527 | } | ||
528 | |||
529 | /* | ||
530 | * This is the driver entry point to change the MTU of the device. | ||
531 | * Returns 0 on success and a negative errno on failure. | ||
532 | */ | ||
533 | static int benet_change_mtu(struct net_device *netdev, int new_mtu) | ||
534 | { | ||
535 | /* | ||
536 | * BE supports jumbo frame sizes up to 9000 bytes including the link layer | ||
537 | * header. Considering the different variants of frame formats possible, | ||
538 | * like VLAN and SNAP/LLC, the maximum possible value for the MTU is 8974 bytes. | ||
539 | */ | ||
540 | |||
541 | if (new_mtu < (ETH_ZLEN + ETH_FCS_LEN) || (new_mtu > BE_MAX_MTU)) { | ||
542 | dev_info(&netdev->dev, "Invalid MTU requested. " | ||
543 | "Must be between %d and %d bytes\n", | ||
544 | (ETH_ZLEN + ETH_FCS_LEN), BE_MAX_MTU); | ||
545 | return -EINVAL; | ||
546 | } | ||
547 | dev_info(&netdev->dev, "MTU changed from %d to %d\n", | ||
548 | netdev->mtu, new_mtu); | ||
549 | netdev->mtu = new_mtu; | ||
550 | return 0; | ||
551 | } | ||
552 | |||
553 | /* | ||
554 | * This is the driver entry point to register a VLAN group with the device. | ||
555 | */ | ||
556 | static void benet_vlan_register(struct net_device *netdev, | ||
557 | struct vlan_group *grp) | ||
558 | { | ||
559 | struct be_net_object *pnob = netdev_priv(netdev); | ||
560 | |||
561 | be_disable_eq_intr(pnob); | ||
562 | pnob->vlan_grp = grp; | ||
563 | pnob->num_vlans = 0; | ||
564 | be_enable_eq_intr(pnob); | ||
565 | } | ||
566 | |||
567 | /* | ||
568 | * This is the driver entry point to add the VLAN with id vlan_id | ||
569 | * to the device netdev. | ||
570 | */ | ||
571 | static void benet_vlan_add_vid(struct net_device *netdev, u16 vlan_id) | ||
572 | { | ||
573 | struct be_net_object *pnob = netdev_priv(netdev); | ||
574 | |||
575 | if (pnob->num_vlans == (BE_NUM_VLAN_SUPPORTED - 1)) { | ||
576 | /* no way to return an error */ | ||
577 | dev_info(&netdev->dev, | ||
578 | "BladeEngine: Cannot configure more than %d Vlans\n", | ||
579 | BE_NUM_VLAN_SUPPORTED); | ||
580 | return; | ||
581 | } | ||
582 | /* The new vlan tag will be in the slot indicated by num_vlans. */ | ||
583 | pnob->vlan_tag[pnob->num_vlans++] = vlan_id; | ||
584 | be_rxf_vlan_config(&pnob->fn_obj, false, pnob->num_vlans, | ||
585 | pnob->vlan_tag, NULL, NULL, NULL); | ||
586 | } | ||
587 | |||
588 | /* | ||
589 | * This is the driver entry point to remove the VLAN with id vlan_id | ||
590 | * from the device netdev. | ||
591 | */ | ||
592 | static void benet_vlan_rem_vid(struct net_device *netdev, u16 vlan_id) | ||
593 | { | ||
594 | struct be_net_object *pnob = netdev_priv(netdev); | ||
595 | |||
596 | u32 i, value; | ||
597 | |||
598 | /* | ||
599 | * In BladeEngine, we support 32 VLAN tag filters across both ports. | ||
600 | * To program a VLAN tag, the RXF_RTPR_CSR register is used. | ||
601 | * Each 32-bit value of RXF_RTDR_CSR can address 2 VLAN tag entries. | ||
602 | * The VLAN table is of depth 16; thus we support 32 tags. | ||
603 | */ | ||
604 | |||
605 | value = vlan_id | VLAN_VALID_BIT; | ||
606 | for (i = 0; i < BE_NUM_VLAN_SUPPORTED; i++) { | ||
607 | if (pnob->vlan_tag[i] == vlan_id) | ||
608 | break; | ||
609 | } | ||
610 | |||
611 | if (i == BE_NUM_VLAN_SUPPORTED) | ||
612 | return; | ||
613 | /* Now compact the VLAN tag array by removing the hole created. */ | ||
614 | while ((i + 1) < BE_NUM_VLAN_SUPPORTED) { | ||
615 | pnob->vlan_tag[i] = pnob->vlan_tag[i + 1]; | ||
616 | i++; | ||
617 | } | ||
618 | if ((i + 1) == BE_NUM_VLAN_SUPPORTED) | ||
619 | pnob->vlan_tag[i] = (u16) 0x0; | ||
620 | pnob->num_vlans--; | ||
621 | be_rxf_vlan_config(&pnob->fn_obj, false, pnob->num_vlans, | ||
622 | pnob->vlan_tag, NULL, NULL, NULL); | ||
623 | } | ||
624 | |||
625 | /* | ||
626 | * This function is called to program multicast | ||
627 | * addresses in the multicast filter of the ASIC. | ||
628 | */ | ||
629 | static void be_set_multicast_filter(struct net_device *netdev) | ||
630 | { | ||
631 | struct be_net_object *pnob = netdev_priv(netdev); | ||
632 | struct dev_mc_list *mc_ptr; | ||
633 | u8 mac_addr[32][ETH_ALEN]; | ||
634 | int i; | ||
635 | |||
636 | if (netdev->flags & IFF_ALLMULTI) { | ||
637 | /* set BE in Multicast promiscuous */ | ||
638 | be_rxf_multicast_config(&pnob->fn_obj, true, 0, NULL, NULL, | ||
639 | NULL, NULL); | ||
640 | return; | ||
641 | } | ||
642 | |||
643 | for (mc_ptr = netdev->mc_list, i = 0; mc_ptr; | ||
644 | mc_ptr = mc_ptr->next, i++) { | ||
645 | memcpy(&mac_addr[i][0], mc_ptr->dmi_addr, ETH_ALEN); | ||
646 | } | ||
647 | |||
648 | /* reset the promiscuous mode also. */ | ||
649 | be_rxf_multicast_config(&pnob->fn_obj, false, i, | ||
650 | &mac_addr[0][0], NULL, NULL, NULL); | ||
651 | } | ||
652 | |||
653 | /* | ||
654 | * This is the driver entry point to set the multicast list | ||
655 | * for the device netdev. This function is used to | ||
656 | * set promiscuous mode, multicast promiscuous mode, | ||
657 | * or plain multicast filtering. | ||
658 | */ | ||
659 | static void benet_set_multicast_list(struct net_device *netdev) | ||
660 | { | ||
661 | struct be_net_object *pnob = netdev_priv(netdev); | ||
662 | |||
663 | if (netdev->flags & IFF_PROMISC) { | ||
664 | be_rxf_promiscuous(&pnob->fn_obj, 1, 1, NULL, NULL, NULL); | ||
665 | } else { | ||
666 | be_rxf_promiscuous(&pnob->fn_obj, 0, 0, NULL, NULL, NULL); | ||
667 | be_set_multicast_filter(netdev); | ||
668 | } | ||
669 | } | ||
670 | |||
671 | int benet_init(struct net_device *netdev) | ||
672 | { | ||
673 | struct be_net_object *pnob = netdev_priv(netdev); | ||
674 | struct be_adapter *adapter = pnob->adapter; | ||
675 | |||
676 | ether_setup(netdev); | ||
677 | |||
678 | netdev->open = &benet_open; | ||
679 | netdev->stop = &benet_close; | ||
680 | netdev->hard_start_xmit = &benet_xmit; | ||
681 | |||
682 | netdev->get_stats = &benet_get_stats; | ||
683 | |||
684 | netdev->set_multicast_list = &benet_set_multicast_list; | ||
685 | |||
686 | netdev->change_mtu = &benet_change_mtu; | ||
687 | netdev->set_mac_address = &benet_set_mac_addr; | ||
688 | |||
689 | netdev->vlan_rx_register = benet_vlan_register; | ||
690 | netdev->vlan_rx_add_vid = benet_vlan_add_vid; | ||
691 | netdev->vlan_rx_kill_vid = benet_vlan_rem_vid; | ||
692 | |||
693 | netdev->features = | ||
694 | NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_RX | NETIF_F_TSO | | ||
695 | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM; | ||
696 | |||
697 | netdev->flags |= IFF_MULTICAST; | ||
698 | |||
699 | /* If the device is DAC capable, set the HIGHDMA flag for the netdevice. */ | ||
700 | if (adapter->dma_64bit_cap) | ||
701 | netdev->features |= NETIF_F_HIGHDMA; | ||
702 | |||
703 | SET_ETHTOOL_OPS(netdev, &be_ethtool_ops); | ||
704 | return 0; | ||
705 | } | ||
diff --git a/drivers/staging/benet/benet.h b/drivers/staging/benet/benet.h deleted file mode 100644 index 09a1f0817722..000000000000 --- a/drivers/staging/benet/benet.h +++ /dev/null | |||
@@ -1,429 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | #ifndef _BENET_H_ | ||
18 | #define _BENET_H_ | ||
19 | |||
20 | #include <linux/pci.h> | ||
21 | #include <linux/netdevice.h> | ||
22 | #include <linux/inet_lro.h> | ||
23 | #include "hwlib.h" | ||
24 | |||
25 | #define _SA_MODULE_NAME "net-driver" | ||
26 | |||
27 | #define VLAN_VALID_BIT 0x8000 | ||
28 | #define BE_NUM_VLAN_SUPPORTED 32 | ||
29 | #define BE_PORT_LINK_DOWN 0000 | ||
30 | #define BE_PORT_LINK_UP 0001 | ||
31 | #define BE_MAX_TX_FRAG_COUNT (30) | ||
32 | |||
33 | /* Flag bits for send operation */ | ||
34 | #define IPCS (1 << 0) /* Enable IP checksum offload */ | ||
35 | #define UDPCS (1 << 1) /* Enable UDP checksum offload */ | ||
36 | #define TCPCS (1 << 2) /* Enable TCP checksum offload */ | ||
37 | #define LSO (1 << 3) /* Enable Large Segment offload */ | ||
38 | #define ETHVLAN (1 << 4) /* Enable VLAN insert */ | ||
39 | #define ETHEVENT (1 << 5) /* Generate event on completion */ | ||
40 | #define ETHCOMPLETE (1 << 6) /* Generate completion when done */ | ||
41 | #define IPSEC (1 << 7) /* Enable IPSEC */ | ||
42 | #define FORWARD (1 << 8) /* Send the packet in forwarding path */ | ||
43 | #define FIN (1 << 9) /* Issue FIN segment */ | ||
44 | |||
45 | #define BE_MAX_MTU 8974 | ||
46 | |||
47 | #define BE_MAX_LRO_DESCRIPTORS 8 | ||
48 | #define BE_LRO_MAX_PKTS 64 | ||
49 | #define BE_MAX_FRAGS_PER_FRAME 6 | ||
50 | |||
51 | extern const char be_drvr_ver[]; | ||
52 | extern char be_fw_ver[]; | ||
53 | extern char be_driver_name[]; | ||
54 | |||
55 | extern struct ethtool_ops be_ethtool_ops; | ||
56 | |||
57 | #define BE_DEV_STATE_NONE 0 | ||
58 | #define BE_DEV_STATE_INIT 1 | ||
59 | #define BE_DEV_STATE_OPEN 2 | ||
60 | #define BE_DEV_STATE_SUSPEND 3 | ||
61 | |||
62 | /* This structure is used to describe physical fragments to use | ||
63 | * for DMAing data from NIC. | ||
64 | */ | ||
65 | struct be_recv_buffer { | ||
66 | struct list_head rxb_list; /* for maintaining a linked list */ | ||
67 | void *rxb_va; /* buffer virtual address */ | ||
68 | u32 rxb_pa_lo; /* low part of physical address */ | ||
69 | u32 rxb_pa_hi; /* high part of physical address */ | ||
70 | u32 rxb_len; /* length of recv buffer */ | ||
71 | void *rxb_ctxt; /* context for OSM driver to use */ | ||
72 | }; | ||
73 | |||
74 | /* | ||
75 | * fragment list to describe scattered data. | ||
76 | */ | ||
77 | struct be_tx_frag_list { | ||
78 | u32 txb_len; /* Size of this fragment */ | ||
79 | u32 txb_pa_lo; /* Lower 32 bits of 64 bit physical addr */ | ||
80 | u32 txb_pa_hi; /* Higher 32 bits of 64 bit physical addr */ | ||
81 | }; | ||
82 | |||
83 | struct be_rx_page_info { | ||
84 | struct page *page; | ||
85 | dma_addr_t bus; | ||
86 | u16 page_offset; | ||
87 | }; | ||
88 | |||
89 | /* | ||
90 | * This structure is the main tracking structure for a NIC interface. | ||
91 | */ | ||
92 | struct be_net_object { | ||
93 | /* MCC Ring - used to send fwcmds to embedded ARM processor */ | ||
94 | struct MCC_WRB_AMAP *mcc_q; /* VA of the start of the ring */ | ||
95 | u32 mcc_q_len; /* # of WRB entries in this ring */ | ||
96 | u32 mcc_q_size; | ||
97 | u32 mcc_q_hd; /* MCC ring head */ | ||
98 | u8 mcc_q_created; /* flag to help cleanup */ | ||
99 | struct be_mcc_object mcc_q_obj; /* BECLIB's MCC ring Object */ | ||
100 | dma_addr_t mcc_q_bus; /* DMA'ble bus address */ | ||
101 | |||
102 | /* MCC Completion Ring - FW responses to fwcmds sent from MCC ring */ | ||
103 | struct MCC_CQ_ENTRY_AMAP *mcc_cq; /* VA of the start of the ring */ | ||
104 | u32 mcc_cq_len; /* # of compl. entries in this ring */ | ||
105 | u32 mcc_cq_size; | ||
106 | u32 mcc_cq_tl; /* compl. ring tail */ | ||
107 | u8 mcc_cq_created; /* flag to help cleanup */ | ||
108 | struct be_cq_object mcc_cq_obj; /* BECLIB's MCC compl. ring object */ | ||
109 | u32 mcc_cq_id; /* MCC ring ID */ | ||
110 | dma_addr_t mcc_cq_bus; /* DMA'ble bus address */ | ||
111 | |||
112 | struct ring_desc mb_rd; /* RD for MCC_MAIL_BOX */ | ||
113 | void *mb_ptr; /* mailbox ptr to be freed */ | ||
114 | dma_addr_t mb_bus; /* DMA'ble bus address */ | ||
115 | u32 mb_size; | ||
116 | |||
117 | /* BEClib uses an array of context objects to track outstanding | ||
118 | * requests to the MCC. We need to allocate the same number of | ||
119 | * context entries as the number of entries in the MCC WRB ring. | ||
120 | */ | ||
121 | u32 mcc_wrb_ctxt_size; | ||
122 | void *mcc_wrb_ctxt; /* pointer to the context area */ | ||
123 | u32 mcc_wrb_ctxtLen; /* Number of entries in the context */ | ||
124 | /* | ||
125 | * NIC send request ring - used for xmitting raw ether frames. | ||
126 | */ | ||
127 | struct ETH_WRB_AMAP *tx_q; /* VA of the start of the ring */ | ||
128 | u32 tx_q_len; /* # of entries in the send ring */ | ||
129 | u32 tx_q_size; | ||
130 | u32 tx_q_hd; /* Head index. Next req. goes here */ | ||
131 | u32 tx_q_tl; /* Tail index; oldest outstanding req. */ | ||
132 | u8 tx_q_created; /* flag to help cleanup */ | ||
133 | struct be_ethsq_object tx_q_obj;/* BECLIB's send Q handle */ | ||
134 | dma_addr_t tx_q_bus; /* DMA'ble bus address */ | ||
135 | u32 tx_q_id; /* send queue ring ID */ | ||
136 | u32 tx_q_port; /* 0 no binding, 1 port A, 2 port B */ | ||
137 | atomic_t tx_q_used; /* # of WRBs used */ | ||
138 | /* ptr to an array in which we store context info for each send req. */ | ||
139 | void **tx_ctxt; | ||
140 | /* | ||
141 | * NIC Send compl. ring - completion status for all NIC frames xmitted. | ||
142 | */ | ||
143 | struct ETH_TX_COMPL_AMAP *tx_cq;/* VA of start of the ring */ | ||
144 | u32 txcq_len; /* # of entries in the ring */ | ||
145 | u32 tx_cq_size; | ||
146 | /* | ||
147 | * index into compl ring where the host expects next completion entry | ||
148 | */ | ||
149 | u32 tx_cq_tl; | ||
150 | u32 tx_cq_id; /* completion queue id */ | ||
151 | u8 tx_cq_created; /* flag to help cleanup */ | ||
152 | struct be_cq_object tx_cq_obj; | ||
153 | dma_addr_t tx_cq_bus; /* DMA'ble bus address */ | ||
154 | /* | ||
155 | * Event Queue - all completion entries post events here. | ||
156 | */ | ||
157 | struct EQ_ENTRY_AMAP *event_q; /* VA of start of event queue */ | ||
158 | u32 event_q_len; /* # of entries */ | ||
159 | u32 event_q_size; | ||
160 | u32 event_q_tl; /* Tail of the event queue */ | ||
161 | u32 event_q_id; /* Event queue ID */ | ||
162 | u8 event_q_created; /* flag to help cleanup */ | ||
163 | struct be_eq_object event_q_obj; /* Queue handle */ | ||
164 | dma_addr_t event_q_bus; /* DMA'ble bus address */ | ||
165 | /* | ||
166 | * NIC receive queue - Data buffers to be used for receiving unicast, | ||
167 | * broadcast and multicast frames are posted here. | ||
168 | */ | ||
169 | struct ETH_RX_D_AMAP *rx_q; /* VA of start of the queue */ | ||
170 | u32 rx_q_len; /* # of entries */ | ||
171 | u32 rx_q_size; | ||
172 | u32 rx_q_hd; /* Head of the queue */ | ||
173 | atomic_t rx_q_posted; /* number of posted buffers */ | ||
174 | u32 rx_q_id; /* queue ID */ | ||
175 | u8 rx_q_created; /* flag to help cleanup */ | ||
176 | struct be_ethrq_object rx_q_obj; /* NIC RX queue handle */ | ||
177 | dma_addr_t rx_q_bus; /* DMA'ble bus address */ | ||
178 | /* | ||
180 | * Pointer to an array of opaque context objects for use by the OSM driver | ||
180 | */ | ||
181 | void **rx_ctxt; | ||
182 | /* | ||
183 | * NIC unicast RX completion queue - all unicast ether frame completion | ||
184 | * statuses from BE come here. | ||
185 | */ | ||
186 | struct ETH_RX_COMPL_AMAP *rx_cq; /* VA of start of the queue */ | ||
187 | u32 rx_cq_len; /* # of entries */ | ||
188 | u32 rx_cq_size; | ||
189 | u32 rx_cq_tl; /* Tail of the queue */ | ||
190 | u32 rx_cq_id; /* queue ID */ | ||
191 | u8 rx_cq_created; /* flag to help cleanup */ | ||
192 | struct be_cq_object rx_cq_obj; /* queue handle */ | ||
193 | dma_addr_t rx_cq_bus; /* DMA'ble bus address */ | ||
194 | struct be_function_object fn_obj; /* function object */ | ||
195 | bool fn_obj_created; | ||
196 | u32 rx_buf_size; /* Size of the RX buffers */ | ||
197 | |||
198 | struct net_device *netdev; | ||
199 | struct be_recv_buffer eth_rx_bufs[256]; /* to pass Rx buffer | ||
200 | addresses */ | ||
201 | struct be_adapter *adapter; /* Pointer to OSM adapter */ | ||
202 | u32 devno; /* OSM, network dev no. */ | ||
203 | u32 use_port; /* Current active port */ | ||
204 | struct be_rx_page_info *rx_page_info; /* Array of Rx buf pages */ | ||
205 | u32 rx_pg_info_hd; /* Head of queue */ | ||
206 | int rxbuf_post_fail; /* RxBuff posting fail count */ | ||
207 | bool rx_pg_shared; /* Is an allocated page shared as two frags? */ | ||
208 | struct vlan_group *vlan_grp; | ||
209 | u32 num_vlans; /* Number of vlans in BE's filter */ | ||
210 | u16 vlan_tag[BE_NUM_VLAN_SUPPORTED]; /* vlans currently configured */ | ||
211 | struct napi_struct napi; | ||
212 | struct net_lro_mgr lro_mgr; | ||
213 | struct net_lro_desc lro_desc[BE_MAX_LRO_DESCRIPTORS]; | ||
214 | }; | ||
215 | |||
216 | #define NET_FH(np) (&(np)->fn_obj) | ||
217 | |||
218 | /* | ||
219 | * BE driver statistics. | ||
220 | */ | ||
221 | struct be_drvr_stat { | ||
222 | u32 bes_tx_reqs; /* number of TX requests initiated */ | ||
223 | u32 bes_tx_fails; /* number of TX requests that failed */ | ||
224 | u32 bes_fwd_reqs; /* number of send reqs through forwarding i/f */ | ||
225 | u32 bes_tx_wrbs; /* number of tx WRBs used */ | ||
226 | |||
227 | u32 bes_ints; /* number of interrupts */ | ||
228 | u32 bes_polls; /* number of times NAPI called poll function */ | ||
229 | u32 bes_events; /* total event entries processed */ | ||
230 | u32 bes_tx_events; /* number of tx completion events */ | ||
231 | u32 bes_rx_events; /* number of ucast rx completion events */ | ||
232 | u32 bes_tx_compl; /* number of tx completion entries processed */ | ||
233 | u32 bes_rx_compl; /* number of rx completion entries | ||
234 | processed */ | ||
235 | u32 bes_ethrx_post_fail; /* number of ethrx buffer alloc | ||
236 | failures */ | ||
237 | /* | ||
238 | * number of non Ethernet II frames dropped where the | ||
239 | * frame length > the length field of the MAC header | ||
240 | */ | ||
241 | u32 bes_802_3_dropped_frames; | ||
242 | /* | ||
243 | * number of non Ethernet II frames malformed, where the | ||
244 | * frame length < the length field of the MAC header | ||
245 | */ | ||
246 | u32 bes_802_3_malformed_frames; | ||
247 | u32 bes_ips; /* interrupts / sec */ | ||
248 | u32 bes_prev_ints; /* bes_ints at last IPS calculation */ | ||
249 | u16 bes_eth_tx_rate; /* ETH TX rate - Mb/sec */ | ||
250 | u16 bes_eth_rx_rate; /* ETH RX rate - Mb/sec */ | ||
251 | u32 bes_rx_coal; /* Num pkts coalesced */ | ||
252 | u32 bes_rx_flush; /* Num times coalesced */ | ||
253 | u32 bes_link_change_physical; /*Num of times physical link changed */ | ||
254 | u32 bes_link_change_virtual; /*Num of times virtual link changed */ | ||
255 | u32 bes_rx_misc_pkts; /* Misc pkts received */ | ||
256 | }; | ||
257 | |||
258 | /* Maximum interrupt delay (in microseconds) allowed */ | ||
259 | #define MAX_EQD 120 | ||
260 | |||
261 | /* | ||
262 | * timer to prevent system shutdown from hanging forever if h/w stops responding | ||
263 | */ | ||
264 | struct be_timer_ctxt { | ||
265 | atomic_t get_stat_flag; | ||
266 | struct timer_list get_stats_timer; | ||
267 | unsigned long get_stat_sem_addr; | ||
268 | }; | ||
269 | |||
270 | /* This structure is the main BladeEngine driver context. */ | ||
271 | struct be_adapter { | ||
272 | struct net_device *netdevp; | ||
273 | struct be_drvr_stat be_stat; | ||
274 | struct net_device_stats benet_stats; | ||
275 | |||
276 | /* PCI BAR mapped addresses */ | ||
277 | u8 __iomem *csr_va; /* CSR */ | ||
278 | u8 __iomem *db_va; /* Door Bell */ | ||
279 | u8 __iomem *pci_va; /* PCI Config */ | ||
280 | |||
281 | struct tasklet_struct sts_handler; | ||
282 | struct timer_list cq_timer; | ||
283 | spinlock_t int_lock; /* to protect the isr field in adapter */ | ||
284 | |||
285 | struct FWCMD_ETH_GET_STATISTICS *eth_statsp; | ||
286 | /* | ||
287 | * Controlled via ethtool: determines whether the RX checksum | ||
288 | * computed by BE is honored. If this is true, whatever | ||
289 | * checksum BE reports for a received packet is passed on | ||
290 | * to the stack as-is. Otherwise the stack recalculates | ||
291 | * the checksum itself. | ||
292 | */ | ||
293 | bool rx_csum; | ||
294 | /* | ||
295 | * Controlled via ethtool: the RX packet coalescing setting | ||
296 | * (max packets aggregated). Coalescing is enabled when this | ||
297 | * is greater than 0 and less than 16; otherwise it is | ||
298 | * disabled. | ||
299 | */ | ||
300 | u32 max_rx_coal; | ||
301 | struct pci_dev *pdev; /* Pointer to OS's PCI device */ | ||
302 | |||
303 | spinlock_t txq_lock; /* to stop/wake queue based on tx_q_used */ | ||
304 | |||
305 | u32 isr; /* copy of Intr status reg. */ | ||
306 | |||
307 | u32 port0_link_sts; /* Port 0 link status */ | ||
308 | u32 port1_link_sts; /* port 1 link status */ | ||
309 | struct BE_LINK_STATUS *be_link_sts; | ||
310 | |||
311 | /* pointer to the first netobject of this adapter */ | ||
312 | struct be_net_object *net_obj; | ||
313 | |||
314 | /* Flags to indicate what to clean up */ | ||
315 | bool tasklet_started; | ||
316 | bool isr_registered; | ||
317 | /* | ||
318 | * adaptive interrupt coalescing (AIC) related | ||
319 | */ | ||
320 | bool enable_aic; /* 1 if AIC is enabled */ | ||
321 | u16 min_eqd; /* minimum EQ delay in usec */ | ||
322 | u16 max_eqd; /* maximum EQ delay in usec */ | ||
323 | u16 cur_eqd; /* current EQ delay in usec */ | ||
324 | /* | ||
325 | * book keeping for interrupt / sec and TX/RX rate calculation | ||
326 | */ | ||
327 | ulong ips_jiffies; /* jiffies at last IPS calc */ | ||
328 | u32 eth_tx_bytes; | ||
329 | ulong eth_tx_jiffies; | ||
330 | u32 eth_rx_bytes; | ||
331 | ulong eth_rx_jiffies; | ||
332 | |||
333 | struct semaphore get_eth_stat_sem; | ||
334 | |||
335 | /* timer ctxt to prevent shutdown hanging due to an unresponsive BE */ | ||
336 | struct be_timer_ctxt timer_ctxt; | ||
337 | |||
338 | #define BE_MAX_MSIX_VECTORS 32 | ||
339 | #define BE_MAX_REQ_MSIX_VECTORS 1 /* only one EQ in Linux driver */ | ||
340 | struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS]; | ||
341 | bool msix_enabled; | ||
342 | bool dma_64bit_cap; /* whether the device is DAC capable */ | ||
343 | u8 dev_state; /* The current state of the device */ | ||
344 | u8 dev_pm_state; /* The State of device before going to suspend */ | ||
345 | }; | ||
346 | |||
347 | /* | ||
348 | * Every second we look at the ints/sec and adjust eq_delay | ||
349 | * between adapter->min_eqd and adapter->max_eqd to keep the ints/sec between | ||
350 | * IPS_HI_WM and IPS_LO_WM. | ||
351 | */ | ||
352 | #define IPS_HI_WM 18000 | ||
353 | #define IPS_LO_WM 8000 | ||
354 | |||
355 | |||
356 | static inline void index_adv(u32 *index, u32 val, u32 limit) | ||
357 | { | ||
358 | BUG_ON(limit & (limit-1)); | ||
359 | *index = (*index + val) & (limit - 1); | ||
360 | } | ||
361 | |||
362 | static inline void index_inc(u32 *index, u32 limit) | ||
363 | { | ||
364 | BUG_ON(limit & (limit-1)); | ||
365 | *index = (*index + 1) & (limit - 1); | ||
366 | } | ||
367 | |||
368 | static inline void be_adv_eq_tl(struct be_net_object *pnob) | ||
369 | { | ||
370 | index_inc(&pnob->event_q_tl, pnob->event_q_len); | ||
371 | } | ||
372 | |||
373 | static inline void be_adv_txq_hd(struct be_net_object *pnob) | ||
374 | { | ||
375 | index_inc(&pnob->tx_q_hd, pnob->tx_q_len); | ||
376 | } | ||
377 | |||
378 | static inline void be_adv_txq_tl(struct be_net_object *pnob) | ||
379 | { | ||
380 | index_inc(&pnob->tx_q_tl, pnob->tx_q_len); | ||
381 | } | ||
382 | |||
383 | static inline void be_adv_txcq_tl(struct be_net_object *pnob) | ||
384 | { | ||
385 | index_inc(&pnob->tx_cq_tl, pnob->txcq_len); | ||
386 | } | ||
387 | |||
388 | static inline void be_adv_rxq_hd(struct be_net_object *pnob) | ||
389 | { | ||
390 | index_inc(&pnob->rx_q_hd, pnob->rx_q_len); | ||
391 | } | ||
392 | |||
393 | static inline void be_adv_rxcq_tl(struct be_net_object *pnob) | ||
394 | { | ||
395 | index_inc(&pnob->rx_cq_tl, pnob->rx_cq_len); | ||
396 | } | ||
397 | |||
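| /* Index of the last WRB of the request at the TX queue tail, computed | ||
| * from the WRB count stored in tx_ctxt at the tail index. | ||
| */ | ||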
398 | static inline u32 tx_compl_lastwrb_idx_get(struct be_net_object *pnob) | ||
399 | { | ||
400 | return (pnob->tx_q_tl + *(u32 *)&pnob->tx_ctxt[pnob->tx_q_tl] - 1) | ||
401 | & (pnob->tx_q_len - 1); | ||
402 | } | ||
403 | |||
404 | int benet_init(struct net_device *); | ||
405 | int be_ethtool_ioctl(struct net_device *, struct ifreq *); | ||
406 | struct net_device_stats *benet_get_stats(struct net_device *); | ||
407 | void be_process_intr(unsigned long context); | ||
408 | irqreturn_t be_int(int irq, void *dev); | ||
409 | void be_post_eth_rx_buffs(struct be_net_object *); | ||
410 | void be_get_stat_cb(void *, int, struct MCC_WRB_AMAP *); | ||
411 | void be_get_stats_timer_handler(unsigned long); | ||
412 | void be_wait_nic_tx_cmplx_cmpl(struct be_net_object *); | ||
413 | void be_print_link_info(struct BE_LINK_STATUS *); | ||
414 | void be_update_link_status(struct be_adapter *); | ||
415 | void be_init_procfs(struct be_adapter *); | ||
416 | void be_cleanup_procfs(struct be_adapter *); | ||
417 | int be_poll(struct napi_struct *, int); | ||
418 | struct ETH_RX_COMPL_AMAP *be_get_rx_cmpl(struct be_net_object *); | ||
419 | void be_notify_cmpl(struct be_net_object *, int, int, int); | ||
420 | void be_enable_intr(struct be_net_object *); | ||
421 | void be_enable_eq_intr(struct be_net_object *); | ||
422 | void be_disable_intr(struct be_net_object *); | ||
423 | void be_disable_eq_intr(struct be_net_object *); | ||
424 | int be_set_uc_mac_adr(struct be_net_object *, u8, u8, u8, | ||
425 | u8 *, mcc_wrb_cqe_callback, void *); | ||
426 | int be_get_flow_ctl(struct be_function_object *pFnObj, bool *, bool *); | ||
427 | void process_one_tx_compl(struct be_net_object *pnob, u32 end_idx); | ||
428 | |||
429 | #endif /* _BENET_H_ */ | ||
diff --git a/drivers/staging/benet/bestatus.h b/drivers/staging/benet/bestatus.h deleted file mode 100644 index 59c7a4b62223..000000000000 --- a/drivers/staging/benet/bestatus.h +++ /dev/null | |||
@@ -1,103 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | #ifndef _BESTATUS_H_ | ||
18 | #define _BESTATUS_H_ | ||
19 | |||
20 | #define BE_SUCCESS (0x00000000L) | ||
21 | /* | ||
22 | * MessageId: BE_PENDING | ||
23 | * The BladeEngine driver call succeeded, and the operation is pending. | ||
24 | */ | ||
25 | #define BE_PENDING (0x20070001L) | ||
26 | #define BE_STATUS_PENDING (BE_PENDING) | ||
27 | /* | ||
28 | * MessageId: BE_NOT_OK | ||
29 | * An error occurred. | ||
30 | */ | ||
31 | #define BE_NOT_OK (0xE0070002L) | ||
32 | /* | ||
33 | * MessageId: BE_STATUS_SYSTEM_RESOURCES | ||
34 | * Insufficient host system resources exist to complete the API. | ||
35 | */ | ||
36 | #define BE_STATUS_SYSTEM_RESOURCES (0xE0070003L) | ||
37 | /* | ||
38 | * MessageId: BE_STATUS_CHIP_RESOURCES | ||
39 | * Insufficient chip resources exist to complete the API. | ||
40 | */ | ||
41 | #define BE_STATUS_CHIP_RESOURCES (0xE0070004L) | ||
42 | /* | ||
43 | * MessageId: BE_STATUS_NO_RESOURCE | ||
44 | * Insufficient resources to complete request. | ||
45 | */ | ||
46 | #define BE_STATUS_NO_RESOURCE (0xE0070005L) | ||
47 | /* | ||
48 | * MessageId: BE_STATUS_BUSY | ||
49 | * Resource is currently busy. | ||
50 | */ | ||
51 | #define BE_STATUS_BUSY (0xE0070006L) | ||
52 | /* | ||
53 | * MessageId: BE_STATUS_INVALID_PARAMETER | ||
54 | * Invalid Parameter in request. | ||
55 | */ | ||
56 | #define BE_STATUS_INVALID_PARAMETER (0xE0000007L) | ||
57 | /* | ||
58 | * MessageId: BE_STATUS_NOT_SUPPORTED | ||
59 | * Requested operation is not supported. | ||
60 | */ | ||
61 | #define BE_STATUS_NOT_SUPPORTED (0xE000000DL) | ||
62 | |||
63 | /* | ||
64 | * *************************************************************************** | ||
65 | * E T H E R N E T S T A T U S | ||
66 | * *************************************************************************** | ||
67 | */ | ||
68 | |||
69 | /* | ||
70 | * MessageId: BE_ETH_TX_ERROR | ||
71 | * The Ethernet device driver failed to transmit a packet. | ||
72 | */ | ||
73 | #define BE_ETH_TX_ERROR (0xE0070101L) | ||
74 | |||
75 | /* | ||
76 | * *************************************************************************** | ||
77 | * S H A R E D S T A T U S | ||
78 | * *************************************************************************** | ||
79 | */ | ||
80 | |||
81 | /* | ||
82 | * MessageId: BE_STATUS_VBD_INVALID_VERSION | ||
83 | * The device driver is not compatible with this version of the VBD. | ||
84 | */ | ||
85 | #define BE_STATUS_INVALID_VERSION (0xE0070402L) | ||
86 | /* | ||
87 | * MessageId: BE_STATUS_DOMAIN_DENIED | ||
88 | * The operation failed to complete due to insufficient access | ||
89 | * rights for the requesting domain. | ||
90 | */ | ||
91 | #define BE_STATUS_DOMAIN_DENIED (0xE0070403L) | ||
92 | /* | ||
93 | * MessageId: BE_STATUS_TCP_NOT_STARTED | ||
94 | * The embedded TCP/IP stack has not been started. | ||
95 | */ | ||
96 | #define BE_STATUS_TCP_NOT_STARTED (0xE0070409L) | ||
97 | /* | ||
98 | * MessageId: BE_STATUS_NO_MCC_WRB | ||
99 | * No free MCC WRBs are available for posting the request. | ||
100 | */ | ||
101 | #define BE_STATUS_NO_MCC_WRB (0xE0070414L) | ||
102 | |||
103 | #endif /* _BESTATUS_H_ */ | ||
diff --git a/drivers/staging/benet/cev.h b/drivers/staging/benet/cev.h deleted file mode 100644 index 30996920a544..000000000000 --- a/drivers/staging/benet/cev.h +++ /dev/null | |||
@@ -1,243 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | /* | ||
18 | * Autogenerated by srcgen version: 0127 | ||
19 | */ | ||
20 | #ifndef __cev_amap_h__ | ||
21 | #define __cev_amap_h__ | ||
22 | #include "ep.h" | ||
23 | |||
24 | /* | ||
25 | * Host Interrupt Status Register 0. The first of four application | ||
26 | * interrupt status registers. This register contains the interrupts | ||
27 | * for Event Queues EQ0 through EQ31. | ||
28 | */ | ||
29 | struct BE_CEV_ISR0_CSR_AMAP { | ||
30 | u8 interrupt0; /* DWORD 0 */ | ||
31 | u8 interrupt1; /* DWORD 0 */ | ||
32 | u8 interrupt2; /* DWORD 0 */ | ||
33 | u8 interrupt3; /* DWORD 0 */ | ||
34 | u8 interrupt4; /* DWORD 0 */ | ||
35 | u8 interrupt5; /* DWORD 0 */ | ||
36 | u8 interrupt6; /* DWORD 0 */ | ||
37 | u8 interrupt7; /* DWORD 0 */ | ||
38 | u8 interrupt8; /* DWORD 0 */ | ||
39 | u8 interrupt9; /* DWORD 0 */ | ||
40 | u8 interrupt10; /* DWORD 0 */ | ||
41 | u8 interrupt11; /* DWORD 0 */ | ||
42 | u8 interrupt12; /* DWORD 0 */ | ||
43 | u8 interrupt13; /* DWORD 0 */ | ||
44 | u8 interrupt14; /* DWORD 0 */ | ||
45 | u8 interrupt15; /* DWORD 0 */ | ||
46 | u8 interrupt16; /* DWORD 0 */ | ||
47 | u8 interrupt17; /* DWORD 0 */ | ||
48 | u8 interrupt18; /* DWORD 0 */ | ||
49 | u8 interrupt19; /* DWORD 0 */ | ||
50 | u8 interrupt20; /* DWORD 0 */ | ||
51 | u8 interrupt21; /* DWORD 0 */ | ||
52 | u8 interrupt22; /* DWORD 0 */ | ||
53 | u8 interrupt23; /* DWORD 0 */ | ||
54 | u8 interrupt24; /* DWORD 0 */ | ||
55 | u8 interrupt25; /* DWORD 0 */ | ||
56 | u8 interrupt26; /* DWORD 0 */ | ||
57 | u8 interrupt27; /* DWORD 0 */ | ||
58 | u8 interrupt28; /* DWORD 0 */ | ||
59 | u8 interrupt29; /* DWORD 0 */ | ||
60 | u8 interrupt30; /* DWORD 0 */ | ||
61 | u8 interrupt31; /* DWORD 0 */ | ||
62 | } __packed; | ||
63 | struct CEV_ISR0_CSR_AMAP { | ||
64 | u32 dw[1]; | ||
65 | }; | ||
66 | |||
67 | /* | ||
68 | * Host Interrupt Status Register 1. The second of four application | ||
69 | * interrupt status registers. This register contains the interrupts | ||
70 | * for Event Queues EQ32 through EQ63. | ||
71 | */ | ||
72 | struct BE_CEV_ISR1_CSR_AMAP { | ||
73 | u8 interrupt32; /* DWORD 0 */ | ||
74 | u8 interrupt33; /* DWORD 0 */ | ||
75 | u8 interrupt34; /* DWORD 0 */ | ||
76 | u8 interrupt35; /* DWORD 0 */ | ||
77 | u8 interrupt36; /* DWORD 0 */ | ||
78 | u8 interrupt37; /* DWORD 0 */ | ||
79 | u8 interrupt38; /* DWORD 0 */ | ||
80 | u8 interrupt39; /* DWORD 0 */ | ||
81 | u8 interrupt40; /* DWORD 0 */ | ||
82 | u8 interrupt41; /* DWORD 0 */ | ||
83 | u8 interrupt42; /* DWORD 0 */ | ||
84 | u8 interrupt43; /* DWORD 0 */ | ||
85 | u8 interrupt44; /* DWORD 0 */ | ||
86 | u8 interrupt45; /* DWORD 0 */ | ||
87 | u8 interrupt46; /* DWORD 0 */ | ||
88 | u8 interrupt47; /* DWORD 0 */ | ||
89 | u8 interrupt48; /* DWORD 0 */ | ||
90 | u8 interrupt49; /* DWORD 0 */ | ||
91 | u8 interrupt50; /* DWORD 0 */ | ||
92 | u8 interrupt51; /* DWORD 0 */ | ||
93 | u8 interrupt52; /* DWORD 0 */ | ||
94 | u8 interrupt53; /* DWORD 0 */ | ||
95 | u8 interrupt54; /* DWORD 0 */ | ||
96 | u8 interrupt55; /* DWORD 0 */ | ||
97 | u8 interrupt56; /* DWORD 0 */ | ||
98 | u8 interrupt57; /* DWORD 0 */ | ||
99 | u8 interrupt58; /* DWORD 0 */ | ||
100 | u8 interrupt59; /* DWORD 0 */ | ||
101 | u8 interrupt60; /* DWORD 0 */ | ||
102 | u8 interrupt61; /* DWORD 0 */ | ||
103 | u8 interrupt62; /* DWORD 0 */ | ||
104 | u8 interrupt63; /* DWORD 0 */ | ||
105 | } __packed; | ||
106 | struct CEV_ISR1_CSR_AMAP { | ||
107 | u32 dw[1]; | ||
108 | }; | ||
109 | /* | ||
110 | * Host Interrupt Status Register 2. The third of four application | ||
111 | * interrupt status registers. This register contains the interrupts | ||
112 | * for Event Queues EQ64 through EQ95. | ||
113 | */ | ||
114 | struct BE_CEV_ISR2_CSR_AMAP { | ||
115 | u8 interrupt64; /* DWORD 0 */ | ||
116 | u8 interrupt65; /* DWORD 0 */ | ||
117 | u8 interrupt66; /* DWORD 0 */ | ||
118 | u8 interrupt67; /* DWORD 0 */ | ||
119 | u8 interrupt68; /* DWORD 0 */ | ||
120 | u8 interrupt69; /* DWORD 0 */ | ||
121 | u8 interrupt70; /* DWORD 0 */ | ||
122 | u8 interrupt71; /* DWORD 0 */ | ||
123 | u8 interrupt72; /* DWORD 0 */ | ||
124 | u8 interrupt73; /* DWORD 0 */ | ||
125 | u8 interrupt74; /* DWORD 0 */ | ||
126 | u8 interrupt75; /* DWORD 0 */ | ||
127 | u8 interrupt76; /* DWORD 0 */ | ||
128 | u8 interrupt77; /* DWORD 0 */ | ||
129 | u8 interrupt78; /* DWORD 0 */ | ||
130 | u8 interrupt79; /* DWORD 0 */ | ||
131 | u8 interrupt80; /* DWORD 0 */ | ||
132 | u8 interrupt81; /* DWORD 0 */ | ||
133 | u8 interrupt82; /* DWORD 0 */ | ||
134 | u8 interrupt83; /* DWORD 0 */ | ||
135 | u8 interrupt84; /* DWORD 0 */ | ||
136 | u8 interrupt85; /* DWORD 0 */ | ||
137 | u8 interrupt86; /* DWORD 0 */ | ||
138 | u8 interrupt87; /* DWORD 0 */ | ||
139 | u8 interrupt88; /* DWORD 0 */ | ||
140 | u8 interrupt89; /* DWORD 0 */ | ||
141 | u8 interrupt90; /* DWORD 0 */ | ||
142 | u8 interrupt91; /* DWORD 0 */ | ||
143 | u8 interrupt92; /* DWORD 0 */ | ||
144 | u8 interrupt93; /* DWORD 0 */ | ||
145 | u8 interrupt94; /* DWORD 0 */ | ||
146 | u8 interrupt95; /* DWORD 0 */ | ||
147 | } __packed; | ||
148 | struct CEV_ISR2_CSR_AMAP { | ||
149 | u32 dw[1]; | ||
150 | }; | ||
151 | |||
152 | /* | ||
153 | * Host Interrupt Status Register 3. The fourth of four application | ||
154 | * interrupt status registers. This register contains the interrupts | ||
155 | * for Event Queues EQ96 through EQ127. | ||
156 | */ | ||
157 | struct BE_CEV_ISR3_CSR_AMAP { | ||
158 | u8 interrupt96; /* DWORD 0 */ | ||
159 | u8 interrupt97; /* DWORD 0 */ | ||
160 | u8 interrupt98; /* DWORD 0 */ | ||
161 | u8 interrupt99; /* DWORD 0 */ | ||
162 | u8 interrupt100; /* DWORD 0 */ | ||
163 | u8 interrupt101; /* DWORD 0 */ | ||
164 | u8 interrupt102; /* DWORD 0 */ | ||
165 | u8 interrupt103; /* DWORD 0 */ | ||
166 | u8 interrupt104; /* DWORD 0 */ | ||
167 | u8 interrupt105; /* DWORD 0 */ | ||
168 | u8 interrupt106; /* DWORD 0 */ | ||
169 | u8 interrupt107; /* DWORD 0 */ | ||
170 | u8 interrupt108; /* DWORD 0 */ | ||
171 | u8 interrupt109; /* DWORD 0 */ | ||
172 | u8 interrupt110; /* DWORD 0 */ | ||
173 | u8 interrupt111; /* DWORD 0 */ | ||
174 | u8 interrupt112; /* DWORD 0 */ | ||
175 | u8 interrupt113; /* DWORD 0 */ | ||
176 | u8 interrupt114; /* DWORD 0 */ | ||
177 | u8 interrupt115; /* DWORD 0 */ | ||
178 | u8 interrupt116; /* DWORD 0 */ | ||
179 | u8 interrupt117; /* DWORD 0 */ | ||
180 | u8 interrupt118; /* DWORD 0 */ | ||
181 | u8 interrupt119; /* DWORD 0 */ | ||
182 | u8 interrupt120; /* DWORD 0 */ | ||
183 | u8 interrupt121; /* DWORD 0 */ | ||
184 | u8 interrupt122; /* DWORD 0 */ | ||
185 | u8 interrupt123; /* DWORD 0 */ | ||
186 | u8 interrupt124; /* DWORD 0 */ | ||
187 | u8 interrupt125; /* DWORD 0 */ | ||
188 | u8 interrupt126; /* DWORD 0 */ | ||
189 | u8 interrupt127; /* DWORD 0 */ | ||
190 | } __packed; | ||
191 | struct CEV_ISR3_CSR_AMAP { | ||
192 | u32 dw[1]; | ||
193 | }; | ||
194 | |||
195 | /* Completions and Events block Registers. */ | ||
196 | struct BE_CEV_CSRMAP_AMAP { | ||
197 | u8 rsvd0[32]; /* DWORD 0 */ | ||
198 | u8 rsvd1[32]; /* DWORD 1 */ | ||
199 | u8 rsvd2[32]; /* DWORD 2 */ | ||
200 | u8 rsvd3[32]; /* DWORD 3 */ | ||
201 | struct BE_CEV_ISR0_CSR_AMAP isr0; | ||
202 | struct BE_CEV_ISR1_CSR_AMAP isr1; | ||
203 | struct BE_CEV_ISR2_CSR_AMAP isr2; | ||
204 | struct BE_CEV_ISR3_CSR_AMAP isr3; | ||
205 | u8 rsvd4[32]; /* DWORD 8 */ | ||
206 | u8 rsvd5[32]; /* DWORD 9 */ | ||
207 | u8 rsvd6[32]; /* DWORD 10 */ | ||
208 | u8 rsvd7[32]; /* DWORD 11 */ | ||
209 | u8 rsvd8[32]; /* DWORD 12 */ | ||
210 | u8 rsvd9[32]; /* DWORD 13 */ | ||
211 | u8 rsvd10[32]; /* DWORD 14 */ | ||
212 | u8 rsvd11[32]; /* DWORD 15 */ | ||
213 | u8 rsvd12[32]; /* DWORD 16 */ | ||
214 | u8 rsvd13[32]; /* DWORD 17 */ | ||
215 | u8 rsvd14[32]; /* DWORD 18 */ | ||
216 | u8 rsvd15[32]; /* DWORD 19 */ | ||
217 | u8 rsvd16[32]; /* DWORD 20 */ | ||
218 | u8 rsvd17[32]; /* DWORD 21 */ | ||
219 | u8 rsvd18[32]; /* DWORD 22 */ | ||
220 | u8 rsvd19[32]; /* DWORD 23 */ | ||
221 | u8 rsvd20[32]; /* DWORD 24 */ | ||
222 | u8 rsvd21[32]; /* DWORD 25 */ | ||
223 | u8 rsvd22[32]; /* DWORD 26 */ | ||
224 | u8 rsvd23[32]; /* DWORD 27 */ | ||
225 | u8 rsvd24[32]; /* DWORD 28 */ | ||
226 | u8 rsvd25[32]; /* DWORD 29 */ | ||
227 | u8 rsvd26[32]; /* DWORD 30 */ | ||
228 | u8 rsvd27[32]; /* DWORD 31 */ | ||
229 | u8 rsvd28[32]; /* DWORD 32 */ | ||
230 | u8 rsvd29[32]; /* DWORD 33 */ | ||
231 | u8 rsvd30[192]; /* DWORD 34 */ | ||
232 | u8 rsvd31[192]; /* DWORD 40 */ | ||
233 | u8 rsvd32[160]; /* DWORD 46 */ | ||
234 | u8 rsvd33[160]; /* DWORD 51 */ | ||
235 | u8 rsvd34[160]; /* DWORD 56 */ | ||
236 | u8 rsvd35[96]; /* DWORD 61 */ | ||
237 | u8 rsvd36[192][32]; /* DWORD 64 */ | ||
238 | } __packed; | ||
239 | struct CEV_CSRMAP_AMAP { | ||
240 | u32 dw[256]; | ||
241 | }; | ||
242 | |||
243 | #endif /* __cev_amap_h__ */ | ||
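Editorial note on the header above: the four ISR registers together expose one interrupt bit per event queue, with bit n of ISR k corresponding to EQ (32*k + n), covering EQ0 through EQ127. A minimal illustrative sketch of decoding pending EQs from the shadow structures follows; the helper is hypothetical and was never part of this header.

#include <linux/kernel.h>
#include "cev.h"

/* Hypothetical helper: report which EQs have their interrupt bit set. */
static void report_pending_eqs(const struct CEV_ISR0_CSR_AMAP *isr0,
			       const struct CEV_ISR1_CSR_AMAP *isr1,
			       const struct CEV_ISR2_CSR_AMAP *isr2,
			       const struct CEV_ISR3_CSR_AMAP *isr3)
{
	u32 isr[4] = { isr0->dw[0], isr1->dw[0], isr2->dw[0], isr3->dw[0] };
	unsigned int word, bit;

	for (word = 0; word < 4; word++)
		for (bit = 0; bit < 32; bit++)
			if (isr[word] & (1u << bit))
				printk(KERN_DEBUG "EQ%u pending\n",
				       word * 32 + bit);
}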
diff --git a/drivers/staging/benet/cq.c b/drivers/staging/benet/cq.c deleted file mode 100644 index 650458645433..000000000000 --- a/drivers/staging/benet/cq.c +++ /dev/null | |||
@@ -1,211 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | #include "hwlib.h" | ||
18 | #include "bestatus.h" | ||
19 | |||
20 | /* | ||
21 | * Completion Queue Objects | ||
22 | */ | ||
23 | /* | ||
24 | *============================================================================ | ||
25 | * P U B L I C R O U T I N E S | ||
26 | *============================================================================ | ||
27 | */ | ||
28 | |||
29 | /* | ||
30 | This routine creates a completion queue based on the client completion | ||
31 | queue configuration information. | ||
32 | |||
33 | |||
34 | FunctionObject - Handle to a function object | ||
35 | CqBaseVa - Base VA for a the CQ ring | ||
36 | NumEntries - CEV_CQ_CNT_* values | ||
37 | solEventEnable - 0 = All CQEs can generate Events if CQ is eventable | ||
38 | 1 = only CQEs with solicited bit set are eventable | ||
39 | eventable - Eventable CQ, generates interrupts. | ||
40 | nodelay - 1 = Force interrupt, relevant if CQ eventable. | ||
41 | Interrupt is asserted immediately after EQE | ||
42 | write is confirmed, regardless of EQ Timer | ||
43 | or watermark settings. | ||
44 | wme - Enable watermark based coalescing | ||
45 | wmThresh - High watermark (CQ fullness at which event | ||
46 | or interrupt should be asserted). These are the | ||
47 | CEV_WATERMARK encoded values. | ||
48 | EqObject - EQ Handle to assign to this CQ | ||
49 | ppCqObject - Internal CQ Handle returned. | ||
50 | |||
51 | Returns BE_SUCCESS if successful, otherwise a useful error code is | ||
52 | returned. | ||
53 | |||
54 | IRQL < DISPATCH_LEVEL | ||
55 | |||
56 | */ | ||
57 | int be_cq_create(struct be_function_object *pfob, | ||
58 | struct ring_desc *rd, u32 length, bool solicited_eventable, | ||
59 | bool no_delay, u32 wm_thresh, | ||
60 | struct be_eq_object *eq_object, struct be_cq_object *cq_object) | ||
61 | { | ||
62 | int status = BE_SUCCESS; | ||
63 | u32 num_entries_encoding; | ||
64 | u32 num_entries = length / sizeof(struct MCC_CQ_ENTRY_AMAP); | ||
65 | struct FWCMD_COMMON_CQ_CREATE *fwcmd = NULL; | ||
66 | struct MCC_WRB_AMAP *wrb = NULL; | ||
67 | u32 n; | ||
68 | unsigned long irql; | ||
69 | |||
70 | ASSERT(rd); | ||
71 | ASSERT(cq_object); | ||
72 | ASSERT(length % sizeof(struct MCC_CQ_ENTRY_AMAP) == 0); | ||
73 | |||
74 | switch (num_entries) { | ||
75 | case 256: | ||
76 | num_entries_encoding = CEV_CQ_CNT_256; | ||
77 | break; | ||
78 | case 512: | ||
79 | num_entries_encoding = CEV_CQ_CNT_512; | ||
80 | break; | ||
81 | case 1024: | ||
82 | num_entries_encoding = CEV_CQ_CNT_1024; | ||
83 | break; | ||
84 | default: | ||
85 | ASSERT(0); | ||
86 | return BE_STATUS_INVALID_PARAMETER; | ||
87 | } | ||
88 | |||
89 | /* | ||
90 | * All cq entries are the same size. Use iSCSI version | ||
91 | * as a test for the proper rd length. | ||
92 | */ | ||
93 | memset(cq_object, 0, sizeof(*cq_object)); | ||
94 | |||
95 | atomic_set(&cq_object->ref_count, 0); | ||
96 | cq_object->parent_function = pfob; | ||
97 | cq_object->eq_object = eq_object; | ||
98 | cq_object->num_entries = num_entries; | ||
99 | /* save for MCC cq processing */ | ||
100 | cq_object->va = rd->va; | ||
101 | |||
102 | /* map into UT. */ | ||
103 | length = num_entries * sizeof(struct MCC_CQ_ENTRY_AMAP); | ||
104 | |||
105 | spin_lock_irqsave(&pfob->post_lock, irql); | ||
106 | |||
107 | wrb = be_function_peek_mcc_wrb(pfob); | ||
108 | if (!wrb) { | ||
109 | ASSERT(wrb); | ||
110 | TRACE(DL_ERR, "No free MCC WRBs in create EQ."); | ||
111 | status = BE_STATUS_NO_MCC_WRB; | ||
112 | goto Error; | ||
113 | } | ||
114 | /* Prepares an embedded fwcmd, including request/response sizes. */ | ||
115 | fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_CQ_CREATE); | ||
116 | |||
117 | fwcmd->params.request.num_pages = PAGES_SPANNED(OFFSET_IN_PAGE(rd->va), | ||
118 | length); | ||
119 | |||
120 | AMAP_SET_BITS_PTR(CQ_CONTEXT, valid, &fwcmd->params.request.context, 1); | ||
121 | n = pfob->pci_function_number; | ||
122 | AMAP_SET_BITS_PTR(CQ_CONTEXT, Func, &fwcmd->params.request.context, n); | ||
123 | |||
124 | n = (eq_object != NULL); | ||
125 | AMAP_SET_BITS_PTR(CQ_CONTEXT, Eventable, | ||
126 | &fwcmd->params.request.context, n); | ||
127 | AMAP_SET_BITS_PTR(CQ_CONTEXT, Armed, &fwcmd->params.request.context, 1); | ||
128 | |||
129 | n = eq_object ? eq_object->eq_id : 0; | ||
130 | AMAP_SET_BITS_PTR(CQ_CONTEXT, EQID, &fwcmd->params.request.context, n); | ||
131 | AMAP_SET_BITS_PTR(CQ_CONTEXT, Count, | ||
132 | &fwcmd->params.request.context, num_entries_encoding); | ||
133 | |||
134 | n = 0; /* Protection Domain is always 0 in Linux driver */ | ||
135 | AMAP_SET_BITS_PTR(CQ_CONTEXT, PD, &fwcmd->params.request.context, n); | ||
136 | AMAP_SET_BITS_PTR(CQ_CONTEXT, NoDelay, | ||
137 | &fwcmd->params.request.context, no_delay); | ||
138 | AMAP_SET_BITS_PTR(CQ_CONTEXT, SolEvent, | ||
139 | &fwcmd->params.request.context, solicited_eventable); | ||
140 | |||
141 | n = (wm_thresh != 0xFFFFFFFF); | ||
142 | AMAP_SET_BITS_PTR(CQ_CONTEXT, WME, &fwcmd->params.request.context, n); | ||
143 | |||
144 | n = (n ? wm_thresh : 0); | ||
145 | AMAP_SET_BITS_PTR(CQ_CONTEXT, Watermark, | ||
146 | &fwcmd->params.request.context, n); | ||
147 | /* Create a page list for the FWCMD. */ | ||
148 | be_rd_to_pa_list(rd, fwcmd->params.request.pages, | ||
149 | ARRAY_SIZE(fwcmd->params.request.pages)); | ||
150 | |||
151 | /* Post the f/w command */ | ||
152 | status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL, | ||
153 | NULL, NULL, fwcmd, NULL); | ||
154 | if (status != BE_SUCCESS) { | ||
155 | TRACE(DL_ERR, "MCC to create CQ failed."); | ||
156 | goto Error; | ||
157 | } | ||
158 | /* Remember the CQ id. */ | ||
159 | cq_object->cq_id = fwcmd->params.response.cq_id; | ||
160 | |||
161 | /* insert this cq into eq_object reference */ | ||
162 | if (eq_object) { | ||
163 | atomic_inc(&eq_object->ref_count); | ||
164 | list_add_tail(&cq_object->cqlist_for_eq, | ||
165 | &eq_object->cq_list_head); | ||
166 | } | ||
167 | |||
168 | Error: | ||
169 | spin_unlock_irqrestore(&pfob->post_lock, irql); | ||
170 | |||
171 | if (pfob->pend_queue_driving && pfob->mcc) { | ||
172 | pfob->pend_queue_driving = 0; | ||
173 | be_drive_mcc_wrb_queue(pfob->mcc); | ||
174 | } | ||
175 | return status; | ||
176 | } | ||
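A minimal usage sketch of be_cq_create (hypothetical caller, in the same hwlib.h/bestatus.h context as this file; it assumes the CQ ring memory has already been allocated and described by a struct ring_desc, and that the EQ was created beforehand):

/* Hypothetical example: 256-entry eventable CQ bound to an existing EQ. */
static int example_create_cq(struct be_function_object *pfob,
			     struct ring_desc *cq_rd,
			     struct be_eq_object *eq,
			     struct be_cq_object *cq)
{
	u32 length = 256 * sizeof(struct MCC_CQ_ENTRY_AMAP);
	int status;

	status = be_cq_create(pfob, cq_rd, length,
			      false,	   /* solicited_eventable */
			      false,	   /* no_delay */
			      0xFFFFFFFF,  /* wm_thresh: watermark disabled */
			      eq, cq);
	if (status != BE_SUCCESS)
		return status;

	/* ... use the CQ; tear it down with be_cq_destroy(cq) when idle. */
	return BE_SUCCESS;
}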
177 | |||
178 | /* | ||
179 | |||
180 | Dereferences the given object. Once the object's reference count drops to | ||
181 | zero, the object is destroyed and all resources that are held by this object | ||
182 | are released. The on-chip context is also destroyed along with the queue | ||
183 | ID, and any mappings made into the UT. | ||
184 | |||
185 | cq_object - CQ handle returned from cq_object_create. | ||
186 | |||
187 | returns the current reference count on the object | ||
188 | |||
189 | IRQL: IRQL < DISPATCH_LEVEL | ||
190 | */ | ||
191 | int be_cq_destroy(struct be_cq_object *cq_object) | ||
192 | { | ||
193 | int status = 0; | ||
194 | |||
195 | /* Nothing should reference this CQ at this point. */ | ||
196 | ASSERT(atomic_read(&cq_object->ref_count) == 0); | ||
197 | |||
198 | /* Send fwcmd to destroy the CQ. */ | ||
199 | status = be_function_ring_destroy(cq_object->parent_function, | ||
200 | cq_object->cq_id, FWCMD_RING_TYPE_CQ, | ||
201 | NULL, NULL, NULL, NULL); | ||
202 | ASSERT(status == 0); | ||
203 | |||
204 | /* Remove reference if this is an eventable CQ. */ | ||
205 | if (cq_object->eq_object) { | ||
206 | atomic_dec(&cq_object->eq_object->ref_count); | ||
207 | list_del(&cq_object->cqlist_for_eq); | ||
208 | } | ||
209 | return BE_SUCCESS; | ||
210 | } | ||
211 | |||
diff --git a/drivers/staging/benet/descriptors.h b/drivers/staging/benet/descriptors.h deleted file mode 100644 index 8da438c407d2..000000000000 --- a/drivers/staging/benet/descriptors.h +++ /dev/null | |||
@@ -1,71 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | /* | ||
18 | * Autogenerated by srcgen version: 0127 | ||
19 | */ | ||
20 | #ifndef __descriptors_amap_h__ | ||
21 | #define __descriptors_amap_h__ | ||
22 | |||
23 | /* | ||
24 | * --- IPC_NODE_ID_ENUM --- | ||
25 | * IPC processor id values | ||
26 | */ | ||
27 | #define TPOST_NODE_ID (0) /* TPOST ID */ | ||
28 | #define TPRE_NODE_ID (1) /* TPRE ID */ | ||
29 | #define TXULP0_NODE_ID (2) /* TXULP0 ID */ | ||
30 | #define TXULP1_NODE_ID (3) /* TXULP1 ID */ | ||
31 | #define TXULP2_NODE_ID (4) /* TXULP2 ID */ | ||
32 | #define RXULP0_NODE_ID (5) /* RXULP0 ID */ | ||
33 | #define RXULP1_NODE_ID (6) /* RXULP1 ID */ | ||
34 | #define RXULP2_NODE_ID (7) /* RXULP2 ID */ | ||
35 | #define MPU_NODE_ID (15) /* MPU ID */ | ||
36 | |||
37 | /* | ||
38 | * --- MAC_ID_ENUM --- | ||
39 | * Meaning of the mac_id field in rxpp_eth_d | ||
40 | */ | ||
41 | #define PORT0_HOST_MAC0 (0) /* PD 0, Port 0, host networking, MAC 0. */ | ||
42 | #define PORT0_HOST_MAC1 (1) /* PD 0, Port 0, host networking, MAC 1. */ | ||
43 | #define PORT0_STORAGE_MAC0 (2) /* PD 0, Port 0, host storage, MAC 0. */ | ||
44 | #define PORT0_STORAGE_MAC1 (3) /* PD 0, Port 0, host storage, MAC 1. */ | ||
45 | #define PORT1_HOST_MAC0 (4) /* PD 0, Port 1 host networking, MAC 0. */ | ||
46 | #define PORT1_HOST_MAC1 (5) /* PD 0, Port 1 host networking, MAC 1. */ | ||
47 | #define PORT1_STORAGE_MAC0 (6) /* PD 0, Port 1 host storage, MAC 0. */ | ||
48 | #define PORT1_STORAGE_MAC1 (7) /* PD 0, Port 1 host storage, MAC 1. */ | ||
49 | #define FIRST_VM_MAC (8) /* PD 1 MAC. Protection domains have IDs */ | ||
50 | /* from 0x8-0x26, one per PD. */ | ||
51 | #define LAST_VM_MAC (38) /* PD 31 MAC. */ | ||
52 | #define MGMT_MAC (39) /* Management port MAC. */ | ||
53 | #define MARBLE_MAC0 (59) /* Used for flushing function 0 receive */ | ||
54 | /* | ||
55 | * queues before re-using a torn-down | ||
56 | * receive ring. the DA = | ||
57 | * 00-00-00-00-00-00, and the MSB of the | ||
58 | * SA = 00 | ||
59 | */ | ||
60 | #define MARBLE_MAC1 (60) /* Used for flushing function 1 receive */ | ||
61 | /* | ||
62 | * queues before re-using a torn-down | ||
63 | * receive ring. the DA = | ||
64 | * 00-00-00-00-00-00, and the MSB of the | ||
65 | * SA != 00 | ||
66 | */ | ||
67 | #define NULL_MAC (61) /* Promiscuous mode, indicates no match */ | ||
68 | #define MCAST_MAC (62) /* Multicast match. */ | ||
69 | #define BCAST_MATCH (63) /* Broadcast match. */ | ||
70 | |||
71 | #endif /* __descriptors_amap_h__ */ | ||
diff --git a/drivers/staging/benet/doorbells.h b/drivers/staging/benet/doorbells.h deleted file mode 100644 index 550cc4d5d6f7..000000000000 --- a/drivers/staging/benet/doorbells.h +++ /dev/null | |||
@@ -1,179 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | /* | ||
18 | * Autogenerated by srcgen version: 0127 | ||
19 | */ | ||
20 | #ifndef __doorbells_amap_h__ | ||
21 | #define __doorbells_amap_h__ | ||
22 | |||
23 | /* The TX/RDMA send queue doorbell. */ | ||
24 | struct BE_SQ_DB_AMAP { | ||
25 | u8 cid[11]; /* DWORD 0 */ | ||
26 | u8 rsvd0[5]; /* DWORD 0 */ | ||
27 | u8 numPosted[14]; /* DWORD 0 */ | ||
28 | u8 rsvd1[2]; /* DWORD 0 */ | ||
29 | } __packed; | ||
30 | struct SQ_DB_AMAP { | ||
31 | u32 dw[1]; | ||
32 | }; | ||
33 | |||
34 | /* The receive queue doorbell. */ | ||
35 | struct BE_RQ_DB_AMAP { | ||
36 | u8 rq[10]; /* DWORD 0 */ | ||
37 | u8 rsvd0[13]; /* DWORD 0 */ | ||
38 | u8 Invalidate; /* DWORD 0 */ | ||
39 | u8 numPosted[8]; /* DWORD 0 */ | ||
40 | } __packed; | ||
41 | struct RQ_DB_AMAP { | ||
42 | u32 dw[1]; | ||
43 | }; | ||
44 | |||
45 | /* | ||
46 | * The CQ/EQ doorbell. Software MUST set reserved fields in this | ||
47 | * descriptor to zero, otherwise (CEV) hardware will not execute the | ||
48 | * doorbell (flagging a bad_db_qid error instead). | ||
49 | */ | ||
50 | struct BE_CQ_DB_AMAP { | ||
51 | u8 qid[10]; /* DWORD 0 */ | ||
52 | u8 rsvd0[4]; /* DWORD 0 */ | ||
53 | u8 rearm; /* DWORD 0 */ | ||
54 | u8 event; /* DWORD 0 */ | ||
55 | u8 num_popped[13]; /* DWORD 0 */ | ||
56 | u8 rsvd1[3]; /* DWORD 0 */ | ||
57 | } __packed; | ||
58 | struct CQ_DB_AMAP { | ||
59 | u32 dw[1]; | ||
60 | }; | ||
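A hedged illustration of the zero-reserved-fields requirement above. The register pointer, the helper name, and the LSB-first packing of the fields within DWORD 0 are assumptions made for this sketch, not statements from the header.

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical: ring a CQ doorbell for 'num_popped' processed CQEs. */
static void example_ring_cq_db(void __iomem *cq_db_reg, u32 cq_id,
			       u32 num_popped, bool rearm)
{
	/*
	 * Assumed LSB-first packing, matching the field order above:
	 * qid[0:9], rsvd0[10:13], rearm[14], event[15], num_popped[16:28],
	 * rsvd1[29:31].  Reserved bits (and the event bit here) stay zero.
	 */
	u32 db = (cq_id & 0x3FF) |
		 ((rearm ? 1u : 0u) << 14) |
		 ((num_popped & 0x1FFF) << 16);

	iowrite32(db, cq_db_reg);	/* register address is an assumption */
}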
61 | |||
62 | struct BE_TPM_RQ_DB_AMAP { | ||
63 | u8 qid[10]; /* DWORD 0 */ | ||
64 | u8 rsvd0[6]; /* DWORD 0 */ | ||
65 | u8 numPosted[11]; /* DWORD 0 */ | ||
66 | u8 mss_cnt[5]; /* DWORD 0 */ | ||
67 | } __packed; | ||
68 | struct TPM_RQ_DB_AMAP { | ||
69 | u32 dw[1]; | ||
70 | }; | ||
71 | |||
72 | /* | ||
73 | * Post WRB Queue Doorbell Register used by the host Storage stack | ||
74 | * to notify the controller of a posted Work Request Block | ||
75 | */ | ||
76 | struct BE_WRB_POST_DB_AMAP { | ||
77 | u8 wrb_cid[10]; /* DWORD 0 */ | ||
78 | u8 rsvd0[6]; /* DWORD 0 */ | ||
79 | u8 wrb_index[8]; /* DWORD 0 */ | ||
80 | u8 numberPosted[8]; /* DWORD 0 */ | ||
81 | } __packed; | ||
82 | struct WRB_POST_DB_AMAP { | ||
83 | u32 dw[1]; | ||
84 | }; | ||
85 | |||
86 | /* | ||
87 | * Update Default PDU Queue Doorbell Register used to communicate | ||
88 | * to the controller that the driver has stopped processing the queue | ||
89 | * and where in the queue it stopped, this is | ||
90 | * a CQ Entry Type. Used by storage driver. | ||
91 | */ | ||
92 | struct BE_DEFAULT_PDU_DB_AMAP { | ||
93 | u8 qid[10]; /* DWORD 0 */ | ||
94 | u8 rsvd0[4]; /* DWORD 0 */ | ||
95 | u8 rearm; /* DWORD 0 */ | ||
96 | u8 event; /* DWORD 0 */ | ||
97 | u8 cqproc[14]; /* DWORD 0 */ | ||
98 | u8 rsvd1[2]; /* DWORD 0 */ | ||
99 | } __packed; | ||
100 | struct DEFAULT_PDU_DB_AMAP { | ||
101 | u32 dw[1]; | ||
102 | }; | ||
103 | |||
104 | /* Management Command and Controller default fragment ring */ | ||
105 | struct BE_MCC_DB_AMAP { | ||
106 | u8 rid[11]; /* DWORD 0 */ | ||
107 | u8 rsvd0[5]; /* DWORD 0 */ | ||
108 | u8 numPosted[14]; /* DWORD 0 */ | ||
109 | u8 rsvd1[2]; /* DWORD 0 */ | ||
110 | } __packed; | ||
111 | struct MCC_DB_AMAP { | ||
112 | u32 dw[1]; | ||
113 | }; | ||
114 | |||
115 | /* | ||
116 | * Used for bootstrapping the Host interface. This register is | ||
117 | * used for driver communication with the MPU when no MCC Rings exist. | ||
118 | * The software must write this register twice to post any MCC | ||
119 | * command. First, it writes the register with hi=1 and the upper bits of | ||
120 | * the physical address for the MCC_MAILBOX structure. Software must poll | ||
121 | * the ready bit until this is acknowledged. Then, software writes the | ||
122 | * register with hi=0 and the lower bits of the address. It must | ||
123 | * poll the ready bit until the MCC command is complete. Upon completion, | ||
124 | * the MCC_MAILBOX will contain a valid completion queue entry. | ||
125 | */ | ||
126 | struct BE_MPU_MAILBOX_DB_AMAP { | ||
127 | u8 ready; /* DWORD 0 */ | ||
128 | u8 hi; /* DWORD 0 */ | ||
129 | u8 address[30]; /* DWORD 0 */ | ||
130 | } __packed; | ||
131 | struct MPU_MAILBOX_DB_AMAP { | ||
132 | u32 dw[1]; | ||
133 | }; | ||
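A sketch of the two-phase bootstrap sequence described above. Everything concrete in it is an assumption for illustration only: the accessor functions, the poll bound, and the exact split of the mailbox physical address between the two writes (shown here as bits 34 and up first, then bits 4-33 of a 16-byte-aligned address).

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Hypothetical sketch of the mailbox bootstrap protocol described above. */
static int example_bootstrap_post(void __iomem *mailbox_db, u64 mbox_pa)
{
	u32 val;
	int tries;

	/* Phase 1: hi = 1 (bit 1), upper address bits in bits 2-31. */
	val = 0x2 | ((u32)(mbox_pa >> 34) << 2);
	iowrite32(val, mailbox_db);

	/* Poll the 'ready' bit (bit 0) until the MPU acknowledges. */
	for (tries = 0; tries < 1000; tries++) {
		if (ioread32(mailbox_db) & 0x1)
			break;
		udelay(100);
	}
	if (tries == 1000)
		return -EIO;

	/* Phase 2: hi = 0, lower address bits (assumed bits 4-33). */
	val = ((u32)(mbox_pa >> 4) & 0x3FFFFFFF) << 2;
	iowrite32(val, mailbox_db);

	/* Poll 'ready' again; MCC_MAILBOX then holds a valid completion. */
	for (tries = 0; tries < 1000; tries++) {
		if (ioread32(mailbox_db) & 0x1)
			break;
		udelay(100);
	}
	return tries < 1000 ? 0 : -EIO;
}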
134 | |||
135 | /* | ||
136 | * This is the protection domain doorbell register map. Note that | ||
137 | * while this map shows doorbells for all Blade Engine supported | ||
138 | * protocols, not all of these may be valid in a given function or | ||
139 | * protection domain. It is the responsibility of the application | ||
140 | * accessing the doorbells to know which are valid. Each doorbell | ||
141 | * occupies 32 bytes of space, but unless otherwise specified, | ||
142 | * only the first 4 bytes should be written. There are 32 instances | ||
143 | * of these doorbells for the host and 31 virtual machines respectively. | ||
144 | * The host and VMs will only map the doorbell pages belonging to its | ||
145 | * protection domain. It will not be able to touch the doorbells for | ||
146 | * another VM. The doorbells are the only registers directly accessible | ||
147 | * by a virtual machine. Similarly, there are 511 additional | ||
148 | * doorbells for RDMA protection domains. PD 0 for RDMA shares | ||
149 | * the same physical protection domain doorbell page as ETH/iSCSI. | ||
150 | * | ||
151 | */ | ||
152 | struct BE_PROTECTION_DOMAIN_DBMAP_AMAP { | ||
153 | u8 rsvd0[512]; /* DWORD 0 */ | ||
154 | struct BE_SQ_DB_AMAP rdma_sq_db; | ||
155 | u8 rsvd1[7][32]; /* DWORD 17 */ | ||
156 | struct BE_WRB_POST_DB_AMAP iscsi_wrb_post_db; | ||
157 | u8 rsvd2[7][32]; /* DWORD 25 */ | ||
158 | struct BE_SQ_DB_AMAP etx_sq_db; | ||
159 | u8 rsvd3[7][32]; /* DWORD 33 */ | ||
160 | struct BE_RQ_DB_AMAP rdma_rq_db; | ||
161 | u8 rsvd4[7][32]; /* DWORD 41 */ | ||
162 | struct BE_DEFAULT_PDU_DB_AMAP iscsi_default_pdu_db; | ||
163 | u8 rsvd5[7][32]; /* DWORD 49 */ | ||
164 | struct BE_TPM_RQ_DB_AMAP tpm_rq_db; | ||
165 | u8 rsvd6[7][32]; /* DWORD 57 */ | ||
166 | struct BE_RQ_DB_AMAP erx_rq_db; | ||
167 | u8 rsvd7[7][32]; /* DWORD 65 */ | ||
168 | struct BE_CQ_DB_AMAP cq_db; | ||
169 | u8 rsvd8[7][32]; /* DWORD 73 */ | ||
170 | struct BE_MCC_DB_AMAP mpu_mcc_db; | ||
171 | u8 rsvd9[7][32]; /* DWORD 81 */ | ||
172 | struct BE_MPU_MAILBOX_DB_AMAP mcc_bootstrap_db; | ||
173 | u8 rsvd10[935][32]; /* DWORD 89 */ | ||
174 | } __packed; | ||
175 | struct PROTECTION_DOMAIN_DBMAP_AMAP { | ||
176 | u32 dw[1024]; | ||
177 | }; | ||
178 | |||
179 | #endif /* __doorbells_amap_h__ */ | ||
diff --git a/drivers/staging/benet/ep.h b/drivers/staging/benet/ep.h deleted file mode 100644 index 72fcf64a9ffb..000000000000 --- a/drivers/staging/benet/ep.h +++ /dev/null | |||
@@ -1,66 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | /* | ||
18 | * Autogenerated by srcgen version: 0127 | ||
19 | */ | ||
20 | #ifndef __ep_amap_h__ | ||
21 | #define __ep_amap_h__ | ||
22 | |||
23 | /* General Control and Status Register. */ | ||
24 | struct BE_EP_CONTROL_CSR_AMAP { | ||
25 | u8 m0_RxPbuf; /* DWORD 0 */ | ||
26 | u8 m1_RxPbuf; /* DWORD 0 */ | ||
27 | u8 m2_RxPbuf; /* DWORD 0 */ | ||
28 | u8 ff_en; /* DWORD 0 */ | ||
29 | u8 rsvd0[27]; /* DWORD 0 */ | ||
30 | u8 CPU_reset; /* DWORD 0 */ | ||
31 | } __packed; | ||
32 | struct EP_CONTROL_CSR_AMAP { | ||
33 | u32 dw[1]; | ||
34 | }; | ||
35 | |||
36 | /* Semaphore Register. */ | ||
37 | struct BE_EP_SEMAPHORE_CSR_AMAP { | ||
38 | u8 value[32]; /* DWORD 0 */ | ||
39 | } __packed; | ||
40 | struct EP_SEMAPHORE_CSR_AMAP { | ||
41 | u32 dw[1]; | ||
42 | }; | ||
43 | |||
44 | /* Embedded Processor Specific Registers. */ | ||
45 | struct BE_EP_CSRMAP_AMAP { | ||
46 | struct BE_EP_CONTROL_CSR_AMAP ep_control; | ||
47 | u8 rsvd0[32]; /* DWORD 1 */ | ||
48 | u8 rsvd1[32]; /* DWORD 2 */ | ||
49 | u8 rsvd2[32]; /* DWORD 3 */ | ||
50 | u8 rsvd3[32]; /* DWORD 4 */ | ||
51 | u8 rsvd4[32]; /* DWORD 5 */ | ||
52 | u8 rsvd5[8][128]; /* DWORD 6 */ | ||
53 | u8 rsvd6[32]; /* DWORD 38 */ | ||
54 | u8 rsvd7[32]; /* DWORD 39 */ | ||
55 | u8 rsvd8[32]; /* DWORD 40 */ | ||
56 | u8 rsvd9[32]; /* DWORD 41 */ | ||
57 | u8 rsvd10[32]; /* DWORD 42 */ | ||
58 | struct BE_EP_SEMAPHORE_CSR_AMAP ep_semaphore; | ||
59 | u8 rsvd11[32]; /* DWORD 44 */ | ||
60 | u8 rsvd12[19][32]; /* DWORD 45 */ | ||
61 | } __packed; | ||
62 | struct EP_CSRMAP_AMAP { | ||
63 | u32 dw[64]; | ||
64 | }; | ||
65 | |||
66 | #endif /* __ep_amap_h__ */ | ||
diff --git a/drivers/staging/benet/eq.c b/drivers/staging/benet/eq.c deleted file mode 100644 index db92ccd8fed8..000000000000 --- a/drivers/staging/benet/eq.c +++ /dev/null | |||
@@ -1,299 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | #include "hwlib.h" | ||
18 | #include "bestatus.h" | ||
19 | /* | ||
20 | This routine creates an event queue based on the client event | ||
21 | queue configuration information. | ||
22 | |||
23 | FunctionObject - Handle to a function object | ||
24 | EqBaseVa - Base VA for the EQ ring | ||
25 | SizeEncoding - The encoded size for the EQ entries. This value is | ||
26 | either CEV_EQ_SIZE_4 or CEV_EQ_SIZE_16 | ||
27 | NumEntries - CEV_CQ_CNT_* values. | ||
28 | Watermark - Enables watermark based coalescing. This parameter | ||
29 | must be of the type CEV_WMARK_* if watermarks | ||
30 | are enabled. If watermarks are to be disabled, | ||
31 | this value should be -1. | ||
32 | TimerDelay - If a timer delay is enabled this value should be the | ||
33 | time of the delay in 8 microsecond units. If | ||
34 | delays are not used this parameter should be | ||
35 | set to -1. | ||
36 | ppEqObject - Internal EQ Handle returned. | ||
37 | |||
38 | Returns BE_SUCCESS if successful, otherwise a useful error code | ||
39 | is returned. | ||
40 | |||
41 | IRQL < DISPATCH_LEVEL | ||
42 | */ | ||
43 | int | ||
44 | be_eq_create(struct be_function_object *pfob, | ||
45 | struct ring_desc *rd, u32 eqe_size, u32 num_entries, | ||
46 | u32 watermark, /* CEV_WMARK_* or -1 */ | ||
47 | u32 timer_delay, /* in 8us units, or -1 */ | ||
48 | struct be_eq_object *eq_object) | ||
49 | { | ||
50 | int status = BE_SUCCESS; | ||
51 | u32 num_entries_encoding, eqe_size_encoding, length; | ||
52 | struct FWCMD_COMMON_EQ_CREATE *fwcmd = NULL; | ||
53 | struct MCC_WRB_AMAP *wrb = NULL; | ||
54 | u32 n; | ||
55 | unsigned long irql; | ||
56 | |||
57 | ASSERT(rd); | ||
58 | ASSERT(eq_object); | ||
59 | |||
60 | switch (num_entries) { | ||
61 | case 256: | ||
62 | num_entries_encoding = CEV_EQ_CNT_256; | ||
63 | break; | ||
64 | case 512: | ||
65 | num_entries_encoding = CEV_EQ_CNT_512; | ||
66 | break; | ||
67 | case 1024: | ||
68 | num_entries_encoding = CEV_EQ_CNT_1024; | ||
69 | break; | ||
70 | case 2048: | ||
71 | num_entries_encoding = CEV_EQ_CNT_2048; | ||
72 | break; | ||
73 | case 4096: | ||
74 | num_entries_encoding = CEV_EQ_CNT_4096; | ||
75 | break; | ||
76 | default: | ||
77 | ASSERT(0); | ||
78 | return BE_STATUS_INVALID_PARAMETER; | ||
79 | } | ||
80 | |||
81 | switch (eqe_size) { | ||
82 | case 4: | ||
83 | eqe_size_encoding = CEV_EQ_SIZE_4; | ||
84 | break; | ||
85 | case 16: | ||
86 | eqe_size_encoding = CEV_EQ_SIZE_16; | ||
87 | break; | ||
88 | default: | ||
89 | ASSERT(0); | ||
90 | return BE_STATUS_INVALID_PARAMETER; | ||
91 | } | ||
92 | |||
93 | if ((eqe_size == 4 && num_entries < 1024) || | ||
94 | (eqe_size == 16 && num_entries == 4096)) { | ||
95 | TRACE(DL_ERR, "Bad EQ size. eqe_size:%d num_entries:%d", | ||
96 | eqe_size, num_entries); | ||
97 | ASSERT(0); | ||
98 | return BE_STATUS_INVALID_PARAMETER; | ||
99 | } | ||
100 | |||
101 | memset(eq_object, 0, sizeof(*eq_object)); | ||
102 | |||
103 | atomic_set(&eq_object->ref_count, 0); | ||
104 | eq_object->parent_function = pfob; | ||
105 | eq_object->eq_id = 0xFFFFFFFF; | ||
106 | |||
107 | INIT_LIST_HEAD(&eq_object->cq_list_head); | ||
108 | |||
109 | length = num_entries * eqe_size; | ||
110 | |||
111 | spin_lock_irqsave(&pfob->post_lock, irql); | ||
112 | |||
113 | wrb = be_function_peek_mcc_wrb(pfob); | ||
114 | if (!wrb) { | ||
115 | ASSERT(wrb); | ||
116 | TRACE(DL_ERR, "No free MCC WRBs in create EQ."); | ||
117 | status = BE_STATUS_NO_MCC_WRB; | ||
118 | goto Error; | ||
119 | } | ||
120 | /* Prepares an embedded fwcmd, including request/response sizes. */ | ||
121 | fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_EQ_CREATE); | ||
122 | |||
123 | fwcmd->params.request.num_pages = PAGES_SPANNED(OFFSET_IN_PAGE(rd->va), | ||
124 | length); | ||
125 | n = pfob->pci_function_number; | ||
126 | AMAP_SET_BITS_PTR(EQ_CONTEXT, Func, &fwcmd->params.request.context, n); | ||
127 | |||
128 | AMAP_SET_BITS_PTR(EQ_CONTEXT, valid, &fwcmd->params.request.context, 1); | ||
129 | |||
130 | AMAP_SET_BITS_PTR(EQ_CONTEXT, Size, | ||
131 | &fwcmd->params.request.context, eqe_size_encoding); | ||
132 | |||
133 | n = 0; /* Protection Domain is always 0 in Linux driver */ | ||
134 | AMAP_SET_BITS_PTR(EQ_CONTEXT, PD, &fwcmd->params.request.context, n); | ||
135 | |||
136 | /* Let the caller ARM the EQ with the doorbell. */ | ||
137 | AMAP_SET_BITS_PTR(EQ_CONTEXT, Armed, &fwcmd->params.request.context, 0); | ||
138 | |||
139 | AMAP_SET_BITS_PTR(EQ_CONTEXT, Count, &fwcmd->params.request.context, | ||
140 | num_entries_encoding); | ||
141 | |||
142 | n = pfob->pci_function_number * 32; | ||
143 | AMAP_SET_BITS_PTR(EQ_CONTEXT, EventVect, | ||
144 | &fwcmd->params.request.context, n); | ||
145 | if (watermark != -1) { | ||
146 | AMAP_SET_BITS_PTR(EQ_CONTEXT, WME, | ||
147 | &fwcmd->params.request.context, 1); | ||
148 | AMAP_SET_BITS_PTR(EQ_CONTEXT, Watermark, | ||
149 | &fwcmd->params.request.context, watermark); | ||
150 | ASSERT(watermark <= CEV_WMARK_240); | ||
151 | } else | ||
152 | AMAP_SET_BITS_PTR(EQ_CONTEXT, WME, | ||
153 | &fwcmd->params.request.context, 0); | ||
154 | if (timer_delay != -1) { | ||
155 | AMAP_SET_BITS_PTR(EQ_CONTEXT, TMR, | ||
156 | &fwcmd->params.request.context, 1); | ||
157 | |||
158 | ASSERT(timer_delay <= 250); /* max value according to EAS */ | ||
159 | timer_delay = min(timer_delay, (u32)250); | ||
160 | |||
161 | AMAP_SET_BITS_PTR(EQ_CONTEXT, Delay, | ||
162 | &fwcmd->params.request.context, timer_delay); | ||
163 | } else { | ||
164 | AMAP_SET_BITS_PTR(EQ_CONTEXT, TMR, | ||
165 | &fwcmd->params.request.context, 0); | ||
166 | } | ||
167 | /* Create a page list for the FWCMD. */ | ||
168 | be_rd_to_pa_list(rd, fwcmd->params.request.pages, | ||
169 | ARRAY_SIZE(fwcmd->params.request.pages)); | ||
170 | |||
171 | status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL, | ||
172 | NULL, NULL, fwcmd, NULL); | ||
173 | if (status != BE_SUCCESS) { | ||
174 | TRACE(DL_ERR, "MCC to create EQ failed."); | ||
175 | goto Error; | ||
176 | } | ||
177 | /* Get the EQ id. The MPU allocates the IDs. */ | ||
178 | eq_object->eq_id = fwcmd->params.response.eq_id; | ||
179 | |||
180 | Error: | ||
181 | spin_unlock_irqrestore(&pfob->post_lock, irql); | ||
182 | |||
183 | if (pfob->pend_queue_driving && pfob->mcc) { | ||
184 | pfob->pend_queue_driving = 0; | ||
185 | be_drive_mcc_wrb_queue(pfob->mcc); | ||
186 | } | ||
187 | return status; | ||
188 | } | ||
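A minimal usage sketch of be_eq_create (hypothetical caller, same hwlib.h/bestatus.h context as this file; the EQ ring memory is assumed to be pre-allocated and described by a struct ring_desc):

/* Hypothetical example: 1024 x 4-byte EQEs, no watermark or timer coalescing. */
static int example_create_eq(struct be_function_object *pfob,
			     struct ring_desc *eq_rd,
			     struct be_eq_object *eq)
{
	/* 4-byte EQEs require at least 1024 entries (see the check above). */
	return be_eq_create(pfob, eq_rd, 4 /* eqe_size */, 1024 /* entries */,
			    (u32)-1,	/* watermark disabled */
			    (u32)-1,	/* timer delay disabled */
			    eq);
}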
189 | |||
190 | /* | ||
191 | Dereferences the given object. Once the object's reference count drops to | ||
192 | zero, the object is destroyed and all resources that are held by this | ||
193 | object are released. The on-chip context is also destroyed along with | ||
194 | the queue ID, and any mappings made into the UT. | ||
195 | |||
196 | eq_object - EQ handle returned from eq_object_create. | ||
197 | |||
198 | Returns BE_SUCCESS if successful, otherwise a useful error code | ||
199 | is returned. | ||
200 | |||
201 | IRQL: IRQL < DISPATCH_LEVEL | ||
202 | */ | ||
203 | int be_eq_destroy(struct be_eq_object *eq_object) | ||
204 | { | ||
205 | int status = 0; | ||
206 | |||
207 | ASSERT(atomic_read(&eq_object->ref_count) == 0); | ||
208 | /* no CQs should reference this EQ now */ | ||
209 | ASSERT(list_empty(&eq_object->cq_list_head)); | ||
210 | |||
211 | /* Send fwcmd to destroy the EQ. */ | ||
212 | status = be_function_ring_destroy(eq_object->parent_function, | ||
213 | eq_object->eq_id, FWCMD_RING_TYPE_EQ, | ||
214 | NULL, NULL, NULL, NULL); | ||
215 | ASSERT(status == 0); | ||
216 | |||
217 | return BE_SUCCESS; | ||
218 | } | ||
219 | /* | ||
220 | *--------------------------------------------------------------------------- | ||
221 | * Function: be_eq_modify_delay | ||
222 | * Changes the EQ delay for a group of EQs. | ||
223 | * num_eq - The number of EQs in the eq_array to adjust. | ||
224 | * This also is the number of delay values in | ||
225 | * the eq_delay_array. | ||
226 | * eq_array - Array of struct be_eq_object pointers to adjust. | ||
227 | * eq_delay_array - Array of "num_eq" timer delays in units | ||
228 | * of microseconds. The be_eq_query_delay_range | ||
229 | * fwcmd returns the resolution and range of | ||
230 | * legal EQ delays. | ||
231 | * cb - | ||
232 | * cb_context - | ||
233 | * q_ctxt - Optional. Pointer to a previously allocated | ||
234 | * struct. If the MCC WRB ring is full, this | ||
235 | * structure is used to queue the operation. It | ||
236 | * will be posted to the MCC ring when space | ||
237 | * becomes available. All queued commands will | ||
238 | * be posted to the ring in the order they are | ||
239 | * received. It is always valid to pass a pointer to | ||
240 | * a generic be_generic_q_cntxt. However, | ||
241 | * the specific context structs | ||
242 | * are generally smaller than the generic struct. | ||
243 | * return pend_status - BE_SUCCESS (0) on success. | ||
244 | * BE_PENDING (positive value) if the FWCMD | ||
245 | * completion is pending. Negative error code on failure. | ||
246 | *------------------------------------------------------------------------- | ||
247 | */ | ||
248 | int | ||
249 | be_eq_modify_delay(struct be_function_object *pfob, | ||
250 | u32 num_eq, struct be_eq_object **eq_array, | ||
251 | u32 *eq_delay_array, mcc_wrb_cqe_callback cb, | ||
252 | void *cb_context, struct be_eq_modify_delay_q_ctxt *q_ctxt) | ||
253 | { | ||
254 | struct FWCMD_COMMON_MODIFY_EQ_DELAY *fwcmd = NULL; | ||
255 | struct MCC_WRB_AMAP *wrb = NULL; | ||
256 | int status = 0; | ||
257 | struct be_generic_q_ctxt *gen_ctxt = NULL; | ||
258 | u32 i; | ||
259 | unsigned long irql; | ||
260 | |||
261 | spin_lock_irqsave(&pfob->post_lock, irql); | ||
262 | |||
263 | wrb = be_function_peek_mcc_wrb(pfob); | ||
264 | if (!wrb) { | ||
265 | if (q_ctxt && cb) { | ||
266 | wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header; | ||
267 | gen_ctxt = (struct be_generic_q_ctxt *) q_ctxt; | ||
268 | gen_ctxt->context.bytes = sizeof(*q_ctxt); | ||
269 | } else { | ||
270 | status = BE_STATUS_NO_MCC_WRB; | ||
271 | goto Error; | ||
272 | } | ||
273 | } | ||
274 | /* Prepares an embedded fwcmd, including request/response sizes. */ | ||
275 | fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_MODIFY_EQ_DELAY); | ||
276 | |||
277 | ASSERT(num_eq > 0); | ||
278 | ASSERT(num_eq <= ARRAY_SIZE(fwcmd->params.request.delay)); | ||
279 | fwcmd->params.request.num_eq = num_eq; | ||
280 | for (i = 0; i < num_eq; i++) { | ||
281 | fwcmd->params.request.delay[i].eq_id = eq_array[i]->eq_id; | ||
282 | fwcmd->params.request.delay[i].delay_in_microseconds = | ||
283 | eq_delay_array[i]; | ||
284 | } | ||
285 | |||
286 | /* Post the f/w command */ | ||
287 | status = be_function_post_mcc_wrb(pfob, wrb, gen_ctxt, | ||
288 | cb, cb_context, NULL, NULL, fwcmd, NULL); | ||
289 | |||
290 | Error: | ||
291 | spin_unlock_irqrestore(&pfob->post_lock, irql); | ||
292 | |||
293 | if (pfob->pend_queue_driving && pfob->mcc) { | ||
294 | pfob->pend_queue_driving = 0; | ||
295 | be_drive_mcc_wrb_queue(pfob->mcc); | ||
296 | } | ||
297 | return status; | ||
298 | } | ||
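A usage sketch for the synchronous case (no callback and no queuing context). It is hypothetical, and the delay values are arbitrary illustrative numbers; a real caller should stay within the range reported by the query-delay-range FWCMD mentioned above.

/* Hypothetical example: set two EQs to 64us and 96us interrupt delays. */
static int example_set_eq_delays(struct be_function_object *pfob,
				 struct be_eq_object *eq0,
				 struct be_eq_object *eq1)
{
	struct be_eq_object *eqs[2] = { eq0, eq1 };
	u32 delays_us[2] = { 64, 96 };

	/* NULL cb/q_ctxt: fail with BE_STATUS_NO_MCC_WRB rather than queue. */
	return be_eq_modify_delay(pfob, 2, eqs, delays_us, NULL, NULL, NULL);
}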
299 | |||
diff --git a/drivers/staging/benet/eth.c b/drivers/staging/benet/eth.c deleted file mode 100644 index f641b6260d07..000000000000 --- a/drivers/staging/benet/eth.c +++ /dev/null | |||
@@ -1,1273 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | #include <linux/if_ether.h> | ||
18 | #include "hwlib.h" | ||
19 | #include "bestatus.h" | ||
20 | |||
21 | /* | ||
22 | *--------------------------------------------------------- | ||
23 | * Function: be_eth_sq_create_ex | ||
24 | * Creates an ethernet send ring - extended version with | ||
25 | * additional parameters. | ||
26 | * pfob - | ||
27 | * rd - ring address | ||
28 | * length_in_bytes - | ||
29 | * type - The type of ring to create. | ||
30 | * ulp - The requested ULP number for the ring. | ||
31 | * This should be zero based, i.e. 0,1,2. This must | ||
32 | * be a valid NIC ULP based on the firmware config. | ||
33 | * All doorbells for this ring must be sent to | ||
34 | * this ULP. The first network ring allocated for | ||
35 | * each ULP is higher performance than subsequent rings. | ||
36 | * cq_object - cq object for completions | ||
37 | * ex_parameters - Additional parameters (that may increase in | ||
38 | * future revisions). These parameters are only used | ||
39 | * for certain ring types -- see | ||
40 | * struct be_eth_sq_parameters for details. | ||
41 | * eth_sq - | ||
42 | * return status - BE_SUCCESS (0) on success. Negative error code on failure. | ||
43 | *--------------------------------------------------------- | ||
44 | */ | ||
45 | int | ||
46 | be_eth_sq_create_ex(struct be_function_object *pfob, struct ring_desc *rd, | ||
47 | u32 length, u32 type, u32 ulp, struct be_cq_object *cq_object, | ||
48 | struct be_eth_sq_parameters *ex_parameters, | ||
49 | struct be_ethsq_object *eth_sq) | ||
50 | { | ||
51 | struct FWCMD_COMMON_ETH_TX_CREATE *fwcmd = NULL; | ||
52 | struct MCC_WRB_AMAP *wrb = NULL; | ||
53 | int status = 0; | ||
54 | u32 n; | ||
55 | unsigned long irql; | ||
56 | |||
57 | ASSERT(rd); | ||
58 | ASSERT(eth_sq); | ||
59 | ASSERT(ex_parameters); | ||
60 | |||
61 | spin_lock_irqsave(&pfob->post_lock, irql); | ||
62 | |||
63 | memset(eth_sq, 0, sizeof(*eth_sq)); | ||
64 | |||
65 | eth_sq->parent_function = pfob; | ||
66 | eth_sq->bid = 0xFFFFFFFF; | ||
67 | eth_sq->cq_object = cq_object; | ||
68 | |||
69 | /* Translate hwlib interface to arm interface. */ | ||
70 | switch (type) { | ||
71 | case BE_ETH_TX_RING_TYPE_FORWARDING: | ||
72 | type = ETH_TX_RING_TYPE_FORWARDING; | ||
73 | break; | ||
74 | case BE_ETH_TX_RING_TYPE_STANDARD: | ||
75 | type = ETH_TX_RING_TYPE_STANDARD; | ||
76 | break; | ||
77 | case BE_ETH_TX_RING_TYPE_BOUND: | ||
78 | ASSERT(ex_parameters->port < 2); | ||
79 | type = ETH_TX_RING_TYPE_BOUND; | ||
80 | break; | ||
81 | default: | ||
82 | TRACE(DL_ERR, "Invalid eth tx ring type:%d", type); | ||
83 | return BE_NOT_OK; | ||
84 | break; | ||
85 | } | ||
86 | |||
87 | wrb = be_function_peek_mcc_wrb(pfob); | ||
88 | if (!wrb) { | ||
89 | ASSERT(wrb); | ||
90 | TRACE(DL_ERR, "No free MCC WRBs in create EQ."); | ||
91 | status = BE_STATUS_NO_MCC_WRB; | ||
92 | goto Error; | ||
93 | } | ||
94 | /* NIC must be supported by the current config. */ | ||
95 | ASSERT(pfob->fw_config.nic_ulp_mask); | ||
96 | |||
97 | /* | ||
98 | * The ulp parameter must select a valid NIC ULP | ||
99 | * for the current config. | ||
100 | */ | ||
101 | ASSERT((1 << ulp) & pfob->fw_config.nic_ulp_mask); | ||
102 | |||
103 | /* Prepares an embedded fwcmd, including request/response sizes. */ | ||
104 | fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_ETH_TX_CREATE); | ||
105 | fwcmd->header.request.port_number = ex_parameters->port; | ||
106 | |||
107 | AMAP_SET_BITS_PTR(ETX_CONTEXT, pd_id, | ||
108 | &fwcmd->params.request.context, 0); | ||
109 | |||
110 | n = be_ring_length_to_encoding(length, sizeof(struct ETH_WRB_AMAP)); | ||
111 | AMAP_SET_BITS_PTR(ETX_CONTEXT, tx_ring_size, | ||
112 | &fwcmd->params.request.context, n); | ||
113 | |||
114 | AMAP_SET_BITS_PTR(ETX_CONTEXT, cq_id_send, | ||
115 | &fwcmd->params.request.context, cq_object->cq_id); | ||
116 | |||
117 | n = pfob->pci_function_number; | ||
118 | AMAP_SET_BITS_PTR(ETX_CONTEXT, func, &fwcmd->params.request.context, n); | ||
119 | |||
120 | fwcmd->params.request.type = type; | ||
121 | fwcmd->params.request.ulp_num = (1 << ulp); | ||
122 | fwcmd->params.request.num_pages = DIV_ROUND_UP(length, PAGE_SIZE); | ||
123 | ASSERT(PAGES_SPANNED(rd->va, rd->length) >= | ||
124 | fwcmd->params.request.num_pages); | ||
125 | |||
126 | /* Create a page list for the FWCMD. */ | ||
127 | be_rd_to_pa_list(rd, fwcmd->params.request.pages, | ||
128 | ARRAY_SIZE(fwcmd->params.request.pages)); | ||
129 | |||
130 | status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL, | ||
131 | NULL, NULL, fwcmd, NULL); | ||
132 | if (status != BE_SUCCESS) { | ||
133 | TRACE(DL_ERR, "MCC to create etx queue failed."); | ||
134 | goto Error; | ||
135 | } | ||
136 | /* save the butler ID */ | ||
137 | eth_sq->bid = fwcmd->params.response.cid; | ||
138 | |||
139 | /* add a reference to the corresponding CQ */ | ||
140 | atomic_inc(&cq_object->ref_count); | ||
141 | |||
142 | Error: | ||
143 | spin_unlock_irqrestore(&pfob->post_lock, irql); | ||
144 | |||
145 | if (pfob->pend_queue_driving && pfob->mcc) { | ||
146 | pfob->pend_queue_driving = 0; | ||
147 | be_drive_mcc_wrb_queue(pfob->mcc); | ||
148 | } | ||
149 | return status; | ||
150 | } | ||
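A minimal caller sketch (hypothetical, same hwlib.h/bestatus.h context as this file; it assumes the TX ring memory is described by a struct ring_desc and a completion queue already exists):

/* Hypothetical example: standard TX ring on NIC ULP 0; port unused here. */
static int example_create_tx_ring(struct be_function_object *pfob,
				  struct ring_desc *txq_rd, u32 ring_bytes,
				  struct be_cq_object *tx_cq,
				  struct be_ethsq_object *tx_q)
{
	struct be_eth_sq_parameters params = { 0 };

	params.port = 0;	/* only meaningful for BOUND ring types */

	return be_eth_sq_create_ex(pfob, txq_rd, ring_bytes,
				   BE_ETH_TX_RING_TYPE_STANDARD, 0 /* ulp */,
				   tx_cq, &params, tx_q);
}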
151 | |||
152 | |||
153 | /* | ||
154 | This routine destroys an ethernet send queue | ||
155 | |||
156 | EthSq - EthSq Handle returned from EthSqCreate | ||
157 | |||
158 | This function always returns BE_SUCCESS. | ||
159 | |||
160 | This function frees memory allocated by EthSqCreate for the EthSq Object. | ||
161 | |||
162 | */ | ||
163 | int be_eth_sq_destroy(struct be_ethsq_object *eth_sq) | ||
164 | { | ||
165 | int status = 0; | ||
166 | |||
167 | /* Send fwcmd to destroy the queue. */ | ||
168 | status = be_function_ring_destroy(eth_sq->parent_function, eth_sq->bid, | ||
169 | FWCMD_RING_TYPE_ETH_TX, NULL, NULL, NULL, NULL); | ||
170 | ASSERT(status == 0); | ||
171 | |||
172 | /* Dereference any associated CQs. */ | ||
173 | atomic_dec(ð_sq->cq_object->ref_count); | ||
174 | return status; | ||
175 | } | ||
176 | /* | ||
177 | This routine attempts to set the transmit flow control parameters. | ||
178 | |||
179 | FunctionObject - Handle to a function object | ||
180 | |||
181 | txfc_enable - transmit flow control enable - true for | ||
182 | enable, false for disable | ||
183 | |||
184 | rxfc_enable - receive flow control enable - true for | ||
185 | enable, false for disable | ||
186 | |||
187 | Returns BE_SUCCESS if successful, otherwise a useful int error | ||
188 | code is returned. | ||
189 | |||
190 | IRQL: < DISPATCH_LEVEL | ||
191 | |||
192 | This function always fails in non-privileged machine context. | ||
193 | */ | ||
194 | int | ||
195 | be_eth_set_flow_control(struct be_function_object *pfob, | ||
196 | bool txfc_enable, bool rxfc_enable) | ||
197 | { | ||
198 | struct FWCMD_COMMON_SET_FLOW_CONTROL *fwcmd = NULL; | ||
199 | struct MCC_WRB_AMAP *wrb = NULL; | ||
200 | int status = 0; | ||
201 | unsigned long irql; | ||
202 | |||
203 | spin_lock_irqsave(&pfob->post_lock, irql); | ||
204 | |||
205 | wrb = be_function_peek_mcc_wrb(pfob); | ||
206 | if (!wrb) { | ||
207 | TRACE(DL_ERR, "MCC wrb peek failed."); | ||
208 | status = BE_STATUS_NO_MCC_WRB; | ||
209 | goto error; | ||
210 | } | ||
211 | /* Prepares an embedded fwcmd, including request/response sizes. */ | ||
212 | fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_SET_FLOW_CONTROL); | ||
213 | |||
214 | fwcmd->params.request.rx_flow_control = rxfc_enable; | ||
215 | fwcmd->params.request.tx_flow_control = txfc_enable; | ||
216 | |||
217 | /* Post the f/w command */ | ||
218 | status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL, | ||
219 | NULL, NULL, fwcmd, NULL); | ||
220 | |||
221 | if (status != 0) { | ||
222 | TRACE(DL_ERR, "set flow control fwcmd failed."); | ||
223 | goto error; | ||
224 | } | ||
225 | |||
226 | error: | ||
227 | spin_unlock_irqrestore(&pfob->post_lock, irql); | ||
228 | |||
229 | if (pfob->pend_queue_driving && pfob->mcc) { | ||
230 | pfob->pend_queue_driving = 0; | ||
231 | be_drive_mcc_wrb_queue(pfob->mcc); | ||
232 | } | ||
233 | return status; | ||
234 | } | ||
235 | |||
236 | /* | ||
237 | This routine attempts to get the transmit flow control parameters. | ||
238 | |||
239 | pfob - Handle to a function object | ||
240 | |||
241 | txfc_enable - transmit flow control enable - true for | ||
242 | enable, false for disable | ||
243 | |||
244 | rxfc_enable - receive flow control enable - true for enable, | ||
245 | false for disable | ||
246 | |||
247 | Returns BE_SUCCESS if successful, otherwise a useful int error code | ||
248 | is returned. | ||
249 | |||
250 | IRQL: < DISPATCH_LEVEL | ||
251 | |||
252 | This function always fails in non-privileged machine context. | ||
253 | */ | ||
254 | int | ||
255 | be_eth_get_flow_control(struct be_function_object *pfob, | ||
256 | bool *txfc_enable, bool *rxfc_enable) | ||
257 | { | ||
258 | struct FWCMD_COMMON_GET_FLOW_CONTROL *fwcmd = NULL; | ||
259 | struct MCC_WRB_AMAP *wrb = NULL; | ||
260 | int status = 0; | ||
261 | unsigned long irql; | ||
262 | |||
263 | spin_lock_irqsave(&pfob->post_lock, irql); | ||
264 | |||
265 | wrb = be_function_peek_mcc_wrb(pfob); | ||
266 | if (!wrb) { | ||
267 | TRACE(DL_ERR, "MCC wrb peek failed."); | ||
268 | status = BE_STATUS_NO_MCC_WRB; | ||
269 | goto error; | ||
270 | } | ||
271 | /* Prepares an embedded fwcmd, including request/response sizes. */ | ||
272 | fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_GET_FLOW_CONTROL); | ||
273 | |||
274 | /* Post the f/w command */ | ||
275 | status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL, | ||
276 | NULL, NULL, fwcmd, NULL); | ||
277 | |||
278 | if (status != 0) { | ||
279 | TRACE(DL_ERR, "get flow control fwcmd failed."); | ||
280 | goto error; | ||
281 | } | ||
282 | |||
283 | *txfc_enable = fwcmd->params.response.tx_flow_control; | ||
284 | *rxfc_enable = fwcmd->params.response.rx_flow_control; | ||
285 | |||
286 | error: | ||
287 | spin_unlock_irqrestore(&pfob->post_lock, irql); | ||
288 | |||
289 | if (pfob->pend_queue_driving && pfob->mcc) { | ||
290 | pfob->pend_queue_driving = 0; | ||
291 | be_drive_mcc_wrb_queue(pfob->mcc); | ||
292 | } | ||
293 | return status; | ||
294 | } | ||
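A short sketch combining the set and get routines above (hypothetical caller, same hwlib.h/bestatus.h context as this file):

/* Hypothetical example: enable pause in both directions and read back. */
static int example_enable_pause(struct be_function_object *pfob)
{
	bool tx_on, rx_on;
	int status;

	status = be_eth_set_flow_control(pfob, true, true);
	if (status != BE_SUCCESS)
		return status;

	/* Read back what the firmware actually applied. */
	status = be_eth_get_flow_control(pfob, &tx_on, &rx_on);
	if (status != BE_SUCCESS)
		return status;

	return (tx_on && rx_on) ? BE_SUCCESS : BE_NOT_OK;
}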
295 | |||
296 | /* | ||
297 | *--------------------------------------------------------- | ||
298 | * Function: be_eth_set_qos | ||
299 | * This function sets the ethernet transmit Quality of Service (QoS) | ||
300 | * characteristics of BladeEngine for the domain. All ethernet | ||
301 | * transmit rings of the domain will evenly share the bandwidth. | ||
302 | * The exception to sharing is the host primary (super) ethernet | ||
303 | * transmit ring as well as the host ethernet forwarding ring | ||
304 | * for missed offload data. | ||
305 | * pfob - | ||
306 | * max_bps - the maximum bits per second in units of | ||
307 | * 10 Mbps (valid 0-100) | ||
308 | * max_pps - the maximum packets per second in units | ||
309 | * of 1 Kpps (0 indicates no limit) | ||
310 | * return status - BE_SUCCESS (0) on success. Negative error code on failure. | ||
311 | *--------------------------------------------------------- | ||
312 | */ | ||
313 | int | ||
314 | be_eth_set_qos(struct be_function_object *pfob, u32 max_bps, u32 max_pps) | ||
315 | { | ||
316 | struct FWCMD_COMMON_SET_QOS *fwcmd = NULL; | ||
317 | struct MCC_WRB_AMAP *wrb = NULL; | ||
318 | int status = 0; | ||
319 | unsigned long irql; | ||
320 | |||
321 | spin_lock_irqsave(&pfob->post_lock, irql); | ||
322 | |||
323 | wrb = be_function_peek_mcc_wrb(pfob); | ||
324 | if (!wrb) { | ||
325 | TRACE(DL_ERR, "MCC wrb peek failed."); | ||
326 | status = BE_STATUS_NO_MCC_WRB; | ||
327 | goto error; | ||
328 | } | ||
329 | /* Prepares an embedded fwcmd, including request/response sizes. */ | ||
330 | fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_SET_QOS); | ||
331 | |||
332 | /* Set fields in fwcmd */ | ||
333 | fwcmd->params.request.max_bits_per_second_NIC = max_bps; | ||
334 | fwcmd->params.request.max_packets_per_second_NIC = max_pps; | ||
335 | fwcmd->params.request.valid_flags = QOS_BITS_NIC | QOS_PKTS_NIC; | ||
336 | |||
337 | /* Post the f/w command */ | ||
338 | status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL, | ||
339 | NULL, NULL, fwcmd, NULL); | ||
340 | |||
341 | if (status != 0) | ||
342 | TRACE(DL_ERR, "network set qos fwcmd failed."); | ||
343 | |||
344 | error: | ||
345 | spin_unlock_irqrestore(&pfob->post_lock, irql); | ||
346 | if (pfob->pend_queue_driving && pfob->mcc) { | ||
347 | pfob->pend_queue_driving = 0; | ||
348 | be_drive_mcc_wrb_queue(pfob->mcc); | ||
349 | } | ||
350 | return status; | ||
351 | } | ||
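A one-call sketch showing the units documented above (hypothetical caller):

/* Hypothetical example: cap domain TX bandwidth at 1 Gbps, no packet limit. */
static int example_cap_tx_bandwidth(struct be_function_object *pfob)
{
	/*
	 * max_bps is in 10 Mbps units (valid 0-100), so 100 == 1 Gbps;
	 * max_pps == 0 means no packet-per-second limit.
	 */
	return be_eth_set_qos(pfob, 100, 0);
}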
352 | |||
353 | /* | ||
354 | *--------------------------------------------------------- | ||
355 | * Function: be_eth_get_qos | ||
356 | * This function retrieves the ethernet transmit Quality of Service (QoS) | ||
357 | * characteristics for the domain. | ||
358 | * max_bps - the maximum bits per second in units of | ||
359 | * 10 Mbps (valid 0-100) | ||
360 | * max_pps - the maximum packets per second in units of | ||
361 | * 1 Kpps (0 indicates no limit) | ||
362 | * return status - BE_SUCCESS (0) on success. Negative error code on failure. | ||
363 | *--------------------------------------------------------- | ||
364 | */ | ||
365 | int | ||
366 | be_eth_get_qos(struct be_function_object *pfob, u32 *max_bps, u32 *max_pps) | ||
367 | { | ||
368 | struct FWCMD_COMMON_GET_QOS *fwcmd = NULL; | ||
369 | struct MCC_WRB_AMAP *wrb = NULL; | ||
370 | int status = 0; | ||
371 | unsigned long irql; | ||
372 | |||
373 | spin_lock_irqsave(&pfob->post_lock, irql); | ||
374 | |||
375 | wrb = be_function_peek_mcc_wrb(pfob); | ||
376 | if (!wrb) { | ||
377 | TRACE(DL_ERR, "MCC wrb peek failed."); | ||
378 | status = BE_STATUS_NO_MCC_WRB; | ||
379 | goto error; | ||
380 | } | ||
381 | /* Prepares an embedded fwcmd, including request/response sizes. */ | ||
382 | fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_GET_QOS); | ||
383 | |||
384 | /* Post the f/w command */ | ||
385 | status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL, | ||
386 | NULL, NULL, fwcmd, NULL); | ||
387 | |||
388 | if (status != 0) { | ||
389 | TRACE(DL_ERR, "network get qos fwcmd failed."); | ||
390 | goto error; | ||
391 | } | ||
392 | |||
393 | *max_bps = fwcmd->params.response.max_bits_per_second_NIC; | ||
394 | *max_pps = fwcmd->params.response.max_packets_per_second_NIC; | ||
395 | |||
396 | error: | ||
397 | spin_unlock_irqrestore(&pfob->post_lock, irql); | ||
398 | if (pfob->pend_queue_driving && pfob->mcc) { | ||
399 | pfob->pend_queue_driving = 0; | ||
400 | be_drive_mcc_wrb_queue(pfob->mcc); | ||
401 | } | ||
402 | return status; | ||
403 | } | ||
404 | |||
405 | /* | ||
406 | *--------------------------------------------------------- | ||
407 | * Function: be_eth_set_frame_size | ||
408 | * This function sets the ethernet maximum frame size. The previous | ||
409 | * values are returned. | ||
410 | * pfob - | ||
411 | * tx_frame_size - maximum transmit frame size in bytes | ||
412 | * rx_frame_size - maximum receive frame size in bytes | ||
413 | * return status - BE_SUCCESS (0) on success. Negative error code on failure. | ||
414 | *--------------------------------------------------------- | ||
415 | */ | ||
416 | int | ||
417 | be_eth_set_frame_size(struct be_function_object *pfob, | ||
418 | u32 *tx_frame_size, u32 *rx_frame_size) | ||
419 | { | ||
420 | struct FWCMD_COMMON_SET_FRAME_SIZE *fwcmd = NULL; | ||
421 | struct MCC_WRB_AMAP *wrb = NULL; | ||
422 | int status = 0; | ||
423 | unsigned long irql; | ||
424 | |||
425 | spin_lock_irqsave(&pfob->post_lock, irql); | ||
426 | |||
427 | wrb = be_function_peek_mcc_wrb(pfob); | ||
428 | if (!wrb) { | ||
429 | TRACE(DL_ERR, "MCC wrb peek failed."); | ||
430 | status = BE_STATUS_NO_MCC_WRB; | ||
431 | goto error; | ||
432 | } | ||
433 | /* Prepares an embedded fwcmd, including request/response sizes. */ | ||
434 | fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_SET_FRAME_SIZE); | ||
435 | fwcmd->params.request.max_tx_frame_size = *tx_frame_size; | ||
436 | fwcmd->params.request.max_rx_frame_size = *rx_frame_size; | ||
437 | |||
438 | /* Post the f/w command */ | ||
439 | status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL, | ||
440 | NULL, NULL, fwcmd, NULL); | ||
441 | |||
442 | if (status != 0) { | ||
443 | TRACE(DL_ERR, "network set frame size fwcmd failed."); | ||
444 | goto error; | ||
445 | } | ||
446 | |||
447 | *tx_frame_size = fwcmd->params.response.chip_max_tx_frame_size; | ||
448 | *rx_frame_size = fwcmd->params.response.chip_max_rx_frame_size; | ||
449 | |||
450 | error: | ||
451 | spin_unlock_irqrestore(&pfob->post_lock, irql); | ||
452 | if (pfob->pend_queue_driving && pfob->mcc) { | ||
453 | pfob->pend_queue_driving = 0; | ||
454 | be_drive_mcc_wrb_queue(pfob->mcc); | ||
455 | } | ||
456 | return status; | ||
457 | } | ||
458 | |||
459 | |||
460 | /* | ||
461 | This routine creates an Ethernet receive ring. | ||
462 | |||
463 | pfob - handle to a function object | ||
464 | rd - ring descriptor for the default receive ring. This must | ||
465 | describe exactly 8K of contiguous physical memory. | ||
466 | cq_object - handle to a previously created CQ to be associated | ||
467 | with the RQ. | ||
468 | eth_rq - pointer to an opaque handle where an eth | ||
469 | receive object is returned. | ||
470 | Returns BE_SUCCESS if successful, otherwise a useful | ||
471 | int error code is returned. | ||
472 | |||
473 | IRQL: < DISPATCH_LEVEL | ||
474 | this function allocates a struct be_ethrq_object *object. | ||
475 | there must be no more than 1 of these per function object, unless the | ||
476 | function object supports RSS (is networking and on the host). | ||
477 | the rd ring descriptor must describe a buffer of exactly 8K. | ||
478 | the erx::host_cqid (or host_stor_cqid) register and erx::ring_page registers | ||
479 | will be updated as appropriate on return | ||
480 | */ | ||
481 | int | ||
482 | be_eth_rq_create(struct be_function_object *pfob, | ||
483 | struct ring_desc *rd, struct be_cq_object *cq_object, | ||
484 | struct be_cq_object *bcmc_cq_object, | ||
485 | struct be_ethrq_object *eth_rq) | ||
486 | { | ||
487 | int status = 0; | ||
488 | struct MCC_WRB_AMAP *wrb = NULL; | ||
489 | struct FWCMD_COMMON_ETH_RX_CREATE *fwcmd = NULL; | ||
490 | unsigned long irql; | ||
491 | |||
492 | /* MPU will set the */ | ||
493 | ASSERT(rd); | ||
494 | ASSERT(eth_rq); | ||
495 | |||
496 | spin_lock_irqsave(&pfob->post_lock, irql); | ||
497 | |||
498 | eth_rq->parent_function = pfob; | ||
499 | eth_rq->cq_object = cq_object; | ||
500 | |||
501 | wrb = be_function_peek_mcc_wrb(pfob); | ||
502 | if (!wrb) { | ||
503 | TRACE(DL_ERR, "MCC wrb peek failed."); | ||
504 | status = BE_STATUS_NO_MCC_WRB; | ||
505 | goto Error; | ||
506 | } | ||
507 | /* Prepares an embedded fwcmd, including request/response sizes. */ | ||
508 | fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_ETH_RX_CREATE); | ||
509 | |||
510 | fwcmd->params.request.num_pages = 2; /* required length */ | ||
511 | fwcmd->params.request.cq_id = cq_object->cq_id; | ||
512 | |||
513 | if (bcmc_cq_object) | ||
514 | fwcmd->params.request.bcmc_cq_id = bcmc_cq_object->cq_id; | ||
515 | else | ||
516 | fwcmd->params.request.bcmc_cq_id = 0xFFFF; | ||
517 | |||
518 | /* Create a page list for the FWCMD. */ | ||
519 | be_rd_to_pa_list(rd, fwcmd->params.request.pages, | ||
520 | ARRAY_SIZE(fwcmd->params.request.pages)); | ||
521 | |||
522 | /* Post the f/w command */ | ||
523 | status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL, | ||
524 | NULL, NULL, fwcmd, NULL); | ||
525 | if (status != BE_SUCCESS) { | ||
526 | TRACE(DL_ERR, "fwcmd to map eth rxq frags failed."); | ||
527 | goto Error; | ||
528 | } | ||
529 | /* Save the ring ID for cleanup. */ | ||
530 | eth_rq->rid = fwcmd->params.response.id; | ||
531 | |||
532 | atomic_inc(&cq_object->ref_count); | ||
533 | |||
534 | Error: | ||
535 | spin_unlock_irqrestore(&pfob->post_lock, irql); | ||
536 | |||
537 | if (pfob->pend_queue_driving && pfob->mcc) { | ||
538 | pfob->pend_queue_driving = 0; | ||
539 | be_drive_mcc_wrb_queue(pfob->mcc); | ||
540 | } | ||
541 | return status; | ||
542 | } | ||
543 | |||
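A hedged usage sketch (hypothetical, not from the driver): create an eth RQ against a previously created CQ and destroy it again. It assumes rd describes the 8K of contiguous memory required above and that cq was created through this driver's CQ path.

    #include "hwlib.h"
    #include "bestatus.h"

    /* Hypothetical round trip: create an eth RQ, use it, then destroy it. */
    static int example_rq_round_trip(struct be_function_object *pfob,
                                     struct ring_desc *rd,
                                     struct be_cq_object *cq)
    {
            struct be_ethrq_object rq;
            int status;

            /* A NULL bcmc CQ is allowed; the create path then sends 0xFFFF. */
            status = be_eth_rq_create(pfob, rd, cq, NULL, &rq);
            if (status != BE_SUCCESS)
                    return status;

            /* ... post receive fragments and run traffic ... */

            return be_eth_rq_destroy(&rq);
    }
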
544 | /* | ||
545 | This routine destroys an Ethernet receive queue | ||
546 | |||
547 | eth_rq - ethernet receive queue handle returned from eth_rq_create | ||
548 | |||
549 | Returns BE_SUCCESS on success and an appropriate int on failure. | ||
550 | |||
551 | This function frees resources allocated by be_eth_rq_create. | ||
552 | The erx::host_cqid (or host_stor_cqid) register and erx::ring_page | ||
553 | registers will be updated as appropriate on return | ||
554 | IRQL: < DISPATCH_LEVEL | ||
555 | */ | ||
556 | |||
557 | static void be_eth_rq_destroy_internal_cb(void *context, int status, | ||
558 | struct MCC_WRB_AMAP *wrb) | ||
559 | { | ||
560 | struct be_ethrq_object *eth_rq = (struct be_ethrq_object *) context; | ||
561 | |||
562 | if (status != BE_SUCCESS) { | ||
563 | TRACE(DL_ERR, "Destroy eth rq failed in internal callback.\n"); | ||
564 | } else { | ||
565 | /* Dereference any CQs associated with this queue. */ | ||
566 | atomic_dec(ð_rq->cq_object->ref_count); | ||
567 | } | ||
568 | |||
569 | return; | ||
570 | } | ||
571 | |||
572 | int be_eth_rq_destroy(struct be_ethrq_object *eth_rq) | ||
573 | { | ||
574 | int status = BE_SUCCESS; | ||
575 | |||
576 | /* Send fwcmd to destroy the RQ. */ | ||
577 | status = be_function_ring_destroy(eth_rq->parent_function, | ||
578 | eth_rq->rid, FWCMD_RING_TYPE_ETH_RX, NULL, NULL, | ||
579 | be_eth_rq_destroy_internal_cb, eth_rq); | ||
580 | |||
581 | return status; | ||
582 | } | ||
583 | |||
584 | /* | ||
585 | *--------------------------------------------------------------------------- | ||
586 | * Function: be_eth_rq_destroy_options | ||
587 | * Destroys an ethernet receive ring with finer granularity options | ||
588 | * than the standard be_eth_rq_destroy() API function. | ||
589 | * eth_rq - | ||
590 | * flush - Set to 1 to flush the ring, set to 0 to bypass the flush | ||
591 | * cb - Callback function on completion | ||
592 | * cb_context - Callback context | ||
593 | * return status - BE_SUCCESS (0) on success. Negative error code on failure. | ||
594 | *---------------------------------------------------------------------------- | ||
595 | */ | ||
596 | int | ||
597 | be_eth_rq_destroy_options(struct be_ethrq_object *eth_rq, bool flush, | ||
598 | mcc_wrb_cqe_callback cb, void *cb_context) | ||
599 | { | ||
600 | struct FWCMD_COMMON_RING_DESTROY *fwcmd = NULL; | ||
601 | struct MCC_WRB_AMAP *wrb = NULL; | ||
602 | int status = BE_SUCCESS; | ||
603 | struct be_function_object *pfob = NULL; | ||
604 | unsigned long irql; | ||
605 | |||
606 | pfob = eth_rq->parent_function; | ||
607 | |||
608 | spin_lock_irqsave(&pfob->post_lock, irql); | ||
609 | |||
610 | TRACE(DL_INFO, "Destroy eth_rq ring id:%d, flush:%d", eth_rq->rid, | ||
611 | flush); | ||
612 | |||
613 | wrb = be_function_peek_mcc_wrb(pfob); | ||
614 | if (!wrb) { | ||
615 | ASSERT(wrb); | ||
616 | TRACE(DL_ERR, "No free MCC WRBs in destroy eth_rq ring."); | ||
617 | status = BE_STATUS_NO_MCC_WRB; | ||
618 | goto Error; | ||
619 | } | ||
620 | /* Prepares an embedded fwcmd, including request/response sizes. */ | ||
621 | fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_RING_DESTROY); | ||
622 | |||
623 | fwcmd->params.request.id = eth_rq->rid; | ||
624 | fwcmd->params.request.ring_type = FWCMD_RING_TYPE_ETH_RX; | ||
625 | fwcmd->params.request.bypass_flush = ((0 == flush) ? 1 : 0); | ||
626 | |||
627 | /* Post the f/w command */ | ||
628 | status = be_function_post_mcc_wrb(pfob, wrb, NULL, cb, cb_context, | ||
629 | be_eth_rq_destroy_internal_cb, eth_rq, fwcmd, NULL); | ||
630 | |||
631 | if (status != BE_SUCCESS && status != BE_PENDING) { | ||
632 | TRACE(DL_ERR, "eth_rq ring destroy failed. id:%d, flush:%d", | ||
633 | eth_rq->rid, flush); | ||
634 | goto Error; | ||
635 | } | ||
636 | |||
637 | Error: | ||
638 | spin_unlock_irqrestore(&pfob->post_lock, irql); | ||
639 | |||
640 | if (pfob->pend_queue_driving && pfob->mcc) { | ||
641 | pfob->pend_queue_driving = 0; | ||
642 | be_drive_mcc_wrb_queue(pfob->mcc); | ||
643 | } | ||
644 | return status; | ||
645 | } | ||
646 | |||
647 | /* | ||
648 | This routine queries the frag size for erx. | ||
649 | |||
650 | pfob - handle to a function object | ||
651 | |||
652 | frag_size_bytes - erx frag size in bytes that is/was set. | ||
653 | |||
654 | Returns BE_SUCCESS if successful, otherwise a useful int error | ||
655 | code is returned. | ||
656 | |||
657 | IRQL: < DISPATCH_LEVEL | ||
658 | |||
659 | */ | ||
660 | int | ||
661 | be_eth_rq_get_frag_size(struct be_function_object *pfob, u32 *frag_size_bytes) | ||
662 | { | ||
663 | struct FWCMD_ETH_GET_RX_FRAG_SIZE *fwcmd = NULL; | ||
664 | struct MCC_WRB_AMAP *wrb = NULL; | ||
665 | int status = 0; | ||
666 | unsigned long irql; | ||
667 | |||
668 | ASSERT(frag_size_bytes); | ||
669 | |||
670 | spin_lock_irqsave(&pfob->post_lock, irql); | ||
671 | |||
672 | wrb = be_function_peek_mcc_wrb(pfob); | ||
673 | if (!wrb) { | ||
674 | TRACE(DL_ERR, "MCC wrb peek failed."); | ||
675 | status = BE_STATUS_NO_MCC_WRB; goto error; | ||
676 | } | ||
677 | /* Prepares an embedded fwcmd, including request/response sizes. */ | ||
678 | fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, ETH_GET_RX_FRAG_SIZE); | ||
679 | |||
680 | /* Post the f/w command */ | ||
681 | status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL, | ||
682 | NULL, NULL, fwcmd, NULL); | ||
683 | |||
684 | if (status != 0) { | ||
685 | TRACE(DL_ERR, "get frag size fwcmd failed."); | ||
686 | goto error; | ||
687 | } | ||
688 | |||
689 | *frag_size_bytes = 1 << fwcmd->params.response.actual_fragsize_log2; | ||
690 | |||
691 | error: | ||
692 | spin_unlock_irqrestore(&pfob->post_lock, irql); | ||
693 | |||
694 | if (pfob->pend_queue_driving && pfob->mcc) { | ||
695 | pfob->pend_queue_driving = 0; | ||
696 | be_drive_mcc_wrb_queue(pfob->mcc); | ||
697 | } | ||
698 | return status; | ||
699 | } | ||
700 | |||
701 | /* | ||
702 | This routine attempts to set the frag size for erx. If the frag size is | ||
703 | already set, the attempt fails and the current frag size is returned. | ||
704 | |||
705 | pfob - Handle to a function object | ||
706 | |||
707 | frag_size - Requested erx frag size in bytes. | ||
708 | |||
709 | frag_size_bytes - Pointer to location where the current frag | ||
710 | size is to be returned. | ||
711 | |||
712 | Returns BE_SUCCESS if successful, otherwise a useful int error | ||
713 | code is returned. | ||
714 | |||
715 | IRQL: < DISPATCH_LEVEL | ||
716 | |||
717 | This function always fails in non-privileged machine context. | ||
718 | */ | ||
719 | int | ||
720 | be_eth_rq_set_frag_size(struct be_function_object *pfob, | ||
721 | u32 frag_size, u32 *frag_size_bytes) | ||
722 | { | ||
723 | struct FWCMD_ETH_SET_RX_FRAG_SIZE *fwcmd = NULL; | ||
724 | struct MCC_WRB_AMAP *wrb = NULL; | ||
725 | int status = 0; | ||
726 | unsigned long irql; | ||
727 | |||
728 | ASSERT(frag_size_bytes); | ||
729 | |||
730 | spin_lock_irqsave(&pfob->post_lock, irql); | ||
731 | |||
732 | wrb = be_function_peek_mcc_wrb(pfob); | ||
733 | if (!wrb) { | ||
734 | TRACE(DL_ERR, "MCC wrb peek failed."); | ||
735 | status = BE_STATUS_NO_MCC_WRB; | ||
736 | goto error; | ||
737 | } | ||
738 | /* Prepares an embedded fwcmd, including request/response sizes. */ | ||
739 | fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, ETH_SET_RX_FRAG_SIZE); | ||
740 | |||
741 | ASSERT(frag_size >= 128 && frag_size <= 16 * 1024); | ||
742 | |||
743 | /* This is the log2 of the fragsize. This is not the exact | ||
744 | * ERX encoding. */ | ||
745 | fwcmd->params.request.new_fragsize_log2 = __ilog2_u32(frag_size); | ||
746 | |||
747 | /* Post the f/w command */ | ||
748 | status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL, | ||
749 | NULL, NULL, fwcmd, NULL); | ||
750 | |||
751 | if (status != 0) { | ||
752 | TRACE(DL_ERR, "set frag size fwcmd failed."); | ||
753 | goto error; | ||
754 | } | ||
755 | |||
756 | *frag_size_bytes = 1 << fwcmd->params.response.actual_fragsize_log2; | ||
757 | error: | ||
758 | spin_unlock_irqrestore(&pfob->post_lock, irql); | ||
759 | |||
760 | if (pfob->pend_queue_driving && pfob->mcc) { | ||
761 | pfob->pend_queue_driving = 0; | ||
762 | be_drive_mcc_wrb_queue(pfob->mcc); | ||
763 | } | ||
764 | return status; | ||
765 | } | ||
766 | |||
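A small hypothetical sketch illustrating the encoding used above: because the request is transmitted as __ilog2_u32(frag_size), a non-power-of-two request is effectively rounded down to a power of two before the firmware sees it. The helper name is invented.

    #include "hwlib.h"
    #include "bestatus.h"

    /* Hypothetical example: request ~9000 bytes, observe what was granted. */
    static int example_set_frag_size(struct be_function_object *pfob)
    {
            u32 actual = 0;
            int status;

            /* 9000 encodes as new_fragsize_log2 = 13, i.e. an 8192-byte frag. */
            status = be_eth_rq_set_frag_size(pfob, 9000, &actual);
            if (status != BE_SUCCESS)
                    return status;

            /* actual = 1 << actual_fragsize_log2 as reported by the firmware. */
            return BE_SUCCESS;
    }
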
767 | |||
768 | /* | ||
769 | This routine gets or sets a mac address for a domain | ||
770 | given the port and mac. | ||
771 | |||
772 | FunctionObject - Function object handle. | ||
773 | port1 - Set to TRUE if this function will set/get the Port 1 | ||
774 | address. Only the host may set this to TRUE. | ||
775 | mac1 - Set to TRUE if this function will set/get the | ||
776 | MAC 1 address. Only the host may set this to TRUE. | ||
777 | write - Set to TRUE if this function should write the mac address. | ||
778 | mac_address - Buffer of the mac address to read or write. | ||
779 | |||
780 | Returns BE_SUCCESS if successful, otherwise a useful int is returned. | ||
781 | |||
782 | IRQL: < DISPATCH_LEVEL | ||
783 | */ | ||
784 | int be_rxf_mac_address_read_write(struct be_function_object *pfob, | ||
785 | bool port1, /* VM must always set to false */ | ||
786 | bool mac1, /* VM must always set to false */ | ||
787 | bool mgmt, bool write, | ||
788 | bool permanent, u8 *mac_address, | ||
789 | mcc_wrb_cqe_callback cb, /* optional */ | ||
790 | void *cb_context) /* optional */ | ||
791 | { | ||
792 | int status = BE_SUCCESS; | ||
793 | union { | ||
794 | struct FWCMD_COMMON_NTWK_MAC_QUERY *query; | ||
795 | struct FWCMD_COMMON_NTWK_MAC_SET *set; | ||
796 | } fwcmd = {NULL}; | ||
797 | struct MCC_WRB_AMAP *wrb = NULL; | ||
798 | u32 type = 0; | ||
799 | unsigned long irql; | ||
800 | struct be_mcc_wrb_response_copy rc; | ||
801 | |||
802 | spin_lock_irqsave(&pfob->post_lock, irql); | ||
803 | |||
804 | ASSERT(mac_address); | ||
805 | |||
806 | ASSERT(port1 == false); | ||
807 | ASSERT(mac1 == false); | ||
808 | |||
809 | wrb = be_function_peek_mcc_wrb(pfob); | ||
810 | if (!wrb) { | ||
811 | TRACE(DL_ERR, "MCC wrb peek failed."); | ||
812 | status = BE_STATUS_NO_MCC_WRB; | ||
813 | goto Error; | ||
814 | } | ||
815 | |||
816 | if (mgmt) { | ||
817 | type = MAC_ADDRESS_TYPE_MANAGEMENT; | ||
818 | } else { | ||
819 | if (pfob->type == BE_FUNCTION_TYPE_NETWORK) | ||
820 | type = MAC_ADDRESS_TYPE_NETWORK; | ||
821 | else | ||
822 | type = MAC_ADDRESS_TYPE_STORAGE; | ||
823 | } | ||
824 | |||
825 | if (write) { | ||
826 | /* Prepares an embedded fwcmd, including | ||
827 | * request/response sizes. | ||
828 | */ | ||
829 | fwcmd.set = BE_PREPARE_EMBEDDED_FWCMD(pfob, | ||
830 | wrb, COMMON_NTWK_MAC_SET); | ||
831 | |||
832 | fwcmd.set->params.request.invalidate = 0; | ||
833 | fwcmd.set->params.request.mac1 = (mac1 ? 1 : 0); | ||
834 | fwcmd.set->params.request.port = (port1 ? 1 : 0); | ||
835 | fwcmd.set->params.request.type = type; | ||
836 | |||
837 | /* Copy the mac address to set. */ | ||
838 | fwcmd.set->params.request.mac.SizeOfStructure = | ||
839 | sizeof(fwcmd.set->params.request.mac); | ||
840 | memcpy(fwcmd.set->params.request.mac.MACAddress, | ||
841 | mac_address, ETH_ALEN); | ||
842 | |||
843 | /* Post the f/w command */ | ||
844 | status = be_function_post_mcc_wrb(pfob, wrb, NULL, | ||
845 | cb, cb_context, NULL, NULL, fwcmd.set, NULL); | ||
846 | |||
847 | } else { | ||
848 | |||
849 | /* | ||
850 | * Prepares an embedded fwcmd, including | ||
851 | * request/response sizes. | ||
852 | */ | ||
853 | fwcmd.query = BE_PREPARE_EMBEDDED_FWCMD(pfob, | ||
854 | wrb, COMMON_NTWK_MAC_QUERY); | ||
855 | |||
856 | fwcmd.query->params.request.mac1 = (mac1 ? 1 : 0); | ||
857 | fwcmd.query->params.request.port = (port1 ? 1 : 0); | ||
858 | fwcmd.query->params.request.type = type; | ||
859 | fwcmd.query->params.request.permanent = permanent; | ||
860 | |||
861 | rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_NTWK_MAC_QUERY, | ||
862 | params.response.mac.MACAddress); | ||
863 | rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_NTWK_MAC_QUERY, | ||
864 | params.response.mac.MACAddress); | ||
865 | rc.va = mac_address; | ||
866 | /* Post the f/w command (with a copy for the response) */ | ||
867 | status = be_function_post_mcc_wrb(pfob, wrb, NULL, cb, | ||
868 | cb_context, NULL, NULL, fwcmd.query, &rc); | ||
869 | } | ||
870 | |||
871 | if (status < 0) { | ||
872 | TRACE(DL_ERR, "mac set/query failed."); | ||
873 | goto Error; | ||
874 | } | ||
875 | |||
876 | Error: | ||
877 | spin_unlock_irqrestore(&pfob->post_lock, irql); | ||
878 | if (pfob->pend_queue_driving && pfob->mcc) { | ||
879 | pfob->pend_queue_driving = 0; | ||
880 | be_drive_mcc_wrb_queue(pfob->mcc); | ||
881 | } | ||
882 | return status; | ||
883 | } | ||
884 | |||
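A hedged sketch of the read path (hypothetical helper, not from the driver): query the currently programmed network MAC for this function, with the VM-safe false values for port1/mac1 and no completion callback.

    #include <linux/if_ether.h>     /* ETH_ALEN */
    #include "hwlib.h"
    #include "bestatus.h"

    /* Hypothetical helper: read the current (non-permanent) MAC address. */
    static int example_query_mac(struct be_function_object *pfob,
                                 u8 mac[ETH_ALEN])
    {
            return be_rxf_mac_address_read_write(pfob,
                            false,          /* port1: must be false in a VM */
                            false,          /* mac1: must be false in a VM */
                            false,          /* mgmt: not the management MAC */
                            false,          /* write: query instead of set */
                            false,          /* permanent: current address */
                            mac, NULL, NULL);
    }
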
885 | /* | ||
886 | This routine configures ethernet multicast address filtering. | ||
887 | |||
888 | pfob - Function object handle. | ||
889 | mac_table - Array of num multicast MAC addresses to accept. | ||
890 | |||
891 | Returns BE_SUCCESS if successful, otherwise a useful int is returned. | ||
892 | |||
893 | IRQL: < DISPATCH_LEVEL | ||
894 | */ | ||
895 | |||
896 | int be_rxf_multicast_config(struct be_function_object *pfob, | ||
897 | bool promiscuous, u32 num, u8 *mac_table, | ||
898 | mcc_wrb_cqe_callback cb, /* optional */ | ||
899 | void *cb_context, | ||
900 | struct be_multicast_q_ctxt *q_ctxt) | ||
901 | { | ||
902 | int status = BE_SUCCESS; | ||
903 | struct FWCMD_COMMON_NTWK_MULTICAST_SET *fwcmd = NULL; | ||
904 | struct MCC_WRB_AMAP *wrb = NULL; | ||
905 | struct be_generic_q_ctxt *generic_ctxt = NULL; | ||
906 | unsigned long irql; | ||
907 | |||
908 | ASSERT(num <= ARRAY_SIZE(fwcmd->params.request.mac)); | ||
909 | |||
910 | if (num > ARRAY_SIZE(fwcmd->params.request.mac)) { | ||
911 | TRACE(DL_ERR, "Too many multicast addresses. BE supports %d.", | ||
912 | (int) ARRAY_SIZE(fwcmd->params.request.mac)); | ||
913 | return BE_NOT_OK; | ||
914 | } | ||
915 | |||
916 | spin_lock_irqsave(&pfob->post_lock, irql); | ||
917 | |||
918 | wrb = be_function_peek_mcc_wrb(pfob); | ||
919 | if (!wrb) { | ||
920 | if (q_ctxt && cb) { | ||
921 | wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header; | ||
922 | generic_ctxt = (struct be_generic_q_ctxt *) q_ctxt; | ||
923 | generic_ctxt->context.bytes = sizeof(*q_ctxt); | ||
924 | } else { | ||
925 | status = BE_STATUS_NO_MCC_WRB; | ||
926 | goto Error; | ||
927 | } | ||
928 | } | ||
929 | /* Prepares an embedded fwcmd, including request/response sizes. */ | ||
930 | fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_NTWK_MULTICAST_SET); | ||
931 | |||
932 | fwcmd->params.request.promiscuous = promiscuous; | ||
933 | if (!promiscuous) { | ||
934 | fwcmd->params.request.num_mac = num; | ||
935 | if (num > 0) { | ||
936 | ASSERT(mac_table); | ||
937 | memcpy(fwcmd->params.request.mac, | ||
938 | mac_table, ETH_ALEN * num); | ||
939 | } | ||
940 | } | ||
941 | |||
942 | /* Post the f/w command */ | ||
943 | status = be_function_post_mcc_wrb(pfob, wrb, generic_ctxt, | ||
944 | cb, cb_context, NULL, NULL, fwcmd, NULL); | ||
945 | if (status < 0) { | ||
946 | TRACE(DL_ERR, "multicast fwcmd failed."); | ||
947 | goto Error; | ||
948 | } | ||
949 | |||
950 | Error: | ||
951 | spin_unlock_irqrestore(&pfob->post_lock, irql); | ||
952 | if (pfob->pend_queue_driving && pfob->mcc) { | ||
953 | pfob->pend_queue_driving = 0; | ||
954 | be_drive_mcc_wrb_queue(pfob->mcc); | ||
955 | } | ||
956 | return status; | ||
957 | } | ||
958 | |||
959 | /* | ||
960 | This routine configures the VLAN tag filter table in the rxf. | ||
961 | |||
962 | FunctionObject - Function object handle. | ||
963 | vlan_tag_array - Array of num VLAN tags to accept. | ||
964 | promiscuous - Set to TRUE to accept all VLAN tags. | ||
965 | |||
966 | Returns BE_SUCCESS if successful, otherwise a useful int is returned. | ||
967 | |||
968 | IRQL: < DISPATCH_LEVEL | ||
969 | */ | ||
970 | int be_rxf_vlan_config(struct be_function_object *pfob, | ||
971 | bool promiscuous, u32 num, u16 *vlan_tag_array, | ||
972 | mcc_wrb_cqe_callback cb, /* optional */ | ||
973 | void *cb_context, | ||
974 | struct be_vlan_q_ctxt *q_ctxt) /* optional */ | ||
975 | { | ||
976 | int status = BE_SUCCESS; | ||
977 | struct FWCMD_COMMON_NTWK_VLAN_CONFIG *fwcmd = NULL; | ||
978 | struct MCC_WRB_AMAP *wrb = NULL; | ||
979 | struct be_generic_q_ctxt *generic_ctxt = NULL; | ||
980 | unsigned long irql; | ||
981 | |||
982 | if (num > ARRAY_SIZE(fwcmd->params.request.vlan_tag)) { | ||
983 | TRACE(DL_ERR, "Too many VLAN tags."); | ||
984 | return BE_NOT_OK; | ||
985 | } | ||
986 | |||
987 | spin_lock_irqsave(&pfob->post_lock, irql); | ||
988 | |||
989 | wrb = be_function_peek_mcc_wrb(pfob); | ||
990 | if (!wrb) { | ||
991 | if (q_ctxt && cb) { | ||
992 | wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header; | ||
993 | generic_ctxt = (struct be_generic_q_ctxt *) q_ctxt; | ||
994 | generic_ctxt->context.bytes = sizeof(*q_ctxt); | ||
995 | } else { | ||
996 | status = BE_STATUS_NO_MCC_WRB; | ||
997 | goto Error; | ||
998 | } | ||
999 | } | ||
1000 | /* Prepares an embedded fwcmd, including request/response sizes. */ | ||
1001 | fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_NTWK_VLAN_CONFIG); | ||
1002 | |||
1003 | fwcmd->params.request.promiscuous = promiscuous; | ||
1004 | if (!promiscuous) { | ||
1005 | fwcmd->params.request.num_vlan = num; | ||
1006 | |||
1007 | if (num > 0) { | ||
1008 | ASSERT(vlan_tag_array); | ||
1009 | memcpy(fwcmd->params.request.vlan_tag, vlan_tag_array, | ||
1010 | num * sizeof(vlan_tag_array[0])); | ||
1011 | } | ||
1012 | } | ||
1013 | |||
1014 | /* Post the command */ | ||
1015 | status = be_function_post_mcc_wrb(pfob, wrb, generic_ctxt, | ||
1016 | cb, cb_context, NULL, NULL, fwcmd, NULL); | ||
1017 | if (status < 0) { | ||
1018 | TRACE(DL_ERR, "vlan fwcmd failed."); | ||
1019 | goto Error; | ||
1020 | } | ||
1021 | |||
1022 | Error: | ||
1023 | spin_unlock_irqrestore(&pfob->post_lock, irql); | ||
1024 | if (pfob->pend_queue_driving && pfob->mcc) { | ||
1025 | pfob->pend_queue_driving = 0; | ||
1026 | be_drive_mcc_wrb_queue(pfob->mcc); | ||
1027 | } | ||
1028 | return status; | ||
1029 | } | ||
1030 | |||
1031 | |||
1032 | int be_rxf_link_status(struct be_function_object *pfob, | ||
1033 | struct BE_LINK_STATUS *link_status, | ||
1034 | mcc_wrb_cqe_callback cb, | ||
1035 | void *cb_context, | ||
1036 | struct be_link_status_q_ctxt *q_ctxt) | ||
1037 | { | ||
1038 | struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY *fwcmd = NULL; | ||
1039 | struct MCC_WRB_AMAP *wrb = NULL; | ||
1040 | int status = 0; | ||
1041 | struct be_generic_q_ctxt *generic_ctxt = NULL; | ||
1042 | unsigned long irql; | ||
1043 | struct be_mcc_wrb_response_copy rc; | ||
1044 | |||
1045 | ASSERT(link_status); | ||
1046 | |||
1047 | spin_lock_irqsave(&pfob->post_lock, irql); | ||
1048 | |||
1049 | wrb = be_function_peek_mcc_wrb(pfob); | ||
1050 | |||
1051 | if (!wrb) { | ||
1052 | if (q_ctxt && cb) { | ||
1053 | wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header; | ||
1054 | generic_ctxt = (struct be_generic_q_ctxt *) q_ctxt; | ||
1055 | generic_ctxt->context.bytes = sizeof(*q_ctxt); | ||
1056 | } else { | ||
1057 | status = BE_STATUS_NO_MCC_WRB; | ||
1058 | goto Error; | ||
1059 | } | ||
1060 | } | ||
1061 | /* Prepares an embedded fwcmd, including request/response sizes. */ | ||
1062 | fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, | ||
1063 | COMMON_NTWK_LINK_STATUS_QUERY); | ||
1064 | |||
1065 | rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY, | ||
1066 | params.response); | ||
1067 | rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY, | ||
1068 | params.response); | ||
1069 | rc.va = link_status; | ||
1070 | /* Post or queue the f/w command */ | ||
1071 | status = be_function_post_mcc_wrb(pfob, wrb, generic_ctxt, | ||
1072 | cb, cb_context, NULL, NULL, fwcmd, &rc); | ||
1073 | |||
1074 | if (status < 0) { | ||
1075 | TRACE(DL_ERR, "link status fwcmd failed."); | ||
1076 | goto Error; | ||
1077 | } | ||
1078 | |||
1079 | Error: | ||
1080 | spin_unlock_irqrestore(&pfob->post_lock, irql); | ||
1081 | if (pfob->pend_queue_driving && pfob->mcc) { | ||
1082 | pfob->pend_queue_driving = 0; | ||
1083 | be_drive_mcc_wrb_queue(pfob->mcc); | ||
1084 | } | ||
1085 | return status; | ||
1086 | } | ||
1087 | |||
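A hypothetical caller sketch for the query above, using the callback-free path; the response payload is copied into the caller's BE_LINK_STATUS via the be_mcc_wrb_response_copy descriptor set up in the function.

    #include "hwlib.h"
    #include "bestatus.h"

    /* Hypothetical helper: issue a link-status query without a callback. */
    static int example_check_link(struct be_function_object *pfob,
                                  struct BE_LINK_STATUS *link_status)
    {
            int status;

            status = be_rxf_link_status(pfob, link_status, NULL, NULL, NULL);
            if (status < 0)
                    return status;

            /*
             * When the MCC ring is active the FWCMD may complete later
             * (BE_PENDING); link_status is only valid once the completion
             * has been processed.
             */
            return status;
    }
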
1088 | int | ||
1089 | be_rxf_query_eth_statistics(struct be_function_object *pfob, | ||
1090 | struct FWCMD_ETH_GET_STATISTICS *va_for_fwcmd, | ||
1091 | u64 pa_for_fwcmd, mcc_wrb_cqe_callback cb, | ||
1092 | void *cb_context, | ||
1093 | struct be_nonembedded_q_ctxt *q_ctxt) | ||
1094 | { | ||
1095 | struct MCC_WRB_AMAP *wrb = NULL; | ||
1096 | int status = 0; | ||
1097 | struct be_generic_q_ctxt *generic_ctxt = NULL; | ||
1098 | unsigned long irql; | ||
1099 | |||
1100 | ASSERT(va_for_fwcmd); | ||
1101 | ASSERT(pa_for_fwcmd); | ||
1102 | |||
1103 | spin_lock_irqsave(&pfob->post_lock, irql); | ||
1104 | |||
1105 | wrb = be_function_peek_mcc_wrb(pfob); | ||
1106 | |||
1107 | if (!wrb) { | ||
1108 | if (q_ctxt && cb) { | ||
1109 | wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header; | ||
1110 | generic_ctxt = (struct be_generic_q_ctxt *) q_ctxt; | ||
1111 | generic_ctxt->context.bytes = sizeof(*q_ctxt); | ||
1112 | } else { | ||
1113 | status = BE_STATUS_NO_MCC_WRB; | ||
1114 | goto Error; | ||
1115 | } | ||
1116 | } | ||
1117 | |||
1118 | TRACE(DL_INFO, "Query eth stats. fwcmd va:%p pa:0x%08x_%08x", | ||
1119 | va_for_fwcmd, upper_32_bits(pa_for_fwcmd), (u32)pa_for_fwcmd); | ||
1120 | |||
1121 | /* Prepares a non-embedded fwcmd, including request/response sizes. */ | ||
1122 | va_for_fwcmd = BE_PREPARE_NONEMBEDDED_FWCMD(pfob, wrb, | ||
1123 | va_for_fwcmd, pa_for_fwcmd, ETH_GET_STATISTICS); | ||
1124 | |||
1125 | /* Post the f/w command */ | ||
1126 | status = be_function_post_mcc_wrb(pfob, wrb, generic_ctxt, | ||
1127 | cb, cb_context, NULL, NULL, va_for_fwcmd, NULL); | ||
1128 | if (status < 0) { | ||
1129 | TRACE(DL_ERR, "eth stats fwcmd failed."); | ||
1130 | goto Error; | ||
1131 | } | ||
1132 | |||
1133 | Error: | ||
1134 | spin_unlock_irqrestore(&pfob->post_lock, irql); | ||
1135 | if (pfob->pend_queue_driving && pfob->mcc) { | ||
1136 | pfob->pend_queue_driving = 0; | ||
1137 | be_drive_mcc_wrb_queue(pfob->mcc); | ||
1138 | } | ||
1139 | return status; | ||
1140 | } | ||
1141 | |||
1142 | int | ||
1143 | be_rxf_promiscuous(struct be_function_object *pfob, | ||
1144 | bool enable_port0, bool enable_port1, | ||
1145 | mcc_wrb_cqe_callback cb, void *cb_context, | ||
1146 | struct be_promiscuous_q_ctxt *q_ctxt) | ||
1147 | { | ||
1148 | struct FWCMD_ETH_PROMISCUOUS *fwcmd = NULL; | ||
1149 | struct MCC_WRB_AMAP *wrb = NULL; | ||
1150 | int status = 0; | ||
1151 | struct be_generic_q_ctxt *generic_ctxt = NULL; | ||
1152 | unsigned long irql; | ||
1153 | |||
1154 | |||
1155 | spin_lock_irqsave(&pfob->post_lock, irql); | ||
1156 | |||
1157 | wrb = be_function_peek_mcc_wrb(pfob); | ||
1158 | |||
1159 | if (!wrb) { | ||
1160 | if (q_ctxt && cb) { | ||
1161 | wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header; | ||
1162 | generic_ctxt = (struct be_generic_q_ctxt *) q_ctxt; | ||
1163 | generic_ctxt->context.bytes = sizeof(*q_ctxt); | ||
1164 | } else { | ||
1165 | status = BE_STATUS_NO_MCC_WRB; | ||
1166 | goto Error; | ||
1167 | } | ||
1168 | } | ||
1169 | /* Prepares an embedded fwcmd, including request/response sizes. */ | ||
1170 | fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, ETH_PROMISCUOUS); | ||
1171 | |||
1172 | fwcmd->params.request.port0_promiscuous = enable_port0; | ||
1173 | fwcmd->params.request.port1_promiscuous = enable_port1; | ||
1174 | |||
1175 | /* Post the f/w command */ | ||
1176 | status = be_function_post_mcc_wrb(pfob, wrb, generic_ctxt, | ||
1177 | cb, cb_context, NULL, NULL, fwcmd, NULL); | ||
1178 | |||
1179 | if (status < 0) { | ||
1180 | TRACE(DL_ERR, "promiscuous fwcmd failed."); | ||
1181 | goto Error; | ||
1182 | } | ||
1183 | |||
1184 | Error: | ||
1185 | spin_unlock_irqrestore(&pfob->post_lock, irql); | ||
1186 | if (pfob->pend_queue_driving && pfob->mcc) { | ||
1187 | pfob->pend_queue_driving = 0; | ||
1188 | be_drive_mcc_wrb_queue(pfob->mcc); | ||
1189 | } | ||
1190 | return status; | ||
1191 | } | ||
1192 | |||
1193 | |||
1194 | /* | ||
1195 | *------------------------------------------------------------------------- | ||
1196 | * Function: be_rxf_filter_config | ||
1197 | * Configures BladeEngine ethernet receive filter settings. | ||
1198 | * pfob - | ||
1199 | * settings - Pointer to the requested filter settings. | ||
1200 | * The response from BladeEngine will be placed back | ||
1201 | * in this structure. | ||
1202 | * cb - optional | ||
1203 | * cb_context - optional | ||
1204 | * q_ctxt - Optional. Pointer to a previously allocated struct. | ||
1205 | * If the MCC WRB ring is full, this structure is | ||
1206 | * used to queue the operation. It will be posted | ||
1207 | * to the MCC ring when space becomes available. All | ||
1208 | * queued commands will be posted to the ring in | ||
1209 | * the order they are received. It is always valid | ||
1210 | * to pass a pointer to a generic | ||
1211 | * be_generic_q_ctxt. However, the specific | ||
1212 | * context structs are generally smaller than | ||
1213 | * the generic struct. | ||
1214 | * return pend_status - BE_SUCCESS (0) on success. | ||
1215 | * BE_PENDING (positive value) if the FWCMD | ||
1216 | * completion is pending. Negative error code on failure. | ||
1217 | *--------------------------------------------------------------------------- | ||
1218 | */ | ||
1219 | int | ||
1220 | be_rxf_filter_config(struct be_function_object *pfob, | ||
1221 | struct NTWK_RX_FILTER_SETTINGS *settings, | ||
1222 | mcc_wrb_cqe_callback cb, void *cb_context, | ||
1223 | struct be_rxf_filter_q_ctxt *q_ctxt) | ||
1224 | { | ||
1225 | struct FWCMD_COMMON_NTWK_RX_FILTER *fwcmd = NULL; | ||
1226 | struct MCC_WRB_AMAP *wrb = NULL; | ||
1227 | int status = 0; | ||
1228 | struct be_generic_q_ctxt *generic_ctxt = NULL; | ||
1229 | unsigned long irql; | ||
1230 | struct be_mcc_wrb_response_copy rc; | ||
1231 | |||
1232 | ASSERT(settings); | ||
1233 | |||
1234 | spin_lock_irqsave(&pfob->post_lock, irql); | ||
1235 | |||
1236 | wrb = be_function_peek_mcc_wrb(pfob); | ||
1237 | |||
1238 | if (!wrb) { | ||
1239 | if (q_ctxt && cb) { | ||
1240 | wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header; | ||
1241 | generic_ctxt = (struct be_generic_q_ctxt *) q_ctxt; | ||
1242 | generic_ctxt->context.bytes = sizeof(*q_ctxt); | ||
1243 | } else { | ||
1244 | status = BE_STATUS_NO_MCC_WRB; | ||
1245 | goto Error; | ||
1246 | } | ||
1247 | } | ||
1248 | /* Prepares an embedded fwcmd, including request/response sizes. */ | ||
1249 | fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_NTWK_RX_FILTER); | ||
1250 | memcpy(&fwcmd->params.request, settings, sizeof(*settings)); | ||
1251 | |||
1252 | rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_NTWK_RX_FILTER, | ||
1253 | params.response); | ||
1254 | rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_NTWK_RX_FILTER, | ||
1255 | params.response); | ||
1256 | rc.va = settings; | ||
1257 | /* Post or queue the f/w command */ | ||
1258 | status = be_function_post_mcc_wrb(pfob, wrb, generic_ctxt, | ||
1259 | cb, cb_context, NULL, NULL, fwcmd, &rc); | ||
1260 | |||
1261 | if (status < 0) { | ||
1262 | TRACE(DL_ERR, "RXF/ERX filter config fwcmd failed."); | ||
1263 | goto Error; | ||
1264 | } | ||
1265 | |||
1266 | Error: | ||
1267 | spin_unlock_irqrestore(&pfob->post_lock, irql); | ||
1268 | if (pfob->pend_queue_driving && pfob->mcc) { | ||
1269 | pfob->pend_queue_driving = 0; | ||
1270 | be_drive_mcc_wrb_queue(pfob->mcc); | ||
1271 | } | ||
1272 | return status; | ||
1273 | } | ||
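A hypothetical sketch of the queue-on-full behaviour described in the comment above: by passing both a completion callback and a q_ctxt, a full WRB ring queues the request on the MCC backlog instead of failing with BE_STATUS_NO_MCC_WRB. The example_* names are invented; the settings buffer and q_ctxt must stay valid until the callback fires.

    #include "hwlib.h"
    #include "bestatus.h"

    /* Hypothetical per-request container keeping settings and q_ctxt alive. */
    struct example_filter_request {
            struct NTWK_RX_FILTER_SETTINGS settings;
            struct be_rxf_filter_q_ctxt q_ctxt;
    };

    /* Completion callback: the response has been copied back into settings. */
    static void example_filter_done(void *context, int status,
                                    struct MCC_WRB_AMAP *wrb)
    {
            struct example_filter_request *req = context;

            if (status != BE_SUCCESS)
                    TRACE(DL_ERR, "rx filter config failed. status:%d", status);

            /* req->settings now reflects the adapter's view of the filter. */
            (void)req;
            (void)wrb;
    }

    /* Hypothetical submit helper: may return BE_SUCCESS, BE_PENDING or < 0. */
    static int example_filter_submit(struct be_function_object *pfob,
                                     struct example_filter_request *req)
    {
            return be_rxf_filter_config(pfob, &req->settings,
                                        example_filter_done, req,
                                        &req->q_ctxt);
    }
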
diff --git a/drivers/staging/benet/etx_context.h b/drivers/staging/benet/etx_context.h deleted file mode 100644 index 554fbe5d127b..000000000000 --- a/drivers/staging/benet/etx_context.h +++ /dev/null | |||
@@ -1,55 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | /* | ||
18 | * Autogenerated by srcgen version: 0127 | ||
19 | */ | ||
20 | #ifndef __etx_context_amap_h__ | ||
21 | #define __etx_context_amap_h__ | ||
22 | |||
23 | /* ETX ring context structure. */ | ||
24 | struct BE_ETX_CONTEXT_AMAP { | ||
25 | u8 tx_cidx[11]; /* DWORD 0 */ | ||
26 | u8 rsvd0[5]; /* DWORD 0 */ | ||
27 | u8 rsvd1[16]; /* DWORD 0 */ | ||
28 | u8 tx_pidx[11]; /* DWORD 1 */ | ||
29 | u8 rsvd2; /* DWORD 1 */ | ||
30 | u8 tx_ring_size[4]; /* DWORD 1 */ | ||
31 | u8 pd_id[5]; /* DWORD 1 */ | ||
32 | u8 pd_id_not_valid; /* DWORD 1 */ | ||
33 | u8 cq_id_send[10]; /* DWORD 1 */ | ||
34 | u8 rsvd3[32]; /* DWORD 2 */ | ||
35 | u8 rsvd4[32]; /* DWORD 3 */ | ||
36 | u8 cur_bytes[32]; /* DWORD 4 */ | ||
37 | u8 max_bytes[32]; /* DWORD 5 */ | ||
38 | u8 time_stamp[32]; /* DWORD 6 */ | ||
39 | u8 rsvd5[11]; /* DWORD 7 */ | ||
40 | u8 func; /* DWORD 7 */ | ||
41 | u8 rsvd6[20]; /* DWORD 7 */ | ||
42 | u8 cur_txd_count[32]; /* DWORD 8 */ | ||
43 | u8 max_txd_count[32]; /* DWORD 9 */ | ||
44 | u8 rsvd7[32]; /* DWORD 10 */ | ||
45 | u8 rsvd8[32]; /* DWORD 11 */ | ||
46 | u8 rsvd9[32]; /* DWORD 12 */ | ||
47 | u8 rsvd10[32]; /* DWORD 13 */ | ||
48 | u8 rsvd11[32]; /* DWORD 14 */ | ||
49 | u8 rsvd12[32]; /* DWORD 15 */ | ||
50 | } __packed; | ||
51 | struct ETX_CONTEXT_AMAP { | ||
52 | u32 dw[16]; | ||
53 | }; | ||
54 | |||
55 | #endif /* __etx_context_amap_h__ */ | ||
diff --git a/drivers/staging/benet/funcobj.c b/drivers/staging/benet/funcobj.c deleted file mode 100644 index 0f57eb58daef..000000000000 --- a/drivers/staging/benet/funcobj.c +++ /dev/null | |||
@@ -1,565 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | #include "hwlib.h" | ||
18 | #include "bestatus.h" | ||
19 | |||
20 | |||
21 | int | ||
22 | be_function_internal_query_firmware_config(struct be_function_object *pfob, | ||
23 | struct BE_FIRMWARE_CONFIG *config) | ||
24 | { | ||
25 | struct FWCMD_COMMON_FIRMWARE_CONFIG *fwcmd = NULL; | ||
26 | struct MCC_WRB_AMAP *wrb = NULL; | ||
27 | int status = 0; | ||
28 | unsigned long irql; | ||
29 | struct be_mcc_wrb_response_copy rc; | ||
30 | |||
31 | spin_lock_irqsave(&pfob->post_lock, irql); | ||
32 | |||
33 | wrb = be_function_peek_mcc_wrb(pfob); | ||
34 | if (!wrb) { | ||
35 | TRACE(DL_ERR, "MCC wrb peek failed."); | ||
36 | status = BE_STATUS_NO_MCC_WRB; | ||
37 | goto error; | ||
38 | } | ||
39 | /* Prepares an embedded fwcmd, including request/response sizes. */ | ||
40 | fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_FIRMWARE_CONFIG); | ||
41 | |||
42 | rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_FIRMWARE_CONFIG, | ||
43 | params.response); | ||
44 | rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_FIRMWARE_CONFIG, | ||
45 | params.response); | ||
46 | rc.va = config; | ||
47 | |||
48 | /* Post the f/w command */ | ||
49 | status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, | ||
50 | NULL, NULL, NULL, fwcmd, &rc); | ||
51 | error: | ||
52 | spin_unlock_irqrestore(&pfob->post_lock, irql); | ||
53 | if (pfob->pend_queue_driving && pfob->mcc) { | ||
54 | pfob->pend_queue_driving = 0; | ||
55 | be_drive_mcc_wrb_queue(pfob->mcc); | ||
56 | } | ||
57 | return status; | ||
58 | } | ||
59 | |||
60 | /* | ||
61 | This allocates and initializes a function object based on the information | ||
62 | provided by upper layer drivers. | ||
63 | |||
64 | Returns BE_SUCCESS on success and an appropriate int on failure. | ||
65 | |||
66 | A function object represents a single BladeEngine (logical) PCI function. | ||
67 | That is, a function object represents either | ||
68 | the networking side of BladeEngine or the iSCSI side of BladeEngine. | ||
69 | |||
70 | This routine will also detect and create an appropriate PD object for the | ||
71 | PCI function as needed. | ||
72 | */ | ||
73 | int | ||
74 | be_function_object_create(u8 __iomem *csr_va, u8 __iomem *db_va, | ||
75 | u8 __iomem *pci_va, u32 function_type, | ||
76 | struct ring_desc *mailbox, struct be_function_object *pfob) | ||
77 | { | ||
78 | int status; | ||
79 | |||
80 | ASSERT(pfob); /* not a magic assert */ | ||
81 | ASSERT(function_type <= 2); | ||
82 | |||
83 | TRACE(DL_INFO, "Create function object. type:%s object:0x%p", | ||
84 | (function_type == BE_FUNCTION_TYPE_ISCSI ? "iSCSI" : | ||
85 | (function_type == BE_FUNCTION_TYPE_NETWORK ? "Network" : | ||
86 | "Arm")), pfob); | ||
87 | |||
88 | memset(pfob, 0, sizeof(*pfob)); | ||
89 | |||
90 | pfob->type = function_type; | ||
91 | pfob->csr_va = csr_va; | ||
92 | pfob->db_va = db_va; | ||
93 | pfob->pci_va = pci_va; | ||
94 | |||
95 | spin_lock_init(&pfob->cq_lock); | ||
96 | spin_lock_init(&pfob->post_lock); | ||
97 | spin_lock_init(&pfob->mcc_context_lock); | ||
98 | |||
99 | |||
100 | pfob->pci_function_number = 1; | ||
101 | |||
102 | |||
103 | pfob->emulate = false; | ||
104 | TRACE(DL_NOTE, "Non-emulation mode"); | ||
105 | status = be_drive_POST(pfob); | ||
106 | if (status != BE_SUCCESS) { | ||
107 | TRACE(DL_ERR, "BladeEngine POST failed."); | ||
108 | goto error; | ||
109 | } | ||
110 | |||
111 | /* Initialize the mailbox */ | ||
112 | status = be_mpu_init_mailbox(pfob, mailbox); | ||
113 | if (status != BE_SUCCESS) { | ||
114 | TRACE(DL_ERR, "Failed to initialize mailbox."); | ||
115 | goto error; | ||
116 | } | ||
117 | /* | ||
118 | * Cache the firmware config for ASSERTs in hwlib and later | ||
119 | * driver queries. | ||
120 | */ | ||
121 | status = be_function_internal_query_firmware_config(pfob, | ||
122 | &pfob->fw_config); | ||
123 | if (status != BE_SUCCESS) { | ||
124 | TRACE(DL_ERR, "Failed to query firmware config."); | ||
125 | goto error; | ||
126 | } | ||
127 | |||
128 | error: | ||
129 | if (status != BE_SUCCESS) { | ||
130 | /* No cleanup necessary */ | ||
131 | TRACE(DL_ERR, "Failed to create function."); | ||
132 | memset(pfob, 0, sizeof(*pfob)); | ||
133 | } | ||
134 | return status; | ||
135 | } | ||
136 | |||
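A hypothetical bring-up sketch tying the pieces together, under the assumption that the caller has already ioremap()ed the CSR/doorbell/PCI BARs and allocated DMA-able memory for the bootstrap mailbox described by mailbox; example_bring_up is an invented name.

    #include "hwlib.h"
    #include "bestatus.h"

    /* Hypothetical bring-up: create the function object, later tear it down. */
    static int example_bring_up(u8 __iomem *csr_va, u8 __iomem *db_va,
                                u8 __iomem *pci_va, struct ring_desc *mailbox,
                                struct be_function_object *pfob)
    {
            int status;

            status = be_function_object_create(csr_va, db_va, pci_va,
                            BE_FUNCTION_TYPE_NETWORK, mailbox, pfob);
            if (status != BE_SUCCESS)
                    return status;

            /* ... create EQ/CQ/MCC and ethernet rings, run the device ... */

            return be_function_cleanup(pfob);
    }
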
137 | /* | ||
138 | This routine drops the reference count on a given function object. Once | ||
139 | the reference count falls to zero, the function object is destroyed and all | ||
140 | resources held are freed. | ||
141 | |||
142 | FunctionObject - The function object to drop the reference to. | ||
143 | */ | ||
144 | int be_function_object_destroy(struct be_function_object *pfob) | ||
145 | { | ||
146 | TRACE(DL_INFO, "Destroy pfob. Object:0x%p", | ||
147 | pfob); | ||
148 | |||
149 | |||
150 | ASSERT(pfob->mcc == NULL); | ||
151 | |||
152 | return BE_SUCCESS; | ||
153 | } | ||
154 | |||
155 | int be_function_cleanup(struct be_function_object *pfob) | ||
156 | { | ||
157 | int status = 0; | ||
158 | u32 isr; | ||
159 | u32 host_intr; | ||
160 | struct PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP ctrl; | ||
161 | |||
162 | |||
163 | if (pfob->type == BE_FUNCTION_TYPE_NETWORK) { | ||
164 | status = be_rxf_multicast_config(pfob, false, 0, | ||
165 | NULL, NULL, NULL, NULL); | ||
166 | ASSERT(status == BE_SUCCESS); | ||
167 | } | ||
168 | /* VLAN */ | ||
169 | status = be_rxf_vlan_config(pfob, false, 0, NULL, NULL, NULL, NULL); | ||
170 | ASSERT(status == BE_SUCCESS); | ||
171 | /* | ||
172 | * MCC Queue -- Switches to mailbox mode. May want to destroy | ||
173 | * all but the MCC CQ before this call if polling CQ is much better | ||
174 | * performance than polling mailbox register. | ||
175 | */ | ||
176 | if (pfob->mcc) | ||
177 | status = be_mcc_ring_destroy(pfob->mcc); | ||
178 | /* | ||
179 | * If interrupts are disabled, clear any CEV interrupt assertions that | ||
180 | * fired after we stopped processing EQs. | ||
181 | */ | ||
182 | ctrl.dw[0] = PCICFG1_READ(pfob, host_timer_int_ctrl); | ||
183 | host_intr = AMAP_GET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR, | ||
184 | hostintr, ctrl.dw); | ||
185 | if (!host_intr) | ||
186 | if (pfob->type == BE_FUNCTION_TYPE_NETWORK) | ||
187 | isr = CSR_READ(pfob, cev.isr1); | ||
188 | else | ||
189 | isr = CSR_READ(pfob, cev.isr0); | ||
190 | else | ||
191 | /* This should never happen... */ | ||
192 | TRACE(DL_ERR, "function_cleanup called with interrupt enabled"); | ||
193 | /* Function object destroy */ | ||
194 | status = be_function_object_destroy(pfob); | ||
195 | ASSERT(status == BE_SUCCESS); | ||
196 | |||
197 | return status; | ||
198 | } | ||
199 | |||
200 | |||
201 | void * | ||
202 | be_function_prepare_embedded_fwcmd(struct be_function_object *pfob, | ||
203 | struct MCC_WRB_AMAP *wrb, u32 payld_len, u32 request_length, | ||
204 | u32 response_length, u32 opcode, u32 subsystem) | ||
205 | { | ||
206 | struct FWCMD_REQUEST_HEADER *header = NULL; | ||
207 | u32 n; | ||
208 | |||
209 | ASSERT(wrb); | ||
210 | |||
211 | n = offsetof(struct BE_MCC_WRB_AMAP, payload)/8; | ||
212 | AMAP_SET_BITS_PTR(MCC_WRB, embedded, wrb, 1); | ||
213 | AMAP_SET_BITS_PTR(MCC_WRB, payload_length, wrb, min(payld_len, n)); | ||
214 | header = (struct FWCMD_REQUEST_HEADER *)((u8 *)wrb + n); | ||
215 | |||
216 | header->timeout = 0; | ||
217 | header->domain = 0; | ||
218 | header->request_length = max(request_length, response_length); | ||
219 | header->opcode = opcode; | ||
220 | header->subsystem = subsystem; | ||
221 | |||
222 | return header; | ||
223 | } | ||
224 | |||
225 | void * | ||
226 | be_function_prepare_nonembedded_fwcmd(struct be_function_object *pfob, | ||
227 | struct MCC_WRB_AMAP *wrb, | ||
228 | void *fwcmd_va, u64 fwcmd_pa, | ||
229 | u32 payld_len, | ||
230 | u32 request_length, | ||
231 | u32 response_length, | ||
232 | u32 opcode, u32 subsystem) | ||
233 | { | ||
234 | struct FWCMD_REQUEST_HEADER *header = NULL; | ||
235 | u32 n; | ||
236 | struct MCC_WRB_PAYLOAD_AMAP *plp; | ||
237 | |||
238 | ASSERT(wrb); | ||
239 | ASSERT(fwcmd_va); | ||
240 | |||
241 | header = (struct FWCMD_REQUEST_HEADER *) fwcmd_va; | ||
242 | |||
243 | AMAP_SET_BITS_PTR(MCC_WRB, embedded, wrb, 0); | ||
244 | AMAP_SET_BITS_PTR(MCC_WRB, payload_length, wrb, payld_len); | ||
245 | |||
246 | /* | ||
247 | * Assume one fragment. The caller may override the SGL by | ||
248 | * rewriting the 0th length and adding more entries. They | ||
249 | * will also need to update the sge_count. | ||
250 | */ | ||
251 | AMAP_SET_BITS_PTR(MCC_WRB, sge_count, wrb, 1); | ||
252 | |||
253 | n = offsetof(struct BE_MCC_WRB_AMAP, payload)/8; | ||
254 | plp = (struct MCC_WRB_PAYLOAD_AMAP *)((u8 *)wrb + n); | ||
255 | AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[0].length, plp, payld_len); | ||
256 | AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[0].pa_lo, plp, (u32)fwcmd_pa); | ||
257 | AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[0].pa_hi, plp, | ||
258 | upper_32_bits(fwcmd_pa)); | ||
259 | |||
260 | header->timeout = 0; | ||
261 | header->domain = 0; | ||
262 | header->request_length = max(request_length, response_length); | ||
263 | header->opcode = opcode; | ||
264 | header->subsystem = subsystem; | ||
265 | |||
266 | return header; | ||
267 | } | ||
268 | |||
269 | struct MCC_WRB_AMAP * | ||
270 | be_function_peek_mcc_wrb(struct be_function_object *pfob) | ||
271 | { | ||
272 | struct MCC_WRB_AMAP *wrb = NULL; | ||
273 | u32 offset; | ||
274 | |||
275 | if (pfob->mcc) | ||
276 | wrb = _be_mpu_peek_ring_wrb(pfob->mcc, false); | ||
277 | else { | ||
278 | offset = offsetof(struct BE_MCC_MAILBOX_AMAP, wrb)/8; | ||
279 | wrb = (struct MCC_WRB_AMAP *) ((u8 *) pfob->mailbox.va + | ||
280 | offset); | ||
281 | } | ||
282 | |||
283 | if (wrb) | ||
284 | memset(wrb, 0, sizeof(struct MCC_WRB_AMAP)); | ||
285 | |||
286 | return wrb; | ||
287 | } | ||
288 | |||
289 | #if defined(BE_DEBUG) | ||
290 | void be_function_debug_print_wrb(struct be_function_object *pfob, | ||
291 | struct MCC_WRB_AMAP *wrb, void *optional_fwcmd_va, | ||
292 | struct be_mcc_wrb_context *wrb_context) | ||
293 | { | ||
294 | |||
295 | struct FWCMD_REQUEST_HEADER *header = NULL; | ||
296 | u8 embedded; | ||
297 | u32 n; | ||
298 | |||
299 | embedded = AMAP_GET_BITS_PTR(MCC_WRB, embedded, wrb); | ||
300 | |||
301 | if (embedded) { | ||
302 | n = offsetof(struct BE_MCC_WRB_AMAP, payload)/8; | ||
303 | header = (struct FWCMD_REQUEST_HEADER *)((u8 *)wrb + n); | ||
304 | } else { | ||
305 | header = (struct FWCMD_REQUEST_HEADER *) optional_fwcmd_va; | ||
306 | } | ||
307 | |||
308 | /* Save the completed count before posting for a debug assert. */ | ||
309 | |||
310 | if (header) { | ||
311 | wrb_context->opcode = header->opcode; | ||
312 | wrb_context->subsystem = header->subsystem; | ||
313 | |||
314 | } else { | ||
315 | wrb_context->opcode = 0; | ||
316 | wrb_context->subsystem = 0; | ||
317 | } | ||
318 | } | ||
319 | #else | ||
320 | #define be_function_debug_print_wrb(a_, b_, c_, d_) | ||
321 | #endif | ||
322 | |||
323 | int | ||
324 | be_function_post_mcc_wrb(struct be_function_object *pfob, | ||
325 | struct MCC_WRB_AMAP *wrb, | ||
326 | struct be_generic_q_ctxt *q_ctxt, | ||
327 | mcc_wrb_cqe_callback cb, void *cb_context, | ||
328 | mcc_wrb_cqe_callback internal_cb, | ||
329 | void *internal_cb_context, void *optional_fwcmd_va, | ||
330 | struct be_mcc_wrb_response_copy *rc) | ||
331 | { | ||
332 | int status; | ||
333 | struct be_mcc_wrb_context *wrb_context = NULL; | ||
334 | u64 *p; | ||
335 | |||
336 | if (q_ctxt) { | ||
337 | /* Initialize context. */ | ||
338 | q_ctxt->context.internal_cb = internal_cb; | ||
339 | q_ctxt->context.internal_cb_context = internal_cb_context; | ||
340 | q_ctxt->context.cb = cb; | ||
341 | q_ctxt->context.cb_context = cb_context; | ||
342 | if (rc) { | ||
343 | q_ctxt->context.copy.length = rc->length; | ||
344 | q_ctxt->context.copy.fwcmd_offset = rc->fwcmd_offset; | ||
345 | q_ctxt->context.copy.va = rc->va; | ||
346 | } else | ||
347 | q_ctxt->context.copy.length = 0; | ||
348 | |||
349 | q_ctxt->context.optional_fwcmd_va = optional_fwcmd_va; | ||
350 | |||
351 | /* Queue this request */ | ||
352 | status = be_function_queue_mcc_wrb(pfob, q_ctxt); | ||
353 | |||
354 | goto Error; | ||
355 | } | ||
356 | /* | ||
357 | * Allocate a WRB context struct to hold the callback pointers, | ||
358 | * status, etc. This is required if commands complete out of order. | ||
359 | */ | ||
360 | wrb_context = _be_mcc_allocate_wrb_context(pfob); | ||
361 | if (!wrb_context) { | ||
362 | TRACE(DL_WARN, "Failed to allocate MCC WRB context."); | ||
363 | status = BE_STATUS_SYSTEM_RESOURCES; | ||
364 | goto Error; | ||
365 | } | ||
366 | /* Initialize context. */ | ||
367 | memset(wrb_context, 0, sizeof(*wrb_context)); | ||
368 | wrb_context->internal_cb = internal_cb; | ||
369 | wrb_context->internal_cb_context = internal_cb_context; | ||
370 | wrb_context->cb = cb; | ||
371 | wrb_context->cb_context = cb_context; | ||
372 | if (rc) { | ||
373 | wrb_context->copy.length = rc->length; | ||
374 | wrb_context->copy.fwcmd_offset = rc->fwcmd_offset; | ||
375 | wrb_context->copy.va = rc->va; | ||
376 | } else | ||
377 | wrb_context->copy.length = 0; | ||
378 | wrb_context->wrb = wrb; | ||
379 | |||
380 | /* | ||
381 | * Copy the context pointer into the WRB opaque tag field. | ||
382 | * Verify assumption of 64-bit tag with a compile time assert. | ||
383 | */ | ||
384 | p = (u64 *) ((u8 *)wrb + offsetof(struct BE_MCC_WRB_AMAP, tag)/8); | ||
385 | *p = (u64)(size_t)wrb_context; | ||
386 | |||
387 | /* Print info about this FWCMD for debug builds. */ | ||
388 | be_function_debug_print_wrb(pfob, wrb, optional_fwcmd_va, wrb_context); | ||
389 | |||
390 | /* | ||
391 | * issue the WRB to the MPU as appropriate | ||
392 | */ | ||
393 | if (pfob->mcc) { | ||
394 | /* | ||
395 | * we're in WRB mode, pass to the mcc layer | ||
396 | */ | ||
397 | status = _be_mpu_post_wrb_ring(pfob->mcc, wrb, wrb_context); | ||
398 | } else { | ||
399 | /* | ||
400 | * we're in mailbox mode | ||
401 | */ | ||
402 | status = _be_mpu_post_wrb_mailbox(pfob, wrb, wrb_context); | ||
403 | |||
404 | /* mailbox mode always completes synchronously */ | ||
405 | ASSERT(status != BE_STATUS_PENDING); | ||
406 | } | ||
407 | |||
408 | Error: | ||
409 | |||
410 | return status; | ||
411 | } | ||
412 | |||
413 | int | ||
414 | be_function_ring_destroy(struct be_function_object *pfob, | ||
415 | u32 id, u32 ring_type, mcc_wrb_cqe_callback cb, | ||
416 | void *cb_context, mcc_wrb_cqe_callback internal_cb, | ||
417 | void *internal_cb_context) | ||
418 | { | ||
419 | |||
420 | struct FWCMD_COMMON_RING_DESTROY *fwcmd = NULL; | ||
421 | struct MCC_WRB_AMAP *wrb = NULL; | ||
422 | int status = 0; | ||
423 | unsigned long irql; | ||
424 | |||
425 | spin_lock_irqsave(&pfob->post_lock, irql); | ||
426 | |||
427 | TRACE(DL_INFO, "Destroy ring id:%d type:%d", id, ring_type); | ||
428 | |||
429 | wrb = be_function_peek_mcc_wrb(pfob); | ||
430 | if (!wrb) { | ||
431 | ASSERT(wrb); | ||
432 | TRACE(DL_ERR, "No free MCC WRBs in destroy ring."); | ||
433 | status = BE_STATUS_NO_MCC_WRB; | ||
434 | goto Error; | ||
435 | } | ||
436 | /* Prepares an embedded fwcmd, including request/response sizes. */ | ||
437 | fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_RING_DESTROY); | ||
438 | |||
439 | fwcmd->params.request.id = id; | ||
440 | fwcmd->params.request.ring_type = ring_type; | ||
441 | |||
442 | /* Post the f/w command */ | ||
443 | status = be_function_post_mcc_wrb(pfob, wrb, NULL, cb, cb_context, | ||
444 | internal_cb, internal_cb_context, fwcmd, NULL); | ||
445 | if (status != BE_SUCCESS && status != BE_PENDING) { | ||
446 | TRACE(DL_ERR, "Ring destroy fwcmd failed. id:%d ring_type:%d", | ||
447 | id, ring_type); | ||
448 | goto Error; | ||
449 | } | ||
450 | |||
451 | Error: | ||
452 | spin_unlock_irqrestore(&pfob->post_lock, irql); | ||
453 | if (pfob->pend_queue_driving && pfob->mcc) { | ||
454 | pfob->pend_queue_driving = 0; | ||
455 | be_drive_mcc_wrb_queue(pfob->mcc); | ||
456 | } | ||
457 | return status; | ||
458 | } | ||
459 | |||
460 | void | ||
461 | be_rd_to_pa_list(struct ring_desc *rd, struct PHYS_ADDR *pa_list, u32 max_num) | ||
462 | { | ||
463 | u32 num_pages = PAGES_SPANNED(rd->va, rd->length); | ||
464 | u32 i = 0; | ||
465 | u64 pa = rd->pa; | ||
466 | __le64 lepa; | ||
467 | |||
468 | ASSERT(pa_list); | ||
469 | ASSERT(pa); | ||
470 | |||
471 | for (i = 0; i < min(num_pages, max_num); i++) { | ||
472 | lepa = cpu_to_le64(pa); | ||
473 | pa_list[i].lo = (u32)lepa; | ||
474 | pa_list[i].hi = upper_32_bits(lepa); | ||
475 | pa += PAGE_SIZE; | ||
476 | } | ||
477 | } | ||
478 | |||
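A short hypothetical illustration of the helper above: with 4 KiB pages, a page-aligned 8 KiB receive ring spans exactly two pages, which matches the hard-coded num_pages = 2 in be_eth_rq_create(); the helper name is invented.

    #include "hwlib.h"

    /* Hypothetical use: build the two-entry page list for an 8 KiB eth RQ. */
    static void example_fill_rq_pages(struct ring_desc *rd,
                                      struct PHYS_ADDR pages[2])
    {
            /* One PHYS_ADDR (lo/hi pair) is written per spanned page. */
            be_rd_to_pa_list(rd, pages, 2);
    }
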
479 | |||
480 | |||
481 | /*----------------------------------------------------------------------------- | ||
482 | * Function: be_function_get_fw_version | ||
483 | * Retrieves the firmware version on the adapter. If the callback is | ||
484 | * NULL this call executes synchronously. If the callback is not NULL, | ||
485 | * the returned status will be BE_PENDING if the command was issued | ||
486 | * successfully. | ||
487 | * pfob - | ||
488 | * fwv - Pointer to response buffer if callback is NULL. | ||
489 | * cb - Callback function invoked when the FWCMD completes. | ||
490 | * cb_context - Passed to the callback function. | ||
491 | * return pend_status - BE_SUCCESS (0) on success. | ||
492 | * BE_PENDING (positive value) if the FWCMD | ||
493 | * completion is pending. Negative error code on failure. | ||
494 | *--------------------------------------------------------------------------- | ||
495 | */ | ||
496 | int | ||
497 | be_function_get_fw_version(struct be_function_object *pfob, | ||
498 | struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD *fwv, | ||
499 | mcc_wrb_cqe_callback cb, void *cb_context) | ||
500 | { | ||
501 | int status = BE_SUCCESS; | ||
502 | struct MCC_WRB_AMAP *wrb = NULL; | ||
503 | struct FWCMD_COMMON_GET_FW_VERSION *fwcmd = NULL; | ||
504 | unsigned long irql; | ||
505 | struct be_mcc_wrb_response_copy rc; | ||
506 | |||
507 | spin_lock_irqsave(&pfob->post_lock, irql); | ||
508 | |||
509 | wrb = be_function_peek_mcc_wrb(pfob); | ||
510 | if (!wrb) { | ||
511 | TRACE(DL_ERR, "MCC wrb peek failed."); | ||
512 | status = BE_STATUS_NO_MCC_WRB; | ||
513 | goto Error; | ||
514 | } | ||
515 | |||
516 | if (!cb && !fwv) { | ||
517 | TRACE(DL_ERR, "callback and response buffer NULL!"); | ||
518 | status = BE_NOT_OK; | ||
519 | goto Error; | ||
520 | } | ||
521 | /* Prepares an embedded fwcmd, including request/response sizes. */ | ||
522 | fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_GET_FW_VERSION); | ||
523 | |||
524 | rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_GET_FW_VERSION, | ||
525 | params.response); | ||
526 | rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_GET_FW_VERSION, | ||
527 | params.response); | ||
528 | rc.va = fwv; | ||
529 | |||
530 | /* Post the f/w command */ | ||
531 | status = be_function_post_mcc_wrb(pfob, wrb, NULL, cb, | ||
532 | cb_context, NULL, NULL, fwcmd, &rc); | ||
533 | |||
534 | Error: | ||
535 | spin_unlock_irqrestore(&pfob->post_lock, irql); | ||
536 | if (pfob->pend_queue_driving && pfob->mcc) { | ||
537 | pfob->pend_queue_driving = 0; | ||
538 | be_drive_mcc_wrb_queue(pfob->mcc); | ||
539 | } | ||
540 | return status; | ||
541 | } | ||
542 | |||
543 | int | ||
544 | be_function_queue_mcc_wrb(struct be_function_object *pfob, | ||
545 | struct be_generic_q_ctxt *q_ctxt) | ||
546 | { | ||
547 | int status; | ||
548 | |||
549 | ASSERT(q_ctxt); | ||
550 | |||
551 | /* | ||
552 | * issue the WRB to the MPU as appropriate | ||
553 | */ | ||
554 | if (pfob->mcc) { | ||
555 | |||
556 | /* We're in ring mode. Queue this item. */ | ||
557 | pfob->mcc->backlog_length++; | ||
558 | list_add_tail(&q_ctxt->context.list, &pfob->mcc->backlog); | ||
559 | status = BE_PENDING; | ||
560 | } else { | ||
561 | status = BE_NOT_OK; | ||
562 | } | ||
563 | return status; | ||
564 | } | ||
565 | |||
diff --git a/drivers/staging/benet/fwcmd_common.h b/drivers/staging/benet/fwcmd_common.h deleted file mode 100644 index 406e0d6fa985..000000000000 --- a/drivers/staging/benet/fwcmd_common.h +++ /dev/null | |||
@@ -1,222 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | /* | ||
18 | * Autogenerated by srcgen version: 0127 | ||
19 | */ | ||
20 | #ifndef __fwcmd_common_amap_h__ | ||
21 | #define __fwcmd_common_amap_h__ | ||
22 | #include "host_struct.h" | ||
23 | |||
24 | /* --- PHY_LINK_DUPLEX_ENUM --- */ | ||
25 | #define PHY_LINK_DUPLEX_NONE (0) | ||
26 | #define PHY_LINK_DUPLEX_HALF (1) | ||
27 | #define PHY_LINK_DUPLEX_FULL (2) | ||
28 | |||
29 | /* --- PHY_LINK_SPEED_ENUM --- */ | ||
30 | #define PHY_LINK_SPEED_ZERO (0) /* No link. */ | ||
31 | #define PHY_LINK_SPEED_10MBPS (1) /* 10 Mbps */ | ||
32 | #define PHY_LINK_SPEED_100MBPS (2) /* 100 Mbps */ | ||
33 | #define PHY_LINK_SPEED_1GBPS (3) /* 1 Gbps */ | ||
34 | #define PHY_LINK_SPEED_10GBPS (4) /* 10 Gbps */ | ||
35 | |||
36 | /* --- PHY_LINK_FAULT_ENUM --- */ | ||
37 | #define PHY_LINK_FAULT_NONE (0) /* No fault status | ||
38 | available or detected */ | ||
39 | #define PHY_LINK_FAULT_LOCAL (1) /* Local fault detected */ | ||
40 | #define PHY_LINK_FAULT_REMOTE (2) /* Remote fault detected */ | ||
41 | |||
42 | /* --- BE_ULP_MASK --- */ | ||
43 | #define BE_ULP0_MASK (1) | ||
44 | #define BE_ULP1_MASK (2) | ||
45 | #define BE_ULP2_MASK (4) | ||
46 | |||
47 | /* --- NTWK_ACTIVE_PORT --- */ | ||
48 | #define NTWK_PORT_A (0) /* Port A is currently active */ | ||
49 | #define NTWK_PORT_B (1) /* Port B is currently active */ | ||
50 | #define NTWK_NO_ACTIVE_PORT (15) /* Both ports have lost link */ | ||
51 | |||
52 | /* --- NTWK_LINK_TYPE --- */ | ||
53 | #define NTWK_LINK_TYPE_PHYSICAL (0) /* link up/down event | ||
54 | applies to BladeEngine's | ||
55 | Physical Ports | ||
56 | */ | ||
57 | #define NTWK_LINK_TYPE_VIRTUAL (1) /* Virtual link up/down event | ||
58 | reported by BladeExchange. | ||
59 | This applies only when the | ||
60 | VLD feature is enabled | ||
61 | */ | ||
62 | |||
63 | /* | ||
64 | * --- FWCMD_MAC_TYPE_ENUM --- | ||
65 | * This enum defines the types of MAC addresses in the RXF MAC Address Table. | ||
66 | */ | ||
67 | #define MAC_ADDRESS_TYPE_STORAGE (0) /* Storage MAC Address */ | ||
68 | #define MAC_ADDRESS_TYPE_NETWORK (1) /* Network MAC Address */ | ||
69 | #define MAC_ADDRESS_TYPE_PD (2) /* Protection Domain MAC Addr */ | ||
70 | #define MAC_ADDRESS_TYPE_MANAGEMENT (3) /* Management MAC Address */ | ||
71 | |||
72 | |||
73 | /* --- FWCMD_RING_TYPE_ENUM --- */ | ||
74 | #define FWCMD_RING_TYPE_ETH_RX (1) /* Ring created with */ | ||
75 | /* FWCMD_COMMON_ETH_RX_CREATE. */ | ||
76 | #define FWCMD_RING_TYPE_ETH_TX (2) /* Ring created with */ | ||
77 | /* FWCMD_COMMON_ETH_TX_CREATE. */ | ||
78 | #define FWCMD_RING_TYPE_ISCSI_WRBQ (3) /* Ring created with */ | ||
79 | /* FWCMD_COMMON_ISCSI_WRBQ_CREATE. */ | ||
80 | #define FWCMD_RING_TYPE_ISCSI_DEFQ (4) /* Ring created with */ | ||
81 | /* FWCMD_COMMON_ISCSI_DEFQ_CREATE. */ | ||
82 | #define FWCMD_RING_TYPE_TPM_WRBQ (5) /* Ring created with */ | ||
83 | /* FWCMD_COMMON_TPM_WRBQ_CREATE. */ | ||
84 | #define FWCMD_RING_TYPE_TPM_DEFQ (6) /* Ring created with */ | ||
85 | /* FWCMD_COMMON_TPM_DEFQ_CREATE. */ | ||
86 | #define FWCMD_RING_TYPE_TPM_RQ (7) /* Ring created with */ | ||
87 | /* FWCMD_COMMON_TPM_RQ_CREATE. */ | ||
88 | #define FWCMD_RING_TYPE_MCC (8) /* Ring created with */ | ||
89 | /* FWCMD_COMMON_MCC_CREATE. */ | ||
90 | #define FWCMD_RING_TYPE_CQ (9) /* Ring created with */ | ||
91 | /* FWCMD_COMMON_CQ_CREATE. */ | ||
92 | #define FWCMD_RING_TYPE_EQ (10) /* Ring created with */ | ||
93 | /* FWCMD_COMMON_EQ_CREATE. */ | ||
94 | #define FWCMD_RING_TYPE_QP (11) /* Ring created with */ | ||
95 | /* FWCMD_RDMA_QP_CREATE. */ | ||
96 | |||
97 | |||
98 | /* --- ETH_TX_RING_TYPE_ENUM --- */ | ||
99 | #define ETH_TX_RING_TYPE_FORWARDING (1) /* Ethernet ring for | ||
100 | forwarding packets */ | ||
101 | #define ETH_TX_RING_TYPE_STANDARD (2) /* Ethernet ring for sending | ||
102 | network packets. */ | ||
103 | #define ETH_TX_RING_TYPE_BOUND (3) /* Ethernet ring bound to the | ||
104 | port specified in the command | ||
105 | header.port_number field. | ||
106 | Rings of this type are | ||
107 | NOT subject to the | ||
108 | failover logic implemented | ||
109 | in the BladeEngine. | ||
110 | */ | ||
111 | |||
112 | /* --- FWCMD_COMMON_QOS_TYPE_ENUM --- */ | ||
113 | #define QOS_BITS_NIC (1) /* max_bits_per_second_NIC */ | ||
114 | /* field is valid. */ | ||
115 | #define QOS_PKTS_NIC (2) /* max_packets_per_second_NIC */ | ||
116 | /* field is valid. */ | ||
117 | #define QOS_IOPS_ISCSI (4) /* max_ios_per_second_iSCSI */ | ||
118 | /* field is valid. */ | ||
119 | #define QOS_VLAN_TAG (8) /* domain_VLAN_tag field | ||
120 | is valid. */ | ||
121 | #define QOS_FABRIC_ID (16) /* fabric_domain_ID field | ||
122 | is valid. */ | ||
123 | #define QOS_OEM_PARAMS (32) /* qos_params_oem field | ||
124 | is valid. */ | ||
125 | #define QOS_TPUT_ISCSI (64) /* max_bytes_per_second_iSCSI | ||
126 | field is valid. */ | ||
127 | |||
128 | |||
129 | /* | ||
130 | * --- FAILOVER_CONFIG_ENUM --- | ||
131 | * Failover configuration setting used in FWCMD_COMMON_FORCE_FAILOVER | ||
132 | */ | ||
133 | #define FAILOVER_CONFIG_NO_CHANGE (0) /* No change to automatic */ | ||
134 | /* port failover setting. */ | ||
135 | #define FAILOVER_CONFIG_ON (1) /* Automatic port failover | ||
136 | on link down is enabled. */ | ||
137 | #define FAILOVER_CONFIG_OFF (2) /* Automatic port failover | ||
138 | on link down is disabled. */ | ||
139 | |||
140 | /* | ||
141 | * --- FAILOVER_PORT_ENUM --- | ||
142 | * Failover port setting used in FWCMD_COMMON_FORCE_FAILOVER | ||
143 | */ | ||
144 | #define FAILOVER_PORT_A (0) /* Selects port A. */ | ||
145 | #define FAILOVER_PORT_B (1) /* Selects port B. */ | ||
146 | #define FAILOVER_PORT_NONE (15) /* No port change requested. */ | ||
147 | |||
148 | |||
149 | /* | ||
150 | * --- MGMT_FLASHROM_OPCODE --- | ||
151 | * Flash ROM operation code | ||
152 | */ | ||
153 | #define MGMT_FLASHROM_OPCODE_FLASH (1) /* Commit downloaded data | ||
154 | to Flash ROM */ | ||
155 | #define MGMT_FLASHROM_OPCODE_SAVE (2) /* Save downloaded data to | ||
156 | ARM's DDR - do not flash */ | ||
157 | #define MGMT_FLASHROM_OPCODE_CLEAR (3) /* Erase specified component | ||
158 | from FlashROM */ | ||
159 | #define MGMT_FLASHROM_OPCODE_REPORT (4) /* Read specified component | ||
160 | from Flash ROM */ | ||
161 | #define MGMT_FLASHROM_OPCODE_IMAGE_INFO (5) /* Returns size of a | ||
162 | component */ | ||
163 | |||
164 | /* | ||
165 | * --- MGMT_FLASHROM_OPTYPE --- | ||
166 | * Flash ROM operation type | ||
167 | */ | ||
168 | #define MGMT_FLASHROM_OPTYPE_CODE_FIRMWARE (0) /* Includes ARM firmware, | ||
169 | IPSec (optional) and EP | ||
170 | firmware */ | ||
171 | #define MGMT_FLASHROM_OPTYPE_CODE_REDBOOT (1) | ||
172 | #define MGMT_FLASHROM_OPTYPE_CODE_BIOS (2) | ||
173 | #define MGMT_FLASHROM_OPTYPE_CODE_PXE_BIOS (3) | ||
174 | #define MGMT_FLASHROM_OPTYPE_CODE_CTRLS (4) | ||
175 | #define MGMT_FLASHROM_OPTYPE_CFG_IPSEC (5) | ||
176 | #define MGMT_FLASHROM_OPTYPE_CFG_INI (6) | ||
177 | #define MGMT_FLASHROM_OPTYPE_ROM_OFFSET_SPECIFIED (7) | ||
178 | |||
179 | /* | ||
180 | * --- FLASHROM_TYPE --- | ||
181 | * Flash ROM manufacturers supported in the f/w | ||
182 | */ | ||
183 | #define INTEL (0) | ||
184 | #define SPANSION (1) | ||
185 | #define MICRON (2) | ||
186 | |||
187 | /* --- DDR_CAS_TYPE --- */ | ||
188 | #define CAS_3 (0) | ||
189 | #define CAS_4 (1) | ||
190 | #define CAS_5 (2) | ||
191 | |||
192 | /* --- DDR_SIZE_TYPE --- */ | ||
193 | #define SIZE_256MB (0) | ||
194 | #define SIZE_512MB (1) | ||
195 | |||
196 | /* --- DDR_MODE_TYPE --- */ | ||
197 | #define DDR_NO_ECC (0) | ||
198 | #define DDR_ECC (1) | ||
199 | |||
200 | /* --- INTERFACE_10GB_TYPE --- */ | ||
201 | #define CX4_TYPE (0) | ||
202 | #define XFP_TYPE (1) | ||
203 | |||
204 | /* --- BE_CHIP_MAX_MTU --- */ | ||
205 | #define CHIP_MAX_MTU (9000) | ||
206 | |||
207 | /* --- XAUI_STATE_ENUM --- */ | ||
208 | #define XAUI_STATE_ENABLE (0) /* This MUST be the default | ||
209 | value for all requests | ||
210 | which set/change | ||
211 | equalization parameter. */ | ||
212 | #define XAUI_STATE_DISABLE (255) /* The XAUI for both ports | ||
213 | may be disabled for EMI | ||
214 | tests. There is no | ||
215 | provision for turning off | ||
216 | individual ports. | ||
217 | */ | ||
218 | /* --- BE_ASIC_REVISION --- */ | ||
219 | #define BE_ASIC_REV_A0 (1) | ||
220 | #define BE_ASIC_REV_A1 (2) | ||
221 | |||
222 | #endif /* __fwcmd_common_amap_h__ */ | ||
diff --git a/drivers/staging/benet/fwcmd_common_bmap.h b/drivers/staging/benet/fwcmd_common_bmap.h deleted file mode 100644 index a007cf276500..000000000000 --- a/drivers/staging/benet/fwcmd_common_bmap.h +++ /dev/null | |||
@@ -1,717 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | /* | ||
18 | * Autogenerated by srcgen version: 0127 | ||
19 | */ | ||
20 | #ifndef __fwcmd_common_bmap_h__ | ||
21 | #define __fwcmd_common_bmap_h__ | ||
22 | #include "fwcmd_types_bmap.h" | ||
23 | #include "fwcmd_hdr_bmap.h" | ||
24 | |||
25 | #if defined(__BIG_ENDIAN) | ||
26 | /* Physical Address. */ | ||
27 | struct PHYS_ADDR { | ||
28 | union { | ||
29 | struct { | ||
30 | u32 lo; /* DWORD 0 */ | ||
31 | u32 hi; /* DWORD 1 */ | ||
32 | } __packed; /* unnamed struct */ | ||
33 | u32 dw[2]; /* dword union */ | ||
34 | }; /* unnamed union */ | ||
35 | } __packed ; | ||
36 | |||
37 | |||
38 | #else | ||
39 | /* Physical Address. */ | ||
40 | struct PHYS_ADDR { | ||
41 | union { | ||
42 | struct { | ||
43 | u32 lo; /* DWORD 0 */ | ||
44 | u32 hi; /* DWORD 1 */ | ||
45 | } __packed; /* unnamed struct */ | ||
46 | u32 dw[2]; /* dword union */ | ||
47 | }; /* unnamed union */ | ||
48 | } __packed ; | ||
49 | |||
50 | struct BE_LINK_STATUS { | ||
51 | u8 mac0_duplex; | ||
52 | u8 mac0_speed; | ||
53 | u8 mac1_duplex; | ||
54 | u8 mac1_speed; | ||
55 | u8 mgmt_mac_duplex; | ||
56 | u8 mgmt_mac_speed; | ||
57 | u8 active_port; | ||
58 | u8 rsvd0; | ||
59 | u8 mac0_fault; | ||
60 | u8 mac1_fault; | ||
61 | u16 rsvd1; | ||
62 | } __packed; | ||
63 | #endif | ||
64 | |||
65 | struct FWCMD_COMMON_ANON_170_REQUEST { | ||
66 | u32 rsvd0; | ||
67 | } __packed; | ||
68 | |||
69 | union LINK_STATUS_QUERY_PARAMS { | ||
70 | struct BE_LINK_STATUS response; | ||
71 | struct FWCMD_COMMON_ANON_170_REQUEST request; | ||
72 | } __packed; | ||
73 | |||
74 | /* | ||
75 | * Queries the link status for all ports. The valid values below | ||
76 | * DO NOT indicate that a particular duplex or speed is supported by | ||
77 | * BladeEngine. These enumerations simply list all possible duplexes | ||
78 | * and speeds for any port. Consult BladeEngine product documentation | ||
79 | * for the supported parameters. | ||
80 | */ | ||
81 | struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY { | ||
82 | union FWCMD_HEADER header; | ||
83 | union LINK_STATUS_QUERY_PARAMS params; | ||
84 | } __packed; | ||
85 | |||
86 | struct FWCMD_COMMON_ANON_171_REQUEST { | ||
87 | u8 type; | ||
88 | u8 port; | ||
89 | u8 mac1; | ||
90 | u8 permanent; | ||
91 | } __packed; | ||
92 | |||
93 | struct FWCMD_COMMON_ANON_172_RESPONSE { | ||
94 | struct MAC_ADDRESS_FORMAT mac; | ||
95 | } __packed; | ||
96 | |||
97 | union NTWK_MAC_QUERY_PARAMS { | ||
98 | struct FWCMD_COMMON_ANON_171_REQUEST request; | ||
99 | struct FWCMD_COMMON_ANON_172_RESPONSE response; | ||
100 | } __packed; | ||
101 | |||
102 | /* Queries one MAC address. */ | ||
103 | struct FWCMD_COMMON_NTWK_MAC_QUERY { | ||
104 | union FWCMD_HEADER header; | ||
105 | union NTWK_MAC_QUERY_PARAMS params; | ||
106 | } __packed; | ||
107 | |||
108 | struct MAC_SET_PARAMS_IN { | ||
109 | u8 type; | ||
110 | u8 port; | ||
111 | u8 mac1; | ||
112 | u8 invalidate; | ||
113 | struct MAC_ADDRESS_FORMAT mac; | ||
114 | } __packed; | ||
115 | |||
116 | struct MAC_SET_PARAMS_OUT { | ||
117 | u32 rsvd0; | ||
118 | } __packed; | ||
119 | |||
120 | union MAC_SET_PARAMS { | ||
121 | struct MAC_SET_PARAMS_IN request; | ||
122 | struct MAC_SET_PARAMS_OUT response; | ||
123 | } __packed; | ||
124 | |||
125 | /* Sets a MAC address. */ | ||
126 | struct FWCMD_COMMON_NTWK_MAC_SET { | ||
127 | union FWCMD_HEADER header; | ||
128 | union MAC_SET_PARAMS params; | ||
129 | } __packed; | ||
130 | |||
131 | /* MAC address list. */ | ||
132 | struct NTWK_MULTICAST_MAC_LIST { | ||
133 | u8 byte[6]; | ||
134 | } __packed; | ||
135 | |||
136 | struct FWCMD_COMMON_NTWK_MULTICAST_SET_REQUEST_PAYLOAD { | ||
137 | u16 num_mac; | ||
138 | u8 promiscuous; | ||
139 | u8 rsvd0; | ||
140 | struct NTWK_MULTICAST_MAC_LIST mac[32]; | ||
141 | } __packed; | ||
142 | |||
143 | struct FWCMD_COMMON_ANON_174_RESPONSE { | ||
144 | u32 rsvd0; | ||
145 | } __packed; | ||
146 | |||
147 | union FWCMD_COMMON_ANON_173_PARAMS { | ||
148 | struct FWCMD_COMMON_NTWK_MULTICAST_SET_REQUEST_PAYLOAD request; | ||
149 | struct FWCMD_COMMON_ANON_174_RESPONSE response; | ||
150 | } __packed; | ||
151 | |||
152 | /* | ||
153 | * Sets multicast address hash. The MPU will merge the MAC address lists | ||
154 | * from all clients, including the networking and storage functions. | ||
155 | * This command may fail if the final merged list of MAC addresses exceeds | ||
156 | * 32 entries. | ||
157 | */ | ||
158 | struct FWCMD_COMMON_NTWK_MULTICAST_SET { | ||
159 | union FWCMD_HEADER header; | ||
160 | union FWCMD_COMMON_ANON_173_PARAMS params; | ||
161 | } __packed; | ||
162 | |||
163 | struct FWCMD_COMMON_NTWK_VLAN_CONFIG_REQUEST_PAYLOAD { | ||
164 | u16 num_vlan; | ||
165 | u8 promiscuous; | ||
166 | u8 rsvd0; | ||
167 | u16 vlan_tag[32]; | ||
168 | } __packed; | ||
169 | |||
170 | struct FWCMD_COMMON_ANON_176_RESPONSE { | ||
171 | u32 rsvd0; | ||
172 | } __packed; | ||
173 | |||
174 | union FWCMD_COMMON_ANON_175_PARAMS { | ||
175 | struct FWCMD_COMMON_NTWK_VLAN_CONFIG_REQUEST_PAYLOAD request; | ||
176 | struct FWCMD_COMMON_ANON_176_RESPONSE response; | ||
177 | } __packed; | ||
178 | |||
179 | /* | ||
180 | * Sets VLAN tag filter. The MPU will merge the VLAN tag list from all | ||
181 | * clients, including the networking and storage functions. This command | ||
182 | * may fail if the final vlan_tag array (from all functions) is longer | ||
183 | * than 32 entries. | ||
184 | */ | ||
185 | struct FWCMD_COMMON_NTWK_VLAN_CONFIG { | ||
186 | union FWCMD_HEADER header; | ||
187 | union FWCMD_COMMON_ANON_175_PARAMS params; | ||
188 | } __packed; | ||
189 | |||
190 | struct RING_DESTROY_REQUEST { | ||
191 | u16 ring_type; | ||
192 | u16 id; | ||
193 | u8 bypass_flush; | ||
194 | u8 rsvd0; | ||
195 | u16 rsvd1; | ||
196 | } __packed; | ||
197 | |||
198 | struct FWCMD_COMMON_ANON_190_RESPONSE { | ||
199 | u32 rsvd0; | ||
200 | } __packed; | ||
201 | |||
202 | union FWCMD_COMMON_ANON_189_PARAMS { | ||
203 | struct RING_DESTROY_REQUEST request; | ||
204 | struct FWCMD_COMMON_ANON_190_RESPONSE response; | ||
205 | } __packed; | ||
206 | /* | ||
207 | * Command for destroying any ring. The connection(s) using the ring should | ||
208 | * be quiesced before destroying the ring. | ||
209 | */ | ||
210 | struct FWCMD_COMMON_RING_DESTROY { | ||
211 | union FWCMD_HEADER header; | ||
212 | union FWCMD_COMMON_ANON_189_PARAMS params; | ||
213 | } __packed; | ||
214 | |||
215 | struct FWCMD_COMMON_ANON_192_REQUEST { | ||
216 | u16 num_pages; | ||
217 | u16 rsvd0; | ||
218 | struct CQ_CONTEXT_AMAP context; | ||
219 | struct PHYS_ADDR pages[4]; | ||
220 | } __packed ; | ||
221 | |||
222 | struct FWCMD_COMMON_ANON_193_RESPONSE { | ||
223 | u16 cq_id; | ||
224 | } __packed ; | ||
225 | |||
226 | union FWCMD_COMMON_ANON_191_PARAMS { | ||
227 | struct FWCMD_COMMON_ANON_192_REQUEST request; | ||
228 | struct FWCMD_COMMON_ANON_193_RESPONSE response; | ||
229 | } __packed ; | ||
230 | |||
231 | /* | ||
232 | * Command for creating a completion queue. A Completion Queue must span | ||
233 | * at least 1 page and at most 4 pages. Each completion queue entry | ||
234 | * is 16 bytes regardless of CQ entry format. Thus the ring must be | ||
235 | * at least 256 entries deep (corresponding to 1 page) and can be at | ||
236 | * most 1024 entries deep (corresponding to 4 pages). The number of | ||
237 | * pages posted must contain the CQ ring size as encoded in the context. | ||
238 | * | ||
239 | */ | ||
240 | struct FWCMD_COMMON_CQ_CREATE { | ||
241 | union FWCMD_HEADER header; | ||
242 | union FWCMD_COMMON_ANON_191_PARAMS params; | ||
243 | } __packed ; | ||
244 | |||
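The 256-to-1024 entry bound in the comment above falls straight out of the 16-byte entry size and the 1-to-4 page limit. A minimal standalone sketch of that arithmetic, assuming the usual 4 KiB page size (the helper name is made up for illustration):

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE      4096u   /* assumed 4 KiB pages */
#define CQ_ENTRY_SIZE  16u     /* every CQ entry is 16 bytes */

/* Number of CQ entries that fit in the given number of posted pages. */
static unsigned int cq_entries_for_pages(unsigned int num_pages)
{
	return (num_pages * PAGE_SIZE) / CQ_ENTRY_SIZE;
}

int main(void)
{
	/* 1 page -> 256 entries (minimum), 4 pages -> 1024 entries (maximum). */
	assert(cq_entries_for_pages(1) == 256);
	assert(cq_entries_for_pages(4) == 1024);
	printf("CQ depth range: %u..%u entries\n",
	       cq_entries_for_pages(1), cq_entries_for_pages(4));
	return 0;
}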
245 | struct FWCMD_COMMON_ANON_198_REQUEST { | ||
246 | u16 num_pages; | ||
247 | u16 rsvd0; | ||
248 | struct EQ_CONTEXT_AMAP context; | ||
249 | struct PHYS_ADDR pages[8]; | ||
250 | } __packed ; | ||
251 | |||
252 | struct FWCMD_COMMON_ANON_199_RESPONSE { | ||
253 | u16 eq_id; | ||
254 | } __packed ; | ||
255 | |||
256 | union FWCMD_COMMON_ANON_197_PARAMS { | ||
257 | struct FWCMD_COMMON_ANON_198_REQUEST request; | ||
258 | struct FWCMD_COMMON_ANON_199_RESPONSE response; | ||
259 | } __packed ; | ||
260 | |||
261 | /* | ||
262 | * Command for creating an event queue. An Event Queue must span at least | ||
263 | * 1 page and at most 8 pages. The number of pages posted must contain | ||
264 | * the EQ ring. The ring is defined by the size of the EQ entries (encoded | ||
265 | * in the context) and the number of EQ entries (also encoded in the | ||
266 | * context). | ||
267 | */ | ||
268 | struct FWCMD_COMMON_EQ_CREATE { | ||
269 | union FWCMD_HEADER header; | ||
270 | union FWCMD_COMMON_ANON_197_PARAMS params; | ||
271 | } __packed ; | ||
272 | |||
273 | struct FWCMD_COMMON_ANON_201_REQUEST { | ||
274 | u16 cq_id; | ||
275 | u16 bcmc_cq_id; | ||
276 | u16 num_pages; | ||
277 | u16 rsvd0; | ||
278 | struct PHYS_ADDR pages[2]; | ||
279 | } __packed; | ||
280 | |||
281 | struct FWCMD_COMMON_ANON_202_RESPONSE { | ||
282 | u16 id; | ||
283 | } __packed; | ||
284 | |||
285 | union FWCMD_COMMON_ANON_200_PARAMS { | ||
286 | struct FWCMD_COMMON_ANON_201_REQUEST request; | ||
287 | struct FWCMD_COMMON_ANON_202_RESPONSE response; | ||
288 | } __packed; | ||
289 | |||
290 | /* | ||
291 | * Command for creating an Ethernet receive ring. An ERX ring contains ETH_RX_D | ||
292 | * entries (8 bytes each). An ERX ring must be 1024 entries deep | ||
293 | * (corresponding to 2 pages). | ||
294 | */ | ||
295 | struct FWCMD_COMMON_ETH_RX_CREATE { | ||
296 | union FWCMD_HEADER header; | ||
297 | union FWCMD_COMMON_ANON_200_PARAMS params; | ||
298 | } __packed; | ||
299 | |||
300 | struct FWCMD_COMMON_ANON_204_REQUEST { | ||
301 | u16 num_pages; | ||
302 | u8 ulp_num; | ||
303 | u8 type; | ||
304 | struct ETX_CONTEXT_AMAP context; | ||
305 | struct PHYS_ADDR pages[8]; | ||
306 | } __packed ; | ||
307 | |||
308 | struct FWCMD_COMMON_ANON_205_RESPONSE { | ||
309 | u16 cid; | ||
310 | u8 ulp_num; | ||
311 | u8 rsvd0; | ||
312 | } __packed ; | ||
313 | |||
314 | union FWCMD_COMMON_ANON_203_PARAMS { | ||
315 | struct FWCMD_COMMON_ANON_204_REQUEST request; | ||
316 | struct FWCMD_COMMON_ANON_205_RESPONSE response; | ||
317 | } __packed ; | ||
318 | |||
319 | /* | ||
320 | * Command for creating an Ethernet transmit ring. An ETX ring contains | ||
321 | * ETH_WRB entries (16 bytes each). An ETX ring must be at least 256 | ||
322 | * entries deep (corresponding to 1 page) and at most 2k entries deep | ||
323 | * (corresponding to 8 pages). | ||
324 | */ | ||
325 | struct FWCMD_COMMON_ETH_TX_CREATE { | ||
326 | union FWCMD_HEADER header; | ||
327 | union FWCMD_COMMON_ANON_203_PARAMS params; | ||
328 | } __packed ; | ||
329 | |||
330 | struct FWCMD_COMMON_ANON_222_REQUEST { | ||
331 | u16 num_pages; | ||
332 | u16 rsvd0; | ||
333 | struct MCC_RING_CONTEXT_AMAP context; | ||
334 | struct PHYS_ADDR pages[8]; | ||
335 | } __packed ; | ||
336 | |||
337 | struct FWCMD_COMMON_ANON_223_RESPONSE { | ||
338 | u16 id; | ||
339 | } __packed ; | ||
340 | |||
341 | union FWCMD_COMMON_ANON_221_PARAMS { | ||
342 | struct FWCMD_COMMON_ANON_222_REQUEST request; | ||
343 | struct FWCMD_COMMON_ANON_223_RESPONSE response; | ||
344 | } __packed ; | ||
345 | |||
346 | /* | ||
347 | * Command for creating the MCC ring. An MCC ring must be at least 16 | ||
348 | * entries deep (corresponding to 1 page) and at most 128 entries deep | ||
349 | * (corresponding to 8 pages). | ||
350 | */ | ||
351 | struct FWCMD_COMMON_MCC_CREATE { | ||
352 | union FWCMD_HEADER header; | ||
353 | union FWCMD_COMMON_ANON_221_PARAMS params; | ||
354 | } __packed ; | ||
355 | |||
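The 16-to-128 entry bound above matches the MCC WRB size implied elsewhere in this patch (struct MCC_WRB_AMAP carries 64 dwords, i.e. 256 bytes per entry). A minimal sketch of that arithmetic, assuming 4 KiB pages:

#include <assert.h>

#define PAGE_SIZE      4096u
#define MCC_WRB_BYTES  (64u * 4u)  /* MCC_WRB_AMAP is 64 dwords = 256 bytes */

int main(void)
{
	unsigned int per_page = PAGE_SIZE / MCC_WRB_BYTES;

	assert(per_page == 16);           /* 1 page  -> 16 entries  */
	assert(8 * per_page == 128);      /* 8 pages -> 128 entries */
	return 0;
}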
356 | struct GET_QOS_IN { | ||
357 | u32 qos_params_rsvd; | ||
358 | } __packed; | ||
359 | |||
360 | struct GET_QOS_OUT { | ||
361 | u32 max_bits_per_second_NIC; | ||
362 | u32 max_packets_per_second_NIC; | ||
363 | u32 max_ios_per_second_iSCSI; | ||
364 | u32 max_bytes_per_second_iSCSI; | ||
365 | u16 domain_VLAN_tag; | ||
366 | u16 fabric_domain_ID; | ||
367 | u32 qos_params_oem[4]; | ||
368 | } __packed; | ||
369 | |||
370 | union GET_QOS_PARAMS { | ||
371 | struct GET_QOS_IN request; | ||
372 | struct GET_QOS_OUT response; | ||
373 | } __packed; | ||
374 | |||
375 | /* QOS/Bandwidth settings per domain. Applicable only in VMs. */ | ||
376 | struct FWCMD_COMMON_GET_QOS { | ||
377 | union FWCMD_HEADER header; | ||
378 | union GET_QOS_PARAMS params; | ||
379 | } __packed; | ||
380 | |||
381 | struct SET_QOS_IN { | ||
382 | u32 valid_flags; | ||
383 | u32 max_bits_per_second_NIC; | ||
384 | u32 max_packets_per_second_NIC; | ||
385 | u32 max_ios_per_second_iSCSI; | ||
386 | u32 max_bytes_per_second_iSCSI; | ||
387 | u16 domain_VLAN_tag; | ||
388 | u16 fabric_domain_ID; | ||
389 | u32 qos_params_oem[4]; | ||
390 | } __packed; | ||
391 | |||
392 | struct SET_QOS_OUT { | ||
393 | u32 qos_params_rsvd; | ||
394 | } __packed; | ||
395 | |||
396 | union SET_QOS_PARAMS { | ||
397 | struct SET_QOS_IN request; | ||
398 | struct SET_QOS_OUT response; | ||
399 | } __packed; | ||
400 | |||
401 | /* QOS/Bandwidth settings per domain. Applicable only in VMs. */ | ||
402 | struct FWCMD_COMMON_SET_QOS { | ||
403 | union FWCMD_HEADER header; | ||
404 | union SET_QOS_PARAMS params; | ||
405 | } __packed; | ||
406 | |||
407 | struct SET_FRAME_SIZE_IN { | ||
408 | u32 max_tx_frame_size; | ||
409 | u32 max_rx_frame_size; | ||
410 | } __packed; | ||
411 | |||
412 | struct SET_FRAME_SIZE_OUT { | ||
413 | u32 chip_max_tx_frame_size; | ||
414 | u32 chip_max_rx_frame_size; | ||
415 | } __packed; | ||
416 | |||
417 | union SET_FRAME_SIZE_PARAMS { | ||
418 | struct SET_FRAME_SIZE_IN request; | ||
419 | struct SET_FRAME_SIZE_OUT response; | ||
420 | } __packed; | ||
421 | |||
422 | /* Set frame size command. Only host domain may issue this command. */ | ||
423 | struct FWCMD_COMMON_SET_FRAME_SIZE { | ||
424 | union FWCMD_HEADER header; | ||
425 | union SET_FRAME_SIZE_PARAMS params; | ||
426 | } __packed; | ||
427 | |||
428 | struct FORCE_FAILOVER_IN { | ||
429 | u32 move_to_port; | ||
430 | u32 failover_config; | ||
431 | } __packed; | ||
432 | |||
433 | struct FWCMD_COMMON_ANON_231_RESPONSE { | ||
434 | u32 rsvd0; | ||
435 | } __packed; | ||
436 | |||
437 | union FWCMD_COMMON_ANON_230_PARAMS { | ||
438 | struct FORCE_FAILOVER_IN request; | ||
439 | struct FWCMD_COMMON_ANON_231_RESPONSE response; | ||
440 | } __packed; | ||
441 | |||
442 | /* | ||
443 | * Use this command to control failover in BladeEngine. It may be used | ||
444 | * to failback to a restored port or to forcibly move traffic from | ||
445 | * one port to another. It may also be used to enable or disable the | ||
446 | * automatic failover feature. This command can only be issued by domain | ||
447 | * 0. | ||
448 | */ | ||
449 | struct FWCMD_COMMON_FORCE_FAILOVER { | ||
450 | union FWCMD_HEADER header; | ||
451 | union FWCMD_COMMON_ANON_230_PARAMS params; | ||
452 | } __packed; | ||
453 | |||
454 | struct FWCMD_COMMON_ANON_240_REQUEST { | ||
455 | u64 context; | ||
456 | } __packed; | ||
457 | |||
458 | struct FWCMD_COMMON_ANON_241_RESPONSE { | ||
459 | u64 context; | ||
460 | } __packed; | ||
461 | |||
462 | union FWCMD_COMMON_ANON_239_PARAMS { | ||
463 | struct FWCMD_COMMON_ANON_240_REQUEST request; | ||
464 | struct FWCMD_COMMON_ANON_241_RESPONSE response; | ||
465 | } __packed; | ||
466 | |||
467 | /* | ||
468 | * This command can be used by clients as a no-operation request. Typical | ||
469 | * uses for drivers are as a heartbeat mechanism, or deferred processing | ||
470 | * catalyst. The ARM will always complete this command with a good completion. | ||
471 | * The 64-bit parameter is not touched by the ARM processor. | ||
472 | */ | ||
473 | struct FWCMD_COMMON_NOP { | ||
474 | union FWCMD_HEADER header; | ||
475 | union FWCMD_COMMON_ANON_239_PARAMS params; | ||
476 | } __packed; | ||
477 | |||
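The only behaviour the comment above guarantees is that the 64-bit parameter is echoed back untouched, which is what makes the NOP usable as a heartbeat. A minimal sketch of that round trip with a stand-in context field (not the real FWCMD structures):

#include <assert.h>
#include <stdint.h>

/* Stand-in for the 64-bit NOP request/response context. */
struct nop_sketch { uint64_t context; };

int main(void)
{
	struct nop_sketch req = { .context = 0xfeedfacecafef00dULL };
	struct nop_sketch rsp;

	/* "Firmware" completes the NOP without touching the parameter. */
	rsp.context = req.context;

	/* Heartbeat check: the cookie we sent is the cookie we got back. */
	assert(rsp.context == req.context);
	return 0;
}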
478 | struct NTWK_RX_FILTER_SETTINGS { | ||
479 | u8 promiscuous; | ||
480 | u8 ip_cksum; | ||
481 | u8 tcp_cksum; | ||
482 | u8 udp_cksum; | ||
483 | u8 pass_err; | ||
484 | u8 pass_ckerr; | ||
485 | u8 strip_crc; | ||
486 | u8 mcast_en; | ||
487 | u8 bcast_en; | ||
488 | u8 mcast_promiscuous_en; | ||
489 | u8 unicast_en; | ||
490 | u8 vlan_promiscuous; | ||
491 | } __packed; | ||
492 | |||
493 | union FWCMD_COMMON_ANON_242_PARAMS { | ||
494 | struct NTWK_RX_FILTER_SETTINGS request; | ||
495 | struct NTWK_RX_FILTER_SETTINGS response; | ||
496 | } __packed; | ||
497 | |||
498 | /* | ||
499 | * This command is used to modify the ethernet receive filter configuration. | ||
500 | * Only domain 0 network function drivers may issue this command. The | ||
501 | * applied configuration is returned in the response payload. Note: | ||
502 | * Some receive packet filter settings are global on BladeEngine and | ||
503 | * can affect both the storage and network function clients that the | ||
504 | * BladeEngine hardware and firmware serve. Additionally, depending | ||
505 | * on the revision of BladeEngine, some ethernet receive filter settings | ||
506 | * are dependent on others. If a dependency exists between settings | ||
507 | * for the BladeEngine revision, and the command request settings do | ||
508 | * not meet the dependency requirement, the invalid settings will not | ||
509 | * be applied despite the command succeeding. For example: a driver may | ||
510 | * request to enable broadcast packets, but not enable multicast packets. | ||
511 | * On early revisions of BladeEngine, there may be no distinction between | ||
512 | * broadcast and multicast filters, so broadcast could not be enabled | ||
513 | * without enabling multicast. In this scenario, the command would still | ||
514 | * succeed, but the response payload would indicate the previously | ||
515 | * configured broadcast and multicast setting. | ||
516 | */ | ||
517 | struct FWCMD_COMMON_NTWK_RX_FILTER { | ||
518 | union FWCMD_HEADER header; | ||
519 | union FWCMD_COMMON_ANON_242_PARAMS params; | ||
520 | } __packed; | ||
521 | |||
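Because a dependent setting may be silently left unchanged, a driver has to diff the applied configuration in the response against what it requested. A minimal sketch of that comparison using a trimmed-down stand-in for NTWK_RX_FILTER_SETTINGS (two fields only, not the real layout):

#include <stdio.h>

/* Trimmed-down stand-in for NTWK_RX_FILTER_SETTINGS (illustration only). */
struct rx_filter_sketch {
	unsigned char mcast_en;
	unsigned char bcast_en;
};

/* Warn about every setting the firmware did not actually apply. */
static void check_applied(const struct rx_filter_sketch *req,
			  const struct rx_filter_sketch *rsp)
{
	if (req->mcast_en != rsp->mcast_en)
		printf("mcast_en: requested %u, applied %u\n",
		       req->mcast_en, rsp->mcast_en);
	if (req->bcast_en != rsp->bcast_en)
		printf("bcast_en: requested %u, applied %u\n",
		       req->bcast_en, rsp->bcast_en);
}

int main(void)
{
	/* Example from the comment: broadcast requested without multicast. */
	struct rx_filter_sketch req = { .mcast_en = 0, .bcast_en = 1 };
	struct rx_filter_sketch rsp = { .mcast_en = 0, .bcast_en = 0 };

	check_applied(&req, &rsp);
	return 0;
}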
522 | |||
523 | struct FWCMD_COMMON_ANON_244_REQUEST { | ||
524 | u32 rsvd0; | ||
525 | } __packed; | ||
526 | |||
527 | struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD { | ||
528 | u8 firmware_version_string[32]; | ||
529 | u8 fw_on_flash_version_string[32]; | ||
530 | } __packed; | ||
531 | |||
532 | union FWCMD_COMMON_ANON_243_PARAMS { | ||
533 | struct FWCMD_COMMON_ANON_244_REQUEST request; | ||
534 | struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD response; | ||
535 | } __packed; | ||
536 | |||
537 | /* This command retrieves the firmware version. */ | ||
538 | struct FWCMD_COMMON_GET_FW_VERSION { | ||
539 | union FWCMD_HEADER header; | ||
540 | union FWCMD_COMMON_ANON_243_PARAMS params; | ||
541 | } __packed; | ||
542 | |||
543 | struct FWCMD_COMMON_ANON_246_REQUEST { | ||
544 | u16 tx_flow_control; | ||
545 | u16 rx_flow_control; | ||
546 | } __packed; | ||
547 | |||
548 | struct FWCMD_COMMON_ANON_247_RESPONSE { | ||
549 | u32 rsvd0; | ||
550 | } __packed; | ||
551 | |||
552 | union FWCMD_COMMON_ANON_245_PARAMS { | ||
553 | struct FWCMD_COMMON_ANON_246_REQUEST request; | ||
554 | struct FWCMD_COMMON_ANON_247_RESPONSE response; | ||
555 | } __packed; | ||
556 | |||
557 | /* | ||
558 | * This command is used to program BladeEngine flow control behavior. | ||
559 | * Only the host networking driver is allowed to use this command. | ||
560 | */ | ||
561 | struct FWCMD_COMMON_SET_FLOW_CONTROL { | ||
562 | union FWCMD_HEADER header; | ||
563 | union FWCMD_COMMON_ANON_245_PARAMS params; | ||
564 | } __packed; | ||
565 | |||
566 | struct FWCMD_COMMON_ANON_249_REQUEST { | ||
567 | u32 rsvd0; | ||
568 | } __packed; | ||
569 | |||
570 | struct FWCMD_COMMON_ANON_250_RESPONSE { | ||
571 | u16 tx_flow_control; | ||
572 | u16 rx_flow_control; | ||
573 | } __packed; | ||
574 | |||
575 | union FWCMD_COMMON_ANON_248_PARAMS { | ||
576 | struct FWCMD_COMMON_ANON_249_REQUEST request; | ||
577 | struct FWCMD_COMMON_ANON_250_RESPONSE response; | ||
578 | } __packed; | ||
579 | |||
580 | /* This command is used to read BladeEngine flow control settings. */ | ||
581 | struct FWCMD_COMMON_GET_FLOW_CONTROL { | ||
582 | union FWCMD_HEADER header; | ||
583 | union FWCMD_COMMON_ANON_248_PARAMS params; | ||
584 | } __packed; | ||
585 | |||
586 | struct EQ_DELAY_PARAMS { | ||
587 | u32 eq_id; | ||
588 | u32 delay_in_microseconds; | ||
589 | } __packed; | ||
590 | |||
591 | struct FWCMD_COMMON_ANON_257_REQUEST { | ||
592 | u32 num_eq; | ||
593 | u32 rsvd0; | ||
594 | struct EQ_DELAY_PARAMS delay[16]; | ||
595 | } __packed; | ||
596 | |||
597 | struct FWCMD_COMMON_ANON_258_RESPONSE { | ||
598 | u32 delay_resolution_in_microseconds; | ||
599 | u32 delay_max_in_microseconds; | ||
600 | } __packed; | ||
601 | |||
602 | union MODIFY_EQ_DELAY_PARAMS { | ||
603 | struct FWCMD_COMMON_ANON_257_REQUEST request; | ||
604 | struct FWCMD_COMMON_ANON_258_RESPONSE response; | ||
605 | } __packed; | ||
606 | |||
607 | /* This command changes the EQ delay for a given set of EQs. */ | ||
608 | struct FWCMD_COMMON_MODIFY_EQ_DELAY { | ||
609 | union FWCMD_HEADER header; | ||
610 | union MODIFY_EQ_DELAY_PARAMS params; | ||
611 | } __packed; | ||
612 | |||
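A hedged sketch of how a driver might condition a requested delay against the resolution and maximum advertised in the response; rounding down to the resolution is an assumption for illustration, not something this header specifies:

#include <assert.h>

/* Clamp to the advertised maximum, then round down to the resolution. */
static unsigned int condition_eq_delay(unsigned int requested_us,
				       unsigned int resolution_us,
				       unsigned int max_us)
{
	if (requested_us > max_us)
		requested_us = max_us;
	return (requested_us / resolution_us) * resolution_us;
}

int main(void)
{
	/* Example numbers only. */
	assert(condition_eq_delay(100, 8, 120) == 96);
	assert(condition_eq_delay(500, 8, 120) == 120);
	return 0;
}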
613 | struct FWCMD_COMMON_ANON_260_REQUEST { | ||
614 | u32 rsvd0; | ||
615 | } __packed; | ||
616 | |||
617 | struct BE_FIRMWARE_CONFIG { | ||
618 | u16 be_config_number; | ||
619 | u16 asic_revision; | ||
620 | u32 nic_ulp_mask; | ||
621 | u32 tulp_mask; | ||
622 | u32 iscsi_ulp_mask; | ||
623 | u32 rdma_ulp_mask; | ||
624 | u32 rsvd0[4]; | ||
625 | u32 eth_tx_id_start; | ||
626 | u32 eth_tx_id_count; | ||
627 | u32 eth_rx_id_start; | ||
628 | u32 eth_rx_id_count; | ||
629 | u32 tpm_wrbq_id_start; | ||
630 | u32 tpm_wrbq_id_count; | ||
631 | u32 tpm_defq_id_start; | ||
632 | u32 tpm_defq_id_count; | ||
633 | u32 iscsi_wrbq_id_start; | ||
634 | u32 iscsi_wrbq_id_count; | ||
635 | u32 iscsi_defq_id_start; | ||
636 | u32 iscsi_defq_id_count; | ||
637 | u32 rdma_qp_id_start; | ||
638 | u32 rdma_qp_id_count; | ||
639 | u32 rsvd1[8]; | ||
640 | } __packed; | ||
641 | |||
642 | union FWCMD_COMMON_ANON_259_PARAMS { | ||
643 | struct FWCMD_COMMON_ANON_260_REQUEST request; | ||
644 | struct BE_FIRMWARE_CONFIG response; | ||
645 | } __packed; | ||
646 | |||
647 | /* | ||
648 | * This command queries the current firmware configuration parameters. | ||
649 | * The static configuration type is defined by be_config_number. This | ||
650 | * differentiates different BladeEngine builds, such as iSCSI Initiator | ||
651 | * versus iSCSI Target. For a given static configuration, the Upper | ||
652 | * Layer Protocol (ULP) processors may be reconfigured to support different | ||
653 | * protocols. Each ULP processor supports one or more protocols. The | ||
654 | * masks indicate which processors are configured for each protocol. | ||
655 | * For a given static configuration, the number of TCP connections | ||
656 | * supported for each protocol may vary. The *_id_start and *_id_count | ||
657 | * variables define a linear range of IDs that are available for each | ||
658 | * supported protocol. The *_id_count may be used by the driver to allocate | ||
659 | * the appropriate number of connection resources. The *_id_start may | ||
660 | * be used to map the arbitrary range of IDs to a zero-based range | ||
661 | * of indices. | ||
662 | */ | ||
663 | struct FWCMD_COMMON_FIRMWARE_CONFIG { | ||
664 | union FWCMD_HEADER header; | ||
665 | union FWCMD_COMMON_ANON_259_PARAMS params; | ||
666 | } __packed; | ||
667 | |||
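A minimal sketch of the *_id_start / *_id_count bookkeeping described above: mapping an arbitrary hardware ID range onto zero-based driver indices and back. The eth_tx values are made-up example numbers, not anything reported by real firmware:

#include <assert.h>

/* Example range only; real values come from BE_FIRMWARE_CONFIG. */
#define ETH_TX_ID_START  768u
#define ETH_TX_ID_COUNT  8u

static unsigned int id_to_index(unsigned int id)
{
	assert(id >= ETH_TX_ID_START &&
	       id < ETH_TX_ID_START + ETH_TX_ID_COUNT);
	return id - ETH_TX_ID_START;        /* zero-based driver index */
}

static unsigned int index_to_id(unsigned int index)
{
	assert(index < ETH_TX_ID_COUNT);
	return ETH_TX_ID_START + index;     /* hardware ring ID */
}

int main(void)
{
	assert(id_to_index(index_to_id(3)) == 3);
	return 0;
}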
668 | struct FWCMD_COMMON_PORT_EQUALIZATION_PARAMS { | ||
669 | u32 emph_lev_sel_port0; | ||
670 | u32 emph_lev_sel_port1; | ||
671 | u8 xaui_vo_sel; | ||
672 | u8 xaui_state; | ||
673 | u16 rsvd0; | ||
674 | u32 xaui_eq_vector; | ||
675 | } __packed; | ||
676 | |||
677 | struct FWCMD_COMMON_ANON_262_REQUEST { | ||
678 | u32 rsvd0; | ||
679 | } __packed; | ||
680 | |||
681 | union FWCMD_COMMON_ANON_261_PARAMS { | ||
682 | struct FWCMD_COMMON_ANON_262_REQUEST request; | ||
683 | struct FWCMD_COMMON_PORT_EQUALIZATION_PARAMS response; | ||
684 | } __packed; | ||
685 | |||
686 | /* | ||
687 | * This command can be used to read XAUI equalization parameters. The | ||
688 | * ARM firmware applies default equalization parameters during initialization. | ||
689 | * These parameters may be customer-specific when derived from the | ||
690 | * SEEPROM. See SEEPROM_DATA for equalization specific fields. | ||
691 | */ | ||
692 | struct FWCMD_COMMON_GET_PORT_EQUALIZATION { | ||
693 | union FWCMD_HEADER header; | ||
694 | union FWCMD_COMMON_ANON_261_PARAMS params; | ||
695 | } __packed; | ||
696 | |||
697 | struct FWCMD_COMMON_ANON_264_RESPONSE { | ||
698 | u32 rsvd0; | ||
699 | } __packed; | ||
700 | |||
701 | union FWCMD_COMMON_ANON_263_PARAMS { | ||
702 | struct FWCMD_COMMON_PORT_EQUALIZATION_PARAMS request; | ||
703 | struct FWCMD_COMMON_ANON_264_RESPONSE response; | ||
704 | } __packed; | ||
705 | |||
706 | /* | ||
707 | * This command can be used to set XAUI equalization parameters. The ARM | ||
708 | * firmware applies default equalization parameters during initialization. | ||
709 | * These parameters may be customer-specific when derived from the | ||
710 | * SEEPROM. See SEEPROM_DATA for equalization specific fields. | ||
711 | */ | ||
712 | struct FWCMD_COMMON_SET_PORT_EQUALIZATION { | ||
713 | union FWCMD_HEADER header; | ||
714 | union FWCMD_COMMON_ANON_263_PARAMS params; | ||
715 | } __packed; | ||
716 | |||
717 | #endif /* __fwcmd_common_bmap_h__ */ | ||
diff --git a/drivers/staging/benet/fwcmd_eth_bmap.h b/drivers/staging/benet/fwcmd_eth_bmap.h deleted file mode 100644 index 234b179eace6..000000000000 --- a/drivers/staging/benet/fwcmd_eth_bmap.h +++ /dev/null | |||
@@ -1,280 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | /* | ||
18 | * Autogenerated by srcgen version: 0127 | ||
19 | */ | ||
20 | #ifndef __fwcmd_eth_bmap_h__ | ||
21 | #define __fwcmd_eth_bmap_h__ | ||
22 | #include "fwcmd_hdr_bmap.h" | ||
23 | #include "fwcmd_types_bmap.h" | ||
24 | |||
25 | struct MIB_ETH_STATISTICS_PARAMS_IN { | ||
26 | u32 rsvd0; | ||
27 | } __packed; | ||
28 | |||
29 | struct BE_RXF_STATS { | ||
30 | u32 p0recvdtotalbytesLSD; /* DWORD 0 */ | ||
31 | u32 p0recvdtotalbytesMSD; /* DWORD 1 */ | ||
32 | u32 p0recvdtotalframes; /* DWORD 2 */ | ||
33 | u32 p0recvdunicastframes; /* DWORD 3 */ | ||
34 | u32 p0recvdmulticastframes; /* DWORD 4 */ | ||
35 | u32 p0recvdbroadcastframes; /* DWORD 5 */ | ||
36 | u32 p0crcerrors; /* DWORD 6 */ | ||
37 | u32 p0alignmentsymerrs; /* DWORD 7 */ | ||
38 | u32 p0pauseframesrecvd; /* DWORD 8 */ | ||
39 | u32 p0controlframesrecvd; /* DWORD 9 */ | ||
40 | u32 p0inrangelenerrors; /* DWORD 10 */ | ||
41 | u32 p0outrangeerrors; /* DWORD 11 */ | ||
42 | u32 p0frametoolongerrors; /* DWORD 12 */ | ||
43 | u32 p0droppedaddressmatch; /* DWORD 13 */ | ||
44 | u32 p0droppedvlanmismatch; /* DWORD 14 */ | ||
45 | u32 p0ipdroppedtoosmall; /* DWORD 15 */ | ||
46 | u32 p0ipdroppedtooshort; /* DWORD 16 */ | ||
47 | u32 p0ipdroppedhdrtoosmall; /* DWORD 17 */ | ||
48 | u32 p0tcpdroppedlen; /* DWORD 18 */ | ||
49 | u32 p0droppedrunt; /* DWORD 19 */ | ||
50 | u32 p0recvd64; /* DWORD 20 */ | ||
51 | u32 p0recvd65_127; /* DWORD 21 */ | ||
52 | u32 p0recvd128_256; /* DWORD 22 */ | ||
53 | u32 p0recvd256_511; /* DWORD 23 */ | ||
54 | u32 p0recvd512_1023; /* DWORD 24 */ | ||
55 | u32 p0recvd1518_1522; /* DWORD 25 */ | ||
56 | u32 p0recvd1522_2047; /* DWORD 26 */ | ||
57 | u32 p0recvd2048_4095; /* DWORD 27 */ | ||
58 | u32 p0recvd4096_8191; /* DWORD 28 */ | ||
59 | u32 p0recvd8192_9216; /* DWORD 29 */ | ||
60 | u32 p0rcvdipcksmerrs; /* DWORD 30 */ | ||
61 | u32 p0recvdtcpcksmerrs; /* DWORD 31 */ | ||
62 | u32 p0recvdudpcksmerrs; /* DWORD 32 */ | ||
63 | u32 p0recvdnonrsspackets; /* DWORD 33 */ | ||
64 | u32 p0recvdippackets; /* DWORD 34 */ | ||
65 | u32 p0recvdchute1packets; /* DWORD 35 */ | ||
66 | u32 p0recvdchute2packets; /* DWORD 36 */ | ||
67 | u32 p0recvdchute3packets; /* DWORD 37 */ | ||
68 | u32 p0recvdipsecpackets; /* DWORD 38 */ | ||
69 | u32 p0recvdmanagementpackets; /* DWORD 39 */ | ||
70 | u32 p0xmitbyteslsd; /* DWORD 40 */ | ||
71 | u32 p0xmitbytesmsd; /* DWORD 41 */ | ||
72 | u32 p0xmitunicastframes; /* DWORD 42 */ | ||
73 | u32 p0xmitmulticastframes; /* DWORD 43 */ | ||
74 | u32 p0xmitbroadcastframes; /* DWORD 44 */ | ||
75 | u32 p0xmitpauseframes; /* DWORD 45 */ | ||
76 | u32 p0xmitcontrolframes; /* DWORD 46 */ | ||
77 | u32 p0xmit64; /* DWORD 47 */ | ||
78 | u32 p0xmit65_127; /* DWORD 48 */ | ||
79 | u32 p0xmit128_256; /* DWORD 49 */ | ||
80 | u32 p0xmit256_511; /* DWORD 50 */ | ||
81 | u32 p0xmit512_1023; /* DWORD 51 */ | ||
82 | u32 p0xmit1518_1522; /* DWORD 52 */ | ||
83 | u32 p0xmit1522_2047; /* DWORD 53 */ | ||
84 | u32 p0xmit2048_4095; /* DWORD 54 */ | ||
85 | u32 p0xmit4096_8191; /* DWORD 55 */ | ||
86 | u32 p0xmit8192_9216; /* DWORD 56 */ | ||
87 | u32 p0rxfifooverflowdropped; /* DWORD 57 */ | ||
88 | u32 p0ipseclookupfaileddropped; /* DWORD 58 */ | ||
89 | u32 p1recvdtotalbytesLSD; /* DWORD 59 */ | ||
90 | u32 p1recvdtotalbytesMSD; /* DWORD 60 */ | ||
91 | u32 p1recvdtotalframes; /* DWORD 61 */ | ||
92 | u32 p1recvdunicastframes; /* DWORD 62 */ | ||
93 | u32 p1recvdmulticastframes; /* DWORD 63 */ | ||
94 | u32 p1recvdbroadcastframes; /* DWORD 64 */ | ||
95 | u32 p1crcerrors; /* DWORD 65 */ | ||
96 | u32 p1alignmentsymerrs; /* DWORD 66 */ | ||
97 | u32 p1pauseframesrecvd; /* DWORD 67 */ | ||
98 | u32 p1controlframesrecvd; /* DWORD 68 */ | ||
99 | u32 p1inrangelenerrors; /* DWORD 69 */ | ||
100 | u32 p1outrangeerrors; /* DWORD 70 */ | ||
101 | u32 p1frametoolongerrors; /* DWORD 71 */ | ||
102 | u32 p1droppedaddressmatch; /* DWORD 72 */ | ||
103 | u32 p1droppedvlanmismatch; /* DWORD 73 */ | ||
104 | u32 p1ipdroppedtoosmall; /* DWORD 74 */ | ||
105 | u32 p1ipdroppedtooshort; /* DWORD 75 */ | ||
106 | u32 p1ipdroppedhdrtoosmall; /* DWORD 76 */ | ||
107 | u32 p1tcpdroppedlen; /* DWORD 77 */ | ||
108 | u32 p1droppedrunt; /* DWORD 78 */ | ||
109 | u32 p1recvd64; /* DWORD 79 */ | ||
110 | u32 p1recvd65_127; /* DWORD 80 */ | ||
111 | u32 p1recvd128_256; /* DWORD 81 */ | ||
112 | u32 p1recvd256_511; /* DWORD 82 */ | ||
113 | u32 p1recvd512_1023; /* DWORD 83 */ | ||
114 | u32 p1recvd1518_1522; /* DWORD 84 */ | ||
115 | u32 p1recvd1522_2047; /* DWORD 85 */ | ||
116 | u32 p1recvd2048_4095; /* DWORD 86 */ | ||
117 | u32 p1recvd4096_8191; /* DWORD 87 */ | ||
118 | u32 p1recvd8192_9216; /* DWORD 88 */ | ||
119 | u32 p1rcvdipcksmerrs; /* DWORD 89 */ | ||
120 | u32 p1recvdtcpcksmerrs; /* DWORD 90 */ | ||
121 | u32 p1recvdudpcksmerrs; /* DWORD 91 */ | ||
122 | u32 p1recvdnonrsspackets; /* DWORD 92 */ | ||
123 | u32 p1recvdippackets; /* DWORD 93 */ | ||
124 | u32 p1recvdchute1packets; /* DWORD 94 */ | ||
125 | u32 p1recvdchute2packets; /* DWORD 95 */ | ||
126 | u32 p1recvdchute3packets; /* DWORD 96 */ | ||
127 | u32 p1recvdipsecpackets; /* DWORD 97 */ | ||
128 | u32 p1recvdmanagementpackets; /* DWORD 98 */ | ||
129 | u32 p1xmitbyteslsd; /* DWORD 99 */ | ||
130 | u32 p1xmitbytesmsd; /* DWORD 100 */ | ||
131 | u32 p1xmitunicastframes; /* DWORD 101 */ | ||
132 | u32 p1xmitmulticastframes; /* DWORD 102 */ | ||
133 | u32 p1xmitbroadcastframes; /* DWORD 103 */ | ||
134 | u32 p1xmitpauseframes; /* DWORD 104 */ | ||
135 | u32 p1xmitcontrolframes; /* DWORD 105 */ | ||
136 | u32 p1xmit64; /* DWORD 106 */ | ||
137 | u32 p1xmit65_127; /* DWORD 107 */ | ||
138 | u32 p1xmit128_256; /* DWORD 108 */ | ||
139 | u32 p1xmit256_511; /* DWORD 109 */ | ||
140 | u32 p1xmit512_1023; /* DWORD 110 */ | ||
141 | u32 p1xmit1518_1522; /* DWORD 111 */ | ||
142 | u32 p1xmit1522_2047; /* DWORD 112 */ | ||
143 | u32 p1xmit2048_4095; /* DWORD 113 */ | ||
144 | u32 p1xmit4096_8191; /* DWORD 114 */ | ||
145 | u32 p1xmit8192_9216; /* DWORD 115 */ | ||
146 | u32 p1rxfifooverflowdropped; /* DWORD 116 */ | ||
147 | u32 p1ipseclookupfaileddropped; /* DWORD 117 */ | ||
148 | u32 pxdroppednopbuf; /* DWORD 118 */ | ||
149 | u32 pxdroppednotxpb; /* DWORD 119 */ | ||
150 | u32 pxdroppednoipsecbuf; /* DWORD 120 */ | ||
151 | u32 pxdroppednoerxdescr; /* DWORD 121 */ | ||
152 | u32 pxdroppednotpredescr; /* DWORD 122 */ | ||
153 | u32 pxrecvdmanagementportpackets; /* DWORD 123 */ | ||
154 | u32 pxrecvdmanagementportbytes; /* DWORD 124 */ | ||
155 | u32 pxrecvdmanagementportpauseframes; /* DWORD 125 */ | ||
156 | u32 pxrecvdmanagementporterrors; /* DWORD 126 */ | ||
157 | u32 pxxmitmanagementportpackets; /* DWORD 127 */ | ||
158 | u32 pxxmitmanagementportbytes; /* DWORD 128 */ | ||
159 | u32 pxxmitmanagementportpause; /* DWORD 129 */ | ||
160 | u32 pxxmitmanagementportrxfifooverflow; /* DWORD 130 */ | ||
161 | u32 pxrecvdipsecipcksmerrs; /* DWORD 131 */ | ||
162 | u32 pxrecvdtcpsecipcksmerrs; /* DWORD 132 */ | ||
163 | u32 pxrecvdudpsecipcksmerrs; /* DWORD 133 */ | ||
164 | u32 pxipsecrunt; /* DWORD 134 */ | ||
165 | u32 pxipsecaddressmismatchdropped; /* DWORD 135 */ | ||
166 | u32 pxipsecrxfifooverflowdropped; /* DWORD 136 */ | ||
167 | u32 pxipsecframestoolong; /* DWORD 137 */ | ||
168 | u32 pxipsectotalipframes; /* DWORD 138 */ | ||
169 | u32 pxipseciptoosmall; /* DWORD 139 */ | ||
170 | u32 pxipseciptooshort; /* DWORD 140 */ | ||
171 | u32 pxipseciphdrtoosmall; /* DWORD 141 */ | ||
172 | u32 pxipsectcphdrbad; /* DWORD 142 */ | ||
173 | u32 pxrecvdipsecchute1; /* DWORD 143 */ | ||
174 | u32 pxrecvdipsecchute2; /* DWORD 144 */ | ||
175 | u32 pxrecvdipsecchute3; /* DWORD 145 */ | ||
176 | u32 pxdropped7frags; /* DWORD 146 */ | ||
177 | u32 pxdroppedfrags; /* DWORD 147 */ | ||
178 | u32 pxdroppedinvalidfragring; /* DWORD 148 */ | ||
179 | u32 pxnumforwardedpackets; /* DWORD 149 */ | ||
180 | } __packed; | ||
181 | |||
182 | union MIB_ETH_STATISTICS_PARAMS { | ||
183 | struct MIB_ETH_STATISTICS_PARAMS_IN request; | ||
184 | struct BE_RXF_STATS response; | ||
185 | } __packed; | ||
186 | |||
187 | /* | ||
188 | * Query ethernet statistics. All domains may issue this command. The | ||
189 | * host domain drivers may optionally reset internal statistic counters | ||
190 | * with a query. | ||
191 | */ | ||
192 | struct FWCMD_ETH_GET_STATISTICS { | ||
193 | union FWCMD_HEADER header; | ||
194 | union MIB_ETH_STATISTICS_PARAMS params; | ||
195 | } __packed; | ||
196 | |||
197 | |||
198 | struct FWCMD_ETH_ANON_175_REQUEST { | ||
199 | u8 port0_promiscuous; | ||
200 | u8 port1_promiscuous; | ||
201 | u16 rsvd0; | ||
202 | } __packed; | ||
203 | |||
204 | struct FWCMD_ETH_ANON_176_RESPONSE { | ||
205 | u32 rsvd0; | ||
206 | } __packed; | ||
207 | |||
208 | union FWCMD_ETH_ANON_174_PARAMS { | ||
209 | struct FWCMD_ETH_ANON_175_REQUEST request; | ||
210 | struct FWCMD_ETH_ANON_176_RESPONSE response; | ||
211 | } __packed; | ||
212 | |||
213 | /* Enables/Disables promiscuous ethernet receive mode. */ | ||
214 | struct FWCMD_ETH_PROMISCUOUS { | ||
215 | union FWCMD_HEADER header; | ||
216 | union FWCMD_ETH_ANON_174_PARAMS params; | ||
217 | } __packed; | ||
218 | |||
219 | struct FWCMD_ETH_ANON_178_REQUEST { | ||
220 | u32 new_fragsize_log2; | ||
221 | } __packed; | ||
222 | |||
223 | struct FWCMD_ETH_ANON_179_RESPONSE { | ||
224 | u32 actual_fragsize_log2; | ||
225 | } __packed; | ||
226 | |||
227 | union FWCMD_ETH_ANON_177_PARAMS { | ||
228 | struct FWCMD_ETH_ANON_178_REQUEST request; | ||
229 | struct FWCMD_ETH_ANON_179_RESPONSE response; | ||
230 | } __packed; | ||
231 | |||
232 | /* | ||
233 | * Sets the Ethernet RX fragment size. Only host (domain 0) networking | ||
234 | * drivers may issue this command. This call will fail for non-host | ||
235 | * protection domains. In this situation the MCC CQ status will indicate | ||
236 | * a failure due to insufficient privileges. The response should be | ||
237 | * ignored, and the driver should use the FWCMD_ETH_GET_RX_FRAG_SIZE to | ||
238 | * query the existing ethernet receive fragment size. It must use this | ||
239 | * fragment size for all fragments in the ethernet receive ring. If | ||
240 | * the command succeeds, the driver must use the frag size indicated | ||
241 | * in the command response since the requested frag size may not be applied | ||
242 | * until the next reboot. When the requested fragsize matches the response | ||
243 | * fragsize, this indicates the request was applied immediately. | ||
244 | */ | ||
245 | struct FWCMD_ETH_SET_RX_FRAG_SIZE { | ||
246 | union FWCMD_HEADER header; | ||
247 | union FWCMD_ETH_ANON_177_PARAMS params; | ||
248 | } __packed; | ||
249 | |||
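The frag size in this command travels as a power-of-two exponent (new_fragsize_log2 / actual_fragsize_log2). A minimal sketch of converting between bytes and that log2 encoding, and of honouring the response value as the comment requires; the 4096-byte request is only an example:

#include <assert.h>

/* Convert a power-of-two byte count to its log2 encoding. */
static unsigned int fragsize_to_log2(unsigned int bytes)
{
	unsigned int log2 = 0;

	assert(bytes && (bytes & (bytes - 1)) == 0);  /* must be a power of two */
	while ((1u << log2) < bytes)
		log2++;
	return log2;
}

int main(void)
{
	unsigned int requested_log2 = fragsize_to_log2(4096);  /* example: 4 KiB */
	unsigned int actual_log2 = requested_log2;  /* pretend the response echoes it */

	/* Always size ring buffers from the *response*, per the comment above. */
	unsigned int frag_bytes = 1u << actual_log2;

	assert(requested_log2 == 12 && frag_bytes == 4096);
	return 0;
}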
250 | struct FWCMD_ETH_ANON_181_REQUEST { | ||
251 | u32 rsvd0; | ||
252 | } __packed; | ||
253 | |||
254 | struct FWCMD_ETH_ANON_182_RESPONSE { | ||
255 | u32 actual_fragsize_log2; | ||
256 | } __packed; | ||
257 | |||
258 | union FWCMD_ETH_ANON_180_PARAMS { | ||
259 | struct FWCMD_ETH_ANON_181_REQUEST request; | ||
260 | struct FWCMD_ETH_ANON_182_RESPONSE response; | ||
261 | } __packed; | ||
262 | |||
263 | /* | ||
264 | * Queries the Ethernet RX fragment size. All domains may issue this | ||
265 | * command. The driver should call this command to determine the minimum | ||
266 | * required fragment size for the ethernet RX ring buffers. Drivers | ||
267 | * may choose to use a larger size for each fragment buffer, but BladeEngine | ||
268 | * will use up to the configured minimum required fragsize in each ethernet | ||
269 | * receive fragment buffer. For example, if the ethernet receive fragment | ||
270 | * size is configured to 4kB, and a driver uses 8kB fragments, a 6kB | ||
271 | * ethernet packet received by BladeEngine will be split across two | ||
272 | * of the driver's receive fragments (4kB in one fragment buffer, and | ||
273 | * 2kB in the subsequent fragment buffer). | ||
274 | */ | ||
275 | struct FWCMD_ETH_GET_RX_FRAG_SIZE { | ||
276 | union FWCMD_HEADER header; | ||
277 | union FWCMD_ETH_ANON_180_PARAMS params; | ||
278 | } __packed; | ||
279 | |||
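A minimal sketch of the worked example in the comment above: how many receive fragments a packet consumes when the configured fragment size is smaller than the buffers the driver posted. The 4 kB / 6 kB numbers are the ones used in the comment:

#include <assert.h>

/* Fragments consumed = ceil(packet_len / configured_frag_size). */
static unsigned int frags_for_packet(unsigned int pkt_len, unsigned int frag_size)
{
	return (pkt_len + frag_size - 1) / frag_size;
}

int main(void)
{
	/* 6 kB packet, 4 kB configured fragsize -> 4 kB + 2 kB = 2 fragments. */
	assert(frags_for_packet(6 * 1024, 4 * 1024) == 2);
	return 0;
}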
280 | #endif /* __fwcmd_eth_bmap_h__ */ | ||
diff --git a/drivers/staging/benet/fwcmd_hdr_bmap.h b/drivers/staging/benet/fwcmd_hdr_bmap.h deleted file mode 100644 index 28b45328fe7b..000000000000 --- a/drivers/staging/benet/fwcmd_hdr_bmap.h +++ /dev/null | |||
@@ -1,54 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | /* | ||
18 | * Autogenerated by srcgen version: 0127 | ||
19 | */ | ||
20 | #ifndef __fwcmd_hdr_bmap_h__ | ||
21 | #define __fwcmd_hdr_bmap_h__ | ||
22 | |||
23 | struct FWCMD_REQUEST_HEADER { | ||
24 | u8 opcode; | ||
25 | u8 subsystem; | ||
26 | u8 port_number; | ||
27 | u8 domain; | ||
28 | u32 timeout; | ||
29 | u32 request_length; | ||
30 | u32 rsvd0; | ||
31 | } __packed; | ||
32 | |||
33 | struct FWCMD_RESPONSE_HEADER { | ||
34 | u8 opcode; | ||
35 | u8 subsystem; | ||
36 | u8 rsvd0; | ||
37 | u8 domain; | ||
38 | u8 status; | ||
39 | u8 additional_status; | ||
40 | u16 rsvd1; | ||
41 | u32 response_length; | ||
42 | u32 actual_response_length; | ||
43 | } __packed; | ||
44 | |||
45 | /* | ||
46 | * The firmware/driver overwrites the input FWCMD_REQUEST_HEADER with | ||
47 | * the output FWCMD_RESPONSE_HEADER. | ||
48 | */ | ||
49 | union FWCMD_HEADER { | ||
50 | struct FWCMD_REQUEST_HEADER request; | ||
51 | struct FWCMD_RESPONSE_HEADER response; | ||
52 | } __packed; | ||
53 | |||
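A minimal sketch of the overlay semantics described above: the same buffer is written as a request and later read back as a response. The cut-down structs here only mirror the first byte or two and are not the real packed layouts; the opcode and subsystem values are taken from fwcmd_opcodes.h later in this patch:

#include <stdio.h>
#include <string.h>

/* Cut-down stand-ins for the packed request/response headers above. */
struct req_hdr { unsigned char opcode, subsystem; };
struct rsp_hdr { unsigned char opcode, subsystem, status; };

union hdr_sketch {
	struct req_hdr request;
	struct rsp_hdr response;
};

int main(void)
{
	union hdr_sketch h;

	memset(&h, 0, sizeof(h));
	h.request.opcode = 35;      /* OPCODE_COMMON_GET_FW_VERSION */
	h.request.subsystem = 1;    /* FWCMD_SUBSYSTEM_COMMON */

	/* "Firmware" overwrites the same memory with a response. */
	h.response.status = 0;

	printf("opcode %u, status %u\n",
	       (unsigned)h.response.opcode, (unsigned)h.response.status);
	return 0;
}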
54 | #endif /* __fwcmd_hdr_bmap_h__ */ | ||
diff --git a/drivers/staging/benet/fwcmd_mcc.h b/drivers/staging/benet/fwcmd_mcc.h deleted file mode 100644 index 9eeca878c1fb..000000000000 --- a/drivers/staging/benet/fwcmd_mcc.h +++ /dev/null | |||
@@ -1,94 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | /* | ||
18 | * Autogenerated by srcgen version: 0127 | ||
19 | */ | ||
20 | #ifndef __fwcmd_mcc_amap_h__ | ||
21 | #define __fwcmd_mcc_amap_h__ | ||
22 | #include "fwcmd_opcodes.h" | ||
23 | /* | ||
24 | * Where applicable, a WRB may contain a list of Scatter-gather elements. | ||
25 | * Each element supports a 64-bit address and a 32-bit length field. | ||
26 | */ | ||
27 | struct BE_MCC_SGE_AMAP { | ||
28 | u8 pa_lo[32]; /* DWORD 0 */ | ||
29 | u8 pa_hi[32]; /* DWORD 1 */ | ||
30 | u8 length[32]; /* DWORD 2 */ | ||
31 | } __packed; | ||
32 | struct MCC_SGE_AMAP { | ||
33 | u32 dw[3]; | ||
34 | }; | ||
35 | /* | ||
36 | * The design of an MCC_SGE allows up to 19 elements to be embedded | ||
37 | * in a WRB, supporting 64KB data transfers (assuming a 4KB page size). | ||
38 | */ | ||
39 | struct BE_MCC_WRB_PAYLOAD_AMAP { | ||
40 | union { | ||
41 | struct BE_MCC_SGE_AMAP sgl[19]; | ||
42 | u8 embedded[59][32]; /* DWORD 0 */ | ||
43 | }; | ||
44 | } __packed; | ||
45 | struct MCC_WRB_PAYLOAD_AMAP { | ||
46 | u32 dw[59]; | ||
47 | }; | ||
48 | |||
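A quick check that 19 scatter-gather elements actually fit in the 59-dword WRB payload above, plus the 64 KB figure from the comment (a worst-case unaligned 64 KB buffer spans 17 pages of 4 KB, which is within the 19 available elements); the page-span arithmetic is my own reading, not stated in the header:

#include <assert.h>

#define SGE_DWORDS       3u    /* BE_MCC_SGE_AMAP: pa_lo, pa_hi, length */
#define PAYLOAD_DWORDS   59u   /* MCC_WRB_PAYLOAD_AMAP.dw[59] */
#define PAGE_SIZE        4096u

int main(void)
{
	unsigned int worst_case_pages = (64u * 1024u) / PAGE_SIZE + 1u;

	/* 19 SGEs * 3 dwords = 57 dwords, which fits in the 59-dword payload. */
	assert(19u * SGE_DWORDS <= PAYLOAD_DWORDS);

	/* A 64 KB transfer that starts mid-page touches at most 17 pages. */
	assert(worst_case_pages <= 19u);
	return 0;
}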
49 | /* | ||
50 | * This is the structure of the MCC Command WRB for commands | ||
51 | * sent to the Management Processing Unit (MPU). See section | ||
52 | * for usage in embedded and non-embedded modes. | ||
53 | */ | ||
54 | struct BE_MCC_WRB_AMAP { | ||
55 | u8 embedded; /* DWORD 0 */ | ||
56 | u8 rsvd0[2]; /* DWORD 0 */ | ||
57 | u8 sge_count[5]; /* DWORD 0 */ | ||
58 | u8 rsvd1[16]; /* DWORD 0 */ | ||
59 | u8 special[8]; /* DWORD 0 */ | ||
60 | u8 payload_length[32]; /* DWORD 1 */ | ||
61 | u8 tag[2][32]; /* DWORD 2 */ | ||
62 | u8 rsvd2[32]; /* DWORD 4 */ | ||
63 | struct BE_MCC_WRB_PAYLOAD_AMAP payload; | ||
64 | } __packed; | ||
65 | struct MCC_WRB_AMAP { | ||
66 | u32 dw[64]; | ||
67 | }; | ||
68 | |||
69 | /* This is the structure of the MCC Completion queue entry */ | ||
70 | struct BE_MCC_CQ_ENTRY_AMAP { | ||
71 | u8 completion_status[16]; /* DWORD 0 */ | ||
72 | u8 extended_status[16]; /* DWORD 0 */ | ||
73 | u8 mcc_tag[2][32]; /* DWORD 1 */ | ||
74 | u8 rsvd0[27]; /* DWORD 3 */ | ||
75 | u8 consumed; /* DWORD 3 */ | ||
76 | u8 completed; /* DWORD 3 */ | ||
77 | u8 hpi_buffer_completion; /* DWORD 3 */ | ||
78 | u8 async_event; /* DWORD 3 */ | ||
79 | u8 valid; /* DWORD 3 */ | ||
80 | } __packed; | ||
81 | struct MCC_CQ_ENTRY_AMAP { | ||
82 | u32 dw[4]; | ||
83 | }; | ||
84 | |||
85 | /* Mailbox structures used by the MPU during bootstrap */ | ||
86 | struct BE_MCC_MAILBOX_AMAP { | ||
87 | struct BE_MCC_WRB_AMAP wrb; | ||
88 | struct BE_MCC_CQ_ENTRY_AMAP cq; | ||
89 | } __packed; | ||
90 | struct MCC_MAILBOX_AMAP { | ||
91 | u32 dw[68]; | ||
92 | }; | ||
93 | |||
94 | #endif /* __fwcmd_mcc_amap_h__ */ | ||
diff --git a/drivers/staging/benet/fwcmd_opcodes.h b/drivers/staging/benet/fwcmd_opcodes.h deleted file mode 100644 index 23d569386b46..000000000000 --- a/drivers/staging/benet/fwcmd_opcodes.h +++ /dev/null | |||
@@ -1,244 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | /* | ||
18 | * Autogenerated by srcgen version: 0127 | ||
19 | */ | ||
20 | #ifndef __fwcmd_opcodes_amap_h__ | ||
21 | #define __fwcmd_opcodes_amap_h__ | ||
22 | |||
23 | /* | ||
24 | * --- FWCMD_SUBSYSTEMS --- | ||
25 | * The commands are grouped into the following subsystems. The subsystem | ||
26 | * code along with the opcode uniquely identify a particular fwcmd. | ||
27 | */ | ||
28 | #define FWCMD_SUBSYSTEM_RSVD (0) /* This subsystem is reserved. It is */ | ||
29 | /* never used. */ | ||
30 | #define FWCMD_SUBSYSTEM_COMMON (1) /* CMDs in this group are common to | ||
31 | * all subsystems. See | ||
32 | * COMMON_SUBSYSTEM_OPCODES for opcodes | ||
33 | * and Common Host Configuration CMDs | ||
34 | * for the FWCMD descriptions. | ||
35 | */ | ||
36 | #define FWCMD_SUBSYSTEM_COMMON_ISCSI (2) /* CMDs in this group are */ | ||
37 | /* | ||
38 | * common to Initiator and Target. See | ||
39 | * COMMON_ISCSI_SUBSYSTEM_OPCODES and | ||
40 | * Common iSCSI Initiator and Target | ||
41 | * CMDs for the command descriptions. | ||
42 | */ | ||
43 | #define FWCMD_SUBSYSTEM_ETH (3) /* This subsystem is used to | ||
44 | execute Ethernet commands. */ | ||
45 | |||
46 | #define FWCMD_SUBSYSTEM_TPM (4) /* This subsystem is used | ||
47 | to execute TPM commands. */ | ||
48 | #define FWCMD_SUBSYSTEM_PXE_UNDI (5) /* This subsystem is used | ||
49 | * to execute PXE | ||
50 | * and UNDI specific commands. | ||
51 | */ | ||
52 | |||
53 | #define FWCMD_SUBSYSTEM_ISCSI_INI (6) /* This subsystem is used to | ||
54 | execute ISCSI Initiator | ||
55 | specific commands. | ||
56 | */ | ||
57 | #define FWCMD_SUBSYSTEM_ISCSI_TGT (7) /* This subsystem is used | ||
58 | to execute iSCSI Target | ||
59 | specific commands between | ||
60 | PTL and ARM firmware. | ||
61 | */ | ||
62 | #define FWCMD_SUBSYSTEM_MILI_PTL (8) /* This subsystem is used to | ||
63 | execute iSCSI Target specific | ||
64 | commands between MILI | ||
65 | and PTL. */ | ||
66 | #define FWCMD_SUBSYSTEM_MILI_TMD (9) /* This subsystem is used to | ||
67 | execute iSCSI Target specific | ||
68 | commands between MILI | ||
69 | and TMD. */ | ||
70 | #define FWCMD_SUBSYSTEM_PROXY (11) /* This subsystem is used | ||
71 | to execute proxied commands | ||
72 | within the host at the | ||
73 | explicit request of a | ||
74 | non-privileged domain. | ||
75 | This 'subsystem' is entirely | ||
76 | virtual from the controller | ||
77 | and firmware perspective as | ||
78 | it is implemented in host | ||
79 | drivers. | ||
80 | */ | ||
81 | |||
82 | /* | ||
83 | * --- COMMON_SUBSYSTEM_OPCODES --- | ||
84 | * These opcodes are common to both networking and storage PCI | ||
85 | * functions. They are used to reserve resources and configure | ||
86 | * BladeEngine. These opcodes all use the FWCMD_SUBSYSTEM_COMMON | ||
87 | * subsystem code. | ||
88 | */ | ||
89 | #define OPCODE_COMMON_NTWK_MAC_QUERY (1) | ||
90 | #define SUBSYSTEM_COMMON_NTWK_MAC_QUERY (1) | ||
91 | #define SUBSYSTEM_COMMON_NTWK_MAC_SET (1) | ||
92 | #define SUBSYSTEM_COMMON_NTWK_MULTICAST_SET (1) | ||
93 | #define SUBSYSTEM_COMMON_NTWK_VLAN_CONFIG (1) | ||
94 | #define SUBSYSTEM_COMMON_NTWK_LINK_STATUS_QUERY (1) | ||
95 | #define SUBSYSTEM_COMMON_READ_FLASHROM (1) | ||
96 | #define SUBSYSTEM_COMMON_WRITE_FLASHROM (1) | ||
97 | #define SUBSYSTEM_COMMON_QUERY_MAX_FWCMD_BUFFER_SIZE (1) | ||
98 | #define SUBSYSTEM_COMMON_ADD_PAGE_TABLES (1) | ||
99 | #define SUBSYSTEM_COMMON_REMOVE_PAGE_TABLES (1) | ||
100 | #define SUBSYSTEM_COMMON_RING_DESTROY (1) | ||
101 | #define SUBSYSTEM_COMMON_CQ_CREATE (1) | ||
102 | #define SUBSYSTEM_COMMON_EQ_CREATE (1) | ||
103 | #define SUBSYSTEM_COMMON_ETH_RX_CREATE (1) | ||
104 | #define SUBSYSTEM_COMMON_ETH_TX_CREATE (1) | ||
105 | #define SUBSYSTEM_COMMON_ISCSI_DEFQ_CREATE (1) | ||
106 | #define SUBSYSTEM_COMMON_ISCSI_WRBQ_CREATE (1) | ||
107 | #define SUBSYSTEM_COMMON_MCC_CREATE (1) | ||
108 | #define SUBSYSTEM_COMMON_JELL_CONFIG (1) | ||
109 | #define SUBSYSTEM_COMMON_FORCE_FAILOVER (1) | ||
110 | #define SUBSYSTEM_COMMON_ADD_TEMPLATE_HEADER_BUFFERS (1) | ||
111 | #define SUBSYSTEM_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS (1) | ||
112 | #define SUBSYSTEM_COMMON_POST_ZERO_BUFFER (1) | ||
113 | #define SUBSYSTEM_COMMON_GET_QOS (1) | ||
114 | #define SUBSYSTEM_COMMON_SET_QOS (1) | ||
115 | #define SUBSYSTEM_COMMON_TCP_GET_STATISTICS (1) | ||
116 | #define SUBSYSTEM_COMMON_SEEPROM_READ (1) | ||
117 | #define SUBSYSTEM_COMMON_TCP_STATE_QUERY (1) | ||
118 | #define SUBSYSTEM_COMMON_GET_CNTL_ATTRIBUTES (1) | ||
119 | #define SUBSYSTEM_COMMON_NOP (1) | ||
120 | #define SUBSYSTEM_COMMON_NTWK_RX_FILTER (1) | ||
121 | #define SUBSYSTEM_COMMON_GET_FW_VERSION (1) | ||
122 | #define SUBSYSTEM_COMMON_SET_FLOW_CONTROL (1) | ||
123 | #define SUBSYSTEM_COMMON_GET_FLOW_CONTROL (1) | ||
124 | #define SUBSYSTEM_COMMON_SET_TCP_PARAMETERS (1) | ||
125 | #define SUBSYSTEM_COMMON_SET_FRAME_SIZE (1) | ||
126 | #define SUBSYSTEM_COMMON_GET_FAT (1) | ||
127 | #define SUBSYSTEM_COMMON_MODIFY_EQ_DELAY (1) | ||
128 | #define SUBSYSTEM_COMMON_FIRMWARE_CONFIG (1) | ||
129 | #define SUBSYSTEM_COMMON_ENABLE_DISABLE_DOMAINS (1) | ||
130 | #define SUBSYSTEM_COMMON_GET_DOMAIN_CONFIG (1) | ||
131 | #define SUBSYSTEM_COMMON_SET_VLD_CONFIG (1) | ||
132 | #define SUBSYSTEM_COMMON_GET_VLD_CONFIG (1) | ||
133 | #define SUBSYSTEM_COMMON_GET_PORT_EQUALIZATION (1) | ||
134 | #define SUBSYSTEM_COMMON_SET_PORT_EQUALIZATION (1) | ||
135 | #define SUBSYSTEM_COMMON_RED_CONFIG (1) | ||
136 | #define OPCODE_COMMON_NTWK_MAC_SET (2) | ||
137 | #define OPCODE_COMMON_NTWK_MULTICAST_SET (3) | ||
138 | #define OPCODE_COMMON_NTWK_VLAN_CONFIG (4) | ||
139 | #define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY (5) | ||
140 | #define OPCODE_COMMON_READ_FLASHROM (6) | ||
141 | #define OPCODE_COMMON_WRITE_FLASHROM (7) | ||
142 | #define OPCODE_COMMON_QUERY_MAX_FWCMD_BUFFER_SIZE (8) | ||
143 | #define OPCODE_COMMON_ADD_PAGE_TABLES (9) | ||
144 | #define OPCODE_COMMON_REMOVE_PAGE_TABLES (10) | ||
145 | #define OPCODE_COMMON_RING_DESTROY (11) | ||
146 | #define OPCODE_COMMON_CQ_CREATE (12) | ||
147 | #define OPCODE_COMMON_EQ_CREATE (13) | ||
148 | #define OPCODE_COMMON_ETH_RX_CREATE (14) | ||
149 | #define OPCODE_COMMON_ETH_TX_CREATE (15) | ||
150 | #define OPCODE_COMMON_NET_RESERVED0 (16) /* Reserved */ | ||
151 | #define OPCODE_COMMON_NET_RESERVED1 (17) /* Reserved */ | ||
152 | #define OPCODE_COMMON_NET_RESERVED2 (18) /* Reserved */ | ||
153 | #define OPCODE_COMMON_ISCSI_DEFQ_CREATE (19) | ||
154 | #define OPCODE_COMMON_ISCSI_WRBQ_CREATE (20) | ||
155 | #define OPCODE_COMMON_MCC_CREATE (21) | ||
156 | #define OPCODE_COMMON_JELL_CONFIG (22) | ||
157 | #define OPCODE_COMMON_FORCE_FAILOVER (23) | ||
158 | #define OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS (24) | ||
159 | #define OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS (25) | ||
160 | #define OPCODE_COMMON_POST_ZERO_BUFFER (26) | ||
161 | #define OPCODE_COMMON_GET_QOS (27) | ||
162 | #define OPCODE_COMMON_SET_QOS (28) | ||
163 | #define OPCODE_COMMON_TCP_GET_STATISTICS (29) | ||
164 | #define OPCODE_COMMON_SEEPROM_READ (30) | ||
165 | #define OPCODE_COMMON_TCP_STATE_QUERY (31) | ||
166 | #define OPCODE_COMMON_GET_CNTL_ATTRIBUTES (32) | ||
167 | #define OPCODE_COMMON_NOP (33) | ||
168 | #define OPCODE_COMMON_NTWK_RX_FILTER (34) | ||
169 | #define OPCODE_COMMON_GET_FW_VERSION (35) | ||
170 | #define OPCODE_COMMON_SET_FLOW_CONTROL (36) | ||
171 | #define OPCODE_COMMON_GET_FLOW_CONTROL (37) | ||
172 | #define OPCODE_COMMON_SET_TCP_PARAMETERS (38) | ||
173 | #define OPCODE_COMMON_SET_FRAME_SIZE (39) | ||
174 | #define OPCODE_COMMON_GET_FAT (40) | ||
175 | #define OPCODE_COMMON_MODIFY_EQ_DELAY (41) | ||
176 | #define OPCODE_COMMON_FIRMWARE_CONFIG (42) | ||
177 | #define OPCODE_COMMON_ENABLE_DISABLE_DOMAINS (43) | ||
178 | #define OPCODE_COMMON_GET_DOMAIN_CONFIG (44) | ||
179 | #define OPCODE_COMMON_SET_VLD_CONFIG (45) | ||
180 | #define OPCODE_COMMON_GET_VLD_CONFIG (46) | ||
181 | #define OPCODE_COMMON_GET_PORT_EQUALIZATION (47) | ||
182 | #define OPCODE_COMMON_SET_PORT_EQUALIZATION (48) | ||
183 | #define OPCODE_COMMON_RED_CONFIG (49) | ||
184 | |||
185 | |||
186 | |||
187 | /* | ||
188 | * --- ETH_SUBSYSTEM_OPCODES --- | ||
189 | * These opcodes are used for configuring the Ethernet interfaces. These | ||
190 | * opcodes all use the FWCMD_SUBSYSTEM_ETH subsystem code. | ||
191 | */ | ||
192 | #define OPCODE_ETH_RSS_CONFIG (1) | ||
193 | #define OPCODE_ETH_ACPI_CONFIG (2) | ||
194 | #define SUBSYSTEM_ETH_RSS_CONFIG (3) | ||
195 | #define SUBSYSTEM_ETH_ACPI_CONFIG (3) | ||
196 | #define OPCODE_ETH_PROMISCUOUS (3) | ||
197 | #define SUBSYSTEM_ETH_PROMISCUOUS (3) | ||
198 | #define SUBSYSTEM_ETH_GET_STATISTICS (3) | ||
199 | #define SUBSYSTEM_ETH_GET_RX_FRAG_SIZE (3) | ||
200 | #define SUBSYSTEM_ETH_SET_RX_FRAG_SIZE (3) | ||
201 | #define OPCODE_ETH_GET_STATISTICS (4) | ||
202 | #define OPCODE_ETH_GET_RX_FRAG_SIZE (5) | ||
203 | #define OPCODE_ETH_SET_RX_FRAG_SIZE (6) | ||
204 | |||
205 | |||
206 | |||
207 | |||
208 | |||
209 | /* | ||
210 | * --- MCC_STATUS_CODE --- | ||
211 | * These are the global status codes used by all subsystems | ||
212 | */ | ||
213 | #define MCC_STATUS_SUCCESS (0) /* Indicates a successful | ||
214 | completion of the command */ | ||
215 | #define MCC_STATUS_INSUFFICIENT_PRIVILEGES (1) /* The client does not have | ||
216 | sufficient privileges to | ||
217 | execute the command */ | ||
218 | #define MCC_STATUS_INVALID_PARAMETER (2) /* A parameter in the command | ||
219 | was invalid. The extended | ||
220 | status contains the index | ||
221 | of the parameter */ | ||
222 | #define MCC_STATUS_INSUFFICIENT_RESOURCES (3) /* There are insufficient | ||
223 | chip resources to execute | ||
224 | the command */ | ||
225 | #define MCC_STATUS_QUEUE_FLUSHING (4) /* The command is completing | ||
226 | because the queue was | ||
227 | getting flushed */ | ||
228 | #define MCC_STATUS_DMA_FAILED (5) /* The command is completing | ||
229 | with a DMA error */ | ||
230 | |||
231 | /* | ||
232 | * --- MGMT_ERROR_CODES --- | ||
233 | * Error Codes returned in the status field of the FWCMD response header | ||
234 | */ | ||
235 | #define MGMT_STATUS_SUCCESS (0) /* The FWCMD completed | ||
236 | without errors */ | ||
237 | #define MGMT_STATUS_FAILED (1) /* Error status in the Status | ||
238 | field of the | ||
239 | struct FWCMD_RESPONSE_HEADER */ | ||
240 | #define MGMT_STATUS_ILLEGAL_REQUEST (2) /* Invalid FWCMD opcode */ | ||
241 | #define MGMT_STATUS_ILLEGAL_FIELD (3) /* Invalid parameter in | ||
242 | the FWCMD payload */ | ||
243 | |||
244 | #endif /* __fwcmd_opcodes_amap_h__ */ | ||
diff --git a/drivers/staging/benet/fwcmd_types_bmap.h b/drivers/staging/benet/fwcmd_types_bmap.h deleted file mode 100644 index 92217aff3a16..000000000000 --- a/drivers/staging/benet/fwcmd_types_bmap.h +++ /dev/null | |||
@@ -1,29 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | /* | ||
18 | * Autogenerated by srcgen version: 0127 | ||
19 | */ | ||
20 | #ifndef __fwcmd_types_bmap_h__ | ||
21 | #define __fwcmd_types_bmap_h__ | ||
22 | |||
23 | /* MAC address format */ | ||
24 | struct MAC_ADDRESS_FORMAT { | ||
25 | u16 SizeOfStructure; | ||
26 | u8 MACAddress[6]; | ||
27 | } __packed; | ||
28 | |||
29 | #endif /* __fwcmd_types_bmap_h__ */ | ||
diff --git a/drivers/staging/benet/host_struct.h b/drivers/staging/benet/host_struct.h deleted file mode 100644 index 3de6722b980f..000000000000 --- a/drivers/staging/benet/host_struct.h +++ /dev/null | |||
@@ -1,182 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | /* | ||
18 | * Autogenerated by srcgen version: 0127 | ||
19 | */ | ||
20 | #ifndef __host_struct_amap_h__ | ||
21 | #define __host_struct_amap_h__ | ||
22 | #include "be_cm.h" | ||
23 | #include "be_common.h" | ||
24 | #include "descriptors.h" | ||
25 | |||
26 | /* --- EQ_COMPLETION_MAJOR_CODE_ENUM --- */ | ||
27 | #define EQ_MAJOR_CODE_COMPLETION (0) /* Completion event on a */ | ||
28 | /* completion queue. */ | ||
29 | #define EQ_MAJOR_CODE_ETH (1) /* Affiliated Ethernet Event. */ | ||
30 | #define EQ_MAJOR_CODE_RESERVED (2) /* Reserved */ | ||
31 | #define EQ_MAJOR_CODE_RDMA (3) /* Affiliated RDMA Event. */ | ||
32 | #define EQ_MAJOR_CODE_ISCSI (4) /* Affiliated ISCSI Event */ | ||
33 | #define EQ_MAJOR_CODE_UNAFFILIATED (5) /* Unaffiliated Event */ | ||
34 | |||
35 | /* --- EQ_COMPLETION_MINOR_CODE_ENUM --- */ | ||
36 | #define EQ_MINOR_CODE_COMPLETION (0) /* Completion event on a */ | ||
37 | /* completion queue. */ | ||
38 | #define EQ_MINOR_CODE_OTHER (1) /* Other Event (TBD). */ | ||
39 | |||
40 | /* Queue Entry Definition for all 4 byte event queue types. */ | ||
41 | struct BE_EQ_ENTRY_AMAP { | ||
42 | u8 Valid; /* DWORD 0 */ | ||
43 | u8 MajorCode[3]; /* DWORD 0 */ | ||
44 | u8 MinorCode[12]; /* DWORD 0 */ | ||
45 | u8 ResourceID[16]; /* DWORD 0 */ | ||
46 | } __packed; | ||
47 | struct EQ_ENTRY_AMAP { | ||
48 | u32 dw[1]; | ||
49 | }; | ||
50 | |||
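
The EQ entry above packs a valid bit, a 3-bit major code, a 12-bit minor code and a 16-bit resource ID into one 32-bit word. A minimal standalone sketch of decoding such an entry (plain C; the eq_bits helper is illustrative and not part of the driver, which uses the AMAP macros from hwlib.h):

#include <stdint.h>
#include <stdio.h>

/* Bit layout taken from BE_EQ_ENTRY_AMAP above: valid[0], major[1:3],
 * minor[4:15], resource id[16:31]. The helper name is hypothetical. */
static uint32_t eq_bits(uint32_t dw, unsigned shift, unsigned width)
{
	uint32_t mask = (width == 32) ? 0xFFFFFFFFu : ((1u << width) - 1);
	return (dw >> shift) & mask;
}

int main(void)
{
	uint32_t dw = 0x002A0031;          /* example raw EQ entry */

	printf("valid=%u major=%u minor=%u rid=%u\n",
	       eq_bits(dw, 0, 1),          /* Valid          */
	       eq_bits(dw, 1, 3),          /* MajorCode[3]   */
	       eq_bits(dw, 4, 12),         /* MinorCode[12]  */
	       eq_bits(dw, 16, 16));       /* ResourceID[16] */
	return 0;
}
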
51 | /* | ||
52 | * --- ETH_EVENT_CODE --- | ||
53 | * These codes are returned by the MPU when one of these events has occurred, | ||
54 | * and the event is configured to report to an Event Queue when an event | ||
55 | * is detected. | ||
56 | */ | ||
57 | #define ETH_EQ_LINK_STATUS (0) /* Link status change event */ | ||
58 | /* detected. */ | ||
59 | #define ETH_EQ_WATERMARK (1) /* watermark event detected. */ | ||
60 | #define ETH_EQ_MAGIC_PKT (2) /* magic pkt event detected. */ | ||
61 | #define ETH_EQ_ACPI_PKT0 (3) /* ACPI interesting packet */ | ||
62 | /* detected. */ | ||
63 | #define ETH_EQ_ACPI_PKT1 (3) /* ACPI interesting packet */ | ||
64 | /* detected. */ | ||
65 | #define ETH_EQ_ACPI_PKT2 (3) /* ACPI interesting packet */ | ||
66 | /* detected. */ | ||
67 | #define ETH_EQ_ACPI_PKT3 (3) /* ACPI interesting packet */ | ||
68 | /* detected. */ | ||
69 | |||
70 | /* | ||
71 | * --- ETH_TX_COMPL_STATUS_ENUM --- | ||
72 | * Status codes contained in Ethernet TX completion descriptors. | ||
73 | */ | ||
74 | #define ETH_COMP_VALID (0) | ||
75 | #define ETH_COMP_ERROR (1) | ||
76 | #define ETH_COMP_INVALID (15) | ||
77 | |||
78 | /* | ||
79 | * --- ETH_TX_COMPL_PORT_ENUM --- | ||
80 | * Port indicator contained in Ethernet TX completion descriptors. | ||
81 | */ | ||
82 | #define ETH_COMP_PORT0 (0) | ||
83 | #define ETH_COMP_PORT1 (1) | ||
84 | #define ETH_COMP_MGMT (2) | ||
85 | |||
86 | /* | ||
87 | * --- ETH_TX_COMPL_CT_ENUM --- | ||
88 | * Completion type indicator contained in Ethernet TX completion descriptors. | ||
89 | */ | ||
90 | #define ETH_COMP_ETH (0) | ||
91 | |||
92 | /* | ||
93 | * Work request block that the driver issues to the chip for | ||
94 | * Ethernet transmissions. All control fields must be valid in each WRB for | ||
95 | * a message. The controller, as specified by the flags, optionally writes | ||
96 | * an entry to the Completion Ring and generates an event. | ||
97 | */ | ||
98 | struct BE_ETH_WRB_AMAP { | ||
99 | u8 frag_pa_hi[32]; /* DWORD 0 */ | ||
100 | u8 frag_pa_lo[32]; /* DWORD 1 */ | ||
101 | u8 complete; /* DWORD 2 */ | ||
102 | u8 event; /* DWORD 2 */ | ||
103 | u8 crc; /* DWORD 2 */ | ||
104 | u8 forward; /* DWORD 2 */ | ||
105 | u8 ipsec; /* DWORD 2 */ | ||
106 | u8 mgmt; /* DWORD 2 */ | ||
107 | u8 ipcs; /* DWORD 2 */ | ||
108 | u8 udpcs; /* DWORD 2 */ | ||
109 | u8 tcpcs; /* DWORD 2 */ | ||
110 | u8 lso; /* DWORD 2 */ | ||
111 | u8 last; /* DWORD 2 */ | ||
112 | u8 vlan; /* DWORD 2 */ | ||
113 | u8 dbg[3]; /* DWORD 2 */ | ||
114 | u8 hash_val[3]; /* DWORD 2 */ | ||
115 | u8 lso_mss[14]; /* DWORD 2 */ | ||
116 | u8 frag_len[16]; /* DWORD 3 */ | ||
117 | u8 vlan_tag[16]; /* DWORD 3 */ | ||
118 | } __packed; | ||
119 | struct ETH_WRB_AMAP { | ||
120 | u32 dw[4]; | ||
121 | }; | ||
122 | |||
123 | /* This is an Ethernet transmit completion descriptor */ | ||
124 | struct BE_ETH_TX_COMPL_AMAP { | ||
125 | u8 user_bytes[16]; /* DWORD 0 */ | ||
126 | u8 nwh_bytes[8]; /* DWORD 0 */ | ||
127 | u8 lso; /* DWORD 0 */ | ||
128 | u8 rsvd0[7]; /* DWORD 0 */ | ||
129 | u8 wrb_index[16]; /* DWORD 1 */ | ||
130 | u8 ct[2]; /* DWORD 1 */ | ||
131 | u8 port[2]; /* DWORD 1 */ | ||
132 | u8 rsvd1[8]; /* DWORD 1 */ | ||
133 | u8 status[4]; /* DWORD 1 */ | ||
134 | u8 rsvd2[16]; /* DWORD 2 */ | ||
135 | u8 ringid[11]; /* DWORD 2 */ | ||
136 | u8 hash_val[4]; /* DWORD 2 */ | ||
137 | u8 valid; /* DWORD 2 */ | ||
138 | u8 rsvd3[32]; /* DWORD 3 */ | ||
139 | } __packed; | ||
140 | struct ETH_TX_COMPL_AMAP { | ||
141 | u32 dw[4]; | ||
142 | }; | ||
143 | |||
144 | /* Ethernet Receive Buffer descriptor */ | ||
145 | struct BE_ETH_RX_D_AMAP { | ||
146 | u8 fragpa_hi[32]; /* DWORD 0 */ | ||
147 | u8 fragpa_lo[32]; /* DWORD 1 */ | ||
148 | } __packed; | ||
149 | struct ETH_RX_D_AMAP { | ||
150 | u32 dw[2]; | ||
151 | }; | ||
152 | |||
153 | /* This is an Ethernet Receive Completion Descriptor */ | ||
154 | struct BE_ETH_RX_COMPL_AMAP { | ||
155 | u8 vlan_tag[16]; /* DWORD 0 */ | ||
156 | u8 pktsize[14]; /* DWORD 0 */ | ||
157 | u8 port; /* DWORD 0 */ | ||
158 | u8 rsvd0; /* DWORD 0 */ | ||
159 | u8 err; /* DWORD 1 */ | ||
160 | u8 rsshp; /* DWORD 1 */ | ||
161 | u8 ipf; /* DWORD 1 */ | ||
162 | u8 tcpf; /* DWORD 1 */ | ||
163 | u8 udpf; /* DWORD 1 */ | ||
164 | u8 ipcksm; /* DWORD 1 */ | ||
165 | u8 tcpcksm; /* DWORD 1 */ | ||
166 | u8 udpcksm; /* DWORD 1 */ | ||
167 | u8 macdst[6]; /* DWORD 1 */ | ||
168 | u8 vtp; /* DWORD 1 */ | ||
169 | u8 vtm; /* DWORD 1 */ | ||
170 | u8 fragndx[10]; /* DWORD 1 */ | ||
171 | u8 ct[2]; /* DWORD 1 */ | ||
172 | u8 ipsec; /* DWORD 1 */ | ||
173 | u8 numfrags[3]; /* DWORD 1 */ | ||
174 | u8 rsvd1[31]; /* DWORD 2 */ | ||
175 | u8 valid; /* DWORD 2 */ | ||
176 | u8 rsshash[32]; /* DWORD 3 */ | ||
177 | } __packed; | ||
178 | struct ETH_RX_COMPL_AMAP { | ||
179 | u32 dw[4]; | ||
180 | }; | ||
181 | |||
182 | #endif /* __host_struct_amap_h__ */ | ||
diff --git a/drivers/staging/benet/hwlib.h b/drivers/staging/benet/hwlib.h deleted file mode 100644 index afedf4dc5903..000000000000 --- a/drivers/staging/benet/hwlib.h +++ /dev/null | |||
@@ -1,830 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | #ifndef __hwlib_h__ | ||
18 | #define __hwlib_h__ | ||
19 | |||
20 | #include <linux/module.h> | ||
21 | #include <linux/io.h> | ||
22 | #include <linux/list.h> | ||
23 | #include <linux/spinlock.h> | ||
24 | |||
25 | #include "regmap.h" /* srcgen array map output */ | ||
26 | |||
27 | #include "asyncmesg.h" | ||
28 | #include "fwcmd_opcodes.h" | ||
29 | #include "post_codes.h" | ||
30 | #include "fwcmd_mcc.h" | ||
31 | |||
32 | #include "fwcmd_types_bmap.h" | ||
33 | #include "fwcmd_common_bmap.h" | ||
34 | #include "fwcmd_eth_bmap.h" | ||
35 | #include "bestatus.h" | ||
36 | /* | ||
37 | * | ||
38 | * Macros for reading/writing protection domain or CSR registers | ||
39 | * in BladeEngine. | ||
40 | */ | ||
41 | #define PD_READ(fo, field) ioread32((fo)->db_va + \ | ||
42 | offsetof(struct BE_PROTECTION_DOMAIN_DBMAP_AMAP, field)/8) | ||
43 | |||
44 | #define PD_WRITE(fo, field, val) iowrite32(val, (fo)->db_va + \ | ||
45 | offsetof(struct BE_PROTECTION_DOMAIN_DBMAP_AMAP, field)/8) | ||
46 | |||
47 | #define CSR_READ(fo, field) ioread32((fo)->csr_va + \ | ||
48 | offsetof(struct BE_BLADE_ENGINE_CSRMAP_AMAP, field)/8) | ||
49 | |||
50 | #define CSR_WRITE(fo, field, val) iowrite32(val, (fo)->csr_va + \ | ||
51 | offsetof(struct BE_BLADE_ENGINE_CSRMAP_AMAP, field)/8) | ||
52 | |||
53 | #define PCICFG0_READ(fo, field) ioread32((fo)->pci_va + \ | ||
54 | offsetof(struct BE_PCICFG0_CSRMAP_AMAP, field)/8) | ||
55 | |||
56 | #define PCICFG0_WRITE(fo, field, val) iowrite32(val, (fo)->pci_va + \ | ||
57 | offsetof(struct BE_PCICFG0_CSRMAP_AMAP, field)/8) | ||
58 | |||
59 | #define PCICFG1_READ(fo, field) ioread32((fo)->pci_va + \ | ||
60 | offsetof(struct BE_PCICFG1_CSRMAP_AMAP, field)/8) | ||
61 | |||
62 | #define PCICFG1_WRITE(fo, field, val) iowrite32(val, (fo)->pci_va + \ | ||
63 | offsetof(struct BE_PCICFG1_CSRMAP_AMAP, field)/8) | ||
64 | |||
65 | #ifdef BE_DEBUG | ||
66 | #define ASSERT(c) BUG_ON(!(c)); | ||
67 | #else | ||
68 | #define ASSERT(c) | ||
69 | #endif | ||
70 | |||
71 | /* debug levels */ | ||
72 | enum BE_DEBUG_LEVELS { | ||
73 | DL_ALWAYS = 0, /* cannot be masked */ | ||
74 | DL_ERR = 0x1, /* errors that should never happen */ | ||
75 | DL_WARN = 0x2, /* something questionable; | ||
76 | recoverable errors */ | ||
77 | DL_NOTE = 0x4, /* infrequent, important debug info */ | ||
78 | DL_INFO = 0x8, /* debug information */ | ||
79 | DL_VERBOSE = 0x10, /* detailed info, such as buffer traces */ | ||
80 | BE_DL_MIN_VALUE = 0x1, /* this is the min value used */ | ||
81 | BE_DL_MAX_VALUE = 0x80 /* this is the highest value used */ | ||
82 | } ; | ||
83 | |||
84 | extern unsigned int trace_level; | ||
85 | |||
86 | #define TRACE(lm, fmt, args...) { \ | ||
87 | if (trace_level & lm) { \ | ||
88 | printk(KERN_NOTICE "BE: %s:%d \n" fmt, \ | ||
89 | __FILE__ , __LINE__ , ## args); \ | ||
90 | } \ | ||
91 | } | ||
92 | |||
93 | static inline unsigned int be_trace_set_level(unsigned int level) | ||
94 | { | ||
95 | unsigned int old_level = trace_level; | ||
96 | trace_level = level; | ||
97 | return old_level; | ||
98 | } | ||
99 | |||
100 | #define be_trace_get_level() trace_level | ||
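
The TRACE macro filters messages against the global trace_level bit mask, and be_trace_set_level() installs a new mask while returning the old one so callers can restore it. A small userspace sketch of the same pattern (the level values reuse the DL_* definitions above; everything else is illustrative):

#include <stdio.h>

enum { DL_ERR = 0x1, DL_WARN = 0x2, DL_NOTE = 0x4 };

static unsigned int trace_level = DL_ERR | DL_WARN;

#define TRACE(lm, fmt, ...) do {                                   \
	if (trace_level & (lm))                                    \
		printf("BE: %s:%d " fmt "\n",                      \
		       __FILE__, __LINE__, ##__VA_ARGS__);         \
} while (0)

static unsigned int trace_set_level(unsigned int level)
{
	unsigned int old = trace_level;

	trace_level = level;
	return old;
}

int main(void)
{
	unsigned int old = trace_set_level(DL_WARN);   /* only warnings */

	TRACE(DL_WARN, "polled %d times", 42);         /* printed */
	TRACE(DL_NOTE, "not printed at this level");   /* filtered out */
	trace_set_level(old);                          /* restore */
	return 0;
}
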
101 | /* | ||
102 | * Returns number of pages spanned by the size of data | ||
103 | * starting at the given address. | ||
104 | */ | ||
105 | #define PAGES_SPANNED(_address, _size) \ | ||
106 | ((u32)((((size_t)(_address) & (PAGE_SIZE - 1)) + \ | ||
107 | (_size) + (PAGE_SIZE - 1)) >> PAGE_SHIFT)) | ||
108 | /* Byte offset into the page corresponding to given address */ | ||
109 | #define OFFSET_IN_PAGE(_addr_) ((size_t)(_addr_) & (PAGE_SIZE-1)) | ||
110 | |||
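
PAGES_SPANNED adds the offset of the buffer within its first page before rounding up, so even a buffer shorter than one page can span two pages. A worked standalone check, assuming 4 KiB pages (the macro bodies are copied from above):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12                        /* assume 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

#define PAGES_SPANNED(_address, _size) \
	((unsigned)((((size_t)(_address) & (PAGE_SIZE - 1)) + \
	(_size) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
#define OFFSET_IN_PAGE(_addr_) ((size_t)(_addr_) & (PAGE_SIZE - 1))

int main(void)
{
	/* 256 bytes starting 64 bytes before a page boundary span 2 pages. */
	size_t addr = 0x10000 - 64;

	printf("offset=%zu pages=%u\n",
	       OFFSET_IN_PAGE(addr), PAGES_SPANNED(addr, 256));
	return 0;
}
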
111 | /* | ||
112 | * circular subtract. | ||
113 | * Returns a - b assuming a circular number system, where a and b are | ||
114 | * in range (0, maxValue-1). If a==b, zero is returned so the | ||
115 | * highest value possible with this subtraction is maxValue-1. | ||
116 | */ | ||
117 | static inline u32 be_subc(u32 a, u32 b, u32 max) | ||
118 | { | ||
119 | ASSERT(a <= max && b <= max); | ||
120 | ASSERT(max > 0); | ||
121 | return a >= b ? (a - b) : (max - b + a); | ||
122 | } | ||
123 | |||
124 | static inline u32 be_addc(u32 a, u32 b, u32 max) | ||
125 | { | ||
126 | ASSERT(a < max); | ||
127 | ASSERT(max > 0); | ||
128 | return (max - a > b) ? (a + b) : (b + a - max); | ||
129 | } | ||
130 | |||
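
be_subc() and be_addc() perform modular ring-index arithmetic without a divide: subtraction wraps by adding the ring size back in, and addition wraps by subtracting it. A standalone sketch of the same arithmetic (asserts dropped, names shortened):

#include <stdio.h>
#include <stdint.h>

static uint32_t subc(uint32_t a, uint32_t b, uint32_t max)
{
	return a >= b ? (a - b) : (max - b + a);        /* (a - b) mod max */
}

static uint32_t addc(uint32_t a, uint32_t b, uint32_t max)
{
	return (max - a > b) ? (a + b) : (b + a - max); /* (a + b) mod max */
}

int main(void)
{
	uint32_t num = 8;                  /* ring with 8 entries */

	printf("%u\n", addc(7, 1, num));   /* 7 + 1 wraps to 0 */
	printf("%u\n", subc(1, 3, num));   /* 1 - 3 wraps to 6 */
	return 0;
}
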
131 | /* descriptor for a physically contiguous memory used for ring */ | ||
132 | struct ring_desc { | ||
133 | u32 length; /* length in bytes */ | ||
134 | void *va; /* virtual address */ | ||
135 | u64 pa; /* bus address */ | ||
136 | } ; | ||
137 | |||
138 | /* | ||
139 | * This structure stores information about a ring shared between hardware | ||
140 | * and software. Each ring is allocated by the driver in the uncached | ||
141 | * extension and mapped into BladeEngine's unified table. | ||
142 | */ | ||
143 | struct mp_ring { | ||
144 | u32 pages; /* queue size in pages */ | ||
145 | u32 id; /* queue id assigned by beklib */ | ||
146 | u32 num; /* number of elements in queue */ | ||
147 | u32 cidx; /* consumer index */ | ||
148 | u32 pidx; /* producer index -- not used by most rings */ | ||
149 | u32 itemSize; /* size in bytes of one object */ | ||
150 | |||
151 | void *va; /* The virtual address of the ring. | ||
152 | This should be last to allow 32 & 64 | ||
153 | bit debugger extensions to work. */ | ||
154 | } ; | ||
155 | |||
156 | /*----------- amap bit field get / set macros and functions -----*/ | ||
157 | /* | ||
158 | * Structures defined in the map header files (under fw/amap/) with names | ||
159 | * in the format BE_<name>_AMAP are pseudo structures with members | ||
160 | * of type u8. These structures are templates that are used in | ||
161 | * conjunction with the structures with names in the format | ||
162 | * <name>_AMAP to calculate the bit masks and bit offsets to get or set | ||
163 | * bit fields in structures. The structures <name>_AMAP are arrays | ||
164 | * of 32 bits words and have the correct size. The following macros | ||
165 | * provide convenient ways to get and set the various members | ||
166 | * in the structures without using structures with bit fields. | ||
167 | * Always use the AMAP_GET_BITS_PTR and AMAP_SET_BITS_PTR macros | ||
168 | * to extract and set various members. | ||
169 | */ | ||
170 | |||
171 | /* | ||
172 | * Returns a bit mask for the register that is NOT shifted into location. | ||
173 | * That means return values always look like: 0x1, 0xFF, 0x7FF, etc... | ||
174 | */ | ||
175 | static inline u32 amap_mask(u32 bit_size) | ||
176 | { | ||
177 | return bit_size == 32 ? 0xFFFFFFFF : (1 << bit_size) - 1; | ||
178 | } | ||
179 | |||
180 | #define AMAP_BIT_MASK(_struct_, field) \ | ||
181 | amap_mask(AMAP_BIT_SIZE(_struct_, field)) | ||
182 | |||
183 | /* | ||
184 | * Non-optimized set-bits function: first clears the bits and then assigns them. | ||
185 | * This does not require knowledge of the particular DWORD you are setting. | ||
186 | * e.g. AMAP_SET_BITS_PTR (struct, field1, &contextMemory, 123); | ||
187 | */ | ||
188 | static inline void | ||
189 | amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value) | ||
190 | { | ||
191 | u32 *dw = (u32 *)ptr; | ||
192 | *(dw + dw_offset) &= ~(mask << offset); | ||
193 | *(dw + dw_offset) |= (mask & value) << offset; | ||
194 | } | ||
195 | |||
196 | #define AMAP_SET_BITS_PTR(_struct_, field, _structPtr_, val) \ | ||
197 | amap_set(_structPtr_, AMAP_WORD_OFFSET(_struct_, field),\ | ||
198 | AMAP_BIT_MASK(_struct_, field), \ | ||
199 | AMAP_BIT_OFFSET(_struct_, field), val) | ||
200 | /* | ||
201 | * Non-optimized routine that gets the bits without knowing the correct DWORD. | ||
202 | * e.g. fieldValue = AMAP_GET_BITS_PTR (struct, field1, &contextMemory); | ||
203 | */ | ||
204 | static inline u32 | ||
205 | amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset) | ||
206 | { | ||
207 | u32 *dw = (u32 *)ptr; | ||
208 | return mask & (*(dw + dw_offset) >> offset); | ||
209 | } | ||
210 | #define AMAP_GET_BITS_PTR(_struct_, field, _structPtr_) \ | ||
211 | amap_get(_structPtr_, AMAP_WORD_OFFSET(_struct_, field), \ | ||
212 | AMAP_BIT_MASK(_struct_, field), \ | ||
213 | AMAP_BIT_OFFSET(_struct_, field)) | ||
214 | |||
215 | /* Returns 0-31 representing bit offset within a DWORD of a bitfield. */ | ||
216 | #define AMAP_BIT_OFFSET(_struct_, field) \ | ||
217 | (offsetof(struct BE_ ## _struct_ ## _AMAP, field) % 32) | ||
218 | |||
219 | /* Returns 0-n representing DWORD offset of bitfield within the structure. */ | ||
220 | #define AMAP_WORD_OFFSET(_struct_, field) \ | ||
221 | (offsetof(struct BE_ ## _struct_ ## _AMAP, field)/32) | ||
222 | |||
223 | /* Returns size of bitfield in bits. */ | ||
224 | #define AMAP_BIT_SIZE(_struct_, field) \ | ||
225 | sizeof(((struct BE_ ## _struct_ ## _AMAP*)0)->field) | ||
226 | |||
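
The AMAP_*_PTR macros derive a field's DWORD index, bit offset, and mask from the byte offset of the matching u8 member in the BE_<name>_AMAP template, then read or modify that field in a raw array of 32-bit words. A self-contained sketch of the same idea, using a hypothetical two-field template rather than a real hardware structure:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Hypothetical template: valid is bit 0, code occupies bits 1..12. */
struct BE_DEMO_AMAP {
	uint8_t valid;      /* 1 bit   */
	uint8_t code[12];   /* 12 bits */
};

#define BIT_OFFSET(s, f)  (offsetof(struct BE_##s##_AMAP, f) % 32)
#define WORD_OFFSET(s, f) (offsetof(struct BE_##s##_AMAP, f) / 32)
#define BIT_SIZE(s, f)    sizeof(((struct BE_##s##_AMAP *)0)->f)
#define BIT_MASK(s, f)    ((BIT_SIZE(s, f) == 32) ? 0xFFFFFFFFu : \
			   ((1u << BIT_SIZE(s, f)) - 1))

#define SET_BITS(s, f, p, v) do {                                        \
	uint32_t *dw = (uint32_t *)(p) + WORD_OFFSET(s, f);              \
	*dw &= ~(BIT_MASK(s, f) << BIT_OFFSET(s, f));                    \
	*dw |= (BIT_MASK(s, f) & (v)) << BIT_OFFSET(s, f);               \
} while (0)

#define GET_BITS(s, f, p) \
	(BIT_MASK(s, f) & (((uint32_t *)(p))[WORD_OFFSET(s, f)] >> BIT_OFFSET(s, f)))

int main(void)
{
	uint32_t ctx[1] = { 0 };           /* raw context memory */

	SET_BITS(DEMO, valid, ctx, 1);
	SET_BITS(DEMO, code, ctx, 0x123);
	printf("valid=%u code=0x%x raw=0x%08x\n",
	       GET_BITS(DEMO, valid, ctx), GET_BITS(DEMO, code, ctx), ctx[0]);
	return 0;
}

The byte offset of each u8 member doubles as the bit offset of the field, which is why the templates are never instantiated, only passed through offsetof() and sizeof().
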
227 | struct be_mcc_wrb_response_copy { | ||
228 | u16 length; /* bytes in response */ | ||
229 | u16 fwcmd_offset; /* offset within the wrb of the response */ | ||
230 | void *va; /* user's va to copy response into */ | ||
231 | |||
232 | } ; | ||
233 | typedef void (*mcc_wrb_cqe_callback) (void *context, int status, | ||
234 | struct MCC_WRB_AMAP *optional_wrb); | ||
235 | struct be_mcc_wrb_context { | ||
236 | |||
237 | mcc_wrb_cqe_callback internal_cb; /* Function to call on | ||
238 | completion */ | ||
239 | void *internal_cb_context; /* Parameter to pass | ||
240 | to completion function */ | ||
241 | |||
242 | mcc_wrb_cqe_callback cb; /* Function to call on completion */ | ||
243 | void *cb_context; /* Parameter to pass to completion function */ | ||
244 | |||
245 | int *users_final_status; /* pointer to a local | ||
246 | variable for synchronous | ||
247 | commands */ | ||
248 | struct MCC_WRB_AMAP *wrb; /* pointer to original wrb for embedded | ||
249 | commands only */ | ||
250 | struct list_head next; /* links context structs together in | ||
251 | free list */ | ||
252 | |||
253 | struct be_mcc_wrb_response_copy copy; /* Optional parameters to copy | ||
254 | embedded response to user's va */ | ||
255 | |||
256 | #if defined(BE_DEBUG) | ||
257 | u16 subsystem, opcode; /* Track this FWCMD for debug builds. */ | ||
258 | struct MCC_WRB_AMAP *ring_wrb; | ||
259 | u32 consumed_count; | ||
260 | #endif | ||
261 | } ; | ||
262 | |||
263 | /* | ||
264 | Represents a function object for network or storage. This | ||
265 | is used to manage per-function resources like MCC CQs, etc. | ||
266 | */ | ||
267 | struct be_function_object { | ||
268 | |||
269 | u32 magic; /*!< magic for detecting memory corruption. */ | ||
270 | |||
271 | /* PCI BAR mapped addresses */ | ||
272 | u8 __iomem *csr_va; /* CSR */ | ||
273 | u8 __iomem *db_va; /* Door Bell */ | ||
274 | u8 __iomem *pci_va; /* PCI config space */ | ||
275 | u32 emulate; /* if set, MPU is not available. | ||
276 | Emulate everything. */ | ||
277 | u32 pend_queue_driving; /* if set, drive the queued WRBs | ||
278 | after releasing the WRB lock */ | ||
279 | |||
280 | spinlock_t post_lock; /* lock for verifying one thread posting wrbs */ | ||
281 | spinlock_t cq_lock; /* lock for verifying one thread | ||
282 | processing cq */ | ||
283 | spinlock_t mcc_context_lock; /* lock for protecting mcc | ||
284 | context free list */ | ||
285 | unsigned long post_irq; | ||
286 | unsigned long cq_irq; | ||
287 | |||
288 | u32 type; | ||
289 | u32 pci_function_number; | ||
290 | |||
291 | struct be_mcc_object *mcc; /* mcc rings. */ | ||
292 | |||
293 | struct { | ||
294 | struct MCC_MAILBOX_AMAP *va; /* VA to the mailbox */ | ||
295 | u64 pa; /* PA to the mailbox */ | ||
296 | u32 length; /* byte length of mailbox */ | ||
297 | |||
298 | /* One default context struct used for posting at | ||
299 | * least one MCC_WRB | ||
300 | */ | ||
301 | struct be_mcc_wrb_context default_context; | ||
302 | bool default_context_allocated; | ||
303 | } mailbox; | ||
304 | |||
305 | struct { | ||
306 | |||
307 | /* Wake on lans configured. */ | ||
308 | u32 wol_bitmask; /* bits 0,1,2,3 are set if | ||
309 | corresponding index is enabled */ | ||
310 | } config; | ||
311 | |||
312 | |||
313 | struct BE_FIRMWARE_CONFIG fw_config; | ||
314 | } ; | ||
315 | |||
316 | /* | ||
317 | Represents an Event Queue | ||
318 | */ | ||
319 | struct be_eq_object { | ||
320 | u32 magic; | ||
321 | atomic_t ref_count; | ||
322 | |||
323 | struct be_function_object *parent_function; | ||
324 | |||
325 | struct list_head eq_list; | ||
326 | struct list_head cq_list_head; | ||
327 | |||
328 | u32 eq_id; | ||
329 | void *cb_context; | ||
330 | |||
331 | } ; | ||
332 | |||
333 | /* | ||
334 | Manages a completion queue | ||
335 | */ | ||
336 | struct be_cq_object { | ||
337 | u32 magic; | ||
338 | atomic_t ref_count; | ||
339 | |||
340 | struct be_function_object *parent_function; | ||
341 | struct be_eq_object *eq_object; | ||
342 | |||
343 | struct list_head cq_list; | ||
344 | struct list_head cqlist_for_eq; | ||
345 | |||
346 | void *va; | ||
347 | u32 num_entries; | ||
348 | |||
349 | void *cb_context; | ||
350 | |||
351 | u32 cq_id; | ||
352 | |||
353 | } ; | ||
354 | |||
355 | /* | ||
356 | Manages an ethernet send queue | ||
357 | */ | ||
358 | struct be_ethsq_object { | ||
359 | u32 magic; | ||
360 | |||
361 | struct list_head list; | ||
362 | |||
363 | struct be_function_object *parent_function; | ||
364 | struct be_cq_object *cq_object; | ||
365 | u32 bid; | ||
366 | |||
367 | } ; | ||
368 | |||
369 | /* | ||
370 | @brief | ||
371 | Manages an ethernet receive queue | ||
372 | */ | ||
373 | struct be_ethrq_object { | ||
374 | u32 magic; | ||
375 | struct list_head list; | ||
376 | struct be_function_object *parent_function; | ||
377 | u32 rid; | ||
378 | struct be_cq_object *cq_object; | ||
379 | struct be_cq_object *rss_cq_object[4]; | ||
380 | |||
381 | } ; | ||
382 | |||
383 | /* | ||
384 | Manages an MCC | ||
385 | */ | ||
386 | typedef void (*mcc_async_event_callback) (void *context, u32 event_code, | ||
387 | void *event); | ||
388 | struct be_mcc_object { | ||
389 | u32 magic; | ||
390 | |||
391 | struct be_function_object *parent_function; | ||
392 | struct list_head mcc_list; | ||
393 | |||
394 | struct be_cq_object *cq_object; | ||
395 | |||
396 | /* Async event callback for MCC CQ. */ | ||
397 | mcc_async_event_callback async_cb; | ||
398 | void *async_context; | ||
399 | |||
400 | struct { | ||
401 | struct be_mcc_wrb_context *base; | ||
402 | u32 num; | ||
403 | struct list_head list_head; | ||
404 | } wrb_context; | ||
405 | |||
406 | struct { | ||
407 | struct ring_desc *rd; | ||
408 | struct mp_ring ring; | ||
409 | } sq; | ||
410 | |||
411 | struct { | ||
412 | struct mp_ring ring; | ||
413 | } cq; | ||
414 | |||
415 | u32 processing; /* flag indicating that one thread | ||
416 | is processing CQ */ | ||
417 | u32 rearm; /* doorbell rearm setting to make | ||
418 | sure the active processing thread */ | ||
419 | /* rearms the CQ if any of the threads requested it. */ | ||
420 | |||
421 | struct list_head backlog; | ||
422 | u32 backlog_length; | ||
423 | u32 driving_backlog; | ||
424 | u32 consumed_index; | ||
425 | |||
426 | } ; | ||
427 | |||
428 | |||
429 | /* Queue context header -- the required software information for | ||
430 | * queueing a WRB. | ||
431 | */ | ||
432 | struct be_queue_driver_context { | ||
433 | mcc_wrb_cqe_callback internal_cb; /* Function to call on | ||
434 | completion */ | ||
435 | void *internal_cb_context; /* Parameter to pass | ||
436 | to completion function */ | ||
437 | |||
438 | mcc_wrb_cqe_callback cb; /* Function to call on completion */ | ||
439 | void *cb_context; /* Parameter to pass to completion function */ | ||
440 | |||
441 | struct be_mcc_wrb_response_copy copy; /* Optional parameters to copy | ||
442 | embedded response to user's va */ | ||
443 | void *optional_fwcmd_va; | ||
444 | struct list_head list; | ||
445 | u32 bytes; | ||
446 | } ; | ||
447 | |||
448 | /* | ||
449 | * Common MCC WRB header that all commands require. | ||
450 | */ | ||
451 | struct be_mcc_wrb_header { | ||
452 | u8 rsvd[offsetof(struct BE_MCC_WRB_AMAP, payload)/8]; | ||
453 | } ; | ||
454 | |||
455 | /* | ||
456 | * All non embedded commands supported by hwlib functions only allow | ||
457 | * 1 SGE. This queue context handles them all. | ||
458 | */ | ||
459 | struct be_nonembedded_q_ctxt { | ||
460 | struct be_queue_driver_context context; | ||
461 | struct be_mcc_wrb_header wrb_header; | ||
462 | struct MCC_SGE_AMAP sge[1]; | ||
463 | } ; | ||
464 | |||
465 | /* | ||
466 | * ------------------------------------------------------------------------ | ||
467 | * This section contains the specific queue struct for each command. | ||
468 | * The user could always provide a be_generic_q_ctxt but this is a | ||
469 | * rather large struct. By using the specific struct, memory consumption | ||
470 | * can be reduced. | ||
471 | * ------------------------------------------------------------------------ | ||
472 | */ | ||
473 | |||
474 | struct be_link_status_q_ctxt { | ||
475 | struct be_queue_driver_context context; | ||
476 | struct be_mcc_wrb_header wrb_header; | ||
477 | struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY fwcmd; | ||
478 | } ; | ||
479 | |||
480 | struct be_multicast_q_ctxt { | ||
481 | struct be_queue_driver_context context; | ||
482 | struct be_mcc_wrb_header wrb_header; | ||
483 | struct FWCMD_COMMON_NTWK_MULTICAST_SET fwcmd; | ||
484 | } ; | ||
485 | |||
486 | |||
487 | struct be_vlan_q_ctxt { | ||
488 | struct be_queue_driver_context context; | ||
489 | struct be_mcc_wrb_header wrb_header; | ||
490 | struct FWCMD_COMMON_NTWK_VLAN_CONFIG fwcmd; | ||
491 | } ; | ||
492 | |||
493 | struct be_promiscuous_q_ctxt { | ||
494 | struct be_queue_driver_context context; | ||
495 | struct be_mcc_wrb_header wrb_header; | ||
496 | struct FWCMD_ETH_PROMISCUOUS fwcmd; | ||
497 | } ; | ||
498 | |||
499 | struct be_force_failover_q_ctxt { | ||
500 | struct be_queue_driver_context context; | ||
501 | struct be_mcc_wrb_header wrb_header; | ||
502 | struct FWCMD_COMMON_FORCE_FAILOVER fwcmd; | ||
503 | } ; | ||
504 | |||
505 | |||
506 | struct be_rxf_filter_q_ctxt { | ||
507 | struct be_queue_driver_context context; | ||
508 | struct be_mcc_wrb_header wrb_header; | ||
509 | struct FWCMD_COMMON_NTWK_RX_FILTER fwcmd; | ||
510 | } ; | ||
511 | |||
512 | struct be_eq_modify_delay_q_ctxt { | ||
513 | struct be_queue_driver_context context; | ||
514 | struct be_mcc_wrb_header wrb_header; | ||
515 | struct FWCMD_COMMON_MODIFY_EQ_DELAY fwcmd; | ||
516 | } ; | ||
517 | |||
518 | /* | ||
519 | * The generic context is the largest size that would be required. | ||
520 | * It is the software context plus an entire WRB. | ||
521 | */ | ||
522 | struct be_generic_q_ctxt { | ||
523 | struct be_queue_driver_context context; | ||
524 | struct be_mcc_wrb_header wrb_header; | ||
525 | struct MCC_WRB_PAYLOAD_AMAP payload; | ||
526 | } ; | ||
527 | |||
528 | /* | ||
529 | * Types for the BE_QUEUE_CONTEXT object. | ||
530 | */ | ||
531 | #define BE_QUEUE_INVALID (0) | ||
532 | #define BE_QUEUE_LINK_STATUS (0xA006) | ||
533 | #define BE_QUEUE_ETH_STATS (0xA007) | ||
534 | #define BE_QUEUE_TPM_STATS (0xA008) | ||
535 | #define BE_QUEUE_TCP_STATS (0xA009) | ||
536 | #define BE_QUEUE_MULTICAST (0xA00A) | ||
537 | #define BE_QUEUE_VLAN (0xA00B) | ||
538 | #define BE_QUEUE_RSS (0xA00C) | ||
539 | #define BE_QUEUE_FORCE_FAILOVER (0xA00D) | ||
540 | #define BE_QUEUE_PROMISCUOUS (0xA00E) | ||
541 | #define BE_QUEUE_WAKE_ON_LAN (0xA00F) | ||
542 | #define BE_QUEUE_NOP (0xA010) | ||
543 | |||
544 | /* --- BE_FUNCTION_ENUM --- */ | ||
545 | #define BE_FUNCTION_TYPE_ISCSI (0) | ||
546 | #define BE_FUNCTION_TYPE_NETWORK (1) | ||
547 | #define BE_FUNCTION_TYPE_ARM (2) | ||
548 | |||
549 | /* --- BE_ETH_TX_RING_TYPE_ENUM --- */ | ||
550 | #define BE_ETH_TX_RING_TYPE_FORWARDING (1) /* Ether ring for forwarding */ | ||
551 | #define BE_ETH_TX_RING_TYPE_STANDARD (2) /* Ether ring for sending */ | ||
552 | /* network packets. */ | ||
553 | #define BE_ETH_TX_RING_TYPE_BOUND (3) /* Ethernet ring for sending */ | ||
554 | /* network packets, bound */ | ||
555 | /* to a physical port. */ | ||
556 | /* | ||
557 | * ---------------------------------------------------------------------- | ||
558 | * API MACROS | ||
559 | * ---------------------------------------------------------------------- | ||
560 | */ | ||
561 | #define BE_FWCMD_NAME(_short_name_) struct FWCMD_##_short_name_ | ||
562 | #define BE_OPCODE_NAME(_short_name_) OPCODE_##_short_name_ | ||
563 | #define BE_SUBSYSTEM_NAME(_short_name_) SUBSYSTEM_##_short_name_ | ||
564 | |||
565 | |||
566 | #define BE_PREPARE_EMBEDDED_FWCMD(_pfob_, _wrb_, _short_name_) \ | ||
567 | ((BE_FWCMD_NAME(_short_name_) *) \ | ||
568 | be_function_prepare_embedded_fwcmd(_pfob_, _wrb_, \ | ||
569 | sizeof(BE_FWCMD_NAME(_short_name_)), \ | ||
570 | FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.request), \ | ||
571 | FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.response), \ | ||
572 | BE_OPCODE_NAME(_short_name_), \ | ||
573 | BE_SUBSYSTEM_NAME(_short_name_))); | ||
574 | |||
575 | #define BE_PREPARE_NONEMBEDDED_FWCMD(_pfob_, _wrb_, _iva_, _ipa_, _short_name_)\ | ||
576 | ((BE_FWCMD_NAME(_short_name_) *) \ | ||
577 | be_function_prepare_nonembedded_fwcmd(_pfob_, _wrb_, (_iva_), (_ipa_), \ | ||
578 | sizeof(BE_FWCMD_NAME(_short_name_)), \ | ||
579 | FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.request), \ | ||
580 | FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.response), \ | ||
581 | BE_OPCODE_NAME(_short_name_), \ | ||
582 | BE_SUBSYSTEM_NAME(_short_name_))); | ||
583 | |||
584 | int be_function_object_create(u8 __iomem *csr_va, u8 __iomem *db_va, | ||
585 | u8 __iomem *pci_va, u32 function_type, struct ring_desc *mailbox_rd, | ||
586 | struct be_function_object *pfob); | ||
587 | |||
588 | int be_function_object_destroy(struct be_function_object *pfob); | ||
589 | int be_function_cleanup(struct be_function_object *pfob); | ||
590 | |||
591 | |||
592 | int be_function_get_fw_version(struct be_function_object *pfob, | ||
593 | struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD *fw_version, | ||
594 | mcc_wrb_cqe_callback cb, void *cb_context); | ||
595 | |||
596 | |||
597 | int be_eq_modify_delay(struct be_function_object *pfob, | ||
598 | u32 num_eq, struct be_eq_object **eq_array, | ||
599 | u32 *eq_delay_array, mcc_wrb_cqe_callback cb, | ||
600 | void *cb_context, | ||
601 | struct be_eq_modify_delay_q_ctxt *q_ctxt); | ||
602 | |||
603 | |||
604 | |||
605 | int be_eq_create(struct be_function_object *pfob, | ||
606 | struct ring_desc *rd, u32 eqe_size, u32 num_entries, | ||
607 | u32 watermark, u32 timer_delay, struct be_eq_object *eq_object); | ||
608 | |||
609 | int be_eq_destroy(struct be_eq_object *eq); | ||
610 | |||
611 | int be_cq_create(struct be_function_object *pfob, | ||
612 | struct ring_desc *rd, u32 length, | ||
613 | bool solicited_eventable, bool no_delay, | ||
614 | u32 wm_thresh, struct be_eq_object *eq_object, | ||
615 | struct be_cq_object *cq_object); | ||
616 | |||
617 | int be_cq_destroy(struct be_cq_object *cq); | ||
618 | |||
619 | int be_mcc_ring_create(struct be_function_object *pfob, | ||
620 | struct ring_desc *rd, u32 length, | ||
621 | struct be_mcc_wrb_context *context_array, | ||
622 | u32 num_context_entries, | ||
623 | struct be_cq_object *cq, struct be_mcc_object *mcc); | ||
624 | int be_mcc_ring_destroy(struct be_mcc_object *mcc_object); | ||
625 | |||
626 | int be_mcc_process_cq(struct be_mcc_object *mcc_object, bool rearm); | ||
627 | |||
628 | int be_mcc_add_async_event_callback(struct be_mcc_object *mcc_object, | ||
629 | mcc_async_event_callback cb, void *cb_context); | ||
630 | |||
631 | int be_pci_soft_reset(struct be_function_object *pfob); | ||
632 | |||
633 | |||
634 | int be_drive_POST(struct be_function_object *pfob); | ||
635 | |||
636 | |||
637 | int be_eth_sq_create(struct be_function_object *pfob, | ||
638 | struct ring_desc *rd, u32 length_in_bytes, | ||
639 | u32 type, u32 ulp, struct be_cq_object *cq_object, | ||
640 | struct be_ethsq_object *eth_sq); | ||
641 | |||
642 | struct be_eth_sq_parameters { | ||
643 | u32 port; | ||
644 | u32 rsvd0[2]; | ||
645 | } ; | ||
646 | |||
647 | int be_eth_sq_create_ex(struct be_function_object *pfob, | ||
648 | struct ring_desc *rd, u32 length_in_bytes, | ||
649 | u32 type, u32 ulp, struct be_cq_object *cq_object, | ||
650 | struct be_eth_sq_parameters *ex_parameters, | ||
651 | struct be_ethsq_object *eth_sq); | ||
652 | int be_eth_sq_destroy(struct be_ethsq_object *eth_sq); | ||
653 | |||
654 | int be_eth_set_flow_control(struct be_function_object *pfob, | ||
655 | bool txfc_enable, bool rxfc_enable); | ||
656 | |||
657 | int be_eth_get_flow_control(struct be_function_object *pfob, | ||
658 | bool *txfc_enable, bool *rxfc_enable); | ||
659 | int be_eth_set_qos(struct be_function_object *pfob, u32 max_bps, u32 max_pps); | ||
660 | |||
661 | int be_eth_get_qos(struct be_function_object *pfob, u32 *max_bps, u32 *max_pps); | ||
662 | |||
663 | int be_eth_set_frame_size(struct be_function_object *pfob, | ||
664 | u32 *tx_frame_size, u32 *rx_frame_size); | ||
665 | |||
666 | int be_eth_rq_create(struct be_function_object *pfob, | ||
667 | struct ring_desc *rd, struct be_cq_object *cq_object, | ||
668 | struct be_cq_object *bcmc_cq_object, | ||
669 | struct be_ethrq_object *eth_rq); | ||
670 | |||
671 | int be_eth_rq_destroy(struct be_ethrq_object *eth_rq); | ||
672 | |||
673 | int be_eth_rq_destroy_options(struct be_ethrq_object *eth_rq, bool flush, | ||
674 | mcc_wrb_cqe_callback cb, void *cb_context); | ||
675 | int be_eth_rq_set_frag_size(struct be_function_object *pfob, | ||
676 | u32 new_frag_size_bytes, u32 *actual_frag_size_bytes); | ||
677 | int be_eth_rq_get_frag_size(struct be_function_object *pfob, | ||
678 | u32 *frag_size_bytes); | ||
679 | |||
680 | void *be_function_prepare_embedded_fwcmd(struct be_function_object *pfob, | ||
681 | struct MCC_WRB_AMAP *wrb, | ||
682 | u32 payload_length, u32 request_length, | ||
683 | u32 response_length, u32 opcode, u32 subsystem); | ||
684 | void *be_function_prepare_nonembedded_fwcmd(struct be_function_object *pfob, | ||
685 | struct MCC_WRB_AMAP *wrb, void *fwcmd_header_va, u64 fwcmd_header_pa, | ||
686 | u32 payload_length, u32 request_length, u32 response_length, | ||
687 | u32 opcode, u32 subsystem); | ||
688 | |||
689 | |||
690 | struct MCC_WRB_AMAP * | ||
691 | be_function_peek_mcc_wrb(struct be_function_object *pfob); | ||
692 | |||
693 | int be_rxf_mac_address_read_write(struct be_function_object *pfob, | ||
694 | bool port1, bool mac1, bool mgmt, | ||
695 | bool write, bool permanent, u8 *mac_address, | ||
696 | mcc_wrb_cqe_callback cb, | ||
697 | void *cb_context); | ||
698 | |||
699 | int be_rxf_multicast_config(struct be_function_object *pfob, | ||
700 | bool promiscuous, u32 num, u8 *mac_table, | ||
701 | mcc_wrb_cqe_callback cb, | ||
702 | void *cb_context, | ||
703 | struct be_multicast_q_ctxt *q_ctxt); | ||
704 | |||
705 | int be_rxf_vlan_config(struct be_function_object *pfob, | ||
706 | bool promiscuous, u32 num, u16 *vlan_tag_array, | ||
707 | mcc_wrb_cqe_callback cb, void *cb_context, | ||
708 | struct be_vlan_q_ctxt *q_ctxt); | ||
709 | |||
710 | |||
711 | int be_rxf_link_status(struct be_function_object *pfob, | ||
712 | struct BE_LINK_STATUS *link_status, | ||
713 | mcc_wrb_cqe_callback cb, | ||
714 | void *cb_context, | ||
715 | struct be_link_status_q_ctxt *q_ctxt); | ||
716 | |||
717 | |||
718 | int be_rxf_query_eth_statistics(struct be_function_object *pfob, | ||
719 | struct FWCMD_ETH_GET_STATISTICS *va_for_fwcmd, | ||
720 | u64 pa_for_fwcmd, mcc_wrb_cqe_callback cb, | ||
721 | void *cb_context, | ||
722 | struct be_nonembedded_q_ctxt *q_ctxt); | ||
723 | |||
724 | int be_rxf_promiscuous(struct be_function_object *pfob, | ||
725 | bool enable_port0, bool enable_port1, | ||
726 | mcc_wrb_cqe_callback cb, void *cb_context, | ||
727 | struct be_promiscuous_q_ctxt *q_ctxt); | ||
728 | |||
729 | |||
730 | int be_rxf_filter_config(struct be_function_object *pfob, | ||
731 | struct NTWK_RX_FILTER_SETTINGS *settings, | ||
732 | mcc_wrb_cqe_callback cb, | ||
733 | void *cb_context, | ||
734 | struct be_rxf_filter_q_ctxt *q_ctxt); | ||
735 | |||
736 | /* | ||
737 | * ------------------------------------------------------ | ||
738 | * internal functions used by hwlib | ||
739 | * ------------------------------------------------------ | ||
740 | */ | ||
741 | |||
742 | |||
743 | int be_function_ring_destroy(struct be_function_object *pfob, | ||
744 | u32 id, u32 ring_type, mcc_wrb_cqe_callback cb, | ||
745 | void *cb_context, | ||
746 | mcc_wrb_cqe_callback internal_cb, | ||
747 | void *internal_callback_context); | ||
748 | |||
749 | int be_function_post_mcc_wrb(struct be_function_object *pfob, | ||
750 | struct MCC_WRB_AMAP *wrb, | ||
751 | struct be_generic_q_ctxt *q_ctxt, | ||
752 | mcc_wrb_cqe_callback cb, void *cb_context, | ||
753 | mcc_wrb_cqe_callback internal_cb, | ||
754 | void *internal_cb_context, void *optional_fwcmd_va, | ||
755 | struct be_mcc_wrb_response_copy *response_copy); | ||
756 | |||
757 | int be_function_queue_mcc_wrb(struct be_function_object *pfob, | ||
758 | struct be_generic_q_ctxt *q_ctxt); | ||
759 | |||
760 | /* | ||
761 | * ------------------------------------------------------ | ||
762 | * MCC QUEUE | ||
763 | * ------------------------------------------------------ | ||
764 | */ | ||
765 | |||
766 | int be_mpu_init_mailbox(struct be_function_object *pfob, struct ring_desc *rd); | ||
767 | |||
768 | |||
769 | struct MCC_WRB_AMAP * | ||
770 | _be_mpu_peek_ring_wrb(struct be_mcc_object *mcc, bool driving_queue); | ||
771 | |||
772 | struct be_mcc_wrb_context * | ||
773 | _be_mcc_allocate_wrb_context(struct be_function_object *pfob); | ||
774 | |||
775 | void _be_mcc_free_wrb_context(struct be_function_object *pfob, | ||
776 | struct be_mcc_wrb_context *context); | ||
777 | |||
778 | int _be_mpu_post_wrb_mailbox(struct be_function_object *pfob, | ||
779 | struct MCC_WRB_AMAP *wrb, struct be_mcc_wrb_context *wrb_context); | ||
780 | |||
781 | int _be_mpu_post_wrb_ring(struct be_mcc_object *mcc, | ||
782 | struct MCC_WRB_AMAP *wrb, struct be_mcc_wrb_context *wrb_context); | ||
783 | |||
784 | void be_drive_mcc_wrb_queue(struct be_mcc_object *mcc); | ||
785 | |||
786 | |||
787 | /* | ||
788 | * ------------------------------------------------------ | ||
789 | * Ring Sizes | ||
790 | * ------------------------------------------------------ | ||
791 | */ | ||
792 | static inline u32 be_ring_encoding_to_length(u32 encoding, u32 object_size) | ||
793 | { | ||
794 | |||
795 | ASSERT(encoding != 1); /* 1 is rsvd */ | ||
796 | ASSERT(encoding < 16); | ||
797 | ASSERT(object_size > 0); | ||
798 | |||
799 | if (encoding == 0) /* 32k deep */ | ||
800 | encoding = 16; | ||
801 | |||
802 | return (1 << (encoding - 1)) * object_size; | ||
803 | } | ||
804 | |||
805 | static inline | ||
806 | u32 be_ring_length_to_encoding(u32 length_in_bytes, u32 object_size) | ||
807 | { | ||
808 | |||
809 | u32 count, encoding; | ||
810 | |||
811 | ASSERT(object_size > 0); | ||
812 | ASSERT(length_in_bytes % object_size == 0); | ||
813 | |||
814 | count = length_in_bytes / object_size; | ||
815 | |||
816 | ASSERT(count > 1); | ||
817 | ASSERT(count <= 32 * 1024); | ||
818 | ASSERT(length_in_bytes <= 8 * PAGE_SIZE); /* max ring size in UT */ | ||
819 | |||
820 | encoding = __ilog2_u32(count) + 1; | ||
821 | |||
822 | if (encoding == 16) | ||
823 | encoding = 0; /* 32k deep */ | ||
824 | |||
825 | return encoding; | ||
826 | } | ||
827 | |||
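
The hardware encodes ring depth as a 4-bit power-of-two exponent, with 1 reserved and 0 standing for the maximum depth of 32k entries; the two helpers above convert between that encoding and a byte length. A quick standalone round-trip check of the same mapping over entry counts (ilog2 inlined, asserts and the object-size scaling dropped):

#include <stdio.h>
#include <stdint.h>

static uint32_t ilog2_u32(uint32_t v)          /* floor(log2(v)), v > 0 */
{
	uint32_t r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static uint32_t encoding_to_count(uint32_t enc)
{
	if (enc == 0)                          /* 0 encodes 32k deep */
		enc = 16;
	return 1u << (enc - 1);
}

static uint32_t count_to_encoding(uint32_t count)
{
	uint32_t enc = ilog2_u32(count) + 1;

	return (enc == 16) ? 0 : enc;          /* 32k deep encodes as 0 */
}

int main(void)
{
	uint32_t enc;

	for (enc = 2; enc < 16; enc++)         /* 1 is reserved, 0 is 32k */
		printf("enc=%2u count=%5u back=%2u\n", enc,
		       encoding_to_count(enc),
		       count_to_encoding(encoding_to_count(enc)));
	printf("enc= 0 count=%u back=%u\n",
	       encoding_to_count(0), count_to_encoding(encoding_to_count(0)));
	return 0;
}
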
828 | void be_rd_to_pa_list(struct ring_desc *rd, struct PHYS_ADDR *pa_list, | ||
829 | u32 max_num); | ||
830 | #endif /* __hwlib_h__ */ | ||
diff --git a/drivers/staging/benet/mpu.c b/drivers/staging/benet/mpu.c deleted file mode 100644 index 269cc11d3055..000000000000 --- a/drivers/staging/benet/mpu.c +++ /dev/null | |||
@@ -1,1364 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | #include <linux/delay.h> | ||
18 | #include "hwlib.h" | ||
19 | #include "bestatus.h" | ||
20 | |||
21 | static | ||
22 | inline void mp_ring_create(struct mp_ring *ring, u32 num, u32 size, void *va) | ||
23 | { | ||
24 | ASSERT(ring); | ||
25 | memset(ring, 0, sizeof(struct mp_ring)); | ||
26 | ring->num = num; | ||
27 | ring->pages = DIV_ROUND_UP(num * size, PAGE_SIZE); | ||
28 | ring->itemSize = size; | ||
29 | ring->va = va; | ||
30 | } | ||
31 | |||
32 | /* | ||
33 | * ----------------------------------------------------------------------- | ||
34 | * Interface for 2-index rings, i.e. consumer/producer rings | ||
35 | * -------------------------------------------------------------------------- | ||
36 | */ | ||
37 | |||
38 | /* Returns the number of items pending on the ring. */ | ||
39 | static inline u32 mp_ring_num_pending(struct mp_ring *ring) | ||
40 | { | ||
41 | ASSERT(ring); | ||
42 | if (ring->num == 0) | ||
43 | return 0; | ||
44 | return be_subc(ring->pidx, ring->cidx, ring->num); | ||
45 | } | ||
46 | |||
47 | /* Returns the number of items free on the ring. */ | ||
48 | static inline u32 mp_ring_num_empty(struct mp_ring *ring) | ||
49 | { | ||
50 | ASSERT(ring); | ||
51 | return ring->num - 1 - mp_ring_num_pending(ring); | ||
52 | } | ||
53 | |||
54 | /* Consume 1 item */ | ||
55 | static inline void mp_ring_consume(struct mp_ring *ring) | ||
56 | { | ||
57 | ASSERT(ring); | ||
58 | ASSERT(ring->pidx != ring->cidx); | ||
59 | |||
60 | ring->cidx = be_addc(ring->cidx, 1, ring->num); | ||
61 | } | ||
62 | |||
63 | /* Produce 1 item */ | ||
64 | static inline void mp_ring_produce(struct mp_ring *ring) | ||
65 | { | ||
66 | ASSERT(ring); | ||
67 | ring->pidx = be_addc(ring->pidx, 1, ring->num); | ||
68 | } | ||
69 | |||
70 | /* Consume count items */ | ||
71 | static inline void mp_ring_consume_multiple(struct mp_ring *ring, u32 count) | ||
72 | { | ||
73 | ASSERT(ring); | ||
74 | ASSERT(mp_ring_num_pending(ring) >= count); | ||
75 | ring->cidx = be_addc(ring->cidx, count, ring->num); | ||
76 | } | ||
77 | |||
78 | static inline void *mp_ring_item(struct mp_ring *ring, u32 index) | ||
79 | { | ||
80 | ASSERT(ring); | ||
81 | ASSERT(index < ring->num); | ||
82 | ASSERT(ring->itemSize > 0); | ||
83 | return (u8 *) ring->va + index * ring->itemSize; | ||
84 | } | ||
85 | |||
86 | /* Ptr to produce item */ | ||
87 | static inline void *mp_ring_producer_ptr(struct mp_ring *ring) | ||
88 | { | ||
89 | ASSERT(ring); | ||
90 | return mp_ring_item(ring, ring->pidx); | ||
91 | } | ||
92 | |||
93 | /* | ||
94 | * Returns a pointer to the current location in the ring. | ||
95 | * This is used for rings with 1 index. | ||
96 | */ | ||
97 | static inline void *mp_ring_current(struct mp_ring *ring) | ||
98 | { | ||
99 | ASSERT(ring); | ||
100 | ASSERT(ring->pidx == 0); /* not used */ | ||
101 | |||
102 | return mp_ring_item(ring, ring->cidx); | ||
103 | } | ||
104 | |||
105 | /* | ||
106 | * Increment the index and return the new current location. | ||
107 | * This is used for rings with only 1 index. | ||
108 | */ | ||
109 | static inline void *mp_ring_next(struct mp_ring *ring) | ||
110 | { | ||
111 | ASSERT(ring); | ||
112 | ASSERT(ring->num > 0); | ||
113 | ASSERT(ring->pidx == 0); /* not used */ | ||
114 | |||
115 | ring->cidx = be_addc(ring->cidx, 1, ring->num); | ||
116 | return mp_ring_current(ring); | ||
117 | } | ||
118 | |||
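
For the two-index (producer/consumer) rings above, one slot is always left unused so that pidx == cidx can unambiguously mean "empty", which is why mp_ring_num_empty() returns num - 1 - pending. A minimal userspace sketch of the same bookkeeping, reusing the wrap helpers shown earlier (the struct and function names here are illustrative):

#include <stdio.h>
#include <stdint.h>

struct demo_ring {
	uint32_t num;       /* total slots    */
	uint32_t cidx;      /* consumer index */
	uint32_t pidx;      /* producer index */
};

static uint32_t wrap_add(uint32_t a, uint32_t b, uint32_t max)
{
	return (max - a > b) ? (a + b) : (b + a - max);
}

static uint32_t wrap_sub(uint32_t a, uint32_t b, uint32_t max)
{
	return a >= b ? (a - b) : (max - b + a);
}

static uint32_t pending(struct demo_ring *r)
{
	return wrap_sub(r->pidx, r->cidx, r->num);
}

static uint32_t empty(struct demo_ring *r)
{
	return r->num - 1 - pending(r);      /* one slot kept unused */
}

int main(void)
{
	struct demo_ring r = { .num = 4, .cidx = 0, .pidx = 0 };

	r.pidx = wrap_add(r.pidx, 1, r.num);          /* produce */
	r.pidx = wrap_add(r.pidx, 1, r.num);          /* produce */
	printf("pending=%u empty=%u\n", pending(&r), empty(&r)); /* 2, 1 */
	r.cidx = wrap_add(r.cidx, 1, r.num);          /* consume */
	printf("pending=%u empty=%u\n", pending(&r), empty(&r)); /* 1, 2 */
	return 0;
}
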
119 | /* | ||
120 | This routine waits for a previously posted mailbox WRB to be completed. | ||
121 | Specifically, it waits for the mailbox to say that it is ready to accept | ||
122 | more data by setting the LSB of the mailbox pd register to 1. | ||
123 | |||
124 | pcontroller - The function object to post this data to | ||
125 | |||
126 | IRQL < DISPATCH_LEVEL | ||
127 | */ | ||
128 | static void be_mcc_mailbox_wait(struct be_function_object *pfob) | ||
129 | { | ||
130 | struct MPU_MAILBOX_DB_AMAP mailbox_db; | ||
131 | u32 i = 0; | ||
132 | u32 ready; | ||
133 | |||
134 | if (pfob->emulate) { | ||
135 | /* No waiting for mailbox in emulated mode. */ | ||
136 | return; | ||
137 | } | ||
138 | |||
139 | mailbox_db.dw[0] = PD_READ(pfob, mcc_bootstrap_db); | ||
140 | ready = AMAP_GET_BITS_PTR(MPU_MAILBOX_DB, ready, &mailbox_db); | ||
141 | |||
142 | while (ready == false) { | ||
143 | if ((++i & 0x3FFFF) == 0) { | ||
144 | TRACE(DL_WARN, "Waiting for mailbox ready - %dk polls", | ||
145 | i / 1000); | ||
146 | } | ||
147 | udelay(1); | ||
148 | mailbox_db.dw[0] = PD_READ(pfob, mcc_bootstrap_db); | ||
149 | ready = AMAP_GET_BITS_PTR(MPU_MAILBOX_DB, ready, &mailbox_db); | ||
150 | } | ||
151 | } | ||
152 | |||
153 | /* | ||
154 | This routine tells the MCC mailbox that there is data to be processed | ||
155 | in the mailbox. It does this by setting the physical address for the | ||
156 | mailbox location and clearing the LSB. This routine returns immediately | ||
157 | and does not wait for the WRB to be processed. | ||
158 | |||
159 | pcontroller - The function object to post this data to | ||
160 | |||
161 | IRQL < DISPATCH_LEVEL | ||
162 | |||
163 | */ | ||
164 | static void be_mcc_mailbox_notify(struct be_function_object *pfob) | ||
165 | { | ||
166 | struct MPU_MAILBOX_DB_AMAP mailbox_db; | ||
167 | u32 pa; | ||
168 | |||
169 | ASSERT(pfob->mailbox.pa); | ||
170 | ASSERT(pfob->mailbox.va); | ||
171 | |||
172 | /* If emulated, do not ring the mailbox */ | ||
173 | if (pfob->emulate) { | ||
174 | TRACE(DL_WARN, "MPU disabled. Skipping mailbox notify."); | ||
175 | return; | ||
176 | } | ||
177 | |||
178 | /* form the higher bits in the address */ | ||
179 | mailbox_db.dw[0] = 0; /* init */ | ||
180 | AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, hi, &mailbox_db, 1); | ||
181 | AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, ready, &mailbox_db, 0); | ||
182 | |||
183 | /* bits 34 to 63 */ | ||
184 | pa = (u32) (pfob->mailbox.pa >> 34); | ||
185 | AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, address, &mailbox_db, pa); | ||
186 | |||
187 | /* Wait for the MPU to be ready */ | ||
188 | be_mcc_mailbox_wait(pfob); | ||
189 | |||
190 | /* Ring doorbell 1st time */ | ||
191 | PD_WRITE(pfob, mcc_bootstrap_db, mailbox_db.dw[0]); | ||
192 | |||
193 | /* Wait for 1st write to be acknowledged. */ | ||
194 | be_mcc_mailbox_wait(pfob); | ||
195 | |||
196 | /* lower 30 bits of the address, from bit 4 (bits 4 to 33) */ | ||
197 | pa = (u32) (pfob->mailbox.pa >> 4) & 0x3FFFFFFF; | ||
198 | |||
199 | AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, hi, &mailbox_db, 0); | ||
200 | AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, ready, &mailbox_db, 0); | ||
201 | AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, address, &mailbox_db, pa); | ||
202 | |||
203 | /* Ring doorbell 2nd time */ | ||
204 | PD_WRITE(pfob, mcc_bootstrap_db, mailbox_db.dw[0]); | ||
205 | } | ||
206 | |||
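
be_mcc_mailbox_notify() delivers the 64-bit mailbox physical address through a 32-bit doorbell in two writes: first bits 34..63 with the hi flag set, then bits 4..33 with hi clear, waiting for the ready bit before each doorbell write. A small standalone check of that address split (the bit ranges are taken from the code above; the example address is arbitrary):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t pa = 0x000000012345A000ULL;   /* example 16-byte aligned PA */

	uint32_t hi_bits = (uint32_t)(pa >> 34);              /* bits 34..63 */
	uint32_t lo_bits = (uint32_t)(pa >> 4) & 0x3FFFFFFF;  /* bits 4..33  */

	/* The low 4 bits of the address are never transferred; the doorbell
	 * uses those bit positions for its hi/ready flags, which implies the
	 * mailbox is assumed to be at least 16-byte aligned. */
	uint64_t rebuilt = ((uint64_t)hi_bits << 34) | ((uint64_t)lo_bits << 4);

	printf("hi=0x%x lo=0x%08x rebuilt=0x%llx match=%d\n",
	       hi_bits, lo_bits, (unsigned long long)rebuilt, rebuilt == pa);
	return 0;
}
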
207 | /* | ||
208 | This routine tells the MCC mailbox that there is data to be processed | ||
209 | in the mailbox. It does this by setting the physical address for the | ||
210 | mailbox location and clearing the LSB. This routine spins until the | ||
211 | MPU writes a 1 into the LSB indicating that the data has been received | ||
212 | and is ready to be processed. | ||
213 | |||
214 | pcontroller - The function object to post this data to | ||
215 | |||
216 | IRQL < DISPATCH_LEVEL | ||
217 | */ | ||
218 | static void | ||
219 | be_mcc_mailbox_notify_and_wait(struct be_function_object *pfob) | ||
220 | { | ||
221 | /* | ||
222 | * Notify it | ||
223 | */ | ||
224 | be_mcc_mailbox_notify(pfob); | ||
225 | /* | ||
226 | * Now wait for completion of WRB | ||
227 | */ | ||
228 | be_mcc_mailbox_wait(pfob); | ||
229 | } | ||
230 | |||
231 | void | ||
232 | be_mcc_process_cqe(struct be_function_object *pfob, | ||
233 | struct MCC_CQ_ENTRY_AMAP *cqe) | ||
234 | { | ||
235 | struct be_mcc_wrb_context *wrb_context = NULL; | ||
236 | u32 offset, status; | ||
237 | u8 *p; | ||
238 | |||
239 | ASSERT(cqe); | ||
240 | /* | ||
241 | * A command completed. Commands complete out-of-order. | ||
242 | * Determine which command completed from the TAG. | ||
243 | */ | ||
244 | offset = offsetof(struct BE_MCC_CQ_ENTRY_AMAP, mcc_tag)/8; | ||
245 | p = (u8 *) cqe + offset; | ||
246 | wrb_context = (struct be_mcc_wrb_context *)(void *)(size_t)(*(u64 *)p); | ||
247 | ASSERT(wrb_context); | ||
248 | |||
249 | /* | ||
250 | * Perform a response copy if requested. | ||
251 | * Only copy data if the FWCMD is successful. | ||
252 | */ | ||
253 | status = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, completion_status, cqe); | ||
254 | if (status == MGMT_STATUS_SUCCESS && wrb_context->copy.length > 0) { | ||
255 | ASSERT(wrb_context->wrb); | ||
256 | ASSERT(wrb_context->copy.va); | ||
257 | p = (u8 *)wrb_context->wrb + | ||
258 | offsetof(struct BE_MCC_WRB_AMAP, payload)/8; | ||
259 | memcpy(wrb_context->copy.va, | ||
260 | (u8 *)p + wrb_context->copy.fwcmd_offset, | ||
261 | wrb_context->copy.length); | ||
262 | } | ||
263 | |||
264 | if (status) | ||
265 | status = BE_NOT_OK; | ||
266 | /* internal callback */ | ||
267 | if (wrb_context->internal_cb) { | ||
268 | wrb_context->internal_cb(wrb_context->internal_cb_context, | ||
269 | status, wrb_context->wrb); | ||
270 | } | ||
271 | |||
272 | /* callback */ | ||
273 | if (wrb_context->cb) { | ||
274 | wrb_context->cb(wrb_context->cb_context, | ||
275 | status, wrb_context->wrb); | ||
276 | } | ||
277 | /* Free the context structure */ | ||
278 | _be_mcc_free_wrb_context(pfob, wrb_context); | ||
279 | } | ||
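be_mcc_process_cqe() recovers the posting thread's be_mcc_wrb_context by reading a 64-bit pointer back out of the CQE's mcc_tag field. For that to work, the post path must have stashed the same pointer in the WRB's tag when the command was submitted. A hedged sketch of that counterpart step follows; the mcc_tag field inside BE_MCC_WRB_AMAP and the example_ helper name are assumptions used only to illustrate the round trip, not the driver's actual post code.

static void example_stash_wrb_tag(struct MCC_WRB_AMAP *wrb,
				  struct be_mcc_wrb_context *wrb_context)
{
	/* Mirror of the read above: AMAP bit offset / 8 gives the byte offset. */
	u8 *p = (u8 *)wrb + offsetof(struct BE_MCC_WRB_AMAP, mcc_tag)/8;

	/* Hardware echoes the tag into the CQE, so the completion handler
	 * can map the CQE back to this context even when completions
	 * arrive out-of-order. */
	*(u64 *)p = (u64)(size_t)wrb_context;
}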
280 | |||
281 | void be_drive_mcc_wrb_queue(struct be_mcc_object *mcc) | ||
282 | { | ||
283 | struct be_function_object *pfob = NULL; | ||
284 | int status = BE_PENDING; | ||
285 | struct be_generic_q_ctxt *q_ctxt; | ||
286 | struct MCC_WRB_AMAP *wrb; | ||
287 | struct MCC_WRB_AMAP *queue_wrb; | ||
288 | u32 length, payload_length, sge_count, embedded; | ||
289 | unsigned long irql; | ||
290 | |||
291 | BUILD_BUG_ON((sizeof(struct be_generic_q_ctxt) < | ||
292 | sizeof(struct be_queue_driver_context) + | ||
293 | sizeof(struct MCC_WRB_AMAP))); | ||
294 | pfob = mcc->parent_function; | ||
295 | |||
296 | spin_lock_irqsave(&pfob->post_lock, irql); | ||
297 | |||
298 | if (mcc->driving_backlog) { | ||
299 | spin_unlock_irqrestore(&pfob->post_lock, irql); | ||
300 | if (pfob->pend_queue_driving && pfob->mcc) { | ||
301 | pfob->pend_queue_driving = 0; | ||
302 | be_drive_mcc_wrb_queue(pfob->mcc); | ||
303 | } | ||
304 | return; | ||
305 | } | ||
306 | /* Acquire the flag to limit 1 thread to redrive posts. */ | ||
307 | mcc->driving_backlog = 1; | ||
308 | |||
309 | while (!list_empty(&mcc->backlog)) { | ||
310 | wrb = _be_mpu_peek_ring_wrb(mcc, true); /* Driving the queue */ | ||
311 | if (!wrb) | ||
312 | break; /* No space in the ring yet. */ | ||
313 | /* Get the next queued entry to process. */ | ||
314 | q_ctxt = list_first_entry(&mcc->backlog, | ||
315 | struct be_generic_q_ctxt, context.list); | ||
316 | list_del(&q_ctxt->context.list); | ||
317 | pfob->mcc->backlog_length--; | ||
318 | /* | ||
319 | * Compute the required length of the WRB. | ||
320 | * Since the queue element may be smaller than | ||
321 | * the complete WRB, copy only the required number of bytes. | ||
322 | */ | ||
323 | queue_wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header; | ||
324 | embedded = AMAP_GET_BITS_PTR(MCC_WRB, embedded, queue_wrb); | ||
325 | if (embedded) { | ||
326 | payload_length = AMAP_GET_BITS_PTR(MCC_WRB, | ||
327 | payload_length, queue_wrb); | ||
328 | length = sizeof(struct be_mcc_wrb_header) + | ||
329 | payload_length; | ||
330 | } else { | ||
331 | sge_count = AMAP_GET_BITS_PTR(MCC_WRB, sge_count, | ||
332 | queue_wrb); | ||
333 | ASSERT(sge_count == 1); /* only 1 frag. */ | ||
334 | length = sizeof(struct be_mcc_wrb_header) + | ||
335 | sge_count * sizeof(struct MCC_SGE_AMAP); | ||
336 | } | ||
337 | |||
338 | /* | ||
339 | * Truncate the length based on the size of the | ||
340 | * queue element. Some elements that have output parameters | ||
341 | * can be smaller than the payload_length field would | ||
342 | * indicate. We really only need to copy the request | ||
343 | * parameters, not the response. | ||
344 | */ | ||
345 | length = min(length, (u32) (q_ctxt->context.bytes - | ||
346 | offsetof(struct be_generic_q_ctxt, wrb_header))); | ||
347 | |||
348 | /* Copy the queue element WRB into the ring. */ | ||
349 | memcpy(wrb, &q_ctxt->wrb_header, length); | ||
350 | |||
351 | /* Post the wrb. This should not fail assuming we have | ||
352 | * enough context structs. */ | ||
353 | status = be_function_post_mcc_wrb(pfob, wrb, NULL, | ||
354 | q_ctxt->context.cb, q_ctxt->context.cb_context, | ||
355 | q_ctxt->context.internal_cb, | ||
356 | q_ctxt->context.internal_cb_context, | ||
357 | q_ctxt->context.optional_fwcmd_va, | ||
358 | &q_ctxt->context.copy); | ||
359 | |||
360 | if (status == BE_SUCCESS) { | ||
361 | /* | ||
362 | * Synchronous completion. Since it was queued, | ||
363 | * we will invoke the callback. | ||
364 | * To the user, this is an asynchronous request. | ||
365 | */ | ||
366 | spin_unlock_irqrestore(&pfob->post_lock, irql); | ||
367 | if (pfob->pend_queue_driving && pfob->mcc) { | ||
368 | pfob->pend_queue_driving = 0; | ||
369 | be_drive_mcc_wrb_queue(pfob->mcc); | ||
370 | } | ||
371 | |||
372 | ASSERT(q_ctxt->context.cb); | ||
373 | |||
374 | q_ctxt->context.cb( | ||
375 | q_ctxt->context.cb_context, | ||
376 | BE_SUCCESS, NULL); | ||
377 | |||
378 | spin_lock_irqsave(&pfob->post_lock, irql); | ||
379 | |||
380 | } else if (status != BE_PENDING) { | ||
381 | /* | ||
382 | * Another resource failed. Should never happen | ||
383 | * if we have sufficient MCC_WRB_CONTEXT structs. | ||
384 | * Return to head of the queue. | ||
385 | */ | ||
386 | TRACE(DL_WARN, "Failed to post a queued WRB. 0x%x", | ||
387 | status); | ||
388 | list_add(&q_ctxt->context.list, &mcc->backlog); | ||
389 | pfob->mcc->backlog_length++; | ||
390 | break; | ||
391 | } | ||
392 | } | ||
393 | |||
394 | /* Free the flag to limit 1 thread to redrive posts. */ | ||
395 | mcc->driving_backlog = 0; | ||
396 | spin_unlock_irqrestore(&pfob->post_lock, irql); | ||
397 | } | ||
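be_drive_mcc_wrb_queue() only drains mcc->backlog; the enqueue side happens when a post finds no ring space. As a rough sketch of that counterpart, under the assumption that the caller already holds pfob->post_lock (as the drain loop does) and that q_ctxt embeds the WRB to be replayed, the queuing step could look like the code below; the example_ name and the choice of sizeof(*q_ctxt) for context.bytes are illustrative, not taken from the driver.

static void example_backlog_wrb(struct be_mcc_object *mcc,
				struct be_generic_q_ctxt *q_ctxt,
				mcc_wrb_cqe_callback cb, void *cb_context)
{
	/* Record what the drain loop above will read back later. */
	q_ctxt->context.bytes = sizeof(*q_ctxt);
	q_ctxt->context.cb = cb;
	q_ctxt->context.cb_context = cb_context;

	/* Park the request; be_drive_mcc_wrb_queue() replays it in order
	 * once _be_mpu_peek_ring_wrb() reports free ring space. */
	list_add_tail(&q_ctxt->context.list, &mcc->backlog);
	mcc->backlog_length++;
}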
398 | |||
399 | /* This function asserts that the WRB was consumed in order. */ | ||
400 | #ifdef BE_DEBUG | ||
401 | u32 be_mcc_wrb_consumed_in_order(struct be_mcc_object *mcc, | ||
402 | struct MCC_CQ_ENTRY_AMAP *cqe) | ||
403 | { | ||
404 | struct be_mcc_wrb_context *wrb_context = NULL; | ||
405 | u32 wrb_index; | ||
406 | u32 wrb_consumed_in_order; | ||
407 | u32 offset; | ||
408 | u8 *p; | ||
409 | |||
410 | ASSERT(cqe); | ||
411 | /* | ||
412 | * A command completed. Commands complete out-of-order. | ||
413 | * Determine which command completed from the TAG. | ||
414 | */ | ||
415 | offset = offsetof(struct BE_MCC_CQ_ENTRY_AMAP, mcc_tag)/8; | ||
416 | p = (u8 *) cqe + offset; | ||
417 | wrb_context = (struct be_mcc_wrb_context *)(void *)(size_t)(*(u64 *)p); | ||
418 | |||
419 | ASSERT(wrb_context); | ||
420 | |||
421 | wrb_index = (u32) (((u64)(size_t)wrb_context->ring_wrb - | ||
422 | (u64)(size_t)mcc->sq.ring.va) / sizeof(struct MCC_WRB_AMAP)); | ||
423 | |||
424 | ASSERT(wrb_index < mcc->sq.ring.num); | ||
425 | |||
426 | wrb_consumed_in_order = (u32) (wrb_index == mcc->consumed_index); | ||
427 | mcc->consumed_index = be_addc(mcc->consumed_index, 1, mcc->sq.ring.num); | ||
428 | return wrb_consumed_in_order; | ||
429 | } | ||
430 | #endif | ||
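be_mcc_wrb_consumed_in_order() advances the debug consumer index with be_addc(), which is defined elsewhere in the driver. Its use here implies a simple add-with-wrap over the ring size; a plausible stand-in, shown purely for illustration, is:

/* Illustrative only: add-with-wrap as implied by the call above. */
static inline u32 example_addc(u32 base, u32 addend, u32 limit)
{
	return (base + addend) % limit;
}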
431 | |||
432 | int be_mcc_process_cq(struct be_mcc_object *mcc, bool rearm) | ||
433 | { | ||
434 | struct be_function_object *pfob = NULL; | ||
435 | struct MCC_CQ_ENTRY_AMAP *cqe; | ||
436 | struct CQ_DB_AMAP db; | ||
437 | struct mp_ring *cq_ring = &mcc->cq.ring; | ||
438 | struct mp_ring *mp_ring = &mcc->sq.ring; | ||
439 | u32 num_processed = 0; | ||
440 | u32 consumed = 0, valid, completed, cqe_consumed, async_event; | ||
441 | |||
442 | pfob = mcc->parent_function; | ||
443 | |||
444 | spin_lock_irqsave(&pfob->cq_lock, pfob->cq_irq); | ||
445 | |||
446 | /* | ||
447 | * Verify that only one thread is processing the CQ at once. | ||
448 | * We cannot hold the lock while processing the CQ due to | ||
449 | * the callbacks into the OS. Therefore, this flag is used | ||
450 | * to control it. If any of the threads want to | ||
451 | * rearm the CQ, we need to honor that. | ||
452 | */ | ||
453 | if (mcc->processing != 0) { | ||
454 | mcc->rearm = mcc->rearm || rearm; | ||
455 | goto Error; | ||
456 | } else { | ||
457 | mcc->processing = 1; /* lock processing for this thread. */ | ||
458 | mcc->rearm = rearm; /* set our rearm setting */ | ||
459 | } | ||
460 | |||
461 | spin_unlock_irqrestore(&pfob->cq_lock, pfob->cq_irq); | ||
462 | |||
463 | cqe = mp_ring_current(cq_ring); | ||
464 | valid = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, valid, cqe); | ||
465 | while (valid) { | ||
466 | |||
467 | if (num_processed >= 8) { | ||
468 | /* coalesce doorbells, but free space in cq | ||
469 | * ring while processing. */ | ||
470 | db.dw[0] = 0; /* clear */ | ||
471 | AMAP_SET_BITS_PTR(CQ_DB, qid, &db, cq_ring->id); | ||
472 | AMAP_SET_BITS_PTR(CQ_DB, rearm, &db, false); | ||
473 | AMAP_SET_BITS_PTR(CQ_DB, event, &db, false); | ||
474 | AMAP_SET_BITS_PTR(CQ_DB, num_popped, &db, | ||
475 | num_processed); | ||
476 | num_processed = 0; | ||
477 | |||
478 | PD_WRITE(pfob, cq_db, db.dw[0]); | ||
479 | } | ||
480 | |||
481 | async_event = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, async_event, cqe); | ||
482 | if (async_event) { | ||
483 | /* This is an asynchronous event. */ | ||
484 | struct ASYNC_EVENT_TRAILER_AMAP *async_trailer = | ||
485 | (struct ASYNC_EVENT_TRAILER_AMAP *) | ||
486 | ((u8 *) cqe + sizeof(struct MCC_CQ_ENTRY_AMAP) - | ||
487 | sizeof(struct ASYNC_EVENT_TRAILER_AMAP)); | ||
488 | u32 event_code; | ||
489 | async_event = AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER, | ||
490 | async_event, async_trailer); | ||
491 | ASSERT(async_event == 1); | ||
492 | |||
493 | |||
494 | valid = AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER, | ||
495 | valid, async_trailer); | ||
496 | ASSERT(valid == 1); | ||
497 | |||
498 | /* Call the async event handler if it is installed. */ | ||
499 | if (mcc->async_cb) { | ||
500 | event_code = | ||
501 | AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER, | ||
502 | event_code, async_trailer); | ||
503 | mcc->async_cb(mcc->async_context, | ||
504 | (u32) event_code, (void *) cqe); | ||
505 | } | ||
506 | |||
507 | } else { | ||
508 | /* This is a completion entry. */ | ||
509 | |||
510 | /* No vm forwarding in this driver. */ | ||
511 | |||
512 | cqe_consumed = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, | ||
513 | consumed, cqe); | ||
514 | if (cqe_consumed) { | ||
515 | /* | ||
516 | * A command on the MCC ring was consumed. | ||
517 | * Update the consumer index. | ||
518 | * These occur in order. | ||
519 | */ | ||
520 | ASSERT(be_mcc_wrb_consumed_in_order(mcc, cqe)); | ||
521 | consumed++; | ||
522 | } | ||
523 | |||
524 | completed = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, | ||
525 | completed, cqe); | ||
526 | if (completed) { | ||
527 | /* A command completed. Use tag to | ||
528 | * determine which command. */ | ||
529 | be_mcc_process_cqe(pfob, cqe); | ||
530 | } | ||
531 | } | ||
532 | |||
533 | /* Reset the CQE */ | ||
534 | AMAP_SET_BITS_PTR(MCC_CQ_ENTRY, valid, cqe, false); | ||
535 | num_processed++; | ||
536 | |||
537 | /* Update our tracking for the CQ ring. */ | ||
538 | cqe = mp_ring_next(cq_ring); | ||
539 | valid = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, valid, cqe); | ||
540 | } | ||
541 | |||
542 | TRACE(DL_INFO, "num_processed:0x%x, and consumed:0x%x", | ||
543 | num_processed, consumed); | ||
544 | /* | ||
545 | * Grab the CQ lock to synchronize the "rearm" setting for | ||
546 | * the doorbell, and for clearing the "processing" flag. | ||
547 | */ | ||
548 | spin_lock_irqsave(&pfob->cq_lock, pfob->cq_irq); | ||
549 | |||
550 | /* | ||
551 | * Rearm the cq. This is done based on the global mcc->rearm | ||
552 | * flag which combines the rearm parameter from the current | ||
553 | * call to process_cq and any other threads | ||
554 | * that tried to process the CQ while this one was active. | ||
555 | * This handles the situation where a sync. fwcmd was processing | ||
556 | * the CQ while the interrupt/dpc tries to process it. | ||
557 | * The sync process gets to continue -- but it is now | ||
558 | * responsible for the rearming. | ||
559 | */ | ||
560 | if (num_processed > 0 || mcc->rearm == true) { | ||
561 | db.dw[0] = 0; /* clear */ | ||
562 | AMAP_SET_BITS_PTR(CQ_DB, qid, &db, cq_ring->id); | ||
563 | AMAP_SET_BITS_PTR(CQ_DB, rearm, &db, mcc->rearm); | ||
564 | AMAP_SET_BITS_PTR(CQ_DB, event, &db, false); | ||
565 | AMAP_SET_BITS_PTR(CQ_DB, num_popped, &db, num_processed); | ||
566 | |||
567 | PD_WRITE(pfob, cq_db, db.dw[0]); | ||
568 | } | ||
569 | /* | ||
570 | * Update the consumer index after ringing the CQ doorbell. | ||
571 | * We don't want another thread to post more WRBs before we | ||
572 | * have CQ space available. | ||
573 | */ | ||
574 | mp_ring_consume_multiple(mp_ring, consumed); | ||
575 | |||
576 | /* Clear the processing flag. */ | ||
577 | mcc->processing = 0; | ||
578 | |||
579 | Error: | ||
580 | spin_unlock_irqrestore(&pfob->cq_lock, pfob->cq_irq); | ||
581 | /* | ||
582 | * Use the local variable to detect if the current thread | ||
583 | * holds the WRB post lock. If rearm is false, this is | ||
584 | * either a synchronous command, or the upper layer driver is polling | ||
585 | * from a thread. We do not drive the queue from that | ||
586 | * context since the driver may hold the | ||
587 | * wrb post lock already. | ||
588 | */ | ||
589 | if (rearm) | ||
590 | be_drive_mcc_wrb_queue(mcc); | ||
591 | else | ||
592 | pfob->pend_queue_driving = 1; | ||
593 | |||
594 | return BE_SUCCESS; | ||
595 | } | ||
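The rearm flag effectively separates the interrupt/DPC path (which may drive the WRB backlog on return) from synchronous polling, which defers that work via pend_queue_driving. A sketch of the interrupt-side caller is shown below; the function name is hypothetical and only illustrates the calling convention.

static void example_mcc_interrupt(struct be_mcc_object *mcc)
{
	/* rearm == true: re-arm the CQ doorbell and allow the routine to
	 * drive any backlogged WRBs, since the WRB post lock cannot be
	 * held by this context. */
	be_mcc_process_cq(mcc, true);
}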
596 | |||
597 | /* | ||
598 | *============================================================================ | ||
599 | * P U B L I C R O U T I N E S | ||
600 | *============================================================================ | ||
601 | */ | ||
602 | |||
603 | /* | ||
604 | This routine creates an MCC object. This object contains an MCC send queue | ||
605 | and a CQ private to the MCC. | ||
606 | |||
607 | pfob - Handle to the function object that owns this MCC ring. | ||
608 | |||
609 | cq - CQ object that will receive completions for this MCC. | ||
610 | |||
611 | mcc - Pointer to the internal Mcc Object to initialize and return. | ||
612 | |||
613 | Returns BE_SUCCESS if successful, otherwise a useful error code | ||
614 | is returned. | ||
615 | |||
616 | IRQL < DISPATCH_LEVEL | ||
617 | |||
618 | */ | ||
619 | int | ||
620 | be_mcc_ring_create(struct be_function_object *pfob, | ||
621 | struct ring_desc *rd, u32 length, | ||
622 | struct be_mcc_wrb_context *context_array, | ||
623 | u32 num_context_entries, | ||
624 | struct be_cq_object *cq, struct be_mcc_object *mcc) | ||
625 | { | ||
626 | int status = 0; | ||
627 | |||
628 | struct FWCMD_COMMON_MCC_CREATE *fwcmd = NULL; | ||
629 | struct MCC_WRB_AMAP *wrb = NULL; | ||
630 | u32 num_entries_encoded, n, i; | ||
631 | void *va = NULL; | ||
632 | unsigned long irql; | ||
633 | |||
634 | if (length < sizeof(struct MCC_WRB_AMAP) * 2) { | ||
635 | TRACE(DL_ERR, "Invalid MCC ring length:%d", length); | ||
636 | return BE_NOT_OK; | ||
637 | } | ||
638 | /* | ||
639 | * Reduce the actual ring size to be less than the number | ||
640 | * of context entries. This ensures that we run out of | ||
641 | * ring WRBs first so the queuing works correctly. We never | ||
642 | * queue based on context structs. | ||
643 | */ | ||
644 | if (num_context_entries + 1 < | ||
645 | length / sizeof(struct MCC_WRB_AMAP) - 1) { | ||
646 | |||
647 | u32 max_length = | ||
648 | (num_context_entries + 2) * sizeof(struct MCC_WRB_AMAP); | ||
649 | |||
650 | if (is_power_of_2(max_length)) | ||
651 | length = __roundup_pow_of_two(max_length+1) / 2; | ||
652 | else | ||
653 | length = __roundup_pow_of_two(max_length) / 2; | ||
654 | |||
655 | ASSERT(length <= max_length); | ||
656 | |||
657 | TRACE(DL_WARN, | ||
658 | "MCC ring length reduced based on context entries." | ||
659 | " length:%d wrbs:%d context_entries:%d", length, | ||
660 | (int) (length / sizeof(struct MCC_WRB_AMAP)), | ||
661 | num_context_entries); | ||
662 | } | ||
663 | |||
664 | spin_lock_irqsave(&pfob->post_lock, irql); | ||
665 | |||
666 | num_entries_encoded = | ||
667 | be_ring_length_to_encoding(length, sizeof(struct MCC_WRB_AMAP)); | ||
668 | |||
669 | /* Init MCC object. */ | ||
670 | memset(mcc, 0, sizeof(*mcc)); | ||
671 | mcc->parent_function = pfob; | ||
672 | mcc->cq_object = cq; | ||
673 | |||
674 | INIT_LIST_HEAD(&mcc->backlog); | ||
675 | |||
676 | wrb = be_function_peek_mcc_wrb(pfob); | ||
677 | if (!wrb) { | ||
678 | ASSERT(wrb); | ||
679 | TRACE(DL_ERR, "No free MCC WRBs in create MCC ring."); | ||
680 | status = BE_STATUS_NO_MCC_WRB; | ||
681 | goto error; | ||
682 | } | ||
683 | /* Prepares an embedded fwcmd, including request/response sizes. */ | ||
684 | fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_MCC_CREATE); | ||
685 | |||
686 | fwcmd->params.request.num_pages = DIV_ROUND_UP(length, PAGE_SIZE); | ||
687 | /* | ||
688 | * Program MCC ring context | ||
689 | */ | ||
690 | AMAP_SET_BITS_PTR(MCC_RING_CONTEXT, pdid, | ||
691 | &fwcmd->params.request.context, 0); | ||
692 | AMAP_SET_BITS_PTR(MCC_RING_CONTEXT, invalid, | ||
693 | &fwcmd->params.request.context, false); | ||
694 | AMAP_SET_BITS_PTR(MCC_RING_CONTEXT, ring_size, | ||
695 | &fwcmd->params.request.context, num_entries_encoded); | ||
696 | |||
697 | n = cq->cq_id; | ||
698 | AMAP_SET_BITS_PTR(MCC_RING_CONTEXT, | ||
699 | cq_id, &fwcmd->params.request.context, n); | ||
700 | be_rd_to_pa_list(rd, fwcmd->params.request.pages, | ||
701 | ARRAY_SIZE(fwcmd->params.request.pages)); | ||
702 | /* Post the f/w command */ | ||
703 | status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL, | ||
704 | NULL, NULL, fwcmd, NULL); | ||
705 | if (status != BE_SUCCESS) { | ||
706 | TRACE(DL_ERR, "FWCMD to create MCC ring failed."); | ||
707 | goto error; | ||
708 | } | ||
709 | /* | ||
710 | * Create a linked list of context structures | ||
711 | */ | ||
712 | mcc->wrb_context.base = context_array; | ||
713 | mcc->wrb_context.num = num_context_entries; | ||
714 | INIT_LIST_HEAD(&mcc->wrb_context.list_head); | ||
715 | memset(context_array, 0, | ||
716 | sizeof(struct be_mcc_wrb_context) * num_context_entries); | ||
717 | for (i = 0; i < mcc->wrb_context.num; i++) { | ||
718 | list_add_tail(&context_array[i].next, | ||
719 | &mcc->wrb_context.list_head); | ||
720 | } | ||
721 | |||
722 | /* | ||
723 | * | ||
724 | * Create an mcc_ring for tracking WRB hw ring | ||
725 | */ | ||
726 | va = rd->va; | ||
727 | ASSERT(va); | ||
728 | mp_ring_create(&mcc->sq.ring, length / sizeof(struct MCC_WRB_AMAP), | ||
729 | sizeof(struct MCC_WRB_AMAP), va); | ||
730 | mcc->sq.ring.id = fwcmd->params.response.id; | ||
731 | /* | ||
732 | * Init a mcc_ring for tracking the MCC CQ. | ||
733 | */ | ||
734 | ASSERT(cq->va); | ||
735 | mp_ring_create(&mcc->cq.ring, cq->num_entries, | ||
736 | sizeof(struct MCC_CQ_ENTRY_AMAP), cq->va); | ||
737 | mcc->cq.ring.id = cq->cq_id; | ||
738 | |||
739 | /* Force zeroing of CQ. */ | ||
740 | memset(cq->va, 0, cq->num_entries * sizeof(struct MCC_CQ_ENTRY_AMAP)); | ||
741 | |||
742 | /* Initialize debug index. */ | ||
743 | mcc->consumed_index = 0; | ||
744 | |||
745 | atomic_inc(&cq->ref_count); | ||
746 | pfob->mcc = mcc; | ||
747 | |||
748 | TRACE(DL_INFO, "MCC ring created. id:%d bytes:%d cq_id:%d cq_entries:%d" | ||
749 | " num_context:%d", mcc->sq.ring.id, length, | ||
750 | cq->cq_id, cq->num_entries, num_context_entries); | ||
751 | |||
752 | error: | ||
753 | spin_unlock_irqrestore(&pfob->post_lock, irql); | ||
754 | if (pfob->pend_queue_driving && pfob->mcc) { | ||
755 | pfob->pend_queue_driving = 0; | ||
756 | be_drive_mcc_wrb_queue(pfob->mcc); | ||
757 | } | ||
758 | return status; | ||
759 | } | ||
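For reference, a rough sketch of a caller bringing the MCC ring up follows. It assumes the CQ object and the DMA-coherent ring memory described by struct ring_desc were created elsewhere; the kcalloc of the context array, the error value chosen, and the example_ name are illustrative, and the lifetime of the array is left to the caller.

static int example_create_mcc(struct be_function_object *pfob,
			      struct ring_desc *rd, u32 ring_bytes,
			      struct be_cq_object *cq,
			      struct be_mcc_object *mcc)
{
	/* One WRB context per potential ring entry (see the sizing logic
	 * in be_mcc_ring_create() above). */
	u32 num_ctx = ring_bytes / sizeof(struct MCC_WRB_AMAP);
	struct be_mcc_wrb_context *ctx;
	int status;

	ctx = kcalloc(num_ctx, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return BE_NOT_OK;

	status = be_mcc_ring_create(pfob, rd, ring_bytes, ctx, num_ctx,
				    cq, mcc);
	if (status != BE_SUCCESS)
		kfree(ctx);
	return status;
}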
760 | |||
761 | /* | ||
762 | This routine destroys an MCC send queue | ||
763 | |||
764 | MccObject - Internal Mcc Object to be destroyed. | ||
765 | |||
766 | Returns BE_SUCCESS if successful, otherwise an error code is returned. | ||
767 | |||
768 | IRQL < DISPATCH_LEVEL | ||
769 | |||
770 | The caller of this routine must ensure that no other WRB may be posted | ||
771 | until this routine returns. | ||
772 | |||
773 | */ | ||
774 | int be_mcc_ring_destroy(struct be_mcc_object *mcc) | ||
775 | { | ||
776 | int status = 0; | ||
777 | struct be_function_object *pfob = mcc->parent_function; | ||
778 | |||
779 | |||
780 | ASSERT(mcc->processing == 0); | ||
781 | |||
782 | /* | ||
783 | * Remove the ring from the function object. | ||
784 | * This transitions back to mailbox mode. | ||
785 | */ | ||
786 | pfob->mcc = NULL; | ||
787 | |||
788 | /* Send fwcmd to destroy the queue. (Using the mailbox.) */ | ||
789 | status = be_function_ring_destroy(mcc->parent_function, mcc->sq.ring.id, | ||
790 | FWCMD_RING_TYPE_MCC, NULL, NULL, NULL, NULL); | ||
791 | ASSERT(status == 0); | ||
792 | |||
793 | /* Release the SQ reference to the CQ */ | ||
794 | atomic_dec(&mcc->cq_object->ref_count); | ||
795 | |||
796 | return status; | ||
797 | } | ||
798 | |||
799 | static void | ||
800 | mcc_wrb_sync_cb(void *context, int status, struct MCC_WRB_AMAP *wrb) | ||
801 | { | ||
802 | struct be_mcc_wrb_context *wrb_context = | ||
803 | (struct be_mcc_wrb_context *) context; | ||
804 | ASSERT(wrb_context); | ||
805 | *wrb_context->users_final_status = status; | ||
806 | } | ||
807 | |||
808 | /* | ||
809 | This routine posts a command to the MCC send queue | ||
810 | |||
811 | mcc - Internal Mcc Object to post the WRB on. | ||
812 | |||
813 | wrb - wrb to post. | ||
814 | |||
815 | Returns BE_SUCCESS if successful, otherwise an error code is returned. | ||
816 | |||
817 | IRQL < DISPATCH_LEVEL if CompletionCallback is not NULL | ||
818 | IRQL <= DISPATCH_LEVEL if CompletionCallback is NULL | ||
819 | |||
820 | If this routine is called with CompletionCallback != NULL the | ||
821 | call is considered to be asynchronous and will return as soon | ||
822 | as the WRB is posted to the MCC with BE_PENDING. | ||
823 | |||
824 | If CompletionCallback is NULL, then this routine will not return until | ||
825 | a completion for this MCC command has been processed. | ||
826 | If called at DISPATCH_LEVEL the CompletionCallback must be NULL. | ||
827 | |||
828 | This routine should only be called if the MPU has been bootstrapped past | ||
829 | mailbox mode. | ||
830 | |||
831 | |||
832 | */ | ||
833 | int | ||
834 | _be_mpu_post_wrb_ring(struct be_mcc_object *mcc, struct MCC_WRB_AMAP *wrb, | ||
835 | struct be_mcc_wrb_context *wrb_context) | ||
836 | { | ||
837 | |||
838 | struct MCC_WRB_AMAP *ring_wrb = NULL; | ||
839 | int status = BE_PENDING; | ||
840 | int final_status = BE_PENDING; | ||
841 | mcc_wrb_cqe_callback cb = NULL; | ||
842 | struct MCC_DB_AMAP mcc_db; | ||
843 | u32 embedded; | ||
844 | |||
845 | ASSERT(mp_ring_num_empty(&mcc->sq.ring) > 0); | ||
846 | /* | ||
847 | * Input wrb is most likely the next wrb in the ring, since the client | ||
848 | * can peek at the address. | ||
849 | */ | ||
850 | ring_wrb = mp_ring_producer_ptr(&mcc->sq.ring); | ||
851 | if (wrb != ring_wrb) { | ||
852 | /* If not equal, copy it into the ring. */ | ||
853 | memcpy(ring_wrb, wrb, sizeof(struct MCC_WRB_AMAP)); | ||
854 | } | ||
855 | #ifdef BE_DEBUG | ||
856 | wrb_context->ring_wrb = ring_wrb; | ||
857 | #endif | ||
858 | embedded = AMAP_GET_BITS_PTR(MCC_WRB, embedded, ring_wrb); | ||
859 | if (embedded) { | ||
860 | /* embedded commands will have the response within the WRB. */ | ||
861 | wrb_context->wrb = ring_wrb; | ||
862 | } else { | ||
863 | /* | ||
864 | * non-embedded commands will not have the response | ||
865 | * within the WRB, and they may complete out-of-order. | ||
866 | * The WRB will not be valid to inspect | ||
867 | * during the completion. | ||
868 | */ | ||
869 | wrb_context->wrb = NULL; | ||
870 | } | ||
871 | cb = wrb_context->cb; | ||
872 | |||
873 | if (cb == NULL) { | ||
874 | /* Assign our internal callback if this is a | ||
875 | * synchronous call. */ | ||
876 | wrb_context->cb = mcc_wrb_sync_cb; | ||
877 | wrb_context->cb_context = wrb_context; | ||
878 | wrb_context->users_final_status = &final_status; | ||
879 | } | ||
880 | /* Increment producer index */ | ||
881 | |||
882 | mcc_db.dw[0] = 0; /* initialize */ | ||
883 | AMAP_SET_BITS_PTR(MCC_DB, rid, &mcc_db, mcc->sq.ring.id); | ||
884 | AMAP_SET_BITS_PTR(MCC_DB, numPosted, &mcc_db, 1); | ||
885 | |||
886 | mp_ring_produce(&mcc->sq.ring); | ||
887 | PD_WRITE(mcc->parent_function, mpu_mcc_db, mcc_db.dw[0]); | ||
888 | TRACE(DL_INFO, "pidx: %x and cidx: %x.", mcc->sq.ring.pidx, | ||
889 | mcc->sq.ring.cidx); | ||
890 | |||
891 | if (cb == NULL) { | ||
892 | int polls = 0; /* At >= 1 us per poll */ | ||
893 | /* Wait until this command completes, polling the CQ. */ | ||
894 | do { | ||
895 | TRACE(DL_INFO, "FWCMD submitted in the poll mode."); | ||
896 | /* Do not rearm CQ in this context. */ | ||
897 | be_mcc_process_cq(mcc, false); | ||
898 | |||
899 | if (final_status == BE_PENDING) { | ||
900 | if ((++polls & 0x7FFFF) == 0) { | ||
901 | TRACE(DL_WARN, | ||
902 | "Warning : polling MCC CQ for %d" | ||
903 | "ms.", polls / 1000); | ||
904 | } | ||
905 | |||
906 | udelay(1); | ||
907 | } | ||
908 | |||
909 | /* final_status changed when the command completes */ | ||
910 | } while (final_status == BE_PENDING); | ||
911 | |||
912 | status = final_status; | ||
913 | } | ||
914 | |||
915 | return status; | ||
916 | } | ||
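To make the synchronous path above concrete: with a NULL cb, the routine installs mcc_wrb_sync_cb() itself and polls be_mcc_process_cq() until the command completes, and the completion handler then frees the WRB context. A hedged sketch of such a synchronous post is shown below; it assumes the caller has already verified ring space (the ASSERT above), and the error value returned on allocation failure is only an example choice.

static int example_post_sync(struct be_mcc_object *mcc,
			     struct MCC_WRB_AMAP *wrb)
{
	struct be_mcc_wrb_context *ctx;

	ctx = _be_mcc_allocate_wrb_context(mcc->parent_function);
	if (!ctx)
		return BE_STATUS_NO_MCC_WRB;

	/* NULL cb selects the blocking/polling path in the routine above.
	 * be_mcc_process_cqe() frees ctx when the completion is seen. */
	ctx->cb = NULL;
	return _be_mpu_post_wrb_ring(mcc, wrb, ctx);
}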
917 | |||
918 | struct MCC_WRB_AMAP * | ||
919 | _be_mpu_peek_ring_wrb(struct be_mcc_object *mcc, bool driving_queue) | ||
920 | { | ||
921 | /* If we have queued items, do not allow a post to bypass the queue. */ | ||
922 | if (!driving_queue && !list_empty(&mcc->backlog)) | ||
923 | return NULL; | ||
924 | |||
925 | if (mp_ring_num_empty(&mcc->sq.ring) <= 0) | ||
926 | return NULL; | ||
927 | return (struct MCC_WRB_AMAP *) mp_ring_producer_ptr(&mcc->sq.ring); | ||
928 | } | ||
929 | |||
930 | int | ||
931 | be_mpu_init_mailbox(struct be_function_object *pfob, struct ring_desc *mailbox) | ||
932 | { | ||
933 | ASSERT(mailbox); | ||
934 | pfob->mailbox.va = mailbox->va; | ||
935 | pfob->mailbox.pa = cpu_to_le64(mailbox->pa); | ||
936 | pfob->mailbox.length = mailbox->length; | ||
937 | |||
938 | ASSERT(((u32)(size_t)pfob->mailbox.va & 0xf) == 0); | ||
939 | ASSERT(((u32)(size_t)pfob->mailbox.pa & 0xf) == 0); | ||
940 | /* | ||
941 | * Issue the WRB to set MPU endianness | ||
942 | */ | ||
943 | { | ||
944 | u64 *endian_check = (u64 *) (pfob->mailbox.va + | ||
945 | offsetof(struct BE_MCC_MAILBOX_AMAP, wrb)/8); | ||
946 | *endian_check = 0xFF1234FFFF5678FFULL; | ||
947 | } | ||
948 | |||
949 | be_mcc_mailbox_notify_and_wait(pfob); | ||
950 | |||
951 | return BE_SUCCESS; | ||
952 | } | ||
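The quad-word written before the first notify is presumably an endianness probe: the 0xFF framing bytes let the MPU see where the 0x1234 and 0x5678 markers land and deduce the host byte order. The helper below only dumps that layout to the log to make the handshake easier to reason about; it is explanatory and not part of the driver.

static void example_dump_endian_probe(void)
{
	u64 probe = 0xFF1234FFFF5678FFULL;
	u8 *b = (u8 *)&probe;
	int i;

	/* Little-endian hosts print: ff 78 56 ff ff 34 12 ff
	 * Big-endian hosts print:    ff 12 34 ff ff 56 78 ff */
	for (i = 0; i < 8; i++)
		printk(KERN_DEBUG "endian probe byte %d: %02x\n", i, b[i]);
}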
953 | |||
954 | |||
955 | /* | ||
956 | This routine posts a command to the MCC mailbox. | ||
957 | |||
958 | pfob - Function Object to post the WRB on behalf of. | ||
959 | wrb - wrb to post. | ||
960 | wrb_context - WRB context for this command. Its cb field, if non-NULL, is | ||
961 | the completion callback invoked once the WRB is completed, and | ||
962 | cb_context is the opaque context passed to that callback. | ||
963 | |||
964 | Returns BE_SUCCESS if successful, otherwise an error code is returned. | ||
965 | |||
966 | IRQL <= DISPATCH_LEVEL if the completion callback is NULL | ||
967 | |||
968 | This routine will block until a completion for this MCC command has been | ||
969 | processed. If called at DISPATCH_LEVEL, the completion callback must be NULL. | ||
970 | |||
971 | This routine should only be called if the MPU has not been bootstrapped past | ||
972 | mailbox mode. | ||
973 | */ | ||
974 | int | ||
975 | _be_mpu_post_wrb_mailbox(struct be_function_object *pfob, | ||
976 | struct MCC_WRB_AMAP *wrb, struct be_mcc_wrb_context *wrb_context) | ||
977 | { | ||
978 | struct MCC_MAILBOX_AMAP *mailbox = NULL; | ||
979 | struct MCC_WRB_AMAP *mb_wrb; | ||
980 | struct MCC_CQ_ENTRY_AMAP *mb_cq; | ||
981 | u32 offset, status; | ||
982 | |||
983 | ASSERT(pfob->mcc == NULL); | ||
984 | mailbox = pfob->mailbox.va; | ||
985 | ASSERT(mailbox); | ||
986 | |||
987 | offset = offsetof(struct BE_MCC_MAILBOX_AMAP, wrb)/8; | ||
988 | mb_wrb = (struct MCC_WRB_AMAP *) ((u8 *)mailbox + offset); | ||
989 | if (mb_wrb != wrb) { | ||
990 | memset(mailbox, 0, sizeof(*mailbox)); | ||
991 | memcpy(mb_wrb, wrb, sizeof(struct MCC_WRB_AMAP)); | ||
992 | } | ||
993 | /* The callback can inspect the final WRB to get output parameters. */ | ||
994 | wrb_context->wrb = mb_wrb; | ||
995 | |||
996 | be_mcc_mailbox_notify_and_wait(pfob); | ||
997 | |||
998 | /* A command completed. Use tag to determine which command. */ | ||
999 | offset = offsetof(struct BE_MCC_MAILBOX_AMAP, cq)/8; | ||
1000 | mb_cq = (struct MCC_CQ_ENTRY_AMAP *) ((u8 *)mailbox + offset); | ||
1001 | be_mcc_process_cqe(pfob, mb_cq); | ||
1002 | |||
1003 | status = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, completion_status, mb_cq); | ||
1004 | if (status) | ||
1005 | status = BE_NOT_OK; | ||
1006 | return status; | ||
1007 | } | ||
1008 | |||
1009 | struct be_mcc_wrb_context * | ||
1010 | _be_mcc_allocate_wrb_context(struct be_function_object *pfob) | ||
1011 | { | ||
1012 | struct be_mcc_wrb_context *context = NULL; | ||
1013 | unsigned long irq; | ||
1014 | |||
1015 | spin_lock_irqsave(&pfob->mcc_context_lock, irq); | ||
1016 | |||
1017 | if (!pfob->mailbox.default_context_allocated) { | ||
1018 | /* Use the single default context that we | ||
1019 | * always have allocated. */ | ||
1020 | pfob->mailbox.default_context_allocated = true; | ||
1021 | context = &pfob->mailbox.default_context; | ||
1022 | } else if (pfob->mcc) { | ||
1023 | /* Get a context from the free list. If any are available. */ | ||
1024 | if (!list_empty(&pfob->mcc->wrb_context.list_head)) { | ||
1025 | context = list_first_entry( | ||
1026 | &pfob->mcc->wrb_context.list_head, | ||
1027 | struct be_mcc_wrb_context, next); | ||
1028 | } | ||
1029 | } | ||
1030 | |||
1031 | spin_unlock_irqrestore(&pfob->mcc_context_lock, irq); | ||
1032 | |||
1033 | return context; | ||
1034 | } | ||
1035 | |||
1036 | void | ||
1037 | _be_mcc_free_wrb_context(struct be_function_object *pfob, | ||
1038 | struct be_mcc_wrb_context *context) | ||
1039 | { | ||
1040 | unsigned long irq; | ||
1041 | |||
1042 | ASSERT(context); | ||
1043 | /* | ||
1044 | * Zero during free to try and catch any bugs where the context | ||
1045 | * is accessed after a free. | ||
1046 | */ | ||
1047 | memset(context, 0, sizeof(*context)); | ||
1048 | |||
1049 | spin_lock_irqsave(&pfob->mcc_context_lock, irq); | ||
1050 | |||
1051 | if (context == &pfob->mailbox.default_context) { | ||
1052 | /* Free the default context. */ | ||
1053 | ASSERT(pfob->mailbox.default_context_allocated); | ||
1054 | pfob->mailbox.default_context_allocated = false; | ||
1055 | } else { | ||
1056 | /* Add to free list. */ | ||
1057 | ASSERT(pfob->mcc); | ||
1058 | list_add_tail(&context->next, | ||
1059 | &pfob->mcc->wrb_context.list_head); | ||
1060 | } | ||
1061 | |||
1062 | spin_unlock_irqrestore(&pfob->mcc_context_lock, irq); | ||
1063 | } | ||
1064 | |||
1065 | int | ||
1066 | be_mcc_add_async_event_callback(struct be_mcc_object *mcc_object, | ||
1067 | mcc_async_event_callback cb, void *cb_context) | ||
1068 | { | ||
1069 | /* Lock against anyone trying to change the callback/context pointers | ||
1070 | * while being used. */ | ||
1071 | spin_lock_irqsave(&mcc_object->parent_function->cq_lock, | ||
1072 | mcc_object->parent_function->cq_irq); | ||
1073 | |||
1074 | /* Assign the async callback. */ | ||
1075 | mcc_object->async_context = cb_context; | ||
1076 | mcc_object->async_cb = cb; | ||
1077 | |||
1078 | spin_unlock_irqrestore(&mcc_object->parent_function->cq_lock, | ||
1079 | mcc_object->parent_function->cq_irq); | ||
1080 | |||
1081 | return BE_SUCCESS; | ||
1082 | } | ||
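The async callback signature is implied by the invocation in be_mcc_process_cq(): opaque context, event code, and a pointer to the raw CQE. A minimal consumer and its registration, typically done once after be_mcc_ring_create(), could look like the sketch below; the adapter argument and the example_ names are placeholders, not driver code.

static void example_async_event_cb(void *context, u32 event_code, void *cqe)
{
	/* e.g. decode an ASYNC_EVENT_TRAILER / link-state event here. */
	TRACE(DL_INFO, "async event 0x%x", event_code);
}

static void example_register_async_cb(struct be_function_object *pfob,
				      void *adapter)
{
	be_mcc_add_async_event_callback(pfob->mcc, example_async_event_cb,
					adapter);
}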
1083 | |||
1084 | #define MPU_EP_CONTROL 0 | ||
1085 | #define MPU_EP_SEMAPHORE 0xac | ||
1086 | |||
1087 | /* | ||
1088 | *------------------------------------------------------------------- | ||
1089 | * Function: be_wait_for_POST_complete | ||
1090 | * Waits until the BladeEngine POST completes (either in error or success). | ||
1091 | * pfob - | ||
1092 | * return status - BE_SUCCESS (0) on success. Negative error code on failure. | ||
1093 | *------------------------------------------------------------------- | ||
1094 | */ | ||
1095 | static int be_wait_for_POST_complete(struct be_function_object *pfob) | ||
1096 | { | ||
1097 | struct MGMT_HBA_POST_STATUS_STRUCT_AMAP status; | ||
1098 | int s; | ||
1099 | u32 post_error, post_stage; | ||
1100 | |||
1101 | const u32 us_per_loop = 1000; /* 1000us */ | ||
1102 | const u32 print_frequency_loops = 1000000 / us_per_loop; | ||
1103 | const u32 max_loops = 60 * print_frequency_loops; | ||
1104 | u32 loops = 0; | ||
1105 | |||
1106 | /* | ||
1107 | * Wait for the ARM firmware to indicate it is done, or a fatal error. | ||
1108 | * Note: POST can take some time to complete depending on configuration | ||
1109 | * settings (consider ARM attempts to acquire an IP address | ||
1110 | * over DHCP!!!). | ||
1111 | * | ||
1112 | */ | ||
1113 | do { | ||
1114 | status.dw[0] = ioread32(pfob->csr_va + MPU_EP_SEMAPHORE); | ||
1115 | post_error = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT, | ||
1116 | error, &status); | ||
1117 | post_stage = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT, | ||
1118 | stage, &status); | ||
1119 | if (0 == (loops % print_frequency_loops)) { | ||
1120 | /* Print current status */ | ||
1121 | TRACE(DL_INFO, "POST status = 0x%x (stage = 0x%x)", | ||
1122 | status.dw[0], post_stage); | ||
1123 | } | ||
1124 | udelay(us_per_loop); | ||
1125 | } while ((post_error != 1) && | ||
1126 | (post_stage != POST_STAGE_ARMFW_READY) && | ||
1127 | (++loops < max_loops)); | ||
1128 | |||
1129 | if (post_error == 1) { | ||
1130 | TRACE(DL_ERR, "POST error! Status = 0x%x (stage = 0x%x)", | ||
1131 | status.dw[0], post_stage); | ||
1132 | s = BE_NOT_OK; | ||
1133 | } else if (post_stage != POST_STAGE_ARMFW_READY) { | ||
1134 | TRACE(DL_ERR, "POST time-out! Status = 0x%x (stage = 0x%x)", | ||
1135 | status.dw[0], post_stage); | ||
1136 | s = BE_NOT_OK; | ||
1137 | } else { | ||
1138 | s = BE_SUCCESS; | ||
1139 | } | ||
1140 | return s; | ||
1141 | } | ||
1142 | |||
1143 | /* | ||
1144 | *------------------------------------------------------------------- | ||
1145 | * Function: be_kickoff_and_wait_for_POST | ||
1146 | * Interacts with the BladeEngine management processor to initiate POST, and | ||
1147 | * subsequently waits until POST completes (either in error or success). | ||
1148 | * The caller must acquire the reset semaphore before initiating POST | ||
1149 | * to prevent multiple drivers interacting with the management processor. | ||
1150 | * Once POST is complete the caller must release the reset semaphore. | ||
1151 | * Callers who only want to wait for POST complete may call | ||
1152 | * be_wait_for_POST_complete. | ||
1153 | * pfob - | ||
1154 | * return status - BE_SUCCESS (0) on success. Negative error code on failure. | ||
1155 | *------------------------------------------------------------------- | ||
1156 | */ | ||
1157 | static int | ||
1158 | be_kickoff_and_wait_for_POST(struct be_function_object *pfob) | ||
1159 | { | ||
1160 | struct MGMT_HBA_POST_STATUS_STRUCT_AMAP status; | ||
1161 | int s; | ||
1162 | |||
1163 | const u32 us_per_loop = 1000; /* 1000us */ | ||
1164 | const u32 print_frequency_loops = 1000000 / us_per_loop; | ||
1165 | const u32 max_loops = 5 * print_frequency_loops; | ||
1166 | u32 loops = 0; | ||
1167 | u32 post_error, post_stage; | ||
1168 | |||
1169 | /* Wait for the ARM fw to reach awaiting-host-ready, or a fatal error. */ | ||
1170 | TRACE(DL_INFO, "Wait for BladeEngine ready to POST"); | ||
1171 | do { | ||
1172 | status.dw[0] = ioread32(pfob->csr_va + MPU_EP_SEMAPHORE); | ||
1173 | post_error = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT, | ||
1174 | error, &status); | ||
1175 | post_stage = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT, | ||
1176 | stage, &status); | ||
1177 | if (0 == (loops % print_frequency_loops)) { | ||
1178 | /* Print current status */ | ||
1179 | TRACE(DL_INFO, "POST status = 0x%x (stage = 0x%x)", | ||
1180 | status.dw[0], post_stage); | ||
1181 | } | ||
1182 | udelay(us_per_loop); | ||
1183 | } while ((post_error != 1) && | ||
1184 | (post_stage < POST_STAGE_AWAITING_HOST_RDY) && | ||
1185 | (++loops < max_loops)); | ||
1186 | |||
1187 | if (post_error == 1) { | ||
1188 | TRACE(DL_ERR, "Pre-POST error! Status = 0x%x (stage = 0x%x)", | ||
1189 | status.dw[0], post_stage); | ||
1190 | s = BE_NOT_OK; | ||
1191 | } else if (post_stage == POST_STAGE_AWAITING_HOST_RDY) { | ||
1192 | iowrite32(POST_STAGE_HOST_RDY, pfob->csr_va + MPU_EP_SEMAPHORE); | ||
1193 | |||
1194 | /* Wait for POST to complete */ | ||
1195 | s = be_wait_for_POST_complete(pfob); | ||
1196 | } else { | ||
1197 | /* | ||
1198 | * Either a timeout waiting for host ready signal or POST has | ||
1199 | * moved ahead without requiring a host ready signal. | ||
1200 | * Might as well give POST a chance to complete | ||
1201 | * (or timeout again). | ||
1202 | */ | ||
1203 | s = be_wait_for_POST_complete(pfob); | ||
1204 | } | ||
1205 | return s; | ||
1206 | } | ||
1207 | |||
1208 | /* | ||
1209 | *------------------------------------------------------------------- | ||
1210 | * Function: be_pci_soft_reset | ||
1211 | * This function is called to issue a BladeEngine soft reset. | ||
1212 | * Callers should acquire the soft reset semaphore before calling this | ||
1213 | * function. Additionally, callers should ensure they cannot be pre-empted | ||
1214 | * while the routine executes. Upon completion of this routine, callers | ||
1215 | * should release the reset semaphore. This routine implicitly waits | ||
1216 | * for BladeEngine POST to complete. | ||
1217 | * pfob - | ||
1218 | * return status - BE_SUCCESS (0) on success. Negative error code on failure. | ||
1219 | *------------------------------------------------------------------- | ||
1220 | */ | ||
1221 | int be_pci_soft_reset(struct be_function_object *pfob) | ||
1222 | { | ||
1223 | struct PCICFG_SOFT_RESET_CSR_AMAP soft_reset; | ||
1224 | struct PCICFG_ONLINE0_CSR_AMAP pciOnline0; | ||
1225 | struct PCICFG_ONLINE1_CSR_AMAP pciOnline1; | ||
1226 | struct EP_CONTROL_CSR_AMAP epControlCsr; | ||
1227 | int status = BE_SUCCESS; | ||
1228 | u32 i, soft_reset_bit; | ||
1229 | |||
1230 | TRACE(DL_NOTE, "PCI reset..."); | ||
1231 | |||
1232 | /* Issue soft reset #1 to get BladeEngine into a known state. */ | ||
1233 | soft_reset.dw[0] = PCICFG0_READ(pfob, soft_reset); | ||
1234 | AMAP_SET_BITS_PTR(PCICFG_SOFT_RESET_CSR, softreset, soft_reset.dw, 1); | ||
1235 | PCICFG0_WRITE(pfob, host_timer_int_ctrl, soft_reset.dw[0]); | ||
1236 | /* | ||
1237 | * wait until soft reset is deasserted - hardware | ||
1238 | * deasserts after some time. | ||
1239 | */ | ||
1240 | i = 0; | ||
1241 | do { | ||
1242 | udelay(50); | ||
1243 | soft_reset.dw[0] = PCICFG0_READ(pfob, soft_reset); | ||
1244 | soft_reset_bit = AMAP_GET_BITS_PTR(PCICFG_SOFT_RESET_CSR, | ||
1245 | softreset, soft_reset.dw); | ||
1246 | } while (soft_reset_bit && (i++ < 1024)); | ||
1247 | if (soft_reset_bit != 0) { | ||
1248 | TRACE(DL_ERR, "Soft-reset #1 did not deassert as expected."); | ||
1249 | status = BE_NOT_OK; | ||
1250 | goto Error_label; | ||
1251 | } | ||
1252 | /* Mask everything */ | ||
1253 | PCICFG0_WRITE(pfob, ue_status_low_mask, 0xFFFFFFFF); | ||
1254 | PCICFG0_WRITE(pfob, ue_status_hi_mask, 0xFFFFFFFF); | ||
1255 | /* | ||
1256 | * Set everything offline except MPU IRAM (it is offline with | ||
1257 | * the soft-reset, but soft-reset does not reset the PCICFG registers!) | ||
1258 | */ | ||
1259 | pciOnline0.dw[0] = 0; | ||
1260 | pciOnline1.dw[0] = 0; | ||
1261 | AMAP_SET_BITS_PTR(PCICFG_ONLINE1_CSR, mpu_iram_online, | ||
1262 | pciOnline1.dw, 1); | ||
1263 | PCICFG0_WRITE(pfob, online0, pciOnline0.dw[0]); | ||
1264 | PCICFG0_WRITE(pfob, online1, pciOnline1.dw[0]); | ||
1265 | |||
1266 | mdelay(20); /* 20 ms; large udelay() values are unreliable */ | ||
1267 | |||
1268 | /* Issue soft reset #2. */ | ||
1269 | AMAP_SET_BITS_PTR(PCICFG_SOFT_RESET_CSR, softreset, soft_reset.dw, 1); | ||
1270 | PCICFG0_WRITE(pfob, host_timer_int_ctrl, soft_reset.dw[0]); | ||
1271 | /* | ||
1272 | * wait until soft reset is deasserted - hardware | ||
1273 | * deasserts after some time. | ||
1274 | */ | ||
1275 | i = 0; | ||
1276 | do { | ||
1277 | udelay(50); | ||
1278 | soft_reset.dw[0] = PCICFG0_READ(pfob, soft_reset); | ||
1279 | soft_reset_bit = AMAP_GET_BITS_PTR(PCICFG_SOFT_RESET_CSR, | ||
1280 | softreset, soft_reset.dw); | ||
1281 | } while (soft_reset_bit && (i++ < 1024)); | ||
1282 | if (soft_reset_bit != 0) { | ||
1283 | TRACE(DL_ERR, "Soft-reset #2 did not deassert as expected."); | ||
1284 | status = BE_NOT_OK; | ||
1285 | goto Error_label; | ||
1286 | } | ||
1287 | |||
1288 | |||
1289 | mdelay(20); /* 20 ms; large udelay() values are unreliable */ | ||
1290 | |||
1291 | /* Take MPU out of reset. */ | ||
1292 | |||
1293 | epControlCsr.dw[0] = ioread32(pfob->csr_va + MPU_EP_CONTROL); | ||
1294 | AMAP_SET_BITS_PTR(EP_CONTROL_CSR, CPU_reset, &epControlCsr, 0); | ||
1295 | iowrite32((u32)epControlCsr.dw[0], pfob->csr_va + MPU_EP_CONTROL); | ||
1296 | |||
1297 | /* Kickoff BE POST and wait for completion */ | ||
1298 | status = be_kickoff_and_wait_for_POST(pfob); | ||
1299 | |||
1300 | Error_label: | ||
1301 | return status; | ||
1302 | } | ||
1303 | |||
1304 | |||
1305 | /* | ||
1306 | *------------------------------------------------------------------- | ||
1307 | * Function: be_pci_reset_required | ||
1308 | * This private function is called to detect if a host entity is | ||
1309 | * required to issue a PCI soft reset and subsequently drive | ||
1310 | * BladeEngine POST. Scenarios where this is required: | ||
1311 | * 1) BIOS-less configuration | ||
1312 | * 2) Hot-swap/plug/power-on | ||
1313 | * pfob - | ||
1314 | * return true if a reset is required, false otherwise | ||
1315 | *------------------------------------------------------------------- | ||
1316 | */ | ||
1317 | static bool be_pci_reset_required(struct be_function_object *pfob) | ||
1318 | { | ||
1319 | struct MGMT_HBA_POST_STATUS_STRUCT_AMAP status; | ||
1320 | bool do_reset = false; | ||
1321 | u32 post_error, post_stage; | ||
1322 | |||
1323 | /* | ||
1324 | * Read the POST status register | ||
1325 | */ | ||
1326 | status.dw[0] = ioread32(pfob->csr_va + MPU_EP_SEMAPHORE); | ||
1327 | post_error = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT, error, | ||
1328 | &status); | ||
1329 | post_stage = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT, stage, | ||
1330 | &status); | ||
1331 | if (post_stage <= POST_STAGE_AWAITING_HOST_RDY) { | ||
1332 | /* | ||
1333 | * If BladeEngine is waiting for host ready indication, | ||
1334 | * we want to do a PCI reset. | ||
1335 | */ | ||
1336 | do_reset = true; | ||
1337 | } | ||
1338 | |||
1339 | return do_reset; | ||
1340 | } | ||
1341 | |||
1342 | /* | ||
1343 | *------------------------------------------------------------------- | ||
1344 | * Function: be_drive_POST | ||
1345 | * This function is called to drive BladeEngine POST. The | ||
1346 | * caller should ensure they cannot be pre-empted while this routine executes. | ||
1347 | * pfob - | ||
1348 | * return status - BE_SUCCESS (0) on success. Negative error code on failure. | ||
1349 | *------------------------------------------------------------------- | ||
1350 | */ | ||
1351 | int be_drive_POST(struct be_function_object *pfob) | ||
1352 | { | ||
1353 | int status; | ||
1354 | |||
1355 | if (be_pci_reset_required(pfob)) { | ||
1356 | /* PCI reset is needed (implicitly starts and waits for POST) */ | ||
1357 | status = be_pci_soft_reset(pfob); | ||
1358 | } else { | ||
1359 | /* No PCI reset is needed, start POST */ | ||
1360 | status = be_kickoff_and_wait_for_POST(pfob); | ||
1361 | } | ||
1362 | |||
1363 | return status; | ||
1364 | } | ||
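Finally, a sketch of how a probe path might use the POST driver. The reset-semaphore handling mentioned in the comments above is omitted because those helpers are not part of this file, and the example_ name is hypothetical.

static int example_bring_up(struct be_function_object *pfob)
{
	int status;

	/* Kicks off POST (with a PCI soft reset if required) and waits
	 * for the ARM firmware to report ready. */
	status = be_drive_POST(pfob);
	if (status != BE_SUCCESS)
		TRACE(DL_ERR, "POST failed, status 0x%x", status);
	return status;
}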
diff --git a/drivers/staging/benet/mpu.h b/drivers/staging/benet/mpu.h deleted file mode 100644 index 41f3f87516e5..000000000000 --- a/drivers/staging/benet/mpu.h +++ /dev/null | |||
@@ -1,74 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | /* | ||
18 | * Autogenerated by srcgen version: 0127 | ||
19 | */ | ||
20 | #ifndef __mpu_amap_h__ | ||
21 | #define __mpu_amap_h__ | ||
22 | #include "ep.h" | ||
23 | |||
24 | /* Provide control parameters for the Management Processor Unit. */ | ||
25 | struct BE_MPU_CSRMAP_AMAP { | ||
26 | struct BE_EP_CSRMAP_AMAP ep; | ||
27 | u8 rsvd0[128]; /* DWORD 64 */ | ||
28 | u8 rsvd1[32]; /* DWORD 68 */ | ||
29 | u8 rsvd2[192]; /* DWORD 69 */ | ||
30 | u8 rsvd3[192]; /* DWORD 75 */ | ||
31 | u8 rsvd4[32]; /* DWORD 81 */ | ||
32 | u8 rsvd5[32]; /* DWORD 82 */ | ||
33 | u8 rsvd6[32]; /* DWORD 83 */ | ||
34 | u8 rsvd7[32]; /* DWORD 84 */ | ||
35 | u8 rsvd8[32]; /* DWORD 85 */ | ||
36 | u8 rsvd9[32]; /* DWORD 86 */ | ||
37 | u8 rsvd10[32]; /* DWORD 87 */ | ||
38 | u8 rsvd11[32]; /* DWORD 88 */ | ||
39 | u8 rsvd12[32]; /* DWORD 89 */ | ||
40 | u8 rsvd13[32]; /* DWORD 90 */ | ||
41 | u8 rsvd14[32]; /* DWORD 91 */ | ||
42 | u8 rsvd15[32]; /* DWORD 92 */ | ||
43 | u8 rsvd16[32]; /* DWORD 93 */ | ||
44 | u8 rsvd17[32]; /* DWORD 94 */ | ||
45 | u8 rsvd18[32]; /* DWORD 95 */ | ||
46 | u8 rsvd19[32]; /* DWORD 96 */ | ||
47 | u8 rsvd20[32]; /* DWORD 97 */ | ||
48 | u8 rsvd21[32]; /* DWORD 98 */ | ||
49 | u8 rsvd22[32]; /* DWORD 99 */ | ||
50 | u8 rsvd23[32]; /* DWORD 100 */ | ||
51 | u8 rsvd24[32]; /* DWORD 101 */ | ||
52 | u8 rsvd25[32]; /* DWORD 102 */ | ||
53 | u8 rsvd26[32]; /* DWORD 103 */ | ||
54 | u8 rsvd27[32]; /* DWORD 104 */ | ||
55 | u8 rsvd28[96]; /* DWORD 105 */ | ||
56 | u8 rsvd29[32]; /* DWORD 108 */ | ||
57 | u8 rsvd30[32]; /* DWORD 109 */ | ||
58 | u8 rsvd31[32]; /* DWORD 110 */ | ||
59 | u8 rsvd32[32]; /* DWORD 111 */ | ||
60 | u8 rsvd33[32]; /* DWORD 112 */ | ||
61 | u8 rsvd34[96]; /* DWORD 113 */ | ||
62 | u8 rsvd35[32]; /* DWORD 116 */ | ||
63 | u8 rsvd36[32]; /* DWORD 117 */ | ||
64 | u8 rsvd37[32]; /* DWORD 118 */ | ||
65 | u8 rsvd38[32]; /* DWORD 119 */ | ||
66 | u8 rsvd39[32]; /* DWORD 120 */ | ||
67 | u8 rsvd40[32]; /* DWORD 121 */ | ||
68 | u8 rsvd41[134][32]; /* DWORD 122 */ | ||
69 | } __packed; | ||
70 | struct MPU_CSRMAP_AMAP { | ||
71 | u32 dw[256]; | ||
72 | }; | ||
73 | |||
74 | #endif /* __mpu_amap_h__ */ | ||
diff --git a/drivers/staging/benet/mpu_context.h b/drivers/staging/benet/mpu_context.h deleted file mode 100644 index 8ce90f9c46c2..000000000000 --- a/drivers/staging/benet/mpu_context.h +++ /dev/null | |||
@@ -1,46 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | /* | ||
18 | * Autogenerated by srcgen version: 0127 | ||
19 | */ | ||
20 | #ifndef __mpu_context_amap_h__ | ||
21 | #define __mpu_context_amap_h__ | ||
22 | |||
23 | /* | ||
24 | * Management command and control ring context. The MPUs BTLR_CTRL1 CSR | ||
25 | * controls the writeback behavior of the producer and consumer index values. | ||
26 | */ | ||
27 | struct BE_MCC_RING_CONTEXT_AMAP { | ||
28 | u8 con_index[16]; /* DWORD 0 */ | ||
29 | u8 ring_size[4]; /* DWORD 0 */ | ||
30 | u8 cq_id[11]; /* DWORD 0 */ | ||
31 | u8 rsvd0; /* DWORD 0 */ | ||
32 | u8 prod_index[16]; /* DWORD 1 */ | ||
33 | u8 pdid[15]; /* DWORD 1 */ | ||
34 | u8 invalid; /* DWORD 1 */ | ||
35 | u8 cmd_pending_current[7]; /* DWORD 2 */ | ||
36 | u8 rsvd1[25]; /* DWORD 2 */ | ||
37 | u8 hpi_port_cq_id[11]; /* DWORD 3 */ | ||
38 | u8 rsvd2[5]; /* DWORD 3 */ | ||
39 | u8 cmd_pending_max[7]; /* DWORD 3 */ | ||
40 | u8 rsvd3[9]; /* DWORD 3 */ | ||
41 | } __packed; | ||
42 | struct MCC_RING_CONTEXT_AMAP { | ||
43 | u32 dw[4]; | ||
44 | }; | ||
45 | |||
46 | #endif /* __mpu_context_amap_h__ */ | ||
diff --git a/drivers/staging/benet/pcicfg.h b/drivers/staging/benet/pcicfg.h deleted file mode 100644 index 7c15684adf4a..000000000000 --- a/drivers/staging/benet/pcicfg.h +++ /dev/null | |||
@@ -1,825 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | /* | ||
18 | * Autogenerated by srcgen version: 0127 | ||
19 | */ | ||
20 | #ifndef __pcicfg_amap_h__ | ||
21 | #define __pcicfg_amap_h__ | ||
22 | |||
23 | /* Vendor and Device ID Register. */ | ||
24 | struct BE_PCICFG_ID_CSR_AMAP { | ||
25 | u8 vendorid[16]; /* DWORD 0 */ | ||
26 | u8 deviceid[16]; /* DWORD 0 */ | ||
27 | } __packed; | ||
28 | struct PCICFG_ID_CSR_AMAP { | ||
29 | u32 dw[1]; | ||
30 | }; | ||
31 | |||
32 | /* IO Bar Register. */ | ||
33 | struct BE_PCICFG_IOBAR_CSR_AMAP { | ||
34 | u8 iospace; /* DWORD 0 */ | ||
35 | u8 rsvd0[7]; /* DWORD 0 */ | ||
36 | u8 iobar[24]; /* DWORD 0 */ | ||
37 | } __packed; | ||
38 | struct PCICFG_IOBAR_CSR_AMAP { | ||
39 | u32 dw[1]; | ||
40 | }; | ||
41 | |||
42 | /* Memory BAR 0 Register. */ | ||
43 | struct BE_PCICFG_MEMBAR0_CSR_AMAP { | ||
44 | u8 memspace; /* DWORD 0 */ | ||
45 | u8 type[2]; /* DWORD 0 */ | ||
46 | u8 pf; /* DWORD 0 */ | ||
47 | u8 rsvd0[10]; /* DWORD 0 */ | ||
48 | u8 membar0[18]; /* DWORD 0 */ | ||
49 | } __packed; | ||
50 | struct PCICFG_MEMBAR0_CSR_AMAP { | ||
51 | u32 dw[1]; | ||
52 | }; | ||
53 | |||
54 | /* Memory BAR 1 - Low Address Register. */ | ||
55 | struct BE_PCICFG_MEMBAR1_LO_CSR_AMAP { | ||
56 | u8 memspace; /* DWORD 0 */ | ||
57 | u8 type[2]; /* DWORD 0 */ | ||
58 | u8 pf; /* DWORD 0 */ | ||
59 | u8 rsvd0[13]; /* DWORD 0 */ | ||
60 | u8 membar1lo[15]; /* DWORD 0 */ | ||
61 | } __packed; | ||
62 | struct PCICFG_MEMBAR1_LO_CSR_AMAP { | ||
63 | u32 dw[1]; | ||
64 | }; | ||
65 | |||
66 | /* Memory BAR 1 - High Address Register. */ | ||
67 | struct BE_PCICFG_MEMBAR1_HI_CSR_AMAP { | ||
68 | u8 membar1hi[32]; /* DWORD 0 */ | ||
69 | } __packed; | ||
70 | struct PCICFG_MEMBAR1_HI_CSR_AMAP { | ||
71 | u32 dw[1]; | ||
72 | }; | ||
73 | |||
74 | /* Memory BAR 2 - Low Address Register. */ | ||
75 | struct BE_PCICFG_MEMBAR2_LO_CSR_AMAP { | ||
76 | u8 memspace; /* DWORD 0 */ | ||
77 | u8 type[2]; /* DWORD 0 */ | ||
78 | u8 pf; /* DWORD 0 */ | ||
79 | u8 rsvd0[17]; /* DWORD 0 */ | ||
80 | u8 membar2lo[11]; /* DWORD 0 */ | ||
81 | } __packed; | ||
82 | struct PCICFG_MEMBAR2_LO_CSR_AMAP { | ||
83 | u32 dw[1]; | ||
84 | }; | ||
85 | |||
86 | /* Memory BAR 2 - High Address Register. */ | ||
87 | struct BE_PCICFG_MEMBAR2_HI_CSR_AMAP { | ||
88 | u8 membar2hi[32]; /* DWORD 0 */ | ||
89 | } __packed; | ||
90 | struct PCICFG_MEMBAR2_HI_CSR_AMAP { | ||
91 | u32 dw[1]; | ||
92 | }; | ||
93 | |||
94 | /* Subsystem Vendor and ID (Function 0) Register. */ | ||
95 | struct BE_PCICFG_SUBSYSTEM_ID_F0_CSR_AMAP { | ||
96 | u8 subsys_vendor_id[16]; /* DWORD 0 */ | ||
97 | u8 subsys_id[16]; /* DWORD 0 */ | ||
98 | } __packed; | ||
99 | struct PCICFG_SUBSYSTEM_ID_F0_CSR_AMAP { | ||
100 | u32 dw[1]; | ||
101 | }; | ||
102 | |||
103 | /* Subsystem Vendor and ID (Function 1) Register. */ | ||
104 | struct BE_PCICFG_SUBSYSTEM_ID_F1_CSR_AMAP { | ||
105 | u8 subsys_vendor_id[16]; /* DWORD 0 */ | ||
106 | u8 subsys_id[16]; /* DWORD 0 */ | ||
107 | } __packed; | ||
108 | struct PCICFG_SUBSYSTEM_ID_F1_CSR_AMAP { | ||
109 | u32 dw[1]; | ||
110 | }; | ||
111 | |||
112 | /* Semaphore Register. */ | ||
113 | struct BE_PCICFG_SEMAPHORE_CSR_AMAP { | ||
114 | u8 locked; /* DWORD 0 */ | ||
115 | u8 rsvd0[31]; /* DWORD 0 */ | ||
116 | } __packed; | ||
117 | struct PCICFG_SEMAPHORE_CSR_AMAP { | ||
118 | u32 dw[1]; | ||
119 | }; | ||
120 | |||
121 | /* Soft Reset Register. */ | ||
122 | struct BE_PCICFG_SOFT_RESET_CSR_AMAP { | ||
123 | u8 rsvd0[7]; /* DWORD 0 */ | ||
124 | u8 softreset; /* DWORD 0 */ | ||
125 | u8 rsvd1[16]; /* DWORD 0 */ | ||
126 | u8 nec_ll_rcvdetect_i[8]; /* DWORD 0 */ | ||
127 | } __packed; | ||
128 | struct PCICFG_SOFT_RESET_CSR_AMAP { | ||
129 | u32 dw[1]; | ||
130 | }; | ||
131 | |||
132 | /* Unrecoverable Error Status (Low) Register. Each bit corresponds to | ||
133 | * an internal Unrecoverable Error. These are set by hardware and may be | ||
134 | * cleared by writing a one to the respective bit(s) to be cleared. Any | ||
135 | * bit being set that is also unmasked will result in Unrecoverable Error | ||
136 | * interrupt notification to the host CPU and/or Server Management chip | ||
137 | * and the transitioning of BladeEngine to an Offline state. | ||
138 | */ | ||
139 | struct BE_PCICFG_UE_STATUS_LOW_CSR_AMAP { | ||
140 | u8 cev_ue_status; /* DWORD 0 */ | ||
141 | u8 ctx_ue_status; /* DWORD 0 */ | ||
142 | u8 dbuf_ue_status; /* DWORD 0 */ | ||
143 | u8 erx_ue_status; /* DWORD 0 */ | ||
144 | u8 host_ue_status; /* DWORD 0 */ | ||
145 | u8 mpu_ue_status; /* DWORD 0 */ | ||
146 | u8 ndma_ue_status; /* DWORD 0 */ | ||
147 | u8 ptc_ue_status; /* DWORD 0 */ | ||
148 | u8 rdma_ue_status; /* DWORD 0 */ | ||
149 | u8 rxf_ue_status; /* DWORD 0 */ | ||
150 | u8 rxips_ue_status; /* DWORD 0 */ | ||
151 | u8 rxulp0_ue_status; /* DWORD 0 */ | ||
152 | u8 rxulp1_ue_status; /* DWORD 0 */ | ||
153 | u8 rxulp2_ue_status; /* DWORD 0 */ | ||
154 | u8 tim_ue_status; /* DWORD 0 */ | ||
155 | u8 tpost_ue_status; /* DWORD 0 */ | ||
156 | u8 tpre_ue_status; /* DWORD 0 */ | ||
157 | u8 txips_ue_status; /* DWORD 0 */ | ||
158 | u8 txulp0_ue_status; /* DWORD 0 */ | ||
159 | u8 txulp1_ue_status; /* DWORD 0 */ | ||
160 | u8 uc_ue_status; /* DWORD 0 */ | ||
161 | u8 wdma_ue_status; /* DWORD 0 */ | ||
162 | u8 txulp2_ue_status; /* DWORD 0 */ | ||
163 | u8 host1_ue_status; /* DWORD 0 */ | ||
164 | u8 p0_ob_link_ue_status; /* DWORD 0 */ | ||
165 | u8 p1_ob_link_ue_status; /* DWORD 0 */ | ||
166 | u8 host_gpio_ue_status; /* DWORD 0 */ | ||
167 | u8 mbox_netw_ue_status; /* DWORD 0 */ | ||
168 | u8 mbox_stor_ue_status; /* DWORD 0 */ | ||
169 | u8 axgmac0_ue_status; /* DWORD 0 */ | ||
170 | u8 axgmac1_ue_status; /* DWORD 0 */ | ||
171 | u8 mpu_intpend_ue_status; /* DWORD 0 */ | ||
172 | } __packed; | ||
173 | struct PCICFG_UE_STATUS_LOW_CSR_AMAP { | ||
174 | u32 dw[1]; | ||
175 | }; | ||
176 | |||
177 | /* Unrecoverable Error Status (High) Register. Each bit corresponds to | ||
178 | * an internal Unrecoverable Error. These are set by hardware and may be | ||
179 | * cleared by writing a one to the respective bit(s) to be cleared. Any | ||
180 | * bit being set that is also unmasked will result in Unrecoverable Error | ||
181 | * interrupt notification to the host CPU and/or Server Management chip; | ||
182 | * and the transitioning of BladeEngine to an Offline state. | ||
183 | */ | ||
184 | struct BE_PCICFG_UE_STATUS_HI_CSR_AMAP { | ||
185 | u8 jtag_ue_status; /* DWORD 0 */ | ||
186 | u8 lpcmemhost_ue_status; /* DWORD 0 */ | ||
187 | u8 mgmt_mac_ue_status; /* DWORD 0 */ | ||
188 | u8 mpu_iram_ue_status; /* DWORD 0 */ | ||
189 | u8 pcs0online_ue_status; /* DWORD 0 */ | ||
190 | u8 pcs1online_ue_status; /* DWORD 0 */ | ||
191 | u8 pctl0_ue_status; /* DWORD 0 */ | ||
192 | u8 pctl1_ue_status; /* DWORD 0 */ | ||
193 | u8 pmem_ue_status; /* DWORD 0 */ | ||
194 | u8 rr_ue_status; /* DWORD 0 */ | ||
195 | u8 rxpp_ue_status; /* DWORD 0 */ | ||
196 | u8 txpb_ue_status; /* DWORD 0 */ | ||
197 | u8 txp_ue_status; /* DWORD 0 */ | ||
198 | u8 xaui_ue_status; /* DWORD 0 */ | ||
199 | u8 arm_ue_status; /* DWORD 0 */ | ||
200 | u8 ipc_ue_status; /* DWORD 0 */ | ||
201 | u8 rsvd0[16]; /* DWORD 0 */ | ||
202 | } __packed; | ||
203 | struct PCICFG_UE_STATUS_HI_CSR_AMAP { | ||
204 | u32 dw[1]; | ||
205 | }; | ||
206 | |||
207 | /* Unrecoverable Error Mask (Low) Register. Each bit, when set to one, | ||
208 | * will mask the associated Unrecoverable Error status bit from notification | ||
209 | * of Unrecoverable Error to the host CPU and/or Server Management chip and the | ||
210 | * transitioning of all BladeEngine units to an Offline state. | ||
211 | */ | ||
212 | struct BE_PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP { | ||
213 | u8 cev_ue_mask; /* DWORD 0 */ | ||
214 | u8 ctx_ue_mask; /* DWORD 0 */ | ||
215 | u8 dbuf_ue_mask; /* DWORD 0 */ | ||
216 | u8 erx_ue_mask; /* DWORD 0 */ | ||
217 | u8 host_ue_mask; /* DWORD 0 */ | ||
218 | u8 mpu_ue_mask; /* DWORD 0 */ | ||
219 | u8 ndma_ue_mask; /* DWORD 0 */ | ||
220 | u8 ptc_ue_mask; /* DWORD 0 */ | ||
221 | u8 rdma_ue_mask; /* DWORD 0 */ | ||
222 | u8 rxf_ue_mask; /* DWORD 0 */ | ||
223 | u8 rxips_ue_mask; /* DWORD 0 */ | ||
224 | u8 rxulp0_ue_mask; /* DWORD 0 */ | ||
225 | u8 rxulp1_ue_mask; /* DWORD 0 */ | ||
226 | u8 rxulp2_ue_mask; /* DWORD 0 */ | ||
227 | u8 tim_ue_mask; /* DWORD 0 */ | ||
228 | u8 tpost_ue_mask; /* DWORD 0 */ | ||
229 | u8 tpre_ue_mask; /* DWORD 0 */ | ||
230 | u8 txips_ue_mask; /* DWORD 0 */ | ||
231 | u8 txulp0_ue_mask; /* DWORD 0 */ | ||
232 | u8 txulp1_ue_mask; /* DWORD 0 */ | ||
233 | u8 uc_ue_mask; /* DWORD 0 */ | ||
234 | u8 wdma_ue_mask; /* DWORD 0 */ | ||
235 | u8 txulp2_ue_mask; /* DWORD 0 */ | ||
236 | u8 host1_ue_mask; /* DWORD 0 */ | ||
237 | u8 p0_ob_link_ue_mask; /* DWORD 0 */ | ||
238 | u8 p1_ob_link_ue_mask; /* DWORD 0 */ | ||
239 | u8 host_gpio_ue_mask; /* DWORD 0 */ | ||
240 | u8 mbox_netw_ue_mask; /* DWORD 0 */ | ||
241 | u8 mbox_stor_ue_mask; /* DWORD 0 */ | ||
242 | u8 axgmac0_ue_mask; /* DWORD 0 */ | ||
243 | u8 axgmac1_ue_mask; /* DWORD 0 */ | ||
244 | u8 mpu_intpend_ue_mask; /* DWORD 0 */ | ||
245 | } __packed; | ||
246 | struct PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP { | ||
247 | u32 dw[1]; | ||
248 | }; | ||
249 | |||
250 | /* Unrecoverable Error Mask (High) Register. Each bit, when set to one, | ||
251 | * will mask the associated Unrecoverable Error status bit from notification | ||
252 | * of Unrecoverable Error to the host CPU and/or Server Management chip and the | ||
253 | * transitioning of all BladeEngine units to an Offline state. | ||
254 | */ | ||
255 | struct BE_PCICFG_UE_STATUS_HI_MASK_CSR_AMAP { | ||
256 | u8 jtag_ue_mask; /* DWORD 0 */ | ||
257 | u8 lpcmemhost_ue_mask; /* DWORD 0 */ | ||
258 | u8 mgmt_mac_ue_mask; /* DWORD 0 */ | ||
259 | u8 mpu_iram_ue_mask; /* DWORD 0 */ | ||
260 | u8 pcs0online_ue_mask; /* DWORD 0 */ | ||
261 | u8 pcs1online_ue_mask; /* DWORD 0 */ | ||
262 | u8 pctl0_ue_mask; /* DWORD 0 */ | ||
263 | u8 pctl1_ue_mask; /* DWORD 0 */ | ||
264 | u8 pmem_ue_mask; /* DWORD 0 */ | ||
265 | u8 rr_ue_mask; /* DWORD 0 */ | ||
266 | u8 rxpp_ue_mask; /* DWORD 0 */ | ||
267 | u8 txpb_ue_mask; /* DWORD 0 */ | ||
268 | u8 txp_ue_mask; /* DWORD 0 */ | ||
269 | u8 xaui_ue_mask; /* DWORD 0 */ | ||
270 | u8 arm_ue_mask; /* DWORD 0 */ | ||
271 | u8 ipc_ue_mask; /* DWORD 0 */ | ||
272 | u8 rsvd0[16]; /* DWORD 0 */ | ||
273 | } __packed; | ||
274 | struct PCICFG_UE_STATUS_HI_MASK_CSR_AMAP { | ||
275 | u32 dw[1]; | ||
276 | }; | ||
277 | |||
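The two status registers above are write-one-to-clear, so acknowledging latched errors amounts to writing back whatever was read. A hedged sketch follows; the config-space offsets are an inference from the DWORD 40/41 positions of ue_status_low/ue_status_hi in the PCICFG0 map further down, not values taken from driver code:

#include <linux/pci.h>

#define PCICFG_UE_STATUS_LOW_OFF	0xa0	/* DWORD 40, assumed */
#define PCICFG_UE_STATUS_HI_OFF		0xa4	/* DWORD 41, assumed */

static void be_ack_ue_status(struct pci_dev *pdev)
{
	u32 lo, hi;

	pci_read_config_dword(pdev, PCICFG_UE_STATUS_LOW_OFF, &lo);
	pci_read_config_dword(pdev, PCICFG_UE_STATUS_HI_OFF, &hi);

	/* Writing a one clears the corresponding latched error bit;
	 * zero bits leave the register untouched.
	 */
	if (lo)
		pci_write_config_dword(pdev, PCICFG_UE_STATUS_LOW_OFF, lo);
	if (hi)
		pci_write_config_dword(pdev, PCICFG_UE_STATUS_HI_OFF, hi);
}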
278 | /* Online Control Register 0. This register controls various units within | ||
279 | * BladeEngine being in an Online or Offline state. | ||
280 | */ | ||
281 | struct BE_PCICFG_ONLINE0_CSR_AMAP { | ||
282 | u8 cev_online; /* DWORD 0 */ | ||
283 | u8 ctx_online; /* DWORD 0 */ | ||
284 | u8 dbuf_online; /* DWORD 0 */ | ||
285 | u8 erx_online; /* DWORD 0 */ | ||
286 | u8 host_online; /* DWORD 0 */ | ||
287 | u8 mpu_online; /* DWORD 0 */ | ||
288 | u8 ndma_online; /* DWORD 0 */ | ||
289 | u8 ptc_online; /* DWORD 0 */ | ||
290 | u8 rdma_online; /* DWORD 0 */ | ||
291 | u8 rxf_online; /* DWORD 0 */ | ||
292 | u8 rxips_online; /* DWORD 0 */ | ||
293 | u8 rxulp0_online; /* DWORD 0 */ | ||
294 | u8 rxulp1_online; /* DWORD 0 */ | ||
295 | u8 rxulp2_online; /* DWORD 0 */ | ||
296 | u8 tim_online; /* DWORD 0 */ | ||
297 | u8 tpost_online; /* DWORD 0 */ | ||
298 | u8 tpre_online; /* DWORD 0 */ | ||
299 | u8 txips_online; /* DWORD 0 */ | ||
300 | u8 txulp0_online; /* DWORD 0 */ | ||
301 | u8 txulp1_online; /* DWORD 0 */ | ||
302 | u8 uc_online; /* DWORD 0 */ | ||
303 | u8 wdma_online; /* DWORD 0 */ | ||
304 | u8 txulp2_online; /* DWORD 0 */ | ||
305 | u8 host1_online; /* DWORD 0 */ | ||
306 | u8 p0_ob_link_online; /* DWORD 0 */ | ||
307 | u8 p1_ob_link_online; /* DWORD 0 */ | ||
308 | u8 host_gpio_online; /* DWORD 0 */ | ||
309 | u8 mbox_netw_online; /* DWORD 0 */ | ||
310 | u8 mbox_stor_online; /* DWORD 0 */ | ||
311 | u8 axgmac0_online; /* DWORD 0 */ | ||
312 | u8 axgmac1_online; /* DWORD 0 */ | ||
313 | u8 mpu_intpend_online; /* DWORD 0 */ | ||
314 | } __packed; | ||
315 | struct PCICFG_ONLINE0_CSR_AMAP { | ||
316 | u32 dw[1]; | ||
317 | }; | ||
318 | |||
319 | /* Online Control Register 1. This register controls various units within | ||
320 | * BladeEngine being in an Online or Offline state. | ||
321 | */ | ||
322 | struct BE_PCICFG_ONLINE1_CSR_AMAP { | ||
323 | u8 jtag_online; /* DWORD 0 */ | ||
324 | u8 lpcmemhost_online; /* DWORD 0 */ | ||
325 | u8 mgmt_mac_online; /* DWORD 0 */ | ||
326 | u8 mpu_iram_online; /* DWORD 0 */ | ||
327 | u8 pcs0online_online; /* DWORD 0 */ | ||
328 | u8 pcs1online_online; /* DWORD 0 */ | ||
329 | u8 pctl0_online; /* DWORD 0 */ | ||
330 | u8 pctl1_online; /* DWORD 0 */ | ||
331 | u8 pmem_online; /* DWORD 0 */ | ||
332 | u8 rr_online; /* DWORD 0 */ | ||
333 | u8 rxpp_online; /* DWORD 0 */ | ||
334 | u8 txpb_online; /* DWORD 0 */ | ||
335 | u8 txp_online; /* DWORD 0 */ | ||
336 | u8 xaui_online; /* DWORD 0 */ | ||
337 | u8 arm_online; /* DWORD 0 */ | ||
338 | u8 ipc_online; /* DWORD 0 */ | ||
339 | u8 rsvd0[16]; /* DWORD 0 */ | ||
340 | } __packed; | ||
341 | struct PCICFG_ONLINE1_CSR_AMAP { | ||
342 | u32 dw[1]; | ||
343 | }; | ||
344 | |||
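As a small illustration of the ONLINE0/ONLINE1 layouts above, a unit's state can be tested by its bit position; this assumes a set bit means the unit is Online, which the comments do not state explicitly:

static inline int be_unit_online(u32 online0_dw, unsigned int bit)
{
	/* e.g. bit 4 is host_online in the ONLINE0 layout above */
	return (online0_dw >> bit) & 0x1;
}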
345 | /* Host Timer Register. */ | ||
346 | struct BE_PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP { | ||
347 | u8 hosttimer[24]; /* DWORD 0 */ | ||
348 | u8 hostintr; /* DWORD 0 */ | ||
349 | u8 rsvd0[7]; /* DWORD 0 */ | ||
350 | } __packed; | ||
351 | struct PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP { | ||
352 | u32 dw[1]; | ||
353 | }; | ||
354 | |||
355 | /* Scratchpad Register (for software use). */ | ||
356 | struct BE_PCICFG_SCRATCHPAD_CSR_AMAP { | ||
357 | u8 scratchpad[32]; /* DWORD 0 */ | ||
358 | } __packed; | ||
359 | struct PCICFG_SCRATCHPAD_CSR_AMAP { | ||
360 | u32 dw[1]; | ||
361 | }; | ||
362 | |||
363 | /* PCI Express Capabilities Register. */ | ||
364 | struct BE_PCICFG_PCIE_CAP_CSR_AMAP { | ||
365 | u8 capid[8]; /* DWORD 0 */ | ||
366 | u8 nextcap[8]; /* DWORD 0 */ | ||
367 | u8 capver[4]; /* DWORD 0 */ | ||
368 | u8 devport[4]; /* DWORD 0 */ | ||
369 | u8 rsvd0[6]; /* DWORD 0 */ | ||
370 | u8 rsvd1[2]; /* DWORD 0 */ | ||
371 | } __packed; | ||
372 | struct PCICFG_PCIE_CAP_CSR_AMAP { | ||
373 | u32 dw[1]; | ||
374 | }; | ||
375 | |||
376 | /* PCI Express Device Capabilities Register. */ | ||
377 | struct BE_PCICFG_PCIE_DEVCAP_CSR_AMAP { | ||
378 | u8 payload[3]; /* DWORD 0 */ | ||
379 | u8 rsvd0[3]; /* DWORD 0 */ | ||
380 | u8 lo_lat[3]; /* DWORD 0 */ | ||
381 | u8 l1_lat[3]; /* DWORD 0 */ | ||
382 | u8 rsvd1[3]; /* DWORD 0 */ | ||
383 | u8 rsvd2[3]; /* DWORD 0 */ | ||
384 | u8 pwr_value[8]; /* DWORD 0 */ | ||
385 | u8 pwr_scale[2]; /* DWORD 0 */ | ||
386 | u8 rsvd3[4]; /* DWORD 0 */ | ||
387 | } __packed; | ||
388 | struct PCICFG_PCIE_DEVCAP_CSR_AMAP { | ||
389 | u32 dw[1]; | ||
390 | }; | ||
391 | |||
392 | /* PCI Express Device Control/Status Registers. */ | ||
393 | struct BE_PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP { | ||
394 | u8 CorrErrReportEn; /* DWORD 0 */ | ||
395 | u8 NonFatalErrReportEn; /* DWORD 0 */ | ||
396 | u8 FatalErrReportEn; /* DWORD 0 */ | ||
397 | u8 UnsuppReqReportEn; /* DWORD 0 */ | ||
398 | u8 EnableRelaxOrder; /* DWORD 0 */ | ||
399 | u8 Max_Payload_Size[3]; /* DWORD 0 */ | ||
400 | u8 ExtendTagFieldEnable; /* DWORD 0 */ | ||
401 | u8 PhantomFnEnable; /* DWORD 0 */ | ||
402 | u8 AuxPwrPMEnable; /* DWORD 0 */ | ||
403 | u8 EnableNoSnoop; /* DWORD 0 */ | ||
404 | u8 Max_Read_Req_Size[3]; /* DWORD 0 */ | ||
405 | u8 rsvd0; /* DWORD 0 */ | ||
406 | u8 CorrErrDetect; /* DWORD 0 */ | ||
407 | u8 NonFatalErrDetect; /* DWORD 0 */ | ||
408 | u8 FatalErrDetect; /* DWORD 0 */ | ||
409 | u8 UnsuppReqDetect; /* DWORD 0 */ | ||
410 | u8 AuxPwrDetect; /* DWORD 0 */ | ||
411 | u8 TransPending; /* DWORD 0 */ | ||
412 | u8 rsvd1[10]; /* DWORD 0 */ | ||
413 | } __packed; | ||
414 | struct PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP { | ||
415 | u32 dw[1]; | ||
416 | }; | ||
417 | |||
418 | /* PCI Express Link Capabilities Register. */ | ||
419 | struct BE_PCICFG_PCIE_LINK_CAP_CSR_AMAP { | ||
420 | u8 MaxLinkSpeed[4]; /* DWORD 0 */ | ||
421 | u8 MaxLinkWidth[6]; /* DWORD 0 */ | ||
422 | u8 ASPMSupport[2]; /* DWORD 0 */ | ||
423 | u8 L0sExitLat[3]; /* DWORD 0 */ | ||
424 | u8 L1ExitLat[3]; /* DWORD 0 */ | ||
425 | u8 rsvd0[6]; /* DWORD 0 */ | ||
426 | u8 PortNum[8]; /* DWORD 0 */ | ||
427 | } __packed; | ||
428 | struct PCICFG_PCIE_LINK_CAP_CSR_AMAP { | ||
429 | u32 dw[1]; | ||
430 | }; | ||
431 | |||
432 | /* PCI Express Link Status Register. */ | ||
433 | struct BE_PCICFG_PCIE_LINK_STATUS_CSR_AMAP { | ||
434 | u8 ASPMCtl[2]; /* DWORD 0 */ | ||
435 | u8 rsvd0; /* DWORD 0 */ | ||
436 | u8 ReadCmplBndry; /* DWORD 0 */ | ||
437 | u8 LinkDisable; /* DWORD 0 */ | ||
438 | u8 RetrainLink; /* DWORD 0 */ | ||
439 | u8 CommonClkConfig; /* DWORD 0 */ | ||
440 | u8 ExtendSync; /* DWORD 0 */ | ||
441 | u8 rsvd1[8]; /* DWORD 0 */ | ||
442 | u8 LinkSpeed[4]; /* DWORD 0 */ | ||
443 | u8 NegLinkWidth[6]; /* DWORD 0 */ | ||
444 | u8 LinkTrainErr; /* DWORD 0 */ | ||
445 | u8 LinkTrain; /* DWORD 0 */ | ||
446 | u8 SlotClkConfig; /* DWORD 0 */ | ||
447 | u8 rsvd2[3]; /* DWORD 0 */ | ||
448 | } __packed; | ||
449 | struct PCICFG_PCIE_LINK_STATUS_CSR_AMAP { | ||
450 | u32 dw[1]; | ||
451 | }; | ||
452 | |||
453 | /* PCI Express MSI Configuration Register. */ | ||
454 | struct BE_PCICFG_MSI_CSR_AMAP { | ||
455 | u8 capid[8]; /* DWORD 0 */ | ||
456 | u8 nextptr[8]; /* DWORD 0 */ | ||
457 | u8 tablesize[11]; /* DWORD 0 */ | ||
458 | u8 rsvd0[3]; /* DWORD 0 */ | ||
459 | u8 funcmask; /* DWORD 0 */ | ||
460 | u8 en; /* DWORD 0 */ | ||
461 | } __packed; | ||
462 | struct PCICFG_MSI_CSR_AMAP { | ||
463 | u32 dw[1]; | ||
464 | }; | ||
465 | |||
466 | /* MSI-X Table Offset Register. */ | ||
467 | struct BE_PCICFG_MSIX_TABLE_CSR_AMAP { | ||
468 | u8 tablebir[3]; /* DWORD 0 */ | ||
469 | u8 offset[29]; /* DWORD 0 */ | ||
470 | } __packed; | ||
471 | struct PCICFG_MSIX_TABLE_CSR_AMAP { | ||
472 | u32 dw[1]; | ||
473 | }; | ||
474 | |||
475 | /* MSI-X PBA Offset Register. */ | ||
476 | struct BE_PCICFG_MSIX_PBA_CSR_AMAP { | ||
477 | u8 pbabir[3]; /* DWORD 0 */ | ||
478 | u8 offset[29]; /* DWORD 0 */ | ||
479 | } __packed; | ||
480 | struct PCICFG_MSIX_PBA_CSR_AMAP { | ||
481 | u32 dw[1]; | ||
482 | }; | ||
483 | |||
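Both of the registers above pack a BAR indicator into the low three bits and an 8-byte-aligned offset into the remaining bits, mirroring the standard MSI-X capability layout. A small illustrative decode (helper name is the editor's, not from the driver):

static inline void pcicfg_msix_decode(u32 reg, u32 *bir, u32 *byte_offset)
{
	*bir = reg & 0x7;		/* tablebir / pbabir field */
	*byte_offset = reg & ~0x7u;	/* offset field, already a byte offset */
}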
484 | /* PCI Express MSI-X Message Vector Control Register. */ | ||
485 | struct BE_PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP { | ||
486 | u8 vector_control; /* DWORD 0 */ | ||
487 | u8 rsvd0[31]; /* DWORD 0 */ | ||
488 | } __packed; | ||
489 | struct PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP { | ||
490 | u32 dw[1]; | ||
491 | }; | ||
492 | |||
493 | /* PCI Express MSI-X Message Data Register. */ | ||
494 | struct BE_PCICFG_MSIX_MSG_DATA_CSR_AMAP { | ||
495 | u8 data[16]; /* DWORD 0 */ | ||
496 | u8 rsvd0[16]; /* DWORD 0 */ | ||
497 | } __packed; | ||
498 | struct PCICFG_MSIX_MSG_DATA_CSR_AMAP { | ||
499 | u32 dw[1]; | ||
500 | }; | ||
501 | |||
502 | /* PCI Express MSI-X Message Address Register - High Part. */ | ||
503 | struct BE_PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP { | ||
504 | u8 addr[32]; /* DWORD 0 */ | ||
505 | } __packed; | ||
506 | struct PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP { | ||
507 | u32 dw[1]; | ||
508 | }; | ||
509 | |||
510 | /* PCI Express MSI-X Message Address Register - Low Part. */ | ||
511 | struct BE_PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP { | ||
512 | u8 rsvd0[2]; /* DWORD 0 */ | ||
513 | u8 addr[30]; /* DWORD 0 */ | ||
514 | } __packed; | ||
515 | struct PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP { | ||
516 | u32 dw[1]; | ||
517 | }; | ||
518 | |||
519 | struct BE_PCICFG_ANON_18_RSVD_AMAP { | ||
520 | u8 rsvd0[32]; /* DWORD 0 */ | ||
521 | } __packed; | ||
522 | struct PCICFG_ANON_18_RSVD_AMAP { | ||
523 | u32 dw[1]; | ||
524 | }; | ||
525 | |||
526 | struct BE_PCICFG_ANON_19_RSVD_AMAP { | ||
527 | u8 rsvd0[32]; /* DWORD 0 */ | ||
528 | } __packed; | ||
529 | struct PCICFG_ANON_19_RSVD_AMAP { | ||
530 | u32 dw[1]; | ||
531 | }; | ||
532 | |||
533 | struct BE_PCICFG_ANON_20_RSVD_AMAP { | ||
534 | u8 rsvd0[32]; /* DWORD 0 */ | ||
535 | u8 rsvd1[25][32]; /* DWORD 1 */ | ||
536 | } __packed; | ||
537 | struct PCICFG_ANON_20_RSVD_AMAP { | ||
538 | u32 dw[26]; | ||
539 | }; | ||
540 | |||
541 | struct BE_PCICFG_ANON_21_RSVD_AMAP { | ||
542 | u8 rsvd0[32]; /* DWORD 0 */ | ||
543 | u8 rsvd1[1919][32]; /* DWORD 1 */ | ||
544 | } __packed; | ||
545 | struct PCICFG_ANON_21_RSVD_AMAP { | ||
546 | u32 dw[1920]; | ||
547 | }; | ||
548 | |||
549 | struct BE_PCICFG_ANON_22_MESSAGE_AMAP { | ||
550 | struct BE_PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP vec_ctrl; | ||
551 | struct BE_PCICFG_MSIX_MSG_DATA_CSR_AMAP msg_data; | ||
552 | struct BE_PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP addr_hi; | ||
553 | struct BE_PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP addr_low; | ||
554 | } __packed; | ||
555 | struct PCICFG_ANON_22_MESSAGE_AMAP { | ||
556 | u32 dw[4]; | ||
557 | }; | ||
558 | |||
559 | struct BE_PCICFG_ANON_23_RSVD_AMAP { | ||
560 | u8 rsvd0[32]; /* DWORD 0 */ | ||
561 | u8 rsvd1[895][32]; /* DWORD 1 */ | ||
562 | } __packed; | ||
563 | struct PCICFG_ANON_23_RSVD_AMAP { | ||
564 | u32 dw[896]; | ||
565 | }; | ||
566 | |||
567 | /* These PCI Configuration Space registers are for the Storage Function of | ||
568 | * BladeEngine (Function 0). The memory map below shows each register's | ||
569 | * relative position, with reserved DWORDs padding the unused offsets. | ||
570 | */ | ||
571 | struct BE_PCICFG0_CSRMAP_AMAP { | ||
572 | struct BE_PCICFG_ID_CSR_AMAP id; | ||
573 | u8 rsvd0[32]; /* DWORD 1 */ | ||
574 | u8 rsvd1[32]; /* DWORD 2 */ | ||
575 | u8 rsvd2[32]; /* DWORD 3 */ | ||
576 | struct BE_PCICFG_IOBAR_CSR_AMAP iobar; | ||
577 | struct BE_PCICFG_MEMBAR0_CSR_AMAP membar0; | ||
578 | struct BE_PCICFG_MEMBAR1_LO_CSR_AMAP membar1_lo; | ||
579 | struct BE_PCICFG_MEMBAR1_HI_CSR_AMAP membar1_hi; | ||
580 | struct BE_PCICFG_MEMBAR2_LO_CSR_AMAP membar2_lo; | ||
581 | struct BE_PCICFG_MEMBAR2_HI_CSR_AMAP membar2_hi; | ||
582 | u8 rsvd3[32]; /* DWORD 10 */ | ||
583 | struct BE_PCICFG_SUBSYSTEM_ID_F0_CSR_AMAP subsystem_id; | ||
584 | u8 rsvd4[32]; /* DWORD 12 */ | ||
585 | u8 rsvd5[32]; /* DWORD 13 */ | ||
586 | u8 rsvd6[32]; /* DWORD 14 */ | ||
587 | u8 rsvd7[32]; /* DWORD 15 */ | ||
588 | struct BE_PCICFG_SEMAPHORE_CSR_AMAP semaphore[4]; | ||
589 | struct BE_PCICFG_SOFT_RESET_CSR_AMAP soft_reset; | ||
590 | u8 rsvd8[32]; /* DWORD 21 */ | ||
591 | struct BE_PCICFG_SCRATCHPAD_CSR_AMAP scratchpad; | ||
592 | u8 rsvd9[32]; /* DWORD 23 */ | ||
593 | u8 rsvd10[32]; /* DWORD 24 */ | ||
594 | u8 rsvd11[32]; /* DWORD 25 */ | ||
595 | u8 rsvd12[32]; /* DWORD 26 */ | ||
596 | u8 rsvd13[32]; /* DWORD 27 */ | ||
597 | u8 rsvd14[2][32]; /* DWORD 28 */ | ||
598 | u8 rsvd15[32]; /* DWORD 30 */ | ||
599 | u8 rsvd16[32]; /* DWORD 31 */ | ||
600 | u8 rsvd17[8][32]; /* DWORD 32 */ | ||
601 | struct BE_PCICFG_UE_STATUS_LOW_CSR_AMAP ue_status_low; | ||
602 | struct BE_PCICFG_UE_STATUS_HI_CSR_AMAP ue_status_hi; | ||
603 | struct BE_PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP ue_status_low_mask; | ||
604 | struct BE_PCICFG_UE_STATUS_HI_MASK_CSR_AMAP ue_status_hi_mask; | ||
605 | struct BE_PCICFG_ONLINE0_CSR_AMAP online0; | ||
606 | struct BE_PCICFG_ONLINE1_CSR_AMAP online1; | ||
607 | u8 rsvd18[32]; /* DWORD 46 */ | ||
608 | u8 rsvd19[32]; /* DWORD 47 */ | ||
609 | u8 rsvd20[32]; /* DWORD 48 */ | ||
610 | u8 rsvd21[32]; /* DWORD 49 */ | ||
611 | struct BE_PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP host_timer_int_ctrl; | ||
612 | u8 rsvd22[32]; /* DWORD 51 */ | ||
613 | struct BE_PCICFG_PCIE_CAP_CSR_AMAP pcie_cap; | ||
614 | struct BE_PCICFG_PCIE_DEVCAP_CSR_AMAP pcie_devcap; | ||
615 | struct BE_PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP pcie_control_status; | ||
616 | struct BE_PCICFG_PCIE_LINK_CAP_CSR_AMAP pcie_link_cap; | ||
617 | struct BE_PCICFG_PCIE_LINK_STATUS_CSR_AMAP pcie_link_status; | ||
618 | struct BE_PCICFG_MSI_CSR_AMAP msi; | ||
619 | struct BE_PCICFG_MSIX_TABLE_CSR_AMAP msix_table_offset; | ||
620 | struct BE_PCICFG_MSIX_PBA_CSR_AMAP msix_pba_offset; | ||
621 | u8 rsvd23[32]; /* DWORD 60 */ | ||
622 | u8 rsvd24[32]; /* DWORD 61 */ | ||
623 | u8 rsvd25[32]; /* DWORD 62 */ | ||
624 | u8 rsvd26[32]; /* DWORD 63 */ | ||
625 | u8 rsvd27[32]; /* DWORD 64 */ | ||
626 | u8 rsvd28[32]; /* DWORD 65 */ | ||
627 | u8 rsvd29[32]; /* DWORD 66 */ | ||
628 | u8 rsvd30[32]; /* DWORD 67 */ | ||
629 | u8 rsvd31[32]; /* DWORD 68 */ | ||
630 | u8 rsvd32[32]; /* DWORD 69 */ | ||
631 | u8 rsvd33[32]; /* DWORD 70 */ | ||
632 | u8 rsvd34[32]; /* DWORD 71 */ | ||
633 | u8 rsvd35[32]; /* DWORD 72 */ | ||
634 | u8 rsvd36[32]; /* DWORD 73 */ | ||
635 | u8 rsvd37[32]; /* DWORD 74 */ | ||
636 | u8 rsvd38[32]; /* DWORD 75 */ | ||
637 | u8 rsvd39[32]; /* DWORD 76 */ | ||
638 | u8 rsvd40[32]; /* DWORD 77 */ | ||
639 | u8 rsvd41[32]; /* DWORD 78 */ | ||
640 | u8 rsvd42[32]; /* DWORD 79 */ | ||
641 | u8 rsvd43[32]; /* DWORD 80 */ | ||
642 | u8 rsvd44[32]; /* DWORD 81 */ | ||
643 | u8 rsvd45[32]; /* DWORD 82 */ | ||
644 | u8 rsvd46[32]; /* DWORD 83 */ | ||
645 | u8 rsvd47[32]; /* DWORD 84 */ | ||
646 | u8 rsvd48[32]; /* DWORD 85 */ | ||
647 | u8 rsvd49[32]; /* DWORD 86 */ | ||
648 | u8 rsvd50[32]; /* DWORD 87 */ | ||
649 | u8 rsvd51[32]; /* DWORD 88 */ | ||
650 | u8 rsvd52[32]; /* DWORD 89 */ | ||
651 | u8 rsvd53[32]; /* DWORD 90 */ | ||
652 | u8 rsvd54[32]; /* DWORD 91 */ | ||
653 | u8 rsvd55[32]; /* DWORD 92 */ | ||
654 | u8 rsvd56[832]; /* DWORD 93 */ | ||
655 | u8 rsvd57[32]; /* DWORD 119 */ | ||
656 | u8 rsvd58[32]; /* DWORD 120 */ | ||
657 | u8 rsvd59[32]; /* DWORD 121 */ | ||
658 | u8 rsvd60[32]; /* DWORD 122 */ | ||
659 | u8 rsvd61[32]; /* DWORD 123 */ | ||
660 | u8 rsvd62[32]; /* DWORD 124 */ | ||
661 | u8 rsvd63[32]; /* DWORD 125 */ | ||
662 | u8 rsvd64[32]; /* DWORD 126 */ | ||
663 | u8 rsvd65[32]; /* DWORD 127 */ | ||
664 | u8 rsvd66[61440]; /* DWORD 128 */ | ||
665 | struct BE_PCICFG_ANON_22_MESSAGE_AMAP message[32]; | ||
666 | u8 rsvd67[28672]; /* DWORD 2176 */ | ||
667 | u8 rsvd68[32]; /* DWORD 3072 */ | ||
668 | u8 rsvd69[1023][32]; /* DWORD 3073 */ | ||
669 | } __packed; | ||
670 | struct PCICFG0_CSRMAP_AMAP { | ||
671 | u32 dw[4096]; | ||
672 | }; | ||
673 | |||
674 | struct BE_PCICFG_ANON_24_RSVD_AMAP { | ||
675 | u8 rsvd0[32]; /* DWORD 0 */ | ||
676 | } __packed; | ||
677 | struct PCICFG_ANON_24_RSVD_AMAP { | ||
678 | u32 dw[1]; | ||
679 | }; | ||
680 | |||
681 | struct BE_PCICFG_ANON_25_RSVD_AMAP { | ||
682 | u8 rsvd0[32]; /* DWORD 0 */ | ||
683 | } __packed; | ||
684 | struct PCICFG_ANON_25_RSVD_AMAP { | ||
685 | u32 dw[1]; | ||
686 | }; | ||
687 | |||
688 | struct BE_PCICFG_ANON_26_RSVD_AMAP { | ||
689 | u8 rsvd0[32]; /* DWORD 0 */ | ||
690 | } __packed; | ||
691 | struct PCICFG_ANON_26_RSVD_AMAP { | ||
692 | u32 dw[1]; | ||
693 | }; | ||
694 | |||
695 | struct BE_PCICFG_ANON_27_RSVD_AMAP { | ||
696 | u8 rsvd0[32]; /* DWORD 0 */ | ||
697 | u8 rsvd1[32]; /* DWORD 1 */ | ||
698 | } __packed; | ||
699 | struct PCICFG_ANON_27_RSVD_AMAP { | ||
700 | u32 dw[2]; | ||
701 | }; | ||
702 | |||
703 | struct BE_PCICFG_ANON_28_RSVD_AMAP { | ||
704 | u8 rsvd0[32]; /* DWORD 0 */ | ||
705 | u8 rsvd1[3][32]; /* DWORD 1 */ | ||
706 | } __packed; | ||
707 | struct PCICFG_ANON_28_RSVD_AMAP { | ||
708 | u32 dw[4]; | ||
709 | }; | ||
710 | |||
711 | struct BE_PCICFG_ANON_29_RSVD_AMAP { | ||
712 | u8 rsvd0[32]; /* DWORD 0 */ | ||
713 | u8 rsvd1[36][32]; /* DWORD 1 */ | ||
714 | } __packed; | ||
715 | struct PCICFG_ANON_29_RSVD_AMAP { | ||
716 | u32 dw[37]; | ||
717 | }; | ||
718 | |||
719 | struct BE_PCICFG_ANON_30_RSVD_AMAP { | ||
720 | u8 rsvd0[32]; /* DWORD 0 */ | ||
721 | u8 rsvd1[1930][32]; /* DWORD 1 */ | ||
722 | } __packed; | ||
723 | struct PCICFG_ANON_30_RSVD_AMAP { | ||
724 | u32 dw[1931]; | ||
725 | }; | ||
726 | |||
727 | struct BE_PCICFG_ANON_31_MESSAGE_AMAP { | ||
728 | struct BE_PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP vec_ctrl; | ||
729 | struct BE_PCICFG_MSIX_MSG_DATA_CSR_AMAP msg_data; | ||
730 | struct BE_PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP addr_hi; | ||
731 | struct BE_PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP addr_low; | ||
732 | } __packed; | ||
733 | struct PCICFG_ANON_31_MESSAGE_AMAP { | ||
734 | u32 dw[4]; | ||
735 | }; | ||
736 | |||
737 | struct BE_PCICFG_ANON_32_RSVD_AMAP { | ||
738 | u8 rsvd0[32]; /* DWORD 0 */ | ||
739 | u8 rsvd1[895][32]; /* DWORD 1 */ | ||
740 | } __packed; | ||
741 | struct PCICFG_ANON_32_RSVD_AMAP { | ||
742 | u32 dw[896]; | ||
743 | }; | ||
744 | |||
745 | /* This PCI configuration space register map is for the Networking Function of | ||
746 | * BladeEngine (Function 1). | ||
747 | */ | ||
748 | struct BE_PCICFG1_CSRMAP_AMAP { | ||
749 | struct BE_PCICFG_ID_CSR_AMAP id; | ||
750 | u8 rsvd0[32]; /* DWORD 1 */ | ||
751 | u8 rsvd1[32]; /* DWORD 2 */ | ||
752 | u8 rsvd2[32]; /* DWORD 3 */ | ||
753 | struct BE_PCICFG_IOBAR_CSR_AMAP iobar; | ||
754 | struct BE_PCICFG_MEMBAR0_CSR_AMAP membar0; | ||
755 | struct BE_PCICFG_MEMBAR1_LO_CSR_AMAP membar1_lo; | ||
756 | struct BE_PCICFG_MEMBAR1_HI_CSR_AMAP membar1_hi; | ||
757 | struct BE_PCICFG_MEMBAR2_LO_CSR_AMAP membar2_lo; | ||
758 | struct BE_PCICFG_MEMBAR2_HI_CSR_AMAP membar2_hi; | ||
759 | u8 rsvd3[32]; /* DWORD 10 */ | ||
760 | struct BE_PCICFG_SUBSYSTEM_ID_F1_CSR_AMAP subsystem_id; | ||
761 | u8 rsvd4[32]; /* DWORD 12 */ | ||
762 | u8 rsvd5[32]; /* DWORD 13 */ | ||
763 | u8 rsvd6[32]; /* DWORD 14 */ | ||
764 | u8 rsvd7[32]; /* DWORD 15 */ | ||
765 | struct BE_PCICFG_SEMAPHORE_CSR_AMAP semaphore[4]; | ||
766 | struct BE_PCICFG_SOFT_RESET_CSR_AMAP soft_reset; | ||
767 | u8 rsvd8[32]; /* DWORD 21 */ | ||
768 | struct BE_PCICFG_SCRATCHPAD_CSR_AMAP scratchpad; | ||
769 | u8 rsvd9[32]; /* DWORD 23 */ | ||
770 | u8 rsvd10[32]; /* DWORD 24 */ | ||
771 | u8 rsvd11[32]; /* DWORD 25 */ | ||
772 | u8 rsvd12[32]; /* DWORD 26 */ | ||
773 | u8 rsvd13[32]; /* DWORD 27 */ | ||
774 | u8 rsvd14[2][32]; /* DWORD 28 */ | ||
775 | u8 rsvd15[32]; /* DWORD 30 */ | ||
776 | u8 rsvd16[32]; /* DWORD 31 */ | ||
777 | u8 rsvd17[8][32]; /* DWORD 32 */ | ||
778 | struct BE_PCICFG_UE_STATUS_LOW_CSR_AMAP ue_status_low; | ||
779 | struct BE_PCICFG_UE_STATUS_HI_CSR_AMAP ue_status_hi; | ||
780 | struct BE_PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP ue_status_low_mask; | ||
781 | struct BE_PCICFG_UE_STATUS_HI_MASK_CSR_AMAP ue_status_hi_mask; | ||
782 | struct BE_PCICFG_ONLINE0_CSR_AMAP online0; | ||
783 | struct BE_PCICFG_ONLINE1_CSR_AMAP online1; | ||
784 | u8 rsvd18[32]; /* DWORD 46 */ | ||
785 | u8 rsvd19[32]; /* DWORD 47 */ | ||
786 | u8 rsvd20[32]; /* DWORD 48 */ | ||
787 | u8 rsvd21[32]; /* DWORD 49 */ | ||
788 | struct BE_PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP host_timer_int_ctrl; | ||
789 | u8 rsvd22[32]; /* DWORD 51 */ | ||
790 | struct BE_PCICFG_PCIE_CAP_CSR_AMAP pcie_cap; | ||
791 | struct BE_PCICFG_PCIE_DEVCAP_CSR_AMAP pcie_devcap; | ||
792 | struct BE_PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP pcie_control_status; | ||
793 | struct BE_PCICFG_PCIE_LINK_CAP_CSR_AMAP pcie_link_cap; | ||
794 | struct BE_PCICFG_PCIE_LINK_STATUS_CSR_AMAP pcie_link_status; | ||
795 | struct BE_PCICFG_MSI_CSR_AMAP msi; | ||
796 | struct BE_PCICFG_MSIX_TABLE_CSR_AMAP msix_table_offset; | ||
797 | struct BE_PCICFG_MSIX_PBA_CSR_AMAP msix_pba_offset; | ||
798 | u8 rsvd23[64]; /* DWORD 60 */ | ||
799 | u8 rsvd24[32]; /* DWORD 62 */ | ||
800 | u8 rsvd25[32]; /* DWORD 63 */ | ||
801 | u8 rsvd26[32]; /* DWORD 64 */ | ||
802 | u8 rsvd27[32]; /* DWORD 65 */ | ||
803 | u8 rsvd28[32]; /* DWORD 66 */ | ||
804 | u8 rsvd29[32]; /* DWORD 67 */ | ||
805 | u8 rsvd30[32]; /* DWORD 68 */ | ||
806 | u8 rsvd31[32]; /* DWORD 69 */ | ||
807 | u8 rsvd32[32]; /* DWORD 70 */ | ||
808 | u8 rsvd33[32]; /* DWORD 71 */ | ||
809 | u8 rsvd34[32]; /* DWORD 72 */ | ||
810 | u8 rsvd35[32]; /* DWORD 73 */ | ||
811 | u8 rsvd36[32]; /* DWORD 74 */ | ||
812 | u8 rsvd37[128]; /* DWORD 75 */ | ||
813 | u8 rsvd38[32]; /* DWORD 79 */ | ||
814 | u8 rsvd39[1184]; /* DWORD 80 */ | ||
815 | u8 rsvd40[61792]; /* DWORD 117 */ | ||
816 | struct BE_PCICFG_ANON_31_MESSAGE_AMAP message[32]; | ||
817 | u8 rsvd41[28672]; /* DWORD 2176 */ | ||
818 | u8 rsvd42[32]; /* DWORD 3072 */ | ||
819 | u8 rsvd43[1023][32]; /* DWORD 3073 */ | ||
820 | } __packed; | ||
821 | struct PCICFG1_CSRMAP_AMAP { | ||
822 | u32 dw[4096]; | ||
823 | }; | ||
824 | |||
825 | #endif /* __pcicfg_amap_h__ */ | ||
diff --git a/drivers/staging/benet/post_codes.h b/drivers/staging/benet/post_codes.h deleted file mode 100644 index 6d1621f5f5fb..000000000000 --- a/drivers/staging/benet/post_codes.h +++ /dev/null | |||
@@ -1,111 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | /* | ||
18 | * Autogenerated by srcgen version: 0127 | ||
19 | */ | ||
20 | #ifndef __post_codes_amap_h__ | ||
21 | #define __post_codes_amap_h__ | ||
22 | |||
23 | /* --- MGMT_HBA_POST_STAGE_ENUM --- */ | ||
24 | #define POST_STAGE_POWER_ON_RESET (0) /* State after a cold or warm boot. */ | ||
25 | #define POST_STAGE_AWAITING_HOST_RDY (1) /* ARM boot code awaiting a | ||
26 | go-ahead from the host. */ | ||
27 | #define POST_STAGE_HOST_RDY (2) /* Host has given go-ahead to ARM. */ | ||
28 | #define POST_STAGE_BE_RESET (3) /* Host wants to reset chip, this is a chip | ||
29 | workaround */ | ||
30 | #define POST_STAGE_SEEPROM_CS_START (256) /* SEEPROM checksum | ||
31 | test start. */ | ||
32 | #define POST_STAGE_SEEPROM_CS_DONE (257) /* SEEPROM checksum test | ||
33 | done. */ | ||
34 | #define POST_STAGE_DDR_CONFIG_START (512) /* DDR configuration start. */ | ||
35 | #define POST_STAGE_DDR_CONFIG_DONE (513) /* DDR configuration done. */ | ||
36 | #define POST_STAGE_DDR_CALIBRATE_START (768) /* DDR calibration start. */ | ||
37 | #define POST_STAGE_DDR_CALIBRATE_DONE (769) /* DDR calibration done. */ | ||
38 | #define POST_STAGE_DDR_TEST_START (1024) /* DDR memory test start. */ | ||
39 | #define POST_STAGE_DDR_TEST_DONE (1025) /* DDR memory test done. */ | ||
40 | #define POST_STAGE_REDBOOT_INIT_START (1536) /* Redboot starts execution. */ | ||
41 | #define POST_STAGE_REDBOOT_INIT_DONE (1537) /* Redboot finished execution. */ | ||
42 | #define POST_STAGE_FW_IMAGE_LOAD_START (1792) /* Firmware image load to | ||
43 | DDR start. */ | ||
44 | #define POST_STAGE_FW_IMAGE_LOAD_DONE (1793) /* Firmware image load | ||
45 | to DDR done. */ | ||
46 | #define POST_STAGE_ARMFW_START (2048) /* ARMfw runtime code | ||
47 | starts execution. */ | ||
48 | #define POST_STAGE_DHCP_QUERY_START (2304) /* DHCP server query start. */ | ||
49 | #define POST_STAGE_DHCP_QUERY_DONE (2305) /* DHCP server query done. */ | ||
50 | #define POST_STAGE_BOOT_TARGET_DISCOVERY_START (2560) /* Boot Target | ||
51 | Discovery Start. */ | ||
52 | #define POST_STAGE_BOOT_TARGET_DISCOVERY_DONE (2561) /* Boot Target | ||
53 | Discovery Done. */ | ||
54 | #define POST_STAGE_RC_OPTION_SET (2816) /* Remote configuration | ||
55 | option is set in SEEPROM */ | ||
56 | #define POST_STAGE_SWITCH_LINK (2817) /* Wait for link up on switch */ | ||
57 | #define POST_STAGE_SEND_ICDS_MESSAGE (2818) /* Send the ICDS message | ||
58 | to switch */ | ||
59 | #define POST_STAGE_PERFROM_TFTP (2819) /* Download xml using TFTP */ | ||
60 | #define POST_STAGE_PARSE_XML (2820) /* Parse XML file */ | ||
61 | #define POST_STAGE_DOWNLOAD_IMAGE (2821) /* Download IMAGE from | ||
62 | TFTP server */ | ||
63 | #define POST_STAGE_FLASH_IMAGE (2822) /* Flash the IMAGE */ | ||
64 | #define POST_STAGE_RC_DONE (2823) /* Remote configuration | ||
65 | complete */ | ||
66 | #define POST_STAGE_REBOOT_SYSTEM (2824) /* Upgrade IMAGE done, | ||
67 | reboot required */ | ||
68 | #define POST_STAGE_MAC_ADDRESS (3072) /* MAC Address Check */ | ||
69 | #define POST_STAGE_ARMFW_READY (49152) /* ARMfw is done with POST | ||
70 | and ready. */ | ||
71 | #define POST_STAGE_ARMFW_UE (61440) /* ARMfw has asserted an | ||
72 | unrecoverable error. The | ||
73 | lower 3 hex digits of the | ||
74 | stage code identify the | ||
75 | unique error code. | ||
76 | */ | ||
77 | |||
78 | /* This structure defines the format of the MPU semaphore | ||
79 | * register when used for POST. | ||
80 | */ | ||
81 | struct BE_MGMT_HBA_POST_STATUS_STRUCT_AMAP { | ||
82 | u8 stage[16]; /* DWORD 0 */ | ||
83 | u8 rsvd0[10]; /* DWORD 0 */ | ||
84 | u8 iscsi_driver_loaded; /* DWORD 0 */ | ||
85 | u8 option_rom_installed; /* DWORD 0 */ | ||
86 | u8 iscsi_ip_conflict; /* DWORD 0 */ | ||
87 | u8 iscsi_no_ip; /* DWORD 0 */ | ||
88 | u8 backup_fw; /* DWORD 0 */ | ||
89 | u8 error; /* DWORD 0 */ | ||
90 | } __packed; | ||
91 | struct MGMT_HBA_POST_STATUS_STRUCT_AMAP { | ||
92 | u32 dw[1]; | ||
93 | }; | ||
94 | |||
95 | /* --- MGMT_HBA_POST_DUMMY_BITS_ENUM --- */ | ||
96 | #define POST_BIT_ISCSI_LOADED (26) | ||
97 | #define POST_BIT_OPTROM_INST (27) | ||
98 | #define POST_BIT_BAD_IP_ADDR (28) | ||
99 | #define POST_BIT_NO_IP_ADDR (29) | ||
100 | #define POST_BIT_BACKUP_FW (30) | ||
101 | #define POST_BIT_ERROR (31) | ||
102 | |||
103 | /* --- MGMT_HBA_POST_DUMMY_VALUES_ENUM --- */ | ||
104 | #define POST_ISCSI_DRIVER_LOADED (67108864) | ||
105 | #define POST_OPTROM_INSTALLED (134217728) | ||
106 | #define POST_ISCSI_IP_ADDRESS_CONFLICT (268435456) | ||
107 | #define POST_ISCSI_NO_IP_ADDRESS (536870912) | ||
108 | #define POST_BACKUP_FW_LOADED (1073741824) | ||
109 | #define POST_FATAL_ERROR (2147483648) | ||
110 | |||
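A hedged helper (not part of the header) showing how the semaphore DWORD above might be decoded: the stage code occupies bits 0-15 and the flag bits match the DUMMY_VALUES constants (e.g. POST_FATAL_ERROR is 1 << 31). An unrecoverable error is indicated either by that flag or by a stage code in the POST_STAGE_ARMFW_UE range, whose low three hex digits carry the specific error code:

static inline int be_post_failed(u32 sem)
{
	u32 stage = sem & 0xffff;		/* MGMT_HBA_POST_STAGE_ENUM */

	if (sem & POST_FATAL_ERROR)		/* bit 31, see DUMMY_VALUES above */
		return 1;

	/* 0xFxxx stage codes report an ARMfw unrecoverable error */
	return (stage & 0xf000) == POST_STAGE_ARMFW_UE;
}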
111 | #endif /* __post_codes_amap_h__ */ | ||
diff --git a/drivers/staging/benet/regmap.h b/drivers/staging/benet/regmap.h deleted file mode 100644 index e816ba210e83..000000000000 --- a/drivers/staging/benet/regmap.h +++ /dev/null | |||
@@ -1,68 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 - 2008 ServerEngines | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License version 2 | ||
7 | * as published by the Free Software Foundation. The full GNU General | ||
8 | * Public License is included in this distribution in the file called COPYING. | ||
9 | * | ||
10 | * Contact Information: | ||
11 | * linux-drivers@serverengines.com | ||
12 | * | ||
13 | * ServerEngines | ||
14 | * 209 N. Fair Oaks Ave | ||
15 | * Sunnyvale, CA 94085 | ||
16 | */ | ||
17 | /* | ||
18 | * Autogenerated by srcgen version: 0127 | ||
19 | */ | ||
20 | #ifndef __regmap_amap_h__ | ||
21 | #define __regmap_amap_h__ | ||
22 | #include "pcicfg.h" | ||
23 | #include "ep.h" | ||
24 | #include "cev.h" | ||
25 | #include "mpu.h" | ||
26 | #include "doorbells.h" | ||
27 | |||
28 | /* | ||
29 | * This is the control and status register map for BladeEngine, showing | ||
30 | * the relative size and offset of each sub-module. The CSR registers | ||
31 | * are identical for the network and storage PCI functions. The | ||
32 | * CSR map is shown below, followed by details of each block, | ||
33 | * in sub-sections. The sub-sections begin with a description | ||
34 | * of CSRs that are instantiated in multiple blocks. | ||
35 | */ | ||
36 | struct BE_BLADE_ENGINE_CSRMAP_AMAP { | ||
37 | struct BE_MPU_CSRMAP_AMAP mpu; | ||
38 | u8 rsvd0[8192]; /* DWORD 256 */ | ||
39 | u8 rsvd1[8192]; /* DWORD 512 */ | ||
40 | struct BE_CEV_CSRMAP_AMAP cev; | ||
41 | u8 rsvd2[8192]; /* DWORD 1024 */ | ||
42 | u8 rsvd3[8192]; /* DWORD 1280 */ | ||
43 | u8 rsvd4[8192]; /* DWORD 1536 */ | ||
44 | u8 rsvd5[8192]; /* DWORD 1792 */ | ||
45 | u8 rsvd6[8192]; /* DWORD 2048 */ | ||
46 | u8 rsvd7[8192]; /* DWORD 2304 */ | ||
47 | u8 rsvd8[8192]; /* DWORD 2560 */ | ||
48 | u8 rsvd9[8192]; /* DWORD 2816 */ | ||
49 | u8 rsvd10[8192]; /* DWORD 3072 */ | ||
50 | u8 rsvd11[8192]; /* DWORD 3328 */ | ||
51 | u8 rsvd12[8192]; /* DWORD 3584 */ | ||
52 | u8 rsvd13[8192]; /* DWORD 3840 */ | ||
53 | u8 rsvd14[8192]; /* DWORD 4096 */ | ||
54 | u8 rsvd15[8192]; /* DWORD 4352 */ | ||
55 | u8 rsvd16[8192]; /* DWORD 4608 */ | ||
56 | u8 rsvd17[8192]; /* DWORD 4864 */ | ||
57 | u8 rsvd18[8192]; /* DWORD 5120 */ | ||
58 | u8 rsvd19[8192]; /* DWORD 5376 */ | ||
59 | u8 rsvd20[8192]; /* DWORD 5632 */ | ||
60 | u8 rsvd21[8192]; /* DWORD 5888 */ | ||
61 | u8 rsvd22[8192]; /* DWORD 6144 */ | ||
62 | u8 rsvd23[17152][32]; /* DWORD 6400 */ | ||
63 | } __packed; | ||
64 | struct BLADE_ENGINE_CSRMAP_AMAP { | ||
65 | u32 dw[23552]; | ||
66 | }; | ||
67 | |||
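As an illustration of reading the map above: block positions follow from the DWORD comments, so the CEV block -- which sits after the MPU block (DWORDs 0-255) and two 256-DWORD reserved holes -- begins at DWORD 768, i.e. byte offset 0xC00 from the CSR base. The constants below are an editor's sketch derived from those comments, not identifiers from the driver:

#define BE_CSR_CEV_START_DWORD	768
#define BE_CSR_CEV_START_BYTE	(BE_CSR_CEV_START_DWORD * 4)	/* 0xc00 */
/* e.g. readl(csr_base + BE_CSR_CEV_START_BYTE) reads the first CEV DWORD */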
68 | #endif /* __regmap_amap_h__ */ | ||
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c index 5ed4ae07bac1..6789089e2461 100644 --- a/drivers/usb/atm/cxacru.c +++ b/drivers/usb/atm/cxacru.c | |||
@@ -485,7 +485,7 @@ static int cxacru_cm(struct cxacru_data *instance, enum cxacru_cm_request cm, | |||
485 | usb_err(instance->usbatm, "requested transfer size too large (%d, %d)\n", | 485 | usb_err(instance->usbatm, "requested transfer size too large (%d, %d)\n", |
486 | wbuflen, rbuflen); | 486 | wbuflen, rbuflen); |
487 | ret = -ENOMEM; | 487 | ret = -ENOMEM; |
488 | goto fail; | 488 | goto err; |
489 | } | 489 | } |
490 | 490 | ||
491 | mutex_lock(&instance->cm_serialize); | 491 | mutex_lock(&instance->cm_serialize); |
@@ -565,6 +565,7 @@ static int cxacru_cm(struct cxacru_data *instance, enum cxacru_cm_request cm, | |||
565 | dbg("cm %#x", cm); | 565 | dbg("cm %#x", cm); |
566 | fail: | 566 | fail: |
567 | mutex_unlock(&instance->cm_serialize); | 567 | mutex_unlock(&instance->cm_serialize); |
568 | err: | ||
568 | return ret; | 569 | return ret; |
569 | } | 570 | } |
570 | 571 | ||
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 0f5c05f6f9df..c40a9b284cc9 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c | |||
@@ -50,6 +50,7 @@ | |||
50 | 50 | ||
51 | static struct usb_device_id usbtmc_devices[] = { | 51 | static struct usb_device_id usbtmc_devices[] = { |
52 | { USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 0), }, | 52 | { USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 0), }, |
53 | { USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 1), }, | ||
53 | { 0, } /* terminating entry */ | 54 | { 0, } /* terminating entry */ |
54 | }; | 55 | }; |
55 | MODULE_DEVICE_TABLE(usb, usbtmc_devices); | 56 | MODULE_DEVICE_TABLE(usb, usbtmc_devices); |
@@ -106,12 +107,13 @@ static int usbtmc_open(struct inode *inode, struct file *filp) | |||
106 | { | 107 | { |
107 | struct usb_interface *intf; | 108 | struct usb_interface *intf; |
108 | struct usbtmc_device_data *data; | 109 | struct usbtmc_device_data *data; |
109 | int retval = -ENODEV; | 110 | int retval = 0; |
110 | 111 | ||
111 | intf = usb_find_interface(&usbtmc_driver, iminor(inode)); | 112 | intf = usb_find_interface(&usbtmc_driver, iminor(inode)); |
112 | if (!intf) { | 113 | if (!intf) { |
113 | printk(KERN_ERR KBUILD_MODNAME | 114 | printk(KERN_ERR KBUILD_MODNAME |
114 | ": can not find device for minor %d", iminor(inode)); | 115 | ": can not find device for minor %d", iminor(inode)); |
116 | retval = -ENODEV; | ||
115 | goto exit; | 117 | goto exit; |
116 | } | 118 | } |
117 | 119 | ||
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 7513bb083c15..6585f527e381 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c | |||
@@ -359,11 +359,6 @@ static void destroy_async(struct dev_state *ps, struct list_head *list) | |||
359 | spin_lock_irqsave(&ps->lock, flags); | 359 | spin_lock_irqsave(&ps->lock, flags); |
360 | } | 360 | } |
361 | spin_unlock_irqrestore(&ps->lock, flags); | 361 | spin_unlock_irqrestore(&ps->lock, flags); |
362 | as = async_getcompleted(ps); | ||
363 | while (as) { | ||
364 | free_async(as); | ||
365 | as = async_getcompleted(ps); | ||
366 | } | ||
367 | } | 362 | } |
368 | 363 | ||
369 | static void destroy_async_on_interface(struct dev_state *ps, | 364 | static void destroy_async_on_interface(struct dev_state *ps, |
@@ -643,6 +638,7 @@ static int usbdev_release(struct inode *inode, struct file *file) | |||
643 | struct dev_state *ps = file->private_data; | 638 | struct dev_state *ps = file->private_data; |
644 | struct usb_device *dev = ps->dev; | 639 | struct usb_device *dev = ps->dev; |
645 | unsigned int ifnum; | 640 | unsigned int ifnum; |
641 | struct async *as; | ||
646 | 642 | ||
647 | usb_lock_device(dev); | 643 | usb_lock_device(dev); |
648 | 644 | ||
@@ -661,6 +657,12 @@ static int usbdev_release(struct inode *inode, struct file *file) | |||
661 | usb_unlock_device(dev); | 657 | usb_unlock_device(dev); |
662 | usb_put_dev(dev); | 658 | usb_put_dev(dev); |
663 | put_pid(ps->disc_pid); | 659 | put_pid(ps->disc_pid); |
660 | |||
661 | as = async_getcompleted(ps); | ||
662 | while (as) { | ||
663 | free_async(as); | ||
664 | as = async_getcompleted(ps); | ||
665 | } | ||
664 | kfree(ps); | 666 | kfree(ps); |
665 | return 0; | 667 | return 0; |
666 | } | 668 | } |
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 3712b925b315..ecc9b66c03cd 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c | |||
@@ -1095,7 +1095,8 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
1095 | prev->qh_next = qh->qh_next; | 1095 | prev->qh_next = qh->qh_next; |
1096 | wmb (); | 1096 | wmb (); |
1097 | 1097 | ||
1098 | if (unlikely (ehci_to_hcd(ehci)->state == HC_STATE_HALT)) { | 1098 | /* If the controller isn't running, we don't have to wait for it */ |
1099 | if (unlikely(!HC_IS_RUNNING(ehci_to_hcd(ehci)->state))) { | ||
1099 | /* if (unlikely (qh->reclaim != 0)) | 1100 | /* if (unlikely (qh->reclaim != 0)) |
1100 | * this will recurse, probably not much | 1101 | * this will recurse, probably not much |
1101 | */ | 1102 | */ |
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index 07bcb931021b..1d0b49e3f192 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c | |||
@@ -1536,7 +1536,7 @@ itd_link_urb ( | |||
1536 | struct ehci_itd, itd_list); | 1536 | struct ehci_itd, itd_list); |
1537 | list_move_tail (&itd->itd_list, &stream->td_list); | 1537 | list_move_tail (&itd->itd_list, &stream->td_list); |
1538 | itd->stream = iso_stream_get (stream); | 1538 | itd->stream = iso_stream_get (stream); |
1539 | itd->urb = usb_get_urb (urb); | 1539 | itd->urb = urb; |
1540 | itd_init (ehci, stream, itd); | 1540 | itd_init (ehci, stream, itd); |
1541 | } | 1541 | } |
1542 | 1542 | ||
@@ -1645,7 +1645,7 @@ itd_complete ( | |||
1645 | (void) disable_periodic(ehci); | 1645 | (void) disable_periodic(ehci); |
1646 | ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; | 1646 | ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; |
1647 | 1647 | ||
1648 | if (unlikely (list_empty (&stream->td_list))) { | 1648 | if (unlikely(list_is_singular(&stream->td_list))) { |
1649 | ehci_to_hcd(ehci)->self.bandwidth_allocated | 1649 | ehci_to_hcd(ehci)->self.bandwidth_allocated |
1650 | -= stream->bandwidth; | 1650 | -= stream->bandwidth; |
1651 | ehci_vdbg (ehci, | 1651 | ehci_vdbg (ehci, |
@@ -1656,7 +1656,6 @@ itd_complete ( | |||
1656 | iso_stream_put (ehci, stream); | 1656 | iso_stream_put (ehci, stream); |
1657 | 1657 | ||
1658 | done: | 1658 | done: |
1659 | usb_put_urb(urb); | ||
1660 | itd->urb = NULL; | 1659 | itd->urb = NULL; |
1661 | if (ehci->clock_frame != itd->frame || itd->index[7] != -1) { | 1660 | if (ehci->clock_frame != itd->frame || itd->index[7] != -1) { |
1662 | /* OK to recycle this ITD now. */ | 1661 | /* OK to recycle this ITD now. */ |
@@ -1949,7 +1948,7 @@ sitd_link_urb ( | |||
1949 | struct ehci_sitd, sitd_list); | 1948 | struct ehci_sitd, sitd_list); |
1950 | list_move_tail (&sitd->sitd_list, &stream->td_list); | 1949 | list_move_tail (&sitd->sitd_list, &stream->td_list); |
1951 | sitd->stream = iso_stream_get (stream); | 1950 | sitd->stream = iso_stream_get (stream); |
1952 | sitd->urb = usb_get_urb (urb); | 1951 | sitd->urb = urb; |
1953 | 1952 | ||
1954 | sitd_patch(ehci, stream, sitd, sched, packet); | 1953 | sitd_patch(ehci, stream, sitd, sched, packet); |
1955 | sitd_link (ehci, (next_uframe >> 3) % ehci->periodic_size, | 1954 | sitd_link (ehci, (next_uframe >> 3) % ehci->periodic_size, |
@@ -2034,7 +2033,7 @@ sitd_complete ( | |||
2034 | (void) disable_periodic(ehci); | 2033 | (void) disable_periodic(ehci); |
2035 | ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; | 2034 | ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; |
2036 | 2035 | ||
2037 | if (list_empty (&stream->td_list)) { | 2036 | if (list_is_singular(&stream->td_list)) { |
2038 | ehci_to_hcd(ehci)->self.bandwidth_allocated | 2037 | ehci_to_hcd(ehci)->self.bandwidth_allocated |
2039 | -= stream->bandwidth; | 2038 | -= stream->bandwidth; |
2040 | ehci_vdbg (ehci, | 2039 | ehci_vdbg (ehci, |
@@ -2045,7 +2044,6 @@ sitd_complete ( | |||
2045 | iso_stream_put (ehci, stream); | 2044 | iso_stream_put (ehci, stream); |
2046 | /* OK to recycle this SITD now that its completion callback ran. */ | 2045 | /* OK to recycle this SITD now that its completion callback ran. */ |
2047 | done: | 2046 | done: |
2048 | usb_put_urb(urb); | ||
2049 | sitd->urb = NULL; | 2047 | sitd->urb = NULL; |
2050 | sitd->stream = NULL; | 2048 | sitd->stream = NULL; |
2051 | list_move(&sitd->sitd_list, &stream->free_list); | 2049 | list_move(&sitd->sitd_list, &stream->free_list); |
diff --git a/drivers/usb/image/mdc800.c b/drivers/usb/image/mdc800.c index 878c77ca086e..972f20b3406c 100644 --- a/drivers/usb/image/mdc800.c +++ b/drivers/usb/image/mdc800.c | |||
@@ -499,6 +499,7 @@ static int mdc800_usb_probe (struct usb_interface *intf, | |||
499 | retval = usb_register_dev(intf, &mdc800_class); | 499 | retval = usb_register_dev(intf, &mdc800_class); |
500 | if (retval) { | 500 | if (retval) { |
501 | dev_err(&intf->dev, "Not able to get a minor for this device.\n"); | 501 | dev_err(&intf->dev, "Not able to get a minor for this device.\n"); |
502 | mutex_unlock(&mdc800->io_lock); | ||
502 | return -ENODEV; | 503 | return -ENODEV; |
503 | } | 504 | } |
504 | 505 | ||
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c index 7b6922e08ed1..203526542013 100644 --- a/drivers/usb/misc/adutux.c +++ b/drivers/usb/misc/adutux.c | |||
@@ -376,7 +376,7 @@ static int adu_release(struct inode *inode, struct file *file) | |||
376 | if (dev->open_count <= 0) { | 376 | if (dev->open_count <= 0) { |
377 | dbg(1," %s : device not opened", __func__); | 377 | dbg(1," %s : device not opened", __func__); |
378 | retval = -ENODEV; | 378 | retval = -ENODEV; |
379 | goto exit; | 379 | goto unlock; |
380 | } | 380 | } |
381 | 381 | ||
382 | adu_release_internal(dev); | 382 | adu_release_internal(dev); |
@@ -385,9 +385,9 @@ static int adu_release(struct inode *inode, struct file *file) | |||
385 | if (!dev->open_count) /* ... and we're the last user */ | 385 | if (!dev->open_count) /* ... and we're the last user */ |
386 | adu_delete(dev); | 386 | adu_delete(dev); |
387 | } | 387 | } |
388 | 388 | unlock: | |
389 | exit: | ||
390 | mutex_unlock(&adutux_mutex); | 389 | mutex_unlock(&adutux_mutex); |
390 | exit: | ||
391 | dbg(2," %s : leave, return value %d", __func__, retval); | 391 | dbg(2," %s : leave, return value %d", __func__, retval); |
392 | return retval; | 392 | return retval; |
393 | } | 393 | } |
diff --git a/drivers/usb/misc/vstusb.c b/drivers/usb/misc/vstusb.c index 63dff9ba73c5..f26ea8dc1577 100644 --- a/drivers/usb/misc/vstusb.c +++ b/drivers/usb/misc/vstusb.c | |||
@@ -401,6 +401,7 @@ static ssize_t vstusb_write(struct file *file, const char __user *buffer, | |||
401 | } | 401 | } |
402 | 402 | ||
403 | if (copy_from_user(buf, buffer, count)) { | 403 | if (copy_from_user(buf, buffer, count)) { |
404 | mutex_unlock(&vstdev->lock); | ||
404 | dev_err(&dev->dev, "%s: can't copy_from_user\n", __func__); | 405 | dev_err(&dev->dev, "%s: can't copy_from_user\n", __func__); |
405 | retval = -EFAULT; | 406 | retval = -EFAULT; |
406 | goto exit; | 407 | goto exit; |
diff --git a/drivers/usb/serial/cp2101.c b/drivers/usb/serial/cp2101.c index 027f4b7dde86..9b4082b58c5b 100644 --- a/drivers/usb/serial/cp2101.c +++ b/drivers/usb/serial/cp2101.c | |||
@@ -79,6 +79,7 @@ static struct usb_device_id id_table [] = { | |||
79 | { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ | 79 | { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ |
80 | { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ | 80 | { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ |
81 | { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ | 81 | { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ |
82 | { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */ | ||
82 | { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */ | 83 | { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */ |
83 | { USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */ | 84 | { USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */ |
84 | { USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */ | 85 | { USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */ |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index f92f4d773374..ae84c326a540 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -663,6 +663,11 @@ static struct usb_device_id id_table_combined [] = { | |||
663 | { USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) }, | 663 | { USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) }, |
664 | { USB_DEVICE(FTDI_VID, DIEBOLD_BCS_SE923_PID) }, | 664 | { USB_DEVICE(FTDI_VID, DIEBOLD_BCS_SE923_PID) }, |
665 | { USB_DEVICE(FTDI_VID, FTDI_NDI_HUC_PID) }, | 665 | { USB_DEVICE(FTDI_VID, FTDI_NDI_HUC_PID) }, |
666 | { USB_DEVICE(ATMEL_VID, STK541_PID) }, | ||
667 | { USB_DEVICE(DE_VID, STB_PID) }, | ||
668 | { USB_DEVICE(DE_VID, WHT_PID) }, | ||
669 | { USB_DEVICE(ADI_VID, ADI_GNICE_PID), | ||
670 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | ||
666 | { }, /* Optional parameter entry */ | 671 | { }, /* Optional parameter entry */ |
667 | { } /* Terminating entry */ | 672 | { } /* Terminating entry */ |
668 | }; | 673 | }; |
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h index e300c840f8ca..daaf63db0b50 100644 --- a/drivers/usb/serial/ftdi_sio.h +++ b/drivers/usb/serial/ftdi_sio.h | |||
@@ -893,6 +893,26 @@ | |||
893 | #define DIEBOLD_BCS_SE923_PID 0xfb99 | 893 | #define DIEBOLD_BCS_SE923_PID 0xfb99 |
894 | 894 | ||
895 | /* | 895 | /* |
896 | * Atmel STK541 | ||
897 | */ | ||
898 | #define ATMEL_VID 0x03eb /* Vendor ID */ | ||
899 | #define STK541_PID 0x2109 /* Zigbee Controller */ | ||
900 | |||
901 | /* | ||
902 | * Dresden Elektronic Sensor Terminal Board | ||
903 | */ | ||
904 | #define DE_VID 0x1cf1 /* Vendor ID */ | ||
905 | #define STB_PID 0x0001 /* Sensor Terminal Board */ | ||
906 | #define WHT_PID 0x0004 /* Wireless Handheld Terminal */ | ||
907 | |||
908 | /* | ||
909 | * Blackfin gnICE JTAG | ||
910 | * http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice | ||
911 | */ | ||
912 | #define ADI_VID 0x0456 | ||
913 | #define ADI_GNICE_PID 0xF000 | ||
914 | |||
915 | /* | ||
896 | * BmRequestType: 1100 0000b | 916 | * BmRequestType: 1100 0000b |
897 | * bRequest: FTDI_E2_READ | 917 | * bRequest: FTDI_E2_READ |
898 | * wValue: 0 | 918 | * wValue: 0 |
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index b7c132bded7f..61ebddc48497 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -89,6 +89,7 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po | |||
89 | #define OPTION_PRODUCT_ETNA_MODEM_GT 0x7041 | 89 | #define OPTION_PRODUCT_ETNA_MODEM_GT 0x7041 |
90 | #define OPTION_PRODUCT_ETNA_MODEM_EX 0x7061 | 90 | #define OPTION_PRODUCT_ETNA_MODEM_EX 0x7061 |
91 | #define OPTION_PRODUCT_ETNA_KOI_MODEM 0x7100 | 91 | #define OPTION_PRODUCT_ETNA_KOI_MODEM 0x7100 |
92 | #define OPTION_PRODUCT_GTM380_MODEM 0x7201 | ||
92 | 93 | ||
93 | #define HUAWEI_VENDOR_ID 0x12D1 | 94 | #define HUAWEI_VENDOR_ID 0x12D1 |
94 | #define HUAWEI_PRODUCT_E600 0x1001 | 95 | #define HUAWEI_PRODUCT_E600 0x1001 |
@@ -197,6 +198,7 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po | |||
197 | /* OVATION PRODUCTS */ | 198 | /* OVATION PRODUCTS */ |
198 | #define NOVATELWIRELESS_PRODUCT_MC727 0x4100 | 199 | #define NOVATELWIRELESS_PRODUCT_MC727 0x4100 |
199 | #define NOVATELWIRELESS_PRODUCT_MC950D 0x4400 | 200 | #define NOVATELWIRELESS_PRODUCT_MC950D 0x4400 |
201 | #define NOVATELWIRELESS_PRODUCT_U727 0x5010 | ||
200 | 202 | ||
201 | /* FUTURE NOVATEL PRODUCTS */ | 203 | /* FUTURE NOVATEL PRODUCTS */ |
202 | #define NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED 0X6000 | 204 | #define NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED 0X6000 |
@@ -288,15 +290,11 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po | |||
288 | 290 | ||
289 | /* ZTE PRODUCTS */ | 291 | /* ZTE PRODUCTS */ |
290 | #define ZTE_VENDOR_ID 0x19d2 | 292 | #define ZTE_VENDOR_ID 0x19d2 |
293 | #define ZTE_PRODUCT_MF622 0x0001 | ||
291 | #define ZTE_PRODUCT_MF628 0x0015 | 294 | #define ZTE_PRODUCT_MF628 0x0015 |
292 | #define ZTE_PRODUCT_MF626 0x0031 | 295 | #define ZTE_PRODUCT_MF626 0x0031 |
293 | #define ZTE_PRODUCT_CDMA_TECH 0xfffe | 296 | #define ZTE_PRODUCT_CDMA_TECH 0xfffe |
294 | 297 | ||
295 | /* Ericsson products */ | ||
296 | #define ERICSSON_VENDOR_ID 0x0bdb | ||
297 | #define ERICSSON_PRODUCT_F3507G_1 0x1900 | ||
298 | #define ERICSSON_PRODUCT_F3507G_2 0x1902 | ||
299 | |||
300 | #define BENQ_VENDOR_ID 0x04a5 | 298 | #define BENQ_VENDOR_ID 0x04a5 |
301 | #define BENQ_PRODUCT_H10 0x4068 | 299 | #define BENQ_PRODUCT_H10 0x4068 |
302 | 300 | ||
@@ -325,6 +323,7 @@ static struct usb_device_id option_ids[] = { | |||
325 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM_GT) }, | 323 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM_GT) }, |
326 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM_EX) }, | 324 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM_EX) }, |
327 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_KOI_MODEM) }, | 325 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_KOI_MODEM) }, |
326 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_GTM380_MODEM) }, | ||
328 | { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_Q101) }, | 327 | { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_Q101) }, |
329 | { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_Q111) }, | 328 | { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_Q111) }, |
330 | { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) }, | 329 | { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) }, |
@@ -415,6 +414,7 @@ static struct usb_device_id option_ids[] = { | |||
415 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */ | 414 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */ |
416 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */ | 415 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */ |
417 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */ | 416 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */ |
417 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */ | ||
418 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED) }, /* Novatel EVDO product */ | 418 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED) }, /* Novatel EVDO product */ |
419 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) }, /* Novatel HSPA product */ | 419 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) }, /* Novatel HSPA product */ |
420 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, /* Novatel EVDO Embedded product */ | 420 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, /* Novatel EVDO Embedded product */ |
@@ -442,7 +442,6 @@ static struct usb_device_id option_ids[] = { | |||
442 | { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_CINGULAR) }, /* Dell Wireless HSDPA 5520 == Novatel Expedite EU860D */ | 442 | { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_CINGULAR) }, /* Dell Wireless HSDPA 5520 == Novatel Expedite EU860D */ |
443 | { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_GENERIC_L) }, /* Dell Wireless HSDPA 5520 */ | 443 | { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_GENERIC_L) }, /* Dell Wireless HSDPA 5520 */ |
444 | { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_GENERIC_I) }, /* Dell Wireless 5520 Voda I Mobile Broadband (3G HSDPA) Minicard */ | 444 | { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_GENERIC_I) }, /* Dell Wireless 5520 Voda I Mobile Broadband (3G HSDPA) Minicard */ |
445 | { USB_DEVICE(DELL_VENDOR_ID, 0x8147) }, /* Dell Wireless 5530 Mobile Broadband (3G HSPA) Mini-Card */ | ||
446 | { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_SPRINT) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ | 445 | { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_SPRINT) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ |
447 | { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_TELUS) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ | 446 | { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_TELUS) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ |
448 | { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ | 447 | { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ |
@@ -510,11 +509,10 @@ static struct usb_device_id option_ids[] = { | |||
510 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ | 509 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ |
511 | { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */ | 510 | { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */ |
512 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, | 511 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, |
512 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622) }, | ||
513 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626) }, | 513 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626) }, |
514 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) }, | 514 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) }, |
515 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) }, | 515 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) }, |
516 | { USB_DEVICE(ERICSSON_VENDOR_ID, ERICSSON_PRODUCT_F3507G_1) }, | ||
517 | { USB_DEVICE(ERICSSON_VENDOR_ID, ERICSSON_PRODUCT_F3507G_2) }, | ||
518 | { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, | 516 | { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, |
519 | { USB_DEVICE(0x1da5, 0x4515) }, /* BenQ H20 */ | 517 | { USB_DEVICE(0x1da5, 0x4515) }, /* BenQ H20 */ |
520 | { } /* Terminating entry */ | 518 | { } /* Terminating entry */ |
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 6f59c8e510ea..cfde74a6faa3 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
@@ -226,7 +226,7 @@ UNUSUAL_DEV( 0x0421, 0x047c, 0x0370, 0x0610, | |||
226 | US_FL_MAX_SECTORS_64 ), | 226 | US_FL_MAX_SECTORS_64 ), |
227 | 227 | ||
228 | /* Reported by Manuel Osdoba <manuel.osdoba@tu-ilmenau.de> */ | 228 | /* Reported by Manuel Osdoba <manuel.osdoba@tu-ilmenau.de> */ |
229 | UNUSUAL_DEV( 0x0421, 0x0492, 0x0452, 0x0452, | 229 | UNUSUAL_DEV( 0x0421, 0x0492, 0x0452, 0x9999, |
230 | "Nokia", | 230 | "Nokia", |
231 | "Nokia 6233", | 231 | "Nokia 6233", |
232 | US_SC_DEVICE, US_PR_DEVICE, NULL, | 232 | US_SC_DEVICE, US_PR_DEVICE, NULL, |
@@ -951,7 +951,9 @@ UNUSUAL_DEV( 0x066f, 0x8000, 0x0001, 0x0001, | |||
951 | US_FL_FIX_CAPACITY ), | 951 | US_FL_FIX_CAPACITY ), |
952 | 952 | ||
953 | /* Reported by Richard -=[]=- <micro_flyer@hotmail.com> */ | 953 | /* Reported by Richard -=[]=- <micro_flyer@hotmail.com> */ |
954 | UNUSUAL_DEV( 0x067b, 0x2507, 0x0100, 0x0100, | 954 | /* Change to bcdDeviceMin (0x0100 to 0x0001) reported by |
955 | * Thomas Bartosik <tbartdev@gmx-topmail.de> */ | ||
956 | UNUSUAL_DEV( 0x067b, 0x2507, 0x0001, 0x0100, | ||
955 | "Prolific Technology Inc.", | 957 | "Prolific Technology Inc.", |
956 | "Mass Storage Device", | 958 | "Mass Storage Device", |
957 | US_SC_DEVICE, US_PR_DEVICE, NULL, | 959 | US_SC_DEVICE, US_PR_DEVICE, NULL, |
@@ -1390,6 +1392,16 @@ UNUSUAL_DEV( 0x0af0, 0x7401, 0x0000, 0x0000, | |||
1390 | US_SC_DEVICE, US_PR_DEVICE, NULL, | 1392 | US_SC_DEVICE, US_PR_DEVICE, NULL, |
1391 | 0 ), | 1393 | 0 ), |
1392 | 1394 | ||
1395 | /* Reported by Jan Dumon <j.dumon@option.com> | ||
1396 | * This device (wrongly) has a vendor-specific device descriptor. | ||
1397 | * The entry is needed so usb-storage can bind to its mass-storage | ||
1398 | * interface as an interface driver */ | ||
1399 | UNUSUAL_DEV( 0x0af0, 0x7501, 0x0000, 0x0000, | ||
1400 | "Option", | ||
1401 | "GI 0431 SD-Card", | ||
1402 | US_SC_DEVICE, US_PR_DEVICE, NULL, | ||
1403 | 0 ), | ||
1404 | |||
1393 | /* Reported by Ben Efros <ben@pc-doctor.com> */ | 1405 | /* Reported by Ben Efros <ben@pc-doctor.com> */ |
1394 | UNUSUAL_DEV( 0x0bc2, 0x3010, 0x0000, 0x0000, | 1406 | UNUSUAL_DEV( 0x0bc2, 0x3010, 0x0000, 0x0000, |
1395 | "Seagate", | 1407 | "Seagate", |
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c index 238a96aee3a1..613a5fc490d3 100644 --- a/drivers/usb/wusbcore/wa-xfer.c +++ b/drivers/usb/wusbcore/wa-xfer.c | |||
@@ -921,8 +921,10 @@ static void wa_urb_enqueue_b(struct wa_xfer *xfer) | |||
921 | result = -ENODEV; | 921 | result = -ENODEV; |
922 | /* FIXME: segmentation broken -- kills DWA */ | 922 | /* FIXME: segmentation broken -- kills DWA */ |
923 | mutex_lock(&wusbhc->mutex); /* get a WUSB dev */ | 923 | mutex_lock(&wusbhc->mutex); /* get a WUSB dev */ |
924 | if (urb->dev == NULL) | 924 | if (urb->dev == NULL) { |
925 | mutex_unlock(&wusbhc->mutex); | ||
925 | goto error_dev_gone; | 926 | goto error_dev_gone; |
927 | } | ||
926 | wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev); | 928 | wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev); |
927 | if (wusb_dev == NULL) { | 929 | if (wusb_dev == NULL) { |
928 | mutex_unlock(&wusbhc->mutex); | 930 | mutex_unlock(&wusbhc->mutex); |
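Note: the wa_urb_enqueue_b() hunk above is the classic "unlock on every early exit" fix -- the urb->dev == NULL path used to jump to error_dev_gone with wusbhc->mutex still held. A minimal userspace sketch of the same rule, using pthreads (do_work and resource_gone are made-up names, not part of the driver):

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int do_work(int resource_gone)
{
        pthread_mutex_lock(&lock);
        if (resource_gone) {
                /* drop the lock before bailing out, exactly like the
                 * added mutex_unlock() before goto error_dev_gone */
                pthread_mutex_unlock(&lock);
                return -ENODEV;
        }
        /* ... work done while holding the lock ... */
        pthread_mutex_unlock(&lock);
        return 0;
}

int main(void)
{
        return do_work(0) ? 1 : 0;
}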
diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c index 81603f85e17e..c6d7cc76516f 100644 --- a/drivers/video/aty/radeon_pm.c +++ b/drivers/video/aty/radeon_pm.c | |||
@@ -2507,6 +2507,25 @@ static void radeon_reinitialize_QW(struct radeonfb_info *rinfo) | |||
2507 | 2507 | ||
2508 | #endif /* CONFIG_PPC_OF */ | 2508 | #endif /* CONFIG_PPC_OF */ |
2509 | 2509 | ||
2510 | static void radeonfb_whack_power_state(struct radeonfb_info *rinfo, pci_power_t state) | ||
2511 | { | ||
2512 | u16 pwr_cmd; | ||
2513 | |||
2514 | for (;;) { | ||
2515 | pci_read_config_word(rinfo->pdev, | ||
2516 | rinfo->pm_reg+PCI_PM_CTRL, | ||
2517 | &pwr_cmd); | ||
2518 | if (pwr_cmd & 2) | ||
2519 | break; | ||
2520 | pwr_cmd = (pwr_cmd & ~PCI_PM_CTRL_STATE_MASK) | 2; | ||
2521 | pci_write_config_word(rinfo->pdev, | ||
2522 | rinfo->pm_reg+PCI_PM_CTRL, | ||
2523 | pwr_cmd); | ||
2524 | msleep(500); | ||
2525 | } | ||
2526 | rinfo->pdev->current_state = state; | ||
2527 | } | ||
2528 | |||
2510 | static void radeon_set_suspend(struct radeonfb_info *rinfo, int suspend) | 2529 | static void radeon_set_suspend(struct radeonfb_info *rinfo, int suspend) |
2511 | { | 2530 | { |
2512 | u32 tmp; | 2531 | u32 tmp; |
@@ -2558,6 +2577,11 @@ static void radeon_set_suspend(struct radeonfb_info *rinfo, int suspend) | |||
2558 | /* Switch PCI power management to D2. */ | 2577 | /* Switch PCI power management to D2. */ |
2559 | pci_disable_device(rinfo->pdev); | 2578 | pci_disable_device(rinfo->pdev); |
2560 | pci_save_state(rinfo->pdev); | 2579 | pci_save_state(rinfo->pdev); |
2580 | /* The chip seems to need us to whack the PM register | ||
2581 | * repeatedly until it sticks. We do that -prior- to | ||
2582 | * calling pci_set_power_state() | ||
2583 | */ | ||
2584 | radeonfb_whack_power_state(rinfo, PCI_D2); | ||
2561 | pci_set_power_state(rinfo->pdev, PCI_D2); | 2585 | pci_set_power_state(rinfo->pdev, PCI_D2); |
2562 | } else { | 2586 | } else { |
2563 | printk(KERN_DEBUG "radeonfb (%s): switching to D0 state...\n", | 2587 | printk(KERN_DEBUG "radeonfb (%s): switching to D0 state...\n", |
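Note: radeonfb_whack_power_state() above is a write/re-read/retry loop: keep forcing the D2 bits into PCI_PM_CTRL until a read shows they latched, sleeping 500 ms between attempts. A small, runnable sketch of that pattern against a simulated register (read_reg/write_reg are stand-ins for the PCI config accessors; unlike the driver's for (;;), the sketch bounds the retries):

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* simulated 16-bit register; stands in for the PCI_PM_CTRL config word */
static uint16_t fake_reg;
static int writes_ignored = 2;          /* pretend the first writes don't stick */

static uint16_t read_reg(void)  { return fake_reg; }
static void write_reg(uint16_t v)
{
        if (writes_ignored-- <= 0)
                fake_reg = v;
}

static int set_state_until_it_sticks(uint16_t mask, uint16_t val, int max_tries)
{
        while (max_tries--) {
                uint16_t v = read_reg();
                if ((v & mask) == val)
                        return 0;                         /* it stuck */
                write_reg((uint16_t)((v & ~mask) | val)); /* whack it again */
                usleep(1000);                             /* the driver sleeps 500 ms */
        }
        return -1;
}

int main(void)
{
        int rc = set_state_until_it_sticks(0x3, 0x2, 10);
        printf("state %s\n", rc ? "did not stick" : "stuck");
        return rc ? 1 : 0;
}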
@@ -443,7 +443,7 @@ static struct kiocb *__aio_get_req(struct kioctx *ctx) | |||
443 | req->private = NULL; | 443 | req->private = NULL; |
444 | req->ki_iovec = NULL; | 444 | req->ki_iovec = NULL; |
445 | INIT_LIST_HEAD(&req->ki_run_list); | 445 | INIT_LIST_HEAD(&req->ki_run_list); |
446 | req->ki_eventfd = ERR_PTR(-EINVAL); | 446 | req->ki_eventfd = NULL; |
447 | 447 | ||
448 | /* Check if the completion queue has enough free space to | 448 | /* Check if the completion queue has enough free space to |
449 | * accept an event from this io. | 449 | * accept an event from this io. |
@@ -485,8 +485,6 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req) | |||
485 | { | 485 | { |
486 | assert_spin_locked(&ctx->ctx_lock); | 486 | assert_spin_locked(&ctx->ctx_lock); |
487 | 487 | ||
488 | if (!IS_ERR(req->ki_eventfd)) | ||
489 | fput(req->ki_eventfd); | ||
490 | if (req->ki_dtor) | 488 | if (req->ki_dtor) |
491 | req->ki_dtor(req); | 489 | req->ki_dtor(req); |
492 | if (req->ki_iovec != &req->ki_inline_vec) | 490 | if (req->ki_iovec != &req->ki_inline_vec) |
@@ -508,8 +506,11 @@ static void aio_fput_routine(struct work_struct *data) | |||
508 | list_del(&req->ki_list); | 506 | list_del(&req->ki_list); |
509 | spin_unlock_irq(&fput_lock); | 507 | spin_unlock_irq(&fput_lock); |
510 | 508 | ||
511 | /* Complete the fput */ | 509 | /* Complete the fput(s) */ |
512 | __fput(req->ki_filp); | 510 | if (req->ki_filp != NULL) |
511 | __fput(req->ki_filp); | ||
512 | if (req->ki_eventfd != NULL) | ||
513 | __fput(req->ki_eventfd); | ||
513 | 514 | ||
514 | /* Link the iocb into the context's free list */ | 515 | /* Link the iocb into the context's free list */ |
515 | spin_lock_irq(&ctx->ctx_lock); | 516 | spin_lock_irq(&ctx->ctx_lock); |
@@ -527,12 +528,14 @@ static void aio_fput_routine(struct work_struct *data) | |||
527 | */ | 528 | */ |
528 | static int __aio_put_req(struct kioctx *ctx, struct kiocb *req) | 529 | static int __aio_put_req(struct kioctx *ctx, struct kiocb *req) |
529 | { | 530 | { |
531 | int schedule_putreq = 0; | ||
532 | |||
530 | dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n", | 533 | dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n", |
531 | req, atomic_long_read(&req->ki_filp->f_count)); | 534 | req, atomic_long_read(&req->ki_filp->f_count)); |
532 | 535 | ||
533 | assert_spin_locked(&ctx->ctx_lock); | 536 | assert_spin_locked(&ctx->ctx_lock); |
534 | 537 | ||
535 | req->ki_users --; | 538 | req->ki_users--; |
536 | BUG_ON(req->ki_users < 0); | 539 | BUG_ON(req->ki_users < 0); |
537 | if (likely(req->ki_users)) | 540 | if (likely(req->ki_users)) |
538 | return 0; | 541 | return 0; |
@@ -540,10 +543,23 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req) | |||
540 | req->ki_cancel = NULL; | 543 | req->ki_cancel = NULL; |
541 | req->ki_retry = NULL; | 544 | req->ki_retry = NULL; |
542 | 545 | ||
543 | /* Must be done under the lock to serialise against cancellation. | 546 | /* |
544 | * Call this aio_fput as it duplicates fput via the fput_work. | 547 | * Try to optimize the aio and eventfd file* puts, by avoiding to |
548 | * schedule work in case it is not __fput() time. In normal cases, | ||
549 | * we would not be holding the last reference to the file*, so | ||
550 | * this function will be executed w/out any aio kthread wakeup. | ||
545 | */ | 551 | */ |
546 | if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count))) { | 552 | if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count))) |
553 | schedule_putreq++; | ||
554 | else | ||
555 | req->ki_filp = NULL; | ||
556 | if (req->ki_eventfd != NULL) { | ||
557 | if (unlikely(atomic_long_dec_and_test(&req->ki_eventfd->f_count))) | ||
558 | schedule_putreq++; | ||
559 | else | ||
560 | req->ki_eventfd = NULL; | ||
561 | } | ||
562 | if (unlikely(schedule_putreq)) { | ||
547 | get_ioctx(ctx); | 563 | get_ioctx(ctx); |
548 | spin_lock(&fput_lock); | 564 | spin_lock(&fput_lock); |
549 | list_add(&req->ki_list, &fput_head); | 565 | list_add(&req->ki_list, &fput_head); |
@@ -571,7 +587,7 @@ int aio_put_req(struct kiocb *req) | |||
571 | static struct kioctx *lookup_ioctx(unsigned long ctx_id) | 587 | static struct kioctx *lookup_ioctx(unsigned long ctx_id) |
572 | { | 588 | { |
573 | struct mm_struct *mm = current->mm; | 589 | struct mm_struct *mm = current->mm; |
574 | struct kioctx *ctx = NULL; | 590 | struct kioctx *ctx, *ret = NULL; |
575 | struct hlist_node *n; | 591 | struct hlist_node *n; |
576 | 592 | ||
577 | rcu_read_lock(); | 593 | rcu_read_lock(); |
@@ -579,12 +595,13 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id) | |||
579 | hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) { | 595 | hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) { |
580 | if (ctx->user_id == ctx_id && !ctx->dead) { | 596 | if (ctx->user_id == ctx_id && !ctx->dead) { |
581 | get_ioctx(ctx); | 597 | get_ioctx(ctx); |
598 | ret = ctx; | ||
582 | break; | 599 | break; |
583 | } | 600 | } |
584 | } | 601 | } |
585 | 602 | ||
586 | rcu_read_unlock(); | 603 | rcu_read_unlock(); |
587 | return ctx; | 604 | return ret; |
588 | } | 605 | } |
589 | 606 | ||
590 | /* | 607 | /* |
@@ -1009,7 +1026,7 @@ int aio_complete(struct kiocb *iocb, long res, long res2) | |||
1009 | * eventfd. The eventfd_signal() function is safe to be called | 1026 | * eventfd. The eventfd_signal() function is safe to be called |
1010 | * from IRQ context. | 1027 | * from IRQ context. |
1011 | */ | 1028 | */ |
1012 | if (!IS_ERR(iocb->ki_eventfd)) | 1029 | if (iocb->ki_eventfd != NULL) |
1013 | eventfd_signal(iocb->ki_eventfd, 1); | 1030 | eventfd_signal(iocb->ki_eventfd, 1); |
1014 | 1031 | ||
1015 | put_rq: | 1032 | put_rq: |
@@ -1608,6 +1625,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, | |||
1608 | req->ki_eventfd = eventfd_fget((int) iocb->aio_resfd); | 1625 | req->ki_eventfd = eventfd_fget((int) iocb->aio_resfd); |
1609 | if (IS_ERR(req->ki_eventfd)) { | 1626 | if (IS_ERR(req->ki_eventfd)) { |
1610 | ret = PTR_ERR(req->ki_eventfd); | 1627 | ret = PTR_ERR(req->ki_eventfd); |
1628 | req->ki_eventfd = NULL; | ||
1611 | goto out_put_req; | 1629 | goto out_put_req; |
1612 | } | 1630 | } |
1613 | } | 1631 | } |
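Note: the fs/aio.c change above drops the f_count on ki_filp and ki_eventfd inline and only schedules the fput work when one of those puts was the last reference. A runnable userspace sketch of that "decrement, defer the heavy release only on the final put" pattern using C11 atomics (struct object and release_object() are illustrative names, not the aio structures):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct object {
        atomic_long refs;
        char name[16];
};

/* slow path: in the kernel this would be deferred to a workqueue */
static void release_object(struct object *obj)
{
        printf("releasing %s\n", obj->name);
        free(obj);
}

static void put_object(struct object *obj)
{
        /* atomic_fetch_sub returns the value *before* the decrement */
        if (atomic_fetch_sub(&obj->refs, 1) == 1)
                release_object(obj);    /* we dropped the last reference */
        /* otherwise nothing else to do -- no work gets scheduled */
}

int main(void)
{
        struct object *obj = calloc(1, sizeof(*obj));

        atomic_init(&obj->refs, 2);
        snprintf(obj->name, sizeof(obj->name), "demo");
        put_object(obj);        /* one reference still held: no release */
        put_object(obj);        /* last put: release runs */
        return 0;
}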
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 82491ba8fa40..5e1d4e30e9d8 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
@@ -784,7 +784,14 @@ struct btrfs_fs_info { | |||
784 | struct list_head dirty_cowonly_roots; | 784 | struct list_head dirty_cowonly_roots; |
785 | 785 | ||
786 | struct btrfs_fs_devices *fs_devices; | 786 | struct btrfs_fs_devices *fs_devices; |
787 | |||
788 | /* | ||
789 | * the space_info list is almost entirely read only. It only changes | ||
790 | * when we add a new raid type to the FS, and that happens | ||
791 | * very rarely. RCU is used to protect it. | ||
792 | */ | ||
787 | struct list_head space_info; | 793 | struct list_head space_info; |
794 | |||
788 | spinlock_t delalloc_lock; | 795 | spinlock_t delalloc_lock; |
789 | spinlock_t new_trans_lock; | 796 | spinlock_t new_trans_lock; |
790 | u64 delalloc_bytes; | 797 | u64 delalloc_bytes; |
@@ -1797,6 +1804,8 @@ int btrfs_cleanup_reloc_trees(struct btrfs_root *root); | |||
1797 | int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len); | 1804 | int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len); |
1798 | u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags); | 1805 | u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags); |
1799 | void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *ionde); | 1806 | void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *ionde); |
1807 | void btrfs_clear_space_info_full(struct btrfs_fs_info *info); | ||
1808 | |||
1800 | int btrfs_check_metadata_free_space(struct btrfs_root *root); | 1809 | int btrfs_check_metadata_free_space(struct btrfs_root *root); |
1801 | int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode, | 1810 | int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode, |
1802 | u64 bytes); | 1811 | u64 bytes); |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 9abf81f71c46..fefe83ad2059 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/writeback.h> | 20 | #include <linux/writeback.h> |
21 | #include <linux/blkdev.h> | 21 | #include <linux/blkdev.h> |
22 | #include <linux/sort.h> | 22 | #include <linux/sort.h> |
23 | #include <linux/rcupdate.h> | ||
23 | #include "compat.h" | 24 | #include "compat.h" |
24 | #include "hash.h" | 25 | #include "hash.h" |
25 | #include "crc32c.h" | 26 | #include "crc32c.h" |
@@ -330,13 +331,33 @@ static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info, | |||
330 | { | 331 | { |
331 | struct list_head *head = &info->space_info; | 332 | struct list_head *head = &info->space_info; |
332 | struct btrfs_space_info *found; | 333 | struct btrfs_space_info *found; |
333 | list_for_each_entry(found, head, list) { | 334 | |
334 | if (found->flags == flags) | 335 | rcu_read_lock(); |
336 | list_for_each_entry_rcu(found, head, list) { | ||
337 | if (found->flags == flags) { | ||
338 | rcu_read_unlock(); | ||
335 | return found; | 339 | return found; |
340 | } | ||
336 | } | 341 | } |
342 | rcu_read_unlock(); | ||
337 | return NULL; | 343 | return NULL; |
338 | } | 344 | } |
339 | 345 | ||
346 | /* | ||
347 | * after adding space to the filesystem, we need to clear the full flags | ||
348 | * on all the space infos. | ||
349 | */ | ||
350 | void btrfs_clear_space_info_full(struct btrfs_fs_info *info) | ||
351 | { | ||
352 | struct list_head *head = &info->space_info; | ||
353 | struct btrfs_space_info *found; | ||
354 | |||
355 | rcu_read_lock(); | ||
356 | list_for_each_entry_rcu(found, head, list) | ||
357 | found->full = 0; | ||
358 | rcu_read_unlock(); | ||
359 | } | ||
360 | |||
340 | static u64 div_factor(u64 num, int factor) | 361 | static u64 div_factor(u64 num, int factor) |
341 | { | 362 | { |
342 | if (factor == 10) | 363 | if (factor == 10) |
@@ -1903,7 +1924,6 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags, | |||
1903 | if (!found) | 1924 | if (!found) |
1904 | return -ENOMEM; | 1925 | return -ENOMEM; |
1905 | 1926 | ||
1906 | list_add(&found->list, &info->space_info); | ||
1907 | INIT_LIST_HEAD(&found->block_groups); | 1927 | INIT_LIST_HEAD(&found->block_groups); |
1908 | init_rwsem(&found->groups_sem); | 1928 | init_rwsem(&found->groups_sem); |
1909 | spin_lock_init(&found->lock); | 1929 | spin_lock_init(&found->lock); |
@@ -1917,6 +1937,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags, | |||
1917 | found->full = 0; | 1937 | found->full = 0; |
1918 | found->force_alloc = 0; | 1938 | found->force_alloc = 0; |
1919 | *space_info = found; | 1939 | *space_info = found; |
1940 | list_add_rcu(&found->list, &info->space_info); | ||
1920 | return 0; | 1941 | return 0; |
1921 | } | 1942 | } |
1922 | 1943 | ||
@@ -6320,6 +6341,7 @@ out: | |||
6320 | int btrfs_free_block_groups(struct btrfs_fs_info *info) | 6341 | int btrfs_free_block_groups(struct btrfs_fs_info *info) |
6321 | { | 6342 | { |
6322 | struct btrfs_block_group_cache *block_group; | 6343 | struct btrfs_block_group_cache *block_group; |
6344 | struct btrfs_space_info *space_info; | ||
6323 | struct rb_node *n; | 6345 | struct rb_node *n; |
6324 | 6346 | ||
6325 | spin_lock(&info->block_group_cache_lock); | 6347 | spin_lock(&info->block_group_cache_lock); |
@@ -6341,6 +6363,23 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) | |||
6341 | spin_lock(&info->block_group_cache_lock); | 6363 | spin_lock(&info->block_group_cache_lock); |
6342 | } | 6364 | } |
6343 | spin_unlock(&info->block_group_cache_lock); | 6365 | spin_unlock(&info->block_group_cache_lock); |
6366 | |||
6367 | /* now that all the block groups are freed, go through and | ||
6368 | * free all the space_info structs. This is only called during | ||
6369 | * the final stages of unmount, and so we know nobody is | ||
6370 | * using them. We call synchronize_rcu() once before we start, | ||
6371 | * just to be on the safe side. | ||
6372 | */ | ||
6373 | synchronize_rcu(); | ||
6374 | |||
6375 | while(!list_empty(&info->space_info)) { | ||
6376 | space_info = list_entry(info->space_info.next, | ||
6377 | struct btrfs_space_info, | ||
6378 | list); | ||
6379 | |||
6380 | list_del(&space_info->list); | ||
6381 | kfree(space_info); | ||
6382 | } | ||
6344 | return 0; | 6383 | return 0; |
6345 | } | 6384 | } |
6346 | 6385 | ||
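Note: the ctree.h comment and the __find_space_info()/update_space_info() hunks above follow the standard RCU-protected list recipe: lock-free readers under rcu_read_lock(), writers that publish fully-initialised entries with list_add_rcu(), and a synchronize_rcu() before teardown. A condensed kernel-style sketch of that recipe (struct item and the function names are hypothetical; this assumes kernel headers and is not buildable on its own):

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

struct item {
        u64 flags;
        struct list_head list;
};

static struct item *find_item(struct list_head *head, u64 flags)
{
        struct item *it;

        rcu_read_lock();
        list_for_each_entry_rcu(it, head, list) {
                if (it->flags == flags) {
                        rcu_read_unlock();
                        /* returning the pointer after the read section is
                         * only safe because, as in btrfs, entries live
                         * until unmount */
                        return it;
                }
        }
        rcu_read_unlock();
        return NULL;
}

static void publish_item(struct list_head *head, struct item *it)
{
        it->flags = 0;
        INIT_LIST_HEAD(&it->list);
        /* initialise everything first, then make it visible -- the same
         * reason update_space_info() now calls list_add_rcu() last */
        list_add_rcu(&it->list, head);
}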
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 1316139bf9e8..dd06e18e5aac 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -1374,6 +1374,12 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) | |||
1374 | ret = btrfs_add_device(trans, root, device); | 1374 | ret = btrfs_add_device(trans, root, device); |
1375 | } | 1375 | } |
1376 | 1376 | ||
1377 | /* | ||
1378 | * we've got more storage, clear any full flags on the space | ||
1379 | * infos | ||
1380 | */ | ||
1381 | btrfs_clear_space_info_full(root->fs_info); | ||
1382 | |||
1377 | unlock_chunks(root); | 1383 | unlock_chunks(root); |
1378 | btrfs_commit_transaction(trans, root); | 1384 | btrfs_commit_transaction(trans, root); |
1379 | 1385 | ||
@@ -1459,6 +1465,8 @@ static int __btrfs_grow_device(struct btrfs_trans_handle *trans, | |||
1459 | device->fs_devices->total_rw_bytes += diff; | 1465 | device->fs_devices->total_rw_bytes += diff; |
1460 | 1466 | ||
1461 | device->total_bytes = new_size; | 1467 | device->total_bytes = new_size; |
1468 | btrfs_clear_space_info_full(device->dev_root->fs_info); | ||
1469 | |||
1462 | return btrfs_update_device(trans, device); | 1470 | return btrfs_update_device(trans, device); |
1463 | } | 1471 | } |
1464 | 1472 | ||
diff --git a/fs/buffer.c b/fs/buffer.c index 9f697419ed8e..891e1c78e4f1 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -760,15 +760,9 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode); | |||
760 | * If warn is true, then emit a warning if the page is not uptodate and has | 760 | * If warn is true, then emit a warning if the page is not uptodate and has |
761 | * not been truncated. | 761 | * not been truncated. |
762 | */ | 762 | */ |
763 | static int __set_page_dirty(struct page *page, | 763 | static void __set_page_dirty(struct page *page, |
764 | struct address_space *mapping, int warn) | 764 | struct address_space *mapping, int warn) |
765 | { | 765 | { |
766 | if (unlikely(!mapping)) | ||
767 | return !TestSetPageDirty(page); | ||
768 | |||
769 | if (TestSetPageDirty(page)) | ||
770 | return 0; | ||
771 | |||
772 | spin_lock_irq(&mapping->tree_lock); | 766 | spin_lock_irq(&mapping->tree_lock); |
773 | if (page->mapping) { /* Race with truncate? */ | 767 | if (page->mapping) { /* Race with truncate? */ |
774 | WARN_ON_ONCE(warn && !PageUptodate(page)); | 768 | WARN_ON_ONCE(warn && !PageUptodate(page)); |
@@ -785,8 +779,6 @@ static int __set_page_dirty(struct page *page, | |||
785 | } | 779 | } |
786 | spin_unlock_irq(&mapping->tree_lock); | 780 | spin_unlock_irq(&mapping->tree_lock); |
787 | __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); | 781 | __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); |
788 | |||
789 | return 1; | ||
790 | } | 782 | } |
791 | 783 | ||
792 | /* | 784 | /* |
@@ -816,6 +808,7 @@ static int __set_page_dirty(struct page *page, | |||
816 | */ | 808 | */ |
817 | int __set_page_dirty_buffers(struct page *page) | 809 | int __set_page_dirty_buffers(struct page *page) |
818 | { | 810 | { |
811 | int newly_dirty; | ||
819 | struct address_space *mapping = page_mapping(page); | 812 | struct address_space *mapping = page_mapping(page); |
820 | 813 | ||
821 | if (unlikely(!mapping)) | 814 | if (unlikely(!mapping)) |
@@ -831,9 +824,12 @@ int __set_page_dirty_buffers(struct page *page) | |||
831 | bh = bh->b_this_page; | 824 | bh = bh->b_this_page; |
832 | } while (bh != head); | 825 | } while (bh != head); |
833 | } | 826 | } |
827 | newly_dirty = !TestSetPageDirty(page); | ||
834 | spin_unlock(&mapping->private_lock); | 828 | spin_unlock(&mapping->private_lock); |
835 | 829 | ||
836 | return __set_page_dirty(page, mapping, 1); | 830 | if (newly_dirty) |
831 | __set_page_dirty(page, mapping, 1); | ||
832 | return newly_dirty; | ||
837 | } | 833 | } |
838 | EXPORT_SYMBOL(__set_page_dirty_buffers); | 834 | EXPORT_SYMBOL(__set_page_dirty_buffers); |
839 | 835 | ||
@@ -1262,8 +1258,11 @@ void mark_buffer_dirty(struct buffer_head *bh) | |||
1262 | return; | 1258 | return; |
1263 | } | 1259 | } |
1264 | 1260 | ||
1265 | if (!test_set_buffer_dirty(bh)) | 1261 | if (!test_set_buffer_dirty(bh)) { |
1266 | __set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0); | 1262 | struct page *page = bh->b_page; |
1263 | if (!TestSetPageDirty(page)) | ||
1264 | __set_page_dirty(page, page_mapping(page), 0); | ||
1265 | } | ||
1267 | } | 1266 | } |
1268 | 1267 | ||
1269 | /* | 1268 | /* |
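Note: the fs/buffer.c rework above moves TestSetPageDirty() out of __set_page_dirty(), so callers test-and-set first and only run the radix-tree/inode bookkeeping on the clean-to-dirty transition. A runnable userspace sketch of that split using a C11 atomic_flag (struct page_like, mark_dirty() and do_bookkeeping() are made-up names):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct page_like {
        atomic_flag dirty;      /* clear == clean */
};

static void do_bookkeeping(struct page_like *p)
{
        /* in the kernel: radix-tree tagging + __mark_inode_dirty() */
        printf("page %p newly dirtied\n", (void *)p);
}

static bool mark_dirty(struct page_like *p)
{
        bool newly_dirty = !atomic_flag_test_and_set(&p->dirty);

        if (newly_dirty)
                do_bookkeeping(p);      /* only on the transition */
        return newly_dirty;
}

int main(void)
{
        struct page_like p = { ATOMIC_FLAG_INIT };

        mark_dirty(&p);         /* transition: bookkeeping runs */
        mark_dirty(&p);         /* already dirty: no bookkeeping */
        return 0;
}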
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index bdca1f4b3a3e..8b65f289ee00 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c | |||
@@ -1324,14 +1324,13 @@ static int ecryptfs_write_headers_virt(char *page_virt, size_t max, | |||
1324 | } | 1324 | } |
1325 | 1325 | ||
1326 | static int | 1326 | static int |
1327 | ecryptfs_write_metadata_to_contents(struct ecryptfs_crypt_stat *crypt_stat, | 1327 | ecryptfs_write_metadata_to_contents(struct dentry *ecryptfs_dentry, |
1328 | struct dentry *ecryptfs_dentry, | 1328 | char *virt, size_t virt_len) |
1329 | char *virt) | ||
1330 | { | 1329 | { |
1331 | int rc; | 1330 | int rc; |
1332 | 1331 | ||
1333 | rc = ecryptfs_write_lower(ecryptfs_dentry->d_inode, virt, | 1332 | rc = ecryptfs_write_lower(ecryptfs_dentry->d_inode, virt, |
1334 | 0, crypt_stat->num_header_bytes_at_front); | 1333 | 0, virt_len); |
1335 | if (rc) | 1334 | if (rc) |
1336 | printk(KERN_ERR "%s: Error attempting to write header " | 1335 | printk(KERN_ERR "%s: Error attempting to write header " |
1337 | "information to lower file; rc = [%d]\n", __func__, | 1336 | "information to lower file; rc = [%d]\n", __func__, |
@@ -1341,7 +1340,6 @@ ecryptfs_write_metadata_to_contents(struct ecryptfs_crypt_stat *crypt_stat, | |||
1341 | 1340 | ||
1342 | static int | 1341 | static int |
1343 | ecryptfs_write_metadata_to_xattr(struct dentry *ecryptfs_dentry, | 1342 | ecryptfs_write_metadata_to_xattr(struct dentry *ecryptfs_dentry, |
1344 | struct ecryptfs_crypt_stat *crypt_stat, | ||
1345 | char *page_virt, size_t size) | 1343 | char *page_virt, size_t size) |
1346 | { | 1344 | { |
1347 | int rc; | 1345 | int rc; |
@@ -1351,6 +1349,17 @@ ecryptfs_write_metadata_to_xattr(struct dentry *ecryptfs_dentry, | |||
1351 | return rc; | 1349 | return rc; |
1352 | } | 1350 | } |
1353 | 1351 | ||
1352 | static unsigned long ecryptfs_get_zeroed_pages(gfp_t gfp_mask, | ||
1353 | unsigned int order) | ||
1354 | { | ||
1355 | struct page *page; | ||
1356 | |||
1357 | page = alloc_pages(gfp_mask | __GFP_ZERO, order); | ||
1358 | if (page) | ||
1359 | return (unsigned long) page_address(page); | ||
1360 | return 0; | ||
1361 | } | ||
1362 | |||
1354 | /** | 1363 | /** |
1355 | * ecryptfs_write_metadata | 1364 | * ecryptfs_write_metadata |
1356 | * @ecryptfs_dentry: The eCryptfs dentry | 1365 | * @ecryptfs_dentry: The eCryptfs dentry |
@@ -1367,7 +1376,9 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry) | |||
1367 | { | 1376 | { |
1368 | struct ecryptfs_crypt_stat *crypt_stat = | 1377 | struct ecryptfs_crypt_stat *crypt_stat = |
1369 | &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat; | 1378 | &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat; |
1379 | unsigned int order; | ||
1370 | char *virt; | 1380 | char *virt; |
1381 | size_t virt_len; | ||
1371 | size_t size = 0; | 1382 | size_t size = 0; |
1372 | int rc = 0; | 1383 | int rc = 0; |
1373 | 1384 | ||
@@ -1383,33 +1394,35 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry) | |||
1383 | rc = -EINVAL; | 1394 | rc = -EINVAL; |
1384 | goto out; | 1395 | goto out; |
1385 | } | 1396 | } |
1397 | virt_len = crypt_stat->num_header_bytes_at_front; | ||
1398 | order = get_order(virt_len); | ||
1386 | /* Released in this function */ | 1399 | /* Released in this function */ |
1387 | virt = (char *)get_zeroed_page(GFP_KERNEL); | 1400 | virt = (char *)ecryptfs_get_zeroed_pages(GFP_KERNEL, order); |
1388 | if (!virt) { | 1401 | if (!virt) { |
1389 | printk(KERN_ERR "%s: Out of memory\n", __func__); | 1402 | printk(KERN_ERR "%s: Out of memory\n", __func__); |
1390 | rc = -ENOMEM; | 1403 | rc = -ENOMEM; |
1391 | goto out; | 1404 | goto out; |
1392 | } | 1405 | } |
1393 | rc = ecryptfs_write_headers_virt(virt, PAGE_CACHE_SIZE, &size, | 1406 | rc = ecryptfs_write_headers_virt(virt, virt_len, &size, crypt_stat, |
1394 | crypt_stat, ecryptfs_dentry); | 1407 | ecryptfs_dentry); |
1395 | if (unlikely(rc)) { | 1408 | if (unlikely(rc)) { |
1396 | printk(KERN_ERR "%s: Error whilst writing headers; rc = [%d]\n", | 1409 | printk(KERN_ERR "%s: Error whilst writing headers; rc = [%d]\n", |
1397 | __func__, rc); | 1410 | __func__, rc); |
1398 | goto out_free; | 1411 | goto out_free; |
1399 | } | 1412 | } |
1400 | if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) | 1413 | if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) |
1401 | rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry, | 1414 | rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry, virt, |
1402 | crypt_stat, virt, size); | 1415 | size); |
1403 | else | 1416 | else |
1404 | rc = ecryptfs_write_metadata_to_contents(crypt_stat, | 1417 | rc = ecryptfs_write_metadata_to_contents(ecryptfs_dentry, virt, |
1405 | ecryptfs_dentry, virt); | 1418 | virt_len); |
1406 | if (rc) { | 1419 | if (rc) { |
1407 | printk(KERN_ERR "%s: Error writing metadata out to lower file; " | 1420 | printk(KERN_ERR "%s: Error writing metadata out to lower file; " |
1408 | "rc = [%d]\n", __func__, rc); | 1421 | "rc = [%d]\n", __func__, rc); |
1409 | goto out_free; | 1422 | goto out_free; |
1410 | } | 1423 | } |
1411 | out_free: | 1424 | out_free: |
1412 | free_page((unsigned long)virt); | 1425 | free_pages((unsigned long)virt, order); |
1413 | out: | 1426 | out: |
1414 | return rc; | 1427 | return rc; |
1415 | } | 1428 | } |
@@ -2208,17 +2221,19 @@ int ecryptfs_decode_and_decrypt_filename(char **plaintext_name, | |||
2208 | struct dentry *ecryptfs_dir_dentry, | 2221 | struct dentry *ecryptfs_dir_dentry, |
2209 | const char *name, size_t name_size) | 2222 | const char *name, size_t name_size) |
2210 | { | 2223 | { |
2224 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat = | ||
2225 | &ecryptfs_superblock_to_private( | ||
2226 | ecryptfs_dir_dentry->d_sb)->mount_crypt_stat; | ||
2211 | char *decoded_name; | 2227 | char *decoded_name; |
2212 | size_t decoded_name_size; | 2228 | size_t decoded_name_size; |
2213 | size_t packet_size; | 2229 | size_t packet_size; |
2214 | int rc = 0; | 2230 | int rc = 0; |
2215 | 2231 | ||
2216 | if ((name_size > ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE) | 2232 | if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) |
2233 | && !(mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) | ||
2234 | && (name_size > ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE) | ||
2217 | && (strncmp(name, ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX, | 2235 | && (strncmp(name, ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX, |
2218 | ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE) == 0)) { | 2236 | ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE) == 0)) { |
2219 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat = | ||
2220 | &ecryptfs_superblock_to_private( | ||
2221 | ecryptfs_dir_dentry->d_sb)->mount_crypt_stat; | ||
2222 | const char *orig_name = name; | 2237 | const char *orig_name = name; |
2223 | size_t orig_name_size = name_size; | 2238 | size_t orig_name_size = name_size; |
2224 | 2239 | ||
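Note: the ecryptfs_write_metadata() change above exists because the header region can be larger than one page, so the buffer is now sized by get_order(virt_len) and freed with free_pages(). A runnable userspace sketch of the page-order arithmetic (PAGE_SIZE and get_order() here are local stand-ins for the kernel's, used only for illustration):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

/* smallest 'order' such that (PAGE_SIZE << order) >= len */
static unsigned int get_order(size_t len)
{
        unsigned int order = 0;

        while ((PAGE_SIZE << order) < len)
                order++;
        return order;
}

int main(void)
{
        size_t virt_len = 12000;            /* e.g. header bytes at front */
        unsigned int order = get_order(virt_len);
        size_t alloc = PAGE_SIZE << order;  /* 16384 for this example */
        char *virt = calloc(1, alloc);      /* zeroed, like __GFP_ZERO */

        printf("len=%zu order=%u alloc=%zu\n", virt_len, order, alloc);
        free(virt);
        return 0;
}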
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index eb2267eca1fe..ac749d4d644f 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h | |||
@@ -620,7 +620,6 @@ int ecryptfs_interpose(struct dentry *hidden_dentry, | |||
620 | u32 flags); | 620 | u32 flags); |
621 | int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, | 621 | int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, |
622 | struct dentry *lower_dentry, | 622 | struct dentry *lower_dentry, |
623 | struct ecryptfs_crypt_stat *crypt_stat, | ||
624 | struct inode *ecryptfs_dir_inode, | 623 | struct inode *ecryptfs_dir_inode, |
625 | struct nameidata *ecryptfs_nd); | 624 | struct nameidata *ecryptfs_nd); |
626 | int ecryptfs_decode_and_decrypt_filename(char **decrypted_name, | 625 | int ecryptfs_decode_and_decrypt_filename(char **decrypted_name, |
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 5697899a168d..55b3145b8072 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c | |||
@@ -246,7 +246,6 @@ out: | |||
246 | */ | 246 | */ |
247 | int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, | 247 | int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, |
248 | struct dentry *lower_dentry, | 248 | struct dentry *lower_dentry, |
249 | struct ecryptfs_crypt_stat *crypt_stat, | ||
250 | struct inode *ecryptfs_dir_inode, | 249 | struct inode *ecryptfs_dir_inode, |
251 | struct nameidata *ecryptfs_nd) | 250 | struct nameidata *ecryptfs_nd) |
252 | { | 251 | { |
@@ -254,6 +253,7 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, | |||
254 | struct vfsmount *lower_mnt; | 253 | struct vfsmount *lower_mnt; |
255 | struct inode *lower_inode; | 254 | struct inode *lower_inode; |
256 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat; | 255 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat; |
256 | struct ecryptfs_crypt_stat *crypt_stat; | ||
257 | char *page_virt = NULL; | 257 | char *page_virt = NULL; |
258 | u64 file_size; | 258 | u64 file_size; |
259 | int rc = 0; | 259 | int rc = 0; |
@@ -314,6 +314,11 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, | |||
314 | goto out_free_kmem; | 314 | goto out_free_kmem; |
315 | } | 315 | } |
316 | } | 316 | } |
317 | crypt_stat = &ecryptfs_inode_to_private( | ||
318 | ecryptfs_dentry->d_inode)->crypt_stat; | ||
319 | /* TODO: lock for crypt_stat comparison */ | ||
320 | if (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED)) | ||
321 | ecryptfs_set_default_sizes(crypt_stat); | ||
317 | rc = ecryptfs_read_and_validate_header_region(page_virt, | 322 | rc = ecryptfs_read_and_validate_header_region(page_virt, |
318 | ecryptfs_dentry->d_inode); | 323 | ecryptfs_dentry->d_inode); |
319 | if (rc) { | 324 | if (rc) { |
@@ -362,9 +367,7 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, | |||
362 | { | 367 | { |
363 | char *encrypted_and_encoded_name = NULL; | 368 | char *encrypted_and_encoded_name = NULL; |
364 | size_t encrypted_and_encoded_name_size; | 369 | size_t encrypted_and_encoded_name_size; |
365 | struct ecryptfs_crypt_stat *crypt_stat = NULL; | ||
366 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat = NULL; | 370 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat = NULL; |
367 | struct ecryptfs_inode_info *inode_info; | ||
368 | struct dentry *lower_dir_dentry, *lower_dentry; | 371 | struct dentry *lower_dir_dentry, *lower_dentry; |
369 | int rc = 0; | 372 | int rc = 0; |
370 | 373 | ||
@@ -388,26 +391,15 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, | |||
388 | } | 391 | } |
389 | if (lower_dentry->d_inode) | 392 | if (lower_dentry->d_inode) |
390 | goto lookup_and_interpose; | 393 | goto lookup_and_interpose; |
391 | inode_info = ecryptfs_inode_to_private(ecryptfs_dentry->d_inode); | 394 | mount_crypt_stat = &ecryptfs_superblock_to_private( |
392 | if (inode_info) { | 395 | ecryptfs_dentry->d_sb)->mount_crypt_stat; |
393 | crypt_stat = &inode_info->crypt_stat; | 396 | if (!(mount_crypt_stat |
394 | /* TODO: lock for crypt_stat comparison */ | 397 | && (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES))) |
395 | if (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED)) | ||
396 | ecryptfs_set_default_sizes(crypt_stat); | ||
397 | } | ||
398 | if (crypt_stat) | ||
399 | mount_crypt_stat = crypt_stat->mount_crypt_stat; | ||
400 | else | ||
401 | mount_crypt_stat = &ecryptfs_superblock_to_private( | ||
402 | ecryptfs_dentry->d_sb)->mount_crypt_stat; | ||
403 | if (!(crypt_stat && (crypt_stat->flags & ECRYPTFS_ENCRYPT_FILENAMES)) | ||
404 | && !(mount_crypt_stat && (mount_crypt_stat->flags | ||
405 | & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES))) | ||
406 | goto lookup_and_interpose; | 398 | goto lookup_and_interpose; |
407 | dput(lower_dentry); | 399 | dput(lower_dentry); |
408 | rc = ecryptfs_encrypt_and_encode_filename( | 400 | rc = ecryptfs_encrypt_and_encode_filename( |
409 | &encrypted_and_encoded_name, &encrypted_and_encoded_name_size, | 401 | &encrypted_and_encoded_name, &encrypted_and_encoded_name_size, |
410 | crypt_stat, mount_crypt_stat, ecryptfs_dentry->d_name.name, | 402 | NULL, mount_crypt_stat, ecryptfs_dentry->d_name.name, |
411 | ecryptfs_dentry->d_name.len); | 403 | ecryptfs_dentry->d_name.len); |
412 | if (rc) { | 404 | if (rc) { |
413 | printk(KERN_ERR "%s: Error attempting to encrypt and encode " | 405 | printk(KERN_ERR "%s: Error attempting to encrypt and encode " |
@@ -426,7 +418,7 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, | |||
426 | } | 418 | } |
427 | lookup_and_interpose: | 419 | lookup_and_interpose: |
428 | rc = ecryptfs_lookup_and_interpose_lower(ecryptfs_dentry, lower_dentry, | 420 | rc = ecryptfs_lookup_and_interpose_lower(ecryptfs_dentry, lower_dentry, |
429 | crypt_stat, ecryptfs_dir_inode, | 421 | ecryptfs_dir_inode, |
430 | ecryptfs_nd); | 422 | ecryptfs_nd); |
431 | goto out; | 423 | goto out; |
432 | out_d_drop: | 424 | out_d_drop: |
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index e2eab196875f..e0aa4fe4f596 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c | |||
@@ -1122,7 +1122,8 @@ ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path, | |||
1122 | struct ext4_extent_idx *ix; | 1122 | struct ext4_extent_idx *ix; |
1123 | struct ext4_extent *ex; | 1123 | struct ext4_extent *ex; |
1124 | ext4_fsblk_t block; | 1124 | ext4_fsblk_t block; |
1125 | int depth, ee_len; | 1125 | int depth; /* Note, NOT eh_depth; depth from top of tree */ |
1126 | int ee_len; | ||
1126 | 1127 | ||
1127 | BUG_ON(path == NULL); | 1128 | BUG_ON(path == NULL); |
1128 | depth = path->p_depth; | 1129 | depth = path->p_depth; |
@@ -1179,7 +1180,8 @@ got_index: | |||
1179 | if (bh == NULL) | 1180 | if (bh == NULL) |
1180 | return -EIO; | 1181 | return -EIO; |
1181 | eh = ext_block_hdr(bh); | 1182 | eh = ext_block_hdr(bh); |
1182 | if (ext4_ext_check_header(inode, eh, depth)) { | 1183 | /* subtract from p_depth to get proper eh_depth */ |
1184 | if (ext4_ext_check_header(inode, eh, path->p_depth - depth)) { | ||
1183 | put_bh(bh); | 1185 | put_bh(bh); |
1184 | return -EIO; | 1186 | return -EIO; |
1185 | } | 1187 | } |
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 627f8c3337a3..2d2b3585ee91 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c | |||
@@ -698,6 +698,7 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode) | |||
698 | struct inode *ret; | 698 | struct inode *ret; |
699 | ext4_group_t i; | 699 | ext4_group_t i; |
700 | int free = 0; | 700 | int free = 0; |
701 | static int once = 1; | ||
701 | ext4_group_t flex_group; | 702 | ext4_group_t flex_group; |
702 | 703 | ||
703 | /* Cannot create files in a deleted directory */ | 704 | /* Cannot create files in a deleted directory */ |
@@ -719,7 +720,8 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode) | |||
719 | ret2 = find_group_flex(sb, dir, &group); | 720 | ret2 = find_group_flex(sb, dir, &group); |
720 | if (ret2 == -1) { | 721 | if (ret2 == -1) { |
721 | ret2 = find_group_other(sb, dir, &group); | 722 | ret2 = find_group_other(sb, dir, &group); |
722 | if (ret2 == 0 && printk_ratelimit()) | 723 | if (ret2 == 0 && once) |
724 | once = 0; | ||
723 | printk(KERN_NOTICE "ext4: find_group_flex " | 725 | printk(KERN_NOTICE "ext4: find_group_flex " |
724 | "failed, fallback succeeded dir %lu\n", | 726 | "failed, fallback succeeded dir %lu\n", |
725 | dir->i_ino); | 727 | dir->i_ino); |
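Note: the ialloc.c hunk above trades printk_ratelimit() for a static once flag so the fallback notice is printed a single time. A runnable sketch of the same "log it only once" idiom (warn_fallback() is a made-up name; the sketch also braces the whole message under the flag test, which the hunk above does not):

#include <stdio.h>

static void warn_fallback(unsigned long ino)
{
        static int once = 1;

        if (once) {
                once = 0;
                fprintf(stderr,
                        "find_group_flex failed, fallback succeeded dir %lu\n",
                        ino);
        }
}

int main(void)
{
        warn_fallback(12);      /* prints */
        warn_fallback(13);      /* silent */
        return 0;
}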
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 4415beeb0b62..9f61e62f435f 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c | |||
@@ -1447,7 +1447,7 @@ static void ext4_mb_measure_extent(struct ext4_allocation_context *ac, | |||
1447 | struct ext4_free_extent *gex = &ac->ac_g_ex; | 1447 | struct ext4_free_extent *gex = &ac->ac_g_ex; |
1448 | 1448 | ||
1449 | BUG_ON(ex->fe_len <= 0); | 1449 | BUG_ON(ex->fe_len <= 0); |
1450 | BUG_ON(ex->fe_len >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb)); | 1450 | BUG_ON(ex->fe_len > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)); |
1451 | BUG_ON(ex->fe_start >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb)); | 1451 | BUG_ON(ex->fe_start >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb)); |
1452 | BUG_ON(ac->ac_status != AC_STATUS_CONTINUE); | 1452 | BUG_ON(ac->ac_status != AC_STATUS_CONTINUE); |
1453 | 1453 | ||
@@ -3292,7 +3292,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac, | |||
3292 | } | 3292 | } |
3293 | BUG_ON(start + size <= ac->ac_o_ex.fe_logical && | 3293 | BUG_ON(start + size <= ac->ac_o_ex.fe_logical && |
3294 | start > ac->ac_o_ex.fe_logical); | 3294 | start > ac->ac_o_ex.fe_logical); |
3295 | BUG_ON(size <= 0 || size >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb)); | 3295 | BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)); |
3296 | 3296 | ||
3297 | /* now prepare goal request */ | 3297 | /* now prepare goal request */ |
3298 | 3298 | ||
@@ -3589,6 +3589,7 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac, | |||
3589 | struct super_block *sb, struct ext4_prealloc_space *pa) | 3589 | struct super_block *sb, struct ext4_prealloc_space *pa) |
3590 | { | 3590 | { |
3591 | ext4_group_t grp; | 3591 | ext4_group_t grp; |
3592 | ext4_fsblk_t grp_blk; | ||
3592 | 3593 | ||
3593 | if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) | 3594 | if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) |
3594 | return; | 3595 | return; |
@@ -3603,8 +3604,12 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac, | |||
3603 | pa->pa_deleted = 1; | 3604 | pa->pa_deleted = 1; |
3604 | spin_unlock(&pa->pa_lock); | 3605 | spin_unlock(&pa->pa_lock); |
3605 | 3606 | ||
3606 | /* -1 is to protect from crossing allocation group */ | 3607 | grp_blk = pa->pa_pstart; |
3607 | ext4_get_group_no_and_offset(sb, pa->pa_pstart - 1, &grp, NULL); | 3608 | /* If linear, pa_pstart may be in the next group when pa is used up */ |
3609 | if (pa->pa_linear) | ||
3610 | grp_blk--; | ||
3611 | |||
3612 | ext4_get_group_no_and_offset(sb, grp_blk, &grp, NULL); | ||
3608 | 3613 | ||
3609 | /* | 3614 | /* |
3610 | * possible race: | 3615 | * possible race: |
diff --git a/fs/minix/inode.c b/fs/minix/inode.c index d1d1eb84679d..618865b3128b 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 1991, 1992 Linus Torvalds | 4 | * Copyright (C) 1991, 1992 Linus Torvalds |
5 | * | 5 | * |
6 | * Copyright (C) 1996 Gertjan van Wingerde (gertjan@cs.vu.nl) | 6 | * Copyright (C) 1996 Gertjan van Wingerde |
7 | * Minix V2 fs support. | 7 | * Minix V2 fs support. |
8 | * | 8 | * |
9 | * Modified for 680x0 by Andreas Schwab | 9 | * Modified for 680x0 by Andreas Schwab |
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index f65953be39c0..9250067943d8 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c | |||
@@ -2596,6 +2596,7 @@ static nfsd4_enc nfsd4_enc_ops[] = { | |||
2596 | [OP_LOOKUPP] = (nfsd4_enc)nfsd4_encode_noop, | 2596 | [OP_LOOKUPP] = (nfsd4_enc)nfsd4_encode_noop, |
2597 | [OP_NVERIFY] = (nfsd4_enc)nfsd4_encode_noop, | 2597 | [OP_NVERIFY] = (nfsd4_enc)nfsd4_encode_noop, |
2598 | [OP_OPEN] = (nfsd4_enc)nfsd4_encode_open, | 2598 | [OP_OPEN] = (nfsd4_enc)nfsd4_encode_open, |
2599 | [OP_OPENATTR] = (nfsd4_enc)nfsd4_encode_noop, | ||
2599 | [OP_OPEN_CONFIRM] = (nfsd4_enc)nfsd4_encode_open_confirm, | 2600 | [OP_OPEN_CONFIRM] = (nfsd4_enc)nfsd4_encode_open_confirm, |
2600 | [OP_OPEN_DOWNGRADE] = (nfsd4_enc)nfsd4_encode_open_downgrade, | 2601 | [OP_OPEN_DOWNGRADE] = (nfsd4_enc)nfsd4_encode_open_downgrade, |
2601 | [OP_PUTFH] = (nfsd4_enc)nfsd4_encode_noop, | 2602 | [OP_PUTFH] = (nfsd4_enc)nfsd4_encode_noop, |
diff --git a/fs/ufs/super.c b/fs/ufs/super.c index e65212dfb60e..261a1c2f22dd 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c | |||
@@ -41,7 +41,7 @@ | |||
41 | * Stefan Reinauer <stepan@home.culture.mipt.ru> | 41 | * Stefan Reinauer <stepan@home.culture.mipt.ru> |
42 | * | 42 | * |
43 | * Module usage counts added on 96/04/29 by | 43 | * Module usage counts added on 96/04/29 by |
44 | * Gertjan van Wingerde <gertjan@cs.vu.nl> | 44 | * Gertjan van Wingerde <gwingerde@gmail.com> |
45 | * | 45 | * |
46 | * Clean swab support on 19970406 by | 46 | * Clean swab support on 19970406 by |
47 | * Francois-Rene Rideau <fare@tunes.org> | 47 | * Francois-Rene Rideau <fare@tunes.org> |
diff --git a/include/linux/capability.h b/include/linux/capability.h index 1b9872556131..4864a43b2b45 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h | |||
@@ -393,8 +393,10 @@ struct cpu_vfs_cap_data { | |||
393 | # define CAP_FULL_SET ((kernel_cap_t){{ ~0, ~0 }}) | 393 | # define CAP_FULL_SET ((kernel_cap_t){{ ~0, ~0 }}) |
394 | # define CAP_INIT_EFF_SET ((kernel_cap_t){{ ~CAP_TO_MASK(CAP_SETPCAP), ~0 }}) | 394 | # define CAP_INIT_EFF_SET ((kernel_cap_t){{ ~CAP_TO_MASK(CAP_SETPCAP), ~0 }}) |
395 | # define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0, CAP_FS_MASK_B1 } }) | 395 | # define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0, CAP_FS_MASK_B1 } }) |
396 | # define CAP_NFSD_SET ((kernel_cap_t){{ CAP_FS_MASK_B0|CAP_TO_MASK(CAP_SYS_RESOURCE), \ | 396 | # define CAP_NFSD_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \ |
397 | CAP_FS_MASK_B1 } }) | 397 | | CAP_TO_MASK(CAP_SYS_RESOURCE) \ |
398 | | CAP_TO_MASK(CAP_MKNOD), \ | ||
399 | CAP_FS_MASK_B1 } }) | ||
398 | 400 | ||
399 | #endif /* _KERNEL_CAPABILITY_U32S != 2 */ | 401 | #endif /* _KERNEL_CAPABILITY_U32S != 2 */ |
400 | 402 | ||
diff --git a/kernel/module.c b/kernel/module.c index ba22484a987e..1196f5d11700 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -2015,14 +2015,6 @@ static noinline struct module *load_module(void __user *umod, | |||
2015 | if (err < 0) | 2015 | if (err < 0) |
2016 | goto free_mod; | 2016 | goto free_mod; |
2017 | 2017 | ||
2018 | #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP) | ||
2019 | mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t), | ||
2020 | mod->name); | ||
2021 | if (!mod->refptr) { | ||
2022 | err = -ENOMEM; | ||
2023 | goto free_mod; | ||
2024 | } | ||
2025 | #endif | ||
2026 | if (pcpuindex) { | 2018 | if (pcpuindex) { |
2027 | /* We have a special allocation for this section. */ | 2019 | /* We have a special allocation for this section. */ |
2028 | percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size, | 2020 | percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size, |
@@ -2030,7 +2022,7 @@ static noinline struct module *load_module(void __user *umod, | |||
2030 | mod->name); | 2022 | mod->name); |
2031 | if (!percpu) { | 2023 | if (!percpu) { |
2032 | err = -ENOMEM; | 2024 | err = -ENOMEM; |
2033 | goto free_percpu; | 2025 | goto free_mod; |
2034 | } | 2026 | } |
2035 | sechdrs[pcpuindex].sh_flags &= ~(unsigned long)SHF_ALLOC; | 2027 | sechdrs[pcpuindex].sh_flags &= ~(unsigned long)SHF_ALLOC; |
2036 | mod->percpu = percpu; | 2028 | mod->percpu = percpu; |
@@ -2082,6 +2074,14 @@ static noinline struct module *load_module(void __user *umod, | |||
2082 | /* Module has been moved. */ | 2074 | /* Module has been moved. */ |
2083 | mod = (void *)sechdrs[modindex].sh_addr; | 2075 | mod = (void *)sechdrs[modindex].sh_addr; |
2084 | 2076 | ||
2077 | #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP) | ||
2078 | mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t), | ||
2079 | mod->name); | ||
2080 | if (!mod->refptr) { | ||
2081 | err = -ENOMEM; | ||
2082 | goto free_init; | ||
2083 | } | ||
2084 | #endif | ||
2085 | /* Now we've moved module, initialize linked lists, etc. */ | 2085 | /* Now we've moved module, initialize linked lists, etc. */ |
2086 | module_unload_init(mod); | 2086 | module_unload_init(mod); |
2087 | 2087 | ||
@@ -2288,15 +2288,17 @@ static noinline struct module *load_module(void __user *umod, | |||
2288 | ftrace_release(mod->module_core, mod->core_size); | 2288 | ftrace_release(mod->module_core, mod->core_size); |
2289 | free_unload: | 2289 | free_unload: |
2290 | module_unload_free(mod); | 2290 | module_unload_free(mod); |
2291 | free_init: | ||
2292 | #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP) | ||
2293 | percpu_modfree(mod->refptr); | ||
2294 | #endif | ||
2291 | module_free(mod, mod->module_init); | 2295 | module_free(mod, mod->module_init); |
2292 | free_core: | 2296 | free_core: |
2293 | module_free(mod, mod->module_core); | 2297 | module_free(mod, mod->module_core); |
2298 | /* mod will be freed with core. Don't access it beyond this line! */ | ||
2294 | free_percpu: | 2299 | free_percpu: |
2295 | if (percpu) | 2300 | if (percpu) |
2296 | percpu_modfree(percpu); | 2301 | percpu_modfree(percpu); |
2297 | #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP) | ||
2298 | percpu_modfree(mod->refptr); | ||
2299 | #endif | ||
2300 | free_mod: | 2302 | free_mod: |
2301 | kfree(args); | 2303 | kfree(args); |
2302 | free_hdr: | 2304 | free_hdr: |
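Note: the module.c change above moves the refptr per-cpu allocation to after the module image has reached its final location and re-orders the error unwind (new free_init label, refptr freed before module_free(), and nothing touching mod once its core is gone). A runnable sketch of the underlying goto-ladder rule -- release in reverse order of acquisition (the a/b/c resources are made up):

#include <stdlib.h>

static int setup(void)
{
        char *a, *b, *c;
        int err = -1;

        a = malloc(16);
        if (!a)
                goto out;
        b = malloc(32);
        if (!b)
                goto free_a;
        c = malloc(64);
        if (!c)
                goto free_b;

        /* success: the caller now owns a, b, c (freed here to keep the
         * example leak-free) */
        free(c);
        free(b);
        free(a);
        return 0;

        /* error unwind: strictly reverse order of acquisition */
free_b:
        free(b);
free_a:
        free(a);
out:
        return err;
}

int main(void)
{
        return setup() ? 1 : 0;
}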
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c index 3e1057f885c6..d190092c3b6e 100644 --- a/scripts/kconfig/conf.c +++ b/scripts/kconfig/conf.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <time.h> | 11 | #include <time.h> |
12 | #include <unistd.h> | 12 | #include <unistd.h> |
13 | #include <sys/stat.h> | 13 | #include <sys/stat.h> |
14 | #include <sys/time.h> | ||
14 | 15 | ||
15 | #define LKC_DIRECT_LINK | 16 | #define LKC_DIRECT_LINK |
16 | #include "lkc.h" | 17 | #include "lkc.h" |
@@ -464,9 +465,22 @@ int main(int ac, char **av) | |||
464 | input_mode = set_yes; | 465 | input_mode = set_yes; |
465 | break; | 466 | break; |
466 | case 'r': | 467 | case 'r': |
468 | { | ||
469 | struct timeval now; | ||
470 | unsigned int seed; | ||
471 | |||
472 | /* | ||
473 | * Use microseconds derived seed, | ||
474 | * compensate for systems where it may be zero | ||
475 | */ | ||
476 | gettimeofday(&now, NULL); | ||
477 | |||
478 | seed = (unsigned int)((now.tv_sec + 1) * (now.tv_usec + 1)); | ||
479 | srand(seed); | ||
480 | |||
467 | input_mode = set_random; | 481 | input_mode = set_random; |
468 | srand(time(NULL)); | ||
469 | break; | 482 | break; |
483 | } | ||
470 | case 'h': | 484 | case 'h': |
471 | printf(_("See README for usage info\n")); | 485 | printf(_("See README for usage info\n")); |
472 | exit(0); | 486 | exit(0); |
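Note: the conf.c hunk above seeds rand() from gettimeofday() so two randconfig runs in the same second (or with tv_usec of zero) still diverge. A runnable sketch of that seeding:

#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

int main(void)
{
        struct timeval now;
        unsigned int seed;

        gettimeofday(&now, NULL);
        /* +1 on both terms keeps the product non-zero even when tv_usec is 0 */
        seed = (unsigned int)((now.tv_sec + 1) * (now.tv_usec + 1));
        srand(seed);

        printf("seed=%u first=%d\n", seed, rand());
        return 0;
}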
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c index 830d9eae11f9..273d73888f9d 100644 --- a/scripts/kconfig/confdata.c +++ b/scripts/kconfig/confdata.c | |||
@@ -843,7 +843,7 @@ void conf_set_all_new_symbols(enum conf_def_mode mode) | |||
843 | default: | 843 | default: |
844 | continue; | 844 | continue; |
845 | } | 845 | } |
846 | if (!sym_is_choice(sym) || mode != def_random) | 846 | if (!(sym_is_choice(sym) && mode == def_random)) |
847 | sym->flags |= SYMBOL_DEF_USER; | 847 | sym->flags |= SYMBOL_DEF_USER; |
848 | break; | 848 | break; |
849 | default: | 849 | default: |
@@ -856,28 +856,49 @@ void conf_set_all_new_symbols(enum conf_def_mode mode) | |||
856 | 856 | ||
857 | if (mode != def_random) | 857 | if (mode != def_random) |
858 | return; | 858 | return; |
859 | 859 | /* | |
860 | * We have different types of choice blocks. | ||
861 | * If curr.tri equals mod then we can select several | ||
862 | * choice symbols in one block. | ||
863 | * In this case we do nothing. | ||
864 | * If curr.tri equals yes then only one symbol can be | ||
865 | * selected in a choice block and we set it to yes, | ||
866 | * and the rest to no. | ||
867 | */ | ||
860 | for_all_symbols(i, csym) { | 868 | for_all_symbols(i, csym) { |
861 | if (sym_has_value(csym) || !sym_is_choice(csym)) | 869 | if (sym_has_value(csym) || !sym_is_choice(csym)) |
862 | continue; | 870 | continue; |
863 | 871 | ||
864 | sym_calc_value(csym); | 872 | sym_calc_value(csym); |
873 | |||
874 | if (csym->curr.tri != yes) | ||
875 | continue; | ||
876 | |||
865 | prop = sym_get_choice_prop(csym); | 877 | prop = sym_get_choice_prop(csym); |
866 | def = -1; | 878 | |
867 | while (1) { | 879 | /* count entries in choice block */ |
868 | cnt = 0; | 880 | cnt = 0; |
869 | expr_list_for_each_sym(prop->expr, e, sym) { | 881 | expr_list_for_each_sym(prop->expr, e, sym) |
870 | if (sym->visible == no) | 882 | cnt++; |
871 | continue; | 883 | |
872 | if (def == cnt++) { | 884 | /* |
873 | csym->def[S_DEF_USER].val = sym; | 885 | * find a random value and set it to yes, |
874 | break; | 886 | * set the rest to no so we have only one set |
875 | } | 887 | */ |
888 | def = (rand() % cnt); | ||
889 | |||
890 | cnt = 0; | ||
891 | expr_list_for_each_sym(prop->expr, e, sym) { | ||
892 | if (def == cnt++) { | ||
893 | sym->def[S_DEF_USER].tri = yes; | ||
894 | csym->def[S_DEF_USER].val = sym; | ||
895 | } | ||
896 | else { | ||
897 | sym->def[S_DEF_USER].tri = no; | ||
876 | } | 898 | } |
877 | if (def >= 0 || cnt < 2) | ||
878 | break; | ||
879 | def = (rand() % cnt) + 1; | ||
880 | } | 899 | } |
881 | csym->flags |= SYMBOL_DEF_USER; | 900 | csym->flags |= SYMBOL_DEF_USER; |
901 | /* clear VALID to get value calculated */ | ||
902 | csym->flags &= ~(SYMBOL_VALID); | ||
882 | } | 903 | } |
883 | } | 904 | } |
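Note: the confdata.c rewrite above handles a tristate-yes choice block by counting its entries, drawing rand() % cnt once, and walking the block again to set exactly one symbol to yes and the rest to no. A runnable userspace sketch of that two-pass selection over a small list (struct sym here is a toy, not kconfig's struct symbol):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

enum tristate { no, mod, yes };

struct sym {
        enum tristate tri;
        struct sym *next;
};

int main(void)
{
        struct sym pool[5] = {{ no, NULL }};
        struct sym *head = NULL, *s;
        int cnt = 0, def, i;

        for (i = 0; i < 5; i++) {       /* build a small "choice block" */
                pool[i].next = head;
                head = &pool[i];
        }

        srand((unsigned int)time(NULL));

        for (s = head; s; s = s->next)  /* pass 1: count the entries */
                cnt++;

        def = rand() % cnt;             /* pass 2: one yes, the rest no */
        for (i = 0, s = head; s; s = s->next, i++)
                s->tri = (i == def) ? yes : no;

        for (i = 0, s = head; s; s = s->next, i++)
                printf("sym%d = %s\n", i, s->tri == yes ? "yes" : "no");
        return 0;
}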
diff --git a/sound/core/oss/mixer_oss.c b/sound/core/oss/mixer_oss.c index 4690b8b5681f..e570649184e2 100644 --- a/sound/core/oss/mixer_oss.c +++ b/sound/core/oss/mixer_oss.c | |||
@@ -692,6 +692,9 @@ static int snd_mixer_oss_put_volume1(struct snd_mixer_oss_file *fmixer, | |||
692 | snd_mixer_oss_put_volume1_vol(fmixer, pslot, slot->numid[SNDRV_MIXER_OSS_ITEM_PVOLUME], left, right); | 692 | snd_mixer_oss_put_volume1_vol(fmixer, pslot, slot->numid[SNDRV_MIXER_OSS_ITEM_PVOLUME], left, right); |
693 | if (slot->present & SNDRV_MIXER_OSS_PRESENT_CVOLUME) | 693 | if (slot->present & SNDRV_MIXER_OSS_PRESENT_CVOLUME) |
694 | snd_mixer_oss_put_volume1_vol(fmixer, pslot, slot->numid[SNDRV_MIXER_OSS_ITEM_CVOLUME], left, right); | 694 | snd_mixer_oss_put_volume1_vol(fmixer, pslot, slot->numid[SNDRV_MIXER_OSS_ITEM_CVOLUME], left, right); |
695 | } else if (slot->present & SNDRV_MIXER_OSS_PRESENT_CVOLUME) { | ||
696 | snd_mixer_oss_put_volume1_vol(fmixer, pslot, | ||
697 | slot->numid[SNDRV_MIXER_OSS_ITEM_CVOLUME], left, right); | ||
695 | } else if (slot->present & SNDRV_MIXER_OSS_PRESENT_GVOLUME) { | 698 | } else if (slot->present & SNDRV_MIXER_OSS_PRESENT_GVOLUME) { |
696 | snd_mixer_oss_put_volume1_vol(fmixer, pslot, slot->numid[SNDRV_MIXER_OSS_ITEM_GVOLUME], left, right); | 699 | snd_mixer_oss_put_volume1_vol(fmixer, pslot, slot->numid[SNDRV_MIXER_OSS_ITEM_GVOLUME], left, right); |
697 | } else if (slot->present & SNDRV_MIXER_OSS_PRESENT_GLOBAL) { | 700 | } else if (slot->present & SNDRV_MIXER_OSS_PRESENT_GLOBAL) { |
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index 0a1798eafb0b..699d2890535c 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c | |||
@@ -2872,7 +2872,7 @@ static void snd_pcm_oss_proc_write(struct snd_info_entry *entry, | |||
2872 | setup = kmalloc(sizeof(*setup), GFP_KERNEL); | 2872 | setup = kmalloc(sizeof(*setup), GFP_KERNEL); |
2873 | if (! setup) { | 2873 | if (! setup) { |
2874 | buffer->error = -ENOMEM; | 2874 | buffer->error = -ENOMEM; |
2875 | mutex_lock(&pstr->oss.setup_mutex); | 2875 | mutex_unlock(&pstr->oss.setup_mutex); |
2876 | return; | 2876 | return; |
2877 | } | 2877 | } |
2878 | if (pstr->oss.setup_list == NULL) | 2878 | if (pstr->oss.setup_list == NULL) |
@@ -2886,7 +2886,7 @@ static void snd_pcm_oss_proc_write(struct snd_info_entry *entry, | |||
2886 | if (! template.task_name) { | 2886 | if (! template.task_name) { |
2887 | kfree(setup); | 2887 | kfree(setup); |
2888 | buffer->error = -ENOMEM; | 2888 | buffer->error = -ENOMEM; |
2889 | mutex_lock(&pstr->oss.setup_mutex); | 2889 | mutex_unlock(&pstr->oss.setup_mutex); |
2890 | return; | 2890 | return; |
2891 | } | 2891 | } |
2892 | } | 2892 | } |
diff --git a/sound/core/sgbuf.c b/sound/core/sgbuf.c index d4564edd61d7..4e7ec2b49873 100644 --- a/sound/core/sgbuf.c +++ b/sound/core/sgbuf.c | |||
@@ -38,6 +38,10 @@ int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab) | |||
38 | if (! sgbuf) | 38 | if (! sgbuf) |
39 | return -EINVAL; | 39 | return -EINVAL; |
40 | 40 | ||
41 | if (dmab->area) | ||
42 | vunmap(dmab->area); | ||
43 | dmab->area = NULL; | ||
44 | |||
41 | tmpb.dev.type = SNDRV_DMA_TYPE_DEV; | 45 | tmpb.dev.type = SNDRV_DMA_TYPE_DEV; |
42 | tmpb.dev.dev = sgbuf->dev; | 46 | tmpb.dev.dev = sgbuf->dev; |
43 | for (i = 0; i < sgbuf->pages; i++) { | 47 | for (i = 0; i < sgbuf->pages; i++) { |
@@ -48,9 +52,6 @@ int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab) | |||
48 | tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT; | 52 | tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT; |
49 | snd_dma_free_pages(&tmpb); | 53 | snd_dma_free_pages(&tmpb); |
50 | } | 54 | } |
51 | if (dmab->area) | ||
52 | vunmap(dmab->area); | ||
53 | dmab->area = NULL; | ||
54 | 55 | ||
55 | kfree(sgbuf->table); | 56 | kfree(sgbuf->table); |
56 | kfree(sgbuf->page_table); | 57 | kfree(sgbuf->page_table); |
diff --git a/sound/isa/opl3sa2.c b/sound/isa/opl3sa2.c index 58c972b2af03..b848d1001864 100644 --- a/sound/isa/opl3sa2.c +++ b/sound/isa/opl3sa2.c | |||
@@ -550,21 +550,27 @@ static int __devinit snd_opl3sa2_mixer(struct snd_card *card) | |||
550 | #ifdef CONFIG_PM | 550 | #ifdef CONFIG_PM |
551 | static int snd_opl3sa2_suspend(struct snd_card *card, pm_message_t state) | 551 | static int snd_opl3sa2_suspend(struct snd_card *card, pm_message_t state) |
552 | { | 552 | { |
553 | struct snd_opl3sa2 *chip = card->private_data; | 553 | if (card) { |
554 | struct snd_opl3sa2 *chip = card->private_data; | ||
554 | 555 | ||
555 | snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); | 556 | snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); |
556 | chip->wss->suspend(chip->wss); | 557 | chip->wss->suspend(chip->wss); |
557 | /* power down */ | 558 | /* power down */ |
558 | snd_opl3sa2_write(chip, OPL3SA2_PM_CTRL, OPL3SA2_PM_D3); | 559 | snd_opl3sa2_write(chip, OPL3SA2_PM_CTRL, OPL3SA2_PM_D3); |
560 | } | ||
559 | 561 | ||
560 | return 0; | 562 | return 0; |
561 | } | 563 | } |
562 | 564 | ||
563 | static int snd_opl3sa2_resume(struct snd_card *card) | 565 | static int snd_opl3sa2_resume(struct snd_card *card) |
564 | { | 566 | { |
565 | struct snd_opl3sa2 *chip = card->private_data; | 567 | struct snd_opl3sa2 *chip; |
566 | int i; | 568 | int i; |
567 | 569 | ||
570 | if (!card) | ||
571 | return 0; | ||
572 | |||
573 | chip = card->private_data; | ||
568 | /* power up */ | 574 | /* power up */ |
569 | snd_opl3sa2_write(chip, OPL3SA2_PM_CTRL, OPL3SA2_PM_D0); | 575 | snd_opl3sa2_write(chip, OPL3SA2_PM_CTRL, OPL3SA2_PM_D0); |
570 | 576 | ||
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 5e909e0da04b..f3b5723c2859 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -2059,26 +2059,31 @@ static int __devinit check_position_fix(struct azx *chip, int fix) | |||
2059 | { | 2059 | { |
2060 | const struct snd_pci_quirk *q; | 2060 | const struct snd_pci_quirk *q; |
2061 | 2061 | ||
2062 | /* Check VIA HD Audio Controller exist */ | 2062 | switch (fix) { |
2063 | if (chip->pci->vendor == PCI_VENDOR_ID_VIA && | 2063 | case POS_FIX_LPIB: |
2064 | chip->pci->device == VIA_HDAC_DEVICE_ID) { | 2064 | case POS_FIX_POSBUF: |
2065 | return fix; | ||
2066 | } | ||
2067 | |||
2068 | /* Check VIA/ATI HD Audio Controller exist */ | ||
2069 | switch (chip->driver_type) { | ||
2070 | case AZX_DRIVER_VIA: | ||
2071 | case AZX_DRIVER_ATI: | ||
2065 | chip->via_dmapos_patch = 1; | 2072 | chip->via_dmapos_patch = 1; |
2066 | /* Use link position directly, avoid any transfer problem. */ | 2073 | /* Use link position directly, avoid any transfer problem. */ |
2067 | return POS_FIX_LPIB; | 2074 | return POS_FIX_LPIB; |
2068 | } | 2075 | } |
2069 | chip->via_dmapos_patch = 0; | 2076 | chip->via_dmapos_patch = 0; |
2070 | 2077 | ||
2071 | if (fix == POS_FIX_AUTO) { | 2078 | q = snd_pci_quirk_lookup(chip->pci, position_fix_list); |
2072 | q = snd_pci_quirk_lookup(chip->pci, position_fix_list); | 2079 | if (q) { |
2073 | if (q) { | 2080 | printk(KERN_INFO |
2074 | printk(KERN_INFO | 2081 | "hda_intel: position_fix set to %d " |
2075 | "hda_intel: position_fix set to %d " | 2082 | "for device %04x:%04x\n", |
2076 | "for device %04x:%04x\n", | 2083 | q->value, q->subvendor, q->subdevice); |
2077 | q->value, q->subvendor, q->subdevice); | 2084 | return q->value; |
2078 | return q->value; | ||
2079 | } | ||
2080 | } | 2085 | } |
2081 | return fix; | 2086 | return POS_FIX_AUTO; |
2082 | } | 2087 | } |
2083 | 2088 | ||
2084 | /* | 2089 | /* |
@@ -2210,9 +2215,17 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci, | |||
2210 | gcap = azx_readw(chip, GCAP); | 2215 | gcap = azx_readw(chip, GCAP); |
2211 | snd_printdd("chipset global capabilities = 0x%x\n", gcap); | 2216 | snd_printdd("chipset global capabilities = 0x%x\n", gcap); |
2212 | 2217 | ||
2218 | /* ATI chips seems buggy about 64bit DMA addresses */ | ||
2219 | if (chip->driver_type == AZX_DRIVER_ATI) | ||
2220 | gcap &= ~0x01; | ||
2221 | |||
2213 | /* allow 64bit DMA address if supported by H/W */ | 2222 | /* allow 64bit DMA address if supported by H/W */ |
2214 | if ((gcap & 0x01) && !pci_set_dma_mask(pci, DMA_64BIT_MASK)) | 2223 | if ((gcap & 0x01) && !pci_set_dma_mask(pci, DMA_64BIT_MASK)) |
2215 | pci_set_consistent_dma_mask(pci, DMA_64BIT_MASK); | 2224 | pci_set_consistent_dma_mask(pci, DMA_64BIT_MASK); |
2225 | else { | ||
2226 | pci_set_dma_mask(pci, DMA_32BIT_MASK); | ||
2227 | pci_set_consistent_dma_mask(pci, DMA_32BIT_MASK); | ||
2228 | } | ||
2216 | 2229 | ||
2217 | /* read number of streams from GCAP register instead of using | 2230 | /* read number of streams from GCAP register instead of using |
2218 | * hardcoded value | 2231 | * hardcoded value |
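Note: the azx_create() hunk above only keeps the 64-bit DMA masks when GCAP advertises them (and never for ATI), otherwise it pins both masks to 32 bits. A kernel-style sketch of that fallback, restated as a stand-alone helper (supports_64bit is a made-up parameter standing in for the GCAP bit test with the ATI quirk already applied; not buildable outside a kernel tree):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static void setup_dma_masks(struct pci_dev *pci, int supports_64bit)
{
        if (supports_64bit && !pci_set_dma_mask(pci, DMA_64BIT_MASK)) {
                /* streaming mask accepted: match the coherent mask */
                pci_set_consistent_dma_mask(pci, DMA_64BIT_MASK);
        } else {
                /* fall back to 32-bit addressing for both mask types */
                pci_set_dma_mask(pci, DMA_32BIT_MASK);
                pci_set_consistent_dma_mask(pci, DMA_32BIT_MASK);
        }
}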
diff --git a/sound/pci/mixart/mixart.c b/sound/pci/mixart/mixart.c index f23a73577c22..bb162507fe6c 100644 --- a/sound/pci/mixart/mixart.c +++ b/sound/pci/mixart/mixart.c | |||
@@ -607,6 +607,7 @@ static int snd_mixart_hw_params(struct snd_pcm_substream *subs, | |||
607 | /* set the format to the board */ | 607 | /* set the format to the board */ |
608 | err = mixart_set_format(stream, format); | 608 | err = mixart_set_format(stream, format); |
609 | if(err < 0) { | 609 | if(err < 0) { |
610 | mutex_unlock(&mgr->setup_mutex); | ||
610 | return err; | 611 | return err; |
611 | } | 612 | } |
612 | 613 | ||