264 files changed, 9470 insertions, 2583 deletions
diff --git a/Documentation/DocBook/sh.tmpl b/Documentation/DocBook/sh.tmpl
index d858d92cf6d9..4a38f604fa66 100644
--- a/Documentation/DocBook/sh.tmpl
+++ b/Documentation/DocBook/sh.tmpl
@@ -79,10 +79,6 @@
 </sect2>
 </sect1>
 </chapter>
-<chapter id="clk">
-<title>Clock Framework Extensions</title>
-!Iinclude/linux/sh_clk.h
-</chapter>
 <chapter id="mach">
 <title>Machine Specific Interfaces</title>
 <sect1 id="dreamcast">
diff --git a/Documentation/fb/00-INDEX b/Documentation/fb/00-INDEX
index a618fd99c9f0..30a70542e823 100644
--- a/Documentation/fb/00-INDEX
+++ b/Documentation/fb/00-INDEX
@@ -4,33 +4,41 @@ please mail me.
 Geert Uytterhoeven <geert@linux-m68k.org>
 
 00-INDEX
-- this file
+- this file.
 arkfb.txt
 - info on the fbdev driver for ARK Logic chips.
 aty128fb.txt
 - info on the ATI Rage128 frame buffer driver.
 cirrusfb.txt
 - info on the driver for Cirrus Logic chipsets.
+cmap_xfbdev.txt
+- an introduction to fbdev's cmap structures.
 deferred_io.txt
 - an introduction to deferred IO.
+efifb.txt
+- info on the EFI platform driver for Intel based Apple computers.
+ep93xx-fb.txt
+- info on the driver for EP93xx LCD controller.
 fbcon.txt
 - intro to and usage guide for the framebuffer console (fbcon).
 framebuffer.txt
 - introduction to frame buffer devices.
-imacfb.txt
-- info on the generic EFI platform driver for Intel based Macs.
+gxfb.txt
+- info on the framebuffer driver for AMD Geode GX2 based processors.
 intel810.txt
 - documentation for the Intel 810/815 framebuffer driver.
 intelfb.txt
 - docs for Intel 830M/845G/852GM/855GM/865G/915G/945G fb driver.
 internals.txt
 - quick overview of frame buffer device internals.
+lxfb.txt
+- info on the framebuffer driver for AMD Geode LX based processors.
 matroxfb.txt
 - info on the Matrox framebuffer driver for Alpha, Intel and PPC.
+metronomefb.txt
+- info on the driver for the Metronome display controller.
 modedb.txt
 - info on the video mode database.
-matroxfb.txt
-- info on the Matrox frame buffer driver.
 pvr2fb.txt
 - info on the PowerVR 2 frame buffer driver.
 pxafb.txt
@@ -39,13 +47,23 @@ s3fb.txt
 - info on the fbdev driver for S3 Trio/Virge chips.
 sa1100fb.txt
 - information about the driver for the SA-1100 LCD controller.
+sh7760fb.txt
+- info on the SH7760/SH7763 integrated LCDC Framebuffer driver.
 sisfb.txt
 - info on the framebuffer device driver for various SiS chips.
 sstfb.txt
 - info on the frame buffer driver for 3dfx' Voodoo Graphics boards.
 tgafb.txt
-- info on the TGA (DECChip 21030) frame buffer driver
+- info on the TGA (DECChip 21030) frame buffer driver.
+tridentfb.txt
+info on the framebuffer driver for some Trident chip based cards.
+uvesafb.txt
+- info on the userspace VESA (VBE2+ compliant) frame buffer device.
 vesafb.txt
-- info on the VESA frame buffer device
+- info on the VESA frame buffer device.
+viafb.modes
+- list of modes for VIA Integration Graphic Chip.
+viafb.txt
+- info on the VIA Integration Graphic Chip console framebuffer driver.
 vt8623fb.txt
 - info on the fb driver for the graphics core in VIA VT8623 chipsets.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 92e83e53148f..cdd2a6e8a3b7 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2385,6 +2385,11 @@ and is between 256 and 4096 characters. It is defined in the file
 improve throughput, but will also increase the
 amount of memory reserved for use by the client.
 
+swapaccount[=0|1]
+[KNL] Enable accounting of swap in memory resource
+controller if no parameter or 1 is given or disable
+it if 0 is given (See Documentation/cgroups/memory.txt)
+
 swiotlb= [IA-64] Number of I/O TLB slabs
 
 switches= [HW,M68k]
diff --git a/Documentation/sh/clk.txt b/Documentation/sh/clk.txt
deleted file mode 100644
index 114b595cfa97..000000000000
--- a/Documentation/sh/clk.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-Clock framework on SuperH architecture
-
-The framework on SH extends existing API by the function clk_set_rate_ex,
-which prototype is as follows:
-
-clk_set_rate_ex (struct clk *clk, unsigned long rate, int algo_id)
-
-The algo_id parameter is used to specify algorithm used to recalculate clocks,
-adjanced to clock, specified as first argument. It is assumed that algo_id==0
-means no changes to adjanced clock
-
-Internally, the clk_set_rate_ex forwards request to clk->ops->set_rate method,
-if it is present in ops structure. The method should set the clock rate and adjust
-all needed clocks according to the passed algo_id.
-Exact values for algo_id are machine-dependent. For the sh7722, the following
-values are defined:
-
-NO_CHANGE = 0,
-IUS_N1_N1, /* I:U = N:1, U:Sh = N:1 */
-IUS_322, /* I:U:Sh = 3:2:2 */
-IUS_522, /* I:U:Sh = 5:2:2 */
-IUS_N11, /* I:U:Sh = N:1:1 */
-SB_N1, /* Sh:B = N:1 */
-SB3_N1, /* Sh:B3 = N:1 */
-SB3_32, /* Sh:B3 = 3:2 */
-SB3_43, /* Sh:B3 = 4:3 */
-SB3_54, /* Sh:B3 = 5:4 */
-BP_N1, /* B:P = N:1 */
-IP_N1 /* I:P = N:1 */
-
-Each of these constants means relation between clocks that can be set via the FRQCR
-register
diff --git a/MAINTAINERS b/MAINTAINERS
index a92c994ba935..b3be8b3d0437 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1359,7 +1359,7 @@ F: include/net/bluetooth/
 
 BONDING DRIVER
 M: Jay Vosburgh <fubar@us.ibm.com>
-L: bonding-devel@lists.sourceforge.net
+L: netdev@vger.kernel.org
 W: http://sourceforge.net/projects/bonding/
 S: Supported
 F: drivers/net/bonding/
@@ -2444,10 +2444,12 @@ F: drivers/net/wan/sdla.c
 FRAMEBUFFER LAYER
 L: linux-fbdev@vger.kernel.org
 W: http://linux-fbdev.sourceforge.net/
+Q: http://patchwork.kernel.org/project/linux-fbdev/list/
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/fbdev-2.6.git
 S: Orphan
 F: Documentation/fb/
-F: drivers/video/fb*
+F: drivers/video/
+F: include/video/
 F: include/linux/fb.h
 
 FREESCALE DMA DRIVER
@@ -5837,6 +5839,8 @@ M: Chris Metcalf <cmetcalf@tilera.com>
 W: http://www.tilera.com/scm/
 S: Supported
 F: arch/tile/
+F: drivers/char/hvc_tile.c
+F: drivers/net/tile/
 
 TLAN NETWORK DRIVER
 M: Samuel Chessman <chessman@tux.org>
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 6825c34646d4..9be21ba648cd 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -1084,6 +1084,6 @@ memdump: mov r12, r0
 reloc_end:
 
 .align
-.section ".stack", "w"
+.section ".stack", "aw", %nobits
 user_stack: .space 4096
 user_stack_end:
diff --git a/arch/arm/boot/compressed/vmlinux.lds.in b/arch/arm/boot/compressed/vmlinux.lds.in
index d08168941bd6..366a924019ac 100644
--- a/arch/arm/boot/compressed/vmlinux.lds.in
+++ b/arch/arm/boot/compressed/vmlinux.lds.in
@@ -57,7 +57,7 @@ SECTIONS
 .bss : { *(.bss) }
 _end = .;
 
-.stack (NOLOAD) : { *(.stack) }
+.stack : { *(.stack) }
 
 .stab 0 : { *(.stab) }
 .stabstr 0 : { *(.stabstr) }
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 062b58c029ab..749bb6622404 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -238,7 +238,7 @@
 @ Slightly optimised to avoid incrementing the pointer twice
 usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
 .if \rept == 2
-usraccoff \instr, \reg, \ptr, \inc, 4, \cond, \abort
+usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
 .endif
 
 add\cond \ptr, #\rept * \inc
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 68870c776671..b4ffe9d5b526 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -13,6 +13,10 @@ typedef struct {
 
 #ifdef CONFIG_CPU_HAS_ASID
 #define ASID(mm) ((mm)->context.id & 255)
+
+/* init_mm.context.id_lock should be initialized. */
+#define INIT_MM_CONTEXT(name) \
+.context.id_lock = __SPIN_LOCK_UNLOCKED(name.context.id_lock),
 #else
 #define ASID(mm) (0)
 #endif
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index b155414192da..53d1d5deb111 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -374,6 +374,9 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 
 #define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd)))
 
+/* we don't need complex calculations here as the pmd is folded into the pgd */
+#define pmd_addr_end(addr,end) (end)
+
 /*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S
index 1e4cbd4e7be9..64f6bc1a9132 100644
--- a/arch/arm/lib/findbit.S
+++ b/arch/arm/lib/findbit.S
@@ -174,8 +174,8 @@ ENDPROC(_find_next_bit_be)
 */
 .L_found:
 #if __LINUX_ARM_ARCH__ >= 5
-rsb r1, r3, #0
-and r3, r3, r1
+rsb r0, r3, #0
+and r3, r3, r0
 clz r3, r3
 rsb r3, r3, #31
 add r0, r2, r3
@@ -190,5 +190,7 @@ ENDPROC(_find_next_bit_be)
 addeq r2, r2, #1
 mov r0, r2
 #endif
+cmp r1, r0 @ Clamp to maxbit
+movlo r0, r1
 mov pc, lr
 
diff --git a/arch/arm/mach-aaec2000/include/mach/vmalloc.h b/arch/arm/mach-aaec2000/include/mach/vmalloc.h
index cff4e0a996ce..a6299e8321bd 100644
--- a/arch/arm/mach-aaec2000/include/mach/vmalloc.h
+++ b/arch/arm/mach-aaec2000/include/mach/vmalloc.h
@@ -11,6 +11,6 @@
 #ifndef __ASM_ARCH_VMALLOC_H
 #define __ASM_ARCH_VMALLOC_H
 
-#define VMALLOC_END 0xd0000000
+#define VMALLOC_END 0xd0000000UL
 
 #endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-bcmring/include/mach/vmalloc.h b/arch/arm/mach-bcmring/include/mach/vmalloc.h
index 3db3a09fd398..7397bd7817d9 100644
--- a/arch/arm/mach-bcmring/include/mach/vmalloc.h
+++ b/arch/arm/mach-bcmring/include/mach/vmalloc.h
@@ -22,4 +22,4 @@
 * 0xe0000000 to 0xefffffff. This gives us 256 MB of vm space and handles
 * larger physical memory designs better.
 */
-#define VMALLOC_END 0xf0000000
+#define VMALLOC_END 0xf0000000UL
diff --git a/arch/arm/mach-clps711x/include/mach/vmalloc.h b/arch/arm/mach-clps711x/include/mach/vmalloc.h
index 30b3a287ed88..467b96137e47 100644
--- a/arch/arm/mach-clps711x/include/mach/vmalloc.h
+++ b/arch/arm/mach-clps711x/include/mach/vmalloc.h
@@ -17,4 +17,4 @@
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
-#define VMALLOC_END 0xd0000000
+#define VMALLOC_END 0xd0000000UL
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 9be261beae7d..2652af124acd 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -359,8 +359,8 @@ static struct clk_lookup dm355_clks[] = {
 CLK(NULL, "uart1", &uart1_clk),
 CLK(NULL, "uart2", &uart2_clk),
 CLK("i2c_davinci.1", NULL, &i2c_clk),
-CLK("davinci-asp.0", NULL, &asp0_clk),
-CLK("davinci-asp.1", NULL, &asp1_clk),
+CLK("davinci-mcbsp.0", NULL, &asp0_clk),
+CLK("davinci-mcbsp.1", NULL, &asp1_clk),
 CLK("davinci_mmc.0", NULL, &mmcsd0_clk),
 CLK("davinci_mmc.1", NULL, &mmcsd1_clk),
 CLK("spi_davinci.0", NULL, &spi0_clk),
@@ -664,7 +664,7 @@ static struct resource dm355_asp1_resources[] = {
 };
 
 static struct platform_device dm355_asp1_device = {
-.name = "davinci-asp",
+.name = "davinci-mcbsp",
 .id = 1,
 .num_resources = ARRAY_SIZE(dm355_asp1_resources),
 .resource = dm355_asp1_resources,
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index a12065e87266..c466d710d3c1 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -459,7 +459,7 @@ static struct clk_lookup dm365_clks[] = {
 CLK(NULL, "usb", &usb_clk),
 CLK("davinci_emac.1", NULL, &emac_clk),
 CLK("davinci_voicecodec", NULL, &voicecodec_clk),
-CLK("davinci-asp.0", NULL, &asp0_clk),
+CLK("davinci-mcbsp", NULL, &asp0_clk),
 CLK(NULL, "rto", &rto_clk),
 CLK(NULL, "mjcp", &mjcp_clk),
 CLK(NULL, NULL, NULL),
@@ -922,8 +922,8 @@ static struct resource dm365_asp_resources[] = {
 };
 
 static struct platform_device dm365_asp_device = {
-.name = "davinci-asp",
-.id = 0,
+.name = "davinci-mcbsp",
+.id = -1,
 .num_resources = ARRAY_SIZE(dm365_asp_resources),
 .resource = dm365_asp_resources,
 };
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 0608dd776a16..9a2376b3137c 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -302,7 +302,7 @@ static struct clk_lookup dm644x_clks[] = {
 CLK("davinci_emac.1", NULL, &emac_clk),
 CLK("i2c_davinci.1", NULL, &i2c_clk),
 CLK("palm_bk3710", NULL, &ide_clk),
-CLK("davinci-asp", NULL, &asp_clk),
+CLK("davinci-mcbsp", NULL, &asp_clk),
 CLK("davinci_mmc.0", NULL, &mmcsd_clk),
 CLK(NULL, "spi", &spi_clk),
 CLK(NULL, "gpio", &gpio_clk),
@@ -580,7 +580,7 @@ static struct resource dm644x_asp_resources[] = {
 };
 
 static struct platform_device dm644x_asp_device = {
-.name = "davinci-asp",
+.name = "davinci-mcbsp",
 .id = -1,
 .num_resources = ARRAY_SIZE(dm644x_asp_resources),
 .resource = dm644x_asp_resources,
diff --git a/arch/arm/mach-ebsa110/include/mach/vmalloc.h b/arch/arm/mach-ebsa110/include/mach/vmalloc.h
index 60bde56fba4c..ea141b7a3e03 100644
--- a/arch/arm/mach-ebsa110/include/mach/vmalloc.h
+++ b/arch/arm/mach-ebsa110/include/mach/vmalloc.h
@@ -7,4 +7,4 @@
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
-#define VMALLOC_END 0xdf000000
+#define VMALLOC_END 0xdf000000UL
diff --git a/arch/arm/mach-footbridge/include/mach/vmalloc.h b/arch/arm/mach-footbridge/include/mach/vmalloc.h
index 0ffbb7c85e59..40ba78e5782b 100644
--- a/arch/arm/mach-footbridge/include/mach/vmalloc.h
+++ b/arch/arm/mach-footbridge/include/mach/vmalloc.h
@@ -7,4 +7,4 @@
 */
 
 
-#define VMALLOC_END 0xf0000000
+#define VMALLOC_END 0xf0000000UL
diff --git a/arch/arm/mach-h720x/include/mach/vmalloc.h b/arch/arm/mach-h720x/include/mach/vmalloc.h
index a45915b88756..8520b4a4d4e6 100644
--- a/arch/arm/mach-h720x/include/mach/vmalloc.h
+++ b/arch/arm/mach-h720x/include/mach/vmalloc.h
@@ -5,6 +5,6 @@
 #ifndef __ARCH_ARM_VMALLOC_H
 #define __ARCH_ARM_VMALLOC_H
 
-#define VMALLOC_END 0xd0000000
+#define VMALLOC_END 0xd0000000UL
 
 #endif
diff --git a/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c b/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c
index 026263c665ca..7e1e9dc2c8fc 100644
--- a/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c
+++ b/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c
@@ -250,9 +250,6 @@ static const struct imxuart_platform_data uart_pdata __initconst = {
 .flags = IMXUART_HAVE_RTSCTS,
 };
 
-#if defined(CONFIG_TOUCHSCREEN_ADS7846) \
-|| defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
-
 #define ADS7846_PENDOWN (GPIO_PORTD | 25)
 
 static void ads7846_dev_init(void)
@@ -273,9 +270,7 @@ static struct ads7846_platform_data ads7846_config __initdata = {
 .get_pendown_state = ads7846_get_pendown_state,
 .keep_vref_on = 1,
 };
-#endif
 
-#if defined(CONFIG_SPI_IMX) || defined(CONFIG_SPI_IMX_MODULE)
 static struct spi_board_info eukrea_mbimx27_spi_board_info[] __initdata = {
 [0] = {
 .modalias = "ads7846",
@@ -294,7 +289,6 @@ static const struct spi_imx_master eukrea_mbimx27_spi0_data __initconst = {
 .chipselect = eukrea_mbimx27_spi_cs,
 .num_chipselect = ARRAY_SIZE(eukrea_mbimx27_spi_cs),
 };
-#endif
 
 static struct i2c_board_info eukrea_mbimx27_i2c_devices[] = {
 {
diff --git a/arch/arm/mach-integrator/include/mach/vmalloc.h b/arch/arm/mach-integrator/include/mach/vmalloc.h
index e056e7cf5645..2f5a2bafb11f 100644
--- a/arch/arm/mach-integrator/include/mach/vmalloc.h
+++ b/arch/arm/mach-integrator/include/mach/vmalloc.h
@@ -17,4 +17,4 @@
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
-#define VMALLOC_END 0xd0000000
+#define VMALLOC_END 0xd0000000UL
diff --git a/arch/arm/mach-msm/include/mach/vmalloc.h b/arch/arm/mach-msm/include/mach/vmalloc.h
index 31a32ad062dc..d138448eff16 100644
--- a/arch/arm/mach-msm/include/mach/vmalloc.h
+++ b/arch/arm/mach-msm/include/mach/vmalloc.h
@@ -16,7 +16,7 @@
 #ifndef __ASM_ARCH_MSM_VMALLOC_H
 #define __ASM_ARCH_MSM_VMALLOC_H
 
-#define VMALLOC_END 0xd0000000
+#define VMALLOC_END 0xd0000000UL
 
 #endif
 
diff --git a/arch/arm/mach-mx25/devices-imx25.h b/arch/arm/mach-mx25/devices-imx25.h
index 93afa10b13cf..d94d282fa676 100644
--- a/arch/arm/mach-mx25/devices-imx25.h
+++ b/arch/arm/mach-mx25/devices-imx25.h
@@ -42,9 +42,9 @@ extern const struct imx_mxc_nand_data imx25_mxc_nand_data __initconst;
 #define imx25_add_mxc_nand(pdata) \
 imx_add_mxc_nand(&imx25_mxc_nand_data, pdata)
 
-extern const struct imx_spi_imx_data imx25_spi_imx_data[] __initconst;
+extern const struct imx_spi_imx_data imx25_cspi_data[] __initconst;
 #define imx25_add_spi_imx(id, pdata) \
-imx_add_spi_imx(&imx25_spi_imx_data[id], pdata)
+imx_add_spi_imx(&imx25_cspi_data[id], pdata)
 #define imx25_add_spi_imx0(pdata) imx25_add_spi_imx(0, pdata)
 #define imx25_add_spi_imx1(pdata) imx25_add_spi_imx(1, pdata)
 #define imx25_add_spi_imx2(pdata) imx25_add_spi_imx(2, pdata)
diff --git a/arch/arm/mach-mx3/mach-pcm037_eet.c b/arch/arm/mach-mx3/mach-pcm037_eet.c
index 99e0894e07db..fda56545d2fd 100644
--- a/arch/arm/mach-mx3/mach-pcm037_eet.c
+++ b/arch/arm/mach-mx3/mach-pcm037_eet.c
@@ -14,6 +14,7 @@
 
 #include <mach/common.h>
 #include <mach/iomux-mx3.h>
+#include <mach/spi.h>
 
 #include <asm/mach-types.h>
 
@@ -59,14 +60,12 @@ static struct spi_board_info pcm037_spi_dev[] = {
 };
 
 /* Platform Data for MXC CSPI */
-#if defined(CONFIG_SPI_IMX) || defined(CONFIG_SPI_IMX_MODULE)
 static int pcm037_spi1_cs[] = {MXC_SPI_CS(1), IOMUX_TO_GPIO(MX31_PIN_KEY_COL7)};
 
 static const struct spi_imx_master pcm037_spi1_pdata __initconst = {
 .chipselect = pcm037_spi1_cs,
 .num_chipselect = ARRAY_SIZE(pcm037_spi1_cs),
 };
-#endif
 
 /* GPIO-keys input device */
 static struct gpio_keys_button pcm037_gpio_keys[] = {
@@ -171,7 +170,7 @@ static struct platform_device pcm037_gpio_keys_device = {
 },
 };
 
-static int eet_init_devices(void)
+static int __init eet_init_devices(void)
 {
 if (!machine_is_pcm037() || pcm037_variant() != PCM037_EET)
 return 0;
diff --git a/arch/arm/mach-netx/include/mach/vmalloc.h b/arch/arm/mach-netx/include/mach/vmalloc.h
index 7cca3574308f..871f1ef7bff5 100644
--- a/arch/arm/mach-netx/include/mach/vmalloc.h
+++ b/arch/arm/mach-netx/include/mach/vmalloc.h
@@ -16,4 +16,4 @@
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
-#define VMALLOC_END 0xd0000000
+#define VMALLOC_END 0xd0000000UL
diff --git a/arch/arm/mach-omap1/include/mach/vmalloc.h b/arch/arm/mach-omap1/include/mach/vmalloc.h
index b001f67d695b..22ec4a479577 100644
--- a/arch/arm/mach-omap1/include/mach/vmalloc.h
+++ b/arch/arm/mach-omap1/include/mach/vmalloc.h
@@ -17,4 +17,4 @@
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
-#define VMALLOC_END 0xd8000000
+#define VMALLOC_END 0xd8000000UL
diff --git a/arch/arm/mach-omap2/include/mach/vmalloc.h b/arch/arm/mach-omap2/include/mach/vmalloc.h
index 4da31e997efe..866319947760 100644
--- a/arch/arm/mach-omap2/include/mach/vmalloc.h
+++ b/arch/arm/mach-omap2/include/mach/vmalloc.h
@@ -17,4 +17,4 @@
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
-#define VMALLOC_END 0xf8000000
+#define VMALLOC_END 0xf8000000UL
diff --git a/arch/arm/mach-pnx4008/include/mach/vmalloc.h b/arch/arm/mach-pnx4008/include/mach/vmalloc.h
index 31b65ee07b0b..184913c71141 100644
--- a/arch/arm/mach-pnx4008/include/mach/vmalloc.h
+++ b/arch/arm/mach-pnx4008/include/mach/vmalloc.h
@@ -17,4 +17,4 @@
 * The vmalloc() routines leaves a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
-#define VMALLOC_END 0xd0000000
+#define VMALLOC_END 0xd0000000UL
diff --git a/arch/arm/mach-rpc/include/mach/vmalloc.h b/arch/arm/mach-rpc/include/mach/vmalloc.h
index 3bcd86fadb81..fb700228637a 100644
--- a/arch/arm/mach-rpc/include/mach/vmalloc.h
+++ b/arch/arm/mach-rpc/include/mach/vmalloc.h
@@ -7,4 +7,4 @@
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
-#define VMALLOC_END 0xdc000000
+#define VMALLOC_END 0xdc000000UL
diff --git a/arch/arm/mach-s3c2410/h1940-bluetooth.c b/arch/arm/mach-s3c2410/h1940-bluetooth.c
index 8aa2f1902a94..6b86a722a7db 100644
--- a/arch/arm/mach-s3c2410/h1940-bluetooth.c
+++ b/arch/arm/mach-s3c2410/h1940-bluetooth.c
@@ -77,13 +77,13 @@ static int __devinit h1940bt_probe(struct platform_device *pdev)
 
 /* Configures BT serial port GPIOs */
 s3c_gpio_cfgpin(S3C2410_GPH(0), S3C2410_GPH0_nCTS0);
-s3c_gpio_cfgpull(S3C2410_GPH(0), S3C_GPIO_PULL_NONE);
+s3c_gpio_setpull(S3C2410_GPH(0), S3C_GPIO_PULL_NONE);
 s3c_gpio_cfgpin(S3C2410_GPH(1), S3C2410_GPIO_OUTPUT);
-s3c_gpio_cfgpull(S3C2410_GPH(1), S3C_GPIO_PULL_NONE);
+s3c_gpio_setpull(S3C2410_GPH(1), S3C_GPIO_PULL_NONE);
 s3c_gpio_cfgpin(S3C2410_GPH(2), S3C2410_GPH2_TXD0);
-s3c_gpio_cfgpull(S3C2410_GPH(2), S3C_GPIO_PULL_NONE);
+s3c_gpio_setpull(S3C2410_GPH(2), S3C_GPIO_PULL_NONE);
 s3c_gpio_cfgpin(S3C2410_GPH(3), S3C2410_GPH3_RXD0);
-s3c_gpio_cfgpull(S3C2410_GPH(3), S3C_GPIO_PULL_NONE);
+s3c_gpio_setpull(S3C2410_GPH(3), S3C_GPIO_PULL_NONE);
 
 
 rfk = rfkill_alloc(DRV_NAME, &pdev->dev, RFKILL_TYPE_BLUETOOTH,
diff --git a/arch/arm/mach-s3c2416/irq.c b/arch/arm/mach-s3c2416/irq.c
index 084d121f368c..00174daf1526 100644
--- a/arch/arm/mach-s3c2416/irq.c
+++ b/arch/arm/mach-s3c2416/irq.c
@@ -168,12 +168,11 @@ static struct irq_chip s3c2416_irq_dma = {
 
 static void s3c2416_irq_demux_uart3(unsigned int irq, struct irq_desc *desc)
 {
-s3c2416_irq_demux(IRQ_S3C2443_UART3, 3);
+s3c2416_irq_demux(IRQ_S3C2443_RX3, 3);
 }
 
 #define INTMSK_UART3 (1UL << (IRQ_S3C2443_UART3 - IRQ_EINT0))
-#define SUBMSK_UART3 (0xf << (IRQ_S3C2443_RX3 - S3C2410_IRQSUB(0)))
-
+#define SUBMSK_UART3 (0x7 << (IRQ_S3C2443_RX3 - S3C2410_IRQSUB(0)))
 
 static void s3c2416_irq_uart3_mask(unsigned int irqno)
 {
diff --git a/arch/arm/mach-s3c2443/irq.c b/arch/arm/mach-s3c2443/irq.c
index 0e0d693f3974..893424767ce1 100644
--- a/arch/arm/mach-s3c2443/irq.c
+++ b/arch/arm/mach-s3c2443/irq.c
@@ -166,12 +166,11 @@ static struct irq_chip s3c2443_irq_dma = {
 
 static void s3c2443_irq_demux_uart3(unsigned int irq, struct irq_desc *desc)
 {
-s3c2443_irq_demux(IRQ_S3C2443_UART3, 3);
+s3c2443_irq_demux(IRQ_S3C2443_RX3, 3);
 }
 
 #define INTMSK_UART3 (1UL << (IRQ_S3C2443_UART3 - IRQ_EINT0))
-#define SUBMSK_UART3 (0xf << (IRQ_S3C2443_RX3 - S3C2410_IRQSUB(0)))
-
+#define SUBMSK_UART3 (0x7 << (IRQ_S3C2443_RX3 - S3C2410_IRQSUB(0)))
 
 static void s3c2443_irq_uart3_mask(unsigned int irqno)
 {
diff --git a/arch/arm/mach-s3c64xx/mach-mini6410.c b/arch/arm/mach-s3c64xx/mach-mini6410.c
index 249c62956471..89f35e02e883 100644
--- a/arch/arm/mach-s3c64xx/mach-mini6410.c
+++ b/arch/arm/mach-s3c64xx/mach-mini6410.c
@@ -45,7 +45,7 @@
 
 #include <video/platform_lcd.h>
 
-#define UCON (S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK)
+#define UCON S3C2410_UCON_DEFAULT
 #define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB)
 #define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
 
diff --git a/arch/arm/mach-s3c64xx/mach-real6410.c b/arch/arm/mach-s3c64xx/mach-real6410.c
index f9ef9b5c5f5a..4957ab0a0d4a 100644
--- a/arch/arm/mach-s3c64xx/mach-real6410.c
+++ b/arch/arm/mach-s3c64xx/mach-real6410.c
@@ -46,7 +46,7 @@
 
 #include <video/platform_lcd.h>
 
-#define UCON (S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK)
+#define UCON S3C2410_UCON_DEFAULT
 #define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB)
 #define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
 
diff --git a/arch/arm/mach-s5pv210/mach-smdkc110.c b/arch/arm/mach-s5pv210/mach-smdkc110.c
index 0ad7924fe62e..5dd1681c069e 100644
--- a/arch/arm/mach-s5pv210/mach-smdkc110.c
+++ b/arch/arm/mach-s5pv210/mach-smdkc110.c
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/serial_core.h>
 #include <linux/i2c.h>
+#include <linux/sysdev.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
diff --git a/arch/arm/mach-s5pv210/mach-smdkv210.c b/arch/arm/mach-s5pv210/mach-smdkv210.c
index bcd7a5d53401..1fbc45b2a432 100644
--- a/arch/arm/mach-s5pv210/mach-smdkv210.c
+++ b/arch/arm/mach-s5pv210/mach-smdkv210.c
@@ -13,6 +13,7 @@
 #include <linux/i2c.h>
 #include <linux/init.h>
 #include <linux/serial_core.h>
+#include <linux/sysdev.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
diff --git a/arch/arm/mach-shark/include/mach/vmalloc.h b/arch/arm/mach-shark/include/mach/vmalloc.h
index 8e845b6a7cb5..b10df988526d 100644
--- a/arch/arm/mach-shark/include/mach/vmalloc.h
+++ b/arch/arm/mach-shark/include/mach/vmalloc.h
@@ -1,4 +1,4 @@
 /*
 * arch/arm/mach-shark/include/mach/vmalloc.h
 */
-#define VMALLOC_END 0xd0000000
+#define VMALLOC_END 0xd0000000UL
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index d3260542b943..d440e5f456ad 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -567,38 +567,127 @@ static struct platform_device *qhd_devices[] __initdata = {
 
 /* FSI */
 #define IRQ_FSI evt2irq(0x1840)
+static int __fsi_set_rate(struct clk *clk, long rate, int enable)
+{
+int ret = 0;
+
+if (rate <= 0)
+return ret;
+
+if (enable) {
+ret = clk_set_rate(clk, rate);
+if (0 == ret)
+ret = clk_enable(clk);
+} else {
+clk_disable(clk);
+}
+
+return ret;
+}
+
+static int __fsi_set_round_rate(struct clk *clk, long rate, int enable)
+{
+return __fsi_set_rate(clk, clk_round_rate(clk, rate), enable);
+}
 
-static int fsi_set_rate(int is_porta, int rate)
+static int fsi_ak4642_set_rate(struct device *dev, int rate, int enable)
+{
+struct clk *fsia_ick;
+struct clk *fsiack;
+int ret = -EIO;
+
+fsia_ick = clk_get(dev, "icka");
+if (IS_ERR(fsia_ick))
+return PTR_ERR(fsia_ick);
+
+/*
+* FSIACK is connected to AK4642,
+* and use external clock pin from it.
+* it is parent of fsia_ick now.
+*/
+fsiack = clk_get_parent(fsia_ick);
+if (!fsiack)
+goto fsia_ick_out;
+
+/*
+* we get 1/1 divided clock by setting same rate to fsiack and fsia_ick
+*
+** FIXME **
+* Because the freq_table of external clk (fsiack) are all 0,
+* the return value of clk_round_rate became 0.
+* So, it use __fsi_set_rate here.
+*/
+ret = __fsi_set_rate(fsiack, rate, enable);
+if (ret < 0)
+goto fsiack_out;
+
+ret = __fsi_set_round_rate(fsia_ick, rate, enable);
+if ((ret < 0) && enable)
+__fsi_set_round_rate(fsiack, rate, 0); /* disable FSI ACK */
+
+fsiack_out:
+clk_put(fsiack);
+
+fsia_ick_out:
+clk_put(fsia_ick);
+
+return 0;
+}
+
+static int fsi_hdmi_set_rate(struct device *dev, int rate, int enable)
 {
 struct clk *fsib_clk;
 struct clk *fdiv_clk = &sh7372_fsidivb_clk;
+long fsib_rate = 0;
+long fdiv_rate = 0;
+int ackmd_bpfmd;
 int ret;
 
-/* set_rate is not needed if port A */
-if (is_porta)
-return 0;
-
-fsib_clk = clk_get(NULL, "fsib_clk");
-if (IS_ERR(fsib_clk))
-return -EINVAL;
-
 switch (rate) {
 case 44100:
-clk_set_rate(fsib_clk, clk_round_rate(fsib_clk, 11283000));
-ret = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
+fsib_rate = rate * 256;
+ackmd_bpfmd = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
 break;
 case 48000:
-clk_set_rate(fsib_clk, clk_round_rate(fsib_clk, 85428000));
-clk_set_rate(fdiv_clk, clk_round_rate(fdiv_clk, 12204000));
-ret = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
+fsib_rate = 85428000; /* around 48kHz x 256 x 7 */
+fdiv_rate = rate * 256;
+ackmd_bpfmd = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
 break;
 default:
 pr_err("unsupported rate in FSI2 port B\n");
-ret = -EINVAL;
-break;
+return -EINVAL;
 }
 
+/* FSI B setting */
+fsib_clk = clk_get(dev, "ickb");
+if (IS_ERR(fsib_clk))
+return -EIO;
+
+ret = __fsi_set_round_rate(fsib_clk, fsib_rate, enable);
 clk_put(fsib_clk);
+if (ret < 0)
+return ret;
+
+/* FSI DIV setting */
+ret = __fsi_set_round_rate(fdiv_clk, fdiv_rate, enable);
+if (ret < 0) {
+/* disable FSI B */
+if (enable)
+__fsi_set_round_rate(fsib_clk, fsib_rate, 0);
+return ret;
+}
+
+return ackmd_bpfmd;
+}
+
+static int fsi_set_rate(struct device *dev, int is_porta, int rate, int enable)
+{
+int ret;
+
+if (is_porta)
+ret = fsi_ak4642_set_rate(dev, rate, enable);
+else
+ret = fsi_hdmi_set_rate(dev, rate, enable);
 
 return ret;
 }
@@ -880,6 +969,11 @@ static int __init hdmi_init_pm_clock(void)
 goto out;
 }
 
+ret = clk_enable(&sh7372_pllc2_clk);
+if (ret < 0) {
+pr_err("Cannot enable pllc2 clock\n");
+goto out;
+}
 pr_debug("PLLC2 set frequency %lu\n", rate);
 
 ret = clk_set_parent(hdmi_ick, &sh7372_pllc2_clk);
@@ -896,23 +990,11 @@ out:
 
 device_initcall(hdmi_init_pm_clock);
 
-#define FSIACK_DUMMY_RATE 48000
 static int __init fsi_init_pm_clock(void)
 {
 struct clk *fsia_ick;
 int ret;
 
-/*
-* FSIACK is connected to AK4642,
-* and the rate is depend on playing sound rate.
-* So, set dummy rate (= 48k) here
-*/
-ret = clk_set_rate(&sh7372_fsiack_clk, FSIACK_DUMMY_RATE);
-if (ret < 0) {
-pr_err("Cannot set FSIACK dummy rate: %d\n", ret);
-return ret;
-}
-
 fsia_ick = clk_get(&fsi_device.dev, "icka");
 if (IS_ERR(fsia_ick)) {
 ret = PTR_ERR(fsia_ick);
@@ -921,16 +1003,9 @@ static int __init fsi_init_pm_clock(void)
 }
 
 ret = clk_set_parent(fsia_ick, &sh7372_fsiack_clk);
-if (ret < 0) {
-pr_err("Cannot set FSI-A parent: %d\n", ret);
-goto out;
-}
-
-ret = clk_set_rate(fsia_ick, FSIACK_DUMMY_RATE);
 if (ret < 0)
-pr_err("Cannot set FSI-A rate: %d\n", ret);
+pr_err("Cannot set FSI-A parent: %d\n", ret);
 
-out:
 clk_put(fsia_ick);
 
 return ret;
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index 7db31e6c6bf2..3aa026069435 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -220,8 +220,7 @@ static void pllc2_disable(struct clk *clk)
 __raw_writel(__raw_readl(PLLC2CR) & ~0x80000000, PLLC2CR);
 }
 
-static int pllc2_set_rate(struct clk *clk,
-unsigned long rate, int algo_id)
+static int pllc2_set_rate(struct clk *clk, unsigned long rate)
 {
 unsigned long value;
 int idx;
@@ -230,21 +229,13 @@ static int pllc2_set_rate(struct clk *clk,
 if (idx < 0)
 return idx;
 
-if (rate == clk->parent->rate) {
-pllc2_disable(clk);
-return 0;
-}
+if (rate == clk->parent->rate)
+return -EINVAL;
 
 value = __raw_readl(PLLC2CR) & ~(0x3f << 24);
 
-if (value & 0x80000000)
-pllc2_disable(clk);
-
 __raw_writel((value & ~0x80000000) | ((idx + 19) << 24), PLLC2CR);
 
-if (value & 0x80000000)
-return pllc2_enable(clk);
-
 return 0;
 }
 
@@ -453,32 +444,24 @@ static int fsidiv_enable(struct clk *clk)
 unsigned long value;
 
 value = __raw_readl(clk->mapping->base) >> 16;
-if (value < 2) {
-fsidiv_disable(clk);
-return -ENOENT;
-}
+if (value < 2)
+return -EIO;
 
 __raw_writel((value << 16) | 0x3, clk->mapping->base);
 
 return 0;
 }
 
-static int fsidiv_set_rate(struct clk *clk,
-unsigned long rate, int algo_id)
+static int fsidiv_set_rate(struct clk *clk, unsigned long rate)
 {
 int idx;
 
-if (clk->parent->rate == rate) {
-fsidiv_disable(clk);
-return 0;
-}
-
 idx = (clk->parent->rate / rate) & 0xffff;
 if (idx < 2)
-return -ENOENT;
+return -EINVAL;
 
 __raw_writel(idx << 16, clk->mapping->base);
-return fsidiv_enable(clk);
+return 0;
 }
 
 static struct clk_ops fsidiv_clk_ops = {
@@ -609,8 +592,6 @@ static struct clk_lookup lookups[] = {
 CLKDEV_CON_ID("vck3_clk", &div6_clks[DIV6_VCK3]),
 CLKDEV_CON_ID("fmsi_clk", &div6_clks[DIV6_FMSI]),
 CLKDEV_CON_ID("fmso_clk", &div6_clks[DIV6_FMSO]),
-CLKDEV_CON_ID("fsia_clk", &div6_reparent_clks[DIV6_FSIA]),
-CLKDEV_CON_ID("fsib_clk", &div6_reparent_clks[DIV6_FSIB]),
 CLKDEV_CON_ID("sub_clk", &div6_clks[DIV6_SUB]),
 CLKDEV_CON_ID("spu_clk", &div6_clks[DIV6_SPU]),
 CLKDEV_CON_ID("vou_clk", &div6_clks[DIV6_VOU]),
@@ -647,8 +628,8 @@ static struct clk_lookup lookups[] = {
 CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */
 CLKDEV_DEV_ID("sh_fsi2", &mstp_clks[MSTP328]), /* FSI2 */
 CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* IIC1 */
-CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[MSTP323]), /* USB0 */
-CLKDEV_DEV_ID("r8a66597_udc.0", &mstp_clks[MSTP323]), /* USB0 */
+CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[MSTP322]), /* USB0 */
+CLKDEV_DEV_ID("r8a66597_udc.0", &mstp_clks[MSTP322]), /* USB0 */
 CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */
 CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
 CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMC */
diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c
index 73fb1a551ec6..608a1372b172 100644
--- a/arch/arm/mach-ux500/cpu.c
+++ b/arch/arm/mach-ux500/cpu.c
@@ -75,14 +75,14 @@ void __init ux500_init_irq(void)
 static inline void ux500_cache_wait(void __iomem *reg, unsigned long mask)
 {
 /* wait for the operation to complete */
-while (readl(reg) & mask)
+while (readl_relaxed(reg) & mask)
 ;
 }
 
 static inline void ux500_cache_sync(void)
 {
 void __iomem *base = __io_address(UX500_L2CC_BASE);
-writel(0, base + L2X0_CACHE_SYNC);
+writel_relaxed(0, base + L2X0_CACHE_SYNC);
 ux500_cache_wait(base + L2X0_CACHE_SYNC, 1);
 }
 
@@ -107,7 +107,7 @@ static void ux500_l2x0_inv_all(void)
 uint32_t l2x0_way_mask = (1<<16) - 1; /* Bitmask of active ways */
 
 /* invalidate all ways */
-writel(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
+writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
 ux500_cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
 ux500_cache_sync();
 }
diff --git a/arch/arm/mach-versatile/include/mach/vmalloc.h b/arch/arm/mach-versatile/include/mach/vmalloc.h
index ebd8a2543d3b..7d8e069ad51b 100644
--- a/arch/arm/mach-versatile/include/mach/vmalloc.h
+++ b/arch/arm/mach-versatile/include/mach/vmalloc.h
@@ -18,4 +18,4 @@
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
-#define VMALLOC_END 0xd8000000
+#define VMALLOC_END 0xd8000000UL
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 17e7b0b57e49..55c17a6fb22f 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -206,8 +206,8 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 */
 if (pfn_valid(pfn)) {
 printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory. This leads\n"
-KERN_WARNING "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n"
-KERN_WARNING "will fail in the next kernel release. Please fix your driver.\n");
+"to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n"
+"will fail in the next kernel release. Please fix your driver.\n");
 WARN_ON(1);
 }
 
diff --git a/arch/arm/plat-mxc/devices/platform-imx-dma.c b/arch/arm/plat-mxc/devices/platform-imx-dma.c
index 02d989018059..3a705c7877dd 100644
--- a/arch/arm/plat-mxc/devices/platform-imx-dma.c
+++ b/arch/arm/plat-mxc/devices/platform-imx-dma.c
@@ -12,15 +12,7 @@
 
 #include <mach/hardware.h>
 #include <mach/devices-common.h>
-#ifdef SDMA_IS_MERGED
 #include <mach/sdma.h>
-#else
-struct sdma_platform_data {
-int sdma_version;
-char *cpu_name;
-int to_version;
-};
-#endif
 
 struct imx_imx_sdma_data {
 resource_size_t iobase;
diff --git a/arch/arm/plat-mxc/devices/platform-spi_imx.c b/arch/arm/plat-mxc/devices/platform-spi_imx.c
index e48340ec331e..17f724c9452d 100644
--- a/arch/arm/plat-mxc/devices/platform-spi_imx.c
+++ b/arch/arm/plat-mxc/devices/platform-spi_imx.c
@@ -27,6 +27,7 @@ const struct imx_spi_imx_data imx21_cspi_data[] __initconst = {
 imx_spi_imx_data_entry(MX21, CSPI, "imx21-cspi", _id, _hwid, SZ_4K)
 imx21_cspi_data_entry(0, 1),
 imx21_cspi_data_entry(1, 2),
+};
 #endif
 
 #ifdef CONFIG_ARCH_MX25
diff --git a/arch/arm/plat-nomadik/timer.c b/arch/arm/plat-nomadik/timer.c index aedf9c1d645e..63cdc6025bd7 100644 --- a/arch/arm/plat-nomadik/timer.c +++ b/arch/arm/plat-nomadik/timer.c | |||
@@ -3,6 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 2008 STMicroelectronics | 4 | * Copyright (C) 2008 STMicroelectronics |
5 | * Copyright (C) 2010 Alessandro Rubini | 5 | * Copyright (C) 2010 Alessandro Rubini |
6 | * Copyright (C) 2010 Linus Walleij for ST-Ericsson | ||
6 | * | 7 | * |
7 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2, as | 9 | * it under the terms of the GNU General Public License version 2, as |
@@ -16,11 +17,13 @@ | |||
16 | #include <linux/clk.h> | 17 | #include <linux/clk.h> |
17 | #include <linux/jiffies.h> | 18 | #include <linux/jiffies.h> |
18 | #include <linux/err.h> | 19 | #include <linux/err.h> |
20 | #include <linux/cnt32_to_63.h> | ||
21 | #include <linux/timer.h> | ||
19 | #include <asm/mach/time.h> | 22 | #include <asm/mach/time.h> |
20 | 23 | ||
21 | #include <plat/mtu.h> | 24 | #include <plat/mtu.h> |
22 | 25 | ||
23 | void __iomem *mtu_base; /* ssigned by machine code */ | 26 | void __iomem *mtu_base; /* Assigned by machine code */ |
24 | 27 | ||
25 | /* | 28 | /* |
26 | * Kernel assumes that sched_clock can be called early | 29 | * Kernel assumes that sched_clock can be called early |
@@ -48,16 +51,82 @@ static struct clocksource nmdk_clksrc = { | |||
48 | /* | 51 | /* |
49 | * Override the global weak sched_clock symbol with this | 52 | * Override the global weak sched_clock symbol with this |
50 | * local implementation which uses the clocksource to get some | 53 | * local implementation which uses the clocksource to get some |
51 | * better resolution when scheduling the kernel. We accept that | 54 | * better resolution when scheduling the kernel. |
52 | * this wraps around for now, since it is just a relative time | 55 | * |
53 | * stamp. (Inspired by OMAP implementation.) | 56 | * Because the hardware timer period may be quite short |
57 | * (32.3 secs on the 133 MHz MTU timer selection on ux500) | ||
58 | * and because cnt32_to_63() needs to be called at least once per | ||
59 | * half period to work properly, a kernel keepwarm() timer is set up | ||
60 | * to ensure this requirement is always met. | ||
61 | * | ||
62 | * Also the sched_clock timer will wrap around at some point, | ||
63 | * here we set it to run continuously for a year. | ||
54 | */ | 64 | */ |
65 | #define SCHED_CLOCK_MIN_WRAP 3600*24*365 | ||
66 | static struct timer_list cnt32_to_63_keepwarm_timer; | ||
67 | static u32 sched_mult; | ||
68 | static u32 sched_shift; | ||
69 | |||
55 | unsigned long long notrace sched_clock(void) | 70 | unsigned long long notrace sched_clock(void) |
56 | { | 71 | { |
57 | return clocksource_cyc2ns(nmdk_clksrc.read( | 72 | u64 cycles; |
58 | &nmdk_clksrc), | 73 | |
59 | nmdk_clksrc.mult, | 74 | if (unlikely(!mtu_base)) |
60 | nmdk_clksrc.shift); | 75 | return 0; |
76 | |||
77 | cycles = cnt32_to_63(-readl(mtu_base + MTU_VAL(0))); | ||
78 | /* | ||
79 | * sched_mult is guaranteed to be even so will | ||
80 | * shift out bit 63 | ||
81 | */ | ||
82 | return (cycles * sched_mult) >> sched_shift; | ||
83 | } | ||
84 | |||
85 | /* Just kick sched_clock every so often */ | ||
86 | static void cnt32_to_63_keepwarm(unsigned long data) | ||
87 | { | ||
88 | mod_timer(&cnt32_to_63_keepwarm_timer, round_jiffies(jiffies + data)); | ||
89 | (void) sched_clock(); | ||
90 | } | ||
91 | |||
92 | /* | ||
93 | * Set up a timer to keep sched_clock()'s 32_to_63 algorithm warm | ||
94 | * once in half a 32bit timer wrap interval. | ||
95 | */ | ||
96 | static void __init nmdk_sched_clock_init(unsigned long rate) | ||
97 | { | ||
98 | u32 v; | ||
99 | unsigned long delta; | ||
100 | u64 days; | ||
101 | |||
102 | /* Find the appropriate mult and shift factors */ | ||
103 | clocks_calc_mult_shift(&sched_mult, &sched_shift, | ||
104 | rate, NSEC_PER_SEC, SCHED_CLOCK_MIN_WRAP); | ||
105 | /* We need to multiply by an even number to get rid of bit 63 */ | ||
106 | if (sched_mult & 1) | ||
107 | sched_mult++; | ||
108 | |||
109 | /* Let's see what we get, take max counter and scale it */ | ||
110 | days = (0xFFFFFFFFFFFFFFFFLLU * sched_mult) >> sched_shift; | ||
111 | do_div(days, NSEC_PER_SEC); | ||
112 | do_div(days, (3600*24)); | ||
113 | |||
114 | pr_info("sched_clock: using %d bits @ %lu Hz wrap in %lu days\n", | ||
115 | (64 - sched_shift), rate, (unsigned long) days); | ||
116 | |||
117 | /* | ||
118 | * Program a timer to kick us at half 32bit wraparound | ||
119 | * Formula: seconds per wrap = (2^32) / f | ||
120 | */ | ||
121 | v = 0xFFFFFFFFUL / rate; | ||
122 | /* We want half of the wrap time to keep cnt32_to_63 warm */ | ||
123 | v /= 2; | ||
124 | pr_debug("sched_clock: prescaled timer rate: %lu Hz, " | ||
125 | "initialize keepwarm timer every %d seconds\n", rate, v); | ||
126 | /* Convert seconds to jiffies */ | ||
127 | delta = msecs_to_jiffies(v*1000); | ||
128 | setup_timer(&cnt32_to_63_keepwarm_timer, cnt32_to_63_keepwarm, delta); | ||
129 | mod_timer(&cnt32_to_63_keepwarm_timer, round_jiffies(jiffies + delta)); | ||
61 | } | 130 | } |
62 | 131 | ||
63 | /* Clockevent device: use one-shot mode */ | 132 | /* Clockevent device: use one-shot mode */ |
@@ -161,13 +230,15 @@ void __init nmdk_timer_init(void) | |||
161 | writel(0, mtu_base + MTU_BGLR(0)); | 230 | writel(0, mtu_base + MTU_BGLR(0)); |
162 | writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(0)); | 231 | writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(0)); |
163 | 232 | ||
164 | /* Now the scheduling clock is ready */ | 233 | /* Now the clock source is ready */ |
165 | nmdk_clksrc.read = nmdk_read_timer; | 234 | nmdk_clksrc.read = nmdk_read_timer; |
166 | 235 | ||
167 | if (clocksource_register(&nmdk_clksrc)) | 236 | if (clocksource_register(&nmdk_clksrc)) |
168 | pr_err("timer: failed to initialize clock source %s\n", | 237 | pr_err("timer: failed to initialize clock source %s\n", |
169 | nmdk_clksrc.name); | 238 | nmdk_clksrc.name); |
170 | 239 | ||
240 | nmdk_sched_clock_init(rate); | ||
241 | |||
171 | /* Timer 1 is used for events */ | 242 | /* Timer 1 is used for events */ |
172 | 243 | ||
173 | clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE); | 244 | clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE); |
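The mult/shift conversion introduced by the hunk above can be modelled in a few lines of user-space C. This is only a sketch under stated assumptions (the 133 MHz MTU rate and a single 32-bit counter wrap are taken from the comments in the patch), not the kernel implementation:

/* User-space model of ns = (cycles * mult) >> shift, with mult forced
 * even so that bit 63 of a cnt32_to_63() value is shifted out. */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint32_t rate = 133000000;	/* assumed 133 MHz MTU clock */
	uint32_t shift = 0, mult = 0;
	uint64_t cycles, ns;

	/* Pick the largest shift for which mult still fits in 32 bits. */
	for (shift = 32; shift > 0; shift--) {
		uint64_t m = (NSEC_PER_SEC << shift) / rate;
		if (m <= 0xffffffffULL) {
			mult = (uint32_t)m;
			break;
		}
	}
	if (mult & 1)			/* keep the multiplier even */
		mult++;

	cycles = 1ULL << 32;		/* one full 32-bit counter wrap */
	ns = (cycles * mult) >> shift;
	printf("mult=%u shift=%u: 2^32 cycles ~= %llu s\n",
	       mult, shift, (unsigned long long)(ns / NSEC_PER_SEC));
	return 0;
}

For a 133 MHz rate this prints roughly 32 seconds per 32-bit wrap, which is the figure quoted in the comment and the reason the keepwarm timer has to fire at least once per half wrap.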
diff --git a/arch/arm/plat-pxa/include/plat/sdhci.h b/arch/arm/plat-pxa/include/plat/sdhci.h index e49c5b6fc4e2..1ab332e37d7d 100644 --- a/arch/arm/plat-pxa/include/plat/sdhci.h +++ b/arch/arm/plat-pxa/include/plat/sdhci.h | |||
@@ -17,6 +17,9 @@ | |||
17 | /* Require clock free running */ | 17 | /* Require clock free running */ |
18 | #define PXA_FLAG_DISABLE_CLOCK_GATING (1<<0) | 18 | #define PXA_FLAG_DISABLE_CLOCK_GATING (1<<0) |
19 | 19 | ||
20 | /* Board design supports 8-bit data on SD/SDIO BUS */ | ||
21 | #define PXA_FLAG_SD_8_BIT_CAPABLE_SLOT (1<<2) | ||
22 | |||
20 | /* | 23 | /* |
21 | * struct pxa_sdhci_platdata() - Platform device data for PXA SDHCI | 24 | * struct pxa_sdhci_platdata() - Platform device data for PXA SDHCI |
22 | * @max_speed: the maximum speed supported | 25 | * @max_speed: the maximum speed supported |
diff --git a/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c b/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c index 9793544a6ace..704175b0573f 100644 --- a/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c +++ b/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c | |||
@@ -29,8 +29,8 @@ void s3c24xx_spi_gpiocfg_bus0_gpe11_12_13(struct s3c2410_spi_info *spi, | |||
29 | } else { | 29 | } else { |
30 | s3c_gpio_cfgpin(S3C2410_GPE(13), S3C2410_GPIO_INPUT); | 30 | s3c_gpio_cfgpin(S3C2410_GPE(13), S3C2410_GPIO_INPUT); |
31 | s3c_gpio_cfgpin(S3C2410_GPE(11), S3C2410_GPIO_INPUT); | 31 | s3c_gpio_cfgpin(S3C2410_GPE(11), S3C2410_GPIO_INPUT); |
32 | s3c_gpio_cfgpull(S3C2410_GPE(11), S3C_GPIO_PULL_NONE); | 32 | s3c_gpio_setpull(S3C2410_GPE(11), S3C_GPIO_PULL_NONE); |
33 | s3c_gpio_cfgpull(S3C2410_GPE(12), S3C_GPIO_PULL_NONE); | 33 | s3c_gpio_setpull(S3C2410_GPE(12), S3C_GPIO_PULL_NONE); |
34 | s3c_gpio_cfgpull(S3C2410_GPE(13), S3C_GPIO_PULL_NONE); | 34 | s3c_gpio_setpull(S3C2410_GPE(13), S3C_GPIO_PULL_NONE); |
35 | } | 35 | } |
36 | } | 36 | } |
diff --git a/arch/arm/plat-s3c24xx/spi-bus1-gpd8_9_10.c b/arch/arm/plat-s3c24xx/spi-bus1-gpd8_9_10.c index db9e9e477ec1..72457afd6255 100644 --- a/arch/arm/plat-s3c24xx/spi-bus1-gpd8_9_10.c +++ b/arch/arm/plat-s3c24xx/spi-bus1-gpd8_9_10.c | |||
@@ -31,8 +31,8 @@ void s3c24xx_spi_gpiocfg_bus1_gpd8_9_10(struct s3c2410_spi_info *spi, | |||
31 | } else { | 31 | } else { |
32 | s3c_gpio_cfgpin(S3C2410_GPD(8), S3C2410_GPIO_INPUT); | 32 | s3c_gpio_cfgpin(S3C2410_GPD(8), S3C2410_GPIO_INPUT); |
33 | s3c_gpio_cfgpin(S3C2410_GPD(9), S3C2410_GPIO_INPUT); | 33 | s3c_gpio_cfgpin(S3C2410_GPD(9), S3C2410_GPIO_INPUT); |
34 | s3c_gpio_cfgpull(S3C2410_GPD(10), S3C_GPIO_PULL_NONE); | 34 | s3c_gpio_setpull(S3C2410_GPD(10), S3C_GPIO_PULL_NONE); |
35 | s3c_gpio_cfgpull(S3C2410_GPD(9), S3C_GPIO_PULL_NONE); | 35 | s3c_gpio_setpull(S3C2410_GPD(9), S3C_GPIO_PULL_NONE); |
36 | s3c_gpio_cfgpull(S3C2410_GPD(8), S3C_GPIO_PULL_NONE); | 36 | s3c_gpio_setpull(S3C2410_GPD(8), S3C_GPIO_PULL_NONE); |
37 | } | 37 | } |
38 | } | 38 | } |
diff --git a/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c b/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c index 8ea663a438bb..c3972b645d13 100644 --- a/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c +++ b/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c | |||
@@ -29,8 +29,8 @@ void s3c24xx_spi_gpiocfg_bus1_gpg5_6_7(struct s3c2410_spi_info *spi, | |||
29 | } else { | 29 | } else { |
30 | s3c_gpio_cfgpin(S3C2410_GPG(7), S3C2410_GPIO_INPUT); | 30 | s3c_gpio_cfgpin(S3C2410_GPG(7), S3C2410_GPIO_INPUT); |
31 | s3c_gpio_cfgpin(S3C2410_GPG(5), S3C2410_GPIO_INPUT); | 31 | s3c_gpio_cfgpin(S3C2410_GPG(5), S3C2410_GPIO_INPUT); |
32 | s3c_gpio_cfgpull(S3C2410_GPG(5), S3C_GPIO_PULL_NONE); | 32 | s3c_gpio_setpull(S3C2410_GPG(5), S3C_GPIO_PULL_NONE); |
33 | s3c_gpio_cfgpull(S3C2410_GPG(6), S3C_GPIO_PULL_NONE); | 33 | s3c_gpio_setpull(S3C2410_GPG(6), S3C_GPIO_PULL_NONE); |
34 | s3c_gpio_cfgpull(S3C2410_GPG(7), S3C_GPIO_PULL_NONE); | 34 | s3c_gpio_setpull(S3C2410_GPG(7), S3C_GPIO_PULL_NONE); |
35 | } | 35 | } |
36 | } | 36 | } |
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index ac151399ef34..1995c1712fc8 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c | |||
@@ -95,7 +95,6 @@ EXPORT_SYMBOL_GPL(s390_handle_mcck); | |||
95 | static int notrace s390_revalidate_registers(struct mci *mci) | 95 | static int notrace s390_revalidate_registers(struct mci *mci) |
96 | { | 96 | { |
97 | int kill_task; | 97 | int kill_task; |
98 | u64 tmpclock; | ||
99 | u64 zero; | 98 | u64 zero; |
100 | void *fpt_save_area, *fpt_creg_save_area; | 99 | void *fpt_save_area, *fpt_creg_save_area; |
101 | 100 | ||
@@ -214,11 +213,10 @@ static int notrace s390_revalidate_registers(struct mci *mci) | |||
214 | : "0", "cc"); | 213 | : "0", "cc"); |
215 | #endif | 214 | #endif |
216 | /* Revalidate clock comparator register */ | 215 | /* Revalidate clock comparator register */ |
217 | asm volatile( | 216 | if (S390_lowcore.clock_comparator == -1) |
218 | " stck 0(%1)\n" | 217 | set_clock_comparator(S390_lowcore.mcck_clock); |
219 | " sckc 0(%1)" | 218 | else |
220 | : "=m" (tmpclock) : "a" (&(tmpclock)) : "cc", "memory"); | 219 | set_clock_comparator(S390_lowcore.clock_comparator); |
221 | |||
222 | /* Check if old PSW is valid */ | 220 | /* Check if old PSW is valid */ |
223 | if (!mci->wp) | 221 | if (!mci->wp) |
224 | /* | 222 | /* |
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c index 752b362bf651..7c37ec359ec2 100644 --- a/arch/s390/lib/delay.c +++ b/arch/s390/lib/delay.c | |||
@@ -29,17 +29,21 @@ static void __udelay_disabled(unsigned long long usecs) | |||
29 | { | 29 | { |
30 | unsigned long mask, cr0, cr0_saved; | 30 | unsigned long mask, cr0, cr0_saved; |
31 | u64 clock_saved; | 31 | u64 clock_saved; |
32 | u64 end; | ||
32 | 33 | ||
34 | mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT; | ||
35 | end = get_clock() + (usecs << 12); | ||
33 | clock_saved = local_tick_disable(); | 36 | clock_saved = local_tick_disable(); |
34 | set_clock_comparator(get_clock() + (usecs << 12)); | ||
35 | __ctl_store(cr0_saved, 0, 0); | 37 | __ctl_store(cr0_saved, 0, 0); |
36 | cr0 = (cr0_saved & 0xffff00e0) | 0x00000800; | 38 | cr0 = (cr0_saved & 0xffff00e0) | 0x00000800; |
37 | __ctl_load(cr0 , 0, 0); | 39 | __ctl_load(cr0 , 0, 0); |
38 | mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT; | ||
39 | lockdep_off(); | 40 | lockdep_off(); |
40 | trace_hardirqs_on(); | 41 | do { |
41 | __load_psw_mask(mask); | 42 | set_clock_comparator(end); |
42 | local_irq_disable(); | 43 | trace_hardirqs_on(); |
44 | __load_psw_mask(mask); | ||
45 | local_irq_disable(); | ||
46 | } while (get_clock() < end); | ||
43 | lockdep_on(); | 47 | lockdep_on(); |
44 | __ctl_load(cr0_saved, 0, 0); | 48 | __ctl_load(cr0_saved, 0, 0); |
45 | local_tick_enable(clock_saved); | 49 | local_tick_enable(clock_saved); |
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h index 46d5179c9f49..e3c73cdd8c90 100644 --- a/arch/sh/include/asm/processor_32.h +++ b/arch/sh/include/asm/processor_32.h | |||
@@ -199,10 +199,13 @@ extern unsigned long get_wchan(struct task_struct *p); | |||
199 | #define ARCH_HAS_PREFETCHW | 199 | #define ARCH_HAS_PREFETCHW |
200 | static inline void prefetch(void *x) | 200 | static inline void prefetch(void *x) |
201 | { | 201 | { |
202 | __asm__ __volatile__ ("pref @%0\n\t" : : "r" (x) : "memory"); | 202 | __builtin_prefetch(x, 0, 3); |
203 | } | 203 | } |
204 | 204 | ||
205 | #define prefetchw(x) prefetch(x) | 205 | static inline void prefetchw(void *x) |
206 | { | ||
207 | __builtin_prefetch(x, 1, 3); | ||
208 | } | ||
206 | #endif | 209 | #endif |
207 | 210 | ||
208 | #endif /* __KERNEL__ */ | 211 | #endif /* __KERNEL__ */ |
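The prefetch change above relies on GCC's __builtin_prefetch(addr, rw, locality), where rw selects read (0) or write (1) intent and locality is a temporal-locality hint from 0 (none) to 3 (high); on targets with no prefetch instruction the builtin expands to nothing. A small illustrative, non-kernel use:

#include <stddef.h>

/* Prefetch the element roughly 16 slots ahead of the one being summed,
 * with read intent and a high-locality hint. */
long sum_with_prefetch(const long *a, size_t n)
{
	long s = 0;
	size_t i;

	for (i = 0; i < n; i++) {
		if (i + 16 < n)
			__builtin_prefetch(&a[i + 16], 0, 3);
		s += a[i];
	}
	return s;
}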
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c index 4eabc68cd753..b601fa3978d1 100644 --- a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c +++ b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c | |||
@@ -110,7 +110,7 @@ static int shoc_clk_verify_rate(struct clk *clk, unsigned long rate) | |||
110 | return 0; | 110 | return 0; |
111 | } | 111 | } |
112 | 112 | ||
113 | static int shoc_clk_set_rate(struct clk *clk, unsigned long rate, int algo_id) | 113 | static int shoc_clk_set_rate(struct clk *clk, unsigned long rate) |
114 | { | 114 | { |
115 | unsigned long frqcr3; | 115 | unsigned long frqcr3; |
116 | unsigned int tmp; | 116 | unsigned int tmp; |
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c index 81f58371613d..8c6a350df751 100644 --- a/arch/sh/kernel/sys_sh.c +++ b/arch/sh/kernel/sys_sh.c | |||
@@ -88,7 +88,7 @@ asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len, int op) | |||
88 | } | 88 | } |
89 | 89 | ||
90 | if (op & CACHEFLUSH_I) | 90 | if (op & CACHEFLUSH_I) |
91 | flush_cache_all(); | 91 | flush_icache_range(addr, addr+len); |
92 | 92 | ||
93 | up_read(¤t->mm->mmap_sem); | 93 | up_read(¤t->mm->mmap_sem); |
94 | return 0; | 94 | return 0; |
diff --git a/arch/sh/kernel/vsyscall/vsyscall-trapa.S b/arch/sh/kernel/vsyscall/vsyscall-trapa.S index 3b6eb34c43fa..3e70f851cdc6 100644 --- a/arch/sh/kernel/vsyscall/vsyscall-trapa.S +++ b/arch/sh/kernel/vsyscall/vsyscall-trapa.S | |||
@@ -8,9 +8,9 @@ __kernel_vsyscall: | |||
8 | * fill out .eh_frame -- PFM. */ | 8 | * fill out .eh_frame -- PFM. */ |
9 | .LEND_vsyscall: | 9 | .LEND_vsyscall: |
10 | .size __kernel_vsyscall,.-.LSTART_vsyscall | 10 | .size __kernel_vsyscall,.-.LSTART_vsyscall |
11 | .previous | ||
12 | 11 | ||
13 | .section .eh_frame,"a",@progbits | 12 | .section .eh_frame,"a",@progbits |
13 | .previous | ||
14 | .LCIE: | 14 | .LCIE: |
15 | .ualong .LCIE_end - .LCIE_start | 15 | .ualong .LCIE_end - .LCIE_start |
16 | .LCIE_start: | 16 | .LCIE_start: |
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index 07ec8a865c1d..e11b5fcb70eb 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig | |||
@@ -329,6 +329,18 @@ endmenu # Tilera-specific configuration | |||
329 | 329 | ||
330 | menu "Bus options" | 330 | menu "Bus options" |
331 | 331 | ||
332 | config PCI | ||
333 | bool "PCI support" | ||
334 | default y | ||
335 | select PCI_DOMAINS | ||
336 | ---help--- | ||
337 | Enable PCI root complex support, so PCIe endpoint devices can | ||
338 | be attached to the Tile chip. Many, but not all, PCI devices | ||
339 | are supported under Tilera's root complex driver. | ||
340 | |||
341 | config PCI_DOMAINS | ||
342 | bool | ||
343 | |||
332 | config NO_IOMEM | 344 | config NO_IOMEM |
333 | def_bool !PCI | 345 | def_bool !PCI |
334 | 346 | ||
diff --git a/arch/tile/include/asm/cacheflush.h b/arch/tile/include/asm/cacheflush.h index c5741da4eeac..14a3f8556ace 100644 --- a/arch/tile/include/asm/cacheflush.h +++ b/arch/tile/include/asm/cacheflush.h | |||
@@ -137,4 +137,56 @@ static inline void finv_buffer(void *buffer, size_t size) | |||
137 | mb_incoherent(); | 137 | mb_incoherent(); |
138 | } | 138 | } |
139 | 139 | ||
140 | /* | ||
141 | * Flush & invalidate a VA range that is homed remotely on a single core, | ||
142 | * waiting until the memory controller holds the flushed values. | ||
143 | */ | ||
144 | static inline void finv_buffer_remote(void *buffer, size_t size) | ||
145 | { | ||
146 | char *p; | ||
147 | int i; | ||
148 | |||
149 | /* | ||
150 | * Flush and invalidate the buffer out of the local L1/L2 | ||
151 | * and request the home cache to flush and invalidate as well. | ||
152 | */ | ||
153 | __finv_buffer(buffer, size); | ||
154 | |||
155 | /* | ||
156 | * Wait for the home cache to acknowledge that it has processed | ||
157 | * all the flush-and-invalidate requests. This does not mean | ||
158 | * that the flushed data has reached the memory controller yet, | ||
159 | * but it does mean the home cache is processing the flushes. | ||
160 | */ | ||
161 | __insn_mf(); | ||
162 | |||
163 | /* | ||
164 | * Issue a load to the last cache line, which can't complete | ||
165 | * until all the previously-issued flushes to the same memory | ||
166 | * controller have also completed. If we weren't striping | ||
167 | * memory, that one load would be sufficient, but since we may | ||
168 | * be, we also need to back up to the last load issued to | ||
169 | * another memory controller, which would be the point where | ||
170 | * we crossed an 8KB boundary (the granularity of striping | ||
171 | * across memory controllers). Keep backing up and doing this | ||
172 | * until we are before the beginning of the buffer, or have | ||
173 | * hit all the controllers. | ||
174 | */ | ||
175 | for (i = 0, p = (char *)buffer + size - 1; | ||
176 | i < (1 << CHIP_LOG_NUM_MSHIMS()) && p >= (char *)buffer; | ||
177 | ++i) { | ||
178 | const unsigned long STRIPE_WIDTH = 8192; | ||
179 | |||
180 | /* Force a load instruction to issue. */ | ||
181 | *(volatile char *)p; | ||
182 | |||
183 | /* Jump to end of previous stripe. */ | ||
184 | p -= STRIPE_WIDTH; | ||
185 | p = (char *)((unsigned long)p | (STRIPE_WIDTH - 1)); | ||
186 | } | ||
187 | |||
188 | /* Wait for the loads (and thus flushes) to have completed. */ | ||
189 | __insn_mf(); | ||
190 | } | ||
191 | |||
140 | #endif /* _ASM_TILE_CACHEFLUSH_H */ | 192 | #endif /* _ASM_TILE_CACHEFLUSH_H */ |
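One subtle step in finv_buffer_remote() above is how it backs up by one memory-controller stripe: subtracting STRIPE_WIDTH and then OR-ing with STRIPE_WIDTH - 1 lands exactly on the last byte of the previous 8 KB stripe. A tiny stand-alone check with a hypothetical address:

#include <stdio.h>

int main(void)
{
	const unsigned long STRIPE_WIDTH = 8192;
	unsigned long p = 0x10000000UL + 12345;	/* hypothetical buffer offset */

	p -= STRIPE_WIDTH;		/* jump into the previous stripe */
	p |= STRIPE_WIDTH - 1;		/* snap to its last byte */
	printf("0x%lx\n", p);		/* prints 0x10001fff */
	return 0;
}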
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h index ee43328713ab..d3cbb9b14cbe 100644 --- a/arch/tile/include/asm/io.h +++ b/arch/tile/include/asm/io.h | |||
@@ -55,9 +55,6 @@ extern void iounmap(volatile void __iomem *addr); | |||
55 | #define ioremap_writethrough(physaddr, size) ioremap(physaddr, size) | 55 | #define ioremap_writethrough(physaddr, size) ioremap(physaddr, size) |
56 | #define ioremap_fullcache(physaddr, size) ioremap(physaddr, size) | 56 | #define ioremap_fullcache(physaddr, size) ioremap(physaddr, size) |
57 | 57 | ||
58 | void __iomem *ioport_map(unsigned long port, unsigned int len); | ||
59 | extern inline void ioport_unmap(void __iomem *addr) {} | ||
60 | |||
61 | #define mmiowb() | 58 | #define mmiowb() |
62 | 59 | ||
63 | /* Conversion between virtual and physical mappings. */ | 60 | /* Conversion between virtual and physical mappings. */ |
@@ -189,12 +186,22 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, | |||
189 | * we never run, uses them unconditionally. | 186 | * we never run, uses them unconditionally. |
190 | */ | 187 | */ |
191 | 188 | ||
192 | static inline int ioport_panic(void) | 189 | static inline long ioport_panic(void) |
193 | { | 190 | { |
194 | panic("inb/outb and friends do not exist on tile"); | 191 | panic("inb/outb and friends do not exist on tile"); |
195 | return 0; | 192 | return 0; |
196 | } | 193 | } |
197 | 194 | ||
195 | static inline void __iomem *ioport_map(unsigned long port, unsigned int len) | ||
196 | { | ||
197 | return (void __iomem *) ioport_panic(); | ||
198 | } | ||
199 | |||
200 | static inline void ioport_unmap(void __iomem *addr) | ||
201 | { | ||
202 | ioport_panic(); | ||
203 | } | ||
204 | |||
198 | static inline u8 inb(unsigned long addr) | 205 | static inline u8 inb(unsigned long addr) |
199 | { | 206 | { |
200 | return ioport_panic(); | 207 | return ioport_panic(); |
diff --git a/arch/tile/include/asm/pci-bridge.h b/arch/tile/include/asm/pci-bridge.h deleted file mode 100644 index e853b0e2793b..000000000000 --- a/arch/tile/include/asm/pci-bridge.h +++ /dev/null | |||
@@ -1,117 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_PCI_BRIDGE_H | ||
16 | #define _ASM_TILE_PCI_BRIDGE_H | ||
17 | |||
18 | #include <linux/ioport.h> | ||
19 | #include <linux/pci.h> | ||
20 | |||
21 | struct device_node; | ||
22 | struct pci_controller; | ||
23 | |||
24 | /* | ||
25 | * pci_io_base returns the memory address at which you can access | ||
26 | * the I/O space for PCI bus number `bus' (or NULL on error). | ||
27 | */ | ||
28 | extern void __iomem *pci_bus_io_base(unsigned int bus); | ||
29 | extern unsigned long pci_bus_io_base_phys(unsigned int bus); | ||
30 | extern unsigned long pci_bus_mem_base_phys(unsigned int bus); | ||
31 | |||
32 | /* Allocate a new PCI host bridge structure */ | ||
33 | extern struct pci_controller *pcibios_alloc_controller(void); | ||
34 | |||
35 | /* Helper function for setting up resources */ | ||
36 | extern void pci_init_resource(struct resource *res, unsigned long start, | ||
37 | unsigned long end, int flags, char *name); | ||
38 | |||
39 | /* Get the PCI host controller for a bus */ | ||
40 | extern struct pci_controller *pci_bus_to_hose(int bus); | ||
41 | |||
42 | /* | ||
43 | * Structure of a PCI controller (host bridge) | ||
44 | */ | ||
45 | struct pci_controller { | ||
46 | int index; /* PCI domain number */ | ||
47 | struct pci_bus *root_bus; | ||
48 | |||
49 | int first_busno; | ||
50 | int last_busno; | ||
51 | |||
52 | int hv_cfg_fd[2]; /* config{0,1} fds for this PCIe controller */ | ||
53 | int hv_mem_fd; /* fd to Hypervisor for MMIO operations */ | ||
54 | |||
55 | struct pci_ops *ops; | ||
56 | |||
57 | int irq_base; /* Base IRQ from the Hypervisor */ | ||
58 | int plx_gen1; /* flag for PLX Gen 1 configuration */ | ||
59 | |||
60 | /* Address ranges that are routed to this controller/bridge. */ | ||
61 | struct resource mem_resources[3]; | ||
62 | }; | ||
63 | |||
64 | static inline struct pci_controller *pci_bus_to_host(struct pci_bus *bus) | ||
65 | { | ||
66 | return bus->sysdata; | ||
67 | } | ||
68 | |||
69 | extern void setup_indirect_pci_nomap(struct pci_controller *hose, | ||
70 | void __iomem *cfg_addr, void __iomem *cfg_data); | ||
71 | extern void setup_indirect_pci(struct pci_controller *hose, | ||
72 | u32 cfg_addr, u32 cfg_data); | ||
73 | extern void setup_grackle(struct pci_controller *hose); | ||
74 | |||
75 | extern unsigned char common_swizzle(struct pci_dev *, unsigned char *); | ||
76 | |||
77 | /* | ||
78 | * The following code swizzles for exactly one bridge. The routine | ||
79 | * common_swizzle below handles multiple bridges. But there are a | ||
80 | * some boards that don't follow the PCI spec's suggestion so we | ||
81 | * break this piece out separately. | ||
82 | */ | ||
83 | static inline unsigned char bridge_swizzle(unsigned char pin, | ||
84 | unsigned char idsel) | ||
85 | { | ||
86 | return (((pin-1) + idsel) % 4) + 1; | ||
87 | } | ||
88 | |||
89 | /* | ||
90 | * The following macro is used to lookup irqs in a standard table | ||
91 | * format for those PPC systems that do not already have PCI | ||
92 | * interrupts properly routed. | ||
93 | */ | ||
94 | /* FIXME - double check this */ | ||
95 | #define PCI_IRQ_TABLE_LOOKUP ({ \ | ||
96 | long _ctl_ = -1; \ | ||
97 | if (idsel >= min_idsel && idsel <= max_idsel && pin <= irqs_per_slot) \ | ||
98 | _ctl_ = pci_irq_table[idsel - min_idsel][pin-1]; \ | ||
99 | _ctl_; \ | ||
100 | }) | ||
101 | |||
102 | /* | ||
103 | * Scan the buses below a given PCI host bridge and assign suitable | ||
104 | * resources to all devices found. | ||
105 | */ | ||
106 | extern int pciauto_bus_scan(struct pci_controller *, int); | ||
107 | |||
108 | #ifdef CONFIG_PCI | ||
109 | extern unsigned long pci_address_to_pio(phys_addr_t address); | ||
110 | #else | ||
111 | static inline unsigned long pci_address_to_pio(phys_addr_t address) | ||
112 | { | ||
113 | return (unsigned long)-1; | ||
114 | } | ||
115 | #endif | ||
116 | |||
117 | #endif /* _ASM_TILE_PCI_BRIDGE_H */ | ||
diff --git a/arch/tile/include/asm/pci.h b/arch/tile/include/asm/pci.h index b0c15da2d5d5..c3fc458a0d32 100644 --- a/arch/tile/include/asm/pci.h +++ b/arch/tile/include/asm/pci.h | |||
@@ -15,7 +15,29 @@ | |||
15 | #ifndef _ASM_TILE_PCI_H | 15 | #ifndef _ASM_TILE_PCI_H |
16 | #define _ASM_TILE_PCI_H | 16 | #define _ASM_TILE_PCI_H |
17 | 17 | ||
18 | #include <asm/pci-bridge.h> | 18 | #include <linux/pci.h> |
19 | |||
20 | /* | ||
21 | * Structure of a PCI controller (host bridge) | ||
22 | */ | ||
23 | struct pci_controller { | ||
24 | int index; /* PCI domain number */ | ||
25 | struct pci_bus *root_bus; | ||
26 | |||
27 | int first_busno; | ||
28 | int last_busno; | ||
29 | |||
30 | int hv_cfg_fd[2]; /* config{0,1} fds for this PCIe controller */ | ||
31 | int hv_mem_fd; /* fd to Hypervisor for MMIO operations */ | ||
32 | |||
33 | struct pci_ops *ops; | ||
34 | |||
35 | int irq_base; /* Base IRQ from the Hypervisor */ | ||
36 | int plx_gen1; /* flag for PLX Gen 1 configuration */ | ||
37 | |||
38 | /* Address ranges that are routed to this controller/bridge. */ | ||
39 | struct resource mem_resources[3]; | ||
40 | }; | ||
19 | 41 | ||
20 | /* | 42 | /* |
21 | * The hypervisor maps the entirety of CPA-space as bus addresses, so | 43 | * The hypervisor maps the entirety of CPA-space as bus addresses, so |
@@ -24,56 +46,12 @@ | |||
24 | */ | 46 | */ |
25 | #define PCI_DMA_BUS_IS_PHYS 1 | 47 | #define PCI_DMA_BUS_IS_PHYS 1 |
26 | 48 | ||
27 | struct pci_controller *pci_bus_to_hose(int bus); | ||
28 | unsigned char __init common_swizzle(struct pci_dev *dev, unsigned char *pinp); | ||
29 | int __init tile_pci_init(void); | 49 | int __init tile_pci_init(void); |
30 | void pci_iounmap(struct pci_dev *dev, void __iomem *addr); | ||
31 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); | ||
32 | void __devinit pcibios_fixup_bus(struct pci_bus *bus); | ||
33 | 50 | ||
34 | int __devinit _tile_cfg_read(struct pci_controller *hose, | 51 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); |
35 | int bus, | 52 | static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {} |
36 | int slot, | ||
37 | int function, | ||
38 | int offset, | ||
39 | int size, | ||
40 | u32 *val); | ||
41 | int __devinit _tile_cfg_write(struct pci_controller *hose, | ||
42 | int bus, | ||
43 | int slot, | ||
44 | int function, | ||
45 | int offset, | ||
46 | int size, | ||
47 | u32 val); | ||
48 | 53 | ||
49 | /* | 54 | void __devinit pcibios_fixup_bus(struct pci_bus *bus); |
50 | * These are used to to config reads and writes in the early stages of | ||
51 | * setup before the driver infrastructure has been set up enough to be | ||
52 | * able to do config reads and writes. | ||
53 | */ | ||
54 | #define early_cfg_read(where, size, value) \ | ||
55 | _tile_cfg_read(controller, \ | ||
56 | current_bus, \ | ||
57 | pci_slot, \ | ||
58 | pci_fn, \ | ||
59 | where, \ | ||
60 | size, \ | ||
61 | value) | ||
62 | |||
63 | #define early_cfg_write(where, size, value) \ | ||
64 | _tile_cfg_write(controller, \ | ||
65 | current_bus, \ | ||
66 | pci_slot, \ | ||
67 | pci_fn, \ | ||
68 | where, \ | ||
69 | size, \ | ||
70 | value) | ||
71 | |||
72 | |||
73 | |||
74 | #define PCICFG_BYTE 1 | ||
75 | #define PCICFG_WORD 2 | ||
76 | #define PCICFG_DWORD 4 | ||
77 | 55 | ||
78 | #define TILE_NUM_PCIE 2 | 56 | #define TILE_NUM_PCIE 2 |
79 | 57 | ||
@@ -88,33 +66,33 @@ static inline int pci_proc_domain(struct pci_bus *bus) | |||
88 | } | 66 | } |
89 | 67 | ||
90 | /* | 68 | /* |
91 | * I/O space is currently not supported. | 69 | * pcibios_assign_all_busses() tells whether or not the bus numbers |
70 | * should be reassigned, in case the BIOS didn't do it correctly, or | ||
71 | * in case we don't have a BIOS and we want to let Linux do it. | ||
92 | */ | 72 | */ |
73 | static inline int pcibios_assign_all_busses(void) | ||
74 | { | ||
75 | return 1; | ||
76 | } | ||
93 | 77 | ||
94 | #define TILE_PCIE_LOWER_IO 0x0 | 78 | /* |
95 | #define TILE_PCIE_UPPER_IO 0x10000 | 79 | * No special bus mastering setup handling. |
96 | #define TILE_PCIE_PCIE_IO_SIZE 0x0000FFFF | 80 | */ |
97 | |||
98 | #define _PAGE_NO_CACHE 0 | ||
99 | #define _PAGE_GUARDED 0 | ||
100 | |||
101 | |||
102 | #define pcibios_assign_all_busses() pci_assign_all_buses | ||
103 | extern int pci_assign_all_buses; | ||
104 | |||
105 | static inline void pcibios_set_master(struct pci_dev *dev) | 81 | static inline void pcibios_set_master(struct pci_dev *dev) |
106 | { | 82 | { |
107 | /* No special bus mastering setup handling */ | ||
108 | } | 83 | } |
109 | 84 | ||
110 | #define PCIBIOS_MIN_MEM 0 | 85 | #define PCIBIOS_MIN_MEM 0 |
111 | #define PCIBIOS_MIN_IO TILE_PCIE_LOWER_IO | 86 | #define PCIBIOS_MIN_IO 0 |
112 | 87 | ||
113 | /* | 88 | /* |
114 | * This flag tells if the platform is TILEmpower that needs | 89 | * This flag tells if the platform is TILEmpower that needs |
115 | * special configuration for the PLX switch chip. | 90 | * special configuration for the PLX switch chip. |
116 | */ | 91 | */ |
117 | extern int blade_pci; | 92 | extern int tile_plx_gen1; |
93 | |||
94 | /* Use any cpu for PCI. */ | ||
95 | #define cpumask_of_pcibus(bus) cpu_online_mask | ||
118 | 96 | ||
119 | /* implement the pci_ DMA API in terms of the generic device dma_ one */ | 97 | /* implement the pci_ DMA API in terms of the generic device dma_ one */ |
120 | #include <asm-generic/pci-dma-compat.h> | 98 | #include <asm-generic/pci-dma-compat.h> |
@@ -122,7 +100,4 @@ extern int blade_pci; | |||
122 | /* generic pci stuff */ | 100 | /* generic pci stuff */ |
123 | #include <asm-generic/pci.h> | 101 | #include <asm-generic/pci.h> |
124 | 102 | ||
125 | /* Use any cpu for PCI. */ | ||
126 | #define cpumask_of_pcibus(bus) cpu_online_mask | ||
127 | |||
128 | #endif /* _ASM_TILE_PCI_H */ | 103 | #endif /* _ASM_TILE_PCI_H */ |
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h index 1747ff3946b2..a9e7c8760334 100644 --- a/arch/tile/include/asm/processor.h +++ b/arch/tile/include/asm/processor.h | |||
@@ -292,8 +292,18 @@ extern int kstack_hash; | |||
292 | /* Are we using huge pages in the TLB for kernel data? */ | 292 | /* Are we using huge pages in the TLB for kernel data? */ |
293 | extern int kdata_huge; | 293 | extern int kdata_huge; |
294 | 294 | ||
295 | /* Support standard Linux prefetching. */ | ||
296 | #define ARCH_HAS_PREFETCH | ||
297 | #define prefetch(x) __builtin_prefetch(x) | ||
295 | #define PREFETCH_STRIDE CHIP_L2_LINE_SIZE() | 298 | #define PREFETCH_STRIDE CHIP_L2_LINE_SIZE() |
296 | 299 | ||
300 | /* Bring a value into the L1D, faulting the TLB if necessary. */ | ||
301 | #ifdef __tilegx__ | ||
302 | #define prefetch_L1(x) __insn_prefetch_l1_fault((void *)(x)) | ||
303 | #else | ||
304 | #define prefetch_L1(x) __insn_prefetch_L1((void *)(x)) | ||
305 | #endif | ||
306 | |||
297 | #else /* __ASSEMBLY__ */ | 307 | #else /* __ASSEMBLY__ */ |
298 | 308 | ||
299 | /* Do some slow action (e.g. read a slow SPR). */ | 309 | /* Do some slow action (e.g. read a slow SPR). */ |
diff --git a/arch/tile/include/hv/drv_xgbe_impl.h b/arch/tile/include/hv/drv_xgbe_impl.h new file mode 100644 index 000000000000..3a73b2b44913 --- /dev/null +++ b/arch/tile/include/hv/drv_xgbe_impl.h | |||
@@ -0,0 +1,300 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /** | ||
16 | * @file drivers/xgbe/impl.h | ||
17 | * Implementation details for the NetIO library. | ||
18 | */ | ||
19 | |||
20 | #ifndef __DRV_XGBE_IMPL_H__ | ||
21 | #define __DRV_XGBE_IMPL_H__ | ||
22 | |||
23 | #include <hv/netio_errors.h> | ||
24 | #include <hv/netio_intf.h> | ||
25 | #include <hv/drv_xgbe_intf.h> | ||
26 | |||
27 | |||
28 | /** How many groups we have (log2). */ | ||
29 | #define LOG2_NUM_GROUPS (12) | ||
30 | /** How many groups we have. */ | ||
31 | #define NUM_GROUPS (1 << LOG2_NUM_GROUPS) | ||
32 | |||
33 | /** Number of output requests we'll buffer per tile. */ | ||
34 | #define EPP_REQS_PER_TILE (32) | ||
35 | |||
36 | /** Words used in an eDMA command without checksum acceleration. */ | ||
37 | #define EDMA_WDS_NO_CSUM 8 | ||
38 | /** Words used in an eDMA command with checksum acceleration. */ | ||
39 | #define EDMA_WDS_CSUM 10 | ||
40 | /** Total available words in the eDMA command FIFO. */ | ||
41 | #define EDMA_WDS_TOTAL 128 | ||
42 | |||
43 | |||
44 | /* | ||
45 | * FIXME: These definitions are internal and should have underscores! | ||
46 | * NOTE: The actual numeric values here are intentional and allow us to | ||
47 | * optimize the concept "if small ... else if large ... else ...", by | ||
48 | * checking for the low bit being set, and then for non-zero. | ||
49 | * These are used as array indices, so they must have the values (0, 1, 2) | ||
50 | * in some order. | ||
51 | */ | ||
52 | #define SIZE_SMALL (1) /**< Small packet queue. */ | ||
53 | #define SIZE_LARGE (2) /**< Large packet queue. */ | ||
54 | #define SIZE_JUMBO (0) /**< Jumbo packet queue. */ | ||
55 | |||
56 | /** The number of "SIZE_xxx" values. */ | ||
57 | #define NETIO_NUM_SIZES 3 | ||
58 | |||
59 | |||
60 | /* | ||
61 | * Default numbers of packets for IPP drivers. These values are chosen | ||
62 | * such that CIPP1 will not overflow its L2 cache. | ||
63 | */ | ||
64 | |||
65 | /** The default number of small packets. */ | ||
66 | #define NETIO_DEFAULT_SMALL_PACKETS 2750 | ||
67 | /** The default number of large packets. */ | ||
68 | #define NETIO_DEFAULT_LARGE_PACKETS 2500 | ||
69 | /** The default number of jumbo packets. */ | ||
70 | #define NETIO_DEFAULT_JUMBO_PACKETS 250 | ||
71 | |||
72 | |||
73 | /** Log2 of the size of a memory arena. */ | ||
74 | #define NETIO_ARENA_SHIFT 24 /* 16 MB */ | ||
75 | /** Size of a memory arena. */ | ||
76 | #define NETIO_ARENA_SIZE (1 << NETIO_ARENA_SHIFT) | ||
77 | |||
78 | |||
79 | /** A queue of packets. | ||
80 | * | ||
81 | * This structure partially defines a queue of packets waiting to be | ||
82 | * processed. The queue as a whole is written to by an interrupt handler and | ||
83 | * read by non-interrupt code; this data structure is what's touched by the | ||
84 | * interrupt handler. The other part of the queue state, the read offset, is | ||
85 | * kept in user space, not in hypervisor space, so it is in a separate data | ||
86 | * structure. | ||
87 | * | ||
88 | * The read offset (__packet_receive_read in the user part of the queue | ||
89 | * structure) points to the next packet to be read. When the read offset is | ||
90 | * equal to the write offset, the queue is empty; therefore the queue must | ||
91 | * contain one more slot than the required maximum queue size. | ||
92 | * | ||
93 | * Here's an example of all 3 state variables and what they mean. All | ||
94 | * pointers move left to right. | ||
95 | * | ||
96 | * @code | ||
97 | * I I V V V V I I I I | ||
98 | * 0 1 2 3 4 5 6 7 8 9 10 | ||
99 | * ^ ^ ^ ^ | ||
100 | * | | | | ||
101 | * | | __last_packet_plus_one | ||
102 | * | __buffer_write | ||
103 | * __packet_receive_read | ||
104 | * @endcode | ||
105 | * | ||
106 | * This queue has 10 slots, and thus can hold 9 packets (_last_packet_plus_one | ||
107 | * = 10). The read pointer is at 2, and the write pointer is at 6; thus, | ||
108 | * there are valid, unread packets in slots 2, 3, 4, and 5. The remaining | ||
109 | * slots are invalid (do not contain a packet). | ||
110 | */ | ||
111 | typedef struct { | ||
112 | /** Byte offset of the next notify packet to be written: zero for the first | ||
113 | * packet on the queue, sizeof (netio_pkt_t) for the second packet on the | ||
114 | * queue, etc. */ | ||
115 | volatile uint32_t __packet_write; | ||
116 | |||
117 | /** Offset of the packet after the last valid packet (i.e., when any | ||
118 | * pointer is incremented to this value, it wraps back to zero). */ | ||
119 | uint32_t __last_packet_plus_one; | ||
120 | } | ||
121 | __netio_packet_queue_t; | ||
122 | |||
123 | |||
124 | /** A queue of buffers. | ||
125 | * | ||
126 | * This structure partially defines a queue of empty buffers which have been | ||
127 | * obtained via requests to the IPP. (The elements of the queue are packet | ||
128 | * handles, which are transformed into a full netio_pkt_t when the buffer is | ||
129 | * retrieved.) The queue as a whole is written to by an interrupt handler and | ||
130 | * read by non-interrupt code; this data structure is what's touched by the | ||
131 | * interrupt handler. The other parts of the queue state, the read offset and | ||
132 | * requested write offset, are kept in user space, not in hypervisor space, so | ||
133 | * they are in a separate data structure. | ||
134 | * | ||
135 | * The read offset (__buffer_read in the user part of the queue structure) | ||
136 | * points to the next buffer to be read. When the read offset is equal to the | ||
137 | * write offset, the queue is empty; therefore the queue must contain one more | ||
138 | * slot than the required maximum queue size. | ||
139 | * | ||
140 | * The requested write offset (__buffer_requested_write in the user part of | ||
141 | * the queue structure) points to the slot which will hold the next buffer we | ||
142 | * request from the IPP, once we get around to sending such a request. When | ||
143 | * the requested write offset is equal to the write offset, no requests for | ||
144 | * new buffers are outstanding; when the requested write offset is one greater | ||
145 | * than the read offset, no more requests may be sent. | ||
146 | * | ||
147 | * Note that, unlike the packet_queue, the buffer_queue places incoming | ||
148 | * buffers at decreasing addresses. This makes the check for "is it time to | ||
149 | * wrap the buffer pointer" cheaper in the assembly code which receives new | ||
150 | * buffers, and means that the value which defines the queue size, | ||
151 | * __last_buffer, is different than in the packet queue. Also, the offset | ||
152 | * used in the packet_queue is already scaled by the size of a packet; here we | ||
153 | * use unscaled slot indices for the offsets. (These differences are | ||
154 | * historical, and in the future it's possible that the packet_queue will look | ||
155 | * more like this queue.) | ||
156 | * | ||
157 | * @code | ||
158 | * Here's an example of all 4 state variables and what they mean. Remember: | ||
159 | * all pointers move right to left. | ||
160 | * | ||
161 | * V V V I I R R V V V | ||
162 | * 0 1 2 3 4 5 6 7 8 9 | ||
163 | * ^ ^ ^ ^ | ||
164 | * | | | | | ||
165 | * | | | __last_buffer | ||
166 | * | | __buffer_write | ||
167 | * | __buffer_requested_write | ||
168 | * __buffer_read | ||
169 | * @endcode | ||
170 | * | ||
171 | * This queue has 10 slots, and thus can hold 9 buffers (_last_buffer = 9). | ||
172 | * The read pointer is at 2, and the write pointer is at 6; thus, there are | ||
173 | * valid, unread buffers in slots 2, 1, 0, 9, 8, and 7. The requested write | ||
174 | * pointer is at 4; thus, requests have been made to the IPP for buffers which | ||
175 | * will be placed in slots 6 and 5 when they arrive. Finally, the remaining | ||
176 | * slots are invalid (do not contain a buffer). | ||
177 | */ | ||
178 | typedef struct | ||
179 | { | ||
180 | /** Ordinal number of the next buffer to be written: 0 for the first slot in | ||
181 | * the queue, 1 for the second slot in the queue, etc. */ | ||
182 | volatile uint32_t __buffer_write; | ||
183 | |||
184 | /** Ordinal number of the last buffer (i.e., when any pointer is decremented | ||
185 | * below zero, it is reloaded with this value). */ | ||
186 | uint32_t __last_buffer; | ||
187 | } | ||
188 | __netio_buffer_queue_t; | ||
189 | |||
190 | |||
191 | /** | ||
192 | * An object for providing Ethernet packets to a process. | ||
193 | */ | ||
194 | typedef struct __netio_queue_impl_t | ||
195 | { | ||
196 | /** The queue of packets waiting to be received. */ | ||
197 | __netio_packet_queue_t __packet_receive_queue; | ||
198 | /** The intr bit mask that IDs this device. */ | ||
199 | unsigned int __intr_id; | ||
200 | /** Offset to queues of empty buffers, one per size. */ | ||
201 | uint32_t __buffer_queue[NETIO_NUM_SIZES]; | ||
202 | /** The address of the first EPP tile, or -1 if no EPP. */ | ||
203 | /* ISSUE: Actually this is always "0" or "~0". */ | ||
204 | uint32_t __epp_location; | ||
205 | /** The queue ID that this queue represents. */ | ||
206 | unsigned int __queue_id; | ||
207 | /** Number of acknowledgements received. */ | ||
208 | volatile uint32_t __acks_received; | ||
209 | /** Last completion number received for packet_sendv. */ | ||
210 | volatile uint32_t __last_completion_rcv; | ||
211 | /** Number of packets allowed to be outstanding. */ | ||
212 | uint32_t __max_outstanding; | ||
213 | /** First VA available for packets. */ | ||
214 | void* __va_0; | ||
215 | /** First VA in second range available for packets. */ | ||
216 | void* __va_1; | ||
217 | /** Padding to align the "__packets" field to the size of a netio_pkt_t. */ | ||
218 | uint32_t __padding[3]; | ||
219 | /** The packets themselves. */ | ||
220 | netio_pkt_t __packets[0]; | ||
221 | } | ||
222 | netio_queue_impl_t; | ||
223 | |||
224 | |||
225 | /** | ||
226 | * An object for managing the user end of a NetIO queue. | ||
227 | */ | ||
228 | typedef struct __netio_queue_user_impl_t | ||
229 | { | ||
230 | /** The next incoming packet to be read. */ | ||
231 | uint32_t __packet_receive_read; | ||
232 | /** The next empty buffers to be read, one index per size. */ | ||
233 | uint8_t __buffer_read[NETIO_NUM_SIZES]; | ||
234 | /** Where the empty buffer we next request from the IPP will go, one index | ||
235 | * per size. */ | ||
236 | uint8_t __buffer_requested_write[NETIO_NUM_SIZES]; | ||
237 | /** PCIe interface flag. */ | ||
238 | uint8_t __pcie; | ||
239 | /** Number of packets left to be received before we send a credit update. */ | ||
240 | uint32_t __receive_credit_remaining; | ||
241 | /** Value placed in __receive_credit_remaining when it reaches zero. */ | ||
242 | uint32_t __receive_credit_interval; | ||
243 | /** First fast I/O routine index. */ | ||
244 | uint32_t __fastio_index; | ||
245 | /** Number of acknowledgements expected. */ | ||
246 | uint32_t __acks_outstanding; | ||
247 | /** Last completion number requested. */ | ||
248 | uint32_t __last_completion_req; | ||
249 | /** File descriptor for driver. */ | ||
250 | int __fd; | ||
251 | } | ||
252 | netio_queue_user_impl_t; | ||
253 | |||
254 | |||
255 | #define NETIO_GROUP_CHUNK_SIZE 64 /**< Max # groups in one IPP request */ | ||
256 | #define NETIO_BUCKET_CHUNK_SIZE 64 /**< Max # buckets in one IPP request */ | ||
257 | |||
258 | |||
259 | /** Internal structure used to convey packet send information to the | ||
260 | * hypervisor. FIXME: Actually, it's not used for that anymore, but | ||
261 | * netio_packet_send() still uses it internally. | ||
262 | */ | ||
263 | typedef struct | ||
264 | { | ||
265 | uint16_t flags; /**< Packet flags (__NETIO_SEND_FLG_xxx) */ | ||
266 | uint16_t transfer_size; /**< Size of packet */ | ||
267 | uint32_t va; /**< VA of start of packet */ | ||
268 | __netio_pkt_handle_t handle; /**< Packet handle */ | ||
269 | uint32_t csum0; /**< First checksum word */ | ||
270 | uint32_t csum1; /**< Second checksum word */ | ||
271 | } | ||
272 | __netio_send_cmd_t; | ||
273 | |||
274 | |||
275 | /** Flags used in two contexts: | ||
276 | * - As the "flags" member in the __netio_send_cmd_t, above; used only | ||
277 | * for netio_pkt_send_{prepare,commit}. | ||
278 | * - As part of the flags passed to the various send packet fast I/O calls. | ||
279 | */ | ||
280 | |||
281 | /** Need acknowledgement on this packet. Note that some code in the | ||
282 | * normal send_pkt fast I/O handler assumes that this is equal to 1. */ | ||
283 | #define __NETIO_SEND_FLG_ACK 0x1 | ||
284 | |||
285 | /** Do checksum on this packet. (Only used with the __netio_send_cmd_t; | ||
286 | * normal packet sends use a special fast I/O index to denote checksumming, | ||
287 | * and multi-segment sends test the checksum descriptor.) */ | ||
288 | #define __NETIO_SEND_FLG_CSUM 0x2 | ||
289 | |||
290 | /** Get a completion on this packet. Only used with multi-segment sends. */ | ||
291 | #define __NETIO_SEND_FLG_COMPLETION 0x4 | ||
292 | |||
293 | /** Position of the number-of-extra-segments value in the flags word. | ||
294 | Only used with multi-segment sends. */ | ||
295 | #define __NETIO_SEND_FLG_XSEG_SHIFT 3 | ||
296 | |||
297 | /** Width of the number-of-extra-segments value in the flags word. */ | ||
298 | #define __NETIO_SEND_FLG_XSEG_WIDTH 2 | ||
299 | |||
300 | #endif /* __DRV_XGBE_IMPL_H__ */ | ||
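The __netio_packet_queue_t and __netio_buffer_queue_t comments above both describe the usual single-producer ring convention: read offset equal to write offset means empty, so a queue of N slots holds at most N - 1 entries. A user-space sketch of that convention (an illustration only, not NetIO library code, and ignoring the byte-scaled and descending-address variants used there):

#include <stdint.h>
#include <stdbool.h>

#define QUEUE_SLOTS 10		/* holds at most 9 entries, as in the example above */

struct ring {
	volatile uint32_t write;	/* advanced by the interrupt-side producer */
	uint32_t read;			/* advanced by the non-interrupt consumer */
};

static inline bool ring_empty(const struct ring *r)
{
	return r->read == r->write;
}

static inline bool ring_full(const struct ring *r)
{
	return (r->write + 1) % QUEUE_SLOTS == r->read;
}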
diff --git a/arch/tile/include/hv/drv_xgbe_intf.h b/arch/tile/include/hv/drv_xgbe_intf.h new file mode 100644 index 000000000000..146e47d5334b --- /dev/null +++ b/arch/tile/include/hv/drv_xgbe_intf.h | |||
@@ -0,0 +1,615 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /** | ||
16 | * @file drv_xgbe_intf.h | ||
17 | * Interface to the hypervisor XGBE driver. | ||
18 | */ | ||
19 | |||
20 | #ifndef __DRV_XGBE_INTF_H__ | ||
21 | #define __DRV_XGBE_INTF_H__ | ||
22 | |||
23 | /** | ||
24 | * An object for forwarding VAs and PAs to the hypervisor. | ||
25 | * @ingroup types | ||
26 | * | ||
27 | * This allows the supervisor to specify a number of areas of memory to | ||
28 | * store packet buffers. | ||
29 | */ | ||
30 | typedef struct | ||
31 | { | ||
32 | /** The physical address of the memory. */ | ||
33 | HV_PhysAddr pa; | ||
34 | /** Page table entry for the memory. This is only used to derive the | ||
35 | * memory's caching mode; the PA bits are ignored. */ | ||
36 | HV_PTE pte; | ||
37 | /** The virtual address of the memory. */ | ||
38 | HV_VirtAddr va; | ||
39 | /** Size (in bytes) of the memory area. */ | ||
40 | int size; | ||
41 | |||
42 | } | ||
43 | netio_ipp_address_t; | ||
44 | |||
45 | /** The various pread/pwrite offsets into the hypervisor-level driver. | ||
46 | * @ingroup types | ||
47 | */ | ||
48 | typedef enum | ||
49 | { | ||
50 | /** Inform the Linux driver of the address of the NetIO arena memory. | ||
51 | * This offset is actually only used to convey information from netio | ||
52 | * to the Linux driver; it never makes it from there to the hypervisor. | ||
53 | * Write-only; takes a uint32_t specifying the VA address. */ | ||
54 | NETIO_FIXED_ADDR = 0x5000000000000000ULL, | ||
55 | |||
56 | /** Inform the Linux driver of the size of the NetIO arena memory. | ||
57 | * This offset is actually only used to convey information from netio | ||
58 | * to the Linux driver; it never makes it from there to the hypervisor. | ||
59 | * Write-only; takes a uint32_t specifying the VA size. */ | ||
60 | NETIO_FIXED_SIZE = 0x5100000000000000ULL, | ||
61 | |||
62 | /** Register current tile with IPP. Write then read: write, takes a | ||
63 | * netio_input_config_t, read returns a pointer to a netio_queue_impl_t. */ | ||
64 | NETIO_IPP_INPUT_REGISTER_OFF = 0x6000000000000000ULL, | ||
65 | |||
66 | /** Unregister current tile from IPP. Write-only, takes a dummy argument. */ | ||
67 | NETIO_IPP_INPUT_UNREGISTER_OFF = 0x6100000000000000ULL, | ||
68 | |||
69 | /** Start packets flowing. Write-only, takes a dummy argument. */ | ||
70 | NETIO_IPP_INPUT_INIT_OFF = 0x6200000000000000ULL, | ||
71 | |||
72 | /** Stop packets flowing. Write-only, takes a dummy argument. */ | ||
73 | NETIO_IPP_INPUT_UNINIT_OFF = 0x6300000000000000ULL, | ||
74 | |||
75 | /** Configure group (typically we group on VLAN). Write-only: takes an | ||
76 | * array of netio_group_t's, low 24 bits of the offset is the base group | ||
77 | * number times the size of a netio_group_t. */ | ||
78 | NETIO_IPP_INPUT_GROUP_CFG_OFF = 0x6400000000000000ULL, | ||
79 | |||
80 | /** Configure bucket. Write-only: takes an array of netio_bucket_t's, low | ||
81 | * 24 bits of the offset is the base bucket number times the size of a | ||
82 | * netio_bucket_t. */ | ||
83 | NETIO_IPP_INPUT_BUCKET_CFG_OFF = 0x6500000000000000ULL, | ||
84 | |||
85 | /** Get/set a parameter. Read or write: read or write data is the parameter | ||
86 | * value, low 32 bits of the offset is a __netio_getset_offset_t. */ | ||
87 | NETIO_IPP_PARAM_OFF = 0x6600000000000000ULL, | ||
88 | |||
89 | /** Get fast I/O index. Read-only; returns a 4-byte base index value. */ | ||
90 | NETIO_IPP_GET_FASTIO_OFF = 0x6700000000000000ULL, | ||
91 | |||
92 | /** Configure hijack IP address. Packets with this IPv4 dest address | ||
93 | * go to bucket NETIO_NUM_BUCKETS - 1. Write-only: takes an IP address | ||
94 | * in some standard form. FIXME: Define the form! */ | ||
95 | NETIO_IPP_INPUT_HIJACK_CFG_OFF = 0x6800000000000000ULL, | ||
96 | |||
97 | /** | ||
98 | * Offsets beyond this point are reserved for the supervisor (although that | ||
99 | * enforcement must be done by the supervisor driver itself). | ||
100 | */ | ||
101 | NETIO_IPP_USER_MAX_OFF = 0x6FFFFFFFFFFFFFFFULL, | ||
102 | |||
103 | /** Register I/O memory. Write-only, takes a netio_ipp_address_t. */ | ||
104 | NETIO_IPP_IOMEM_REGISTER_OFF = 0x7000000000000000ULL, | ||
105 | |||
106 | /** Unregister I/O memory. Write-only, takes a netio_ipp_address_t. */ | ||
107 | NETIO_IPP_IOMEM_UNREGISTER_OFF = 0x7100000000000000ULL, | ||
108 | |||
109 | /* Offsets greater than 0x7FFFFFFF can't be used directly from Linux | ||
110 | * userspace code due to limitations in the pread/pwrite syscalls. */ | ||
111 | |||
112 | /** Drain LIPP buffers. */ | ||
113 | NETIO_IPP_DRAIN_OFF = 0xFA00000000000000ULL, | ||
114 | |||
115 | /** Supply a netio_ipp_address_t to be used as shared memory for the | ||
116 | * LEPP command queue. */ | ||
117 | NETIO_EPP_SHM_OFF = 0xFB00000000000000ULL, | ||
118 | |||
119 | /* 0xFC... is currently unused. */ | ||
120 | |||
121 | /** Stop IPP/EPP tiles. Write-only, takes a dummy argument. */ | ||
122 | NETIO_IPP_STOP_SHIM_OFF = 0xFD00000000000000ULL, | ||
123 | |||
124 | /** Start IPP/EPP tiles. Write-only, takes a dummy argument. */ | ||
125 | NETIO_IPP_START_SHIM_OFF = 0xFE00000000000000ULL, | ||
126 | |||
127 | /** Supply packet arena. Write-only, takes an array of | ||
128 | * netio_ipp_address_t values. */ | ||
129 | NETIO_IPP_ADDRESS_OFF = 0xFF00000000000000ULL, | ||
130 | } netio_hv_offset_t; | ||
131 | |||
132 | /** Extract the base offset from an offset */ | ||
133 | #define NETIO_BASE_OFFSET(off) ((off) & 0xFF00000000000000ULL) | ||
134 | /** Extract the local offset from an offset */ | ||
135 | #define NETIO_LOCAL_OFFSET(off) ((off) & 0x00FFFFFFFFFFFFFFULL) | ||
136 | |||
137 | |||
138 | /** | ||
139 | * Get/set offset. | ||
140 | */ | ||
141 | typedef union | ||
142 | { | ||
143 | struct | ||
144 | { | ||
145 | uint64_t addr:48; /**< Class-specific address */ | ||
146 | unsigned int class:8; /**< Class (e.g., NETIO_PARAM) */ | ||
147 | unsigned int opcode:8; /**< High 8 bits of NETIO_IPP_PARAM_OFF */ | ||
148 | } | ||
149 | bits; /**< Bitfields */ | ||
150 | uint64_t word; /**< Aggregated value to use as the offset */ | ||
151 | } | ||
152 | __netio_getset_offset_t; | ||
153 | |||
154 | /** | ||
155 | * Fast I/O index offsets (must be contiguous). | ||
156 | */ | ||
157 | typedef enum | ||
158 | { | ||
159 | NETIO_FASTIO_ALLOCATE = 0, /**< Get empty packet buffer */ | ||
160 | NETIO_FASTIO_FREE_BUFFER = 1, /**< Give buffer back to IPP */ | ||
161 | NETIO_FASTIO_RETURN_CREDITS = 2, /**< Give credits to IPP */ | ||
162 | NETIO_FASTIO_SEND_PKT_NOCK = 3, /**< Send a packet, no checksum */ | ||
163 | NETIO_FASTIO_SEND_PKT_CK = 4, /**< Send a packet, with checksum */ | ||
164 | NETIO_FASTIO_SEND_PKT_VEC = 5, /**< Send a vector of packets */ | ||
165 | NETIO_FASTIO_SENDV_PKT = 6, /**< Sendv one packet */ | ||
166 | NETIO_FASTIO_NUM_INDEX = 7, /**< Total number of fast I/O indices */ | ||
167 | } netio_fastio_index_t; | ||
168 | |||
169 | /** 3-word return type for Fast I/O call. */ | ||
170 | typedef struct | ||
171 | { | ||
172 | int err; /**< Error code. */ | ||
173 | uint32_t val0; /**< Value. Meaning depends upon the specific call. */ | ||
174 | uint32_t val1; /**< Value. Meaning depends upon the specific call. */ | ||
175 | } netio_fastio_rv3_t; | ||
176 | |||
177 | /** 0-argument fast I/O call */ | ||
178 | int __netio_fastio0(uint32_t fastio_index); | ||
179 | /** 1-argument fast I/O call */ | ||
180 | int __netio_fastio1(uint32_t fastio_index, uint32_t arg0); | ||
181 | /** 3-argument fast I/O call, 2-word return value */ | ||
182 | netio_fastio_rv3_t __netio_fastio3_rv3(uint32_t fastio_index, uint32_t arg0, | ||
183 | uint32_t arg1, uint32_t arg2); | ||
184 | /** 4-argument fast I/O call */ | ||
185 | int __netio_fastio4(uint32_t fastio_index, uint32_t arg0, uint32_t arg1, | ||
186 | uint32_t arg2, uint32_t arg3); | ||
187 | /** 6-argument fast I/O call */ | ||
188 | int __netio_fastio6(uint32_t fastio_index, uint32_t arg0, uint32_t arg1, | ||
189 | uint32_t arg2, uint32_t arg3, uint32_t arg4, uint32_t arg5); | ||
190 | /** 9-argument fast I/O call */ | ||
191 | int __netio_fastio9(uint32_t fastio_index, uint32_t arg0, uint32_t arg1, | ||
192 | uint32_t arg2, uint32_t arg3, uint32_t arg4, uint32_t arg5, | ||
193 | uint32_t arg6, uint32_t arg7, uint32_t arg8); | ||
194 | |||
195 | /** Allocate an empty packet. | ||
196 | * @param fastio_index Fast I/O index. | ||
197 | * @param size Size of the packet to allocate. | ||
198 | */ | ||
199 | #define __netio_fastio_allocate(fastio_index, size) \ | ||
200 | __netio_fastio1((fastio_index) + NETIO_FASTIO_ALLOCATE, size) | ||
201 | |||
202 | /** Free a buffer. | ||
203 | * @param fastio_index Fast I/O index. | ||
204 | * @param handle Handle for the packet to free. | ||
205 | */ | ||
206 | #define __netio_fastio_free_buffer(fastio_index, handle) \ | ||
207 | __netio_fastio1((fastio_index) + NETIO_FASTIO_FREE_BUFFER, handle) | ||
208 | |||
209 | /** Increment our receive credits. | ||
210 | * @param fastio_index Fast I/O index. | ||
211 | * @param credits Number of credits to add. | ||
212 | */ | ||
213 | #define __netio_fastio_return_credits(fastio_index, credits) \ | ||
214 | __netio_fastio1((fastio_index) + NETIO_FASTIO_RETURN_CREDITS, credits) | ||
215 | |||
216 | /** Send packet, no checksum. | ||
217 | * @param fastio_index Fast I/O index. | ||
218 | * @param ackflag Nonzero if we want an ack. | ||
219 | * @param size Size of the packet. | ||
220 | * @param va Virtual address of start of packet. | ||
221 | * @param handle Packet handle. | ||
222 | */ | ||
223 | #define __netio_fastio_send_pkt_nock(fastio_index, ackflag, size, va, handle) \ | ||
224 | __netio_fastio4((fastio_index) + NETIO_FASTIO_SEND_PKT_NOCK, ackflag, \ | ||
225 | size, va, handle) | ||
226 | |||
227 | /** Send packet, calculate checksum. | ||
228 | * @param fastio_index Fast I/O index. | ||
229 | * @param ackflag Nonzero if we want an ack. | ||
230 | * @param size Size of the packet. | ||
231 | * @param va Virtual address of start of packet. | ||
232 | * @param handle Packet handle. | ||
233 | * @param csum0 Shim checksum header. | ||
234 | * @param csum1 Checksum seed. | ||
235 | */ | ||
236 | #define __netio_fastio_send_pkt_ck(fastio_index, ackflag, size, va, handle, \ | ||
237 | csum0, csum1) \ | ||
238 | __netio_fastio6((fastio_index) + NETIO_FASTIO_SEND_PKT_CK, ackflag, \ | ||
239 | size, va, handle, csum0, csum1) | ||
240 | |||
241 | |||
242 | /** Format for the "csum0" argument to the __netio_fastio_send routines | ||
243 | * and LEPP. Note that this is currently exactly identical to the | ||
244 | * ShimProtocolOffloadHeader. | ||
245 | */ | ||
246 | typedef union | ||
247 | { | ||
248 | struct | ||
249 | { | ||
250 | unsigned int start_byte:7; /**< The first byte to be checksummed */ | ||
251 | unsigned int count:14; /**< Number of bytes to be checksummed. */ | ||
252 | unsigned int destination_byte:7; /**< The byte to write the checksum to. */ | ||
253 | unsigned int reserved:4; /**< Reserved. */ | ||
254 | } bits; /**< Decomposed method of access. */ | ||
255 | unsigned int word; /**< To send out the IDN. */ | ||
256 | } __netio_checksum_header_t; | ||
257 | |||
258 | |||
259 | /** Sendv packet with 1 or 2 segments. | ||
260 | * @param fastio_index Fast I/O index. | ||
261 | * @param flags Ack/csum/notify flags in low 3 bits; number of segments minus | ||
262 | * 1 in next 2 bits; expected checksum in high 16 bits. | ||
263 | * @param confno Confirmation number to request, if notify flag set. | ||
264 | * @param csum0 Checksum descriptor; if zero, no checksum. | ||
265 | * @param va_F Virtual address of first segment. | ||
266 | * @param va_L Virtual address of last segment, if 2 segments. | ||
267 | * @param len_F_L Length of first segment in low 16 bits; length of last | ||
268 | * segment, if 2 segments, in high 16 bits. | ||
269 | */ | ||
270 | #define __netio_fastio_sendv_pkt_1_2(fastio_index, flags, confno, csum0, \ | ||
271 | va_F, va_L, len_F_L) \ | ||
272 | __netio_fastio6((fastio_index) + NETIO_FASTIO_SENDV_PKT, flags, confno, \ | ||
273 | csum0, va_F, va_L, len_F_L) | ||
274 | |||
275 | /** Send packet on PCIe interface. | ||
276 | * @param fastio_index Fast I/O index. | ||
277 | * @param flags Ack/csum/notify flags in low 3 bits. | ||
278 | * @param confno Confirmation number to request, if notify flag set. | ||
279 | * @param csum0 Checksum descriptor; Hard wired 0, not needed for PCIe. | ||
280 | * @param va_F Virtual address of the packet buffer. | ||
281 | * @param va_L Virtual address of last segment, if 2 segments. Hard wired 0. | ||
282 | * @param len_F_L Length of the packet buffer in low 16 bits. | ||
283 | */ | ||
284 | #define __netio_fastio_send_pcie_pkt(fastio_index, flags, confno, csum0, \ | ||
285 | va_F, va_L, len_F_L) \ | ||
286 | __netio_fastio6((fastio_index) + PCIE_FASTIO_SENDV_PKT, flags, confno, \ | ||
287 | csum0, va_F, va_L, len_F_L) | ||
288 | |||
289 | /** Sendv packet with 3 or 4 segments. | ||
290 | * @param fastio_index Fast I/O index. | ||
291 | * @param flags Ack/csum/notify flags in low 3 bits; number of segments minus | ||
292 | * 1 in next 2 bits; expected checksum in high 16 bits. | ||
293 | * @param confno Confirmation number to request, if notify flag set. | ||
294 | * @param csum0 Checksum descriptor; if zero, no checksum. | ||
295 | * @param va_F Virtual address of first segment. | ||
296 | * @param va_L Virtual address of last segment (third segment if 3 segments, | ||
297 | * fourth segment if 4 segments). | ||
298 | * @param len_F_L Length of first segment in low 16 bits; length of last | ||
299 | * segment in high 16 bits. | ||
300 | * @param va_M0 Virtual address of "middle 0" segment; this segment is sent | ||
301 | * second when there are three segments, and third if there are four. | ||
302 | * @param va_M1 Virtual address of "middle 1" segment; this segment is sent | ||
303 | * second when there are four segments. | ||
304 | * @param len_M0_M1 Length of middle 0 segment in low 16 bits; length of middle | ||
305 | * 1 segment, if 4 segments, in high 16 bits. | ||
306 | */ | ||
307 | #define __netio_fastio_sendv_pkt_3_4(fastio_index, flags, confno, csum0, va_F, \ | ||
308 | va_L, len_F_L, va_M0, va_M1, len_M0_M1) \ | ||
309 | __netio_fastio9((fastio_index) + NETIO_FASTIO_SENDV_PKT, flags, confno, \ | ||
310 | csum0, va_F, va_L, len_F_L, va_M0, va_M1, len_M0_M1) | ||
311 | |||
312 | /** Send vector of packets. | ||
313 | * @param fastio_index Fast I/O index. | ||
314 | * @param seqno Number of packets transmitted so far on this interface; | ||
315 | * used to decide which packets should be acknowledged. | ||
316 | * @param nentries Number of entries in vector. | ||
317 | * @param va Virtual address of start of vector entry array. | ||
318 | * @return 3-word netio_fastio_rv3_t structure. The structure's err member | ||
319 | * is an error code, or zero if no error. The val0 member is the | ||
320 | * updated value of seqno; it has been incremented by 1 for each | ||
321 | * packet sent. That increment may be less than nentries if an | ||
322 | error occurred, or if some of the entries in the vector contain | ||
323 | * handles equal to NETIO_PKT_HANDLE_NONE. The val1 member is the | ||
324 | * updated value of nentries; it has been decremented by 1 for each | ||
325 | * vector entry processed. Again, that decrement may be less than | ||
326 | * nentries (leaving the returned value positive) if an error | ||
327 | * occurred. | ||
328 | */ | ||
329 | #define __netio_fastio_send_pkt_vec(fastio_index, seqno, nentries, va) \ | ||
330 | __netio_fastio3_rv3((fastio_index) + NETIO_FASTIO_SEND_PKT_VEC, seqno, \ | ||
331 | nentries, va) | ||
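The macro above is the only fast I/O wrapper that returns the 3-word netio_fastio_rv3_t rather than a plain int. A minimal usage sketch follows; the function name and out-parameters are hypothetical, and the index/address arguments are assumed to come from the normal NetIO setup path.

/* Hedged sketch: issue one vector send and report the updated state back
 * to the caller, following the return-value convention documented above. */
static int send_vector_once(uint32_t fastio_index, uint32_t seqno,
                            uint32_t nentries, uint32_t vec_va,
                            uint32_t* new_seqno, uint32_t* remaining)
{
    netio_fastio_rv3_t rv =
        __netio_fastio_send_pkt_vec(fastio_index, seqno, nentries, vec_va);

    *new_seqno = rv.val0;   /* seqno advanced once per packet actually sent. */
    *remaining = rv.val1;   /* nentries reduced once per vector entry consumed. */
    return rv.err;          /* Zero if no error, per the comment above. */
}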
332 | |||
333 | |||
334 | /** An egress DMA command for LEPP. */ | ||
335 | typedef struct | ||
336 | { | ||
337 | /** Is this a TSO transfer? | ||
338 | * | ||
339 | * NOTE: This field is always 0, to distinguish it from | ||
340 | * lepp_tso_cmd_t. It must come first! | ||
341 | */ | ||
342 | uint8_t tso : 1; | ||
343 | |||
344 | /** Unused padding bits. */ | ||
345 | uint8_t _unused : 3; | ||
346 | |||
347 | /** Should this packet be sent directly from caches instead of DRAM, | ||
348 | * using hash-for-home to locate the packet data? | ||
349 | */ | ||
350 | uint8_t hash_for_home : 1; | ||
351 | |||
352 | /** Should we compute a checksum? */ | ||
353 | uint8_t compute_checksum : 1; | ||
354 | |||
355 | /** Is this the final buffer for this packet? | ||
356 | * | ||
357 | * A single packet can be split over several input buffers (a "gather" | ||
358 | * operation). This flag indicates that this is the last buffer | ||
359 | * in a packet. | ||
360 | */ | ||
361 | uint8_t end_of_packet : 1; | ||
362 | |||
363 | /** Should LEPP advance 'comp_busy' when this DMA is fully finished? */ | ||
364 | uint8_t send_completion : 1; | ||
365 | |||
366 | /** High bits of Client Physical Address of the start of the buffer | ||
367 | * to be egressed. | ||
368 | * | ||
369 | * NOTE: Only 6 bits are actually needed here, as CPAs are | ||
370 | * currently 38 bits. So two bits could be scavenged from this. | ||
371 | */ | ||
372 | uint8_t cpa_hi; | ||
373 | |||
374 | /** The number of bytes to be egressed. */ | ||
375 | uint16_t length; | ||
376 | |||
377 | /** Low 32 bits of Client Physical Address of the start of the buffer | ||
378 | * to be egressed. | ||
379 | */ | ||
380 | uint32_t cpa_lo; | ||
381 | |||
382 | /** Checksum information (only used if 'compute_checksum'). */ | ||
383 | __netio_checksum_header_t checksum_data; | ||
384 | |||
385 | } lepp_cmd_t; | ||
386 | |||
387 | |||
388 | /** A chunk of physical memory for a TSO egress. */ | ||
389 | typedef struct | ||
390 | { | ||
391 | /** The low bits of the CPA. */ | ||
392 | uint32_t cpa_lo; | ||
393 | /** The high bits of the CPA. */ | ||
394 | uint16_t cpa_hi : 15; | ||
395 | /** Should this packet be sent directly from caches instead of DRAM, | ||
396 | * using hash-for-home to locate the packet data? | ||
397 | */ | ||
398 | uint16_t hash_for_home : 1; | ||
399 | /** The length in bytes. */ | ||
400 | uint16_t length; | ||
401 | } lepp_frag_t; | ||
402 | |||
403 | |||
404 | /** An LEPP command that handles TSO. */ | ||
405 | typedef struct | ||
406 | { | ||
407 | /** Is this a TSO transfer? | ||
408 | * | ||
409 | * NOTE: This field is always 1, to distinguish it from | ||
410 | * lepp_cmd_t. It must come first! | ||
411 | */ | ||
412 | uint8_t tso : 1; | ||
413 | |||
414 | /** Unused padding bits. */ | ||
415 | uint8_t _unused : 7; | ||
416 | |||
417 | /** Size of the header[] array in bytes. It must be in the range | ||
418 | * [40, 127], which are the smallest header for a TCP packet over | ||
419 | * Ethernet and the maximum possible prepend size supported by | ||
420 | * hardware, respectively. Note that the array storage must be | ||
421 | * padded out to a multiple of four bytes so that the following | ||
422 | * LEPP command is aligned properly. | ||
423 | */ | ||
424 | uint8_t header_size; | ||
425 | |||
426 | /** Byte offset of the IP header in header[]. */ | ||
427 | uint8_t ip_offset; | ||
428 | |||
429 | /** Byte offset of the TCP header in header[]. */ | ||
430 | uint8_t tcp_offset; | ||
431 | |||
432 | /** The number of bytes to use for the payload of each packet, | ||
433 | * except of course the last one, which may not have enough bytes. | ||
434 | * This means that each Ethernet packet except the last will have a | ||
435 | * size of header_size + payload_size. | ||
436 | */ | ||
437 | uint16_t payload_size; | ||
438 | |||
439 | /** The length of the 'frags' array that follows this struct. */ | ||
440 | uint16_t num_frags; | ||
441 | |||
442 | /** The actual frags. */ | ||
443 | lepp_frag_t frags[0 /* Variable-sized; num_frags entries. */]; | ||
444 | |||
445 | /* | ||
446 | * The packet header template logically follows frags[], | ||
447 | * but you can't declare that in C. | ||
448 | * | ||
449 | * uint32_t header[header_size_in_words_rounded_up]; | ||
450 | */ | ||
451 | |||
452 | } lepp_tso_cmd_t; | ||
453 | |||
454 | |||
455 | /** An LEPP completion ring entry. */ | ||
456 | typedef void* lepp_comp_t; | ||
457 | |||
458 | |||
459 | /** Maximum number of frags for one TSO command. This is adapted from | ||
460 | * linux's "MAX_SKB_FRAGS", and presumably over-estimates by one, for | ||
461 | * our page size of exactly 65536. We add one for a "body" fragment. | ||
462 | */ | ||
463 | #define LEPP_MAX_FRAGS (65536 / HV_PAGE_SIZE_SMALL + 2 + 1) | ||
464 | |||
465 | /** Total number of bytes needed for an lepp_tso_cmd_t. */ | ||
466 | #define LEPP_TSO_CMD_SIZE(num_frags, header_size) \ | ||
467 | (sizeof(lepp_tso_cmd_t) + \ | ||
468 | (num_frags) * sizeof(lepp_frag_t) + \ | ||
469 | (((header_size) + 3) & -4)) | ||
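Since lepp_cmd_t and lepp_tso_cmd_t both place the one-bit tso field first, a consumer of the cmds[] ring (declared below in lepp_queue_t) can inspect that bit to decide which layout, and therefore which size, it is looking at. A minimal sketch, assuming commands start at the byte offsets maintained in cmd_head/cmd_tail:

/* Hedged sketch: compute the size of the command at byte offset 'off'.
 * The leading 'tso' bit selects between the two layouts defined above;
 * LEPP_TSO_CMD_SIZE already rounds the header template up to a multiple
 * of four bytes via ((header_size + 3) & -4). */
static unsigned int command_size_at(const char* cmds, unsigned int off)
{
    const lepp_tso_cmd_t* tso = (const lepp_tso_cmd_t*)(cmds + off);

    if (tso->tso)
        return LEPP_TSO_CMD_SIZE(tso->num_frags, tso->header_size);

    return sizeof(lepp_cmd_t);
}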
470 | |||
471 | /** The size of the lepp "cmd" queue. */ | ||
472 | #define LEPP_CMD_QUEUE_BYTES \ | ||
473 | (((CHIP_L2_CACHE_SIZE() - 2 * CHIP_L2_LINE_SIZE()) / \ | ||
474 | (sizeof(lepp_cmd_t) + sizeof(lepp_comp_t))) * sizeof(lepp_cmd_t)) | ||
475 | |||
476 | /** The largest possible command that can go in lepp_queue_t::cmds[]. */ | ||
477 | #define LEPP_MAX_CMD_SIZE LEPP_TSO_CMD_SIZE(LEPP_MAX_FRAGS, 128) | ||
478 | |||
479 | /** The largest possible value of lepp_queue_t::cmd_{head, tail} (inclusive). | ||
480 | */ | ||
481 | #define LEPP_CMD_LIMIT \ | ||
482 | (LEPP_CMD_QUEUE_BYTES - LEPP_MAX_CMD_SIZE) | ||
483 | |||
484 | /** The maximum number of completions in an LEPP queue. */ | ||
485 | #define LEPP_COMP_QUEUE_SIZE \ | ||
486 | ((LEPP_CMD_LIMIT + sizeof(lepp_cmd_t) - 1) / sizeof(lepp_cmd_t)) | ||
487 | |||
488 | /** Increment an index modulo the queue size. */ | ||
489 | #define LEPP_QINC(var) \ | ||
490 | (var = __insn_mnz(var - (LEPP_COMP_QUEUE_SIZE - 1), var + 1)) | ||
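Assuming __insn_mnz(a, b) is the Tile "move if nonzero" intrinsic (yielding b when a is nonzero and 0 otherwise), LEPP_QINC is a branch-free wrap-around increment. A portable equivalent, shown only to make the intent explicit:

/* Hedged equivalent of LEPP_QINC under the __insn_mnz semantics assumed
 * above: bump the index, wrapping from LEPP_COMP_QUEUE_SIZE - 1 back to 0. */
#define LEPP_QINC_PORTABLE(var) \
    ((var) = ((var) == LEPP_COMP_QUEUE_SIZE - 1) ? 0 : (var) + 1)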
491 | |||
492 | /** A queue used to convey egress commands from the client to LEPP. */ | ||
493 | typedef struct | ||
494 | { | ||
495 | /** Index of first completion not yet processed by user code. | ||
496 | * If this is equal to comp_busy, there are no such completions. | ||
497 | * | ||
498 | * NOTE: This is only read/written by the user. | ||
499 | */ | ||
500 | unsigned int comp_head; | ||
501 | |||
502 | /** Index of first completion record not yet completed. | ||
503 | * If this is equal to comp_tail, there are no such completions. | ||
504 | * This index gets advanced (modulo LEPP_COMP_QUEUE_SIZE) whenever | ||
505 | * a command with the 'completion' bit set is finished. | ||
506 | * | ||
507 | * NOTE: This is only written by LEPP, only read by the user. | ||
508 | */ | ||
509 | volatile unsigned int comp_busy; | ||
510 | |||
511 | /** Index of the first empty slot in the completion ring. | ||
512 | * Entries from this up to but not including comp_head (in ring order) | ||
513 | * can be filled in with completion data. | ||
514 | * | ||
515 | * NOTE: This is only read/written by the user. | ||
516 | */ | ||
517 | unsigned int comp_tail; | ||
518 | |||
519 | /** Byte index of first command enqueued for LEPP but not yet processed. | ||
520 | * | ||
521 | * This is always divisible by sizeof(void*) and always <= LEPP_CMD_LIMIT. | ||
522 | * | ||
523 | * NOTE: LEPP advances this counter as soon as it no longer needs | ||
524 | * the cmds[] storage for this entry, but the transfer is not actually | ||
525 | * complete (i.e. the buffer pointed to by the command is no longer | ||
526 | * needed) until comp_busy advances. | ||
527 | * | ||
528 | * If this is equal to cmd_tail, the ring is empty. | ||
529 | * | ||
530 | * NOTE: This is only written by LEPP, only read by the user. | ||
531 | */ | ||
532 | volatile unsigned int cmd_head; | ||
533 | |||
534 | /** Byte index of first empty slot in the command ring. This field can | ||
535 | * be incremented up to but not equal to cmd_head (because that would | ||
536 | * mean the ring is empty). | ||
537 | * | ||
538 | * This is always divisible by sizeof(void*) and always <= LEPP_CMD_LIMIT. | ||
539 | * | ||
540 | * NOTE: This is read/written by the user, only read by LEPP. | ||
541 | */ | ||
542 | volatile unsigned int cmd_tail; | ||
543 | |||
544 | /** A ring of variable-sized egress DMA commands. | ||
545 | * | ||
546 | * NOTE: Only written by the user, only read by LEPP. | ||
547 | */ | ||
548 | char cmds[LEPP_CMD_QUEUE_BYTES] | ||
549 | __attribute__((aligned(CHIP_L2_LINE_SIZE()))); | ||
550 | |||
551 | /** A ring of user completion data. | ||
552 | * NOTE: Only read/written by the user. | ||
553 | */ | ||
554 | lepp_comp_t comps[LEPP_COMP_QUEUE_SIZE] | ||
555 | __attribute__((aligned(CHIP_L2_LINE_SIZE()))); | ||
556 | } lepp_queue_t; | ||
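Putting the three completion indices together: entries in [comp_head, comp_busy) have been finished by LEPP and are waiting for the user. A minimal reaping sketch; the callback name is hypothetical.

/* Hedged sketch: hand finished completions back to client code and advance
 * comp_head. comp_busy is written by LEPP, so it is read once up front. */
static void reap_completions(lepp_queue_t* q,
                             void (*handle_completion)(lepp_comp_t))
{
    unsigned int head = q->comp_head;
    unsigned int busy = q->comp_busy;        /* Volatile read. */

    while (head != busy)
    {
        handle_completion(q->comps[head]);
        LEPP_QINC(head);                     /* Wrap modulo the comp ring. */
    }
    q->comp_head = head;
}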
557 | |||
558 | |||
559 | /** An internal helper function for determining the number of entries | ||
560 | * available in a ring buffer, given that there is one sentinel. | ||
561 | */ | ||
562 | static inline unsigned int | ||
563 | _lepp_num_free_slots(unsigned int head, unsigned int tail) | ||
564 | { | ||
565 | /* | ||
566 | * One entry is reserved for use as a sentinel, to distinguish | ||
567 | * "empty" from "full". So we compute | ||
568 | * (head - tail - 1) % LEPP_COMP_QUEUE_SIZE, but without using a slow % operation. | ||
569 | */ | ||
570 | return (head - tail - 1) + ((head <= tail) ? LEPP_COMP_QUEUE_SIZE : 0); | ||
571 | } | ||
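A short worked example of the sentinel arithmetic above, assuming a hypothetical ring size of 8:

/* With LEPP_COMP_QUEUE_SIZE == 8, head == 2 and tail == 6:
 *   (2 - 6 - 1) + 8 == 3 free slots.
 * The producer may fill slots 6, 7 and 0; slot 1 is left empty as the
 * sentinel, so the tail never catches up to the head and a full ring
 * remains distinguishable from an empty one. */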
572 | |||
573 | |||
574 | /** Returns how many new comp entries can be enqueued. */ | ||
575 | static inline unsigned int | ||
576 | lepp_num_free_comp_slots(const lepp_queue_t* q) | ||
577 | { | ||
578 | return _lepp_num_free_slots(q->comp_head, q->comp_tail); | ||
579 | } | ||
580 | |||
581 | static inline int | ||
582 | lepp_qsub(int v1, int v2) | ||
583 | { | ||
584 | int delta = v1 - v2; | ||
585 | return delta + ((delta >> 31) & LEPP_COMP_QUEUE_SIZE); | ||
586 | } | ||
587 | |||
588 | |||
589 | /** FIXME: Check this from linux, via a new "pwrite()" call. */ | ||
590 | #define LIPP_VERSION 1 | ||
591 | |||
592 | |||
593 | /** We use exactly two bytes of alignment padding. */ | ||
594 | #define LIPP_PACKET_PADDING 2 | ||
595 | |||
596 | /** The minimum size of a "small" buffer (including the padding). */ | ||
597 | #define LIPP_SMALL_PACKET_SIZE 128 | ||
598 | |||
599 | /* | ||
600 | * NOTE: The following two values should total to less than around | ||
601 | * 13582, to keep the total size used for "lipp_state_t" below 64K. | ||
602 | */ | ||
603 | |||
604 | /** The maximum number of "small" buffers. | ||
605 | * This is enough for 53 network cpus with 128 credits. Note that | ||
606 | * if these are exhausted, we will fall back to using large buffers. | ||
607 | */ | ||
608 | #define LIPP_SMALL_BUFFERS 6785 | ||
609 | |||
610 | /** The maximum number of "large" buffers. | ||
611 | * This is enough for 53 network cpus with 128 credits. | ||
612 | */ | ||
613 | #define LIPP_LARGE_BUFFERS 6785 | ||
614 | |||
615 | #endif /* __DRV_XGBE_INTF_H__ */ | ||
diff --git a/arch/tile/include/hv/netio_errors.h b/arch/tile/include/hv/netio_errors.h new file mode 100644 index 000000000000..e1591bff61b5 --- /dev/null +++ b/arch/tile/include/hv/netio_errors.h | |||
@@ -0,0 +1,122 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /** | ||
16 | * Error codes returned from NetIO routines. | ||
17 | */ | ||
18 | |||
19 | #ifndef __NETIO_ERRORS_H__ | ||
20 | #define __NETIO_ERRORS_H__ | ||
21 | |||
22 | /** | ||
23 | * @addtogroup error | ||
24 | * | ||
25 | * @brief The error codes returned by NetIO functions. | ||
26 | * | ||
27 | * NetIO functions return 0 (defined as ::NETIO_NO_ERROR) on success, and | ||
28 | * a negative value if an error occurs. | ||
29 | * | ||
30 | * In cases where a NetIO function failed due to an error reported by | ||
31 | * system libraries, the error code will be the negation of the | ||
32 | * system errno at the time of failure. The @ref netio_strerror() | ||
33 | * function will deliver error strings for both NetIO and system error | ||
34 | * codes. | ||
35 | * | ||
36 | * @{ | ||
37 | */ | ||
38 | |||
39 | /** The set of all NetIO errors. */ | ||
40 | typedef enum | ||
41 | { | ||
42 | /** Operation successfully completed. */ | ||
43 | NETIO_NO_ERROR = 0, | ||
44 | |||
45 | /** A packet was successfully retrieved from an input queue. */ | ||
46 | NETIO_PKT = 0, | ||
47 | |||
48 | /** Largest NetIO error number. */ | ||
49 | NETIO_ERR_MAX = -701, | ||
50 | |||
51 | /** The tile is not registered with the IPP. */ | ||
52 | NETIO_NOT_REGISTERED = -701, | ||
53 | |||
54 | /** No packet was available to retrieve from the input queue. */ | ||
55 | NETIO_NOPKT = -702, | ||
56 | |||
57 | /** The requested function is not implemented. */ | ||
58 | NETIO_NOT_IMPLEMENTED = -703, | ||
59 | |||
60 | /** On a registration operation, the target queue already has the maximum | ||
61 | * number of tiles registered for it, and no more may be added. On a | ||
62 | * packet send operation, the output queue is full and nothing more can | ||
63 | * be queued until some of the queued packets are actually transmitted. */ | ||
64 | NETIO_QUEUE_FULL = -704, | ||
65 | |||
66 | /** The calling process or thread is not bound to exactly one CPU. */ | ||
67 | NETIO_BAD_AFFINITY = -705, | ||
68 | |||
69 | /** Cannot allocate memory on requested controllers. */ | ||
70 | NETIO_CANNOT_HOME = -706, | ||
71 | |||
72 | /** On a registration operation, the IPP specified is not configured | ||
73 | * to support the options requested; for instance, the application | ||
74 | * wants a specific type of tagged headers which the configured IPP | ||
75 | * doesn't support. Or, the supplied configuration information is | ||
76 | * not self-consistent, or is out of range; for instance, specifying | ||
77 | * both NETIO_RECV and NETIO_NO_RECV, or asking for more than | ||
78 | * NETIO_MAX_SEND_BUFFERS to be preallocated. On a VLAN or bucket | ||
79 | * configure operation, the number of items, or the base item, was | ||
80 | * out of range. | ||
81 | */ | ||
82 | NETIO_BAD_CONFIG = -707, | ||
83 | |||
84 | /** Too many tiles have registered to transmit packets. */ | ||
85 | NETIO_TOOMANY_XMIT = -708, | ||
86 | |||
87 | /** Packet transmission was attempted on a queue which was registered | ||
88 | with transmit disabled. */ | ||
89 | NETIO_UNREG_XMIT = -709, | ||
90 | |||
91 | /** This tile is already registered with the IPP. */ | ||
92 | NETIO_ALREADY_REGISTERED = -710, | ||
93 | |||
94 | /** The Ethernet link is down. The application should try again later. */ | ||
95 | NETIO_LINK_DOWN = -711, | ||
96 | |||
97 | /** An invalid memory buffer has been specified. This may be an unmapped | ||
98 | * virtual address, or one which does not meet alignment requirements. | ||
99 | * For netio_input_register(), this error may be returned when multiple | ||
100 | * processes specify different memory regions to be used for NetIO | ||
101 | * buffers. That can happen if these processes specify explicit memory | ||
102 | * regions with the ::NETIO_FIXED_BUFFER_VA flag, or if tmc_cmem_init() | ||
103 | * has not been called by a common ancestor of the processes. | ||
104 | */ | ||
105 | NETIO_FAULT = -712, | ||
106 | |||
107 | /** Cannot combine user-managed shared memory and cache coherence. */ | ||
108 | NETIO_BAD_CACHE_CONFIG = -713, | ||
109 | |||
110 | /** Smallest NetIO error number. */ | ||
111 | NETIO_ERR_MIN = -713, | ||
112 | |||
113 | #ifndef __DOXYGEN__ | ||
114 | /** Used internally to mean that no response is needed; never returned to | ||
115 | * an application. */ | ||
116 | NETIO_NO_RESPONSE = 1 | ||
117 | #endif | ||
118 | } netio_error_t; | ||
119 | |||
120 | /** @} */ | ||
121 | |||
122 | #endif /* __NETIO_ERRORS_H__ */ | ||
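A minimal sketch of the calling convention this header documents: zero (NETIO_NO_ERROR) means success, and negative values are NetIO codes or negated system errno values. The exact prototype of netio_strerror() is not shown in this header, so its use below is an assumption, and the set of "transient" codes chosen is illustrative only.

#include <stdio.h>   /* For the diagnostic printf in this sketch only. */

/* Hedged sketch: classify a NetIO return code based on the comments above. */
static int check_netio_result(int rc)
{
    if (rc == NETIO_NO_ERROR)
        return 0;
    if (rc == NETIO_NOPKT || rc == NETIO_QUEUE_FULL || rc == NETIO_LINK_DOWN)
        return rc;                            /* Caller may retry later. */
    printf("netio error %d: %s\n", rc, netio_strerror(rc));
    return rc;
}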
diff --git a/arch/tile/include/hv/netio_intf.h b/arch/tile/include/hv/netio_intf.h new file mode 100644 index 000000000000..8d20972aba2c --- /dev/null +++ b/arch/tile/include/hv/netio_intf.h | |||
@@ -0,0 +1,2975 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /** | ||
16 | * NetIO interface structures and macros. | ||
17 | */ | ||
18 | |||
19 | #ifndef __NETIO_INTF_H__ | ||
20 | #define __NETIO_INTF_H__ | ||
21 | |||
22 | #include <hv/netio_errors.h> | ||
23 | |||
24 | #ifdef __KERNEL__ | ||
25 | #include <linux/types.h> | ||
26 | #else | ||
27 | #include <stdint.h> | ||
28 | #endif | ||
29 | |||
30 | #if !defined(__HV__) && !defined(__BOGUX__) && !defined(__KERNEL__) | ||
31 | #include <assert.h> | ||
32 | #define netio_assert assert /**< Enable assertions from macros */ | ||
33 | #else | ||
34 | #define netio_assert(...) ((void)(0)) /**< Disable assertions from macros */ | ||
35 | #endif | ||
36 | |||
37 | /* | ||
38 | * If none of these symbols are defined, we're building libnetio in an | ||
39 | * environment where we have pthreads, so we'll enable locking. | ||
40 | */ | ||
41 | #if !defined(__HV__) && !defined(__BOGUX__) && !defined(__KERNEL__) && \ | ||
42 | !defined(__NEWLIB__) | ||
43 | #define _NETIO_PTHREAD /**< Include a mutex in netio_queue_t below */ | ||
44 | |||
45 | /* | ||
46 | * If NETIO_UNLOCKED is defined, we don't use per-cpu locks on | ||
47 | * per-packet NetIO operations. We still do pthread locking on things | ||
48 | * like netio_input_register, though. This is used for building | ||
49 | * libnetio_unlocked. | ||
50 | */ | ||
51 | #ifndef NETIO_UNLOCKED | ||
52 | |||
53 | /* Avoid PLT overhead by using our own inlined per-cpu lock. */ | ||
54 | #include <sched.h> | ||
55 | typedef int _netio_percpu_mutex_t; | ||
56 | |||
57 | static __inline int | ||
58 | _netio_percpu_mutex_init(_netio_percpu_mutex_t* lock) | ||
59 | { | ||
60 | *lock = 0; | ||
61 | return 0; | ||
62 | } | ||
63 | |||
64 | static __inline int | ||
65 | _netio_percpu_mutex_lock(_netio_percpu_mutex_t* lock) | ||
66 | { | ||
67 | while (__builtin_expect(__insn_tns(lock), 0)) | ||
68 | sched_yield(); | ||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | static __inline int | ||
73 | _netio_percpu_mutex_unlock(_netio_percpu_mutex_t* lock) | ||
74 | { | ||
75 | *lock = 0; | ||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | #else /* NETIO_UNLOCKED */ | ||
80 | |||
81 | /* Don't do any locking for per-packet NetIO operations. */ | ||
82 | typedef int _netio_percpu_mutex_t; | ||
83 | #define _netio_percpu_mutex_init(L) | ||
84 | #define _netio_percpu_mutex_lock(L) | ||
85 | #define _netio_percpu_mutex_unlock(L) | ||
86 | |||
87 | #endif /* NETIO_UNLOCKED */ | ||
88 | #endif /* !__HV__, !__BOGUX, !__KERNEL__, !__NEWLIB__ */ | ||
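A minimal usage sketch of the per-cpu lock defined above (pthreads builds only); the lock variable and the critical-section callback are hypothetical, and the lock is assumed to have been passed to _netio_percpu_mutex_init() during queue setup.

/* Hedged sketch: a spinlock built on the tile test-and-set instruction,
 * yielding the CPU while contended. */
static _netio_percpu_mutex_t queue_lock;

static void with_queue_lock(void (*critical_section)(void))
{
    _netio_percpu_mutex_lock(&queue_lock);
    critical_section();                  /* Per-packet work goes here. */
    _netio_percpu_mutex_unlock(&queue_lock);
}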
89 | |||
90 | /** How many tiles can register for a given queue. | ||
91 | * @ingroup setup */ | ||
92 | #define NETIO_MAX_TILES_PER_QUEUE 64 | ||
93 | |||
94 | |||
95 | /** Largest permissible queue identifier. | ||
96 | * @ingroup setup */ | ||
97 | #define NETIO_MAX_QUEUE_ID 255 | ||
98 | |||
99 | |||
100 | #ifndef __DOXYGEN__ | ||
101 | |||
102 | /* Metadata packet checksum/ethertype flags. */ | ||
103 | |||
104 | /** The L4 checksum has not been calculated. */ | ||
105 | #define _NETIO_PKT_NO_L4_CSUM_SHIFT 0 | ||
106 | #define _NETIO_PKT_NO_L4_CSUM_RMASK 1 | ||
107 | #define _NETIO_PKT_NO_L4_CSUM_MASK \ | ||
108 | (_NETIO_PKT_NO_L4_CSUM_RMASK << _NETIO_PKT_NO_L4_CSUM_SHIFT) | ||
109 | |||
110 | /** The L3 checksum has not been calculated. */ | ||
111 | #define _NETIO_PKT_NO_L3_CSUM_SHIFT 1 | ||
112 | #define _NETIO_PKT_NO_L3_CSUM_RMASK 1 | ||
113 | #define _NETIO_PKT_NO_L3_CSUM_MASK \ | ||
114 | (_NETIO_PKT_NO_L3_CSUM_RMASK << _NETIO_PKT_NO_L3_CSUM_SHIFT) | ||
115 | |||
116 | /** The L3 checksum is incorrect (or perhaps has not been calculated). */ | ||
117 | #define _NETIO_PKT_BAD_L3_CSUM_SHIFT 2 | ||
118 | #define _NETIO_PKT_BAD_L3_CSUM_RMASK 1 | ||
119 | #define _NETIO_PKT_BAD_L3_CSUM_MASK \ | ||
120 | (_NETIO_PKT_BAD_L3_CSUM_RMASK << _NETIO_PKT_BAD_L3_CSUM_SHIFT) | ||
121 | |||
122 | /** The Ethernet packet type is unrecognized. */ | ||
123 | #define _NETIO_PKT_TYPE_UNRECOGNIZED_SHIFT 3 | ||
124 | #define _NETIO_PKT_TYPE_UNRECOGNIZED_RMASK 1 | ||
125 | #define _NETIO_PKT_TYPE_UNRECOGNIZED_MASK \ | ||
126 | (_NETIO_PKT_TYPE_UNRECOGNIZED_RMASK << \ | ||
127 | _NETIO_PKT_TYPE_UNRECOGNIZED_SHIFT) | ||
128 | |||
129 | /* Metadata packet type flags. */ | ||
130 | |||
131 | /** Where the packet type bits are; this field is the index into | ||
132 | * _netio_pkt_info. */ | ||
133 | #define _NETIO_PKT_TYPE_SHIFT 4 | ||
134 | #define _NETIO_PKT_TYPE_RMASK 0x3F | ||
135 | |||
136 | /** How many VLAN tags the packet has, and, if we have two, which one we | ||
137 | * actually grouped on. A VLAN within a proprietary (Marvell or Broadcom) | ||
138 | * tag is counted here. */ | ||
139 | #define _NETIO_PKT_VLAN_SHIFT 4 | ||
140 | #define _NETIO_PKT_VLAN_RMASK 0x3 | ||
141 | #define _NETIO_PKT_VLAN_MASK \ | ||
142 | (_NETIO_PKT_VLAN_RMASK << _NETIO_PKT_VLAN_SHIFT) | ||
143 | #define _NETIO_PKT_VLAN_NONE 0 /* No VLAN tag. */ | ||
144 | #define _NETIO_PKT_VLAN_ONE 1 /* One VLAN tag. */ | ||
145 | #define _NETIO_PKT_VLAN_TWO_OUTER 2 /* Two VLAN tags, outer one used. */ | ||
146 | #define _NETIO_PKT_VLAN_TWO_INNER 3 /* Two VLAN tags, inner one used. */ | ||
147 | |||
148 | /** Which proprietary tags the packet has. */ | ||
149 | #define _NETIO_PKT_TAG_SHIFT 6 | ||
150 | #define _NETIO_PKT_TAG_RMASK 0x3 | ||
151 | #define _NETIO_PKT_TAG_MASK \ | ||
152 | (_NETIO_PKT_TAG_RMASK << _NETIO_PKT_TAG_SHIFT) | ||
153 | #define _NETIO_PKT_TAG_NONE 0 /* No proprietary tags. */ | ||
154 | #define _NETIO_PKT_TAG_MRVL 1 /* Marvell HyperG.Stack tags. */ | ||
155 | #define _NETIO_PKT_TAG_MRVL_EXT 2 /* HyperG.Stack extended tags. */ | ||
156 | #define _NETIO_PKT_TAG_BRCM 3 /* Broadcom HiGig tags. */ | ||
157 | |||
158 | /** Whether a packet has an LLC + SNAP header. */ | ||
159 | #define _NETIO_PKT_SNAP_SHIFT 8 | ||
160 | #define _NETIO_PKT_SNAP_RMASK 0x1 | ||
161 | #define _NETIO_PKT_SNAP_MASK \ | ||
162 | (_NETIO_PKT_SNAP_RMASK << _NETIO_PKT_SNAP_SHIFT) | ||
163 | |||
164 | /* NOTE: Bits 9 and 10 are unused. */ | ||
165 | |||
166 | /** Length of any custom data before the L2 header, in words. */ | ||
167 | #define _NETIO_PKT_CUSTOM_LEN_SHIFT 11 | ||
168 | #define _NETIO_PKT_CUSTOM_LEN_RMASK 0x1F | ||
169 | #define _NETIO_PKT_CUSTOM_LEN_MASK \ | ||
170 | (_NETIO_PKT_CUSTOM_LEN_RMASK << _NETIO_PKT_CUSTOM_LEN_SHIFT) | ||
171 | |||
172 | /** The L4 checksum is incorrect (or perhaps has not been calculated). */ | ||
173 | #define _NETIO_PKT_BAD_L4_CSUM_SHIFT 16 | ||
174 | #define _NETIO_PKT_BAD_L4_CSUM_RMASK 0x1 | ||
175 | #define _NETIO_PKT_BAD_L4_CSUM_MASK \ | ||
176 | (_NETIO_PKT_BAD_L4_CSUM_RMASK << _NETIO_PKT_BAD_L4_CSUM_SHIFT) | ||
177 | |||
178 | /** Length of the L2 header, in words. */ | ||
179 | #define _NETIO_PKT_L2_LEN_SHIFT 17 | ||
180 | #define _NETIO_PKT_L2_LEN_RMASK 0x1F | ||
181 | #define _NETIO_PKT_L2_LEN_MASK \ | ||
182 | (_NETIO_PKT_L2_LEN_RMASK << _NETIO_PKT_L2_LEN_SHIFT) | ||
183 | |||
184 | |||
185 | /* Flags in minimal packet metadata. */ | ||
186 | |||
187 | /** We need an eDMA checksum on this packet. */ | ||
188 | #define _NETIO_PKT_NEED_EDMA_CSUM_SHIFT 0 | ||
189 | #define _NETIO_PKT_NEED_EDMA_CSUM_RMASK 1 | ||
190 | #define _NETIO_PKT_NEED_EDMA_CSUM_MASK \ | ||
191 | (_NETIO_PKT_NEED_EDMA_CSUM_RMASK << _NETIO_PKT_NEED_EDMA_CSUM_SHIFT) | ||
192 | |||
193 | /* Data within the packet information table. */ | ||
194 | |||
195 | /* Note that, for efficiency, code which uses these fields assumes that none | ||
196 | * of the shift values below are zero. See uses below for an explanation. */ | ||
197 | |||
198 | /** Offset within the L2 header of the innermost ethertype (in halfwords). */ | ||
199 | #define _NETIO_PKT_INFO_ETYPE_SHIFT 6 | ||
200 | #define _NETIO_PKT_INFO_ETYPE_RMASK 0x1F | ||
201 | |||
202 | /** Offset within the L2 header of the VLAN tag (in halfwords). */ | ||
203 | #define _NETIO_PKT_INFO_VLAN_SHIFT 11 | ||
204 | #define _NETIO_PKT_INFO_VLAN_RMASK 0x1F | ||
205 | |||
206 | #endif | ||
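All of the flag fields above follow the same SHIFT/RMASK pattern: shift the metadata flags word right, then mask off the field's width. A hedged one-line example (the helper name is hypothetical; the flags word is the __flags member of the standard metadata defined later in this header):

/* Hedged sketch: extract the VLAN-tag field from a metadata flags word.
 * The result is one of _NETIO_PKT_VLAN_NONE / _ONE / _TWO_OUTER / _TWO_INNER. */
static unsigned int packet_vlan_field(unsigned int flags)
{
    return (flags >> _NETIO_PKT_VLAN_SHIFT) & _NETIO_PKT_VLAN_RMASK;
}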
207 | |||
208 | |||
209 | /** The size of a memory buffer representing a small packet. | ||
210 | * @ingroup egress */ | ||
211 | #define SMALL_PACKET_SIZE 256 | ||
212 | |||
213 | /** The size of a memory buffer representing a large packet. | ||
214 | * @ingroup egress */ | ||
215 | #define LARGE_PACKET_SIZE 2048 | ||
216 | |||
217 | /** The size of a memory buffer representing a jumbo packet. | ||
218 | * @ingroup egress */ | ||
219 | #define JUMBO_PACKET_SIZE (12 * 1024) | ||
220 | |||
221 | |||
222 | /* Common ethertypes. | ||
223 | * @ingroup ingress */ | ||
224 | /** @{ */ | ||
225 | /** The ethertype of IPv4. */ | ||
226 | #define ETHERTYPE_IPv4 (0x0800) | ||
227 | /** The ethertype of ARP. */ | ||
228 | #define ETHERTYPE_ARP (0x0806) | ||
229 | /** The ethertype of VLANs. */ | ||
230 | #define ETHERTYPE_VLAN (0x8100) | ||
231 | /** The ethertype of a Q-in-Q header. */ | ||
232 | #define ETHERTYPE_Q_IN_Q (0x9100) | ||
233 | /** The ethertype of IPv6. */ | ||
234 | #define ETHERTYPE_IPv6 (0x86DD) | ||
235 | /** The ethertype of MPLS. */ | ||
236 | #define ETHERTYPE_MPLS (0x8847) | ||
237 | /** @} */ | ||
238 | |||
239 | |||
240 | /** The possible return values of NETIO_PKT_STATUS. | ||
241 | * @ingroup ingress | ||
242 | */ | ||
243 | typedef enum | ||
244 | { | ||
245 | /** No problems were detected with this packet. */ | ||
246 | NETIO_PKT_STATUS_OK, | ||
247 | /** The packet is undersized; this is expected behavior if the packet's | ||
248 | * ethertype is unrecognized, but otherwise the packet is likely corrupt. */ | ||
249 | NETIO_PKT_STATUS_UNDERSIZE, | ||
250 | /** The packet is oversized and some trailing bytes have been discarded. | ||
251 | This is expected behavior for short packets, since it's impossible to | ||
252 | precisely determine the amount of padding which may have been added to | ||
253 | them to make them meet the minimum Ethernet packet size. */ | ||
254 | NETIO_PKT_STATUS_OVERSIZE, | ||
255 | /** The packet was judged to be corrupt by hardware (for instance, it had | ||
256 | a bad CRC, or part of it was discarded due to lack of buffer space in | ||
257 | the I/O shim) and should be discarded. */ | ||
258 | NETIO_PKT_STATUS_BAD | ||
259 | } netio_pkt_status_t; | ||
260 | |||
261 | |||
262 | /** Log2 of how many buckets we have. */ | ||
263 | #define NETIO_LOG2_NUM_BUCKETS (10) | ||
264 | |||
265 | /** How many buckets we have. | ||
266 | * @ingroup ingress */ | ||
267 | #define NETIO_NUM_BUCKETS (1 << NETIO_LOG2_NUM_BUCKETS) | ||
268 | |||
269 | |||
270 | /** | ||
271 | * @brief A group-to-bucket identifier. | ||
272 | * | ||
273 | * @ingroup setup | ||
274 | * | ||
275 | * This tells us what to do with a given group. | ||
276 | */ | ||
277 | typedef union { | ||
278 | /** The header broken down into bits. */ | ||
279 | struct { | ||
280 | /** Whether we should balance on L4, if available */ | ||
281 | unsigned int __balance_on_l4:1; | ||
282 | /** Whether we should balance on L3, if available */ | ||
283 | unsigned int __balance_on_l3:1; | ||
284 | /** Whether we should balance on L2, if available */ | ||
285 | unsigned int __balance_on_l2:1; | ||
286 | /** Reserved for future use */ | ||
287 | unsigned int __reserved:1; | ||
288 | /** The base bucket to use to send traffic */ | ||
289 | unsigned int __bucket_base:NETIO_LOG2_NUM_BUCKETS; | ||
290 | /** The mask to apply to the balancing value. This must be one less | ||
291 | * than a power of two, e.g. 0x3 or 0xFF. | ||
292 | */ | ||
293 | unsigned int __bucket_mask:NETIO_LOG2_NUM_BUCKETS; | ||
294 | /** Pad to 32 bits */ | ||
295 | unsigned int __padding:(32 - 4 - 2 * NETIO_LOG2_NUM_BUCKETS); | ||
296 | } bits; | ||
297 | /** To send out the IDN. */ | ||
298 | unsigned int word; | ||
299 | } | ||
300 | netio_group_t; | ||
301 | |||
302 | |||
303 | /** | ||
304 | * @brief A VLAN-to-bucket identifier. | ||
305 | * | ||
306 | * @ingroup setup | ||
307 | * | ||
308 | * This tells us what to do with a given VLAN. | ||
309 | */ | ||
310 | typedef netio_group_t netio_vlan_t; | ||
311 | |||
312 | |||
313 | /** | ||
314 | * A bucket-to-queue mapping. | ||
315 | * @ingroup setup | ||
316 | */ | ||
317 | typedef unsigned char netio_bucket_t; | ||
318 | |||
319 | |||
320 | /** | ||
321 | * A packet size can always fit in a netio_size_t. | ||
322 | * @ingroup setup | ||
323 | */ | ||
324 | typedef unsigned int netio_size_t; | ||
325 | |||
326 | |||
327 | /** | ||
328 | * @brief Ethernet standard (ingress) packet metadata. | ||
329 | * | ||
330 | * @ingroup ingress | ||
331 | * | ||
332 | * This is additional data associated with each packet. | ||
333 | * This structure is opaque and accessed through the @ref ingress. | ||
334 | * | ||
335 | * Also, the buffer population operation currently assumes that standard | ||
336 | * metadata is at least as large as minimal metadata, and will need to be | ||
337 | * modified if that is no longer the case. | ||
338 | */ | ||
339 | typedef struct | ||
340 | { | ||
341 | #ifdef __DOXYGEN__ | ||
342 | /** This structure is opaque. */ | ||
343 | unsigned char opaque[24]; | ||
344 | #else | ||
345 | /** The overall ordinal of the packet */ | ||
346 | unsigned int __packet_ordinal; | ||
347 | /** The ordinal of the packet within the group */ | ||
348 | unsigned int __group_ordinal; | ||
349 | /** The best flow hash IPP could compute. */ | ||
350 | unsigned int __flow_hash; | ||
351 | /** Flags pertaining to checksum calculation, packet type, etc. */ | ||
352 | unsigned int __flags; | ||
353 | /** The first word of "user data". */ | ||
354 | unsigned int __user_data_0; | ||
355 | /** The second word of "user data". */ | ||
356 | unsigned int __user_data_1; | ||
357 | #endif | ||
358 | } | ||
359 | netio_pkt_metadata_t; | ||
360 | |||
361 | |||
362 | /** To ensure that the L3 header is aligned mod 4, the L2 header should be | ||
363 | * aligned mod 4 plus 2, since every supported L2 header is 4n + 2 bytes | ||
364 | * long. The standard way to do this is to simply add 2 bytes of padding | ||
365 | * before the L2 header. | ||
366 | */ | ||
367 | #define NETIO_PACKET_PADDING 2 | ||
368 | |||
369 | |||
370 | |||
371 | /** | ||
372 | * @brief Ethernet minimal (egress) packet metadata. | ||
373 | * | ||
374 | * @ingroup egress | ||
375 | * | ||
376 | * This structure represents information about packets which have | ||
377 | * been processed by @ref netio_populate_buffer() or | ||
378 | * @ref netio_populate_prepend_buffer(). This structure is opaque | ||
379 | * and accessed through the @ref egress. | ||
380 | * | ||
381 | * @internal This structure is actually copied into the memory used by | ||
382 | * standard metadata, which is assumed to be large enough. | ||
383 | */ | ||
384 | typedef struct | ||
385 | { | ||
386 | #ifdef __DOXYGEN__ | ||
387 | /** This structure is opaque. */ | ||
388 | unsigned char opaque[14]; | ||
389 | #else | ||
390 | /** The offset of the L2 header from the start of the packet data. */ | ||
391 | unsigned short l2_offset; | ||
392 | /** The offset of the L3 header from the start of the packet data. */ | ||
393 | unsigned short l3_offset; | ||
394 | /** Where to write the checksum. */ | ||
395 | unsigned char csum_location; | ||
396 | /** Where to start checksumming from. */ | ||
397 | unsigned char csum_start; | ||
398 | /** Flags pertaining to checksum calculation etc. */ | ||
399 | unsigned short flags; | ||
400 | /** The L2 length of the packet. */ | ||
401 | unsigned short l2_length; | ||
402 | /** The checksum with which to seed the checksum generator. */ | ||
403 | unsigned short csum_seed; | ||
404 | /** How much to checksum. */ | ||
405 | unsigned short csum_length; | ||
406 | #endif | ||
407 | } | ||
408 | netio_pkt_minimal_metadata_t; | ||
409 | |||
410 | |||
411 | #ifndef __DOXYGEN__ | ||
412 | |||
413 | /** | ||
414 | * @brief An I/O notification header. | ||
415 | * | ||
416 | * This is the first word of data received from an I/O shim in a notification | ||
417 | * packet. It contains framing and status information. | ||
418 | */ | ||
419 | typedef union | ||
420 | { | ||
421 | unsigned int word; /**< The whole word. */ | ||
422 | /** The various fields. */ | ||
423 | struct | ||
424 | { | ||
425 | unsigned int __channel:7; /**< Resource channel. */ | ||
426 | unsigned int __type:4; /**< Type. */ | ||
427 | unsigned int __ack:1; /**< Whether an acknowledgement is needed. */ | ||
428 | unsigned int __reserved:1; /**< Reserved. */ | ||
429 | unsigned int __protocol:1; /**< A protocol-specific word is added. */ | ||
430 | unsigned int __status:2; /**< Status of the transfer. */ | ||
431 | unsigned int __framing:2; /**< Framing of the transfer. */ | ||
432 | unsigned int __transfer_size:14; /**< Transfer size in bytes (total). */ | ||
433 | } bits; | ||
434 | } | ||
435 | __netio_pkt_notif_t; | ||
436 | |||
437 | |||
438 | /** | ||
439 | * Returns the base address of the packet. | ||
440 | */ | ||
441 | #define _NETIO_PKT_HANDLE_BASE(p) \ | ||
442 | ((unsigned char*)((p).word & 0xFFFFFFC0)) | ||
443 | |||
444 | /** | ||
445 | * Returns the base address of the packet. | ||
446 | */ | ||
447 | #define _NETIO_PKT_BASE(p) \ | ||
448 | _NETIO_PKT_HANDLE_BASE(p->__packet) | ||
449 | |||
450 | /** | ||
451 | * @brief An I/O notification packet (second word) | ||
452 | * | ||
453 | * This is the second word of data received from an I/O shim in a notification | ||
454 | * packet. This is the virtual address of the packet buffer, plus some flag | ||
455 | * bits. (The virtual address of the packet is always 256-byte aligned so we | ||
456 | * have room for 8 bits' worth of flags in the low 8 bits.) | ||
457 | * | ||
458 | * @internal | ||
459 | * NOTE: The low two bits must contain "__queue", so the "packet size" | ||
460 | * (SIZE_SMALL, SIZE_LARGE, or SIZE_JUMBO) can be determined quickly. | ||
461 | * | ||
462 | * If __addr or __offset are moved, _NETIO_PKT_BASE | ||
463 | * (defined just above this) must be changed. | ||
464 | */ | ||
465 | typedef union | ||
466 | { | ||
467 | unsigned int word; /**< The whole word. */ | ||
468 | /** The various fields. */ | ||
469 | struct | ||
470 | { | ||
471 | /** Which queue the packet will be returned to once it is sent back to | ||
472 | the IPP. This is one of the SIZE_xxx values. */ | ||
473 | unsigned int __queue:2; | ||
474 | |||
475 | /** The IPP handle of the sending IPP. */ | ||
476 | unsigned int __ipp_handle:2; | ||
477 | |||
478 | /** Reserved for future use. */ | ||
479 | unsigned int __reserved:1; | ||
480 | |||
481 | /** If 1, this packet has minimal (egress) metadata; otherwise, it | ||
482 | has standard (ingress) metadata. */ | ||
483 | unsigned int __minimal:1; | ||
484 | |||
485 | /** Offset of the metadata within the packet. This value is multiplied | ||
486 | * by 64 and added to the base packet address to get the metadata | ||
487 | * address. Note that this field is aligned within the word such that | ||
488 | * you can easily extract the metadata address with a 26-bit mask. */ | ||
489 | unsigned int __offset:2; | ||
490 | |||
491 | /** The top 24 bits of the packet's virtual address. */ | ||
492 | unsigned int __addr:24; | ||
493 | } bits; | ||
494 | } | ||
495 | __netio_pkt_handle_t; | ||
496 | |||
497 | #endif /* !__DOXYGEN__ */ | ||
498 | |||
499 | |||
500 | /** | ||
501 | * @brief A handle for an I/O packet's storage. | ||
502 | * @ingroup ingress | ||
503 | * | ||
504 | * netio_pkt_handle_t encodes the concept of a ::netio_pkt_t with its | ||
505 | * packet metadata removed. It is a much smaller type that exists to | ||
506 | * facilitate applications where the full ::netio_pkt_t type is too | ||
507 | * large, such as those that cache enormous numbers of packets or wish | ||
508 | * to transmit packet descriptors over the UDN. | ||
509 | * | ||
510 | * Because there is no metadata, most ::netio_pkt_t operations cannot be | ||
511 | * performed on a netio_pkt_handle_t. It supports only | ||
512 | * netio_free_handle() (to free the buffer) and | ||
513 | * NETIO_PKT_CUSTOM_DATA_H() (to access a pointer to its contents). | ||
514 | * The application must acquire any additional metadata it wants from the | ||
515 | * original ::netio_pkt_t and record it separately. | ||
516 | * | ||
517 | * A netio_pkt_handle_t can be extracted from a ::netio_pkt_t by calling | ||
518 | * NETIO_PKT_HANDLE(). An invalid handle (analogous to NULL) can be | ||
519 | * created by assigning the value ::NETIO_PKT_HANDLE_NONE. A handle can | ||
520 | * be tested for validity with NETIO_PKT_HANDLE_IS_VALID(). | ||
521 | */ | ||
522 | typedef struct | ||
523 | { | ||
524 | unsigned int word; /**< Opaque bits. */ | ||
525 | } netio_pkt_handle_t; | ||
526 | |||
527 | /** | ||
528 | * @brief A packet descriptor. | ||
529 | * | ||
530 | * @ingroup ingress | ||
531 | * @ingroup egress | ||
532 | * | ||
533 | * This data structure represents a packet. The structure is manipulated | ||
534 | * through the @ref ingress and the @ref egress. | ||
535 | * | ||
536 | * While the contents of a netio_pkt_t are opaque, the structure itself is | ||
537 | * portable. This means that it may be shared between all tiles which have | ||
538 | * done a netio_input_register() call for the interface on which the pkt_t | ||
539 | * was initially received (via netio_get_packet()) or retrieved (via | ||
540 | * netio_get_buffer()). The contents of a netio_pkt_t can be transmitted to | ||
541 | * another tile via shared memory, or via a UDN message, or by other means. | ||
542 | * The destination tile may then use the pkt_t as if it had originally been | ||
543 | * received locally; it may read or write the packet's data, read its | ||
544 | * metadata, free the packet, send the packet, transfer the netio_pkt_t to | ||
545 | * yet another tile, and so forth. | ||
546 | * | ||
547 | * Once a netio_pkt_t has been transferred to a second tile, the first tile | ||
548 | * should not reference the original copy; in particular, if more than one | ||
549 | * tile frees or sends the same netio_pkt_t, the IPP's packet free lists will | ||
550 | * become corrupted. Note also that each tile which reads or modifies | ||
551 | * packet data must obey the memory coherency rules outlined in @ref input. | ||
552 | */ | ||
553 | typedef struct | ||
554 | { | ||
555 | #ifdef __DOXYGEN__ | ||
556 | /** This structure is opaque. */ | ||
557 | unsigned char opaque[32]; | ||
558 | #else | ||
559 | /** For an ingress packet (one with standard metadata), this is the | ||
560 | * notification header we got from the I/O shim. For an egress packet | ||
561 | * (one with minimal metadata), this word is zero if the packet has not | ||
562 | * been populated, and nonzero if it has. */ | ||
563 | __netio_pkt_notif_t __notif_header; | ||
564 | |||
565 | /** Virtual address of the packet buffer, plus state flags. */ | ||
566 | __netio_pkt_handle_t __packet; | ||
567 | |||
568 | /** Metadata associated with the packet. */ | ||
569 | netio_pkt_metadata_t __metadata; | ||
570 | #endif | ||
571 | } | ||
572 | netio_pkt_t; | ||
573 | |||
574 | |||
575 | #ifndef __DOXYGEN__ | ||
576 | |||
577 | #define __NETIO_PKT_NOTIF_HEADER(pkt) ((pkt)->__notif_header) | ||
578 | #define __NETIO_PKT_IPP_HANDLE(pkt) ((pkt)->__packet.bits.__ipp_handle) | ||
579 | #define __NETIO_PKT_QUEUE(pkt) ((pkt)->__packet.bits.__queue) | ||
580 | #define __NETIO_PKT_NOTIF_HEADER_M(mda, pkt) ((pkt)->__notif_header) | ||
581 | #define __NETIO_PKT_IPP_HANDLE_M(mda, pkt) ((pkt)->__packet.bits.__ipp_handle) | ||
582 | #define __NETIO_PKT_MINIMAL(pkt) ((pkt)->__packet.bits.__minimal) | ||
583 | #define __NETIO_PKT_QUEUE_M(mda, pkt) ((pkt)->__packet.bits.__queue) | ||
584 | #define __NETIO_PKT_FLAGS_M(mda, pkt) ((mda)->__flags) | ||
585 | |||
586 | /* Packet information table, used by the attribute access functions below. */ | ||
587 | extern const uint16_t _netio_pkt_info[]; | ||
588 | |||
589 | #endif /* __DOXYGEN__ */ | ||
590 | |||
591 | |||
592 | #ifndef __DOXYGEN__ | ||
593 | /* These macros are deprecated and will disappear in a future MDE release. */ | ||
594 | #define NETIO_PKT_GOOD_CHECKSUM(pkt) \ | ||
595 | NETIO_PKT_L4_CSUM_CORRECT(pkt) | ||
596 | #define NETIO_PKT_GOOD_CHECKSUM_M(mda, pkt) \ | ||
597 | NETIO_PKT_L4_CSUM_CORRECT_M(mda, pkt) | ||
598 | #endif /* __DOXYGEN__ */ | ||
599 | |||
600 | |||
601 | /* Packet attribute access functions. */ | ||
602 | |||
603 | /** Return a pointer to the metadata for a packet. | ||
604 | * @ingroup ingress | ||
605 | * | ||
606 | * Calling this function once and passing the result to other retrieval | ||
607 | * functions with a "_M" suffix usually improves performance. This | ||
608 | * function must be called on an 'ingress' packet (i.e. one retrieved | ||
609 | * by @ref netio_get_packet(), on which @ref netio_populate_buffer() or | ||
610 | * @ref netio_populate_prepend_buffer have not been called). Use of this | ||
611 | * function on an 'egress' packet will cause an assertion failure. | ||
612 | * | ||
613 | * @param[in] pkt Packet on which to operate. | ||
614 | * @return A pointer to the packet's standard metadata. | ||
615 | */ | ||
616 | static __inline netio_pkt_metadata_t* | ||
617 | NETIO_PKT_METADATA(netio_pkt_t* pkt) | ||
618 | { | ||
619 | netio_assert(!pkt->__packet.bits.__minimal); | ||
620 | return &pkt->__metadata; | ||
621 | } | ||
622 | |||
623 | |||
624 | /** Return a pointer to the minimal metadata for a packet. | ||
625 | * @ingroup egress | ||
626 | * | ||
627 | * Calling this function once and passing the result to other retrieval | ||
628 | * functions with a "_MM" suffix usually improves performance. This | ||
629 | * function must be called on an 'egress' packet (i.e. one on which | ||
630 | * @ref netio_populate_buffer() or @ref netio_populate_prepend_buffer() | ||
631 | * have been called, or one retrieved by @ref netio_get_buffer()). Use of | ||
632 | * this function on an 'ingress' packet will cause an assertion failure. | ||
633 | * | ||
634 | * @param[in] pkt Packet on which to operate. | ||
635 | * @return A pointer to the packet's minimal metadata. | ||
636 | */ | ||
637 | static __inline netio_pkt_minimal_metadata_t* | ||
638 | NETIO_PKT_MINIMAL_METADATA(netio_pkt_t* pkt) | ||
639 | { | ||
640 | netio_assert(pkt->__packet.bits.__minimal); | ||
641 | return (netio_pkt_minimal_metadata_t*) &pkt->__metadata; | ||
642 | } | ||
643 | |||
644 | |||
645 | /** Determine whether a packet has 'minimal' metadata. | ||
646 | * @ingroup pktfuncs | ||
647 | * | ||
648 | * This function will return nonzero if the packet is an 'egress' | ||
649 | * packet (i.e. one on which @ref netio_populate_buffer() or | ||
650 | * @ref netio_populate_prepend_buffer() have been called, or one | ||
651 | * retrieved by @ref netio_get_buffer()), and zero if the packet | ||
652 | * is an 'ingress' packet (i.e. one retrieved by @ref netio_get_packet(), | ||
653 | * which has not been converted into an 'egress' packet). | ||
654 | * | ||
655 | * @param[in] pkt Packet on which to operate. | ||
656 | * @return Nonzero if the packet has minimal metadata. | ||
657 | */ | ||
658 | static __inline unsigned int | ||
659 | NETIO_PKT_IS_MINIMAL(netio_pkt_t* pkt) | ||
660 | { | ||
661 | return pkt->__packet.bits.__minimal; | ||
662 | } | ||
663 | |||
664 | |||
665 | /** Return a handle for a packet's storage. | ||
666 | * @ingroup pktfuncs | ||
667 | * | ||
668 | * @param[in] pkt Packet on which to operate. | ||
669 | * @return A handle for the packet's storage. | ||
670 | */ | ||
671 | static __inline netio_pkt_handle_t | ||
672 | NETIO_PKT_HANDLE(netio_pkt_t* pkt) | ||
673 | { | ||
674 | netio_pkt_handle_t h; | ||
675 | h.word = pkt->__packet.word; | ||
676 | return h; | ||
677 | } | ||
678 | |||
679 | |||
680 | /** A special reserved value indicating the absence of a packet handle. | ||
681 | * | ||
682 | * @ingroup pktfuncs | ||
683 | */ | ||
684 | #define NETIO_PKT_HANDLE_NONE ((netio_pkt_handle_t) { 0 }) | ||
685 | |||
686 | |||
687 | /** Test whether a packet handle is valid. | ||
688 | * | ||
689 | * Applications may wish to use the reserved value NETIO_PKT_HANDLE_NONE | ||
690 | * to indicate no packet at all. This function tests to see if a packet | ||
691 | * handle is a real handle, not this special reserved value. | ||
692 | * | ||
693 | * @ingroup pktfuncs | ||
694 | * | ||
695 | * @param[in] handle Handle on which to operate. | ||
696 | * @return One if the packet handle is valid, else zero. | ||
697 | */ | ||
698 | static __inline unsigned int | ||
699 | NETIO_PKT_HANDLE_IS_VALID(netio_pkt_handle_t handle) | ||
700 | { | ||
701 | return handle.word != 0; | ||
702 | } | ||
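A minimal sketch of the handle life cycle described above; the helper names and the caller-owned 'slot' are hypothetical.

/* Hedged sketch: keep only the compact handle for a packet, test it later,
 * and reset it to the reserved "no packet" value when done. */
static void remember_packet(netio_pkt_t* pkt, netio_pkt_handle_t* slot)
{
    *slot = NETIO_PKT_HANDLE(pkt);          /* Metadata-free descriptor. */
}

static unsigned int have_packet(netio_pkt_handle_t* slot)
{
    return NETIO_PKT_HANDLE_IS_VALID(*slot);
}

static void forget_packet(netio_pkt_handle_t* slot)
{
    *slot = NETIO_PKT_HANDLE_NONE;          /* Analogous to a NULL pointer. */
}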
703 | |||
704 | |||
705 | |||
706 | /** Return a pointer to the start of the packet's custom header. | ||
707 | * A custom header may or may not be present, depending upon the IPP; its | ||
708 | * contents and alignment are also IPP-dependent. Currently, none of the | ||
709 | * standard IPPs supplied by Tilera produce a custom header. If present, | ||
710 | * the custom header precedes the L2 header in the packet buffer. | ||
711 | * @ingroup ingress | ||
712 | * | ||
713 | * @param[in] handle Handle on which to operate. | ||
714 | * @return A pointer to the start of the packet. | ||
715 | */ | ||
716 | static __inline unsigned char* | ||
717 | NETIO_PKT_CUSTOM_DATA_H(netio_pkt_handle_t handle) | ||
718 | { | ||
719 | return _NETIO_PKT_HANDLE_BASE(handle) + NETIO_PACKET_PADDING; | ||
720 | } | ||
721 | |||
722 | |||
723 | /** Return the length of the packet's custom header. | ||
724 | * A custom header may or may not be present, depending upon the IPP; its | ||
725 | * contents and alignment are also IPP-dependent. Currently, none of the | ||
726 | * standard IPPs supplied by Tilera produce a custom header. If present, | ||
727 | * the custom header precedes the L2 header in the packet buffer. | ||
728 | * | ||
729 | * @ingroup ingress | ||
730 | * | ||
731 | * @param[in] mda Pointer to packet's standard metadata. | ||
732 | * @param[in] pkt Packet on which to operate. | ||
733 | * @return The length of the packet's custom header, in bytes. | ||
734 | */ | ||
735 | static __inline netio_size_t | ||
736 | NETIO_PKT_CUSTOM_HEADER_LENGTH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
737 | { | ||
738 | /* | ||
739 | * Note that we effectively need to extract a quantity from the flags word | ||
740 | * which is measured in words, and then turn it into bytes by shifting | ||
741 | * it left by 2. We do this all at once by just shifting right two less | ||
742 | * bits, and shifting the mask up two bits. | ||
743 | */ | ||
744 | return ((mda->__flags >> (_NETIO_PKT_CUSTOM_LEN_SHIFT - 2)) & | ||
745 | (_NETIO_PKT_CUSTOM_LEN_RMASK << 2)); | ||
746 | } | ||
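A short worked example of the combined shift-and-scale trick used above, for the custom-length field at bit 11 with a 5-bit mask:

/* If the field holds 3 (i.e. 3 words of custom data), then
 *   ((flags >> 11) & 0x1F) << 2        == 3 << 2 == 12 bytes, and
 *   (flags >> (11 - 2)) & (0x1F << 2)  == 12 bytes as well,
 * so shifting right by two fewer bits and widening the mask by two bits
 * yields the byte count in a single shift-and-mask. */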
747 | |||
748 | |||
749 | /** Return the length of the packet, starting with the custom header. | ||
750 | * A custom header may or may not be present, depending upon the IPP; its | ||
751 | * contents and alignment are also IPP-dependent. Currently, none of the | ||
752 | * standard IPPs supplied by Tilera produce a custom header. If present, | ||
753 | * the custom header precedes the L2 header in the packet buffer. | ||
754 | * @ingroup ingress | ||
755 | * | ||
756 | * @param[in] mda Pointer to packet's standard metadata. | ||
757 | * @param[in] pkt Packet on which to operate. | ||
758 | * @return The length of the packet, in bytes. | ||
759 | */ | ||
760 | static __inline netio_size_t | ||
761 | NETIO_PKT_CUSTOM_LENGTH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
762 | { | ||
763 | return (__NETIO_PKT_NOTIF_HEADER(pkt).bits.__transfer_size - | ||
764 | NETIO_PACKET_PADDING); | ||
765 | } | ||
766 | |||
767 | |||
768 | /** Return a pointer to the start of the packet's custom header. | ||
769 | * A custom header may or may not be present, depending upon the IPP; its | ||
770 | * contents and alignment are also IPP-dependent. Currently, none of the | ||
771 | * standard IPPs supplied by Tilera produce a custom header. If present, | ||
772 | * the custom header precedes the L2 header in the packet buffer. | ||
773 | * @ingroup ingress | ||
774 | * | ||
775 | * @param[in] mda Pointer to packet's standard metadata. | ||
776 | * @param[in] pkt Packet on which to operate. | ||
777 | * @return A pointer to the start of the packet. | ||
778 | */ | ||
779 | static __inline unsigned char* | ||
780 | NETIO_PKT_CUSTOM_DATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
781 | { | ||
782 | return NETIO_PKT_CUSTOM_DATA_H(NETIO_PKT_HANDLE(pkt)); | ||
783 | } | ||
784 | |||
785 | |||
786 | /** Return the length of the packet's L2 (Ethernet plus VLAN or SNAP) header. | ||
787 | * @ingroup ingress | ||
788 | * | ||
789 | * @param[in] mda Pointer to packet's standard metadata. | ||
790 | * @param[in] pkt Packet on which to operate. | ||
791 | * @return The length of the packet's L2 header, in bytes. | ||
792 | */ | ||
793 | static __inline netio_size_t | ||
794 | NETIO_PKT_L2_HEADER_LENGTH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
795 | { | ||
796 | /* | ||
797 | * Note that we effectively need to extract a quantity from the flags word | ||
798 | * which is measured in words, and then turn it into bytes by shifting | ||
799 | * it left by 2. We do this all at once by just shifting right two less | ||
800 | * bits, and shifting the mask up two bits. We then add two bytes. | ||
801 | */ | ||
802 | return ((mda->__flags >> (_NETIO_PKT_L2_LEN_SHIFT - 2)) & | ||
803 | (_NETIO_PKT_L2_LEN_RMASK << 2)) + 2; | ||
804 | } | ||
805 | |||
806 | |||
807 | /** Return the length of the packet, starting with the L2 (Ethernet) header. | ||
808 | * @ingroup ingress | ||
809 | * | ||
810 | * @param[in] mda Pointer to packet's standard metadata. | ||
811 | * @param[in] pkt Packet on which to operate. | ||
812 | * @return The length of the packet, in bytes. | ||
813 | */ | ||
814 | static __inline netio_size_t | ||
815 | NETIO_PKT_L2_LENGTH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
816 | { | ||
817 | return (NETIO_PKT_CUSTOM_LENGTH_M(mda, pkt) - | ||
818 | NETIO_PKT_CUSTOM_HEADER_LENGTH_M(mda,pkt)); | ||
819 | } | ||
820 | |||
821 | |||
822 | /** Return a pointer to the start of the packet's L2 (Ethernet) header. | ||
823 | * @ingroup ingress | ||
824 | * | ||
825 | * @param[in] mda Pointer to packet's standard metadata. | ||
826 | * @param[in] pkt Packet on which to operate. | ||
827 | * @return A pointer to the start of the packet. | ||
828 | */ | ||
829 | static __inline unsigned char* | ||
830 | NETIO_PKT_L2_DATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
831 | { | ||
832 | return (NETIO_PKT_CUSTOM_DATA_M(mda, pkt) + | ||
833 | NETIO_PKT_CUSTOM_HEADER_LENGTH_M(mda, pkt)); | ||
834 | } | ||
835 | |||
836 | |||
837 | /** Retrieve the length of the packet, starting with the L3 (generally, | ||
838 | * the IP) header. | ||
839 | * @ingroup ingress | ||
840 | * | ||
841 | * @param[in] mda Pointer to packet's standard metadata. | ||
842 | * @param[in] pkt Packet on which to operate. | ||
843 | * @return Length of the packet's L3 header and data, in bytes. | ||
844 | */ | ||
845 | static __inline netio_size_t | ||
846 | NETIO_PKT_L3_LENGTH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
847 | { | ||
848 | return (NETIO_PKT_L2_LENGTH_M(mda, pkt) - | ||
849 | NETIO_PKT_L2_HEADER_LENGTH_M(mda,pkt)); | ||
850 | } | ||
851 | |||
852 | |||
853 | /** Return a pointer to the packet's L3 (generally, the IP) header. | ||
854 | * @ingroup ingress | ||
855 | * | ||
856 | * Note that we guarantee word alignment of the L3 header. | ||
857 | * | ||
858 | * @param[in] mda Pointer to packet's standard metadata. | ||
859 | * @param[in] pkt Packet on which to operate. | ||
860 | * @return A pointer to the packet's L3 header. | ||
861 | */ | ||
862 | static __inline unsigned char* | ||
863 | NETIO_PKT_L3_DATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
864 | { | ||
865 | return (NETIO_PKT_L2_DATA_M(mda, pkt) + | ||
866 | NETIO_PKT_L2_HEADER_LENGTH_M(mda, pkt)); | ||
867 | } | ||
868 | |||
869 | |||
870 | /** Return the ordinal of the packet. | ||
871 | * @ingroup ingress | ||
872 | * | ||
873 | * Each packet is given an ordinal number when it is delivered by the IPP. | ||
874 | * In the medium term, the ordinal is unique and monotonically increasing, | ||
875 | * being incremented by 1 for each packet; the ordinal of the first packet | ||
876 | * delivered after the IPP starts is zero. (Since the ordinal is of finite | ||
877 | * size, given enough input packets, it will eventually wrap around to zero; | ||
878 | * in the long term, therefore, ordinals are not unique.) The ordinals | ||
879 | * handed out by different IPPs are not disjoint, so two packets from | ||
880 | * different IPPs may have identical ordinals. Packets dropped by the | ||
881 | * IPP or by the I/O shim are not assigned ordinals. | ||
882 | * | ||
883 | * @param[in] mda Pointer to packet's standard metadata. | ||
884 | * @param[in] pkt Packet on which to operate. | ||
885 | * @return The packet's per-IPP packet ordinal. | ||
886 | */ | ||
887 | static __inline unsigned int | ||
888 | NETIO_PKT_ORDINAL_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
889 | { | ||
890 | return mda->__packet_ordinal; | ||
891 | } | ||
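
/*
 * Illustrative sketch, not part of the NetIO API: because ordinals wrap
 * around eventually, comparisons should use wrap-safe signed arithmetic,
 * much like TCP sequence numbers.  This helper (its name and the
 * 'prev_ordinal' parameter are illustrative) reports whether a packet was
 * delivered after a previously recorded ordinal from the same IPP.
 */
static __inline int
netio_example_arrived_after(netio_pkt_metadata_t* mda, netio_pkt_t* pkt,
                            unsigned int prev_ordinal)
{
  /* The signed difference handles the eventual wrap of the counter. */
  return (int)(NETIO_PKT_ORDINAL_M(mda, pkt) - prev_ordinal) > 0;
}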
892 | |||
893 | |||
894 | /** Return the per-group ordinal of the packet. | ||
895 | * @ingroup ingress | ||
896 | * | ||
897 | * Each packet is given a per-group ordinal number when it is | ||
898 | * delivered by the IPP. By default, the group is the packet's VLAN, | ||
899 | * although IPP can be recompiled to use different values. In | ||
900 | * the medium term, the ordinal is unique and monotonically | ||
901 | * increasing, being incremented by 1 for each packet; the ordinal of | ||
902 | * the first packet distributed to a particular group is zero. | ||
903 | * (Since the ordinal is of finite size, given enough input packets, | ||
904 | * it will eventually wrap around to zero; in the long term, | ||
905 | * therefore, ordinals are not unique.) The ordinals handed out by | ||
906 | * different IPPs are not disjoint, so two packets from different IPPs | ||
907 | * may have identical ordinals; similarly, packets distributed to | ||
908 | * different groups may have identical ordinals. Packets dropped by | ||
909 | * the IPP or by the I/O shim are not assigned ordinals. | ||
910 | * | ||
911 | * @param[in] mda Pointer to packet's standard metadata. | ||
912 | * @param[in] pkt Packet on which to operate. | ||
913 | * @return The packet's per-IPP, per-group ordinal. | ||
914 | */ | ||
915 | static __inline unsigned int | ||
916 | NETIO_PKT_GROUP_ORDINAL_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
917 | { | ||
918 | return mda->__group_ordinal; | ||
919 | } | ||
920 | |||
921 | |||
922 | /** Return the VLAN ID assigned to the packet. | ||
923 | * @ingroup ingress | ||
924 | * | ||
925 | * This value is usually contained within the packet header. | ||
926 | * | ||
927 | * This value will be zero if the packet does not have a VLAN tag, or if | ||
928 | * this value was not extracted from the packet. | ||
929 | * | ||
930 | * @param[in] mda Pointer to packet's standard metadata. | ||
931 | * @param[in] pkt Packet on which to operate. | ||
932 | * @return The packet's VLAN ID. | ||
933 | */ | ||
934 | static __inline unsigned short | ||
935 | NETIO_PKT_VLAN_ID_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
936 | { | ||
937 | int vl = (mda->__flags >> _NETIO_PKT_VLAN_SHIFT) & _NETIO_PKT_VLAN_RMASK; | ||
938 | unsigned short* pkt_p; | ||
939 | int index; | ||
940 | unsigned short val; | ||
941 | |||
942 | if (vl == _NETIO_PKT_VLAN_NONE) | ||
943 | return 0; | ||
944 | |||
945 | pkt_p = (unsigned short*) NETIO_PKT_L2_DATA_M(mda, pkt); | ||
946 | index = (mda->__flags >> _NETIO_PKT_TYPE_SHIFT) & _NETIO_PKT_TYPE_RMASK; | ||
947 | |||
948 | val = pkt_p[(_netio_pkt_info[index] >> _NETIO_PKT_INFO_VLAN_SHIFT) & | ||
949 | _NETIO_PKT_INFO_VLAN_RMASK]; | ||
950 | |||
951 | #ifdef __TILECC__ | ||
952 | return (__insn_bytex(val) >> 16) & 0xFFF; | ||
953 | #else | ||
954 | return (__builtin_bswap32(val) >> 16) & 0xFFF; | ||
955 | #endif | ||
956 | } | ||
957 | |||
958 | |||
959 | /** Return the ethertype of the packet. | ||
960 | * @ingroup ingress | ||
961 | * | ||
962 | * This value is usually contained within the packet header. | ||
963 | * | ||
964 | * This value is reliable if @ref NETIO_PKT_ETHERTYPE_RECOGNIZED_M() | ||
965 | * returns true; otherwise, it may not be well defined. | ||
966 | * | ||
967 | * @param[in] mda Pointer to packet's standard metadata. | ||
968 | * @param[in] pkt Packet on which to operate. | ||
969 | * @return The packet's ethertype. | ||
970 | */ | ||
971 | static __inline unsigned short | ||
972 | NETIO_PKT_ETHERTYPE_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
973 | { | ||
974 | unsigned short* pkt_p = (unsigned short*) NETIO_PKT_L2_DATA_M(mda, pkt); | ||
975 | int index = (mda->__flags >> _NETIO_PKT_TYPE_SHIFT) & _NETIO_PKT_TYPE_RMASK; | ||
976 | |||
977 | unsigned short val = | ||
978 | pkt_p[(_netio_pkt_info[index] >> _NETIO_PKT_INFO_ETYPE_SHIFT) & | ||
979 | _NETIO_PKT_INFO_ETYPE_RMASK]; | ||
980 | |||
981 | return __builtin_bswap32(val) >> 16; | ||
982 | } | ||
983 | |||
984 | |||
985 | /** Return the flow hash computed on the packet. | ||
986 | * @ingroup ingress | ||
987 | * | ||
988 | * For TCP and UDP packets, this hash is calculated by hashing together | ||
989 | * the "5-tuple" values, specifically the source IP address, destination | ||
990 | * IP address, protocol type, source port and destination port. | ||
991 | * The hash value is intended to be helpful for millions of distinct | ||
992 | * flows. | ||
993 | * | ||
994 | * For IPv4 or IPv6 packets which are neither TCP nor UDP, the flow hash is | ||
995 | * derived by hashing together the source and destination IP addresses. | ||
996 | * | ||
997 | * For MPLS-encapsulated packets, the flow hash is derived by hashing | ||
998 | * the first MPLS label. | ||
999 | * | ||
1000 | * For all other packets the flow hash is computed from the source | ||
1001 | * and destination Ethernet addresses. | ||
1002 | * | ||
1003 | * The hash is symmetric, meaning it produces the same value if the | ||
1004 | * source and destination are swapped. The only exceptions are | ||
1005 | * tunneling protocols 0x04 (IP in IP Encapsulation), 0x29 (Simple | ||
1006 | * Internet Protocol), 0x2F (General Routing Encapsulation) and 0x32 | ||
1007 | * (Encap Security Payload), which use only the destination address | ||
1008 | * since the source address is not meaningful. | ||
1009 | * | ||
1010 | * @param[in] mda Pointer to packet's standard metadata. | ||
1011 | * @param[in] pkt Packet on which to operate. | ||
1012 | * @return The packet's 32-bit flow hash. | ||
1013 | */ | ||
1014 | static __inline unsigned int | ||
1015 | NETIO_PKT_FLOW_HASH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
1016 | { | ||
1017 | return mda->__flow_hash; | ||
1018 | } | ||
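
/*
 * Illustrative sketch, not part of the NetIO API: a typical use of the flow
 * hash is to spread flows across a set of worker queues while keeping every
 * packet of a given flow on the same worker.  The helper name and the
 * 'num_workers' parameter are illustrative assumptions.
 */
static __inline unsigned int
netio_example_pick_worker(netio_pkt_metadata_t* mda, netio_pkt_t* pkt,
                          unsigned int num_workers)
{
  /* The same 5-tuple (or symmetric address pair) always maps to one worker. */
  return NETIO_PKT_FLOW_HASH_M(mda, pkt) % num_workers;
}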
1019 | |||
1020 | |||
1021 | /** Return the first word of "user data" for the packet. | ||
1022 | * | ||
1023 | * The contents of the user data words depend on the IPP. | ||
1024 | * | ||
1025 | * When using the standard ipp1, ipp2, or ipp4 sub-drivers, the first | ||
1026 | * word of user data contains the least significant bits of the 64-bit | ||
1027 | * arrival cycle count (see @c get_cycle_count_low()). | ||
1028 | * | ||
1029 | * See the <em>System Programmer's Guide</em> for details. | ||
1030 | * | ||
1031 | * @ingroup ingress | ||
1032 | * | ||
1033 | * @param[in] mda Pointer to packet's standard metadata. | ||
1034 | * @param[in] pkt Packet on which to operate. | ||
1035 | * @return The packet's first word of "user data". | ||
1036 | */ | ||
1037 | static __inline unsigned int | ||
1038 | NETIO_PKT_USER_DATA_0_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
1039 | { | ||
1040 | return mda->__user_data_0; | ||
1041 | } | ||
1042 | |||
1043 | |||
1044 | /** Return the second word of "user data" for the packet. | ||
1045 | * | ||
1046 | * The contents of the user data words depend on the IPP. | ||
1047 | * | ||
1048 | * When using the standard ipp1, ipp2, or ipp4 sub-drivers, the second | ||
1049 | * word of user data contains the most significant bits of the 64-bit | ||
1050 | * arrival cycle count (see @c get_cycle_count_high()). | ||
1051 | * | ||
1052 | * See the <em>System Programmer's Guide</em> for details. | ||
1053 | * | ||
1054 | * @ingroup ingress | ||
1055 | * | ||
1056 | * @param[in] mda Pointer to packet's standard metadata. | ||
1057 | * @param[in] pkt Packet on which to operate. | ||
1058 | * @return The packet's second word of "user data". | ||
1059 | */ | ||
1060 | static __inline unsigned int | ||
1061 | NETIO_PKT_USER_DATA_1_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
1062 | { | ||
1063 | return mda->__user_data_1; | ||
1064 | } | ||
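
/*
 * Illustrative sketch, not part of the NetIO API: with the standard ipp1,
 * ipp2, or ipp4 sub-drivers, the two user data words hold the low and high
 * halves of the 64-bit arrival cycle count, so they can be recombined into
 * a single timestamp.  The helper assumes one of those sub-drivers is in use.
 */
static __inline uint64_t
netio_example_arrival_cycles(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
{
  uint64_t lo = NETIO_PKT_USER_DATA_0_M(mda, pkt);
  uint64_t hi = NETIO_PKT_USER_DATA_1_M(mda, pkt);

  return (hi << 32) | lo;
}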
1065 | |||
1066 | |||
1067 | /** Determine whether the L4 (TCP/UDP) checksum was calculated. | ||
1068 | * @ingroup ingress | ||
1069 | * | ||
1070 | * @param[in] mda Pointer to packet's standard metadata. | ||
1071 | * @param[in] pkt Packet on which to operate. | ||
1072 | * @return Nonzero if the L4 checksum was calculated. | ||
1073 | */ | ||
1074 | static __inline unsigned int | ||
1075 | NETIO_PKT_L4_CSUM_CALCULATED_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
1076 | { | ||
1077 | return !(mda->__flags & _NETIO_PKT_NO_L4_CSUM_MASK); | ||
1078 | } | ||
1079 | |||
1080 | |||
1081 | /** Determine whether the L4 (TCP/UDP) checksum was calculated and found to | ||
1082 | * be correct. | ||
1083 | * @ingroup ingress | ||
1084 | * | ||
1085 | * @param[in] mda Pointer to packet's standard metadata. | ||
1086 | * @param[in] pkt Packet on which to operate. | ||
1087 | * @return Nonzero if the checksum was calculated and is correct. | ||
1088 | */ | ||
1089 | static __inline unsigned int | ||
1090 | NETIO_PKT_L4_CSUM_CORRECT_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
1091 | { | ||
1092 | return !(mda->__flags & | ||
1093 | (_NETIO_PKT_BAD_L4_CSUM_MASK | _NETIO_PKT_NO_L4_CSUM_MASK)); | ||
1094 | } | ||
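
/*
 * Illustrative sketch, not part of the NetIO API: a receive path can skip a
 * software checksum pass when the IPP has already verified the packet.
 * Note that NETIO_PKT_L4_CSUM_CORRECT_M() already implies the checksum was
 * calculated; the explicit test is kept here only for clarity.
 */
static __inline int
netio_example_l4_verified_by_hw(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
{
  return NETIO_PKT_L4_CSUM_CALCULATED_M(mda, pkt) &&
         NETIO_PKT_L4_CSUM_CORRECT_M(mda, pkt);
}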
1095 | |||
1096 | |||
1097 | /** Determine whether the L3 (IP) checksum was calculated. | ||
1098 | * @ingroup ingress | ||
1099 | * | ||
1100 | * @param[in] mda Pointer to packet's standard metadata. | ||
1101 | * @param[in] pkt Packet on which to operate. | ||
1102 | * @return Nonzero if the L3 (IP) checksum was calculated. | ||
1103 | */ | ||
1104 | static __inline unsigned int | ||
1105 | NETIO_PKT_L3_CSUM_CALCULATED_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
1106 | { | ||
1107 | return !(mda->__flags & _NETIO_PKT_NO_L3_CSUM_MASK); | ||
1108 | } | ||
1109 | |||
1110 | |||
1111 | /** Determine whether the L3 (IP) checksum was calculated and found to be | ||
1112 | * correct. | ||
1113 | * @ingroup ingress | ||
1114 | * | ||
1115 | * @param[in] mda Pointer to packet's standard metadata. | ||
1116 | * @param[in] pkt Packet on which to operate. | ||
1117 | * @return Nonzero if the checksum was calculated and is correct. | ||
1118 | */ | ||
1119 | static __inline unsigned int | ||
1120 | NETIO_PKT_L3_CSUM_CORRECT_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
1121 | { | ||
1122 | return !(mda->__flags & | ||
1123 | (_NETIO_PKT_BAD_L3_CSUM_MASK | _NETIO_PKT_NO_L3_CSUM_MASK)); | ||
1124 | } | ||
1125 | |||
1126 | |||
1127 | /** Determine whether the ethertype was recognized and L3 packet data was | ||
1128 | * processed. | ||
1129 | * @ingroup ingress | ||
1130 | * | ||
1131 | * @param[in] mda Pointer to packet's standard metadata. | ||
1132 | * @param[in] pkt Packet on which to operate. | ||
1133 | * @return Nonzero if the ethertype was recognized and L3 packet data was | ||
1134 | * processed. | ||
1135 | */ | ||
1136 | static __inline unsigned int | ||
1137 | NETIO_PKT_ETHERTYPE_RECOGNIZED_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
1138 | { | ||
1139 | return !(mda->__flags & _NETIO_PKT_TYPE_UNRECOGNIZED_MASK); | ||
1140 | } | ||
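
/*
 * Illustrative sketch, not part of the NetIO API: trust the extracted
 * ethertype only when the IPP recognized it; otherwise the caller must parse
 * the L2 header itself.  0x0800 is the standard IPv4 ethertype.
 */
static __inline int
netio_example_is_ipv4(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
{
  if (!NETIO_PKT_ETHERTYPE_RECOGNIZED_M(mda, pkt))
    return 0;

  return NETIO_PKT_ETHERTYPE_M(mda, pkt) == 0x0800;
}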
1141 | |||
1142 | |||
1143 | /** Retrieve the status of a packet and any errors that may have occurred | ||
1144 | * during ingress processing (length mismatches, CRC errors, etc.). | ||
1145 | * @ingroup ingress | ||
1146 | * | ||
1147 | * Note that packets for which @ref NETIO_PKT_ETHERTYPE_RECOGNIZED() | ||
1148 | * returns zero are always reported as underlength, as there is no a priori | ||
1149 | * means to determine their length. Normally, applications should use | ||
1150 | * @ref NETIO_PKT_BAD_M() instead of explicitly checking status with this | ||
1151 | * function. | ||
1152 | * | ||
1153 | * @param[in] mda Pointer to packet's standard metadata. | ||
1154 | * @param[in] pkt Packet on which to operate. | ||
1155 | * @return The packet's status. | ||
1156 | */ | ||
1157 | static __inline netio_pkt_status_t | ||
1158 | NETIO_PKT_STATUS_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
1159 | { | ||
1160 | return (netio_pkt_status_t) __NETIO_PKT_NOTIF_HEADER(pkt).bits.__status; | ||
1161 | } | ||
1162 | |||
1163 | |||
1164 | /** Report whether a packet is bad (i.e., was shorter than expected based on | ||
1165 | * its headers, or had a bad CRC). | ||
1166 | * @ingroup ingress | ||
1167 | * | ||
1168 | * Note that this function does not verify L3 or L4 checksums. | ||
1169 | * | ||
1170 | * @param[in] mda Pointer to packet's standard metadata. | ||
1171 | * @param[in] pkt Packet on which to operate. | ||
1172 | * @return Nonzero if the packet is bad and should be discarded. | ||
1173 | */ | ||
1174 | static __inline unsigned int | ||
1175 | NETIO_PKT_BAD_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
1176 | { | ||
1177 | return ((NETIO_PKT_STATUS_M(mda, pkt) & 1) && | ||
1178 | (NETIO_PKT_ETHERTYPE_RECOGNIZED_M(mda, pkt) || | ||
1179 | NETIO_PKT_STATUS_M(mda, pkt) == NETIO_PKT_STATUS_BAD)); | ||
1180 | } | ||
1181 | |||
1182 | |||
1183 | /** Return the length of the packet, starting with the L2 (Ethernet) header. | ||
1184 | * @ingroup egress | ||
1185 | * | ||
1186 | * @param[in] mmd Pointer to packet's minimal metadata. | ||
1187 | * @param[in] pkt Packet on which to operate. | ||
1188 | * @return The length of the packet, in bytes. | ||
1189 | */ | ||
1190 | static __inline netio_size_t | ||
1191 | NETIO_PKT_L2_LENGTH_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt) | ||
1192 | { | ||
1193 | return mmd->l2_length; | ||
1194 | } | ||
1195 | |||
1196 | |||
1197 | /** Return the length of the L2 (Ethernet) header. | ||
1198 | * @ingroup egress | ||
1199 | * | ||
1200 | * @param[in] mmd Pointer to packet's minimal metadata. | ||
1201 | * @param[in] pkt Packet on which to operate. | ||
1202 | * @return The length of the packet's L2 header, in bytes. | ||
1203 | */ | ||
1204 | static __inline netio_size_t | ||
1205 | NETIO_PKT_L2_HEADER_LENGTH_MM(netio_pkt_minimal_metadata_t* mmd, | ||
1206 | netio_pkt_t* pkt) | ||
1207 | { | ||
1208 | return mmd->l3_offset - mmd->l2_offset; | ||
1209 | } | ||
1210 | |||
1211 | |||
1212 | /** Return the length of the packet, starting with the L3 (IP) header. | ||
1213 | * @ingroup egress | ||
1214 | * | ||
1215 | * @param[in] mmd Pointer to packet's minimal metadata. | ||
1216 | * @param[in] pkt Packet on which to operate. | ||
1217 | * @return Length of the packet's L3 header and data, in bytes. | ||
1218 | */ | ||
1219 | static __inline netio_size_t | ||
1220 | NETIO_PKT_L3_LENGTH_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt) | ||
1221 | { | ||
1222 | return (NETIO_PKT_L2_LENGTH_MM(mmd, pkt) - | ||
1223 | NETIO_PKT_L2_HEADER_LENGTH_MM(mmd, pkt)); | ||
1224 | } | ||
1225 | |||
1226 | |||
1227 | /** Return a pointer to the packet's L3 (generally, the IP) header. | ||
1228 | * @ingroup egress | ||
1229 | * | ||
1230 | * Note that we guarantee word alignment of the L3 header. | ||
1231 | * | ||
1232 | * @param[in] mmd Pointer to packet's minimal metadata. | ||
1233 | * @param[in] pkt Packet on which to operate. | ||
1234 | * @return A pointer to the packet's L3 header. | ||
1235 | */ | ||
1236 | static __inline unsigned char* | ||
1237 | NETIO_PKT_L3_DATA_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt) | ||
1238 | { | ||
1239 | return _NETIO_PKT_BASE(pkt) + mmd->l3_offset; | ||
1240 | } | ||
1241 | |||
1242 | |||
1243 | /** Return a pointer to the packet's L2 (Ethernet) header. | ||
1244 | * @ingroup egress | ||
1245 | * | ||
1246 | * @param[in] mmd Pointer to packet's minimal metadata. | ||
1247 | * @param[in] pkt Packet on which to operate. | ||
1248 | * @return A pointer to the start of the packet. | ||
1249 | */ | ||
1250 | static __inline unsigned char* | ||
1251 | NETIO_PKT_L2_DATA_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt) | ||
1252 | { | ||
1253 | return _NETIO_PKT_BASE(pkt) + mmd->l2_offset; | ||
1254 | } | ||
1255 | |||
1256 | |||
1257 | /** Retrieve the status of a packet and any errors that may have occurred | ||
1258 | * during ingress processing (length mismatches, CRC errors, etc.). | ||
1259 | * @ingroup ingress | ||
1260 | * | ||
1261 | * Note that packets for which @ref NETIO_PKT_ETHERTYPE_RECOGNIZED() | ||
1262 | * returns zero are always reported as underlength, as there is no a priori | ||
1263 | * means to determine their length. Normally, applications should use | ||
1264 | * @ref NETIO_PKT_BAD() instead of explicitly checking status with this | ||
1265 | * function. | ||
1266 | * | ||
1267 | * @param[in] pkt Packet on which to operate. | ||
1268 | * @return The packet's status. | ||
1269 | */ | ||
1270 | static __inline netio_pkt_status_t | ||
1271 | NETIO_PKT_STATUS(netio_pkt_t* pkt) | ||
1272 | { | ||
1273 | netio_assert(!pkt->__packet.bits.__minimal); | ||
1274 | |||
1275 | return (netio_pkt_status_t) __NETIO_PKT_NOTIF_HEADER(pkt).bits.__status; | ||
1276 | } | ||
1277 | |||
1278 | |||
1279 | /** Report whether a packet is bad (i.e., was shorter than expected based on | ||
1280 | * its headers, or had a bad CRC). | ||
1281 | * @ingroup ingress | ||
1282 | * | ||
1283 | * Note that this function does not verify L3 or L4 checksums. | ||
1284 | * | ||
1285 | * @param[in] pkt Packet on which to operate. | ||
1286 | * @return Nonzero if the packet is bad and should be discarded. | ||
1287 | */ | ||
1288 | static __inline unsigned int | ||
1289 | NETIO_PKT_BAD(netio_pkt_t* pkt) | ||
1290 | { | ||
1291 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1292 | |||
1293 | return NETIO_PKT_BAD_M(mda, pkt); | ||
1294 | } | ||
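
/*
 * Illustrative sketch, not part of the NetIO API: a minimal ingress guard
 * that rejects bad packets before touching the payload, then locates the
 * Ethernet frame through the metadata-based accessors defined above.  What
 * to do with the frame (and how to return a rejected buffer to the IPP) is
 * left to the application.
 */
static __inline void
netio_example_inspect(netio_pkt_t* pkt)
{
  netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
  unsigned char* frame;
  netio_size_t len;

  if (NETIO_PKT_BAD_M(mda, pkt))
    return;  /* Short frame or bad CRC; the caller should discard it. */

  frame = NETIO_PKT_L2_DATA_M(mda, pkt);
  len = NETIO_PKT_L2_LENGTH_M(mda, pkt);

  (void)frame;  /* Application-specific processing would start here. */
  (void)len;
}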
1295 | |||
1296 | |||
1297 | /** Return the length of the packet's custom header. | ||
1298 | * A custom header may or may not be present, depending upon the IPP; its | ||
1299 | * contents and alignment are also IPP-dependent. Currently, none of the | ||
1300 | * standard IPPs supplied by Tilera produce a custom header. If present, | ||
1301 | * the custom header precedes the L2 header in the packet buffer. | ||
1302 | * @ingroup pktfuncs | ||
1303 | * | ||
1304 | * @param[in] pkt Packet on which to operate. | ||
1305 | * @return The length of the packet's custom header, in bytes. | ||
1306 | */ | ||
1307 | static __inline netio_size_t | ||
1308 | NETIO_PKT_CUSTOM_HEADER_LENGTH(netio_pkt_t* pkt) | ||
1309 | { | ||
1310 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1311 | |||
1312 | return NETIO_PKT_CUSTOM_HEADER_LENGTH_M(mda, pkt); | ||
1313 | } | ||
1314 | |||
1315 | |||
1316 | /** Return the length of the packet, starting with the custom header. | ||
1317 | * A custom header may or may not be present, depending upon the IPP; its | ||
1318 | * contents and alignment are also IPP-dependent. Currently, none of the | ||
1319 | * standard IPPs supplied by Tilera produce a custom header. If present, | ||
1320 | * the custom header precedes the L2 header in the packet buffer. | ||
1321 | * @ingroup pktfuncs | ||
1322 | * | ||
1323 | * @param[in] pkt Packet on which to operate. | ||
1324 | * @return The length of the packet, in bytes. | ||
1325 | */ | ||
1326 | static __inline netio_size_t | ||
1327 | NETIO_PKT_CUSTOM_LENGTH(netio_pkt_t* pkt) | ||
1328 | { | ||
1329 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1330 | |||
1331 | return NETIO_PKT_CUSTOM_LENGTH_M(mda, pkt); | ||
1332 | } | ||
1333 | |||
1334 | |||
1335 | /** Return a pointer to the packet's custom header. | ||
1336 | * A custom header may or may not be present, depending upon the IPP; its | ||
1337 | * contents and alignment are also IPP-dependent. Currently, none of the | ||
1338 | * standard IPPs supplied by Tilera produce a custom header. If present, | ||
1339 | * the custom header precedes the L2 header in the packet buffer. | ||
1340 | * @ingroup pktfuncs | ||
1341 | * | ||
1342 | * @param[in] pkt Packet on which to operate. | ||
1343 | * @return A pointer to the start of the packet. | ||
1344 | */ | ||
1345 | static __inline unsigned char* | ||
1346 | NETIO_PKT_CUSTOM_DATA(netio_pkt_t* pkt) | ||
1347 | { | ||
1348 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1349 | |||
1350 | return NETIO_PKT_CUSTOM_DATA_M(mda, pkt); | ||
1351 | } | ||
1352 | |||
1353 | |||
1354 | /** Return the length of the packet's L2 (Ethernet plus VLAN or SNAP) header. | ||
1355 | * @ingroup pktfuncs | ||
1356 | * | ||
1357 | * @param[in] pkt Packet on which to operate. | ||
1358 | * @return The length of the packet's L2 header, in bytes. | ||
1359 | */ | ||
1360 | static __inline netio_size_t | ||
1361 | NETIO_PKT_L2_HEADER_LENGTH(netio_pkt_t* pkt) | ||
1362 | { | ||
1363 | if (NETIO_PKT_IS_MINIMAL(pkt)) | ||
1364 | { | ||
1365 | netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt); | ||
1366 | |||
1367 | return NETIO_PKT_L2_HEADER_LENGTH_MM(mmd, pkt); | ||
1368 | } | ||
1369 | else | ||
1370 | { | ||
1371 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1372 | |||
1373 | return NETIO_PKT_L2_HEADER_LENGTH_M(mda, pkt); | ||
1374 | } | ||
1375 | } | ||
1376 | |||
1377 | |||
1378 | /** Return the length of the packet, starting with the L2 (Ethernet) header. | ||
1379 | * @ingroup pktfuncs | ||
1380 | * | ||
1381 | * @param[in] pkt Packet on which to operate. | ||
1382 | * @return The length of the packet, in bytes. | ||
1383 | */ | ||
1384 | static __inline netio_size_t | ||
1385 | NETIO_PKT_L2_LENGTH(netio_pkt_t* pkt) | ||
1386 | { | ||
1387 | if (NETIO_PKT_IS_MINIMAL(pkt)) | ||
1388 | { | ||
1389 | netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt); | ||
1390 | |||
1391 | return NETIO_PKT_L2_LENGTH_MM(mmd, pkt); | ||
1392 | } | ||
1393 | else | ||
1394 | { | ||
1395 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1396 | |||
1397 | return NETIO_PKT_L2_LENGTH_M(mda, pkt); | ||
1398 | } | ||
1399 | } | ||
1400 | |||
1401 | |||
1402 | /** Return a pointer to the packet's L2 (Ethernet) header. | ||
1403 | * @ingroup pktfuncs | ||
1404 | * | ||
1405 | * @param[in] pkt Packet on which to operate. | ||
1406 | * @return A pointer to the start of the packet. | ||
1407 | */ | ||
1408 | static __inline unsigned char* | ||
1409 | NETIO_PKT_L2_DATA(netio_pkt_t* pkt) | ||
1410 | { | ||
1411 | if (NETIO_PKT_IS_MINIMAL(pkt)) | ||
1412 | { | ||
1413 | netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt); | ||
1414 | |||
1415 | return NETIO_PKT_L2_DATA_MM(mmd, pkt); | ||
1416 | } | ||
1417 | else | ||
1418 | { | ||
1419 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1420 | |||
1421 | return NETIO_PKT_L2_DATA_M(mda, pkt); | ||
1422 | } | ||
1423 | } | ||
1424 | |||
1425 | |||
1426 | /** Retrieve the length of the packet, starting with the L3 (generally, the IP) | ||
1427 | * header. | ||
1428 | * @ingroup pktfuncs | ||
1429 | * | ||
1430 | * @param[in] pkt Packet on which to operate. | ||
1431 | * @return Length of the packet's L3 header and data, in bytes. | ||
1432 | */ | ||
1433 | static __inline netio_size_t | ||
1434 | NETIO_PKT_L3_LENGTH(netio_pkt_t* pkt) | ||
1435 | { | ||
1436 | if (NETIO_PKT_IS_MINIMAL(pkt)) | ||
1437 | { | ||
1438 | netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt); | ||
1439 | |||
1440 | return NETIO_PKT_L3_LENGTH_MM(mmd, pkt); | ||
1441 | } | ||
1442 | else | ||
1443 | { | ||
1444 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1445 | |||
1446 | return NETIO_PKT_L3_LENGTH_M(mda, pkt); | ||
1447 | } | ||
1448 | } | ||
1449 | |||
1450 | |||
1451 | /** Return a pointer to the packet's L3 (generally, the IP) header. | ||
1452 | * @ingroup pktfuncs | ||
1453 | * | ||
1454 | * Note that we guarantee word alignment of the L3 header. | ||
1455 | * | ||
1456 | * @param[in] pkt Packet on which to operate. | ||
1457 | * @return A pointer to the packet's L3 header. | ||
1458 | */ | ||
1459 | static __inline unsigned char* | ||
1460 | NETIO_PKT_L3_DATA(netio_pkt_t* pkt) | ||
1461 | { | ||
1462 | if (NETIO_PKT_IS_MINIMAL(pkt)) | ||
1463 | { | ||
1464 | netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt); | ||
1465 | |||
1466 | return NETIO_PKT_L3_DATA_MM(mmd, pkt); | ||
1467 | } | ||
1468 | else | ||
1469 | { | ||
1470 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1471 | |||
1472 | return NETIO_PKT_L3_DATA_M(mda, pkt); | ||
1473 | } | ||
1474 | } | ||
1475 | |||
1476 | |||
1477 | /** Return the ordinal of the packet. | ||
1478 | * @ingroup ingress | ||
1479 | * | ||
1480 | * Each packet is given an ordinal number when it is delivered by the IPP. | ||
1481 | * In the medium term, the ordinal is unique and monotonically increasing, | ||
1482 | * being incremented by 1 for each packet; the ordinal of the first packet | ||
1483 | * delivered after the IPP starts is zero. (Since the ordinal is of finite | ||
1484 | * size, given enough input packets, it will eventually wrap around to zero; | ||
1485 | * in the long term, therefore, ordinals are not unique.) The ordinals | ||
1486 | * handed out by different IPPs are not disjoint, so two packets from | ||
1487 | * different IPPs may have identical ordinals. Packets dropped by the | ||
1488 | * IPP or by the I/O shim are not assigned ordinals. | ||
1489 | * | ||
1490 | * | ||
1491 | * @param[in] pkt Packet on which to operate. | ||
1492 | * @return The packet's per-IPP packet ordinal. | ||
1493 | */ | ||
1494 | static __inline unsigned int | ||
1495 | NETIO_PKT_ORDINAL(netio_pkt_t* pkt) | ||
1496 | { | ||
1497 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1498 | |||
1499 | return NETIO_PKT_ORDINAL_M(mda, pkt); | ||
1500 | } | ||
1501 | |||
1502 | |||
1503 | /** Return the per-group ordinal of the packet. | ||
1504 | * @ingroup ingress | ||
1505 | * | ||
1506 | * Each packet is given a per-group ordinal number when it is | ||
1507 | * delivered by the IPP. By default, the group is the packet's VLAN, | ||
1508 | * although IPP can be recompiled to use different values. In | ||
1509 | * the medium term, the ordinal is unique and monotonically | ||
1510 | * increasing, being incremented by 1 for each packet; the ordinal of | ||
1511 | * the first packet distributed to a particular group is zero. | ||
1512 | * (Since the ordinal is of finite size, given enough input packets, | ||
1513 | * it will eventually wrap around to zero; in the long term, | ||
1514 | * therefore, ordinals are not unique.) The ordinals handed out by | ||
1515 | * different IPPs are not disjoint, so two packets from different IPPs | ||
1516 | * may have identical ordinals; similarly, packets distributed to | ||
1517 | * different groups may have identical ordinals. Packets dropped by | ||
1518 | * the IPP or by the I/O shim are not assigned ordinals. | ||
1519 | * | ||
1520 | * @param[in] pkt Packet on which to operate. | ||
1521 | * @return The packet's per-IPP, per-group ordinal. | ||
1522 | */ | ||
1523 | static __inline unsigned int | ||
1524 | NETIO_PKT_GROUP_ORDINAL(netio_pkt_t* pkt) | ||
1525 | { | ||
1526 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1527 | |||
1528 | return NETIO_PKT_GROUP_ORDINAL_M(mda, pkt); | ||
1529 | } | ||
1530 | |||
1531 | |||
1532 | /** Return the VLAN ID assigned to the packet. | ||
1533 | * @ingroup ingress | ||
1534 | * | ||
1535 | * This is usually also contained within the packet header. If the packet | ||
1536 | * does not have a VLAN tag, the VLAN ID returned by this function is zero. | ||
1537 | * | ||
1538 | * @param[in] pkt Packet on which to operate. | ||
1539 | * @return The packet's VLAN ID. | ||
1540 | */ | ||
1541 | static __inline unsigned short | ||
1542 | NETIO_PKT_VLAN_ID(netio_pkt_t* pkt) | ||
1543 | { | ||
1544 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1545 | |||
1546 | return NETIO_PKT_VLAN_ID_M(mda, pkt); | ||
1547 | } | ||
1548 | |||
1549 | |||
1550 | /** Return the ethertype of the packet. | ||
1551 | * @ingroup ingress | ||
1552 | * | ||
1553 | * This value is reliable if @ref NETIO_PKT_ETHERTYPE_RECOGNIZED() | ||
1554 | * returns true; otherwise, it may not be well defined. | ||
1555 | * | ||
1556 | * @param[in] pkt Packet on which to operate. | ||
1557 | * @return The packet's ethertype. | ||
1558 | */ | ||
1559 | static __inline unsigned short | ||
1560 | NETIO_PKT_ETHERTYPE(netio_pkt_t* pkt) | ||
1561 | { | ||
1562 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1563 | |||
1564 | return NETIO_PKT_ETHERTYPE_M(mda, pkt); | ||
1565 | } | ||
1566 | |||
1567 | |||
1568 | /** Return the flow hash computed on the packet. | ||
1569 | * @ingroup ingress | ||
1570 | * | ||
1571 | * For TCP and UDP packets, this hash is calculated by hashing together | ||
1572 | * the "5-tuple" values, specifically the source IP address, destination | ||
1573 | * IP address, protocol type, source port and destination port. | ||
1574 | * The hash value is intended to be helpful for millions of distinct | ||
1575 | * flows. | ||
1576 | * | ||
1577 | * For IPv4 or IPv6 packets which are neither TCP nor UDP, the flow hash is | ||
1578 | * derived by hashing together the source and destination IP addresses. | ||
1579 | * | ||
1580 | * For MPLS-encapsulated packets, the flow hash is derived by hashing | ||
1581 | * the first MPLS label. | ||
1582 | * | ||
1583 | * For all other packets the flow hash is computed from the source | ||
1584 | * and destination Ethernet addresses. | ||
1585 | * | ||
1586 | * The hash is symmetric, meaning it produces the same value if the | ||
1587 | * source and destination are swapped. The only exceptions are | ||
1588 | * tunneling protocols 0x04 (IP in IP Encapsulation), 0x29 (Simple | ||
1589 | * Internet Protocol), 0x2F (General Routing Encapsulation) and 0x32 | ||
1590 | * (Encap Security Payload), which use only the destination address | ||
1591 | * since the source address is not meaningful. | ||
1592 | * | ||
1593 | * @param[in] pkt Packet on which to operate. | ||
1594 | * @return The packet's 32-bit flow hash. | ||
1595 | */ | ||
1596 | static __inline unsigned int | ||
1597 | NETIO_PKT_FLOW_HASH(netio_pkt_t* pkt) | ||
1598 | { | ||
1599 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1600 | |||
1601 | return NETIO_PKT_FLOW_HASH_M(mda, pkt); | ||
1602 | } | ||
1603 | |||
1604 | |||
1605 | /** Return the first word of "user data" for the packet. | ||
1606 | * | ||
1607 | * The contents of the user data words depend on the IPP. | ||
1608 | * | ||
1609 | * When using the standard ipp1, ipp2, or ipp4 sub-drivers, the first | ||
1610 | * word of user data contains the least significant bits of the 64-bit | ||
1611 | * arrival cycle count (see @c get_cycle_count_low()). | ||
1612 | * | ||
1613 | * See the <em>System Programmer's Guide</em> for details. | ||
1614 | * | ||
1615 | * @ingroup ingress | ||
1616 | * | ||
1617 | * @param[in] pkt Packet on which to operate. | ||
1618 | * @return The packet's first word of "user data". | ||
1619 | */ | ||
1620 | static __inline unsigned int | ||
1621 | NETIO_PKT_USER_DATA_0(netio_pkt_t* pkt) | ||
1622 | { | ||
1623 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1624 | |||
1625 | return NETIO_PKT_USER_DATA_0_M(mda, pkt); | ||
1626 | } | ||
1627 | |||
1628 | |||
1629 | /** Return the second word of "user data" for the packet. | ||
1630 | * | ||
1631 | * The contents of the user data words depend on the IPP. | ||
1632 | * | ||
1633 | * When using the standard ipp1, ipp2, or ipp4 sub-drivers, the second | ||
1634 | * word of user data contains the most significant bits of the 64-bit | ||
1635 | * arrival cycle count (see @c get_cycle_count_high()). | ||
1636 | * | ||
1637 | * See the <em>System Programmer's Guide</em> for details. | ||
1638 | * | ||
1639 | * @ingroup ingress | ||
1640 | * | ||
1641 | * @param[in] pkt Packet on which to operate. | ||
1642 | * @return The packet's second word of "user data". | ||
1643 | */ | ||
1644 | static __inline unsigned int | ||
1645 | NETIO_PKT_USER_DATA_1(netio_pkt_t* pkt) | ||
1646 | { | ||
1647 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1648 | |||
1649 | return NETIO_PKT_USER_DATA_1_M(mda, pkt); | ||
1650 | } | ||
1651 | |||
1652 | |||
1653 | /** Determine whether the L4 (TCP/UDP) checksum was calculated. | ||
1654 | * @ingroup ingress | ||
1655 | * | ||
1656 | * @param[in] pkt Packet on which to operate. | ||
1657 | * @return Nonzero if the L4 checksum was calculated. | ||
1658 | */ | ||
1659 | static __inline unsigned int | ||
1660 | NETIO_PKT_L4_CSUM_CALCULATED(netio_pkt_t* pkt) | ||
1661 | { | ||
1662 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1663 | |||
1664 | return NETIO_PKT_L4_CSUM_CALCULATED_M(mda, pkt); | ||
1665 | } | ||
1666 | |||
1667 | |||
1668 | /** Determine whether the L4 (TCP/UDP) checksum was calculated and found to | ||
1669 | * be correct. | ||
1670 | * @ingroup ingress | ||
1671 | * | ||
1672 | * @param[in] pkt Packet on which to operate. | ||
1673 | * @return Nonzero if the checksum was calculated and is correct. | ||
1674 | */ | ||
1675 | static __inline unsigned int | ||
1676 | NETIO_PKT_L4_CSUM_CORRECT(netio_pkt_t* pkt) | ||
1677 | { | ||
1678 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1679 | |||
1680 | return NETIO_PKT_L4_CSUM_CORRECT_M(mda, pkt); | ||
1681 | } | ||
1682 | |||
1683 | |||
1684 | /** Determine whether the L3 (IP) checksum was calculated. | ||
1685 | * @ingroup ingress | ||
1686 | * | ||
1687 | * @param[in] pkt Packet on which to operate. | ||
1688 | * @return Nonzero if the L3 (IP) checksum was calculated. | ||
1689 | */ | ||
1690 | static __inline unsigned int | ||
1691 | NETIO_PKT_L3_CSUM_CALCULATED(netio_pkt_t* pkt) | ||
1692 | { | ||
1693 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1694 | |||
1695 | return NETIO_PKT_L3_CSUM_CALCULATED_M(mda, pkt); | ||
1696 | } | ||
1697 | |||
1698 | |||
1699 | /** Determine whether the L3 (IP) checksum was calculated and found to be | ||
1700 | * correct. | ||
1701 | * @ingroup ingress | ||
1702 | * | ||
1703 | * @param[in] pkt Packet on which to operate. | ||
1704 | * @return Nonzero if the checksum was calculated and is correct. | ||
1705 | */ | ||
1706 | static __inline unsigned int | ||
1707 | NETIO_PKT_L3_CSUM_CORRECT(netio_pkt_t* pkt) | ||
1708 | { | ||
1709 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1710 | |||
1711 | return NETIO_PKT_L3_CSUM_CORRECT_M(mda, pkt); | ||
1712 | } | ||
1713 | |||
1714 | |||
1715 | /** Determine whether the Ethertype was recognized and L3 packet data was | ||
1716 | * processed. | ||
1717 | * @ingroup ingress | ||
1718 | * | ||
1719 | * @param[in] pkt Packet on which to operate. | ||
1720 | * @return Nonzero if the Ethertype was recognized and L3 packet data was | ||
1721 | * processed. | ||
1722 | */ | ||
1723 | static __inline unsigned int | ||
1724 | NETIO_PKT_ETHERTYPE_RECOGNIZED(netio_pkt_t* pkt) | ||
1725 | { | ||
1726 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1727 | |||
1728 | return NETIO_PKT_ETHERTYPE_RECOGNIZED_M(mda, pkt); | ||
1729 | } | ||
1730 | |||
1731 | |||
1732 | /** Set an egress packet's L2 length, using a metadata pointer to speed the | ||
1733 | * computation. | ||
1734 | * @ingroup egress | ||
1735 | * | ||
1736 | * @param[in,out] mmd Pointer to packet's minimal metadata. | ||
1737 | * @param[in] pkt Packet on which to operate. | ||
1738 | * @param[in] len Packet L2 length, in bytes. | ||
1739 | */ | ||
1740 | static __inline void | ||
1741 | NETIO_PKT_SET_L2_LENGTH_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt, | ||
1742 | int len) | ||
1743 | { | ||
1744 | mmd->l2_length = len; | ||
1745 | } | ||
1746 | |||
1747 | |||
1748 | /** Set an egress packet's L2 length. | ||
1749 | * @ingroup egress | ||
1750 | * | ||
1751 | * @param[in,out] pkt Packet on which to operate. | ||
1752 | * @param[in] len Packet L2 length, in bytes. | ||
1753 | */ | ||
1754 | static __inline void | ||
1755 | NETIO_PKT_SET_L2_LENGTH(netio_pkt_t* pkt, int len) | ||
1756 | { | ||
1757 | netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt); | ||
1758 | |||
1759 | NETIO_PKT_SET_L2_LENGTH_MM(mmd, pkt, len); | ||
1760 | } | ||
1761 | |||
1762 | |||
1763 | /** Set an egress packet's L2 header length, using a metadata pointer to | ||
1764 | * speed the computation. | ||
1765 | * @ingroup egress | ||
1766 | * | ||
1767 | * It is not normally necessary to call this routine; only the L2 length, | ||
1768 | * not the header length, is needed to transmit a packet. It may be useful if | ||
1769 | * the egress packet will later be processed by code which expects to use | ||
1770 | * functions like @ref NETIO_PKT_L3_DATA() to get a pointer to the L3 payload. | ||
1771 | * | ||
1772 | * @param[in,out] mmd Pointer to packet's minimal metadata. | ||
1773 | * @param[in] pkt Packet on which to operate. | ||
1774 | * @param[in] len Packet L2 header length, in bytes. | ||
1775 | */ | ||
1776 | static __inline void | ||
1777 | NETIO_PKT_SET_L2_HEADER_LENGTH_MM(netio_pkt_minimal_metadata_t* mmd, | ||
1778 | netio_pkt_t* pkt, int len) | ||
1779 | { | ||
1780 | mmd->l3_offset = mmd->l2_offset + len; | ||
1781 | } | ||
1782 | |||
1783 | |||
1784 | /** Set an egress packet's L2 header length. | ||
1785 | * @ingroup egress | ||
1786 | * | ||
1787 | * It is not normally necessary to call this routine; only the L2 length, | ||
1788 | * not the header length, is needed to transmit a packet. It may be useful if | ||
1789 | * the egress packet will later be processed by code which expects to use | ||
1790 | * functions like @ref NETIO_PKT_L3_DATA() to get a pointer to the L3 payload. | ||
1791 | * | ||
1792 | * @param[in,out] pkt Packet on which to operate. | ||
1793 | * @param[in] len Packet L2 header length, in bytes. | ||
1794 | */ | ||
1795 | static __inline void | ||
1796 | NETIO_PKT_SET_L2_HEADER_LENGTH(netio_pkt_t* pkt, int len) | ||
1797 | { | ||
1798 | netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt); | ||
1799 | |||
1800 | NETIO_PKT_SET_L2_HEADER_LENGTH_MM(mmd, pkt, len); | ||
1801 | } | ||
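
/*
 * Illustrative sketch, not part of the NetIO API: after filling an egress
 * buffer with a frame, record its total L2 length and, if later stages will
 * use the L3 accessors, the L2 header length as well.  The value 14 assumes
 * an untagged Ethernet header; a VLAN-tagged frame would use 18.
 */
static __inline void
netio_example_finish_egress_frame(netio_pkt_t* pkt, int frame_len)
{
  NETIO_PKT_SET_L2_LENGTH(pkt, frame_len);
  NETIO_PKT_SET_L2_HEADER_LENGTH(pkt, 14);  /* Untagged Ethernet header. */
}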
1802 | |||
1803 | |||
1804 | /** Set up an egress packet for hardware checksum computation, using a | ||
1805 | * metadata pointer to speed the operation. | ||
1806 | * @ingroup egress | ||
1807 | * | ||
1808 | * NetIO provides the ability to automatically calculate a standard | ||
1809 | * 16-bit Internet checksum on transmitted packets. The application | ||
1810 | * may specify the point in the packet where the checksum starts, the | ||
1811 | * number of bytes to be checksummed, and the two bytes in the packet | ||
1812 | * which will be replaced with the completed checksum. (If the range | ||
1813 | * of bytes to be checksummed includes the bytes to be replaced, the | ||
1814 | * initial values of those bytes will be included in the checksum.) | ||
1815 | * | ||
1816 | * For some protocols, the packet checksum covers data which is not present | ||
1817 | * in the packet, or is at least not contiguous to the main data payload. | ||
1818 | * For instance, the TCP checksum includes a "pseudo-header" which includes | ||
1819 | * the source and destination IP addresses of the packet. To accommodate | ||
1820 | * this, the checksum engine may be "seeded" with an initial value, which | ||
1821 | * the application would need to compute based on the specific protocol's | ||
1822 | * requirements. Note that the seed is given in host byte order (little- | ||
1823 | * endian), not network byte order (big-endian); code written to compute a | ||
1824 | * pseudo-header checksum in network byte order will need to byte-swap it | ||
1825 | * before use as the seed. | ||
1826 | * | ||
1827 | * Note that the checksum is computed as part of the transmission process, | ||
1828 | * so it will not be present in the packet upon completion of this routine. | ||
1829 | * | ||
1830 | * @param[in,out] mmd Pointer to packet's minimal metadata. | ||
1831 | * @param[in] pkt Packet on which to operate. | ||
1832 | * @param[in] start Offset within L2 packet of the first byte to include in | ||
1833 | * the checksum. | ||
1834 | * @param[in] length Number of bytes to include in the checksum. | ||
1835 | * | ||
1836 | * @param[in] location Offset within L2 packet of the first of the two bytes | ||
1837 | * to be replaced with the calculated checksum. | ||
1838 | * @param[in] seed Initial value of the running checksum before any of the | ||
1839 | * packet data is added. | ||
1840 | */ | ||
1841 | static __inline void | ||
1842 | NETIO_PKT_DO_EGRESS_CSUM_MM(netio_pkt_minimal_metadata_t* mmd, | ||
1843 | netio_pkt_t* pkt, int start, int length, | ||
1844 | int location, uint16_t seed) | ||
1845 | { | ||
1846 | mmd->csum_start = start; | ||
1847 | mmd->csum_length = length; | ||
1848 | mmd->csum_location = location; | ||
1849 | mmd->csum_seed = seed; | ||
1850 | mmd->flags |= _NETIO_PKT_NEED_EDMA_CSUM_MASK; | ||
1851 | } | ||
1852 | |||
1853 | |||
1854 | /** Set up an egress packet for hardware checksum computation. | ||
1855 | * @ingroup egress | ||
1856 | * | ||
1857 | * NetIO provides the ability to automatically calculate a standard | ||
1858 | * 16-bit Internet checksum on transmitted packets. The application | ||
1859 | * may specify the point in the packet where the checksum starts, the | ||
1860 | * number of bytes to be checksummed, and the two bytes in the packet | ||
1861 | * which will be replaced with the completed checksum. (If the range | ||
1862 | * of bytes to be checksummed includes the bytes to be replaced, the | ||
1863 | * initial values of those bytes will be included in the checksum.) | ||
1864 | * | ||
1865 | * For some protocols, the packet checksum covers data which is not present | ||
1866 | * in the packet, or is at least not contiguous to the main data payload. | ||
1867 | * For instance, the TCP checksum includes a "pseudo-header" which includes | ||
1868 | * the source and destination IP addresses of the packet. To accommodate | ||
1869 | * this, the checksum engine may be "seeded" with an initial value, which | ||
1870 | * the application would need to compute based on the specific protocol's | ||
1871 | * requirements. Note that the seed is given in host byte order (little- | ||
1872 | * endian), not network byte order (big-endian); code written to compute a | ||
1873 | * pseudo-header checksum in network byte order will need to byte-swap it | ||
1874 | * before use as the seed. | ||
1875 | * | ||
1876 | * Note that the checksum is computed as part of the transmission process, | ||
1877 | * so it will not be present in the packet upon completion of this routine. | ||
1878 | * | ||
1879 | * @param[in,out] pkt Packet on which to operate. | ||
1880 | * @param[in] start Offset within L2 packet of the first byte to include in | ||
1881 | * the checksum. | ||
1882 | * @param[in] length Number of bytes to include in the checksum. | ||
1883 | * | ||
1884 | * @param[in] location Offset within L2 packet of the first of the two bytes | ||
1885 | * to be replaced with the calculated checksum. | ||
1886 | * @param[in] seed Initial value of the running checksum before any of the | ||
1887 | * packet data is added. | ||
1888 | */ | ||
1889 | static __inline void | ||
1890 | NETIO_PKT_DO_EGRESS_CSUM(netio_pkt_t* pkt, int start, int length, | ||
1891 | int location, uint16_t seed) | ||
1892 | { | ||
1893 | netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt); | ||
1894 | |||
1895 | NETIO_PKT_DO_EGRESS_CSUM_MM(mmd, pkt, start, length, location, seed); | ||
1896 | } | ||
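
/*
 * Illustrative sketch, not part of the NetIO API: request hardware
 * checksumming of a UDP datagram carried in an untagged IPv4 frame.  The
 * offsets assume a 14-byte Ethernet header and a 20-byte IPv4 header with no
 * options; 'pseudo_hdr_seed' is the precomputed pseudo-header checksum,
 * already folded to 16 bits and given in host byte order as described above.
 */
static __inline void
netio_example_udp_egress_csum(netio_pkt_t* pkt, int udp_len,
                              uint16_t pseudo_hdr_seed)
{
  int l4_start = 14 + 20;        /* Start of the UDP header within the frame. */
  int csum_field = l4_start + 6; /* Offset of the UDP checksum field. */

  NETIO_PKT_DO_EGRESS_CSUM(pkt, l4_start, udp_len, csum_field,
                           pseudo_hdr_seed);
}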
1897 | |||
1898 | |||
1899 | /** Return the number of bytes which could be prepended to a packet, using a | ||
1900 | * metadata pointer to speed the operation. | ||
1901 | * See @ref netio_populate_prepend_buffer() to get a full description of | ||
1902 | * prepending. | ||
1903 | * @ingroup egress | ||
1904 | * @param[in] mda Pointer to packet's standard metadata. | ||
1905 | * @param[in] pkt Packet on which to operate. | ||
1906 | */ | ||
1907 | static __inline int | ||
1908 | NETIO_PKT_PREPEND_AVAIL_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
1909 | { | ||
1910 | return (pkt->__packet.bits.__offset << 6) + | ||
1911 | NETIO_PKT_CUSTOM_HEADER_LENGTH_M(mda, pkt); | ||
1912 | } | ||
1913 | |||
1914 | |||
1915 | /** Return the number of bytes which could be prepended to a packet, using a | ||
1916 | * metadata pointer to speed the operation. | ||
1917 | * See @ref netio_populate_prepend_buffer() to get a full description of | ||
1918 | * prepending. | ||
1919 | * @ingroup egress | ||
1920 | * | ||
1921 | * @param[in] mmd Pointer to packet's minimal metadata. | ||
1922 | * @param[in] pkt Packet on which to operate. | ||
1923 | */ | ||
1924 | static __inline int | ||
1925 | NETIO_PKT_PREPEND_AVAIL_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt) | ||
1926 | { | ||
1927 | return (pkt->__packet.bits.__offset << 6) + mmd->l2_offset; | ||
1928 | } | ||
1929 | |||
1930 | |||
1931 | /** Return the number of bytes which could be prepended to a packet. | ||
1932 | * See @ref netio_populate_prepend_buffer() to get a full description of | ||
1933 | * prepending. | ||
1934 | * @ingroup egress | ||
1935 | * | ||
1936 | * @param[in] pkt Packet on which to operate. | ||
1937 | */ | ||
1938 | static __inline int | ||
1939 | NETIO_PKT_PREPEND_AVAIL(netio_pkt_t* pkt) | ||
1940 | { | ||
1941 | if (NETIO_PKT_IS_MINIMAL(pkt)) | ||
1942 | { | ||
1943 | netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt); | ||
1944 | |||
1945 | return NETIO_PKT_PREPEND_AVAIL_MM(mmd, pkt); | ||
1946 | } | ||
1947 | else | ||
1948 | { | ||
1949 | netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt); | ||
1950 | |||
1951 | return NETIO_PKT_PREPEND_AVAIL_M(mda, pkt); | ||
1952 | } | ||
1953 | } | ||
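
/*
 * Illustrative sketch, not part of the NetIO API: before prepending an
 * encapsulation header (see netio_populate_prepend_buffer()), check that the
 * buffer actually has room in front of the current packet data.
 */
static __inline int
netio_example_can_prepend(netio_pkt_t* pkt, int hdr_len)
{
  return NETIO_PKT_PREPEND_AVAIL(pkt) >= hdr_len;
}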
1954 | |||
1955 | |||
1956 | /** Flush a packet's minimal metadata from the cache, using a metadata pointer | ||
1957 | * to speed the operation. | ||
1958 | * @ingroup egress | ||
1959 | * | ||
1960 | * @param[in] mmd Pointer to packet's minimal metadata. | ||
1961 | * @param[in] pkt Packet on which to operate. | ||
1962 | */ | ||
1963 | static __inline void | ||
1964 | NETIO_PKT_FLUSH_MINIMAL_METADATA_MM(netio_pkt_minimal_metadata_t* mmd, | ||
1965 | netio_pkt_t* pkt) | ||
1966 | { | ||
1967 | } | ||
1968 | |||
1969 | |||
1970 | /** Invalidate a packet's minimal metadata from the cache, using a metadata | ||
1971 | * pointer to speed the operation. | ||
1972 | * @ingroup egress | ||
1973 | * | ||
1974 | * @param[in] mmd Pointer to packet's minimal metadata. | ||
1975 | * @param[in] pkt Packet on which to operate. | ||
1976 | */ | ||
1977 | static __inline void | ||
1978 | NETIO_PKT_INV_MINIMAL_METADATA_MM(netio_pkt_minimal_metadata_t* mmd, | ||
1979 | netio_pkt_t* pkt) | ||
1980 | { | ||
1981 | } | ||
1982 | |||
1983 | |||
1984 | /** Flush and then invalidate a packet's minimal metadata from the cache, | ||
1985 | * using a metadata pointer to speed the operation. | ||
1986 | * @ingroup egress | ||
1987 | * | ||
1988 | * @param[in] mmd Pointer to packet's minimal metadata. | ||
1989 | * @param[in] pkt Packet on which to operate. | ||
1990 | */ | ||
1991 | static __inline void | ||
1992 | NETIO_PKT_FLUSH_INV_MINIMAL_METADATA_MM(netio_pkt_minimal_metadata_t* mmd, | ||
1993 | netio_pkt_t* pkt) | ||
1994 | { | ||
1995 | } | ||
1996 | |||
1997 | |||
1998 | /** Flush a packet's metadata from the cache, using a metadata pointer | ||
1999 | * to speed the operation. | ||
2000 | * @ingroup ingress | ||
2001 | * | ||
2002 | * @param[in] mda Pointer to packet's metadata. | ||
2003 | * @param[in] pkt Packet on which to operate. | ||
2004 | */ | ||
2005 | static __inline void | ||
2006 | NETIO_PKT_FLUSH_METADATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
2007 | { | ||
2008 | } | ||
2009 | |||
2010 | |||
2011 | /** Invalidate a packet's metadata from the cache, using a metadata | ||
2012 | * pointer to speed the operation. | ||
2013 | * @ingroup ingress | ||
2014 | * | ||
2015 | * @param[in] mda Pointer to packet's metadata. | ||
2016 | * @param[in] pkt Packet on which to operate. | ||
2017 | */ | ||
2018 | static __inline void | ||
2019 | NETIO_PKT_INV_METADATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
2020 | { | ||
2021 | } | ||
2022 | |||
2023 | |||
2024 | /** Flush and then invalidate a packet's metadata from the cache, | ||
2025 | * using a metadata pointer to speed the operation. | ||
2026 | * @ingroup ingress | ||
2027 | * | ||
2028 | * @param[in] mda Pointer to packet's metadata. | ||
2029 | * @param[in] pkt Packet on which to operate. | ||
2030 | */ | ||
2031 | static __inline void | ||
2032 | NETIO_PKT_FLUSH_INV_METADATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt) | ||
2033 | { | ||
2034 | } | ||
2035 | |||
2036 | |||
2037 | /** Flush a packet's minimal metadata from the cache. | ||
2038 | * @ingroup egress | ||
2039 | * | ||
2040 | * @param[in] pkt Packet on which to operate. | ||
2041 | */ | ||
2042 | static __inline void | ||
2043 | NETIO_PKT_FLUSH_MINIMAL_METADATA(netio_pkt_t* pkt) | ||
2044 | { | ||
2045 | } | ||
2046 | |||
2047 | |||
2048 | /** Invalidate a packet's minimal metadata from the cache. | ||
2049 | * @ingroup egress | ||
2050 | * | ||
2051 | * @param[in] pkt Packet on which to operate. | ||
2052 | */ | ||
2053 | static __inline void | ||
2054 | NETIO_PKT_INV_MINIMAL_METADATA(netio_pkt_t* pkt) | ||
2055 | { | ||
2056 | } | ||
2057 | |||
2058 | |||
2059 | /** Flush and then invalidate a packet's minimal metadata from the cache. | ||
2060 | * @ingroup egress | ||
2061 | * | ||
2062 | * @param[in] pkt Packet on which to operate. | ||
2063 | */ | ||
2064 | static __inline void | ||
2065 | NETIO_PKT_FLUSH_INV_MINIMAL_METADATA(netio_pkt_t* pkt) | ||
2066 | { | ||
2067 | } | ||
2068 | |||
2069 | |||
2070 | /** Flush a packet's metadata from the cache. | ||
2071 | * @ingroup ingress | ||
2072 | * | ||
2073 | * @param[in] pkt Packet on which to operate. | ||
2074 | */ | ||
2075 | static __inline void | ||
2076 | NETIO_PKT_FLUSH_METADATA(netio_pkt_t* pkt) | ||
2077 | { | ||
2078 | } | ||
2079 | |||
2080 | |||
2081 | /** Invalidate a packet's metadata from the cache. | ||
2082 | * @ingroup ingress | ||
2083 | * | ||
2084 | * @param[in] pkt Packet on which to operate. | ||
2085 | */ | ||
2086 | static __inline void | ||
2087 | NETIO_PKT_INV_METADATA(netio_pkt_t* pkt) | ||
2088 | { | ||
2089 | } | ||
2090 | |||
2091 | |||
2092 | /** Flush and then invalidate a packet's metadata from the cache. | ||
2093 | * @ingroup ingress | ||
2094 | * | ||
2095 | * @param[in] pkt Packet on which to operate. | ||
2096 | */ | ||
2097 | static __inline void | ||
2098 | NETIO_PKT_FLUSH_INV_METADATA(netio_pkt_t* pkt) | ||
2099 | { | ||
2100 | } | ||
2101 | |||
2102 | /** Number of NUMA nodes we can distribute buffers to. | ||
2103 | * @ingroup setup */ | ||
2104 | #define NETIO_NUM_NODE_WEIGHTS 16 | ||
2105 | |||
2106 | /** | ||
2107 | * @brief An object for specifying the characteristics of NetIO communication | ||
2108 | * endpoint. | ||
2109 | * | ||
2110 | * @ingroup setup | ||
2111 | * | ||
2112 | * The @ref netio_input_register() function uses this structure to define | ||
2113 | * how an application tile will communicate with an IPP. | ||
2114 | * | ||
2115 | * | ||
2116 | * Future updates to NetIO may add new members to this structure, | ||
2117 | * which can affect the success of the registration operation. Thus, | ||
2118 | * if dynamically initializing the structure, applications are urged to | ||
2119 | * zero it out first, for example: | ||
2120 | * | ||
2121 | * @code | ||
2122 | * netio_input_config_t config; | ||
2123 | * memset(&config, 0, sizeof (config)); | ||
2124 | * config.flags = NETIO_RECV | NETIO_XMIT_CSUM | NETIO_TAG_NONE; | ||
2125 | * config.num_receive_packets = NETIO_MAX_RECEIVE_PKTS; | ||
2126 | * config.queue_id = 0; | ||
2127 | * . | ||
2128 | * . | ||
2129 | * . | ||
2130 | * @endcode | ||
2131 | * | ||
2132 | * since that guarantees that any unused structure members, including | ||
2133 | * members which did not exist when the application was first developed, | ||
2134 | * will not have unexpected values. | ||
2135 | * | ||
2136 | * If statically initializing the structure, we strongly recommend use of | ||
2137 | * C99-style named initializers, for example: | ||
2138 | * | ||
2139 | * @code | ||
2140 | * netio_input_config_t config = { | ||
2141 | * .flags = NETIO_RECV | NETIO_XMIT_CSUM | NETIO_TAG_NONE, | ||
2142 | * .num_receive_packets = NETIO_MAX_RECEIVE_PKTS, | ||
2143 | * .queue_id = 0, | ||
2144 | * }; | ||
2145 | * @endcode | ||
2146 | * | ||
2147 | * instead of the old-style structure initialization: | ||
2148 | * | ||
2149 | * @code | ||
2150 | * // Bad example! Currently equivalent to the above, but don't do this. | ||
2151 | * netio_input_config_t config = { | ||
2152 | * NETIO_RECV | NETIO_XMIT_CSUM | NETIO_TAG_NONE, NETIO_MAX_RECEIVE_PKTS, 0 | ||
2153 | * }; | ||
2154 | * @endcode | ||
2155 | * | ||
2156 | * since the C99 style requires no changes to the code if elements of the | ||
2157 | * config structure are rearranged. (It also makes the initialization much | ||
2158 | * easier to understand.) | ||
2159 | * | ||
2160 | * Except for items which address a particular tile's transmit or receive | ||
2161 | * characteristics, such as the ::NETIO_RECV flag, applications are advised | ||
2162 | * to specify the same set of configuration data on all registrations. | ||
2163 | * This prevents differing results if multiple tiles happen to do their | ||
2164 | * registration operations in a different order on different invocations of | ||
2165 | * the application. This is particularly important for things like link | ||
2166 | * management flags, and buffer size and homing specifications. | ||
2167 | * | ||
2168 | * Unless the ::NETIO_FIXED_BUFFER_VA flag is specified in flags, the NetIO | ||
2169 | * buffer pool is automatically created and mapped into the application's | ||
2170 | * virtual address space at an address chosen by the operating system, | ||
2171 | * using the common memory (cmem) facility in the Tilera Multicore | ||
2172 | * Components library. The cmem facility allows multiple processes to gain | ||
2173 | * access to shared memory which is mapped into each process at an | ||
2174 | * identical virtual address. In order for this to work, the processes | ||
2175 | * must have a common ancestor, which must create the common memory using | ||
2176 | * tmc_cmem_init(). | ||
2177 | * | ||
2178 | * In programs using the iLib process creation API, or in programs which use | ||
2179 | * only one process (which include programs using the pthreads library), | ||
2180 | * tmc_cmem_init() is called automatically. All other applications | ||
2181 | * must call it explicitly, before any child processes which might call | ||
2182 | * netio_input_register() are created. | ||
2183 | */ | ||
2184 | typedef struct | ||
2185 | { | ||
2186 | /** Registration characteristics. | ||
2187 | |||
2188 | This value determines several characteristics of the registration; | ||
2189 | flags for different types of behavior are ORed together to make the | ||
2190 | final flag value. Generally applications should specify exactly | ||
2191 | one flag from each of the following categories: | ||
2192 | |||
2193 | - Whether the application will be receiving packets on this queue | ||
2194 | (::NETIO_RECV or ::NETIO_NO_RECV). | ||
2195 | |||
2196 | - Whether the application will be transmitting packets on this queue, | ||
2197 | and if so, whether it will request egress checksum calculation | ||
2198 | (::NETIO_XMIT, ::NETIO_XMIT_CSUM, or ::NETIO_NO_XMIT). It is | ||
2199 | legal to call netio_get_buffer() without one of the XMIT flags, | ||
2200 | as long as ::NETIO_RECV is specified; in this case, the retrieved | ||
2201 | buffers must be passed to another tile for transmission. | ||
2202 | |||
2203 | - Whether the application expects any vendor-specific tags in | ||
2204 | its packets' L2 headers (::NETIO_TAG_NONE, ::NETIO_TAG_BRCM, | ||
2205 | or ::NETIO_TAG_MRVL). This must match the configuration of the | ||
2206 | target IPP. | ||
2207 | |||
2208 | To accommodate applications written to previous versions of the NetIO | ||
2209 | interface, none of the flags above are currently required; if omitted, | ||
2210 | NetIO behaves more or less as if ::NETIO_RECV | ::NETIO_XMIT_CSUM | | ||
2211 | ::NETIO_TAG_NONE were used. However, explicit specification of | ||
2212 | the relevant flags allows NetIO to do a better job of resource | ||
2213 | allocation, allows earlier detection of certain configuration errors, | ||
2214 | and may enable advanced features or higher performance in the future, | ||
2215 | so their use is strongly recommended. | ||
2216 | |||
2217 | Note that specifying ::NETIO_NO_RECV along with ::NETIO_NO_XMIT | ||
2218 | is a special case, intended primarily for use by programs which | ||
2219 | retrieve network statistics or do link management operations. | ||
2220 | When these flags are both specified, the resulting queue may not | ||
2221 | be used with NetIO routines other than netio_get(), netio_set(), | ||
2222 | and netio_input_unregister(). See @ref link for more information | ||
2223 | on link management. | ||
2224 | |||
2225 | Other flags are optional; their use is described below. | ||
2226 | */ | ||
2227 | int flags; | ||
2228 | |||
2229 | /** Interface name. This is a string which identifies the specific | ||
2230 | Ethernet controller hardware to be used. The format of the string | ||
2231 | is a device type and a device index, separated by a slash; so, | ||
2232 | the first 10 Gigabit Ethernet controller is named "xgbe/0", while | ||
2233 | the second 10/100/1000 Megabit Ethernet controller is named "gbe/1". | ||
2234 | */ | ||
2235 | const char* interface; | ||
2236 | |||
2237 | /** Receive packet queue size. This specifies the maximum number | ||
2238 | of ingress packets that can be received on this queue without | ||
2239 | being retrieved by @ref netio_get_packet(). If the IPP's distribution | ||
2240 | algorithm calls for a packet to be sent to this queue, and this | ||
2241 | number of packets are already pending there, the new packet | ||
2242 | will either be discarded, or sent to another tile registered | ||
2243 | for the same queue_id (see @ref drops). This value must | ||
2244 | be at least ::NETIO_MIN_RECEIVE_PKTS, may always be as large as | ||
2245 | ::NETIO_MAX_RECEIVE_PKTS, and may be even larger than that on certain | ||
2246 | interfaces. | ||
2247 | */ | ||
2248 | int num_receive_packets; | ||
2249 | |||
2250 | /** The queue ID being requested. Legal values for this range from 0 | ||
2251 | to ::NETIO_MAX_QUEUE_ID, inclusive. ::NETIO_MAX_QUEUE_ID is always | ||
2252 | greater than or equal to the number of tiles; this allows one queue | ||
2253 | for each tile, plus at least one additional queue. Some applications | ||
2254 | may wish to use the additional queue as a destination for unwanted | ||
2255 | packets, since packets delivered to queues for which no tiles have | ||
2256 | registered are discarded. | ||
2257 | */ | ||
2258 | unsigned int queue_id; | ||
2259 | |||
2260 | /** Maximum number of small send buffers to be held in the local empty | ||
2261 | buffer cache. This specifies the size of the area which holds | ||
2262 | empty small egress buffers requested from the IPP but not yet | ||
2263 | retrieved via @ref netio_get_buffer(). This value must be greater | ||
2264 | than zero if the application will ever use @ref netio_get_buffer() | ||
2265 | to allocate empty small egress buffers; it may be no larger than | ||
2266 | ::NETIO_MAX_SEND_BUFFERS. See @ref epp for more details on empty | ||
2267 | buffer caching. | ||
2268 | */ | ||
2269 | int num_send_buffers_small_total; | ||
2270 | |||
2271 | /** Number of small send buffers to be preallocated at registration. | ||
2272 | If this value is nonzero, the specified number of empty small egress | ||
2273 | buffers will be requested from the IPP during the netio_input_register | ||
2274 | operation; this may speed the execution of @ref netio_get_buffer(). | ||
2275 | This may be no larger than @ref num_send_buffers_small_total. See @ref | ||
2276 | epp for more details on empty buffer caching. | ||
2277 | */ | ||
2278 | int num_send_buffers_small_prealloc; | ||
2279 | |||
2280 | /** Maximum number of large send buffers to be held in the local empty | ||
2281 | buffer cache. This specifies the size of the area which holds empty | ||
2282 | large egress buffers requested from the IPP but not yet retrieved via | ||
2283 | @ref netio_get_buffer(). This value must be greater than zero if the | ||
2284 | application will ever use @ref netio_get_buffer() to allocate empty | ||
2285 | large egress buffers; it may be no larger than ::NETIO_MAX_SEND_BUFFERS. | ||
2286 | See @ref epp for more details on empty buffer caching. | ||
2287 | */ | ||
2288 | int num_send_buffers_large_total; | ||
2289 | |||
2290 | /** Number of large send buffers to be preallocated at registration. | ||
2291 | If this value is nonzero, the specified number of empty large egress | ||
2292 | buffers will be requested from the IPP during the netio_input_register | ||
2293 | operation; this may speed the execution of @ref netio_get_buffer(). | ||
2294 | This may be no larger than @ref num_send_buffers_large_total. See @ref | ||
2295 | epp for more details on empty buffer caching. | ||
2296 | */ | ||
2297 | int num_send_buffers_large_prealloc; | ||
2298 | |||
2299 | /** Maximum number of jumbo send buffers to be held in the local empty | ||
2300 | buffer cache. This specifies the size of the area which holds empty | ||
2301 | jumbo egress buffers requested from the IPP but not yet retrieved via | ||
2302 | @ref netio_get_buffer(). This value must be greater than zero if the | ||
2303 | application will ever use @ref netio_get_buffer() to allocate empty | ||
2304 | jumbo egress buffers; it may be no larger than ::NETIO_MAX_SEND_BUFFERS. | ||
2305 | See @ref epp for more details on empty buffer caching. | ||
2306 | */ | ||
2307 | int num_send_buffers_jumbo_total; | ||
2308 | |||
2309 | /** Number of jumbo send buffers to be preallocated at registration. | ||
2310 | If this value is nonzero, the specified number of empty jumbo egress | ||
2311 | buffers will be requested from the IPP during the netio_input_register | ||
2312 | operation; this may speed the execution of @ref netio_get_buffer(). | ||
2313 | This may be no larger than @ref num_send_buffers_jumbo_total. See @ref | ||
2314 | epp for more details on empty buffer caching. | ||
2315 | */ | ||
2316 | int num_send_buffers_jumbo_prealloc; | ||
2317 | |||
2318 | /** Total packet buffer size. This determines the total size, in bytes, | ||
2319 | of the NetIO buffer pool. Note that the maximum number of available | ||
2320 | buffers of each size is determined during hypervisor configuration | ||
2321 | (see the <em>System Programmer's Guide</em> for details); this just | ||
2322 | influences how much host memory is allocated for those buffers. | ||
2323 | |||
2324 | The buffer pool is allocated from common memory, which will be | ||
2325 | automatically initialized if needed. If your buffer pool is larger | ||
2326 | than 240 MB, you might need to explicitly call @c tmc_cmem_init(), | ||
2327 | as described in the Application Libraries Reference Manual (UG227). | ||
2328 | |||
2329 | Packet buffers are currently allocated in chunks of 16 MB; this | ||
2330 | value will be rounded up to the next larger multiple of 16 MB. | ||
2331 | If this value is zero, a default of 32 MB will be used; this was | ||
2332 | the value used by previous versions of NetIO. Note that taking this | ||
2333 | default also affects the placement of buffers on Linux NUMA nodes. | ||
2334 | See @ref buffer_node_weights for an explanation of buffer placement. | ||
2335 | |||
2336 | In order to successfully allocate packet buffers, Linux must have | ||
2337 | available huge pages on the relevant Linux NUMA nodes. See the | ||
2338 | <em>System Programmer's Guide</em> for information on configuring | ||
2339 | huge page support in Linux. | ||
2340 | */ | ||
2341 | uint64_t total_buffer_size; | ||
2342 | |||
2343 | /** Buffer placement weighting factors. | ||
2344 | |||
2345 | This array specifies the relative amount of buffering to place | ||
2346 | on each of the available Linux NUMA nodes. This array is | ||
2347 | indexed by the NUMA node, and the values in the array are | ||
2348 | proportional to the amount of buffer space to allocate on that | ||
2349 | node. | ||
2350 | |||
2351 | If memory striping is enabled in the Hypervisor, then there is | ||
2352 | only one logical NUMA node (node 0). In that case, NetIO will by | ||
2353 | default ignore the suggested buffer node weights, and buffers | ||
2354 | will be striped across the physical memory controllers. See | ||
2355 | UG209 System Programmer's Guide for a description of the | ||
2356 | hypervisor option that controls memory striping. | ||
2357 | |||
2358 | If memory striping is disabled, then there are up to four NUMA | ||
2359 | nodes, corresponding to the four DDRAM controllers in the TILE | ||
2360 | processor architecture. See UG100 Tile Processor Architecture | ||
2361 | Overview for a diagram showing the location of each of the DDRAM | ||
2362 | controllers relative to the tile array. | ||
2363 | |||
2364 | For instance, if memory striping is disabled, the following | ||
2365 | configuration structure: | ||
2366 | |||
2367 | @code | ||
2368 | netio_input_config_t config = { | ||
2369 | . | ||
2370 | . | ||
2371 | . | ||
2372 | .total_buffer_size = 4 * 16 * 1024 * 1024, | ||
2373 | .buffer_node_weights = { 1, 0, 1, 0 }, | ||
2374 | }; | ||
2375 | @endcode | ||
2376 | |||
2377 | would result in 32 MB of buffers being placed on controller 0, and | ||
2378 | 32 MB on controller 2. (Since buffers are allocated in units of | ||
2379 | 16 MB, some sets of weights will not be able to be matched exactly.) | ||
2380 | |||
2381 | For the weights to be effective, @ref total_buffer_size must be | ||
2382 | nonzero. If @ref total_buffer_size is zero, causing the default | ||
2383 | 32 MB of buffer space to be used, then any specified weights will | ||
2384 | be ignored, and buffers will be positioned as they were in previous | ||
2385 | versions of NetIO: | ||
2386 | |||
2387 | - For xgbe/0 and gbe/0, 16 MB of buffers will be placed on controller 1, | ||
2388 | and the other 16 MB will be placed on controller 2. | ||
2389 | |||
2390 | - For xgbe/1 and gbe/1, 16 MB of buffers will be placed on controller 2, | ||
2391 | and the other 16 MB will be placed on controller 3. | ||
2392 | |||
2393 | If @ref total_buffer_size is nonzero, but all weights are zero, | ||
2394 | then all buffer space will be allocated on Linux NUMA node zero. | ||
2395 | |||
2396 | By default, the specified buffer placement is treated as a hint; | ||
2397 | if sufficient free memory is not available on the specified | ||
2398 | controllers, the buffers will be allocated elsewhere. However, | ||
2399 | if the ::NETIO_STRICT_HOMING flag is specified in @ref flags, then a | ||
2400 | failure to allocate buffer space exactly as requested will cause the | ||
2401 | registration operation to fail with an error of ::NETIO_CANNOT_HOME. | ||
2402 | |||
2403 | Note that maximal network performance cannot be achieved with | ||
2404 | only one memory controller. | ||
2405 | */ | ||
2406 | uint8_t buffer_node_weights[NETIO_NUM_NODE_WEIGHTS]; | ||
2407 | |||
2408 | /** Fixed virtual address for packet buffers. Only valid when | ||
2409 | ::NETIO_FIXED_BUFFER_VA is specified in @ref flags; see the | ||
2410 | description of that flag for details. | ||
2411 | */ | ||
2412 | void* fixed_buffer_va; | ||
2413 | |||
2414 | /** | ||
2415 | Maximum number of outstanding send packet requests. This value is | ||
2416 | only relevant when an EPP is in use; it determines the number of | ||
2417 | slots in the EPP's outgoing packet queue which this tile is allowed | ||
2418 | to consume, and thus the number of packets which may be sent before | ||
2419 | the sending tile must wait for an acknowledgment from the EPP. | ||
2420 | Modifying this value is generally only helpful when using @ref | ||
2421 | netio_send_packet_vector(), where it can help improve performance by | ||
2422 | allowing a single vector send operation to process more packets. | ||
2423 | Typically it is not specified, and the default, which divides the | ||
2424 | outgoing packet slots evenly between all tiles on the chip, is used. | ||
2425 | |||
2426 | If a registration asks for more outgoing packet queue slots than are | ||
2427 | available, ::NETIO_TOOMANY_XMIT will be returned. The total number | ||
2428 | of packet queue slots which are available for all tiles for each EPP | ||
2429 | is subject to change, but is currently ::NETIO_TOTAL_SENDS_OUTSTANDING. | ||
2430 | |||
2431 | |||
2432 | This value is ignored if ::NETIO_XMIT is not specified in flags. | ||
2433 | If you want to specify a large value here for a specific tile, you are | ||
2434 | advised to specify NETIO_NO_XMIT on other, non-transmitting tiles so | ||
2435 | that they do not consume a default number of packet slots. Any tile | ||
2436 | transmitting is required to have at least ::NETIO_MIN_SENDS_OUTSTANDING | ||
2437 | slots allocated to it; values less than that will be silently | ||
2438 | increased by the NetIO library. | ||
2439 | */ | ||
2440 | int num_sends_outstanding; | ||
2441 | } | ||
2442 | netio_input_config_t; | ||
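As a worked illustration of the structure above, here is a hedged sketch of a complete receive-and-transmit configuration. It uses only field names and macros defined in this header; the include path is an assumption, and the resulting structure would then be handed to netio_input_register(), whose prototype is not shown in this excerpt.

#include "netio_intf.h"   /* assumed header name */

/* Sketch: a receive queue with checksum-offload transmit on the first
 * 10 Gb interface, with 32 MB of buffers split evenly between memory
 * controllers 0 and 2. The weights take effect only because
 * total_buffer_size is nonzero, as explained above. */
static netio_input_config_t example_config = {
  .flags = NETIO_RECV | NETIO_XMIT_CSUM | NETIO_TAG_NONE,
  .interface = "xgbe/0",
  .num_receive_packets = NETIO_MAX_RECEIVE_PKTS,
  .queue_id = 0,
  .num_send_buffers_small_total = NETIO_MAX_SEND_BUFFERS,
  .num_send_buffers_small_prealloc = NETIO_MAX_SEND_BUFFERS,
  .total_buffer_size = 2 * 16 * 1024 * 1024,
  .buffer_node_weights = { 1, 0, 1, 0 },
};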
2443 | |||
2444 | |||
2445 | /** Registration flags; used in the @ref netio_input_config_t structure. | ||
2446 | * @addtogroup setup | ||
2447 | */ | ||
2448 | /** @{ */ | ||
2449 | |||
2450 | /** Fail a registration request if we can't put packet buffers | ||
2451 | on the specified memory controllers. */ | ||
2452 | #define NETIO_STRICT_HOMING 0x00000002 | ||
2453 | |||
2454 | /** This application expects no tags on its L2 headers. */ | ||
2455 | #define NETIO_TAG_NONE 0x00000004 | ||
2456 | |||
2457 | /** This application expects Marvell extended tags on its L2 headers. */ | ||
2458 | #define NETIO_TAG_MRVL 0x00000008 | ||
2459 | |||
2460 | /** This application expects Broadcom tags on its L2 headers. */ | ||
2461 | #define NETIO_TAG_BRCM 0x00000010 | ||
2462 | |||
2463 | /** This registration may call routines which receive packets. */ | ||
2464 | #define NETIO_RECV 0x00000020 | ||
2465 | |||
2466 | /** This registration may not call routines which receive packets. */ | ||
2467 | #define NETIO_NO_RECV 0x00000040 | ||
2468 | |||
2469 | /** This registration may call routines which transmit packets. */ | ||
2470 | #define NETIO_XMIT 0x00000080 | ||
2471 | |||
2472 | /** This registration may call routines which transmit packets with | ||
2473 | checksum acceleration. */ | ||
2474 | #define NETIO_XMIT_CSUM 0x00000100 | ||
2475 | |||
2476 | /** This registration may not call routines which transmit packets. */ | ||
2477 | #define NETIO_NO_XMIT 0x00000200 | ||
2478 | |||
2479 | /** This registration wants NetIO buffers mapped at an application-specified | ||
2480 | virtual address. | ||
2481 | |||
2482 | NetIO buffers are by default created by the TMC common memory facility, | ||
2483 | which must be configured by a common ancestor of all processes sharing | ||
2484 | a network interface. When this flag is specified, NetIO buffers are | ||
2485 | instead mapped at an address chosen by the application (and specified | ||
2486 | in @ref netio_input_config_t::fixed_buffer_va). This allows multiple | ||
2487 | unrelated but cooperating processes to share a NetIO interface. | ||
2488 | All processes sharing the same interface must specify this flag, | ||
2489 | and all must specify the same fixed virtual address. | ||
2490 | |||
2491 | @ref netio_input_config_t::fixed_buffer_va must be a | ||
2492 | multiple of 16 MB, and the packet buffers will occupy @ref | ||
2493 | netio_input_config_t::total_buffer_size bytes of virtual address | ||
2494 | space, beginning at that address. If any of those virtual addresses | ||
2495 | are currently occupied by other memory objects, like application or | ||
2496 | shared library code or data, @ref netio_input_register() will return | ||
2497 | ::NETIO_FAULT. While it is impossible to provide a fixed_buffer_va | ||
2498 | which will work for all applications, a good first guess might be to | ||
2499 | use 0xb0000000 minus @ref netio_input_config_t::total_buffer_size. | ||
2500 | If that fails, it might be helpful to consult the running application's | ||
2501 | virtual address description file (/proc/<em>pid</em>/maps) to see | ||
2502 | which regions of virtual address space are available. | ||
2503 | */ | ||
2504 | #define NETIO_FIXED_BUFFER_VA 0x00000400 | ||
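Following the "first guess" suggested above, a candidate address can be derived by subtracting the pool size from 0xb0000000. This is a hypothetical helper for illustration only; it assumes <stdint.h> types and that total_buffer_size is already a multiple of 16 MB, so the result is too.

/* Hypothetical helper: suggested starting value for fixed_buffer_va.
 * If netio_input_register() then fails with ::NETIO_FAULT, consult
 * /proc/<pid>/maps for a free region instead, as noted above. */
static void *suggest_fixed_buffer_va(uint64_t total_buffer_size)
{
  return (void *)(uintptr_t)(0xb0000000u - total_buffer_size);
}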
2505 | |||
2506 | /** This registration call will not complete unless the network link | ||
2507 | is up. The process will wait several seconds for this to happen (the | ||
2508 | precise interval is link-dependent), but if the link does not come up, | ||
2509 | ::NETIO_LINK_DOWN will be returned. This flag is the default if | ||
2510 | ::NETIO_NOREQUIRE_LINK_UP is not specified. Note that this flag by | ||
2511 | itself does not request that the link be brought up; that can be done | ||
2512 | with the ::NETIO_AUTO_LINK_UPDN or ::NETIO_AUTO_LINK_UP flags (the | ||
2513 | latter is the default if no NETIO_AUTO_LINK_xxx flags are specified), | ||
2514 | or by explicitly setting the link's desired state via netio_set(). | ||
2515 | If the link is not brought up by one of those methods, and this flag | ||
2516 | is specified, the registration operation will return ::NETIO_LINK_DOWN. | ||
2517 | This flag is ignored if it is specified along with ::NETIO_NO_XMIT and | ||
2518 | ::NETIO_NO_RECV. See @ref link for more information on link | ||
2519 | management. | ||
2520 | */ | ||
2521 | #define NETIO_REQUIRE_LINK_UP 0x00000800 | ||
2522 | |||
2523 | /** This registration call will complete even if the network link is not up. | ||
2524 | Whenever the link is not up, packets will not be sent or received: | ||
2525 | netio_get_packet() will return ::NETIO_NOPKT once all queued packets | ||
2526 | have been drained, and netio_send_packet() and similar routines will | ||
2527 | return NETIO_QUEUE_FULL once the outgoing packet queue in the EPP | ||
2528 | or the I/O shim is full. See @ref link for more information on link | ||
2529 | management. | ||
2530 | */ | ||
2531 | #define NETIO_NOREQUIRE_LINK_UP 0x00001000 | ||
2532 | |||
2533 | #ifndef __DOXYGEN__ | ||
2534 | /* | ||
2535 | * These are part of the implementation of the NETIO_AUTO_LINK_xxx flags, | ||
2536 | * but should not be used directly by applications, and are thus not | ||
2537 | * documented. | ||
2538 | */ | ||
2539 | #define _NETIO_AUTO_UP 0x00002000 | ||
2540 | #define _NETIO_AUTO_DN 0x00004000 | ||
2541 | #define _NETIO_AUTO_PRESENT 0x00008000 | ||
2542 | #endif | ||
2543 | |||
2544 | /** Set the desired state of the link to up, allowing any speeds which are | ||
2545 | supported by the link hardware, as part of this registration operation. | ||
2546 | Do not take down the link automatically. This is the default if | ||
2547 | no other NETIO_AUTO_LINK_xxx flags are specified. This flag is ignored | ||
2548 | if it is specified along with ::NETIO_NO_XMIT and ::NETIO_NO_RECV. | ||
2549 | See @ref link for more information on link management. | ||
2550 | */ | ||
2551 | #define NETIO_AUTO_LINK_UP (_NETIO_AUTO_PRESENT | _NETIO_AUTO_UP) | ||
2552 | |||
2553 | /** Set the desired state of the link to up, allowing any speeds which are | ||
2554 | supported by the link hardware, as part of this registration operation. | ||
2555 | Set the desired state of the link to down the next time no tiles are | ||
2556 | registered for packet reception or transmission. This flag is ignored | ||
2557 | if it is specified along with ::NETIO_NO_XMIT and ::NETIO_NO_RECV. | ||
2558 | See @ref link for more information on link management. | ||
2559 | */ | ||
2560 | #define NETIO_AUTO_LINK_UPDN (_NETIO_AUTO_PRESENT | _NETIO_AUTO_UP | \ | ||
2561 | _NETIO_AUTO_DN) | ||
2562 | |||
2563 | /** Set the desired state of the link to down the next time no tiles are | ||
2564 | registered for packet reception or transmission. This flag is ignored | ||
2565 | if it is specified along with ::NETIO_NO_XMIT and ::NETIO_NO_RECV. | ||
2566 | See @ref link for more information on link management. | ||
2567 | */ | ||
2568 | #define NETIO_AUTO_LINK_DN (_NETIO_AUTO_PRESENT | _NETIO_AUTO_DN) | ||
2569 | |||
2570 | /** Do not bring up the link automatically as part of this registration | ||
2571 | operation. Do not take down the link automatically. This flag | ||
2572 | is ignored if it is specified along with ::NETIO_NO_XMIT and | ||
2573 | ::NETIO_NO_RECV. See @ref link for more information on link management. | ||
2574 | */ | ||
2575 | #define NETIO_AUTO_LINK_NONE _NETIO_AUTO_PRESENT | ||
2576 | |||
2577 | |||
2578 | /** Minimum number of receive packets. */ | ||
2579 | #define NETIO_MIN_RECEIVE_PKTS 16 | ||
2580 | |||
2581 | /** Lower bound on the maximum number of receive packets; may be higher | ||
2582 | than this on some interfaces. */ | ||
2583 | #define NETIO_MAX_RECEIVE_PKTS 128 | ||
2584 | |||
2585 | /** Maximum number of send buffers, per packet size. */ | ||
2586 | #define NETIO_MAX_SEND_BUFFERS 16 | ||
2587 | |||
2588 | /** Number of EPP queue slots, and thus outstanding sends, per EPP. */ | ||
2589 | #define NETIO_TOTAL_SENDS_OUTSTANDING 2015 | ||
2590 | |||
2591 | /** Minimum number of EPP queue slots, and thus outstanding sends, per | ||
2592 | * transmitting tile. */ | ||
2593 | #define NETIO_MIN_SENDS_OUTSTANDING 16 | ||
2594 | |||
2595 | |||
2596 | /**@}*/ | ||
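Tying the flag groups above together: as noted earlier, a statistics or link-management-only registration specifies both the "no receive" and "no transmit" flags. A minimal sketch of such a flag word (the macro name is made up for illustration):

/* Sketch: flags for a monitoring-only queue, which may then be used
 * only with netio_get(), netio_set(), and netio_input_unregister(). */
#define EXAMPLE_MONITOR_FLAGS  (NETIO_NO_RECV | NETIO_NO_XMIT | NETIO_TAG_NONE)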
2597 | |||
2598 | #ifndef __DOXYGEN__ | ||
2599 | |||
2600 | /** | ||
2601 | * An object for providing Ethernet packets to a process. | ||
2602 | */ | ||
2603 | struct __netio_queue_impl_t; | ||
2604 | |||
2605 | /** | ||
2606 | * An object for managing the user end of a NetIO queue. | ||
2607 | */ | ||
2608 | struct __netio_queue_user_impl_t; | ||
2609 | |||
2610 | #endif /* !__DOXYGEN__ */ | ||
2611 | |||
2612 | |||
2613 | /** A netio_queue_t describes a NetIO communications endpoint. | ||
2614 | * @ingroup setup | ||
2615 | */ | ||
2616 | typedef struct | ||
2617 | { | ||
2618 | #ifdef __DOXYGEN__ | ||
2619 | uint8_t opaque[8]; /**< This is an opaque structure. */ | ||
2620 | #else | ||
2621 | struct __netio_queue_impl_t* __system_part; /**< The system part. */ | ||
2622 | struct __netio_queue_user_impl_t* __user_part; /**< The user part. */ | ||
2623 | #ifdef _NETIO_PTHREAD | ||
2624 | _netio_percpu_mutex_t lock; /**< Queue lock. */ | ||
2625 | #endif | ||
2626 | #endif | ||
2627 | } | ||
2628 | netio_queue_t; | ||
2629 | |||
2630 | |||
2631 | /** | ||
2632 | * @brief Packet send context. | ||
2633 | * | ||
2634 | * @ingroup egress | ||
2635 | * | ||
2636 | * Packet send context for use with netio_send_packet_prepare and _commit. | ||
2637 | */ | ||
2638 | typedef struct | ||
2639 | { | ||
2640 | #ifdef __DOXYGEN__ | ||
2641 | uint8_t opaque[44]; /**< This is an opaque structure. */ | ||
2642 | #else | ||
2643 | uint8_t flags; /**< Defined below */ | ||
2644 | uint8_t datalen; /**< Number of valid words pointed to by data. */ | ||
2645 | uint32_t request[9]; /**< Request to be sent to the EPP or shim. Note | ||
2646 | that this is smaller than the 11-word maximum | ||
2647 | request size, since some constant values are | ||
2648 | not saved in the context. */ | ||
2649 | uint32_t *data; /**< Data to be sent to the EPP or shim via IDN. */ | ||
2650 | #endif | ||
2651 | } | ||
2652 | netio_send_pkt_context_t; | ||
2653 | |||
2654 | |||
2655 | #ifndef __DOXYGEN__ | ||
2656 | #define SEND_PKT_CTX_USE_EPP 1 /**< We're sending to an EPP. */ | ||
2657 | #define SEND_PKT_CTX_SEND_CSUM 2 /**< Request includes a checksum. */ | ||
2658 | #endif | ||
2659 | |||
2660 | /** | ||
2661 | * @brief Packet vector entry. | ||
2662 | * | ||
2663 | * @ingroup egress | ||
2664 | * | ||
2665 | * This data structure is used with netio_send_packet_vector() to send multiple | ||
2666 | * packets with one NetIO call. The structure should be initialized by | ||
2667 | * calling netio_pkt_vector_set(), rather than by setting the fields | ||
2668 | * directly. | ||
2669 | * | ||
2670 | * This structure is guaranteed to be a power of two in size, no | ||
2671 | * bigger than one L2 cache line, and to be aligned modulo its size. | ||
2672 | */ | ||
2673 | typedef struct | ||
2674 | #ifndef __DOXYGEN__ | ||
2675 | __attribute__((aligned(8))) | ||
2676 | #endif | ||
2677 | { | ||
2678 | /** Reserved for use by the user application. When initialized with | ||
2679 | * the netio_pkt_vector_set() function, this field is guaranteed | ||
2680 | * to be visible to readers only after all other fields are already | ||
2681 | * visible. This way it can be used as a valid flag or generation | ||
2682 | * counter. */ | ||
2683 | uint8_t user_data; | ||
2684 | |||
2685 | /* Structure members below this point should not be accessed directly by | ||
2686 | * applications, as they may change in the future. */ | ||
2687 | |||
2688 | /** Low 8 bits of the packet address to send. The high bits are | ||
2689 | * acquired from the 'handle' field. */ | ||
2690 | uint8_t buffer_address_low; | ||
2691 | |||
2692 | /** Number of bytes to transmit. */ | ||
2693 | uint16_t size; | ||
2694 | |||
2695 | /** The raw handle from a netio_pkt_t. If this is NETIO_PKT_HANDLE_NONE, | ||
2696 | * this vector entry will be skipped and no packet will be transmitted. */ | ||
2697 | netio_pkt_handle_t handle; | ||
2698 | } | ||
2699 | netio_pkt_vector_entry_t; | ||
2700 | |||
2701 | |||
2702 | /** | ||
2703 | * @brief Initialize fields in a packet vector entry. | ||
2704 | * | ||
2705 | * @ingroup egress | ||
2706 | * | ||
2707 | * @param[out] v Pointer to the vector entry to be initialized. | ||
2708 | * @param[in] pkt Packet to be transmitted when the vector entry is passed to | ||
2709 | * netio_send_packet_vector(). Note that the packet's attributes | ||
2710 | * (e.g., its L2 offset and length) are captured at the time this | ||
2711 | * routine is called; subsequent changes in those attributes will not | ||
2712 | * be reflected in the packet which is actually transmitted. | ||
2713 | * Changes in the packet's contents, however, will be so reflected. | ||
2714 | * If this is NULL, no packet will be transmitted. | ||
2715 | * @param[in] user_data User data to be set in the vector entry. | ||
2716 | * This function guarantees that the "user_data" field will become | ||
2717 | * visible to a reader only after all other fields have become visible. | ||
2718 | * This allows a structure in a ring buffer to be written and read | ||
2719 | * by a polling reader without any locks or other synchronization. | ||
2720 | */ | ||
2721 | static __inline void | ||
2722 | netio_pkt_vector_set(volatile netio_pkt_vector_entry_t* v, netio_pkt_t* pkt, | ||
2723 | uint8_t user_data) | ||
2724 | { | ||
2725 | if (pkt) | ||
2726 | { | ||
2727 | if (NETIO_PKT_IS_MINIMAL(pkt)) | ||
2728 | { | ||
2729 | netio_pkt_minimal_metadata_t* mmd = | ||
2730 | (netio_pkt_minimal_metadata_t*) &pkt->__metadata; | ||
2731 | v->buffer_address_low = (uintptr_t) NETIO_PKT_L2_DATA_MM(mmd, pkt) & 0xFF; | ||
2732 | v->size = NETIO_PKT_L2_LENGTH_MM(mmd, pkt); | ||
2733 | } | ||
2734 | else | ||
2735 | { | ||
2736 | netio_pkt_metadata_t* mda = &pkt->__metadata; | ||
2737 | v->buffer_address_low = (uintptr_t) NETIO_PKT_L2_DATA_M(mda, pkt) & 0xFF; | ||
2738 | v->size = NETIO_PKT_L2_LENGTH_M(mda, pkt); | ||
2739 | } | ||
2740 | v->handle.word = pkt->__packet.word; | ||
2741 | } | ||
2742 | else | ||
2743 | { | ||
2744 | v->handle.word = 0; /* Set handle to NETIO_PKT_HANDLE_NONE. */ | ||
2745 | } | ||
2746 | |||
2747 | __asm__("" : : : "memory"); | ||
2748 | |||
2749 | v->user_data = user_data; | ||
2750 | } | ||
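A usage sketch for the initializer above, using only the types and functions defined in this header (the helper name is made up for illustration). The filled vector would then be passed to netio_send_packet_vector(), whose prototype is not shown in this excerpt, so that call is omitted.

/* Sketch: prepare a transmit vector from an array of packets. NULL
 * packets yield entries with handle NETIO_PKT_HANDLE_NONE, which are
 * skipped on transmit. Returns the number of entries prepared. */
static int prepare_send_vector(netio_pkt_vector_entry_t vec[],
                               netio_pkt_t *pkts[], int npkts)
{
  int i;
  for (i = 0; i < npkts; i++)
  {
    /* user_data (1 here) becomes visible only after the other fields,
     * so a polling reader can treat it as a "valid" flag. */
    netio_pkt_vector_set(&vec[i], pkts[i], 1);
  }
  return i;
}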
2751 | |||
2752 | |||
2753 | /** | ||
2754 | * Flags and structures for @ref netio_get() and @ref netio_set(). | ||
2755 | * @ingroup config | ||
2756 | */ | ||
2757 | |||
2758 | /** @{ */ | ||
2759 | /** Parameter class; addr is a NETIO_PARAM_xxx value. */ | ||
2760 | #define NETIO_PARAM 0 | ||
2761 | /** Interface MAC address. This address is only valid with @ref netio_get(). | ||
2762 | * The value is a 6-byte MAC address. Depending upon the overall system | ||
2763 | * design, a MAC address may or may not be available for each interface. */ | ||
2764 | #define NETIO_PARAM_MAC 0 | ||
2765 | |||
2766 | /** Determine whether to suspend output on the receipt of pause frames. | ||
2767 | * If the value is nonzero, the I/O shim will suspend output when a pause | ||
2768 | * frame is received. If the value is zero, pause frames will be ignored. */ | ||
2769 | #define NETIO_PARAM_PAUSE_IN 1 | ||
2770 | |||
2771 | /** Determine whether to send pause frames if the I/O shim packet FIFOs are | ||
2772 | * nearly full. If the value is zero, pause frames are not sent. If | ||
2773 | * the value is nonzero, it is the delay value which will be sent in any | ||
2774 | * pause frames which are output, in units of 512 bit times. */ | ||
2775 | #define NETIO_PARAM_PAUSE_OUT 2 | ||
2776 | |||
2777 | /** Jumbo frame support. The value is a 4-byte integer. If the value is | ||
2778 | * nonzero, the MAC will accept frames of up to 10240 bytes. If the value | ||
2779 | * is zero, the MAC will only accept frames of up to 1544 bytes. */ | ||
2780 | #define NETIO_PARAM_JUMBO 3 | ||
2781 | |||
2782 | /** I/O shim's overflow statistics register. The value is two 16-bit integers. | ||
2783 | * The first 16-bit value (or the low 16 bits, if the value is treated as a | ||
2784 | * 32-bit number) is the count of packets which were completely dropped and | ||
2785 | * not delivered by the shim. The second 16-bit value (or the high 16 bits, | ||
2786 | * if the value is treated as a 32-bit number) is the count of packets | ||
2787 | * which were truncated and thus only partially delivered by the shim. This | ||
2788 | * register is automatically reset to zero after it has been read. | ||
2789 | */ | ||
2790 | #define NETIO_PARAM_OVERFLOW 4 | ||
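Since the overflow value packs two 16-bit counters into one 32-bit word as described above, a reader can split it as follows (sketch only; how the raw word is fetched via netio_get() is not shown here):

/* Split a raw NETIO_PARAM_OVERFLOW value into its two counters. */
static void decode_overflow(uint32_t raw,
                            uint16_t *dropped, uint16_t *truncated)
{
  *dropped   = (uint16_t)(raw & 0xffff);   /* low 16 bits  */
  *truncated = (uint16_t)(raw >> 16);      /* high 16 bits */
}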
2791 | |||
2792 | /** IPP statistics. This address is only valid with @ref netio_get(). The | ||
2793 | * value is a netio_stat_t structure. Unlike the I/O shim statistics, the | ||
2794 | * IPP statistics are not all reset to zero on read; see the description | ||
2795 | * of the netio_stat_t for details. */ | ||
2796 | #define NETIO_PARAM_STAT 5 | ||
2797 | |||
2798 | /** Possible link state. The value is a combination of "NETIO_LINK_xxx" | ||
2799 | * flags. With @ref netio_get(), this will indicate which flags are | ||
2800 | * actually supported by the hardware. | ||
2801 | * | ||
2802 | * For historical reasons, specifying this value to netio_set() will have | ||
2803 | * the same behavior as using ::NETIO_PARAM_LINK_CONFIG, but this usage is | ||
2804 | * discouraged. | ||
2805 | */ | ||
2806 | #define NETIO_PARAM_LINK_POSSIBLE_STATE 6 | ||
2807 | |||
2808 | /** Link configuration. The value is a combination of "NETIO_LINK_xxx" flags. | ||
2809 | * With @ref netio_set(), this will attempt to immediately bring up the | ||
2810 | * link using whichever of the requested flags are supported by the | ||
2811 | * hardware, or take down the link if the flags are zero; if this is | ||
2812 | * not possible, an error will be returned. Many programs will want | ||
2813 | * to use ::NETIO_PARAM_LINK_DESIRED_STATE instead. | ||
2814 | * | ||
2815 | * For historical reasons, specifying this value to netio_get() will | ||
2816 | * have the same behavior as using ::NETIO_PARAM_LINK_POSSIBLE_STATE, | ||
2817 | * but this usage is discouraged. | ||
2818 | */ | ||
2819 | #define NETIO_PARAM_LINK_CONFIG NETIO_PARAM_LINK_POSSIBLE_STATE | ||
2820 | |||
2821 | /** Current link state. This address is only valid with @ref netio_get(). | ||
2822 | * The value is zero or more of the "NETIO_LINK_xxx" flags, ORed together. | ||
2823 | * If the link is down, the value ANDed with NETIO_LINK_SPEED will be | ||
2824 | * zero; if the link is up, the value ANDed with NETIO_LINK_SPEED will | ||
2825 | * result in exactly one of the NETIO_LINK_xxx values, indicating the | ||
2826 | * current speed. */ | ||
2827 | #define NETIO_PARAM_LINK_CURRENT_STATE 7 | ||
2828 | |||
2829 | /** Variant symbol for current state, retained for compatibility with | ||
2830 | * pre-MDE-2.1 programs. */ | ||
2831 | #define NETIO_PARAM_LINK_STATUS NETIO_PARAM_LINK_CURRENT_STATE | ||
2832 | |||
2833 | /** Packet Coherence protocol. This address is only valid with @ref netio_get(). | ||
2834 | * The value is nonzero if the interface is configured for cache-coherent DMA. | ||
2835 | */ | ||
2836 | #define NETIO_PARAM_COHERENT 8 | ||
2837 | |||
2838 | /** Desired link state. The value is a combination of "NETIO_LINK_xxx" | ||
2839 | * flags, which specify the desired state for the link. With @ref | ||
2840 | * netio_set(), this will, in the background, attempt to bring up the link | ||
2841 | * using whichever of the requested flags are reasonable, or take down the | ||
2842 | * link if the flags are zero. The actual link up or down operation may | ||
2843 | * happen after this call completes. If the link state changes in the | ||
2844 | * future, the system will continue to try to get back to the desired link | ||
2845 | * state; for instance, if the link is brought up successfully, and then | ||
2846 | * the network cable is disconnected, the link will go down. However, the | ||
2847 | * desired state of the link is still up, so if the cable is reconnected, | ||
2848 | * the link will be brought up again. | ||
2849 | * | ||
2850 | * With @ref netio_get(), this will indicate the desired state for the | ||
2851 | * link, as set with a previous netio_set() call, or implicitly by a | ||
2852 | * netio_input_register() or netio_input_unregister() operation. This may | ||
2853 | * not reflect the current state of the link; to get that, use | ||
2854 | * ::NETIO_PARAM_LINK_CURRENT_STATE. */ | ||
2855 | #define NETIO_PARAM_LINK_DESIRED_STATE 9 | ||
2856 | |||
2857 | /** NetIO statistics structure. Retrieved using the ::NETIO_PARAM_STAT | ||
2858 | * address passed to @ref netio_get(). */ | ||
2859 | typedef struct | ||
2860 | { | ||
2861 | /** Number of packets which have been received by the IPP and forwarded | ||
2862 | * to a tile's receive queue for processing. This value wraps at its | ||
2863 | * maximum, and is not cleared upon read. */ | ||
2864 | uint32_t packets_received; | ||
2865 | |||
2866 | /** Number of packets which have been dropped by the IPP, because they could | ||
2867 | * not be received, or could not be forwarded to a tile. The former happens | ||
2868 | * when the IPP does not have a free packet buffer of suitable size for an | ||
2869 | * incoming frame. The latter happens when all potential destination tiles | ||
2870 | * for a packet, as defined by the group, bucket, and queue configuration, | ||
2871 | * have full receive queues. This value wraps at its maximum, and is not | ||
2872 | * cleared upon read. */ | ||
2873 | uint32_t packets_dropped; | ||
2874 | |||
2875 | /* | ||
2876 | * Note: the #defines after each of the following four one-byte values | ||
2877 | * denote their location within the third word of the netio_stat_t. They | ||
2878 | * are intended for use only by the IPP implementation and are thus omitted | ||
2879 | * from the Doxygen output. | ||
2880 | */ | ||
2881 | |||
2882 | /** Number of packets dropped because no worker was able to accept a new | ||
2883 | * packet. This value saturates at its maximum, and is cleared upon | ||
2884 | * read. */ | ||
2885 | uint8_t drops_no_worker; | ||
2886 | #ifndef __DOXYGEN__ | ||
2887 | #define NETIO_STAT_DROPS_NO_WORKER 0 | ||
2888 | #endif | ||
2889 | |||
2890 | /** Number of packets dropped because no small buffers were available. | ||
2891 | * This value saturates at its maximum, and is cleared upon read. */ | ||
2892 | uint8_t drops_no_smallbuf; | ||
2893 | #ifndef __DOXYGEN__ | ||
2894 | #define NETIO_STAT_DROPS_NO_SMALLBUF 1 | ||
2895 | #endif | ||
2896 | |||
2897 | /** Number of packets dropped because no large buffers were available. | ||
2898 | * This value saturates at its maximum, and is cleared upon read. */ | ||
2899 | uint8_t drops_no_largebuf; | ||
2900 | #ifndef __DOXYGEN__ | ||
2901 | #define NETIO_STAT_DROPS_NO_LARGEBUF 2 | ||
2902 | #endif | ||
2903 | |||
2904 | /** Number of packets dropped because no jumbo buffers were available. | ||
2905 | * This value saturates at its maximum, and is cleared upon read. */ | ||
2906 | uint8_t drops_no_jumbobuf; | ||
2907 | #ifndef __DOXYGEN__ | ||
2908 | #define NETIO_STAT_DROPS_NO_JUMBOBUF 3 | ||
2909 | #endif | ||
2910 | } | ||
2911 | netio_stat_t; | ||
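Because packets_received and packets_dropped wrap and are not cleared on read, while the four per-cause byte counters saturate and are cleared on read, a monitor has to treat the two kinds differently. A minimal sketch (the totals struct and helper are made up for illustration):

/* Accumulate statistics across successive netio_stat_t snapshots.
 * Wrapping 32-bit counters are differenced modulo 2^32; the saturating,
 * clear-on-read byte counters are simply summed (255 may mean "255 or
 * more" drops since the previous read). */
struct netio_stat_totals {
  uint64_t received, dropped;
  uint64_t no_worker, no_smallbuf, no_largebuf, no_jumbobuf;
};

static void accumulate_stats(struct netio_stat_totals *t,
                             const netio_stat_t *prev,
                             const netio_stat_t *cur)
{
  t->received    += (uint32_t)(cur->packets_received - prev->packets_received);
  t->dropped     += (uint32_t)(cur->packets_dropped - prev->packets_dropped);
  t->no_worker   += cur->drops_no_worker;
  t->no_smallbuf += cur->drops_no_smallbuf;
  t->no_largebuf += cur->drops_no_largebuf;
  t->no_jumbobuf += cur->drops_no_jumbobuf;
}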
2912 | |||
2913 | |||
2914 | /** Link can run, should run, or is running at 10 Mbps. */ | ||
2915 | #define NETIO_LINK_10M 0x01 | ||
2916 | |||
2917 | /** Link can run, should run, or is running at 100 Mbps. */ | ||
2918 | #define NETIO_LINK_100M 0x02 | ||
2919 | |||
2920 | /** Link can run, should run, or is running at 1 Gbps. */ | ||
2921 | #define NETIO_LINK_1G 0x04 | ||
2922 | |||
2923 | /** Link can run, should run, or is running at 10 Gbps. */ | ||
2924 | #define NETIO_LINK_10G 0x08 | ||
2925 | |||
2926 | /** Link should run at the highest speed supported by the link and by | ||
2927 | * the device connected to the link. Only usable as a value for | ||
2928 | * the link's desired state; never returned as a value for the current | ||
2929 | * or possible states. */ | ||
2930 | #define NETIO_LINK_ANYSPEED 0x10 | ||
2931 | |||
2932 | /** All legal link speeds. */ | ||
2933 | #define NETIO_LINK_SPEED (NETIO_LINK_10M | \ | ||
2934 | NETIO_LINK_100M | \ | ||
2935 | NETIO_LINK_1G | \ | ||
2936 | NETIO_LINK_10G | \ | ||
2937 | NETIO_LINK_ANYSPEED) | ||
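Given a value retrieved with ::NETIO_PARAM_LINK_CURRENT_STATE, the speed can be decoded exactly as the comments above describe (sketch; the helper name is made up for illustration):

/* Name the speed bits of a current link state word. Per the notes
 * above, (state & NETIO_LINK_SPEED) is zero when the link is down and
 * exactly one speed flag when it is up; NETIO_LINK_ANYSPEED is never
 * returned as a current state. */
static const char *link_speed_name(uint32_t state)
{
  switch (state & NETIO_LINK_SPEED)
  {
  case 0:               return "down";
  case NETIO_LINK_10M:  return "10 Mbps";
  case NETIO_LINK_100M: return "100 Mbps";
  case NETIO_LINK_1G:   return "1 Gbps";
  case NETIO_LINK_10G:  return "10 Gbps";
  default:              return "unknown";
  }
}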
2938 | |||
2939 | |||
2940 | /** MAC register class. Addr is a register offset within the MAC. | ||
2941 | * Registers within the XGbE and GbE MACs are documented in the Tile | ||
2942 | * Processor I/O Device Guide (UG104). MAC registers start at address | ||
2943 | * 0x4000, and do not include the MAC_INTERFACE registers. */ | ||
2944 | #define NETIO_MAC 1 | ||
2945 | |||
2946 | /** MDIO register class (IEEE 802.3 clause 22 format). Addr is the "addr" | ||
2947 | * member of a netio_mdio_addr_t structure. */ | ||
2948 | #define NETIO_MDIO 2 | ||
2949 | |||
2950 | /** MDIO register class (IEEE 802.3 clause 45 format). Addr is the "addr" | ||
2951 | * member of a netio_mdio_addr_t structure. */ | ||
2952 | #define NETIO_MDIO_CLAUSE45 3 | ||
2953 | |||
2954 | /** NetIO MDIO address type. Retrieved or provided using the ::NETIO_MDIO | ||
2955 | * address passed to @ref netio_get() or @ref netio_set(). */ | ||
2956 | typedef union | ||
2957 | { | ||
2958 | struct | ||
2959 | { | ||
2960 | unsigned int reg:16; /**< MDIO register offset. For clause 22 access, | ||
2961 | must be less than 32. */ | ||
2962 | unsigned int phy:5; /**< Which MDIO PHY to access. */ | ||
2963 | unsigned int dev:5; /**< Which MDIO device to access within that PHY. | ||
2964 | Applicable for clause 45 access only; ignored | ||
2965 | for clause 22 access. */ | ||
2966 | } | ||
2967 | bits; /**< Container for bitfields. */ | ||
2968 | uint64_t addr; /**< Value to pass to @ref netio_get() or | ||
2969 | * @ref netio_set(). */ | ||
2970 | } | ||
2971 | netio_mdio_addr_t; | ||
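To show how the union above is meant to be filled, here is a sketch that builds a clause 22 address; the result is the value to pass, with the ::NETIO_MDIO class, to netio_get() or netio_set(), whose prototypes are not shown in this excerpt.

/* Sketch: build an IEEE 802.3 clause 22 MDIO address word. The
 * register offset must be less than 32, and the 'dev' field is
 * ignored for clause 22 access. */
static uint64_t mdio_clause22_addr(unsigned int phy, unsigned int reg)
{
  netio_mdio_addr_t a;
  a.addr = 0;        /* clear the whole union first */
  a.bits.reg = reg;
  a.bits.phy = phy;
  return a.addr;
}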
2972 | |||
2973 | /** @} */ | ||
2974 | |||
2975 | #endif /* __NETIO_INTF_H__ */ | ||
diff --git a/arch/tile/kernel/Makefile b/arch/tile/kernel/Makefile index 112b1e248f05..b4c8e8ec45dc 100644 --- a/arch/tile/kernel/Makefile +++ b/arch/tile/kernel/Makefile | |||
@@ -15,3 +15,4 @@ obj-$(CONFIG_SMP) += smpboot.o smp.o tlb.o | |||
15 | obj-$(CONFIG_MODULES) += module.o | 15 | obj-$(CONFIG_MODULES) += module.o |
16 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o | 16 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o |
17 | obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o | 17 | obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o |
18 | obj-$(CONFIG_PCI) += pci.o | ||
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c new file mode 100644 index 000000000000..a1ee25be9ad9 --- /dev/null +++ b/arch/tile/kernel/pci.c | |||
@@ -0,0 +1,621 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/pci.h> | ||
17 | #include <linux/delay.h> | ||
18 | #include <linux/string.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/capability.h> | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/errno.h> | ||
23 | #include <linux/bootmem.h> | ||
24 | #include <linux/irq.h> | ||
25 | #include <linux/io.h> | ||
26 | #include <linux/uaccess.h> | ||
27 | |||
28 | #include <asm/processor.h> | ||
29 | #include <asm/sections.h> | ||
30 | #include <asm/byteorder.h> | ||
31 | #include <asm/hv_driver.h> | ||
32 | #include <hv/drv_pcie_rc_intf.h> | ||
33 | |||
34 | |||
35 | /* | ||
36 | * Initialization flow and process | ||
37 | * ------------------------------- | ||
38 | * | ||
39 | * This file contains the routines to search for PCI buses, | ||
40 | * enumerate the buses, and configure any attached devices. | ||
41 | * | ||
42 | * There are two entry points here: | ||
43 | * 1) tile_pci_init | ||
44 | * This sets up the pci_controller structs, and opens the | ||
45 | * FDs to the hypervisor. This is called from setup_arch() early | ||
46 | * in the boot process. | ||
47 | * 2) pcibios_init | ||
48 | * This probes the PCI bus(es) for any attached hardware. It's | ||
49 | * called by subsys_initcall. All of the real work is done by the | ||
50 | * generic Linux PCI layer. | ||
51 | * | ||
52 | */ | ||
53 | |||
54 | /* | ||
55 | * This flag is set if the platform is a TILEmpower board, which needs | ||
56 | * special configuration for the PLX switch chip. | ||
57 | */ | ||
58 | int __write_once tile_plx_gen1; | ||
59 | |||
60 | static struct pci_controller controllers[TILE_NUM_PCIE]; | ||
61 | static int num_controllers; | ||
62 | |||
63 | static struct pci_ops tile_cfg_ops; | ||
64 | |||
65 | |||
66 | /* | ||
67 | * We don't need to worry about the alignment of resources. | ||
68 | */ | ||
69 | resource_size_t pcibios_align_resource(void *data, const struct resource *res, | ||
70 | resource_size_t size, resource_size_t align) | ||
71 | { | ||
72 | return res->start; | ||
73 | } | ||
74 | EXPORT_SYMBOL(pcibios_align_resource); | ||
75 | |||
76 | /* | ||
77 | * Open a FD to the hypervisor PCI device. | ||
78 | * | ||
79 | * controller_id is the controller number, config type is 0 or 1 for | ||
80 | * config0 or config1 operations. | ||
81 | */ | ||
82 | static int __init tile_pcie_open(int controller_id, int config_type) | ||
83 | { | ||
84 | char filename[32]; | ||
85 | int fd; | ||
86 | |||
87 | sprintf(filename, "pcie/%d/config%d", controller_id, config_type); | ||
88 | |||
89 | fd = hv_dev_open((HV_VirtAddr)filename, 0); | ||
90 | |||
91 | return fd; | ||
92 | } | ||
93 | |||
94 | |||
95 | /* | ||
96 | * Get the IRQ numbers from the HV and set up the handlers for them. | ||
97 | */ | ||
98 | static int __init tile_init_irqs(int controller_id, | ||
99 | struct pci_controller *controller) | ||
100 | { | ||
101 | char filename[32]; | ||
102 | int fd; | ||
103 | int ret; | ||
104 | int x; | ||
105 | struct pcie_rc_config rc_config; | ||
106 | |||
107 | sprintf(filename, "pcie/%d/ctl", controller_id); | ||
108 | fd = hv_dev_open((HV_VirtAddr)filename, 0); | ||
109 | if (fd < 0) { | ||
110 | pr_err("PCI: hv_dev_open(%s) failed\n", filename); | ||
111 | return -1; | ||
112 | } | ||
113 | ret = hv_dev_pread(fd, 0, (HV_VirtAddr)(&rc_config), | ||
114 | sizeof(rc_config), PCIE_RC_CONFIG_MASK_OFF); | ||
115 | hv_dev_close(fd); | ||
116 | if (ret != sizeof(rc_config)) { | ||
117 | pr_err("PCI: wanted %zd bytes, got %d\n", | ||
118 | sizeof(rc_config), ret); | ||
119 | return -1; | ||
120 | } | ||
121 | /* Record irq_base so that we can map INTx to IRQ # later. */ | ||
122 | controller->irq_base = rc_config.intr; | ||
123 | |||
124 | for (x = 0; x < 4; x++) | ||
125 | tile_irq_activate(rc_config.intr + x, | ||
126 | TILE_IRQ_HW_CLEAR); | ||
127 | |||
128 | if (rc_config.plx_gen1) | ||
129 | controller->plx_gen1 = 1; | ||
130 | |||
131 | return 0; | ||
132 | } | ||
133 | |||
134 | /* | ||
135 | * First initialization entry point, called from setup_arch(). | ||
136 | * | ||
137 | * Find valid controllers and fill in pci_controller structs for each | ||
138 | * of them. | ||
139 | * | ||
140 | * Returns the number of controllers discovered. | ||
141 | */ | ||
142 | int __init tile_pci_init(void) | ||
143 | { | ||
144 | int i; | ||
145 | |||
146 | pr_info("PCI: Searching for controllers...\n"); | ||
147 | |||
148 | /* Do any configuration we need before using the PCIe */ | ||
149 | |||
150 | for (i = 0; i < TILE_NUM_PCIE; i++) { | ||
151 | int hv_cfg_fd0 = -1; | ||
152 | int hv_cfg_fd1 = -1; | ||
153 | int hv_mem_fd = -1; | ||
154 | char name[32]; | ||
155 | struct pci_controller *controller; | ||
156 | |||
157 | /* | ||
158 | * Open the fd to the HV. If it fails then this | ||
159 | * device doesn't exist. | ||
160 | */ | ||
161 | hv_cfg_fd0 = tile_pcie_open(i, 0); | ||
162 | if (hv_cfg_fd0 < 0) | ||
163 | continue; | ||
164 | hv_cfg_fd1 = tile_pcie_open(i, 1); | ||
165 | if (hv_cfg_fd1 < 0) { | ||
166 | pr_err("PCI: Couldn't open config fd to HV " | ||
167 | "for controller %d\n", i); | ||
168 | goto err_cont; | ||
169 | } | ||
170 | |||
171 | sprintf(name, "pcie/%d/mem", i); | ||
172 | hv_mem_fd = hv_dev_open((HV_VirtAddr)name, 0); | ||
173 | if (hv_mem_fd < 0) { | ||
174 | pr_err("PCI: Could not open mem fd to HV!\n"); | ||
175 | goto err_cont; | ||
176 | } | ||
177 | |||
178 | pr_info("PCI: Found PCI controller #%d\n", i); | ||
179 | |||
180 | controller = &controllers[num_controllers]; | ||
181 | |||
182 | if (tile_init_irqs(i, controller)) { | ||
183 | pr_err("PCI: Could not initialize " | ||
184 | "IRQs, aborting.\n"); | ||
185 | goto err_cont; | ||
186 | } | ||
187 | |||
188 | controller->index = num_controllers; | ||
189 | controller->hv_cfg_fd[0] = hv_cfg_fd0; | ||
190 | controller->hv_cfg_fd[1] = hv_cfg_fd1; | ||
191 | controller->hv_mem_fd = hv_mem_fd; | ||
192 | controller->first_busno = 0; | ||
193 | controller->last_busno = 0xff; | ||
194 | controller->ops = &tile_cfg_ops; | ||
195 | |||
196 | num_controllers++; | ||
197 | continue; | ||
198 | |||
199 | err_cont: | ||
200 | if (hv_cfg_fd0 >= 0) | ||
201 | hv_dev_close(hv_cfg_fd0); | ||
202 | if (hv_cfg_fd1 >= 0) | ||
203 | hv_dev_close(hv_cfg_fd1); | ||
204 | if (hv_mem_fd >= 0) | ||
205 | hv_dev_close(hv_mem_fd); | ||
206 | continue; | ||
207 | } | ||
208 | |||
209 | /* | ||
210 | * Before using the PCIe, see if we need to do any platform-specific | ||
211 | * configuration, such as the PLX switch Gen 1 issue on TILEmpower. | ||
212 | */ | ||
213 | for (i = 0; i < num_controllers; i++) { | ||
214 | struct pci_controller *controller = &controllers[i]; | ||
215 | |||
216 | if (controller->plx_gen1) | ||
217 | tile_plx_gen1 = 1; | ||
218 | } | ||
219 | |||
220 | return num_controllers; | ||
221 | } | ||
222 | |||
223 | /* | ||
224 | * (pin - 1) converts from the PCI standard's [1:4] convention to | ||
225 | * a normal [0:3] range. | ||
226 | */ | ||
227 | static int tile_map_irq(struct pci_dev *dev, u8 slot, u8 pin) | ||
228 | { | ||
229 | struct pci_controller *controller = | ||
230 | (struct pci_controller *)dev->sysdata; | ||
231 | return (pin - 1) + controller->irq_base; | ||
232 | } | ||
233 | |||
234 | |||
235 | static void __init fixup_read_and_payload_sizes(void) | ||
236 | { | ||
237 | struct pci_dev *dev = NULL; | ||
238 | int smallest_max_payload = 0x1; /* Tile maxes out at 256 bytes. */ | ||
239 | int max_read_size = 0x2; /* Limit to 512 byte reads. */ | ||
240 | u16 new_values; | ||
241 | |||
242 | /* Scan for the smallest maximum payload size. */ | ||
243 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { | ||
244 | int pcie_caps_offset; | ||
245 | u32 devcap; | ||
246 | int max_payload; | ||
247 | |||
248 | pcie_caps_offset = pci_find_capability(dev, PCI_CAP_ID_EXP); | ||
249 | if (pcie_caps_offset == 0) | ||
250 | continue; | ||
251 | |||
252 | pci_read_config_dword(dev, pcie_caps_offset + PCI_EXP_DEVCAP, | ||
253 | &devcap); | ||
254 | max_payload = devcap & PCI_EXP_DEVCAP_PAYLOAD; | ||
255 | if (max_payload < smallest_max_payload) | ||
256 | smallest_max_payload = max_payload; | ||
257 | } | ||
258 | |||
259 | /* Now, set the max_payload_size for all devices to that value. */ | ||
260 | new_values = (max_read_size << 12) | (smallest_max_payload << 5); | ||
261 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { | ||
262 | int pcie_caps_offset; | ||
263 | u16 devctl; | ||
264 | |||
265 | pcie_caps_offset = pci_find_capability(dev, PCI_CAP_ID_EXP); | ||
266 | if (pcie_caps_offset == 0) | ||
267 | continue; | ||
268 | |||
269 | pci_read_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL, | ||
270 | &devctl); | ||
271 | devctl &= ~(PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ); | ||
272 | devctl |= new_values; | ||
273 | pci_write_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL, | ||
274 | devctl); | ||
275 | } | ||
276 | } | ||
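For reference, the shift counts above follow the standard PCIe Device Control register layout: Max_Payload_Size occupies bits [7:5] and Max_Read_Request_Size bits [14:12], and each field encodes 128 << value bytes, so the 0x1 and 0x2 chosen here mean 256-byte payloads and 512-byte read requests. A purely illustrative decoder (not part of this driver):

/* Illustrative only: convert an encoded DEVCTL payload/read-request
 * size field to bytes (0 -> 128, 1 -> 256, 2 -> 512, ...). */
static unsigned int pcie_size_field_to_bytes(unsigned int field)
{
	return 128u << field;
}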
277 | |||
278 | |||
279 | /* | ||
280 | * Second PCI initialization entry point, called by subsys_initcall. | ||
281 | * | ||
282 | * The controllers have been set up by the time we get here, by a call to | ||
283 | * tile_pci_init. | ||
284 | */ | ||
285 | static int __init pcibios_init(void) | ||
286 | { | ||
287 | int i; | ||
288 | |||
289 | pr_info("PCI: Probing PCI hardware\n"); | ||
290 | |||
291 | /* | ||
292 | * Delay a bit in case devices aren't ready. Some devices are | ||
293 | * known to require at least 20ms here, but we use a more | ||
294 | * conservative value. | ||
295 | */ | ||
296 | mdelay(250); | ||
297 | |||
298 | /* Scan all of the recorded PCI controllers. */ | ||
299 | for (i = 0; i < num_controllers; i++) { | ||
300 | struct pci_controller *controller = &controllers[i]; | ||
301 | struct pci_bus *bus; | ||
302 | |||
303 | pr_info("PCI: initializing controller #%d\n", i); | ||
304 | |||
305 | /* | ||
306 | * This comes from the generic Linux PCI driver. | ||
307 | * | ||
308 | * It reads the PCI tree for this bus into the Linux | ||
309 | * data structures. | ||
310 | * | ||
311 | * This is inlined in linux/pci.h and calls into | ||
312 | * pci_scan_bus_parented() in probe.c. | ||
313 | */ | ||
314 | bus = pci_scan_bus(0, controller->ops, controller); | ||
315 | controller->root_bus = bus; | ||
316 | controller->last_busno = bus->subordinate; | ||
317 | |||
318 | } | ||
319 | |||
320 | /* Do machine dependent PCI interrupt routing */ | ||
321 | pci_fixup_irqs(pci_common_swizzle, tile_map_irq); | ||
322 | |||
323 | /* | ||
324 | * This comes from the generic Linux PCI driver. | ||
325 | * | ||
326 | * It allocates all of the resources (I/O memory, etc) | ||
327 | * associated with the devices read in above. | ||
328 | */ | ||
329 | |||
330 | pci_assign_unassigned_resources(); | ||
331 | |||
332 | /* Configure the max_read_size and max_payload_size values. */ | ||
333 | fixup_read_and_payload_sizes(); | ||
334 | |||
335 | /* Record the I/O resources in the PCI controller structure. */ | ||
336 | for (i = 0; i < num_controllers; i++) { | ||
337 | struct pci_bus *root_bus = controllers[i].root_bus; | ||
338 | struct pci_bus *next_bus; | ||
339 | struct pci_dev *dev; | ||
340 | |||
341 | list_for_each_entry(dev, &root_bus->devices, bus_list) { | ||
342 | /* Find the PCI host controller, i.e. the first bridge. */ | ||
343 | if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && | ||
344 | (PCI_SLOT(dev->devfn) == 0)) { | ||
345 | next_bus = dev->subordinate; | ||
346 | controllers[i].mem_resources[0] = | ||
347 | *next_bus->resource[0]; | ||
348 | controllers[i].mem_resources[1] = | ||
349 | *next_bus->resource[1]; | ||
350 | controllers[i].mem_resources[2] = | ||
351 | *next_bus->resource[2]; | ||
352 | |||
353 | break; | ||
354 | } | ||
355 | } | ||
356 | |||
357 | } | ||
358 | |||
359 | return 0; | ||
360 | } | ||
361 | subsys_initcall(pcibios_init); | ||
362 | |||
363 | /* | ||
364 | * No bus fixups needed. | ||
365 | */ | ||
366 | void __devinit pcibios_fixup_bus(struct pci_bus *bus) | ||
367 | { | ||
368 | /* Nothing needs to be done. */ | ||
369 | } | ||
370 | |||
371 | /* | ||
372 | * This can be called from the generic PCI layer, but doesn't need to | ||
373 | * do anything. | ||
374 | */ | ||
375 | char __devinit *pcibios_setup(char *str) | ||
376 | { | ||
377 | /* Nothing needs to be done. */ | ||
378 | return str; | ||
379 | } | ||
380 | |||
381 | /* | ||
382 | * This is called from the generic Linux layer. | ||
383 | */ | ||
384 | void __init pcibios_update_irq(struct pci_dev *dev, int irq) | ||
385 | { | ||
386 | pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); | ||
387 | } | ||
388 | |||
389 | /* | ||
390 | * Enable memory and/or address decoding, as appropriate, for the | ||
391 | * device described by the 'dev' struct. | ||
392 | * | ||
393 | * This is called from the generic PCI layer, and can be called | ||
394 | * for bridges or endpoints. | ||
395 | */ | ||
396 | int pcibios_enable_device(struct pci_dev *dev, int mask) | ||
397 | { | ||
398 | u16 cmd, old_cmd; | ||
399 | u8 header_type; | ||
400 | int i; | ||
401 | struct resource *r; | ||
402 | |||
403 | pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type); | ||
404 | |||
405 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | ||
406 | old_cmd = cmd; | ||
407 | if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { | ||
408 | /* | ||
409 | * For bridges, we enable both memory and I/O decoding | ||
410 | * in call cases. | ||
411 | */ | ||
412 | cmd |= PCI_COMMAND_IO; | ||
413 | cmd |= PCI_COMMAND_MEMORY; | ||
414 | } else { | ||
415 | /* | ||
416 | * For endpoints, we enable memory and/or I/O decoding | ||
417 | * only if they have a memory resource of that type. | ||
418 | */ | ||
419 | for (i = 0; i < 6; i++) { | ||
420 | r = &dev->resource[i]; | ||
421 | if (r->flags & IORESOURCE_UNSET) { | ||
422 | pr_err("PCI: Device %s not available " | ||
423 | "because of resource collisions\n", | ||
424 | pci_name(dev)); | ||
425 | return -EINVAL; | ||
426 | } | ||
427 | if (r->flags & IORESOURCE_IO) | ||
428 | cmd |= PCI_COMMAND_IO; | ||
429 | if (r->flags & IORESOURCE_MEM) | ||
430 | cmd |= PCI_COMMAND_MEMORY; | ||
431 | } | ||
432 | } | ||
433 | |||
434 | /* | ||
435 | * We only write the command if it changed. | ||
436 | */ | ||
437 | if (cmd != old_cmd) | ||
438 | pci_write_config_word(dev, PCI_COMMAND, cmd); | ||
439 | return 0; | ||
440 | } | ||
441 | |||
442 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max) | ||
443 | { | ||
444 | unsigned long start = pci_resource_start(dev, bar); | ||
445 | unsigned long len = pci_resource_len(dev, bar); | ||
446 | unsigned long flags = pci_resource_flags(dev, bar); | ||
447 | |||
448 | if (!len) | ||
449 | return NULL; | ||
450 | if (max && len > max) | ||
451 | len = max; | ||
452 | |||
453 | if (!(flags & IORESOURCE_MEM)) { | ||
454 | pr_info("PCI: Trying to map invalid resource %#lx\n", flags); | ||
455 | start = 0; | ||
456 | } | ||
457 | |||
458 | return (void __iomem *)start; | ||
459 | } | ||
460 | EXPORT_SYMBOL(pci_iomap); | ||
461 | |||
462 | |||
463 | /**************************************************************** | ||
464 | * | ||
465 | * Tile PCI config space read/write routines | ||
466 | * | ||
467 | ****************************************************************/ | ||
468 | |||
469 | /* | ||
470 | * These are the normal read and write ops. | ||
471 | * They are expanded with macros from pci_bus_read_config_byte() etc. | ||
472 | * | ||
473 | * devfn is the combined PCI slot & function. | ||
474 | * | ||
475 | * offset is in bytes, from the start of config space for the | ||
476 | * specified bus & slot. | ||
477 | */ | ||
478 | |||
479 | static int __devinit tile_cfg_read(struct pci_bus *bus, | ||
480 | unsigned int devfn, | ||
481 | int offset, | ||
482 | int size, | ||
483 | u32 *val) | ||
484 | { | ||
485 | struct pci_controller *controller = bus->sysdata; | ||
486 | int busnum = bus->number & 0xff; | ||
487 | int slot = (devfn >> 3) & 0x1f; | ||
488 | int function = devfn & 0x7; | ||
489 | u32 addr; | ||
490 | int config_mode = 1; | ||
491 | |||
492 | /* | ||
493 | * There is no bridge between the Tile and bus 0, so we | ||
494 | * use config0 to talk to bus 0. | ||
495 | * | ||
496 | * If we're talking to a bus other than zero then we | ||
497 | * must have found a bridge. | ||
498 | */ | ||
499 | if (busnum == 0) { | ||
500 | /* | ||
501 | * We fake an empty slot for (busnum == 0) && (slot > 0), | ||
502 | * since there is only one slot on bus 0. | ||
503 | */ | ||
504 | if (slot) { | ||
505 | *val = 0xFFFFFFFF; | ||
506 | return 0; | ||
507 | } | ||
508 | config_mode = 0; | ||
509 | } | ||
510 | |||
511 | addr = busnum << 20; /* Bus in 27:20 */ | ||
512 | addr |= slot << 15; /* Slot (device) in 19:15 */ | ||
513 | addr |= function << 12; /* Function is in 14:12 */ | ||
514 | addr |= (offset & 0xFFF); /* byte address in 0:11 */ | ||
515 | |||
516 | return hv_dev_pread(controller->hv_cfg_fd[config_mode], 0, | ||
517 | (HV_VirtAddr)(val), size, addr); | ||
518 | } | ||
519 | |||
520 | |||
521 | /* | ||
522 | * See tile_cfg_read() for relevant comments. | ||
523 | * Note that "val" is the value to write, not a pointer to that value. | ||
524 | */ | ||
525 | static int __devinit tile_cfg_write(struct pci_bus *bus, | ||
526 | unsigned int devfn, | ||
527 | int offset, | ||
528 | int size, | ||
529 | u32 val) | ||
530 | { | ||
531 | struct pci_controller *controller = bus->sysdata; | ||
532 | int busnum = bus->number & 0xff; | ||
533 | int slot = (devfn >> 3) & 0x1f; | ||
534 | int function = devfn & 0x7; | ||
535 | u32 addr; | ||
536 | int config_mode = 1; | ||
537 | HV_VirtAddr valp = (HV_VirtAddr)&val; | ||
538 | |||
539 | /* | ||
540 | * For bus 0 slot 0 we use config 0 accesses. | ||
541 | */ | ||
542 | if (busnum == 0) { | ||
543 | /* | ||
544 | * We fake an empty slot for (busnum == 0) && (slot > 0), | ||
545 | * since there is only one slot on bus 0. | ||
546 | */ | ||
547 | if (slot) | ||
548 | return 0; | ||
549 | config_mode = 0; | ||
550 | } | ||
551 | |||
552 | addr = busnum << 20; /* Bus in 27:20 */ | ||
553 | addr |= slot << 15; /* Slot (device) in 19:15 */ | ||
554 | addr |= function << 12; /* Function is in 14:12 */ | ||
555 | addr |= (offset & 0xFFF); /* byte address in 0:11 */ | ||
556 | |||
557 | #ifdef __BIG_ENDIAN | ||
558 | /* Point to the correct part of the 32-bit "val". */ | ||
559 | valp += 4 - size; | ||
560 | #endif | ||
561 | |||
562 | return hv_dev_pwrite(controller->hv_cfg_fd[config_mode], 0, | ||
563 | valp, size, addr); | ||
564 | } | ||
565 | |||
566 | |||
567 | static struct pci_ops tile_cfg_ops = { | ||
568 | .read = tile_cfg_read, | ||
569 | .write = tile_cfg_write, | ||
570 | }; | ||
571 | |||
572 | |||
573 | /* | ||
574 | * In the following, each PCI controller's mem_resources[1] | ||
575 | * represents its (non-prefetchable) PCI memory resource. | ||
576 | * mem_resources[0] and mem_resources[2] refer to its PCI I/O and | ||
577 | * prefetchable PCI memory resources, respectively. | ||
578 | * For more details, see pci_setup_bridge() in setup-bus.c. | ||
579 | * By comparing the target PCI memory address against the | ||
580 | * end address of controller 0, we can determine the controller | ||
581 | * that should accept the PCI memory access. | ||
582 | */ | ||
583 | #define TILE_READ(size, type) \ | ||
584 | type _tile_read##size(unsigned long addr) \ | ||
585 | { \ | ||
586 | type val; \ | ||
587 | int idx = 0; \ | ||
588 | if (addr > controllers[0].mem_resources[1].end && \ | ||
589 | addr > controllers[0].mem_resources[2].end) \ | ||
590 | idx = 1; \ | ||
591 | if (hv_dev_pread(controllers[idx].hv_mem_fd, 0, \ | ||
592 | (HV_VirtAddr)(&val), sizeof(type), addr)) \ | ||
593 | pr_err("PCI: read %zd bytes at 0x%lX failed\n", \ | ||
594 | sizeof(type), addr); \ | ||
595 | return val; \ | ||
596 | } \ | ||
597 | EXPORT_SYMBOL(_tile_read##size) | ||
598 | |||
599 | TILE_READ(b, u8); | ||
600 | TILE_READ(w, u16); | ||
601 | TILE_READ(l, u32); | ||
602 | TILE_READ(q, u64); | ||
603 | |||
604 | #define TILE_WRITE(size, type) \ | ||
605 | void _tile_write##size(type val, unsigned long addr) \ | ||
606 | { \ | ||
607 | int idx = 0; \ | ||
608 | if (addr > controllers[0].mem_resources[1].end && \ | ||
609 | addr > controllers[0].mem_resources[2].end) \ | ||
610 | idx = 1; \ | ||
611 | if (hv_dev_pwrite(controllers[idx].hv_mem_fd, 0, \ | ||
612 | (HV_VirtAddr)(&val), sizeof(type), addr)) \ | ||
613 | pr_err("PCI: write %zd bytes at 0x%lX failed\n", \ | ||
614 | sizeof(type), addr); \ | ||
615 | } \ | ||
616 | EXPORT_SYMBOL(_tile_write##size) | ||
617 | |||
618 | TILE_WRITE(b, u8); | ||
619 | TILE_WRITE(w, u16); | ||
620 | TILE_WRITE(l, u32); | ||
621 | TILE_WRITE(q, u64); | ||
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index fb0b3cbeae14..f18573643ed1 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c | |||
@@ -840,7 +840,7 @@ static int __init topology_init(void) | |||
840 | for_each_online_node(i) | 840 | for_each_online_node(i) |
841 | register_one_node(i); | 841 | register_one_node(i); |
842 | 842 | ||
843 | for_each_present_cpu(i) | 843 | for (i = 0; i < smp_height * smp_width; ++i) |
844 | register_cpu(&cpu_devices[i], i); | 844 | register_cpu(&cpu_devices[i], i); |
845 | 845 | ||
846 | return 0; | 846 | return 0; |
diff --git a/arch/tile/lib/memchr_32.c b/arch/tile/lib/memchr_32.c index 6235283b4859..cc3d9badf030 100644 --- a/arch/tile/lib/memchr_32.c +++ b/arch/tile/lib/memchr_32.c | |||
@@ -18,12 +18,24 @@ | |||
18 | 18 | ||
19 | void *memchr(const void *s, int c, size_t n) | 19 | void *memchr(const void *s, int c, size_t n) |
20 | { | 20 | { |
21 | const uint32_t *last_word_ptr; | ||
22 | const uint32_t *p; | ||
23 | const char *last_byte_ptr; | ||
24 | uintptr_t s_int; | ||
25 | uint32_t goal, before_mask, v, bits; | ||
26 | char *ret; | ||
27 | |||
28 | if (__builtin_expect(n == 0, 0)) { | ||
29 | /* Don't dereference any memory if the array is empty. */ | ||
30 | return NULL; | ||
31 | } | ||
32 | |||
21 | /* Get an aligned pointer. */ | 33 | /* Get an aligned pointer. */ |
22 | const uintptr_t s_int = (uintptr_t) s; | 34 | s_int = (uintptr_t) s; |
23 | const uint32_t *p = (const uint32_t *)(s_int & -4); | 35 | p = (const uint32_t *)(s_int & -4); |
24 | 36 | ||
25 | /* Create four copies of the byte for which we are looking. */ | 37 | /* Create four copies of the byte for which we are looking. */ |
26 | const uint32_t goal = 0x01010101 * (uint8_t) c; | 38 | goal = 0x01010101 * (uint8_t) c; |
27 | 39 | ||
28 | /* Read the first word, but munge it so that bytes before the array | 40 | /* Read the first word, but munge it so that bytes before the array |
29 | * will not match goal. | 41 | * will not match goal. |
@@ -31,23 +43,14 @@ void *memchr(const void *s, int c, size_t n) | |||
31 | * Note that this shift count expression works because we know | 43 | * Note that this shift count expression works because we know |
32 | * shift counts are taken mod 32. | 44 | * shift counts are taken mod 32. |
33 | */ | 45 | */ |
34 | const uint32_t before_mask = (1 << (s_int << 3)) - 1; | 46 | before_mask = (1 << (s_int << 3)) - 1; |
35 | uint32_t v = (*p | before_mask) ^ (goal & before_mask); | 47 | v = (*p | before_mask) ^ (goal & before_mask); |
36 | 48 | ||
37 | /* Compute the address of the last byte. */ | 49 | /* Compute the address of the last byte. */ |
38 | const char *const last_byte_ptr = (const char *)s + n - 1; | 50 | last_byte_ptr = (const char *)s + n - 1; |
39 | 51 | ||
40 | /* Compute the address of the word containing the last byte. */ | 52 | /* Compute the address of the word containing the last byte. */ |
41 | const uint32_t *const last_word_ptr = | 53 | last_word_ptr = (const uint32_t *)((uintptr_t) last_byte_ptr & -4); |
42 | (const uint32_t *)((uintptr_t) last_byte_ptr & -4); | ||
43 | |||
44 | uint32_t bits; | ||
45 | char *ret; | ||
46 | |||
47 | if (__builtin_expect(n == 0, 0)) { | ||
48 | /* Don't dereference any memory if the array is empty. */ | ||
49 | return NULL; | ||
50 | } | ||
51 | 54 | ||
52 | while ((bits = __insn_seqb(v, goal)) == 0) { | 55 | while ((bits = __insn_seqb(v, goal)) == 0) { |
53 | if (__builtin_expect(p == last_word_ptr, 0)) { | 56 | if (__builtin_expect(p == last_word_ptr, 0)) { |
diff --git a/arch/tile/lib/spinlock_32.c b/arch/tile/lib/spinlock_32.c index 485e24d62c6b..5cd1c4004eca 100644 --- a/arch/tile/lib/spinlock_32.c +++ b/arch/tile/lib/spinlock_32.c | |||
@@ -167,23 +167,30 @@ void arch_write_lock_slow(arch_rwlock_t *rwlock, u32 val) | |||
167 | * when we compare them. | 167 | * when we compare them. |
168 | */ | 168 | */ |
169 | u32 my_ticket_; | 169 | u32 my_ticket_; |
170 | u32 iterations = 0; | ||
170 | 171 | ||
171 | /* Take out the next ticket; this will also stop would-be readers. */ | 172 | /* |
172 | if (val & 1) | 173 | * Wait until there are no readers, then bump up the next |
173 | val = get_rwlock(rwlock); | 174 | * field and capture the ticket value. |
174 | rwlock->lock = __insn_addb(val, 1 << WR_NEXT_SHIFT); | 175 | */ |
176 | for (;;) { | ||
177 | if (!(val & 1)) { | ||
178 | if ((val >> RD_COUNT_SHIFT) == 0) | ||
179 | break; | ||
180 | rwlock->lock = val; | ||
181 | } | ||
182 | delay_backoff(iterations++); | ||
183 | val = __insn_tns((int *)&rwlock->lock); | ||
184 | } | ||
175 | 185 | ||
176 | /* Extract my ticket value from the original word. */ | 186 | /* Take out the next ticket and extract my ticket value. */ |
187 | rwlock->lock = __insn_addb(val, 1 << WR_NEXT_SHIFT); | ||
177 | my_ticket_ = val >> WR_NEXT_SHIFT; | 188 | my_ticket_ = val >> WR_NEXT_SHIFT; |
178 | 189 | ||
179 | /* | 190 | /* Wait until the "current" field matches our ticket. */ |
180 | * Wait until the "current" field matches our ticket, and | ||
181 | * there are no remaining readers. | ||
182 | */ | ||
183 | for (;;) { | 191 | for (;;) { |
184 | u32 curr_ = val >> WR_CURR_SHIFT; | 192 | u32 curr_ = val >> WR_CURR_SHIFT; |
185 | u32 readers = val >> RD_COUNT_SHIFT; | 193 | u32 delta = ((my_ticket_ - curr_) & WR_MASK); |
186 | u32 delta = ((my_ticket_ - curr_) & WR_MASK) + !!readers; | ||
187 | if (likely(delta == 0)) | 194 | if (likely(delta == 0)) |
188 | break; | 195 | break; |
189 | 196 | ||
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c index 7f7338c90784..1664cce7b0ac 100644 --- a/arch/um/drivers/line.c +++ b/arch/um/drivers/line.c | |||
@@ -727,6 +727,9 @@ struct winch { | |||
727 | 727 | ||
728 | static void free_winch(struct winch *winch, int free_irq_ok) | 728 | static void free_winch(struct winch *winch, int free_irq_ok) |
729 | { | 729 | { |
730 | if (free_irq_ok) | ||
731 | free_irq(WINCH_IRQ, winch); | ||
732 | |||
730 | list_del(&winch->list); | 733 | list_del(&winch->list); |
731 | 734 | ||
732 | if (winch->pid != -1) | 735 | if (winch->pid != -1) |
@@ -735,8 +738,6 @@ static void free_winch(struct winch *winch, int free_irq_ok) | |||
735 | os_close_file(winch->fd); | 738 | os_close_file(winch->fd); |
736 | if (winch->stack != 0) | 739 | if (winch->stack != 0) |
737 | free_stack(winch->stack, 0); | 740 | free_stack(winch->stack, 0); |
738 | if (free_irq_ok) | ||
739 | free_irq(WINCH_IRQ, winch); | ||
740 | kfree(winch); | 741 | kfree(winch); |
741 | } | 742 | } |
742 | 743 | ||
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index 4d293dced62f..9479a037419f 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h | |||
@@ -216,8 +216,8 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr) | |||
216 | } | 216 | } |
217 | 217 | ||
218 | /* Return a pointer with offset calculated */ | 218 | /* Return a pointer with offset calculated */ |
219 | static inline unsigned long __set_fixmap_offset(enum fixed_addresses idx, | 219 | static __always_inline unsigned long |
220 | phys_addr_t phys, pgprot_t flags) | 220 | __set_fixmap_offset(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags) |
221 | { | 221 | { |
222 | __set_fixmap(idx, phys, flags); | 222 | __set_fixmap(idx, phys, flags); |
223 | return fix_to_virt(idx) + (phys & (PAGE_SIZE - 1)); | 223 | return fix_to_virt(idx) + (phys & (PAGE_SIZE - 1)); |
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 3ea3dc487047..6b89f5e86021 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h | |||
@@ -128,7 +128,7 @@ | |||
128 | #define FAM10H_MMIO_CONF_ENABLE (1<<0) | 128 | #define FAM10H_MMIO_CONF_ENABLE (1<<0) |
129 | #define FAM10H_MMIO_CONF_BUSRANGE_MASK 0xf | 129 | #define FAM10H_MMIO_CONF_BUSRANGE_MASK 0xf |
130 | #define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2 | 130 | #define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2 |
131 | #define FAM10H_MMIO_CONF_BASE_MASK 0xfffffff | 131 | #define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL |
132 | #define FAM10H_MMIO_CONF_BASE_SHIFT 20 | 132 | #define FAM10H_MMIO_CONF_BASE_SHIFT 20 |
133 | #define MSR_FAM10H_NODE_ID 0xc001100c | 133 | #define MSR_FAM10H_NODE_ID 0xc001100c |
134 | 134 | ||
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 18e3b8a8709f..ef9975812c77 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h | |||
@@ -824,27 +824,27 @@ static __always_inline void arch_spin_unlock(struct arch_spinlock *lock) | |||
824 | #define __PV_IS_CALLEE_SAVE(func) \ | 824 | #define __PV_IS_CALLEE_SAVE(func) \ |
825 | ((struct paravirt_callee_save) { func }) | 825 | ((struct paravirt_callee_save) { func }) |
826 | 826 | ||
827 | static inline unsigned long arch_local_save_flags(void) | 827 | static inline notrace unsigned long arch_local_save_flags(void) |
828 | { | 828 | { |
829 | return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl); | 829 | return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl); |
830 | } | 830 | } |
831 | 831 | ||
832 | static inline void arch_local_irq_restore(unsigned long f) | 832 | static inline notrace void arch_local_irq_restore(unsigned long f) |
833 | { | 833 | { |
834 | PVOP_VCALLEE1(pv_irq_ops.restore_fl, f); | 834 | PVOP_VCALLEE1(pv_irq_ops.restore_fl, f); |
835 | } | 835 | } |
836 | 836 | ||
837 | static inline void arch_local_irq_disable(void) | 837 | static inline notrace void arch_local_irq_disable(void) |
838 | { | 838 | { |
839 | PVOP_VCALLEE0(pv_irq_ops.irq_disable); | 839 | PVOP_VCALLEE0(pv_irq_ops.irq_disable); |
840 | } | 840 | } |
841 | 841 | ||
842 | static inline void arch_local_irq_enable(void) | 842 | static inline notrace void arch_local_irq_enable(void) |
843 | { | 843 | { |
844 | PVOP_VCALLEE0(pv_irq_ops.irq_enable); | 844 | PVOP_VCALLEE0(pv_irq_ops.irq_enable); |
845 | } | 845 | } |
846 | 846 | ||
847 | static inline unsigned long arch_local_irq_save(void) | 847 | static inline notrace unsigned long arch_local_irq_save(void) |
848 | { | 848 | { |
849 | unsigned long f; | 849 | unsigned long f; |
850 | 850 | ||
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h index e969f691cbfd..a501741c2335 100644 --- a/arch/x86/include/asm/uv/uv_hub.h +++ b/arch/x86/include/asm/uv/uv_hub.h | |||
@@ -199,6 +199,8 @@ union uvh_apicid { | |||
199 | #define UVH_APICID 0x002D0E00L | 199 | #define UVH_APICID 0x002D0E00L |
200 | #define UV_APIC_PNODE_SHIFT 6 | 200 | #define UV_APIC_PNODE_SHIFT 6 |
201 | 201 | ||
202 | #define UV_APICID_HIBIT_MASK 0xffff0000 | ||
203 | |||
202 | /* Local Bus from cpu's perspective */ | 204 | /* Local Bus from cpu's perspective */ |
203 | #define LOCAL_BUS_BASE 0x1c00000 | 205 | #define LOCAL_BUS_BASE 0x1c00000 |
204 | #define LOCAL_BUS_SIZE (4 * 1024 * 1024) | 206 | #define LOCAL_BUS_SIZE (4 * 1024 * 1024) |
@@ -491,8 +493,10 @@ static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value) | |||
491 | } | 493 | } |
492 | } | 494 | } |
493 | 495 | ||
496 | extern unsigned int uv_apicid_hibits; | ||
494 | static unsigned long uv_hub_ipi_value(int apicid, int vector, int mode) | 497 | static unsigned long uv_hub_ipi_value(int apicid, int vector, int mode) |
495 | { | 498 | { |
499 | apicid |= uv_apicid_hibits; | ||
496 | return (1UL << UVH_IPI_INT_SEND_SHFT) | | 500 | return (1UL << UVH_IPI_INT_SEND_SHFT) | |
497 | ((apicid) << UVH_IPI_INT_APIC_ID_SHFT) | | 501 | ((apicid) << UVH_IPI_INT_APIC_ID_SHFT) | |
498 | (mode << UVH_IPI_INT_DELIVERY_MODE_SHFT) | | 502 | (mode << UVH_IPI_INT_DELIVERY_MODE_SHFT) | |
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h index 6d90adf4428a..20cafeac7455 100644 --- a/arch/x86/include/asm/uv/uv_mmrs.h +++ b/arch/x86/include/asm/uv/uv_mmrs.h | |||
@@ -5,7 +5,7 @@ | |||
5 | * | 5 | * |
6 | * SGI UV MMR definitions | 6 | * SGI UV MMR definitions |
7 | * | 7 | * |
8 | * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved. | 8 | * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #ifndef _ASM_X86_UV_UV_MMRS_H | 11 | #ifndef _ASM_X86_UV_UV_MMRS_H |
@@ -754,6 +754,23 @@ union uvh_lb_bau_sb_descriptor_base_u { | |||
754 | }; | 754 | }; |
755 | 755 | ||
756 | /* ========================================================================= */ | 756 | /* ========================================================================= */ |
757 | /* UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK */ | ||
758 | /* ========================================================================= */ | ||
759 | #define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK 0x320130UL | ||
760 | #define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK_32 0x009f0 | ||
761 | |||
762 | #define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_SHFT 0 | ||
763 | #define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_MASK 0x00000000ffffffffUL | ||
764 | |||
765 | union uvh_lb_target_physical_apic_id_mask_u { | ||
766 | unsigned long v; | ||
767 | struct uvh_lb_target_physical_apic_id_mask_s { | ||
768 | unsigned long bit_enables : 32; /* RW */ | ||
769 | unsigned long rsvd_32_63 : 32; /* */ | ||
770 | } s; | ||
771 | }; | ||
772 | |||
773 | /* ========================================================================= */ | ||
757 | /* UVH_NODE_ID */ | 774 | /* UVH_NODE_ID */ |
758 | /* ========================================================================= */ | 775 | /* ========================================================================= */ |
759 | #define UVH_NODE_ID 0x0UL | 776 | #define UVH_NODE_ID 0x0UL |
diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h index e8506c1f0c55..1c10c88ee4e1 100644 --- a/arch/x86/include/asm/xen/interface.h +++ b/arch/x86/include/asm/xen/interface.h | |||
@@ -61,9 +61,9 @@ DEFINE_GUEST_HANDLE(void); | |||
61 | #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) | 61 | #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) |
62 | #endif | 62 | #endif |
63 | 63 | ||
64 | #ifndef machine_to_phys_mapping | 64 | #define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START) |
65 | #define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START) | 65 | #define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END) |
66 | #endif | 66 | #define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>__MACH2PHYS_SHIFT) |
67 | 67 | ||
68 | /* Maximum number of virtual CPUs in multi-processor guests. */ | 68 | /* Maximum number of virtual CPUs in multi-processor guests. */ |
69 | #define MAX_VIRT_CPUS 32 | 69 | #define MAX_VIRT_CPUS 32 |
diff --git a/arch/x86/include/asm/xen/interface_32.h b/arch/x86/include/asm/xen/interface_32.h index 42a7e004ae5c..8413688b2571 100644 --- a/arch/x86/include/asm/xen/interface_32.h +++ b/arch/x86/include/asm/xen/interface_32.h | |||
@@ -32,6 +32,11 @@ | |||
32 | /* And the trap vector is... */ | 32 | /* And the trap vector is... */ |
33 | #define TRAP_INSTR "int $0x82" | 33 | #define TRAP_INSTR "int $0x82" |
34 | 34 | ||
35 | #define __MACH2PHYS_VIRT_START 0xF5800000 | ||
36 | #define __MACH2PHYS_VIRT_END 0xF6800000 | ||
37 | |||
38 | #define __MACH2PHYS_SHIFT 2 | ||
39 | |||
35 | /* | 40 | /* |
36 | * Virtual addresses beyond this are not modifiable by guest OSes. The | 41 | * Virtual addresses beyond this are not modifiable by guest OSes. The |
37 | * machine->physical mapping table starts at this address, read-only. | 42 | * machine->physical mapping table starts at this address, read-only. |
diff --git a/arch/x86/include/asm/xen/interface_64.h b/arch/x86/include/asm/xen/interface_64.h index 100d2662b97c..839a4811cf98 100644 --- a/arch/x86/include/asm/xen/interface_64.h +++ b/arch/x86/include/asm/xen/interface_64.h | |||
@@ -39,18 +39,7 @@ | |||
39 | #define __HYPERVISOR_VIRT_END 0xFFFF880000000000 | 39 | #define __HYPERVISOR_VIRT_END 0xFFFF880000000000 |
40 | #define __MACH2PHYS_VIRT_START 0xFFFF800000000000 | 40 | #define __MACH2PHYS_VIRT_START 0xFFFF800000000000 |
41 | #define __MACH2PHYS_VIRT_END 0xFFFF804000000000 | 41 | #define __MACH2PHYS_VIRT_END 0xFFFF804000000000 |
42 | 42 | #define __MACH2PHYS_SHIFT 3 | |
43 | #ifndef HYPERVISOR_VIRT_START | ||
44 | #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) | ||
45 | #define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END) | ||
46 | #endif | ||
47 | |||
48 | #define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START) | ||
49 | #define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END) | ||
50 | #define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3) | ||
51 | #ifndef machine_to_phys_mapping | ||
52 | #define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START) | ||
53 | #endif | ||
54 | 43 | ||
55 | /* | 44 | /* |
56 | * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base) | 45 | * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base) |
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index dd8c1414b3d5..8760cc60a21c 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h | |||
@@ -5,6 +5,7 @@ | |||
5 | #include <linux/types.h> | 5 | #include <linux/types.h> |
6 | #include <linux/spinlock.h> | 6 | #include <linux/spinlock.h> |
7 | #include <linux/pfn.h> | 7 | #include <linux/pfn.h> |
8 | #include <linux/mm.h> | ||
8 | 9 | ||
9 | #include <asm/uaccess.h> | 10 | #include <asm/uaccess.h> |
10 | #include <asm/page.h> | 11 | #include <asm/page.h> |
@@ -35,6 +36,8 @@ typedef struct xpaddr { | |||
35 | #define MAX_DOMAIN_PAGES \ | 36 | #define MAX_DOMAIN_PAGES \ |
36 | ((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE)) | 37 | ((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE)) |
37 | 38 | ||
39 | extern unsigned long *machine_to_phys_mapping; | ||
40 | extern unsigned int machine_to_phys_order; | ||
38 | 41 | ||
39 | extern unsigned long get_phys_to_machine(unsigned long pfn); | 42 | extern unsigned long get_phys_to_machine(unsigned long pfn); |
40 | extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn); | 43 | extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn); |
@@ -69,10 +72,8 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn) | |||
69 | if (xen_feature(XENFEAT_auto_translated_physmap)) | 72 | if (xen_feature(XENFEAT_auto_translated_physmap)) |
70 | return mfn; | 73 | return mfn; |
71 | 74 | ||
72 | #if 0 | ||
73 | if (unlikely((mfn >> machine_to_phys_order) != 0)) | 75 | if (unlikely((mfn >> machine_to_phys_order) != 0)) |
74 | return max_mapnr; | 76 | return ~0; |
75 | #endif | ||
76 | 77 | ||
77 | pfn = 0; | 78 | pfn = 0; |
78 | /* | 79 | /* |
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c index cefd6942f0e9..62f6e1e55b90 100644 --- a/arch/x86/kernel/apic/hw_nmi.c +++ b/arch/x86/kernel/apic/hw_nmi.c | |||
@@ -17,15 +17,16 @@ | |||
17 | #include <linux/nmi.h> | 17 | #include <linux/nmi.h> |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | 19 | ||
20 | /* For reliability, we're prepared to waste bits here. */ | ||
21 | static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly; | ||
22 | |||
23 | u64 hw_nmi_get_sample_period(void) | 20 | u64 hw_nmi_get_sample_period(void) |
24 | { | 21 | { |
25 | return (u64)(cpu_khz) * 1000 * 60; | 22 | return (u64)(cpu_khz) * 1000 * 60; |
26 | } | 23 | } |
27 | 24 | ||
28 | #ifdef ARCH_HAS_NMI_WATCHDOG | 25 | #ifdef ARCH_HAS_NMI_WATCHDOG |
26 | |||
27 | /* For reliability, we're prepared to waste bits here. */ | ||
28 | static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly; | ||
29 | |||
29 | void arch_trigger_all_cpu_backtrace(void) | 30 | void arch_trigger_all_cpu_backtrace(void) |
30 | { | 31 | { |
31 | int i; | 32 | int i; |
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 194539aea175..c1c52c341f40 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c | |||
@@ -44,6 +44,8 @@ static u64 gru_start_paddr, gru_end_paddr; | |||
44 | static union uvh_apicid uvh_apicid; | 44 | static union uvh_apicid uvh_apicid; |
45 | int uv_min_hub_revision_id; | 45 | int uv_min_hub_revision_id; |
46 | EXPORT_SYMBOL_GPL(uv_min_hub_revision_id); | 46 | EXPORT_SYMBOL_GPL(uv_min_hub_revision_id); |
47 | unsigned int uv_apicid_hibits; | ||
48 | EXPORT_SYMBOL_GPL(uv_apicid_hibits); | ||
47 | static DEFINE_SPINLOCK(uv_nmi_lock); | 49 | static DEFINE_SPINLOCK(uv_nmi_lock); |
48 | 50 | ||
49 | static inline bool is_GRU_range(u64 start, u64 end) | 51 | static inline bool is_GRU_range(u64 start, u64 end) |
@@ -85,6 +87,23 @@ static void __init early_get_apic_pnode_shift(void) | |||
85 | uvh_apicid.s.pnode_shift = UV_APIC_PNODE_SHIFT; | 87 | uvh_apicid.s.pnode_shift = UV_APIC_PNODE_SHIFT; |
86 | } | 88 | } |
87 | 89 | ||
90 | /* | ||
91 | * Add an extra bit as dictated by the BIOS to the destination apicid of | ||
92 | * interrupts potentially passing through the UV HUB. This prevents | ||
93 | * a deadlock between interrupts and IO port operations. | ||
94 | */ | ||
95 | static void __init uv_set_apicid_hibit(void) | ||
96 | { | ||
97 | union uvh_lb_target_physical_apic_id_mask_u apicid_mask; | ||
98 | unsigned long *mmr; | ||
99 | |||
100 | mmr = early_ioremap(UV_LOCAL_MMR_BASE | | ||
101 | UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK, sizeof(*mmr)); | ||
102 | apicid_mask.v = *mmr; | ||
103 | early_iounmap(mmr, sizeof(*mmr)); | ||
104 | uv_apicid_hibits = apicid_mask.s.bit_enables & UV_APICID_HIBIT_MASK; | ||
105 | } | ||
106 | |||
88 | static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | 107 | static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id) |
89 | { | 108 | { |
90 | int nodeid; | 109 | int nodeid; |
@@ -102,6 +121,7 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
102 | __get_cpu_var(x2apic_extra_bits) = | 121 | __get_cpu_var(x2apic_extra_bits) = |
103 | nodeid << (uvh_apicid.s.pnode_shift - 1); | 122 | nodeid << (uvh_apicid.s.pnode_shift - 1); |
104 | uv_system_type = UV_NON_UNIQUE_APIC; | 123 | uv_system_type = UV_NON_UNIQUE_APIC; |
124 | uv_set_apicid_hibit(); | ||
105 | return 1; | 125 | return 1; |
106 | } | 126 | } |
107 | } | 127 | } |
@@ -155,6 +175,7 @@ static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_ri | |||
155 | int pnode; | 175 | int pnode; |
156 | 176 | ||
157 | pnode = uv_apicid_to_pnode(phys_apicid); | 177 | pnode = uv_apicid_to_pnode(phys_apicid); |
178 | phys_apicid |= uv_apicid_hibits; | ||
158 | val = (1UL << UVH_IPI_INT_SEND_SHFT) | | 179 | val = (1UL << UVH_IPI_INT_SEND_SHFT) | |
159 | (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | | 180 | (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | |
160 | ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | | 181 | ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | |
@@ -236,7 +257,7 @@ static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask) | |||
236 | int cpu = cpumask_first(cpumask); | 257 | int cpu = cpumask_first(cpumask); |
237 | 258 | ||
238 | if ((unsigned)cpu < nr_cpu_ids) | 259 | if ((unsigned)cpu < nr_cpu_ids) |
239 | return per_cpu(x86_cpu_to_apicid, cpu); | 260 | return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits; |
240 | else | 261 | else |
241 | return BAD_APICID; | 262 | return BAD_APICID; |
242 | } | 263 | } |
@@ -255,7 +276,7 @@ uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask, | |||
255 | if (cpumask_test_cpu(cpu, cpu_online_mask)) | 276 | if (cpumask_test_cpu(cpu, cpu_online_mask)) |
256 | break; | 277 | break; |
257 | } | 278 | } |
258 | return per_cpu(x86_cpu_to_apicid, cpu); | 279 | return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits; |
259 | } | 280 | } |
260 | 281 | ||
261 | static unsigned int x2apic_get_apic_id(unsigned long x) | 282 | static unsigned int x2apic_get_apic_id(unsigned long x) |
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 59e175e89599..591e60104278 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S | |||
@@ -395,7 +395,7 @@ sysenter_past_esp: | |||
395 | * A tiny bit of offset fixup is necessary - 4*4 means the 4 words | 395 | * A tiny bit of offset fixup is necessary - 4*4 means the 4 words |
396 | * pushed above; +8 corresponds to copy_thread's esp0 setting. | 396 | * pushed above; +8 corresponds to copy_thread's esp0 setting. |
397 | */ | 397 | */ |
398 | pushl_cfi (TI_sysenter_return-THREAD_SIZE_asm+8+4*4)(%esp) | 398 | pushl_cfi ((TI_sysenter_return)-THREAD_SIZE_asm+8+4*4)(%esp) |
399 | CFI_REL_OFFSET eip, 0 | 399 | CFI_REL_OFFSET eip, 0 |
400 | 400 | ||
401 | pushl_cfi %eax | 401 | pushl_cfi %eax |
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index fe2690d71c0c..e3ba417e8697 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -295,6 +295,7 @@ ENDPROC(native_usergs_sysret64) | |||
295 | .endm | 295 | .endm |
296 | 296 | ||
297 | /* save partial stack frame */ | 297 | /* save partial stack frame */ |
298 | .pushsection .kprobes.text, "ax" | ||
298 | ENTRY(save_args) | 299 | ENTRY(save_args) |
299 | XCPT_FRAME | 300 | XCPT_FRAME |
300 | cld | 301 | cld |
@@ -334,6 +335,7 @@ ENTRY(save_args) | |||
334 | ret | 335 | ret |
335 | CFI_ENDPROC | 336 | CFI_ENDPROC |
336 | END(save_args) | 337 | END(save_args) |
338 | .popsection | ||
337 | 339 | ||
338 | ENTRY(save_rest) | 340 | ENTRY(save_rest) |
339 | PARTIAL_FRAME 1 REST_SKIP+8 | 341 | PARTIAL_FRAME 1 REST_SKIP+8 |
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index ff15c9dcc25d..42c594254507 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c | |||
@@ -433,6 +433,10 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args) | |||
433 | dr6_p = (unsigned long *)ERR_PTR(args->err); | 433 | dr6_p = (unsigned long *)ERR_PTR(args->err); |
434 | dr6 = *dr6_p; | 434 | dr6 = *dr6_p; |
435 | 435 | ||
436 | /* If it's a single step, TRAP bits are random */ | ||
437 | if (dr6 & DR_STEP) | ||
438 | return NOTIFY_DONE; | ||
439 | |||
436 | /* Do an early return if no trap bits are set in DR6 */ | 440 | /* Do an early return if no trap bits are set in DR6 */ |
437 | if ((dr6 & DR_TRAP_BITS) == 0) | 441 | if ((dr6 & DR_TRAP_BITS) == 0) |
438 | return NOTIFY_DONE; | 442 | return NOTIFY_DONE; |
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c index 6da143c2a6b8..ac861b8348e2 100644 --- a/arch/x86/kernel/mmconf-fam10h_64.c +++ b/arch/x86/kernel/mmconf-fam10h_64.c | |||
@@ -25,7 +25,6 @@ struct pci_hostbridge_probe { | |||
25 | }; | 25 | }; |
26 | 26 | ||
27 | static u64 __cpuinitdata fam10h_pci_mmconf_base; | 27 | static u64 __cpuinitdata fam10h_pci_mmconf_base; |
28 | static int __cpuinitdata fam10h_pci_mmconf_base_status; | ||
29 | 28 | ||
30 | static struct pci_hostbridge_probe pci_probes[] __cpuinitdata = { | 29 | static struct pci_hostbridge_probe pci_probes[] __cpuinitdata = { |
31 | { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 }, | 30 | { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 }, |
@@ -44,10 +43,12 @@ static int __cpuinit cmp_range(const void *x1, const void *x2) | |||
44 | return start1 - start2; | 43 | return start1 - start2; |
45 | } | 44 | } |
46 | 45 | ||
47 | /*[47:0] */ | 46 | #define MMCONF_UNIT (1ULL << FAM10H_MMIO_CONF_BASE_SHIFT) |
48 | /* need to avoid (0xfd<<32) and (0xfe<<32), ht used space */ | 47 | #define MMCONF_MASK (~(MMCONF_UNIT - 1)) |
48 | #define MMCONF_SIZE (MMCONF_UNIT << 8) | ||
49 | /* need to avoid (0xfd<<32), (0xfe<<32), and (0xff<<32), ht used space */ | ||
49 | #define FAM10H_PCI_MMCONF_BASE (0xfcULL<<32) | 50 | #define FAM10H_PCI_MMCONF_BASE (0xfcULL<<32) |
50 | #define BASE_VALID(b) ((b != (0xfdULL << 32)) && (b != (0xfeULL << 32))) | 51 | #define BASE_VALID(b) ((b) + MMCONF_SIZE <= (0xfdULL<<32) || (b) >= (1ULL<<40)) |
51 | static void __cpuinit get_fam10h_pci_mmconf_base(void) | 52 | static void __cpuinit get_fam10h_pci_mmconf_base(void) |
52 | { | 53 | { |
53 | int i; | 54 | int i; |
@@ -64,12 +65,11 @@ static void __cpuinit get_fam10h_pci_mmconf_base(void) | |||
64 | struct range range[8]; | 65 | struct range range[8]; |
65 | 66 | ||
66 | /* only try to get setting from BSP */ | 67 | /* only try to get setting from BSP */ |
67 | /* -1 or 1 */ | 68 | if (fam10h_pci_mmconf_base) |
68 | if (fam10h_pci_mmconf_base_status) | ||
69 | return; | 69 | return; |
70 | 70 | ||
71 | if (!early_pci_allowed()) | 71 | if (!early_pci_allowed()) |
72 | goto fail; | 72 | return; |
73 | 73 | ||
74 | found = 0; | 74 | found = 0; |
75 | for (i = 0; i < ARRAY_SIZE(pci_probes); i++) { | 75 | for (i = 0; i < ARRAY_SIZE(pci_probes); i++) { |
@@ -91,7 +91,7 @@ static void __cpuinit get_fam10h_pci_mmconf_base(void) | |||
91 | } | 91 | } |
92 | 92 | ||
93 | if (!found) | 93 | if (!found) |
94 | goto fail; | 94 | return; |
95 | 95 | ||
96 | /* SYS_CFG */ | 96 | /* SYS_CFG */ |
97 | address = MSR_K8_SYSCFG; | 97 | address = MSR_K8_SYSCFG; |
@@ -99,16 +99,16 @@ static void __cpuinit get_fam10h_pci_mmconf_base(void) | |||
99 | 99 | ||
100 | /* TOP_MEM2 is not enabled? */ | 100 | /* TOP_MEM2 is not enabled? */ |
101 | if (!(val & (1<<21))) { | 101 | if (!(val & (1<<21))) { |
102 | tom2 = 0; | 102 | tom2 = 1ULL << 32; |
103 | } else { | 103 | } else { |
104 | /* TOP_MEM2 */ | 104 | /* TOP_MEM2 */ |
105 | address = MSR_K8_TOP_MEM2; | 105 | address = MSR_K8_TOP_MEM2; |
106 | rdmsrl(address, val); | 106 | rdmsrl(address, val); |
107 | tom2 = val & (0xffffULL<<32); | 107 | tom2 = max(val & 0xffffff800000ULL, 1ULL << 32); |
108 | } | 108 | } |
109 | 109 | ||
110 | if (base <= tom2) | 110 | if (base <= tom2) |
111 | base = tom2 + (1ULL<<32); | 111 | base = (tom2 + 2 * MMCONF_UNIT - 1) & MMCONF_MASK; |
112 | 112 | ||
113 | /* | 113 | /* |
114 | * need to check if the range is in the high mmio range that is | 114 | * need to check if the range is in the high mmio range that is |
@@ -123,11 +123,11 @@ static void __cpuinit get_fam10h_pci_mmconf_base(void) | |||
123 | if (!(reg & 3)) | 123 | if (!(reg & 3)) |
124 | continue; | 124 | continue; |
125 | 125 | ||
126 | start = (((u64)reg) << 8) & (0xffULL << 32); /* 39:16 on 31:8*/ | 126 | start = (u64)(reg & 0xffffff00) << 8; /* 39:16 on 31:8*/ |
127 | reg = read_pci_config(bus, slot, 1, 0x84 + (i << 3)); | 127 | reg = read_pci_config(bus, slot, 1, 0x84 + (i << 3)); |
128 | end = (((u64)reg) << 8) & (0xffULL << 32); /* 39:16 on 31:8*/ | 128 | end = ((u64)(reg & 0xffffff00) << 8) | 0xffff; /* 39:16 on 31:8*/ |
129 | 129 | ||
130 | if (!end) | 130 | if (end < tom2) |
131 | continue; | 131 | continue; |
132 | 132 | ||
133 | range[hi_mmio_num].start = start; | 133 | range[hi_mmio_num].start = start; |
@@ -143,32 +143,27 @@ static void __cpuinit get_fam10h_pci_mmconf_base(void) | |||
143 | 143 | ||
144 | if (range[hi_mmio_num - 1].end < base) | 144 | if (range[hi_mmio_num - 1].end < base) |
145 | goto out; | 145 | goto out; |
146 | if (range[0].start > base) | 146 | if (range[0].start > base + MMCONF_SIZE) |
147 | goto out; | 147 | goto out; |
148 | 148 | ||
149 | /* need to find one window */ | 149 | /* need to find one window */ |
150 | base = range[0].start - (1ULL << 32); | 150 | base = (range[0].start & MMCONF_MASK) - MMCONF_UNIT; |
151 | if ((base > tom2) && BASE_VALID(base)) | 151 | if ((base > tom2) && BASE_VALID(base)) |
152 | goto out; | 152 | goto out; |
153 | base = range[hi_mmio_num - 1].end + (1ULL << 32); | 153 | base = (range[hi_mmio_num - 1].end + MMCONF_UNIT) & MMCONF_MASK; |
154 | if ((base > tom2) && BASE_VALID(base)) | 154 | if (BASE_VALID(base)) |
155 | goto out; | 155 | goto out; |
156 | /* need to find window between ranges */ | 156 | /* need to find window between ranges */ |
157 | if (hi_mmio_num > 1) | 157 | for (i = 1; i < hi_mmio_num; i++) { |
158 | for (i = 0; i < hi_mmio_num - 1; i++) { | 158 | base = (range[i - 1].end + MMCONF_UNIT) & MMCONF_MASK; |
159 | if (range[i + 1].start > (range[i].end + (1ULL << 32))) { | 159 | val = range[i].start & MMCONF_MASK; |
160 | base = range[i].end + (1ULL << 32); | 160 | if (val >= base + MMCONF_SIZE && BASE_VALID(base)) |
161 | if ((base > tom2) && BASE_VALID(base)) | 161 | goto out; |
162 | goto out; | ||
163 | } | ||
164 | } | 162 | } |
165 | |||
166 | fail: | ||
167 | fam10h_pci_mmconf_base_status = -1; | ||
168 | return; | 163 | return; |
164 | |||
169 | out: | 165 | out: |
170 | fam10h_pci_mmconf_base = base; | 166 | fam10h_pci_mmconf_base = base; |
171 | fam10h_pci_mmconf_base_status = 1; | ||
172 | } | 167 | } |
173 | 168 | ||
174 | void __cpuinit fam10h_check_enable_mmcfg(void) | 169 | void __cpuinit fam10h_check_enable_mmcfg(void) |
@@ -190,11 +185,10 @@ void __cpuinit fam10h_check_enable_mmcfg(void) | |||
190 | 185 | ||
191 | /* only trust the one handle 256 buses, if acpi=off */ | 186 | /* only trust the one handle 256 buses, if acpi=off */ |
192 | if (!acpi_pci_disabled || busnbits >= 8) { | 187 | if (!acpi_pci_disabled || busnbits >= 8) { |
193 | u64 base; | 188 | u64 base = val & MMCONF_MASK; |
194 | base = val & (0xffffULL << 32); | 189 | |
195 | if (fam10h_pci_mmconf_base_status <= 0) { | 190 | if (!fam10h_pci_mmconf_base) { |
196 | fam10h_pci_mmconf_base = base; | 191 | fam10h_pci_mmconf_base = base; |
197 | fam10h_pci_mmconf_base_status = 1; | ||
198 | return; | 192 | return; |
199 | } else if (fam10h_pci_mmconf_base == base) | 193 | } else if (fam10h_pci_mmconf_base == base) |
200 | return; | 194 | return; |
@@ -206,8 +200,10 @@ void __cpuinit fam10h_check_enable_mmcfg(void) | |||
206 | * with 256 buses | 200 | * with 256 buses |
207 | */ | 201 | */ |
208 | get_fam10h_pci_mmconf_base(); | 202 | get_fam10h_pci_mmconf_base(); |
209 | if (fam10h_pci_mmconf_base_status <= 0) | 203 | if (!fam10h_pci_mmconf_base) { |
204 | pci_probe &= ~PCI_CHECK_ENABLE_AMD_MMCONF; | ||
210 | return; | 205 | return; |
206 | } | ||
211 | 207 | ||
212 | printk(KERN_INFO "Enable MMCONFIG on AMD Family 10h\n"); | 208 | printk(KERN_INFO "Enable MMCONFIG on AMD Family 10h\n"); |
213 | val &= ~((FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT) | | 209 | val &= ~((FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT) | |
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 12cdbb17ad18..6acc724d5d8f 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c | |||
@@ -223,7 +223,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask, | |||
223 | 223 | ||
224 | static void __cpuinit calculate_tlb_offset(void) | 224 | static void __cpuinit calculate_tlb_offset(void) |
225 | { | 225 | { |
226 | int cpu, node, nr_node_vecs; | 226 | int cpu, node, nr_node_vecs, idx = 0; |
227 | /* | 227 | /* |
228 | * we are changing tlb_vector_offset for each CPU in runtime, but this | 228 | * we are changing tlb_vector_offset for each CPU in runtime, but this |
229 | * will not cause inconsistency, as the write is atomic under X86. we | 229 | * will not cause inconsistency, as the write is atomic under X86. we |
@@ -239,7 +239,7 @@ static void __cpuinit calculate_tlb_offset(void) | |||
239 | nr_node_vecs = NUM_INVALIDATE_TLB_VECTORS/nr_online_nodes; | 239 | nr_node_vecs = NUM_INVALIDATE_TLB_VECTORS/nr_online_nodes; |
240 | 240 | ||
241 | for_each_online_node(node) { | 241 | for_each_online_node(node) { |
242 | int node_offset = (node % NUM_INVALIDATE_TLB_VECTORS) * | 242 | int node_offset = (idx % NUM_INVALIDATE_TLB_VECTORS) * |
243 | nr_node_vecs; | 243 | nr_node_vecs; |
244 | int cpu_offset = 0; | 244 | int cpu_offset = 0; |
245 | for_each_cpu(cpu, cpumask_of_node(node)) { | 245 | for_each_cpu(cpu, cpumask_of_node(node)) { |
@@ -248,6 +248,7 @@ static void __cpuinit calculate_tlb_offset(void) | |||
248 | cpu_offset++; | 248 | cpu_offset++; |
249 | cpu_offset = cpu_offset % nr_node_vecs; | 249 | cpu_offset = cpu_offset % nr_node_vecs; |
250 | } | 250 | } |
251 | idx++; | ||
251 | } | 252 | } |
252 | } | 253 | } |
253 | 254 | ||
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index a318194002b5..ba9caa808a9c 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c | |||
@@ -1455,7 +1455,7 @@ static void __init uv_init_uvhub(int uvhub, int vector) | |||
1455 | * the below initialization can't be in firmware because the | 1455 | * the below initialization can't be in firmware because the |
1456 | * messaging IRQ will be determined by the OS | 1456 | * messaging IRQ will be determined by the OS |
1457 | */ | 1457 | */ |
1458 | apicid = uvhub_to_first_apicid(uvhub); | 1458 | apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits; |
1459 | uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, | 1459 | uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, |
1460 | ((apicid << 32) | vector)); | 1460 | ((apicid << 32) | vector)); |
1461 | } | 1461 | } |
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c index 56e421bc379b..9daf5d1af9f1 100644 --- a/arch/x86/platform/uv/uv_time.c +++ b/arch/x86/platform/uv/uv_time.c | |||
@@ -89,6 +89,7 @@ static void uv_rtc_send_IPI(int cpu) | |||
89 | 89 | ||
90 | apicid = cpu_physical_id(cpu); | 90 | apicid = cpu_physical_id(cpu); |
91 | pnode = uv_apicid_to_pnode(apicid); | 91 | pnode = uv_apicid_to_pnode(apicid); |
92 | apicid |= uv_apicid_hibits; | ||
92 | val = (1UL << UVH_IPI_INT_SEND_SHFT) | | 93 | val = (1UL << UVH_IPI_INT_SEND_SHFT) | |
93 | (apicid << UVH_IPI_INT_APIC_ID_SHFT) | | 94 | (apicid << UVH_IPI_INT_APIC_ID_SHFT) | |
94 | (X86_PLATFORM_IPI_VECTOR << UVH_IPI_INT_VECTOR_SHFT); | 95 | (X86_PLATFORM_IPI_VECTOR << UVH_IPI_INT_VECTOR_SHFT); |
@@ -107,6 +108,7 @@ static int uv_intr_pending(int pnode) | |||
107 | static int uv_setup_intr(int cpu, u64 expires) | 108 | static int uv_setup_intr(int cpu, u64 expires) |
108 | { | 109 | { |
109 | u64 val; | 110 | u64 val; |
111 | unsigned long apicid = cpu_physical_id(cpu) | uv_apicid_hibits; | ||
110 | int pnode = uv_cpu_to_pnode(cpu); | 112 | int pnode = uv_cpu_to_pnode(cpu); |
111 | 113 | ||
112 | uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, | 114 | uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, |
@@ -117,7 +119,7 @@ static int uv_setup_intr(int cpu, u64 expires) | |||
117 | UVH_EVENT_OCCURRED0_RTC1_MASK); | 119 | UVH_EVENT_OCCURRED0_RTC1_MASK); |
118 | 120 | ||
119 | val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) | | 121 | val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) | |
120 | ((u64)cpu_physical_id(cpu) << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT); | 122 | ((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT); |
121 | 123 | ||
122 | /* Set configuration */ | 124 | /* Set configuration */ |
123 | uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, val); | 125 | uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, val); |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 235c0f4d3861..02c710bebf7a 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -75,6 +75,11 @@ DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info); | |||
75 | enum xen_domain_type xen_domain_type = XEN_NATIVE; | 75 | enum xen_domain_type xen_domain_type = XEN_NATIVE; |
76 | EXPORT_SYMBOL_GPL(xen_domain_type); | 76 | EXPORT_SYMBOL_GPL(xen_domain_type); |
77 | 77 | ||
78 | unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START; | ||
79 | EXPORT_SYMBOL(machine_to_phys_mapping); | ||
80 | unsigned int machine_to_phys_order; | ||
81 | EXPORT_SYMBOL(machine_to_phys_order); | ||
82 | |||
78 | struct start_info *xen_start_info; | 83 | struct start_info *xen_start_info; |
79 | EXPORT_SYMBOL_GPL(xen_start_info); | 84 | EXPORT_SYMBOL_GPL(xen_start_info); |
80 | 85 | ||
@@ -1090,6 +1095,8 @@ static void __init xen_setup_stackprotector(void) | |||
1090 | /* First C function to be called on Xen boot */ | 1095 | /* First C function to be called on Xen boot */ |
1091 | asmlinkage void __init xen_start_kernel(void) | 1096 | asmlinkage void __init xen_start_kernel(void) |
1092 | { | 1097 | { |
1098 | struct physdev_set_iopl set_iopl; | ||
1099 | int rc; | ||
1093 | pgd_t *pgd; | 1100 | pgd_t *pgd; |
1094 | 1101 | ||
1095 | if (!xen_start_info) | 1102 | if (!xen_start_info) |
@@ -1097,6 +1104,8 @@ asmlinkage void __init xen_start_kernel(void) | |||
1097 | 1104 | ||
1098 | xen_domain_type = XEN_PV_DOMAIN; | 1105 | xen_domain_type = XEN_PV_DOMAIN; |
1099 | 1106 | ||
1107 | xen_setup_machphys_mapping(); | ||
1108 | |||
1100 | /* Install Xen paravirt ops */ | 1109 | /* Install Xen paravirt ops */ |
1101 | pv_info = xen_info; | 1110 | pv_info = xen_info; |
1102 | pv_init_ops = xen_init_ops; | 1111 | pv_init_ops = xen_init_ops; |
@@ -1191,8 +1200,6 @@ asmlinkage void __init xen_start_kernel(void) | |||
1191 | /* Allocate and initialize top and mid mfn levels for p2m structure */ | 1200 | /* Allocate and initialize top and mid mfn levels for p2m structure */ |
1192 | xen_build_mfn_list_list(); | 1201 | xen_build_mfn_list_list(); |
1193 | 1202 | ||
1194 | init_mm.pgd = pgd; | ||
1195 | |||
1196 | /* keep using Xen gdt for now; no urgent need to change it */ | 1203 | /* keep using Xen gdt for now; no urgent need to change it */ |
1197 | 1204 | ||
1198 | #ifdef CONFIG_X86_32 | 1205 | #ifdef CONFIG_X86_32 |
@@ -1202,10 +1209,18 @@ asmlinkage void __init xen_start_kernel(void) | |||
1202 | #else | 1209 | #else |
1203 | pv_info.kernel_rpl = 0; | 1210 | pv_info.kernel_rpl = 0; |
1204 | #endif | 1211 | #endif |
1205 | |||
1206 | /* set the limit of our address space */ | 1212 | /* set the limit of our address space */ |
1207 | xen_reserve_top(); | 1213 | xen_reserve_top(); |
1208 | 1214 | ||
1215 | /* We used to do this in xen_arch_setup, but that is too late on AMD | ||
1216 | * where early_cpu_init (run before ->arch_setup()) calls early_amd_init | ||
1217 | * which pokes 0xcf8 port. | ||
1218 | */ | ||
1219 | set_iopl.iopl = 1; | ||
1220 | rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl); | ||
1221 | if (rc != 0) | ||
1222 | xen_raw_printk("physdev_op failed %d\n", rc); | ||
1223 | |||
1209 | #ifdef CONFIG_X86_32 | 1224 | #ifdef CONFIG_X86_32 |
1210 | /* set up basic CPUID stuff */ | 1225 | /* set up basic CPUID stuff */ |
1211 | cpu_detect(&new_cpu_data); | 1226 | cpu_detect(&new_cpu_data); |
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 21ed8d7f75a5..a1feff9e59b6 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -2034,6 +2034,20 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) | |||
2034 | set_page_prot(pmd, PAGE_KERNEL_RO); | 2034 | set_page_prot(pmd, PAGE_KERNEL_RO); |
2035 | } | 2035 | } |
2036 | 2036 | ||
2037 | void __init xen_setup_machphys_mapping(void) | ||
2038 | { | ||
2039 | struct xen_machphys_mapping mapping; | ||
2040 | unsigned long machine_to_phys_nr_ents; | ||
2041 | |||
2042 | if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) { | ||
2043 | machine_to_phys_mapping = (unsigned long *)mapping.v_start; | ||
2044 | machine_to_phys_nr_ents = mapping.max_mfn + 1; | ||
2045 | } else { | ||
2046 | machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES; | ||
2047 | } | ||
2048 | machine_to_phys_order = fls(machine_to_phys_nr_ents - 1); | ||
2049 | } | ||
2050 | |||
2037 | #ifdef CONFIG_X86_64 | 2051 | #ifdef CONFIG_X86_64 |
2038 | static void convert_pfn_mfn(void *v) | 2052 | static void convert_pfn_mfn(void *v) |
2039 | { | 2053 | { |
@@ -2119,44 +2133,83 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, | |||
2119 | return pgd; | 2133 | return pgd; |
2120 | } | 2134 | } |
2121 | #else /* !CONFIG_X86_64 */ | 2135 | #else /* !CONFIG_X86_64 */ |
2122 | static RESERVE_BRK_ARRAY(pmd_t, level2_kernel_pgt, PTRS_PER_PMD); | 2136 | static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD); |
2137 | static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD); | ||
2138 | |||
2139 | static __init void xen_write_cr3_init(unsigned long cr3) | ||
2140 | { | ||
2141 | unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir)); | ||
2142 | |||
2143 | BUG_ON(read_cr3() != __pa(initial_page_table)); | ||
2144 | BUG_ON(cr3 != __pa(swapper_pg_dir)); | ||
2145 | |||
2146 | /* | ||
2147 | * We are switching to swapper_pg_dir for the first time (from | ||
2148 | * initial_page_table) and therefore need to mark that page | ||
2149 | * read-only and then pin it. | ||
2150 | * | ||
2151 | * Xen disallows sharing of kernel PMDs for PAE | ||
2152 | * guests. Therefore we must copy the kernel PMD from | ||
2153 | * initial_page_table into a new kernel PMD to be used in | ||
2154 | * swapper_pg_dir. | ||
2155 | */ | ||
2156 | swapper_kernel_pmd = | ||
2157 | extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); | ||
2158 | memcpy(swapper_kernel_pmd, initial_kernel_pmd, | ||
2159 | sizeof(pmd_t) * PTRS_PER_PMD); | ||
2160 | swapper_pg_dir[KERNEL_PGD_BOUNDARY] = | ||
2161 | __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT); | ||
2162 | set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO); | ||
2163 | |||
2164 | set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO); | ||
2165 | xen_write_cr3(cr3); | ||
2166 | pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn); | ||
2167 | |||
2168 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, | ||
2169 | PFN_DOWN(__pa(initial_page_table))); | ||
2170 | set_page_prot(initial_page_table, PAGE_KERNEL); | ||
2171 | set_page_prot(initial_kernel_pmd, PAGE_KERNEL); | ||
2172 | |||
2173 | pv_mmu_ops.write_cr3 = &xen_write_cr3; | ||
2174 | } | ||
2123 | 2175 | ||
2124 | __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, | 2176 | __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, |
2125 | unsigned long max_pfn) | 2177 | unsigned long max_pfn) |
2126 | { | 2178 | { |
2127 | pmd_t *kernel_pmd; | 2179 | pmd_t *kernel_pmd; |
2128 | 2180 | ||
2129 | level2_kernel_pgt = extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); | 2181 | initial_kernel_pmd = |
2182 | extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); | ||
2130 | 2183 | ||
2131 | max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) + | 2184 | max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) + |
2132 | xen_start_info->nr_pt_frames * PAGE_SIZE + | 2185 | xen_start_info->nr_pt_frames * PAGE_SIZE + |
2133 | 512*1024); | 2186 | 512*1024); |
2134 | 2187 | ||
2135 | kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd); | 2188 | kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd); |
2136 | memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD); | 2189 | memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD); |
2137 | 2190 | ||
2138 | xen_map_identity_early(level2_kernel_pgt, max_pfn); | 2191 | xen_map_identity_early(initial_kernel_pmd, max_pfn); |
2139 | 2192 | ||
2140 | memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD); | 2193 | memcpy(initial_page_table, pgd, sizeof(pgd_t) * PTRS_PER_PGD); |
2141 | set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY], | 2194 | initial_page_table[KERNEL_PGD_BOUNDARY] = |
2142 | __pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT)); | 2195 | __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT); |
2143 | 2196 | ||
2144 | set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); | 2197 | set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO); |
2145 | set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO); | 2198 | set_page_prot(initial_page_table, PAGE_KERNEL_RO); |
2146 | set_page_prot(empty_zero_page, PAGE_KERNEL_RO); | 2199 | set_page_prot(empty_zero_page, PAGE_KERNEL_RO); |
2147 | 2200 | ||
2148 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); | 2201 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); |
2149 | 2202 | ||
2150 | xen_write_cr3(__pa(swapper_pg_dir)); | 2203 | pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, |
2151 | 2204 | PFN_DOWN(__pa(initial_page_table))); | |
2152 | pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir))); | 2205 | xen_write_cr3(__pa(initial_page_table)); |
2153 | 2206 | ||
2154 | memblock_x86_reserve_range(__pa(xen_start_info->pt_base), | 2207 | memblock_x86_reserve_range(__pa(xen_start_info->pt_base), |
2155 | __pa(xen_start_info->pt_base + | 2208 | __pa(xen_start_info->pt_base + |
2156 | xen_start_info->nr_pt_frames * PAGE_SIZE), | 2209 | xen_start_info->nr_pt_frames * PAGE_SIZE), |
2157 | "XEN PAGETABLES"); | 2210 | "XEN PAGETABLES"); |
2158 | 2211 | ||
2159 | return swapper_pg_dir; | 2212 | return initial_page_table; |
2160 | } | 2213 | } |
2161 | #endif /* CONFIG_X86_64 */ | 2214 | #endif /* CONFIG_X86_64 */ |
2162 | 2215 | ||
@@ -2290,7 +2343,11 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = { | |||
2290 | .write_cr2 = xen_write_cr2, | 2343 | .write_cr2 = xen_write_cr2, |
2291 | 2344 | ||
2292 | .read_cr3 = xen_read_cr3, | 2345 | .read_cr3 = xen_read_cr3, |
2346 | #ifdef CONFIG_X86_32 | ||
2347 | .write_cr3 = xen_write_cr3_init, | ||
2348 | #else | ||
2293 | .write_cr3 = xen_write_cr3, | 2349 | .write_cr3 = xen_write_cr3, |
2350 | #endif | ||
2294 | 2351 | ||
2295 | .flush_tlb_user = xen_flush_tlb, | 2352 | .flush_tlb_user = xen_flush_tlb, |
2296 | .flush_tlb_kernel = xen_flush_tlb, | 2353 | .flush_tlb_kernel = xen_flush_tlb, |
@@ -2627,7 +2684,8 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma, | |||
2627 | 2684 | ||
2628 | prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP); | 2685 | prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP); |
2629 | 2686 | ||
2630 | vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP; | 2687 | BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_RESERVED | VM_IO)) == |
2688 | (VM_PFNMAP | VM_RESERVED | VM_IO))); | ||
2631 | 2689 | ||
2632 | rmd.mfn = mfn; | 2690 | rmd.mfn = mfn; |
2633 | rmd.prot = prot; | 2691 | rmd.prot = prot; |
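[Editorial note on the xen/mmu.c hunks above: on 32-bit the early boot path now builds initial_page_table/initial_kernel_pmd and installs a dedicated write_cr3 callback selected at compile time, while the remap hunk stops setting vma->vm_flags itself and instead asserts the caller already did. Below is a minimal, self-contained sketch of the compile-time callback selection pattern only; demo_mmu_ops, DEMO_32BIT and both callbacks are invented names, not the real pv_mmu_ops.]

/* Sketch: pick an init-time callback in an ops table at compile time. */
#include <stdio.h>

struct demo_mmu_ops {
	void (*write_cr3)(unsigned long pa);
};

static void write_cr3_normal(unsigned long pa)
{
	printf("write_cr3: %#lx\n", pa);
}

#ifdef DEMO_32BIT
static void write_cr3_init(unsigned long pa)
{
	/* Early-boot path: would pin/unpin page tables before switching. */
	printf("write_cr3_init: %#lx\n", pa);
}
#endif

static const struct demo_mmu_ops demo_ops = {
#ifdef DEMO_32BIT
	.write_cr3 = write_cr3_init,	/* early-boot variant on 32-bit */
#else
	.write_cr3 = write_cr3_normal,
#endif
};

int main(void)
{
	demo_ops.write_cr3(0x1000);
	return 0;
}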
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 769c4b01fa32..01afd8a94607 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c | |||
@@ -23,7 +23,6 @@ | |||
23 | #include <xen/interface/callback.h> | 23 | #include <xen/interface/callback.h> |
24 | #include <xen/interface/memory.h> | 24 | #include <xen/interface/memory.h> |
25 | #include <xen/interface/physdev.h> | 25 | #include <xen/interface/physdev.h> |
26 | #include <xen/interface/memory.h> | ||
27 | #include <xen/features.h> | 26 | #include <xen/features.h> |
28 | 27 | ||
29 | #include "xen-ops.h" | 28 | #include "xen-ops.h" |
@@ -248,8 +247,7 @@ char * __init xen_memory_setup(void) | |||
248 | else | 247 | else |
249 | extra_pages = 0; | 248 | extra_pages = 0; |
250 | 249 | ||
251 | if (!xen_initial_domain()) | 250 | xen_add_extra_mem(extra_pages); |
252 | xen_add_extra_mem(extra_pages); | ||
253 | 251 | ||
254 | return "Xen"; | 252 | return "Xen"; |
255 | } | 253 | } |
@@ -337,9 +335,6 @@ void __cpuinit xen_enable_syscall(void) | |||
337 | 335 | ||
338 | void __init xen_arch_setup(void) | 336 | void __init xen_arch_setup(void) |
339 | { | 337 | { |
340 | struct physdev_set_iopl set_iopl; | ||
341 | int rc; | ||
342 | |||
343 | xen_panic_handler_init(); | 338 | xen_panic_handler_init(); |
344 | 339 | ||
345 | HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments); | 340 | HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments); |
@@ -356,11 +351,6 @@ void __init xen_arch_setup(void) | |||
356 | xen_enable_sysenter(); | 351 | xen_enable_sysenter(); |
357 | xen_enable_syscall(); | 352 | xen_enable_syscall(); |
358 | 353 | ||
359 | set_iopl.iopl = 1; | ||
360 | rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl); | ||
361 | if (rc != 0) | ||
362 | printk(KERN_INFO "physdev_op failed %d\n", rc); | ||
363 | |||
364 | #ifdef CONFIG_ACPI | 354 | #ifdef CONFIG_ACPI |
365 | if (!(xen_start_info->flags & SIF_INITDOMAIN)) { | 355 | if (!(xen_start_info->flags & SIF_INITDOMAIN)) { |
366 | printk(KERN_INFO "ACPI in unprivileged domain disabled\n"); | 356 | printk(KERN_INFO "ACPI in unprivileged domain disabled\n"); |
diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 56ad4531b412..004be80fd894 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c | |||
@@ -645,7 +645,7 @@ static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg, | |||
645 | { | 645 | { |
646 | unsigned int nr_reads = 0, nr_writes = 0; | 646 | unsigned int nr_reads = 0, nr_writes = 0; |
647 | unsigned int max_nr_reads = throtl_grp_quantum*3/4; | 647 | unsigned int max_nr_reads = throtl_grp_quantum*3/4; |
648 | unsigned int max_nr_writes = throtl_grp_quantum - nr_reads; | 648 | unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads; |
649 | struct bio *bio; | 649 | struct bio *bio; |
650 | 650 | ||
651 | /* Try to dispatch 75% READS and 25% WRITES */ | 651 | /* Try to dispatch 75% READS and 25% WRITES */ |
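[Editorial note: the one-line blk-throttle fix above matters because nr_reads is still zero at that point, so the old expression left the write budget equal to the whole quantum instead of the remaining 25%. A tiny arithmetic sketch, assuming throtl_grp_quantum is 8 as in blk-throttle.c of this era.]

#include <stdio.h>

int main(void)
{
	unsigned int quantum = 8;			/* assumed throtl_grp_quantum */
	unsigned int max_nr_reads  = quantum * 3 / 4;	/* 6: 75% of the budget  */
	unsigned int max_nr_writes = quantum - max_nr_reads; /* 2, not 8 as before */

	printf("reads=%u writes=%u\n", max_nr_reads, max_nr_writes);
	return 0;
}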
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index a1725e6488d3..7888501ad9ee 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c | |||
@@ -1341,7 +1341,7 @@ static struct request *set_next_request(void) | |||
1341 | { | 1341 | { |
1342 | struct request_queue *q; | 1342 | struct request_queue *q; |
1343 | int cnt = FD_MAX_UNITS; | 1343 | int cnt = FD_MAX_UNITS; |
1344 | struct request *rq; | 1344 | struct request *rq = NULL; |
1345 | 1345 | ||
1346 | /* Find next queue we can dispatch from */ | 1346 | /* Find next queue we can dispatch from */ |
1347 | fdc_queue = fdc_queue + 1; | 1347 | fdc_queue = fdc_queue + 1; |
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index 4e4cc6c828cb..605a67e40bbf 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c | |||
@@ -1399,7 +1399,7 @@ static struct request *set_next_request(void) | |||
1399 | { | 1399 | { |
1400 | struct request_queue *q; | 1400 | struct request_queue *q; |
1401 | int old_pos = fdc_queue; | 1401 | int old_pos = fdc_queue; |
1402 | struct request *rq; | 1402 | struct request *rq = NULL; |
1403 | 1403 | ||
1404 | do { | 1404 | do { |
1405 | q = unit[fdc_queue].disk->queue; | 1405 | q = unit[fdc_queue].disk->queue; |
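[Editorial note: both floppy hunks above initialise rq to NULL for the same reason: if every per-unit queue turns out to be empty, the scan loop falls through and the function must return a well-defined NULL rather than stack garbage. A stand-alone sketch of that pattern; all names here are invented for the demo.]

#include <stdio.h>
#include <stddef.h>

struct demo_request { int id; };

static struct demo_request *fetch(int unit)
{
	(void)unit;
	return NULL;			/* pretend every queue is empty */
}

static struct demo_request *set_next_request(void)
{
	struct demo_request *rq = NULL;	/* well-defined value on the empty path */
	int unit;

	for (unit = 0; unit < 4; unit++) {
		rq = fetch(unit);
		if (rq)
			break;
	}
	return rq;
}

int main(void)
{
	printf("rq=%p\n", (void *)set_next_request());
	return 0;
}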
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index a67d0a611a8a..f291587d753e 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c | |||
@@ -66,6 +66,7 @@ MODULE_VERSION("3.6.26"); | |||
66 | MODULE_LICENSE("GPL"); | 66 | MODULE_LICENSE("GPL"); |
67 | 67 | ||
68 | static DEFINE_MUTEX(cciss_mutex); | 68 | static DEFINE_MUTEX(cciss_mutex); |
69 | static struct proc_dir_entry *proc_cciss; | ||
69 | 70 | ||
70 | #include "cciss_cmd.h" | 71 | #include "cciss_cmd.h" |
71 | #include "cciss.h" | 72 | #include "cciss.h" |
@@ -363,8 +364,6 @@ static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", | |||
363 | #define ENG_GIG_FACTOR (ENG_GIG/512) | 364 | #define ENG_GIG_FACTOR (ENG_GIG/512) |
364 | #define ENGAGE_SCSI "engage scsi" | 365 | #define ENGAGE_SCSI "engage scsi" |
365 | 366 | ||
366 | static struct proc_dir_entry *proc_cciss; | ||
367 | |||
368 | static void cciss_seq_show_header(struct seq_file *seq) | 367 | static void cciss_seq_show_header(struct seq_file *seq) |
369 | { | 368 | { |
370 | ctlr_info_t *h = seq->private; | 369 | ctlr_info_t *h = seq->private; |
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 255035cfc88a..4f9e22f29138 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
@@ -65,7 +65,7 @@ enum blkif_state { | |||
65 | 65 | ||
66 | struct blk_shadow { | 66 | struct blk_shadow { |
67 | struct blkif_request req; | 67 | struct blkif_request req; |
68 | unsigned long request; | 68 | struct request *request; |
69 | unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | 69 | unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST]; |
70 | }; | 70 | }; |
71 | 71 | ||
@@ -136,7 +136,7 @@ static void add_id_to_freelist(struct blkfront_info *info, | |||
136 | unsigned long id) | 136 | unsigned long id) |
137 | { | 137 | { |
138 | info->shadow[id].req.id = info->shadow_free; | 138 | info->shadow[id].req.id = info->shadow_free; |
139 | info->shadow[id].request = 0; | 139 | info->shadow[id].request = NULL; |
140 | info->shadow_free = id; | 140 | info->shadow_free = id; |
141 | } | 141 | } |
142 | 142 | ||
@@ -245,14 +245,11 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode, | |||
245 | } | 245 | } |
246 | 246 | ||
247 | /* | 247 | /* |
248 | * blkif_queue_request | 248 | * Generate a Xen blkfront IO request from a blk layer request. Reads |
249 | * and writes are handled as expected. Since we lack a loose flush | ||
250 | * request, we map flushes into a full ordered barrier. | ||
249 | * | 251 | * |
250 | * request block io | 252 | * @req: a request struct |
251 | * | ||
252 | * id: for guest use only. | ||
253 | * operation: BLKIF_OP_{READ,WRITE,PROBE} | ||
254 | * buffer: buffer to read/write into. this should be a | ||
255 | * virtual address in the guest os. | ||
256 | */ | 253 | */ |
257 | static int blkif_queue_request(struct request *req) | 254 | static int blkif_queue_request(struct request *req) |
258 | { | 255 | { |
@@ -281,7 +278,7 @@ static int blkif_queue_request(struct request *req) | |||
281 | /* Fill out a communications ring structure. */ | 278 | /* Fill out a communications ring structure. */ |
282 | ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); | 279 | ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); |
283 | id = get_id_from_freelist(info); | 280 | id = get_id_from_freelist(info); |
284 | info->shadow[id].request = (unsigned long)req; | 281 | info->shadow[id].request = req; |
285 | 282 | ||
286 | ring_req->id = id; | 283 | ring_req->id = id; |
287 | ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req); | 284 | ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req); |
@@ -290,6 +287,18 @@ static int blkif_queue_request(struct request *req) | |||
290 | ring_req->operation = rq_data_dir(req) ? | 287 | ring_req->operation = rq_data_dir(req) ? |
291 | BLKIF_OP_WRITE : BLKIF_OP_READ; | 288 | BLKIF_OP_WRITE : BLKIF_OP_READ; |
292 | 289 | ||
290 | if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) { | ||
291 | /* | ||
292 | * Ideally we could just do an unordered | ||
293 | * flush-to-disk, but all we have is a full write | ||
294 | * barrier at the moment. However, a barrier write is | ||
295 | * a superset of FUA, so we can implement it the same | ||
296 | * way. (It's also a FLUSH+FUA, since it is | ||
297 | * guaranteed ordered WRT previous writes.) | ||
298 | */ | ||
299 | ring_req->operation = BLKIF_OP_WRITE_BARRIER; | ||
300 | } | ||
301 | |||
293 | ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); | 302 | ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); |
294 | BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); | 303 | BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); |
295 | 304 | ||
@@ -634,7 +643,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) | |||
634 | 643 | ||
635 | bret = RING_GET_RESPONSE(&info->ring, i); | 644 | bret = RING_GET_RESPONSE(&info->ring, i); |
636 | id = bret->id; | 645 | id = bret->id; |
637 | req = (struct request *)info->shadow[id].request; | 646 | req = info->shadow[id].request; |
638 | 647 | ||
639 | blkif_completion(&info->shadow[id]); | 648 | blkif_completion(&info->shadow[id]); |
640 | 649 | ||
@@ -647,6 +656,16 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) | |||
647 | printk(KERN_WARNING "blkfront: %s: write barrier op failed\n", | 656 | printk(KERN_WARNING "blkfront: %s: write barrier op failed\n", |
648 | info->gd->disk_name); | 657 | info->gd->disk_name); |
649 | error = -EOPNOTSUPP; | 658 | error = -EOPNOTSUPP; |
659 | } | ||
660 | if (unlikely(bret->status == BLKIF_RSP_ERROR && | ||
661 | info->shadow[id].req.nr_segments == 0)) { | ||
662 | printk(KERN_WARNING "blkfront: %s: empty write barrier op failed\n", | ||
663 | info->gd->disk_name); | ||
664 | error = -EOPNOTSUPP; | ||
665 | } | ||
666 | if (unlikely(error)) { | ||
667 | if (error == -EOPNOTSUPP) | ||
668 | error = 0; | ||
650 | info->feature_flush = 0; | 669 | info->feature_flush = 0; |
651 | xlvbd_flush(info); | 670 | xlvbd_flush(info); |
652 | } | 671 | } |
@@ -899,7 +918,7 @@ static int blkif_recover(struct blkfront_info *info) | |||
899 | /* Stage 3: Find pending requests and requeue them. */ | 918 | /* Stage 3: Find pending requests and requeue them. */ |
900 | for (i = 0; i < BLK_RING_SIZE; i++) { | 919 | for (i = 0; i < BLK_RING_SIZE; i++) { |
901 | /* Not in use? */ | 920 | /* Not in use? */ |
902 | if (copy[i].request == 0) | 921 | if (!copy[i].request) |
903 | continue; | 922 | continue; |
904 | 923 | ||
905 | /* Grab a request slot and copy shadow state into it. */ | 924 | /* Grab a request slot and copy shadow state into it. */ |
@@ -916,9 +935,7 @@ static int blkif_recover(struct blkfront_info *info) | |||
916 | req->seg[j].gref, | 935 | req->seg[j].gref, |
917 | info->xbdev->otherend_id, | 936 | info->xbdev->otherend_id, |
918 | pfn_to_mfn(info->shadow[req->id].frame[j]), | 937 | pfn_to_mfn(info->shadow[req->id].frame[j]), |
919 | rq_data_dir( | 938 | rq_data_dir(info->shadow[req->id].request)); |
920 | (struct request *) | ||
921 | info->shadow[req->id].request)); | ||
922 | info->shadow[req->id].req = *req; | 939 | info->shadow[req->id].req = *req; |
923 | 940 | ||
924 | info->ring.req_prod_pvt++; | 941 | info->ring.req_prod_pvt++; |
@@ -1067,14 +1084,8 @@ static void blkfront_connect(struct blkfront_info *info) | |||
1067 | */ | 1084 | */ |
1068 | info->feature_flush = 0; | 1085 | info->feature_flush = 0; |
1069 | 1086 | ||
1070 | /* | ||
1071 | * The driver doesn't properly handled empty flushes, so | ||
1072 | * lets disable barrier support for now. | ||
1073 | */ | ||
1074 | #if 0 | ||
1075 | if (!err && barrier) | 1087 | if (!err && barrier) |
1076 | info->feature_flush = REQ_FLUSH; | 1088 | info->feature_flush = REQ_FLUSH | REQ_FUA; |
1077 | #endif | ||
1078 | 1089 | ||
1079 | err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size); | 1090 | err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size); |
1080 | if (err) { | 1091 | if (err) { |
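[Editorial note: two ideas run through the xen-blkfront hunks above: the shadow entry now stores a typed struct request * instead of an unsigned long that had to be cast back, and flush/FUA requests are expressed as a full write barrier because this protocol revision has no lighter flush operation. Below is a minimal sketch of that flag-to-opcode mapping; the DEMO_* constants and enum are placeholders, not the real blkif ABI values.]

#include <stdio.h>

#define DEMO_REQ_FLUSH	(1u << 0)
#define DEMO_REQ_FUA	(1u << 1)

enum demo_op { DEMO_OP_READ, DEMO_OP_WRITE, DEMO_OP_WRITE_BARRIER };

static enum demo_op pick_op(unsigned int cmd_flags, int is_write)
{
	enum demo_op op = is_write ? DEMO_OP_WRITE : DEMO_OP_READ;

	/* A barrier write is a superset of FLUSH and FUA, so both map to it. */
	if (cmd_flags & (DEMO_REQ_FLUSH | DEMO_REQ_FUA))
		op = DEMO_OP_WRITE_BARRIER;
	return op;
}

int main(void)
{
	printf("%d\n", pick_op(DEMO_REQ_FUA, 1));	/* prints 2 (barrier) */
	return 0;
}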
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 6c1b676643a9..896a2ced1d27 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c | |||
@@ -1547,31 +1547,16 @@ static int init_vqs(struct ports_device *portdev) | |||
1547 | nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2; | 1547 | nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2; |
1548 | 1548 | ||
1549 | vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL); | 1549 | vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL); |
1550 | if (!vqs) { | ||
1551 | err = -ENOMEM; | ||
1552 | goto fail; | ||
1553 | } | ||
1554 | io_callbacks = kmalloc(nr_queues * sizeof(vq_callback_t *), GFP_KERNEL); | 1550 | io_callbacks = kmalloc(nr_queues * sizeof(vq_callback_t *), GFP_KERNEL); |
1555 | if (!io_callbacks) { | ||
1556 | err = -ENOMEM; | ||
1557 | goto free_vqs; | ||
1558 | } | ||
1559 | io_names = kmalloc(nr_queues * sizeof(char *), GFP_KERNEL); | 1551 | io_names = kmalloc(nr_queues * sizeof(char *), GFP_KERNEL); |
1560 | if (!io_names) { | ||
1561 | err = -ENOMEM; | ||
1562 | goto free_callbacks; | ||
1563 | } | ||
1564 | portdev->in_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), | 1552 | portdev->in_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), |
1565 | GFP_KERNEL); | 1553 | GFP_KERNEL); |
1566 | if (!portdev->in_vqs) { | ||
1567 | err = -ENOMEM; | ||
1568 | goto free_names; | ||
1569 | } | ||
1570 | portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), | 1554 | portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), |
1571 | GFP_KERNEL); | 1555 | GFP_KERNEL); |
1572 | if (!portdev->out_vqs) { | 1556 | if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs || |
1557 | !portdev->out_vqs) { | ||
1573 | err = -ENOMEM; | 1558 | err = -ENOMEM; |
1574 | goto free_invqs; | 1559 | goto free; |
1575 | } | 1560 | } |
1576 | 1561 | ||
1577 | /* | 1562 | /* |
@@ -1605,7 +1590,7 @@ static int init_vqs(struct ports_device *portdev) | |||
1605 | io_callbacks, | 1590 | io_callbacks, |
1606 | (const char **)io_names); | 1591 | (const char **)io_names); |
1607 | if (err) | 1592 | if (err) |
1608 | goto free_outvqs; | 1593 | goto free; |
1609 | 1594 | ||
1610 | j = 0; | 1595 | j = 0; |
1611 | portdev->in_vqs[0] = vqs[0]; | 1596 | portdev->in_vqs[0] = vqs[0]; |
@@ -1621,23 +1606,19 @@ static int init_vqs(struct ports_device *portdev) | |||
1621 | portdev->out_vqs[i] = vqs[j + 1]; | 1606 | portdev->out_vqs[i] = vqs[j + 1]; |
1622 | } | 1607 | } |
1623 | } | 1608 | } |
1624 | kfree(io_callbacks); | ||
1625 | kfree(io_names); | 1609 | kfree(io_names); |
1610 | kfree(io_callbacks); | ||
1626 | kfree(vqs); | 1611 | kfree(vqs); |
1627 | 1612 | ||
1628 | return 0; | 1613 | return 0; |
1629 | 1614 | ||
1630 | free_names: | 1615 | free: |
1631 | kfree(io_names); | ||
1632 | free_callbacks: | ||
1633 | kfree(io_callbacks); | ||
1634 | free_outvqs: | ||
1635 | kfree(portdev->out_vqs); | 1616 | kfree(portdev->out_vqs); |
1636 | free_invqs: | ||
1637 | kfree(portdev->in_vqs); | 1617 | kfree(portdev->in_vqs); |
1638 | free_vqs: | 1618 | kfree(io_names); |
1619 | kfree(io_callbacks); | ||
1639 | kfree(vqs); | 1620 | kfree(vqs); |
1640 | fail: | 1621 | |
1641 | return err; | 1622 | return err; |
1642 | } | 1623 | } |
1643 | 1624 | ||
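[Editorial note: the init_vqs() rework above collapses five error labels into one. That is safe because kfree(NULL) is a no-op, so every allocation can be attempted up front and a single cleanup label frees whichever subset succeeded. The same idea in a self-contained userspace sketch, with malloc/free standing in for kmalloc/kfree.]

#include <stdlib.h>

static int init_demo(void)
{
	int err = 0;
	void *a = malloc(64);	/* attempt all allocations first */
	void *b = malloc(64);
	void *c = malloc(64);

	if (!a || !b || !c) {
		err = -1;
		goto free;	/* one label instead of a cascade */
	}

	/* ... use a, b, c, then drop the temporaries on success too ... */
	free(c);
	free(b);
	free(a);
	return 0;

free:
	free(c);		/* free(NULL) is harmless for the ones that failed */
	free(b);
	free(a);
	return err;
}

int main(void)
{
	return init_demo() ? 1 : 0;
}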
diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c index 937983407e2a..c4c40be0edbf 100644 --- a/drivers/hwmon/i5k_amb.c +++ b/drivers/hwmon/i5k_amb.c | |||
@@ -497,12 +497,14 @@ static unsigned long chipset_ids[] = { | |||
497 | 0 | 497 | 0 |
498 | }; | 498 | }; |
499 | 499 | ||
500 | #ifdef MODULE | ||
500 | static struct pci_device_id i5k_amb_ids[] __devinitdata = { | 501 | static struct pci_device_id i5k_amb_ids[] __devinitdata = { |
501 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) }, | 502 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) }, |
502 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR) }, | 503 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR) }, |
503 | { 0, } | 504 | { 0, } |
504 | }; | 505 | }; |
505 | MODULE_DEVICE_TABLE(pci, i5k_amb_ids); | 506 | MODULE_DEVICE_TABLE(pci, i5k_amb_ids); |
507 | #endif | ||
506 | 508 | ||
507 | static int __devinit i5k_amb_probe(struct platform_device *pdev) | 509 | static int __devinit i5k_amb_probe(struct platform_device *pdev) |
508 | { | 510 | { |
diff --git a/drivers/hwmon/lis3lv02d_i2c.c b/drivers/hwmon/lis3lv02d_i2c.c index 9f4bae07f719..8853afce85ce 100644 --- a/drivers/hwmon/lis3lv02d_i2c.c +++ b/drivers/hwmon/lis3lv02d_i2c.c | |||
@@ -186,7 +186,7 @@ static int __devexit lis3lv02d_i2c_remove(struct i2c_client *client) | |||
186 | return 0; | 186 | return 0; |
187 | } | 187 | } |
188 | 188 | ||
189 | #ifdef CONFIG_PM | 189 | #ifdef CONFIG_PM_SLEEP |
190 | static int lis3lv02d_i2c_suspend(struct device *dev) | 190 | static int lis3lv02d_i2c_suspend(struct device *dev) |
191 | { | 191 | { |
192 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); | 192 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); |
@@ -213,12 +213,9 @@ static int lis3lv02d_i2c_resume(struct device *dev) | |||
213 | 213 | ||
214 | return 0; | 214 | return 0; |
215 | } | 215 | } |
216 | #else | 216 | #endif /* CONFIG_PM_SLEEP */ |
217 | #define lis3lv02d_i2c_suspend NULL | ||
218 | #define lis3lv02d_i2c_resume NULL | ||
219 | #define lis3lv02d_i2c_shutdown NULL | ||
220 | #endif | ||
221 | 217 | ||
218 | #ifdef CONFIG_PM_RUNTIME | ||
222 | static int lis3_i2c_runtime_suspend(struct device *dev) | 219 | static int lis3_i2c_runtime_suspend(struct device *dev) |
223 | { | 220 | { |
224 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); | 221 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); |
@@ -236,6 +233,7 @@ static int lis3_i2c_runtime_resume(struct device *dev) | |||
236 | lis3lv02d_poweron(lis3); | 233 | lis3lv02d_poweron(lis3); |
237 | return 0; | 234 | return 0; |
238 | } | 235 | } |
236 | #endif /* CONFIG_PM_RUNTIME */ | ||
239 | 237 | ||
240 | static const struct i2c_device_id lis3lv02d_id[] = { | 238 | static const struct i2c_device_id lis3lv02d_id[] = { |
241 | {"lis3lv02d", 0 }, | 239 | {"lis3lv02d", 0 }, |
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c index 3782f31f06d2..33facd0c45d1 100644 --- a/drivers/leds/leds-lp5521.c +++ b/drivers/leds/leds-lp5521.c | |||
@@ -125,11 +125,22 @@ struct lp5521_chip { | |||
125 | u8 num_leds; | 125 | u8 num_leds; |
126 | }; | 126 | }; |
127 | 127 | ||
128 | #define cdev_to_led(c) container_of(c, struct lp5521_led, cdev) | 128 | static inline struct lp5521_led *cdev_to_led(struct led_classdev *cdev) |
129 | #define engine_to_lp5521(eng) container_of((eng), struct lp5521_chip, \ | 129 | { |
130 | engines[(eng)->id - 1]) | 130 | return container_of(cdev, struct lp5521_led, cdev); |
131 | #define led_to_lp5521(led) container_of((led), struct lp5521_chip, \ | 131 | } |
132 | leds[(led)->id]) | 132 | |
133 | static inline struct lp5521_chip *engine_to_lp5521(struct lp5521_engine *engine) | ||
134 | { | ||
135 | return container_of(engine, struct lp5521_chip, | ||
136 | engines[engine->id - 1]); | ||
137 | } | ||
138 | |||
139 | static inline struct lp5521_chip *led_to_lp5521(struct lp5521_led *led) | ||
140 | { | ||
141 | return container_of(led, struct lp5521_chip, | ||
142 | leds[led->id]); | ||
143 | } | ||
133 | 144 | ||
134 | static void lp5521_led_brightness_work(struct work_struct *work); | 145 | static void lp5521_led_brightness_work(struct work_struct *work); |
135 | 146 | ||
@@ -185,14 +196,17 @@ static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern) | |||
185 | 196 | ||
186 | /* move current engine to direct mode and remember the state */ | 197 | /* move current engine to direct mode and remember the state */ |
187 | ret = lp5521_set_engine_mode(eng, LP5521_CMD_DIRECT); | 198 | ret = lp5521_set_engine_mode(eng, LP5521_CMD_DIRECT); |
188 | usleep_range(1000, 10000); | 199 | /* Mode change requires min 500 us delay. 1 - 2 ms with margin */ |
200 | usleep_range(1000, 2000); | ||
189 | ret |= lp5521_read(client, LP5521_REG_OP_MODE, &mode); | 201 | ret |= lp5521_read(client, LP5521_REG_OP_MODE, &mode); |
190 | 202 | ||
191 | /* For loading, all the engines to load mode */ | 203 | /* For loading, all the engines to load mode */ |
192 | lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT); | 204 | lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT); |
193 | usleep_range(1000, 10000); | 205 | /* Mode change requires min 500 us delay. 1 - 2 ms with margin */ |
206 | usleep_range(1000, 2000); | ||
194 | lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_LOAD); | 207 | lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_LOAD); |
195 | usleep_range(1000, 10000); | 208 | /* Mode change requires min 500 us delay. 1 - 2 ms with margin */ |
209 | usleep_range(1000, 2000); | ||
196 | 210 | ||
197 | addr = LP5521_PROG_MEM_BASE + eng->prog_page * LP5521_PROG_MEM_SIZE; | 211 | addr = LP5521_PROG_MEM_BASE + eng->prog_page * LP5521_PROG_MEM_SIZE; |
198 | i2c_smbus_write_i2c_block_data(client, | 212 | i2c_smbus_write_i2c_block_data(client, |
@@ -231,10 +245,6 @@ static int lp5521_configure(struct i2c_client *client, | |||
231 | 245 | ||
232 | lp5521_init_engine(chip, attr_group); | 246 | lp5521_init_engine(chip, attr_group); |
233 | 247 | ||
234 | lp5521_write(client, LP5521_REG_RESET, 0xff); | ||
235 | |||
236 | usleep_range(10000, 20000); | ||
237 | |||
238 | /* Set all PWMs to direct control mode */ | 248 | /* Set all PWMs to direct control mode */ |
239 | ret = lp5521_write(client, LP5521_REG_OP_MODE, 0x3F); | 249 | ret = lp5521_write(client, LP5521_REG_OP_MODE, 0x3F); |
240 | 250 | ||
@@ -251,8 +261,8 @@ static int lp5521_configure(struct i2c_client *client, | |||
251 | ret |= lp5521_write(client, LP5521_REG_ENABLE, | 261 | ret |= lp5521_write(client, LP5521_REG_ENABLE, |
252 | LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM | | 262 | LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM | |
253 | LP5521_EXEC_RUN); | 263 | LP5521_EXEC_RUN); |
254 | /* enable takes 500us */ | 264 | /* enable takes 500us. 1 - 2 ms leaves some margin */ |
255 | usleep_range(500, 20000); | 265 | usleep_range(1000, 2000); |
256 | 266 | ||
257 | return ret; | 267 | return ret; |
258 | } | 268 | } |
@@ -305,7 +315,8 @@ static int lp5521_detect(struct i2c_client *client) | |||
305 | LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM); | 315 | LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM); |
306 | if (ret) | 316 | if (ret) |
307 | return ret; | 317 | return ret; |
308 | usleep_range(1000, 10000); | 318 | /* enable takes 500us. 1 - 2 ms leaves some margin */ |
319 | usleep_range(1000, 2000); | ||
309 | ret = lp5521_read(client, LP5521_REG_ENABLE, &buf); | 320 | ret = lp5521_read(client, LP5521_REG_ENABLE, &buf); |
310 | if (ret) | 321 | if (ret) |
311 | return ret; | 322 | return ret; |
@@ -693,11 +704,16 @@ static int lp5521_probe(struct i2c_client *client, | |||
693 | 704 | ||
694 | if (pdata->enable) { | 705 | if (pdata->enable) { |
695 | pdata->enable(0); | 706 | pdata->enable(0); |
696 | usleep_range(1000, 10000); | 707 | usleep_range(1000, 2000); /* Keep enable down at least 1ms */ |
697 | pdata->enable(1); | 708 | pdata->enable(1); |
698 | usleep_range(1000, 10000); /* Spec says min 500us */ | 709 | usleep_range(1000, 2000); /* 500us abs min. */ |
699 | } | 710 | } |
700 | 711 | ||
712 | lp5521_write(client, LP5521_REG_RESET, 0xff); | ||
713 | usleep_range(10000, 20000); /* | ||
714 | * Exact value is not available. 10 - 20ms | ||
715 | * appears to be enough for reset. | ||
716 | */ | ||
701 | ret = lp5521_detect(client); | 717 | ret = lp5521_detect(client); |
702 | 718 | ||
703 | if (ret) { | 719 | if (ret) { |
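[Editorial note: beyond the sleep-range tightening, the lp5521 hunks above replace the container_of wrapper macros with static inlines, which gives the compiler real prototypes to type-check and keeps the cast in one place. A self-contained illustration of the pattern follows; container_of is re-implemented so the snippet builds on its own, and the demo_* types are invented.]

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_cdev { const char *name; };

struct demo_led {
	int id;
	struct demo_cdev cdev;	/* embedded class device */
};

/* Typed accessor: callers get argument checking a macro would not give. */
static inline struct demo_led *cdev_to_led(struct demo_cdev *cdev)
{
	return container_of(cdev, struct demo_led, cdev);
}

int main(void)
{
	struct demo_led led = { .id = 3, .cdev = { .name = "led3" } };

	printf("id=%d\n", cdev_to_led(&led.cdev)->id);
	return 0;
}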
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c index 1e11fcc08b28..0cc4ead2fd8b 100644 --- a/drivers/leds/leds-lp5523.c +++ b/drivers/leds/leds-lp5523.c | |||
@@ -134,15 +134,18 @@ struct lp5523_chip { | |||
134 | u8 num_leds; | 134 | u8 num_leds; |
135 | }; | 135 | }; |
136 | 136 | ||
137 | #define cdev_to_led(c) container_of(c, struct lp5523_led, cdev) | 137 | static inline struct lp5523_led *cdev_to_led(struct led_classdev *cdev) |
138 | { | ||
139 | return container_of(cdev, struct lp5523_led, cdev); | ||
140 | } | ||
138 | 141 | ||
139 | static struct lp5523_chip *engine_to_lp5523(struct lp5523_engine *engine) | 142 | static inline struct lp5523_chip *engine_to_lp5523(struct lp5523_engine *engine) |
140 | { | 143 | { |
141 | return container_of(engine, struct lp5523_chip, | 144 | return container_of(engine, struct lp5523_chip, |
142 | engines[engine->id - 1]); | 145 | engines[engine->id - 1]); |
143 | } | 146 | } |
144 | 147 | ||
145 | static struct lp5523_chip *led_to_lp5523(struct lp5523_led *led) | 148 | static inline struct lp5523_chip *led_to_lp5523(struct lp5523_led *led) |
146 | { | 149 | { |
147 | return container_of(led, struct lp5523_chip, | 150 | return container_of(led, struct lp5523_chip, |
148 | leds[led->id]); | 151 | leds[led->id]); |
@@ -200,13 +203,9 @@ static int lp5523_configure(struct i2c_client *client) | |||
200 | { 0x9c, 0x50, 0x9c, 0xd0, 0x9d, 0x80, 0xd8, 0x00, 0}, | 203 | { 0x9c, 0x50, 0x9c, 0xd0, 0x9d, 0x80, 0xd8, 0x00, 0}, |
201 | }; | 204 | }; |
202 | 205 | ||
203 | lp5523_write(client, LP5523_REG_RESET, 0xff); | ||
204 | |||
205 | usleep_range(10000, 100000); | ||
206 | |||
207 | ret |= lp5523_write(client, LP5523_REG_ENABLE, LP5523_ENABLE); | 206 | ret |= lp5523_write(client, LP5523_REG_ENABLE, LP5523_ENABLE); |
208 | /* Chip startup time after reset is 500 us */ | 207 | /* Chip startup time is 500 us, 1 - 2 ms gives some margin */ |
209 | usleep_range(1000, 10000); | 208 | usleep_range(1000, 2000); |
210 | 209 | ||
211 | ret |= lp5523_write(client, LP5523_REG_CONFIG, | 210 | ret |= lp5523_write(client, LP5523_REG_CONFIG, |
212 | LP5523_AUTO_INC | LP5523_PWR_SAVE | | 211 | LP5523_AUTO_INC | LP5523_PWR_SAVE | |
@@ -243,8 +242,8 @@ static int lp5523_configure(struct i2c_client *client) | |||
243 | return -1; | 242 | return -1; |
244 | } | 243 | } |
245 | 244 | ||
246 | /* Wait 3ms and check the engine status */ | 245 | /* Let the programs run for couple of ms and check the engine status */ |
247 | usleep_range(3000, 20000); | 246 | usleep_range(3000, 6000); |
248 | lp5523_read(client, LP5523_REG_STATUS, &status); | 247 | lp5523_read(client, LP5523_REG_STATUS, &status); |
249 | status &= LP5523_ENG_STATUS_MASK; | 248 | status &= LP5523_ENG_STATUS_MASK; |
250 | 249 | ||
@@ -449,10 +448,10 @@ static ssize_t lp5523_selftest(struct device *dev, | |||
449 | /* Measure VDD (i.e. VBAT) first (channel 16 corresponds to VDD) */ | 448 | /* Measure VDD (i.e. VBAT) first (channel 16 corresponds to VDD) */ |
450 | lp5523_write(chip->client, LP5523_REG_LED_TEST_CTRL, | 449 | lp5523_write(chip->client, LP5523_REG_LED_TEST_CTRL, |
451 | LP5523_EN_LEDTEST | 16); | 450 | LP5523_EN_LEDTEST | 16); |
452 | usleep_range(3000, 10000); | 451 | usleep_range(3000, 6000); /* ADC conversion time is typically 2.7 ms */ |
453 | ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status); | 452 | ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status); |
454 | if (!(status & LP5523_LEDTEST_DONE)) | 453 | if (!(status & LP5523_LEDTEST_DONE)) |
455 | usleep_range(3000, 10000); | 454 | usleep_range(3000, 6000); /* Was not ready. Wait little bit */ |
456 | 455 | ||
457 | ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &vdd); | 456 | ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &vdd); |
458 | vdd--; /* There may be some fluctuation in measurement */ | 457 | vdd--; /* There may be some fluctuation in measurement */ |
@@ -468,16 +467,16 @@ static ssize_t lp5523_selftest(struct device *dev, | |||
468 | chip->pdata->led_config[i].led_current); | 467 | chip->pdata->led_config[i].led_current); |
469 | 468 | ||
470 | lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0xff); | 469 | lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0xff); |
471 | /* let current stabilize 2ms before measurements start */ | 470 | /* let current stabilize 2 - 4ms before measurements start */ |
472 | usleep_range(2000, 10000); | 471 | usleep_range(2000, 4000); |
473 | lp5523_write(chip->client, | 472 | lp5523_write(chip->client, |
474 | LP5523_REG_LED_TEST_CTRL, | 473 | LP5523_REG_LED_TEST_CTRL, |
475 | LP5523_EN_LEDTEST | i); | 474 | LP5523_EN_LEDTEST | i); |
476 | /* ledtest takes 2.7ms */ | 475 | /* ADC conversion time is 2.7 ms typically */ |
477 | usleep_range(3000, 10000); | 476 | usleep_range(3000, 6000); |
478 | ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status); | 477 | ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status); |
479 | if (!(status & LP5523_LEDTEST_DONE)) | 478 | if (!(status & LP5523_LEDTEST_DONE)) |
480 | usleep_range(3000, 10000); | 479 | usleep_range(3000, 6000);/* Was not ready. Wait. */ |
481 | ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &adc); | 480 | ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &adc); |
482 | 481 | ||
483 | if (adc >= vdd || adc < LP5523_ADC_SHORTCIRC_LIM) | 482 | if (adc >= vdd || adc < LP5523_ADC_SHORTCIRC_LIM) |
@@ -930,11 +929,16 @@ static int lp5523_probe(struct i2c_client *client, | |||
930 | 929 | ||
931 | if (pdata->enable) { | 930 | if (pdata->enable) { |
932 | pdata->enable(0); | 931 | pdata->enable(0); |
933 | usleep_range(1000, 10000); | 932 | usleep_range(1000, 2000); /* Keep enable down at least 1ms */ |
934 | pdata->enable(1); | 933 | pdata->enable(1); |
935 | usleep_range(1000, 10000); /* Spec says min 500us */ | 934 | usleep_range(1000, 2000); /* 500us abs min. */ |
936 | } | 935 | } |
937 | 936 | ||
937 | lp5523_write(client, LP5523_REG_RESET, 0xff); | ||
938 | usleep_range(10000, 20000); /* | ||
939 | * Exact value is not available. 10 - 20ms | ||
940 | * appears to be enough for reset. | ||
941 | */ | ||
938 | ret = lp5523_detect(client); | 942 | ret = lp5523_detect(client); |
939 | if (ret) | 943 | if (ret) |
940 | goto fail2; | 944 | goto fail2; |
diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c index a688293abd0b..614ebebaaa28 100644 --- a/drivers/leds/leds-ss4200.c +++ b/drivers/leds/leds-ss4200.c | |||
@@ -102,6 +102,7 @@ static struct dmi_system_id __initdata nas_led_whitelist[] = { | |||
102 | DMI_MATCH(DMI_PRODUCT_VERSION, "1.00.00") | 102 | DMI_MATCH(DMI_PRODUCT_VERSION, "1.00.00") |
103 | } | 103 | } |
104 | }, | 104 | }, |
105 | {} | ||
105 | }; | 106 | }; |
106 | 107 | ||
107 | /* | 108 | /* |
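[Editorial note: the ss4200 hunk above adds the empty {} entry that DMI whitelist walkers rely on as an end-of-table sentinel; without it, matching reads past the array. A simplified, self-contained version of such a sentinel-terminated table; struct demo_dmi_id is a stand-in, not the real struct dmi_system_id.]

#include <stdio.h>

struct demo_dmi_id {
	const char *ident;	/* NULL marks the end of the table */
};

static const struct demo_dmi_id whitelist[] = {
	{ "Intel SS4200-E" },
	{ NULL }		/* terminator: without it the loop below
				 * would walk off the end of the array */
};

int main(void)
{
	const struct demo_dmi_id *id;

	for (id = whitelist; id->ident; id++)
		printf("matched: %s\n", id->ident);
	return 0;
}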
diff --git a/drivers/md/md.c b/drivers/md/md.c index 324a3663fcda..84c46a161927 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -1337,7 +1337,7 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors) | |||
1337 | md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, | 1337 | md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, |
1338 | rdev->sb_page); | 1338 | rdev->sb_page); |
1339 | md_super_wait(rdev->mddev); | 1339 | md_super_wait(rdev->mddev); |
1340 | return num_sectors / 2; /* kB for sysfs */ | 1340 | return num_sectors; |
1341 | } | 1341 | } |
1342 | 1342 | ||
1343 | 1343 | ||
@@ -1704,7 +1704,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors) | |||
1704 | md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, | 1704 | md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, |
1705 | rdev->sb_page); | 1705 | rdev->sb_page); |
1706 | md_super_wait(rdev->mddev); | 1706 | md_super_wait(rdev->mddev); |
1707 | return num_sectors / 2; /* kB for sysfs */ | 1707 | return num_sectors; |
1708 | } | 1708 | } |
1709 | 1709 | ||
1710 | static struct super_type super_types[] = { | 1710 | static struct super_type super_types[] = { |
@@ -4338,6 +4338,8 @@ static int md_alloc(dev_t dev, char *name) | |||
4338 | if (mddev->kobj.sd && | 4338 | if (mddev->kobj.sd && |
4339 | sysfs_create_group(&mddev->kobj, &md_bitmap_group)) | 4339 | sysfs_create_group(&mddev->kobj, &md_bitmap_group)) |
4340 | printk(KERN_DEBUG "pointless warning\n"); | 4340 | printk(KERN_DEBUG "pointless warning\n"); |
4341 | |||
4342 | blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA); | ||
4341 | abort: | 4343 | abort: |
4342 | mutex_unlock(&disks_mutex); | 4344 | mutex_unlock(&disks_mutex); |
4343 | if (!error && mddev->kobj.sd) { | 4345 | if (!error && mddev->kobj.sd) { |
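[Editorial note: the md.c size-change hunks above drop the "/ 2" because the callers now expect 512-byte sectors rather than a value pre-converted to KiB for sysfs; two sectors make one KiB. The conversion, spelled out as a runnable check with an assumed example size:]

#include <stdio.h>

int main(void)
{
	unsigned long long num_sectors = 2097152ULL;	/* 1 GiB worth of 512-byte sectors */

	printf("sectors=%llu kib=%llu\n", num_sectors, num_sectors / 2);
	return 0;
}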
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 45f8324196ec..845cf95b612c 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -1161,6 +1161,7 @@ static int raid1_remove_disk(mddev_t *mddev, int number) | |||
1161 | * is not possible. | 1161 | * is not possible. |
1162 | */ | 1162 | */ |
1163 | if (!test_bit(Faulty, &rdev->flags) && | 1163 | if (!test_bit(Faulty, &rdev->flags) && |
1164 | !mddev->recovery_disabled && | ||
1164 | mddev->degraded < conf->raid_disks) { | 1165 | mddev->degraded < conf->raid_disks) { |
1165 | err = -EBUSY; | 1166 | err = -EBUSY; |
1166 | goto abort; | 1167 | goto abort; |
diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c index ca47e6285075..307aada5fffe 100644 --- a/drivers/misc/isl29020.c +++ b/drivers/misc/isl29020.c | |||
@@ -183,9 +183,7 @@ static int isl29020_probe(struct i2c_client *client, | |||
183 | 183 | ||
184 | static int isl29020_remove(struct i2c_client *client) | 184 | static int isl29020_remove(struct i2c_client *client) |
185 | { | 185 | { |
186 | struct als_data *data = i2c_get_clientdata(client); | ||
187 | sysfs_remove_group(&client->dev.kobj, &m_als_gr); | 186 | sysfs_remove_group(&client->dev.kobj, &m_als_gr); |
188 | kfree(data); | ||
189 | return 0; | 187 | return 0; |
190 | } | 188 | } |
191 | 189 | ||
@@ -245,6 +243,6 @@ static void __exit sensor_isl29020_exit(void) | |||
245 | module_init(sensor_isl29020_init); | 243 | module_init(sensor_isl29020_init); |
246 | module_exit(sensor_isl29020_exit); | 244 | module_exit(sensor_isl29020_exit); |
247 | 245 | ||
248 | MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com"); | 246 | MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com>"); |
249 | MODULE_DESCRIPTION("Intersil isl29020 ALS Driver"); | 247 | MODULE_DESCRIPTION("Intersil isl29020 ALS Driver"); |
250 | MODULE_LICENSE("GPL v2"); | 248 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c index d551f09ccb79..6956f7e7d439 100644 --- a/drivers/misc/sgi-xp/xpc_partition.c +++ b/drivers/misc/sgi-xp/xpc_partition.c | |||
@@ -439,18 +439,23 @@ xpc_discovery(void) | |||
439 | * nodes that can comprise an access protection grouping. The access | 439 | * nodes that can comprise an access protection grouping. The access |
440 | * protection is in regards to memory, IOI and IPI. | 440 | * protection is in regards to memory, IOI and IPI. |
441 | */ | 441 | */ |
442 | max_regions = 64; | ||
443 | region_size = xp_region_size; | 442 | region_size = xp_region_size; |
444 | 443 | ||
445 | switch (region_size) { | 444 | if (is_uv()) |
446 | case 128: | 445 | max_regions = 256; |
447 | max_regions *= 2; | 446 | else { |
448 | case 64: | 447 | max_regions = 64; |
449 | max_regions *= 2; | 448 | |
450 | case 32: | 449 | switch (region_size) { |
451 | max_regions *= 2; | 450 | case 128: |
452 | region_size = 16; | 451 | max_regions *= 2; |
453 | DBUG_ON(!is_shub2()); | 452 | case 64: |
453 | max_regions *= 2; | ||
454 | case 32: | ||
455 | max_regions *= 2; | ||
456 | region_size = 16; | ||
457 | DBUG_ON(!is_shub2()); | ||
458 | } | ||
454 | } | 459 | } |
455 | 460 | ||
456 | for (region = 0; region < max_regions; region++) { | 461 | for (region = 0; region < max_regions; region++) { |
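[Editorial note: the xpc_discovery() hunk above keeps the deliberate switch fall-through for SHUB systems while giving UV a fixed 256 regions. A compilable sketch of the same computation, with is_uv() reduced to a plain flag argument:]

#include <stdio.h>

static int max_regions_for(int is_uv, int region_size)
{
	int max_regions;

	if (is_uv)
		return 256;	/* UV: fixed upper bound, no doubling */

	max_regions = 64;
	switch (region_size) {
	case 128:
		max_regions *= 2;	/* fall through */
	case 64:
		max_regions *= 2;	/* fall through */
	case 32:
		max_regions *= 2;	/* 32-node regions double three times total */
	}
	return max_regions;
}

int main(void)
{
	printf("%d %d %d\n", max_regions_for(0, 128),
	       max_regions_for(0, 64), max_regions_for(1, 128));
	return 0;			/* prints 512 256 256 */
}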
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 8f86d702e46e..31ae07a36576 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c | |||
@@ -1559,7 +1559,7 @@ void mmc_stop_host(struct mmc_host *host) | |||
1559 | 1559 | ||
1560 | if (host->caps & MMC_CAP_DISABLE) | 1560 | if (host->caps & MMC_CAP_DISABLE) |
1561 | cancel_delayed_work(&host->disable); | 1561 | cancel_delayed_work(&host->disable); |
1562 | cancel_delayed_work(&host->detect); | 1562 | cancel_delayed_work_sync(&host->detect); |
1563 | mmc_flush_scheduled_work(); | 1563 | mmc_flush_scheduled_work(); |
1564 | 1564 | ||
1565 | /* clear pm flags now and let card drivers set them as needed */ | 1565 | /* clear pm flags now and let card drivers set them as needed */ |
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 995261f7fd70..77f93c3b8808 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c | |||
@@ -375,7 +375,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, | |||
375 | struct mmc_card *oldcard) | 375 | struct mmc_card *oldcard) |
376 | { | 376 | { |
377 | struct mmc_card *card; | 377 | struct mmc_card *card; |
378 | int err, ddr = MMC_SDR_MODE; | 378 | int err, ddr = 0; |
379 | u32 cid[4]; | 379 | u32 cid[4]; |
380 | unsigned int max_dtr; | 380 | unsigned int max_dtr; |
381 | 381 | ||
@@ -562,7 +562,11 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, | |||
562 | 1 << bus_width, ddr); | 562 | 1 << bus_width, ddr); |
563 | err = 0; | 563 | err = 0; |
564 | } else { | 564 | } else { |
565 | mmc_card_set_ddr_mode(card); | 565 | if (ddr) |
566 | mmc_card_set_ddr_mode(card); | ||
567 | else | ||
568 | ddr = MMC_SDR_MODE; | ||
569 | |||
566 | mmc_set_bus_width_ddr(card->host, bus_width, ddr); | 570 | mmc_set_bus_width_ddr(card->host, bus_width, ddr); |
567 | } | 571 | } |
568 | } | 572 | } |
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index c3ad1058cd31..efef5f94ac42 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c | |||
@@ -547,9 +547,11 @@ static void mmc_sdio_detect(struct mmc_host *host) | |||
547 | BUG_ON(!host->card); | 547 | BUG_ON(!host->card); |
548 | 548 | ||
549 | /* Make sure card is powered before detecting it */ | 549 | /* Make sure card is powered before detecting it */ |
550 | err = pm_runtime_get_sync(&host->card->dev); | 550 | if (host->caps & MMC_CAP_POWER_OFF_CARD) { |
551 | if (err < 0) | 551 | err = pm_runtime_get_sync(&host->card->dev); |
552 | goto out; | 552 | if (err < 0) |
553 | goto out; | ||
554 | } | ||
553 | 555 | ||
554 | mmc_claim_host(host); | 556 | mmc_claim_host(host); |
555 | 557 | ||
@@ -560,6 +562,20 @@ static void mmc_sdio_detect(struct mmc_host *host) | |||
560 | 562 | ||
561 | mmc_release_host(host); | 563 | mmc_release_host(host); |
562 | 564 | ||
565 | /* | ||
566 | * Tell PM core it's OK to power off the card now. | ||
567 | * | ||
568 | * The _sync variant is used in order to ensure that the card | ||
569 | * is left powered off in case an error occurred, and the card | ||
570 | * is going to be removed. | ||
571 | * | ||
572 | * Since there is no specific reason to believe a new user | ||
573 | * is about to show up at this point, the _sync variant is | ||
574 | * desirable anyway. | ||
575 | */ | ||
576 | if (host->caps & MMC_CAP_POWER_OFF_CARD) | ||
577 | pm_runtime_put_sync(&host->card->dev); | ||
578 | |||
563 | out: | 579 | out: |
564 | if (err) { | 580 | if (err) { |
565 | mmc_sdio_remove(host); | 581 | mmc_sdio_remove(host); |
@@ -568,9 +584,6 @@ out: | |||
568 | mmc_detach_bus(host); | 584 | mmc_detach_bus(host); |
569 | mmc_release_host(host); | 585 | mmc_release_host(host); |
570 | } | 586 | } |
571 | |||
572 | /* Tell PM core that we're done */ | ||
573 | pm_runtime_put(&host->card->dev); | ||
574 | } | 587 | } |
575 | 588 | ||
576 | /* | 589 | /* |
@@ -718,16 +731,21 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr) | |||
718 | card = host->card; | 731 | card = host->card; |
719 | 732 | ||
720 | /* | 733 | /* |
721 | * Let runtime PM core know our card is active | 734 | * Enable runtime PM only if supported by host+card+board |
722 | */ | 735 | */ |
723 | err = pm_runtime_set_active(&card->dev); | 736 | if (host->caps & MMC_CAP_POWER_OFF_CARD) { |
724 | if (err) | 737 | /* |
725 | goto remove; | 738 | * Let runtime PM core know our card is active |
739 | */ | ||
740 | err = pm_runtime_set_active(&card->dev); | ||
741 | if (err) | ||
742 | goto remove; | ||
726 | 743 | ||
727 | /* | 744 | /* |
728 | * Enable runtime PM for this card | 745 | * Enable runtime PM for this card |
729 | */ | 746 | */ |
730 | pm_runtime_enable(&card->dev); | 747 | pm_runtime_enable(&card->dev); |
748 | } | ||
731 | 749 | ||
732 | /* | 750 | /* |
733 | * The number of functions on the card is encoded inside | 751 | * The number of functions on the card is encoded inside |
@@ -745,9 +763,10 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr) | |||
745 | goto remove; | 763 | goto remove; |
746 | 764 | ||
747 | /* | 765 | /* |
748 | * Enable Runtime PM for this func | 766 | * Enable Runtime PM for this func (if supported) |
749 | */ | 767 | */ |
750 | pm_runtime_enable(&card->sdio_func[i]->dev); | 768 | if (host->caps & MMC_CAP_POWER_OFF_CARD) |
769 | pm_runtime_enable(&card->sdio_func[i]->dev); | ||
751 | } | 770 | } |
752 | 771 | ||
753 | mmc_release_host(host); | 772 | mmc_release_host(host); |
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index 2716c7ab6bbf..203da443e339 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/pm_runtime.h> | 17 | #include <linux/pm_runtime.h> |
18 | 18 | ||
19 | #include <linux/mmc/card.h> | 19 | #include <linux/mmc/card.h> |
20 | #include <linux/mmc/host.h> | ||
20 | #include <linux/mmc/sdio_func.h> | 21 | #include <linux/mmc/sdio_func.h> |
21 | 22 | ||
22 | #include "sdio_cis.h" | 23 | #include "sdio_cis.h" |
@@ -132,9 +133,11 @@ static int sdio_bus_probe(struct device *dev) | |||
132 | * it should call pm_runtime_put_noidle() in its probe routine and | 133 | * it should call pm_runtime_put_noidle() in its probe routine and |
133 | * pm_runtime_get_noresume() in its remove routine. | 134 | * pm_runtime_get_noresume() in its remove routine. |
134 | */ | 135 | */ |
135 | ret = pm_runtime_get_sync(dev); | 136 | if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) { |
136 | if (ret < 0) | 137 | ret = pm_runtime_get_sync(dev); |
137 | goto out; | 138 | if (ret < 0) |
139 | goto out; | ||
140 | } | ||
138 | 141 | ||
139 | /* Set the default block size so the driver is sure it's something | 142 | /* Set the default block size so the driver is sure it's something |
140 | * sensible. */ | 143 | * sensible. */ |
@@ -151,7 +154,8 @@ static int sdio_bus_probe(struct device *dev) | |||
151 | return 0; | 154 | return 0; |
152 | 155 | ||
153 | disable_runtimepm: | 156 | disable_runtimepm: |
154 | pm_runtime_put_noidle(dev); | 157 | if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) |
158 | pm_runtime_put_noidle(dev); | ||
155 | out: | 159 | out: |
156 | return ret; | 160 | return ret; |
157 | } | 161 | } |
@@ -160,12 +164,14 @@ static int sdio_bus_remove(struct device *dev) | |||
160 | { | 164 | { |
161 | struct sdio_driver *drv = to_sdio_driver(dev->driver); | 165 | struct sdio_driver *drv = to_sdio_driver(dev->driver); |
162 | struct sdio_func *func = dev_to_sdio_func(dev); | 166 | struct sdio_func *func = dev_to_sdio_func(dev); |
163 | int ret; | 167 | int ret = 0; |
164 | 168 | ||
165 | /* Make sure card is powered before invoking ->remove() */ | 169 | /* Make sure card is powered before invoking ->remove() */ |
166 | ret = pm_runtime_get_sync(dev); | 170 | if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) { |
167 | if (ret < 0) | 171 | ret = pm_runtime_get_sync(dev); |
168 | goto out; | 172 | if (ret < 0) |
173 | goto out; | ||
174 | } | ||
169 | 175 | ||
170 | drv->remove(func); | 176 | drv->remove(func); |
171 | 177 | ||
@@ -178,10 +184,12 @@ static int sdio_bus_remove(struct device *dev) | |||
178 | } | 184 | } |
179 | 185 | ||
180 | /* First, undo the increment made directly above */ | 186 | /* First, undo the increment made directly above */ |
181 | pm_runtime_put_noidle(dev); | 187 | if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) |
188 | pm_runtime_put_noidle(dev); | ||
182 | 189 | ||
183 | /* Then undo the runtime PM settings in sdio_bus_probe() */ | 190 | /* Then undo the runtime PM settings in sdio_bus_probe() */ |
184 | pm_runtime_put_noidle(dev); | 191 | if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) |
192 | pm_runtime_put_noidle(dev); | ||
185 | 193 | ||
186 | out: | 194 | out: |
187 | return ret; | 195 | return ret; |
@@ -191,6 +199,8 @@ out: | |||
191 | 199 | ||
192 | static int sdio_bus_pm_prepare(struct device *dev) | 200 | static int sdio_bus_pm_prepare(struct device *dev) |
193 | { | 201 | { |
202 | struct sdio_func *func = dev_to_sdio_func(dev); | ||
203 | |||
194 | /* | 204 | /* |
195 | * Resume an SDIO device which was suspended at run time at this | 205 | * Resume an SDIO device which was suspended at run time at this |
196 | * point, in order to allow standard SDIO suspend/resume paths | 206 | * point, in order to allow standard SDIO suspend/resume paths |
@@ -212,7 +222,8 @@ static int sdio_bus_pm_prepare(struct device *dev) | |||
212 | * since there is little point in failing system suspend if a | 222 | * since there is little point in failing system suspend if a |
213 | * device can't be resumed. | 223 | * device can't be resumed. |
214 | */ | 224 | */ |
215 | pm_runtime_resume(dev); | 225 | if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) |
226 | pm_runtime_resume(dev); | ||
216 | 227 | ||
217 | return 0; | 228 | return 0; |
218 | } | 229 | } |
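[Editorial note: across the sdio.c and sdio_bus.c hunks above, every runtime-PM get/put is now guarded by MMC_CAP_POWER_OFF_CARD, so references stay balanced on boards that cannot actually cut card power. A small stand-alone sketch of that symmetric-guard pattern; the capability flag and the pm_get/pm_put helpers are demo stand-ins for MMC_CAP_POWER_OFF_CARD and the pm_runtime_* calls.]

#include <stdio.h>

#define DEMO_CAP_POWER_OFF_CARD	(1u << 0)

static int refs;

static int pm_get(void) { refs++; return 0; }
static void pm_put(void) { refs--; }

static int demo_probe(unsigned int caps)
{
	if (caps & DEMO_CAP_POWER_OFF_CARD) {
		if (pm_get() < 0)
			return -1;
	}
	/* ... driver probe work ... */
	return 0;
}

static void demo_remove(unsigned int caps)
{
	if (caps & DEMO_CAP_POWER_OFF_CARD)
		pm_put();	/* only drop a reference we actually took */
}

int main(void)
{
	demo_probe(0);
	demo_remove(0);
	demo_probe(DEMO_CAP_POWER_OFF_CARD);
	demo_remove(DEMO_CAP_POWER_OFF_CARD);
	printf("refs=%d\n", refs);	/* balanced: 0 */
	return 0;
}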
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 82a1079bbdc7..5d46021cbb57 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c | |||
@@ -1002,7 +1002,7 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host, | |||
1002 | * Monitor a 0->1 transition first | 1002 | * Monitor a 0->1 transition first |
1003 | */ | 1003 | */ |
1004 | if (mmc_slot(host).features & HSMMC_HAS_UPDATED_RESET) { | 1004 | if (mmc_slot(host).features & HSMMC_HAS_UPDATED_RESET) { |
1005 | while ((!(OMAP_HSMMC_READ(host, SYSCTL) & bit)) | 1005 | while ((!(OMAP_HSMMC_READ(host->base, SYSCTL) & bit)) |
1006 | && (i++ < limit)) | 1006 | && (i++ < limit)) |
1007 | cpu_relax(); | 1007 | cpu_relax(); |
1008 | } | 1008 | } |
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 2e9cca19c90b..9b82910b9dbb 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/clk.h> | 17 | #include <linux/clk.h> |
18 | #include <linux/mmc/host.h> | 18 | #include <linux/mmc/host.h> |
19 | #include <linux/mmc/sdhci-pltfm.h> | 19 | #include <linux/mmc/sdhci-pltfm.h> |
20 | #include <mach/hardware.h> | ||
20 | #include "sdhci.h" | 21 | #include "sdhci.h" |
21 | #include "sdhci-pltfm.h" | 22 | #include "sdhci-pltfm.h" |
22 | #include "sdhci-esdhc.h" | 23 | #include "sdhci-esdhc.h" |
@@ -112,6 +113,13 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd | |||
112 | clk_enable(clk); | 113 | clk_enable(clk); |
113 | pltfm_host->clk = clk; | 114 | pltfm_host->clk = clk; |
114 | 115 | ||
116 | if (cpu_is_mx35() || cpu_is_mx51()) | ||
117 | host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; | ||
118 | |||
119 | /* Fix errata ENGcm07207 which is present on i.MX25 and i.MX35 */ | ||
120 | if (cpu_is_mx25() || cpu_is_mx35()) | ||
121 | host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK; | ||
122 | |||
115 | return 0; | 123 | return 0; |
116 | } | 124 | } |
117 | 125 | ||
@@ -133,10 +141,8 @@ static struct sdhci_ops sdhci_esdhc_ops = { | |||
133 | }; | 141 | }; |
134 | 142 | ||
135 | struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { | 143 | struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { |
136 | .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_NO_MULTIBLOCK | 144 | .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA, |
137 | | SDHCI_QUIRK_BROKEN_ADMA, | ||
138 | /* ADMA has issues. Might be fixable */ | 145 | /* ADMA has issues. Might be fixable */ |
139 | /* NO_MULTIBLOCK might be MX35 only (Errata: ENGcm07207) */ | ||
140 | .ops = &sdhci_esdhc_ops, | 146 | .ops = &sdhci_esdhc_ops, |
141 | .init = esdhc_pltfm_init, | 147 | .init = esdhc_pltfm_init, |
142 | .exit = esdhc_pltfm_exit, | 148 | .exit = esdhc_pltfm_exit, |
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c index 55746bac2f44..3d9c2460d437 100644 --- a/drivers/mmc/host/sdhci-pci.c +++ b/drivers/mmc/host/sdhci-pci.c | |||
@@ -149,11 +149,11 @@ static const struct sdhci_pci_fixes sdhci_cafe = { | |||
149 | * ADMA operation is disabled for Moorestown platform due to | 149 | * ADMA operation is disabled for Moorestown platform due to |
150 | * hardware bugs. | 150 | * hardware bugs. |
151 | */ | 151 | */ |
152 | static int mrst_hc1_probe(struct sdhci_pci_chip *chip) | 152 | static int mrst_hc_probe(struct sdhci_pci_chip *chip) |
153 | { | 153 | { |
154 | /* | 154 | /* |
155 | * slots number is fixed here for MRST as SDIO3 is never used and has | 155 | * slots number is fixed here for MRST as SDIO3/5 are never used and |
156 | * hardware bugs. | 156 | * have hardware bugs. |
157 | */ | 157 | */ |
158 | chip->num_slots = 1; | 158 | chip->num_slots = 1; |
159 | return 0; | 159 | return 0; |
@@ -163,9 +163,9 @@ static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = { | |||
163 | .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT, | 163 | .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT, |
164 | }; | 164 | }; |
165 | 165 | ||
166 | static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1 = { | 166 | static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = { |
167 | .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT, | 167 | .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT, |
168 | .probe = mrst_hc1_probe, | 168 | .probe = mrst_hc_probe, |
169 | }; | 169 | }; |
170 | 170 | ||
171 | static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = { | 171 | static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = { |
@@ -538,7 +538,15 @@ static const struct pci_device_id pci_ids[] __devinitdata = { | |||
538 | .device = PCI_DEVICE_ID_INTEL_MRST_SD1, | 538 | .device = PCI_DEVICE_ID_INTEL_MRST_SD1, |
539 | .subvendor = PCI_ANY_ID, | 539 | .subvendor = PCI_ANY_ID, |
540 | .subdevice = PCI_ANY_ID, | 540 | .subdevice = PCI_ANY_ID, |
541 | .driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc1, | 541 | .driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc1_hc2, |
542 | }, | ||
543 | |||
544 | { | ||
545 | .vendor = PCI_VENDOR_ID_INTEL, | ||
546 | .device = PCI_DEVICE_ID_INTEL_MRST_SD2, | ||
547 | .subvendor = PCI_ANY_ID, | ||
548 | .subdevice = PCI_ANY_ID, | ||
549 | .driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc1_hc2, | ||
542 | }, | 550 | }, |
543 | 551 | ||
544 | { | 552 | { |
@@ -637,6 +645,7 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state) | |||
637 | { | 645 | { |
638 | struct sdhci_pci_chip *chip; | 646 | struct sdhci_pci_chip *chip; |
639 | struct sdhci_pci_slot *slot; | 647 | struct sdhci_pci_slot *slot; |
648 | mmc_pm_flag_t slot_pm_flags; | ||
640 | mmc_pm_flag_t pm_flags = 0; | 649 | mmc_pm_flag_t pm_flags = 0; |
641 | int i, ret; | 650 | int i, ret; |
642 | 651 | ||
@@ -657,7 +666,11 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state) | |||
657 | return ret; | 666 | return ret; |
658 | } | 667 | } |
659 | 668 | ||
660 | pm_flags |= slot->host->mmc->pm_flags; | 669 | slot_pm_flags = slot->host->mmc->pm_flags; |
670 | if (slot_pm_flags & MMC_PM_WAKE_SDIO_IRQ) | ||
671 | sdhci_enable_irq_wakeups(slot->host); | ||
672 | |||
673 | pm_flags |= slot_pm_flags; | ||
661 | } | 674 | } |
662 | 675 | ||
663 | if (chip->fixes && chip->fixes->suspend) { | 676 | if (chip->fixes && chip->fixes->suspend) { |
@@ -671,8 +684,10 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state) | |||
671 | 684 | ||
672 | pci_save_state(pdev); | 685 | pci_save_state(pdev); |
673 | if (pm_flags & MMC_PM_KEEP_POWER) { | 686 | if (pm_flags & MMC_PM_KEEP_POWER) { |
674 | if (pm_flags & MMC_PM_WAKE_SDIO_IRQ) | 687 | if (pm_flags & MMC_PM_WAKE_SDIO_IRQ) { |
688 | pci_pme_active(pdev, true); | ||
675 | pci_enable_wake(pdev, PCI_D3hot, 1); | 689 | pci_enable_wake(pdev, PCI_D3hot, 1); |
690 | } | ||
676 | pci_set_power_state(pdev, PCI_D3hot); | 691 | pci_set_power_state(pdev, PCI_D3hot); |
677 | } else { | 692 | } else { |
678 | pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); | 693 | pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); |
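[Editorial note: in the sdhci-pci suspend hunks above, a slot that asked for MMC_PM_KEEP_POWER plus MMC_PM_WAKE_SDIO_IRQ now gets SDHCI interrupt wakeups and PCI PME armed before the controller drops to D3hot. The decision tree, reduced to a runnable sketch; the DEMO_* flag values are placeholders, not mmc_pm_flag_t.]

#include <stdio.h>

#define DEMO_PM_KEEP_POWER	(1u << 0)
#define DEMO_PM_WAKE_SDIO_IRQ	(1u << 1)

static void suspend_policy(unsigned int pm_flags)
{
	if (pm_flags & DEMO_PM_KEEP_POWER) {
		if (pm_flags & DEMO_PM_WAKE_SDIO_IRQ)
			printf("arm PME and D3hot wake\n");
		printf("enter D3hot, card power kept\n");
	} else {
		printf("full power off\n");
	}
}

int main(void)
{
	suspend_policy(DEMO_PM_KEEP_POWER | DEMO_PM_WAKE_SDIO_IRQ);
	suspend_policy(0);
	return 0;
}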
diff --git a/drivers/mmc/host/sdhci-pxa.c b/drivers/mmc/host/sdhci-pxa.c index fc406ac5d193..5a61208cbc66 100644 --- a/drivers/mmc/host/sdhci-pxa.c +++ b/drivers/mmc/host/sdhci-pxa.c | |||
@@ -141,6 +141,10 @@ static int __devinit sdhci_pxa_probe(struct platform_device *pdev) | |||
141 | if (pdata->quirks) | 141 | if (pdata->quirks) |
142 | host->quirks |= pdata->quirks; | 142 | host->quirks |= pdata->quirks; |
143 | 143 | ||
144 | /* If slot design supports 8 bit data, indicate this to MMC. */ | ||
145 | if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT) | ||
146 | host->mmc->caps |= MMC_CAP_8_BIT_DATA; | ||
147 | |||
144 | ret = sdhci_add_host(host); | 148 | ret = sdhci_add_host(host); |
145 | if (ret) { | 149 | if (ret) { |
146 | dev_err(&pdev->dev, "failed to add host\n"); | 150 | dev_err(&pdev->dev, "failed to add host\n"); |
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 782c0ee3c925..a25db426c910 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c | |||
@@ -1185,17 +1185,31 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
1185 | if (host->ops->platform_send_init_74_clocks) | 1185 | if (host->ops->platform_send_init_74_clocks) |
1186 | host->ops->platform_send_init_74_clocks(host, ios->power_mode); | 1186 | host->ops->platform_send_init_74_clocks(host, ios->power_mode); |
1187 | 1187 | ||
1188 | ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); | 1188 | /* |
1189 | 1189 | * If your platform has 8-bit width support but is not a v3 controller, | |
1190 | if (ios->bus_width == MMC_BUS_WIDTH_8) | 1190 | * or if it requires special setup code, you should implement that in |
1191 | ctrl |= SDHCI_CTRL_8BITBUS; | 1191 | * platform_8bit_width(). |
1192 | else | 1192 | */ |
1193 | ctrl &= ~SDHCI_CTRL_8BITBUS; | 1193 | if (host->ops->platform_8bit_width) |
1194 | host->ops->platform_8bit_width(host, ios->bus_width); | ||
1195 | else { | ||
1196 | ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); | ||
1197 | if (ios->bus_width == MMC_BUS_WIDTH_8) { | ||
1198 | ctrl &= ~SDHCI_CTRL_4BITBUS; | ||
1199 | if (host->version >= SDHCI_SPEC_300) | ||
1200 | ctrl |= SDHCI_CTRL_8BITBUS; | ||
1201 | } else { | ||
1202 | if (host->version >= SDHCI_SPEC_300) | ||
1203 | ctrl &= ~SDHCI_CTRL_8BITBUS; | ||
1204 | if (ios->bus_width == MMC_BUS_WIDTH_4) | ||
1205 | ctrl |= SDHCI_CTRL_4BITBUS; | ||
1206 | else | ||
1207 | ctrl &= ~SDHCI_CTRL_4BITBUS; | ||
1208 | } | ||
1209 | sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); | ||
1210 | } | ||
1194 | 1211 | ||
1195 | if (ios->bus_width == MMC_BUS_WIDTH_4) | 1212 | ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); |
1196 | ctrl |= SDHCI_CTRL_4BITBUS; | ||
1197 | else | ||
1198 | ctrl &= ~SDHCI_CTRL_4BITBUS; | ||
1199 | 1213 | ||
1200 | if ((ios->timing == MMC_TIMING_SD_HS || | 1214 | if ((ios->timing == MMC_TIMING_SD_HS || |
1201 | ios->timing == MMC_TIMING_MMC_HS) | 1215 | ios->timing == MMC_TIMING_MMC_HS) |
@@ -1681,6 +1695,16 @@ int sdhci_resume_host(struct sdhci_host *host) | |||
1681 | 1695 | ||
1682 | EXPORT_SYMBOL_GPL(sdhci_resume_host); | 1696 | EXPORT_SYMBOL_GPL(sdhci_resume_host); |
1683 | 1697 | ||
1698 | void sdhci_enable_irq_wakeups(struct sdhci_host *host) | ||
1699 | { | ||
1700 | u8 val; | ||
1701 | val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); | ||
1702 | val |= SDHCI_WAKE_ON_INT; | ||
1703 | sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); | ||
1704 | } | ||
1705 | |||
1706 | EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups); | ||
1707 | |||
1684 | #endif /* CONFIG_PM */ | 1708 | #endif /* CONFIG_PM */ |
1685 | 1709 | ||
1686 | /*****************************************************************************\ | 1710 | /*****************************************************************************\ |
@@ -1845,11 +1869,19 @@ int sdhci_add_host(struct sdhci_host *host) | |||
1845 | mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; | 1869 | mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; |
1846 | else | 1870 | else |
1847 | mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; | 1871 | mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; |
1872 | |||
1848 | mmc->f_max = host->max_clk; | 1873 | mmc->f_max = host->max_clk; |
1849 | mmc->caps |= MMC_CAP_SDIO_IRQ; | 1874 | mmc->caps |= MMC_CAP_SDIO_IRQ; |
1850 | 1875 | ||
1876 | /* | ||
1877 | * A controller may support 8-bit width, but the board itself | ||
1878 | * might not have the pins brought out. Boards that support | ||
1879 | * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in | ||
1880 | * their platform code before calling sdhci_add_host(), and we | ||
1881 | * won't assume 8-bit width for hosts without that CAP. | ||
1882 | */ | ||
1851 | if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) | 1883 | if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) |
1852 | mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA; | 1884 | mmc->caps |= MMC_CAP_4_BIT_DATA; |
1853 | 1885 | ||
1854 | if (caps & SDHCI_CAN_DO_HISPD) | 1886 | if (caps & SDHCI_CAN_DO_HISPD) |
1855 | mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; | 1887 | mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; |
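The platform_8bit_width() hook added above is intended for hosts whose 8-bit bus enable does not follow the standard v3 SDHCI_CTRL_8BITBUS bit. As a rough sketch only (EXAMPLE_CTRL_8BIT is an invented, vendor-specific bit, not from this patch), such a driver might provide:

static int example_platform_8bit_width(struct sdhci_host *host, int width)
{
	u8 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if (width == MMC_BUS_WIDTH_8)
		ctrl |= EXAMPLE_CTRL_8BIT;	/* assumed vendor-specific enable bit */
	else
		ctrl &= ~EXAMPLE_CTRL_8BIT;

	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	return 0;
}

static struct sdhci_ops example_sdhci_ops = {
	.platform_8bit_width	= example_platform_8bit_width,
};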
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index b7b8a3b28b01..e42d7f00c060 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h | |||
@@ -76,7 +76,7 @@ | |||
76 | #define SDHCI_CTRL_ADMA1 0x08 | 76 | #define SDHCI_CTRL_ADMA1 0x08 |
77 | #define SDHCI_CTRL_ADMA32 0x10 | 77 | #define SDHCI_CTRL_ADMA32 0x10 |
78 | #define SDHCI_CTRL_ADMA64 0x18 | 78 | #define SDHCI_CTRL_ADMA64 0x18 |
79 | #define SDHCI_CTRL_8BITBUS 0x20 | 79 | #define SDHCI_CTRL_8BITBUS 0x20 |
80 | 80 | ||
81 | #define SDHCI_POWER_CONTROL 0x29 | 81 | #define SDHCI_POWER_CONTROL 0x29 |
82 | #define SDHCI_POWER_ON 0x01 | 82 | #define SDHCI_POWER_ON 0x01 |
@@ -87,6 +87,9 @@ | |||
87 | #define SDHCI_BLOCK_GAP_CONTROL 0x2A | 87 | #define SDHCI_BLOCK_GAP_CONTROL 0x2A |
88 | 88 | ||
89 | #define SDHCI_WAKE_UP_CONTROL 0x2B | 89 | #define SDHCI_WAKE_UP_CONTROL 0x2B |
90 | #define SDHCI_WAKE_ON_INT 0x01 | ||
91 | #define SDHCI_WAKE_ON_INSERT 0x02 | ||
92 | #define SDHCI_WAKE_ON_REMOVE 0x04 | ||
90 | 93 | ||
91 | #define SDHCI_CLOCK_CONTROL 0x2C | 94 | #define SDHCI_CLOCK_CONTROL 0x2C |
92 | #define SDHCI_DIVIDER_SHIFT 8 | 95 | #define SDHCI_DIVIDER_SHIFT 8 |
@@ -152,6 +155,7 @@ | |||
152 | #define SDHCI_CLOCK_BASE_SHIFT 8 | 155 | #define SDHCI_CLOCK_BASE_SHIFT 8 |
153 | #define SDHCI_MAX_BLOCK_MASK 0x00030000 | 156 | #define SDHCI_MAX_BLOCK_MASK 0x00030000 |
154 | #define SDHCI_MAX_BLOCK_SHIFT 16 | 157 | #define SDHCI_MAX_BLOCK_SHIFT 16 |
158 | #define SDHCI_CAN_DO_8BIT 0x00040000 | ||
155 | #define SDHCI_CAN_DO_ADMA2 0x00080000 | 159 | #define SDHCI_CAN_DO_ADMA2 0x00080000 |
156 | #define SDHCI_CAN_DO_ADMA1 0x00100000 | 160 | #define SDHCI_CAN_DO_ADMA1 0x00100000 |
157 | #define SDHCI_CAN_DO_HISPD 0x00200000 | 161 | #define SDHCI_CAN_DO_HISPD 0x00200000 |
@@ -212,6 +216,8 @@ struct sdhci_ops { | |||
212 | unsigned int (*get_max_clock)(struct sdhci_host *host); | 216 | unsigned int (*get_max_clock)(struct sdhci_host *host); |
213 | unsigned int (*get_min_clock)(struct sdhci_host *host); | 217 | unsigned int (*get_min_clock)(struct sdhci_host *host); |
214 | unsigned int (*get_timeout_clock)(struct sdhci_host *host); | 218 | unsigned int (*get_timeout_clock)(struct sdhci_host *host); |
219 | int (*platform_8bit_width)(struct sdhci_host *host, | ||
220 | int width); | ||
215 | void (*platform_send_init_74_clocks)(struct sdhci_host *host, | 221 | void (*platform_send_init_74_clocks)(struct sdhci_host *host, |
216 | u8 power_mode); | 222 | u8 power_mode); |
217 | unsigned int (*get_ro)(struct sdhci_host *host); | 223 | unsigned int (*get_ro)(struct sdhci_host *host); |
@@ -317,6 +323,7 @@ extern void sdhci_remove_host(struct sdhci_host *host, int dead); | |||
317 | #ifdef CONFIG_PM | 323 | #ifdef CONFIG_PM |
318 | extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state); | 324 | extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state); |
319 | extern int sdhci_resume_host(struct sdhci_host *host); | 325 | extern int sdhci_resume_host(struct sdhci_host *host); |
326 | extern void sdhci_enable_irq_wakeups(struct sdhci_host *host); | ||
320 | #endif | 327 | #endif |
321 | 328 | ||
322 | #endif /* __SDHCI_HW_H */ | 329 | #endif /* __SDHCI_HW_H */ |
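The new sdhci_enable_irq_wakeups() export is meant for glue drivers that want the card interrupt to wake the system from suspend; a minimal usage sketch (the surrounding suspend hook and names are assumed, not taken from this patch):

static int example_sdhci_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);

	if (device_may_wakeup(&pdev->dev))
		sdhci_enable_irq_wakeups(host);

	return sdhci_suspend_host(host, state);
}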
diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c index b4ead4a13c98..f8f65df9b017 100644 --- a/drivers/mmc/host/ushc.c +++ b/drivers/mmc/host/ushc.c | |||
@@ -425,7 +425,7 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id | |||
425 | struct usb_device *usb_dev = interface_to_usbdev(intf); | 425 | struct usb_device *usb_dev = interface_to_usbdev(intf); |
426 | struct mmc_host *mmc; | 426 | struct mmc_host *mmc; |
427 | struct ushc_data *ushc; | 427 | struct ushc_data *ushc; |
428 | int ret = -ENOMEM; | 428 | int ret; |
429 | 429 | ||
430 | mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev); | 430 | mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev); |
431 | if (mmc == NULL) | 431 | if (mmc == NULL) |
@@ -462,11 +462,15 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id | |||
462 | mmc->max_blk_count = 511; | 462 | mmc->max_blk_count = 511; |
463 | 463 | ||
464 | ushc->int_urb = usb_alloc_urb(0, GFP_KERNEL); | 464 | ushc->int_urb = usb_alloc_urb(0, GFP_KERNEL); |
465 | if (ushc->int_urb == NULL) | 465 | if (ushc->int_urb == NULL) { |
466 | ret = -ENOMEM; | ||
466 | goto err; | 467 | goto err; |
468 | } | ||
467 | ushc->int_data = kzalloc(sizeof(struct ushc_int_data), GFP_KERNEL); | 469 | ushc->int_data = kzalloc(sizeof(struct ushc_int_data), GFP_KERNEL); |
468 | if (ushc->int_data == NULL) | 470 | if (ushc->int_data == NULL) { |
471 | ret = -ENOMEM; | ||
469 | goto err; | 472 | goto err; |
473 | } | ||
470 | usb_fill_int_urb(ushc->int_urb, ushc->usb_dev, | 474 | usb_fill_int_urb(ushc->int_urb, ushc->usb_dev, |
471 | usb_rcvintpipe(usb_dev, | 475 | usb_rcvintpipe(usb_dev, |
472 | intf->cur_altsetting->endpoint[0].desc.bEndpointAddress), | 476 | intf->cur_altsetting->endpoint[0].desc.bEndpointAddress), |
@@ -475,11 +479,15 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id | |||
475 | intf->cur_altsetting->endpoint[0].desc.bInterval); | 479 | intf->cur_altsetting->endpoint[0].desc.bInterval); |
476 | 480 | ||
477 | ushc->cbw_urb = usb_alloc_urb(0, GFP_KERNEL); | 481 | ushc->cbw_urb = usb_alloc_urb(0, GFP_KERNEL); |
478 | if (ushc->cbw_urb == NULL) | 482 | if (ushc->cbw_urb == NULL) { |
483 | ret = -ENOMEM; | ||
479 | goto err; | 484 | goto err; |
485 | } | ||
480 | ushc->cbw = kzalloc(sizeof(struct ushc_cbw), GFP_KERNEL); | 486 | ushc->cbw = kzalloc(sizeof(struct ushc_cbw), GFP_KERNEL); |
481 | if (ushc->cbw == NULL) | 487 | if (ushc->cbw == NULL) { |
488 | ret = -ENOMEM; | ||
482 | goto err; | 489 | goto err; |
490 | } | ||
483 | ushc->cbw->signature = USHC_CBW_SIGNATURE; | 491 | ushc->cbw->signature = USHC_CBW_SIGNATURE; |
484 | 492 | ||
485 | usb_fill_bulk_urb(ushc->cbw_urb, ushc->usb_dev, usb_sndbulkpipe(usb_dev, 2), | 493 | usb_fill_bulk_urb(ushc->cbw_urb, ushc->usb_dev, usb_sndbulkpipe(usb_dev, 2), |
@@ -487,15 +495,21 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id | |||
487 | cbw_callback, ushc); | 495 | cbw_callback, ushc); |
488 | 496 | ||
489 | ushc->data_urb = usb_alloc_urb(0, GFP_KERNEL); | 497 | ushc->data_urb = usb_alloc_urb(0, GFP_KERNEL); |
490 | if (ushc->data_urb == NULL) | 498 | if (ushc->data_urb == NULL) { |
499 | ret = -ENOMEM; | ||
491 | goto err; | 500 | goto err; |
501 | } | ||
492 | 502 | ||
493 | ushc->csw_urb = usb_alloc_urb(0, GFP_KERNEL); | 503 | ushc->csw_urb = usb_alloc_urb(0, GFP_KERNEL); |
494 | if (ushc->csw_urb == NULL) | 504 | if (ushc->csw_urb == NULL) { |
505 | ret = -ENOMEM; | ||
495 | goto err; | 506 | goto err; |
507 | } | ||
496 | ushc->csw = kzalloc(sizeof(struct ushc_cbw), GFP_KERNEL); | 508 | ushc->csw = kzalloc(sizeof(struct ushc_cbw), GFP_KERNEL); |
497 | if (ushc->csw == NULL) | 509 | if (ushc->csw == NULL) { |
510 | ret = -ENOMEM; | ||
498 | goto err; | 511 | goto err; |
512 | } | ||
499 | usb_fill_bulk_urb(ushc->csw_urb, ushc->usb_dev, usb_rcvbulkpipe(usb_dev, 6), | 513 | usb_fill_bulk_urb(ushc->csw_urb, ushc->usb_dev, usb_rcvbulkpipe(usb_dev, 6), |
500 | ushc->csw, sizeof(struct ushc_csw), | 514 | ushc->csw, sizeof(struct ushc_csw), |
501 | csw_callback, ushc); | 515 | csw_callback, ushc); |
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c index 3c631863bf40..204345be8e62 100644 --- a/drivers/mtd/ubi/scan.c +++ b/drivers/mtd/ubi/scan.c | |||
@@ -787,16 +787,15 @@ static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr, | |||
787 | * erased, so it became unstable and corrupted, and should be | 787 | * erased, so it became unstable and corrupted, and should be |
788 | * erased. | 788 | * erased. |
789 | */ | 789 | */ |
790 | return 0; | 790 | err = 0; |
791 | goto out_unlock; | ||
791 | } | 792 | } |
792 | 793 | ||
793 | if (err) | 794 | if (err) |
794 | return err; | 795 | goto out_unlock; |
795 | 796 | ||
796 | if (ubi_check_pattern(ubi->peb_buf1, 0xFF, ubi->leb_size)) { | 797 | if (ubi_check_pattern(ubi->peb_buf1, 0xFF, ubi->leb_size)) |
797 | mutex_unlock(&ubi->buf_mutex); | 798 | goto out_unlock; |
798 | return 0; | ||
799 | } | ||
800 | 799 | ||
801 | ubi_err("PEB %d contains corrupted VID header, and the data does not " | 800 | ubi_err("PEB %d contains corrupted VID header, and the data does not " |
802 | "contain all 0xFF, this may be a non-UBI PEB or a severe VID " | 801 | "contain all 0xFF, this may be a non-UBI PEB or a severe VID " |
@@ -806,8 +805,11 @@ static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr, | |||
806 | pnum, ubi->leb_start, ubi->leb_size); | 805 | pnum, ubi->leb_start, ubi->leb_size); |
807 | ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, | 806 | ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, |
808 | ubi->peb_buf1, ubi->leb_size, 1); | 807 | ubi->peb_buf1, ubi->leb_size, 1); |
808 | err = 1; | ||
809 | |||
810 | out_unlock: | ||
809 | mutex_unlock(&ubi->buf_mutex); | 811 | mutex_unlock(&ubi->buf_mutex); |
810 | return 1; | 812 | return err; |
811 | } | 813 | } |
812 | 814 | ||
813 | /** | 815 | /** |
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index f6668cdaac85..43db398437b7 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -2945,6 +2945,18 @@ source "drivers/s390/net/Kconfig" | |||
2945 | 2945 | ||
2946 | source "drivers/net/caif/Kconfig" | 2946 | source "drivers/net/caif/Kconfig" |
2947 | 2947 | ||
2948 | config TILE_NET | ||
2949 | tristate "Tilera GBE/XGBE network driver support" | ||
2950 | depends on TILE | ||
2951 | default y | ||
2952 | select CRC32 | ||
2953 | help | ||
2954 | This is a standard Linux network device driver for the | ||
2955 | on-chip Tilera Gigabit Ethernet and XAUI interfaces. | ||
2956 | |||
2957 | To compile this driver as a module, choose M here: the module | ||
2958 | will be called tile_net. | ||
2959 | |||
2948 | config XEN_NETDEV_FRONTEND | 2960 | config XEN_NETDEV_FRONTEND |
2949 | tristate "Xen network device frontend driver" | 2961 | tristate "Xen network device frontend driver" |
2950 | depends on XEN | 2962 | depends on XEN |
diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 652fc6b98039..b90738d13994 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile | |||
@@ -301,3 +301,4 @@ obj-$(CONFIG_CAIF) += caif/ | |||
301 | 301 | ||
302 | obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/ | 302 | obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/ |
303 | obj-$(CONFIG_PCH_GBE) += pch_gbe/ | 303 | obj-$(CONFIG_PCH_GBE) += pch_gbe/ |
304 | obj-$(CONFIG_TILE_NET) += tile/ | ||
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c index 919080b2c3a5..1bf672009948 100644 --- a/drivers/net/atl1c/atl1c_hw.c +++ b/drivers/net/atl1c/atl1c_hw.c | |||
@@ -82,7 +82,7 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw) | |||
82 | addr[0] = addr[1] = 0; | 82 | addr[0] = addr[1] = 0; |
83 | AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data); | 83 | AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data); |
84 | if (atl1c_check_eeprom_exist(hw)) { | 84 | if (atl1c_check_eeprom_exist(hw)) { |
85 | if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b) { | 85 | if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) { |
86 | /* Enable OTP CLK */ | 86 | /* Enable OTP CLK */ |
87 | if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) { | 87 | if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) { |
88 | otp_ctrl_data |= OTP_CTRL_CLK_EN; | 88 | otp_ctrl_data |= OTP_CTRL_CLK_EN; |
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 4686c3983fc3..4d62f7bfa036 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -31,7 +31,7 @@ | |||
31 | 31 | ||
32 | char e1000_driver_name[] = "e1000"; | 32 | char e1000_driver_name[] = "e1000"; |
33 | static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; | 33 | static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; |
34 | #define DRV_VERSION "7.3.21-k6-NAPI" | 34 | #define DRV_VERSION "7.3.21-k8-NAPI" |
35 | const char e1000_driver_version[] = DRV_VERSION; | 35 | const char e1000_driver_version[] = DRV_VERSION; |
36 | static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; | 36 | static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; |
37 | 37 | ||
@@ -485,9 +485,6 @@ void e1000_down(struct e1000_adapter *adapter) | |||
485 | struct net_device *netdev = adapter->netdev; | 485 | struct net_device *netdev = adapter->netdev; |
486 | u32 rctl, tctl; | 486 | u32 rctl, tctl; |
487 | 487 | ||
488 | /* signal that we're down so the interrupt handler does not | ||
489 | * reschedule our watchdog timer */ | ||
490 | set_bit(__E1000_DOWN, &adapter->flags); | ||
491 | 488 | ||
492 | /* disable receives in the hardware */ | 489 | /* disable receives in the hardware */ |
493 | rctl = er32(RCTL); | 490 | rctl = er32(RCTL); |
@@ -508,6 +505,13 @@ void e1000_down(struct e1000_adapter *adapter) | |||
508 | 505 | ||
509 | e1000_irq_disable(adapter); | 506 | e1000_irq_disable(adapter); |
510 | 507 | ||
508 | /* | ||
509 | * Setting DOWN must be after irq_disable to prevent | ||
510 | * a screaming interrupt. Setting DOWN also prevents | ||
511 | * timers and tasks from rescheduling. | ||
512 | */ | ||
513 | set_bit(__E1000_DOWN, &adapter->flags); | ||
514 | |||
511 | del_timer_sync(&adapter->tx_fifo_stall_timer); | 515 | del_timer_sync(&adapter->tx_fifo_stall_timer); |
512 | del_timer_sync(&adapter->watchdog_timer); | 516 | del_timer_sync(&adapter->watchdog_timer); |
513 | del_timer_sync(&adapter->phy_info_timer); | 517 | del_timer_sync(&adapter->phy_info_timer); |
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c index 00b38bccd6d0..52a7c86af663 100644 --- a/drivers/net/irda/sh_sir.c +++ b/drivers/net/irda/sh_sir.c | |||
@@ -258,7 +258,7 @@ static int sh_sir_set_baudrate(struct sh_sir_self *self, u32 baudrate) | |||
258 | 258 | ||
259 | /* Baud Rate Error Correction x 10000 */ | 259 | /* Baud Rate Error Correction x 10000 */ |
260 | u32 rate_err_array[] = { | 260 | u32 rate_err_array[] = { |
261 | 0000, 0625, 1250, 1875, | 261 | 0, 625, 1250, 1875, |
262 | 2500, 3125, 3750, 4375, | 262 | 2500, 3125, 3750, 4375, |
263 | 5000, 5625, 6250, 6875, | 263 | 5000, 5625, 6250, 6875, |
264 | 7500, 8125, 8750, 9375, | 264 | 7500, 8125, 8750, 9375, |
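A note on the table above: a leading zero makes a C integer constant octal, so the old entry 0625 actually evaluated to 405 rather than the intended 625; rewriting the correction factors in plain decimal restores the intended baud-rate error values.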
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index f0bd1a1aba3a..e8b9c53c304b 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c | |||
@@ -30,11 +30,14 @@ | |||
30 | #include <linux/ethtool.h> | 30 | #include <linux/ethtool.h> |
31 | #include <linux/phy.h> | 31 | #include <linux/phy.h> |
32 | #include <linux/marvell_phy.h> | 32 | #include <linux/marvell_phy.h> |
33 | #include <linux/of.h> | ||
33 | 34 | ||
34 | #include <asm/io.h> | 35 | #include <asm/io.h> |
35 | #include <asm/irq.h> | 36 | #include <asm/irq.h> |
36 | #include <asm/uaccess.h> | 37 | #include <asm/uaccess.h> |
37 | 38 | ||
39 | #define MII_MARVELL_PHY_PAGE 22 | ||
40 | |||
38 | #define MII_M1011_IEVENT 0x13 | 41 | #define MII_M1011_IEVENT 0x13 |
39 | #define MII_M1011_IEVENT_CLEAR 0x0000 | 42 | #define MII_M1011_IEVENT_CLEAR 0x0000 |
40 | 43 | ||
@@ -80,7 +83,6 @@ | |||
80 | #define MII_88E1121_PHY_LED_CTRL 16 | 83 | #define MII_88E1121_PHY_LED_CTRL 16 |
81 | #define MII_88E1121_PHY_LED_PAGE 3 | 84 | #define MII_88E1121_PHY_LED_PAGE 3 |
82 | #define MII_88E1121_PHY_LED_DEF 0x0030 | 85 | #define MII_88E1121_PHY_LED_DEF 0x0030 |
83 | #define MII_88E1121_PHY_PAGE 22 | ||
84 | 86 | ||
85 | #define MII_M1011_PHY_STATUS 0x11 | 87 | #define MII_M1011_PHY_STATUS 0x11 |
86 | #define MII_M1011_PHY_STATUS_1000 0x8000 | 88 | #define MII_M1011_PHY_STATUS_1000 0x8000 |
@@ -186,13 +188,94 @@ static int marvell_config_aneg(struct phy_device *phydev) | |||
186 | return 0; | 188 | return 0; |
187 | } | 189 | } |
188 | 190 | ||
191 | #ifdef CONFIG_OF_MDIO | ||
192 | /* | ||
193 | * Set and/or override some configuration registers based on the | ||
194 | * marvell,reg-init property stored in the of_node for the phydev. | ||
195 | * | ||
196 | * marvell,reg-init = <reg-page reg mask value>,...; | ||
197 | * | ||
198 | * There may be one or more sets of <reg-page reg mask value>: | ||
199 | * | ||
200 | * reg-page: which register bank to use. | ||
201 | * reg: the register. | ||
202 | * mask: if non-zero, ANDed with existing register value. | ||
203 | * value: ORed with the masked value and written to the register. | ||
204 | * | ||
205 | */ | ||
206 | static int marvell_of_reg_init(struct phy_device *phydev) | ||
207 | { | ||
208 | const __be32 *paddr; | ||
209 | int len, i, saved_page, current_page, page_changed, ret; | ||
210 | |||
211 | if (!phydev->dev.of_node) | ||
212 | return 0; | ||
213 | |||
214 | paddr = of_get_property(phydev->dev.of_node, "marvell,reg-init", &len); | ||
215 | if (!paddr || len < (4 * sizeof(*paddr))) | ||
216 | return 0; | ||
217 | |||
218 | saved_page = phy_read(phydev, MII_MARVELL_PHY_PAGE); | ||
219 | if (saved_page < 0) | ||
220 | return saved_page; | ||
221 | page_changed = 0; | ||
222 | current_page = saved_page; | ||
223 | |||
224 | ret = 0; | ||
225 | len /= sizeof(*paddr); | ||
226 | for (i = 0; i < len - 3; i += 4) { | ||
227 | u16 reg_page = be32_to_cpup(paddr + i); | ||
228 | u16 reg = be32_to_cpup(paddr + i + 1); | ||
229 | u16 mask = be32_to_cpup(paddr + i + 2); | ||
230 | u16 val_bits = be32_to_cpup(paddr + i + 3); | ||
231 | int val; | ||
232 | |||
233 | if (reg_page != current_page) { | ||
234 | current_page = reg_page; | ||
235 | page_changed = 1; | ||
236 | ret = phy_write(phydev, MII_MARVELL_PHY_PAGE, reg_page); | ||
237 | if (ret < 0) | ||
238 | goto err; | ||
239 | } | ||
240 | |||
241 | val = 0; | ||
242 | if (mask) { | ||
243 | val = phy_read(phydev, reg); | ||
244 | if (val < 0) { | ||
245 | ret = val; | ||
246 | goto err; | ||
247 | } | ||
248 | val &= mask; | ||
249 | } | ||
250 | val |= val_bits; | ||
251 | |||
252 | ret = phy_write(phydev, reg, val); | ||
253 | if (ret < 0) | ||
254 | goto err; | ||
255 | |||
256 | } | ||
257 | err: | ||
258 | if (page_changed) { | ||
259 | i = phy_write(phydev, MII_MARVELL_PHY_PAGE, saved_page); | ||
260 | if (ret == 0) | ||
261 | ret = i; | ||
262 | } | ||
263 | return ret; | ||
264 | } | ||
265 | #else | ||
266 | static int marvell_of_reg_init(struct phy_device *phydev) | ||
267 | { | ||
268 | return 0; | ||
269 | } | ||
270 | #endif /* CONFIG_OF_MDIO */ | ||
271 | |||
189 | static int m88e1121_config_aneg(struct phy_device *phydev) | 272 | static int m88e1121_config_aneg(struct phy_device *phydev) |
190 | { | 273 | { |
191 | int err, oldpage, mscr; | 274 | int err, oldpage, mscr; |
192 | 275 | ||
193 | oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE); | 276 | oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE); |
194 | 277 | ||
195 | err = phy_write(phydev, MII_88E1121_PHY_PAGE, | 278 | err = phy_write(phydev, MII_MARVELL_PHY_PAGE, |
196 | MII_88E1121_PHY_MSCR_PAGE); | 279 | MII_88E1121_PHY_MSCR_PAGE); |
197 | if (err < 0) | 280 | if (err < 0) |
198 | return err; | 281 | return err; |
@@ -218,7 +301,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev) | |||
218 | return err; | 301 | return err; |
219 | } | 302 | } |
220 | 303 | ||
221 | phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage); | 304 | phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage); |
222 | 305 | ||
223 | err = phy_write(phydev, MII_BMCR, BMCR_RESET); | 306 | err = phy_write(phydev, MII_BMCR, BMCR_RESET); |
224 | if (err < 0) | 307 | if (err < 0) |
@@ -229,11 +312,11 @@ static int m88e1121_config_aneg(struct phy_device *phydev) | |||
229 | if (err < 0) | 312 | if (err < 0) |
230 | return err; | 313 | return err; |
231 | 314 | ||
232 | oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE); | 315 | oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE); |
233 | 316 | ||
234 | phy_write(phydev, MII_88E1121_PHY_PAGE, MII_88E1121_PHY_LED_PAGE); | 317 | phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE); |
235 | phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF); | 318 | phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF); |
236 | phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage); | 319 | phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage); |
237 | 320 | ||
238 | err = genphy_config_aneg(phydev); | 321 | err = genphy_config_aneg(phydev); |
239 | 322 | ||
@@ -244,9 +327,9 @@ static int m88e1318_config_aneg(struct phy_device *phydev) | |||
244 | { | 327 | { |
245 | int err, oldpage, mscr; | 328 | int err, oldpage, mscr; |
246 | 329 | ||
247 | oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE); | 330 | oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE); |
248 | 331 | ||
249 | err = phy_write(phydev, MII_88E1121_PHY_PAGE, | 332 | err = phy_write(phydev, MII_MARVELL_PHY_PAGE, |
250 | MII_88E1121_PHY_MSCR_PAGE); | 333 | MII_88E1121_PHY_MSCR_PAGE); |
251 | if (err < 0) | 334 | if (err < 0) |
252 | return err; | 335 | return err; |
@@ -258,7 +341,7 @@ static int m88e1318_config_aneg(struct phy_device *phydev) | |||
258 | if (err < 0) | 341 | if (err < 0) |
259 | return err; | 342 | return err; |
260 | 343 | ||
261 | err = phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage); | 344 | err = phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage); |
262 | if (err < 0) | 345 | if (err < 0) |
263 | return err; | 346 | return err; |
264 | 347 | ||
@@ -368,6 +451,9 @@ static int m88e1111_config_init(struct phy_device *phydev) | |||
368 | return err; | 451 | return err; |
369 | } | 452 | } |
370 | 453 | ||
454 | err = marvell_of_reg_init(phydev); | ||
455 | if (err < 0) | ||
456 | return err; | ||
371 | 457 | ||
372 | err = phy_write(phydev, MII_BMCR, BMCR_RESET); | 458 | err = phy_write(phydev, MII_BMCR, BMCR_RESET); |
373 | if (err < 0) | 459 | if (err < 0) |
@@ -398,7 +484,7 @@ static int m88e1118_config_init(struct phy_device *phydev) | |||
398 | int err; | 484 | int err; |
399 | 485 | ||
400 | /* Change address */ | 486 | /* Change address */ |
401 | err = phy_write(phydev, 0x16, 0x0002); | 487 | err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0002); |
402 | if (err < 0) | 488 | if (err < 0) |
403 | return err; | 489 | return err; |
404 | 490 | ||
@@ -408,7 +494,7 @@ static int m88e1118_config_init(struct phy_device *phydev) | |||
408 | return err; | 494 | return err; |
409 | 495 | ||
410 | /* Change address */ | 496 | /* Change address */ |
411 | err = phy_write(phydev, 0x16, 0x0003); | 497 | err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0003); |
412 | if (err < 0) | 498 | if (err < 0) |
413 | return err; | 499 | return err; |
414 | 500 | ||
@@ -420,8 +506,42 @@ static int m88e1118_config_init(struct phy_device *phydev) | |||
420 | if (err < 0) | 506 | if (err < 0) |
421 | return err; | 507 | return err; |
422 | 508 | ||
509 | err = marvell_of_reg_init(phydev); | ||
510 | if (err < 0) | ||
511 | return err; | ||
512 | |||
423 | /* Reset address */ | 513 | /* Reset address */ |
424 | err = phy_write(phydev, 0x16, 0x0); | 514 | err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0); |
515 | if (err < 0) | ||
516 | return err; | ||
517 | |||
518 | err = phy_write(phydev, MII_BMCR, BMCR_RESET); | ||
519 | if (err < 0) | ||
520 | return err; | ||
521 | |||
522 | return 0; | ||
523 | } | ||
524 | |||
525 | static int m88e1149_config_init(struct phy_device *phydev) | ||
526 | { | ||
527 | int err; | ||
528 | |||
529 | /* Change address */ | ||
530 | err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0002); | ||
531 | if (err < 0) | ||
532 | return err; | ||
533 | |||
534 | /* Enable 1000 Mbit */ | ||
535 | err = phy_write(phydev, 0x15, 0x1048); | ||
536 | if (err < 0) | ||
537 | return err; | ||
538 | |||
539 | err = marvell_of_reg_init(phydev); | ||
540 | if (err < 0) | ||
541 | return err; | ||
542 | |||
543 | /* Reset address */ | ||
544 | err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0); | ||
425 | if (err < 0) | 545 | if (err < 0) |
426 | return err; | 546 | return err; |
427 | 547 | ||
@@ -491,6 +611,10 @@ static int m88e1145_config_init(struct phy_device *phydev) | |||
491 | } | 611 | } |
492 | } | 612 | } |
493 | 613 | ||
614 | err = marvell_of_reg_init(phydev); | ||
615 | if (err < 0) | ||
616 | return err; | ||
617 | |||
494 | return 0; | 618 | return 0; |
495 | } | 619 | } |
496 | 620 | ||
@@ -685,6 +809,19 @@ static struct phy_driver marvell_drivers[] = { | |||
685 | .driver = { .owner = THIS_MODULE }, | 809 | .driver = { .owner = THIS_MODULE }, |
686 | }, | 810 | }, |
687 | { | 811 | { |
812 | .phy_id = MARVELL_PHY_ID_88E1149R, | ||
813 | .phy_id_mask = MARVELL_PHY_ID_MASK, | ||
814 | .name = "Marvell 88E1149R", | ||
815 | .features = PHY_GBIT_FEATURES, | ||
816 | .flags = PHY_HAS_INTERRUPT, | ||
817 | .config_init = &m88e1149_config_init, | ||
818 | .config_aneg = &m88e1118_config_aneg, | ||
819 | .read_status = &genphy_read_status, | ||
820 | .ack_interrupt = &marvell_ack_interrupt, | ||
821 | .config_intr = &marvell_config_intr, | ||
822 | .driver = { .owner = THIS_MODULE }, | ||
823 | }, | ||
824 | { | ||
688 | .phy_id = MARVELL_PHY_ID_88E1240, | 825 | .phy_id = MARVELL_PHY_ID_88E1240, |
689 | .phy_id_mask = MARVELL_PHY_ID_MASK, | 826 | .phy_id_mask = MARVELL_PHY_ID_MASK, |
690 | .name = "Marvell 88E1240", | 827 | .name = "Marvell 88E1240", |
@@ -735,6 +872,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = { | |||
735 | { 0x01410e10, 0xfffffff0 }, | 872 | { 0x01410e10, 0xfffffff0 }, |
736 | { 0x01410cb0, 0xfffffff0 }, | 873 | { 0x01410cb0, 0xfffffff0 }, |
737 | { 0x01410cd0, 0xfffffff0 }, | 874 | { 0x01410cd0, 0xfffffff0 }, |
875 | { 0x01410e50, 0xfffffff0 }, | ||
738 | { 0x01410e30, 0xfffffff0 }, | 876 | { 0x01410e30, 0xfffffff0 }, |
739 | { 0x01410e90, 0xfffffff0 }, | 877 | { 0x01410e90, 0xfffffff0 }, |
740 | { } | 878 | { } |
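Each <reg-page reg mask value> group in the marvell,reg-init property handled by marvell_of_reg_init() above corresponds to a page select, a masked read, and a write-back; purely as an illustration (the tuple values below are made up and error handling is omitted):

/* Roughly what one tuple, e.g. <3 16 0xff00 0x0030>, expands to. */
static void example_apply_one_reg_init_tuple(struct phy_device *phydev)
{
	int saved_page, val;

	saved_page = phy_read(phydev, MII_MARVELL_PHY_PAGE);
	phy_write(phydev, MII_MARVELL_PHY_PAGE, 3);	/* reg-page */

	val = phy_read(phydev, 16) & 0xff00;		/* reg, mask */
	phy_write(phydev, 16, val | 0x0030);		/* OR in value */

	phy_write(phydev, MII_MARVELL_PHY_PAGE, saved_page);
}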
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c index c30e0fe55a31..528eaef5308f 100644 --- a/drivers/net/qlge/qlge_main.c +++ b/drivers/net/qlge/qlge_main.c | |||
@@ -62,15 +62,15 @@ static const u32 default_msg = | |||
62 | /* NETIF_MSG_PKTDATA | */ | 62 | /* NETIF_MSG_PKTDATA | */ |
63 | NETIF_MSG_HW | NETIF_MSG_WOL | 0; | 63 | NETIF_MSG_HW | NETIF_MSG_WOL | 0; |
64 | 64 | ||
65 | static int debug = 0x00007fff; /* defaults above */ | 65 | static int debug = -1; /* defaults above */ |
66 | module_param(debug, int, 0); | 66 | module_param(debug, int, 0664); |
67 | MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); | 67 | MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); |
68 | 68 | ||
69 | #define MSIX_IRQ 0 | 69 | #define MSIX_IRQ 0 |
70 | #define MSI_IRQ 1 | 70 | #define MSI_IRQ 1 |
71 | #define LEG_IRQ 2 | 71 | #define LEG_IRQ 2 |
72 | static int qlge_irq_type = MSIX_IRQ; | 72 | static int qlge_irq_type = MSIX_IRQ; |
73 | module_param(qlge_irq_type, int, MSIX_IRQ); | 73 | module_param(qlge_irq_type, int, 0664); |
74 | MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); | 74 | MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); |
75 | 75 | ||
76 | static int qlge_mpi_coredump; | 76 | static int qlge_mpi_coredump; |
diff --git a/drivers/net/tile/Makefile b/drivers/net/tile/Makefile new file mode 100644 index 000000000000..f634f142cab4 --- /dev/null +++ b/drivers/net/tile/Makefile | |||
@@ -0,0 +1,10 @@ | |||
1 | # | ||
2 | # Makefile for the TILE on-chip networking support. | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_TILE_NET) += tile_net.o | ||
6 | ifdef CONFIG_TILEGX | ||
7 | tile_net-objs := tilegx.o mpipe.o iorpc_mpipe.o dma_queue.o | ||
8 | else | ||
9 | tile_net-objs := tilepro.o | ||
10 | endif | ||
diff --git a/drivers/net/tile/tilepro.c b/drivers/net/tile/tilepro.c new file mode 100644 index 000000000000..0e6bac5ec65b --- /dev/null +++ b/drivers/net/tile/tilepro.c | |||
@@ -0,0 +1,2406 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/moduleparam.h> | ||
18 | #include <linux/sched.h> | ||
19 | #include <linux/kernel.h> /* printk() */ | ||
20 | #include <linux/slab.h> /* kmalloc() */ | ||
21 | #include <linux/errno.h> /* error codes */ | ||
22 | #include <linux/types.h> /* size_t */ | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/in.h> | ||
25 | #include <linux/netdevice.h> /* struct device, and other headers */ | ||
26 | #include <linux/etherdevice.h> /* eth_type_trans */ | ||
27 | #include <linux/skbuff.h> | ||
28 | #include <linux/ioctl.h> | ||
29 | #include <linux/cdev.h> | ||
30 | #include <linux/hugetlb.h> | ||
31 | #include <linux/in6.h> | ||
32 | #include <linux/timer.h> | ||
33 | #include <linux/io.h> | ||
34 | #include <asm/checksum.h> | ||
35 | #include <asm/homecache.h> | ||
36 | |||
37 | #include <hv/drv_xgbe_intf.h> | ||
38 | #include <hv/drv_xgbe_impl.h> | ||
39 | #include <hv/hypervisor.h> | ||
40 | #include <hv/netio_intf.h> | ||
41 | |||
42 | /* For TSO */ | ||
43 | #include <linux/ip.h> | ||
44 | #include <linux/tcp.h> | ||
45 | |||
46 | |||
47 | /* There is no singlethread_cpu, so schedule work on the current cpu. */ | ||
48 | #define singlethread_cpu -1 | ||
49 | |||
50 | |||
51 | /* | ||
52 | * First, "tile_net_init_module()" initializes all four "devices" which | ||
53 | * can be used by linux. | ||
54 | * | ||
55 | * Then, "ifconfig DEVICE up" calls "tile_net_open()", which analyzes | ||
56 | * the network cpus, then uses "tile_net_open_aux()" to initialize | ||
57 | * LIPP/LEPP, and then uses "tile_net_open_inner()" to register all | ||
58 | * the tiles, provide buffers to LIPP, allow ingress to start, and | ||
59 | * turn on hypervisor interrupt handling (and NAPI) on all tiles. | ||
60 | * | ||
61 | * If registration fails due to the link being down, then "retry_work" | ||
62 | * is used to keep calling "tile_net_open_inner()" until it succeeds. | ||
63 | * | ||
64 | * If "ifconfig DEVICE down" is called, it uses "tile_net_stop()" to | ||
65 | * stop egress, drain the LIPP buffers, unregister all the tiles, stop | ||
66 | * LIPP/LEPP, and wipe the LEPP queue. | ||
67 | * | ||
68 | * We start out with the ingress interrupt enabled on each CPU. When | ||
69 | * this interrupt fires, we disable it, and call "napi_schedule()". | ||
70 | * This will cause "tile_net_poll()" to be called, which will pull | ||
71 | * packets from the netio queue, filtering them out, or passing them | ||
72 | * to "netif_receive_skb()". If our budget is exhausted, we will | ||
73 | * return, knowing we will be called again later. Otherwise, we | ||
74 | * reenable the ingress interrupt, and call "napi_complete()". | ||
75 | * | ||
76 | * | ||
77 | * NOTE: The use of "native_driver" ensures that EPP exists, and that | ||
78 | * "epp_sendv" is legal, and that "LIPP" is being used. | ||
79 | * | ||
80 | * NOTE: Failing to free completions for an arbitrarily long time | ||
81 | * (which is defined to be illegal) does in fact cause bizarre | ||
82 | * problems. The "egress_timer" helps prevent this from happening. | ||
83 | * | ||
84 | * NOTE: The egress code can be interrupted by the interrupt handler. | ||
85 | */ | ||
86 | |||
87 | |||
88 | /* HACK: Allow use of "jumbo" packets. */ | ||
89 | /* This should be 1500 if "jumbo" is not set in LIPP. */ | ||
90 | /* This should be at most 10226 (10240 - 14) if "jumbo" is set in LIPP. */ | ||
91 | /* ISSUE: This has not been thoroughly tested (except at 1500). */ | ||
92 | #define TILE_NET_MTU 1500 | ||
93 | |||
94 | /* HACK: Define to support GSO. */ | ||
95 | /* ISSUE: This may actually hurt performance of the TCP blaster. */ | ||
96 | /* #define TILE_NET_GSO */ | ||
97 | |||
98 | /* Define this to collapse "duplicate" acks. */ | ||
99 | /* #define IGNORE_DUP_ACKS */ | ||
100 | |||
101 | /* HACK: Define this to verify incoming packets. */ | ||
102 | /* #define TILE_NET_VERIFY_INGRESS */ | ||
103 | |||
104 | /* Use 3000 to enable the Linux Traffic Control (QoS) layer, else 0. */ | ||
105 | #define TILE_NET_TX_QUEUE_LEN 0 | ||
106 | |||
107 | /* Define to dump packets (prints out the whole packet on tx and rx). */ | ||
108 | /* #define TILE_NET_DUMP_PACKETS */ | ||
109 | |||
110 | /* Define to enable debug spew (all PDEBUG's are enabled). */ | ||
111 | /* #define TILE_NET_DEBUG */ | ||
112 | |||
113 | |||
114 | /* Define to activate paranoia checks. */ | ||
115 | /* #define TILE_NET_PARANOIA */ | ||
116 | |||
117 | /* Default transmit lockup timeout period, in jiffies. */ | ||
118 | #define TILE_NET_TIMEOUT (5 * HZ) | ||
119 | |||
120 | /* Default retry interval for bringing up the NetIO interface, in jiffies. */ | ||
121 | #define TILE_NET_RETRY_INTERVAL (5 * HZ) | ||
122 | |||
123 | /* Number of ports (xgbe0, xgbe1, gbe0, gbe1). */ | ||
124 | #define TILE_NET_DEVS 4 | ||
125 | |||
126 | |||
127 | |||
128 | /* Paranoia. */ | ||
129 | #if NET_IP_ALIGN != LIPP_PACKET_PADDING | ||
130 | #error "NET_IP_ALIGN must match LIPP_PACKET_PADDING." | ||
131 | #endif | ||
132 | |||
133 | |||
134 | /* Debug print. */ | ||
135 | #ifdef TILE_NET_DEBUG | ||
136 | #define PDEBUG(fmt, args...) net_printk(fmt, ## args) | ||
137 | #else | ||
138 | #define PDEBUG(fmt, args...) | ||
139 | #endif | ||
140 | |||
141 | |||
142 | MODULE_AUTHOR("Tilera"); | ||
143 | MODULE_LICENSE("GPL"); | ||
144 | |||
145 | |||
146 | #define IS_MULTICAST(mac_addr) \ | ||
147 | (((u8 *)(mac_addr))[0] & 0x01) | ||
148 | |||
149 | #define IS_BROADCAST(mac_addr) \ | ||
150 | (((u16 *)(mac_addr))[0] == 0xffff) | ||
151 | |||
152 | |||
153 | /* | ||
154 | * Queue of incoming packets for a specific cpu and device. | ||
155 | * | ||
156 | * Includes a pointer to the "system" data, and the actual "user" data. | ||
157 | */ | ||
158 | struct tile_netio_queue { | ||
159 | netio_queue_impl_t *__system_part; | ||
160 | netio_queue_user_impl_t __user_part; | ||
161 | |||
162 | }; | ||
163 | |||
164 | |||
165 | /* | ||
166 | * Statistics counters for a specific cpu and device. | ||
167 | */ | ||
168 | struct tile_net_stats_t { | ||
169 | u32 rx_packets; | ||
170 | u32 rx_bytes; | ||
171 | u32 tx_packets; | ||
172 | u32 tx_bytes; | ||
173 | }; | ||
174 | |||
175 | |||
176 | /* | ||
177 | * Info for a specific cpu and device. | ||
178 | * | ||
179 | * ISSUE: There is a "dev" pointer in "napi" as well. | ||
180 | */ | ||
181 | struct tile_net_cpu { | ||
182 | /* The NAPI struct. */ | ||
183 | struct napi_struct napi; | ||
184 | /* Packet queue. */ | ||
185 | struct tile_netio_queue queue; | ||
186 | /* Statistics. */ | ||
187 | struct tile_net_stats_t stats; | ||
188 | /* ISSUE: Is this needed? */ | ||
189 | bool napi_enabled; | ||
190 | /* True if this tile has successfully registered with the IPP. */ | ||
191 | bool registered; | ||
192 | /* True if the link was down last time we tried to register. */ | ||
193 | bool link_down; | ||
194 | /* True if "egress_timer" is scheduled. */ | ||
195 | bool egress_timer_scheduled; | ||
196 | /* Number of small sk_buffs which must still be provided. */ | ||
197 | unsigned int num_needed_small_buffers; | ||
198 | /* Number of large sk_buffs which must still be provided. */ | ||
199 | unsigned int num_needed_large_buffers; | ||
200 | /* A timer for handling egress completions. */ | ||
201 | struct timer_list egress_timer; | ||
202 | }; | ||
203 | |||
204 | |||
205 | /* | ||
206 | * Info for a specific device. | ||
207 | */ | ||
208 | struct tile_net_priv { | ||
209 | /* Our network device. */ | ||
210 | struct net_device *dev; | ||
211 | /* The actual egress queue. */ | ||
212 | lepp_queue_t *epp_queue; | ||
213 | /* Protects "epp_queue->cmd_tail" and "epp_queue->comp_tail" */ | ||
214 | spinlock_t cmd_lock; | ||
215 | /* Protects "epp_queue->comp_head". */ | ||
216 | spinlock_t comp_lock; | ||
217 | /* The hypervisor handle for this interface. */ | ||
218 | int hv_devhdl; | ||
219 | /* The intr bit mask that IDs this device. */ | ||
220 | u32 intr_id; | ||
221 | /* True iff "tile_net_open_aux()" has succeeded. */ | ||
222 | int partly_opened; | ||
223 | /* True iff "tile_net_open_inner()" has succeeded. */ | ||
224 | int fully_opened; | ||
225 | /* Effective network cpus. */ | ||
226 | struct cpumask network_cpus_map; | ||
227 | /* Number of network cpus. */ | ||
228 | int network_cpus_count; | ||
229 | /* Credits per network cpu. */ | ||
230 | int network_cpus_credits; | ||
231 | /* Network stats. */ | ||
232 | struct net_device_stats stats; | ||
233 | /* For NetIO bringup retries. */ | ||
234 | struct delayed_work retry_work; | ||
235 | /* Quick access to per cpu data. */ | ||
236 | struct tile_net_cpu *cpu[NR_CPUS]; | ||
237 | }; | ||
238 | |||
239 | |||
240 | /* | ||
241 | * The actual devices (xgbe0, xgbe1, gbe0, gbe1). | ||
242 | */ | ||
243 | static struct net_device *tile_net_devs[TILE_NET_DEVS]; | ||
244 | |||
245 | /* | ||
246 | * The "tile_net_cpu" structures for each device. | ||
247 | */ | ||
248 | static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe0); | ||
249 | static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe1); | ||
250 | static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe0); | ||
251 | static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe1); | ||
252 | |||
253 | |||
254 | /* | ||
255 | * True if "network_cpus" was specified. | ||
256 | */ | ||
257 | static bool network_cpus_used; | ||
258 | |||
259 | /* | ||
260 | * The actual cpus in "network_cpus". | ||
261 | */ | ||
262 | static struct cpumask network_cpus_map; | ||
263 | |||
264 | |||
265 | |||
266 | #ifdef TILE_NET_DEBUG | ||
267 | /* | ||
268 | * printk with extra stuff. | ||
269 | * | ||
270 | * We print the CPU we're running in brackets. | ||
271 | */ | ||
272 | static void net_printk(char *fmt, ...) | ||
273 | { | ||
274 | int i; | ||
275 | int len; | ||
276 | va_list args; | ||
277 | static char buf[256]; | ||
278 | |||
279 | len = sprintf(buf, "tile_net[%2.2d]: ", smp_processor_id()); | ||
280 | va_start(args, fmt); | ||
281 | i = vscnprintf(buf + len, sizeof(buf) - len - 1, fmt, args); | ||
282 | va_end(args); | ||
283 | buf[255] = '\0'; | ||
284 | pr_notice(buf); | ||
285 | } | ||
286 | #endif | ||
287 | |||
288 | |||
289 | #ifdef TILE_NET_DUMP_PACKETS | ||
290 | /* | ||
291 | * Dump a packet. | ||
292 | */ | ||
293 | static void dump_packet(unsigned char *data, unsigned long length, char *s) | ||
294 | { | ||
295 | unsigned long i; | ||
296 | static unsigned int count; | ||
297 | |||
298 | pr_info("dump_packet(data %p, length 0x%lx s %s count 0x%x)\n", | ||
299 | data, length, s, count++); | ||
300 | |||
301 | pr_info("\n"); | ||
302 | |||
303 | for (i = 0; i < length; i++) { | ||
304 | if ((i & 0xf) == 0) | ||
305 | sprintf(buf, "%8.8lx:", i); | ||
306 | sprintf(buf + strlen(buf), " %2.2x", data[i]); | ||
307 | if ((i & 0xf) == 0xf || i == length - 1) | ||
308 | pr_info("%s\n", buf); | ||
309 | } | ||
310 | } | ||
311 | #endif | ||
312 | |||
313 | |||
314 | /* | ||
315 | * Provide support for the __netio_fastio1() swint | ||
316 | * (see <hv/drv_xgbe_intf.h> for how it is used). | ||
317 | * | ||
318 | * The fastio swint2 call may clobber all the caller-saved registers. | ||
319 | * It rarely clobbers memory, but we allow for the possibility in | ||
320 | * the signature just to be on the safe side. | ||
321 | * | ||
322 | * Also, gcc doesn't seem to allow an input operand to be | ||
323 | * clobbered, so we fake it with dummy outputs. | ||
324 | * | ||
325 | * This function can't be static because of the way it is declared | ||
326 | * in the netio header. | ||
327 | */ | ||
328 | inline int __netio_fastio1(u32 fastio_index, u32 arg0) | ||
329 | { | ||
330 | long result, clobber_r1, clobber_r10; | ||
331 | asm volatile("swint2" | ||
332 | : "=R00" (result), | ||
333 | "=R01" (clobber_r1), "=R10" (clobber_r10) | ||
334 | : "R10" (fastio_index), "R01" (arg0) | ||
335 | : "memory", "r2", "r3", "r4", | ||
336 | "r5", "r6", "r7", "r8", "r9", | ||
337 | "r11", "r12", "r13", "r14", | ||
338 | "r15", "r16", "r17", "r18", "r19", | ||
339 | "r20", "r21", "r22", "r23", "r24", | ||
340 | "r25", "r26", "r27", "r28", "r29"); | ||
341 | return result; | ||
342 | } | ||
343 | |||
344 | |||
345 | /* | ||
346 | * Provide a linux buffer to LIPP. | ||
347 | */ | ||
348 | static void tile_net_provide_linux_buffer(struct tile_net_cpu *info, | ||
349 | void *va, bool small) | ||
350 | { | ||
351 | struct tile_netio_queue *queue = &info->queue; | ||
352 | |||
353 | /* Convert "va" and "small" to "linux_buffer_t". */ | ||
354 | unsigned int buffer = ((unsigned int)(__pa(va) >> 7) << 1) + small; | ||
355 | |||
356 | __netio_fastio_free_buffer(queue->__user_part.__fastio_index, buffer); | ||
357 | } | ||
358 | |||
359 | |||
360 | /* | ||
361 | * Provide a linux buffer for LIPP. | ||
362 | */ | ||
363 | static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info, | ||
364 | bool small) | ||
365 | { | ||
366 | /* ISSUE: What should we use here? */ | ||
367 | unsigned int large_size = NET_IP_ALIGN + TILE_NET_MTU + 100; | ||
368 | |||
369 | /* Round up to avoid "false sharing" with the last cache line. */ | ||
370 | unsigned int buffer_size = | ||
371 | (((small ? LIPP_SMALL_PACKET_SIZE : large_size) + | ||
372 | CHIP_L2_LINE_SIZE() - 1) & -CHIP_L2_LINE_SIZE()); | ||
373 | |||
374 | /* | ||
375 | * ISSUE: Since CPAs are 38 bits, and we can only encode the | ||
376 | * high 31 bits in a "linux_buffer_t", the low 7 bits must be | ||
377 | * zero, and thus, we must align the actual "va" mod 128. | ||
378 | */ | ||
379 | const unsigned long align = 128; | ||
380 | |||
381 | struct sk_buff *skb; | ||
382 | void *va; | ||
383 | |||
384 | struct sk_buff **skb_ptr; | ||
385 | |||
386 | /* Note that "dev_alloc_skb()" adds NET_SKB_PAD more bytes, */ | ||
387 | /* and also "reserves" that many bytes. */ | ||
388 | /* ISSUE: Can we "share" the NET_SKB_PAD bytes with "skb_ptr"? */ | ||
389 | int len = sizeof(*skb_ptr) + align + buffer_size; | ||
390 | |||
391 | while (1) { | ||
392 | |||
393 | /* Allocate (or fail). */ | ||
394 | skb = dev_alloc_skb(len); | ||
395 | if (skb == NULL) | ||
396 | return false; | ||
397 | |||
398 | /* Make room for a back-pointer to 'skb'. */ | ||
399 | skb_reserve(skb, sizeof(*skb_ptr)); | ||
400 | |||
401 | /* Make sure we are aligned. */ | ||
402 | skb_reserve(skb, -(long)skb->data & (align - 1)); | ||
403 | |||
404 | /* This address is given to IPP. */ | ||
405 | va = skb->data; | ||
406 | |||
407 | if (small) | ||
408 | break; | ||
409 | |||
410 | /* ISSUE: This has never been observed! */ | ||
411 | /* Large buffers must not span a huge page. */ | ||
412 | if (((((long)va & ~HPAGE_MASK) + 1535) & HPAGE_MASK) == 0) | ||
413 | break; | ||
414 | pr_err("Leaking unaligned linux buffer at %p.\n", va); | ||
415 | } | ||
416 | |||
417 | /* Skip two bytes to satisfy LIPP assumptions. */ | ||
418 | /* Note that this aligns IP on a 16 byte boundary. */ | ||
419 | /* ISSUE: Do this when the packet arrives? */ | ||
420 | skb_reserve(skb, NET_IP_ALIGN); | ||
421 | |||
422 | /* Save a back-pointer to 'skb'. */ | ||
423 | skb_ptr = va - sizeof(*skb_ptr); | ||
424 | *skb_ptr = skb; | ||
425 | |||
426 | /* Invalidate the packet buffer. */ | ||
427 | if (!hash_default) | ||
428 | __inv_buffer(skb->data, buffer_size); | ||
429 | |||
430 | /* Make sure "skb_ptr" has been flushed. */ | ||
431 | __insn_mf(); | ||
432 | |||
433 | #ifdef TILE_NET_PARANOIA | ||
434 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
435 | if (hash_default) { | ||
436 | HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)va); | ||
437 | if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3) | ||
438 | panic("Non-coherent ingress buffer!"); | ||
439 | } | ||
440 | #endif | ||
441 | #endif | ||
442 | |||
443 | /* Provide the new buffer. */ | ||
444 | tile_net_provide_linux_buffer(info, va, small); | ||
445 | |||
446 | return true; | ||
447 | } | ||
448 | |||
449 | |||
450 | /* | ||
451 | * Provide linux buffers for LIPP. | ||
452 | */ | ||
453 | static void tile_net_provide_needed_buffers(struct tile_net_cpu *info) | ||
454 | { | ||
455 | while (info->num_needed_small_buffers != 0) { | ||
456 | if (!tile_net_provide_needed_buffer(info, true)) | ||
457 | goto oops; | ||
458 | info->num_needed_small_buffers--; | ||
459 | } | ||
460 | |||
461 | while (info->num_needed_large_buffers != 0) { | ||
462 | if (!tile_net_provide_needed_buffer(info, false)) | ||
463 | goto oops; | ||
464 | info->num_needed_large_buffers--; | ||
465 | } | ||
466 | |||
467 | return; | ||
468 | |||
469 | oops: | ||
470 | |||
471 | /* Add a description to the page allocation failure dump. */ | ||
472 | pr_notice("Could not provide a linux buffer to LIPP.\n"); | ||
473 | } | ||
474 | |||
475 | |||
476 | /* | ||
477 | * Grab some LEPP completions, and store them in "comps", of size | ||
478 | * "comps_size", and return the number of completions which were | ||
479 | * stored, so the caller can free them. | ||
480 | * | ||
481 | * If "pending" is not NULL, it will be set to true if there might | ||
482 | * still be some pending completions caused by this tile, else false. | ||
483 | */ | ||
484 | static unsigned int tile_net_lepp_grab_comps(struct net_device *dev, | ||
485 | struct sk_buff *comps[], | ||
486 | unsigned int comps_size, | ||
487 | bool *pending) | ||
488 | { | ||
489 | struct tile_net_priv *priv = netdev_priv(dev); | ||
490 | |||
491 | lepp_queue_t *eq = priv->epp_queue; | ||
492 | |||
493 | unsigned int n = 0; | ||
494 | |||
495 | unsigned int comp_head; | ||
496 | unsigned int comp_busy; | ||
497 | unsigned int comp_tail; | ||
498 | |||
499 | spin_lock(&priv->comp_lock); | ||
500 | |||
501 | comp_head = eq->comp_head; | ||
502 | comp_busy = eq->comp_busy; | ||
503 | comp_tail = eq->comp_tail; | ||
504 | |||
505 | while (comp_head != comp_busy && n < comps_size) { | ||
506 | comps[n++] = eq->comps[comp_head]; | ||
507 | LEPP_QINC(comp_head); | ||
508 | } | ||
509 | |||
510 | if (pending != NULL) | ||
511 | *pending = (comp_head != comp_tail); | ||
512 | |||
513 | eq->comp_head = comp_head; | ||
514 | |||
515 | spin_unlock(&priv->comp_lock); | ||
516 | |||
517 | return n; | ||
518 | } | ||
519 | |||
520 | |||
521 | /* | ||
522 | * Make sure the egress timer is scheduled. | ||
523 | * | ||
524 | * Note that we use "schedule if not scheduled" logic instead of the more | ||
525 | * obvious "reschedule" logic, because "reschedule" is fairly expensive. | ||
526 | */ | ||
527 | static void tile_net_schedule_egress_timer(struct tile_net_cpu *info) | ||
528 | { | ||
529 | if (!info->egress_timer_scheduled) { | ||
530 | mod_timer_pinned(&info->egress_timer, jiffies + 1); | ||
531 | info->egress_timer_scheduled = true; | ||
532 | } | ||
533 | } | ||
534 | |||
535 | |||
536 | /* | ||
537 | * The "function" for "info->egress_timer". | ||
538 | * | ||
539 | * This timer will reschedule itself as long as there are any pending | ||
540 | * completions expected (on behalf of any tile). | ||
541 | * | ||
542 | * ISSUE: Realistically, will the timer ever stop scheduling itself? | ||
543 | * | ||
544 | * ISSUE: This timer is almost never actually needed, so just use a global | ||
545 | * timer that can run on any tile. | ||
546 | * | ||
547 | * ISSUE: Maybe instead track number of expected completions, and free | ||
548 | * only that many, resetting to zero if "pending" is ever false. | ||
549 | */ | ||
550 | static void tile_net_handle_egress_timer(unsigned long arg) | ||
551 | { | ||
552 | struct tile_net_cpu *info = (struct tile_net_cpu *)arg; | ||
553 | struct net_device *dev = info->napi.dev; | ||
554 | |||
555 | struct sk_buff *olds[32]; | ||
556 | unsigned int wanted = 32; | ||
557 | unsigned int i, nolds = 0; | ||
558 | bool pending; | ||
559 | |||
560 | /* The timer is no longer scheduled. */ | ||
561 | info->egress_timer_scheduled = false; | ||
562 | |||
563 | nolds = tile_net_lepp_grab_comps(dev, olds, wanted, &pending); | ||
564 | |||
565 | for (i = 0; i < nolds; i++) | ||
566 | kfree_skb(olds[i]); | ||
567 | |||
568 | /* Reschedule timer if needed. */ | ||
569 | if (pending) | ||
570 | tile_net_schedule_egress_timer(info); | ||
571 | } | ||
572 | |||
573 | |||
574 | #ifdef IGNORE_DUP_ACKS | ||
575 | |||
576 | /* | ||
577 | * Help detect "duplicate" ACKs. These are sequential packets (for a | ||
578 | * given flow) which are exactly 66 bytes long, sharing everything but | ||
579 | * ID=2@0x12, Hsum=2@0x18, Ack=4@0x2a, WinSize=2@0x30, Csum=2@0x32, | ||
580 | * Tstamps=10@0x38. The ID's are +1, the Hsum's are -1, the Ack's are | ||
581 | * +N, and the Tstamps are usually identical. | ||
582 | * | ||
583 | * NOTE: Apparently truly duplicate acks (with identical "ack" values), | ||
584 | * should not be collapsed, as they are used for some kind of flow control. | ||
585 | */ | ||
586 | static bool is_dup_ack(char *s1, char *s2, unsigned int len) | ||
587 | { | ||
588 | int i; | ||
589 | |||
590 | unsigned long long ignorable = 0; | ||
591 | |||
592 | /* Identification. */ | ||
593 | ignorable |= (1ULL << 0x12); | ||
594 | ignorable |= (1ULL << 0x13); | ||
595 | |||
596 | /* Header checksum. */ | ||
597 | ignorable |= (1ULL << 0x18); | ||
598 | ignorable |= (1ULL << 0x19); | ||
599 | |||
600 | /* ACK. */ | ||
601 | ignorable |= (1ULL << 0x2a); | ||
602 | ignorable |= (1ULL << 0x2b); | ||
603 | ignorable |= (1ULL << 0x2c); | ||
604 | ignorable |= (1ULL << 0x2d); | ||
605 | |||
606 | /* WinSize. */ | ||
607 | ignorable |= (1ULL << 0x30); | ||
608 | ignorable |= (1ULL << 0x31); | ||
609 | |||
610 | /* Checksum. */ | ||
611 | ignorable |= (1ULL << 0x32); | ||
612 | ignorable |= (1ULL << 0x33); | ||
613 | |||
614 | for (i = 0; i < len; i++, ignorable >>= 1) { | ||
615 | |||
616 | if ((ignorable & 1) || (s1[i] == s2[i])) | ||
617 | continue; | ||
618 | |||
619 | #ifdef TILE_NET_DEBUG | ||
620 | /* HACK: Mention non-timestamp diffs. */ | ||
621 | if (i < 0x38 && i != 0x2f && | ||
622 | net_ratelimit()) | ||
623 | pr_info("Diff at 0x%x\n", i); | ||
624 | #endif | ||
625 | |||
626 | return false; | ||
627 | } | ||
628 | |||
629 | #ifdef TILE_NET_NO_SUPPRESS_DUP_ACKS | ||
630 | /* HACK: Do not suppress truly duplicate ACKs. */ | ||
631 | /* ISSUE: Is this actually necessary or helpful? */ | ||
632 | if (s1[0x2a] == s2[0x2a] && | ||
633 | s1[0x2b] == s2[0x2b] && | ||
634 | s1[0x2c] == s2[0x2c] && | ||
635 | s1[0x2d] == s2[0x2d]) { | ||
636 | return false; | ||
637 | } | ||
638 | #endif | ||
639 | |||
640 | return true; | ||
641 | } | ||
642 | |||
643 | #endif | ||
644 | |||
645 | |||
646 | |||
647 | /* | ||
648 | * Like "tile_net_handle_packets()", but just discard packets. | ||
649 | */ | ||
650 | static void tile_net_discard_packets(struct net_device *dev) | ||
651 | { | ||
652 | struct tile_net_priv *priv = netdev_priv(dev); | ||
653 | int my_cpu = smp_processor_id(); | ||
654 | struct tile_net_cpu *info = priv->cpu[my_cpu]; | ||
655 | struct tile_netio_queue *queue = &info->queue; | ||
656 | netio_queue_impl_t *qsp = queue->__system_part; | ||
657 | netio_queue_user_impl_t *qup = &queue->__user_part; | ||
658 | |||
659 | while (qup->__packet_receive_read != | ||
660 | qsp->__packet_receive_queue.__packet_write) { | ||
661 | |||
662 | int index = qup->__packet_receive_read; | ||
663 | |||
664 | int index2_aux = index + sizeof(netio_pkt_t); | ||
665 | int index2 = | ||
666 | ((index2_aux == | ||
667 | qsp->__packet_receive_queue.__last_packet_plus_one) ? | ||
668 | 0 : index2_aux); | ||
669 | |||
670 | netio_pkt_t *pkt = (netio_pkt_t *) | ||
671 | ((unsigned long) &qsp[1] + index); | ||
672 | |||
673 | /* Extract the "linux_buffer_t". */ | ||
674 | unsigned int buffer = pkt->__packet.word; | ||
675 | |||
676 | /* Convert "linux_buffer_t" to "va". */ | ||
677 | void *va = __va((phys_addr_t)(buffer >> 1) << 7); | ||
678 | |||
679 | /* Acquire the associated "skb". */ | ||
680 | struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); | ||
681 | struct sk_buff *skb = *skb_ptr; | ||
682 | |||
683 | kfree_skb(skb); | ||
684 | |||
685 | /* Consume this packet. */ | ||
686 | qup->__packet_receive_read = index2; | ||
687 | } | ||
688 | } | ||
689 | |||
690 | |||
691 | /* | ||
692 | * Handle the next packet. Return true if "processed", false if "filtered". | ||
693 | */ | ||
694 | static bool tile_net_poll_aux(struct tile_net_cpu *info, int index) | ||
695 | { | ||
696 | struct net_device *dev = info->napi.dev; | ||
697 | |||
698 | struct tile_netio_queue *queue = &info->queue; | ||
699 | netio_queue_impl_t *qsp = queue->__system_part; | ||
700 | netio_queue_user_impl_t *qup = &queue->__user_part; | ||
701 | struct tile_net_stats_t *stats = &info->stats; | ||
702 | |||
703 | int filter; | ||
704 | |||
705 | int index2_aux = index + sizeof(netio_pkt_t); | ||
706 | int index2 = | ||
707 | ((index2_aux == | ||
708 | qsp->__packet_receive_queue.__last_packet_plus_one) ? | ||
709 | 0 : index2_aux); | ||
710 | |||
711 | netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index); | ||
712 | |||
713 | netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt); | ||
714 | |||
715 | /* Extract the packet size. */ | ||
716 | unsigned long len = | ||
717 | (NETIO_PKT_CUSTOM_LENGTH(pkt) + | ||
718 | NET_IP_ALIGN - NETIO_PACKET_PADDING); | ||
719 | |||
720 | /* Extract the "linux_buffer_t". */ | ||
721 | unsigned int buffer = pkt->__packet.word; | ||
722 | |||
723 | /* Extract "small" (vs "large"). */ | ||
724 | bool small = ((buffer & 1) != 0); | ||
725 | |||
726 | /* Convert "linux_buffer_t" to "va". */ | ||
727 | void *va = __va((phys_addr_t)(buffer >> 1) << 7); | ||
728 | |||
729 | /* Extract the packet data pointer. */ | ||
730 | /* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */ | ||
731 | unsigned char *buf = va + NET_IP_ALIGN; | ||
732 | |||
733 | #ifdef IGNORE_DUP_ACKS | ||
734 | |||
735 | static int other; | ||
736 | static int final; | ||
737 | static int keep; | ||
738 | static int skip; | ||
739 | |||
740 | #endif | ||
741 | |||
742 | /* Invalidate the packet buffer. */ | ||
743 | if (!hash_default) | ||
744 | __inv_buffer(buf, len); | ||
745 | |||
746 | /* ISSUE: Is this needed? */ | ||
747 | dev->last_rx = jiffies; | ||
748 | |||
749 | #ifdef TILE_NET_DUMP_PACKETS | ||
750 | dump_packet(buf, len, "rx"); | ||
751 | #endif /* TILE_NET_DUMP_PACKETS */ | ||
752 | |||
753 | #ifdef TILE_NET_VERIFY_INGRESS | ||
754 | if (!NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt) && | ||
755 | NETIO_PKT_L4_CSUM_CALCULATED_M(metadata, pkt)) { | ||
756 | /* | ||
757 | * FIXME: This complains about UDP packets | ||
758 | * with a "zero" checksum (bug 6624). | ||
759 | */ | ||
760 | #ifdef TILE_NET_PANIC_ON_BAD | ||
761 | dump_packet(buf, len, "rx"); | ||
762 | panic("Bad L4 checksum."); | ||
763 | #else | ||
764 | pr_warning("Bad L4 checksum on %d byte packet.\n", len); | ||
765 | #endif | ||
766 | } | ||
767 | if (!NETIO_PKT_L3_CSUM_CORRECT_M(metadata, pkt) && | ||
768 | NETIO_PKT_L3_CSUM_CALCULATED_M(metadata, pkt)) { | ||
769 | dump_packet(buf, len, "rx"); | ||
770 | panic("Bad L3 checksum."); | ||
771 | } | ||
772 | switch (NETIO_PKT_STATUS_M(metadata, pkt)) { | ||
773 | case NETIO_PKT_STATUS_OVERSIZE: | ||
774 | if (len >= 64) { | ||
775 | dump_packet(buf, len, "rx"); | ||
776 | panic("Unexpected OVERSIZE."); | ||
777 | } | ||
778 | break; | ||
779 | case NETIO_PKT_STATUS_BAD: | ||
780 | #ifdef TILE_NET_PANIC_ON_BAD | ||
781 | dump_packet(buf, len, "rx"); | ||
782 | panic("Unexpected BAD packet."); | ||
783 | #else | ||
784 | pr_warning("Unexpected BAD %d byte packet.\n", len); | ||
785 | #endif | ||
786 | } | ||
787 | #endif | ||
788 | |||
789 | filter = 0; | ||
790 | |||
791 | if (!(dev->flags & IFF_UP)) { | ||
792 | /* Filter packets received before we're up. */ | ||
793 | filter = 1; | ||
794 | } else if (!(dev->flags & IFF_PROMISC)) { | ||
795 | /* | ||
796 | * FIXME: Implement HW multicast filter. | ||
797 | */ | ||
798 | if (!IS_MULTICAST(buf) && !IS_BROADCAST(buf)) { | ||
799 | /* Filter packets not for our address. */ | ||
800 | const u8 *mine = dev->dev_addr; | ||
801 | filter = compare_ether_addr(mine, buf); | ||
802 | } | ||
803 | } | ||
804 | |||
805 | #ifdef IGNORE_DUP_ACKS | ||
806 | |||
807 | if (len != 66) { | ||
808 | /* FIXME: Must check "is_tcp_ack(buf, len)" somehow. */ | ||
809 | |||
810 | other++; | ||
811 | |||
812 | } else if (index2 == | ||
813 | qsp->__packet_receive_queue.__packet_write) { | ||
814 | |||
815 | final++; | ||
816 | |||
817 | } else { | ||
818 | |||
819 | netio_pkt_t *pkt2 = (netio_pkt_t *) | ||
820 | ((unsigned long) &qsp[1] + index2); | ||
821 | |||
822 | netio_pkt_metadata_t *metadata2 = | ||
823 | NETIO_PKT_METADATA(pkt2); | ||
824 | |||
825 | /* Extract the packet size. */ | ||
826 | unsigned long len2 = | ||
827 | (NETIO_PKT_CUSTOM_LENGTH(pkt2) + | ||
828 | NET_IP_ALIGN - NETIO_PACKET_PADDING); | ||
829 | |||
830 | if (len2 == 66 && | ||
831 | NETIO_PKT_FLOW_HASH_M(metadata, pkt) == | ||
832 | NETIO_PKT_FLOW_HASH_M(metadata2, pkt2)) { | ||
833 | |||
834 | /* Extract the "linux_buffer_t". */ | ||
835 | unsigned int buffer2 = pkt2->__packet.word; | ||
836 | |||
837 | /* Convert "linux_buffer_t" to "va". */ | ||
838 | void *va2 = | ||
839 | __va((phys_addr_t)(buffer2 >> 1) << 7); | ||
840 | |||
841 | /* Extract the packet data pointer. */ | ||
842 | /* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */ | ||
843 | unsigned char *buf2 = va2 + NET_IP_ALIGN; | ||
844 | |||
845 | /* Invalidate the packet buffer. */ | ||
846 | if (!hash_default) | ||
847 | __inv_buffer(buf2, len2); | ||
848 | |||
849 | if (is_dup_ack(buf, buf2, len)) { | ||
850 | skip++; | ||
851 | filter = 1; | ||
852 | } else { | ||
853 | keep++; | ||
854 | } | ||
855 | } | ||
856 | } | ||
857 | |||
858 | if (net_ratelimit()) | ||
859 | pr_info("Other %d Final %d Keep %d Skip %d.\n", | ||
860 | other, final, keep, skip); | ||
861 | |||
862 | #endif | ||
863 | |||
864 | if (filter) { | ||
865 | |||
866 | /* ISSUE: Update "drop" statistics? */ | ||
867 | |||
868 | tile_net_provide_linux_buffer(info, va, small); | ||
869 | |||
870 | } else { | ||
871 | |||
872 | /* Acquire the associated "skb". */ | ||
873 | struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); | ||
874 | struct sk_buff *skb = *skb_ptr; | ||
875 | |||
876 | /* Paranoia. */ | ||
877 | if (skb->data != buf) | ||
878 | panic("Corrupt linux buffer from LIPP! " | ||
879 | "VA=%p, skb=%p, skb->data=%p\n", | ||
880 | va, skb, skb->data); | ||
881 | |||
882 | /* Encode the actual packet length. */ | ||
883 | skb_put(skb, len); | ||
884 | |||
885 | /* NOTE: This call also sets "skb->dev = dev". */ | ||
886 | skb->protocol = eth_type_trans(skb, dev); | ||
887 | |||
888 | /* ISSUE: Discard corrupt packets? */ | ||
889 | /* ISSUE: Discard packets with bad checksums? */ | ||
890 | |||
891 | /* Avoid recomputing TCP/UDP checksums. */ | ||
892 | if (NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt)) | ||
893 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
894 | |||
895 | netif_receive_skb(skb); | ||
896 | |||
897 | stats->rx_packets++; | ||
898 | stats->rx_bytes += len; | ||
899 | |||
900 | if (small) | ||
901 | info->num_needed_small_buffers++; | ||
902 | else | ||
903 | info->num_needed_large_buffers++; | ||
904 | } | ||
905 | |||
906 | /* Return four credits after every fourth packet. */ | ||
907 | if (--qup->__receive_credit_remaining == 0) { | ||
908 | u32 interval = qup->__receive_credit_interval; | ||
909 | qup->__receive_credit_remaining = interval; | ||
910 | __netio_fastio_return_credits(qup->__fastio_index, interval); | ||
911 | } | ||
912 | |||
913 | /* Consume this packet. */ | ||
914 | qup->__packet_receive_read = index2; | ||
915 | |||
916 | return !filter; | ||
917 | } | ||
918 | |||
919 | |||
920 | /* | ||
921 | * Handle some packets for the given device on the current CPU. | ||
922 | * | ||
923 | * ISSUE: The "rotting packet" race condition occurs if a packet | ||
924 | * arrives after the queue appears to be empty, and before the | ||
925 | * hypervisor interrupt is re-enabled. | ||
926 | */ | ||
927 | static int tile_net_poll(struct napi_struct *napi, int budget) | ||
928 | { | ||
929 | struct net_device *dev = napi->dev; | ||
930 | struct tile_net_priv *priv = netdev_priv(dev); | ||
931 | int my_cpu = smp_processor_id(); | ||
932 | struct tile_net_cpu *info = priv->cpu[my_cpu]; | ||
933 | struct tile_netio_queue *queue = &info->queue; | ||
934 | netio_queue_impl_t *qsp = queue->__system_part; | ||
935 | netio_queue_user_impl_t *qup = &queue->__user_part; | ||
936 | |||
937 | unsigned int work = 0; | ||
938 | |||
939 | while (1) { | ||
940 | int index = qup->__packet_receive_read; | ||
941 | if (index == qsp->__packet_receive_queue.__packet_write) | ||
942 | break; | ||
943 | |||
944 | if (tile_net_poll_aux(info, index)) { | ||
945 | if (++work >= budget) | ||
946 | goto done; | ||
947 | } | ||
948 | } | ||
949 | |||
950 | napi_complete(&info->napi); | ||
951 | |||
952 | /* Re-enable hypervisor interrupts. */ | ||
953 | enable_percpu_irq(priv->intr_id); | ||
954 | |||
955 | /* HACK: Avoid the "rotting packet" problem. */ | ||
956 | if (qup->__packet_receive_read != | ||
957 | qsp->__packet_receive_queue.__packet_write) | ||
958 | napi_schedule(&info->napi); | ||
959 | |||
960 | /* ISSUE: Handle completions? */ | ||
961 | |||
962 | done: | ||
963 | |||
964 | tile_net_provide_needed_buffers(info); | ||
965 | |||
966 | return work; | ||
967 | } | ||
968 | |||
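The poll routine above is a standard NAPI loop with one extra step: after napi_complete() and re-enabling the hypervisor interrupt, it re-checks the queue and reschedules itself if a packet arrived in that window (the "rotting packet" case). A generic sketch of that shape, with example_* placeholders rather than driver symbols:

	#include <linux/netdevice.h>

	/* Generic NAPI poll skeleton; the example_* helpers are placeholders. */
	static int example_poll(struct napi_struct *napi, int budget)
	{
		int work = 0;

		while (work < budget && example_rx_pending(napi->dev))
			work += example_rx_one(napi->dev);

		if (work < budget) {
			napi_complete(napi);
			example_enable_rx_irq(napi->dev);

			/* A packet may have arrived between the last emptiness
			 * check and the interrupt enable; poll again if so. */
			if (example_rx_pending(napi->dev))
				napi_schedule(napi);
		}

		return work;
	}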
969 | |||
970 | /* | ||
971 | * Handle an ingress interrupt for the given device on the current cpu. | ||
972 | */ | ||
973 | static irqreturn_t tile_net_handle_ingress_interrupt(int irq, void *dev_ptr) | ||
974 | { | ||
975 | struct net_device *dev = (struct net_device *)dev_ptr; | ||
976 | struct tile_net_priv *priv = netdev_priv(dev); | ||
977 | int my_cpu = smp_processor_id(); | ||
978 | struct tile_net_cpu *info = priv->cpu[my_cpu]; | ||
979 | |||
980 | /* Disable hypervisor interrupt. */ | ||
981 | disable_percpu_irq(priv->intr_id); | ||
982 | |||
983 | napi_schedule(&info->napi); | ||
984 | |||
985 | return IRQ_HANDLED; | ||
986 | } | ||
987 | |||
988 | |||
989 | /* | ||
990 | * One time initialization per interface. | ||
991 | */ | ||
992 | static int tile_net_open_aux(struct net_device *dev) | ||
993 | { | ||
994 | struct tile_net_priv *priv = netdev_priv(dev); | ||
995 | |||
996 | int ret; | ||
997 | int dummy; | ||
998 | unsigned int epp_lotar; | ||
999 | |||
1000 | /* | ||
1001 | * Find out where EPP memory should be homed. | ||
1002 | */ | ||
1003 | ret = hv_dev_pread(priv->hv_devhdl, 0, | ||
1004 | (HV_VirtAddr)&epp_lotar, sizeof(epp_lotar), | ||
1005 | NETIO_EPP_SHM_OFF); | ||
1006 | if (ret < 0) { | ||
1007 | pr_err("could not read epp_shm_queue lotar.\n"); | ||
1008 | return -EIO; | ||
1009 | } | ||
1010 | |||
1011 | /* | ||
1012 | * Home the page on the EPP. | ||
1013 | */ | ||
1014 | { | ||
1015 | int epp_home = hv_lotar_to_cpu(epp_lotar); | ||
1016 | struct page *page = virt_to_page(priv->epp_queue); | ||
1017 | homecache_change_page_home(page, 0, epp_home); | ||
1018 | } | ||
1019 | |||
1020 | /* | ||
1021 | * Register the EPP shared memory queue. | ||
1022 | */ | ||
1023 | { | ||
1024 | netio_ipp_address_t ea = { | ||
1025 | .va = 0, | ||
1026 | .pa = __pa(priv->epp_queue), | ||
1027 | .pte = hv_pte(0), | ||
1028 | .size = PAGE_SIZE, | ||
1029 | }; | ||
1030 | ea.pte = hv_pte_set_lotar(ea.pte, epp_lotar); | ||
1031 | ea.pte = hv_pte_set_mode(ea.pte, HV_PTE_MODE_CACHE_TILE_L3); | ||
1032 | ret = hv_dev_pwrite(priv->hv_devhdl, 0, | ||
1033 | (HV_VirtAddr)&ea, | ||
1034 | sizeof(ea), | ||
1035 | NETIO_EPP_SHM_OFF); | ||
1036 | if (ret < 0) | ||
1037 | return -EIO; | ||
1038 | } | ||
1039 | |||
1040 | /* | ||
1041 | * Start LIPP/LEPP. | ||
1042 | */ | ||
1043 | if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, | ||
1044 | sizeof(dummy), NETIO_IPP_START_SHIM_OFF) < 0) { | ||
1045 | pr_warning("Failed to start LIPP/LEPP.\n"); | ||
1046 | return -EIO; | ||
1047 | } | ||
1048 | |||
1049 | return 0; | ||
1050 | } | ||
1051 | |||
1052 | |||
1053 | /* | ||
1054 | * Register with hypervisor on each CPU. | ||
1055 | * | ||
1056 | * Strangely, this function does important things even if it "fails", | ||
1057 | * which is especially common if the link is not up yet. Hopefully | ||
1058 | * these things are all "harmless" if done twice! | ||
1059 | */ | ||
1060 | static void tile_net_register(void *dev_ptr) | ||
1061 | { | ||
1062 | struct net_device *dev = (struct net_device *)dev_ptr; | ||
1063 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1064 | int my_cpu = smp_processor_id(); | ||
1065 | struct tile_net_cpu *info; | ||
1066 | |||
1067 | struct tile_netio_queue *queue; | ||
1068 | |||
1069 | /* Only network cpus can receive packets. */ | ||
1070 | int queue_id = | ||
1071 | cpumask_test_cpu(my_cpu, &priv->network_cpus_map) ? 0 : 255; | ||
1072 | |||
1073 | netio_input_config_t config = { | ||
1074 | .flags = 0, | ||
1075 | .num_receive_packets = priv->network_cpus_credits, | ||
1076 | .queue_id = queue_id | ||
1077 | }; | ||
1078 | |||
1079 | int ret = 0; | ||
1080 | netio_queue_impl_t *queuep; | ||
1081 | |||
1082 | PDEBUG("tile_net_register(queue_id %d)\n", queue_id); | ||
1083 | |||
1084 | if (!strcmp(dev->name, "xgbe0")) | ||
1085 | info = &__get_cpu_var(hv_xgbe0); | ||
1086 | else if (!strcmp(dev->name, "xgbe1")) | ||
1087 | info = &__get_cpu_var(hv_xgbe1); | ||
1088 | else if (!strcmp(dev->name, "gbe0")) | ||
1089 | info = &__get_cpu_var(hv_gbe0); | ||
1090 | else if (!strcmp(dev->name, "gbe1")) | ||
1091 | info = &__get_cpu_var(hv_gbe1); | ||
1092 | else | ||
1093 | BUG(); | ||
1094 | |||
1095 | /* Initialize the egress timer. */ | ||
1096 | init_timer(&info->egress_timer); | ||
1097 | info->egress_timer.data = (long)info; | ||
1098 | info->egress_timer.function = tile_net_handle_egress_timer; | ||
1099 | |||
1100 | priv->cpu[my_cpu] = info; | ||
1101 | |||
1102 | /* | ||
1103 | * Register ourselves with the IPP. | ||
1104 | */ | ||
1105 | ret = hv_dev_pwrite(priv->hv_devhdl, 0, | ||
1106 | (HV_VirtAddr)&config, | ||
1107 | sizeof(netio_input_config_t), | ||
1108 | NETIO_IPP_INPUT_REGISTER_OFF); | ||
1109 | PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n", | ||
1110 | ret); | ||
1111 | if (ret < 0) { | ||
1112 | printk(KERN_DEBUG "hv_dev_pwrite NETIO_IPP_INPUT_REGISTER_OFF" | ||
1113 | " failure %d\n", ret); | ||
1114 | info->link_down = (ret == NETIO_LINK_DOWN); | ||
1115 | return; | ||
1116 | } | ||
1117 | |||
1118 | /* | ||
1119 | * Get the pointer to our queue's system part. | ||
1120 | */ | ||
1121 | |||
1122 | ret = hv_dev_pread(priv->hv_devhdl, 0, | ||
1123 | (HV_VirtAddr)&queuep, | ||
1124 | sizeof(netio_queue_impl_t *), | ||
1125 | NETIO_IPP_INPUT_REGISTER_OFF); | ||
1126 | PDEBUG("hv_dev_pread(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n", | ||
1127 | ret); | ||
1128 | PDEBUG("queuep %p\n", queuep); | ||
1129 | if (ret <= 0) { | ||
1130 | /* ISSUE: Shouldn't this be a fatal error? */ | ||
1131 | pr_err("hv_dev_pread NETIO_IPP_INPUT_REGISTER_OFF failure\n"); | ||
1132 | return; | ||
1133 | } | ||
1134 | |||
1135 | queue = &info->queue; | ||
1136 | |||
1137 | queue->__system_part = queuep; | ||
1138 | |||
1139 | memset(&queue->__user_part, 0, sizeof(netio_queue_user_impl_t)); | ||
1140 | |||
1141 | /* This is traditionally "config.num_receive_packets / 2". */ | ||
1142 | queue->__user_part.__receive_credit_interval = 4; | ||
1143 | queue->__user_part.__receive_credit_remaining = | ||
1144 | queue->__user_part.__receive_credit_interval; | ||
1145 | |||
1146 | /* | ||
1147 | * Get a fastio index from the hypervisor. | ||
1148 | * ISSUE: Shouldn't this check the result? | ||
1149 | */ | ||
1150 | ret = hv_dev_pread(priv->hv_devhdl, 0, | ||
1151 | (HV_VirtAddr)&queue->__user_part.__fastio_index, | ||
1152 | sizeof(queue->__user_part.__fastio_index), | ||
1153 | NETIO_IPP_GET_FASTIO_OFF); | ||
1154 | PDEBUG("hv_dev_pread(NETIO_IPP_GET_FASTIO_OFF) returned %d\n", ret); | ||
1155 | |||
1156 | netif_napi_add(dev, &info->napi, tile_net_poll, 64); | ||
1157 | |||
1158 | /* Now we are registered. */ | ||
1159 | info->registered = true; | ||
1160 | } | ||
1161 | |||
1162 | |||
1163 | /* | ||
1164 | * Unregister with hypervisor on each CPU. | ||
1165 | */ | ||
1166 | static void tile_net_unregister(void *dev_ptr) | ||
1167 | { | ||
1168 | struct net_device *dev = (struct net_device *)dev_ptr; | ||
1169 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1170 | int my_cpu = smp_processor_id(); | ||
1171 | struct tile_net_cpu *info = priv->cpu[my_cpu]; | ||
1172 | |||
1173 | int ret = 0; | ||
1174 | int dummy = 0; | ||
1175 | |||
1176 | /* Do nothing if never registered. */ | ||
1177 | if (info == NULL) | ||
1178 | return; | ||
1179 | |||
1180 | /* Do nothing if already unregistered. */ | ||
1181 | if (!info->registered) | ||
1182 | return; | ||
1183 | |||
1184 | /* | ||
1185 | * Unregister ourselves with LIPP. | ||
1186 | */ | ||
1187 | ret = hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, | ||
1188 | sizeof(dummy), NETIO_IPP_INPUT_UNREGISTER_OFF); | ||
1189 | PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_UNREGISTER_OFF) returned %d\n", | ||
1190 | ret); | ||
1191 | if (ret < 0) { | ||
1192 | /* FIXME: Just panic? */ | ||
1193 | pr_err("hv_dev_pwrite NETIO_IPP_INPUT_UNREGISTER_OFF" | ||
1194 | " failure %d\n", ret); | ||
1195 | } | ||
1196 | |||
1197 | /* | ||
1198 | * Discard all packets still in our NetIO queue. Hopefully, | ||
1199 | * once the unregister call is complete, there will be no | ||
1200 | * packets still in flight on the IDN. | ||
1201 | */ | ||
1202 | tile_net_discard_packets(dev); | ||
1203 | |||
1204 | /* Reset state. */ | ||
1205 | info->num_needed_small_buffers = 0; | ||
1206 | info->num_needed_large_buffers = 0; | ||
1207 | |||
1208 | /* Cancel egress timer. */ | ||
1209 | del_timer(&info->egress_timer); | ||
1210 | info->egress_timer_scheduled = false; | ||
1211 | |||
1212 | netif_napi_del(&info->napi); | ||
1213 | |||
1214 | /* Now we are unregistered. */ | ||
1215 | info->registered = false; | ||
1216 | } | ||
1217 | |||
1218 | |||
1219 | /* | ||
1220 | * Helper function for "tile_net_stop()". | ||
1221 | * | ||
1222 | * Also used to handle registration failure in "tile_net_open_inner()", | ||
1223 | * when "fully_opened" is known to be false, and the various extra | ||
1224 | * steps in "tile_net_stop()" are not necessary. ISSUE: It might be | ||
1225 | * simpler if we could just call "tile_net_stop()" anyway. | ||
1226 | */ | ||
1227 | static void tile_net_stop_aux(struct net_device *dev) | ||
1228 | { | ||
1229 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1230 | |||
1231 | int dummy = 0; | ||
1232 | |||
1233 | /* Unregister all tiles, so LIPP will stop delivering packets. */ | ||
1234 | on_each_cpu(tile_net_unregister, (void *)dev, 1); | ||
1235 | |||
1236 | /* Stop LIPP/LEPP. */ | ||
1237 | if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, | ||
1238 | sizeof(dummy), NETIO_IPP_STOP_SHIM_OFF) < 0) | ||
1239 | panic("Failed to stop LIPP/LEPP!\n"); | ||
1240 | |||
1241 | priv->partly_opened = 0; | ||
1242 | } | ||
1243 | |||
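Because each tile keeps its own NetIO registration state, the driver fans setup and teardown out to every cpu, as tile_net_stop_aux() does above with on_each_cpu() and tile_net_open_inner() does below with smp_call_function(). A minimal sketch of that idiom (placeholder names, not driver functions):

	#include <linux/smp.h>
	#include <linux/netdevice.h>

	/* Placeholder per-cpu callback: runs once on every online cpu. */
	static void example_percpu_teardown(void *dev_ptr)
	{
		/* Per-cpu cleanup for the net_device passed via dev_ptr goes here. */
	}

	static void example_teardown(struct net_device *dev)
	{
		/* The final "1" asks on_each_cpu() to wait for all cpus to finish. */
		on_each_cpu(example_percpu_teardown, dev, 1);
	}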
1244 | |||
1245 | /* | ||
1246 | * Disable ingress interrupts for the given device on the current cpu. | ||
1247 | */ | ||
1248 | static void tile_net_disable_intr(void *dev_ptr) | ||
1249 | { | ||
1250 | struct net_device *dev = (struct net_device *)dev_ptr; | ||
1251 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1252 | int my_cpu = smp_processor_id(); | ||
1253 | struct tile_net_cpu *info = priv->cpu[my_cpu]; | ||
1254 | |||
1255 | /* Disable hypervisor interrupt. */ | ||
1256 | disable_percpu_irq(priv->intr_id); | ||
1257 | |||
1258 | /* Disable NAPI if needed. */ | ||
1259 | if (info != NULL && info->napi_enabled) { | ||
1260 | napi_disable(&info->napi); | ||
1261 | info->napi_enabled = false; | ||
1262 | } | ||
1263 | } | ||
1264 | |||
1265 | |||
1266 | /* | ||
1267 | * Enable ingress interrupts for the given device on the current cpu. | ||
1268 | */ | ||
1269 | static void tile_net_enable_intr(void *dev_ptr) | ||
1270 | { | ||
1271 | struct net_device *dev = (struct net_device *)dev_ptr; | ||
1272 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1273 | int my_cpu = smp_processor_id(); | ||
1274 | struct tile_net_cpu *info = priv->cpu[my_cpu]; | ||
1275 | |||
1276 | /* Enable hypervisor interrupt. */ | ||
1277 | enable_percpu_irq(priv->intr_id); | ||
1278 | |||
1279 | /* Enable NAPI. */ | ||
1280 | napi_enable(&info->napi); | ||
1281 | info->napi_enabled = true; | ||
1282 | } | ||
1283 | |||
1284 | |||
1285 | /* | ||
1286 | * tile_net_open_inner does most of the work of bringing up the interface. | ||
1287 | * It's called from tile_net_open(), and also from tile_net_open_retry(). | ||
1288 | * The return value is 0 if the interface was brought up, < 0 if | ||
1289 | * tile_net_open() should return the return value as an error, and > 0 if | ||
1290 | * tile_net_open() should return success and schedule a work item to | ||
1291 | * periodically retry the bringup. | ||
1292 | */ | ||
1293 | static int tile_net_open_inner(struct net_device *dev) | ||
1294 | { | ||
1295 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1296 | int my_cpu = smp_processor_id(); | ||
1297 | struct tile_net_cpu *info; | ||
1298 | struct tile_netio_queue *queue; | ||
1299 | unsigned int irq; | ||
1300 | int i; | ||
1301 | |||
1302 | /* | ||
1303 | * First try to register just on the local CPU, and handle any | ||
1304 | * semi-expected "link down" failure specially. Note that we | ||
1305 | * do NOT call "tile_net_stop_aux()", unlike below. | ||
1306 | */ | ||
1307 | tile_net_register(dev); | ||
1308 | info = priv->cpu[my_cpu]; | ||
1309 | if (!info->registered) { | ||
1310 | if (info->link_down) | ||
1311 | return 1; | ||
1312 | return -EAGAIN; | ||
1313 | } | ||
1314 | |||
1315 | /* | ||
1316 | * Now register everywhere else. If any registration fails, | ||
1317 | * even for "link down" (which might not be possible), we | ||
1318 | * clean up using "tile_net_stop_aux()". | ||
1319 | */ | ||
1320 | smp_call_function(tile_net_register, (void *)dev, 1); | ||
1321 | for_each_online_cpu(i) { | ||
1322 | if (!priv->cpu[i]->registered) { | ||
1323 | tile_net_stop_aux(dev); | ||
1324 | return -EAGAIN; | ||
1325 | } | ||
1326 | } | ||
1327 | |||
1328 | queue = &info->queue; | ||
1329 | |||
1330 | /* | ||
1331 | * Set the device intr bit mask. | ||
1332 | * The tile_net_register above sets per tile __intr_id. | ||
1333 | */ | ||
1334 | priv->intr_id = queue->__system_part->__intr_id; | ||
1335 | BUG_ON(!priv->intr_id); | ||
1336 | |||
1337 | /* | ||
1338 | * Register the device interrupt handler. | ||
1339 | * The __ffs() function returns the index into the interrupt handler | ||
1340 | * table from the interrupt bit mask which should have one bit | ||
1341 | * and one bit only set. | ||
1342 | */ | ||
1343 | irq = __ffs(priv->intr_id); | ||
1344 | tile_irq_activate(irq, TILE_IRQ_PERCPU); | ||
1345 | BUG_ON(request_irq(irq, tile_net_handle_ingress_interrupt, | ||
1346 | 0, dev->name, (void *)dev) != 0); | ||
1347 | |||
1348 | /* ISSUE: How could "priv->fully_opened" ever be "true" here? */ | ||
1349 | |||
1350 | if (!priv->fully_opened) { | ||
1351 | |||
1352 | int dummy = 0; | ||
1353 | |||
1354 | /* Allocate initial buffers. */ | ||
1355 | |||
1356 | int max_buffers = | ||
1357 | priv->network_cpus_count * priv->network_cpus_credits; | ||
1358 | |||
1359 | info->num_needed_small_buffers = | ||
1360 | min(LIPP_SMALL_BUFFERS, max_buffers); | ||
1361 | |||
1362 | info->num_needed_large_buffers = | ||
1363 | min(LIPP_LARGE_BUFFERS, max_buffers); | ||
1364 | |||
1365 | tile_net_provide_needed_buffers(info); | ||
1366 | |||
1367 | if (info->num_needed_small_buffers != 0 || | ||
1368 | info->num_needed_large_buffers != 0) | ||
1369 | panic("Insufficient memory for buffer stack!"); | ||
1370 | |||
1371 | /* Start LIPP/LEPP and activate "ingress" at the shim. */ | ||
1372 | if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, | ||
1373 | sizeof(dummy), NETIO_IPP_INPUT_INIT_OFF) < 0) | ||
1374 | panic("Failed to activate the LIPP Shim!\n"); | ||
1375 | |||
1376 | priv->fully_opened = 1; | ||
1377 | } | ||
1378 | |||
1379 | /* On each tile, enable the hypervisor to trigger interrupts. */ | ||
1380 | /* ISSUE: Do this before starting LIPP/LEPP? */ | ||
1381 | on_each_cpu(tile_net_enable_intr, (void *)dev, 1); | ||
1382 | |||
1383 | /* Start our transmit queue. */ | ||
1384 | netif_start_queue(dev); | ||
1385 | |||
1386 | return 0; | ||
1387 | } | ||
1388 | |||
1389 | |||
1390 | /* | ||
1391 | * Called periodically to retry bringing up the NetIO interface, | ||
1392 | * if it doesn't come up cleanly during tile_net_open(). | ||
1393 | */ | ||
1394 | static void tile_net_open_retry(struct work_struct *w) | ||
1395 | { | ||
1396 | struct delayed_work *dw = | ||
1397 | container_of(w, struct delayed_work, work); | ||
1398 | |||
1399 | struct tile_net_priv *priv = | ||
1400 | container_of(dw, struct tile_net_priv, retry_work); | ||
1401 | |||
1402 | /* | ||
1403 | * Try to bring the NetIO interface up. If it fails, reschedule | ||
1404 | * ourselves to try again later; otherwise, tell Linux we now have | ||
1405 | * a working link. ISSUE: What if the return value is negative? | ||
1406 | */ | ||
1407 | if (tile_net_open_inner(priv->dev)) | ||
1408 | schedule_delayed_work_on(singlethread_cpu, &priv->retry_work, | ||
1409 | TILE_NET_RETRY_INTERVAL); | ||
1410 | else | ||
1411 | netif_carrier_on(priv->dev); | ||
1412 | } | ||
1413 | |||
1414 | |||
1415 | /* | ||
1416 | * Called when a network interface is made active. | ||
1417 | * | ||
1418 | * Returns 0 on success, negative value on failure. | ||
1419 | * | ||
1420 | * The open entry point is called when a network interface is made | ||
1421 | * active by the system (IFF_UP). At this point all resources needed | ||
1422 | * for transmit and receive operations are allocated, the interrupt | ||
1423 | * handler is registered with the OS, the watchdog timer is started, | ||
1424 | * and the stack is notified that the interface is ready. | ||
1425 | * | ||
1426 | * If the actual link is not available yet, then we tell Linux that | ||
1427 | * we have no carrier, and we keep checking until the link comes up. | ||
1428 | */ | ||
1429 | static int tile_net_open(struct net_device *dev) | ||
1430 | { | ||
1431 | int ret = 0; | ||
1432 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1433 | |||
1434 | /* | ||
1435 | * We rely on priv->partly_opened to tell us if this is the | ||
1436 | * first time this interface is being brought up. If it is | ||
1437 | * set, the IPP was already initialized and should not be | ||
1438 | * initialized again. | ||
1439 | */ | ||
1440 | if (!priv->partly_opened) { | ||
1441 | |||
1442 | int count; | ||
1443 | int credits; | ||
1444 | |||
1445 | /* Initialize LIPP/LEPP, and start the Shim. */ | ||
1446 | ret = tile_net_open_aux(dev); | ||
1447 | if (ret < 0) { | ||
1448 | pr_err("tile_net_open_aux failed: %d\n", ret); | ||
1449 | return ret; | ||
1450 | } | ||
1451 | |||
1452 | /* Analyze the network cpus. */ | ||
1453 | |||
1454 | if (network_cpus_used) | ||
1455 | cpumask_copy(&priv->network_cpus_map, | ||
1456 | &network_cpus_map); | ||
1457 | else | ||
1458 | cpumask_copy(&priv->network_cpus_map, cpu_online_mask); | ||
1459 | |||
1460 | |||
1461 | count = cpumask_weight(&priv->network_cpus_map); | ||
1462 | |||
1463 | /* Limit credits to available buffers, and apply min. */ | ||
1464 | credits = max(16, (LIPP_LARGE_BUFFERS / count) & ~1); | ||
1465 | |||
1466 | /* Apply "GBE" max limit. */ | ||
1467 | /* ISSUE: Use higher limit for XGBE? */ | ||
1468 | credits = min(NETIO_MAX_RECEIVE_PKTS, credits); | ||
1469 | |||
1470 | priv->network_cpus_count = count; | ||
1471 | priv->network_cpus_credits = credits; | ||
1472 | |||
1473 | #ifdef TILE_NET_DEBUG | ||
1474 | pr_info("Using %d network cpus, with %d credits each\n", | ||
1475 | priv->network_cpus_count, priv->network_cpus_credits); | ||
1476 | #endif | ||
1477 | |||
1478 | priv->partly_opened = 1; | ||
1479 | } | ||
1480 | |||
1481 | /* | ||
1482 | * Attempt to bring up the link. | ||
1483 | */ | ||
1484 | ret = tile_net_open_inner(dev); | ||
1485 | if (ret <= 0) { | ||
1486 | if (ret == 0) | ||
1487 | netif_carrier_on(dev); | ||
1488 | return ret; | ||
1489 | } | ||
1490 | |||
1491 | /* | ||
1492 | * We were unable to bring up the NetIO interface, but we want to | ||
1493 | * try again in a little bit. Tell Linux that we have no carrier | ||
1494 | * so it doesn't try to use the interface before the link comes up | ||
1495 | * and then remember to try again later. | ||
1496 | */ | ||
1497 | netif_carrier_off(dev); | ||
1498 | schedule_delayed_work_on(singlethread_cpu, &priv->retry_work, | ||
1499 | TILE_NET_RETRY_INTERVAL); | ||
1500 | |||
1501 | return 0; | ||
1502 | } | ||
1503 | |||
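tile_net_open() and tile_net_open_retry() together form the usual deferred-retry pattern: if bring-up reports "no link yet", mark the carrier off and let a delayed work item keep trying. A compressed sketch with placeholder names (example_bring_up() stands in for tile_net_open_inner(); the driver itself pins the work to one cpu with schedule_delayed_work_on() and uses TILE_NET_RETRY_INTERVAL rather than HZ):

	#include <linux/kernel.h>
	#include <linux/workqueue.h>
	#include <linux/netdevice.h>

	struct example_priv {
		struct net_device *dev;
		struct delayed_work retry_work;
	};

	/* Runs periodically until the link finally comes up. */
	static void example_retry(struct work_struct *w)
	{
		struct example_priv *p =
			container_of(to_delayed_work(w), struct example_priv, retry_work);

		if (example_bring_up(p->dev) > 0)	/* > 0 means "no link yet" */
			schedule_delayed_work(&p->retry_work, HZ);
		else
			netif_carrier_on(p->dev);
	}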
1504 | |||
1505 | /* | ||
1506 | * Disables a network interface. | ||
1507 | * | ||
1508 | * Returns 0; this is not allowed to fail. | ||
1509 | * | ||
1510 | * The close entry point is called when an interface is de-activated | ||
1511 | * by the OS. The hardware is still under the driver's control, but | ||
1512 | * needs to be disabled. A global MAC reset is issued to stop the | ||
1513 | * hardware, and all transmit and receive resources are freed. | ||
1514 | * | ||
1515 | * ISSUE: Can this be called while "tile_net_poll()" is running? | ||
1516 | */ | ||
1517 | static int tile_net_stop(struct net_device *dev) | ||
1518 | { | ||
1519 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1520 | |||
1521 | bool pending = true; | ||
1522 | |||
1523 | PDEBUG("tile_net_stop()\n"); | ||
1524 | |||
1525 | /* ISSUE: Only needed if not yet fully open. */ | ||
1526 | cancel_delayed_work_sync(&priv->retry_work); | ||
1527 | |||
1528 | /* Can't transmit any more. */ | ||
1529 | netif_stop_queue(dev); | ||
1530 | |||
1531 | /* | ||
1532 | * Disable hypervisor interrupts on each tile. | ||
1533 | */ | ||
1534 | on_each_cpu(tile_net_disable_intr, (void *)dev, 1); | ||
1535 | |||
1536 | /* | ||
1537 | * Unregister the interrupt handler. | ||
1538 | * The __ffs() function returns the index into the interrupt handler | ||
1539 | * table from the interrupt bit mask which should have one bit | ||
1540 | * and one bit only set. | ||
1541 | */ | ||
1542 | if (priv->intr_id) | ||
1543 | free_irq(__ffs(priv->intr_id), dev); | ||
1544 | |||
1545 | /* | ||
1546 | * Drain all the LIPP buffers. | ||
1547 | */ | ||
1548 | |||
1549 | while (true) { | ||
1550 | int buffer; | ||
1551 | |||
1552 | /* NOTE: This should never fail. */ | ||
1553 | if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&buffer, | ||
1554 | sizeof(buffer), NETIO_IPP_DRAIN_OFF) < 0) | ||
1555 | break; | ||
1556 | |||
1557 | /* Stop when done. */ | ||
1558 | if (buffer == 0) | ||
1559 | break; | ||
1560 | |||
1561 | { | ||
1562 | /* Convert "linux_buffer_t" to "va". */ | ||
1563 | void *va = __va((phys_addr_t)(buffer >> 1) << 7); | ||
1564 | |||
1565 | /* Acquire the associated "skb". */ | ||
1566 | struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); | ||
1567 | struct sk_buff *skb = *skb_ptr; | ||
1568 | |||
1569 | kfree_skb(skb); | ||
1570 | } | ||
1571 | } | ||
1572 | |||
1573 | /* Stop LIPP/LEPP. */ | ||
1574 | tile_net_stop_aux(dev); | ||
1575 | |||
1576 | |||
1577 | priv->fully_opened = 0; | ||
1578 | |||
1579 | |||
1580 | /* | ||
1581 | * XXX: ISSUE: It appears that, in practice anyway, by the | ||
1582 | * time we get here, there are no pending completions. | ||
1583 | */ | ||
1584 | while (pending) { | ||
1585 | |||
1586 | struct sk_buff *olds[32]; | ||
1587 | unsigned int wanted = 32; | ||
1588 | unsigned int i, nolds = 0; | ||
1589 | |||
1590 | nolds = tile_net_lepp_grab_comps(dev, olds, | ||
1591 | wanted, &pending); | ||
1592 | |||
1593 | /* ISSUE: We have never actually seen this debug spew. */ | ||
1594 | if (nolds != 0) | ||
1595 | pr_info("During tile_net_stop(), grabbed %d comps.\n", | ||
1596 | nolds); | ||
1597 | |||
1598 | for (i = 0; i < nolds; i++) | ||
1599 | kfree_skb(olds[i]); | ||
1600 | } | ||
1601 | |||
1602 | |||
1603 | /* Wipe the EPP queue. */ | ||
1604 | memset(priv->epp_queue, 0, sizeof(lepp_queue_t)); | ||
1605 | |||
1606 | /* Evict the EPP queue. */ | ||
1607 | finv_buffer(priv->epp_queue, PAGE_SIZE); | ||
1608 | |||
1609 | return 0; | ||
1610 | } | ||
1611 | |||
1612 | |||
1613 | /* | ||
1614 | * Prepare the "frags" info for the resulting LEPP command. | ||
1615 | * | ||
1616 | * If needed, flush the memory used by the frags. | ||
1617 | */ | ||
1618 | static unsigned int tile_net_tx_frags(lepp_frag_t *frags, | ||
1619 | struct sk_buff *skb, | ||
1620 | void *b_data, unsigned int b_len) | ||
1621 | { | ||
1622 | unsigned int i, n = 0; | ||
1623 | |||
1624 | struct skb_shared_info *sh = skb_shinfo(skb); | ||
1625 | |||
1626 | phys_addr_t cpa; | ||
1627 | |||
1628 | if (b_len != 0) { | ||
1629 | |||
1630 | if (!hash_default) | ||
1631 | finv_buffer_remote(b_data, b_len); | ||
1632 | |||
1633 | cpa = __pa(b_data); | ||
1634 | frags[n].cpa_lo = cpa; | ||
1635 | frags[n].cpa_hi = cpa >> 32; | ||
1636 | frags[n].length = b_len; | ||
1637 | frags[n].hash_for_home = hash_default; | ||
1638 | n++; | ||
1639 | } | ||
1640 | |||
1641 | for (i = 0; i < sh->nr_frags; i++) { | ||
1642 | |||
1643 | skb_frag_t *f = &sh->frags[i]; | ||
1644 | unsigned long pfn = page_to_pfn(f->page); | ||
1645 | |||
1646 | /* FIXME: Compute "hash_for_home" properly. */ | ||
1647 | /* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */ | ||
1648 | int hash_for_home = hash_default; | ||
1649 | |||
1650 | /* FIXME: Hmmm. */ | ||
1651 | if (!hash_default) { | ||
1652 | void *va = pfn_to_kaddr(pfn) + f->page_offset; | ||
1653 | BUG_ON(PageHighMem(f->page)); | ||
1654 | finv_buffer_remote(va, f->size); | ||
1655 | } | ||
1656 | |||
1657 | cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset; | ||
1658 | frags[n].cpa_lo = cpa; | ||
1659 | frags[n].cpa_hi = cpa >> 32; | ||
1660 | frags[n].length = f->size; | ||
1661 | frags[n].hash_for_home = hash_for_home; | ||
1662 | n++; | ||
1663 | } | ||
1664 | |||
1665 | return n; | ||
1666 | } | ||
1667 | |||
1668 | |||
1669 | /* | ||
1670 | * This function takes "skb", consisting of a header template and a | ||
1671 | * payload, and hands it to LEPP, to emit as one or more segments, | ||
1672 | * each consisting of a possibly modified header, plus a piece of the | ||
1673 | * payload, via a process known as "tcp segmentation offload". | ||
1674 | * | ||
1675 | * Usually, "data" will contain the header template, of size "sh_len", | ||
1676 | * and "sh->frags" will contain "skb->data_len" bytes of payload, and | ||
1677 | * there will be "sh->gso_segs" segments. | ||
1678 | * | ||
1679 | * Sometimes, if "sendfile()" requires copying, we will be called with | ||
1680 | * "data" containing the header and payload, with "frags" being empty. | ||
1681 | * | ||
1682 | * In theory, "sh->nr_frags" could be 3, but in practice, it seems | ||
1683 | * that this will never actually happen. | ||
1684 | * | ||
1685 | * See "emulate_large_send_offload()" for some reference code, which | ||
1686 | * does not handle checksumming. | ||
1687 | * | ||
1688 | * ISSUE: How do we make sure that high memory DMA does not migrate? | ||
1689 | */ | ||
1690 | static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev) | ||
1691 | { | ||
1692 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1693 | int my_cpu = smp_processor_id(); | ||
1694 | struct tile_net_cpu *info = priv->cpu[my_cpu]; | ||
1695 | struct tile_net_stats_t *stats = &info->stats; | ||
1696 | |||
1697 | struct skb_shared_info *sh = skb_shinfo(skb); | ||
1698 | |||
1699 | unsigned char *data = skb->data; | ||
1700 | |||
1701 | /* The ip header follows the ethernet header. */ | ||
1702 | struct iphdr *ih = ip_hdr(skb); | ||
1703 | unsigned int ih_len = ih->ihl * 4; | ||
1704 | |||
1705 | /* Note that "nh == ih", by definition. */ | ||
1706 | unsigned char *nh = skb_network_header(skb); | ||
1707 | unsigned int eh_len = nh - data; | ||
1708 | |||
1709 | /* The tcp header follows the ip header. */ | ||
1710 | struct tcphdr *th = (struct tcphdr *)(nh + ih_len); | ||
1711 | unsigned int th_len = th->doff * 4; | ||
1712 | |||
1713 | /* The total number of header bytes. */ | ||
1714 | /* NOTE: This may be less than skb_headlen(skb). */ | ||
1715 | unsigned int sh_len = eh_len + ih_len + th_len; | ||
1716 | |||
1717 | /* The number of payload bytes at "skb->data + sh_len". */ | ||
1718 | /* This is non-zero for sendfile() without HIGHDMA. */ | ||
1719 | unsigned int b_len = skb_headlen(skb) - sh_len; | ||
1720 | |||
1721 | /* The total number of payload bytes. */ | ||
1722 | unsigned int d_len = b_len + skb->data_len; | ||
1723 | |||
1724 | /* The maximum payload size. */ | ||
1725 | unsigned int p_len = sh->gso_size; | ||
1726 | |||
1727 | /* The total number of segments. */ | ||
1728 | unsigned int num_segs = sh->gso_segs; | ||
1729 | |||
1730 | /* The temporary copy of the command. */ | ||
1731 | u32 cmd_body[(LEPP_MAX_CMD_SIZE + 3) / 4]; | ||
1732 | lepp_tso_cmd_t *cmd = (lepp_tso_cmd_t *)cmd_body; | ||
1733 | |||
1734 | /* Analyze the "frags". */ | ||
1735 | unsigned int num_frags = | ||
1736 | tile_net_tx_frags(cmd->frags, skb, data + sh_len, b_len); | ||
1737 | |||
1738 | /* The size of the command, including frags and header. */ | ||
1739 | size_t cmd_size = LEPP_TSO_CMD_SIZE(num_frags, sh_len); | ||
1740 | |||
1741 | /* The command header. */ | ||
1742 | lepp_tso_cmd_t cmd_init = { | ||
1743 | .tso = true, | ||
1744 | .header_size = sh_len, | ||
1745 | .ip_offset = eh_len, | ||
1746 | .tcp_offset = eh_len + ih_len, | ||
1747 | .payload_size = p_len, | ||
1748 | .num_frags = num_frags, | ||
1749 | }; | ||
1750 | |||
1751 | unsigned long irqflags; | ||
1752 | |||
1753 | lepp_queue_t *eq = priv->epp_queue; | ||
1754 | |||
1755 | struct sk_buff *olds[4]; | ||
1756 | unsigned int wanted = 4; | ||
1757 | unsigned int i, nolds = 0; | ||
1758 | |||
1759 | unsigned int cmd_head, cmd_tail, cmd_next; | ||
1760 | unsigned int comp_tail; | ||
1761 | |||
1762 | unsigned int free_slots; | ||
1763 | |||
1764 | |||
1765 | /* Paranoia. */ | ||
1766 | BUG_ON(skb->protocol != htons(ETH_P_IP)); | ||
1767 | BUG_ON(ih->protocol != IPPROTO_TCP); | ||
1768 | BUG_ON(skb->ip_summed != CHECKSUM_PARTIAL); | ||
1769 | BUG_ON(num_frags > LEPP_MAX_FRAGS); | ||
1770 | /*--BUG_ON(num_segs != (d_len + (p_len - 1)) / p_len); */ | ||
1771 | BUG_ON(num_segs <= 1); | ||
1772 | |||
1773 | |||
1774 | /* Finish preparing the command. */ | ||
1775 | |||
1776 | /* Copy the command header. */ | ||
1777 | *cmd = cmd_init; | ||
1778 | |||
1779 | /* Copy the "header". */ | ||
1780 | memcpy(&cmd->frags[num_frags], data, sh_len); | ||
1781 | |||
1782 | |||
1783 | /* Prefetch and wait, to minimize time spent holding the spinlock. */ | ||
1784 | prefetch_L1(&eq->comp_tail); | ||
1785 | prefetch_L1(&eq->cmd_tail); | ||
1786 | mb(); | ||
1787 | |||
1788 | |||
1789 | /* Enqueue the command. */ | ||
1790 | |||
1791 | spin_lock_irqsave(&priv->cmd_lock, irqflags); | ||
1792 | |||
1793 | /* | ||
1794 | * Handle completions if needed to make room. | ||
1795 | * HACK: Spin until there is sufficient room. | ||
1796 | */ | ||
1797 | free_slots = lepp_num_free_comp_slots(eq); | ||
1798 | if (free_slots < 1) { | ||
1799 | spin: | ||
1800 | nolds += tile_net_lepp_grab_comps(dev, olds + nolds, | ||
1801 | wanted - nolds, NULL); | ||
1802 | if (lepp_num_free_comp_slots(eq) < 1) | ||
1803 | goto spin; | ||
1804 | } | ||
1805 | |||
1806 | cmd_head = eq->cmd_head; | ||
1807 | cmd_tail = eq->cmd_tail; | ||
1808 | |||
1809 | /* NOTE: The "gotos" below are untested. */ | ||
1810 | |||
1811 | /* Prepare to advance, detecting full queue. */ | ||
1812 | cmd_next = cmd_tail + cmd_size; | ||
1813 | if (cmd_tail < cmd_head && cmd_next >= cmd_head) | ||
1814 | goto spin; | ||
1815 | if (cmd_next > LEPP_CMD_LIMIT) { | ||
1816 | cmd_next = 0; | ||
1817 | if (cmd_next == cmd_head) | ||
1818 | goto spin; | ||
1819 | } | ||
1820 | |||
1821 | /* Copy the command. */ | ||
1822 | memcpy(&eq->cmds[cmd_tail], cmd, cmd_size); | ||
1823 | |||
1824 | /* Advance. */ | ||
1825 | cmd_tail = cmd_next; | ||
1826 | |||
1827 | /* Record "skb" for eventual freeing. */ | ||
1828 | comp_tail = eq->comp_tail; | ||
1829 | eq->comps[comp_tail] = skb; | ||
1830 | LEPP_QINC(comp_tail); | ||
1831 | eq->comp_tail = comp_tail; | ||
1832 | |||
1833 | /* Flush before allowing LEPP to handle the command. */ | ||
1834 | __insn_mf(); | ||
1835 | |||
1836 | eq->cmd_tail = cmd_tail; | ||
1837 | |||
1838 | spin_unlock_irqrestore(&priv->cmd_lock, irqflags); | ||
1839 | |||
1840 | if (nolds == 0) | ||
1841 | nolds = tile_net_lepp_grab_comps(dev, olds, wanted, NULL); | ||
1842 | |||
1843 | /* Handle completions. */ | ||
1844 | for (i = 0; i < nolds; i++) | ||
1845 | kfree_skb(olds[i]); | ||
1846 | |||
1847 | /* Update stats. */ | ||
1848 | stats->tx_packets += num_segs; | ||
1849 | stats->tx_bytes += (num_segs * sh_len) + d_len; | ||
1850 | |||
1851 | /* Make sure the egress timer is scheduled. */ | ||
1852 | tile_net_schedule_egress_timer(info); | ||
1853 | |||
1854 | return NETDEV_TX_OK; | ||
1855 | } | ||
1856 | |||
1857 | |||
1858 | /* | ||
1859 | * Transmit a packet (called by the kernel via "hard_start_xmit" hook). | ||
1860 | */ | ||
1861 | static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) | ||
1862 | { | ||
1863 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1864 | int my_cpu = smp_processor_id(); | ||
1865 | struct tile_net_cpu *info = priv->cpu[my_cpu]; | ||
1866 | struct tile_net_stats_t *stats = &info->stats; | ||
1867 | |||
1868 | unsigned long irqflags; | ||
1869 | |||
1870 | struct skb_shared_info *sh = skb_shinfo(skb); | ||
1871 | |||
1872 | unsigned int len = skb->len; | ||
1873 | unsigned char *data = skb->data; | ||
1874 | |||
1875 | unsigned int csum_start = skb->csum_start - skb_headroom(skb); | ||
1876 | |||
1877 | lepp_frag_t frags[LEPP_MAX_FRAGS]; | ||
1878 | |||
1879 | unsigned int num_frags; | ||
1880 | |||
1881 | lepp_queue_t *eq = priv->epp_queue; | ||
1882 | |||
1883 | struct sk_buff *olds[4]; | ||
1884 | unsigned int wanted = 4; | ||
1885 | unsigned int i, nolds = 0; | ||
1886 | |||
1887 | unsigned int cmd_size = sizeof(lepp_cmd_t); | ||
1888 | |||
1889 | unsigned int cmd_head, cmd_tail, cmd_next; | ||
1890 | unsigned int comp_tail; | ||
1891 | |||
1892 | lepp_cmd_t cmds[LEPP_MAX_FRAGS]; | ||
1893 | |||
1894 | unsigned int free_slots; | ||
1895 | |||
1896 | |||
1897 | /* | ||
1898 | * This is paranoia, since we think that if the link doesn't come | ||
1899 | * up, telling Linux we have no carrier will keep it from trying | ||
1900 | * to transmit. If it does, though, we can't execute this routine, | ||
1901 | * since data structures we depend on aren't set up yet. | ||
1902 | */ | ||
1903 | if (!info->registered) | ||
1904 | return NETDEV_TX_BUSY; | ||
1905 | |||
1906 | |||
1907 | /* Save the timestamp. */ | ||
1908 | dev->trans_start = jiffies; | ||
1909 | |||
1910 | |||
1911 | #ifdef TILE_NET_PARANOIA | ||
1912 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
1913 | if (hash_default) { | ||
1914 | HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)data); | ||
1915 | if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3) | ||
1916 | panic("Non-coherent egress buffer!"); | ||
1917 | } | ||
1918 | #endif | ||
1919 | #endif | ||
1920 | |||
1921 | |||
1922 | #ifdef TILE_NET_DUMP_PACKETS | ||
1923 | /* ISSUE: Does not dump the "frags". */ | ||
1924 | dump_packet(data, skb_headlen(skb), "tx"); | ||
1925 | #endif /* TILE_NET_DUMP_PACKETS */ | ||
1926 | |||
1927 | |||
1928 | if (sh->gso_size != 0) | ||
1929 | return tile_net_tx_tso(skb, dev); | ||
1930 | |||
1931 | |||
1932 | /* Prepare the commands. */ | ||
1933 | |||
1934 | num_frags = tile_net_tx_frags(frags, skb, data, skb_headlen(skb)); | ||
1935 | |||
1936 | for (i = 0; i < num_frags; i++) { | ||
1937 | |||
1938 | bool final = (i == num_frags - 1); | ||
1939 | |||
1940 | lepp_cmd_t cmd = { | ||
1941 | .cpa_lo = frags[i].cpa_lo, | ||
1942 | .cpa_hi = frags[i].cpa_hi, | ||
1943 | .length = frags[i].length, | ||
1944 | .hash_for_home = frags[i].hash_for_home, | ||
1945 | .send_completion = final, | ||
1946 | .end_of_packet = final | ||
1947 | }; | ||
1948 | |||
1949 | if (i == 0 && skb->ip_summed == CHECKSUM_PARTIAL) { | ||
1950 | cmd.compute_checksum = 1; | ||
1951 | cmd.checksum_data.bits.start_byte = csum_start; | ||
1952 | cmd.checksum_data.bits.count = len - csum_start; | ||
1953 | cmd.checksum_data.bits.destination_byte = | ||
1954 | csum_start + skb->csum_offset; | ||
1955 | } | ||
1956 | |||
1957 | cmds[i] = cmd; | ||
1958 | } | ||
1959 | |||
1960 | |||
1961 | /* Prefetch and wait, to minimize time spent holding the spinlock. */ | ||
1962 | prefetch_L1(&eq->comp_tail); | ||
1963 | prefetch_L1(&eq->cmd_tail); | ||
1964 | mb(); | ||
1965 | |||
1966 | |||
1967 | /* Enqueue the commands. */ | ||
1968 | |||
1969 | spin_lock_irqsave(&priv->cmd_lock, irqflags); | ||
1970 | |||
1971 | /* | ||
1972 | * Handle completions if needed to make room. | ||
1973 | * HACK: Spin until there is sufficient room. | ||
1974 | */ | ||
1975 | free_slots = lepp_num_free_comp_slots(eq); | ||
1976 | if (free_slots < 1) { | ||
1977 | spin: | ||
1978 | nolds += tile_net_lepp_grab_comps(dev, olds + nolds, | ||
1979 | wanted - nolds, NULL); | ||
1980 | if (lepp_num_free_comp_slots(eq) < 1) | ||
1981 | goto spin; | ||
1982 | } | ||
1983 | |||
1984 | cmd_head = eq->cmd_head; | ||
1985 | cmd_tail = eq->cmd_tail; | ||
1986 | |||
1987 | /* NOTE: The "gotos" below are untested. */ | ||
1988 | |||
1989 | /* Copy the commands, or fail. */ | ||
1990 | for (i = 0; i < num_frags; i++) { | ||
1991 | |||
1992 | /* Prepare to advance, detecting full queue. */ | ||
1993 | cmd_next = cmd_tail + cmd_size; | ||
1994 | if (cmd_tail < cmd_head && cmd_next >= cmd_head) | ||
1995 | goto spin; | ||
1996 | if (cmd_next > LEPP_CMD_LIMIT) { | ||
1997 | cmd_next = 0; | ||
1998 | if (cmd_next == cmd_head) | ||
1999 | goto spin; | ||
2000 | } | ||
2001 | |||
2002 | /* Copy the command. */ | ||
2003 | *(lepp_cmd_t *)&eq->cmds[cmd_tail] = cmds[i]; | ||
2004 | |||
2005 | /* Advance. */ | ||
2006 | cmd_tail = cmd_next; | ||
2007 | } | ||
2008 | |||
2009 | /* Record "skb" for eventual freeing. */ | ||
2010 | comp_tail = eq->comp_tail; | ||
2011 | eq->comps[comp_tail] = skb; | ||
2012 | LEPP_QINC(comp_tail); | ||
2013 | eq->comp_tail = comp_tail; | ||
2014 | |||
2015 | /* Flush before allowing LEPP to handle the command. */ | ||
2016 | __insn_mf(); | ||
2017 | |||
2018 | eq->cmd_tail = cmd_tail; | ||
2019 | |||
2020 | spin_unlock_irqrestore(&priv->cmd_lock, irqflags); | ||
2021 | |||
2022 | if (nolds == 0) | ||
2023 | nolds = tile_net_lepp_grab_comps(dev, olds, wanted, NULL); | ||
2024 | |||
2025 | /* Handle completions. */ | ||
2026 | for (i = 0; i < nolds; i++) | ||
2027 | kfree_skb(olds[i]); | ||
2028 | |||
2029 | /* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */ | ||
2030 | stats->tx_packets++; | ||
2031 | stats->tx_bytes += ((len >= ETH_ZLEN) ? len : ETH_ZLEN); | ||
2032 | |||
2033 | /* Make sure the egress timer is scheduled. */ | ||
2034 | tile_net_schedule_egress_timer(info); | ||
2035 | |||
2036 | return NETDEV_TX_OK; | ||
2037 | } | ||
2038 | |||
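Both transmit paths above guard the shared LEPP command ring with the same head/tail arithmetic: the writer's tail must not catch up to the reader's head, and advancing past LEPP_CMD_LIMIT wraps back to slot zero. That test, isolated with placeholder names:

	#include <linux/types.h>

	/* Sketch of the full-ring test used above (placeholder names).  Returns
	 * false if writing "size" bytes at *tail would overrun the reader at
	 * "head"; otherwise advances *tail, wrapping at "limit". */
	static bool example_ring_advance(unsigned int *tail, unsigned int head,
					 unsigned int size, unsigned int limit)
	{
		unsigned int next = *tail + size;

		if (*tail < head && next >= head)
			return false;		/* would pass the reader */

		if (next > limit) {
			next = 0;		/* wrap to the start of the ring */
			if (next == head)
				return false;	/* reader still holds slot 0 */
		}

		*tail = next;
		return true;
	}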
2039 | |||
2040 | /* | ||
2041 | * Deal with a transmit timeout. | ||
2042 | */ | ||
2043 | static void tile_net_tx_timeout(struct net_device *dev) | ||
2044 | { | ||
2045 | PDEBUG("tile_net_tx_timeout()\n"); | ||
2046 | PDEBUG("Transmit timeout at %ld, latency %ld\n", jiffies, | ||
2047 | jiffies - dev->trans_start); | ||
2048 | |||
2049 | /* XXX: ISSUE: This doesn't seem useful for us. */ | ||
2050 | netif_wake_queue(dev); | ||
2051 | } | ||
2052 | |||
2053 | |||
2054 | /* | ||
2055 | * Ioctl commands. | ||
2056 | */ | ||
2057 | static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | ||
2058 | { | ||
2059 | return -EOPNOTSUPP; | ||
2060 | } | ||
2061 | |||
2062 | |||
2063 | /* | ||
2064 | * Get System Network Statistics. | ||
2065 | * | ||
2066 | * Returns the address of the device statistics structure. | ||
2067 | */ | ||
2068 | static struct net_device_stats *tile_net_get_stats(struct net_device *dev) | ||
2069 | { | ||
2070 | struct tile_net_priv *priv = netdev_priv(dev); | ||
2071 | u32 rx_packets = 0; | ||
2072 | u32 tx_packets = 0; | ||
2073 | u32 rx_bytes = 0; | ||
2074 | u32 tx_bytes = 0; | ||
2075 | int i; | ||
2076 | |||
2077 | for_each_online_cpu(i) { | ||
2078 | if (priv->cpu[i]) { | ||
2079 | rx_packets += priv->cpu[i]->stats.rx_packets; | ||
2080 | rx_bytes += priv->cpu[i]->stats.rx_bytes; | ||
2081 | tx_packets += priv->cpu[i]->stats.tx_packets; | ||
2082 | tx_bytes += priv->cpu[i]->stats.tx_bytes; | ||
2083 | } | ||
2084 | } | ||
2085 | |||
2086 | priv->stats.rx_packets = rx_packets; | ||
2087 | priv->stats.rx_bytes = rx_bytes; | ||
2088 | priv->stats.tx_packets = tx_packets; | ||
2089 | priv->stats.tx_bytes = tx_bytes; | ||
2090 | |||
2091 | return &priv->stats; | ||
2092 | } | ||
2093 | |||
2094 | |||
2095 | /* | ||
2096 | * Change the "mtu". | ||
2097 | * | ||
2098 | * The "change_mtu" method is usually not needed. | ||
2099 | * If you need it, it must be like this. | ||
2100 | */ | ||
2101 | static int tile_net_change_mtu(struct net_device *dev, int new_mtu) | ||
2102 | { | ||
2103 | PDEBUG("tile_net_change_mtu()\n"); | ||
2104 | |||
2105 | /* Check ranges. */ | ||
2106 | if ((new_mtu < 68) || (new_mtu > 1500)) | ||
2107 | return -EINVAL; | ||
2108 | |||
2109 | /* Accept the value. */ | ||
2110 | dev->mtu = new_mtu; | ||
2111 | |||
2112 | return 0; | ||
2113 | } | ||
2114 | |||
2115 | |||
2116 | /* | ||
2117 | * Change the Ethernet Address of the NIC. | ||
2118 | * | ||
2119 | * The hypervisor driver does not support changing the MAC address. However, | ||
2120 | * the IPP does not do anything with the MAC address, so the address which | ||
2121 | * gets used on outgoing packets, and which is accepted on incoming packets, | ||
2122 | * is completely up to the NetIO program or kernel driver which is actually | ||
2123 | * handling them. | ||
2124 | * | ||
2125 | * Returns 0 on success, negative on failure. | ||
2126 | */ | ||
2127 | static int tile_net_set_mac_address(struct net_device *dev, void *p) | ||
2128 | { | ||
2129 | struct sockaddr *addr = p; | ||
2130 | |||
2131 | if (!is_valid_ether_addr(addr->sa_data)) | ||
2132 | return -EINVAL; | ||
2133 | |||
2134 | /* ISSUE: Note that "dev_addr" is now a pointer. */ | ||
2135 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | ||
2136 | |||
2137 | return 0; | ||
2138 | } | ||
2139 | |||
2140 | |||
2141 | /* | ||
2142 | * Obtain the MAC address from the hypervisor. | ||
2143 | * This must be done before opening the device. | ||
2144 | */ | ||
2145 | static int tile_net_get_mac(struct net_device *dev) | ||
2146 | { | ||
2147 | struct tile_net_priv *priv = netdev_priv(dev); | ||
2148 | |||
2149 | char hv_dev_name[32]; | ||
2150 | int len; | ||
2151 | |||
2152 | __netio_getset_offset_t offset = { .word = NETIO_IPP_PARAM_OFF }; | ||
2153 | |||
2154 | int ret; | ||
2155 | |||
2156 | /* For example, "xgbe0". */ | ||
2157 | strcpy(hv_dev_name, dev->name); | ||
2158 | len = strlen(hv_dev_name); | ||
2159 | |||
2160 | /* For example, "xgbe/0". */ | ||
2161 | hv_dev_name[len] = hv_dev_name[len - 1]; | ||
2162 | hv_dev_name[len - 1] = '/'; | ||
2163 | len++; | ||
2164 | |||
2165 | /* For example, "xgbe/0/native_hash". */ | ||
2166 | strcpy(hv_dev_name + len, hash_default ? "/native_hash" : "/native"); | ||
2167 | |||
2168 | /* Get the hypervisor handle for this device. */ | ||
2169 | priv->hv_devhdl = hv_dev_open((HV_VirtAddr)hv_dev_name, 0); | ||
2170 | PDEBUG("hv_dev_open(%s) returned %d %p\n", | ||
2171 | hv_dev_name, priv->hv_devhdl, &priv->hv_devhdl); | ||
2172 | if (priv->hv_devhdl < 0) { | ||
2173 | if (priv->hv_devhdl == HV_ENODEV) | ||
2174 | printk(KERN_DEBUG "Ignoring unconfigured device %s\n", | ||
2175 | hv_dev_name); | ||
2176 | else | ||
2177 | printk(KERN_DEBUG "hv_dev_open(%s) returned %d\n", | ||
2178 | hv_dev_name, priv->hv_devhdl); | ||
2179 | return -1; | ||
2180 | } | ||
2181 | |||
2182 | /* | ||
2183 | * Read the hardware address from the hypervisor. | ||
2184 | * ISSUE: Note that "dev_addr" is now a pointer. | ||
2185 | */ | ||
2186 | offset.bits.class = NETIO_PARAM; | ||
2187 | offset.bits.addr = NETIO_PARAM_MAC; | ||
2188 | ret = hv_dev_pread(priv->hv_devhdl, 0, | ||
2189 | (HV_VirtAddr)dev->dev_addr, dev->addr_len, | ||
2190 | offset.word); | ||
2191 | PDEBUG("hv_dev_pread(NETIO_PARAM_MAC) returned %d\n", ret); | ||
2192 | if (ret <= 0) { | ||
2193 | printk(KERN_DEBUG "hv_dev_pread(NETIO_PARAM_MAC) %s failed\n", | ||
2194 | dev->name); | ||
2195 | /* | ||
2196 | * Since the device is configured by the hypervisor but we | ||
2197 | * can't get its MAC address, we are most likely running | ||
2198 | * the simulator, so let's generate a random MAC address. | ||
2199 | */ | ||
2200 | random_ether_addr(dev->dev_addr); | ||
2201 | } | ||
2202 | |||
2203 | return 0; | ||
2204 | } | ||
2205 | |||
2206 | |||
2207 | static struct net_device_ops tile_net_ops = { | ||
2208 | .ndo_open = tile_net_open, | ||
2209 | .ndo_stop = tile_net_stop, | ||
2210 | .ndo_start_xmit = tile_net_tx, | ||
2211 | .ndo_do_ioctl = tile_net_ioctl, | ||
2212 | .ndo_get_stats = tile_net_get_stats, | ||
2213 | .ndo_change_mtu = tile_net_change_mtu, | ||
2214 | .ndo_tx_timeout = tile_net_tx_timeout, | ||
2215 | .ndo_set_mac_address = tile_net_set_mac_address | ||
2216 | }; | ||
2217 | |||
2218 | |||
2219 | /* | ||
2220 | * The setup function. | ||
2221 | * | ||
2222 | * This uses ether_setup() to assign various fields in dev, including | ||
2223 | * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields. | ||
2224 | */ | ||
2225 | static void tile_net_setup(struct net_device *dev) | ||
2226 | { | ||
2227 | PDEBUG("tile_net_setup()\n"); | ||
2228 | |||
2229 | ether_setup(dev); | ||
2230 | |||
2231 | dev->netdev_ops = &tile_net_ops; | ||
2232 | |||
2233 | dev->watchdog_timeo = TILE_NET_TIMEOUT; | ||
2234 | |||
2235 | /* We want lockless xmit. */ | ||
2236 | dev->features |= NETIF_F_LLTX; | ||
2237 | |||
2238 | /* We support hardware tx checksums. */ | ||
2239 | dev->features |= NETIF_F_HW_CSUM; | ||
2240 | |||
2241 | /* We support scatter/gather. */ | ||
2242 | dev->features |= NETIF_F_SG; | ||
2243 | |||
2244 | /* We support TSO. */ | ||
2245 | dev->features |= NETIF_F_TSO; | ||
2246 | |||
2247 | #ifdef TILE_NET_GSO | ||
2248 | /* We support GSO. */ | ||
2249 | dev->features |= NETIF_F_GSO; | ||
2250 | #endif | ||
2251 | |||
2252 | if (hash_default) | ||
2253 | dev->features |= NETIF_F_HIGHDMA; | ||
2254 | |||
2255 | /* ISSUE: We should support NETIF_F_UFO. */ | ||
2256 | |||
2257 | dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN; | ||
2258 | |||
2259 | dev->mtu = TILE_NET_MTU; | ||
2260 | } | ||
2261 | |||
2262 | |||
2263 | /* | ||
2264 | * Allocate the device structure, register the device, and obtain the | ||
2265 | * MAC address from the hypervisor. | ||
2266 | */ | ||
2267 | static struct net_device *tile_net_dev_init(const char *name) | ||
2268 | { | ||
2269 | int ret; | ||
2270 | struct net_device *dev; | ||
2271 | struct tile_net_priv *priv; | ||
2272 | struct page *page; | ||
2273 | |||
2274 | /* | ||
2275 | * Allocate the device structure. This allocates "priv", calls | ||
2276 | * tile_net_setup(), and saves "name". Normally, "name" is a | ||
2277 | * template, instantiated by register_netdev(), but not for us. | ||
2278 | */ | ||
2279 | dev = alloc_netdev(sizeof(*priv), name, tile_net_setup); | ||
2280 | if (!dev) { | ||
2281 | pr_err("alloc_netdev(%s) failed\n", name); | ||
2282 | return NULL; | ||
2283 | } | ||
2284 | |||
2285 | priv = netdev_priv(dev); | ||
2286 | |||
2287 | /* Initialize "priv". */ | ||
2288 | |||
2289 | memset(priv, 0, sizeof(*priv)); | ||
2290 | |||
2291 | /* Save "dev" for "tile_net_open_retry()". */ | ||
2292 | priv->dev = dev; | ||
2293 | |||
2294 | INIT_DELAYED_WORK(&priv->retry_work, tile_net_open_retry); | ||
2295 | |||
2296 | spin_lock_init(&priv->cmd_lock); | ||
2297 | spin_lock_init(&priv->comp_lock); | ||
2298 | |||
2299 | /* Allocate "epp_queue". */ | ||
2300 | BUG_ON(get_order(sizeof(lepp_queue_t)) != 0); | ||
2301 | page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0); | ||
2302 | if (!page) { | ||
2303 | free_netdev(dev); | ||
2304 | return NULL; | ||
2305 | } | ||
2306 | priv->epp_queue = page_address(page); | ||
2307 | |||
2308 | /* Register the network device. */ | ||
2309 | ret = register_netdev(dev); | ||
2310 | if (ret) { | ||
2311 | pr_err("register_netdev %s failed %d\n", dev->name, ret); | ||
2312 | free_page((unsigned long)priv->epp_queue); | ||
2313 | free_netdev(dev); | ||
2314 | return NULL; | ||
2315 | } | ||
2316 | |||
2317 | /* Get the MAC address. */ | ||
2318 | ret = tile_net_get_mac(dev); | ||
2319 | if (ret < 0) { | ||
2320 | unregister_netdev(dev); | ||
2321 | free_page((unsigned long)priv->epp_queue); | ||
2322 | free_netdev(dev); | ||
2323 | return NULL; | ||
2324 | } | ||
2325 | |||
2326 | return dev; | ||
2327 | } | ||
2328 | |||
2329 | |||
2330 | /* | ||
2331 | * Module cleanup. | ||
2332 | */ | ||
2333 | static void tile_net_cleanup(void) | ||
2334 | { | ||
2335 | int i; | ||
2336 | |||
2337 | for (i = 0; i < TILE_NET_DEVS; i++) { | ||
2338 | if (tile_net_devs[i]) { | ||
2339 | struct net_device *dev = tile_net_devs[i]; | ||
2340 | struct tile_net_priv *priv = netdev_priv(dev); | ||
2341 | unregister_netdev(dev); | ||
2342 | finv_buffer(priv->epp_queue, PAGE_SIZE); | ||
2343 | free_page((unsigned long)priv->epp_queue); | ||
2344 | free_netdev(dev); | ||
2345 | } | ||
2346 | } | ||
2347 | } | ||
2348 | |||
2349 | |||
2350 | /* | ||
2351 | * Module initialization. | ||
2352 | */ | ||
2353 | static int tile_net_init_module(void) | ||
2354 | { | ||
2355 | pr_info("Tilera IPP Net Driver\n"); | ||
2356 | |||
2357 | tile_net_devs[0] = tile_net_dev_init("xgbe0"); | ||
2358 | tile_net_devs[1] = tile_net_dev_init("xgbe1"); | ||
2359 | tile_net_devs[2] = tile_net_dev_init("gbe0"); | ||
2360 | tile_net_devs[3] = tile_net_dev_init("gbe1"); | ||
2361 | |||
2362 | return 0; | ||
2363 | } | ||
2364 | |||
2365 | |||
2366 | #ifndef MODULE | ||
2367 | /* | ||
2368 | * The "network_cpus" boot argument specifies the cpus that are dedicated | ||
2369 | * to handle ingress packets. | ||
2370 | * | ||
2371 | * The parameter should be in the form "network_cpus=m-n[,x-y]", where | ||
2372 | * m, n, x, y are integers specifying ranges of cpus; these cpus must | ||
2373 | * be neither dedicated cpus nor dataplane cpus. | ||
2374 | */ | ||
2375 | static int __init network_cpus_setup(char *str) | ||
2376 | { | ||
2377 | int rc = cpulist_parse_crop(str, &network_cpus_map); | ||
2378 | if (rc != 0) { | ||
2379 | pr_warning("network_cpus=%s: malformed cpu list\n", | ||
2380 | str); | ||
2381 | } else { | ||
2382 | |||
2383 | /* Remove dedicated cpus. */ | ||
2384 | cpumask_and(&network_cpus_map, &network_cpus_map, | ||
2385 | cpu_possible_mask); | ||
2386 | |||
2387 | |||
2388 | if (cpumask_empty(&network_cpus_map)) { | ||
2389 | pr_warning("Ignoring network_cpus='%s'.\n", | ||
2390 | str); | ||
2391 | } else { | ||
2392 | char buf[1024]; | ||
2393 | cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map); | ||
2394 | pr_info("Linux network CPUs: %s\n", buf); | ||
2395 | network_cpus_used = true; | ||
2396 | } | ||
2397 | } | ||
2398 | |||
2399 | return 0; | ||
2400 | } | ||
2401 | __setup("network_cpus=", network_cpus_setup); | ||
2402 | #endif | ||
2403 | |||
2404 | |||
2405 | module_init(tile_net_init_module); | ||
2406 | module_exit(tile_net_cleanup); | ||
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index f01e344cf4bd..98e6fdf34d30 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile | |||
@@ -49,6 +49,7 @@ obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o | |||
49 | obj-$(CONFIG_X86_VISWS) += setup-irq.o | 49 | obj-$(CONFIG_X86_VISWS) += setup-irq.o |
50 | obj-$(CONFIG_MN10300) += setup-bus.o | 50 | obj-$(CONFIG_MN10300) += setup-bus.o |
51 | obj-$(CONFIG_MICROBLAZE) += setup-bus.o | 51 | obj-$(CONFIG_MICROBLAZE) += setup-bus.o |
52 | obj-$(CONFIG_TILE) += setup-bus.o setup-irq.o | ||
52 | 53 | ||
53 | # | 54 | # |
54 | # ACPI Related PCI FW Functions | 55 | # ACPI Related PCI FW Functions |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index f5c63fe9db5c..6f9350cabbd5 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -2136,6 +2136,24 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, | |||
2136 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB, | 2136 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB, |
2137 | quirk_unhide_mch_dev6); | 2137 | quirk_unhide_mch_dev6); |
2138 | 2138 | ||
2139 | #ifdef CONFIG_TILE | ||
2140 | /* | ||
2141 | * The Tilera TILEmpower platform needs to set the link speed | ||
2142 | * to 2.5 GT/s (gigatransfers per second), i.e. Gen 1. The default | ||
2143 | * link speed setting is 5 GT/s (Gen 2). Offset 0x98 is the Link | ||
2144 | * Control 2 register in the PCIe capability of the PEX8624 PCIe | ||
2145 | * switch. The switch supports link speed auto-negotiation, but | ||
2146 | * incorrectly sets the link speed to 5 GT/s. | ||
2147 | */ | ||
2148 | static void __devinit quirk_tile_plx_gen1(struct pci_dev *dev) | ||
2149 | { | ||
2150 | if (tile_plx_gen1) { | ||
2151 | pci_write_config_dword(dev, 0x98, 0x1); | ||
2152 | mdelay(50); | ||
2153 | } | ||
2154 | } | ||
2155 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8624, quirk_tile_plx_gen1); | ||
2156 | #endif /* CONFIG_TILE */ | ||
2139 | 2157 | ||
2140 | #ifdef CONFIG_PCI_MSI | 2158 | #ifdef CONFIG_PCI_MSI |
2141 | /* Some chipsets do not support MSI. We cannot easily rely on setting | 2159 | /* Some chipsets do not support MSI. We cannot easily rely on setting |
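
The TILEmpower change above uses the standard early PCI fixup mechanism. A hedged sketch of the same pattern for a hypothetical device follows; the device ID (0x1234) and the config-space offset (0x40) are placeholders and do not come from this patch.

    #include <linux/pci.h>
    #include <linux/delay.h>

    /* Hypothetical fixup: force a device-specific config register early in boot. */
    static void __devinit quirk_example_force_reg(struct pci_dev *dev)
    {
            /* 0x40 is a placeholder offset into the device's config space. */
            pci_write_config_dword(dev, 0x40, 0x1);
            mdelay(50);     /* allow the device to settle, as the TILE quirk does */
    }
    DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x1234, quirk_example_force_reg);
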
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index 752dbee06af5..5d9c66627b6e 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c | |||
@@ -292,8 +292,8 @@ void qdio_shutdown_thinint(struct qdio_irq *irq_ptr) | |||
292 | return; | 292 | return; |
293 | 293 | ||
294 | /* reset adapter interrupt indicators */ | 294 | /* reset adapter interrupt indicators */ |
295 | put_indicator(irq_ptr->dsci); | ||
296 | set_subchannel_ind(irq_ptr, 1); | 295 | set_subchannel_ind(irq_ptr, 1); |
296 | put_indicator(irq_ptr->dsci); | ||
297 | } | 297 | } |
298 | 298 | ||
299 | void __exit tiqdio_unregister_thinints(void) | 299 | void __exit tiqdio_unregister_thinints(void) |
diff --git a/drivers/scsi/arm/fas216.h b/drivers/scsi/arm/fas216.h index 377cfb72cc66..f30f8d659dc4 100644 --- a/drivers/scsi/arm/fas216.h +++ b/drivers/scsi/arm/fas216.h | |||
@@ -345,7 +345,7 @@ extern int fas216_queue_command(struct Scsi_Host *h, struct scsi_cmnd *SCpnt); | |||
345 | * : SCpnt - Command to queue | 345 | * : SCpnt - Command to queue |
346 | * Returns : 0 - success, else error | 346 | * Returns : 0 - success, else error |
347 | */ | 347 | */ |
348 | extern int fas216_noqueue_command(struct Scsi_Host *, struct scsi_cmnd *) | 348 | extern int fas216_noqueue_command(struct Scsi_Host *, struct scsi_cmnd *); |
349 | 349 | ||
350 | /* Function: irqreturn_t fas216_intr (FAS216_Info *info) | 350 | /* Function: irqreturn_t fas216_intr (FAS216_Info *info) |
351 | * Purpose : handle interrupts from the interface to progress a command | 351 | * Purpose : handle interrupts from the interface to progress a command |
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c index cb12a8e1466b..3f5e387ed564 100644 --- a/drivers/sh/clk/core.c +++ b/drivers/sh/clk/core.c | |||
@@ -418,8 +418,11 @@ int clk_register(struct clk *clk) | |||
418 | list_add(&clk->sibling, &root_clks); | 418 | list_add(&clk->sibling, &root_clks); |
419 | 419 | ||
420 | list_add(&clk->node, &clock_list); | 420 | list_add(&clk->node, &clock_list); |
421 | |||
422 | #ifdef CONFIG_SH_CLK_CPG_LEGACY | ||
421 | if (clk->ops && clk->ops->init) | 423 | if (clk->ops && clk->ops->init) |
422 | clk->ops->init(clk); | 424 | clk->ops->init(clk); |
425 | #endif | ||
423 | 426 | ||
424 | out_unlock: | 427 | out_unlock: |
425 | mutex_unlock(&clock_list_sem); | 428 | mutex_unlock(&clock_list_sem); |
@@ -455,19 +458,13 @@ EXPORT_SYMBOL_GPL(clk_get_rate); | |||
455 | 458 | ||
456 | int clk_set_rate(struct clk *clk, unsigned long rate) | 459 | int clk_set_rate(struct clk *clk, unsigned long rate) |
457 | { | 460 | { |
458 | return clk_set_rate_ex(clk, rate, 0); | ||
459 | } | ||
460 | EXPORT_SYMBOL_GPL(clk_set_rate); | ||
461 | |||
462 | int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id) | ||
463 | { | ||
464 | int ret = -EOPNOTSUPP; | 461 | int ret = -EOPNOTSUPP; |
465 | unsigned long flags; | 462 | unsigned long flags; |
466 | 463 | ||
467 | spin_lock_irqsave(&clock_lock, flags); | 464 | spin_lock_irqsave(&clock_lock, flags); |
468 | 465 | ||
469 | if (likely(clk->ops && clk->ops->set_rate)) { | 466 | if (likely(clk->ops && clk->ops->set_rate)) { |
470 | ret = clk->ops->set_rate(clk, rate, algo_id); | 467 | ret = clk->ops->set_rate(clk, rate); |
471 | if (ret != 0) | 468 | if (ret != 0) |
472 | goto out_unlock; | 469 | goto out_unlock; |
473 | } else { | 470 | } else { |
@@ -485,7 +482,7 @@ out_unlock: | |||
485 | 482 | ||
486 | return ret; | 483 | return ret; |
487 | } | 484 | } |
488 | EXPORT_SYMBOL_GPL(clk_set_rate_ex); | 485 | EXPORT_SYMBOL_GPL(clk_set_rate); |
489 | 486 | ||
490 | int clk_set_parent(struct clk *clk, struct clk *parent) | 487 | int clk_set_parent(struct clk *clk, struct clk *parent) |
491 | { | 488 | { |
@@ -653,8 +650,7 @@ static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state) | |||
653 | clkp->ops->set_parent(clkp, | 650 | clkp->ops->set_parent(clkp, |
654 | clkp->parent); | 651 | clkp->parent); |
655 | if (likely(clkp->ops->set_rate)) | 652 | if (likely(clkp->ops->set_rate)) |
656 | clkp->ops->set_rate(clkp, | 653 | clkp->ops->set_rate(clkp, rate); |
657 | rate, NO_CHANGE); | ||
658 | else if (likely(clkp->ops->recalc)) | 654 | else if (likely(clkp->ops->recalc)) |
659 | clkp->rate = clkp->ops->recalc(clkp); | 655 | clkp->rate = clkp->ops->recalc(clkp); |
660 | } | 656 | } |
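
With clk_set_rate_ex() and its algo_id argument removed, SH clock consumers go through the plain clk API. A minimal sketch of the call sequence follows; the clock name "peripheral_clk" and the 48 MHz rate are illustrative assumptions, not taken from this patch.

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/device.h>

    static int example_enable_peripheral(struct device *dev)
    {
            struct clk *clk;
            int ret;

            clk = clk_get(dev, "peripheral_clk");   /* placeholder clock name */
            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            ret = clk_set_rate(clk, 48000000);      /* no algo_id argument any more */
            if (!ret)
                    ret = clk_enable(clk);

            if (ret)
                    clk_put(clk);
            return ret;
    }
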
diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c index 3aea5f0ceb09..6172335ae323 100644 --- a/drivers/sh/clk/cpg.c +++ b/drivers/sh/clk/cpg.c | |||
@@ -110,8 +110,7 @@ static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent) | |||
110 | return 0; | 110 | return 0; |
111 | } | 111 | } |
112 | 112 | ||
113 | static int sh_clk_div6_set_rate(struct clk *clk, | 113 | static int sh_clk_div6_set_rate(struct clk *clk, unsigned long rate) |
114 | unsigned long rate, int algo_id) | ||
115 | { | 114 | { |
116 | unsigned long value; | 115 | unsigned long value; |
117 | int idx; | 116 | int idx; |
@@ -132,7 +131,7 @@ static int sh_clk_div6_enable(struct clk *clk) | |||
132 | unsigned long value; | 131 | unsigned long value; |
133 | int ret; | 132 | int ret; |
134 | 133 | ||
135 | ret = sh_clk_div6_set_rate(clk, clk->rate, 0); | 134 | ret = sh_clk_div6_set_rate(clk, clk->rate); |
136 | if (ret == 0) { | 135 | if (ret == 0) { |
137 | value = __raw_readl(clk->enable_reg); | 136 | value = __raw_readl(clk->enable_reg); |
138 | value &= ~0x100; /* clear stop bit to enable clock */ | 137 | value &= ~0x100; /* clear stop bit to enable clock */ |
@@ -253,7 +252,7 @@ static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent) | |||
253 | return 0; | 252 | return 0; |
254 | } | 253 | } |
255 | 254 | ||
256 | static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate, int algo_id) | 255 | static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate) |
257 | { | 256 | { |
258 | struct clk_div4_table *d4t = clk->priv; | 257 | struct clk_div4_table *d4t = clk->priv; |
259 | unsigned long value; | 258 | unsigned long value; |
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c index ea071a5b6eee..44447f54942f 100644 --- a/drivers/usb/atm/ueagle-atm.c +++ b/drivers/usb/atm/ueagle-atm.c | |||
@@ -2301,7 +2301,7 @@ out: | |||
2301 | return ret; | 2301 | return ret; |
2302 | } | 2302 | } |
2303 | 2303 | ||
2304 | static DEVICE_ATTR(stat_status, S_IWUGO | S_IRUGO, read_status, reboot); | 2304 | static DEVICE_ATTR(stat_status, S_IWUSR | S_IRUGO, read_status, reboot); |
2305 | 2305 | ||
2306 | static ssize_t read_human_status(struct device *dev, | 2306 | static ssize_t read_human_status(struct device *dev, |
2307 | struct device_attribute *attr, char *buf) | 2307 | struct device_attribute *attr, char *buf) |
@@ -2364,8 +2364,7 @@ out: | |||
2364 | return ret; | 2364 | return ret; |
2365 | } | 2365 | } |
2366 | 2366 | ||
2367 | static DEVICE_ATTR(stat_human_status, S_IWUGO | S_IRUGO, | 2367 | static DEVICE_ATTR(stat_human_status, S_IRUGO, read_human_status, NULL); |
2368 | read_human_status, NULL); | ||
2369 | 2368 | ||
2370 | static ssize_t read_delin(struct device *dev, struct device_attribute *attr, | 2369 | static ssize_t read_delin(struct device *dev, struct device_attribute *attr, |
2371 | char *buf) | 2370 | char *buf) |
@@ -2397,7 +2396,7 @@ out: | |||
2397 | return ret; | 2396 | return ret; |
2398 | } | 2397 | } |
2399 | 2398 | ||
2400 | static DEVICE_ATTR(stat_delin, S_IWUGO | S_IRUGO, read_delin, NULL); | 2399 | static DEVICE_ATTR(stat_delin, S_IRUGO, read_delin, NULL); |
2401 | 2400 | ||
2402 | #define UEA_ATTR(name, reset) \ | 2401 | #define UEA_ATTR(name, reset) \ |
2403 | \ | 2402 | \ |
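
The S_IWUGO -> S_IWUSR conversions in this and the following drivers restrict sysfs writes to the file owner (root). For reference, a hedged sketch of declaring an attribute with the corrected permissions; the attribute name and the show/store bodies are illustrative only.

    #include <linux/device.h>
    #include <linux/kernel.h>
    #include <linux/stat.h>

    static ssize_t show_example(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
            return sprintf(buf, "%d\n", 0);                 /* placeholder value */
    }

    static ssize_t store_example(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t count)
    {
            /* parse and apply the new value here */
            return count;
    }

    /* World-readable, writable only by the owner -- not S_IWUGO. */
    static DEVICE_ATTR(example, S_IRUGO | S_IWUSR, show_example, store_example);
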
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c index b5e20e873cba..717ff653fa23 100644 --- a/drivers/usb/gadget/atmel_usba_udc.c +++ b/drivers/usb/gadget/atmel_usba_udc.c | |||
@@ -2017,7 +2017,7 @@ static int __init usba_udc_probe(struct platform_device *pdev) | |||
2017 | } | 2017 | } |
2018 | } else { | 2018 | } else { |
2019 | /* gpio_request failed, so use -EINVAL for gpio_is_valid */ | 2019 | /* gpio_request failed, so use -EINVAL for gpio_is_valid */ |
2020 | ubc->vbus_pin = -EINVAL; | 2020 | udc->vbus_pin = -EINVAL; |
2021 | } | 2021 | } |
2022 | } | 2022 | } |
2023 | 2023 | ||
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c index 86afdc73322f..6e2599661b5b 100644 --- a/drivers/usb/host/ehci-dbg.c +++ b/drivers/usb/host/ehci-dbg.c | |||
@@ -1067,7 +1067,7 @@ static inline void create_debug_files (struct ehci_hcd *ehci) | |||
1067 | &debug_registers_fops)) | 1067 | &debug_registers_fops)) |
1068 | goto file_error; | 1068 | goto file_error; |
1069 | 1069 | ||
1070 | if (!debugfs_create_file("lpm", S_IRUGO|S_IWUGO, ehci->debug_dir, bus, | 1070 | if (!debugfs_create_file("lpm", S_IRUGO|S_IWUSR, ehci->debug_dir, bus, |
1071 | &debug_lpm_fops)) | 1071 | &debug_lpm_fops)) |
1072 | goto file_error; | 1072 | goto file_error; |
1073 | 1073 | ||
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 502a7e6fef42..e9062806d4a2 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c | |||
@@ -1063,10 +1063,11 @@ rescan: | |||
1063 | tmp && tmp != qh; | 1063 | tmp && tmp != qh; |
1064 | tmp = tmp->qh_next.qh) | 1064 | tmp = tmp->qh_next.qh) |
1065 | continue; | 1065 | continue; |
1066 | /* periodic qh self-unlinks on empty */ | 1066 | /* periodic qh self-unlinks on empty, and a COMPLETING qh |
1067 | if (!tmp) | 1067 | * may already be unlinked. |
1068 | goto nogood; | 1068 | */ |
1069 | unlink_async (ehci, qh); | 1069 | if (tmp) |
1070 | unlink_async(ehci, qh); | ||
1070 | /* FALL THROUGH */ | 1071 | /* FALL THROUGH */ |
1071 | case QH_STATE_UNLINK: /* wait for hw to finish? */ | 1072 | case QH_STATE_UNLINK: /* wait for hw to finish? */ |
1072 | case QH_STATE_UNLINK_WAIT: | 1073 | case QH_STATE_UNLINK_WAIT: |
@@ -1083,7 +1084,6 @@ idle_timeout: | |||
1083 | } | 1084 | } |
1084 | /* else FALL THROUGH */ | 1085 | /* else FALL THROUGH */ |
1085 | default: | 1086 | default: |
1086 | nogood: | ||
1087 | /* caller was supposed to have unlinked any requests; | 1087 | /* caller was supposed to have unlinked any requests; |
1088 | * that's not our job. just leak this memory. | 1088 | * that's not our job. just leak this memory. |
1089 | */ | 1089 | */ |
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c index d36e4e75e08d..12f70c302b0b 100644 --- a/drivers/usb/host/ehci-mem.c +++ b/drivers/usb/host/ehci-mem.c | |||
@@ -141,6 +141,10 @@ static void ehci_mem_cleanup (struct ehci_hcd *ehci) | |||
141 | qh_put (ehci->async); | 141 | qh_put (ehci->async); |
142 | ehci->async = NULL; | 142 | ehci->async = NULL; |
143 | 143 | ||
144 | if (ehci->dummy) | ||
145 | qh_put(ehci->dummy); | ||
146 | ehci->dummy = NULL; | ||
147 | |||
144 | /* DMA consistent memory and pools */ | 148 | /* DMA consistent memory and pools */ |
145 | if (ehci->qtd_pool) | 149 | if (ehci->qtd_pool) |
146 | dma_pool_destroy (ehci->qtd_pool); | 150 | dma_pool_destroy (ehci->qtd_pool); |
@@ -227,8 +231,26 @@ static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags) | |||
227 | if (ehci->periodic == NULL) { | 231 | if (ehci->periodic == NULL) { |
228 | goto fail; | 232 | goto fail; |
229 | } | 233 | } |
230 | for (i = 0; i < ehci->periodic_size; i++) | 234 | |
231 | ehci->periodic [i] = EHCI_LIST_END(ehci); | 235 | if (ehci->use_dummy_qh) { |
236 | struct ehci_qh_hw *hw; | ||
237 | ehci->dummy = ehci_qh_alloc(ehci, flags); | ||
238 | if (!ehci->dummy) | ||
239 | goto fail; | ||
240 | |||
241 | hw = ehci->dummy->hw; | ||
242 | hw->hw_next = EHCI_LIST_END(ehci); | ||
243 | hw->hw_qtd_next = EHCI_LIST_END(ehci); | ||
244 | hw->hw_alt_next = EHCI_LIST_END(ehci); | ||
245 | hw->hw_token &= ~QTD_STS_ACTIVE; | ||
246 | ehci->dummy->hw = hw; | ||
247 | |||
248 | for (i = 0; i < ehci->periodic_size; i++) | ||
249 | ehci->periodic[i] = ehci->dummy->qh_dma; | ||
250 | } else { | ||
251 | for (i = 0; i < ehci->periodic_size; i++) | ||
252 | ehci->periodic[i] = EHCI_LIST_END(ehci); | ||
253 | } | ||
232 | 254 | ||
233 | /* software shadow of hardware table */ | 255 | /* software shadow of hardware table */ |
234 | ehci->pshadow = kcalloc(ehci->periodic_size, sizeof(void *), flags); | 256 | ehci->pshadow = kcalloc(ehci->periodic_size, sizeof(void *), flags); |
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index a1e8d273103f..01bb72b71832 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c | |||
@@ -103,6 +103,19 @@ static int ehci_pci_setup(struct usb_hcd *hcd) | |||
103 | if (retval) | 103 | if (retval) |
104 | return retval; | 104 | return retval; |
105 | 105 | ||
106 | if ((pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x7808) || | ||
107 | (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x4396)) { | ||
108 | /* EHCI controller on AMD SB700/SB800/Hudson-2/3 platforms may | ||
109 | * read/write memory space which does not belong to it when | ||
110 | * there is a NULL pointer with the T-bit set to 1 in the frame list | ||
111 | * table. To avoid the issue, the frame list link pointer | ||
112 | * should always contain a valid pointer to an inactive qh. | ||
113 | */ | ||
114 | ehci->use_dummy_qh = 1; | ||
115 | ehci_info(ehci, "applying AMD SB700/SB800/Hudson-2/3 EHCI " | ||
116 | "dummy qh workaround\n"); | ||
117 | } | ||
118 | |||
106 | /* data structure init */ | 119 | /* data structure init */ |
107 | retval = ehci_init(hcd); | 120 | retval = ehci_init(hcd); |
108 | if (retval) | 121 | if (retval) |
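
As background for the frame-list workaround above: each periodic frame list entry is a 32-bit EHCI link pointer in which bit 0 is the T (terminate) bit, bits 2:1 encode the element type and bits 31:5 hold the address. The sketch below illustrates that layout with hypothetical helper names (the driver itself uses EHCI_LIST_END() and its QH_NEXT-style macros); it is an illustration of the EHCI spec format, not code from this patch.

    #include <linux/types.h>

    #define EX_LINK_TERMINATE  0x00000001u   /* T-bit set: "no valid entry here"    */
    #define EX_LINK_TYPE_QH    (1u << 1)     /* Typ field 01b: entry points to a QH */

    /* Build a periodic frame-list link pointer to a 32-byte-aligned queue head. */
    static inline u32 ex_qh_link(u32 qh_dma)
    {
            /* bits 31:5 = address, Typ = QH, T = 0 (valid) */
            return (qh_dma & ~0x1fu) | EX_LINK_TYPE_QH;
    }

With the AMD quirk enabled, empty slots are filled with such a pointer to the permanently inactive dummy QH instead of a bare terminate bit, so the controller never chases a stale address.
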
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index a92526d6e5ae..d9f78eb26572 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c | |||
@@ -98,7 +98,14 @@ static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr) | |||
98 | */ | 98 | */ |
99 | *prev_p = *periodic_next_shadow(ehci, &here, | 99 | *prev_p = *periodic_next_shadow(ehci, &here, |
100 | Q_NEXT_TYPE(ehci, *hw_p)); | 100 | Q_NEXT_TYPE(ehci, *hw_p)); |
101 | *hw_p = *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p)); | 101 | |
102 | if (!ehci->use_dummy_qh || | ||
103 | *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p)) | ||
104 | != EHCI_LIST_END(ehci)) | ||
105 | *hw_p = *shadow_next_periodic(ehci, &here, | ||
106 | Q_NEXT_TYPE(ehci, *hw_p)); | ||
107 | else | ||
108 | *hw_p = ehci->dummy->qh_dma; | ||
102 | } | 109 | } |
103 | 110 | ||
104 | /* how many of the uframe's 125 usecs are allocated? */ | 111 | /* how many of the uframe's 125 usecs are allocated? */ |
@@ -2335,7 +2342,11 @@ restart: | |||
2335 | * pointer for much longer, if at all. | 2342 | * pointer for much longer, if at all. |
2336 | */ | 2343 | */ |
2337 | *q_p = q.itd->itd_next; | 2344 | *q_p = q.itd->itd_next; |
2338 | *hw_p = q.itd->hw_next; | 2345 | if (!ehci->use_dummy_qh || |
2346 | q.itd->hw_next != EHCI_LIST_END(ehci)) | ||
2347 | *hw_p = q.itd->hw_next; | ||
2348 | else | ||
2349 | *hw_p = ehci->dummy->qh_dma; | ||
2339 | type = Q_NEXT_TYPE(ehci, q.itd->hw_next); | 2350 | type = Q_NEXT_TYPE(ehci, q.itd->hw_next); |
2340 | wmb(); | 2351 | wmb(); |
2341 | modified = itd_complete (ehci, q.itd); | 2352 | modified = itd_complete (ehci, q.itd); |
@@ -2368,7 +2379,11 @@ restart: | |||
2368 | * URB completion. | 2379 | * URB completion. |
2369 | */ | 2380 | */ |
2370 | *q_p = q.sitd->sitd_next; | 2381 | *q_p = q.sitd->sitd_next; |
2371 | *hw_p = q.sitd->hw_next; | 2382 | if (!ehci->use_dummy_qh || |
2383 | q.sitd->hw_next != EHCI_LIST_END(ehci)) | ||
2384 | *hw_p = q.sitd->hw_next; | ||
2385 | else | ||
2386 | *hw_p = ehci->dummy->qh_dma; | ||
2372 | type = Q_NEXT_TYPE(ehci, q.sitd->hw_next); | 2387 | type = Q_NEXT_TYPE(ehci, q.sitd->hw_next); |
2373 | wmb(); | 2388 | wmb(); |
2374 | modified = sitd_complete (ehci, q.sitd); | 2389 | modified = sitd_complete (ehci, q.sitd); |
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h index bde823f704e9..ba8eab366b82 100644 --- a/drivers/usb/host/ehci.h +++ b/drivers/usb/host/ehci.h | |||
@@ -73,6 +73,7 @@ struct ehci_hcd { /* one per controller */ | |||
73 | 73 | ||
74 | /* async schedule support */ | 74 | /* async schedule support */ |
75 | struct ehci_qh *async; | 75 | struct ehci_qh *async; |
76 | struct ehci_qh *dummy; /* For AMD quirk use */ | ||
76 | struct ehci_qh *reclaim; | 77 | struct ehci_qh *reclaim; |
77 | unsigned scanning : 1; | 78 | unsigned scanning : 1; |
78 | 79 | ||
@@ -131,6 +132,7 @@ struct ehci_hcd { /* one per controller */ | |||
131 | unsigned need_io_watchdog:1; | 132 | unsigned need_io_watchdog:1; |
132 | unsigned broken_periodic:1; | 133 | unsigned broken_periodic:1; |
133 | unsigned fs_i_thresh:1; /* Intel iso scheduling */ | 134 | unsigned fs_i_thresh:1; /* Intel iso scheduling */ |
135 | unsigned use_dummy_qh:1; /* AMD Frame List table quirk */ | ||
134 | 136 | ||
135 | /* required for usb32 quirk */ | 137 | /* required for usb32 quirk */ |
136 | #define OHCI_CTRL_HCFS (3 << 6) | 138 | #define OHCI_CTRL_HCFS (3 << 6) |
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c index 6c4fb4efb4bb..43a39eb56cc6 100644 --- a/drivers/usb/host/isp1362-hcd.c +++ b/drivers/usb/host/isp1362-hcd.c | |||
@@ -2683,7 +2683,7 @@ static int __devexit isp1362_remove(struct platform_device *pdev) | |||
2683 | return 0; | 2683 | return 0; |
2684 | } | 2684 | } |
2685 | 2685 | ||
2686 | static int __init isp1362_probe(struct platform_device *pdev) | 2686 | static int __devinit isp1362_probe(struct platform_device *pdev) |
2687 | { | 2687 | { |
2688 | struct usb_hcd *hcd; | 2688 | struct usb_hcd *hcd; |
2689 | struct isp1362_hcd *isp1362_hcd; | 2689 | struct isp1362_hcd *isp1362_hcd; |
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 202770676da3..d178761c3981 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
@@ -1045,7 +1045,7 @@ static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci, | |||
1045 | if (udev->speed == USB_SPEED_SUPER) | 1045 | if (udev->speed == USB_SPEED_SUPER) |
1046 | return ep->ss_ep_comp.wBytesPerInterval; | 1046 | return ep->ss_ep_comp.wBytesPerInterval; |
1047 | 1047 | ||
1048 | max_packet = ep->desc.wMaxPacketSize & 0x3ff; | 1048 | max_packet = GET_MAX_PACKET(ep->desc.wMaxPacketSize); |
1049 | max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11; | 1049 | max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11; |
1050 | /* A 0 in max burst means 1 transfer per ESIT */ | 1050 | /* A 0 in max burst means 1 transfer per ESIT */ |
1051 | return max_packet * (max_burst + 1); | 1051 | return max_packet * (max_burst + 1); |
@@ -1135,7 +1135,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, | |||
1135 | /* Fall through */ | 1135 | /* Fall through */ |
1136 | case USB_SPEED_FULL: | 1136 | case USB_SPEED_FULL: |
1137 | case USB_SPEED_LOW: | 1137 | case USB_SPEED_LOW: |
1138 | max_packet = ep->desc.wMaxPacketSize & 0x3ff; | 1138 | max_packet = GET_MAX_PACKET(ep->desc.wMaxPacketSize); |
1139 | ep_ctx->ep_info2 |= MAX_PACKET(max_packet); | 1139 | ep_ctx->ep_info2 |= MAX_PACKET(max_packet); |
1140 | break; | 1140 | break; |
1141 | default: | 1141 | default: |
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 9f3115e729b1..df558f6f84e3 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
@@ -2104,7 +2104,6 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) | |||
2104 | 2104 | ||
2105 | if (!(status & STS_EINT)) { | 2105 | if (!(status & STS_EINT)) { |
2106 | spin_unlock(&xhci->lock); | 2106 | spin_unlock(&xhci->lock); |
2107 | xhci_warn(xhci, "Spurious interrupt.\n"); | ||
2108 | return IRQ_NONE; | 2107 | return IRQ_NONE; |
2109 | } | 2108 | } |
2110 | xhci_dbg(xhci, "op reg status = %08x\n", status); | 2109 | xhci_dbg(xhci, "op reg status = %08x\n", status); |
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 5d7d4e951ea4..06fca0835b52 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
@@ -577,6 +577,65 @@ static void xhci_restore_registers(struct xhci_hcd *xhci) | |||
577 | xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base); | 577 | xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base); |
578 | } | 578 | } |
579 | 579 | ||
580 | static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci) | ||
581 | { | ||
582 | u64 val_64; | ||
583 | |||
584 | /* step 2: initialize command ring buffer */ | ||
585 | val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); | ||
586 | val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | | ||
587 | (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, | ||
588 | xhci->cmd_ring->dequeue) & | ||
589 | (u64) ~CMD_RING_RSVD_BITS) | | ||
590 | xhci->cmd_ring->cycle_state; | ||
591 | xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n", | ||
592 | (long unsigned long) val_64); | ||
593 | xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); | ||
594 | } | ||
595 | |||
596 | /* | ||
597 | * The whole command ring must be cleared to zero when we suspend the host. | ||
598 | * | ||
599 | * The host doesn't save the command ring pointer in the suspend well, so we | ||
600 | * need to re-program it on resume. Unfortunately, the pointer must be 64-byte | ||
601 | * aligned, because of the reserved bits in the command ring dequeue pointer | ||
602 | * register. Therefore, we can't just set the dequeue pointer back in the | ||
603 | * middle of the ring (TRBs are 16-byte aligned). | ||
604 | */ | ||
605 | static void xhci_clear_command_ring(struct xhci_hcd *xhci) | ||
606 | { | ||
607 | struct xhci_ring *ring; | ||
608 | struct xhci_segment *seg; | ||
609 | |||
610 | ring = xhci->cmd_ring; | ||
611 | seg = ring->deq_seg; | ||
612 | do { | ||
613 | memset(seg->trbs, 0, SEGMENT_SIZE); | ||
614 | seg = seg->next; | ||
615 | } while (seg != ring->deq_seg); | ||
616 | |||
617 | /* Reset the software enqueue and dequeue pointers */ | ||
618 | ring->deq_seg = ring->first_seg; | ||
619 | ring->dequeue = ring->first_seg->trbs; | ||
620 | ring->enq_seg = ring->deq_seg; | ||
621 | ring->enqueue = ring->dequeue; | ||
622 | |||
623 | /* | ||
624 | * Ring is now zeroed, so the HW should look for change of ownership | ||
625 | * when the cycle bit is set to 1. | ||
626 | */ | ||
627 | ring->cycle_state = 1; | ||
628 | |||
629 | /* | ||
630 | * Reset the hardware dequeue pointer. | ||
631 | * Yes, this will need to be re-written after resume, but we're paranoid | ||
632 | * and want to make sure the hardware doesn't access bogus memory | ||
633 | * because, say, the BIOS or an SMI started the host without changing | ||
634 | * the command ring pointers. | ||
635 | */ | ||
636 | xhci_set_cmd_ring_deq(xhci); | ||
637 | } | ||
638 | |||
580 | /* | 639 | /* |
581 | * Stop HC (not bus-specific) | 640 | * Stop HC (not bus-specific) |
582 | * | 641 | * |
@@ -604,6 +663,7 @@ int xhci_suspend(struct xhci_hcd *xhci) | |||
604 | spin_unlock_irq(&xhci->lock); | 663 | spin_unlock_irq(&xhci->lock); |
605 | return -ETIMEDOUT; | 664 | return -ETIMEDOUT; |
606 | } | 665 | } |
666 | xhci_clear_command_ring(xhci); | ||
607 | 667 | ||
608 | /* step 3: save registers */ | 668 | /* step 3: save registers */ |
609 | xhci_save_registers(xhci); | 669 | xhci_save_registers(xhci); |
@@ -635,7 +695,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) | |||
635 | u32 command, temp = 0; | 695 | u32 command, temp = 0; |
636 | struct usb_hcd *hcd = xhci_to_hcd(xhci); | 696 | struct usb_hcd *hcd = xhci_to_hcd(xhci); |
637 | struct pci_dev *pdev = to_pci_dev(hcd->self.controller); | 697 | struct pci_dev *pdev = to_pci_dev(hcd->self.controller); |
638 | u64 val_64; | ||
639 | int old_state, retval; | 698 | int old_state, retval; |
640 | 699 | ||
641 | old_state = hcd->state; | 700 | old_state = hcd->state; |
@@ -648,15 +707,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) | |||
648 | /* step 1: restore register */ | 707 | /* step 1: restore register */ |
649 | xhci_restore_registers(xhci); | 708 | xhci_restore_registers(xhci); |
650 | /* step 2: initialize command ring buffer */ | 709 | /* step 2: initialize command ring buffer */ |
651 | val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); | 710 | xhci_set_cmd_ring_deq(xhci); |
652 | val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | | ||
653 | (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, | ||
654 | xhci->cmd_ring->dequeue) & | ||
655 | (u64) ~CMD_RING_RSVD_BITS) | | ||
656 | xhci->cmd_ring->cycle_state; | ||
657 | xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n", | ||
658 | (long unsigned long) val_64); | ||
659 | xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); | ||
660 | /* step 3: restore state and start state*/ | 711 | /* step 3: restore state and start state*/ |
661 | /* step 3: set CRS flag */ | 712 | /* step 3: set CRS flag */ |
662 | command = xhci_readl(xhci, &xhci->op_regs->command); | 713 | command = xhci_readl(xhci, &xhci->op_regs->command); |
@@ -714,6 +765,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) | |||
714 | return retval; | 765 | return retval; |
715 | } | 766 | } |
716 | 767 | ||
768 | spin_unlock_irq(&xhci->lock); | ||
717 | /* Re-setup MSI-X */ | 769 | /* Re-setup MSI-X */ |
718 | if (hcd->irq) | 770 | if (hcd->irq) |
719 | free_irq(hcd->irq, hcd); | 771 | free_irq(hcd->irq, hcd); |
@@ -736,6 +788,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) | |||
736 | hcd->irq = pdev->irq; | 788 | hcd->irq = pdev->irq; |
737 | } | 789 | } |
738 | 790 | ||
791 | spin_lock_irq(&xhci->lock); | ||
739 | /* step 4: set Run/Stop bit */ | 792 | /* step 4: set Run/Stop bit */ |
740 | command = xhci_readl(xhci, &xhci->op_regs->command); | 793 | command = xhci_readl(xhci, &xhci->op_regs->command); |
741 | command |= CMD_RUN; | 794 | command |= CMD_RUN; |
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 93d3bf4d213c..85e65647d445 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h | |||
@@ -621,6 +621,11 @@ struct xhci_ep_ctx { | |||
621 | #define MAX_PACKET_MASK (0xffff << 16) | 621 | #define MAX_PACKET_MASK (0xffff << 16) |
622 | #define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff) | 622 | #define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff) |
623 | 623 | ||
624 | /* Get max packet size from ep desc. Bits 10..0 specify the max packet size. | ||
625 | * USB 2.0 spec, section 9.6.6. | ||
626 | */ | ||
627 | #define GET_MAX_PACKET(p) ((p) & 0x7ff) | ||
628 | |||
624 | /* tx_info bitmasks */ | 629 | /* tx_info bitmasks */ |
625 | #define AVG_TRB_LENGTH_FOR_EP(p) ((p) & 0xffff) | 630 | #define AVG_TRB_LENGTH_FOR_EP(p) ((p) & 0xffff) |
626 | #define MAX_ESIT_PAYLOAD_FOR_EP(p) (((p) & 0xffff) << 16) | 631 | #define MAX_ESIT_PAYLOAD_FOR_EP(p) (((p) & 0xffff) << 16) |
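
A short worked example of what the new mask fixes (values below are illustrative, not taken from the patch): for a high-speed, high-bandwidth endpoint with wMaxPacketSize = 0x1400, bits 10..0 give a 1024-byte packet and bits 12..11 give two additional transactions per microframe. The old 0x3ff mask drops bit 10 and yields 0, while GET_MAX_PACKET() keeps all eleven size bits. The check below is a stand-alone user-space sketch.

    #include <stdio.h>

    #define GET_MAX_PACKET(p)  ((p) & 0x7ff)    /* bits 10..0 of wMaxPacketSize */

    int main(void)
    {
            unsigned short w = 0x1400;  /* 1024-byte packets, 2 extra transactions/uframe */

            printf("old 0x3ff mask : %u\n", w & 0x3ff);          /* 0    -- bit 10 lost */
            printf("GET_MAX_PACKET : %u\n", GET_MAX_PACKET(w));  /* 1024 -- correct     */
            return 0;
    }
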
diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c index 2f43c57743c9..9251773ecef4 100644 --- a/drivers/usb/misc/cypress_cy7c63.c +++ b/drivers/usb/misc/cypress_cy7c63.c | |||
@@ -196,11 +196,9 @@ static ssize_t get_port1_handler(struct device *dev, | |||
196 | return read_port(dev, attr, buf, 1, CYPRESS_READ_PORT_ID1); | 196 | return read_port(dev, attr, buf, 1, CYPRESS_READ_PORT_ID1); |
197 | } | 197 | } |
198 | 198 | ||
199 | static DEVICE_ATTR(port0, S_IWUGO | S_IRUGO, | 199 | static DEVICE_ATTR(port0, S_IRUGO | S_IWUSR, get_port0_handler, set_port0_handler); |
200 | get_port0_handler, set_port0_handler); | ||
201 | 200 | ||
202 | static DEVICE_ATTR(port1, S_IWUGO | S_IRUGO, | 201 | static DEVICE_ATTR(port1, S_IRUGO | S_IWUSR, get_port1_handler, set_port1_handler); |
203 | get_port1_handler, set_port1_handler); | ||
204 | 202 | ||
205 | 203 | ||
206 | static int cypress_probe(struct usb_interface *interface, | 204 | static int cypress_probe(struct usb_interface *interface, |
diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c index d77aba46ae85..f63776a48e2a 100644 --- a/drivers/usb/misc/trancevibrator.c +++ b/drivers/usb/misc/trancevibrator.c | |||
@@ -86,7 +86,7 @@ static ssize_t set_speed(struct device *dev, struct device_attribute *attr, | |||
86 | return count; | 86 | return count; |
87 | } | 87 | } |
88 | 88 | ||
89 | static DEVICE_ATTR(speed, S_IWUGO | S_IRUGO, show_speed, set_speed); | 89 | static DEVICE_ATTR(speed, S_IRUGO | S_IWUSR, show_speed, set_speed); |
90 | 90 | ||
91 | static int tv_probe(struct usb_interface *interface, | 91 | static int tv_probe(struct usb_interface *interface, |
92 | const struct usb_device_id *id) | 92 | const struct usb_device_id *id) |
diff --git a/drivers/usb/misc/usbled.c b/drivers/usb/misc/usbled.c index 63da2c3c838f..c96f51de1696 100644 --- a/drivers/usb/misc/usbled.c +++ b/drivers/usb/misc/usbled.c | |||
@@ -94,7 +94,7 @@ static ssize_t set_##value(struct device *dev, struct device_attribute *attr, co | |||
94 | change_color(led); \ | 94 | change_color(led); \ |
95 | return count; \ | 95 | return count; \ |
96 | } \ | 96 | } \ |
97 | static DEVICE_ATTR(value, S_IWUGO | S_IRUGO, show_##value, set_##value); | 97 | static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, show_##value, set_##value); |
98 | show_set(blue); | 98 | show_set(blue); |
99 | show_set(red); | 99 | show_set(red); |
100 | show_set(green); | 100 | show_set(green); |
diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c index de8ef945b536..417b8f207e8b 100644 --- a/drivers/usb/misc/usbsevseg.c +++ b/drivers/usb/misc/usbsevseg.c | |||
@@ -192,7 +192,7 @@ static ssize_t set_attr_##name(struct device *dev, \ | |||
192 | \ | 192 | \ |
193 | return count; \ | 193 | return count; \ |
194 | } \ | 194 | } \ |
195 | static DEVICE_ATTR(name, S_IWUGO | S_IRUGO, show_attr_##name, set_attr_##name); | 195 | static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_attr_##name, set_attr_##name); |
196 | 196 | ||
197 | static ssize_t show_attr_text(struct device *dev, | 197 | static ssize_t show_attr_text(struct device *dev, |
198 | struct device_attribute *attr, char *buf) | 198 | struct device_attribute *attr, char *buf) |
@@ -223,7 +223,7 @@ static ssize_t set_attr_text(struct device *dev, | |||
223 | return count; | 223 | return count; |
224 | } | 224 | } |
225 | 225 | ||
226 | static DEVICE_ATTR(text, S_IWUGO | S_IRUGO, show_attr_text, set_attr_text); | 226 | static DEVICE_ATTR(text, S_IRUGO | S_IWUSR, show_attr_text, set_attr_text); |
227 | 227 | ||
228 | static ssize_t show_attr_decimals(struct device *dev, | 228 | static ssize_t show_attr_decimals(struct device *dev, |
229 | struct device_attribute *attr, char *buf) | 229 | struct device_attribute *attr, char *buf) |
@@ -272,8 +272,7 @@ static ssize_t set_attr_decimals(struct device *dev, | |||
272 | return count; | 272 | return count; |
273 | } | 273 | } |
274 | 274 | ||
275 | static DEVICE_ATTR(decimals, S_IWUGO | S_IRUGO, | 275 | static DEVICE_ATTR(decimals, S_IRUGO | S_IWUSR, show_attr_decimals, set_attr_decimals); |
276 | show_attr_decimals, set_attr_decimals); | ||
277 | 276 | ||
278 | static ssize_t show_attr_textmode(struct device *dev, | 277 | static ssize_t show_attr_textmode(struct device *dev, |
279 | struct device_attribute *attr, char *buf) | 278 | struct device_attribute *attr, char *buf) |
@@ -319,8 +318,7 @@ static ssize_t set_attr_textmode(struct device *dev, | |||
319 | return -EINVAL; | 318 | return -EINVAL; |
320 | } | 319 | } |
321 | 320 | ||
322 | static DEVICE_ATTR(textmode, S_IWUGO | S_IRUGO, | 321 | static DEVICE_ATTR(textmode, S_IRUGO | S_IWUSR, show_attr_textmode, set_attr_textmode); |
323 | show_attr_textmode, set_attr_textmode); | ||
324 | 322 | ||
325 | 323 | ||
326 | MYDEV_ATTR_SIMPLE_UNSIGNED(powered, update_display_powered); | 324 | MYDEV_ATTR_SIMPLE_UNSIGNED(powered, update_display_powered); |
diff --git a/drivers/usb/otg/langwell_otg.c b/drivers/usb/otg/langwell_otg.c index bdc3ea66be69..9fea48264fa2 100644 --- a/drivers/usb/otg/langwell_otg.c +++ b/drivers/usb/otg/langwell_otg.c | |||
@@ -1896,7 +1896,7 @@ set_a_bus_req(struct device *dev, struct device_attribute *attr, | |||
1896 | } | 1896 | } |
1897 | return count; | 1897 | return count; |
1898 | } | 1898 | } |
1899 | static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUGO, get_a_bus_req, set_a_bus_req); | 1899 | static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUSR, get_a_bus_req, set_a_bus_req); |
1900 | 1900 | ||
1901 | static ssize_t | 1901 | static ssize_t |
1902 | get_a_bus_drop(struct device *dev, struct device_attribute *attr, char *buf) | 1902 | get_a_bus_drop(struct device *dev, struct device_attribute *attr, char *buf) |
@@ -1942,8 +1942,7 @@ set_a_bus_drop(struct device *dev, struct device_attribute *attr, | |||
1942 | } | 1942 | } |
1943 | return count; | 1943 | return count; |
1944 | } | 1944 | } |
1945 | static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUGO, | 1945 | static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUSR, get_a_bus_drop, set_a_bus_drop); |
1946 | get_a_bus_drop, set_a_bus_drop); | ||
1947 | 1946 | ||
1948 | static ssize_t | 1947 | static ssize_t |
1949 | get_b_bus_req(struct device *dev, struct device_attribute *attr, char *buf) | 1948 | get_b_bus_req(struct device *dev, struct device_attribute *attr, char *buf) |
@@ -1988,7 +1987,7 @@ set_b_bus_req(struct device *dev, struct device_attribute *attr, | |||
1988 | } | 1987 | } |
1989 | return count; | 1988 | return count; |
1990 | } | 1989 | } |
1991 | static DEVICE_ATTR(b_bus_req, S_IRUGO | S_IWUGO, get_b_bus_req, set_b_bus_req); | 1990 | static DEVICE_ATTR(b_bus_req, S_IRUGO | S_IWUSR, get_b_bus_req, set_b_bus_req); |
1992 | 1991 | ||
1993 | static ssize_t | 1992 | static ssize_t |
1994 | set_a_clr_err(struct device *dev, struct device_attribute *attr, | 1993 | set_a_clr_err(struct device *dev, struct device_attribute *attr, |
@@ -2012,7 +2011,7 @@ set_a_clr_err(struct device *dev, struct device_attribute *attr, | |||
2012 | } | 2011 | } |
2013 | return count; | 2012 | return count; |
2014 | } | 2013 | } |
2015 | static DEVICE_ATTR(a_clr_err, S_IWUGO, NULL, set_a_clr_err); | 2014 | static DEVICE_ATTR(a_clr_err, S_IWUSR, NULL, set_a_clr_err); |
2016 | 2015 | ||
2017 | static struct attribute *inputs_attrs[] = { | 2016 | static struct attribute *inputs_attrs[] = { |
2018 | &dev_attr_a_bus_req.attr, | 2017 | &dev_attr_a_bus_req.attr, |
diff --git a/drivers/usb/storage/sierra_ms.c b/drivers/usb/storage/sierra_ms.c index 57fc2f532cab..ceba512f84d0 100644 --- a/drivers/usb/storage/sierra_ms.c +++ b/drivers/usb/storage/sierra_ms.c | |||
@@ -121,7 +121,7 @@ static ssize_t show_truinst(struct device *dev, struct device_attribute *attr, | |||
121 | } | 121 | } |
122 | return result; | 122 | return result; |
123 | } | 123 | } |
124 | static DEVICE_ATTR(truinst, S_IWUGO | S_IRUGO, show_truinst, NULL); | 124 | static DEVICE_ATTR(truinst, S_IRUGO, show_truinst, NULL); |
125 | 125 | ||
126 | int sierra_ms_init(struct us_data *us) | 126 | int sierra_ms_init(struct us_data *us) |
127 | { | 127 | { |
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index e207810bba3c..08703299ef61 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c | |||
@@ -197,12 +197,12 @@ static int backlight_suspend(struct device *dev, pm_message_t state) | |||
197 | { | 197 | { |
198 | struct backlight_device *bd = to_backlight_device(dev); | 198 | struct backlight_device *bd = to_backlight_device(dev); |
199 | 199 | ||
200 | if (bd->ops->options & BL_CORE_SUSPENDRESUME) { | 200 | mutex_lock(&bd->ops_lock); |
201 | mutex_lock(&bd->ops_lock); | 201 | if (bd->ops && bd->ops->options & BL_CORE_SUSPENDRESUME) { |
202 | bd->props.state |= BL_CORE_SUSPENDED; | 202 | bd->props.state |= BL_CORE_SUSPENDED; |
203 | backlight_update_status(bd); | 203 | backlight_update_status(bd); |
204 | mutex_unlock(&bd->ops_lock); | ||
205 | } | 204 | } |
205 | mutex_unlock(&bd->ops_lock); | ||
206 | 206 | ||
207 | return 0; | 207 | return 0; |
208 | } | 208 | } |
@@ -211,12 +211,12 @@ static int backlight_resume(struct device *dev) | |||
211 | { | 211 | { |
212 | struct backlight_device *bd = to_backlight_device(dev); | 212 | struct backlight_device *bd = to_backlight_device(dev); |
213 | 213 | ||
214 | if (bd->ops->options & BL_CORE_SUSPENDRESUME) { | 214 | mutex_lock(&bd->ops_lock); |
215 | mutex_lock(&bd->ops_lock); | 215 | if (bd->ops && bd->ops->options & BL_CORE_SUSPENDRESUME) { |
216 | bd->props.state &= ~BL_CORE_SUSPENDED; | 216 | bd->props.state &= ~BL_CORE_SUSPENDED; |
217 | backlight_update_status(bd); | 217 | backlight_update_status(bd); |
218 | mutex_unlock(&bd->ops_lock); | ||
219 | } | 218 | } |
219 | mutex_unlock(&bd->ops_lock); | ||
220 | 220 | ||
221 | return 0; | 221 | return 0; |
222 | } | 222 | } |
diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c index f53b9f1d6aba..affdf3e32cf3 100644 --- a/drivers/video/fbcmap.c +++ b/drivers/video/fbcmap.c | |||
@@ -88,34 +88,48 @@ static const struct fb_cmap default_16_colors = { | |||
88 | * | 88 | * |
89 | */ | 89 | */ |
90 | 90 | ||
91 | int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp) | 91 | int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags) |
92 | { | 92 | { |
93 | int size = len*sizeof(u16); | 93 | int size = len * sizeof(u16); |
94 | 94 | int ret = -ENOMEM; | |
95 | if (cmap->len != len) { | 95 | |
96 | fb_dealloc_cmap(cmap); | 96 | if (cmap->len != len) { |
97 | if (!len) | 97 | fb_dealloc_cmap(cmap); |
98 | return 0; | 98 | if (!len) |
99 | if (!(cmap->red = kmalloc(size, GFP_ATOMIC))) | 99 | return 0; |
100 | goto fail; | 100 | |
101 | if (!(cmap->green = kmalloc(size, GFP_ATOMIC))) | 101 | cmap->red = kmalloc(size, flags); |
102 | goto fail; | 102 | if (!cmap->red) |
103 | if (!(cmap->blue = kmalloc(size, GFP_ATOMIC))) | 103 | goto fail; |
104 | goto fail; | 104 | cmap->green = kmalloc(size, flags); |
105 | if (transp) { | 105 | if (!cmap->green) |
106 | if (!(cmap->transp = kmalloc(size, GFP_ATOMIC))) | 106 | goto fail; |
107 | cmap->blue = kmalloc(size, flags); | ||
108 | if (!cmap->blue) | ||
109 | goto fail; | ||
110 | if (transp) { | ||
111 | cmap->transp = kmalloc(size, flags); | ||
112 | if (!cmap->transp) | ||
113 | goto fail; | ||
114 | } else { | ||
115 | cmap->transp = NULL; | ||
116 | } | ||
117 | } | ||
118 | cmap->start = 0; | ||
119 | cmap->len = len; | ||
120 | ret = fb_copy_cmap(fb_default_cmap(len), cmap); | ||
121 | if (ret) | ||
107 | goto fail; | 122 | goto fail; |
108 | } else | 123 | return 0; |
109 | cmap->transp = NULL; | ||
110 | } | ||
111 | cmap->start = 0; | ||
112 | cmap->len = len; | ||
113 | fb_copy_cmap(fb_default_cmap(len), cmap); | ||
114 | return 0; | ||
115 | 124 | ||
116 | fail: | 125 | fail: |
117 | fb_dealloc_cmap(cmap); | 126 | fb_dealloc_cmap(cmap); |
118 | return -ENOMEM; | 127 | return ret; |
128 | } | ||
129 | |||
130 | int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp) | ||
131 | { | ||
132 | return fb_alloc_cmap_gfp(cmap, len, transp, GFP_ATOMIC); | ||
119 | } | 133 | } |
120 | 134 | ||
121 | /** | 135 | /** |
@@ -250,8 +264,12 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info) | |||
250 | int rc, size = cmap->len * sizeof(u16); | 264 | int rc, size = cmap->len * sizeof(u16); |
251 | struct fb_cmap umap; | 265 | struct fb_cmap umap; |
252 | 266 | ||
267 | if (size < 0 || size < cmap->len) | ||
268 | return -E2BIG; | ||
269 | |||
253 | memset(&umap, 0, sizeof(struct fb_cmap)); | 270 | memset(&umap, 0, sizeof(struct fb_cmap)); |
254 | rc = fb_alloc_cmap(&umap, cmap->len, cmap->transp != NULL); | 271 | rc = fb_alloc_cmap_gfp(&umap, cmap->len, cmap->transp != NULL, |
272 | GFP_KERNEL); | ||
255 | if (rc) | 273 | if (rc) |
256 | return rc; | 274 | return rc; |
257 | if (copy_from_user(umap.red, cmap->red, size) || | 275 | if (copy_from_user(umap.red, cmap->red, size) || |
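
The new fb_alloc_cmap_gfp() lets callers that may sleep request GFP_KERNEL allocations instead of the old unconditional GFP_ATOMIC. A hedged usage sketch for a driver probe path follows; the 256-entry length and the assumption that the declaration is visible via <linux/fb.h> are illustrative, not part of this patch.

    #include <linux/fb.h>
    #include <linux/gfp.h>

    static int example_init_cmap(struct fb_info *info)
    {
            /* Probe context can sleep, so prefer GFP_KERNEL over GFP_ATOMIC. */
            int ret = fb_alloc_cmap_gfp(&info->cmap, 256, 0, GFP_KERNEL);

            if (ret)
                    return ret;

            /* ... on teardown: fb_dealloc_cmap(&info->cmap); */
            return 0;
    }
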
diff --git a/drivers/video/geode/lxfb_ops.c b/drivers/video/geode/lxfb_ops.c index bc35a95e59d4..85ec7f64c42a 100644 --- a/drivers/video/geode/lxfb_ops.c +++ b/drivers/video/geode/lxfb_ops.c | |||
@@ -276,10 +276,10 @@ static void lx_graphics_enable(struct fb_info *info) | |||
276 | write_fp(par, FP_PT1, 0); | 276 | write_fp(par, FP_PT1, 0); |
277 | temp = FP_PT2_SCRC; | 277 | temp = FP_PT2_SCRC; |
278 | 278 | ||
279 | if (info->var.sync & FB_SYNC_HOR_HIGH_ACT) | 279 | if (!(info->var.sync & FB_SYNC_HOR_HIGH_ACT)) |
280 | temp |= FP_PT2_HSP; | 280 | temp |= FP_PT2_HSP; |
281 | 281 | ||
282 | if (info->var.sync & FB_SYNC_VERT_HIGH_ACT) | 282 | if (!(info->var.sync & FB_SYNC_VERT_HIGH_ACT)) |
283 | temp |= FP_PT2_VSP; | 283 | temp |= FP_PT2_VSP; |
284 | 284 | ||
285 | write_fp(par, FP_PT2, temp); | 285 | write_fp(par, FP_PT2, temp); |
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c index 7cfc170bce19..ca0f6be9d12e 100644 --- a/drivers/video/mx3fb.c +++ b/drivers/video/mx3fb.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/clk.h> | 27 | #include <linux/clk.h> |
28 | #include <linux/mutex.h> | 28 | #include <linux/mutex.h> |
29 | 29 | ||
30 | #include <mach/dma.h> | ||
30 | #include <mach/hardware.h> | 31 | #include <mach/hardware.h> |
31 | #include <mach/ipu.h> | 32 | #include <mach/ipu.h> |
32 | #include <mach/mx3fb.h> | 33 | #include <mach/mx3fb.h> |
@@ -1420,6 +1421,9 @@ static bool chan_filter(struct dma_chan *chan, void *arg) | |||
1420 | struct device *dev; | 1421 | struct device *dev; |
1421 | struct mx3fb_platform_data *mx3fb_pdata; | 1422 | struct mx3fb_platform_data *mx3fb_pdata; |
1422 | 1423 | ||
1424 | if (!imx_dma_is_ipu(chan)) | ||
1425 | return false; | ||
1426 | |||
1423 | if (!rq) | 1427 | if (!rq) |
1424 | return false; | 1428 | return false; |
1425 | 1429 | ||
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index 9b1364723c65..b02d97a879d6 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c | |||
@@ -860,7 +860,7 @@ static void sh_mobile_fb_reconfig(struct fb_info *info) | |||
860 | /* Couldn't reconfigure, hopefully, can continue as before */ | 860 | /* Couldn't reconfigure, hopefully, can continue as before */ |
861 | return; | 861 | return; |
862 | 862 | ||
863 | info->fix.line_length = mode2.xres * (ch->cfg.bpp / 8); | 863 | info->fix.line_length = mode1.xres * (ch->cfg.bpp / 8); |
864 | 864 | ||
865 | /* | 865 | /* |
866 | * fb_set_var() calls the notifier change internally, only if | 866 | * fb_set_var() calls the notifier change internally, only if |
@@ -868,7 +868,7 @@ static void sh_mobile_fb_reconfig(struct fb_info *info) | |||
868 | * user event, we have to call the chain ourselves. | 868 | * user event, we have to call the chain ourselves. |
869 | */ | 869 | */ |
870 | event.info = info; | 870 | event.info = info; |
871 | event.data = &mode2; | 871 | event.data = &mode1; |
872 | fb_notifier_call_chain(evnt, &event); | 872 | fb_notifier_call_chain(evnt, &event); |
873 | } | 873 | } |
874 | 874 | ||
diff --git a/drivers/video/sis/init.c b/drivers/video/sis/init.c index c311ad3c3687..31137adc8fba 100644 --- a/drivers/video/sis/init.c +++ b/drivers/video/sis/init.c | |||
@@ -62,11 +62,11 @@ | |||
62 | 62 | ||
63 | #include "init.h" | 63 | #include "init.h" |
64 | 64 | ||
65 | #ifdef SIS300 | 65 | #ifdef CONFIG_FB_SIS_300 |
66 | #include "300vtbl.h" | 66 | #include "300vtbl.h" |
67 | #endif | 67 | #endif |
68 | 68 | ||
69 | #ifdef SIS315H | 69 | #ifdef CONFIG_FB_SIS_315 |
70 | #include "310vtbl.h" | 70 | #include "310vtbl.h" |
71 | #endif | 71 | #endif |
72 | 72 | ||
@@ -78,7 +78,7 @@ | |||
78 | /* POINTER INITIALIZATION */ | 78 | /* POINTER INITIALIZATION */ |
79 | /*********************************************/ | 79 | /*********************************************/ |
80 | 80 | ||
81 | #if defined(SIS300) || defined(SIS315H) | 81 | #if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) |
82 | static void | 82 | static void |
83 | InitCommonPointer(struct SiS_Private *SiS_Pr) | 83 | InitCommonPointer(struct SiS_Private *SiS_Pr) |
84 | { | 84 | { |
@@ -160,7 +160,7 @@ InitCommonPointer(struct SiS_Private *SiS_Pr) | |||
160 | } | 160 | } |
161 | #endif | 161 | #endif |
162 | 162 | ||
163 | #ifdef SIS300 | 163 | #ifdef CONFIG_FB_SIS_300 |
164 | static void | 164 | static void |
165 | InitTo300Pointer(struct SiS_Private *SiS_Pr) | 165 | InitTo300Pointer(struct SiS_Private *SiS_Pr) |
166 | { | 166 | { |
@@ -237,7 +237,7 @@ InitTo300Pointer(struct SiS_Private *SiS_Pr) | |||
237 | } | 237 | } |
238 | #endif | 238 | #endif |
239 | 239 | ||
240 | #ifdef SIS315H | 240 | #ifdef CONFIG_FB_SIS_315 |
241 | static void | 241 | static void |
242 | InitTo310Pointer(struct SiS_Private *SiS_Pr) | 242 | InitTo310Pointer(struct SiS_Private *SiS_Pr) |
243 | { | 243 | { |
@@ -321,13 +321,13 @@ bool | |||
321 | SiSInitPtr(struct SiS_Private *SiS_Pr) | 321 | SiSInitPtr(struct SiS_Private *SiS_Pr) |
322 | { | 322 | { |
323 | if(SiS_Pr->ChipType < SIS_315H) { | 323 | if(SiS_Pr->ChipType < SIS_315H) { |
324 | #ifdef SIS300 | 324 | #ifdef CONFIG_FB_SIS_300 |
325 | InitTo300Pointer(SiS_Pr); | 325 | InitTo300Pointer(SiS_Pr); |
326 | #else | 326 | #else |
327 | return false; | 327 | return false; |
328 | #endif | 328 | #endif |
329 | } else { | 329 | } else { |
330 | #ifdef SIS315H | 330 | #ifdef CONFIG_FB_SIS_315 |
331 | InitTo310Pointer(SiS_Pr); | 331 | InitTo310Pointer(SiS_Pr); |
332 | #else | 332 | #else |
333 | return false; | 333 | return false; |
@@ -340,9 +340,7 @@ SiSInitPtr(struct SiS_Private *SiS_Pr) | |||
340 | /* HELPER: Get ModeID */ | 340 | /* HELPER: Get ModeID */ |
341 | /*********************************************/ | 341 | /*********************************************/ |
342 | 342 | ||
343 | #ifndef SIS_XORG_XF86 | ||
344 | static | 343 | static |
345 | #endif | ||
346 | unsigned short | 344 | unsigned short |
347 | SiS_GetModeID(int VGAEngine, unsigned int VBFlags, int HDisplay, int VDisplay, | 345 | SiS_GetModeID(int VGAEngine, unsigned int VBFlags, int HDisplay, int VDisplay, |
348 | int Depth, bool FSTN, int LCDwidth, int LCDheight) | 346 | int Depth, bool FSTN, int LCDwidth, int LCDheight) |
@@ -884,51 +882,51 @@ SiS_GetModeID_VGA2(int VGAEngine, unsigned int VBFlags, int HDisplay, int VDispl | |||
884 | void | 882 | void |
885 | SiS_SetReg(SISIOADDRESS port, unsigned short index, unsigned short data) | 883 | SiS_SetReg(SISIOADDRESS port, unsigned short index, unsigned short data) |
886 | { | 884 | { |
887 | OutPortByte(port, index); | 885 | outb((u8)index, port); |
888 | OutPortByte(port + 1, data); | 886 | outb((u8)data, port + 1); |
889 | } | 887 | } |
890 | 888 | ||
891 | void | 889 | void |
892 | SiS_SetRegByte(SISIOADDRESS port, unsigned short data) | 890 | SiS_SetRegByte(SISIOADDRESS port, unsigned short data) |
893 | { | 891 | { |
894 | OutPortByte(port, data); | 892 | outb((u8)data, port); |
895 | } | 893 | } |
896 | 894 | ||
897 | void | 895 | void |
898 | SiS_SetRegShort(SISIOADDRESS port, unsigned short data) | 896 | SiS_SetRegShort(SISIOADDRESS port, unsigned short data) |
899 | { | 897 | { |
900 | OutPortWord(port, data); | 898 | outw((u16)data, port); |
901 | } | 899 | } |
902 | 900 | ||
903 | void | 901 | void |
904 | SiS_SetRegLong(SISIOADDRESS port, unsigned int data) | 902 | SiS_SetRegLong(SISIOADDRESS port, unsigned int data) |
905 | { | 903 | { |
906 | OutPortLong(port, data); | 904 | outl((u32)data, port); |
907 | } | 905 | } |
908 | 906 | ||
909 | unsigned char | 907 | unsigned char |
910 | SiS_GetReg(SISIOADDRESS port, unsigned short index) | 908 | SiS_GetReg(SISIOADDRESS port, unsigned short index) |
911 | { | 909 | { |
912 | OutPortByte(port, index); | 910 | outb((u8)index, port); |
913 | return(InPortByte(port + 1)); | 911 | return inb(port + 1); |
914 | } | 912 | } |
915 | 913 | ||
916 | unsigned char | 914 | unsigned char |
917 | SiS_GetRegByte(SISIOADDRESS port) | 915 | SiS_GetRegByte(SISIOADDRESS port) |
918 | { | 916 | { |
919 | return(InPortByte(port)); | 917 | return inb(port); |
920 | } | 918 | } |
921 | 919 | ||
922 | unsigned short | 920 | unsigned short |
923 | SiS_GetRegShort(SISIOADDRESS port) | 921 | SiS_GetRegShort(SISIOADDRESS port) |
924 | { | 922 | { |
925 | return(InPortWord(port)); | 923 | return inw(port); |
926 | } | 924 | } |
927 | 925 | ||
928 | unsigned int | 926 | unsigned int |
929 | SiS_GetRegLong(SISIOADDRESS port) | 927 | SiS_GetRegLong(SISIOADDRESS port) |
930 | { | 928 | { |
931 | return(InPortLong(port)); | 929 | return inl(port); |
932 | } | 930 | } |
933 | 931 | ||
934 | void | 932 | void |
@@ -1089,7 +1087,7 @@ static void | |||
1089 | SiSInitPCIetc(struct SiS_Private *SiS_Pr) | 1087 | SiSInitPCIetc(struct SiS_Private *SiS_Pr) |
1090 | { | 1088 | { |
1091 | switch(SiS_Pr->ChipType) { | 1089 | switch(SiS_Pr->ChipType) { |
1092 | #ifdef SIS300 | 1090 | #ifdef CONFIG_FB_SIS_300 |
1093 | case SIS_300: | 1091 | case SIS_300: |
1094 | case SIS_540: | 1092 | case SIS_540: |
1095 | case SIS_630: | 1093 | case SIS_630: |
@@ -1108,7 +1106,7 @@ SiSInitPCIetc(struct SiS_Private *SiS_Pr) | |||
1108 | SiS_SetRegOR(SiS_Pr->SiS_P3c4,0x1E,0x5A); | 1106 | SiS_SetRegOR(SiS_Pr->SiS_P3c4,0x1E,0x5A); |
1109 | break; | 1107 | break; |
1110 | #endif | 1108 | #endif |
1111 | #ifdef SIS315H | 1109 | #ifdef CONFIG_FB_SIS_315 |
1112 | case SIS_315H: | 1110 | case SIS_315H: |
1113 | case SIS_315: | 1111 | case SIS_315: |
1114 | case SIS_315PRO: | 1112 | case SIS_315PRO: |
@@ -1152,9 +1150,7 @@ SiSInitPCIetc(struct SiS_Private *SiS_Pr) | |||
1152 | /* HELPER: SetLVDSetc */ | 1150 | /* HELPER: SetLVDSetc */ |
1153 | /*********************************************/ | 1151 | /*********************************************/ |
1154 | 1152 | ||
1155 | #ifdef SIS_LINUX_KERNEL | ||
1156 | static | 1153 | static |
1157 | #endif | ||
1158 | void | 1154 | void |
1159 | SiSSetLVDSetc(struct SiS_Private *SiS_Pr) | 1155 | SiSSetLVDSetc(struct SiS_Private *SiS_Pr) |
1160 | { | 1156 | { |
@@ -1174,7 +1170,7 @@ SiSSetLVDSetc(struct SiS_Private *SiS_Pr) | |||
1174 | if((temp == 1) || (temp == 2)) return; | 1170 | if((temp == 1) || (temp == 2)) return; |
1175 | 1171 | ||
1176 | switch(SiS_Pr->ChipType) { | 1172 | switch(SiS_Pr->ChipType) { |
1177 | #ifdef SIS300 | 1173 | #ifdef CONFIG_FB_SIS_300 |
1178 | case SIS_540: | 1174 | case SIS_540: |
1179 | case SIS_630: | 1175 | case SIS_630: |
1180 | case SIS_730: | 1176 | case SIS_730: |
@@ -1188,7 +1184,7 @@ SiSSetLVDSetc(struct SiS_Private *SiS_Pr) | |||
1188 | } | 1184 | } |
1189 | break; | 1185 | break; |
1190 | #endif | 1186 | #endif |
1191 | #ifdef SIS315H | 1187 | #ifdef CONFIG_FB_SIS_315 |
1192 | case SIS_550: | 1188 | case SIS_550: |
1193 | case SIS_650: | 1189 | case SIS_650: |
1194 | case SIS_740: | 1190 | case SIS_740: |
@@ -1420,9 +1416,7 @@ SiS_ResetSegmentRegisters(struct SiS_Private *SiS_Pr) | |||
1420 | /* HELPER: GetVBType */ | 1416 | /* HELPER: GetVBType */ |
1421 | /*********************************************/ | 1417 | /*********************************************/ |
1422 | 1418 | ||
1423 | #ifdef SIS_LINUX_KERNEL | ||
1424 | static | 1419 | static |
1425 | #endif | ||
1426 | void | 1420 | void |
1427 | SiS_GetVBType(struct SiS_Private *SiS_Pr) | 1421 | SiS_GetVBType(struct SiS_Private *SiS_Pr) |
1428 | { | 1422 | { |
@@ -1487,7 +1481,6 @@ SiS_GetVBType(struct SiS_Private *SiS_Pr) | |||
1487 | /* HELPER: Check RAM size */ | 1481 | /* HELPER: Check RAM size */ |
1488 | /*********************************************/ | 1482 | /*********************************************/ |
1489 | 1483 | ||
1490 | #ifdef SIS_LINUX_KERNEL | ||
1491 | static bool | 1484 | static bool |
1492 | SiS_CheckMemorySize(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | 1485 | SiS_CheckMemorySize(struct SiS_Private *SiS_Pr, unsigned short ModeNo, |
1493 | unsigned short ModeIdIndex) | 1486 | unsigned short ModeIdIndex) |
@@ -1501,13 +1494,12 @@ SiS_CheckMemorySize(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
1501 | if(AdapterMemSize < memorysize) return false; | 1494 | if(AdapterMemSize < memorysize) return false; |
1502 | return true; | 1495 | return true; |
1503 | } | 1496 | } |
1504 | #endif | ||
1505 | 1497 | ||
1506 | /*********************************************/ | 1498 | /*********************************************/ |
1507 | /* HELPER: Get DRAM type */ | 1499 | /* HELPER: Get DRAM type */ |
1508 | /*********************************************/ | 1500 | /*********************************************/ |
1509 | 1501 | ||
1510 | #ifdef SIS315H | 1502 | #ifdef CONFIG_FB_SIS_315 |
1511 | static unsigned char | 1503 | static unsigned char |
1512 | SiS_Get310DRAMType(struct SiS_Private *SiS_Pr) | 1504 | SiS_Get310DRAMType(struct SiS_Private *SiS_Pr) |
1513 | { | 1505 | { |
@@ -1574,7 +1566,6 @@ SiS_GetMCLK(struct SiS_Private *SiS_Pr) | |||
1574 | /* HELPER: ClearBuffer */ | 1566 | /* HELPER: ClearBuffer */ |
1575 | /*********************************************/ | 1567 | /*********************************************/ |
1576 | 1568 | ||
1577 | #ifdef SIS_LINUX_KERNEL | ||
1578 | static void | 1569 | static void |
1579 | SiS_ClearBuffer(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | 1570 | SiS_ClearBuffer(struct SiS_Private *SiS_Pr, unsigned short ModeNo) |
1580 | { | 1571 | { |
@@ -1587,7 +1578,7 @@ SiS_ClearBuffer(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | |||
1587 | 1578 | ||
1588 | if(SiS_Pr->SiS_ModeType >= ModeEGA) { | 1579 | if(SiS_Pr->SiS_ModeType >= ModeEGA) { |
1589 | if(ModeNo > 0x13) { | 1580 | if(ModeNo > 0x13) { |
1590 | SiS_SetMemory(memaddr, memsize, 0); | 1581 | memset_io(memaddr, 0, memsize); |
1591 | } else { | 1582 | } else { |
1592 | pBuffer = (unsigned short SISIOMEMTYPE *)memaddr; | 1583 | pBuffer = (unsigned short SISIOMEMTYPE *)memaddr; |
1593 | for(i = 0; i < 0x4000; i++) writew(0x0000, &pBuffer[i]); | 1584 | for(i = 0; i < 0x4000; i++) writew(0x0000, &pBuffer[i]); |
@@ -1596,10 +1587,9 @@ SiS_ClearBuffer(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | |||
1596 | pBuffer = (unsigned short SISIOMEMTYPE *)memaddr; | 1587 | pBuffer = (unsigned short SISIOMEMTYPE *)memaddr; |
1597 | for(i = 0; i < 0x4000; i++) writew(0x0720, &pBuffer[i]); | 1588 | for(i = 0; i < 0x4000; i++) writew(0x0720, &pBuffer[i]); |
1598 | } else { | 1589 | } else { |
1599 | SiS_SetMemory(memaddr, 0x8000, 0); | 1590 | memset_io(memaddr, 0, 0x8000); |
1600 | } | 1591 | } |
1601 | } | 1592 | } |
1602 | #endif | ||
1603 | 1593 | ||
1604 | /*********************************************/ | 1594 | /*********************************************/ |
1605 | /* HELPER: SearchModeID */ | 1595 | /* HELPER: SearchModeID */ |
@@ -2132,7 +2122,7 @@ SiS_SetCRT1CRTC(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
2132 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x14,0x4F); | 2122 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x14,0x4F); |
2133 | } | 2123 | } |
2134 | 2124 | ||
2135 | #ifdef SIS315H | 2125 | #ifdef CONFIG_FB_SIS_315 |
2136 | if(SiS_Pr->ChipType == XGI_20) { | 2126 | if(SiS_Pr->ChipType == XGI_20) { |
2137 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x04,crt1data[4] - 1); | 2127 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x04,crt1data[4] - 1); |
2138 | if(!(temp = crt1data[5] & 0x1f)) { | 2128 | if(!(temp = crt1data[5] & 0x1f)) { |
@@ -2215,7 +2205,7 @@ SiS_SetCRT1VCLK(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
2215 | SiS_SetReg(SiS_Pr->SiS_P3c4,0x2c,clkb); | 2205 | SiS_SetReg(SiS_Pr->SiS_P3c4,0x2c,clkb); |
2216 | 2206 | ||
2217 | if(SiS_Pr->ChipType >= SIS_315H) { | 2207 | if(SiS_Pr->ChipType >= SIS_315H) { |
2218 | #ifdef SIS315H | 2208 | #ifdef CONFIG_FB_SIS_315 |
2219 | SiS_SetReg(SiS_Pr->SiS_P3c4,0x2D,0x01); | 2209 | SiS_SetReg(SiS_Pr->SiS_P3c4,0x2D,0x01); |
2220 | if(SiS_Pr->ChipType == XGI_20) { | 2210 | if(SiS_Pr->ChipType == XGI_20) { |
2221 | unsigned short mf = SiS_GetModeFlag(SiS_Pr, ModeNo, ModeIdIndex); | 2211 | unsigned short mf = SiS_GetModeFlag(SiS_Pr, ModeNo, ModeIdIndex); |
@@ -2236,7 +2226,7 @@ SiS_SetCRT1VCLK(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
2236 | /* FIFO */ | 2226 | /* FIFO */ |
2237 | /*********************************************/ | 2227 | /*********************************************/ |
2238 | 2228 | ||
2239 | #ifdef SIS300 | 2229 | #ifdef CONFIG_FB_SIS_300 |
2240 | void | 2230 | void |
2241 | SiS_GetFIFOThresholdIndex300(struct SiS_Private *SiS_Pr, unsigned short *idx1, | 2231 | SiS_GetFIFOThresholdIndex300(struct SiS_Private *SiS_Pr, unsigned short *idx1, |
2242 | unsigned short *idx2) | 2232 | unsigned short *idx2) |
@@ -2506,11 +2496,7 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
2506 | SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x09,0x80,data); | 2496 | SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x09,0x80,data); |
2507 | 2497 | ||
2508 | /* Write foreground and background queue */ | 2498 | /* Write foreground and background queue */ |
2509 | #ifdef SIS_LINUX_KERNEL | ||
2510 | templ = sisfb_read_nbridge_pci_dword(SiS_Pr, 0x50); | 2499 | templ = sisfb_read_nbridge_pci_dword(SiS_Pr, 0x50); |
2511 | #else | ||
2512 | templ = pciReadLong(0x00000000, 0x50); | ||
2513 | #endif | ||
2514 | 2500 | ||
2515 | if(SiS_Pr->ChipType == SIS_730) { | 2501 | if(SiS_Pr->ChipType == SIS_730) { |
2516 | 2502 | ||
@@ -2530,13 +2516,8 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
2530 | 2516 | ||
2531 | } | 2517 | } |
2532 | 2518 | ||
2533 | #ifdef SIS_LINUX_KERNEL | ||
2534 | sisfb_write_nbridge_pci_dword(SiS_Pr, 0x50, templ); | 2519 | sisfb_write_nbridge_pci_dword(SiS_Pr, 0x50, templ); |
2535 | templ = sisfb_read_nbridge_pci_dword(SiS_Pr, 0xA0); | 2520 | templ = sisfb_read_nbridge_pci_dword(SiS_Pr, 0xA0); |
2536 | #else | ||
2537 | pciWriteLong(0x00000000, 0x50, templ); | ||
2538 | templ = pciReadLong(0x00000000, 0xA0); | ||
2539 | #endif | ||
2540 | 2521 | ||
2541 | /* GUI grant timer (PCI config 0xA3) */ | 2522 | /* GUI grant timer (PCI config 0xA3) */ |
2542 | if(SiS_Pr->ChipType == SIS_730) { | 2523 | if(SiS_Pr->ChipType == SIS_730) { |
@@ -2552,15 +2533,11 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
2552 | 2533 | ||
2553 | } | 2534 | } |
2554 | 2535 | ||
2555 | #ifdef SIS_LINUX_KERNEL | ||
2556 | sisfb_write_nbridge_pci_dword(SiS_Pr, 0xA0, templ); | 2536 | sisfb_write_nbridge_pci_dword(SiS_Pr, 0xA0, templ); |
2557 | #else | ||
2558 | pciWriteLong(0x00000000, 0xA0, templ); | ||
2559 | #endif | ||
2560 | } | 2537 | } |
2561 | #endif /* SIS300 */ | 2538 | #endif /* CONFIG_FB_SIS_300 */ |
2562 | 2539 | ||
2563 | #ifdef SIS315H | 2540 | #ifdef CONFIG_FB_SIS_315 |
2564 | static void | 2541 | static void |
2565 | SiS_SetCRT1FIFO_310(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex) | 2542 | SiS_SetCRT1FIFO_310(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex) |
2566 | { | 2543 | { |
@@ -2612,7 +2589,7 @@ SiS_SetVCLKState(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
2612 | } | 2589 | } |
2613 | 2590 | ||
2614 | if(SiS_Pr->ChipType < SIS_315H) { | 2591 | if(SiS_Pr->ChipType < SIS_315H) { |
2615 | #ifdef SIS300 | 2592 | #ifdef CONFIG_FB_SIS_300 |
2616 | if(VCLK > 150) data |= 0x80; | 2593 | if(VCLK > 150) data |= 0x80; |
2617 | SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x07,0x7B,data); | 2594 | SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x07,0x7B,data); |
2618 | 2595 | ||
@@ -2621,7 +2598,7 @@ SiS_SetVCLKState(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
2621 | SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x32,0xF7,data); | 2598 | SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x32,0xF7,data); |
2622 | #endif | 2599 | #endif |
2623 | } else if(SiS_Pr->ChipType < XGI_20) { | 2600 | } else if(SiS_Pr->ChipType < XGI_20) { |
2624 | #ifdef SIS315H | 2601 | #ifdef CONFIG_FB_SIS_315 |
2625 | if(VCLK >= 166) data |= 0x0c; | 2602 | if(VCLK >= 166) data |= 0x0c; |
2626 | SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x32,0xf3,data); | 2603 | SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x32,0xf3,data); |
2627 | 2604 | ||
@@ -2630,7 +2607,7 @@ SiS_SetVCLKState(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
2630 | } | 2607 | } |
2631 | #endif | 2608 | #endif |
2632 | } else { | 2609 | } else { |
2633 | #ifdef SIS315H | 2610 | #ifdef CONFIG_FB_SIS_315 |
2634 | if(VCLK >= 200) data |= 0x0c; | 2611 | if(VCLK >= 200) data |= 0x0c; |
2635 | if(SiS_Pr->ChipType == XGI_20) data &= ~0x04; | 2612 | if(SiS_Pr->ChipType == XGI_20) data &= ~0x04; |
2636 | SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x32,0xf3,data); | 2613 | SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x32,0xf3,data); |
@@ -2675,7 +2652,7 @@ SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
2675 | unsigned short ModeIdIndex, unsigned short RRTI) | 2652 | unsigned short ModeIdIndex, unsigned short RRTI) |
2676 | { | 2653 | { |
2677 | unsigned short data, infoflag = 0, modeflag, resindex; | 2654 | unsigned short data, infoflag = 0, modeflag, resindex; |
2678 | #ifdef SIS315H | 2655 | #ifdef CONFIG_FB_SIS_315 |
2679 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; | 2656 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; |
2680 | unsigned short data2, data3; | 2657 | unsigned short data2, data3; |
2681 | #endif | 2658 | #endif |
@@ -2736,7 +2713,7 @@ SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
2736 | SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x0F,0xB7,data); | 2713 | SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x0F,0xB7,data); |
2737 | } | 2714 | } |
2738 | 2715 | ||
2739 | #ifdef SIS315H | 2716 | #ifdef CONFIG_FB_SIS_315 |
2740 | if(SiS_Pr->ChipType >= SIS_315H) { | 2717 | if(SiS_Pr->ChipType >= SIS_315H) { |
2741 | SiS_SetRegAND(SiS_Pr->SiS_P3c4,0x31,0xfb); | 2718 | SiS_SetRegAND(SiS_Pr->SiS_P3c4,0x31,0xfb); |
2742 | } | 2719 | } |
@@ -2826,7 +2803,7 @@ SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
2826 | 2803 | ||
2827 | SiS_SetVCLKState(SiS_Pr, ModeNo, RRTI, ModeIdIndex); | 2804 | SiS_SetVCLKState(SiS_Pr, ModeNo, RRTI, ModeIdIndex); |
2828 | 2805 | ||
2829 | #ifdef SIS315H | 2806 | #ifdef CONFIG_FB_SIS_315 |
2830 | if(((SiS_Pr->ChipType >= SIS_315H) && (SiS_Pr->ChipType < SIS_661)) || | 2807 | if(((SiS_Pr->ChipType >= SIS_315H) && (SiS_Pr->ChipType < SIS_661)) || |
2831 | (SiS_Pr->ChipType == XGI_40)) { | 2808 | (SiS_Pr->ChipType == XGI_40)) { |
2832 | if(SiS_GetReg(SiS_Pr->SiS_P3d4,0x31) & 0x40) { | 2809 | if(SiS_GetReg(SiS_Pr->SiS_P3d4,0x31) & 0x40) { |
@@ -2845,7 +2822,7 @@ SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
2845 | #endif | 2822 | #endif |
2846 | } | 2823 | } |
2847 | 2824 | ||
2848 | #ifdef SIS315H | 2825 | #ifdef CONFIG_FB_SIS_315 |
2849 | static void | 2826 | static void |
2850 | SiS_SetupDualChip(struct SiS_Private *SiS_Pr) | 2827 | SiS_SetupDualChip(struct SiS_Private *SiS_Pr) |
2851 | { | 2828 | { |
@@ -2999,11 +2976,6 @@ SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sho | |||
2999 | SiS_Pr->SiS_SelectCRT2Rate = 0; | 2976 | SiS_Pr->SiS_SelectCRT2Rate = 0; |
3000 | SiS_Pr->SiS_SetFlag &= (~ProgrammingCRT2); | 2977 | SiS_Pr->SiS_SetFlag &= (~ProgrammingCRT2); |
3001 | 2978 | ||
3002 | #ifdef SIS_XORG_XF86 | ||
3003 | xf86DrvMsgVerb(0, X_PROBED, 4, "(init: VBType=0x%04x, VBInfo=0x%04x)\n", | ||
3004 | SiS_Pr->SiS_VBType, SiS_Pr->SiS_VBInfo); | ||
3005 | #endif | ||
3006 | |||
3007 | if(SiS_Pr->SiS_VBInfo & SetSimuScanMode) { | 2979 | if(SiS_Pr->SiS_VBInfo & SetSimuScanMode) { |
3008 | if(SiS_Pr->SiS_VBInfo & SetInSlaveMode) { | 2980 | if(SiS_Pr->SiS_VBInfo & SetInSlaveMode) { |
3009 | SiS_Pr->SiS_SetFlag |= ProgrammingCRT2; | 2981 | SiS_Pr->SiS_SetFlag |= ProgrammingCRT2; |
@@ -3028,7 +3000,7 @@ SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sho | |||
3028 | } | 3000 | } |
3029 | 3001 | ||
3030 | switch(SiS_Pr->ChipType) { | 3002 | switch(SiS_Pr->ChipType) { |
3031 | #ifdef SIS300 | 3003 | #ifdef CONFIG_FB_SIS_300 |
3032 | case SIS_300: | 3004 | case SIS_300: |
3033 | SiS_SetCRT1FIFO_300(SiS_Pr, ModeNo, RefreshRateTableIndex); | 3005 | SiS_SetCRT1FIFO_300(SiS_Pr, ModeNo, RefreshRateTableIndex); |
3034 | break; | 3006 | break; |
@@ -3039,7 +3011,7 @@ SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sho | |||
3039 | break; | 3011 | break; |
3040 | #endif | 3012 | #endif |
3041 | default: | 3013 | default: |
3042 | #ifdef SIS315H | 3014 | #ifdef CONFIG_FB_SIS_315 |
3043 | if(SiS_Pr->ChipType == XGI_20) { | 3015 | if(SiS_Pr->ChipType == XGI_20) { |
3044 | unsigned char sr2b = 0, sr2c = 0; | 3016 | unsigned char sr2b = 0, sr2c = 0; |
3045 | switch(ModeNo) { | 3017 | switch(ModeNo) { |
@@ -3062,7 +3034,7 @@ SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sho | |||
3062 | 3034 | ||
3063 | SiS_SetCRT1ModeRegs(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); | 3035 | SiS_SetCRT1ModeRegs(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); |
3064 | 3036 | ||
3065 | #ifdef SIS315H | 3037 | #ifdef CONFIG_FB_SIS_315 |
3066 | if(SiS_Pr->ChipType == XGI_40) { | 3038 | if(SiS_Pr->ChipType == XGI_40) { |
3067 | SiS_SetupDualChip(SiS_Pr); | 3039 | SiS_SetupDualChip(SiS_Pr); |
3068 | } | 3040 | } |
@@ -3070,11 +3042,9 @@ SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sho | |||
3070 | 3042 | ||
3071 | SiS_LoadDAC(SiS_Pr, ModeNo, ModeIdIndex); | 3043 | SiS_LoadDAC(SiS_Pr, ModeNo, ModeIdIndex); |
3072 | 3044 | ||
3073 | #ifdef SIS_LINUX_KERNEL | ||
3074 | if(SiS_Pr->SiS_flag_clearbuffer) { | 3045 | if(SiS_Pr->SiS_flag_clearbuffer) { |
3075 | SiS_ClearBuffer(SiS_Pr, ModeNo); | 3046 | SiS_ClearBuffer(SiS_Pr, ModeNo); |
3076 | } | 3047 | } |
3077 | #endif | ||
3078 | 3048 | ||
3079 | if(!(SiS_Pr->SiS_VBInfo & (SetSimuScanMode | SwitchCRT2 | SetCRT2ToLCDA))) { | 3049 | if(!(SiS_Pr->SiS_VBInfo & (SetSimuScanMode | SwitchCRT2 | SetCRT2ToLCDA))) { |
3080 | SiS_WaitRetrace1(SiS_Pr); | 3050 | SiS_WaitRetrace1(SiS_Pr); |
@@ -3104,7 +3074,7 @@ SiS_InitVB(struct SiS_Private *SiS_Pr) | |||
3104 | static void | 3074 | static void |
3105 | SiS_ResetVB(struct SiS_Private *SiS_Pr) | 3075 | SiS_ResetVB(struct SiS_Private *SiS_Pr) |
3106 | { | 3076 | { |
3107 | #ifdef SIS315H | 3077 | #ifdef CONFIG_FB_SIS_315 |
3108 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; | 3078 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; |
3109 | unsigned short temp; | 3079 | unsigned short temp; |
3110 | 3080 | ||
@@ -3139,7 +3109,7 @@ SiS_StrangeStuff(struct SiS_Private *SiS_Pr) | |||
3139 | * which locks CRT2 in some way to CRT1 timing. Disable | 3109 | * which locks CRT2 in some way to CRT1 timing. Disable |
3140 | * this here. | 3110 | * this here. |
3141 | */ | 3111 | */ |
3142 | #ifdef SIS315H | 3112 | #ifdef CONFIG_FB_SIS_315 |
3143 | if((IS_SIS651) || (IS_SISM650) || | 3113 | if((IS_SIS651) || (IS_SISM650) || |
3144 | SiS_Pr->ChipType == SIS_340 || | 3114 | SiS_Pr->ChipType == SIS_340 || |
3145 | SiS_Pr->ChipType == XGI_40) { | 3115 | SiS_Pr->ChipType == XGI_40) { |
@@ -3160,7 +3130,7 @@ SiS_StrangeStuff(struct SiS_Private *SiS_Pr) | |||
3160 | static void | 3130 | static void |
3161 | SiS_Handle760(struct SiS_Private *SiS_Pr) | 3131 | SiS_Handle760(struct SiS_Private *SiS_Pr) |
3162 | { | 3132 | { |
3163 | #ifdef SIS315H | 3133 | #ifdef CONFIG_FB_SIS_315 |
3164 | unsigned int somebase; | 3134 | unsigned int somebase; |
3165 | unsigned char temp1, temp2, temp3; | 3135 | unsigned char temp1, temp2, temp3; |
3166 | 3136 | ||
@@ -3170,11 +3140,7 @@ SiS_Handle760(struct SiS_Private *SiS_Pr) | |||
3170 | (!(SiS_Pr->SiS_SysFlags & SF_760UMA)) ) | 3140 | (!(SiS_Pr->SiS_SysFlags & SF_760UMA)) ) |
3171 | return; | 3141 | return; |
3172 | 3142 | ||
3173 | #ifdef SIS_LINUX_KERNEL | ||
3174 | somebase = sisfb_read_mio_pci_word(SiS_Pr, 0x74); | 3143 | somebase = sisfb_read_mio_pci_word(SiS_Pr, 0x74); |
3175 | #else | ||
3176 | somebase = pciReadWord(0x00001000, 0x74); | ||
3177 | #endif | ||
3178 | somebase &= 0xffff; | 3144 | somebase &= 0xffff; |
3179 | 3145 | ||
3180 | if(somebase == 0) return; | 3146 | if(somebase == 0) return; |
@@ -3190,105 +3156,34 @@ SiS_Handle760(struct SiS_Private *SiS_Pr) | |||
3190 | temp2 = 0x0b; | 3156 | temp2 = 0x0b; |
3191 | } | 3157 | } |
3192 | 3158 | ||
3193 | #ifdef SIS_LINUX_KERNEL | ||
3194 | sisfb_write_nbridge_pci_byte(SiS_Pr, 0x7e, temp1); | 3159 | sisfb_write_nbridge_pci_byte(SiS_Pr, 0x7e, temp1); |
3195 | sisfb_write_nbridge_pci_byte(SiS_Pr, 0x8d, temp2); | 3160 | sisfb_write_nbridge_pci_byte(SiS_Pr, 0x8d, temp2); |
3196 | #else | ||
3197 | pciWriteByte(0x00000000, 0x7e, temp1); | ||
3198 | pciWriteByte(0x00000000, 0x8d, temp2); | ||
3199 | #endif | ||
3200 | 3161 | ||
3201 | SiS_SetRegByte((somebase + 0x85), temp3); | 3162 | SiS_SetRegByte((somebase + 0x85), temp3); |
3202 | #endif | 3163 | #endif |
3203 | } | 3164 | } |
3204 | 3165 | ||
3205 | /*********************************************/ | 3166 | /*********************************************/ |
3206 | /* X.org/XFree86: SET SCREEN PITCH */ | ||
3207 | /*********************************************/ | ||
3208 | |||
3209 | #ifdef SIS_XORG_XF86 | ||
3210 | static void | ||
3211 | SiS_SetPitchCRT1(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn) | ||
3212 | { | ||
3213 | SISPtr pSiS = SISPTR(pScrn); | ||
3214 | unsigned short HDisplay = pSiS->scrnPitch >> 3; | ||
3215 | |||
3216 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x13,(HDisplay & 0xFF)); | ||
3217 | SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x0E,0xF0,(HDisplay >> 8)); | ||
3218 | } | ||
3219 | |||
3220 | static void | ||
3221 | SiS_SetPitchCRT2(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn) | ||
3222 | { | ||
3223 | SISPtr pSiS = SISPTR(pScrn); | ||
3224 | unsigned short HDisplay = pSiS->scrnPitch2 >> 3; | ||
3225 | |||
3226 | /* Unlock CRT2 */ | ||
3227 | if(pSiS->VGAEngine == SIS_315_VGA) | ||
3228 | SiS_SetRegOR(SiS_Pr->SiS_Part1Port,0x2F, 0x01); | ||
3229 | else | ||
3230 | SiS_SetRegOR(SiS_Pr->SiS_Part1Port,0x24, 0x01); | ||
3231 | |||
3232 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x07,(HDisplay & 0xFF)); | ||
3233 | SiS_SetRegANDOR(SiS_Pr->SiS_Part1Port,0x09,0xF0,(HDisplay >> 8)); | ||
3234 | } | ||
3235 | |||
3236 | static void | ||
3237 | SiS_SetPitch(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn) | ||
3238 | { | ||
3239 | SISPtr pSiS = SISPTR(pScrn); | ||
3240 | bool isslavemode = false; | ||
3241 | |||
3242 | if( (pSiS->VBFlags2 & VB2_VIDEOBRIDGE) && | ||
3243 | ( ((pSiS->VGAEngine == SIS_300_VGA) && | ||
3244 | (SiS_GetReg(SiS_Pr->SiS_Part1Port,0x00) & 0xa0) == 0x20) || | ||
3245 | ((pSiS->VGAEngine == SIS_315_VGA) && | ||
3246 | (SiS_GetReg(SiS_Pr->SiS_Part1Port,0x00) & 0x50) == 0x10) ) ) { | ||
3247 | isslavemode = true; | ||
3248 | } | ||
3249 | |||
3250 | /* We need to set pitch for CRT1 if bridge is in slave mode, too */ | ||
3251 | if((pSiS->VBFlags & DISPTYPE_DISP1) || (isslavemode)) { | ||
3252 | SiS_SetPitchCRT1(SiS_Pr, pScrn); | ||
3253 | } | ||
3254 | /* We must not set the pitch for CRT2 if bridge is in slave mode */ | ||
3255 | if((pSiS->VBFlags & DISPTYPE_DISP2) && (!isslavemode)) { | ||
3256 | SiS_SetPitchCRT2(SiS_Pr, pScrn); | ||
3257 | } | ||
3258 | } | ||
3259 | #endif | ||
3260 | |||
3261 | /*********************************************/ | ||
3262 | /* SiSSetMode() */ | 3167 | /* SiSSetMode() */ |
3263 | /*********************************************/ | 3168 | /*********************************************/ |
3264 | 3169 | ||
3265 | #ifdef SIS_XORG_XF86 | ||
3266 | /* We need pScrn for setting the pitch correctly */ | ||
3267 | bool | ||
3268 | SiSSetMode(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, unsigned short ModeNo, bool dosetpitch) | ||
3269 | #else | ||
3270 | bool | 3170 | bool |
3271 | SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | 3171 | SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo) |
3272 | #endif | ||
3273 | { | 3172 | { |
3274 | SISIOADDRESS BaseAddr = SiS_Pr->IOAddress; | 3173 | SISIOADDRESS BaseAddr = SiS_Pr->IOAddress; |
3275 | unsigned short RealModeNo, ModeIdIndex; | 3174 | unsigned short RealModeNo, ModeIdIndex; |
3276 | unsigned char backupreg = 0; | 3175 | unsigned char backupreg = 0; |
3277 | #ifdef SIS_LINUX_KERNEL | ||
3278 | unsigned short KeepLockReg; | 3176 | unsigned short KeepLockReg; |
3279 | 3177 | ||
3280 | SiS_Pr->UseCustomMode = false; | 3178 | SiS_Pr->UseCustomMode = false; |
3281 | SiS_Pr->CRT1UsesCustomMode = false; | 3179 | SiS_Pr->CRT1UsesCustomMode = false; |
3282 | #endif | ||
3283 | 3180 | ||
3284 | SiS_Pr->SiS_flag_clearbuffer = 0; | 3181 | SiS_Pr->SiS_flag_clearbuffer = 0; |
3285 | 3182 | ||
3286 | if(SiS_Pr->UseCustomMode) { | 3183 | if(SiS_Pr->UseCustomMode) { |
3287 | ModeNo = 0xfe; | 3184 | ModeNo = 0xfe; |
3288 | } else { | 3185 | } else { |
3289 | #ifdef SIS_LINUX_KERNEL | ||
3290 | if(!(ModeNo & 0x80)) SiS_Pr->SiS_flag_clearbuffer = 1; | 3186 | if(!(ModeNo & 0x80)) SiS_Pr->SiS_flag_clearbuffer = 1; |
3291 | #endif | ||
3292 | ModeNo &= 0x7f; | 3187 | ModeNo &= 0x7f; |
3293 | } | 3188 | } |
3294 | 3189 | ||
@@ -3301,13 +3196,8 @@ SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | |||
3301 | SiS_GetSysFlags(SiS_Pr); | 3196 | SiS_GetSysFlags(SiS_Pr); |
3302 | 3197 | ||
3303 | SiS_Pr->SiS_VGAINFO = 0x11; | 3198 | SiS_Pr->SiS_VGAINFO = 0x11; |
3304 | #if defined(SIS_XORG_XF86) && (defined(i386) || defined(__i386) || defined(__i386__) || defined(__AMD64__) || defined(__amd64__) || defined(__x86_64__)) | ||
3305 | if(pScrn) SiS_Pr->SiS_VGAINFO = SiS_GetSetBIOSScratch(pScrn, 0x489, 0xff); | ||
3306 | #endif | ||
3307 | 3199 | ||
3308 | #ifdef SIS_LINUX_KERNEL | ||
3309 | KeepLockReg = SiS_GetReg(SiS_Pr->SiS_P3c4,0x05); | 3200 | KeepLockReg = SiS_GetReg(SiS_Pr->SiS_P3c4,0x05); |
3310 | #endif | ||
3311 | SiS_SetReg(SiS_Pr->SiS_P3c4,0x05,0x86); | 3201 | SiS_SetReg(SiS_Pr->SiS_P3c4,0x05,0x86); |
3312 | 3202 | ||
3313 | SiSInitPCIetc(SiS_Pr); | 3203 | SiSInitPCIetc(SiS_Pr); |
@@ -3344,12 +3234,10 @@ SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | |||
3344 | SiS_GetLCDResInfo(SiS_Pr, ModeNo, ModeIdIndex); | 3234 | SiS_GetLCDResInfo(SiS_Pr, ModeNo, ModeIdIndex); |
3345 | SiS_SetLowModeTest(SiS_Pr, ModeNo); | 3235 | SiS_SetLowModeTest(SiS_Pr, ModeNo); |
3346 | 3236 | ||
3347 | #ifdef SIS_LINUX_KERNEL | ||
3348 | /* Check memory size (kernel framebuffer driver only) */ | 3237 | /* Check memory size (kernel framebuffer driver only) */ |
3349 | if(!SiS_CheckMemorySize(SiS_Pr, ModeNo, ModeIdIndex)) { | 3238 | if(!SiS_CheckMemorySize(SiS_Pr, ModeNo, ModeIdIndex)) { |
3350 | return false; | 3239 | return false; |
3351 | } | 3240 | } |
3352 | #endif | ||
3353 | 3241 | ||
3354 | SiS_OpenCRTC(SiS_Pr); | 3242 | SiS_OpenCRTC(SiS_Pr); |
3355 | 3243 | ||
@@ -3384,7 +3272,7 @@ SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | |||
3384 | SiS_DisplayOn(SiS_Pr); | 3272 | SiS_DisplayOn(SiS_Pr); |
3385 | SiS_SetRegByte(SiS_Pr->SiS_P3c6,0xFF); | 3273 | SiS_SetRegByte(SiS_Pr->SiS_P3c6,0xFF); |
3386 | 3274 | ||
3387 | #ifdef SIS315H | 3275 | #ifdef CONFIG_FB_SIS_315 |
3388 | if(SiS_Pr->ChipType >= SIS_315H) { | 3276 | if(SiS_Pr->ChipType >= SIS_315H) { |
3389 | if(SiS_Pr->SiS_IF_DEF_LVDS == 1) { | 3277 | if(SiS_Pr->SiS_IF_DEF_LVDS == 1) { |
3390 | if(!(SiS_IsDualEdge(SiS_Pr))) { | 3278 | if(!(SiS_IsDualEdge(SiS_Pr))) { |
@@ -3396,7 +3284,7 @@ SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | |||
3396 | 3284 | ||
3397 | if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { | 3285 | if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { |
3398 | if(SiS_Pr->ChipType >= SIS_315H) { | 3286 | if(SiS_Pr->ChipType >= SIS_315H) { |
3399 | #ifdef SIS315H | 3287 | #ifdef CONFIG_FB_SIS_315 |
3400 | if(!SiS_Pr->SiS_ROMNew) { | 3288 | if(!SiS_Pr->SiS_ROMNew) { |
3401 | if(SiS_IsVAMode(SiS_Pr)) { | 3289 | if(SiS_IsVAMode(SiS_Pr)) { |
3402 | SiS_SetRegOR(SiS_Pr->SiS_P3d4,0x35,0x01); | 3290 | SiS_SetRegOR(SiS_Pr->SiS_P3d4,0x35,0x01); |
@@ -3424,424 +3312,16 @@ SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | |||
3424 | } | 3312 | } |
3425 | } | 3313 | } |
3426 | 3314 | ||
3427 | #ifdef SIS_XORG_XF86 | ||
3428 | if(pScrn) { | ||
3429 | /* SetPitch: Adapt to virtual size & position */ | ||
3430 | if((ModeNo > 0x13) && (dosetpitch)) { | ||
3431 | SiS_SetPitch(SiS_Pr, pScrn); | ||
3432 | } | ||
3433 | |||
3434 | /* Backup/Set ModeNo in BIOS scratch area */ | ||
3435 | SiS_GetSetModeID(pScrn, ModeNo); | ||
3436 | } | ||
3437 | #endif | ||
3438 | |||
3439 | SiS_CloseCRTC(SiS_Pr); | 3315 | SiS_CloseCRTC(SiS_Pr); |
3440 | 3316 | ||
3441 | SiS_Handle760(SiS_Pr); | 3317 | SiS_Handle760(SiS_Pr); |
3442 | 3318 | ||
3443 | #ifdef SIS_LINUX_KERNEL | ||
3444 | /* We never lock registers in XF86 */ | 3319 | /* We never lock registers in XF86 */ |
3445 | if(KeepLockReg != 0xA1) SiS_SetReg(SiS_Pr->SiS_P3c4,0x05,0x00); | 3320 | if(KeepLockReg != 0xA1) SiS_SetReg(SiS_Pr->SiS_P3c4,0x05,0x00); |
3446 | #endif | ||
3447 | 3321 | ||
3448 | return true; | 3322 | return true; |
3449 | } | 3323 | } |
3450 | 3324 | ||
3451 | /*********************************************/ | ||
3452 | /* X.org/XFree86: SiSBIOSSetMode() */ | ||
3453 | /* for non-Dual-Head mode */ | ||
3454 | /*********************************************/ | ||
3455 | |||
3456 | #ifdef SIS_XORG_XF86 | ||
3457 | bool | ||
3458 | SiSBIOSSetMode(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, | ||
3459 | DisplayModePtr mode, bool IsCustom) | ||
3460 | { | ||
3461 | SISPtr pSiS = SISPTR(pScrn); | ||
3462 | unsigned short ModeNo = 0; | ||
3463 | |||
3464 | SiS_Pr->UseCustomMode = false; | ||
3465 | |||
3466 | if((IsCustom) && (SiS_CheckBuildCustomMode(pScrn, mode, pSiS->VBFlags))) { | ||
3467 | |||
3468 | xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, "Setting custom mode %dx%d\n", | ||
3469 | SiS_Pr->CHDisplay, | ||
3470 | (mode->Flags & V_INTERLACE ? SiS_Pr->CVDisplay * 2 : | ||
3471 | (mode->Flags & V_DBLSCAN ? SiS_Pr->CVDisplay / 2 : | ||
3472 | SiS_Pr->CVDisplay))); | ||
3473 | |||
3474 | } else { | ||
3475 | |||
3476 | /* Don't need vbflags here; checks done earlier */ | ||
3477 | ModeNo = SiS_GetModeNumber(pScrn, mode, pSiS->VBFlags); | ||
3478 | if(!ModeNo) return false; | ||
3479 | |||
3480 | xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, "Setting standard mode 0x%x\n", ModeNo); | ||
3481 | |||
3482 | } | ||
3483 | |||
3484 | return(SiSSetMode(SiS_Pr, pScrn, ModeNo, true)); | ||
3485 | } | ||
3486 | |||
3487 | /*********************************************/ | ||
3488 | /* X.org/XFree86: SiSBIOSSetModeCRT2() */ | ||
3489 | /* for Dual-Head modes */ | ||
3490 | /*********************************************/ | ||
3491 | |||
3492 | bool | ||
3493 | SiSBIOSSetModeCRT2(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, | ||
3494 | DisplayModePtr mode, bool IsCustom) | ||
3495 | { | ||
3496 | SISIOADDRESS BaseAddr = SiS_Pr->IOAddress; | ||
3497 | SISPtr pSiS = SISPTR(pScrn); | ||
3498 | #ifdef SISDUALHEAD | ||
3499 | SISEntPtr pSiSEnt = pSiS->entityPrivate; | ||
3500 | #endif | ||
3501 | unsigned short ModeIdIndex; | ||
3502 | unsigned short ModeNo = 0; | ||
3503 | unsigned char backupreg = 0; | ||
3504 | |||
3505 | SiS_Pr->UseCustomMode = false; | ||
3506 | |||
3507 | /* Remember: Custom modes for CRT2 are ONLY supported | ||
3508 | * -) on the 30x/B/C, and | ||
3509 | * -) if CRT2 is LCD or VGA, or CRT1 is LCDA | ||
3510 | */ | ||
3511 | |||
3512 | if((IsCustom) && (SiS_CheckBuildCustomMode(pScrn, mode, pSiS->VBFlags))) { | ||
3513 | |||
3514 | ModeNo = 0xfe; | ||
3515 | |||
3516 | } else { | ||
3517 | |||
3518 | ModeNo = SiS_GetModeNumber(pScrn, mode, pSiS->VBFlags); | ||
3519 | if(!ModeNo) return false; | ||
3520 | |||
3521 | } | ||
3522 | |||
3523 | SiSRegInit(SiS_Pr, BaseAddr); | ||
3524 | SiSInitPtr(SiS_Pr); | ||
3525 | SiS_GetSysFlags(SiS_Pr); | ||
3526 | #if defined(i386) || defined(__i386) || defined(__i386__) || defined(__AMD64__) || defined(__amd64__) || defined(__x86_64__) | ||
3527 | SiS_Pr->SiS_VGAINFO = SiS_GetSetBIOSScratch(pScrn, 0x489, 0xff); | ||
3528 | #else | ||
3529 | SiS_Pr->SiS_VGAINFO = 0x11; | ||
3530 | #endif | ||
3531 | |||
3532 | SiS_SetReg(SiS_Pr->SiS_P3c4,0x05,0x86); | ||
3533 | |||
3534 | SiSInitPCIetc(SiS_Pr); | ||
3535 | SiSSetLVDSetc(SiS_Pr); | ||
3536 | SiSDetermineROMUsage(SiS_Pr); | ||
3537 | |||
3538 | /* Save mode info so we can set it from within SetMode for CRT1 */ | ||
3539 | #ifdef SISDUALHEAD | ||
3540 | if(pSiS->DualHeadMode) { | ||
3541 | pSiSEnt->CRT2ModeNo = ModeNo; | ||
3542 | pSiSEnt->CRT2DMode = mode; | ||
3543 | pSiSEnt->CRT2IsCustom = IsCustom; | ||
3544 | pSiSEnt->CRT2CR30 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x30); | ||
3545 | pSiSEnt->CRT2CR31 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x31); | ||
3546 | pSiSEnt->CRT2CR35 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x35); | ||
3547 | pSiSEnt->CRT2CR38 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x38); | ||
3548 | #if 0 | ||
3549 | /* We can't set CRT2 mode before CRT1 mode is set - says who...? */ | ||
3550 | if(pSiSEnt->CRT1ModeNo == -1) { | ||
3551 | xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, | ||
3552 | "Setting CRT2 mode delayed until after setting CRT1 mode\n"); | ||
3553 | return true; | ||
3554 | } | ||
3555 | #endif | ||
3556 | pSiSEnt->CRT2ModeSet = true; | ||
3557 | } | ||
3558 | #endif | ||
3559 | |||
3560 | if(SiS_Pr->UseCustomMode) { | ||
3561 | |||
3562 | unsigned short temptemp = SiS_Pr->CVDisplay; | ||
3563 | |||
3564 | if(SiS_Pr->CModeFlag & DoubleScanMode) temptemp >>= 1; | ||
3565 | else if(SiS_Pr->CInfoFlag & InterlaceMode) temptemp <<= 1; | ||
3566 | |||
3567 | xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, | ||
3568 | "Setting custom mode %dx%d on CRT2\n", | ||
3569 | SiS_Pr->CHDisplay, temptemp); | ||
3570 | |||
3571 | } else { | ||
3572 | |||
3573 | xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, | ||
3574 | "Setting standard mode 0x%x on CRT2\n", ModeNo); | ||
3575 | |||
3576 | } | ||
3577 | |||
3578 | SiS_UnLockCRT2(SiS_Pr); | ||
3579 | |||
3580 | if(!SiS_Pr->UseCustomMode) { | ||
3581 | if(!(SiS_SearchModeID(SiS_Pr, &ModeNo, &ModeIdIndex))) return false; | ||
3582 | } else { | ||
3583 | ModeIdIndex = 0; | ||
3584 | } | ||
3585 | |||
3586 | SiS_GetVBType(SiS_Pr); | ||
3587 | |||
3588 | SiS_InitVB(SiS_Pr); | ||
3589 | if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { | ||
3590 | if(SiS_Pr->ChipType >= SIS_315H) { | ||
3591 | SiS_ResetVB(SiS_Pr); | ||
3592 | SiS_SetRegOR(SiS_Pr->SiS_P3c4,0x32,0x10); | ||
3593 | SiS_SetRegOR(SiS_Pr->SiS_Part2Port,0x00,0x0c); | ||
3594 | backupreg = SiS_GetReg(SiS_Pr->SiS_P3d4,0x38); | ||
3595 | } else { | ||
3596 | backupreg = SiS_GetReg(SiS_Pr->SiS_P3d4,0x35); | ||
3597 | } | ||
3598 | } | ||
3599 | |||
3600 | /* Get VB information (connectors, connected devices) */ | ||
3601 | if(!SiS_Pr->UseCustomMode) { | ||
3602 | SiS_GetVBInfo(SiS_Pr, ModeNo, ModeIdIndex, 1); | ||
3603 | } else { | ||
3604 | /* If this is a custom mode, we don't check the modeflag for CRT2Mode */ | ||
3605 | SiS_GetVBInfo(SiS_Pr, ModeNo, ModeIdIndex, 0); | ||
3606 | } | ||
3607 | SiS_SetYPbPr(SiS_Pr); | ||
3608 | SiS_SetTVMode(SiS_Pr, ModeNo, ModeIdIndex); | ||
3609 | SiS_GetLCDResInfo(SiS_Pr, ModeNo, ModeIdIndex); | ||
3610 | SiS_SetLowModeTest(SiS_Pr, ModeNo); | ||
3611 | |||
3612 | SiS_ResetSegmentRegisters(SiS_Pr); | ||
3613 | |||
3614 | /* Set mode on CRT2 */ | ||
3615 | if( (SiS_Pr->SiS_VBType & VB_SISVB) || | ||
3616 | (SiS_Pr->SiS_IF_DEF_LVDS == 1) || | ||
3617 | (SiS_Pr->SiS_IF_DEF_CH70xx != 0) || | ||
3618 | (SiS_Pr->SiS_IF_DEF_TRUMPION != 0) ) { | ||
3619 | SiS_SetCRT2Group(SiS_Pr, ModeNo); | ||
3620 | } | ||
3621 | |||
3622 | SiS_StrangeStuff(SiS_Pr); | ||
3623 | |||
3624 | SiS_DisplayOn(SiS_Pr); | ||
3625 | SiS_SetRegByte(SiS_Pr->SiS_P3c6,0xFF); | ||
3626 | |||
3627 | if(SiS_Pr->ChipType >= SIS_315H) { | ||
3628 | if(SiS_Pr->SiS_IF_DEF_LVDS == 1) { | ||
3629 | if(!(SiS_IsDualEdge(SiS_Pr))) { | ||
3630 | SiS_SetRegAND(SiS_Pr->SiS_Part1Port,0x13,0xfb); | ||
3631 | } | ||
3632 | } | ||
3633 | } | ||
3634 | |||
3635 | if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { | ||
3636 | if(SiS_Pr->ChipType >= SIS_315H) { | ||
3637 | if(!SiS_Pr->SiS_ROMNew) { | ||
3638 | if(SiS_IsVAMode(SiS_Pr)) { | ||
3639 | SiS_SetRegOR(SiS_Pr->SiS_P3d4,0x35,0x01); | ||
3640 | } else { | ||
3641 | SiS_SetRegAND(SiS_Pr->SiS_P3d4,0x35,0xFE); | ||
3642 | } | ||
3643 | } | ||
3644 | |||
3645 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x38,backupreg); | ||
3646 | |||
3647 | if(SiS_GetReg(SiS_Pr->SiS_P3d4,0x30) & SetCRT2ToLCD) { | ||
3648 | SiS_SetRegAND(SiS_Pr->SiS_P3d4,0x38,0xfc); | ||
3649 | } | ||
3650 | } else if((SiS_Pr->ChipType == SIS_630) || | ||
3651 | (SiS_Pr->ChipType == SIS_730)) { | ||
3652 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x35,backupreg); | ||
3653 | } | ||
3654 | } | ||
3655 | |||
3656 | /* SetPitch: Adapt to virtual size & position */ | ||
3657 | SiS_SetPitchCRT2(SiS_Pr, pScrn); | ||
3658 | |||
3659 | SiS_Handle760(SiS_Pr); | ||
3660 | |||
3661 | return true; | ||
3662 | } | ||
3663 | |||
3664 | /*********************************************/ | ||
3665 | /* X.org/XFree86: SiSBIOSSetModeCRT1() */ | ||
3666 | /* for Dual-Head modes */ | ||
3667 | /*********************************************/ | ||
3668 | |||
3669 | bool | ||
3670 | SiSBIOSSetModeCRT1(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, | ||
3671 | DisplayModePtr mode, bool IsCustom) | ||
3672 | { | ||
3673 | SISIOADDRESS BaseAddr = SiS_Pr->IOAddress; | ||
3674 | SISPtr pSiS = SISPTR(pScrn); | ||
3675 | unsigned short ModeIdIndex, ModeNo = 0; | ||
3676 | unsigned char backupreg = 0; | ||
3677 | #ifdef SISDUALHEAD | ||
3678 | SISEntPtr pSiSEnt = pSiS->entityPrivate; | ||
3679 | unsigned char backupcr30, backupcr31, backupcr38, backupcr35, backupp40d=0; | ||
3680 | bool backupcustom; | ||
3681 | #endif | ||
3682 | |||
3683 | SiS_Pr->UseCustomMode = false; | ||
3684 | |||
3685 | if((IsCustom) && (SiS_CheckBuildCustomMode(pScrn, mode, pSiS->VBFlags))) { | ||
3686 | |||
3687 | unsigned short temptemp = SiS_Pr->CVDisplay; | ||
3688 | |||
3689 | if(SiS_Pr->CModeFlag & DoubleScanMode) temptemp >>= 1; | ||
3690 | else if(SiS_Pr->CInfoFlag & InterlaceMode) temptemp <<= 1; | ||
3691 | |||
3692 | xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, | ||
3693 | "Setting custom mode %dx%d on CRT1\n", | ||
3694 | SiS_Pr->CHDisplay, temptemp); | ||
3695 | ModeNo = 0xfe; | ||
3696 | |||
3697 | } else { | ||
3698 | |||
3699 | ModeNo = SiS_GetModeNumber(pScrn, mode, 0); /* don't give VBFlags */ | ||
3700 | if(!ModeNo) return false; | ||
3701 | |||
3702 | xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, | ||
3703 | "Setting standard mode 0x%x on CRT1\n", ModeNo); | ||
3704 | } | ||
3705 | |||
3706 | SiSInitPtr(SiS_Pr); | ||
3707 | SiSRegInit(SiS_Pr, BaseAddr); | ||
3708 | SiS_GetSysFlags(SiS_Pr); | ||
3709 | #if defined(i386) || defined(__i386) || defined(__i386__) || defined(__AMD64__) || defined(__amd64__) || defined(__x86_64__) | ||
3710 | SiS_Pr->SiS_VGAINFO = SiS_GetSetBIOSScratch(pScrn, 0x489, 0xff); | ||
3711 | #else | ||
3712 | SiS_Pr->SiS_VGAINFO = 0x11; | ||
3713 | #endif | ||
3714 | |||
3715 | SiS_SetReg(SiS_Pr->SiS_P3c4,0x05,0x86); | ||
3716 | |||
3717 | SiSInitPCIetc(SiS_Pr); | ||
3718 | SiSSetLVDSetc(SiS_Pr); | ||
3719 | SiSDetermineROMUsage(SiS_Pr); | ||
3720 | |||
3721 | SiS_UnLockCRT2(SiS_Pr); | ||
3722 | |||
3723 | if(!SiS_Pr->UseCustomMode) { | ||
3724 | if(!(SiS_SearchModeID(SiS_Pr, &ModeNo, &ModeIdIndex))) return false; | ||
3725 | } else { | ||
3726 | ModeIdIndex = 0; | ||
3727 | } | ||
3728 | |||
3729 | /* Determine VBType */ | ||
3730 | SiS_GetVBType(SiS_Pr); | ||
3731 | |||
3732 | SiS_InitVB(SiS_Pr); | ||
3733 | if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { | ||
3734 | if(SiS_Pr->ChipType >= SIS_315H) { | ||
3735 | backupreg = SiS_GetReg(SiS_Pr->SiS_P3d4,0x38); | ||
3736 | } else { | ||
3737 | backupreg = SiS_GetReg(SiS_Pr->SiS_P3d4,0x35); | ||
3738 | } | ||
3739 | } | ||
3740 | |||
3741 | /* Get VB information (connectors, connected devices) */ | ||
3742 | /* (We don't care if the current mode is a CRT2 mode) */ | ||
3743 | SiS_GetVBInfo(SiS_Pr, ModeNo, ModeIdIndex, 0); | ||
3744 | SiS_SetYPbPr(SiS_Pr); | ||
3745 | SiS_SetTVMode(SiS_Pr, ModeNo, ModeIdIndex); | ||
3746 | SiS_GetLCDResInfo(SiS_Pr, ModeNo, ModeIdIndex); | ||
3747 | SiS_SetLowModeTest(SiS_Pr, ModeNo); | ||
3748 | |||
3749 | SiS_OpenCRTC(SiS_Pr); | ||
3750 | |||
3751 | /* Set mode on CRT1 */ | ||
3752 | SiS_SetCRT1Group(SiS_Pr, ModeNo, ModeIdIndex); | ||
3753 | if(SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA) { | ||
3754 | SiS_SetCRT2Group(SiS_Pr, ModeNo); | ||
3755 | } | ||
3756 | |||
3757 | /* SetPitch: Adapt to virtual size & position */ | ||
3758 | SiS_SetPitchCRT1(SiS_Pr, pScrn); | ||
3759 | |||
3760 | SiS_HandleCRT1(SiS_Pr); | ||
3761 | |||
3762 | SiS_StrangeStuff(SiS_Pr); | ||
3763 | |||
3764 | SiS_CloseCRTC(SiS_Pr); | ||
3765 | |||
3766 | #ifdef SISDUALHEAD | ||
3767 | if(pSiS->DualHeadMode) { | ||
3768 | pSiSEnt->CRT1ModeNo = ModeNo; | ||
3769 | pSiSEnt->CRT1DMode = mode; | ||
3770 | } | ||
3771 | #endif | ||
3772 | |||
3773 | if(SiS_Pr->UseCustomMode) { | ||
3774 | SiS_Pr->CRT1UsesCustomMode = true; | ||
3775 | SiS_Pr->CSRClock_CRT1 = SiS_Pr->CSRClock; | ||
3776 | SiS_Pr->CModeFlag_CRT1 = SiS_Pr->CModeFlag; | ||
3777 | } else { | ||
3778 | SiS_Pr->CRT1UsesCustomMode = false; | ||
3779 | } | ||
3780 | |||
3781 | /* Reset CRT2 if changing mode on CRT1 */ | ||
3782 | #ifdef SISDUALHEAD | ||
3783 | if(pSiS->DualHeadMode) { | ||
3784 | if(pSiSEnt->CRT2ModeNo != -1) { | ||
3785 | xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, | ||
3786 | "(Re-)Setting mode for CRT2\n"); | ||
3787 | backupcustom = SiS_Pr->UseCustomMode; | ||
3788 | backupcr30 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x30); | ||
3789 | backupcr31 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x31); | ||
3790 | backupcr35 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x35); | ||
3791 | backupcr38 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x38); | ||
3792 | if(SiS_Pr->SiS_VBType & VB_SISVB) { | ||
3793 | /* Backup LUT-enable */ | ||
3794 | if(pSiSEnt->CRT2ModeSet) { | ||
3795 | backupp40d = SiS_GetReg(SiS_Pr->SiS_Part4Port,0x0d) & 0x08; | ||
3796 | } | ||
3797 | } | ||
3798 | if(SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA) { | ||
3799 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x30,pSiSEnt->CRT2CR30); | ||
3800 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x31,pSiSEnt->CRT2CR31); | ||
3801 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x35,pSiSEnt->CRT2CR35); | ||
3802 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x38,pSiSEnt->CRT2CR38); | ||
3803 | } | ||
3804 | |||
3805 | SiSBIOSSetModeCRT2(SiS_Pr, pSiSEnt->pScrn_1, | ||
3806 | pSiSEnt->CRT2DMode, pSiSEnt->CRT2IsCustom); | ||
3807 | |||
3808 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x30,backupcr30); | ||
3809 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x31,backupcr31); | ||
3810 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x35,backupcr35); | ||
3811 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x38,backupcr38); | ||
3812 | if(SiS_Pr->SiS_VBType & VB_SISVB) { | ||
3813 | SiS_SetRegANDOR(SiS_Pr->SiS_Part4Port,0x0d, ~0x08, backupp40d); | ||
3814 | } | ||
3815 | SiS_Pr->UseCustomMode = backupcustom; | ||
3816 | } | ||
3817 | } | ||
3818 | #endif | ||
3819 | |||
3820 | /* Warning: From here, the custom mode entries in SiS_Pr are | ||
3821 | * possibly overwritten | ||
3822 | */ | ||
3823 | |||
3824 | SiS_DisplayOn(SiS_Pr); | ||
3825 | SiS_SetRegByte(SiS_Pr->SiS_P3c6,0xFF); | ||
3826 | |||
3827 | if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { | ||
3828 | if(SiS_Pr->ChipType >= SIS_315H) { | ||
3829 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x38,backupreg); | ||
3830 | } else if((SiS_Pr->ChipType == SIS_630) || | ||
3831 | (SiS_Pr->ChipType == SIS_730)) { | ||
3832 | SiS_SetReg(SiS_Pr->SiS_P3d4,0x35,backupreg); | ||
3833 | } | ||
3834 | } | ||
3835 | |||
3836 | SiS_Handle760(SiS_Pr); | ||
3837 | |||
3838 | /* Backup/Set ModeNo in BIOS scratch area */ | ||
3839 | SiS_GetSetModeID(pScrn,ModeNo); | ||
3840 | |||
3841 | return true; | ||
3842 | } | ||
3843 | #endif /* Linux_XF86 */ | ||
3844 | |||
3845 | #ifndef GETBITSTR | 3325 | #ifndef GETBITSTR |
3846 | #define BITMASK(h,l) (((unsigned)(1U << ((h)-(l)+1))-1)<<(l)) | 3326 | #define BITMASK(h,l) (((unsigned)(1U << ((h)-(l)+1))-1)<<(l)) |
3847 | #define GENMASK(mask) BITMASK(1?mask,0?mask) | 3327 | #define GENMASK(mask) BITMASK(1?mask,0?mask) |
@@ -3927,7 +3407,7 @@ SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
3927 | SiS_Pr->CVBlankStart = SiS_Pr->SiS_VGAVDE; | 3407 | SiS_Pr->CVBlankStart = SiS_Pr->SiS_VGAVDE; |
3928 | 3408 | ||
3929 | if(SiS_Pr->ChipType < SIS_315H) { | 3409 | if(SiS_Pr->ChipType < SIS_315H) { |
3930 | #ifdef SIS300 | 3410 | #ifdef CONFIG_FB_SIS_300 |
3931 | tempbx = SiS_Pr->SiS_VGAHT; | 3411 | tempbx = SiS_Pr->SiS_VGAHT; |
3932 | if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { | 3412 | if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { |
3933 | tempbx = SiS_Pr->PanelHT; | 3413 | tempbx = SiS_Pr->PanelHT; |
@@ -3936,7 +3416,7 @@ SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
3936 | remaining = tempbx % 8; | 3416 | remaining = tempbx % 8; |
3937 | #endif | 3417 | #endif |
3938 | } else { | 3418 | } else { |
3939 | #ifdef SIS315H | 3419 | #ifdef CONFIG_FB_SIS_315 |
3940 | /* OK for LCDA, LVDS */ | 3420 | /* OK for LCDA, LVDS */ |
3941 | tempbx = SiS_Pr->PanelHT - SiS_Pr->PanelXRes; | 3421 | tempbx = SiS_Pr->PanelHT - SiS_Pr->PanelXRes; |
3942 | tempax = SiS_Pr->SiS_VGAHDE; /* not /2 ! */ | 3422 | tempax = SiS_Pr->SiS_VGAHDE; /* not /2 ! */ |
@@ -3950,7 +3430,7 @@ SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
3950 | SiS_Pr->CHTotal = SiS_Pr->CHBlankEnd = tempbx; | 3430 | SiS_Pr->CHTotal = SiS_Pr->CHBlankEnd = tempbx; |
3951 | 3431 | ||
3952 | if(SiS_Pr->ChipType < SIS_315H) { | 3432 | if(SiS_Pr->ChipType < SIS_315H) { |
3953 | #ifdef SIS300 | 3433 | #ifdef CONFIG_FB_SIS_300 |
3954 | if(SiS_Pr->SiS_VGAHDE == SiS_Pr->PanelXRes) { | 3434 | if(SiS_Pr->SiS_VGAHDE == SiS_Pr->PanelXRes) { |
3955 | SiS_Pr->CHSyncStart = SiS_Pr->SiS_VGAHDE + ((SiS_Pr->PanelHRS + 1) & ~1); | 3435 | SiS_Pr->CHSyncStart = SiS_Pr->SiS_VGAHDE + ((SiS_Pr->PanelHRS + 1) & ~1); |
3956 | SiS_Pr->CHSyncEnd = SiS_Pr->CHSyncStart + SiS_Pr->PanelHRE; | 3436 | SiS_Pr->CHSyncEnd = SiS_Pr->CHSyncStart + SiS_Pr->PanelHRE; |
@@ -3982,7 +3462,7 @@ SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
3982 | } | 3462 | } |
3983 | #endif | 3463 | #endif |
3984 | } else { | 3464 | } else { |
3985 | #ifdef SIS315H | 3465 | #ifdef CONFIG_FB_SIS_315 |
3986 | tempax = VGAHDE; | 3466 | tempax = VGAHDE; |
3987 | if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { | 3467 | if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { |
3988 | tempbx = SiS_Pr->PanelXRes; | 3468 | tempbx = SiS_Pr->PanelXRes; |
@@ -4001,7 +3481,7 @@ SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
4001 | if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { | 3481 | if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { |
4002 | tempax = SiS_Pr->PanelYRes; | 3482 | tempax = SiS_Pr->PanelYRes; |
4003 | } else if(SiS_Pr->ChipType < SIS_315H) { | 3483 | } else if(SiS_Pr->ChipType < SIS_315H) { |
4004 | #ifdef SIS300 | 3484 | #ifdef CONFIG_FB_SIS_300 |
4005 | /* Stupid hack for 640x400/320x200 */ | 3485 | /* Stupid hack for 640x400/320x200 */ |
4006 | if(SiS_Pr->SiS_LCDResInfo == Panel_1024x768) { | 3486 | if(SiS_Pr->SiS_LCDResInfo == Panel_1024x768) { |
4007 | if((tempax + tempbx) == 438) tempbx += 16; | 3487 | if((tempax + tempbx) == 438) tempbx += 16; |
@@ -4054,36 +3534,12 @@ SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
4054 | if(modeflag & DoubleScanMode) tempax |= 0x80; | 3534 | if(modeflag & DoubleScanMode) tempax |= 0x80; |
4055 | SiS_SetRegANDOR(SiS_Pr->SiS_P3d4,0x09,0x5F,tempax); | 3535 | SiS_SetRegANDOR(SiS_Pr->SiS_P3d4,0x09,0x5F,tempax); |
4056 | 3536 | ||
4057 | #ifdef SIS_XORG_XF86 | ||
4058 | #ifdef TWDEBUG | ||
4059 | xf86DrvMsg(0, X_INFO, "%d %d %d %d %d %d %d %d (%d %d %d %d)\n", | ||
4060 | SiS_Pr->CHDisplay, SiS_Pr->CHSyncStart, SiS_Pr->CHSyncEnd, SiS_Pr->CHTotal, | ||
4061 | SiS_Pr->CVDisplay, SiS_Pr->CVSyncStart, SiS_Pr->CVSyncEnd, SiS_Pr->CVTotal, | ||
4062 | SiS_Pr->CHBlankStart, SiS_Pr->CHBlankEnd, SiS_Pr->CVBlankStart, SiS_Pr->CVBlankEnd); | ||
4063 | xf86DrvMsg(0, X_INFO, " {{0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,\n", | ||
4064 | SiS_Pr->CCRT1CRTC[0], SiS_Pr->CCRT1CRTC[1], | ||
4065 | SiS_Pr->CCRT1CRTC[2], SiS_Pr->CCRT1CRTC[3], | ||
4066 | SiS_Pr->CCRT1CRTC[4], SiS_Pr->CCRT1CRTC[5], | ||
4067 | SiS_Pr->CCRT1CRTC[6], SiS_Pr->CCRT1CRTC[7]); | ||
4068 | xf86DrvMsg(0, X_INFO, " 0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,\n", | ||
4069 | SiS_Pr->CCRT1CRTC[8], SiS_Pr->CCRT1CRTC[9], | ||
4070 | SiS_Pr->CCRT1CRTC[10], SiS_Pr->CCRT1CRTC[11], | ||
4071 | SiS_Pr->CCRT1CRTC[12], SiS_Pr->CCRT1CRTC[13], | ||
4072 | SiS_Pr->CCRT1CRTC[14], SiS_Pr->CCRT1CRTC[15]); | ||
4073 | xf86DrvMsg(0, X_INFO, " 0x%02x}},\n", SiS_Pr->CCRT1CRTC[16]); | ||
4074 | #endif | ||
4075 | #endif | ||
4076 | } | 3537 | } |
4077 | 3538 | ||
4078 | void | 3539 | void |
4079 | SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata, | 3540 | SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata, |
4080 | int xres, int yres, | 3541 | int xres, int yres, |
4081 | #ifdef SIS_XORG_XF86 | ||
4082 | DisplayModePtr current | ||
4083 | #endif | ||
4084 | #ifdef SIS_LINUX_KERNEL | ||
4085 | struct fb_var_screeninfo *var, bool writeres | 3542 | struct fb_var_screeninfo *var, bool writeres |
4086 | #endif | ||
4087 | ) | 3543 | ) |
4088 | { | 3544 | { |
4089 | unsigned short HRE, HBE, HRS, HBS, HDE, HT; | 3545 | unsigned short HRE, HBE, HRS, HBS, HDE, HT; |
@@ -4127,25 +3583,10 @@ SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata, | |||
4127 | 3583 | ||
4128 | D = B - F - C; | 3584 | D = B - F - C; |
4129 | 3585 | ||
4130 | #ifdef SIS_XORG_XF86 | ||
4131 | current->HDisplay = (E * 8); | ||
4132 | current->HSyncStart = (E * 8) + (F * 8); | ||
4133 | current->HSyncEnd = (E * 8) + (F * 8) + (C * 8); | ||
4134 | current->HTotal = (E * 8) + (F * 8) + (C * 8) + (D * 8); | ||
4135 | #ifdef TWDEBUG | ||
4136 | xf86DrvMsg(0, X_INFO, | ||
4137 | "H: A %d B %d C %d D %d E %d F %d HT %d HDE %d HRS %d HBS %d HBE %d HRE %d\n", | ||
4138 | A, B, C, D, E, F, HT, HDE, HRS, HBS, HBE, HRE); | ||
4139 | #else | ||
4140 | (void)VBS; (void)HBS; (void)A; | ||
4141 | #endif | ||
4142 | #endif | ||
4143 | #ifdef SIS_LINUX_KERNEL | ||
4144 | if(writeres) var->xres = xres = E * 8; | 3586 | if(writeres) var->xres = xres = E * 8; |
4145 | var->left_margin = D * 8; | 3587 | var->left_margin = D * 8; |
4146 | var->right_margin = F * 8; | 3588 | var->right_margin = F * 8; |
4147 | var->hsync_len = C * 8; | 3589 | var->hsync_len = C * 8; |
4148 | #endif | ||
4149 | 3590 | ||
4150 | /* Vertical */ | 3591 | /* Vertical */ |
4151 | sr_data = crdata[13]; | 3592 | sr_data = crdata[13]; |
@@ -4192,30 +3633,10 @@ SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata, | |||
4192 | 3633 | ||
4193 | D = B - F - C; | 3634 | D = B - F - C; |
4194 | 3635 | ||
4195 | #ifdef SIS_XORG_XF86 | ||
4196 | current->VDisplay = VDE + 1; | ||
4197 | current->VSyncStart = VRS + 1; | ||
4198 | current->VSyncEnd = ((VRS & ~0x1f) | VRE) + 1; | ||
4199 | if(VRE <= (VRS & 0x1f)) current->VSyncEnd += 32; | ||
4200 | current->VTotal = E + D + C + F; | ||
4201 | #if 0 | ||
4202 | current->VDisplay = E; | ||
4203 | current->VSyncStart = E + D; | ||
4204 | current->VSyncEnd = E + D + C; | ||
4205 | current->VTotal = E + D + C + F; | ||
4206 | #endif | ||
4207 | #ifdef TWDEBUG | ||
4208 | xf86DrvMsg(0, X_INFO, | ||
4209 | "V: A %d B %d C %d D %d E %d F %d VT %d VDE %d VRS %d VBS %d VBE %d VRE %d\n", | ||
4210 | A, B, C, D, E, F, VT, VDE, VRS, VBS, VBE, VRE); | ||
4211 | #endif | ||
4212 | #endif | ||
4213 | #ifdef SIS_LINUX_KERNEL | ||
4214 | if(writeres) var->yres = yres = E; | 3636 | if(writeres) var->yres = yres = E; |
4215 | var->upper_margin = D; | 3637 | var->upper_margin = D; |
4216 | var->lower_margin = F; | 3638 | var->lower_margin = F; |
4217 | var->vsync_len = C; | 3639 | var->vsync_len = C; |
4218 | #endif | ||
4219 | 3640 | ||
4220 | if((xres == 320) && ((yres == 200) || (yres == 240))) { | 3641 | if((xres == 320) && ((yres == 200) || (yres == 240))) { |
4221 | /* Terrible hack, but correct CRTC data for | 3642 | /* Terrible hack, but correct CRTC data for |
@@ -4224,17 +3645,9 @@ SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata, | |||
4224 | * a negative D. The CRT controller does not | 3645 | * a negative D. The CRT controller does not |
4225 | * seem to like correcting HRE to 50) | 3646 | * seem to like correcting HRE to 50) |
4226 | */ | 3647 | */ |
4227 | #ifdef SIS_XORG_XF86 | ||
4228 | current->HDisplay = 320; | ||
4229 | current->HSyncStart = 328; | ||
4230 | current->HSyncEnd = 376; | ||
4231 | current->HTotal = 400; | ||
4232 | #endif | ||
4233 | #ifdef SIS_LINUX_KERNEL | ||
4234 | var->left_margin = (400 - 376); | 3648 | var->left_margin = (400 - 376); |
4235 | var->right_margin = (328 - 320); | 3649 | var->right_margin = (328 - 320); |
4236 | var->hsync_len = (376 - 328); | 3650 | var->hsync_len = (376 - 328); |
4237 | #endif | ||
4238 | 3651 | ||
4239 | } | 3652 | } |
4240 | 3653 | ||
diff --git a/drivers/video/sis/init.h b/drivers/video/sis/init.h index b96005c39c67..ee8ed3c203da 100644 --- a/drivers/video/sis/init.h +++ b/drivers/video/sis/init.h | |||
@@ -53,21 +53,8 @@ | |||
53 | #ifndef _INIT_H_ | 53 | #ifndef _INIT_H_ |
54 | #define _INIT_H_ | 54 | #define _INIT_H_ |
55 | 55 | ||
56 | #include "osdef.h" | ||
57 | #include "initdef.h" | 56 | #include "initdef.h" |
58 | 57 | ||
59 | #ifdef SIS_XORG_XF86 | ||
60 | #include "sis.h" | ||
61 | #define SIS_NEED_inSISREG | ||
62 | #define SIS_NEED_inSISREGW | ||
63 | #define SIS_NEED_inSISREGL | ||
64 | #define SIS_NEED_outSISREG | ||
65 | #define SIS_NEED_outSISREGW | ||
66 | #define SIS_NEED_outSISREGL | ||
67 | #include "sis_regs.h" | ||
68 | #endif | ||
69 | |||
70 | #ifdef SIS_LINUX_KERNEL | ||
71 | #include "vgatypes.h" | 58 | #include "vgatypes.h" |
72 | #include "vstruct.h" | 59 | #include "vstruct.h" |
73 | #ifdef SIS_CP | 60 | #ifdef SIS_CP |
@@ -78,7 +65,6 @@ | |||
78 | #include <linux/fb.h> | 65 | #include <linux/fb.h> |
79 | #include "sis.h" | 66 | #include "sis.h" |
80 | #include <video/sisfb.h> | 67 | #include <video/sisfb.h> |
81 | #endif | ||
82 | 68 | ||
83 | /* Mode numbers */ | 69 | /* Mode numbers */ |
84 | static const unsigned short ModeIndex_320x200[] = {0x59, 0x41, 0x00, 0x4f}; | 70 | static const unsigned short ModeIndex_320x200[] = {0x59, 0x41, 0x00, 0x4f}; |
@@ -286,7 +272,7 @@ static const struct SiS_ModeResInfo_S SiS_ModeResInfo[] = | |||
286 | { 1280, 854, 8,16} /* 0x22 */ | 272 | { 1280, 854, 8,16} /* 0x22 */ |
287 | }; | 273 | }; |
288 | 274 | ||
289 | #if defined(SIS300) || defined(SIS315H) | 275 | #if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) |
290 | static const struct SiS_StandTable_S SiS_StandTable[]= | 276 | static const struct SiS_StandTable_S SiS_StandTable[]= |
291 | { | 277 | { |
292 | /* 0x00: MD_0_200 */ | 278 | /* 0x00: MD_0_200 */ |
@@ -1521,10 +1507,6 @@ static const struct SiS_LVDSCRT1Data SiS_LVDSCRT1640x480_1_H[] = | |||
1521 | }; | 1507 | }; |
1522 | 1508 | ||
1523 | bool SiSInitPtr(struct SiS_Private *SiS_Pr); | 1509 | bool SiSInitPtr(struct SiS_Private *SiS_Pr); |
1524 | #ifdef SIS_XORG_XF86 | ||
1525 | unsigned short SiS_GetModeID(int VGAEngine, unsigned int VBFlags, int HDisplay, int VDisplay, | ||
1526 | int Depth, bool FSTN, int LCDwith, int LCDheight); | ||
1527 | #endif | ||
1528 | unsigned short SiS_GetModeID_LCD(int VGAEngine, unsigned int VBFlags, int HDisplay, | 1510 | unsigned short SiS_GetModeID_LCD(int VGAEngine, unsigned int VBFlags, int HDisplay, |
1529 | int VDisplay, int Depth, bool FSTN, | 1511 | int VDisplay, int Depth, bool FSTN, |
1530 | unsigned short CustomT, int LCDwith, int LCDheight, | 1512 | unsigned short CustomT, int LCDwith, int LCDheight, |
@@ -1550,17 +1532,11 @@ void SiS_SetRegOR(SISIOADDRESS Port,unsigned short Index, unsigned short DataOR | |||
1550 | void SiS_DisplayOn(struct SiS_Private *SiS_Pr); | 1532 | void SiS_DisplayOn(struct SiS_Private *SiS_Pr); |
1551 | void SiS_DisplayOff(struct SiS_Private *SiS_Pr); | 1533 | void SiS_DisplayOff(struct SiS_Private *SiS_Pr); |
1552 | void SiSRegInit(struct SiS_Private *SiS_Pr, SISIOADDRESS BaseAddr); | 1534 | void SiSRegInit(struct SiS_Private *SiS_Pr, SISIOADDRESS BaseAddr); |
1553 | #ifndef SIS_LINUX_KERNEL | ||
1554 | void SiSSetLVDSetc(struct SiS_Private *SiS_Pr); | ||
1555 | #endif | ||
1556 | void SiS_SetEnableDstn(struct SiS_Private *SiS_Pr, int enable); | 1535 | void SiS_SetEnableDstn(struct SiS_Private *SiS_Pr, int enable); |
1557 | void SiS_SetEnableFstn(struct SiS_Private *SiS_Pr, int enable); | 1536 | void SiS_SetEnableFstn(struct SiS_Private *SiS_Pr, int enable); |
1558 | unsigned short SiS_GetModeFlag(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | 1537 | unsigned short SiS_GetModeFlag(struct SiS_Private *SiS_Pr, unsigned short ModeNo, |
1559 | unsigned short ModeIdIndex); | 1538 | unsigned short ModeIdIndex); |
1560 | bool SiSDetermineROMLayout661(struct SiS_Private *SiS_Pr); | 1539 | bool SiSDetermineROMLayout661(struct SiS_Private *SiS_Pr); |
1561 | #ifndef SIS_LINUX_KERNEL | ||
1562 | void SiS_GetVBType(struct SiS_Private *SiS_Pr); | ||
1563 | #endif | ||
1564 | 1540 | ||
1565 | bool SiS_SearchModeID(struct SiS_Private *SiS_Pr, unsigned short *ModeNo, | 1541 | bool SiS_SearchModeID(struct SiS_Private *SiS_Pr, unsigned short *ModeNo, |
1566 | unsigned short *ModeIdIndex); | 1542 | unsigned short *ModeIdIndex); |
@@ -1572,37 +1548,19 @@ unsigned short SiS_GetColorDepth(struct SiS_Private *SiS_Pr, unsigned short Mode | |||
1572 | unsigned short ModeIdIndex); | 1548 | unsigned short ModeIdIndex); |
1573 | unsigned short SiS_GetOffset(struct SiS_Private *SiS_Pr,unsigned short ModeNo, | 1549 | unsigned short SiS_GetOffset(struct SiS_Private *SiS_Pr,unsigned short ModeNo, |
1574 | unsigned short ModeIdIndex, unsigned short RRTI); | 1550 | unsigned short ModeIdIndex, unsigned short RRTI); |
1575 | #ifdef SIS300 | 1551 | #ifdef CONFIG_FB_SIS_300 |
1576 | void SiS_GetFIFOThresholdIndex300(struct SiS_Private *SiS_Pr, unsigned short *idx1, | 1552 | void SiS_GetFIFOThresholdIndex300(struct SiS_Private *SiS_Pr, unsigned short *idx1, |
1577 | unsigned short *idx2); | 1553 | unsigned short *idx2); |
1578 | unsigned short SiS_GetFIFOThresholdB300(unsigned short idx1, unsigned short idx2); | 1554 | unsigned short SiS_GetFIFOThresholdB300(unsigned short idx1, unsigned short idx2); |
1579 | unsigned short SiS_GetLatencyFactor630(struct SiS_Private *SiS_Pr, unsigned short index); | 1555 | unsigned short SiS_GetLatencyFactor630(struct SiS_Private *SiS_Pr, unsigned short index); |
1580 | #endif | 1556 | #endif |
1581 | void SiS_LoadDAC(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex); | 1557 | void SiS_LoadDAC(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex); |
1582 | #ifdef SIS_XORG_XF86 | ||
1583 | bool SiSSetMode(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, unsigned short ModeNo, | ||
1584 | bool dosetpitch); | ||
1585 | bool SiSBIOSSetMode(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, | ||
1586 | DisplayModePtr mode, bool IsCustom); | ||
1587 | bool SiSBIOSSetModeCRT2(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, | ||
1588 | DisplayModePtr mode, bool IsCustom); | ||
1589 | bool SiSBIOSSetModeCRT1(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, | ||
1590 | DisplayModePtr mode, bool IsCustom); | ||
1591 | #endif | ||
1592 | #ifdef SIS_LINUX_KERNEL | ||
1593 | bool SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo); | 1558 | bool SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo); |
1594 | #endif | ||
1595 | void SiS_CalcCRRegisters(struct SiS_Private *SiS_Pr, int depth); | 1559 | void SiS_CalcCRRegisters(struct SiS_Private *SiS_Pr, int depth); |
1596 | void SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | 1560 | void SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo, |
1597 | unsigned short ModeIdIndex); | 1561 | unsigned short ModeIdIndex); |
1598 | #ifdef SIS_XORG_XF86 | ||
1599 | void SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata, int xres, | ||
1600 | int yres, DisplayModePtr current); | ||
1601 | #endif | ||
1602 | #ifdef SIS_LINUX_KERNEL | ||
1603 | void SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata, int xres, | 1562 | void SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata, int xres, |
1604 | int yres, struct fb_var_screeninfo *var, bool writeres); | 1563 | int yres, struct fb_var_screeninfo *var, bool writeres); |
1605 | #endif | ||
1606 | 1564 | ||
1607 | /* From init301.c: */ | 1565 | /* From init301.c: */ |
1608 | extern void SiS_GetVBInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | 1566 | extern void SiS_GetVBInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, |
@@ -1626,29 +1584,16 @@ extern unsigned short SiS_GetVCLK2Ptr(struct SiS_Private *SiS_Pr, unsigned short | |||
1626 | extern bool SiS_IsVAMode(struct SiS_Private *); | 1584 | extern bool SiS_IsVAMode(struct SiS_Private *); |
1627 | extern bool SiS_IsDualEdge(struct SiS_Private *); | 1585 | extern bool SiS_IsDualEdge(struct SiS_Private *); |
1628 | 1586 | ||
1629 | #ifdef SIS_XORG_XF86 | 1587 | #ifdef CONFIG_FB_SIS_300 |
1630 | /* From other modules: */ | ||
1631 | extern unsigned short SiS_CheckBuildCustomMode(ScrnInfoPtr pScrn, DisplayModePtr mode, | ||
1632 | unsigned int VBFlags); | ||
1633 | extern unsigned char SiS_GetSetBIOSScratch(ScrnInfoPtr pScrn, unsigned short offset, | ||
1634 | unsigned char value); | ||
1635 | extern unsigned char SiS_GetSetModeID(ScrnInfoPtr pScrn, unsigned char id); | ||
1636 | extern unsigned short SiS_GetModeNumber(ScrnInfoPtr pScrn, DisplayModePtr mode, | ||
1637 | unsigned int VBFlags); | ||
1638 | #endif | ||
1639 | |||
1640 | #ifdef SIS_LINUX_KERNEL | ||
1641 | #ifdef SIS300 | ||
1642 | extern unsigned int sisfb_read_nbridge_pci_dword(struct SiS_Private *SiS_Pr, int reg); | 1588 | extern unsigned int sisfb_read_nbridge_pci_dword(struct SiS_Private *SiS_Pr, int reg); |
1643 | extern void sisfb_write_nbridge_pci_dword(struct SiS_Private *SiS_Pr, int reg, | 1589 | extern void sisfb_write_nbridge_pci_dword(struct SiS_Private *SiS_Pr, int reg, |
1644 | unsigned int val); | 1590 | unsigned int val); |
1645 | #endif | 1591 | #endif |
1646 | #ifdef SIS315H | 1592 | #ifdef CONFIG_FB_SIS_315 |
1647 | extern void sisfb_write_nbridge_pci_byte(struct SiS_Private *SiS_Pr, int reg, | 1593 | extern void sisfb_write_nbridge_pci_byte(struct SiS_Private *SiS_Pr, int reg, |
1648 | unsigned char val); | 1594 | unsigned char val); |
1649 | extern unsigned int sisfb_read_mio_pci_word(struct SiS_Private *SiS_Pr, int reg); | 1595 | extern unsigned int sisfb_read_mio_pci_word(struct SiS_Private *SiS_Pr, int reg); |
1650 | #endif | 1596 | #endif |
1651 | #endif | ||
1652 | 1597 | ||
1653 | #endif | 1598 | #endif |
1654 | 1599 | ||
diff --git a/drivers/video/sis/init301.c b/drivers/video/sis/init301.c index da33d801c22e..9fa66fd4052a 100644 --- a/drivers/video/sis/init301.c +++ b/drivers/video/sis/init301.c | |||
@@ -75,11 +75,11 @@ | |||
75 | 75 | ||
76 | #include "init301.h" | 76 | #include "init301.h" |
77 | 77 | ||
78 | #ifdef SIS300 | 78 | #ifdef CONFIG_FB_SIS_300 |
79 | #include "oem300.h" | 79 | #include "oem300.h" |
80 | #endif | 80 | #endif |
81 | 81 | ||
82 | #ifdef SIS315H | 82 | #ifdef CONFIG_FB_SIS_315 |
83 | #include "oem310.h" | 83 | #include "oem310.h" |
84 | #endif | 84 | #endif |
85 | 85 | ||
@@ -87,9 +87,7 @@ | |||
87 | #define SiS_I2CDELAYSHORT 150 | 87 | #define SiS_I2CDELAYSHORT 150 |
88 | 88 | ||
89 | static unsigned short SiS_GetBIOSLCDResInfo(struct SiS_Private *SiS_Pr); | 89 | static unsigned short SiS_GetBIOSLCDResInfo(struct SiS_Private *SiS_Pr); |
90 | #ifdef SIS_LINUX_KERNEL | ||
91 | static void SiS_SetCH70xx(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val); | 90 | static void SiS_SetCH70xx(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val); |
92 | #endif | ||
93 | 91 | ||
94 | /*********************************************/ | 92 | /*********************************************/ |
95 | /* HELPER: Lock/Unlock CRT2 */ | 93 | /* HELPER: Lock/Unlock CRT2 */ |
@@ -106,9 +104,7 @@ SiS_UnLockCRT2(struct SiS_Private *SiS_Pr) | |||
106 | SiS_SetRegOR(SiS_Pr->SiS_Part1Port,0x24,0x01); | 104 | SiS_SetRegOR(SiS_Pr->SiS_Part1Port,0x24,0x01); |
107 | } | 105 | } |
108 | 106 | ||
109 | #ifdef SIS_LINUX_KERNEL | ||
110 | static | 107 | static |
111 | #endif | ||
112 | void | 108 | void |
113 | SiS_LockCRT2(struct SiS_Private *SiS_Pr) | 109 | SiS_LockCRT2(struct SiS_Private *SiS_Pr) |
114 | { | 110 | { |
@@ -138,7 +134,7 @@ SiS_SetRegSR11ANDOR(struct SiS_Private *SiS_Pr, unsigned short DataAND, unsigned | |||
138 | /* HELPER: Get Pointer to LCD structure */ | 134 | /* HELPER: Get Pointer to LCD structure */ |
139 | /*********************************************/ | 135 | /*********************************************/ |
140 | 136 | ||
141 | #ifdef SIS315H | 137 | #ifdef CONFIG_FB_SIS_315 |
142 | static unsigned char * | 138 | static unsigned char * |
143 | GetLCDStructPtr661(struct SiS_Private *SiS_Pr) | 139 | GetLCDStructPtr661(struct SiS_Private *SiS_Pr) |
144 | { | 140 | { |
@@ -404,7 +400,7 @@ SiS_SaveCRT2Info(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | |||
404 | /* HELPER: GET SOME DATA FROM BIOS ROM */ | 400 | /* HELPER: GET SOME DATA FROM BIOS ROM */ |
405 | /*********************************************/ | 401 | /*********************************************/ |
406 | 402 | ||
407 | #ifdef SIS300 | 403 | #ifdef CONFIG_FB_SIS_300 |
408 | static bool | 404 | static bool |
409 | SiS_CR36BIOSWord23b(struct SiS_Private *SiS_Pr) | 405 | SiS_CR36BIOSWord23b(struct SiS_Private *SiS_Pr) |
410 | { | 406 | { |
@@ -449,7 +445,7 @@ SiS_DDC2Delay(struct SiS_Private *SiS_Pr, unsigned int delaytime) | |||
449 | SiS_GetReg(SiS_Pr->SiS_P3c4, 0x05); | 445 | SiS_GetReg(SiS_Pr->SiS_P3c4, 0x05); |
450 | } | 446 | } |
451 | 447 | ||
452 | #if defined(SIS300) || defined(SIS315H) | 448 | #if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) |
453 | static void | 449 | static void |
454 | SiS_GenericDelay(struct SiS_Private *SiS_Pr, unsigned short delay) | 450 | SiS_GenericDelay(struct SiS_Private *SiS_Pr, unsigned short delay) |
455 | { | 451 | { |
@@ -457,7 +453,7 @@ SiS_GenericDelay(struct SiS_Private *SiS_Pr, unsigned short delay) | |||
457 | } | 453 | } |
458 | #endif | 454 | #endif |
459 | 455 | ||
460 | #ifdef SIS315H | 456 | #ifdef CONFIG_FB_SIS_315 |
461 | static void | 457 | static void |
462 | SiS_LongDelay(struct SiS_Private *SiS_Pr, unsigned short delay) | 458 | SiS_LongDelay(struct SiS_Private *SiS_Pr, unsigned short delay) |
463 | { | 459 | { |
@@ -467,7 +463,7 @@ SiS_LongDelay(struct SiS_Private *SiS_Pr, unsigned short delay) | |||
467 | } | 463 | } |
468 | #endif | 464 | #endif |
469 | 465 | ||
470 | #if defined(SIS300) || defined(SIS315H) | 466 | #if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) |
471 | static void | 467 | static void |
472 | SiS_ShortDelay(struct SiS_Private *SiS_Pr, unsigned short delay) | 468 | SiS_ShortDelay(struct SiS_Private *SiS_Pr, unsigned short delay) |
473 | { | 469 | { |
@@ -480,14 +476,14 @@ SiS_ShortDelay(struct SiS_Private *SiS_Pr, unsigned short delay) | |||
480 | static void | 476 | static void |
481 | SiS_PanelDelay(struct SiS_Private *SiS_Pr, unsigned short DelayTime) | 477 | SiS_PanelDelay(struct SiS_Private *SiS_Pr, unsigned short DelayTime) |
482 | { | 478 | { |
483 | #if defined(SIS300) || defined(SIS315H) | 479 | #if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) |
484 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; | 480 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; |
485 | unsigned short PanelID, DelayIndex, Delay=0; | 481 | unsigned short PanelID, DelayIndex, Delay=0; |
486 | #endif | 482 | #endif |
487 | 483 | ||
488 | if(SiS_Pr->ChipType < SIS_315H) { | 484 | if(SiS_Pr->ChipType < SIS_315H) { |
489 | 485 | ||
490 | #ifdef SIS300 | 486 | #ifdef CONFIG_FB_SIS_300 |
491 | 487 | ||
492 | PanelID = SiS_GetReg(SiS_Pr->SiS_P3d4,0x36); | 488 | PanelID = SiS_GetReg(SiS_Pr->SiS_P3d4,0x36); |
493 | if(SiS_Pr->SiS_VBType & VB_SISVB) { | 489 | if(SiS_Pr->SiS_VBType & VB_SISVB) { |
@@ -513,11 +509,11 @@ SiS_PanelDelay(struct SiS_Private *SiS_Pr, unsigned short DelayTime) | |||
513 | } | 509 | } |
514 | SiS_ShortDelay(SiS_Pr, Delay); | 510 | SiS_ShortDelay(SiS_Pr, Delay); |
515 | 511 | ||
516 | #endif /* SIS300 */ | 512 | #endif /* CONFIG_FB_SIS_300 */ |
517 | 513 | ||
518 | } else { | 514 | } else { |
519 | 515 | ||
520 | #ifdef SIS315H | 516 | #ifdef CONFIG_FB_SIS_315 |
521 | 517 | ||
522 | if((SiS_Pr->ChipType >= SIS_661) || | 518 | if((SiS_Pr->ChipType >= SIS_661) || |
523 | (SiS_Pr->ChipType <= SIS_315PRO) || | 519 | (SiS_Pr->ChipType <= SIS_315PRO) || |
@@ -579,12 +575,12 @@ SiS_PanelDelay(struct SiS_Private *SiS_Pr, unsigned short DelayTime) | |||
579 | 575 | ||
580 | } | 576 | } |
581 | 577 | ||
582 | #endif /* SIS315H */ | 578 | #endif /* CONFIG_FB_SIS_315 */ |
583 | 579 | ||
584 | } | 580 | } |
585 | } | 581 | } |
586 | 582 | ||
587 | #ifdef SIS315H | 583 | #ifdef CONFIG_FB_SIS_315 |
588 | static void | 584 | static void |
589 | SiS_PanelDelayLoop(struct SiS_Private *SiS_Pr, unsigned short DelayTime, unsigned short DelayLoop) | 585 | SiS_PanelDelayLoop(struct SiS_Private *SiS_Pr, unsigned short DelayTime, unsigned short DelayLoop) |
590 | { | 586 | { |
@@ -613,7 +609,7 @@ SiS_WaitRetrace1(struct SiS_Private *SiS_Pr) | |||
613 | while((!(SiS_GetRegByte(SiS_Pr->SiS_P3da) & 0x08)) && --watchdog); | 609 | while((!(SiS_GetRegByte(SiS_Pr->SiS_P3da) & 0x08)) && --watchdog); |
614 | } | 610 | } |
615 | 611 | ||
616 | #if defined(SIS300) || defined(SIS315H) | 612 | #if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) |
617 | static void | 613 | static void |
618 | SiS_WaitRetrace2(struct SiS_Private *SiS_Pr, unsigned short reg) | 614 | SiS_WaitRetrace2(struct SiS_Private *SiS_Pr, unsigned short reg) |
619 | { | 615 | { |
@@ -630,7 +626,7 @@ static void | |||
630 | SiS_WaitVBRetrace(struct SiS_Private *SiS_Pr) | 626 | SiS_WaitVBRetrace(struct SiS_Private *SiS_Pr) |
631 | { | 627 | { |
632 | if(SiS_Pr->ChipType < SIS_315H) { | 628 | if(SiS_Pr->ChipType < SIS_315H) { |
633 | #ifdef SIS300 | 629 | #ifdef CONFIG_FB_SIS_300 |
634 | if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { | 630 | if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { |
635 | if(!(SiS_GetReg(SiS_Pr->SiS_Part1Port,0x00) & 0x20)) return; | 631 | if(!(SiS_GetReg(SiS_Pr->SiS_Part1Port,0x00) & 0x20)) return; |
636 | } | 632 | } |
@@ -641,7 +637,7 @@ SiS_WaitVBRetrace(struct SiS_Private *SiS_Pr) | |||
641 | } | 637 | } |
642 | #endif | 638 | #endif |
643 | } else { | 639 | } else { |
644 | #ifdef SIS315H | 640 | #ifdef CONFIG_FB_SIS_315 |
645 | if(!(SiS_GetReg(SiS_Pr->SiS_Part1Port,0x00) & 0x40)) { | 641 | if(!(SiS_GetReg(SiS_Pr->SiS_Part1Port,0x00) & 0x40)) { |
646 | SiS_WaitRetrace1(SiS_Pr); | 642 | SiS_WaitRetrace1(SiS_Pr); |
647 | } else { | 643 | } else { |
@@ -686,7 +682,7 @@ SiS_VBLongWait(struct SiS_Private *SiS_Pr) | |||
686 | /* HELPER: MISC */ | 682 | /* HELPER: MISC */ |
687 | /*********************************************/ | 683 | /*********************************************/ |
688 | 684 | ||
689 | #ifdef SIS300 | 685 | #ifdef CONFIG_FB_SIS_300 |
690 | static bool | 686 | static bool |
691 | SiS_Is301B(struct SiS_Private *SiS_Pr) | 687 | SiS_Is301B(struct SiS_Private *SiS_Pr) |
692 | { | 688 | { |
@@ -708,7 +704,7 @@ SiS_CRT2IsLCD(struct SiS_Private *SiS_Pr) | |||
708 | bool | 704 | bool |
709 | SiS_IsDualEdge(struct SiS_Private *SiS_Pr) | 705 | SiS_IsDualEdge(struct SiS_Private *SiS_Pr) |
710 | { | 706 | { |
711 | #ifdef SIS315H | 707 | #ifdef CONFIG_FB_SIS_315 |
712 | if(SiS_Pr->ChipType >= SIS_315H) { | 708 | if(SiS_Pr->ChipType >= SIS_315H) { |
713 | if((SiS_Pr->ChipType != SIS_650) || (SiS_GetReg(SiS_Pr->SiS_P3d4,0x5f) & 0xf0)) { | 709 | if((SiS_Pr->ChipType != SIS_650) || (SiS_GetReg(SiS_Pr->SiS_P3d4,0x5f) & 0xf0)) { |
714 | if(SiS_GetReg(SiS_Pr->SiS_P3d4,0x38) & EnableDualEdge) return true; | 710 | if(SiS_GetReg(SiS_Pr->SiS_P3d4,0x38) & EnableDualEdge) return true; |
@@ -721,7 +717,7 @@ SiS_IsDualEdge(struct SiS_Private *SiS_Pr) | |||
721 | bool | 717 | bool |
722 | SiS_IsVAMode(struct SiS_Private *SiS_Pr) | 718 | SiS_IsVAMode(struct SiS_Private *SiS_Pr) |
723 | { | 719 | { |
724 | #ifdef SIS315H | 720 | #ifdef CONFIG_FB_SIS_315 |
725 | unsigned short flag; | 721 | unsigned short flag; |
726 | 722 | ||
727 | if(SiS_Pr->ChipType >= SIS_315H) { | 723 | if(SiS_Pr->ChipType >= SIS_315H) { |
@@ -732,7 +728,7 @@ SiS_IsVAMode(struct SiS_Private *SiS_Pr) | |||
732 | return false; | 728 | return false; |
733 | } | 729 | } |
734 | 730 | ||
735 | #ifdef SIS315H | 731 | #ifdef CONFIG_FB_SIS_315 |
736 | static bool | 732 | static bool |
737 | SiS_IsVAorLCD(struct SiS_Private *SiS_Pr) | 733 | SiS_IsVAorLCD(struct SiS_Private *SiS_Pr) |
738 | { | 734 | { |
@@ -745,7 +741,7 @@ SiS_IsVAorLCD(struct SiS_Private *SiS_Pr) | |||
745 | static bool | 741 | static bool |
746 | SiS_IsDualLink(struct SiS_Private *SiS_Pr) | 742 | SiS_IsDualLink(struct SiS_Private *SiS_Pr) |
747 | { | 743 | { |
748 | #ifdef SIS315H | 744 | #ifdef CONFIG_FB_SIS_315 |
749 | if(SiS_Pr->ChipType >= SIS_315H) { | 745 | if(SiS_Pr->ChipType >= SIS_315H) { |
750 | if((SiS_CRT2IsLCD(SiS_Pr)) || | 746 | if((SiS_CRT2IsLCD(SiS_Pr)) || |
751 | (SiS_IsVAMode(SiS_Pr))) { | 747 | (SiS_IsVAMode(SiS_Pr))) { |
@@ -756,7 +752,7 @@ SiS_IsDualLink(struct SiS_Private *SiS_Pr) | |||
756 | return false; | 752 | return false; |
757 | } | 753 | } |
758 | 754 | ||
759 | #ifdef SIS315H | 755 | #ifdef CONFIG_FB_SIS_315 |
760 | static bool | 756 | static bool |
761 | SiS_TVEnabled(struct SiS_Private *SiS_Pr) | 757 | SiS_TVEnabled(struct SiS_Private *SiS_Pr) |
762 | { | 758 | { |
@@ -768,7 +764,7 @@ SiS_TVEnabled(struct SiS_Private *SiS_Pr) | |||
768 | } | 764 | } |
769 | #endif | 765 | #endif |
770 | 766 | ||
771 | #ifdef SIS315H | 767 | #ifdef CONFIG_FB_SIS_315 |
772 | static bool | 768 | static bool |
773 | SiS_LCDAEnabled(struct SiS_Private *SiS_Pr) | 769 | SiS_LCDAEnabled(struct SiS_Private *SiS_Pr) |
774 | { | 770 | { |
@@ -777,7 +773,7 @@ SiS_LCDAEnabled(struct SiS_Private *SiS_Pr) | |||
777 | } | 773 | } |
778 | #endif | 774 | #endif |
779 | 775 | ||
780 | #ifdef SIS315H | 776 | #ifdef CONFIG_FB_SIS_315 |
781 | static bool | 777 | static bool |
782 | SiS_WeHaveBacklightCtrl(struct SiS_Private *SiS_Pr) | 778 | SiS_WeHaveBacklightCtrl(struct SiS_Private *SiS_Pr) |
783 | { | 779 | { |
@@ -788,7 +784,7 @@ SiS_WeHaveBacklightCtrl(struct SiS_Private *SiS_Pr) | |||
788 | } | 784 | } |
789 | #endif | 785 | #endif |
790 | 786 | ||
791 | #ifdef SIS315H | 787 | #ifdef CONFIG_FB_SIS_315 |
792 | static bool | 788 | static bool |
793 | SiS_IsNotM650orLater(struct SiS_Private *SiS_Pr) | 789 | SiS_IsNotM650orLater(struct SiS_Private *SiS_Pr) |
794 | { | 790 | { |
@@ -804,7 +800,7 @@ SiS_IsNotM650orLater(struct SiS_Private *SiS_Pr) | |||
804 | } | 800 | } |
805 | #endif | 801 | #endif |
806 | 802 | ||
807 | #ifdef SIS315H | 803 | #ifdef CONFIG_FB_SIS_315 |
808 | static bool | 804 | static bool |
809 | SiS_IsYPbPr(struct SiS_Private *SiS_Pr) | 805 | SiS_IsYPbPr(struct SiS_Private *SiS_Pr) |
810 | { | 806 | { |
@@ -816,7 +812,7 @@ SiS_IsYPbPr(struct SiS_Private *SiS_Pr) | |||
816 | } | 812 | } |
817 | #endif | 813 | #endif |
818 | 814 | ||
819 | #ifdef SIS315H | 815 | #ifdef CONFIG_FB_SIS_315 |
820 | static bool | 816 | static bool |
821 | SiS_IsChScart(struct SiS_Private *SiS_Pr) | 817 | SiS_IsChScart(struct SiS_Private *SiS_Pr) |
822 | { | 818 | { |
@@ -828,7 +824,7 @@ SiS_IsChScart(struct SiS_Private *SiS_Pr) | |||
828 | } | 824 | } |
829 | #endif | 825 | #endif |
830 | 826 | ||
831 | #ifdef SIS315H | 827 | #ifdef CONFIG_FB_SIS_315 |
832 | static bool | 828 | static bool |
833 | SiS_IsTVOrYPbPrOrScart(struct SiS_Private *SiS_Pr) | 829 | SiS_IsTVOrYPbPrOrScart(struct SiS_Private *SiS_Pr) |
834 | { | 830 | { |
@@ -848,7 +844,7 @@ SiS_IsTVOrYPbPrOrScart(struct SiS_Private *SiS_Pr) | |||
848 | } | 844 | } |
849 | #endif | 845 | #endif |
850 | 846 | ||
851 | #ifdef SIS315H | 847 | #ifdef CONFIG_FB_SIS_315 |
852 | static bool | 848 | static bool |
853 | SiS_IsLCDOrLCDA(struct SiS_Private *SiS_Pr) | 849 | SiS_IsLCDOrLCDA(struct SiS_Private *SiS_Pr) |
854 | { | 850 | { |
@@ -914,7 +910,7 @@ SiS_BridgeInSlavemode(struct SiS_Private *SiS_Pr) | |||
914 | /*********************************************/ | 910 | /*********************************************/ |
915 | 911 | ||
916 | /* Setup general purpose IO for Chrontel communication */ | 912 | /* Setup general purpose IO for Chrontel communication */ |
917 | #ifdef SIS300 | 913 | #ifdef CONFIG_FB_SIS_300 |
918 | void | 914 | void |
919 | SiS_SetChrontelGPIO(struct SiS_Private *SiS_Pr, unsigned short myvbinfo) | 915 | SiS_SetChrontelGPIO(struct SiS_Private *SiS_Pr, unsigned short myvbinfo) |
920 | { | 916 | { |
@@ -923,11 +919,7 @@ SiS_SetChrontelGPIO(struct SiS_Private *SiS_Pr, unsigned short myvbinfo) | |||
923 | 919 | ||
924 | if(!(SiS_Pr->SiS_ChSW)) return; | 920 | if(!(SiS_Pr->SiS_ChSW)) return; |
925 | 921 | ||
926 | #ifdef SIS_LINUX_KERNEL | ||
927 | acpibase = sisfb_read_lpc_pci_dword(SiS_Pr, 0x74); | 922 | acpibase = sisfb_read_lpc_pci_dword(SiS_Pr, 0x74); |
928 | #else | ||
929 | acpibase = pciReadLong(0x00000800, 0x74); | ||
930 | #endif | ||
931 | acpibase &= 0xFFFF; | 923 | acpibase &= 0xFFFF; |
932 | if(!acpibase) return; | 924 | if(!acpibase) return; |
933 | temp = SiS_GetRegShort((acpibase + 0x3c)); /* ACPI register 0x3c: GP Event 1 I/O mode select */ | 925 | temp = SiS_GetRegShort((acpibase + 0x3c)); /* ACPI register 0x3c: GP Event 1 I/O mode select */ |
@@ -969,7 +961,7 @@ SiS_GetVBInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
969 | tempax &= (DriverMode | LoadDACFlag | SetNotSimuMode | SetPALTV); | 961 | tempax &= (DriverMode | LoadDACFlag | SetNotSimuMode | SetPALTV); |
970 | tempbx |= tempax; | 962 | tempbx |= tempax; |
971 | 963 | ||
972 | #ifdef SIS315H | 964 | #ifdef CONFIG_FB_SIS_315 |
973 | if(SiS_Pr->ChipType >= SIS_315H) { | 965 | if(SiS_Pr->ChipType >= SIS_315H) { |
974 | if(SiS_Pr->SiS_VBType & VB_SISLCDA) { | 966 | if(SiS_Pr->SiS_VBType & VB_SISLCDA) { |
975 | if(ModeNo == 0x03) { | 967 | if(ModeNo == 0x03) { |
@@ -1019,7 +1011,7 @@ SiS_GetVBInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
1019 | } | 1011 | } |
1020 | } | 1012 | } |
1021 | 1013 | ||
1022 | #endif /* SIS315H */ | 1014 | #endif /* CONFIG_FB_SIS_315 */ |
1023 | 1015 | ||
1024 | if(!(SiS_Pr->SiS_VBType & VB_SISVGA2)) { | 1016 | if(!(SiS_Pr->SiS_VBType & VB_SISVGA2)) { |
1025 | tempbx &= ~(SetCRT2ToRAMDAC); | 1017 | tempbx &= ~(SetCRT2ToRAMDAC); |
@@ -1154,24 +1146,16 @@ SiS_GetVBInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, | |||
1154 | 1146 | ||
1155 | SiS_Pr->SiS_VBInfo = tempbx; | 1147 | SiS_Pr->SiS_VBInfo = tempbx; |
1156 | 1148 | ||
1157 | #ifdef SIS300 | 1149 | #ifdef CONFIG_FB_SIS_300 |
1158 | if(SiS_Pr->ChipType == SIS_630) { | 1150 | if(SiS_Pr->ChipType == SIS_630) { |
1159 | SiS_SetChrontelGPIO(SiS_Pr, SiS_Pr->SiS_VBInfo); | 1151 | SiS_SetChrontelGPIO(SiS_Pr, SiS_Pr->SiS_VBInfo); |
1160 | } | 1152 | } |
1161 | #endif | 1153 | #endif |
1162 | 1154 | ||
1163 | #ifdef SIS_LINUX_KERNEL | ||
1164 | #if 0 | 1155 | #if 0 |
1165 | printk(KERN_DEBUG "sisfb: (init301: VBInfo= 0x%04x, SetFlag=0x%04x)\n", | 1156 | printk(KERN_DEBUG "sisfb: (init301: VBInfo= 0x%04x, SetFlag=0x%04x)\n", |
1166 | SiS_Pr->SiS_VBInfo, SiS_Pr->SiS_SetFlag); | 1157 | SiS_Pr->SiS_VBInfo, SiS_Pr->SiS_SetFlag); |
1167 | #endif | 1158 | #endif |
1168 | #endif | ||
1169 | #ifdef SIS_XORG_XF86 | ||
1170 | #ifdef TWDEBUG | ||
1171 | xf86DrvMsg(0, X_PROBED, "(init301: VBInfo=0x%04x, SetFlag=0x%04x)\n", | ||
1172 | SiS_Pr->SiS_VBInfo, SiS_Pr->SiS_SetFlag); | ||
1173 | #endif | ||
1174 | #endif | ||
1175 | } | 1159 | } |
1176 | 1160 | ||
1177 | /*********************************************/ | 1161 | /*********************************************/ |
@@ -1415,12 +1399,6 @@ SiS_SetTVMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
1415 | } | 1399 | } |
1416 | 1400 | ||
1417 | SiS_Pr->SiS_VBInfo &= ~SetPALTV; | 1401 | SiS_Pr->SiS_VBInfo &= ~SetPALTV; |
1418 | |||
1419 | #ifdef SIS_XORG_XF86 | ||
1420 | #ifdef TWDEBUG | ||
1421 | xf86DrvMsg(0, X_INFO, "(init301: TVMode %x, VBInfo %x)\n", SiS_Pr->SiS_TVMode, SiS_Pr->SiS_VBInfo); | ||
1422 | #endif | ||
1423 | #endif | ||
1424 | } | 1402 | } |
1425 | 1403 | ||
1426 | /*********************************************/ | 1404 | /*********************************************/ |
@@ -1443,22 +1421,10 @@ SiS_GetBIOSLCDResInfo(struct SiS_Private *SiS_Pr) | |||
1443 | static void | 1421 | static void |
1444 | SiS_GetLCDInfoBIOS(struct SiS_Private *SiS_Pr) | 1422 | SiS_GetLCDInfoBIOS(struct SiS_Private *SiS_Pr) |
1445 | { | 1423 | { |
1446 | #ifdef SIS315H | 1424 | #ifdef CONFIG_FB_SIS_315 |
1447 | unsigned char *ROMAddr; | 1425 | unsigned char *ROMAddr; |
1448 | unsigned short temp; | 1426 | unsigned short temp; |
1449 | 1427 | ||
1450 | #ifdef SIS_XORG_XF86 | ||
1451 | #ifdef TWDEBUG | ||
1452 | xf86DrvMsg(0, X_INFO, "Paneldata driver: [%d %d] [H %d %d] [V %d %d] [C %d 0x%02x 0x%02x]\n", | ||
1453 | SiS_Pr->PanelHT, SiS_Pr->PanelVT, | ||
1454 | SiS_Pr->PanelHRS, SiS_Pr->PanelHRE, | ||
1455 | SiS_Pr->PanelVRS, SiS_Pr->PanelVRE, | ||
1456 | SiS_Pr->SiS_VBVCLKData[SiS_Pr->PanelVCLKIdx315].CLOCK, | ||
1457 | SiS_Pr->SiS_VBVCLKData[SiS_Pr->PanelVCLKIdx315].Part4_A, | ||
1458 | SiS_Pr->SiS_VBVCLKData[SiS_Pr->PanelVCLKIdx315].Part4_B); | ||
1459 | #endif | ||
1460 | #endif | ||
1461 | |||
1462 | if((ROMAddr = GetLCDStructPtr661(SiS_Pr))) { | 1428 | if((ROMAddr = GetLCDStructPtr661(SiS_Pr))) { |
1463 | if((temp = SISGETROMW(6)) != SiS_Pr->PanelHT) { | 1429 | if((temp = SISGETROMW(6)) != SiS_Pr->PanelHT) { |
1464 | SiS_Pr->SiS_NeedRomModeData = true; | 1430 | SiS_Pr->SiS_NeedRomModeData = true; |
@@ -1480,18 +1446,6 @@ SiS_GetLCDInfoBIOS(struct SiS_Private *SiS_Pr) | |||
1480 | SiS_Pr->SiS_VCLKData[VCLK_CUSTOM_315].SR2C = | 1446 | SiS_Pr->SiS_VCLKData[VCLK_CUSTOM_315].SR2C = |
1481 | SiS_Pr->SiS_VBVCLKData[VCLK_CUSTOM_315].Part4_B = ROMAddr[20]; | 1447 | SiS_Pr->SiS_VBVCLKData[VCLK_CUSTOM_315].Part4_B = ROMAddr[20]; |
1482 | 1448 | ||
1483 | #ifdef SIS_XORG_XF86 | ||
1484 | #ifdef TWDEBUG | ||
1485 | xf86DrvMsg(0, X_INFO, "Paneldata BIOS: [%d %d] [H %d %d] [V %d %d] [C %d 0x%02x 0x%02x]\n", | ||
1486 | SiS_Pr->PanelHT, SiS_Pr->PanelVT, | ||
1487 | SiS_Pr->PanelHRS, SiS_Pr->PanelHRE, | ||
1488 | SiS_Pr->PanelVRS, SiS_Pr->PanelVRE, | ||
1489 | SiS_Pr->SiS_VBVCLKData[SiS_Pr->PanelVCLKIdx315].CLOCK, | ||
1490 | SiS_Pr->SiS_VBVCLKData[SiS_Pr->PanelVCLKIdx315].Part4_A, | ||
1491 | SiS_Pr->SiS_VBVCLKData[SiS_Pr->PanelVCLKIdx315].Part4_B); | ||
1492 | #endif | ||
1493 | #endif | ||
1494 | |||
1495 | } | 1449 | } |
1496 | #endif | 1450 | #endif |
1497 | } | 1451 | } |
@@ -1517,13 +1471,13 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh | |||
1517 | { | 1471 | { |
1518 | unsigned short temp,modeflag,resinfo=0,modexres=0,modeyres=0; | 1472 | unsigned short temp,modeflag,resinfo=0,modexres=0,modeyres=0; |
1519 | bool panelcanscale = false; | 1473 | bool panelcanscale = false; |
1520 | #ifdef SIS300 | 1474 | #ifdef CONFIG_FB_SIS_300 |
1521 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; | 1475 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; |
1522 | static const unsigned char SiS300SeriesLCDRes[] = | 1476 | static const unsigned char SiS300SeriesLCDRes[] = |
1523 | { 0, 1, 2, 3, 7, 4, 5, 8, | 1477 | { 0, 1, 2, 3, 7, 4, 5, 8, |
1524 | 0, 0, 10, 0, 0, 0, 0, 15 }; | 1478 | 0, 0, 10, 0, 0, 0, 0, 15 }; |
1525 | #endif | 1479 | #endif |
1526 | #ifdef SIS315H | 1480 | #ifdef CONFIG_FB_SIS_315 |
1527 | unsigned char *myptr = NULL; | 1481 | unsigned char *myptr = NULL; |
1528 | #endif | 1482 | #endif |
1529 | 1483 | ||
@@ -1562,7 +1516,7 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh | |||
1562 | SiS_Pr->SiS_LCDTypeInfo = (temp & 0x0F) - 1; | 1516 | SiS_Pr->SiS_LCDTypeInfo = (temp & 0x0F) - 1; |
1563 | } | 1517 | } |
1564 | temp &= 0x0f; | 1518 | temp &= 0x0f; |
1565 | #ifdef SIS300 | 1519 | #ifdef CONFIG_FB_SIS_300 |
1566 | if(SiS_Pr->ChipType < SIS_315H) { | 1520 | if(SiS_Pr->ChipType < SIS_315H) { |
1567 | /* Very old BIOSes only know 7 sizes (NetVista 2179, 1.01g) */ | 1521 | /* Very old BIOSes only know 7 sizes (NetVista 2179, 1.01g) */ |
1568 | if(SiS_Pr->SiS_VBType & VB_SIS301) { | 1522 | if(SiS_Pr->SiS_VBType & VB_SIS301) { |
@@ -1574,7 +1528,7 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh | |||
1574 | #endif | 1528 | #endif |
1575 | 1529 | ||
1576 | /* Translate to our internal types */ | 1530 | /* Translate to our internal types */ |
1577 | #ifdef SIS315H | 1531 | #ifdef CONFIG_FB_SIS_315 |
1578 | if(SiS_Pr->ChipType == SIS_550) { | 1532 | if(SiS_Pr->ChipType == SIS_550) { |
1579 | if (temp == Panel310_1152x768) temp = Panel_320x240_2; /* Verified working */ | 1533 | if (temp == Panel310_1152x768) temp = Panel_320x240_2; /* Verified working */ |
1580 | else if(temp == Panel310_320x240_2) temp = Panel_320x240_2; | 1534 | else if(temp == Panel310_320x240_2) temp = Panel_320x240_2; |
@@ -1597,7 +1551,7 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh | |||
1597 | 1551 | ||
1598 | SiS_Pr->SiS_LCDResInfo = temp; | 1552 | SiS_Pr->SiS_LCDResInfo = temp; |
1599 | 1553 | ||
1600 | #ifdef SIS300 | 1554 | #ifdef CONFIG_FB_SIS_300 |
1601 | if(SiS_Pr->SiS_IF_DEF_LVDS == 1) { | 1555 | if(SiS_Pr->SiS_IF_DEF_LVDS == 1) { |
1602 | if(SiS_Pr->SiS_CustomT == CUT_BARCO1366) { | 1556 | if(SiS_Pr->SiS_CustomT == CUT_BARCO1366) { |
1603 | SiS_Pr->SiS_LCDResInfo = Panel_Barco1366; | 1557 | SiS_Pr->SiS_LCDResInfo = Panel_Barco1366; |
@@ -1639,7 +1593,7 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh | |||
1639 | else if(SiS_Pr->UsePanelScaler == 1) SiS_Pr->SiS_LCDInfo |= DontExpandLCD; | 1593 | else if(SiS_Pr->UsePanelScaler == 1) SiS_Pr->SiS_LCDInfo |= DontExpandLCD; |
1640 | 1594 | ||
1641 | /* Dual link, Pass 1:1 BIOS default, etc. */ | 1595 | /* Dual link, Pass 1:1 BIOS default, etc. */ |
1642 | #ifdef SIS315H | 1596 | #ifdef CONFIG_FB_SIS_315 |
1643 | if(SiS_Pr->ChipType >= SIS_661) { | 1597 | if(SiS_Pr->ChipType >= SIS_661) { |
1644 | if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { | 1598 | if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { |
1645 | if(temp & 0x08) SiS_Pr->SiS_LCDInfo |= LCDPass11; | 1599 | if(temp & 0x08) SiS_Pr->SiS_LCDInfo |= LCDPass11; |
@@ -2076,7 +2030,7 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh | |||
2076 | } | 2030 | } |
2077 | } | 2031 | } |
2078 | 2032 | ||
2079 | #ifdef SIS300 | 2033 | #ifdef CONFIG_FB_SIS_300 |
2080 | if(SiS_Pr->SiS_IF_DEF_LVDS == 1) { | 2034 | if(SiS_Pr->SiS_IF_DEF_LVDS == 1) { |
2081 | if(SiS_Pr->SiS_CustomT == CUT_PANEL848 || SiS_Pr->SiS_CustomT == CUT_PANEL856) { | 2035 | if(SiS_Pr->SiS_CustomT == CUT_PANEL848 || SiS_Pr->SiS_CustomT == CUT_PANEL856) { |
2082 | SiS_Pr->SiS_LCDInfo = 0x80 | 0x40 | 0x20; /* neg h/v sync, RGB24(D0 = 0) */ | 2036 | SiS_Pr->SiS_LCDInfo = 0x80 | 0x40 | 0x20; /* neg h/v sync, RGB24(D0 = 0) */ |
@@ -2186,17 +2140,10 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh | |||
2186 | SiS_Pr->SiS_SetFlag |= LCDVESATiming; | 2140 | SiS_Pr->SiS_SetFlag |= LCDVESATiming; |
2187 | } | 2141 | } |
2188 | 2142 | ||
2189 | #ifdef SIS_LINUX_KERNEL | ||
2190 | #if 0 | 2143 | #if 0 |
2191 | printk(KERN_DEBUG "sisfb: (LCDInfo=0x%04x LCDResInfo=0x%02x LCDTypeInfo=0x%02x)\n", | 2144 | printk(KERN_DEBUG "sisfb: (LCDInfo=0x%04x LCDResInfo=0x%02x LCDTypeInfo=0x%02x)\n", |
2192 | SiS_Pr->SiS_LCDInfo, SiS_Pr->SiS_LCDResInfo, SiS_Pr->SiS_LCDTypeInfo); | 2145 | SiS_Pr->SiS_LCDInfo, SiS_Pr->SiS_LCDResInfo, SiS_Pr->SiS_LCDTypeInfo); |
2193 | #endif | 2146 | #endif |
2194 | #endif | ||
2195 | #ifdef SIS_XORG_XF86 | ||
2196 | xf86DrvMsgVerb(0, X_PROBED, 4, | ||
2197 | "(init301: LCDInfo=0x%04x LCDResInfo=0x%02x LCDTypeInfo=0x%02x SetFlag=0x%04x)\n", | ||
2198 | SiS_Pr->SiS_LCDInfo, SiS_Pr->SiS_LCDResInfo, SiS_Pr->SiS_LCDTypeInfo, SiS_Pr->SiS_SetFlag); | ||
2199 | #endif | ||
2200 | } | 2147 | } |
2201 | 2148 | ||
2202 | /*********************************************/ | 2149 | /*********************************************/ |
@@ -2359,7 +2306,7 @@ SiS_GetVCLK2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor | |||
2359 | VCLKIndex = SiS_Pr->PanelVCLKIdx315; | 2306 | VCLKIndex = SiS_Pr->PanelVCLKIdx315; |
2360 | } | 2307 | } |
2361 | 2308 | ||
2362 | #ifdef SIS300 | 2309 | #ifdef CONFIG_FB_SIS_300 |
2363 | /* Special Timing: Barco iQ Pro R series */ | 2310 | /* Special Timing: Barco iQ Pro R series */ |
2364 | if(SiS_Pr->SiS_CustomT == CUT_BARCO1366) VCLKIndex = 0x44; | 2311 | if(SiS_Pr->SiS_CustomT == CUT_BARCO1366) VCLKIndex = 0x44; |
2365 | 2312 | ||
@@ -2410,12 +2357,6 @@ SiS_GetVCLK2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor | |||
2410 | 2357 | ||
2411 | } | 2358 | } |
2412 | 2359 | ||
2413 | #ifdef SIS_XORG_XF86 | ||
2414 | #ifdef TWDEBUG | ||
2415 | xf86DrvMsg(0, X_INFO, "VCLKIndex %d (0x%x)\n", VCLKIndex, VCLKIndex); | ||
2416 | #endif | ||
2417 | #endif | ||
2418 | |||
2419 | return VCLKIndex; | 2360 | return VCLKIndex; |
2420 | } | 2361 | } |
2421 | 2362 | ||
@@ -2428,10 +2369,10 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned | |||
2428 | { | 2369 | { |
2429 | unsigned short i, j, modeflag, tempah=0; | 2370 | unsigned short i, j, modeflag, tempah=0; |
2430 | short tempcl; | 2371 | short tempcl; |
2431 | #if defined(SIS300) || defined(SIS315H) | 2372 | #if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) |
2432 | unsigned short tempbl; | 2373 | unsigned short tempbl; |
2433 | #endif | 2374 | #endif |
2434 | #ifdef SIS315H | 2375 | #ifdef CONFIG_FB_SIS_315 |
2435 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; | 2376 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; |
2436 | unsigned short tempah2, tempbl2; | 2377 | unsigned short tempah2, tempbl2; |
2437 | #endif | 2378 | #endif |
@@ -2454,7 +2395,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned | |||
2454 | 2395 | ||
2455 | if(SiS_Pr->ChipType < SIS_315H) { | 2396 | if(SiS_Pr->ChipType < SIS_315H) { |
2456 | 2397 | ||
2457 | #ifdef SIS300 /* ---- 300 series ---- */ | 2398 | #ifdef CONFIG_FB_SIS_300 /* ---- 300 series ---- */ |
2458 | 2399 | ||
2459 | /* For 301BDH: (with LCD via LVDS) */ | 2400 | /* For 301BDH: (with LCD via LVDS) */ |
2460 | if(SiS_Pr->SiS_VBType & VB_NoLCD) { | 2401 | if(SiS_Pr->SiS_VBType & VB_NoLCD) { |
@@ -2477,11 +2418,11 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned | |||
2477 | 2418 | ||
2478 | if(SiS_Pr->SiS_VBInfo & SetInSlaveMode) tempah ^= 0xA0; | 2419 | if(SiS_Pr->SiS_VBInfo & SetInSlaveMode) tempah ^= 0xA0; |
2479 | 2420 | ||
2480 | #endif /* SIS300 */ | 2421 | #endif /* CONFIG_FB_SIS_300 */ |
2481 | 2422 | ||
2482 | } else { | 2423 | } else { |
2483 | 2424 | ||
2484 | #ifdef SIS315H /* ------- 315/330 series ------ */ | 2425 | #ifdef CONFIG_FB_SIS_315 /* ------- 315/330 series ------ */ |
2485 | 2426 | ||
2486 | if(ModeNo > 0x13) { | 2427 | if(ModeNo > 0x13) { |
2487 | tempcl -= ModeVGA; | 2428 | tempcl -= ModeVGA; |
@@ -2494,7 +2435,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned | |||
2494 | 2435 | ||
2495 | if(SiS_Pr->SiS_VBInfo & SetInSlaveMode) tempah ^= 0x50; | 2436 | if(SiS_Pr->SiS_VBInfo & SetInSlaveMode) tempah ^= 0x50; |
2496 | 2437 | ||
2497 | #endif /* SIS315H */ | 2438 | #endif /* CONFIG_FB_SIS_315 */ |
2498 | 2439 | ||
2499 | } | 2440 | } |
2500 | 2441 | ||
@@ -2503,7 +2444,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned | |||
2503 | if(SiS_Pr->ChipType < SIS_315H) { | 2444 | if(SiS_Pr->ChipType < SIS_315H) { |
2504 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x00,tempah); | 2445 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x00,tempah); |
2505 | } else { | 2446 | } else { |
2506 | #ifdef SIS315H | 2447 | #ifdef CONFIG_FB_SIS_315 |
2507 | if(SiS_Pr->SiS_IF_DEF_LVDS == 1) { | 2448 | if(SiS_Pr->SiS_IF_DEF_LVDS == 1) { |
2508 | SiS_SetRegANDOR(SiS_Pr->SiS_Part1Port,0x00,0xa0,tempah); | 2449 | SiS_SetRegANDOR(SiS_Pr->SiS_Part1Port,0x00,0xa0,tempah); |
2509 | } else if(SiS_Pr->SiS_VBType & VB_SISVB) { | 2450 | } else if(SiS_Pr->SiS_VBType & VB_SISVB) { |
@@ -2584,7 +2525,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned | |||
2584 | 2525 | ||
2585 | if(SiS_Pr->ChipType >= SIS_315H) { | 2526 | if(SiS_Pr->ChipType >= SIS_315H) { |
2586 | 2527 | ||
2587 | #ifdef SIS315H | 2528 | #ifdef CONFIG_FB_SIS_315 |
2588 | /* LVDS can only be slave in 8bpp modes */ | 2529 | /* LVDS can only be slave in 8bpp modes */ |
2589 | tempah = 0x80; | 2530 | tempah = 0x80; |
2590 | if((modeflag & CRT2Mode) && (SiS_Pr->SiS_ModeType > ModeVGA)) { | 2531 | if((modeflag & CRT2Mode) && (SiS_Pr->SiS_ModeType > ModeVGA)) { |
@@ -2604,7 +2545,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned | |||
2604 | 2545 | ||
2605 | } else { | 2546 | } else { |
2606 | 2547 | ||
2607 | #ifdef SIS300 | 2548 | #ifdef CONFIG_FB_SIS_300 |
2608 | tempah = 0; | 2549 | tempah = 0; |
2609 | if( (!(SiS_Pr->SiS_VBInfo & SetInSlaveMode)) && (SiS_Pr->SiS_ModeType > ModeVGA) ) { | 2550 | if( (!(SiS_Pr->SiS_VBInfo & SetInSlaveMode)) && (SiS_Pr->SiS_ModeType > ModeVGA) ) { |
2610 | tempah |= 0x02; | 2551 | tempah |= 0x02; |
@@ -2626,7 +2567,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned | |||
2626 | 2567 | ||
2627 | if(SiS_Pr->ChipType >= SIS_315H) { | 2568 | if(SiS_Pr->ChipType >= SIS_315H) { |
2628 | 2569 | ||
2629 | #ifdef SIS315H | 2570 | #ifdef CONFIG_FB_SIS_315 |
2630 | /* unsigned char bridgerev = SiS_GetReg(SiS_Pr->SiS_Part4Port,0x01); */ | 2571 | /* unsigned char bridgerev = SiS_GetReg(SiS_Pr->SiS_Part4Port,0x01); */ |
2631 | 2572 | ||
2632 | /* The following is nearly unpredictable and varies from machine | 2573 | /* The following is nearly unpredictable and varies from machine |
@@ -2718,11 +2659,11 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned | |||
2718 | SiS_SetRegANDOR(SiS_Pr->SiS_Part4Port,0x23,tempbl,tempah); | 2659 | SiS_SetRegANDOR(SiS_Pr->SiS_Part4Port,0x23,tempbl,tempah); |
2719 | } | 2660 | } |
2720 | 2661 | ||
2721 | #endif /* SIS315H */ | 2662 | #endif /* CONFIG_FB_SIS_315 */ |
2722 | 2663 | ||
2723 | } else if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { | 2664 | } else if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { |
2724 | 2665 | ||
2725 | #ifdef SIS300 | 2666 | #ifdef CONFIG_FB_SIS_300 |
2726 | SiS_SetRegAND(SiS_Pr->SiS_Part4Port,0x21,0x3f); | 2667 | SiS_SetRegAND(SiS_Pr->SiS_Part4Port,0x21,0x3f); |
2727 | 2668 | ||
2728 | if((SiS_Pr->SiS_VBInfo & DisableCRT2Display) || | 2669 | if((SiS_Pr->SiS_VBInfo & DisableCRT2Display) || |
@@ -2745,7 +2686,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned | |||
2745 | 2686 | ||
2746 | } else { /* LVDS */ | 2687 | } else { /* LVDS */ |
2747 | 2688 | ||
2748 | #ifdef SIS315H | 2689 | #ifdef CONFIG_FB_SIS_315 |
2749 | if(SiS_Pr->ChipType >= SIS_315H) { | 2690 | if(SiS_Pr->ChipType >= SIS_315H) { |
2750 | 2691 | ||
2751 | if(SiS_Pr->SiS_IF_DEF_CH70xx != 0) { | 2692 | if(SiS_Pr->SiS_IF_DEF_CH70xx != 0) { |
@@ -2931,7 +2872,7 @@ SiS_GetCRT2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
2931 | } | 2872 | } |
2932 | } | 2873 | } |
2933 | 2874 | ||
2934 | #ifdef SIS315H | 2875 | #ifdef CONFIG_FB_SIS_315 |
2935 | if(SiS_Pr->SiS_CustomT == CUT_COMPAQ1280) { | 2876 | if(SiS_Pr->SiS_CustomT == CUT_COMPAQ1280) { |
2936 | if(SiS_Pr->SiS_LCDResInfo == Panel_1280x1024) { | 2877 | if(SiS_Pr->SiS_LCDResInfo == Panel_1280x1024) { |
2937 | if(!(SiS_Pr->SiS_LCDInfo & DontExpandLCD)) { | 2878 | if(!(SiS_Pr->SiS_LCDInfo & DontExpandLCD)) { |
@@ -3036,7 +2977,7 @@ SiS_GetCRT2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
3036 | case Panel_1280x1024: tempbx = 24; break; | 2977 | case Panel_1280x1024: tempbx = 24; break; |
3037 | case Panel_1400x1050: tempbx = 26; break; | 2978 | case Panel_1400x1050: tempbx = 26; break; |
3038 | case Panel_1600x1200: tempbx = 28; break; | 2979 | case Panel_1600x1200: tempbx = 28; break; |
3039 | #ifdef SIS300 | 2980 | #ifdef CONFIG_FB_SIS_300 |
3040 | case Panel_Barco1366: tempbx = 80; break; | 2981 | case Panel_Barco1366: tempbx = 80; break; |
3041 | #endif | 2982 | #endif |
3042 | } | 2983 | } |
@@ -3053,7 +2994,7 @@ SiS_GetCRT2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
3053 | 2994 | ||
3054 | if(SiS_Pr->SiS_LCDInfo & LCDPass11) tempbx = 30; | 2995 | if(SiS_Pr->SiS_LCDInfo & LCDPass11) tempbx = 30; |
3055 | 2996 | ||
3056 | #ifdef SIS300 | 2997 | #ifdef CONFIG_FB_SIS_300 |
3057 | if(SiS_Pr->SiS_CustomT == CUT_BARCO1024) { | 2998 | if(SiS_Pr->SiS_CustomT == CUT_BARCO1024) { |
3058 | tempbx = 82; | 2999 | tempbx = 82; |
3059 | if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) tempbx++; | 3000 | if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) tempbx++; |
@@ -3189,7 +3130,7 @@ SiS_GetCRT2DataLVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned | |||
3189 | 3130 | ||
3190 | if((SiS_Pr->SiS_VBType & VB_SISVB) && (SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA)) { | 3131 | if((SiS_Pr->SiS_VBType & VB_SISVB) && (SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA)) { |
3191 | 3132 | ||
3192 | #ifdef SIS315H | 3133 | #ifdef CONFIG_FB_SIS_315 |
3193 | SiS_CalcPanelLinkTiming(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); | 3134 | SiS_CalcPanelLinkTiming(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); |
3194 | SiS_CalcLCDACRT1Timing(SiS_Pr, ModeNo, ModeIdIndex); | 3135 | SiS_CalcLCDACRT1Timing(SiS_Pr, ModeNo, ModeIdIndex); |
3195 | #endif | 3136 | #endif |
@@ -3214,7 +3155,7 @@ SiS_GetCRT2DataLVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned | |||
3214 | case 16: LVDSData = SiS_Pr->SiS_LVDS800x600Data_1; break; | 3155 | case 16: LVDSData = SiS_Pr->SiS_LVDS800x600Data_1; break; |
3215 | case 18: LVDSData = SiS_Pr->SiS_LVDS1024x600Data_1; break; | 3156 | case 18: LVDSData = SiS_Pr->SiS_LVDS1024x600Data_1; break; |
3216 | case 20: LVDSData = SiS_Pr->SiS_LVDS1024x768Data_1; break; | 3157 | case 20: LVDSData = SiS_Pr->SiS_LVDS1024x768Data_1; break; |
3217 | #ifdef SIS300 | 3158 | #ifdef CONFIG_FB_SIS_300 |
3218 | case 80: LVDSData = SiS_Pr->SiS_LVDSBARCO1366Data_1; break; | 3159 | case 80: LVDSData = SiS_Pr->SiS_LVDSBARCO1366Data_1; break; |
3219 | case 81: LVDSData = SiS_Pr->SiS_LVDSBARCO1366Data_2; break; | 3160 | case 81: LVDSData = SiS_Pr->SiS_LVDSBARCO1366Data_2; break; |
3220 | case 82: LVDSData = SiS_Pr->SiS_LVDSBARCO1024Data_1; break; | 3161 | case 82: LVDSData = SiS_Pr->SiS_LVDSBARCO1024Data_1; break; |
@@ -3248,7 +3189,7 @@ SiS_GetCRT2DataLVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned | |||
3248 | (SiS_Pr->SiS_SetFlag & SetDOSMode) ) { | 3189 | (SiS_Pr->SiS_SetFlag & SetDOSMode) ) { |
3249 | SiS_Pr->SiS_HDE = SiS_Pr->PanelXRes; | 3190 | SiS_Pr->SiS_HDE = SiS_Pr->PanelXRes; |
3250 | SiS_Pr->SiS_VDE = SiS_Pr->PanelYRes; | 3191 | SiS_Pr->SiS_VDE = SiS_Pr->PanelYRes; |
3251 | #ifdef SIS300 | 3192 | #ifdef CONFIG_FB_SIS_300 |
3252 | if(SiS_Pr->SiS_CustomT == CUT_BARCO1366) { | 3193 | if(SiS_Pr->SiS_CustomT == CUT_BARCO1366) { |
3253 | if(ResIndex < 0x08) { | 3194 | if(ResIndex < 0x08) { |
3254 | SiS_Pr->SiS_HDE = 1280; | 3195 | SiS_Pr->SiS_HDE = 1280; |
@@ -3270,7 +3211,7 @@ SiS_GetCRT2Data301(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
3270 | unsigned short resinfo, CRT2Index, ResIndex; | 3211 | unsigned short resinfo, CRT2Index, ResIndex; |
3271 | const struct SiS_LCDData *LCDPtr = NULL; | 3212 | const struct SiS_LCDData *LCDPtr = NULL; |
3272 | const struct SiS_TVData *TVPtr = NULL; | 3213 | const struct SiS_TVData *TVPtr = NULL; |
3273 | #ifdef SIS315H | 3214 | #ifdef CONFIG_FB_SIS_315 |
3274 | short resinfo661; | 3215 | short resinfo661; |
3275 | #endif | 3216 | #endif |
3276 | 3217 | ||
@@ -3283,7 +3224,7 @@ SiS_GetCRT2Data301(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
3283 | } else { | 3224 | } else { |
3284 | modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag; | 3225 | modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag; |
3285 | resinfo = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_RESINFO; | 3226 | resinfo = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_RESINFO; |
3286 | #ifdef SIS315H | 3227 | #ifdef CONFIG_FB_SIS_315 |
3287 | resinfo661 = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].ROMMODEIDX661; | 3228 | resinfo661 = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].ROMMODEIDX661; |
3288 | if( (SiS_Pr->SiS_VBInfo & SetCRT2ToLCD) && | 3229 | if( (SiS_Pr->SiS_VBInfo & SetCRT2ToLCD) && |
3289 | (SiS_Pr->SiS_SetFlag & LCDVESATiming) && | 3230 | (SiS_Pr->SiS_SetFlag & LCDVESATiming) && |
@@ -3460,7 +3401,7 @@ SiS_GetCRT2Data301(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
3460 | 3401 | ||
3461 | } else if( (!(SiS_Pr->SiS_LCDInfo & DontExpandLCD)) && (romptr) && (ROMAddr) ) { | 3402 | } else if( (!(SiS_Pr->SiS_LCDInfo & DontExpandLCD)) && (romptr) && (ROMAddr) ) { |
3462 | 3403 | ||
3463 | #ifdef SIS315H | 3404 | #ifdef CONFIG_FB_SIS_315 |
3464 | SiS_Pr->SiS_RVBHCMAX = ROMAddr[romptr]; | 3405 | SiS_Pr->SiS_RVBHCMAX = ROMAddr[romptr]; |
3465 | SiS_Pr->SiS_RVBHCFACT = ROMAddr[romptr+1]; | 3406 | SiS_Pr->SiS_RVBHCFACT = ROMAddr[romptr+1]; |
3466 | SiS_Pr->SiS_VGAHT = ROMAddr[romptr+2] | ((ROMAddr[romptr+3] & 0x0f) << 8); | 3407 | SiS_Pr->SiS_VGAHT = ROMAddr[romptr+2] | ((ROMAddr[romptr+3] & 0x0f) << 8); |
@@ -3520,19 +3461,13 @@ SiS_GetCRT2Data301(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
3520 | case Panel_1680x1050 : | 3461 | case Panel_1680x1050 : |
3521 | case Panel_1680x1050 + 32: LCDPtr = SiS_Pr->SiS_LCD1680x1050Data; break; | 3462 | case Panel_1680x1050 + 32: LCDPtr = SiS_Pr->SiS_LCD1680x1050Data; break; |
3522 | case 100 : LCDPtr = SiS_Pr->SiS_NoScaleData; break; | 3463 | case 100 : LCDPtr = SiS_Pr->SiS_NoScaleData; break; |
3523 | #ifdef SIS315H | 3464 | #ifdef CONFIG_FB_SIS_315 |
3524 | case 200 : LCDPtr = SiS310_ExtCompaq1280x1024Data; break; | 3465 | case 200 : LCDPtr = SiS310_ExtCompaq1280x1024Data; break; |
3525 | case 201 : LCDPtr = SiS_Pr->SiS_St2LCD1280x1024Data; break; | 3466 | case 201 : LCDPtr = SiS_Pr->SiS_St2LCD1280x1024Data; break; |
3526 | #endif | 3467 | #endif |
3527 | default : LCDPtr = SiS_Pr->SiS_ExtLCD1024x768Data; break; | 3468 | default : LCDPtr = SiS_Pr->SiS_ExtLCD1024x768Data; break; |
3528 | } | 3469 | } |
3529 | 3470 | ||
3530 | #ifdef SIS_XORG_XF86 | ||
3531 | #ifdef TWDEBUG | ||
3532 | xf86DrvMsg(0, X_INFO, "GetCRT2Data: Index %d ResIndex %d\n", CRT2Index, ResIndex); | ||
3533 | #endif | ||
3534 | #endif | ||
3535 | |||
3536 | SiS_Pr->SiS_RVBHCMAX = (LCDPtr+ResIndex)->RVBHCMAX; | 3471 | SiS_Pr->SiS_RVBHCMAX = (LCDPtr+ResIndex)->RVBHCMAX; |
3537 | SiS_Pr->SiS_RVBHCFACT = (LCDPtr+ResIndex)->RVBHCFACT; | 3472 | SiS_Pr->SiS_RVBHCFACT = (LCDPtr+ResIndex)->RVBHCFACT; |
3538 | SiS_Pr->SiS_VGAHT = (LCDPtr+ResIndex)->VGAHT; | 3473 | SiS_Pr->SiS_VGAHT = (LCDPtr+ResIndex)->VGAHT; |
@@ -3624,7 +3559,7 @@ SiS_GetLVDSDesPtr(struct SiS_Private *SiS_Pr) | |||
3624 | { | 3559 | { |
3625 | const struct SiS_LVDSDes *PanelDesPtr = NULL; | 3560 | const struct SiS_LVDSDes *PanelDesPtr = NULL; |
3626 | 3561 | ||
3627 | #ifdef SIS300 | 3562 | #ifdef CONFIG_FB_SIS_300 |
3628 | if(SiS_Pr->SiS_VBInfo & SetCRT2ToLCD) { | 3563 | if(SiS_Pr->SiS_VBInfo & SetCRT2ToLCD) { |
3629 | 3564 | ||
3630 | if(SiS_Pr->ChipType < SIS_315H) { | 3565 | if(SiS_Pr->ChipType < SIS_315H) { |
@@ -3696,7 +3631,7 @@ SiS_GetLVDSDesData(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
3696 | 3631 | ||
3697 | if((SiS_Pr->SiS_VBType & VB_SIS30xBLV) && (SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA)) { | 3632 | if((SiS_Pr->SiS_VBType & VB_SIS30xBLV) && (SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA)) { |
3698 | 3633 | ||
3699 | #ifdef SIS315H | 3634 | #ifdef CONFIG_FB_SIS_315 |
3700 | if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { | 3635 | if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { |
3701 | /* non-pass 1:1 only, see above */ | 3636 | /* non-pass 1:1 only, see above */ |
3702 | if(SiS_Pr->SiS_VGAHDE != SiS_Pr->PanelXRes) { | 3637 | if(SiS_Pr->SiS_VGAHDE != SiS_Pr->PanelXRes) { |
@@ -3771,7 +3706,7 @@ SiS_GetLVDSDesData(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
3771 | } else { | 3706 | } else { |
3772 | 3707 | ||
3773 | if(SiS_Pr->ChipType < SIS_315H) { | 3708 | if(SiS_Pr->ChipType < SIS_315H) { |
3774 | #ifdef SIS300 | 3709 | #ifdef CONFIG_FB_SIS_300 |
3775 | switch(SiS_Pr->SiS_LCDResInfo) { | 3710 | switch(SiS_Pr->SiS_LCDResInfo) { |
3776 | case Panel_800x600: | 3711 | case Panel_800x600: |
3777 | if(SiS_Pr->SiS_VGAVDE == SiS_Pr->PanelYRes) { | 3712 | if(SiS_Pr->SiS_VGAVDE == SiS_Pr->PanelYRes) { |
@@ -3816,7 +3751,7 @@ SiS_GetLVDSDesData(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
3816 | } | 3751 | } |
3817 | #endif | 3752 | #endif |
3818 | } else { | 3753 | } else { |
3819 | #ifdef SIS315H | 3754 | #ifdef CONFIG_FB_SIS_315 |
3820 | switch(SiS_Pr->SiS_LCDResInfo) { | 3755 | switch(SiS_Pr->SiS_LCDResInfo) { |
3821 | case Panel_1024x768: | 3756 | case Panel_1024x768: |
3822 | case Panel_1280x1024: | 3757 | case Panel_1280x1024: |
@@ -3844,7 +3779,7 @@ SiS_GetLVDSDesData(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
3844 | if(SiS_Pr->ChipType < SIS_315H) { | 3779 | if(SiS_Pr->ChipType < SIS_315H) { |
3845 | if(!(modeflag & HalfDCLK)) SiS_Pr->SiS_LCDHDES = 320; | 3780 | if(!(modeflag & HalfDCLK)) SiS_Pr->SiS_LCDHDES = 320; |
3846 | } else { | 3781 | } else { |
3847 | #ifdef SIS315H | 3782 | #ifdef CONFIG_FB_SIS_315 |
3848 | if(SiS_Pr->SiS_LCDResInfo == Panel_1024x768) SiS_Pr->SiS_LCDHDES = 480; | 3783 | if(SiS_Pr->SiS_LCDResInfo == Panel_1024x768) SiS_Pr->SiS_LCDHDES = 480; |
3849 | if(SiS_Pr->SiS_LCDResInfo == Panel_1400x1050) SiS_Pr->SiS_LCDHDES = 804; | 3784 | if(SiS_Pr->SiS_LCDResInfo == Panel_1400x1050) SiS_Pr->SiS_LCDHDES = 804; |
3850 | if(SiS_Pr->SiS_LCDResInfo == Panel_1600x1200) SiS_Pr->SiS_LCDHDES = 704; | 3785 | if(SiS_Pr->SiS_LCDResInfo == Panel_1600x1200) SiS_Pr->SiS_LCDHDES = 704; |
@@ -3866,7 +3801,7 @@ SiS_GetLVDSDesData(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
3866 | /* DISABLE VIDEO BRIDGE */ | 3801 | /* DISABLE VIDEO BRIDGE */ |
3867 | /*********************************************/ | 3802 | /*********************************************/ |
3868 | 3803 | ||
3869 | #ifdef SIS315H | 3804 | #ifdef CONFIG_FB_SIS_315 |
3870 | static int | 3805 | static int |
3871 | SiS_HandlePWD(struct SiS_Private *SiS_Pr) | 3806 | SiS_HandlePWD(struct SiS_Private *SiS_Pr) |
3872 | { | 3807 | { |
@@ -3891,11 +3826,6 @@ SiS_HandlePWD(struct SiS_Private *SiS_Pr) | |||
3891 | ret = 1; | 3826 | ret = 1; |
3892 | } | 3827 | } |
3893 | SiS_SetRegANDOR(SiS_Pr->SiS_Part4Port,0x27,0x7f,temp); | 3828 | SiS_SetRegANDOR(SiS_Pr->SiS_Part4Port,0x27,0x7f,temp); |
3894 | #ifdef SIS_XORG_XF86 | ||
3895 | #ifdef TWDEBUG | ||
3896 | xf86DrvMsg(0, 0, "Setting PWD %x\n", temp); | ||
3897 | #endif | ||
3898 | #endif | ||
3899 | } | 3829 | } |
3900 | #endif | 3830 | #endif |
3901 | return ret; | 3831 | return ret; |
@@ -3909,7 +3839,7 @@ SiS_HandlePWD(struct SiS_Private *SiS_Pr) | |||
3909 | void | 3839 | void |
3910 | SiS_DisableBridge(struct SiS_Private *SiS_Pr) | 3840 | SiS_DisableBridge(struct SiS_Private *SiS_Pr) |
3911 | { | 3841 | { |
3912 | #ifdef SIS315H | 3842 | #ifdef CONFIG_FB_SIS_315 |
3913 | unsigned short tempah, pushax=0, modenum; | 3843 | unsigned short tempah, pushax=0, modenum; |
3914 | #endif | 3844 | #endif |
3915 | unsigned short temp=0; | 3845 | unsigned short temp=0; |
@@ -3920,7 +3850,7 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) | |||
3920 | 3850 | ||
3921 | if(SiS_Pr->ChipType < SIS_315H) { | 3851 | if(SiS_Pr->ChipType < SIS_315H) { |
3922 | 3852 | ||
3923 | #ifdef SIS300 /* 300 series */ | 3853 | #ifdef CONFIG_FB_SIS_300 /* 300 series */ |
3924 | 3854 | ||
3925 | if(!(SiS_CR36BIOSWord23b(SiS_Pr))) { | 3855 | if(!(SiS_CR36BIOSWord23b(SiS_Pr))) { |
3926 | if(SiS_Pr->SiS_VBType & VB_SISLVDS) { | 3856 | if(SiS_Pr->SiS_VBType & VB_SISLVDS) { |
@@ -3953,11 +3883,11 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) | |||
3953 | } | 3883 | } |
3954 | } | 3884 | } |
3955 | 3885 | ||
3956 | #endif /* SIS300 */ | 3886 | #endif /* CONFIG_FB_SIS_300 */ |
3957 | 3887 | ||
3958 | } else { | 3888 | } else { |
3959 | 3889 | ||
3960 | #ifdef SIS315H /* 315 series */ | 3890 | #ifdef CONFIG_FB_SIS_315 /* 315 series */ |
3961 | 3891 | ||
3962 | int didpwd = 0; | 3892 | int didpwd = 0; |
3963 | bool custom1 = (SiS_Pr->SiS_CustomT == CUT_COMPAQ1280) || | 3893 | bool custom1 = (SiS_Pr->SiS_CustomT == CUT_COMPAQ1280) || |
@@ -4081,14 +4011,14 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) | |||
4081 | 4011 | ||
4082 | } | 4012 | } |
4083 | 4013 | ||
4084 | #endif /* SIS315H */ | 4014 | #endif /* CONFIG_FB_SIS_315 */ |
4085 | 4015 | ||
4086 | } | 4016 | } |
4087 | 4017 | ||
4088 | } else { /* ============ For 301 ================ */ | 4018 | } else { /* ============ For 301 ================ */ |
4089 | 4019 | ||
4090 | if(SiS_Pr->ChipType < SIS_315H) { | 4020 | if(SiS_Pr->ChipType < SIS_315H) { |
4091 | #ifdef SIS300 | 4021 | #ifdef CONFIG_FB_SIS_300 |
4092 | if(!(SiS_CR36BIOSWord23b(SiS_Pr))) { | 4022 | if(!(SiS_CR36BIOSWord23b(SiS_Pr))) { |
4093 | SiS_SetRegSR11ANDOR(SiS_Pr,0xF7,0x08); | 4023 | SiS_SetRegSR11ANDOR(SiS_Pr,0xF7,0x08); |
4094 | SiS_PanelDelay(SiS_Pr, 3); | 4024 | SiS_PanelDelay(SiS_Pr, 3); |
@@ -4111,7 +4041,7 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) | |||
4111 | SiS_SetRegOR(SiS_Pr->SiS_P3c4,0x1E,0x20); | 4041 | SiS_SetRegOR(SiS_Pr->SiS_P3c4,0x1E,0x20); |
4112 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x00,temp); | 4042 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x00,temp); |
4113 | } else { | 4043 | } else { |
4114 | #ifdef SIS300 | 4044 | #ifdef CONFIG_FB_SIS_300 |
4115 | SiS_SetRegAND(SiS_Pr->SiS_P3c4,0x1E,0xDF); /* disable CRT2 */ | 4045 | SiS_SetRegAND(SiS_Pr->SiS_P3c4,0x1E,0xDF); /* disable CRT2 */ |
4116 | if( (!(SiS_CRT2IsLCD(SiS_Pr))) || | 4046 | if( (!(SiS_CRT2IsLCD(SiS_Pr))) || |
4117 | (!(SiS_CR36BIOSWord23d(SiS_Pr))) ) { | 4047 | (!(SiS_CR36BIOSWord23d(SiS_Pr))) ) { |
@@ -4127,7 +4057,7 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) | |||
4127 | 4057 | ||
4128 | if(SiS_Pr->ChipType < SIS_315H) { | 4058 | if(SiS_Pr->ChipType < SIS_315H) { |
4129 | 4059 | ||
4130 | #ifdef SIS300 /* 300 series */ | 4060 | #ifdef CONFIG_FB_SIS_300 /* 300 series */ |
4131 | 4061 | ||
4132 | if(SiS_Pr->SiS_IF_DEF_CH70xx == 1) { | 4062 | if(SiS_Pr->SiS_IF_DEF_CH70xx == 1) { |
4133 | SiS_SetCH700x(SiS_Pr,0x0E,0x09); | 4063 | SiS_SetCH700x(SiS_Pr,0x0E,0x09); |
@@ -4171,11 +4101,11 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) | |||
4171 | SiS_SetRegSR11ANDOR(SiS_Pr,0xFB,0x04); | 4101 | SiS_SetRegSR11ANDOR(SiS_Pr,0xFB,0x04); |
4172 | } | 4102 | } |
4173 | 4103 | ||
4174 | #endif /* SIS300 */ | 4104 | #endif /* CONFIG_FB_SIS_300 */ |
4175 | 4105 | ||
4176 | } else { | 4106 | } else { |
4177 | 4107 | ||
4178 | #ifdef SIS315H /* 315 series */ | 4108 | #ifdef CONFIG_FB_SIS_315 /* 315 series */ |
4179 | 4109 | ||
4180 | if(!(SiS_IsNotM650orLater(SiS_Pr))) { | 4110 | if(!(SiS_IsNotM650orLater(SiS_Pr))) { |
4181 | /*if(SiS_Pr->ChipType < SIS_340) { */ /* XGI needs this */ | 4111 | /*if(SiS_Pr->ChipType < SIS_340) { */ /* XGI needs this */ |
@@ -4288,7 +4218,7 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) | |||
4288 | } | 4218 | } |
4289 | } | 4219 | } |
4290 | 4220 | ||
4291 | #endif /* SIS315H */ | 4221 | #endif /* CONFIG_FB_SIS_315 */ |
4292 | 4222 | ||
4293 | } /* 315 series */ | 4223 | } /* 315 series */ |
4294 | 4224 | ||
@@ -4304,14 +4234,12 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) | |||
4304 | * from outside the context of a mode switch! | 4234 | * from outside the context of a mode switch! |
4305 | * MUST call getVBType before calling this | 4235 | * MUST call getVBType before calling this |
4306 | */ | 4236 | */ |
4307 | #ifdef SIS_LINUX_KERNEL | ||
4308 | static | 4237 | static |
4309 | #endif | ||
4310 | void | 4238 | void |
4311 | SiS_EnableBridge(struct SiS_Private *SiS_Pr) | 4239 | SiS_EnableBridge(struct SiS_Private *SiS_Pr) |
4312 | { | 4240 | { |
4313 | unsigned short temp=0, tempah; | 4241 | unsigned short temp=0, tempah; |
4314 | #ifdef SIS315H | 4242 | #ifdef CONFIG_FB_SIS_315 |
4315 | unsigned short temp1, pushax=0; | 4243 | unsigned short temp1, pushax=0; |
4316 | bool delaylong = false; | 4244 | bool delaylong = false; |
4317 | #endif | 4245 | #endif |
@@ -4322,7 +4250,7 @@ SiS_EnableBridge(struct SiS_Private *SiS_Pr) | |||
4322 | 4250 | ||
4323 | if(SiS_Pr->ChipType < SIS_315H) { | 4251 | if(SiS_Pr->ChipType < SIS_315H) { |
4324 | 4252 | ||
4325 | #ifdef SIS300 /* 300 series */ | 4253 | #ifdef CONFIG_FB_SIS_300 /* 300 series */ |
4326 | 4254 | ||
4327 | if(SiS_CRT2IsLCD(SiS_Pr)) { | 4255 | if(SiS_CRT2IsLCD(SiS_Pr)) { |
4328 | if(SiS_Pr->SiS_VBType & VB_SISLVDS) { | 4256 | if(SiS_Pr->SiS_VBType & VB_SISLVDS) { |
@@ -4385,11 +4313,11 @@ SiS_EnableBridge(struct SiS_Private *SiS_Pr) | |||
4385 | } | 4313 | } |
4386 | 4314 | ||
4387 | 4315 | ||
4388 | #endif /* SIS300 */ | 4316 | #endif /* CONFIG_FB_SIS_300 */ |
4389 | 4317 | ||
4390 | } else { | 4318 | } else { |
4391 | 4319 | ||
4392 | #ifdef SIS315H /* 315 series */ | 4320 | #ifdef CONFIG_FB_SIS_315 /* 315 series */ |
4393 | 4321 | ||
4394 | #ifdef SET_EMI | 4322 | #ifdef SET_EMI |
4395 | unsigned char r30=0, r31=0, r32=0, r33=0, cr36=0; | 4323 | unsigned char r30=0, r31=0, r32=0, r33=0, cr36=0; |
@@ -4688,7 +4616,7 @@ SiS_EnableBridge(struct SiS_Private *SiS_Pr) | |||
4688 | SiS_SetRegAND(SiS_Pr->SiS_Part1Port,0x00,0x7f); | 4616 | SiS_SetRegAND(SiS_Pr->SiS_Part1Port,0x00,0x7f); |
4689 | } | 4617 | } |
4690 | 4618 | ||
4691 | #endif /* SIS315H */ | 4619 | #endif /* CONFIG_FB_SIS_315 */ |
4692 | 4620 | ||
4693 | } | 4621 | } |
4694 | 4622 | ||
@@ -4739,7 +4667,7 @@ SiS_EnableBridge(struct SiS_Private *SiS_Pr) | |||
4739 | 4667 | ||
4740 | if(SiS_Pr->ChipType < SIS_315H) { | 4668 | if(SiS_Pr->ChipType < SIS_315H) { |
4741 | 4669 | ||
4742 | #ifdef SIS300 /* 300 series */ | 4670 | #ifdef CONFIG_FB_SIS_300 /* 300 series */ |
4743 | 4671 | ||
4744 | if(SiS_CRT2IsLCD(SiS_Pr)) { | 4672 | if(SiS_CRT2IsLCD(SiS_Pr)) { |
4745 | if(SiS_Pr->ChipType == SIS_730) { | 4673 | if(SiS_Pr->ChipType == SIS_730) { |
@@ -4783,11 +4711,11 @@ SiS_EnableBridge(struct SiS_Private *SiS_Pr) | |||
4783 | } | 4711 | } |
4784 | } | 4712 | } |
4785 | 4713 | ||
4786 | #endif /* SIS300 */ | 4714 | #endif /* CONFIG_FB_SIS_300 */ |
4787 | 4715 | ||
4788 | } else { | 4716 | } else { |
4789 | 4717 | ||
4790 | #ifdef SIS315H /* 315 series */ | 4718 | #ifdef CONFIG_FB_SIS_315 /* 315 series */ |
4791 | 4719 | ||
4792 | if(!(SiS_IsNotM650orLater(SiS_Pr))) { | 4720 | if(!(SiS_IsNotM650orLater(SiS_Pr))) { |
4793 | /*if(SiS_Pr->ChipType < SIS_340) {*/ /* XGI needs this */ | 4721 | /*if(SiS_Pr->ChipType < SIS_340) {*/ /* XGI needs this */ |
@@ -4881,7 +4809,7 @@ SiS_EnableBridge(struct SiS_Private *SiS_Pr) | |||
4881 | } | 4809 | } |
4882 | } | 4810 | } |
4883 | 4811 | ||
4884 | #endif /* SIS315H */ | 4812 | #endif /* CONFIG_FB_SIS_315 */ |
4885 | 4813 | ||
4886 | } /* 310 series */ | 4814 | } /* 310 series */ |
4887 | 4815 | ||
@@ -4971,7 +4899,7 @@ SiS_SetCRT2Sync(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor | |||
4971 | 4899 | ||
4972 | if(SiS_Pr->ChipType < SIS_315H) { | 4900 | if(SiS_Pr->ChipType < SIS_315H) { |
4973 | 4901 | ||
4974 | #ifdef SIS300 /* ---- 300 series --- */ | 4902 | #ifdef CONFIG_FB_SIS_300 /* ---- 300 series --- */ |
4975 | 4903 | ||
4976 | if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { /* 630 - 301B(-DH) */ | 4904 | if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { /* 630 - 301B(-DH) */ |
4977 | 4905 | ||
@@ -5000,11 +4928,11 @@ SiS_SetCRT2Sync(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor | |||
5000 | 4928 | ||
5001 | } | 4929 | } |
5002 | 4930 | ||
5003 | #endif /* SIS300 */ | 4931 | #endif /* CONFIG_FB_SIS_300 */ |
5004 | 4932 | ||
5005 | } else { | 4933 | } else { |
5006 | 4934 | ||
5007 | #ifdef SIS315H /* ------- 315 series ------ */ | 4935 | #ifdef CONFIG_FB_SIS_315 /* ------- 315 series ------ */ |
5008 | 4936 | ||
5009 | if(SiS_Pr->SiS_VBType & VB_SISLVDS) { /* 315 - LVDS */ | 4937 | if(SiS_Pr->SiS_VBType & VB_SISLVDS) { /* 315 - LVDS */ |
5010 | 4938 | ||
@@ -5076,13 +5004,13 @@ SiS_SetCRT2Sync(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor | |||
5076 | } | 5004 | } |
5077 | 5005 | ||
5078 | } | 5006 | } |
5079 | #endif /* SIS315H */ | 5007 | #endif /* CONFIG_FB_SIS_315 */ |
5080 | } | 5008 | } |
5081 | } | 5009 | } |
5082 | } | 5010 | } |
5083 | 5011 | ||
5084 | /* Set CRT2 FIFO on 300/540/630/730 */ | 5012 | /* Set CRT2 FIFO on 300/540/630/730 */ |
5085 | #ifdef SIS300 | 5013 | #ifdef CONFIG_FB_SIS_300 |
5086 | static void | 5014 | static void |
5087 | SiS_SetCRT2FIFO_300(struct SiS_Private *SiS_Pr,unsigned short ModeNo) | 5015 | SiS_SetCRT2FIFO_300(struct SiS_Private *SiS_Pr,unsigned short ModeNo) |
5088 | { | 5016 | { |
@@ -5154,13 +5082,8 @@ SiS_SetCRT2FIFO_300(struct SiS_Private *SiS_Pr,unsigned short ModeNo) | |||
5154 | 5082 | ||
5155 | } else { | 5083 | } else { |
5156 | 5084 | ||
5157 | #ifdef SIS_LINUX_KERNEL | ||
5158 | pci50 = sisfb_read_nbridge_pci_dword(SiS_Pr, 0x50); | 5085 | pci50 = sisfb_read_nbridge_pci_dword(SiS_Pr, 0x50); |
5159 | pciA0 = sisfb_read_nbridge_pci_dword(SiS_Pr, 0xa0); | 5086 | pciA0 = sisfb_read_nbridge_pci_dword(SiS_Pr, 0xa0); |
5160 | #else | ||
5161 | pci50 = pciReadLong(0x00000000, 0x50); | ||
5162 | pciA0 = pciReadLong(0x00000000, 0xA0); | ||
5163 | #endif | ||
5164 | 5087 | ||
5165 | if(SiS_Pr->ChipType == SIS_730) { | 5088 | if(SiS_Pr->ChipType == SIS_730) { |
5166 | 5089 | ||
@@ -5262,7 +5185,7 @@ SiS_SetCRT2FIFO_300(struct SiS_Private *SiS_Pr,unsigned short ModeNo) | |||
5262 | #endif | 5185 | #endif |
5263 | 5186 | ||
5264 | /* Set CRT2 FIFO on 315/330 series */ | 5187 | /* Set CRT2 FIFO on 315/330 series */ |
5265 | #ifdef SIS315H | 5188 | #ifdef CONFIG_FB_SIS_315 |
5266 | static void | 5189 | static void |
5267 | SiS_SetCRT2FIFO_310(struct SiS_Private *SiS_Pr) | 5190 | SiS_SetCRT2FIFO_310(struct SiS_Private *SiS_Pr) |
5268 | { | 5191 | { |
@@ -5420,27 +5343,6 @@ SiS_SetGroup1_301(struct SiS_Private *SiS_Pr, unsigned short ModeNo,unsigned sho | |||
5420 | 5343 | ||
5421 | temp = SiS_GetRegByte((SiS_Pr->SiS_P3ca+0x02)); | 5344 | temp = SiS_GetRegByte((SiS_Pr->SiS_P3ca+0x02)); |
5422 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x1b,temp); /* ? */ | 5345 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x1b,temp); /* ? */ |
5423 | |||
5424 | #ifdef SIS_XORG_XF86 | ||
5425 | #ifdef TWDEBUG | ||
5426 | xf86DrvMsg(0, X_INFO, "%d %d %d %d %d %d %d %d (%d %d %d %d)\n", | ||
5427 | SiS_Pr->CHDisplay, SiS_Pr->CHSyncStart, SiS_Pr->CHSyncEnd, SiS_Pr->CHTotal, | ||
5428 | SiS_Pr->CVDisplay, SiS_Pr->CVSyncStart, SiS_Pr->CVSyncEnd, SiS_Pr->CVTotal, | ||
5429 | SiS_Pr->CHBlankStart, SiS_Pr->CHBlankEnd, SiS_Pr->CVBlankStart, SiS_Pr->CVBlankEnd); | ||
5430 | |||
5431 | xf86DrvMsg(0, X_INFO, " {{0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,\n", | ||
5432 | SiS_Pr->CCRT1CRTC[0], SiS_Pr->CCRT1CRTC[1], | ||
5433 | SiS_Pr->CCRT1CRTC[2], SiS_Pr->CCRT1CRTC[3], | ||
5434 | SiS_Pr->CCRT1CRTC[4], SiS_Pr->CCRT1CRTC[5], | ||
5435 | SiS_Pr->CCRT1CRTC[6], SiS_Pr->CCRT1CRTC[7]); | ||
5436 | xf86DrvMsg(0, X_INFO, " 0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,\n", | ||
5437 | SiS_Pr->CCRT1CRTC[8], SiS_Pr->CCRT1CRTC[9], | ||
5438 | SiS_Pr->CCRT1CRTC[10], SiS_Pr->CCRT1CRTC[11], | ||
5439 | SiS_Pr->CCRT1CRTC[12], SiS_Pr->CCRT1CRTC[13], | ||
5440 | SiS_Pr->CCRT1CRTC[14], SiS_Pr->CCRT1CRTC[15]); | ||
5441 | xf86DrvMsg(0, X_INFO, " 0x%02x}},\n", SiS_Pr->CCRT1CRTC[16]); | ||
5442 | #endif | ||
5443 | #endif | ||
5444 | } | 5346 | } |
5445 | 5347 | ||
5446 | /* Setup panel link | 5348 | /* Setup panel link |
@@ -5455,17 +5357,17 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
5455 | unsigned short push2, tempax, tempbx, tempcx, temp; | 5357 | unsigned short push2, tempax, tempbx, tempcx, temp; |
5456 | unsigned int tempeax = 0, tempebx, tempecx, tempvcfact = 0; | 5358 | unsigned int tempeax = 0, tempebx, tempecx, tempvcfact = 0; |
5457 | bool islvds = false, issis = false, chkdclkfirst = false; | 5359 | bool islvds = false, issis = false, chkdclkfirst = false; |
5458 | #ifdef SIS300 | 5360 | #ifdef CONFIG_FB_SIS_300 |
5459 | unsigned short crt2crtc = 0; | 5361 | unsigned short crt2crtc = 0; |
5460 | #endif | 5362 | #endif |
5461 | #ifdef SIS315H | 5363 | #ifdef CONFIG_FB_SIS_315 |
5462 | unsigned short pushcx; | 5364 | unsigned short pushcx; |
5463 | #endif | 5365 | #endif |
5464 | 5366 | ||
5465 | if(ModeNo <= 0x13) { | 5367 | if(ModeNo <= 0x13) { |
5466 | modeflag = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ModeFlag; | 5368 | modeflag = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ModeFlag; |
5467 | resinfo = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ResInfo; | 5369 | resinfo = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ResInfo; |
5468 | #ifdef SIS300 | 5370 | #ifdef CONFIG_FB_SIS_300 |
5469 | crt2crtc = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_CRT2CRTC; | 5371 | crt2crtc = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_CRT2CRTC; |
5470 | #endif | 5372 | #endif |
5471 | } else if(SiS_Pr->UseCustomMode) { | 5373 | } else if(SiS_Pr->UseCustomMode) { |
@@ -5473,7 +5375,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
5473 | } else { | 5375 | } else { |
5474 | modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag; | 5376 | modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag; |
5475 | resinfo = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_RESINFO; | 5377 | resinfo = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_RESINFO; |
5476 | #ifdef SIS300 | 5378 | #ifdef CONFIG_FB_SIS_300 |
5477 | crt2crtc = SiS_Pr->SiS_RefIndex[RefreshRateTableIndex].Ext_CRT2CRTC; | 5379 | crt2crtc = SiS_Pr->SiS_RefIndex[RefreshRateTableIndex].Ext_CRT2CRTC; |
5478 | #endif | 5380 | #endif |
5479 | } | 5381 | } |
@@ -5494,7 +5396,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
5494 | } | 5396 | } |
5495 | } | 5397 | } |
5496 | 5398 | ||
5497 | #ifdef SIS315H | 5399 | #ifdef CONFIG_FB_SIS_315 |
5498 | if((SiS_Pr->ChipType >= SIS_315H) && (SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA)) { | 5400 | if((SiS_Pr->ChipType >= SIS_315H) && (SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA)) { |
5499 | if(IS_SIS330) { | 5401 | if(IS_SIS330) { |
5500 | SiS_SetRegOR(SiS_Pr->SiS_Part1Port,0x2D,0x10); | 5402 | SiS_SetRegOR(SiS_Pr->SiS_Part1Port,0x2D,0x10); |
@@ -5744,7 +5646,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
5744 | 5646 | ||
5745 | if(SiS_Pr->ChipType < SIS_315H) { | 5647 | if(SiS_Pr->ChipType < SIS_315H) { |
5746 | 5648 | ||
5747 | #ifdef SIS300 /* 300 series */ | 5649 | #ifdef CONFIG_FB_SIS_300 /* 300 series */ |
5748 | tempeax = SiS_Pr->SiS_VGAVDE << 6; | 5650 | tempeax = SiS_Pr->SiS_VGAVDE << 6; |
5749 | temp = (tempeax % (unsigned int)SiS_Pr->SiS_VDE); | 5651 | temp = (tempeax % (unsigned int)SiS_Pr->SiS_VDE); |
5750 | tempeax = tempeax / (unsigned int)SiS_Pr->SiS_VDE; | 5652 | tempeax = tempeax / (unsigned int)SiS_Pr->SiS_VDE; |
@@ -5755,11 +5657,11 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
5755 | temp = (unsigned short)(tempeax & 0x00FF); | 5657 | temp = (unsigned short)(tempeax & 0x00FF); |
5756 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x1E,temp); /* BPLVCFACT */ | 5658 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x1E,temp); /* BPLVCFACT */ |
5757 | tempvcfact = temp; | 5659 | tempvcfact = temp; |
5758 | #endif /* SIS300 */ | 5660 | #endif /* CONFIG_FB_SIS_300 */ |
5759 | 5661 | ||
5760 | } else { | 5662 | } else { |
5761 | 5663 | ||
5762 | #ifdef SIS315H /* 315 series */ | 5664 | #ifdef CONFIG_FB_SIS_315 /* 315 series */ |
5763 | tempeax = SiS_Pr->SiS_VGAVDE << 18; | 5665 | tempeax = SiS_Pr->SiS_VGAVDE << 18; |
5764 | tempebx = SiS_Pr->SiS_VDE; | 5666 | tempebx = SiS_Pr->SiS_VDE; |
5765 | temp = (tempeax % tempebx); | 5667 | temp = (tempeax % tempebx); |
@@ -5845,7 +5747,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
5845 | temp = (unsigned short)(tempecx & 0x00FF); | 5747 | temp = (unsigned short)(tempecx & 0x00FF); |
5846 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x23,temp); | 5748 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x23,temp); |
5847 | 5749 | ||
5848 | #ifdef SIS315H | 5750 | #ifdef CONFIG_FB_SIS_315 |
5849 | if(SiS_Pr->ChipType >= SIS_315H) { | 5751 | if(SiS_Pr->ChipType >= SIS_315H) { |
5850 | if(SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA) { | 5752 | if(SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA) { |
5851 | if((islvds) || (SiS_Pr->SiS_VBInfo & VB_SISLVDS)) { | 5753 | if((islvds) || (SiS_Pr->SiS_VBInfo & VB_SISLVDS)) { |
@@ -5863,7 +5765,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
5863 | } | 5765 | } |
5864 | #endif | 5766 | #endif |
5865 | 5767 | ||
5866 | #ifdef SIS300 | 5768 | #ifdef CONFIG_FB_SIS_300 |
5867 | if(SiS_Pr->SiS_IF_DEF_TRUMPION) { | 5769 | if(SiS_Pr->SiS_IF_DEF_TRUMPION) { |
5868 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; | 5770 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; |
5869 | unsigned char *trumpdata; | 5771 | unsigned char *trumpdata; |
@@ -5899,7 +5801,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
5899 | } | 5801 | } |
5900 | #endif | 5802 | #endif |
5901 | 5803 | ||
5902 | #ifdef SIS315H | 5804 | #ifdef CONFIG_FB_SIS_315 |
5903 | if(SiS_Pr->SiS_IF_DEF_FSTN || SiS_Pr->SiS_IF_DEF_DSTN) { | 5805 | if(SiS_Pr->SiS_IF_DEF_FSTN || SiS_Pr->SiS_IF_DEF_DSTN) { |
5904 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x25,0x00); | 5806 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x25,0x00); |
5905 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x26,0x00); | 5807 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x26,0x00); |
@@ -5999,7 +5901,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s | |||
5999 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x45,0x0a); | 5901 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x45,0x0a); |
6000 | } | 5902 | } |
6001 | } | 5903 | } |
6002 | #endif /* SIS315H */ | 5904 | #endif /* CONFIG_FB_SIS_315 */ |
6003 | } | 5905 | } |
6004 | 5906 | ||
6005 | /* Set Part 1 */ | 5907 | /* Set Part 1 */ |
@@ -6007,12 +5909,12 @@ static void | |||
6007 | SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex, | 5909 | SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex, |
6008 | unsigned short RefreshRateTableIndex) | 5910 | unsigned short RefreshRateTableIndex) |
6009 | { | 5911 | { |
6010 | #if defined(SIS300) || defined(SIS315H) | 5912 | #if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) |
6011 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; | 5913 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; |
6012 | #endif | 5914 | #endif |
6013 | unsigned short temp=0, tempax=0, tempbx=0, tempcx=0, bridgeadd=0; | 5915 | unsigned short temp=0, tempax=0, tempbx=0, tempcx=0, bridgeadd=0; |
6014 | unsigned short pushbx=0, CRT1Index=0, modeflag, resinfo=0; | 5916 | unsigned short pushbx=0, CRT1Index=0, modeflag, resinfo=0; |
6015 | #ifdef SIS315H | 5917 | #ifdef CONFIG_FB_SIS_315 |
6016 | unsigned short tempbl=0; | 5918 | unsigned short tempbl=0; |
6017 | #endif | 5919 | #endif |
6018 | 5920 | ||
@@ -6038,11 +5940,11 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
6038 | (SiS_Pr->SiS_VBInfo & SetInSlaveMode)) ) { | 5940 | (SiS_Pr->SiS_VBInfo & SetInSlaveMode)) ) { |
6039 | 5941 | ||
6040 | if(SiS_Pr->ChipType < SIS_315H ) { | 5942 | if(SiS_Pr->ChipType < SIS_315H ) { |
6041 | #ifdef SIS300 | 5943 | #ifdef CONFIG_FB_SIS_300 |
6042 | SiS_SetCRT2FIFO_300(SiS_Pr, ModeNo); | 5944 | SiS_SetCRT2FIFO_300(SiS_Pr, ModeNo); |
6043 | #endif | 5945 | #endif |
6044 | } else { | 5946 | } else { |
6045 | #ifdef SIS315H | 5947 | #ifdef CONFIG_FB_SIS_315 |
6046 | SiS_SetCRT2FIFO_310(SiS_Pr); | 5948 | SiS_SetCRT2FIFO_310(SiS_Pr); |
6047 | #endif | 5949 | #endif |
6048 | } | 5950 | } |
@@ -6051,7 +5953,7 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
6051 | 5953 | ||
6052 | if(SiS_Pr->ChipType < SIS_315H ) { | 5954 | if(SiS_Pr->ChipType < SIS_315H ) { |
6053 | 5955 | ||
6054 | #ifdef SIS300 /* ------------- 300 series --------------*/ | 5956 | #ifdef CONFIG_FB_SIS_300 /* ------------- 300 series --------------*/ |
6055 | 5957 | ||
6056 | temp = (SiS_Pr->SiS_VGAHT - 1) & 0x0FF; /* BTVGA2HT 0x08,0x09 */ | 5958 | temp = (SiS_Pr->SiS_VGAHT - 1) & 0x0FF; /* BTVGA2HT 0x08,0x09 */ |
6057 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x08,temp); /* CRT2 Horizontal Total */ | 5959 | SiS_SetReg(SiS_Pr->SiS_Part1Port,0x08,temp); /* CRT2 Horizontal Total */ |
@@ -6070,11 +5972,11 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
6070 | 5972 | ||
6071 | bridgeadd = 12; | 5973 | bridgeadd = 12; |
6072 | 5974 | ||
6073 | #endif /* SIS300 */ | 5975 | #endif /* CONFIG_FB_SIS_300 */ |
6074 | 5976 | ||
6075 | } else { | 5977 | } else { |
6076 | 5978 | ||
6077 | #ifdef SIS315H /* ------------------- 315/330 series --------------- */ | 5979 | #ifdef CONFIG_FB_SIS_315 /* ------------------- 315/330 series --------------- */ |
6078 | 5980 | ||
6079 | tempcx = SiS_Pr->SiS_VGAHT; /* BTVGA2HT 0x08,0x09 */ | 5981 | tempcx = SiS_Pr->SiS_VGAHT; /* BTVGA2HT 0x08,0x09 */ |
6080 | if(modeflag & HalfDCLK) { | 5982 | if(modeflag & HalfDCLK) { |
@@ -6125,7 +6027,7 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
6125 | } | 6027 | } |
6126 | } | 6028 | } |
6127 | 6029 | ||
6128 | #endif /* SIS315H */ | 6030 | #endif /* CONFIG_FB_SIS_315 */ |
6129 | 6031 | ||
6130 | } /* 315/330 series */ | 6032 | } /* 315/330 series */ |
6131 | 6033 | ||
@@ -6256,7 +6158,7 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
6256 | 6158 | ||
6257 | if(SiS_Pr->ChipType < SIS_315H) { | 6159 | if(SiS_Pr->ChipType < SIS_315H) { |
6258 | 6160 | ||
6259 | #ifdef SIS300 /* ---------- 300 series -------------- */ | 6161 | #ifdef CONFIG_FB_SIS_300 /* ---------- 300 series -------------- */ |
6260 | 6162 | ||
6261 | if(SiS_Pr->SiS_VBType & VB_SISVB) { | 6163 | if(SiS_Pr->SiS_VBType & VB_SISVB) { |
6262 | temp = 0x20; | 6164 | temp = 0x20; |
@@ -6310,11 +6212,11 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
6310 | 6212 | ||
6311 | SiS_SetRegANDOR(SiS_Pr->SiS_Part1Port,0x13,~0x3C,temp); /* Panel Link Delay Compensation; (Software Command Reset; Power Saving) */ | 6213 | SiS_SetRegANDOR(SiS_Pr->SiS_Part1Port,0x13,~0x3C,temp); /* Panel Link Delay Compensation; (Software Command Reset; Power Saving) */ |
6312 | 6214 | ||
6313 | #endif /* SIS300 */ | 6215 | #endif /* CONFIG_FB_SIS_300 */ |
6314 | 6216 | ||
6315 | } else { | 6217 | } else { |
6316 | 6218 | ||
6317 | #ifdef SIS315H /* --------------- 315/330 series ---------------*/ | 6219 | #ifdef CONFIG_FB_SIS_315 /* --------------- 315/330 series ---------------*/ |
6318 | 6220 | ||
6319 | if(SiS_Pr->ChipType < SIS_661) { | 6221 | if(SiS_Pr->ChipType < SIS_661) { |
6320 | 6222 | ||
@@ -6349,7 +6251,7 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
6349 | if(modeflag & HalfDCLK) tempax |= 0x40; | 6251 | if(modeflag & HalfDCLK) tempax |= 0x40; |
6350 | SiS_SetRegANDOR(SiS_Pr->SiS_Part1Port,0x2C,0x3f,tempax); | 6252 | SiS_SetRegANDOR(SiS_Pr->SiS_Part1Port,0x2C,0x3f,tempax); |
6351 | 6253 | ||
6352 | #endif /* SIS315H */ | 6254 | #endif /* CONFIG_FB_SIS_315 */ |
6353 | 6255 | ||
6354 | } | 6256 | } |
6355 | 6257 | ||
@@ -6381,7 +6283,7 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
6381 | /* SET PART 2 REGISTER GROUP */ | 6283 | /* SET PART 2 REGISTER GROUP */ |
6382 | /*********************************************/ | 6284 | /*********************************************/ |
6383 | 6285 | ||
6384 | #ifdef SIS315H | 6286 | #ifdef CONFIG_FB_SIS_315 |
6385 | static unsigned char * | 6287 | static unsigned char * |
6386 | SiS_GetGroup2CLVXPtr(struct SiS_Private *SiS_Pr, int tabletype) | 6288 | SiS_GetGroup2CLVXPtr(struct SiS_Private *SiS_Pr, int tabletype) |
6387 | { | 6289 | { |
@@ -6478,7 +6380,7 @@ SiS_GetCRT2Part2Ptr(struct SiS_Private *SiS_Pr,unsigned short ModeNo,unsigned sh | |||
6478 | } | 6380 | } |
6479 | #endif | 6381 | #endif |
6480 | 6382 | ||
6481 | #ifdef SIS300 | 6383 | #ifdef CONFIG_FB_SIS_300 |
6482 | static void | 6384 | static void |
6483 | SiS_Group2LCDSpecial(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short crt2crtc) | 6385 | SiS_Group2LCDSpecial(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short crt2crtc) |
6484 | { | 6386 | { |
@@ -6690,7 +6592,7 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
6690 | unsigned int longtemp, PhaseIndex; | 6592 | unsigned int longtemp, PhaseIndex; |
6691 | bool newtvphase; | 6593 | bool newtvphase; |
6692 | const unsigned char *TimingPoint; | 6594 | const unsigned char *TimingPoint; |
6693 | #ifdef SIS315H | 6595 | #ifdef CONFIG_FB_SIS_315 |
6694 | unsigned short resindex, CRT2Index; | 6596 | unsigned short resindex, CRT2Index; |
6695 | const struct SiS_Part2PortTbl *CRT2Part2Ptr = NULL; | 6597 | const struct SiS_Part2PortTbl *CRT2Part2Ptr = NULL; |
6696 | 6598 | ||
@@ -7069,7 +6971,7 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
7069 | SiS_SetRegAND(SiS_Pr->SiS_Part2Port,0x17,0xFB); | 6971 | SiS_SetRegAND(SiS_Pr->SiS_Part2Port,0x17,0xFB); |
7070 | SiS_SetRegAND(SiS_Pr->SiS_Part2Port,0x18,0xDF); | 6972 | SiS_SetRegAND(SiS_Pr->SiS_Part2Port,0x18,0xDF); |
7071 | 6973 | ||
7072 | #ifdef SIS315H | 6974 | #ifdef CONFIG_FB_SIS_315 |
7073 | if(SiS_GetCRT2Part2Ptr(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex, | 6975 | if(SiS_GetCRT2Part2Ptr(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex, |
7074 | &CRT2Index, &resindex)) { | 6976 | &CRT2Index, &resindex)) { |
7075 | switch(CRT2Index) { | 6977 | switch(CRT2Index) { |
@@ -7130,12 +7032,6 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
7130 | 7032 | ||
7131 | /* Non-expanding: lcdvdes = tempcx = VT-1; lcdvdee = tempbx = VDE-1 */ | 7033 | /* Non-expanding: lcdvdes = tempcx = VT-1; lcdvdee = tempbx = VDE-1 */ |
7132 | 7034 | ||
7133 | #ifdef SIS_XORG_XF86 | ||
7134 | #ifdef TWDEBUG | ||
7135 | xf86DrvMsg(0, X_INFO, "lcdvdes 0x%x lcdvdee 0x%x\n", tempcx, tempbx); | ||
7136 | #endif | ||
7137 | #endif | ||
7138 | |||
7139 | SiS_SetReg(SiS_Pr->SiS_Part2Port,0x05,tempcx); /* lcdvdes */ | 7035 | SiS_SetReg(SiS_Pr->SiS_Part2Port,0x05,tempcx); /* lcdvdes */ |
7140 | SiS_SetReg(SiS_Pr->SiS_Part2Port,0x06,tempbx); /* lcdvdee */ | 7036 | SiS_SetReg(SiS_Pr->SiS_Part2Port,0x06,tempbx); /* lcdvdee */ |
7141 | 7037 | ||
@@ -7184,12 +7080,6 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
7184 | tempbx = SiS_Pr->CVSyncStart; | 7080 | tempbx = SiS_Pr->CVSyncStart; |
7185 | } | 7081 | } |
7186 | 7082 | ||
7187 | #ifdef SIS_XORG_XF86 | ||
7188 | #ifdef TWDEBUG | ||
7189 | xf86DrvMsg(0, X_INFO, "lcdvrs 0x%x\n", tempbx); | ||
7190 | #endif | ||
7191 | #endif | ||
7192 | |||
7193 | SiS_SetReg(SiS_Pr->SiS_Part2Port,0x04,tempbx); /* lcdvrs */ | 7083 | SiS_SetReg(SiS_Pr->SiS_Part2Port,0x04,tempbx); /* lcdvrs */ |
7194 | 7084 | ||
7195 | temp = (tempbx >> 4) & 0xF0; | 7085 | temp = (tempbx >> 4) & 0xF0; |
@@ -7201,15 +7091,9 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
7201 | temp |= (SiS_Pr->CVSyncEnd & 0x0f); | 7091 | temp |= (SiS_Pr->CVSyncEnd & 0x0f); |
7202 | } | 7092 | } |
7203 | 7093 | ||
7204 | #ifdef SIS_XORG_XF86 | ||
7205 | #ifdef TWDEBUG | ||
7206 | xf86DrvMsg(0, X_INFO, "lcdvre[3:0] 0x%x\n", (temp & 0x0f)); | ||
7207 | #endif | ||
7208 | #endif | ||
7209 | |||
7210 | SiS_SetReg(SiS_Pr->SiS_Part2Port,0x01,temp); | 7094 | SiS_SetReg(SiS_Pr->SiS_Part2Port,0x01,temp); |
7211 | 7095 | ||
7212 | #ifdef SIS300 | 7096 | #ifdef CONFIG_FB_SIS_300 |
7213 | SiS_Group2LCDSpecial(SiS_Pr, ModeNo, crt2crtc); | 7097 | SiS_Group2LCDSpecial(SiS_Pr, ModeNo, crt2crtc); |
7214 | #endif | 7098 | #endif |
7215 | 7099 | ||
@@ -7245,12 +7129,6 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
7245 | tempax >>= 1; | 7129 | tempax >>= 1; |
7246 | } | 7130 | } |
7247 | 7131 | ||
7248 | #ifdef SIS_XORG_XF86 | ||
7249 | #ifdef TWDEBUG | ||
7250 | xf86DrvMsg(0, X_INFO, "lcdhdee 0x%x\n", tempbx); | ||
7251 | #endif | ||
7252 | #endif | ||
7253 | |||
7254 | tempbx += bridgeoffset; | 7132 | tempbx += bridgeoffset; |
7255 | 7133 | ||
7256 | SiS_SetReg(SiS_Pr->SiS_Part2Port,0x23,tempbx); /* lcdhdee */ | 7134 | SiS_SetReg(SiS_Pr->SiS_Part2Port,0x23,tempbx); /* lcdhdee */ |
@@ -7276,12 +7154,6 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
7276 | tempbx += bridgeoffset; | 7154 | tempbx += bridgeoffset; |
7277 | } | 7155 | } |
7278 | 7156 | ||
7279 | #ifdef SIS_XORG_XF86 | ||
7280 | #ifdef TWDEBUG | ||
7281 | xf86DrvMsg(0, X_INFO, "lcdhrs 0x%x\n", tempbx); | ||
7282 | #endif | ||
7283 | #endif | ||
7284 | |||
7285 | SiS_SetReg(SiS_Pr->SiS_Part2Port,0x1C,tempbx); /* lcdhrs */ | 7157 | SiS_SetReg(SiS_Pr->SiS_Part2Port,0x1C,tempbx); /* lcdhrs */ |
7286 | SiS_SetRegANDOR(SiS_Pr->SiS_Part2Port,0x1D,0x0F,((tempbx >> 4) & 0xf0)); | 7158 | SiS_SetRegANDOR(SiS_Pr->SiS_Part2Port,0x1D,0x0F,((tempbx >> 4) & 0xf0)); |
7287 | 7159 | ||
@@ -7300,20 +7172,14 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
7300 | tempbx += bridgeoffset; | 7172 | tempbx += bridgeoffset; |
7301 | } | 7173 | } |
7302 | 7174 | ||
7303 | #ifdef SIS_XORG_XF86 | ||
7304 | #ifdef TWDEBUG | ||
7305 | xf86DrvMsg(0, X_INFO, "lcdhre 0x%x\n", tempbx); | ||
7306 | #endif | ||
7307 | #endif | ||
7308 | |||
7309 | SiS_SetReg(SiS_Pr->SiS_Part2Port,0x21,tempbx); /* lcdhre */ | 7175 | SiS_SetReg(SiS_Pr->SiS_Part2Port,0x21,tempbx); /* lcdhre */ |
7310 | 7176 | ||
7311 | SiS_SetGroup2_Tail(SiS_Pr, ModeNo); | 7177 | SiS_SetGroup2_Tail(SiS_Pr, ModeNo); |
7312 | 7178 | ||
7313 | #ifdef SIS300 | 7179 | #ifdef CONFIG_FB_SIS_300 |
7314 | SiS_Set300Part2Regs(SiS_Pr, ModeIdIndex, RefreshRateTableIndex, ModeNo); | 7180 | SiS_Set300Part2Regs(SiS_Pr, ModeIdIndex, RefreshRateTableIndex, ModeNo); |
7315 | #endif | 7181 | #endif |
7316 | #ifdef SIS315H | 7182 | #ifdef CONFIG_FB_SIS_315 |
7317 | } /* CRT2-LCD from table */ | 7183 | } /* CRT2-LCD from table */ |
7318 | #endif | 7184 | #endif |
7319 | } | 7185 | } |
@@ -7382,7 +7248,7 @@ SiS_SetGroup3(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
7382 | /* SET PART 4 REGISTER GROUP */ | 7248 | /* SET PART 4 REGISTER GROUP */ |
7383 | /*********************************************/ | 7249 | /*********************************************/ |
7384 | 7250 | ||
7385 | #ifdef SIS315H | 7251 | #ifdef CONFIG_FB_SIS_315 |
7386 | #if 0 | 7252 | #if 0 |
7387 | static void | 7253 | static void |
7388 | SiS_ShiftXPos(struct SiS_Private *SiS_Pr, int shift) | 7254 | SiS_ShiftXPos(struct SiS_Private *SiS_Pr, int shift) |
@@ -8011,7 +7877,7 @@ SiS_SetCHTVReg(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
8011 | 7877 | ||
8012 | if(SiS_Pr->SiS_IF_DEF_CH70xx == 1) { | 7878 | if(SiS_Pr->SiS_IF_DEF_CH70xx == 1) { |
8013 | 7879 | ||
8014 | #ifdef SIS300 | 7880 | #ifdef CONFIG_FB_SIS_300 |
8015 | 7881 | ||
8016 | /* Chrontel 7005 - I assume that it does not come with a 315 series chip */ | 7882 | /* Chrontel 7005 - I assume that it does not come with a 315 series chip */ |
8017 | 7883 | ||
@@ -8124,7 +7990,7 @@ SiS_SetCHTVReg(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
8124 | 7990 | ||
8125 | /* Chrontel 7019 - assumed that it does not come with a 300 series chip */ | 7991 | /* Chrontel 7019 - assumed that it does not come with a 300 series chip */ |
8126 | 7992 | ||
8127 | #ifdef SIS315H | 7993 | #ifdef CONFIG_FB_SIS_315 |
8128 | 7994 | ||
8129 | unsigned short temp; | 7995 | unsigned short temp; |
8130 | 7996 | ||
@@ -8175,7 +8041,7 @@ SiS_SetCHTVReg(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short | |||
8175 | 8041 | ||
8176 | } | 8042 | } |
8177 | 8043 | ||
8178 | #ifdef SIS315H /* ----------- 315 series only ---------- */ | 8044 | #ifdef CONFIG_FB_SIS_315 /* ----------- 315 series only ---------- */ |
8179 | 8045 | ||
8180 | void | 8046 | void |
8181 | SiS_Chrontel701xBLOn(struct SiS_Private *SiS_Pr) | 8047 | SiS_Chrontel701xBLOn(struct SiS_Private *SiS_Pr) |
@@ -8657,7 +8523,7 @@ SiS_ChrontelDoSomething1(struct SiS_Private *SiS_Pr) | |||
8657 | bool | 8523 | bool |
8658 | SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | 8524 | SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo) |
8659 | { | 8525 | { |
8660 | #ifdef SIS300 | 8526 | #ifdef CONFIG_FB_SIS_300 |
8661 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; | 8527 | unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; |
8662 | #endif | 8528 | #endif |
8663 | unsigned short ModeIdIndex, RefreshRateTableIndex; | 8529 | unsigned short ModeIdIndex, RefreshRateTableIndex; |
@@ -8703,16 +8569,6 @@ SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | |||
8703 | SiS_GetLVDSDesData(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); | 8569 | SiS_GetLVDSDesData(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); |
8704 | } | 8570 | } |
8705 | 8571 | ||
8706 | #ifdef SIS_XORG_XF86 | ||
8707 | #ifdef TWDEBUG | ||
8708 | xf86DrvMsg(0, X_INFO, "(init301: LCDHDES 0x%03x LCDVDES 0x%03x)\n", SiS_Pr->SiS_LCDHDES, SiS_Pr->SiS_LCDVDES); | ||
8709 | xf86DrvMsg(0, X_INFO, "(init301: HDE 0x%03x VDE 0x%03x)\n", SiS_Pr->SiS_HDE, SiS_Pr->SiS_VDE); | ||
8710 | xf86DrvMsg(0, X_INFO, "(init301: VGAHDE 0x%03x VGAVDE 0x%03x)\n", SiS_Pr->SiS_VGAHDE, SiS_Pr->SiS_VGAVDE); | ||
8711 | xf86DrvMsg(0, X_INFO, "(init301: HT 0x%03x VT 0x%03x)\n", SiS_Pr->SiS_HT, SiS_Pr->SiS_VT); | ||
8712 | xf86DrvMsg(0, X_INFO, "(init301: VGAHT 0x%03x VGAVT 0x%03x)\n", SiS_Pr->SiS_VGAHT, SiS_Pr->SiS_VGAVT); | ||
8713 | #endif | ||
8714 | #endif | ||
8715 | |||
8716 | if(SiS_Pr->SiS_SetFlag & LowModeTests) { | 8572 | if(SiS_Pr->SiS_SetFlag & LowModeTests) { |
8717 | SiS_SetGroup1(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); | 8573 | SiS_SetGroup1(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); |
8718 | } | 8574 | } |
@@ -8722,12 +8578,12 @@ SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | |||
8722 | if(SiS_Pr->SiS_SetFlag & LowModeTests) { | 8578 | if(SiS_Pr->SiS_SetFlag & LowModeTests) { |
8723 | 8579 | ||
8724 | SiS_SetGroup2(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); | 8580 | SiS_SetGroup2(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); |
8725 | #ifdef SIS315H | 8581 | #ifdef CONFIG_FB_SIS_315 |
8726 | SiS_SetGroup2_C_ELV(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); | 8582 | SiS_SetGroup2_C_ELV(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); |
8727 | #endif | 8583 | #endif |
8728 | SiS_SetGroup3(SiS_Pr, ModeNo, ModeIdIndex); | 8584 | SiS_SetGroup3(SiS_Pr, ModeNo, ModeIdIndex); |
8729 | SiS_SetGroup4(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); | 8585 | SiS_SetGroup4(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); |
8730 | #ifdef SIS315H | 8586 | #ifdef CONFIG_FB_SIS_315 |
8731 | SiS_SetGroup4_C_ELV(SiS_Pr, ModeNo, ModeIdIndex); | 8587 | SiS_SetGroup4_C_ELV(SiS_Pr, ModeNo, ModeIdIndex); |
8732 | #endif | 8588 | #endif |
8733 | SiS_SetGroup5(SiS_Pr, ModeNo, ModeIdIndex); | 8589 | SiS_SetGroup5(SiS_Pr, ModeNo, ModeIdIndex); |
@@ -8758,7 +8614,7 @@ SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | |||
8758 | if(SiS_Pr->SiS_IF_DEF_CH70xx != 0) { | 8614 | if(SiS_Pr->SiS_IF_DEF_CH70xx != 0) { |
8759 | if(SiS_Pr->SiS_VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) { | 8615 | if(SiS_Pr->SiS_VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) { |
8760 | if(SiS_Pr->SiS_IF_DEF_CH70xx == 2) { | 8616 | if(SiS_Pr->SiS_IF_DEF_CH70xx == 2) { |
8761 | #ifdef SIS315H | 8617 | #ifdef CONFIG_FB_SIS_315 |
8762 | SiS_SetCH701xForLCD(SiS_Pr); | 8618 | SiS_SetCH701xForLCD(SiS_Pr); |
8763 | #endif | 8619 | #endif |
8764 | } | 8620 | } |
@@ -8771,7 +8627,7 @@ SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | |||
8771 | 8627 | ||
8772 | } | 8628 | } |
8773 | 8629 | ||
8774 | #ifdef SIS300 | 8630 | #ifdef CONFIG_FB_SIS_300 |
8775 | if(SiS_Pr->ChipType < SIS_315H) { | 8631 | if(SiS_Pr->ChipType < SIS_315H) { |
8776 | if(SiS_Pr->SiS_SetFlag & LowModeTests) { | 8632 | if(SiS_Pr->SiS_SetFlag & LowModeTests) { |
8777 | if(SiS_Pr->SiS_UseOEM) { | 8633 | if(SiS_Pr->SiS_UseOEM) { |
@@ -8794,7 +8650,7 @@ SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo) | |||
8794 | } | 8650 | } |
8795 | #endif | 8651 | #endif |
8796 | 8652 | ||
8797 | #ifdef SIS315H | 8653 | #ifdef CONFIG_FB_SIS_315 |
8798 | if(SiS_Pr->ChipType >= SIS_315H) { | 8654 | if(SiS_Pr->ChipType >= SIS_315H) { |
8799 | if(SiS_Pr->SiS_SetFlag & LowModeTests) { | 8655 | if(SiS_Pr->SiS_SetFlag & LowModeTests) { |
8800 | if(SiS_Pr->ChipType < SIS_661) { | 8656 | if(SiS_Pr->ChipType < SIS_661) { |
@@ -8873,7 +8729,7 @@ SiS_SetupDDCN(struct SiS_Private *SiS_Pr) | |||
8873 | } | 8729 | } |
8874 | } | 8730 | } |
8875 | 8731 | ||
8876 | #ifdef SIS300 | 8732 | #ifdef CONFIG_FB_SIS_300 |
8877 | static unsigned char * | 8733 | static unsigned char * |
8878 | SiS_SetTrumpBlockLoop(struct SiS_Private *SiS_Pr, unsigned char *dataptr) | 8734 | SiS_SetTrumpBlockLoop(struct SiS_Private *SiS_Pr, unsigned char *dataptr) |
8879 | { | 8735 | { |
@@ -8923,11 +8779,6 @@ SiS_SetTrumpionBlock(struct SiS_Private *SiS_Pr, unsigned char *dataptr) | |||
8923 | dataptr = SiS_SetTrumpBlockLoop(SiS_Pr, dataptr); | 8779 | dataptr = SiS_SetTrumpBlockLoop(SiS_Pr, dataptr); |
8924 | if(!dataptr) return false; | 8780 | if(!dataptr) return false; |
8925 | } | 8781 | } |
8926 | #ifdef SIS_XORG_XF86 | ||
8927 | #ifdef TWDEBUG | ||
8928 | xf86DrvMsg(0, X_INFO, "Trumpion block success\n"); | ||
8929 | #endif | ||
8930 | #endif | ||
8931 | return true; | 8782 | return true; |
8932 | } | 8783 | } |
8933 | #endif | 8784 | #endif |
@@ -9002,9 +8853,7 @@ SiS_SetCH701x(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val) | |||
9002 | SiS_SetChReg(SiS_Pr, reg, val, 0); | 8853 | SiS_SetChReg(SiS_Pr, reg, val, 0); |
9003 | } | 8854 | } |
9004 | 8855 | ||
9005 | #ifdef SIS_LINUX_KERNEL | ||
9006 | static | 8856 | static |
9007 | #endif | ||
9008 | void | 8857 | void |
9009 | SiS_SetCH70xx(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val) | 8858 | SiS_SetCH70xx(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val) |
9010 | { | 8859 | { |
@@ -9091,9 +8940,7 @@ SiS_GetCH701x(struct SiS_Private *SiS_Pr, unsigned short tempbx) | |||
9091 | 8940 | ||
9092 | /* Read from Chrontel 70xx */ | 8941 | /* Read from Chrontel 70xx */ |
9093 | /* Parameter is [Register no (S7-S0)] */ | 8942 | /* Parameter is [Register no (S7-S0)] */ |
9094 | #ifdef SIS_LINUX_KERNEL | ||
9095 | static | 8943 | static |
9096 | #endif | ||
9097 | unsigned short | 8944 | unsigned short |
9098 | SiS_GetCH70xx(struct SiS_Private *SiS_Pr, unsigned short tempbx) | 8945 | SiS_GetCH70xx(struct SiS_Private *SiS_Pr, unsigned short tempbx) |
9099 | { | 8946 | { |
@@ -9114,9 +8961,7 @@ SiS_SetCH70xxANDOR(struct SiS_Private *SiS_Pr, unsigned short reg, | |||
9114 | } | 8961 | } |
9115 | 8962 | ||
9116 | /* Our own DDC functions */ | 8963 | /* Our own DDC functions */ |
9117 | #ifndef SIS_XORG_XF86 | ||
9118 | static | 8964 | static |
9119 | #endif | ||
9120 | unsigned short | 8965 | unsigned short |
9121 | SiS_InitDDCRegs(struct SiS_Private *SiS_Pr, unsigned int VBFlags, int VGAEngine, | 8966 | SiS_InitDDCRegs(struct SiS_Private *SiS_Pr, unsigned int VBFlags, int VGAEngine, |
9122 | unsigned short adaptnum, unsigned short DDCdatatype, bool checkcr32, | 8967 | unsigned short adaptnum, unsigned short DDCdatatype, bool checkcr32, |
@@ -9224,12 +9069,6 @@ SiS_InitDDCRegs(struct SiS_Private *SiS_Pr, unsigned int VBFlags, int VGAEngine, | |||
9224 | 9069 | ||
9225 | SiS_SetupDDCN(SiS_Pr); | 9070 | SiS_SetupDDCN(SiS_Pr); |
9226 | 9071 | ||
9227 | #ifdef SIS_XORG_XF86 | ||
9228 | #ifdef TWDEBUG | ||
9229 | xf86DrvMsg(0, X_INFO, "DDC Port %x Index %x Shift %d\n", | ||
9230 | SiS_Pr->SiS_DDC_Port, SiS_Pr->SiS_DDC_Index, temp); | ||
9231 | #endif | ||
9232 | #endif | ||
9233 | return 0; | 9072 | return 0; |
9234 | } | 9073 | } |
9235 | 9074 | ||
@@ -9292,11 +9131,6 @@ SiS_DoProbeDDC(struct SiS_Private *SiS_Pr) | |||
9292 | SiS_SetSwitchDDC2(SiS_Pr); | 9131 | SiS_SetSwitchDDC2(SiS_Pr); |
9293 | if(SiS_PrepareDDC(SiS_Pr)) { | 9132 | if(SiS_PrepareDDC(SiS_Pr)) { |
9294 | SiS_SetStop(SiS_Pr); | 9133 | SiS_SetStop(SiS_Pr); |
9295 | #ifdef SIS_XORG_XF86 | ||
9296 | #ifdef TWDEBUG | ||
9297 | xf86DrvMsg(0, X_INFO, "Probe: Prepare failed\n"); | ||
9298 | #endif | ||
9299 | #endif | ||
9300 | return 0xFFFF; | 9134 | return 0xFFFF; |
9301 | } | 9135 | } |
9302 | mask = 0xf0; | 9136 | mask = 0xf0; |
@@ -9310,11 +9144,6 @@ SiS_DoProbeDDC(struct SiS_Private *SiS_Pr) | |||
9310 | } else { | 9144 | } else { |
9311 | failed = true; | 9145 | failed = true; |
9312 | ret = 0xFFFF; | 9146 | ret = 0xFFFF; |
9313 | #ifdef SIS_XORG_XF86 | ||
9314 | #ifdef TWDEBUG | ||
9315 | xf86DrvMsg(0, X_INFO, "Probe: Read 1 failed\n"); | ||
9316 | #endif | ||
9317 | #endif | ||
9318 | } | 9147 | } |
9319 | } | 9148 | } |
9320 | if(!failed) { | 9149 | if(!failed) { |
@@ -9324,11 +9153,6 @@ SiS_DoProbeDDC(struct SiS_Private *SiS_Pr) | |||
9324 | if(temp == value) ret = 0; | 9153 | if(temp == value) ret = 0; |
9325 | else { | 9154 | else { |
9326 | ret = 0xFFFF; | 9155 | ret = 0xFFFF; |
9327 | #ifdef SIS_XORG_XF86 | ||
9328 | #ifdef TWDEBUG | ||
9329 | xf86DrvMsg(0, X_INFO, "Probe: Read 2 failed\n"); | ||
9330 | #endif | ||
9331 | #endif | ||
9332 | if(SiS_Pr->SiS_DDC_DeviceAddr == 0xa0) { | 9156 | if(SiS_Pr->SiS_DDC_DeviceAddr == 0xa0) { |
9333 | if(temp == 0x30) ret = 0; | 9157 | if(temp == 0x30) ret = 0; |
9334 | } | 9158 | } |
@@ -9338,9 +9162,7 @@ SiS_DoProbeDDC(struct SiS_Private *SiS_Pr) | |||
9338 | return ret; | 9162 | return ret; |
9339 | } | 9163 | } |
9340 | 9164 | ||
9341 | #ifndef SIS_XORG_XF86 | ||
9342 | static | 9165 | static |
9343 | #endif | ||
9344 | unsigned short | 9166 | unsigned short |
9345 | SiS_ProbeDDC(struct SiS_Private *SiS_Pr) | 9167 | SiS_ProbeDDC(struct SiS_Private *SiS_Pr) |
9346 | { | 9168 | { |
@@ -9357,9 +9179,7 @@ SiS_ProbeDDC(struct SiS_Private *SiS_Pr) | |||
9357 | return flag; | 9179 | return flag; |
9358 | } | 9180 | } |
9359 | 9181 | ||
9360 | #ifndef SIS_XORG_XF86 | ||
9361 | static | 9182 | static |
9362 | #endif | ||
9363 | unsigned short | 9183 | unsigned short |
9364 | SiS_ReadDDC(struct SiS_Private *SiS_Pr, unsigned short DDCdatatype, unsigned char *buffer) | 9184 | SiS_ReadDDC(struct SiS_Private *SiS_Pr, unsigned short DDCdatatype, unsigned char *buffer) |
9365 | { | 9185 | { |
@@ -9606,11 +9426,6 @@ SiS_SetSCLKHigh(struct SiS_Private *SiS_Pr) | |||
9606 | temp = SiS_GetReg(SiS_Pr->SiS_DDC_Port,SiS_Pr->SiS_DDC_Index); | 9426 | temp = SiS_GetReg(SiS_Pr->SiS_DDC_Port,SiS_Pr->SiS_DDC_Index); |
9607 | } while((!(temp & SiS_Pr->SiS_DDC_Clk)) && --watchdog); | 9427 | } while((!(temp & SiS_Pr->SiS_DDC_Clk)) && --watchdog); |
9608 | if (!watchdog) { | 9428 | if (!watchdog) { |
9609 | #ifdef SIS_XORG_XF86 | ||
9610 | #ifdef TWDEBUG | ||
9611 | xf86DrvMsg(0, X_INFO, "SetClkHigh failed\n"); | ||
9612 | #endif | ||
9613 | #endif | ||
9614 | return 0xFFFF; | 9429 | return 0xFFFF; |
9615 | } | 9430 | } |
9616 | SiS_DDC2Delay(SiS_Pr,SiS_I2CDELAYSHORT); | 9431 | SiS_DDC2Delay(SiS_Pr,SiS_I2CDELAYSHORT); |
@@ -9641,7 +9456,7 @@ SiS_CheckACK(struct SiS_Private *SiS_Pr) | |||
9641 | 9456 | ||
9642 | /* =============== SiS 315/330 O.E.M. ================= */ | 9457 | /* =============== SiS 315/330 O.E.M. ================= */ |
9643 | 9458 | ||
9644 | #ifdef SIS315H | 9459 | #ifdef CONFIG_FB_SIS_315 |
9645 | 9460 | ||
9646 | static unsigned short | 9461 | static unsigned short |
9647 | GetRAMDACromptr(struct SiS_Private *SiS_Pr) | 9462 | GetRAMDACromptr(struct SiS_Private *SiS_Pr) |
@@ -10829,7 +10644,7 @@ SiS_FinalizeLCD(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor | |||
10829 | 10644 | ||
10830 | /* ================= SiS 300 O.E.M. ================== */ | 10645 | /* ================= SiS 300 O.E.M. ================== */ |
10831 | 10646 | ||
10832 | #ifdef SIS300 | 10647 | #ifdef CONFIG_FB_SIS_300 |
10833 | 10648 | ||
10834 | static void | 10649 | static void |
10835 | SetOEMLCDData2(struct SiS_Private *SiS_Pr, unsigned short ModeNo,unsigned short ModeIdIndex, | 10650 | SetOEMLCDData2(struct SiS_Private *SiS_Pr, unsigned short ModeNo,unsigned short ModeIdIndex, |
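The init301.c hunks above all apply one mechanical change: the private build guards SIS300 and SIS315H become the Kconfig symbols CONFIG_FB_SIS_300 and CONFIG_FB_SIS_315, while the XFree86-only debug output (the SIS_XORG_XF86/TWDEBUG blocks) and the non-kernel PCI access fallback (pciReadLong) are removed. A minimal sketch of the resulting gating, reusing two calls that appear in the hunks above; this is illustrative only, not a new hunk:

/* Sketch: the shape of the renamed guards after this patch. */
if (SiS_Pr->ChipType < SIS_315H) {
#ifdef CONFIG_FB_SIS_300
	SiS_SetCRT2FIFO_300(SiS_Pr, ModeNo);	/* 300 series path */
#endif
} else {
#ifdef CONFIG_FB_SIS_315
	SiS_SetCRT2FIFO_310(SiS_Pr);		/* 315/330 series path */
#endif
}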
diff --git a/drivers/video/sis/init301.h b/drivers/video/sis/init301.h index 51d99222375d..e1fd31d0fddf 100644 --- a/drivers/video/sis/init301.h +++ b/drivers/video/sis/init301.h | |||
@@ -53,15 +53,8 @@ | |||
53 | #ifndef _INIT301_H_ | 53 | #ifndef _INIT301_H_ |
54 | #define _INIT301_H_ | 54 | #define _INIT301_H_ |
55 | 55 | ||
56 | #include "osdef.h" | ||
57 | #include "initdef.h" | 56 | #include "initdef.h" |
58 | 57 | ||
59 | #ifdef SIS_XORG_XF86 | ||
60 | #include "sis.h" | ||
61 | #include "sis_regs.h" | ||
62 | #endif | ||
63 | |||
64 | #ifdef SIS_LINUX_KERNEL | ||
65 | #include "vgatypes.h" | 58 | #include "vgatypes.h" |
66 | #include "vstruct.h" | 59 | #include "vstruct.h" |
67 | #ifdef SIS_CP | 60 | #ifdef SIS_CP |
@@ -72,7 +65,6 @@ | |||
72 | #include <linux/fb.h> | 65 | #include <linux/fb.h> |
73 | #include "sis.h" | 66 | #include "sis.h" |
74 | #include <video/sisfb.h> | 67 | #include <video/sisfb.h> |
75 | #endif | ||
76 | 68 | ||
77 | static const unsigned char SiS_YPbPrTable[3][64] = { | 69 | static const unsigned char SiS_YPbPrTable[3][64] = { |
78 | { | 70 | { |
@@ -237,7 +229,7 @@ static const unsigned char SiS_Part2CLVX_6[] = { /* 1080i */ | |||
237 | 0xFF,0xFF, | 229 | 0xFF,0xFF, |
238 | }; | 230 | }; |
239 | 231 | ||
240 | #ifdef SIS315H | 232 | #ifdef CONFIG_FB_SIS_315 |
241 | /* 661 et al LCD data structure (2.03.00) */ | 233 | /* 661 et al LCD data structure (2.03.00) */ |
242 | static const unsigned char SiS_LCDStruct661[] = { | 234 | static const unsigned char SiS_LCDStruct661[] = { |
243 | /* 1024x768 */ | 235 | /* 1024x768 */ |
@@ -279,7 +271,7 @@ static const unsigned char SiS_LCDStruct661[] = { | |||
279 | }; | 271 | }; |
280 | #endif | 272 | #endif |
281 | 273 | ||
282 | #ifdef SIS300 | 274 | #ifdef CONFIG_FB_SIS_300 |
283 | static unsigned char SiS300_TrumpionData[14][80] = { | 275 | static unsigned char SiS300_TrumpionData[14][80] = { |
284 | { 0x02,0x0A,0x0A,0x01,0x04,0x01,0x00,0x03,0x0D,0x00,0x0D,0x10,0x7F,0x00,0x80,0x02, | 276 | { 0x02,0x0A,0x0A,0x01,0x04,0x01,0x00,0x03,0x0D,0x00,0x0D,0x10,0x7F,0x00,0x80,0x02, |
285 | 0x20,0x03,0x0B,0x00,0x90,0x01,0xC1,0x01,0x60,0x0C,0x30,0x10,0x00,0x00,0x04,0x23, | 277 | 0x20,0x03,0x0B,0x00,0x90,0x01,0xC1,0x01,0x60,0x0C,0x30,0x10,0x00,0x00,0x04,0x23, |
@@ -356,9 +348,6 @@ static unsigned char SiS300_TrumpionData[14][80] = { | |||
356 | #endif | 348 | #endif |
357 | 349 | ||
358 | void SiS_UnLockCRT2(struct SiS_Private *SiS_Pr); | 350 | void SiS_UnLockCRT2(struct SiS_Private *SiS_Pr); |
359 | #ifndef SIS_LINUX_KERNEL | ||
360 | void SiS_LockCRT2(struct SiS_Private *SiS_Pr); | ||
361 | #endif | ||
362 | void SiS_EnableCRT2(struct SiS_Private *SiS_Pr); | 351 | void SiS_EnableCRT2(struct SiS_Private *SiS_Pr); |
363 | unsigned short SiS_GetRatePtr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex); | 352 | unsigned short SiS_GetRatePtr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex); |
364 | void SiS_WaitRetrace1(struct SiS_Private *SiS_Pr); | 353 | void SiS_WaitRetrace1(struct SiS_Private *SiS_Pr); |
@@ -375,9 +364,6 @@ unsigned short SiS_GetVCLK2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo | |||
375 | unsigned short RefreshRateTableIndex); | 364 | unsigned short RefreshRateTableIndex); |
376 | unsigned short SiS_GetResInfo(struct SiS_Private *SiS_Pr,unsigned short ModeNo,unsigned short ModeIdIndex); | 365 | unsigned short SiS_GetResInfo(struct SiS_Private *SiS_Pr,unsigned short ModeNo,unsigned short ModeIdIndex); |
377 | void SiS_DisableBridge(struct SiS_Private *SiS_Pr); | 366 | void SiS_DisableBridge(struct SiS_Private *SiS_Pr); |
378 | #ifndef SIS_LINUX_KERNEL | ||
379 | void SiS_EnableBridge(struct SiS_Private *SiS_Pr); | ||
380 | #endif | ||
381 | bool SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo); | 367 | bool SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo); |
382 | void SiS_SiS30xBLOn(struct SiS_Private *SiS_Pr); | 368 | void SiS_SiS30xBLOn(struct SiS_Private *SiS_Pr); |
383 | void SiS_SiS30xBLOff(struct SiS_Private *SiS_Pr); | 369 | void SiS_SiS30xBLOff(struct SiS_Private *SiS_Pr); |
@@ -386,13 +372,9 @@ void SiS_SetCH700x(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned cha | |||
386 | unsigned short SiS_GetCH700x(struct SiS_Private *SiS_Pr, unsigned short tempax); | 372 | unsigned short SiS_GetCH700x(struct SiS_Private *SiS_Pr, unsigned short tempax); |
387 | void SiS_SetCH701x(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val); | 373 | void SiS_SetCH701x(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val); |
388 | unsigned short SiS_GetCH701x(struct SiS_Private *SiS_Pr, unsigned short tempax); | 374 | unsigned short SiS_GetCH701x(struct SiS_Private *SiS_Pr, unsigned short tempax); |
389 | #ifndef SIS_LINUX_KERNEL | ||
390 | void SiS_SetCH70xx(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val); | ||
391 | unsigned short SiS_GetCH70xx(struct SiS_Private *SiS_Pr, unsigned short tempax); | ||
392 | #endif | ||
393 | void SiS_SetCH70xxANDOR(struct SiS_Private *SiS_Pr, unsigned short reg, | 375 | void SiS_SetCH70xxANDOR(struct SiS_Private *SiS_Pr, unsigned short reg, |
394 | unsigned char orval,unsigned short andval); | 376 | unsigned char orval,unsigned short andval); |
395 | #ifdef SIS315H | 377 | #ifdef CONFIG_FB_SIS_315 |
396 | static void SiS_Chrontel701xOn(struct SiS_Private *SiS_Pr); | 378 | static void SiS_Chrontel701xOn(struct SiS_Private *SiS_Pr); |
397 | static void SiS_Chrontel701xOff(struct SiS_Private *SiS_Pr); | 379 | static void SiS_Chrontel701xOff(struct SiS_Private *SiS_Pr); |
398 | static void SiS_ChrontelInitTVVSync(struct SiS_Private *SiS_Pr); | 380 | static void SiS_ChrontelInitTVVSync(struct SiS_Private *SiS_Pr); |
@@ -401,7 +383,7 @@ void SiS_Chrontel701xBLOn(struct SiS_Private *SiS_Pr); | |||
401 | void SiS_Chrontel701xBLOff(struct SiS_Private *SiS_Pr); | 383 | void SiS_Chrontel701xBLOff(struct SiS_Private *SiS_Pr); |
402 | #endif /* 315 */ | 384 | #endif /* 315 */ |
403 | 385 | ||
404 | #ifdef SIS300 | 386 | #ifdef CONFIG_FB_SIS_300 |
405 | static bool SiS_SetTrumpionBlock(struct SiS_Private *SiS_Pr, unsigned char *dataptr); | 387 | static bool SiS_SetTrumpionBlock(struct SiS_Private *SiS_Pr, unsigned char *dataptr); |
406 | void SiS_SetChrontelGPIO(struct SiS_Private *SiS_Pr, unsigned short myvbinfo); | 388 | void SiS_SetChrontelGPIO(struct SiS_Private *SiS_Pr, unsigned short myvbinfo); |
407 | #endif | 389 | #endif |
@@ -412,21 +394,12 @@ unsigned short SiS_HandleDDC(struct SiS_Private *SiS_Pr, unsigned int VBFlags, i | |||
412 | unsigned short adaptnum, unsigned short DDCdatatype, | 394 | unsigned short adaptnum, unsigned short DDCdatatype, |
413 | unsigned char *buffer, unsigned int VBFlags2); | 395 | unsigned char *buffer, unsigned int VBFlags2); |
414 | 396 | ||
415 | #ifdef SIS_XORG_XF86 | ||
416 | unsigned short SiS_InitDDCRegs(struct SiS_Private *SiS_Pr, unsigned int VBFlags, | ||
417 | int VGAEngine, unsigned short adaptnum, unsigned short DDCdatatype, | ||
418 | bool checkcr32, unsigned int VBFlags2); | ||
419 | unsigned short SiS_ProbeDDC(struct SiS_Private *SiS_Pr); | ||
420 | unsigned short SiS_ReadDDC(struct SiS_Private *SiS_Pr, unsigned short DDCdatatype, | ||
421 | unsigned char *buffer); | ||
422 | #else | ||
423 | static unsigned short SiS_InitDDCRegs(struct SiS_Private *SiS_Pr, unsigned int VBFlags, | 397 | static unsigned short SiS_InitDDCRegs(struct SiS_Private *SiS_Pr, unsigned int VBFlags, |
424 | int VGAEngine, unsigned short adaptnum, unsigned short DDCdatatype, | 398 | int VGAEngine, unsigned short adaptnum, unsigned short DDCdatatype, |
425 | bool checkcr32, unsigned int VBFlags2); | 399 | bool checkcr32, unsigned int VBFlags2); |
426 | static unsigned short SiS_ProbeDDC(struct SiS_Private *SiS_Pr); | 400 | static unsigned short SiS_ProbeDDC(struct SiS_Private *SiS_Pr); |
427 | static unsigned short SiS_ReadDDC(struct SiS_Private *SiS_Pr, unsigned short DDCdatatype, | 401 | static unsigned short SiS_ReadDDC(struct SiS_Private *SiS_Pr, unsigned short DDCdatatype, |
428 | unsigned char *buffer); | 402 | unsigned char *buffer); |
429 | #endif | ||
430 | static void SiS_SetSwitchDDC2(struct SiS_Private *SiS_Pr); | 403 | static void SiS_SetSwitchDDC2(struct SiS_Private *SiS_Pr); |
431 | static unsigned short SiS_SetStart(struct SiS_Private *SiS_Pr); | 404 | static unsigned short SiS_SetStart(struct SiS_Private *SiS_Pr); |
432 | static unsigned short SiS_SetStop(struct SiS_Private *SiS_Pr); | 405 | static unsigned short SiS_SetStop(struct SiS_Private *SiS_Pr); |
@@ -441,13 +414,13 @@ static unsigned short SiS_PrepareDDC(struct SiS_Private *SiS_Pr); | |||
441 | static void SiS_SendACK(struct SiS_Private *SiS_Pr, unsigned short yesno); | 414 | static void SiS_SendACK(struct SiS_Private *SiS_Pr, unsigned short yesno); |
442 | static unsigned short SiS_DoProbeDDC(struct SiS_Private *SiS_Pr); | 415 | static unsigned short SiS_DoProbeDDC(struct SiS_Private *SiS_Pr); |
443 | 416 | ||
444 | #ifdef SIS300 | 417 | #ifdef CONFIG_FB_SIS_300 |
445 | static void SiS_OEM300Setting(struct SiS_Private *SiS_Pr, | 418 | static void SiS_OEM300Setting(struct SiS_Private *SiS_Pr, |
446 | unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefTabindex); | 419 | unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefTabindex); |
447 | static void SetOEMLCDData2(struct SiS_Private *SiS_Pr, | 420 | static void SetOEMLCDData2(struct SiS_Private *SiS_Pr, |
448 | unsigned short ModeNo, unsigned short ModeIdIndex,unsigned short RefTableIndex); | 421 | unsigned short ModeNo, unsigned short ModeIdIndex,unsigned short RefTableIndex); |
449 | #endif | 422 | #endif |
450 | #ifdef SIS315H | 423 | #ifdef CONFIG_FB_SIS_315 |
451 | static void SiS_OEM310Setting(struct SiS_Private *SiS_Pr, | 424 | static void SiS_OEM310Setting(struct SiS_Private *SiS_Pr, |
452 | unsigned short ModeNo,unsigned short ModeIdIndex, unsigned short RRTI); | 425 | unsigned short ModeNo,unsigned short ModeIdIndex, unsigned short RRTI); |
453 | static void SiS_OEM661Setting(struct SiS_Private *SiS_Pr, | 426 | static void SiS_OEM661Setting(struct SiS_Private *SiS_Pr, |
@@ -482,15 +455,13 @@ extern void SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short M | |||
482 | extern void SiS_CalcCRRegisters(struct SiS_Private *SiS_Pr, int depth); | 455 | extern void SiS_CalcCRRegisters(struct SiS_Private *SiS_Pr, int depth); |
483 | extern unsigned short SiS_GetRefCRTVCLK(struct SiS_Private *SiS_Pr, unsigned short Index, int UseWide); | 456 | extern unsigned short SiS_GetRefCRTVCLK(struct SiS_Private *SiS_Pr, unsigned short Index, int UseWide); |
484 | extern unsigned short SiS_GetRefCRT1CRTC(struct SiS_Private *SiS_Pr, unsigned short Index, int UseWide); | 457 | extern unsigned short SiS_GetRefCRT1CRTC(struct SiS_Private *SiS_Pr, unsigned short Index, int UseWide); |
485 | #ifdef SIS300 | 458 | #ifdef CONFIG_FB_SIS_300 |
486 | extern void SiS_GetFIFOThresholdIndex300(struct SiS_Private *SiS_Pr, unsigned short *tempbx, | 459 | extern void SiS_GetFIFOThresholdIndex300(struct SiS_Private *SiS_Pr, unsigned short *tempbx, |
487 | unsigned short *tempcl); | 460 | unsigned short *tempcl); |
488 | extern unsigned short SiS_GetFIFOThresholdB300(unsigned short tempbx, unsigned short tempcl); | 461 | extern unsigned short SiS_GetFIFOThresholdB300(unsigned short tempbx, unsigned short tempcl); |
489 | extern unsigned short SiS_GetLatencyFactor630(struct SiS_Private *SiS_Pr, unsigned short index); | 462 | extern unsigned short SiS_GetLatencyFactor630(struct SiS_Private *SiS_Pr, unsigned short index); |
490 | #ifdef SIS_LINUX_KERNEL | ||
491 | extern unsigned int sisfb_read_nbridge_pci_dword(struct SiS_Private *SiS_Pr, int reg); | 463 | extern unsigned int sisfb_read_nbridge_pci_dword(struct SiS_Private *SiS_Pr, int reg); |
492 | extern unsigned int sisfb_read_lpc_pci_dword(struct SiS_Private *SiS_Pr, int reg); | 464 | extern unsigned int sisfb_read_lpc_pci_dword(struct SiS_Private *SiS_Pr, int reg); |
493 | #endif | 465 | #endif |
494 | #endif | ||
495 | 466 | ||
496 | #endif | 467 | #endif |
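init301.h is the header side of the same cleanup: the osdef.h include goes away, the prototypes that existed only for the XFree86 build (SiS_LockCRT2, SiS_EnableBridge, the non-static SiS_SetCH70xx/SiS_GetCH70xx and DDC helpers) are dropped, and the remaining DDC helpers keep only their static declarations. The corresponding pattern in init301.c collapses the same way, sketched here as bare prototypes rather than a literal hunk:

/* before: exported for X.org, static for the kernel build */
#ifndef SIS_XORG_XF86
static
#endif
unsigned short SiS_ProbeDDC(struct SiS_Private *SiS_Pr);

/* after: always internal to the kernel framebuffer driver */
static unsigned short SiS_ProbeDDC(struct SiS_Private *SiS_Pr);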
diff --git a/drivers/video/sis/initextlfb.c b/drivers/video/sis/initextlfb.c index 99c04a4855d1..9dec64da4015 100644 --- a/drivers/video/sis/initextlfb.c +++ b/drivers/video/sis/initextlfb.c | |||
@@ -25,7 +25,6 @@ | |||
25 | * Author: Thomas Winischhofer <thomas@winischhofer.net> | 25 | * Author: Thomas Winischhofer <thomas@winischhofer.net> |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #include "osdef.h" | ||
29 | #include "initdef.h" | 28 | #include "initdef.h" |
30 | #include "vgatypes.h" | 29 | #include "vgatypes.h" |
31 | #include "vstruct.h" | 30 | #include "vstruct.h" |
@@ -59,7 +58,7 @@ sisfb_mode_rate_to_dclock(struct SiS_Private *SiS_Pr, unsigned char modeno, | |||
59 | 58 | ||
60 | if(rateindex > 0) rateindex--; | 59 | if(rateindex > 0) rateindex--; |
61 | 60 | ||
62 | #ifdef SIS315H | 61 | #ifdef CONFIG_FB_SIS_315 |
63 | switch(ModeNo) { | 62 | switch(ModeNo) { |
64 | case 0x5a: ModeNo = 0x50; break; | 63 | case 0x5a: ModeNo = 0x50; break; |
65 | case 0x5b: ModeNo = 0x56; | 64 | case 0x5b: ModeNo = 0x56; |
@@ -103,7 +102,7 @@ sisfb_mode_rate_to_ddata(struct SiS_Private *SiS_Pr, unsigned char modeno, | |||
103 | 102 | ||
104 | if(rateindex > 0) rateindex--; | 103 | if(rateindex > 0) rateindex--; |
105 | 104 | ||
106 | #ifdef SIS315H | 105 | #ifdef CONFIG_FB_SIS_315 |
107 | switch(ModeNo) { | 106 | switch(ModeNo) { |
108 | case 0x5a: ModeNo = 0x50; break; | 107 | case 0x5a: ModeNo = 0x50; break; |
109 | case 0x5b: ModeNo = 0x56; | 108 | case 0x5b: ModeNo = 0x56; |
@@ -187,7 +186,7 @@ sisfb_gettotalfrommode(struct SiS_Private *SiS_Pr, unsigned char modeno, int *ht | |||
187 | 186 | ||
188 | if(rateindex > 0) rateindex--; | 187 | if(rateindex > 0) rateindex--; |
189 | 188 | ||
190 | #ifdef SIS315H | 189 | #ifdef CONFIG_FB_SIS_315 |
191 | switch(ModeNo) { | 190 | switch(ModeNo) { |
192 | case 0x5a: ModeNo = 0x50; break; | 191 | case 0x5a: ModeNo = 0x50; break; |
193 | case 0x5b: ModeNo = 0x56; | 192 | case 0x5b: ModeNo = 0x56; |
diff --git a/drivers/video/sis/osdef.h b/drivers/video/sis/osdef.h deleted file mode 100644 index 6ff8f988a1a7..000000000000 --- a/drivers/video/sis/osdef.h +++ /dev/null | |||
@@ -1,133 +0,0 @@ | |||
1 | /* $XFree86$ */ | ||
2 | /* $XdotOrg$ */ | ||
3 | /* | ||
4 | * OS depending defines | ||
5 | * | ||
6 | * Copyright (C) 2001-2005 by Thomas Winischhofer, Vienna, Austria | ||
7 | * | ||
8 | * If distributed as part of the Linux kernel, the following license terms | ||
9 | * apply: | ||
10 | * | ||
11 | * * This program is free software; you can redistribute it and/or modify | ||
12 | * * it under the terms of the GNU General Public License as published by | ||
13 | * * the Free Software Foundation; either version 2 of the named License, | ||
14 | * * or any later version. | ||
15 | * * | ||
16 | * * This program is distributed in the hope that it will be useful, | ||
17 | * * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | * * GNU General Public License for more details. | ||
20 | * * | ||
21 | * * You should have received a copy of the GNU General Public License | ||
22 | * * along with this program; if not, write to the Free Software | ||
23 | * * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA | ||
24 | * | ||
25 | * Otherwise, the following license terms apply: | ||
26 | * | ||
27 | * * Redistribution and use in source and binary forms, with or without | ||
28 | * * modification, are permitted provided that the following conditions | ||
29 | * * are met: | ||
30 | * * 1) Redistributions of source code must retain the above copyright | ||
31 | * * notice, this list of conditions and the following disclaimer. | ||
32 | * * 2) Redistributions in binary form must reproduce the above copyright | ||
33 | * * notice, this list of conditions and the following disclaimer in the | ||
34 | * * documentation and/or other materials provided with the distribution. | ||
35 | * * 3) The name of the author may not be used to endorse or promote products | ||
36 | * * derived from this software without specific prior written permission. | ||
37 | * * | ||
38 | * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | ||
39 | * * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | ||
40 | * * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | ||
41 | * * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
42 | * * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
43 | * * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
44 | * * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
45 | * * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
46 | * * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
47 | * * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
48 | * | ||
49 | * Author: Thomas Winischhofer <thomas@winischhofer.net> | ||
50 | * Silicon Integrated Systems, Inc. (used by permission) | ||
51 | * | ||
52 | */ | ||
53 | |||
54 | #ifndef _SIS_OSDEF_H_ | ||
55 | #define _SIS_OSDEF_H_ | ||
56 | |||
57 | /* The choices are: */ | ||
58 | #define SIS_LINUX_KERNEL /* Linux kernel framebuffer */ | ||
59 | #undef SIS_XORG_XF86 /* XFree86/X.org */ | ||
60 | |||
61 | #ifdef OutPortByte | ||
62 | #undef OutPortByte | ||
63 | #endif | ||
64 | |||
65 | #ifdef OutPortWord | ||
66 | #undef OutPortWord | ||
67 | #endif | ||
68 | |||
69 | #ifdef OutPortLong | ||
70 | #undef OutPortLong | ||
71 | #endif | ||
72 | |||
73 | #ifdef InPortByte | ||
74 | #undef InPortByte | ||
75 | #endif | ||
76 | |||
77 | #ifdef InPortWord | ||
78 | #undef InPortWord | ||
79 | #endif | ||
80 | |||
81 | #ifdef InPortLong | ||
82 | #undef InPortLong | ||
83 | #endif | ||
84 | |||
85 | /**********************************************************************/ | ||
86 | /* LINUX KERNEL */ | ||
87 | /**********************************************************************/ | ||
88 | |||
89 | #ifdef SIS_LINUX_KERNEL | ||
90 | |||
91 | #ifdef CONFIG_FB_SIS_300 | ||
92 | #define SIS300 | ||
93 | #endif | ||
94 | |||
95 | #ifdef CONFIG_FB_SIS_315 | ||
96 | #define SIS315H | ||
97 | #endif | ||
98 | |||
99 | #if !defined(SIS300) && !defined(SIS315H) | ||
100 | #warning Neither CONFIG_FB_SIS_300 nor CONFIG_FB_SIS_315 is set | ||
101 | #warning sisfb will not work! | ||
102 | #endif | ||
103 | |||
104 | #define OutPortByte(p,v) outb((u8)(v),(SISIOADDRESS)(p)) | ||
105 | #define OutPortWord(p,v) outw((u16)(v),(SISIOADDRESS)(p)) | ||
106 | #define OutPortLong(p,v) outl((u32)(v),(SISIOADDRESS)(p)) | ||
107 | #define InPortByte(p) inb((SISIOADDRESS)(p)) | ||
108 | #define InPortWord(p) inw((SISIOADDRESS)(p)) | ||
109 | #define InPortLong(p) inl((SISIOADDRESS)(p)) | ||
110 | #define SiS_SetMemory(MemoryAddress,MemorySize,value) memset_io(MemoryAddress, value, MemorySize) | ||
111 | |||
112 | #endif /* LINUX_KERNEL */ | ||
113 | |||
114 | /**********************************************************************/ | ||
115 | /* XFree86/X.org */ | ||
116 | /**********************************************************************/ | ||
117 | |||
118 | #ifdef SIS_XORG_XF86 | ||
119 | |||
120 | #define SIS300 | ||
121 | #define SIS315H | ||
122 | |||
123 | #define OutPortByte(p,v) outSISREG((IOADDRESS)(p),(CARD8)(v)) | ||
124 | #define OutPortWord(p,v) outSISREGW((IOADDRESS)(p),(CARD16)(v)) | ||
125 | #define OutPortLong(p,v) outSISREGL((IOADDRESS)(p),(CARD32)(v)) | ||
126 | #define InPortByte(p) inSISREG((IOADDRESS)(p)) | ||
127 | #define InPortWord(p) inSISREGW((IOADDRESS)(p)) | ||
128 | #define InPortLong(p) inSISREGL((IOADDRESS)(p)) | ||
129 | #define SiS_SetMemory(MemoryAddress,MemorySize,value) memset(MemoryAddress, value, MemorySize) | ||
130 | |||
131 | #endif /* XF86 */ | ||
132 | |||
133 | #endif /* _OSDEF_H_ */ | ||
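osdef.h was the OS-abstraction shim that let this init code build either as the Linux framebuffer driver or inside the XFree86/X.org SiS driver: it selected SIS_LINUX_KERNEL vs. SIS_XORG_XF86, mapped the OutPortByte/InPortByte-style I/O wrappers for each target, and translated CONFIG_FB_SIS_300/CONFIG_FB_SIS_315 into the SIS300/SIS315H aliases. With the X.org path dropped, the whole header is deleted; the one check worth keeping, the build-time warning when neither chipset family is configured, moves into sis_main.c (see the hunk below):

/* Kept behaviour, new home (sis_main.c): warn at build time if sisfb
 * is enabled without either chipset family. */
#if !defined(CONFIG_FB_SIS_300) && !defined(CONFIG_FB_SIS_315)
#warning Neither CONFIG_FB_SIS_300 nor CONFIG_FB_SIS_315 is set
#warning sisfb will not work!
#endif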
diff --git a/drivers/video/sis/sis.h b/drivers/video/sis/sis.h index 7c5710e3fb56..80d89d37c414 100644 --- a/drivers/video/sis/sis.h +++ b/drivers/video/sis/sis.h | |||
@@ -24,7 +24,6 @@ | |||
24 | #ifndef _SIS_H_ | 24 | #ifndef _SIS_H_ |
25 | #define _SIS_H_ | 25 | #define _SIS_H_ |
26 | 26 | ||
27 | #include "osdef.h" | ||
28 | #include <video/sisfb.h> | 27 | #include <video/sisfb.h> |
29 | 28 | ||
30 | #include "vgatypes.h" | 29 | #include "vgatypes.h" |
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c index 3dde12b0ab06..7e3370f115b6 100644 --- a/drivers/video/sis/sis_main.c +++ b/drivers/video/sis/sis_main.c | |||
@@ -60,6 +60,11 @@ | |||
60 | #include "sis.h" | 60 | #include "sis.h" |
61 | #include "sis_main.h" | 61 | #include "sis_main.h" |
62 | 62 | ||
63 | #if !defined(CONFIG_FB_SIS_300) && !defined(CONFIG_FB_SIS_315) | ||
64 | #warning Neither CONFIG_FB_SIS_300 nor CONFIG_FB_SIS_315 is set | ||
65 | #warning sisfb will not work! | ||
66 | #endif | ||
67 | |||
63 | static void sisfb_handle_command(struct sis_video_info *ivideo, | 68 | static void sisfb_handle_command(struct sis_video_info *ivideo, |
64 | struct sisfb_cmd *sisfb_command); | 69 | struct sisfb_cmd *sisfb_command); |
65 | 70 | ||
@@ -4114,14 +4119,6 @@ sisfb_find_rom(struct pci_dev *pdev) | |||
4114 | if(sisfb_check_rom(rom_base, ivideo)) { | 4119 | if(sisfb_check_rom(rom_base, ivideo)) { |
4115 | 4120 | ||
4116 | if((myrombase = vmalloc(65536))) { | 4121 | if((myrombase = vmalloc(65536))) { |
4117 | |||
4118 | /* Work around bug in pci/rom.c: Folks forgot to check | ||
4119 | * whether the size retrieved from the BIOS image eventually | ||
4120 | * is larger than the mapped size | ||
4121 | */ | ||
4122 | if(pci_resource_len(pdev, PCI_ROM_RESOURCE) < romsize) | ||
4123 | romsize = pci_resource_len(pdev, PCI_ROM_RESOURCE); | ||
4124 | |||
4125 | memcpy_fromio(myrombase, rom_base, | 4122 | memcpy_fromio(myrombase, rom_base, |
4126 | (romsize > 65536) ? 65536 : romsize); | 4123 | (romsize > 65536) ? 65536 : romsize); |
4127 | } | 4124 | } |
@@ -4155,23 +4152,6 @@ sisfb_find_rom(struct pci_dev *pdev) | |||
4155 | 4152 | ||
4156 | } | 4153 | } |
4157 | 4154 | ||
4158 | #else | ||
4159 | |||
4160 | pci_read_config_dword(pdev, PCI_ROM_ADDRESS, &temp); | ||
4161 | pci_write_config_dword(pdev, PCI_ROM_ADDRESS, | ||
4162 | (ivideo->video_base & PCI_ROM_ADDRESS_MASK) | PCI_ROM_ADDRESS_ENABLE); | ||
4163 | |||
4164 | rom_base = ioremap(ivideo->video_base, 65536); | ||
4165 | if(rom_base) { | ||
4166 | if(sisfb_check_rom(rom_base, ivideo)) { | ||
4167 | if((myrombase = vmalloc(65536))) | ||
4168 | memcpy_fromio(myrombase, rom_base, 65536); | ||
4169 | } | ||
4170 | iounmap(rom_base); | ||
4171 | } | ||
4172 | |||
4173 | pci_write_config_dword(pdev, PCI_ROM_ADDRESS, temp); | ||
4174 | |||
4175 | #endif | 4155 | #endif |
4176 | 4156 | ||
4177 | return myrombase; | 4157 | return myrombase; |
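In sisfb_find_rom() the diff drops two things: the clamp that worked around pci/rom.c reporting a ROM size larger than the mapped resource, and the fallback branch that mapped the ROM by programming PCI_ROM_ADDRESS directly. What survives is the straightforward flow already visible in the context lines (sketch only):

/* Sketch of the remaining flow in sisfb_find_rom(), per the hunks above. */
if (sisfb_check_rom(rom_base, ivideo)) {
	if ((myrombase = vmalloc(65536))) {
		memcpy_fromio(myrombase, rom_base,
			      (romsize > 65536) ? 65536 : romsize);
	}
}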
diff --git a/drivers/video/sis/vgatypes.h b/drivers/video/sis/vgatypes.h index 81a22eaabfde..12c0dfaf2518 100644 --- a/drivers/video/sis/vgatypes.h +++ b/drivers/video/sis/vgatypes.h | |||
@@ -55,21 +55,10 @@ | |||
55 | 55 | ||
56 | #define SISIOMEMTYPE | 56 | #define SISIOMEMTYPE |
57 | 57 | ||
58 | #ifdef SIS_LINUX_KERNEL | ||
59 | typedef unsigned long SISIOADDRESS; | 58 | typedef unsigned long SISIOADDRESS; |
60 | #include <linux/types.h> /* Need __iomem */ | 59 | #include <linux/types.h> /* Need __iomem */ |
61 | #undef SISIOMEMTYPE | 60 | #undef SISIOMEMTYPE |
62 | #define SISIOMEMTYPE __iomem | 61 | #define SISIOMEMTYPE __iomem |
63 | #endif | ||
64 | |||
65 | #ifdef SIS_XORG_XF86 | ||
66 | #if XF86_VERSION_CURRENT < XF86_VERSION_NUMERIC(4,2,0,0,0) | ||
67 | typedef unsigned long IOADDRESS; | ||
68 | typedef unsigned long SISIOADDRESS; | ||
69 | #else | ||
70 | typedef IOADDRESS SISIOADDRESS; | ||
71 | #endif | ||
72 | #endif | ||
73 | 62 | ||
74 | typedef enum _SIS_CHIP_TYPE { | 63 | typedef enum _SIS_CHIP_TYPE { |
75 | SIS_VGALegacy = 0, | 64 | SIS_VGALegacy = 0, |
diff --git a/drivers/video/sis/vstruct.h b/drivers/video/sis/vstruct.h index bef4aae388d0..ea94d214dcff 100644 --- a/drivers/video/sis/vstruct.h +++ b/drivers/video/sis/vstruct.h | |||
@@ -233,24 +233,15 @@ struct SiS_Private | |||
233 | { | 233 | { |
234 | unsigned char ChipType; | 234 | unsigned char ChipType; |
235 | unsigned char ChipRevision; | 235 | unsigned char ChipRevision; |
236 | #ifdef SIS_XORG_XF86 | ||
237 | PCITAG PciTag; | ||
238 | #endif | ||
239 | #ifdef SIS_LINUX_KERNEL | ||
240 | void *ivideo; | 236 | void *ivideo; |
241 | #endif | ||
242 | unsigned char *VirtualRomBase; | 237 | unsigned char *VirtualRomBase; |
243 | bool UseROM; | 238 | bool UseROM; |
244 | #ifdef SIS_LINUX_KERNEL | ||
245 | unsigned char SISIOMEMTYPE *VideoMemoryAddress; | 239 | unsigned char SISIOMEMTYPE *VideoMemoryAddress; |
246 | unsigned int VideoMemorySize; | 240 | unsigned int VideoMemorySize; |
247 | #endif | ||
248 | SISIOADDRESS IOAddress; | 241 | SISIOADDRESS IOAddress; |
249 | SISIOADDRESS IOAddress2; /* For dual chip XGI volari */ | 242 | SISIOADDRESS IOAddress2; /* For dual chip XGI volari */ |
250 | 243 | ||
251 | #ifdef SIS_LINUX_KERNEL | ||
252 | SISIOADDRESS RelIO; | 244 | SISIOADDRESS RelIO; |
253 | #endif | ||
254 | SISIOADDRESS SiS_P3c4; | 245 | SISIOADDRESS SiS_P3c4; |
255 | SISIOADDRESS SiS_P3d4; | 246 | SISIOADDRESS SiS_P3d4; |
256 | SISIOADDRESS SiS_P3c0; | 247 | SISIOADDRESS SiS_P3c0; |
@@ -280,9 +271,6 @@ struct SiS_Private | |||
280 | unsigned short SiS_IF_DEF_FSTN; | 271 | unsigned short SiS_IF_DEF_FSTN; |
281 | unsigned short SiS_SysFlags; | 272 | unsigned short SiS_SysFlags; |
282 | unsigned char SiS_VGAINFO; | 273 | unsigned char SiS_VGAINFO; |
283 | #ifdef SIS_XORG_XF86 | ||
284 | unsigned short SiS_CP1, SiS_CP2, SiS_CP3, SiS_CP4; | ||
285 | #endif | ||
286 | bool SiS_UseROM; | 274 | bool SiS_UseROM; |
287 | bool SiS_ROMNew; | 275 | bool SiS_ROMNew; |
288 | bool SiS_XGIROM; | 276 | bool SiS_XGIROM; |
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 3a43ebf83a49..efb35aa8309a 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c | |||
@@ -9,19 +9,19 @@ static ssize_t device_show(struct device *_d, | |||
9 | struct device_attribute *attr, char *buf) | 9 | struct device_attribute *attr, char *buf) |
10 | { | 10 | { |
11 | struct virtio_device *dev = container_of(_d,struct virtio_device,dev); | 11 | struct virtio_device *dev = container_of(_d,struct virtio_device,dev); |
12 | return sprintf(buf, "%hu", dev->id.device); | 12 | return sprintf(buf, "0x%04x\n", dev->id.device); |
13 | } | 13 | } |
14 | static ssize_t vendor_show(struct device *_d, | 14 | static ssize_t vendor_show(struct device *_d, |
15 | struct device_attribute *attr, char *buf) | 15 | struct device_attribute *attr, char *buf) |
16 | { | 16 | { |
17 | struct virtio_device *dev = container_of(_d,struct virtio_device,dev); | 17 | struct virtio_device *dev = container_of(_d,struct virtio_device,dev); |
18 | return sprintf(buf, "%hu", dev->id.vendor); | 18 | return sprintf(buf, "0x%04x\n", dev->id.vendor); |
19 | } | 19 | } |
20 | static ssize_t status_show(struct device *_d, | 20 | static ssize_t status_show(struct device *_d, |
21 | struct device_attribute *attr, char *buf) | 21 | struct device_attribute *attr, char *buf) |
22 | { | 22 | { |
23 | struct virtio_device *dev = container_of(_d,struct virtio_device,dev); | 23 | struct virtio_device *dev = container_of(_d,struct virtio_device,dev); |
24 | return sprintf(buf, "0x%08x", dev->config->get_status(dev)); | 24 | return sprintf(buf, "0x%08x\n", dev->config->get_status(dev)); |
25 | } | 25 | } |
26 | static ssize_t modalias_show(struct device *_d, | 26 | static ssize_t modalias_show(struct device *_d, |
27 | struct device_attribute *attr, char *buf) | 27 | struct device_attribute *attr, char *buf) |
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 1475ed6b575f..cc2f73e03475 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c | |||
@@ -230,9 +230,6 @@ add_head: | |||
230 | pr_debug("Added buffer head %i to %p\n", head, vq); | 230 | pr_debug("Added buffer head %i to %p\n", head, vq); |
231 | END_USE(vq); | 231 | END_USE(vq); |
232 | 232 | ||
233 | /* If we're indirect, we can fit many (assuming not OOM). */ | ||
234 | if (vq->indirect) | ||
235 | return vq->num_free ? vq->vring.num : 0; | ||
236 | return vq->num_free; | 233 | return vq->num_free; |
237 | } | 234 | } |
238 | EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp); | 235 | EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp); |
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index eb8a78d77d9d..533a199e7a3f 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile | |||
@@ -8,9 +8,12 @@ obj-$(CONFIG_BLOCK) += biomerge.o | |||
8 | obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o | 8 | obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o |
9 | obj-$(CONFIG_XEN_XENCOMM) += xencomm.o | 9 | obj-$(CONFIG_XEN_XENCOMM) += xencomm.o |
10 | obj-$(CONFIG_XEN_BALLOON) += balloon.o | 10 | obj-$(CONFIG_XEN_BALLOON) += balloon.o |
11 | obj-$(CONFIG_XEN_DEV_EVTCHN) += evtchn.o | 11 | obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o |
12 | obj-$(CONFIG_XENFS) += xenfs/ | 12 | obj-$(CONFIG_XENFS) += xenfs/ |
13 | obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o | 13 | obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o |
14 | obj-$(CONFIG_XEN_PLATFORM_PCI) += platform-pci.o | 14 | obj-$(CONFIG_XEN_PLATFORM_PCI) += platform-pci.o |
15 | obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o | 15 | obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o |
16 | obj-$(CONFIG_XEN_DOM0) += pci.o | 16 | obj-$(CONFIG_XEN_DOM0) += pci.o |
17 | |||
18 | xen-evtchn-y := evtchn.o | ||
19 | |||
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 500290b150bb..2b17ad5b4b32 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c | |||
@@ -50,6 +50,7 @@ | |||
50 | #include <asm/pgtable.h> | 50 | #include <asm/pgtable.h> |
51 | #include <asm/uaccess.h> | 51 | #include <asm/uaccess.h> |
52 | #include <asm/tlb.h> | 52 | #include <asm/tlb.h> |
53 | #include <asm/e820.h> | ||
53 | 54 | ||
54 | #include <asm/xen/hypervisor.h> | 55 | #include <asm/xen/hypervisor.h> |
55 | #include <asm/xen/hypercall.h> | 56 | #include <asm/xen/hypercall.h> |
@@ -119,7 +120,7 @@ static void scrub_page(struct page *page) | |||
119 | } | 120 | } |
120 | 121 | ||
121 | /* balloon_append: add the given page to the balloon. */ | 122 | /* balloon_append: add the given page to the balloon. */ |
122 | static void balloon_append(struct page *page) | 123 | static void __balloon_append(struct page *page) |
123 | { | 124 | { |
124 | /* Lowmem is re-populated first, so highmem pages go at list tail. */ | 125 | /* Lowmem is re-populated first, so highmem pages go at list tail. */ |
125 | if (PageHighMem(page)) { | 126 | if (PageHighMem(page)) { |
@@ -130,7 +131,11 @@ static void balloon_append(struct page *page) | |||
130 | list_add(&page->lru, &ballooned_pages); | 131 | list_add(&page->lru, &ballooned_pages); |
131 | balloon_stats.balloon_low++; | 132 | balloon_stats.balloon_low++; |
132 | } | 133 | } |
134 | } | ||
133 | 135 | ||
136 | static void balloon_append(struct page *page) | ||
137 | { | ||
138 | __balloon_append(page); | ||
134 | totalram_pages--; | 139 | totalram_pages--; |
135 | } | 140 | } |
136 | 141 | ||
@@ -191,7 +196,7 @@ static unsigned long current_target(void) | |||
191 | 196 | ||
192 | static int increase_reservation(unsigned long nr_pages) | 197 | static int increase_reservation(unsigned long nr_pages) |
193 | { | 198 | { |
194 | unsigned long pfn, i, flags; | 199 | unsigned long pfn, i; |
195 | struct page *page; | 200 | struct page *page; |
196 | long rc; | 201 | long rc; |
197 | struct xen_memory_reservation reservation = { | 202 | struct xen_memory_reservation reservation = { |
@@ -203,8 +208,6 @@ static int increase_reservation(unsigned long nr_pages) | |||
203 | if (nr_pages > ARRAY_SIZE(frame_list)) | 208 | if (nr_pages > ARRAY_SIZE(frame_list)) |
204 | nr_pages = ARRAY_SIZE(frame_list); | 209 | nr_pages = ARRAY_SIZE(frame_list); |
205 | 210 | ||
206 | spin_lock_irqsave(&xen_reservation_lock, flags); | ||
207 | |||
208 | page = balloon_first_page(); | 211 | page = balloon_first_page(); |
209 | for (i = 0; i < nr_pages; i++) { | 212 | for (i = 0; i < nr_pages; i++) { |
210 | BUG_ON(page == NULL); | 213 | BUG_ON(page == NULL); |
@@ -247,14 +250,12 @@ static int increase_reservation(unsigned long nr_pages) | |||
247 | balloon_stats.current_pages += rc; | 250 | balloon_stats.current_pages += rc; |
248 | 251 | ||
249 | out: | 252 | out: |
250 | spin_unlock_irqrestore(&xen_reservation_lock, flags); | ||
251 | |||
252 | return rc < 0 ? rc : rc != nr_pages; | 253 | return rc < 0 ? rc : rc != nr_pages; |
253 | } | 254 | } |
254 | 255 | ||
255 | static int decrease_reservation(unsigned long nr_pages) | 256 | static int decrease_reservation(unsigned long nr_pages) |
256 | { | 257 | { |
257 | unsigned long pfn, i, flags; | 258 | unsigned long pfn, i; |
258 | struct page *page; | 259 | struct page *page; |
259 | int need_sleep = 0; | 260 | int need_sleep = 0; |
260 | int ret; | 261 | int ret; |
@@ -292,8 +293,6 @@ static int decrease_reservation(unsigned long nr_pages) | |||
292 | kmap_flush_unused(); | 293 | kmap_flush_unused(); |
293 | flush_tlb_all(); | 294 | flush_tlb_all(); |
294 | 295 | ||
295 | spin_lock_irqsave(&xen_reservation_lock, flags); | ||
296 | |||
297 | /* No more mappings: invalidate P2M and add to balloon. */ | 296 | /* No more mappings: invalidate P2M and add to balloon. */ |
298 | for (i = 0; i < nr_pages; i++) { | 297 | for (i = 0; i < nr_pages; i++) { |
299 | pfn = mfn_to_pfn(frame_list[i]); | 298 | pfn = mfn_to_pfn(frame_list[i]); |
@@ -308,8 +307,6 @@ static int decrease_reservation(unsigned long nr_pages) | |||
308 | 307 | ||
309 | balloon_stats.current_pages -= nr_pages; | 308 | balloon_stats.current_pages -= nr_pages; |
310 | 309 | ||
311 | spin_unlock_irqrestore(&xen_reservation_lock, flags); | ||
312 | |||
313 | return need_sleep; | 310 | return need_sleep; |
314 | } | 311 | } |
315 | 312 | ||
@@ -395,7 +392,7 @@ static struct notifier_block xenstore_notifier; | |||
395 | 392 | ||
396 | static int __init balloon_init(void) | 393 | static int __init balloon_init(void) |
397 | { | 394 | { |
398 | unsigned long pfn; | 395 | unsigned long pfn, extra_pfn_end; |
399 | struct page *page; | 396 | struct page *page; |
400 | 397 | ||
401 | if (!xen_pv_domain()) | 398 | if (!xen_pv_domain()) |
@@ -416,10 +413,15 @@ static int __init balloon_init(void) | |||
416 | register_balloon(&balloon_sysdev); | 413 | register_balloon(&balloon_sysdev); |
417 | 414 | ||
418 | /* Initialise the balloon with excess memory space. */ | 415 | /* Initialise the balloon with excess memory space. */ |
419 | for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) { | 416 | extra_pfn_end = min(e820_end_of_ram_pfn(), |
417 | (unsigned long)PFN_DOWN(xen_extra_mem_start + xen_extra_mem_size)); | ||
418 | for (pfn = PFN_UP(xen_extra_mem_start); | ||
419 | pfn < extra_pfn_end; | ||
420 | pfn++) { | ||
420 | page = pfn_to_page(pfn); | 421 | page = pfn_to_page(pfn); |
421 | if (!PageReserved(page)) | 422 | /* totalram_pages doesn't include the boot-time |
422 | balloon_append(page); | 423 | balloon extension, so don't subtract from it. */ |
424 | __balloon_append(page); | ||
423 | } | 425 | } |
424 | 426 | ||
425 | target_watch.callback = watch_target; | 427 | target_watch.callback = watch_target; |
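The balloon driver change above splits balloon_append() into a raw __balloon_append(), which only queues the page, and a wrapper that additionally decrements totalram_pages; boot-time "extra memory" pages were never counted in totalram_pages, so they must be queued through the raw helper to keep the accounting straight. A rough userspace sketch of that split (list, counter, and values are illustrative, not the kernel's):

#include <stdio.h>

#define NPAGES 8

/* Toy model: a stack of ballooned page indices plus a RAM-page counter. */
static int ballooned[NPAGES];
static int nr_ballooned;
static long totalram_pages = NPAGES;

/* Queue a page on the balloon list without touching the accounting. */
static void __balloon_append(int pfn)
{
    ballooned[nr_ballooned++] = pfn;
}

/* Normal path: the page was counted as RAM, so account for its removal. */
static void balloon_append(int pfn)
{
    __balloon_append(pfn);
    totalram_pages--;
}

int main(void)
{
    balloon_append(3);    /* page taken away from the running system */
    __balloon_append(7);  /* boot-time extra page, never counted as RAM */
    printf("ballooned=%d totalram_pages=%ld\n", nr_ballooned, totalram_pages);
    return 0;
}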
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 321a0c8346e5..2811bb988ea0 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
@@ -278,17 +278,17 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu) | |||
278 | cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu)); | 278 | cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu)); |
279 | #endif | 279 | #endif |
280 | 280 | ||
281 | __clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq))); | 281 | clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq))); |
282 | __set_bit(chn, cpu_evtchn_mask(cpu)); | 282 | set_bit(chn, cpu_evtchn_mask(cpu)); |
283 | 283 | ||
284 | irq_info[irq].cpu = cpu; | 284 | irq_info[irq].cpu = cpu; |
285 | } | 285 | } |
286 | 286 | ||
287 | static void init_evtchn_cpu_bindings(void) | 287 | static void init_evtchn_cpu_bindings(void) |
288 | { | 288 | { |
289 | int i; | ||
289 | #ifdef CONFIG_SMP | 290 | #ifdef CONFIG_SMP |
290 | struct irq_desc *desc; | 291 | struct irq_desc *desc; |
291 | int i; | ||
292 | 292 | ||
293 | /* By default all event channels notify CPU#0. */ | 293 | /* By default all event channels notify CPU#0. */ |
294 | for_each_irq_desc(i, desc) { | 294 | for_each_irq_desc(i, desc) { |
@@ -296,7 +296,10 @@ static void init_evtchn_cpu_bindings(void) | |||
296 | } | 296 | } |
297 | #endif | 297 | #endif |
298 | 298 | ||
299 | memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s)); | 299 | for_each_possible_cpu(i) |
300 | memset(cpu_evtchn_mask(i), | ||
301 | (i == 0) ? ~0 : 0, sizeof(struct cpu_evtchn_s)); | ||
302 | |||
300 | } | 303 | } |
301 | 304 | ||
302 | static inline void clear_evtchn(int port) | 305 | static inline void clear_evtchn(int port) |
@@ -752,7 +755,7 @@ int xen_destroy_irq(int irq) | |||
752 | goto out; | 755 | goto out; |
753 | 756 | ||
754 | if (xen_initial_domain()) { | 757 | if (xen_initial_domain()) { |
755 | unmap_irq.pirq = info->u.pirq.gsi; | 758 | unmap_irq.pirq = info->u.pirq.pirq; |
756 | unmap_irq.domid = DOMID_SELF; | 759 | unmap_irq.domid = DOMID_SELF; |
757 | rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq); | 760 | rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq); |
758 | if (rc) { | 761 | if (rc) { |
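Two details change in events.c above: per-channel CPU binding now uses the atomic set_bit()/clear_bit() variants, and init_evtchn_cpu_bindings() initialises the event mask of every possible CPU (all ones for CPU 0, all zeroes elsewhere) instead of touching only CPU 0's mask. A small sketch of that initialisation pattern with plain arrays (sizes and names are made up for the example):

#include <stdio.h>
#include <string.h>

#define NR_CPUS   4
#define MASK_LEN  8   /* bytes per per-CPU event mask in this toy model */

static unsigned char evtchn_mask[NR_CPUS][MASK_LEN];

/* By default every event channel notifies CPU 0, so CPU 0's mask is all
 * ones and every other CPU's mask is explicitly cleared. */
static void init_evtchn_cpu_bindings(void)
{
    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        memset(evtchn_mask[cpu], (cpu == 0) ? 0xff : 0x00, MASK_LEN);
}

int main(void)
{
    init_evtchn_cpu_bindings();
    printf("cpu0 first byte: 0x%02x, cpu1 first byte: 0x%02x\n",
           evtchn_mask[0][0], evtchn_mask[1][0]);
    return 0;
}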
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c index fec6ba3c08a8..ef11daf0cafe 100644 --- a/drivers/xen/evtchn.c +++ b/drivers/xen/evtchn.c | |||
@@ -69,20 +69,51 @@ struct per_user_data { | |||
69 | const char *name; | 69 | const char *name; |
70 | }; | 70 | }; |
71 | 71 | ||
72 | /* Who's bound to each port? */ | 72 | /* |
73 | static struct per_user_data *port_user[NR_EVENT_CHANNELS]; | 73 | * Who's bound to each port? This is logically an array of struct |
74 | * per_user_data *, but we encode the current enabled-state in bit 0. | ||
75 | */ | ||
76 | static unsigned long *port_user; | ||
74 | static DEFINE_SPINLOCK(port_user_lock); /* protects port_user[] and ring_prod */ | 77 | static DEFINE_SPINLOCK(port_user_lock); /* protects port_user[] and ring_prod */ |
75 | 78 | ||
76 | irqreturn_t evtchn_interrupt(int irq, void *data) | 79 | static inline struct per_user_data *get_port_user(unsigned port) |
80 | { | ||
81 | return (struct per_user_data *)(port_user[port] & ~1); | ||
82 | } | ||
83 | |||
84 | static inline void set_port_user(unsigned port, struct per_user_data *u) | ||
85 | { | ||
86 | port_user[port] = (unsigned long)u; | ||
87 | } | ||
88 | |||
89 | static inline bool get_port_enabled(unsigned port) | ||
90 | { | ||
91 | return port_user[port] & 1; | ||
92 | } | ||
93 | |||
94 | static inline void set_port_enabled(unsigned port, bool enabled) | ||
95 | { | ||
96 | if (enabled) | ||
97 | port_user[port] |= 1; | ||
98 | else | ||
99 | port_user[port] &= ~1; | ||
100 | } | ||
101 | |||
102 | static irqreturn_t evtchn_interrupt(int irq, void *data) | ||
77 | { | 103 | { |
78 | unsigned int port = (unsigned long)data; | 104 | unsigned int port = (unsigned long)data; |
79 | struct per_user_data *u; | 105 | struct per_user_data *u; |
80 | 106 | ||
81 | spin_lock(&port_user_lock); | 107 | spin_lock(&port_user_lock); |
82 | 108 | ||
83 | u = port_user[port]; | 109 | u = get_port_user(port); |
110 | |||
111 | WARN(!get_port_enabled(port), | ||
112 | "Interrupt for port %d, but apparently not enabled; per-user %p\n", | ||
113 | port, u); | ||
84 | 114 | ||
85 | disable_irq_nosync(irq); | 115 | disable_irq_nosync(irq); |
116 | set_port_enabled(port, false); | ||
86 | 117 | ||
87 | if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) { | 118 | if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) { |
88 | u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port; | 119 | u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port; |
@@ -92,9 +123,8 @@ irqreturn_t evtchn_interrupt(int irq, void *data) | |||
92 | kill_fasync(&u->evtchn_async_queue, | 123 | kill_fasync(&u->evtchn_async_queue, |
93 | SIGIO, POLL_IN); | 124 | SIGIO, POLL_IN); |
94 | } | 125 | } |
95 | } else { | 126 | } else |
96 | u->ring_overflow = 1; | 127 | u->ring_overflow = 1; |
97 | } | ||
98 | 128 | ||
99 | spin_unlock(&port_user_lock); | 129 | spin_unlock(&port_user_lock); |
100 | 130 | ||
@@ -198,9 +228,18 @@ static ssize_t evtchn_write(struct file *file, const char __user *buf, | |||
198 | goto out; | 228 | goto out; |
199 | 229 | ||
200 | spin_lock_irq(&port_user_lock); | 230 | spin_lock_irq(&port_user_lock); |
201 | for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) | 231 | |
202 | if ((kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u)) | 232 | for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) { |
203 | enable_irq(irq_from_evtchn(kbuf[i])); | 233 | unsigned port = kbuf[i]; |
234 | |||
235 | if (port < NR_EVENT_CHANNELS && | ||
236 | get_port_user(port) == u && | ||
237 | !get_port_enabled(port)) { | ||
238 | set_port_enabled(port, true); | ||
239 | enable_irq(irq_from_evtchn(port)); | ||
240 | } | ||
241 | } | ||
242 | |||
204 | spin_unlock_irq(&port_user_lock); | 243 | spin_unlock_irq(&port_user_lock); |
205 | 244 | ||
206 | rc = count; | 245 | rc = count; |
@@ -222,8 +261,9 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port) | |||
222 | * interrupt handler yet, and our caller has already | 261 | * interrupt handler yet, and our caller has already |
223 | * serialized bind operations.) | 262 | * serialized bind operations.) |
224 | */ | 263 | */ |
225 | BUG_ON(port_user[port] != NULL); | 264 | BUG_ON(get_port_user(port) != NULL); |
226 | port_user[port] = u; | 265 | set_port_user(port, u); |
266 | set_port_enabled(port, true); /* start enabled */ | ||
227 | 267 | ||
228 | rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED, | 268 | rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED, |
229 | u->name, (void *)(unsigned long)port); | 269 | u->name, (void *)(unsigned long)port); |
@@ -239,10 +279,7 @@ static void evtchn_unbind_from_user(struct per_user_data *u, int port) | |||
239 | 279 | ||
240 | unbind_from_irqhandler(irq, (void *)(unsigned long)port); | 280 | unbind_from_irqhandler(irq, (void *)(unsigned long)port); |
241 | 281 | ||
242 | /* make sure we unbind the irq handler before clearing the port */ | 282 | set_port_user(port, NULL); |
243 | barrier(); | ||
244 | |||
245 | port_user[port] = NULL; | ||
246 | } | 283 | } |
247 | 284 | ||
248 | static long evtchn_ioctl(struct file *file, | 285 | static long evtchn_ioctl(struct file *file, |
@@ -333,15 +370,17 @@ static long evtchn_ioctl(struct file *file, | |||
333 | spin_lock_irq(&port_user_lock); | 370 | spin_lock_irq(&port_user_lock); |
334 | 371 | ||
335 | rc = -ENOTCONN; | 372 | rc = -ENOTCONN; |
336 | if (port_user[unbind.port] != u) { | 373 | if (get_port_user(unbind.port) != u) { |
337 | spin_unlock_irq(&port_user_lock); | 374 | spin_unlock_irq(&port_user_lock); |
338 | break; | 375 | break; |
339 | } | 376 | } |
340 | 377 | ||
341 | evtchn_unbind_from_user(u, unbind.port); | 378 | disable_irq(irq_from_evtchn(unbind.port)); |
342 | 379 | ||
343 | spin_unlock_irq(&port_user_lock); | 380 | spin_unlock_irq(&port_user_lock); |
344 | 381 | ||
382 | evtchn_unbind_from_user(u, unbind.port); | ||
383 | |||
345 | rc = 0; | 384 | rc = 0; |
346 | break; | 385 | break; |
347 | } | 386 | } |
@@ -355,7 +394,7 @@ static long evtchn_ioctl(struct file *file, | |||
355 | 394 | ||
356 | if (notify.port >= NR_EVENT_CHANNELS) { | 395 | if (notify.port >= NR_EVENT_CHANNELS) { |
357 | rc = -EINVAL; | 396 | rc = -EINVAL; |
358 | } else if (port_user[notify.port] != u) { | 397 | } else if (get_port_user(notify.port) != u) { |
359 | rc = -ENOTCONN; | 398 | rc = -ENOTCONN; |
360 | } else { | 399 | } else { |
361 | notify_remote_via_evtchn(notify.port); | 400 | notify_remote_via_evtchn(notify.port); |
@@ -431,7 +470,7 @@ static int evtchn_open(struct inode *inode, struct file *filp) | |||
431 | 470 | ||
432 | filp->private_data = u; | 471 | filp->private_data = u; |
433 | 472 | ||
434 | return 0; | 473 | return nonseekable_open(inode, filp); |
435 | } | 474 | } |
436 | 475 | ||
437 | static int evtchn_release(struct inode *inode, struct file *filp) | 476 | static int evtchn_release(struct inode *inode, struct file *filp) |
@@ -444,14 +483,21 @@ static int evtchn_release(struct inode *inode, struct file *filp) | |||
444 | free_page((unsigned long)u->ring); | 483 | free_page((unsigned long)u->ring); |
445 | 484 | ||
446 | for (i = 0; i < NR_EVENT_CHANNELS; i++) { | 485 | for (i = 0; i < NR_EVENT_CHANNELS; i++) { |
447 | if (port_user[i] != u) | 486 | if (get_port_user(i) != u) |
448 | continue; | 487 | continue; |
449 | 488 | ||
450 | evtchn_unbind_from_user(port_user[i], i); | 489 | disable_irq(irq_from_evtchn(i)); |
451 | } | 490 | } |
452 | 491 | ||
453 | spin_unlock_irq(&port_user_lock); | 492 | spin_unlock_irq(&port_user_lock); |
454 | 493 | ||
494 | for (i = 0; i < NR_EVENT_CHANNELS; i++) { | ||
495 | if (get_port_user(i) != u) | ||
496 | continue; | ||
497 | |||
498 | evtchn_unbind_from_user(get_port_user(i), i); | ||
499 | } | ||
500 | |||
455 | kfree(u->name); | 501 | kfree(u->name); |
456 | kfree(u); | 502 | kfree(u); |
457 | 503 | ||
@@ -467,12 +513,12 @@ static const struct file_operations evtchn_fops = { | |||
467 | .fasync = evtchn_fasync, | 513 | .fasync = evtchn_fasync, |
468 | .open = evtchn_open, | 514 | .open = evtchn_open, |
469 | .release = evtchn_release, | 515 | .release = evtchn_release, |
470 | .llseek = noop_llseek, | 516 | .llseek = no_llseek, |
471 | }; | 517 | }; |
472 | 518 | ||
473 | static struct miscdevice evtchn_miscdev = { | 519 | static struct miscdevice evtchn_miscdev = { |
474 | .minor = MISC_DYNAMIC_MINOR, | 520 | .minor = MISC_DYNAMIC_MINOR, |
475 | .name = "evtchn", | 521 | .name = "xen/evtchn", |
476 | .fops = &evtchn_fops, | 522 | .fops = &evtchn_fops, |
477 | }; | 523 | }; |
478 | static int __init evtchn_init(void) | 524 | static int __init evtchn_init(void) |
@@ -482,8 +528,11 @@ static int __init evtchn_init(void) | |||
482 | if (!xen_domain()) | 528 | if (!xen_domain()) |
483 | return -ENODEV; | 529 | return -ENODEV; |
484 | 530 | ||
531 | port_user = kcalloc(NR_EVENT_CHANNELS, sizeof(*port_user), GFP_KERNEL); | ||
532 | if (port_user == NULL) | ||
533 | return -ENOMEM; | ||
534 | |||
485 | spin_lock_init(&port_user_lock); | 535 | spin_lock_init(&port_user_lock); |
486 | memset(port_user, 0, sizeof(port_user)); | ||
487 | 536 | ||
488 | /* Create '/dev/misc/evtchn'. */ | 537 | /* Create '/dev/misc/evtchn'. */ |
489 | err = misc_register(&evtchn_miscdev); | 538 | err = misc_register(&evtchn_miscdev); |
@@ -499,6 +548,9 @@ static int __init evtchn_init(void) | |||
499 | 548 | ||
500 | static void __exit evtchn_cleanup(void) | 549 | static void __exit evtchn_cleanup(void) |
501 | { | 550 | { |
551 | kfree(port_user); | ||
552 | port_user = NULL; | ||
553 | |||
502 | misc_deregister(&evtchn_miscdev); | 554 | misc_deregister(&evtchn_miscdev); |
503 | } | 555 | } |
504 | 556 | ||
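The evtchn rework above replaces the static port_user[] array of pointers with a kcalloc()'d array of unsigned longs and keeps the per-port "enabled" state in bit 0 of each entry, which works because struct per_user_data allocations are aligned to more than two bytes. A standalone sketch of that pointer-tagging idiom is below; the helper names mirror the patch, but the surrounding program is hypothetical.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct per_user_data { const char *name; };

/* One tagged word per port: pointer in the upper bits, enabled flag in
 * bit 0. This relies on allocations being aligned well beyond 2 bytes,
 * so bit 0 of a valid pointer is always zero. */
static unsigned long port_user[16];

static struct per_user_data *get_port_user(unsigned port)
{
    return (struct per_user_data *)(port_user[port] & ~1UL);
}

static void set_port_user(unsigned port, struct per_user_data *u)
{
    port_user[port] = (unsigned long)u;   /* starts with the flag cleared */
}

static bool get_port_enabled(unsigned port)
{
    return port_user[port] & 1;
}

static void set_port_enabled(unsigned port, bool enabled)
{
    if (enabled)
        port_user[port] |= 1;
    else
        port_user[port] &= ~1UL;
}

int main(void)
{
    struct per_user_data *u = malloc(sizeof(*u));

    assert(((unsigned long)u & 1) == 0);  /* alignment assumption */
    set_port_user(3, u);
    set_port_enabled(3, true);
    printf("port 3: user=%p enabled=%d\n",
           (void *)get_port_user(3), get_port_enabled(3));
    free(u);
    return 0;
}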
diff --git a/drivers/xen/xenfs/privcmd.c b/drivers/xen/xenfs/privcmd.c index 0f5d4162b22d..dbd3b16fd131 100644 --- a/drivers/xen/xenfs/privcmd.c +++ b/drivers/xen/xenfs/privcmd.c | |||
@@ -265,9 +265,7 @@ static int mmap_return_errors(void *data, void *state) | |||
265 | xen_pfn_t *mfnp = data; | 265 | xen_pfn_t *mfnp = data; |
266 | struct mmap_batch_state *st = state; | 266 | struct mmap_batch_state *st = state; |
267 | 267 | ||
268 | put_user(*mfnp, st->user++); | 268 | return put_user(*mfnp, st->user++); |
269 | |||
270 | return 0; | ||
271 | } | 269 | } |
272 | 270 | ||
273 | static struct vm_operations_struct privcmd_vm_ops; | 271 | static struct vm_operations_struct privcmd_vm_ops; |
@@ -322,10 +320,8 @@ static long privcmd_ioctl_mmap_batch(void __user *udata) | |||
322 | up_write(&mm->mmap_sem); | 320 | up_write(&mm->mmap_sem); |
323 | 321 | ||
324 | if (state.err > 0) { | 322 | if (state.err > 0) { |
325 | ret = 0; | ||
326 | |||
327 | state.user = m.arr; | 323 | state.user = m.arr; |
328 | traverse_pages(m.num, sizeof(xen_pfn_t), | 324 | ret = traverse_pages(m.num, sizeof(xen_pfn_t), |
329 | &pagelist, | 325 | &pagelist, |
330 | mmap_return_errors, &state); | 326 | mmap_return_errors, &state); |
331 | } | 327 | } |
@@ -383,8 +379,9 @@ static int privcmd_mmap(struct file *file, struct vm_area_struct *vma) | |||
383 | if (xen_feature(XENFEAT_auto_translated_physmap)) | 379 | if (xen_feature(XENFEAT_auto_translated_physmap)) |
384 | return -ENOSYS; | 380 | return -ENOSYS; |
385 | 381 | ||
386 | /* DONTCOPY is essential for Xen as copy_page_range is broken. */ | 382 | /* DONTCOPY is essential for Xen because copy_page_range doesn't know |
387 | vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY; | 383 | * how to recreate these mappings */ |
384 | vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP; | ||
388 | vma->vm_ops = &privcmd_vm_ops; | 385 | vma->vm_ops = &privcmd_vm_ops; |
389 | vma->vm_private_data = NULL; | 386 | vma->vm_private_data = NULL; |
390 | 387 | ||
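In privcmd.c above, mmap_return_errors() now returns the result of put_user() and privcmd_ioctl_mmap_batch() takes its return value from traverse_pages(), so a fault while copying error frames back to userspace is reported instead of silently ignored. A userspace sketch of this "stop the walk on the first callback error" pattern (the traverse function and callback are invented for illustration):

#include <stdio.h>

/* Walk an array, calling fn() on each element; abort and return the first
 * non-zero callback result, mimicking how traverse_pages() bails out. */
static int traverse(int *items, int n, int (*fn)(int item, void *state),
                    void *state)
{
    for (int i = 0; i < n; i++) {
        int ret = fn(items[i], state);
        if (ret)
            return ret;   /* e.g. -EFAULT from a failed copy to userspace */
    }
    return 0;
}

static int copy_back(int item, void *state)
{
    int *copied = state;

    if (item < 0)
        return -14;       /* stand-in for -EFAULT */
    (*copied)++;
    return 0;
}

int main(void)
{
    int mfns[] = { 1, 2, -1, 4 };
    int copied = 0;
    int ret = traverse(mfns, 4, copy_back, &copied);

    printf("ret=%d copied=%d\n", ret, copied);  /* ret=-14, copied=2 */
    return 0;
}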
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c index f6339d11d59c..1aa389719846 100644 --- a/drivers/xen/xenfs/super.c +++ b/drivers/xen/xenfs/super.c | |||
@@ -12,8 +12,6 @@ | |||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/fs.h> | 13 | #include <linux/fs.h> |
14 | #include <linux/magic.h> | 14 | #include <linux/magic.h> |
15 | #include <linux/mm.h> | ||
16 | #include <linux/backing-dev.h> | ||
17 | 15 | ||
18 | #include <xen/xen.h> | 16 | #include <xen/xen.h> |
19 | 17 | ||
@@ -24,28 +22,12 @@ | |||
24 | MODULE_DESCRIPTION("Xen filesystem"); | 22 | MODULE_DESCRIPTION("Xen filesystem"); |
25 | MODULE_LICENSE("GPL"); | 23 | MODULE_LICENSE("GPL"); |
26 | 24 | ||
27 | static int xenfs_set_page_dirty(struct page *page) | ||
28 | { | ||
29 | return !TestSetPageDirty(page); | ||
30 | } | ||
31 | |||
32 | static const struct address_space_operations xenfs_aops = { | ||
33 | .set_page_dirty = xenfs_set_page_dirty, | ||
34 | }; | ||
35 | |||
36 | static struct backing_dev_info xenfs_backing_dev_info = { | ||
37 | .ra_pages = 0, /* No readahead */ | ||
38 | .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, | ||
39 | }; | ||
40 | |||
41 | static struct inode *xenfs_make_inode(struct super_block *sb, int mode) | 25 | static struct inode *xenfs_make_inode(struct super_block *sb, int mode) |
42 | { | 26 | { |
43 | struct inode *ret = new_inode(sb); | 27 | struct inode *ret = new_inode(sb); |
44 | 28 | ||
45 | if (ret) { | 29 | if (ret) { |
46 | ret->i_mode = mode; | 30 | ret->i_mode = mode; |
47 | ret->i_mapping->a_ops = &xenfs_aops; | ||
48 | ret->i_mapping->backing_dev_info = &xenfs_backing_dev_info; | ||
49 | ret->i_uid = ret->i_gid = 0; | 31 | ret->i_uid = ret->i_gid = 0; |
50 | ret->i_blocks = 0; | 32 | ret->i_blocks = 0; |
51 | ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME; | 33 | ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME; |
@@ -121,9 +103,9 @@ static int xenfs_fill_super(struct super_block *sb, void *data, int silent) | |||
121 | return rc; | 103 | return rc; |
122 | } | 104 | } |
123 | 105 | ||
124 | static int xenfs_mount(struct file_system_type *fs_type, | 106 | static struct dentry *xenfs_mount(struct file_system_type *fs_type, |
125 | int flags, const char *dev_name, | 107 | int flags, const char *dev_name, |
126 | void *data) | 108 | void *data) |
127 | { | 109 | { |
128 | return mount_single(fs_type, flags, data, xenfs_fill_super); | 110 | return mount_single(fs_type, flags, data, xenfs_fill_super); |
129 | } | 111 | } |
@@ -137,25 +119,11 @@ static struct file_system_type xenfs_type = { | |||
137 | 119 | ||
138 | static int __init xenfs_init(void) | 120 | static int __init xenfs_init(void) |
139 | { | 121 | { |
140 | int err; | 122 | if (xen_domain()) |
141 | if (!xen_domain()) { | 123 | return register_filesystem(&xenfs_type); |
142 | printk(KERN_INFO "xenfs: not registering filesystem on non-xen platform\n"); | ||
143 | return 0; | ||
144 | } | ||
145 | |||
146 | err = register_filesystem(&xenfs_type); | ||
147 | if (err) { | ||
148 | printk(KERN_ERR "xenfs: Unable to register filesystem!\n"); | ||
149 | goto out; | ||
150 | } | ||
151 | |||
152 | err = bdi_init(&xenfs_backing_dev_info); | ||
153 | if (err) | ||
154 | unregister_filesystem(&xenfs_type); | ||
155 | |||
156 | out: | ||
157 | 124 | ||
158 | return err; | 125 | printk(KERN_INFO "XENFS: not registering filesystem on non-xen platform\n"); |
126 | return 0; | ||
159 | } | 127 | } |
160 | 128 | ||
161 | static void __exit xenfs_exit(void) | 129 | static void __exit xenfs_exit(void) |
diff --git a/fs/fuse/file.c b/fs/fuse/file.c index c8224587123f..9242d294fe90 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c | |||
@@ -134,6 +134,7 @@ EXPORT_SYMBOL_GPL(fuse_do_open); | |||
134 | void fuse_finish_open(struct inode *inode, struct file *file) | 134 | void fuse_finish_open(struct inode *inode, struct file *file) |
135 | { | 135 | { |
136 | struct fuse_file *ff = file->private_data; | 136 | struct fuse_file *ff = file->private_data; |
137 | struct fuse_conn *fc = get_fuse_conn(inode); | ||
137 | 138 | ||
138 | if (ff->open_flags & FOPEN_DIRECT_IO) | 139 | if (ff->open_flags & FOPEN_DIRECT_IO) |
139 | file->f_op = &fuse_direct_io_file_operations; | 140 | file->f_op = &fuse_direct_io_file_operations; |
@@ -141,6 +142,15 @@ void fuse_finish_open(struct inode *inode, struct file *file) | |||
141 | invalidate_inode_pages2(inode->i_mapping); | 142 | invalidate_inode_pages2(inode->i_mapping); |
142 | if (ff->open_flags & FOPEN_NONSEEKABLE) | 143 | if (ff->open_flags & FOPEN_NONSEEKABLE) |
143 | nonseekable_open(inode, file); | 144 | nonseekable_open(inode, file); |
145 | if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) { | ||
146 | struct fuse_inode *fi = get_fuse_inode(inode); | ||
147 | |||
148 | spin_lock(&fc->lock); | ||
149 | fi->attr_version = ++fc->attr_version; | ||
150 | i_size_write(inode, 0); | ||
151 | spin_unlock(&fc->lock); | ||
152 | fuse_invalidate_attr(inode); | ||
153 | } | ||
144 | } | 154 | } |
145 | 155 | ||
146 | int fuse_open_common(struct inode *inode, struct file *file, bool isdir) | 156 | int fuse_open_common(struct inode *inode, struct file *file, bool isdir) |
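The fuse change above makes O_TRUNC opens take effect immediately when the server supports atomic_o_trunc: fuse_finish_open() bumps the attribute version and zeroes i_size under fc->lock, then invalidates cached attributes so stale sizes are not served. A toy sketch of publishing a new size together with a newer version under one lock (the structures are simplified stand-ins, not fuse's):

#include <pthread.h>
#include <stdio.h>

/* Simplified stand-ins for struct fuse_conn / struct fuse_inode. */
struct conn {
    pthread_mutex_t lock;
    unsigned long attr_version;
};

struct inode_state {
    long size;
    unsigned long attr_version;
};

/* On an O_TRUNC open, publish the new (zero) size together with a newer
 * attribute version so cached attributes can be recognised as stale. */
static void finish_open_trunc(struct conn *fc, struct inode_state *fi)
{
    pthread_mutex_lock(&fc->lock);
    fi->attr_version = ++fc->attr_version;
    fi->size = 0;
    pthread_mutex_unlock(&fc->lock);
    /* a real filesystem would now invalidate its cached attributes */
}

int main(void)
{
    struct conn fc = { PTHREAD_MUTEX_INITIALIZER, 0 };
    struct inode_state fi = { 4096, 0 };

    finish_open_trunc(&fc, &fi);
    printf("size=%ld attr_version=%lu\n", fi.size, fi.attr_version);
    return 0;
}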
diff --git a/fs/ioprio.c b/fs/ioprio.c index 2f7d05c89922..7da2a06508e5 100644 --- a/fs/ioprio.c +++ b/fs/ioprio.c | |||
@@ -103,22 +103,15 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio) | |||
103 | } | 103 | } |
104 | 104 | ||
105 | ret = -ESRCH; | 105 | ret = -ESRCH; |
106 | /* | 106 | rcu_read_lock(); |
107 | * We want IOPRIO_WHO_PGRP/IOPRIO_WHO_USER to be "atomic", | ||
108 | * so we can't use rcu_read_lock(). See re-copy of ->ioprio | ||
109 | * in copy_process(). | ||
110 | */ | ||
111 | read_lock(&tasklist_lock); | ||
112 | switch (which) { | 107 | switch (which) { |
113 | case IOPRIO_WHO_PROCESS: | 108 | case IOPRIO_WHO_PROCESS: |
114 | rcu_read_lock(); | ||
115 | if (!who) | 109 | if (!who) |
116 | p = current; | 110 | p = current; |
117 | else | 111 | else |
118 | p = find_task_by_vpid(who); | 112 | p = find_task_by_vpid(who); |
119 | if (p) | 113 | if (p) |
120 | ret = set_task_ioprio(p, ioprio); | 114 | ret = set_task_ioprio(p, ioprio); |
121 | rcu_read_unlock(); | ||
122 | break; | 115 | break; |
123 | case IOPRIO_WHO_PGRP: | 116 | case IOPRIO_WHO_PGRP: |
124 | if (!who) | 117 | if (!who) |
@@ -141,12 +134,7 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio) | |||
141 | break; | 134 | break; |
142 | 135 | ||
143 | do_each_thread(g, p) { | 136 | do_each_thread(g, p) { |
144 | int match; | 137 | if (__task_cred(p)->uid != who) |
145 | |||
146 | rcu_read_lock(); | ||
147 | match = __task_cred(p)->uid == who; | ||
148 | rcu_read_unlock(); | ||
149 | if (!match) | ||
150 | continue; | 138 | continue; |
151 | ret = set_task_ioprio(p, ioprio); | 139 | ret = set_task_ioprio(p, ioprio); |
152 | if (ret) | 140 | if (ret) |
@@ -160,7 +148,7 @@ free_uid: | |||
160 | ret = -EINVAL; | 148 | ret = -EINVAL; |
161 | } | 149 | } |
162 | 150 | ||
163 | read_unlock(&tasklist_lock); | 151 | rcu_read_unlock(); |
164 | return ret; | 152 | return ret; |
165 | } | 153 | } |
166 | 154 | ||
@@ -204,17 +192,15 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who) | |||
204 | int ret = -ESRCH; | 192 | int ret = -ESRCH; |
205 | int tmpio; | 193 | int tmpio; |
206 | 194 | ||
207 | read_lock(&tasklist_lock); | 195 | rcu_read_lock(); |
208 | switch (which) { | 196 | switch (which) { |
209 | case IOPRIO_WHO_PROCESS: | 197 | case IOPRIO_WHO_PROCESS: |
210 | rcu_read_lock(); | ||
211 | if (!who) | 198 | if (!who) |
212 | p = current; | 199 | p = current; |
213 | else | 200 | else |
214 | p = find_task_by_vpid(who); | 201 | p = find_task_by_vpid(who); |
215 | if (p) | 202 | if (p) |
216 | ret = get_task_ioprio(p); | 203 | ret = get_task_ioprio(p); |
217 | rcu_read_unlock(); | ||
218 | break; | 204 | break; |
219 | case IOPRIO_WHO_PGRP: | 205 | case IOPRIO_WHO_PGRP: |
220 | if (!who) | 206 | if (!who) |
@@ -241,12 +227,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who) | |||
241 | break; | 227 | break; |
242 | 228 | ||
243 | do_each_thread(g, p) { | 229 | do_each_thread(g, p) { |
244 | int match; | 230 | if (__task_cred(p)->uid != user->uid) |
245 | |||
246 | rcu_read_lock(); | ||
247 | match = __task_cred(p)->uid == user->uid; | ||
248 | rcu_read_unlock(); | ||
249 | if (!match) | ||
250 | continue; | 231 | continue; |
251 | tmpio = get_task_ioprio(p); | 232 | tmpio = get_task_ioprio(p); |
252 | if (tmpio < 0) | 233 | if (tmpio < 0) |
@@ -264,6 +245,6 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who) | |||
264 | ret = -EINVAL; | 245 | ret = -EINVAL; |
265 | } | 246 | } |
266 | 247 | ||
267 | read_unlock(&tasklist_lock); | 248 | rcu_read_unlock(); |
268 | return ret; | 249 | return ret; |
269 | } | 250 | } |
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c index 49c844dab33a..59e5fe742f7b 100644 --- a/fs/nilfs2/dat.c +++ b/fs/nilfs2/dat.c | |||
@@ -335,7 +335,7 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr) | |||
335 | * the device at this point. | 335 | * the device at this point. |
336 | * | 336 | * |
337 | * To prevent nilfs_dat_translate() from returning the | 337 | * To prevent nilfs_dat_translate() from returning the |
338 | * uncommited block number, this makes a copy of the entry | 338 | * uncommitted block number, this makes a copy of the entry |
339 | * buffer and redirects nilfs_dat_translate() to the copy. | 339 | * buffer and redirects nilfs_dat_translate() to the copy. |
340 | */ | 340 | */ |
341 | if (!buffer_nilfs_redirected(entry_bh)) { | 341 | if (!buffer_nilfs_redirected(entry_bh)) { |
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c index 3e90f86d5bfe..e00d9457c256 100644 --- a/fs/nilfs2/ioctl.c +++ b/fs/nilfs2/ioctl.c | |||
@@ -349,8 +349,8 @@ static int nilfs_ioctl_move_blocks(struct super_block *sb, | |||
349 | ino = vdesc->vd_ino; | 349 | ino = vdesc->vd_ino; |
350 | cno = vdesc->vd_cno; | 350 | cno = vdesc->vd_cno; |
351 | inode = nilfs_iget_for_gc(sb, ino, cno); | 351 | inode = nilfs_iget_for_gc(sb, ino, cno); |
352 | if (unlikely(inode == NULL)) { | 352 | if (IS_ERR(inode)) { |
353 | ret = -ENOMEM; | 353 | ret = PTR_ERR(inode); |
354 | goto failed; | 354 | goto failed; |
355 | } | 355 | } |
356 | do { | 356 | do { |
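The nilfs2 fix above matters because nilfs_iget_for_gc() reports failure with an ERR_PTR-encoded pointer rather than NULL, so the old check never fired and every failure was mapped to -ENOMEM. A standalone sketch of the ERR_PTR()/IS_ERR()/PTR_ERR() convention (a simplified re-implementation for illustration, not the kernel's err.h):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

/* Simplified versions of the kernel's err.h helpers: small negative errno
 * values live in the otherwise-unused top page of pointer space, so one
 * return value can carry either a valid object or an error code. */
static void *ERR_PTR(long error)      { return (void *)error; }
static long  PTR_ERR(const void *ptr) { return (long)ptr; }
static int   IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical lookup: returns an object or an ERR_PTR, never NULL. */
static void *iget_for_gc(int ino)
{
    if (ino < 0)
        return ERR_PTR(-ENOENT);
    return malloc(16);
}

int main(void)
{
    void *inode = iget_for_gc(-1);

    if (IS_ERR(inode)) {
        printf("lookup failed: %ld\n", PTR_ERR(inode)); /* -ENOENT, not -ENOMEM */
        return 1;
    }
    free(inode);
    return 0;
}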
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index da6b01d70f01..c126c83b9a45 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -706,6 +706,7 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask, | |||
706 | * skip over unmapped regions. | 706 | * skip over unmapped regions. |
707 | */ | 707 | */ |
708 | #define PAGEMAP_WALK_SIZE (PMD_SIZE) | 708 | #define PAGEMAP_WALK_SIZE (PMD_SIZE) |
709 | #define PAGEMAP_WALK_MASK (PMD_MASK) | ||
709 | static ssize_t pagemap_read(struct file *file, char __user *buf, | 710 | static ssize_t pagemap_read(struct file *file, char __user *buf, |
710 | size_t count, loff_t *ppos) | 711 | size_t count, loff_t *ppos) |
711 | { | 712 | { |
@@ -776,7 +777,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, | |||
776 | unsigned long end; | 777 | unsigned long end; |
777 | 778 | ||
778 | pm.pos = 0; | 779 | pm.pos = 0; |
779 | end = start_vaddr + PAGEMAP_WALK_SIZE; | 780 | end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK; |
780 | /* overflow ? */ | 781 | /* overflow ? */ |
781 | if (end < start_vaddr || end > end_vaddr) | 782 | if (end < start_vaddr || end > end_vaddr) |
782 | end = end_vaddr; | 783 | end = end_vaddr; |
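The pagemap change above rounds the end of each walk chunk down to a PMD boundary, so a chunk that starts unaligned cannot straddle two PMDs. A small sketch of the alignment arithmetic; the 2 MiB PMD size is the common x86-64 value and is assumed here only for illustration.

#include <stdio.h>

#define PMD_SIZE  (1UL << 21)          /* 2 MiB, typical x86-64 value */
#define PMD_MASK  (~(PMD_SIZE - 1))

int main(void)
{
    unsigned long start = 0x7f0000001000UL;   /* not PMD aligned */

    /* Old behaviour: end is start + PMD_SIZE, which crosses into the next
     * PMD. New behaviour: round the end down so each chunk stays within a
     * single PMD. */
    unsigned long end_old = start + PMD_SIZE;
    unsigned long end_new = (start + PMD_SIZE) & PMD_MASK;

    printf("start        %#lx\n", start);
    printf("end (old)    %#lx\n", end_old);
    printf("end (masked) %#lx\n", end_new);
    return 0;
}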
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c index bd9763e76bae..79265fdc317a 100644 --- a/fs/reiserfs/ioctl.c +++ b/fs/reiserfs/ioctl.c | |||
@@ -183,12 +183,11 @@ int reiserfs_unpack(struct inode *inode, struct file *filp) | |||
183 | return 0; | 183 | return 0; |
184 | } | 184 | } |
185 | 185 | ||
186 | /* we need to make sure nobody is changing the file size beneath | ||
187 | ** us | ||
188 | */ | ||
189 | reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb); | ||
190 | depth = reiserfs_write_lock_once(inode->i_sb); | 186 | depth = reiserfs_write_lock_once(inode->i_sb); |
191 | 187 | ||
188 | /* we need to make sure nobody is changing the file size beneath us */ | ||
189 | reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb); | ||
190 | |||
192 | write_from = inode->i_size & (blocksize - 1); | 191 | write_from = inode->i_size & (blocksize - 1); |
193 | /* if we are on a block boundary, we are already unpacked. */ | 192 | /* if we are on a block boundary, we are already unpacked. */ |
194 | if (write_from == 0) { | 193 | if (write_from == 0) { |
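The reiserfs hunk above reorders lock acquisition in the unpack ioctl so the write lock is taken before the inode mutex. As a general rule, paths that need several locks should take them in one documented order; a minimal pthread illustration of that convention (generic locks, not reiserfs's):

#include <pthread.h>
#include <stdio.h>

/* If one path takes A then B while another takes B then A, the two can
 * deadlock; agreeing on a single order (always A before B) removes that
 * possibility. */
static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

static void do_work_locked(const char *who)
{
    pthread_mutex_lock(&lock_a);   /* always first */
    pthread_mutex_lock(&lock_b);   /* always second */
    printf("%s: holding A then B\n", who);
    pthread_mutex_unlock(&lock_b);
    pthread_mutex_unlock(&lock_a);
}

int main(void)
{
    do_work_locked("path 1");
    do_work_locked("path 2");
    return 0;
}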
diff --git a/include/linux/dmar.h b/include/linux/dmar.h index a7d9dc21391d..7b776d71d36d 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h | |||
@@ -175,10 +175,21 @@ static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev) | |||
175 | return 0; | 175 | return 0; |
176 | } | 176 | } |
177 | 177 | ||
178 | #define enable_intr_remapping(mode) (-1) | ||
179 | #define disable_intr_remapping() (0) | ||
180 | #define reenable_intr_remapping(mode) (0) | ||
181 | #define intr_remapping_enabled (0) | 178 | #define intr_remapping_enabled (0) |
179 | |||
180 | static inline int enable_intr_remapping(int eim) | ||
181 | { | ||
182 | return -1; | ||
183 | } | ||
184 | |||
185 | static inline void disable_intr_remapping(void) | ||
186 | { | ||
187 | } | ||
188 | |||
189 | static inline int reenable_intr_remapping(int eim) | ||
190 | { | ||
191 | return 0; | ||
192 | } | ||
182 | #endif | 193 | #endif |
183 | 194 | ||
184 | /* Can't use the common MSI interrupt functions | 195 | /* Can't use the common MSI interrupt functions |
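The dmar.h change above turns the disabled-case #define stubs into static inline functions, which preserves argument evaluation and type checking while still compiling to nothing. A minimal illustration of the behavioural difference; the function names are generic, not the DMAR API.

#include <stdio.h>

/* Macro stub: the argument disappears at expansion, so side effects in the
 * caller's expression are silently dropped and nothing is type-checked. */
#define enable_feature_macro(mode) (-1)

/* Inline stub: behaves like a real call, the argument is still evaluated
 * and type-checked, and the optimizer removes the body. */
static inline int enable_feature_inline(int mode)
{
    (void)mode;
    return -1;
}

static int next_mode(int *counter)
{
    return (*counter)++;
}

int main(void)
{
    int calls = 0;

    (void)enable_feature_macro(next_mode(&calls));   /* next_mode() never runs */
    (void)enable_feature_inline(next_mode(&calls));  /* next_mode() runs once */
    printf("side effects observed: %d\n", calls);    /* prints 1 */
    return 0;
}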
diff --git a/include/linux/fb.h b/include/linux/fb.h index 7fca3dc4e475..d1631d37e9e0 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h | |||
@@ -1122,6 +1122,7 @@ extern const struct fb_videomode *fb_find_best_display(const struct fb_monspecs | |||
1122 | 1122 | ||
1123 | /* drivers/video/fbcmap.c */ | 1123 | /* drivers/video/fbcmap.c */ |
1124 | extern int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp); | 1124 | extern int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp); |
1125 | extern int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags); | ||
1125 | extern void fb_dealloc_cmap(struct fb_cmap *cmap); | 1126 | extern void fb_dealloc_cmap(struct fb_cmap *cmap); |
1126 | extern int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to); | 1127 | extern int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to); |
1127 | extern int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to); | 1128 | extern int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to); |
diff --git a/include/linux/fs.h b/include/linux/fs.h index eedc00b7b1ee..c9e06cc70dad 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -34,9 +34,9 @@ | |||
34 | #define SEEK_MAX SEEK_END | 34 | #define SEEK_MAX SEEK_END |
35 | 35 | ||
36 | struct fstrim_range { | 36 | struct fstrim_range { |
37 | uint64_t start; | 37 | __u64 start; |
38 | uint64_t len; | 38 | __u64 len; |
39 | uint64_t minlen; | 39 | __u64 minlen; |
40 | }; | 40 | }; |
41 | 41 | ||
42 | /* And dynamically-tunable limits and defaults: */ | 42 | /* And dynamically-tunable limits and defaults: */ |
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h index a2d6ea49ec56..d1e55fed2c7d 100644 --- a/include/linux/hw_breakpoint.h +++ b/include/linux/hw_breakpoint.h | |||
@@ -33,6 +33,8 @@ enum bp_type_idx { | |||
33 | 33 | ||
34 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 34 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
35 | 35 | ||
36 | extern int __init init_hw_breakpoint(void); | ||
37 | |||
36 | static inline void hw_breakpoint_init(struct perf_event_attr *attr) | 38 | static inline void hw_breakpoint_init(struct perf_event_attr *attr) |
37 | { | 39 | { |
38 | memset(attr, 0, sizeof(*attr)); | 40 | memset(attr, 0, sizeof(*attr)); |
@@ -108,6 +110,8 @@ static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp) | |||
108 | 110 | ||
109 | #else /* !CONFIG_HAVE_HW_BREAKPOINT */ | 111 | #else /* !CONFIG_HAVE_HW_BREAKPOINT */ |
110 | 112 | ||
113 | static inline int __init init_hw_breakpoint(void) { return 0; } | ||
114 | |||
111 | static inline struct perf_event * | 115 | static inline struct perf_event * |
112 | register_user_hw_breakpoint(struct perf_event_attr *attr, | 116 | register_user_hw_breakpoint(struct perf_event_attr *attr, |
113 | perf_overflow_handler_t triggered, | 117 | perf_overflow_handler_t triggered, |
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index 1ff81b51b656..dd3c34ebca9a 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h | |||
@@ -11,6 +11,7 @@ | |||
11 | #define MARVELL_PHY_ID_88E1118 0x01410e10 | 11 | #define MARVELL_PHY_ID_88E1118 0x01410e10 |
12 | #define MARVELL_PHY_ID_88E1121R 0x01410cb0 | 12 | #define MARVELL_PHY_ID_88E1121R 0x01410cb0 |
13 | #define MARVELL_PHY_ID_88E1145 0x01410cd0 | 13 | #define MARVELL_PHY_ID_88E1145 0x01410cd0 |
14 | #define MARVELL_PHY_ID_88E1149R 0x01410e50 | ||
14 | #define MARVELL_PHY_ID_88E1240 0x01410e30 | 15 | #define MARVELL_PHY_ID_88E1240 0x01410e30 |
15 | #define MARVELL_PHY_ID_88E1318S 0x01410e90 | 16 | #define MARVELL_PHY_ID_88E1318S 0x01410e90 |
16 | 17 | ||
diff --git a/include/linux/mfd/wm8350/audio.h b/include/linux/mfd/wm8350/audio.h index a95141eafce3..bd581c6fa085 100644 --- a/include/linux/mfd/wm8350/audio.h +++ b/include/linux/mfd/wm8350/audio.h | |||
@@ -522,9 +522,6 @@ | |||
522 | #define WM8350_MCLK_SEL_PLL_32K 3 | 522 | #define WM8350_MCLK_SEL_PLL_32K 3 |
523 | #define WM8350_MCLK_SEL_MCLK 5 | 523 | #define WM8350_MCLK_SEL_MCLK 5 |
524 | 524 | ||
525 | #define WM8350_MCLK_DIR_OUT 0 | ||
526 | #define WM8350_MCLK_DIR_IN 1 | ||
527 | |||
528 | /* clock divider id's */ | 525 | /* clock divider id's */ |
529 | #define WM8350_ADC_CLKDIV 0 | 526 | #define WM8350_ADC_CLKDIV 0 |
530 | #define WM8350_DAC_CLKDIV 1 | 527 | #define WM8350_DAC_CLKDIV 1 |
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 6d87f68ce4b6..30f6fad99a58 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h | |||
@@ -168,6 +168,7 @@ struct mmc_host { | |||
168 | /* DDR mode at 1.8V */ | 168 | /* DDR mode at 1.8V */ |
169 | #define MMC_CAP_1_2V_DDR (1 << 12) /* can support */ | 169 | #define MMC_CAP_1_2V_DDR (1 << 12) /* can support */ |
170 | /* DDR mode at 1.2V */ | 170 | /* DDR mode at 1.2V */ |
171 | #define MMC_CAP_POWER_OFF_CARD (1 << 13) /* Can power off after boot */ | ||
171 | 172 | ||
172 | mmc_pm_flag_t pm_caps; /* supported pm features */ | 173 | mmc_pm_flag_t pm_caps; /* supported pm features */ |
173 | 174 | ||
diff --git a/include/linux/module.h b/include/linux/module.h index b29e7458b966..7575bbbdf2a2 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
@@ -517,7 +517,7 @@ static inline void __module_get(struct module *module) | |||
517 | #define symbol_put_addr(p) do { } while(0) | 517 | #define symbol_put_addr(p) do { } while(0) |
518 | 518 | ||
519 | #endif /* CONFIG_MODULE_UNLOAD */ | 519 | #endif /* CONFIG_MODULE_UNLOAD */ |
520 | int use_module(struct module *a, struct module *b); | 520 | int ref_module(struct module *a, struct module *b); |
521 | 521 | ||
522 | /* This is a #define so the string doesn't get put in every .o file */ | 522 | /* This is a #define so the string doesn't get put in every .o file */ |
523 | #define module_name(mod) \ | 523 | #define module_name(mod) \ |
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h index 5bb13b3db84d..b02195dfc1b0 100644 --- a/include/linux/page_cgroup.h +++ b/include/linux/page_cgroup.h | |||
@@ -59,8 +59,6 @@ static inline void ClearPageCgroup##uname(struct page_cgroup *pc) \ | |||
59 | static inline int TestClearPageCgroup##uname(struct page_cgroup *pc) \ | 59 | static inline int TestClearPageCgroup##uname(struct page_cgroup *pc) \ |
60 | { return test_and_clear_bit(PCG_##lname, &pc->flags); } | 60 | { return test_and_clear_bit(PCG_##lname, &pc->flags); } |
61 | 61 | ||
62 | TESTPCGFLAG(Locked, LOCK) | ||
63 | |||
64 | /* Cache flag is set only once (at allocation) */ | 62 | /* Cache flag is set only once (at allocation) */ |
65 | TESTPCGFLAG(Cache, CACHE) | 63 | TESTPCGFLAG(Cache, CACHE) |
66 | CLEARPCGFLAG(Cache, CACHE) | 64 | CLEARPCGFLAG(Cache, CACHE) |
@@ -104,6 +102,11 @@ static inline void unlock_page_cgroup(struct page_cgroup *pc) | |||
104 | bit_spin_unlock(PCG_LOCK, &pc->flags); | 102 | bit_spin_unlock(PCG_LOCK, &pc->flags); |
105 | } | 103 | } |
106 | 104 | ||
105 | static inline int page_is_cgroup_locked(struct page_cgroup *pc) | ||
106 | { | ||
107 | return bit_spin_is_locked(PCG_LOCK, &pc->flags); | ||
108 | } | ||
109 | |||
107 | #else /* CONFIG_CGROUP_MEM_RES_CTLR */ | 110 | #else /* CONFIG_CGROUP_MEM_RES_CTLR */ |
108 | struct page_cgroup; | 111 | struct page_cgroup; |
109 | 112 | ||
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index c6bcfe93b9ca..d369b533dc2a 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -2441,6 +2441,7 @@ | |||
2441 | #define PCI_DEVICE_ID_INTEL_MFD_SDIO2 0x0822 | 2441 | #define PCI_DEVICE_ID_INTEL_MFD_SDIO2 0x0822 |
2442 | #define PCI_DEVICE_ID_INTEL_MFD_EMMC0 0x0823 | 2442 | #define PCI_DEVICE_ID_INTEL_MFD_EMMC0 0x0823 |
2443 | #define PCI_DEVICE_ID_INTEL_MFD_EMMC1 0x0824 | 2443 | #define PCI_DEVICE_ID_INTEL_MFD_EMMC1 0x0824 |
2444 | #define PCI_DEVICE_ID_INTEL_MRST_SD2 0x084F | ||
2444 | #define PCI_DEVICE_ID_INTEL_I960 0x0960 | 2445 | #define PCI_DEVICE_ID_INTEL_I960 0x0960 |
2445 | #define PCI_DEVICE_ID_INTEL_I960RM 0x0962 | 2446 | #define PCI_DEVICE_ID_INTEL_I960RM 0x0962 |
2446 | #define PCI_DEVICE_ID_INTEL_8257X_SOL 0x1062 | 2447 | #define PCI_DEVICE_ID_INTEL_8257X_SOL 0x1062 |
diff --git a/include/linux/sh_clk.h b/include/linux/sh_clk.h index cea0c38e7a63..9a52f72527dc 100644 --- a/include/linux/sh_clk.h +++ b/include/linux/sh_clk.h | |||
@@ -19,11 +19,13 @@ struct clk_mapping { | |||
19 | }; | 19 | }; |
20 | 20 | ||
21 | struct clk_ops { | 21 | struct clk_ops { |
22 | #ifdef CONFIG_SH_CLK_CPG_LEGACY | ||
22 | void (*init)(struct clk *clk); | 23 | void (*init)(struct clk *clk); |
24 | #endif | ||
23 | int (*enable)(struct clk *clk); | 25 | int (*enable)(struct clk *clk); |
24 | void (*disable)(struct clk *clk); | 26 | void (*disable)(struct clk *clk); |
25 | unsigned long (*recalc)(struct clk *clk); | 27 | unsigned long (*recalc)(struct clk *clk); |
26 | int (*set_rate)(struct clk *clk, unsigned long rate, int algo_id); | 28 | int (*set_rate)(struct clk *clk, unsigned long rate); |
27 | int (*set_parent)(struct clk *clk, struct clk *parent); | 29 | int (*set_parent)(struct clk *clk, struct clk *parent); |
28 | long (*round_rate)(struct clk *clk, unsigned long rate); | 30 | long (*round_rate)(struct clk *clk, unsigned long rate); |
29 | }; | 31 | }; |
@@ -67,36 +69,6 @@ int clk_register(struct clk *); | |||
67 | void clk_unregister(struct clk *); | 69 | void clk_unregister(struct clk *); |
68 | void clk_enable_init_clocks(void); | 70 | void clk_enable_init_clocks(void); |
69 | 71 | ||
70 | /** | ||
71 | * clk_set_rate_ex - set the clock rate for a clock source, with additional parameter | ||
72 | * @clk: clock source | ||
73 | * @rate: desired clock rate in Hz | ||
74 | * @algo_id: algorithm id to be passed down to ops->set_rate | ||
75 | * | ||
76 | * Returns success (0) or negative errno. | ||
77 | */ | ||
78 | int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id); | ||
79 | |||
80 | enum clk_sh_algo_id { | ||
81 | NO_CHANGE = 0, | ||
82 | |||
83 | IUS_N1_N1, | ||
84 | IUS_322, | ||
85 | IUS_522, | ||
86 | IUS_N11, | ||
87 | |||
88 | SB_N1, | ||
89 | |||
90 | SB3_N1, | ||
91 | SB3_32, | ||
92 | SB3_43, | ||
93 | SB3_54, | ||
94 | |||
95 | BP_N1, | ||
96 | |||
97 | IP_N1, | ||
98 | }; | ||
99 | |||
100 | struct clk_div_mult_table { | 72 | struct clk_div_mult_table { |
101 | unsigned int *divisors; | 73 | unsigned int *divisors; |
102 | unsigned int nr_divisors; | 74 | unsigned int nr_divisors; |
diff --git a/include/sound/sh_fsi.h b/include/sound/sh_fsi.h index fa60cbda90a4..d79894192ae3 100644 --- a/include/sound/sh_fsi.h +++ b/include/sound/sh_fsi.h | |||
@@ -85,7 +85,9 @@ | |||
85 | * ACK_MD (FSI2) | 85 | * ACK_MD (FSI2) |
86 | * CKG1 (FSI) | 86 | * CKG1 (FSI) |
87 | * | 87 | * |
88 | * err: return value < 0 | 88 | * err : return value < 0 |
89 | * no change : return value == 0 | ||
90 | * change xMD : return value > 0 | ||
89 | * | 91 | * |
90 | * 0x-00000AB | 92 | * 0x-00000AB |
91 | * | 93 | * |
@@ -111,7 +113,7 @@ | |||
111 | struct sh_fsi_platform_info { | 113 | struct sh_fsi_platform_info { |
112 | unsigned long porta_flags; | 114 | unsigned long porta_flags; |
113 | unsigned long portb_flags; | 115 | unsigned long portb_flags; |
114 | int (*set_rate)(int is_porta, int rate); /* for master mode */ | 116 | int (*set_rate)(struct device *dev, int is_porta, int rate, int enable); |
115 | }; | 117 | }; |
116 | 118 | ||
117 | #endif /* __SOUND_FSI_H */ | 119 | #endif /* __SOUND_FSI_H */ |
diff --git a/include/video/da8xx-fb.h b/include/video/da8xx-fb.h index 6316cdabf73f..89d43b3d4cb9 100644 --- a/include/video/da8xx-fb.h +++ b/include/video/da8xx-fb.h | |||
@@ -99,7 +99,6 @@ struct lcd_sync_arg { | |||
99 | #define FBIPUT_COLOR _IOW('F', 6, int) | 99 | #define FBIPUT_COLOR _IOW('F', 6, int) |
100 | #define FBIPUT_HSYNC _IOW('F', 9, int) | 100 | #define FBIPUT_HSYNC _IOW('F', 9, int) |
101 | #define FBIPUT_VSYNC _IOW('F', 10, int) | 101 | #define FBIPUT_VSYNC _IOW('F', 10, int) |
102 | #define FBIO_WAITFORVSYNC _IOW('F', 0x20, u_int32_t) | ||
103 | 102 | ||
104 | #endif /* ifndef DA8XX_FB_H */ | 103 | #endif /* ifndef DA8XX_FB_H */ |
105 | 104 | ||
diff --git a/include/xen/interface/memory.h b/include/xen/interface/memory.h index d7a6c13bde69..eac3ce153719 100644 --- a/include/xen/interface/memory.h +++ b/include/xen/interface/memory.h | |||
@@ -141,6 +141,19 @@ struct xen_machphys_mfn_list { | |||
141 | DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mfn_list); | 141 | DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mfn_list); |
142 | 142 | ||
143 | /* | 143 | /* |
144 | * Returns the location in virtual address space of the machine_to_phys | ||
145 | * mapping table. Architectures which do not have a m2p table, or which do not | ||
146 | * map it by default into guest address space, do not implement this command. | ||
147 | * arg == addr of xen_machphys_mapping_t. | ||
148 | */ | ||
149 | #define XENMEM_machphys_mapping 12 | ||
150 | struct xen_machphys_mapping { | ||
151 | unsigned long v_start, v_end; /* Start and end virtual addresses. */ | ||
152 | unsigned long max_mfn; /* Maximum MFN that can be looked up. */ | ||
153 | }; | ||
154 | DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mapping_t); | ||
155 | |||
156 | /* | ||
144 | * Sets the GPFN at which a particular page appears in the specified guest's | 157 | * Sets the GPFN at which a particular page appears in the specified guest's |
145 | * pseudophysical address space. | 158 | * pseudophysical address space. |
146 | * arg == addr of xen_add_to_physmap_t. | 159 | * arg == addr of xen_add_to_physmap_t. |
diff --git a/include/xen/page.h b/include/xen/page.h index eaf85fab1263..0be36b976f4b 100644 --- a/include/xen/page.h +++ b/include/xen/page.h | |||
@@ -1 +1,8 @@ | |||
1 | #ifndef _XEN_PAGE_H | ||
2 | #define _XEN_PAGE_H | ||
3 | |||
1 | #include <asm/xen/page.h> | 4 | #include <asm/xen/page.h> |
5 | |||
6 | extern phys_addr_t xen_extra_mem_start, xen_extra_mem_size; | ||
7 | |||
8 | #endif /* _XEN_PAGE_H */ | ||
diff --git a/include/xen/privcmd.h b/include/xen/privcmd.h index b42cdfd92fee..17857fb4d550 100644 --- a/include/xen/privcmd.h +++ b/include/xen/privcmd.h | |||
@@ -34,13 +34,10 @@ | |||
34 | #define __LINUX_PUBLIC_PRIVCMD_H__ | 34 | #define __LINUX_PUBLIC_PRIVCMD_H__ |
35 | 35 | ||
36 | #include <linux/types.h> | 36 | #include <linux/types.h> |
37 | #include <linux/compiler.h> | ||
37 | 38 | ||
38 | typedef unsigned long xen_pfn_t; | 39 | typedef unsigned long xen_pfn_t; |
39 | 40 | ||
40 | #ifndef __user | ||
41 | #define __user | ||
42 | #endif | ||
43 | |||
44 | struct privcmd_hypercall { | 41 | struct privcmd_hypercall { |
45 | __u64 op; | 42 | __u64 op; |
46 | __u64 arg[5]; | 43 | __u64 arg[5]; |
diff --git a/init/Kconfig b/init/Kconfig index 88c10468db46..c9728992a776 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -613,6 +613,19 @@ config CGROUP_MEM_RES_CTLR_SWAP | |||
613 | if boot option "noswapaccount" is set, swap will not be accounted. | 613 | if boot option "noswapaccount" is set, swap will not be accounted. |
614 | Now, memory usage of swap_cgroup is 2 bytes per entry. If swap page | 614 | Now, memory usage of swap_cgroup is 2 bytes per entry. If swap page |
615 | size is 4096bytes, 512k per 1Gbytes of swap. | 615 | size is 4096bytes, 512k per 1Gbytes of swap. |
616 | config CGROUP_MEM_RES_CTLR_SWAP_ENABLED | ||
617 | bool "Memory Resource Controller Swap Extension enabled by default" | ||
618 | depends on CGROUP_MEM_RES_CTLR_SWAP | ||
619 | default y | ||
620 | help | ||
621 | Memory Resource Controller Swap Extension comes with its price in | ||
622 | a bigger memory consumption. General purpose distribution kernels | ||
623 | which want to enable the feature but keep it disabled by default | ||
624 | and let the user enable it by swapaccount boot command line | ||
625 | parameter should have this option unselected. | ||
626 | Those who want to have the feature enabled by default should | ||
627 | select this option (if, for some reason, they need to disable it | ||
628 | then noswapaccount does the trick). | ||
616 | 629 | ||
617 | menuconfig CGROUP_SCHED | 630 | menuconfig CGROUP_SCHED |
618 | bool "Group CPU scheduler" | 631 | bool "Group CPU scheduler" |
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c index 2c9120f0afca..e5325825aeb6 100644 --- a/kernel/hw_breakpoint.c +++ b/kernel/hw_breakpoint.c | |||
@@ -620,7 +620,7 @@ static struct pmu perf_breakpoint = { | |||
620 | .read = hw_breakpoint_pmu_read, | 620 | .read = hw_breakpoint_pmu_read, |
621 | }; | 621 | }; |
622 | 622 | ||
623 | static int __init init_hw_breakpoint(void) | 623 | int __init init_hw_breakpoint(void) |
624 | { | 624 | { |
625 | unsigned int **task_bp_pinned; | 625 | unsigned int **task_bp_pinned; |
626 | int cpu, err_cpu; | 626 | int cpu, err_cpu; |
@@ -655,6 +655,5 @@ static int __init init_hw_breakpoint(void) | |||
655 | 655 | ||
656 | return -ENOMEM; | 656 | return -ENOMEM; |
657 | } | 657 | } |
658 | core_initcall(init_hw_breakpoint); | ||
659 | 658 | ||
660 | 659 | ||
diff --git a/kernel/irq_work.c b/kernel/irq_work.c index f16763ff8481..90f881904bb1 100644 --- a/kernel/irq_work.c +++ b/kernel/irq_work.c | |||
@@ -145,7 +145,9 @@ void irq_work_run(void) | |||
145 | * Clear the BUSY bit and return to the free state if | 145 | * Clear the BUSY bit and return to the free state if |
146 | * no-one else claimed it meanwhile. | 146 | * no-one else claimed it meanwhile. |
147 | */ | 147 | */ |
148 | cmpxchg(&entry->next, next_flags(NULL, IRQ_WORK_BUSY), NULL); | 148 | (void)cmpxchg(&entry->next, |
149 | next_flags(NULL, IRQ_WORK_BUSY), | ||
150 | NULL); | ||
149 | } | 151 | } |
150 | } | 152 | } |
151 | EXPORT_SYMBOL_GPL(irq_work_run); | 153 | EXPORT_SYMBOL_GPL(irq_work_run); |
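The irq_work tweak above only adds an explicit (void) cast: cmpxchg() is used purely for its side effect of releasing the BUSY-marked entry when nobody re-claimed it, and the returned old value is deliberately ignored. A userspace sketch of the same compare-and-swap release using a GCC/Clang builtin (the flag layout is invented for the example):

#include <stdint.h>
#include <stdio.h>

#define IRQ_WORK_BUSY 1UL

/* One "work entry": the low bits of 'next' hold flags, as in irq_work. */
static uintptr_t entry_next;

/* After running the callback, drop back to the free state, but only if the
 * entry is still exactly (NULL | BUSY); if someone re-claimed it meanwhile,
 * the compare fails and their value is left alone. The old value returned
 * by the builtin is intentionally discarded, hence the (void) cast. */
static void release_entry(void)
{
    (void)__sync_val_compare_and_swap(&entry_next, IRQ_WORK_BUSY, 0UL);
}

int main(void)
{
    entry_next = IRQ_WORK_BUSY;   /* claimed, callback has just run */
    release_entry();
    printf("entry_next after release: %#lx\n", (unsigned long)entry_next);
    return 0;
}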
diff --git a/kernel/module.c b/kernel/module.c index 437a74a7524a..d190664f25ff 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -2326,6 +2326,18 @@ static void find_module_sections(struct module *mod, struct load_info *info) | |||
2326 | kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) * | 2326 | kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) * |
2327 | mod->num_trace_events, GFP_KERNEL); | 2327 | mod->num_trace_events, GFP_KERNEL); |
2328 | #endif | 2328 | #endif |
2329 | #ifdef CONFIG_TRACING | ||
2330 | mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt", | ||
2331 | sizeof(*mod->trace_bprintk_fmt_start), | ||
2332 | &mod->num_trace_bprintk_fmt); | ||
2333 | /* | ||
2334 | * This section contains pointers to allocated objects in the trace | ||
2335 | * code and not scanning it leads to false positives. | ||
2336 | */ | ||
2337 | kmemleak_scan_area(mod->trace_bprintk_fmt_start, | ||
2338 | sizeof(*mod->trace_bprintk_fmt_start) * | ||
2339 | mod->num_trace_bprintk_fmt, GFP_KERNEL); | ||
2340 | #endif | ||
2329 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD | 2341 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD |
2330 | /* sechdrs[0].sh_size is always zero */ | 2342 | /* sechdrs[0].sh_size is always zero */ |
2331 | mod->ftrace_callsites = section_objs(info, "__mcount_loc", | 2343 | mod->ftrace_callsites = section_objs(info, "__mcount_loc", |
diff --git a/kernel/perf_event.c b/kernel/perf_event.c index cb6c0d2af68f..671f6c8c8a32 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/kernel_stat.h> | 31 | #include <linux/kernel_stat.h> |
32 | #include <linux/perf_event.h> | 32 | #include <linux/perf_event.h> |
33 | #include <linux/ftrace_event.h> | 33 | #include <linux/ftrace_event.h> |
34 | #include <linux/hw_breakpoint.h> | ||
34 | 35 | ||
35 | #include <asm/irq_regs.h> | 36 | #include <asm/irq_regs.h> |
36 | 37 | ||
@@ -2234,11 +2235,6 @@ int perf_event_release_kernel(struct perf_event *event) | |||
2234 | raw_spin_unlock_irq(&ctx->lock); | 2235 | raw_spin_unlock_irq(&ctx->lock); |
2235 | mutex_unlock(&ctx->mutex); | 2236 | mutex_unlock(&ctx->mutex); |
2236 | 2237 | ||
2237 | mutex_lock(&event->owner->perf_event_mutex); | ||
2238 | list_del_init(&event->owner_entry); | ||
2239 | mutex_unlock(&event->owner->perf_event_mutex); | ||
2240 | put_task_struct(event->owner); | ||
2241 | |||
2242 | free_event(event); | 2238 | free_event(event); |
2243 | 2239 | ||
2244 | return 0; | 2240 | return 0; |
@@ -2251,9 +2247,43 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel); | |||
2251 | static int perf_release(struct inode *inode, struct file *file) | 2247 | static int perf_release(struct inode *inode, struct file *file) |
2252 | { | 2248 | { |
2253 | struct perf_event *event = file->private_data; | 2249 | struct perf_event *event = file->private_data; |
2250 | struct task_struct *owner; | ||
2254 | 2251 | ||
2255 | file->private_data = NULL; | 2252 | file->private_data = NULL; |
2256 | 2253 | ||
2254 | rcu_read_lock(); | ||
2255 | owner = ACCESS_ONCE(event->owner); | ||
2256 | /* | ||
2257 | * Matches the smp_wmb() in perf_event_exit_task(). If we observe | ||
2258 | * !owner it means the list deletion is complete and we can indeed | ||
2259 | * free this event, otherwise we need to serialize on | ||
2260 | * owner->perf_event_mutex. | ||
2261 | */ | ||
2262 | smp_read_barrier_depends(); | ||
2263 | if (owner) { | ||
2264 | /* | ||
2265 | * Since delayed_put_task_struct() also drops the last | ||
2266 | * task reference we can safely take a new reference | ||
2267 | * while holding the rcu_read_lock(). | ||
2268 | */ | ||
2269 | get_task_struct(owner); | ||
2270 | } | ||
2271 | rcu_read_unlock(); | ||
2272 | |||
2273 | if (owner) { | ||
2274 | mutex_lock(&owner->perf_event_mutex); | ||
2275 | /* | ||
2276 | * We have to re-check the event->owner field, if it is cleared | ||
2277 | * we raced with perf_event_exit_task(), acquiring the mutex | ||
2278 | * ensured they're done, and we can proceed with freeing the | ||
2279 | * event. | ||
2280 | */ | ||
2281 | if (event->owner) | ||
2282 | list_del_init(&event->owner_entry); | ||
2283 | mutex_unlock(&owner->perf_event_mutex); | ||
2284 | put_task_struct(owner); | ||
2285 | } | ||
2286 | |||
2257 | return perf_event_release_kernel(event); | 2287 | return perf_event_release_kernel(event); |
2258 | } | 2288 | } |
2259 | 2289 | ||
@@ -5677,7 +5707,7 @@ SYSCALL_DEFINE5(perf_event_open, | |||
5677 | mutex_unlock(&ctx->mutex); | 5707 | mutex_unlock(&ctx->mutex); |
5678 | 5708 | ||
5679 | event->owner = current; | 5709 | event->owner = current; |
5680 | get_task_struct(current); | 5710 | |
5681 | mutex_lock(¤t->perf_event_mutex); | 5711 | mutex_lock(¤t->perf_event_mutex); |
5682 | list_add_tail(&event->owner_entry, ¤t->perf_event_list); | 5712 | list_add_tail(&event->owner_entry, ¤t->perf_event_list); |
5683 | mutex_unlock(¤t->perf_event_mutex); | 5713 | mutex_unlock(¤t->perf_event_mutex); |
@@ -5745,12 +5775,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, | |||
5745 | ++ctx->generation; | 5775 | ++ctx->generation; |
5746 | mutex_unlock(&ctx->mutex); | 5776 | mutex_unlock(&ctx->mutex); |
5747 | 5777 | ||
5748 | event->owner = current; | ||
5749 | get_task_struct(current); | ||
5750 | mutex_lock(¤t->perf_event_mutex); | ||
5751 | list_add_tail(&event->owner_entry, ¤t->perf_event_list); | ||
5752 | mutex_unlock(¤t->perf_event_mutex); | ||
5753 | |||
5754 | return event; | 5778 | return event; |
5755 | 5779 | ||
5756 | err_free: | 5780 | err_free: |
@@ -5901,8 +5925,24 @@ again: | |||
5901 | */ | 5925 | */ |
5902 | void perf_event_exit_task(struct task_struct *child) | 5926 | void perf_event_exit_task(struct task_struct *child) |
5903 | { | 5927 | { |
5928 | struct perf_event *event, *tmp; | ||
5904 | int ctxn; | 5929 | int ctxn; |
5905 | 5930 | ||
5931 | mutex_lock(&child->perf_event_mutex); | ||
5932 | list_for_each_entry_safe(event, tmp, &child->perf_event_list, | ||
5933 | owner_entry) { | ||
5934 | list_del_init(&event->owner_entry); | ||
5935 | |||
5936 | /* | ||
5937 | * Ensure the list deletion is visible before we clear | ||
5938 | * the owner, closes a race against perf_release() where | ||
5939 | * we need to serialize on the owner->perf_event_mutex. | ||
5940 | */ | ||
5941 | smp_wmb(); | ||
5942 | event->owner = NULL; | ||
5943 | } | ||
5944 | mutex_unlock(&child->perf_event_mutex); | ||
5945 | |||
5906 | for_each_task_context_nr(ctxn) | 5946 | for_each_task_context_nr(ctxn) |
5907 | perf_event_exit_task_context(child, ctxn); | 5947 | perf_event_exit_task_context(child, ctxn); |
5908 | } | 5948 | } |
@@ -6321,6 +6361,8 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) | |||
6321 | 6361 | ||
6322 | void __init perf_event_init(void) | 6362 | void __init perf_event_init(void) |
6323 | { | 6363 | { |
6364 | int ret; | ||
6365 | |||
6324 | perf_event_init_all_cpus(); | 6366 | perf_event_init_all_cpus(); |
6325 | init_srcu_struct(&pmus_srcu); | 6367 | init_srcu_struct(&pmus_srcu); |
6326 | perf_pmu_register(&perf_swevent); | 6368 | perf_pmu_register(&perf_swevent); |
@@ -6328,4 +6370,7 @@ void __init perf_event_init(void) | |||
6328 | perf_pmu_register(&perf_task_clock); | 6370 | perf_pmu_register(&perf_task_clock); |
6329 | perf_tp_register(); | 6371 | perf_tp_register(); |
6330 | perf_cpu_notifier(perf_cpu_notify); | 6372 | perf_cpu_notifier(perf_cpu_notify); |
6373 | |||
6374 | ret = init_hw_breakpoint(); | ||
6375 | WARN(ret, "hw_breakpoint initialization failed with: %d", ret); | ||
6331 | } | 6376 | } |
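The perf_event.c hunks above pair an smp_wmb() on the exit path with ACCESS_ONCE()/smp_read_barrier_depends() on the release path. A minimal sketch of that handshake, stripped of the perf specifics (illustrative only; everything other than the locking/RCU primitives is a hypothetical stand-in for the structures in the diff):

	struct obj {
		struct task_struct *owner;	/* stands in for event->owner */
		struct list_head owner_entry;
	};

	/* exit side (cf. perf_event_exit_task): unlink, then publish completion */
	static void exit_side(struct task_struct *me, struct obj *obj)
	{
		mutex_lock(&me->perf_event_mutex);
		list_del_init(&obj->owner_entry);
		smp_wmb();		/* make the unlink visible before clearing owner */
		obj->owner = NULL;
		mutex_unlock(&me->perf_event_mutex);
	}

	/* release side (cf. perf_release): observe owner under RCU, re-check under mutex */
	static void release_side(struct obj *obj)
	{
		struct task_struct *owner;

		rcu_read_lock();
		owner = ACCESS_ONCE(obj->owner);
		smp_read_barrier_depends();	/* pairs with the smp_wmb() above */
		if (owner)
			get_task_struct(owner);	/* safe while rcu_read_lock() is held */
		rcu_read_unlock();

		if (owner) {
			mutex_lock(&owner->perf_event_mutex);
			if (obj->owner)		/* NULL here means exit_side() already finished */
				list_del_init(&obj->owner_entry);
			mutex_unlock(&owner->perf_event_mutex);
			put_task_struct(owner);
		}
	}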
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 6842eeba5879..05bb7173850e 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c | |||
@@ -37,13 +37,13 @@ static int check_clock(const clockid_t which_clock) | |||
37 | if (pid == 0) | 37 | if (pid == 0) |
38 | return 0; | 38 | return 0; |
39 | 39 | ||
40 | read_lock(&tasklist_lock); | 40 | rcu_read_lock(); |
41 | p = find_task_by_vpid(pid); | 41 | p = find_task_by_vpid(pid); |
42 | if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ? | 42 | if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ? |
43 | same_thread_group(p, current) : thread_group_leader(p))) { | 43 | same_thread_group(p, current) : has_group_leader_pid(p))) { |
44 | error = -EINVAL; | 44 | error = -EINVAL; |
45 | } | 45 | } |
46 | read_unlock(&tasklist_lock); | 46 | rcu_read_unlock(); |
47 | 47 | ||
48 | return error; | 48 | return error; |
49 | } | 49 | } |
@@ -390,7 +390,7 @@ int posix_cpu_timer_create(struct k_itimer *new_timer) | |||
390 | 390 | ||
391 | INIT_LIST_HEAD(&new_timer->it.cpu.entry); | 391 | INIT_LIST_HEAD(&new_timer->it.cpu.entry); |
392 | 392 | ||
393 | read_lock(&tasklist_lock); | 393 | rcu_read_lock(); |
394 | if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) { | 394 | if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) { |
395 | if (pid == 0) { | 395 | if (pid == 0) { |
396 | p = current; | 396 | p = current; |
@@ -404,7 +404,7 @@ int posix_cpu_timer_create(struct k_itimer *new_timer) | |||
404 | p = current->group_leader; | 404 | p = current->group_leader; |
405 | } else { | 405 | } else { |
406 | p = find_task_by_vpid(pid); | 406 | p = find_task_by_vpid(pid); |
407 | if (p && !thread_group_leader(p)) | 407 | if (p && !has_group_leader_pid(p)) |
408 | p = NULL; | 408 | p = NULL; |
409 | } | 409 | } |
410 | } | 410 | } |
@@ -414,7 +414,7 @@ int posix_cpu_timer_create(struct k_itimer *new_timer) | |||
414 | } else { | 414 | } else { |
415 | ret = -EINVAL; | 415 | ret = -EINVAL; |
416 | } | 416 | } |
417 | read_unlock(&tasklist_lock); | 417 | rcu_read_unlock(); |
418 | 418 | ||
419 | return ret; | 419 | return ret; |
420 | } | 420 | } |
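The posix-cpu-timers.c changes above swap the read side of tasklist_lock for RCU; find_task_by_vpid() only needs rcu_read_lock(), and the returned task stays valid for the duration of the read-side critical section. A minimal sketch of the lookup pattern (hypothetical caller, illustrative only):

	static int pid_names_a_group_leader(pid_t pid)
	{
		struct task_struct *p;
		int ret = 0;

		rcu_read_lock();
		p = find_task_by_vpid(pid);	/* requires rcu_read_lock() or tasklist_lock */
		if (p && has_group_leader_pid(p))
			ret = 1;
		rcu_read_unlock();

		return ret;
	}

The switch from thread_group_leader() to has_group_leader_pid() presumably matters because, with only RCU held, a concurrent exec can change ->group_leader, whereas the leader-PID check remains stable.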
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 52ab113d8bb9..00ebd7686676 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
@@ -1758,10 +1758,6 @@ static void pull_task(struct rq *src_rq, struct task_struct *p, | |||
1758 | set_task_cpu(p, this_cpu); | 1758 | set_task_cpu(p, this_cpu); |
1759 | activate_task(this_rq, p, 0); | 1759 | activate_task(this_rq, p, 0); |
1760 | check_preempt_curr(this_rq, p, 0); | 1760 | check_preempt_curr(this_rq, p, 0); |
1761 | |||
1762 | /* re-arm NEWIDLE balancing when moving tasks */ | ||
1763 | src_rq->avg_idle = this_rq->avg_idle = 2*sysctl_sched_migration_cost; | ||
1764 | this_rq->idle_stamp = 0; | ||
1765 | } | 1761 | } |
1766 | 1762 | ||
1767 | /* | 1763 | /* |
@@ -3219,8 +3215,10 @@ static void idle_balance(int this_cpu, struct rq *this_rq) | |||
3219 | interval = msecs_to_jiffies(sd->balance_interval); | 3215 | interval = msecs_to_jiffies(sd->balance_interval); |
3220 | if (time_after(next_balance, sd->last_balance + interval)) | 3216 | if (time_after(next_balance, sd->last_balance + interval)) |
3221 | next_balance = sd->last_balance + interval; | 3217 | next_balance = sd->last_balance + interval; |
3222 | if (pulled_task) | 3218 | if (pulled_task) { |
3219 | this_rq->idle_stamp = 0; | ||
3223 | break; | 3220 | break; |
3221 | } | ||
3224 | } | 3222 | } |
3225 | 3223 | ||
3226 | raw_spin_lock(&this_rq->lock); | 3224 | raw_spin_lock(&this_rq->lock); |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 042084157980..c380612273bf 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -1283,6 +1283,8 @@ void trace_dump_stack(void) | |||
1283 | __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count()); | 1283 | __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count()); |
1284 | } | 1284 | } |
1285 | 1285 | ||
1286 | static DEFINE_PER_CPU(int, user_stack_count); | ||
1287 | |||
1286 | void | 1288 | void |
1287 | ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) | 1289 | ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) |
1288 | { | 1290 | { |
@@ -1301,6 +1303,18 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) | |||
1301 | if (unlikely(in_nmi())) | 1303 | if (unlikely(in_nmi())) |
1302 | return; | 1304 | return; |
1303 | 1305 | ||
1306 | /* | ||
1307 | * prevent recursion, since the user stack tracing may | ||
1308 | * trigger other kernel events. | ||
1309 | */ | ||
1310 | preempt_disable(); | ||
1311 | if (__this_cpu_read(user_stack_count)) | ||
1312 | goto out; | ||
1313 | |||
1314 | __this_cpu_inc(user_stack_count); | ||
1315 | |||
1316 | |||
1317 | |||
1304 | event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, | 1318 | event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, |
1305 | sizeof(*entry), flags, pc); | 1319 | sizeof(*entry), flags, pc); |
1306 | if (!event) | 1320 | if (!event) |
@@ -1318,6 +1332,11 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) | |||
1318 | save_stack_trace_user(&trace); | 1332 | save_stack_trace_user(&trace); |
1319 | if (!filter_check_discard(call, entry, buffer, event)) | 1333 | if (!filter_check_discard(call, entry, buffer, event)) |
1320 | ring_buffer_unlock_commit(buffer, event); | 1334 | ring_buffer_unlock_commit(buffer, event); |
1335 | |||
1336 | __this_cpu_dec(user_stack_count); | ||
1337 | |||
1338 | out: | ||
1339 | preempt_enable(); | ||
1321 | } | 1340 | } |
1322 | 1341 | ||
1323 | #ifdef UNUSED | 1342 | #ifdef UNUSED |
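The user_stack_count additions above are a per-CPU recursion guard: user stack tracing can itself trigger trace events, so each CPU bails out if it is already inside the hook. A stripped-down sketch of the same pattern (hypothetical names, illustrative only):

	static DEFINE_PER_CPU(int, in_my_hook);

	static void my_trace_hook(void)
	{
		preempt_disable();		/* keep the per-CPU flag ours */
		if (__this_cpu_read(in_my_hook))
			goto out;		/* already nested on this CPU */
		__this_cpu_inc(in_my_hook);

		do_traced_work();		/* hypothetical; may recurse into tracing */

		__this_cpu_dec(in_my_hook);
	out:
		preempt_enable();
	}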
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 2efa8ea07ff7..7a22b4129211 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -61,7 +61,14 @@ struct mem_cgroup *root_mem_cgroup __read_mostly; | |||
61 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP | 61 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP |
62 | /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */ | 62 | /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */ |
63 | int do_swap_account __read_mostly; | 63 | int do_swap_account __read_mostly; |
64 | static int really_do_swap_account __initdata = 1; /* for remember boot option*/ | 64 | |
65 | /* for remember boot option*/ | ||
66 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED | ||
67 | static int really_do_swap_account __initdata = 1; | ||
68 | #else | ||
69 | static int really_do_swap_account __initdata = 0; | ||
70 | #endif | ||
71 | |||
65 | #else | 72 | #else |
66 | #define do_swap_account (0) | 73 | #define do_swap_account (0) |
67 | #endif | 74 | #endif |
@@ -278,13 +285,14 @@ enum move_type { | |||
278 | 285 | ||
279 | /* "mc" and its members are protected by cgroup_mutex */ | 286 | /* "mc" and its members are protected by cgroup_mutex */ |
280 | static struct move_charge_struct { | 287 | static struct move_charge_struct { |
281 | spinlock_t lock; /* for from, to, moving_task */ | 288 | spinlock_t lock; /* for from, to */ |
282 | struct mem_cgroup *from; | 289 | struct mem_cgroup *from; |
283 | struct mem_cgroup *to; | 290 | struct mem_cgroup *to; |
284 | unsigned long precharge; | 291 | unsigned long precharge; |
285 | unsigned long moved_charge; | 292 | unsigned long moved_charge; |
286 | unsigned long moved_swap; | 293 | unsigned long moved_swap; |
287 | struct task_struct *moving_task; /* a task moving charges */ | 294 | struct task_struct *moving_task; /* a task moving charges */ |
295 | struct mm_struct *mm; | ||
288 | wait_queue_head_t waitq; /* a waitq for other context */ | 296 | wait_queue_head_t waitq; /* a waitq for other context */ |
289 | } mc = { | 297 | } mc = { |
290 | .lock = __SPIN_LOCK_UNLOCKED(mc.lock), | 298 | .lock = __SPIN_LOCK_UNLOCKED(mc.lock), |
@@ -2152,7 +2160,7 @@ static void __mem_cgroup_move_account(struct page_cgroup *pc, | |||
2152 | { | 2160 | { |
2153 | VM_BUG_ON(from == to); | 2161 | VM_BUG_ON(from == to); |
2154 | VM_BUG_ON(PageLRU(pc->page)); | 2162 | VM_BUG_ON(PageLRU(pc->page)); |
2155 | VM_BUG_ON(!PageCgroupLocked(pc)); | 2163 | VM_BUG_ON(!page_is_cgroup_locked(pc)); |
2156 | VM_BUG_ON(!PageCgroupUsed(pc)); | 2164 | VM_BUG_ON(!PageCgroupUsed(pc)); |
2157 | VM_BUG_ON(pc->mem_cgroup != from); | 2165 | VM_BUG_ON(pc->mem_cgroup != from); |
2158 | 2166 | ||
@@ -4631,7 +4639,7 @@ static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) | |||
4631 | unsigned long precharge; | 4639 | unsigned long precharge; |
4632 | struct vm_area_struct *vma; | 4640 | struct vm_area_struct *vma; |
4633 | 4641 | ||
4634 | down_read(&mm->mmap_sem); | 4642 | /* We've already held the mmap_sem */ |
4635 | for (vma = mm->mmap; vma; vma = vma->vm_next) { | 4643 | for (vma = mm->mmap; vma; vma = vma->vm_next) { |
4636 | struct mm_walk mem_cgroup_count_precharge_walk = { | 4644 | struct mm_walk mem_cgroup_count_precharge_walk = { |
4637 | .pmd_entry = mem_cgroup_count_precharge_pte_range, | 4645 | .pmd_entry = mem_cgroup_count_precharge_pte_range, |
@@ -4643,7 +4651,6 @@ static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) | |||
4643 | walk_page_range(vma->vm_start, vma->vm_end, | 4651 | walk_page_range(vma->vm_start, vma->vm_end, |
4644 | &mem_cgroup_count_precharge_walk); | 4652 | &mem_cgroup_count_precharge_walk); |
4645 | } | 4653 | } |
4646 | up_read(&mm->mmap_sem); | ||
4647 | 4654 | ||
4648 | precharge = mc.precharge; | 4655 | precharge = mc.precharge; |
4649 | mc.precharge = 0; | 4656 | mc.precharge = 0; |
@@ -4694,11 +4701,16 @@ static void mem_cgroup_clear_mc(void) | |||
4694 | 4701 | ||
4695 | mc.moved_swap = 0; | 4702 | mc.moved_swap = 0; |
4696 | } | 4703 | } |
4704 | if (mc.mm) { | ||
4705 | up_read(&mc.mm->mmap_sem); | ||
4706 | mmput(mc.mm); | ||
4707 | } | ||
4697 | spin_lock(&mc.lock); | 4708 | spin_lock(&mc.lock); |
4698 | mc.from = NULL; | 4709 | mc.from = NULL; |
4699 | mc.to = NULL; | 4710 | mc.to = NULL; |
4700 | mc.moving_task = NULL; | ||
4701 | spin_unlock(&mc.lock); | 4711 | spin_unlock(&mc.lock); |
4712 | mc.moving_task = NULL; | ||
4713 | mc.mm = NULL; | ||
4702 | mem_cgroup_end_move(from); | 4714 | mem_cgroup_end_move(from); |
4703 | memcg_oom_recover(from); | 4715 | memcg_oom_recover(from); |
4704 | memcg_oom_recover(to); | 4716 | memcg_oom_recover(to); |
@@ -4724,12 +4736,21 @@ static int mem_cgroup_can_attach(struct cgroup_subsys *ss, | |||
4724 | return 0; | 4736 | return 0; |
4725 | /* We move charges only when we move a owner of the mm */ | 4737 | /* We move charges only when we move a owner of the mm */ |
4726 | if (mm->owner == p) { | 4738 | if (mm->owner == p) { |
4739 | /* | ||
4740 | * We do all the move charge works under one mmap_sem to | ||
4741 | * avoid deadlock with down_write(&mmap_sem) | ||
4742 | * -> try_charge() -> if (mc.moving_task) -> sleep. | ||
4743 | */ | ||
4744 | down_read(&mm->mmap_sem); | ||
4745 | |||
4727 | VM_BUG_ON(mc.from); | 4746 | VM_BUG_ON(mc.from); |
4728 | VM_BUG_ON(mc.to); | 4747 | VM_BUG_ON(mc.to); |
4729 | VM_BUG_ON(mc.precharge); | 4748 | VM_BUG_ON(mc.precharge); |
4730 | VM_BUG_ON(mc.moved_charge); | 4749 | VM_BUG_ON(mc.moved_charge); |
4731 | VM_BUG_ON(mc.moved_swap); | 4750 | VM_BUG_ON(mc.moved_swap); |
4732 | VM_BUG_ON(mc.moving_task); | 4751 | VM_BUG_ON(mc.moving_task); |
4752 | VM_BUG_ON(mc.mm); | ||
4753 | |||
4733 | mem_cgroup_start_move(from); | 4754 | mem_cgroup_start_move(from); |
4734 | spin_lock(&mc.lock); | 4755 | spin_lock(&mc.lock); |
4735 | mc.from = from; | 4756 | mc.from = from; |
@@ -4737,14 +4758,16 @@ static int mem_cgroup_can_attach(struct cgroup_subsys *ss, | |||
4737 | mc.precharge = 0; | 4758 | mc.precharge = 0; |
4738 | mc.moved_charge = 0; | 4759 | mc.moved_charge = 0; |
4739 | mc.moved_swap = 0; | 4760 | mc.moved_swap = 0; |
4740 | mc.moving_task = current; | ||
4741 | spin_unlock(&mc.lock); | 4761 | spin_unlock(&mc.lock); |
4762 | mc.moving_task = current; | ||
4763 | mc.mm = mm; | ||
4742 | 4764 | ||
4743 | ret = mem_cgroup_precharge_mc(mm); | 4765 | ret = mem_cgroup_precharge_mc(mm); |
4744 | if (ret) | 4766 | if (ret) |
4745 | mem_cgroup_clear_mc(); | 4767 | mem_cgroup_clear_mc(); |
4746 | } | 4768 | /* We call up_read() and mmput() in clear_mc(). */ |
4747 | mmput(mm); | 4769 | } else |
4770 | mmput(mm); | ||
4748 | } | 4771 | } |
4749 | return ret; | 4772 | return ret; |
4750 | } | 4773 | } |
@@ -4832,7 +4855,7 @@ static void mem_cgroup_move_charge(struct mm_struct *mm) | |||
4832 | struct vm_area_struct *vma; | 4855 | struct vm_area_struct *vma; |
4833 | 4856 | ||
4834 | lru_add_drain_all(); | 4857 | lru_add_drain_all(); |
4835 | down_read(&mm->mmap_sem); | 4858 | /* We've already held the mmap_sem */ |
4836 | for (vma = mm->mmap; vma; vma = vma->vm_next) { | 4859 | for (vma = mm->mmap; vma; vma = vma->vm_next) { |
4837 | int ret; | 4860 | int ret; |
4838 | struct mm_walk mem_cgroup_move_charge_walk = { | 4861 | struct mm_walk mem_cgroup_move_charge_walk = { |
@@ -4851,7 +4874,6 @@ static void mem_cgroup_move_charge(struct mm_struct *mm) | |||
4851 | */ | 4874 | */ |
4852 | break; | 4875 | break; |
4853 | } | 4876 | } |
4854 | up_read(&mm->mmap_sem); | ||
4855 | } | 4877 | } |
4856 | 4878 | ||
4857 | static void mem_cgroup_move_task(struct cgroup_subsys *ss, | 4879 | static void mem_cgroup_move_task(struct cgroup_subsys *ss, |
@@ -4860,17 +4882,11 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss, | |||
4860 | struct task_struct *p, | 4882 | struct task_struct *p, |
4861 | bool threadgroup) | 4883 | bool threadgroup) |
4862 | { | 4884 | { |
4863 | struct mm_struct *mm; | 4885 | if (!mc.mm) |
4864 | |||
4865 | if (!mc.to) | ||
4866 | /* no need to move charge */ | 4886 | /* no need to move charge */ |
4867 | return; | 4887 | return; |
4868 | 4888 | ||
4869 | mm = get_task_mm(p); | 4889 | mem_cgroup_move_charge(mc.mm); |
4870 | if (mm) { | ||
4871 | mem_cgroup_move_charge(mm); | ||
4872 | mmput(mm); | ||
4873 | } | ||
4874 | mem_cgroup_clear_mc(); | 4890 | mem_cgroup_clear_mc(); |
4875 | } | 4891 | } |
4876 | #else /* !CONFIG_MMU */ | 4892 | #else /* !CONFIG_MMU */ |
@@ -4911,10 +4927,20 @@ struct cgroup_subsys mem_cgroup_subsys = { | |||
4911 | }; | 4927 | }; |
4912 | 4928 | ||
4913 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP | 4929 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP |
4930 | static int __init enable_swap_account(char *s) | ||
4931 | { | ||
4932 | /* consider enabled if no parameter or 1 is given */ | ||
4933 | if (!s || !strcmp(s, "1")) | ||
4934 | really_do_swap_account = 1; | ||
4935 | else if (!strcmp(s, "0")) | ||
4936 | really_do_swap_account = 0; | ||
4937 | return 1; | ||
4938 | } | ||
4939 | __setup("swapaccount", enable_swap_account); | ||
4914 | 4940 | ||
4915 | static int __init disable_swap_account(char *s) | 4941 | static int __init disable_swap_account(char *s) |
4916 | { | 4942 | { |
4917 | really_do_swap_account = 0; | 4943 | enable_swap_account("0"); |
4918 | return 1; | 4944 | return 1; |
4919 | } | 4945 | } |
4920 | __setup("noswapaccount", disable_swap_account); | 4946 | __setup("noswapaccount", disable_swap_account); |
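Taken together with the CGROUP_MEM_RES_CTLR_SWAP_ENABLED Kconfig default earlier in this series, the two __setup() handlers above give the following boot-time behaviour (a summary sketched from the hunks, not a statement from the changelog):

	/*
	 *   (nothing on the command line) -> CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED
	 *                                    decides: 1 if set, 0 otherwise
	 *   swapaccount=1                  -> 1
	 *   swapaccount=0                  -> 0
	 *   noswapaccount                  -> 0 (now just calls enable_swap_account("0"))
	 */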
diff --git a/mm/nommu.c b/mm/nommu.c index 3613517c7592..27a9ac588516 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -1717,6 +1717,7 @@ void exit_mmap(struct mm_struct *mm) | |||
1717 | mm->mmap = vma->vm_next; | 1717 | mm->mmap = vma->vm_next; |
1718 | delete_vma_from_mm(vma); | 1718 | delete_vma_from_mm(vma); |
1719 | delete_vma(mm, vma); | 1719 | delete_vma(mm, vma); |
1720 | cond_resched(); | ||
1720 | } | 1721 | } |
1721 | 1722 | ||
1722 | kleave(""); | 1723 | kleave(""); |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 07a654486f75..e4092704c1a9 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -3008,14 +3008,6 @@ static __init_refok int __build_all_zonelists(void *data) | |||
3008 | build_zonelist_cache(pgdat); | 3008 | build_zonelist_cache(pgdat); |
3009 | } | 3009 | } |
3010 | 3010 | ||
3011 | #ifdef CONFIG_MEMORY_HOTPLUG | ||
3012 | /* Setup real pagesets for the new zone */ | ||
3013 | if (data) { | ||
3014 | struct zone *zone = data; | ||
3015 | setup_zone_pageset(zone); | ||
3016 | } | ||
3017 | #endif | ||
3018 | |||
3019 | /* | 3011 | /* |
3020 | * Initialize the boot_pagesets that are going to be used | 3012 | * Initialize the boot_pagesets that are going to be used |
3021 | * for bootstrapping processors. The real pagesets for | 3013 | * for bootstrapping processors. The real pagesets for |
@@ -3064,7 +3056,11 @@ void build_all_zonelists(void *data) | |||
3064 | } else { | 3056 | } else { |
3065 | /* we have to stop all cpus to guarantee there is no user | 3057 | /* we have to stop all cpus to guarantee there is no user |
3066 | of zonelist */ | 3058 | of zonelist */ |
3067 | stop_machine(__build_all_zonelists, data, NULL); | 3059 | #ifdef CONFIG_MEMORY_HOTPLUG |
3060 | if (data) | ||
3061 | setup_zone_pageset((struct zone *)data); | ||
3062 | #endif | ||
3063 | stop_machine(__build_all_zonelists, NULL, NULL); | ||
3068 | /* cpuset refresh routine should be here */ | 3064 | /* cpuset refresh routine should be here */ |
3069 | } | 3065 | } |
3070 | vm_total_pages = nr_free_pagecache_pages(); | 3066 | vm_total_pages = nr_free_pagecache_pages(); |
diff --git a/mm/pagewalk.c b/mm/pagewalk.c index 8b1a2ce21ee5..38cc58b8b2b0 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c | |||
@@ -139,7 +139,6 @@ int walk_page_range(unsigned long addr, unsigned long end, | |||
139 | pgd_t *pgd; | 139 | pgd_t *pgd; |
140 | unsigned long next; | 140 | unsigned long next; |
141 | int err = 0; | 141 | int err = 0; |
142 | struct vm_area_struct *vma; | ||
143 | 142 | ||
144 | if (addr >= end) | 143 | if (addr >= end) |
145 | return err; | 144 | return err; |
@@ -149,15 +148,17 @@ int walk_page_range(unsigned long addr, unsigned long end, | |||
149 | 148 | ||
150 | pgd = pgd_offset(walk->mm, addr); | 149 | pgd = pgd_offset(walk->mm, addr); |
151 | do { | 150 | do { |
151 | struct vm_area_struct *uninitialized_var(vma); | ||
152 | |||
152 | next = pgd_addr_end(addr, end); | 153 | next = pgd_addr_end(addr, end); |
153 | 154 | ||
155 | #ifdef CONFIG_HUGETLB_PAGE | ||
154 | /* | 156 | /* |
155 | * handle hugetlb vma individually because pagetable walk for | 157 | * handle hugetlb vma individually because pagetable walk for |
156 | * the hugetlb page is dependent on the architecture and | 158 | * the hugetlb page is dependent on the architecture and |
157 | * we can't handled it in the same manner as non-huge pages. | 159 | * we can't handled it in the same manner as non-huge pages. |
158 | */ | 160 | */ |
159 | vma = find_vma(walk->mm, addr); | 161 | vma = find_vma(walk->mm, addr); |
160 | #ifdef CONFIG_HUGETLB_PAGE | ||
161 | if (vma && is_vm_hugetlb_page(vma)) { | 162 | if (vma && is_vm_hugetlb_page(vma)) { |
162 | if (vma->vm_end < next) | 163 | if (vma->vm_end < next) |
163 | next = vma->vm_end; | 164 | next = vma->vm_end; |
diff --git a/net/ceph/buffer.c b/net/ceph/buffer.c index 53d8abfa25d5..bf3e6a13c215 100644 --- a/net/ceph/buffer.c +++ b/net/ceph/buffer.c | |||
@@ -19,7 +19,7 @@ struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp) | |||
19 | if (b->vec.iov_base) { | 19 | if (b->vec.iov_base) { |
20 | b->is_vmalloc = false; | 20 | b->is_vmalloc = false; |
21 | } else { | 21 | } else { |
22 | b->vec.iov_base = __vmalloc(len, gfp, PAGE_KERNEL); | 22 | b->vec.iov_base = __vmalloc(len, gfp | __GFP_HIGHMEM, PAGE_KERNEL); |
23 | if (!b->vec.iov_base) { | 23 | if (!b->vec.iov_base) { |
24 | kfree(b); | 24 | kfree(b); |
25 | return NULL; | 25 | return NULL; |
diff --git a/net/core/request_sock.c b/net/core/request_sock.c index 7552495aff7a..fceeb37d7161 100644 --- a/net/core/request_sock.c +++ b/net/core/request_sock.c | |||
@@ -45,9 +45,7 @@ int reqsk_queue_alloc(struct request_sock_queue *queue, | |||
45 | nr_table_entries = roundup_pow_of_two(nr_table_entries + 1); | 45 | nr_table_entries = roundup_pow_of_two(nr_table_entries + 1); |
46 | lopt_size += nr_table_entries * sizeof(struct request_sock *); | 46 | lopt_size += nr_table_entries * sizeof(struct request_sock *); |
47 | if (lopt_size > PAGE_SIZE) | 47 | if (lopt_size > PAGE_SIZE) |
48 | lopt = __vmalloc(lopt_size, | 48 | lopt = vzalloc(lopt_size); |
49 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, | ||
50 | PAGE_KERNEL); | ||
51 | else | 49 | else |
52 | lopt = kzalloc(lopt_size, GFP_KERNEL); | 50 | lopt = kzalloc(lopt_size, GFP_KERNEL); |
53 | if (lopt == NULL) | 51 | if (lopt == NULL) |
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 200eb538fbb3..0f280348e0fd 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
@@ -365,7 +365,7 @@ static struct tnode *tnode_alloc(size_t size) | |||
365 | if (size <= PAGE_SIZE) | 365 | if (size <= PAGE_SIZE) |
366 | return kzalloc(size, GFP_KERNEL); | 366 | return kzalloc(size, GFP_KERNEL); |
367 | else | 367 | else |
368 | return __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL); | 368 | return vzalloc(size); |
369 | } | 369 | } |
370 | 370 | ||
371 | static void __tnode_vfree(struct work_struct *arg) | 371 | static void __tnode_vfree(struct work_struct *arg) |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 2fc35b32df9e..23cc8e1ce8d4 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -2758,13 +2758,13 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
2758 | ifa->state = INET6_IFADDR_STATE_DEAD; | 2758 | ifa->state = INET6_IFADDR_STATE_DEAD; |
2759 | spin_unlock_bh(&ifa->state_lock); | 2759 | spin_unlock_bh(&ifa->state_lock); |
2760 | 2760 | ||
2761 | if (state == INET6_IFADDR_STATE_DEAD) { | 2761 | if (state != INET6_IFADDR_STATE_DEAD) { |
2762 | in6_ifa_put(ifa); | ||
2763 | } else { | ||
2764 | __ipv6_ifa_notify(RTM_DELADDR, ifa); | 2762 | __ipv6_ifa_notify(RTM_DELADDR, ifa); |
2765 | atomic_notifier_call_chain(&inet6addr_chain, | 2763 | atomic_notifier_call_chain(&inet6addr_chain, |
2766 | NETDEV_DOWN, ifa); | 2764 | NETDEV_DOWN, ifa); |
2767 | } | 2765 | } |
2766 | |||
2767 | in6_ifa_put(ifa); | ||
2768 | write_lock_bh(&idev->lock); | 2768 | write_lock_bh(&idev->lock); |
2769 | } | 2769 | } |
2770 | } | 2770 | } |
diff --git a/net/xfrm/xfrm_hash.c b/net/xfrm/xfrm_hash.c index a2023ec52329..1e98bc0fe0a5 100644 --- a/net/xfrm/xfrm_hash.c +++ b/net/xfrm/xfrm_hash.c | |||
@@ -19,7 +19,7 @@ struct hlist_head *xfrm_hash_alloc(unsigned int sz) | |||
19 | if (sz <= PAGE_SIZE) | 19 | if (sz <= PAGE_SIZE) |
20 | n = kzalloc(sz, GFP_KERNEL); | 20 | n = kzalloc(sz, GFP_KERNEL); |
21 | else if (hashdist) | 21 | else if (hashdist) |
22 | n = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL); | 22 | n = vzalloc(sz); |
23 | else | 23 | else |
24 | n = (struct hlist_head *) | 24 | n = (struct hlist_head *) |
25 | __get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, | 25 | __get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, |
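request_sock.c, fib_trie.c and xfrm_hash.c above (and the sound/oss changes further down) are the same mechanical conversion: open-coded zeroed vmalloc becomes vzalloc(). A minimal sketch of the size-dependent allocation pattern those call sites keep, with a hypothetical helper name (illustrative only):

	static void *alloc_table(size_t size)
	{
		if (size <= PAGE_SIZE)
			return kzalloc(size, GFP_KERNEL);
		/* was: __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL) */
		return vzalloc(size);
	}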
diff --git a/scripts/gfp-translate b/scripts/gfp-translate index d81b968d864e..c9230e158a8f 100644 --- a/scripts/gfp-translate +++ b/scripts/gfp-translate | |||
@@ -63,7 +63,12 @@ fi | |||
63 | 63 | ||
64 | # Extract GFP flags from the kernel source | 64 | # Extract GFP flags from the kernel source |
65 | TMPFILE=`mktemp -t gfptranslate-XXXXXX` || exit 1 | 65 | TMPFILE=`mktemp -t gfptranslate-XXXXXX` || exit 1 |
66 | grep "^#define __GFP" $SOURCE/include/linux/gfp.h | sed -e 's/(__force gfp_t)//' | sed -e 's/u)/)/' | grep -v GFP_BITS | sed -e 's/)\//) \//' > $TMPFILE | 66 | grep -q ___GFP $SOURCE/include/linux/gfp.h |
67 | if [ $? -eq 0 ]; then | ||
68 | grep "^#define ___GFP" $SOURCE/include/linux/gfp.h | sed -e 's/u$//' | grep -v GFP_BITS > $TMPFILE | ||
69 | else | ||
70 | grep "^#define __GFP" $SOURCE/include/linux/gfp.h | sed -e 's/(__force gfp_t)//' | sed -e 's/u)/)/' | grep -v GFP_BITS | sed -e 's/)\//) \//' > $TMPFILE | ||
71 | fi | ||
67 | 72 | ||
68 | # Parse the flags | 73 | # Parse the flags |
69 | IFS=" | 74 | IFS=" |
diff --git a/sound/atmel/abdac.c b/sound/atmel/abdac.c index f2f41c854221..6e2409181895 100644 --- a/sound/atmel/abdac.c +++ b/sound/atmel/abdac.c | |||
@@ -420,9 +420,9 @@ static int __devinit atmel_abdac_probe(struct platform_device *pdev) | |||
420 | return PTR_ERR(pclk); | 420 | return PTR_ERR(pclk); |
421 | } | 421 | } |
422 | sample_clk = clk_get(&pdev->dev, "sample_clk"); | 422 | sample_clk = clk_get(&pdev->dev, "sample_clk"); |
423 | if (IS_ERR(pclk)) { | 423 | if (IS_ERR(sample_clk)) { |
424 | dev_dbg(&pdev->dev, "no sample clock\n"); | 424 | dev_dbg(&pdev->dev, "no sample clock\n"); |
425 | retval = PTR_ERR(pclk); | 425 | retval = PTR_ERR(sample_clk); |
426 | goto out_put_pclk; | 426 | goto out_put_pclk; |
427 | } | 427 | } |
428 | clk_enable(pclk); | 428 | clk_enable(pclk); |
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index a1707cca9c66..b75db8e9cc0f 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c | |||
@@ -223,7 +223,7 @@ static void xrun_log(struct snd_pcm_substream *substream, | |||
223 | entry->jiffies = jiffies; | 223 | entry->jiffies = jiffies; |
224 | entry->pos = pos; | 224 | entry->pos = pos; |
225 | entry->period_size = runtime->period_size; | 225 | entry->period_size = runtime->period_size; |
226 | entry->buffer_size = runtime->buffer_size;; | 226 | entry->buffer_size = runtime->buffer_size; |
227 | entry->old_hw_ptr = runtime->status->hw_ptr; | 227 | entry->old_hw_ptr = runtime->status->hw_ptr; |
228 | entry->hw_ptr_base = runtime->hw_ptr_base; | 228 | entry->hw_ptr_base = runtime->hw_ptr_base; |
229 | log->idx = (log->idx + 1) % XRUN_LOG_CNT; | 229 | log->idx = (log->idx + 1) % XRUN_LOG_CNT; |
diff --git a/sound/oss/dev_table.c b/sound/oss/dev_table.c index 727bdb9ba2dc..d8cf3e58dc76 100644 --- a/sound/oss/dev_table.c +++ b/sound/oss/dev_table.c | |||
@@ -71,7 +71,7 @@ int sound_install_audiodrv(int vers, char *name, struct audio_driver *driver, | |||
71 | if (sound_nblocks >= MAX_MEM_BLOCKS) | 71 | if (sound_nblocks >= MAX_MEM_BLOCKS) |
72 | sound_nblocks = MAX_MEM_BLOCKS - 1; | 72 | sound_nblocks = MAX_MEM_BLOCKS - 1; |
73 | 73 | ||
74 | op = (struct audio_operations *) (sound_mem_blocks[sound_nblocks] = vmalloc(sizeof(struct audio_operations))); | 74 | op = (struct audio_operations *) (sound_mem_blocks[sound_nblocks] = vzalloc(sizeof(struct audio_operations))); |
75 | sound_nblocks++; | 75 | sound_nblocks++; |
76 | if (sound_nblocks >= MAX_MEM_BLOCKS) | 76 | if (sound_nblocks >= MAX_MEM_BLOCKS) |
77 | sound_nblocks = MAX_MEM_BLOCKS - 1; | 77 | sound_nblocks = MAX_MEM_BLOCKS - 1; |
@@ -81,7 +81,6 @@ int sound_install_audiodrv(int vers, char *name, struct audio_driver *driver, | |||
81 | sound_unload_audiodev(num); | 81 | sound_unload_audiodev(num); |
82 | return -(ENOMEM); | 82 | return -(ENOMEM); |
83 | } | 83 | } |
84 | memset((char *) op, 0, sizeof(struct audio_operations)); | ||
85 | init_waitqueue_head(&op->in_sleeper); | 84 | init_waitqueue_head(&op->in_sleeper); |
86 | init_waitqueue_head(&op->out_sleeper); | 85 | init_waitqueue_head(&op->out_sleeper); |
87 | init_waitqueue_head(&op->poll_sleeper); | 86 | init_waitqueue_head(&op->poll_sleeper); |
@@ -128,7 +127,7 @@ int sound_install_mixer(int vers, char *name, struct mixer_operations *driver, | |||
128 | /* FIXME: This leaks a mixer_operations struct every time its called | 127 | /* FIXME: This leaks a mixer_operations struct every time its called |
129 | until you unload sound! */ | 128 | until you unload sound! */ |
130 | 129 | ||
131 | op = (struct mixer_operations *) (sound_mem_blocks[sound_nblocks] = vmalloc(sizeof(struct mixer_operations))); | 130 | op = (struct mixer_operations *) (sound_mem_blocks[sound_nblocks] = vzalloc(sizeof(struct mixer_operations))); |
132 | sound_nblocks++; | 131 | sound_nblocks++; |
133 | if (sound_nblocks >= MAX_MEM_BLOCKS) | 132 | if (sound_nblocks >= MAX_MEM_BLOCKS) |
134 | sound_nblocks = MAX_MEM_BLOCKS - 1; | 133 | sound_nblocks = MAX_MEM_BLOCKS - 1; |
@@ -137,7 +136,6 @@ int sound_install_mixer(int vers, char *name, struct mixer_operations *driver, | |||
137 | printk(KERN_ERR "Sound: Can't allocate mixer driver for (%s)\n", name); | 136 | printk(KERN_ERR "Sound: Can't allocate mixer driver for (%s)\n", name); |
138 | return -ENOMEM; | 137 | return -ENOMEM; |
139 | } | 138 | } |
140 | memset((char *) op, 0, sizeof(struct mixer_operations)); | ||
141 | memcpy((char *) op, (char *) driver, driver_size); | 139 | memcpy((char *) op, (char *) driver, driver_size); |
142 | 140 | ||
143 | strlcpy(op->name, name, sizeof(op->name)); | 141 | strlcpy(op->name, name, sizeof(op->name)); |
diff --git a/sound/oss/midibuf.c b/sound/oss/midibuf.c index 782b3b84dac6..ceedb1eff203 100644 --- a/sound/oss/midibuf.c +++ b/sound/oss/midibuf.c | |||
@@ -178,7 +178,7 @@ int MIDIbuf_open(int dev, struct file *file) | |||
178 | return err; | 178 | return err; |
179 | 179 | ||
180 | parms[dev].prech_timeout = MAX_SCHEDULE_TIMEOUT; | 180 | parms[dev].prech_timeout = MAX_SCHEDULE_TIMEOUT; |
181 | midi_in_buf[dev] = (struct midi_buf *) vmalloc(sizeof(struct midi_buf)); | 181 | midi_in_buf[dev] = vmalloc(sizeof(struct midi_buf)); |
182 | 182 | ||
183 | if (midi_in_buf[dev] == NULL) | 183 | if (midi_in_buf[dev] == NULL) |
184 | { | 184 | { |
@@ -188,7 +188,7 @@ int MIDIbuf_open(int dev, struct file *file) | |||
188 | } | 188 | } |
189 | midi_in_buf[dev]->len = midi_in_buf[dev]->head = midi_in_buf[dev]->tail = 0; | 189 | midi_in_buf[dev]->len = midi_in_buf[dev]->head = midi_in_buf[dev]->tail = 0; |
190 | 190 | ||
191 | midi_out_buf[dev] = (struct midi_buf *) vmalloc(sizeof(struct midi_buf)); | 191 | midi_out_buf[dev] = vmalloc(sizeof(struct midi_buf)); |
192 | 192 | ||
193 | if (midi_out_buf[dev] == NULL) | 193 | if (midi_out_buf[dev] == NULL) |
194 | { | 194 | { |
diff --git a/sound/oss/pss.c b/sound/oss/pss.c index e19dd5dcc2de..9b800ce5100e 100644 --- a/sound/oss/pss.c +++ b/sound/oss/pss.c | |||
@@ -859,7 +859,7 @@ static int pss_coproc_ioctl(void *dev_info, unsigned int cmd, void __user *arg, | |||
859 | return 0; | 859 | return 0; |
860 | 860 | ||
861 | case SNDCTL_COPR_LOAD: | 861 | case SNDCTL_COPR_LOAD: |
862 | buf = (copr_buffer *) vmalloc(sizeof(copr_buffer)); | 862 | buf = vmalloc(sizeof(copr_buffer)); |
863 | if (buf == NULL) | 863 | if (buf == NULL) |
864 | return -ENOSPC; | 864 | return -ENOSPC; |
865 | if (copy_from_user(buf, arg, sizeof(copr_buffer))) { | 865 | if (copy_from_user(buf, arg, sizeof(copr_buffer))) { |
@@ -871,7 +871,7 @@ static int pss_coproc_ioctl(void *dev_info, unsigned int cmd, void __user *arg, | |||
871 | return err; | 871 | return err; |
872 | 872 | ||
873 | case SNDCTL_COPR_SENDMSG: | 873 | case SNDCTL_COPR_SENDMSG: |
874 | mbuf = (copr_msg *)vmalloc(sizeof(copr_msg)); | 874 | mbuf = vmalloc(sizeof(copr_msg)); |
875 | if (mbuf == NULL) | 875 | if (mbuf == NULL) |
876 | return -ENOSPC; | 876 | return -ENOSPC; |
877 | if (copy_from_user(mbuf, arg, sizeof(copr_msg))) { | 877 | if (copy_from_user(mbuf, arg, sizeof(copr_msg))) { |
@@ -895,7 +895,7 @@ static int pss_coproc_ioctl(void *dev_info, unsigned int cmd, void __user *arg, | |||
895 | 895 | ||
896 | case SNDCTL_COPR_RCVMSG: | 896 | case SNDCTL_COPR_RCVMSG: |
897 | err = 0; | 897 | err = 0; |
898 | mbuf = (copr_msg *)vmalloc(sizeof(copr_msg)); | 898 | mbuf = vmalloc(sizeof(copr_msg)); |
899 | if (mbuf == NULL) | 899 | if (mbuf == NULL) |
900 | return -ENOSPC; | 900 | return -ENOSPC; |
901 | data = (unsigned short *)mbuf->data; | 901 | data = (unsigned short *)mbuf->data; |
diff --git a/sound/oss/sequencer.c b/sound/oss/sequencer.c index e85789e53816..5ea1098ac427 100644 --- a/sound/oss/sequencer.c +++ b/sound/oss/sequencer.c | |||
@@ -1646,13 +1646,13 @@ void sequencer_init(void) | |||
1646 | { | 1646 | { |
1647 | if (sequencer_ok) | 1647 | if (sequencer_ok) |
1648 | return; | 1648 | return; |
1649 | queue = (unsigned char *)vmalloc(SEQ_MAX_QUEUE * EV_SZ); | 1649 | queue = vmalloc(SEQ_MAX_QUEUE * EV_SZ); |
1650 | if (queue == NULL) | 1650 | if (queue == NULL) |
1651 | { | 1651 | { |
1652 | printk(KERN_ERR "sequencer: Can't allocate memory for sequencer output queue\n"); | 1652 | printk(KERN_ERR "sequencer: Can't allocate memory for sequencer output queue\n"); |
1653 | return; | 1653 | return; |
1654 | } | 1654 | } |
1655 | iqueue = (unsigned char *)vmalloc(SEQ_MAX_QUEUE * IEV_SZ); | 1655 | iqueue = vmalloc(SEQ_MAX_QUEUE * IEV_SZ); |
1656 | if (iqueue == NULL) | 1656 | if (iqueue == NULL) |
1657 | { | 1657 | { |
1658 | printk(KERN_ERR "sequencer: Can't allocate memory for sequencer input queue\n"); | 1658 | printk(KERN_ERR "sequencer: Can't allocate memory for sequencer input queue\n"); |
diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c index 62895a719fcb..22dbd91811a4 100644 --- a/sound/pci/asihpi/hpioctl.c +++ b/sound/pci/asihpi/hpioctl.c | |||
@@ -435,7 +435,7 @@ void __devexit asihpi_adapter_remove(struct pci_dev *pci_dev) | |||
435 | struct hpi_message hm; | 435 | struct hpi_message hm; |
436 | struct hpi_response hr; | 436 | struct hpi_response hr; |
437 | struct hpi_adapter *pa; | 437 | struct hpi_adapter *pa; |
438 | pa = (struct hpi_adapter *)pci_get_drvdata(pci_dev); | 438 | pa = pci_get_drvdata(pci_dev); |
439 | 439 | ||
440 | hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM, | 440 | hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM, |
441 | HPI_SUBSYS_DELETE_ADAPTER); | 441 | HPI_SUBSYS_DELETE_ADAPTER); |
diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c index 4679ed83a43b..2f3cacbd5528 100644 --- a/sound/pci/azt3328.c +++ b/sound/pci/azt3328.c | |||
@@ -1129,10 +1129,11 @@ snd_azf3328_codec_setdmaa(struct snd_azf3328 *chip, | |||
1129 | 1129 | ||
1130 | count_areas = size/2; | 1130 | count_areas = size/2; |
1131 | addr_area2 = addr+count_areas; | 1131 | addr_area2 = addr+count_areas; |
1132 | count_areas--; /* max. index */ | ||
1133 | snd_azf3328_dbgcodec("setdma: buffers %08lx[%u] / %08lx[%u]\n", | 1132 | snd_azf3328_dbgcodec("setdma: buffers %08lx[%u] / %08lx[%u]\n", |
1134 | addr, count_areas, addr_area2, count_areas); | 1133 | addr, count_areas, addr_area2, count_areas); |
1135 | 1134 | ||
1135 | count_areas--; /* max. index */ | ||
1136 | |||
1136 | /* build combined I/O buffer length word */ | 1137 | /* build combined I/O buffer length word */ |
1137 | lengths = (count_areas << 16) | (count_areas); | 1138 | lengths = (count_areas << 16) | (count_areas); |
1138 | spin_lock_irqsave(&chip->reg_lock, flags); | 1139 | spin_lock_irqsave(&chip->reg_lock, flags); |
@@ -1740,11 +1741,15 @@ static const struct snd_pcm_hardware snd_azf3328_hardware = | |||
1740 | .rate_max = AZF_FREQ_66200, | 1741 | .rate_max = AZF_FREQ_66200, |
1741 | .channels_min = 1, | 1742 | .channels_min = 1, |
1742 | .channels_max = 2, | 1743 | .channels_max = 2, |
1743 | .buffer_bytes_max = 65536, | 1744 | .buffer_bytes_max = (64*1024), |
1744 | .period_bytes_min = 64, | 1745 | .period_bytes_min = 1024, |
1745 | .period_bytes_max = 65536, | 1746 | .period_bytes_max = (32*1024), |
1746 | .periods_min = 1, | 1747 | /* We simply have two DMA areas (instead of a list of descriptors |
1747 | .periods_max = 1024, | 1748 | such as other cards); I believe that this is a fixed hardware |
1749 | attribute and there isn't much driver magic to be done to expand it. | ||
1750 | Thus indicate that we have at least and at most 2 periods. */ | ||
1751 | .periods_min = 2, | ||
1752 | .periods_max = 2, | ||
1748 | /* FIXME: maybe that card actually has a FIFO? | 1753 | /* FIXME: maybe that card actually has a FIFO? |
1749 | * Hmm, it seems newer revisions do have one, but we still don't know | 1754 | * Hmm, it seems newer revisions do have one, but we still don't know |
1750 | * its size... */ | 1755 | * its size... */ |
@@ -1980,8 +1985,13 @@ snd_azf3328_timer_stop(struct snd_timer *timer) | |||
1980 | chip = snd_timer_chip(timer); | 1985 | chip = snd_timer_chip(timer); |
1981 | spin_lock_irqsave(&chip->reg_lock, flags); | 1986 | spin_lock_irqsave(&chip->reg_lock, flags); |
1982 | /* disable timer countdown and interrupt */ | 1987 | /* disable timer countdown and interrupt */ |
1983 | /* FIXME: should we write TIMER_IRQ_ACK here? */ | 1988 | /* Hmm, should we write TIMER_IRQ_ACK here? |
1984 | snd_azf3328_ctrl_outb(chip, IDX_IO_TIMER_VALUE + 3, 0); | 1989 | YES indeed, otherwise a rogue timer operation - which prompts |
1990 | ALSA(?) to call repeated stop() in vain, but NOT start() - | ||
1991 | will never end (value 0x03 is kept shown in control byte). | ||
1992 | Simply manually poking 0x04 _once_ immediately successfully stops | ||
1993 | the hardware/ALSA interrupt activity. */ | ||
1994 | snd_azf3328_ctrl_outb(chip, IDX_IO_TIMER_VALUE + 3, 0x04); | ||
1985 | spin_unlock_irqrestore(&chip->reg_lock, flags); | 1995 | spin_unlock_irqrestore(&chip->reg_lock, flags); |
1986 | snd_azf3328_dbgcallleave(); | 1996 | snd_azf3328_dbgcallleave(); |
1987 | return 0; | 1997 | return 0; |
diff --git a/sound/pci/ctxfi/ctpcm.c b/sound/pci/ctxfi/ctpcm.c index 85ab43e89212..457d21189b0d 100644 --- a/sound/pci/ctxfi/ctpcm.c +++ b/sound/pci/ctxfi/ctpcm.c | |||
@@ -129,8 +129,6 @@ static int ct_pcm_playback_open(struct snd_pcm_substream *substream) | |||
129 | 129 | ||
130 | apcm->substream = substream; | 130 | apcm->substream = substream; |
131 | apcm->interrupt = ct_atc_pcm_interrupt; | 131 | apcm->interrupt = ct_atc_pcm_interrupt; |
132 | runtime->private_data = apcm; | ||
133 | runtime->private_free = ct_atc_pcm_free_substream; | ||
134 | if (IEC958 == substream->pcm->device) { | 132 | if (IEC958 == substream->pcm->device) { |
135 | runtime->hw = ct_spdif_passthru_playback_hw; | 133 | runtime->hw = ct_spdif_passthru_playback_hw; |
136 | atc->spdif_out_passthru(atc, 1); | 134 | atc->spdif_out_passthru(atc, 1); |
@@ -155,8 +153,12 @@ static int ct_pcm_playback_open(struct snd_pcm_substream *substream) | |||
155 | } | 153 | } |
156 | 154 | ||
157 | apcm->timer = ct_timer_instance_new(atc->timer, apcm); | 155 | apcm->timer = ct_timer_instance_new(atc->timer, apcm); |
158 | if (!apcm->timer) | 156 | if (!apcm->timer) { |
157 | kfree(apcm); | ||
159 | return -ENOMEM; | 158 | return -ENOMEM; |
159 | } | ||
160 | runtime->private_data = apcm; | ||
161 | runtime->private_free = ct_atc_pcm_free_substream; | ||
160 | 162 | ||
161 | return 0; | 163 | return 0; |
162 | } | 164 | } |
@@ -278,8 +280,6 @@ static int ct_pcm_capture_open(struct snd_pcm_substream *substream) | |||
278 | apcm->started = 0; | 280 | apcm->started = 0; |
279 | apcm->substream = substream; | 281 | apcm->substream = substream; |
280 | apcm->interrupt = ct_atc_pcm_interrupt; | 282 | apcm->interrupt = ct_atc_pcm_interrupt; |
281 | runtime->private_data = apcm; | ||
282 | runtime->private_free = ct_atc_pcm_free_substream; | ||
283 | runtime->hw = ct_pcm_capture_hw; | 283 | runtime->hw = ct_pcm_capture_hw; |
284 | runtime->hw.rate_max = atc->rsr * atc->msr; | 284 | runtime->hw.rate_max = atc->rsr * atc->msr; |
285 | 285 | ||
@@ -298,8 +298,12 @@ static int ct_pcm_capture_open(struct snd_pcm_substream *substream) | |||
298 | } | 298 | } |
299 | 299 | ||
300 | apcm->timer = ct_timer_instance_new(atc->timer, apcm); | 300 | apcm->timer = ct_timer_instance_new(atc->timer, apcm); |
301 | if (!apcm->timer) | 301 | if (!apcm->timer) { |
302 | kfree(apcm); | ||
302 | return -ENOMEM; | 303 | return -ENOMEM; |
304 | } | ||
305 | runtime->private_data = apcm; | ||
306 | runtime->private_free = ct_atc_pcm_free_substream; | ||
303 | 307 | ||
304 | return 0; | 308 | return 0; |
305 | } | 309 | } |
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 6361f752b5f3..846d1ead47fd 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -3100,6 +3100,7 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = { | |||
3100 | SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO), | 3100 | SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO), |
3101 | SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), | 3101 | SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), |
3102 | SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP), | 3102 | SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP), |
3103 | SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_HP_LAPTOP), | ||
3103 | SND_PCI_QUIRK(0x1179, 0xff1e, "Toshiba Satellite C650D", CXT5066_IDEAPAD), | 3104 | SND_PCI_QUIRK(0x1179, 0xff1e, "Toshiba Satellite C650D", CXT5066_IDEAPAD), |
3104 | SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5), | 3105 | SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5), |
3105 | SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5), | 3106 | SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5), |
@@ -3110,6 +3111,7 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = { | |||
3110 | SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD), | 3111 | SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD), |
3111 | SND_PCI_QUIRK(0x17aa, 0x21b3, "Thinkpad Edge 13 (197)", CXT5066_IDEAPAD), | 3112 | SND_PCI_QUIRK(0x17aa, 0x21b3, "Thinkpad Edge 13 (197)", CXT5066_IDEAPAD), |
3112 | SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD), | 3113 | SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD), |
3114 | SND_PCI_QUIRK(0x17aa, 0x21c8, "Thinkpad Edge 11", CXT5066_IDEAPAD), | ||
3113 | SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD), | 3115 | SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD), |
3114 | SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G series", CXT5066_IDEAPAD), | 3116 | SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G series", CXT5066_IDEAPAD), |
3115 | SND_PCI_QUIRK(0x17aa, 0x390a, "Lenovo S10-3t", CXT5066_IDEAPAD), | 3117 | SND_PCI_QUIRK(0x17aa, 0x390a, "Lenovo S10-3t", CXT5066_IDEAPAD), |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 5f00589cb791..0ac6aed0c889 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -14623,7 +14623,10 @@ static int alc275_setup_dual_adc(struct hda_codec *codec) | |||
14623 | /* different alc269-variants */ | 14623 | /* different alc269-variants */ |
14624 | enum { | 14624 | enum { |
14625 | ALC269_TYPE_NORMAL, | 14625 | ALC269_TYPE_NORMAL, |
14626 | ALC269_TYPE_ALC258, | ||
14626 | ALC269_TYPE_ALC259, | 14627 | ALC269_TYPE_ALC259, |
14628 | ALC269_TYPE_ALC269VB, | ||
14629 | ALC269_TYPE_ALC270, | ||
14627 | ALC269_TYPE_ALC271X, | 14630 | ALC269_TYPE_ALC271X, |
14628 | }; | 14631 | }; |
14629 | 14632 | ||
@@ -15023,7 +15026,7 @@ static int alc269_fill_coef(struct hda_codec *codec) | |||
15023 | static int patch_alc269(struct hda_codec *codec) | 15026 | static int patch_alc269(struct hda_codec *codec) |
15024 | { | 15027 | { |
15025 | struct alc_spec *spec; | 15028 | struct alc_spec *spec; |
15026 | int board_config; | 15029 | int board_config, coef; |
15027 | int err; | 15030 | int err; |
15028 | 15031 | ||
15029 | spec = kzalloc(sizeof(*spec), GFP_KERNEL); | 15032 | spec = kzalloc(sizeof(*spec), GFP_KERNEL); |
@@ -15034,14 +15037,23 @@ static int patch_alc269(struct hda_codec *codec) | |||
15034 | 15037 | ||
15035 | alc_auto_parse_customize_define(codec); | 15038 | alc_auto_parse_customize_define(codec); |
15036 | 15039 | ||
15037 | if ((alc_read_coef_idx(codec, 0) & 0x00f0) == 0x0010){ | 15040 | coef = alc_read_coef_idx(codec, 0); |
15041 | if ((coef & 0x00f0) == 0x0010) { | ||
15038 | if (codec->bus->pci->subsystem_vendor == 0x1025 && | 15042 | if (codec->bus->pci->subsystem_vendor == 0x1025 && |
15039 | spec->cdefine.platform_type == 1) { | 15043 | spec->cdefine.platform_type == 1) { |
15040 | alc_codec_rename(codec, "ALC271X"); | 15044 | alc_codec_rename(codec, "ALC271X"); |
15041 | spec->codec_variant = ALC269_TYPE_ALC271X; | 15045 | spec->codec_variant = ALC269_TYPE_ALC271X; |
15042 | } else { | 15046 | } else if ((coef & 0xf000) == 0x1000) { |
15047 | spec->codec_variant = ALC269_TYPE_ALC270; | ||
15048 | } else if ((coef & 0xf000) == 0x2000) { | ||
15043 | alc_codec_rename(codec, "ALC259"); | 15049 | alc_codec_rename(codec, "ALC259"); |
15044 | spec->codec_variant = ALC269_TYPE_ALC259; | 15050 | spec->codec_variant = ALC269_TYPE_ALC259; |
15051 | } else if ((coef & 0xf000) == 0x3000) { | ||
15052 | alc_codec_rename(codec, "ALC258"); | ||
15053 | spec->codec_variant = ALC269_TYPE_ALC258; | ||
15054 | } else { | ||
15055 | alc_codec_rename(codec, "ALC269VB"); | ||
15056 | spec->codec_variant = ALC269_TYPE_ALC269VB; | ||
15045 | } | 15057 | } |
15046 | } else | 15058 | } else |
15047 | alc_fix_pll_init(codec, 0x20, 0x04, 15); | 15059 | alc_fix_pll_init(codec, 0x20, 0x04, 15); |
@@ -15104,7 +15116,7 @@ static int patch_alc269(struct hda_codec *codec) | |||
15104 | spec->stream_digital_capture = &alc269_pcm_digital_capture; | 15116 | spec->stream_digital_capture = &alc269_pcm_digital_capture; |
15105 | 15117 | ||
15106 | if (!spec->adc_nids) { /* wasn't filled automatically? use default */ | 15118 | if (!spec->adc_nids) { /* wasn't filled automatically? use default */ |
15107 | if (spec->codec_variant != ALC269_TYPE_NORMAL) { | 15119 | if (spec->codec_variant == ALC269_TYPE_NORMAL) { |
15108 | spec->adc_nids = alc269_adc_nids; | 15120 | spec->adc_nids = alc269_adc_nids; |
15109 | spec->num_adc_nids = ARRAY_SIZE(alc269_adc_nids); | 15121 | spec->num_adc_nids = ARRAY_SIZE(alc269_adc_nids); |
15110 | spec->capsrc_nids = alc269_capsrc_nids; | 15122 | spec->capsrc_nids = alc269_capsrc_nids; |
@@ -19298,6 +19310,7 @@ static const struct alc_fixup alc662_fixups[] = { | |||
19298 | 19310 | ||
19299 | static struct snd_pci_quirk alc662_fixup_tbl[] = { | 19311 | static struct snd_pci_quirk alc662_fixup_tbl[] = { |
19300 | SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), | 19312 | SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), |
19313 | SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD), | ||
19301 | SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD), | 19314 | SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD), |
19302 | SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD), | 19315 | SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD), |
19303 | {} | 19316 | {} |
@@ -19419,7 +19432,10 @@ static int patch_alc888(struct hda_codec *codec) | |||
19419 | { | 19432 | { |
19420 | if ((alc_read_coef_idx(codec, 0) & 0x00f0)==0x0030){ | 19433 | if ((alc_read_coef_idx(codec, 0) & 0x00f0)==0x0030){ |
19421 | kfree(codec->chip_name); | 19434 | kfree(codec->chip_name); |
19422 | codec->chip_name = kstrdup("ALC888-VD", GFP_KERNEL); | 19435 | if (codec->vendor_id == 0x10ec0887) |
19436 | codec->chip_name = kstrdup("ALC887-VD", GFP_KERNEL); | ||
19437 | else | ||
19438 | codec->chip_name = kstrdup("ALC888-VD", GFP_KERNEL); | ||
19423 | if (!codec->chip_name) { | 19439 | if (!codec->chip_name) { |
19424 | alc_free(codec); | 19440 | alc_free(codec); |
19425 | return -ENOMEM; | 19441 | return -ENOMEM; |
@@ -19909,7 +19925,7 @@ static struct hda_codec_preset snd_hda_preset_realtek[] = { | |||
19909 | { .id = 0x10ec0885, .rev = 0x100103, .name = "ALC889A", | 19925 | { .id = 0x10ec0885, .rev = 0x100103, .name = "ALC889A", |
19910 | .patch = patch_alc882 }, | 19926 | .patch = patch_alc882 }, |
19911 | { .id = 0x10ec0885, .name = "ALC885", .patch = patch_alc882 }, | 19927 | { .id = 0x10ec0885, .name = "ALC885", .patch = patch_alc882 }, |
19912 | { .id = 0x10ec0887, .name = "ALC887", .patch = patch_alc882 }, | 19928 | { .id = 0x10ec0887, .name = "ALC887", .patch = patch_alc888 }, |
19913 | { .id = 0x10ec0888, .rev = 0x100101, .name = "ALC1200", | 19929 | { .id = 0x10ec0888, .rev = 0x100101, .name = "ALC1200", |
19914 | .patch = patch_alc882 }, | 19930 | .patch = patch_alc882 }, |
19915 | { .id = 0x10ec0888, .name = "ALC888", .patch = patch_alc888 }, | 19931 | { .id = 0x10ec0888, .name = "ALC888", .patch = patch_alc888 }, |
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 93fa59cc60ef..5c710807dfe5 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c | |||
@@ -389,6 +389,11 @@ static hda_nid_t stac92hd83xxx_dmic_nids[STAC92HD83XXX_NUM_DMICS + 1] = { | |||
389 | 0x11, 0x20, 0 | 389 | 0x11, 0x20, 0 |
390 | }; | 390 | }; |
391 | 391 | ||
392 | #define STAC92HD87B_NUM_DMICS 1 | ||
393 | static hda_nid_t stac92hd87b_dmic_nids[STAC92HD87B_NUM_DMICS + 1] = { | ||
394 | 0x11, 0 | ||
395 | }; | ||
396 | |||
392 | #define STAC92HD83XXX_NUM_CAPS 2 | 397 | #define STAC92HD83XXX_NUM_CAPS 2 |
393 | static unsigned long stac92hd83xxx_capvols[] = { | 398 | static unsigned long stac92hd83xxx_capvols[] = { |
394 | HDA_COMPOSE_AMP_VAL(0x17, 3, 0, HDA_OUTPUT), | 399 | HDA_COMPOSE_AMP_VAL(0x17, 3, 0, HDA_OUTPUT), |
@@ -3486,10 +3491,8 @@ static int stac92xx_auto_create_dmic_input_ctls(struct hda_codec *codec, | |||
3486 | return err; | 3491 | return err; |
3487 | } | 3492 | } |
3488 | 3493 | ||
3489 | if (snd_hda_get_bool_hint(codec, "separate_dmux") != 1) { | 3494 | if (snd_hda_get_bool_hint(codec, "separate_dmux") != 1) |
3490 | snd_hda_add_imux_item(imux, label, index, NULL); | 3495 | snd_hda_add_imux_item(imux, label, index, NULL); |
3491 | spec->num_analog_muxes++; | ||
3492 | } | ||
3493 | } | 3496 | } |
3494 | 3497 | ||
3495 | return 0; | 3498 | return 0; |
@@ -5452,12 +5455,17 @@ again: | |||
5452 | stac92hd83xxx_brd_tbl[spec->board_config]); | 5455 | stac92hd83xxx_brd_tbl[spec->board_config]); |
5453 | 5456 | ||
5454 | switch (codec->vendor_id) { | 5457 | switch (codec->vendor_id) { |
5458 | case 0x111d76d1: | ||
5459 | case 0x111d76d9: | ||
5460 | spec->dmic_nids = stac92hd87b_dmic_nids; | ||
5461 | spec->num_dmics = stac92xx_connected_ports(codec, | ||
5462 | stac92hd87b_dmic_nids, | ||
5463 | STAC92HD87B_NUM_DMICS); | ||
5464 | /* Fall through */ | ||
5455 | case 0x111d7666: | 5465 | case 0x111d7666: |
5456 | case 0x111d7667: | 5466 | case 0x111d7667: |
5457 | case 0x111d7668: | 5467 | case 0x111d7668: |
5458 | case 0x111d7669: | 5468 | case 0x111d7669: |
5459 | case 0x111d76d1: | ||
5460 | case 0x111d76d9: | ||
5461 | spec->num_pins = ARRAY_SIZE(stac92hd88xxx_pin_nids); | 5469 | spec->num_pins = ARRAY_SIZE(stac92hd88xxx_pin_nids); |
5462 | spec->pin_nids = stac92hd88xxx_pin_nids; | 5470 | spec->pin_nids = stac92hd88xxx_pin_nids; |
5463 | spec->mono_nid = 0; | 5471 | spec->mono_nid = 0; |
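The patch_sigmatel.c change moves the 0x111d76d1/0x111d76d9 IDs ahead of the shared 92HD88xx cases so they can install their single-DMIC table and then fall through to the common pin setup. A minimal sketch of the resulting switch shape (identifiers as in the hunk, trailing cases abbreviated):

	switch (codec->vendor_id) {
	case 0x111d76d1:
	case 0x111d76d9:
		/* 92HD87B parts: one digital mic on NID 0x11 */
		spec->dmic_nids = stac92hd87b_dmic_nids;
		spec->num_dmics = stac92xx_connected_ports(codec,
				stac92hd87b_dmic_nids,
				STAC92HD87B_NUM_DMICS);
		/* Fall through to the shared 92HD88xx pin setup */
	case 0x111d7666:
	case 0x111d7667:
	case 0x111d7668:
	case 0x111d7669:
		spec->num_pins = ARRAY_SIZE(stac92hd88xxx_pin_nids);
		spec->pin_nids = stac92hd88xxx_pin_nids;
		spec->mono_nid = 0;
		break;
	}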
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c index 400f9ebd243e..629a5494347a 100644 --- a/sound/pci/intel8x0.c +++ b/sound/pci/intel8x0.c | |||
@@ -1866,6 +1866,12 @@ static struct ac97_quirk ac97_quirks[] __devinitdata = { | |||
1866 | }, | 1866 | }, |
1867 | { | 1867 | { |
1868 | .subvendor = 0x1028, | 1868 | .subvendor = 0x1028, |
1869 | .subdevice = 0x0182, | ||
1870 | .name = "Dell Latitude D610", /* STAC9750/51 */ | ||
1871 | .type = AC97_TUNE_HP_ONLY | ||
1872 | }, | ||
1873 | { | ||
1874 | .subvendor = 0x1028, | ||
1869 | .subdevice = 0x0186, | 1875 | .subdevice = 0x0186, |
1870 | .name = "Dell Latitude D810", /* cf. Malone #41015 */ | 1876 | .name = "Dell Latitude D810", /* cf. Malone #41015 */ |
1871 | .type = AC97_TUNE_HP_MUTE_LED | 1877 | .type = AC97_TUNE_HP_MUTE_LED |
diff --git a/sound/pci/mixart/mixart_hwdep.h b/sound/pci/mixart/mixart_hwdep.h index a46f5083db99..812e288ef2e7 100644 --- a/sound/pci/mixart/mixart_hwdep.h +++ b/sound/pci/mixart/mixart_hwdep.h | |||
@@ -25,11 +25,21 @@ | |||
25 | 25 | ||
26 | #include <sound/hwdep.h> | 26 | #include <sound/hwdep.h> |
27 | 27 | ||
28 | #ifndef readl_be | ||
28 | #define readl_be(x) be32_to_cpu(__raw_readl(x)) | 29 | #define readl_be(x) be32_to_cpu(__raw_readl(x)) |
30 | #endif | ||
31 | |||
32 | #ifndef writel_be | ||
29 | #define writel_be(data,addr) __raw_writel(cpu_to_be32(data),addr) | 33 | #define writel_be(data,addr) __raw_writel(cpu_to_be32(data),addr) |
34 | #endif | ||
30 | 35 | ||
36 | #ifndef readl_le | ||
31 | #define readl_le(x) le32_to_cpu(__raw_readl(x)) | 37 | #define readl_le(x) le32_to_cpu(__raw_readl(x)) |
38 | #endif | ||
39 | |||
40 | #ifndef writel_le | ||
32 | #define writel_le(data,addr) __raw_writel(cpu_to_le32(data),addr) | 41 | #define writel_le(data,addr) __raw_writel(cpu_to_le32(data),addr) |
42 | #endif | ||
33 | 43 | ||
34 | #define MIXART_MEM(mgr,x) ((mgr)->mem[0].virt + (x)) | 44 | #define MIXART_MEM(mgr,x) ((mgr)->mem[0].virt + (x)) |
35 | #define MIXART_REG(mgr,x) ((mgr)->mem[1].virt + (x)) | 45 | #define MIXART_REG(mgr,x) ((mgr)->mem[1].virt + (x)) |
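The mixart_hwdep.h hunk wraps each accessor macro in #ifndef, so an architecture that already provides readl_be()/writel_be() keeps its own definition and the driver's byte-swapping fallback is only used otherwise. The idiom as applied in the hunk:

#ifndef readl_be
/* Fallback only when the platform has not supplied its own readl_be() */
#define readl_be(x)		be32_to_cpu(__raw_readl(x))
#endif

#ifndef writel_be
#define writel_be(data, addr)	__raw_writel(cpu_to_be32(data), addr)
#endif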
diff --git a/sound/ppc/pmac.c b/sound/ppc/pmac.c index 85081172403f..b47cfd45b3b9 100644 --- a/sound/ppc/pmac.c +++ b/sound/ppc/pmac.c | |||
@@ -1228,10 +1228,8 @@ int __devinit snd_pmac_new(struct snd_card *card, struct snd_pmac **chip_return) | |||
1228 | chip->rsrc[i].start + 1, | 1228 | chip->rsrc[i].start + 1, |
1229 | rnames[i]) == NULL) { | 1229 | rnames[i]) == NULL) { |
1230 | printk(KERN_ERR "snd: can't request rsrc " | 1230 | printk(KERN_ERR "snd: can't request rsrc " |
1231 | " %d (%s: 0x%016llx:%016llx)\n", | 1231 | " %d (%s: %pR)\n", |
1232 | i, rnames[i], | 1232 | i, rnames[i], &chip->rsrc[i]); |
1233 | (unsigned long long)chip->rsrc[i].start, | ||
1234 | (unsigned long long)chip->rsrc[i].end); | ||
1235 | err = -ENODEV; | 1233 | err = -ENODEV; |
1236 | goto __error; | 1234 | goto __error; |
1237 | } | 1235 | } |
@@ -1256,10 +1254,8 @@ int __devinit snd_pmac_new(struct snd_card *card, struct snd_pmac **chip_return) | |||
1256 | chip->rsrc[i].start + 1, | 1254 | chip->rsrc[i].start + 1, |
1257 | rnames[i]) == NULL) { | 1255 | rnames[i]) == NULL) { |
1258 | printk(KERN_ERR "snd: can't request rsrc " | 1256 | printk(KERN_ERR "snd: can't request rsrc " |
1259 | " %d (%s: 0x%016llx:%016llx)\n", | 1257 | " %d (%s: %pR)\n", |
1260 | i, rnames[i], | 1258 | i, rnames[i], &chip->rsrc[i]); |
1261 | (unsigned long long)chip->rsrc[i].start, | ||
1262 | (unsigned long long)chip->rsrc[i].end); | ||
1263 | err = -ENODEV; | 1259 | err = -ENODEV; |
1264 | goto __error; | 1260 | goto __error; |
1265 | } | 1261 | } |
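Both pmac.c hunks replace the hand-formatted start/end values with the %pR printk extension, which prints a struct resource (range and flags) from a pointer, so the unsigned long long casts disappear. The resulting call, as in the hunk:

	printk(KERN_ERR "snd: can't request rsrc  %d (%s: %pR)\n",
	       i, rnames[i], &chip->rsrc[i]);	/* %pR formats the whole resource */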
diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig index e720d5e6f04c..bee3c94f58b0 100644 --- a/sound/soc/atmel/Kconfig +++ b/sound/soc/atmel/Kconfig | |||
@@ -16,7 +16,8 @@ config SND_ATMEL_SOC_SSC | |||
16 | 16 | ||
17 | config SND_AT91_SOC_SAM9G20_WM8731 | 17 | config SND_AT91_SOC_SAM9G20_WM8731 |
18 | tristate "SoC Audio support for WM8731-based At91sam9g20 evaluation board" | 18 | tristate "SoC Audio support for WM8731-based At91sam9g20 evaluation board" |
19 | depends on ATMEL_SSC && ARCH_AT91SAM9G20 && SND_ATMEL_SOC | 19 | depends on ATMEL_SSC && ARCH_AT91SAM9G20 && SND_ATMEL_SOC && \ |
20 | AT91_PROGRAMMABLE_CLOCKS | ||
20 | select SND_ATMEL_SOC_SSC | 21 | select SND_ATMEL_SOC_SSC |
21 | select SND_SOC_WM8731 | 22 | select SND_SOC_WM8731 |
22 | help | 23 | help |
@@ -25,7 +26,7 @@ config SND_AT91_SOC_SAM9G20_WM8731 | |||
25 | 26 | ||
26 | config SND_AT32_SOC_PLAYPAQ | 27 | config SND_AT32_SOC_PLAYPAQ |
27 | tristate "SoC Audio support for PlayPaq with WM8510" | 28 | tristate "SoC Audio support for PlayPaq with WM8510" |
28 | depends on SND_ATMEL_SOC && BOARD_PLAYPAQ | 29 | depends on SND_ATMEL_SOC && BOARD_PLAYPAQ && AT91_PROGRAMMABLE_CLOCKS |
29 | select SND_ATMEL_SOC_SSC | 30 | select SND_ATMEL_SOC_SSC |
30 | select SND_SOC_WM8510 | 31 | select SND_SOC_WM8510 |
31 | help | 32 | help |
diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c index bc22ee93a75d..470cb93b1d1f 100644 --- a/sound/soc/codecs/max98088.c +++ b/sound/soc/codecs/max98088.c | |||
@@ -28,6 +28,11 @@ | |||
28 | #include <sound/max98088.h> | 28 | #include <sound/max98088.h> |
29 | #include "max98088.h" | 29 | #include "max98088.h" |
30 | 30 | ||
31 | enum max98088_type { | ||
32 | MAX98088, | ||
33 | MAX98089, | ||
34 | }; | ||
35 | |||
31 | struct max98088_cdata { | 36 | struct max98088_cdata { |
32 | unsigned int rate; | 37 | unsigned int rate; |
33 | unsigned int fmt; | 38 | unsigned int fmt; |
@@ -36,6 +41,7 @@ struct max98088_cdata { | |||
36 | 41 | ||
37 | struct max98088_priv { | 42 | struct max98088_priv { |
38 | u8 reg_cache[M98088_REG_CNT]; | 43 | u8 reg_cache[M98088_REG_CNT]; |
44 | enum max98088_type devtype; | ||
39 | void *control_data; | 45 | void *control_data; |
40 | struct max98088_pdata *pdata; | 46 | struct max98088_pdata *pdata; |
41 | unsigned int sysclk; | 47 | unsigned int sysclk; |
@@ -2040,6 +2046,8 @@ static int max98088_i2c_probe(struct i2c_client *i2c, | |||
2040 | if (max98088 == NULL) | 2046 | if (max98088 == NULL) |
2041 | return -ENOMEM; | 2047 | return -ENOMEM; |
2042 | 2048 | ||
2049 | max98088->devtype = id->driver_data; | ||
2050 | |||
2043 | i2c_set_clientdata(i2c, max98088); | 2051 | i2c_set_clientdata(i2c, max98088); |
2044 | max98088->control_data = i2c; | 2052 | max98088->control_data = i2c; |
2045 | max98088->pdata = i2c->dev.platform_data; | 2053 | max98088->pdata = i2c->dev.platform_data; |
@@ -2059,7 +2067,8 @@ static int __devexit max98088_i2c_remove(struct i2c_client *client) | |||
2059 | } | 2067 | } |
2060 | 2068 | ||
2061 | static const struct i2c_device_id max98088_i2c_id[] = { | 2069 | static const struct i2c_device_id max98088_i2c_id[] = { |
2062 | { "max98088", 0 }, | 2070 | { "max98088", MAX98088 }, |
2071 | { "max98089", MAX98089 }, | ||
2063 | { } | 2072 | { } |
2064 | }; | 2073 | }; |
2065 | MODULE_DEVICE_TABLE(i2c, max98088_i2c_id); | 2074 | MODULE_DEVICE_TABLE(i2c, max98088_i2c_id); |
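In max98088.c the new max98089 I2C ID reuses the same driver; the enum value stored in .driver_data is copied into the private structure at probe time so later code can branch on the variant. A condensed sketch of the pattern assembled from the hunks:

static const struct i2c_device_id max98088_i2c_id[] = {
	{ "max98088", MAX98088 },
	{ "max98089", MAX98089 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, max98088_i2c_id);

/* In max98088_i2c_probe(): remember which table entry matched */
max98088->devtype = id->driver_data;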
diff --git a/sound/soc/codecs/uda134x.c b/sound/soc/codecs/uda134x.c index 7540a509a6f5..464f0cfa4c7a 100644 --- a/sound/soc/codecs/uda134x.c +++ b/sound/soc/codecs/uda134x.c | |||
@@ -597,6 +597,7 @@ static struct snd_soc_codec_driver soc_codec_dev_uda134x = { | |||
597 | .resume = uda134x_soc_resume, | 597 | .resume = uda134x_soc_resume, |
598 | .reg_cache_size = sizeof(uda134x_reg), | 598 | .reg_cache_size = sizeof(uda134x_reg), |
599 | .reg_word_size = sizeof(u8), | 599 | .reg_word_size = sizeof(u8), |
600 | .reg_cache_default = uda134x_reg, | ||
600 | .reg_cache_step = 1, | 601 | .reg_cache_step = 1, |
601 | .read = uda134x_read_reg_cache, | 602 | .read = uda134x_read_reg_cache, |
602 | .write = uda134x_write, | 603 | .write = uda134x_write, |
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c index f4f1fba38eb9..7611add7f8c3 100644 --- a/sound/soc/codecs/wm8350.c +++ b/sound/soc/codecs/wm8350.c | |||
@@ -831,7 +831,7 @@ static int wm8350_set_dai_sysclk(struct snd_soc_dai *codec_dai, | |||
831 | } | 831 | } |
832 | 832 | ||
833 | /* MCLK direction */ | 833 | /* MCLK direction */ |
834 | if (dir == WM8350_MCLK_DIR_OUT) | 834 | if (dir == SND_SOC_CLOCK_OUT) |
835 | wm8350_set_bits(wm8350, WM8350_CLOCK_CONTROL_2, | 835 | wm8350_set_bits(wm8350, WM8350_CLOCK_CONTROL_2, |
836 | WM8350_MCLK_DIR); | 836 | WM8350_MCLK_DIR); |
837 | else | 837 | else |
@@ -1586,6 +1586,13 @@ static int wm8350_codec_probe(struct snd_soc_codec *codec) | |||
1586 | wm8350_set_bits(wm8350, WM8350_ROUT2_VOLUME, | 1586 | wm8350_set_bits(wm8350, WM8350_ROUT2_VOLUME, |
1587 | WM8350_OUT2_VU | WM8350_OUT2R_MUTE); | 1587 | WM8350_OUT2_VU | WM8350_OUT2R_MUTE); |
1588 | 1588 | ||
1589 | /* Make sure AIF tristating is disabled by default */ | ||
1590 | wm8350_clear_bits(wm8350, WM8350_AI_FORMATING, WM8350_AIF_TRI); | ||
1591 | |||
1592 | /* Make sure we've got a sane companding setup too */ | ||
1593 | wm8350_clear_bits(wm8350, WM8350_ADC_DAC_COMP, | ||
1594 | WM8350_DAC_COMP | WM8350_LOOPBACK); | ||
1595 | |||
1589 | /* Make sure jack detect is disabled to start off with */ | 1596 | /* Make sure jack detect is disabled to start off with */ |
1590 | wm8350_clear_bits(wm8350, WM8350_JACK_DETECT, | 1597 | wm8350_clear_bits(wm8350, WM8350_JACK_DETECT, |
1591 | WM8350_JDL_ENA | WM8350_JDR_ENA); | 1598 | WM8350_JDL_ENA | WM8350_JDR_ENA); |
diff --git a/sound/soc/codecs/wm8776.c b/sound/soc/codecs/wm8776.c index 04182c464e35..0132a27140ae 100644 --- a/sound/soc/codecs/wm8776.c +++ b/sound/soc/codecs/wm8776.c | |||
@@ -34,7 +34,6 @@ | |||
34 | /* codec private data */ | 34 | /* codec private data */ |
35 | struct wm8776_priv { | 35 | struct wm8776_priv { |
36 | enum snd_soc_control_type control_type; | 36 | enum snd_soc_control_type control_type; |
37 | u16 reg_cache[WM8776_CACHEREGNUM]; | ||
38 | int sysclk[2]; | 37 | int sysclk[2]; |
39 | }; | 38 | }; |
40 | 39 | ||
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c index 894d0cd3aa9b..e8092745a207 100644 --- a/sound/soc/codecs/wm8962.c +++ b/sound/soc/codecs/wm8962.c | |||
@@ -3500,8 +3500,11 @@ static ssize_t wm8962_beep_set(struct device *dev, | |||
3500 | { | 3500 | { |
3501 | struct wm8962_priv *wm8962 = dev_get_drvdata(dev); | 3501 | struct wm8962_priv *wm8962 = dev_get_drvdata(dev); |
3502 | long int time; | 3502 | long int time; |
3503 | int ret; | ||
3503 | 3504 | ||
3504 | strict_strtol(buf, 10, &time); | 3505 | ret = strict_strtol(buf, 10, &time); |
3506 | if (ret != 0) | ||
3507 | return ret; | ||
3505 | 3508 | ||
3506 | input_event(wm8962->beep, EV_SND, SND_TONE, time); | 3509 | input_event(wm8962->beep, EV_SND, SND_TONE, time); |
3507 | 3510 | ||
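The wm8962.c fix stops discarding the return value of strict_strtol(), so malformed sysfs input is rejected instead of feeding an unchecked value to the beep generator. The checked form, as in the hunk:

	long int time;
	int ret;

	ret = strict_strtol(buf, 10, &time);
	if (ret != 0)
		return ret;	/* propagate the parse error to the writer */

	input_event(wm8962->beep, EV_SND, SND_TONE, time);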
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c index 0db59c3aa5d4..830dfdd66c5f 100644 --- a/sound/soc/codecs/wm8994.c +++ b/sound/soc/codecs/wm8994.c | |||
@@ -3903,6 +3903,8 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec) | |||
3903 | return -ENOMEM; | 3903 | return -ENOMEM; |
3904 | snd_soc_codec_set_drvdata(codec, wm8994); | 3904 | snd_soc_codec_set_drvdata(codec, wm8994); |
3905 | 3905 | ||
3906 | codec->reg_cache = &wm8994->reg_cache; | ||
3907 | |||
3906 | wm8994->pdata = dev_get_platdata(codec->dev->parent); | 3908 | wm8994->pdata = dev_get_platdata(codec->dev->parent); |
3907 | wm8994->codec = codec; | 3909 | wm8994->codec = codec; |
3908 | 3910 | ||
diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c index 2b07b17a6b2d..bc9e6b0b3f6f 100644 --- a/sound/soc/davinci/davinci-evm.c +++ b/sound/soc/davinci/davinci-evm.c | |||
@@ -157,12 +157,23 @@ static int evm_aic3x_init(struct snd_soc_pcm_runtime *rtd) | |||
157 | } | 157 | } |
158 | 158 | ||
159 | /* davinci-evm digital audio interface glue - connects codec <--> CPU */ | 159 | /* davinci-evm digital audio interface glue - connects codec <--> CPU */ |
160 | static struct snd_soc_dai_link evm_dai = { | 160 | static struct snd_soc_dai_link dm6446_evm_dai = { |
161 | .name = "TLV320AIC3X", | 161 | .name = "TLV320AIC3X", |
162 | .stream_name = "AIC3X", | 162 | .stream_name = "AIC3X", |
163 | .cpu_dai_name = "davinci-mcasp.0", | 163 | .cpu_dai_name = "davinci-mcbsp", |
164 | .codec_dai_name = "tlv320aic3x-hifi", | 164 | .codec_dai_name = "tlv320aic3x-hifi", |
165 | .codec_name = "tlv320aic3x-codec.0-001a", | 165 | .codec_name = "tlv320aic3x-codec.1-001b", |
166 | .platform_name = "davinci-pcm-audio", | ||
167 | .init = evm_aic3x_init, | ||
168 | .ops = &evm_ops, | ||
169 | }; | ||
170 | |||
171 | static struct snd_soc_dai_link dm355_evm_dai = { | ||
172 | .name = "TLV320AIC3X", | ||
173 | .stream_name = "AIC3X", | ||
174 | .cpu_dai_name = "davinci-mcbsp.1", | ||
175 | .codec_dai_name = "tlv320aic3x-hifi", | ||
176 | .codec_name = "tlv320aic3x-codec.1-001b", | ||
166 | .platform_name = "davinci-pcm-audio", | 177 | .platform_name = "davinci-pcm-audio", |
167 | .init = evm_aic3x_init, | 178 | .init = evm_aic3x_init, |
168 | .ops = &evm_ops, | 179 | .ops = &evm_ops, |
@@ -172,10 +183,10 @@ static struct snd_soc_dai_link dm365_evm_dai = { | |||
172 | #ifdef CONFIG_SND_DM365_AIC3X_CODEC | 183 | #ifdef CONFIG_SND_DM365_AIC3X_CODEC |
173 | .name = "TLV320AIC3X", | 184 | .name = "TLV320AIC3X", |
174 | .stream_name = "AIC3X", | 185 | .stream_name = "AIC3X", |
175 | .cpu_dai_name = "davinci-i2s", | 186 | .cpu_dai_name = "davinci-mcbsp", |
176 | .codec_dai_name = "tlv320aic3x-hifi", | 187 | .codec_dai_name = "tlv320aic3x-hifi", |
177 | .init = evm_aic3x_init, | 188 | .init = evm_aic3x_init, |
178 | .codec_name = "tlv320aic3x-codec.0-001a", | 189 | .codec_name = "tlv320aic3x-codec.1-0018", |
179 | .ops = &evm_ops, | 190 | .ops = &evm_ops, |
180 | #elif defined(CONFIG_SND_DM365_VOICE_CODEC) | 191 | #elif defined(CONFIG_SND_DM365_VOICE_CODEC) |
181 | .name = "Voice Codec - CQ93VC", | 192 | .name = "Voice Codec - CQ93VC", |
@@ -219,10 +230,17 @@ static struct snd_soc_dai_link da8xx_evm_dai = { | |||
219 | .ops = &evm_ops, | 230 | .ops = &evm_ops, |
220 | }; | 231 | }; |
221 | 232 | ||
222 | /* davinci dm6446, dm355 evm audio machine driver */ | 233 | /* davinci dm6446 evm audio machine driver */ |
223 | static struct snd_soc_card snd_soc_card_evm = { | 234 | static struct snd_soc_card dm6446_snd_soc_card_evm = { |
224 | .name = "DaVinci EVM", | 235 | .name = "DaVinci DM6446 EVM", |
225 | .dai_link = &evm_dai, | 236 | .dai_link = &dm6446_evm_dai, |
237 | .num_links = 1, | ||
238 | }; | ||
239 | |||
240 | /* davinci dm355 evm audio machine driver */ | ||
241 | static struct snd_soc_card dm355_snd_soc_card_evm = { | ||
242 | .name = "DaVinci DM355 EVM", | ||
243 | .dai_link = &dm355_evm_dai, | ||
226 | .num_links = 1, | 244 | .num_links = 1, |
227 | }; | 245 | }; |
228 | 246 | ||
@@ -261,10 +279,10 @@ static int __init evm_init(void) | |||
261 | int ret; | 279 | int ret; |
262 | 280 | ||
263 | if (machine_is_davinci_evm()) { | 281 | if (machine_is_davinci_evm()) { |
264 | evm_snd_dev_data = &snd_soc_card_evm; | 282 | evm_snd_dev_data = &dm6446_snd_soc_card_evm; |
265 | index = 0; | 283 | index = 0; |
266 | } else if (machine_is_davinci_dm355_evm()) { | 284 | } else if (machine_is_davinci_dm355_evm()) { |
267 | evm_snd_dev_data = &snd_soc_card_evm; | 285 | evm_snd_dev_data = &dm355_snd_soc_card_evm; |
268 | index = 1; | 286 | index = 1; |
269 | } else if (machine_is_davinci_dm365_evm()) { | 287 | } else if (machine_is_davinci_dm365_evm()) { |
270 | evm_snd_dev_data = &dm365_snd_soc_card_evm; | 288 | evm_snd_dev_data = &dm365_snd_soc_card_evm; |
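davinci-evm.c splits the previously shared card into per-board dai_link/card pairs (the DM6446 link now uses the davinci-mcbsp CPU DAI and the codec at tlv320aic3x-codec.1-001b, DM355 uses davinci-mcbsp.1), and evm_init() selects the matching card per machine. A condensed sketch of the selection, identifiers as in the hunks; the remaining boards follow the same pattern:

	if (machine_is_davinci_evm()) {
		evm_snd_dev_data = &dm6446_snd_soc_card_evm;
		index = 0;
	} else if (machine_is_davinci_dm355_evm()) {
		evm_snd_dev_data = &dm355_snd_soc_card_evm;
		index = 1;
	}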
diff --git a/sound/soc/davinci/davinci-i2s.c b/sound/soc/davinci/davinci-i2s.c index d46b545d41f4..9e0e565e6ed9 100644 --- a/sound/soc/davinci/davinci-i2s.c +++ b/sound/soc/davinci/davinci-i2s.c | |||
@@ -426,9 +426,6 @@ static int davinci_i2s_hw_params(struct snd_pcm_substream *substream, | |||
426 | snd_pcm_format_t fmt; | 426 | snd_pcm_format_t fmt; |
427 | unsigned element_cnt = 1; | 427 | unsigned element_cnt = 1; |
428 | 428 | ||
429 | dai->capture_dma_data = dev->dma_params; | ||
430 | dai->playback_dma_data = dev->dma_params; | ||
431 | |||
432 | /* general line settings */ | 429 | /* general line settings */ |
433 | spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG); | 430 | spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG); |
434 | if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { | 431 | if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { |
@@ -601,6 +598,15 @@ static int davinci_i2s_trigger(struct snd_pcm_substream *substream, int cmd, | |||
601 | return ret; | 598 | return ret; |
602 | } | 599 | } |
603 | 600 | ||
601 | static int davinci_i2s_startup(struct snd_pcm_substream *substream, | ||
602 | struct snd_soc_dai *dai) | ||
603 | { | ||
604 | struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai); | ||
605 | |||
606 | snd_soc_dai_set_dma_data(dai, substream, dev->dma_params); | ||
607 | return 0; | ||
608 | } | ||
609 | |||
604 | static void davinci_i2s_shutdown(struct snd_pcm_substream *substream, | 610 | static void davinci_i2s_shutdown(struct snd_pcm_substream *substream, |
605 | struct snd_soc_dai *dai) | 611 | struct snd_soc_dai *dai) |
606 | { | 612 | { |
@@ -612,6 +618,7 @@ static void davinci_i2s_shutdown(struct snd_pcm_substream *substream, | |||
612 | #define DAVINCI_I2S_RATES SNDRV_PCM_RATE_8000_96000 | 618 | #define DAVINCI_I2S_RATES SNDRV_PCM_RATE_8000_96000 |
613 | 619 | ||
614 | static struct snd_soc_dai_ops davinci_i2s_dai_ops = { | 620 | static struct snd_soc_dai_ops davinci_i2s_dai_ops = { |
621 | .startup = davinci_i2s_startup, | ||
615 | .shutdown = davinci_i2s_shutdown, | 622 | .shutdown = davinci_i2s_shutdown, |
616 | .prepare = davinci_i2s_prepare, | 623 | .prepare = davinci_i2s_prepare, |
617 | .trigger = davinci_i2s_trigger, | 624 | .trigger = davinci_i2s_trigger, |
@@ -749,7 +756,7 @@ static struct platform_driver davinci_mcbsp_driver = { | |||
749 | .probe = davinci_i2s_probe, | 756 | .probe = davinci_i2s_probe, |
750 | .remove = davinci_i2s_remove, | 757 | .remove = davinci_i2s_remove, |
751 | .driver = { | 758 | .driver = { |
752 | .name = "davinci-i2s", | 759 | .name = "davinci-mcbsp", |
753 | .owner = THIS_MODULE, | 760 | .owner = THIS_MODULE, |
754 | }, | 761 | }, |
755 | }; | 762 | }; |
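The DaVinci hunks (here and in davinci-mcasp.c and davinci-vcif.c below) stop writing dai->playback_dma_data/capture_dma_data from hw_params and instead publish the DMA parameters once per substream from a new startup op, where the PCM platform driver can retrieve them with snd_soc_dai_get_dma_data(). The added op, as in the hunk:

static int davinci_i2s_startup(struct snd_pcm_substream *substream,
			       struct snd_soc_dai *dai)
{
	struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai);

	/* Expose our DMA parameters to the davinci-pcm-audio platform */
	snd_soc_dai_set_dma_data(dai, substream, dev->dma_params);
	return 0;
}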
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c index 86918ee12419..fb55d2c5d704 100644 --- a/sound/soc/davinci/davinci-mcasp.c +++ b/sound/soc/davinci/davinci-mcasp.c | |||
@@ -715,9 +715,6 @@ static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream, | |||
715 | int word_length; | 715 | int word_length; |
716 | u8 fifo_level; | 716 | u8 fifo_level; |
717 | 717 | ||
718 | cpu_dai->capture_dma_data = dev->dma_params; | ||
719 | cpu_dai->playback_dma_data = dev->dma_params; | ||
720 | |||
721 | davinci_hw_common_param(dev, substream->stream); | 718 | davinci_hw_common_param(dev, substream->stream); |
722 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) | 719 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) |
723 | fifo_level = dev->txnumevt; | 720 | fifo_level = dev->txnumevt; |
@@ -799,7 +796,17 @@ static int davinci_mcasp_trigger(struct snd_pcm_substream *substream, | |||
799 | return ret; | 796 | return ret; |
800 | } | 797 | } |
801 | 798 | ||
799 | static int davinci_mcasp_startup(struct snd_pcm_substream *substream, | ||
800 | struct snd_soc_dai *dai) | ||
801 | { | ||
802 | struct davinci_audio_dev *dev = snd_soc_dai_get_drvdata(dai); | ||
803 | |||
804 | snd_soc_dai_set_dma_data(dai, substream, dev->dma_params); | ||
805 | return 0; | ||
806 | } | ||
807 | |||
802 | static struct snd_soc_dai_ops davinci_mcasp_dai_ops = { | 808 | static struct snd_soc_dai_ops davinci_mcasp_dai_ops = { |
809 | .startup = davinci_mcasp_startup, | ||
803 | .trigger = davinci_mcasp_trigger, | 810 | .trigger = davinci_mcasp_trigger, |
804 | .hw_params = davinci_mcasp_hw_params, | 811 | .hw_params = davinci_mcasp_hw_params, |
805 | .set_fmt = davinci_mcasp_set_dai_fmt, | 812 | .set_fmt = davinci_mcasp_set_dai_fmt, |
diff --git a/sound/soc/davinci/davinci-sffsdr.c b/sound/soc/davinci/davinci-sffsdr.c index 009b6521a1bf..6c6666a1f942 100644 --- a/sound/soc/davinci/davinci-sffsdr.c +++ b/sound/soc/davinci/davinci-sffsdr.c | |||
@@ -84,7 +84,7 @@ static struct snd_soc_ops sffsdr_ops = { | |||
84 | static struct snd_soc_dai_link sffsdr_dai = { | 84 | static struct snd_soc_dai_link sffsdr_dai = { |
85 | .name = "PCM3008", /* Codec name */ | 85 | .name = "PCM3008", /* Codec name */ |
86 | .stream_name = "PCM3008 HiFi", | 86 | .stream_name = "PCM3008 HiFi", |
87 | .cpu_dai_name = "davinci-asp.0", | 87 | .cpu_dai_name = "davinci-mcbsp", |
88 | .codec_dai_name = "pcm3008-hifi", | 88 | .codec_dai_name = "pcm3008-hifi", |
89 | .codec_name = "pcm3008-codec", | 89 | .codec_name = "pcm3008-codec", |
90 | .platform_name = "davinci-pcm-audio", | 90 | .platform_name = "davinci-pcm-audio", |
diff --git a/sound/soc/davinci/davinci-vcif.c b/sound/soc/davinci/davinci-vcif.c index ea232f6a2c21..fb4cc1edf339 100644 --- a/sound/soc/davinci/davinci-vcif.c +++ b/sound/soc/davinci/davinci-vcif.c | |||
@@ -97,9 +97,6 @@ static int davinci_vcif_hw_params(struct snd_pcm_substream *substream, | |||
97 | &davinci_vcif_dev->dma_params[substream->stream]; | 97 | &davinci_vcif_dev->dma_params[substream->stream]; |
98 | u32 w; | 98 | u32 w; |
99 | 99 | ||
100 | dai->capture_dma_data = davinci_vcif_dev->dma_params; | ||
101 | dai->playback_dma_data = davinci_vcif_dev->dma_params; | ||
102 | |||
103 | /* Restart the codec before setup */ | 100 | /* Restart the codec before setup */ |
104 | davinci_vcif_stop(substream); | 101 | davinci_vcif_stop(substream); |
105 | davinci_vcif_start(substream); | 102 | davinci_vcif_start(substream); |
@@ -174,9 +171,19 @@ static int davinci_vcif_trigger(struct snd_pcm_substream *substream, int cmd, | |||
174 | return ret; | 171 | return ret; |
175 | } | 172 | } |
176 | 173 | ||
174 | static int davinci_vcif_startup(struct snd_pcm_substream *substream, | ||
175 | struct snd_soc_dai *dai) | ||
176 | { | ||
177 | struct davinci_vcif_dev *dev = snd_soc_dai_get_drvdata(dai); | ||
178 | |||
179 | snd_soc_dai_set_dma_data(dai, substream, dev->dma_params); | ||
180 | return 0; | ||
181 | } | ||
182 | |||
177 | #define DAVINCI_VCIF_RATES SNDRV_PCM_RATE_8000_48000 | 183 | #define DAVINCI_VCIF_RATES SNDRV_PCM_RATE_8000_48000 |
178 | 184 | ||
179 | static struct snd_soc_dai_ops davinci_vcif_dai_ops = { | 185 | static struct snd_soc_dai_ops davinci_vcif_dai_ops = { |
186 | .startup = davinci_vcif_startup, | ||
180 | .trigger = davinci_vcif_trigger, | 187 | .trigger = davinci_vcif_trigger, |
181 | .hw_params = davinci_vcif_hw_params, | 188 | .hw_params = davinci_vcif_hw_params, |
182 | }; | 189 | }; |
diff --git a/sound/soc/fsl/mpc5200_psc_i2s.c b/sound/soc/fsl/mpc5200_psc_i2s.c index 74ffed41340f..9018fa5bf0db 100644 --- a/sound/soc/fsl/mpc5200_psc_i2s.c +++ b/sound/soc/fsl/mpc5200_psc_i2s.c | |||
@@ -160,7 +160,7 @@ static int __devinit psc_i2s_of_probe(struct platform_device *op, | |||
160 | rc = snd_soc_register_dais(&op->dev, psc_i2s_dai, ARRAY_SIZE(psc_i2s_dai)); | 160 | rc = snd_soc_register_dais(&op->dev, psc_i2s_dai, ARRAY_SIZE(psc_i2s_dai)); |
161 | if (rc != 0) { | 161 | if (rc != 0) { |
162 | pr_err("Failed to register DAI\n"); | 162 | pr_err("Failed to register DAI\n"); |
163 | return 0; | 163 | return rc; |
164 | } | 164 | } |
165 | 165 | ||
166 | psc_dma = dev_get_drvdata(&op->dev); | 166 | psc_dma = dev_get_drvdata(&op->dev); |
diff --git a/sound/soc/imx/eukrea-tlv320.c b/sound/soc/imx/eukrea-tlv320.c index b59675257ce5..dd4fffdbd177 100644 --- a/sound/soc/imx/eukrea-tlv320.c +++ b/sound/soc/imx/eukrea-tlv320.c | |||
@@ -34,8 +34,8 @@ static int eukrea_tlv320_hw_params(struct snd_pcm_substream *substream, | |||
34 | struct snd_pcm_hw_params *params) | 34 | struct snd_pcm_hw_params *params) |
35 | { | 35 | { |
36 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 36 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
37 | struct snd_soc_dai *codec_dai = rtd->dai->codec_dai; | 37 | struct snd_soc_dai *codec_dai = rtd->codec_dai; |
38 | struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; | 38 | struct snd_soc_dai *cpu_dai = rtd->cpu_dai; |
39 | int ret; | 39 | int ret; |
40 | 40 | ||
41 | ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S | | 41 | ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S | |
@@ -79,10 +79,10 @@ static struct snd_soc_ops eukrea_tlv320_snd_ops = { | |||
79 | static struct snd_soc_dai_link eukrea_tlv320_dai = { | 79 | static struct snd_soc_dai_link eukrea_tlv320_dai = { |
80 | .name = "tlv320aic23", | 80 | .name = "tlv320aic23", |
81 | .stream_name = "TLV320AIC23", | 81 | .stream_name = "TLV320AIC23", |
82 | .codec_dai = "tlv320aic23-hifi", | 82 | .codec_dai_name = "tlv320aic23-hifi", |
83 | .platform_name = "imx-pcm-audio.0", | 83 | .platform_name = "imx-pcm-audio.0", |
84 | .codec_name = "tlv320aic23-codec.0-001a", | 84 | .codec_name = "tlv320aic23-codec.0-001a", |
85 | .cpu_dai = "imx-ssi.0", | 85 | .cpu_dai_name = "imx-ssi.0", |
86 | .ops = &eukrea_tlv320_snd_ops, | 86 | .ops = &eukrea_tlv320_snd_ops, |
87 | }; | 87 | }; |
88 | 88 | ||
diff --git a/sound/soc/imx/imx-pcm-dma-mx2.c b/sound/soc/imx/imx-pcm-dma-mx2.c index fd493ee1428e..671ef8dd524c 100644 --- a/sound/soc/imx/imx-pcm-dma-mx2.c +++ b/sound/soc/imx/imx-pcm-dma-mx2.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | #include <linux/dmaengine.h> | ||
23 | 24 | ||
24 | #include <sound/core.h> | 25 | #include <sound/core.h> |
25 | #include <sound/initval.h> | 26 | #include <sound/initval.h> |
@@ -27,165 +28,146 @@ | |||
27 | #include <sound/pcm_params.h> | 28 | #include <sound/pcm_params.h> |
28 | #include <sound/soc.h> | 29 | #include <sound/soc.h> |
29 | 30 | ||
30 | #include <mach/dma-mx1-mx2.h> | 31 | #include <mach/dma.h> |
31 | 32 | ||
32 | #include "imx-ssi.h" | 33 | #include "imx-ssi.h" |
33 | 34 | ||
34 | struct imx_pcm_runtime_data { | 35 | struct imx_pcm_runtime_data { |
35 | int sg_count; | 36 | int period_bytes; |
36 | struct scatterlist *sg_list; | ||
37 | int period; | ||
38 | int periods; | 37 | int periods; |
39 | unsigned long dma_addr; | ||
40 | int dma; | 38 | int dma; |
41 | struct snd_pcm_substream *substream; | ||
42 | unsigned long offset; | 39 | unsigned long offset; |
43 | unsigned long size; | 40 | unsigned long size; |
44 | unsigned long period_cnt; | ||
45 | void *buf; | 41 | void *buf; |
46 | int period_time; | 42 | int period_time; |
43 | struct dma_async_tx_descriptor *desc; | ||
44 | struct dma_chan *dma_chan; | ||
45 | struct imx_dma_data dma_data; | ||
47 | }; | 46 | }; |
48 | 47 | ||
49 | /* Called by the DMA framework when a period has elapsed */ | 48 | static void audio_dma_irq(void *data) |
50 | static void imx_ssi_dma_progression(int channel, void *data, | ||
51 | struct scatterlist *sg) | ||
52 | { | 49 | { |
53 | struct snd_pcm_substream *substream = data; | 50 | struct snd_pcm_substream *substream = (struct snd_pcm_substream *)data; |
54 | struct snd_pcm_runtime *runtime = substream->runtime; | 51 | struct snd_pcm_runtime *runtime = substream->runtime; |
55 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; | 52 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; |
56 | 53 | ||
57 | if (!sg) | 54 | iprtd->offset += iprtd->period_bytes; |
58 | return; | 55 | iprtd->offset %= iprtd->period_bytes * iprtd->periods; |
59 | |||
60 | runtime = iprtd->substream->runtime; | ||
61 | 56 | ||
62 | iprtd->offset = sg->dma_address - runtime->dma_addr; | 57 | snd_pcm_period_elapsed(substream); |
63 | |||
64 | snd_pcm_period_elapsed(iprtd->substream); | ||
65 | } | 58 | } |
66 | 59 | ||
67 | static void imx_ssi_dma_callback(int channel, void *data) | 60 | static bool filter(struct dma_chan *chan, void *param) |
68 | { | 61 | { |
69 | pr_err("%s shouldn't be called\n", __func__); | 62 | struct imx_pcm_runtime_data *iprtd = param; |
70 | } | ||
71 | 63 | ||
72 | static void snd_imx_dma_err_callback(int channel, void *data, int err) | 64 | if (!imx_dma_is_general_purpose(chan)) |
73 | { | 65 | return false; |
74 | struct snd_pcm_substream *substream = data; | ||
75 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | ||
76 | struct imx_pcm_dma_params *dma_params = | ||
77 | snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream); | ||
78 | struct snd_pcm_runtime *runtime = substream->runtime; | ||
79 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; | ||
80 | int ret; | ||
81 | 66 | ||
82 | pr_err("DMA timeout on channel %d -%s%s%s%s\n", | 67 | chan->private = &iprtd->dma_data; |
83 | channel, | ||
84 | err & IMX_DMA_ERR_BURST ? " burst" : "", | ||
85 | err & IMX_DMA_ERR_REQUEST ? " request" : "", | ||
86 | err & IMX_DMA_ERR_TRANSFER ? " transfer" : "", | ||
87 | err & IMX_DMA_ERR_BUFFER ? " buffer" : ""); | ||
88 | 68 | ||
89 | imx_dma_disable(iprtd->dma); | 69 | return true; |
90 | ret = imx_dma_setup_sg(iprtd->dma, iprtd->sg_list, iprtd->sg_count, | ||
91 | IMX_DMA_LENGTH_LOOP, dma_params->dma_addr, | ||
92 | substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? | ||
93 | DMA_MODE_WRITE : DMA_MODE_READ); | ||
94 | if (!ret) | ||
95 | imx_dma_enable(iprtd->dma); | ||
96 | } | 70 | } |
97 | 71 | ||
98 | static int imx_ssi_dma_alloc(struct snd_pcm_substream *substream) | 72 | static int imx_ssi_dma_alloc(struct snd_pcm_substream *substream, |
73 | struct snd_pcm_hw_params *params) | ||
99 | { | 74 | { |
100 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 75 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
101 | struct imx_pcm_dma_params *dma_params; | 76 | struct imx_pcm_dma_params *dma_params; |
102 | struct snd_pcm_runtime *runtime = substream->runtime; | 77 | struct snd_pcm_runtime *runtime = substream->runtime; |
103 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; | 78 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; |
79 | struct dma_slave_config slave_config; | ||
80 | dma_cap_mask_t mask; | ||
81 | enum dma_slave_buswidth buswidth; | ||
104 | int ret; | 82 | int ret; |
105 | 83 | ||
106 | dma_params = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream); | 84 | dma_params = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream); |
107 | 85 | ||
108 | iprtd->dma = imx_dma_request_by_prio(DRV_NAME, DMA_PRIO_HIGH); | 86 | iprtd->dma_data.peripheral_type = IMX_DMATYPE_SSI; |
109 | if (iprtd->dma < 0) { | 87 | iprtd->dma_data.priority = DMA_PRIO_HIGH; |
110 | pr_err("Failed to claim the audio DMA\n"); | 88 | iprtd->dma_data.dma_request = dma_params->dma; |
111 | return -ENODEV; | ||
112 | } | ||
113 | 89 | ||
114 | ret = imx_dma_setup_handlers(iprtd->dma, | 90 | /* Try to grab a DMA channel */ |
115 | imx_ssi_dma_callback, | 91 | dma_cap_zero(mask); |
116 | snd_imx_dma_err_callback, substream); | 92 | dma_cap_set(DMA_SLAVE, mask); |
117 | if (ret) | 93 | iprtd->dma_chan = dma_request_channel(mask, filter, iprtd); |
118 | goto out; | 94 | if (!iprtd->dma_chan) |
95 | return -EINVAL; | ||
119 | 96 | ||
120 | ret = imx_dma_setup_progression_handler(iprtd->dma, | 97 | switch (params_format(params)) { |
121 | imx_ssi_dma_progression); | 98 | case SNDRV_PCM_FORMAT_S16_LE: |
122 | if (ret) { | 99 | buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES; |
123 | pr_err("Failed to setup the DMA handler\n"); | 100 | break; |
124 | goto out; | 101 | case SNDRV_PCM_FORMAT_S20_3LE: |
102 | case SNDRV_PCM_FORMAT_S24_LE: | ||
103 | buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
104 | break; | ||
105 | default: | ||
106 | return 0; | ||
125 | } | 107 | } |
126 | 108 | ||
127 | ret = imx_dma_config_channel(iprtd->dma, | 109 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { |
128 | IMX_DMA_MEMSIZE_16 | IMX_DMA_TYPE_FIFO, | 110 | slave_config.direction = DMA_TO_DEVICE; |
129 | IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR, | 111 | slave_config.dst_addr = dma_params->dma_addr; |
130 | dma_params->dma, 1); | 112 | slave_config.dst_addr_width = buswidth; |
131 | if (ret < 0) { | 113 | slave_config.dst_maxburst = dma_params->burstsize; |
132 | pr_err("Cannot configure DMA channel: %d\n", ret); | 114 | } else { |
133 | goto out; | 115 | slave_config.direction = DMA_FROM_DEVICE; |
116 | slave_config.src_addr = dma_params->dma_addr; | ||
117 | slave_config.src_addr_width = buswidth; | ||
118 | slave_config.src_maxburst = dma_params->burstsize; | ||
134 | } | 119 | } |
135 | 120 | ||
136 | imx_dma_config_burstlen(iprtd->dma, dma_params->burstsize * 2); | 121 | ret = dmaengine_slave_config(iprtd->dma_chan, &slave_config); |
122 | if (ret) | ||
123 | return ret; | ||
137 | 124 | ||
138 | return 0; | 125 | return 0; |
139 | out: | ||
140 | imx_dma_free(iprtd->dma); | ||
141 | return ret; | ||
142 | } | 126 | } |
143 | 127 | ||
144 | static int snd_imx_pcm_hw_params(struct snd_pcm_substream *substream, | 128 | static int snd_imx_pcm_hw_params(struct snd_pcm_substream *substream, |
145 | struct snd_pcm_hw_params *params) | 129 | struct snd_pcm_hw_params *params) |
146 | { | 130 | { |
131 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | ||
147 | struct snd_pcm_runtime *runtime = substream->runtime; | 132 | struct snd_pcm_runtime *runtime = substream->runtime; |
148 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; | 133 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; |
149 | int i; | ||
150 | unsigned long dma_addr; | 134 | unsigned long dma_addr; |
135 | struct dma_chan *chan; | ||
136 | struct imx_pcm_dma_params *dma_params; | ||
137 | int ret; | ||
151 | 138 | ||
152 | imx_ssi_dma_alloc(substream); | 139 | dma_params = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream); |
140 | ret = imx_ssi_dma_alloc(substream, params); | ||
141 | if (ret) | ||
142 | return ret; | ||
143 | chan = iprtd->dma_chan; | ||
153 | 144 | ||
154 | iprtd->size = params_buffer_bytes(params); | 145 | iprtd->size = params_buffer_bytes(params); |
155 | iprtd->periods = params_periods(params); | 146 | iprtd->periods = params_periods(params); |
156 | iprtd->period = params_period_bytes(params); | 147 | iprtd->period_bytes = params_period_bytes(params); |
157 | iprtd->offset = 0; | 148 | iprtd->offset = 0; |
158 | iprtd->period_time = HZ / (params_rate(params) / | 149 | iprtd->period_time = HZ / (params_rate(params) / |
159 | params_period_size(params)); | 150 | params_period_size(params)); |
160 | 151 | ||
161 | snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); | 152 | snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); |
162 | 153 | ||
163 | if (iprtd->sg_count != iprtd->periods) { | ||
164 | kfree(iprtd->sg_list); | ||
165 | |||
166 | iprtd->sg_list = kcalloc(iprtd->periods + 1, | ||
167 | sizeof(struct scatterlist), GFP_KERNEL); | ||
168 | if (!iprtd->sg_list) | ||
169 | return -ENOMEM; | ||
170 | iprtd->sg_count = iprtd->periods + 1; | ||
171 | } | ||
172 | |||
173 | sg_init_table(iprtd->sg_list, iprtd->sg_count); | ||
174 | dma_addr = runtime->dma_addr; | 154 | dma_addr = runtime->dma_addr; |
175 | 155 | ||
176 | for (i = 0; i < iprtd->periods; i++) { | 156 | iprtd->buf = (unsigned int *)substream->dma_buffer.area; |
177 | iprtd->sg_list[i].page_link = 0; | 157 | |
178 | iprtd->sg_list[i].offset = 0; | 158 | iprtd->desc = chan->device->device_prep_dma_cyclic(chan, dma_addr, |
179 | iprtd->sg_list[i].dma_address = dma_addr; | 159 | iprtd->period_bytes * iprtd->periods, |
180 | iprtd->sg_list[i].length = iprtd->period; | 160 | iprtd->period_bytes, |
181 | dma_addr += iprtd->period; | 161 | substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? |
162 | DMA_TO_DEVICE : DMA_FROM_DEVICE); | ||
163 | if (!iprtd->desc) { | ||
164 | dev_err(&chan->dev->device, "cannot prepare slave dma\n"); | ||
165 | return -EINVAL; | ||
182 | } | 166 | } |
183 | 167 | ||
184 | /* close the loop */ | 168 | iprtd->desc->callback = audio_dma_irq; |
185 | iprtd->sg_list[iprtd->sg_count - 1].offset = 0; | 169 | iprtd->desc->callback_param = substream; |
186 | iprtd->sg_list[iprtd->sg_count - 1].length = 0; | 170 | |
187 | iprtd->sg_list[iprtd->sg_count - 1].page_link = | ||
188 | ((unsigned long) iprtd->sg_list | 0x01) & ~0x02; | ||
189 | return 0; | 171 | return 0; |
190 | } | 172 | } |
191 | 173 | ||
@@ -194,41 +176,21 @@ static int snd_imx_pcm_hw_free(struct snd_pcm_substream *substream) | |||
194 | struct snd_pcm_runtime *runtime = substream->runtime; | 176 | struct snd_pcm_runtime *runtime = substream->runtime; |
195 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; | 177 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; |
196 | 178 | ||
197 | if (iprtd->dma >= 0) { | 179 | if (iprtd->dma_chan) { |
198 | imx_dma_free(iprtd->dma); | 180 | dma_release_channel(iprtd->dma_chan); |
199 | iprtd->dma = -EINVAL; | 181 | iprtd->dma_chan = NULL; |
200 | } | 182 | } |
201 | 183 | ||
202 | kfree(iprtd->sg_list); | ||
203 | iprtd->sg_list = NULL; | ||
204 | |||
205 | return 0; | 184 | return 0; |
206 | } | 185 | } |
207 | 186 | ||
208 | static int snd_imx_pcm_prepare(struct snd_pcm_substream *substream) | 187 | static int snd_imx_pcm_prepare(struct snd_pcm_substream *substream) |
209 | { | 188 | { |
210 | struct snd_pcm_runtime *runtime = substream->runtime; | ||
211 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 189 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
212 | struct imx_pcm_dma_params *dma_params; | 190 | struct imx_pcm_dma_params *dma_params; |
213 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; | ||
214 | int err; | ||
215 | 191 | ||
216 | dma_params = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream); | 192 | dma_params = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream); |
217 | 193 | ||
218 | iprtd->substream = substream; | ||
219 | iprtd->buf = (unsigned int *)substream->dma_buffer.area; | ||
220 | iprtd->period_cnt = 0; | ||
221 | |||
222 | pr_debug("%s: buf: %p period: %d periods: %d\n", | ||
223 | __func__, iprtd->buf, iprtd->period, iprtd->periods); | ||
224 | |||
225 | err = imx_dma_setup_sg(iprtd->dma, iprtd->sg_list, iprtd->sg_count, | ||
226 | IMX_DMA_LENGTH_LOOP, dma_params->dma_addr, | ||
227 | substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? | ||
228 | DMA_MODE_WRITE : DMA_MODE_READ); | ||
229 | if (err) | ||
230 | return err; | ||
231 | |||
232 | return 0; | 194 | return 0; |
233 | } | 195 | } |
234 | 196 | ||
@@ -241,14 +203,14 @@ static int snd_imx_pcm_trigger(struct snd_pcm_substream *substream, int cmd) | |||
241 | case SNDRV_PCM_TRIGGER_START: | 203 | case SNDRV_PCM_TRIGGER_START: |
242 | case SNDRV_PCM_TRIGGER_RESUME: | 204 | case SNDRV_PCM_TRIGGER_RESUME: |
243 | case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: | 205 | case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: |
244 | imx_dma_enable(iprtd->dma); | 206 | dmaengine_submit(iprtd->desc); |
245 | 207 | ||
246 | break; | 208 | break; |
247 | 209 | ||
248 | case SNDRV_PCM_TRIGGER_STOP: | 210 | case SNDRV_PCM_TRIGGER_STOP: |
249 | case SNDRV_PCM_TRIGGER_SUSPEND: | 211 | case SNDRV_PCM_TRIGGER_SUSPEND: |
250 | case SNDRV_PCM_TRIGGER_PAUSE_PUSH: | 212 | case SNDRV_PCM_TRIGGER_PAUSE_PUSH: |
251 | imx_dma_disable(iprtd->dma); | 213 | dmaengine_terminate_all(iprtd->dma_chan); |
252 | 214 | ||
253 | break; | 215 | break; |
254 | default: | 216 | default: |
@@ -263,6 +225,9 @@ static snd_pcm_uframes_t snd_imx_pcm_pointer(struct snd_pcm_substream *substream | |||
263 | struct snd_pcm_runtime *runtime = substream->runtime; | 225 | struct snd_pcm_runtime *runtime = substream->runtime; |
264 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; | 226 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; |
265 | 227 | ||
228 | pr_debug("%s: %ld %ld\n", __func__, iprtd->offset, | ||
229 | bytes_to_frames(substream->runtime, iprtd->offset)); | ||
230 | |||
266 | return bytes_to_frames(substream->runtime, iprtd->offset); | 231 | return bytes_to_frames(substream->runtime, iprtd->offset); |
267 | } | 232 | } |
268 | 233 | ||
@@ -279,7 +244,7 @@ static struct snd_pcm_hardware snd_imx_hardware = { | |||
279 | .channels_max = 2, | 244 | .channels_max = 2, |
280 | .buffer_bytes_max = IMX_SSI_DMABUF_SIZE, | 245 | .buffer_bytes_max = IMX_SSI_DMABUF_SIZE, |
281 | .period_bytes_min = 128, | 246 | .period_bytes_min = 128, |
282 | .period_bytes_max = 16 * 1024, | 247 | .period_bytes_max = 65535, /* Limited by SDMA engine */ |
283 | .periods_min = 2, | 248 | .periods_min = 2, |
284 | .periods_max = 255, | 249 | .periods_max = 255, |
285 | .fifo_size = 0, | 250 | .fifo_size = 0, |
@@ -304,11 +269,23 @@ static int snd_imx_open(struct snd_pcm_substream *substream) | |||
304 | } | 269 | } |
305 | 270 | ||
306 | snd_soc_set_runtime_hwparams(substream, &snd_imx_hardware); | 271 | snd_soc_set_runtime_hwparams(substream, &snd_imx_hardware); |
272 | |||
273 | return 0; | ||
274 | } | ||
275 | |||
276 | static int snd_imx_close(struct snd_pcm_substream *substream) | ||
277 | { | ||
278 | struct snd_pcm_runtime *runtime = substream->runtime; | ||
279 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; | ||
280 | |||
281 | kfree(iprtd); | ||
282 | |||
307 | return 0; | 283 | return 0; |
308 | } | 284 | } |
309 | 285 | ||
310 | static struct snd_pcm_ops imx_pcm_ops = { | 286 | static struct snd_pcm_ops imx_pcm_ops = { |
311 | .open = snd_imx_open, | 287 | .open = snd_imx_open, |
288 | .close = snd_imx_close, | ||
312 | .ioctl = snd_pcm_lib_ioctl, | 289 | .ioctl = snd_pcm_lib_ioctl, |
313 | .hw_params = snd_imx_pcm_hw_params, | 290 | .hw_params = snd_imx_pcm_hw_params, |
314 | .hw_free = snd_imx_pcm_hw_free, | 291 | .hw_free = snd_imx_pcm_hw_free, |
@@ -340,7 +317,6 @@ static struct platform_driver imx_pcm_driver = { | |||
340 | .name = "imx-pcm-audio", | 317 | .name = "imx-pcm-audio", |
341 | .owner = THIS_MODULE, | 318 | .owner = THIS_MODULE, |
342 | }, | 319 | }, |
343 | |||
344 | .probe = imx_soc_platform_probe, | 320 | .probe = imx_soc_platform_probe, |
345 | .remove = __devexit_p(imx_soc_platform_remove), | 321 | .remove = __devexit_p(imx_soc_platform_remove), |
346 | }; | 322 | }; |
@@ -356,4 +332,3 @@ static void __exit snd_imx_pcm_exit(void) | |||
356 | platform_driver_unregister(&imx_pcm_driver); | 332 | platform_driver_unregister(&imx_pcm_driver); |
357 | } | 333 | } |
358 | module_exit(snd_imx_pcm_exit); | 334 | module_exit(snd_imx_pcm_exit); |
359 | |||
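imx-pcm-dma-mx2.c is converted from the legacy i.MX scatterlist DMA API to dmaengine: hw_params requests a slave channel through a filter that attaches imx_dma_data, configures it with dmaengine_slave_config(), and prepares a single cyclic descriptor covering all periods, whose callback advances the pointer each period. A condensed sketch of the playback path assembled from the hunks above, with error handling elided:

	dma_cap_mask_t mask;
	struct dma_slave_config cfg = {
		.direction	= DMA_TO_DEVICE,	/* playback case */
		.dst_addr	= dma_params->dma_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
		.dst_maxburst	= dma_params->burstsize,
	};

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	iprtd->dma_chan = dma_request_channel(mask, filter, iprtd);

	dmaengine_slave_config(iprtd->dma_chan, &cfg);

	/* One cyclic descriptor spanning every period of the ring buffer */
	iprtd->desc = iprtd->dma_chan->device->device_prep_dma_cyclic(
			iprtd->dma_chan, runtime->dma_addr,
			iprtd->period_bytes * iprtd->periods,
			iprtd->period_bytes, DMA_TO_DEVICE);
	iprtd->desc->callback = audio_dma_irq;		/* per-period elapsed */
	iprtd->desc->callback_param = substream;

	/* trigger(START) -> dmaengine_submit(iprtd->desc);
	 * trigger(STOP)  -> dmaengine_terminate_all(iprtd->dma_chan); */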
diff --git a/sound/soc/imx/imx-ssi.c b/sound/soc/imx/imx-ssi.c index d4bd345b0a8d..d2d98c75ee8a 100644 --- a/sound/soc/imx/imx-ssi.c +++ b/sound/soc/imx/imx-ssi.c | |||
@@ -439,7 +439,22 @@ void imx_pcm_free(struct snd_pcm *pcm) | |||
439 | } | 439 | } |
440 | EXPORT_SYMBOL_GPL(imx_pcm_free); | 440 | EXPORT_SYMBOL_GPL(imx_pcm_free); |
441 | 441 | ||
442 | static int imx_ssi_dai_probe(struct snd_soc_dai *dai) | ||
443 | { | ||
444 | struct imx_ssi *ssi = dev_get_drvdata(dai->dev); | ||
445 | uint32_t val; | ||
446 | |||
447 | snd_soc_dai_set_drvdata(dai, ssi); | ||
448 | |||
449 | val = SSI_SFCSR_TFWM0(ssi->dma_params_tx.burstsize) | | ||
450 | SSI_SFCSR_RFWM0(ssi->dma_params_rx.burstsize); | ||
451 | writel(val, ssi->base + SSI_SFCSR); | ||
452 | |||
453 | return 0; | ||
454 | } | ||
455 | |||
442 | static struct snd_soc_dai_driver imx_ssi_dai = { | 456 | static struct snd_soc_dai_driver imx_ssi_dai = { |
457 | .probe = imx_ssi_dai_probe, | ||
443 | .playback = { | 458 | .playback = { |
444 | .channels_min = 2, | 459 | .channels_min = 2, |
445 | .channels_max = 2, | 460 | .channels_max = 2, |
@@ -455,20 +470,6 @@ static struct snd_soc_dai_driver imx_ssi_dai = { | |||
455 | .ops = &imx_ssi_pcm_dai_ops, | 470 | .ops = &imx_ssi_pcm_dai_ops, |
456 | }; | 471 | }; |
457 | 472 | ||
458 | static int imx_ssi_dai_probe(struct snd_soc_dai *dai) | ||
459 | { | ||
460 | struct imx_ssi *ssi = dev_get_drvdata(dai->dev); | ||
461 | uint32_t val; | ||
462 | |||
463 | snd_soc_dai_set_drvdata(dai, ssi); | ||
464 | |||
465 | val = SSI_SFCSR_TFWM0(ssi->dma_params_tx.burstsize) | | ||
466 | SSI_SFCSR_RFWM0(ssi->dma_params_rx.burstsize); | ||
467 | writel(val, ssi->base + SSI_SFCSR); | ||
468 | |||
469 | return 0; | ||
470 | } | ||
471 | |||
472 | static struct snd_soc_dai_driver imx_ac97_dai = { | 473 | static struct snd_soc_dai_driver imx_ac97_dai = { |
473 | .probe = imx_ssi_dai_probe, | 474 | .probe = imx_ssi_dai_probe, |
474 | .ac97_control = 1, | 475 | .ac97_control = 1, |
@@ -677,7 +678,17 @@ static int imx_ssi_probe(struct platform_device *pdev) | |||
677 | goto failed_register; | 678 | goto failed_register; |
678 | } | 679 | } |
679 | 680 | ||
680 | ssi->soc_platform_pdev = platform_device_alloc("imx-fiq-pcm-audio", pdev->id); | 681 | ssi->soc_platform_pdev_fiq = platform_device_alloc("imx-fiq-pcm-audio", pdev->id); |
682 | if (!ssi->soc_platform_pdev_fiq) | ||
683 | goto failed_pdev_fiq_alloc; | ||
684 | platform_set_drvdata(ssi->soc_platform_pdev_fiq, ssi); | ||
685 | ret = platform_device_add(ssi->soc_platform_pdev_fiq); | ||
686 | if (ret) { | ||
687 | dev_err(&pdev->dev, "failed to add platform device\n"); | ||
688 | goto failed_pdev_fiq_add; | ||
689 | } | ||
690 | |||
691 | ssi->soc_platform_pdev = platform_device_alloc("imx-pcm-audio", pdev->id); | ||
681 | if (!ssi->soc_platform_pdev) | 692 | if (!ssi->soc_platform_pdev) |
682 | goto failed_pdev_alloc; | 693 | goto failed_pdev_alloc; |
683 | platform_set_drvdata(ssi->soc_platform_pdev, ssi); | 694 | platform_set_drvdata(ssi->soc_platform_pdev, ssi); |
@@ -692,6 +703,9 @@ static int imx_ssi_probe(struct platform_device *pdev) | |||
692 | failed_pdev_add: | 703 | failed_pdev_add: |
693 | platform_device_put(ssi->soc_platform_pdev); | 704 | platform_device_put(ssi->soc_platform_pdev); |
694 | failed_pdev_alloc: | 705 | failed_pdev_alloc: |
706 | failed_pdev_fiq_add: | ||
707 | platform_device_put(ssi->soc_platform_pdev_fiq); | ||
708 | failed_pdev_fiq_alloc: | ||
695 | snd_soc_unregister_dai(&pdev->dev); | 709 | snd_soc_unregister_dai(&pdev->dev); |
696 | failed_register: | 710 | failed_register: |
697 | failed_ac97: | 711 | failed_ac97: |
diff --git a/sound/soc/imx/imx-ssi.h b/sound/soc/imx/imx-ssi.h index 53b780d9b2b0..a4406a134892 100644 --- a/sound/soc/imx/imx-ssi.h +++ b/sound/soc/imx/imx-ssi.h | |||
@@ -185,6 +185,9 @@ | |||
185 | 185 | ||
186 | #define DRV_NAME "imx-ssi" | 186 | #define DRV_NAME "imx-ssi" |
187 | 187 | ||
188 | #include <linux/dmaengine.h> | ||
189 | #include <mach/dma.h> | ||
190 | |||
188 | struct imx_pcm_dma_params { | 191 | struct imx_pcm_dma_params { |
189 | int dma; | 192 | int dma; |
190 | unsigned long dma_addr; | 193 | unsigned long dma_addr; |
@@ -212,6 +215,7 @@ struct imx_ssi { | |||
212 | int enabled; | 215 | int enabled; |
213 | 216 | ||
214 | struct platform_device *soc_platform_pdev; | 217 | struct platform_device *soc_platform_pdev; |
218 | struct platform_device *soc_platform_pdev_fiq; | ||
215 | }; | 219 | }; |
216 | 220 | ||
217 | struct snd_soc_platform *imx_ssi_fiq_init(struct platform_device *pdev, | 221 | struct snd_soc_platform *imx_ssi_fiq_init(struct platform_device *pdev, |
diff --git a/sound/soc/imx/phycore-ac97.c b/sound/soc/imx/phycore-ac97.c index 6a65dd705519..39f23734781a 100644 --- a/sound/soc/imx/phycore-ac97.c +++ b/sound/soc/imx/phycore-ac97.c | |||
@@ -20,9 +20,6 @@ | |||
20 | #include <sound/soc-dapm.h> | 20 | #include <sound/soc-dapm.h> |
21 | #include <asm/mach-types.h> | 21 | #include <asm/mach-types.h> |
22 | 22 | ||
23 | #include "../codecs/wm9712.h" | ||
24 | #include "imx-ssi.h" | ||
25 | |||
26 | static struct snd_soc_card imx_phycore; | 23 | static struct snd_soc_card imx_phycore; |
27 | 24 | ||
28 | static struct snd_soc_ops imx_phycore_hifi_ops = { | 25 | static struct snd_soc_ops imx_phycore_hifi_ops = { |
@@ -41,7 +38,7 @@ static struct snd_soc_dai_link imx_phycore_dai_ac97[] = { | |||
41 | }; | 38 | }; |
42 | 39 | ||
43 | static struct snd_soc_card imx_phycore = { | 40 | static struct snd_soc_card imx_phycore = { |
44 | .name = "PhyCORE-audio", | 41 | .name = "PhyCORE-ac97-audio", |
45 | .dai_link = imx_phycore_dai_ac97, | 42 | .dai_link = imx_phycore_dai_ac97, |
46 | .num_links = ARRAY_SIZE(imx_phycore_dai_ac97), | 43 | .num_links = ARRAY_SIZE(imx_phycore_dai_ac97), |
47 | }; | 44 | }; |
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c index d211c9fa5a91..7e84f24b9a88 100644 --- a/sound/soc/omap/omap-mcbsp.c +++ b/sound/soc/omap/omap-mcbsp.c | |||
@@ -644,15 +644,23 @@ static int omap_mcbsp_dai_set_dai_sysclk(struct snd_soc_dai *cpu_dai, | |||
644 | 644 | ||
645 | 645 | ||
646 | case OMAP_MCBSP_CLKR_SRC_CLKR: | 646 | case OMAP_MCBSP_CLKR_SRC_CLKR: |
647 | if (cpu_class_is_omap1()) | ||
648 | break; | ||
647 | omap2_mcbsp1_mux_clkr_src(CLKR_SRC_CLKR); | 649 | omap2_mcbsp1_mux_clkr_src(CLKR_SRC_CLKR); |
648 | break; | 650 | break; |
649 | case OMAP_MCBSP_CLKR_SRC_CLKX: | 651 | case OMAP_MCBSP_CLKR_SRC_CLKX: |
652 | if (cpu_class_is_omap1()) | ||
653 | break; | ||
650 | omap2_mcbsp1_mux_clkr_src(CLKR_SRC_CLKX); | 654 | omap2_mcbsp1_mux_clkr_src(CLKR_SRC_CLKX); |
651 | break; | 655 | break; |
652 | case OMAP_MCBSP_FSR_SRC_FSR: | 656 | case OMAP_MCBSP_FSR_SRC_FSR: |
657 | if (cpu_class_is_omap1()) | ||
658 | break; | ||
653 | omap2_mcbsp1_mux_fsr_src(FSR_SRC_FSR); | 659 | omap2_mcbsp1_mux_fsr_src(FSR_SRC_FSR); |
654 | break; | 660 | break; |
655 | case OMAP_MCBSP_FSR_SRC_FSX: | 661 | case OMAP_MCBSP_FSR_SRC_FSX: |
662 | if (cpu_class_is_omap1()) | ||
663 | break; | ||
656 | omap2_mcbsp1_mux_fsr_src(FSR_SRC_FSX); | 664 | omap2_mcbsp1_mux_fsr_src(FSR_SRC_FSX); |
657 | break; | 665 | break; |
658 | default: | 666 | default: |
diff --git a/sound/soc/pxa/corgi.c b/sound/soc/pxa/corgi.c index 97e9423615c9..f451acd4935b 100644 --- a/sound/soc/pxa/corgi.c +++ b/sound/soc/pxa/corgi.c | |||
@@ -100,8 +100,13 @@ static int corgi_startup(struct snd_pcm_substream *substream) | |||
100 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 100 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
101 | struct snd_soc_codec *codec = rtd->codec; | 101 | struct snd_soc_codec *codec = rtd->codec; |
102 | 102 | ||
103 | mutex_lock(&codec->mutex); | ||
104 | |||
103 | /* check the jack status at stream startup */ | 105 | /* check the jack status at stream startup */ |
104 | corgi_ext_control(codec); | 106 | corgi_ext_control(codec); |
107 | |||
108 | mutex_unlock(&codec->mutex); | ||
109 | |||
105 | return 0; | 110 | return 0; |
106 | } | 111 | } |
107 | 112 | ||
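corgi.c and the following pxa machine drivers (magician, poodle, spitz, tosa) all receive the same fix: the jack-status check at stream startup now runs with codec->mutex held, presumably because the *_ext_control() helpers touch shared codec/DAPM state. The pattern:

	mutex_lock(&codec->mutex);
	corgi_ext_control(codec);	/* jack-status driven pin updates under the codec mutex */
	mutex_unlock(&codec->mutex);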
diff --git a/sound/soc/pxa/magician.c b/sound/soc/pxa/magician.c index b8207ced4072..5ef0526924b9 100644 --- a/sound/soc/pxa/magician.c +++ b/sound/soc/pxa/magician.c | |||
@@ -72,9 +72,13 @@ static int magician_startup(struct snd_pcm_substream *substream) | |||
72 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 72 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
73 | struct snd_soc_codec *codec = rtd->codec; | 73 | struct snd_soc_codec *codec = rtd->codec; |
74 | 74 | ||
75 | mutex_lock(&codec->mutex); | ||
76 | |||
75 | /* check the jack status at stream startup */ | 77 | /* check the jack status at stream startup */ |
76 | magician_ext_control(codec); | 78 | magician_ext_control(codec); |
77 | 79 | ||
80 | mutex_unlock(&codec->mutex); | ||
81 | |||
78 | return 0; | 82 | return 0; |
79 | } | 83 | } |
80 | 84 | ||
diff --git a/sound/soc/pxa/poodle.c b/sound/soc/pxa/poodle.c index af84ee9c5e11..84edd0385a21 100644 --- a/sound/soc/pxa/poodle.c +++ b/sound/soc/pxa/poodle.c | |||
@@ -77,8 +77,13 @@ static int poodle_startup(struct snd_pcm_substream *substream) | |||
77 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 77 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
78 | struct snd_soc_codec *codec = rtd->codec; | 78 | struct snd_soc_codec *codec = rtd->codec; |
79 | 79 | ||
80 | mutex_lock(&codec->mutex); | ||
81 | |||
80 | /* check the jack status at stream startup */ | 82 | /* check the jack status at stream startup */ |
81 | poodle_ext_control(codec); | 83 | poodle_ext_control(codec); |
84 | |||
85 | mutex_unlock(&codec->mutex); | ||
86 | |||
82 | return 0; | 87 | return 0; |
83 | } | 88 | } |
84 | 89 | ||
diff --git a/sound/soc/pxa/spitz.c b/sound/soc/pxa/spitz.c index f470f360f4dd..0b30d7de24ec 100644 --- a/sound/soc/pxa/spitz.c +++ b/sound/soc/pxa/spitz.c | |||
@@ -108,8 +108,13 @@ static int spitz_startup(struct snd_pcm_substream *substream) | |||
108 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 108 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
109 | struct snd_soc_codec *codec = rtd->codec; | 109 | struct snd_soc_codec *codec = rtd->codec; |
110 | 110 | ||
111 | mutex_lock(&codec->mutex); | ||
112 | |||
111 | /* check the jack status at stream startup */ | 113 | /* check the jack status at stream startup */ |
112 | spitz_ext_control(codec); | 114 | spitz_ext_control(codec); |
115 | |||
116 | mutex_unlock(&codec->mutex); | ||
117 | |||
113 | return 0; | 118 | return 0; |
114 | } | 119 | } |
115 | 120 | ||
diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c index 73d0edd8ded9..7b983f935454 100644 --- a/sound/soc/pxa/tosa.c +++ b/sound/soc/pxa/tosa.c | |||
@@ -81,8 +81,13 @@ static int tosa_startup(struct snd_pcm_substream *substream) | |||
81 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 81 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
82 | struct snd_soc_codec *codec = rtd->codec; | 82 | struct snd_soc_codec *codec = rtd->codec; |
83 | 83 | ||
84 | mutex_lock(&codec->mutex); | ||
85 | |||
84 | /* check the jack status at stream startup */ | 86 | /* check the jack status at stream startup */ |
85 | tosa_ext_control(codec); | 87 | tosa_ext_control(codec); |
88 | |||
89 | mutex_unlock(&codec->mutex); | ||
90 | |||
86 | return 0; | 91 | return 0; |
87 | } | 92 | } |
88 | 93 | ||
diff --git a/sound/soc/s3c24xx/Kconfig b/sound/soc/s3c24xx/Kconfig index 8a6b53ccd203..d85bf8a0abb2 100644 --- a/sound/soc/s3c24xx/Kconfig +++ b/sound/soc/s3c24xx/Kconfig | |||
@@ -2,6 +2,7 @@ config SND_S3C24XX_SOC | |||
2 | tristate "SoC Audio for the Samsung S3CXXXX chips" | 2 | tristate "SoC Audio for the Samsung S3CXXXX chips" |
3 | depends on ARCH_S3C2410 || ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 | 3 | depends on ARCH_S3C2410 || ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 |
4 | select S3C64XX_DMA if ARCH_S3C64XX | 4 | select S3C64XX_DMA if ARCH_S3C64XX |
5 | select S3C2410_DMA if ARCH_S3C2410 | ||
5 | help | 6 | help |
6 | Say Y or M if you want to add support for codecs attached to | 7 | Say Y or M if you want to add support for codecs attached to |
7 | the S3C24XX AC97 or I2S interfaces. You will also need to | 8 | the S3C24XX AC97 or I2S interfaces. You will also need to |
diff --git a/sound/soc/s3c24xx/rx1950_uda1380.c b/sound/soc/s3c24xx/rx1950_uda1380.c index ffd5cf2fb0a9..468cc11fdf47 100644 --- a/sound/soc/s3c24xx/rx1950_uda1380.c +++ b/sound/soc/s3c24xx/rx1950_uda1380.c | |||
@@ -50,7 +50,6 @@ static unsigned int rates[] = { | |||
50 | 16000, | 50 | 16000, |
51 | 44100, | 51 | 44100, |
52 | 48000, | 52 | 48000, |
53 | 88200, | ||
54 | }; | 53 | }; |
55 | 54 | ||
56 | static struct snd_pcm_hw_constraint_list hw_rates = { | 55 | static struct snd_pcm_hw_constraint_list hw_rates = { |
@@ -130,7 +129,6 @@ static const struct snd_soc_dapm_route audio_map[] = { | |||
130 | }; | 129 | }; |
131 | 130 | ||
132 | static struct platform_device *s3c24xx_snd_device; | 131 | static struct platform_device *s3c24xx_snd_device; |
133 | static struct clk *xtal; | ||
134 | 132 | ||
135 | static int rx1950_startup(struct snd_pcm_substream *substream) | 133 | static int rx1950_startup(struct snd_pcm_substream *substream) |
136 | { | 134 | { |
@@ -179,10 +177,8 @@ static int rx1950_hw_params(struct snd_pcm_substream *substream, | |||
179 | case 44100: | 177 | case 44100: |
180 | case 88200: | 178 | case 88200: |
181 | clk_source = S3C24XX_CLKSRC_MPLL; | 179 | clk_source = S3C24XX_CLKSRC_MPLL; |
182 | fs_mode = S3C2410_IISMOD_256FS; | 180 | fs_mode = S3C2410_IISMOD_384FS; |
183 | div = clk_get_rate(xtal) / (256 * rate); | 181 | div = 1; |
184 | if (clk_get_rate(xtal) % (256 * rate) > (128 * rate)) | ||
185 | div++; | ||
186 | break; | 182 | break; |
187 | default: | 183 | default: |
188 | printk(KERN_ERR "%s: rate %d is not supported\n", | 184 | printk(KERN_ERR "%s: rate %d is not supported\n", |
@@ -210,7 +206,7 @@ static int rx1950_hw_params(struct snd_pcm_substream *substream, | |||
210 | 206 | ||
211 | /* set MCLK division for sample rate */ | 207 | /* set MCLK division for sample rate */ |
212 | ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_MCLK, | 208 | ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_MCLK, |
213 | S3C2410_IISMOD_384FS); | 209 | fs_mode); |
214 | if (ret < 0) | 210 | if (ret < 0) |
215 | return ret; | 211 | return ret; |
216 | 212 | ||
@@ -295,17 +291,8 @@ static int __init rx1950_init(void) | |||
295 | goto err_plat_add; | 291 | goto err_plat_add; |
296 | } | 292 | } |
297 | 293 | ||
298 | xtal = clk_get(&s3c24xx_snd_device->dev, "xtal"); | ||
299 | |||
300 | if (IS_ERR(xtal)) { | ||
301 | ret = PTR_ERR(xtal); | ||
302 | platform_device_unregister(s3c24xx_snd_device); | ||
303 | goto err_clk; | ||
304 | } | ||
305 | |||
306 | return 0; | 294 | return 0; |
307 | 295 | ||
308 | err_clk: | ||
309 | err_plat_add: | 296 | err_plat_add: |
310 | err_plat_alloc: | 297 | err_plat_alloc: |
311 | err_gpio_conf: | 298 | err_gpio_conf: |
@@ -320,7 +307,6 @@ static void __exit rx1950_exit(void) | |||
320 | platform_device_unregister(s3c24xx_snd_device); | 307 | platform_device_unregister(s3c24xx_snd_device); |
321 | snd_soc_jack_free_gpios(&hp_jack, ARRAY_SIZE(hp_jack_gpios), | 308 | snd_soc_jack_free_gpios(&hp_jack, ARRAY_SIZE(hp_jack_gpios), |
322 | hp_jack_gpios); | 309 | hp_jack_gpios); |
323 | clk_put(xtal); | ||
324 | gpio_free(S3C2410_GPA(1)); | 310 | gpio_free(S3C2410_GPA(1)); |
325 | } | 311 | } |
326 | 312 | ||
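The rx1950_uda1380.c change drops the xtal-derived MCLK divider computation and instead runs the 44.1/88.2 kHz family from the MPLL at 384FS with a fixed divider of 1. For reference, the rounding that the removed code performed (nearest integer of xtal / (256 * rate), with exact halves not bumped because the comparison is strict) can be sketched standalone as below; the crystal frequency in main() is illustrative only.

#include <stdio.h>

/* Nearest-integer divider, as computed by the removed rx1950 code:
 * div = round(xtal_hz / (256 * rate)), ties rounding down. */
static unsigned int mclk_div_nearest(unsigned long xtal_hz, unsigned int rate)
{
	unsigned int div = xtal_hz / (256UL * rate);

	if (xtal_hz % (256UL * rate) > 128UL * rate)
		div++;
	return div;
}

int main(void)
{
	/* e.g. a 33.8688 MHz source at 44.1 kHz: 33868800 / 11289600 = 3 */
	printf("%u\n", mclk_div_nearest(33868800UL, 44100));
	return 0;
}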
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c index 507e709f2807..4c2404b1b862 100644 --- a/sound/soc/sh/fsi.c +++ b/sound/soc/sh/fsi.c | |||
@@ -132,6 +132,8 @@ struct fsi_priv { | |||
132 | struct fsi_stream playback; | 132 | struct fsi_stream playback; |
133 | struct fsi_stream capture; | 133 | struct fsi_stream capture; |
134 | 134 | ||
135 | long rate; | ||
136 | |||
135 | u32 mst_ctrl; | 137 | u32 mst_ctrl; |
136 | }; | 138 | }; |
137 | 139 | ||
@@ -854,10 +856,17 @@ static void fsi_dai_shutdown(struct snd_pcm_substream *substream, | |||
854 | { | 856 | { |
855 | struct fsi_priv *fsi = fsi_get_priv(substream); | 857 | struct fsi_priv *fsi = fsi_get_priv(substream); |
856 | int is_play = fsi_is_play(substream); | 858 | int is_play = fsi_is_play(substream); |
859 | struct fsi_master *master = fsi_get_master(fsi); | ||
860 | int (*set_rate)(struct device *dev, int is_porta, int rate, int enable); | ||
857 | 861 | ||
858 | fsi_irq_disable(fsi, is_play); | 862 | fsi_irq_disable(fsi, is_play); |
859 | fsi_clk_ctrl(fsi, 0); | 863 | fsi_clk_ctrl(fsi, 0); |
860 | 864 | ||
865 | set_rate = master->info->set_rate; | ||
866 | if (set_rate && fsi->rate) | ||
867 | set_rate(dai->dev, fsi_is_port_a(fsi), fsi->rate, 0); | ||
868 | fsi->rate = 0; | ||
869 | |||
861 | pm_runtime_put_sync(dai->dev); | 870 | pm_runtime_put_sync(dai->dev); |
862 | } | 871 | } |
863 | 872 | ||
@@ -891,20 +900,20 @@ static int fsi_dai_hw_params(struct snd_pcm_substream *substream, | |||
891 | { | 900 | { |
892 | struct fsi_priv *fsi = fsi_get_priv(substream); | 901 | struct fsi_priv *fsi = fsi_get_priv(substream); |
893 | struct fsi_master *master = fsi_get_master(fsi); | 902 | struct fsi_master *master = fsi_get_master(fsi); |
894 | int (*set_rate)(int is_porta, int rate) = master->info->set_rate; | 903 | int (*set_rate)(struct device *dev, int is_porta, int rate, int enable); |
895 | int fsi_ver = master->core->ver; | 904 | int fsi_ver = master->core->ver; |
896 | int is_play = fsi_is_play(substream); | 905 | long rate = params_rate(params); |
897 | int ret; | 906 | int ret; |
898 | 907 | ||
899 | /* if slave mode, set_rate is not needed */ | 908 | set_rate = master->info->set_rate; |
900 | if (!fsi_is_master_mode(fsi, is_play)) | 909 | if (!set_rate) |
901 | return 0; | 910 | return 0; |
902 | 911 | ||
903 | /* it is error if no set_rate */ | 912 | ret = set_rate(dai->dev, fsi_is_port_a(fsi), rate, 1); |
904 | if (!set_rate) | 913 | if (ret < 0) /* error */ |
905 | return -EIO; | 914 | return ret; |
906 | 915 | ||
907 | ret = set_rate(fsi_is_port_a(fsi), params_rate(params)); | 916 | fsi->rate = rate; |
908 | if (ret > 0) { | 917 | if (ret > 0) { |
909 | u32 data = 0; | 918 | u32 data = 0; |
910 | 919 | ||
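The fsi.c change widens the board-supplied set_rate hook to take the device, the requested rate and an enable flag, and the driver now remembers fsi->rate so it can call the hook again with enable=0 from shutdown. A hypothetical board-side implementation matching the new signature is sketched below; the port check and supported rates are placeholders, not taken from any real board file. Per the ret > 0 branch in the hw_params hunk, a positive return value asks the driver to program additional clock bits, 0 means nothing more to do, and a negative value is an error.

#include <linux/device.h>
#include <linux/errno.h>

/*
 * Hypothetical board hook for the FSI set_rate callback.
 * Called with enable=1 from hw_params and enable=0 from shutdown.
 */
static int my_board_set_rate(struct device *dev, int is_porta, int rate, int enable)
{
	if (!is_porta)
		return -EINVAL;		/* only port A wired up on this imaginary board */

	if (!enable) {
		/* undo whatever clock setup was done for the previous rate */
		return 0;
	}

	switch (rate) {
	case 44100:
	case 48000:
		/* configure the external audio clock for this rate here */
		return 0;
	default:
		return -EINVAL;
	}
}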
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 614a8b30d87b..441285ade024 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c | |||
@@ -3043,8 +3043,10 @@ int snd_soc_register_dais(struct device *dev, | |||
3043 | for (i = 0; i < count; i++) { | 3043 | for (i = 0; i < count; i++) { |
3044 | 3044 | ||
3045 | dai = kzalloc(sizeof(struct snd_soc_dai), GFP_KERNEL); | 3045 | dai = kzalloc(sizeof(struct snd_soc_dai), GFP_KERNEL); |
3046 | if (dai == NULL) | 3046 | if (dai == NULL) { |
3047 | return -ENOMEM; | 3047 | ret = -ENOMEM; |
3048 | goto err; | ||
3049 | } | ||
3048 | 3050 | ||
3049 | /* create DAI component name */ | 3051 | /* create DAI component name */ |
3050 | dai->name = fmt_multiple_name(dev, &dai_drv[i]); | 3052 | dai->name = fmt_multiple_name(dev, &dai_drv[i]); |
@@ -3263,9 +3265,6 @@ int snd_soc_register_codec(struct device *dev, | |||
3263 | return 0; | 3265 | return 0; |
3264 | 3266 | ||
3265 | error: | 3267 | error: |
3266 | for (i--; i >= 0; i--) | ||
3267 | snd_soc_unregister_dai(dev); | ||
3268 | |||
3269 | if (codec->reg_cache) | 3268 | if (codec->reg_cache) |
3270 | kfree(codec->reg_cache); | 3269 | kfree(codec->reg_cache); |
3271 | kfree(codec->name); | 3270 | kfree(codec->name); |
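Both soc-core.c hunks are about error unwinding: snd_soc_register_dais() now jumps to its common err label instead of returning directly when kzalloc() fails, and the codec error path no longer unregisters DAIs it never registered. The goto-unwind idiom the first hunk relies on can be illustrated standalone as follows; the resources being "registered" are made up for the example.

#include <stdlib.h>

static void *items[8];

/*
 * Illustrative goto-unwind, mirroring the snd_soc_register_dais() fix:
 * a failure partway through releases everything set up so far instead
 * of returning early and leaking the earlier allocations.
 */
static int register_items(int count)
{
	int i, ret = 0;

	for (i = 0; i < count && i < 8; i++) {
		items[i] = malloc(32);
		if (!items[i]) {
			ret = -1;
			goto err;	/* unwind, don't just return -1 */
		}
	}
	return 0;

err:
	while (--i >= 0) {
		free(items[i]);
		items[i] = NULL;
	}
	return ret;
}

int main(void)
{
	return register_items(4) ? 1 : 0;
}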
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 7d85c6496afa..75ed6491222d 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c | |||
@@ -683,12 +683,12 @@ static int dapm_seq_compare(struct snd_soc_dapm_widget *a, | |||
683 | struct snd_soc_dapm_widget *b, | 683 | struct snd_soc_dapm_widget *b, |
684 | int sort[]) | 684 | int sort[]) |
685 | { | 685 | { |
686 | if (a->codec != b->codec) | ||
687 | return (unsigned long)a - (unsigned long)b; | ||
688 | if (sort[a->id] != sort[b->id]) | 686 | if (sort[a->id] != sort[b->id]) |
689 | return sort[a->id] - sort[b->id]; | 687 | return sort[a->id] - sort[b->id]; |
690 | if (a->reg != b->reg) | 688 | if (a->reg != b->reg) |
691 | return a->reg - b->reg; | 689 | return a->reg - b->reg; |
690 | if (a->codec != b->codec) | ||
691 | return (unsigned long)a->codec - (unsigned long)b->codec; | ||
692 | 692 | ||
693 | return 0; | 693 | return 0; |
694 | } | 694 | } |
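The dapm_seq_compare() fix reorders the comparison keys: widgets now sort by sequence class first, then by register, and only then by owning codec, with the final tie-break comparing the codec pointers rather than the widget addresses. A standalone sketch of the corrected ordering, with simplified stand-in types:

struct widget {
	int id;			/* index into the per-sequence sort[] table */
	int reg;		/* register this widget lives in */
	const void *codec;	/* owning codec, compared last as a tie-break */
};

/* Corrected ordering: sort class, then register, then owning codec. */
static int seq_compare(const struct widget *a, const struct widget *b,
		       const int sort[])
{
	if (sort[a->id] != sort[b->id])
		return sort[a->id] - sort[b->id];
	if (a->reg != b->reg)
		return a->reg - b->reg;
	if (a->codec != b->codec)
		return (long)a->codec - (long)b->codec;
	return 0;
}

int main(void)
{
	int sort[2] = { 0, 1 };
	struct widget a = { .id = 0, .reg = 4, .codec = (void *)0x10 };
	struct widget b = { .id = 0, .reg = 4, .codec = (void *)0x20 };

	/* same class and register, so ordering falls back to the codec pointer */
	return seq_compare(&a, &b, sort) < 0 ? 0 : 1;
}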
diff --git a/sound/spi/at73c213.c b/sound/spi/at73c213.c index 1bc56b2b94e2..337a00241a1f 100644 --- a/sound/spi/at73c213.c +++ b/sound/spi/at73c213.c | |||
@@ -155,7 +155,7 @@ static int snd_at73c213_set_bitrate(struct snd_at73c213 *chip) | |||
155 | if (max_tries < 1) | 155 | if (max_tries < 1) |
156 | max_tries = 1; | 156 | max_tries = 1; |
157 | 157 | ||
158 | /* ssc_div must be a power of 2. */ | 158 | /* ssc_div must be even. */ |
159 | ssc_div = (ssc_div + 1) & ~1UL; | 159 | ssc_div = (ssc_div + 1) & ~1UL; |
160 | 160 | ||
161 | if ((ssc_rate / (ssc_div * 2 * 16)) < BITRATE_MIN) { | 161 | if ((ssc_rate / (ssc_div * 2 * 16)) < BITRATE_MIN) { |
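The at73c213.c hunk only corrects the comment: (ssc_div + 1) & ~1UL rounds the divider up to the next even number, not to a power of two. A tiny check of that arithmetic:

#include <stdio.h>

int main(void)
{
	/* (x + 1) & ~1UL rounds up to the next even value: 5 -> 6, 6 -> 6, 7 -> 8. */
	for (unsigned long x = 5; x <= 8; x++)
		printf("%lu -> %lu\n", x, (x + 1) & ~1UL);
	return 0;
}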
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 93bd2ff001fb..e2c2de201eec 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c | |||
@@ -697,17 +697,18 @@ static int __cmd_record(int argc, const char **argv) | |||
697 | if (err < 0) | 697 | if (err < 0) |
698 | err = event__synthesize_kernel_mmap(process_synthesized_event, | 698 | err = event__synthesize_kernel_mmap(process_synthesized_event, |
699 | session, machine, "_stext"); | 699 | session, machine, "_stext"); |
700 | if (err < 0) { | 700 | if (err < 0) |
701 | pr_err("Couldn't record kernel reference relocation symbol.\n"); | 701 | pr_err("Couldn't record kernel reference relocation symbol\n" |
702 | return err; | 702 | "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n" |
703 | } | 703 | "Check /proc/kallsyms permission or run as root.\n"); |
704 | 704 | ||
705 | err = event__synthesize_modules(process_synthesized_event, | 705 | err = event__synthesize_modules(process_synthesized_event, |
706 | session, machine); | 706 | session, machine); |
707 | if (err < 0) { | 707 | if (err < 0) |
708 | pr_err("Couldn't record kernel reference relocation symbol.\n"); | 708 | pr_err("Couldn't record kernel module information.\n" |
709 | return err; | 709 | "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n" |
710 | } | 710 | "Check /proc/modules permission or run as root.\n"); |
711 | |||
711 | if (perf_guest) | 712 | if (perf_guest) |
712 | perf_session__process_machines(session, event__synthesize_guest_os); | 713 | perf_session__process_machines(session, event__synthesize_guest_os); |
713 | 714 | ||
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index b39f499e575a..0500895a45af 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c | |||
@@ -295,7 +295,9 @@ static void symbols__insert_by_name(struct rb_root *self, struct symbol *sym) | |||
295 | { | 295 | { |
296 | struct rb_node **p = &self->rb_node; | 296 | struct rb_node **p = &self->rb_node; |
297 | struct rb_node *parent = NULL; | 297 | struct rb_node *parent = NULL; |
298 | struct symbol_name_rb_node *symn = ((void *)sym) - sizeof(*parent), *s; | 298 | struct symbol_name_rb_node *symn, *s; |
299 | |||
300 | symn = container_of(sym, struct symbol_name_rb_node, sym); | ||
299 | 301 | ||
300 | while (*p != NULL) { | 302 | while (*p != NULL) { |
301 | parent = *p; | 303 | parent = *p; |
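The symbol.c fix replaces hand-rolled pointer arithmetic with container_of(), which recovers the enclosing struct from a pointer to one of its embedded members and, unlike the old expression, stays correct if the member moves within the struct. A minimal userspace illustration of the idea follows; the macro here is a simplified version of the one perf and the kernel carry in their headers, and the struct layout is a stand-in, not the real symbol_name_rb_node.

#include <stddef.h>
#include <stdio.h>

/* Simplified container_of(): member pointer -> enclosing struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct symbol {
	unsigned long start;
	char name[32];
};

struct symbol_name_node {
	struct { void *left, *right; } rb;	/* stand-in for struct rb_node */
	struct symbol sym;			/* embedded member */
};

int main(void)
{
	struct symbol_name_node node = { .sym = { .start = 0x1000, .name = "foo" } };
	struct symbol *sym = &node.sym;

	/* Recover the enclosing node from the embedded symbol pointer. */
	struct symbol_name_node *n = container_of(sym, struct symbol_name_node, sym);

	printf("%p %p\n", (void *)&node, (void *)n);	/* identical addresses */
	return 0;
}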