227 files changed, 3442 insertions, 1689 deletions
diff --git a/.gitignore b/.gitignore index 9c0d650385be..d24ad506e799 100644 --- a/.gitignore +++ b/.gitignore | |||
@@ -52,6 +52,7 @@ series | |||
52 | 52 | ||
53 | # cscope files | 53 | # cscope files |
54 | cscope.* | 54 | cscope.* |
55 | ncscope.* | ||
55 | 56 | ||
56 | *.orig | 57 | *.orig |
57 | *~ | 58 | *~ |
diff --git a/Documentation/ABI/testing/sysfs-class-bdi b/Documentation/ABI/testing/sysfs-class-bdi index 5ac1e01bbd48..5f500977b42f 100644 --- a/Documentation/ABI/testing/sysfs-class-bdi +++ b/Documentation/ABI/testing/sysfs-class-bdi | |||
@@ -14,6 +14,10 @@ MAJOR:MINOR | |||
14 | non-block filesystems which provide their own BDI, such as NFS | 14 | non-block filesystems which provide their own BDI, such as NFS |
15 | and FUSE. | 15 | and FUSE. |
16 | 16 | ||
17 | MAJOR:MINOR-fuseblk | ||
18 | |||
19 | Value of st_dev on fuseblk filesystems. | ||
20 | |||
17 | default | 21 | default |
18 | 22 | ||
19 | The default backing dev, used for non-block device backed | 23 | The default backing dev, used for non-block device backed |
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl index 77c42f40be5d..2510763295d0 100644 --- a/Documentation/DocBook/kernel-locking.tmpl +++ b/Documentation/DocBook/kernel-locking.tmpl | |||
@@ -703,6 +703,31 @@ | |||
703 | </sect1> | 703 | </sect1> |
704 | </chapter> | 704 | </chapter> |
705 | 705 | ||
706 | <chapter id="trylock-functions"> | ||
707 | <title>The trylock Functions</title> | ||
708 | <para> | ||
709 | There are functions that try to acquire a lock only once and immediately | ||
710 | return a value indicating whether or not the lock was acquired. | ||
711 | They can be used when you do not need access to the data protected by the | ||
712 | lock while some other thread is holding it. You can acquire the lock | ||
713 | later if you then need access to the protected data. | ||
714 | </para> | ||
715 | |||
716 | <para> | ||
717 | <function>spin_trylock()</function> does not spin; it returns non-zero if | ||
718 | it acquires the spinlock on the first try, or 0 if not. It can be used in | ||
719 | the same contexts as <function>spin_lock()</function>: you must have | ||
720 | disabled any contexts that might interrupt you and acquire the same spinlock. | ||
721 | </para> | ||
722 | |||
723 | <para> | ||
724 | <function>mutex_trylock()</function> does not suspend your task; | ||
725 | it returns non-zero if it could lock the mutex on the first try, | ||
726 | or 0 if not. Even though it does not sleep, this function cannot be | ||
727 | used safely in hardware or software interrupt contexts. | ||
728 | </para> | ||
729 | </chapter> | ||
730 | |||
706 | <chapter id="Examples"> | 731 | <chapter id="Examples"> |
707 | <title>Common Examples</title> | 732 | <title>Common Examples</title> |
708 | <para> | 733 | <para> |
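[Editor's illustration, not part of the patch above: a minimal sketch of the trylock pattern the new chapter describes. The lock names and the data they protect are invented for the example.]

    #include <linux/spinlock.h>
    #include <linux/mutex.h>
    #include <linux/errno.h>

    static DEFINE_SPINLOCK(cache_lock);     /* hypothetical lock */
    static int cache_dirty;                 /* data it protects */

    /* Opportunistic flush: skip the work if someone else holds the lock. */
    static void try_flush_cache(void)
    {
            if (!spin_trylock(&cache_lock))
                    return;                 /* lock busy; we do not need the data now */
            cache_dirty = 0;
            spin_unlock(&cache_lock);
    }

    static DEFINE_MUTEX(cfg_mutex);         /* hypothetical mutex */

    /* Process context only: mutex_trylock() is not safe in interrupt context. */
    static int try_update_config(void)
    {
            if (!mutex_trylock(&cfg_mutex))
                    return -EBUSY;          /* caller retries later */
            /* ... update the configuration protected by cfg_mutex ... */
            mutex_unlock(&cfg_mutex);
            return 0;
    }

Both helpers fall back gracefully instead of blocking, which is exactly the situation the chapter says the trylock variants are meant for.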
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt index 6a9c55bd556b..dcec0564d040 100644 --- a/Documentation/cpu-freq/governors.txt +++ b/Documentation/cpu-freq/governors.txt | |||
@@ -129,14 +129,6 @@ to its default value of '80' it means that between the checking | |||
129 | intervals the CPU needs to be on average more than 80% in use to then | 129 | intervals the CPU needs to be on average more than 80% in use to then |
130 | decide that the CPU frequency needs to be increased. | 130 | decide that the CPU frequency needs to be increased. |
131 | 131 | ||
132 | sampling_down_factor: this parameter controls the rate that the CPU | ||
133 | makes a decision on when to decrease the frequency. When set to its | ||
134 | default value of '5' it means that at 1/5 the sampling_rate the kernel | ||
135 | makes a decision to lower the frequency. Five "lower rate" decisions | ||
136 | have to be made in a row before the CPU frequency is actually lower. | ||
137 | If set to '1' then the frequency decreases as quickly as it increases, | ||
138 | if set to '2' it decreases at half the rate of the increase. | ||
139 | |||
140 | ignore_nice_load: this parameter takes a value of '0' or '1'. When | 132 | ignore_nice_load: this parameter takes a value of '0' or '1'. When |
141 | set to '0' (its default), all processes are counted towards the | 133 | set to '0' (its default), all processes are counted towards the |
142 | 'cpu utilisation' value. When set to '1', the processes that are | 134 | 'cpu utilisation' value. When set to '1', the processes that are |
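[Editor's note.] The ondemand tunables described above are plain sysfs files, so they can be set from a small program as well as from a shell. A sketch follows; the path is an assumption, since the ondemand directory is global on some kernel versions and per-CPU (cpuN/cpufreq/ondemand) on others.

    #include <stdio.h>

    /* Path assumed; check where ondemand's tunables live on your kernel. */
    #define IGNORE_NICE "/sys/devices/system/cpu/cpu0/cpufreq/ondemand/ignore_nice_load"

    int main(void)
    {
            FILE *f = fopen(IGNORE_NICE, "w");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            fputs("1\n", f);        /* '1': nice processes are not counted as load */
            fclose(f);
            return 0;
    }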
diff --git a/Documentation/hwmon/ibmaem b/Documentation/hwmon/ibmaem new file mode 100644 index 000000000000..2fefaf582a43 --- /dev/null +++ b/Documentation/hwmon/ibmaem | |||
@@ -0,0 +1,37 @@ | |||
1 | Kernel driver ibmaem | ||
2 | ====================== | ||
3 | |||
4 | Supported systems: | ||
5 | * Any recent IBM System X server with Active Energy Manager support. | ||
6 | This includes the x3350, x3550, x3650, x3655, x3755, x3850 M2, | ||
7 | x3950 M2, and certain HS2x/LS2x/QS2x blades. The IPMI host interface | ||
8 | driver ("ipmi-si") needs to be loaded for this driver to do anything. | ||
9 | Prefix: 'ibmaem' | ||
10 | Datasheet: Not available | ||
11 | |||
12 | Author: Darrick J. Wong | ||
13 | |||
14 | Description | ||
15 | ----------- | ||
16 | |||
17 | This driver implements sensor reading support for the energy and power | ||
18 | meters available on various IBM System X hardware through the BMC. All | ||
19 | sensor banks will be exported as platform devices; this driver can talk | ||
20 | to both v1 and v2 interfaces. This driver is completely separate from the | ||
21 | older ibmpex driver. | ||
22 | |||
23 | The v1 AEM interface has a simple set of features to monitor energy use. | ||
24 | There is a register that displays an estimate of raw energy consumption | ||
25 | since the last BMC reset, and a power sensor that returns average power | ||
26 | use over a configurable interval. | ||
27 | |||
28 | The v2 AEM interface is a bit more sophisticated, being able to present | ||
29 | a wider range of energy and power use registers, the power cap as | ||
30 | set by the AEM software, and temperature sensors. | ||
31 | |||
32 | Special Features | ||
33 | ---------------- | ||
34 | |||
35 | The "power_cap" value displays the current system power cap, as set by | ||
36 | the Active Energy Manager software. Setting the power cap from the host | ||
37 | is not currently supported. | ||
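[Editor's note.] The sensors this driver registers show up as ordinary hwmon sysfs attributes, so reading the power cap needs nothing more than opening the right file. In the sketch below both the hwmon index and the attribute name (power1_cap) are assumptions, not taken from the text above; inspect /sys/class/hwmon on the target system first.

    #include <stdio.h>

    /* hwmon number and attribute name are assumptions for this example. */
    #define POWER_CAP "/sys/class/hwmon/hwmon0/device/power1_cap"

    int main(void)
    {
            FILE *f = fopen(POWER_CAP, "r");
            unsigned long cap;

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            if (fscanf(f, "%lu", &cap) == 1)
                    printf("power cap: %lu microwatts\n", cap);  /* hwmon power units */
            fclose(f);
            return 0;
    }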
diff --git a/MAINTAINERS b/MAINTAINERS index b42dcfcbee44..0a6d2ca03cea 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -1240,6 +1240,20 @@ L: video4linux-list@redhat.com | |||
1240 | W: http://linuxtv.org | 1240 | W: http://linuxtv.org |
1241 | S: Maintained | 1241 | S: Maintained |
1242 | 1242 | ||
1243 | CXGB3 ETHERNET DRIVER (CXGB3) | ||
1244 | P: Divy Le Ray | ||
1245 | M: divy@chelsio.com | ||
1246 | L: netdev@vger.kernel.org | ||
1247 | W: http://www.chelsio.com | ||
1248 | S: Supported | ||
1249 | |||
1250 | CXGB3 IWARP RNIC DRIVER (IW_CXGB3) | ||
1251 | P: Steve Wise | ||
1252 | M: swise@chelsio.com | ||
1253 | L: general@lists.openfabrics.org | ||
1254 | W: http://www.openfabrics.org | ||
1255 | S: Supported | ||
1256 | |||
1243 | CYBERPRO FB DRIVER | 1257 | CYBERPRO FB DRIVER |
1244 | P: Russell King | 1258 | P: Russell King |
1245 | M: rmk@arm.linux.org.uk | 1259 | M: rmk@arm.linux.org.uk |
@@ -4361,6 +4375,14 @@ M: gregkh@suse.de | |||
4361 | L: linux-kernel@vger.kernel.org | 4375 | L: linux-kernel@vger.kernel.org |
4362 | S: Maintained | 4376 | S: Maintained |
4363 | 4377 | ||
4378 | UTIL-LINUX-NG PACKAGE | ||
4379 | P: Karel Zak | ||
4380 | M: kzak@redhat.com | ||
4381 | L: util-linux-ng@vger.kernel.org | ||
4382 | W: http://kernel.org/~kzak/util-linux-ng/ | ||
4383 | T: git://git.kernel.org/pub/scm/utils/util-linux-ng/util-linux-ng.git | ||
4384 | S: Maintained | ||
4385 | |||
4364 | VFAT/FAT/MSDOS FILESYSTEM: | 4386 | VFAT/FAT/MSDOS FILESYSTEM: |
4365 | P: OGAWA Hirofumi | 4387 | P: OGAWA Hirofumi |
4366 | M: hirofumi@mail.parknet.co.jp | 4388 | M: hirofumi@mail.parknet.co.jp |
diff --git a/arch/arm/mach-at91/at91x40.c b/arch/arm/mach-at91/at91x40.c index 1de121fc55f4..f44647738ee4 100644 --- a/arch/arm/mach-at91/at91x40.c +++ b/arch/arm/mach-at91/at91x40.c | |||
@@ -16,16 +16,32 @@ | |||
16 | #include <asm/mach/arch.h> | 16 | #include <asm/mach/arch.h> |
17 | #include <asm/arch/at91x40.h> | 17 | #include <asm/arch/at91x40.h> |
18 | #include <asm/arch/at91_st.h> | 18 | #include <asm/arch/at91_st.h> |
19 | #include <asm/arch/timex.h> | ||
19 | #include "generic.h" | 20 | #include "generic.h" |
20 | 21 | ||
21 | /* | 22 | /* |
22 | * This is used in the gpio code, stub locally. | 23 | * Export the clock functions for the AT91X40. Some external code common |
24 | * to all AT91 family parts relies on this, like the gpio and serial support. | ||
23 | */ | 25 | */ |
24 | int clk_enable(struct clk *clk) | 26 | int clk_enable(struct clk *clk) |
25 | { | 27 | { |
26 | return 0; | 28 | return 0; |
27 | } | 29 | } |
28 | 30 | ||
31 | void clk_disable(struct clk *clk) | ||
32 | { | ||
33 | } | ||
34 | |||
35 | unsigned long clk_get_rate(struct clk *clk) | ||
36 | { | ||
37 | return AT91X40_MASTER_CLOCK; | ||
38 | } | ||
39 | |||
40 | struct clk *clk_get(struct device *dev, const char *id) | ||
41 | { | ||
42 | return NULL; | ||
43 | } | ||
44 | |||
29 | void __init at91x40_initialize(unsigned long main_clock) | 45 | void __init at91x40_initialize(unsigned long main_clock) |
30 | { | 46 | { |
31 | at91_extern_irq = (1 << AT91X40_ID_IRQ0) | (1 << AT91X40_ID_IRQ1) | 47 | at91_extern_irq = (1 << AT91X40_ID_IRQ0) | (1 << AT91X40_ID_IRQ1) |
diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c index 92d79fb39311..62e653a3ea1a 100644 --- a/arch/arm/mach-integrator/impd1.c +++ b/arch/arm/mach-integrator/impd1.c | |||
@@ -369,7 +369,8 @@ static int impd1_probe(struct lm_device *dev) | |||
369 | 369 | ||
370 | lm_set_drvdata(dev, impd1); | 370 | lm_set_drvdata(dev, impd1); |
371 | 371 | ||
372 | printk("IM-PD1 found at 0x%08lx\n", dev->resource.start); | 372 | printk("IM-PD1 found at 0x%08lx\n", |
373 | (unsigned long)dev->resource.start); | ||
373 | 374 | ||
374 | for (i = 0; i < ARRAY_SIZE(impd1->vcos); i++) { | 375 | for (i = 0; i < ARRAY_SIZE(impd1->vcos); i++) { |
375 | impd1->vcos[i].owner = THIS_MODULE, | 376 | impd1->vcos[i].owner = THIS_MODULE, |
diff --git a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c index d55fa4e9bb43..c07f497000ca 100644 --- a/arch/arm/mach-integrator/pci_v3.c +++ b/arch/arm/mach-integrator/pci_v3.c | |||
@@ -405,7 +405,6 @@ v3_pci_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |||
405 | addr, fsr, pc, instr, __raw_readl(SC_LBFADDR), __raw_readl(SC_LBFCODE) & 255, | 405 | addr, fsr, pc, instr, __raw_readl(SC_LBFADDR), __raw_readl(SC_LBFCODE) & 255, |
406 | v3_readb(V3_LB_ISTAT)); | 406 | v3_readb(V3_LB_ISTAT)); |
407 | printk(KERN_DEBUG "%s", buf); | 407 | printk(KERN_DEBUG "%s", buf); |
408 | printascii(buf); | ||
409 | #endif | 408 | #endif |
410 | 409 | ||
411 | v3_writeb(V3_LB_ISTAT, 0); | 410 | v3_writeb(V3_LB_ISTAT, 0); |
@@ -447,6 +446,7 @@ static irqreturn_t v3_irq(int dummy, void *devid) | |||
447 | unsigned long pc = instruction_pointer(regs); | 446 | unsigned long pc = instruction_pointer(regs); |
448 | unsigned long instr = *(unsigned long *)pc; | 447 | unsigned long instr = *(unsigned long *)pc; |
449 | char buf[128]; | 448 | char buf[128]; |
449 | extern void printascii(const char *); | ||
450 | 450 | ||
451 | sprintf(buf, "V3 int %d: pc=0x%08lx [%08lx] LBFADDR=%08x LBFCODE=%02x " | 451 | sprintf(buf, "V3 int %d: pc=0x%08lx [%08lx] LBFADDR=%08x LBFCODE=%02x " |
452 | "ISTAT=%02x\n", IRQ_AP_V3INT, pc, instr, | 452 | "ISTAT=%02x\n", IRQ_AP_V3INT, pc, instr, |
diff --git a/arch/arm/mach-omap1/board-palmte.c b/arch/arm/mach-omap1/board-palmte.c index a0b16a7e8a04..a4d20127a60e 100644 --- a/arch/arm/mach-omap1/board-palmte.c +++ b/arch/arm/mach-omap1/board-palmte.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include <linux/mtd/mtd.h> | 24 | #include <linux/mtd/mtd.h> |
25 | #include <linux/mtd/partitions.h> | 25 | #include <linux/mtd/partitions.h> |
26 | #include <linux/spi/spi.h> | 26 | #include <linux/spi/spi.h> |
27 | #include <linux/spi/tsc2102.h> | ||
28 | #include <linux/interrupt.h> | 27 | #include <linux/interrupt.h> |
29 | #include <linux/apm-emulation.h> | 28 | #include <linux/apm-emulation.h> |
30 | 29 | ||
@@ -315,14 +314,6 @@ static void palmte_get_power_status(struct apm_power_info *info, int *battery) | |||
315 | #define palmte_get_power_status NULL | 314 | #define palmte_get_power_status NULL |
316 | #endif | 315 | #endif |
317 | 316 | ||
318 | static struct tsc2102_config palmte_tsc2102_config = { | ||
319 | .use_internal = 0, | ||
320 | .monitor = TSC_BAT1 | TSC_AUX | TSC_TEMP, | ||
321 | .temp_at25c = { 2200, 2615 }, | ||
322 | .apm_report = palmte_get_power_status, | ||
323 | .alsa_config = &palmte_alsa_config, | ||
324 | }; | ||
325 | |||
326 | static struct omap_board_config_kernel palmte_config[] __initdata = { | 317 | static struct omap_board_config_kernel palmte_config[] __initdata = { |
327 | { OMAP_TAG_USB, &palmte_usb_config }, | 318 | { OMAP_TAG_USB, &palmte_usb_config }, |
328 | { OMAP_TAG_MMC, &palmte_mmc_config }, | 319 | { OMAP_TAG_MMC, &palmte_mmc_config }, |
@@ -336,7 +327,6 @@ static struct spi_board_info palmte_spi_info[] __initdata = { | |||
336 | .bus_num = 2, /* uWire (officially) */ | 327 | .bus_num = 2, /* uWire (officially) */ |
337 | .chip_select = 0, /* As opposed to 3 */ | 328 | .chip_select = 0, /* As opposed to 3 */ |
338 | .irq = OMAP_GPIO_IRQ(PALMTE_PINTDAV_GPIO), | 329 | .irq = OMAP_GPIO_IRQ(PALMTE_PINTDAV_GPIO), |
339 | .platform_data = &palmte_tsc2102_config, | ||
340 | .max_speed_hz = 8000000, | 330 | .max_speed_hz = 8000000, |
341 | }, | 331 | }, |
342 | }; | 332 | }; |
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c index dace3820f1ee..e7d0fcd9b43f 100644 --- a/arch/arm/mach-pxa/spitz.c +++ b/arch/arm/mach-pxa/spitz.c | |||
@@ -38,7 +38,6 @@ | |||
38 | #include <asm/arch/pxa-regs.h> | 38 | #include <asm/arch/pxa-regs.h> |
39 | #include <asm/arch/pxa2xx-regs.h> | 39 | #include <asm/arch/pxa2xx-regs.h> |
40 | #include <asm/arch/pxa2xx-gpio.h> | 40 | #include <asm/arch/pxa2xx-gpio.h> |
41 | #include <asm/arch/pxa27x-udc.h> | ||
42 | #include <asm/arch/irda.h> | 41 | #include <asm/arch/irda.h> |
43 | #include <asm/arch/mmc.h> | 42 | #include <asm/arch/mmc.h> |
44 | #include <asm/arch/ohci.h> | 43 | #include <asm/arch/ohci.h> |
diff --git a/arch/arm/mach-s3c2410/mach-bast.c b/arch/arm/mach-s3c2410/mach-bast.c index 661a2358ac22..27f63d5d3a7b 100644 --- a/arch/arm/mach-s3c2410/mach-bast.c +++ b/arch/arm/mach-s3c2410/mach-bast.c | |||
@@ -374,7 +374,7 @@ static struct resource bast_dm9k_resource[] = { | |||
374 | [2] = { | 374 | [2] = { |
375 | .start = IRQ_DM9000, | 375 | .start = IRQ_DM9000, |
376 | .end = IRQ_DM9000, | 376 | .end = IRQ_DM9000, |
377 | .flags = IORESOURCE_IRQ, | 377 | .flags = IORESOURCE_IRQ | IRQF_TRIGGER_HIGH, |
378 | } | 378 | } |
379 | 379 | ||
380 | }; | 380 | }; |
diff --git a/arch/arm/mach-s3c2410/mach-vr1000.c b/arch/arm/mach-s3c2410/mach-vr1000.c index c56423373ff3..4c4b5c4207c4 100644 --- a/arch/arm/mach-s3c2410/mach-vr1000.c +++ b/arch/arm/mach-s3c2410/mach-vr1000.c | |||
@@ -263,7 +263,7 @@ static struct resource vr1000_dm9k0_resource[] = { | |||
263 | [2] = { | 263 | [2] = { |
264 | .start = IRQ_VR1000_DM9000A, | 264 | .start = IRQ_VR1000_DM9000A, |
265 | .end = IRQ_VR1000_DM9000A, | 265 | .end = IRQ_VR1000_DM9000A, |
266 | .flags = IORESOURCE_IRQ | 266 | .flags = IORESOURCE_IRQ | IRQF_TRIGGER_HIGH, |
267 | } | 267 | } |
268 | 268 | ||
269 | }; | 269 | }; |
@@ -282,7 +282,7 @@ static struct resource vr1000_dm9k1_resource[] = { | |||
282 | [2] = { | 282 | [2] = { |
283 | .start = IRQ_VR1000_DM9000N, | 283 | .start = IRQ_VR1000_DM9000N, |
284 | .end = IRQ_VR1000_DM9000N, | 284 | .end = IRQ_VR1000_DM9000N, |
285 | .flags = IORESOURCE_IRQ | 285 | .flags = IORESOURCE_IRQ | IRQF_TRIGGER_HIGH, |
286 | } | 286 | } |
287 | }; | 287 | }; |
288 | 288 | ||
diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c index 6496eb645cee..2f772a3965c4 100644 --- a/arch/arm/mach-sa1100/collie.c +++ b/arch/arm/mach-sa1100/collie.c | |||
@@ -225,26 +225,28 @@ static void __init collie_init(void) | |||
225 | int ret = 0; | 225 | int ret = 0; |
226 | 226 | ||
227 | /* cpu initialize */ | 227 | /* cpu initialize */ |
228 | GAFR = ( GPIO_SSP_TXD | \ | 228 | GAFR = GPIO_SSP_TXD | GPIO_SSP_SCLK | GPIO_SSP_SFRM | GPIO_SSP_CLK | |
229 | GPIO_SSP_SCLK | GPIO_SSP_SFRM | GPIO_SSP_CLK | GPIO_TIC_ACK | \ | 229 | GPIO_MCP_CLK | GPIO_32_768kHz; |
230 | GPIO_32_768kHz ); | 230 | |
231 | 231 | GPDR = GPIO_LDD8 | GPIO_LDD9 | GPIO_LDD10 | GPIO_LDD11 | GPIO_LDD12 | | |
232 | GPDR = ( GPIO_LDD8 | GPIO_LDD9 | GPIO_LDD10 | GPIO_LDD11 | GPIO_LDD12 | \ | 232 | GPIO_LDD13 | GPIO_LDD14 | GPIO_LDD15 | GPIO_SSP_TXD | |
233 | GPIO_LDD13 | GPIO_LDD14 | GPIO_LDD15 | GPIO_SSP_TXD | \ | 233 | GPIO_SSP_SCLK | GPIO_SSP_SFRM | GPIO_SDLC_SCLK | |
234 | GPIO_SSP_SCLK | GPIO_SSP_SFRM | GPIO_SDLC_SCLK | \ | 234 | COLLIE_GPIO_UCB1x00_RESET | COLLIE_GPIO_nMIC_ON | |
235 | GPIO_SDLC_AAF | GPIO_UART_SCLK1 | GPIO_32_768kHz ); | 235 | COLLIE_GPIO_nREMOCON_ON | GPIO_32_768kHz; |
236 | GPLR = GPIO_GPIO18; | 236 | |
237 | 237 | PPDR = PPC_LDD0 | PPC_LDD1 | PPC_LDD2 | PPC_LDD3 | PPC_LDD4 | PPC_LDD5 | | |
238 | // PPC pin setting | 238 | PPC_LDD6 | PPC_LDD7 | PPC_L_PCLK | PPC_L_LCLK | PPC_L_FCLK | PPC_L_BIAS | |
239 | PPDR = ( PPC_LDD0 | PPC_LDD1 | PPC_LDD2 | PPC_LDD3 | PPC_LDD4 | PPC_LDD5 | \ | 239 | PPC_TXD1 | PPC_TXD2 | PPC_TXD3 | PPC_TXD4 | PPC_SCLK | PPC_SFRM; |
240 | PPC_LDD6 | PPC_LDD7 | PPC_L_PCLK | PPC_L_LCLK | PPC_L_FCLK | PPC_L_BIAS | \ | 240 | |
241 | PPC_TXD1 | PPC_TXD2 | PPC_RXD2 | PPC_TXD3 | PPC_TXD4 | PPC_SCLK | PPC_SFRM ); | 241 | PWER = COLLIE_GPIO_AC_IN | COLLIE_GPIO_CO | COLLIE_GPIO_ON_KEY | |
242 | 242 | COLLIE_GPIO_WAKEUP | COLLIE_GPIO_nREMOCON_INT | PWER_RTC; | |
243 | PSDR = ( PPC_RXD1 | PPC_RXD2 | PPC_RXD3 | PPC_RXD4 ); | 243 | |
244 | 244 | PGSR = COLLIE_GPIO_nREMOCON_ON; | |
245 | GAFR |= GPIO_32_768kHz; | 245 | |
246 | GPDR |= GPIO_32_768kHz; | 246 | PSDR = PPC_RXD1 | PPC_RXD2 | PPC_RXD3 | PPC_RXD4; |
247 | TUCR = TUCR_32_768kHz; | 247 | |
248 | PCFR = PCFR_OPDE; | ||
249 | |||
248 | 250 | ||
249 | platform_scoop_config = &collie_pcmcia_config; | 251 | platform_scoop_config = &collie_pcmcia_config; |
250 | 252 | ||
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c index 2946c193a7d6..2db5580048d8 100644 --- a/arch/arm/plat-omap/clock.c +++ b/arch/arm/plat-omap/clock.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/clk.h> | 21 | #include <linux/clk.h> |
22 | #include <linux/mutex.h> | 22 | #include <linux/mutex.h> |
23 | #include <linux/platform_device.h> | 23 | #include <linux/platform_device.h> |
24 | #include <linux/cpufreq.h> | ||
24 | 25 | ||
25 | #include <asm/io.h> | 26 | #include <asm/io.h> |
26 | 27 | ||
diff --git a/arch/arm/plat-s3c24xx/s3c244x.c b/arch/arm/plat-s3c24xx/s3c244x.c index f197bb3a2366..2f01af5f64c4 100644 --- a/arch/arm/plat-s3c24xx/s3c244x.c +++ b/arch/arm/plat-s3c24xx/s3c244x.c | |||
@@ -65,6 +65,7 @@ void __init s3c244x_map_io(struct map_desc *mach_desc, int size) | |||
65 | 65 | ||
66 | /* rename any peripherals used differing from the s3c2410 */ | 66 | /* rename any peripherals used differing from the s3c2410 */ |
67 | 67 | ||
68 | s3c_device_sdi.name = "s3c2440-sdi"; | ||
68 | s3c_device_i2c.name = "s3c2440-i2c"; | 69 | s3c_device_i2c.name = "s3c2440-i2c"; |
69 | s3c_device_nand.name = "s3c2440-nand"; | 70 | s3c_device_nand.name = "s3c2440-nand"; |
70 | s3c_device_usbgadget.name = "s3c2440-usbgadget"; | 71 | s3c_device_usbgadget.name = "s3c2440-usbgadget"; |
diff --git a/arch/frv/mm/init.c b/arch/frv/mm/init.c index b841ecfd5d5a..9af7740f32fb 100644 --- a/arch/frv/mm/init.c +++ b/arch/frv/mm/init.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/types.h> | 26 | #include <linux/types.h> |
27 | #include <linux/bootmem.h> | 27 | #include <linux/bootmem.h> |
28 | #include <linux/highmem.h> | 28 | #include <linux/highmem.h> |
29 | #include <linux/module.h> | ||
29 | 30 | ||
30 | #include <asm/setup.h> | 31 | #include <asm/setup.h> |
31 | #include <asm/segment.h> | 32 | #include <asm/segment.h> |
@@ -56,7 +57,9 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | |||
56 | */ | 57 | */ |
57 | static unsigned long empty_bad_page_table; | 58 | static unsigned long empty_bad_page_table; |
58 | static unsigned long empty_bad_page; | 59 | static unsigned long empty_bad_page; |
60 | |||
59 | unsigned long empty_zero_page; | 61 | unsigned long empty_zero_page; |
62 | EXPORT_SYMBOL(empty_zero_page); | ||
60 | 63 | ||
61 | /*****************************************************************************/ | 64 | /*****************************************************************************/ |
62 | /* | 65 | /* |
diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh index 8e24fc1821e8..31729a9387df 100644 --- a/arch/powerpc/kernel/prom_init_check.sh +++ b/arch/powerpc/kernel/prom_init_check.sh | |||
@@ -20,7 +20,7 @@ WHITELIST="add_reloc_offset __bss_start __bss_stop copy_and_flush | |||
20 | _end enter_prom memcpy memset reloc_offset __secondary_hold | 20 | _end enter_prom memcpy memset reloc_offset __secondary_hold |
21 | __secondary_hold_acknowledge __secondary_hold_spinloop __start | 21 | __secondary_hold_acknowledge __secondary_hold_spinloop __start |
22 | strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224 | 22 | strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224 |
23 | reloc_got2" | 23 | reloc_got2 kernstart_addr" |
24 | 24 | ||
25 | NM="$1" | 25 | NM="$1" |
26 | OBJ="$2" | 26 | OBJ="$2" |
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index 80d1babb230d..e0ff59f21135 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c | |||
@@ -402,7 +402,7 @@ void __set_fixmap (enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags) | |||
402 | return; | 402 | return; |
403 | } | 403 | } |
404 | 404 | ||
405 | map_page(address, phys, flags); | 405 | map_page(address, phys, pgprot_val(flags)); |
406 | fixmaps++; | 406 | fixmaps++; |
407 | } | 407 | } |
408 | 408 | ||
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c index 5b3fb2b321ab..3a58ffabccd9 100644 --- a/arch/powerpc/platforms/ps3/mm.c +++ b/arch/powerpc/platforms/ps3/mm.c | |||
@@ -317,6 +317,9 @@ static int __init ps3_mm_add_memory(void) | |||
317 | return result; | 317 | return result; |
318 | } | 318 | } |
319 | 319 | ||
320 | lmb_add(start_addr, map.r1.size); | ||
321 | lmb_analyze(); | ||
322 | |||
320 | result = online_pages(start_pfn, nr_pages); | 323 | result = online_pages(start_pfn, nr_pages); |
321 | 324 | ||
322 | if (result) | 325 | if (result) |
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 8619f2a3f1f6..7680001676a6 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c | |||
@@ -1331,6 +1331,9 @@ void mpic_irq_set_priority(unsigned int irq, unsigned int pri) | |||
1331 | unsigned long flags; | 1331 | unsigned long flags; |
1332 | u32 reg; | 1332 | u32 reg; |
1333 | 1333 | ||
1334 | if (!mpic) | ||
1335 | return; | ||
1336 | |||
1334 | spin_lock_irqsave(&mpic_lock, flags); | 1337 | spin_lock_irqsave(&mpic_lock, flags); |
1335 | if (is_ipi) { | 1338 | if (is_ipi) { |
1336 | reg = mpic_ipi_read(src - mpic->ipi_vecs[0]) & | 1339 | reg = mpic_ipi_read(src - mpic->ipi_vecs[0]) & |
@@ -1346,23 +1349,6 @@ void mpic_irq_set_priority(unsigned int irq, unsigned int pri) | |||
1346 | spin_unlock_irqrestore(&mpic_lock, flags); | 1349 | spin_unlock_irqrestore(&mpic_lock, flags); |
1347 | } | 1350 | } |
1348 | 1351 | ||
1349 | unsigned int mpic_irq_get_priority(unsigned int irq) | ||
1350 | { | ||
1351 | unsigned int is_ipi; | ||
1352 | struct mpic *mpic = mpic_find(irq, &is_ipi); | ||
1353 | unsigned int src = mpic_irq_to_hw(irq); | ||
1354 | unsigned long flags; | ||
1355 | u32 reg; | ||
1356 | |||
1357 | spin_lock_irqsave(&mpic_lock, flags); | ||
1358 | if (is_ipi) | ||
1359 | reg = mpic_ipi_read(src = mpic->ipi_vecs[0]); | ||
1360 | else | ||
1361 | reg = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)); | ||
1362 | spin_unlock_irqrestore(&mpic_lock, flags); | ||
1363 | return (reg & MPIC_VECPRI_PRIORITY_MASK) >> MPIC_VECPRI_PRIORITY_SHIFT; | ||
1364 | } | ||
1365 | |||
1366 | void mpic_setup_this_cpu(void) | 1352 | void mpic_setup_this_cpu(void) |
1367 | { | 1353 | { |
1368 | #ifdef CONFIG_SMP | 1354 | #ifdef CONFIG_SMP |
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c index 0a0c05fc3a33..2084f81a76e1 100644 --- a/arch/sparc64/kernel/process.c +++ b/arch/sparc64/kernel/process.c | |||
@@ -657,20 +657,39 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, | |||
657 | struct task_struct *p, struct pt_regs *regs) | 657 | struct task_struct *p, struct pt_regs *regs) |
658 | { | 658 | { |
659 | struct thread_info *t = task_thread_info(p); | 659 | struct thread_info *t = task_thread_info(p); |
660 | struct sparc_stackf *parent_sf; | ||
661 | unsigned long child_stack_sz; | ||
660 | char *child_trap_frame; | 662 | char *child_trap_frame; |
663 | int kernel_thread; | ||
661 | 664 | ||
662 | /* Calculate offset to stack_frame & pt_regs */ | 665 | kernel_thread = (regs->tstate & TSTATE_PRIV) ? 1 : 0; |
663 | child_trap_frame = task_stack_page(p) + (THREAD_SIZE - (TRACEREG_SZ+STACKFRAME_SZ)); | 666 | parent_sf = ((struct sparc_stackf *) regs) - 1; |
664 | memcpy(child_trap_frame, (((struct sparc_stackf *)regs)-1), (TRACEREG_SZ+STACKFRAME_SZ)); | ||
665 | 667 | ||
666 | t->flags = (t->flags & ~((0xffUL << TI_FLAG_CWP_SHIFT) | (0xffUL << TI_FLAG_CURRENT_DS_SHIFT))) | | 668 | /* Calculate offset to stack_frame & pt_regs */ |
669 | child_stack_sz = ((STACKFRAME_SZ + TRACEREG_SZ) + | ||
670 | (kernel_thread ? STACKFRAME_SZ : 0)); | ||
671 | child_trap_frame = (task_stack_page(p) + | ||
672 | (THREAD_SIZE - child_stack_sz)); | ||
673 | memcpy(child_trap_frame, parent_sf, child_stack_sz); | ||
674 | |||
675 | t->flags = (t->flags & ~((0xffUL << TI_FLAG_CWP_SHIFT) | | ||
676 | (0xffUL << TI_FLAG_CURRENT_DS_SHIFT))) | | ||
667 | (((regs->tstate + 1) & TSTATE_CWP) << TI_FLAG_CWP_SHIFT); | 677 | (((regs->tstate + 1) & TSTATE_CWP) << TI_FLAG_CWP_SHIFT); |
668 | t->new_child = 1; | 678 | t->new_child = 1; |
669 | t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS; | 679 | t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS; |
670 | t->kregs = (struct pt_regs *)(child_trap_frame+sizeof(struct sparc_stackf)); | 680 | t->kregs = (struct pt_regs *) (child_trap_frame + |
681 | sizeof(struct sparc_stackf)); | ||
671 | t->fpsaved[0] = 0; | 682 | t->fpsaved[0] = 0; |
672 | 683 | ||
673 | if (regs->tstate & TSTATE_PRIV) { | 684 | if (kernel_thread) { |
685 | struct sparc_stackf *child_sf = (struct sparc_stackf *) | ||
686 | (child_trap_frame + (STACKFRAME_SZ + TRACEREG_SZ)); | ||
687 | |||
688 | /* Zero terminate the stack backtrace. */ | ||
689 | child_sf->fp = NULL; | ||
690 | t->kregs->u_regs[UREG_FP] = | ||
691 | ((unsigned long) child_sf) - STACK_BIAS; | ||
692 | |||
674 | /* Special case, if we are spawning a kernel thread from | 693 | /* Special case, if we are spawning a kernel thread from |
675 | * a userspace task (via KMOD, NFS, or similar) we must | 694 | * a userspace task (via KMOD, NFS, or similar) we must |
676 | * disable performance counters in the child because the | 695 | * disable performance counters in the child because the |
@@ -681,12 +700,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, | |||
681 | t->pcr_reg = 0; | 700 | t->pcr_reg = 0; |
682 | t->flags &= ~_TIF_PERFCTR; | 701 | t->flags &= ~_TIF_PERFCTR; |
683 | } | 702 | } |
684 | t->kregs->u_regs[UREG_FP] = t->ksp; | ||
685 | t->flags |= ((long)ASI_P << TI_FLAG_CURRENT_DS_SHIFT); | 703 | t->flags |= ((long)ASI_P << TI_FLAG_CURRENT_DS_SHIFT); |
686 | flush_register_windows(); | ||
687 | memcpy((void *)(t->ksp + STACK_BIAS), | ||
688 | (void *)(regs->u_regs[UREG_FP] + STACK_BIAS), | ||
689 | sizeof(struct sparc_stackf)); | ||
690 | t->kregs->u_regs[UREG_G6] = (unsigned long) t; | 704 | t->kregs->u_regs[UREG_G6] = (unsigned long) t; |
691 | t->kregs->u_regs[UREG_G4] = (unsigned long) t->task; | 705 | t->kregs->u_regs[UREG_G4] = (unsigned long) t->task; |
692 | } else { | 706 | } else { |
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S index 3afacbb5781d..c6fc695fe1fe 100644 --- a/arch/sparc64/kernel/rtrap.S +++ b/arch/sparc64/kernel/rtrap.S | |||
@@ -363,6 +363,7 @@ kern_rtt: rdpr %canrestore, %g1 | |||
363 | brz,pn %g1, kern_rtt_fill | 363 | brz,pn %g1, kern_rtt_fill |
364 | nop | 364 | nop |
365 | kern_rtt_restore: | 365 | kern_rtt_restore: |
366 | stw %g0, [%sp + PTREGS_OFF + PT_V9_MAGIC] | ||
366 | restore | 367 | restore |
367 | retry | 368 | retry |
368 | 369 | ||
diff --git a/arch/sparc64/kernel/stacktrace.c b/arch/sparc64/kernel/stacktrace.c index 01b52f561af4..c73ce3f4197e 100644 --- a/arch/sparc64/kernel/stacktrace.c +++ b/arch/sparc64/kernel/stacktrace.c | |||
@@ -19,7 +19,7 @@ void save_stack_trace(struct stack_trace *trace) | |||
19 | fp = ksp + STACK_BIAS; | 19 | fp = ksp + STACK_BIAS; |
20 | thread_base = (unsigned long) tp; | 20 | thread_base = (unsigned long) tp; |
21 | do { | 21 | do { |
22 | struct reg_window *rw; | 22 | struct sparc_stackf *sf; |
23 | struct pt_regs *regs; | 23 | struct pt_regs *regs; |
24 | unsigned long pc; | 24 | unsigned long pc; |
25 | 25 | ||
@@ -28,15 +28,17 @@ void save_stack_trace(struct stack_trace *trace) | |||
28 | fp >= (thread_base + THREAD_SIZE)) | 28 | fp >= (thread_base + THREAD_SIZE)) |
29 | break; | 29 | break; |
30 | 30 | ||
31 | rw = (struct reg_window *) fp; | 31 | sf = (struct sparc_stackf *) fp; |
32 | regs = (struct pt_regs *) (rw + 1); | 32 | regs = (struct pt_regs *) (sf + 1); |
33 | 33 | ||
34 | if ((regs->magic & ~0x1ff) == PT_REGS_MAGIC) { | 34 | if ((regs->magic & ~0x1ff) == PT_REGS_MAGIC) { |
35 | if (!(regs->tstate & TSTATE_PRIV)) | ||
36 | break; | ||
35 | pc = regs->tpc; | 37 | pc = regs->tpc; |
36 | fp = regs->u_regs[UREG_I6] + STACK_BIAS; | 38 | fp = regs->u_regs[UREG_I6] + STACK_BIAS; |
37 | } else { | 39 | } else { |
38 | pc = rw->ins[7]; | 40 | pc = sf->callers_pc; |
39 | fp = rw->ins[6] + STACK_BIAS; | 41 | fp = (unsigned long)sf->fp + STACK_BIAS; |
40 | } | 42 | } |
41 | 43 | ||
42 | if (trace->skip > 0) | 44 | if (trace->skip > 0) |
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c index d9b8d46707d1..369749262653 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c | |||
@@ -2116,7 +2116,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp) | |||
2116 | printk("\n"); | 2116 | printk("\n"); |
2117 | #endif | 2117 | #endif |
2118 | do { | 2118 | do { |
2119 | struct reg_window *rw; | 2119 | struct sparc_stackf *sf; |
2120 | struct pt_regs *regs; | 2120 | struct pt_regs *regs; |
2121 | unsigned long pc; | 2121 | unsigned long pc; |
2122 | 2122 | ||
@@ -2124,15 +2124,17 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp) | |||
2124 | if (fp < (thread_base + sizeof(struct thread_info)) || | 2124 | if (fp < (thread_base + sizeof(struct thread_info)) || |
2125 | fp >= (thread_base + THREAD_SIZE)) | 2125 | fp >= (thread_base + THREAD_SIZE)) |
2126 | break; | 2126 | break; |
2127 | rw = (struct reg_window *)fp; | 2127 | sf = (struct sparc_stackf *) fp; |
2128 | regs = (struct pt_regs *) (rw + 1); | 2128 | regs = (struct pt_regs *) (sf + 1); |
2129 | 2129 | ||
2130 | if ((regs->magic & ~0x1ff) == PT_REGS_MAGIC) { | 2130 | if ((regs->magic & ~0x1ff) == PT_REGS_MAGIC) { |
2131 | if (!(regs->tstate & TSTATE_PRIV)) | ||
2132 | break; | ||
2131 | pc = regs->tpc; | 2133 | pc = regs->tpc; |
2132 | fp = regs->u_regs[UREG_I6] + STACK_BIAS; | 2134 | fp = regs->u_regs[UREG_I6] + STACK_BIAS; |
2133 | } else { | 2135 | } else { |
2134 | pc = rw->ins[7]; | 2136 | pc = sf->callers_pc; |
2135 | fp = rw->ins[6] + STACK_BIAS; | 2137 | fp = (unsigned long)sf->fp + STACK_BIAS; |
2136 | } | 2138 | } |
2137 | 2139 | ||
2138 | printk(" [%016lx] ", pc); | 2140 | printk(" [%016lx] ", pc); |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index fe361ae7ef2f..dcbec34154cf 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -26,17 +26,10 @@ config X86 | |||
26 | select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64) | 26 | select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64) |
27 | select HAVE_ARCH_KGDB if !X86_VOYAGER | 27 | select HAVE_ARCH_KGDB if !X86_VOYAGER |
28 | 28 | ||
29 | config DEFCONFIG_LIST | 29 | config ARCH_DEFCONFIG |
30 | string | 30 | string |
31 | depends on X86_32 | 31 | default "arch/x86/configs/i386_defconfig" if X86_32 |
32 | option defconfig_list | 32 | default "arch/x86/configs/x86_64_defconfig" if X86_64 |
33 | default "arch/x86/configs/i386_defconfig" | ||
34 | |||
35 | config DEFCONFIG_LIST | ||
36 | string | ||
37 | depends on X86_64 | ||
38 | option defconfig_list | ||
39 | default "arch/x86/configs/x86_64_defconfig" | ||
40 | 33 | ||
41 | 34 | ||
42 | config GENERIC_LOCKBREAK | 35 | config GENERIC_LOCKBREAK |
diff --git a/arch/x86/boot/printf.c b/arch/x86/boot/printf.c index c1d00c0274c4..50e47cdbdddd 100644 --- a/arch/x86/boot/printf.c +++ b/arch/x86/boot/printf.c | |||
@@ -56,7 +56,7 @@ static char *number(char *str, long num, int base, int size, int precision, | |||
56 | if (type & LEFT) | 56 | if (type & LEFT) |
57 | type &= ~ZEROPAD; | 57 | type &= ~ZEROPAD; |
58 | if (base < 2 || base > 36) | 58 | if (base < 2 || base > 36) |
59 | return 0; | 59 | return NULL; |
60 | c = (type & ZEROPAD) ? '0' : ' '; | 60 | c = (type & ZEROPAD) ? '0' : ' '; |
61 | sign = 0; | 61 | sign = 0; |
62 | if (type & SIGN) { | 62 | if (type & SIGN) { |
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c index 5910020c3f24..0633cfd0dc29 100644 --- a/arch/x86/kernel/apic_64.c +++ b/arch/x86/kernel/apic_64.c | |||
@@ -534,7 +534,7 @@ int setup_profiling_timer(unsigned int multiplier) | |||
534 | */ | 534 | */ |
535 | void clear_local_APIC(void) | 535 | void clear_local_APIC(void) |
536 | { | 536 | { |
537 | int maxlvt = lapic_get_maxlvt(); | 537 | int maxlvt; |
538 | u32 v; | 538 | u32 v; |
539 | 539 | ||
540 | /* APIC hasn't been mapped yet */ | 540 | /* APIC hasn't been mapped yet */ |
diff --git a/arch/x86/kernel/cpu/cpufreq/longrun.c b/arch/x86/kernel/cpu/cpufreq/longrun.c index af4a867a097c..777a7ff075de 100644 --- a/arch/x86/kernel/cpu/cpufreq/longrun.c +++ b/arch/x86/kernel/cpu/cpufreq/longrun.c | |||
@@ -245,7 +245,7 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq, | |||
245 | if ((ecx > 95) || (ecx == 0) || (eax < ebx)) | 245 | if ((ecx > 95) || (ecx == 0) || (eax < ebx)) |
246 | return -EIO; | 246 | return -EIO; |
247 | 247 | ||
248 | edx = (eax - ebx) / (100 - ecx); | 248 | edx = ((eax - ebx) * 100) / (100 - ecx); |
249 | *low_freq = edx * 1000; /* back to kHz */ | 249 | *low_freq = edx * 1000; /* back to kHz */ |
250 | 250 | ||
251 | dprintk("low frequency is %u kHz\n", *low_freq); | 251 | dprintk("low frequency is %u kHz\n", *low_freq); |
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index 46d4034d9f37..206791eb46e3 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c | |||
@@ -1127,12 +1127,23 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
1127 | * an UP version, and is deprecated by AMD. | 1127 | * an UP version, and is deprecated by AMD. |
1128 | */ | 1128 | */ |
1129 | if (num_online_cpus() != 1) { | 1129 | if (num_online_cpus() != 1) { |
1130 | printk(KERN_ERR PFX "MP systems not supported by PSB BIOS structure\n"); | 1130 | #ifndef CONFIG_ACPI_PROCESSOR |
1131 | printk(KERN_ERR PFX "ACPI Processor support is required " | ||
1132 | "for SMP systems but is absent. Please load the " | ||
1133 | "ACPI Processor module before starting this " | ||
1134 | "driver.\n"); | ||
1135 | #else | ||
1136 | printk(KERN_ERR PFX "Your BIOS does not provide ACPI " | ||
1137 | "_PSS objects in a way that Linux understands. " | ||
1138 | "Please report this to the Linux ACPI maintainers" | ||
1139 | " and complain to your BIOS vendor.\n"); | ||
1140 | #endif | ||
1131 | kfree(data); | 1141 | kfree(data); |
1132 | return -ENODEV; | 1142 | return -ENODEV; |
1133 | } | 1143 | } |
1134 | if (pol->cpu != 0) { | 1144 | if (pol->cpu != 0) { |
1135 | printk(KERN_ERR PFX "No _PSS objects for CPU other than CPU0\n"); | 1145 | printk(KERN_ERR PFX "No ACPI _PSS objects for CPU other than " |
1146 | "CPU0. Complain to your BIOS vendor.\n"); | ||
1136 | kfree(data); | 1147 | kfree(data); |
1137 | return -ENODEV; | 1148 | return -ENODEV; |
1138 | } | 1149 | } |
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 4bc1be5d5472..08a30986d472 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c | |||
@@ -53,7 +53,7 @@ static cycle_t kvm_clock_read(void); | |||
53 | * have elapsed since the hypervisor wrote the data. So we try to account for | 53 | * have elapsed since the hypervisor wrote the data. So we try to account for |
54 | * that with system time | 54 | * that with system time |
55 | */ | 55 | */ |
56 | unsigned long kvm_get_wallclock(void) | 56 | static unsigned long kvm_get_wallclock(void) |
57 | { | 57 | { |
58 | u32 wc_sec, wc_nsec; | 58 | u32 wc_sec, wc_nsec; |
59 | u64 delta; | 59 | u64 delta; |
@@ -86,7 +86,7 @@ unsigned long kvm_get_wallclock(void) | |||
86 | return ts.tv_sec + 1; | 86 | return ts.tv_sec + 1; |
87 | } | 87 | } |
88 | 88 | ||
89 | int kvm_set_wallclock(unsigned long now) | 89 | static int kvm_set_wallclock(unsigned long now) |
90 | { | 90 | { |
91 | return 0; | 91 | return 0; |
92 | } | 92 | } |
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c index e4790728b224..068759db63dd 100644 --- a/arch/x86/kernel/tsc_32.c +++ b/arch/x86/kernel/tsc_32.c | |||
@@ -14,7 +14,7 @@ | |||
14 | 14 | ||
15 | #include "mach_timer.h" | 15 | #include "mach_timer.h" |
16 | 16 | ||
17 | static int tsc_enabled; | 17 | static int tsc_disabled; |
18 | 18 | ||
19 | /* | 19 | /* |
20 | * On some systems the TSC frequency does not | 20 | * On some systems the TSC frequency does not |
@@ -28,8 +28,8 @@ EXPORT_SYMBOL_GPL(tsc_khz); | |||
28 | static int __init tsc_setup(char *str) | 28 | static int __init tsc_setup(char *str) |
29 | { | 29 | { |
30 | printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, " | 30 | printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, " |
31 | "cannot disable TSC completely.\n"); | 31 | "cannot disable TSC completely.\n"); |
32 | mark_tsc_unstable("user disabled TSC"); | 32 | tsc_disabled = 1; |
33 | return 1; | 33 | return 1; |
34 | } | 34 | } |
35 | #else | 35 | #else |
@@ -120,7 +120,7 @@ unsigned long long native_sched_clock(void) | |||
120 | * very important for it to be as fast as the platform | 120 | * very important for it to be as fast as the platform |
121 | * can achive it. ) | 121 | * can achive it. ) |
122 | */ | 122 | */ |
123 | if (unlikely(!tsc_enabled && !tsc_unstable)) | 123 | if (unlikely(tsc_disabled)) |
124 | /* No locking but a rare wrong value is not a big deal: */ | 124 | /* No locking but a rare wrong value is not a big deal: */ |
125 | return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ); | 125 | return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ); |
126 | 126 | ||
@@ -322,7 +322,6 @@ void mark_tsc_unstable(char *reason) | |||
322 | { | 322 | { |
323 | if (!tsc_unstable) { | 323 | if (!tsc_unstable) { |
324 | tsc_unstable = 1; | 324 | tsc_unstable = 1; |
325 | tsc_enabled = 0; | ||
326 | printk("Marking TSC unstable due to: %s.\n", reason); | 325 | printk("Marking TSC unstable due to: %s.\n", reason); |
327 | /* Can be called before registration */ | 326 | /* Can be called before registration */ |
328 | if (clocksource_tsc.mult) | 327 | if (clocksource_tsc.mult) |
@@ -336,7 +335,7 @@ EXPORT_SYMBOL_GPL(mark_tsc_unstable); | |||
336 | static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d) | 335 | static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d) |
337 | { | 336 | { |
338 | printk(KERN_NOTICE "%s detected: marking TSC unstable.\n", | 337 | printk(KERN_NOTICE "%s detected: marking TSC unstable.\n", |
339 | d->ident); | 338 | d->ident); |
340 | tsc_unstable = 1; | 339 | tsc_unstable = 1; |
341 | return 0; | 340 | return 0; |
342 | } | 341 | } |
@@ -403,14 +402,22 @@ void __init tsc_init(void) | |||
403 | { | 402 | { |
404 | int cpu; | 403 | int cpu; |
405 | 404 | ||
406 | if (!cpu_has_tsc) | 405 | if (!cpu_has_tsc || tsc_disabled) { |
406 | /* Disable the TSC in case of !cpu_has_tsc */ | ||
407 | tsc_disabled = 1; | ||
407 | return; | 408 | return; |
409 | } | ||
408 | 410 | ||
409 | cpu_khz = calculate_cpu_khz(); | 411 | cpu_khz = calculate_cpu_khz(); |
410 | tsc_khz = cpu_khz; | 412 | tsc_khz = cpu_khz; |
411 | 413 | ||
412 | if (!cpu_khz) { | 414 | if (!cpu_khz) { |
413 | mark_tsc_unstable("could not calculate TSC khz"); | 415 | mark_tsc_unstable("could not calculate TSC khz"); |
416 | /* | ||
417 | * We need to disable the TSC completely in this case | ||
418 | * to prevent sched_clock() from using it. | ||
419 | */ | ||
420 | tsc_disabled = 1; | ||
414 | return; | 421 | return; |
415 | } | 422 | } |
416 | 423 | ||
@@ -441,8 +448,6 @@ void __init tsc_init(void) | |||
441 | if (check_tsc_unstable()) { | 448 | if (check_tsc_unstable()) { |
442 | clocksource_tsc.rating = 0; | 449 | clocksource_tsc.rating = 0; |
443 | clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS; | 450 | clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS; |
444 | } else | 451 | } |
445 | tsc_enabled = 1; | ||
446 | |||
447 | clocksource_register(&clocksource_tsc); | 452 | clocksource_register(&clocksource_tsc); |
448 | } | 453 | } |
diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c index fcc16e58609e..1784b8077a12 100644 --- a/arch/x86/kernel/tsc_64.c +++ b/arch/x86/kernel/tsc_64.c | |||
@@ -227,14 +227,14 @@ void __init tsc_calibrate(void) | |||
227 | /* hpet or pmtimer available ? */ | 227 | /* hpet or pmtimer available ? */ |
228 | if (!hpet && !pm1 && !pm2) { | 228 | if (!hpet && !pm1 && !pm2) { |
229 | printk(KERN_INFO "TSC calibrated against PIT\n"); | 229 | printk(KERN_INFO "TSC calibrated against PIT\n"); |
230 | return; | 230 | goto out; |
231 | } | 231 | } |
232 | 232 | ||
233 | /* Check, whether the sampling was disturbed by an SMI */ | 233 | /* Check, whether the sampling was disturbed by an SMI */ |
234 | if (tsc1 == ULONG_MAX || tsc2 == ULONG_MAX) { | 234 | if (tsc1 == ULONG_MAX || tsc2 == ULONG_MAX) { |
235 | printk(KERN_WARNING "TSC calibration disturbed by SMI, " | 235 | printk(KERN_WARNING "TSC calibration disturbed by SMI, " |
236 | "using PIT calibration result\n"); | 236 | "using PIT calibration result\n"); |
237 | return; | 237 | goto out; |
238 | } | 238 | } |
239 | 239 | ||
240 | tsc2 = (tsc2 - tsc1) * 1000000L; | 240 | tsc2 = (tsc2 - tsc1) * 1000000L; |
@@ -255,6 +255,7 @@ void __init tsc_calibrate(void) | |||
255 | 255 | ||
256 | tsc_khz = tsc2 / tsc1; | 256 | tsc_khz = tsc2 / tsc1; |
257 | 257 | ||
258 | out: | ||
258 | for_each_possible_cpu(cpu) | 259 | for_each_possible_cpu(cpu) |
259 | set_cyc2ns_scale(tsc_khz, cpu); | 260 | set_cyc2ns_scale(tsc_khz, cpu); |
260 | } | 261 | } |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 36c5406b1813..7246b60afb96 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -1996,7 +1996,7 @@ static struct shrinker mmu_shrinker = { | |||
1996 | .seeks = DEFAULT_SEEKS * 10, | 1996 | .seeks = DEFAULT_SEEKS * 10, |
1997 | }; | 1997 | }; |
1998 | 1998 | ||
1999 | void mmu_destroy_caches(void) | 1999 | static void mmu_destroy_caches(void) |
2000 | { | 2000 | { |
2001 | if (pte_chain_cache) | 2001 | if (pte_chain_cache) |
2002 | kmem_cache_destroy(pte_chain_cache); | 2002 | kmem_cache_destroy(pte_chain_cache); |
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c index 23476c2ebfc4..efa2ba7c6005 100644 --- a/arch/x86/vdso/vclock_gettime.c +++ b/arch/x86/vdso/vclock_gettime.c | |||
@@ -106,9 +106,9 @@ int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz) | |||
106 | do_realtime((struct timespec *)tv); | 106 | do_realtime((struct timespec *)tv); |
107 | tv->tv_usec /= 1000; | 107 | tv->tv_usec /= 1000; |
108 | if (unlikely(tz != NULL)) { | 108 | if (unlikely(tz != NULL)) { |
109 | /* This relies on gcc inlining the memcpy. We'll notice | 109 | /* Avoid memcpy. Some old compilers fail to inline it */ |
110 | if it ever fails to do so. */ | 110 | tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest; |
111 | memcpy(tz, >od->sys_tz, sizeof(struct timezone)); | 111 | tz->tz_dsttime = gtod->sys_tz.tz_dsttime; |
112 | } | 112 | } |
113 | return 0; | 113 | return 0; |
114 | } | 114 | } |
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 126766d43aea..3525ef523a74 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -60,7 +60,7 @@ xmaddr_t arbitrary_virt_to_machine(unsigned long address) | |||
60 | { | 60 | { |
61 | unsigned int level; | 61 | unsigned int level; |
62 | pte_t *pte = lookup_address(address, &level); | 62 | pte_t *pte = lookup_address(address, &level); |
63 | unsigned offset = address & PAGE_MASK; | 63 | unsigned offset = address & ~PAGE_MASK; |
64 | 64 | ||
65 | BUG_ON(pte == NULL); | 65 | BUG_ON(pte == NULL); |
66 | 66 | ||
diff --git a/drivers/acpi/dispatcher/dsmethod.c b/drivers/acpi/dispatcher/dsmethod.c index e48a3ea03117..2509809a36cf 100644 --- a/drivers/acpi/dispatcher/dsmethod.c +++ b/drivers/acpi/dispatcher/dsmethod.c | |||
@@ -565,7 +565,7 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc, | |||
565 | 565 | ||
566 | acpi_os_release_mutex(method_desc->method. | 566 | acpi_os_release_mutex(method_desc->method. |
567 | mutex->mutex.os_mutex); | 567 | mutex->mutex.os_mutex); |
568 | method_desc->method.mutex->mutex.thread_id = 0; | 568 | method_desc->method.mutex->mutex.thread_id = NULL; |
569 | } | 569 | } |
570 | } | 570 | } |
571 | 571 | ||
diff --git a/drivers/acpi/executer/exmutex.c b/drivers/acpi/executer/exmutex.c index c873ab40cd0e..a8bf3d713e28 100644 --- a/drivers/acpi/executer/exmutex.c +++ b/drivers/acpi/executer/exmutex.c | |||
@@ -326,7 +326,7 @@ acpi_status acpi_ex_release_mutex_object(union acpi_operand_object *obj_desc) | |||
326 | 326 | ||
327 | /* Clear mutex info */ | 327 | /* Clear mutex info */ |
328 | 328 | ||
329 | obj_desc->mutex.thread_id = 0; | 329 | obj_desc->mutex.thread_id = NULL; |
330 | return_ACPI_STATUS(status); | 330 | return_ACPI_STATUS(status); |
331 | } | 331 | } |
332 | 332 | ||
@@ -463,7 +463,7 @@ void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread) | |||
463 | /* Mark mutex unowned */ | 463 | /* Mark mutex unowned */ |
464 | 464 | ||
465 | obj_desc->mutex.owner_thread = NULL; | 465 | obj_desc->mutex.owner_thread = NULL; |
466 | obj_desc->mutex.thread_id = 0; | 466 | obj_desc->mutex.thread_id = NULL; |
467 | 467 | ||
468 | /* Update Thread sync_level (Last mutex is the important one) */ | 468 | /* Update Thread sync_level (Last mutex is the important one) */ |
469 | 469 | ||
diff --git a/drivers/block/brd.c b/drivers/block/brd.c index a196ef7f147f..680cdfc00b90 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c | |||
@@ -447,6 +447,7 @@ static struct brd_device *brd_alloc(int i) | |||
447 | disk->fops = &brd_fops; | 447 | disk->fops = &brd_fops; |
448 | disk->private_data = brd; | 448 | disk->private_data = brd; |
449 | disk->queue = brd->brd_queue; | 449 | disk->queue = brd->brd_queue; |
450 | disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO; | ||
450 | sprintf(disk->disk_name, "ram%d", i); | 451 | sprintf(disk->disk_name, "ram%d", i); |
451 | set_capacity(disk, rd_size * 2); | 452 | set_capacity(disk, rd_size * 2); |
452 | 453 | ||
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c index ebfe038d859e..f1c8feb5510b 100644 --- a/drivers/block/viodasd.c +++ b/drivers/block/viodasd.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * Authors: Dave Boutcher <boutcher@us.ibm.com> | 3 | * Authors: Dave Boutcher <boutcher@us.ibm.com> |
4 | * Ryan Arnold <ryanarn@us.ibm.com> | 4 | * Ryan Arnold <ryanarn@us.ibm.com> |
5 | * Colin Devilbiss <devilbis@us.ibm.com> | 5 | * Colin Devilbiss <devilbis@us.ibm.com> |
6 | * Stephen Rothwell <sfr@au1.ibm.com> | 6 | * Stephen Rothwell |
7 | * | 7 | * |
8 | * (C) Copyright 2000-2004 IBM Corporation | 8 | * (C) Copyright 2000-2004 IBM Corporation |
9 | * | 9 | * |
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c index 5245a4a0ba74..9d0dfe6e0d63 100644 --- a/drivers/cdrom/viocd.c +++ b/drivers/cdrom/viocd.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * Authors: Dave Boutcher <boutcher@us.ibm.com> | 6 | * Authors: Dave Boutcher <boutcher@us.ibm.com> |
7 | * Ryan Arnold <ryanarn@us.ibm.com> | 7 | * Ryan Arnold <ryanarn@us.ibm.com> |
8 | * Colin Devilbiss <devilbis@us.ibm.com> | 8 | * Colin Devilbiss <devilbis@us.ibm.com> |
9 | * Stephen Rothwell <sfr@au1.ibm.com> | 9 | * Stephen Rothwell |
10 | * | 10 | * |
11 | * (C) Copyright 2000-2004 IBM Corporation | 11 | * (C) Copyright 2000-2004 IBM Corporation |
12 | * | 12 | * |
diff --git a/drivers/char/drm/drm_sysfs.c b/drivers/char/drm/drm_sysfs.c index 9a32169e88fb..af211a0ef179 100644 --- a/drivers/char/drm/drm_sysfs.c +++ b/drivers/char/drm/drm_sysfs.c | |||
@@ -34,8 +34,6 @@ static int drm_sysfs_suspend(struct device *dev, pm_message_t state) | |||
34 | struct drm_minor *drm_minor = to_drm_minor(dev); | 34 | struct drm_minor *drm_minor = to_drm_minor(dev); |
35 | struct drm_device *drm_dev = drm_minor->dev; | 35 | struct drm_device *drm_dev = drm_minor->dev; |
36 | 36 | ||
37 | printk(KERN_ERR "%s\n", __func__); | ||
38 | |||
39 | if (drm_dev->driver->suspend) | 37 | if (drm_dev->driver->suspend) |
40 | return drm_dev->driver->suspend(drm_dev, state); | 38 | return drm_dev->driver->suspend(drm_dev, state); |
41 | 39 | ||
diff --git a/drivers/char/ip2/Makefile b/drivers/char/ip2/Makefile index 6bfe2543ddc2..939618f62fe1 100644 --- a/drivers/char/ip2/Makefile +++ b/drivers/char/ip2/Makefile | |||
@@ -2,7 +2,7 @@ | |||
2 | # Makefile for the Computone IntelliPort Plus Driver | 2 | # Makefile for the Computone IntelliPort Plus Driver |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_COMPUTONE) += ip2.o ip2main.o | 5 | obj-$(CONFIG_COMPUTONE) += ip2.o |
6 | 6 | ||
7 | ip2-objs := ip2base.o | 7 | ip2-objs := ip2base.o ip2main.o |
8 | 8 | ||
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c index 70957acaa960..c12cf8fc4be0 100644 --- a/drivers/char/ip2/ip2main.c +++ b/drivers/char/ip2/ip2main.c | |||
@@ -346,27 +346,6 @@ have_requested_irq( char irq ) | |||
346 | } | 346 | } |
347 | 347 | ||
348 | /******************************************************************************/ | 348 | /******************************************************************************/ |
349 | /* Function: init_module() */ | ||
350 | /* Parameters: None */ | ||
351 | /* Returns: Success (0) */ | ||
352 | /* */ | ||
353 | /* Description: */ | ||
354 | /* This is a required entry point for an installable module. It simply calls */ | ||
355 | /* the driver initialisation function and returns what it returns. */ | ||
356 | /******************************************************************************/ | ||
357 | #ifdef MODULE | ||
358 | static int __init | ||
359 | ip2_init_module(void) | ||
360 | { | ||
361 | #ifdef IP2DEBUG_INIT | ||
362 | printk (KERN_DEBUG "Loading module ...\n" ); | ||
363 | #endif | ||
364 | return 0; | ||
365 | } | ||
366 | module_init(ip2_init_module); | ||
367 | #endif /* MODULE */ | ||
368 | |||
369 | /******************************************************************************/ | ||
370 | /* Function: cleanup_module() */ | 349 | /* Function: cleanup_module() */ |
371 | /* Parameters: None */ | 350 | /* Parameters: None */ |
372 | /* Returns: Nothing */ | 351 | /* Returns: Nothing */ |
@@ -779,8 +758,6 @@ out: | |||
779 | return err; | 758 | return err; |
780 | } | 759 | } |
781 | 760 | ||
782 | EXPORT_SYMBOL(ip2_loadmain); | ||
783 | |||
784 | /******************************************************************************/ | 761 | /******************************************************************************/ |
785 | /* Function: ip2_init_board() */ | 762 | /* Function: ip2_init_board() */ |
786 | /* Parameters: Index of board in configuration structure */ | 763 | /* Parameters: Index of board in configuration structure */ |
diff --git a/drivers/char/viocons.c b/drivers/char/viocons.c index 3d3e1c2b310f..65fb848e1cce 100644 --- a/drivers/char/viocons.c +++ b/drivers/char/viocons.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * Authors: Dave Boutcher <boutcher@us.ibm.com> | 7 | * Authors: Dave Boutcher <boutcher@us.ibm.com> |
8 | * Ryan Arnold <ryanarn@us.ibm.com> | 8 | * Ryan Arnold <ryanarn@us.ibm.com> |
9 | * Colin Devilbiss <devilbis@us.ibm.com> | 9 | * Colin Devilbiss <devilbis@us.ibm.com> |
10 | * Stephen Rothwell <sfr@au1.ibm.com> | 10 | * Stephen Rothwell |
11 | * | 11 | * |
12 | * (C) Copyright 2000, 2001, 2002, 2003, 2004 IBM Corporation | 12 | * (C) Copyright 2000, 2001, 2002, 2003, 2004 IBM Corporation |
13 | * | 13 | * |
diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c index 58aad63831f4..c39ddaff5e8f 100644 --- a/drivers/char/viotape.c +++ b/drivers/char/viotape.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * Authors: Dave Boutcher <boutcher@us.ibm.com> | 6 | * Authors: Dave Boutcher <boutcher@us.ibm.com> |
7 | * Ryan Arnold <ryanarn@us.ibm.com> | 7 | * Ryan Arnold <ryanarn@us.ibm.com> |
8 | * Colin Devilbiss <devilbis@us.ibm.com> | 8 | * Colin Devilbiss <devilbis@us.ibm.com> |
9 | * Stephen Rothwell <sfr@au1.ibm.com> | 9 | * Stephen Rothwell |
10 | * | 10 | * |
11 | * (C) Copyright 2000-2004 IBM Corporation | 11 | * (C) Copyright 2000-2004 IBM Corporation |
12 | * | 12 | * |
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c index ae6cd60d5c14..b64c6bc445e3 100644 --- a/drivers/cpufreq/freq_table.c +++ b/drivers/cpufreq/freq_table.c | |||
@@ -2,6 +2,11 @@ | |||
2 | * linux/drivers/cpufreq/freq_table.c | 2 | * linux/drivers/cpufreq/freq_table.c |
3 | * | 3 | * |
4 | * Copyright (C) 2002 - 2003 Dominik Brodowski | 4 | * Copyright (C) 2002 - 2003 Dominik Brodowski |
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
5 | */ | 10 | */ |
6 | 11 | ||
7 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c index 065732ddf40c..d49361bfe670 100644 --- a/drivers/edac/mpc85xx_edac.c +++ b/drivers/edac/mpc85xx_edac.c | |||
@@ -20,7 +20,6 @@ | |||
20 | 20 | ||
21 | #include <linux/of_platform.h> | 21 | #include <linux/of_platform.h> |
22 | #include <linux/of_device.h> | 22 | #include <linux/of_device.h> |
23 | #include <asm/mpc85xx.h> | ||
24 | #include "edac_module.h" | 23 | #include "edac_module.h" |
25 | #include "edac_core.h" | 24 | #include "edac_core.h" |
26 | #include "mpc85xx_edac.h" | 25 | #include "mpc85xx_edac.h" |
@@ -43,8 +42,6 @@ static u32 orig_pci_err_en; | |||
43 | static u32 orig_l2_err_disable; | 42 | static u32 orig_l2_err_disable; |
44 | static u32 orig_hid1; | 43 | static u32 orig_hid1; |
45 | 44 | ||
46 | static const char *mpc85xx_ctl_name = "MPC85xx"; | ||
47 | |||
48 | /************************ MC SYSFS parts ***********************************/ | 45 | /************************ MC SYSFS parts ***********************************/ |
49 | 46 | ||
50 | static ssize_t mpc85xx_mc_inject_data_hi_show(struct mem_ctl_info *mci, | 47 | static ssize_t mpc85xx_mc_inject_data_hi_show(struct mem_ctl_info *mci, |
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 7f138c6195ff..beaf6b3a37dc 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c | |||
@@ -127,7 +127,7 @@ int __init gpiochip_reserve(int start, int ngpio) | |||
127 | unsigned long flags; | 127 | unsigned long flags; |
128 | int i; | 128 | int i; |
129 | 129 | ||
130 | if (!gpio_is_valid(start) || !gpio_is_valid(start + ngpio)) | 130 | if (!gpio_is_valid(start) || !gpio_is_valid(start + ngpio - 1)) |
131 | return -EINVAL; | 131 | return -EINVAL; |
132 | 132 | ||
133 | spin_lock_irqsave(&gpio_lock, flags); | 133 | spin_lock_irqsave(&gpio_lock, flags); |
@@ -170,7 +170,7 @@ int gpiochip_add(struct gpio_chip *chip) | |||
170 | unsigned id; | 170 | unsigned id; |
171 | int base = chip->base; | 171 | int base = chip->base; |
172 | 172 | ||
173 | if ((!gpio_is_valid(base) || !gpio_is_valid(base + chip->ngpio)) | 173 | if ((!gpio_is_valid(base) || !gpio_is_valid(base + chip->ngpio - 1)) |
174 | && base >= 0) { | 174 | && base >= 0) { |
175 | status = -EINVAL; | 175 | status = -EINVAL; |
176 | goto fail; | 176 | goto fail; |
@@ -207,7 +207,7 @@ fail: | |||
207 | /* failures here can mean systems won't boot... */ | 207 | /* failures here can mean systems won't boot... */ |
208 | if (status) | 208 | if (status) |
209 | pr_err("gpiochip_add: gpios %d..%d (%s) not registered\n", | 209 | pr_err("gpiochip_add: gpios %d..%d (%s) not registered\n", |
210 | chip->base, chip->base + chip->ngpio, | 210 | chip->base, chip->base + chip->ngpio - 1, |
211 | chip->label ? : "generic"); | 211 | chip->label ? : "generic"); |
212 | return status; | 212 | return status; |
213 | } | 213 | } |
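The three gpiolib changes above fix the same off-by-one: a chip whose first GPIO is base and which provides ngpio lines owns numbers base through base + ngpio - 1, so validating base + ngpio rejected (or mis-reported) chips that end exactly at the last valid GPIO. A minimal sketch of the corrected bounds check; gpio_is_valid() is modelled here against an assumed ARCH_NR_GPIOS value rather than the kernel's real definition:

/* Sketch only: mirrors the corrected range check above. */
#define ARCH_NR_GPIOS	256	/* assumed value for illustration */

static int gpio_is_valid(int number)
{
	return number >= 0 && number < ARCH_NR_GPIOS;
}

static int gpio_range_is_valid(int base, int ngpio)
{
	/* last line owned by the chip is base + ngpio - 1, not base + ngpio */
	return gpio_is_valid(base) && gpio_is_valid(base + ngpio - 1);
}

With ARCH_NR_GPIOS = 256, a chip registered at base 240 with ngpio 16 is now accepted, since its last line is GPIO 255; the old check computed 240 + 16 = 256 and refused it.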
diff --git a/drivers/gpio/mcp23s08.c b/drivers/gpio/mcp23s08.c index 7fb5b9d009d4..7f92fdd5f0e2 100644 --- a/drivers/gpio/mcp23s08.c +++ b/drivers/gpio/mcp23s08.c | |||
@@ -168,7 +168,7 @@ static void mcp23s08_dbg_show(struct seq_file *s, struct gpio_chip *chip) | |||
168 | { | 168 | { |
169 | struct mcp23s08 *mcp; | 169 | struct mcp23s08 *mcp; |
170 | char bank; | 170 | char bank; |
171 | unsigned t; | 171 | int t; |
172 | unsigned mask; | 172 | unsigned mask; |
173 | 173 | ||
174 | mcp = container_of(chip, struct mcp23s08, chip); | 174 | mcp = container_of(chip, struct mcp23s08, chip); |
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c index 93f916720b13..7e40e8a55edf 100644 --- a/drivers/gpio/pca953x.c +++ b/drivers/gpio/pca953x.c | |||
@@ -30,6 +30,7 @@ static const struct i2c_device_id pca953x_id[] = { | |||
30 | { "pca9537", 4, }, | 30 | { "pca9537", 4, }, |
31 | { "pca9538", 8, }, | 31 | { "pca9538", 8, }, |
32 | { "pca9539", 16, }, | 32 | { "pca9539", 16, }, |
33 | { "pca9554", 8, }, | ||
33 | { "pca9555", 16, }, | 34 | { "pca9555", 16, }, |
34 | { "pca9557", 8, }, | 35 | { "pca9557", 8, }, |
35 | /* REVISIT several pca955x parts should work here too */ | 36 | /* REVISIT several pca955x parts should work here too */ |
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 4dc76bc45c9d..00ff53348491 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig | |||
@@ -330,6 +330,20 @@ config SENSORS_CORETEMP | |||
330 | sensor inside your CPU. Supported all are all known variants | 330 | sensor inside your CPU. Supported all are all known variants |
331 | of Intel Core family. | 331 | of Intel Core family. |
332 | 332 | ||
333 | config SENSORS_IBMAEM | ||
334 | tristate "IBM Active Energy Manager temperature/power sensors and control" | ||
335 | select IPMI_SI | ||
336 | depends on IPMI_HANDLER | ||
337 | help | ||
338 | If you say yes here you get support for the temperature and | ||
339 | power sensors and capping hardware in various IBM System X | ||
340 | servers that support Active Energy Manager. This includes | ||
341 | the x3350, x3550, x3650, x3655, x3755, x3850 M2, x3950 M2, | ||
342 | and certain HS2x/LS2x/QS2x blades. | ||
343 | |||
344 | This driver can also be built as a module. If so, the module | ||
345 | will be called ibmaem. | ||
346 | |||
333 | config SENSORS_IBMPEX | 347 | config SENSORS_IBMPEX |
334 | tristate "IBM PowerExecutive temperature/power sensors" | 348 | tristate "IBM PowerExecutive temperature/power sensors" |
335 | select IPMI_SI | 349 | select IPMI_SI |
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 3bdb05a5cbd7..d098677e08de 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile | |||
@@ -41,6 +41,7 @@ obj-$(CONFIG_SENSORS_GL518SM) += gl518sm.o | |||
41 | obj-$(CONFIG_SENSORS_GL520SM) += gl520sm.o | 41 | obj-$(CONFIG_SENSORS_GL520SM) += gl520sm.o |
42 | obj-$(CONFIG_SENSORS_HDAPS) += hdaps.o | 42 | obj-$(CONFIG_SENSORS_HDAPS) += hdaps.o |
43 | obj-$(CONFIG_SENSORS_I5K_AMB) += i5k_amb.o | 43 | obj-$(CONFIG_SENSORS_I5K_AMB) += i5k_amb.o |
44 | obj-$(CONFIG_SENSORS_IBMAEM) += ibmaem.o | ||
44 | obj-$(CONFIG_SENSORS_IBMPEX) += ibmpex.o | 45 | obj-$(CONFIG_SENSORS_IBMPEX) += ibmpex.o |
45 | obj-$(CONFIG_SENSORS_IT87) += it87.o | 46 | obj-$(CONFIG_SENSORS_IT87) += it87.o |
46 | obj-$(CONFIG_SENSORS_K8TEMP) += k8temp.o | 47 | obj-$(CONFIG_SENSORS_K8TEMP) += k8temp.o |
diff --git a/drivers/hwmon/hdaps.c b/drivers/hwmon/hdaps.c index bab5fd2e4dfd..88e89653daaf 100644 --- a/drivers/hwmon/hdaps.c +++ b/drivers/hwmon/hdaps.c | |||
@@ -515,6 +515,7 @@ static struct dmi_system_id __initdata hdaps_whitelist[] = { | |||
515 | HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R50"), | 515 | HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R50"), |
516 | HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R51"), | 516 | HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R51"), |
517 | HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R52"), | 517 | HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R52"), |
518 | HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad R61i"), | ||
518 | HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad T41p"), | 519 | HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad T41p"), |
519 | HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad T41"), | 520 | HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad T41"), |
520 | HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad T42p"), | 521 | HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad T42p"), |
diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c index 6ac5c6f53585..f9e2ed621f7b 100644 --- a/drivers/hwmon/i5k_amb.c +++ b/drivers/hwmon/i5k_amb.c | |||
@@ -111,6 +111,7 @@ struct i5k_amb_data { | |||
111 | void __iomem *amb_mmio; | 111 | void __iomem *amb_mmio; |
112 | struct i5k_device_attribute *attrs; | 112 | struct i5k_device_attribute *attrs; |
113 | unsigned int num_attrs; | 113 | unsigned int num_attrs; |
114 | unsigned long chipset_id; | ||
114 | }; | 115 | }; |
115 | 116 | ||
116 | static ssize_t show_name(struct device *dev, struct device_attribute *devattr, | 117 | static ssize_t show_name(struct device *dev, struct device_attribute *devattr, |
@@ -382,7 +383,8 @@ err: | |||
382 | return res; | 383 | return res; |
383 | } | 384 | } |
384 | 385 | ||
385 | static int __devinit i5k_find_amb_registers(struct i5k_amb_data *data) | 386 | static int __devinit i5k_find_amb_registers(struct i5k_amb_data *data, |
387 | unsigned long devid) | ||
386 | { | 388 | { |
387 | struct pci_dev *pcidev; | 389 | struct pci_dev *pcidev; |
388 | u32 val32; | 390 | u32 val32; |
@@ -390,7 +392,7 @@ static int __devinit i5k_find_amb_registers(struct i5k_amb_data *data) | |||
390 | 392 | ||
391 | /* Find AMB register memory space */ | 393 | /* Find AMB register memory space */ |
392 | pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, | 394 | pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, |
393 | PCI_DEVICE_ID_INTEL_5000_ERR, | 395 | devid, |
394 | NULL); | 396 | NULL); |
395 | if (!pcidev) | 397 | if (!pcidev) |
396 | return -ENODEV; | 398 | return -ENODEV; |
@@ -409,6 +411,8 @@ static int __devinit i5k_find_amb_registers(struct i5k_amb_data *data) | |||
409 | goto out; | 411 | goto out; |
410 | } | 412 | } |
411 | 413 | ||
414 | data->chipset_id = devid; | ||
415 | |||
412 | res = 0; | 416 | res = 0; |
413 | out: | 417 | out: |
414 | pci_dev_put(pcidev); | 418 | pci_dev_put(pcidev); |
@@ -441,10 +445,30 @@ out: | |||
441 | return res; | 445 | return res; |
442 | } | 446 | } |
443 | 447 | ||
448 | static unsigned long i5k_channel_pci_id(struct i5k_amb_data *data, | ||
449 | unsigned long channel) | ||
450 | { | ||
451 | switch (data->chipset_id) { | ||
452 | case PCI_DEVICE_ID_INTEL_5000_ERR: | ||
453 | return PCI_DEVICE_ID_INTEL_5000_FBD0 + channel; | ||
454 | case PCI_DEVICE_ID_INTEL_5400_ERR: | ||
455 | return PCI_DEVICE_ID_INTEL_5400_FBD0 + channel; | ||
456 | default: | ||
457 | BUG(); | ||
458 | } | ||
459 | } | ||
460 | |||
461 | static unsigned long chipset_ids[] = { | ||
462 | PCI_DEVICE_ID_INTEL_5000_ERR, | ||
463 | PCI_DEVICE_ID_INTEL_5400_ERR, | ||
464 | 0 | ||
465 | }; | ||
466 | |||
444 | static int __devinit i5k_amb_probe(struct platform_device *pdev) | 467 | static int __devinit i5k_amb_probe(struct platform_device *pdev) |
445 | { | 468 | { |
446 | struct i5k_amb_data *data; | 469 | struct i5k_amb_data *data; |
447 | struct resource *reso; | 470 | struct resource *reso; |
471 | int i; | ||
448 | int res = -ENODEV; | 472 | int res = -ENODEV; |
449 | 473 | ||
450 | data = kzalloc(sizeof(*data), GFP_KERNEL); | 474 | data = kzalloc(sizeof(*data), GFP_KERNEL); |
@@ -452,19 +476,24 @@ static int __devinit i5k_amb_probe(struct platform_device *pdev) | |||
452 | return -ENOMEM; | 476 | return -ENOMEM; |
453 | 477 | ||
454 | /* Figure out where the AMB registers live */ | 478 | /* Figure out where the AMB registers live */ |
455 | res = i5k_find_amb_registers(data); | 479 | i = 0; |
480 | do { | ||
481 | res = i5k_find_amb_registers(data, chipset_ids[i]); | ||
482 | i++; | ||
483 | } while (res && chipset_ids[i]); | ||
484 | |||
456 | if (res) | 485 | if (res) |
457 | goto err; | 486 | goto err; |
458 | 487 | ||
459 | /* Copy the DIMM presence map for the first two channels */ | 488 | /* Copy the DIMM presence map for the first two channels */ |
460 | res = i5k_channel_probe(&data->amb_present[0], | 489 | res = i5k_channel_probe(&data->amb_present[0], |
461 | PCI_DEVICE_ID_INTEL_5000_FBD0); | 490 | i5k_channel_pci_id(data, 0)); |
462 | if (res) | 491 | if (res) |
463 | goto err; | 492 | goto err; |
464 | 493 | ||
465 | /* Copy the DIMM presence map for the optional second two channels */ | 494 | /* Copy the DIMM presence map for the optional second two channels */ |
466 | i5k_channel_probe(&data->amb_present[2], | 495 | i5k_channel_probe(&data->amb_present[2], |
467 | PCI_DEVICE_ID_INTEL_5000_FBD1); | 496 | i5k_channel_pci_id(data, 1)); |
468 | 497 | ||
469 | /* Set up resource regions */ | 498 | /* Set up resource regions */ |
470 | reso = request_mem_region(data->amb_base, data->amb_len, DRVNAME); | 499 | reso = request_mem_region(data->amb_base, data->amb_len, DRVNAME); |
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c new file mode 100644 index 000000000000..5c006c9a4311 --- /dev/null +++ b/drivers/hwmon/ibmaem.c | |||
@@ -0,0 +1,1111 @@ | |||
1 | /* | ||
2 | * A hwmon driver for the IBM Active Energy Manager temperature/power sensors | ||
3 | * and capping functionality. | ||
4 | * Copyright (C) 2008 IBM | ||
5 | * | ||
6 | * Author: Darrick J. Wong <djwong@us.ibm.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
21 | */ | ||
22 | |||
23 | #include <linux/ipmi.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/hwmon.h> | ||
26 | #include <linux/hwmon-sysfs.h> | ||
27 | #include <linux/jiffies.h> | ||
28 | #include <linux/mutex.h> | ||
29 | #include <linux/kdev_t.h> | ||
30 | #include <linux/spinlock.h> | ||
31 | #include <linux/idr.h> | ||
32 | #include <linux/sched.h> | ||
33 | #include <linux/platform_device.h> | ||
34 | #include <linux/math64.h> | ||
35 | #include <linux/time.h> | ||
36 | |||
37 | #define REFRESH_INTERVAL (HZ) | ||
38 | #define IPMI_TIMEOUT (30 * HZ) | ||
39 | #define DRVNAME "aem" | ||
40 | |||
41 | #define AEM_NETFN 0x2E | ||
42 | |||
43 | #define AEM_FIND_FW_CMD 0x80 | ||
44 | #define AEM_ELEMENT_CMD 0x81 | ||
45 | #define AEM_FW_INSTANCE_CMD 0x82 | ||
46 | |||
47 | #define AEM_READ_ELEMENT_CFG 0x80 | ||
48 | #define AEM_READ_BUFFER 0x81 | ||
49 | #define AEM_READ_REGISTER 0x82 | ||
50 | #define AEM_WRITE_REGISTER 0x83 | ||
51 | #define AEM_SET_REG_MASK 0x84 | ||
52 | #define AEM_CLEAR_REG_MASK 0x85 | ||
53 | #define AEM_READ_ELEMENT_CFG2 0x86 | ||
54 | |||
55 | #define AEM_CONTROL_ELEMENT 0 | ||
56 | #define AEM_ENERGY_ELEMENT 1 | ||
57 | #define AEM_CLOCK_ELEMENT 4 | ||
58 | #define AEM_POWER_CAP_ELEMENT 7 | ||
59 | #define AEM_EXHAUST_ELEMENT 9 | ||
60 | #define AEM_POWER_ELEMENT 10 | ||
61 | |||
62 | #define AEM_MODULE_TYPE_ID 0x0001 | ||
63 | |||
64 | #define AEM2_NUM_ENERGY_REGS 2 | ||
65 | #define AEM2_NUM_PCAP_REGS 6 | ||
66 | #define AEM2_NUM_TEMP_REGS 2 | ||
67 | #define AEM2_NUM_SENSORS 14 | ||
68 | |||
69 | #define AEM1_NUM_ENERGY_REGS 1 | ||
70 | #define AEM1_NUM_SENSORS 3 | ||
71 | |||
72 | /* AEM 2.x has more energy registers */ | ||
73 | #define AEM_NUM_ENERGY_REGS AEM2_NUM_ENERGY_REGS | ||
74 | /* AEM 2.x needs more sensor files */ | ||
75 | #define AEM_NUM_SENSORS AEM2_NUM_SENSORS | ||
76 | |||
77 | #define POWER_CAP 0 | ||
78 | #define POWER_CAP_MAX_HOTPLUG 1 | ||
79 | #define POWER_CAP_MAX 2 | ||
80 | #define POWER_CAP_MIN_WARNING 3 | ||
81 | #define POWER_CAP_MIN 4 | ||
82 | #define POWER_AUX 5 | ||
83 | |||
84 | #define AEM_DEFAULT_POWER_INTERVAL 1000 | ||
85 | #define AEM_MIN_POWER_INTERVAL 200 | ||
86 | #define UJ_PER_MJ 1000L | ||
87 | |||
88 | static DEFINE_IDR(aem_idr); | ||
89 | static DEFINE_SPINLOCK(aem_idr_lock); | ||
90 | |||
91 | static struct device_driver aem_driver = { | ||
92 | .name = DRVNAME, | ||
93 | .bus = &platform_bus_type, | ||
94 | }; | ||
95 | |||
96 | struct aem_ipmi_data { | ||
97 | struct completion read_complete; | ||
98 | struct ipmi_addr address; | ||
99 | ipmi_user_t user; | ||
100 | int interface; | ||
101 | |||
102 | struct kernel_ipmi_msg tx_message; | ||
103 | long tx_msgid; | ||
104 | |||
105 | void *rx_msg_data; | ||
106 | unsigned short rx_msg_len; | ||
107 | unsigned char rx_result; | ||
108 | int rx_recv_type; | ||
109 | |||
110 | struct device *bmc_device; | ||
111 | }; | ||
112 | |||
113 | struct aem_ro_sensor_template { | ||
114 | char *label; | ||
115 | ssize_t (*show)(struct device *dev, | ||
116 | struct device_attribute *devattr, | ||
117 | char *buf); | ||
118 | int index; | ||
119 | }; | ||
120 | |||
121 | struct aem_rw_sensor_template { | ||
122 | char *label; | ||
123 | ssize_t (*show)(struct device *dev, | ||
124 | struct device_attribute *devattr, | ||
125 | char *buf); | ||
126 | ssize_t (*set)(struct device *dev, | ||
127 | struct device_attribute *devattr, | ||
128 | const char *buf, size_t count); | ||
129 | int index; | ||
130 | }; | ||
131 | |||
132 | struct aem_data { | ||
133 | struct list_head list; | ||
134 | |||
135 | struct device *hwmon_dev; | ||
136 | struct platform_device *pdev; | ||
137 | struct mutex lock; | ||
138 | char valid; | ||
139 | unsigned long last_updated; /* In jiffies */ | ||
140 | u8 ver_major; | ||
141 | u8 ver_minor; | ||
142 | u8 module_handle; | ||
143 | int id; | ||
144 | struct aem_ipmi_data ipmi; | ||
145 | |||
146 | /* Function to update sensors */ | ||
147 | void (*update)(struct aem_data *data); | ||
148 | |||
149 | /* | ||
150 | * AEM 1.x sensors: | ||
151 | * Available sensors: | ||
152 | * Energy meter | ||
153 | * Power meter | ||
154 | * | ||
155 | * AEM 2.x sensors: | ||
156 | * Two energy meters | ||
157 | * Two power meters | ||
158 | * Two temperature sensors | ||
159 | * Six power cap registers | ||
160 | */ | ||
161 | |||
162 | /* sysfs attrs */ | ||
163 | struct sensor_device_attribute sensors[AEM_NUM_SENSORS]; | ||
164 | |||
165 | /* energy use in mJ */ | ||
166 | u64 energy[AEM_NUM_ENERGY_REGS]; | ||
167 | |||
168 | /* power sampling interval in ms */ | ||
169 | unsigned long power_period[AEM_NUM_ENERGY_REGS]; | ||
170 | |||
171 | /* Everything past here is for AEM2 only */ | ||
172 | |||
173 | /* power caps in dW */ | ||
174 | u16 pcap[AEM2_NUM_PCAP_REGS]; | ||
175 | |||
176 | /* exhaust temperature in C */ | ||
177 | u8 temp[AEM2_NUM_TEMP_REGS]; | ||
178 | }; | ||
179 | |||
180 | /* Data structures returned by the AEM firmware */ | ||
181 | struct aem_iana_id { | ||
182 | u8 bytes[3]; | ||
183 | }; | ||
184 | static struct aem_iana_id system_x_id = { | ||
185 | .bytes = {0x4D, 0x4F, 0x00} | ||
186 | }; | ||
187 | |||
188 | /* These are used to find AEM1 instances */ | ||
189 | struct aem_find_firmware_req { | ||
190 | struct aem_iana_id id; | ||
191 | u8 rsvd; | ||
192 | u16 index; | ||
193 | u16 module_type_id; | ||
194 | } __packed; | ||
195 | |||
196 | struct aem_find_firmware_resp { | ||
197 | struct aem_iana_id id; | ||
198 | u8 num_instances; | ||
199 | } __packed; | ||
200 | |||
201 | /* These are used to find AEM2 instances */ | ||
202 | struct aem_find_instance_req { | ||
203 | struct aem_iana_id id; | ||
204 | u8 instance_number; | ||
205 | u16 module_type_id; | ||
206 | } __packed; | ||
207 | |||
208 | struct aem_find_instance_resp { | ||
209 | struct aem_iana_id id; | ||
210 | u8 num_instances; | ||
211 | u8 major; | ||
212 | u8 minor; | ||
213 | u8 module_handle; | ||
214 | u16 record_id; | ||
215 | } __packed; | ||
216 | |||
217 | /* These are used to query sensors */ | ||
218 | struct aem_read_sensor_req { | ||
219 | struct aem_iana_id id; | ||
220 | u8 module_handle; | ||
221 | u8 element; | ||
222 | u8 subcommand; | ||
223 | u8 reg; | ||
224 | u8 rx_buf_size; | ||
225 | } __packed; | ||
226 | |||
227 | struct aem_read_sensor_resp { | ||
228 | struct aem_iana_id id; | ||
229 | u8 bytes[0]; | ||
230 | } __packed; | ||
231 | |||
232 | /* Data structures to talk to the IPMI layer */ | ||
233 | struct aem_driver_data { | ||
234 | struct list_head aem_devices; | ||
235 | struct ipmi_smi_watcher bmc_events; | ||
236 | struct ipmi_user_hndl ipmi_hndlrs; | ||
237 | }; | ||
238 | |||
239 | static void aem_register_bmc(int iface, struct device *dev); | ||
240 | static void aem_bmc_gone(int iface); | ||
241 | static void aem_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data); | ||
242 | |||
243 | static void aem_remove_sensors(struct aem_data *data); | ||
244 | static int aem_init_aem1(struct aem_ipmi_data *probe); | ||
245 | static int aem_init_aem2(struct aem_ipmi_data *probe); | ||
246 | static int aem1_find_sensors(struct aem_data *data); | ||
247 | static int aem2_find_sensors(struct aem_data *data); | ||
248 | static void update_aem1_sensors(struct aem_data *data); | ||
249 | static void update_aem2_sensors(struct aem_data *data); | ||
250 | |||
251 | static struct aem_driver_data driver_data = { | ||
252 | .aem_devices = LIST_HEAD_INIT(driver_data.aem_devices), | ||
253 | .bmc_events = { | ||
254 | .owner = THIS_MODULE, | ||
255 | .new_smi = aem_register_bmc, | ||
256 | .smi_gone = aem_bmc_gone, | ||
257 | }, | ||
258 | .ipmi_hndlrs = { | ||
259 | .ipmi_recv_hndl = aem_msg_handler, | ||
260 | }, | ||
261 | }; | ||
262 | |||
263 | /* Functions to talk to the IPMI layer */ | ||
264 | |||
265 | /* Initialize IPMI address, message buffers and user data */ | ||
266 | static int aem_init_ipmi_data(struct aem_ipmi_data *data, int iface, | ||
267 | struct device *bmc) | ||
268 | { | ||
269 | int err; | ||
270 | |||
271 | init_completion(&data->read_complete); | ||
272 | data->bmc_device = bmc; | ||
273 | |||
274 | /* Initialize IPMI address */ | ||
275 | data->address.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; | ||
276 | data->address.channel = IPMI_BMC_CHANNEL; | ||
277 | data->address.data[0] = 0; | ||
278 | data->interface = iface; | ||
279 | |||
280 | /* Initialize message buffers */ | ||
281 | data->tx_msgid = 0; | ||
282 | data->tx_message.netfn = AEM_NETFN; | ||
283 | |||
284 | /* Create IPMI messaging interface user */ | ||
285 | err = ipmi_create_user(data->interface, &driver_data.ipmi_hndlrs, | ||
286 | data, &data->user); | ||
287 | if (err < 0) { | ||
288 | dev_err(bmc, "Unable to register user with IPMI " | ||
289 | "interface %d\n", data->interface); | ||
290 | return -EACCES; | ||
291 | } | ||
292 | |||
293 | return 0; | ||
294 | } | ||
295 | |||
296 | /* Send an IPMI command */ | ||
297 | static int aem_send_message(struct aem_ipmi_data *data) | ||
298 | { | ||
299 | int err; | ||
300 | |||
301 | err = ipmi_validate_addr(&data->address, sizeof(data->address)); | ||
302 | if (err) | ||
303 | goto out; | ||
304 | |||
305 | data->tx_msgid++; | ||
306 | err = ipmi_request_settime(data->user, &data->address, data->tx_msgid, | ||
307 | &data->tx_message, data, 0, 0, 0); | ||
308 | if (err) | ||
309 | goto out1; | ||
310 | |||
311 | return 0; | ||
312 | out1: | ||
313 | dev_err(data->bmc_device, "request_settime=%x\n", err); | ||
314 | return err; | ||
315 | out: | ||
316 | dev_err(data->bmc_device, "validate_addr=%x\n", err); | ||
317 | return err; | ||
318 | } | ||
319 | |||
320 | /* Dispatch IPMI messages to callers */ | ||
321 | static void aem_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data) | ||
322 | { | ||
323 | unsigned short rx_len; | ||
324 | struct aem_ipmi_data *data = user_msg_data; | ||
325 | |||
326 | if (msg->msgid != data->tx_msgid) { | ||
327 | dev_err(data->bmc_device, "Mismatch between received msgid " | ||
328 | "(%02x) and transmitted msgid (%02x)!\n", | ||
329 | (int)msg->msgid, | ||
330 | (int)data->tx_msgid); | ||
331 | ipmi_free_recv_msg(msg); | ||
332 | return; | ||
333 | } | ||
334 | |||
335 | data->rx_recv_type = msg->recv_type; | ||
336 | if (msg->msg.data_len > 0) | ||
337 | data->rx_result = msg->msg.data[0]; | ||
338 | else | ||
339 | data->rx_result = IPMI_UNKNOWN_ERR_COMPLETION_CODE; | ||
340 | |||
341 | if (msg->msg.data_len > 1) { | ||
342 | rx_len = msg->msg.data_len - 1; | ||
343 | if (data->rx_msg_len < rx_len) | ||
344 | rx_len = data->rx_msg_len; | ||
345 | data->rx_msg_len = rx_len; | ||
346 | memcpy(data->rx_msg_data, msg->msg.data + 1, data->rx_msg_len); | ||
347 | } else | ||
348 | data->rx_msg_len = 0; | ||
349 | |||
350 | ipmi_free_recv_msg(msg); | ||
351 | complete(&data->read_complete); | ||
352 | } | ||
353 | |||
354 | /* ID functions */ | ||
355 | |||
356 | /* Obtain an id */ | ||
357 | static int aem_idr_get(int *id) | ||
358 | { | ||
359 | int i, err; | ||
360 | |||
361 | again: | ||
362 | if (unlikely(!idr_pre_get(&aem_idr, GFP_KERNEL))) | ||
363 | return -ENOMEM; | ||
364 | |||
365 | spin_lock(&aem_idr_lock); | ||
366 | err = idr_get_new(&aem_idr, NULL, &i); | ||
367 | spin_unlock(&aem_idr_lock); | ||
368 | |||
369 | if (unlikely(err == -EAGAIN)) | ||
370 | goto again; | ||
371 | else if (unlikely(err)) | ||
372 | return err; | ||
373 | |||
374 | *id = i & MAX_ID_MASK; | ||
375 | return 0; | ||
376 | } | ||
377 | |||
378 | /* Release an object ID */ | ||
379 | static void aem_idr_put(int id) | ||
380 | { | ||
381 | spin_lock(&aem_idr_lock); | ||
382 | idr_remove(&aem_idr, id); | ||
383 | spin_unlock(&aem_idr_lock); | ||
384 | } | ||
385 | |||
386 | /* Sensor support functions */ | ||
387 | |||
388 | /* Read a sensor value */ | ||
389 | static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg, | ||
390 | void *buf, size_t size) | ||
391 | { | ||
392 | int rs_size, res; | ||
393 | struct aem_read_sensor_req rs_req; | ||
394 | struct aem_read_sensor_resp *rs_resp; | ||
395 | struct aem_ipmi_data *ipmi = &data->ipmi; | ||
396 | |||
397 | /* AEM registers are 1, 2, 4 or 8 bytes */ | ||
398 | switch (size) { | ||
399 | case 1: | ||
400 | case 2: | ||
401 | case 4: | ||
402 | case 8: | ||
403 | break; | ||
404 | default: | ||
405 | return -EINVAL; | ||
406 | } | ||
407 | |||
408 | rs_req.id = system_x_id; | ||
409 | rs_req.module_handle = data->module_handle; | ||
410 | rs_req.element = elt; | ||
411 | rs_req.subcommand = AEM_READ_REGISTER; | ||
412 | rs_req.reg = reg; | ||
413 | rs_req.rx_buf_size = size; | ||
414 | |||
415 | ipmi->tx_message.cmd = AEM_ELEMENT_CMD; | ||
416 | ipmi->tx_message.data = (char *)&rs_req; | ||
417 | ipmi->tx_message.data_len = sizeof(rs_req); | ||
418 | |||
419 | rs_size = sizeof(*rs_resp) + size; | ||
420 | rs_resp = kzalloc(rs_size, GFP_KERNEL); | ||
421 | if (!rs_resp) | ||
422 | return -ENOMEM; | ||
423 | |||
424 | ipmi->rx_msg_data = rs_resp; | ||
425 | ipmi->rx_msg_len = rs_size; | ||
426 | |||
427 | aem_send_message(ipmi); | ||
428 | |||
429 | res = wait_for_completion_timeout(&ipmi->read_complete, IPMI_TIMEOUT); | ||
430 | if (!res) | ||
431 | return -ETIMEDOUT; | ||
432 | |||
433 | if (ipmi->rx_result || ipmi->rx_msg_len != rs_size || | ||
434 | memcmp(&rs_resp->id, &system_x_id, sizeof(system_x_id))) { | ||
435 | kfree(rs_resp); | ||
436 | return -ENOENT; | ||
437 | } | ||
438 | |||
439 | switch (size) { | ||
440 | case 1: { | ||
441 | u8 *x = buf; | ||
442 | *x = rs_resp->bytes[0]; | ||
443 | break; | ||
444 | } | ||
445 | case 2: { | ||
446 | u16 *x = buf; | ||
447 | *x = be16_to_cpup((u16 *)rs_resp->bytes); | ||
448 | break; | ||
449 | } | ||
450 | case 4: { | ||
451 | u32 *x = buf; | ||
452 | *x = be32_to_cpup((u32 *)rs_resp->bytes); | ||
453 | break; | ||
454 | } | ||
455 | case 8: { | ||
456 | u64 *x = buf; | ||
457 | *x = be64_to_cpup((u64 *)rs_resp->bytes); | ||
458 | break; | ||
459 | } | ||
460 | } | ||
461 | |||
462 | return 0; | ||
463 | } | ||
464 | |||
465 | /* Update AEM energy registers */ | ||
466 | static void update_aem_energy(struct aem_data *data) | ||
467 | { | ||
468 | aem_read_sensor(data, AEM_ENERGY_ELEMENT, 0, &data->energy[0], 8); | ||
469 | if (data->ver_major < 2) | ||
470 | return; | ||
471 | aem_read_sensor(data, AEM_ENERGY_ELEMENT, 1, &data->energy[1], 8); | ||
472 | } | ||
473 | |||
474 | /* Update all AEM1 sensors */ | ||
475 | static void update_aem1_sensors(struct aem_data *data) | ||
476 | { | ||
477 | mutex_lock(&data->lock); | ||
478 | if (time_before(jiffies, data->last_updated + REFRESH_INTERVAL) && | ||
479 | data->valid) | ||
480 | goto out; | ||
481 | |||
482 | update_aem_energy(data); | ||
483 | out: | ||
484 | mutex_unlock(&data->lock); | ||
485 | } | ||
486 | |||
487 | /* Update all AEM2 sensors */ | ||
488 | static void update_aem2_sensors(struct aem_data *data) | ||
489 | { | ||
490 | int i; | ||
491 | |||
492 | mutex_lock(&data->lock); | ||
493 | if (time_before(jiffies, data->last_updated + REFRESH_INTERVAL) && | ||
494 | data->valid) | ||
495 | goto out; | ||
496 | |||
497 | update_aem_energy(data); | ||
498 | aem_read_sensor(data, AEM_EXHAUST_ELEMENT, 0, &data->temp[0], 1); | ||
499 | aem_read_sensor(data, AEM_EXHAUST_ELEMENT, 1, &data->temp[1], 1); | ||
500 | |||
501 | for (i = POWER_CAP; i <= POWER_AUX; i++) | ||
502 | aem_read_sensor(data, AEM_POWER_CAP_ELEMENT, i, | ||
503 | &data->pcap[i], 2); | ||
504 | out: | ||
505 | mutex_unlock(&data->lock); | ||
506 | } | ||
507 | |||
508 | /* Delete an AEM instance */ | ||
509 | static void aem_delete(struct aem_data *data) | ||
510 | { | ||
511 | list_del(&data->list); | ||
512 | aem_remove_sensors(data); | ||
513 | hwmon_device_unregister(data->hwmon_dev); | ||
514 | ipmi_destroy_user(data->ipmi.user); | ||
515 | dev_set_drvdata(&data->pdev->dev, NULL); | ||
516 | platform_device_unregister(data->pdev); | ||
517 | aem_idr_put(data->id); | ||
518 | kfree(data); | ||
519 | } | ||
520 | |||
521 | /* Probe functions for AEM1 devices */ | ||
522 | |||
523 | /* Retrieve version and module handle for an AEM1 instance */ | ||
524 | static int aem_find_aem1_count(struct aem_ipmi_data *data) | ||
525 | { | ||
526 | int res; | ||
527 | struct aem_find_firmware_req ff_req; | ||
528 | struct aem_find_firmware_resp ff_resp; | ||
529 | |||
530 | ff_req.id = system_x_id; | ||
531 | ff_req.index = 0; | ||
532 | ff_req.module_type_id = cpu_to_be16(AEM_MODULE_TYPE_ID); | ||
533 | |||
534 | data->tx_message.cmd = AEM_FIND_FW_CMD; | ||
535 | data->tx_message.data = (char *)&ff_req; | ||
536 | data->tx_message.data_len = sizeof(ff_req); | ||
537 | |||
538 | data->rx_msg_data = &ff_resp; | ||
539 | data->rx_msg_len = sizeof(ff_resp); | ||
540 | |||
541 | aem_send_message(data); | ||
542 | |||
543 | res = wait_for_completion_timeout(&data->read_complete, IPMI_TIMEOUT); | ||
544 | if (!res) | ||
545 | return -ETIMEDOUT; | ||
546 | |||
547 | if (data->rx_result || data->rx_msg_len != sizeof(ff_resp) || | ||
548 | memcmp(&ff_resp.id, &system_x_id, sizeof(system_x_id))) | ||
549 | return -ENOENT; | ||
550 | |||
551 | return ff_resp.num_instances; | ||
552 | } | ||
553 | |||
554 | /* Find and initialize one AEM1 instance */ | ||
555 | static int aem_init_aem1_inst(struct aem_ipmi_data *probe, u8 module_handle) | ||
556 | { | ||
557 | struct aem_data *data; | ||
558 | int i; | ||
559 | int res = -ENOMEM; | ||
560 | |||
561 | data = kzalloc(sizeof(*data), GFP_KERNEL); | ||
562 | if (!data) | ||
563 | return res; | ||
564 | mutex_init(&data->lock); | ||
565 | |||
566 | /* Copy instance data */ | ||
567 | data->ver_major = 1; | ||
568 | data->ver_minor = 0; | ||
569 | data->module_handle = module_handle; | ||
570 | for (i = 0; i < AEM1_NUM_ENERGY_REGS; i++) | ||
571 | data->power_period[i] = AEM_DEFAULT_POWER_INTERVAL; | ||
572 | |||
573 | /* Create sub-device for this fw instance */ | ||
574 | if (aem_idr_get(&data->id)) | ||
575 | goto id_err; | ||
576 | |||
577 | data->pdev = platform_device_alloc(DRVNAME, data->id); | ||
578 | if (!data->pdev) | ||
579 | goto dev_err; | ||
580 | data->pdev->dev.driver = &aem_driver; | ||
581 | |||
582 | res = platform_device_add(data->pdev); | ||
583 | if (res) | ||
584 | goto ipmi_err; | ||
585 | |||
586 | dev_set_drvdata(&data->pdev->dev, data); | ||
587 | |||
588 | /* Set up IPMI interface */ | ||
589 | if (aem_init_ipmi_data(&data->ipmi, probe->interface, | ||
590 | probe->bmc_device)) | ||
591 | goto ipmi_err; | ||
592 | |||
593 | /* Register with hwmon */ | ||
594 | data->hwmon_dev = hwmon_device_register(&data->pdev->dev); | ||
595 | |||
596 | if (IS_ERR(data->hwmon_dev)) { | ||
597 | dev_err(&data->pdev->dev, "Unable to register hwmon " | ||
598 | "device for IPMI interface %d\n", | ||
599 | probe->interface); | ||
600 | goto hwmon_reg_err; | ||
601 | } | ||
602 | |||
603 | data->update = update_aem1_sensors; | ||
604 | |||
605 | /* Find sensors */ | ||
606 | if (aem1_find_sensors(data)) | ||
607 | goto sensor_err; | ||
608 | |||
609 | /* Add to our list of AEM devices */ | ||
610 | list_add_tail(&data->list, &driver_data.aem_devices); | ||
611 | |||
612 | dev_info(data->ipmi.bmc_device, "Found AEM v%d.%d at 0x%X\n", | ||
613 | data->ver_major, data->ver_minor, | ||
614 | data->module_handle); | ||
615 | return 0; | ||
616 | |||
617 | sensor_err: | ||
618 | hwmon_device_unregister(data->hwmon_dev); | ||
619 | hwmon_reg_err: | ||
620 | ipmi_destroy_user(data->ipmi.user); | ||
621 | ipmi_err: | ||
622 | dev_set_drvdata(&data->pdev->dev, NULL); | ||
623 | platform_device_unregister(data->pdev); | ||
624 | dev_err: | ||
625 | aem_idr_put(data->id); | ||
626 | id_err: | ||
627 | kfree(data); | ||
628 | |||
629 | return res; | ||
630 | } | ||
631 | |||
632 | /* Find and initialize all AEM1 instances */ | ||
633 | static int aem_init_aem1(struct aem_ipmi_data *probe) | ||
634 | { | ||
635 | int num, i, err; | ||
636 | |||
637 | num = aem_find_aem1_count(probe); | ||
638 | for (i = 0; i < num; i++) { | ||
639 | err = aem_init_aem1_inst(probe, i); | ||
640 | if (err) { | ||
641 | dev_err(probe->bmc_device, | ||
642 | "Error %d initializing AEM1 0x%X\n", | ||
643 | err, i); | ||
644 | return err; | ||
645 | } | ||
646 | } | ||
647 | |||
648 | return 0; | ||
649 | } | ||
650 | |||
651 | /* Probe functions for AEM2 devices */ | ||
652 | |||
653 | /* Retrieve version and module handle for an AEM2 instance */ | ||
654 | static int aem_find_aem2(struct aem_ipmi_data *data, | ||
655 | struct aem_find_instance_resp *fi_resp, | ||
656 | int instance_num) | ||
657 | { | ||
658 | int res; | ||
659 | struct aem_find_instance_req fi_req; | ||
660 | |||
661 | fi_req.id = system_x_id; | ||
662 | fi_req.instance_number = instance_num; | ||
663 | fi_req.module_type_id = cpu_to_be16(AEM_MODULE_TYPE_ID); | ||
664 | |||
665 | data->tx_message.cmd = AEM_FW_INSTANCE_CMD; | ||
666 | data->tx_message.data = (char *)&fi_req; | ||
667 | data->tx_message.data_len = sizeof(fi_req); | ||
668 | |||
669 | data->rx_msg_data = fi_resp; | ||
670 | data->rx_msg_len = sizeof(*fi_resp); | ||
671 | |||
672 | aem_send_message(data); | ||
673 | |||
674 | res = wait_for_completion_timeout(&data->read_complete, IPMI_TIMEOUT); | ||
675 | if (!res) | ||
676 | return -ETIMEDOUT; | ||
677 | |||
678 | if (data->rx_result || data->rx_msg_len != sizeof(*fi_resp) || | ||
679 | memcmp(&fi_resp->id, &system_x_id, sizeof(system_x_id))) | ||
680 | return -ENOENT; | ||
681 | |||
682 | return 0; | ||
683 | } | ||
684 | |||
685 | /* Find and initialize one AEM2 instance */ | ||
686 | static int aem_init_aem2_inst(struct aem_ipmi_data *probe, | ||
687 | struct aem_find_instance_resp *fi_resp) | ||
688 | { | ||
689 | struct aem_data *data; | ||
690 | int i; | ||
691 | int res = -ENOMEM; | ||
692 | |||
693 | data = kzalloc(sizeof(*data), GFP_KERNEL); | ||
694 | if (!data) | ||
695 | return res; | ||
696 | mutex_init(&data->lock); | ||
697 | |||
698 | /* Copy instance data */ | ||
699 | data->ver_major = fi_resp->major; | ||
700 | data->ver_minor = fi_resp->minor; | ||
701 | data->module_handle = fi_resp->module_handle; | ||
702 | for (i = 0; i < AEM2_NUM_ENERGY_REGS; i++) | ||
703 | data->power_period[i] = AEM_DEFAULT_POWER_INTERVAL; | ||
704 | |||
705 | /* Create sub-device for this fw instance */ | ||
706 | if (aem_idr_get(&data->id)) | ||
707 | goto id_err; | ||
708 | |||
709 | data->pdev = platform_device_alloc(DRVNAME, data->id); | ||
710 | if (!data->pdev) | ||
711 | goto dev_err; | ||
712 | data->pdev->dev.driver = &aem_driver; | ||
713 | |||
714 | res = platform_device_add(data->pdev); | ||
715 | if (res) | ||
716 | goto ipmi_err; | ||
717 | |||
718 | dev_set_drvdata(&data->pdev->dev, data); | ||
719 | |||
720 | /* Set up IPMI interface */ | ||
721 | if (aem_init_ipmi_data(&data->ipmi, probe->interface, | ||
722 | probe->bmc_device)) | ||
723 | goto ipmi_err; | ||
724 | |||
725 | /* Register with hwmon */ | ||
726 | data->hwmon_dev = hwmon_device_register(&data->pdev->dev); | ||
727 | |||
728 | if (IS_ERR(data->hwmon_dev)) { | ||
729 | dev_err(&data->pdev->dev, "Unable to register hwmon " | ||
730 | "device for IPMI interface %d\n", | ||
731 | probe->interface); | ||
732 | goto hwmon_reg_err; | ||
733 | } | ||
734 | |||
735 | data->update = update_aem2_sensors; | ||
736 | |||
737 | /* Find sensors */ | ||
738 | if (aem2_find_sensors(data)) | ||
739 | goto sensor_err; | ||
740 | |||
741 | /* Add to our list of AEM devices */ | ||
742 | list_add_tail(&data->list, &driver_data.aem_devices); | ||
743 | |||
744 | dev_info(data->ipmi.bmc_device, "Found AEM v%d.%d at 0x%X\n", | ||
745 | data->ver_major, data->ver_minor, | ||
746 | data->module_handle); | ||
747 | return 0; | ||
748 | |||
749 | sensor_err: | ||
750 | hwmon_device_unregister(data->hwmon_dev); | ||
751 | hwmon_reg_err: | ||
752 | ipmi_destroy_user(data->ipmi.user); | ||
753 | ipmi_err: | ||
754 | dev_set_drvdata(&data->pdev->dev, NULL); | ||
755 | platform_device_unregister(data->pdev); | ||
756 | dev_err: | ||
757 | aem_idr_put(data->id); | ||
758 | id_err: | ||
759 | kfree(data); | ||
760 | |||
761 | return res; | ||
762 | } | ||
763 | |||
764 | /* Find and initialize all AEM2 instances */ | ||
765 | static int aem_init_aem2(struct aem_ipmi_data *probe) | ||
766 | { | ||
767 | struct aem_find_instance_resp fi_resp; | ||
768 | int err; | ||
769 | int i = 0; | ||
770 | |||
771 | while (!aem_find_aem2(probe, &fi_resp, i)) { | ||
772 | if (fi_resp.major != 2) { | ||
773 | dev_err(probe->bmc_device, "Unknown AEM v%d; please " | ||
774 | "report this to the maintainer.\n", | ||
775 | fi_resp.major); | ||
776 | i++; | ||
777 | continue; | ||
778 | } | ||
779 | err = aem_init_aem2_inst(probe, &fi_resp); | ||
780 | if (err) { | ||
781 | dev_err(probe->bmc_device, | ||
782 | "Error %d initializing AEM2 0x%X\n", | ||
783 | err, fi_resp.module_handle); | ||
784 | return err; | ||
785 | } | ||
786 | i++; | ||
787 | } | ||
788 | |||
789 | return 0; | ||
790 | } | ||
791 | |||
792 | /* Probe a BMC for AEM firmware instances */ | ||
793 | static void aem_register_bmc(int iface, struct device *dev) | ||
794 | { | ||
795 | struct aem_ipmi_data probe; | ||
796 | |||
797 | if (aem_init_ipmi_data(&probe, iface, dev)) | ||
798 | return; | ||
799 | |||
800 | /* Ignore probe errors; they won't cause problems */ | ||
801 | aem_init_aem1(&probe); | ||
802 | aem_init_aem2(&probe); | ||
803 | |||
804 | ipmi_destroy_user(probe.user); | ||
805 | } | ||
806 | |||
807 | /* Handle BMC deletion */ | ||
808 | static void aem_bmc_gone(int iface) | ||
809 | { | ||
810 | struct aem_data *p1, *next1; | ||
811 | |||
812 | list_for_each_entry_safe(p1, next1, &driver_data.aem_devices, list) | ||
813 | if (p1->ipmi.interface == iface) | ||
814 | aem_delete(p1); | ||
815 | } | ||
816 | |||
817 | /* sysfs support functions */ | ||
818 | |||
819 | /* AEM device name */ | ||
820 | static ssize_t show_name(struct device *dev, struct device_attribute *devattr, | ||
821 | char *buf) | ||
822 | { | ||
823 | struct aem_data *data = dev_get_drvdata(dev); | ||
824 | |||
825 | return sprintf(buf, "%s%d\n", DRVNAME, data->ver_major); | ||
826 | } | ||
827 | static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0); | ||
828 | |||
829 | /* AEM device version */ | ||
830 | static ssize_t show_version(struct device *dev, | ||
831 | struct device_attribute *devattr, | ||
832 | char *buf) | ||
833 | { | ||
834 | struct aem_data *data = dev_get_drvdata(dev); | ||
835 | |||
836 | return sprintf(buf, "%d.%d\n", data->ver_major, data->ver_minor); | ||
837 | } | ||
838 | static SENSOR_DEVICE_ATTR(version, S_IRUGO, show_version, NULL, 0); | ||
839 | |||
840 | /* Display power use */ | ||
841 | static ssize_t aem_show_power(struct device *dev, | ||
842 | struct device_attribute *devattr, | ||
843 | char *buf) | ||
844 | { | ||
845 | struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); | ||
846 | struct aem_data *data = dev_get_drvdata(dev); | ||
847 | u64 before, after, delta, time; | ||
848 | signed long leftover; | ||
849 | struct timespec b, a; | ||
850 | |||
851 | mutex_lock(&data->lock); | ||
852 | update_aem_energy(data); | ||
853 | getnstimeofday(&b); | ||
854 | before = data->energy[attr->index]; | ||
855 | |||
856 | leftover = schedule_timeout_interruptible( | ||
857 | msecs_to_jiffies(data->power_period[attr->index]) | ||
858 | ); | ||
859 | if (leftover) { | ||
860 | mutex_unlock(&data->lock); | ||
861 | return 0; | ||
862 | } | ||
863 | |||
864 | update_aem_energy(data); | ||
865 | getnstimeofday(&a); | ||
866 | after = data->energy[attr->index]; | ||
867 | mutex_unlock(&data->lock); | ||
868 | |||
869 | time = timespec_to_ns(&a) - timespec_to_ns(&b); | ||
870 | delta = (after - before) * UJ_PER_MJ; | ||
871 | |||
872 | return sprintf(buf, "%llu\n", | ||
873 | (unsigned long long)div64_u64(delta * NSEC_PER_SEC, time)); | ||
874 | } | ||
875 | |||
876 | /* Display energy use */ | ||
877 | static ssize_t aem_show_energy(struct device *dev, | ||
878 | struct device_attribute *devattr, | ||
879 | char *buf) | ||
880 | { | ||
881 | struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); | ||
882 | struct aem_data *a = dev_get_drvdata(dev); | ||
883 | a->update(a); | ||
884 | |||
885 | return sprintf(buf, "%llu\n", | ||
886 | (unsigned long long)a->energy[attr->index] * 1000); | ||
887 | } | ||
888 | |||
889 | /* Display power interval registers */ | ||
890 | static ssize_t aem_show_power_period(struct device *dev, | ||
891 | struct device_attribute *devattr, | ||
892 | char *buf) | ||
893 | { | ||
894 | struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); | ||
895 | struct aem_data *a = dev_get_drvdata(dev); | ||
896 | a->update(a); | ||
897 | |||
898 | return sprintf(buf, "%lu\n", a->power_period[attr->index]); | ||
899 | } | ||
900 | |||
901 | /* Set power interval registers */ | ||
902 | static ssize_t aem_set_power_period(struct device *dev, | ||
903 | struct device_attribute *devattr, | ||
904 | const char *buf, size_t count) | ||
905 | { | ||
906 | struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); | ||
907 | struct aem_data *a = dev_get_drvdata(dev); | ||
908 | unsigned long temp; | ||
909 | int res; | ||
910 | |||
911 | res = strict_strtoul(buf, 10, &temp); | ||
912 | if (res) | ||
913 | return res; | ||
914 | |||
915 | if (temp < AEM_MIN_POWER_INTERVAL) | ||
916 | return -EINVAL; | ||
917 | |||
918 | mutex_lock(&a->lock); | ||
919 | a->power_period[attr->index] = temp; | ||
920 | mutex_unlock(&a->lock); | ||
921 | |||
922 | return count; | ||
923 | } | ||
924 | |||
925 | /* Discover sensors on an AEM device */ | ||
926 | static int aem_register_sensors(struct aem_data *data, | ||
927 | struct aem_ro_sensor_template *ro, | ||
928 | struct aem_rw_sensor_template *rw) | ||
929 | { | ||
930 | struct device *dev = &data->pdev->dev; | ||
931 | struct sensor_device_attribute *sensors = data->sensors; | ||
932 | int err; | ||
933 | |||
934 | /* Set up read-only sensors */ | ||
935 | while (ro->label) { | ||
936 | sensors->dev_attr.attr.name = ro->label; | ||
937 | sensors->dev_attr.attr.mode = S_IRUGO; | ||
938 | sensors->dev_attr.show = ro->show; | ||
939 | sensors->index = ro->index; | ||
940 | |||
941 | err = device_create_file(dev, &sensors->dev_attr); | ||
942 | if (err) { | ||
943 | sensors->dev_attr.attr.name = NULL; | ||
944 | goto error; | ||
945 | } | ||
946 | sensors++; | ||
947 | ro++; | ||
948 | } | ||
949 | |||
950 | /* Set up read-write sensors */ | ||
951 | while (rw->label) { | ||
952 | sensors->dev_attr.attr.name = rw->label; | ||
953 | sensors->dev_attr.attr.mode = S_IRUGO | S_IWUSR; | ||
954 | sensors->dev_attr.show = rw->show; | ||
955 | sensors->dev_attr.store = rw->set; | ||
956 | sensors->index = rw->index; | ||
957 | |||
958 | err = device_create_file(dev, &sensors->dev_attr); | ||
959 | if (err) { | ||
960 | sensors->dev_attr.attr.name = NULL; | ||
961 | goto error; | ||
962 | } | ||
963 | sensors++; | ||
964 | rw++; | ||
965 | } | ||
966 | |||
967 | err = device_create_file(dev, &sensor_dev_attr_name.dev_attr); | ||
968 | if (err) | ||
969 | goto error; | ||
970 | err = device_create_file(dev, &sensor_dev_attr_version.dev_attr); | ||
971 | return err; | ||
972 | |||
973 | error: | ||
974 | aem_remove_sensors(data); | ||
975 | return err; | ||
976 | } | ||
977 | |||
978 | /* sysfs support functions for AEM2 sensors */ | ||
979 | |||
980 | /* Display temperature use */ | ||
981 | static ssize_t aem2_show_temp(struct device *dev, | ||
982 | struct device_attribute *devattr, | ||
983 | char *buf) | ||
984 | { | ||
985 | struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); | ||
986 | struct aem_data *a = dev_get_drvdata(dev); | ||
987 | a->update(a); | ||
988 | |||
989 | return sprintf(buf, "%u\n", a->temp[attr->index] * 1000); | ||
990 | } | ||
991 | |||
992 | /* Display power-capping registers */ | ||
993 | static ssize_t aem2_show_pcap_value(struct device *dev, | ||
994 | struct device_attribute *devattr, | ||
995 | char *buf) | ||
996 | { | ||
997 | struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); | ||
998 | struct aem_data *a = dev_get_drvdata(dev); | ||
999 | a->update(a); | ||
1000 | |||
1001 | return sprintf(buf, "%u\n", a->pcap[attr->index] * 100000); | ||
1002 | } | ||
1003 | |||
1004 | /* Remove sensors attached to an AEM device */ | ||
1005 | static void aem_remove_sensors(struct aem_data *data) | ||
1006 | { | ||
1007 | int i; | ||
1008 | |||
1009 | for (i = 0; i < AEM_NUM_SENSORS; i++) { | ||
1010 | if (!data->sensors[i].dev_attr.attr.name) | ||
1011 | continue; | ||
1012 | device_remove_file(&data->pdev->dev, | ||
1013 | &data->sensors[i].dev_attr); | ||
1014 | } | ||
1015 | |||
1016 | device_remove_file(&data->pdev->dev, | ||
1017 | &sensor_dev_attr_name.dev_attr); | ||
1018 | device_remove_file(&data->pdev->dev, | ||
1019 | &sensor_dev_attr_version.dev_attr); | ||
1020 | } | ||
1021 | |||
1022 | /* Sensor probe functions */ | ||
1023 | |||
1024 | /* Description of AEM1 sensors */ | ||
1025 | static struct aem_ro_sensor_template aem1_ro_sensors[] = { | ||
1026 | {"energy1_input", aem_show_energy, 0}, | ||
1027 | {"power1_average", aem_show_power, 0}, | ||
1028 | {NULL, NULL, 0}, | ||
1029 | }; | ||
1030 | |||
1031 | static struct aem_rw_sensor_template aem1_rw_sensors[] = { | ||
1032 | {"power1_average_interval", aem_show_power_period, aem_set_power_period, 0}, | ||
1033 | {NULL, NULL, NULL, 0}, | ||
1034 | }; | ||
1035 | |||
1036 | /* Description of AEM2 sensors */ | ||
1037 | static struct aem_ro_sensor_template aem2_ro_sensors[] = { | ||
1038 | {"energy1_input", aem_show_energy, 0}, | ||
1039 | {"energy2_input", aem_show_energy, 1}, | ||
1040 | {"power1_average", aem_show_power, 0}, | ||
1041 | {"power2_average", aem_show_power, 1}, | ||
1042 | {"temp1_input", aem2_show_temp, 0}, | ||
1043 | {"temp2_input", aem2_show_temp, 1}, | ||
1044 | |||
1045 | {"power4_average", aem2_show_pcap_value, POWER_CAP_MAX_HOTPLUG}, | ||
1046 | {"power5_average", aem2_show_pcap_value, POWER_CAP_MAX}, | ||
1047 | {"power6_average", aem2_show_pcap_value, POWER_CAP_MIN_WARNING}, | ||
1048 | {"power7_average", aem2_show_pcap_value, POWER_CAP_MIN}, | ||
1049 | |||
1050 | {"power3_average", aem2_show_pcap_value, POWER_AUX}, | ||
1051 | {"power_cap", aem2_show_pcap_value, POWER_CAP}, | ||
1052 | {NULL, NULL, 0}, | ||
1053 | }; | ||
1054 | |||
1055 | static struct aem_rw_sensor_template aem2_rw_sensors[] = { | ||
1056 | {"power1_average_interval", aem_show_power_period, aem_set_power_period, 0}, | ||
1057 | {"power2_average_interval", aem_show_power_period, aem_set_power_period, 1}, | ||
1058 | {NULL, NULL, NULL, 0}, | ||
1059 | }; | ||
1060 | |||
1061 | /* Set up AEM1 sensor attrs */ | ||
1062 | static int aem1_find_sensors(struct aem_data *data) | ||
1063 | { | ||
1064 | return aem_register_sensors(data, aem1_ro_sensors, aem1_rw_sensors); | ||
1065 | } | ||
1066 | |||
1067 | /* Set up AEM2 sensor attrs */ | ||
1068 | static int aem2_find_sensors(struct aem_data *data) | ||
1069 | { | ||
1070 | return aem_register_sensors(data, aem2_ro_sensors, aem2_rw_sensors); | ||
1071 | } | ||
1072 | |||
1073 | /* Module init/exit routines */ | ||
1074 | |||
1075 | static int __init aem_init(void) | ||
1076 | { | ||
1077 | int res; | ||
1078 | |||
1079 | res = driver_register(&aem_driver); | ||
1080 | if (res) { | ||
1081 | printk(KERN_ERR "Can't register aem driver\n"); | ||
1082 | return res; | ||
1083 | } | ||
1084 | |||
1085 | res = ipmi_smi_watcher_register(&driver_data.bmc_events); | ||
1086 | if (res) | ||
1087 | goto ipmi_reg_err; | ||
1088 | return 0; | ||
1089 | |||
1090 | ipmi_reg_err: | ||
1091 | driver_unregister(&aem_driver); | ||
1092 | return res; | ||
1093 | |||
1094 | } | ||
1095 | |||
1096 | static void __exit aem_exit(void) | ||
1097 | { | ||
1098 | struct aem_data *p1, *next1; | ||
1099 | |||
1100 | ipmi_smi_watcher_unregister(&driver_data.bmc_events); | ||
1101 | driver_unregister(&aem_driver); | ||
1102 | list_for_each_entry_safe(p1, next1, &driver_data.aem_devices, list) | ||
1103 | aem_delete(p1); | ||
1104 | } | ||
1105 | |||
1106 | MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>"); | ||
1107 | MODULE_DESCRIPTION("IBM Active Energy Manager power/temp sensor driver"); | ||
1108 | MODULE_LICENSE("GPL"); | ||
1109 | |||
1110 | module_init(aem_init); | ||
1111 | module_exit(aem_exit); | ||
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index fbe16d5250a4..1adf2efd3cb3 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c | |||
@@ -747,7 +747,9 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, | |||
747 | break; | 747 | break; |
748 | case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED: | 748 | case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED: |
749 | kmem_cache_free(ib_mad_cache, mad_priv); | 749 | kmem_cache_free(ib_mad_cache, mad_priv); |
750 | break; | 750 | kfree(local); |
751 | ret = 1; | ||
752 | goto out; | ||
751 | case IB_MAD_RESULT_SUCCESS: | 753 | case IB_MAD_RESULT_SUCCESS: |
752 | /* Treat like an incoming receive MAD */ | 754 | /* Treat like an incoming receive MAD */ |
753 | port_priv = ib_get_mad_port(mad_agent_priv->agent.device, | 755 | port_priv = ib_get_mad_port(mad_agent_priv->agent.device, |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c index 79dbe5beae52..992613799228 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_qp.c +++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c | |||
@@ -229,7 +229,7 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
229 | struct ib_send_wr **bad_wr) | 229 | struct ib_send_wr **bad_wr) |
230 | { | 230 | { |
231 | int err = 0; | 231 | int err = 0; |
232 | u8 t3_wr_flit_cnt; | 232 | u8 uninitialized_var(t3_wr_flit_cnt); |
233 | enum t3_wr_opcode t3_wr_opcode = 0; | 233 | enum t3_wr_opcode t3_wr_opcode = 0; |
234 | enum t3_wr_flags t3_wr_flags; | 234 | enum t3_wr_flags t3_wr_flags; |
235 | struct iwch_qp *qhp; | 235 | struct iwch_qp *qhp; |
diff --git a/drivers/infiniband/hw/ipath/ipath_sdma.c b/drivers/infiniband/hw/ipath/ipath_sdma.c index 3697449c1ba4..0a8c1b8091a2 100644 --- a/drivers/infiniband/hw/ipath/ipath_sdma.c +++ b/drivers/infiniband/hw/ipath/ipath_sdma.c | |||
@@ -345,7 +345,7 @@ resched: | |||
345 | * state change | 345 | * state change |
346 | */ | 346 | */ |
347 | if (jiffies > dd->ipath_sdma_abort_jiffies) { | 347 | if (jiffies > dd->ipath_sdma_abort_jiffies) { |
348 | ipath_dbg("looping with status 0x%016llx\n", | 348 | ipath_dbg("looping with status 0x%08lx\n", |
349 | dd->ipath_sdma_status); | 349 | dd->ipath_sdma_status); |
350 | dd->ipath_sdma_abort_jiffies = jiffies + 5 * HZ; | 350 | dd->ipath_sdma_abort_jiffies = jiffies + 5 * HZ; |
351 | } | 351 | } |
@@ -615,7 +615,7 @@ void ipath_restart_sdma(struct ipath_devdata *dd) | |||
615 | } | 615 | } |
616 | spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); | 616 | spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); |
617 | if (!needed) { | 617 | if (!needed) { |
618 | ipath_dbg("invalid attempt to restart SDMA, status 0x%016llx\n", | 618 | ipath_dbg("invalid attempt to restart SDMA, status 0x%08lx\n", |
619 | dd->ipath_sdma_status); | 619 | dd->ipath_sdma_status); |
620 | goto bail; | 620 | goto bail; |
621 | } | 621 | } |
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c index 7fd18e833907..0596ec16fcbd 100644 --- a/drivers/infiniband/hw/ipath/ipath_uc.c +++ b/drivers/infiniband/hw/ipath/ipath_uc.c | |||
@@ -407,12 +407,11 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, | |||
407 | dev->n_pkt_drops++; | 407 | dev->n_pkt_drops++; |
408 | goto done; | 408 | goto done; |
409 | } | 409 | } |
410 | /* XXX Need to free SGEs */ | 410 | wc.opcode = IB_WC_RECV; |
411 | last_imm: | 411 | last_imm: |
412 | ipath_copy_sge(&qp->r_sge, data, tlen); | 412 | ipath_copy_sge(&qp->r_sge, data, tlen); |
413 | wc.wr_id = qp->r_wr_id; | 413 | wc.wr_id = qp->r_wr_id; |
414 | wc.status = IB_WC_SUCCESS; | 414 | wc.status = IB_WC_SUCCESS; |
415 | wc.opcode = IB_WC_RECV; | ||
416 | wc.qp = &qp->ibqp; | 415 | wc.qp = &qp->ibqp; |
417 | wc.src_qp = qp->remote_qpn; | 416 | wc.src_qp = qp->remote_qpn; |
418 | wc.slid = qp->remote_ah_attr.dlid; | 417 | wc.slid = qp->remote_ah_attr.dlid; |
@@ -514,6 +513,7 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, | |||
514 | goto done; | 513 | goto done; |
515 | } | 514 | } |
516 | wc.byte_len = qp->r_len; | 515 | wc.byte_len = qp->r_len; |
516 | wc.opcode = IB_WC_RECV_RDMA_WITH_IMM; | ||
517 | goto last_imm; | 517 | goto last_imm; |
518 | 518 | ||
519 | case OP(RDMA_WRITE_LAST): | 519 | case OP(RDMA_WRITE_LAST): |
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 8e02ecfec188..a80df22deae8 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c | |||
@@ -333,6 +333,9 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, | |||
333 | cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) + | 333 | cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) + |
334 | send_wqe_overhead(type, qp->flags); | 334 | send_wqe_overhead(type, qp->flags); |
335 | 335 | ||
336 | if (s > dev->dev->caps.max_sq_desc_sz) | ||
337 | return -EINVAL; | ||
338 | |||
336 | /* | 339 | /* |
337 | * Hermon supports shrinking WQEs, such that a single work | 340 | * Hermon supports shrinking WQEs, such that a single work |
338 | * request can include multiple units of 1 << wqe_shift. This | 341 | * request can include multiple units of 1 << wqe_shift. This |
@@ -372,9 +375,6 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, | |||
372 | qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s)); | 375 | qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s)); |
373 | 376 | ||
374 | for (;;) { | 377 | for (;;) { |
375 | if (1 << qp->sq.wqe_shift > dev->dev->caps.max_sq_desc_sz) | ||
376 | return -EINVAL; | ||
377 | |||
378 | qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift); | 378 | qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift); |
379 | 379 | ||
380 | /* | 380 | /* |
@@ -395,7 +395,8 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, | |||
395 | ++qp->sq.wqe_shift; | 395 | ++qp->sq.wqe_shift; |
396 | } | 396 | } |
397 | 397 | ||
398 | qp->sq.max_gs = ((qp->sq_max_wqes_per_wr << qp->sq.wqe_shift) - | 398 | qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz, |
399 | (qp->sq_max_wqes_per_wr << qp->sq.wqe_shift)) - | ||
399 | send_wqe_overhead(type, qp->flags)) / | 400 | send_wqe_overhead(type, qp->flags)) / |
400 | sizeof (struct mlx4_wqe_data_seg); | 401 | sizeof (struct mlx4_wqe_data_seg); |
401 | 402 | ||
@@ -411,7 +412,9 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, | |||
411 | 412 | ||
412 | cap->max_send_wr = qp->sq.max_post = | 413 | cap->max_send_wr = qp->sq.max_post = |
413 | (qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr; | 414 | (qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr; |
414 | cap->max_send_sge = qp->sq.max_gs; | 415 | cap->max_send_sge = min(qp->sq.max_gs, |
416 | min(dev->dev->caps.max_sq_sg, | ||
417 | dev->dev->caps.max_rq_sg)); | ||
415 | /* We don't support inline sends for kernel QPs (yet) */ | 418 | /* We don't support inline sends for kernel QPs (yet) */ |
416 | cap->max_inline_data = 0; | 419 | cap->max_inline_data = 0; |
417 | 420 | ||
@@ -1457,7 +1460,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1457 | unsigned ind; | 1460 | unsigned ind; |
1458 | int uninitialized_var(stamp); | 1461 | int uninitialized_var(stamp); |
1459 | int uninitialized_var(size); | 1462 | int uninitialized_var(size); |
1460 | unsigned seglen; | 1463 | unsigned uninitialized_var(seglen); |
1461 | int i; | 1464 | int i; |
1462 | 1465 | ||
1463 | spin_lock_irqsave(&qp->sq.lock, flags); | 1466 | spin_lock_irqsave(&qp->sq.lock, flags); |
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c index 9ebadd6e0cfb..200cf13fc9bb 100644 --- a/drivers/infiniband/hw/mthca/mthca_main.c +++ b/drivers/infiniband/hw/mthca/mthca_main.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include "mthca_cmd.h" | 45 | #include "mthca_cmd.h" |
46 | #include "mthca_profile.h" | 46 | #include "mthca_profile.h" |
47 | #include "mthca_memfree.h" | 47 | #include "mthca_memfree.h" |
48 | #include "mthca_wqe.h" | ||
48 | 49 | ||
49 | MODULE_AUTHOR("Roland Dreier"); | 50 | MODULE_AUTHOR("Roland Dreier"); |
50 | MODULE_DESCRIPTION("Mellanox InfiniBand HCA low-level driver"); | 51 | MODULE_DESCRIPTION("Mellanox InfiniBand HCA low-level driver"); |
@@ -200,7 +201,18 @@ static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim) | |||
200 | mdev->limits.gid_table_len = dev_lim->max_gids; | 201 | mdev->limits.gid_table_len = dev_lim->max_gids; |
201 | mdev->limits.pkey_table_len = dev_lim->max_pkeys; | 202 | mdev->limits.pkey_table_len = dev_lim->max_pkeys; |
202 | mdev->limits.local_ca_ack_delay = dev_lim->local_ca_ack_delay; | 203 | mdev->limits.local_ca_ack_delay = dev_lim->local_ca_ack_delay; |
203 | mdev->limits.max_sg = dev_lim->max_sg; | 204 | /* |
205 | * Need to allow for worst case send WQE overhead and check | ||
206 | * whether max_desc_sz imposes a lower limit than max_sg; UD | ||
207 | * send has the biggest overhead. | ||
208 | */ | ||
209 | mdev->limits.max_sg = min_t(int, dev_lim->max_sg, | ||
210 | (dev_lim->max_desc_sz - | ||
211 | sizeof (struct mthca_next_seg) - | ||
212 | (mthca_is_memfree(mdev) ? | ||
213 | sizeof (struct mthca_arbel_ud_seg) : | ||
214 | sizeof (struct mthca_tavor_ud_seg))) / | ||
215 | sizeof (struct mthca_data_seg)); | ||
204 | mdev->limits.max_wqes = dev_lim->max_qp_sz; | 216 | mdev->limits.max_wqes = dev_lim->max_qp_sz; |
205 | mdev->limits.max_qp_init_rdma = dev_lim->max_requester_per_qp; | 217 | mdev->limits.max_qp_init_rdma = dev_lim->max_requester_per_qp; |
206 | mdev->limits.reserved_qps = dev_lim->reserved_qps; | 218 | mdev->limits.reserved_qps = dev_lim->reserved_qps; |
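The mthca hunk above applies the same idea at probe time: max_sg is capped by whatever still fits in max_desc_sz after the worst-case (UD) send overhead. A sketch under assumed segment sizes; NEXT_SEG_SZ, UD_SEG_SZ and DATA_SEG_SZ are placeholders, not the real struct mthca_*_seg sizes.

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Assumed segment sizes, purely illustrative. */
    #define NEXT_SEG_SZ  16
    #define UD_SEG_SZ    48     /* UD send carries the biggest overhead */
    #define DATA_SEG_SZ  16

    static int limit_max_sg(int hw_max_sg, int max_desc_sz)
    {
        /* Room left in one descriptor after the worst-case overhead. */
        int room = max_desc_sz - NEXT_SEG_SZ - UD_SEG_SZ;

        return MIN(hw_max_sg, room / DATA_SEG_SZ);
    }

    int main(void)
    {
        /* A 256-byte descriptor holds only 12 data segments here, even
         * though the HCA itself reports max_sg = 32. */
        printf("max_sg = %d\n", limit_max_sg(32, 256));
        return 0;
    }
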
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index d00a2c174aee..3f663fb852c1 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c | |||
@@ -194,7 +194,13 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, | |||
194 | /* Set the cached Q_Key before we attach if it's the broadcast group */ | 194 | /* Set the cached Q_Key before we attach if it's the broadcast group */ |
195 | if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4, | 195 | if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4, |
196 | sizeof (union ib_gid))) { | 196 | sizeof (union ib_gid))) { |
197 | spin_lock_irq(&priv->lock); | ||
198 | if (!priv->broadcast) { | ||
199 | spin_unlock_irq(&priv->lock); | ||
200 | return -EAGAIN; | ||
201 | } | ||
197 | priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey); | 202 | priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey); |
203 | spin_unlock_irq(&priv->lock); | ||
198 | priv->tx_wr.wr.ud.remote_qkey = priv->qkey; | 204 | priv->tx_wr.wr.ud.remote_qkey = priv->qkey; |
199 | } | 205 | } |
200 | 206 | ||
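The ipoib change above closes a check-then-use race: priv->broadcast is re-tested under priv->lock before its qkey is read, and -EAGAIN is returned if it has gone away. A reduced pthread sketch of the same pattern; struct group and cache_qkey() are invented for illustration.

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct group { unsigned qkey; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct group *broadcast;     /* may be torn down concurrently */

    /* Re-check the pointer under the lock; -1 plays the role of -EAGAIN. */
    static int cache_qkey(unsigned *out)
    {
        pthread_mutex_lock(&lock);
        if (!broadcast) {
            pthread_mutex_unlock(&lock);
            return -1;
        }
        *out = broadcast->qkey;         /* safe: nobody can free it now */
        pthread_mutex_unlock(&lock);
        return 0;
    }

    int main(void)
    {
        unsigned qkey = 0;
        int ret;

        broadcast = malloc(sizeof(*broadcast));
        if (!broadcast)
            return 1;
        broadcast->qkey = 0x1b1b;

        ret = cache_qkey(&qkey);
        printf("cache_qkey -> %d, qkey = %#x\n", ret, qkey);

        free(broadcast);
        return 0;
    }
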
diff --git a/drivers/isdn/hysdn/hycapi.c b/drivers/isdn/hysdn/hycapi.c index d3999a8e9f88..53f6ad1235db 100644 --- a/drivers/isdn/hysdn/hycapi.c +++ b/drivers/isdn/hysdn/hycapi.c | |||
@@ -462,11 +462,11 @@ static int hycapi_read_proc(char *page, char **start, off_t off, | |||
462 | default: s = "???"; break; | 462 | default: s = "???"; break; |
463 | } | 463 | } |
464 | len += sprintf(page+len, "%-16s %s\n", "type", s); | 464 | len += sprintf(page+len, "%-16s %s\n", "type", s); |
465 | if ((s = cinfo->version[VER_DRIVER]) != 0) | 465 | if ((s = cinfo->version[VER_DRIVER]) != NULL) |
466 | len += sprintf(page+len, "%-16s %s\n", "ver_driver", s); | 466 | len += sprintf(page+len, "%-16s %s\n", "ver_driver", s); |
467 | if ((s = cinfo->version[VER_CARDTYPE]) != 0) | 467 | if ((s = cinfo->version[VER_CARDTYPE]) != NULL) |
468 | len += sprintf(page+len, "%-16s %s\n", "ver_cardtype", s); | 468 | len += sprintf(page+len, "%-16s %s\n", "ver_cardtype", s); |
469 | if ((s = cinfo->version[VER_SERIAL]) != 0) | 469 | if ((s = cinfo->version[VER_SERIAL]) != NULL) |
470 | len += sprintf(page+len, "%-16s %s\n", "ver_serial", s); | 470 | len += sprintf(page+len, "%-16s %s\n", "ver_serial", s); |
471 | 471 | ||
472 | len += sprintf(page+len, "%-16s %s\n", "cardname", cinfo->cardname); | 472 | len += sprintf(page+len, "%-16s %s\n", "cardname", cinfo->cardname); |
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index c14dacdacfac..b26927ce889c 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c | |||
@@ -203,17 +203,6 @@ static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page) | |||
203 | * bitmap file handling - read and write the bitmap file and its superblock | 203 | * bitmap file handling - read and write the bitmap file and its superblock |
204 | */ | 204 | */ |
205 | 205 | ||
206 | /* copy the pathname of a file to a buffer */ | ||
207 | char *file_path(struct file *file, char *buf, int count) | ||
208 | { | ||
209 | if (!buf) | ||
210 | return NULL; | ||
211 | |||
212 | buf = d_path(&file->f_path, buf, count); | ||
213 | |||
214 | return IS_ERR(buf) ? NULL : buf; | ||
215 | } | ||
216 | |||
217 | /* | 206 | /* |
218 | * basic page I/O operations | 207 | * basic page I/O operations |
219 | */ | 208 | */ |
@@ -721,11 +710,13 @@ static void bitmap_file_kick(struct bitmap *bitmap) | |||
721 | if (bitmap->file) { | 710 | if (bitmap->file) { |
722 | path = kmalloc(PAGE_SIZE, GFP_KERNEL); | 711 | path = kmalloc(PAGE_SIZE, GFP_KERNEL); |
723 | if (path) | 712 | if (path) |
724 | ptr = file_path(bitmap->file, path, PAGE_SIZE); | 713 | ptr = d_path(&bitmap->file->f_path, path, |
714 | PAGE_SIZE); | ||
715 | |||
725 | 716 | ||
726 | printk(KERN_ALERT | 717 | printk(KERN_ALERT |
727 | "%s: kicking failed bitmap file %s from array!\n", | 718 | "%s: kicking failed bitmap file %s from array!\n", |
728 | bmname(bitmap), ptr ? ptr : ""); | 719 | bmname(bitmap), IS_ERR(ptr) ? "" : ptr); |
729 | 720 | ||
730 | kfree(path); | 721 | kfree(path); |
731 | } else | 722 | } else |
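The bitmap.c hunk (and the md.c hunk further down) drop the local file_path() wrapper in favour of calling d_path() directly, which reports failure through an error pointer rather than NULL, hence the IS_ERR() checks. A userspace sketch of that error-pointer convention; ERR_PTR()/IS_ERR() here are simplified restatements of the linux/err.h idea and fake_d_path() is a made-up stand-in.

    #include <stdio.h>
    #include <errno.h>

    /* Simplified error-pointer helpers in the spirit of linux/err.h. */
    #define MAX_ERRNO 4095
    static void *ERR_PTR(long err) { return (void *)err; }
    static int IS_ERR(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    /* Made-up stand-in for d_path(): fails when the buffer is too small. */
    static char *fake_d_path(const char *name, char *buf, int len)
    {
        int n = snprintf(buf, len, "/fake/%s", name);

        return (n >= len) ? ERR_PTR(-ENAMETOOLONG) : buf;
    }

    int main(void)
    {
        char buf[8];
        char *ptr = fake_d_path("bitmap", buf, sizeof(buf));

        /* Test IS_ERR(), never NULL, exactly as the patched callers do. */
        printf("kicking failed bitmap file %s from array!\n",
               IS_ERR(ptr) ? "" : ptr);
        return 0;
    }

The practical rule the callers follow is to test IS_ERR() on the returned pointer, never compare it with NULL.
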
diff --git a/drivers/md/md.c b/drivers/md/md.c index 83eb78b00137..51c19f86ff99 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -74,6 +74,8 @@ static DEFINE_SPINLOCK(pers_lock); | |||
74 | 74 | ||
75 | static void md_print_devices(void); | 75 | static void md_print_devices(void); |
76 | 76 | ||
77 | static DECLARE_WAIT_QUEUE_HEAD(resync_wait); | ||
78 | |||
77 | #define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); } | 79 | #define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); } |
78 | 80 | ||
79 | /* | 81 | /* |
@@ -3013,6 +3015,36 @@ degraded_show(mddev_t *mddev, char *page) | |||
3013 | static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded); | 3015 | static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded); |
3014 | 3016 | ||
3015 | static ssize_t | 3017 | static ssize_t |
3018 | sync_force_parallel_show(mddev_t *mddev, char *page) | ||
3019 | { | ||
3020 | return sprintf(page, "%d\n", mddev->parallel_resync); | ||
3021 | } | ||
3022 | |||
3023 | static ssize_t | ||
3024 | sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len) | ||
3025 | { | ||
3026 | long n; | ||
3027 | |||
3028 | if (strict_strtol(buf, 10, &n)) | ||
3029 | return -EINVAL; | ||
3030 | |||
3031 | if (n != 0 && n != 1) | ||
3032 | return -EINVAL; | ||
3033 | |||
3034 | mddev->parallel_resync = n; | ||
3035 | |||
3036 | if (mddev->sync_thread) | ||
3037 | wake_up(&resync_wait); | ||
3038 | |||
3039 | return len; | ||
3040 | } | ||
3041 | |||
3042 | /* force parallel resync, even with shared block devices */ | ||
3043 | static struct md_sysfs_entry md_sync_force_parallel = | ||
3044 | __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR, | ||
3045 | sync_force_parallel_show, sync_force_parallel_store); | ||
3046 | |||
3047 | static ssize_t | ||
3016 | sync_speed_show(mddev_t *mddev, char *page) | 3048 | sync_speed_show(mddev_t *mddev, char *page) |
3017 | { | 3049 | { |
3018 | unsigned long resync, dt, db; | 3050 | unsigned long resync, dt, db; |
@@ -3187,6 +3219,7 @@ static struct attribute *md_redundancy_attrs[] = { | |||
3187 | &md_sync_min.attr, | 3219 | &md_sync_min.attr, |
3188 | &md_sync_max.attr, | 3220 | &md_sync_max.attr, |
3189 | &md_sync_speed.attr, | 3221 | &md_sync_speed.attr, |
3222 | &md_sync_force_parallel.attr, | ||
3190 | &md_sync_completed.attr, | 3223 | &md_sync_completed.attr, |
3191 | &md_max_sync.attr, | 3224 | &md_max_sync.attr, |
3192 | &md_suspend_lo.attr, | 3225 | &md_suspend_lo.attr, |
@@ -3691,6 +3724,8 @@ static int do_md_stop(mddev_t * mddev, int mode) | |||
3691 | 3724 | ||
3692 | module_put(mddev->pers->owner); | 3725 | module_put(mddev->pers->owner); |
3693 | mddev->pers = NULL; | 3726 | mddev->pers = NULL; |
3727 | /* tell userspace to handle 'inactive' */ | ||
3728 | sysfs_notify(&mddev->kobj, NULL, "array_state"); | ||
3694 | 3729 | ||
3695 | set_capacity(disk, 0); | 3730 | set_capacity(disk, 0); |
3696 | mddev->changed = 1; | 3731 | mddev->changed = 1; |
@@ -3987,8 +4022,8 @@ static int get_bitmap_file(mddev_t * mddev, void __user * arg) | |||
3987 | if (!buf) | 4022 | if (!buf) |
3988 | goto out; | 4023 | goto out; |
3989 | 4024 | ||
3990 | ptr = file_path(mddev->bitmap->file, buf, sizeof(file->pathname)); | 4025 | ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname)); |
3991 | if (!ptr) | 4026 | if (IS_ERR(ptr)) |
3992 | goto out; | 4027 | goto out; |
3993 | 4028 | ||
3994 | strcpy(file->pathname, ptr); | 4029 | strcpy(file->pathname, ptr); |
@@ -5399,7 +5434,7 @@ void md_done_sync(mddev_t *mddev, int blocks, int ok) | |||
5399 | atomic_sub(blocks, &mddev->recovery_active); | 5434 | atomic_sub(blocks, &mddev->recovery_active); |
5400 | wake_up(&mddev->recovery_wait); | 5435 | wake_up(&mddev->recovery_wait); |
5401 | if (!ok) { | 5436 | if (!ok) { |
5402 | set_bit(MD_RECOVERY_ERR, &mddev->recovery); | 5437 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); |
5403 | md_wakeup_thread(mddev->thread); | 5438 | md_wakeup_thread(mddev->thread); |
5404 | // stop recovery, signal do_sync .... | 5439 | // stop recovery, signal do_sync .... |
5405 | } | 5440 | } |
@@ -5435,8 +5470,11 @@ void md_write_start(mddev_t *mddev, struct bio *bi) | |||
5435 | md_wakeup_thread(mddev->thread); | 5470 | md_wakeup_thread(mddev->thread); |
5436 | } | 5471 | } |
5437 | spin_unlock_irq(&mddev->write_lock); | 5472 | spin_unlock_irq(&mddev->write_lock); |
5473 | sysfs_notify(&mddev->kobj, NULL, "array_state"); | ||
5438 | } | 5474 | } |
5439 | wait_event(mddev->sb_wait, mddev->flags==0); | 5475 | wait_event(mddev->sb_wait, |
5476 | !test_bit(MD_CHANGE_CLEAN, &mddev->flags) && | ||
5477 | !test_bit(MD_CHANGE_PENDING, &mddev->flags)); | ||
5440 | } | 5478 | } |
5441 | 5479 | ||
5442 | void md_write_end(mddev_t *mddev) | 5480 | void md_write_end(mddev_t *mddev) |
@@ -5471,13 +5509,17 @@ void md_allow_write(mddev_t *mddev) | |||
5471 | mddev->safemode = 1; | 5509 | mddev->safemode = 1; |
5472 | spin_unlock_irq(&mddev->write_lock); | 5510 | spin_unlock_irq(&mddev->write_lock); |
5473 | md_update_sb(mddev, 0); | 5511 | md_update_sb(mddev, 0); |
5512 | |||
5513 | sysfs_notify(&mddev->kobj, NULL, "array_state"); | ||
5514 | /* wait for the dirty state to be recorded in the metadata */ | ||
5515 | wait_event(mddev->sb_wait, | ||
5516 | !test_bit(MD_CHANGE_CLEAN, &mddev->flags) && | ||
5517 | !test_bit(MD_CHANGE_PENDING, &mddev->flags)); | ||
5474 | } else | 5518 | } else |
5475 | spin_unlock_irq(&mddev->write_lock); | 5519 | spin_unlock_irq(&mddev->write_lock); |
5476 | } | 5520 | } |
5477 | EXPORT_SYMBOL_GPL(md_allow_write); | 5521 | EXPORT_SYMBOL_GPL(md_allow_write); |
5478 | 5522 | ||
5479 | static DECLARE_WAIT_QUEUE_HEAD(resync_wait); | ||
5480 | |||
5481 | #define SYNC_MARKS 10 | 5523 | #define SYNC_MARKS 10 |
5482 | #define SYNC_MARK_STEP (3*HZ) | 5524 | #define SYNC_MARK_STEP (3*HZ) |
5483 | void md_do_sync(mddev_t *mddev) | 5525 | void md_do_sync(mddev_t *mddev) |
@@ -5541,8 +5583,9 @@ void md_do_sync(mddev_t *mddev) | |||
5541 | for_each_mddev(mddev2, tmp) { | 5583 | for_each_mddev(mddev2, tmp) { |
5542 | if (mddev2 == mddev) | 5584 | if (mddev2 == mddev) |
5543 | continue; | 5585 | continue; |
5544 | if (mddev2->curr_resync && | 5586 | if (!mddev->parallel_resync |
5545 | match_mddev_units(mddev,mddev2)) { | 5587 | && mddev2->curr_resync |
5588 | && match_mddev_units(mddev, mddev2)) { | ||
5546 | DEFINE_WAIT(wq); | 5589 | DEFINE_WAIT(wq); |
5547 | if (mddev < mddev2 && mddev->curr_resync == 2) { | 5590 | if (mddev < mddev2 && mddev->curr_resync == 2) { |
5548 | /* arbitrarily yield */ | 5591 | /* arbitrarily yield */ |
@@ -5647,7 +5690,7 @@ void md_do_sync(mddev_t *mddev) | |||
5647 | sectors = mddev->pers->sync_request(mddev, j, &skipped, | 5690 | sectors = mddev->pers->sync_request(mddev, j, &skipped, |
5648 | currspeed < speed_min(mddev)); | 5691 | currspeed < speed_min(mddev)); |
5649 | if (sectors == 0) { | 5692 | if (sectors == 0) { |
5650 | set_bit(MD_RECOVERY_ERR, &mddev->recovery); | 5693 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); |
5651 | goto out; | 5694 | goto out; |
5652 | } | 5695 | } |
5653 | 5696 | ||
@@ -5670,8 +5713,7 @@ void md_do_sync(mddev_t *mddev) | |||
5670 | 5713 | ||
5671 | last_check = io_sectors; | 5714 | last_check = io_sectors; |
5672 | 5715 | ||
5673 | if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) || | 5716 | if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) |
5674 | test_bit(MD_RECOVERY_ERR, &mddev->recovery)) | ||
5675 | break; | 5717 | break; |
5676 | 5718 | ||
5677 | repeat: | 5719 | repeat: |
@@ -5725,8 +5767,7 @@ void md_do_sync(mddev_t *mddev) | |||
5725 | /* tell personality that we are finished */ | 5767 | /* tell personality that we are finished */ |
5726 | mddev->pers->sync_request(mddev, max_sectors, &skipped, 1); | 5768 | mddev->pers->sync_request(mddev, max_sectors, &skipped, 1); |
5727 | 5769 | ||
5728 | if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) && | 5770 | if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && |
5729 | !test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && | ||
5730 | mddev->curr_resync > 2) { | 5771 | mddev->curr_resync > 2) { |
5731 | if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { | 5772 | if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { |
5732 | if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { | 5773 | if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { |
@@ -5795,7 +5836,10 @@ static int remove_and_add_spares(mddev_t *mddev) | |||
5795 | } | 5836 | } |
5796 | 5837 | ||
5797 | if (mddev->degraded) { | 5838 | if (mddev->degraded) { |
5798 | rdev_for_each(rdev, rtmp, mddev) | 5839 | rdev_for_each(rdev, rtmp, mddev) { |
5840 | if (rdev->raid_disk >= 0 && | ||
5841 | !test_bit(In_sync, &rdev->flags)) | ||
5842 | spares++; | ||
5799 | if (rdev->raid_disk < 0 | 5843 | if (rdev->raid_disk < 0 |
5800 | && !test_bit(Faulty, &rdev->flags)) { | 5844 | && !test_bit(Faulty, &rdev->flags)) { |
5801 | rdev->recovery_offset = 0; | 5845 | rdev->recovery_offset = 0; |
@@ -5813,6 +5857,7 @@ static int remove_and_add_spares(mddev_t *mddev) | |||
5813 | } else | 5857 | } else |
5814 | break; | 5858 | break; |
5815 | } | 5859 | } |
5860 | } | ||
5816 | } | 5861 | } |
5817 | return spares; | 5862 | return spares; |
5818 | } | 5863 | } |
@@ -5826,7 +5871,7 @@ static int remove_and_add_spares(mddev_t *mddev) | |||
5826 | * to do that as needed. | 5871 | * to do that as needed. |
5827 | * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in | 5872 | * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in |
5828 | * "->recovery" and create a thread at ->sync_thread. | 5873 | * "->recovery" and create a thread at ->sync_thread. |
5829 | * When the thread finishes it sets MD_RECOVERY_DONE (and might set MD_RECOVERY_ERR) | 5874 | * When the thread finishes it sets MD_RECOVERY_DONE |
5830 | * and wakeups up this thread which will reap the thread and finish up. | 5875 | * and wakeups up this thread which will reap the thread and finish up. |
5831 | * This thread also removes any faulty devices (with nr_pending == 0). | 5876 | * This thread also removes any faulty devices (with nr_pending == 0). |
5832 | * | 5877 | * |
@@ -5901,8 +5946,7 @@ void md_check_recovery(mddev_t *mddev) | |||
5901 | /* resync has finished, collect result */ | 5946 | /* resync has finished, collect result */ |
5902 | md_unregister_thread(mddev->sync_thread); | 5947 | md_unregister_thread(mddev->sync_thread); |
5903 | mddev->sync_thread = NULL; | 5948 | mddev->sync_thread = NULL; |
5904 | if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) && | 5949 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { |
5905 | !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { | ||
5906 | /* success...*/ | 5950 | /* success...*/ |
5907 | /* activate any spares */ | 5951 | /* activate any spares */ |
5908 | mddev->pers->spare_active(mddev); | 5952 | mddev->pers->spare_active(mddev); |
@@ -5926,7 +5970,6 @@ void md_check_recovery(mddev_t *mddev) | |||
5926 | * might be left set | 5970 | * might be left set |
5927 | */ | 5971 | */ |
5928 | clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); | 5972 | clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); |
5929 | clear_bit(MD_RECOVERY_ERR, &mddev->recovery); | ||
5930 | clear_bit(MD_RECOVERY_INTR, &mddev->recovery); | 5973 | clear_bit(MD_RECOVERY_INTR, &mddev->recovery); |
5931 | clear_bit(MD_RECOVERY_DONE, &mddev->recovery); | 5974 | clear_bit(MD_RECOVERY_DONE, &mddev->recovery); |
5932 | 5975 | ||
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 4f4d1f383842..e968116e0de9 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c | |||
@@ -327,7 +327,8 @@ static int multipath_remove_disk(mddev_t *mddev, int number) | |||
327 | if (rdev) { | 327 | if (rdev) { |
328 | if (test_bit(In_sync, &rdev->flags) || | 328 | if (test_bit(In_sync, &rdev->flags) || |
329 | atomic_read(&rdev->nr_pending)) { | 329 | atomic_read(&rdev->nr_pending)) { |
330 | printk(KERN_ERR "hot-remove-disk, slot %d is identified" " but is still operational!\n", number); | 330 | printk(KERN_ERR "hot-remove-disk, slot %d is identified" |
331 | " but is still operational!\n", number); | ||
331 | err = -EBUSY; | 332 | err = -EBUSY; |
332 | goto abort; | 333 | goto abort; |
333 | } | 334 | } |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index ac409b7d83f5..c610b947218a 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -773,7 +773,7 @@ static int make_request(struct request_queue *q, struct bio * bio) | |||
773 | r1bio_t *r1_bio; | 773 | r1bio_t *r1_bio; |
774 | struct bio *read_bio; | 774 | struct bio *read_bio; |
775 | int i, targets = 0, disks; | 775 | int i, targets = 0, disks; |
776 | struct bitmap *bitmap = mddev->bitmap; | 776 | struct bitmap *bitmap; |
777 | unsigned long flags; | 777 | unsigned long flags; |
778 | struct bio_list bl; | 778 | struct bio_list bl; |
779 | struct page **behind_pages = NULL; | 779 | struct page **behind_pages = NULL; |
@@ -802,6 +802,8 @@ static int make_request(struct request_queue *q, struct bio * bio) | |||
802 | 802 | ||
803 | wait_barrier(conf); | 803 | wait_barrier(conf); |
804 | 804 | ||
805 | bitmap = mddev->bitmap; | ||
806 | |||
805 | disk_stat_inc(mddev->gendisk, ios[rw]); | 807 | disk_stat_inc(mddev->gendisk, ios[rw]); |
806 | disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio)); | 808 | disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio)); |
807 | 809 | ||
@@ -1025,7 +1027,7 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1025 | /* | 1027 | /* |
1026 | * if recovery is running, make sure it aborts. | 1028 | * if recovery is running, make sure it aborts. |
1027 | */ | 1029 | */ |
1028 | set_bit(MD_RECOVERY_ERR, &mddev->recovery); | 1030 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); |
1029 | } else | 1031 | } else |
1030 | set_bit(Faulty, &rdev->flags); | 1032 | set_bit(Faulty, &rdev->flags); |
1031 | set_bit(MD_CHANGE_DEVS, &mddev->flags); | 1033 | set_bit(MD_CHANGE_DEVS, &mddev->flags); |
@@ -1146,6 +1148,14 @@ static int raid1_remove_disk(mddev_t *mddev, int number) | |||
1146 | err = -EBUSY; | 1148 | err = -EBUSY; |
1147 | goto abort; | 1149 | goto abort; |
1148 | } | 1150 | } |
1151 | /* Only remove non-faulty devices if recovery | ||
1152 | * is not possible. | ||
1153 | */ | ||
1154 | if (!test_bit(Faulty, &rdev->flags) && | ||
1155 | mddev->degraded < conf->raid_disks) { | ||
1156 | err = -EBUSY; | ||
1157 | goto abort; | ||
1158 | } | ||
1149 | p->rdev = NULL; | 1159 | p->rdev = NULL; |
1150 | synchronize_rcu(); | 1160 | synchronize_rcu(); |
1151 | if (atomic_read(&rdev->nr_pending)) { | 1161 | if (atomic_read(&rdev->nr_pending)) { |
@@ -1282,6 +1292,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) | |||
1282 | rdev_dec_pending(conf->mirrors[i].rdev, mddev); | 1292 | rdev_dec_pending(conf->mirrors[i].rdev, mddev); |
1283 | } else { | 1293 | } else { |
1284 | /* fixup the bio for reuse */ | 1294 | /* fixup the bio for reuse */ |
1295 | int size; | ||
1285 | sbio->bi_vcnt = vcnt; | 1296 | sbio->bi_vcnt = vcnt; |
1286 | sbio->bi_size = r1_bio->sectors << 9; | 1297 | sbio->bi_size = r1_bio->sectors << 9; |
1287 | sbio->bi_idx = 0; | 1298 | sbio->bi_idx = 0; |
@@ -1295,10 +1306,20 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) | |||
1295 | sbio->bi_sector = r1_bio->sector + | 1306 | sbio->bi_sector = r1_bio->sector + |
1296 | conf->mirrors[i].rdev->data_offset; | 1307 | conf->mirrors[i].rdev->data_offset; |
1297 | sbio->bi_bdev = conf->mirrors[i].rdev->bdev; | 1308 | sbio->bi_bdev = conf->mirrors[i].rdev->bdev; |
1298 | for (j = 0; j < vcnt ; j++) | 1309 | size = sbio->bi_size; |
1299 | memcpy(page_address(sbio->bi_io_vec[j].bv_page), | 1310 | for (j = 0; j < vcnt ; j++) { |
1311 | struct bio_vec *bi; | ||
1312 | bi = &sbio->bi_io_vec[j]; | ||
1313 | bi->bv_offset = 0; | ||
1314 | if (size > PAGE_SIZE) | ||
1315 | bi->bv_len = PAGE_SIZE; | ||
1316 | else | ||
1317 | bi->bv_len = size; | ||
1318 | size -= PAGE_SIZE; | ||
1319 | memcpy(page_address(bi->bv_page), | ||
1300 | page_address(pbio->bi_io_vec[j].bv_page), | 1320 | page_address(pbio->bi_io_vec[j].bv_page), |
1301 | PAGE_SIZE); | 1321 | PAGE_SIZE); |
1322 | } | ||
1302 | 1323 | ||
1303 | } | 1324 | } |
1304 | } | 1325 | } |
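In the raid1 sync_request_write() fixup above, each bio_vec is reset to offset 0 and the remaining byte count is split into page-sized chunks, so only the final vector may be short. A standalone sketch of that splitting loop, with a simplified struct vec in place of struct bio_vec.

    #include <stdio.h>

    #define PAGE_SIZE 4096

    struct vec { int len; int offset; };     /* simplified bio_vec */

    /* Lay 'size' bytes out page by page; only the last vector is short. */
    static void fixup_vecs(struct vec *v, int vcnt, int size)
    {
        int j;

        for (j = 0; j < vcnt; j++) {
            v[j].offset = 0;
            v[j].len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
            size -= PAGE_SIZE;
        }
    }

    int main(void)
    {
        struct vec v[3];
        int j;

        fixup_vecs(v, 3, 9000);              /* 9000 bytes over 3 pages */
        for (j = 0; j < 3; j++)
            printf("vec[%d]: len=%d offset=%d\n", j, v[j].len, v[j].offset);
        return 0;
    }
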
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 8536ede1e712..1de17da34a95 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -1020,7 +1020,7 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1020 | /* | 1020 | /* |
1021 | * if recovery is running, make sure it aborts. | 1021 | * if recovery is running, make sure it aborts. |
1022 | */ | 1022 | */ |
1023 | set_bit(MD_RECOVERY_ERR, &mddev->recovery); | 1023 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); |
1024 | } | 1024 | } |
1025 | set_bit(Faulty, &rdev->flags); | 1025 | set_bit(Faulty, &rdev->flags); |
1026 | set_bit(MD_CHANGE_DEVS, &mddev->flags); | 1026 | set_bit(MD_CHANGE_DEVS, &mddev->flags); |
@@ -1171,6 +1171,14 @@ static int raid10_remove_disk(mddev_t *mddev, int number) | |||
1171 | err = -EBUSY; | 1171 | err = -EBUSY; |
1172 | goto abort; | 1172 | goto abort; |
1173 | } | 1173 | } |
1174 | /* Only remove non-faulty devices if recovery | ||
1175 | * is not possible. | ||
1176 | */ | ||
1177 | if (!test_bit(Faulty, &rdev->flags) && | ||
1178 | enough(conf)) { | ||
1179 | err = -EBUSY; | ||
1180 | goto abort; | ||
1181 | } | ||
1174 | p->rdev = NULL; | 1182 | p->rdev = NULL; |
1175 | synchronize_rcu(); | 1183 | synchronize_rcu(); |
1176 | if (atomic_read(&rdev->nr_pending)) { | 1184 | if (atomic_read(&rdev->nr_pending)) { |
@@ -1237,6 +1245,7 @@ static void end_sync_write(struct bio *bio, int error) | |||
1237 | 1245 | ||
1238 | if (!uptodate) | 1246 | if (!uptodate) |
1239 | md_error(mddev, conf->mirrors[d].rdev); | 1247 | md_error(mddev, conf->mirrors[d].rdev); |
1248 | |||
1240 | update_head_pos(i, r10_bio); | 1249 | update_head_pos(i, r10_bio); |
1241 | 1250 | ||
1242 | while (atomic_dec_and_test(&r10_bio->remaining)) { | 1251 | while (atomic_dec_and_test(&r10_bio->remaining)) { |
@@ -1844,7 +1853,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i | |||
1844 | if (rb2) | 1853 | if (rb2) |
1845 | atomic_dec(&rb2->remaining); | 1854 | atomic_dec(&rb2->remaining); |
1846 | r10_bio = rb2; | 1855 | r10_bio = rb2; |
1847 | if (!test_and_set_bit(MD_RECOVERY_ERR, &mddev->recovery)) | 1856 | if (!test_and_set_bit(MD_RECOVERY_INTR, |
1857 | &mddev->recovery)) | ||
1848 | printk(KERN_INFO "raid10: %s: insufficient working devices for recovery.\n", | 1858 | printk(KERN_INFO "raid10: %s: insufficient working devices for recovery.\n", |
1849 | mdname(mddev)); | 1859 | mdname(mddev)); |
1850 | break; | 1860 | break; |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 93fde48c0f42..425958a76b84 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -94,6 +94,8 @@ | |||
94 | #define __inline__ | 94 | #define __inline__ |
95 | #endif | 95 | #endif |
96 | 96 | ||
97 | #define printk_rl(args...) ((void) (printk_ratelimit() && printk(args))) | ||
98 | |||
97 | #if !RAID6_USE_EMPTY_ZERO_PAGE | 99 | #if !RAID6_USE_EMPTY_ZERO_PAGE |
98 | /* In .bss so it's zeroed */ | 100 | /* In .bss so it's zeroed */ |
99 | const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256))); | 101 | const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256))); |
@@ -1143,10 +1145,12 @@ static void raid5_end_read_request(struct bio * bi, int error) | |||
1143 | set_bit(R5_UPTODATE, &sh->dev[i].flags); | 1145 | set_bit(R5_UPTODATE, &sh->dev[i].flags); |
1144 | if (test_bit(R5_ReadError, &sh->dev[i].flags)) { | 1146 | if (test_bit(R5_ReadError, &sh->dev[i].flags)) { |
1145 | rdev = conf->disks[i].rdev; | 1147 | rdev = conf->disks[i].rdev; |
1146 | printk(KERN_INFO "raid5:%s: read error corrected (%lu sectors at %llu on %s)\n", | 1148 | printk_rl(KERN_INFO "raid5:%s: read error corrected" |
1147 | mdname(conf->mddev), STRIPE_SECTORS, | 1149 | " (%lu sectors at %llu on %s)\n", |
1148 | (unsigned long long)(sh->sector + rdev->data_offset), | 1150 | mdname(conf->mddev), STRIPE_SECTORS, |
1149 | bdevname(rdev->bdev, b)); | 1151 | (unsigned long long)(sh->sector |
1152 | + rdev->data_offset), | ||
1153 | bdevname(rdev->bdev, b)); | ||
1150 | clear_bit(R5_ReadError, &sh->dev[i].flags); | 1154 | clear_bit(R5_ReadError, &sh->dev[i].flags); |
1151 | clear_bit(R5_ReWrite, &sh->dev[i].flags); | 1155 | clear_bit(R5_ReWrite, &sh->dev[i].flags); |
1152 | } | 1156 | } |
@@ -1160,16 +1164,22 @@ static void raid5_end_read_request(struct bio * bi, int error) | |||
1160 | clear_bit(R5_UPTODATE, &sh->dev[i].flags); | 1164 | clear_bit(R5_UPTODATE, &sh->dev[i].flags); |
1161 | atomic_inc(&rdev->read_errors); | 1165 | atomic_inc(&rdev->read_errors); |
1162 | if (conf->mddev->degraded) | 1166 | if (conf->mddev->degraded) |
1163 | printk(KERN_WARNING "raid5:%s: read error not correctable (sector %llu on %s).\n", | 1167 | printk_rl(KERN_WARNING |
1164 | mdname(conf->mddev), | 1168 | "raid5:%s: read error not correctable " |
1165 | (unsigned long long)(sh->sector + rdev->data_offset), | 1169 | "(sector %llu on %s).\n", |
1166 | bdn); | 1170 | mdname(conf->mddev), |
1171 | (unsigned long long)(sh->sector | ||
1172 | + rdev->data_offset), | ||
1173 | bdn); | ||
1167 | else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) | 1174 | else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) |
1168 | /* Oh, no!!! */ | 1175 | /* Oh, no!!! */ |
1169 | printk(KERN_WARNING "raid5:%s: read error NOT corrected!! (sector %llu on %s).\n", | 1176 | printk_rl(KERN_WARNING |
1170 | mdname(conf->mddev), | 1177 | "raid5:%s: read error NOT corrected!! " |
1171 | (unsigned long long)(sh->sector + rdev->data_offset), | 1178 | "(sector %llu on %s).\n", |
1172 | bdn); | 1179 | mdname(conf->mddev), |
1180 | (unsigned long long)(sh->sector | ||
1181 | + rdev->data_offset), | ||
1182 | bdn); | ||
1173 | else if (atomic_read(&rdev->read_errors) | 1183 | else if (atomic_read(&rdev->read_errors) |
1174 | > conf->max_nr_stripes) | 1184 | > conf->max_nr_stripes) |
1175 | printk(KERN_WARNING | 1185 | printk(KERN_WARNING |
@@ -1258,7 +1268,7 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1258 | /* | 1268 | /* |
1259 | * if recovery was running, make sure it aborts. | 1269 | * if recovery was running, make sure it aborts. |
1260 | */ | 1270 | */ |
1261 | set_bit(MD_RECOVERY_ERR, &mddev->recovery); | 1271 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); |
1262 | } | 1272 | } |
1263 | set_bit(Faulty, &rdev->flags); | 1273 | set_bit(Faulty, &rdev->flags); |
1264 | printk (KERN_ALERT | 1274 | printk (KERN_ALERT |
@@ -4564,6 +4574,14 @@ static int raid5_remove_disk(mddev_t *mddev, int number) | |||
4564 | err = -EBUSY; | 4574 | err = -EBUSY; |
4565 | goto abort; | 4575 | goto abort; |
4566 | } | 4576 | } |
4577 | /* Only remove non-faulty devices if recovery | ||
4578 | * isn't possible. | ||
4579 | */ | ||
4580 | if (!test_bit(Faulty, &rdev->flags) && | ||
4581 | mddev->degraded <= conf->max_degraded) { | ||
4582 | err = -EBUSY; | ||
4583 | goto abort; | ||
4584 | } | ||
4567 | p->rdev = NULL; | 4585 | p->rdev = NULL; |
4568 | synchronize_rcu(); | 4586 | synchronize_rcu(); |
4569 | if (atomic_read(&rdev->nr_pending)) { | 4587 | if (atomic_read(&rdev->nr_pending)) { |
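raid5's new printk_rl() macro evaluates the printk() only when printk_ratelimit() allows it, so a burst of read errors cannot flood the log. A userspace approximation of the same short-circuit shape; the three-message budget below is a toy rate limiter, not the kernel's.

    #include <stdio.h>

    /* Toy rate limiter: allow the first three messages, then go quiet. */
    static int ratelimit_ok(void)
    {
        static int budget = 3;
        return budget-- > 0;
    }

    /* Same shape as the driver's macro: && short-circuits the print. */
    #define printk_rl(...) ((void)(ratelimit_ok() && printf(__VA_ARGS__)))

    int main(void)
    {
        int i;

        for (i = 0; i < 10; i++)
            printk_rl("read error corrected (sector %d)\n", i);
        return 0;
    }
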
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c index 87d8795823d7..b9d097c9f6bb 100644 --- a/drivers/net/3c509.c +++ b/drivers/net/3c509.c | |||
@@ -1062,7 +1062,6 @@ el3_rx(struct net_device *dev) | |||
1062 | struct sk_buff *skb; | 1062 | struct sk_buff *skb; |
1063 | 1063 | ||
1064 | skb = dev_alloc_skb(pkt_len+5); | 1064 | skb = dev_alloc_skb(pkt_len+5); |
1065 | dev->stats.rx_bytes += pkt_len; | ||
1066 | if (el3_debug > 4) | 1065 | if (el3_debug > 4) |
1067 | printk("Receiving packet size %d status %4.4x.\n", | 1066 | printk("Receiving packet size %d status %4.4x.\n", |
1068 | pkt_len, rx_status); | 1067 | pkt_len, rx_status); |
@@ -1077,6 +1076,7 @@ el3_rx(struct net_device *dev) | |||
1077 | skb->protocol = eth_type_trans(skb,dev); | 1076 | skb->protocol = eth_type_trans(skb,dev); |
1078 | netif_rx(skb); | 1077 | netif_rx(skb); |
1079 | dev->last_rx = jiffies; | 1078 | dev->last_rx = jiffies; |
1079 | dev->stats.rx_bytes += pkt_len; | ||
1080 | dev->stats.rx_packets++; | 1080 | dev->stats.rx_packets++; |
1081 | continue; | 1081 | continue; |
1082 | } | 1082 | } |
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c index 3634b5fd7919..7023d77bf380 100644 --- a/drivers/net/au1000_eth.c +++ b/drivers/net/au1000_eth.c | |||
@@ -1239,12 +1239,7 @@ static int au1000_rx(struct net_device *dev) | |||
1239 | */ | 1239 | */ |
1240 | static irqreturn_t au1000_interrupt(int irq, void *dev_id) | 1240 | static irqreturn_t au1000_interrupt(int irq, void *dev_id) |
1241 | { | 1241 | { |
1242 | struct net_device *dev = (struct net_device *) dev_id; | 1242 | struct net_device *dev = dev_id; |
1243 | |||
1244 | if (dev == NULL) { | ||
1245 | printk(KERN_ERR "%s: isr: null dev ptr\n", dev->name); | ||
1246 | return IRQ_RETVAL(1); | ||
1247 | } | ||
1248 | 1243 | ||
1249 | /* Handle RX interrupts first to minimize chance of overrun */ | 1244 | /* Handle RX interrupts first to minimize chance of overrun */ |
1250 | 1245 | ||
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c index 89c0018132ec..41443435ab1c 100644 --- a/drivers/net/bfin_mac.c +++ b/drivers/net/bfin_mac.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/crc32.h> | 22 | #include <linux/crc32.h> |
23 | #include <linux/device.h> | 23 | #include <linux/device.h> |
24 | #include <linux/spinlock.h> | 24 | #include <linux/spinlock.h> |
25 | #include <linux/ethtool.h> | ||
26 | #include <linux/mii.h> | 25 | #include <linux/mii.h> |
27 | #include <linux/phy.h> | 26 | #include <linux/phy.h> |
28 | #include <linux/netdevice.h> | 27 | #include <linux/netdevice.h> |
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c index 93e13636f8dd..83768df27806 100644 --- a/drivers/net/cassini.c +++ b/drivers/net/cassini.c | |||
@@ -142,8 +142,8 @@ | |||
142 | 142 | ||
143 | #define DRV_MODULE_NAME "cassini" | 143 | #define DRV_MODULE_NAME "cassini" |
144 | #define PFX DRV_MODULE_NAME ": " | 144 | #define PFX DRV_MODULE_NAME ": " |
145 | #define DRV_MODULE_VERSION "1.5" | 145 | #define DRV_MODULE_VERSION "1.6" |
146 | #define DRV_MODULE_RELDATE "4 Jan 2008" | 146 | #define DRV_MODULE_RELDATE "21 May 2008" |
147 | 147 | ||
148 | #define CAS_DEF_MSG_ENABLE \ | 148 | #define CAS_DEF_MSG_ENABLE \ |
149 | (NETIF_MSG_DRV | \ | 149 | (NETIF_MSG_DRV | \ |
@@ -2136,9 +2136,12 @@ end_copy_pkt: | |||
2136 | if (addr) | 2136 | if (addr) |
2137 | cas_page_unmap(addr); | 2137 | cas_page_unmap(addr); |
2138 | } | 2138 | } |
2139 | skb->csum = csum_unfold(~csum); | ||
2140 | skb->ip_summed = CHECKSUM_COMPLETE; | ||
2141 | skb->protocol = eth_type_trans(skb, cp->dev); | 2139 | skb->protocol = eth_type_trans(skb, cp->dev); |
2140 | if (skb->protocol == htons(ETH_P_IP)) { | ||
2141 | skb->csum = csum_unfold(~csum); | ||
2142 | skb->ip_summed = CHECKSUM_COMPLETE; | ||
2143 | } else | ||
2144 | skb->ip_summed = CHECKSUM_NONE; | ||
2142 | return len; | 2145 | return len; |
2143 | } | 2146 | } |
2144 | 2147 | ||
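The cassini change above trusts the hardware checksum only for IPv4 frames and marks everything else CHECKSUM_NONE so the stack verifies it in software. A reduced sketch of that branch; struct fake_skb, the constant values and the plain complement standing in for csum_unfold() are illustrative only.

    #include <stdint.h>
    #include <stdio.h>

    #define ETH_P_IP           0x0800
    #define CHECKSUM_NONE      0
    #define CHECKSUM_COMPLETE  2

    struct fake_skb {
        uint16_t protocol;      /* ethertype after eth_type_trans()   */
        uint16_t csum;          /* folded hardware checksum           */
        int      ip_summed;
    };

    static void set_rx_csum(struct fake_skb *skb, uint16_t hw_csum)
    {
        if (skb->protocol == ETH_P_IP) {
            skb->csum = (uint16_t)~hw_csum;  /* stand-in for csum_unfold(~csum) */
            skb->ip_summed = CHECKSUM_COMPLETE;
        } else {
            skb->ip_summed = CHECKSUM_NONE;  /* let the stack verify it */
        }
    }

    int main(void)
    {
        struct fake_skb ip  = { .protocol = ETH_P_IP };
        struct fake_skb arp = { .protocol = 0x0806 };

        set_rx_csum(&ip, 0x1234);
        set_rx_csum(&arp, 0x1234);
        printf("IPv4 ip_summed=%d, non-IP ip_summed=%d\n",
               ip.ip_summed, arp.ip_summed);
        return 0;
    }
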
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c index 2b5740b3d182..7f3f62e1b113 100644 --- a/drivers/net/cpmac.c +++ b/drivers/net/cpmac.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/platform_device.h> | 38 | #include <linux/platform_device.h> |
39 | #include <linux/dma-mapping.h> | 39 | #include <linux/dma-mapping.h> |
40 | #include <asm/gpio.h> | 40 | #include <asm/gpio.h> |
41 | #include <asm/atomic.h> | ||
41 | 42 | ||
42 | MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>"); | 43 | MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>"); |
43 | MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)"); | 44 | MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)"); |
@@ -187,6 +188,7 @@ struct cpmac_desc { | |||
187 | #define CPMAC_EOQ 0x1000 | 188 | #define CPMAC_EOQ 0x1000 |
188 | struct sk_buff *skb; | 189 | struct sk_buff *skb; |
189 | struct cpmac_desc *next; | 190 | struct cpmac_desc *next; |
191 | struct cpmac_desc *prev; | ||
190 | dma_addr_t mapping; | 192 | dma_addr_t mapping; |
191 | dma_addr_t data_mapping; | 193 | dma_addr_t data_mapping; |
192 | }; | 194 | }; |
@@ -208,6 +210,7 @@ struct cpmac_priv { | |||
208 | struct work_struct reset_work; | 210 | struct work_struct reset_work; |
209 | struct platform_device *pdev; | 211 | struct platform_device *pdev; |
210 | struct napi_struct napi; | 212 | struct napi_struct napi; |
213 | atomic_t reset_pending; | ||
211 | }; | 214 | }; |
212 | 215 | ||
213 | static irqreturn_t cpmac_irq(int, void *); | 216 | static irqreturn_t cpmac_irq(int, void *); |
@@ -241,6 +244,16 @@ static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc) | |||
241 | printk("\n"); | 244 | printk("\n"); |
242 | } | 245 | } |
243 | 246 | ||
247 | static void cpmac_dump_all_desc(struct net_device *dev) | ||
248 | { | ||
249 | struct cpmac_priv *priv = netdev_priv(dev); | ||
250 | struct cpmac_desc *dump = priv->rx_head; | ||
251 | do { | ||
252 | cpmac_dump_desc(dev, dump); | ||
253 | dump = dump->next; | ||
254 | } while (dump != priv->rx_head); | ||
255 | } | ||
256 | |||
244 | static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb) | 257 | static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb) |
245 | { | 258 | { |
246 | int i; | 259 | int i; |
@@ -412,21 +425,42 @@ static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv, | |||
412 | static int cpmac_poll(struct napi_struct *napi, int budget) | 425 | static int cpmac_poll(struct napi_struct *napi, int budget) |
413 | { | 426 | { |
414 | struct sk_buff *skb; | 427 | struct sk_buff *skb; |
415 | struct cpmac_desc *desc; | 428 | struct cpmac_desc *desc, *restart; |
416 | int received = 0; | ||
417 | struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi); | 429 | struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi); |
430 | int received = 0, processed = 0; | ||
418 | 431 | ||
419 | spin_lock(&priv->rx_lock); | 432 | spin_lock(&priv->rx_lock); |
420 | if (unlikely(!priv->rx_head)) { | 433 | if (unlikely(!priv->rx_head)) { |
421 | if (netif_msg_rx_err(priv) && net_ratelimit()) | 434 | if (netif_msg_rx_err(priv) && net_ratelimit()) |
422 | printk(KERN_WARNING "%s: rx: polling, but no queue\n", | 435 | printk(KERN_WARNING "%s: rx: polling, but no queue\n", |
423 | priv->dev->name); | 436 | priv->dev->name); |
437 | spin_unlock(&priv->rx_lock); | ||
424 | netif_rx_complete(priv->dev, napi); | 438 | netif_rx_complete(priv->dev, napi); |
425 | return 0; | 439 | return 0; |
426 | } | 440 | } |
427 | 441 | ||
428 | desc = priv->rx_head; | 442 | desc = priv->rx_head; |
443 | restart = NULL; | ||
429 | while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) { | 444 | while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) { |
445 | processed++; | ||
446 | |||
447 | if ((desc->dataflags & CPMAC_EOQ) != 0) { | ||
448 | /* The last update to eoq->hw_next didn't happen | ||
449 | * soon enough, and the receiver stopped here. | ||
450 | * Remember this descriptor so we can restart | ||
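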
451 | * the receiver after freeing some space. | ||
452 | */ | ||
453 | if (unlikely(restart)) { | ||
454 | if (netif_msg_rx_err(priv)) | ||
455 | printk(KERN_ERR "%s: poll found a" | ||
456 | " duplicate EOQ: %p and %p\n", | ||
457 | priv->dev->name, restart, desc); | ||
458 | goto fatal_error; | ||
459 | } | ||
460 | |||
461 | restart = desc->next; | ||
462 | } | ||
463 | |||
430 | skb = cpmac_rx_one(priv, desc); | 464 | skb = cpmac_rx_one(priv, desc); |
431 | if (likely(skb)) { | 465 | if (likely(skb)) { |
432 | netif_receive_skb(skb); | 466 | netif_receive_skb(skb); |
@@ -435,19 +469,90 @@ static int cpmac_poll(struct napi_struct *napi, int budget) | |||
435 | desc = desc->next; | 469 | desc = desc->next; |
436 | } | 470 | } |
437 | 471 | ||
472 | if (desc != priv->rx_head) { | ||
473 | /* We freed some buffers, but not the whole ring, | ||
474 | * add what we did free to the rx list */ | ||
475 | desc->prev->hw_next = (u32)0; | ||
476 | priv->rx_head->prev->hw_next = priv->rx_head->mapping; | ||
477 | } | ||
478 | |||
479 | /* Optimization: If we did not actually process an EOQ (perhaps because | ||
480 | * of quota limits), check to see if the tail of the queue has EOQ set. | ||
481 | * We should immediately restart in that case so that the receiver can | ||
482 | * restart and run in parallel with more packet processing. | ||
483 | * This lets us handle slightly larger bursts before running | ||
484 | * out of ring space (assuming dev->weight < ring_size) */ | ||
485 | |||
486 | if (!restart && | ||
487 | (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ)) | ||
488 | == CPMAC_EOQ && | ||
489 | (priv->rx_head->dataflags & CPMAC_OWN) != 0) { | ||
490 | /* reset EOQ so the poll loop (above) doesn't try to | ||
491 | * restart this when it eventually gets to this descriptor. | ||
492 | */ | ||
493 | priv->rx_head->prev->dataflags &= ~CPMAC_EOQ; | ||
494 | restart = priv->rx_head; | ||
495 | } | ||
496 | |||
497 | if (restart) { | ||
498 | priv->dev->stats.rx_errors++; | ||
499 | priv->dev->stats.rx_fifo_errors++; | ||
500 | if (netif_msg_rx_err(priv) && net_ratelimit()) | ||
501 | printk(KERN_WARNING "%s: rx dma ring overrun\n", | ||
502 | priv->dev->name); | ||
503 | |||
504 | if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) { | ||
505 | if (netif_msg_drv(priv)) | ||
506 | printk(KERN_ERR "%s: cpmac_poll is trying to " | ||
507 | "restart rx from a descriptor that's " | ||
508 | "not free: %p\n", | ||
509 | priv->dev->name, restart); | ||
510 | goto fatal_error; | ||
511 | } | ||
512 | |||
513 | cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping); | ||
514 | } | ||
515 | |||
438 | priv->rx_head = desc; | 516 | priv->rx_head = desc; |
439 | spin_unlock(&priv->rx_lock); | 517 | spin_unlock(&priv->rx_lock); |
440 | if (unlikely(netif_msg_rx_status(priv))) | 518 | if (unlikely(netif_msg_rx_status(priv))) |
441 | printk(KERN_DEBUG "%s: poll processed %d packets\n", | 519 | printk(KERN_DEBUG "%s: poll processed %d packets\n", |
442 | priv->dev->name, received); | 520 | priv->dev->name, received); |
443 | if (desc->dataflags & CPMAC_OWN) { | 521 | if (processed == 0) { |
522 | /* we ran out of packets to read, | ||
523 | * revert to interrupt-driven mode */ | ||
444 | netif_rx_complete(priv->dev, napi); | 524 | netif_rx_complete(priv->dev, napi); |
445 | cpmac_write(priv->regs, CPMAC_RX_PTR(0), (u32)desc->mapping); | ||
446 | cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); | 525 | cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); |
447 | return 0; | 526 | return 0; |
448 | } | 527 | } |
449 | 528 | ||
450 | return 1; | 529 | return 1; |
530 | |||
531 | fatal_error: | ||
532 | /* Something went horribly wrong. | ||
533 | * Reset hardware to try to recover rather than wedging. */ | ||
534 | |||
535 | if (netif_msg_drv(priv)) { | ||
536 | printk(KERN_ERR "%s: cpmac_poll is confused. " | ||
537 | "Resetting hardware\n", priv->dev->name); | ||
538 | cpmac_dump_all_desc(priv->dev); | ||
539 | printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n", | ||
540 | priv->dev->name, | ||
541 | cpmac_read(priv->regs, CPMAC_RX_PTR(0)), | ||
542 | cpmac_read(priv->regs, CPMAC_RX_ACK(0))); | ||
543 | } | ||
544 | |||
545 | spin_unlock(&priv->rx_lock); | ||
546 | netif_rx_complete(priv->dev, napi); | ||
547 | netif_stop_queue(priv->dev); | ||
548 | napi_disable(&priv->napi); | ||
549 | |||
550 | atomic_inc(&priv->reset_pending); | ||
551 | cpmac_hw_stop(priv->dev); | ||
552 | if (!schedule_work(&priv->reset_work)) | ||
553 | atomic_dec(&priv->reset_pending); | ||
554 | return 0; | ||
555 | |||
451 | } | 556 | } |
452 | 557 | ||
453 | static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev) | 558 | static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev) |
@@ -456,6 +561,9 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
456 | struct cpmac_desc *desc; | 561 | struct cpmac_desc *desc; |
457 | struct cpmac_priv *priv = netdev_priv(dev); | 562 | struct cpmac_priv *priv = netdev_priv(dev); |
458 | 563 | ||
564 | if (unlikely(atomic_read(&priv->reset_pending))) | ||
565 | return NETDEV_TX_BUSY; | ||
566 | |||
459 | if (unlikely(skb_padto(skb, ETH_ZLEN))) | 567 | if (unlikely(skb_padto(skb, ETH_ZLEN))) |
460 | return NETDEV_TX_OK; | 568 | return NETDEV_TX_OK; |
461 | 569 | ||
@@ -621,8 +729,10 @@ static void cpmac_clear_rx(struct net_device *dev) | |||
621 | desc->dataflags = CPMAC_OWN; | 729 | desc->dataflags = CPMAC_OWN; |
622 | dev->stats.rx_dropped++; | 730 | dev->stats.rx_dropped++; |
623 | } | 731 | } |
732 | desc->hw_next = desc->next->mapping; | ||
624 | desc = desc->next; | 733 | desc = desc->next; |
625 | } | 734 | } |
735 | priv->rx_head->prev->hw_next = 0; | ||
626 | } | 736 | } |
627 | 737 | ||
628 | static void cpmac_clear_tx(struct net_device *dev) | 738 | static void cpmac_clear_tx(struct net_device *dev) |
@@ -635,14 +745,14 @@ static void cpmac_clear_tx(struct net_device *dev) | |||
635 | priv->desc_ring[i].dataflags = 0; | 745 | priv->desc_ring[i].dataflags = 0; |
636 | if (priv->desc_ring[i].skb) { | 746 | if (priv->desc_ring[i].skb) { |
637 | dev_kfree_skb_any(priv->desc_ring[i].skb); | 747 | dev_kfree_skb_any(priv->desc_ring[i].skb); |
638 | if (netif_subqueue_stopped(dev, i)) | 748 | priv->desc_ring[i].skb = NULL; |
639 | netif_wake_subqueue(dev, i); | ||
640 | } | 749 | } |
641 | } | 750 | } |
642 | } | 751 | } |
643 | 752 | ||
644 | static void cpmac_hw_error(struct work_struct *work) | 753 | static void cpmac_hw_error(struct work_struct *work) |
645 | { | 754 | { |
755 | int i; | ||
646 | struct cpmac_priv *priv = | 756 | struct cpmac_priv *priv = |
647 | container_of(work, struct cpmac_priv, reset_work); | 757 | container_of(work, struct cpmac_priv, reset_work); |
648 | 758 | ||
@@ -651,8 +761,48 @@ static void cpmac_hw_error(struct work_struct *work) | |||
651 | spin_unlock(&priv->rx_lock); | 761 | spin_unlock(&priv->rx_lock); |
652 | cpmac_clear_tx(priv->dev); | 762 | cpmac_clear_tx(priv->dev); |
653 | cpmac_hw_start(priv->dev); | 763 | cpmac_hw_start(priv->dev); |
654 | napi_enable(&priv->napi); | 764 | barrier(); |
655 | netif_start_queue(priv->dev); | 765 | atomic_dec(&priv->reset_pending); |
766 | |||
767 | for (i = 0; i < CPMAC_QUEUES; i++) | ||
768 | netif_wake_subqueue(priv->dev, i); | ||
769 | netif_wake_queue(priv->dev); | ||
770 | cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3); | ||
771 | } | ||
772 | |||
773 | static void cpmac_check_status(struct net_device *dev) | ||
774 | { | ||
775 | struct cpmac_priv *priv = netdev_priv(dev); | ||
776 | |||
777 | u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS); | ||
778 | int rx_channel = (macstatus >> 8) & 7; | ||
779 | int rx_code = (macstatus >> 12) & 15; | ||
780 | int tx_channel = (macstatus >> 16) & 7; | ||
781 | int tx_code = (macstatus >> 20) & 15; | ||
782 | |||
783 | if (rx_code || tx_code) { | ||
784 | if (netif_msg_drv(priv) && net_ratelimit()) { | ||
785 | /* Can't find any documentation on what these | ||
786 | * error codes actually are. So just log them and hope.. | ||
787 | */ | ||
788 | if (rx_code) | ||
789 | printk(KERN_WARNING "%s: host error %d on rx " | ||
790 | "channel %d (macstatus %08x), resetting\n", | ||
791 | dev->name, rx_code, rx_channel, macstatus); | ||
792 | if (tx_code) | ||
793 | printk(KERN_WARNING "%s: host error %d on tx " | ||
794 | "channel %d (macstatus %08x), resetting\n", | ||
795 | dev->name, tx_code, tx_channel, macstatus); | ||
796 | } | ||
797 | |||
798 | netif_stop_queue(dev); | ||
799 | cpmac_hw_stop(dev); | ||
800 | if (schedule_work(&priv->reset_work)) | ||
801 | atomic_inc(&priv->reset_pending); | ||
802 | if (unlikely(netif_msg_hw(priv))) | ||
803 | cpmac_dump_regs(dev); | ||
804 | } | ||
805 | cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); | ||
656 | } | 806 | } |
657 | 807 | ||
658 | static irqreturn_t cpmac_irq(int irq, void *dev_id) | 808 | static irqreturn_t cpmac_irq(int irq, void *dev_id) |
@@ -683,49 +833,32 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id) | |||
683 | 833 | ||
684 | cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0); | 834 | cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0); |
685 | 835 | ||
686 | if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS))) { | 836 | if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS))) |
687 | if (netif_msg_drv(priv) && net_ratelimit()) | 837 | cpmac_check_status(dev); |
688 | printk(KERN_ERR "%s: hw error, resetting...\n", | ||
689 | dev->name); | ||
690 | netif_stop_queue(dev); | ||
691 | napi_disable(&priv->napi); | ||
692 | cpmac_hw_stop(dev); | ||
693 | schedule_work(&priv->reset_work); | ||
694 | if (unlikely(netif_msg_hw(priv))) | ||
695 | cpmac_dump_regs(dev); | ||
696 | } | ||
697 | 838 | ||
698 | return IRQ_HANDLED; | 839 | return IRQ_HANDLED; |
699 | } | 840 | } |
700 | 841 | ||
701 | static void cpmac_tx_timeout(struct net_device *dev) | 842 | static void cpmac_tx_timeout(struct net_device *dev) |
702 | { | 843 | { |
703 | struct cpmac_priv *priv = netdev_priv(dev); | ||
704 | int i; | 844 | int i; |
845 | struct cpmac_priv *priv = netdev_priv(dev); | ||
705 | 846 | ||
706 | spin_lock(&priv->lock); | 847 | spin_lock(&priv->lock); |
707 | dev->stats.tx_errors++; | 848 | dev->stats.tx_errors++; |
708 | spin_unlock(&priv->lock); | 849 | spin_unlock(&priv->lock); |
709 | if (netif_msg_tx_err(priv) && net_ratelimit()) | 850 | if (netif_msg_tx_err(priv) && net_ratelimit()) |
710 | printk(KERN_WARNING "%s: transmit timeout\n", dev->name); | 851 | printk(KERN_WARNING "%s: transmit timeout\n", dev->name); |
711 | /* | 852 | |
712 | * FIXME: waking up random queue is not the best thing to | 853 | atomic_inc(&priv->reset_pending); |
713 | * do... on the other hand why we got here at all? | 854 | barrier(); |
714 | */ | 855 | cpmac_clear_tx(dev); |
715 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | 856 | barrier(); |
857 | atomic_dec(&priv->reset_pending); | ||
858 | |||
859 | netif_wake_queue(priv->dev); | ||
716 | for (i = 0; i < CPMAC_QUEUES; i++) | 860 | for (i = 0; i < CPMAC_QUEUES; i++) |
717 | if (priv->desc_ring[i].skb) { | 861 | netif_wake_subqueue(dev, i); |
718 | priv->desc_ring[i].dataflags = 0; | ||
719 | dev_kfree_skb_any(priv->desc_ring[i].skb); | ||
720 | netif_wake_subqueue(dev, i); | ||
721 | break; | ||
722 | } | ||
723 | #else | ||
724 | priv->desc_ring[0].dataflags = 0; | ||
725 | if (priv->desc_ring[0].skb) | ||
726 | dev_kfree_skb_any(priv->desc_ring[0].skb); | ||
727 | netif_wake_queue(dev); | ||
728 | #endif | ||
729 | } | 862 | } |
730 | 863 | ||
731 | static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | 864 | static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
@@ -901,9 +1034,12 @@ static int cpmac_open(struct net_device *dev) | |||
901 | desc->buflen = CPMAC_SKB_SIZE; | 1034 | desc->buflen = CPMAC_SKB_SIZE; |
902 | desc->dataflags = CPMAC_OWN; | 1035 | desc->dataflags = CPMAC_OWN; |
903 | desc->next = &priv->rx_head[(i + 1) % priv->ring_size]; | 1036 | desc->next = &priv->rx_head[(i + 1) % priv->ring_size]; |
1037 | desc->next->prev = desc; | ||
904 | desc->hw_next = (u32)desc->next->mapping; | 1038 | desc->hw_next = (u32)desc->next->mapping; |
905 | } | 1039 | } |
906 | 1040 | ||
1041 | priv->rx_head->prev->hw_next = (u32)0; | ||
1042 | |||
907 | if ((res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, | 1043 | if ((res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, |
908 | dev->name, dev))) { | 1044 | dev->name, dev))) { |
909 | if (netif_msg_drv(priv)) | 1045 | if (netif_msg_drv(priv)) |
@@ -912,6 +1048,7 @@ static int cpmac_open(struct net_device *dev) | |||
912 | goto fail_irq; | 1048 | goto fail_irq; |
913 | } | 1049 | } |
914 | 1050 | ||
1051 | atomic_set(&priv->reset_pending, 0); | ||
915 | INIT_WORK(&priv->reset_work, cpmac_hw_error); | 1052 | INIT_WORK(&priv->reset_work, cpmac_hw_error); |
916 | cpmac_hw_start(dev); | 1053 | cpmac_hw_start(dev); |
917 | 1054 | ||
@@ -1007,21 +1144,10 @@ static int __devinit cpmac_probe(struct platform_device *pdev) | |||
1007 | 1144 | ||
1008 | if (phy_id == PHY_MAX_ADDR) { | 1145 | if (phy_id == PHY_MAX_ADDR) { |
1009 | if (external_switch || dumb_switch) { | 1146 | if (external_switch || dumb_switch) { |
1010 | struct fixed_phy_status status = {}; | 1147 | mdio_bus_id = 0; /* fixed phys bus */ |
1011 | 1148 | phy_id = pdev->id; | |
1012 | /* | ||
1013 | * FIXME: this should be in the platform code! | ||
1014 | * Since there is not platform code at all (that is, | ||
1015 | * no mainline users of that driver), place it here | ||
1016 | * for now. | ||
1017 | */ | ||
1018 | phy_id = 0; | ||
1019 | status.link = 1; | ||
1020 | status.duplex = 1; | ||
1021 | status.speed = 100; | ||
1022 | fixed_phy_add(PHY_POLL, phy_id, &status); | ||
1023 | } else { | 1149 | } else { |
1024 | printk(KERN_ERR "cpmac: no PHY present\n"); | 1150 | dev_err(&pdev->dev, "no PHY present\n"); |
1025 | return -ENODEV; | 1151 | return -ENODEV; |
1026 | } | 1152 | } |
1027 | } | 1153 | } |
@@ -1064,10 +1190,8 @@ static int __devinit cpmac_probe(struct platform_device *pdev) | |||
1064 | priv->msg_enable = netif_msg_init(debug_level, 0xff); | 1190 | priv->msg_enable = netif_msg_init(debug_level, 0xff); |
1065 | memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr)); | 1191 | memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr)); |
1066 | 1192 | ||
1067 | snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id); | 1193 | priv->phy = phy_connect(dev, cpmac_mii.phy_map[phy_id]->dev.bus_id, |
1068 | 1194 | &cpmac_adjust_link, 0, PHY_INTERFACE_MODE_MII); | |
1069 | priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, 0, | ||
1070 | PHY_INTERFACE_MODE_MII); | ||
1071 | if (IS_ERR(priv->phy)) { | 1195 | if (IS_ERR(priv->phy)) { |
1072 | if (netif_msg_drv(priv)) | 1196 | if (netif_msg_drv(priv)) |
1073 | printk(KERN_ERR "%s: Could not attach to PHY\n", | 1197 | printk(KERN_ERR "%s: Could not attach to PHY\n", |
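Several of the cpmac paths above coordinate through the new atomic reset_pending counter: the transmit path backs off while a reset is queued, and the error and timeout handlers bump the counter before stopping the hardware and drop it once the queues are woken again. A C11-atomics sketch of that gating idea; start_xmit() and handle_hw_error() are invented names, not the driver's.

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int reset_pending;

    /* Transmit path: refuse work while a reset is queued or running. */
    static int start_xmit(int pkt)
    {
        if (atomic_load(&reset_pending))
            return -1;                      /* plays the NETDEV_TX_BUSY role */
        printf("sent packet %d\n", pkt);
        return 0;
    }

    /* Error path: take a reference before stopping, drop it after restart. */
    static void handle_hw_error(void)
    {
        atomic_fetch_add(&reset_pending, 1);
        printf("hardware stopped, reset scheduled\n");
        /* ... reset work would run here ... */
        atomic_fetch_sub(&reset_pending, 1);
        printf("hardware restarted, queues woken\n");
    }

    int main(void)
    {
        start_xmit(1);
        atomic_fetch_add(&reset_pending, 1);
        printf("xmit during reset -> %d\n", start_xmit(2));
        atomic_fetch_sub(&reset_pending, 1);
        handle_hw_error();
        start_xmit(3);
        return 0;
    }

Using a counter rather than a plain flag lets each path that scheduled a reset balance its own increment with a matching decrement.
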
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index d45bcd2660af..864295e081b6 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c | |||
@@ -903,7 +903,7 @@ dm9000_stop(struct net_device *ndev) | |||
903 | if (netif_msg_ifdown(db)) | 903 | if (netif_msg_ifdown(db)) |
904 | dev_dbg(db->dev, "shutting down %s\n", ndev->name); | 904 | dev_dbg(db->dev, "shutting down %s\n", ndev->name); |
905 | 905 | ||
906 | cancel_delayed_work(&db->phy_poll); | 906 | cancel_delayed_work_sync(&db->phy_poll); |
907 | 907 | ||
908 | netif_stop_queue(ndev); | 908 | netif_stop_queue(ndev); |
909 | netif_carrier_off(ndev); | 909 | netif_carrier_off(ndev); |
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 8cbb40f3a506..cab1835173cd 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -4201,8 +4201,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
4201 | struct e1000_adapter *adapter; | 4201 | struct e1000_adapter *adapter; |
4202 | struct e1000_hw *hw; | 4202 | struct e1000_hw *hw; |
4203 | const struct e1000_info *ei = e1000_info_tbl[ent->driver_data]; | 4203 | const struct e1000_info *ei = e1000_info_tbl[ent->driver_data]; |
4204 | unsigned long mmio_start, mmio_len; | 4204 | resource_size_t mmio_start, mmio_len; |
4205 | unsigned long flash_start, flash_len; | 4205 | resource_size_t flash_start, flash_len; |
4206 | 4206 | ||
4207 | static int cards_found; | 4207 | static int cards_found; |
4208 | int i, err, pci_using_dac; | 4208 | int i, err, pci_using_dac; |
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index d1b6d4e7495d..287a61918739 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c | |||
@@ -2213,8 +2213,6 @@ static void ehea_vlan_rx_register(struct net_device *dev, | |||
2213 | goto out; | 2213 | goto out; |
2214 | } | 2214 | } |
2215 | 2215 | ||
2216 | memset(cb1->vlan_filter, 0, sizeof(cb1->vlan_filter)); | ||
2217 | |||
2218 | hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, | 2216 | hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, |
2219 | H_PORT_CB1, H_PORT_CB1_ALL, cb1); | 2217 | H_PORT_CB1, H_PORT_CB1_ALL, cb1); |
2220 | if (hret != H_SUCCESS) | 2218 | if (hret != H_SUCCESS) |
@@ -3178,11 +3176,12 @@ out_err: | |||
3178 | 3176 | ||
3179 | static void ehea_shutdown_single_port(struct ehea_port *port) | 3177 | static void ehea_shutdown_single_port(struct ehea_port *port) |
3180 | { | 3178 | { |
3179 | struct ehea_adapter *adapter = port->adapter; | ||
3181 | unregister_netdev(port->netdev); | 3180 | unregister_netdev(port->netdev); |
3182 | ehea_unregister_port(port); | 3181 | ehea_unregister_port(port); |
3183 | kfree(port->mc_list); | 3182 | kfree(port->mc_list); |
3184 | free_netdev(port->netdev); | 3183 | free_netdev(port->netdev); |
3185 | port->adapter->active_ports--; | 3184 | adapter->active_ports--; |
3186 | } | 3185 | } |
3187 | 3186 | ||
3188 | static int ehea_setup_ports(struct ehea_adapter *adapter) | 3187 | static int ehea_setup_ports(struct ehea_adapter *adapter) |
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 35f66d4a4595..9eca97fb0a54 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
@@ -5823,6 +5823,7 @@ static int nv_resume(struct pci_dev *pdev) | |||
5823 | writel(txreg, base + NvRegTransmitPoll); | 5823 | writel(txreg, base + NvRegTransmitPoll); |
5824 | 5824 | ||
5825 | rc = nv_open(dev); | 5825 | rc = nv_open(dev); |
5826 | nv_set_multicast(dev); | ||
5826 | out: | 5827 | out: |
5827 | return rc; | 5828 | return rc; |
5828 | } | 5829 | } |
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index 67b4b0728fce..a5baaf59ff66 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c | |||
@@ -1093,7 +1093,7 @@ err: | |||
1093 | if (registered) | 1093 | if (registered) |
1094 | unregister_netdev(ndev); | 1094 | unregister_netdev(ndev); |
1095 | 1095 | ||
1096 | if (fep != NULL) { | 1096 | if (fep && fep->ops) { |
1097 | (*fep->ops->free_bd)(ndev); | 1097 | (*fep->ops->free_bd)(ndev); |
1098 | (*fep->ops->cleanup_data)(ndev); | 1098 | (*fep->ops->cleanup_data)(ndev); |
1099 | } | 1099 | } |
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c index f90515935833..45ae9d1191d7 100644 --- a/drivers/net/hamradio/scc.c +++ b/drivers/net/hamradio/scc.c | |||
@@ -1340,9 +1340,10 @@ static unsigned int scc_set_param(struct scc_channel *scc, unsigned int cmd, uns | |||
1340 | case PARAM_RTS: | 1340 | case PARAM_RTS: |
1341 | if ( !(scc->wreg[R5] & RTS) ) | 1341 | if ( !(scc->wreg[R5] & RTS) ) |
1342 | { | 1342 | { |
1343 | if (arg != TX_OFF) | 1343 | if (arg != TX_OFF) { |
1344 | scc_key_trx(scc, TX_ON); | 1344 | scc_key_trx(scc, TX_ON); |
1345 | scc_start_tx_timer(scc, t_txdelay, scc->kiss.txdelay); | 1345 | scc_start_tx_timer(scc, t_txdelay, scc->kiss.txdelay); |
1346 | } | ||
1346 | } else { | 1347 | } else { |
1347 | if (arg == TX_OFF) | 1348 | if (arg == TX_OFF) |
1348 | { | 1349 | { |
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index c91b12ea26ad..36be6efc6398 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c | |||
@@ -631,7 +631,7 @@ static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp) | |||
631 | return status; | 631 | return status; |
632 | } | 632 | } |
633 | 633 | ||
634 | int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp) | 634 | static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp) |
635 | { | 635 | { |
636 | struct myri10ge_cmd cmd; | 636 | struct myri10ge_cmd cmd; |
637 | int status; | 637 | int status; |
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c index 8f328a03847b..a550c9bd126f 100644 --- a/drivers/net/pcmcia/fmvj18x_cs.c +++ b/drivers/net/pcmcia/fmvj18x_cs.c | |||
@@ -391,7 +391,9 @@ static int fmvj18x_config(struct pcmcia_device *link) | |||
391 | cardtype = CONTEC; | 391 | cardtype = CONTEC; |
392 | break; | 392 | break; |
393 | case MANFID_FUJITSU: | 393 | case MANFID_FUJITSU: |
394 | if (link->card_id == PRODID_FUJITSU_MBH10302) | 394 | if (link->conf.ConfigBase == 0x0fe0) |
395 | cardtype = MBH10302; | ||
396 | else if (link->card_id == PRODID_FUJITSU_MBH10302) | ||
395 | /* RATOC REX-5588/9822/4886's PRODID are 0004(=MBH10302), | 397 | /* RATOC REX-5588/9822/4886's PRODID are 0004(=MBH10302), |
396 | but these are MBH10304 based card. */ | 398 | but these are MBH10304 based card. */ |
397 | cardtype = MBH10304; | 399 | cardtype = MBH10304; |
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c index d041f831a18d..f6c4698ce738 100644 --- a/drivers/net/pcmcia/xirc2ps_cs.c +++ b/drivers/net/pcmcia/xirc2ps_cs.c | |||
@@ -1461,22 +1461,25 @@ static void | |||
1461 | set_multicast_list(struct net_device *dev) | 1461 | set_multicast_list(struct net_device *dev) |
1462 | { | 1462 | { |
1463 | unsigned int ioaddr = dev->base_addr; | 1463 | unsigned int ioaddr = dev->base_addr; |
1464 | unsigned value; | ||
1464 | 1465 | ||
1465 | SelectPage(0x42); | 1466 | SelectPage(0x42); |
1467 | value = GetByte(XIRCREG42_SWC1) & 0xC0; | ||
1468 | |||
1466 | if (dev->flags & IFF_PROMISC) { /* snoop */ | 1469 | if (dev->flags & IFF_PROMISC) { /* snoop */ |
1467 | PutByte(XIRCREG42_SWC1, 0x06); /* set MPE and PME */ | 1470 | PutByte(XIRCREG42_SWC1, value | 0x06); /* set MPE and PME */ |
1468 | } else if (dev->mc_count > 9 || (dev->flags & IFF_ALLMULTI)) { | 1471 | } else if (dev->mc_count > 9 || (dev->flags & IFF_ALLMULTI)) { |
1469 | PutByte(XIRCREG42_SWC1, 0x02); /* set MPE */ | 1472 | PutByte(XIRCREG42_SWC1, value | 0x02); /* set MPE */ |
1470 | } else if (dev->mc_count) { | 1473 | } else if (dev->mc_count) { |
1471 | /* the chip can filter 9 addresses perfectly */ | 1474 | /* the chip can filter 9 addresses perfectly */ |
1472 | PutByte(XIRCREG42_SWC1, 0x01); | 1475 | PutByte(XIRCREG42_SWC1, value | 0x01); |
1473 | SelectPage(0x40); | 1476 | SelectPage(0x40); |
1474 | PutByte(XIRCREG40_CMD0, Offline); | 1477 | PutByte(XIRCREG40_CMD0, Offline); |
1475 | set_addresses(dev); | 1478 | set_addresses(dev); |
1476 | SelectPage(0x40); | 1479 | SelectPage(0x40); |
1477 | PutByte(XIRCREG40_CMD0, EnableRecv | Online); | 1480 | PutByte(XIRCREG40_CMD0, EnableRecv | Online); |
1478 | } else { /* standard usage */ | 1481 | } else { /* standard usage */ |
1479 | PutByte(XIRCREG42_SWC1, 0x00); | 1482 | PutByte(XIRCREG42_SWC1, value | 0x00); |
1480 | } | 1483 | } |
1481 | SelectPage(0); | 1484 | SelectPage(0); |
1482 | } | 1485 | } |
@@ -1722,6 +1725,7 @@ do_reset(struct net_device *dev, int full) | |||
1722 | 1725 | ||
1723 | /* enable receiver and put the mac online */ | 1726 | /* enable receiver and put the mac online */ |
1724 | if (full) { | 1727 | if (full) { |
1728 | set_multicast_list(dev); | ||
1725 | SelectPage(0x40); | 1729 | SelectPage(0x40); |
1726 | PutByte(XIRCREG40_CMD0, EnableRecv | Online); | 1730 | PutByte(XIRCREG40_CMD0, EnableRecv | Online); |
1727 | } | 1731 | } |
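
The xirc2ps change turns the SWC1 writes into read-modify-write updates: the top two bits of the register are read back and preserved, and only the low multicast/promiscuous bits are rewritten; do_reset() additionally replays the multicast setting after a full reset. The read-modify-write idea, with hypothetical register accessors:

static void update_rx_mode(int promisc, int all_multi)
{
        /* keep bits 7:6, rewrite only the low mode bits */
        u8 value = read_reg(REG_SWC1) & 0xC0;

        if (promisc)
                write_reg(REG_SWC1, value | 0x06);      /* MPE + PME */
        else if (all_multi)
                write_reg(REG_SWC1, value | 0x02);      /* MPE */
        else
                write_reg(REG_SWC1, value);             /* unicast only */
}
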
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index a1c454dbc164..1c89b97f4e09 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c | |||
@@ -325,7 +325,7 @@ static int pcnet32_get_regs_len(struct net_device *dev); | |||
325 | static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, | 325 | static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, |
326 | void *ptr); | 326 | void *ptr); |
327 | static void pcnet32_purge_tx_ring(struct net_device *dev); | 327 | static void pcnet32_purge_tx_ring(struct net_device *dev); |
328 | static int pcnet32_alloc_ring(struct net_device *dev, char *name); | 328 | static int pcnet32_alloc_ring(struct net_device *dev, const char *name); |
329 | static void pcnet32_free_ring(struct net_device *dev); | 329 | static void pcnet32_free_ring(struct net_device *dev); |
330 | static void pcnet32_check_media(struct net_device *dev, int verbose); | 330 | static void pcnet32_check_media(struct net_device *dev, int verbose); |
331 | 331 | ||
@@ -1983,7 +1983,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
1983 | } | 1983 | } |
1984 | 1984 | ||
1985 | /* if any allocation fails, caller must also call pcnet32_free_ring */ | 1985 | /* if any allocation fails, caller must also call pcnet32_free_ring */ |
1986 | static int pcnet32_alloc_ring(struct net_device *dev, char *name) | 1986 | static int pcnet32_alloc_ring(struct net_device *dev, const char *name) |
1987 | { | 1987 | { |
1988 | struct pcnet32_private *lp = netdev_priv(dev); | 1988 | struct pcnet32_private *lp = netdev_priv(dev); |
1989 | 1989 | ||
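
The pcnet32 hunks are a small const-correctness cleanup: the ring name is only read, so both the forward declaration and the definition now take const char *. Trivially, with a stand-in body:

static int alloc_ring(struct net_device *dev, const char *name);

static int alloc_ring(struct net_device *dev, const char *name)
{
        pr_debug("%s: allocating descriptor rings\n", name);    /* read-only use */
        return 0;
}
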
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 6bf9e76b0a00..6eb2d31d1e34 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig | |||
@@ -5,7 +5,7 @@ | |||
5 | menuconfig PHYLIB | 5 | menuconfig PHYLIB |
6 | tristate "PHY Device support and infrastructure" | 6 | tristate "PHY Device support and infrastructure" |
7 | depends on !S390 | 7 | depends on !S390 |
8 | depends on NET_ETHERNET && (BROKEN || !S390) | 8 | depends on NET_ETHERNET |
9 | help | 9 | help |
10 | Ethernet controllers are usually attached to PHY | 10 | Ethernet controllers are usually attached to PHY |
11 | devices. This option provides infrastructure for | 11 | devices. This option provides infrastructure for |
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index ac3c01d28fdf..16a0e7de5888 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
@@ -207,6 +207,7 @@ int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id) | |||
207 | 207 | ||
208 | return 0; | 208 | return 0; |
209 | } | 209 | } |
210 | EXPORT_SYMBOL(get_phy_id); | ||
210 | 211 | ||
211 | /** | 212 | /** |
212 | * get_phy_device - reads the specified PHY device and returns its @phy_device struct | 213 | * get_phy_device - reads the specified PHY device and returns its @phy_device struct |
diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h index 2109508c047a..f8274f8941ea 100644 --- a/drivers/net/s2io-regs.h +++ b/drivers/net/s2io-regs.h | |||
@@ -250,7 +250,7 @@ struct XENA_dev_config { | |||
250 | u64 tx_mat0_n[0x8]; | 250 | u64 tx_mat0_n[0x8]; |
251 | #define TX_MAT_SET(fifo, msi) vBIT(msi, (8 * fifo), 8) | 251 | #define TX_MAT_SET(fifo, msi) vBIT(msi, (8 * fifo), 8) |
252 | 252 | ||
253 | u8 unused_1[0x8]; | 253 | u64 xmsi_mask_reg; |
254 | u64 stat_byte_cnt; | 254 | u64 stat_byte_cnt; |
255 | #define STAT_BC(n) vBIT(n,4,12) | 255 | #define STAT_BC(n) vBIT(n,4,12) |
256 | 256 | ||
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 523478ebfd69..a20693e09ae8 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c | |||
@@ -86,7 +86,7 @@ | |||
86 | #include "s2io.h" | 86 | #include "s2io.h" |
87 | #include "s2io-regs.h" | 87 | #include "s2io-regs.h" |
88 | 88 | ||
89 | #define DRV_VERSION "2.0.26.23" | 89 | #define DRV_VERSION "2.0.26.24" |
90 | 90 | ||
91 | /* S2io Driver name & version. */ | 91 | /* S2io Driver name & version. */ |
92 | static char s2io_driver_name[] = "Neterion"; | 92 | static char s2io_driver_name[] = "Neterion"; |
@@ -1113,9 +1113,10 @@ static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev) | |||
1113 | struct pci_dev *tdev = NULL; | 1113 | struct pci_dev *tdev = NULL; |
1114 | while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) { | 1114 | while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) { |
1115 | if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) { | 1115 | if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) { |
1116 | if (tdev->bus == s2io_pdev->bus->parent) | 1116 | if (tdev->bus == s2io_pdev->bus->parent) { |
1117 | pci_dev_put(tdev); | 1117 | pci_dev_put(tdev); |
1118 | return 1; | 1118 | return 1; |
1119 | } | ||
1119 | } | 1120 | } |
1120 | } | 1121 | } |
1121 | return 0; | 1122 | return 0; |
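
The s2io_on_nec_bridge() fix adds braces so the early return 1 (and its pci_dev_put()) happens only when the bridge really is the parent bus; before, the function returned 1 for any NEC bridge and could leak a device reference. The underlying rule: pci_get_device() hands back a referenced device and drops the previous one, so any early exit must put the reference it still holds. Sketch (the match test is a stand-in):

static int bus_has_matching_bridge(struct pci_dev *self)
{
        struct pci_dev *tdev = NULL;

        while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
                if (bridge_matches(tdev, self)) {       /* stand-in check */
                        pci_dev_put(tdev);              /* drop the held ref */
                        return 1;
                }
        }
        return 0;               /* normal loop exit holds no reference */
}
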
@@ -1219,15 +1220,33 @@ static int init_tti(struct s2io_nic *nic, int link) | |||
1219 | TTI_DATA1_MEM_TX_URNG_B(0x10) | | 1220 | TTI_DATA1_MEM_TX_URNG_B(0x10) | |
1220 | TTI_DATA1_MEM_TX_URNG_C(0x30) | | 1221 | TTI_DATA1_MEM_TX_URNG_C(0x30) | |
1221 | TTI_DATA1_MEM_TX_TIMER_AC_EN; | 1222 | TTI_DATA1_MEM_TX_TIMER_AC_EN; |
1222 | 1223 | if (i == 0) | |
1223 | if (use_continuous_tx_intrs && (link == LINK_UP)) | 1224 | if (use_continuous_tx_intrs && (link == LINK_UP)) |
1224 | val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN; | 1225 | val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN; |
1225 | writeq(val64, &bar0->tti_data1_mem); | 1226 | writeq(val64, &bar0->tti_data1_mem); |
1226 | 1227 | ||
1227 | val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) | | 1228 | if (nic->config.intr_type == MSI_X) { |
1228 | TTI_DATA2_MEM_TX_UFC_B(0x20) | | 1229 | val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) | |
1229 | TTI_DATA2_MEM_TX_UFC_C(0x40) | | 1230 | TTI_DATA2_MEM_TX_UFC_B(0x100) | |
1230 | TTI_DATA2_MEM_TX_UFC_D(0x80); | 1231 | TTI_DATA2_MEM_TX_UFC_C(0x200) | |
1232 | TTI_DATA2_MEM_TX_UFC_D(0x300); | ||
1233 | } else { | ||
1234 | if ((nic->config.tx_steering_type == | ||
1235 | TX_DEFAULT_STEERING) && | ||
1236 | (config->tx_fifo_num > 1) && | ||
1237 | (i >= nic->udp_fifo_idx) && | ||
1238 | (i < (nic->udp_fifo_idx + | ||
1239 | nic->total_udp_fifos))) | ||
1240 | val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) | | ||
1241 | TTI_DATA2_MEM_TX_UFC_B(0x80) | | ||
1242 | TTI_DATA2_MEM_TX_UFC_C(0x100) | | ||
1243 | TTI_DATA2_MEM_TX_UFC_D(0x120); | ||
1244 | else | ||
1245 | val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) | | ||
1246 | TTI_DATA2_MEM_TX_UFC_B(0x20) | | ||
1247 | TTI_DATA2_MEM_TX_UFC_C(0x40) | | ||
1248 | TTI_DATA2_MEM_TX_UFC_D(0x80); | ||
1249 | } | ||
1231 | 1250 | ||
1232 | writeq(val64, &bar0->tti_data2_mem); | 1251 | writeq(val64, &bar0->tti_data2_mem); |
1233 | 1252 | ||
@@ -2813,6 +2832,15 @@ static void free_rx_buffers(struct s2io_nic *sp) | |||
2813 | } | 2832 | } |
2814 | } | 2833 | } |
2815 | 2834 | ||
2835 | static int s2io_chk_rx_buffers(struct ring_info *ring) | ||
2836 | { | ||
2837 | if (fill_rx_buffers(ring) == -ENOMEM) { | ||
2838 | DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name); | ||
2839 | DBG_PRINT(INFO_DBG, " in Rx Intr!!\n"); | ||
2840 | } | ||
2841 | return 0; | ||
2842 | } | ||
2843 | |||
2816 | /** | 2844 | /** |
2817 | * s2io_poll - Rx interrupt handler for NAPI support | 2845 | * s2io_poll - Rx interrupt handler for NAPI support |
2818 | * @napi : pointer to the napi structure. | 2846 | * @napi : pointer to the napi structure. |
@@ -2826,57 +2854,72 @@ static void free_rx_buffers(struct s2io_nic *sp) | |||
2826 | * 0 on success and 1 if there are No Rx packets to be processed. | 2854 | * 0 on success and 1 if there are No Rx packets to be processed. |
2827 | */ | 2855 | */ |
2828 | 2856 | ||
2829 | static int s2io_poll(struct napi_struct *napi, int budget) | 2857 | static int s2io_poll_msix(struct napi_struct *napi, int budget) |
2830 | { | 2858 | { |
2831 | struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi); | 2859 | struct ring_info *ring = container_of(napi, struct ring_info, napi); |
2832 | struct net_device *dev = nic->dev; | 2860 | struct net_device *dev = ring->dev; |
2833 | int pkt_cnt = 0, org_pkts_to_process; | ||
2834 | struct mac_info *mac_control; | ||
2835 | struct config_param *config; | 2861 | struct config_param *config; |
2862 | struct mac_info *mac_control; | ||
2863 | int pkts_processed = 0; | ||
2864 | u8 *addr = NULL, val8 = 0; | ||
2865 | struct s2io_nic *nic = dev->priv; | ||
2836 | struct XENA_dev_config __iomem *bar0 = nic->bar0; | 2866 | struct XENA_dev_config __iomem *bar0 = nic->bar0; |
2837 | int i; | 2867 | int budget_org = budget; |
2838 | 2868 | ||
2839 | mac_control = &nic->mac_control; | ||
2840 | config = &nic->config; | 2869 | config = &nic->config; |
2870 | mac_control = &nic->mac_control; | ||
2841 | 2871 | ||
2842 | nic->pkts_to_process = budget; | 2872 | if (unlikely(!is_s2io_card_up(nic))) |
2843 | org_pkts_to_process = nic->pkts_to_process; | 2873 | return 0; |
2844 | 2874 | ||
2845 | writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); | 2875 | pkts_processed = rx_intr_handler(ring, budget); |
2846 | readl(&bar0->rx_traffic_int); | 2876 | s2io_chk_rx_buffers(ring); |
2847 | 2877 | ||
2848 | for (i = 0; i < config->rx_ring_num; i++) { | 2878 | if (pkts_processed < budget_org) { |
2849 | rx_intr_handler(&mac_control->rings[i]); | 2879 | netif_rx_complete(dev, napi); |
2850 | pkt_cnt = org_pkts_to_process - nic->pkts_to_process; | 2880 | /*Re Enable MSI-Rx Vector*/ |
2851 | if (!nic->pkts_to_process) { | 2881 | addr = (u8 *)&bar0->xmsi_mask_reg; |
2852 | /* Quota for the current iteration has been met */ | 2882 | addr += 7 - ring->ring_no; |
2853 | goto no_rx; | 2883 | val8 = (ring->ring_no == 0) ? 0x3f : 0xbf; |
2854 | } | 2884 | writeb(val8, addr); |
2885 | val8 = readb(addr); | ||
2855 | } | 2886 | } |
2887 | return pkts_processed; | ||
2888 | } | ||
2889 | static int s2io_poll_inta(struct napi_struct *napi, int budget) | ||
2890 | { | ||
2891 | struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi); | ||
2892 | struct ring_info *ring; | ||
2893 | struct net_device *dev = nic->dev; | ||
2894 | struct config_param *config; | ||
2895 | struct mac_info *mac_control; | ||
2896 | int pkts_processed = 0; | ||
2897 | int ring_pkts_processed, i; | ||
2898 | struct XENA_dev_config __iomem *bar0 = nic->bar0; | ||
2899 | int budget_org = budget; | ||
2856 | 2900 | ||
2857 | netif_rx_complete(dev, napi); | 2901 | config = &nic->config; |
2902 | mac_control = &nic->mac_control; | ||
2858 | 2903 | ||
2859 | for (i = 0; i < config->rx_ring_num; i++) { | 2904 | if (unlikely(!is_s2io_card_up(nic))) |
2860 | if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { | 2905 | return 0; |
2861 | DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); | ||
2862 | DBG_PRINT(INFO_DBG, " in Rx Poll!!\n"); | ||
2863 | break; | ||
2864 | } | ||
2865 | } | ||
2866 | /* Re enable the Rx interrupts. */ | ||
2867 | writeq(0x0, &bar0->rx_traffic_mask); | ||
2868 | readl(&bar0->rx_traffic_mask); | ||
2869 | return pkt_cnt; | ||
2870 | 2906 | ||
2871 | no_rx: | ||
2872 | for (i = 0; i < config->rx_ring_num; i++) { | 2907 | for (i = 0; i < config->rx_ring_num; i++) { |
2873 | if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { | 2908 | ring = &mac_control->rings[i]; |
2874 | DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); | 2909 | ring_pkts_processed = rx_intr_handler(ring, budget); |
2875 | DBG_PRINT(INFO_DBG, " in Rx Poll!!\n"); | 2910 | s2io_chk_rx_buffers(ring); |
2911 | pkts_processed += ring_pkts_processed; | ||
2912 | budget -= ring_pkts_processed; | ||
2913 | if (budget <= 0) | ||
2876 | break; | 2914 | break; |
2877 | } | ||
2878 | } | 2915 | } |
2879 | return pkt_cnt; | 2916 | if (pkts_processed < budget_org) { |
2917 | netif_rx_complete(dev, napi); | ||
2918 | /* Re enable the Rx interrupts for the ring */ | ||
2919 | writeq(0, &bar0->rx_traffic_mask); | ||
2920 | readl(&bar0->rx_traffic_mask); | ||
2921 | } | ||
2922 | return pkts_processed; | ||
2880 | } | 2923 | } |
2881 | 2924 | ||
2882 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2925 | #ifdef CONFIG_NET_POLL_CONTROLLER |
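
The heart of the s2io rework is the split of s2io_poll() into an INTA poller and a per-ring MSI-X poller. Both follow the NAPI contract of this kernel generation: consume at most budget packets, and only when fewer than budget were processed call netif_rx_complete() and unmask the interrupt source. The skeleton, with a placeholder ring type and helpers and the old two-argument netif_rx_complete():

static int my_poll(struct napi_struct *napi, int budget)
{
        struct my_ring *ring = container_of(napi, struct my_ring, napi);
        int done;

        done = my_rx_process(ring, budget);     /* returns <= budget */
        my_refill_rx_buffers(ring);

        if (done < budget) {
                /* no work left: leave polling mode, then unmask the IRQ */
                netif_rx_complete(ring->dev, napi);
                my_unmask_rx_irq(ring);
        }
        return done;
}
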
@@ -2918,7 +2961,7 @@ static void s2io_netpoll(struct net_device *dev) | |||
2918 | 2961 | ||
2919 | /* check for received packet and indicate up to network */ | 2962 | /* check for received packet and indicate up to network */ |
2920 | for (i = 0; i < config->rx_ring_num; i++) | 2963 | for (i = 0; i < config->rx_ring_num; i++) |
2921 | rx_intr_handler(&mac_control->rings[i]); | 2964 | rx_intr_handler(&mac_control->rings[i], 0); |
2922 | 2965 | ||
2923 | for (i = 0; i < config->rx_ring_num; i++) { | 2966 | for (i = 0; i < config->rx_ring_num; i++) { |
2924 | if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { | 2967 | if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { |
@@ -2934,7 +2977,8 @@ static void s2io_netpoll(struct net_device *dev) | |||
2934 | 2977 | ||
2935 | /** | 2978 | /** |
2936 | * rx_intr_handler - Rx interrupt handler | 2979 | * rx_intr_handler - Rx interrupt handler |
2937 | * @nic: device private variable. | 2980 | * @ring_info: per ring structure. |
2981 | * @budget: budget for napi processing. | ||
2938 | * Description: | 2982 | * Description: |
2939 | * If the interrupt is because of a received frame or if the | 2983 | * If the interrupt is because of a received frame or if the |
2940 | * receive ring contains fresh as yet un-processed frames,this function is | 2984 | * receive ring contains fresh as yet un-processed frames,this function is |
@@ -2942,15 +2986,15 @@ static void s2io_netpoll(struct net_device *dev) | |||
2942 | * stopped and sends the skb to the OSM's Rx handler and then increments | 2986 | * stopped and sends the skb to the OSM's Rx handler and then increments |
2943 | * the offset. | 2987 | * the offset. |
2944 | * Return Value: | 2988 | * Return Value: |
2945 | * NONE. | 2989 | * No. of napi packets processed. |
2946 | */ | 2990 | */ |
2947 | static void rx_intr_handler(struct ring_info *ring_data) | 2991 | static int rx_intr_handler(struct ring_info *ring_data, int budget) |
2948 | { | 2992 | { |
2949 | int get_block, put_block; | 2993 | int get_block, put_block; |
2950 | struct rx_curr_get_info get_info, put_info; | 2994 | struct rx_curr_get_info get_info, put_info; |
2951 | struct RxD_t *rxdp; | 2995 | struct RxD_t *rxdp; |
2952 | struct sk_buff *skb; | 2996 | struct sk_buff *skb; |
2953 | int pkt_cnt = 0; | 2997 | int pkt_cnt = 0, napi_pkts = 0; |
2954 | int i; | 2998 | int i; |
2955 | struct RxD1* rxdp1; | 2999 | struct RxD1* rxdp1; |
2956 | struct RxD3* rxdp3; | 3000 | struct RxD3* rxdp3; |
@@ -2977,7 +3021,7 @@ static void rx_intr_handler(struct ring_info *ring_data) | |||
2977 | DBG_PRINT(ERR_DBG, "%s: The skb is ", | 3021 | DBG_PRINT(ERR_DBG, "%s: The skb is ", |
2978 | ring_data->dev->name); | 3022 | ring_data->dev->name); |
2979 | DBG_PRINT(ERR_DBG, "Null in Rx Intr\n"); | 3023 | DBG_PRINT(ERR_DBG, "Null in Rx Intr\n"); |
2980 | return; | 3024 | return 0; |
2981 | } | 3025 | } |
2982 | if (ring_data->rxd_mode == RXD_MODE_1) { | 3026 | if (ring_data->rxd_mode == RXD_MODE_1) { |
2983 | rxdp1 = (struct RxD1*)rxdp; | 3027 | rxdp1 = (struct RxD1*)rxdp; |
@@ -3014,9 +3058,10 @@ static void rx_intr_handler(struct ring_info *ring_data) | |||
3014 | rxdp = ring_data->rx_blocks[get_block].block_virt_addr; | 3058 | rxdp = ring_data->rx_blocks[get_block].block_virt_addr; |
3015 | } | 3059 | } |
3016 | 3060 | ||
3017 | if(ring_data->nic->config.napi){ | 3061 | if (ring_data->nic->config.napi) { |
3018 | ring_data->nic->pkts_to_process -= 1; | 3062 | budget--; |
3019 | if (!ring_data->nic->pkts_to_process) | 3063 | napi_pkts++; |
3064 | if (!budget) | ||
3020 | break; | 3065 | break; |
3021 | } | 3066 | } |
3022 | pkt_cnt++; | 3067 | pkt_cnt++; |
@@ -3034,6 +3079,7 @@ static void rx_intr_handler(struct ring_info *ring_data) | |||
3034 | } | 3079 | } |
3035 | } | 3080 | } |
3036 | } | 3081 | } |
3082 | return(napi_pkts); | ||
3037 | } | 3083 | } |
3038 | 3084 | ||
3039 | /** | 3085 | /** |
@@ -3730,14 +3776,19 @@ static void restore_xmsi_data(struct s2io_nic *nic) | |||
3730 | { | 3776 | { |
3731 | struct XENA_dev_config __iomem *bar0 = nic->bar0; | 3777 | struct XENA_dev_config __iomem *bar0 = nic->bar0; |
3732 | u64 val64; | 3778 | u64 val64; |
3733 | int i; | 3779 | int i, msix_index; |
3780 | |||
3781 | |||
3782 | if (nic->device_type == XFRAME_I_DEVICE) | ||
3783 | return; | ||
3734 | 3784 | ||
3735 | for (i=0; i < MAX_REQUESTED_MSI_X; i++) { | 3785 | for (i=0; i < MAX_REQUESTED_MSI_X; i++) { |
3786 | msix_index = (i) ? ((i-1) * 8 + 1): 0; | ||
3736 | writeq(nic->msix_info[i].addr, &bar0->xmsi_address); | 3787 | writeq(nic->msix_info[i].addr, &bar0->xmsi_address); |
3737 | writeq(nic->msix_info[i].data, &bar0->xmsi_data); | 3788 | writeq(nic->msix_info[i].data, &bar0->xmsi_data); |
3738 | val64 = (s2BIT(7) | s2BIT(15) | vBIT(i, 26, 6)); | 3789 | val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6)); |
3739 | writeq(val64, &bar0->xmsi_access); | 3790 | writeq(val64, &bar0->xmsi_access); |
3740 | if (wait_for_msix_trans(nic, i)) { | 3791 | if (wait_for_msix_trans(nic, msix_index)) { |
3741 | DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__); | 3792 | DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__); |
3742 | continue; | 3793 | continue; |
3743 | } | 3794 | } |
@@ -3748,13 +3799,17 @@ static void store_xmsi_data(struct s2io_nic *nic) | |||
3748 | { | 3799 | { |
3749 | struct XENA_dev_config __iomem *bar0 = nic->bar0; | 3800 | struct XENA_dev_config __iomem *bar0 = nic->bar0; |
3750 | u64 val64, addr, data; | 3801 | u64 val64, addr, data; |
3751 | int i; | 3802 | int i, msix_index; |
3803 | |||
3804 | if (nic->device_type == XFRAME_I_DEVICE) | ||
3805 | return; | ||
3752 | 3806 | ||
3753 | /* Store and display */ | 3807 | /* Store and display */ |
3754 | for (i=0; i < MAX_REQUESTED_MSI_X; i++) { | 3808 | for (i=0; i < MAX_REQUESTED_MSI_X; i++) { |
3755 | val64 = (s2BIT(15) | vBIT(i, 26, 6)); | 3809 | msix_index = (i) ? ((i-1) * 8 + 1): 0; |
3810 | val64 = (s2BIT(15) | vBIT(msix_index, 26, 6)); | ||
3756 | writeq(val64, &bar0->xmsi_access); | 3811 | writeq(val64, &bar0->xmsi_access); |
3757 | if (wait_for_msix_trans(nic, i)) { | 3812 | if (wait_for_msix_trans(nic, msix_index)) { |
3758 | DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__); | 3813 | DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__); |
3759 | continue; | 3814 | continue; |
3760 | } | 3815 | } |
@@ -3770,11 +3825,11 @@ static void store_xmsi_data(struct s2io_nic *nic) | |||
3770 | static int s2io_enable_msi_x(struct s2io_nic *nic) | 3825 | static int s2io_enable_msi_x(struct s2io_nic *nic) |
3771 | { | 3826 | { |
3772 | struct XENA_dev_config __iomem *bar0 = nic->bar0; | 3827 | struct XENA_dev_config __iomem *bar0 = nic->bar0; |
3773 | u64 tx_mat, rx_mat; | 3828 | u64 rx_mat; |
3774 | u16 msi_control; /* Temp variable */ | 3829 | u16 msi_control; /* Temp variable */ |
3775 | int ret, i, j, msix_indx = 1; | 3830 | int ret, i, j, msix_indx = 1; |
3776 | 3831 | ||
3777 | nic->entries = kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct msix_entry), | 3832 | nic->entries = kmalloc(nic->num_entries * sizeof(struct msix_entry), |
3778 | GFP_KERNEL); | 3833 | GFP_KERNEL); |
3779 | if (!nic->entries) { | 3834 | if (!nic->entries) { |
3780 | DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \ | 3835 | DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \ |
@@ -3783,10 +3838,12 @@ static int s2io_enable_msi_x(struct s2io_nic *nic) | |||
3783 | return -ENOMEM; | 3838 | return -ENOMEM; |
3784 | } | 3839 | } |
3785 | nic->mac_control.stats_info->sw_stat.mem_allocated | 3840 | nic->mac_control.stats_info->sw_stat.mem_allocated |
3786 | += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); | 3841 | += (nic->num_entries * sizeof(struct msix_entry)); |
3842 | |||
3843 | memset(nic->entries, 0, nic->num_entries * sizeof(struct msix_entry)); | ||
3787 | 3844 | ||
3788 | nic->s2io_entries = | 3845 | nic->s2io_entries = |
3789 | kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct s2io_msix_entry), | 3846 | kmalloc(nic->num_entries * sizeof(struct s2io_msix_entry), |
3790 | GFP_KERNEL); | 3847 | GFP_KERNEL); |
3791 | if (!nic->s2io_entries) { | 3848 | if (!nic->s2io_entries) { |
3792 | DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", | 3849 | DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", |
@@ -3794,60 +3851,52 @@ static int s2io_enable_msi_x(struct s2io_nic *nic) | |||
3794 | nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++; | 3851 | nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++; |
3795 | kfree(nic->entries); | 3852 | kfree(nic->entries); |
3796 | nic->mac_control.stats_info->sw_stat.mem_freed | 3853 | nic->mac_control.stats_info->sw_stat.mem_freed |
3797 | += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); | 3854 | += (nic->num_entries * sizeof(struct msix_entry)); |
3798 | return -ENOMEM; | 3855 | return -ENOMEM; |
3799 | } | 3856 | } |
3800 | nic->mac_control.stats_info->sw_stat.mem_allocated | 3857 | nic->mac_control.stats_info->sw_stat.mem_allocated |
3801 | += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); | 3858 | += (nic->num_entries * sizeof(struct s2io_msix_entry)); |
3802 | 3859 | memset(nic->s2io_entries, 0, | |
3803 | for (i=0; i< MAX_REQUESTED_MSI_X; i++) { | 3860 | nic->num_entries * sizeof(struct s2io_msix_entry)); |
3804 | nic->entries[i].entry = i; | 3861 | |
3805 | nic->s2io_entries[i].entry = i; | 3862 | nic->entries[0].entry = 0; |
3863 | nic->s2io_entries[0].entry = 0; | ||
3864 | nic->s2io_entries[0].in_use = MSIX_FLG; | ||
3865 | nic->s2io_entries[0].type = MSIX_ALARM_TYPE; | ||
3866 | nic->s2io_entries[0].arg = &nic->mac_control.fifos; | ||
3867 | |||
3868 | for (i = 1; i < nic->num_entries; i++) { | ||
3869 | nic->entries[i].entry = ((i - 1) * 8) + 1; | ||
3870 | nic->s2io_entries[i].entry = ((i - 1) * 8) + 1; | ||
3806 | nic->s2io_entries[i].arg = NULL; | 3871 | nic->s2io_entries[i].arg = NULL; |
3807 | nic->s2io_entries[i].in_use = 0; | 3872 | nic->s2io_entries[i].in_use = 0; |
3808 | } | 3873 | } |
3809 | 3874 | ||
3810 | tx_mat = readq(&bar0->tx_mat0_n[0]); | ||
3811 | for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) { | ||
3812 | tx_mat |= TX_MAT_SET(i, msix_indx); | ||
3813 | nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i]; | ||
3814 | nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE; | ||
3815 | nic->s2io_entries[msix_indx].in_use = MSIX_FLG; | ||
3816 | } | ||
3817 | writeq(tx_mat, &bar0->tx_mat0_n[0]); | ||
3818 | |||
3819 | rx_mat = readq(&bar0->rx_mat); | 3875 | rx_mat = readq(&bar0->rx_mat); |
3820 | for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) { | 3876 | for (j = 0; j < nic->config.rx_ring_num; j++) { |
3821 | rx_mat |= RX_MAT_SET(j, msix_indx); | 3877 | rx_mat |= RX_MAT_SET(j, msix_indx); |
3822 | nic->s2io_entries[msix_indx].arg | 3878 | nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j]; |
3823 | = &nic->mac_control.rings[j]; | 3879 | nic->s2io_entries[j+1].type = MSIX_RING_TYPE; |
3824 | nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE; | 3880 | nic->s2io_entries[j+1].in_use = MSIX_FLG; |
3825 | nic->s2io_entries[msix_indx].in_use = MSIX_FLG; | 3881 | msix_indx += 8; |
3826 | } | 3882 | } |
3827 | writeq(rx_mat, &bar0->rx_mat); | 3883 | writeq(rx_mat, &bar0->rx_mat); |
3884 | readq(&bar0->rx_mat); | ||
3828 | 3885 | ||
3829 | nic->avail_msix_vectors = 0; | 3886 | ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries); |
3830 | ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X); | ||
3831 | /* We fail init if error or we get less vectors than min required */ | 3887 | /* We fail init if error or we get less vectors than min required */ |
3832 | if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) { | ||
3833 | nic->avail_msix_vectors = ret; | ||
3834 | ret = pci_enable_msix(nic->pdev, nic->entries, ret); | ||
3835 | } | ||
3836 | if (ret) { | 3888 | if (ret) { |
3837 | DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name); | 3889 | DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name); |
3838 | kfree(nic->entries); | 3890 | kfree(nic->entries); |
3839 | nic->mac_control.stats_info->sw_stat.mem_freed | 3891 | nic->mac_control.stats_info->sw_stat.mem_freed |
3840 | += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); | 3892 | += (nic->num_entries * sizeof(struct msix_entry)); |
3841 | kfree(nic->s2io_entries); | 3893 | kfree(nic->s2io_entries); |
3842 | nic->mac_control.stats_info->sw_stat.mem_freed | 3894 | nic->mac_control.stats_info->sw_stat.mem_freed |
3843 | += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); | 3895 | += (nic->num_entries * sizeof(struct s2io_msix_entry)); |
3844 | nic->entries = NULL; | 3896 | nic->entries = NULL; |
3845 | nic->s2io_entries = NULL; | 3897 | nic->s2io_entries = NULL; |
3846 | nic->avail_msix_vectors = 0; | ||
3847 | return -ENOMEM; | 3898 | return -ENOMEM; |
3848 | } | 3899 | } |
3849 | if (!nic->avail_msix_vectors) | ||
3850 | nic->avail_msix_vectors = MAX_REQUESTED_MSI_X; | ||
3851 | 3900 | ||
3852 | /* | 3901 | /* |
3853 | * To enable MSI-X, MSI also needs to be enabled, due to a bug | 3902 | * To enable MSI-X, MSI also needs to be enabled, due to a bug |
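
s2io_enable_msi_x() now sizes the vector table as rx_ring_num + 1 (one alarm/TX vector plus one per receive ring), spreads the ring vectors across the MSI-X table as entry (i - 1) * 8 + 1, and calls pci_enable_msix() once for exactly that count instead of retrying with whatever smaller number the old code could get. Reduced to a skeleton (names shortened, error paths trimmed, kcalloc standing in for the kmalloc plus memset pair):

static int enable_msix(struct pci_dev *pdev, struct msix_entry **out, int rings)
{
        int nvec = rings + 1;                   /* 1 alarm + 1 per ring */
        struct msix_entry *entries;
        int i, ret;

        entries = kcalloc(nvec, sizeof(*entries), GFP_KERNEL);
        if (!entries)
                return -ENOMEM;

        entries[0].entry = 0;                   /* alarm / TX vector */
        for (i = 1; i < nvec; i++)
                entries[i].entry = (i - 1) * 8 + 1;

        ret = pci_enable_msix(pdev, entries, nvec);
        if (ret) {              /* no partial fallback; positive means fewer vectors */
                kfree(entries);
                return ret;
        }
        *out = entries;
        return 0;
}
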
@@ -3919,7 +3968,7 @@ static void remove_msix_isr(struct s2io_nic *sp) | |||
3919 | int i; | 3968 | int i; |
3920 | u16 msi_control; | 3969 | u16 msi_control; |
3921 | 3970 | ||
3922 | for (i = 0; i < MAX_REQUESTED_MSI_X; i++) { | 3971 | for (i = 0; i < sp->num_entries; i++) { |
3923 | if (sp->s2io_entries[i].in_use == | 3972 | if (sp->s2io_entries[i].in_use == |
3924 | MSIX_REGISTERED_SUCCESS) { | 3973 | MSIX_REGISTERED_SUCCESS) { |
3925 | int vector = sp->entries[i].vector; | 3974 | int vector = sp->entries[i].vector; |
@@ -3975,29 +4024,6 @@ static int s2io_open(struct net_device *dev) | |||
3975 | netif_carrier_off(dev); | 4024 | netif_carrier_off(dev); |
3976 | sp->last_link_state = 0; | 4025 | sp->last_link_state = 0; |
3977 | 4026 | ||
3978 | if (sp->config.intr_type == MSI_X) { | ||
3979 | int ret = s2io_enable_msi_x(sp); | ||
3980 | |||
3981 | if (!ret) { | ||
3982 | ret = s2io_test_msi(sp); | ||
3983 | /* rollback MSI-X, will re-enable during add_isr() */ | ||
3984 | remove_msix_isr(sp); | ||
3985 | } | ||
3986 | if (ret) { | ||
3987 | |||
3988 | DBG_PRINT(ERR_DBG, | ||
3989 | "%s: MSI-X requested but failed to enable\n", | ||
3990 | dev->name); | ||
3991 | sp->config.intr_type = INTA; | ||
3992 | } | ||
3993 | } | ||
3994 | |||
3995 | /* NAPI doesn't work well with MSI(X) */ | ||
3996 | if (sp->config.intr_type != INTA) { | ||
3997 | if(sp->config.napi) | ||
3998 | sp->config.napi = 0; | ||
3999 | } | ||
4000 | |||
4001 | /* Initialize H/W and enable interrupts */ | 4027 | /* Initialize H/W and enable interrupts */ |
4002 | err = s2io_card_up(sp); | 4028 | err = s2io_card_up(sp); |
4003 | if (err) { | 4029 | if (err) { |
@@ -4020,12 +4046,12 @@ hw_init_failed: | |||
4020 | if (sp->entries) { | 4046 | if (sp->entries) { |
4021 | kfree(sp->entries); | 4047 | kfree(sp->entries); |
4022 | sp->mac_control.stats_info->sw_stat.mem_freed | 4048 | sp->mac_control.stats_info->sw_stat.mem_freed |
4023 | += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); | 4049 | += (sp->num_entries * sizeof(struct msix_entry)); |
4024 | } | 4050 | } |
4025 | if (sp->s2io_entries) { | 4051 | if (sp->s2io_entries) { |
4026 | kfree(sp->s2io_entries); | 4052 | kfree(sp->s2io_entries); |
4027 | sp->mac_control.stats_info->sw_stat.mem_freed | 4053 | sp->mac_control.stats_info->sw_stat.mem_freed |
4028 | += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); | 4054 | += (sp->num_entries * sizeof(struct s2io_msix_entry)); |
4029 | } | 4055 | } |
4030 | } | 4056 | } |
4031 | return err; | 4057 | return err; |
@@ -4327,40 +4353,64 @@ s2io_alarm_handle(unsigned long data) | |||
4327 | mod_timer(&sp->alarm_timer, jiffies + HZ / 2); | 4353 | mod_timer(&sp->alarm_timer, jiffies + HZ / 2); |
4328 | } | 4354 | } |
4329 | 4355 | ||
4330 | static int s2io_chk_rx_buffers(struct ring_info *ring) | ||
4331 | { | ||
4332 | if (fill_rx_buffers(ring) == -ENOMEM) { | ||
4333 | DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name); | ||
4334 | DBG_PRINT(INFO_DBG, " in Rx Intr!!\n"); | ||
4335 | } | ||
4336 | return 0; | ||
4337 | } | ||
4338 | |||
4339 | static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id) | 4356 | static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id) |
4340 | { | 4357 | { |
4341 | struct ring_info *ring = (struct ring_info *)dev_id; | 4358 | struct ring_info *ring = (struct ring_info *)dev_id; |
4342 | struct s2io_nic *sp = ring->nic; | 4359 | struct s2io_nic *sp = ring->nic; |
4360 | struct XENA_dev_config __iomem *bar0 = sp->bar0; | ||
4361 | struct net_device *dev = sp->dev; | ||
4343 | 4362 | ||
4344 | if (!is_s2io_card_up(sp)) | 4363 | if (unlikely(!is_s2io_card_up(sp))) |
4345 | return IRQ_HANDLED; | 4364 | return IRQ_HANDLED; |
4346 | 4365 | ||
4347 | rx_intr_handler(ring); | 4366 | if (sp->config.napi) { |
4348 | s2io_chk_rx_buffers(ring); | 4367 | u8 *addr = NULL, val8 = 0; |
4368 | |||
4369 | addr = (u8 *)&bar0->xmsi_mask_reg; | ||
4370 | addr += (7 - ring->ring_no); | ||
4371 | val8 = (ring->ring_no == 0) ? 0x7f : 0xff; | ||
4372 | writeb(val8, addr); | ||
4373 | val8 = readb(addr); | ||
4374 | netif_rx_schedule(dev, &ring->napi); | ||
4375 | } else { | ||
4376 | rx_intr_handler(ring, 0); | ||
4377 | s2io_chk_rx_buffers(ring); | ||
4378 | } | ||
4349 | 4379 | ||
4350 | return IRQ_HANDLED; | 4380 | return IRQ_HANDLED; |
4351 | } | 4381 | } |
4352 | 4382 | ||
4353 | static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id) | 4383 | static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id) |
4354 | { | 4384 | { |
4355 | struct fifo_info *fifo = (struct fifo_info *)dev_id; | 4385 | int i; |
4356 | struct s2io_nic *sp = fifo->nic; | 4386 | struct fifo_info *fifos = (struct fifo_info *)dev_id; |
4387 | struct s2io_nic *sp = fifos->nic; | ||
4388 | struct XENA_dev_config __iomem *bar0 = sp->bar0; | ||
4389 | struct config_param *config = &sp->config; | ||
4390 | u64 reason; | ||
4357 | 4391 | ||
4358 | if (!is_s2io_card_up(sp)) | 4392 | if (unlikely(!is_s2io_card_up(sp))) |
4393 | return IRQ_NONE; | ||
4394 | |||
4395 | reason = readq(&bar0->general_int_status); | ||
4396 | if (unlikely(reason == S2IO_MINUS_ONE)) | ||
4397 | /* Nothing much can be done. Get out */ | ||
4359 | return IRQ_HANDLED; | 4398 | return IRQ_HANDLED; |
4360 | 4399 | ||
4361 | tx_intr_handler(fifo); | 4400 | writeq(S2IO_MINUS_ONE, &bar0->general_int_mask); |
4401 | |||
4402 | if (reason & GEN_INTR_TXTRAFFIC) | ||
4403 | writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int); | ||
4404 | |||
4405 | for (i = 0; i < config->tx_fifo_num; i++) | ||
4406 | tx_intr_handler(&fifos[i]); | ||
4407 | |||
4408 | writeq(sp->general_int_mask, &bar0->general_int_mask); | ||
4409 | readl(&bar0->general_int_status); | ||
4410 | |||
4362 | return IRQ_HANDLED; | 4411 | return IRQ_HANDLED; |
4363 | } | 4412 | } |
4413 | |||
4364 | static void s2io_txpic_intr_handle(struct s2io_nic *sp) | 4414 | static void s2io_txpic_intr_handle(struct s2io_nic *sp) |
4365 | { | 4415 | { |
4366 | struct XENA_dev_config __iomem *bar0 = sp->bar0; | 4416 | struct XENA_dev_config __iomem *bar0 = sp->bar0; |
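
In NAPI mode the per-ring MSI-X handler no longer processes packets in hard-IRQ context: it masks its own vector through xmsi_mask_reg and hands the ring to its napi instance with netif_rx_schedule(); the poll routine then unmasks the vector once the ring is drained. Generic shape of such a handler (masking and card-state helpers are stand-ins, old two-argument netif_rx_schedule()):

static irqreturn_t my_msix_ring_irq(int irq, void *dev_id)
{
        struct my_ring *ring = dev_id;

        if (unlikely(!my_card_is_up(ring->nic)))
                return IRQ_HANDLED;

        my_mask_ring_vector(ring);                  /* silence this vector */
        netif_rx_schedule(ring->dev, &ring->napi);  /* defer work to poll */
        return IRQ_HANDLED;
}
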
@@ -4762,14 +4812,10 @@ static irqreturn_t s2io_isr(int irq, void *dev_id) | |||
4762 | 4812 | ||
4763 | if (config->napi) { | 4813 | if (config->napi) { |
4764 | if (reason & GEN_INTR_RXTRAFFIC) { | 4814 | if (reason & GEN_INTR_RXTRAFFIC) { |
4765 | if (likely(netif_rx_schedule_prep(dev, | 4815 | netif_rx_schedule(dev, &sp->napi); |
4766 | &sp->napi))) { | 4816 | writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask); |
4767 | __netif_rx_schedule(dev, &sp->napi); | 4817 | writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); |
4768 | writeq(S2IO_MINUS_ONE, | 4818 | readl(&bar0->rx_traffic_int); |
4769 | &bar0->rx_traffic_mask); | ||
4770 | } else | ||
4771 | writeq(S2IO_MINUS_ONE, | ||
4772 | &bar0->rx_traffic_int); | ||
4773 | } | 4819 | } |
4774 | } else { | 4820 | } else { |
4775 | /* | 4821 | /* |
@@ -4781,7 +4827,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id) | |||
4781 | writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); | 4827 | writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); |
4782 | 4828 | ||
4783 | for (i = 0; i < config->rx_ring_num; i++) | 4829 | for (i = 0; i < config->rx_ring_num; i++) |
4784 | rx_intr_handler(&mac_control->rings[i]); | 4830 | rx_intr_handler(&mac_control->rings[i], 0); |
4785 | } | 4831 | } |
4786 | 4832 | ||
4787 | /* | 4833 | /* |
@@ -6984,62 +7030,62 @@ static int s2io_add_isr(struct s2io_nic * sp) | |||
6984 | 7030 | ||
6985 | /* After proper initialization of H/W, register ISR */ | 7031 | /* After proper initialization of H/W, register ISR */ |
6986 | if (sp->config.intr_type == MSI_X) { | 7032 | if (sp->config.intr_type == MSI_X) { |
6987 | int i, msix_tx_cnt=0,msix_rx_cnt=0; | 7033 | int i, msix_rx_cnt = 0; |
6988 | 7034 | ||
6989 | for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) { | 7035 | for (i = 0; i < sp->num_entries; i++) { |
6990 | if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) { | 7036 | if (sp->s2io_entries[i].in_use == MSIX_FLG) { |
6991 | sprintf(sp->desc[i], "%s:MSI-X-%d-TX", | 7037 | if (sp->s2io_entries[i].type == |
7038 | MSIX_RING_TYPE) { | ||
7039 | sprintf(sp->desc[i], "%s:MSI-X-%d-RX", | ||
7040 | dev->name, i); | ||
7041 | err = request_irq(sp->entries[i].vector, | ||
7042 | s2io_msix_ring_handle, 0, | ||
7043 | sp->desc[i], | ||
7044 | sp->s2io_entries[i].arg); | ||
7045 | } else if (sp->s2io_entries[i].type == | ||
7046 | MSIX_ALARM_TYPE) { | ||
7047 | sprintf(sp->desc[i], "%s:MSI-X-%d-TX", | ||
6992 | dev->name, i); | 7048 | dev->name, i); |
6993 | err = request_irq(sp->entries[i].vector, | 7049 | err = request_irq(sp->entries[i].vector, |
6994 | s2io_msix_fifo_handle, 0, sp->desc[i], | 7050 | s2io_msix_fifo_handle, 0, |
6995 | sp->s2io_entries[i].arg); | 7051 | sp->desc[i], |
6996 | /* If either data or addr is zero print it */ | 7052 | sp->s2io_entries[i].arg); |
6997 | if(!(sp->msix_info[i].addr && | 7053 | |
6998 | sp->msix_info[i].data)) { | ||
6999 | DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx " | ||
7000 | "Data:0x%llx\n",sp->desc[i], | ||
7001 | (unsigned long long) | ||
7002 | sp->msix_info[i].addr, | ||
7003 | (unsigned long long) | ||
7004 | sp->msix_info[i].data); | ||
7005 | } else { | ||
7006 | msix_tx_cnt++; | ||
7007 | } | 7054 | } |
7008 | } else { | 7055 | /* if either data or addr is zero print it. */ |
7009 | sprintf(sp->desc[i], "%s:MSI-X-%d-RX", | 7056 | if (!(sp->msix_info[i].addr && |
7010 | dev->name, i); | ||
7011 | err = request_irq(sp->entries[i].vector, | ||
7012 | s2io_msix_ring_handle, 0, sp->desc[i], | ||
7013 | sp->s2io_entries[i].arg); | ||
7014 | /* If either data or addr is zero print it */ | ||
7015 | if(!(sp->msix_info[i].addr && | ||
7016 | sp->msix_info[i].data)) { | 7057 | sp->msix_info[i].data)) { |
7017 | DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx " | 7058 | DBG_PRINT(ERR_DBG, |
7018 | "Data:0x%llx\n",sp->desc[i], | 7059 | "%s @Addr:0x%llx Data:0x%llx\n", |
7060 | sp->desc[i], | ||
7019 | (unsigned long long) | 7061 | (unsigned long long) |
7020 | sp->msix_info[i].addr, | 7062 | sp->msix_info[i].addr, |
7021 | (unsigned long long) | 7063 | (unsigned long long) |
7022 | sp->msix_info[i].data); | 7064 | ntohl(sp->msix_info[i].data)); |
7023 | } else { | 7065 | } else |
7024 | msix_rx_cnt++; | 7066 | msix_rx_cnt++; |
7067 | if (err) { | ||
7068 | remove_msix_isr(sp); | ||
7069 | |||
7070 | DBG_PRINT(ERR_DBG, | ||
7071 | "%s:MSI-X-%d registration " | ||
7072 | "failed\n", dev->name, i); | ||
7073 | |||
7074 | DBG_PRINT(ERR_DBG, | ||
7075 | "%s: Defaulting to INTA\n", | ||
7076 | dev->name); | ||
7077 | sp->config.intr_type = INTA; | ||
7078 | break; | ||
7025 | } | 7079 | } |
7080 | sp->s2io_entries[i].in_use = | ||
7081 | MSIX_REGISTERED_SUCCESS; | ||
7026 | } | 7082 | } |
7027 | if (err) { | ||
7028 | remove_msix_isr(sp); | ||
7029 | DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration " | ||
7030 | "failed\n", dev->name, i); | ||
7031 | DBG_PRINT(ERR_DBG, "%s: defaulting to INTA\n", | ||
7032 | dev->name); | ||
7033 | sp->config.intr_type = INTA; | ||
7034 | break; | ||
7035 | } | ||
7036 | sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS; | ||
7037 | } | 7083 | } |
7038 | if (!err) { | 7084 | if (!err) { |
7039 | printk(KERN_INFO "MSI-X-TX %d entries enabled\n", | ||
7040 | msix_tx_cnt); | ||
7041 | printk(KERN_INFO "MSI-X-RX %d entries enabled\n", | 7085 | printk(KERN_INFO "MSI-X-RX %d entries enabled\n", |
7042 | msix_rx_cnt); | 7086 | --msix_rx_cnt); |
7087 | DBG_PRINT(INFO_DBG, "MSI-X-TX entries enabled" | ||
7088 | " through alarm vector\n"); | ||
7043 | } | 7089 | } |
7044 | } | 7090 | } |
7045 | if (sp->config.intr_type == INTA) { | 7091 | if (sp->config.intr_type == INTA) { |
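
s2io_add_isr() now walks num_entries vectors, registering s2io_msix_ring_handle for the ring vectors and s2io_msix_fifo_handle for the single alarm vector; on any request_irq() failure it unwinds what was already registered and falls back to INTA. The request-and-fallback pattern in outline (helpers and bookkeeping are placeholders):

static int register_msix_vectors(struct msix_entry *entries, int nvec)
{
        int i, err;

        for (i = 0; i < nvec; i++) {
                err = request_irq(entries[i].vector,
                                  entry_is_ring(i) ? ring_irq_handler
                                                   : alarm_irq_handler,
                                  0, vector_name(i), vector_arg(i));
                if (err) {
                        unwind_registered_vectors(i);   /* free 0 .. i-1 */
                        return err;     /* caller falls back to INTA */
                }
        }
        return 0;
}
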
@@ -7080,8 +7126,15 @@ static void do_s2io_card_down(struct s2io_nic * sp, int do_io) | |||
7080 | clear_bit(__S2IO_STATE_CARD_UP, &sp->state); | 7126 | clear_bit(__S2IO_STATE_CARD_UP, &sp->state); |
7081 | 7127 | ||
7082 | /* Disable napi */ | 7128 | /* Disable napi */ |
7083 | if (config->napi) | 7129 | if (sp->config.napi) { |
7084 | napi_disable(&sp->napi); | 7130 | int off = 0; |
7131 | if (config->intr_type == MSI_X) { | ||
7132 | for (; off < sp->config.rx_ring_num; off++) | ||
7133 | napi_disable(&sp->mac_control.rings[off].napi); | ||
7134 | } | ||
7135 | else | ||
7136 | napi_disable(&sp->napi); | ||
7137 | } | ||
7085 | 7138 | ||
7086 | /* disable Tx and Rx traffic on the NIC */ | 7139 | /* disable Tx and Rx traffic on the NIC */ |
7087 | if (do_io) | 7140 | if (do_io) |
@@ -7173,8 +7226,15 @@ static int s2io_card_up(struct s2io_nic * sp) | |||
7173 | } | 7226 | } |
7174 | 7227 | ||
7175 | /* Initialise napi */ | 7228 | /* Initialise napi */ |
7176 | if (config->napi) | 7229 | if (config->napi) { |
7177 | napi_enable(&sp->napi); | 7230 | int i; |
7231 | if (config->intr_type == MSI_X) { | ||
7232 | for (i = 0; i < sp->config.rx_ring_num; i++) | ||
7233 | napi_enable(&sp->mac_control.rings[i].napi); | ||
7234 | } else { | ||
7235 | napi_enable(&sp->napi); | ||
7236 | } | ||
7237 | } | ||
7178 | 7238 | ||
7179 | /* Maintain the state prior to the open */ | 7239 | /* Maintain the state prior to the open */ |
7180 | if (sp->promisc_flg) | 7240 | if (sp->promisc_flg) |
@@ -7217,7 +7277,7 @@ static int s2io_card_up(struct s2io_nic * sp) | |||
7217 | /* Enable select interrupts */ | 7277 | /* Enable select interrupts */ |
7218 | en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS); | 7278 | en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS); |
7219 | if (sp->config.intr_type != INTA) | 7279 | if (sp->config.intr_type != INTA) |
7220 | en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS); | 7280 | en_dis_able_nic_intrs(sp, TX_TRAFFIC_INTR, ENABLE_INTRS); |
7221 | else { | 7281 | else { |
7222 | interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR; | 7282 | interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR; |
7223 | interruptible |= TX_PIC_INTR; | 7283 | interruptible |= TX_PIC_INTR; |
@@ -7615,9 +7675,6 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type, | |||
7615 | rx_ring_num = MAX_RX_RINGS; | 7675 | rx_ring_num = MAX_RX_RINGS; |
7616 | } | 7676 | } |
7617 | 7677 | ||
7618 | if (*dev_intr_type != INTA) | ||
7619 | napi = 0; | ||
7620 | |||
7621 | if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) { | 7678 | if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) { |
7622 | DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. " | 7679 | DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. " |
7623 | "Defaulting to INTA\n"); | 7680 | "Defaulting to INTA\n"); |
@@ -7918,8 +7975,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
7918 | * will use eth_mac_addr() for dev->set_mac_address | 7975 | * will use eth_mac_addr() for dev->set_mac_address |
7919 | * mac address will be set every time dev->open() is called | 7976 | * mac address will be set every time dev->open() is called |
7920 | */ | 7977 | */ |
7921 | netif_napi_add(dev, &sp->napi, s2io_poll, 32); | ||
7922 | |||
7923 | #ifdef CONFIG_NET_POLL_CONTROLLER | 7978 | #ifdef CONFIG_NET_POLL_CONTROLLER |
7924 | dev->poll_controller = s2io_netpoll; | 7979 | dev->poll_controller = s2io_netpoll; |
7925 | #endif | 7980 | #endif |
@@ -7963,6 +8018,32 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
7963 | } | 8018 | } |
7964 | } | 8019 | } |
7965 | 8020 | ||
8021 | if (sp->config.intr_type == MSI_X) { | ||
8022 | sp->num_entries = config->rx_ring_num + 1; | ||
8023 | ret = s2io_enable_msi_x(sp); | ||
8024 | |||
8025 | if (!ret) { | ||
8026 | ret = s2io_test_msi(sp); | ||
8027 | /* rollback MSI-X, will re-enable during add_isr() */ | ||
8028 | remove_msix_isr(sp); | ||
8029 | } | ||
8030 | if (ret) { | ||
8031 | |||
8032 | DBG_PRINT(ERR_DBG, | ||
8033 | "%s: MSI-X requested but failed to enable\n", | ||
8034 | dev->name); | ||
8035 | sp->config.intr_type = INTA; | ||
8036 | } | ||
8037 | } | ||
8038 | |||
8039 | if (config->intr_type == MSI_X) { | ||
8040 | for (i = 0; i < config->rx_ring_num ; i++) | ||
8041 | netif_napi_add(dev, &mac_control->rings[i].napi, | ||
8042 | s2io_poll_msix, 64); | ||
8043 | } else { | ||
8044 | netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64); | ||
8045 | } | ||
8046 | |||
7966 | /* Not needed for Herc */ | 8047 | /* Not needed for Herc */ |
7967 | if (sp->device_type & XFRAME_I_DEVICE) { | 8048 | if (sp->device_type & XFRAME_I_DEVICE) { |
7968 | /* | 8049 | /* |
@@ -8013,6 +8094,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
8013 | /* store mac addresses from CAM to s2io_nic structure */ | 8094 | /* store mac addresses from CAM to s2io_nic structure */ |
8014 | do_s2io_store_unicast_mc(sp); | 8095 | do_s2io_store_unicast_mc(sp); |
8015 | 8096 | ||
8097 | /* Configure MSIX vector for number of rings configured plus one */ | ||
8098 | if ((sp->device_type == XFRAME_II_DEVICE) && | ||
8099 | (config->intr_type == MSI_X)) | ||
8100 | sp->num_entries = config->rx_ring_num + 1; | ||
8101 | |||
8016 | /* Store the values of the MSIX table in the s2io_nic structure */ | 8102 | /* Store the values of the MSIX table in the s2io_nic structure */ |
8017 | store_xmsi_data(sp); | 8103 | store_xmsi_data(sp); |
8018 | /* reset Nic and bring it to known state */ | 8104 | /* reset Nic and bring it to known state */ |
@@ -8078,8 +8164,14 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
8078 | break; | 8164 | break; |
8079 | } | 8165 | } |
8080 | 8166 | ||
8081 | if (napi) | 8167 | switch (sp->config.napi) { |
8168 | case 0: | ||
8169 | DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name); | ||
8170 | break; | ||
8171 | case 1: | ||
8082 | DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name); | 8172 | DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name); |
8173 | break; | ||
8174 | } | ||
8083 | 8175 | ||
8084 | DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name, | 8176 | DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name, |
8085 | sp->config.tx_fifo_num); | 8177 | sp->config.tx_fifo_num); |
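
With the MSI-X enable moved into s2io_init_nic(), probe is also where each receive ring gets its own NAPI context; card-up and card-down then enable and disable those contexts instead of the single device-wide one used with INTA. The per-ring NAPI lifecycle, condensed (ring array, counts and poll routines are placeholders):

/* probe: one napi context per RX ring, weight 64 */
for (i = 0; i < nrings; i++)
        netif_napi_add(dev, &rings[i].napi, my_poll_msix, 64);

/* open / card up */
for (i = 0; i < nrings; i++)
        napi_enable(&rings[i].napi);

/* close / card down */
for (i = 0; i < nrings; i++)
        napi_disable(&rings[i].napi);
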
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h index 0709ebae9139..4706f7f9acb6 100644 --- a/drivers/net/s2io.h +++ b/drivers/net/s2io.h | |||
@@ -706,7 +706,7 @@ struct ring_info { | |||
706 | /* per-ring buffer counter */ | 706 | /* per-ring buffer counter */ |
707 | u32 rx_bufs_left; | 707 | u32 rx_bufs_left; |
708 | 708 | ||
709 | #define MAX_LRO_SESSIONS 32 | 709 | #define MAX_LRO_SESSIONS 32 |
710 | struct lro lro0_n[MAX_LRO_SESSIONS]; | 710 | struct lro lro0_n[MAX_LRO_SESSIONS]; |
711 | u8 lro; | 711 | u8 lro; |
712 | 712 | ||
@@ -725,6 +725,11 @@ struct ring_info { | |||
725 | /* copy of sp->pdev pointer */ | 725 | /* copy of sp->pdev pointer */ |
726 | struct pci_dev *pdev; | 726 | struct pci_dev *pdev; |
727 | 727 | ||
728 | /* Per ring napi struct */ | ||
729 | struct napi_struct napi; | ||
730 | |||
731 | unsigned long interrupt_count; | ||
732 | |||
728 | /* | 733 | /* |
729 | * Place holders for the virtual and physical addresses of | 734 | * Place holders for the virtual and physical addresses of |
730 | * all the Rx Blocks | 735 | * all the Rx Blocks |
@@ -841,7 +846,7 @@ struct usr_addr { | |||
841 | * Structure to keep track of the MSI-X vectors and the corresponding | 846 | * Structure to keep track of the MSI-X vectors and the corresponding |
842 | * argument registered against each vector | 847 | * argument registered against each vector |
843 | */ | 848 | */ |
844 | #define MAX_REQUESTED_MSI_X 17 | 849 | #define MAX_REQUESTED_MSI_X 9 |
845 | struct s2io_msix_entry | 850 | struct s2io_msix_entry |
846 | { | 851 | { |
847 | u16 vector; | 852 | u16 vector; |
@@ -849,8 +854,8 @@ struct s2io_msix_entry | |||
849 | void *arg; | 854 | void *arg; |
850 | 855 | ||
851 | u8 type; | 856 | u8 type; |
852 | #define MSIX_FIFO_TYPE 1 | 857 | #define MSIX_ALARM_TYPE 1 |
853 | #define MSIX_RING_TYPE 2 | 858 | #define MSIX_RING_TYPE 2 |
854 | 859 | ||
855 | u8 in_use; | 860 | u8 in_use; |
856 | #define MSIX_REGISTERED_SUCCESS 0xAA | 861 | #define MSIX_REGISTERED_SUCCESS 0xAA |
@@ -877,7 +882,6 @@ struct s2io_nic { | |||
877 | */ | 882 | */ |
878 | int pkts_to_process; | 883 | int pkts_to_process; |
879 | struct net_device *dev; | 884 | struct net_device *dev; |
880 | struct napi_struct napi; | ||
881 | struct mac_info mac_control; | 885 | struct mac_info mac_control; |
882 | struct config_param config; | 886 | struct config_param config; |
883 | struct pci_dev *pdev; | 887 | struct pci_dev *pdev; |
@@ -948,6 +952,7 @@ struct s2io_nic { | |||
948 | */ | 952 | */ |
949 | u8 other_fifo_idx; | 953 | u8 other_fifo_idx; |
950 | 954 | ||
955 | struct napi_struct napi; | ||
951 | /* after blink, the adapter must be restored with original | 956 | /* after blink, the adapter must be restored with original |
952 | * values. | 957 | * values. |
953 | */ | 958 | */ |
@@ -962,6 +967,7 @@ struct s2io_nic { | |||
962 | unsigned long long start_time; | 967 | unsigned long long start_time; |
963 | struct vlan_group *vlgrp; | 968 | struct vlan_group *vlgrp; |
964 | #define MSIX_FLG 0xA5 | 969 | #define MSIX_FLG 0xA5 |
970 | int num_entries; | ||
965 | struct msix_entry *entries; | 971 | struct msix_entry *entries; |
966 | int msi_detected; | 972 | int msi_detected; |
967 | wait_queue_head_t msi_wait; | 973 | wait_queue_head_t msi_wait; |
@@ -982,6 +988,7 @@ struct s2io_nic { | |||
982 | u16 lro_max_aggr_per_sess; | 988 | u16 lro_max_aggr_per_sess; |
983 | volatile unsigned long state; | 989 | volatile unsigned long state; |
984 | u64 general_int_mask; | 990 | u64 general_int_mask; |
991 | |||
985 | #define VPD_STRING_LEN 80 | 992 | #define VPD_STRING_LEN 80 |
986 | u8 product_name[VPD_STRING_LEN]; | 993 | u8 product_name[VPD_STRING_LEN]; |
987 | u8 serial_num[VPD_STRING_LEN]; | 994 | u8 serial_num[VPD_STRING_LEN]; |
@@ -1103,7 +1110,7 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev); | |||
1103 | static int init_shared_mem(struct s2io_nic *sp); | 1110 | static int init_shared_mem(struct s2io_nic *sp); |
1104 | static void free_shared_mem(struct s2io_nic *sp); | 1111 | static void free_shared_mem(struct s2io_nic *sp); |
1105 | static int init_nic(struct s2io_nic *nic); | 1112 | static int init_nic(struct s2io_nic *nic); |
1106 | static void rx_intr_handler(struct ring_info *ring_data); | 1113 | static int rx_intr_handler(struct ring_info *ring_data, int budget); |
1107 | static void tx_intr_handler(struct fifo_info *fifo_data); | 1114 | static void tx_intr_handler(struct fifo_info *fifo_data); |
1108 | static void s2io_handle_errors(void * dev_id); | 1115 | static void s2io_handle_errors(void * dev_id); |
1109 | 1116 | ||
@@ -1114,7 +1121,8 @@ static void s2io_set_multicast(struct net_device *dev); | |||
1114 | static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp); | 1121 | static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp); |
1115 | static void s2io_link(struct s2io_nic * sp, int link); | 1122 | static void s2io_link(struct s2io_nic * sp, int link); |
1116 | static void s2io_reset(struct s2io_nic * sp); | 1123 | static void s2io_reset(struct s2io_nic * sp); |
1117 | static int s2io_poll(struct napi_struct *napi, int budget); | 1124 | static int s2io_poll_msix(struct napi_struct *napi, int budget); |
1125 | static int s2io_poll_inta(struct napi_struct *napi, int budget); | ||
1118 | static void s2io_init_pci(struct s2io_nic * sp); | 1126 | static void s2io_init_pci(struct s2io_nic * sp); |
1119 | static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr); | 1127 | static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr); |
1120 | static void s2io_alarm_handle(unsigned long data); | 1128 | static void s2io_alarm_handle(unsigned long data); |
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c index 888b7dec9866..33bb18f810fb 100644 --- a/drivers/net/sb1250-mac.c +++ b/drivers/net/sb1250-mac.c | |||
@@ -179,8 +179,7 @@ enum sbmac_state { | |||
179 | #define SBMAC_MAX_TXDESCR 256 | 179 | #define SBMAC_MAX_TXDESCR 256 |
180 | #define SBMAC_MAX_RXDESCR 256 | 180 | #define SBMAC_MAX_RXDESCR 256 |
181 | 181 | ||
182 | #define ETHER_ALIGN 2 | 182 | #define ETHER_ADDR_LEN 6 |
183 | #define ETHER_ADDR_LEN 6 | ||
184 | #define ENET_PACKET_SIZE 1518 | 183 | #define ENET_PACKET_SIZE 1518 |
185 | /*#define ENET_PACKET_SIZE 9216 */ | 184 | /*#define ENET_PACKET_SIZE 9216 */ |
186 | 185 | ||
@@ -262,8 +261,6 @@ struct sbmac_softc { | |||
262 | spinlock_t sbm_lock; /* spin lock */ | 261 | spinlock_t sbm_lock; /* spin lock */ |
263 | int sbm_devflags; /* current device flags */ | 262 | int sbm_devflags; /* current device flags */ |
264 | 263 | ||
265 | int sbm_buffersize; | ||
266 | |||
267 | /* | 264 | /* |
268 | * Controller-specific things | 265 | * Controller-specific things |
269 | */ | 266 | */ |
@@ -305,10 +302,11 @@ struct sbmac_softc { | |||
305 | static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan, | 302 | static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan, |
306 | int txrx, int maxdescr); | 303 | int txrx, int maxdescr); |
307 | static void sbdma_channel_start(struct sbmacdma *d, int rxtx); | 304 | static void sbdma_channel_start(struct sbmacdma *d, int rxtx); |
308 | static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *m); | 305 | static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d, |
306 | struct sk_buff *m); | ||
309 | static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m); | 307 | static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m); |
310 | static void sbdma_emptyring(struct sbmacdma *d); | 308 | static void sbdma_emptyring(struct sbmacdma *d); |
311 | static void sbdma_fillring(struct sbmacdma *d); | 309 | static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d); |
312 | static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d, | 310 | static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d, |
313 | int work_to_do, int poll); | 311 | int work_to_do, int poll); |
314 | static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, | 312 | static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, |
@@ -777,16 +775,13 @@ static void sbdma_channel_stop(struct sbmacdma *d) | |||
777 | d->sbdma_remptr = NULL; | 775 | d->sbdma_remptr = NULL; |
778 | } | 776 | } |
779 | 777 | ||
780 | static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset) | 778 | static inline void sbdma_align_skb(struct sk_buff *skb, |
779 | unsigned int power2, unsigned int offset) | ||
781 | { | 780 | { |
782 | unsigned long addr; | 781 | unsigned char *addr = skb->data; |
783 | unsigned long newaddr; | 782 | unsigned char *newaddr = PTR_ALIGN(addr, power2); |
784 | |||
785 | addr = (unsigned long) skb->data; | ||
786 | |||
787 | newaddr = (addr + power2 - 1) & ~(power2 - 1); | ||
788 | 783 | ||
789 | skb_reserve(skb,newaddr-addr+offset); | 784 | skb_reserve(skb, newaddr - addr + offset); |
790 | } | 785 | } |
791 | 786 | ||
792 | 787 | ||
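
The sb1250 change rewrites sbdma_align_skb() around PTR_ALIGN() and, further down, switches receive allocation to netdev_alloc_skb() with a NET_IP_ALIGN offset, so the buffer starts on a cache-line boundary and the IP header behind the 14-byte Ethernet header ends up word-aligned. The alignment step on its own:

static inline void align_rx_skb(struct sk_buff *skb)
{
        unsigned char *addr = skb->data;
        unsigned char *aligned = PTR_ALIGN(addr, SMP_CACHE_BYTES);

        /* pad up to the cache line, then shift by NET_IP_ALIGN (usually 2) */
        skb_reserve(skb, (aligned - addr) + NET_IP_ALIGN);
}
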
@@ -797,7 +792,8 @@ static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset) | |||
797 | * this queues a buffer for inbound packets. | 792 | * this queues a buffer for inbound packets. |
798 | * | 793 | * |
799 | * Input parameters: | 794 | * Input parameters: |
800 | * d - DMA channel descriptor | 795 | * sc - softc structure |
796 | * d - DMA channel descriptor | ||
801 | * sb - sk_buff to add, or NULL if we should allocate one | 797 | * sb - sk_buff to add, or NULL if we should allocate one |
802 | * | 798 | * |
803 | * Return value: | 799 | * Return value: |
@@ -806,8 +802,10 @@ static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset) | |||
806 | ********************************************************************* */ | 802 | ********************************************************************* */ |
807 | 803 | ||
808 | 804 | ||
809 | static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb) | 805 | static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d, |
806 | struct sk_buff *sb) | ||
810 | { | 807 | { |
808 | struct net_device *dev = sc->sbm_dev; | ||
811 | struct sbdmadscr *dsc; | 809 | struct sbdmadscr *dsc; |
812 | struct sbdmadscr *nextdsc; | 810 | struct sbdmadscr *nextdsc; |
813 | struct sk_buff *sb_new = NULL; | 811 | struct sk_buff *sb_new = NULL; |
@@ -848,14 +846,16 @@ static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb) | |||
848 | */ | 846 | */ |
849 | 847 | ||
850 | if (sb == NULL) { | 848 | if (sb == NULL) { |
851 | sb_new = dev_alloc_skb(ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN); | 849 | sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE + |
850 | SMP_CACHE_BYTES * 2 + | ||
851 | NET_IP_ALIGN); | ||
852 | if (sb_new == NULL) { | 852 | if (sb_new == NULL) { |
853 | pr_info("%s: sk_buff allocation failed\n", | 853 | pr_info("%s: sk_buff allocation failed\n", |
854 | d->sbdma_eth->sbm_dev->name); | 854 | d->sbdma_eth->sbm_dev->name); |
855 | return -ENOBUFS; | 855 | return -ENOBUFS; |
856 | } | 856 | } |
857 | 857 | ||
858 | sbdma_align_skb(sb_new, SMP_CACHE_BYTES, ETHER_ALIGN); | 858 | sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN); |
859 | } | 859 | } |
860 | else { | 860 | else { |
861 | sb_new = sb; | 861 | sb_new = sb; |
@@ -874,10 +874,10 @@ static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb) | |||
874 | * Do not interrupt per DMA transfer. | 874 | * Do not interrupt per DMA transfer. |
875 | */ | 875 | */ |
876 | dsc->dscr_a = virt_to_phys(sb_new->data) | | 876 | dsc->dscr_a = virt_to_phys(sb_new->data) | |
877 | V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | 0; | 877 | V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | 0; |
878 | #else | 878 | #else |
879 | dsc->dscr_a = virt_to_phys(sb_new->data) | | 879 | dsc->dscr_a = virt_to_phys(sb_new->data) | |
880 | V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | | 880 | V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | |
881 | M_DMA_DSCRA_INTERRUPT; | 881 | M_DMA_DSCRA_INTERRUPT; |
882 | #endif | 882 | #endif |
883 | 883 | ||
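A minimal sketch of the allocation idiom this hunk switches to: netdev_alloc_skb() associates the skb with its net_device, and reserving NET_IP_ALIGN bytes keeps the IP header naturally aligned behind the 14-byte Ethernet header. The function and length names here are illustrative, not the driver's:

static struct sk_buff *example_alloc_rx_skb(struct net_device *dev,
                                            unsigned int buf_len)
{
        struct sk_buff *skb;

        skb = netdev_alloc_skb(dev, buf_len + NET_IP_ALIGN);
        if (!skb)
                return NULL;                    /* caller maps this to -ENOBUFS */
        skb_reserve(skb, NET_IP_ALIGN);         /* align the IP header */
        return skb;
}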
@@ -1032,18 +1032,19 @@ static void sbdma_emptyring(struct sbmacdma *d) | |||
1032 | * with sk_buffs | 1032 | * with sk_buffs |
1033 | * | 1033 | * |
1034 | * Input parameters: | 1034 | * Input parameters: |
1035 | * d - DMA channel | 1035 | * sc - softc structure |
1036 | * d - DMA channel | ||
1036 | * | 1037 | * |
1037 | * Return value: | 1038 | * Return value: |
1038 | * nothing | 1039 | * nothing |
1039 | ********************************************************************* */ | 1040 | ********************************************************************* */ |
1040 | 1041 | ||
1041 | static void sbdma_fillring(struct sbmacdma *d) | 1042 | static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d) |
1042 | { | 1043 | { |
1043 | int idx; | 1044 | int idx; |
1044 | 1045 | ||
1045 | for (idx = 0; idx < SBMAC_MAX_RXDESCR-1; idx++) { | 1046 | for (idx = 0; idx < SBMAC_MAX_RXDESCR - 1; idx++) { |
1046 | if (sbdma_add_rcvbuffer(d,NULL) != 0) | 1047 | if (sbdma_add_rcvbuffer(sc, d, NULL) != 0) |
1047 | break; | 1048 | break; |
1048 | } | 1049 | } |
1049 | } | 1050 | } |
@@ -1159,10 +1160,11 @@ again: | |||
1159 | * packet and put it right back on the receive ring. | 1160 | * packet and put it right back on the receive ring. |
1160 | */ | 1161 | */ |
1161 | 1162 | ||
1162 | if (unlikely (sbdma_add_rcvbuffer(d,NULL) == | 1163 | if (unlikely(sbdma_add_rcvbuffer(sc, d, NULL) == |
1163 | -ENOBUFS)) { | 1164 | -ENOBUFS)) { |
1164 | dev->stats.rx_dropped++; | 1165 | dev->stats.rx_dropped++; |
1165 | sbdma_add_rcvbuffer(d,sb); /* re-add old buffer */ | 1166 | /* Re-add old buffer */ |
1167 | sbdma_add_rcvbuffer(sc, d, sb); | ||
1166 | /* No point in continuing at the moment */ | 1168 | /* No point in continuing at the moment */ |
1167 | printk(KERN_ERR "dropped packet (1)\n"); | 1169 | printk(KERN_ERR "dropped packet (1)\n"); |
1168 | d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); | 1170 | d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); |
@@ -1212,7 +1214,7 @@ again: | |||
1212 | * put it back on the receive ring. | 1214 | * put it back on the receive ring. |
1213 | */ | 1215 | */ |
1214 | dev->stats.rx_errors++; | 1216 | dev->stats.rx_errors++; |
1215 | sbdma_add_rcvbuffer(d,sb); | 1217 | sbdma_add_rcvbuffer(sc, d, sb); |
1216 | } | 1218 | } |
1217 | 1219 | ||
1218 | 1220 | ||
@@ -1570,7 +1572,7 @@ static void sbmac_channel_start(struct sbmac_softc *s) | |||
1570 | * Fill the receive ring | 1572 | * Fill the receive ring |
1571 | */ | 1573 | */ |
1572 | 1574 | ||
1573 | sbdma_fillring(&(s->sbm_rxdma)); | 1575 | sbdma_fillring(s, &(s->sbm_rxdma)); |
1574 | 1576 | ||
1575 | /* | 1577 | /* |
1576 | * Turn on the rest of the bits in the enable register | 1578 | * Turn on the rest of the bits in the enable register |
@@ -2312,13 +2314,6 @@ static int sbmac_init(struct platform_device *pldev, long long base) | |||
2312 | dev->dev_addr[i] = eaddr[i]; | 2314 | dev->dev_addr[i] = eaddr[i]; |
2313 | } | 2315 | } |
2314 | 2316 | ||
2315 | |||
2316 | /* | ||
2317 | * Init packet size | ||
2318 | */ | ||
2319 | |||
2320 | sc->sbm_buffersize = ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN; | ||
2321 | |||
2322 | /* | 2317 | /* |
2323 | * Initialize context (get pointers to registers and stuff), then | 2318 | * Initialize context (get pointers to registers and stuff), then |
2324 | * allocate the memory for the descriptor tables. | 2319 | * allocate the memory for the descriptor tables. |
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c index f64a860029b7..b4b63805ee8f 100644 --- a/drivers/net/sc92031.c +++ b/drivers/net/sc92031.c | |||
@@ -953,9 +953,6 @@ static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
953 | unsigned entry; | 953 | unsigned entry; |
954 | u32 tx_status; | 954 | u32 tx_status; |
955 | 955 | ||
956 | if (skb_padto(skb, ETH_ZLEN)) | ||
957 | return NETDEV_TX_OK; | ||
958 | |||
959 | if (unlikely(skb->len > TX_BUF_SIZE)) { | 956 | if (unlikely(skb->len > TX_BUF_SIZE)) { |
960 | dev->stats.tx_dropped++; | 957 | dev->stats.tx_dropped++; |
961 | goto out; | 958 | goto out; |
@@ -975,6 +972,11 @@ static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
975 | skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE); | 972 | skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE); |
976 | 973 | ||
977 | len = skb->len; | 974 | len = skb->len; |
975 | if (unlikely(len < ETH_ZLEN)) { | ||
976 | memset(priv->tx_bufs + entry * TX_BUF_SIZE + len, | ||
977 | 0, ETH_ZLEN - len); | ||
978 | len = ETH_ZLEN; | ||
979 | } | ||
978 | 980 | ||
979 | wmb(); | 981 | wmb(); |
980 | 982 | ||
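The sc92031 change drops skb_padto() and zero-pads the copy inside the per-descriptor bounce buffer instead, presumably so the skb itself is never touched and the padto failure path disappears. A hedged sketch of the pattern, assuming a driver that already copies every frame into a fixed-size TX buffer:

static unsigned int example_copy_and_pad(struct sk_buff *skb, u8 *tx_buf)
{
        unsigned int len = skb->len;

        /* Copy (and checksum if needed) the frame into the bounce buffer. */
        skb_copy_and_csum_dev(skb, tx_buf);

        /* The hardware expects at least a minimum-size Ethernet frame. */
        if (len < ETH_ZLEN) {
                memset(tx_buf + len, 0, ETH_ZLEN - len);
                len = ETH_ZLEN;
        }
        return len;     /* length to program into the TX descriptor */
}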
diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/sfc/bitfield.h index 2806201644cc..2c79d27404e0 100644 --- a/drivers/net/sfc/bitfield.h +++ b/drivers/net/sfc/bitfield.h | |||
@@ -483,7 +483,7 @@ typedef union efx_oword { | |||
483 | #endif | 483 | #endif |
484 | 484 | ||
485 | #define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \ | 485 | #define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \ |
486 | if (FALCON_REV(efx) >= FALCON_REV_B0) { \ | 486 | if (falcon_rev(efx) >= FALCON_REV_B0) { \ |
487 | EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \ | 487 | EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \ |
488 | } else { \ | 488 | } else { \ |
489 | EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \ | 489 | EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \ |
@@ -491,7 +491,7 @@ typedef union efx_oword { | |||
491 | } while (0) | 491 | } while (0) |
492 | 492 | ||
493 | #define EFX_QWORD_FIELD_VER(efx, qword, field) \ | 493 | #define EFX_QWORD_FIELD_VER(efx, qword, field) \ |
494 | (FALCON_REV(efx) >= FALCON_REV_B0 ? \ | 494 | (falcon_rev(efx) >= FALCON_REV_B0 ? \ |
495 | EFX_QWORD_FIELD((qword), field##_B0) : \ | 495 | EFX_QWORD_FIELD((qword), field##_B0) : \ |
496 | EFX_QWORD_FIELD((qword), field##_A1)) | 496 | EFX_QWORD_FIELD((qword), field##_A1)) |
497 | 497 | ||
@@ -501,8 +501,5 @@ typedef union efx_oword { | |||
501 | #define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t)) | 501 | #define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t)) |
502 | #define EFX_DMA_TYPE_WIDTH(width) \ | 502 | #define EFX_DMA_TYPE_WIDTH(width) \ |
503 | (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH) | 503 | (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH) |
504 | #define EFX_DMA_MAX_MASK ((DMA_ADDR_T_WIDTH == 64) ? \ | ||
505 | ~((u64) 0) : ~((u32) 0)) | ||
506 | #define EFX_DMA_MASK(mask) ((mask) & EFX_DMA_MAX_MASK) | ||
507 | 504 | ||
508 | #endif /* EFX_BITFIELD_H */ | 505 | #endif /* EFX_BITFIELD_H */ |
diff --git a/drivers/net/sfc/boards.c b/drivers/net/sfc/boards.c index eecaa6d58584..7fc0328dc055 100644 --- a/drivers/net/sfc/boards.c +++ b/drivers/net/sfc/boards.c | |||
@@ -27,10 +27,8 @@ static void blink_led_timer(unsigned long context) | |||
27 | struct efx_blinker *bl = &efx->board_info.blinker; | 27 | struct efx_blinker *bl = &efx->board_info.blinker; |
28 | efx->board_info.set_fault_led(efx, bl->state); | 28 | efx->board_info.set_fault_led(efx, bl->state); |
29 | bl->state = !bl->state; | 29 | bl->state = !bl->state; |
30 | if (bl->resubmit) { | 30 | if (bl->resubmit) |
31 | bl->timer.expires = jiffies + BLINK_INTERVAL; | 31 | mod_timer(&bl->timer, jiffies + BLINK_INTERVAL); |
32 | add_timer(&bl->timer); | ||
33 | } | ||
34 | } | 32 | } |
35 | 33 | ||
36 | static void board_blink(struct efx_nic *efx, int blink) | 34 | static void board_blink(struct efx_nic *efx, int blink) |
@@ -44,8 +42,7 @@ static void board_blink(struct efx_nic *efx, int blink) | |||
44 | blinker->state = 0; | 42 | blinker->state = 0; |
45 | setup_timer(&blinker->timer, blink_led_timer, | 43 | setup_timer(&blinker->timer, blink_led_timer, |
46 | (unsigned long)efx); | 44 | (unsigned long)efx); |
47 | blinker->timer.expires = jiffies + BLINK_INTERVAL; | 45 | mod_timer(&blinker->timer, jiffies + BLINK_INTERVAL); |
48 | add_timer(&blinker->timer); | ||
49 | } else { | 46 | } else { |
50 | blinker->resubmit = 0; | 47 | blinker->resubmit = 0; |
51 | if (blinker->timer.function) | 48 | if (blinker->timer.function) |
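mod_timer() sets the new expiry and (re)arms the timer in a single call, which is what lets the explicit ".expires = ..." plus add_timer() pair go away. A minimal sketch of that pattern under the timer API of this era; the half-second interval is an assumption:

#include <linux/jiffies.h>
#include <linux/timer.h>

static struct timer_list example_blink_timer;

static void example_blink(unsigned long data)
{
        /* ...toggle the LED here... */
        mod_timer(&example_blink_timer, jiffies + HZ / 2);      /* re-arm */
}

static void example_blink_start(void)
{
        setup_timer(&example_blink_timer, example_blink, 0);
        mod_timer(&example_blink_timer, jiffies + HZ / 2);      /* first shot */
}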
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index 418f2e53a95b..449760642e31 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c | |||
@@ -199,11 +199,12 @@ static inline int efx_process_channel(struct efx_channel *channel, int rx_quota) | |||
199 | */ | 199 | */ |
200 | static inline void efx_channel_processed(struct efx_channel *channel) | 200 | static inline void efx_channel_processed(struct efx_channel *channel) |
201 | { | 201 | { |
202 | /* Write to EVQ_RPTR_REG. If a new event arrived in a race | 202 | /* The interrupt handler for this channel may set work_pending |
203 | * with finishing processing, a new interrupt will be raised. | 203 | * as soon as we acknowledge the events we've seen. Make sure |
204 | */ | 204 | * it's cleared before then. */ |
205 | channel->work_pending = 0; | 205 | channel->work_pending = 0; |
206 | smp_wmb(); /* Ensure channel updated before any new interrupt. */ | 206 | smp_wmb(); |
207 | |||
207 | falcon_eventq_read_ack(channel); | 208 | falcon_eventq_read_ack(channel); |
208 | } | 209 | } |
209 | 210 | ||
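The reworded comment describes a publish pattern: the flag must be visibly cleared before the acknowledgement that allows the next interrupt. A rough sketch of the store-ordering idiom with illustrative names; the handler on the other CPU is assumed to run only after the acknowledgement below:

struct example_channel {
        int work_pending;       /* set by the interrupt handler */
        int enabled;
};

static void example_ack_events(struct example_channel *ch)
{
        ch->work_pending = 0;
        smp_wmb();      /* other CPUs must see the cleared flag before the
                         * handler woken by the ack below can set it again */
        /* ...write the event-queue read pointer / ack register here... */
}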
@@ -265,7 +266,7 @@ void efx_process_channel_now(struct efx_channel *channel) | |||
265 | napi_disable(&channel->napi_str); | 266 | napi_disable(&channel->napi_str); |
266 | 267 | ||
267 | /* Poll the channel */ | 268 | /* Poll the channel */ |
268 | (void) efx_process_channel(channel, efx->type->evq_size); | 269 | efx_process_channel(channel, efx->type->evq_size); |
269 | 270 | ||
270 | /* Ack the eventq. This may cause an interrupt to be generated | 271 | /* Ack the eventq. This may cause an interrupt to be generated |
271 | * when they are reenabled */ | 272 | * when they are reenabled */ |
@@ -317,26 +318,6 @@ static void efx_remove_eventq(struct efx_channel *channel) | |||
317 | * | 318 | * |
318 | *************************************************************************/ | 319 | *************************************************************************/ |
319 | 320 | ||
320 | /* Setup per-NIC RX buffer parameters. | ||
321 | * Calculate the rx buffer allocation parameters required to support | ||
322 | * the current MTU, including padding for header alignment and overruns. | ||
323 | */ | ||
324 | static void efx_calc_rx_buffer_params(struct efx_nic *efx) | ||
325 | { | ||
326 | unsigned int order, len; | ||
327 | |||
328 | len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) + | ||
329 | EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + | ||
330 | efx->type->rx_buffer_padding); | ||
331 | |||
332 | /* Calculate page-order */ | ||
333 | for (order = 0; ((1u << order) * PAGE_SIZE) < len; ++order) | ||
334 | ; | ||
335 | |||
336 | efx->rx_buffer_len = len; | ||
337 | efx->rx_buffer_order = order; | ||
338 | } | ||
339 | |||
340 | static int efx_probe_channel(struct efx_channel *channel) | 321 | static int efx_probe_channel(struct efx_channel *channel) |
341 | { | 322 | { |
342 | struct efx_tx_queue *tx_queue; | 323 | struct efx_tx_queue *tx_queue; |
@@ -387,7 +368,14 @@ static int efx_init_channels(struct efx_nic *efx) | |||
387 | struct efx_channel *channel; | 368 | struct efx_channel *channel; |
388 | int rc = 0; | 369 | int rc = 0; |
389 | 370 | ||
390 | efx_calc_rx_buffer_params(efx); | 371 | /* Calculate the rx buffer allocation parameters required to |
372 | * support the current MTU, including padding for header | ||
373 | * alignment and overruns. | ||
374 | */ | ||
375 | efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) + | ||
376 | EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + | ||
377 | efx->type->rx_buffer_padding); | ||
378 | efx->rx_buffer_order = get_order(efx->rx_buffer_len); | ||
391 | 379 | ||
392 | /* Initialise the channels */ | 380 | /* Initialise the channels */ |
393 | efx_for_each_channel(channel, efx) { | 381 | efx_for_each_channel(channel, efx) { |
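get_order() replaces the open-coded loop: it returns the smallest page order whose span covers the requested length. A small illustration; the 9000-byte length and 4 KiB page size are assumptions chosen only to make the arithmetic concrete:

#include <asm/page.h>           /* get_order() */
#include <linux/gfp.h>          /* alloc_pages() */

static struct page *example_alloc_rx_pages(unsigned int buf_len)
{
        /* With 4 KiB pages and buf_len == 9000: order 2, i.e. four pages. */
        unsigned int order = get_order(buf_len);

        return alloc_pages(GFP_KERNEL, order);
}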
@@ -440,9 +428,12 @@ static void efx_start_channel(struct efx_channel *channel) | |||
440 | netif_napi_add(channel->napi_dev, &channel->napi_str, | 428 | netif_napi_add(channel->napi_dev, &channel->napi_str, |
441 | efx_poll, napi_weight); | 429 | efx_poll, napi_weight); |
442 | 430 | ||
431 | /* The interrupt handler for this channel may set work_pending | ||
432 | * as soon as we enable it. Make sure it's cleared before | ||
433 | * then. Similarly, make sure it sees the enabled flag set. */ | ||
443 | channel->work_pending = 0; | 434 | channel->work_pending = 0; |
444 | channel->enabled = 1; | 435 | channel->enabled = 1; |
445 | smp_wmb(); /* ensure channel updated before first interrupt */ | 436 | smp_wmb(); |
446 | 437 | ||
447 | napi_enable(&channel->napi_str); | 438 | napi_enable(&channel->napi_str); |
448 | 439 | ||
@@ -704,7 +695,7 @@ static void efx_stop_port(struct efx_nic *efx) | |||
704 | mutex_unlock(&efx->mac_lock); | 695 | mutex_unlock(&efx->mac_lock); |
705 | 696 | ||
706 | /* Serialise against efx_set_multicast_list() */ | 697 | /* Serialise against efx_set_multicast_list() */ |
707 | if (NET_DEV_REGISTERED(efx)) { | 698 | if (efx_dev_registered(efx)) { |
708 | netif_tx_lock_bh(efx->net_dev); | 699 | netif_tx_lock_bh(efx->net_dev); |
709 | netif_tx_unlock_bh(efx->net_dev); | 700 | netif_tx_unlock_bh(efx->net_dev); |
710 | } | 701 | } |
@@ -791,22 +782,23 @@ static int efx_init_io(struct efx_nic *efx) | |||
791 | efx->membase = ioremap_nocache(efx->membase_phys, | 782 | efx->membase = ioremap_nocache(efx->membase_phys, |
792 | efx->type->mem_map_size); | 783 | efx->type->mem_map_size); |
793 | if (!efx->membase) { | 784 | if (!efx->membase) { |
794 | EFX_ERR(efx, "could not map memory BAR %d at %lx+%x\n", | 785 | EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n", |
795 | efx->type->mem_bar, efx->membase_phys, | 786 | efx->type->mem_bar, |
787 | (unsigned long long)efx->membase_phys, | ||
796 | efx->type->mem_map_size); | 788 | efx->type->mem_map_size); |
797 | rc = -ENOMEM; | 789 | rc = -ENOMEM; |
798 | goto fail4; | 790 | goto fail4; |
799 | } | 791 | } |
800 | EFX_LOG(efx, "memory BAR %u at %lx+%x (virtual %p)\n", | 792 | EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n", |
801 | efx->type->mem_bar, efx->membase_phys, efx->type->mem_map_size, | 793 | efx->type->mem_bar, (unsigned long long)efx->membase_phys, |
802 | efx->membase); | 794 | efx->type->mem_map_size, efx->membase); |
803 | 795 | ||
804 | return 0; | 796 | return 0; |
805 | 797 | ||
806 | fail4: | 798 | fail4: |
807 | release_mem_region(efx->membase_phys, efx->type->mem_map_size); | 799 | release_mem_region(efx->membase_phys, efx->type->mem_map_size); |
808 | fail3: | 800 | fail3: |
809 | efx->membase_phys = 0UL; | 801 | efx->membase_phys = 0; |
810 | fail2: | 802 | fail2: |
811 | pci_disable_device(efx->pci_dev); | 803 | pci_disable_device(efx->pci_dev); |
812 | fail1: | 804 | fail1: |
@@ -824,7 +816,7 @@ static void efx_fini_io(struct efx_nic *efx) | |||
824 | 816 | ||
825 | if (efx->membase_phys) { | 817 | if (efx->membase_phys) { |
826 | pci_release_region(efx->pci_dev, efx->type->mem_bar); | 818 | pci_release_region(efx->pci_dev, efx->type->mem_bar); |
827 | efx->membase_phys = 0UL; | 819 | efx->membase_phys = 0; |
828 | } | 820 | } |
829 | 821 | ||
830 | pci_disable_device(efx->pci_dev); | 822 | pci_disable_device(efx->pci_dev); |
@@ -1043,7 +1035,7 @@ static void efx_start_all(struct efx_nic *efx) | |||
1043 | return; | 1035 | return; |
1044 | if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT)) | 1036 | if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT)) |
1045 | return; | 1037 | return; |
1046 | if (NET_DEV_REGISTERED(efx) && !netif_running(efx->net_dev)) | 1038 | if (efx_dev_registered(efx) && !netif_running(efx->net_dev)) |
1047 | return; | 1039 | return; |
1048 | 1040 | ||
1049 | /* Mark the port as enabled so port reconfigurations can start, then | 1041 | /* Mark the port as enabled so port reconfigurations can start, then |
@@ -1073,9 +1065,8 @@ static void efx_flush_all(struct efx_nic *efx) | |||
1073 | cancel_delayed_work_sync(&efx->monitor_work); | 1065 | cancel_delayed_work_sync(&efx->monitor_work); |
1074 | 1066 | ||
1075 | /* Ensure that all RX slow refills are complete. */ | 1067 | /* Ensure that all RX slow refills are complete. */ |
1076 | efx_for_each_rx_queue(rx_queue, efx) { | 1068 | efx_for_each_rx_queue(rx_queue, efx) |
1077 | cancel_delayed_work_sync(&rx_queue->work); | 1069 | cancel_delayed_work_sync(&rx_queue->work); |
1078 | } | ||
1079 | 1070 | ||
1080 | /* Stop scheduled port reconfigurations */ | 1071 | /* Stop scheduled port reconfigurations */ |
1081 | cancel_work_sync(&efx->reconfigure_work); | 1072 | cancel_work_sync(&efx->reconfigure_work); |
@@ -1101,9 +1092,10 @@ static void efx_stop_all(struct efx_nic *efx) | |||
1101 | falcon_disable_interrupts(efx); | 1092 | falcon_disable_interrupts(efx); |
1102 | if (efx->legacy_irq) | 1093 | if (efx->legacy_irq) |
1103 | synchronize_irq(efx->legacy_irq); | 1094 | synchronize_irq(efx->legacy_irq); |
1104 | efx_for_each_channel_with_interrupt(channel, efx) | 1095 | efx_for_each_channel_with_interrupt(channel, efx) { |
1105 | if (channel->irq) | 1096 | if (channel->irq) |
1106 | synchronize_irq(channel->irq); | 1097 | synchronize_irq(channel->irq); |
1098 | } | ||
1107 | 1099 | ||
1108 | /* Stop all NAPI processing and synchronous rx refills */ | 1100 | /* Stop all NAPI processing and synchronous rx refills */ |
1109 | efx_for_each_channel(channel, efx) | 1101 | efx_for_each_channel(channel, efx) |
@@ -1125,7 +1117,7 @@ static void efx_stop_all(struct efx_nic *efx) | |||
1125 | /* Stop the kernel transmit interface late, so the watchdog | 1117 | /* Stop the kernel transmit interface late, so the watchdog |
1126 | * timer isn't ticking over the flush */ | 1118 | * timer isn't ticking over the flush */ |
1127 | efx_stop_queue(efx); | 1119 | efx_stop_queue(efx); |
1128 | if (NET_DEV_REGISTERED(efx)) { | 1120 | if (efx_dev_registered(efx)) { |
1129 | netif_tx_lock_bh(efx->net_dev); | 1121 | netif_tx_lock_bh(efx->net_dev); |
1130 | netif_tx_unlock_bh(efx->net_dev); | 1122 | netif_tx_unlock_bh(efx->net_dev); |
1131 | } | 1123 | } |
@@ -1344,13 +1336,17 @@ static int efx_net_stop(struct net_device *net_dev) | |||
1344 | return 0; | 1336 | return 0; |
1345 | } | 1337 | } |
1346 | 1338 | ||
1347 | /* Context: process, dev_base_lock held, non-blocking. */ | 1339 | /* Context: process, dev_base_lock or RTNL held, non-blocking. */ |
1348 | static struct net_device_stats *efx_net_stats(struct net_device *net_dev) | 1340 | static struct net_device_stats *efx_net_stats(struct net_device *net_dev) |
1349 | { | 1341 | { |
1350 | struct efx_nic *efx = net_dev->priv; | 1342 | struct efx_nic *efx = net_dev->priv; |
1351 | struct efx_mac_stats *mac_stats = &efx->mac_stats; | 1343 | struct efx_mac_stats *mac_stats = &efx->mac_stats; |
1352 | struct net_device_stats *stats = &net_dev->stats; | 1344 | struct net_device_stats *stats = &net_dev->stats; |
1353 | 1345 | ||
1346 | /* Update stats if possible, but do not wait if another thread | ||
1347 | * is updating them (or resetting the NIC); slightly stale | ||
1348 | * stats are acceptable. | ||
1349 | */ | ||
1354 | if (!spin_trylock(&efx->stats_lock)) | 1350 | if (!spin_trylock(&efx->stats_lock)) |
1355 | return stats; | 1351 | return stats; |
1356 | if (efx->state == STATE_RUNNING) { | 1352 | if (efx->state == STATE_RUNNING) { |
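The added comment spells out the trade-off: the stats callback runs where blocking on the stats lock is undesirable, so spin_trylock() lets the call fall back to the cached counters when another thread holds it. A sketch of the pattern; the private structure and refresh helper are hypothetical:

static void example_refresh_counters(struct net_device *dev);  /* hypothetical */

struct example_priv {
        spinlock_t stats_lock;          /* guards the hardware counter readout */
};

static struct net_device_stats *example_get_stats(struct net_device *dev)
{
        struct example_priv *priv = netdev_priv(dev);

        /* Slightly stale numbers beat blocking here. */
        if (!spin_trylock(&priv->stats_lock))
                return &dev->stats;

        example_refresh_counters(dev);
        spin_unlock(&priv->stats_lock);
        return &dev->stats;
}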
@@ -1494,7 +1490,7 @@ static void efx_set_multicast_list(struct net_device *net_dev) | |||
1494 | static int efx_netdev_event(struct notifier_block *this, | 1490 | static int efx_netdev_event(struct notifier_block *this, |
1495 | unsigned long event, void *ptr) | 1491 | unsigned long event, void *ptr) |
1496 | { | 1492 | { |
1497 | struct net_device *net_dev = (struct net_device *)ptr; | 1493 | struct net_device *net_dev = ptr; |
1498 | 1494 | ||
1499 | if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) { | 1495 | if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) { |
1500 | struct efx_nic *efx = net_dev->priv; | 1496 | struct efx_nic *efx = net_dev->priv; |
@@ -1563,7 +1559,7 @@ static void efx_unregister_netdev(struct efx_nic *efx) | |||
1563 | efx_for_each_tx_queue(tx_queue, efx) | 1559 | efx_for_each_tx_queue(tx_queue, efx) |
1564 | efx_release_tx_buffers(tx_queue); | 1560 | efx_release_tx_buffers(tx_queue); |
1565 | 1561 | ||
1566 | if (NET_DEV_REGISTERED(efx)) { | 1562 | if (efx_dev_registered(efx)) { |
1567 | strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); | 1563 | strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); |
1568 | unregister_netdev(efx->net_dev); | 1564 | unregister_netdev(efx->net_dev); |
1569 | } | 1565 | } |
@@ -1688,7 +1684,7 @@ static int efx_reset(struct efx_nic *efx) | |||
1688 | if (method == RESET_TYPE_DISABLE) { | 1684 | if (method == RESET_TYPE_DISABLE) { |
1689 | /* Reinitialise the device anyway so the driver unload sequence | 1685 | /* Reinitialise the device anyway so the driver unload sequence |
1690 | * can talk to the external SRAM */ | 1686 | * can talk to the external SRAM */ |
1691 | (void) falcon_init_nic(efx); | 1687 | falcon_init_nic(efx); |
1692 | rc = -EIO; | 1688 | rc = -EIO; |
1693 | goto fail4; | 1689 | goto fail4; |
1694 | } | 1690 | } |
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c index b57cc68058c0..d3f749c72d41 100644 --- a/drivers/net/sfc/falcon.c +++ b/drivers/net/sfc/falcon.c | |||
@@ -116,17 +116,8 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold"); | |||
116 | ************************************************************************** | 116 | ************************************************************************** |
117 | */ | 117 | */ |
118 | 118 | ||
119 | /* DMA address mask (up to 46-bit, avoiding compiler warnings) | 119 | /* DMA address mask */ |
120 | * | 120 | #define FALCON_DMA_MASK DMA_BIT_MASK(46) |
121 | * Note that it is possible to have a platform with 64-bit longs and | ||
122 | * 32-bit DMA addresses, or vice versa. EFX_DMA_MASK takes care of the | ||
123 | * platform DMA mask. | ||
124 | */ | ||
125 | #if BITS_PER_LONG == 64 | ||
126 | #define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffUL) | ||
127 | #else | ||
128 | #define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffULL) | ||
129 | #endif | ||
130 | 121 | ||
131 | /* TX DMA length mask (13-bit) */ | 122 | /* TX DMA length mask (13-bit) */ |
132 | #define FALCON_TX_DMA_MASK (4096 - 1) | 123 | #define FALCON_TX_DMA_MASK (4096 - 1) |
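DMA_BIT_MASK(46) builds the 46-bit mask portably, so the BITS_PER_LONG #ifdef and the EFX_DMA_MASK wrapper are no longer needed. A minimal usage sketch; the 32-bit fallback is an assumption for illustration, not something this hunk adds:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_set_dma_mask(struct pci_dev *pdev)
{
        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(46)))
                return 0;
        /* Fall back to a 32-bit mask if 46 bits cannot be satisfied. */
        return pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
}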
@@ -145,7 +136,7 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold"); | |||
145 | #define PCI_EXP_LNKSTA_LNK_WID_LBN 4 | 136 | #define PCI_EXP_LNKSTA_LNK_WID_LBN 4 |
146 | 137 | ||
147 | #define FALCON_IS_DUAL_FUNC(efx) \ | 138 | #define FALCON_IS_DUAL_FUNC(efx) \ |
148 | (FALCON_REV(efx) < FALCON_REV_B0) | 139 | (falcon_rev(efx) < FALCON_REV_B0) |
149 | 140 | ||
150 | /************************************************************************** | 141 | /************************************************************************** |
151 | * | 142 | * |
@@ -465,7 +456,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue) | |||
465 | TX_DESCQ_TYPE, 0, | 456 | TX_DESCQ_TYPE, 0, |
466 | TX_NON_IP_DROP_DIS_B0, 1); | 457 | TX_NON_IP_DROP_DIS_B0, 1); |
467 | 458 | ||
468 | if (FALCON_REV(efx) >= FALCON_REV_B0) { | 459 | if (falcon_rev(efx) >= FALCON_REV_B0) { |
469 | int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM); | 460 | int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM); |
470 | EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum); | 461 | EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum); |
471 | EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum); | 462 | EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum); |
@@ -474,7 +465,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue) | |||
474 | falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, | 465 | falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, |
475 | tx_queue->queue); | 466 | tx_queue->queue); |
476 | 467 | ||
477 | if (FALCON_REV(efx) < FALCON_REV_B0) { | 468 | if (falcon_rev(efx) < FALCON_REV_B0) { |
478 | efx_oword_t reg; | 469 | efx_oword_t reg; |
479 | 470 | ||
480 | BUG_ON(tx_queue->queue >= 128); /* HW limit */ | 471 | BUG_ON(tx_queue->queue >= 128); /* HW limit */ |
@@ -635,7 +626,7 @@ int falcon_init_rx(struct efx_rx_queue *rx_queue) | |||
635 | efx_oword_t rx_desc_ptr; | 626 | efx_oword_t rx_desc_ptr; |
636 | struct efx_nic *efx = rx_queue->efx; | 627 | struct efx_nic *efx = rx_queue->efx; |
637 | int rc; | 628 | int rc; |
638 | int is_b0 = FALCON_REV(efx) >= FALCON_REV_B0; | 629 | int is_b0 = falcon_rev(efx) >= FALCON_REV_B0; |
639 | int iscsi_digest_en = is_b0; | 630 | int iscsi_digest_en = is_b0; |
640 | 631 | ||
641 | EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n", | 632 | EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n", |
@@ -822,10 +813,10 @@ static inline void falcon_handle_tx_event(struct efx_channel *channel, | |||
822 | tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL); | 813 | tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL); |
823 | tx_queue = &efx->tx_queue[tx_ev_q_label]; | 814 | tx_queue = &efx->tx_queue[tx_ev_q_label]; |
824 | 815 | ||
825 | if (NET_DEV_REGISTERED(efx)) | 816 | if (efx_dev_registered(efx)) |
826 | netif_tx_lock(efx->net_dev); | 817 | netif_tx_lock(efx->net_dev); |
827 | falcon_notify_tx_desc(tx_queue); | 818 | falcon_notify_tx_desc(tx_queue); |
828 | if (NET_DEV_REGISTERED(efx)) | 819 | if (efx_dev_registered(efx)) |
829 | netif_tx_unlock(efx->net_dev); | 820 | netif_tx_unlock(efx->net_dev); |
830 | } else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) && | 821 | } else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) && |
831 | EFX_WORKAROUND_10727(efx)) { | 822 | EFX_WORKAROUND_10727(efx)) { |
@@ -884,7 +875,7 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue, | |||
884 | RX_EV_TCP_UDP_CHKSUM_ERR); | 875 | RX_EV_TCP_UDP_CHKSUM_ERR); |
885 | rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR); | 876 | rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR); |
886 | rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC); | 877 | rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC); |
887 | rx_ev_drib_nib = ((FALCON_REV(efx) >= FALCON_REV_B0) ? | 878 | rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ? |
888 | 0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB)); | 879 | 0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB)); |
889 | rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR); | 880 | rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR); |
890 | 881 | ||
@@ -1065,7 +1056,7 @@ static void falcon_handle_global_event(struct efx_channel *channel, | |||
1065 | EFX_QWORD_FIELD(*event, XG_PHY_INTR)) | 1056 | EFX_QWORD_FIELD(*event, XG_PHY_INTR)) |
1066 | is_phy_event = 1; | 1057 | is_phy_event = 1; |
1067 | 1058 | ||
1068 | if ((FALCON_REV(efx) >= FALCON_REV_B0) && | 1059 | if ((falcon_rev(efx) >= FALCON_REV_B0) && |
1069 | EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0)) | 1060 | EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0)) |
1070 | is_phy_event = 1; | 1061 | is_phy_event = 1; |
1071 | 1062 | ||
@@ -1405,7 +1396,7 @@ static inline void falcon_irq_ack_a1(struct efx_nic *efx) | |||
1405 | static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx) | 1396 | static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx) |
1406 | { | 1397 | { |
1407 | struct falcon_nic_data *nic_data = efx->nic_data; | 1398 | struct falcon_nic_data *nic_data = efx->nic_data; |
1408 | efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; | 1399 | efx_oword_t *int_ker = efx->irq_status.addr; |
1409 | efx_oword_t fatal_intr; | 1400 | efx_oword_t fatal_intr; |
1410 | int error, mem_perr; | 1401 | int error, mem_perr; |
1411 | static int n_int_errors; | 1402 | static int n_int_errors; |
@@ -1451,8 +1442,8 @@ out: | |||
1451 | */ | 1442 | */ |
1452 | static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id) | 1443 | static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id) |
1453 | { | 1444 | { |
1454 | struct efx_nic *efx = (struct efx_nic *)dev_id; | 1445 | struct efx_nic *efx = dev_id; |
1455 | efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; | 1446 | efx_oword_t *int_ker = efx->irq_status.addr; |
1456 | struct efx_channel *channel; | 1447 | struct efx_channel *channel; |
1457 | efx_dword_t reg; | 1448 | efx_dword_t reg; |
1458 | u32 queues; | 1449 | u32 queues; |
@@ -1489,8 +1480,8 @@ static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id) | |||
1489 | 1480 | ||
1490 | static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) | 1481 | static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) |
1491 | { | 1482 | { |
1492 | struct efx_nic *efx = (struct efx_nic *)dev_id; | 1483 | struct efx_nic *efx = dev_id; |
1493 | efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; | 1484 | efx_oword_t *int_ker = efx->irq_status.addr; |
1494 | struct efx_channel *channel; | 1485 | struct efx_channel *channel; |
1495 | int syserr; | 1486 | int syserr; |
1496 | int queues; | 1487 | int queues; |
@@ -1542,9 +1533,9 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) | |||
1542 | */ | 1533 | */ |
1543 | static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id) | 1534 | static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id) |
1544 | { | 1535 | { |
1545 | struct efx_channel *channel = (struct efx_channel *)dev_id; | 1536 | struct efx_channel *channel = dev_id; |
1546 | struct efx_nic *efx = channel->efx; | 1537 | struct efx_nic *efx = channel->efx; |
1547 | efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; | 1538 | efx_oword_t *int_ker = efx->irq_status.addr; |
1548 | int syserr; | 1539 | int syserr; |
1549 | 1540 | ||
1550 | efx->last_irq_cpu = raw_smp_processor_id(); | 1541 | efx->last_irq_cpu = raw_smp_processor_id(); |
@@ -1572,7 +1563,7 @@ static void falcon_setup_rss_indir_table(struct efx_nic *efx) | |||
1572 | unsigned long offset; | 1563 | unsigned long offset; |
1573 | efx_dword_t dword; | 1564 | efx_dword_t dword; |
1574 | 1565 | ||
1575 | if (FALCON_REV(efx) < FALCON_REV_B0) | 1566 | if (falcon_rev(efx) < FALCON_REV_B0) |
1576 | return; | 1567 | return; |
1577 | 1568 | ||
1578 | for (offset = RX_RSS_INDIR_TBL_B0; | 1569 | for (offset = RX_RSS_INDIR_TBL_B0; |
@@ -1595,7 +1586,7 @@ int falcon_init_interrupt(struct efx_nic *efx) | |||
1595 | 1586 | ||
1596 | if (!EFX_INT_MODE_USE_MSI(efx)) { | 1587 | if (!EFX_INT_MODE_USE_MSI(efx)) { |
1597 | irq_handler_t handler; | 1588 | irq_handler_t handler; |
1598 | if (FALCON_REV(efx) >= FALCON_REV_B0) | 1589 | if (falcon_rev(efx) >= FALCON_REV_B0) |
1599 | handler = falcon_legacy_interrupt_b0; | 1590 | handler = falcon_legacy_interrupt_b0; |
1600 | else | 1591 | else |
1601 | handler = falcon_legacy_interrupt_a1; | 1592 | handler = falcon_legacy_interrupt_a1; |
@@ -1636,12 +1627,13 @@ void falcon_fini_interrupt(struct efx_nic *efx) | |||
1636 | efx_oword_t reg; | 1627 | efx_oword_t reg; |
1637 | 1628 | ||
1638 | /* Disable MSI/MSI-X interrupts */ | 1629 | /* Disable MSI/MSI-X interrupts */ |
1639 | efx_for_each_channel_with_interrupt(channel, efx) | 1630 | efx_for_each_channel_with_interrupt(channel, efx) { |
1640 | if (channel->irq) | 1631 | if (channel->irq) |
1641 | free_irq(channel->irq, channel); | 1632 | free_irq(channel->irq, channel); |
1633 | } | ||
1642 | 1634 | ||
1643 | /* ACK legacy interrupt */ | 1635 | /* ACK legacy interrupt */ |
1644 | if (FALCON_REV(efx) >= FALCON_REV_B0) | 1636 | if (falcon_rev(efx) >= FALCON_REV_B0) |
1645 | falcon_read(efx, ®, INT_ISR0_B0); | 1637 | falcon_read(efx, ®, INT_ISR0_B0); |
1646 | else | 1638 | else |
1647 | falcon_irq_ack_a1(efx); | 1639 | falcon_irq_ack_a1(efx); |
@@ -1732,7 +1724,7 @@ void falcon_drain_tx_fifo(struct efx_nic *efx) | |||
1732 | efx_oword_t temp; | 1724 | efx_oword_t temp; |
1733 | int count; | 1725 | int count; |
1734 | 1726 | ||
1735 | if ((FALCON_REV(efx) < FALCON_REV_B0) || | 1727 | if ((falcon_rev(efx) < FALCON_REV_B0) || |
1736 | (efx->loopback_mode != LOOPBACK_NONE)) | 1728 | (efx->loopback_mode != LOOPBACK_NONE)) |
1737 | return; | 1729 | return; |
1738 | 1730 | ||
@@ -1785,7 +1777,7 @@ void falcon_deconfigure_mac_wrapper(struct efx_nic *efx) | |||
1785 | { | 1777 | { |
1786 | efx_oword_t temp; | 1778 | efx_oword_t temp; |
1787 | 1779 | ||
1788 | if (FALCON_REV(efx) < FALCON_REV_B0) | 1780 | if (falcon_rev(efx) < FALCON_REV_B0) |
1789 | return; | 1781 | return; |
1790 | 1782 | ||
1791 | /* Isolate the MAC -> RX */ | 1783 | /* Isolate the MAC -> RX */ |
@@ -1823,7 +1815,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) | |||
1823 | MAC_SPEED, link_speed); | 1815 | MAC_SPEED, link_speed); |
1824 | /* On B0, MAC backpressure can be disabled and packets get | 1816 | /* On B0, MAC backpressure can be disabled and packets get |
1825 | * discarded. */ | 1817 | * discarded. */ |
1826 | if (FALCON_REV(efx) >= FALCON_REV_B0) { | 1818 | if (falcon_rev(efx) >= FALCON_REV_B0) { |
1827 | EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, | 1819 | EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, |
1828 | !efx->link_up); | 1820 | !efx->link_up); |
1829 | } | 1821 | } |
@@ -1841,7 +1833,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) | |||
1841 | EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc); | 1833 | EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc); |
1842 | 1834 | ||
1843 | /* Unisolate the MAC -> RX */ | 1835 | /* Unisolate the MAC -> RX */ |
1844 | if (FALCON_REV(efx) >= FALCON_REV_B0) | 1836 | if (falcon_rev(efx) >= FALCON_REV_B0) |
1845 | EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1); | 1837 | EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1); |
1846 | falcon_write(efx, ®, RX_CFG_REG_KER); | 1838 | falcon_write(efx, ®, RX_CFG_REG_KER); |
1847 | } | 1839 | } |
@@ -1856,7 +1848,7 @@ int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset) | |||
1856 | return 0; | 1848 | return 0; |
1857 | 1849 | ||
1858 | /* Statistics fetch will fail if the MAC is in TX drain */ | 1850 | /* Statistics fetch will fail if the MAC is in TX drain */ |
1859 | if (FALCON_REV(efx) >= FALCON_REV_B0) { | 1851 | if (falcon_rev(efx) >= FALCON_REV_B0) { |
1860 | efx_oword_t temp; | 1852 | efx_oword_t temp; |
1861 | falcon_read(efx, &temp, MAC0_CTRL_REG_KER); | 1853 | falcon_read(efx, &temp, MAC0_CTRL_REG_KER); |
1862 | if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0)) | 1854 | if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0)) |
@@ -1940,7 +1932,7 @@ static int falcon_gmii_wait(struct efx_nic *efx) | |||
1940 | static void falcon_mdio_write(struct net_device *net_dev, int phy_id, | 1932 | static void falcon_mdio_write(struct net_device *net_dev, int phy_id, |
1941 | int addr, int value) | 1933 | int addr, int value) |
1942 | { | 1934 | { |
1943 | struct efx_nic *efx = (struct efx_nic *)net_dev->priv; | 1935 | struct efx_nic *efx = net_dev->priv; |
1944 | unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK; | 1936 | unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK; |
1945 | efx_oword_t reg; | 1937 | efx_oword_t reg; |
1946 | 1938 | ||
@@ -2008,7 +2000,7 @@ static void falcon_mdio_write(struct net_device *net_dev, int phy_id, | |||
2008 | * could be read, -1 will be returned. */ | 2000 | * could be read, -1 will be returned. */ |
2009 | static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr) | 2001 | static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr) |
2010 | { | 2002 | { |
2011 | struct efx_nic *efx = (struct efx_nic *)net_dev->priv; | 2003 | struct efx_nic *efx = net_dev->priv; |
2012 | unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK; | 2004 | unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK; |
2013 | efx_oword_t reg; | 2005 | efx_oword_t reg; |
2014 | int value = -1; | 2006 | int value = -1; |
@@ -2113,7 +2105,7 @@ int falcon_probe_port(struct efx_nic *efx) | |||
2113 | falcon_init_mdio(&efx->mii); | 2105 | falcon_init_mdio(&efx->mii); |
2114 | 2106 | ||
2115 | /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */ | 2107 | /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */ |
2116 | if (FALCON_REV(efx) >= FALCON_REV_B0) | 2108 | if (falcon_rev(efx) >= FALCON_REV_B0) |
2117 | efx->flow_control = EFX_FC_RX | EFX_FC_TX; | 2109 | efx->flow_control = EFX_FC_RX | EFX_FC_TX; |
2118 | else | 2110 | else |
2119 | efx->flow_control = EFX_FC_RX; | 2111 | efx->flow_control = EFX_FC_RX; |
@@ -2373,7 +2365,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx) | |||
2373 | return -ENODEV; | 2365 | return -ENODEV; |
2374 | } | 2366 | } |
2375 | 2367 | ||
2376 | switch (FALCON_REV(efx)) { | 2368 | switch (falcon_rev(efx)) { |
2377 | case FALCON_REV_A0: | 2369 | case FALCON_REV_A0: |
2378 | case 0xff: | 2370 | case 0xff: |
2379 | EFX_ERR(efx, "Falcon rev A0 not supported\n"); | 2371 | EFX_ERR(efx, "Falcon rev A0 not supported\n"); |
@@ -2399,7 +2391,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx) | |||
2399 | break; | 2391 | break; |
2400 | 2392 | ||
2401 | default: | 2393 | default: |
2402 | EFX_ERR(efx, "Unknown Falcon rev %d\n", FALCON_REV(efx)); | 2394 | EFX_ERR(efx, "Unknown Falcon rev %d\n", falcon_rev(efx)); |
2403 | return -ENODEV; | 2395 | return -ENODEV; |
2404 | } | 2396 | } |
2405 | 2397 | ||
@@ -2419,7 +2411,7 @@ int falcon_probe_nic(struct efx_nic *efx) | |||
2419 | 2411 | ||
2420 | /* Allocate storage for hardware specific data */ | 2412 | /* Allocate storage for hardware specific data */ |
2421 | nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); | 2413 | nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); |
2422 | efx->nic_data = (void *) nic_data; | 2414 | efx->nic_data = nic_data; |
2423 | 2415 | ||
2424 | /* Determine number of ports etc. */ | 2416 | /* Determine number of ports etc. */ |
2425 | rc = falcon_probe_nic_variant(efx); | 2417 | rc = falcon_probe_nic_variant(efx); |
@@ -2489,13 +2481,10 @@ int falcon_probe_nic(struct efx_nic *efx) | |||
2489 | */ | 2481 | */ |
2490 | int falcon_init_nic(struct efx_nic *efx) | 2482 | int falcon_init_nic(struct efx_nic *efx) |
2491 | { | 2483 | { |
2492 | struct falcon_nic_data *data; | ||
2493 | efx_oword_t temp; | 2484 | efx_oword_t temp; |
2494 | unsigned thresh; | 2485 | unsigned thresh; |
2495 | int rc; | 2486 | int rc; |
2496 | 2487 | ||
2497 | data = (struct falcon_nic_data *)efx->nic_data; | ||
2498 | |||
2499 | /* Set up the address region register. This is only needed | 2488 | /* Set up the address region register. This is only needed |
2500 | * for the B0 FPGA, but since we are just pushing in the | 2489 | * for the B0 FPGA, but since we are just pushing in the |
2501 | * reset defaults this may as well be unconditional. */ | 2490 | * reset defaults this may as well be unconditional. */ |
@@ -2562,7 +2551,7 @@ int falcon_init_nic(struct efx_nic *efx) | |||
2562 | 2551 | ||
2563 | /* Set number of RSS queues for receive path. */ | 2552 | /* Set number of RSS queues for receive path. */ |
2564 | falcon_read(efx, &temp, RX_FILTER_CTL_REG); | 2553 | falcon_read(efx, &temp, RX_FILTER_CTL_REG); |
2565 | if (FALCON_REV(efx) >= FALCON_REV_B0) | 2554 | if (falcon_rev(efx) >= FALCON_REV_B0) |
2566 | EFX_SET_OWORD_FIELD(temp, NUM_KER, 0); | 2555 | EFX_SET_OWORD_FIELD(temp, NUM_KER, 0); |
2567 | else | 2556 | else |
2568 | EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1); | 2557 | EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1); |
@@ -2600,7 +2589,7 @@ int falcon_init_nic(struct efx_nic *efx) | |||
2600 | /* Prefetch threshold 2 => fetch when descriptor cache half empty */ | 2589 | /* Prefetch threshold 2 => fetch when descriptor cache half empty */ |
2601 | EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2); | 2590 | EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2); |
2602 | /* Squash TX of packets of 16 bytes or less */ | 2591 | /* Squash TX of packets of 16 bytes or less */ |
2603 | if (FALCON_REV(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx)) | 2592 | if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx)) |
2604 | EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1); | 2593 | EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1); |
2605 | falcon_write(efx, &temp, TX_CFG2_REG_KER); | 2594 | falcon_write(efx, &temp, TX_CFG2_REG_KER); |
2606 | 2595 | ||
@@ -2617,7 +2606,7 @@ int falcon_init_nic(struct efx_nic *efx) | |||
2617 | if (EFX_WORKAROUND_7575(efx)) | 2606 | if (EFX_WORKAROUND_7575(efx)) |
2618 | EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE, | 2607 | EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE, |
2619 | (3 * 4096) / 32); | 2608 | (3 * 4096) / 32); |
2620 | if (FALCON_REV(efx) >= FALCON_REV_B0) | 2609 | if (falcon_rev(efx) >= FALCON_REV_B0) |
2621 | EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1); | 2610 | EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1); |
2622 | 2611 | ||
2623 | /* RX FIFO flow control thresholds */ | 2612 | /* RX FIFO flow control thresholds */ |
@@ -2633,7 +2622,7 @@ int falcon_init_nic(struct efx_nic *efx) | |||
2633 | falcon_write(efx, &temp, RX_CFG_REG_KER); | 2622 | falcon_write(efx, &temp, RX_CFG_REG_KER); |
2634 | 2623 | ||
2635 | /* Set destination of both TX and RX Flush events */ | 2624 | /* Set destination of both TX and RX Flush events */ |
2636 | if (FALCON_REV(efx) >= FALCON_REV_B0) { | 2625 | if (falcon_rev(efx) >= FALCON_REV_B0) { |
2637 | EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0); | 2626 | EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0); |
2638 | falcon_write(efx, &temp, DP_CTRL_REG); | 2627 | falcon_write(efx, &temp, DP_CTRL_REG); |
2639 | } | 2628 | } |
@@ -2647,7 +2636,7 @@ void falcon_remove_nic(struct efx_nic *efx) | |||
2647 | 2636 | ||
2648 | falcon_free_buffer(efx, &efx->irq_status); | 2637 | falcon_free_buffer(efx, &efx->irq_status); |
2649 | 2638 | ||
2650 | (void) falcon_reset_hw(efx, RESET_TYPE_ALL); | 2639 | falcon_reset_hw(efx, RESET_TYPE_ALL); |
2651 | 2640 | ||
2652 | /* Release the second function after the reset */ | 2641 | /* Release the second function after the reset */ |
2653 | if (nic_data->pci_dev2) { | 2642 | if (nic_data->pci_dev2) { |
diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h index 6117403b0c03..492f9bc28840 100644 --- a/drivers/net/sfc/falcon.h +++ b/drivers/net/sfc/falcon.h | |||
@@ -23,7 +23,10 @@ enum falcon_revision { | |||
23 | FALCON_REV_B0 = 2, | 23 | FALCON_REV_B0 = 2, |
24 | }; | 24 | }; |
25 | 25 | ||
26 | #define FALCON_REV(efx) ((efx)->pci_dev->revision) | 26 | static inline int falcon_rev(struct efx_nic *efx) |
27 | { | ||
28 | return efx->pci_dev->revision; | ||
29 | } | ||
27 | 30 | ||
28 | extern struct efx_nic_type falcon_a_nic_type; | 31 | extern struct efx_nic_type falcon_a_nic_type; |
29 | extern struct efx_nic_type falcon_b_nic_type; | 32 | extern struct efx_nic_type falcon_b_nic_type; |
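Turning FALCON_REV() into a static inline is the usual macro-to-inline cleanup: the argument is type-checked and evaluated exactly once. The same pattern in general form, with an illustrative structure name:

struct example_nic {
        struct pci_dev *pci_dev;
};

/* old style: no type checking, argument expanded textually */
#define EXAMPLE_REV(nic)        ((nic)->pci_dev->revision)

/* new style: 'nic' must really be a struct example_nic pointer */
static inline int example_rev(struct example_nic *nic)
{
        return nic->pci_dev->revision;
}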
diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h index 06e2d68fc3d1..6d003114eeab 100644 --- a/drivers/net/sfc/falcon_hwdefs.h +++ b/drivers/net/sfc/falcon_hwdefs.h | |||
@@ -1125,7 +1125,7 @@ struct falcon_nvconfig_board_v2 { | |||
1125 | u8 port1_phy_type; | 1125 | u8 port1_phy_type; |
1126 | __le16 asic_sub_revision; | 1126 | __le16 asic_sub_revision; |
1127 | __le16 board_revision; | 1127 | __le16 board_revision; |
1128 | } __attribute__ ((packed)); | 1128 | } __packed; |
1129 | 1129 | ||
1130 | #define NVCONFIG_BASE 0x300 | 1130 | #define NVCONFIG_BASE 0x300 |
1131 | #define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C | 1131 | #define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C |
@@ -1144,6 +1144,6 @@ struct falcon_nvconfig { | |||
1144 | __le16 board_struct_ver; | 1144 | __le16 board_struct_ver; |
1145 | __le16 board_checksum; | 1145 | __le16 board_checksum; |
1146 | struct falcon_nvconfig_board_v2 board_v2; | 1146 | struct falcon_nvconfig_board_v2 board_v2; |
1147 | } __attribute__ ((packed)); | 1147 | } __packed; |
1148 | 1148 | ||
1149 | #endif /* EFX_FALCON_HWDEFS_H */ | 1149 | #endif /* EFX_FALCON_HWDEFS_H */ |
diff --git a/drivers/net/sfc/falcon_io.h b/drivers/net/sfc/falcon_io.h index ea08184ddfa9..6670cdfc41ab 100644 --- a/drivers/net/sfc/falcon_io.h +++ b/drivers/net/sfc/falcon_io.h | |||
@@ -56,14 +56,27 @@ | |||
56 | #define FALCON_USE_QWORD_IO 1 | 56 | #define FALCON_USE_QWORD_IO 1 |
57 | #endif | 57 | #endif |
58 | 58 | ||
59 | #define _falcon_writeq(efx, value, reg) \ | 59 | #ifdef FALCON_USE_QWORD_IO |
60 | __raw_writeq((__force u64) (value), (efx)->membase + (reg)) | 60 | static inline void _falcon_writeq(struct efx_nic *efx, __le64 value, |
61 | #define _falcon_writel(efx, value, reg) \ | 61 | unsigned int reg) |
62 | __raw_writel((__force u32) (value), (efx)->membase + (reg)) | 62 | { |
63 | #define _falcon_readq(efx, reg) \ | 63 | __raw_writeq((__force u64)value, efx->membase + reg); |
64 | ((__force __le64) __raw_readq((efx)->membase + (reg))) | 64 | } |
65 | #define _falcon_readl(efx, reg) \ | 65 | static inline __le64 _falcon_readq(struct efx_nic *efx, unsigned int reg) |
66 | ((__force __le32) __raw_readl((efx)->membase + (reg))) | 66 | { |
67 | return (__force __le64)__raw_readq(efx->membase + reg); | ||
68 | } | ||
69 | #endif | ||
70 | |||
71 | static inline void _falcon_writel(struct efx_nic *efx, __le32 value, | ||
72 | unsigned int reg) | ||
73 | { | ||
74 | __raw_writel((__force u32)value, efx->membase + reg); | ||
75 | } | ||
76 | static inline __le32 _falcon_readl(struct efx_nic *efx, unsigned int reg) | ||
77 | { | ||
78 | return (__force __le32)__raw_readl(efx->membase + reg); | ||
79 | } | ||
67 | 80 | ||
68 | /* Writes to a normal 16-byte Falcon register, locking as appropriate. */ | 81 | /* Writes to a normal 16-byte Falcon register, locking as appropriate. */ |
69 | static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value, | 82 | static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value, |
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c index a74b7931a3c4..dbdcee4b0f8d 100644 --- a/drivers/net/sfc/falcon_xmac.c +++ b/drivers/net/sfc/falcon_xmac.c | |||
@@ -221,7 +221,7 @@ static int falcon_xgmii_status(struct efx_nic *efx) | |||
221 | { | 221 | { |
222 | efx_dword_t reg; | 222 | efx_dword_t reg; |
223 | 223 | ||
224 | if (FALCON_REV(efx) < FALCON_REV_B0) | 224 | if (falcon_rev(efx) < FALCON_REV_B0) |
225 | return 1; | 225 | return 1; |
226 | 226 | ||
227 | /* The ISR latches, so clear it and re-read */ | 227 | /* The ISR latches, so clear it and re-read */ |
@@ -241,7 +241,7 @@ static void falcon_mask_status_intr(struct efx_nic *efx, int enable) | |||
241 | { | 241 | { |
242 | efx_dword_t reg; | 242 | efx_dword_t reg; |
243 | 243 | ||
244 | if ((FALCON_REV(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx)) | 244 | if ((falcon_rev(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx)) |
245 | return; | 245 | return; |
246 | 246 | ||
247 | /* Flush the ISR */ | 247 | /* Flush the ISR */ |
@@ -454,7 +454,7 @@ static int falcon_check_xaui_link_up(struct efx_nic *efx) | |||
454 | 454 | ||
455 | EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n", | 455 | EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n", |
456 | __func__, tries); | 456 | __func__, tries); |
457 | (void) falcon_reset_xaui(efx); | 457 | falcon_reset_xaui(efx); |
458 | udelay(200); | 458 | udelay(200); |
459 | tries--; | 459 | tries--; |
460 | } | 460 | } |
@@ -572,7 +572,7 @@ int falcon_check_xmac(struct efx_nic *efx) | |||
572 | xaui_link_ok = falcon_xaui_link_ok(efx); | 572 | xaui_link_ok = falcon_xaui_link_ok(efx); |
573 | 573 | ||
574 | if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok) | 574 | if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok) |
575 | (void) falcon_reset_xaui(efx); | 575 | falcon_reset_xaui(efx); |
576 | 576 | ||
577 | /* Call the PHY check_hw routine */ | 577 | /* Call the PHY check_hw routine */ |
578 | rc = efx->phy_op->check_hw(efx); | 578 | rc = efx->phy_op->check_hw(efx); |
@@ -639,7 +639,7 @@ int falcon_xmac_set_pause(struct efx_nic *efx, enum efx_fc_type flow_control) | |||
639 | reset = ((flow_control & EFX_FC_TX) && | 639 | reset = ((flow_control & EFX_FC_TX) && |
640 | !(efx->flow_control & EFX_FC_TX)); | 640 | !(efx->flow_control & EFX_FC_TX)); |
641 | if (EFX_WORKAROUND_11482(efx) && reset) { | 641 | if (EFX_WORKAROUND_11482(efx) && reset) { |
642 | if (FALCON_REV(efx) >= FALCON_REV_B0) { | 642 | if (falcon_rev(efx) >= FALCON_REV_B0) { |
643 | /* Recover by resetting the EM block */ | 643 | /* Recover by resetting the EM block */ |
644 | if (efx->link_up) | 644 | if (efx->link_up) |
645 | falcon_drain_tx_fifo(efx); | 645 | falcon_drain_tx_fifo(efx); |
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h index 59f261b4171f..5e20e7551dae 100644 --- a/drivers/net/sfc/net_driver.h +++ b/drivers/net/sfc/net_driver.h | |||
@@ -42,7 +42,7 @@ | |||
42 | #ifndef EFX_DRIVER_NAME | 42 | #ifndef EFX_DRIVER_NAME |
43 | #define EFX_DRIVER_NAME "sfc" | 43 | #define EFX_DRIVER_NAME "sfc" |
44 | #endif | 44 | #endif |
45 | #define EFX_DRIVER_VERSION "2.2.0136" | 45 | #define EFX_DRIVER_VERSION "2.2" |
46 | 46 | ||
47 | #ifdef EFX_ENABLE_DEBUG | 47 | #ifdef EFX_ENABLE_DEBUG |
48 | #define EFX_BUG_ON_PARANOID(x) BUG_ON(x) | 48 | #define EFX_BUG_ON_PARANOID(x) BUG_ON(x) |
@@ -52,28 +52,19 @@ | |||
52 | #define EFX_WARN_ON_PARANOID(x) do {} while (0) | 52 | #define EFX_WARN_ON_PARANOID(x) do {} while (0) |
53 | #endif | 53 | #endif |
54 | 54 | ||
55 | #define NET_DEV_REGISTERED(efx) \ | ||
56 | ((efx)->net_dev->reg_state == NETREG_REGISTERED) | ||
57 | |||
58 | /* Include net device name in log messages if it has been registered. | ||
59 | * Use efx->name not efx->net_dev->name so that races with (un)registration | ||
60 | * are harmless. | ||
61 | */ | ||
62 | #define NET_DEV_NAME(efx) (NET_DEV_REGISTERED(efx) ? (efx)->name : "") | ||
63 | |||
64 | /* Un-rate-limited logging */ | 55 | /* Un-rate-limited logging */ |
65 | #define EFX_ERR(efx, fmt, args...) \ | 56 | #define EFX_ERR(efx, fmt, args...) \ |
66 | dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, NET_DEV_NAME(efx), ##args) | 57 | dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, efx_dev_name(efx), ##args) |
67 | 58 | ||
68 | #define EFX_INFO(efx, fmt, args...) \ | 59 | #define EFX_INFO(efx, fmt, args...) \ |
69 | dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, NET_DEV_NAME(efx), ##args) | 60 | dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, efx_dev_name(efx), ##args) |
70 | 61 | ||
71 | #ifdef EFX_ENABLE_DEBUG | 62 | #ifdef EFX_ENABLE_DEBUG |
72 | #define EFX_LOG(efx, fmt, args...) \ | 63 | #define EFX_LOG(efx, fmt, args...) \ |
73 | dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args) | 64 | dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args) |
74 | #else | 65 | #else |
75 | #define EFX_LOG(efx, fmt, args...) \ | 66 | #define EFX_LOG(efx, fmt, args...) \ |
76 | dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args) | 67 | dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args) |
77 | #endif | 68 | #endif |
78 | 69 | ||
79 | #define EFX_TRACE(efx, fmt, args...) do {} while (0) | 70 | #define EFX_TRACE(efx, fmt, args...) do {} while (0) |
@@ -90,11 +81,6 @@ do {if (net_ratelimit()) EFX_INFO(efx, fmt, ##args); } while (0) | |||
90 | #define EFX_LOG_RL(efx, fmt, args...) \ | 81 | #define EFX_LOG_RL(efx, fmt, args...) \ |
91 | do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0) | 82 | do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0) |
92 | 83 | ||
93 | /* Kernel headers may redefine inline anyway */ | ||
94 | #ifndef inline | ||
95 | #define inline inline __attribute__ ((always_inline)) | ||
96 | #endif | ||
97 | |||
98 | /************************************************************************** | 84 | /************************************************************************** |
99 | * | 85 | * |
100 | * Efx data structures | 86 | * Efx data structures |
@@ -695,7 +681,7 @@ struct efx_nic { | |||
695 | struct workqueue_struct *workqueue; | 681 | struct workqueue_struct *workqueue; |
696 | struct work_struct reset_work; | 682 | struct work_struct reset_work; |
697 | struct delayed_work monitor_work; | 683 | struct delayed_work monitor_work; |
698 | unsigned long membase_phys; | 684 | resource_size_t membase_phys; |
699 | void __iomem *membase; | 685 | void __iomem *membase; |
700 | spinlock_t biu_lock; | 686 | spinlock_t biu_lock; |
701 | enum efx_int_mode interrupt_mode; | 687 | enum efx_int_mode interrupt_mode; |
@@ -719,7 +705,7 @@ struct efx_nic { | |||
719 | 705 | ||
720 | unsigned n_rx_nodesc_drop_cnt; | 706 | unsigned n_rx_nodesc_drop_cnt; |
721 | 707 | ||
722 | void *nic_data; | 708 | struct falcon_nic_data *nic_data; |
723 | 709 | ||
724 | struct mutex mac_lock; | 710 | struct mutex mac_lock; |
725 | int port_enabled; | 711 | int port_enabled; |
@@ -760,6 +746,20 @@ struct efx_nic { | |||
760 | void *loopback_selftest; | 746 | void *loopback_selftest; |
761 | }; | 747 | }; |
762 | 748 | ||
749 | static inline int efx_dev_registered(struct efx_nic *efx) | ||
750 | { | ||
751 | return efx->net_dev->reg_state == NETREG_REGISTERED; | ||
752 | } | ||
753 | |||
754 | /* Net device name, for inclusion in log messages if it has been registered. | ||
755 | * Use efx->name not efx->net_dev->name so that races with (un)registration | ||
756 | * are harmless. | ||
757 | */ | ||
758 | static inline const char *efx_dev_name(struct efx_nic *efx) | ||
759 | { | ||
760 | return efx_dev_registered(efx) ? efx->name : ""; | ||
761 | } | ||
762 | |||
763 | /** | 763 | /** |
764 | * struct efx_nic_type - Efx device type definition | 764 | * struct efx_nic_type - Efx device type definition |
765 | * @mem_bar: Memory BAR number | 765 | * @mem_bar: Memory BAR number |
@@ -795,7 +795,7 @@ struct efx_nic_type { | |||
795 | unsigned int txd_ring_mask; | 795 | unsigned int txd_ring_mask; |
796 | unsigned int rxd_ring_mask; | 796 | unsigned int rxd_ring_mask; |
797 | unsigned int evq_size; | 797 | unsigned int evq_size; |
798 | dma_addr_t max_dma_mask; | 798 | u64 max_dma_mask; |
799 | unsigned int tx_dma_mask; | 799 | unsigned int tx_dma_mask; |
800 | unsigned bug5391_mask; | 800 | unsigned bug5391_mask; |
801 | 801 | ||
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c index 670622373ddf..601b001437c0 100644 --- a/drivers/net/sfc/rx.c +++ b/drivers/net/sfc/rx.c | |||
@@ -86,14 +86,17 @@ static unsigned int rx_refill_limit = 95; | |||
86 | */ | 86 | */ |
87 | #define EFX_RXD_HEAD_ROOM 2 | 87 | #define EFX_RXD_HEAD_ROOM 2 |
88 | 88 | ||
89 | /* Macros for zero-order pages (potentially) containing multiple RX buffers */ | 89 | static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf) |
90 | #define RX_DATA_OFFSET(_data) \ | 90 | { |
91 | (((unsigned long) (_data)) & (PAGE_SIZE-1)) | 91 | /* Offset is always within one page, so we don't need to consider |
92 | #define RX_BUF_OFFSET(_rx_buf) \ | 92 | * the page order. |
93 | RX_DATA_OFFSET((_rx_buf)->data) | 93 | */ |
94 | 94 | return (__force unsigned long) buf->data & (PAGE_SIZE - 1); | |
95 | #define RX_PAGE_SIZE(_efx) \ | 95 | } |
96 | (PAGE_SIZE * (1u << (_efx)->rx_buffer_order)) | 96 | static inline unsigned int efx_rx_buf_size(struct efx_nic *efx) |
97 | { | ||
98 | return PAGE_SIZE << efx->rx_buffer_order; | ||
99 | } | ||
97 | 100 | ||
98 | 101 | ||
99 | /************************************************************************** | 102 | /************************************************************************** |
@@ -106,7 +109,7 @@ static unsigned int rx_refill_limit = 95; | |||
106 | static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr, | 109 | static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr, |
107 | void **tcpudp_hdr, u64 *hdr_flags, void *priv) | 110 | void **tcpudp_hdr, u64 *hdr_flags, void *priv) |
108 | { | 111 | { |
109 | struct efx_channel *channel = (struct efx_channel *)priv; | 112 | struct efx_channel *channel = priv; |
110 | struct iphdr *iph; | 113 | struct iphdr *iph; |
111 | struct tcphdr *th; | 114 | struct tcphdr *th; |
112 | 115 | ||
@@ -131,12 +134,12 @@ static int efx_get_frag_hdr(struct skb_frag_struct *frag, void **mac_hdr, | |||
131 | void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags, | 134 | void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags, |
132 | void *priv) | 135 | void *priv) |
133 | { | 136 | { |
134 | struct efx_channel *channel = (struct efx_channel *)priv; | 137 | struct efx_channel *channel = priv; |
135 | struct ethhdr *eh; | 138 | struct ethhdr *eh; |
136 | struct iphdr *iph; | 139 | struct iphdr *iph; |
137 | 140 | ||
138 | /* We support EtherII and VLAN encapsulated IPv4 */ | 141 | /* We support EtherII and VLAN encapsulated IPv4 */ |
139 | eh = (struct ethhdr *)(page_address(frag->page) + frag->page_offset); | 142 | eh = page_address(frag->page) + frag->page_offset; |
140 | *mac_hdr = eh; | 143 | *mac_hdr = eh; |
141 | 144 | ||
142 | if (eh->h_proto == htons(ETH_P_IP)) { | 145 | if (eh->h_proto == htons(ETH_P_IP)) { |
@@ -269,7 +272,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue, | |||
269 | return -ENOMEM; | 272 | return -ENOMEM; |
270 | 273 | ||
271 | dma_addr = pci_map_page(efx->pci_dev, rx_buf->page, | 274 | dma_addr = pci_map_page(efx->pci_dev, rx_buf->page, |
272 | 0, RX_PAGE_SIZE(efx), | 275 | 0, efx_rx_buf_size(efx), |
273 | PCI_DMA_FROMDEVICE); | 276 | PCI_DMA_FROMDEVICE); |
274 | 277 | ||
275 | if (unlikely(pci_dma_mapping_error(dma_addr))) { | 278 | if (unlikely(pci_dma_mapping_error(dma_addr))) { |
@@ -280,14 +283,14 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue, | |||
280 | 283 | ||
281 | rx_queue->buf_page = rx_buf->page; | 284 | rx_queue->buf_page = rx_buf->page; |
282 | rx_queue->buf_dma_addr = dma_addr; | 285 | rx_queue->buf_dma_addr = dma_addr; |
283 | rx_queue->buf_data = ((char *) page_address(rx_buf->page) + | 286 | rx_queue->buf_data = (page_address(rx_buf->page) + |
284 | EFX_PAGE_IP_ALIGN); | 287 | EFX_PAGE_IP_ALIGN); |
285 | } | 288 | } |
286 | 289 | ||
287 | offset = RX_DATA_OFFSET(rx_queue->buf_data); | ||
288 | rx_buf->len = bytes; | 290 | rx_buf->len = bytes; |
289 | rx_buf->dma_addr = rx_queue->buf_dma_addr + offset; | ||
290 | rx_buf->data = rx_queue->buf_data; | 291 | rx_buf->data = rx_queue->buf_data; |
292 | offset = efx_rx_buf_offset(rx_buf); | ||
293 | rx_buf->dma_addr = rx_queue->buf_dma_addr + offset; | ||
291 | 294 | ||
292 | /* Try to pack multiple buffers per page */ | 295 | /* Try to pack multiple buffers per page */ |
293 | if (efx->rx_buffer_order == 0) { | 296 | if (efx->rx_buffer_order == 0) { |
@@ -295,7 +298,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue, | |||
295 | rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff); | 298 | rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff); |
296 | offset += ((bytes + 0x1ff) & ~0x1ff); | 299 | offset += ((bytes + 0x1ff) & ~0x1ff); |
297 | 300 | ||
298 | space = RX_PAGE_SIZE(efx) - offset; | 301 | space = efx_rx_buf_size(efx) - offset; |
299 | if (space >= bytes) { | 302 | if (space >= bytes) { |
300 | /* Refs dropped on kernel releasing each skb */ | 303 | /* Refs dropped on kernel releasing each skb */ |
301 | get_page(rx_queue->buf_page); | 304 | get_page(rx_queue->buf_page); |
@@ -344,7 +347,8 @@ static inline void efx_unmap_rx_buffer(struct efx_nic *efx, | |||
344 | EFX_BUG_ON_PARANOID(rx_buf->skb); | 347 | EFX_BUG_ON_PARANOID(rx_buf->skb); |
345 | if (rx_buf->unmap_addr) { | 348 | if (rx_buf->unmap_addr) { |
346 | pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr, | 349 | pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr, |
347 | RX_PAGE_SIZE(efx), PCI_DMA_FROMDEVICE); | 350 | efx_rx_buf_size(efx), |
351 | PCI_DMA_FROMDEVICE); | ||
348 | rx_buf->unmap_addr = 0; | 352 | rx_buf->unmap_addr = 0; |
349 | } | 353 | } |
350 | } else if (likely(rx_buf->skb)) { | 354 | } else if (likely(rx_buf->skb)) { |
@@ -400,9 +404,10 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, | |||
400 | return 0; | 404 | return 0; |
401 | 405 | ||
402 | /* Record minimum fill level */ | 406 | /* Record minimum fill level */ |
403 | if (unlikely(fill_level < rx_queue->min_fill)) | 407 | if (unlikely(fill_level < rx_queue->min_fill)) { |
404 | if (fill_level) | 408 | if (fill_level) |
405 | rx_queue->min_fill = fill_level; | 409 | rx_queue->min_fill = fill_level; |
410 | } | ||
406 | 411 | ||
407 | /* Acquire RX add lock. If this lock is contended, then a fast | 412 | /* Acquire RX add lock. If this lock is contended, then a fast |
408 | * fill must already be in progress (e.g. in the refill | 413 | * fill must already be in progress (e.g. in the refill |
@@ -552,7 +557,7 @@ static inline void efx_rx_packet_lro(struct efx_channel *channel, | |||
552 | struct skb_frag_struct frags; | 557 | struct skb_frag_struct frags; |
553 | 558 | ||
554 | frags.page = rx_buf->page; | 559 | frags.page = rx_buf->page; |
555 | frags.page_offset = RX_BUF_OFFSET(rx_buf); | 560 | frags.page_offset = efx_rx_buf_offset(rx_buf); |
556 | frags.size = rx_buf->len; | 561 | frags.size = rx_buf->len; |
557 | 562 | ||
558 | lro_receive_frags(lro_mgr, &frags, rx_buf->len, | 563 | lro_receive_frags(lro_mgr, &frags, rx_buf->len, |
@@ -597,7 +602,7 @@ static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf, | |||
597 | if (unlikely(rx_buf->len > hdr_len)) { | 602 | if (unlikely(rx_buf->len > hdr_len)) { |
598 | struct skb_frag_struct *frag = skb_shinfo(skb)->frags; | 603 | struct skb_frag_struct *frag = skb_shinfo(skb)->frags; |
599 | frag->page = rx_buf->page; | 604 | frag->page = rx_buf->page; |
600 | frag->page_offset = RX_BUF_OFFSET(rx_buf) + hdr_len; | 605 | frag->page_offset = efx_rx_buf_offset(rx_buf) + hdr_len; |
601 | frag->size = skb->len - hdr_len; | 606 | frag->size = skb->len - hdr_len; |
602 | skb_shinfo(skb)->nr_frags = 1; | 607 | skb_shinfo(skb)->nr_frags = 1; |
603 | skb->data_len = frag->size; | 608 | skb->data_len = frag->size; |
@@ -851,7 +856,8 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) | |||
851 | /* For a page that is part-way through splitting into RX buffers */ | 856 | /* For a page that is part-way through splitting into RX buffers */ |
852 | if (rx_queue->buf_page != NULL) { | 857 | if (rx_queue->buf_page != NULL) { |
853 | pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr, | 858 | pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr, |
854 | RX_PAGE_SIZE(rx_queue->efx), PCI_DMA_FROMDEVICE); | 859 | efx_rx_buf_size(rx_queue->efx), |
860 | PCI_DMA_FROMDEVICE); | ||
855 | __free_pages(rx_queue->buf_page, | 861 | __free_pages(rx_queue->buf_page, |
856 | rx_queue->efx->rx_buffer_order); | 862 | rx_queue->efx->rx_buffer_order); |
857 | rx_queue->buf_page = NULL; | 863 | rx_queue->buf_page = NULL; |
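Aside on the rx.c hunks above: they drop the RX_DATA_OFFSET/RX_BUF_OFFSET/RX_PAGE_SIZE macros in favour of static inline helpers, which get real parameter types and avoid re-evaluating arguments. A minimal standalone sketch of that transformation, using a stand-in EX_PAGE_SIZE constant and a hypothetical struct rather than the driver's real definitions:

#include <stdio.h>

#define EX_PAGE_SIZE 4096UL            /* stand-in for the kernel's PAGE_SIZE */

struct rx_example {                    /* hypothetical, mirrors the efx fields used */
	unsigned int rx_buffer_order;
	void *data;
};

/* Old style: plain macros, no type checking. */
#define RX_BUF_OFFSET(_rx) (((unsigned long)(_rx)->data) & (EX_PAGE_SIZE - 1))
#define RX_PAGE_SIZE(_rx)  (EX_PAGE_SIZE * (1u << (_rx)->rx_buffer_order))

/* New style: static inline helpers with explicit types. */
static inline unsigned int ex_rx_buf_offset(const struct rx_example *rx)
{
	/* Offset is always within one page, so the page order is irrelevant. */
	return (unsigned long)rx->data & (EX_PAGE_SIZE - 1);
}

static inline unsigned long ex_rx_buf_size(const struct rx_example *rx)
{
	return EX_PAGE_SIZE << rx->rx_buffer_order;
}

int main(void)
{
	char page[2 * EX_PAGE_SIZE];
	struct rx_example rx = { .rx_buffer_order = 1, .data = page + 100 };

	printf("inline: offset=%u size=%lu\n",
	       ex_rx_buf_offset(&rx), ex_rx_buf_size(&rx));
	printf("macro : offset=%lu size=%lu\n",
	       RX_BUF_OFFSET(&rx), RX_PAGE_SIZE(&rx));
	return 0;
}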
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c index cbda15946e8f..3b2de9fe7f27 100644 --- a/drivers/net/sfc/selftest.c +++ b/drivers/net/sfc/selftest.c | |||
@@ -290,7 +290,7 @@ void efx_loopback_rx_packet(struct efx_nic *efx, | |||
290 | 290 | ||
291 | payload = &state->payload; | 291 | payload = &state->payload; |
292 | 292 | ||
293 | received = (struct efx_loopback_payload *)(char *) buf_ptr; | 293 | received = (struct efx_loopback_payload *) buf_ptr; |
294 | received->ip.saddr = payload->ip.saddr; | 294 | received->ip.saddr = payload->ip.saddr; |
295 | received->ip.check = payload->ip.check; | 295 | received->ip.check = payload->ip.check; |
296 | 296 | ||
@@ -424,10 +424,10 @@ static int efx_tx_loopback(struct efx_tx_queue *tx_queue) | |||
424 | * interrupt handler. */ | 424 | * interrupt handler. */ |
425 | smp_wmb(); | 425 | smp_wmb(); |
426 | 426 | ||
427 | if (NET_DEV_REGISTERED(efx)) | 427 | if (efx_dev_registered(efx)) |
428 | netif_tx_lock_bh(efx->net_dev); | 428 | netif_tx_lock_bh(efx->net_dev); |
429 | rc = efx_xmit(efx, tx_queue, skb); | 429 | rc = efx_xmit(efx, tx_queue, skb); |
430 | if (NET_DEV_REGISTERED(efx)) | 430 | if (efx_dev_registered(efx)) |
431 | netif_tx_unlock_bh(efx->net_dev); | 431 | netif_tx_unlock_bh(efx->net_dev); |
432 | 432 | ||
433 | if (rc != NETDEV_TX_OK) { | 433 | if (rc != NETDEV_TX_OK) { |
@@ -453,7 +453,7 @@ static int efx_rx_loopback(struct efx_tx_queue *tx_queue, | |||
453 | int tx_done = 0, rx_good, rx_bad; | 453 | int tx_done = 0, rx_good, rx_bad; |
454 | int i, rc = 0; | 454 | int i, rc = 0; |
455 | 455 | ||
456 | if (NET_DEV_REGISTERED(efx)) | 456 | if (efx_dev_registered(efx)) |
457 | netif_tx_lock_bh(efx->net_dev); | 457 | netif_tx_lock_bh(efx->net_dev); |
458 | 458 | ||
459 | /* Count the number of tx completions, and decrement the refcnt. Any | 459 | /* Count the number of tx completions, and decrement the refcnt. Any |
@@ -465,7 +465,7 @@ static int efx_rx_loopback(struct efx_tx_queue *tx_queue, | |||
465 | dev_kfree_skb_any(skb); | 465 | dev_kfree_skb_any(skb); |
466 | } | 466 | } |
467 | 467 | ||
468 | if (NET_DEV_REGISTERED(efx)) | 468 | if (efx_dev_registered(efx)) |
469 | netif_tx_unlock_bh(efx->net_dev); | 469 | netif_tx_unlock_bh(efx->net_dev); |
470 | 470 | ||
471 | /* Check TX completion and received packet counts */ | 471 | /* Check TX completion and received packet counts */ |
@@ -517,6 +517,8 @@ efx_test_loopback(struct efx_tx_queue *tx_queue, | |||
517 | state->packet_count = min(1 << (i << 2), state->packet_count); | 517 | state->packet_count = min(1 << (i << 2), state->packet_count); |
518 | state->skbs = kzalloc(sizeof(state->skbs[0]) * | 518 | state->skbs = kzalloc(sizeof(state->skbs[0]) * |
519 | state->packet_count, GFP_KERNEL); | 519 | state->packet_count, GFP_KERNEL); |
520 | if (!state->skbs) | ||
521 | return -ENOMEM; | ||
520 | state->flush = 0; | 522 | state->flush = 0; |
521 | 523 | ||
522 | EFX_LOG(efx, "TX queue %d testing %s loopback with %d " | 524 | EFX_LOG(efx, "TX queue %d testing %s loopback with %d " |
@@ -700,7 +702,7 @@ int efx_offline_test(struct efx_nic *efx, | |||
700 | * "flushing" so all inflight packets are dropped */ | 702 | * "flushing" so all inflight packets are dropped */ |
701 | BUG_ON(efx->loopback_selftest); | 703 | BUG_ON(efx->loopback_selftest); |
702 | state->flush = 1; | 704 | state->flush = 1; |
703 | efx->loopback_selftest = (void *)state; | 705 | efx->loopback_selftest = state; |
704 | 706 | ||
705 | rc = efx_test_loopbacks(efx, tests, loopback_modes); | 707 | rc = efx_test_loopbacks(efx, tests, loopback_modes); |
706 | 708 | ||
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c index 725d1a539c49..66a0d1442aba 100644 --- a/drivers/net/sfc/sfe4001.c +++ b/drivers/net/sfc/sfe4001.c | |||
@@ -116,18 +116,18 @@ void sfe4001_poweroff(struct efx_nic *efx) | |||
116 | 116 | ||
117 | /* Turn off all power rails */ | 117 | /* Turn off all power rails */ |
118 | out = 0xff; | 118 | out = 0xff; |
119 | (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); | 119 | efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); |
120 | 120 | ||
121 | /* Disable port 1 outputs on IO expander */ | 121 | /* Disable port 1 outputs on IO expander */ |
122 | cfg = 0xff; | 122 | cfg = 0xff; |
123 | (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1); | 123 | efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1); |
124 | 124 | ||
125 | /* Disable port 0 outputs on IO expander */ | 125 | /* Disable port 0 outputs on IO expander */ |
126 | cfg = 0xff; | 126 | cfg = 0xff; |
127 | (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1); | 127 | efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1); |
128 | 128 | ||
129 | /* Clear any over-temperature alert */ | 129 | /* Clear any over-temperature alert */ |
130 | (void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1); | 130 | efx_i2c_read(i2c, MAX6647, RSL, &in, 1); |
131 | } | 131 | } |
132 | 132 | ||
133 | /* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected | 133 | /* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected |
@@ -253,14 +253,14 @@ done: | |||
253 | fail3: | 253 | fail3: |
254 | /* Turn off all power rails */ | 254 | /* Turn off all power rails */ |
255 | out = 0xff; | 255 | out = 0xff; |
256 | (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); | 256 | efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); |
257 | /* Disable port 1 outputs on IO expander */ | 257 | /* Disable port 1 outputs on IO expander */ |
258 | out = 0xff; | 258 | out = 0xff; |
259 | (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1); | 259 | efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1); |
260 | fail2: | 260 | fail2: |
261 | /* Disable port 0 outputs on IO expander */ | 261 | /* Disable port 0 outputs on IO expander */ |
262 | out = 0xff; | 262 | out = 0xff; |
263 | (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1); | 263 | efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1); |
264 | fail1: | 264 | fail1: |
265 | return rc; | 265 | return rc; |
266 | } | 266 | } |
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c index b1cd6deec01f..c0146061c326 100644 --- a/drivers/net/sfc/tenxpress.c +++ b/drivers/net/sfc/tenxpress.c | |||
@@ -211,6 +211,8 @@ static int tenxpress_phy_init(struct efx_nic *efx) | |||
211 | int rc = 0; | 211 | int rc = 0; |
212 | 212 | ||
213 | phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); | 213 | phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); |
214 | if (!phy_data) | ||
215 | return -ENOMEM; | ||
214 | efx->phy_data = phy_data; | 216 | efx->phy_data = phy_data; |
215 | 217 | ||
216 | tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL); | 218 | tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL); |
@@ -376,7 +378,7 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx) | |||
376 | * perform a special software reset */ | 378 | * perform a special software reset */ |
377 | if ((phy_data->tx_disabled && !efx->tx_disabled) || | 379 | if ((phy_data->tx_disabled && !efx->tx_disabled) || |
378 | loop_change) { | 380 | loop_change) { |
379 | (void) tenxpress_special_reset(efx); | 381 | tenxpress_special_reset(efx); |
380 | falcon_reset_xaui(efx); | 382 | falcon_reset_xaui(efx); |
381 | } | 383 | } |
382 | 384 | ||
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index 9b436f5b4888..5cdd082ab8f6 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c | |||
@@ -387,7 +387,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) | |||
387 | if (unlikely(tx_queue->stopped)) { | 387 | if (unlikely(tx_queue->stopped)) { |
388 | fill_level = tx_queue->insert_count - tx_queue->read_count; | 388 | fill_level = tx_queue->insert_count - tx_queue->read_count; |
389 | if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) { | 389 | if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) { |
390 | EFX_BUG_ON_PARANOID(!NET_DEV_REGISTERED(efx)); | 390 | EFX_BUG_ON_PARANOID(!efx_dev_registered(efx)); |
391 | 391 | ||
392 | /* Do this under netif_tx_lock(), to avoid racing | 392 | /* Do this under netif_tx_lock(), to avoid racing |
393 | * with efx_xmit(). */ | 393 | * with efx_xmit(). */ |
@@ -639,11 +639,12 @@ static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue, | |||
639 | base_dma = tsoh->dma_addr & PAGE_MASK; | 639 | base_dma = tsoh->dma_addr & PAGE_MASK; |
640 | 640 | ||
641 | p = &tx_queue->tso_headers_free; | 641 | p = &tx_queue->tso_headers_free; |
642 | while (*p != NULL) | 642 | while (*p != NULL) { |
643 | if (((unsigned long)*p & PAGE_MASK) == base_kva) | 643 | if (((unsigned long)*p & PAGE_MASK) == base_kva) |
644 | *p = (*p)->next; | 644 | *p = (*p)->next; |
645 | else | 645 | else |
646 | p = &(*p)->next; | 646 | p = &(*p)->next; |
647 | } | ||
647 | 648 | ||
648 | pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma); | 649 | pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma); |
649 | } | 650 | } |
@@ -939,9 +940,10 @@ static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue, | |||
939 | 940 | ||
940 | /* Allocate a DMA-mapped header buffer. */ | 941 | /* Allocate a DMA-mapped header buffer. */ |
941 | if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) { | 942 | if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) { |
942 | if (tx_queue->tso_headers_free == NULL) | 943 | if (tx_queue->tso_headers_free == NULL) { |
943 | if (efx_tsoh_block_alloc(tx_queue)) | 944 | if (efx_tsoh_block_alloc(tx_queue)) |
944 | return -1; | 945 | return -1; |
946 | } | ||
945 | EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free); | 947 | EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free); |
946 | tsoh = tx_queue->tso_headers_free; | 948 | tsoh = tx_queue->tso_headers_free; |
947 | tx_queue->tso_headers_free = tsoh->next; | 949 | tx_queue->tso_headers_free = tsoh->next; |
@@ -1106,9 +1108,10 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue) | |||
1106 | { | 1108 | { |
1107 | unsigned i; | 1109 | unsigned i; |
1108 | 1110 | ||
1109 | if (tx_queue->buffer) | 1111 | if (tx_queue->buffer) { |
1110 | for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i) | 1112 | for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i) |
1111 | efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); | 1113 | efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); |
1114 | } | ||
1112 | 1115 | ||
1113 | while (tx_queue->tso_headers_free != NULL) | 1116 | while (tx_queue->tso_headers_free != NULL) |
1114 | efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free, | 1117 | efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free, |
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h index dca62f190198..35ab19c27f8d 100644 --- a/drivers/net/sfc/workarounds.h +++ b/drivers/net/sfc/workarounds.h | |||
@@ -16,7 +16,7 @@ | |||
16 | */ | 16 | */ |
17 | 17 | ||
18 | #define EFX_WORKAROUND_ALWAYS(efx) 1 | 18 | #define EFX_WORKAROUND_ALWAYS(efx) 1 |
19 | #define EFX_WORKAROUND_FALCON_A(efx) (FALCON_REV(efx) <= FALCON_REV_A1) | 19 | #define EFX_WORKAROUND_FALCON_A(efx) (falcon_rev(efx) <= FALCON_REV_A1) |
20 | 20 | ||
21 | /* XAUI resets if link not detected */ | 21 | /* XAUI resets if link not detected */ |
22 | #define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS | 22 | #define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS |
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/xfp_phy.c index 3b9f9ddbc372..f3684ad28887 100644 --- a/drivers/net/sfc/xfp_phy.c +++ b/drivers/net/sfc/xfp_phy.c | |||
@@ -85,7 +85,9 @@ static int xfp_phy_init(struct efx_nic *efx) | |||
85 | int rc; | 85 | int rc; |
86 | 86 | ||
87 | phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL); | 87 | phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL); |
88 | efx->phy_data = (void *) phy_data; | 88 | if (!phy_data) |
89 | return -ENOMEM; | ||
90 | efx->phy_data = phy_data; | ||
89 | 91 | ||
90 | EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision" | 92 | EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision" |
91 | " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid), | 93 | " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid), |
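Aside on the selftest.c, tenxpress.c and xfp_phy.c hunks above: each adds a missing NULL check after a kzalloc() so the init path fails cleanly with -ENOMEM instead of dereferencing a NULL pointer later. The common shape, sketched against hypothetical names rather than the drivers' exact code:

#include <linux/slab.h>
#include <linux/errno.h>

struct example_phy_data {          /* hypothetical private state */
	int state;
};

struct example_device {            /* hypothetical, stands in for efx/priv */
	struct example_phy_data *phy_data;
};

static int example_phy_init(struct example_device *dev)
{
	struct example_phy_data *phy_data;

	phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
	if (!phy_data)
		return -ENOMEM;    /* propagate the failure to the caller */

	dev->phy_data = phy_data;
	return 0;
}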
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index f226bcac7d17..3bb60530d4d7 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -1159,17 +1159,9 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
1159 | } | 1159 | } |
1160 | 1160 | ||
1161 | #ifdef SKY2_VLAN_TAG_USED | 1161 | #ifdef SKY2_VLAN_TAG_USED |
1162 | static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) | 1162 | static void sky2_set_vlan_mode(struct sky2_hw *hw, u16 port, bool onoff) |
1163 | { | 1163 | { |
1164 | struct sky2_port *sky2 = netdev_priv(dev); | 1164 | if (onoff) { |
1165 | struct sky2_hw *hw = sky2->hw; | ||
1166 | u16 port = sky2->port; | ||
1167 | |||
1168 | netif_tx_lock_bh(dev); | ||
1169 | napi_disable(&hw->napi); | ||
1170 | |||
1171 | sky2->vlgrp = grp; | ||
1172 | if (grp) { | ||
1173 | sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), | 1165 | sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), |
1174 | RX_VLAN_STRIP_ON); | 1166 | RX_VLAN_STRIP_ON); |
1175 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), | 1167 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), |
@@ -1180,6 +1172,19 @@ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp | |||
1180 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), | 1172 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), |
1181 | TX_VLAN_TAG_OFF); | 1173 | TX_VLAN_TAG_OFF); |
1182 | } | 1174 | } |
1175 | } | ||
1176 | |||
1177 | static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) | ||
1178 | { | ||
1179 | struct sky2_port *sky2 = netdev_priv(dev); | ||
1180 | struct sky2_hw *hw = sky2->hw; | ||
1181 | u16 port = sky2->port; | ||
1182 | |||
1183 | netif_tx_lock_bh(dev); | ||
1184 | napi_disable(&hw->napi); | ||
1185 | |||
1186 | sky2->vlgrp = grp; | ||
1187 | sky2_set_vlan_mode(hw, port, grp != NULL); | ||
1183 | 1188 | ||
1184 | sky2_read32(hw, B0_Y2_SP_LISR); | 1189 | sky2_read32(hw, B0_Y2_SP_LISR); |
1185 | napi_enable(&hw->napi); | 1190 | napi_enable(&hw->napi); |
@@ -1418,6 +1423,10 @@ static int sky2_up(struct net_device *dev) | |||
1418 | sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, | 1423 | sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, |
1419 | TX_RING_SIZE - 1); | 1424 | TX_RING_SIZE - 1); |
1420 | 1425 | ||
1426 | #ifdef SKY2_VLAN_TAG_USED | ||
1427 | sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL); | ||
1428 | #endif | ||
1429 | |||
1421 | err = sky2_rx_start(sky2); | 1430 | err = sky2_rx_start(sky2); |
1422 | if (err) | 1431 | if (err) |
1423 | goto err_out; | 1432 | goto err_out; |
diff --git a/drivers/net/tokenring/3c359.h b/drivers/net/tokenring/3c359.h index b880cba0f6fd..74cf8e1a181b 100644 --- a/drivers/net/tokenring/3c359.h +++ b/drivers/net/tokenring/3c359.h | |||
@@ -264,7 +264,7 @@ struct xl_private { | |||
264 | u16 asb; | 264 | u16 asb; |
265 | 265 | ||
266 | u8 __iomem *xl_mmio; | 266 | u8 __iomem *xl_mmio; |
267 | char *xl_card_name; | 267 | const char *xl_card_name; |
268 | struct pci_dev *pdev ; | 268 | struct pci_dev *pdev ; |
269 | 269 | ||
270 | spinlock_t xl_lock ; | 270 | spinlock_t xl_lock ; |
diff --git a/drivers/net/tokenring/olympic.h b/drivers/net/tokenring/olympic.h index c91956310fb2..10fbba08978f 100644 --- a/drivers/net/tokenring/olympic.h +++ b/drivers/net/tokenring/olympic.h | |||
@@ -254,7 +254,7 @@ struct olympic_private { | |||
254 | u8 __iomem *olympic_mmio; | 254 | u8 __iomem *olympic_mmio; |
255 | u8 __iomem *olympic_lap; | 255 | u8 __iomem *olympic_lap; |
256 | struct pci_dev *pdev ; | 256 | struct pci_dev *pdev ; |
257 | char *olympic_card_name ; | 257 | const char *olympic_card_name; |
258 | 258 | ||
259 | spinlock_t olympic_lock ; | 259 | spinlock_t olympic_lock ; |
260 | 260 | ||
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c index 2511ca7a12aa..e9e628621639 100644 --- a/drivers/net/tulip/uli526x.c +++ b/drivers/net/tulip/uli526x.c | |||
@@ -225,6 +225,9 @@ static void uli526x_set_filter_mode(struct net_device *); | |||
225 | static const struct ethtool_ops netdev_ethtool_ops; | 225 | static const struct ethtool_ops netdev_ethtool_ops; |
226 | static u16 read_srom_word(long, int); | 226 | static u16 read_srom_word(long, int); |
227 | static irqreturn_t uli526x_interrupt(int, void *); | 227 | static irqreturn_t uli526x_interrupt(int, void *); |
228 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
229 | static void uli526x_poll(struct net_device *dev); | ||
230 | #endif | ||
228 | static void uli526x_descriptor_init(struct uli526x_board_info *, unsigned long); | 231 | static void uli526x_descriptor_init(struct uli526x_board_info *, unsigned long); |
229 | static void allocate_rx_buffer(struct uli526x_board_info *); | 232 | static void allocate_rx_buffer(struct uli526x_board_info *); |
230 | static void update_cr6(u32, unsigned long); | 233 | static void update_cr6(u32, unsigned long); |
@@ -339,6 +342,9 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev, | |||
339 | dev->get_stats = &uli526x_get_stats; | 342 | dev->get_stats = &uli526x_get_stats; |
340 | dev->set_multicast_list = &uli526x_set_filter_mode; | 343 | dev->set_multicast_list = &uli526x_set_filter_mode; |
341 | dev->ethtool_ops = &netdev_ethtool_ops; | 344 | dev->ethtool_ops = &netdev_ethtool_ops; |
345 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
346 | dev->poll_controller = &uli526x_poll; | ||
347 | #endif | ||
342 | spin_lock_init(&db->lock); | 348 | spin_lock_init(&db->lock); |
343 | 349 | ||
344 | 350 | ||
@@ -681,8 +687,9 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id) | |||
681 | db->cr5_data = inl(ioaddr + DCR5); | 687 | db->cr5_data = inl(ioaddr + DCR5); |
682 | outl(db->cr5_data, ioaddr + DCR5); | 688 | outl(db->cr5_data, ioaddr + DCR5); |
683 | if ( !(db->cr5_data & 0x180c1) ) { | 689 | if ( !(db->cr5_data & 0x180c1) ) { |
684 | spin_unlock_irqrestore(&db->lock, flags); | 690 | /* Restore CR7 to enable interrupt mask */ |
685 | outl(db->cr7_data, ioaddr + DCR7); | 691 | outl(db->cr7_data, ioaddr + DCR7); |
692 | spin_unlock_irqrestore(&db->lock, flags); | ||
686 | return IRQ_HANDLED; | 693 | return IRQ_HANDLED; |
687 | } | 694 | } |
688 | 695 | ||
@@ -715,6 +722,13 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id) | |||
715 | return IRQ_HANDLED; | 722 | return IRQ_HANDLED; |
716 | } | 723 | } |
717 | 724 | ||
725 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
726 | static void uli526x_poll(struct net_device *dev) | ||
727 | { | ||
728 | /* ISR grabs the irqsave lock, so this should be safe */ | ||
729 | uli526x_interrupt(dev->irq, dev); | ||
730 | } | ||
731 | #endif | ||
718 | 732 | ||
719 | /* | 733 | /* |
720 | * Free TX resource after TX complete | 734 | * Free TX resource after TX complete |
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index ca0bdac07a78..fb0b918e5ccb 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c | |||
@@ -237,7 +237,7 @@ static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, | |||
237 | skb->dev = ugeth->dev; | 237 | skb->dev = ugeth->dev; |
238 | 238 | ||
239 | out_be32(&((struct qe_bd __iomem *)bd)->buf, | 239 | out_be32(&((struct qe_bd __iomem *)bd)->buf, |
240 | dma_map_single(NULL, | 240 | dma_map_single(&ugeth->dev->dev, |
241 | skb->data, | 241 | skb->data, |
242 | ugeth->ug_info->uf_info.max_rx_buf_length + | 242 | ugeth->ug_info->uf_info.max_rx_buf_length + |
243 | UCC_GETH_RX_DATA_BUF_ALIGNMENT, | 243 | UCC_GETH_RX_DATA_BUF_ALIGNMENT, |
@@ -2158,7 +2158,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth) | |||
2158 | continue; | 2158 | continue; |
2159 | for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { | 2159 | for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { |
2160 | if (ugeth->tx_skbuff[i][j]) { | 2160 | if (ugeth->tx_skbuff[i][j]) { |
2161 | dma_unmap_single(NULL, | 2161 | dma_unmap_single(&ugeth->dev->dev, |
2162 | in_be32(&((struct qe_bd __iomem *)bd)->buf), | 2162 | in_be32(&((struct qe_bd __iomem *)bd)->buf), |
2163 | (in_be32((u32 __iomem *)bd) & | 2163 | (in_be32((u32 __iomem *)bd) & |
2164 | BD_LENGTH_MASK), | 2164 | BD_LENGTH_MASK), |
@@ -2186,7 +2186,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth) | |||
2186 | bd = ugeth->p_rx_bd_ring[i]; | 2186 | bd = ugeth->p_rx_bd_ring[i]; |
2187 | for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { | 2187 | for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { |
2188 | if (ugeth->rx_skbuff[i][j]) { | 2188 | if (ugeth->rx_skbuff[i][j]) { |
2189 | dma_unmap_single(NULL, | 2189 | dma_unmap_single(&ugeth->dev->dev, |
2190 | in_be32(&((struct qe_bd __iomem *)bd)->buf), | 2190 | in_be32(&((struct qe_bd __iomem *)bd)->buf), |
2191 | ugeth->ug_info-> | 2191 | ugeth->ug_info-> |
2192 | uf_info.max_rx_buf_length + | 2192 | uf_info.max_rx_buf_length + |
@@ -3406,7 +3406,8 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3406 | 3406 | ||
3407 | /* set up the buffer descriptor */ | 3407 | /* set up the buffer descriptor */ |
3408 | out_be32(&((struct qe_bd __iomem *)bd)->buf, | 3408 | out_be32(&((struct qe_bd __iomem *)bd)->buf, |
3409 | dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE)); | 3409 | dma_map_single(&ugeth->dev->dev, skb->data, |
3410 | skb->len, DMA_TO_DEVICE)); | ||
3410 | 3411 | ||
3411 | /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */ | 3412 | /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */ |
3412 | 3413 | ||
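Aside on the ucc_geth.c hunks above: dma_map_single()/dma_unmap_single() now get the net_device's embedded struct device (&ugeth->dev->dev) instead of NULL, so the mapping honours that device's DMA mask and ops. A minimal hedged sketch of the same call shape (helper name hypothetical; a fuller version would also check dma_mapping_error()):

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Map an skb's data buffer for device-to-memory DMA against the right device. */
static dma_addr_t example_map_rx(struct net_device *ndev, struct sk_buff *skb,
				 unsigned int len)
{
	/* &ndev->dev is the struct device embedded in the net_device,
	 * equivalent to &ugeth->dev->dev in the hunks above. */
	return dma_map_single(&ndev->dev, skb->data, len, DMA_FROM_DEVICE);
}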
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c index dc6f097062df..37ecf845edfe 100644 --- a/drivers/net/usb/asix.c +++ b/drivers/net/usb/asix.c | |||
@@ -1440,6 +1440,10 @@ static const struct usb_device_id products [] = { | |||
1440 | // Belkin F5D5055 | 1440 | // Belkin F5D5055 |
1441 | USB_DEVICE(0x050d, 0x5055), | 1441 | USB_DEVICE(0x050d, 0x5055), |
1442 | .driver_info = (unsigned long) &ax88178_info, | 1442 | .driver_info = (unsigned long) &ax88178_info, |
1443 | }, { | ||
1444 | // Apple USB Ethernet Adapter | ||
1445 | USB_DEVICE(0x05ac, 0x1402), | ||
1446 | .driver_info = (unsigned long) &ax88772_info, | ||
1443 | }, | 1447 | }, |
1444 | { }, // END | 1448 | { }, // END |
1445 | }; | 1449 | }; |
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index 3969b7a2b8e6..ae467f182c40 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c | |||
@@ -194,7 +194,7 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf) | |||
194 | dev_dbg(&info->control->dev, | 194 | dev_dbg(&info->control->dev, |
195 | "rndis response error, code %d\n", retval); | 195 | "rndis response error, code %d\n", retval); |
196 | } | 196 | } |
197 | msleep(2); | 197 | msleep(20); |
198 | } | 198 | } |
199 | dev_dbg(&info->control->dev, "rndis response timeout\n"); | 199 | dev_dbg(&info->control->dev, "rndis response timeout\n"); |
200 | return -ETIMEDOUT; | 200 | return -ETIMEDOUT; |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index f926b5ab3d09..fe7cdf2a2a23 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -470,8 +470,7 @@ static void virtnet_remove(struct virtio_device *vdev) | |||
470 | kfree_skb(skb); | 470 | kfree_skb(skb); |
471 | vi->num--; | 471 | vi->num--; |
472 | } | 472 | } |
473 | while ((skb = __skb_dequeue(&vi->send)) != NULL) | 473 | __skb_queue_purge(&vi->send); |
474 | kfree_skb(skb); | ||
475 | 474 | ||
476 | BUG_ON(vi->num != 0); | 475 | BUG_ON(vi->num != 0); |
477 | 476 | ||
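Aside on the virtio_net.c hunk above (and the matching xen-netfront.c hunk further down): the hand-rolled drain loop is replaced by __skb_queue_purge(), which frees every queued skb in one call. A short sketch of the equivalence, with a hypothetical helper name:

#include <linux/skbuff.h>

static void example_drain_open_coded(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	/* What virtio_net and xen-netfront used to do by hand: */
	while ((skb = __skb_dequeue(q)) != NULL)
		kfree_skb(skb);
}

static void example_drain_purge(struct sk_buff_head *q)
{
	/* Equivalent helper from <linux/skbuff.h>: dequeues and kfree_skb()s
	 * every buffer on the list. */
	__skb_queue_purge(q);
}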
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c index 9a83c9d5b8cf..7f984895b0d5 100644 --- a/drivers/net/wan/hdlc.c +++ b/drivers/net/wan/hdlc.c | |||
@@ -43,8 +43,7 @@ static const char* version = "HDLC support module revision 1.22"; | |||
43 | 43 | ||
44 | #undef DEBUG_LINK | 44 | #undef DEBUG_LINK |
45 | 45 | ||
46 | static struct hdlc_proto *first_proto = NULL; | 46 | static struct hdlc_proto *first_proto; |
47 | |||
48 | 47 | ||
49 | static int hdlc_change_mtu(struct net_device *dev, int new_mtu) | 48 | static int hdlc_change_mtu(struct net_device *dev, int new_mtu) |
50 | { | 49 | { |
@@ -314,21 +313,25 @@ void detach_hdlc_protocol(struct net_device *dev) | |||
314 | 313 | ||
315 | void register_hdlc_protocol(struct hdlc_proto *proto) | 314 | void register_hdlc_protocol(struct hdlc_proto *proto) |
316 | { | 315 | { |
316 | rtnl_lock(); | ||
317 | proto->next = first_proto; | 317 | proto->next = first_proto; |
318 | first_proto = proto; | 318 | first_proto = proto; |
319 | rtnl_unlock(); | ||
319 | } | 320 | } |
320 | 321 | ||
321 | 322 | ||
322 | void unregister_hdlc_protocol(struct hdlc_proto *proto) | 323 | void unregister_hdlc_protocol(struct hdlc_proto *proto) |
323 | { | 324 | { |
324 | struct hdlc_proto **p = &first_proto; | 325 | struct hdlc_proto **p; |
325 | while (*p) { | 326 | |
326 | if (*p == proto) { | 327 | rtnl_lock(); |
327 | *p = proto->next; | 328 | p = &first_proto; |
328 | return; | 329 | while (*p != proto) { |
329 | } | 330 | BUG_ON(!*p); |
330 | p = &((*p)->next); | 331 | p = &((*p)->next); |
331 | } | 332 | } |
333 | *p = proto->next; | ||
334 | rtnl_unlock(); | ||
332 | } | 335 | } |
333 | 336 | ||
334 | 337 | ||
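Aside on the hdlc.c rewrite above and the efx_tsoh_block_free() hunk in tx.c earlier: both walk the list through a pointer-to-pointer (struct foo **p), so unlinking needs no special case for the head element and no separate "prev" pointer; hdlc.c additionally brackets the list mutation with rtnl_lock()/rtnl_unlock(). A standalone userspace illustration of the unlink idiom (not driver code):

#include <stdio.h>
#include <stdlib.h>

struct node {
	int value;
	struct node *next;
};

/* Remove every node whose value matches, head and interior links alike. */
static void remove_all(struct node **p, int value)
{
	while (*p != NULL) {
		if ((*p)->value == value) {
			struct node *victim = *p;
			*p = victim->next;    /* unlink through the link we hold */
			free(victim);
		} else {
			p = &(*p)->next;      /* advance to the next link field */
		}
	}
}

static struct node *push(struct node *head, int value)
{
	struct node *n = malloc(sizeof(*n));
	if (!n)
		exit(1);
	n->value = value;
	n->next = head;
	return n;
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 6; i++)
		head = push(head, i % 2);     /* list: 1 0 1 0 1 0 */

	remove_all(&head, 1);                 /* drop the odd entries */
	for (struct node *n = head; n; n = n->next)
		printf("%d ", n->value);      /* prints: 0 0 0 */
	printf("\n");

	remove_all(&head, 0);                 /* free the rest */
	return 0;
}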
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index 7133c688cf20..762d21c1c703 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c | |||
@@ -56,6 +56,7 @@ struct cisco_state { | |||
56 | cisco_proto settings; | 56 | cisco_proto settings; |
57 | 57 | ||
58 | struct timer_list timer; | 58 | struct timer_list timer; |
59 | spinlock_t lock; | ||
59 | unsigned long last_poll; | 60 | unsigned long last_poll; |
60 | int up; | 61 | int up; |
61 | int request_sent; | 62 | int request_sent; |
@@ -158,6 +159,7 @@ static int cisco_rx(struct sk_buff *skb) | |||
158 | { | 159 | { |
159 | struct net_device *dev = skb->dev; | 160 | struct net_device *dev = skb->dev; |
160 | hdlc_device *hdlc = dev_to_hdlc(dev); | 161 | hdlc_device *hdlc = dev_to_hdlc(dev); |
162 | struct cisco_state *st = state(hdlc); | ||
161 | struct hdlc_header *data = (struct hdlc_header*)skb->data; | 163 | struct hdlc_header *data = (struct hdlc_header*)skb->data; |
162 | struct cisco_packet *cisco_data; | 164 | struct cisco_packet *cisco_data; |
163 | struct in_device *in_dev; | 165 | struct in_device *in_dev; |
@@ -220,11 +222,12 @@ static int cisco_rx(struct sk_buff *skb) | |||
220 | goto rx_error; | 222 | goto rx_error; |
221 | 223 | ||
222 | case CISCO_KEEPALIVE_REQ: | 224 | case CISCO_KEEPALIVE_REQ: |
223 | state(hdlc)->rxseq = ntohl(cisco_data->par1); | 225 | spin_lock(&st->lock); |
224 | if (state(hdlc)->request_sent && | 226 | st->rxseq = ntohl(cisco_data->par1); |
225 | ntohl(cisco_data->par2) == state(hdlc)->txseq) { | 227 | if (st->request_sent && |
226 | state(hdlc)->last_poll = jiffies; | 228 | ntohl(cisco_data->par2) == st->txseq) { |
227 | if (!state(hdlc)->up) { | 229 | st->last_poll = jiffies; |
230 | if (!st->up) { | ||
228 | u32 sec, min, hrs, days; | 231 | u32 sec, min, hrs, days; |
229 | sec = ntohl(cisco_data->time) / 1000; | 232 | sec = ntohl(cisco_data->time) / 1000; |
230 | min = sec / 60; sec -= min * 60; | 233 | min = sec / 60; sec -= min * 60; |
@@ -232,12 +235,12 @@ static int cisco_rx(struct sk_buff *skb) | |||
232 | days = hrs / 24; hrs -= days * 24; | 235 | days = hrs / 24; hrs -= days * 24; |
233 | printk(KERN_INFO "%s: Link up (peer " | 236 | printk(KERN_INFO "%s: Link up (peer " |
234 | "uptime %ud%uh%um%us)\n", | 237 | "uptime %ud%uh%um%us)\n", |
235 | dev->name, days, hrs, | 238 | dev->name, days, hrs, min, sec); |
236 | min, sec); | ||
237 | netif_dormant_off(dev); | 239 | netif_dormant_off(dev); |
238 | state(hdlc)->up = 1; | 240 | st->up = 1; |
239 | } | 241 | } |
240 | } | 242 | } |
243 | spin_unlock(&st->lock); | ||
241 | 244 | ||
242 | dev_kfree_skb_any(skb); | 245 | dev_kfree_skb_any(skb); |
243 | return NET_RX_SUCCESS; | 246 | return NET_RX_SUCCESS; |
@@ -261,24 +264,25 @@ static void cisco_timer(unsigned long arg) | |||
261 | { | 264 | { |
262 | struct net_device *dev = (struct net_device *)arg; | 265 | struct net_device *dev = (struct net_device *)arg; |
263 | hdlc_device *hdlc = dev_to_hdlc(dev); | 266 | hdlc_device *hdlc = dev_to_hdlc(dev); |
267 | struct cisco_state *st = state(hdlc); | ||
264 | 268 | ||
265 | if (state(hdlc)->up && | 269 | spin_lock(&st->lock); |
266 | time_after(jiffies, state(hdlc)->last_poll + | 270 | if (st->up && |
267 | state(hdlc)->settings.timeout * HZ)) { | 271 | time_after(jiffies, st->last_poll + st->settings.timeout * HZ)) { |
268 | state(hdlc)->up = 0; | 272 | st->up = 0; |
269 | printk(KERN_INFO "%s: Link down\n", dev->name); | 273 | printk(KERN_INFO "%s: Link down\n", dev->name); |
270 | netif_dormant_on(dev); | 274 | netif_dormant_on(dev); |
271 | } | 275 | } |
272 | 276 | ||
273 | cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, | 277 | cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, htonl(++st->txseq), |
274 | htonl(++state(hdlc)->txseq), | 278 | htonl(st->rxseq)); |
275 | htonl(state(hdlc)->rxseq)); | 279 | st->request_sent = 1; |
276 | state(hdlc)->request_sent = 1; | 280 | spin_unlock(&st->lock); |
277 | state(hdlc)->timer.expires = jiffies + | 281 | |
278 | state(hdlc)->settings.interval * HZ; | 282 | st->timer.expires = jiffies + st->settings.interval * HZ; |
279 | state(hdlc)->timer.function = cisco_timer; | 283 | st->timer.function = cisco_timer; |
280 | state(hdlc)->timer.data = arg; | 284 | st->timer.data = arg; |
281 | add_timer(&state(hdlc)->timer); | 285 | add_timer(&st->timer); |
282 | } | 286 | } |
283 | 287 | ||
284 | 288 | ||
@@ -286,15 +290,20 @@ static void cisco_timer(unsigned long arg) | |||
286 | static void cisco_start(struct net_device *dev) | 290 | static void cisco_start(struct net_device *dev) |
287 | { | 291 | { |
288 | hdlc_device *hdlc = dev_to_hdlc(dev); | 292 | hdlc_device *hdlc = dev_to_hdlc(dev); |
289 | state(hdlc)->up = 0; | 293 | struct cisco_state *st = state(hdlc); |
290 | state(hdlc)->request_sent = 0; | 294 | unsigned long flags; |
291 | state(hdlc)->txseq = state(hdlc)->rxseq = 0; | 295 | |
292 | 296 | spin_lock_irqsave(&st->lock, flags); | |
293 | init_timer(&state(hdlc)->timer); | 297 | st->up = 0; |
294 | state(hdlc)->timer.expires = jiffies + HZ; /*First poll after 1s*/ | 298 | st->request_sent = 0; |
295 | state(hdlc)->timer.function = cisco_timer; | 299 | st->txseq = st->rxseq = 0; |
296 | state(hdlc)->timer.data = (unsigned long)dev; | 300 | spin_unlock_irqrestore(&st->lock, flags); |
297 | add_timer(&state(hdlc)->timer); | 301 | |
302 | init_timer(&st->timer); | ||
303 | st->timer.expires = jiffies + HZ; /* First poll after 1 s */ | ||
304 | st->timer.function = cisco_timer; | ||
305 | st->timer.data = (unsigned long)dev; | ||
306 | add_timer(&st->timer); | ||
298 | } | 307 | } |
299 | 308 | ||
300 | 309 | ||
@@ -302,10 +311,16 @@ static void cisco_start(struct net_device *dev) | |||
302 | static void cisco_stop(struct net_device *dev) | 311 | static void cisco_stop(struct net_device *dev) |
303 | { | 312 | { |
304 | hdlc_device *hdlc = dev_to_hdlc(dev); | 313 | hdlc_device *hdlc = dev_to_hdlc(dev); |
305 | del_timer_sync(&state(hdlc)->timer); | 314 | struct cisco_state *st = state(hdlc); |
315 | unsigned long flags; | ||
316 | |||
317 | del_timer_sync(&st->timer); | ||
318 | |||
319 | spin_lock_irqsave(&st->lock, flags); | ||
306 | netif_dormant_on(dev); | 320 | netif_dormant_on(dev); |
307 | state(hdlc)->up = 0; | 321 | st->up = 0; |
308 | state(hdlc)->request_sent = 0; | 322 | st->request_sent = 0; |
323 | spin_unlock_irqrestore(&st->lock, flags); | ||
309 | } | 324 | } |
310 | 325 | ||
311 | 326 | ||
@@ -367,6 +382,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
367 | return result; | 382 | return result; |
368 | 383 | ||
369 | memcpy(&state(hdlc)->settings, &new_settings, size); | 384 | memcpy(&state(hdlc)->settings, &new_settings, size); |
385 | spin_lock_init(&state(hdlc)->lock); | ||
370 | dev->hard_start_xmit = hdlc->xmit; | 386 | dev->hard_start_xmit = hdlc->xmit; |
371 | dev->header_ops = &cisco_header_ops; | 387 | dev->header_ops = &cisco_header_ops; |
372 | dev->type = ARPHRD_CISCO; | 388 | dev->type = ARPHRD_CISCO; |
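Aside on the hdlc_cisco.c hunks above: the keepalive state (rxseq/txseq/up/last_poll) is now serialized with a per-link spinlock; plain spin_lock() is used in the receive and timer paths, which already run in softirq context, while the start/stop paths use spin_lock_irqsave(), and the lock is initialized once in the ioctl path. A compact sketch of that discipline with hypothetical names:

#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/types.h>

struct example_link_state {        /* hypothetical, mirrors cisco_state */
	spinlock_t lock;
	unsigned long last_poll;
	int up;
	u32 txseq, rxseq;
};

static void example_attach(struct example_link_state *st)
{
	spin_lock_init(&st->lock);    /* once at attach time, as in cisco_ioctl() */
}

/* Receive/timer path, softirq context: plain spin_lock() is enough here. */
static void example_rx_update(struct example_link_state *st, u32 seq)
{
	spin_lock(&st->lock);
	st->rxseq = seq;
	st->last_poll = jiffies;
	st->up = 1;
	spin_unlock(&st->lock);
}

/* ifup/ifdown path, process context: save and restore interrupt state. */
static void example_reset(struct example_link_state *st)
{
	unsigned long flags;

	spin_lock_irqsave(&st->lock, flags);
	st->up = 0;
	st->txseq = st->rxseq = 0;
	spin_unlock_irqrestore(&st->lock, flags);
}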
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c index 4e5c8fc35200..635b9ac9aaa1 100644 --- a/drivers/net/wireless/ath5k/base.c +++ b/drivers/net/wireless/ath5k/base.c | |||
@@ -1787,6 +1787,8 @@ ath5k_tasklet_rx(unsigned long data) | |||
1787 | 1787 | ||
1788 | spin_lock(&sc->rxbuflock); | 1788 | spin_lock(&sc->rxbuflock); |
1789 | do { | 1789 | do { |
1790 | rxs.flag = 0; | ||
1791 | |||
1790 | if (unlikely(list_empty(&sc->rxbuf))) { | 1792 | if (unlikely(list_empty(&sc->rxbuf))) { |
1791 | ATH5K_WARN(sc, "empty rx buf pool\n"); | 1793 | ATH5K_WARN(sc, "empty rx buf pool\n"); |
1792 | break; | 1794 | break; |
diff --git a/drivers/net/wireless/ath5k/hw.c b/drivers/net/wireless/ath5k/hw.c index 5fb1ae6ad3e2..77990b56860b 100644 --- a/drivers/net/wireless/ath5k/hw.c +++ b/drivers/net/wireless/ath5k/hw.c | |||
@@ -4119,6 +4119,7 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah, | |||
4119 | rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1, | 4119 | rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1, |
4120 | AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP); | 4120 | AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP); |
4121 | rs->rs_status = 0; | 4121 | rs->rs_status = 0; |
4122 | rs->rs_phyerr = 0; | ||
4122 | 4123 | ||
4123 | /* | 4124 | /* |
4124 | * Key table status | 4125 | * Key table status |
@@ -4145,7 +4146,7 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah, | |||
4145 | if (rx_status->rx_status_1 & | 4146 | if (rx_status->rx_status_1 & |
4146 | AR5K_5210_RX_DESC_STATUS1_PHY_ERROR) { | 4147 | AR5K_5210_RX_DESC_STATUS1_PHY_ERROR) { |
4147 | rs->rs_status |= AR5K_RXERR_PHY; | 4148 | rs->rs_status |= AR5K_RXERR_PHY; |
4148 | rs->rs_phyerr = AR5K_REG_MS(rx_status->rx_status_1, | 4149 | rs->rs_phyerr |= AR5K_REG_MS(rx_status->rx_status_1, |
4149 | AR5K_5210_RX_DESC_STATUS1_PHY_ERROR); | 4150 | AR5K_5210_RX_DESC_STATUS1_PHY_ERROR); |
4150 | } | 4151 | } |
4151 | 4152 | ||
@@ -4193,6 +4194,7 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah, | |||
4193 | rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1, | 4194 | rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1, |
4194 | AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP); | 4195 | AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP); |
4195 | rs->rs_status = 0; | 4196 | rs->rs_status = 0; |
4197 | rs->rs_phyerr = 0; | ||
4196 | 4198 | ||
4197 | /* | 4199 | /* |
4198 | * Key table status | 4200 | * Key table status |
@@ -4215,7 +4217,7 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah, | |||
4215 | if (rx_status->rx_status_1 & | 4217 | if (rx_status->rx_status_1 & |
4216 | AR5K_5212_RX_DESC_STATUS1_PHY_ERROR) { | 4218 | AR5K_5212_RX_DESC_STATUS1_PHY_ERROR) { |
4217 | rs->rs_status |= AR5K_RXERR_PHY; | 4219 | rs->rs_status |= AR5K_RXERR_PHY; |
4218 | rs->rs_phyerr = AR5K_REG_MS(rx_err->rx_error_1, | 4220 | rs->rs_phyerr |= AR5K_REG_MS(rx_err->rx_error_1, |
4219 | AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE); | 4221 | AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE); |
4220 | } | 4222 | } |
4221 | 4223 | ||
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c index 437a9bcc9bd3..ed4317a17cbb 100644 --- a/drivers/net/wireless/hostap/hostap_cs.c +++ b/drivers/net/wireless/hostap/hostap_cs.c | |||
@@ -833,6 +833,7 @@ static struct pcmcia_device_id hostap_cs_ids[] = { | |||
833 | PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x0001), | 833 | PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x0001), |
834 | PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300), | 834 | PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300), |
835 | /* PCMCIA_DEVICE_MANF_CARD(0xc00f, 0x0000), conflict with pcnet_cs */ | 835 | /* PCMCIA_DEVICE_MANF_CARD(0xc00f, 0x0000), conflict with pcnet_cs */ |
836 | PCMCIA_DEVICE_MANF_CARD(0xc250, 0x0002), | ||
836 | PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002), | 837 | PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002), |
837 | PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005), | 838 | PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005), |
838 | PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0010), | 839 | PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0010), |
diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c index dcfdb404678b..688d60de55cb 100644 --- a/drivers/net/wireless/libertas/ethtool.c +++ b/drivers/net/wireless/libertas/ethtool.c | |||
@@ -73,8 +73,8 @@ out: | |||
73 | return ret; | 73 | return ret; |
74 | } | 74 | } |
75 | 75 | ||
76 | static void lbs_ethtool_get_stats(struct net_device * dev, | 76 | static void lbs_ethtool_get_stats(struct net_device *dev, |
77 | struct ethtool_stats * stats, u64 * data) | 77 | struct ethtool_stats *stats, uint64_t *data) |
78 | { | 78 | { |
79 | struct lbs_private *priv = dev->priv; | 79 | struct lbs_private *priv = dev->priv; |
80 | struct cmd_ds_mesh_access mesh_access; | 80 | struct cmd_ds_mesh_access mesh_access; |
@@ -83,12 +83,12 @@ static void lbs_ethtool_get_stats(struct net_device * dev, | |||
83 | lbs_deb_enter(LBS_DEB_ETHTOOL); | 83 | lbs_deb_enter(LBS_DEB_ETHTOOL); |
84 | 84 | ||
85 | /* Get Mesh Statistics */ | 85 | /* Get Mesh Statistics */ |
86 | ret = lbs_prepare_and_send_command(priv, | 86 | ret = lbs_mesh_access(priv, CMD_ACT_MESH_GET_STATS, &mesh_access); |
87 | CMD_MESH_ACCESS, CMD_ACT_MESH_GET_STATS, | ||
88 | CMD_OPTION_WAITFORRSP, 0, &mesh_access); | ||
89 | 87 | ||
90 | if (ret) | 88 | if (ret) { |
89 | memset(data, 0, MESH_STATS_NUM*(sizeof(uint64_t))); | ||
91 | return; | 90 | return; |
91 | } | ||
92 | 92 | ||
93 | priv->mstats.fwd_drop_rbt = le32_to_cpu(mesh_access.data[0]); | 93 | priv->mstats.fwd_drop_rbt = le32_to_cpu(mesh_access.data[0]); |
94 | priv->mstats.fwd_drop_ttl = le32_to_cpu(mesh_access.data[1]); | 94 | priv->mstats.fwd_drop_ttl = le32_to_cpu(mesh_access.data[1]); |
@@ -111,19 +111,18 @@ static void lbs_ethtool_get_stats(struct net_device * dev, | |||
111 | lbs_deb_enter(LBS_DEB_ETHTOOL); | 111 | lbs_deb_enter(LBS_DEB_ETHTOOL); |
112 | } | 112 | } |
113 | 113 | ||
114 | static int lbs_ethtool_get_sset_count(struct net_device * dev, int sset) | 114 | static int lbs_ethtool_get_sset_count(struct net_device *dev, int sset) |
115 | { | 115 | { |
116 | switch (sset) { | 116 | struct lbs_private *priv = dev->priv; |
117 | case ETH_SS_STATS: | 117 | |
118 | if (sset == ETH_SS_STATS && dev == priv->mesh_dev) | ||
118 | return MESH_STATS_NUM; | 119 | return MESH_STATS_NUM; |
119 | default: | 120 | |
120 | return -EOPNOTSUPP; | 121 | return -EOPNOTSUPP; |
121 | } | ||
122 | } | 122 | } |
123 | 123 | ||
124 | static void lbs_ethtool_get_strings(struct net_device *dev, | 124 | static void lbs_ethtool_get_strings(struct net_device *dev, |
125 | u32 stringset, | 125 | uint32_t stringset, uint8_t *s) |
126 | u8 * s) | ||
127 | { | 126 | { |
128 | int i; | 127 | int i; |
129 | 128 | ||
diff --git a/drivers/net/wireless/orinoco_cs.c b/drivers/net/wireless/orinoco_cs.c index 8b7f5768a103..1c216e015f64 100644 --- a/drivers/net/wireless/orinoco_cs.c +++ b/drivers/net/wireless/orinoco_cs.c | |||
@@ -461,6 +461,7 @@ static struct pcmcia_device_id orinoco_cs_ids[] = { | |||
461 | PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0673), /* Linksys WCF12 Wireless CompactFlash Card */ | 461 | PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0673), /* Linksys WCF12 Wireless CompactFlash Card */ |
462 | PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002), /* ASUS SpaceLink WL-100 */ | 462 | PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002), /* ASUS SpaceLink WL-100 */ |
463 | PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x0002), /* SpeedStream SS1021 Wireless Adapter */ | 463 | PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x0002), /* SpeedStream SS1021 Wireless Adapter */ |
464 | PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x3021), /* SpeedStream Wireless Adapter */ | ||
464 | PCMCIA_DEVICE_MANF_CARD(0x14ea, 0xb001), /* PLANEX RoadLannerWave GW-NS11H */ | 465 | PCMCIA_DEVICE_MANF_CARD(0x14ea, 0xb001), /* PLANEX RoadLannerWave GW-NS11H */ |
465 | PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300), /* Airvast WN-100 */ | 466 | PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300), /* Airvast WN-100 */ |
466 | PCMCIA_DEVICE_MANF_CARD(0x9005, 0x0021), /* Adaptec Ultra Wireless ANW-8030 */ | 467 | PCMCIA_DEVICE_MANF_CARD(0x9005, 0x0021), /* Adaptec Ultra Wireless ANW-8030 */ |
diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c index d5787b37e1fb..9223ada5f00e 100644 --- a/drivers/net/wireless/rtl8187_dev.c +++ b/drivers/net/wireless/rtl8187_dev.c | |||
@@ -92,6 +92,7 @@ static void rtl8187_iowrite_async(struct rtl8187_priv *priv, __le16 addr, | |||
92 | u8 data[4]; | 92 | u8 data[4]; |
93 | struct usb_ctrlrequest dr; | 93 | struct usb_ctrlrequest dr; |
94 | } *buf; | 94 | } *buf; |
95 | int rc; | ||
95 | 96 | ||
96 | buf = kmalloc(sizeof(*buf), GFP_ATOMIC); | 97 | buf = kmalloc(sizeof(*buf), GFP_ATOMIC); |
97 | if (!buf) | 98 | if (!buf) |
@@ -116,7 +117,11 @@ static void rtl8187_iowrite_async(struct rtl8187_priv *priv, __le16 addr, | |||
116 | usb_fill_control_urb(urb, priv->udev, usb_sndctrlpipe(priv->udev, 0), | 117 | usb_fill_control_urb(urb, priv->udev, usb_sndctrlpipe(priv->udev, 0), |
117 | (unsigned char *)dr, buf, len, | 118 | (unsigned char *)dr, buf, len, |
118 | rtl8187_iowrite_async_cb, buf); | 119 | rtl8187_iowrite_async_cb, buf); |
119 | usb_submit_urb(urb, GFP_ATOMIC); | 120 | rc = usb_submit_urb(urb, GFP_ATOMIC); |
121 | if (rc < 0) { | ||
122 | kfree(buf); | ||
123 | usb_free_urb(urb); | ||
124 | } | ||
120 | } | 125 | } |
121 | 126 | ||
122 | static inline void rtl818x_iowrite32_async(struct rtl8187_priv *priv, | 127 | static inline void rtl818x_iowrite32_async(struct rtl8187_priv *priv, |
@@ -169,6 +174,7 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb, | |||
169 | struct urb *urb; | 174 | struct urb *urb; |
170 | __le16 rts_dur = 0; | 175 | __le16 rts_dur = 0; |
171 | u32 flags; | 176 | u32 flags; |
177 | int rc; | ||
172 | 178 | ||
173 | urb = usb_alloc_urb(0, GFP_ATOMIC); | 179 | urb = usb_alloc_urb(0, GFP_ATOMIC); |
174 | if (!urb) { | 180 | if (!urb) { |
@@ -208,7 +214,11 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb, | |||
208 | info->dev = dev; | 214 | info->dev = dev; |
209 | usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, 2), | 215 | usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, 2), |
210 | hdr, skb->len, rtl8187_tx_cb, skb); | 216 | hdr, skb->len, rtl8187_tx_cb, skb); |
211 | usb_submit_urb(urb, GFP_ATOMIC); | 217 | rc = usb_submit_urb(urb, GFP_ATOMIC); |
218 | if (rc < 0) { | ||
219 | usb_free_urb(urb); | ||
220 | kfree_skb(skb); | ||
221 | } | ||
212 | 222 | ||
213 | return 0; | 223 | return 0; |
214 | } | 224 | } |
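Aside on the rtl8187_dev.c hunks above: the return value of usb_submit_urb() is no longer ignored; when submission fails the completion callback will never run, so the buffer (or skb) and the URB must be released on the spot. A trimmed sketch of the pattern, with hypothetical names:

#include <linux/usb.h>
#include <linux/slab.h>

/* Submit an async control URB; on success the completion callback owns buf. */
static int example_submit(struct usb_device *udev, struct urb *urb,
			  void *buf, int len, struct usb_ctrlrequest *dr,
			  usb_complete_t complete_fn)
{
	int rc;

	usb_fill_control_urb(urb, udev, usb_sndctrlpipe(udev, 0),
			     (unsigned char *)dr, buf, len, complete_fn, buf);

	rc = usb_submit_urb(urb, GFP_ATOMIC);
	if (rc < 0) {
		/* The callback will not run: free what it would have freed. */
		kfree(buf);
		usb_free_urb(urb);
	}
	return rc;
}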
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 8bddff150c70..d26f69b0184f 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
@@ -946,8 +946,7 @@ err: | |||
946 | work_done++; | 946 | work_done++; |
947 | } | 947 | } |
948 | 948 | ||
949 | while ((skb = __skb_dequeue(&errq))) | 949 | __skb_queue_purge(&errq); |
950 | kfree_skb(skb); | ||
951 | 950 | ||
952 | work_done -= handle_incoming_queue(dev, &rxq); | 951 | work_done -= handle_incoming_queue(dev, &rxq); |
953 | 952 | ||
@@ -1079,8 +1078,7 @@ static void xennet_release_rx_bufs(struct netfront_info *np) | |||
1079 | } | 1078 | } |
1080 | } | 1079 | } |
1081 | 1080 | ||
1082 | while ((skb = __skb_dequeue(&free_list)) != NULL) | 1081 | __skb_queue_purge(&free_list); |
1083 | dev_kfree_skb(skb); | ||
1084 | 1082 | ||
1085 | spin_unlock_bh(&np->rx_lock); | 1083 | spin_unlock_bh(&np->rx_lock); |
1086 | } | 1084 | } |
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index b31faeccb9cd..867f6fd5c2c0 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c | |||
@@ -1278,7 +1278,7 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance) | |||
1278 | error = 0; | 1278 | error = 0; |
1279 | /* Check for command packet errors */ | 1279 | /* Check for command packet errors */ |
1280 | if (full_command_packet->command.newcommand.status != 0) { | 1280 | if (full_command_packet->command.newcommand.status != 0) { |
1281 | if (tw_dev->srb[request_id] != 0) { | 1281 | if (tw_dev->srb[request_id] != NULL) { |
1282 | error = twa_fill_sense(tw_dev, request_id, 1, 1); | 1282 | error = twa_fill_sense(tw_dev, request_id, 1, 1); |
1283 | } else { | 1283 | } else { |
1284 | /* Skip ioctl error prints */ | 1284 | /* Skip ioctl error prints */ |
@@ -1290,7 +1290,7 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance) | |||
1290 | 1290 | ||
1291 | /* Check for correct state */ | 1291 | /* Check for correct state */ |
1292 | if (tw_dev->state[request_id] != TW_S_POSTED) { | 1292 | if (tw_dev->state[request_id] != TW_S_POSTED) { |
1293 | if (tw_dev->srb[request_id] != 0) { | 1293 | if (tw_dev->srb[request_id] != NULL) { |
1294 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted"); | 1294 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted"); |
1295 | TW_CLEAR_ALL_INTERRUPTS(tw_dev); | 1295 | TW_CLEAR_ALL_INTERRUPTS(tw_dev); |
1296 | goto twa_interrupt_bail; | 1296 | goto twa_interrupt_bail; |
@@ -1298,7 +1298,7 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance) | |||
1298 | } | 1298 | } |
1299 | 1299 | ||
1300 | /* Check for internal command completion */ | 1300 | /* Check for internal command completion */ |
1301 | if (tw_dev->srb[request_id] == 0) { | 1301 | if (tw_dev->srb[request_id] == NULL) { |
1302 | if (request_id != tw_dev->chrdev_request_id) { | 1302 | if (request_id != tw_dev->chrdev_request_id) { |
1303 | if (twa_aen_complete(tw_dev, request_id)) | 1303 | if (twa_aen_complete(tw_dev, request_id)) |
1304 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt"); | 1304 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt"); |
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c index 1dca1775f4b1..0899cb61e3dd 100644 --- a/drivers/scsi/aha152x.c +++ b/drivers/scsi/aha152x.c | |||
@@ -3582,7 +3582,7 @@ static int checksetup(struct aha152x_setup *setup) | |||
3582 | if (i == ARRAY_SIZE(ports)) | 3582 | if (i == ARRAY_SIZE(ports)) |
3583 | return 0; | 3583 | return 0; |
3584 | 3584 | ||
3585 | if ( request_region(setup->io_port, IO_RANGE, "aha152x")==0 ) { | 3585 | if (!request_region(setup->io_port, IO_RANGE, "aha152x")) { |
3586 | printk(KERN_ERR "aha152x: io port 0x%x busy.\n", setup->io_port); | 3586 | printk(KERN_ERR "aha152x: io port 0x%x busy.\n", setup->io_port); |
3587 | return 0; | 3587 | return 0; |
3588 | } | 3588 | } |
@@ -3842,7 +3842,7 @@ static int __init aha152x_init(void) | |||
3842 | if ((setup_count == 1) && (setup[0].io_port == ports[i])) | 3842 | if ((setup_count == 1) && (setup[0].io_port == ports[i])) |
3843 | continue; | 3843 | continue; |
3844 | 3844 | ||
3845 | if ( request_region(ports[i], IO_RANGE, "aha152x")==0 ) { | 3845 | if (!request_region(ports[i], IO_RANGE, "aha152x")) { |
3846 | printk(KERN_ERR "aha152x: io port 0x%x busy.\n", ports[i]); | 3846 | printk(KERN_ERR "aha152x: io port 0x%x busy.\n", ports[i]); |
3847 | continue; | 3847 | continue; |
3848 | } | 3848 | } |
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c index db6de5e6afb3..7d311541c76c 100644 --- a/drivers/scsi/atp870u.c +++ b/drivers/scsi/atp870u.c | |||
@@ -747,7 +747,7 @@ static void send_s870(struct atp_unit *dev,unsigned char c) | |||
747 | dev->quhd[c] = 0; | 747 | dev->quhd[c] = 0; |
748 | } | 748 | } |
749 | workreq = dev->quereq[c][dev->quhd[c]]; | 749 | workreq = dev->quereq[c][dev->quhd[c]]; |
750 | if (dev->id[c][scmd_id(workreq)].curr_req == 0) { | 750 | if (dev->id[c][scmd_id(workreq)].curr_req == NULL) { |
751 | dev->id[c][scmd_id(workreq)].curr_req = workreq; | 751 | dev->id[c][scmd_id(workreq)].curr_req = workreq; |
752 | dev->last_cmd[c] = scmd_id(workreq); | 752 | dev->last_cmd[c] = scmd_id(workreq); |
753 | goto cmd_subp; | 753 | goto cmd_subp; |
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c index aaa48e0c8ed0..da876d3924be 100644 --- a/drivers/scsi/hptiop.c +++ b/drivers/scsi/hptiop.c | |||
@@ -444,7 +444,7 @@ static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index) | |||
444 | if (!(pci_resource_flags(pcidev, index) & IORESOURCE_MEM)) { | 444 | if (!(pci_resource_flags(pcidev, index) & IORESOURCE_MEM)) { |
445 | printk(KERN_ERR "scsi%d: pci resource invalid\n", | 445 | printk(KERN_ERR "scsi%d: pci resource invalid\n", |
446 | hba->host->host_no); | 446 | hba->host->host_no); |
447 | return 0; | 447 | return NULL; |
448 | } | 448 | } |
449 | 449 | ||
450 | mem_base_phy = pci_resource_start(pcidev, index); | 450 | mem_base_phy = pci_resource_start(pcidev, index); |
@@ -454,7 +454,7 @@ static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index) | |||
454 | if (!mem_base_virt) { | 454 | if (!mem_base_virt) { |
455 | printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n", | 455 | printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n", |
456 | hba->host->host_no); | 456 | hba->host->host_no); |
457 | return 0; | 457 | return NULL; |
458 | } | 458 | } |
459 | return mem_base_virt; | 459 | return mem_base_virt; |
460 | } | 460 | } |
@@ -476,11 +476,11 @@ static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba) | |||
476 | static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba) | 476 | static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba) |
477 | { | 477 | { |
478 | hba->u.mv.regs = hptiop_map_pci_bar(hba, 0); | 478 | hba->u.mv.regs = hptiop_map_pci_bar(hba, 0); |
479 | if (hba->u.mv.regs == 0) | 479 | if (hba->u.mv.regs == NULL) |
480 | return -1; | 480 | return -1; |
481 | 481 | ||
482 | hba->u.mv.mu = hptiop_map_pci_bar(hba, 2); | 482 | hba->u.mv.mu = hptiop_map_pci_bar(hba, 2); |
483 | if (hba->u.mv.mu == 0) { | 483 | if (hba->u.mv.mu == NULL) { |
484 | iounmap(hba->u.mv.regs); | 484 | iounmap(hba->u.mv.regs); |
485 | return -1; | 485 | return -1; |
486 | } | 486 | } |
@@ -1210,8 +1210,8 @@ static void hptiop_remove(struct pci_dev *pcidev) | |||
1210 | 1210 | ||
1211 | static struct hptiop_adapter_ops hptiop_itl_ops = { | 1211 | static struct hptiop_adapter_ops hptiop_itl_ops = { |
1212 | .iop_wait_ready = iop_wait_ready_itl, | 1212 | .iop_wait_ready = iop_wait_ready_itl, |
1213 | .internal_memalloc = 0, | 1213 | .internal_memalloc = NULL, |
1214 | .internal_memfree = 0, | 1214 | .internal_memfree = NULL, |
1215 | .map_pci_bar = hptiop_map_pci_bar_itl, | 1215 | .map_pci_bar = hptiop_map_pci_bar_itl, |
1216 | .unmap_pci_bar = hptiop_unmap_pci_bar_itl, | 1216 | .unmap_pci_bar = hptiop_unmap_pci_bar_itl, |
1217 | .enable_intr = hptiop_enable_intr_itl, | 1217 | .enable_intr = hptiop_enable_intr_itl, |
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index 51e2f299dbbb..3754ab87f89a 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c | |||
@@ -2811,7 +2811,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) | |||
2811 | 2811 | ||
2812 | /* Check for room in outstanding command list. */ | 2812 | /* Check for room in outstanding command list. */ |
2813 | for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS && | 2813 | for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS && |
2814 | ha->outstanding_cmds[cnt] != 0; cnt++); | 2814 | ha->outstanding_cmds[cnt] != NULL; cnt++); |
2815 | 2815 | ||
2816 | if (cnt >= MAX_OUTSTANDING_COMMANDS) { | 2816 | if (cnt >= MAX_OUTSTANDING_COMMANDS) { |
2817 | status = 1; | 2817 | status = 1; |
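The SCSI hunks above (3w-9xxx, aha152x, atp870u, hptiop, qla1280) all make the same style change: pointer results are tested against NULL or with '!' rather than against the integer 0. A minimal stand-alone sketch of the preferred idioms; the alloc_counter() helper is hypothetical and not taken from any of the drivers above:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical helper, only here to show the pointer-test idioms the
 * hunks converge on. */
static int *alloc_counter(void)
{
	int *p = malloc(sizeof(*p));

	if (p == NULL)		/* pointer compared against NULL, not 0 */
		return NULL;

	*p = 0;
	return p;
}

int main(void)
{
	int *counter = alloc_counter();

	if (!counter) {		/* '!' form, as in the request_region() checks */
		fprintf(stderr, "allocation failed\n");
		return 1;
	}

	printf("counter starts at %d\n", *counter);
	free(counter);
	return 0;
}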
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c index 53fa19cf2f06..788c3559522d 100644 --- a/drivers/serial/8250_pci.c +++ b/drivers/serial/8250_pci.c | |||
@@ -2602,7 +2602,12 @@ static struct pci_device_id serial_pci_tbl[] = { | |||
2602 | { PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS200, | 2602 | { PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS200, |
2603 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0811 */ | 2603 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0811 */ |
2604 | pbn_b2_2_115200 }, | 2604 | pbn_b2_2_115200 }, |
2605 | 2605 | /* | |
2606 | * IntaShield IS-400 | ||
2607 | */ | ||
2608 | { PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400, | ||
2609 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */ | ||
2610 | pbn_b2_4_115200 }, | ||
2606 | /* | 2611 | /* |
2607 | * Perle PCI-RAS cards | 2612 | * Perle PCI-RAS cards |
2608 | */ | 2613 | */ |
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c index eab032733790..53b03c629aff 100644 --- a/drivers/serial/serial_core.c +++ b/drivers/serial/serial_core.c | |||
@@ -2054,6 +2054,8 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *port) | |||
2054 | int uart_resume_port(struct uart_driver *drv, struct uart_port *port) | 2054 | int uart_resume_port(struct uart_driver *drv, struct uart_port *port) |
2055 | { | 2055 | { |
2056 | struct uart_state *state = drv->state + port->line; | 2056 | struct uart_state *state = drv->state + port->line; |
2057 | struct device *tty_dev; | ||
2058 | struct uart_match match = {port, drv}; | ||
2057 | 2059 | ||
2058 | mutex_lock(&state->mutex); | 2060 | mutex_lock(&state->mutex); |
2059 | 2061 | ||
@@ -2063,7 +2065,8 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *port) | |||
2063 | return 0; | 2065 | return 0; |
2064 | } | 2066 | } |
2065 | 2067 | ||
2066 | if (!port->suspended) { | 2068 | tty_dev = device_find_child(port->dev, &match, serial_match_port); |
2069 | if (!port->suspended && device_may_wakeup(tty_dev)) { | ||
2067 | disable_irq_wake(port->irq); | 2070 | disable_irq_wake(port->irq); |
2068 | mutex_unlock(&state->mutex); | 2071 | mutex_unlock(&state->mutex); |
2069 | return 0; | 2072 | return 0; |
diff --git a/drivers/serial/sunhv.c b/drivers/serial/sunhv.c index 145c0281495d..2847336742d7 100644 --- a/drivers/serial/sunhv.c +++ b/drivers/serial/sunhv.c | |||
@@ -499,7 +499,6 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig | |||
499 | } else | 499 | } else |
500 | spin_lock(&port->lock); | 500 | spin_lock(&port->lock); |
501 | 501 | ||
502 | spin_lock_irqsave(&port->lock, flags); | ||
503 | for (i = 0; i < n; i++) { | 502 | for (i = 0; i < n; i++) { |
504 | if (*s == '\n') | 503 | if (*s == '\n') |
505 | sunhv_console_putchar(port, '\r'); | 504 | sunhv_console_putchar(port, '\r'); |
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index b3518ca9f04e..41620c0fb046 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c | |||
@@ -68,6 +68,7 @@ static unsigned long minors[N_SPI_MINORS / BITS_PER_LONG]; | |||
68 | 68 | ||
69 | struct spidev_data { | 69 | struct spidev_data { |
70 | struct device dev; | 70 | struct device dev; |
71 | spinlock_t spi_lock; | ||
71 | struct spi_device *spi; | 72 | struct spi_device *spi; |
72 | struct list_head device_entry; | 73 | struct list_head device_entry; |
73 | 74 | ||
@@ -85,12 +86,75 @@ MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message"); | |||
85 | 86 | ||
86 | /*-------------------------------------------------------------------------*/ | 87 | /*-------------------------------------------------------------------------*/ |
87 | 88 | ||
89 | /* | ||
90 | * We can't use the standard synchronous wrappers for file I/O; we | ||
91 | * need to protect against async removal of the underlying spi_device. | ||
92 | */ | ||
93 | static void spidev_complete(void *arg) | ||
94 | { | ||
95 | complete(arg); | ||
96 | } | ||
97 | |||
98 | static ssize_t | ||
99 | spidev_sync(struct spidev_data *spidev, struct spi_message *message) | ||
100 | { | ||
101 | DECLARE_COMPLETION_ONSTACK(done); | ||
102 | int status; | ||
103 | |||
104 | message->complete = spidev_complete; | ||
105 | message->context = &done; | ||
106 | |||
107 | spin_lock_irq(&spidev->spi_lock); | ||
108 | if (spidev->spi == NULL) | ||
109 | status = -ESHUTDOWN; | ||
110 | else | ||
111 | status = spi_async(spidev->spi, message); | ||
112 | spin_unlock_irq(&spidev->spi_lock); | ||
113 | |||
114 | if (status == 0) { | ||
115 | wait_for_completion(&done); | ||
116 | status = message->status; | ||
117 | if (status == 0) | ||
118 | status = message->actual_length; | ||
119 | } | ||
120 | return status; | ||
121 | } | ||
122 | |||
123 | static inline ssize_t | ||
124 | spidev_sync_write(struct spidev_data *spidev, size_t len) | ||
125 | { | ||
126 | struct spi_transfer t = { | ||
127 | .tx_buf = spidev->buffer, | ||
128 | .len = len, | ||
129 | }; | ||
130 | struct spi_message m; | ||
131 | |||
132 | spi_message_init(&m); | ||
133 | spi_message_add_tail(&t, &m); | ||
134 | return spidev_sync(spidev, &m); | ||
135 | } | ||
136 | |||
137 | static inline ssize_t | ||
138 | spidev_sync_read(struct spidev_data *spidev, size_t len) | ||
139 | { | ||
140 | struct spi_transfer t = { | ||
141 | .rx_buf = spidev->buffer, | ||
142 | .len = len, | ||
143 | }; | ||
144 | struct spi_message m; | ||
145 | |||
146 | spi_message_init(&m); | ||
147 | spi_message_add_tail(&t, &m); | ||
148 | return spidev_sync(spidev, &m); | ||
149 | } | ||
150 | |||
151 | /*-------------------------------------------------------------------------*/ | ||
152 | |||
88 | /* Read-only message with current device setup */ | 153 | /* Read-only message with current device setup */ |
89 | static ssize_t | 154 | static ssize_t |
90 | spidev_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos) | 155 | spidev_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos) |
91 | { | 156 | { |
92 | struct spidev_data *spidev; | 157 | struct spidev_data *spidev; |
93 | struct spi_device *spi; | ||
94 | ssize_t status = 0; | 158 | ssize_t status = 0; |
95 | 159 | ||
96 | /* chipselect only toggles at start or end of operation */ | 160 | /* chipselect only toggles at start or end of operation */ |
@@ -98,10 +162,9 @@ spidev_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos) | |||
98 | return -EMSGSIZE; | 162 | return -EMSGSIZE; |
99 | 163 | ||
100 | spidev = filp->private_data; | 164 | spidev = filp->private_data; |
101 | spi = spidev->spi; | ||
102 | 165 | ||
103 | mutex_lock(&spidev->buf_lock); | 166 | mutex_lock(&spidev->buf_lock); |
104 | status = spi_read(spi, spidev->buffer, count); | 167 | status = spidev_sync_read(spidev, count); |
105 | if (status == 0) { | 168 | if (status == 0) { |
106 | unsigned long missing; | 169 | unsigned long missing; |
107 | 170 | ||
@@ -122,7 +185,6 @@ spidev_write(struct file *filp, const char __user *buf, | |||
122 | size_t count, loff_t *f_pos) | 185 | size_t count, loff_t *f_pos) |
123 | { | 186 | { |
124 | struct spidev_data *spidev; | 187 | struct spidev_data *spidev; |
125 | struct spi_device *spi; | ||
126 | ssize_t status = 0; | 188 | ssize_t status = 0; |
127 | unsigned long missing; | 189 | unsigned long missing; |
128 | 190 | ||
@@ -131,12 +193,11 @@ spidev_write(struct file *filp, const char __user *buf, | |||
131 | return -EMSGSIZE; | 193 | return -EMSGSIZE; |
132 | 194 | ||
133 | spidev = filp->private_data; | 195 | spidev = filp->private_data; |
134 | spi = spidev->spi; | ||
135 | 196 | ||
136 | mutex_lock(&spidev->buf_lock); | 197 | mutex_lock(&spidev->buf_lock); |
137 | missing = copy_from_user(spidev->buffer, buf, count); | 198 | missing = copy_from_user(spidev->buffer, buf, count); |
138 | if (missing == 0) { | 199 | if (missing == 0) { |
139 | status = spi_write(spi, spidev->buffer, count); | 200 | status = spidev_sync_write(spidev, count); |
140 | if (status == 0) | 201 | if (status == 0) |
141 | status = count; | 202 | status = count; |
142 | } else | 203 | } else |
@@ -153,7 +214,6 @@ static int spidev_message(struct spidev_data *spidev, | |||
153 | struct spi_transfer *k_xfers; | 214 | struct spi_transfer *k_xfers; |
154 | struct spi_transfer *k_tmp; | 215 | struct spi_transfer *k_tmp; |
155 | struct spi_ioc_transfer *u_tmp; | 216 | struct spi_ioc_transfer *u_tmp; |
156 | struct spi_device *spi = spidev->spi; | ||
157 | unsigned n, total; | 217 | unsigned n, total; |
158 | u8 *buf; | 218 | u8 *buf; |
159 | int status = -EFAULT; | 219 | int status = -EFAULT; |
@@ -215,7 +275,7 @@ static int spidev_message(struct spidev_data *spidev, | |||
215 | spi_message_add_tail(k_tmp, &msg); | 275 | spi_message_add_tail(k_tmp, &msg); |
216 | } | 276 | } |
217 | 277 | ||
218 | status = spi_sync(spi, &msg); | 278 | status = spidev_sync(spidev, &msg); |
219 | if (status < 0) | 279 | if (status < 0) |
220 | goto done; | 280 | goto done; |
221 | 281 | ||
@@ -269,8 +329,16 @@ spidev_ioctl(struct inode *inode, struct file *filp, | |||
269 | if (err) | 329 | if (err) |
270 | return -EFAULT; | 330 | return -EFAULT; |
271 | 331 | ||
332 | /* guard against device removal before, or while, | ||
333 | * we issue this ioctl. | ||
334 | */ | ||
272 | spidev = filp->private_data; | 335 | spidev = filp->private_data; |
273 | spi = spidev->spi; | 336 | spin_lock_irq(&spidev->spi_lock); |
337 | spi = spi_dev_get(spidev->spi); | ||
338 | spin_unlock_irq(&spidev->spi_lock); | ||
339 | |||
340 | if (spi == NULL) | ||
341 | return -ESHUTDOWN; | ||
274 | 342 | ||
275 | switch (cmd) { | 343 | switch (cmd) { |
276 | /* read requests */ | 344 | /* read requests */ |
@@ -356,8 +424,10 @@ spidev_ioctl(struct inode *inode, struct file *filp, | |||
356 | default: | 424 | default: |
357 | /* segmented and/or full-duplex I/O request */ | 425 | /* segmented and/or full-duplex I/O request */ |
358 | if (_IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0)) | 426 | if (_IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0)) |
359 | || _IOC_DIR(cmd) != _IOC_WRITE) | 427 | || _IOC_DIR(cmd) != _IOC_WRITE) { |
360 | return -ENOTTY; | 428 | retval = -ENOTTY; |
429 | break; | ||
430 | } | ||
361 | 431 | ||
362 | tmp = _IOC_SIZE(cmd); | 432 | tmp = _IOC_SIZE(cmd); |
363 | if ((tmp % sizeof(struct spi_ioc_transfer)) != 0) { | 433 | if ((tmp % sizeof(struct spi_ioc_transfer)) != 0) { |
@@ -385,6 +455,7 @@ spidev_ioctl(struct inode *inode, struct file *filp, | |||
385 | kfree(ioc); | 455 | kfree(ioc); |
386 | break; | 456 | break; |
387 | } | 457 | } |
458 | spi_dev_put(spi); | ||
388 | return retval; | 459 | return retval; |
389 | } | 460 | } |
390 | 461 | ||
@@ -488,6 +559,7 @@ static int spidev_probe(struct spi_device *spi) | |||
488 | 559 | ||
489 | /* Initialize the driver data */ | 560 | /* Initialize the driver data */ |
490 | spidev->spi = spi; | 561 | spidev->spi = spi; |
562 | spin_lock_init(&spidev->spi_lock); | ||
491 | mutex_init(&spidev->buf_lock); | 563 | mutex_init(&spidev->buf_lock); |
492 | 564 | ||
493 | INIT_LIST_HEAD(&spidev->device_entry); | 565 | INIT_LIST_HEAD(&spidev->device_entry); |
@@ -526,13 +598,17 @@ static int spidev_remove(struct spi_device *spi) | |||
526 | { | 598 | { |
527 | struct spidev_data *spidev = dev_get_drvdata(&spi->dev); | 599 | struct spidev_data *spidev = dev_get_drvdata(&spi->dev); |
528 | 600 | ||
529 | mutex_lock(&device_list_lock); | 601 | /* make sure ops on existing fds can abort cleanly */ |
602 | spin_lock_irq(&spidev->spi_lock); | ||
603 | spidev->spi = NULL; | ||
604 | spin_unlock_irq(&spidev->spi_lock); | ||
530 | 605 | ||
606 | /* prevent new opens */ | ||
607 | mutex_lock(&device_list_lock); | ||
531 | list_del(&spidev->device_entry); | 608 | list_del(&spidev->device_entry); |
532 | dev_set_drvdata(&spi->dev, NULL); | 609 | dev_set_drvdata(&spi->dev, NULL); |
533 | clear_bit(MINOR(spidev->dev.devt), minors); | 610 | clear_bit(MINOR(spidev->dev.devt), minors); |
534 | device_unregister(&spidev->dev); | 611 | device_unregister(&spidev->dev); |
535 | |||
536 | mutex_unlock(&device_list_lock); | 612 | mutex_unlock(&device_list_lock); |
537 | 613 | ||
538 | return 0; | 614 | return 0; |
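The spidev changes above replace the blocking spi_read()/spi_write()/spi_sync() calls with spidev_sync(), which issues spi_async() under spidev->spi_lock and waits on an on-stack completion, so a file descriptor that is already open fails cleanly with -ESHUTDOWN if the underlying spi_device goes away. For context, a hedged user-space sketch of the full-duplex SPI_IOC_MESSAGE path that spidev_message() serves; the /dev/spidev0.0 node and the 8-byte transfer are illustrative assumptions:

/* Minimal user-space sketch of the full-duplex ioctl path handled by
 * spidev_message()/spidev_sync() above. Device node and payload are
 * assumptions for illustration only. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/spi/spidev.h>

int main(void)
{
	uint8_t tx[8] = { 0x9f, 0, 0, 0, 0, 0, 0, 0 };
	uint8_t rx[8] = { 0 };
	struct spi_ioc_transfer xfer;
	int fd, ret;

	fd = open("/dev/spidev0.0", O_RDWR);	/* assumed device node */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&xfer, 0, sizeof(xfer));
	xfer.tx_buf = (unsigned long)tx;	/* both directions in one transfer */
	xfer.rx_buf = (unsigned long)rx;
	xfer.len = sizeof(tx);

	/* One transfer: ends up in spidev_message(), then spidev_sync(). */
	ret = ioctl(fd, SPI_IOC_MESSAGE(1), &xfer);
	if (ret < 0)
		perror("SPI_IOC_MESSAGE");
	else
		printf("transferred %d bytes, rx[1..3] = %02x %02x %02x\n",
		       ret, rx[1], rx[2], rx[3]);

	close(fd);
	return 0;
}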
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c index e4bcf5376a99..bd4ac0bafecb 100644 --- a/drivers/video/aty/atyfb_base.c +++ b/drivers/video/aty/atyfb_base.c | |||
@@ -3356,7 +3356,7 @@ static int __devinit atyfb_setup_generic(struct pci_dev *pdev, struct fb_info *i | |||
3356 | 3356 | ||
3357 | info->fix.mmio_start = raddr; | 3357 | info->fix.mmio_start = raddr; |
3358 | par->ati_regbase = ioremap(info->fix.mmio_start, 0x1000); | 3358 | par->ati_regbase = ioremap(info->fix.mmio_start, 0x1000); |
3359 | if (par->ati_regbase == 0) | 3359 | if (par->ati_regbase == NULL) |
3360 | return -ENOMEM; | 3360 | return -ENOMEM; |
3361 | 3361 | ||
3362 | info->fix.mmio_start += par->aux_start ? 0x400 : 0xc00; | 3362 | info->fix.mmio_start += par->aux_start ? 0x400 : 0xc00; |
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c index 72cd0d2f14ec..400e9264e456 100644 --- a/drivers/video/aty/radeon_base.c +++ b/drivers/video/aty/radeon_base.c | |||
@@ -2277,8 +2277,8 @@ static int __devinit radeonfb_pci_register (struct pci_dev *pdev, | |||
2277 | do { | 2277 | do { |
2278 | rinfo->fb_base = ioremap (rinfo->fb_base_phys, | 2278 | rinfo->fb_base = ioremap (rinfo->fb_base_phys, |
2279 | rinfo->mapped_vram); | 2279 | rinfo->mapped_vram); |
2280 | } while ( rinfo->fb_base == 0 && | 2280 | } while (rinfo->fb_base == NULL && |
2281 | ((rinfo->mapped_vram /=2) >= MIN_MAPPED_VRAM) ); | 2281 | ((rinfo->mapped_vram /= 2) >= MIN_MAPPED_VRAM)); |
2282 | 2282 | ||
2283 | if (rinfo->fb_base == NULL) { | 2283 | if (rinfo->fb_base == NULL) { |
2284 | printk (KERN_ERR "radeonfb (%s): cannot map FB\n", | 2284 | printk (KERN_ERR "radeonfb (%s): cannot map FB\n", |
diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h index f3107ad7e545..95883236c0cd 100644 --- a/drivers/video/matrox/matroxfb_base.h +++ b/drivers/video/matrox/matroxfb_base.h | |||
@@ -200,7 +200,7 @@ static inline int mga_ioremap(unsigned long phys, unsigned long size, int flags, | |||
200 | virt->vaddr = ioremap_nocache(phys, size); | 200 | virt->vaddr = ioremap_nocache(phys, size); |
201 | else | 201 | else |
202 | virt->vaddr = ioremap(phys, size); | 202 | virt->vaddr = ioremap(phys, size); |
203 | return (virt->vaddr == 0); /* 0, !0... 0, error_code in future */ | 203 | return (virt->vaddr == NULL); /* 0, !0... 0, error_code in future */ |
204 | } | 204 | } |
205 | 205 | ||
206 | static inline void mga_iounmap(vaddr_t va) { | 206 | static inline void mga_iounmap(vaddr_t va) { |
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c index 3ee314beacc1..274bc93ab7d8 100644 --- a/drivers/video/pxafb.c +++ b/drivers/video/pxafb.c | |||
@@ -1351,7 +1351,6 @@ static struct pxafb_info * __init pxafb_init_fbinfo(struct device *dev) | |||
1351 | struct pxafb_info *fbi; | 1351 | struct pxafb_info *fbi; |
1352 | void *addr; | 1352 | void *addr; |
1353 | struct pxafb_mach_info *inf = dev->platform_data; | 1353 | struct pxafb_mach_info *inf = dev->platform_data; |
1354 | struct pxafb_mode_info *mode = inf->modes; | ||
1355 | 1354 | ||
1356 | /* Alloc the pxafb_info and pseudo_palette in one step */ | 1355 | /* Alloc the pxafb_info and pseudo_palette in one step */ |
1357 | fbi = kmalloc(sizeof(struct pxafb_info) + sizeof(u32) * 16, GFP_KERNEL); | 1356 | fbi = kmalloc(sizeof(struct pxafb_info) + sizeof(u32) * 16, GFP_KERNEL); |
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c index 13b38cbbe4cf..f0598961c6b0 100644 --- a/drivers/video/s3c2410fb.c +++ b/drivers/video/s3c2410fb.c | |||
@@ -1,75 +1,15 @@ | |||
1 | /* | 1 | /* linux/drivers/video/s3c2410fb.c |
2 | * linux/drivers/video/s3c2410fb.c | 2 | * Copyright (c) 2004,2005 Arnaud Patard |
3 | * Copyright (c) Arnaud Patard, Ben Dooks | 3 | * Copyright (c) 2004-2008 Ben Dooks |
4 | * | ||
5 | * S3C2410 LCD Framebuffer Driver | ||
4 | * | 6 | * |
5 | * This file is subject to the terms and conditions of the GNU General Public | 7 | * This file is subject to the terms and conditions of the GNU General Public |
6 | * License. See the file COPYING in the main directory of this archive for | 8 | * License. See the file COPYING in the main directory of this archive for |
7 | * more details. | 9 | * more details. |
8 | * | 10 | * |
9 | * S3C2410 LCD Controller Frame Buffer Driver | 11 | * Driver based on skeletonfb.c, sa1100fb.c and others. |
10 | * based on skeletonfb.c, sa1100fb.c and others | 12 | */ |
11 | * | ||
12 | * ChangeLog | ||
13 | * 2005-04-07: Arnaud Patard <arnaud.patard@rtp-net.org> | ||
14 | * - u32 state -> pm_message_t state | ||
15 | * - S3C2410_{VA,SZ}_LCD -> S3C24XX | ||
16 | * | ||
17 | * 2005-03-15: Arnaud Patard <arnaud.patard@rtp-net.org> | ||
18 | * - Removed the ioctl | ||
19 | * - use readl/writel instead of __raw_writel/__raw_readl | ||
20 | * | ||
21 | * 2004-12-04: Arnaud Patard <arnaud.patard@rtp-net.org> | ||
22 | * - Added the possibility to set on or off the | ||
23 | * debugging messages | ||
24 | * - Replaced 0 and 1 by on or off when reading the | ||
25 | * /sys files | ||
26 | * | ||
27 | * 2005-03-23: Ben Dooks <ben-linux@fluff.org> | ||
28 | * - added non 16bpp modes | ||
29 | * - updated platform information for range of x/y/bpp | ||
30 | * - add code to ensure palette is written correctly | ||
31 | * - add pixel clock divisor control | ||
32 | * | ||
33 | * 2004-11-11: Arnaud Patard <arnaud.patard@rtp-net.org> | ||
34 | * - Removed the use of currcon as it no more exists | ||
35 | * - Added LCD power sysfs interface | ||
36 | * | ||
37 | * 2004-11-03: Ben Dooks <ben-linux@fluff.org> | ||
38 | * - minor cleanups | ||
39 | * - add suspend/resume support | ||
40 | * - s3c2410fb_setcolreg() not valid in >8bpp modes | ||
41 | * - removed last CONFIG_FB_S3C2410_FIXED | ||
42 | * - ensure lcd controller stopped before cleanup | ||
43 | * - added sysfs interface for backlight power | ||
44 | * - added mask for gpio configuration | ||
45 | * - ensured IRQs disabled during GPIO configuration | ||
46 | * - disable TPAL before enabling video | ||
47 | * | ||
48 | * 2004-09-20: Arnaud Patard <arnaud.patard@rtp-net.org> | ||
49 | * - Suppress command line options | ||
50 | * | ||
51 | * 2004-09-15: Arnaud Patard <arnaud.patard@rtp-net.org> | ||
52 | * - code cleanup | ||
53 | * | ||
54 | * 2004-09-07: Arnaud Patard <arnaud.patard@rtp-net.org> | ||
55 | * - Renamed from h1940fb.c to s3c2410fb.c | ||
56 | * - Add support for different devices | ||
57 | * - Backlight support | ||
58 | * | ||
59 | * 2004-09-05: Herbert Pötzl <herbert@13thfloor.at> | ||
60 | * - added clock (de-)allocation code | ||
61 | * - added fixem fbmem option | ||
62 | * | ||
63 | * 2004-07-27: Arnaud Patard <arnaud.patard@rtp-net.org> | ||
64 | * - code cleanup | ||
65 | * - added a forgotten return in h1940fb_init | ||
66 | * | ||
67 | * 2004-07-19: Herbert Pötzl <herbert@13thfloor.at> | ||
68 | * - code cleanup and extended debugging | ||
69 | * | ||
70 | * 2004-07-15: Arnaud Patard <arnaud.patard@rtp-net.org> | ||
71 | * - First version | ||
72 | */ | ||
73 | 13 | ||
74 | #include <linux/module.h> | 14 | #include <linux/module.h> |
75 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
@@ -580,6 +520,27 @@ static int s3c2410fb_setcolreg(unsigned regno, | |||
580 | return 0; | 520 | return 0; |
581 | } | 521 | } |
582 | 522 | ||
523 | /* s3c2410fb_lcd_enable | ||
524 | * | ||
525 | * enable or disable the lcd controller | ||
526 | */ | ||
527 | static void s3c2410fb_lcd_enable(struct s3c2410fb_info *fbi, int enable) | ||
528 | { | ||
529 | unsigned long flags; | ||
530 | |||
531 | local_irq_save(flags); | ||
532 | |||
533 | if (enable) | ||
534 | fbi->regs.lcdcon1 |= S3C2410_LCDCON1_ENVID; | ||
535 | else | ||
536 | fbi->regs.lcdcon1 &= ~S3C2410_LCDCON1_ENVID; | ||
537 | |||
538 | writel(fbi->regs.lcdcon1, fbi->io + S3C2410_LCDCON1); | ||
539 | |||
540 | local_irq_restore(flags); | ||
541 | } | ||
542 | |||
543 | |||
583 | /* | 544 | /* |
584 | * s3c2410fb_blank | 545 | * s3c2410fb_blank |
585 | * @blank_mode: the blank mode we want. | 546 | * @blank_mode: the blank mode we want. |
@@ -589,9 +550,6 @@ static int s3c2410fb_setcolreg(unsigned regno, | |||
589 | * blanking succeeded, != 0 if un-/blanking failed due to e.g. a | 550 | * blanking succeeded, != 0 if un-/blanking failed due to e.g. a |
590 | * video mode which doesn't support it. Implements VESA suspend | 551 | * video mode which doesn't support it. Implements VESA suspend |
591 | * and powerdown modes on hardware that supports disabling hsync/vsync: | 552 | * and powerdown modes on hardware that supports disabling hsync/vsync: |
592 | * blank_mode == 2: suspend vsync | ||
593 | * blank_mode == 3: suspend hsync | ||
594 | * blank_mode == 4: powerdown | ||
595 | * | 553 | * |
596 | * Returns negative errno on error, or zero on success. | 554 | * Returns negative errno on error, or zero on success. |
597 | * | 555 | * |
@@ -605,6 +563,12 @@ static int s3c2410fb_blank(int blank_mode, struct fb_info *info) | |||
605 | 563 | ||
606 | tpal_reg += is_s3c2412(fbi) ? S3C2412_TPAL : S3C2410_TPAL; | 564 | tpal_reg += is_s3c2412(fbi) ? S3C2412_TPAL : S3C2410_TPAL; |
607 | 565 | ||
566 | if (blank_mode == FB_BLANK_POWERDOWN) { | ||
567 | s3c2410fb_lcd_enable(fbi, 0); | ||
568 | } else { | ||
569 | s3c2410fb_lcd_enable(fbi, 1); | ||
570 | } | ||
571 | |||
608 | if (blank_mode == FB_BLANK_UNBLANK) | 572 | if (blank_mode == FB_BLANK_UNBLANK) |
609 | writel(0x0, tpal_reg); | 573 | writel(0x0, tpal_reg); |
610 | else { | 574 | else { |
@@ -948,7 +912,10 @@ static int __init s3c24xxfb_probe(struct platform_device *pdev, | |||
948 | } | 912 | } |
949 | 913 | ||
950 | /* create device files */ | 914 | /* create device files */ |
951 | device_create_file(&pdev->dev, &dev_attr_debug); | 915 | ret = device_create_file(&pdev->dev, &dev_attr_debug); |
916 | if (ret) { | ||
917 | printk(KERN_ERR "failed to add debug attribute\n"); | ||
918 | } | ||
952 | 919 | ||
953 | printk(KERN_INFO "fb%d: %s frame buffer device\n", | 920 | printk(KERN_INFO "fb%d: %s frame buffer device\n", |
954 | fbinfo->node, fbinfo->fix.id); | 921 | fbinfo->node, fbinfo->fix.id); |
@@ -983,21 +950,6 @@ static int __init s3c2412fb_probe(struct platform_device *pdev) | |||
983 | return s3c24xxfb_probe(pdev, DRV_S3C2412); | 950 | return s3c24xxfb_probe(pdev, DRV_S3C2412); |
984 | } | 951 | } |
985 | 952 | ||
986 | /* s3c2410fb_stop_lcd | ||
987 | * | ||
988 | * shutdown the lcd controller | ||
989 | */ | ||
990 | static void s3c2410fb_stop_lcd(struct s3c2410fb_info *fbi) | ||
991 | { | ||
992 | unsigned long flags; | ||
993 | |||
994 | local_irq_save(flags); | ||
995 | |||
996 | fbi->regs.lcdcon1 &= ~S3C2410_LCDCON1_ENVID; | ||
997 | writel(fbi->regs.lcdcon1, fbi->io + S3C2410_LCDCON1); | ||
998 | |||
999 | local_irq_restore(flags); | ||
1000 | } | ||
1001 | 953 | ||
1002 | /* | 954 | /* |
1003 | * Cleanup | 955 | * Cleanup |
@@ -1010,7 +962,7 @@ static int s3c2410fb_remove(struct platform_device *pdev) | |||
1010 | 962 | ||
1011 | unregister_framebuffer(fbinfo); | 963 | unregister_framebuffer(fbinfo); |
1012 | 964 | ||
1013 | s3c2410fb_stop_lcd(info); | 965 | s3c2410fb_lcd_enable(info, 0); |
1014 | msleep(1); | 966 | msleep(1); |
1015 | 967 | ||
1016 | s3c2410fb_unmap_video_memory(fbinfo); | 968 | s3c2410fb_unmap_video_memory(fbinfo); |
@@ -1043,7 +995,7 @@ static int s3c2410fb_suspend(struct platform_device *dev, pm_message_t state) | |||
1043 | struct fb_info *fbinfo = platform_get_drvdata(dev); | 995 | struct fb_info *fbinfo = platform_get_drvdata(dev); |
1044 | struct s3c2410fb_info *info = fbinfo->par; | 996 | struct s3c2410fb_info *info = fbinfo->par; |
1045 | 997 | ||
1046 | s3c2410fb_stop_lcd(info); | 998 | s3c2410fb_lcd_enable(info, 0); |
1047 | 999 | ||
1048 | /* sleep before disabling the clock, we need to ensure | 1000 | /* sleep before disabling the clock, we need to ensure |
1049 | * the LCD DMA engine is not going to get back on the bus | 1001 | * the LCD DMA engine is not going to get back on the bus |
@@ -1118,3 +1070,5 @@ MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>, " | |||
1118 | "Ben Dooks <ben-linux@fluff.org>"); | 1070 | "Ben Dooks <ben-linux@fluff.org>"); |
1119 | MODULE_DESCRIPTION("Framebuffer driver for the s3c2410"); | 1071 | MODULE_DESCRIPTION("Framebuffer driver for the s3c2410"); |
1120 | MODULE_LICENSE("GPL"); | 1072 | MODULE_LICENSE("GPL"); |
1073 | MODULE_ALIAS("platform:s3c2410-lcd"); | ||
1074 | MODULE_ALIAS("platform:s3c2412-lcd"); | ||
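With the hunks above, s3c2410fb_blank() now drops ENVID on FB_BLANK_POWERDOWN and re-asserts it for every other mode, in addition to the existing TPAL handling. A hedged user-space sketch of the FBIOBLANK ioctl that exercises this path; the /dev/fb0 node and the two-second pause are assumptions:

/* User-space sketch of the fbdev blanking path that ends up in
 * s3c2410fb_blank() above: power the panel down via FBIOBLANK and
 * bring it back. The device node is an assumption. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fb.h>

int main(void)
{
	int fd = open("/dev/fb0", O_RDWR);	/* assumed framebuffer node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (ioctl(fd, FBIOBLANK, FB_BLANK_POWERDOWN) < 0)
		perror("FBIOBLANK powerdown");	/* driver clears ENVID here */

	sleep(2);

	if (ioctl(fd, FBIOBLANK, FB_BLANK_UNBLANK) < 0)
		perror("FBIOBLANK unblank");	/* ENVID set, TPAL cleared */

	close(fd);
	return 0;
}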
diff --git a/drivers/video/s3c2410fb.h b/drivers/video/s3c2410fb.h index dbb73b95e2ef..9a6ba3e9d1b8 100644 --- a/drivers/video/s3c2410fb.h +++ b/drivers/video/s3c2410fb.h | |||
@@ -1,26 +1,14 @@ | |||
1 | /* | 1 | /* |
2 | * linux/drivers/video/s3c2410fb.h | 2 | * linux/drivers/video/s3c2410fb.h |
3 | * Copyright (c) Arnaud Patard | 3 | * Copyright (c) 2004 Arnaud Patard |
4 | * | ||
5 | * S3C2410 LCD Framebuffer Driver | ||
4 | * | 6 | * |
5 | * This file is subject to the terms and conditions of the GNU General Public | 7 | * This file is subject to the terms and conditions of the GNU General Public |
6 | * License. See the file COPYING in the main directory of this archive for | 8 | * License. See the file COPYING in the main directory of this archive for |
7 | * more details. | 9 | * more details. |
8 | * | 10 | * |
9 | * S3C2410 LCD Controller Frame Buffer Driver | 11 | */ |
10 | * based on skeletonfb.c, sa1100fb.h | ||
11 | * | ||
12 | * ChangeLog | ||
13 | * | ||
14 | * 2004-12-04: Arnaud Patard <arnaud.patard@rtp-net.org> | ||
15 | * - Moved dprintk to s3c2410fb.c | ||
16 | * | ||
17 | * 2004-09-07: Arnaud Patard <arnaud.patard@rtp-net.org> | ||
18 | * - Renamed from h1940fb.h to s3c2410fb.h | ||
19 | * - Changed h1940 to s3c2410 | ||
20 | * | ||
21 | * 2004-07-15: Arnaud Patard <arnaud.patard@rtp-net.org> | ||
22 | * - First version | ||
23 | */ | ||
24 | 12 | ||
25 | #ifndef __S3C2410FB_H | 13 | #ifndef __S3C2410FB_H |
26 | #define __S3C2410FB_H | 14 | #define __S3C2410FB_H |
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c index 73803624c131..b9343844cd1f 100644 --- a/drivers/video/sis/sis_main.c +++ b/drivers/video/sis/sis_main.c | |||
@@ -5787,7 +5787,7 @@ sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
5787 | } else { | 5787 | } else { |
5788 | struct sis_video_info *countvideo = card_list; | 5788 | struct sis_video_info *countvideo = card_list; |
5789 | ivideo->cardnumber = 1; | 5789 | ivideo->cardnumber = 1; |
5790 | while((countvideo = countvideo->next) != 0) | 5790 | while((countvideo = countvideo->next) != NULL) |
5791 | ivideo->cardnumber++; | 5791 | ivideo->cardnumber++; |
5792 | } | 5792 | } |
5793 | 5793 | ||
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c index 742b5c656d66..15d4a768b1f6 100644 --- a/drivers/video/sm501fb.c +++ b/drivers/video/sm501fb.c | |||
@@ -663,14 +663,14 @@ static void sm501fb_panel_power(struct sm501fb_info *fbi, int to) | |||
663 | sm501fb_sync_regs(fbi); | 663 | sm501fb_sync_regs(fbi); |
664 | mdelay(10); | 664 | mdelay(10); |
665 | 665 | ||
666 | if (pd->flags & SM501FB_FLAG_PANEL_USE_VBIASEN) { | 666 | if (!(pd->flags & SM501FB_FLAG_PANEL_NO_VBIASEN)) { |
667 | control |= SM501_DC_PANEL_CONTROL_BIAS; /* VBIASEN */ | 667 | control |= SM501_DC_PANEL_CONTROL_BIAS; /* VBIASEN */ |
668 | writel(control, ctrl_reg); | 668 | writel(control, ctrl_reg); |
669 | sm501fb_sync_regs(fbi); | 669 | sm501fb_sync_regs(fbi); |
670 | mdelay(10); | 670 | mdelay(10); |
671 | } | 671 | } |
672 | 672 | ||
673 | if (pd->flags & SM501FB_FLAG_PANEL_USE_FPEN) { | 673 | if (!(pd->flags & SM501FB_FLAG_PANEL_NO_FPEN)) { |
674 | control |= SM501_DC_PANEL_CONTROL_FPEN; | 674 | control |= SM501_DC_PANEL_CONTROL_FPEN; |
675 | writel(control, ctrl_reg); | 675 | writel(control, ctrl_reg); |
676 | sm501fb_sync_regs(fbi); | 676 | sm501fb_sync_regs(fbi); |
@@ -678,14 +678,14 @@ static void sm501fb_panel_power(struct sm501fb_info *fbi, int to) | |||
678 | } | 678 | } |
679 | } else if (!to && (control & SM501_DC_PANEL_CONTROL_VDD) != 0) { | 679 | } else if (!to && (control & SM501_DC_PANEL_CONTROL_VDD) != 0) { |
680 | /* disable panel power */ | 680 | /* disable panel power */ |
681 | if (pd->flags & SM501FB_FLAG_PANEL_USE_FPEN) { | 681 | if (!(pd->flags & SM501FB_FLAG_PANEL_NO_FPEN)) { |
682 | control &= ~SM501_DC_PANEL_CONTROL_FPEN; | 682 | control &= ~SM501_DC_PANEL_CONTROL_FPEN; |
683 | writel(control, ctrl_reg); | 683 | writel(control, ctrl_reg); |
684 | sm501fb_sync_regs(fbi); | 684 | sm501fb_sync_regs(fbi); |
685 | mdelay(10); | 685 | mdelay(10); |
686 | } | 686 | } |
687 | 687 | ||
688 | if (pd->flags & SM501FB_FLAG_PANEL_USE_VBIASEN) { | 688 | if (!(pd->flags & SM501FB_FLAG_PANEL_NO_VBIASEN)) { |
689 | control &= ~SM501_DC_PANEL_CONTROL_BIAS; | 689 | control &= ~SM501_DC_PANEL_CONTROL_BIAS; |
690 | writel(control, ctrl_reg); | 690 | writel(control, ctrl_reg); |
691 | sm501fb_sync_regs(fbi); | 691 | sm501fb_sync_regs(fbi); |
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 254d115cafab..ccb78f66c2b6 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
@@ -295,6 +295,19 @@ config ALIM7101_WDT | |||
295 | 295 | ||
296 | Most people will say N. | 296 | Most people will say N. |
297 | 297 | ||
298 | config GEODE_WDT | ||
299 | tristate "AMD Geode CS5535/CS5536 Watchdog" | ||
300 | depends on MGEODE_LX | ||
301 | help | ||
302 | This driver enables a watchdog capability built into the | ||
303 | CS5535/CS5536 companion chips for the AMD Geode GX and LX | ||
304 | processors. This watchdog watches your kernel to make sure | ||
305 | it doesn't freeze, and if it does, it reboots your computer after | ||
306 | a certain amount of time. | ||
307 | |||
308 | You can compile this driver directly into the kernel, or use | ||
309 | it as a module. The module will be called geodewdt. | ||
310 | |||
298 | config SC520_WDT | 311 | config SC520_WDT |
299 | tristate "AMD Elan SC520 processor Watchdog" | 312 | tristate "AMD Elan SC520 processor Watchdog" |
300 | depends on X86 | 313 | depends on X86 |
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index f3fb170fe5c6..25b352b664d9 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile | |||
@@ -59,6 +59,7 @@ obj-$(CONFIG_ACQUIRE_WDT) += acquirewdt.o | |||
59 | obj-$(CONFIG_ADVANTECH_WDT) += advantechwdt.o | 59 | obj-$(CONFIG_ADVANTECH_WDT) += advantechwdt.o |
60 | obj-$(CONFIG_ALIM1535_WDT) += alim1535_wdt.o | 60 | obj-$(CONFIG_ALIM1535_WDT) += alim1535_wdt.o |
61 | obj-$(CONFIG_ALIM7101_WDT) += alim7101_wdt.o | 61 | obj-$(CONFIG_ALIM7101_WDT) += alim7101_wdt.o |
62 | obj-$(CONFIG_GEODE_WDT) += geodewdt.o | ||
62 | obj-$(CONFIG_SC520_WDT) += sc520_wdt.o | 63 | obj-$(CONFIG_SC520_WDT) += sc520_wdt.o |
63 | obj-$(CONFIG_EUROTECH_WDT) += eurotechwdt.o | 64 | obj-$(CONFIG_EUROTECH_WDT) += eurotechwdt.o |
64 | obj-$(CONFIG_IB700_WDT) += ib700wdt.o | 65 | obj-$(CONFIG_IB700_WDT) += ib700wdt.o |
diff --git a/drivers/watchdog/bfin_wdt.c b/drivers/watchdog/bfin_wdt.c index 1237113dc14a..03b3e3d91e7c 100644 --- a/drivers/watchdog/bfin_wdt.c +++ b/drivers/watchdog/bfin_wdt.c | |||
@@ -29,7 +29,8 @@ | |||
29 | 29 | ||
30 | #define stamp(fmt, args...) pr_debug("%s:%i: " fmt "\n", __func__, __LINE__, ## args) | 30 | #define stamp(fmt, args...) pr_debug("%s:%i: " fmt "\n", __func__, __LINE__, ## args) |
31 | #define stampit() stamp("here i am") | 31 | #define stampit() stamp("here i am") |
32 | #define pr_init(fmt, args...) ({ static const __initdata char __fmt[] = fmt; printk(__fmt, ## args); }) | 32 | #define pr_devinit(fmt, args...) ({ static const __devinitconst char __fmt[] = fmt; printk(__fmt, ## args); }) |
33 | #define pr_init(fmt, args...) ({ static const __initconst char __fmt[] = fmt; printk(__fmt, ## args); }) | ||
33 | 34 | ||
34 | #define WATCHDOG_NAME "bfin-wdt" | 35 | #define WATCHDOG_NAME "bfin-wdt" |
35 | #define PFX WATCHDOG_NAME ": " | 36 | #define PFX WATCHDOG_NAME ": " |
@@ -377,20 +378,6 @@ static int bfin_wdt_resume(struct platform_device *pdev) | |||
377 | # define bfin_wdt_resume NULL | 378 | # define bfin_wdt_resume NULL |
378 | #endif | 379 | #endif |
379 | 380 | ||
380 | static struct platform_device bfin_wdt_device = { | ||
381 | .name = WATCHDOG_NAME, | ||
382 | .id = -1, | ||
383 | }; | ||
384 | |||
385 | static struct platform_driver bfin_wdt_driver = { | ||
386 | .driver = { | ||
387 | .name = WATCHDOG_NAME, | ||
388 | .owner = THIS_MODULE, | ||
389 | }, | ||
390 | .suspend = bfin_wdt_suspend, | ||
391 | .resume = bfin_wdt_resume, | ||
392 | }; | ||
393 | |||
394 | static const struct file_operations bfin_wdt_fops = { | 381 | static const struct file_operations bfin_wdt_fops = { |
395 | .owner = THIS_MODULE, | 382 | .owner = THIS_MODULE, |
396 | .llseek = no_llseek, | 383 | .llseek = no_llseek, |
@@ -418,11 +405,67 @@ static struct notifier_block bfin_wdt_notifier = { | |||
418 | }; | 405 | }; |
419 | 406 | ||
420 | /** | 407 | /** |
421 | * bfin_wdt_init - Initialize module | 408 | * bfin_wdt_probe - Initialize module |
422 | * | 409 | * |
423 | * Registers the device and notifier handler. Actual device | 410 | * Registers the misc device and notifier handler. Actual device |
424 | * initialization is handled by bfin_wdt_open(). | 411 | * initialization is handled by bfin_wdt_open(). |
425 | */ | 412 | */ |
413 | static int __devinit bfin_wdt_probe(struct platform_device *pdev) | ||
414 | { | ||
415 | int ret; | ||
416 | |||
417 | ret = register_reboot_notifier(&bfin_wdt_notifier); | ||
418 | if (ret) { | ||
419 | pr_devinit(KERN_ERR PFX "cannot register reboot notifier (err=%d)\n", ret); | ||
420 | return ret; | ||
421 | } | ||
422 | |||
423 | ret = misc_register(&bfin_wdt_miscdev); | ||
424 | if (ret) { | ||
425 | pr_devinit(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n", | ||
426 | WATCHDOG_MINOR, ret); | ||
427 | unregister_reboot_notifier(&bfin_wdt_notifier); | ||
428 | return ret; | ||
429 | } | ||
430 | |||
431 | pr_devinit(KERN_INFO PFX "initialized: timeout=%d sec (nowayout=%d)\n", | ||
432 | timeout, nowayout); | ||
433 | |||
434 | return 0; | ||
435 | } | ||
436 | |||
437 | /** | ||
438 | * bfin_wdt_remove - Deinitialize module | ||
439 | * | ||
440 | * Unregisters the misc device and notifier handler. Actual device | ||
441 | * deinitialization is handled by bfin_wdt_close(). | ||
442 | */ | ||
443 | static int __devexit bfin_wdt_remove(struct platform_device *pdev) | ||
444 | { | ||
445 | misc_deregister(&bfin_wdt_miscdev); | ||
446 | unregister_reboot_notifier(&bfin_wdt_notifier); | ||
447 | return 0; | ||
448 | } | ||
449 | |||
450 | static struct platform_device *bfin_wdt_device; | ||
451 | |||
452 | static struct platform_driver bfin_wdt_driver = { | ||
453 | .probe = bfin_wdt_probe, | ||
454 | .remove = __devexit_p(bfin_wdt_remove), | ||
455 | .suspend = bfin_wdt_suspend, | ||
456 | .resume = bfin_wdt_resume, | ||
457 | .driver = { | ||
458 | .name = WATCHDOG_NAME, | ||
459 | .owner = THIS_MODULE, | ||
460 | }, | ||
461 | }; | ||
462 | |||
463 | /** | ||
464 | * bfin_wdt_init - Initialize module | ||
465 | * | ||
466 | * Checks the module params and registers the platform device & driver. | ||
467 | * Real work is in the platform probe function. | ||
468 | */ | ||
426 | static int __init bfin_wdt_init(void) | 469 | static int __init bfin_wdt_init(void) |
427 | { | 470 | { |
428 | int ret; | 471 | int ret; |
@@ -436,44 +479,32 @@ static int __init bfin_wdt_init(void) | |||
436 | /* Since this is an on-chip device and needs no board-specific | 479 | /* Since this is an on-chip device and needs no board-specific |
437 | * resources, we'll handle all the platform device stuff here. | 480 | * resources, we'll handle all the platform device stuff here. |
438 | */ | 481 | */ |
439 | ret = platform_device_register(&bfin_wdt_device); | 482 | ret = platform_driver_register(&bfin_wdt_driver); |
440 | if (ret) | ||
441 | return ret; | ||
442 | |||
443 | ret = platform_driver_probe(&bfin_wdt_driver, NULL); | ||
444 | if (ret) | ||
445 | return ret; | ||
446 | |||
447 | ret = register_reboot_notifier(&bfin_wdt_notifier); | ||
448 | if (ret) { | 483 | if (ret) { |
449 | pr_init(KERN_ERR PFX "cannot register reboot notifier (err=%d)\n", ret); | 484 | pr_init(KERN_ERR PFX "unable to register driver\n"); |
450 | return ret; | 485 | return ret; |
451 | } | 486 | } |
452 | 487 | ||
453 | ret = misc_register(&bfin_wdt_miscdev); | 488 | bfin_wdt_device = platform_device_register_simple(WATCHDOG_NAME, -1, NULL, 0); |
454 | if (ret) { | 489 | if (IS_ERR(bfin_wdt_device)) { |
455 | pr_init(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n", | 490 | pr_init(KERN_ERR PFX "unable to register device\n"); |
456 | WATCHDOG_MINOR, ret); | 491 | platform_driver_unregister(&bfin_wdt_driver); |
457 | unregister_reboot_notifier(&bfin_wdt_notifier); | 492 | return PTR_ERR(bfin_wdt_device); |
458 | return ret; | ||
459 | } | 493 | } |
460 | 494 | ||
461 | pr_init(KERN_INFO PFX "initialized: timeout=%d sec (nowayout=%d)\n", | ||
462 | timeout, nowayout); | ||
463 | |||
464 | return 0; | 495 | return 0; |
465 | } | 496 | } |
466 | 497 | ||
467 | /** | 498 | /** |
468 | * bfin_wdt_exit - Deinitialize module | 499 | * bfin_wdt_exit - Deinitialize module |
469 | * | 500 | * |
470 | * Unregisters the device and notifier handler. Actual device | 501 | * Back out the platform device & driver steps. Real work is in the |
471 | * deinitialization is handled by bfin_wdt_close(). | 502 | * platform remove function. |
472 | */ | 503 | */ |
473 | static void __exit bfin_wdt_exit(void) | 504 | static void __exit bfin_wdt_exit(void) |
474 | { | 505 | { |
475 | misc_deregister(&bfin_wdt_miscdev); | 506 | platform_device_unregister(bfin_wdt_device); |
476 | unregister_reboot_notifier(&bfin_wdt_notifier); | 507 | platform_driver_unregister(&bfin_wdt_driver); |
477 | } | 508 | } |
478 | 509 | ||
479 | module_init(bfin_wdt_init); | 510 | module_init(bfin_wdt_init); |
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c index d362f5bf658a..c1ba0db48501 100644 --- a/drivers/watchdog/booke_wdt.c +++ b/drivers/watchdog/booke_wdt.c | |||
@@ -1,12 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/char/watchdog/booke_wdt.c | ||
3 | * | ||
4 | * Watchdog timer for PowerPC Book-E systems | 2 | * Watchdog timer for PowerPC Book-E systems |
5 | * | 3 | * |
6 | * Author: Matthew McClintock | 4 | * Author: Matthew McClintock |
7 | * Maintainer: Kumar Gala <galak@kernel.crashing.org> | 5 | * Maintainer: Kumar Gala <galak@kernel.crashing.org> |
8 | * | 6 | * |
9 | * Copyright 2005 Freescale Semiconductor Inc. | 7 | * Copyright 2005, 2008 Freescale Semiconductor Inc. |
10 | * | 8 | * |
11 | * This program is free software; you can redistribute it and/or modify it | 9 | * This program is free software; you can redistribute it and/or modify it |
12 | * under the terms of the GNU General Public License as published by the | 10 | * under the terms of the GNU General Public License as published by the |
@@ -16,6 +14,7 @@ | |||
16 | 14 | ||
17 | #include <linux/module.h> | 15 | #include <linux/module.h> |
18 | #include <linux/fs.h> | 16 | #include <linux/fs.h> |
17 | #include <linux/smp.h> | ||
19 | #include <linux/miscdevice.h> | 18 | #include <linux/miscdevice.h> |
20 | #include <linux/notifier.h> | 19 | #include <linux/notifier.h> |
21 | #include <linux/watchdog.h> | 20 | #include <linux/watchdog.h> |
@@ -38,7 +37,7 @@ | |||
38 | #define WDT_PERIOD_DEFAULT 3 /* Refer to the PPC40x and PPC4xx manuals */ | 37 | #define WDT_PERIOD_DEFAULT 3 /* Refer to the PPC40x and PPC4xx manuals */ |
39 | #endif /* for timing information */ | 38 | #endif /* for timing information */ |
40 | 39 | ||
41 | u32 booke_wdt_enabled = 0; | 40 | u32 booke_wdt_enabled; |
42 | u32 booke_wdt_period = WDT_PERIOD_DEFAULT; | 41 | u32 booke_wdt_period = WDT_PERIOD_DEFAULT; |
43 | 42 | ||
44 | #ifdef CONFIG_FSL_BOOKE | 43 | #ifdef CONFIG_FSL_BOOKE |
@@ -47,33 +46,31 @@ u32 booke_wdt_period = WDT_PERIOD_DEFAULT; | |||
47 | #define WDTP(x) (TCR_WP(x)) | 46 | #define WDTP(x) (TCR_WP(x)) |
48 | #endif | 47 | #endif |
49 | 48 | ||
50 | /* | 49 | static DEFINE_SPINLOCK(booke_wdt_lock); |
51 | * booke_wdt_ping: | 50 | |
52 | */ | 51 | static void __booke_wdt_ping(void *data) |
53 | static __inline__ void booke_wdt_ping(void) | ||
54 | { | 52 | { |
55 | mtspr(SPRN_TSR, TSR_ENW|TSR_WIS); | 53 | mtspr(SPRN_TSR, TSR_ENW|TSR_WIS); |
56 | } | 54 | } |
57 | 55 | ||
58 | /* | 56 | static void booke_wdt_ping(void) |
59 | * booke_wdt_enable: | 57 | { |
60 | */ | 58 | on_each_cpu(__booke_wdt_ping, NULL, 0, 0); |
61 | static __inline__ void booke_wdt_enable(void) | 59 | } |
60 | |||
61 | static void __booke_wdt_enable(void *data) | ||
62 | { | 62 | { |
63 | u32 val; | 63 | u32 val; |
64 | 64 | ||
65 | /* clear status before enabling watchdog */ | 65 | /* clear status before enabling watchdog */ |
66 | booke_wdt_ping(); | 66 | __booke_wdt_ping(NULL); |
67 | val = mfspr(SPRN_TCR); | 67 | val = mfspr(SPRN_TCR); |
68 | val |= (TCR_WIE|TCR_WRC(WRC_CHIP)|WDTP(booke_wdt_period)); | 68 | val |= (TCR_WIE|TCR_WRC(WRC_CHIP)|WDTP(booke_wdt_period)); |
69 | 69 | ||
70 | mtspr(SPRN_TCR, val); | 70 | mtspr(SPRN_TCR, val); |
71 | } | 71 | } |
72 | 72 | ||
73 | /* | 73 | static ssize_t booke_wdt_write(struct file *file, const char __user *buf, |
74 | * booke_wdt_write: | ||
75 | */ | ||
76 | static ssize_t booke_wdt_write (struct file *file, const char __user *buf, | ||
77 | size_t count, loff_t *ppos) | 74 | size_t count, loff_t *ppos) |
78 | { | 75 | { |
79 | booke_wdt_ping(); | 76 | booke_wdt_ping(); |
@@ -81,15 +78,11 @@ static ssize_t booke_wdt_write (struct file *file, const char __user *buf, | |||
81 | } | 78 | } |
82 | 79 | ||
83 | static struct watchdog_info ident = { | 80 | static struct watchdog_info ident = { |
84 | .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, | 81 | .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, |
85 | .firmware_version = 0, | 82 | .identity = "PowerPC Book-E Watchdog", |
86 | .identity = "PowerPC Book-E Watchdog", | ||
87 | }; | 83 | }; |
88 | 84 | ||
89 | /* | 85 | static int booke_wdt_ioctl(struct inode *inode, struct file *file, |
90 | * booke_wdt_ioctl: | ||
91 | */ | ||
92 | static int booke_wdt_ioctl (struct inode *inode, struct file *file, | ||
93 | unsigned int cmd, unsigned long arg) | 86 | unsigned int cmd, unsigned long arg) |
94 | { | 87 | { |
95 | u32 tmp = 0; | 88 | u32 tmp = 0; |
@@ -97,7 +90,7 @@ static int booke_wdt_ioctl (struct inode *inode, struct file *file, | |||
97 | 90 | ||
98 | switch (cmd) { | 91 | switch (cmd) { |
99 | case WDIOC_GETSUPPORT: | 92 | case WDIOC_GETSUPPORT: |
100 | if (copy_to_user ((struct watchdog_info __user *) arg, &ident, | 93 | if (copy_to_user((struct watchdog_info __user *)arg, &ident, |
101 | sizeof(struct watchdog_info))) | 94 | sizeof(struct watchdog_info))) |
102 | return -EFAULT; | 95 | return -EFAULT; |
103 | case WDIOC_GETSTATUS: | 96 | case WDIOC_GETSTATUS: |
@@ -132,33 +125,33 @@ static int booke_wdt_ioctl (struct inode *inode, struct file *file, | |||
132 | 125 | ||
133 | return 0; | 126 | return 0; |
134 | } | 127 | } |
135 | /* | 128 | |
136 | * booke_wdt_open: | 129 | static int booke_wdt_open(struct inode *inode, struct file *file) |
137 | */ | ||
138 | static int booke_wdt_open (struct inode *inode, struct file *file) | ||
139 | { | 130 | { |
131 | spin_lock(&booke_wdt_lock); | ||
140 | if (booke_wdt_enabled == 0) { | 132 | if (booke_wdt_enabled == 0) { |
141 | booke_wdt_enabled = 1; | 133 | booke_wdt_enabled = 1; |
142 | booke_wdt_enable(); | 134 | on_each_cpu(__booke_wdt_enable, NULL, 0, 0); |
143 | printk (KERN_INFO "PowerPC Book-E Watchdog Timer Enabled (wdt_period=%d)\n", | 135 | printk(KERN_INFO "PowerPC Book-E Watchdog Timer Enabled " |
144 | booke_wdt_period); | 136 | "(wdt_period=%d)\n", booke_wdt_period); |
145 | } | 137 | } |
138 | spin_unlock(&booke_wdt_lock); | ||
146 | 139 | ||
147 | return nonseekable_open(inode, file); | 140 | return nonseekable_open(inode, file); |
148 | } | 141 | } |
149 | 142 | ||
150 | static const struct file_operations booke_wdt_fops = { | 143 | static const struct file_operations booke_wdt_fops = { |
151 | .owner = THIS_MODULE, | 144 | .owner = THIS_MODULE, |
152 | .llseek = no_llseek, | 145 | .llseek = no_llseek, |
153 | .write = booke_wdt_write, | 146 | .write = booke_wdt_write, |
154 | .ioctl = booke_wdt_ioctl, | 147 | .ioctl = booke_wdt_ioctl, |
155 | .open = booke_wdt_open, | 148 | .open = booke_wdt_open, |
156 | }; | 149 | }; |
157 | 150 | ||
158 | static struct miscdevice booke_wdt_miscdev = { | 151 | static struct miscdevice booke_wdt_miscdev = { |
159 | .minor = WATCHDOG_MINOR, | 152 | .minor = WATCHDOG_MINOR, |
160 | .name = "watchdog", | 153 | .name = "watchdog", |
161 | .fops = &booke_wdt_fops, | 154 | .fops = &booke_wdt_fops, |
162 | }; | 155 | }; |
163 | 156 | ||
164 | static void __exit booke_wdt_exit(void) | 157 | static void __exit booke_wdt_exit(void) |
@@ -166,28 +159,27 @@ static void __exit booke_wdt_exit(void) | |||
166 | misc_deregister(&booke_wdt_miscdev); | 159 | misc_deregister(&booke_wdt_miscdev); |
167 | } | 160 | } |
168 | 161 | ||
169 | /* | ||
170 | * booke_wdt_init: | ||
171 | */ | ||
172 | static int __init booke_wdt_init(void) | 162 | static int __init booke_wdt_init(void) |
173 | { | 163 | { |
174 | int ret = 0; | 164 | int ret = 0; |
175 | 165 | ||
176 | printk (KERN_INFO "PowerPC Book-E Watchdog Timer Loaded\n"); | 166 | printk(KERN_INFO "PowerPC Book-E Watchdog Timer Loaded\n"); |
177 | ident.firmware_version = cur_cpu_spec->pvr_value; | 167 | ident.firmware_version = cur_cpu_spec->pvr_value; |
178 | 168 | ||
179 | ret = misc_register(&booke_wdt_miscdev); | 169 | ret = misc_register(&booke_wdt_miscdev); |
180 | if (ret) { | 170 | if (ret) { |
181 | printk (KERN_CRIT "Cannot register miscdev on minor=%d (err=%d)\n", | 171 | printk(KERN_CRIT "Cannot register miscdev on minor=%d: %d\n", |
182 | WATCHDOG_MINOR, ret); | 172 | WATCHDOG_MINOR, ret); |
183 | return ret; | 173 | return ret; |
184 | } | 174 | } |
185 | 175 | ||
176 | spin_lock(&booke_wdt_lock); | ||
186 | if (booke_wdt_enabled == 1) { | 177 | if (booke_wdt_enabled == 1) { |
187 | printk (KERN_INFO "PowerPC Book-E Watchdog Timer Enabled (wdt_period=%d)\n", | 178 | printk(KERN_INFO "PowerPC Book-E Watchdog Timer Enabled " |
188 | booke_wdt_period); | 179 | "(wdt_period=%d)\n", booke_wdt_period); |
189 | booke_wdt_enable(); | 180 | on_each_cpu(__booke_wdt_enable, NULL, 0, 0); |
190 | } | 181 | } |
182 | spin_unlock(&booke_wdt_lock); | ||
191 | 183 | ||
192 | return ret; | 184 | return ret; |
193 | } | 185 | } |
diff --git a/drivers/watchdog/geodewdt.c b/drivers/watchdog/geodewdt.c new file mode 100644 index 000000000000..f85b19625f97 --- /dev/null +++ b/drivers/watchdog/geodewdt.c | |||
@@ -0,0 +1,309 @@ | |||
1 | /* Watchdog timer for the Geode GX/LX with the CS5535/CS5536 companion chip | ||
2 | * | ||
3 | * Copyright (C) 2006-2007, Advanced Micro Devices, Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version | ||
8 | * 2 of the License, or (at your option) any later version. | ||
9 | */ | ||
10 | |||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/moduleparam.h> | ||
14 | #include <linux/types.h> | ||
15 | #include <linux/miscdevice.h> | ||
16 | #include <linux/watchdog.h> | ||
17 | #include <linux/fs.h> | ||
18 | #include <linux/platform_device.h> | ||
19 | #include <linux/reboot.h> | ||
20 | |||
21 | #include <asm/uaccess.h> | ||
22 | #include <asm/geode.h> | ||
23 | |||
24 | #define GEODEWDT_HZ 500 | ||
25 | #define GEODEWDT_SCALE 6 | ||
26 | #define GEODEWDT_MAX_SECONDS 131 | ||
27 | |||
28 | #define WDT_FLAGS_OPEN 1 | ||
29 | #define WDT_FLAGS_ORPHAN 2 | ||
30 | |||
31 | #define DRV_NAME "geodewdt" | ||
32 | #define WATCHDOG_NAME "Geode GX/LX WDT" | ||
33 | #define WATCHDOG_TIMEOUT 60 | ||
34 | |||
35 | static int timeout = WATCHDOG_TIMEOUT; | ||
36 | module_param(timeout, int, 0); | ||
37 | MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. 1<= timeout <=131, default=" __MODULE_STRING(WATCHDOG_TIMEOUT) "."); | ||
38 | |||
39 | static int nowayout = WATCHDOG_NOWAYOUT; | ||
40 | module_param(nowayout, int, 0); | ||
41 | MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); | ||
42 | |||
43 | static struct platform_device *geodewdt_platform_device; | ||
44 | static unsigned long wdt_flags; | ||
45 | static int wdt_timer; | ||
46 | static int safe_close; | ||
47 | |||
48 | static void geodewdt_ping(void) | ||
49 | { | ||
50 | /* Stop the counter */ | ||
51 | geode_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0); | ||
52 | |||
53 | /* Reset the counter */ | ||
54 | geode_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0); | ||
55 | |||
56 | /* Enable the counter */ | ||
57 | geode_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN); | ||
58 | } | ||
59 | |||
60 | static void geodewdt_disable(void) | ||
61 | { | ||
62 | geode_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0); | ||
63 | geode_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0); | ||
64 | } | ||
65 | |||
66 | static int geodewdt_set_heartbeat(int val) | ||
67 | { | ||
68 | if (val < 1 || val > GEODEWDT_MAX_SECONDS) | ||
69 | return -EINVAL; | ||
70 | |||
71 | geode_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0); | ||
72 | geode_mfgpt_write(wdt_timer, MFGPT_REG_CMP2, val * GEODEWDT_HZ); | ||
73 | geode_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0); | ||
74 | geode_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN); | ||
75 | |||
76 | timeout = val; | ||
77 | return 0; | ||
78 | } | ||
79 | |||
80 | static int | ||
81 | geodewdt_open(struct inode *inode, struct file *file) | ||
82 | { | ||
83 | if (test_and_set_bit(WDT_FLAGS_OPEN, &wdt_flags)) | ||
84 | return -EBUSY; | ||
85 | |||
86 | if (!test_and_clear_bit(WDT_FLAGS_ORPHAN, &wdt_flags)) | ||
87 | __module_get(THIS_MODULE); | ||
88 | |||
89 | geodewdt_ping(); | ||
90 | return nonseekable_open(inode, file); | ||
91 | } | ||
92 | |||
93 | static int | ||
94 | geodewdt_release(struct inode *inode, struct file *file) | ||
95 | { | ||
96 | if (safe_close) { | ||
97 | geodewdt_disable(); | ||
98 | module_put(THIS_MODULE); | ||
99 | } | ||
100 | else { | ||
101 | printk(KERN_CRIT "Unexpected close - watchdog is not stopping.\n"); | ||
102 | geodewdt_ping(); | ||
103 | |||
104 | set_bit(WDT_FLAGS_ORPHAN, &wdt_flags); | ||
105 | } | ||
106 | |||
107 | clear_bit(WDT_FLAGS_OPEN, &wdt_flags); | ||
108 | safe_close = 0; | ||
109 | return 0; | ||
110 | } | ||
111 | |||
112 | static ssize_t | ||
113 | geodewdt_write(struct file *file, const char __user *data, size_t len, | ||
114 | loff_t *ppos) | ||
115 | { | ||
116 | if(len) { | ||
117 | if (!nowayout) { | ||
118 | size_t i; | ||
119 | safe_close = 0; | ||
120 | |||
121 | for (i = 0; i != len; i++) { | ||
122 | char c; | ||
123 | |||
124 | if (get_user(c, data + i)) | ||
125 | return -EFAULT; | ||
126 | |||
127 | if (c == 'V') | ||
128 | safe_close = 1; | ||
129 | } | ||
130 | } | ||
131 | |||
132 | geodewdt_ping(); | ||
133 | } | ||
134 | return len; | ||
135 | } | ||
136 | |||
137 | static int | ||
138 | geodewdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, | ||
139 | unsigned long arg) | ||
140 | { | ||
141 | void __user *argp = (void __user *)arg; | ||
142 | int __user *p = argp; | ||
143 | int interval; | ||
144 | |||
145 | static struct watchdog_info ident = { | ||
146 | .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | ||
147 | | WDIOF_MAGICCLOSE, | ||
148 | .firmware_version = 1, | ||
149 | .identity = WATCHDOG_NAME, | ||
150 | }; | ||
151 | |||
152 | switch(cmd) { | ||
153 | case WDIOC_GETSUPPORT: | ||
154 | return copy_to_user(argp, &ident, | ||
155 | sizeof(ident)) ? -EFAULT : 0; | ||
156 | break; | ||
157 | |||
158 | case WDIOC_GETSTATUS: | ||
159 | case WDIOC_GETBOOTSTATUS: | ||
160 | return put_user(0, p); | ||
161 | |||
162 | case WDIOC_KEEPALIVE: | ||
163 | geodewdt_ping(); | ||
164 | return 0; | ||
165 | |||
166 | case WDIOC_SETTIMEOUT: | ||
167 | if (get_user(interval, p)) | ||
168 | return -EFAULT; | ||
169 | |||
170 | if (geodewdt_set_heartbeat(interval)) | ||
171 | return -EINVAL; | ||
172 | |||
173 | /* Fall through */ | ||
174 | |||
175 | case WDIOC_GETTIMEOUT: | ||
176 | return put_user(timeout, p); | ||
177 | |||
178 | case WDIOC_SETOPTIONS: | ||
179 | { | ||
180 | int options, ret = -EINVAL; | ||
181 | |||
182 | if (get_user(options, p)) | ||
183 | return -EFAULT; | ||
184 | |||
185 | if (options & WDIOS_DISABLECARD) { | ||
186 | geodewdt_disable(); | ||
187 | ret = 0; | ||
188 | } | ||
189 | |||
190 | if (options & WDIOS_ENABLECARD) { | ||
191 | geodewdt_ping(); | ||
192 | ret = 0; | ||
193 | } | ||
194 | |||
195 | return ret; | ||
196 | } | ||
197 | default: | ||
198 | return -ENOTTY; | ||
199 | } | ||
200 | |||
201 | return 0; | ||
202 | } | ||
203 | |||
204 | static const struct file_operations geodewdt_fops = { | ||
205 | .owner = THIS_MODULE, | ||
206 | .llseek = no_llseek, | ||
207 | .write = geodewdt_write, | ||
208 | .ioctl = geodewdt_ioctl, | ||
209 | .open = geodewdt_open, | ||
210 | .release = geodewdt_release, | ||
211 | }; | ||
212 | |||
213 | static struct miscdevice geodewdt_miscdev = { | ||
214 | .minor = WATCHDOG_MINOR, | ||
215 | .name = "watchdog", | ||
216 | .fops = &geodewdt_fops | ||
217 | }; | ||
218 | |||
219 | static int __devinit | ||
220 | geodewdt_probe(struct platform_device *dev) | ||
221 | { | ||
222 | int ret, timer; | ||
223 | |||
224 | timer = geode_mfgpt_alloc_timer(MFGPT_TIMER_ANY, | ||
225 | MFGPT_DOMAIN_WORKING, THIS_MODULE); | ||
226 | |||
227 | if (timer == -1) { | ||
228 | printk(KERN_ERR "geodewdt: No timers were available\n"); | ||
229 | return -ENODEV; | ||
230 | } | ||
231 | |||
232 | wdt_timer = timer; | ||
233 | |||
234 | /* Set up the timer */ | ||
235 | |||
236 | geode_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, | ||
237 | GEODEWDT_SCALE | (3 << 8)); | ||
238 | |||
239 | /* Set up comparator 2 to reset when the event fires */ | ||
240 | geode_mfgpt_toggle_event(wdt_timer, MFGPT_CMP2, MFGPT_EVENT_RESET, 1); | ||
241 | |||
242 | /* Set up the initial timeout */ | ||
243 | |||
244 | geode_mfgpt_write(wdt_timer, MFGPT_REG_CMP2, | ||
245 | timeout * GEODEWDT_HZ); | ||
246 | |||
247 | ret = misc_register(&geodewdt_miscdev); | ||
248 | |||
249 | return ret; | ||
250 | } | ||
251 | |||
252 | static int __devexit | ||
253 | geodewdt_remove(struct platform_device *dev) | ||
254 | { | ||
255 | misc_deregister(&geodewdt_miscdev); | ||
256 | return 0; | ||
257 | } | ||
258 | |||
259 | static void | ||
260 | geodewdt_shutdown(struct platform_device *dev) | ||
261 | { | ||
262 | geodewdt_disable(); | ||
263 | } | ||
264 | |||
265 | static struct platform_driver geodewdt_driver = { | ||
266 | .probe = geodewdt_probe, | ||
267 | .remove = __devexit_p(geodewdt_remove), | ||
268 | .shutdown = geodewdt_shutdown, | ||
269 | .driver = { | ||
270 | .owner = THIS_MODULE, | ||
271 | .name = DRV_NAME, | ||
272 | }, | ||
273 | }; | ||
274 | |||
275 | static int __init | ||
276 | geodewdt_init(void) | ||
277 | { | ||
278 | int ret; | ||
279 | |||
280 | ret = platform_driver_register(&geodewdt_driver); | ||
281 | if (ret) | ||
282 | return ret; | ||
283 | |||
284 | geodewdt_platform_device = platform_device_register_simple(DRV_NAME, -1, NULL, 0); | ||
285 | if (IS_ERR(geodewdt_platform_device)) { | ||
286 | ret = PTR_ERR(geodewdt_platform_device); | ||
287 | goto err; | ||
288 | } | ||
289 | |||
290 | return 0; | ||
291 | err: | ||
292 | platform_driver_unregister(&geodewdt_driver); | ||
293 | return ret; | ||
294 | } | ||
295 | |||
296 | static void __exit | ||
297 | geodewdt_exit(void) | ||
298 | { | ||
299 | platform_device_unregister(geodewdt_platform_device); | ||
300 | platform_driver_unregister(&geodewdt_driver); | ||
301 | } | ||
302 | |||
303 | module_init(geodewdt_init); | ||
304 | module_exit(geodewdt_exit); | ||
305 | |||
306 | MODULE_AUTHOR("Advanced Micro Devices, Inc"); | ||
307 | MODULE_DESCRIPTION("Geode GX/LX Watchdog Driver"); | ||
308 | MODULE_LICENSE("GPL"); | ||
309 | MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); | ||
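The geodewdt write handler above scans the written data for the magic character 'V' to arm a safe close, and its ioctl handler implements the standard WDIOC_* commands (WDIOC_GETSUPPORT, WDIOC_KEEPALIVE, WDIOC_SETTIMEOUT, ...). Below is a minimal userspace sketch of driving such a watchdog through /dev/watchdog; the 10-second timeout and the loop count are illustrative values only, not anything mandated by this driver, and the magic close only works when nowayout is off.

/* Minimal userspace client for a WDIOF_MAGICCLOSE watchdog (illustrative only). */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd = open("/dev/watchdog", O_WRONLY);
	int timeout = 10;			/* arbitrary example value */

	if (fd < 0) {
		perror("open /dev/watchdog");
		return 1;
	}

	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* driver may clamp the value */
	printf("timeout is now %d seconds\n", timeout);

	for (int i = 0; i < 5; i++) {		/* kick the watchdog a few times */
		ioctl(fd, WDIOC_KEEPALIVE, 0);
		sleep(timeout / 2);
	}

	write(fd, "V", 1);			/* magic close: allow a clean stop */
	close(fd);				/* without 'V', close() leaves the timer armed */
	return 0;
}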
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index 6483d1066b95..6a63535fc04d 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c | |||
@@ -418,23 +418,20 @@ static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason, | |||
418 | static unsigned long rom_pl; | 418 | static unsigned long rom_pl; |
419 | static int die_nmi_called; | 419 | static int die_nmi_called; |
420 | 420 | ||
421 | if (ulReason != DIE_NMI && ulReason != DIE_NMI_IPI) | 421 | if (ulReason == DIE_NMI || ulReason == DIE_NMI_IPI) { |
422 | return NOTIFY_OK; | 422 | spin_lock_irqsave(&rom_lock, rom_pl); |
423 | 423 | if (!die_nmi_called) | |
424 | spin_lock_irqsave(&rom_lock, rom_pl); | 424 | asminline_call(&cmn_regs, cru_rom_addr); |
425 | if (!die_nmi_called) | 425 | die_nmi_called = 1; |
426 | asminline_call(&cmn_regs, cru_rom_addr); | 426 | spin_unlock_irqrestore(&rom_lock, rom_pl); |
427 | die_nmi_called = 1; | 427 | if (cmn_regs.u1.ral != 0) { |
428 | spin_unlock_irqrestore(&rom_lock, rom_pl); | 428 | panic("An NMI occurred, please see the Integrated " |
429 | if (cmn_regs.u1.ral == 0) { | 429 | "Management Log for details.\n"); |
430 | printk(KERN_WARNING "hpwdt: An NMI occurred, " | 430 | } |
431 | "but unable to determine source.\n"); | ||
432 | } else { | ||
433 | panic("An NMI occurred, please see the Integrated " | ||
434 | "Management Log for details.\n"); | ||
435 | } | 431 | } |
436 | 432 | ||
437 | return NOTIFY_STOP; | 433 | die_nmi_called = 0; |
434 | return NOTIFY_DONE; | ||
438 | } | 435 | } |
439 | 436 | ||
440 | /* | 437 | /* |
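The hpwdt change inverts the test so the pretimeout handler only does work for DIE_NMI and DIE_NMI_IPI events, and now returns NOTIFY_DONE for everything instead of NOTIFY_OK/NOTIFY_STOP. A kernel-style sketch of the general die-notifier shape this follows is below (the names are illustrative, not taken from hpwdt; it needs <linux/notifier.h> and <linux/kdebug.h> and is not a standalone program).

/* Sketch of a die-chain notifier that only reacts to NMI events. */
static int example_die_notify(struct notifier_block *nb,
			      unsigned long reason, void *data)
{
	if (reason == DIE_NMI || reason == DIE_NMI_IPI) {
		/* handle the NMI source here */
	}

	/* Let the rest of the notifier chain see the event as well. */
	return NOTIFY_DONE;
}

static struct notifier_block example_die_notifier = {
	.notifier_call = example_die_notify,
};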
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c index a0e6809e369f..95ba985bd341 100644 --- a/drivers/watchdog/iTCO_wdt.c +++ b/drivers/watchdog/iTCO_wdt.c | |||
@@ -41,9 +41,10 @@ | |||
41 | * 82801HH (ICH8DH) : document number 313056-003, 313057-009, | 41 | * 82801HH (ICH8DH) : document number 313056-003, 313057-009, |
42 | * 82801HO (ICH8DO) : document number 313056-003, 313057-009, | 42 | * 82801HO (ICH8DO) : document number 313056-003, 313057-009, |
43 | * 82801HEM (ICH8M-E) : document number 313056-003, 313057-009, | 43 | * 82801HEM (ICH8M-E) : document number 313056-003, 313057-009, |
44 | * 82801IB (ICH9) : document number 316972-001, 316973-001, | 44 | * 82801IB (ICH9) : document number 316972-001, 316973-006, |
45 | * 82801IR (ICH9R) : document number 316972-001, 316973-001, | 45 | * 82801IR (ICH9R) : document number 316972-001, 316973-006, |
46 | * 82801IH (ICH9DH) : document number 316972-001, 316973-001, | 46 | * 82801IH (ICH9DH) : document number 316972-001, 316973-006, |
47 | * 82801IO (ICH9DO) : document number 316972-001, 316973-006, | ||
47 | * 6300ESB (6300ESB) : document number 300641-003, 300884-010, | 48 | * 6300ESB (6300ESB) : document number 300641-003, 300884-010, |
48 | * 631xESB (631xESB) : document number 313082-001, 313075-005, | 49 | * 631xESB (631xESB) : document number 313082-001, 313075-005, |
49 | * 632xESB (632xESB) : document number 313082-001, 313075-005 | 50 | * 632xESB (632xESB) : document number 313082-001, 313075-005 |
@@ -55,8 +56,8 @@ | |||
55 | 56 | ||
56 | /* Module and version information */ | 57 | /* Module and version information */ |
57 | #define DRV_NAME "iTCO_wdt" | 58 | #define DRV_NAME "iTCO_wdt" |
58 | #define DRV_VERSION "1.02" | 59 | #define DRV_VERSION "1.03" |
59 | #define DRV_RELDATE "26-Jul-2007" | 60 | #define DRV_RELDATE "30-Apr-2008" |
60 | #define PFX DRV_NAME ": " | 61 | #define PFX DRV_NAME ": " |
61 | 62 | ||
62 | /* Includes */ | 63 | /* Includes */ |
@@ -104,6 +105,7 @@ enum iTCO_chipsets { | |||
104 | TCO_ICH9, /* ICH9 */ | 105 | TCO_ICH9, /* ICH9 */ |
105 | TCO_ICH9R, /* ICH9R */ | 106 | TCO_ICH9R, /* ICH9R */ |
106 | TCO_ICH9DH, /* ICH9DH */ | 107 | TCO_ICH9DH, /* ICH9DH */ |
108 | TCO_ICH9DO, /* ICH9DO */ | ||
107 | TCO_631XESB, /* 631xESB/632xESB */ | 109 | TCO_631XESB, /* 631xESB/632xESB */ |
108 | }; | 110 | }; |
109 | 111 | ||
@@ -136,6 +138,7 @@ static struct { | |||
136 | {"ICH9", 2}, | 138 | {"ICH9", 2}, |
137 | {"ICH9R", 2}, | 139 | {"ICH9R", 2}, |
138 | {"ICH9DH", 2}, | 140 | {"ICH9DH", 2}, |
141 | {"ICH9DO", 2}, | ||
139 | {"631xESB/632xESB", 2}, | 142 | {"631xESB/632xESB", 2}, |
140 | {NULL,0} | 143 | {NULL,0} |
141 | }; | 144 | }; |
@@ -181,6 +184,7 @@ static struct pci_device_id iTCO_wdt_pci_tbl[] = { | |||
181 | { ITCO_PCI_DEVICE(0x2918, TCO_ICH9 )}, | 184 | { ITCO_PCI_DEVICE(0x2918, TCO_ICH9 )}, |
182 | { ITCO_PCI_DEVICE(0x2916, TCO_ICH9R )}, | 185 | { ITCO_PCI_DEVICE(0x2916, TCO_ICH9R )}, |
183 | { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH9_2, TCO_ICH9DH )}, | 186 | { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH9_2, TCO_ICH9DH )}, |
187 | { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH9_4, TCO_ICH9DO )}, | ||
184 | { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ESB2_0, TCO_631XESB)}, | 188 | { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ESB2_0, TCO_631XESB)}, |
185 | { ITCO_PCI_DEVICE(0x2671, TCO_631XESB)}, | 189 | { ITCO_PCI_DEVICE(0x2671, TCO_631XESB)}, |
186 | { ITCO_PCI_DEVICE(0x2672, TCO_631XESB)}, | 190 | { ITCO_PCI_DEVICE(0x2672, TCO_631XESB)}, |
diff --git a/drivers/watchdog/w83697hf_wdt.c b/drivers/watchdog/w83697hf_wdt.c index c622a0e6c9ae..528b882420b6 100644 --- a/drivers/watchdog/w83697hf_wdt.c +++ b/drivers/watchdog/w83697hf_wdt.c | |||
@@ -44,6 +44,7 @@ | |||
44 | #define WATCHDOG_NAME "w83697hf/hg WDT" | 44 | #define WATCHDOG_NAME "w83697hf/hg WDT" |
45 | #define PFX WATCHDOG_NAME ": " | 45 | #define PFX WATCHDOG_NAME ": " |
46 | #define WATCHDOG_TIMEOUT 60 /* 60 sec default timeout */ | 46 | #define WATCHDOG_TIMEOUT 60 /* 60 sec default timeout */ |
47 | #define WATCHDOG_EARLY_DISABLE 1 /* Disable until userland kicks in */ | ||
47 | 48 | ||
48 | static unsigned long wdt_is_open; | 49 | static unsigned long wdt_is_open; |
49 | static char expect_close; | 50 | static char expect_close; |
@@ -56,12 +57,16 @@ MODULE_PARM_DESC(wdt_io, "w83697hf/hg WDT io port (default 0x2e, 0 = autodetect) | |||
56 | 57 | ||
57 | static int timeout = WATCHDOG_TIMEOUT; /* in seconds */ | 58 | static int timeout = WATCHDOG_TIMEOUT; /* in seconds */ |
58 | module_param(timeout, int, 0); | 59 | module_param(timeout, int, 0); |
59 | MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. 1<= timeout <=255, default=" __MODULE_STRING(WATCHDOG_TIMEOUT) "."); | 60 | MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. 1<= timeout <=255 (default=" __MODULE_STRING(WATCHDOG_TIMEOUT) ")"); |
60 | 61 | ||
61 | static int nowayout = WATCHDOG_NOWAYOUT; | 62 | static int nowayout = WATCHDOG_NOWAYOUT; |
62 | module_param(nowayout, int, 0); | 63 | module_param(nowayout, int, 0); |
63 | MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); | 64 | MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); |
64 | 65 | ||
66 | static int early_disable = WATCHDOG_EARLY_DISABLE; | ||
67 | module_param(early_disable, int, 0); | ||
68 | MODULE_PARM_DESC(early_disable, "Watchdog gets disabled at boot time (default=" __MODULE_STRING(WATCHDOG_EARLY_DISABLE) ")"); | ||
69 | |||
65 | /* | 70 | /* |
66 | * Kernel methods. | 71 | * Kernel methods. |
67 | */ | 72 | */ |
@@ -140,7 +145,7 @@ w83697hf_init(void) | |||
140 | w83697hf_deselect_wdt(); | 145 | w83697hf_deselect_wdt(); |
141 | } | 146 | } |
142 | 147 | ||
143 | static int | 148 | static void |
144 | wdt_ping(void) | 149 | wdt_ping(void) |
145 | { | 150 | { |
146 | spin_lock(&io_lock); | 151 | spin_lock(&io_lock); |
@@ -150,10 +155,9 @@ wdt_ping(void) | |||
150 | 155 | ||
151 | w83697hf_deselect_wdt(); | 156 | w83697hf_deselect_wdt(); |
152 | spin_unlock(&io_lock); | 157 | spin_unlock(&io_lock); |
153 | return 0; | ||
154 | } | 158 | } |
155 | 159 | ||
156 | static int | 160 | static void |
157 | wdt_enable(void) | 161 | wdt_enable(void) |
158 | { | 162 | { |
159 | spin_lock(&io_lock); | 163 | spin_lock(&io_lock); |
@@ -164,10 +168,9 @@ wdt_enable(void) | |||
164 | 168 | ||
165 | w83697hf_deselect_wdt(); | 169 | w83697hf_deselect_wdt(); |
166 | spin_unlock(&io_lock); | 170 | spin_unlock(&io_lock); |
167 | return 0; | ||
168 | } | 171 | } |
169 | 172 | ||
170 | static int | 173 | static void |
171 | wdt_disable(void) | 174 | wdt_disable(void) |
172 | { | 175 | { |
173 | spin_lock(&io_lock); | 176 | spin_lock(&io_lock); |
@@ -178,7 +181,22 @@ wdt_disable(void) | |||
178 | 181 | ||
179 | w83697hf_deselect_wdt(); | 182 | w83697hf_deselect_wdt(); |
180 | spin_unlock(&io_lock); | 183 | spin_unlock(&io_lock); |
181 | return 0; | 184 | } |
185 | |||
186 | static unsigned char | ||
187 | wdt_running(void) | ||
188 | { | ||
189 | unsigned char t; | ||
190 | |||
191 | spin_lock(&io_lock); | ||
192 | w83697hf_select_wdt(); | ||
193 | |||
194 | t = w83697hf_get_reg(0xF4); /* Read timer */ | ||
195 | |||
196 | w83697hf_deselect_wdt(); | ||
197 | spin_unlock(&io_lock); | ||
198 | |||
199 | return t; | ||
182 | } | 200 | } |
183 | 201 | ||
184 | static int | 202 | static int |
@@ -397,7 +415,11 @@ wdt_init(void) | |||
397 | } | 415 | } |
398 | 416 | ||
399 | w83697hf_init(); | 417 | w83697hf_init(); |
400 | wdt_disable(); /* Disable watchdog until first use */ | 418 | if (early_disable) { |
419 | if (wdt_running()) | ||
420 | printk (KERN_WARNING PFX "Stopping previously enabled watchdog until userland kicks in\n"); | ||
421 | wdt_disable(); | ||
422 | } | ||
401 | 423 | ||
402 | if (wdt_set_heartbeat(timeout)) { | 424 | if (wdt_set_heartbeat(timeout)) { |
403 | wdt_set_heartbeat(WATCHDOG_TIMEOUT); | 425 | wdt_set_heartbeat(WATCHDOG_TIMEOUT); |
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c index 6653e29637a7..7013aaff6aed 100644 --- a/fs/cifs/cifs_spnego.c +++ b/fs/cifs/cifs_spnego.c | |||
@@ -119,6 +119,9 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo) | |||
119 | dp = description + strlen(description); | 119 | dp = description + strlen(description); |
120 | sprintf(dp, ";uid=0x%x", sesInfo->linux_uid); | 120 | sprintf(dp, ";uid=0x%x", sesInfo->linux_uid); |
121 | 121 | ||
122 | dp = description + strlen(description); | ||
123 | sprintf(dp, ";user=%s", sesInfo->userName); | ||
124 | |||
122 | cFYI(1, ("key description = %s", description)); | 125 | cFYI(1, ("key description = %s", description)); |
123 | spnego_key = request_key(&cifs_spnego_key_type, description, ""); | 126 | spnego_key = request_key(&cifs_spnego_key_type, description, ""); |
124 | 127 | ||
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 7b9938445b07..9b8b4cfdf993 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c | |||
@@ -2159,8 +2159,7 @@ copyRetry: | |||
2159 | cFYI(1, ("Send error in copy = %d with %d files copied", | 2159 | cFYI(1, ("Send error in copy = %d with %d files copied", |
2160 | rc, le16_to_cpu(pSMBr->CopyCount))); | 2160 | rc, le16_to_cpu(pSMBr->CopyCount))); |
2161 | } | 2161 | } |
2162 | if (pSMB) | 2162 | cifs_buf_release(pSMB); |
2163 | cifs_buf_release(pSMB); | ||
2164 | 2163 | ||
2165 | if (rc == -EAGAIN) | 2164 | if (rc == -EAGAIN) |
2166 | goto copyRetry; | 2165 | goto copyRetry; |
@@ -2249,8 +2248,7 @@ createSymLinkRetry: | |||
2249 | if (rc) | 2248 | if (rc) |
2250 | cFYI(1, ("Send error in SetPathInfo create symlink = %d", rc)); | 2249 | cFYI(1, ("Send error in SetPathInfo create symlink = %d", rc)); |
2251 | 2250 | ||
2252 | if (pSMB) | 2251 | cifs_buf_release(pSMB); |
2253 | cifs_buf_release(pSMB); | ||
2254 | 2252 | ||
2255 | if (rc == -EAGAIN) | 2253 | if (rc == -EAGAIN) |
2256 | goto createSymLinkRetry; | 2254 | goto createSymLinkRetry; |
@@ -4095,8 +4093,7 @@ getDFSRetry: | |||
4095 | target_nodes, nls_codepage); | 4093 | target_nodes, nls_codepage); |
4096 | 4094 | ||
4097 | GetDFSRefExit: | 4095 | GetDFSRefExit: |
4098 | if (pSMB) | 4096 | cifs_buf_release(pSMB); |
4099 | cifs_buf_release(pSMB); | ||
4100 | 4097 | ||
4101 | if (rc == -EAGAIN) | 4098 | if (rc == -EAGAIN) |
4102 | goto getDFSRetry; | 4099 | goto getDFSRetry; |
@@ -5117,8 +5114,7 @@ setPermsRetry: | |||
5117 | if (rc) | 5114 | if (rc) |
5118 | cFYI(1, ("SetPathInfo (perms) returned %d", rc)); | 5115 | cFYI(1, ("SetPathInfo (perms) returned %d", rc)); |
5119 | 5116 | ||
5120 | if (pSMB) | 5117 | cifs_buf_release(pSMB); |
5121 | cifs_buf_release(pSMB); | ||
5122 | if (rc == -EAGAIN) | 5118 | if (rc == -EAGAIN) |
5123 | goto setPermsRetry; | 5119 | goto setPermsRetry; |
5124 | return rc; | 5120 | return rc; |
@@ -5340,8 +5336,7 @@ QAllEAsRetry: | |||
5340 | } | 5336 | } |
5341 | } | 5337 | } |
5342 | } | 5338 | } |
5343 | if (pSMB) | 5339 | cifs_buf_release(pSMB); |
5344 | cifs_buf_release(pSMB); | ||
5345 | if (rc == -EAGAIN) | 5340 | if (rc == -EAGAIN) |
5346 | goto QAllEAsRetry; | 5341 | goto QAllEAsRetry; |
5347 | 5342 | ||
@@ -5490,8 +5485,7 @@ QEARetry: | |||
5490 | } | 5485 | } |
5491 | } | 5486 | } |
5492 | } | 5487 | } |
5493 | if (pSMB) | 5488 | cifs_buf_release(pSMB); |
5494 | cifs_buf_release(pSMB); | ||
5495 | if (rc == -EAGAIN) | 5489 | if (rc == -EAGAIN) |
5496 | goto QEARetry; | 5490 | goto QEARetry; |
5497 | 5491 | ||
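The repeated `if (pSMB) cifs_buf_release(pSMB);` patterns above collapse to unconditional calls, presumably because pSMB is known to be non-NULL at those points or because the release helper tolerates NULL, the same convention kfree(NULL) follows. A hypothetical NULL-tolerant release wrapper illustrating that idiom (this is not the actual cifs_buf_release implementation, just a kernel-style sketch):

/* Hypothetical NULL-tolerant release helper, mirroring the kfree(NULL) idiom. */
static void example_buf_release(void *buf)
{
	if (!buf)
		return;		/* callers no longer need their own NULL check */
	kfree(buf);		/* stand-in for the real buffer-pool release */
}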
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 00ced97bd53a..129dbfe4dca7 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c | |||
@@ -172,7 +172,7 @@ static void fill_fake_finddataunix(FILE_UNIX_BASIC_INFO *pfnd_dat, | |||
172 | { | 172 | { |
173 | struct inode *pinode = NULL; | 173 | struct inode *pinode = NULL; |
174 | 174 | ||
175 | memset(pfnd_dat, sizeof(FILE_UNIX_BASIC_INFO), 0); | 175 | memset(pfnd_dat, 0, sizeof(FILE_UNIX_BASIC_INFO)); |
176 | 176 | ||
177 | /* __le64 pfnd_dat->EndOfFile = cpu_to_le64(0); | 177 | /* __le64 pfnd_dat->EndOfFile = cpu_to_le64(0); |
178 | __le64 pfnd_dat->NumOfBytes = cpu_to_le64(0); | 178 | __le64 pfnd_dat->NumOfBytes = cpu_to_le64(0); |
@@ -384,7 +384,7 @@ static int get_sfu_mode(struct inode *inode, | |||
384 | static void fill_fake_finddata(FILE_ALL_INFO *pfnd_dat, | 384 | static void fill_fake_finddata(FILE_ALL_INFO *pfnd_dat, |
385 | struct super_block *sb) | 385 | struct super_block *sb) |
386 | { | 386 | { |
387 | memset(pfnd_dat, sizeof(FILE_ALL_INFO), 0); | 387 | memset(pfnd_dat, 0, sizeof(FILE_ALL_INFO)); |
388 | 388 | ||
389 | /* __le64 pfnd_dat->AllocationSize = cpu_to_le64(0); | 389 | /* __le64 pfnd_dat->AllocationSize = cpu_to_le64(0); |
390 | __le64 pfnd_dat->EndOfFile = cpu_to_le64(0); | 390 | __le64 pfnd_dat->EndOfFile = cpu_to_le64(0); |
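Both cifs/inode.c hunks fix a swapped memset() argument order: the old calls passed the structure size as the fill byte and 0 as the length, so nothing was actually cleared. For reference, the prototype is `void *memset(void *s, int c, size_t n)`; a tiny standalone check (the struct name is a stand-in, not the real FILE_UNIX_BASIC_INFO layout):

#include <assert.h>
#include <string.h>

struct example_info { int a; long b; };	/* stand-in for FILE_UNIX_BASIC_INFO */

int main(void)
{
	struct example_info info = { 1, 2 };

	memset(&info, 0, sizeof(info));		/* fill value first, length second */
	assert(info.a == 0 && info.b == 0);
	return 0;
}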
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index cd62d75b2cc0..e2832bc7869a 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c | |||
@@ -1906,9 +1906,9 @@ int ecryptfs_get_tfm_and_mutex_for_cipher_name(struct crypto_blkcipher **tfm, | |||
1906 | goto out; | 1906 | goto out; |
1907 | } | 1907 | } |
1908 | } | 1908 | } |
1909 | mutex_unlock(&key_tfm_list_mutex); | ||
1910 | (*tfm) = key_tfm->key_tfm; | 1909 | (*tfm) = key_tfm->key_tfm; |
1911 | (*tfm_mutex) = &key_tfm->key_tfm_mutex; | 1910 | (*tfm_mutex) = &key_tfm->key_tfm_mutex; |
1912 | out: | 1911 | out: |
1912 | mutex_unlock(&key_tfm_list_mutex); | ||
1913 | return rc; | 1913 | return rc; |
1914 | } | 1914 | } |
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index fb77e0962132..43e99513334a 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c | |||
@@ -488,7 +488,12 @@ static struct fuse_conn *new_conn(struct super_block *sb) | |||
488 | err = bdi_init(&fc->bdi); | 488 | err = bdi_init(&fc->bdi); |
489 | if (err) | 489 | if (err) |
490 | goto error_kfree; | 490 | goto error_kfree; |
491 | err = bdi_register_dev(&fc->bdi, fc->dev); | 491 | if (sb->s_bdev) { |
492 | err = bdi_register(&fc->bdi, NULL, "%u:%u-fuseblk", | ||
493 | MAJOR(fc->dev), MINOR(fc->dev)); | ||
494 | } else { | ||
495 | err = bdi_register_dev(&fc->bdi, fc->dev); | ||
496 | } | ||
492 | if (err) | 497 | if (err) |
493 | goto error_bdi_destroy; | 498 | goto error_bdi_destroy; |
494 | /* | 499 | /* |
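For fuseblk mounts the BDI is now registered as "MAJOR:MINOR-fuseblk", with the numbers taken from the connection's device number, the same value userspace sees as st_dev. A small userspace sketch of recovering that name for a given mount point; the /sys/class/bdi path in the comment is an assumption about how the registered BDI is exposed, not something stated in this hunk.

#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <sys/types.h>

int main(int argc, char **argv)
{
	struct stat st;

	if (argc < 2 || stat(argv[1], &st) != 0)
		return 1;

	/* e.g. /sys/class/bdi/0:23-fuseblk/ for a fuseblk mount (assumed path) */
	printf("%u:%u-fuseblk\n", major(st.st_dev), minor(st.st_dev));
	return 0;
}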
diff --git a/fs/ntfs/upcase.c b/fs/ntfs/upcase.c index 9101807dc81a..e2f72ca98037 100644 --- a/fs/ntfs/upcase.c +++ b/fs/ntfs/upcase.c | |||
@@ -77,11 +77,10 @@ ntfschar *generate_default_upcase(void) | |||
77 | uc[i] = cpu_to_le16(i); | 77 | uc[i] = cpu_to_le16(i); |
78 | for (r = 0; uc_run_table[r][0]; r++) | 78 | for (r = 0; uc_run_table[r][0]; r++) |
79 | for (i = uc_run_table[r][0]; i < uc_run_table[r][1]; i++) | 79 | for (i = uc_run_table[r][0]; i < uc_run_table[r][1]; i++) |
80 | uc[i] = cpu_to_le16(le16_to_cpu(uc[i]) + | 80 | le16_add_cpu(&uc[i], uc_run_table[r][2]); |
81 | uc_run_table[r][2]); | ||
82 | for (r = 0; uc_dup_table[r][0]; r++) | 81 | for (r = 0; uc_dup_table[r][0]; r++) |
83 | for (i = uc_dup_table[r][0]; i < uc_dup_table[r][1]; i += 2) | 82 | for (i = uc_dup_table[r][0]; i < uc_dup_table[r][1]; i += 2) |
84 | uc[i + 1] = cpu_to_le16(le16_to_cpu(uc[i + 1]) - 1); | 83 | le16_add_cpu(&uc[i + 1], -1); |
85 | for (r = 0; uc_word_table[r][0]; r++) | 84 | for (r = 0; uc_word_table[r][0]; r++) |
86 | uc[uc_word_table[r][0]] = cpu_to_le16(uc_word_table[r][1]); | 85 | uc[uc_word_table[r][0]] = cpu_to_le16(uc_word_table[r][1]); |
87 | return uc; | 86 | return uc; |
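le16_add_cpu(&x, v) is shorthand for the open-coded `x = cpu_to_le16(le16_to_cpu(x) + v)` it replaces in the upcase-table loops. A userspace sketch of the same operation using <endian.h> helpers (an assumption for illustration; the kernel uses its own __le16 type and conversion macros):

#define _DEFAULT_SOURCE
#include <assert.h>
#include <endian.h>
#include <stdint.h>

/* Userspace stand-in for le16_add_cpu(): add v to a little-endian 16-bit value. */
static void example_le16_add_cpu(uint16_t *le, int16_t v)
{
	*le = htole16((uint16_t)(le16toh(*le) + v));
}

int main(void)
{
	uint16_t x = htole16(0x0041);		/* 'A' stored little-endian */

	example_le16_add_cpu(&x, 0x20);		/* same delta idea as the run table */
	assert(le16toh(x) == 0x0061);		/* 'a' */
	return 0;
}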
diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 6f4e8dc97da1..b08d10017911 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c | |||
@@ -425,7 +425,8 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino, | |||
425 | } | 425 | } |
426 | } | 426 | } |
427 | unlock_new_inode(inode); | 427 | unlock_new_inode(inode); |
428 | } | 428 | } else |
429 | module_put(de->owner); | ||
429 | return inode; | 430 | return inode; |
430 | 431 | ||
431 | out_ino: | 432 | out_ino: |
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c index 74a323d2b850..32dc14cd8900 100644 --- a/fs/proc/proc_misc.c +++ b/fs/proc/proc_misc.c | |||
@@ -139,7 +139,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off, | |||
139 | #define K(x) ((x) << (PAGE_SHIFT - 10)) | 139 | #define K(x) ((x) << (PAGE_SHIFT - 10)) |
140 | si_meminfo(&i); | 140 | si_meminfo(&i); |
141 | si_swapinfo(&i); | 141 | si_swapinfo(&i); |
142 | committed = atomic_read(&vm_committed_space); | 142 | committed = atomic_long_read(&vm_committed_space); |
143 | allowed = ((totalram_pages - hugetlb_total_pages()) | 143 | allowed = ((totalram_pages - hugetlb_total_pages()) |
144 | * sysctl_overcommit_ratio / 100) + total_swap_pages; | 144 | * sysctl_overcommit_ratio / 100) + total_swap_pages; |
145 | 145 | ||
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index 5105015a75ad..98e0e86093b4 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c | |||
@@ -387,6 +387,8 @@ _xfs_buf_lookup_pages( | |||
387 | if (unlikely(page == NULL)) { | 387 | if (unlikely(page == NULL)) { |
388 | if (flags & XBF_READ_AHEAD) { | 388 | if (flags & XBF_READ_AHEAD) { |
389 | bp->b_page_count = i; | 389 | bp->b_page_count = i; |
390 | for (i = 0; i < bp->b_page_count; i++) | ||
391 | unlock_page(bp->b_pages[i]); | ||
390 | return -ENOMEM; | 392 | return -ENOMEM; |
391 | } | 393 | } |
392 | 394 | ||
@@ -416,17 +418,24 @@ _xfs_buf_lookup_pages( | |||
416 | ASSERT(!PagePrivate(page)); | 418 | ASSERT(!PagePrivate(page)); |
417 | if (!PageUptodate(page)) { | 419 | if (!PageUptodate(page)) { |
418 | page_count--; | 420 | page_count--; |
419 | if (blocksize < PAGE_CACHE_SIZE && !PagePrivate(page)) { | 421 | if (blocksize >= PAGE_CACHE_SIZE) { |
422 | if (flags & XBF_READ) | ||
423 | bp->b_flags |= _XBF_PAGE_LOCKED; | ||
424 | } else if (!PagePrivate(page)) { | ||
420 | if (test_page_region(page, offset, nbytes)) | 425 | if (test_page_region(page, offset, nbytes)) |
421 | page_count++; | 426 | page_count++; |
422 | } | 427 | } |
423 | } | 428 | } |
424 | 429 | ||
425 | unlock_page(page); | ||
426 | bp->b_pages[i] = page; | 430 | bp->b_pages[i] = page; |
427 | offset = 0; | 431 | offset = 0; |
428 | } | 432 | } |
429 | 433 | ||
434 | if (!(bp->b_flags & _XBF_PAGE_LOCKED)) { | ||
435 | for (i = 0; i < bp->b_page_count; i++) | ||
436 | unlock_page(bp->b_pages[i]); | ||
437 | } | ||
438 | |||
430 | if (page_count == bp->b_page_count) | 439 | if (page_count == bp->b_page_count) |
431 | bp->b_flags |= XBF_DONE; | 440 | bp->b_flags |= XBF_DONE; |
432 | 441 | ||
@@ -746,6 +755,7 @@ xfs_buf_associate_memory( | |||
746 | bp->b_count_desired = len; | 755 | bp->b_count_desired = len; |
747 | bp->b_buffer_length = buflen; | 756 | bp->b_buffer_length = buflen; |
748 | bp->b_flags |= XBF_MAPPED; | 757 | bp->b_flags |= XBF_MAPPED; |
758 | bp->b_flags &= ~_XBF_PAGE_LOCKED; | ||
749 | 759 | ||
750 | return 0; | 760 | return 0; |
751 | } | 761 | } |
@@ -1093,8 +1103,10 @@ _xfs_buf_ioend( | |||
1093 | xfs_buf_t *bp, | 1103 | xfs_buf_t *bp, |
1094 | int schedule) | 1104 | int schedule) |
1095 | { | 1105 | { |
1096 | if (atomic_dec_and_test(&bp->b_io_remaining) == 1) | 1106 | if (atomic_dec_and_test(&bp->b_io_remaining) == 1) { |
1107 | bp->b_flags &= ~_XBF_PAGE_LOCKED; | ||
1097 | xfs_buf_ioend(bp, schedule); | 1108 | xfs_buf_ioend(bp, schedule); |
1109 | } | ||
1098 | } | 1110 | } |
1099 | 1111 | ||
1100 | STATIC void | 1112 | STATIC void |
@@ -1125,6 +1137,9 @@ xfs_buf_bio_end_io( | |||
1125 | 1137 | ||
1126 | if (--bvec >= bio->bi_io_vec) | 1138 | if (--bvec >= bio->bi_io_vec) |
1127 | prefetchw(&bvec->bv_page->flags); | 1139 | prefetchw(&bvec->bv_page->flags); |
1140 | |||
1141 | if (bp->b_flags & _XBF_PAGE_LOCKED) | ||
1142 | unlock_page(page); | ||
1128 | } while (bvec >= bio->bi_io_vec); | 1143 | } while (bvec >= bio->bi_io_vec); |
1129 | 1144 | ||
1130 | _xfs_buf_ioend(bp, 1); | 1145 | _xfs_buf_ioend(bp, 1); |
@@ -1163,7 +1178,8 @@ _xfs_buf_ioapply( | |||
1163 | * filesystem block size is not smaller than the page size. | 1178 | * filesystem block size is not smaller than the page size. |
1164 | */ | 1179 | */ |
1165 | if ((bp->b_buffer_length < PAGE_CACHE_SIZE) && | 1180 | if ((bp->b_buffer_length < PAGE_CACHE_SIZE) && |
1166 | (bp->b_flags & XBF_READ) && | 1181 | ((bp->b_flags & (XBF_READ|_XBF_PAGE_LOCKED)) == |
1182 | (XBF_READ|_XBF_PAGE_LOCKED)) && | ||
1167 | (blocksize >= PAGE_CACHE_SIZE)) { | 1183 | (blocksize >= PAGE_CACHE_SIZE)) { |
1168 | bio = bio_alloc(GFP_NOIO, 1); | 1184 | bio = bio_alloc(GFP_NOIO, 1); |
1169 | 1185 | ||
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index 841d7883528d..f948ec7ba9a4 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h | |||
@@ -66,6 +66,25 @@ typedef enum { | |||
66 | _XBF_PAGES = (1 << 18), /* backed by refcounted pages */ | 66 | _XBF_PAGES = (1 << 18), /* backed by refcounted pages */ |
67 | _XBF_RUN_QUEUES = (1 << 19),/* run block device task queue */ | 67 | _XBF_RUN_QUEUES = (1 << 19),/* run block device task queue */ |
68 | _XBF_DELWRI_Q = (1 << 21), /* buffer on delwri queue */ | 68 | _XBF_DELWRI_Q = (1 << 21), /* buffer on delwri queue */ |
69 | |||
70 | /* | ||
71 | * Special flag for supporting metadata blocks smaller than a FSB. | ||
72 | * | ||
73 | * In this case we can have multiple xfs_buf_t on a single page and | ||
74 | * need to lock out concurrent xfs_buf_t readers as they only | ||
75 | * serialise access to the buffer. | ||
76 | * | ||
77 | * If the FSB size >= PAGE_CACHE_SIZE case, we have no serialisation | ||
78 | * between reads of the page. Hence we can have one thread read the | ||
79 | * page and modify it, but then race with another thread that thinks | ||
80 | * the page is not up-to-date and hence reads it again. | ||
81 | * | ||
82 | * The result is that the first modification to the page is lost. | ||
83 | * This sort of AGF/AGI reading race can happen when unlinking inodes | ||
84 | * that require truncation and results in the AGI unlinked list | ||
85 | * modifications being lost. | ||
86 | */ | ||
87 | _XBF_PAGE_LOCKED = (1 << 22), | ||
69 | } xfs_buf_flags_t; | 88 | } xfs_buf_flags_t; |
70 | 89 | ||
71 | typedef enum { | 90 | typedef enum { |
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c index 65e78c13d4ae..5f60363b9343 100644 --- a/fs/xfs/linux-2.6/xfs_file.c +++ b/fs/xfs/linux-2.6/xfs_file.c | |||
@@ -184,19 +184,24 @@ xfs_file_release( | |||
184 | return -xfs_release(XFS_I(inode)); | 184 | return -xfs_release(XFS_I(inode)); |
185 | } | 185 | } |
186 | 186 | ||
187 | /* | ||
188 | * We ignore the datasync flag here because a datasync is effectively | ||
189 | * identical to an fsync. That is, datasync implies that we need to write | ||
190 | * only the metadata needed to be able to access the data that is written | ||
191 | * if we crash after the call completes. Hence if we are writing beyond | ||
192 | * EOF we have to log the inode size change as well, which makes it a | ||
193 | * full fsync. If we don't write beyond EOF, the inode core will be | ||
194 | * clean in memory and so we don't need to log the inode, just like | ||
195 | * fsync. | ||
196 | */ | ||
187 | STATIC int | 197 | STATIC int |
188 | xfs_file_fsync( | 198 | xfs_file_fsync( |
189 | struct file *filp, | 199 | struct file *filp, |
190 | struct dentry *dentry, | 200 | struct dentry *dentry, |
191 | int datasync) | 201 | int datasync) |
192 | { | 202 | { |
193 | int flags = FSYNC_WAIT; | ||
194 | |||
195 | if (datasync) | ||
196 | flags |= FSYNC_DATA; | ||
197 | xfs_iflags_clear(XFS_I(dentry->d_inode), XFS_ITRUNCATED); | 203 | xfs_iflags_clear(XFS_I(dentry->d_inode), XFS_ITRUNCATED); |
198 | return -xfs_fsync(XFS_I(dentry->d_inode), flags, | 204 | return -xfs_fsync(XFS_I(dentry->d_inode)); |
199 | (xfs_off_t)0, (xfs_off_t)-1); | ||
200 | } | 205 | } |
201 | 206 | ||
202 | /* | 207 | /* |
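The new comment explains why xfs_file_fsync() drops the datasync flag: once a write extends the file, even a datasync must log the inode size change, so the two operations converge. From userspace the difference is only which syscall is issued; a small hedged illustration:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("example.dat", O_WRONLY | O_CREAT | O_APPEND, 0644);

	if (fd < 0)
		return 1;

	write(fd, "hello\n", 6);	/* appending moves EOF ...              */
	fdatasync(fd);			/* ... so the size change must be logged, */
					/* making this as heavy as fsync(fd) here */
	close(fd);
	return 0;
}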
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h index 9d73cb5c0fc7..25eb2a9e8d9b 100644 --- a/fs/xfs/linux-2.6/xfs_vnode.h +++ b/fs/xfs/linux-2.6/xfs_vnode.h | |||
@@ -230,14 +230,6 @@ static inline void vn_atime_to_time_t(bhv_vnode_t *vp, time_t *tt) | |||
230 | #define ATTR_NOSIZETOK 0x400 /* Don't get the SIZE token */ | 230 | #define ATTR_NOSIZETOK 0x400 /* Don't get the SIZE token */ |
231 | 231 | ||
232 | /* | 232 | /* |
233 | * Flags to vop_fsync/reclaim. | ||
234 | */ | ||
235 | #define FSYNC_NOWAIT 0 /* asynchronous flush */ | ||
236 | #define FSYNC_WAIT 0x1 /* synchronous fsync or forced reclaim */ | ||
237 | #define FSYNC_INVAL 0x2 /* flush and invalidate cached data */ | ||
238 | #define FSYNC_DATA 0x4 /* synchronous fsync of data only */ | ||
239 | |||
240 | /* | ||
241 | * Tracking vnode activity. | 233 | * Tracking vnode activity. |
242 | */ | 234 | */ |
243 | #if defined(XFS_INODE_TRACE) | 235 | #if defined(XFS_INODE_TRACE) |
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index cf0bb9c1d621..e569bf5d6cf0 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c | |||
@@ -2974,6 +2974,7 @@ xfs_iflush_cluster( | |||
2974 | xfs_mount_t *mp = ip->i_mount; | 2974 | xfs_mount_t *mp = ip->i_mount; |
2975 | xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino); | 2975 | xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino); |
2976 | unsigned long first_index, mask; | 2976 | unsigned long first_index, mask; |
2977 | unsigned long inodes_per_cluster; | ||
2977 | int ilist_size; | 2978 | int ilist_size; |
2978 | xfs_inode_t **ilist; | 2979 | xfs_inode_t **ilist; |
2979 | xfs_inode_t *iq; | 2980 | xfs_inode_t *iq; |
@@ -2985,8 +2986,9 @@ xfs_iflush_cluster( | |||
2985 | ASSERT(pag->pagi_inodeok); | 2986 | ASSERT(pag->pagi_inodeok); |
2986 | ASSERT(pag->pag_ici_init); | 2987 | ASSERT(pag->pag_ici_init); |
2987 | 2988 | ||
2988 | ilist_size = XFS_INODE_CLUSTER_SIZE(mp) * sizeof(xfs_inode_t *); | 2989 | inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog; |
2989 | ilist = kmem_alloc(ilist_size, KM_MAYFAIL); | 2990 | ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *); |
2991 | ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS); | ||
2990 | if (!ilist) | 2992 | if (!ilist) |
2991 | return 0; | 2993 | return 0; |
2992 | 2994 | ||
@@ -2995,8 +2997,7 @@ xfs_iflush_cluster( | |||
2995 | read_lock(&pag->pag_ici_lock); | 2997 | read_lock(&pag->pag_ici_lock); |
2996 | /* really need a gang lookup range call here */ | 2998 | /* really need a gang lookup range call here */ |
2997 | nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist, | 2999 | nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist, |
2998 | first_index, | 3000 | first_index, inodes_per_cluster); |
2999 | XFS_INODE_CLUSTER_SIZE(mp)); | ||
3000 | if (nr_found == 0) | 3001 | if (nr_found == 0) |
3001 | goto out_free; | 3002 | goto out_free; |
3002 | 3003 | ||
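The gang lookup is now bounded by the number of inodes in a cluster rather than by the cluster size in bytes. Since sb_inodelog is log2 of the on-disk inode size, the shift is just a division; for example, an 8 KiB cluster of 256-byte inodes (inodelog 8) covers 32 inodes, not 8192 radix-tree slots. A one-line worked check (the sizes are illustrative, not asserted XFS defaults):

#include <stdio.h>

int main(void)
{
	unsigned long cluster_size = 8192;	/* example XFS_INODE_CLUSTER_SIZE(mp) */
	unsigned long inodelog = 8;		/* 256-byte inodes: log2(256) == sb_inodelog */
	unsigned long inodes_per_cluster = cluster_size >> inodelog;

	printf("%lu inodes per cluster\n", inodes_per_cluster);	/* prints 32 */
	return 0;
}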
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 70702a60b4bb..e475e3717eb3 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c | |||
@@ -856,18 +856,14 @@ xfs_readlink( | |||
856 | /* | 856 | /* |
857 | * xfs_fsync | 857 | * xfs_fsync |
858 | * | 858 | * |
859 | * This is called to sync the inode and its data out to disk. | 859 | * This is called to sync the inode and its data out to disk. We need to hold |
860 | * We need to hold the I/O lock while flushing the data, and | 860 | * the I/O lock while flushing the data, and the inode lock while flushing the |
861 | * the inode lock while flushing the inode. The inode lock CANNOT | 861 | * inode. The inode lock CANNOT be held while flushing the data, so acquire |
862 | * be held while flushing the data, so acquire after we're done | 862 | * after we're done with that. |
863 | * with that. | ||
864 | */ | 863 | */ |
865 | int | 864 | int |
866 | xfs_fsync( | 865 | xfs_fsync( |
867 | xfs_inode_t *ip, | 866 | xfs_inode_t *ip) |
868 | int flag, | ||
869 | xfs_off_t start, | ||
870 | xfs_off_t stop) | ||
871 | { | 867 | { |
872 | xfs_trans_t *tp; | 868 | xfs_trans_t *tp; |
873 | int error; | 869 | int error; |
@@ -875,103 +871,79 @@ xfs_fsync( | |||
875 | 871 | ||
876 | xfs_itrace_entry(ip); | 872 | xfs_itrace_entry(ip); |
877 | 873 | ||
878 | ASSERT(start >= 0 && stop >= -1); | ||
879 | |||
880 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) | 874 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) |
881 | return XFS_ERROR(EIO); | 875 | return XFS_ERROR(EIO); |
882 | 876 | ||
883 | if (flag & FSYNC_DATA) | 877 | /* capture size updates in I/O completion before writing the inode. */ |
884 | filemap_fdatawait(vn_to_inode(XFS_ITOV(ip))->i_mapping); | 878 | error = filemap_fdatawait(vn_to_inode(XFS_ITOV(ip))->i_mapping); |
879 | if (error) | ||
880 | return XFS_ERROR(error); | ||
885 | 881 | ||
886 | /* | 882 | /* |
887 | * We always need to make sure that the required inode state | 883 | * We always need to make sure that the required inode state is safe on |
888 | * is safe on disk. The vnode might be clean but because | 884 | * disk. The vnode might be clean but we still might need to force the |
889 | * of committed transactions that haven't hit the disk yet. | 885 | * log because of committed transactions that haven't hit the disk yet. |
890 | * Likewise, there could be unflushed non-transactional | 886 | * Likewise, there could be unflushed non-transactional changes to the |
891 | * changes to the inode core that have to go to disk. | 887 | * inode core that have to go to disk and this requires us to issue |
888 | * a synchronous transaction to capture these changes correctly. | ||
892 | * | 889 | * |
893 | * The following code depends on one assumption: that | 890 | * This code relies on the assumption that if the update_* fields |
894 | * any transaction that changes an inode logs the core | 891 | * of the inode are clear and the inode is unpinned then it is clean |
895 | * because it has to change some field in the inode core | 892 | * and no action is required. |
896 | * (typically nextents or nblocks). That assumption | ||
897 | * implies that any transactions against an inode will | ||
898 | * catch any non-transactional updates. If inode-altering | ||
899 | * transactions exist that violate this assumption, the | ||
900 | * code breaks. Right now, it figures that if the involved | ||
901 | * update_* field is clear and the inode is unpinned, the | ||
902 | * inode is clean. Either it's been flushed or it's been | ||
903 | * committed and the commit has hit the disk unpinning the inode. | ||
904 | * (Note that xfs_inode_item_format() called at commit clears | ||
905 | * the update_* fields.) | ||
906 | */ | 893 | */ |
907 | xfs_ilock(ip, XFS_ILOCK_SHARED); | 894 | xfs_ilock(ip, XFS_ILOCK_SHARED); |
908 | 895 | ||
909 | /* If we are flushing data then we care about update_size | 896 | if (!(ip->i_update_size || ip->i_update_core)) { |
910 | * being set, otherwise we care about update_core | ||
911 | */ | ||
912 | if ((flag & FSYNC_DATA) ? | ||
913 | (ip->i_update_size == 0) : | ||
914 | (ip->i_update_core == 0)) { | ||
915 | /* | 897 | /* |
916 | * Timestamps/size haven't changed since last inode | 898 | * Timestamps/size haven't changed since last inode flush or |
917 | * flush or inode transaction commit. That means | 899 | * inode transaction commit. That means either nothing got |
918 | * either nothing got written or a transaction | 900 | * written or a transaction committed which caught the updates. |
919 | * committed which caught the updates. If the | 901 | * If the latter happened and the transaction hasn't hit the |
920 | * latter happened and the transaction hasn't | 902 | * disk yet, the inode will be still be pinned. If it is, |
921 | * hit the disk yet, the inode will be still | 903 | * force the log. |
922 | * be pinned. If it is, force the log. | ||
923 | */ | 904 | */ |
924 | 905 | ||
925 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | 906 | xfs_iunlock(ip, XFS_ILOCK_SHARED); |
926 | 907 | ||
927 | if (xfs_ipincount(ip)) { | 908 | if (xfs_ipincount(ip)) { |
928 | _xfs_log_force(ip->i_mount, (xfs_lsn_t)0, | 909 | error = _xfs_log_force(ip->i_mount, (xfs_lsn_t)0, |
929 | XFS_LOG_FORCE | | 910 | XFS_LOG_FORCE | XFS_LOG_SYNC, |
930 | ((flag & FSYNC_WAIT) | ||
931 | ? XFS_LOG_SYNC : 0), | ||
932 | &log_flushed); | 911 | &log_flushed); |
933 | } else { | 912 | } else { |
934 | /* | 913 | /* |
935 | * If the inode is not pinned and nothing | 914 | * If the inode is not pinned and nothing has changed |
936 | * has changed we don't need to flush the | 915 | * we don't need to flush the cache. |
937 | * cache. | ||
938 | */ | 916 | */ |
939 | changed = 0; | 917 | changed = 0; |
940 | } | 918 | } |
941 | error = 0; | ||
942 | } else { | 919 | } else { |
943 | /* | 920 | /* |
944 | * Kick off a transaction to log the inode | 921 | * Kick off a transaction to log the inode core to get the |
945 | * core to get the updates. Make it | 922 | * updates. The sync transaction will also force the log. |
946 | * sync if FSYNC_WAIT is passed in (which | ||
947 | * is done by everybody but specfs). The | ||
948 | * sync transaction will also force the log. | ||
949 | */ | 923 | */ |
950 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | 924 | xfs_iunlock(ip, XFS_ILOCK_SHARED); |
951 | tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_FSYNC_TS); | 925 | tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_FSYNC_TS); |
952 | if ((error = xfs_trans_reserve(tp, 0, | 926 | error = xfs_trans_reserve(tp, 0, |
953 | XFS_FSYNC_TS_LOG_RES(ip->i_mount), | 927 | XFS_FSYNC_TS_LOG_RES(ip->i_mount), 0, 0, 0); |
954 | 0, 0, 0))) { | 928 | if (error) { |
955 | xfs_trans_cancel(tp, 0); | 929 | xfs_trans_cancel(tp, 0); |
956 | return error; | 930 | return error; |
957 | } | 931 | } |
958 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 932 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
959 | 933 | ||
960 | /* | 934 | /* |
961 | * Note - it's possible that we might have pushed | 935 | * Note - it's possible that we might have pushed ourselves out |
962 | * ourselves out of the way during trans_reserve | 936 | * of the way during trans_reserve which would flush the inode. |
963 | * which would flush the inode. But there's no | 937 | * But there's no guarantee that the inode buffer has actually |
964 | * guarantee that the inode buffer has actually | 938 | * gone out yet (it's delwri). Plus the buffer could be pinned |
965 | * gone out yet (it's delwri). Plus the buffer | 939 | * anyway if it's part of an inode in another recent |
966 | * could be pinned anyway if it's part of an | 940 | * transaction. So we play it safe and fire off the |
967 | * inode in another recent transaction. So we | 941 | * transaction anyway. |
968 | * play it safe and fire off the transaction anyway. | ||
969 | */ | 942 | */ |
970 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); | 943 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); |
971 | xfs_trans_ihold(tp, ip); | 944 | xfs_trans_ihold(tp, ip); |
972 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | 945 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); |
973 | if (flag & FSYNC_WAIT) | 946 | xfs_trans_set_sync(tp); |
974 | xfs_trans_set_sync(tp); | ||
975 | error = _xfs_trans_commit(tp, 0, &log_flushed); | 947 | error = _xfs_trans_commit(tp, 0, &log_flushed); |
976 | 948 | ||
977 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 949 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h index 8abe8f186e20..57335ba4ce53 100644 --- a/fs/xfs/xfs_vnodeops.h +++ b/fs/xfs/xfs_vnodeops.h | |||
@@ -18,8 +18,7 @@ int xfs_open(struct xfs_inode *ip); | |||
18 | int xfs_setattr(struct xfs_inode *ip, struct bhv_vattr *vap, int flags, | 18 | int xfs_setattr(struct xfs_inode *ip, struct bhv_vattr *vap, int flags, |
19 | struct cred *credp); | 19 | struct cred *credp); |
20 | int xfs_readlink(struct xfs_inode *ip, char *link); | 20 | int xfs_readlink(struct xfs_inode *ip, char *link); |
21 | int xfs_fsync(struct xfs_inode *ip, int flag, xfs_off_t start, | 21 | int xfs_fsync(struct xfs_inode *ip); |
22 | xfs_off_t stop); | ||
23 | int xfs_release(struct xfs_inode *ip); | 22 | int xfs_release(struct xfs_inode *ip); |
24 | int xfs_inactive(struct xfs_inode *ip); | 23 | int xfs_inactive(struct xfs_inode *ip); |
25 | int xfs_lookup(struct xfs_inode *dp, struct xfs_name *name, | 24 | int xfs_lookup(struct xfs_inode *dp, struct xfs_name *name, |
diff --git a/include/asm-arm/arch-omap/board-palmte.h b/include/asm-arm/arch-omap/board-palmte.h index cd22035a7160..6fac2c8935be 100644 --- a/include/asm-arm/arch-omap/board-palmte.h +++ b/include/asm-arm/arch-omap/board-palmte.h | |||
@@ -14,8 +14,6 @@ | |||
14 | #ifndef __OMAP_BOARD_PALMTE_H | 14 | #ifndef __OMAP_BOARD_PALMTE_H |
15 | #define __OMAP_BOARD_PALMTE_H | 15 | #define __OMAP_BOARD_PALMTE_H |
16 | 16 | ||
17 | #include <asm/arch/gpio.h> | ||
18 | |||
19 | #define PALMTE_USBDETECT_GPIO 0 | 17 | #define PALMTE_USBDETECT_GPIO 0 |
20 | #define PALMTE_USB_OR_DC_GPIO 1 | 18 | #define PALMTE_USB_OR_DC_GPIO 1 |
21 | #define PALMTE_TSC_GPIO 4 | 19 | #define PALMTE_TSC_GPIO 4 |
diff --git a/include/asm-arm/arch-omap/clock.h b/include/asm-arm/arch-omap/clock.h index 57523bdb642b..12a5e4de9518 100644 --- a/include/asm-arm/arch-omap/clock.h +++ b/include/asm-arm/arch-omap/clock.h | |||
@@ -73,6 +73,8 @@ struct clk { | |||
73 | #endif | 73 | #endif |
74 | }; | 74 | }; |
75 | 75 | ||
76 | struct cpufreq_frequency_table; | ||
77 | |||
76 | struct clk_functions { | 78 | struct clk_functions { |
77 | int (*clk_enable)(struct clk *clk); | 79 | int (*clk_enable)(struct clk *clk); |
78 | void (*clk_disable)(struct clk *clk); | 80 | void (*clk_disable)(struct clk *clk); |
@@ -83,6 +85,9 @@ struct clk_functions { | |||
83 | void (*clk_allow_idle)(struct clk *clk); | 85 | void (*clk_allow_idle)(struct clk *clk); |
84 | void (*clk_deny_idle)(struct clk *clk); | 86 | void (*clk_deny_idle)(struct clk *clk); |
85 | void (*clk_disable_unused)(struct clk *clk); | 87 | void (*clk_disable_unused)(struct clk *clk); |
88 | #ifdef CONFIG_CPU_FREQ | ||
89 | void (*clk_init_cpufreq_table)(struct cpufreq_frequency_table **); | ||
90 | #endif | ||
86 | }; | 91 | }; |
87 | 92 | ||
88 | extern unsigned int mpurate; | 93 | extern unsigned int mpurate; |
diff --git a/include/asm-arm/arch-omap/entry-macro.S b/include/asm-arm/arch-omap/entry-macro.S index 74cd57221c8e..369093a45fcf 100644 --- a/include/asm-arm/arch-omap/entry-macro.S +++ b/include/asm-arm/arch-omap/entry-macro.S | |||
@@ -8,6 +8,7 @@ | |||
8 | * warranty of any kind, whether express or implied. | 8 | * warranty of any kind, whether express or implied. |
9 | */ | 9 | */ |
10 | #include <asm/hardware.h> | 10 | #include <asm/hardware.h> |
11 | #include <asm/arch/io.h> | ||
11 | #include <asm/arch/irqs.h> | 12 | #include <asm/arch/irqs.h> |
12 | 13 | ||
13 | #if defined(CONFIG_ARCH_OMAP1) | 14 | #if defined(CONFIG_ARCH_OMAP1) |
diff --git a/include/asm-arm/arch-omap/gpio.h b/include/asm-arm/arch-omap/gpio.h index 86621a04cd8f..5ee6a49864c3 100644 --- a/include/asm-arm/arch-omap/gpio.h +++ b/include/asm-arm/arch-omap/gpio.h | |||
@@ -26,7 +26,6 @@ | |||
26 | #ifndef __ASM_ARCH_OMAP_GPIO_H | 26 | #ifndef __ASM_ARCH_OMAP_GPIO_H |
27 | #define __ASM_ARCH_OMAP_GPIO_H | 27 | #define __ASM_ARCH_OMAP_GPIO_H |
28 | 28 | ||
29 | #include <asm/hardware.h> | ||
30 | #include <asm/arch/irqs.h> | 29 | #include <asm/arch/irqs.h> |
31 | #include <asm/io.h> | 30 | #include <asm/io.h> |
32 | 31 | ||
diff --git a/include/asm-arm/arch-omap/hardware.h b/include/asm-arm/arch-omap/hardware.h index da572092e255..91d85b3417b7 100644 --- a/include/asm-arm/arch-omap/hardware.h +++ b/include/asm-arm/arch-omap/hardware.h | |||
@@ -41,7 +41,6 @@ | |||
41 | #include <asm/types.h> | 41 | #include <asm/types.h> |
42 | #include <asm/arch/cpu.h> | 42 | #include <asm/arch/cpu.h> |
43 | #endif | 43 | #endif |
44 | #include <asm/arch/io.h> | ||
45 | #include <asm/arch/serial.h> | 44 | #include <asm/arch/serial.h> |
46 | 45 | ||
47 | /* | 46 | /* |
diff --git a/include/asm-arm/arch-sa1100/collie.h b/include/asm-arm/arch-sa1100/collie.h index 14a344aa3cc7..762eba535813 100644 --- a/include/asm-arm/arch-sa1100/collie.h +++ b/include/asm-arm/arch-sa1100/collie.h | |||
@@ -34,9 +34,12 @@ | |||
34 | 34 | ||
35 | #define COLLIE_GPIO_ON_KEY GPIO_GPIO (0) | 35 | #define COLLIE_GPIO_ON_KEY GPIO_GPIO (0) |
36 | #define COLLIE_GPIO_AC_IN GPIO_GPIO (1) | 36 | #define COLLIE_GPIO_AC_IN GPIO_GPIO (1) |
37 | #define COLLIE_GPIO_SDIO_INT GPIO_GPIO (11) | ||
37 | #define COLLIE_GPIO_CF_IRQ GPIO_GPIO (14) | 38 | #define COLLIE_GPIO_CF_IRQ GPIO_GPIO (14) |
38 | #define COLLIE_GPIO_nREMOCON_INT GPIO_GPIO (15) | 39 | #define COLLIE_GPIO_nREMOCON_INT GPIO_GPIO (15) |
39 | #define COLLIE_GPIO_UCB1x00_RESET GPIO_GPIO (16) | 40 | #define COLLIE_GPIO_UCB1x00_RESET GPIO_GPIO (16) |
41 | #define COLLIE_GPIO_nMIC_ON GPIO_GPIO (17) | ||
42 | #define COLLIE_GPIO_nREMOCON_ON GPIO_GPIO (18) | ||
40 | #define COLLIE_GPIO_CO GPIO_GPIO (20) | 43 | #define COLLIE_GPIO_CO GPIO_GPIO (20) |
41 | #define COLLIE_GPIO_MCP_CLK GPIO_GPIO (21) | 44 | #define COLLIE_GPIO_MCP_CLK GPIO_GPIO (21) |
42 | #define COLLIE_GPIO_CF_CD GPIO_GPIO (22) | 45 | #define COLLIE_GPIO_CF_CD GPIO_GPIO (22) |
@@ -49,6 +52,7 @@ | |||
49 | 52 | ||
50 | #define COLLIE_IRQ_GPIO_ON_KEY IRQ_GPIO0 | 53 | #define COLLIE_IRQ_GPIO_ON_KEY IRQ_GPIO0 |
51 | #define COLLIE_IRQ_GPIO_AC_IN IRQ_GPIO1 | 54 | #define COLLIE_IRQ_GPIO_AC_IN IRQ_GPIO1 |
55 | #define COLLIE_IRQ_GPIO_SDIO_IRQ IRQ_GPIO11 | ||
52 | #define COLLIE_IRQ_GPIO_CF_IRQ IRQ_GPIO14 | 56 | #define COLLIE_IRQ_GPIO_CF_IRQ IRQ_GPIO14 |
53 | #define COLLIE_IRQ_GPIO_nREMOCON_INT IRQ_GPIO15 | 57 | #define COLLIE_IRQ_GPIO_nREMOCON_INT IRQ_GPIO15 |
54 | #define COLLIE_IRQ_GPIO_CO IRQ_GPIO20 | 58 | #define COLLIE_IRQ_GPIO_CO IRQ_GPIO20 |
diff --git a/include/asm-arm/page.h b/include/asm-arm/page.h index 5c22b0112106..8e05bdb5f12f 100644 --- a/include/asm-arm/page.h +++ b/include/asm-arm/page.h | |||
@@ -179,10 +179,10 @@ typedef unsigned long pgprot_t; | |||
179 | 179 | ||
180 | #endif /* STRICT_MM_TYPECHECKS */ | 180 | #endif /* STRICT_MM_TYPECHECKS */ |
181 | 181 | ||
182 | typedef struct page *pgtable_t; | ||
183 | |||
184 | #endif /* CONFIG_MMU */ | 182 | #endif /* CONFIG_MMU */ |
185 | 183 | ||
184 | typedef struct page *pgtable_t; | ||
185 | |||
186 | #include <asm/memory.h> | 186 | #include <asm/memory.h> |
187 | 187 | ||
188 | #endif /* !__ASSEMBLY__ */ | 188 | #endif /* !__ASSEMBLY__ */ |
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h index 6335de9a2bb3..514af792a598 100644 --- a/include/asm-arm/system.h +++ b/include/asm-arm/system.h | |||
@@ -48,20 +48,6 @@ | |||
48 | #define CPUID_TCM 2 | 48 | #define CPUID_TCM 2 |
49 | #define CPUID_TLBTYPE 3 | 49 | #define CPUID_TLBTYPE 3 |
50 | 50 | ||
51 | #ifdef CONFIG_CPU_CP15 | ||
52 | #define read_cpuid(reg) \ | ||
53 | ({ \ | ||
54 | unsigned int __val; \ | ||
55 | asm("mrc p15, 0, %0, c0, c0, " __stringify(reg) \ | ||
56 | : "=r" (__val) \ | ||
57 | : \ | ||
58 | : "cc"); \ | ||
59 | __val; \ | ||
60 | }) | ||
61 | #else | ||
62 | #define read_cpuid(reg) (processor_id) | ||
63 | #endif | ||
64 | |||
65 | /* | 51 | /* |
66 | * This is used to ensure the compiler did actually allocate the register we | 52 | * This is used to ensure the compiler did actually allocate the register we |
67 | * asked it for some inline assembly sequences. Apparently we can't trust | 53 | * asked it for some inline assembly sequences. Apparently we can't trust |
@@ -78,6 +64,21 @@ | |||
78 | #include <linux/stringify.h> | 64 | #include <linux/stringify.h> |
79 | #include <linux/irqflags.h> | 65 | #include <linux/irqflags.h> |
80 | 66 | ||
67 | #ifdef CONFIG_CPU_CP15 | ||
68 | #define read_cpuid(reg) \ | ||
69 | ({ \ | ||
70 | unsigned int __val; \ | ||
71 | asm("mrc p15, 0, %0, c0, c0, " __stringify(reg) \ | ||
72 | : "=r" (__val) \ | ||
73 | : \ | ||
74 | : "cc"); \ | ||
75 | __val; \ | ||
76 | }) | ||
77 | #else | ||
78 | extern unsigned int processor_id; | ||
79 | #define read_cpuid(reg) (processor_id) | ||
80 | #endif | ||
81 | |||
81 | /* | 82 | /* |
82 | * The CPU ID never changes at run time, so we might as well tell the | 83 | * The CPU ID never changes at run time, so we might as well tell the |
83 | * compiler that it's constant. Use this function to read the CPU ID | 84 | * compiler that it's constant. Use this function to read the CPU ID |
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h index ecf675a59d21..6be061d09da9 100644 --- a/include/asm-generic/gpio.h +++ b/include/asm-generic/gpio.h | |||
@@ -1,8 +1,12 @@ | |||
1 | #ifndef _ASM_GENERIC_GPIO_H | 1 | #ifndef _ASM_GENERIC_GPIO_H |
2 | #define _ASM_GENERIC_GPIO_H | 2 | #define _ASM_GENERIC_GPIO_H |
3 | 3 | ||
4 | #include <linux/types.h> | ||
5 | |||
4 | #ifdef CONFIG_HAVE_GPIO_LIB | 6 | #ifdef CONFIG_HAVE_GPIO_LIB |
5 | 7 | ||
8 | #include <linux/compiler.h> | ||
9 | |||
6 | /* Platforms may implement their GPIO interface with library code, | 10 | /* Platforms may implement their GPIO interface with library code, |
7 | * at a small performance cost for non-inlined operations and some | 11 | * at a small performance cost for non-inlined operations and some |
8 | * extra memory (for code and for per-GPIO table entries). | 12 | * extra memory (for code and for per-GPIO table entries). |
@@ -74,7 +78,7 @@ struct gpio_chip { | |||
74 | 78 | ||
75 | extern const char *gpiochip_is_requested(struct gpio_chip *chip, | 79 | extern const char *gpiochip_is_requested(struct gpio_chip *chip, |
76 | unsigned offset); | 80 | unsigned offset); |
77 | extern int __init __must_check gpiochip_reserve(int start, int ngpio); | 81 | extern int __must_check gpiochip_reserve(int start, int ngpio); |
78 | 82 | ||
79 | /* add/remove chips */ | 83 | /* add/remove chips */ |
80 | extern int gpiochip_add(struct gpio_chip *chip); | 84 | extern int gpiochip_add(struct gpio_chip *chip); |
diff --git a/include/asm-mips/gic.h b/include/asm-mips/gic.h index 01b2f92dc33d..3a492f225f00 100644 --- a/include/asm-mips/gic.h +++ b/include/asm-mips/gic.h | |||
@@ -330,7 +330,7 @@ | |||
330 | 330 | ||
331 | #define GIC_SH_RMASK_OFS 0x0300 | 331 | #define GIC_SH_RMASK_OFS 0x0300 |
332 | #define GIC_CLR_INTR_MASK(intr, val) \ | 332 | #define GIC_CLR_INTR_MASK(intr, val) \ |
333 | GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_RMASK_OFS + 4 + (((((intr) / 32) ^ 1) - 1) * 4)), ((val) << ((intr) % 32)) | 333 | GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_RMASK_OFS + 4 + (((((intr) / 32) ^ 1) - 1) * 4)), ((val) << ((intr) % 32))) |
334 | 334 | ||
335 | /* Register Map for Local Section */ | 335 | /* Register Map for Local Section */ |
336 | #define GIC_VPE_CTL_OFS 0x0000 | 336 | #define GIC_VPE_CTL_OFS 0x0000 |
diff --git a/include/asm-mips/mach-au1x00/au1000.h b/include/asm-mips/mach-au1x00/au1000.h index 363a14ee0ae5..1b5064dac007 100644 --- a/include/asm-mips/mach-au1x00/au1000.h +++ b/include/asm-mips/mach-au1x00/au1000.h | |||
@@ -1036,7 +1036,7 @@ enum soc_au1200_ints { | |||
1036 | #define USBD_INTSTAT 0xB020001C | 1036 | #define USBD_INTSTAT 0xB020001C |
1037 | # define USBDEV_INT_SOF (1 << 12) | 1037 | # define USBDEV_INT_SOF (1 << 12) |
1038 | # define USBDEV_INT_HF_BIT 6 | 1038 | # define USBDEV_INT_HF_BIT 6 |
1039 | # define USBDEV_INT_HF_MASK 0x3f << USBDEV_INT_HF_BIT) | 1039 | # define USBDEV_INT_HF_MASK (0x3f << USBDEV_INT_HF_BIT) |
1040 | # define USBDEV_INT_CMPLT_BIT 0 | 1040 | # define USBDEV_INT_CMPLT_BIT 0 |
1041 | # define USBDEV_INT_CMPLT_MASK (0x3f << USBDEV_INT_CMPLT_BIT) | 1041 | # define USBDEV_INT_CMPLT_MASK (0x3f << USBDEV_INT_CMPLT_BIT) |
1042 | #define USBD_CONFIG 0xB0200020 | 1042 | #define USBD_CONFIG 0xB0200020 |
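Both MIPS fixes above repair missing parentheses in macros: GIC_CLR_INTR_MASK() gains its closing parenthesis (the old definition would not even expand to a valid expression), and USBDEV_INT_HF_MASK gains its opening one. Without full parenthesisation, a shift inside a macro binds incorrectly against neighbouring operators; a contrived standalone illustration:

#include <assert.h>

#define HF_BIT		6
#define BAD_MASK	0x3f << HF_BIT		/* unparenthesised, like the old define */
#define GOOD_MASK	(0x3f << HF_BIT)

int main(void)
{
	/* '+' binds tighter than '<<', so the BAD form shifts the whole sum. */
	assert(1 + BAD_MASK  == (1 + 0x3f) << HF_BIT);	/* 0x1000, not 0xfc1 */
	assert(1 + GOOD_MASK == 0x0fc1);
	return 0;
}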
diff --git a/include/asm-powerpc/mpic.h b/include/asm-powerpc/mpic.h index 943c5a3fac8a..a4d0f876b427 100644 --- a/include/asm-powerpc/mpic.h +++ b/include/asm-powerpc/mpic.h | |||
@@ -428,12 +428,11 @@ extern void mpic_init(struct mpic *mpic); | |||
428 | */ | 428 | */ |
429 | 429 | ||
430 | 430 | ||
431 | /* Change/Read the priority of an interrupt. Default is 8 for irqs and | 431 | /* Change the priority of an interrupt. Default is 8 for irqs and |
432 | * 10 for IPIs. You can call this on both IPIs and IRQ numbers, but the | 432 | * 10 for IPIs. You can call this on both IPIs and IRQ numbers, but the |
433 | * IPI number is then the offset'ed (linux irq number mapped to the IPI) | 433 | * IPI number is then the offset'ed (linux irq number mapped to the IPI) |
434 | */ | 434 | */ |
435 | extern void mpic_irq_set_priority(unsigned int irq, unsigned int pri); | 435 | extern void mpic_irq_set_priority(unsigned int irq, unsigned int pri); |
436 | extern unsigned int mpic_irq_get_priority(unsigned int irq); | ||
437 | 436 | ||
438 | /* Setup a non-boot CPU */ | 437 | /* Setup a non-boot CPU */ |
439 | extern void mpic_setup_this_cpu(void); | 438 | extern void mpic_setup_this_cpu(void); |
diff --git a/include/asm-sparc64/ptrace.h b/include/asm-sparc64/ptrace.h index d8a56cddf7f2..b163da79bb6d 100644 --- a/include/asm-sparc64/ptrace.h +++ b/include/asm-sparc64/ptrace.h | |||
@@ -126,6 +126,8 @@ struct sparc_trapf { | |||
126 | #define TRACEREG32_SZ sizeof(struct pt_regs32) | 126 | #define TRACEREG32_SZ sizeof(struct pt_regs32) |
127 | #define STACKFRAME32_SZ sizeof(struct sparc_stackf32) | 127 | #define STACKFRAME32_SZ sizeof(struct sparc_stackf32) |
128 | 128 | ||
129 | #ifdef __KERNEL__ | ||
130 | |||
129 | struct global_reg_snapshot { | 131 | struct global_reg_snapshot { |
130 | unsigned long tstate; | 132 | unsigned long tstate; |
131 | unsigned long tpc; | 133 | unsigned long tpc; |
@@ -137,8 +139,6 @@ struct global_reg_snapshot { | |||
137 | unsigned long pad2; | 139 | unsigned long pad2; |
138 | }; | 140 | }; |
139 | 141 | ||
140 | #ifdef __KERNEL__ | ||
141 | |||
142 | #define __ARCH_WANT_COMPAT_SYS_PTRACE | 142 | #define __ARCH_WANT_COMPAT_SYS_PTRACE |
143 | 143 | ||
144 | #define force_successful_syscall_return() \ | 144 | #define force_successful_syscall_return() \ |
@@ -306,6 +306,8 @@ extern void __show_regs(struct pt_regs *); | |||
306 | #define SF_XARG5 0x58 | 306 | #define SF_XARG5 0x58 |
307 | #define SF_XXARG 0x5c | 307 | #define SF_XXARG 0x5c |
308 | 308 | ||
309 | #ifdef __KERNEL__ | ||
310 | |||
309 | /* global_reg_snapshot offsets */ | 311 | /* global_reg_snapshot offsets */ |
310 | #define GR_SNAP_TSTATE 0x00 | 312 | #define GR_SNAP_TSTATE 0x00 |
311 | #define GR_SNAP_TPC 0x08 | 313 | #define GR_SNAP_TPC 0x08 |
@@ -316,6 +318,8 @@ extern void __show_regs(struct pt_regs *); | |||
316 | #define GR_SNAP_PAD1 0x30 | 318 | #define GR_SNAP_PAD1 0x30 |
317 | #define GR_SNAP_PAD2 0x38 | 319 | #define GR_SNAP_PAD2 0x38 |
318 | 320 | ||
321 | #endif /* __KERNEL__ */ | ||
322 | |||
319 | /* Stuff for the ptrace system call */ | 323 | /* Stuff for the ptrace system call */ |
320 | #define PTRACE_SPARC_DETACH 11 | 324 | #define PTRACE_SPARC_DETACH 11 |
321 | #define PTRACE_GETREGS 12 | 325 | #define PTRACE_GETREGS 12 |
diff --git a/include/asm-x86/tlbflush.h b/include/asm-x86/tlbflush.h index 0c0674d94255..35c76ceb9f40 100644 --- a/include/asm-x86/tlbflush.h +++ b/include/asm-x86/tlbflush.h | |||
@@ -22,12 +22,23 @@ static inline void __native_flush_tlb(void) | |||
22 | 22 | ||
23 | static inline void __native_flush_tlb_global(void) | 23 | static inline void __native_flush_tlb_global(void) |
24 | { | 24 | { |
25 | unsigned long cr4 = read_cr4(); | 25 | unsigned long flags; |
26 | unsigned long cr4; | ||
26 | 27 | ||
28 | /* | ||
29 | * Read-modify-write to CR4 - protect it from preemption and | ||
30 | * from interrupts. (Use the raw variant because this code can | ||
31 | * be called from deep inside debugging code.) | ||
32 | */ | ||
33 | raw_local_irq_save(flags); | ||
34 | |||
35 | cr4 = read_cr4(); | ||
27 | /* clear PGE */ | 36 | /* clear PGE */ |
28 | write_cr4(cr4 & ~X86_CR4_PGE); | 37 | write_cr4(cr4 & ~X86_CR4_PGE); |
29 | /* write old PGE again and flush TLBs */ | 38 | /* write old PGE again and flush TLBs */ |
30 | write_cr4(cr4); | 39 | write_cr4(cr4); |
40 | |||
41 | raw_local_irq_restore(flags); | ||
31 | } | 42 | } |
32 | 43 | ||
33 | static inline void __native_flush_tlb_single(unsigned long addr) | 44 | static inline void __native_flush_tlb_single(unsigned long addr) |
diff --git a/include/linux/gpio.h b/include/linux/gpio.h index 4987a84078ef..98be6c5762b9 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h | |||
@@ -8,6 +8,9 @@ | |||
8 | 8 | ||
9 | #else | 9 | #else |
10 | 10 | ||
11 | #include <linux/types.h> | ||
12 | #include <linux/errno.h> | ||
13 | |||
11 | /* | 14 | /* |
12 | * Some platforms don't support the GPIO programming interface. | 15 | * Some platforms don't support the GPIO programming interface. |
13 | * | 16 | * |
diff --git a/include/linux/mman.h b/include/linux/mman.h index 87920a0852a3..dab8892e6ff1 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h | |||
@@ -17,14 +17,14 @@ | |||
17 | 17 | ||
18 | extern int sysctl_overcommit_memory; | 18 | extern int sysctl_overcommit_memory; |
19 | extern int sysctl_overcommit_ratio; | 19 | extern int sysctl_overcommit_ratio; |
20 | extern atomic_t vm_committed_space; | 20 | extern atomic_long_t vm_committed_space; |
21 | 21 | ||
22 | #ifdef CONFIG_SMP | 22 | #ifdef CONFIG_SMP |
23 | extern void vm_acct_memory(long pages); | 23 | extern void vm_acct_memory(long pages); |
24 | #else | 24 | #else |
25 | static inline void vm_acct_memory(long pages) | 25 | static inline void vm_acct_memory(long pages) |
26 | { | 26 | { |
27 | atomic_add(pages, &vm_committed_space); | 27 | atomic_long_add(pages, &vm_committed_space); |
28 | } | 28 | } |
29 | #endif | 29 | #endif |
30 | 30 | ||
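Widening vm_committed_space from atomic_t to atomic_long_t matters on 64-bit machines: the counter is in pages, and a 32-bit atomic_t tops out at 2^31 pages, beyond which it wraps negative and the overcommit check misbehaves. Rough bound, assuming the usual 4 KiB page size:

    2^31 pages x 4 KiB/page = 8 TiB of committed (not necessarily resident) space
    before a 32-bit counter wraps

Every reader and writer therefore switches to the atomic_long_* helpers, as the mm/mmap.c, mm/nommu.c and mm/swap.c hunks further down show.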
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index c463cd8a15a4..443bc7cd8c62 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -703,7 +703,7 @@ extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat); | |||
703 | extern struct zone *next_zone(struct zone *zone); | 703 | extern struct zone *next_zone(struct zone *zone); |
704 | 704 | ||
705 | /** | 705 | /** |
706 | * for_each_pgdat - helper macro to iterate over all nodes | 706 | * for_each_online_pgdat - helper macro to iterate over all online nodes |
707 | * @pgdat - pointer to a pg_data_t variable | 707 | * @pgdat - pointer to a pg_data_t variable |
708 | */ | 708 | */ |
709 | #define for_each_online_pgdat(pgdat) \ | 709 | #define for_each_online_pgdat(pgdat) \ |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 2b0266484c84..f27fd2009334 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -514,12 +514,10 @@ struct net_device | |||
514 | #define NETIF_F_NETNS_LOCAL 8192 /* Does not change network namespaces */ | 514 | #define NETIF_F_NETNS_LOCAL 8192 /* Does not change network namespaces */ |
515 | #define NETIF_F_MULTI_QUEUE 16384 /* Has multiple TX/RX queues */ | 515 | #define NETIF_F_MULTI_QUEUE 16384 /* Has multiple TX/RX queues */ |
516 | #define NETIF_F_LRO 32768 /* large receive offload */ | 516 | #define NETIF_F_LRO 32768 /* large receive offload */ |
517 | #define NETIF_F_VLAN_TSO 65536 /* Supports TSO for VLANs */ | ||
518 | #define NETIF_F_VLAN_CSUM 131072 /* Supports TX checksumming for VLANs */ | ||
519 | 517 | ||
520 | /* Segmentation offload features */ | 518 | /* Segmentation offload features */ |
521 | #define NETIF_F_GSO_SHIFT 20 | 519 | #define NETIF_F_GSO_SHIFT 16 |
522 | #define NETIF_F_GSO_MASK 0xfff00000 | 520 | #define NETIF_F_GSO_MASK 0xffff0000 |
523 | #define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT) | 521 | #define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT) |
524 | #define NETIF_F_UFO (SKB_GSO_UDP << NETIF_F_GSO_SHIFT) | 522 | #define NETIF_F_UFO (SKB_GSO_UDP << NETIF_F_GSO_SHIFT) |
525 | #define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT) | 523 | #define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT) |
@@ -747,6 +745,9 @@ struct net_device | |||
747 | /* rtnetlink link ops */ | 745 | /* rtnetlink link ops */ |
748 | const struct rtnl_link_ops *rtnl_link_ops; | 746 | const struct rtnl_link_ops *rtnl_link_ops; |
749 | 747 | ||
748 | /* VLAN feature mask */ | ||
749 | unsigned long vlan_features; | ||
750 | |||
750 | /* for setting kernel sock attribute on TCP connection setup */ | 751 | /* for setting kernel sock attribute on TCP connection setup */ |
751 | #define GSO_MAX_SIZE 65536 | 752 | #define GSO_MAX_SIZE 65536 |
752 | unsigned int gso_max_size; | 753 | unsigned int gso_max_size; |
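Dropping NETIF_F_VLAN_TSO (bit 16) and NETIF_F_VLAN_CSUM (bit 17) frees the low end of the upper half-word, so the GSO feature block moves from bit 20 back to bit 16 and its mask widens from 12 to 16 bits; the per-VLAN policy those two flags encoded is replaced by the explicit vlan_features mask. Quick check of the new constants (assuming SKB_GSO_TCPV4 is bit 0, as in skbuff.h of this era):

    NETIF_F_GSO_SHIFT = 16, NETIF_F_GSO_MASK = 0xffff0000
    NETIF_F_TSO = SKB_GSO_TCPV4 << 16 = 0x00010000

A sketch of how a driver would use the new field (the driver is hypothetical; the NETIF_F_* flags are real):

    dev->features      |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
    /* only these offloads are safe to inherit on stacked VLAN devices */
    dev->vlan_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;

The net/8021q hunks below consume the mask when creating and updating VLAN devices.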
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index e4c66593b5c6..0c5eb7ed8b3f 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h | |||
@@ -3,7 +3,6 @@ | |||
3 | 3 | ||
4 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
5 | #include <linux/init.h> | 5 | #include <linux/init.h> |
6 | #include <linux/types.h> | ||
7 | #include <linux/skbuff.h> | 6 | #include <linux/skbuff.h> |
8 | #include <linux/net.h> | 7 | #include <linux/net.h> |
9 | #include <linux/netdevice.h> | 8 | #include <linux/netdevice.h> |
@@ -14,6 +13,7 @@ | |||
14 | #include <linux/list.h> | 13 | #include <linux/list.h> |
15 | #include <net/net_namespace.h> | 14 | #include <net/net_namespace.h> |
16 | #endif | 15 | #endif |
16 | #include <linux/types.h> | ||
17 | #include <linux/compiler.h> | 17 | #include <linux/compiler.h> |
18 | 18 | ||
19 | /* Responses from hook functions. */ | 19 | /* Responses from hook functions. */ |
diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h index dd9c97f2d436..590ac3d6d5d6 100644 --- a/include/linux/netfilter_arp/arp_tables.h +++ b/include/linux/netfilter_arp/arp_tables.h | |||
@@ -11,11 +11,11 @@ | |||
11 | 11 | ||
12 | #ifdef __KERNEL__ | 12 | #ifdef __KERNEL__ |
13 | #include <linux/if.h> | 13 | #include <linux/if.h> |
14 | #include <linux/types.h> | ||
15 | #include <linux/in.h> | 14 | #include <linux/in.h> |
16 | #include <linux/if_arp.h> | 15 | #include <linux/if_arp.h> |
17 | #include <linux/skbuff.h> | 16 | #include <linux/skbuff.h> |
18 | #endif | 17 | #endif |
18 | #include <linux/types.h> | ||
19 | #include <linux/compiler.h> | 19 | #include <linux/compiler.h> |
20 | #include <linux/netfilter_arp.h> | 20 | #include <linux/netfilter_arp.h> |
21 | 21 | ||
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h index bfc889f90276..092bd50581a9 100644 --- a/include/linux/netfilter_ipv4/ip_tables.h +++ b/include/linux/netfilter_ipv4/ip_tables.h | |||
@@ -17,11 +17,11 @@ | |||
17 | 17 | ||
18 | #ifdef __KERNEL__ | 18 | #ifdef __KERNEL__ |
19 | #include <linux/if.h> | 19 | #include <linux/if.h> |
20 | #include <linux/types.h> | ||
21 | #include <linux/in.h> | 20 | #include <linux/in.h> |
22 | #include <linux/ip.h> | 21 | #include <linux/ip.h> |
23 | #include <linux/skbuff.h> | 22 | #include <linux/skbuff.h> |
24 | #endif | 23 | #endif |
24 | #include <linux/types.h> | ||
25 | #include <linux/compiler.h> | 25 | #include <linux/compiler.h> |
26 | #include <linux/netfilter_ipv4.h> | 26 | #include <linux/netfilter_ipv4.h> |
27 | 27 | ||
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h index f2507dcc5750..1089e33cf633 100644 --- a/include/linux/netfilter_ipv6/ip6_tables.h +++ b/include/linux/netfilter_ipv6/ip6_tables.h | |||
@@ -17,11 +17,11 @@ | |||
17 | 17 | ||
18 | #ifdef __KERNEL__ | 18 | #ifdef __KERNEL__ |
19 | #include <linux/if.h> | 19 | #include <linux/if.h> |
20 | #include <linux/types.h> | ||
21 | #include <linux/in6.h> | 20 | #include <linux/in6.h> |
22 | #include <linux/ipv6.h> | 21 | #include <linux/ipv6.h> |
23 | #include <linux/skbuff.h> | 22 | #include <linux/skbuff.h> |
24 | #endif | 23 | #endif |
24 | #include <linux/types.h> | ||
25 | #include <linux/compiler.h> | 25 | #include <linux/compiler.h> |
26 | #include <linux/netfilter_ipv6.h> | 26 | #include <linux/netfilter_ipv6.h> |
27 | 27 | ||
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index cf6dbd759395..9b940e644179 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -1761,6 +1761,7 @@ | |||
1761 | 1761 | ||
1762 | #define PCI_VENDOR_ID_INTASHIELD 0x135a | 1762 | #define PCI_VENDOR_ID_INTASHIELD 0x135a |
1763 | #define PCI_DEVICE_ID_INTASHIELD_IS200 0x0d80 | 1763 | #define PCI_DEVICE_ID_INTASHIELD_IS200 0x0d80 |
1764 | #define PCI_DEVICE_ID_INTASHIELD_IS400 0x0dc0 | ||
1764 | 1765 | ||
1765 | #define PCI_VENDOR_ID_QUATECH 0x135C | 1766 | #define PCI_VENDOR_ID_QUATECH 0x135C |
1766 | #define PCI_DEVICE_ID_QUATECH_QSC100 0x0010 | 1767 | #define PCI_DEVICE_ID_QUATECH_QSC100 0x0010 |
@@ -2383,6 +2384,9 @@ | |||
2383 | #define PCI_DEVICE_ID_INTEL_ICH10_4 0x3a30 | 2384 | #define PCI_DEVICE_ID_INTEL_ICH10_4 0x3a30 |
2384 | #define PCI_DEVICE_ID_INTEL_ICH10_5 0x3a60 | 2385 | #define PCI_DEVICE_ID_INTEL_ICH10_5 0x3a60 |
2385 | #define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f | 2386 | #define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f |
2387 | #define PCI_DEVICE_ID_INTEL_5400_ERR 0x4030 | ||
2388 | #define PCI_DEVICE_ID_INTEL_5400_FBD0 0x4035 | ||
2389 | #define PCI_DEVICE_ID_INTEL_5400_FBD1 0x4036 | ||
2386 | #define PCI_DEVICE_ID_INTEL_IOAT_SCNB 0x65ff | 2390 | #define PCI_DEVICE_ID_INTEL_IOAT_SCNB 0x65ff |
2387 | #define PCI_DEVICE_ID_INTEL_TOLAPAI_0 0x5031 | 2391 | #define PCI_DEVICE_ID_INTEL_TOLAPAI_0 0x5031 |
2388 | #define PCI_DEVICE_ID_INTEL_TOLAPAI_1 0x5032 | 2392 | #define PCI_DEVICE_ID_INTEL_TOLAPAI_1 0x5032 |
diff --git a/include/linux/raid/bitmap.h b/include/linux/raid/bitmap.h index 47fbcba11850..78bfdea24a8e 100644 --- a/include/linux/raid/bitmap.h +++ b/include/linux/raid/bitmap.h | |||
@@ -262,7 +262,6 @@ int bitmap_create(mddev_t *mddev); | |||
262 | void bitmap_flush(mddev_t *mddev); | 262 | void bitmap_flush(mddev_t *mddev); |
263 | void bitmap_destroy(mddev_t *mddev); | 263 | void bitmap_destroy(mddev_t *mddev); |
264 | 264 | ||
265 | char *file_path(struct file *file, char *buf, int count); | ||
266 | void bitmap_print_sb(struct bitmap *bitmap); | 265 | void bitmap_print_sb(struct bitmap *bitmap); |
267 | void bitmap_update_sb(struct bitmap *bitmap); | 266 | void bitmap_update_sb(struct bitmap *bitmap); |
268 | 267 | ||
diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h index 81a1a02d4566..b7386ae9d288 100644 --- a/include/linux/raid/md.h +++ b/include/linux/raid/md.h | |||
@@ -72,6 +72,8 @@ | |||
72 | */ | 72 | */ |
73 | #define MD_PATCHLEVEL_VERSION 3 | 73 | #define MD_PATCHLEVEL_VERSION 3 |
74 | 74 | ||
75 | extern int mdp_major; | ||
76 | |||
75 | extern int register_md_personality (struct mdk_personality *p); | 77 | extern int register_md_personality (struct mdk_personality *p); |
76 | extern int unregister_md_personality (struct mdk_personality *p); | 78 | extern int unregister_md_personality (struct mdk_personality *p); |
77 | extern mdk_thread_t * md_register_thread (void (*run) (mddev_t *mddev), | 79 | extern mdk_thread_t * md_register_thread (void (*run) (mddev_t *mddev), |
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h index 812ffa590cff..3dea9f545c8f 100644 --- a/include/linux/raid/md_k.h +++ b/include/linux/raid/md_k.h | |||
@@ -180,13 +180,15 @@ struct mddev_s | |||
180 | int sync_speed_min; | 180 | int sync_speed_min; |
181 | int sync_speed_max; | 181 | int sync_speed_max; |
182 | 182 | ||
183 | /* resync even though the same disks are shared among md-devices */ | ||
184 | int parallel_resync; | ||
185 | |||
183 | int ok_start_degraded; | 186 | int ok_start_degraded; |
184 | /* recovery/resync flags | 187 | /* recovery/resync flags |
185 | * NEEDED: we might need to start a resync/recover | 188 | * NEEDED: we might need to start a resync/recover |
186 | * RUNNING: a thread is running, or about to be started | 189 | * RUNNING: a thread is running, or about to be started |
187 | * SYNC: actually doing a resync, not a recovery | 190 | * SYNC: actually doing a resync, not a recovery |
188 | * ERR: and IO error was detected - abort the resync/recovery | 191 | * INTR: resync needs to be aborted for some reason |
189 | * INTR: someone requested a (clean) early abort. | ||
190 | * DONE: thread is done and is waiting to be reaped | 192 | * DONE: thread is done and is waiting to be reaped |
191 | * REQUEST: user-space has requested a sync (used with SYNC) | 193 | * REQUEST: user-space has requested a sync (used with SYNC) |
192 | * CHECK: user-space request for check-only, no repair | 194 | * CHECK: user-space request for check-only, no repair |
@@ -196,7 +198,6 @@ struct mddev_s | |||
196 | */ | 198 | */ |
197 | #define MD_RECOVERY_RUNNING 0 | 199 | #define MD_RECOVERY_RUNNING 0 |
198 | #define MD_RECOVERY_SYNC 1 | 200 | #define MD_RECOVERY_SYNC 1 |
199 | #define MD_RECOVERY_ERR 2 | ||
200 | #define MD_RECOVERY_INTR 3 | 201 | #define MD_RECOVERY_INTR 3 |
201 | #define MD_RECOVERY_DONE 4 | 202 | #define MD_RECOVERY_DONE 4 |
202 | #define MD_RECOVERY_NEEDED 5 | 203 | #define MD_RECOVERY_NEEDED 5 |
diff --git a/include/linux/sm501.h b/include/linux/sm501.h index bca134544700..95c1c39ba445 100644 --- a/include/linux/sm501.h +++ b/include/linux/sm501.h | |||
@@ -71,8 +71,8 @@ extern unsigned long sm501_gpio_get(struct device *dev, | |||
71 | #define SM501FB_FLAG_DISABLE_AT_EXIT (1<<1) | 71 | #define SM501FB_FLAG_DISABLE_AT_EXIT (1<<1) |
72 | #define SM501FB_FLAG_USE_HWCURSOR (1<<2) | 72 | #define SM501FB_FLAG_USE_HWCURSOR (1<<2) |
73 | #define SM501FB_FLAG_USE_HWACCEL (1<<3) | 73 | #define SM501FB_FLAG_USE_HWACCEL (1<<3) |
74 | #define SM501FB_FLAG_PANEL_USE_FPEN (1<<4) | 74 | #define SM501FB_FLAG_PANEL_NO_FPEN (1<<4) |
75 | #define SM501FB_FLAG_PANEL_USE_VBIASEN (1<<5) | 75 | #define SM501FB_FLAG_PANEL_NO_VBIASEN (1<<5) |
76 | 76 | ||
77 | struct sm501_platdata_fbsub { | 77 | struct sm501_platdata_fbsub { |
78 | struct fb_videomode *def_mode; | 78 | struct fb_videomode *def_mode; |
diff --git a/include/linux/tcp.h b/include/linux/tcp.h index d96d9b122304..18e62e3d406f 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h | |||
@@ -355,7 +355,7 @@ struct tcp_sock { | |||
355 | u32 lost_retrans_low; /* Sent seq after any rxmit (lowest) */ | 355 | u32 lost_retrans_low; /* Sent seq after any rxmit (lowest) */ |
356 | 356 | ||
357 | u16 advmss; /* Advertised MSS */ | 357 | u16 advmss; /* Advertised MSS */ |
358 | u16 prior_ssthresh; /* ssthresh saved at recovery start */ | 358 | u32 prior_ssthresh; /* ssthresh saved at recovery start */ |
359 | u32 lost_out; /* Lost packets */ | 359 | u32 lost_out; /* Lost packets */ |
360 | u32 sacked_out; /* SACK'd packets */ | 360 | u32 sacked_out; /* SACK'd packets */ |
361 | u32 fackets_out; /* FACK'd packets */ | 361 | u32 fackets_out; /* FACK'd packets */ |
diff --git a/include/linux/types.h b/include/linux/types.h index 9dc2346627b4..d4a9ce6e2760 100644 --- a/include/linux/types.h +++ b/include/linux/types.h | |||
@@ -197,8 +197,6 @@ typedef u64 resource_size_t; | |||
197 | typedef u32 resource_size_t; | 197 | typedef u32 resource_size_t; |
198 | #endif | 198 | #endif |
199 | 199 | ||
200 | #endif /* __KERNEL__ */ | ||
201 | |||
202 | struct ustat { | 200 | struct ustat { |
203 | __kernel_daddr_t f_tfree; | 201 | __kernel_daddr_t f_tfree; |
204 | __kernel_ino_t f_tinode; | 202 | __kernel_ino_t f_tinode; |
@@ -206,4 +204,6 @@ struct ustat { | |||
206 | char f_fpack[6]; | 204 | char f_fpack[6]; |
207 | }; | 205 | }; |
208 | 206 | ||
207 | #endif /* __KERNEL__ */ | ||
208 | |||
209 | #endif /* _LINUX_TYPES_H */ | 209 | #endif /* _LINUX_TYPES_H */ |
diff --git a/include/net/netlink.h b/include/net/netlink.h index a5506c42f03c..112dcdf7e34e 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h | |||
@@ -772,12 +772,13 @@ static inline int __nla_parse_nested_compat(struct nlattr *tb[], int maxtype, | |||
772 | const struct nla_policy *policy, | 772 | const struct nla_policy *policy, |
773 | int len) | 773 | int len) |
774 | { | 774 | { |
775 | if (nla_len(nla) < len) | 775 | int nested_len = nla_len(nla) - NLA_ALIGN(len); |
776 | |||
777 | if (nested_len < 0) | ||
776 | return -1; | 778 | return -1; |
777 | if (nla_len(nla) >= NLA_ALIGN(len) + sizeof(struct nlattr)) | 779 | if (nested_len >= nla_attr_size(0)) |
778 | return nla_parse_nested(tb, maxtype, | 780 | return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len), |
779 | nla_data(nla) + NLA_ALIGN(len), | 781 | nested_len, policy); |
780 | policy); | ||
781 | memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); | 782 | memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); |
782 | return 0; | 783 | return 0; |
783 | } | 784 | } |
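The rewritten __nla_parse_nested_compat() computes the length of the nested attribute area once, as the outer payload minus the aligned fixed header, and hands that exact length to nla_parse() instead of letting nla_parse_nested() re-derive a length from the wrong header. Worked example, assuming the usual NLA_ALIGNTO of 4:

    outer payload nla_len(nla) = 20 bytes, fixed header len = 6
    nested_len = 20 - NLA_ALIGN(6) = 20 - 8 = 12 bytes of nested attributes,
                 starting at nla_data(nla) + 8
    nested_len < 0                           -> payload cannot hold the header: error
    nested_len < nla_attr_size(0) (4 bytes)  -> header only, no nested attrs: tb[] zeroed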
diff --git a/init/Kconfig b/init/Kconfig index 6135d07f31ec..6199d1120900 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -13,6 +13,7 @@ config DEFCONFIG_LIST | |||
13 | default "/lib/modules/$UNAME_RELEASE/.config" | 13 | default "/lib/modules/$UNAME_RELEASE/.config" |
14 | default "/etc/kernel-config" | 14 | default "/etc/kernel-config" |
15 | default "/boot/config-$UNAME_RELEASE" | 15 | default "/boot/config-$UNAME_RELEASE" |
16 | default "$ARCH_DEFCONFIG" | ||
16 | default "arch/$ARCH/defconfig" | 17 | default "arch/$ARCH/defconfig" |
17 | 18 | ||
18 | menu "General setup" | 19 | menu "General setup" |
diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c index 7473b0c59d4d..693d24694a6c 100644 --- a/init/do_mounts_md.c +++ b/init/do_mounts_md.c | |||
@@ -24,7 +24,6 @@ static struct { | |||
24 | 24 | ||
25 | static int md_setup_ents __initdata; | 25 | static int md_setup_ents __initdata; |
26 | 26 | ||
27 | extern int mdp_major; | ||
28 | /* | 27 | /* |
29 | * Parse the command-line parameters given our kernel, but do not | 28 | * Parse the command-line parameters given our kernel, but do not |
30 | * actually try to invoke the MD device now; that is handled by | 29 | * actually try to invoke the MD device now; that is handled by |
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index fbc6fc8949b4..15ac0e1e4f4d 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
@@ -2903,7 +2903,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys) | |||
2903 | cg = tsk->cgroups; | 2903 | cg = tsk->cgroups; |
2904 | parent = task_cgroup(tsk, subsys->subsys_id); | 2904 | parent = task_cgroup(tsk, subsys->subsys_id); |
2905 | 2905 | ||
2906 | snprintf(nodename, MAX_CGROUP_TYPE_NAMELEN, "node_%d", tsk->pid); | 2906 | snprintf(nodename, MAX_CGROUP_TYPE_NAMELEN, "%d", tsk->pid); |
2907 | 2907 | ||
2908 | /* Pin the hierarchy */ | 2908 | /* Pin the hierarchy */ |
2909 | atomic_inc(&parent->root->sb->s_active); | 2909 | atomic_inc(&parent->root->sb->s_active); |
diff --git a/kernel/exit.c b/kernel/exit.c index 1510f78a0ffa..8f6185e69b69 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -126,6 +126,12 @@ static void __exit_signal(struct task_struct *tsk) | |||
126 | 126 | ||
127 | __unhash_process(tsk); | 127 | __unhash_process(tsk); |
128 | 128 | ||
129 | /* | ||
130 | * Do this under ->siglock, we can race with another thread | ||
131 | * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals. | ||
132 | */ | ||
133 | flush_sigqueue(&tsk->pending); | ||
134 | |||
129 | tsk->signal = NULL; | 135 | tsk->signal = NULL; |
130 | tsk->sighand = NULL; | 136 | tsk->sighand = NULL; |
131 | spin_unlock(&sighand->siglock); | 137 | spin_unlock(&sighand->siglock); |
@@ -133,7 +139,6 @@ static void __exit_signal(struct task_struct *tsk) | |||
133 | 139 | ||
134 | __cleanup_sighand(sighand); | 140 | __cleanup_sighand(sighand); |
135 | clear_tsk_thread_flag(tsk,TIF_SIGPENDING); | 141 | clear_tsk_thread_flag(tsk,TIF_SIGPENDING); |
136 | flush_sigqueue(&tsk->pending); | ||
137 | if (sig) { | 142 | if (sig) { |
138 | flush_sigqueue(&sig->shared_pending); | 143 | flush_sigqueue(&sig->shared_pending); |
139 | taskstats_tgid_free(sig); | 144 | taskstats_tgid_free(sig); |
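Moving flush_sigqueue(&tsk->pending) inside the ->siglock-protected region closes a race with sigqueue_free() on SIGQUEUE_PREALLOC signals: previously the exiting task flushed its pending queue after dropping the lock, so both paths could manipulate the same queue entry concurrently. The kernel/signal.c hunk below updates the comment on the other side of the lock. A simplified timeline sketch of what the lock now serializes:

    /* CPU0: __exit_signal()                 CPU1: sigqueue_free(q)
     *   spin_lock(&sighand->siglock);         spin_lock_irqsave(lock, flags);
     *   flush_sigqueue(&tsk->pending);          ... spins until CPU0 unlocks ...
     *   spin_unlock(&sighand->siglock);       if (!list_empty(&q->list))
     *                                                 list_del_init(&q->list);
     * Either order is now safe: the queue is emptied, or q is unlinked, and the
     * two never race on q->list at the same time.
     */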
diff --git a/kernel/module.c b/kernel/module.c index f5e9491ef7ac..5f80478b746d 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -1337,7 +1337,19 @@ out_unreg: | |||
1337 | kobject_put(&mod->mkobj.kobj); | 1337 | kobject_put(&mod->mkobj.kobj); |
1338 | return err; | 1338 | return err; |
1339 | } | 1339 | } |
1340 | #endif | 1340 | |
1341 | static void mod_sysfs_fini(struct module *mod) | ||
1342 | { | ||
1343 | kobject_put(&mod->mkobj.kobj); | ||
1344 | } | ||
1345 | |||
1346 | #else /* CONFIG_SYSFS */ | ||
1347 | |||
1348 | static void mod_sysfs_fini(struct module *mod) | ||
1349 | { | ||
1350 | } | ||
1351 | |||
1352 | #endif /* CONFIG_SYSFS */ | ||
1341 | 1353 | ||
1342 | static void mod_kobject_remove(struct module *mod) | 1354 | static void mod_kobject_remove(struct module *mod) |
1343 | { | 1355 | { |
@@ -1345,7 +1357,7 @@ static void mod_kobject_remove(struct module *mod) | |||
1345 | module_param_sysfs_remove(mod); | 1357 | module_param_sysfs_remove(mod); |
1346 | kobject_put(mod->mkobj.drivers_dir); | 1358 | kobject_put(mod->mkobj.drivers_dir); |
1347 | kobject_put(mod->holders_dir); | 1359 | kobject_put(mod->holders_dir); |
1348 | kobject_put(&mod->mkobj.kobj); | 1360 | mod_sysfs_fini(mod); |
1349 | } | 1361 | } |
1350 | 1362 | ||
1351 | /* | 1363 | /* |
@@ -1780,7 +1792,7 @@ static struct module *load_module(void __user *umod, | |||
1780 | 1792 | ||
1781 | /* Sanity checks against insmoding binaries or wrong arch, | 1793 | /* Sanity checks against insmoding binaries or wrong arch, |
1782 | weird elf version */ | 1794 | weird elf version */ |
1783 | if (memcmp(hdr->e_ident, ELFMAG, 4) != 0 | 1795 | if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0 |
1784 | || hdr->e_type != ET_REL | 1796 | || hdr->e_type != ET_REL |
1785 | || !elf_check_arch(hdr) | 1797 | || !elf_check_arch(hdr) |
1786 | || hdr->e_shentsize != sizeof(*sechdrs)) { | 1798 | || hdr->e_shentsize != sizeof(*sechdrs)) { |
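Two independent cleanups in kernel/module.c: mod_kobject_remove() now calls mod_sysfs_fini(), which is the real kobject_put() when CONFIG_SYSFS is set and an empty stub otherwise, removing the need for an #ifdef at the call site; and the ELF magic comparison uses SELFMAG (the defined length of the magic, 4) instead of a bare 4. A self-contained sketch of the stub-function idiom with hypothetical names (CONFIG_FEATURE, struct thing, feature_teardown):

    struct thing;                                  /* object with optional state */

    #ifdef CONFIG_FEATURE
    void feature_teardown(struct thing *t);        /* real implementation elsewhere */
    #else
    static inline void feature_teardown(struct thing *t) { }   /* compiles away */
    #endif

    void thing_destroy(struct thing *t)
    {
            feature_teardown(t);                   /* caller needs no #ifdef */
            /* ... common teardown ... */
    }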
diff --git a/kernel/signal.c b/kernel/signal.c index 72bb4f51f963..12ffea7c201d 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -1242,7 +1242,8 @@ void sigqueue_free(struct sigqueue *q) | |||
1242 | /* | 1242 | /* |
1243 | * If the signal is still pending remove it from the | 1243 | * If the signal is still pending remove it from the |
1244 | * pending queue. We must hold ->siglock while testing | 1244 | * pending queue. We must hold ->siglock while testing |
1245 | * q->list to serialize with collect_signal(). | 1245 | * q->list to serialize with collect_signal() or with |
1246 | * __exit_signal()->flush_sigqueue(). | ||
1246 | */ | 1247 | */ |
1247 | spin_lock_irqsave(lock, flags); | 1248 | spin_lock_irqsave(lock, flags); |
1248 | if (!list_empty(&q->list)) | 1249 | if (!list_empty(&q->list)) |
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index 0101aeef7ed7..b7350bbfb076 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c | |||
@@ -62,8 +62,7 @@ static int stopmachine(void *cpu) | |||
62 | * help our sisters onto their CPUs. */ | 62 | * help our sisters onto their CPUs. */ |
63 | if (!prepared && !irqs_disabled) | 63 | if (!prepared && !irqs_disabled) |
64 | yield(); | 64 | yield(); |
65 | else | 65 | cpu_relax(); |
66 | cpu_relax(); | ||
67 | } | 66 | } |
68 | 67 | ||
69 | /* Ack: we are exiting. */ | 68 | /* Ack: we are exiting. */ |
@@ -106,8 +105,10 @@ static int stop_machine(void) | |||
106 | } | 105 | } |
107 | 106 | ||
108 | /* Wait for them all to come to life. */ | 107 | /* Wait for them all to come to life. */ |
109 | while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads) | 108 | while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads) { |
110 | yield(); | 109 | yield(); |
110 | cpu_relax(); | ||
111 | } | ||
111 | 112 | ||
112 | /* If some failed, kill them all. */ | 113 | /* If some failed, kill them all. */ |
113 | if (ret < 0) { | 114 | if (ret < 0) { |
diff --git a/kernel/sys.c b/kernel/sys.c index 895d2d4c9493..14e97282eb6c 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -1652,7 +1652,7 @@ asmlinkage long sys_umask(int mask) | |||
1652 | asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, | 1652 | asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, |
1653 | unsigned long arg4, unsigned long arg5) | 1653 | unsigned long arg4, unsigned long arg5) |
1654 | { | 1654 | { |
1655 | long uninitialized_var(error); | 1655 | long error = 0; |
1656 | 1656 | ||
1657 | if (security_task_prctl(option, arg2, arg3, arg4, arg5, &error)) | 1657 | if (security_task_prctl(option, arg2, arg3, arg4, arg5, &error)) |
1658 | return error; | 1658 | return error; |
@@ -1701,9 +1701,7 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, | |||
1701 | error = PR_TIMING_STATISTICAL; | 1701 | error = PR_TIMING_STATISTICAL; |
1702 | break; | 1702 | break; |
1703 | case PR_SET_TIMING: | 1703 | case PR_SET_TIMING: |
1704 | if (arg2 == PR_TIMING_STATISTICAL) | 1704 | if (arg2 != PR_TIMING_STATISTICAL) |
1705 | error = 0; | ||
1706 | else | ||
1707 | error = -EINVAL; | 1705 | error = -EINVAL; |
1708 | break; | 1706 | break; |
1709 | 1707 | ||
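With error initialized to 0 rather than uninitialized_var(), the PR_SET_TIMING case only has to record the failure value, so the two-armed if collapses to a single negative test with identical behaviour. Side-by-side sketch:

    /* before */                            /* after (error already 0) */
    if (arg2 == PR_TIMING_STATISTICAL)      if (arg2 != PR_TIMING_STATISTICAL)
            error = 0;                              error = -EINVAL;
    else
            error = -EINVAL;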
diff --git a/mm/memory.c b/mm/memory.c index fb5608a120ed..19e0ae9beecb 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -2295,8 +2295,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2295 | vmf.flags = flags; | 2295 | vmf.flags = flags; |
2296 | vmf.page = NULL; | 2296 | vmf.page = NULL; |
2297 | 2297 | ||
2298 | BUG_ON(vma->vm_flags & VM_PFNMAP); | ||
2299 | |||
2300 | ret = vma->vm_ops->fault(vma, &vmf); | 2298 | ret = vma->vm_ops->fault(vma, &vmf); |
2301 | if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) | 2299 | if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) |
2302 | return ret; | 2300 | return ret; |
@@ -80,7 +80,7 @@ EXPORT_SYMBOL(vm_get_page_prot); | |||
80 | int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ | 80 | int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ |
81 | int sysctl_overcommit_ratio = 50; /* default is 50% */ | 81 | int sysctl_overcommit_ratio = 50; /* default is 50% */ |
82 | int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT; | 82 | int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT; |
83 | atomic_t vm_committed_space = ATOMIC_INIT(0); | 83 | atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0); |
84 | 84 | ||
85 | /* | 85 | /* |
86 | * Check that a process has enough memory to allocate a new virtual | 86 | * Check that a process has enough memory to allocate a new virtual |
@@ -177,7 +177,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) | |||
177 | * cast `allowed' as a signed long because vm_committed_space | 177 | * cast `allowed' as a signed long because vm_committed_space |
178 | * sometimes has a negative value | 178 | * sometimes has a negative value |
179 | */ | 179 | */ |
180 | if (atomic_read(&vm_committed_space) < (long)allowed) | 180 | if (atomic_long_read(&vm_committed_space) < (long)allowed) |
181 | return 0; | 181 | return 0; |
182 | error: | 182 | error: |
183 | vm_unacct_memory(pages); | 183 | vm_unacct_memory(pages); |
diff --git a/mm/nommu.c b/mm/nommu.c index ef8c62cec697..dca93fcb8b7a 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -39,7 +39,7 @@ struct page *mem_map; | |||
39 | unsigned long max_mapnr; | 39 | unsigned long max_mapnr; |
40 | unsigned long num_physpages; | 40 | unsigned long num_physpages; |
41 | unsigned long askedalloc, realalloc; | 41 | unsigned long askedalloc, realalloc; |
42 | atomic_t vm_committed_space = ATOMIC_INIT(0); | 42 | atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0); |
43 | int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ | 43 | int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ |
44 | int sysctl_overcommit_ratio = 50; /* default is 50% */ | 44 | int sysctl_overcommit_ratio = 50; /* default is 50% */ |
45 | int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT; | 45 | int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT; |
@@ -1410,7 +1410,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) | |||
1410 | * cast `allowed' as a signed long because vm_committed_space | 1410 | * cast `allowed' as a signed long because vm_committed_space |
1411 | * sometimes has a negative value | 1411 | * sometimes has a negative value |
1412 | */ | 1412 | */ |
1413 | if (atomic_read(&vm_committed_space) < (long)allowed) | 1413 | if (atomic_long_read(&vm_committed_space) < (long)allowed) |
1414 | return 0; | 1414 | return 0; |
1415 | error: | 1415 | error: |
1416 | vm_unacct_memory(pages); | 1416 | vm_unacct_memory(pages); |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 63835579323a..8e83f02cd2d3 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -1396,6 +1396,9 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order, | |||
1396 | 1396 | ||
1397 | (void)first_zones_zonelist(zonelist, high_zoneidx, nodemask, | 1397 | (void)first_zones_zonelist(zonelist, high_zoneidx, nodemask, |
1398 | &preferred_zone); | 1398 | &preferred_zone); |
1399 | if (!preferred_zone) | ||
1400 | return NULL; | ||
1401 | |||
1399 | classzone_idx = zone_idx(preferred_zone); | 1402 | classzone_idx = zone_idx(preferred_zone); |
1400 | 1403 | ||
1401 | zonelist_scan: | 1404 | zonelist_scan: |
@@ -2804,7 +2807,7 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) | |||
2804 | alloc_size = zone->wait_table_hash_nr_entries | 2807 | alloc_size = zone->wait_table_hash_nr_entries |
2805 | * sizeof(wait_queue_head_t); | 2808 | * sizeof(wait_queue_head_t); |
2806 | 2809 | ||
2807 | if (system_state == SYSTEM_BOOTING) { | 2810 | if (!slab_is_available()) { |
2808 | zone->wait_table = (wait_queue_head_t *) | 2811 | zone->wait_table = (wait_queue_head_t *) |
2809 | alloc_bootmem_node(pgdat, alloc_size); | 2812 | alloc_bootmem_node(pgdat, alloc_size); |
2810 | } else { | 2813 | } else { |
@@ -3378,7 +3381,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat, | |||
3378 | * is used by this zone for memmap. This affects the watermark | 3381 | * is used by this zone for memmap. This affects the watermark |
3379 | * and per-cpu initialisations | 3382 | * and per-cpu initialisations |
3380 | */ | 3383 | */ |
3381 | memmap_pages = (size * sizeof(struct page)) >> PAGE_SHIFT; | 3384 | memmap_pages = |
3385 | PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT; | ||
3382 | if (realsize >= memmap_pages) { | 3386 | if (realsize >= memmap_pages) { |
3383 | realsize -= memmap_pages; | 3387 | realsize -= memmap_pages; |
3384 | printk(KERN_DEBUG | 3388 | printk(KERN_DEBUG |
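PAGE_ALIGN() rounds the memmap size up to whole pages before converting it into a page count, so a zone whose struct page array does not end on a page boundary no longer under-counts the memory reserved for memmap. Worked example, assuming 4 KiB pages and a 56-byte struct page (illustrative; the real size is configuration-dependent):

    size = 1000 pages  ->  1000 x 56 = 56000 bytes of memmap
    old:  56000 >> 12 = 13 pages            (the trailing 2752 bytes are ignored)
    new:  PAGE_ALIGN(56000) = 57344,  57344 >> 12 = 14 pages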
@@ -503,7 +503,7 @@ void vm_acct_memory(long pages) | |||
503 | local = &__get_cpu_var(committed_space); | 503 | local = &__get_cpu_var(committed_space); |
504 | *local += pages; | 504 | *local += pages; |
505 | if (*local > ACCT_THRESHOLD || *local < -ACCT_THRESHOLD) { | 505 | if (*local > ACCT_THRESHOLD || *local < -ACCT_THRESHOLD) { |
506 | atomic_add(*local, &vm_committed_space); | 506 | atomic_long_add(*local, &vm_committed_space); |
507 | *local = 0; | 507 | *local = 0; |
508 | } | 508 | } |
509 | preempt_enable(); | 509 | preempt_enable(); |
@@ -520,7 +520,7 @@ static int cpu_swap_callback(struct notifier_block *nfb, | |||
520 | 520 | ||
521 | committed = &per_cpu(committed_space, (long)hcpu); | 521 | committed = &per_cpu(committed_space, (long)hcpu); |
522 | if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { | 522 | if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { |
523 | atomic_add(*committed, &vm_committed_space); | 523 | atomic_long_add(*committed, &vm_committed_space); |
524 | *committed = 0; | 524 | *committed = 0; |
525 | drain_cpu_pagevecs((long)hcpu); | 525 | drain_cpu_pagevecs((long)hcpu); |
526 | } | 526 | } |
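For context on the lines above: vm_acct_memory() batches changes in a per-CPU counter and only folds them into the global vm_committed_space once they cross ACCT_THRESHOLD, and cpu_swap_callback() drains a CPU's batch when it goes offline; the patch merely widens the fold to atomic_long_add(). A minimal sketch of the batching pattern itself, with illustrative names (local_delta, global_total, THRESHOLD):

    static DEFINE_PER_CPU(long, local_delta);
    static atomic_long_t global_total = ATOMIC_LONG_INIT(0);

    static void account(long pages)
    {
            long *local;

            preempt_disable();                      /* stay on this CPU */
            local = &__get_cpu_var(local_delta);
            *local += pages;
            if (*local > THRESHOLD || *local < -THRESHOLD) {
                    atomic_long_add(*local, &global_total);   /* rare, shared path */
                    *local = 0;
            }
            preempt_enable();
    }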
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 51961300b586..ab2225da0ee2 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c | |||
@@ -387,14 +387,8 @@ static void vlan_transfer_features(struct net_device *dev, | |||
387 | { | 387 | { |
388 | unsigned long old_features = vlandev->features; | 388 | unsigned long old_features = vlandev->features; |
389 | 389 | ||
390 | if (dev->features & NETIF_F_VLAN_TSO) { | 390 | vlandev->features &= ~dev->vlan_features; |
391 | vlandev->features &= ~VLAN_TSO_FEATURES; | 391 | vlandev->features |= dev->features & dev->vlan_features; |
392 | vlandev->features |= dev->features & VLAN_TSO_FEATURES; | ||
393 | } | ||
394 | if (dev->features & NETIF_F_VLAN_CSUM) { | ||
395 | vlandev->features &= ~NETIF_F_ALL_CSUM; | ||
396 | vlandev->features |= dev->features & NETIF_F_ALL_CSUM; | ||
397 | } | ||
398 | 392 | ||
399 | if (old_features != vlandev->features) | 393 | if (old_features != vlandev->features) |
400 | netdev_features_change(vlandev); | 394 | netdev_features_change(vlandev); |
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h index 79625696e86a..5229a72c7ea1 100644 --- a/net/8021q/vlan.h +++ b/net/8021q/vlan.h | |||
@@ -7,8 +7,6 @@ | |||
7 | #define VLAN_GRP_HASH_SIZE (1 << VLAN_GRP_HASH_SHIFT) | 7 | #define VLAN_GRP_HASH_SIZE (1 << VLAN_GRP_HASH_SHIFT) |
8 | #define VLAN_GRP_HASH_MASK (VLAN_GRP_HASH_SIZE - 1) | 8 | #define VLAN_GRP_HASH_MASK (VLAN_GRP_HASH_SIZE - 1) |
9 | 9 | ||
10 | #define VLAN_TSO_FEATURES (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG) | ||
11 | |||
12 | /* Find a VLAN device by the MAC address of its Ethernet device, and | 10 | /* Find a VLAN device by the MAC address of its Ethernet device, and |
13 | * its VLAN ID. The default configuration is to have VLAN's scope | 11 | * its VLAN ID. The default configuration is to have VLAN's scope |
14 | * to be box-wide, so the MAC will be ignored. The mac will only be | 12 | * to be box-wide, so the MAC will be ignored. The mac will only be |
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index b1cfbaa88db2..5d055c242ed8 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c | |||
@@ -663,10 +663,7 @@ static int vlan_dev_init(struct net_device *dev) | |||
663 | (1<<__LINK_STATE_DORMANT))) | | 663 | (1<<__LINK_STATE_DORMANT))) | |
664 | (1<<__LINK_STATE_PRESENT); | 664 | (1<<__LINK_STATE_PRESENT); |
665 | 665 | ||
666 | if (real_dev->features & NETIF_F_VLAN_TSO) | 666 | dev->features |= real_dev->features & real_dev->vlan_features; |
667 | dev->features |= real_dev->features & VLAN_TSO_FEATURES; | ||
668 | if (real_dev->features & NETIF_F_VLAN_CSUM) | ||
669 | dev->features |= real_dev->features & NETIF_F_ALL_CSUM; | ||
670 | 667 | ||
671 | /* ipv6 shared card related stuff */ | 668 | /* ipv6 shared card related stuff */ |
672 | dev->dev_id = real_dev->dev_id; | 669 | dev->dev_id = real_dev->dev_id; |
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 418862f1bf22..9b539fa9fe18 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c | |||
@@ -1288,7 +1288,6 @@ static void arp_format_neigh_entry(struct seq_file *seq, | |||
1288 | struct neighbour *n) | 1288 | struct neighbour *n) |
1289 | { | 1289 | { |
1290 | char hbuffer[HBUFFERLEN]; | 1290 | char hbuffer[HBUFFERLEN]; |
1291 | const char hexbuf[] = "0123456789ABCDEF"; | ||
1292 | int k, j; | 1291 | int k, j; |
1293 | char tbuf[16]; | 1292 | char tbuf[16]; |
1294 | struct net_device *dev = n->dev; | 1293 | struct net_device *dev = n->dev; |
@@ -1302,8 +1301,8 @@ static void arp_format_neigh_entry(struct seq_file *seq, | |||
1302 | else { | 1301 | else { |
1303 | #endif | 1302 | #endif |
1304 | for (k = 0, j = 0; k < HBUFFERLEN - 3 && j < dev->addr_len; j++) { | 1303 | for (k = 0, j = 0; k < HBUFFERLEN - 3 && j < dev->addr_len; j++) { |
1305 | hbuffer[k++] = hexbuf[(n->ha[j] >> 4) & 15]; | 1304 | hbuffer[k++] = hex_asc_hi(n->ha[j]); |
1306 | hbuffer[k++] = hexbuf[n->ha[j] & 15]; | 1305 | hbuffer[k++] = hex_asc_lo(n->ha[j]); |
1307 | hbuffer[k++] = ':'; | 1306 | hbuffer[k++] = ':'; |
1308 | } | 1307 | } |
1309 | hbuffer[--k] = 0; | 1308 | hbuffer[--k] = 0; |
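The open-coded hexbuf lookup table is replaced with the hex_asc_hi()/hex_asc_lo() helpers, which return the ASCII hex digit of a byte's high and low nibble (the kernel's shared hex_asc table is lowercase, unlike the uppercase table removed here). A small standalone illustration of the same formatting, written as plain user-space C rather than kernel code, with the helpers re-defined locally:

    #include <stdio.h>

    #define hex_asc_hi(x)  ("0123456789abcdef"[((x) >> 4) & 0x0f])
    #define hex_asc_lo(x)  ("0123456789abcdef"[(x) & 0x0f])

    int main(void)
    {
            unsigned char ha[6] = { 0x00, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e };
            char buf[3 * sizeof(ha)];
            unsigned int j, k = 0;

            for (j = 0; j < sizeof(ha); j++) {
                    buf[k++] = hex_asc_hi(ha[j]);
                    buf[k++] = hex_asc_lo(ha[j]);
                    buf[k++] = ':';
            }
            buf[--k] = '\0';                /* drop the trailing ':' */
            printf("%s\n", buf);            /* prints 00:1a:2b:3c:4d:5e */
            return 0;
    }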
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 2ada033406de..4342cba4ff82 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -313,9 +313,8 @@ static void ipgre_tunnel_uninit(struct net_device *dev) | |||
313 | 313 | ||
314 | static void ipgre_err(struct sk_buff *skb, u32 info) | 314 | static void ipgre_err(struct sk_buff *skb, u32 info) |
315 | { | 315 | { |
316 | #ifndef I_WISH_WORLD_WERE_PERFECT | ||
317 | 316 | ||
318 | /* It is not :-( All the routers (except for Linux) return only | 317 | /* All the routers (except for Linux) return only |
319 | 8 bytes of packet payload. It means, that precise relaying of | 318 | 8 bytes of packet payload. It means, that precise relaying of |
320 | ICMP in the real Internet is absolutely infeasible. | 319 | ICMP in the real Internet is absolutely infeasible. |
321 | 320 | ||
@@ -398,149 +397,6 @@ static void ipgre_err(struct sk_buff *skb, u32 info) | |||
398 | out: | 397 | out: |
399 | read_unlock(&ipgre_lock); | 398 | read_unlock(&ipgre_lock); |
400 | return; | 399 | return; |
401 | #else | ||
402 | struct iphdr *iph = (struct iphdr*)dp; | ||
403 | struct iphdr *eiph; | ||
404 | __be16 *p = (__be16*)(dp+(iph->ihl<<2)); | ||
405 | const int type = icmp_hdr(skb)->type; | ||
406 | const int code = icmp_hdr(skb)->code; | ||
407 | int rel_type = 0; | ||
408 | int rel_code = 0; | ||
409 | __be32 rel_info = 0; | ||
410 | __u32 n = 0; | ||
411 | __be16 flags; | ||
412 | int grehlen = (iph->ihl<<2) + 4; | ||
413 | struct sk_buff *skb2; | ||
414 | struct flowi fl; | ||
415 | struct rtable *rt; | ||
416 | |||
417 | if (p[1] != htons(ETH_P_IP)) | ||
418 | return; | ||
419 | |||
420 | flags = p[0]; | ||
421 | if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) { | ||
422 | if (flags&(GRE_VERSION|GRE_ROUTING)) | ||
423 | return; | ||
424 | if (flags&GRE_CSUM) | ||
425 | grehlen += 4; | ||
426 | if (flags&GRE_KEY) | ||
427 | grehlen += 4; | ||
428 | if (flags&GRE_SEQ) | ||
429 | grehlen += 4; | ||
430 | } | ||
431 | if (len < grehlen + sizeof(struct iphdr)) | ||
432 | return; | ||
433 | eiph = (struct iphdr*)(dp + grehlen); | ||
434 | |||
435 | switch (type) { | ||
436 | default: | ||
437 | return; | ||
438 | case ICMP_PARAMETERPROB: | ||
439 | n = ntohl(icmp_hdr(skb)->un.gateway) >> 24; | ||
440 | if (n < (iph->ihl<<2)) | ||
441 | return; | ||
442 | |||
443 | /* So... This guy found something strange INSIDE encapsulated | ||
444 | packet. Well, he is fool, but what can we do ? | ||
445 | */ | ||
446 | rel_type = ICMP_PARAMETERPROB; | ||
447 | n -= grehlen; | ||
448 | rel_info = htonl(n << 24); | ||
449 | break; | ||
450 | |||
451 | case ICMP_DEST_UNREACH: | ||
452 | switch (code) { | ||
453 | case ICMP_SR_FAILED: | ||
454 | case ICMP_PORT_UNREACH: | ||
455 | /* Impossible event. */ | ||
456 | return; | ||
457 | case ICMP_FRAG_NEEDED: | ||
458 | /* And it is the only really necessary thing :-) */ | ||
459 | n = ntohs(icmp_hdr(skb)->un.frag.mtu); | ||
460 | if (n < grehlen+68) | ||
461 | return; | ||
462 | n -= grehlen; | ||
463 | /* BSD 4.2 MORE DOES NOT EXIST IN NATURE. */ | ||
464 | if (n > ntohs(eiph->tot_len)) | ||
465 | return; | ||
466 | rel_info = htonl(n); | ||
467 | break; | ||
468 | default: | ||
469 | /* All others are translated to HOST_UNREACH. | ||
470 | rfc2003 contains "deep thoughts" about NET_UNREACH, | ||
471 | I believe, it is just ether pollution. --ANK | ||
472 | */ | ||
473 | rel_type = ICMP_DEST_UNREACH; | ||
474 | rel_code = ICMP_HOST_UNREACH; | ||
475 | break; | ||
476 | } | ||
477 | break; | ||
478 | case ICMP_TIME_EXCEEDED: | ||
479 | if (code != ICMP_EXC_TTL) | ||
480 | return; | ||
481 | break; | ||
482 | } | ||
483 | |||
484 | /* Prepare fake skb to feed it to icmp_send */ | ||
485 | skb2 = skb_clone(skb, GFP_ATOMIC); | ||
486 | if (skb2 == NULL) | ||
487 | return; | ||
488 | dst_release(skb2->dst); | ||
489 | skb2->dst = NULL; | ||
490 | skb_pull(skb2, skb->data - (u8*)eiph); | ||
491 | skb_reset_network_header(skb2); | ||
492 | |||
493 | /* Try to guess incoming interface */ | ||
494 | memset(&fl, 0, sizeof(fl)); | ||
495 | fl.fl4_dst = eiph->saddr; | ||
496 | fl.fl4_tos = RT_TOS(eiph->tos); | ||
497 | fl.proto = IPPROTO_GRE; | ||
498 | if (ip_route_output_key(dev_net(skb->dev), &rt, &fl)) { | ||
499 | kfree_skb(skb2); | ||
500 | return; | ||
501 | } | ||
502 | skb2->dev = rt->u.dst.dev; | ||
503 | |||
504 | /* route "incoming" packet */ | ||
505 | if (rt->rt_flags&RTCF_LOCAL) { | ||
506 | ip_rt_put(rt); | ||
507 | rt = NULL; | ||
508 | fl.fl4_dst = eiph->daddr; | ||
509 | fl.fl4_src = eiph->saddr; | ||
510 | fl.fl4_tos = eiph->tos; | ||
511 | if (ip_route_output_key(dev_net(skb->dev), &rt, &fl) || | ||
512 | rt->u.dst.dev->type != ARPHRD_IPGRE) { | ||
513 | ip_rt_put(rt); | ||
514 | kfree_skb(skb2); | ||
515 | return; | ||
516 | } | ||
517 | } else { | ||
518 | ip_rt_put(rt); | ||
519 | if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, skb2->dev) || | ||
520 | skb2->dst->dev->type != ARPHRD_IPGRE) { | ||
521 | kfree_skb(skb2); | ||
522 | return; | ||
523 | } | ||
524 | } | ||
525 | |||
526 | /* change mtu on this route */ | ||
527 | if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { | ||
528 | if (n > dst_mtu(skb2->dst)) { | ||
529 | kfree_skb(skb2); | ||
530 | return; | ||
531 | } | ||
532 | skb2->dst->ops->update_pmtu(skb2->dst, n); | ||
533 | } else if (type == ICMP_TIME_EXCEEDED) { | ||
534 | struct ip_tunnel *t = netdev_priv(skb2->dev); | ||
535 | if (t->parms.iph.ttl) { | ||
536 | rel_type = ICMP_DEST_UNREACH; | ||
537 | rel_code = ICMP_HOST_UNREACH; | ||
538 | } | ||
539 | } | ||
540 | |||
541 | icmp_send(skb2, rel_type, rel_code, rel_info); | ||
542 | kfree_skb(skb2); | ||
543 | #endif | ||
544 | } | 400 | } |
545 | 401 | ||
546 | static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb) | 402 | static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb) |
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 149111f08e8d..af5cb53da5cc 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c | |||
@@ -278,9 +278,8 @@ static void ipip_tunnel_uninit(struct net_device *dev) | |||
278 | 278 | ||
279 | static int ipip_err(struct sk_buff *skb, u32 info) | 279 | static int ipip_err(struct sk_buff *skb, u32 info) |
280 | { | 280 | { |
281 | #ifndef I_WISH_WORLD_WERE_PERFECT | ||
282 | 281 | ||
283 | /* It is not :-( All the routers (except for Linux) return only | 282 | /* All the routers (except for Linux) return only |
284 | 8 bytes of packet payload. It means, that precise relaying of | 283 | 8 bytes of packet payload. It means, that precise relaying of |
285 | ICMP in the real Internet is absolutely infeasible. | 284 | ICMP in the real Internet is absolutely infeasible. |
286 | */ | 285 | */ |
@@ -337,133 +336,6 @@ static int ipip_err(struct sk_buff *skb, u32 info) | |||
337 | out: | 336 | out: |
338 | read_unlock(&ipip_lock); | 337 | read_unlock(&ipip_lock); |
339 | return err; | 338 | return err; |
340 | #else | ||
341 | struct iphdr *iph = (struct iphdr*)dp; | ||
342 | int hlen = iph->ihl<<2; | ||
343 | struct iphdr *eiph; | ||
344 | const int type = icmp_hdr(skb)->type; | ||
345 | const int code = icmp_hdr(skb)->code; | ||
346 | int rel_type = 0; | ||
347 | int rel_code = 0; | ||
348 | __be32 rel_info = 0; | ||
349 | __u32 n = 0; | ||
350 | struct sk_buff *skb2; | ||
351 | struct flowi fl; | ||
352 | struct rtable *rt; | ||
353 | |||
354 | if (len < hlen + sizeof(struct iphdr)) | ||
355 | return 0; | ||
356 | eiph = (struct iphdr*)(dp + hlen); | ||
357 | |||
358 | switch (type) { | ||
359 | default: | ||
360 | return 0; | ||
361 | case ICMP_PARAMETERPROB: | ||
362 | n = ntohl(icmp_hdr(skb)->un.gateway) >> 24; | ||
363 | if (n < hlen) | ||
364 | return 0; | ||
365 | |||
366 | /* So... This guy found something strange INSIDE encapsulated | ||
367 | packet. Well, he is fool, but what can we do ? | ||
368 | */ | ||
369 | rel_type = ICMP_PARAMETERPROB; | ||
370 | rel_info = htonl((n - hlen) << 24); | ||
371 | break; | ||
372 | |||
373 | case ICMP_DEST_UNREACH: | ||
374 | switch (code) { | ||
375 | case ICMP_SR_FAILED: | ||
376 | case ICMP_PORT_UNREACH: | ||
377 | /* Impossible event. */ | ||
378 | return 0; | ||
379 | case ICMP_FRAG_NEEDED: | ||
380 | /* And it is the only really necessary thing :-) */ | ||
381 | n = ntohs(icmp_hdr(skb)->un.frag.mtu); | ||
382 | if (n < hlen+68) | ||
383 | return 0; | ||
384 | n -= hlen; | ||
385 | /* BSD 4.2 MORE DOES NOT EXIST IN NATURE. */ | ||
386 | if (n > ntohs(eiph->tot_len)) | ||
387 | return 0; | ||
388 | rel_info = htonl(n); | ||
389 | break; | ||
390 | default: | ||
391 | /* All others are translated to HOST_UNREACH. | ||
392 | rfc2003 contains "deep thoughts" about NET_UNREACH, | ||
393 | I believe, it is just ether pollution. --ANK | ||
394 | */ | ||
395 | rel_type = ICMP_DEST_UNREACH; | ||
396 | rel_code = ICMP_HOST_UNREACH; | ||
397 | break; | ||
398 | } | ||
399 | break; | ||
400 | case ICMP_TIME_EXCEEDED: | ||
401 | if (code != ICMP_EXC_TTL) | ||
402 | return 0; | ||
403 | break; | ||
404 | } | ||
405 | |||
406 | /* Prepare fake skb to feed it to icmp_send */ | ||
407 | skb2 = skb_clone(skb, GFP_ATOMIC); | ||
408 | if (skb2 == NULL) | ||
409 | return 0; | ||
410 | dst_release(skb2->dst); | ||
411 | skb2->dst = NULL; | ||
412 | skb_pull(skb2, skb->data - (u8*)eiph); | ||
413 | skb_reset_network_header(skb2); | ||
414 | |||
415 | /* Try to guess incoming interface */ | ||
416 | memset(&fl, 0, sizeof(fl)); | ||
417 | fl.fl4_daddr = eiph->saddr; | ||
418 | fl.fl4_tos = RT_TOS(eiph->tos); | ||
419 | fl.proto = IPPROTO_IPIP; | ||
420 | if (ip_route_output_key(dev_net(skb->dev), &rt, &key)) { | ||
421 | kfree_skb(skb2); | ||
422 | return 0; | ||
423 | } | ||
424 | skb2->dev = rt->u.dst.dev; | ||
425 | |||
426 | /* route "incoming" packet */ | ||
427 | if (rt->rt_flags&RTCF_LOCAL) { | ||
428 | ip_rt_put(rt); | ||
429 | rt = NULL; | ||
430 | fl.fl4_daddr = eiph->daddr; | ||
431 | fl.fl4_src = eiph->saddr; | ||
432 | fl.fl4_tos = eiph->tos; | ||
433 | if (ip_route_output_key(dev_net(skb->dev), &rt, &fl) || | ||
434 | rt->u.dst.dev->type != ARPHRD_TUNNEL) { | ||
435 | ip_rt_put(rt); | ||
436 | kfree_skb(skb2); | ||
437 | return 0; | ||
438 | } | ||
439 | } else { | ||
440 | ip_rt_put(rt); | ||
441 | if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, skb2->dev) || | ||
442 | skb2->dst->dev->type != ARPHRD_TUNNEL) { | ||
443 | kfree_skb(skb2); | ||
444 | return 0; | ||
445 | } | ||
446 | } | ||
447 | |||
448 | /* change mtu on this route */ | ||
449 | if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { | ||
450 | if (n > dst_mtu(skb2->dst)) { | ||
451 | kfree_skb(skb2); | ||
452 | return 0; | ||
453 | } | ||
454 | skb2->dst->ops->update_pmtu(skb2->dst, n); | ||
455 | } else if (type == ICMP_TIME_EXCEEDED) { | ||
456 | struct ip_tunnel *t = netdev_priv(skb2->dev); | ||
457 | if (t->parms.iph.ttl) { | ||
458 | rel_type = ICMP_DEST_UNREACH; | ||
459 | rel_code = ICMP_HOST_UNREACH; | ||
460 | } | ||
461 | } | ||
462 | |||
463 | icmp_send(skb2, rel_type, rel_code, rel_info); | ||
464 | kfree_skb(skb2); | ||
465 | return 0; | ||
466 | #endif | ||
467 | } | 339 | } |
468 | 340 | ||
469 | static inline void ipip_ecn_decapsulate(const struct iphdr *outer_iph, | 341 | static inline void ipip_ecn_decapsulate(const struct iphdr *outer_iph, |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index debf23581606..e399bde7813a 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -1836,7 +1836,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) | |||
1836 | { | 1836 | { |
1837 | struct tcp_sock *tp = tcp_sk(sk); | 1837 | struct tcp_sock *tp = tcp_sk(sk); |
1838 | struct inet_connection_sock *icsk = inet_csk(sk); | 1838 | struct inet_connection_sock *icsk = inet_csk(sk); |
1839 | unsigned int cur_mss = tcp_current_mss(sk, 0); | 1839 | unsigned int cur_mss; |
1840 | int err; | 1840 | int err; |
1841 | 1841 | ||
1842 | /* Inconclusive MTU probe */ | 1842 | /* Inconclusive MTU probe */ |
@@ -1858,6 +1858,11 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) | |||
1858 | return -ENOMEM; | 1858 | return -ENOMEM; |
1859 | } | 1859 | } |
1860 | 1860 | ||
1861 | if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) | ||
1862 | return -EHOSTUNREACH; /* Routing failure or similar. */ | ||
1863 | |||
1864 | cur_mss = tcp_current_mss(sk, 0); | ||
1865 | |||
1861 | /* If receiver has shrunk his window, and skb is out of | 1866 | /* If receiver has shrunk his window, and skb is out of |
1862 | * new window, do not retransmit it. The exception is the | 1867 | * new window, do not retransmit it. The exception is the |
1863 | * case, when window is shrunk to zero. In this case | 1868 | * case, when window is shrunk to zero. In this case |
@@ -1884,9 +1889,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) | |||
1884 | (sysctl_tcp_retrans_collapse != 0)) | 1889 | (sysctl_tcp_retrans_collapse != 0)) |
1885 | tcp_retrans_try_collapse(sk, skb, cur_mss); | 1890 | tcp_retrans_try_collapse(sk, skb, cur_mss); |
1886 | 1891 | ||
1887 | if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) | ||
1888 | return -EHOSTUNREACH; /* Routing failure or similar. */ | ||
1889 | |||
1890 | /* Some Solaris stacks overoptimize and ignore the FIN on a | 1892 | /* Some Solaris stacks overoptimize and ignore the FIN on a |
1891 | * retransmit when old data is attached. So strip it off | 1893 | * retransmit when old data is attached. So strip it off |
1892 | * since it is cheap to do so and saves bytes on the network. | 1894 | * since it is cheap to do so and saves bytes on the network. |
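tcp_retransmit_skb() now re-validates the route via rebuild_header() before computing cur_mss, since a routing change can alter the path and therefore the current MSS; fragmenting or collapsing the skb against a stale MSS was the problem being addressed. Simplified ordering sketch of the new flow:

    /* tcp_retransmit_skb(), simplified:
     *   1. give up early under memory pressure
     *   2. icsk_af_ops->rebuild_header(sk)    -- may fail: -EHOSTUNREACH
     *   3. cur_mss = tcp_current_mss(sk, 0)   -- reflects the rebuilt route
     *   4. window check, tcp_fragment()/tcp_retrans_try_collapse() with cur_mss
     */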
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 5a6fab95569f..3de6ffdaedf2 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -403,9 +403,8 @@ static void ipip6_tunnel_uninit(struct net_device *dev) | |||
403 | 403 | ||
404 | static int ipip6_err(struct sk_buff *skb, u32 info) | 404 | static int ipip6_err(struct sk_buff *skb, u32 info) |
405 | { | 405 | { |
406 | #ifndef I_WISH_WORLD_WERE_PERFECT | ||
407 | 406 | ||
408 | /* It is not :-( All the routers (except for Linux) return only | 407 | /* All the routers (except for Linux) return only |
409 | 8 bytes of packet payload. It means, that precise relaying of | 408 | 8 bytes of packet payload. It means, that precise relaying of |
410 | ICMP in the real Internet is absolutely infeasible. | 409 | ICMP in the real Internet is absolutely infeasible. |
411 | */ | 410 | */ |
@@ -462,92 +461,6 @@ static int ipip6_err(struct sk_buff *skb, u32 info) | |||
462 | out: | 461 | out: |
463 | read_unlock(&ipip6_lock); | 462 | read_unlock(&ipip6_lock); |
464 | return err; | 463 | return err; |
465 | #else | ||
466 | struct iphdr *iph = (struct iphdr*)dp; | ||
467 | int hlen = iph->ihl<<2; | ||
468 | struct ipv6hdr *iph6; | ||
469 | const int type = icmp_hdr(skb)->type; | ||
470 | const int code = icmp_hdr(skb)->code; | ||
471 | int rel_type = 0; | ||
472 | int rel_code = 0; | ||
473 | int rel_info = 0; | ||
474 | struct sk_buff *skb2; | ||
475 | struct rt6_info *rt6i; | ||
476 | |||
477 | if (len < hlen + sizeof(struct ipv6hdr)) | ||
478 | return; | ||
479 | iph6 = (struct ipv6hdr*)(dp + hlen); | ||
480 | |||
481 | switch (type) { | ||
482 | default: | ||
483 | return; | ||
484 | case ICMP_PARAMETERPROB: | ||
485 | if (icmp_hdr(skb)->un.gateway < hlen) | ||
486 | return; | ||
487 | |||
488 | /* So... This guy found something strange INSIDE encapsulated | ||
489 | packet. Well, he is fool, but what can we do ? | ||
490 | */ | ||
491 | rel_type = ICMPV6_PARAMPROB; | ||
492 | rel_info = icmp_hdr(skb)->un.gateway - hlen; | ||
493 | break; | ||
494 | |||
495 | case ICMP_DEST_UNREACH: | ||
496 | switch (code) { | ||
497 | case ICMP_SR_FAILED: | ||
498 | case ICMP_PORT_UNREACH: | ||
499 | /* Impossible event. */ | ||
500 | return; | ||
501 | case ICMP_FRAG_NEEDED: | ||
502 | /* Too complicated case ... */ | ||
503 | return; | ||
504 | default: | ||
505 | /* All others are translated to HOST_UNREACH. | ||
506 | rfc2003 contains "deep thoughts" about NET_UNREACH, | ||
507 | I believe, it is just ether pollution. --ANK | ||
508 | */ | ||
509 | rel_type = ICMPV6_DEST_UNREACH; | ||
510 | rel_code = ICMPV6_ADDR_UNREACH; | ||
511 | break; | ||
512 | } | ||
513 | break; | ||
514 | case ICMP_TIME_EXCEEDED: | ||
515 | if (code != ICMP_EXC_TTL) | ||
516 | return; | ||
517 | rel_type = ICMPV6_TIME_EXCEED; | ||
518 | rel_code = ICMPV6_EXC_HOPLIMIT; | ||
519 | break; | ||
520 | } | ||
521 | |||
522 | /* Prepare fake skb to feed it to icmpv6_send */ | ||
523 | skb2 = skb_clone(skb, GFP_ATOMIC); | ||
524 | if (skb2 == NULL) | ||
525 | return 0; | ||
526 | dst_release(skb2->dst); | ||
527 | skb2->dst = NULL; | ||
528 | skb_pull(skb2, skb->data - (u8*)iph6); | ||
529 | skb_reset_network_header(skb2); | ||
530 | |||
531 | /* Try to guess incoming interface */ | ||
532 | rt6i = rt6_lookup(dev_net(skb->dev), &iph6->saddr, NULL, NULL, 0); | ||
533 | if (rt6i && rt6i->rt6i_dev) { | ||
534 | skb2->dev = rt6i->rt6i_dev; | ||
535 | |||
536 | rt6i = rt6_lookup(dev_net(skb->dev), | ||
537 | &iph6->daddr, &iph6->saddr, NULL, 0); | ||
538 | |||
539 | if (rt6i && rt6i->rt6i_dev && rt6i->rt6i_dev->type == ARPHRD_SIT) { | ||
540 | struct ip_tunnel *t = netdev_priv(rt6i->rt6i_dev); | ||
541 | if (rel_type == ICMPV6_TIME_EXCEED && t->parms.iph.ttl) { | ||
542 | rel_type = ICMPV6_DEST_UNREACH; | ||
543 | rel_code = ICMPV6_ADDR_UNREACH; | ||
544 | } | ||
545 | icmpv6_send(skb2, rel_type, rel_code, rel_info, skb2->dev); | ||
546 | } | ||
547 | } | ||
548 | kfree_skb(skb2); | ||
549 | return 0; | ||
550 | #endif | ||
551 | } | 464 | } |
552 | 465 | ||
553 | static inline void ipip6_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb) | 466 | static inline void ipip6_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb) |
diff --git a/net/key/af_key.c b/net/key/af_key.c index 9e7236ff6bcc..9bba7ac5fee0 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -1251,7 +1251,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct sadb_msg *hdr, | |||
1251 | x->sel.prefixlen_s = addr->sadb_address_prefixlen; | 1251 | x->sel.prefixlen_s = addr->sadb_address_prefixlen; |
1252 | } | 1252 | } |
1253 | 1253 | ||
1254 | if (x->props.mode == XFRM_MODE_TRANSPORT) | 1254 | if (!x->sel.family) |
1255 | x->sel.family = x->props.family; | 1255 | x->sel.family = x->props.family; |
1256 | 1256 | ||
1257 | if (ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1]) { | 1257 | if (ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1]) { |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index e470bf12b765..7cfd12e0d1e2 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -730,7 +730,17 @@ static void ieee80211_send_assoc(struct net_device *dev, | |||
730 | if (bss->wmm_ie) { | 730 | if (bss->wmm_ie) { |
731 | wmm = 1; | 731 | wmm = 1; |
732 | } | 732 | } |
733 | |||
734 | /* get all rates supported by the device and the AP as | ||
735 | * some APs don't like getting a superset of their rates | ||
736 | * in the association request (e.g. D-Link DAP 1353 in | ||
737 | * b-only mode) */ | ||
738 | rates_len = ieee80211_compatible_rates(bss, sband, &rates); | ||
739 | |||
733 | ieee80211_rx_bss_put(dev, bss); | 740 | ieee80211_rx_bss_put(dev, bss); |
741 | } else { | ||
742 | rates = ~0; | ||
743 | rates_len = sband->n_bitrates; | ||
734 | } | 744 | } |
735 | 745 | ||
736 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); | 746 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); |
@@ -761,10 +771,7 @@ static void ieee80211_send_assoc(struct net_device *dev, | |||
761 | *pos++ = ifsta->ssid_len; | 771 | *pos++ = ifsta->ssid_len; |
762 | memcpy(pos, ifsta->ssid, ifsta->ssid_len); | 772 | memcpy(pos, ifsta->ssid, ifsta->ssid_len); |
763 | 773 | ||
764 | /* all supported rates should be added here but some APs | 774 | /* add all rates which were marked to be used above */ |
765 | * (e.g. D-Link DAP 1353 in b-only mode) don't like that | ||
766 | * Therefore only add rates the AP supports */ | ||
767 | rates_len = ieee80211_compatible_rates(bss, sband, &rates); | ||
768 | supp_rates_len = rates_len; | 775 | supp_rates_len = rates_len; |
769 | if (supp_rates_len > 8) | 776 | if (supp_rates_len > 8) |
770 | supp_rates_len = 8; | 777 | supp_rates_len = 8; |
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c index 76e1de1dc735..457ebf9e85ae 100644 --- a/net/mac80211/wext.c +++ b/net/mac80211/wext.c | |||
@@ -209,7 +209,6 @@ static int ieee80211_ioctl_giwrange(struct net_device *dev, | |||
209 | range->num_frequency = c; | 209 | range->num_frequency = c; |
210 | 210 | ||
211 | IW_EVENT_CAPA_SET_KERNEL(range->event_capa); | 211 | IW_EVENT_CAPA_SET_KERNEL(range->event_capa); |
212 | IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWTHRSPY); | ||
213 | IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP); | 212 | IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP); |
214 | IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN); | 213 | IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN); |
215 | 214 | ||
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index a1b0fbe3ea35..b976d9ed10e4 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -50,19 +50,8 @@ static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type) | |||
50 | 50 | ||
51 | switch (type) { | 51 | switch (type) { |
52 | case XFRMA_ALG_AUTH: | 52 | case XFRMA_ALG_AUTH: |
53 | if (!algp->alg_key_len && | ||
54 | strcmp(algp->alg_name, "digest_null") != 0) | ||
55 | return -EINVAL; | ||
56 | break; | ||
57 | |||
58 | case XFRMA_ALG_CRYPT: | 53 | case XFRMA_ALG_CRYPT: |
59 | if (!algp->alg_key_len && | ||
60 | strcmp(algp->alg_name, "cipher_null") != 0) | ||
61 | return -EINVAL; | ||
62 | break; | ||
63 | |||
64 | case XFRMA_ALG_COMP: | 54 | case XFRMA_ALG_COMP: |
65 | /* Zero length keys are legal. */ | ||
66 | break; | 55 | break; |
67 | 56 | ||
68 | default: | 57 | default: |
diff --git a/scripts/ver_linux b/scripts/ver_linux index ab69ecefedbd..7ac0e309be09 100755 --- a/scripts/ver_linux +++ b/scripts/ver_linux | |||
@@ -12,12 +12,9 @@ echo ' ' | |||
12 | uname -a | 12 | uname -a |
13 | echo ' ' | 13 | echo ' ' |
14 | 14 | ||
15 | gcc --version 2>&1| head -n 1 | grep -v gcc | awk \ | 15 | gcc -dumpversion 2>&1| awk \ |
16 | 'NR==1{print "Gnu C ", $1}' | 16 | 'NR==1{print "Gnu C ", $1}' |
17 | 17 | ||
18 | gcc --version 2>&1| grep gcc | awk \ | ||
19 | 'NR==1{print "Gnu C ", $3}' | ||
20 | |||
21 | make --version 2>&1 | awk -F, '{print $1}' | awk \ | 18 | make --version 2>&1 | awk -F, '{print $1}' | awk \ |
22 | '/GNU Make/{print "Gnu make ",$NF}' | 19 | '/GNU Make/{print "Gnu make ",$NF}' |
23 | 20 | ||
diff --git a/sound/drivers/pcsp/pcsp.h b/sound/drivers/pcsp/pcsp.h index f07cc1ee1fe7..1d661f795e8c 100644 --- a/sound/drivers/pcsp/pcsp.h +++ b/sound/drivers/pcsp/pcsp.h | |||
@@ -24,7 +24,8 @@ static DEFINE_SPINLOCK(i8253_lock); | |||
24 | /* default timer freq for PC-Speaker: 18643 Hz */ | 24 | /* default timer freq for PC-Speaker: 18643 Hz */ |
25 | #define DIV_18KHZ 64 | 25 | #define DIV_18KHZ 64 |
26 | #define MAX_DIV DIV_18KHZ | 26 | #define MAX_DIV DIV_18KHZ |
27 | #define CUR_DIV() (MAX_DIV >> chip->treble) | 27 | #define CALC_DIV(d) (MAX_DIV >> (d)) |
28 | #define CUR_DIV() CALC_DIV(chip->treble) | ||
28 | #define PCSP_MAX_TREBLE 1 | 29 | #define PCSP_MAX_TREBLE 1 |
29 | 30 | ||
30 | /* unfortunately, with hrtimers 37KHz does not work very well :( */ | 31 | /* unfortunately, with hrtimers 37KHz does not work very well :( */ |
@@ -36,7 +37,8 @@ static DEFINE_SPINLOCK(i8253_lock); | |||
36 | #define PCSP_DEFAULT_SDIV (DIV_18KHZ >> 1) | 37 | #define PCSP_DEFAULT_SDIV (DIV_18KHZ >> 1) |
37 | #define PCSP_DEFAULT_SRATE (PIT_TICK_RATE / PCSP_DEFAULT_SDIV) | 38 | #define PCSP_DEFAULT_SRATE (PIT_TICK_RATE / PCSP_DEFAULT_SDIV) |
38 | #define PCSP_INDEX_INC() (1 << (PCSP_MAX_TREBLE - chip->treble)) | 39 | #define PCSP_INDEX_INC() (1 << (PCSP_MAX_TREBLE - chip->treble)) |
39 | #define PCSP_RATE() (PIT_TICK_RATE / CUR_DIV()) | 40 | #define PCSP_CALC_RATE(i) (PIT_TICK_RATE / CALC_DIV(i)) |
41 | #define PCSP_RATE() PCSP_CALC_RATE(chip->treble) | ||
40 | #define PCSP_MIN_RATE__1 MAX_DIV/PIT_TICK_RATE | 42 | #define PCSP_MIN_RATE__1 MAX_DIV/PIT_TICK_RATE |
41 | #define PCSP_MAX_RATE__1 MIN_DIV/PIT_TICK_RATE | 43 | #define PCSP_MAX_RATE__1 MIN_DIV/PIT_TICK_RATE |
42 | #define PCSP_MAX_PERIOD_NS (1000000000ULL * PCSP_MIN_RATE__1) | 44 | #define PCSP_MAX_PERIOD_NS (1000000000ULL * PCSP_MIN_RATE__1) |
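Parameterising the divisor math is the point of this hunk: CALC_DIV(d) and PCSP_CALC_RATE(i) take the treble level as an argument, while the old CUR_DIV()/PCSP_RATE() names survive as thin wrappers around chip->treble, so existing callers are untouched and new callers can evaluate the rate for any level. The toy program below reproduces the arithmetic in user space; PIT_TICK_RATE is the i8253 input clock, 1193182 Hz, the value behind the 18643 Hz figure quoted in the header comment.

#include <stdio.h>

#define PIT_TICK_RATE	1193182			/* i8253 input clock, Hz */
#define DIV_18KHZ	64
#define MAX_DIV		DIV_18KHZ
#define CALC_DIV(d)	(MAX_DIV >> (d))	/* divisor for treble level d */
#define PCSP_CALC_RATE(i)	(PIT_TICK_RATE / CALC_DIV(i))

int main(void)
{
	int treble;

	for (treble = 0; treble <= 1; treble++)	/* PCSP_MAX_TREBLE is 1 */
		printf("treble %d: divisor %d, rate %d Hz\n",
		       treble, CALC_DIV(treble), PCSP_CALC_RATE(treble));
	return 0;
}

Level 0 yields the classic 18643 Hz PC-speaker rate; level 1 halves the divisor and roughly doubles the rate.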
diff --git a/sound/drivers/pcsp/pcsp_mixer.c b/sound/drivers/pcsp/pcsp_mixer.c index 64a695fef74e..caeb0f57fcca 100644 --- a/sound/drivers/pcsp/pcsp_mixer.c +++ b/sound/drivers/pcsp/pcsp_mixer.c | |||
@@ -50,7 +50,8 @@ static int pcsp_treble_info(struct snd_kcontrol *kcontrol, | |||
50 | uinfo->value.enumerated.items = chip->max_treble + 1; | 50 | uinfo->value.enumerated.items = chip->max_treble + 1; |
51 | if (uinfo->value.enumerated.item > chip->max_treble) | 51 | if (uinfo->value.enumerated.item > chip->max_treble) |
52 | uinfo->value.enumerated.item = chip->max_treble; | 52 | uinfo->value.enumerated.item = chip->max_treble; |
53 | sprintf(uinfo->value.enumerated.name, "%d", PCSP_RATE()); | 53 | sprintf(uinfo->value.enumerated.name, "%d", |
54 | PCSP_CALC_RATE(uinfo->value.enumerated.item)); | ||
54 | return 0; | 55 | return 0; |
55 | } | 56 | } |
56 | 57 | ||
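The mixer now uses the parameterised macro so that each item of the treble enum is labelled with the rate that item would select, instead of every item showing the currently programmed rate as the old PCSP_RATE() call did. For context, a plausible shape of the whole callback around the quoted lines is sketched below; the uinfo->type and uinfo->count assignments are assumptions, and only the lines visible in the hunk are verbatim.

static int pcsp_treble_info(struct snd_kcontrol *kcontrol,
			    struct snd_ctl_elem_info *uinfo)
{
	struct snd_pcsp *chip = snd_kcontrol_chip(kcontrol);

	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;	/* assumed */
	uinfo->count = 1;				/* assumed */
	uinfo->value.enumerated.items = chip->max_treble + 1;
	if (uinfo->value.enumerated.item > chip->max_treble)
		uinfo->value.enumerated.item = chip->max_treble;
	/* label the item with the rate it selects, not the rate in use now */
	sprintf(uinfo->value.enumerated.name, "%d",
		PCSP_CALC_RATE(uinfo->value.enumerated.item));
	return 0;
}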
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c index e0a605adde42..ff1b922c610b 100644 --- a/sound/pci/hda/patch_analog.c +++ b/sound/pci/hda/patch_analog.c | |||
@@ -2858,6 +2858,7 @@ static const char *ad1988_models[AD1988_MODEL_LAST] = { | |||
2858 | static struct snd_pci_quirk ad1988_cfg_tbl[] = { | 2858 | static struct snd_pci_quirk ad1988_cfg_tbl[] = { |
2859 | SND_PCI_QUIRK(0x1043, 0x81ec, "Asus P5B-DLX", AD1988_6STACK_DIG), | 2859 | SND_PCI_QUIRK(0x1043, 0x81ec, "Asus P5B-DLX", AD1988_6STACK_DIG), |
2860 | SND_PCI_QUIRK(0x1043, 0x81f6, "Asus M2N-SLI", AD1988_6STACK_DIG), | 2860 | SND_PCI_QUIRK(0x1043, 0x81f6, "Asus M2N-SLI", AD1988_6STACK_DIG), |
2861 | SND_PCI_QUIRK(0x1043, 0x8277, "Asus P5K-E/WIFI-AP", AD1988_6STACK_DIG), | ||
2861 | {} | 2862 | {} |
2862 | }; | 2863 | }; |
2863 | 2864 | ||
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 864b2f598c38..8f31247c52bd 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -853,6 +853,7 @@ do_sku: | |||
853 | case 0x10ec0269: | 853 | case 0x10ec0269: |
854 | case 0x10ec0862: | 854 | case 0x10ec0862: |
855 | case 0x10ec0662: | 855 | case 0x10ec0662: |
856 | case 0x10ec0889: | ||
856 | snd_hda_codec_write(codec, 0x14, 0, | 857 | snd_hda_codec_write(codec, 0x14, 0, |
857 | AC_VERB_SET_EAPD_BTLENABLE, 2); | 858 | AC_VERB_SET_EAPD_BTLENABLE, 2); |
858 | snd_hda_codec_write(codec, 0x15, 0, | 859 | snd_hda_codec_write(codec, 0x15, 0, |
@@ -877,6 +878,7 @@ do_sku: | |||
877 | case 0x10ec0883: | 878 | case 0x10ec0883: |
878 | case 0x10ec0885: | 879 | case 0x10ec0885: |
879 | case 0x10ec0888: | 880 | case 0x10ec0888: |
881 | case 0x10ec0889: | ||
880 | snd_hda_codec_write(codec, 0x20, 0, | 882 | snd_hda_codec_write(codec, 0x20, 0, |
881 | AC_VERB_SET_COEF_INDEX, 7); | 883 | AC_VERB_SET_COEF_INDEX, 7); |
882 | tmp = snd_hda_codec_read(codec, 0x20, 0, | 884 | tmp = snd_hda_codec_read(codec, 0x20, 0, |
@@ -7743,6 +7745,7 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = { | |||
7743 | SND_PCI_QUIRK(0x103c, 0x2a60, "HP Lucknow", ALC888_3ST_HP), | 7745 | SND_PCI_QUIRK(0x103c, 0x2a60, "HP Lucknow", ALC888_3ST_HP), |
7744 | SND_PCI_QUIRK(0x103c, 0x2a61, "HP Nettle", ALC883_6ST_DIG), | 7746 | SND_PCI_QUIRK(0x103c, 0x2a61, "HP Nettle", ALC883_6ST_DIG), |
7745 | SND_PCI_QUIRK(0x1043, 0x8249, "Asus M2A-VM HDMI", ALC883_3ST_6ch_DIG), | 7747 | SND_PCI_QUIRK(0x1043, 0x8249, "Asus M2A-VM HDMI", ALC883_3ST_6ch_DIG), |
7748 | SND_PCI_QUIRK(0x105b, 0x0ce8, "Foxconn P35AX-S", ALC883_6ST_DIG), | ||
7746 | SND_PCI_QUIRK(0x105b, 0x6668, "Foxconn", ALC883_6ST_DIG), | 7749 | SND_PCI_QUIRK(0x105b, 0x6668, "Foxconn", ALC883_6ST_DIG), |
7747 | SND_PCI_QUIRK(0x1071, 0x8253, "Mitac 8252d", ALC883_MITAC), | 7750 | SND_PCI_QUIRK(0x1071, 0x8253, "Mitac 8252d", ALC883_MITAC), |
7748 | SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC883_LAPTOP_EAPD), | 7751 | SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC883_LAPTOP_EAPD), |
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 393f7fd2b1be..a4f44a00bae8 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c | |||
@@ -840,7 +840,7 @@ static struct snd_kcontrol_new stac92hd71bxx_mixer[] = { | |||
840 | static struct snd_kcontrol_new stac925x_mixer[] = { | 840 | static struct snd_kcontrol_new stac925x_mixer[] = { |
841 | STAC_INPUT_SOURCE(1), | 841 | STAC_INPUT_SOURCE(1), |
842 | HDA_CODEC_VOLUME("Capture Volume", 0x09, 0, HDA_OUTPUT), | 842 | HDA_CODEC_VOLUME("Capture Volume", 0x09, 0, HDA_OUTPUT), |
843 | HDA_CODEC_MUTE("Capture Switch", 0x09, 0, HDA_OUTPUT), | 843 | HDA_CODEC_MUTE("Capture Switch", 0x14, 0, HDA_OUTPUT), |
844 | HDA_CODEC_VOLUME("Capture Mux Volume", 0x0f, 0, HDA_OUTPUT), | 844 | HDA_CODEC_VOLUME("Capture Mux Volume", 0x0f, 0, HDA_OUTPUT), |
845 | { } /* end */ | 845 | { } /* end */ |
846 | }; | 846 | }; |
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c index 52b1d81a26f7..e7e43524f8c7 100644 --- a/sound/pci/hda/patch_via.c +++ b/sound/pci/hda/patch_via.c | |||
@@ -447,6 +447,23 @@ static struct hda_pcm_stream vt1708_pcm_analog_playback = { | |||
447 | }, | 447 | }, |
448 | }; | 448 | }; |
449 | 449 | ||
450 | static struct hda_pcm_stream vt1708_pcm_analog_s16_playback = { | ||
451 | .substreams = 1, | ||
452 | .channels_min = 2, | ||
453 | .channels_max = 8, | ||
454 | .nid = 0x10, /* NID to query formats and rates */ | ||
455 | /* We got noisy outputs on the right channel on VT1708 when | ||
456 | * 24bit samples are used. Until any workaround is found, | ||
457 | * disable the 24bit format, so far. | ||
458 | */ | ||
459 | .formats = SNDRV_PCM_FMTBIT_S16_LE, | ||
460 | .ops = { | ||
461 | .open = via_playback_pcm_open, | ||
462 | .prepare = via_playback_pcm_prepare, | ||
463 | .cleanup = via_playback_pcm_cleanup | ||
464 | }, | ||
465 | }; | ||
466 | |||
450 | static struct hda_pcm_stream vt1708_pcm_analog_capture = { | 467 | static struct hda_pcm_stream vt1708_pcm_analog_capture = { |
451 | .substreams = 2, | 468 | .substreams = 2, |
452 | .channels_min = 2, | 469 | .channels_min = 2, |
@@ -899,6 +916,9 @@ static int patch_vt1708(struct hda_codec *codec) | |||
899 | 916 | ||
900 | spec->stream_name_analog = "VT1708 Analog"; | 917 | spec->stream_name_analog = "VT1708 Analog"; |
901 | spec->stream_analog_playback = &vt1708_pcm_analog_playback; | 918 | spec->stream_analog_playback = &vt1708_pcm_analog_playback; |
919 | /* disable 32bit format on VT1708 */ | ||
920 | if (codec->vendor_id == 0x11061708) | ||
921 | spec->stream_analog_playback = &vt1708_pcm_analog_s16_playback; | ||
902 | spec->stream_analog_capture = &vt1708_pcm_analog_capture; | 922 | spec->stream_analog_capture = &vt1708_pcm_analog_capture; |
903 | 923 | ||
904 | spec->stream_name_digital = "VT1708 Digital"; | 924 | spec->stream_name_digital = "VT1708 Digital"; |
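The VIA change works around noisy right-channel output with high-resolution samples on the VT1708 by adding a second playback stream descriptor that advertises only SNDRV_PCM_FMTBIT_S16_LE and swapping it in when the codec's vendor_id is 0x11061708, so other codecs going through this patch function keep the unrestricted stream. Because user space can only negotiate formats present in the advertised mask, clearing the wider bits hides the problematic modes entirely; the little demo below shows the effect of such a restriction on a format bitmask (the FMTBIT_* values are local stand-ins, not quoted from the ALSA headers).

#include <stdio.h>

/* stand-ins for SNDRV_PCM_FMTBIT_*; treat the exact bit positions as assumptions */
#define FMTBIT_S16_LE	(1ULL << 2)
#define FMTBIT_S24_LE	(1ULL << 6)
#define FMTBIT_S32_LE	(1ULL << 10)

int main(void)
{
	unsigned long long full = FMTBIT_S16_LE | FMTBIT_S24_LE | FMTBIT_S32_LE;
	unsigned long long restricted = FMTBIT_S16_LE;	/* the new stream's .formats */

	/* anything absent from the advertised mask can never be opened,
	 * so the noisy 24/32-bit modes simply disappear from user space */
	printf("full 0x%llx  restricted 0x%llx  removed 0x%llx\n",
	       full, restricted, full & ~restricted);
	return 0;
}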