 72 files changed, 541 insertions(+), 228 deletions(-)
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index d3d653a5f9b9..bfe924217f24 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -346,7 +346,7 @@ tcp_orphan_retries - INTEGER
 	when RTO retransmissions remain unacknowledged.
 	See tcp_retries2 for more details.
 
-	The default value is 7.
+	The default value is 8.
 	If your machine is a loaded WEB server,
 	you should think about lowering this value, such sockets
 	may consume significant resources. Cf. tcp_max_orphans.
diff --git a/arch/arm/mach-davinci/irq.c b/arch/arm/mach-davinci/irq.c
index bfe68ec4e1a6..d8c1af025931 100644
--- a/arch/arm/mach-davinci/irq.c
+++ b/arch/arm/mach-davinci/irq.c
@@ -53,7 +53,7 @@ davinci_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
 
 	gc = irq_alloc_generic_chip("AINTC", 1, irq_start, base, handle_edge_irq);
 	ct = gc->chip_types;
-	ct->chip.irq_ack = irq_gc_ack;
+	ct->chip.irq_ack = irq_gc_ack_set_bit;
 	ct->chip.irq_mask = irq_gc_mask_clr_bit;
 	ct->chip.irq_unmask = irq_gc_mask_set_bit;
 
diff --git a/arch/arm/mach-mmp/pxa168.c b/arch/arm/mach-mmp/pxa168.c
index 72b4e7631583..ab9f999106c7 100644
--- a/arch/arm/mach-mmp/pxa168.c
+++ b/arch/arm/mach-mmp/pxa168.c
@@ -79,7 +79,7 @@ static APBC_CLK(ssp4, PXA168_SSP4, 4, 0);
 static APBC_CLK(ssp5, PXA168_SSP5, 4, 0);
 static APBC_CLK(keypad, PXA168_KPC, 0, 32000);
 
-static APMU_CLK(nand, NAND, 0x01db, 208000000);
+static APMU_CLK(nand, NAND, 0x19b, 156000000);
 static APMU_CLK(lcd, LCD, 0x7f, 312000000);
 
 /* device and clock bindings */
diff --git a/arch/arm/mach-mmp/pxa910.c b/arch/arm/mach-mmp/pxa910.c
index 8f92ccd26edf..1464607aa60d 100644
--- a/arch/arm/mach-mmp/pxa910.c
+++ b/arch/arm/mach-mmp/pxa910.c
@@ -110,7 +110,7 @@ static APBC_CLK(pwm2, PXA910_PWM2, 1, 13000000);
 static APBC_CLK(pwm3, PXA910_PWM3, 1, 13000000);
 static APBC_CLK(pwm4, PXA910_PWM4, 1, 13000000);
 
-static APMU_CLK(nand, NAND, 0x01db, 208000000);
+static APMU_CLK(nand, NAND, 0x19b, 156000000);
 static APMU_CLK(u2o, USB, 0x1b, 480000000);
 
 /* device and clock bindings */
diff --git a/arch/arm/mach-pxa/mfp-pxa2xx.c b/arch/arm/mach-pxa/mfp-pxa2xx.c
index 87ae3129f4f7..b27544bcafcb 100644
--- a/arch/arm/mach-pxa/mfp-pxa2xx.c
+++ b/arch/arm/mach-pxa/mfp-pxa2xx.c
@@ -347,9 +347,9 @@ static int pxa2xx_mfp_suspend(void)
 		if ((gpio_desc[i].config & MFP_LPM_KEEP_OUTPUT) &&
 		    (GPDR(i) & GPIO_bit(i))) {
 			if (GPLR(i) & GPIO_bit(i))
-				PGSR(i) |= GPIO_bit(i);
+				PGSR(gpio_to_bank(i)) |= GPIO_bit(i);
 			else
-				PGSR(i) &= ~GPIO_bit(i);
+				PGSR(gpio_to_bank(i)) &= ~GPIO_bit(i);
 		}
 	}
 
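The PGSR fix above matters because, on PXA, PGSR is one 32-bit sleep-state register per bank of 32 GPIOs: the register must be selected by bank while the bit mask still comes from the GPIO number. Below is a minimal plain-C sketch of that indexing pattern only; the array, names and sizes are illustrative, not the kernel's.

#include <stdint.h>

#define GPIOS_PER_BANK 32
static uint32_t pgsr_shadow[4];   /* hypothetical: one register image per bank */

static unsigned gpio_bank(unsigned gpio) { return gpio / GPIOS_PER_BANK; }
static uint32_t gpio_bit(unsigned gpio)  { return 1u << (gpio % GPIOS_PER_BANK); }

static void set_sleep_level(unsigned gpio, int high)
{
        /* Indexing by gpio instead of gpio_bank(gpio) would run past the
         * per-bank array for any gpio >= 4 -- the bug fixed above. */
        if (high)
                pgsr_shadow[gpio_bank(gpio)] |= gpio_bit(gpio);
        else
                pgsr_shadow[gpio_bank(gpio)] &= ~gpio_bit(gpio);
}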
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index d130f77b6d11..2f37d43f51b6 100644
--- a/arch/arm/mach-pxa/raumfeld.c
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -573,10 +573,10 @@ static struct pxafb_mode_info sharp_lq043t3dx02_mode = {
 	.xres = 480,
 	.yres = 272,
 	.bpp = 16,
-	.hsync_len = 4,
+	.hsync_len = 41,
 	.left_margin = 2,
 	.right_margin = 1,
-	.vsync_len = 1,
+	.vsync_len = 10,
 	.upper_margin = 3,
 	.lower_margin = 1,
 	.sync = 0,
@@ -596,29 +596,31 @@ static void __init raumfeld_lcd_init(void)
 {
 	int ret;
 
-	pxa_set_fb_info(NULL, &raumfeld_sharp_lcd_info);
-
-	/* Earlier devices had the backlight regulator controlled
-	 * via PWM, later versions use another controller for that */
-	if ((system_rev & 0xff) < 2) {
-		mfp_cfg_t raumfeld_pwm_pin_config = GPIO17_PWM0_OUT;
-		pxa3xx_mfp_config(&raumfeld_pwm_pin_config, 1);
-		platform_device_register(&raumfeld_pwm_backlight_device);
-	} else
-		platform_device_register(&raumfeld_lt3593_device);
-
 	ret = gpio_request(GPIO_TFT_VA_EN, "display VA enable");
 	if (ret < 0)
 		pr_warning("Unable to request GPIO_TFT_VA_EN\n");
 	else
 		gpio_direction_output(GPIO_TFT_VA_EN, 1);
 
+	msleep(100);
+
 	ret = gpio_request(GPIO_DISPLAY_ENABLE, "display enable");
 	if (ret < 0)
 		pr_warning("Unable to request GPIO_DISPLAY_ENABLE\n");
 	else
 		gpio_direction_output(GPIO_DISPLAY_ENABLE, 1);
 
+	/* Hardware revision 2 has the backlight regulator controlled
+	 * by an LT3593, earlier and later devices use PWM for that. */
+	if ((system_rev & 0xff) == 2) {
+		platform_device_register(&raumfeld_lt3593_device);
+	} else {
+		mfp_cfg_t raumfeld_pwm_pin_config = GPIO17_PWM0_OUT;
+		pxa3xx_mfp_config(&raumfeld_pwm_pin_config, 1);
+		platform_device_register(&raumfeld_pwm_backlight_device);
+	}
+
+	pxa_set_fb_info(NULL, &raumfeld_sharp_lcd_info);
 	platform_device_register(&pxa3xx_device_gcu);
 }
 
@@ -657,10 +659,10 @@ static struct lis3lv02d_platform_data lis3_pdata = {
 
 #define SPI_AK4104 \
 { \
-	.modalias = "ak4104", \
+	.modalias = "ak4104-codec", \
 	.max_speed_hz = 10000, \
 	.bus_num = 0, \
 	.chip_select = 0, \
 	.controller_data = (void *) GPIO_SPDIF_CS, \
 }
 
diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c
index 5b4fffab1eb4..41ab97ebe4cf 100644
--- a/arch/arm/plat-orion/gpio.c
+++ b/arch/arm/plat-orion/gpio.c
@@ -432,7 +432,7 @@ void __init orion_gpio_init(int gpio_base, int ngpio,
 	ct->regs.mask = ochip->mask_offset + GPIO_EDGE_MASK_OFF;
 	ct->regs.ack = GPIO_EDGE_CAUSE_OFF;
 	ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
-	ct->chip.irq_ack = irq_gc_ack;
+	ct->chip.irq_ack = irq_gc_ack_clr_bit;
 	ct->chip.irq_mask = irq_gc_mask_clr_bit;
 	ct->chip.irq_unmask = irq_gc_mask_set_bit;
 	ct->chip.irq_set_type = gpio_irq_set_type;
diff --git a/arch/arm/plat-pxa/gpio.c b/arch/arm/plat-pxa/gpio.c
index 48ebb9479b61..a11dc3670505 100644
--- a/arch/arm/plat-pxa/gpio.c
+++ b/arch/arm/plat-pxa/gpio.c
@@ -50,7 +50,7 @@ static inline void __iomem *gpio_chip_base(struct gpio_chip *c)
 	return container_of(c, struct pxa_gpio_chip, chip)->regbase;
 }
 
-static inline struct pxa_gpio_chip *gpio_to_chip(unsigned gpio)
+static inline struct pxa_gpio_chip *gpio_to_pxachip(unsigned gpio)
 {
 	return &pxa_gpio_chips[gpio_to_bank(gpio)];
 }
@@ -161,7 +161,7 @@ static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type)
 	int gpio = irq_to_gpio(d->irq);
 	unsigned long gpdr, mask = GPIO_bit(gpio);
 
-	c = gpio_to_chip(gpio);
+	c = gpio_to_pxachip(gpio);
 
 	if (type == IRQ_TYPE_PROBE) {
 		/* Don't mess with enabled GPIOs using preconfigured edges or
@@ -230,7 +230,7 @@ static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
 static void pxa_ack_muxed_gpio(struct irq_data *d)
 {
 	int gpio = irq_to_gpio(d->irq);
-	struct pxa_gpio_chip *c = gpio_to_chip(gpio);
+	struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
 
 	__raw_writel(GPIO_bit(gpio), c->regbase + GEDR_OFFSET);
 }
@@ -238,7 +238,7 @@ static void pxa_ack_muxed_gpio(struct irq_data *d)
 static void pxa_mask_muxed_gpio(struct irq_data *d)
 {
 	int gpio = irq_to_gpio(d->irq);
-	struct pxa_gpio_chip *c = gpio_to_chip(gpio);
+	struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
 	uint32_t grer, gfer;
 
 	c->irq_mask &= ~GPIO_bit(gpio);
@@ -252,7 +252,7 @@ static void pxa_mask_muxed_gpio(struct irq_data *d)
 static void pxa_unmask_muxed_gpio(struct irq_data *d)
 {
 	int gpio = irq_to_gpio(d->irq);
-	struct pxa_gpio_chip *c = gpio_to_chip(gpio);
+	struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
 
 	c->irq_mask |= GPIO_bit(gpio);
 	update_edge_detect(c);
diff --git a/arch/arm/plat-s5p/irq-gpioint.c b/arch/arm/plat-s5p/irq-gpioint.c
index 135abda31c9a..327ab9f662e8 100644
--- a/arch/arm/plat-s5p/irq-gpioint.c
+++ b/arch/arm/plat-s5p/irq-gpioint.c
@@ -152,7 +152,7 @@ static __init int s5p_gpioint_add(struct s3c_gpio_chip *chip)
 	if (!gc)
 		return -ENOMEM;
 	ct = gc->chip_types;
-	ct->chip.irq_ack = irq_gc_ack;
+	ct->chip.irq_ack = irq_gc_ack_set_bit;
 	ct->chip.irq_mask = irq_gc_mask_set_bit;
 	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
 	ct->chip.irq_set_type = s5p_gpioint_set_type,
diff --git a/arch/arm/plat-samsung/irq-uart.c b/arch/arm/plat-samsung/irq-uart.c
index 32582c0958e3..0e46588d847b 100644
--- a/arch/arm/plat-samsung/irq-uart.c
+++ b/arch/arm/plat-samsung/irq-uart.c
@@ -55,7 +55,7 @@ static void __init s3c_init_uart_irq(struct s3c_uart_irq *uirq)
 	gc = irq_alloc_generic_chip("s3c-uart", 1, uirq->base_irq, reg_base,
 				    handle_level_irq);
 	ct = gc->chip_types;
-	ct->chip.irq_ack = irq_gc_ack;
+	ct->chip.irq_ack = irq_gc_ack_set_bit;
 	ct->chip.irq_mask = irq_gc_mask_set_bit;
 	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
 	ct->regs.ack = S3C64XX_UINTP;
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 33867ec4a234..9d6a8effeda2 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -12,6 +12,8 @@
 #include <linux/of.h>
 #include <linux/memblock.h>
 #include <linux/vmalloc.h>
+#include <linux/memory.h>
+
 #include <asm/firmware.h>
 #include <asm/machdep.h>
 #include <asm/pSeries_reconfig.h>
@@ -20,24 +22,25 @@
 static unsigned long get_memblock_size(void)
 {
 	struct device_node *np;
-	unsigned int memblock_size = 0;
+	unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
+	struct resource r;
 
 	np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
 	if (np) {
-		const unsigned long *size;
+		const __be64 *size;
 
 		size = of_get_property(np, "ibm,lmb-size", NULL);
-		memblock_size = size ? *size : 0;
-
+		if (size)
+			memblock_size = be64_to_cpup(size);
 		of_node_put(np);
-	} else {
+	} else if (machine_is(pseries)) {
+		/* This fallback really only applies to pseries */
 		unsigned int memzero_size = 0;
-		const unsigned int *regs;
 
 		np = of_find_node_by_path("/memory@0");
 		if (np) {
-			regs = of_get_property(np, "reg", NULL);
-			memzero_size = regs ? regs[3] : 0;
+			if (!of_address_to_resource(np, 0, &r))
+				memzero_size = resource_size(&r);
 			of_node_put(np);
 		}
 
@@ -50,16 +53,21 @@ static unsigned long get_memblock_size(void)
 			sprintf(buf, "/memory@%x", memzero_size);
 			np = of_find_node_by_path(buf);
 			if (np) {
-				regs = of_get_property(np, "reg", NULL);
-				memblock_size = regs ? regs[3] : 0;
+				if (!of_address_to_resource(np, 0, &r))
+					memblock_size = resource_size(&r);
 				of_node_put(np);
 			}
 		}
 	}
-
 	return memblock_size;
 }
 
+/* WARNING: This is going to override the generic definition whenever
+ * pseries is built-in regardless of what platform is active at boot
+ * time. This is fine for now as this is the only "option" and it
+ * should work everywhere. If not, we'll have to turn this into a
+ * ppc_md. callback
+ */
 unsigned long memory_block_size_bytes(void)
 {
 	return get_memblock_size();
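The hotplug-memory change above boils down to one pattern: start from MIN_MEMORY_BLOCK_SIZE and only override it when the device tree actually provides "ibm,lmb-size" (a big-endian 64-bit value), with the hand-rolled "reg" parsing replaced by of_address_to_resource(). A small stand-alone sketch of that default-plus-override shape follows; the byte-swap helper is a plain stand-in for be64_to_cpup() and the minimum value is a placeholder, neither is the kernel's.

#include <stdint.h>
#include <stddef.h>

#define MIN_BLOCK_SIZE (16UL * 1024 * 1024)   /* placeholder minimum, not the kernel's */

/* Decode a big-endian 64-bit property value. */
static uint64_t prop_be64(const uint8_t *p)
{
        uint64_t v = 0;
        for (int i = 0; i < 8; i++)
                v = (v << 8) | p[i];
        return v;
}

static unsigned long block_size_from_prop(const uint8_t *prop, size_t len)
{
        unsigned long size = MIN_BLOCK_SIZE;  /* sane default when the property is absent */

        if (prop && len >= 8)
                size = (unsigned long)prop_be64(prop);
        return size;
}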
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index d865c4aeec55..bbaaa005bf0e 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -28,6 +28,7 @@
 #include <linux/poison.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
+#include <linux/memory.h>
 #include <linux/memory_hotplug.h>
 #include <linux/nmi.h>
 #include <linux/gfp.h>
@@ -895,8 +896,6 @@ const char *arch_vma_name(struct vm_area_struct *vma)
 }
 
 #ifdef CONFIG_X86_UV
-#define MIN_MEMORY_BLOCK_SIZE (1 << SECTION_SIZE_BITS)
-
 unsigned long memory_block_size_bytes(void)
 {
 	if (is_uv_system()) {
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 9f9b2359f718..45d7c8fc73bd 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -30,7 +30,6 @@
 static DEFINE_MUTEX(mem_sysfs_mutex);
 
 #define MEMORY_CLASS_NAME "memory"
-#define MIN_MEMORY_BLOCK_SIZE (1 << SECTION_SIZE_BITS)
 
 static int sections_per_block;
 
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 999803ce10dc..5da67f165afa 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -90,9 +90,10 @@
 #define G4x_GMCH_SIZE_MASK	(0xf << 8)
 #define G4x_GMCH_SIZE_1M	(0x1 << 8)
 #define G4x_GMCH_SIZE_2M	(0x3 << 8)
-#define G4x_GMCH_SIZE_VT_1M	(0x9 << 8)
-#define G4x_GMCH_SIZE_VT_1_5M	(0xa << 8)
-#define G4x_GMCH_SIZE_VT_2M	(0xc << 8)
+#define G4x_GMCH_SIZE_VT_EN	(0x8 << 8)
+#define G4x_GMCH_SIZE_VT_1M	(G4x_GMCH_SIZE_1M | G4x_GMCH_SIZE_VT_EN)
+#define G4x_GMCH_SIZE_VT_1_5M	((0x2 << 8) | G4x_GMCH_SIZE_VT_EN)
+#define G4x_GMCH_SIZE_VT_2M	(G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)
 
 #define GFX_FLSH_CNTL	0x2170 /* 915+ */
 
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index e1787022d6c8..296fbd66f0e1 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1943,7 +1943,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (!dev_priv->mm.gtt) {
 		DRM_ERROR("Failed to initialize GTT\n");
 		ret = -ENODEV;
-		goto out_iomapfree;
+		goto out_rmmap;
 	}
 
 	agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
@@ -1987,7 +1987,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (dev_priv->wq == NULL) {
 		DRM_ERROR("Failed to create our workqueue.\n");
 		ret = -ENOMEM;
-		goto out_iomapfree;
+		goto out_mtrrfree;
 	}
 
 	/* enable GEM by default */
@@ -2074,13 +2074,21 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	return 0;
 
 out_gem_unload:
+	if (dev_priv->mm.inactive_shrinker.shrink)
+		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
+
 	if (dev->pdev->msi_enabled)
 		pci_disable_msi(dev->pdev);
 
 	intel_teardown_gmbus(dev);
 	intel_teardown_mchbar(dev);
 	destroy_workqueue(dev_priv->wq);
-out_iomapfree:
+out_mtrrfree:
+	if (dev_priv->mm.gtt_mtrr >= 0) {
+		mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
+			 dev->agp->agp_info.aper_size * 1024 * 1024);
+		dev_priv->mm.gtt_mtrr = -1;
+	}
 	io_mapping_free(dev_priv->mm.gtt_mapping);
 out_rmmap:
 	pci_iounmap(dev->pdev, dev_priv->regs);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 013d304455b9..eb91e2dd7914 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -52,7 +52,7 @@ module_param_named(powersave, i915_powersave, int, 0600);
 unsigned int i915_semaphores = 0;
 module_param_named(semaphores, i915_semaphores, int, 0600);
 
-unsigned int i915_enable_rc6 = 1;
+unsigned int i915_enable_rc6 = 0;
 module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
 
 unsigned int i915_enable_fbc = 0;
@@ -577,6 +577,7 @@ int i915_reset(struct drm_device *dev, u8 flags)
 	if (get_seconds() - dev_priv->last_gpu_reset < 5) {
 		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
 	} else switch (INTEL_INFO(dev)->gen) {
+	case 7:
 	case 6:
 		ret = gen6_do_reset(dev, flags);
 		/* If reset with a user forcewake, try to restore */
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 391b55f1cc74..e2aced6eec4c 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -50,7 +50,6 @@ struct intel_dp {
 	bool has_audio;
 	int force_audio;
 	uint32_t color_range;
-	int dpms_mode;
 	uint8_t link_bw;
 	uint8_t lane_count;
 	uint8_t dpcd[4];
@@ -138,8 +137,8 @@ intel_dp_max_lane_count(struct intel_dp *intel_dp)
 {
 	int max_lane_count = 4;
 
-	if (intel_dp->dpcd[0] >= 0x11) {
-		max_lane_count = intel_dp->dpcd[2] & 0x1f;
+	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
+		max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
 		switch (max_lane_count) {
 		case 1: case 2: case 4:
 			break;
@@ -153,7 +152,7 @@ intel_dp_max_lane_count(struct intel_dp *intel_dp)
 static int
 intel_dp_max_link_bw(struct intel_dp *intel_dp)
 {
-	int max_link_bw = intel_dp->dpcd[1];
+	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
 
 	switch (max_link_bw) {
 	case DP_LINK_BW_1_62:
@@ -774,7 +773,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	/*
 	 * Check for DPCD version > 1.1 and enhanced framing support
 	 */
-	if (intel_dp->dpcd[0] >= 0x11 && (intel_dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)) {
+	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
 		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
 		intel_dp->DP |= DP_ENHANCED_FRAMING;
 	}
@@ -942,11 +942,44 @@ static void ironlake_edp_pll_off(struct drm_encoder *encoder)
 	udelay(200);
 }
 
+/* If the sink supports it, try to set the power state appropriately */
+static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
+{
+	int ret, i;
+
+	/* Should have a valid DPCD by this point */
+	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+		return;
+
+	if (mode != DRM_MODE_DPMS_ON) {
+		ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
+						  DP_SET_POWER_D3);
+		if (ret != 1)
+			DRM_DEBUG_DRIVER("failed to write sink power state\n");
+	} else {
+		/*
+		 * When turning on, we need to retry for 1ms to give the sink
+		 * time to wake up.
+		 */
+		for (i = 0; i < 3; i++) {
+			ret = intel_dp_aux_native_write_1(intel_dp,
+							  DP_SET_POWER,
+							  DP_SET_POWER_D0);
+			if (ret == 1)
+				break;
+			msleep(1);
+		}
+	}
+}
+
 static void intel_dp_prepare(struct drm_encoder *encoder)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	struct drm_device *dev = encoder->dev;
 
+	/* Wake up the sink first */
+	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+
 	if (is_edp(intel_dp)) {
 		ironlake_edp_backlight_off(dev);
 		ironlake_edp_panel_off(dev);
@@ -990,6 +1023,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
 	if (mode != DRM_MODE_DPMS_ON) {
 		if (is_edp(intel_dp))
 			ironlake_edp_backlight_off(dev);
+		intel_dp_sink_dpms(intel_dp, mode);
 		intel_dp_link_down(intel_dp);
 		if (is_edp(intel_dp))
 			ironlake_edp_panel_off(dev);
@@ -998,6 +1032,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
 	} else {
 		if (is_edp(intel_dp))
 			ironlake_edp_panel_vdd_on(intel_dp);
+		intel_dp_sink_dpms(intel_dp, mode);
 		if (!(dp_reg & DP_PORT_EN)) {
 			intel_dp_start_link_train(intel_dp);
 			if (is_edp(intel_dp)) {
@@ -1009,7 +1044,31 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
 		if (is_edp(intel_dp))
 			ironlake_edp_backlight_on(dev);
 	}
-	intel_dp->dpms_mode = mode;
+}
+
+/*
+ * Native read with retry for link status and receiver capability reads for
+ * cases where the sink may still be asleep.
+ */
+static bool
+intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
+			       uint8_t *recv, int recv_bytes)
+{
+	int ret, i;
+
+	/*
+	 * Sinks are *supposed* to come up within 1ms from an off state,
+	 * but we're also supposed to retry 3 times per the spec.
+	 */
+	for (i = 0; i < 3; i++) {
+		ret = intel_dp_aux_native_read(intel_dp, address, recv,
+					       recv_bytes);
+		if (ret == recv_bytes)
+			return true;
+		msleep(1);
+	}
+
+	return false;
 }
 
 /*
@@ -1019,14 +1078,10 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
 static bool
 intel_dp_get_link_status(struct intel_dp *intel_dp)
 {
-	int ret;
-
-	ret = intel_dp_aux_native_read(intel_dp,
-				       DP_LANE0_1_STATUS,
-				       intel_dp->link_status, DP_LINK_STATUS_SIZE);
-	if (ret != DP_LINK_STATUS_SIZE)
-		return false;
-	return true;
+	return intel_dp_aux_native_read_retry(intel_dp,
+					      DP_LANE0_1_STATUS,
+					      intel_dp->link_status,
+					      DP_LINK_STATUS_SIZE);
 }
 
 static uint8_t
@@ -1515,6 +1570,8 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 static void
 intel_dp_check_link_status(struct intel_dp *intel_dp)
 {
+	int ret;
+
 	if (!intel_dp->base.base.crtc)
 		return;
 
@@ -1523,6 +1580,15 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
 		return;
 	}
 
+	/* Try to read receiver status if the link appears to be up */
+	ret = intel_dp_aux_native_read(intel_dp,
+				       0x000, intel_dp->dpcd,
+				       sizeof (intel_dp->dpcd));
+	if (ret != sizeof(intel_dp->dpcd)) {
+		intel_dp_link_down(intel_dp);
+		return;
+	}
+
 	if (!intel_channel_eq_ok(intel_dp)) {
 		intel_dp_start_link_train(intel_dp);
 		intel_dp_complete_link_train(intel_dp);
@@ -1533,6 +1599,7 @@ static enum drm_connector_status
 ironlake_dp_detect(struct intel_dp *intel_dp)
 {
 	enum drm_connector_status status;
+	bool ret;
 
 	/* Can't disconnect eDP, but you can close the lid... */
 	if (is_edp(intel_dp)) {
@@ -1543,13 +1610,11 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
 	}
 
 	status = connector_status_disconnected;
-	if (intel_dp_aux_native_read(intel_dp,
-				     0x000, intel_dp->dpcd,
-				     sizeof (intel_dp->dpcd))
-	    == sizeof(intel_dp->dpcd)) {
-		if (intel_dp->dpcd[0] != 0)
-			status = connector_status_connected;
-	}
+	ret = intel_dp_aux_native_read_retry(intel_dp,
+					     0x000, intel_dp->dpcd,
+					     sizeof (intel_dp->dpcd));
+	if (ret && intel_dp->dpcd[DP_DPCD_REV] != 0)
+		status = connector_status_connected;
 	DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0],
 		      intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]);
 	return status;
@@ -1586,7 +1651,7 @@ g4x_dp_detect(struct intel_dp *intel_dp)
 	if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd,
 				     sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
 	{
-		if (intel_dp->dpcd[0] != 0)
+		if (intel_dp->dpcd[DP_DPCD_REV] != 0)
 			status = connector_status_connected;
 	}
 
@@ -1790,8 +1855,7 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder)
 {
 	struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
 
-	if (intel_dp->dpms_mode == DRM_MODE_DPMS_ON)
-		intel_dp_check_link_status(intel_dp);
+	intel_dp_check_link_status(intel_dp);
 }
 
 /* Return which DP Port should be selected for Transcoder DP control */
@@ -1859,7 +1923,6 @@ intel_dp_init(struct drm_device *dev, int output_reg)
 		return;
 
 	intel_dp->output_reg = output_reg;
-	intel_dp->dpms_mode = -1;
 
 	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
 	if (!intel_connector) {
@@ -1954,8 +2017,9 @@ intel_dp_init(struct drm_device *dev, int output_reg)
 				       sizeof(intel_dp->dpcd));
 		ironlake_edp_panel_vdd_off(intel_dp);
 		if (ret == sizeof(intel_dp->dpcd)) {
-			if (intel_dp->dpcd[0] >= 0x11)
-				dev_priv->no_aux_handshake = intel_dp->dpcd[3] &
+			if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
+				dev_priv->no_aux_handshake =
+					intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
 					DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
 		} else {
 			/* if this fails, presume the device is a ghost */
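Most of the intel_dp.c changes above hang off one idea: a DisplayPort sink that has just been woken (or put in D3) may not answer the first AUX transaction, so DPCD and link-status reads are wrapped in a short bounded retry instead of failing immediately. A generic C sketch of that retry shape; the 3-attempt/1 ms figures mirror the hunk above, while the callback type and names are illustrative only.

#include <stdbool.h>
#include <unistd.h>

typedef int (*aux_read_fn)(void *dev, unsigned addr, void *buf, int len);

/* Retry a short read a few times, pausing ~1 ms between attempts, so a
 * sink that is still waking up gets a chance to respond. */
static bool read_with_retry(aux_read_fn read_fn, void *dev, unsigned addr,
                            void *buf, int len)
{
        for (int i = 0; i < 3; i++) {
                if (read_fn(dev, addr, buf, len) == len)
                        return true;
                usleep(1000);
        }
        return false;
}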
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index c0e0ee63fbf4..39ac2b634ae5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -165,7 +165,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
 int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
 static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
 {
-	return intel_wait_ring_buffer(ring, ring->space - 8);
+	return intel_wait_ring_buffer(ring, ring->size - 8);
 }
 
 int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 660f96401a05..15bd0477a3e8 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2000,7 +2000,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 		gb_backend_map = 0x66442200;
 		break;
 	case CHIP_JUNIPER:
-		gb_backend_map = 0x00006420;
+		gb_backend_map = 0x00002200;
 		break;
 	default:
 		gb_backend_map =
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 57f3bc17b87e..2eb251858e72 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -252,7 +252,7 @@ draw_auto(struct radeon_device *rdev)
 
 }
 
-/* emits 36 */
+/* emits 39 */
 static void
 set_default_state(struct radeon_device *rdev)
 {
@@ -531,6 +531,11 @@ set_default_state(struct radeon_device *rdev)
 	radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
 	radeon_ring_write(rdev, 0);
 
+	/* setup LDS */
+	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+	radeon_ring_write(rdev, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
+	radeon_ring_write(rdev, 0x10001000);
+
 	/* SQ config */
 	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
 	radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
@@ -773,7 +778,7 @@ int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
 	/* calculate number of loops correctly */
 	ring_size = num_loops * dwords_per_loop;
 	/* set default + shaders */
-	ring_size += 52; /* shaders + def state */
+	ring_size += 55; /* shaders + def state */
 	ring_size += 10; /* fence emit for VB IB */
 	ring_size += 5; /* done copy */
 	ring_size += 10; /* fence emit for done copy */
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 3fc5fa1aefd0..229a20f10e2b 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -331,7 +331,7 @@ static bool avivo_read_disabled_bios(struct radeon_device *rdev)
 
 	seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
 	viph_control = RREG32(RADEON_VIPH_CONTROL);
-	bus_cntl = RREG32(RADEON_BUS_CNTL);
+	bus_cntl = RREG32(RV370_BUS_CNTL);
 	d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
 	d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
 	vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
@@ -350,7 +350,7 @@ static bool avivo_read_disabled_bios(struct radeon_device *rdev)
 	WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
 
 	/* enable the rom */
-	WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+	WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM));
 
 	/* Disable VGA mode */
 	WREG32(AVIVO_D1VGA_CONTROL,
@@ -367,7 +367,7 @@ static bool avivo_read_disabled_bios(struct radeon_device *rdev)
 	/* restore regs */
 	WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
 	WREG32(RADEON_VIPH_CONTROL, viph_control);
-	WREG32(RADEON_BUS_CNTL, bus_cntl);
+	WREG32(RV370_BUS_CNTL, bus_cntl);
 	WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
 	WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
 	WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
@@ -390,7 +390,10 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
 
 	seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
 	viph_control = RREG32(RADEON_VIPH_CONTROL);
-	bus_cntl = RREG32(RADEON_BUS_CNTL);
+	if (rdev->flags & RADEON_IS_PCIE)
+		bus_cntl = RREG32(RV370_BUS_CNTL);
+	else
+		bus_cntl = RREG32(RADEON_BUS_CNTL);
 	crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
 	crtc2_gen_cntl = 0;
 	crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
@@ -412,7 +415,10 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
 	WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
 
 	/* enable the rom */
-	WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+	if (rdev->flags & RADEON_IS_PCIE)
+		WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM));
+	else
+		WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
 
 	/* Turn off mem requests and CRTC for both controllers */
 	WREG32(RADEON_CRTC_GEN_CNTL,
@@ -439,7 +445,10 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
 	/* restore regs */
 	WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
 	WREG32(RADEON_VIPH_CONTROL, viph_control);
-	WREG32(RADEON_BUS_CNTL, bus_cntl);
+	if (rdev->flags & RADEON_IS_PCIE)
+		WREG32(RV370_BUS_CNTL, bus_cntl);
+	else
+		WREG32(RADEON_BUS_CNTL, bus_cntl);
 	WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
 	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
 		WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index cbfca3a24fdf..9792d4ffdc86 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -52,6 +52,12 @@ void radeon_connector_hotplug(struct drm_connector *connector)
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
+	/* bail if the connector does not have hpd pin, e.g.,
+	 * VGA, TV, etc.
+	 */
+	if (radeon_connector->hpd.hpd == RADEON_HPD_NONE)
+		return;
+
 	radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
 
 	/* powering up/down the eDP panel generates hpd events which
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index ec93a75369e6..bc44a3d35ec6 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -300,6 +300,8 @@
 #	define RADEON_BUS_READ_BURST	(1 << 30)
 #define RADEON_BUS_CNTL1	0x0034
 #	define RADEON_BUS_WAIT_ON_LOCK_EN	(1 << 4)
+#define RV370_BUS_CNTL	0x004c
+#	define RV370_BUS_BIOS_DIS_ROM	(1 << 2)
 /* rv370/rv380, rv410, r423/r430/r480, r5xx */
 #define RADEON_MSI_REARM_EN	0x0160
 #	define RV370_MSI_REARM_EN	(1 << 0)
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 6e3b11e5abbe..1f5850e473cc 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -426,7 +426,7 @@ int rs600_gart_init(struct radeon_device *rdev)
 	return radeon_gart_table_vram_alloc(rdev);
 }
 
-int rs600_gart_enable(struct radeon_device *rdev)
+static int rs600_gart_enable(struct radeon_device *rdev)
 {
 	u32 tmp;
 	int r, i;
@@ -440,8 +440,8 @@ int rs600_gart_enable(struct radeon_device *rdev)
 		return r;
 	radeon_gart_restore(rdev);
 	/* Enable bus master */
-	tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS;
-	WREG32(R_00004C_BUS_CNTL, tmp);
+	tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
+	WREG32(RADEON_BUS_CNTL, tmp);
 	/* FIXME: setup default page */
 	WREG32_MC(R_000100_MC_PT0_CNTL,
 		  (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 2a7e43bc796d..aa7d1d79b8c5 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c | |||
@@ -247,12 +247,12 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) | |||
247 | return 0; | 247 | return 0; |
248 | 248 | ||
249 | /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */ | 249 | /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */ |
250 | card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE]; | ||
250 | if (card->csd.structure == 3) { | 251 | if (card->csd.structure == 3) { |
251 | int ext_csd_struct = ext_csd[EXT_CSD_STRUCTURE]; | 252 | if (card->ext_csd.raw_ext_csd_structure > 2) { |
252 | if (ext_csd_struct > 2) { | ||
253 | printk(KERN_ERR "%s: unrecognised EXT_CSD structure " | 253 | printk(KERN_ERR "%s: unrecognised EXT_CSD structure " |
254 | "version %d\n", mmc_hostname(card->host), | 254 | "version %d\n", mmc_hostname(card->host), |
255 | ext_csd_struct); | 255 | card->ext_csd.raw_ext_csd_structure); |
256 | err = -EINVAL; | 256 | err = -EINVAL; |
257 | goto out; | 257 | goto out; |
258 | } | 258 | } |
@@ -266,6 +266,10 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) | |||
266 | goto out; | 266 | goto out; |
267 | } | 267 | } |
268 | 268 | ||
269 | card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0]; | ||
270 | card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1]; | ||
271 | card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2]; | ||
272 | card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3]; | ||
269 | if (card->ext_csd.rev >= 2) { | 273 | if (card->ext_csd.rev >= 2) { |
270 | card->ext_csd.sectors = | 274 | card->ext_csd.sectors = |
271 | ext_csd[EXT_CSD_SEC_CNT + 0] << 0 | | 275 | ext_csd[EXT_CSD_SEC_CNT + 0] << 0 | |
@@ -277,7 +281,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) | |||
277 | if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512) | 281 | if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512) |
278 | mmc_card_set_blockaddr(card); | 282 | mmc_card_set_blockaddr(card); |
279 | } | 283 | } |
280 | 284 | card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE]; | |
281 | switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) { | 285 | switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) { |
282 | case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 | | 286 | case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 | |
283 | EXT_CSD_CARD_TYPE_26: | 287 | EXT_CSD_CARD_TYPE_26: |
@@ -307,6 +311,11 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) | |||
307 | mmc_hostname(card->host)); | 311 | mmc_hostname(card->host)); |
308 | } | 312 | } |
309 | 313 | ||
314 | card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT]; | ||
315 | card->ext_csd.raw_erase_timeout_mult = | ||
316 | ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]; | ||
317 | card->ext_csd.raw_hc_erase_grp_size = | ||
318 | ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; | ||
310 | if (card->ext_csd.rev >= 3) { | 319 | if (card->ext_csd.rev >= 3) { |
311 | u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT]; | 320 | u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT]; |
312 | card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG]; | 321 | card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG]; |
@@ -334,6 +343,16 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) | |||
334 | card->ext_csd.boot_size = ext_csd[EXT_CSD_BOOT_MULT] << 17; | 343 | card->ext_csd.boot_size = ext_csd[EXT_CSD_BOOT_MULT] << 17; |
335 | } | 344 | } |
336 | 345 | ||
346 | card->ext_csd.raw_hc_erase_gap_size = | ||
347 | ext_csd[EXT_CSD_PARTITION_ATTRIBUTE]; | ||
348 | card->ext_csd.raw_sec_trim_mult = | ||
349 | ext_csd[EXT_CSD_SEC_TRIM_MULT]; | ||
350 | card->ext_csd.raw_sec_erase_mult = | ||
351 | ext_csd[EXT_CSD_SEC_ERASE_MULT]; | ||
352 | card->ext_csd.raw_sec_feature_support = | ||
353 | ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]; | ||
354 | card->ext_csd.raw_trim_mult = | ||
355 | ext_csd[EXT_CSD_TRIM_MULT]; | ||
337 | if (card->ext_csd.rev >= 4) { | 356 | if (card->ext_csd.rev >= 4) { |
338 | /* | 357 | /* |
339 | * Enhanced area feature support -- check whether the eMMC | 358 | * Enhanced area feature support -- check whether the eMMC |
@@ -341,7 +360,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) | |||
341 | * area offset and size to user by adding sysfs interface. | 360 | * area offset and size to user by adding sysfs interface. |
342 | */ | 361 | */ |
343 | if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) && | 362 | if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) && |
344 | (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) { | 363 | (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) { |
345 | u8 hc_erase_grp_sz = | 364 | u8 hc_erase_grp_sz = |
346 | ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; | 365 | ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; |
347 | u8 hc_wp_grp_sz = | 366 | u8 hc_wp_grp_sz = |
@@ -401,17 +420,17 @@ static inline void mmc_free_ext_csd(u8 *ext_csd) | |||
401 | } | 420 | } |
402 | 421 | ||
403 | 422 | ||
404 | static int mmc_compare_ext_csds(struct mmc_card *card, u8 *ext_csd, | 423 | static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width) |
405 | unsigned bus_width) | ||
406 | { | 424 | { |
407 | u8 *bw_ext_csd; | 425 | u8 *bw_ext_csd; |
408 | int err; | 426 | int err; |
409 | 427 | ||
428 | if (bus_width == MMC_BUS_WIDTH_1) | ||
429 | return 0; | ||
430 | |||
410 | err = mmc_get_ext_csd(card, &bw_ext_csd); | 431 | err = mmc_get_ext_csd(card, &bw_ext_csd); |
411 | if (err) | ||
412 | return err; | ||
413 | 432 | ||
414 | if ((ext_csd == NULL || bw_ext_csd == NULL)) { | 433 | if (err || bw_ext_csd == NULL) { |
415 | if (bus_width != MMC_BUS_WIDTH_1) | 434 | if (bus_width != MMC_BUS_WIDTH_1) |
416 | err = -EINVAL; | 435 | err = -EINVAL; |
417 | goto out; | 436 | goto out; |
@@ -421,35 +440,40 @@ static int mmc_compare_ext_csds(struct mmc_card *card, u8 *ext_csd, | |||
421 | goto out; | 440 | goto out; |
422 | 441 | ||
423 | /* only compare read only fields */ | 442 | /* only compare read only fields */ |
424 | err = (!(ext_csd[EXT_CSD_PARTITION_SUPPORT] == | 443 | err = (!(card->ext_csd.raw_partition_support == |
425 | bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) && | 444 | bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) && |
426 | (ext_csd[EXT_CSD_ERASED_MEM_CONT] == | 445 | (card->ext_csd.raw_erased_mem_count == |
427 | bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) && | 446 | bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) && |
428 | (ext_csd[EXT_CSD_REV] == | 447 | (card->ext_csd.rev == |
429 | bw_ext_csd[EXT_CSD_REV]) && | 448 | bw_ext_csd[EXT_CSD_REV]) && |
430 | (ext_csd[EXT_CSD_STRUCTURE] == | 449 | (card->ext_csd.raw_ext_csd_structure == |
431 | bw_ext_csd[EXT_CSD_STRUCTURE]) && | 450 | bw_ext_csd[EXT_CSD_STRUCTURE]) && |
432 | (ext_csd[EXT_CSD_CARD_TYPE] == | 451 | (card->ext_csd.raw_card_type == |
433 | bw_ext_csd[EXT_CSD_CARD_TYPE]) && | 452 | bw_ext_csd[EXT_CSD_CARD_TYPE]) && |
434 | (ext_csd[EXT_CSD_S_A_TIMEOUT] == | 453 | (card->ext_csd.raw_s_a_timeout == |
435 | bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) && | 454 | bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) && |
436 | (ext_csd[EXT_CSD_HC_WP_GRP_SIZE] == | 455 | (card->ext_csd.raw_hc_erase_gap_size == |
437 | bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) && | 456 | bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) && |
438 | (ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT] == | 457 | (card->ext_csd.raw_erase_timeout_mult == |
439 | bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) && | 458 | bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) && |
440 | (ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] == | 459 | (card->ext_csd.raw_hc_erase_grp_size == |
441 | bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) && | 460 | bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) && |
442 | (ext_csd[EXT_CSD_SEC_TRIM_MULT] == | 461 | (card->ext_csd.raw_sec_trim_mult == |
443 | bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) && | 462 | bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) && |
444 | (ext_csd[EXT_CSD_SEC_ERASE_MULT] == | 463 | (card->ext_csd.raw_sec_erase_mult == |
445 | bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) && | 464 | bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) && |
446 | (ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT] == | 465 | (card->ext_csd.raw_sec_feature_support == |
447 | bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) && | 466 | bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) && |
448 | (ext_csd[EXT_CSD_TRIM_MULT] == | 467 | (card->ext_csd.raw_trim_mult == |
449 | bw_ext_csd[EXT_CSD_TRIM_MULT]) && | 468 | bw_ext_csd[EXT_CSD_TRIM_MULT]) && |
450 | memcmp(&ext_csd[EXT_CSD_SEC_CNT], | 469 | (card->ext_csd.raw_sectors[0] == |
451 | &bw_ext_csd[EXT_CSD_SEC_CNT], | 470 | bw_ext_csd[EXT_CSD_SEC_CNT + 0]) && |
452 | 4) != 0); | 471 | (card->ext_csd.raw_sectors[1] == |
472 | bw_ext_csd[EXT_CSD_SEC_CNT + 1]) && | ||
473 | (card->ext_csd.raw_sectors[2] == | ||
474 | bw_ext_csd[EXT_CSD_SEC_CNT + 2]) && | ||
475 | (card->ext_csd.raw_sectors[3] == | ||
476 | bw_ext_csd[EXT_CSD_SEC_CNT + 3])); | ||
453 | if (err) | 477 | if (err) |
454 | err = -EINVAL; | 478 | err = -EINVAL; |
455 | 479 | ||
@@ -770,7 +794,6 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, | |||
770 | */ | 794 | */ |
771 | if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST)) | 795 | if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST)) |
772 | err = mmc_compare_ext_csds(card, | 796 | err = mmc_compare_ext_csds(card, |
773 | ext_csd, | ||
774 | bus_width); | 797 | bus_width); |
775 | else | 798 | else |
776 | err = mmc_bus_test(card, bus_width); | 799 | err = mmc_bus_test(card, bus_width); |
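The mmc.c hunk above reworks mmc_compare_ext_csds() so that, after a bus-width switch, the freshly read EXT_CSD block is checked against the raw_* fields cached in struct mmc_card rather than against a buffer the caller has to pass in, and a 1-bit bus is trusted without any re-read. A minimal userspace sketch of the compare-against-cache idea; the struct, offsets and field names below are invented for illustration and are not the kernel's API:

#include <stdio.h>
#include <string.h>

/* Hypothetical cache of a device's read-only configuration fields. */
struct card_cache {
    unsigned char rev;
    unsigned char card_type;
    unsigned char sectors[4];
};

/* Compare the cache against a freshly read register block; 0 on match. */
static int compare_cached_fields(const struct card_cache *c,
                                 const unsigned char *fresh)
{
    if (c->rev != fresh[192] || c->card_type != fresh[196])
        return -1;
    if (memcmp(c->sectors, &fresh[212], 4) != 0)
        return -1;
    return 0;
}

int main(void)
{
    unsigned char fresh[512] = {0};
    struct card_cache c = { .rev = 5, .card_type = 3,
                            .sectors = {0, 0, 0x10, 0} };

    fresh[192] = 5;
    fresh[196] = 3;
    fresh[214] = 0x10;
    printf("bus width verified: %s\n",
           compare_cached_fields(&c, fresh) == 0 ? "yes" : "no");
    return 0;
}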
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c index 8f8b65af9ed5..60f46bc2bf64 100644 --- a/drivers/net/natsemi.c +++ b/drivers/net/natsemi.c | |||
@@ -140,7 +140,7 @@ MODULE_LICENSE("GPL"); | |||
140 | module_param(mtu, int, 0); | 140 | module_param(mtu, int, 0); |
141 | module_param(debug, int, 0); | 141 | module_param(debug, int, 0); |
142 | module_param(rx_copybreak, int, 0); | 142 | module_param(rx_copybreak, int, 0); |
143 | module_param(dspcfg_workaround, int, 1); | 143 | module_param(dspcfg_workaround, int, 0); |
144 | module_param_array(options, int, NULL, 0); | 144 | module_param_array(options, int, NULL, 0); |
145 | module_param_array(full_duplex, int, NULL, 0); | 145 | module_param_array(full_duplex, int, NULL, 0); |
146 | MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)"); | 146 | MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)"); |
@@ -2028,8 +2028,8 @@ static void drain_rx(struct net_device *dev) | |||
2028 | np->rx_ring[i].cmd_status = 0; | 2028 | np->rx_ring[i].cmd_status = 0; |
2029 | np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */ | 2029 | np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */ |
2030 | if (np->rx_skbuff[i]) { | 2030 | if (np->rx_skbuff[i]) { |
2031 | pci_unmap_single(np->pci_dev, | 2031 | pci_unmap_single(np->pci_dev, np->rx_dma[i], |
2032 | np->rx_dma[i], buflen, | 2032 | buflen + NATSEMI_PADDING, |
2033 | PCI_DMA_FROMDEVICE); | 2033 | PCI_DMA_FROMDEVICE); |
2034 | dev_kfree_skb(np->rx_skbuff[i]); | 2034 | dev_kfree_skb(np->rx_skbuff[i]); |
2035 | } | 2035 | } |
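The natsemi change above makes the unmap size in drain_rx() include NATSEMI_PADDING, presumably matching the size the receive buffers were mapped with. The rule it restores is general: a DMA unmap must quote exactly the size that was used at map time. A tiny userspace model, with the padding constant and helpers made up for the sketch:

#include <assert.h>
#include <stdlib.h>

#define PADDING 16   /* stands in for NATSEMI_PADDING */

struct dma_map { void *cpu; size_t size; };

/* Map a receive buffer; the real driver maps buflen plus padding. */
static struct dma_map map_buffer(size_t buflen)
{
    struct dma_map m = { malloc(buflen + PADDING), buflen + PADDING };
    return m;
}

/* Unmapping with any size other than the mapped one is the bug. */
static void unmap_buffer(struct dma_map *m, size_t size)
{
    assert(size == m->size);
    free(m->cpu);
}

int main(void)
{
    struct dma_map m = map_buffer(1536);
    unmap_buffer(&m, 1536 + PADDING);   /* plain 1536 would trip the assert */
    return 0;
}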
diff --git a/drivers/net/slip.c b/drivers/net/slip.c index 8ec1a9a0bb9a..2f110fb30daa 100644 --- a/drivers/net/slip.c +++ b/drivers/net/slip.c | |||
@@ -182,11 +182,11 @@ static int sl_alloc_bufs(struct slip *sl, int mtu) | |||
182 | #ifdef SL_INCLUDE_CSLIP | 182 | #ifdef SL_INCLUDE_CSLIP |
183 | cbuff = xchg(&sl->cbuff, cbuff); | 183 | cbuff = xchg(&sl->cbuff, cbuff); |
184 | slcomp = xchg(&sl->slcomp, slcomp); | 184 | slcomp = xchg(&sl->slcomp, slcomp); |
185 | #endif | ||
185 | #ifdef CONFIG_SLIP_MODE_SLIP6 | 186 | #ifdef CONFIG_SLIP_MODE_SLIP6 |
186 | sl->xdata = 0; | 187 | sl->xdata = 0; |
187 | sl->xbits = 0; | 188 | sl->xbits = 0; |
188 | #endif | 189 | #endif |
189 | #endif | ||
190 | spin_unlock_bh(&sl->lock); | 190 | spin_unlock_bh(&sl->lock); |
191 | err = 0; | 191 | err = 0; |
192 | 192 | ||
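The slip.c change moves the SL_INCLUDE_CSLIP #endif up so that the CONFIG_SLIP_MODE_SLIP6 reset of xdata/xbits no longer sits inside the CSLIP block. A self-contained demonstration of that preprocessor-nesting hazard (feature names are invented):

#include <stdio.h>

#define FEATURE_B 1   /* FEATURE_A deliberately left undefined */

int main(void)
{
    int b_state = 42;
#ifdef FEATURE_A
    /* ... feature A reset ... */
#ifdef FEATURE_B
    b_state = 0;      /* never reached while FEATURE_A is off */
#endif
#endif
    printf("b_state = %d (should be 0 whenever FEATURE_B is on)\n", b_state);
    return 0;
}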
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 387ca43f26f4..304fe78ff60e 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c | |||
@@ -2421,10 +2421,8 @@ static void hso_free_net_device(struct hso_device *hso_dev) | |||
2421 | 2421 | ||
2422 | remove_net_device(hso_net->parent); | 2422 | remove_net_device(hso_net->parent); |
2423 | 2423 | ||
2424 | if (hso_net->net) { | 2424 | if (hso_net->net) |
2425 | unregister_netdev(hso_net->net); | 2425 | unregister_netdev(hso_net->net); |
2426 | free_netdev(hso_net->net); | ||
2427 | } | ||
2428 | 2426 | ||
2429 | /* start freeing */ | 2427 | /* start freeing */ |
2430 | for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) { | 2428 | for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) { |
@@ -2436,6 +2434,9 @@ static void hso_free_net_device(struct hso_device *hso_dev) | |||
2436 | kfree(hso_net->mux_bulk_tx_buf); | 2434 | kfree(hso_net->mux_bulk_tx_buf); |
2437 | hso_net->mux_bulk_tx_buf = NULL; | 2435 | hso_net->mux_bulk_tx_buf = NULL; |
2438 | 2436 | ||
2437 | if (hso_net->net) | ||
2438 | free_netdev(hso_net->net); | ||
2439 | |||
2439 | kfree(hso_dev); | 2440 | kfree(hso_dev); |
2440 | } | 2441 | } |
2441 | 2442 | ||
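In the hso hunks, free_netdev() is deferred until after the mux bulk buffers are released; the driver-private hso_net appears to live inside the net_device allocation, so freeing the netdev first would leave the later kfree()s reading freed memory. A userspace model of the ordering rule, with invented structures:

#include <stdlib.h>

/* Model: the private area lives inside the same allocation as the device,
 * the way netdev_priv() data lives inside a net_device. */
struct fake_netdev {
    char name[16];
    void *priv_bufs[4];     /* stands in for the hso mux bulk buffers */
};

int main(void)
{
    struct fake_netdev *dev = calloc(1, sizeof(*dev));
    if (!dev)
        return 1;

    dev->priv_bufs[0] = malloc(128);

    /* Wrong order: free(dev) first, then free(dev->priv_bufs[0]) reads
     * from freed memory.  Correct order: release everything reachable
     * through the private area, then free the containing object last. */
    free(dev->priv_bufs[0]);
    free(dev);
    return 0;
}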
diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c index 296c316a8341..f2c0c236392f 100644 --- a/drivers/net/wireless/ath/ath5k/pci.c +++ b/drivers/net/wireless/ath/ath5k/pci.c | |||
@@ -297,7 +297,9 @@ ath5k_pci_remove(struct pci_dev *pdev) | |||
297 | #ifdef CONFIG_PM_SLEEP | 297 | #ifdef CONFIG_PM_SLEEP |
298 | static int ath5k_pci_suspend(struct device *dev) | 298 | static int ath5k_pci_suspend(struct device *dev) |
299 | { | 299 | { |
300 | struct ath5k_softc *sc = pci_get_drvdata(to_pci_dev(dev)); | 300 | struct pci_dev *pdev = to_pci_dev(dev); |
301 | struct ieee80211_hw *hw = pci_get_drvdata(pdev); | ||
302 | struct ath5k_softc *sc = hw->priv; | ||
301 | 303 | ||
302 | ath5k_led_off(sc); | 304 | ath5k_led_off(sc); |
303 | return 0; | 305 | return 0; |
@@ -306,7 +308,8 @@ static int ath5k_pci_suspend(struct device *dev) | |||
306 | static int ath5k_pci_resume(struct device *dev) | 308 | static int ath5k_pci_resume(struct device *dev) |
307 | { | 309 | { |
308 | struct pci_dev *pdev = to_pci_dev(dev); | 310 | struct pci_dev *pdev = to_pci_dev(dev); |
309 | struct ath5k_softc *sc = pci_get_drvdata(pdev); | 311 | struct ieee80211_hw *hw = pci_get_drvdata(pdev); |
312 | struct ath5k_softc *sc = hw->priv; | ||
310 | 313 | ||
311 | /* | 314 | /* |
312 | * Suspend/Resume resets the PCI configuration space, so we have to | 315 | * Suspend/Resume resets the PCI configuration space, so we have to |
diff --git a/drivers/net/wireless/ath/ath5k/sysfs.c b/drivers/net/wireless/ath/ath5k/sysfs.c index 929c68cdf8ab..a073cdce1f15 100644 --- a/drivers/net/wireless/ath/ath5k/sysfs.c +++ b/drivers/net/wireless/ath/ath5k/sysfs.c | |||
@@ -10,7 +10,8 @@ static ssize_t ath5k_attr_show_##name(struct device *dev, \ | |||
10 | struct device_attribute *attr, \ | 10 | struct device_attribute *attr, \ |
11 | char *buf) \ | 11 | char *buf) \ |
12 | { \ | 12 | { \ |
13 | struct ath5k_softc *sc = dev_get_drvdata(dev); \ | 13 | struct ieee80211_hw *hw = dev_get_drvdata(dev); \ |
14 | struct ath5k_softc *sc = hw->priv; \ | ||
14 | return snprintf(buf, PAGE_SIZE, "%d\n", get); \ | 15 | return snprintf(buf, PAGE_SIZE, "%d\n", get); \ |
15 | } \ | 16 | } \ |
16 | \ | 17 | \ |
@@ -18,7 +19,8 @@ static ssize_t ath5k_attr_store_##name(struct device *dev, \ | |||
18 | struct device_attribute *attr, \ | 19 | struct device_attribute *attr, \ |
19 | const char *buf, size_t count) \ | 20 | const char *buf, size_t count) \ |
20 | { \ | 21 | { \ |
21 | struct ath5k_softc *sc = dev_get_drvdata(dev); \ | 22 | struct ieee80211_hw *hw = dev_get_drvdata(dev); \ |
23 | struct ath5k_softc *sc = hw->priv; \ | ||
22 | int val; \ | 24 | int val; \ |
23 | \ | 25 | \ |
24 | val = (int)simple_strtoul(buf, NULL, 10); \ | 26 | val = (int)simple_strtoul(buf, NULL, 10); \ |
@@ -33,7 +35,8 @@ static ssize_t ath5k_attr_show_##name(struct device *dev, \ | |||
33 | struct device_attribute *attr, \ | 35 | struct device_attribute *attr, \ |
34 | char *buf) \ | 36 | char *buf) \ |
35 | { \ | 37 | { \ |
36 | struct ath5k_softc *sc = dev_get_drvdata(dev); \ | 38 | struct ieee80211_hw *hw = dev_get_drvdata(dev); \ |
39 | struct ath5k_softc *sc = hw->priv; \ | ||
37 | return snprintf(buf, PAGE_SIZE, "%d\n", get); \ | 40 | return snprintf(buf, PAGE_SIZE, "%d\n", get); \ |
38 | } \ | 41 | } \ |
39 | static DEVICE_ATTR(name, S_IRUGO, ath5k_attr_show_##name, NULL) | 42 | static DEVICE_ATTR(name, S_IRUGO, ath5k_attr_show_##name, NULL) |
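Both ath5k hunks follow from one change of convention: the device drvdata now holds the struct ieee80211_hw, so every consumer has to fetch the softc through hw->priv instead of casting drvdata directly. A small model of why all retrieval sites must change together (the types here are stand-ins, not the mac80211 API):

#include <stdio.h>

struct hw    { void *priv; };        /* object now stored in drvdata */
struct softc { int led_on; };        /* driver-private state */

static void *drvdata;                /* models dev_set/get_drvdata() */

int main(void)
{
    struct softc sc = { .led_on = 1 };
    struct hw hw = { .priv = &sc };

    drvdata = &hw;                   /* the probe path stores the hw ...  */

    struct hw *h = drvdata;          /* ... so every consumer must read it */
    struct softc *s = h->priv;       /* back as a hw and add one indirection */
    printf("led_on=%d\n", s->led_on);
    return 0;
}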
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 3779b8977d47..33443bcaa8d9 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c | |||
@@ -671,7 +671,8 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid, | |||
671 | * TODO - this could be improved to be dependent on the rate. | 671 | * TODO - this could be improved to be dependent on the rate. |
672 | * The hardware can keep up at lower rates, but not higher rates | 672 | * The hardware can keep up at lower rates, but not higher rates |
673 | */ | 673 | */ |
674 | if (fi->keyix != ATH9K_TXKEYIX_INVALID) | 674 | if ((fi->keyix != ATH9K_TXKEYIX_INVALID) && |
675 | !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) | ||
675 | ndelim += ATH_AGGR_ENCRYPTDELIM; | 676 | ndelim += ATH_AGGR_ENCRYPTDELIM; |
676 | 677 | ||
677 | /* | 678 | /* |
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c index 2fb53d067512..333b69ef2ae2 100644 --- a/drivers/net/wireless/ath/carl9170/usb.c +++ b/drivers/net/wireless/ath/carl9170/usb.c | |||
@@ -112,6 +112,8 @@ static struct usb_device_id carl9170_usb_ids[] = { | |||
112 | { USB_DEVICE(0x04bb, 0x093f) }, | 112 | { USB_DEVICE(0x04bb, 0x093f) }, |
113 | /* NEC WL300NU-G */ | 113 | /* NEC WL300NU-G */ |
114 | { USB_DEVICE(0x0409, 0x0249) }, | 114 | { USB_DEVICE(0x0409, 0x0249) }, |
115 | /* NEC WL300NU-AG */ | ||
116 | { USB_DEVICE(0x0409, 0x02b4) }, | ||
115 | /* AVM FRITZ!WLAN USB Stick N */ | 117 | /* AVM FRITZ!WLAN USB Stick N */ |
116 | { USB_DEVICE(0x057c, 0x8401) }, | 118 | { USB_DEVICE(0x057c, 0x8401) }, |
117 | /* AVM FRITZ!WLAN USB Stick N 2.4 */ | 119 | /* AVM FRITZ!WLAN USB Stick N 2.4 */ |
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c index 092e342c19df..942f7a3969a7 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c | |||
@@ -298,6 +298,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { | |||
298 | {RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/ | 298 | {RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/ |
299 | {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/ | 299 | {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/ |
300 | {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/ | 300 | {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/ |
301 | {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/ | ||
301 | {RTL_USB_DEVICE(0x0Df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ | 302 | {RTL_USB_DEVICE(0x0Df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ |
302 | {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/ | 303 | {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/ |
303 | /* HP - Lite-On ,8188CUS Slim Combo */ | 304 | /* HP - Lite-On ,8188CUS Slim Combo */ |
diff --git a/drivers/pcmcia/pxa2xx_vpac270.c b/drivers/pcmcia/pxa2xx_vpac270.c index 712baab3c83d..e956f659089a 100644 --- a/drivers/pcmcia/pxa2xx_vpac270.c +++ b/drivers/pcmcia/pxa2xx_vpac270.c | |||
@@ -76,10 +76,10 @@ static int vpac270_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | |||
76 | static void vpac270_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) | 76 | static void vpac270_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) |
77 | { | 77 | { |
78 | if (skt->nr == 0) | 78 | if (skt->nr == 0) |
79 | gpio_request_array(vpac270_pcmcia_gpios, | 79 | gpio_free_array(vpac270_pcmcia_gpios, |
80 | ARRAY_SIZE(vpac270_pcmcia_gpios)); | 80 | ARRAY_SIZE(vpac270_pcmcia_gpios)); |
81 | else | 81 | else |
82 | gpio_request_array(vpac270_cf_gpios, | 82 | gpio_free_array(vpac270_cf_gpios, |
83 | ARRAY_SIZE(vpac270_cf_gpios)); | 83 | ARRAY_SIZE(vpac270_cf_gpios)); |
84 | } | 84 | } |
85 | 85 | ||
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c index 2a20dabec76d..d6620ad309ce 100644 --- a/drivers/ssb/driver_pcicore.c +++ b/drivers/ssb/driver_pcicore.c | |||
@@ -516,8 +516,17 @@ static void ssb_pcicore_pcie_setup_workarounds(struct ssb_pcicore *pc) | |||
516 | 516 | ||
517 | static void ssb_pcicore_init_clientmode(struct ssb_pcicore *pc) | 517 | static void ssb_pcicore_init_clientmode(struct ssb_pcicore *pc) |
518 | { | 518 | { |
519 | ssb_pcicore_fix_sprom_core_index(pc); | ||
520 | |||
519 | /* Disable PCI interrupts. */ | 521 | /* Disable PCI interrupts. */ |
520 | ssb_write32(pc->dev, SSB_INTVEC, 0); | 522 | ssb_write32(pc->dev, SSB_INTVEC, 0); |
523 | |||
524 | /* Additional PCIe always once-executed workarounds */ | ||
525 | if (pc->dev->id.coreid == SSB_DEV_PCIE) { | ||
526 | ssb_pcicore_serdes_workaround(pc); | ||
527 | /* TODO: ASPM */ | ||
528 | /* TODO: Clock Request Update */ | ||
529 | } | ||
521 | } | 530 | } |
522 | 531 | ||
523 | void ssb_pcicore_init(struct ssb_pcicore *pc) | 532 | void ssb_pcicore_init(struct ssb_pcicore *pc) |
@@ -529,8 +538,6 @@ void ssb_pcicore_init(struct ssb_pcicore *pc) | |||
529 | if (!ssb_device_is_enabled(dev)) | 538 | if (!ssb_device_is_enabled(dev)) |
530 | ssb_device_enable(dev, 0); | 539 | ssb_device_enable(dev, 0); |
531 | 540 | ||
532 | ssb_pcicore_fix_sprom_core_index(pc); | ||
533 | |||
534 | #ifdef CONFIG_SSB_PCICORE_HOSTMODE | 541 | #ifdef CONFIG_SSB_PCICORE_HOSTMODE |
535 | pc->hostmode = pcicore_is_in_hostmode(pc); | 542 | pc->hostmode = pcicore_is_in_hostmode(pc); |
536 | if (pc->hostmode) | 543 | if (pc->hostmode) |
@@ -538,13 +545,6 @@ void ssb_pcicore_init(struct ssb_pcicore *pc) | |||
538 | #endif /* CONFIG_SSB_PCICORE_HOSTMODE */ | 545 | #endif /* CONFIG_SSB_PCICORE_HOSTMODE */ |
539 | if (!pc->hostmode) | 546 | if (!pc->hostmode) |
540 | ssb_pcicore_init_clientmode(pc); | 547 | ssb_pcicore_init_clientmode(pc); |
541 | |||
542 | /* Additional PCIe always once-executed workarounds */ | ||
543 | if (dev->id.coreid == SSB_DEV_PCIE) { | ||
544 | ssb_pcicore_serdes_workaround(pc); | ||
545 | /* TODO: ASPM */ | ||
546 | /* TODO: Clock Request Update */ | ||
547 | } | ||
548 | } | 548 | } |
549 | 549 | ||
550 | static u32 ssb_pcie_read(struct ssb_pcicore *pc, u32 address) | 550 | static u32 ssb_pcie_read(struct ssb_pcicore *pc, u32 address) |
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index 802ac5eeba28..f9fbbe96c222 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c | |||
@@ -1069,6 +1069,7 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask) | |||
1069 | return 0; | 1069 | return 0; |
1070 | 1070 | ||
1071 | gfs2_log_lock(sdp); | 1071 | gfs2_log_lock(sdp); |
1072 | spin_lock(&sdp->sd_ail_lock); | ||
1072 | head = bh = page_buffers(page); | 1073 | head = bh = page_buffers(page); |
1073 | do { | 1074 | do { |
1074 | if (atomic_read(&bh->b_count)) | 1075 | if (atomic_read(&bh->b_count)) |
@@ -1080,6 +1081,7 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask) | |||
1080 | goto not_possible; | 1081 | goto not_possible; |
1081 | bh = bh->b_this_page; | 1082 | bh = bh->b_this_page; |
1082 | } while(bh != head); | 1083 | } while(bh != head); |
1084 | spin_unlock(&sdp->sd_ail_lock); | ||
1083 | gfs2_log_unlock(sdp); | 1085 | gfs2_log_unlock(sdp); |
1084 | 1086 | ||
1085 | head = bh = page_buffers(page); | 1087 | head = bh = page_buffers(page); |
@@ -1112,6 +1114,7 @@ not_possible: /* Should never happen */ | |||
1112 | WARN_ON(buffer_dirty(bh)); | 1114 | WARN_ON(buffer_dirty(bh)); |
1113 | WARN_ON(buffer_pinned(bh)); | 1115 | WARN_ON(buffer_pinned(bh)); |
1114 | cannot_release: | 1116 | cannot_release: |
1117 | spin_unlock(&sdp->sd_ail_lock); | ||
1115 | gfs2_log_unlock(sdp); | 1118 | gfs2_log_unlock(sdp); |
1116 | return 0; | 1119 | return 0; |
1117 | } | 1120 | } |
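The aops.c hunk extends gfs2_releasepage() to take sd_ail_lock around the buffer scan, which means every exit label, including the cannot_release error path, now has to drop it. A compact userspace model of keeping goto-style error paths lock-balanced (mutexes stand in for the spinlocks):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t ail_lock = PTHREAD_MUTEX_INITIALIZER;

static int release_page(int busy)
{
    int ret = 1;

    pthread_mutex_lock(&log_lock);
    pthread_mutex_lock(&ail_lock);

    if (busy) {
        ret = 0;                    /* failure path still unlocks below */
        goto out;
    }
    /* ... detach buffers while both locks are held ... */
out:
    pthread_mutex_unlock(&ail_lock);
    pthread_mutex_unlock(&log_lock);
    return ret;
}

int main(void)
{
    printf("%d %d\n", release_page(0), release_page(1));
    return 0;
}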
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index 8ef70f464731..2cca29316bd6 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c | |||
@@ -47,10 +47,10 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl) | |||
47 | bd_ail_gl_list); | 47 | bd_ail_gl_list); |
48 | bh = bd->bd_bh; | 48 | bh = bd->bd_bh; |
49 | gfs2_remove_from_ail(bd); | 49 | gfs2_remove_from_ail(bd); |
50 | spin_unlock(&sdp->sd_ail_lock); | ||
51 | |||
52 | bd->bd_bh = NULL; | 50 | bd->bd_bh = NULL; |
53 | bh->b_private = NULL; | 51 | bh->b_private = NULL; |
52 | spin_unlock(&sdp->sd_ail_lock); | ||
53 | |||
54 | bd->bd_blkno = bh->b_blocknr; | 54 | bd->bd_blkno = bh->b_blocknr; |
55 | gfs2_log_lock(sdp); | 55 | gfs2_log_lock(sdp); |
56 | gfs2_assert_withdraw(sdp, !buffer_busy(bh)); | 56 | gfs2_assert_withdraw(sdp, !buffer_busy(bh)); |
@@ -221,8 +221,10 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags) | |||
221 | } | 221 | } |
222 | } | 222 | } |
223 | 223 | ||
224 | if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) | 224 | if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) { |
225 | gfs2_log_flush(gl->gl_sbd, NULL); | ||
225 | gl->gl_sbd->sd_rindex_uptodate = 0; | 226 | gl->gl_sbd->sd_rindex_uptodate = 0; |
227 | } | ||
226 | if (ip && S_ISREG(ip->i_inode.i_mode)) | 228 | if (ip && S_ISREG(ip->i_inode.i_mode)) |
227 | truncate_inode_pages(ip->i_inode.i_mapping, 0); | 229 | truncate_inode_pages(ip->i_inode.i_mapping, 0); |
228 | } | 230 | } |
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index 0a064e91ac70..81206e70cbf6 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/buffer_head.h> | 17 | #include <linux/buffer_head.h> |
18 | #include <linux/rcupdate.h> | 18 | #include <linux/rcupdate.h> |
19 | #include <linux/rculist_bl.h> | 19 | #include <linux/rculist_bl.h> |
20 | #include <linux/completion.h> | ||
20 | 21 | ||
21 | #define DIO_WAIT 0x00000010 | 22 | #define DIO_WAIT 0x00000010 |
22 | #define DIO_METADATA 0x00000020 | 23 | #define DIO_METADATA 0x00000020 |
@@ -546,6 +547,7 @@ struct gfs2_sbd { | |||
546 | struct gfs2_glock *sd_trans_gl; | 547 | struct gfs2_glock *sd_trans_gl; |
547 | wait_queue_head_t sd_glock_wait; | 548 | wait_queue_head_t sd_glock_wait; |
548 | atomic_t sd_glock_disposal; | 549 | atomic_t sd_glock_disposal; |
550 | struct completion sd_locking_init; | ||
549 | 551 | ||
550 | /* Inode Stuff */ | 552 | /* Inode Stuff */ |
551 | 553 | ||
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index 903115f2bb34..85c62923ee29 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c | |||
@@ -903,6 +903,7 @@ void gfs2_meta_syncfs(struct gfs2_sbd *sdp) | |||
903 | if (gfs2_ail1_empty(sdp)) | 903 | if (gfs2_ail1_empty(sdp)) |
904 | break; | 904 | break; |
905 | } | 905 | } |
906 | gfs2_log_flush(sdp, NULL); | ||
906 | } | 907 | } |
907 | 908 | ||
908 | static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp) | 909 | static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp) |
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 8ac9ae189b53..2a77071fb7b6 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c | |||
@@ -72,6 +72,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb) | |||
72 | 72 | ||
73 | init_waitqueue_head(&sdp->sd_glock_wait); | 73 | init_waitqueue_head(&sdp->sd_glock_wait); |
74 | atomic_set(&sdp->sd_glock_disposal, 0); | 74 | atomic_set(&sdp->sd_glock_disposal, 0); |
75 | init_completion(&sdp->sd_locking_init); | ||
75 | spin_lock_init(&sdp->sd_statfs_spin); | 76 | spin_lock_init(&sdp->sd_statfs_spin); |
76 | 77 | ||
77 | spin_lock_init(&sdp->sd_rindex_spin); | 78 | spin_lock_init(&sdp->sd_rindex_spin); |
@@ -1017,11 +1018,13 @@ hostdata_error: | |||
1017 | fsname++; | 1018 | fsname++; |
1018 | if (lm->lm_mount == NULL) { | 1019 | if (lm->lm_mount == NULL) { |
1019 | fs_info(sdp, "Now mounting FS...\n"); | 1020 | fs_info(sdp, "Now mounting FS...\n"); |
1021 | complete(&sdp->sd_locking_init); | ||
1020 | return 0; | 1022 | return 0; |
1021 | } | 1023 | } |
1022 | ret = lm->lm_mount(sdp, fsname); | 1024 | ret = lm->lm_mount(sdp, fsname); |
1023 | if (ret == 0) | 1025 | if (ret == 0) |
1024 | fs_info(sdp, "Joined cluster. Now mounting FS...\n"); | 1026 | fs_info(sdp, "Joined cluster. Now mounting FS...\n"); |
1027 | complete(&sdp->sd_locking_init); | ||
1025 | return ret; | 1028 | return ret; |
1026 | } | 1029 | } |
1027 | 1030 | ||
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index ed540e7018be..fb0edf735483 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c | |||
@@ -757,13 +757,17 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc) | |||
757 | struct timespec atime; | 757 | struct timespec atime; |
758 | struct gfs2_dinode *di; | 758 | struct gfs2_dinode *di; |
759 | int ret = -EAGAIN; | 759 | int ret = -EAGAIN; |
760 | int unlock_required = 0; | ||
760 | 761 | ||
761 | /* Skip timestamp update, if this is from a memalloc */ | 762 | /* Skip timestamp update, if this is from a memalloc */ |
762 | if (current->flags & PF_MEMALLOC) | 763 | if (current->flags & PF_MEMALLOC) |
763 | goto do_flush; | 764 | goto do_flush; |
764 | ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); | 765 | if (!gfs2_glock_is_locked_by_me(ip->i_gl)) { |
765 | if (ret) | 766 | ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); |
766 | goto do_flush; | 767 | if (ret) |
768 | goto do_flush; | ||
769 | unlock_required = 1; | ||
770 | } | ||
767 | ret = gfs2_trans_begin(sdp, RES_DINODE, 0); | 771 | ret = gfs2_trans_begin(sdp, RES_DINODE, 0); |
768 | if (ret) | 772 | if (ret) |
769 | goto do_unlock; | 773 | goto do_unlock; |
@@ -780,7 +784,8 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc) | |||
780 | } | 784 | } |
781 | gfs2_trans_end(sdp); | 785 | gfs2_trans_end(sdp); |
782 | do_unlock: | 786 | do_unlock: |
783 | gfs2_glock_dq_uninit(&gh); | 787 | if (unlock_required) |
788 | gfs2_glock_dq_uninit(&gh); | ||
784 | do_flush: | 789 | do_flush: |
785 | if (wbc->sync_mode == WB_SYNC_ALL) | 790 | if (wbc->sync_mode == WB_SYNC_ALL) |
786 | gfs2_log_flush(GFS2_SB(inode), ip->i_gl); | 791 | gfs2_log_flush(GFS2_SB(inode), ip->i_gl); |
@@ -1427,7 +1432,20 @@ out: | |||
1427 | return error; | 1432 | return error; |
1428 | } | 1433 | } |
1429 | 1434 | ||
1430 | /* | 1435 | /** |
1436 | * gfs2_evict_inode - Remove an inode from cache | ||
1437 | * @inode: The inode to evict | ||
1438 | * | ||
1439 | * There are three cases to consider: | ||
1440 | * 1. i_nlink == 0, we are final opener (and must deallocate) | ||
1441 | * 2. i_nlink == 0, we are not the final opener (and cannot deallocate) | ||
1442 | * 3. i_nlink > 0 | ||
1443 | * | ||
1444 | * If the fs is read only, then we have to treat all cases as per #3 | ||
1445 | * since we are unable to do any deallocation. The inode will be | ||
1446 | * deallocated by the next read/write node to attempt an allocation | ||
1447 | * in the same resource group | ||
1448 | * | ||
1431 | * We have to (at the moment) hold the inode's main lock to cover | 1449 | * We have to (at the moment) hold the inode's main lock to cover |
1432 | * the gap between unlocking the shared lock on the iopen lock and | 1450 | * the gap between unlocking the shared lock on the iopen lock and |
1433 | * taking the exclusive lock. I'd rather do a shared -> exclusive | 1451 | * taking the exclusive lock. I'd rather do a shared -> exclusive |
@@ -1470,6 +1488,8 @@ static void gfs2_evict_inode(struct inode *inode) | |||
1470 | if (error) | 1488 | if (error) |
1471 | goto out_truncate; | 1489 | goto out_truncate; |
1472 | 1490 | ||
1491 | /* Case 1 starts here */ | ||
1492 | |||
1473 | if (S_ISDIR(inode->i_mode) && | 1493 | if (S_ISDIR(inode->i_mode) && |
1474 | (ip->i_diskflags & GFS2_DIF_EXHASH)) { | 1494 | (ip->i_diskflags & GFS2_DIF_EXHASH)) { |
1475 | error = gfs2_dir_exhash_dealloc(ip); | 1495 | error = gfs2_dir_exhash_dealloc(ip); |
@@ -1493,13 +1513,16 @@ static void gfs2_evict_inode(struct inode *inode) | |||
1493 | goto out_unlock; | 1513 | goto out_unlock; |
1494 | 1514 | ||
1495 | out_truncate: | 1515 | out_truncate: |
1516 | /* Case 2 starts here */ | ||
1496 | error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks); | 1517 | error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks); |
1497 | if (error) | 1518 | if (error) |
1498 | goto out_unlock; | 1519 | goto out_unlock; |
1499 | gfs2_final_release_pages(ip); | 1520 | /* Needs to be done before glock release & also in a transaction */ |
1521 | truncate_inode_pages(&inode->i_data, 0); | ||
1500 | gfs2_trans_end(sdp); | 1522 | gfs2_trans_end(sdp); |
1501 | 1523 | ||
1502 | out_unlock: | 1524 | out_unlock: |
1525 | /* Error path for case 1 */ | ||
1503 | if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) | 1526 | if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) |
1504 | gfs2_glock_dq(&ip->i_iopen_gh); | 1527 | gfs2_glock_dq(&ip->i_iopen_gh); |
1505 | gfs2_holder_uninit(&ip->i_iopen_gh); | 1528 | gfs2_holder_uninit(&ip->i_iopen_gh); |
@@ -1507,6 +1530,7 @@ out_unlock: | |||
1507 | if (error && error != GLR_TRYFAILED && error != -EROFS) | 1530 | if (error && error != GLR_TRYFAILED && error != -EROFS) |
1508 | fs_warn(sdp, "gfs2_evict_inode: %d\n", error); | 1531 | fs_warn(sdp, "gfs2_evict_inode: %d\n", error); |
1509 | out: | 1532 | out: |
1533 | /* Case 3 starts here */ | ||
1510 | truncate_inode_pages(&inode->i_data, 0); | 1534 | truncate_inode_pages(&inode->i_data, 0); |
1511 | end_writeback(inode); | 1535 | end_writeback(inode); |
1512 | 1536 | ||
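The gfs2_write_inode() hunk avoids a recursive glock acquisition: the exclusive lock is taken only when the current task does not already hold it, and a local unlock_required flag records whether this call is responsible for dropping it. A userspace sketch of that conditional-lock shape using a pthread mutex and an explicit owner check (gfs2's glocks obviously work differently; this is only the structure of the logic):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t owner;
static int owned;

static int locked_by_me(void)
{
    return owned && pthread_equal(owner, pthread_self());
}

static void do_update(void)
{
    int unlock_required = 0;

    if (!locked_by_me()) {
        pthread_mutex_lock(&lock);
        owner = pthread_self();
        owned = 1;
        unlock_required = 1;       /* we took it, so we drop it */
    }

    /* ... update the inode under the lock ... */

    if (unlock_required) {
        owned = 0;
        pthread_mutex_unlock(&lock);
    }
}

int main(void)
{
    do_update();
    puts("ok");
    return 0;
}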
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c index e20eab37bc80..443cabcfcd23 100644 --- a/fs/gfs2/sys.c +++ b/fs/gfs2/sys.c | |||
@@ -338,6 +338,9 @@ static ssize_t lkfirst_store(struct gfs2_sbd *sdp, const char *buf, size_t len) | |||
338 | rv = sscanf(buf, "%u", &first); | 338 | rv = sscanf(buf, "%u", &first); |
339 | if (rv != 1 || first > 1) | 339 | if (rv != 1 || first > 1) |
340 | return -EINVAL; | 340 | return -EINVAL; |
341 | rv = wait_for_completion_killable(&sdp->sd_locking_init); | ||
342 | if (rv) | ||
343 | return rv; | ||
341 | spin_lock(&sdp->sd_jindex_spin); | 344 | spin_lock(&sdp->sd_jindex_spin); |
342 | rv = -EBUSY; | 345 | rv = -EBUSY; |
343 | if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0) | 346 | if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0) |
@@ -414,7 +417,9 @@ static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len) | |||
414 | rv = sscanf(buf, "%d", &jid); | 417 | rv = sscanf(buf, "%d", &jid); |
415 | if (rv != 1) | 418 | if (rv != 1) |
416 | return -EINVAL; | 419 | return -EINVAL; |
417 | 420 | rv = wait_for_completion_killable(&sdp->sd_locking_init); | |
421 | if (rv) | ||
422 | return rv; | ||
418 | spin_lock(&sdp->sd_jindex_spin); | 423 | spin_lock(&sdp->sd_jindex_spin); |
419 | rv = -EINVAL; | 424 | rv = -EINVAL; |
420 | if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) | 425 | if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) |
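Taken together, the incore.h, ops_fstype.c and sys.c hunks add a completion (sd_locking_init) that is signalled once the lock module mount step has finished, and make the lkfirst/jid sysfs stores wait on it killably so they cannot race with mount-time initialisation. A userspace model of gating late writers on an init completion, built from a condition variable:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  c = PTHREAD_COND_INITIALIZER;
static int done;

static void complete_init(void)          /* mount path */
{
    pthread_mutex_lock(&m);
    done = 1;
    pthread_cond_broadcast(&c);
    pthread_mutex_unlock(&m);
}

static void wait_for_init(void)          /* sysfs store path */
{
    pthread_mutex_lock(&m);
    while (!done)
        pthread_cond_wait(&c, &m);
    pthread_mutex_unlock(&m);
}

static void *store_thread(void *arg)
{
    (void)arg;
    wait_for_init();
    puts("store proceeds only after locking init");
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, store_thread, NULL);
    complete_init();
    pthread_join(t, NULL);
    return 0;
}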
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c index 0bafcc91c27f..f9d03abcd04c 100644 --- a/fs/nfs/nfs4filelayout.c +++ b/fs/nfs/nfs4filelayout.c | |||
@@ -398,7 +398,6 @@ filelayout_write_pagelist(struct nfs_write_data *data, int sync) | |||
398 | * this offset and save the original offset. | 398 | * this offset and save the original offset. |
399 | */ | 399 | */ |
400 | data->args.offset = filelayout_get_dserver_offset(lseg, offset); | 400 | data->args.offset = filelayout_get_dserver_offset(lseg, offset); |
401 | data->mds_offset = offset; | ||
402 | 401 | ||
403 | /* Perform an asynchronous write */ | 402 | /* Perform an asynchronous write */ |
404 | status = nfs_initiate_write(data, ds->ds_clp->cl_rpcclient, | 403 | status = nfs_initiate_write(data, ds->ds_clp->cl_rpcclient, |
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 6870bc61ceec..e6e8f3b9a1de 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c | |||
@@ -91,7 +91,7 @@ static int nfs4_stat_to_errno(int); | |||
91 | #define encode_getfh_maxsz (op_encode_hdr_maxsz) | 91 | #define encode_getfh_maxsz (op_encode_hdr_maxsz) |
92 | #define decode_getfh_maxsz (op_decode_hdr_maxsz + 1 + \ | 92 | #define decode_getfh_maxsz (op_decode_hdr_maxsz + 1 + \ |
93 | ((3+NFS4_FHSIZE) >> 2)) | 93 | ((3+NFS4_FHSIZE) >> 2)) |
94 | #define nfs4_fattr_bitmap_maxsz 3 | 94 | #define nfs4_fattr_bitmap_maxsz 4 |
95 | #define encode_getattr_maxsz (op_encode_hdr_maxsz + nfs4_fattr_bitmap_maxsz) | 95 | #define encode_getattr_maxsz (op_encode_hdr_maxsz + nfs4_fattr_bitmap_maxsz) |
96 | #define nfs4_name_maxsz (1 + ((3 + NFS4_MAXNAMLEN) >> 2)) | 96 | #define nfs4_name_maxsz (1 + ((3 + NFS4_MAXNAMLEN) >> 2)) |
97 | #define nfs4_path_maxsz (1 + ((3 + NFS4_MAXPATHLEN) >> 2)) | 97 | #define nfs4_path_maxsz (1 + ((3 + NFS4_MAXPATHLEN) >> 2)) |
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index e268e3b23497..727168059684 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -864,6 +864,8 @@ static int nfs_write_rpcsetup(struct nfs_page *req, | |||
864 | 864 | ||
865 | data->args.fh = NFS_FH(inode); | 865 | data->args.fh = NFS_FH(inode); |
866 | data->args.offset = req_offset(req) + offset; | 866 | data->args.offset = req_offset(req) + offset; |
867 | /* pnfs_set_layoutcommit needs this */ | ||
868 | data->mds_offset = data->args.offset; | ||
867 | data->args.pgbase = req->wb_pgbase + offset; | 869 | data->args.pgbase = req->wb_pgbase + offset; |
868 | data->args.pages = data->pagevec; | 870 | data->args.pages = data->pagevec; |
869 | data->args.count = count; | 871 | data->args.count = count; |
diff --git a/include/linux/irq.h b/include/linux/irq.h index 8b4538446636..baa397eb9c33 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
@@ -676,7 +676,8 @@ void irq_gc_mask_disable_reg(struct irq_data *d); | |||
676 | void irq_gc_mask_set_bit(struct irq_data *d); | 676 | void irq_gc_mask_set_bit(struct irq_data *d); |
677 | void irq_gc_mask_clr_bit(struct irq_data *d); | 677 | void irq_gc_mask_clr_bit(struct irq_data *d); |
678 | void irq_gc_unmask_enable_reg(struct irq_data *d); | 678 | void irq_gc_unmask_enable_reg(struct irq_data *d); |
679 | void irq_gc_ack(struct irq_data *d); | 679 | void irq_gc_ack_set_bit(struct irq_data *d); |
680 | void irq_gc_ack_clr_bit(struct irq_data *d); | ||
680 | void irq_gc_mask_disable_reg_and_ack(struct irq_data *d); | 681 | void irq_gc_mask_disable_reg_and_ack(struct irq_data *d); |
681 | void irq_gc_eoi(struct irq_data *d); | 682 | void irq_gc_eoi(struct irq_data *d); |
682 | int irq_gc_set_wake(struct irq_data *d, unsigned int on); | 683 | int irq_gc_set_wake(struct irq_data *d, unsigned int on); |
diff --git a/include/linux/memory.h b/include/linux/memory.h index e1e3b2b84f85..935699b30b7c 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h | |||
@@ -20,6 +20,8 @@ | |||
20 | #include <linux/compiler.h> | 20 | #include <linux/compiler.h> |
21 | #include <linux/mutex.h> | 21 | #include <linux/mutex.h> |
22 | 22 | ||
23 | #define MIN_MEMORY_BLOCK_SIZE (1 << SECTION_SIZE_BITS) | ||
24 | |||
23 | struct memory_block { | 25 | struct memory_block { |
24 | unsigned long start_section_nr; | 26 | unsigned long start_section_nr; |
25 | unsigned long end_section_nr; | 27 | unsigned long end_section_nr; |
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index c6927a4d157f..6ad43554ac05 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h | |||
@@ -64,6 +64,19 @@ struct mmc_ext_csd { | |||
64 | unsigned long long enhanced_area_offset; /* Units: Byte */ | 64 | unsigned long long enhanced_area_offset; /* Units: Byte */ |
65 | unsigned int enhanced_area_size; /* Units: KB */ | 65 | unsigned int enhanced_area_size; /* Units: KB */ |
66 | unsigned int boot_size; /* in bytes */ | 66 | unsigned int boot_size; /* in bytes */ |
67 | u8 raw_partition_support; /* 160 */ | ||
68 | u8 raw_erased_mem_count; /* 181 */ | ||
69 | u8 raw_ext_csd_structure; /* 194 */ | ||
70 | u8 raw_card_type; /* 196 */ | ||
71 | u8 raw_s_a_timeout; /* 217 */ | ||
72 | u8 raw_hc_erase_gap_size; /* 221 */ | ||
73 | u8 raw_erase_timeout_mult; /* 223 */ | ||
74 | u8 raw_hc_erase_grp_size; /* 224 */ | ||
75 | u8 raw_sec_trim_mult; /* 229 */ | ||
76 | u8 raw_sec_erase_mult; /* 230 */ | ||
77 | u8 raw_sec_feature_support;/* 231 */ | ||
78 | u8 raw_trim_mult; /* 232 */ | ||
79 | u8 raw_sectors[4]; /* 212 - 4 bytes */ | ||
67 | }; | 80 | }; |
68 | 81 | ||
69 | struct sd_scr { | 82 | struct sd_scr { |
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h index dd6847e5d6e4..6506458ccd33 100644 --- a/include/net/sctp/command.h +++ b/include/net/sctp/command.h | |||
@@ -63,6 +63,7 @@ typedef enum { | |||
63 | SCTP_CMD_ECN_ECNE, /* Do delayed ECNE processing. */ | 63 | SCTP_CMD_ECN_ECNE, /* Do delayed ECNE processing. */ |
64 | SCTP_CMD_ECN_CWR, /* Do delayed CWR processing. */ | 64 | SCTP_CMD_ECN_CWR, /* Do delayed CWR processing. */ |
65 | SCTP_CMD_TIMER_START, /* Start a timer. */ | 65 | SCTP_CMD_TIMER_START, /* Start a timer. */ |
66 | SCTP_CMD_TIMER_START_ONCE, /* Start a timer once */ | ||
66 | SCTP_CMD_TIMER_RESTART, /* Restart a timer. */ | 67 | SCTP_CMD_TIMER_RESTART, /* Restart a timer. */ |
67 | SCTP_CMD_TIMER_STOP, /* Stop a timer. */ | 68 | SCTP_CMD_TIMER_STOP, /* Stop a timer. */ |
68 | SCTP_CMD_INIT_CHOOSE_TRANSPORT, /* Choose transport for an INIT. */ | 69 | SCTP_CMD_INIT_CHOOSE_TRANSPORT, /* Choose transport for an INIT. */ |
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h index 99b027b2adce..ca4693b4e09e 100644 --- a/include/net/sctp/ulpevent.h +++ b/include/net/sctp/ulpevent.h | |||
@@ -80,7 +80,7 @@ static inline struct sctp_ulpevent *sctp_skb2event(struct sk_buff *skb) | |||
80 | 80 | ||
81 | void sctp_ulpevent_free(struct sctp_ulpevent *); | 81 | void sctp_ulpevent_free(struct sctp_ulpevent *); |
82 | int sctp_ulpevent_is_notification(const struct sctp_ulpevent *); | 82 | int sctp_ulpevent_is_notification(const struct sctp_ulpevent *); |
83 | void sctp_queue_purge_ulpevents(struct sk_buff_head *list); | 83 | unsigned int sctp_queue_purge_ulpevents(struct sk_buff_head *list); |
84 | 84 | ||
85 | struct sctp_ulpevent *sctp_ulpevent_make_assoc_change( | 85 | struct sctp_ulpevent *sctp_ulpevent_make_assoc_change( |
86 | const struct sctp_association *asoc, | 86 | const struct sctp_association *asoc, |
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c index 31a9db711906..3a2cab407b93 100644 --- a/kernel/irq/generic-chip.c +++ b/kernel/irq/generic-chip.c | |||
@@ -101,10 +101,10 @@ void irq_gc_unmask_enable_reg(struct irq_data *d) | |||
101 | } | 101 | } |
102 | 102 | ||
103 | /** | 103 | /** |
104 | * irq_gc_ack - Ack pending interrupt | 104 | * irq_gc_ack_set_bit - Ack pending interrupt via setting bit |
105 | * @d: irq_data | 105 | * @d: irq_data |
106 | */ | 106 | */ |
107 | void irq_gc_ack(struct irq_data *d) | 107 | void irq_gc_ack_set_bit(struct irq_data *d) |
108 | { | 108 | { |
109 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | 109 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
110 | u32 mask = 1 << (d->irq - gc->irq_base); | 110 | u32 mask = 1 << (d->irq - gc->irq_base); |
@@ -115,6 +115,20 @@ void irq_gc_ack(struct irq_data *d) | |||
115 | } | 115 | } |
116 | 116 | ||
117 | /** | 117 | /** |
118 | * irq_gc_ack_clr_bit - Ack pending interrupt via clearing bit | ||
119 | * @d: irq_data | ||
120 | */ | ||
121 | void irq_gc_ack_clr_bit(struct irq_data *d) | ||
122 | { | ||
123 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | ||
124 | u32 mask = ~(1 << (d->irq - gc->irq_base)); | ||
125 | |||
126 | irq_gc_lock(gc); | ||
127 | irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack); | ||
128 | irq_gc_unlock(gc); | ||
129 | } | ||
130 | |||
131 | /** | ||
118 | * irq_gc_mask_disable_reg_and_ack- Mask and ack pending interrupt | 132 | * irq_gc_mask_disable_reg_and_ack- Mask and ack pending interrupt |
119 | * @d: irq_data | 133 | * @d: irq_data |
120 | */ | 134 | */ |
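The generic-chip hunks split the old irq_gc_ack() into set-bit and clear-bit variants, because some interrupt controllers acknowledge by writing a 1 to the source's bit while others expect a 0 there with the remaining bits set. A two-function model of the difference in the value written to the ack register:

#include <stdint.h>
#include <stdio.h>

static uint32_t ack_reg;

static void ack_set_bit(unsigned irq) { ack_reg = 1u << irq; }
static void ack_clr_bit(unsigned irq) { ack_reg = ~(1u << irq); }

int main(void)
{
    ack_set_bit(3); printf("set-bit ack: 0x%08x\n", ack_reg);
    ack_clr_bit(3); printf("clr-bit ack: 0x%08x\n", ack_reg);
    return 0;
}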
diff --git a/kernel/sched.c b/kernel/sched.c index 9769c756ad66..3dc716f6d8ad 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -7757,6 +7757,9 @@ static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq) | |||
7757 | #endif | 7757 | #endif |
7758 | #endif | 7758 | #endif |
7759 | cfs_rq->min_vruntime = (u64)(-(1LL << 20)); | 7759 | cfs_rq->min_vruntime = (u64)(-(1LL << 20)); |
7760 | #ifndef CONFIG_64BIT | ||
7761 | cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; | ||
7762 | #endif | ||
7760 | } | 7763 | } |
7761 | 7764 | ||
7762 | static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq) | 7765 | static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq) |
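The sched.c hunk initialises min_vruntime_copy alongside min_vruntime on !CONFIG_64BIT builds; the shadow copy presumably backs a retry-until-consistent read of a 64-bit value on 32-bit CPUs, which cannot load it in a single instruction. A rough, single-threaded model of that double-copy idea, not the scheduler's actual code:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static volatile uint64_t val, val_copy;

/* Writer: publish the shadow copy only after the main value is visible. */
static void writer_update(uint64_t v)
{
    val = v;
    atomic_thread_fence(memory_order_release);
    val_copy = v;
}

/* Reader: retry until both copies agree, i.e. no torn read was seen. */
static uint64_t reader_read(void)
{
    uint64_t a, b;
    do {
        a = val_copy;
        atomic_thread_fence(memory_order_acquire);
        b = val;
    } while (a != b);
    return b;
}

int main(void)
{
    writer_update(123456789012345ULL);
    printf("%llu\n", (unsigned long long)reader_read());
    return 0;
}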
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index d3a05b9ade7a..bcd158f40bb9 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c | |||
@@ -393,6 +393,9 @@ int hci_conn_del(struct hci_conn *conn) | |||
393 | 393 | ||
394 | hci_dev_put(hdev); | 394 | hci_dev_put(hdev); |
395 | 395 | ||
396 | if (conn->handle == 0) | ||
397 | kfree(conn); | ||
398 | |||
396 | return 0; | 399 | return 0; |
397 | } | 400 | } |
398 | 401 | ||
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index c405a954a603..43b4c2deb7cc 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c | |||
@@ -464,7 +464,8 @@ static void hidp_idle_timeout(unsigned long arg) | |||
464 | { | 464 | { |
465 | struct hidp_session *session = (struct hidp_session *) arg; | 465 | struct hidp_session *session = (struct hidp_session *) arg; |
466 | 466 | ||
467 | kthread_stop(session->task); | 467 | atomic_inc(&session->terminate); |
468 | wake_up_process(session->task); | ||
468 | } | 469 | } |
469 | 470 | ||
470 | static void hidp_set_timer(struct hidp_session *session) | 471 | static void hidp_set_timer(struct hidp_session *session) |
@@ -535,7 +536,8 @@ static void hidp_process_hid_control(struct hidp_session *session, | |||
535 | skb_queue_purge(&session->ctrl_transmit); | 536 | skb_queue_purge(&session->ctrl_transmit); |
536 | skb_queue_purge(&session->intr_transmit); | 537 | skb_queue_purge(&session->intr_transmit); |
537 | 538 | ||
538 | kthread_stop(session->task); | 539 | atomic_inc(&session->terminate); |
540 | wake_up_process(current); | ||
539 | } | 541 | } |
540 | } | 542 | } |
541 | 543 | ||
@@ -706,9 +708,8 @@ static int hidp_session(void *arg) | |||
706 | add_wait_queue(sk_sleep(intr_sk), &intr_wait); | 708 | add_wait_queue(sk_sleep(intr_sk), &intr_wait); |
707 | session->waiting_for_startup = 0; | 709 | session->waiting_for_startup = 0; |
708 | wake_up_interruptible(&session->startup_queue); | 710 | wake_up_interruptible(&session->startup_queue); |
709 | while (!kthread_should_stop()) { | 711 | set_current_state(TASK_INTERRUPTIBLE); |
710 | set_current_state(TASK_INTERRUPTIBLE); | 712 | while (!atomic_read(&session->terminate)) { |
711 | |||
712 | if (ctrl_sk->sk_state != BT_CONNECTED || | 713 | if (ctrl_sk->sk_state != BT_CONNECTED || |
713 | intr_sk->sk_state != BT_CONNECTED) | 714 | intr_sk->sk_state != BT_CONNECTED) |
714 | break; | 715 | break; |
@@ -726,6 +727,7 @@ static int hidp_session(void *arg) | |||
726 | hidp_process_transmit(session); | 727 | hidp_process_transmit(session); |
727 | 728 | ||
728 | schedule(); | 729 | schedule(); |
730 | set_current_state(TASK_INTERRUPTIBLE); | ||
729 | } | 731 | } |
730 | set_current_state(TASK_RUNNING); | 732 | set_current_state(TASK_RUNNING); |
731 | remove_wait_queue(sk_sleep(intr_sk), &intr_wait); | 733 | remove_wait_queue(sk_sleep(intr_sk), &intr_wait); |
@@ -1060,7 +1062,8 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, | |||
1060 | err_add_device: | 1062 | err_add_device: |
1061 | hid_destroy_device(session->hid); | 1063 | hid_destroy_device(session->hid); |
1062 | session->hid = NULL; | 1064 | session->hid = NULL; |
1063 | kthread_stop(session->task); | 1065 | atomic_inc(&session->terminate); |
1066 | wake_up_process(session->task); | ||
1064 | 1067 | ||
1065 | unlink: | 1068 | unlink: |
1066 | hidp_del_timer(session); | 1069 | hidp_del_timer(session); |
@@ -1111,7 +1114,8 @@ int hidp_del_connection(struct hidp_conndel_req *req) | |||
1111 | skb_queue_purge(&session->ctrl_transmit); | 1114 | skb_queue_purge(&session->ctrl_transmit); |
1112 | skb_queue_purge(&session->intr_transmit); | 1115 | skb_queue_purge(&session->intr_transmit); |
1113 | 1116 | ||
1114 | kthread_stop(session->task); | 1117 | atomic_inc(&session->terminate); |
1118 | wake_up_process(session->task); | ||
1115 | } | 1119 | } |
1116 | } else | 1120 | } else |
1117 | err = -ENOENT; | 1121 | err = -ENOENT; |
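The hidp hunks replace kthread_stop() with an atomic terminate flag plus wake_up_process(), so the session thread can be asked to exit from places where sleeping is not an option (the idle timer, for instance), and the thread re-checks the flag each time around its loop. A userspace approximation with pthreads; here the stop request takes a mutex for correctness, whereas the kernel version only sets the flag and wakes the task:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int terminate;
static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  c = PTHREAD_COND_INITIALIZER;

/* The session loop: keep working until the terminate flag is set. */
static void *session_thread(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&m);
    while (!atomic_load(&terminate)) {
        /* ... process queued ctrl/intr work ... */
        pthread_cond_wait(&c, &m);       /* plays the role of schedule() */
    }
    pthread_mutex_unlock(&m);
    puts("session thread exiting");
    return NULL;
}

/* Ask the thread to stop; the kernel version never blocks here. */
static void request_stop(void)
{
    pthread_mutex_lock(&m);
    atomic_store(&terminate, 1);
    pthread_cond_signal(&c);             /* like wake_up_process() */
    pthread_mutex_unlock(&m);
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, session_thread, NULL);
    request_stop();
    pthread_join(t, NULL);
    return 0;
}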
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h index 19e95004b286..af1bcc823f26 100644 --- a/net/bluetooth/hidp/hidp.h +++ b/net/bluetooth/hidp/hidp.h | |||
@@ -142,6 +142,7 @@ struct hidp_session { | |||
142 | uint ctrl_mtu; | 142 | uint ctrl_mtu; |
143 | uint intr_mtu; | 143 | uint intr_mtu; |
144 | 144 | ||
145 | atomic_t terminate; | ||
145 | struct task_struct *task; | 146 | struct task_struct *task; |
146 | 147 | ||
147 | unsigned char keys[8]; | 148 | unsigned char keys[8]; |
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 56fdd9162da9..ebff14c69078 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c | |||
@@ -2323,7 +2323,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
2323 | 2323 | ||
2324 | sk = chan->sk; | 2324 | sk = chan->sk; |
2325 | 2325 | ||
2326 | if (sk->sk_state != BT_CONFIG) { | 2326 | if ((bt_sk(sk)->defer_setup && sk->sk_state != BT_CONNECT2) || |
2327 | (!bt_sk(sk)->defer_setup && sk->sk_state != BT_CONFIG)) { | ||
2327 | struct l2cap_cmd_rej rej; | 2328 | struct l2cap_cmd_rej rej; |
2328 | 2329 | ||
2329 | rej.reason = cpu_to_le16(0x0002); | 2330 | rej.reason = cpu_to_le16(0x0002); |
@@ -2334,7 +2335,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
2334 | 2335 | ||
2335 | /* Reject if config buffer is too small. */ | 2336 | /* Reject if config buffer is too small. */ |
2336 | len = cmd_len - sizeof(*req); | 2337 | len = cmd_len - sizeof(*req); |
2337 | if (chan->conf_len + len > sizeof(chan->conf_req)) { | 2338 | if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) { |
2338 | l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, | 2339 | l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, |
2339 | l2cap_build_conf_rsp(chan, rsp, | 2340 | l2cap_build_conf_rsp(chan, rsp, |
2340 | L2CAP_CONF_REJECT, flags), rsp); | 2341 | L2CAP_CONF_REJECT, flags), rsp); |
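The l2cap_config_req() change adds a len < 0 check: cmd_len comes off the wire, so a request shorter than the fixed header would make the derived option length negative and defeat the existing overflow test. A small standalone illustration of validating the derived length before accumulating into the config buffer (constants and names invented):

#include <stdio.h>
#include <string.h>

#define CONF_REQ_HDR_LEN 4
#define CONF_BUF_SIZE    64

static int handle_config(unsigned char *conf_buf, int *conf_len,
                         const unsigned char *pkt, int cmd_len)
{
    int len = cmd_len - CONF_REQ_HDR_LEN;

    /* Reject malformed requests before touching the buffer. */
    if (len < 0 || *conf_len + len > CONF_BUF_SIZE)
        return -1;

    memcpy(conf_buf + *conf_len, pkt + CONF_REQ_HDR_LEN, (size_t)len);
    *conf_len += len;
    return 0;
}

int main(void)
{
    unsigned char buf[CONF_BUF_SIZE], pkt[8] = {0};
    int used = 0;

    printf("short packet rejected: %d\n", handle_config(buf, &used, pkt, 2));
    printf("normal packet ok:      %d\n", handle_config(buf, &used, pkt, 8));
    return 0;
}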
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 58ffa7d069c7..669d2e32efb6 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c | |||
@@ -877,7 +877,8 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, | |||
877 | for (i = 0; i < IEEE80211_NUM_BANDS; i++) { | 877 | for (i = 0; i < IEEE80211_NUM_BANDS; i++) { |
878 | local->sched_scan_ies.ie[i] = kzalloc(2 + | 878 | local->sched_scan_ies.ie[i] = kzalloc(2 + |
879 | IEEE80211_MAX_SSID_LEN + | 879 | IEEE80211_MAX_SSID_LEN + |
880 | local->scan_ies_len, | 880 | local->scan_ies_len + |
881 | req->ie_len, | ||
881 | GFP_KERNEL); | 882 | GFP_KERNEL); |
882 | if (!local->sched_scan_ies.ie[i]) { | 883 | if (!local->sched_scan_ies.ie[i]) { |
883 | ret = -ENOMEM; | 884 | ret = -ENOMEM; |
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index d91c1a26630d..8f6a302d2ac3 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c | |||
@@ -86,6 +86,11 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx) | |||
86 | struct sk_buff *skb = rx->skb; | 86 | struct sk_buff *skb = rx->skb; |
87 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); | 87 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); |
88 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | 88 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
89 | int queue = rx->queue; | ||
90 | |||
91 | /* otherwise, TKIP is vulnerable to TID 0 vs. non-QoS replays */ | ||
92 | if (rx->queue == NUM_RX_DATA_QUEUES - 1) | ||
93 | queue = 0; | ||
89 | 94 | ||
90 | /* | 95 | /* |
91 | * it makes no sense to check for MIC errors on anything other | 96 | * it makes no sense to check for MIC errors on anything other |
@@ -148,8 +153,8 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx) | |||
148 | 153 | ||
149 | update_iv: | 154 | update_iv: |
150 | /* update IV in key information to be able to detect replays */ | 155 | /* update IV in key information to be able to detect replays */ |
151 | rx->key->u.tkip.rx[rx->queue].iv32 = rx->tkip_iv32; | 156 | rx->key->u.tkip.rx[queue].iv32 = rx->tkip_iv32; |
152 | rx->key->u.tkip.rx[rx->queue].iv16 = rx->tkip_iv16; | 157 | rx->key->u.tkip.rx[queue].iv16 = rx->tkip_iv16; |
153 | 158 | ||
154 | return RX_CONTINUE; | 159 | return RX_CONTINUE; |
155 | 160 | ||
@@ -241,6 +246,11 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx) | |||
241 | struct ieee80211_key *key = rx->key; | 246 | struct ieee80211_key *key = rx->key; |
242 | struct sk_buff *skb = rx->skb; | 247 | struct sk_buff *skb = rx->skb; |
243 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); | 248 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); |
249 | int queue = rx->queue; | ||
250 | |||
251 | /* otherwise, TKIP is vulnerable to TID 0 vs. non-QoS replays */ | ||
252 | if (rx->queue == NUM_RX_DATA_QUEUES - 1) | ||
253 | queue = 0; | ||
244 | 254 | ||
245 | hdrlen = ieee80211_hdrlen(hdr->frame_control); | 255 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
246 | 256 | ||
@@ -261,7 +271,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx) | |||
261 | res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm, | 271 | res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm, |
262 | key, skb->data + hdrlen, | 272 | key, skb->data + hdrlen, |
263 | skb->len - hdrlen, rx->sta->sta.addr, | 273 | skb->len - hdrlen, rx->sta->sta.addr, |
264 | hdr->addr1, hwaccel, rx->queue, | 274 | hdr->addr1, hwaccel, queue, |
265 | &rx->tkip_iv32, | 275 | &rx->tkip_iv32, |
266 | &rx->tkip_iv16); | 276 | &rx->tkip_iv16); |
267 | if (res != TKIP_DECRYPT_OK) | 277 | if (res != TKIP_DECRYPT_OK) |
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 1c88c8911dc5..d03682109b7a 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c | |||
@@ -1582,6 +1582,8 @@ static void sctp_check_transmitted(struct sctp_outq *q, | |||
1582 | #endif /* SCTP_DEBUG */ | 1582 | #endif /* SCTP_DEBUG */ |
1583 | if (transport) { | 1583 | if (transport) { |
1584 | if (bytes_acked) { | 1584 | if (bytes_acked) { |
1585 | struct sctp_association *asoc = transport->asoc; | ||
1586 | |||
1585 | /* We may have counted DATA that was migrated | 1587 | /* We may have counted DATA that was migrated |
1586 | * to this transport due to DEL-IP operation. | 1588 | * to this transport due to DEL-IP operation. |
1587 | * Subtract those bytes, since they were never | 1589 | * Subtract those bytes, since they were never |
@@ -1600,6 +1602,17 @@ static void sctp_check_transmitted(struct sctp_outq *q, | |||
1600 | transport->error_count = 0; | 1602 | transport->error_count = 0; |
1601 | transport->asoc->overall_error_count = 0; | 1603 | transport->asoc->overall_error_count = 0; |
1602 | 1604 | ||
1605 | /* | ||
1606 | * While in SHUTDOWN PENDING, we may have started | ||
1607 | * the T5 shutdown guard timer after reaching the | ||
1608 | * retransmission limit. Stop that timer as soon | ||
1609 | * as the receiver acknowledged any data. | ||
1610 | */ | ||
1611 | if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING && | ||
1612 | del_timer(&asoc->timers | ||
1613 | [SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD])) | ||
1614 | sctp_association_put(asoc); | ||
1615 | |||
1603 | /* Mark the destination transport address as | 1616 | /* Mark the destination transport address as |
1604 | * active if it is not so marked. | 1617 | * active if it is not so marked. |
1605 | */ | 1618 | */ |
@@ -1629,10 +1642,15 @@ static void sctp_check_transmitted(struct sctp_outq *q, | |||
1629 | * A sender is doing zero window probing when the | 1642 | * A sender is doing zero window probing when the |
1630 | * receiver's advertised window is zero, and there is | 1643 | * receiver's advertised window is zero, and there is |
1631 | * only one data chunk in flight to the receiver. | 1644 | * only one data chunk in flight to the receiver. |
1645 | * | ||
1646 | * Allow the association to timeout while in SHUTDOWN | ||
1647 | * PENDING or SHUTDOWN RECEIVED in case the receiver | ||
1648 | * stays in zero window mode forever. | ||
1632 | */ | 1649 | */ |
1633 | if (!q->asoc->peer.rwnd && | 1650 | if (!q->asoc->peer.rwnd && |
1634 | !list_empty(&tlist) && | 1651 | !list_empty(&tlist) && |
1635 | (sack_ctsn+2 == q->asoc->next_tsn)) { | 1652 | (sack_ctsn+2 == q->asoc->next_tsn) && |
1653 | q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) { | ||
1636 | SCTP_DEBUG_PRINTK("%s: SACK received for zero " | 1654 | SCTP_DEBUG_PRINTK("%s: SACK received for zero " |
1637 | "window probe: %u\n", | 1655 | "window probe: %u\n", |
1638 | __func__, sack_ctsn); | 1656 | __func__, sack_ctsn); |
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 534c2e5feb05..6e0f88295aaf 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c | |||
@@ -670,10 +670,19 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds, | |||
670 | /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the | 670 | /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the |
671 | * HEARTBEAT should clear the error counter of the destination | 671 | * HEARTBEAT should clear the error counter of the destination |
672 | * transport address to which the HEARTBEAT was sent. | 672 | * transport address to which the HEARTBEAT was sent. |
673 | * The association's overall error count is also cleared. | ||
674 | */ | 673 | */ |
675 | t->error_count = 0; | 674 | t->error_count = 0; |
676 | t->asoc->overall_error_count = 0; | 675 | |
676 | /* | ||
677 | * Although RFC4960 specifies that the overall error count must | ||
678 | * be cleared when a HEARTBEAT ACK is received, we make an | ||
679 | * exception while in SHUTDOWN PENDING. If the peer keeps its | ||
680 | * window shut forever, we may never be able to transmit our | ||
681 | * outstanding data and rely on the retransmission limit being reached | ||
682 | * to shutdown the association. | ||
683 | */ | ||
684 | if (t->asoc->state != SCTP_STATE_SHUTDOWN_PENDING) | ||
685 | t->asoc->overall_error_count = 0; | ||
677 | 686 | ||
678 | /* Clear the hb_sent flag to signal that we had a good | 687 | /* Clear the hb_sent flag to signal that we had a good |
679 | * acknowledgement. | 688 | * acknowledgement. |
@@ -1437,6 +1446,13 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, | |||
1437 | sctp_cmd_setup_t2(commands, asoc, cmd->obj.ptr); | 1446 | sctp_cmd_setup_t2(commands, asoc, cmd->obj.ptr); |
1438 | break; | 1447 | break; |
1439 | 1448 | ||
1449 | case SCTP_CMD_TIMER_START_ONCE: | ||
1450 | timer = &asoc->timers[cmd->obj.to]; | ||
1451 | |||
1452 | if (timer_pending(timer)) | ||
1453 | break; | ||
1454 | /* fall through */ | ||
1455 | |||
1440 | case SCTP_CMD_TIMER_START: | 1456 | case SCTP_CMD_TIMER_START: |
1441 | timer = &asoc->timers[cmd->obj.to]; | 1457 | timer = &asoc->timers[cmd->obj.to]; |
1442 | timeout = asoc->timeouts[cmd->obj.to]; | 1458 | timeout = asoc->timeouts[cmd->obj.to]; |
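The new SCTP_CMD_TIMER_START_ONCE falls through to the normal start path only when the timer is not already pending, so the T5 guard timer armed from the retransmission path is not rearmed over and over. A minimal model of "start once" versus plain "start":

#include <stdio.h>

struct timer { int pending; int expires; };

static void timer_start(struct timer *t, int timeout)
{
    t->pending = 1;
    t->expires = timeout;          /* unconditionally (re)armed */
}

static void timer_start_once(struct timer *t, int timeout)
{
    if (t->pending)
        return;                    /* already running: leave it alone */
    timer_start(t, timeout);
}

int main(void)
{
    struct timer t = {0};

    timer_start_once(&t, 100);
    timer_start_once(&t, 500);     /* ignored: first deadline stands */
    printf("expires=%d pending=%d\n", t.expires, t.pending);
    return 0;
}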
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index a297283154d5..246117142b5c 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -5154,7 +5154,7 @@ sctp_disposition_t sctp_sf_do_9_2_start_shutdown( | |||
5154 | * The sender of the SHUTDOWN MAY also start an overall guard timer | 5154 | * The sender of the SHUTDOWN MAY also start an overall guard timer |
5155 | * 'T5-shutdown-guard' to bound the overall time for shutdown sequence. | 5155 | * 'T5-shutdown-guard' to bound the overall time for shutdown sequence. |
5156 | */ | 5156 | */ |
5157 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, | 5157 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, |
5158 | SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); | 5158 | SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); |
5159 | 5159 | ||
5160 | if (asoc->autoclose) | 5160 | if (asoc->autoclose) |
@@ -5299,14 +5299,28 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep, | |||
5299 | SCTP_INC_STATS(SCTP_MIB_T3_RTX_EXPIREDS); | 5299 | SCTP_INC_STATS(SCTP_MIB_T3_RTX_EXPIREDS); |
5300 | 5300 | ||
5301 | if (asoc->overall_error_count >= asoc->max_retrans) { | 5301 | if (asoc->overall_error_count >= asoc->max_retrans) { |
5302 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | 5302 | if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) { |
5303 | SCTP_ERROR(ETIMEDOUT)); | 5303 | /* |
5304 | /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ | 5304 | * We are here likely because the receiver had its rwnd |
5305 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | 5305 | * closed for a while and we have not been able to |
5306 | SCTP_PERR(SCTP_ERROR_NO_ERROR)); | 5306 | * transmit the locally queued data within the maximum |
5307 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | 5307 | * retransmission attempts limit. Start the T5 |
5308 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); | 5308 | * shutdown guard timer to give the receiver one last |
5309 | return SCTP_DISPOSITION_DELETE_TCB; | 5309 | * chance and some additional time to recover before |
5310 | * aborting. | ||
5311 | */ | ||
5312 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START_ONCE, | ||
5313 | SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); | ||
5314 | } else { | ||
5315 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | ||
5316 | SCTP_ERROR(ETIMEDOUT)); | ||
5317 | /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ | ||
5318 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | ||
5319 | SCTP_PERR(SCTP_ERROR_NO_ERROR)); | ||
5320 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | ||
5321 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); | ||
5322 | return SCTP_DISPOSITION_DELETE_TCB; | ||
5323 | } | ||
5310 | } | 5324 | } |
5311 | 5325 | ||
5312 | /* E1) For the destination address for which the timer | 5326 | /* E1) For the destination address for which the timer |
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c index 0338dc6fdc9d..7c211a7f90f4 100644 --- a/net/sctp/sm_statetable.c +++ b/net/sctp/sm_statetable.c | |||
@@ -827,7 +827,7 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_ | |||
827 | /* SCTP_STATE_ESTABLISHED */ \ | 827 | /* SCTP_STATE_ESTABLISHED */ \ |
828 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ | 828 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
829 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 829 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
830 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ | 830 | TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \ |
831 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 831 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
832 | TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \ | 832 | TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \ |
833 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 833 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 08c6238802de..d3ccf7973c59 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -1384,6 +1384,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout) | |||
1384 | struct sctp_endpoint *ep; | 1384 | struct sctp_endpoint *ep; |
1385 | struct sctp_association *asoc; | 1385 | struct sctp_association *asoc; |
1386 | struct list_head *pos, *temp; | 1386 | struct list_head *pos, *temp; |
1387 | unsigned int data_was_unread; | ||
1387 | 1388 | ||
1388 | SCTP_DEBUG_PRINTK("sctp_close(sk: 0x%p, timeout:%ld)\n", sk, timeout); | 1389 | SCTP_DEBUG_PRINTK("sctp_close(sk: 0x%p, timeout:%ld)\n", sk, timeout); |
1389 | 1390 | ||
@@ -1393,6 +1394,10 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout) | |||
1393 | 1394 | ||
1394 | ep = sctp_sk(sk)->ep; | 1395 | ep = sctp_sk(sk)->ep; |
1395 | 1396 | ||
1397 | /* Clean up any skbs sitting on the receive queue. */ | ||
1398 | data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue); | ||
1399 | data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby); | ||
1400 | |||
1396 | /* Walk all associations on an endpoint. */ | 1401 | /* Walk all associations on an endpoint. */ |
1397 | list_for_each_safe(pos, temp, &ep->asocs) { | 1402 | list_for_each_safe(pos, temp, &ep->asocs) { |
1398 | asoc = list_entry(pos, struct sctp_association, asocs); | 1403 | asoc = list_entry(pos, struct sctp_association, asocs); |
@@ -1410,7 +1415,9 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout) | |||
1410 | } | 1415 | } |
1411 | } | 1416 | } |
1412 | 1417 | ||
1413 | if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { | 1418 | if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) || |
1419 | !skb_queue_empty(&asoc->ulpq.reasm) || | ||
1420 | (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) { | ||
1414 | struct sctp_chunk *chunk; | 1421 | struct sctp_chunk *chunk; |
1415 | 1422 | ||
1416 | chunk = sctp_make_abort_user(asoc, NULL, 0); | 1423 | chunk = sctp_make_abort_user(asoc, NULL, 0); |
@@ -1420,10 +1427,6 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout) | |||
1420 | sctp_primitive_SHUTDOWN(asoc, NULL); | 1427 | sctp_primitive_SHUTDOWN(asoc, NULL); |
1421 | } | 1428 | } |
1422 | 1429 | ||
1423 | /* Clean up any skbs sitting on the receive queue. */ | ||
1424 | sctp_queue_purge_ulpevents(&sk->sk_receive_queue); | ||
1425 | sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby); | ||
1426 | |||
1427 | /* On a TCP-style socket, block for at most linger_time if set. */ | 1430 | /* On a TCP-style socket, block for at most linger_time if set. */ |
1428 | if (sctp_style(sk, TCP) && timeout) | 1431 | if (sctp_style(sk, TCP) && timeout) |
1429 | sctp_wait_for_close(sk, timeout); | 1432 | sctp_wait_for_close(sk, timeout); |
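Taken together, the sctp_close() hunks above move the receive-queue purge ahead of the association walk and make it report how many data bytes were thrown away: if the application closes with data still unread, with fragments waiting in the reassembly queue, or with out-of-order chunks in the ordering lobby, the association is aborted rather than shut down gracefully, much like TCP resetting a connection that is closed with unread data. A minimal sketch of just that decision, using the field names from the hunk; close_should_abort() itself is a made-up helper, not part of the patch:

    /* Sketch only: mirrors the condition added to sctp_close(). */
    static bool close_should_abort(struct sock *sk,
                                   struct sctp_association *asoc,
                                   unsigned int data_was_unread)
    {
            if (data_was_unread)                      /* bytes purged from the rx queues */
                    return true;
            if (!skb_queue_empty(&asoc->ulpq.lobby))  /* out-of-order chunks pending */
                    return true;
            if (!skb_queue_empty(&asoc->ulpq.reasm))  /* partial message fragments */
                    return true;
            /* SO_LINGER with a zero timeout forced an abort before this patch too. */
            return sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime;
    }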
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index e70e5fc87890..8a84017834c2 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c | |||
@@ -1081,9 +1081,19 @@ void sctp_ulpevent_free(struct sctp_ulpevent *event) | |||
1081 | } | 1081 | } |
1082 | 1082 | ||
1083 | /* Purge the skb lists holding ulpevents. */ | 1083 | /* Purge the skb lists holding ulpevents. */ |
1084 | void sctp_queue_purge_ulpevents(struct sk_buff_head *list) | 1084 | unsigned int sctp_queue_purge_ulpevents(struct sk_buff_head *list) |
1085 | { | 1085 | { |
1086 | struct sk_buff *skb; | 1086 | struct sk_buff *skb; |
1087 | while ((skb = skb_dequeue(list)) != NULL) | 1087 | unsigned int data_unread = 0; |
1088 | sctp_ulpevent_free(sctp_skb2event(skb)); | 1088 | |
1089 | while ((skb = skb_dequeue(list)) != NULL) { | ||
1090 | struct sctp_ulpevent *event = sctp_skb2event(skb); | ||
1091 | |||
1092 | if (!sctp_ulpevent_is_notification(event)) | ||
1093 | data_unread += skb->len; | ||
1094 | |||
1095 | sctp_ulpevent_free(event); | ||
1096 | } | ||
1097 | |||
1098 | return data_unread; | ||
1089 | } | 1099 | } |
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index 9a80a922c527..e45d2fbbe5a8 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c | |||
@@ -597,7 +597,7 @@ void rpcb_getport_async(struct rpc_task *task) | |||
597 | u32 bind_version; | 597 | u32 bind_version; |
598 | struct rpc_xprt *xprt; | 598 | struct rpc_xprt *xprt; |
599 | struct rpc_clnt *rpcb_clnt; | 599 | struct rpc_clnt *rpcb_clnt; |
600 | static struct rpcbind_args *map; | 600 | struct rpcbind_args *map; |
601 | struct rpc_task *child; | 601 | struct rpc_task *child; |
602 | struct sockaddr_storage addr; | 602 | struct sockaddr_storage addr; |
603 | struct sockaddr *sap = (struct sockaddr *)&addr; | 603 | struct sockaddr *sap = (struct sockaddr *)&addr; |
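The one-word rpcb_getport_async() change above is easy to miss: map was declared static, so every concurrent rpcbind lookup shared one rpcbind_args pointer slot, and a second in-flight call could overwrite the value another task was still using. Dropping the qualifier gives each invocation its own automatic variable. A generic userspace illustration of why function-scope static storage is wrong for per-call state (purely illustrative, not RPC code):

    #include <stdio.h>

    /* Every caller shares the same storage: two "independent" calls
     * return pointers to the very same object.
     */
    static int *counter_shared(void)
    {
            static int value;       /* one instance for the whole program */
            value++;
            return &value;
    }

    /* Per-call state: nothing is shared between invocations. */
    static int counter_private(int *value)
    {
            return ++*value;
    }

    int main(void)
    {
            int *a = counter_shared();
            int *b = counter_shared();
            printf("shared:  %d %d (same address: %p %p)\n",
                   *a, *b, (void *)a, (void *)b);

            int x = 0, y = 0;
            printf("private: %d %d\n",
                   counter_private(&x), counter_private(&y));
            return 0;
    }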
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index a27406b1654f..4814e246a874 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -616,30 +616,25 @@ static void __rpc_execute(struct rpc_task *task) | |||
616 | BUG_ON(RPC_IS_QUEUED(task)); | 616 | BUG_ON(RPC_IS_QUEUED(task)); |
617 | 617 | ||
618 | for (;;) { | 618 | for (;;) { |
619 | void (*do_action)(struct rpc_task *); | ||
619 | 620 | ||
620 | /* | 621 | /* |
621 | * Execute any pending callback. | 622 | * Execute any pending callback first. |
622 | */ | 623 | */ |
623 | if (task->tk_callback) { | 624 | do_action = task->tk_callback; |
624 | void (*save_callback)(struct rpc_task *); | 625 | task->tk_callback = NULL; |
625 | 626 | if (do_action == NULL) { | |
626 | /* | ||
627 | * We set tk_callback to NULL before calling it, | ||
628 | * in case it sets the tk_callback field itself: | ||
629 | */ | ||
630 | save_callback = task->tk_callback; | ||
631 | task->tk_callback = NULL; | ||
632 | save_callback(task); | ||
633 | } else { | ||
634 | /* | 627 | /* |
635 | * Perform the next FSM step. | 628 | * Perform the next FSM step. |
636 | * tk_action may be NULL when the task has been killed | 629 | * tk_action may be NULL if the task has been killed. |
637 | * by someone else. | 630 | * In particular, note that rpc_killall_tasks may |
631 | * do this at any time, so beware when dereferencing. | ||
638 | */ | 632 | */ |
639 | if (task->tk_action == NULL) | 633 | do_action = task->tk_action; |
634 | if (do_action == NULL) | ||
640 | break; | 635 | break; |
641 | task->tk_action(task); | ||
642 | } | 636 | } |
637 | do_action(task); | ||
643 | 638 | ||
644 | /* | 639 | /* |
645 | * Lockless check for whether task is sleeping or not. | 640 | * Lockless check for whether task is sleeping or not. |
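The __rpc_execute() rewrite above replaces "test the function pointer, then call through it" with "snapshot the pointer into a local, test the local, call the local". As the new comment notes, rpc_killall_tasks() can clear tk_action at any moment, so the old sequence could pass its NULL check and then jump through a pointer that had just been wiped. A hedged sketch of the load-once shape outside the RPC code; the structure and function names here are invented for illustration:

    /* Sketch of the pattern only: read a pointer that another context may
     * clear exactly once, and never re-read it between the check and the call.
     */
    struct demo_task {
            void (*callback)(struct demo_task *);   /* one-shot, runs first     */
            void (*action)(struct demo_task *);     /* next state-machine step  */
    };

    static void demo_execute_step(struct demo_task *t)
    {
            void (*do_action)(struct demo_task *);

            /* Clear the callback before calling it so the callback may
             * safely install a new one.
             */
            do_action = t->callback;
            t->callback = NULL;
            if (do_action == NULL) {
                    /* Single load of the action pointer: a concurrent
                     * "kill" that NULLs it cannot strike between the
                     * check and the call.
                     */
                    do_action = t->action;
                    if (do_action == NULL)
                            return;     /* task was killed */
            }
            do_action(t);
    }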
diff --git a/net/wireless/core.c b/net/wireless/core.c index c22ef3492ee6..880dbe2e6f94 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -366,6 +366,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv) | |||
366 | 366 | ||
367 | mutex_init(&rdev->mtx); | 367 | mutex_init(&rdev->mtx); |
368 | mutex_init(&rdev->devlist_mtx); | 368 | mutex_init(&rdev->devlist_mtx); |
369 | mutex_init(&rdev->sched_scan_mtx); | ||
369 | INIT_LIST_HEAD(&rdev->netdev_list); | 370 | INIT_LIST_HEAD(&rdev->netdev_list); |
370 | spin_lock_init(&rdev->bss_lock); | 371 | spin_lock_init(&rdev->bss_lock); |
371 | INIT_LIST_HEAD(&rdev->bss_list); | 372 | INIT_LIST_HEAD(&rdev->bss_list); |
@@ -701,6 +702,7 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev) | |||
701 | rfkill_destroy(rdev->rfkill); | 702 | rfkill_destroy(rdev->rfkill); |
702 | mutex_destroy(&rdev->mtx); | 703 | mutex_destroy(&rdev->mtx); |
703 | mutex_destroy(&rdev->devlist_mtx); | 704 | mutex_destroy(&rdev->devlist_mtx); |
705 | mutex_destroy(&rdev->sched_scan_mtx); | ||
704 | list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list) | 706 | list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list) |
705 | cfg80211_put_bss(&scan->pub); | 707 | cfg80211_put_bss(&scan->pub); |
706 | cfg80211_rdev_free_wowlan(rdev); | 708 | cfg80211_rdev_free_wowlan(rdev); |
@@ -737,12 +739,16 @@ static void wdev_cleanup_work(struct work_struct *work) | |||
737 | ___cfg80211_scan_done(rdev, true); | 739 | ___cfg80211_scan_done(rdev, true); |
738 | } | 740 | } |
739 | 741 | ||
742 | cfg80211_unlock_rdev(rdev); | ||
743 | |||
744 | mutex_lock(&rdev->sched_scan_mtx); | ||
745 | |||
740 | if (WARN_ON(rdev->sched_scan_req && | 746 | if (WARN_ON(rdev->sched_scan_req && |
741 | rdev->sched_scan_req->dev == wdev->netdev)) { | 747 | rdev->sched_scan_req->dev == wdev->netdev)) { |
742 | __cfg80211_stop_sched_scan(rdev, false); | 748 | __cfg80211_stop_sched_scan(rdev, false); |
743 | } | 749 | } |
744 | 750 | ||
745 | cfg80211_unlock_rdev(rdev); | 751 | mutex_unlock(&rdev->sched_scan_mtx); |
746 | 752 | ||
747 | mutex_lock(&rdev->devlist_mtx); | 753 | mutex_lock(&rdev->devlist_mtx); |
748 | rdev->opencount--; | 754 | rdev->opencount--; |
@@ -830,9 +836,9 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb, | |||
830 | break; | 836 | break; |
831 | case NL80211_IFTYPE_P2P_CLIENT: | 837 | case NL80211_IFTYPE_P2P_CLIENT: |
832 | case NL80211_IFTYPE_STATION: | 838 | case NL80211_IFTYPE_STATION: |
833 | cfg80211_lock_rdev(rdev); | 839 | mutex_lock(&rdev->sched_scan_mtx); |
834 | __cfg80211_stop_sched_scan(rdev, false); | 840 | __cfg80211_stop_sched_scan(rdev, false); |
835 | cfg80211_unlock_rdev(rdev); | 841 | mutex_unlock(&rdev->sched_scan_mtx); |
836 | 842 | ||
837 | wdev_lock(wdev); | 843 | wdev_lock(wdev); |
838 | #ifdef CONFIG_CFG80211_WEXT | 844 | #ifdef CONFIG_CFG80211_WEXT |
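The cfg80211 hunks above give scheduled scans their own lock instead of piggybacking on the global rdev mutex: sched_scan_mtx is initialized in wiphy_new() and destroyed in cfg80211_dev_free(), wdev_cleanup_work() now drops the rdev lock before taking it (so the two are never nested), and the netdev notifier stops a scheduled scan under the new mutex alone. The rule the change establishes is that rdev->sched_scan_req may only be tested and used while sched_scan_mtx is held. A small sketch of that rule; the helper name is hypothetical:

    /* Sketch only: check and act under the same lock, so the request
     * cannot be stopped and freed between the test and the use.
     */
    static void demo_stop_sched_scan_on(struct cfg80211_registered_device *rdev,
                                        struct net_device *dev)
    {
            mutex_lock(&rdev->sched_scan_mtx);
            if (rdev->sched_scan_req && rdev->sched_scan_req->dev == dev)
                    __cfg80211_stop_sched_scan(rdev, false);
            mutex_unlock(&rdev->sched_scan_mtx);
    }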
diff --git a/net/wireless/core.h b/net/wireless/core.h index 3dce1f167eba..a570ff9214ec 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h | |||
@@ -65,6 +65,8 @@ struct cfg80211_registered_device { | |||
65 | struct work_struct scan_done_wk; | 65 | struct work_struct scan_done_wk; |
66 | struct work_struct sched_scan_results_wk; | 66 | struct work_struct sched_scan_results_wk; |
67 | 67 | ||
68 | struct mutex sched_scan_mtx; | ||
69 | |||
68 | #ifdef CONFIG_NL80211_TESTMODE | 70 | #ifdef CONFIG_NL80211_TESTMODE |
69 | struct genl_info *testmode_info; | 71 | struct genl_info *testmode_info; |
70 | #endif | 72 | #endif |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index f07602d7bf68..cea338150d05 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -3461,9 +3461,6 @@ static int nl80211_start_sched_scan(struct sk_buff *skb, | |||
3461 | if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) | 3461 | if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) |
3462 | return -EINVAL; | 3462 | return -EINVAL; |
3463 | 3463 | ||
3464 | if (rdev->sched_scan_req) | ||
3465 | return -EINPROGRESS; | ||
3466 | |||
3467 | if (!info->attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]) | 3464 | if (!info->attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]) |
3468 | return -EINVAL; | 3465 | return -EINVAL; |
3469 | 3466 | ||
@@ -3502,12 +3499,21 @@ static int nl80211_start_sched_scan(struct sk_buff *skb, | |||
3502 | if (ie_len > wiphy->max_scan_ie_len) | 3499 | if (ie_len > wiphy->max_scan_ie_len) |
3503 | return -EINVAL; | 3500 | return -EINVAL; |
3504 | 3501 | ||
3502 | mutex_lock(&rdev->sched_scan_mtx); | ||
3503 | |||
3504 | if (rdev->sched_scan_req) { | ||
3505 | err = -EINPROGRESS; | ||
3506 | goto out; | ||
3507 | } | ||
3508 | |||
3505 | request = kzalloc(sizeof(*request) | 3509 | request = kzalloc(sizeof(*request) |
3506 | + sizeof(*request->ssids) * n_ssids | 3510 | + sizeof(*request->ssids) * n_ssids |
3507 | + sizeof(*request->channels) * n_channels | 3511 | + sizeof(*request->channels) * n_channels |
3508 | + ie_len, GFP_KERNEL); | 3512 | + ie_len, GFP_KERNEL); |
3509 | if (!request) | 3513 | if (!request) { |
3510 | return -ENOMEM; | 3514 | err = -ENOMEM; |
3515 | goto out; | ||
3516 | } | ||
3511 | 3517 | ||
3512 | if (n_ssids) | 3518 | if (n_ssids) |
3513 | request->ssids = (void *)&request->channels[n_channels]; | 3519 | request->ssids = (void *)&request->channels[n_channels]; |
@@ -3605,6 +3611,7 @@ static int nl80211_start_sched_scan(struct sk_buff *skb, | |||
3605 | out_free: | 3611 | out_free: |
3606 | kfree(request); | 3612 | kfree(request); |
3607 | out: | 3613 | out: |
3614 | mutex_unlock(&rdev->sched_scan_mtx); | ||
3608 | return err; | 3615 | return err; |
3609 | } | 3616 | } |
3610 | 3617 | ||
@@ -3612,12 +3619,17 @@ static int nl80211_stop_sched_scan(struct sk_buff *skb, | |||
3612 | struct genl_info *info) | 3619 | struct genl_info *info) |
3613 | { | 3620 | { |
3614 | struct cfg80211_registered_device *rdev = info->user_ptr[0]; | 3621 | struct cfg80211_registered_device *rdev = info->user_ptr[0]; |
3622 | int err; | ||
3615 | 3623 | ||
3616 | if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) || | 3624 | if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) || |
3617 | !rdev->ops->sched_scan_stop) | 3625 | !rdev->ops->sched_scan_stop) |
3618 | return -EOPNOTSUPP; | 3626 | return -EOPNOTSUPP; |
3619 | 3627 | ||
3620 | return __cfg80211_stop_sched_scan(rdev, false); | 3628 | mutex_lock(&rdev->sched_scan_mtx); |
3629 | err = __cfg80211_stop_sched_scan(rdev, false); | ||
3630 | mutex_unlock(&rdev->sched_scan_mtx); | ||
3631 | |||
3632 | return err; | ||
3621 | } | 3633 | } |
3622 | 3634 | ||
3623 | static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags, | 3635 | static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags, |
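In nl80211_start_sched_scan() the -EINPROGRESS test moves under sched_scan_mtx, and the early returns become goto out / goto out_free so the mutex is always released on the way out; nl80211_stop_sched_scan() simply brackets the stop call with the same mutex. A condensed sketch of that error-path shape, with validation and request setup elided; demo_start_sched_scan() is illustrative, not the real handler:

    static int demo_start_sched_scan(struct cfg80211_registered_device *rdev,
                                     size_t req_size)
    {
            struct cfg80211_sched_scan_request *request;
            int err = 0;

            mutex_lock(&rdev->sched_scan_mtx);

            if (rdev->sched_scan_req) {
                    err = -EINPROGRESS;     /* now checked under the lock */
                    goto out;
            }

            request = kzalloc(req_size, GFP_KERNEL);
            if (!request) {
                    err = -ENOMEM;
                    goto out;
            }

            /* ... fill in the request and hand it to the driver ... */
            rdev->sched_scan_req = request;
    out:
            mutex_unlock(&rdev->sched_scan_mtx);    /* every exit path unlocks */
            return err;
    }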
diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 7a6c67667d70..ae0c2256ba3b 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c | |||
@@ -100,14 +100,14 @@ void __cfg80211_sched_scan_results(struct work_struct *wk) | |||
100 | rdev = container_of(wk, struct cfg80211_registered_device, | 100 | rdev = container_of(wk, struct cfg80211_registered_device, |
101 | sched_scan_results_wk); | 101 | sched_scan_results_wk); |
102 | 102 | ||
103 | cfg80211_lock_rdev(rdev); | 103 | mutex_lock(&rdev->sched_scan_mtx); |
104 | 104 | ||
105 | /* we don't have sched_scan_req anymore if the scan is stopping */ | 105 | /* we don't have sched_scan_req anymore if the scan is stopping */ |
106 | if (rdev->sched_scan_req) | 106 | if (rdev->sched_scan_req) |
107 | nl80211_send_sched_scan_results(rdev, | 107 | nl80211_send_sched_scan_results(rdev, |
108 | rdev->sched_scan_req->dev); | 108 | rdev->sched_scan_req->dev); |
109 | 109 | ||
110 | cfg80211_unlock_rdev(rdev); | 110 | mutex_unlock(&rdev->sched_scan_mtx); |
111 | } | 111 | } |
112 | 112 | ||
113 | void cfg80211_sched_scan_results(struct wiphy *wiphy) | 113 | void cfg80211_sched_scan_results(struct wiphy *wiphy) |
@@ -123,9 +123,9 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy) | |||
123 | { | 123 | { |
124 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 124 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
125 | 125 | ||
126 | cfg80211_lock_rdev(rdev); | 126 | mutex_lock(&rdev->sched_scan_mtx); |
127 | __cfg80211_stop_sched_scan(rdev, true); | 127 | __cfg80211_stop_sched_scan(rdev, true); |
128 | cfg80211_unlock_rdev(rdev); | 128 | mutex_unlock(&rdev->sched_scan_mtx); |
129 | } | 129 | } |
130 | EXPORT_SYMBOL(cfg80211_sched_scan_stopped); | 130 | EXPORT_SYMBOL(cfg80211_sched_scan_stopped); |
131 | 131 | ||
@@ -135,7 +135,7 @@ int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev, | |||
135 | int err; | 135 | int err; |
136 | struct net_device *dev; | 136 | struct net_device *dev; |
137 | 137 | ||
138 | ASSERT_RDEV_LOCK(rdev); | 138 | lockdep_assert_held(&rdev->sched_scan_mtx); |
139 | 139 | ||
140 | if (!rdev->sched_scan_req) | 140 | if (!rdev->sched_scan_req) |
141 | return 0; | 141 | return 0; |
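With the rdev lock out of the picture, __cfg80211_stop_sched_scan() documents its new contract with lockdep_assert_held(&rdev->sched_scan_mtx) instead of ASSERT_RDEV_LOCK(): callers must already hold the scheduled-scan mutex, and with lockdep enabled a forgotten lock triggers a runtime warning. A small sketch of the same idiom; the helper is hypothetical:

    /* Sketch only: the assertion costs nothing without lockdep and is a
     * loud WARN with it, so the locking contract is both documented and
     * checked.
     */
    static int demo_sched_scan_locked_helper(struct cfg80211_registered_device *rdev)
    {
            lockdep_assert_held(&rdev->sched_scan_mtx);

            if (!rdev->sched_scan_req)
                    return 0;

            /* ... safe to dereference rdev->sched_scan_req here ... */
            return 0;
    }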
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index d70f85eb7864..9414b9c5b1e4 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -1345,6 +1345,8 @@ out: | |||
1345 | xfrm_state_check_expire(x1); | 1345 | xfrm_state_check_expire(x1); |
1346 | 1346 | ||
1347 | err = 0; | 1347 | err = 0; |
1348 | x->km.state = XFRM_STATE_DEAD; | ||
1349 | __xfrm_state_put(x); | ||
1348 | } | 1350 | } |
1349 | spin_unlock_bh(&x1->lock); | 1351 | spin_unlock_bh(&x1->lock); |
1350 | 1352 | ||
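The xfrm_state_update() hunk runs only on the success path, after the caller-supplied state x has been merged into the existing entry x1: it appears to close a leak there by marking the now-redundant temporary state dead and dropping a reference so it can actually be freed instead of lingering. A generic, hedged sketch of that "mark dead, then drop the local reference" pattern with invented types, not xfrm code:

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct demo_state {
            struct kref ref;
            bool dead;      /* late observers (timers, notifiers) must skip it */
    };

    static void demo_state_release(struct kref *ref)
    {
            kfree(container_of(ref, struct demo_state, ref));
    }

    static struct demo_state *demo_state_alloc(void)
    {
            struct demo_state *s = kzalloc(sizeof(*s), GFP_KERNEL);

            if (s)
                    kref_init(&s->ref);
            return s;
    }

    /* Once the temporary state's contents have been merged elsewhere,
     * flag it as dead first, then give back the reference we hold.
     */
    static void demo_state_discard(struct demo_state *s)
    {
            s->dead = true;
            kref_put(&s->ref, demo_state_release);
    }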
diff --git a/scripts/depmod.sh b/scripts/depmod.sh index 3b029cba2baf..a27235685949 100755 --- a/scripts/depmod.sh +++ b/scripts/depmod.sh | |||
@@ -21,13 +21,15 @@ fi | |||
21 | # older versions of depmod require the version string to start with three | 21 | # older versions of depmod require the version string to start with three |
22 | # numbers, so we cheat with a symlink here | 22 | # numbers, so we cheat with a symlink here |
23 | depmod_hack_needed=true | 23 | depmod_hack_needed=true |
24 | mkdir -p .tmp_depmod/lib/modules/$KERNELRELEASE | 24 | tmp_dir=$(mktemp -d ${TMPDIR:-/tmp}/depmod.XXXXXX) |
25 | if "$DEPMOD" -b .tmp_depmod $KERNELRELEASE 2>/dev/null; then | 25 | mkdir -p "$tmp_dir/lib/modules/$KERNELRELEASE" |
26 | if test -e .tmp_depmod/lib/modules/$KERNELRELEASE/modules.dep -o \ | 26 | if "$DEPMOD" -b "$tmp_dir" $KERNELRELEASE 2>/dev/null; then |
27 | -e .tmp_depmod/lib/modules/$KERNELRELEASE/modules.dep.bin; then | 27 | if test -e "$tmp_dir/lib/modules/$KERNELRELEASE/modules.dep" -o \ |
28 | -e "$tmp_dir/lib/modules/$KERNELRELEASE/modules.dep.bin"; then | ||
28 | depmod_hack_needed=false | 29 | depmod_hack_needed=false |
29 | fi | 30 | fi |
30 | fi | 31 | fi |
32 | rm -rf "$tmp_dir" | ||
31 | if $depmod_hack_needed; then | 33 | if $depmod_hack_needed; then |
32 | symlink="$INSTALL_MOD_PATH/lib/modules/99.98.$KERNELRELEASE" | 34 | symlink="$INSTALL_MOD_PATH/lib/modules/99.98.$KERNELRELEASE" |
33 | ln -s "$KERNELRELEASE" "$symlink" | 35 | ln -s "$KERNELRELEASE" "$symlink" |