-rw-r--r--  Makefile | 2
-rw-r--r--  arch/sparc/kernel/pci_sun4v.c | 4
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 4
-rw-r--r--  drivers/ata/Kconfig | 2
-rw-r--r--  drivers/devfreq/Kconfig | 13
-rw-r--r--  drivers/devfreq/Makefile | 3
-rw-r--r--  drivers/devfreq/devfreq.c | 15
-rw-r--r--  drivers/devfreq/exynos4_bus.c | 1135
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 12
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 6
-rw-r--r--  drivers/md/bitmap.c | 5
-rw-r--r--  drivers/md/linear.c | 1
-rw-r--r--  drivers/md/md.c | 3
-rw-r--r--  drivers/md/raid5.c | 14
-rw-r--r--  drivers/media/video/omap3isp/ispccdc.c | 2
-rw-r--r--  drivers/media/video/omap3isp/ispstat.c | 2
-rw-r--r--  drivers/mfd/ab5500-debugfs.c | 2
-rw-r--r--  drivers/mfd/ab8500-core.c | 2
-rw-r--r--  drivers/mfd/adp5520.c | 2
-rw-r--r--  drivers/mfd/da903x.c | 3
-rw-r--r--  drivers/mfd/jz4740-adc.c | 1
-rw-r--r--  drivers/mfd/tps6586x.c | 2
-rw-r--r--  drivers/mfd/tps65910.c | 2
-rw-r--r--  drivers/mfd/twl-core.c | 16
-rw-r--r--  drivers/mfd/twl4030-irq.c | 18
-rw-r--r--  drivers/mfd/wm8994-core.c | 1
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 14
-rw-r--r--  drivers/net/ethernet/ti/davinci_cpdma.c | 2
-rw-r--r--  drivers/net/usb/asix.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/rc.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-rxon.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-tx.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans-pcie.c | 4
-rw-r--r--  drivers/net/wireless/mwifiex/cmdevt.c | 9
-rw-r--r--  drivers/usb/dwc3/core.c | 2
-rw-r--r--  drivers/usb/gadget/epautoconf.c | 3
-rw-r--r--  drivers/usb/host/isp1760-if.c | 8
-rw-r--r--  drivers/usb/musb/musb_host.c | 4
-rw-r--r--  firmware/README.AddingFirmware | 3
-rw-r--r--  fs/btrfs/async-thread.c | 3
-rw-r--r--  fs/btrfs/inode.c | 9
-rw-r--r--  fs/fs-writeback.c | 11
-rw-r--r--  include/linux/lglock.h | 36
-rw-r--r--  include/net/dst.h | 1
-rw-r--r--  include/net/flow.h | 1
-rw-r--r--  include/net/sctp/structs.h | 4
-rw-r--r--  include/net/sock.h | 4
-rw-r--r--  include/trace/events/writeback.h | 15
-rw-r--r--  kernel/cpu.c | 4
-rw-r--r--  kernel/kmod.c | 4
-rw-r--r--  mm/filemap.c | 7
-rw-r--r--  net/bluetooth/hci_conn.c | 2
-rw-r--r--  net/bluetooth/l2cap_core.c | 12
-rw-r--r--  net/bluetooth/rfcomm/core.c | 1
-rw-r--r--  net/bridge/br_netfilter.c | 8
-rw-r--r--  net/core/flow.c | 12
-rw-r--r--  net/core/net-sysfs.c | 7
-rw-r--r--  net/core/sock.c | 6
-rw-r--r--  net/ipv4/ipconfig.c | 4
-rw-r--r--  net/ipv4/route.c | 112
-rw-r--r--  net/ipv6/ip6_output.c | 2
-rw-r--r--  net/llc/af_llc.c | 14
-rw-r--r--  net/netfilter/xt_connbytes.c | 6
-rw-r--r--  net/nfc/nci/core.c | 2
-rw-r--r--  net/packet/af_packet.c | 6
-rw-r--r--  net/sched/sch_mqprio.c | 2
-rw-r--r--  net/sctp/associola.c | 2
-rw-r--r--  net/sctp/output.c | 8
-rw-r--r--  net/sctp/outqueue.c | 6
-rw-r--r--  net/sctp/protocol.c | 3
-rw-r--r--  net/sctp/socket.c | 2
-rw-r--r--  net/sctp/sysctl.c | 13
-rw-r--r--  net/xfrm/xfrm_policy.c | 18
-rw-r--r--  scripts/kconfig/Makefile | 5
-rw-r--r--  sound/atmel/ac97c.c | 4
76 files changed, 1543 insertions(+), 157 deletions(-)
diff --git a/Makefile b/Makefile
index a43733df3978..ea51081812f3 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 2 2PATCHLEVEL = 2
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc6 4EXTRAVERSION = -rc7
5NAME = Saber-toothed Squirrel 5NAME = Saber-toothed Squirrel
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index b272cda35a01..af5755d20fbe 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -849,10 +849,10 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
849 if (!irq) 849 if (!irq)
850 return -ENOMEM; 850 return -ENOMEM;
851 851
852 if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
853 return -EINVAL;
854 if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID)) 852 if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
855 return -EINVAL; 853 return -EINVAL;
854 if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
855 return -EINVAL;
856 856
857 return irq; 857 return irq;
858} 858}
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index bfab3fa10edc..7b65f752c5f8 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -568,8 +568,8 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
568 break; 568 break;
569 } 569 }
570 if (filter[i].jt != 0) { 570 if (filter[i].jt != 0) {
571 if (filter[i].jf) 571 if (filter[i].jf && f_offset)
572 t_offset += is_near(f_offset) ? 2 : 6; 572 t_offset += is_near(f_offset) ? 2 : 5;
573 EMIT_COND_JMP(t_op, t_offset); 573 EMIT_COND_JMP(t_op, t_offset);
574 if (filter[i].jf) 574 if (filter[i].jf)
575 EMIT_JMP(f_offset); 575 EMIT_JMP(f_offset);
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 6bdedd7cca2c..cf047c406d92 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -820,7 +820,7 @@ config PATA_PLATFORM
820 820
821config PATA_OF_PLATFORM 821config PATA_OF_PLATFORM
822 tristate "OpenFirmware platform device PATA support" 822 tristate "OpenFirmware platform device PATA support"
823 depends on PATA_PLATFORM && OF 823 depends on PATA_PLATFORM && OF && OF_IRQ
824 help 824 help
825 This option enables support for generic directly connected ATA 825 This option enables support for generic directly connected ATA
826 devices commonly found on embedded systems with OpenFirmware 826 devices commonly found on embedded systems with OpenFirmware
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 8f0491037080..464fa2147dfb 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -65,4 +65,17 @@ config DEVFREQ_GOV_USERSPACE
65 65
66comment "DEVFREQ Drivers" 66comment "DEVFREQ Drivers"
67 67
68config ARM_EXYNOS4_BUS_DEVFREQ
69 bool "ARM Exynos4210/4212/4412 Memory Bus DEVFREQ Driver"
70 depends on CPU_EXYNOS4210 || CPU_EXYNOS4212 || CPU_EXYNOS4412
71 select ARCH_HAS_OPP
72 select DEVFREQ_GOV_SIMPLE_ONDEMAND
73 help
74 This adds the DEVFREQ driver for Exynos4210 memory bus (vdd_int)
75 and Exynos4212/4412 memory interface and bus (vdd_mif + vdd_int).
76 It reads PPMU counters of memory controllers and adjusts
77 the operating frequencies and voltages with OPP support.
78 To operate with optimal voltages, ASV support is required
79 (CONFIG_EXYNOS_ASV).
80
68endif # PM_DEVFREQ 81endif # PM_DEVFREQ
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index 4564a89e970a..8c464234f7e7 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -3,3 +3,6 @@ obj-$(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) += governor_simpleondemand.o
3obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE) += governor_performance.o 3obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE) += governor_performance.o
4obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE) += governor_powersave.o 4obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE) += governor_powersave.o
5obj-$(CONFIG_DEVFREQ_GOV_USERSPACE) += governor_userspace.o 5obj-$(CONFIG_DEVFREQ_GOV_USERSPACE) += governor_userspace.o
6
7# DEVFREQ Drivers
8obj-$(CONFIG_ARM_EXYNOS4_BUS_DEVFREQ) += exynos4_bus.o
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 59d24e9cb8c5..c189b82f5ece 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -347,7 +347,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
347 if (!IS_ERR(devfreq)) { 347 if (!IS_ERR(devfreq)) {
348 dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__); 348 dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
349 err = -EINVAL; 349 err = -EINVAL;
350 goto out; 350 goto err_out;
351 } 351 }
352 } 352 }
353 353
@@ -356,7 +356,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
356 dev_err(dev, "%s: Unable to create devfreq for the device\n", 356 dev_err(dev, "%s: Unable to create devfreq for the device\n",
357 __func__); 357 __func__);
358 err = -ENOMEM; 358 err = -ENOMEM;
359 goto out; 359 goto err_out;
360 } 360 }
361 361
362 mutex_init(&devfreq->lock); 362 mutex_init(&devfreq->lock);
@@ -399,17 +399,16 @@ struct devfreq *devfreq_add_device(struct device *dev,
399 devfreq->next_polling); 399 devfreq->next_polling);
400 } 400 }
401 mutex_unlock(&devfreq_list_lock); 401 mutex_unlock(&devfreq_list_lock);
402 goto out; 402out:
403 return devfreq;
404
403err_init: 405err_init:
404 device_unregister(&devfreq->dev); 406 device_unregister(&devfreq->dev);
405err_dev: 407err_dev:
406 mutex_unlock(&devfreq->lock); 408 mutex_unlock(&devfreq->lock);
407 kfree(devfreq); 409 kfree(devfreq);
408out: 410err_out:
409 if (err) 411 return ERR_PTR(err);
410 return ERR_PTR(err);
411 else
412 return devfreq;
413} 412}
414 413
415/** 414/**
diff --git a/drivers/devfreq/exynos4_bus.c b/drivers/devfreq/exynos4_bus.c
new file mode 100644
index 000000000000..6460577d6701
--- /dev/null
+++ b/drivers/devfreq/exynos4_bus.c
@@ -0,0 +1,1135 @@
1/* drivers/devfreq/exynos4210_memorybus.c
2 *
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 * MyungJoo Ham <myungjoo.ham@samsung.com>
6 *
7 * EXYNOS4 - Memory/Bus clock frequency scaling support in DEVFREQ framework
8 * This version supports EXYNOS4210 only. This changes bus frequencies
9 * and vddint voltages. Exynos4412/4212 should be able to be supported
10 * with minor modifications.
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 */
17
18#include <linux/io.h>
19#include <linux/slab.h>
20#include <linux/mutex.h>
21#include <linux/suspend.h>
22#include <linux/opp.h>
23#include <linux/devfreq.h>
24#include <linux/platform_device.h>
25#include <linux/regulator/consumer.h>
26#include <linux/module.h>
27
28/* Exynos4 ASV has been in the mailing list, but not upstreamed, yet. */
29#ifdef CONFIG_EXYNOS_ASV
30extern unsigned int exynos_result_of_asv;
31#endif
32
33#include <mach/regs-clock.h>
34
35#include <plat/map-s5p.h>
36
37#define MAX_SAFEVOLT 1200000 /* 1.2V */
38
39enum exynos4_busf_type {
40 TYPE_BUSF_EXYNOS4210,
41 TYPE_BUSF_EXYNOS4x12,
42};
43
44/* Assume that the bus is saturated if the utilization is 40% */
45#define BUS_SATURATION_RATIO 40
46
47enum ppmu_counter {
48 PPMU_PMNCNT0 = 0,
49 PPMU_PMCCNT1,
50 PPMU_PMNCNT2,
51 PPMU_PMNCNT3,
52 PPMU_PMNCNT_MAX,
53};
54struct exynos4_ppmu {
55 void __iomem *hw_base;
56 unsigned int ccnt;
57 unsigned int event;
58 unsigned int count[PPMU_PMNCNT_MAX];
59 bool ccnt_overflow;
60 bool count_overflow[PPMU_PMNCNT_MAX];
61};
62
63enum busclk_level_idx {
64 LV_0 = 0,
65 LV_1,
66 LV_2,
67 LV_3,
68 LV_4,
69 _LV_END
70};
71#define EX4210_LV_MAX LV_2
72#define EX4x12_LV_MAX LV_4
73#define EX4210_LV_NUM (LV_2 + 1)
74#define EX4x12_LV_NUM (LV_4 + 1)
75
76struct busfreq_data {
77 enum exynos4_busf_type type;
78 struct device *dev;
79 struct devfreq *devfreq;
80 bool disabled;
81 struct regulator *vdd_int;
82 struct regulator *vdd_mif; /* Exynos4412/4212 only */
83 struct opp *curr_opp;
84 struct exynos4_ppmu dmc[2];
85
86 struct notifier_block pm_notifier;
87 struct mutex lock;
88
89 /* Dividers calculated at boot/probe-time */
90 unsigned int dmc_divtable[_LV_END]; /* DMC0 */
91 unsigned int top_divtable[_LV_END];
92};
93
94struct bus_opp_table {
95 unsigned int idx;
96 unsigned long clk;
97 unsigned long volt;
98};
99
100/* 4210 controls clock of mif and voltage of int */
101static struct bus_opp_table exynos4210_busclk_table[] = {
102 {LV_0, 400000, 1150000},
103 {LV_1, 267000, 1050000},
104 {LV_2, 133000, 1025000},
105 {0, 0, 0},
106};
107
108/*
109 * MIF is the main control knob clock for exynox4x12 MIF/INT
110 * clock and voltage of both mif/int are controlled.
111 */
112static struct bus_opp_table exynos4x12_mifclk_table[] = {
113 {LV_0, 400000, 1100000},
114 {LV_1, 267000, 1000000},
115 {LV_2, 160000, 950000},
116 {LV_3, 133000, 950000},
117 {LV_4, 100000, 950000},
118 {0, 0, 0},
119};
120
121/*
122 * INT is not the control knob of 4x12. LV_x is not meant to represent
123 * the current performance. (MIF does)
124 */
125static struct bus_opp_table exynos4x12_intclk_table[] = {
126 {LV_0, 200000, 1000000},
127 {LV_1, 160000, 950000},
128 {LV_2, 133000, 925000},
129 {LV_3, 100000, 900000},
130 {0, 0, 0},
131};
132
133/* TODO: asv volt definitions are "__initdata"? */
134/* Some chips have different operating voltages */
135static unsigned int exynos4210_asv_volt[][EX4210_LV_NUM] = {
136 {1150000, 1050000, 1050000},
137 {1125000, 1025000, 1025000},
138 {1100000, 1000000, 1000000},
139 {1075000, 975000, 975000},
140 {1050000, 950000, 950000},
141};
142
143static unsigned int exynos4x12_mif_step_50[][EX4x12_LV_NUM] = {
144 /* 400 267 160 133 100 */
145 {1050000, 950000, 900000, 900000, 900000}, /* ASV0 */
146 {1050000, 950000, 900000, 900000, 900000}, /* ASV1 */
147 {1050000, 950000, 900000, 900000, 900000}, /* ASV2 */
148 {1050000, 900000, 900000, 900000, 900000}, /* ASV3 */
149 {1050000, 900000, 900000, 900000, 850000}, /* ASV4 */
150 {1050000, 900000, 900000, 850000, 850000}, /* ASV5 */
151 {1050000, 900000, 850000, 850000, 850000}, /* ASV6 */
152 {1050000, 900000, 850000, 850000, 850000}, /* ASV7 */
153 {1050000, 900000, 850000, 850000, 850000}, /* ASV8 */
154};
155
156static unsigned int exynos4x12_int_volt[][EX4x12_LV_NUM] = {
157 /* 200 160 133 100 */
158 {1000000, 950000, 925000, 900000}, /* ASV0 */
159 {975000, 925000, 925000, 900000}, /* ASV1 */
160 {950000, 925000, 900000, 875000}, /* ASV2 */
161 {950000, 900000, 900000, 875000}, /* ASV3 */
162 {925000, 875000, 875000, 875000}, /* ASV4 */
163 {900000, 850000, 850000, 850000}, /* ASV5 */
164 {900000, 850000, 850000, 850000}, /* ASV6 */
165 {900000, 850000, 850000, 850000}, /* ASV7 */
166 {900000, 850000, 850000, 850000}, /* ASV8 */
167};
168
169/*** Clock Divider Data for Exynos4210 ***/
170static unsigned int exynos4210_clkdiv_dmc0[][8] = {
171 /*
172 * Clock divider value for following
173 * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
174 * DIVDMCP, DIVCOPY2, DIVCORE_TIMERS }
175 */
176
177 /* DMC L0: 400MHz */
178 { 3, 1, 1, 1, 1, 1, 3, 1 },
179 /* DMC L1: 266.7MHz */
180 { 4, 1, 1, 2, 1, 1, 3, 1 },
181 /* DMC L2: 133MHz */
182 { 5, 1, 1, 5, 1, 1, 3, 1 },
183};
184static unsigned int exynos4210_clkdiv_top[][5] = {
185 /*
186 * Clock divider value for following
187 * { DIVACLK200, DIVACLK100, DIVACLK160, DIVACLK133, DIVONENAND }
188 */
189 /* ACLK200 L0: 200MHz */
190 { 3, 7, 4, 5, 1 },
191 /* ACLK200 L1: 160MHz */
192 { 4, 7, 5, 6, 1 },
193 /* ACLK200 L2: 133MHz */
194 { 5, 7, 7, 7, 1 },
195};
196static unsigned int exynos4210_clkdiv_lr_bus[][2] = {
197 /*
198 * Clock divider value for following
199 * { DIVGDL/R, DIVGPL/R }
200 */
201 /* ACLK_GDL/R L1: 200MHz */
202 { 3, 1 },
203 /* ACLK_GDL/R L2: 160MHz */
204 { 4, 1 },
205 /* ACLK_GDL/R L3: 133MHz */
206 { 5, 1 },
207};
208
209/*** Clock Divider Data for Exynos4212/4412 ***/
210static unsigned int exynos4x12_clkdiv_dmc0[][6] = {
211 /*
212 * Clock divider value for following
213 * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
214 * DIVDMCP}
215 */
216
217 /* DMC L0: 400MHz */
218 {3, 1, 1, 1, 1, 1},
219 /* DMC L1: 266.7MHz */
220 {4, 1, 1, 2, 1, 1},
221 /* DMC L2: 160MHz */
222 {5, 1, 1, 4, 1, 1},
223 /* DMC L3: 133MHz */
224 {5, 1, 1, 5, 1, 1},
225 /* DMC L4: 100MHz */
226 {7, 1, 1, 7, 1, 1},
227};
228static unsigned int exynos4x12_clkdiv_dmc1[][6] = {
229 /*
230 * Clock divider value for following
231 * { G2DACP, DIVC2C, DIVC2C_ACLK }
232 */
233
234 /* DMC L0: 400MHz */
235 {3, 1, 1},
236 /* DMC L1: 266.7MHz */
237 {4, 2, 1},
238 /* DMC L2: 160MHz */
239 {5, 4, 1},
240 /* DMC L3: 133MHz */
241 {5, 5, 1},
242 /* DMC L4: 100MHz */
243 {7, 7, 1},
244};
245static unsigned int exynos4x12_clkdiv_top[][5] = {
246 /*
247 * Clock divider value for following
248 * { DIVACLK266_GPS, DIVACLK100, DIVACLK160,
249 DIVACLK133, DIVONENAND }
250 */
251
252 /* ACLK_GDL/R L0: 200MHz */
253 {2, 7, 4, 5, 1},
254 /* ACLK_GDL/R L1: 200MHz */
255 {2, 7, 4, 5, 1},
256 /* ACLK_GDL/R L2: 160MHz */
257 {4, 7, 5, 7, 1},
258 /* ACLK_GDL/R L3: 133MHz */
259 {4, 7, 5, 7, 1},
260 /* ACLK_GDL/R L4: 100MHz */
261 {7, 7, 7, 7, 1},
262};
263static unsigned int exynos4x12_clkdiv_lr_bus[][2] = {
264 /*
265 * Clock divider value for following
266 * { DIVGDL/R, DIVGPL/R }
267 */
268
269 /* ACLK_GDL/R L0: 200MHz */
270 {3, 1},
271 /* ACLK_GDL/R L1: 200MHz */
272 {3, 1},
273 /* ACLK_GDL/R L2: 160MHz */
274 {4, 1},
275 /* ACLK_GDL/R L3: 133MHz */
276 {5, 1},
277 /* ACLK_GDL/R L4: 100MHz */
278 {7, 1},
279};
280static unsigned int exynos4x12_clkdiv_sclkip[][3] = {
281 /*
282 * Clock divider value for following
283 * { DIVMFC, DIVJPEG, DIVFIMC0~3}
284 */
285
286 /* SCLK_MFC: 200MHz */
287 {3, 3, 4},
288 /* SCLK_MFC: 200MHz */
289 {3, 3, 4},
290 /* SCLK_MFC: 160MHz */
291 {4, 4, 5},
292 /* SCLK_MFC: 133MHz */
293 {5, 5, 5},
294 /* SCLK_MFC: 100MHz */
295 {7, 7, 7},
296};
297
298
299static int exynos4210_set_busclk(struct busfreq_data *data, struct opp *opp)
300{
301 unsigned int index;
302 unsigned int tmp;
303
304 for (index = LV_0; index < EX4210_LV_NUM; index++)
305 if (opp_get_freq(opp) == exynos4210_busclk_table[index].clk)
306 break;
307
308 if (index == EX4210_LV_NUM)
309 return -EINVAL;
310
311 /* Change Divider - DMC0 */
312 tmp = data->dmc_divtable[index];
313
314 __raw_writel(tmp, S5P_CLKDIV_DMC0);
315
316 do {
317 tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0);
318 } while (tmp & 0x11111111);
319
320 /* Change Divider - TOP */
321 tmp = data->top_divtable[index];
322
323 __raw_writel(tmp, S5P_CLKDIV_TOP);
324
325 do {
326 tmp = __raw_readl(S5P_CLKDIV_STAT_TOP);
327 } while (tmp & 0x11111);
328
329 /* Change Divider - LEFTBUS */
330 tmp = __raw_readl(S5P_CLKDIV_LEFTBUS);
331
332 tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
333
334 tmp |= ((exynos4210_clkdiv_lr_bus[index][0] <<
335 S5P_CLKDIV_BUS_GDLR_SHIFT) |
336 (exynos4210_clkdiv_lr_bus[index][1] <<
337 S5P_CLKDIV_BUS_GPLR_SHIFT));
338
339 __raw_writel(tmp, S5P_CLKDIV_LEFTBUS);
340
341 do {
342 tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS);
343 } while (tmp & 0x11);
344
345 /* Change Divider - RIGHTBUS */
346 tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS);
347
348 tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
349
350 tmp |= ((exynos4210_clkdiv_lr_bus[index][0] <<
351 S5P_CLKDIV_BUS_GDLR_SHIFT) |
352 (exynos4210_clkdiv_lr_bus[index][1] <<
353 S5P_CLKDIV_BUS_GPLR_SHIFT));
354
355 __raw_writel(tmp, S5P_CLKDIV_RIGHTBUS);
356
357 do {
358 tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS);
359 } while (tmp & 0x11);
360
361 return 0;
362}
363
364static int exynos4x12_set_busclk(struct busfreq_data *data, struct opp *opp)
365{
366 unsigned int index;
367 unsigned int tmp;
368
369 for (index = LV_0; index < EX4x12_LV_NUM; index++)
370 if (opp_get_freq(opp) == exynos4x12_mifclk_table[index].clk)
371 break;
372
373 if (index == EX4x12_LV_NUM)
374 return -EINVAL;
375
376 /* Change Divider - DMC0 */
377 tmp = data->dmc_divtable[index];
378
379 __raw_writel(tmp, S5P_CLKDIV_DMC0);
380
381 do {
382 tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0);
383 } while (tmp & 0x11111111);
384
385 /* Change Divider - DMC1 */
386 tmp = __raw_readl(S5P_CLKDIV_DMC1);
387
388 tmp &= ~(S5P_CLKDIV_DMC1_G2D_ACP_MASK |
389 S5P_CLKDIV_DMC1_C2C_MASK |
390 S5P_CLKDIV_DMC1_C2CACLK_MASK);
391
392 tmp |= ((exynos4x12_clkdiv_dmc1[index][0] <<
393 S5P_CLKDIV_DMC1_G2D_ACP_SHIFT) |
394 (exynos4x12_clkdiv_dmc1[index][1] <<
395 S5P_CLKDIV_DMC1_C2C_SHIFT) |
396 (exynos4x12_clkdiv_dmc1[index][2] <<
397 S5P_CLKDIV_DMC1_C2CACLK_SHIFT));
398
399 __raw_writel(tmp, S5P_CLKDIV_DMC1);
400
401 do {
402 tmp = __raw_readl(S5P_CLKDIV_STAT_DMC1);
403 } while (tmp & 0x111111);
404
405 /* Change Divider - TOP */
406 tmp = __raw_readl(S5P_CLKDIV_TOP);
407
408 tmp &= ~(S5P_CLKDIV_TOP_ACLK266_GPS_MASK |
409 S5P_CLKDIV_TOP_ACLK100_MASK |
410 S5P_CLKDIV_TOP_ACLK160_MASK |
411 S5P_CLKDIV_TOP_ACLK133_MASK |
412 S5P_CLKDIV_TOP_ONENAND_MASK);
413
414 tmp |= ((exynos4x12_clkdiv_top[index][0] <<
415 S5P_CLKDIV_TOP_ACLK266_GPS_SHIFT) |
416 (exynos4x12_clkdiv_top[index][1] <<
417 S5P_CLKDIV_TOP_ACLK100_SHIFT) |
418 (exynos4x12_clkdiv_top[index][2] <<
419 S5P_CLKDIV_TOP_ACLK160_SHIFT) |
420 (exynos4x12_clkdiv_top[index][3] <<
421 S5P_CLKDIV_TOP_ACLK133_SHIFT) |
422 (exynos4x12_clkdiv_top[index][4] <<
423 S5P_CLKDIV_TOP_ONENAND_SHIFT));
424
425 __raw_writel(tmp, S5P_CLKDIV_TOP);
426
427 do {
428 tmp = __raw_readl(S5P_CLKDIV_STAT_TOP);
429 } while (tmp & 0x11111);
430
431 /* Change Divider - LEFTBUS */
432 tmp = __raw_readl(S5P_CLKDIV_LEFTBUS);
433
434 tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
435
436 tmp |= ((exynos4x12_clkdiv_lr_bus[index][0] <<
437 S5P_CLKDIV_BUS_GDLR_SHIFT) |
438 (exynos4x12_clkdiv_lr_bus[index][1] <<
439 S5P_CLKDIV_BUS_GPLR_SHIFT));
440
441 __raw_writel(tmp, S5P_CLKDIV_LEFTBUS);
442
443 do {
444 tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS);
445 } while (tmp & 0x11);
446
447 /* Change Divider - RIGHTBUS */
448 tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS);
449
450 tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
451
452 tmp |= ((exynos4x12_clkdiv_lr_bus[index][0] <<
453 S5P_CLKDIV_BUS_GDLR_SHIFT) |
454 (exynos4x12_clkdiv_lr_bus[index][1] <<
455 S5P_CLKDIV_BUS_GPLR_SHIFT));
456
457 __raw_writel(tmp, S5P_CLKDIV_RIGHTBUS);
458
459 do {
460 tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS);
461 } while (tmp & 0x11);
462
463 /* Change Divider - MFC */
464 tmp = __raw_readl(S5P_CLKDIV_MFC);
465
466 tmp &= ~(S5P_CLKDIV_MFC_MASK);
467
468 tmp |= ((exynos4x12_clkdiv_sclkip[index][0] <<
469 S5P_CLKDIV_MFC_SHIFT));
470
471 __raw_writel(tmp, S5P_CLKDIV_MFC);
472
473 do {
474 tmp = __raw_readl(S5P_CLKDIV_STAT_MFC);
475 } while (tmp & 0x1);
476
477 /* Change Divider - JPEG */
478 tmp = __raw_readl(S5P_CLKDIV_CAM1);
479
480 tmp &= ~(S5P_CLKDIV_CAM1_JPEG_MASK);
481
482 tmp |= ((exynos4x12_clkdiv_sclkip[index][1] <<
483 S5P_CLKDIV_CAM1_JPEG_SHIFT));
484
485 __raw_writel(tmp, S5P_CLKDIV_CAM1);
486
487 do {
488 tmp = __raw_readl(S5P_CLKDIV_STAT_CAM1);
489 } while (tmp & 0x1);
490
491 /* Change Divider - FIMC0~3 */
492 tmp = __raw_readl(S5P_CLKDIV_CAM);
493
494 tmp &= ~(S5P_CLKDIV_CAM_FIMC0_MASK | S5P_CLKDIV_CAM_FIMC1_MASK |
495 S5P_CLKDIV_CAM_FIMC2_MASK | S5P_CLKDIV_CAM_FIMC3_MASK);
496
497 tmp |= ((exynos4x12_clkdiv_sclkip[index][2] <<
498 S5P_CLKDIV_CAM_FIMC0_SHIFT) |
499 (exynos4x12_clkdiv_sclkip[index][2] <<
500 S5P_CLKDIV_CAM_FIMC1_SHIFT) |
501 (exynos4x12_clkdiv_sclkip[index][2] <<
502 S5P_CLKDIV_CAM_FIMC2_SHIFT) |
503 (exynos4x12_clkdiv_sclkip[index][2] <<
504 S5P_CLKDIV_CAM_FIMC3_SHIFT));
505
506 __raw_writel(tmp, S5P_CLKDIV_CAM);
507
508 do {
509 tmp = __raw_readl(S5P_CLKDIV_STAT_CAM1);
510 } while (tmp & 0x1111);
511
512 return 0;
513}
514
515
516static void busfreq_mon_reset(struct busfreq_data *data)
517{
518 unsigned int i;
519
520 for (i = 0; i < 2; i++) {
521 void __iomem *ppmu_base = data->dmc[i].hw_base;
522
523 /* Reset PPMU */
524 __raw_writel(0x8000000f, ppmu_base + 0xf010);
525 __raw_writel(0x8000000f, ppmu_base + 0xf050);
526 __raw_writel(0x6, ppmu_base + 0xf000);
527 __raw_writel(0x0, ppmu_base + 0xf100);
528
529 /* Set PPMU Event */
530 data->dmc[i].event = 0x6;
531 __raw_writel(((data->dmc[i].event << 12) | 0x1),
532 ppmu_base + 0xfc);
533
534 /* Start PPMU */
535 __raw_writel(0x1, ppmu_base + 0xf000);
536 }
537}
538
539static void exynos4_read_ppmu(struct busfreq_data *data)
540{
541 int i, j;
542
543 for (i = 0; i < 2; i++) {
544 void __iomem *ppmu_base = data->dmc[i].hw_base;
545 u32 overflow;
546
547 /* Stop PPMU */
548 __raw_writel(0x0, ppmu_base + 0xf000);
549
550 /* Update local data from PPMU */
551 overflow = __raw_readl(ppmu_base + 0xf050);
552
553 data->dmc[i].ccnt = __raw_readl(ppmu_base + 0xf100);
554 data->dmc[i].ccnt_overflow = overflow & (1 << 31);
555
556 for (j = 0; j < PPMU_PMNCNT_MAX; j++) {
557 data->dmc[i].count[j] = __raw_readl(
558 ppmu_base + (0xf110 + (0x10 * j)));
559 data->dmc[i].count_overflow[j] = overflow & (1 << j);
560 }
561 }
562
563 busfreq_mon_reset(data);
564}
565
566static int exynos4x12_get_intspec(unsigned long mifclk)
567{
568 int i = 0;
569
570 while (exynos4x12_intclk_table[i].clk) {
571 if (exynos4x12_intclk_table[i].clk <= mifclk)
572 return i;
573 i++;
574 }
575
576 return -EINVAL;
577}
578
579static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,
580 struct opp *oldopp)
581{
582 int err = 0, tmp;
583 unsigned long volt = opp_get_voltage(opp);
584
585 switch (data->type) {
586 case TYPE_BUSF_EXYNOS4210:
587 /* OPP represents DMC clock + INT voltage */
588 err = regulator_set_voltage(data->vdd_int, volt,
589 MAX_SAFEVOLT);
590 break;
591 case TYPE_BUSF_EXYNOS4x12:
592 /* OPP represents MIF clock + MIF voltage */
593 err = regulator_set_voltage(data->vdd_mif, volt,
594 MAX_SAFEVOLT);
595 if (err)
596 break;
597
598 tmp = exynos4x12_get_intspec(opp_get_freq(opp));
599 if (tmp < 0) {
600 err = tmp;
601 regulator_set_voltage(data->vdd_mif,
602 opp_get_voltage(oldopp),
603 MAX_SAFEVOLT);
604 break;
605 }
606 err = regulator_set_voltage(data->vdd_int,
607 exynos4x12_intclk_table[tmp].volt,
608 MAX_SAFEVOLT);
609 /* Try to recover */
610 if (err)
611 regulator_set_voltage(data->vdd_mif,
612 opp_get_voltage(oldopp),
613 MAX_SAFEVOLT);
614 break;
615 default:
616 err = -EINVAL;
617 }
618
619 return err;
620}
621
622static int exynos4_bus_target(struct device *dev, unsigned long *_freq)
623{
624 int err = 0;
625 struct platform_device *pdev = container_of(dev, struct platform_device,
626 dev);
627 struct busfreq_data *data = platform_get_drvdata(pdev);
628 struct opp *opp = devfreq_recommended_opp(dev, _freq);
629 unsigned long old_freq = opp_get_freq(data->curr_opp);
630 unsigned long freq = opp_get_freq(opp);
631
632 if (old_freq == freq)
633 return 0;
634
635 dev_dbg(dev, "targetting %lukHz %luuV\n", freq, opp_get_voltage(opp));
636
637 mutex_lock(&data->lock);
638
639 if (data->disabled)
640 goto out;
641
642 if (old_freq < freq)
643 err = exynos4_bus_setvolt(data, opp, data->curr_opp);
644 if (err)
645 goto out;
646
647 if (old_freq != freq) {
648 switch (data->type) {
649 case TYPE_BUSF_EXYNOS4210:
650 err = exynos4210_set_busclk(data, opp);
651 break;
652 case TYPE_BUSF_EXYNOS4x12:
653 err = exynos4x12_set_busclk(data, opp);
654 break;
655 default:
656 err = -EINVAL;
657 }
658 }
659 if (err)
660 goto out;
661
662 if (old_freq > freq)
663 err = exynos4_bus_setvolt(data, opp, data->curr_opp);
664 if (err)
665 goto out;
666
667 data->curr_opp = opp;
668out:
669 mutex_unlock(&data->lock);
670 return err;
671}
672
673static int exynos4_get_busier_dmc(struct busfreq_data *data)
674{
675 u64 p0 = data->dmc[0].count[0];
676 u64 p1 = data->dmc[1].count[0];
677
678 p0 *= data->dmc[1].ccnt;
679 p1 *= data->dmc[0].ccnt;
680
681 if (data->dmc[1].ccnt == 0)
682 return 0;
683
684 if (p0 > p1)
685 return 0;
686 return 1;
687}
688
689static int exynos4_bus_get_dev_status(struct device *dev,
690 struct devfreq_dev_status *stat)
691{
692 struct platform_device *pdev = container_of(dev, struct platform_device,
693 dev);
694 struct busfreq_data *data = platform_get_drvdata(pdev);
695 int busier_dmc;
696 int cycles_x2 = 2; /* 2 x cycles */
697 void __iomem *addr;
698 u32 timing;
699 u32 memctrl;
700
701 exynos4_read_ppmu(data);
702 busier_dmc = exynos4_get_busier_dmc(data);
703 stat->current_frequency = opp_get_freq(data->curr_opp);
704
705 if (busier_dmc)
706 addr = S5P_VA_DMC1;
707 else
708 addr = S5P_VA_DMC0;
709
710 memctrl = __raw_readl(addr + 0x04); /* one of DDR2/3/LPDDR2 */
711 timing = __raw_readl(addr + 0x38); /* CL or WL/RL values */
712
713 switch ((memctrl >> 8) & 0xf) {
714 case 0x4: /* DDR2 */
715 cycles_x2 = ((timing >> 16) & 0xf) * 2;
716 break;
717 case 0x5: /* LPDDR2 */
718 case 0x6: /* DDR3 */
719 cycles_x2 = ((timing >> 8) & 0xf) + ((timing >> 0) & 0xf);
720 break;
721 default:
722 pr_err("%s: Unknown Memory Type(%d).\n", __func__,
723 (memctrl >> 8) & 0xf);
724 return -EINVAL;
725 }
726
727 /* Number of cycles spent on memory access */
728 stat->busy_time = data->dmc[busier_dmc].count[0] / 2 * (cycles_x2 + 2);
729 stat->busy_time *= 100 / BUS_SATURATION_RATIO;
730 stat->total_time = data->dmc[busier_dmc].ccnt;
731
732 /* If the counters have overflown, retry */
733 if (data->dmc[busier_dmc].ccnt_overflow ||
734 data->dmc[busier_dmc].count_overflow[0])
735 return -EAGAIN;
736
737 return 0;
738}
739
740static void exynos4_bus_exit(struct device *dev)
741{
742 struct platform_device *pdev = container_of(dev, struct platform_device,
743 dev);
744 struct busfreq_data *data = platform_get_drvdata(pdev);
745
746 devfreq_unregister_opp_notifier(dev, data->devfreq);
747}
748
749static struct devfreq_dev_profile exynos4_devfreq_profile = {
750 .initial_freq = 400000,
751 .polling_ms = 50,
752 .target = exynos4_bus_target,
753 .get_dev_status = exynos4_bus_get_dev_status,
754 .exit = exynos4_bus_exit,
755};
756
757static int exynos4210_init_tables(struct busfreq_data *data)
758{
759 u32 tmp;
760 int mgrp;
761 int i, err = 0;
762
763 tmp = __raw_readl(S5P_CLKDIV_DMC0);
764 for (i = LV_0; i < EX4210_LV_NUM; i++) {
765 tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK |
766 S5P_CLKDIV_DMC0_ACPPCLK_MASK |
767 S5P_CLKDIV_DMC0_DPHY_MASK |
768 S5P_CLKDIV_DMC0_DMC_MASK |
769 S5P_CLKDIV_DMC0_DMCD_MASK |
770 S5P_CLKDIV_DMC0_DMCP_MASK |
771 S5P_CLKDIV_DMC0_COPY2_MASK |
772 S5P_CLKDIV_DMC0_CORETI_MASK);
773
774 tmp |= ((exynos4210_clkdiv_dmc0[i][0] <<
775 S5P_CLKDIV_DMC0_ACP_SHIFT) |
776 (exynos4210_clkdiv_dmc0[i][1] <<
777 S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) |
778 (exynos4210_clkdiv_dmc0[i][2] <<
779 S5P_CLKDIV_DMC0_DPHY_SHIFT) |
780 (exynos4210_clkdiv_dmc0[i][3] <<
781 S5P_CLKDIV_DMC0_DMC_SHIFT) |
782 (exynos4210_clkdiv_dmc0[i][4] <<
783 S5P_CLKDIV_DMC0_DMCD_SHIFT) |
784 (exynos4210_clkdiv_dmc0[i][5] <<
785 S5P_CLKDIV_DMC0_DMCP_SHIFT) |
786 (exynos4210_clkdiv_dmc0[i][6] <<
787 S5P_CLKDIV_DMC0_COPY2_SHIFT) |
788 (exynos4210_clkdiv_dmc0[i][7] <<
789 S5P_CLKDIV_DMC0_CORETI_SHIFT));
790
791 data->dmc_divtable[i] = tmp;
792 }
793
794 tmp = __raw_readl(S5P_CLKDIV_TOP);
795 for (i = LV_0; i < EX4210_LV_NUM; i++) {
796 tmp &= ~(S5P_CLKDIV_TOP_ACLK200_MASK |
797 S5P_CLKDIV_TOP_ACLK100_MASK |
798 S5P_CLKDIV_TOP_ACLK160_MASK |
799 S5P_CLKDIV_TOP_ACLK133_MASK |
800 S5P_CLKDIV_TOP_ONENAND_MASK);
801
802 tmp |= ((exynos4210_clkdiv_top[i][0] <<
803 S5P_CLKDIV_TOP_ACLK200_SHIFT) |
804 (exynos4210_clkdiv_top[i][1] <<
805 S5P_CLKDIV_TOP_ACLK100_SHIFT) |
806 (exynos4210_clkdiv_top[i][2] <<
807 S5P_CLKDIV_TOP_ACLK160_SHIFT) |
808 (exynos4210_clkdiv_top[i][3] <<
809 S5P_CLKDIV_TOP_ACLK133_SHIFT) |
810 (exynos4210_clkdiv_top[i][4] <<
811 S5P_CLKDIV_TOP_ONENAND_SHIFT));
812
813 data->top_divtable[i] = tmp;
814 }
815
816#ifdef CONFIG_EXYNOS_ASV
817 tmp = exynos4_result_of_asv;
818#else
819 tmp = 0; /* Max voltages for the reliability of the unknown */
820#endif
821
822 pr_debug("ASV Group of Exynos4 is %d\n", tmp);
823 /* Use merged grouping for voltage */
824 switch (tmp) {
825 case 0:
826 mgrp = 0;
827 break;
828 case 1:
829 case 2:
830 mgrp = 1;
831 break;
832 case 3:
833 case 4:
834 mgrp = 2;
835 break;
836 case 5:
837 case 6:
838 mgrp = 3;
839 break;
840 case 7:
841 mgrp = 4;
842 break;
843 default:
844 pr_warn("Unknown ASV Group. Use max voltage.\n");
845 mgrp = 0;
846 }
847
848 for (i = LV_0; i < EX4210_LV_NUM; i++)
849 exynos4210_busclk_table[i].volt = exynos4210_asv_volt[mgrp][i];
850
851 for (i = LV_0; i < EX4210_LV_NUM; i++) {
852 err = opp_add(data->dev, exynos4210_busclk_table[i].clk,
853 exynos4210_busclk_table[i].volt);
854 if (err) {
855 dev_err(data->dev, "Cannot add opp entries.\n");
856 return err;
857 }
858 }
859
860
861 return 0;
862}
863
864static int exynos4x12_init_tables(struct busfreq_data *data)
865{
866 unsigned int i;
867 unsigned int tmp;
868 int ret;
869
870 /* Enable pause function for DREX2 DVFS */
871 tmp = __raw_readl(S5P_DMC_PAUSE_CTRL);
872 tmp |= DMC_PAUSE_ENABLE;
873 __raw_writel(tmp, S5P_DMC_PAUSE_CTRL);
874
875 tmp = __raw_readl(S5P_CLKDIV_DMC0);
876
877 for (i = 0; i < EX4x12_LV_NUM; i++) {
878 tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK |
879 S5P_CLKDIV_DMC0_ACPPCLK_MASK |
880 S5P_CLKDIV_DMC0_DPHY_MASK |
881 S5P_CLKDIV_DMC0_DMC_MASK |
882 S5P_CLKDIV_DMC0_DMCD_MASK |
883 S5P_CLKDIV_DMC0_DMCP_MASK);
884
885 tmp |= ((exynos4x12_clkdiv_dmc0[i][0] <<
886 S5P_CLKDIV_DMC0_ACP_SHIFT) |
887 (exynos4x12_clkdiv_dmc0[i][1] <<
888 S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) |
889 (exynos4x12_clkdiv_dmc0[i][2] <<
890 S5P_CLKDIV_DMC0_DPHY_SHIFT) |
891 (exynos4x12_clkdiv_dmc0[i][3] <<
892 S5P_CLKDIV_DMC0_DMC_SHIFT) |
893 (exynos4x12_clkdiv_dmc0[i][4] <<
894 S5P_CLKDIV_DMC0_DMCD_SHIFT) |
895 (exynos4x12_clkdiv_dmc0[i][5] <<
896 S5P_CLKDIV_DMC0_DMCP_SHIFT));
897
898 data->dmc_divtable[i] = tmp;
899 }
900
901#ifdef CONFIG_EXYNOS_ASV
902 tmp = exynos4_result_of_asv;
903#else
904 tmp = 0; /* Max voltages for the reliability of the unknown */
905#endif
906
907 if (tmp > 8)
908 tmp = 0;
909 pr_debug("ASV Group of Exynos4x12 is %d\n", tmp);
910
911 for (i = 0; i < EX4x12_LV_NUM; i++) {
912 exynos4x12_mifclk_table[i].volt =
913 exynos4x12_mif_step_50[tmp][i];
914 exynos4x12_intclk_table[i].volt =
915 exynos4x12_int_volt[tmp][i];
916 }
917
918 for (i = 0; i < EX4x12_LV_NUM; i++) {
919 ret = opp_add(data->dev, exynos4x12_mifclk_table[i].clk,
920 exynos4x12_mifclk_table[i].volt);
921 if (ret) {
922 dev_err(data->dev, "Fail to add opp entries.\n");
923 return ret;
924 }
925 }
926
927 return 0;
928}
929
930static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
931 unsigned long event, void *ptr)
932{
933 struct busfreq_data *data = container_of(this, struct busfreq_data,
934 pm_notifier);
935 struct opp *opp;
936 unsigned long maxfreq = ULONG_MAX;
937 int err = 0;
938
939 switch (event) {
940 case PM_SUSPEND_PREPARE:
941 /* Set Fastest and Deactivate DVFS */
942 mutex_lock(&data->lock);
943
944 data->disabled = true;
945
946 opp = opp_find_freq_floor(data->dev, &maxfreq);
947
948 err = exynos4_bus_setvolt(data, opp, data->curr_opp);
949 if (err)
950 goto unlock;
951
952 switch (data->type) {
953 case TYPE_BUSF_EXYNOS4210:
954 err = exynos4210_set_busclk(data, opp);
955 break;
956 case TYPE_BUSF_EXYNOS4x12:
957 err = exynos4x12_set_busclk(data, opp);
958 break;
959 default:
960 err = -EINVAL;
961 }
962 if (err)
963 goto unlock;
964
965 data->curr_opp = opp;
966unlock:
967 mutex_unlock(&data->lock);
968 if (err)
969 return err;
970 return NOTIFY_OK;
971 case PM_POST_RESTORE:
972 case PM_POST_SUSPEND:
973 /* Reactivate */
974 mutex_lock(&data->lock);
975 data->disabled = false;
976 mutex_unlock(&data->lock);
977 return NOTIFY_OK;
978 }
979
980 return NOTIFY_DONE;
981}
982
983static __devinit int exynos4_busfreq_probe(struct platform_device *pdev)
984{
985 struct busfreq_data *data;
986 struct opp *opp;
987 struct device *dev = &pdev->dev;
988 int err = 0;
989
990 data = kzalloc(sizeof(struct busfreq_data), GFP_KERNEL);
991 if (data == NULL) {
992 dev_err(dev, "Cannot allocate memory.\n");
993 return -ENOMEM;
994 }
995
996 data->type = pdev->id_entry->driver_data;
997 data->dmc[0].hw_base = S5P_VA_DMC0;
998 data->dmc[1].hw_base = S5P_VA_DMC1;
999 data->pm_notifier.notifier_call = exynos4_busfreq_pm_notifier_event;
1000 data->dev = dev;
1001 mutex_init(&data->lock);
1002
1003 switch (data->type) {
1004 case TYPE_BUSF_EXYNOS4210:
1005 err = exynos4210_init_tables(data);
1006 break;
1007 case TYPE_BUSF_EXYNOS4x12:
1008 err = exynos4x12_init_tables(data);
1009 break;
1010 default:
1011 dev_err(dev, "Cannot determine the device id %d\n", data->type);
1012 err = -EINVAL;
1013 }
1014 if (err)
1015 goto err_regulator;
1016
1017 data->vdd_int = regulator_get(dev, "vdd_int");
1018 if (IS_ERR(data->vdd_int)) {
1019 dev_err(dev, "Cannot get the regulator \"vdd_int\"\n");
1020 err = PTR_ERR(data->vdd_int);
1021 goto err_regulator;
1022 }
1023 if (data->type == TYPE_BUSF_EXYNOS4x12) {
1024 data->vdd_mif = regulator_get(dev, "vdd_mif");
1025 if (IS_ERR(data->vdd_mif)) {
1026 dev_err(dev, "Cannot get the regulator \"vdd_mif\"\n");
1027 err = PTR_ERR(data->vdd_mif);
1028 regulator_put(data->vdd_int);
1029 goto err_regulator;
1030
1031 }
1032 }
1033
1034 opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq);
1035 if (IS_ERR(opp)) {
1036 dev_err(dev, "Invalid initial frequency %lu kHz.\n",
1037 exynos4_devfreq_profile.initial_freq);
1038 err = PTR_ERR(opp);
1039 goto err_opp_add;
1040 }
1041 data->curr_opp = opp;
1042
1043 platform_set_drvdata(pdev, data);
1044
1045 busfreq_mon_reset(data);
1046
1047 data->devfreq = devfreq_add_device(dev, &exynos4_devfreq_profile,
1048 &devfreq_simple_ondemand, NULL);
1049 if (IS_ERR(data->devfreq)) {
1050 err = PTR_ERR(data->devfreq);
1051 goto err_opp_add;
1052 }
1053
1054 devfreq_register_opp_notifier(dev, data->devfreq);
1055
1056 err = register_pm_notifier(&data->pm_notifier);
1057 if (err) {
1058 dev_err(dev, "Failed to setup pm notifier\n");
1059 goto err_devfreq_add;
1060 }
1061
1062 return 0;
1063err_devfreq_add:
1064 devfreq_remove_device(data->devfreq);
1065err_opp_add:
1066 if (data->vdd_mif)
1067 regulator_put(data->vdd_mif);
1068 regulator_put(data->vdd_int);
1069err_regulator:
1070 kfree(data);
1071 return err;
1072}
1073
1074static __devexit int exynos4_busfreq_remove(struct platform_device *pdev)
1075{
1076 struct busfreq_data *data = platform_get_drvdata(pdev);
1077
1078 unregister_pm_notifier(&data->pm_notifier);
1079 devfreq_remove_device(data->devfreq);
1080 regulator_put(data->vdd_int);
1081 if (data->vdd_mif)
1082 regulator_put(data->vdd_mif);
1083 kfree(data);
1084
1085 return 0;
1086}
1087
1088static int exynos4_busfreq_resume(struct device *dev)
1089{
1090 struct platform_device *pdev = container_of(dev, struct platform_device,
1091 dev);
1092 struct busfreq_data *data = platform_get_drvdata(pdev);
1093
1094 busfreq_mon_reset(data);
1095 return 0;
1096}
1097
1098static const struct dev_pm_ops exynos4_busfreq_pm = {
1099 .resume = exynos4_busfreq_resume,
1100};
1101
1102static const struct platform_device_id exynos4_busfreq_id[] = {
1103 { "exynos4210-busfreq", TYPE_BUSF_EXYNOS4210 },
1104 { "exynos4412-busfreq", TYPE_BUSF_EXYNOS4x12 },
1105 { "exynos4212-busfreq", TYPE_BUSF_EXYNOS4x12 },
1106 { },
1107};
1108
1109static struct platform_driver exynos4_busfreq_driver = {
1110 .probe = exynos4_busfreq_probe,
1111 .remove = __devexit_p(exynos4_busfreq_remove),
1112 .id_table = exynos4_busfreq_id,
1113 .driver = {
1114 .name = "exynos4-busfreq",
1115 .owner = THIS_MODULE,
1116 .pm = &exynos4_busfreq_pm,
1117 },
1118};
1119
1120static int __init exynos4_busfreq_init(void)
1121{
1122 return platform_driver_register(&exynos4_busfreq_driver);
1123}
1124late_initcall(exynos4_busfreq_init);
1125
1126static void __exit exynos4_busfreq_exit(void)
1127{
1128 platform_driver_unregister(&exynos4_busfreq_driver);
1129}
1130module_exit(exynos4_busfreq_exit);
1131
1132MODULE_LICENSE("GPL");
1133MODULE_DESCRIPTION("EXYNOS4 busfreq driver with devfreq framework");
1134MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
1135MODULE_ALIAS("exynos4-busfreq");
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 5e00d1670aa9..92c9628c572d 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -3276,6 +3276,18 @@ int evergreen_init(struct radeon_device *rdev)
3276 rdev->accel_working = false; 3276 rdev->accel_working = false;
3277 } 3277 }
3278 } 3278 }
3279
3280 /* Don't start up if the MC ucode is missing on BTC parts.
3281 * The default clocks and voltages before the MC ucode
3282 * is loaded are not suffient for advanced operations.
3283 */
3284 if (ASIC_IS_DCE5(rdev)) {
3285 if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
3286 DRM_ERROR("radeon: MC ucode required for NI+.\n");
3287 return -EINVAL;
3288 }
3289 }
3290
3279 return 0; 3291 return 0;
3280} 3292}
3281 3293
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 8aa1dbb45c67..f94b33ae2215 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1093,7 +1093,6 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1093 struct vmw_surface *surface = NULL; 1093 struct vmw_surface *surface = NULL;
1094 struct vmw_dma_buffer *bo = NULL; 1094 struct vmw_dma_buffer *bo = NULL;
1095 struct ttm_base_object *user_obj; 1095 struct ttm_base_object *user_obj;
1096 u64 required_size;
1097 int ret; 1096 int ret;
1098 1097
1099 /** 1098 /**
@@ -1102,8 +1101,9 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1102 * requested framebuffer. 1101 * requested framebuffer.
1103 */ 1102 */
1104 1103
1105 required_size = mode_cmd->pitch * mode_cmd->height; 1104 if (!vmw_kms_validate_mode_vram(dev_priv,
1106 if (unlikely(required_size > (u64) dev_priv->vram_size)) { 1105 mode_cmd->pitch,
1106 mode_cmd->height)) {
1107 DRM_ERROR("VRAM size is too small for requested mode.\n"); 1107 DRM_ERROR("VRAM size is too small for requested mode.\n");
1108 return ERR_PTR(-ENOMEM); 1108 return ERR_PTR(-ENOMEM);
1109 } 1109 }
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index b6907118283a..6d03774b176e 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1393,9 +1393,6 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
1393 atomic_read(&bitmap->behind_writes), 1393 atomic_read(&bitmap->behind_writes),
1394 bitmap->mddev->bitmap_info.max_write_behind); 1394 bitmap->mddev->bitmap_info.max_write_behind);
1395 } 1395 }
1396 if (bitmap->mddev->degraded)
1397 /* Never clear bits or update events_cleared when degraded */
1398 success = 0;
1399 1396
1400 while (sectors) { 1397 while (sectors) {
1401 sector_t blocks; 1398 sector_t blocks;
@@ -1409,7 +1406,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
1409 return; 1406 return;
1410 } 1407 }
1411 1408
1412 if (success && 1409 if (success && !bitmap->mddev->degraded &&
1413 bitmap->events_cleared < bitmap->mddev->events) { 1410 bitmap->events_cleared < bitmap->mddev->events) {
1414 bitmap->events_cleared = bitmap->mddev->events; 1411 bitmap->events_cleared = bitmap->mddev->events;
1415 bitmap->need_sync = 1; 1412 bitmap->need_sync = 1;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index c3273efd08cb..627456542fb3 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -230,6 +230,7 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
230 return -EINVAL; 230 return -EINVAL;
231 231
232 rdev->raid_disk = rdev->saved_raid_disk; 232 rdev->raid_disk = rdev->saved_raid_disk;
233 rdev->saved_raid_disk = -1;
233 234
234 newconf = linear_conf(mddev,mddev->raid_disks+1); 235 newconf = linear_conf(mddev,mddev->raid_disks+1);
235 236
diff --git a/drivers/md/md.c b/drivers/md/md.c
index ee981737edfc..f47f1f8ac44b 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -7360,8 +7360,7 @@ static int remove_and_add_spares(struct mddev *mddev)
7360 spares++; 7360 spares++;
7361 md_new_event(mddev); 7361 md_new_event(mddev);
7362 set_bit(MD_CHANGE_DEVS, &mddev->flags); 7362 set_bit(MD_CHANGE_DEVS, &mddev->flags);
7363 } else 7363 }
7364 break;
7365 } 7364 }
7366 } 7365 }
7367 } 7366 }
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 31670f8d6b65..858fdbb7eb07 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3065,11 +3065,17 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
3065 } 3065 }
3066 } else if (test_bit(In_sync, &rdev->flags)) 3066 } else if (test_bit(In_sync, &rdev->flags))
3067 set_bit(R5_Insync, &dev->flags); 3067 set_bit(R5_Insync, &dev->flags);
3068 else { 3068 else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
3069 /* in sync if before recovery_offset */ 3069 /* in sync if before recovery_offset */
3070 if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) 3070 set_bit(R5_Insync, &dev->flags);
3071 set_bit(R5_Insync, &dev->flags); 3071 else if (test_bit(R5_UPTODATE, &dev->flags) &&
3072 } 3072 test_bit(R5_Expanded, &dev->flags))
3073 /* If we've reshaped into here, we assume it is Insync.
3074 * We will shortly update recovery_offset to make
3075 * it official.
3076 */
3077 set_bit(R5_Insync, &dev->flags);
3078
3073 if (rdev && test_bit(R5_WriteError, &dev->flags)) { 3079 if (rdev && test_bit(R5_WriteError, &dev->flags)) {
3074 clear_bit(R5_Insync, &dev->flags); 3080 clear_bit(R5_Insync, &dev->flags);
3075 if (!test_bit(Faulty, &rdev->flags)) { 3081 if (!test_bit(Faulty, &rdev->flags)) {
diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c
index b0b0fa5a3572..54a4a3f22e2e 100644
--- a/drivers/media/video/omap3isp/ispccdc.c
+++ b/drivers/media/video/omap3isp/ispccdc.c
@@ -1408,7 +1408,7 @@ static void ccdc_hs_vs_isr(struct isp_ccdc_device *ccdc)
1408{ 1408{
1409 struct isp_pipeline *pipe = 1409 struct isp_pipeline *pipe =
1410 to_isp_pipeline(&ccdc->video_out.video.entity); 1410 to_isp_pipeline(&ccdc->video_out.video.entity);
1411 struct video_device *vdev = &ccdc->subdev.devnode; 1411 struct video_device *vdev = ccdc->subdev.devnode;
1412 struct v4l2_event event; 1412 struct v4l2_event event;
1413 1413
1414 memset(&event, 0, sizeof(event)); 1414 memset(&event, 0, sizeof(event));
diff --git a/drivers/media/video/omap3isp/ispstat.c b/drivers/media/video/omap3isp/ispstat.c
index 68d539456c55..bc0b2c7349b9 100644
--- a/drivers/media/video/omap3isp/ispstat.c
+++ b/drivers/media/video/omap3isp/ispstat.c
@@ -496,7 +496,7 @@ static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
496 496
497static void isp_stat_queue_event(struct ispstat *stat, int err) 497static void isp_stat_queue_event(struct ispstat *stat, int err)
498{ 498{
499 struct video_device *vdev = &stat->subdev.devnode; 499 struct video_device *vdev = stat->subdev.devnode;
500 struct v4l2_event event; 500 struct v4l2_event event;
501 struct omap3isp_stat_event_status *status = (void *)event.u.data; 501 struct omap3isp_stat_event_status *status = (void *)event.u.data;
502 502
diff --git a/drivers/mfd/ab5500-debugfs.c b/drivers/mfd/ab5500-debugfs.c
index 43c0ebb81956..b7b2d3483fd4 100644
--- a/drivers/mfd/ab5500-debugfs.c
+++ b/drivers/mfd/ab5500-debugfs.c
@@ -4,7 +4,7 @@
4 * Debugfs support for the AB5500 MFD driver 4 * Debugfs support for the AB5500 MFD driver
5 */ 5 */
6 6
7#include <linux/export.h> 7#include <linux/module.h>
8#include <linux/debugfs.h> 8#include <linux/debugfs.h>
9#include <linux/seq_file.h> 9#include <linux/seq_file.h>
10#include <linux/mfd/ab5500/ab5500.h> 10#include <linux/mfd/ab5500/ab5500.h>
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 1e9173804ede..d3d572b2317b 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -620,6 +620,7 @@ static struct resource __devinitdata ab8500_fg_resources[] = {
620 620
621static struct resource __devinitdata ab8500_chargalg_resources[] = {}; 621static struct resource __devinitdata ab8500_chargalg_resources[] = {};
622 622
623#ifdef CONFIG_DEBUG_FS
623static struct resource __devinitdata ab8500_debug_resources[] = { 624static struct resource __devinitdata ab8500_debug_resources[] = {
624 { 625 {
625 .name = "IRQ_FIRST", 626 .name = "IRQ_FIRST",
@@ -634,6 +635,7 @@ static struct resource __devinitdata ab8500_debug_resources[] = {
634 .flags = IORESOURCE_IRQ, 635 .flags = IORESOURCE_IRQ,
635 }, 636 },
636}; 637};
638#endif
637 639
638static struct resource __devinitdata ab8500_usb_resources[] = { 640static struct resource __devinitdata ab8500_usb_resources[] = {
639 { 641 {
diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c
index f1d88483112c..8d816cce8322 100644
--- a/drivers/mfd/adp5520.c
+++ b/drivers/mfd/adp5520.c
@@ -109,7 +109,7 @@ int adp5520_set_bits(struct device *dev, int reg, uint8_t bit_mask)
109 109
110 ret = __adp5520_read(chip->client, reg, &reg_val); 110 ret = __adp5520_read(chip->client, reg, &reg_val);
111 111
112 if (!ret && ((reg_val & bit_mask) == 0)) { 112 if (!ret && ((reg_val & bit_mask) != bit_mask)) {
113 reg_val |= bit_mask; 113 reg_val |= bit_mask;
114 ret = __adp5520_write(chip->client, reg, reg_val); 114 ret = __adp5520_write(chip->client, reg, reg_val);
115 } 115 }
diff --git a/drivers/mfd/da903x.c b/drivers/mfd/da903x.c
index 1b79c37fd599..1924b857a0fb 100644
--- a/drivers/mfd/da903x.c
+++ b/drivers/mfd/da903x.c
@@ -182,7 +182,7 @@ int da903x_set_bits(struct device *dev, int reg, uint8_t bit_mask)
182 if (ret) 182 if (ret)
183 goto out; 183 goto out;
184 184
185 if ((reg_val & bit_mask) == 0) { 185 if ((reg_val & bit_mask) != bit_mask) {
186 reg_val |= bit_mask; 186 reg_val |= bit_mask;
187 ret = __da903x_write(chip->client, reg, reg_val); 187 ret = __da903x_write(chip->client, reg, reg_val);
188 } 188 }
@@ -549,6 +549,7 @@ static int __devexit da903x_remove(struct i2c_client *client)
549 struct da903x_chip *chip = i2c_get_clientdata(client); 549 struct da903x_chip *chip = i2c_get_clientdata(client);
550 550
551 da903x_remove_subdevs(chip); 551 da903x_remove_subdevs(chip);
552 free_irq(client->irq, chip);
552 kfree(chip); 553 kfree(chip);
553 return 0; 554 return 0;
554} 555}
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index 1e9ee533eacb..ef39528088f2 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -16,6 +16,7 @@
16 */ 16 */
17 17
18#include <linux/err.h> 18#include <linux/err.h>
19#include <linux/io.h>
19#include <linux/irq.h> 20#include <linux/irq.h>
20#include <linux/interrupt.h> 21#include <linux/interrupt.h>
21#include <linux/kernel.h> 22#include <linux/kernel.h>
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index bba26d96c240..a5ddf31b60ca 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -197,7 +197,7 @@ int tps6586x_set_bits(struct device *dev, int reg, uint8_t bit_mask)
197 if (ret) 197 if (ret)
198 goto out; 198 goto out;
199 199
200 if ((reg_val & bit_mask) == 0) { 200 if ((reg_val & bit_mask) != bit_mask) {
201 reg_val |= bit_mask; 201 reg_val |= bit_mask;
202 ret = __tps6586x_write(to_i2c_client(dev), reg, reg_val); 202 ret = __tps6586x_write(to_i2c_client(dev), reg, reg_val);
203 } 203 }
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index 6f5b8cf2f652..c1da84bc1573 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -120,7 +120,7 @@ int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
120 goto out; 120 goto out;
121 } 121 }
122 122
123 data &= mask; 123 data &= ~mask;
124 err = tps65910_i2c_write(tps65910, reg, 1, &data); 124 err = tps65910_i2c_write(tps65910, reg, 1, &data);
125 if (err) 125 if (err)
126 dev_err(tps65910->dev, "write to reg %x failed\n", reg); 126 dev_err(tps65910->dev, "write to reg %x failed\n", reg);
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index bfbd66021afd..61e70cfaa774 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -363,13 +363,13 @@ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
363 pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no); 363 pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
364 return -EPERM; 364 return -EPERM;
365 } 365 }
366 sid = twl_map[mod_no].sid;
367 twl = &twl_modules[sid];
368
369 if (unlikely(!inuse)) { 366 if (unlikely(!inuse)) {
370 pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid); 367 pr_err("%s: not initialized\n", DRIVER_NAME);
371 return -EPERM; 368 return -EPERM;
372 } 369 }
370 sid = twl_map[mod_no].sid;
371 twl = &twl_modules[sid];
372
373 mutex_lock(&twl->xfer_lock); 373 mutex_lock(&twl->xfer_lock);
374 /* 374 /*
375 * [MSG1]: fill the register address data 375 * [MSG1]: fill the register address data
@@ -420,13 +420,13 @@ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
420 pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no); 420 pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
421 return -EPERM; 421 return -EPERM;
422 } 422 }
423 sid = twl_map[mod_no].sid;
424 twl = &twl_modules[sid];
425
426 if (unlikely(!inuse)) { 423 if (unlikely(!inuse)) {
427 pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid); 424 pr_err("%s: not initialized\n", DRIVER_NAME);
428 return -EPERM; 425 return -EPERM;
429 } 426 }
427 sid = twl_map[mod_no].sid;
428 twl = &twl_modules[sid];
429
430 mutex_lock(&twl->xfer_lock); 430 mutex_lock(&twl->xfer_lock);
431 /* [MSG1] fill the register address data */ 431 /* [MSG1] fill the register address data */
432 msg = &twl->xfer_msg[0]; 432 msg = &twl->xfer_msg[0];
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index f062c8cc6c38..29f11e0765fe 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -432,6 +432,7 @@ struct sih_agent {
432 u32 edge_change; 432 u32 edge_change;
433 433
434 struct mutex irq_lock; 434 struct mutex irq_lock;
435 char *irq_name;
435}; 436};
436 437
437/*----------------------------------------------------------------------*/ 438/*----------------------------------------------------------------------*/
@@ -589,7 +590,7 @@ static inline int sih_read_isr(const struct sih *sih)
589 * Generic handler for SIH interrupts ... we "know" this is called 590 * Generic handler for SIH interrupts ... we "know" this is called
590 * in task context, with IRQs enabled. 591 * in task context, with IRQs enabled.
591 */ 592 */
592static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc) 593static irqreturn_t handle_twl4030_sih(int irq, void *data)
593{ 594{
594 struct sih_agent *agent = irq_get_handler_data(irq); 595 struct sih_agent *agent = irq_get_handler_data(irq);
595 const struct sih *sih = agent->sih; 596 const struct sih *sih = agent->sih;
@@ -602,7 +603,7 @@ static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
602 pr_err("twl4030: %s SIH, read ISR error %d\n", 603 pr_err("twl4030: %s SIH, read ISR error %d\n",
603 sih->name, isr); 604 sih->name, isr);
604 /* REVISIT: recover; eventually mask it all, etc */ 605 /* REVISIT: recover; eventually mask it all, etc */
605 return; 606 return IRQ_HANDLED;
606 } 607 }
607 608
608 while (isr) { 609 while (isr) {
@@ -616,6 +617,7 @@ static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
616 pr_err("twl4030: %s SIH, invalid ISR bit %d\n", 617 pr_err("twl4030: %s SIH, invalid ISR bit %d\n",
617 sih->name, irq); 618 sih->name, irq);
618 } 619 }
620 return IRQ_HANDLED;
619} 621}
620 622
621static unsigned twl4030_irq_next; 623static unsigned twl4030_irq_next;
@@ -668,18 +670,19 @@ int twl4030_sih_setup(int module)
668 activate_irq(irq); 670 activate_irq(irq);
669 } 671 }
670 672
671 status = irq_base;
672 twl4030_irq_next += i; 673 twl4030_irq_next += i;
673 674
674 /* replace generic PIH handler (handle_simple_irq) */ 675 /* replace generic PIH handler (handle_simple_irq) */
675 irq = sih_mod + twl4030_irq_base; 676 irq = sih_mod + twl4030_irq_base;
676 irq_set_handler_data(irq, agent); 677 irq_set_handler_data(irq, agent);
677 irq_set_chained_handler(irq, handle_twl4030_sih); 678 agent->irq_name = kasprintf(GFP_KERNEL, "twl4030_%s", sih->name);
679 status = request_threaded_irq(irq, NULL, handle_twl4030_sih, 0,
680 agent->irq_name ?: sih->name, NULL);
678 681
679 pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", sih->name, 682 pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", sih->name,
680 irq, irq_base, twl4030_irq_next - 1); 683 irq, irq_base, twl4030_irq_next - 1);
681 684
682 return status; 685 return status < 0 ? status : irq_base;
683} 686}
684 687
685/* FIXME need a call to reverse twl4030_sih_setup() ... */ 688/* FIXME need a call to reverse twl4030_sih_setup() ... */
@@ -733,8 +736,9 @@ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
733 } 736 }
734 737
735 /* install an irq handler to demultiplex the TWL4030 interrupt */ 738 /* install an irq handler to demultiplex the TWL4030 interrupt */
736 status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih, 0, 739 status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih,
737 "TWL4030-PIH", NULL); 740 IRQF_ONESHOT,
741 "TWL4030-PIH", NULL);
738 if (status < 0) { 742 if (status < 0) {
739 pr_err("twl4030: could not claim irq%d: %d\n", irq_num, status); 743 pr_err("twl4030: could not claim irq%d: %d\n", irq_num, status);
740 goto fail_rqirq; 744 goto fail_rqirq;
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 5d6ba132837e..61894fced8ea 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -239,6 +239,7 @@ static int wm8994_suspend(struct device *dev)
239 239
240 switch (wm8994->type) { 240 switch (wm8994->type) {
241 case WM8958: 241 case WM8958:
242 case WM1811:
242 ret = wm8994_reg_read(wm8994, WM8958_MIC_DETECT_1); 243 ret = wm8994_reg_read(wm8994, WM8958_MIC_DETECT_1);
243 if (ret < 0) { 244 if (ret < 0) {
244 dev_err(dev, "Failed to read power status: %d\n", ret); 245 dev_err(dev, "Failed to read power status: %d\n", ret);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 67bf07819992..c8f47f17186f 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -477,7 +477,6 @@ enum rtl_register_content {
477 /* Config1 register p.24 */ 477 /* Config1 register p.24 */
478 LEDS1 = (1 << 7), 478 LEDS1 = (1 << 7),
479 LEDS0 = (1 << 6), 479 LEDS0 = (1 << 6),
480 MSIEnable = (1 << 5), /* Enable Message Signaled Interrupt */
481 Speed_down = (1 << 4), 480 Speed_down = (1 << 4),
482 MEMMAP = (1 << 3), 481 MEMMAP = (1 << 3),
483 IOMAP = (1 << 2), 482 IOMAP = (1 << 2),
@@ -485,6 +484,7 @@ enum rtl_register_content {
485 PMEnable = (1 << 0), /* Power Management Enable */ 484 PMEnable = (1 << 0), /* Power Management Enable */
486 485
487 /* Config2 register p. 25 */ 486 /* Config2 register p. 25 */
487 MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
488 PCI_Clock_66MHz = 0x01, 488 PCI_Clock_66MHz = 0x01,
489 PCI_Clock_33MHz = 0x00, 489 PCI_Clock_33MHz = 0x00,
490 490
@@ -3426,22 +3426,24 @@ static const struct rtl_cfg_info {
3426}; 3426};
3427 3427
3428/* Cfg9346_Unlock assumed. */ 3428/* Cfg9346_Unlock assumed. */
3429static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr, 3429static unsigned rtl_try_msi(struct rtl8169_private *tp,
3430 const struct rtl_cfg_info *cfg) 3430 const struct rtl_cfg_info *cfg)
3431{ 3431{
3432 void __iomem *ioaddr = tp->mmio_addr;
3432 unsigned msi = 0; 3433 unsigned msi = 0;
3433 u8 cfg2; 3434 u8 cfg2;
3434 3435
3435 cfg2 = RTL_R8(Config2) & ~MSIEnable; 3436 cfg2 = RTL_R8(Config2) & ~MSIEnable;
3436 if (cfg->features & RTL_FEATURE_MSI) { 3437 if (cfg->features & RTL_FEATURE_MSI) {
3437 if (pci_enable_msi(pdev)) { 3438 if (pci_enable_msi(tp->pci_dev)) {
3438 dev_info(&pdev->dev, "no MSI. Back to INTx.\n"); 3439 netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
3439 } else { 3440 } else {
3440 cfg2 |= MSIEnable; 3441 cfg2 |= MSIEnable;
3441 msi = RTL_FEATURE_MSI; 3442 msi = RTL_FEATURE_MSI;
3442 } 3443 }
3443 } 3444 }
3444 RTL_W8(Config2, cfg2); 3445 if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
3446 RTL_W8(Config2, cfg2);
3445 return msi; 3447 return msi;
3446} 3448}
3447 3449
@@ -4077,7 +4079,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4077 tp->features |= RTL_FEATURE_WOL; 4079 tp->features |= RTL_FEATURE_WOL;
4078 if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0) 4080 if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
4079 tp->features |= RTL_FEATURE_WOL; 4081 tp->features |= RTL_FEATURE_WOL;
4080 tp->features |= rtl_try_msi(pdev, ioaddr, cfg); 4082 tp->features |= rtl_try_msi(tp, cfg);
4081 RTL_W8(Cfg9346, Cfg9346_Lock); 4083 RTL_W8(Cfg9346, Cfg9346_Lock);
4082 4084
4083 if (rtl_tbi_enabled(tp)) { 4085 if (rtl_tbi_enabled(tp)) {
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index dca9d3369cdd..c97d2f590855 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -836,11 +836,13 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
836 chan_write(chan, cp, CPDMA_TEARDOWN_VALUE); 836 chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
837 837
838 /* handle completed packets */ 838 /* handle completed packets */
839 spin_unlock_irqrestore(&chan->lock, flags);
839 do { 840 do {
840 ret = __cpdma_chan_process(chan); 841 ret = __cpdma_chan_process(chan);
841 if (ret < 0) 842 if (ret < 0)
842 break; 843 break;
843 } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0); 844 } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
845 spin_lock_irqsave(&chan->lock, flags);
844 846
845 /* remaining packets haven't been tx/rx'ed, clean them up */ 847 /* remaining packets haven't been tx/rx'ed, clean them up */
846 while (chan->head) { 848 while (chan->head) {
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index e6fed4d4cb77..e95f0e60a9bc 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -1655,6 +1655,10 @@ static const struct usb_device_id products [] = {
1655 // ASIX 88772a 1655 // ASIX 88772a
1656 USB_DEVICE(0x0db0, 0xa877), 1656 USB_DEVICE(0x0db0, 0xa877),
1657 .driver_info = (unsigned long) &ax88772_info, 1657 .driver_info = (unsigned long) &ax88772_info,
1658}, {
1659 // Asus USB Ethernet Adapter
1660 USB_DEVICE (0x0b95, 0x7e2b),
1661 .driver_info = (unsigned long) &ax88772_info,
1658}, 1662},
1659 { }, // END 1663 { }, // END
1660}; 1664};
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 888abc2be3a5..528d5f3e868c 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1271,7 +1271,9 @@ static void ath_rc_init(struct ath_softc *sc,
1271 1271
1272 ath_rc_priv->max_valid_rate = k; 1272 ath_rc_priv->max_valid_rate = k;
1273 ath_rc_sort_validrates(rate_table, ath_rc_priv); 1273 ath_rc_sort_validrates(rate_table, ath_rc_priv);
1274 ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4]; 1274 ath_rc_priv->rate_max_phy = (k > 4) ?
1275 ath_rc_priv->valid_rate_index[k-4] :
1276 ath_rc_priv->valid_rate_index[k-1];
1275 ath_rc_priv->rate_table = rate_table; 1277 ath_rc_priv->rate_table = rate_table;
1276 1278
1277 ath_dbg(common, ATH_DBG_CONFIG, 1279 ath_dbg(common, ATH_DBG_CONFIG,
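
The ath9k hunk above fixes an out-of-bounds read that assumed at least five valid rates. A minimal standalone sketch of the same guard, using a made-up rate table rather than the driver's data:

#include <stdio.h>

int main(void)
{
	/* Only three valid rates here, so k - 4 would index before the array. */
	int valid_rate_index[] = { 0, 2, 5 };
	int k = sizeof(valid_rate_index) / sizeof(valid_rate_index[0]);

	/* Same guard as the patched ath_rc_init(): fall back to the
	 * highest valid rate when fewer than five rates exist. */
	int rate_max_phy = (k > 4) ? valid_rate_index[k - 4]
				   : valid_rate_index[k - 1];

	printf("rate_max_phy = %d\n", rate_max_phy);	/* prints 5 */
	return 0;
}
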
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index a7a6def40d05..5c7c17c7166a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -606,8 +606,8 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
606 if (ctx->ht.enabled) { 606 if (ctx->ht.enabled) {
607 /* if HT40 is used, it should not change 607 /* if HT40 is used, it should not change
608 * after associated except channel switch */ 608 * after associated except channel switch */
609 if (iwl_is_associated_ctx(ctx) && 609 if (!ctx->ht.is_40mhz ||
610 !ctx->ht.is_40mhz) 610 !iwl_is_associated_ctx(ctx))
611 iwlagn_config_ht40(conf, ctx); 611 iwlagn_config_ht40(conf, ctx);
612 } else 612 } else
613 ctx->ht.is_40mhz = false; 613 ctx->ht.is_40mhz = false;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 35a6b71f358c..df1540ca6102 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -91,7 +91,10 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
91 tx_cmd->tid_tspec = qc[0] & 0xf; 91 tx_cmd->tid_tspec = qc[0] & 0xf;
92 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; 92 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
93 } else { 93 } else {
94 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 94 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
95 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
96 else
97 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
95 } 98 }
96 99
97 iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags); 100 iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index bacc06c95e7a..e0e9a3dfbc00 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -2850,6 +2850,9 @@ static int iwlagn_mac_tx_sync(struct ieee80211_hw *hw,
2850 int ret; 2850 int ret;
2851 u8 sta_id; 2851 u8 sta_id;
2852 2852
2853 if (ctx->ctxid != IWL_RXON_CTX_PAN)
2854 return 0;
2855
2853 IWL_DEBUG_MAC80211(priv, "enter\n"); 2856 IWL_DEBUG_MAC80211(priv, "enter\n");
2854 mutex_lock(&priv->shrd->mutex); 2857 mutex_lock(&priv->shrd->mutex);
2855 2858
@@ -2898,6 +2901,9 @@ static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw *hw,
2898 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; 2901 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
2899 struct iwl_rxon_context *ctx = vif_priv->ctx; 2902 struct iwl_rxon_context *ctx = vif_priv->ctx;
2900 2903
2904 if (ctx->ctxid != IWL_RXON_CTX_PAN)
2905 return;
2906
2901 IWL_DEBUG_MAC80211(priv, "enter\n"); 2907 IWL_DEBUG_MAC80211(priv, "enter\n");
2902 mutex_lock(&priv->shrd->mutex); 2908 mutex_lock(&priv->shrd->mutex);
2903 2909
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index ce918980e977..5f17ab8e76ba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -1197,9 +1197,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1197 iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len); 1197 iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
1198 1198
1199 /* Set up entry for this TFD in Tx byte-count array */ 1199 /* Set up entry for this TFD in Tx byte-count array */
1200 if (is_agg) 1200 iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
1201 iwl_trans_txq_update_byte_cnt_tbl(trans, txq,
1202 le16_to_cpu(tx_cmd->len));
1203 1201
1204 dma_sync_single_for_device(bus(trans)->dev, txcmd_phys, firstlen, 1202 dma_sync_single_for_device(bus(trans)->dev, txcmd_phys, firstlen,
1205 DMA_BIDIRECTIONAL); 1203 DMA_BIDIRECTIONAL);
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index ac278156d390..6e0a3eaecf70 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -939,7 +939,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
939{ 939{
940 struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL; 940 struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
941 unsigned long cmd_flags; 941 unsigned long cmd_flags;
942 unsigned long cmd_pending_q_flags;
943 unsigned long scan_pending_q_flags; 942 unsigned long scan_pending_q_flags;
944 uint16_t cancel_scan_cmd = false; 943 uint16_t cancel_scan_cmd = false;
945 944
@@ -949,12 +948,9 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
949 cmd_node = adapter->curr_cmd; 948 cmd_node = adapter->curr_cmd;
950 cmd_node->wait_q_enabled = false; 949 cmd_node->wait_q_enabled = false;
951 cmd_node->cmd_flag |= CMD_F_CANCELED; 950 cmd_node->cmd_flag |= CMD_F_CANCELED;
952 spin_lock_irqsave(&adapter->cmd_pending_q_lock,
953 cmd_pending_q_flags);
954 list_del(&cmd_node->list);
955 spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
956 cmd_pending_q_flags);
957 mwifiex_insert_cmd_to_free_q(adapter, cmd_node); 951 mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
952 mwifiex_complete_cmd(adapter, adapter->curr_cmd);
953 adapter->curr_cmd = NULL;
958 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags); 954 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
959 } 955 }
960 956
@@ -981,7 +977,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
981 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags); 977 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
982 } 978 }
983 adapter->cmd_wait_q.status = -1; 979 adapter->cmd_wait_q.status = -1;
984 mwifiex_complete_cmd(adapter, adapter->curr_cmd);
985} 980}
986 981
987/* 982/*
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 717ebc9ff941..600d82348511 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -264,7 +264,7 @@ static int __devinit dwc3_core_init(struct dwc3 *dwc)
264 ret = -ENODEV; 264 ret = -ENODEV;
265 goto err0; 265 goto err0;
266 } 266 }
267 dwc->revision = reg & DWC3_GSNPSREV_MASK; 267 dwc->revision = reg;
268 268
269 dwc3_core_soft_reset(dwc); 269 dwc3_core_soft_reset(dwc);
270 270
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 596a0b464e61..4dff83d2f265 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -130,9 +130,6 @@ ep_matches (
130 num_req_streams = ep_comp->bmAttributes & 0x1f; 130 num_req_streams = ep_comp->bmAttributes & 0x1f;
131 if (num_req_streams > ep->max_streams) 131 if (num_req_streams > ep->max_streams)
132 return 0; 132 return 0;
133 /* Update the ep_comp descriptor if needed */
134 if (num_req_streams != ep->max_streams)
135 ep_comp->bmAttributes = ep->max_streams;
136 } 133 }
137 134
138 } 135 }
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index a7dc1e1d45f2..2ac4ac2e4ef9 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -18,7 +18,7 @@
18 18
19#include "isp1760-hcd.h" 19#include "isp1760-hcd.h"
20 20
21#ifdef CONFIG_OF 21#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/of.h> 23#include <linux/of.h>
24#include <linux/of_platform.h> 24#include <linux/of_platform.h>
@@ -31,7 +31,7 @@
31#include <linux/pci.h> 31#include <linux/pci.h>
32#endif 32#endif
33 33
34#ifdef CONFIG_OF 34#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
35struct isp1760 { 35struct isp1760 {
36 struct usb_hcd *hcd; 36 struct usb_hcd *hcd;
37 int rst_gpio; 37 int rst_gpio;
@@ -437,7 +437,7 @@ static int __init isp1760_init(void)
437 ret = platform_driver_register(&isp1760_plat_driver); 437 ret = platform_driver_register(&isp1760_plat_driver);
438 if (!ret) 438 if (!ret)
439 any_ret = 0; 439 any_ret = 0;
440#ifdef CONFIG_OF 440#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
441 ret = platform_driver_register(&isp1760_of_driver); 441 ret = platform_driver_register(&isp1760_of_driver);
442 if (!ret) 442 if (!ret)
443 any_ret = 0; 443 any_ret = 0;
@@ -457,7 +457,7 @@ module_init(isp1760_init);
457static void __exit isp1760_exit(void) 457static void __exit isp1760_exit(void)
458{ 458{
459 platform_driver_unregister(&isp1760_plat_driver); 459 platform_driver_unregister(&isp1760_plat_driver);
460#ifdef CONFIG_OF 460#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
461 platform_driver_unregister(&isp1760_of_driver); 461 platform_driver_unregister(&isp1760_of_driver);
462#endif 462#endif
463#ifdef CONFIG_PCI 463#ifdef CONFIG_PCI
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 60ddba8066ea..79cb0af779fa 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -774,6 +774,10 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
774 if (musb->double_buffer_not_ok) 774 if (musb->double_buffer_not_ok)
775 musb_writew(epio, MUSB_TXMAXP, 775 musb_writew(epio, MUSB_TXMAXP,
776 hw_ep->max_packet_sz_tx); 776 hw_ep->max_packet_sz_tx);
777 else if (can_bulk_split(musb, qh->type))
778 musb_writew(epio, MUSB_TXMAXP, packet_sz
779 | ((hw_ep->max_packet_sz_tx /
780 packet_sz) - 1) << 11);
777 else 781 else
778 musb_writew(epio, MUSB_TXMAXP, 782 musb_writew(epio, MUSB_TXMAXP,
779 qh->maxpacket | 783 qh->maxpacket |
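
The musb_host.c branch added above programs TXMAXP so a bulk transfer larger than the endpoint packet size is split across the controller FIFO. A rough sketch of the value being composed; the field layout is inferred only from the patch's shift, and the sizes are invented for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t packet_sz = 512;	/* endpoint wMaxPacketSize (example) */
	uint16_t fifo_sz   = 4096;	/* hw_ep->max_packet_sz_tx (example) */

	/* Mirrors: packet_sz | ((fifo_sz / packet_sz) - 1) << 11 */
	uint16_t txmaxp = packet_sz | (((fifo_sz / packet_sz) - 1) << 11);

	printf("TXMAXP = 0x%04x\n", txmaxp);	/* 0x3a00: 512 bytes, 8 packets per load */
	return 0;
}
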
diff --git a/firmware/README.AddingFirmware b/firmware/README.AddingFirmware
index e24cd8986d8b..ea78c3a17eec 100644
--- a/firmware/README.AddingFirmware
+++ b/firmware/README.AddingFirmware
@@ -12,7 +12,7 @@ here.
12This directory is _NOT_ for adding arbitrary new firmware images. The 12This directory is _NOT_ for adding arbitrary new firmware images. The
13place to add those is the separate linux-firmware repository: 13place to add those is the separate linux-firmware repository:
14 14
15 git://git.kernel.org/pub/scm/linux/kernel/git/dwmw2/linux-firmware.git 15 git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git
16 16
17That repository contains all these firmware images which have been 17That repository contains all these firmware images which have been
18extracted from older drivers, as well as various new firmware images which 18extracted from older drivers, as well as various new firmware images which
@@ -22,6 +22,7 @@ been permitted to redistribute under separate cover.
22To submit firmware to that repository, please send either a git binary 22To submit firmware to that repository, please send either a git binary
23diff or preferably a git pull request to: 23diff or preferably a git pull request to:
24 David Woodhouse <dwmw2@infradead.org> 24 David Woodhouse <dwmw2@infradead.org>
25 Ben Hutchings <ben@decadent.org.uk>
25 26
26Your commit should include an update to the WHENCE file clearly 27Your commit should include an update to the WHENCE file clearly
27identifying the licence under which the firmware is available, and 28identifying the licence under which the firmware is available, and
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 704a2ba08ea8..0cc20b35c1c4 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -563,8 +563,8 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
563 struct list_head *fallback; 563 struct list_head *fallback;
564 int ret; 564 int ret;
565 565
566again:
567 spin_lock_irqsave(&workers->lock, flags); 566 spin_lock_irqsave(&workers->lock, flags);
567again:
568 worker = next_worker(workers); 568 worker = next_worker(workers);
569 569
570 if (!worker) { 570 if (!worker) {
@@ -579,6 +579,7 @@ again:
579 spin_unlock_irqrestore(&workers->lock, flags); 579 spin_unlock_irqrestore(&workers->lock, flags);
580 /* we're below the limit, start another worker */ 580 /* we're below the limit, start another worker */
581 ret = __btrfs_start_workers(workers); 581 ret = __btrfs_start_workers(workers);
582 spin_lock_irqsave(&workers->lock, flags);
582 if (ret) 583 if (ret)
583 goto fallback; 584 goto fallback;
584 goto again; 585 goto again;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 0a6b928813a4..fd1a06df5bc6 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4590,10 +4590,6 @@ static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
4590 int err = btrfs_add_link(trans, dir, inode, 4590 int err = btrfs_add_link(trans, dir, inode,
4591 dentry->d_name.name, dentry->d_name.len, 4591 dentry->d_name.name, dentry->d_name.len,
4592 backref, index); 4592 backref, index);
4593 if (!err) {
4594 d_instantiate(dentry, inode);
4595 return 0;
4596 }
4597 if (err > 0) 4593 if (err > 0)
4598 err = -EEXIST; 4594 err = -EEXIST;
4599 return err; 4595 return err;
@@ -4655,6 +4651,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4655 else { 4651 else {
4656 init_special_inode(inode, inode->i_mode, rdev); 4652 init_special_inode(inode, inode->i_mode, rdev);
4657 btrfs_update_inode(trans, root, inode); 4653 btrfs_update_inode(trans, root, inode);
4654 d_instantiate(dentry, inode);
4658 } 4655 }
4659out_unlock: 4656out_unlock:
4660 nr = trans->blocks_used; 4657 nr = trans->blocks_used;
@@ -4722,6 +4719,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
4722 inode->i_mapping->a_ops = &btrfs_aops; 4719 inode->i_mapping->a_ops = &btrfs_aops;
4723 inode->i_mapping->backing_dev_info = &root->fs_info->bdi; 4720 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
4724 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 4721 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
4722 d_instantiate(dentry, inode);
4725 } 4723 }
4726out_unlock: 4724out_unlock:
4727 nr = trans->blocks_used; 4725 nr = trans->blocks_used;
@@ -4779,6 +4777,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4779 struct dentry *parent = dentry->d_parent; 4777 struct dentry *parent = dentry->d_parent;
4780 err = btrfs_update_inode(trans, root, inode); 4778 err = btrfs_update_inode(trans, root, inode);
4781 BUG_ON(err); 4779 BUG_ON(err);
4780 d_instantiate(dentry, inode);
4782 btrfs_log_new_name(trans, inode, NULL, parent); 4781 btrfs_log_new_name(trans, inode, NULL, parent);
4783 } 4782 }
4784 4783
@@ -7245,6 +7244,8 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
7245 drop_inode = 1; 7244 drop_inode = 1;
7246 7245
7247out_unlock: 7246out_unlock:
7247 if (!err)
7248 d_instantiate(dentry, inode);
7248 nr = trans->blocks_used; 7249 nr = trans->blocks_used;
7249 btrfs_end_transaction_throttle(trans, root); 7250 btrfs_end_transaction_throttle(trans, root);
7250 if (drop_inode) { 7251 if (drop_inode) {
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index e4c7af393c2d..30f78bb16afb 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -47,17 +47,6 @@ struct wb_writeback_work {
47 struct completion *done; /* set if the caller waits */ 47 struct completion *done; /* set if the caller waits */
48}; 48};
49 49
50const char *wb_reason_name[] = {
51 [WB_REASON_BACKGROUND] = "background",
52 [WB_REASON_TRY_TO_FREE_PAGES] = "try_to_free_pages",
53 [WB_REASON_SYNC] = "sync",
54 [WB_REASON_PERIODIC] = "periodic",
55 [WB_REASON_LAPTOP_TIMER] = "laptop_timer",
56 [WB_REASON_FREE_MORE_MEM] = "free_more_memory",
57 [WB_REASON_FS_FREE_SPACE] = "fs_free_space",
58 [WB_REASON_FORKER_THREAD] = "forker_thread"
59};
60
61/* 50/*
62 * Include the creation of the trace points after defining the 51 * Include the creation of the trace points after defining the
63 * wb_writeback_work structure so that the definition remains local to this 52 * wb_writeback_work structure so that the definition remains local to this
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index f549056fb20b..87f402ccec55 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -22,6 +22,7 @@
22#include <linux/spinlock.h> 22#include <linux/spinlock.h>
23#include <linux/lockdep.h> 23#include <linux/lockdep.h>
24#include <linux/percpu.h> 24#include <linux/percpu.h>
25#include <linux/cpu.h>
25 26
26/* can make br locks by using local lock for read side, global lock for write */ 27/* can make br locks by using local lock for read side, global lock for write */
27#define br_lock_init(name) name##_lock_init() 28#define br_lock_init(name) name##_lock_init()
@@ -72,9 +73,31 @@
72 73
73#define DEFINE_LGLOCK(name) \ 74#define DEFINE_LGLOCK(name) \
74 \ 75 \
76 DEFINE_SPINLOCK(name##_cpu_lock); \
77 cpumask_t name##_cpus __read_mostly; \
75 DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \ 78 DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \
76 DEFINE_LGLOCK_LOCKDEP(name); \ 79 DEFINE_LGLOCK_LOCKDEP(name); \
77 \ 80 \
81 static int \
82 name##_lg_cpu_callback(struct notifier_block *nb, \
83 unsigned long action, void *hcpu) \
84 { \
85 switch (action & ~CPU_TASKS_FROZEN) { \
86 case CPU_UP_PREPARE: \
87 spin_lock(&name##_cpu_lock); \
88 cpu_set((unsigned long)hcpu, name##_cpus); \
89 spin_unlock(&name##_cpu_lock); \
90 break; \
91 case CPU_UP_CANCELED: case CPU_DEAD: \
92 spin_lock(&name##_cpu_lock); \
93 cpu_clear((unsigned long)hcpu, name##_cpus); \
94 spin_unlock(&name##_cpu_lock); \
95 } \
96 return NOTIFY_OK; \
97 } \
98 static struct notifier_block name##_lg_cpu_notifier = { \
99 .notifier_call = name##_lg_cpu_callback, \
100 }; \
78 void name##_lock_init(void) { \ 101 void name##_lock_init(void) { \
79 int i; \ 102 int i; \
80 LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \ 103 LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
@@ -83,6 +106,11 @@
83 lock = &per_cpu(name##_lock, i); \ 106 lock = &per_cpu(name##_lock, i); \
84 *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \ 107 *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \
85 } \ 108 } \
109 register_hotcpu_notifier(&name##_lg_cpu_notifier); \
110 get_online_cpus(); \
111 for_each_online_cpu(i) \
112 cpu_set(i, name##_cpus); \
113 put_online_cpus(); \
86 } \ 114 } \
87 EXPORT_SYMBOL(name##_lock_init); \ 115 EXPORT_SYMBOL(name##_lock_init); \
88 \ 116 \
@@ -124,9 +152,9 @@
124 \ 152 \
125 void name##_global_lock_online(void) { \ 153 void name##_global_lock_online(void) { \
126 int i; \ 154 int i; \
127 preempt_disable(); \ 155 spin_lock(&name##_cpu_lock); \
128 rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \ 156 rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
129 for_each_online_cpu(i) { \ 157 for_each_cpu(i, &name##_cpus) { \
130 arch_spinlock_t *lock; \ 158 arch_spinlock_t *lock; \
131 lock = &per_cpu(name##_lock, i); \ 159 lock = &per_cpu(name##_lock, i); \
132 arch_spin_lock(lock); \ 160 arch_spin_lock(lock); \
@@ -137,12 +165,12 @@
137 void name##_global_unlock_online(void) { \ 165 void name##_global_unlock_online(void) { \
138 int i; \ 166 int i; \
139 rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \ 167 rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
140 for_each_online_cpu(i) { \ 168 for_each_cpu(i, &name##_cpus) { \
141 arch_spinlock_t *lock; \ 169 arch_spinlock_t *lock; \
142 lock = &per_cpu(name##_lock, i); \ 170 lock = &per_cpu(name##_lock, i); \
143 arch_spin_unlock(lock); \ 171 arch_spin_unlock(lock); \
144 } \ 172 } \
145 preempt_enable(); \ 173 spin_unlock(&name##_cpu_lock); \
146 } \ 174 } \
147 EXPORT_SYMBOL(name##_global_unlock_online); \ 175 EXPORT_SYMBOL(name##_global_unlock_online); \
148 \ 176 \
diff --git a/include/net/dst.h b/include/net/dst.h
index 6faec1a60216..75766b42660e 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -53,6 +53,7 @@ struct dst_entry {
53#define DST_NOHASH 0x0008 53#define DST_NOHASH 0x0008
54#define DST_NOCACHE 0x0010 54#define DST_NOCACHE 0x0010
55#define DST_NOCOUNT 0x0020 55#define DST_NOCOUNT 0x0020
56#define DST_NOPEER 0x0040
56 57
57 short error; 58 short error;
58 short obsolete; 59 short obsolete;
diff --git a/include/net/flow.h b/include/net/flow.h
index a09447749e2d..57f15a7f1cdd 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -207,6 +207,7 @@ extern struct flow_cache_object *flow_cache_lookup(
207 u8 dir, flow_resolve_t resolver, void *ctx); 207 u8 dir, flow_resolve_t resolver, void *ctx);
208 208
209extern void flow_cache_flush(void); 209extern void flow_cache_flush(void);
210extern void flow_cache_flush_deferred(void);
210extern atomic_t flow_cache_genid; 211extern atomic_t flow_cache_genid;
211 212
212#endif 213#endif
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index e90e7a9935dd..a15432da27c3 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -241,6 +241,9 @@ extern struct sctp_globals {
241 * bits is an indicator of when to send and window update SACK. 241 * bits is an indicator of when to send and window update SACK.
242 */ 242 */
243 int rwnd_update_shift; 243 int rwnd_update_shift;
244
245 /* Threshold for autoclose timeout, in seconds. */
246 unsigned long max_autoclose;
244} sctp_globals; 247} sctp_globals;
245 248
246#define sctp_rto_initial (sctp_globals.rto_initial) 249#define sctp_rto_initial (sctp_globals.rto_initial)
@@ -281,6 +284,7 @@ extern struct sctp_globals {
281#define sctp_auth_enable (sctp_globals.auth_enable) 284#define sctp_auth_enable (sctp_globals.auth_enable)
282#define sctp_checksum_disable (sctp_globals.checksum_disable) 285#define sctp_checksum_disable (sctp_globals.checksum_disable)
283#define sctp_rwnd_upd_shift (sctp_globals.rwnd_update_shift) 286#define sctp_rwnd_upd_shift (sctp_globals.rwnd_update_shift)
287#define sctp_max_autoclose (sctp_globals.max_autoclose)
284 288
285/* SCTP Socket type: UDP or TCP style. */ 289/* SCTP Socket type: UDP or TCP style. */
286typedef enum { 290typedef enum {
diff --git a/include/net/sock.h b/include/net/sock.h
index abb6e0f0c3c3..32e39371fba6 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -637,12 +637,14 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
637 637
638/* 638/*
639 * Take into account size of receive queue and backlog queue 639 * Take into account size of receive queue and backlog queue
640 * Do not take into account this skb truesize,
641 * to allow even a single big packet to come.
640 */ 642 */
641static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb) 643static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
642{ 644{
643 unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc); 645 unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
644 646
645 return qsize + skb->truesize > sk->sk_rcvbuf; 647 return qsize > sk->sk_rcvbuf;
646} 648}
647 649
648/* The per-socket spinlock must be held here. */ 650/* The per-socket spinlock must be held here. */
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index b99caa8b780c..99d1d0decf88 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -21,6 +21,16 @@
21 {I_REFERENCED, "I_REFERENCED"} \ 21 {I_REFERENCED, "I_REFERENCED"} \
22 ) 22 )
23 23
24#define WB_WORK_REASON \
25 {WB_REASON_BACKGROUND, "background"}, \
26 {WB_REASON_TRY_TO_FREE_PAGES, "try_to_free_pages"}, \
27 {WB_REASON_SYNC, "sync"}, \
28 {WB_REASON_PERIODIC, "periodic"}, \
29 {WB_REASON_LAPTOP_TIMER, "laptop_timer"}, \
30 {WB_REASON_FREE_MORE_MEM, "free_more_memory"}, \
31 {WB_REASON_FS_FREE_SPACE, "fs_free_space"}, \
32 {WB_REASON_FORKER_THREAD, "forker_thread"}
33
24struct wb_writeback_work; 34struct wb_writeback_work;
25 35
26DECLARE_EVENT_CLASS(writeback_work_class, 36DECLARE_EVENT_CLASS(writeback_work_class,
@@ -55,7 +65,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
55 __entry->for_kupdate, 65 __entry->for_kupdate,
56 __entry->range_cyclic, 66 __entry->range_cyclic,
57 __entry->for_background, 67 __entry->for_background,
58 wb_reason_name[__entry->reason] 68 __print_symbolic(__entry->reason, WB_WORK_REASON)
59 ) 69 )
60); 70);
61#define DEFINE_WRITEBACK_WORK_EVENT(name) \ 71#define DEFINE_WRITEBACK_WORK_EVENT(name) \
@@ -184,7 +194,8 @@ TRACE_EVENT(writeback_queue_io,
184 __entry->older, /* older_than_this in jiffies */ 194 __entry->older, /* older_than_this in jiffies */
185 __entry->age, /* older_than_this in relative milliseconds */ 195 __entry->age, /* older_than_this in relative milliseconds */
186 __entry->moved, 196 __entry->moved,
187 wb_reason_name[__entry->reason]) 197 __print_symbolic(__entry->reason, WB_WORK_REASON)
198 )
188); 199);
189 200
190TRACE_EVENT(global_dirty_state, 201TRACE_EVENT(global_dirty_state,
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 563f13609470..cf915b86a5fb 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -470,7 +470,7 @@ out:
470 cpu_maps_update_done(); 470 cpu_maps_update_done();
471} 471}
472 472
473static int alloc_frozen_cpus(void) 473static int __init alloc_frozen_cpus(void)
474{ 474{
475 if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO)) 475 if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
476 return -ENOMEM; 476 return -ENOMEM;
@@ -543,7 +543,7 @@ cpu_hotplug_pm_callback(struct notifier_block *nb,
543} 543}
544 544
545 545
546int cpu_hotplug_pm_sync_init(void) 546static int __init cpu_hotplug_pm_sync_init(void)
547{ 547{
548 pm_notifier(cpu_hotplug_pm_callback, 0); 548 pm_notifier(cpu_hotplug_pm_callback, 0);
549 return 0; 549 return 0;
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 81b4a27261b2..a0a88543934e 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -285,14 +285,14 @@ static int usermodehelper_disabled = 1;
285static atomic_t running_helpers = ATOMIC_INIT(0); 285static atomic_t running_helpers = ATOMIC_INIT(0);
286 286
287/* 287/*
288 * Wait queue head used by usermodehelper_pm_callback() to wait for all running 288 * Wait queue head used by usermodehelper_disable() to wait for all running
289 * helpers to finish. 289 * helpers to finish.
290 */ 290 */
291static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq); 291static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);
292 292
293/* 293/*
294 * Time to wait for running_helpers to become zero before the setting of 294 * Time to wait for running_helpers to become zero before the setting of
295 * usermodehelper_disabled in usermodehelper_pm_callback() fails 295 * usermodehelper_disabled in usermodehelper_disable() fails
296 */ 296 */
297#define RUNNING_HELPERS_TIMEOUT (5 * HZ) 297#define RUNNING_HELPERS_TIMEOUT (5 * HZ)
298 298
diff --git a/mm/filemap.c b/mm/filemap.c
index c106d3b3cc64..5f0a3c91fdac 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1828,7 +1828,7 @@ repeat:
1828 page = __page_cache_alloc(gfp | __GFP_COLD); 1828 page = __page_cache_alloc(gfp | __GFP_COLD);
1829 if (!page) 1829 if (!page)
1830 return ERR_PTR(-ENOMEM); 1830 return ERR_PTR(-ENOMEM);
1831 err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL); 1831 err = add_to_page_cache_lru(page, mapping, index, gfp);
1832 if (unlikely(err)) { 1832 if (unlikely(err)) {
1833 page_cache_release(page); 1833 page_cache_release(page);
1834 if (err == -EEXIST) 1834 if (err == -EEXIST)
@@ -1925,10 +1925,7 @@ static struct page *wait_on_page_read(struct page *page)
1925 * @gfp: the page allocator flags to use if allocating 1925 * @gfp: the page allocator flags to use if allocating
1926 * 1926 *
1927 * This is the same as "read_mapping_page(mapping, index, NULL)", but with 1927 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
1928 * any new page allocations done using the specified allocation flags. Note 1928 * any new page allocations done using the specified allocation flags.
1929 * that the Radix tree operations will still use GFP_KERNEL, so you can't
1930 * expect to do this atomically or anything like that - but you can pass in
1931 * other page requirements.
1932 * 1929 *
1933 * If the page does not get brought uptodate, return -EIO. 1930 * If the page does not get brought uptodate, return -EIO.
1934 */ 1931 */
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index e0af7237cd92..c1c597e3e198 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -673,7 +673,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
673 goto encrypt; 673 goto encrypt;
674 674
675auth: 675auth:
676 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) 676 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
677 return 0; 677 return 0;
678 678
679 if (!hci_conn_auth(conn, sec_level, auth_type)) 679 if (!hci_conn_auth(conn, sec_level, auth_type))
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 5ea94a1eecf2..17b5b1cd9657 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -2152,7 +2152,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
2152 void *ptr = req->data; 2152 void *ptr = req->data;
2153 int type, olen; 2153 int type, olen;
2154 unsigned long val; 2154 unsigned long val;
2155 struct l2cap_conf_rfc rfc; 2155 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2156 2156
2157 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data); 2157 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2158 2158
@@ -2271,6 +2271,16 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2271 } 2271 }
2272 } 2272 }
2273 2273
2274 /* Use sane default values in case a misbehaving remote device
2275 * did not send an RFC option.
2276 */
2277 rfc.mode = chan->mode;
2278 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2279 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2280 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2281
2282 BT_ERR("Expected RFC option was not found, using defaults");
2283
2274done: 2284done:
2275 switch (rfc.mode) { 2285 switch (rfc.mode) {
2276 case L2CAP_MODE_ERTM: 2286 case L2CAP_MODE_ERTM:
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 4e32e18211f9..2d28dfe98389 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -1146,6 +1146,7 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
1146 if (list_empty(&s->dlcs)) { 1146 if (list_empty(&s->dlcs)) {
1147 s->state = BT_DISCONN; 1147 s->state = BT_DISCONN;
1148 rfcomm_send_disc(s, 0); 1148 rfcomm_send_disc(s, 0);
1149 rfcomm_session_clear_timer(s);
1149 } 1150 }
1150 1151
1151 break; 1152 break;
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index d6ec3720c77e..fa8b8f763580 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -114,12 +114,18 @@ static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst, const vo
114 return NULL; 114 return NULL;
115} 115}
116 116
117static unsigned int fake_mtu(const struct dst_entry *dst)
118{
119 return dst->dev->mtu;
120}
121
117static struct dst_ops fake_dst_ops = { 122static struct dst_ops fake_dst_ops = {
118 .family = AF_INET, 123 .family = AF_INET,
119 .protocol = cpu_to_be16(ETH_P_IP), 124 .protocol = cpu_to_be16(ETH_P_IP),
120 .update_pmtu = fake_update_pmtu, 125 .update_pmtu = fake_update_pmtu,
121 .cow_metrics = fake_cow_metrics, 126 .cow_metrics = fake_cow_metrics,
122 .neigh_lookup = fake_neigh_lookup, 127 .neigh_lookup = fake_neigh_lookup,
128 .mtu = fake_mtu,
123}; 129};
124 130
125/* 131/*
@@ -141,7 +147,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
141 rt->dst.dev = br->dev; 147 rt->dst.dev = br->dev;
142 rt->dst.path = &rt->dst; 148 rt->dst.path = &rt->dst;
143 dst_init_metrics(&rt->dst, br_dst_default_metrics, true); 149 dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
144 rt->dst.flags = DST_NOXFRM; 150 rt->dst.flags = DST_NOXFRM | DST_NOPEER;
145 rt->dst.ops = &fake_dst_ops; 151 rt->dst.ops = &fake_dst_ops;
146} 152}
147 153
diff --git a/net/core/flow.c b/net/core/flow.c
index 8ae42de9c79e..e318c7e98042 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -358,6 +358,18 @@ void flow_cache_flush(void)
358 put_online_cpus(); 358 put_online_cpus();
359} 359}
360 360
361static void flow_cache_flush_task(struct work_struct *work)
362{
363 flow_cache_flush();
364}
365
366static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task);
367
368void flow_cache_flush_deferred(void)
369{
370 schedule_work(&flow_cache_flush_work);
371}
372
361static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu) 373static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
362{ 374{
363 struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu); 375 struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index c71c434a4c05..385aefe53648 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -665,11 +665,14 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
665 if (count) { 665 if (count) {
666 int i; 666 int i;
667 667
668 if (count > 1<<30) { 668 if (count > INT_MAX)
669 return -EINVAL;
670 count = roundup_pow_of_two(count);
671 if (count > (ULONG_MAX - sizeof(struct rps_dev_flow_table))
672 / sizeof(struct rps_dev_flow)) {
669 /* Enforce a limit to prevent overflow */ 673 /* Enforce a limit to prevent overflow */
670 return -EINVAL; 674 return -EINVAL;
671 } 675 }
672 count = roundup_pow_of_two(count);
673 table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count)); 676 table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count));
674 if (!table) 677 if (!table)
675 return -ENOMEM; 678 return -ENOMEM;
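
The net-sysfs.c change reorders the sysfs store path so the requested flow count is capped, rounded up to a power of two, and only then checked against what the flexible-array allocation can hold without wrapping. A self-contained sketch of that ordering; the structure names below are stand-ins, not the kernel's rps_dev_flow types:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct flow_entry { uint64_t filter, cpu; };
struct flow_table { unsigned long mask; struct flow_entry flows[]; };

static unsigned long roundup_pow_of_two_ul(unsigned long v)
{
	unsigned long r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

int main(int argc, char **argv)
{
	unsigned long count = argc > 1 ? strtoul(argv[1], NULL, 0) : 4096;

	if (count > INT_MAX)			/* cap before rounding up */
		return 1;
	count = roundup_pow_of_two_ul(count);
	if (count > (ULONG_MAX - sizeof(struct flow_table)) /
		    sizeof(struct flow_entry))	/* allocation size must not wrap */
		return 1;

	printf("table size = %lu bytes\n",
	       sizeof(struct flow_table) + count * sizeof(struct flow_entry));
	return 0;
}
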
diff --git a/net/core/sock.c b/net/core/sock.c
index 4ed7b1d12f5e..b23f174ab84c 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -288,11 +288,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
288 unsigned long flags; 288 unsigned long flags;
289 struct sk_buff_head *list = &sk->sk_receive_queue; 289 struct sk_buff_head *list = &sk->sk_receive_queue;
290 290
291 /* Cast sk->rcvbuf to unsigned... It's pointless, but reduces 291 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
292 number of warnings when compiling with -W --ANK
293 */
294 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
295 (unsigned)sk->sk_rcvbuf) {
296 atomic_inc(&sk->sk_drops); 292 atomic_inc(&sk->sk_drops);
297 trace_sock_rcvqueue_full(sk, skb); 293 trace_sock_rcvqueue_full(sk, skb);
298 return -ENOMEM; 294 return -ENOMEM;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 0da2afc97f32..99ec116bef14 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -253,6 +253,10 @@ static int __init ic_open_devs(void)
253 } 253 }
254 } 254 }
255 255
256 /* no point in waiting if we could not bring up at least one device */
257 if (!ic_first_dev)
258 goto have_carrier;
259
256 /* wait for a carrier on at least one device */ 260 /* wait for a carrier on at least one device */
257 start = jiffies; 261 start = jiffies;
258 while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) { 262 while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) {
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 46af62363b8c..94cdbc55ca7e 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -91,6 +91,7 @@
91#include <linux/rcupdate.h> 91#include <linux/rcupdate.h>
92#include <linux/times.h> 92#include <linux/times.h>
93#include <linux/slab.h> 93#include <linux/slab.h>
94#include <linux/prefetch.h>
94#include <net/dst.h> 95#include <net/dst.h>
95#include <net/net_namespace.h> 96#include <net/net_namespace.h>
96#include <net/protocol.h> 97#include <net/protocol.h>
@@ -120,6 +121,7 @@
120 121
121static int ip_rt_max_size; 122static int ip_rt_max_size;
122static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT; 123static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
124static int ip_rt_gc_interval __read_mostly = 60 * HZ;
123static int ip_rt_gc_min_interval __read_mostly = HZ / 2; 125static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
124static int ip_rt_redirect_number __read_mostly = 9; 126static int ip_rt_redirect_number __read_mostly = 9;
125static int ip_rt_redirect_load __read_mostly = HZ / 50; 127static int ip_rt_redirect_load __read_mostly = HZ / 50;
@@ -133,6 +135,9 @@ static int ip_rt_min_advmss __read_mostly = 256;
133static int rt_chain_length_max __read_mostly = 20; 135static int rt_chain_length_max __read_mostly = 20;
134static int redirect_genid; 136static int redirect_genid;
135 137
138static struct delayed_work expires_work;
139static unsigned long expires_ljiffies;
140
136/* 141/*
137 * Interface to generic destination cache. 142 * Interface to generic destination cache.
138 */ 143 */
@@ -830,6 +835,97 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
830 return ONE; 835 return ONE;
831} 836}
832 837
838static void rt_check_expire(void)
839{
840 static unsigned int rover;
841 unsigned int i = rover, goal;
842 struct rtable *rth;
843 struct rtable __rcu **rthp;
844 unsigned long samples = 0;
845 unsigned long sum = 0, sum2 = 0;
846 unsigned long delta;
847 u64 mult;
848
849 delta = jiffies - expires_ljiffies;
850 expires_ljiffies = jiffies;
851 mult = ((u64)delta) << rt_hash_log;
852 if (ip_rt_gc_timeout > 1)
853 do_div(mult, ip_rt_gc_timeout);
854 goal = (unsigned int)mult;
855 if (goal > rt_hash_mask)
856 goal = rt_hash_mask + 1;
857 for (; goal > 0; goal--) {
858 unsigned long tmo = ip_rt_gc_timeout;
859 unsigned long length;
860
861 i = (i + 1) & rt_hash_mask;
862 rthp = &rt_hash_table[i].chain;
863
864 if (need_resched())
865 cond_resched();
866
867 samples++;
868
869 if (rcu_dereference_raw(*rthp) == NULL)
870 continue;
871 length = 0;
872 spin_lock_bh(rt_hash_lock_addr(i));
873 while ((rth = rcu_dereference_protected(*rthp,
874 lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
875 prefetch(rth->dst.rt_next);
876 if (rt_is_expired(rth)) {
877 *rthp = rth->dst.rt_next;
878 rt_free(rth);
879 continue;
880 }
881 if (rth->dst.expires) {
882 /* Entry is expired even if it is in use */
883 if (time_before_eq(jiffies, rth->dst.expires)) {
884nofree:
885 tmo >>= 1;
886 rthp = &rth->dst.rt_next;
887 /*
888 * We only count entries on
889 * a chain with equal hash inputs once
890 * so that entries for different QOS
891 * levels, and other non-hash input
892 * attributes don't unfairly skew
893 * the length computation
894 */
895 length += has_noalias(rt_hash_table[i].chain, rth);
896 continue;
897 }
898 } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
899 goto nofree;
900
901 /* Cleanup aged off entries. */
902 *rthp = rth->dst.rt_next;
903 rt_free(rth);
904 }
905 spin_unlock_bh(rt_hash_lock_addr(i));
906 sum += length;
907 sum2 += length*length;
908 }
909 if (samples) {
910 unsigned long avg = sum / samples;
911 unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
912 rt_chain_length_max = max_t(unsigned long,
913 ip_rt_gc_elasticity,
914 (avg + 4*sd) >> FRACT_BITS);
915 }
916 rover = i;
917}
918
919/*
920 * rt_worker_func() is run in process context.
921 * we call rt_check_expire() to scan part of the hash table
922 */
923static void rt_worker_func(struct work_struct *work)
924{
925 rt_check_expire();
926 schedule_delayed_work(&expires_work, ip_rt_gc_interval);
927}
928
833/* 929/*
834 * Perturbation of rt_genid by a small quantity [1..256] 930 * Perturbation of rt_genid by a small quantity [1..256]
835 * Using 8 bits of shuffling ensure we can call rt_cache_invalidate() 931 * Using 8 bits of shuffling ensure we can call rt_cache_invalidate()
@@ -1271,7 +1367,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
1271{ 1367{
1272 struct rtable *rt = (struct rtable *) dst; 1368 struct rtable *rt = (struct rtable *) dst;
1273 1369
1274 if (rt) { 1370 if (rt && !(rt->dst.flags & DST_NOPEER)) {
1275 if (rt->peer == NULL) 1371 if (rt->peer == NULL)
1276 rt_bind_peer(rt, rt->rt_dst, 1); 1372 rt_bind_peer(rt, rt->rt_dst, 1);
1277 1373
@@ -1282,7 +1378,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
1282 iph->id = htons(inet_getid(rt->peer, more)); 1378 iph->id = htons(inet_getid(rt->peer, more));
1283 return; 1379 return;
1284 } 1380 }
1285 } else 1381 } else if (!rt)
1286 printk(KERN_DEBUG "rt_bind_peer(0) @%p\n", 1382 printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
1287 __builtin_return_address(0)); 1383 __builtin_return_address(0));
1288 1384
@@ -3179,6 +3275,13 @@ static ctl_table ipv4_route_table[] = {
3179 .proc_handler = proc_dointvec_jiffies, 3275 .proc_handler = proc_dointvec_jiffies,
3180 }, 3276 },
3181 { 3277 {
3278 .procname = "gc_interval",
3279 .data = &ip_rt_gc_interval,
3280 .maxlen = sizeof(int),
3281 .mode = 0644,
3282 .proc_handler = proc_dointvec_jiffies,
3283 },
3284 {
3182 .procname = "redirect_load", 3285 .procname = "redirect_load",
3183 .data = &ip_rt_redirect_load, 3286 .data = &ip_rt_redirect_load,
3184 .maxlen = sizeof(int), 3287 .maxlen = sizeof(int),
@@ -3388,6 +3491,11 @@ int __init ip_rt_init(void)
3388 devinet_init(); 3491 devinet_init();
3389 ip_fib_init(); 3492 ip_fib_init();
3390 3493
3494 INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
3495 expires_ljiffies = jiffies;
3496 schedule_delayed_work(&expires_work,
3497 net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
3498
3391 if (ip_rt_proc_init()) 3499 if (ip_rt_proc_init())
3392 printk(KERN_ERR "Unable to create route proc files\n"); 3500 printk(KERN_ERR "Unable to create route proc files\n");
3393#ifdef CONFIG_XFRM 3501#ifdef CONFIG_XFRM
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 84d0bd5cac93..ec562713db9b 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -603,7 +603,7 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
603 static atomic_t ipv6_fragmentation_id; 603 static atomic_t ipv6_fragmentation_id;
604 int old, new; 604 int old, new;
605 605
606 if (rt) { 606 if (rt && !(rt->dst.flags & DST_NOPEER)) {
607 struct inet_peer *peer; 607 struct inet_peer *peer;
608 608
609 if (!rt->rt6i_peer) 609 if (!rt->rt6i_peer)
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index dfd3a648a551..a18e6c3d36e3 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -833,15 +833,15 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
833 copied += used; 833 copied += used;
834 len -= used; 834 len -= used;
835 835
 836 /* For non stream protocols we get one packet per recvmsg call */
837 if (sk->sk_type != SOCK_STREAM)
838 goto copy_uaddr;
839
836 if (!(flags & MSG_PEEK)) { 840 if (!(flags & MSG_PEEK)) {
837 sk_eat_skb(sk, skb, 0); 841 sk_eat_skb(sk, skb, 0);
838 *seq = 0; 842 *seq = 0;
839 } 843 }
840 844
841 /* For non stream protocols we get one packet per recvmsg call */
842 if (sk->sk_type != SOCK_STREAM)
843 goto copy_uaddr;
844
845 /* Partial read */ 845 /* Partial read */
846 if (used + offset < skb->len) 846 if (used + offset < skb->len)
847 continue; 847 continue;
@@ -857,6 +857,12 @@ copy_uaddr:
857 } 857 }
858 if (llc_sk(sk)->cmsg_flags) 858 if (llc_sk(sk)->cmsg_flags)
859 llc_cmsg_rcv(msg, skb); 859 llc_cmsg_rcv(msg, skb);
860
861 if (!(flags & MSG_PEEK)) {
862 sk_eat_skb(sk, skb, 0);
863 *seq = 0;
864 }
865
860 goto out; 866 goto out;
861} 867}
862 868
diff --git a/net/netfilter/xt_connbytes.c b/net/netfilter/xt_connbytes.c
index 5b138506690e..9ddf1c3bfb39 100644
--- a/net/netfilter/xt_connbytes.c
+++ b/net/netfilter/xt_connbytes.c
@@ -87,10 +87,10 @@ connbytes_mt(const struct sk_buff *skb, struct xt_action_param *par)
87 break; 87 break;
88 } 88 }
89 89
90 if (sinfo->count.to) 90 if (sinfo->count.to >= sinfo->count.from)
91 return what <= sinfo->count.to && what >= sinfo->count.from; 91 return what <= sinfo->count.to && what >= sinfo->count.from;
92 else 92 else /* inverted */
93 return what >= sinfo->count.from; 93 return what < sinfo->count.to || what > sinfo->count.from;
94} 94}
95 95
96static int connbytes_mt_check(const struct xt_mtchk_param *par) 96static int connbytes_mt_check(const struct xt_mtchk_param *par)
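
The xt_connbytes fix makes the match treat a descending range (to < from) as an inverted one instead of silently ignoring the upper bound. A small runnable sketch of the corrected comparison, with all of the iptables plumbing left out:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool connbytes_in_range(uint64_t what, uint64_t from, uint64_t to)
{
	if (to >= from)				/* ordinary range */
		return what >= from && what <= to;
	return what < to || what > from;	/* inverted range */
}

int main(void)
{
	printf("%d\n", connbytes_in_range(500, 100, 1000));	/* 1: inside the range */
	printf("%d\n", connbytes_in_range(500, 1000, 100));	/* 0: inside the excluded hole */
	printf("%d\n", connbytes_in_range(50, 1000, 100));	/* 1: below the hole */
	return 0;
}
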
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 3925c6578767..ea66034499ce 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -69,7 +69,7 @@ static int __nci_request(struct nci_dev *ndev,
69 __u32 timeout) 69 __u32 timeout)
70{ 70{
71 int rc = 0; 71 int rc = 0;
72 unsigned long completion_rc; 72 long completion_rc;
73 73
74 ndev->req_status = NCI_REQ_PEND; 74 ndev->req_status = NCI_REQ_PEND;
75 75
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 82a6f34d39d0..3891702b81df 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1630,8 +1630,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
1630 if (snaplen > res) 1630 if (snaplen > res)
1631 snaplen = res; 1631 snaplen = res;
1632 1632
1633 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 1633 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
1634 (unsigned)sk->sk_rcvbuf)
1635 goto drop_n_acct; 1634 goto drop_n_acct;
1636 1635
1637 if (skb_shared(skb)) { 1636 if (skb_shared(skb)) {
@@ -1762,8 +1761,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
1762 if (po->tp_version <= TPACKET_V2) { 1761 if (po->tp_version <= TPACKET_V2) {
1763 if (macoff + snaplen > po->rx_ring.frame_size) { 1762 if (macoff + snaplen > po->rx_ring.frame_size) {
1764 if (po->copy_thresh && 1763 if (po->copy_thresh &&
1765 atomic_read(&sk->sk_rmem_alloc) + skb->truesize 1764 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
1766 < (unsigned)sk->sk_rcvbuf) {
1767 if (skb_shared(skb)) { 1765 if (skb_shared(skb)) {
1768 copy_skb = skb_clone(skb, GFP_ATOMIC); 1766 copy_skb = skb_clone(skb, GFP_ATOMIC);
1769 } else { 1767 } else {
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index f88256cbacbf..28de43092330 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -107,7 +107,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
107 if (!netif_is_multiqueue(dev)) 107 if (!netif_is_multiqueue(dev))
108 return -EOPNOTSUPP; 108 return -EOPNOTSUPP;
109 109
110 if (nla_len(opt) < sizeof(*qopt)) 110 if (!opt || nla_len(opt) < sizeof(*qopt))
111 return -EINVAL; 111 return -EINVAL;
112 112
113 qopt = nla_data(opt); 113 qopt = nla_data(opt);
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 152b5b3c3fff..acd2edbc073e 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -173,7 +173,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
173 asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0; 173 asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
174 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay; 174 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
175 asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = 175 asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
176 (unsigned long)sp->autoclose * HZ; 176 min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ;
177 177
178 /* Initializes the timers */ 178 /* Initializes the timers */
179 for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) 179 for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
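
The SCTP hunks above add a global sctp_max_autoclose (initialised to INT_MAX / HZ in sctp_init() and exported as a sysctl) and clamp the per-socket autoclose value against it before multiplying by HZ. A minimal sketch of why the clamp matters; HZ is fixed at an arbitrary value here since the real one is configuration dependent:

#include <limits.h>
#include <stdio.h>

#define HZ 250				/* illustrative only */

static unsigned long autoclose_jiffies(unsigned long autoclose_sec,
				       unsigned long max_autoclose_sec)
{
	unsigned long sec = autoclose_sec < max_autoclose_sec ?
			    autoclose_sec : max_autoclose_sec;

	return sec * HZ;		/* cannot overflow once clamped */
}

int main(void)
{
	unsigned long max = INT_MAX / HZ;	/* mirrors sctp_init() */

	printf("%lu\n", autoclose_jiffies(7200, max));		/* 1800000 */
	printf("%lu\n", autoclose_jiffies(ULONG_MAX, max));	/* clamped to max * HZ */
	return 0;
}
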
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 08b3cead6503..817174eb5f41 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -697,13 +697,7 @@ static void sctp_packet_append_data(struct sctp_packet *packet,
697 /* Keep track of how many bytes are in flight to the receiver. */ 697 /* Keep track of how many bytes are in flight to the receiver. */
698 asoc->outqueue.outstanding_bytes += datasize; 698 asoc->outqueue.outstanding_bytes += datasize;
699 699
700 /* Update our view of the receiver's rwnd. Include sk_buff overhead 700 /* Update our view of the receiver's rwnd. */
701 * while updating peer.rwnd so that it reduces the chances of a
702 * receiver running out of receive buffer space even when receive
703 * window is still open. This can happen when a sender is sending
704 * sending small messages.
705 */
706 datasize += sizeof(struct sk_buff);
707 if (datasize < rwnd) 701 if (datasize < rwnd)
708 rwnd -= datasize; 702 rwnd -= datasize;
709 else 703 else
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 14c2b06028ff..cfeb1d4a1ee6 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -411,8 +411,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
411 chunk->transport->flight_size -= 411 chunk->transport->flight_size -=
412 sctp_data_size(chunk); 412 sctp_data_size(chunk);
413 q->outstanding_bytes -= sctp_data_size(chunk); 413 q->outstanding_bytes -= sctp_data_size(chunk);
414 q->asoc->peer.rwnd += (sctp_data_size(chunk) + 414 q->asoc->peer.rwnd += sctp_data_size(chunk);
415 sizeof(struct sk_buff));
416 } 415 }
417 continue; 416 continue;
418 } 417 }
@@ -432,8 +431,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
432 * (Section 7.2.4)), add the data size of those 431 * (Section 7.2.4)), add the data size of those
433 * chunks to the rwnd. 432 * chunks to the rwnd.
434 */ 433 */
435 q->asoc->peer.rwnd += (sctp_data_size(chunk) + 434 q->asoc->peer.rwnd += sctp_data_size(chunk);
436 sizeof(struct sk_buff));
437 q->outstanding_bytes -= sctp_data_size(chunk); 435 q->outstanding_bytes -= sctp_data_size(chunk);
438 if (chunk->transport) 436 if (chunk->transport)
439 transport->flight_size -= sctp_data_size(chunk); 437 transport->flight_size -= sctp_data_size(chunk);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 61b9fca5a173..6f6ad8686833 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1285,6 +1285,9 @@ SCTP_STATIC __init int sctp_init(void)
1285 sctp_max_instreams = SCTP_DEFAULT_INSTREAMS; 1285 sctp_max_instreams = SCTP_DEFAULT_INSTREAMS;
1286 sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS; 1286 sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS;
1287 1287
1288 /* Initialize maximum autoclose timeout. */
1289 sctp_max_autoclose = INT_MAX / HZ;
1290
1288 /* Initialize handle used for association ids. */ 1291 /* Initialize handle used for association ids. */
1289 idr_init(&sctp_assocs_id); 1292 idr_init(&sctp_assocs_id);
1290 1293
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 13bf5fcdbff1..54a7cd2fdd7a 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2200,8 +2200,6 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
2200 return -EINVAL; 2200 return -EINVAL;
2201 if (copy_from_user(&sp->autoclose, optval, optlen)) 2201 if (copy_from_user(&sp->autoclose, optval, optlen))
2202 return -EFAULT; 2202 return -EFAULT;
2203 /* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */
2204 sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ);
2205 2203
2206 return 0; 2204 return 0;
2207} 2205}
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 6b3952961b85..60ffbd067ff7 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -53,6 +53,10 @@ static int sack_timer_min = 1;
53static int sack_timer_max = 500; 53static int sack_timer_max = 500;
54static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */ 54static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
55static int rwnd_scale_max = 16; 55static int rwnd_scale_max = 16;
56static unsigned long max_autoclose_min = 0;
57static unsigned long max_autoclose_max =
58 (MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX)
59 ? UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ;
56 60
57extern long sysctl_sctp_mem[3]; 61extern long sysctl_sctp_mem[3];
58extern int sysctl_sctp_rmem[3]; 62extern int sysctl_sctp_rmem[3];
@@ -258,6 +262,15 @@ static ctl_table sctp_table[] = {
258 .extra1 = &one, 262 .extra1 = &one,
259 .extra2 = &rwnd_scale_max, 263 .extra2 = &rwnd_scale_max,
260 }, 264 },
265 {
266 .procname = "max_autoclose",
267 .data = &sctp_max_autoclose,
268 .maxlen = sizeof(unsigned long),
269 .mode = 0644,
270 .proc_handler = &proc_doulongvec_minmax,
271 .extra1 = &max_autoclose_min,
272 .extra2 = &max_autoclose_max,
273 },
261 274
262 { /* sentinel */ } 275 { /* sentinel */ }
263}; 276};
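[annotation] The new "max_autoclose" entry is served by proc_doulongvec_minmax(), so extra1/extra2 must point at unsigned long bounds; max_autoclose_max is capped at MAX_SCHEDULE_TIMEOUT / HZ (and additionally at UINT_MAX) so that the stored value multiplied by HZ stays schedulable. Assuming the SCTP table remains registered under net.sctp, the limit would show up as /proc/sys/net/sctp/max_autoclose. A minimal sketch of the range the handler is given (helper name is mine):

    /* Hedged sketch: the inclusive range handed to proc_doulongvec_minmax()
     * through extra1/extra2 for writes to the new max_autoclose sysctl.
     */
    static bool sctp_max_autoclose_in_range(unsigned long val)
    {
            return val >= max_autoclose_min && val <= max_autoclose_max;
    }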
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 2118d6446630..9049a5caeb25 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2276,8 +2276,6 @@ static void __xfrm_garbage_collect(struct net *net)
2276{ 2276{
2277 struct dst_entry *head, *next; 2277 struct dst_entry *head, *next;
2278 2278
2279 flow_cache_flush();
2280
2281 spin_lock_bh(&xfrm_policy_sk_bundle_lock); 2279 spin_lock_bh(&xfrm_policy_sk_bundle_lock);
2282 head = xfrm_policy_sk_bundles; 2280 head = xfrm_policy_sk_bundles;
2283 xfrm_policy_sk_bundles = NULL; 2281 xfrm_policy_sk_bundles = NULL;
@@ -2290,6 +2288,18 @@ static void __xfrm_garbage_collect(struct net *net)
2290 } 2288 }
2291} 2289}
2292 2290
2291static void xfrm_garbage_collect(struct net *net)
2292{
2293 flow_cache_flush();
2294 __xfrm_garbage_collect(net);
2295}
2296
2297static void xfrm_garbage_collect_deferred(struct net *net)
2298{
2299 flow_cache_flush_deferred();
2300 __xfrm_garbage_collect(net);
2301}
2302
2293static void xfrm_init_pmtu(struct dst_entry *dst) 2303static void xfrm_init_pmtu(struct dst_entry *dst)
2294{ 2304{
2295 do { 2305 do {
@@ -2422,7 +2432,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
2422 if (likely(dst_ops->neigh_lookup == NULL)) 2432 if (likely(dst_ops->neigh_lookup == NULL))
2423 dst_ops->neigh_lookup = xfrm_neigh_lookup; 2433 dst_ops->neigh_lookup = xfrm_neigh_lookup;
2424 if (likely(afinfo->garbage_collect == NULL)) 2434 if (likely(afinfo->garbage_collect == NULL))
2425 afinfo->garbage_collect = __xfrm_garbage_collect; 2435 afinfo->garbage_collect = xfrm_garbage_collect_deferred;
2426 xfrm_policy_afinfo[afinfo->family] = afinfo; 2436 xfrm_policy_afinfo[afinfo->family] = afinfo;
2427 } 2437 }
2428 write_unlock_bh(&xfrm_policy_afinfo_lock); 2438 write_unlock_bh(&xfrm_policy_afinfo_lock);
@@ -2516,7 +2526,7 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void
2516 2526
2517 switch (event) { 2527 switch (event) {
2518 case NETDEV_DOWN: 2528 case NETDEV_DOWN:
2519 __xfrm_garbage_collect(dev_net(dev)); 2529 xfrm_garbage_collect(dev_net(dev));
2520 } 2530 }
2521 return NOTIFY_DONE; 2531 return NOTIFY_DONE;
2522} 2532}
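[annotation] The xfrm change splits the old collector in two: xfrm_garbage_collect() keeps the synchronous flow_cache_flush() for the NETDEV_DOWN notifier path, while the per-family garbage_collect callback is now wired to xfrm_garbage_collect_deferred(), which calls flow_cache_flush_deferred() instead, presumably because that callback can run in contexts where a synchronous flush is unsafe. The implementation of flow_cache_flush_deferred() is not part of the hunks shown; a minimal sketch of how such a deferral is typically built (simplified, identifiers assumed):

    #include <linux/workqueue.h>

    /* Hedged sketch: push the synchronous (possibly sleeping) flush onto a
     * workqueue so callers that cannot sleep merely schedule it.
     */
    static void flow_cache_flush_task(struct work_struct *work)
    {
            flow_cache_flush();
    }

    static DECLARE_WORK(flow_flush_work, flow_cache_flush_task);

    void flow_cache_flush_deferred(void)
    {
            schedule_work(&flow_flush_work);
    }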
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index ba573fe7c74d..914833d99b06 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -60,8 +60,8 @@ update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h
60 --directory=$(srctree) --directory=$(objtree) \ 60 --directory=$(srctree) --directory=$(objtree) \
61 --output $(obj)/config.pot 61 --output $(obj)/config.pot
62 $(Q)sed -i s/CHARSET/UTF-8/ $(obj)/config.pot 62 $(Q)sed -i s/CHARSET/UTF-8/ $(obj)/config.pot
63 $(Q)ln -fs Kconfig.x86 arch/um/Kconfig 63 $(Q)(for i in `ls $(srctree)/arch/*/Kconfig \
64 $(Q)(for i in `ls $(srctree)/arch/*/Kconfig`; \ 64 $(srctree)/arch/*/um/Kconfig`; \
65 do \ 65 do \
66 echo " GEN $$i"; \ 66 echo " GEN $$i"; \
67 $(obj)/kxgettext $$i \ 67 $(obj)/kxgettext $$i \
@@ -69,7 +69,6 @@ update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h
69 done ) 69 done )
70 $(Q)msguniq --sort-by-file --to-code=UTF-8 $(obj)/config.pot \ 70 $(Q)msguniq --sort-by-file --to-code=UTF-8 $(obj)/config.pot \
71 --output $(obj)/linux.pot 71 --output $(obj)/linux.pot
72 $(Q)rm -f $(srctree)/arch/um/Kconfig
73 $(Q)rm -f $(obj)/config.pot 72 $(Q)rm -f $(obj)/config.pot
74 73
75PHONY += allnoconfig allyesconfig allmodconfig alldefconfig randconfig 74PHONY += allnoconfig allyesconfig allmodconfig alldefconfig randconfig
diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c
index 6e5addeb236b..73516f69ac7c 100644
--- a/sound/atmel/ac97c.c
+++ b/sound/atmel/ac97c.c
@@ -899,6 +899,10 @@ static void atmel_ac97c_reset(struct atmel_ac97c *chip)
899 /* AC97 v2.2 specifications says minimum 1 us. */ 899 /* AC97 v2.2 specifications says minimum 1 us. */
900 udelay(2); 900 udelay(2);
901 gpio_set_value(chip->reset_pin, 1); 901 gpio_set_value(chip->reset_pin, 1);
902 } else {
903 ac97c_writel(chip, MR, AC97C_MR_WRST | AC97C_MR_ENA);
904 udelay(2);
905 ac97c_writel(chip, MR, AC97C_MR_ENA);
902 } 906 }
903} 907}
904 908