 Documentation/devicetree/bindings/arm/l2cc.txt |  44
 Documentation/hwmon/max16065                   |   7
 arch/arm/include/asm/hardware/cache-l2x0.h     |  51
 arch/arm/include/asm/outercache.h              |   7
 arch/arm/kernel/asm-offsets.c                  |  12
 arch/arm/mach-integrator/integrator_ap.c       |   6
 arch/arm/mm/abort-macro.S                      |   2
 arch/arm/mm/cache-l2x0.c                       | 235
 arch/arm/mm/init.c                             |   2
 arch/x86/include/asm/pvclock.h                 |   2
 arch/x86/kernel/cpu/perf_event.c               |   3
 arch/x86/pci/acpi.c                            |   9
 arch/x86/xen/setup.c                           |  19
 arch/x86/xen/smp.c                             |  10
 arch/x86/xen/xen-asm_32.S                      |   8
 drivers/base/regmap/regmap.c                   |   5
 drivers/dma/ste_dma40.c                        |  42
 drivers/hid/hid-ids.h                          |   1
 drivers/hid/hid-magicmouse.c                   |  66
 drivers/hid/hid-wacom.c                        |  22
 drivers/hid/usbhid/hid-quirks.c                |   1
 drivers/hwmon/max16065.c                       |   2
 drivers/hwmon/pmbus/ucd9000.c                  |   6
 drivers/hwmon/pmbus/ucd9200.c                  |   6
 drivers/i2c/busses/i2c-pxa-pci.c               |   5
 drivers/i2c/busses/i2c-tegra.c                 |  60
 drivers/iommu/amd_iommu.c                      |  18
 drivers/md/md.c                                |  12
 drivers/md/raid1.c                             |  14
 drivers/md/raid10.c                            |  47
 drivers/mmc/core/core.c                        |  35
 drivers/mmc/core/host.c                        |  12
 drivers/mmc/core/host.h                        |   8
 drivers/mmc/core/sd.c                          |  81
 drivers/mmc/host/sdhci-esdhc-imx.c             |   1
 drivers/mmc/host/sdhci-s3c.c                   |   2
 drivers/mmc/host/sh_mobile_sdhi.c              |   4
 drivers/mtd/ubi/debug.h                        |   2
 drivers/net/arm/am79c961a.c                    |   3
 drivers/pci/hotplug/pcihp_slot.c               |   4
 drivers/pci/pci.c                              |   2
 drivers/pci/probe.c                            |  44
 drivers/rtc/rtc-ep93xx.c                       |  16
 drivers/rtc/rtc-lib.c                          |   2
 drivers/rtc/rtc-twl.c                          |  60
 drivers/video/backlight/backlight.c            |   2
 fs/9p/v9fs_vfs.h                               |   6
 fs/9p/vfs_file.c                               |  36
 fs/9p/vfs_inode.c                              | 139
 fs/9p/vfs_inode_dotl.c                         |  86
 fs/9p/vfs_super.c                              |   2
 fs/block_dev.c                                 |   7
 fs/ceph/mds_client.c                           |   2
 fs/ceph/super.c                                |   4
 fs/namei.c                                     |  33
 fs/ubifs/debug.h                               |   6
 include/linux/perf_event.h                     |  24
 include/linux/regulator/consumer.h             |   2
 include/net/9p/9p.h                            |  29
 include/net/cfg80211.h                         |   2
 kernel/events/core.c                           |  67
 kernel/sched.c                                 |  43
 kernel/time/alarmtimer.c                       |  18
 net/9p/trans_virtio.c                          |  17
 net/ceph/msgpool.c                             |  40
 net/ceph/osd_client.c                          |  22
 66 files changed, 1190 insertions(+), 397 deletions(-)
diff --git a/Documentation/devicetree/bindings/arm/l2cc.txt b/Documentation/devicetree/bindings/arm/l2cc.txt
new file mode 100644
index 000000000000..7ca52161e7ab
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/l2cc.txt
@@ -0,0 +1,44 @@
+* ARM L2 Cache Controller
+
+ARM cores often have a separate level 2 cache controller. There are various
+implementations of the L2 cache controller with compatible programming models.
+The ARM L2 cache representation in the device tree should be done as follows:
+
+Required properties:
+
+- compatible : should be one of:
+  "arm,pl310-cache"
+  "arm,l220-cache"
+  "arm,l210-cache"
+- cache-unified : Specifies the cache is a unified cache.
+- cache-level : Should be set to 2 for a level 2 cache.
+- reg : Physical base address and size of cache controller's memory mapped
+  registers.
+
+Optional properties:
+
+- arm,data-latency : Cycles of latency for Data RAM accesses. Specifies 3 cells of
+  read, write and setup latencies. Minimum valid values are 1. Controllers
+  without setup latency control should use a value of 0.
+- arm,tag-latency : Cycles of latency for Tag RAM accesses. Specifies 3 cells of
+  read, write and setup latencies. Controllers without setup latency control
+  should use 0. Controllers without separate read and write Tag RAM latency
+  values should only use the first cell.
+- arm,dirty-latency : Cycles of latency for Dirty RAMs. This is a single cell.
+- arm,filter-ranges : <start length> Starting address and length of window to
+  filter. Addresses in the filter window are directed to the M1 port. Other
+  addresses will go to the M0 port.
+- interrupts : 1 combined interrupt.
+
+Example:
+
+L2: cache-controller {
+        compatible = "arm,pl310-cache";
+        reg = <0xfff12000 0x1000>;
+        arm,data-latency = <1 1 1>;
+        arm,tag-latency = <2 2 2>;
+        arm,filter-ranges = <0x80000000 0x8000000>;
+        cache-unified;
+        cache-level = <2>;
+        interrupts = <45>;
+};
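
Note on arm,filter-ranges: the PL310 address filter works at 1 MiB granularity,
and pl310_of_setup() later in this patch rounds the window accordingly (start
is masked down to a 1 MiB boundary, end is ALIGN()ed up). For the example above
the window is already aligned, so it programs as start = 0x80000000 and
end = 0x80000000 + 0x8000000 = 0x88000000.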
diff --git a/Documentation/hwmon/max16065 b/Documentation/hwmon/max16065
index 44b4f61e04f9..c11f64a1f2ad 100644
--- a/Documentation/hwmon/max16065
+++ b/Documentation/hwmon/max16065
@@ -62,6 +62,13 @@ can be safely used to identify the chip. You will have to instantiate
 the devices explicitly. Please see Documentation/i2c/instantiating-devices for
 details.
 
+WARNING: Do not access chip registers using the i2cdump command, and do not use
+any of the i2ctools commands on a command register (0xa5 to 0xac). The chips
+supported by this driver interpret any access to a command register (including
+read commands) as a request to execute the command in question. This may
+result in power loss, board resets, and/or Flash corruption. Worst case, your
+board may turn into a brick.
+
 
 Sysfs entries
 -------------
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index bfa706ffd968..434edccdf7f3 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -45,8 +45,15 @@
 #define L2X0_CLEAN_INV_LINE_PA		0x7F0
 #define L2X0_CLEAN_INV_LINE_IDX		0x7F8
 #define L2X0_CLEAN_INV_WAY		0x7FC
-#define L2X0_LOCKDOWN_WAY_D		0x900
-#define L2X0_LOCKDOWN_WAY_I		0x904
+/*
+ * The lockdown registers repeat 8 times for L310, the L210 has only one
+ * D and one I lockdown register at 0x0900 and 0x0904.
+ */
+#define L2X0_LOCKDOWN_WAY_D_BASE	0x900
+#define L2X0_LOCKDOWN_WAY_I_BASE	0x904
+#define L2X0_LOCKDOWN_STRIDE		0x08
+#define L2X0_ADDR_FILTER_START		0xC00
+#define L2X0_ADDR_FILTER_END		0xC04
 #define L2X0_TEST_OPERATION		0xF00
 #define L2X0_LINE_DATA			0xF10
 #define L2X0_LINE_TAG			0xF30
@@ -60,8 +67,23 @@
 #define L2X0_CACHE_ID_PART_MASK		(0xf << 6)
 #define L2X0_CACHE_ID_PART_L210		(1 << 6)
 #define L2X0_CACHE_ID_PART_L310		(3 << 6)
+#define L2X0_CACHE_ID_RTL_MASK		0x3f
+#define L2X0_CACHE_ID_RTL_R0P0		0x0
+#define L2X0_CACHE_ID_RTL_R1P0		0x2
+#define L2X0_CACHE_ID_RTL_R2P0		0x4
+#define L2X0_CACHE_ID_RTL_R3P0		0x5
+#define L2X0_CACHE_ID_RTL_R3P1		0x6
+#define L2X0_CACHE_ID_RTL_R3P2		0x8
 
 #define L2X0_AUX_CTRL_MASK			0xc0000fff
+#define L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT	0
+#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK	0x7
+#define L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT	3
+#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK	(0x7 << 3)
+#define L2X0_AUX_CTRL_TAG_LATENCY_SHIFT		6
+#define L2X0_AUX_CTRL_TAG_LATENCY_MASK		(0x7 << 6)
+#define L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT	9
+#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK	(0x7 << 9)
 #define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT	16
 #define L2X0_AUX_CTRL_WAY_SIZE_SHIFT		17
 #define L2X0_AUX_CTRL_WAY_SIZE_MASK		(0x7 << 17)
@@ -72,8 +94,33 @@
 #define L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT	29
 #define L2X0_AUX_CTRL_EARLY_BRESP_SHIFT		30
 
+#define L2X0_LATENCY_CTRL_SETUP_SHIFT	0
+#define L2X0_LATENCY_CTRL_RD_SHIFT	4
+#define L2X0_LATENCY_CTRL_WR_SHIFT	8
+
+#define L2X0_ADDR_FILTER_EN		1
+
 #ifndef __ASSEMBLY__
 extern void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask);
+extern int l2x0_of_init(__u32 aux_val, __u32 aux_mask);
+
+struct l2x0_regs {
+	unsigned long phy_base;
+	unsigned long aux_ctrl;
+	/*
+	 * Whether the following registers need to be saved/restored
+	 * depends on platform
+	 */
+	unsigned long tag_latency;
+	unsigned long data_latency;
+	unsigned long filter_start;
+	unsigned long filter_end;
+	unsigned long prefetch_ctrl;
+	unsigned long pwr_ctrl;
+};
+
+extern struct l2x0_regs l2x0_saved_regs;
+
 #endif
 
 #endif
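
A worked use of the new AUX_CTRL latency fields (a sketch only; the encoding
stores "latency minus one", as l2x0_of_setup() later in this patch does, and
the 2-cycle tag latency is taken from the binding example):

	/* Fold a 2-cycle tag latency into an auxiliary control value. */
	mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;			/* 0x7 << 6 */
	val |= (2 - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;	/* == 0x40 */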
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index d8387437ec5a..53426c66352a 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -34,6 +34,7 @@ struct outer_cache_fns {
 	void (*sync)(void);
 #endif
 	void (*set_debug)(unsigned long);
+	void (*resume)(void);
 };
 
 #ifdef CONFIG_OUTER_CACHE
@@ -74,6 +75,12 @@ static inline void outer_disable(void)
 	outer_cache.disable();
 }
 
+static inline void outer_resume(void)
+{
+	if (outer_cache.resume)
+		outer_cache.resume();
+}
+
 #else
 
 static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
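
A minimal sketch of where outer_resume() is meant to be called (hypothetical
platform hook; the real call site depends on the SoC's power management code):

	/* Hypothetical SoC resume path: bring the outer cache back before
	 * anything relies on the L2 configuration again. */
	static void my_soc_resume(void)
	{
		/* ... restore core state, clocks, etc. ... */
		outer_resume();	/* no-op unless a driver registered .resume */
	}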
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 16baba2e4369..1429d8989fb9 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -20,6 +20,7 @@
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/procinfo.h>
+#include <asm/hardware/cache-l2x0.h>
 #include <linux/kbuild.h>
 
 /*
@@ -92,6 +93,17 @@ int main(void)
   DEFINE(S_OLD_R0,		offsetof(struct pt_regs, ARM_ORIG_r0));
   DEFINE(S_FRAME_SIZE,		sizeof(struct pt_regs));
   BLANK();
+#ifdef CONFIG_CACHE_L2X0
+  DEFINE(L2X0_R_PHY_BASE,	offsetof(struct l2x0_regs, phy_base));
+  DEFINE(L2X0_R_AUX_CTRL,	offsetof(struct l2x0_regs, aux_ctrl));
+  DEFINE(L2X0_R_TAG_LATENCY,	offsetof(struct l2x0_regs, tag_latency));
+  DEFINE(L2X0_R_DATA_LATENCY,	offsetof(struct l2x0_regs, data_latency));
+  DEFINE(L2X0_R_FILTER_START,	offsetof(struct l2x0_regs, filter_start));
+  DEFINE(L2X0_R_FILTER_END,	offsetof(struct l2x0_regs, filter_end));
+  DEFINE(L2X0_R_PREFETCH_CTRL,	offsetof(struct l2x0_regs, prefetch_ctrl));
+  DEFINE(L2X0_R_PWR_CTRL,	offsetof(struct l2x0_regs, pwr_ctrl));
+  BLANK();
+#endif
 #ifdef CONFIG_CPU_HAS_ASID
   DEFINE(MM_CONTEXT_ID,		offsetof(struct mm_struct, context.id));
   BLANK();
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index 2fbbdd5eac35..fcf0ae95651f 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -337,15 +337,15 @@ static unsigned long timer_reload;
 static void integrator_clocksource_init(u32 khz)
 {
 	void __iomem *base = (void __iomem *)TIMER2_VA_BASE;
-	u32 ctrl = TIMER_CTRL_ENABLE;
+	u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC;
 
 	if (khz >= 1500) {
 		khz /= 16;
-		ctrl = TIMER_CTRL_DIV16;
+		ctrl |= TIMER_CTRL_DIV16;
 	}
 
-	writel(ctrl, base + TIMER_CTRL);
 	writel(0xffff, base + TIMER_LOAD);
+	writel(ctrl, base + TIMER_CTRL);
 
 	clocksource_mmio_init(base + TIMER_VALUE, "timer2",
 			khz * 1000, 200, 16, clocksource_mmio_readl_down);
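
The two integrator changes are related fixes: the divider branch used to
overwrite ctrl with "=" instead of "|=", silently dropping TIMER_CTRL_ENABLE,
and writing TIMER_CTRL before TIMER_LOAD started the (now periodic) timer
before its reload value was programmed. Loading first and enabling last closes
that window.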
diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S
index 52162d59407a..2cbf68ef0e83 100644
--- a/arch/arm/mm/abort-macro.S
+++ b/arch/arm/mm/abort-macro.S
@@ -17,7 +17,7 @@
 	cmp	\tmp, # 0x5600			@ Is it ldrsb?
 	orreq	\tmp, \tmp, #1 << 11		@ Set L-bit if yes
 	tst	\tmp, #1 << 11			@ L = 0 -> write
-	orreq	\psr, \psr, #1 << 11		@ yes.
+	orreq	\fsr, \fsr, #1 << 11		@ yes.
 	b	do_DataAbort
 not_thumb:
 	.endm
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 44c086710d2b..3f9b9980478e 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -16,9 +16,12 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
+#include <linux/err.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
 
 #include <asm/cacheflush.h>
 #include <asm/hardware/cache-l2x0.h>
@@ -30,11 +33,19 @@ static DEFINE_SPINLOCK(l2x0_lock);
 static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
 static uint32_t l2x0_size;
 
+struct l2x0_regs l2x0_saved_regs;
+
+struct l2x0_of_data {
+	void (*setup)(const struct device_node *, __u32 *, __u32 *);
+	void (*save)(void);
+	void (*resume)(void);
+};
+
 static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
 {
 	/* wait for cache operation by line or way to complete */
 	while (readl_relaxed(reg) & mask)
-		;
+		cpu_relax();
 }
 
 #ifdef CONFIG_CACHE_PL310
@@ -277,6 +288,25 @@ static void l2x0_disable(void)
 	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
+static void l2x0_unlock(__u32 cache_id)
+{
+	int lockregs;
+	int i;
+
+	if (cache_id == L2X0_CACHE_ID_PART_L310)
+		lockregs = 8;
+	else
+		/* L210 and unknown types */
+		lockregs = 1;
+
+	for (i = 0; i < lockregs; i++) {
+		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
+			       i * L2X0_LOCKDOWN_STRIDE);
+		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
+			       i * L2X0_LOCKDOWN_STRIDE);
+	}
+}
+
 void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 {
 	__u32 aux;
@@ -328,10 +358,14 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 	 * accessing the below registers will fault.
 	 */
 	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+		/* Make sure that I&D is not locked down when starting */
+		l2x0_unlock(cache_id);
 
 		/* l2x0 controller is disabled */
 		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
 
+		l2x0_saved_regs.aux_ctrl = aux;
+
 		l2x0_inv_all();
 
 		/* enable L2X0 */
@@ -351,3 +385,202 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
 			ways, cache_id, aux, l2x0_size);
 }
+
+#ifdef CONFIG_OF
+static void __init l2x0_of_setup(const struct device_node *np,
+				 __u32 *aux_val, __u32 *aux_mask)
+{
+	u32 data[2] = { 0, 0 };
+	u32 tag = 0;
+	u32 dirty = 0;
+	u32 val = 0, mask = 0;
+
+	of_property_read_u32(np, "arm,tag-latency", &tag);
+	if (tag) {
+		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
+		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
+	}
+
+	of_property_read_u32_array(np, "arm,data-latency",
+				   data, ARRAY_SIZE(data));
+	if (data[0] && data[1]) {
+		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
+			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
+		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
+		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
+	}
+
+	of_property_read_u32(np, "arm,dirty-latency", &dirty);
+	if (dirty) {
+		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
+		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
+	}
+
+	*aux_val &= ~mask;
+	*aux_val |= val;
+	*aux_mask &= ~mask;
+}
+
+static void __init pl310_of_setup(const struct device_node *np,
+				  __u32 *aux_val, __u32 *aux_mask)
+{
+	u32 data[3] = { 0, 0, 0 };
+	u32 tag[3] = { 0, 0, 0 };
+	u32 filter[2] = { 0, 0 };
+
+	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
+	if (tag[0] && tag[1] && tag[2])
+		writel_relaxed(
+			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
+			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
+			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
+			l2x0_base + L2X0_TAG_LATENCY_CTRL);
+
+	of_property_read_u32_array(np, "arm,data-latency",
+				   data, ARRAY_SIZE(data));
+	if (data[0] && data[1] && data[2])
+		writel_relaxed(
+			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
+			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
+			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
+			l2x0_base + L2X0_DATA_LATENCY_CTRL);
+
+	of_property_read_u32_array(np, "arm,filter-ranges",
+				   filter, ARRAY_SIZE(filter));
+	if (filter[1]) {
+		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
+			       l2x0_base + L2X0_ADDR_FILTER_END);
+		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
+			       l2x0_base + L2X0_ADDR_FILTER_START);
+	}
+}
+
+static void __init pl310_save(void)
+{
+	u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
+		L2X0_CACHE_ID_RTL_MASK;
+
+	l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
+		L2X0_TAG_LATENCY_CTRL);
+	l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
+		L2X0_DATA_LATENCY_CTRL);
+	l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
+		L2X0_ADDR_FILTER_END);
+	l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
+		L2X0_ADDR_FILTER_START);
+
+	if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
+		/*
+		 * From r2p0, there is Prefetch offset/control register
+		 */
+		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
+			L2X0_PREFETCH_CTRL);
+		/*
+		 * From r3p0, there is Power control register
+		 */
+		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
+			l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
+				L2X0_POWER_CTRL);
+	}
+}
+
+static void l2x0_resume(void)
+{
+	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+		/* restore aux ctrl and enable l2 */
+		l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));
+
+		writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
+			L2X0_AUX_CTRL);
+
+		l2x0_inv_all();
+
+		writel_relaxed(1, l2x0_base + L2X0_CTRL);
+	}
+}
+
+static void pl310_resume(void)
+{
+	u32 l2x0_revision;
+
+	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+		/* restore pl310 setup */
+		writel_relaxed(l2x0_saved_regs.tag_latency,
+			l2x0_base + L2X0_TAG_LATENCY_CTRL);
+		writel_relaxed(l2x0_saved_regs.data_latency,
+			l2x0_base + L2X0_DATA_LATENCY_CTRL);
+		writel_relaxed(l2x0_saved_regs.filter_end,
+			l2x0_base + L2X0_ADDR_FILTER_END);
+		writel_relaxed(l2x0_saved_regs.filter_start,
+			l2x0_base + L2X0_ADDR_FILTER_START);
+
+		l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
+			L2X0_CACHE_ID_RTL_MASK;
+
+		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
+			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
+				l2x0_base + L2X0_PREFETCH_CTRL);
+			if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
+				writel_relaxed(l2x0_saved_regs.pwr_ctrl,
+					l2x0_base + L2X0_POWER_CTRL);
+		}
+	}
+
+	l2x0_resume();
+}
+
+static const struct l2x0_of_data pl310_data = {
+	pl310_of_setup,
+	pl310_save,
+	pl310_resume,
+};
+
+static const struct l2x0_of_data l2x0_data = {
+	l2x0_of_setup,
+	NULL,
+	l2x0_resume,
+};
+
+static const struct of_device_id l2x0_ids[] __initconst = {
+	{ .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
+	{ .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
+	{ .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
+	{}
+};
+
+int __init l2x0_of_init(__u32 aux_val, __u32 aux_mask)
+{
+	struct device_node *np;
+	struct l2x0_of_data *data;
+	struct resource res;
+
+	np = of_find_matching_node(NULL, l2x0_ids);
+	if (!np)
+		return -ENODEV;
+
+	if (of_address_to_resource(np, 0, &res))
+		return -ENODEV;
+
+	l2x0_base = ioremap(res.start, resource_size(&res));
+	if (!l2x0_base)
+		return -ENOMEM;
+
+	l2x0_saved_regs.phy_base = res.start;
+
+	data = of_match_node(l2x0_ids, np)->data;
+
+	/* L2 configuration can only be changed if the cache is disabled */
+	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+		if (data->setup)
+			data->setup(np, &aux_val, &aux_mask);
+	}
+
+	if (data->save)
+		data->save();
+
+	l2x0_init(l2x0_base, aux_val, aux_mask);
+
+	outer_cache.resume = data->resume;
+	return 0;
+}
+#endif
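
A minimal sketch of how a DT-enabled board would hook this up (hypothetical
machine-init fragment, not part of this patch):

	/* Hypothetical .init_machine fragment for a board with a PL310.
	 * aux_val = 0 with an all-ones aux_mask forces no AUX_CTRL bits,
	 * so only the values from the device tree are applied. */
	static void __init my_board_init_machine(void)
	{
		l2x0_of_init(0, ~0UL);
	}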
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 91bca355cd31..cc7e2d8be9aa 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -298,7 +298,7 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
 #ifdef CONFIG_HAVE_ARCH_PFN_VALID
 int pfn_valid(unsigned long pfn)
 {
-	return memblock_is_memory(pfn << PAGE_SHIFT);
+	return memblock_is_memory(__pfn_to_phys(pfn));
 }
 EXPORT_SYMBOL(pfn_valid);
 #endif
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index a518c0a45044..c59cc97fe6c1 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -44,7 +44,7 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
44 : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) ); 44 : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
45#elif defined(__x86_64__) 45#elif defined(__x86_64__)
46 __asm__ ( 46 __asm__ (
47 "mul %[mul_frac] ; shrd $32, %[hi], %[lo]" 47 "mulq %[mul_frac] ; shrd $32, %[hi], %[lo]"
48 : [lo]"=a"(product), 48 : [lo]"=a"(product),
49 [hi]"=d"(tmp) 49 [hi]"=d"(tmp)
50 : "0"(delta), 50 : "0"(delta),
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 4ee3abf20ed6..cfa62ec090ec 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1900,6 +1900,9 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 
 	perf_callchain_store(entry, regs->ip);
 
+	if (!current->mm)
+		return;
+
 	if (perf_callchain_user32(regs, entry))
 		return;
 
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index c95330267f08..039d91315bc5 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -365,8 +365,13 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
 	 */
 	if (bus) {
 		struct pci_bus *child;
-		list_for_each_entry(child, &bus->children, node)
-			pcie_bus_configure_settings(child, child->self->pcie_mpss);
+		list_for_each_entry(child, &bus->children, node) {
+			struct pci_dev *self = child->self;
+			if (!self)
+				continue;
+
+			pcie_bus_configure_settings(child, self->pcie_mpss);
+		}
 	}
 
 	if (!bus)
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index df118a825f39..c3b8d440873c 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -184,6 +184,19 @@ static unsigned long __init xen_set_identity(const struct e820entry *list,
 					PFN_UP(start_pci), PFN_DOWN(last));
 	return identity;
 }
+
+static unsigned long __init xen_get_max_pages(void)
+{
+	unsigned long max_pages = MAX_DOMAIN_PAGES;
+	domid_t domid = DOMID_SELF;
+	int ret;
+
+	ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
+	if (ret > 0)
+		max_pages = ret;
+	return min(max_pages, MAX_DOMAIN_PAGES);
+}
+
 /**
  * machine_specific_memory_setup - Hook for machine specific memory setup.
  **/
@@ -292,6 +305,12 @@ char * __init xen_memory_setup(void)
 
 	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
 
+	extra_limit = xen_get_max_pages();
+	if (extra_limit >= max_pfn)
+		extra_pages = extra_limit - max_pfn;
+	else
+		extra_pages = 0;
+
 	extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);
 
 	/*
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index e79dbb95482b..d4fc6d454f8d 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -32,6 +32,7 @@
 #include <xen/page.h>
 #include <xen/events.h>
 
+#include <xen/hvc-console.h>
 #include "xen-ops.h"
 #include "mmu.h"
 
@@ -207,6 +208,15 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 	unsigned cpu;
 	unsigned int i;
 
+	if (skip_ioapic_setup) {
+		char *m = (max_cpus == 0) ?
+			"The nosmp parameter is incompatible with Xen; " \
+			"use Xen dom0_max_vcpus=1 parameter" :
+			"The noapic parameter is incompatible with Xen";
+
+		xen_raw_printk(m);
+		panic(m);
+	}
 	xen_init_lock_cpu(0);
 
 	smp_store_cpu_info(0);
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
index 22a2093b5862..b040b0e518ca 100644
--- a/arch/x86/xen/xen-asm_32.S
+++ b/arch/x86/xen/xen-asm_32.S
@@ -113,11 +113,13 @@ xen_iret_start_crit:
 
 	/*
 	 * If there's something pending, mask events again so we can
-	 * jump back into xen_hypervisor_callback
+	 * jump back into xen_hypervisor_callback. Otherwise do not
+	 * touch XEN_vcpu_info_mask.
 	 */
-	sete XEN_vcpu_info_mask(%eax)
+	jne 1f
+	movb $1, XEN_vcpu_info_mask(%eax)
 
-	popl %eax
+1:	popl %eax
 
 	/*
 	 * From this point on the registers are restored and the stack
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 0eef4da1ac61..20663f8dae45 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -168,13 +168,11 @@ struct regmap *regmap_init(struct device *dev,
 	map->work_buf = kmalloc(map->format.buf_size, GFP_KERNEL);
 	if (map->work_buf == NULL) {
 		ret = -ENOMEM;
-		goto err_bus;
+		goto err_map;
 	}
 
 	return map;
 
-err_bus:
-	module_put(map->bus->owner);
 err_map:
 	kfree(map);
 err:
@@ -188,7 +186,6 @@ EXPORT_SYMBOL_GPL(regmap_init);
 void regmap_exit(struct regmap *map)
 {
 	kfree(map->work_buf);
-	module_put(map->bus->owner);
 	kfree(map);
 }
 EXPORT_SYMBOL_GPL(regmap_exit);
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index cd3a7c726bf8..467e4dcb20a0 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -174,8 +174,10 @@ struct d40_base;
  * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
  * transfer and call client callback.
  * @client: Cliented owned descriptor list.
+ * @pending_queue: Submitted jobs, to be issued by issue_pending()
  * @active: Active descriptor.
  * @queue: Queued jobs.
+ * @prepare_queue: Prepared jobs.
  * @dma_cfg: The client configuration of this dma channel.
  * @configured: whether the dma_cfg configuration is valid
  * @base: Pointer to the device instance struct.
@@ -203,6 +205,7 @@ struct d40_chan {
 	struct list_head pending_queue;
 	struct list_head active;
 	struct list_head queue;
+	struct list_head prepare_queue;
 	struct stedma40_chan_cfg dma_cfg;
 	bool configured;
 	struct d40_base *base;
@@ -477,7 +480,6 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
 
 		list_for_each_entry_safe(d, _d, &d40c->client, node)
 			if (async_tx_test_ack(&d->txd)) {
-				d40_pool_lli_free(d40c, d);
 				d40_desc_remove(d);
 				desc = d;
 				memset(desc, 0, sizeof(*desc));
@@ -644,8 +646,11 @@ static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
 	return d;
 }
 
+/* remove desc from current queue and add it to the pending_queue */
 static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
 {
+	d40_desc_remove(desc);
+	desc->is_in_client_list = false;
 	list_add_tail(&desc->node, &d40c->pending_queue);
 }
 
@@ -803,6 +808,7 @@ done:
 static void d40_term_all(struct d40_chan *d40c)
 {
 	struct d40_desc *d40d;
+	struct d40_desc *_d;
 
 	/* Release active descriptors */
 	while ((d40d = d40_first_active_get(d40c))) {
@@ -822,6 +828,21 @@ static void d40_term_all(struct d40_chan *d40c)
 		d40_desc_free(d40c, d40d);
 	}
 
+	/* Release client owned descriptors */
+	if (!list_empty(&d40c->client))
+		list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
+			d40_desc_remove(d40d);
+			d40_desc_free(d40c, d40d);
+		}
+
+	/* Release descriptors in prepare queue */
+	if (!list_empty(&d40c->prepare_queue))
+		list_for_each_entry_safe(d40d, _d,
+					 &d40c->prepare_queue, node) {
+			d40_desc_remove(d40d);
+			d40_desc_free(d40c, d40d);
+		}
+
 	d40c->pending_tx = 0;
 	d40c->busy = false;
 }
@@ -1208,7 +1229,6 @@ static void dma_tasklet(unsigned long data)
 
 	if (!d40d->cyclic) {
 		if (async_tx_test_ack(&d40d->txd)) {
-			d40_pool_lli_free(d40c, d40d);
 			d40_desc_remove(d40d);
 			d40_desc_free(d40c, d40d);
 		} else {
@@ -1595,21 +1615,10 @@ static int d40_free_dma(struct d40_chan *d40c)
 	u32 event;
 	struct d40_phy_res *phy = d40c->phy_chan;
 	bool is_src;
-	struct d40_desc *d;
-	struct d40_desc *_d;
-
 
 	/* Terminate all queued and active transfers */
 	d40_term_all(d40c);
 
-	/* Release client owned descriptors */
-	if (!list_empty(&d40c->client))
-		list_for_each_entry_safe(d, _d, &d40c->client, node) {
-			d40_pool_lli_free(d40c, d);
-			d40_desc_remove(d);
-			d40_desc_free(d40c, d);
-		}
-
 	if (phy == NULL) {
 		chan_err(d40c, "phy == null\n");
 		return -EINVAL;
@@ -1911,6 +1920,12 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
 		goto err;
 	}
 
+	/*
+	 * add descriptor to the prepare queue in order to be able
+	 * to free them later in terminate_all
+	 */
+	list_add_tail(&desc->node, &chan->prepare_queue);
+
 	spin_unlock_irqrestore(&chan->lock, flags);
 
 	return &desc->txd;
@@ -2400,6 +2415,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
 		INIT_LIST_HEAD(&d40c->queue);
 		INIT_LIST_HEAD(&d40c->pending_queue);
 		INIT_LIST_HEAD(&d40c->client);
+		INIT_LIST_HEAD(&d40c->prepare_queue);
 
 		tasklet_init(&d40c->tasklet, dma_tasklet,
 			     (unsigned long) d40c);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 7d27d2b0445a..7484e1b67249 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -277,6 +277,7 @@
 #define USB_DEVICE_ID_PENPOWER		0x00f4
 
 #define USB_VENDOR_ID_GREENASIA		0x0e8f
+#define USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD	0x3013
 
 #define USB_VENDOR_ID_GRETAGMACBETH	0x0971
 #define USB_DEVICE_ID_GRETAGMACBETH_HUEY	0x2005
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 0ec91c18a421..f0fbd7bd239e 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -81,6 +81,28 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie
 #define NO_TOUCHES -1
 #define SINGLE_TOUCH_UP -2
 
+/* Touch surface information. Dimension is in hundredths of a mm, min and max
+ * are in units. */
+#define MOUSE_DIMENSION_X (float)9056
+#define MOUSE_MIN_X -1100
+#define MOUSE_MAX_X 1358
+#define MOUSE_RES_X ((MOUSE_MAX_X - MOUSE_MIN_X) / (MOUSE_DIMENSION_X / 100))
+#define MOUSE_DIMENSION_Y (float)5152
+#define MOUSE_MIN_Y -1589
+#define MOUSE_MAX_Y 2047
+#define MOUSE_RES_Y ((MOUSE_MAX_Y - MOUSE_MIN_Y) / (MOUSE_DIMENSION_Y / 100))
+
+#define TRACKPAD_DIMENSION_X (float)13000
+#define TRACKPAD_MIN_X -2909
+#define TRACKPAD_MAX_X 3167
+#define TRACKPAD_RES_X \
+	((TRACKPAD_MAX_X - TRACKPAD_MIN_X) / (TRACKPAD_DIMENSION_X / 100))
+#define TRACKPAD_DIMENSION_Y (float)11000
+#define TRACKPAD_MIN_Y -2456
+#define TRACKPAD_MAX_Y 2565
+#define TRACKPAD_RES_Y \
+	((TRACKPAD_MAX_Y - TRACKPAD_MIN_Y) / (TRACKPAD_DIMENSION_Y / 100))
+
 /**
  * struct magicmouse_sc - Tracks Magic Mouse-specific data.
  * @input: Input device through which we report events.
@@ -406,17 +428,31 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h
 	 * inverse of the reported Y.
 	 */
 	if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
-		input_set_abs_params(input, ABS_MT_POSITION_X, -1100,
-				1358, 4, 0);
-		input_set_abs_params(input, ABS_MT_POSITION_Y, -1589,
-				2047, 4, 0);
+		input_set_abs_params(input, ABS_MT_POSITION_X,
+				MOUSE_MIN_X, MOUSE_MAX_X, 4, 0);
+		input_set_abs_params(input, ABS_MT_POSITION_Y,
+				MOUSE_MIN_Y, MOUSE_MAX_Y, 4, 0);
+
+		input_abs_set_res(input, ABS_MT_POSITION_X,
+				MOUSE_RES_X);
+		input_abs_set_res(input, ABS_MT_POSITION_Y,
+				MOUSE_RES_Y);
 	} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
-		input_set_abs_params(input, ABS_X, -2909, 3167, 4, 0);
-		input_set_abs_params(input, ABS_Y, -2456, 2565, 4, 0);
-		input_set_abs_params(input, ABS_MT_POSITION_X, -2909,
-				3167, 4, 0);
-		input_set_abs_params(input, ABS_MT_POSITION_Y, -2456,
-				2565, 4, 0);
+		input_set_abs_params(input, ABS_X, TRACKPAD_MIN_X,
+				TRACKPAD_MAX_X, 4, 0);
+		input_set_abs_params(input, ABS_Y, TRACKPAD_MIN_Y,
+				TRACKPAD_MAX_Y, 4, 0);
+		input_set_abs_params(input, ABS_MT_POSITION_X,
+				TRACKPAD_MIN_X, TRACKPAD_MAX_X, 4, 0);
+		input_set_abs_params(input, ABS_MT_POSITION_Y,
+				TRACKPAD_MIN_Y, TRACKPAD_MAX_Y, 4, 0);
+
+		input_abs_set_res(input, ABS_X, TRACKPAD_RES_X);
+		input_abs_set_res(input, ABS_Y, TRACKPAD_RES_Y);
+		input_abs_set_res(input, ABS_MT_POSITION_X,
+				TRACKPAD_RES_X);
+		input_abs_set_res(input, ABS_MT_POSITION_Y,
+				TRACKPAD_RES_Y);
 	}
 
 	input_set_events_per_packet(input, 60);
@@ -501,9 +537,17 @@ static int magicmouse_probe(struct hid_device *hdev,
 	}
 	report->size = 6;
 
+	/*
+	 * Some devices respond with 'invalid report id' when feature
+	 * report switching it into multitouch mode is sent to it.
+	 *
+	 * This results in -EIO from the _raw low-level transport callback,
+	 * but there seems to be no other way of switching the mode.
+	 * Thus the super-ugly hacky success check below.
+	 */
 	ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature),
 			HID_FEATURE_REPORT);
-	if (ret != sizeof(feature)) {
+	if (ret != -EIO && ret != sizeof(feature)) {
 		hid_err(hdev, "unable to request touch data (%d)\n", ret);
 		goto err_stop_hw;
 	}
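
A worked check of the resolution formula above (values straight from the
defines): RES = (MAX - MIN) / (DIMENSION / 100) converts the axis range into
units per millimetre, which is what input_abs_set_res() reports. For the mouse
X axis: (1358 - (-1100)) / (9056 / 100) = 2458 / 90.56, roughly 27 units/mm.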
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index 06888323828c..a597039d0755 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -353,11 +353,7 @@ static int wacom_probe(struct hid_device *hdev,
 	if (ret) {
 		hid_warn(hdev, "can't create sysfs battery attribute, err: %d\n",
 			 ret);
-		/*
-		 * battery attribute is not critical for the tablet, but if it
-		 * failed then there is no need to create ac attribute
-		 */
-		goto move_on;
+		goto err_battery;
 	}
 
 	wdata->ac.properties = wacom_ac_props;
@@ -371,14 +367,8 @@
 	if (ret) {
 		hid_warn(hdev,
 			 "can't create ac battery attribute, err: %d\n", ret);
-		/*
-		 * ac attribute is not critical for the tablet, but if it
-		 * failed then we don't want to battery attribute to exist
-		 */
-		power_supply_unregister(&wdata->battery);
+		goto err_ac;
 	}
-
-move_on:
 #endif
 	hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
 	input = hidinput->input;
@@ -416,6 +406,13 @@ move_on:
 
 	return 0;
 
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+err_ac:
+	power_supply_unregister(&wdata->battery);
+err_battery:
+	device_remove_file(&hdev->dev, &dev_attr_speed);
+	hid_hw_stop(hdev);
+#endif
 err_free:
 	kfree(wdata);
 	return ret;
@@ -426,6 +423,7 @@ static void wacom_remove(struct hid_device *hdev)
 #ifdef CONFIG_HID_WACOM_POWER_SUPPLY
 	struct wacom_data *wdata = hid_get_drvdata(hdev);
 #endif
+	device_remove_file(&hdev->dev, &dev_attr_speed);
 	hid_hw_stop(hdev);
 
 #ifdef CONFIG_HID_WACOM_POWER_SUPPLY
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 4bdb5d46c52c..3146fdcda272 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -47,6 +47,7 @@ static const struct hid_blacklist {
 	{ USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL },
 
 	{ USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT },
+	{ USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
 	{ USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
index d94a24fdf4ba..dd2d7b9620c2 100644
--- a/drivers/hwmon/max16065.c
+++ b/drivers/hwmon/max16065.c
@@ -124,7 +124,7 @@ static inline int MV_TO_LIMIT(int mv, int range)
 
 static inline int ADC_TO_CURR(int adc, int gain)
 {
-	return adc * 1400000 / gain * 255;
+	return adc * 1400000 / (gain * 255);
 }
 
 /*
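
Why the parentheses matter (a worked example with illustrative numbers): C
groups "adc * 1400000 / gain * 255" left to right as
((adc * 1400000) / gain) * 255, so with adc = 1 and gain = 1 the old code
returned 1400000 * 255 = 357000000 instead of the intended
1400000 / 255, roughly 5490: a factor of 255 * 255 too large.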
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index ace1c7319734..d0ddb60155c9 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -141,13 +141,11 @@ static int ucd9000_probe(struct i2c_client *client,
 	block_buffer[ret] = '\0';
 	dev_info(&client->dev, "Device ID %s\n", block_buffer);
 
-	mid = NULL;
-	for (i = 0; i < ARRAY_SIZE(ucd9000_id); i++) {
-		mid = &ucd9000_id[i];
+	for (mid = ucd9000_id; mid->name[0]; mid++) {
 		if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
 			break;
 	}
-	if (!mid || !strlen(mid->name)) {
+	if (!mid->name[0]) {
 		dev_err(&client->dev, "Unsupported device\n");
 		return -ENODEV;
 	}
diff --git a/drivers/hwmon/pmbus/ucd9200.c b/drivers/hwmon/pmbus/ucd9200.c
index ffcc1cf3609d..c65e9da707cc 100644
--- a/drivers/hwmon/pmbus/ucd9200.c
+++ b/drivers/hwmon/pmbus/ucd9200.c
@@ -68,13 +68,11 @@ static int ucd9200_probe(struct i2c_client *client,
 	block_buffer[ret] = '\0';
 	dev_info(&client->dev, "Device ID %s\n", block_buffer);
 
-	mid = NULL;
-	for (i = 0; i < ARRAY_SIZE(ucd9200_id); i++) {
-		mid = &ucd9200_id[i];
+	for (mid = ucd9200_id; mid->name[0]; mid++) {
 		if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
 			break;
 	}
-	if (!mid || !strlen(mid->name)) {
+	if (!mid->name[0]) {
 		dev_err(&client->dev, "Unsupported device\n");
 		return -ENODEV;
 	}
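
Both probe fixes use the same idiom: an i2c_device_id table ends with an empty
sentinel entry, so walking until mid->name[0] == '\0' is bounded and leaves mid
at either a match or the sentinel. A generic sketch of the pattern (names are
illustrative, not from these drivers):

	/* Walk a sentinel-terminated ID table; 'table' must end with {}. */
	static const struct i2c_device_id *
	find_id(const struct i2c_device_id *table, const char *name)
	{
		const struct i2c_device_id *mid;

		for (mid = table; mid->name[0]; mid++)
			if (!strncasecmp(mid->name, name, strlen(mid->name)))
				break;
		return mid->name[0] ? mid : NULL; /* NULL: only sentinel hit */
	}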
diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
index 6659d269b841..b73da6cd6f91 100644
--- a/drivers/i2c/busses/i2c-pxa-pci.c
+++ b/drivers/i2c/busses/i2c-pxa-pci.c
@@ -109,12 +109,15 @@ static int __devinit ce4100_i2c_probe(struct pci_dev *dev,
 		return -EINVAL;
 	}
 	sds = kzalloc(sizeof(*sds), GFP_KERNEL);
-	if (!sds)
+	if (!sds) {
+		ret = -ENOMEM;
 		goto err_mem;
+	}
 
 	for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) {
 		sds->pdev[i] = add_i2c_device(dev, i);
 		if (IS_ERR(sds->pdev[i])) {
+			ret = PTR_ERR(sds->pdev[i]);
 			while (--i >= 0)
 				platform_device_unregister(sds->pdev[i]);
 			goto err_dev_add;
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 2440b7411978..3c94c4a81a55 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -270,14 +270,30 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
 
 	/* Rounds down to not include partial word at the end of buf */
 	words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
-	if (words_to_transfer > tx_fifo_avail)
-		words_to_transfer = tx_fifo_avail;
 
-	i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
-
-	buf += words_to_transfer * BYTES_PER_FIFO_WORD;
-	buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
-	tx_fifo_avail -= words_to_transfer;
+	/* It's very common to have < 4 bytes, so optimize that case. */
+	if (words_to_transfer) {
+		if (words_to_transfer > tx_fifo_avail)
+			words_to_transfer = tx_fifo_avail;
+
+		/*
+		 * Update state before writing to FIFO.  If this causes us
+		 * to finish writing all bytes (AKA buf_remaining goes to 0) we
+		 * have a potential for an interrupt (PACKET_XFER_COMPLETE is
+		 * not maskable).  We need to make sure that the isr sees
+		 * buf_remaining as 0 and doesn't call us back re-entrantly.
+		 */
+		buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
+		tx_fifo_avail -= words_to_transfer;
+		i2c_dev->msg_buf_remaining = buf_remaining;
+		i2c_dev->msg_buf = buf +
+			words_to_transfer * BYTES_PER_FIFO_WORD;
+		barrier();
+
+		i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
+
+		buf += words_to_transfer * BYTES_PER_FIFO_WORD;
+	}
 
 	/*
 	 * If there is a partial word at the end of buf, handle it manually to
@@ -287,14 +303,15 @@
 	if (tx_fifo_avail > 0 && buf_remaining > 0) {
 		BUG_ON(buf_remaining > 3);
 		memcpy(&val, buf, buf_remaining);
+
+		/* Again update before writing to FIFO to make sure isr sees. */
+		i2c_dev->msg_buf_remaining = 0;
+		i2c_dev->msg_buf = NULL;
+		barrier();
+
 		i2c_writel(i2c_dev, val, I2C_TX_FIFO);
-		buf_remaining = 0;
-		tx_fifo_avail--;
 	}
 
-	BUG_ON(tx_fifo_avail > 0 && buf_remaining > 0);
-	i2c_dev->msg_buf_remaining = buf_remaining;
-	i2c_dev->msg_buf = buf;
 	return 0;
 }
 
@@ -411,9 +428,10 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
 		tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ);
 	}
 
-	if ((status & I2C_INT_PACKET_XFER_COMPLETE) &&
-	    !i2c_dev->msg_buf_remaining)
+	if (status & I2C_INT_PACKET_XFER_COMPLETE) {
+		BUG_ON(i2c_dev->msg_buf_remaining);
 		complete(&i2c_dev->msg_complete);
+	}
 
 	i2c_writel(i2c_dev, status, I2C_INT_STATUS);
 	if (i2c_dev->is_dvc)
@@ -531,7 +549,7 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
 
 static u32 tegra_i2c_func(struct i2c_adapter *adap)
 {
-	return I2C_FUNC_I2C;
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
 }
 
 static const struct i2c_algorithm tegra_i2c_algo = {
@@ -719,6 +737,17 @@ static int tegra_i2c_resume(struct platform_device *pdev)
 }
 #endif
 
+#if defined(CONFIG_OF)
+/* Match table for of_platform binding */
+static const struct of_device_id tegra_i2c_of_match[] __devinitconst = {
+	{ .compatible = "nvidia,tegra20-i2c", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, tegra_i2c_of_match);
+#else
+#define tegra_i2c_of_match NULL
+#endif
+
 static struct platform_driver tegra_i2c_driver = {
 	.probe   = tegra_i2c_probe,
 	.remove  = tegra_i2c_remove,
@@ -729,6 +758,7 @@ static struct platform_driver tegra_i2c_driver = {
 	.driver  = {
 		.name  = "tegra-i2c",
 		.owner = THIS_MODULE,
+		.of_match_table = tegra_i2c_of_match,
 	},
 };
 
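
The ordering in tegra_i2c_fill_tx_fifo() is the point of the fix:
PACKET_XFER_COMPLETE cannot be masked, so msg_buf_remaining must already read
0 by the time the final FIFO write completes the packet; otherwise the ISR
could observe stale state and re-enter the fill path. The barrier() calls only
stop the compiler from sinking those bookkeeping stores below the FIFO write.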
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index a14f8dc23462..0e4227f457af 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -605,7 +605,9 @@ static void build_inv_all(struct iommu_cmd *cmd)
  * Writes the command to the IOMMUs command buffer and informs the
  * hardware about the new command.
  */
-static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
+static int iommu_queue_command_sync(struct amd_iommu *iommu,
+				    struct iommu_cmd *cmd,
+				    bool sync)
 {
 	u32 left, tail, head, next_tail;
 	unsigned long flags;
@@ -639,13 +641,18 @@ again:
 	copy_cmd_to_buffer(iommu, cmd, tail);
 
 	/* We need to sync now to make sure all commands are processed */
-	iommu->need_sync = true;
+	iommu->need_sync = sync;
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	return 0;
 }
 
+static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
+{
+	return iommu_queue_command_sync(iommu, cmd, true);
+}
+
 /*
  * This function queues a completion wait command into the command
  * buffer of an IOMMU
@@ -661,7 +668,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 
 	build_completion_wait(&cmd, (u64)&sem);
 
-	ret = iommu_queue_command(iommu, &cmd);
+	ret = iommu_queue_command_sync(iommu, &cmd, false);
 	if (ret)
 		return ret;
 
@@ -840,14 +847,9 @@ static void domain_flush_complete(struct protection_domain *domain)
 static void domain_flush_devices(struct protection_domain *domain)
 {
 	struct iommu_dev_data *dev_data;
-	unsigned long flags;
-
-	spin_lock_irqsave(&domain->lock, flags);
 
 	list_for_each_entry(dev_data, &domain->dev_list, list)
 		device_flush_dte(dev_data);
-
-	spin_unlock_irqrestore(&domain->lock, flags);
 }
 
 /****************************************************************************
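
Two costs are shaved in the amd_iommu changes: a completion-wait command is
queued with sync=false because the caller immediately waits on its semaphore,
so flagging yet another sync via need_sync would be redundant; and
domain_flush_devices() no longer takes domain->lock itself, since each
device_flush_dte() already queues its command under the per-IOMMU lock.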
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 3742ce8b0acf..5404b2295820 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1138,8 +1138,11 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
 		ret = 0;
 	}
 	rdev->sectors = rdev->sb_start;
+	/* Limit to 4TB as metadata cannot record more than that */
+	if (rdev->sectors >= (2ULL << 32))
+		rdev->sectors = (2ULL << 32) - 2;
 
-	if (rdev->sectors < sb->size * 2 && sb->level > 1)
+	if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
 		/* "this cannot possibly happen" ... */
 		ret = -EINVAL;
 
@@ -1173,7 +1176,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
 		mddev->clevel[0] = 0;
 		mddev->layout = sb->layout;
 		mddev->raid_disks = sb->raid_disks;
-		mddev->dev_sectors = sb->size * 2;
+		mddev->dev_sectors = ((sector_t)sb->size) * 2;
 		mddev->events = ev1;
 		mddev->bitmap_info.offset = 0;
 		mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
@@ -1415,6 +1418,11 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
 	rdev->sb_start = calc_dev_sboffset(rdev);
 	if (!num_sectors || num_sectors > rdev->sb_start)
 		num_sectors = rdev->sb_start;
+	/* Limit to 4TB as metadata cannot record more than that.
+	 * 4TB == 2^32 KB, or 2*2^32 sectors.
+	 */
+	if (num_sectors >= (2ULL << 32))
+		num_sectors = (2ULL << 32) - 2;
 	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
 		       rdev->sb_page);
 	md_super_wait(rdev->mddev);
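
Checking the 4TB arithmetic in the new clamps: the 0.90 superblock stores the
device size as a 32-bit count of 1KB blocks, so the largest representable size
is 2^32 KB = 4TiB; in 512-byte sectors that is 2 * 2^32 = (2ULL << 32), the
constant used, with the clamp stopping two sectors short. The
((sector_t)sb->size) casts fix the same problem from the other direction:
sb->size * 2 was a 32-bit multiply that overflowed before being widened.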
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 32323f0afd89..f4622dd8fc59 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1099,12 +1099,11 @@ read_again:
1099 bio_list_add(&conf->pending_bio_list, mbio); 1099 bio_list_add(&conf->pending_bio_list, mbio);
1100 spin_unlock_irqrestore(&conf->device_lock, flags); 1100 spin_unlock_irqrestore(&conf->device_lock, flags);
1101 } 1101 }
1102 r1_bio_write_done(r1_bio); 1102 /* Mustn't call r1_bio_write_done before this next test,
1103 1103 * as it could result in the bio being freed.
1104 /* In case raid1d snuck in to freeze_array */ 1104 */
1105 wake_up(&conf->wait_barrier);
1106
1107 if (sectors_handled < (bio->bi_size >> 9)) { 1105 if (sectors_handled < (bio->bi_size >> 9)) {
1106 r1_bio_write_done(r1_bio);
1108 /* We need another r1_bio. It has already been counted 1107 /* We need another r1_bio. It has already been counted
1109 * in bio->bi_phys_segments 1108 * in bio->bi_phys_segments
1110 */ 1109 */
@@ -1117,6 +1116,11 @@ read_again:
1117 goto retry_write; 1116 goto retry_write;
1118 } 1117 }
1119 1118
1119 r1_bio_write_done(r1_bio);
1120
1121 /* In case raid1d snuck in to freeze_array */
1122 wake_up(&conf->wait_barrier);
1123
1120 if (do_sync || !bitmap || !plugged) 1124 if (do_sync || !bitmap || !plugged)
1121 md_wakeup_thread(mddev->thread); 1125 md_wakeup_thread(mddev->thread);
1122 1126
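The reordering is a use-after-free fix: r1_bio_write_done() can complete the master bio and free it, after which bio->bi_size must not be read. In miniature (helper names as in the hunk, surrounding code elided):

    /* before: the size test can touch freed memory */
    r1_bio_write_done(r1_bio);                  /* may free the master bio */
    if (sectors_handled < (bio->bi_size >> 9))  /* bio may be gone here */
            ...

    /* after: decide whether another r1_bio is needed first */
    if (sectors_handled < (bio->bi_size >> 9)) {
            r1_bio_write_done(r1_bio);
            /* allocate the next r1_bio ... */
            goto retry_write;
    }
    r1_bio_write_done(r1_bio);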
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 8b29cd4f01c8..d7a8468ddeab 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -337,6 +337,21 @@ static void close_write(r10bio_t *r10_bio)
337 md_write_end(r10_bio->mddev); 337 md_write_end(r10_bio->mddev);
338} 338}
339 339
340static void one_write_done(r10bio_t *r10_bio)
341{
342 if (atomic_dec_and_test(&r10_bio->remaining)) {
343 if (test_bit(R10BIO_WriteError, &r10_bio->state))
344 reschedule_retry(r10_bio);
345 else {
346 close_write(r10_bio);
347 if (test_bit(R10BIO_MadeGood, &r10_bio->state))
348 reschedule_retry(r10_bio);
349 else
350 raid_end_bio_io(r10_bio);
351 }
352 }
353}
354
340static void raid10_end_write_request(struct bio *bio, int error) 355static void raid10_end_write_request(struct bio *bio, int error)
341{ 356{
342 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 357 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -387,17 +402,7 @@ static void raid10_end_write_request(struct bio *bio, int error)
387 * Let's see if all mirrored write operations have finished 402 * Let's see if all mirrored write operations have finished
388 * already. 403 * already.
389 */ 404 */
390 if (atomic_dec_and_test(&r10_bio->remaining)) { 405 one_write_done(r10_bio);
391 if (test_bit(R10BIO_WriteError, &r10_bio->state))
392 reschedule_retry(r10_bio);
393 else {
394 close_write(r10_bio);
395 if (test_bit(R10BIO_MadeGood, &r10_bio->state))
396 reschedule_retry(r10_bio);
397 else
398 raid_end_bio_io(r10_bio);
399 }
400 }
401 if (dec_rdev) 406 if (dec_rdev)
402 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev); 407 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
403} 408}
@@ -1127,20 +1132,12 @@ retry_write:
1127 spin_unlock_irqrestore(&conf->device_lock, flags); 1132 spin_unlock_irqrestore(&conf->device_lock, flags);
1128 } 1133 }
1129 1134
1130 if (atomic_dec_and_test(&r10_bio->remaining)) { 1135 /* Don't remove the bias on 'remaining' (one_write_done) until
1131 /* This matches the end of raid10_end_write_request() */ 1136 * after checking if we need to go around again.
1132 bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, 1137 */
1133 r10_bio->sectors,
1134 !test_bit(R10BIO_Degraded, &r10_bio->state),
1135 0);
1136 md_write_end(mddev);
1137 raid_end_bio_io(r10_bio);
1138 }
1139
1140 /* In case raid10d snuck in to freeze_array */
1141 wake_up(&conf->wait_barrier);
1142 1138
1143 if (sectors_handled < (bio->bi_size >> 9)) { 1139 if (sectors_handled < (bio->bi_size >> 9)) {
1140 one_write_done(r10_bio);
1144 /* We need another r10_bio. It has already been counted 1141 /* We need another r10_bio. It has already been counted
1145 * in bio->bi_phys_segments. 1142 * in bio->bi_phys_segments.
1146 */ 1143 */
@@ -1154,6 +1151,10 @@ retry_write:
1154 r10_bio->state = 0; 1151 r10_bio->state = 0;
1155 goto retry_write; 1152 goto retry_write;
1156 } 1153 }
1154 one_write_done(r10_bio);
1155
1156 /* In case raid10d snuck in to freeze_array */
1157 wake_up(&conf->wait_barrier);
1157 1158
1158 if (do_sync || !mddev->bitmap || !plugged) 1159 if (do_sync || !mddev->bitmap || !plugged)
1159 md_wakeup_thread(mddev->thread); 1160 md_wakeup_thread(mddev->thread);
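one_write_done() folds the duplicated completion logic into one place, and the "bias" the new comment mentions is the usual refcounting idiom: 'remaining' starts at one so it cannot reach zero while more mirror writes might still be issued, and the submitter's final one_write_done() drops that extra count. A generic sketch of the idiom, not the raid10 code itself:

    atomic_set(&r10_bio->remaining, 1);      /* +1 bias held by the submitter */
    for (i = 0; i < copies; i++) {
            atomic_inc(&r10_bio->remaining);
            submit_one_mirror(r10_bio, i);   /* hypothetical helper */
    }
    one_write_done(r10_bio);                 /* drop the bias; whichever caller
                                              * reaches zero finishes the write */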
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 91a0a7460ebb..b27b94078c21 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -133,7 +133,7 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
133 if (mrq->done) 133 if (mrq->done)
134 mrq->done(mrq); 134 mrq->done(mrq);
135 135
136 mmc_host_clk_gate(host); 136 mmc_host_clk_release(host);
137 } 137 }
138} 138}
139 139
@@ -192,7 +192,7 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
192 mrq->stop->mrq = mrq; 192 mrq->stop->mrq = mrq;
193 } 193 }
194 } 194 }
195 mmc_host_clk_ungate(host); 195 mmc_host_clk_hold(host);
196 led_trigger_event(host->led, LED_FULL); 196 led_trigger_event(host->led, LED_FULL);
197 host->ops->request(host, mrq); 197 host->ops->request(host, mrq);
198} 198}
@@ -728,15 +728,17 @@ static inline void mmc_set_ios(struct mmc_host *host)
728 */ 728 */
729void mmc_set_chip_select(struct mmc_host *host, int mode) 729void mmc_set_chip_select(struct mmc_host *host, int mode)
730{ 730{
731 mmc_host_clk_hold(host);
731 host->ios.chip_select = mode; 732 host->ios.chip_select = mode;
732 mmc_set_ios(host); 733 mmc_set_ios(host);
734 mmc_host_clk_release(host);
733} 735}
734 736
735/* 737/*
736 * Sets the host clock to the highest possible frequency that 738 * Sets the host clock to the highest possible frequency that
737 * is below "hz". 739 * is below "hz".
738 */ 740 */
739void mmc_set_clock(struct mmc_host *host, unsigned int hz) 741static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
740{ 742{
741 WARN_ON(hz < host->f_min); 743 WARN_ON(hz < host->f_min);
742 744
@@ -747,6 +749,13 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz)
747 mmc_set_ios(host); 749 mmc_set_ios(host);
748} 750}
749 751
752void mmc_set_clock(struct mmc_host *host, unsigned int hz)
753{
754 mmc_host_clk_hold(host);
755 __mmc_set_clock(host, hz);
756 mmc_host_clk_release(host);
757}
758
750#ifdef CONFIG_MMC_CLKGATE 759#ifdef CONFIG_MMC_CLKGATE
751/* 760/*
752 * This gates the clock by setting it to 0 Hz. 761 * This gates the clock by setting it to 0 Hz.
@@ -779,7 +788,7 @@ void mmc_ungate_clock(struct mmc_host *host)
779 if (host->clk_old) { 788 if (host->clk_old) {
780 BUG_ON(host->ios.clock); 789 BUG_ON(host->ios.clock);
781 /* This call will also set host->clk_gated to false */ 790 /* This call will also set host->clk_gated to false */
782 mmc_set_clock(host, host->clk_old); 791 __mmc_set_clock(host, host->clk_old);
783 } 792 }
784} 793}
785 794
@@ -807,8 +816,10 @@ void mmc_set_ungated(struct mmc_host *host)
807 */ 816 */
808void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode) 817void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
809{ 818{
819 mmc_host_clk_hold(host);
810 host->ios.bus_mode = mode; 820 host->ios.bus_mode = mode;
811 mmc_set_ios(host); 821 mmc_set_ios(host);
822 mmc_host_clk_release(host);
812} 823}
813 824
814/* 825/*
@@ -816,8 +827,10 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
816 */ 827 */
817void mmc_set_bus_width(struct mmc_host *host, unsigned int width) 828void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
818{ 829{
830 mmc_host_clk_hold(host);
819 host->ios.bus_width = width; 831 host->ios.bus_width = width;
820 mmc_set_ios(host); 832 mmc_set_ios(host);
833 mmc_host_clk_release(host);
821} 834}
822 835
823/** 836/**
@@ -1015,8 +1028,10 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
1015 1028
1016 ocr &= 3 << bit; 1029 ocr &= 3 << bit;
1017 1030
1031 mmc_host_clk_hold(host);
1018 host->ios.vdd = bit; 1032 host->ios.vdd = bit;
1019 mmc_set_ios(host); 1033 mmc_set_ios(host);
1034 mmc_host_clk_release(host);
1020 } else { 1035 } else {
1021 pr_warning("%s: host doesn't support card's voltages\n", 1036 pr_warning("%s: host doesn't support card's voltages\n",
1022 mmc_hostname(host)); 1037 mmc_hostname(host));
@@ -1063,8 +1078,10 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11
1063 */ 1078 */
1064void mmc_set_timing(struct mmc_host *host, unsigned int timing) 1079void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1065{ 1080{
1081 mmc_host_clk_hold(host);
1066 host->ios.timing = timing; 1082 host->ios.timing = timing;
1067 mmc_set_ios(host); 1083 mmc_set_ios(host);
1084 mmc_host_clk_release(host);
1068} 1085}
1069 1086
1070/* 1087/*
@@ -1072,8 +1089,10 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1072 */ 1089 */
1073void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type) 1090void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1074{ 1091{
1092 mmc_host_clk_hold(host);
1075 host->ios.drv_type = drv_type; 1093 host->ios.drv_type = drv_type;
1076 mmc_set_ios(host); 1094 mmc_set_ios(host);
1095 mmc_host_clk_release(host);
1077} 1096}
1078 1097
1079/* 1098/*
@@ -1091,6 +1110,8 @@ static void mmc_power_up(struct mmc_host *host)
1091{ 1110{
1092 int bit; 1111 int bit;
1093 1112
1113 mmc_host_clk_hold(host);
1114
1094 /* If ocr is set, we use it */ 1115 /* If ocr is set, we use it */
1095 if (host->ocr) 1116 if (host->ocr)
1096 bit = ffs(host->ocr) - 1; 1117 bit = ffs(host->ocr) - 1;
@@ -1126,10 +1147,14 @@ static void mmc_power_up(struct mmc_host *host)
1126 * time required to reach a stable voltage. 1147 * time required to reach a stable voltage.
1127 */ 1148 */
1128 mmc_delay(10); 1149 mmc_delay(10);
1150
1151 mmc_host_clk_release(host);
1129} 1152}
1130 1153
1131static void mmc_power_off(struct mmc_host *host) 1154static void mmc_power_off(struct mmc_host *host)
1132{ 1155{
1156 mmc_host_clk_hold(host);
1157
1133 host->ios.clock = 0; 1158 host->ios.clock = 0;
1134 host->ios.vdd = 0; 1159 host->ios.vdd = 0;
1135 1160
@@ -1147,6 +1172,8 @@ static void mmc_power_off(struct mmc_host *host)
1147 host->ios.bus_width = MMC_BUS_WIDTH_1; 1172 host->ios.bus_width = MMC_BUS_WIDTH_1;
1148 host->ios.timing = MMC_TIMING_LEGACY; 1173 host->ios.timing = MMC_TIMING_LEGACY;
1149 mmc_set_ios(host); 1174 mmc_set_ios(host);
1175
1176 mmc_host_clk_release(host);
1150} 1177}
1151 1178
1152/* 1179/*
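Besides the hold/release rename, the recurring pattern in this file is that every ios update is now bracketed by a hold/release pair, so the controller clock cannot be gated away in the middle of reprogramming it. Any further setter would take the same shape; a sketch with a hypothetical ios field:

    void mmc_set_example(struct mmc_host *host, unsigned int val)
    {
            mmc_host_clk_hold(host);        /* ungate and pin the clock */
            host->ios.example = val;        /* 'example' is hypothetical */
            mmc_set_ios(host);
            mmc_host_clk_release(host);     /* allow gating again */
    }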
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index b29d3e8fd3a2..793d0a0dad8d 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -119,14 +119,14 @@ static void mmc_host_clk_gate_work(struct work_struct *work)
119} 119}
120 120
121/** 121/**
122 * mmc_host_clk_ungate - ungate hardware MCI clocks 122 * mmc_host_clk_hold - ungate hardware MCI clocks
123 * @host: host to ungate. 123 * @host: host to ungate.
124 * 124 *
125 * Makes sure the host ios.clock is restored to a non-zero value 125 * Makes sure the host ios.clock is restored to a non-zero value
126 * past this call. Increase clock reference count and ungate clock 126 * past this call. Increase clock reference count and ungate clock
127 * if we're the first user. 127 * if we're the first user.
128 */ 128 */
129void mmc_host_clk_ungate(struct mmc_host *host) 129void mmc_host_clk_hold(struct mmc_host *host)
130{ 130{
131 unsigned long flags; 131 unsigned long flags;
132 132
@@ -164,14 +164,14 @@ static bool mmc_host_may_gate_card(struct mmc_card *card)
164} 164}
165 165
166/** 166/**
167 * mmc_host_clk_gate - gate off hardware MCI clocks 167 * mmc_host_clk_release - gate off hardware MCI clocks
168 * @host: host to gate. 168 * @host: host to gate.
169 * 169 *
170 * Calls the host driver with ios.clock set to zero as often as possible 170 * Calls the host driver with ios.clock set to zero as often as possible
171 * in order to gate off hardware MCI clocks. Decrease clock reference 171 * in order to gate off hardware MCI clocks. Decrease clock reference
172 * count and schedule disabling of clock. 172 * count and schedule disabling of clock.
173 */ 173 */
174void mmc_host_clk_gate(struct mmc_host *host) 174void mmc_host_clk_release(struct mmc_host *host)
175{ 175{
176 unsigned long flags; 176 unsigned long flags;
177 177
@@ -179,7 +179,7 @@ void mmc_host_clk_gate(struct mmc_host *host)
179 host->clk_requests--; 179 host->clk_requests--;
180 if (mmc_host_may_gate_card(host->card) && 180 if (mmc_host_may_gate_card(host->card) &&
181 !host->clk_requests) 181 !host->clk_requests)
182 schedule_work(&host->clk_gate_work); 182 queue_work(system_nrt_wq, &host->clk_gate_work);
183 spin_unlock_irqrestore(&host->clk_lock, flags); 183 spin_unlock_irqrestore(&host->clk_lock, flags);
184} 184}
185 185
@@ -231,7 +231,7 @@ static inline void mmc_host_clk_exit(struct mmc_host *host)
231 if (cancel_work_sync(&host->clk_gate_work)) 231 if (cancel_work_sync(&host->clk_gate_work))
232 mmc_host_clk_gate_delayed(host); 232 mmc_host_clk_gate_delayed(host);
233 if (host->clk_gated) 233 if (host->clk_gated)
234 mmc_host_clk_ungate(host); 234 mmc_host_clk_hold(host);
235 /* There should be only one user now */ 235 /* There should be only one user now */
236 WARN_ON(host->clk_requests > 1); 236 WARN_ON(host->clk_requests > 1);
237} 237}
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index de199f911928..fb8a5cd2e4a1 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -16,16 +16,16 @@ int mmc_register_host_class(void);
16void mmc_unregister_host_class(void); 16void mmc_unregister_host_class(void);
17 17
18#ifdef CONFIG_MMC_CLKGATE 18#ifdef CONFIG_MMC_CLKGATE
19void mmc_host_clk_ungate(struct mmc_host *host); 19void mmc_host_clk_hold(struct mmc_host *host);
20void mmc_host_clk_gate(struct mmc_host *host); 20void mmc_host_clk_release(struct mmc_host *host);
21unsigned int mmc_host_clk_rate(struct mmc_host *host); 21unsigned int mmc_host_clk_rate(struct mmc_host *host);
22 22
23#else 23#else
24static inline void mmc_host_clk_ungate(struct mmc_host *host) 24static inline void mmc_host_clk_hold(struct mmc_host *host)
25{ 25{
26} 26}
27 27
28static inline void mmc_host_clk_gate(struct mmc_host *host) 28static inline void mmc_host_clk_release(struct mmc_host *host)
29{ 29{
30} 30}
31 31
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 633975ff2bb3..0370e03e3142 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -469,56 +469,75 @@ static int sd_select_driver_type(struct mmc_card *card, u8 *status)
469 return 0; 469 return 0;
470} 470}
471 471
472static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status) 472static void sd_update_bus_speed_mode(struct mmc_card *card)
473{ 473{
474 unsigned int bus_speed = 0, timing = 0;
475 int err;
476
477 /* 474 /*
478 * If the host doesn't support any of the UHS-I modes, fall back on 475 * If the host doesn't support any of the UHS-I modes, fall back on
479 * default speed. 476 * default speed.
480 */ 477 */
481 if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | 478 if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
482 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) 479 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) {
483 return 0; 480 card->sd_bus_speed = 0;
481 return;
482 }
484 483
485 if ((card->host->caps & MMC_CAP_UHS_SDR104) && 484 if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
486 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) { 485 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
487 bus_speed = UHS_SDR104_BUS_SPEED; 486 card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
488 timing = MMC_TIMING_UHS_SDR104;
489 card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
490 } else if ((card->host->caps & MMC_CAP_UHS_DDR50) && 487 } else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
491 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) { 488 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
492 bus_speed = UHS_DDR50_BUS_SPEED; 489 card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
493 timing = MMC_TIMING_UHS_DDR50;
494 card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
495 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | 490 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
496 MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode & 491 MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
497 SD_MODE_UHS_SDR50)) { 492 SD_MODE_UHS_SDR50)) {
498 bus_speed = UHS_SDR50_BUS_SPEED; 493 card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
499 timing = MMC_TIMING_UHS_SDR50;
500 card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
501 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | 494 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
502 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) && 495 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
503 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) { 496 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
504 bus_speed = UHS_SDR25_BUS_SPEED; 497 card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
505 timing = MMC_TIMING_UHS_SDR25;
506 card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
507 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | 498 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
508 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 | 499 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
509 MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode & 500 MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
510 SD_MODE_UHS_SDR12)) { 501 SD_MODE_UHS_SDR12)) {
511 bus_speed = UHS_SDR12_BUS_SPEED; 502 card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
512 timing = MMC_TIMING_UHS_SDR12; 503 }
513 card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR; 504}
505
506static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
507{
508 int err;
509 unsigned int timing = 0;
510
511 switch (card->sd_bus_speed) {
512 case UHS_SDR104_BUS_SPEED:
513 timing = MMC_TIMING_UHS_SDR104;
514 card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
515 break;
516 case UHS_DDR50_BUS_SPEED:
517 timing = MMC_TIMING_UHS_DDR50;
518 card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
519 break;
520 case UHS_SDR50_BUS_SPEED:
521 timing = MMC_TIMING_UHS_SDR50;
522 card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
523 break;
524 case UHS_SDR25_BUS_SPEED:
525 timing = MMC_TIMING_UHS_SDR25;
526 card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
527 break;
528 case UHS_SDR12_BUS_SPEED:
529 timing = MMC_TIMING_UHS_SDR12;
530 card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
531 break;
532 default:
533 return 0;
514 } 534 }
515 535
516 card->sd_bus_speed = bus_speed; 536 err = mmc_sd_switch(card, 1, 0, card->sd_bus_speed, status);
517 err = mmc_sd_switch(card, 1, 0, bus_speed, status);
518 if (err) 537 if (err)
519 return err; 538 return err;
520 539
521 if ((status[16] & 0xF) != bus_speed) 540 if ((status[16] & 0xF) != card->sd_bus_speed)
522 printk(KERN_WARNING "%s: Problem setting bus speed mode!\n", 541 printk(KERN_WARNING "%s: Problem setting bus speed mode!\n",
523 mmc_hostname(card->host)); 542 mmc_hostname(card->host));
524 else { 543 else {
@@ -618,18 +637,24 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
618 mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4); 637 mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
619 } 638 }
620 639
640 /*
641 * Select the bus speed mode depending on host
642 * and card capability.
643 */
644 sd_update_bus_speed_mode(card);
645
621 /* Set the driver strength for the card */ 646 /* Set the driver strength for the card */
622 err = sd_select_driver_type(card, status); 647 err = sd_select_driver_type(card, status);
623 if (err) 648 if (err)
624 goto out; 649 goto out;
625 650
626 /* Set bus speed mode of the card */ 651 /* Set current limit for the card */
627 err = sd_set_bus_speed_mode(card, status); 652 err = sd_set_current_limit(card, status);
628 if (err) 653 if (err)
629 goto out; 654 goto out;
630 655
631 /* Set current limit for the card */ 656 /* Set bus speed mode of the card */
632 err = sd_set_current_limit(card, status); 657 err = sd_set_bus_speed_mode(card, status);
633 if (err) 658 if (err)
634 goto out; 659 goto out;
635 660
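With the selection split out of the programming step, the UHS-I init path decides the target speed first and switches the card last, so the current limit can be chosen with the bus speed already known. The resulting order, in outline (error handling elided):

    sd_update_bus_speed_mode(card);             /* pick from host+card caps */
    err = sd_select_driver_type(card, status);
    err = sd_set_current_limit(card, status);   /* sees card->sd_bus_speed */
    err = sd_set_bus_speed_mode(card, status);  /* program the card last */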
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 0e9780f5a4a9..4dc0028086a3 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -16,6 +16,7 @@
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/clk.h> 17#include <linux/clk.h>
18#include <linux/gpio.h> 18#include <linux/gpio.h>
19#include <linux/module.h>
19#include <linux/slab.h> 20#include <linux/slab.h>
20#include <linux/mmc/host.h> 21#include <linux/mmc/host.h>
21#include <linux/mmc/mmc.h> 22#include <linux/mmc/mmc.h>
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 2bd7bf4fece7..fe886d6c474a 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -302,6 +302,8 @@ static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width)
302 ctrl &= ~SDHCI_CTRL_8BITBUS; 302 ctrl &= ~SDHCI_CTRL_8BITBUS;
303 break; 303 break;
304 default: 304 default:
305 ctrl &= ~SDHCI_CTRL_4BITBUS;
306 ctrl &= ~SDHCI_CTRL_8BITBUS;
305 break; 307 break;
306 } 308 }
307 309
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index 774f6439d7ce..0c4a672f5db6 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -120,11 +120,11 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
120 mmc_data->hclk = clk_get_rate(priv->clk); 120 mmc_data->hclk = clk_get_rate(priv->clk);
121 mmc_data->set_pwr = sh_mobile_sdhi_set_pwr; 121 mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;
122 mmc_data->get_cd = sh_mobile_sdhi_get_cd; 122 mmc_data->get_cd = sh_mobile_sdhi_get_cd;
123 if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT)
124 mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
125 mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED; 123 mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
126 if (p) { 124 if (p) {
127 mmc_data->flags = p->tmio_flags; 125 mmc_data->flags = p->tmio_flags;
126 if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT)
127 mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
128 mmc_data->ocr_mask = p->tmio_ocr_mask; 128 mmc_data->ocr_mask = p->tmio_ocr_mask;
129 mmc_data->capabilities |= p->tmio_caps; 129 mmc_data->capabilities |= p->tmio_caps;
130 130
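The sh_mobile_sdhi fix is pure ordering: mmc_data->flags is only filled in from platform data inside the if (p) block, so testing it beforehand sees the initial (presumably zeroed) value and the write16 hook is never installed. The broken shape, in miniature:

    /* before the fix: flags tested while still at its initial value */
    if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT)       /* always false */
            mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
    ...
    if (p)
            mmc_data->flags = p->tmio_flags;            /* set too late */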
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 65b5b76cc379..64fbb0021825 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -181,7 +181,7 @@ static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
181 181
182#define ubi_dbg_msg(fmt, ...) do { \ 182#define ubi_dbg_msg(fmt, ...) do { \
183 if (0) \ 183 if (0) \
184 pr_debug(fmt "\n", ##__VA_ARGS__); \ 184 printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__); \
185} while (0) 185} while (0)
186 186
187#define dbg_msg(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__) 187#define dbg_msg(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 52fe21e1e2cd..3b1416e3d217 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -308,8 +308,11 @@ static void am79c961_timer(unsigned long data)
308 struct net_device *dev = (struct net_device *)data; 308 struct net_device *dev = (struct net_device *)data;
309 struct dev_priv *priv = netdev_priv(dev); 309 struct dev_priv *priv = netdev_priv(dev);
310 unsigned int lnkstat, carrier; 310 unsigned int lnkstat, carrier;
311 unsigned long flags;
311 312
313 spin_lock_irqsave(&priv->chip_lock, flags);
312 lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST; 314 lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST;
315 spin_unlock_irqrestore(&priv->chip_lock, flags);
313 carrier = netif_carrier_ok(dev); 316 carrier = netif_carrier_ok(dev);
314 317
315 if (lnkstat && !carrier) { 318 if (lnkstat && !carrier) {
diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c
index 753b21aaea61..3ffd9c1acc0a 100644
--- a/drivers/pci/hotplug/pcihp_slot.c
+++ b/drivers/pci/hotplug/pcihp_slot.c
@@ -169,7 +169,9 @@ void pci_configure_slot(struct pci_dev *dev)
169 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI))) 169 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
170 return; 170 return;
171 171
172 pcie_bus_configure_settings(dev->bus, dev->bus->self->pcie_mpss); 172 if (dev->bus && dev->bus->self)
173 pcie_bus_configure_settings(dev->bus,
174 dev->bus->self->pcie_mpss);
173 175
174 memset(&hpp, 0, sizeof(hpp)); 176 memset(&hpp, 0, sizeof(hpp));
175 ret = pci_get_hp_params(dev, &hpp); 177 ret = pci_get_hp_params(dev, &hpp);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 0ce67423a0a3..4e84fd4a4312 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -77,7 +77,7 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
77unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; 77unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
78unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; 78unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
79 79
80enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE; 80enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
81 81
82/* 82/*
83 * The default CLS is used if arch didn't set CLS explicitly and not 83 * The default CLS is used if arch didn't set CLS explicitly and not
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 8473727b29fa..b1187ff31d89 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1396,34 +1396,37 @@ static void pcie_write_mps(struct pci_dev *dev, int mps)
1396 1396
1397static void pcie_write_mrrs(struct pci_dev *dev, int mps) 1397static void pcie_write_mrrs(struct pci_dev *dev, int mps)
1398{ 1398{
1399 int rc, mrrs; 1399 int rc, mrrs, dev_mpss;
1400 1400
1401 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { 1401 /* In the "safe" case, do not configure the MRRS. There appear to be
1402 int dev_mpss = 128 << dev->pcie_mpss; 1402 * issues with setting MRRS to 0 on a number of devices.
1403 */
1403 1404
1404 /* For Max performance, the MRRS must be set to the largest 1405 if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
1405 * supported value. However, it cannot be configured larger 1406 return;
1406 * than the MPS the device or the bus can support. This assumes
1407 * that the largest MRRS available on the device cannot be
1408 * smaller than the device MPSS.
1409 */
1410 mrrs = mps < dev_mpss ? mps : dev_mpss;
1411 } else
1412 /* In the "safe" case, configure the MRRS for fairness on the
1413 * bus by making all devices have the same size
1414 */
1415 mrrs = mps;
1416 1407
1408 dev_mpss = 128 << dev->pcie_mpss;
1409
1410 /* For Max performance, the MRRS must be set to the largest supported
1411 * value. However, it cannot be configured larger than the MPS the
1412 * device or the bus can support. This assumes that the largest MRRS
1413 * available on the device cannot be smaller than the device MPSS.
1414 */
1415 mrrs = min(mps, dev_mpss);
1417 1416
1418 /* MRRS is a R/W register. Invalid values can be written, but a 1417 /* MRRS is a R/W register. Invalid values can be written, but a
1419 * subsiquent read will verify if the value is acceptable or not. 1418 * subsequent read will verify if the value is acceptable or not.
1420 * If the MRRS value provided is not acceptable (e.g., too large), 1419 * If the MRRS value provided is not acceptable (e.g., too large),
1421 * shrink the value until it is acceptable to the HW. 1420 * shrink the value until it is acceptable to the HW.
1422 */ 1421 */
1423 while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) { 1422 while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
1423 dev_warn(&dev->dev, "Attempting to modify the PCI-E MRRS value"
1424 " to %d. If any issues are encountered, please try "
1425 "running with pci=pcie_bus_safe\n", mrrs);
1424 rc = pcie_set_readrq(dev, mrrs); 1426 rc = pcie_set_readrq(dev, mrrs);
1425 if (rc) 1427 if (rc)
1426 dev_err(&dev->dev, "Failed attempting to set the MRRS\n"); 1428 dev_err(&dev->dev,
1429 "Failed attempting to set the MRRS\n");
1427 1430
1428 mrrs /= 2; 1431 mrrs /= 2;
1429 } 1432 }
@@ -1436,13 +1439,13 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
1436 if (!pci_is_pcie(dev)) 1439 if (!pci_is_pcie(dev))
1437 return 0; 1440 return 0;
1438 1441
1439 dev_info(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n", 1442 dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
1440 pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev)); 1443 pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev));
1441 1444
1442 pcie_write_mps(dev, mps); 1445 pcie_write_mps(dev, mps);
1443 pcie_write_mrrs(dev, mps); 1446 pcie_write_mrrs(dev, mps);
1444 1447
1445 dev_info(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n", 1448 dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
1446 pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev)); 1449 pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev));
1447 1450
1448 return 0; 1451 return 0;
@@ -1456,9 +1459,6 @@ void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss)
1456{ 1459{
1457 u8 smpss = mpss; 1460 u8 smpss = mpss;
1458 1461
1459 if (!bus->self)
1460 return;
1461
1462 if (!pci_is_pcie(bus->self)) 1462 if (!pci_is_pcie(bus->self))
1463 return; 1463 return;
1464 1464
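Throughout this file, pcie_mpss is the Max_Payload_Size_Supported encoding from the PCIe Device Capabilities register, read as an exponent: payload bytes = 128 << mpss. A quick worked example:

    int mpss = 2;                 /* encoding from Device Capabilities */
    int bytes = 128 << mpss;      /* 512-byte maximum payload */
    /* encodings 0..5 therefore span 128..4096 bytes */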
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 335551d333b2..14a42a1edc66 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -36,6 +36,7 @@
36 */ 36 */
37struct ep93xx_rtc { 37struct ep93xx_rtc {
38 void __iomem *mmio_base; 38 void __iomem *mmio_base;
39 struct rtc_device *rtc;
39}; 40};
40 41
41static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload, 42static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload,
@@ -130,7 +131,6 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
130{ 131{
131 struct ep93xx_rtc *ep93xx_rtc; 132 struct ep93xx_rtc *ep93xx_rtc;
132 struct resource *res; 133 struct resource *res;
133 struct rtc_device *rtc;
134 int err; 134 int err;
135 135
136 ep93xx_rtc = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_rtc), GFP_KERNEL); 136 ep93xx_rtc = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_rtc), GFP_KERNEL);
@@ -151,12 +151,12 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
151 return -ENXIO; 151 return -ENXIO;
152 152
153 pdev->dev.platform_data = ep93xx_rtc; 153 pdev->dev.platform_data = ep93xx_rtc;
154 platform_set_drvdata(pdev, rtc); 154 platform_set_drvdata(pdev, ep93xx_rtc);
155 155
156 rtc = rtc_device_register(pdev->name, 156 ep93xx_rtc->rtc = rtc_device_register(pdev->name,
157 &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE); 157 &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE);
158 if (IS_ERR(rtc)) { 158 if (IS_ERR(ep93xx_rtc->rtc)) {
159 err = PTR_ERR(rtc); 159 err = PTR_ERR(ep93xx_rtc->rtc);
160 goto exit; 160 goto exit;
161 } 161 }
162 162
@@ -167,7 +167,7 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
167 return 0; 167 return 0;
168 168
169fail: 169fail:
170 rtc_device_unregister(rtc); 170 rtc_device_unregister(ep93xx_rtc->rtc);
171exit: 171exit:
172 platform_set_drvdata(pdev, NULL); 172 platform_set_drvdata(pdev, NULL);
173 pdev->dev.platform_data = NULL; 173 pdev->dev.platform_data = NULL;
@@ -176,11 +176,11 @@ exit:
176 176
177static int __exit ep93xx_rtc_remove(struct platform_device *pdev) 177static int __exit ep93xx_rtc_remove(struct platform_device *pdev)
178{ 178{
179 struct rtc_device *rtc = platform_get_drvdata(pdev); 179 struct ep93xx_rtc *ep93xx_rtc = platform_get_drvdata(pdev);
180 180
181 sysfs_remove_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files); 181 sysfs_remove_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files);
182 platform_set_drvdata(pdev, NULL); 182 platform_set_drvdata(pdev, NULL);
183 rtc_device_unregister(rtc); 183 rtc_device_unregister(ep93xx_rtc->rtc);
184 pdev->dev.platform_data = NULL; 184 pdev->dev.platform_data = NULL;
185 185
186 return 0; 186 return 0;
diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c
index 075f1708deae..c4cf05731118 100644
--- a/drivers/rtc/rtc-lib.c
+++ b/drivers/rtc/rtc-lib.c
@@ -85,6 +85,8 @@ void rtc_time_to_tm(unsigned long time, struct rtc_time *tm)
85 time -= tm->tm_hour * 3600; 85 time -= tm->tm_hour * 3600;
86 tm->tm_min = time / 60; 86 tm->tm_min = time / 60;
87 tm->tm_sec = time - tm->tm_min * 60; 87 tm->tm_sec = time - tm->tm_min * 60;
88
89 tm->tm_isdst = 0;
88} 90}
89EXPORT_SYMBOL(rtc_time_to_tm); 91EXPORT_SYMBOL(rtc_time_to_tm);
90 92
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 9a81f778d6b2..20687d55e7a7 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -362,14 +362,6 @@ static irqreturn_t twl_rtc_interrupt(int irq, void *rtc)
362 int res; 362 int res;
363 u8 rd_reg; 363 u8 rd_reg;
364 364
365#ifdef CONFIG_LOCKDEP
366 /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
367 * we don't want and can't tolerate. Although it might be
368 * friendlier not to borrow this thread context...
369 */
370 local_irq_enable();
371#endif
372
373 res = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG); 365 res = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
374 if (res) 366 if (res)
375 goto out; 367 goto out;
@@ -428,24 +420,12 @@ static struct rtc_class_ops twl_rtc_ops = {
428static int __devinit twl_rtc_probe(struct platform_device *pdev) 420static int __devinit twl_rtc_probe(struct platform_device *pdev)
429{ 421{
430 struct rtc_device *rtc; 422 struct rtc_device *rtc;
431 int ret = 0; 423 int ret = -EINVAL;
432 int irq = platform_get_irq(pdev, 0); 424 int irq = platform_get_irq(pdev, 0);
433 u8 rd_reg; 425 u8 rd_reg;
434 426
435 if (irq <= 0) 427 if (irq <= 0)
436 return -EINVAL; 428 goto out1;
437
438 rtc = rtc_device_register(pdev->name,
439 &pdev->dev, &twl_rtc_ops, THIS_MODULE);
440 if (IS_ERR(rtc)) {
441 ret = PTR_ERR(rtc);
442 dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
443 PTR_ERR(rtc));
444 goto out0;
445
446 }
447
448 platform_set_drvdata(pdev, rtc);
449 429
450 ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG); 430 ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
451 if (ret < 0) 431 if (ret < 0)
@@ -462,14 +442,6 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
462 if (ret < 0) 442 if (ret < 0)
463 goto out1; 443 goto out1;
464 444
465 ret = request_irq(irq, twl_rtc_interrupt,
466 IRQF_TRIGGER_RISING,
467 dev_name(&rtc->dev), rtc);
468 if (ret < 0) {
469 dev_err(&pdev->dev, "IRQ is not free.\n");
470 goto out1;
471 }
472
473 if (twl_class_is_6030()) { 445 if (twl_class_is_6030()) {
474 twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK, 446 twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK,
475 REG_INT_MSK_LINE_A); 447 REG_INT_MSK_LINE_A);
@@ -480,28 +452,44 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
480 /* Check RTC module status, Enable if it is off */ 452 /* Check RTC module status, Enable if it is off */
481 ret = twl_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG); 453 ret = twl_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG);
482 if (ret < 0) 454 if (ret < 0)
483 goto out2; 455 goto out1;
484 456
485 if (!(rd_reg & BIT_RTC_CTRL_REG_STOP_RTC_M)) { 457 if (!(rd_reg & BIT_RTC_CTRL_REG_STOP_RTC_M)) {
486 dev_info(&pdev->dev, "Enabling TWL-RTC.\n"); 458 dev_info(&pdev->dev, "Enabling TWL-RTC.\n");
487 rd_reg = BIT_RTC_CTRL_REG_STOP_RTC_M; 459 rd_reg = BIT_RTC_CTRL_REG_STOP_RTC_M;
488 ret = twl_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG); 460 ret = twl_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG);
489 if (ret < 0) 461 if (ret < 0)
490 goto out2; 462 goto out1;
491 } 463 }
492 464
493 /* init cached IRQ enable bits */ 465 /* init cached IRQ enable bits */
494 ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG); 466 ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG);
495 if (ret < 0) 467 if (ret < 0)
468 goto out1;
469
470 rtc = rtc_device_register(pdev->name,
471 &pdev->dev, &twl_rtc_ops, THIS_MODULE);
472 if (IS_ERR(rtc)) {
473 ret = PTR_ERR(rtc);
474 dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
475 PTR_ERR(rtc));
476 goto out1;
477 }
478
479 ret = request_threaded_irq(irq, NULL, twl_rtc_interrupt,
480 IRQF_TRIGGER_RISING,
481 dev_name(&rtc->dev), rtc);
482 if (ret < 0) {
483 dev_err(&pdev->dev, "IRQ is not free.\n");
496 goto out2; 484 goto out2;
485 }
497 486
498 return ret; 487 platform_set_drvdata(pdev, rtc);
488 return 0;
499 489
500out2: 490out2:
501 free_irq(irq, rtc);
502out1:
503 rtc_device_unregister(rtc); 491 rtc_device_unregister(rtc);
504out0: 492out1:
505 return ret; 493 return ret;
506} 494}
507 495
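Moving to request_threaded_irq() is what makes the deleted lockdep workaround unnecessary: the handler reads RTC status through the TWL bus layer, which may sleep, so it now runs in a kernel thread with interrupts in a sane state instead of force-enabling them inside a hard-irq handler. The registration, with the hard-irq half left NULL:

    ret = request_threaded_irq(irq, NULL,          /* no hard-irq handler */
                               twl_rtc_interrupt,  /* runs in thread context */
                               IRQF_TRIGGER_RISING,
                               dev_name(&rtc->dev), rtc);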
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 80d292fb92d8..7363c1b169e8 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -19,7 +19,7 @@
19#include <asm/backlight.h> 19#include <asm/backlight.h>
20#endif 20#endif
21 21
22static const char const *backlight_types[] = { 22static const char *const backlight_types[] = {
23 [BACKLIGHT_RAW] = "raw", 23 [BACKLIGHT_RAW] = "raw",
24 [BACKLIGHT_PLATFORM] = "platform", 24 [BACKLIGHT_PLATFORM] = "platform",
25 [BACKLIGHT_FIRMWARE] = "firmware", 25 [BACKLIGHT_FIRMWARE] = "firmware",
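The one-token move in backlight.c changes the meaning, not just the style: "const char const *" merely repeats the qualifier on the pointee (legal in C99, and the pointers stay writable), while "const char *const" also makes the array elements themselves read-only:

    const char const *a[] = { "raw" };  /* duplicate const; a[0] writable */
    const char *const b[] = { "raw" };  /* b[0] itself is read-only */

    a[0] = "platform";                  /* compiles */
    b[0] = "platform";                  /* error: assignment of read-only */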
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index 46ce357ca1ab..410ffd6ceb5f 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -54,9 +54,9 @@ extern struct kmem_cache *v9fs_inode_cache;
54 54
55struct inode *v9fs_alloc_inode(struct super_block *sb); 55struct inode *v9fs_alloc_inode(struct super_block *sb);
56void v9fs_destroy_inode(struct inode *inode); 56void v9fs_destroy_inode(struct inode *inode);
57struct inode *v9fs_get_inode(struct super_block *sb, int mode); 57struct inode *v9fs_get_inode(struct super_block *sb, int mode, dev_t);
58int v9fs_init_inode(struct v9fs_session_info *v9ses, 58int v9fs_init_inode(struct v9fs_session_info *v9ses,
59 struct inode *inode, int mode); 59 struct inode *inode, int mode, dev_t);
60void v9fs_evict_inode(struct inode *inode); 60void v9fs_evict_inode(struct inode *inode);
61ino_t v9fs_qid2ino(struct p9_qid *qid); 61ino_t v9fs_qid2ino(struct p9_qid *qid);
62void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *); 62void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *);
@@ -83,4 +83,6 @@ static inline void v9fs_invalidate_inode_attr(struct inode *inode)
83 v9inode->cache_validity |= V9FS_INO_INVALID_ATTR; 83 v9inode->cache_validity |= V9FS_INO_INVALID_ATTR;
84 return; 84 return;
85} 85}
86
87int v9fs_open_to_dotl_flags(int flags);
86#endif 88#endif
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 3c173fcc2c5a..62857a810a79 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -65,7 +65,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
65 v9inode = V9FS_I(inode); 65 v9inode = V9FS_I(inode);
66 v9ses = v9fs_inode2v9ses(inode); 66 v9ses = v9fs_inode2v9ses(inode);
67 if (v9fs_proto_dotl(v9ses)) 67 if (v9fs_proto_dotl(v9ses))
68 omode = file->f_flags; 68 omode = v9fs_open_to_dotl_flags(file->f_flags);
69 else 69 else
70 omode = v9fs_uflags2omode(file->f_flags, 70 omode = v9fs_uflags2omode(file->f_flags,
71 v9fs_proto_dotu(v9ses)); 71 v9fs_proto_dotu(v9ses));
@@ -169,7 +169,18 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
169 169
170 /* convert posix lock to p9 tlock args */ 170 /* convert posix lock to p9 tlock args */
171 memset(&flock, 0, sizeof(flock)); 171 memset(&flock, 0, sizeof(flock));
172 flock.type = fl->fl_type; 172 /* map the lock type */
173 switch (fl->fl_type) {
174 case F_RDLCK:
175 flock.type = P9_LOCK_TYPE_RDLCK;
176 break;
177 case F_WRLCK:
178 flock.type = P9_LOCK_TYPE_WRLCK;
179 break;
180 case F_UNLCK:
181 flock.type = P9_LOCK_TYPE_UNLCK;
182 break;
183 }
173 flock.start = fl->fl_start; 184 flock.start = fl->fl_start;
174 if (fl->fl_end == OFFSET_MAX) 185 if (fl->fl_end == OFFSET_MAX)
175 flock.length = 0; 186 flock.length = 0;
@@ -245,7 +256,7 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
245 256
246 /* convert posix lock to p9 tgetlock args */ 257 /* convert posix lock to p9 tgetlock args */
247 memset(&glock, 0, sizeof(glock)); 258 memset(&glock, 0, sizeof(glock));
248 glock.type = fl->fl_type; 259 glock.type = P9_LOCK_TYPE_UNLCK;
249 glock.start = fl->fl_start; 260 glock.start = fl->fl_start;
250 if (fl->fl_end == OFFSET_MAX) 261 if (fl->fl_end == OFFSET_MAX)
251 glock.length = 0; 262 glock.length = 0;
@@ -257,17 +268,26 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
257 res = p9_client_getlock_dotl(fid, &glock); 268 res = p9_client_getlock_dotl(fid, &glock);
258 if (res < 0) 269 if (res < 0)
259 return res; 270 return res;
260 if (glock.type != F_UNLCK) { 271 /* map 9p lock type to os lock type */
261 fl->fl_type = glock.type; 272 switch (glock.type) {
273 case P9_LOCK_TYPE_RDLCK:
274 fl->fl_type = F_RDLCK;
275 break;
276 case P9_LOCK_TYPE_WRLCK:
277 fl->fl_type = F_WRLCK;
278 break;
279 case P9_LOCK_TYPE_UNLCK:
280 fl->fl_type = F_UNLCK;
281 break;
282 }
283 if (glock.type != P9_LOCK_TYPE_UNLCK) {
262 fl->fl_start = glock.start; 284 fl->fl_start = glock.start;
263 if (glock.length == 0) 285 if (glock.length == 0)
264 fl->fl_end = OFFSET_MAX; 286 fl->fl_end = OFFSET_MAX;
265 else 287 else
266 fl->fl_end = glock.start + glock.length - 1; 288 fl->fl_end = glock.start + glock.length - 1;
267 fl->fl_pid = glock.proc_id; 289 fl->fl_pid = glock.proc_id;
268 } else 290 }
269 fl->fl_type = F_UNLCK;
270
271 return res; 291 return res;
272} 292}
273 293
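Translating through explicit P9_LOCK_TYPE_* constants instead of sending F_RDLCK and friends raw keeps the wire format stable: the F_* values are a kernel/libc detail, while the 9P2000.L lock types are fixed by the protocol (0, 1 and 2 for read, write and unlock, assuming the draft numbering). The outbound half of the mapping, as a user-space sketch:

    #include <fcntl.h>

    enum { P9_LOCK_TYPE_RDLCK = 0, P9_LOCK_TYPE_WRLCK = 1,
           P9_LOCK_TYPE_UNLCK = 2 };    /* values assumed from the spec */

    static int posix_to_p9(int fl_type)
    {
            switch (fl_type) {
            case F_RDLCK: return P9_LOCK_TYPE_RDLCK;
            case F_WRLCK: return P9_LOCK_TYPE_WRLCK;
            default:      return P9_LOCK_TYPE_UNLCK;
            }
    }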
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 8bb5507e822f..e3c03db3c788 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -95,15 +95,18 @@ static int unixmode2p9mode(struct v9fs_session_info *v9ses, int mode)
95/** 95/**
96 * p9mode2unixmode- convert plan9 mode bits to unix mode bits 96 * p9mode2unixmode- convert plan9 mode bits to unix mode bits
97 * @v9ses: v9fs session information 97 * @v9ses: v9fs session information
 98 * @mode: mode to convert 98 * @stat: p9_wstat from which the mode needs to be derived
 99 * @rdev: major and minor numbers in the case of device files.
99 * 100 *
100 */ 101 */
101 102static int p9mode2unixmode(struct v9fs_session_info *v9ses,
102static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode) 103 struct p9_wstat *stat, dev_t *rdev)
103{ 104{
104 int res; 105 int res;
106 int mode = stat->mode;
105 107
106 res = mode & 0777; 108 res = mode & S_IALLUGO;
109 *rdev = 0;
107 110
108 if ((mode & P9_DMDIR) == P9_DMDIR) 111 if ((mode & P9_DMDIR) == P9_DMDIR)
109 res |= S_IFDIR; 112 res |= S_IFDIR;
@@ -116,9 +119,26 @@ static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode)
116 && (v9ses->nodev == 0)) 119 && (v9ses->nodev == 0))
117 res |= S_IFIFO; 120 res |= S_IFIFO;
118 else if ((mode & P9_DMDEVICE) && (v9fs_proto_dotu(v9ses)) 121 else if ((mode & P9_DMDEVICE) && (v9fs_proto_dotu(v9ses))
119 && (v9ses->nodev == 0)) 122 && (v9ses->nodev == 0)) {
120 res |= S_IFBLK; 123 char type = 0, ext[32];
121 else 124 int major = -1, minor = -1;
125
126 strncpy(ext, stat->extension, sizeof(ext));
127 sscanf(ext, "%c %u %u", &type, &major, &minor);
128 switch (type) {
129 case 'c':
130 res |= S_IFCHR;
131 break;
132 case 'b':
133 res |= S_IFBLK;
134 break;
135 default:
136 P9_DPRINTK(P9_DEBUG_ERROR,
137 "Unknown special type %c %s\n", type,
138 stat->extension);
139 };
140 *rdev = MKDEV(major, minor);
141 } else
122 res |= S_IFREG; 142 res |= S_IFREG;
123 143
124 if (v9fs_proto_dotu(v9ses)) { 144 if (v9fs_proto_dotu(v9ses)) {
@@ -131,7 +151,6 @@ static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode)
131 if ((mode & P9_DMSETVTX) == P9_DMSETVTX) 151 if ((mode & P9_DMSETVTX) == P9_DMSETVTX)
132 res |= S_ISVTX; 152 res |= S_ISVTX;
133 } 153 }
134
135 return res; 154 return res;
136} 155}
137 156
@@ -242,13 +261,13 @@ void v9fs_destroy_inode(struct inode *inode)
242} 261}
243 262
244int v9fs_init_inode(struct v9fs_session_info *v9ses, 263int v9fs_init_inode(struct v9fs_session_info *v9ses,
245 struct inode *inode, int mode) 264 struct inode *inode, int mode, dev_t rdev)
246{ 265{
247 int err = 0; 266 int err = 0;
248 267
249 inode_init_owner(inode, NULL, mode); 268 inode_init_owner(inode, NULL, mode);
250 inode->i_blocks = 0; 269 inode->i_blocks = 0;
251 inode->i_rdev = 0; 270 inode->i_rdev = rdev;
252 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 271 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
253 inode->i_mapping->a_ops = &v9fs_addr_operations; 272 inode->i_mapping->a_ops = &v9fs_addr_operations;
254 273
@@ -335,7 +354,7 @@ error:
335 * 354 *
336 */ 355 */
337 356
338struct inode *v9fs_get_inode(struct super_block *sb, int mode) 357struct inode *v9fs_get_inode(struct super_block *sb, int mode, dev_t rdev)
339{ 358{
340 int err; 359 int err;
341 struct inode *inode; 360 struct inode *inode;
@@ -348,7 +367,7 @@ struct inode *v9fs_get_inode(struct super_block *sb, int mode)
348 P9_EPRINTK(KERN_WARNING, "Problem allocating inode\n"); 367 P9_EPRINTK(KERN_WARNING, "Problem allocating inode\n");
349 return ERR_PTR(-ENOMEM); 368 return ERR_PTR(-ENOMEM);
350 } 369 }
351 err = v9fs_init_inode(v9ses, inode, mode); 370 err = v9fs_init_inode(v9ses, inode, mode, rdev);
352 if (err) { 371 if (err) {
353 iput(inode); 372 iput(inode);
354 return ERR_PTR(err); 373 return ERR_PTR(err);
@@ -435,11 +454,12 @@ void v9fs_evict_inode(struct inode *inode)
435static int v9fs_test_inode(struct inode *inode, void *data) 454static int v9fs_test_inode(struct inode *inode, void *data)
436{ 455{
437 int umode; 456 int umode;
457 dev_t rdev;
438 struct v9fs_inode *v9inode = V9FS_I(inode); 458 struct v9fs_inode *v9inode = V9FS_I(inode);
439 struct p9_wstat *st = (struct p9_wstat *)data; 459 struct p9_wstat *st = (struct p9_wstat *)data;
440 struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode); 460 struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode);
441 461
442 umode = p9mode2unixmode(v9ses, st->mode); 462 umode = p9mode2unixmode(v9ses, st, &rdev);
443 /* don't match inode of different type */ 463 /* don't match inode of different type */
444 if ((inode->i_mode & S_IFMT) != (umode & S_IFMT)) 464 if ((inode->i_mode & S_IFMT) != (umode & S_IFMT))
445 return 0; 465 return 0;
@@ -473,6 +493,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
473 struct p9_wstat *st, 493 struct p9_wstat *st,
474 int new) 494 int new)
475{ 495{
496 dev_t rdev;
476 int retval, umode; 497 int retval, umode;
477 unsigned long i_ino; 498 unsigned long i_ino;
478 struct inode *inode; 499 struct inode *inode;
@@ -496,8 +517,8 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
496 * later. 517 * later.
497 */ 518 */
498 inode->i_ino = i_ino; 519 inode->i_ino = i_ino;
499 umode = p9mode2unixmode(v9ses, st->mode); 520 umode = p9mode2unixmode(v9ses, st, &rdev);
500 retval = v9fs_init_inode(v9ses, inode, umode); 521 retval = v9fs_init_inode(v9ses, inode, umode, rdev);
501 if (retval) 522 if (retval)
502 goto error; 523 goto error;
503 524
@@ -532,6 +553,19 @@ v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
532} 553}
533 554
534/** 555/**
556 * v9fs_at_to_dotl_flags - convert Linux-specific AT flags to
557 * Plan 9 AT flags.
558 * @flags: flags to convert
559 */
560static int v9fs_at_to_dotl_flags(int flags)
561{
562 int rflags = 0;
563 if (flags & AT_REMOVEDIR)
564 rflags |= P9_DOTL_AT_REMOVEDIR;
565 return rflags;
566}
567
568/**
535 * v9fs_remove - helper function to remove files and directories 569 * v9fs_remove - helper function to remove files and directories
536 * @dir: directory inode that is being deleted 570 * @dir: directory inode that is being deleted
537 * @dentry: dentry that is being deleted 571 * @dentry: dentry that is being deleted
@@ -558,7 +592,8 @@ static int v9fs_remove(struct inode *dir, struct dentry *dentry, int flags)
558 return retval; 592 return retval;
559 } 593 }
560 if (v9fs_proto_dotl(v9ses)) 594 if (v9fs_proto_dotl(v9ses))
561 retval = p9_client_unlinkat(dfid, dentry->d_name.name, flags); 595 retval = p9_client_unlinkat(dfid, dentry->d_name.name,
596 v9fs_at_to_dotl_flags(flags));
562 if (retval == -EOPNOTSUPP) { 597 if (retval == -EOPNOTSUPP) {
563 /* Try the one based on path */ 598 /* Try the one based on path */
564 v9fid = v9fs_fid_clone(dentry); 599 v9fid = v9fs_fid_clone(dentry);
@@ -645,13 +680,11 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
645 P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err); 680 P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
646 goto error; 681 goto error;
647 } 682 }
648 d_instantiate(dentry, inode);
649 err = v9fs_fid_add(dentry, fid); 683 err = v9fs_fid_add(dentry, fid);
650 if (err < 0) 684 if (err < 0)
651 goto error; 685 goto error;
652 686 d_instantiate(dentry, inode);
653 return ofid; 687 return ofid;
654
655error: 688error:
656 if (ofid) 689 if (ofid)
657 p9_client_clunk(ofid); 690 p9_client_clunk(ofid);
@@ -792,6 +825,7 @@ static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
792struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, 825struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
793 struct nameidata *nameidata) 826 struct nameidata *nameidata)
794{ 827{
828 struct dentry *res;
795 struct super_block *sb; 829 struct super_block *sb;
796 struct v9fs_session_info *v9ses; 830 struct v9fs_session_info *v9ses;
797 struct p9_fid *dfid, *fid; 831 struct p9_fid *dfid, *fid;
@@ -823,22 +857,35 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
823 857
824 return ERR_PTR(result); 858 return ERR_PTR(result);
825 } 859 }
826 860 /*
827 inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb); 861 * Make sure we don't use a wrong inode due to parallel
862 * unlink. In cached mode, create itself requests a new
863 * inode; with cache disabled, the lookup must do it.
864 */
865 if (v9ses->cache)
866 inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
867 else
868 inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
828 if (IS_ERR(inode)) { 869 if (IS_ERR(inode)) {
829 result = PTR_ERR(inode); 870 result = PTR_ERR(inode);
830 inode = NULL; 871 inode = NULL;
831 goto error; 872 goto error;
832 } 873 }
833
834 result = v9fs_fid_add(dentry, fid); 874 result = v9fs_fid_add(dentry, fid);
835 if (result < 0) 875 if (result < 0)
836 goto error_iput; 876 goto error_iput;
837
838inst_out: 877inst_out:
839 d_add(dentry, inode); 878 /*
840 return NULL; 879 * If we had a rename on the server and a parallel lookup
841 880 * for the new name, then make sure we instantiate with
881 * the new name. ie look up for a/b, while on server somebody
882 * moved b under k and client parallely did a lookup for
883 * k/b.
884 */
885 res = d_materialise_unique(dentry, inode);
886 if (!IS_ERR(res))
887 return res;
888 result = PTR_ERR(res);
842error_iput: 889error_iput:
843 iput(inode); 890 iput(inode);
844error: 891error:
@@ -1002,7 +1049,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
1002 return PTR_ERR(st); 1049 return PTR_ERR(st);
1003 1050
1004 v9fs_stat2inode(st, dentry->d_inode, dentry->d_inode->i_sb); 1051 v9fs_stat2inode(st, dentry->d_inode, dentry->d_inode->i_sb);
1005 generic_fillattr(dentry->d_inode, stat); 1052 generic_fillattr(dentry->d_inode, stat);
1006 1053
1007 p9stat_free(st); 1054 p9stat_free(st);
1008 kfree(st); 1055 kfree(st);
@@ -1086,6 +1133,7 @@ void
1086v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode, 1133v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
1087 struct super_block *sb) 1134 struct super_block *sb)
1088{ 1135{
1136 mode_t mode;
1089 char ext[32]; 1137 char ext[32];
1090 char tag_name[14]; 1138 char tag_name[14];
1091 unsigned int i_nlink; 1139 unsigned int i_nlink;
@@ -1121,31 +1169,9 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
1121 inode->i_nlink = i_nlink; 1169 inode->i_nlink = i_nlink;
1122 } 1170 }
1123 } 1171 }
1124 inode->i_mode = p9mode2unixmode(v9ses, stat->mode); 1172 mode = stat->mode & S_IALLUGO;
1125 if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode))) { 1173 mode |= inode->i_mode & ~S_IALLUGO;
1126 char type = 0; 1174 inode->i_mode = mode;
1127 int major = -1;
1128 int minor = -1;
1129
1130 strncpy(ext, stat->extension, sizeof(ext));
1131 sscanf(ext, "%c %u %u", &type, &major, &minor);
1132 switch (type) {
1133 case 'c':
1134 inode->i_mode &= ~S_IFBLK;
1135 inode->i_mode |= S_IFCHR;
1136 break;
1137 case 'b':
1138 break;
1139 default:
1140 P9_DPRINTK(P9_DEBUG_ERROR,
1141 "Unknown special type %c %s\n", type,
1142 stat->extension);
1143 };
1144 inode->i_rdev = MKDEV(major, minor);
1145 init_special_inode(inode, inode->i_mode, inode->i_rdev);
1146 } else
1147 inode->i_rdev = 0;
1148
1149 i_size_write(inode, stat->length); 1175 i_size_write(inode, stat->length);
1150 1176
1151 /* not real number of blocks, but 512 byte ones ... */ 1177 /* not real number of blocks, but 512 byte ones ... */
@@ -1411,6 +1437,8 @@ v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
1411 1437
1412int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode) 1438int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
1413{ 1439{
1440 int umode;
1441 dev_t rdev;
1414 loff_t i_size; 1442 loff_t i_size;
1415 struct p9_wstat *st; 1443 struct p9_wstat *st;
1416 struct v9fs_session_info *v9ses; 1444 struct v9fs_session_info *v9ses;
@@ -1419,6 +1447,12 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
1419 st = p9_client_stat(fid); 1447 st = p9_client_stat(fid);
1420 if (IS_ERR(st)) 1448 if (IS_ERR(st))
1421 return PTR_ERR(st); 1449 return PTR_ERR(st);
1450 /*
1451 * Don't update inode if the file type is different
1452 */
1453 umode = p9mode2unixmode(v9ses, st, &rdev);
1454 if ((inode->i_mode & S_IFMT) != (umode & S_IFMT))
1455 goto out;
1422 1456
1423 spin_lock(&inode->i_lock); 1457 spin_lock(&inode->i_lock);
1424 /* 1458 /*
@@ -1430,6 +1464,7 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
1430 if (v9ses->cache) 1464 if (v9ses->cache)
1431 inode->i_size = i_size; 1465 inode->i_size = i_size;
1432 spin_unlock(&inode->i_lock); 1466 spin_unlock(&inode->i_lock);
1467out:
1433 p9stat_free(st); 1468 p9stat_free(st);
1434 kfree(st); 1469 kfree(st);
1435 return 0; 1470 return 0;
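The special-file handling that moved into p9mode2unixmode() parses the 9P extension string, which encodes a device as "c MAJOR MINOR" or "b MAJOR MINOR". A small worked example of the parse:

    char ext[32] = "b 8 0";      /* block device, major 8, minor 0 */
    char type = 0;
    unsigned int major = 0, minor = 0;

    sscanf(ext, "%c %u %u", &type, &major, &minor);
    /* type == 'b'; MKDEV(8, 0) is /dev/sda on a typical system */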
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index b6c8ed205192..aded79fcd5cf 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -153,7 +153,8 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
153 * later. 153 * later.
154 */ 154 */
155 inode->i_ino = i_ino; 155 inode->i_ino = i_ino;
156 retval = v9fs_init_inode(v9ses, inode, st->st_mode); 156 retval = v9fs_init_inode(v9ses, inode,
157 st->st_mode, new_decode_dev(st->st_rdev));
157 if (retval) 158 if (retval)
158 goto error; 159 goto error;
159 160
@@ -190,6 +191,58 @@ v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid,
190 return inode; 191 return inode;
191} 192}
192 193
194struct dotl_openflag_map {
195 int open_flag;
196 int dotl_flag;
197};
198
199static int v9fs_mapped_dotl_flags(int flags)
200{
201 int i;
202 int rflags = 0;
203 struct dotl_openflag_map dotl_oflag_map[] = {
204 { O_CREAT, P9_DOTL_CREATE },
205 { O_EXCL, P9_DOTL_EXCL },
206 { O_NOCTTY, P9_DOTL_NOCTTY },
207 { O_TRUNC, P9_DOTL_TRUNC },
208 { O_APPEND, P9_DOTL_APPEND },
209 { O_NONBLOCK, P9_DOTL_NONBLOCK },
210 { O_DSYNC, P9_DOTL_DSYNC },
211 { FASYNC, P9_DOTL_FASYNC },
212 { O_DIRECT, P9_DOTL_DIRECT },
213 { O_LARGEFILE, P9_DOTL_LARGEFILE },
214 { O_DIRECTORY, P9_DOTL_DIRECTORY },
215 { O_NOFOLLOW, P9_DOTL_NOFOLLOW },
216 { O_NOATIME, P9_DOTL_NOATIME },
217 { O_CLOEXEC, P9_DOTL_CLOEXEC },
218 { O_SYNC, P9_DOTL_SYNC},
219 };
220 for (i = 0; i < ARRAY_SIZE(dotl_oflag_map); i++) {
221 if (flags & dotl_oflag_map[i].open_flag)
222 rflags |= dotl_oflag_map[i].dotl_flag;
223 }
224 return rflags;
225}
226
227/**
228 * v9fs_open_to_dotl_flags- convert Linux specific open flags to
229 * plan 9 open flag.
230 * @flags: flags to convert
231 */
232int v9fs_open_to_dotl_flags(int flags)
233{
234 int rflags = 0;
235
236 /*
237 * We have same bits for P9_DOTL_READONLY, P9_DOTL_WRONLY
238 * and P9_DOTL_NOACCESS
239 */
240 rflags |= flags & O_ACCMODE;
241 rflags |= v9fs_mapped_dotl_flags(flags);
242
243 return rflags;
244}
245
193/** 246/**
194 * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol. 247 * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol.
195 * @dir: directory inode that is being created 248 * @dir: directory inode that is being created
@@ -258,7 +311,8 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
258 "Failed to get acl values in creat %d\n", err); 311 "Failed to get acl values in creat %d\n", err);
259 goto error; 312 goto error;
260 } 313 }
261 err = p9_client_create_dotl(ofid, name, flags, mode, gid, &qid); 314 err = p9_client_create_dotl(ofid, name, v9fs_open_to_dotl_flags(flags),
315 mode, gid, &qid);
262 if (err < 0) { 316 if (err < 0) {
263 P9_DPRINTK(P9_DEBUG_VFS, 317 P9_DPRINTK(P9_DEBUG_VFS,
264 "p9_client_open_dotl failed in creat %d\n", 318 "p9_client_open_dotl failed in creat %d\n",
@@ -281,10 +335,10 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
281 P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err); 335 P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
282 goto error; 336 goto error;
283 } 337 }
284 d_instantiate(dentry, inode);
285 err = v9fs_fid_add(dentry, fid); 338 err = v9fs_fid_add(dentry, fid);
286 if (err < 0) 339 if (err < 0)
287 goto error; 340 goto error;
341 d_instantiate(dentry, inode);
288 342
289 /* Now set the ACL based on the default value */ 343 /* Now set the ACL based on the default value */
290 v9fs_set_create_acl(dentry, &dacl, &pacl); 344 v9fs_set_create_acl(dentry, &dacl, &pacl);
@@ -403,10 +457,10 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
403 err); 457 err);
404 goto error; 458 goto error;
405 } 459 }
406 d_instantiate(dentry, inode);
407 err = v9fs_fid_add(dentry, fid); 460 err = v9fs_fid_add(dentry, fid);
408 if (err < 0) 461 if (err < 0)
409 goto error; 462 goto error;
463 d_instantiate(dentry, inode);
410 fid = NULL; 464 fid = NULL;
411 } else { 465 } else {
412 /* 466 /*
@@ -414,7 +468,7 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
414 * inode with stat. We need to get an inode 468 * inode with stat. We need to get an inode
415 * so that we can set the acl with dentry 469 * so that we can set the acl with dentry
416 */ 470 */
417 inode = v9fs_get_inode(dir->i_sb, mode); 471 inode = v9fs_get_inode(dir->i_sb, mode, 0);
418 if (IS_ERR(inode)) { 472 if (IS_ERR(inode)) {
419 err = PTR_ERR(inode); 473 err = PTR_ERR(inode);
420 goto error; 474 goto error;
@@ -540,6 +594,7 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
540void 594void
541v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode) 595v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
542{ 596{
597 mode_t mode;
543 struct v9fs_inode *v9inode = V9FS_I(inode); 598 struct v9fs_inode *v9inode = V9FS_I(inode);
544 599
545 if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) { 600 if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) {
@@ -552,11 +607,10 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
552 inode->i_uid = stat->st_uid; 607 inode->i_uid = stat->st_uid;
553 inode->i_gid = stat->st_gid; 608 inode->i_gid = stat->st_gid;
554 inode->i_nlink = stat->st_nlink; 609 inode->i_nlink = stat->st_nlink;
555 inode->i_mode = stat->st_mode;
556 inode->i_rdev = new_decode_dev(stat->st_rdev);
557 610
558 if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode))) 611 mode = stat->st_mode & S_IALLUGO;
559 init_special_inode(inode, inode->i_mode, inode->i_rdev); 612 mode |= inode->i_mode & ~S_IALLUGO;
613 inode->i_mode = mode;
560 614
561 i_size_write(inode, stat->st_size); 615 i_size_write(inode, stat->st_size);
562 inode->i_blocks = stat->st_blocks; 616 inode->i_blocks = stat->st_blocks;
@@ -657,14 +711,14 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
657 err); 711 err);
658 goto error; 712 goto error;
659 } 713 }
660 d_instantiate(dentry, inode);
661 err = v9fs_fid_add(dentry, fid); 714 err = v9fs_fid_add(dentry, fid);
662 if (err < 0) 715 if (err < 0)
663 goto error; 716 goto error;
717 d_instantiate(dentry, inode);
664 fid = NULL; 718 fid = NULL;
665 } else { 719 } else {
666 /* Not in cached mode. No need to populate inode with stat */ 720 /* Not in cached mode. No need to populate inode with stat */
667 inode = v9fs_get_inode(dir->i_sb, S_IFLNK); 721 inode = v9fs_get_inode(dir->i_sb, S_IFLNK, 0);
668 if (IS_ERR(inode)) { 722 if (IS_ERR(inode)) {
669 err = PTR_ERR(inode); 723 err = PTR_ERR(inode);
670 goto error; 724 goto error;
@@ -810,17 +864,17 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
810 err); 864 err);
811 goto error; 865 goto error;
812 } 866 }
813 d_instantiate(dentry, inode);
814 err = v9fs_fid_add(dentry, fid); 867 err = v9fs_fid_add(dentry, fid);
815 if (err < 0) 868 if (err < 0)
816 goto error; 869 goto error;
870 d_instantiate(dentry, inode);
817 fid = NULL; 871 fid = NULL;
818 } else { 872 } else {
819 /* 873 /*
820 * Not in cached mode. No need to populate inode with stat. 874 * Not in cached mode. No need to populate inode with stat.
821 * socket syscall returns a fd, so we need to instantiate 875 * socket syscall returns a fd, so we need to instantiate
822 */ 876 */
823 inode = v9fs_get_inode(dir->i_sb, mode); 877 inode = v9fs_get_inode(dir->i_sb, mode, rdev);
824 if (IS_ERR(inode)) { 878 if (IS_ERR(inode)) {
825 err = PTR_ERR(inode); 879 err = PTR_ERR(inode);
826 goto error; 880 goto error;
@@ -886,6 +940,11 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
886 st = p9_client_getattr_dotl(fid, P9_STATS_ALL); 940 st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
887 if (IS_ERR(st)) 941 if (IS_ERR(st))
888 return PTR_ERR(st); 942 return PTR_ERR(st);
943 /*
944 * Don't update inode if the file type is different
945 */
946 if ((inode->i_mode & S_IFMT) != (st->st_mode & S_IFMT))
947 goto out;
889 948
890 spin_lock(&inode->i_lock); 949 spin_lock(&inode->i_lock);
891 /* 950 /*
@@ -897,6 +956,7 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
897 if (v9ses->cache) 956 if (v9ses->cache)
898 inode->i_size = i_size; 957 inode->i_size = i_size;
899 spin_unlock(&inode->i_lock); 958 spin_unlock(&inode->i_lock);
959out:
900 kfree(st); 960 kfree(st);
901 return 0; 961 return 0;
902} 962}
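
v9fs_mapped_dotl_flags() walks a table of (Linux flag, 9p2000.L flag) pairs and ORs in the protocol bit for every Linux bit that is set, while v9fs_open_to_dotl_flags() passes the O_ACCMODE bits through unchanged. A self-contained sketch of the same table-driven translation, with a trimmed flag table and the P9_DOTL_* values written as the octal literals defined in 9p.h below (helper names are illustrative):

#include <fcntl.h>
#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Each Linux open flag maps to the corresponding 9p2000.L bit via a
 * lookup table; access-mode bits pass through untranslated. */
struct oflag_map {
	int open_flag;
	int dotl_flag;
};

static int map_open_flags(int flags)
{
	static const struct oflag_map map[] = {
		{ O_CREAT,  0000100 },	/* P9_DOTL_CREATE */
		{ O_EXCL,   0000200 },	/* P9_DOTL_EXCL */
		{ O_TRUNC,  0001000 },	/* P9_DOTL_TRUNC */
		{ O_APPEND, 0002000 },	/* P9_DOTL_APPEND */
	};
	size_t i;
	int rflags = flags & O_ACCMODE;	/* access mode passes through */

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (flags & map[i].open_flag)
			rflags |= map[i].dotl_flag;
	return rflags;
}

int main(void)
{
	printf("0%o\n", map_open_flags(O_WRONLY | O_CREAT | O_TRUNC));
	return 0;
}
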
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index feef6cdc1fd2..c70251d47ed1 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -149,7 +149,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
149 else 149 else
150 sb->s_d_op = &v9fs_dentry_operations; 150 sb->s_d_op = &v9fs_dentry_operations;
151 151
152 inode = v9fs_get_inode(sb, S_IFDIR | mode); 152 inode = v9fs_get_inode(sb, S_IFDIR | mode, 0);
153 if (IS_ERR(inode)) { 153 if (IS_ERR(inode)) {
154 retval = PTR_ERR(inode); 154 retval = PTR_ERR(inode);
155 goto release_sb; 155 goto release_sb;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index ff77262e887c..95f786ec7f08 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1429,6 +1429,11 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
1429 WARN_ON_ONCE(bdev->bd_holders); 1429 WARN_ON_ONCE(bdev->bd_holders);
1430 sync_blockdev(bdev); 1430 sync_blockdev(bdev);
1431 kill_bdev(bdev); 1431 kill_bdev(bdev);
1432 /* ->release can cause the old bdi to disappear,
1433 * so we must switch it out first
1434 */
1435 bdev_inode_switch_bdi(bdev->bd_inode,
1436 &default_backing_dev_info);
1432 } 1437 }
1433 if (bdev->bd_contains == bdev) { 1438 if (bdev->bd_contains == bdev) {
1434 if (disk->fops->release) 1439 if (disk->fops->release)
@@ -1442,8 +1447,6 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
1442 disk_put_part(bdev->bd_part); 1447 disk_put_part(bdev->bd_part);
1443 bdev->bd_part = NULL; 1448 bdev->bd_part = NULL;
1444 bdev->bd_disk = NULL; 1449 bdev->bd_disk = NULL;
1445 bdev_inode_switch_bdi(bdev->bd_inode,
1446 &default_backing_dev_info);
1447 if (bdev != bdev->bd_contains) 1450 if (bdev != bdev->bd_contains)
1448 victim = bdev->bd_contains; 1451 victim = bdev->bd_contains;
1449 bdev->bd_contains = NULL; 1452 bdev->bd_contains = NULL;
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index fee028b5332e..86c59e16ba74 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1595,7 +1595,7 @@ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
1595 r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath); 1595 r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath);
1596 dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen, 1596 dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
1597 *ppath); 1597 *ppath);
1598 } else if (rpath) { 1598 } else if (rpath || rino) {
1599 *ino = rino; 1599 *ino = rino;
1600 *ppath = rpath; 1600 *ppath = rpath;
1601 *pathlen = strlen(rpath); 1601 *pathlen = strlen(rpath);
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index d47c5ec7fb1f..88bacaf385d9 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -813,8 +813,8 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type,
813 fsc = create_fs_client(fsopt, opt); 813 fsc = create_fs_client(fsopt, opt);
814 if (IS_ERR(fsc)) { 814 if (IS_ERR(fsc)) {
815 res = ERR_CAST(fsc); 815 res = ERR_CAST(fsc);
816 kfree(fsopt); 816 destroy_mount_options(fsopt);
817 kfree(opt); 817 ceph_destroy_options(opt);
818 goto out_final; 818 goto out_final;
819 } 819 }
820 820
diff --git a/fs/namei.c b/fs/namei.c
index 2826db35dc25..b52bc685465f 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -727,25 +727,22 @@ static int follow_automount(struct path *path, unsigned flags,
727 if ((flags & LOOKUP_NO_AUTOMOUNT) && !(flags & LOOKUP_PARENT)) 727 if ((flags & LOOKUP_NO_AUTOMOUNT) && !(flags & LOOKUP_PARENT))
728 return -EISDIR; /* we actually want to stop here */ 728 return -EISDIR; /* we actually want to stop here */
729 729
730 /* 730 /* We don't want to mount if someone's just doing a stat -
731 * We don't want to mount if someone's just doing a stat and they've 731 * unless they're stat'ing a directory and appended a '/' to
732 * set AT_SYMLINK_NOFOLLOW - unless they're stat'ing a directory and 732 * the name.
733 * appended a '/' to the name. 733 *
734 * We do, however, want to mount if someone wants to open or
735 * create a file of any type under the mountpoint, wants to
736 * traverse through the mountpoint or wants to open the
737 * mounted directory. Also, autofs may mark negative dentries
738 * as being automount points. These will need the attention
739 * of the daemon to instantiate them before they can be used.
734 */ 740 */
735 if (!(flags & LOOKUP_FOLLOW)) { 741 if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
736 /* We do, however, want to mount if someone wants to open or 742 LOOKUP_OPEN | LOOKUP_CREATE)) &&
737 * create a file of any type under the mountpoint, wants to 743 path->dentry->d_inode)
738 * traverse through the mountpoint or wants to open the mounted 744 return -EISDIR;
739 * directory. 745
740 * Also, autofs may mark negative dentries as being automount
741 * points. These will need the attentions of the daemon to
742 * instantiate them before they can be used.
743 */
744 if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
745 LOOKUP_OPEN | LOOKUP_CREATE)) &&
746 path->dentry->d_inode)
747 return -EISDIR;
748 }
749 current->total_link_count++; 746 current->total_link_count++;
750 if (current->total_link_count >= 40) 747 if (current->total_link_count >= 40)
751 return -ELOOP; 748 return -ELOOP;
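
The rewritten follow_automount() check collapses the old nested LOOKUP_FOLLOW test into a single condition: a lookup that is not a parent, directory, open, or create lookup, landing on a positive dentry, stops with -EISDIR instead of triggering the automount. A sketch of that predicate (the LOOKUP_* values here are illustrative stand-ins, not the kernel's real bit assignments):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's LOOKUP_* flag bits. */
#define LOOKUP_PARENT    0x0010
#define LOOKUP_DIRECTORY 0x0020
#define LOOKUP_OPEN      0x0100
#define LOOKUP_CREATE    0x0200

/* Collapsed follow_automount() test: a bare stat on an existing
 * (positive) dentry should not trigger an automount. */
static bool stop_with_eisdir(unsigned flags, bool dentry_is_positive)
{
	return !(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
			  LOOKUP_OPEN | LOOKUP_CREATE)) &&
	       dentry_is_positive;
}

int main(void)
{
	printf("%d\n", stop_with_eisdir(0, true));           /* stat: stop */
	printf("%d\n", stop_with_eisdir(LOOKUP_OPEN, true)); /* open: mount */
	return 0;
}
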
diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h
index 45174b534377..feb361e252ac 100644
--- a/fs/ubifs/debug.h
+++ b/fs/ubifs/debug.h
@@ -335,9 +335,9 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c);
335#define DBGKEY(key) ((char *)(key)) 335#define DBGKEY(key) ((char *)(key))
336#define DBGKEY1(key) ((char *)(key)) 336#define DBGKEY1(key) ((char *)(key))
337 337
338#define ubifs_dbg_msg(fmt, ...) do { \ 338#define ubifs_dbg_msg(fmt, ...) do { \
339 if (0) \ 339 if (0) \
340 pr_debug(fmt "\n", ##__VA_ARGS__); \ 340 printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__); \
341} while (0) 341} while (0)
342 342
343#define dbg_dump_stack() 343#define dbg_dump_stack()
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 245bafdafd5e..c816075c01ce 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -944,8 +944,10 @@ extern void perf_pmu_unregister(struct pmu *pmu);
944 944
945extern int perf_num_counters(void); 945extern int perf_num_counters(void);
946extern const char *perf_pmu_name(void); 946extern const char *perf_pmu_name(void);
947extern void __perf_event_task_sched_in(struct task_struct *task); 947extern void __perf_event_task_sched_in(struct task_struct *prev,
948extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next); 948 struct task_struct *task);
949extern void __perf_event_task_sched_out(struct task_struct *prev,
950 struct task_struct *next);
949extern int perf_event_init_task(struct task_struct *child); 951extern int perf_event_init_task(struct task_struct *child);
950extern void perf_event_exit_task(struct task_struct *child); 952extern void perf_event_exit_task(struct task_struct *child);
951extern void perf_event_free_task(struct task_struct *task); 953extern void perf_event_free_task(struct task_struct *task);
@@ -1059,17 +1061,20 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
1059 1061
1060extern struct jump_label_key perf_sched_events; 1062extern struct jump_label_key perf_sched_events;
1061 1063
1062static inline void perf_event_task_sched_in(struct task_struct *task) 1064static inline void perf_event_task_sched_in(struct task_struct *prev,
1065 struct task_struct *task)
1063{ 1066{
1064 if (static_branch(&perf_sched_events)) 1067 if (static_branch(&perf_sched_events))
1065 __perf_event_task_sched_in(task); 1068 __perf_event_task_sched_in(prev, task);
1066} 1069}
1067 1070
1068static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next) 1071static inline void perf_event_task_sched_out(struct task_struct *prev,
1072 struct task_struct *next)
1069{ 1073{
1070 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0); 1074 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
1071 1075
1072 __perf_event_task_sched_out(task, next); 1076 if (static_branch(&perf_sched_events))
1077 __perf_event_task_sched_out(prev, next);
1073} 1078}
1074 1079
1075extern void perf_event_mmap(struct vm_area_struct *vma); 1080extern void perf_event_mmap(struct vm_area_struct *vma);
@@ -1139,10 +1144,11 @@ extern void perf_event_disable(struct perf_event *event);
1139extern void perf_event_task_tick(void); 1144extern void perf_event_task_tick(void);
1140#else 1145#else
1141static inline void 1146static inline void
1142perf_event_task_sched_in(struct task_struct *task) { } 1147perf_event_task_sched_in(struct task_struct *prev,
1148 struct task_struct *task) { }
1143static inline void 1149static inline void
1144perf_event_task_sched_out(struct task_struct *task, 1150perf_event_task_sched_out(struct task_struct *prev,
1145 struct task_struct *next) { } 1151 struct task_struct *next) { }
1146static inline int perf_event_init_task(struct task_struct *child) { return 0; } 1152static inline int perf_event_init_task(struct task_struct *child) { return 0; }
1147static inline void perf_event_exit_task(struct task_struct *child) { } 1153static inline void perf_event_exit_task(struct task_struct *child) { }
1148static inline void perf_event_free_task(struct task_struct *task) { } 1154static inline void perf_event_free_task(struct task_struct *task) { }
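
With this change both scheduling hooks sit behind the perf_sched_events jump label, so a kernel with no active events pays only a patched-out branch on the context-switch path. A userspace sketch of the guard pattern, emulating the static branch with a plain flag (names are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Emulates the jump-label guard: the slow path runs only when at
 * least one scheduling-sensitive event is active. In the kernel,
 * static_branch() patches this test out of the hot path entirely. */
static bool perf_sched_events_enabled;

static void __task_sched_out(const char *prev, const char *next)
{
	printf("context-switch bookkeeping: %s -> %s\n", prev, next);
}

static void task_sched_out(const char *prev, const char *next)
{
	if (perf_sched_events_enabled)
		__task_sched_out(prev, next);
}

int main(void)
{
	task_sched_out("A", "B");	/* guard off: no work */
	perf_sched_events_enabled = true;
	task_sched_out("A", "B");	/* guard on: slow path runs */
	return 0;
}
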
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 26f6ea4444e3..b47771aa5718 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -123,7 +123,7 @@ struct regulator_bulk_data {
123 const char *supply; 123 const char *supply;
124 struct regulator *consumer; 124 struct regulator *consumer;
125 125
126 /* Internal use */ 126 /* private: Internal use */
127 int ret; 127 int ret;
128}; 128};
129 129
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h
index 342dcf13d039..a6326ef8ade6 100644
--- a/include/net/9p/9p.h
+++ b/include/net/9p/9p.h
@@ -288,6 +288,35 @@ enum p9_perm_t {
288 P9_DMSETVTX = 0x00010000, 288 P9_DMSETVTX = 0x00010000,
289}; 289};
290 290
291/* 9p2000.L open flags */
292#define P9_DOTL_RDONLY 00000000
293#define P9_DOTL_WRONLY 00000001
294#define P9_DOTL_RDWR 00000002
295#define P9_DOTL_NOACCESS 00000003
296#define P9_DOTL_CREATE 00000100
297#define P9_DOTL_EXCL 00000200
298#define P9_DOTL_NOCTTY 00000400
299#define P9_DOTL_TRUNC 00001000
300#define P9_DOTL_APPEND 00002000
301#define P9_DOTL_NONBLOCK 00004000
302#define P9_DOTL_DSYNC 00010000
303#define P9_DOTL_FASYNC 00020000
304#define P9_DOTL_DIRECT 00040000
305#define P9_DOTL_LARGEFILE 00100000
306#define P9_DOTL_DIRECTORY 00200000
307#define P9_DOTL_NOFOLLOW 00400000
308#define P9_DOTL_NOATIME 01000000
309#define P9_DOTL_CLOEXEC 02000000
310#define P9_DOTL_SYNC 04000000
311
312/* 9p2000.L at flags */
313#define P9_DOTL_AT_REMOVEDIR 0x200
314
315/* 9p2000.L lock type */
316#define P9_LOCK_TYPE_RDLCK 0
317#define P9_LOCK_TYPE_WRLCK 1
318#define P9_LOCK_TYPE_UNLCK 2
319
291/** 320/**
292 * enum p9_qid_t - QID types 321 * enum p9_qid_t - QID types
293 * @P9_QTDIR: directory 322 * @P9_QTDIR: directory
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 408ae4882d22..401d73bd151f 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1744,6 +1744,8 @@ struct wiphy_wowlan_support {
1744 * by default for perm_addr. In this case, the mask should be set to 1744 * by default for perm_addr. In this case, the mask should be set to
1745 * all-zeroes. In this case it is assumed that the device can handle 1745 * all-zeroes. In this case it is assumed that the device can handle
1746 * the same number of arbitrary MAC addresses. 1746 * the same number of arbitrary MAC addresses.
1747 * @registered: protects ->resume and ->suspend sysfs callbacks against
1748 * unregistering hardware
1747 * @debugfsdir: debugfs directory used for this wiphy, will be renamed 1749 * @debugfsdir: debugfs directory used for this wiphy, will be renamed
1748 * automatically on wiphy renames 1750 * automatically on wiphy renames
1749 * @dev: (virtual) struct device for this wiphy 1751 * @dev: (virtual) struct device for this wiphy
diff --git a/kernel/events/core.c b/kernel/events/core.c
index b8785e26ee1c..0f857782d06f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -399,14 +399,54 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
399 local_irq_restore(flags); 399 local_irq_restore(flags);
400} 400}
401 401
402static inline void perf_cgroup_sched_out(struct task_struct *task) 402static inline void perf_cgroup_sched_out(struct task_struct *task,
403 struct task_struct *next)
403{ 404{
404 perf_cgroup_switch(task, PERF_CGROUP_SWOUT); 405 struct perf_cgroup *cgrp1;
406 struct perf_cgroup *cgrp2 = NULL;
407
408 /*
409 * we come here when we know perf_cgroup_events > 0
410 */
411 cgrp1 = perf_cgroup_from_task(task);
412
413 /*
414 * next is NULL when called from perf_event_enable_on_exec()
415 * that will systematically cause a cgroup_switch()
416 */
417 if (next)
418 cgrp2 = perf_cgroup_from_task(next);
419
420 /*
421 * only schedule out current cgroup events if we know
422 * that we are switching to a different cgroup. Otherwise,
423 * do not touch the cgroup events.
424 */
425 if (cgrp1 != cgrp2)
426 perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
405} 427}
406 428
407static inline void perf_cgroup_sched_in(struct task_struct *task) 429static inline void perf_cgroup_sched_in(struct task_struct *prev,
430 struct task_struct *task)
408{ 431{
409 perf_cgroup_switch(task, PERF_CGROUP_SWIN); 432 struct perf_cgroup *cgrp1;
433 struct perf_cgroup *cgrp2 = NULL;
434
435 /*
436 * we come here when we know perf_cgroup_events > 0
437 */
438 cgrp1 = perf_cgroup_from_task(task);
439
440 /* prev can never be NULL */
441 cgrp2 = perf_cgroup_from_task(prev);
442
443 /*
444 * only need to schedule in cgroup events if we are changing
445 * cgroup during ctxsw. Cgroup events were not scheduled
446 * out of ctxsw out if that was not the case.
447 */
448 if (cgrp1 != cgrp2)
449 perf_cgroup_switch(task, PERF_CGROUP_SWIN);
410} 450}
411 451
412static inline int perf_cgroup_connect(int fd, struct perf_event *event, 452static inline int perf_cgroup_connect(int fd, struct perf_event *event,
@@ -518,11 +558,13 @@ static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
518{ 558{
519} 559}
520 560
521static inline void perf_cgroup_sched_out(struct task_struct *task) 561static inline void perf_cgroup_sched_out(struct task_struct *task,
562 struct task_struct *next)
522{ 563{
523} 564}
524 565
525static inline void perf_cgroup_sched_in(struct task_struct *task) 566static inline void perf_cgroup_sched_in(struct task_struct *prev,
567 struct task_struct *task)
526{ 568{
527} 569}
528 570
@@ -1988,7 +2030,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
1988 * cgroup event are system-wide mode only 2030 * cgroup event are system-wide mode only
1989 */ 2031 */
1990 if (atomic_read(&__get_cpu_var(perf_cgroup_events))) 2032 if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
1991 perf_cgroup_sched_out(task); 2033 perf_cgroup_sched_out(task, next);
1992} 2034}
1993 2035
1994static void task_ctx_sched_out(struct perf_event_context *ctx) 2036static void task_ctx_sched_out(struct perf_event_context *ctx)
@@ -2153,7 +2195,8 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
2153 * accessing the event control register. If a NMI hits, then it will 2195 * accessing the event control register. If a NMI hits, then it will
2154 * keep the event running. 2196 * keep the event running.
2155 */ 2197 */
2156void __perf_event_task_sched_in(struct task_struct *task) 2198void __perf_event_task_sched_in(struct task_struct *prev,
2199 struct task_struct *task)
2157{ 2200{
2158 struct perf_event_context *ctx; 2201 struct perf_event_context *ctx;
2159 int ctxn; 2202 int ctxn;
@@ -2171,7 +2214,7 @@ void __perf_event_task_sched_in(struct task_struct *task)
2171 * cgroup event are system-wide mode only 2214 * cgroup event are system-wide mode only
2172 */ 2215 */
2173 if (atomic_read(&__get_cpu_var(perf_cgroup_events))) 2216 if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
2174 perf_cgroup_sched_in(task); 2217 perf_cgroup_sched_in(prev, task);
2175} 2218}
2176 2219
2177static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) 2220static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
@@ -2427,7 +2470,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
2427 * ctxswin cgroup events which are already scheduled 2470 * ctxswin cgroup events which are already scheduled
2428 * in. 2471 * in.
2429 */ 2472 */
2430 perf_cgroup_sched_out(current); 2473 perf_cgroup_sched_out(current, NULL);
2431 2474
2432 raw_spin_lock(&ctx->lock); 2475 raw_spin_lock(&ctx->lock);
2433 task_ctx_sched_out(ctx); 2476 task_ctx_sched_out(ctx);
@@ -3353,8 +3396,8 @@ static int perf_event_index(struct perf_event *event)
3353} 3396}
3354 3397
3355static void calc_timer_values(struct perf_event *event, 3398static void calc_timer_values(struct perf_event *event,
3356 u64 *running, 3399 u64 *enabled,
3357 u64 *enabled) 3400 u64 *running)
3358{ 3401{
3359 u64 now, ctx_time; 3402 u64 now, ctx_time;
3360 3403
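
perf_cgroup_sched_out() and perf_cgroup_sched_in() now compare the cgroups of the outgoing and incoming tasks and only perform the expensive switch when they differ; a NULL next (the perf_event_enable_on_exec() case) always forces the switch. A reduced sketch of the compare-before-switch logic, with cgroups modelled as plain integer ids:

#include <stdio.h>

/* Cgroups reduced to integer ids for illustration; id 0 stands in
 * for the "next is NULL" case from perf_event_enable_on_exec(),
 * which must always force a switch. */
static void cgroup_switch(const char *why)
{
	printf("perf_cgroup_switch: %s\n", why);
}

static void sched_out(int task_cgrp, int next_cgrp)
{
	/* only switch cgroup events when actually changing cgroup */
	if (next_cgrp == 0 || task_cgrp != next_cgrp)
		cgroup_switch("out");
}

int main(void)
{
	sched_out(1, 1);	/* same cgroup: nothing to do */
	sched_out(1, 2);	/* different cgroup: switch out */
	sched_out(1, 0);	/* enable_on_exec path: forced switch */
	return 0;
}
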
diff --git a/kernel/sched.c b/kernel/sched.c
index ccacdbdecf45..ec5f472bc5b9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3065,7 +3065,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
3065#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW 3065#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
3066 local_irq_disable(); 3066 local_irq_disable();
3067#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ 3067#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
3068 perf_event_task_sched_in(current); 3068 perf_event_task_sched_in(prev, current);
3069#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW 3069#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
3070 local_irq_enable(); 3070 local_irq_enable();
3071#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ 3071#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
@@ -4279,9 +4279,9 @@ pick_next_task(struct rq *rq)
4279} 4279}
4280 4280
4281/* 4281/*
4282 * schedule() is the main scheduler function. 4282 * __schedule() is the main scheduler function.
4283 */ 4283 */
4284asmlinkage void __sched schedule(void) 4284static void __sched __schedule(void)
4285{ 4285{
4286 struct task_struct *prev, *next; 4286 struct task_struct *prev, *next;
4287 unsigned long *switch_count; 4287 unsigned long *switch_count;
@@ -4322,16 +4322,6 @@ need_resched:
4322 if (to_wakeup) 4322 if (to_wakeup)
4323 try_to_wake_up_local(to_wakeup); 4323 try_to_wake_up_local(to_wakeup);
4324 } 4324 }
4325
4326 /*
4327 * If we are going to sleep and we have plugged IO
4328 * queued, make sure to submit it to avoid deadlocks.
4329 */
4330 if (blk_needs_flush_plug(prev)) {
4331 raw_spin_unlock(&rq->lock);
4332 blk_schedule_flush_plug(prev);
4333 raw_spin_lock(&rq->lock);
4334 }
4335 } 4325 }
4336 switch_count = &prev->nvcsw; 4326 switch_count = &prev->nvcsw;
4337 } 4327 }
@@ -4369,6 +4359,26 @@ need_resched:
4369 if (need_resched()) 4359 if (need_resched())
4370 goto need_resched; 4360 goto need_resched;
4371} 4361}
4362
4363static inline void sched_submit_work(struct task_struct *tsk)
4364{
4365 if (!tsk->state)
4366 return;
4367 /*
4368 * If we are going to sleep and we have plugged IO queued,
4369 * make sure to submit it to avoid deadlocks.
4370 */
4371 if (blk_needs_flush_plug(tsk))
4372 blk_schedule_flush_plug(tsk);
4373}
4374
4375asmlinkage void schedule(void)
4376{
4377 struct task_struct *tsk = current;
4378
4379 sched_submit_work(tsk);
4380 __schedule();
4381}
4372EXPORT_SYMBOL(schedule); 4382EXPORT_SYMBOL(schedule);
4373 4383
4374#ifdef CONFIG_MUTEX_SPIN_ON_OWNER 4384#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
@@ -4435,7 +4445,7 @@ asmlinkage void __sched notrace preempt_schedule(void)
4435 4445
4436 do { 4446 do {
4437 add_preempt_count_notrace(PREEMPT_ACTIVE); 4447 add_preempt_count_notrace(PREEMPT_ACTIVE);
4438 schedule(); 4448 __schedule();
4439 sub_preempt_count_notrace(PREEMPT_ACTIVE); 4449 sub_preempt_count_notrace(PREEMPT_ACTIVE);
4440 4450
4441 /* 4451 /*
@@ -4463,7 +4473,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
4463 do { 4473 do {
4464 add_preempt_count(PREEMPT_ACTIVE); 4474 add_preempt_count(PREEMPT_ACTIVE);
4465 local_irq_enable(); 4475 local_irq_enable();
4466 schedule(); 4476 __schedule();
4467 local_irq_disable(); 4477 local_irq_disable();
4468 sub_preempt_count(PREEMPT_ACTIVE); 4478 sub_preempt_count(PREEMPT_ACTIVE);
4469 4479
@@ -5588,7 +5598,7 @@ static inline int should_resched(void)
5588static void __cond_resched(void) 5598static void __cond_resched(void)
5589{ 5599{
5590 add_preempt_count(PREEMPT_ACTIVE); 5600 add_preempt_count(PREEMPT_ACTIVE);
5591 schedule(); 5601 __schedule();
5592 sub_preempt_count(PREEMPT_ACTIVE); 5602 sub_preempt_count(PREEMPT_ACTIVE);
5593} 5603}
5594 5604
@@ -7443,6 +7453,7 @@ static void __sdt_free(const struct cpumask *cpu_map)
7443 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j); 7453 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
7444 if (sd && (sd->flags & SD_OVERLAP)) 7454 if (sd && (sd->flags & SD_OVERLAP))
7445 free_sched_groups(sd->groups, 0); 7455 free_sched_groups(sd->groups, 0);
7456 kfree(*per_cpu_ptr(sdd->sd, j));
7446 kfree(*per_cpu_ptr(sdd->sg, j)); 7457 kfree(*per_cpu_ptr(sdd->sg, j));
7447 kfree(*per_cpu_ptr(sdd->sgp, j)); 7458 kfree(*per_cpu_ptr(sdd->sgp, j));
7448 } 7459 }
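
schedule() is now a thin wrapper: sched_submit_work() flushes any plugged block I/O before the task can sleep, while internal callers such as preempt_schedule() and __cond_resched() invoke __schedule() directly and skip that step. A sketch of the wrapper split (types and names are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the task state and plugged-I/O test. */
struct task { int state; bool has_plugged_io; };

static void flush_plugged_io(struct task *t)
{
	printf("flushing plugged I/O before sleeping\n");
	t->has_plugged_io = false;
}

static void __schedule(void)
{
	printf("__schedule: pick next task\n");
}

/* Only the external entry point submits pending work; internal
 * callers (preemption, cond_resched) call __schedule() directly. */
static void schedule(struct task *current_task)
{
	if (current_task->state && current_task->has_plugged_io)
		flush_plugged_io(current_task);
	__schedule();
}

int main(void)
{
	struct task t = { .state = 1, .has_plugged_io = true };
	schedule(&t);
	return 0;
}
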
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 59f369f98a04..ea5e1a928d5b 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -441,6 +441,8 @@ static int alarm_timer_create(struct k_itimer *new_timer)
441static void alarm_timer_get(struct k_itimer *timr, 441static void alarm_timer_get(struct k_itimer *timr,
442 struct itimerspec *cur_setting) 442 struct itimerspec *cur_setting)
443{ 443{
444 memset(cur_setting, 0, sizeof(struct itimerspec));
445
444 cur_setting->it_interval = 446 cur_setting->it_interval =
445 ktime_to_timespec(timr->it.alarmtimer.period); 447 ktime_to_timespec(timr->it.alarmtimer.period);
446 cur_setting->it_value = 448 cur_setting->it_value =
@@ -479,11 +481,17 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
479 if (!rtcdev) 481 if (!rtcdev)
480 return -ENOTSUPP; 482 return -ENOTSUPP;
481 483
482 /* Save old values */ 484 /*
483 old_setting->it_interval = 485 * XXX HACK! Currently we can DOS a system if the interval
484 ktime_to_timespec(timr->it.alarmtimer.period); 486 * period on alarmtimers is too small. Cap the interval here
485 old_setting->it_value = 487 * to 100us and solve this properly in a future patch! -jstultz
486 ktime_to_timespec(timr->it.alarmtimer.node.expires); 488 */
489 if ((new_setting->it_interval.tv_sec == 0) &&
490 (new_setting->it_interval.tv_nsec < 100000))
491 new_setting->it_interval.tv_nsec = 100000;
492
493 if (old_setting)
494 alarm_timer_get(timr, old_setting);
487 495
488 /* If the timer was already set, cancel it */ 496 /* If the timer was already set, cancel it */
489 alarm_cancel(&timr->it.alarmtimer); 497 alarm_cancel(&timr->it.alarmtimer);
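
The interim alarmtimer fix clamps a pure-nanosecond interval below 100us up to 100us before the timer is armed, closing the DoS window the comment describes. The same clamp as a standalone sketch (helper name illustrative):

#include <stdio.h>
#include <time.h>

/* Sketch of the interim DoS guard: intervals with a zero seconds
 * part and fewer than 100000ns are raised to 100us. */
static void clamp_interval(struct timespec *it_interval)
{
	if (it_interval->tv_sec == 0 && it_interval->tv_nsec < 100000)
		it_interval->tv_nsec = 100000;
}

int main(void)
{
	struct timespec iv = { .tv_sec = 0, .tv_nsec = 1 };

	clamp_interval(&iv);
	printf("interval clamped to %ldns\n", iv.tv_nsec);
	return 0;
}
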
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 175b5135bdcf..e317583fcc73 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -263,7 +263,6 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
263{ 263{
264 int in, out, inp, outp; 264 int in, out, inp, outp;
265 struct virtio_chan *chan = client->trans; 265 struct virtio_chan *chan = client->trans;
266 char *rdata = (char *)req->rc+sizeof(struct p9_fcall);
267 unsigned long flags; 266 unsigned long flags;
268 size_t pdata_off = 0; 267 size_t pdata_off = 0;
269 struct trans_rpage_info *rpinfo = NULL; 268 struct trans_rpage_info *rpinfo = NULL;
@@ -346,7 +345,8 @@ req_retry_pinned:
346 * Arrange in such a way that server places header in the 345 * Arrange in such a way that server places header in the
347 * alloced memory and payload onto the user buffer. 346 * alloced memory and payload onto the user buffer.
348 */ 347 */
349 inp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata, 11); 348 inp = pack_sg_list(chan->sg, out,
349 VIRTQUEUE_NUM, req->rc->sdata, 11);
350 /* 350 /*
351 * Running executables in the filesystem may result in 351 * Running executables in the filesystem may result in
352 * a read request with kernel buffer as opposed to user buffer. 352 * a read request with kernel buffer as opposed to user buffer.
@@ -366,8 +366,8 @@ req_retry_pinned:
366 } 366 }
367 in += inp; 367 in += inp;
368 } else { 368 } else {
369 in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata, 369 in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM,
370 req->rc->capacity); 370 req->rc->sdata, req->rc->capacity);
371 } 371 }
372 372
373 err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc); 373 err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc);
@@ -592,7 +592,14 @@ static struct p9_trans_module p9_virtio_trans = {
592 .close = p9_virtio_close, 592 .close = p9_virtio_close,
593 .request = p9_virtio_request, 593 .request = p9_virtio_request,
594 .cancel = p9_virtio_cancel, 594 .cancel = p9_virtio_cancel,
595 .maxsize = PAGE_SIZE*VIRTQUEUE_NUM, 595
596 /*
597 * We leave one entry for input and one entry for response
598 * headers. We also skip one more entry to accommodate addresses
599 * that are not at a page boundary, which can result in an extra
600 * page in zero copy.
601 */
602 .maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3),
596 .pref = P9_TRANS_PREF_PAYLOAD_SEP, 603 .pref = P9_TRANS_PREF_PAYLOAD_SEP,
597 .def = 0, 604 .def = 0,
598 .owner = THIS_MODULE, 605 .owner = THIS_MODULE,
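
The new maxsize reserves three of the VIRTQUEUE_NUM scatter-gather entries: one for the input header, one for the response header, and one spare for a payload buffer that straddles a page boundary. A worked sketch of the arithmetic, assuming the driver's usual VIRTQUEUE_NUM of 128 and a 4KiB page:

#include <stdio.h>

#define PAGE_SIZE 4096		/* typical; arch-dependent in the kernel */
#define VIRTQUEUE_NUM 128	/* ring size used by trans_virtio */

int main(void)
{
	/* one entry for input, one for the response header, one spare
	 * for buffers that straddle a page boundary */
	long reserved = 3;
	long maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - reserved);

	printf("old maxsize: %ld bytes\n", (long)PAGE_SIZE * VIRTQUEUE_NUM);
	printf("new maxsize: %ld bytes\n", maxsize);
	return 0;
}
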
diff --git a/net/ceph/msgpool.c b/net/ceph/msgpool.c
index d5f2d97ac05c..1f4cb30a42c5 100644
--- a/net/ceph/msgpool.c
+++ b/net/ceph/msgpool.c
@@ -7,27 +7,37 @@
7 7
8#include <linux/ceph/msgpool.h> 8#include <linux/ceph/msgpool.h>
9 9
10static void *alloc_fn(gfp_t gfp_mask, void *arg) 10static void *msgpool_alloc(gfp_t gfp_mask, void *arg)
11{ 11{
12 struct ceph_msgpool *pool = arg; 12 struct ceph_msgpool *pool = arg;
13 void *p; 13 struct ceph_msg *msg;
14 14
15 p = ceph_msg_new(0, pool->front_len, gfp_mask); 15 msg = ceph_msg_new(0, pool->front_len, gfp_mask);
16 if (!p) 16 if (!msg) {
17 pr_err("msgpool %s alloc failed\n", pool->name); 17 dout("msgpool_alloc %s failed\n", pool->name);
18 return p; 18 } else {
19 dout("msgpool_alloc %s %p\n", pool->name, msg);
20 msg->pool = pool;
21 }
22 return msg;
19} 23}
20 24
21static void free_fn(void *element, void *arg) 25static void msgpool_free(void *element, void *arg)
22{ 26{
23 ceph_msg_put(element); 27 struct ceph_msgpool *pool = arg;
28 struct ceph_msg *msg = element;
29
30 dout("msgpool_release %s %p\n", pool->name, msg);
31 msg->pool = NULL;
32 ceph_msg_put(msg);
24} 33}
25 34
26int ceph_msgpool_init(struct ceph_msgpool *pool, 35int ceph_msgpool_init(struct ceph_msgpool *pool,
27 int front_len, int size, bool blocking, const char *name) 36 int front_len, int size, bool blocking, const char *name)
28{ 37{
38 dout("msgpool %s init\n", name);
29 pool->front_len = front_len; 39 pool->front_len = front_len;
30 pool->pool = mempool_create(size, alloc_fn, free_fn, pool); 40 pool->pool = mempool_create(size, msgpool_alloc, msgpool_free, pool);
31 if (!pool->pool) 41 if (!pool->pool)
32 return -ENOMEM; 42 return -ENOMEM;
33 pool->name = name; 43 pool->name = name;
@@ -36,14 +46,17 @@ int ceph_msgpool_init(struct ceph_msgpool *pool,
36 46
37void ceph_msgpool_destroy(struct ceph_msgpool *pool) 47void ceph_msgpool_destroy(struct ceph_msgpool *pool)
38{ 48{
49 dout("msgpool %s destroy\n", pool->name);
39 mempool_destroy(pool->pool); 50 mempool_destroy(pool->pool);
40} 51}
41 52
42struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, 53struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
43 int front_len) 54 int front_len)
44{ 55{
56 struct ceph_msg *msg;
57
45 if (front_len > pool->front_len) { 58 if (front_len > pool->front_len) {
46 pr_err("msgpool_get pool %s need front %d, pool size is %d\n", 59 dout("msgpool_get %s need front %d, pool size is %d\n",
47 pool->name, front_len, pool->front_len); 60 pool->name, front_len, pool->front_len);
48 WARN_ON(1); 61 WARN_ON(1);
49 62
@@ -51,14 +64,19 @@ struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
51 return ceph_msg_new(0, front_len, GFP_NOFS); 64 return ceph_msg_new(0, front_len, GFP_NOFS);
52 } 65 }
53 66
54 return mempool_alloc(pool->pool, GFP_NOFS); 67 msg = mempool_alloc(pool->pool, GFP_NOFS);
68 dout("msgpool_get %s %p\n", pool->name, msg);
69 return msg;
55} 70}
56 71
57void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg) 72void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg)
58{ 73{
74 dout("msgpool_put %s %p\n", pool->name, msg);
75
59 /* reset msg front_len; user may have changed it */ 76 /* reset msg front_len; user may have changed it */
60 msg->front.iov_len = pool->front_len; 77 msg->front.iov_len = pool->front_len;
61 msg->hdr.front_len = cpu_to_le32(pool->front_len); 78 msg->hdr.front_len = cpu_to_le32(pool->front_len);
62 79
63 kref_init(&msg->kref); /* retake single ref */ 80 kref_init(&msg->kref); /* retake single ref */
81 mempool_free(msg, pool->pool);
64} 82}
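
msgpool_alloc() now stamps each message with its owning pool and msgpool_free() clears that back-pointer before releasing the message, so a message can later be routed back to the mempool it came from. A reduced model of that callback pairing (the struct layout here is illustrative, not the real ceph_msg):

#include <stdio.h>
#include <stdlib.h>

/* Reduced model of the msgpool callbacks: allocation stamps the
 * message with its pool, release clears the back-pointer. */
struct pool;

struct msg {
	struct pool *pool;	/* owning pool, NULL when unpooled */
};

struct pool {
	const char *name;
};

static struct msg *msgpool_alloc(struct pool *p)
{
	struct msg *m = calloc(1, sizeof(*m));

	if (m)
		m->pool = p;	/* remember where to return it */
	return m;
}

static void msgpool_free(struct msg *m)
{
	printf("releasing msg from pool %s\n", m->pool->name);
	m->pool = NULL;
	free(m);
}

int main(void)
{
	struct pool p = { .name = "osd_op" };
	struct msg *m = msgpool_alloc(&p);

	if (m)
		msgpool_free(m);
	return 0;
}
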
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index ce310eee708d..16836a7df7a6 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -685,6 +685,18 @@ static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
685 put_osd(osd); 685 put_osd(osd);
686} 686}
687 687
688static void remove_all_osds(struct ceph_osd_client *osdc)
689{
690 dout("remove_all_osds %p\n", osdc);
691 mutex_lock(&osdc->request_mutex);
692 while (!RB_EMPTY_ROOT(&osdc->osds)) {
693 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
694 struct ceph_osd, o_node);
695 __remove_osd(osdc, osd);
696 }
697 mutex_unlock(&osdc->request_mutex);
698}
699
688static void __move_osd_to_lru(struct ceph_osd_client *osdc, 700static void __move_osd_to_lru(struct ceph_osd_client *osdc,
689 struct ceph_osd *osd) 701 struct ceph_osd *osd)
690{ 702{
@@ -701,14 +713,14 @@ static void __remove_osd_from_lru(struct ceph_osd *osd)
701 list_del_init(&osd->o_osd_lru); 713 list_del_init(&osd->o_osd_lru);
702} 714}
703 715
704static void remove_old_osds(struct ceph_osd_client *osdc, int remove_all) 716static void remove_old_osds(struct ceph_osd_client *osdc)
705{ 717{
706 struct ceph_osd *osd, *nosd; 718 struct ceph_osd *osd, *nosd;
707 719
708 dout("__remove_old_osds %p\n", osdc); 720 dout("__remove_old_osds %p\n", osdc);
709 mutex_lock(&osdc->request_mutex); 721 mutex_lock(&osdc->request_mutex);
710 list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) { 722 list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
711 if (!remove_all && time_before(jiffies, osd->lru_ttl)) 723 if (time_before(jiffies, osd->lru_ttl))
712 break; 724 break;
713 __remove_osd(osdc, osd); 725 __remove_osd(osdc, osd);
714 } 726 }
@@ -751,6 +763,7 @@ static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
751 struct rb_node *parent = NULL; 763 struct rb_node *parent = NULL;
752 struct ceph_osd *osd = NULL; 764 struct ceph_osd *osd = NULL;
753 765
766 dout("__insert_osd %p osd%d\n", new, new->o_osd);
754 while (*p) { 767 while (*p) {
755 parent = *p; 768 parent = *p;
756 osd = rb_entry(parent, struct ceph_osd, o_node); 769 osd = rb_entry(parent, struct ceph_osd, o_node);
@@ -1144,7 +1157,7 @@ static void handle_osds_timeout(struct work_struct *work)
1144 1157
1145 dout("osds timeout\n"); 1158 dout("osds timeout\n");
1146 down_read(&osdc->map_sem); 1159 down_read(&osdc->map_sem);
1147 remove_old_osds(osdc, 0); 1160 remove_old_osds(osdc);
1148 up_read(&osdc->map_sem); 1161 up_read(&osdc->map_sem);
1149 1162
1150 schedule_delayed_work(&osdc->osds_timeout_work, 1163 schedule_delayed_work(&osdc->osds_timeout_work,
@@ -1862,8 +1875,7 @@ void ceph_osdc_stop(struct ceph_osd_client *osdc)
1862 ceph_osdmap_destroy(osdc->osdmap); 1875 ceph_osdmap_destroy(osdc->osdmap);
1863 osdc->osdmap = NULL; 1876 osdc->osdmap = NULL;
1864 } 1877 }
1865 remove_old_osds(osdc, 1); 1878 remove_all_osds(osdc);
1866 WARN_ON(!RB_EMPTY_ROOT(&osdc->osds));
1867 mempool_destroy(osdc->req_mempool); 1879 mempool_destroy(osdc->req_mempool);
1868 ceph_msgpool_destroy(&osdc->msgpool_op); 1880 ceph_msgpool_destroy(&osdc->msgpool_op);
1869 ceph_msgpool_destroy(&osdc->msgpool_op_reply); 1881 ceph_msgpool_destroy(&osdc->msgpool_op_reply);
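
remove_all_osds() replaces the old remove_all flag by draining the rbtree directly: under request_mutex it repeatedly removes the first entry until the tree is empty. The same drain-until-empty pattern, modelled on a singly linked list for brevity:

#include <stdio.h>
#include <stdlib.h>

/* Drain-until-empty pattern from remove_all_osds(), modelled on a
 * singly linked list instead of an rbtree. */
struct node {
	int id;
	struct node *next;
};

static void remove_node(struct node **head)
{
	struct node *n = *head;

	*head = n->next;
	printf("removed osd%d\n", n->id);
	free(n);
}

int main(void)
{
	struct node *head = NULL;
	int i;

	for (i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		n->id = i;
		n->next = head;
		head = n;
	}
	/* keep taking the first element until the structure is empty */
	while (head)
		remove_node(&head);
	return 0;
}
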