 MAINTAINERS                                        |   1
 arch/x86/include/asm/efi.h                         |   6
 arch/x86/kernel/apic/io_apic.c                     |   3
 arch/x86/kernel/cpu/perf_event.c                   |  10
 arch/x86/kernel/cpu/perf_event_intel_uncore.c      |  45
 arch/x86/kernel/cpu/perf_event_knc.c               |  93
 arch/x86/kernel/cpu/perf_event_p6.c                | 127
 arch/x86/kernel/e820.c                             |   3
 arch/x86/kernel/setup.c                            |  27
 arch/x86/mm/init.c                                 |  58
 arch/x86/mm/init_64.c                              |   7
 arch/x86/platform/efi/efi.c                        |  47
 arch/x86/platform/efi/efi_64.c                     |   7
 drivers/gpu/drm/radeon/atombios_encoders.c         |   5
 drivers/gpu/drm/radeon/evergreen_cs.c              |   1
 drivers/gpu/drm/radeon/ni.c                        |  45
 drivers/gpu/drm/radeon/nid.h                       |   1
 drivers/gpu/drm/radeon/radeon_atpx_handler.c       |   6
 drivers/gpu/drm/radeon/radeon_device.c             |  60
 drivers/gpu/drm/radeon/radeon_gart.c               |  22
 drivers/gpu/drm/radeon/radeon_gem.c                |  18
 drivers/gpu/drm/radeon/radeon_legacy_encoders.c    |   5
 drivers/gpu/drm/radeon/radeon_object.c             |  19
 drivers/gpu/drm/radeon/si.c                        |  47
 fs/btrfs/backref.c                                 |  28
 fs/btrfs/backref.h                                 |   4
 fs/btrfs/ctree.c                                   |  70
 fs/btrfs/ctree.h                                   |   3
 fs/btrfs/extent_io.c                               |   4
 fs/btrfs/inode.c                                   |   7
 fs/btrfs/ioctl.c                                   |   6
 fs/btrfs/qgroup.c                                  |  17
 fs/btrfs/send.c                                    | 156
 fs/btrfs/transaction.c                             |   2
 fs/btrfs/volumes.c                                 |   7
 fs/lockd/mon.c                                     |  57
 include/drm/drm_pciids.h                           |   3
 include/linux/memblock.h                           |   1
 include/linux/perf_event.h                         |  10
 mm/memblock.c                                      |  24
 net/sunrpc/xprtsock.c                              |  41
 tools/perf/builtin-help.c                          |   2
 tools/perf/builtin-trace.c                         |  18
 tools/perf/util/parse-events-test.c                |  12
 tools/perf/util/thread.c                           |   1
 45 files changed, 746 insertions(+), 390 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 027ec2bfa135..f39a82dc0260 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2802,6 +2802,7 @@ F: sound/usb/misc/ua101.c
 EXTENSIBLE FIRMWARE INTERFACE (EFI)
 M:	Matt Fleming <matt.fleming@intel.com>
 L:	linux-efi@vger.kernel.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
 S:	Maintained
 F:	Documentation/x86/efi-stub.txt
 F:	arch/ia64/kernel/efi.c
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index c9dcc181d4d1..6e8fdf5ad113 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -35,7 +35,7 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
 #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6)	\
	efi_call_virt(f, a1, a2, a3, a4, a5, a6)

-#define efi_ioremap(addr, size, type)		ioremap_cache(addr, size)
+#define efi_ioremap(addr, size, type, attr)	ioremap_cache(addr, size)

 #else /* !CONFIG_X86_32 */

@@ -89,7 +89,7 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
	(u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))

 extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
-				 u32 type);
+				 u32 type, u64 attribute);

 #endif /* CONFIG_X86_32 */

@@ -98,6 +98,8 @@ extern void efi_set_executable(efi_memory_desc_t *md, bool executable);
 extern int efi_memblock_x86_reserve_range(void);
 extern void efi_call_phys_prelog(void);
 extern void efi_call_phys_epilog(void);
+extern void efi_unmap_memmap(void);
+extern void efi_memory_uc(u64 addr, unsigned long size);

 #ifndef CONFIG_EFI
 /*
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index c265593ec2cd..1817fa911024 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2257,6 +2257,9 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
 			continue;

 		cfg = irq_cfg(irq);
+		if (!cfg)
+			continue;
+
 		raw_spin_lock(&desc->lock);

 		/*
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 3373f84d1397..4a3374e61a93 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -208,12 +208,14 @@ static bool check_hw_exists(void)
 	}

 	/*
-	 * Now write a value and read it back to see if it matches,
-	 * this is needed to detect certain hardware emulators (qemu/kvm)
-	 * that don't trap on the MSR access and always return 0s.
+	 * Read the current value, change it and read it back to see if it
+	 * matches, this is needed to detect certain hardware emulators
+	 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
 	 */
-	val = 0xabcdUL;
 	reg = x86_pmu_event_addr(0);
+	if (rdmsrl_safe(reg, &val))
+		goto msr_fail;
+	val ^= 0xffffUL;
 	ret = wrmsrl_safe(reg, val);
 	ret |= rdmsrl_safe(reg, &val_new);
 	if (ret || val != val_new)
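The check_hw_exists() change above replaces a hardcoded magic write with a read-modify-write probe: read the counter MSR, flip the low 16 bits, write the result back and read it again. An emulator that swallows MSR writes (the qemu/kvm case called out in the comment) returns a stale value and fails the comparison. A minimal stand-alone sketch of the same probe pattern; fake_rdmsr()/fake_wrmsr() are assumed stubs for illustration, not kernel APIs:

#include <stdbool.h>
#include <stdint.h>

static uint64_t shadow;	/* pretend MSR backing store */

static int fake_rdmsr(uint64_t *val) { *val = shadow; return 0; }
static int fake_wrmsr(uint64_t val)  { shadow = val;  return 0; }

/* Returns false if writes are silently dropped, as on a non-trapping
 * emulator; mirrors the xor/write/readback shape of the hunk above. */
static bool writes_stick(void)
{
	uint64_t val, val_new;

	if (fake_rdmsr(&val))
		return false;
	val ^= 0xffffUL;	/* perturb the low bits */
	if (fake_wrmsr(val) || fake_rdmsr(&val_new))
		return false;
	return val == val_new;
}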
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 5df8d32ba91e..3cf3d97cce3a 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -118,22 +118,24 @@ static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
 {
 	struct pci_dev *pdev = box->pci_dev;
 	int box_ctl = uncore_pci_box_ctl(box);
-	u32 config;
+	u32 config = 0;

-	pci_read_config_dword(pdev, box_ctl, &config);
-	config |= SNBEP_PMON_BOX_CTL_FRZ;
-	pci_write_config_dword(pdev, box_ctl, config);
+	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
+		config |= SNBEP_PMON_BOX_CTL_FRZ;
+		pci_write_config_dword(pdev, box_ctl, config);
+	}
 }

 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
 {
 	struct pci_dev *pdev = box->pci_dev;
 	int box_ctl = uncore_pci_box_ctl(box);
-	u32 config;
+	u32 config = 0;

-	pci_read_config_dword(pdev, box_ctl, &config);
-	config &= ~SNBEP_PMON_BOX_CTL_FRZ;
-	pci_write_config_dword(pdev, box_ctl, config);
+	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
+		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
+		pci_write_config_dword(pdev, box_ctl, config);
+	}
 }

 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
@@ -156,7 +158,7 @@ static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct pe
 {
 	struct pci_dev *pdev = box->pci_dev;
 	struct hw_perf_event *hwc = &event->hw;
-	u64 count;
+	u64 count = 0;

 	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
 	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
@@ -603,11 +605,12 @@ static struct pci_driver snbep_uncore_pci_driver = {
 /*
  * build pci bus to socket mapping
  */
-static void snbep_pci2phy_map_init(void)
+static int snbep_pci2phy_map_init(void)
 {
 	struct pci_dev *ubox_dev = NULL;
 	int i, bus, nodeid;
-	u32 config;
+	int err = 0;
+	u32 config = 0;

 	while (1) {
 		/* find the UBOX device */
@@ -618,10 +621,14 @@ static void snbep_pci2phy_map_init(void)
 			break;
 		bus = ubox_dev->bus->number;
 		/* get the Node ID of the local register */
-		pci_read_config_dword(ubox_dev, 0x40, &config);
+		err = pci_read_config_dword(ubox_dev, 0x40, &config);
+		if (err)
+			break;
 		nodeid = config;
 		/* get the Node ID mapping */
-		pci_read_config_dword(ubox_dev, 0x54, &config);
+		err = pci_read_config_dword(ubox_dev, 0x54, &config);
+		if (err)
+			break;
 		/*
 		 * every three bits in the Node ID mapping register maps
 		 * to a particular node.
@@ -633,7 +640,11 @@ static void snbep_pci2phy_map_init(void)
 			}
 		}
 	};
-	return;
+
+	if (ubox_dev)
+		pci_dev_put(ubox_dev);
+
+	return err ? pcibios_err_to_errno(err) : 0;
 }
 /* end of Sandy Bridge-EP uncore support */

@@ -1547,7 +1558,6 @@ void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
-	int port;

 	/* adjust the main event selector and extra register index */
 	if (reg1->idx % 2) {
@@ -1559,7 +1569,6 @@ void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
 	}

 	/* adjust extra register config */
-	port = reg1->idx / 6 + box->pmu->pmu_idx * 4;
 	switch (reg1->idx % 6) {
 	case 2:
 		/* shift the 8~15 bits to the 0~7 bits */
@@ -2578,9 +2587,11 @@ static int __init uncore_pci_init(void)

 	switch (boot_cpu_data.x86_model) {
 	case 45: /* Sandy Bridge-EP */
+		ret = snbep_pci2phy_map_init();
+		if (ret)
+			return ret;
 		pci_uncores = snbep_pci_uncores;
 		uncore_pci_driver = &snbep_uncore_pci_driver;
-		snbep_pci2phy_map_init();
 		break;
 	default:
 		return 0;
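The uncore hunks above all follow one shape: treat the return value of pci_read_config_dword() as authoritative, act on the config value only on success, drop the UBOX device reference, and translate the PCIBIOS error into a normal errno for uncore_pci_init() to propagate. A stand-alone sketch of that shape; stub_read_config() and stub_err_to_errno() are assumed stand-ins for the PCI helpers named in the diff:

#define PCIBIOS_DEVICE_NOT_FOUND	0x86
#define ENODEV				19

static int stub_read_config(unsigned int off, unsigned int *val)
{
	*val = 0;		/* pretend the config-space read succeeded */
	return 0;
}

static int stub_err_to_errno(int err)
{
	/* translate a bus-level error code to a kernel-style errno */
	return err == PCIBIOS_DEVICE_NOT_FOUND ? -ENODEV : -err;
}

static int map_init_shape(void)
{
	unsigned int config = 0;
	int err;

	err = stub_read_config(0x40, &config);		/* node id */
	if (!err)
		err = stub_read_config(0x54, &config);	/* node id mapping */

	return err ? stub_err_to_errno(err) : 0;
}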
diff --git a/arch/x86/kernel/cpu/perf_event_knc.c b/arch/x86/kernel/cpu/perf_event_knc.c
index 7c46bfdbc373..4b7731bf23a8 100644
--- a/arch/x86/kernel/cpu/perf_event_knc.c
+++ b/arch/x86/kernel/cpu/perf_event_knc.c
@@ -3,6 +3,8 @@
 #include <linux/perf_event.h>
 #include <linux/types.h>

+#include <asm/hardirq.h>
+
 #include "perf_event.h"

 static const u64 knc_perfmon_event_map[] =
@@ -173,30 +175,100 @@ static void knc_pmu_enable_all(int added)
 static inline void
 knc_pmu_disable_event(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 	u64 val;

 	val = hwc->config;
-	if (cpuc->enabled)
-		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
+	val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;

 	(void)wrmsrl_safe(hwc->config_base + hwc->idx, val);
 }

 static void knc_pmu_enable_event(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 	u64 val;

 	val = hwc->config;
-	if (cpuc->enabled)
-		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
+	val |= ARCH_PERFMON_EVENTSEL_ENABLE;

 	(void)wrmsrl_safe(hwc->config_base + hwc->idx, val);
 }

+static inline u64 knc_pmu_get_status(void)
+{
+	u64 status;
+
+	rdmsrl(MSR_KNC_IA32_PERF_GLOBAL_STATUS, status);
+
+	return status;
+}
+
+static inline void knc_pmu_ack_status(u64 ack)
+{
+	wrmsrl(MSR_KNC_IA32_PERF_GLOBAL_OVF_CONTROL, ack);
+}
+
+static int knc_pmu_handle_irq(struct pt_regs *regs)
+{
+	struct perf_sample_data data;
+	struct cpu_hw_events *cpuc;
+	int handled = 0;
+	int bit, loops;
+	u64 status;
+
+	cpuc = &__get_cpu_var(cpu_hw_events);
+
+	knc_pmu_disable_all();
+
+	status = knc_pmu_get_status();
+	if (!status) {
+		knc_pmu_enable_all(0);
+		return handled;
+	}
+
+	loops = 0;
+again:
+	knc_pmu_ack_status(status);
+	if (++loops > 100) {
+		WARN_ONCE(1, "perf: irq loop stuck!\n");
+		perf_event_print_debug();
+		goto done;
+	}
+
+	inc_irq_stat(apic_perf_irqs);
+
+	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
+		struct perf_event *event = cpuc->events[bit];
+
+		handled++;
+
+		if (!test_bit(bit, cpuc->active_mask))
+			continue;
+
+		if (!intel_pmu_save_and_restart(event))
+			continue;
+
+		perf_sample_data_init(&data, 0, event->hw.last_period);
+
+		if (perf_event_overflow(event, &data, regs))
+			x86_pmu_stop(event, 0);
+	}
+
+	/*
+	 * Repeat if there is more work to be done:
+	 */
+	status = knc_pmu_get_status();
+	if (status)
+		goto again;
+
+done:
+	knc_pmu_enable_all(0);
+
+	return handled;
+}
+
+
 PMU_FORMAT_ATTR(event,	"config:0-7"	);
 PMU_FORMAT_ATTR(umask,	"config:8-15"	);
 PMU_FORMAT_ATTR(edge,	"config:18"	);
@@ -214,7 +286,7 @@ static struct attribute *intel_knc_formats_attr[] = {

 static __initconst struct x86_pmu knc_pmu = {
 	.name			= "knc",
-	.handle_irq		= x86_pmu_handle_irq,
+	.handle_irq		= knc_pmu_handle_irq,
 	.disable_all		= knc_pmu_disable_all,
 	.enable_all		= knc_pmu_enable_all,
 	.enable			= knc_pmu_enable_event,
@@ -226,12 +298,11 @@ static __initconst struct x86_pmu knc_pmu = {
 	.event_map		= knc_pmu_event_map,
 	.max_events		= ARRAY_SIZE(knc_perfmon_event_map),
 	.apic			= 1,
-	.max_period		= (1ULL << 31) - 1,
+	.max_period		= (1ULL << 39) - 1,
 	.version		= 0,
 	.num_counters		= 2,
-	/* in theory 40 bits, early silicon is buggy though */
-	.cntval_bits		= 32,
-	.cntval_mask		= (1ULL << 32) - 1,
+	.cntval_bits		= 40,
+	.cntval_mask		= (1ULL << 40) - 1,
 	.get_event_constraints	= x86_get_event_constraints,
 	.event_constraints	= knc_event_constraints,
 	.format_attrs		= intel_knc_formats_attr,
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index e4dd0f7a0453..7d0270bd793e 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -8,13 +8,106 @@
  */
 static const u64 p6_perfmon_event_map[] =
 {
-  [PERF_COUNT_HW_CPU_CYCLES]		= 0x0079,
-  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
-  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0f2e,
-  [PERF_COUNT_HW_CACHE_MISSES]		= 0x012e,
-  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
-  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
-  [PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
+  [PERF_COUNT_HW_CPU_CYCLES]		= 0x0079,	/* CPU_CLK_UNHALTED */
+  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,	/* INST_RETIRED     */
+  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0f2e,	/* L2_RQSTS:M:E:S:I */
+  [PERF_COUNT_HW_CACHE_MISSES]		= 0x012e,	/* L2_RQSTS:I       */
+  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,	/* BR_INST_RETIRED  */
+  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,	/* BR_MISS_PRED_RETIRED */
+  [PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,	/* BUS_DRDY_CLOCKS  */
+  [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00a2,	/* RESOURCE_STALLS  */
+
+};
+
+static __initconst u64 p6_hw_cache_event_ids
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D) ] = {
+	[ C(OP_READ) ] = {
+		[ C(RESULT_ACCESS) ] = 0x0043,	/* DATA_MEM_REFS       */
+		[ C(RESULT_MISS)   ] = 0x0045,	/* DCU_LINES_IN        */
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = 0,
+		[ C(RESULT_MISS)   ] = 0x0f29,	/* L2_LD:M:E:S:I       */
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = 0,
+		[ C(RESULT_MISS)   ] = 0,
+	},
+ },
+ [ C(L1I ) ] = {
+	[ C(OP_READ) ] = {
+		[ C(RESULT_ACCESS) ] = 0x0080,	/* IFU_IFETCH          */
+		[ C(RESULT_MISS)   ] = 0x0f28,	/* L2_IFETCH:M:E:S:I   */
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = -1,
+		[ C(RESULT_MISS)   ] = -1,
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = 0,
+		[ C(RESULT_MISS)   ] = 0,
+	},
+ },
+ [ C(LL  ) ] = {
+	[ C(OP_READ) ] = {
+		[ C(RESULT_ACCESS) ] = 0,
+		[ C(RESULT_MISS)   ] = 0,
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = 0,
+		[ C(RESULT_MISS)   ] = 0x0025,	/* L2_M_LINES_INM      */
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = 0,
+		[ C(RESULT_MISS)   ] = 0,
+	},
+ },
+ [ C(DTLB) ] = {
+	[ C(OP_READ) ] = {
+		[ C(RESULT_ACCESS) ] = 0x0043,	/* DATA_MEM_REFS       */
+		[ C(RESULT_MISS)   ] = 0,
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = 0,
+		[ C(RESULT_MISS)   ] = 0,
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = 0,
+		[ C(RESULT_MISS)   ] = 0,
+	},
+ },
+ [ C(ITLB) ] = {
+	[ C(OP_READ) ] = {
+		[ C(RESULT_ACCESS) ] = 0x0080,	/* IFU_IFETCH          */
+		[ C(RESULT_MISS)   ] = 0x0085,	/* ITLB_MISS           */
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = -1,
+		[ C(RESULT_MISS)   ] = -1,
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = -1,
+		[ C(RESULT_MISS)   ] = -1,
+	},
+ },
+ [ C(BPU ) ] = {
+	[ C(OP_READ) ] = {
+		[ C(RESULT_ACCESS) ] = 0x00c4,	/* BR_INST_RETIRED      */
+		[ C(RESULT_MISS)   ] = 0x00c5,	/* BR_MISS_PRED_RETIRED */
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = -1,
+		[ C(RESULT_MISS)   ] = -1,
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = -1,
+		[ C(RESULT_MISS)   ] = -1,
+	},
+ },
 };

 static u64 p6_pmu_event_map(int hw_event)
@@ -34,7 +127,7 @@ static struct event_constraint p6_event_constraints[] =
 {
 	INTEL_EVENT_CONSTRAINT(0xc1, 0x1),	/* FLOPS */
 	INTEL_EVENT_CONSTRAINT(0x10, 0x1),	/* FP_COMP_OPS_EXE */
-	INTEL_EVENT_CONSTRAINT(0x11, 0x1),	/* FP_ASSIST */
+	INTEL_EVENT_CONSTRAINT(0x11, 0x2),	/* FP_ASSIST */
 	INTEL_EVENT_CONSTRAINT(0x12, 0x2),	/* MUL */
 	INTEL_EVENT_CONSTRAINT(0x13, 0x2),	/* DIV */
 	INTEL_EVENT_CONSTRAINT(0x14, 0x1),	/* CYCLES_DIV_BUSY */
@@ -64,25 +157,25 @@ static void p6_pmu_enable_all(int added)
 static inline void
 p6_pmu_disable_event(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 	u64 val = P6_NOP_EVENT;

-	if (cpuc->enabled)
-		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
-
 	(void)wrmsrl_safe(hwc->config_base, val);
 }

 static void p6_pmu_enable_event(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 	u64 val;

 	val = hwc->config;
-	if (cpuc->enabled)
-		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
+
+	/*
+	 * p6 only has a global event enable, set on PerfEvtSel0
+	 * We "disable" events by programming P6_NOP_EVENT
+	 * and we rely on p6_pmu_enable_all() being called
+	 * to actually enable the events.
+	 */

 	(void)wrmsrl_safe(hwc->config_base, val);
 }
@@ -158,5 +251,9 @@ __init int p6_pmu_init(void)

 	x86_pmu = p6_pmu;

+	memcpy(hw_cache_event_ids, p6_hw_cache_event_ids,
+		sizeof(hw_cache_event_ids));
+
+
 	return 0;
 }
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index ed858e9e9a74..df06ade26bef 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1077,6 +1077,9 @@ void __init memblock_x86_fill(void)
 		memblock_add(ei->addr, ei->size);
 	}

+	/* throw away partial pages */
+	memblock_trim_memory(PAGE_SIZE);
+
 	memblock_dump_all();
 }

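memblock_trim_memory(PAGE_SIZE), added above, clips every memblock region inward to page boundaries so the direct-mapping code never sees a partial page. The underlying arithmetic is just round-up/round-down to the alignment, dropping regions that vanish; a stand-alone sketch assuming a 4KiB page:

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL

/* Round [*start, *end) inward to page boundaries; returns false if
 * nothing page-aligned is left (such a region gets thrown away). */
static bool trim_to_pages(uint64_t *start, uint64_t *end)
{
	uint64_t s = (*start + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
	uint64_t e = *end & ~(PAGE_SIZE - 1);

	if (s >= e)
		return false;
	*start = s;
	*end = e;
	return true;
}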
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 468e98dfd44e..ca45696f30fb 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -921,18 +921,19 @@ void __init setup_arch(char **cmdline_p)
 #ifdef CONFIG_X86_64
 	if (max_pfn > max_low_pfn) {
 		int i;
-		for (i = 0; i < e820.nr_map; i++) {
-			struct e820entry *ei = &e820.map[i];
+		unsigned long start, end;
+		unsigned long start_pfn, end_pfn;

-			if (ei->addr + ei->size <= 1UL << 32)
-				continue;
+		for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn,
+							 NULL) {

-			if (ei->type == E820_RESERVED)
+			end = PFN_PHYS(end_pfn);
+			if (end <= (1UL<<32))
 				continue;

+			start = PFN_PHYS(start_pfn);
 			max_pfn_mapped = init_memory_mapping(
-				ei->addr < 1UL << 32 ? 1UL << 32 : ei->addr,
-				ei->addr + ei->size);
+						max((1UL<<32), start), end);
 		}

 		/* can we preseve max_low_pfn ?*/
@@ -1048,6 +1049,18 @@ void __init setup_arch(char **cmdline_p)
 	arch_init_ideal_nops();

 	register_refined_jiffies(CLOCK_TICK_RATE);
+
+#ifdef CONFIG_EFI
+	/* Once setup is done above, disable efi_enabled on mismatched
+	 * firmware/kernel architectures since there is no support for
+	 * runtime services.
+	 */
+	if (efi_enabled && IS_ENABLED(CONFIG_X86_64) != efi_64bit) {
+		pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
+		efi_unmap_memmap();
+		efi_enabled = 0;
+	}
+#endif
 }

 #ifdef CONFIG_X86_32
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index ab1f6a93b527..d7aea41563b3 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -35,40 +35,44 @@ struct map_range {
 	unsigned page_size_mask;
 };

-static void __init find_early_table_space(struct map_range *mr, unsigned long end,
-					  int use_pse, int use_gbpages)
+/*
+ * First calculate space needed for kernel direct mapping page tables to cover
+ * mr[0].start to mr[nr_range - 1].end, while accounting for possible 2M and 1GB
+ * pages. Then find enough contiguous space for those page tables.
+ */
+static void __init find_early_table_space(struct map_range *mr, int nr_range)
 {
-	unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
+	int i;
+	unsigned long puds = 0, pmds = 0, ptes = 0, tables;
+	unsigned long start = 0, good_end;
 	phys_addr_t base;

-	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
-
-	if (use_gbpages) {
-		unsigned long extra;
-
-		extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
-		pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
-	} else
-		pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
-
-	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
-
-	if (use_pse) {
-		unsigned long extra;
-
-		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
+	for (i = 0; i < nr_range; i++) {
+		unsigned long range, extra;
+
+		range = mr[i].end - mr[i].start;
+		puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;
+
+		if (mr[i].page_size_mask & (1 << PG_LEVEL_1G)) {
+			extra = range - ((range >> PUD_SHIFT) << PUD_SHIFT);
+			pmds += (extra + PMD_SIZE - 1) >> PMD_SHIFT;
+		} else {
+			pmds += (range + PMD_SIZE - 1) >> PMD_SHIFT;
+		}
+
+		if (mr[i].page_size_mask & (1 << PG_LEVEL_2M)) {
+			extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT);
 #ifdef CONFIG_X86_32
-		extra += PMD_SIZE;
+			extra += PMD_SIZE;
 #endif
-		/* The first 2/4M doesn't use large pages. */
-		if (mr->start < PMD_SIZE)
-			extra += mr->end - mr->start;
-
-		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	} else
-		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
+			ptes += (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		} else {
+			ptes += (range + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		}
+	}

+	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
+	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
 	tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);

 #ifdef CONFIG_X86_32
@@ -86,7 +90,7 @@ static void __init find_early_table_space(struct map_range *mr, unsigned long en
 	pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);

 	printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx]\n",
-		end - 1, pgt_buf_start << PAGE_SHIFT,
+		mr[nr_range - 1].end - 1, pgt_buf_start << PAGE_SHIFT,
 		(pgt_buf_top << PAGE_SHIFT) - 1);
 }

@@ -267,7 +271,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	 * nodes are discovered.
 	 */
 	if (!after_bootmem)
-		find_early_table_space(&mr[0], end, use_pse, use_gbpages);
+		find_early_table_space(mr, nr_range);

 	for (i = 0; i < nr_range; i++)
 		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 2b6b4a3c8beb..3baff255adac 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -386,7 +386,8 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
 		 * these mappings are more intelligent.
 		 */
 		if (pte_val(*pte)) {
-			pages++;
+			if (!after_bootmem)
+				pages++;
 			continue;
 		}

@@ -451,6 +452,8 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 			 * attributes.
 			 */
 			if (page_size_mask & (1 << PG_LEVEL_2M)) {
+				if (!after_bootmem)
+					pages++;
 				last_map_addr = next;
 				continue;
 			}
@@ -526,6 +529,8 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 		 * attributes.
 		 */
 		if (page_size_mask & (1 << PG_LEVEL_1G)) {
+			if (!after_bootmem)
+				pages++;
 			last_map_addr = next;
 			continue;
 		}
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index aded2a91162a..ad4439145f85 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -70,11 +70,15 @@ EXPORT_SYMBOL(efi);
 struct efi_memory_map memmap;

 bool efi_64bit;
-static bool efi_native;

 static struct efi efi_phys __initdata;
 static efi_system_table_t efi_systab __initdata;

+static inline bool efi_is_native(void)
+{
+	return IS_ENABLED(CONFIG_X86_64) == efi_64bit;
+}
+
 static int __init setup_noefi(char *arg)
 {
 	efi_enabled = 0;
@@ -420,7 +424,7 @@ void __init efi_reserve_boot_services(void)
 	}
 }

-static void __init efi_unmap_memmap(void)
+void __init efi_unmap_memmap(void)
 {
 	if (memmap.map) {
 		early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size);
@@ -432,7 +436,7 @@ void __init efi_free_boot_services(void)
 {
 	void *p;

-	if (!efi_native)
+	if (!efi_is_native())
 		return;

 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
@@ -684,12 +688,10 @@ void __init efi_init(void)
 		return;
 	}
 	efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab;
-	efi_native = !efi_64bit;
 #else
 	efi_phys.systab = (efi_system_table_t *)
 			  (boot_params.efi_info.efi_systab |
 			  ((__u64)boot_params.efi_info.efi_systab_hi<<32));
-	efi_native = efi_64bit;
 #endif

 	if (efi_systab_init(efi_phys.systab)) {
@@ -723,7 +725,7 @@ void __init efi_init(void)
 	 * that doesn't match the kernel 32/64-bit mode.
 	 */

-	if (!efi_native)
+	if (!efi_is_native())
 		pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
 	else if (efi_runtime_init()) {
 		efi_enabled = 0;
@@ -735,7 +737,7 @@ void __init efi_init(void)
 		return;
 	}
 #ifdef CONFIG_X86_32
-	if (efi_native) {
+	if (efi_is_native()) {
 		x86_platform.get_wallclock = efi_get_time;
 		x86_platform.set_wallclock = efi_set_rtc_mmss;
 	}
@@ -810,6 +812,16 @@ void __iomem *efi_lookup_mapped_addr(u64 phys_addr)
 	return NULL;
 }

+void efi_memory_uc(u64 addr, unsigned long size)
+{
+	unsigned long page_shift = 1UL << EFI_PAGE_SHIFT;
+	u64 npages;
+
+	npages = round_up(size, page_shift) / page_shift;
+	memrange_efi_to_native(&addr, &npages);
+	set_memory_uc(addr, npages);
+}
+
 /*
  * This function will switch the EFI runtime services to virtual mode.
  * Essentially, look through the EFI memmap and map every region that
@@ -823,7 +835,7 @@ void __init efi_enter_virtual_mode(void)
 	efi_memory_desc_t *md, *prev_md = NULL;
 	efi_status_t status;
 	unsigned long size;
-	u64 end, systab, addr, npages, end_pfn;
+	u64 end, systab, end_pfn;
 	void *p, *va, *new_memmap = NULL;
 	int count = 0;

@@ -834,7 +846,7 @@ void __init efi_enter_virtual_mode(void)
 	 * non-native EFI
 	 */

-	if (!efi_native) {
+	if (!efi_is_native()) {
 		efi_unmap_memmap();
 		return;
 	}
@@ -879,10 +891,14 @@ void __init efi_enter_virtual_mode(void)
 		end_pfn = PFN_UP(end);
 		if (end_pfn <= max_low_pfn_mapped
 		    || (end_pfn > (1UL << (32 - PAGE_SHIFT))
-			&& end_pfn <= max_pfn_mapped))
+			&& end_pfn <= max_pfn_mapped)) {
 			va = __va(md->phys_addr);
-		else
-			va = efi_ioremap(md->phys_addr, size, md->type);
+
+			if (!(md->attribute & EFI_MEMORY_WB))
+				efi_memory_uc((u64)(unsigned long)va, size);
+		} else
+			va = efi_ioremap(md->phys_addr, size,
+					 md->type, md->attribute);

 		md->virt_addr = (u64) (unsigned long) va;

@@ -892,13 +908,6 @@ void __init efi_enter_virtual_mode(void)
 			continue;
 		}

-		if (!(md->attribute & EFI_MEMORY_WB)) {
-			addr = md->virt_addr;
-			npages = md->num_pages;
-			memrange_efi_to_native(&addr, &npages);
-			set_memory_uc(addr, npages);
-		}
-
 		systab = (u64) (unsigned long) efi_phys.systab;
 		if (md->phys_addr <= systab && systab < end) {
 			systab += md->virt_addr - md->phys_addr;
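One detail worth noting in efi_memory_uc() above: EFI pages are always 4KiB (EFI_PAGE_SHIFT is 12), so the byte size is rounded up to whole EFI pages before memrange_efi_to_native() converts the range and set_memory_uc() marks it uncached. The local named page_shift actually holds the page size, 1 << EFI_PAGE_SHIFT. The size-to-pages conversion alone, as a stand-alone sketch:

#include <stdint.h>

#define EFI_PAGE_SHIFT 12	/* EFI pages are 4KiB regardless of kernel page size */

static uint64_t efi_size_to_pages(uint64_t size)
{
	uint64_t page_size = (uint64_t)1 << EFI_PAGE_SHIFT;

	/* round_up(size, page_size) / page_size */
	return (size + page_size - 1) / page_size;
}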
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index ac3aa54e2654..95fd505dfeb6 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -82,7 +82,7 @@ void __init efi_call_phys_epilog(void)
 }

 void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
-				 u32 type)
+				 u32 type, u64 attribute)
 {
 	unsigned long last_map_pfn;

@@ -92,8 +92,11 @@ void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
 	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
 	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
 		unsigned long top = last_map_pfn << PAGE_SHIFT;
-		efi_ioremap(top, size - (top - phys_addr), type);
+		efi_ioremap(top, size - (top - phys_addr), type, attribute);
 	}

+	if (!(attribute & EFI_MEMORY_WB))
+		efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);
+
 	return (void __iomem *)__va(phys_addr);
 }
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 49cbb3795a10..ba498f8e47a2 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -184,6 +184,7 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
 	struct radeon_backlight_privdata *pdata;
 	struct radeon_encoder_atom_dig *dig;
 	u8 backlight_level;
+	char bl_name[16];

 	if (!radeon_encoder->enc_priv)
 		return;
@@ -203,7 +204,9 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
 	memset(&props, 0, sizeof(props));
 	props.max_brightness = RADEON_MAX_BL_LEVEL;
 	props.type = BACKLIGHT_RAW;
-	bd = backlight_device_register("radeon_bl", &drm_connector->kdev,
+	snprintf(bl_name, sizeof(bl_name),
+		 "radeon_bl%d", dev->primary->index);
+	bd = backlight_device_register(bl_name, &drm_connector->kdev,
 				       pdata, &radeon_atom_backlight_ops, &props);
 	if (IS_ERR(bd)) {
 		DRM_ERROR("Backlight registration failed\n");
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 573ed1bc6cf7..30271b641913 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -2829,6 +2829,7 @@ static bool evergreen_vm_reg_valid(u32 reg)
 	case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
 		return true;
 	default:
+		DRM_ERROR("Invalid register 0x%x in CS\n", reg);
 		return false;
 	}
 }
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 8c74c729586d..81e6a568c29d 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1538,26 +1538,31 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
 {
 	struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
 	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-	int i;

-	radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, 1 + count * 2));
-	radeon_ring_write(ring, pe);
-	radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
-	for (i = 0; i < count; ++i) {
-		uint64_t value = 0;
-		if (flags & RADEON_VM_PAGE_SYSTEM) {
-			value = radeon_vm_map_gart(rdev, addr);
-			value &= 0xFFFFFFFFFFFFF000ULL;
-			addr += incr;
-
-		} else if (flags & RADEON_VM_PAGE_VALID) {
-			value = addr;
-			addr += incr;
-		}
+	while (count) {
+		unsigned ndw = 1 + count * 2;
+		if (ndw > 0x3FFF)
+			ndw = 0x3FFF;
+
+		radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
+		radeon_ring_write(ring, pe);
+		radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+		for (; ndw > 1; ndw -= 2, --count, pe += 8) {
+			uint64_t value = 0;
+			if (flags & RADEON_VM_PAGE_SYSTEM) {
+				value = radeon_vm_map_gart(rdev, addr);
+				value &= 0xFFFFFFFFFFFFF000ULL;
+				addr += incr;
+
+			} else if (flags & RADEON_VM_PAGE_VALID) {
+				value = addr;
+				addr += incr;
+			}

-		value |= r600_flags;
-		radeon_ring_write(ring, value);
-		radeon_ring_write(ring, upper_32_bits(value));
+			value |= r600_flags;
+			radeon_ring_write(ring, value);
+			radeon_ring_write(ring, upper_32_bits(value));
+		}
 	}
 }

@@ -1586,4 +1591,8 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
 	/* bits 0-7 are the VM contexts0-7 */
 	radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
 	radeon_ring_write(ring, 1 << vm->id);
+
+	/* sync PFP to ME, otherwise we might get invalid PFP reads */
+	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+	radeon_ring_write(ring, 0x0);
 }
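The cayman_vm_set_page() rewrite above exists because a PM4 packet header only carries a 14-bit dword count: one ME_WRITE packet can hold at most 0x3FFF dwords, i.e. the header plus 0x1FFF two-dword PTEs, so large page-table updates must be split across several packets. A stand-alone sketch of that chunking; emit() is an assumed stand-in for radeon_ring_write():

#include <stdio.h>

static void emit(unsigned int dw)
{
	printf("dw 0x%x\n", dw);	/* stand-in for radeon_ring_write() */
}

static void write_ptes_chunked(unsigned int count)
{
	while (count) {
		unsigned int ndw = 1 + count * 2;	/* header + 2 dwords per PTE */

		if (ndw > 0x3FFF)			/* 14-bit packet size limit */
			ndw = 0x3FFF;

		emit(ndw);				/* packet header (size) */
		for (; ndw > 1; ndw -= 2, --count) {
			emit(0);			/* PTE low dword */
			emit(0);			/* PTE high dword */
		}
	}
}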
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 2423d1b5d385..cbef6815907a 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -502,6 +502,7 @@
 #define	PACKET3_MPEG_INDEX				0x3A
 #define	PACKET3_WAIT_REG_MEM				0x3C
 #define	PACKET3_MEM_WRITE				0x3D
+#define	PACKET3_PFP_SYNC_ME				0x42
 #define	PACKET3_SURFACE_SYNC				0x43
 #              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
 #              define PACKET3_CB1_DEST_BASE_ENA    (1 << 7)
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 1aa3f910b993..37f6a907aea4 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -87,7 +87,7 @@ static union acpi_object *radeon_atpx_call(acpi_handle handle, int function,
 		atpx_arg_elements[1].integer.value = 0;
 	}

-	status = acpi_evaluate_object(handle, "ATPX", &atpx_arg, &buffer);
+	status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);

 	/* Fail only if calling the method fails and ATPX is supported */
 	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
@@ -373,11 +373,11 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
 }

 /**
- * radeon_atpx_pci_probe_handle - look up the ATRM and ATPX handles
+ * radeon_atpx_pci_probe_handle - look up the ATPX handle
  *
  * @pdev: pci device
  *
- * Look up the ATPX and ATRM handles (all asics).
+ * Look up the ATPX handles (all asics).
  * Returns true if the handles are found, false if not.
  */
 static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index bd13ca09eb62..e2f5f888c374 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -355,6 +355,8 @@ int radeon_wb_init(struct radeon_device *rdev)
  */
 void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
 {
+	uint64_t limit = (uint64_t)radeon_vram_limit << 20;
+
 	mc->vram_start = base;
 	if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
 		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
@@ -368,8 +370,8 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
 		mc->mc_vram_size = mc->aper_size;
 	}
 	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
-	if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size)
-		mc->real_vram_size = radeon_vram_limit;
+	if (limit && limit < mc->real_vram_size)
+		mc->real_vram_size = limit;
 	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
 		 mc->mc_vram_size >> 20, mc->vram_start,
 		 mc->vram_end, mc->real_vram_size >> 20);
@@ -835,6 +837,19 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
 }

 /**
+ * radeon_check_pot_argument - check that argument is a power of two
+ *
+ * @arg: value to check
+ *
+ * Validates that a certain argument is a power of two (all asics).
+ * Returns true if argument is valid.
+ */
+static bool radeon_check_pot_argument(int arg)
+{
+	return (arg & (arg - 1)) == 0;
+}
+
+/**
  * radeon_check_arguments - validate module params
  *
  * @rdev: radeon_device pointer
@@ -845,52 +860,25 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
 static void radeon_check_arguments(struct radeon_device *rdev)
 {
 	/* vramlimit must be a power of two */
-	switch (radeon_vram_limit) {
-	case 0:
-	case 4:
-	case 8:
-	case 16:
-	case 32:
-	case 64:
-	case 128:
-	case 256:
-	case 512:
-	case 1024:
-	case 2048:
-	case 4096:
-		break;
-	default:
+	if (!radeon_check_pot_argument(radeon_vram_limit)) {
 		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
 				radeon_vram_limit);
 		radeon_vram_limit = 0;
-		break;
 	}
-	radeon_vram_limit = radeon_vram_limit << 20;
+
 	/* gtt size must be power of two and greater or equal to 32M */
-	switch (radeon_gart_size) {
-	case 4:
-	case 8:
-	case 16:
+	if (radeon_gart_size < 32) {
 		dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
 				radeon_gart_size);
 		radeon_gart_size = 512;
-		break;
-	case 32:
-	case 64:
-	case 128:
-	case 256:
-	case 512:
-	case 1024:
-	case 2048:
-	case 4096:
-		break;
-	default:
+
+	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
 		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
 				radeon_gart_size);
 		radeon_gart_size = 512;
-		break;
 	}
-	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
+
 	/* AGP mode can only be -1, 1, 2, 4, 8 */
 	switch (radeon_agpmode) {
 	case -1:
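radeon_check_pot_argument() above leans on the classic bit trick: for a positive arg, arg & (arg - 1) clears the lowest set bit, so the result is zero exactly when a single bit is set. It also accepts 0, which the vram-limit check relies on (0 means "no limit"), while the gart-size path rejects too-small values separately before the power-of-two test. A stand-alone illustration:

#include <assert.h>
#include <stdbool.h>

static bool pot_or_zero(int arg)
{
	return (arg & (arg - 1)) == 0;	/* clears the lowest set bit */
}

int main(void)
{
	assert(pot_or_zero(0));		/* 0 passes: treated as "unset" */
	assert(pot_or_zero(512));	/* single bit set */
	assert(!pot_or_zero(768));	/* 512 + 256: two bits set */
	return 0;
}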
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index a7677dd1ce98..4debd60e5aa6 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -355,14 +355,13 @@ int radeon_gart_init(struct radeon_device *rdev)
 	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
 		 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
 	/* Allocate pages table */
-	rdev->gart.pages = kzalloc(sizeof(void *) * rdev->gart.num_cpu_pages,
-				   GFP_KERNEL);
+	rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages);
 	if (rdev->gart.pages == NULL) {
 		radeon_gart_fini(rdev);
 		return -ENOMEM;
 	}
-	rdev->gart.pages_addr = kzalloc(sizeof(dma_addr_t) *
-					rdev->gart.num_cpu_pages, GFP_KERNEL);
+	rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
+					rdev->gart.num_cpu_pages);
 	if (rdev->gart.pages_addr == NULL) {
 		radeon_gart_fini(rdev);
 		return -ENOMEM;
@@ -388,8 +387,8 @@ void radeon_gart_fini(struct radeon_device *rdev)
 		radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
 	}
 	rdev->gart.ready = false;
-	kfree(rdev->gart.pages);
-	kfree(rdev->gart.pages_addr);
+	vfree(rdev->gart.pages);
+	vfree(rdev->gart.pages_addr);
 	rdev->gart.pages = NULL;
 	rdev->gart.pages_addr = NULL;

@@ -577,7 +576,7 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
  *
  * Global and local mutex must be locked!
  */
-int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
+static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
 {
 	struct radeon_vm *vm_evict;

@@ -1036,8 +1035,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
 		pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
 		pte += (addr & mask) * 8;

-		if (((last_pte + 8 * count) != pte) ||
-		    ((count + nptes) > 1 << 11)) {
+		if ((last_pte + 8 * count) != pte) {

 			if (count) {
 				radeon_asic_vm_set_page(rdev, last_pte,
@@ -1148,17 +1146,17 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,

 	if (RADEON_VM_BLOCK_SIZE > 11)
 		/* reserve space for one header for every 2k dwords */
-		ndw += (nptes >> 11) * 3;
+		ndw += (nptes >> 11) * 4;
 	else
 		/* reserve space for one header for
 		    every (1 << BLOCK_SIZE) entries */
-		ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 3;
+		ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;

 	/* reserve space for pte addresses */
 	ndw += nptes * 2;

 	/* reserve space for one header for every 2k dwords */
-	ndw += (npdes >> 11) * 3;
+	ndw += (npdes >> 11) * 4;

 	/* reserve space for pde addresses */
 	ndw += npdes * 2;
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index f38fbcc46935..fe5c1f6b7957 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -53,6 +53,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
 			     struct drm_gem_object **obj)
 {
 	struct radeon_bo *robj;
+	unsigned long max_size;
 	int r;

 	*obj = NULL;
@@ -60,11 +61,26 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
 	if (alignment < PAGE_SIZE) {
 		alignment = PAGE_SIZE;
 	}
+
+	/* maximum bo size is the minimum between visible vram and gtt size */
+	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
+	if (size > max_size) {
+		printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n",
+		       __func__, __LINE__, size >> 20, max_size >> 20);
+		return -ENOMEM;
+	}
+
+retry:
 	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
 	if (r) {
-		if (r != -ERESTARTSYS)
+		if (r != -ERESTARTSYS) {
+			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
+				initial_domain |= RADEON_GEM_DOMAIN_GTT;
+				goto retry;
+			}
 			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
 				  size, initial_domain, alignment, r);
+		}
 		return r;
 	}
 	*obj = &robj->gem_base;
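The retry logic moved into radeon_gem_object_create() above implements a domain fallback: if a VRAM allocation fails for any reason other than a pending signal (-ERESTARTSYS), widen the placement to VRAM|GTT and try once more before reporting the error. A stand-alone sketch of that control flow; fake_alloc() and the local ERESTARTSYS value are assumptions for illustration:

#include <assert.h>
#include <errno.h>

#define DOM_VRAM	0x1
#define DOM_GTT		0x2
#define ERESTARTSYS	512	/* kernel-internal value, assumed here */

static int tries;

static int fake_alloc(unsigned int domain)
{
	/* pretend VRAM is full: the first VRAM-only attempt fails */
	return (tries++ == 0 && domain == DOM_VRAM) ? -ENOMEM : 0;
}

static int create_with_fallback(unsigned int domain)
{
	int r;
retry:
	r = fake_alloc(domain);
	if (r && r != -ERESTARTSYS && domain == DOM_VRAM) {
		domain |= DOM_GTT;	/* fall back to system memory */
		goto retry;
	}
	return r;
}

int main(void)
{
	assert(create_with_fallback(DOM_VRAM) == 0);	/* succeeds on the retry */
	return 0;
}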
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index a13ad9d707cf..0063df9d166d 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -370,6 +370,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
 	struct backlight_properties props;
 	struct radeon_backlight_privdata *pdata;
 	uint8_t backlight_level;
+	char bl_name[16];

 	if (!radeon_encoder->enc_priv)
 		return;
@@ -389,7 +390,9 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
 	memset(&props, 0, sizeof(props));
 	props.max_brightness = RADEON_MAX_BL_LEVEL;
 	props.type = BACKLIGHT_RAW;
-	bd = backlight_device_register("radeon_bl", &drm_connector->kdev,
+	snprintf(bl_name, sizeof(bl_name),
+		 "radeon_bl%d", dev->primary->index);
+	bd = backlight_device_register(bl_name, &drm_connector->kdev,
 				       pdata, &radeon_backlight_ops, &props);
 	if (IS_ERR(bd)) {
 		DRM_ERROR("Backlight registration failed\n");
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 8b27dd6e3144..b91118ccef86 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -105,7 +105,6 @@ int radeon_bo_create(struct radeon_device *rdev,
 	struct radeon_bo *bo;
 	enum ttm_bo_type type;
 	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
-	unsigned long max_size = 0;
 	size_t acc_size;
 	int r;

@@ -121,18 +120,9 @@ int radeon_bo_create(struct radeon_device *rdev,
 	}
 	*bo_ptr = NULL;

-	/* maximun bo size is the minimun btw visible vram and gtt size */
-	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
-	if ((page_align << PAGE_SHIFT) >= max_size) {
-		printk(KERN_WARNING "%s:%d alloc size %ldM bigger than %ldMb limit\n",
-			__func__, __LINE__, page_align >> (20 - PAGE_SHIFT), max_size >> 20);
-		return -ENOMEM;
-	}
-
 	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
 				       sizeof(struct radeon_bo));

-retry:
 	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
 	if (bo == NULL)
 		return -ENOMEM;
@@ -154,15 +144,6 @@ retry:
 			acc_size, sg, &radeon_ttm_bo_destroy);
 	up_read(&rdev->pm.mclk_lock);
 	if (unlikely(r != 0)) {
-		if (r != -ERESTARTSYS) {
-			if (domain == RADEON_GEM_DOMAIN_VRAM) {
-				domain |= RADEON_GEM_DOMAIN_GTT;
-				goto retry;
-			}
-			dev_err(rdev->dev,
-				"object_init failed for (%lu, 0x%08X)\n",
-				size, domain);
-		}
 		return r;
 	}
 	*bo_ptr = bo;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index df8dd7701643..b0db712060fb 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2808,26 +2808,31 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
2808{ 2808{
2809 struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index]; 2809 struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
2810 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); 2810 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
2811 int i;
2812 uint64_t value;
2813 2811
2814 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 2 + count * 2)); 2812 while (count) {
2815 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 2813 unsigned ndw = 2 + count * 2;
2816 WRITE_DATA_DST_SEL(1))); 2814 if (ndw > 0x3FFE)
2817 radeon_ring_write(ring, pe); 2815 ndw = 0x3FFE;
2818 radeon_ring_write(ring, upper_32_bits(pe)); 2816
2819 for (i = 0; i < count; ++i) { 2817 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
2820 if (flags & RADEON_VM_PAGE_SYSTEM) { 2818 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2821 value = radeon_vm_map_gart(rdev, addr); 2819 WRITE_DATA_DST_SEL(1)));
2822 value &= 0xFFFFFFFFFFFFF000ULL; 2820 radeon_ring_write(ring, pe);
2823 } else if (flags & RADEON_VM_PAGE_VALID) 2821 radeon_ring_write(ring, upper_32_bits(pe));
2824 value = addr; 2822 for (; ndw > 2; ndw -= 2, --count, pe += 8) {
2825 else 2823 uint64_t value;
2826 value = 0; 2824 if (flags & RADEON_VM_PAGE_SYSTEM) {
2827 addr += incr; 2825 value = radeon_vm_map_gart(rdev, addr);
2828 value |= r600_flags; 2826 value &= 0xFFFFFFFFFFFFF000ULL;
2829 radeon_ring_write(ring, value); 2827 } else if (flags & RADEON_VM_PAGE_VALID)
2830 radeon_ring_write(ring, upper_32_bits(value)); 2828 value = addr;
2829 else
2830 value = 0;
2831 addr += incr;
2832 value |= r600_flags;
2833 radeon_ring_write(ring, value);
2834 radeon_ring_write(ring, upper_32_bits(value));
2835 }
2831 } 2836 }
2832} 2837}
2833 2838
@@ -2868,6 +2873,10 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
2868 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); 2873 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
2869 radeon_ring_write(ring, 0); 2874 radeon_ring_write(ring, 0);
2870 radeon_ring_write(ring, 1 << vm->id); 2875 radeon_ring_write(ring, 1 << vm->id);
2876
2877 /* sync PFP to ME, otherwise we might get invalid PFP reads */
2878 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
2879 radeon_ring_write(ring, 0x0);
2871} 2880}
2872 2881
2873/* 2882/*
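
Note how the rewritten si_vm_set_page() no longer assumes all entries fit in one WRITE_DATA packet: each iteration caps the packet at 0x3FFE dwords (two header dwords plus two dwords per page-table entry) and loops until count is drained. A hedged, self-contained sketch of that chunking arithmetic, with emit() standing in for radeon_ring_write() and the packet header dwords omitted:

#include <stdio.h>
#include <stdint.h>

#define MAX_NDW 0x3FFE          /* per-packet dword budget from the patch */

static void emit(uint32_t v) { printf("dw %08x\n", v); }

static void set_pages(uint64_t pe, uint64_t addr, unsigned count,
                      uint32_t incr, uint64_t flags)
{
        while (count) {
                unsigned ndw = 2 + count * 2;

                if (ndw > MAX_NDW)
                        ndw = MAX_NDW;          /* clamp to one packet */

                emit((uint32_t)pe);             /* destination, low then high */
                emit((uint32_t)(pe >> 32));
                /* (ndw - 2) / 2 entries fit in this packet */
                for (; ndw > 2; ndw -= 2, --count, pe += 8) {
                        uint64_t value = addr | flags;

                        addr += incr;
                        emit((uint32_t)value);
                        emit((uint32_t)(value >> 32));
                }
        }
}

int main(void)
{
        set_pages(0x1000, 0x100000, 5, 0x1000, 1);
        return 0;
}
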
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index f3187938e081..208d8aa5b07e 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -283,9 +283,7 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
283 goto out; 283 goto out;
284 } 284 }
285 285
286 rcu_read_lock(); 286 root_level = btrfs_old_root_level(root, time_seq);
287 root_level = btrfs_header_level(root->node);
288 rcu_read_unlock();
289 287
290 if (root_level + 1 == level) 288 if (root_level + 1 == level)
291 goto out; 289 goto out;
@@ -1177,16 +1175,15 @@ int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
1177 return ret; 1175 return ret;
1178} 1176}
1179 1177
1180static char *ref_to_path(struct btrfs_root *fs_root, 1178char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
1181 struct btrfs_path *path, 1179 u32 name_len, unsigned long name_off,
1182 u32 name_len, unsigned long name_off, 1180 struct extent_buffer *eb_in, u64 parent,
1183 struct extent_buffer *eb_in, u64 parent, 1181 char *dest, u32 size)
1184 char *dest, u32 size)
1185{ 1182{
1186 int slot; 1183 int slot;
1187 u64 next_inum; 1184 u64 next_inum;
1188 int ret; 1185 int ret;
1189 s64 bytes_left = size - 1; 1186 s64 bytes_left = ((s64)size) - 1;
1190 struct extent_buffer *eb = eb_in; 1187 struct extent_buffer *eb = eb_in;
1191 struct btrfs_key found_key; 1188 struct btrfs_key found_key;
1192 int leave_spinning = path->leave_spinning; 1189 int leave_spinning = path->leave_spinning;
@@ -1266,10 +1263,10 @@ char *btrfs_iref_to_path(struct btrfs_root *fs_root,
1266 struct extent_buffer *eb_in, u64 parent, 1263 struct extent_buffer *eb_in, u64 parent,
1267 char *dest, u32 size) 1264 char *dest, u32 size)
1268{ 1265{
1269 return ref_to_path(fs_root, path, 1266 return btrfs_ref_to_path(fs_root, path,
1270 btrfs_inode_ref_name_len(eb_in, iref), 1267 btrfs_inode_ref_name_len(eb_in, iref),
1271 (unsigned long)(iref + 1), 1268 (unsigned long)(iref + 1),
1272 eb_in, parent, dest, size); 1269 eb_in, parent, dest, size);
1273} 1270}
1274 1271
1275/* 1272/*
@@ -1715,9 +1712,8 @@ static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
1715 ipath->fspath->bytes_left - s_ptr : 0; 1712 ipath->fspath->bytes_left - s_ptr : 0;
1716 1713
1717 fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr; 1714 fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
1718 fspath = ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len, 1715 fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
1719 name_off, eb, inum, fspath_min, 1716 name_off, eb, inum, fspath_min, bytes_left);
1720 bytes_left);
1721 if (IS_ERR(fspath)) 1717 if (IS_ERR(fspath))
1722 return PTR_ERR(fspath); 1718 return PTR_ERR(fspath);
1723 1719
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index e75533043a5f..d61feca79455 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -62,6 +62,10 @@ int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
62char *btrfs_iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path, 62char *btrfs_iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
63 struct btrfs_inode_ref *iref, struct extent_buffer *eb, 63 struct btrfs_inode_ref *iref, struct extent_buffer *eb,
64 u64 parent, char *dest, u32 size); 64 u64 parent, char *dest, u32 size);
65char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
66 u32 name_len, unsigned long name_off,
67 struct extent_buffer *eb_in, u64 parent,
68 char *dest, u32 size);
65 69
66struct btrfs_data_container *init_data_container(u32 total_bytes); 70struct btrfs_data_container *init_data_container(u32 total_bytes);
67struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root, 71struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
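
Beyond the rename and export of btrfs_ref_to_path(), note the quiet bug fix in its body: bytes_left becomes ((s64)size) - 1 instead of size - 1. With a u32 size of 0, the old expression wrapped to 0xFFFFFFFF before the assignment widened it, so bytes_left looked like a huge budget instead of -1. A small demonstration with plain C types standing in for u32/s64:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t size = 0;

        int64_t wrong = size - 1;               /* wraps in 32-bit unsigned math */
        int64_t right = ((int64_t)size) - 1;    /* widen first, then subtract */

        printf("wrong = %lld\n", (long long)wrong);     /* 4294967295 */
        printf("right = %lld\n", (long long)right);     /* -1 */
        return 0;
}
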
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index b33436211000..cdfb4c49a806 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -596,6 +596,11 @@ tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
596 if (tree_mod_dont_log(fs_info, eb)) 596 if (tree_mod_dont_log(fs_info, eb))
597 return 0; 597 return 0;
598 598
599 /*
 600 * When we overwrite something during the move, we log these removals.
601 * This can only happen when we move towards the beginning of the
602 * buffer, i.e. dst_slot < src_slot.
603 */
599 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) { 604 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
600 ret = tree_mod_log_insert_key_locked(fs_info, eb, i + dst_slot, 605 ret = tree_mod_log_insert_key_locked(fs_info, eb, i + dst_slot,
601 MOD_LOG_KEY_REMOVE_WHILE_MOVING); 606 MOD_LOG_KEY_REMOVE_WHILE_MOVING);
@@ -647,8 +652,6 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
647 if (tree_mod_dont_log(fs_info, NULL)) 652 if (tree_mod_dont_log(fs_info, NULL))
648 return 0; 653 return 0;
649 654
650 __tree_mod_log_free_eb(fs_info, old_root);
651
652 ret = tree_mod_alloc(fs_info, flags, &tm); 655 ret = tree_mod_alloc(fs_info, flags, &tm);
653 if (ret < 0) 656 if (ret < 0)
654 goto out; 657 goto out;
@@ -926,12 +929,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
926 ret = btrfs_dec_ref(trans, root, buf, 1, 1); 929 ret = btrfs_dec_ref(trans, root, buf, 1, 1);
927 BUG_ON(ret); /* -ENOMEM */ 930 BUG_ON(ret); /* -ENOMEM */
928 } 931 }
929 /* 932 tree_mod_log_free_eb(root->fs_info, buf);
930 * don't log freeing in case we're freeing the root node, this
931 * is done by tree_mod_log_set_root_pointer later
932 */
933 if (buf != root->node && btrfs_header_level(buf) != 0)
934 tree_mod_log_free_eb(root->fs_info, buf);
935 clean_tree_block(trans, root, buf); 933 clean_tree_block(trans, root, buf);
936 *last_ref = 1; 934 *last_ref = 1;
937 } 935 }
@@ -1225,6 +1223,8 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1225 free_extent_buffer(eb); 1223 free_extent_buffer(eb);
1226 1224
1227 __tree_mod_log_rewind(eb_rewin, time_seq, tm); 1225 __tree_mod_log_rewind(eb_rewin, time_seq, tm);
1226 WARN_ON(btrfs_header_nritems(eb_rewin) >
1227 BTRFS_NODEPTRS_PER_BLOCK(fs_info->fs_root));
1228 1228
1229 return eb_rewin; 1229 return eb_rewin;
1230} 1230}
@@ -1241,9 +1241,11 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
1241{ 1241{
1242 struct tree_mod_elem *tm; 1242 struct tree_mod_elem *tm;
1243 struct extent_buffer *eb; 1243 struct extent_buffer *eb;
1244 struct extent_buffer *old;
1244 struct tree_mod_root *old_root = NULL; 1245 struct tree_mod_root *old_root = NULL;
1245 u64 old_generation = 0; 1246 u64 old_generation = 0;
1246 u64 logical; 1247 u64 logical;
1248 u32 blocksize;
1247 1249
1248 eb = btrfs_read_lock_root_node(root); 1250 eb = btrfs_read_lock_root_node(root);
1249 tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq); 1251 tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
@@ -1259,14 +1261,32 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
1259 } 1261 }
1260 1262
1261 tm = tree_mod_log_search(root->fs_info, logical, time_seq); 1263 tm = tree_mod_log_search(root->fs_info, logical, time_seq);
1262 if (old_root) 1264 if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1265 btrfs_tree_read_unlock(root->node);
1266 free_extent_buffer(root->node);
1267 blocksize = btrfs_level_size(root, old_root->level);
1268 old = read_tree_block(root, logical, blocksize, 0);
1269 if (!old) {
1270 pr_warn("btrfs: failed to read tree block %llu from get_old_root\n",
1271 logical);
1272 WARN_ON(1);
1273 } else {
1274 eb = btrfs_clone_extent_buffer(old);
1275 free_extent_buffer(old);
1276 }
1277 } else if (old_root) {
1278 btrfs_tree_read_unlock(root->node);
1279 free_extent_buffer(root->node);
1263 eb = alloc_dummy_extent_buffer(logical, root->nodesize); 1280 eb = alloc_dummy_extent_buffer(logical, root->nodesize);
1264 else 1281 } else {
1265 eb = btrfs_clone_extent_buffer(root->node); 1282 eb = btrfs_clone_extent_buffer(root->node);
1266 btrfs_tree_read_unlock(root->node); 1283 btrfs_tree_read_unlock(root->node);
1267 free_extent_buffer(root->node); 1284 free_extent_buffer(root->node);
1285 }
1286
1268 if (!eb) 1287 if (!eb)
1269 return NULL; 1288 return NULL;
1289 extent_buffer_get(eb);
1270 btrfs_tree_read_lock(eb); 1290 btrfs_tree_read_lock(eb);
1271 if (old_root) { 1291 if (old_root) {
1272 btrfs_set_header_bytenr(eb, eb->start); 1292 btrfs_set_header_bytenr(eb, eb->start);
@@ -1279,11 +1299,28 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
1279 __tree_mod_log_rewind(eb, time_seq, tm); 1299 __tree_mod_log_rewind(eb, time_seq, tm);
1280 else 1300 else
1281 WARN_ON(btrfs_header_level(eb) != 0); 1301 WARN_ON(btrfs_header_level(eb) != 0);
1282 extent_buffer_get(eb); 1302 WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));
1283 1303
1284 return eb; 1304 return eb;
1285} 1305}
1286 1306
1307int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
1308{
1309 struct tree_mod_elem *tm;
1310 int level;
1311
1312 tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
1313 if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
1314 level = tm->old_root.level;
1315 } else {
1316 rcu_read_lock();
1317 level = btrfs_header_level(root->node);
1318 rcu_read_unlock();
1319 }
1320
1321 return level;
1322}
1323
1287static inline int should_cow_block(struct btrfs_trans_handle *trans, 1324static inline int should_cow_block(struct btrfs_trans_handle *trans,
1288 struct btrfs_root *root, 1325 struct btrfs_root *root,
1289 struct extent_buffer *buf) 1326 struct extent_buffer *buf)
@@ -1725,6 +1762,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
1725 goto enospc; 1762 goto enospc;
1726 } 1763 }
1727 1764
1765 tree_mod_log_free_eb(root->fs_info, root->node);
1728 tree_mod_log_set_root_pointer(root, child); 1766 tree_mod_log_set_root_pointer(root, child);
1729 rcu_assign_pointer(root->node, child); 1767 rcu_assign_pointer(root->node, child);
1730 1768
@@ -2970,8 +3008,10 @@ static int push_node_left(struct btrfs_trans_handle *trans,
2970 push_items * sizeof(struct btrfs_key_ptr)); 3008 push_items * sizeof(struct btrfs_key_ptr));
2971 3009
2972 if (push_items < src_nritems) { 3010 if (push_items < src_nritems) {
2973 tree_mod_log_eb_move(root->fs_info, src, 0, push_items, 3011 /*
2974 src_nritems - push_items); 3012 * don't call tree_mod_log_eb_move here, key removal was already
3013 * fully logged by tree_mod_log_eb_copy above.
3014 */
2975 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0), 3015 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
2976 btrfs_node_key_ptr_offset(push_items), 3016 btrfs_node_key_ptr_offset(push_items),
2977 (src_nritems - push_items) * 3017 (src_nritems - push_items) *
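
The new btrfs_old_root_level() helper follows the tree-mod-log pattern used throughout this series: if the log recorded a root replacement at or before time_seq, report the logged pre-replacement level, otherwise read the live root (which the kernel does under rcu_read_lock()). A hedged sketch of that lookup shape, with an invented log-entry type:

#include <stdio.h>

struct mod_entry {
        int is_root_replace;    /* did this entry log a root swap? */
        int old_level;          /* root level before the swap */
};

static int old_root_level(const struct mod_entry *tm, int live_level)
{
        if (tm && tm->is_root_replace)
                return tm->old_level;   /* time-travel answer from the log */
        return live_level;              /* no relevant log entry: live value */
}

int main(void)
{
        struct mod_entry tm = { .is_root_replace = 1, .old_level = 2 };

        printf("%d\n", old_root_level(&tm, 3));         /* 2: logged value wins */
        printf("%d\n", old_root_level(NULL, 3));        /* 3: live value */
        return 0;
}
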
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 926c9ffc66d9..c72ead869507 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3120,6 +3120,7 @@ static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
3120{ 3120{
3121 return atomic_inc_return(&fs_info->tree_mod_seq); 3121 return atomic_inc_return(&fs_info->tree_mod_seq);
3122} 3122}
3123int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq);
3123 3124
3124/* root-item.c */ 3125/* root-item.c */
3125int btrfs_find_root_ref(struct btrfs_root *tree_root, 3126int btrfs_find_root_ref(struct btrfs_root *tree_root,
@@ -3338,6 +3339,8 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
3338int btrfs_update_inode(struct btrfs_trans_handle *trans, 3339int btrfs_update_inode(struct btrfs_trans_handle *trans,
3339 struct btrfs_root *root, 3340 struct btrfs_root *root,
3340 struct inode *inode); 3341 struct inode *inode);
3342int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
3343 struct btrfs_root *root, struct inode *inode);
3341int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode); 3344int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode);
3342int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode); 3345int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode);
3343int btrfs_orphan_cleanup(struct btrfs_root *root); 3346int btrfs_orphan_cleanup(struct btrfs_root *root);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 8036d3a84853..472873a94d96 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4110,8 +4110,8 @@ struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
4110 4110
4111 return eb; 4111 return eb;
4112err: 4112err:
4113 for (i--; i >= 0; i--) 4113 for (; i > 0; i--)
4114 __free_page(eb->pages[i]); 4114 __free_page(eb->pages[i - 1]);
4115 __free_extent_buffer(eb); 4115 __free_extent_buffer(eb);
4116 return NULL; 4116 return NULL;
4117} 4117}
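
The extent_io.c hunk is a classic unsigned-index trap: i is an unsigned long there, so the old for (i--; i >= 0; i--) can never terminate once i wraps past zero. Counting down while i > 0 and indexing i - 1 unwinds exactly the same pages safely. A self-contained illustration of the corrected idiom:

#include <stdlib.h>

int main(void)
{
        enum { N = 4 };
        void *pages[N];
        unsigned long i;

        for (i = 0; i < N; i++) {
                pages[i] = malloc(64);
                if (!pages[i])
                        goto err;       /* unwind the i pages already allocated */
        }
        /* success: i == N, the demo falls through and frees them all */
err:
        /* "i > 0" terminates even though i is unsigned; "i >= 0" after
         * i-- would wrap to ULONG_MAX and loop forever */
        for (; i > 0; i--)
                free(pages[i - 1]);
        return 0;
}
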
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 85a1e5053fe6..95542a1b3dfc 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -94,8 +94,6 @@ static noinline int cow_file_range(struct inode *inode,
94 struct page *locked_page, 94 struct page *locked_page,
95 u64 start, u64 end, int *page_started, 95 u64 start, u64 end, int *page_started,
96 unsigned long *nr_written, int unlock); 96 unsigned long *nr_written, int unlock);
97static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
98 struct btrfs_root *root, struct inode *inode);
99 97
100static int btrfs_init_inode_security(struct btrfs_trans_handle *trans, 98static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
101 struct inode *inode, struct inode *dir, 99 struct inode *inode, struct inode *dir,
@@ -2746,8 +2744,9 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2746 return btrfs_update_inode_item(trans, root, inode); 2744 return btrfs_update_inode_item(trans, root, inode);
2747} 2745}
2748 2746
2749static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, 2747noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
2750 struct btrfs_root *root, struct inode *inode) 2748 struct btrfs_root *root,
2749 struct inode *inode)
2751{ 2750{
2752 int ret; 2751 int ret;
2753 2752
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 61168805f175..8fcf9a59c28d 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -343,7 +343,8 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
343 return -EOPNOTSUPP; 343 return -EOPNOTSUPP;
344 if (copy_from_user(&range, arg, sizeof(range))) 344 if (copy_from_user(&range, arg, sizeof(range)))
345 return -EFAULT; 345 return -EFAULT;
346 if (range.start > total_bytes) 346 if (range.start > total_bytes ||
347 range.len < fs_info->sb->s_blocksize)
347 return -EINVAL; 348 return -EINVAL;
348 349
349 range.len = min(range.len, total_bytes - range.start); 350 range.len = min(range.len, total_bytes - range.start);
@@ -570,7 +571,8 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
570 ret = btrfs_commit_transaction(trans, 571 ret = btrfs_commit_transaction(trans,
571 root->fs_info->extent_root); 572 root->fs_info->extent_root);
572 } 573 }
573 BUG_ON(ret); 574 if (ret)
575 goto fail;
574 576
575 ret = pending_snapshot->error; 577 ret = pending_snapshot->error;
576 if (ret) 578 if (ret)
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 5039686df6ae..fe9d02c45f8e 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -790,8 +790,10 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
790 } 790 }
791 791
792 path = btrfs_alloc_path(); 792 path = btrfs_alloc_path();
793 if (!path) 793 if (!path) {
794 return -ENOMEM; 794 ret = -ENOMEM;
795 goto out_free_root;
796 }
795 797
796 key.objectid = 0; 798 key.objectid = 0;
797 key.type = BTRFS_QGROUP_STATUS_KEY; 799 key.type = BTRFS_QGROUP_STATUS_KEY;
@@ -800,7 +802,7 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
800 ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 802 ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
801 sizeof(*ptr)); 803 sizeof(*ptr));
802 if (ret) 804 if (ret)
803 goto out; 805 goto out_free_path;
804 806
805 leaf = path->nodes[0]; 807 leaf = path->nodes[0];
806 ptr = btrfs_item_ptr(leaf, path->slots[0], 808 ptr = btrfs_item_ptr(leaf, path->slots[0],
@@ -818,8 +820,15 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
818 fs_info->quota_root = quota_root; 820 fs_info->quota_root = quota_root;
819 fs_info->pending_quota_state = 1; 821 fs_info->pending_quota_state = 1;
820 spin_unlock(&fs_info->qgroup_lock); 822 spin_unlock(&fs_info->qgroup_lock);
821out: 823out_free_path:
822 btrfs_free_path(path); 824 btrfs_free_path(path);
825out_free_root:
826 if (ret) {
827 free_extent_buffer(quota_root->node);
828 free_extent_buffer(quota_root->commit_root);
829 kfree(quota_root);
830 }
831out:
823 return ret; 832 return ret;
824} 833}
825 834
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index c7beb543a4a8..e78b297b0b00 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -745,31 +745,36 @@ typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
745 void *ctx); 745 void *ctx);
746 746
747/* 747/*
748 * Helper function to iterate the entries in ONE btrfs_inode_ref. 748 * Helper function to iterate the entries in ONE btrfs_inode_ref or
749 * btrfs_inode_extref.
749 * The iterate callback may return a non zero value to stop iteration. This can 750 * The iterate callback may return a non zero value to stop iteration. This can
750 * be a negative value for error codes or 1 to simply stop it. 751 * be a negative value for error codes or 1 to simply stop it.
751 * 752 *
752 * path must point to the INODE_REF when called. 753 * path must point to the INODE_REF or INODE_EXTREF when called.
753 */ 754 */
754static int iterate_inode_ref(struct send_ctx *sctx, 755static int iterate_inode_ref(struct send_ctx *sctx,
755 struct btrfs_root *root, struct btrfs_path *path, 756 struct btrfs_root *root, struct btrfs_path *path,
756 struct btrfs_key *found_key, int resolve, 757 struct btrfs_key *found_key, int resolve,
757 iterate_inode_ref_t iterate, void *ctx) 758 iterate_inode_ref_t iterate, void *ctx)
758{ 759{
759 struct extent_buffer *eb; 760 struct extent_buffer *eb = path->nodes[0];
760 struct btrfs_item *item; 761 struct btrfs_item *item;
761 struct btrfs_inode_ref *iref; 762 struct btrfs_inode_ref *iref;
763 struct btrfs_inode_extref *extref;
762 struct btrfs_path *tmp_path; 764 struct btrfs_path *tmp_path;
763 struct fs_path *p; 765 struct fs_path *p;
764 u32 cur; 766 u32 cur = 0;
765 u32 len;
766 u32 total; 767 u32 total;
767 int slot; 768 int slot = path->slots[0];
768 u32 name_len; 769 u32 name_len;
769 char *start; 770 char *start;
770 int ret = 0; 771 int ret = 0;
771 int num; 772 int num = 0;
772 int index; 773 int index;
774 u64 dir;
775 unsigned long name_off;
776 unsigned long elem_size;
777 unsigned long ptr;
773 778
774 p = fs_path_alloc_reversed(sctx); 779 p = fs_path_alloc_reversed(sctx);
775 if (!p) 780 if (!p)
@@ -781,24 +786,40 @@ static int iterate_inode_ref(struct send_ctx *sctx,
781 return -ENOMEM; 786 return -ENOMEM;
782 } 787 }
783 788
784 eb = path->nodes[0];
785 slot = path->slots[0];
786 item = btrfs_item_nr(eb, slot);
787 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
788 cur = 0;
789 len = 0;
790 total = btrfs_item_size(eb, item);
791 789
792 num = 0; 790 if (found_key->type == BTRFS_INODE_REF_KEY) {
791 ptr = (unsigned long)btrfs_item_ptr(eb, slot,
792 struct btrfs_inode_ref);
793 item = btrfs_item_nr(eb, slot);
794 total = btrfs_item_size(eb, item);
795 elem_size = sizeof(*iref);
796 } else {
797 ptr = btrfs_item_ptr_offset(eb, slot);
798 total = btrfs_item_size_nr(eb, slot);
799 elem_size = sizeof(*extref);
800 }
801
793 while (cur < total) { 802 while (cur < total) {
794 fs_path_reset(p); 803 fs_path_reset(p);
795 804
796 name_len = btrfs_inode_ref_name_len(eb, iref); 805 if (found_key->type == BTRFS_INODE_REF_KEY) {
797 index = btrfs_inode_ref_index(eb, iref); 806 iref = (struct btrfs_inode_ref *)(ptr + cur);
807 name_len = btrfs_inode_ref_name_len(eb, iref);
808 name_off = (unsigned long)(iref + 1);
809 index = btrfs_inode_ref_index(eb, iref);
810 dir = found_key->offset;
811 } else {
812 extref = (struct btrfs_inode_extref *)(ptr + cur);
813 name_len = btrfs_inode_extref_name_len(eb, extref);
814 name_off = (unsigned long)&extref->name;
815 index = btrfs_inode_extref_index(eb, extref);
816 dir = btrfs_inode_extref_parent(eb, extref);
817 }
818
798 if (resolve) { 819 if (resolve) {
799 start = btrfs_iref_to_path(root, tmp_path, iref, eb, 820 start = btrfs_ref_to_path(root, tmp_path, name_len,
800 found_key->offset, p->buf, 821 name_off, eb, dir,
801 p->buf_len); 822 p->buf, p->buf_len);
802 if (IS_ERR(start)) { 823 if (IS_ERR(start)) {
803 ret = PTR_ERR(start); 824 ret = PTR_ERR(start);
804 goto out; 825 goto out;
@@ -809,9 +830,10 @@ static int iterate_inode_ref(struct send_ctx *sctx,
809 p->buf_len + p->buf - start); 830 p->buf_len + p->buf - start);
810 if (ret < 0) 831 if (ret < 0)
811 goto out; 832 goto out;
812 start = btrfs_iref_to_path(root, tmp_path, iref, 833 start = btrfs_ref_to_path(root, tmp_path,
813 eb, found_key->offset, p->buf, 834 name_len, name_off,
814 p->buf_len); 835 eb, dir,
836 p->buf, p->buf_len);
815 if (IS_ERR(start)) { 837 if (IS_ERR(start)) {
816 ret = PTR_ERR(start); 838 ret = PTR_ERR(start);
817 goto out; 839 goto out;
@@ -820,21 +842,16 @@ static int iterate_inode_ref(struct send_ctx *sctx,
820 } 842 }
821 p->start = start; 843 p->start = start;
822 } else { 844 } else {
823 ret = fs_path_add_from_extent_buffer(p, eb, 845 ret = fs_path_add_from_extent_buffer(p, eb, name_off,
824 (unsigned long)(iref + 1), name_len); 846 name_len);
825 if (ret < 0) 847 if (ret < 0)
826 goto out; 848 goto out;
827 } 849 }
828 850
829 851 cur += elem_size + name_len;
830 len = sizeof(*iref) + name_len; 852 ret = iterate(num, dir, index, p, ctx);
831 iref = (struct btrfs_inode_ref *)((char *)iref + len);
832 cur += len;
833
834 ret = iterate(num, found_key->offset, index, p, ctx);
835 if (ret) 853 if (ret)
836 goto out; 854 goto out;
837
838 num++; 855 num++;
839 } 856 }
840 857
@@ -998,7 +1015,8 @@ static int get_inode_path(struct send_ctx *sctx, struct btrfs_root *root,
998 } 1015 }
999 btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]); 1016 btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
1000 if (found_key.objectid != ino || 1017 if (found_key.objectid != ino ||
1001 found_key.type != BTRFS_INODE_REF_KEY) { 1018 (found_key.type != BTRFS_INODE_REF_KEY &&
1019 found_key.type != BTRFS_INODE_EXTREF_KEY)) {
1002 ret = -ENOENT; 1020 ret = -ENOENT;
1003 goto out; 1021 goto out;
1004 } 1022 }
@@ -1551,8 +1569,8 @@ static int get_first_ref(struct send_ctx *sctx,
1551 struct btrfs_key key; 1569 struct btrfs_key key;
1552 struct btrfs_key found_key; 1570 struct btrfs_key found_key;
1553 struct btrfs_path *path; 1571 struct btrfs_path *path;
1554 struct btrfs_inode_ref *iref;
1555 int len; 1572 int len;
1573 u64 parent_dir;
1556 1574
1557 path = alloc_path_for_send(); 1575 path = alloc_path_for_send();
1558 if (!path) 1576 if (!path)
@@ -1568,27 +1586,41 @@ static int get_first_ref(struct send_ctx *sctx,
1568 if (!ret) 1586 if (!ret)
1569 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 1587 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1570 path->slots[0]); 1588 path->slots[0]);
1571 if (ret || found_key.objectid != key.objectid || 1589 if (ret || found_key.objectid != ino ||
1572 found_key.type != key.type) { 1590 (found_key.type != BTRFS_INODE_REF_KEY &&
1591 found_key.type != BTRFS_INODE_EXTREF_KEY)) {
1573 ret = -ENOENT; 1592 ret = -ENOENT;
1574 goto out; 1593 goto out;
1575 } 1594 }
1576 1595
1577 iref = btrfs_item_ptr(path->nodes[0], path->slots[0], 1596 if (key.type == BTRFS_INODE_REF_KEY) {
1578 struct btrfs_inode_ref); 1597 struct btrfs_inode_ref *iref;
1579 len = btrfs_inode_ref_name_len(path->nodes[0], iref); 1598 iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1580 ret = fs_path_add_from_extent_buffer(name, path->nodes[0], 1599 struct btrfs_inode_ref);
1581 (unsigned long)(iref + 1), len); 1600 len = btrfs_inode_ref_name_len(path->nodes[0], iref);
1601 ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1602 (unsigned long)(iref + 1),
1603 len);
1604 parent_dir = found_key.offset;
1605 } else {
1606 struct btrfs_inode_extref *extref;
1607 extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1608 struct btrfs_inode_extref);
1609 len = btrfs_inode_extref_name_len(path->nodes[0], extref);
1610 ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1611 (unsigned long)&extref->name, len);
1612 parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
1613 }
1582 if (ret < 0) 1614 if (ret < 0)
1583 goto out; 1615 goto out;
1584 btrfs_release_path(path); 1616 btrfs_release_path(path);
1585 1617
1586 ret = get_inode_info(root, found_key.offset, NULL, dir_gen, NULL, NULL, 1618 ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL, NULL,
1587 NULL, NULL); 1619 NULL, NULL);
1588 if (ret < 0) 1620 if (ret < 0)
1589 goto out; 1621 goto out;
1590 1622
1591 *dir = found_key.offset; 1623 *dir = parent_dir;
1592 1624
1593out: 1625out:
1594 btrfs_free_path(path); 1626 btrfs_free_path(path);
@@ -2430,7 +2462,8 @@ verbose_printk("btrfs: send_create_inode %llu\n", ino);
2430 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p); 2462 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
2431 } else if (S_ISCHR(mode) || S_ISBLK(mode) || 2463 } else if (S_ISCHR(mode) || S_ISBLK(mode) ||
2432 S_ISFIFO(mode) || S_ISSOCK(mode)) { 2464 S_ISFIFO(mode) || S_ISSOCK(mode)) {
2433 TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, rdev); 2465 TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
2466 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
2434 } 2467 }
2435 2468
2436 ret = send_cmd(sctx); 2469 ret = send_cmd(sctx);
@@ -3226,7 +3259,8 @@ static int process_all_refs(struct send_ctx *sctx,
3226 btrfs_item_key_to_cpu(eb, &found_key, slot); 3259 btrfs_item_key_to_cpu(eb, &found_key, slot);
3227 3260
3228 if (found_key.objectid != key.objectid || 3261 if (found_key.objectid != key.objectid ||
3229 found_key.type != key.type) 3262 (found_key.type != BTRFS_INODE_REF_KEY &&
3263 found_key.type != BTRFS_INODE_EXTREF_KEY))
3230 break; 3264 break;
3231 3265
3232 ret = iterate_inode_ref(sctx, root, path, &found_key, 0, cb, 3266 ret = iterate_inode_ref(sctx, root, path, &found_key, 0, cb,
@@ -3987,7 +4021,7 @@ static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end)
3987 if (sctx->cur_ino == 0) 4021 if (sctx->cur_ino == 0)
3988 goto out; 4022 goto out;
3989 if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid && 4023 if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
3990 sctx->cmp_key->type <= BTRFS_INODE_REF_KEY) 4024 sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
3991 goto out; 4025 goto out;
3992 if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs)) 4026 if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
3993 goto out; 4027 goto out;
@@ -4033,22 +4067,21 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
4033 if (ret < 0) 4067 if (ret < 0)
4034 goto out; 4068 goto out;
4035 4069
4036 if (!S_ISLNK(sctx->cur_inode_mode)) { 4070 if (!sctx->parent_root || sctx->cur_inode_new) {
4037 if (!sctx->parent_root || sctx->cur_inode_new) { 4071 need_chown = 1;
4072 if (!S_ISLNK(sctx->cur_inode_mode))
4038 need_chmod = 1; 4073 need_chmod = 1;
4039 need_chown = 1; 4074 } else {
4040 } else { 4075 ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
4041 ret = get_inode_info(sctx->parent_root, sctx->cur_ino, 4076 NULL, NULL, &right_mode, &right_uid,
4042 NULL, NULL, &right_mode, &right_uid, 4077 &right_gid, NULL);
4043 &right_gid, NULL); 4078 if (ret < 0)
4044 if (ret < 0) 4079 goto out;
4045 goto out;
4046 4080
4047 if (left_uid != right_uid || left_gid != right_gid) 4081 if (left_uid != right_uid || left_gid != right_gid)
4048 need_chown = 1; 4082 need_chown = 1;
4049 if (left_mode != right_mode) 4083 if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
4050 need_chmod = 1; 4084 need_chmod = 1;
4051 }
4052 } 4085 }
4053 4086
4054 if (S_ISREG(sctx->cur_inode_mode)) { 4087 if (S_ISREG(sctx->cur_inode_mode)) {
@@ -4335,7 +4368,8 @@ static int changed_cb(struct btrfs_root *left_root,
4335 4368
4336 if (key->type == BTRFS_INODE_ITEM_KEY) 4369 if (key->type == BTRFS_INODE_ITEM_KEY)
4337 ret = changed_inode(sctx, result); 4370 ret = changed_inode(sctx, result);
4338 else if (key->type == BTRFS_INODE_REF_KEY) 4371 else if (key->type == BTRFS_INODE_REF_KEY ||
4372 key->type == BTRFS_INODE_EXTREF_KEY)
4339 ret = changed_ref(sctx, result); 4373 ret = changed_ref(sctx, result);
4340 else if (key->type == BTRFS_XATTR_ITEM_KEY) 4374 else if (key->type == BTRFS_XATTR_ITEM_KEY)
4341 ret = changed_xattr(sctx, result); 4375 ret = changed_xattr(sctx, result);
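
Most of the send.c churn teaches iterate_inode_ref() to walk items that pack several variable-length records back to back, advancing the cursor by a fixed element size plus the embedded name length (cur += elem_size + name_len). A hedged sketch of that cursor walk over a byte buffer; the record layout here is invented, not the on-disk btrfs format:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static void iterate(const unsigned char *buf, size_t total)
{
        size_t cur = 0;

        while (cur < total) {
                uint16_t name_len;

                memcpy(&name_len, buf + cur, sizeof(name_len)); /* fixed header */
                printf("%.*s\n", (int)name_len,
                       (const char *)buf + cur + sizeof(name_len));
                cur += sizeof(name_len) + name_len;     /* header + payload */
        }
}

int main(void)
{
        unsigned char buf[64];
        size_t off = 0;
        const char *names[] = { "dir", "file" };

        for (int i = 0; i < 2; i++) {
                uint16_t len = (uint16_t)strlen(names[i]);

                memcpy(buf + off, &len, sizeof(len));
                memcpy(buf + off + sizeof(len), names[i], len);
                off += sizeof(len) + len;
        }
        iterate(buf, off);
        return 0;
}
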
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 77db875b5116..04bbfb1052eb 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1200,7 +1200,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
1200 btrfs_i_size_write(parent_inode, parent_inode->i_size + 1200 btrfs_i_size_write(parent_inode, parent_inode->i_size +
1201 dentry->d_name.len * 2); 1201 dentry->d_name.len * 2);
1202 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME; 1202 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
1203 ret = btrfs_update_inode(trans, parent_root, parent_inode); 1203 ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
1204 if (ret) 1204 if (ret)
1205 btrfs_abort_transaction(trans, root, ret); 1205 btrfs_abort_transaction(trans, root, ret);
1206fail: 1206fail:
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 029b903a4ae3..0f5ebb72a5ea 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1819,6 +1819,13 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1819 "Failed to relocate sys chunks after " 1819 "Failed to relocate sys chunks after "
1820 "device initialization. This can be fixed " 1820 "device initialization. This can be fixed "
1821 "using the \"btrfs balance\" command."); 1821 "using the \"btrfs balance\" command.");
1822 trans = btrfs_attach_transaction(root);
1823 if (IS_ERR(trans)) {
1824 if (PTR_ERR(trans) == -ENOENT)
1825 return 0;
1826 return PTR_ERR(trans);
1827 }
1828 ret = btrfs_commit_transaction(trans, root);
1822 } 1829 }
1823 1830
1824 return ret; 1831 return ret;
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index e4fb3ba5a58a..3d7e09bcc0e9 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -85,29 +85,38 @@ static struct rpc_clnt *nsm_create(struct net *net)
85 return rpc_create(&args); 85 return rpc_create(&args);
86} 86}
87 87
88static struct rpc_clnt *nsm_client_set(struct lockd_net *ln,
89 struct rpc_clnt *clnt)
90{
91 spin_lock(&ln->nsm_clnt_lock);
92 if (ln->nsm_users == 0) {
93 if (clnt == NULL)
94 goto out;
95 ln->nsm_clnt = clnt;
96 }
97 clnt = ln->nsm_clnt;
98 ln->nsm_users++;
99out:
100 spin_unlock(&ln->nsm_clnt_lock);
101 return clnt;
102}
103
88static struct rpc_clnt *nsm_client_get(struct net *net) 104static struct rpc_clnt *nsm_client_get(struct net *net)
89{ 105{
90 static DEFINE_MUTEX(nsm_create_mutex); 106 struct rpc_clnt *clnt, *new;
91 struct rpc_clnt *clnt;
92 struct lockd_net *ln = net_generic(net, lockd_net_id); 107 struct lockd_net *ln = net_generic(net, lockd_net_id);
93 108
94 spin_lock(&ln->nsm_clnt_lock); 109 clnt = nsm_client_set(ln, NULL);
95 if (ln->nsm_users) { 110 if (clnt != NULL)
96 ln->nsm_users++;
97 clnt = ln->nsm_clnt;
98 spin_unlock(&ln->nsm_clnt_lock);
99 goto out; 111 goto out;
100 }
101 spin_unlock(&ln->nsm_clnt_lock);
102 112
103 mutex_lock(&nsm_create_mutex); 113 clnt = new = nsm_create(net);
104 clnt = nsm_create(net); 114 if (IS_ERR(clnt))
105 if (!IS_ERR(clnt)) { 115 goto out;
106 ln->nsm_clnt = clnt; 116
107 smp_wmb(); 117 clnt = nsm_client_set(ln, new);
108 ln->nsm_users = 1; 118 if (clnt != new)
109 } 119 rpc_shutdown_client(new);
110 mutex_unlock(&nsm_create_mutex);
111out: 120out:
112 return clnt; 121 return clnt;
113} 122}
@@ -115,18 +124,16 @@ out:
115static void nsm_client_put(struct net *net) 124static void nsm_client_put(struct net *net)
116{ 125{
117 struct lockd_net *ln = net_generic(net, lockd_net_id); 126 struct lockd_net *ln = net_generic(net, lockd_net_id);
118 struct rpc_clnt *clnt = ln->nsm_clnt; 127 struct rpc_clnt *clnt = NULL;
119 int shutdown = 0;
120 128
121 spin_lock(&ln->nsm_clnt_lock); 129 spin_lock(&ln->nsm_clnt_lock);
122 if (ln->nsm_users) { 130 ln->nsm_users--;
123 if (--ln->nsm_users) 131 if (ln->nsm_users == 0) {
124 ln->nsm_clnt = NULL; 132 clnt = ln->nsm_clnt;
125 shutdown = !ln->nsm_users; 133 ln->nsm_clnt = NULL;
126 } 134 }
127 spin_unlock(&ln->nsm_clnt_lock); 135 spin_unlock(&ln->nsm_clnt_lock);
128 136 if (clnt != NULL)
129 if (shutdown)
130 rpc_shutdown_client(clnt); 137 rpc_shutdown_client(clnt);
131} 138}
132 139
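
The mon.c rework drops nsm_create_mutex entirely: nsm_client_get() first tries to take a reference on an existing client under the spinlock, otherwise creates one outside the lock and attempts to install it, shutting down the spare if another thread won the race. A hedged pthread sketch of that get-or-create shape (names and the malloc stand-in are illustrative):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *the_client;
static int users;

/* Install cand only when we are the first user; always return whichever
 * client actually got installed -- the nsm_client_set() shape. */
static void *client_set(void *cand)
{
        void *ret;

        pthread_mutex_lock(&lock);
        if (users == 0) {
                if (cand == NULL) {
                        pthread_mutex_unlock(&lock);
                        return NULL;    /* no client yet, nothing to ref */
                }
                the_client = cand;
        }
        ret = the_client;
        users++;
        pthread_mutex_unlock(&lock);
        return ret;
}

static void *client_get(void)
{
        void *clnt, *new;

        clnt = client_set(NULL);        /* fast path: ref an existing client */
        if (clnt)
                return clnt;

        new = malloc(32);               /* stands in for nsm_create() */
        if (!new)
                return NULL;

        clnt = client_set(new);         /* try to install ours */
        if (clnt != new)
                free(new);              /* lost the race: drop the spare */
        return clnt;
}

int main(void)
{
        void *a = client_get();
        void *b = client_get();

        return (a != NULL && a == b) ? 0 : 1;
}
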
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index c78bb997e2c6..af1cbaf535ed 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -205,6 +205,8 @@
205 {0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ 205 {0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
206 {0x1002, 0x678A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ 206 {0x1002, 0x678A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
207 {0x1002, 0x6790, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ 207 {0x1002, 0x6790, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
208 {0x1002, 0x6791, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
209 {0x1002, 0x6792, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
208 {0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ 210 {0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
209 {0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ 211 {0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
210 {0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ 212 {0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
@@ -217,6 +219,7 @@
217 {0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ 219 {0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
218 {0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ 220 {0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
219 {0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ 221 {0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
222 {0x1002, 0x6811, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
220 {0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ 223 {0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
221 {0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ 224 {0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
222 {0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ 225 {0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 569d67d4243e..d452ee191066 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -57,6 +57,7 @@ int memblock_add(phys_addr_t base, phys_addr_t size);
57int memblock_remove(phys_addr_t base, phys_addr_t size); 57int memblock_remove(phys_addr_t base, phys_addr_t size);
58int memblock_free(phys_addr_t base, phys_addr_t size); 58int memblock_free(phys_addr_t base, phys_addr_t size);
59int memblock_reserve(phys_addr_t base, phys_addr_t size); 59int memblock_reserve(phys_addr_t base, phys_addr_t size);
60void memblock_trim_memory(phys_addr_t align);
60 61
61#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 62#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
62void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, 63void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2e902359aee5..6bfb2faa0b19 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -803,12 +803,16 @@ static inline void perf_event_task_tick(void) { }
803do { \ 803do { \
804 static struct notifier_block fn##_nb __cpuinitdata = \ 804 static struct notifier_block fn##_nb __cpuinitdata = \
805 { .notifier_call = fn, .priority = CPU_PRI_PERF }; \ 805 { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
806 unsigned long cpu = smp_processor_id(); \
807 unsigned long flags; \
806 fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \ 808 fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \
807 (void *)(unsigned long)smp_processor_id()); \ 809 (void *)(unsigned long)cpu); \
810 local_irq_save(flags); \
808 fn(&fn##_nb, (unsigned long)CPU_STARTING, \ 811 fn(&fn##_nb, (unsigned long)CPU_STARTING, \
809 (void *)(unsigned long)smp_processor_id()); \ 812 (void *)(unsigned long)cpu); \
813 local_irq_restore(flags); \
810 fn(&fn##_nb, (unsigned long)CPU_ONLINE, \ 814 fn(&fn##_nb, (unsigned long)CPU_ONLINE, \
811 (void *)(unsigned long)smp_processor_id()); \ 815 (void *)(unsigned long)cpu); \
812 register_cpu_notifier(&fn##_nb); \ 816 register_cpu_notifier(&fn##_nb); \
813} while (0) 817} while (0)
814 818
diff --git a/mm/memblock.c b/mm/memblock.c
index 931eef145af5..625905523c2a 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -930,6 +930,30 @@ int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t si
930 return memblock_overlaps_region(&memblock.reserved, base, size) >= 0; 930 return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
931} 931}
932 932
933void __init_memblock memblock_trim_memory(phys_addr_t align)
934{
935 int i;
936 phys_addr_t start, end, orig_start, orig_end;
937 struct memblock_type *mem = &memblock.memory;
938
939 for (i = 0; i < mem->cnt; i++) {
940 orig_start = mem->regions[i].base;
941 orig_end = mem->regions[i].base + mem->regions[i].size;
942 start = round_up(orig_start, align);
943 end = round_down(orig_end, align);
944
945 if (start == orig_start && end == orig_end)
946 continue;
947
948 if (start < end) {
949 mem->regions[i].base = start;
950 mem->regions[i].size = end - start;
951 } else {
952 memblock_remove_region(mem, i);
953 i--;
954 }
955 }
956}
933 957
934void __init_memblock memblock_set_current_limit(phys_addr_t limit) 958void __init_memblock memblock_set_current_limit(phys_addr_t limit)
935{ 959{
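
memblock_trim_memory() rounds each region's base up and its end down to the requested alignment, keeping the trimmed region if anything survives and deleting it otherwise. For the power-of-two alignments this is called with, the kernel's round_up()/round_down() reduce to mask arithmetic, sketched here on one region:

#include <stdio.h>
#include <stdint.h>

static uint64_t round_down_p2(uint64_t x, uint64_t a) { return x & ~(a - 1); }
static uint64_t round_up_p2(uint64_t x, uint64_t a) { return (x + a - 1) & ~(a - 1); }

int main(void)
{
        uint64_t base = 0x1234, size = 0x3000, align = 0x1000;
        uint64_t start = round_up_p2(base, align);              /* 0x2000 */
        uint64_t end = round_down_p2(base + size, align);       /* 0x4000 */

        if (start < end)        /* mirrors the keep-or-remove decision */
                printf("trimmed region: [%#llx, %#llx)\n",
                       (unsigned long long)start, (unsigned long long)end);
        else
                printf("region dropped\n");
        return 0;
}
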
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index aaaadfbe36e9..75853cabf4c9 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -254,7 +254,6 @@ struct sock_xprt {
254 void (*old_data_ready)(struct sock *, int); 254 void (*old_data_ready)(struct sock *, int);
255 void (*old_state_change)(struct sock *); 255 void (*old_state_change)(struct sock *);
256 void (*old_write_space)(struct sock *); 256 void (*old_write_space)(struct sock *);
257 void (*old_error_report)(struct sock *);
258}; 257};
259 258
260/* 259/*
@@ -737,10 +736,10 @@ static int xs_tcp_send_request(struct rpc_task *task)
737 dprintk("RPC: sendmsg returned unrecognized error %d\n", 736 dprintk("RPC: sendmsg returned unrecognized error %d\n",
738 -status); 737 -status);
739 case -ECONNRESET: 738 case -ECONNRESET:
740 case -EPIPE:
741 xs_tcp_shutdown(xprt); 739 xs_tcp_shutdown(xprt);
742 case -ECONNREFUSED: 740 case -ECONNREFUSED:
743 case -ENOTCONN: 741 case -ENOTCONN:
742 case -EPIPE:
744 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); 743 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
745 } 744 }
746 745
@@ -781,7 +780,6 @@ static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
781 transport->old_data_ready = sk->sk_data_ready; 780 transport->old_data_ready = sk->sk_data_ready;
782 transport->old_state_change = sk->sk_state_change; 781 transport->old_state_change = sk->sk_state_change;
783 transport->old_write_space = sk->sk_write_space; 782 transport->old_write_space = sk->sk_write_space;
784 transport->old_error_report = sk->sk_error_report;
785} 783}
786 784
787static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk) 785static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
@@ -789,7 +787,6 @@ static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *s
789 sk->sk_data_ready = transport->old_data_ready; 787 sk->sk_data_ready = transport->old_data_ready;
790 sk->sk_state_change = transport->old_state_change; 788 sk->sk_state_change = transport->old_state_change;
791 sk->sk_write_space = transport->old_write_space; 789 sk->sk_write_space = transport->old_write_space;
792 sk->sk_error_report = transport->old_error_report;
793} 790}
794 791
795static void xs_reset_transport(struct sock_xprt *transport) 792static void xs_reset_transport(struct sock_xprt *transport)
@@ -1453,7 +1450,7 @@ static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
1453 xprt_clear_connecting(xprt); 1450 xprt_clear_connecting(xprt);
1454} 1451}
1455 1452
1456static void xs_sock_mark_closed(struct rpc_xprt *xprt) 1453static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
1457{ 1454{
1458 smp_mb__before_clear_bit(); 1455 smp_mb__before_clear_bit();
1459 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); 1456 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
@@ -1461,6 +1458,11 @@ static void xs_sock_mark_closed(struct rpc_xprt *xprt)
1461 clear_bit(XPRT_CLOSE_WAIT, &xprt->state); 1458 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
1462 clear_bit(XPRT_CLOSING, &xprt->state); 1459 clear_bit(XPRT_CLOSING, &xprt->state);
1463 smp_mb__after_clear_bit(); 1460 smp_mb__after_clear_bit();
1461}
1462
1463static void xs_sock_mark_closed(struct rpc_xprt *xprt)
1464{
1465 xs_sock_reset_connection_flags(xprt);
1464 /* Mark transport as closed and wake up all pending tasks */ 1466 /* Mark transport as closed and wake up all pending tasks */
1465 xprt_disconnect_done(xprt); 1467 xprt_disconnect_done(xprt);
1466} 1468}
@@ -1516,6 +1518,7 @@ static void xs_tcp_state_change(struct sock *sk)
1516 case TCP_CLOSE_WAIT: 1518 case TCP_CLOSE_WAIT:
1517 /* The server initiated a shutdown of the socket */ 1519 /* The server initiated a shutdown of the socket */
1518 xprt->connect_cookie++; 1520 xprt->connect_cookie++;
1521 clear_bit(XPRT_CONNECTED, &xprt->state);
1519 xs_tcp_force_close(xprt); 1522 xs_tcp_force_close(xprt);
1520 case TCP_CLOSING: 1523 case TCP_CLOSING:
1521 /* 1524 /*
@@ -1540,25 +1543,6 @@ static void xs_tcp_state_change(struct sock *sk)
1540 read_unlock_bh(&sk->sk_callback_lock); 1543 read_unlock_bh(&sk->sk_callback_lock);
1541} 1544}
1542 1545
1543/**
1544 * xs_error_report - callback mainly for catching socket errors
1545 * @sk: socket
1546 */
1547static void xs_error_report(struct sock *sk)
1548{
1549 struct rpc_xprt *xprt;
1550
1551 read_lock_bh(&sk->sk_callback_lock);
1552 if (!(xprt = xprt_from_sock(sk)))
1553 goto out;
1554 dprintk("RPC: %s client %p...\n"
1555 "RPC: error %d\n",
1556 __func__, xprt, sk->sk_err);
1557 xprt_wake_pending_tasks(xprt, -EAGAIN);
1558out:
1559 read_unlock_bh(&sk->sk_callback_lock);
1560}
1561
1562static void xs_write_space(struct sock *sk) 1546static void xs_write_space(struct sock *sk)
1563{ 1547{
1564 struct socket *sock; 1548 struct socket *sock;
@@ -1858,7 +1842,6 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt,
1858 sk->sk_user_data = xprt; 1842 sk->sk_user_data = xprt;
1859 sk->sk_data_ready = xs_local_data_ready; 1843 sk->sk_data_ready = xs_local_data_ready;
1860 sk->sk_write_space = xs_udp_write_space; 1844 sk->sk_write_space = xs_udp_write_space;
1861 sk->sk_error_report = xs_error_report;
1862 sk->sk_allocation = GFP_ATOMIC; 1845 sk->sk_allocation = GFP_ATOMIC;
1863 1846
1864 xprt_clear_connected(xprt); 1847 xprt_clear_connected(xprt);
@@ -1983,7 +1966,6 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
1983 sk->sk_user_data = xprt; 1966 sk->sk_user_data = xprt;
1984 sk->sk_data_ready = xs_udp_data_ready; 1967 sk->sk_data_ready = xs_udp_data_ready;
1985 sk->sk_write_space = xs_udp_write_space; 1968 sk->sk_write_space = xs_udp_write_space;
1986 sk->sk_error_report = xs_error_report;
1987 sk->sk_no_check = UDP_CSUM_NORCV; 1969 sk->sk_no_check = UDP_CSUM_NORCV;
1988 sk->sk_allocation = GFP_ATOMIC; 1970 sk->sk_allocation = GFP_ATOMIC;
1989 1971
@@ -2050,10 +2032,8 @@ static void xs_abort_connection(struct sock_xprt *transport)
2050 any.sa_family = AF_UNSPEC; 2032 any.sa_family = AF_UNSPEC;
2051 result = kernel_connect(transport->sock, &any, sizeof(any), 0); 2033 result = kernel_connect(transport->sock, &any, sizeof(any), 0);
2052 if (!result) 2034 if (!result)
2053 xs_sock_mark_closed(&transport->xprt); 2035 xs_sock_reset_connection_flags(&transport->xprt);
2054 else 2036 dprintk("RPC: AF_UNSPEC connect return code %d\n", result);
2055 dprintk("RPC: AF_UNSPEC connect return code %d\n",
2056 result);
2057} 2037}
2058 2038
2059static void xs_tcp_reuse_connection(struct sock_xprt *transport) 2039static void xs_tcp_reuse_connection(struct sock_xprt *transport)
@@ -2098,7 +2078,6 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2098 sk->sk_data_ready = xs_tcp_data_ready; 2078 sk->sk_data_ready = xs_tcp_data_ready;
2099 sk->sk_state_change = xs_tcp_state_change; 2079 sk->sk_state_change = xs_tcp_state_change;
2100 sk->sk_write_space = xs_tcp_write_space; 2080 sk->sk_write_space = xs_tcp_write_space;
2101 sk->sk_error_report = xs_error_report;
2102 sk->sk_allocation = GFP_ATOMIC; 2081 sk->sk_allocation = GFP_ATOMIC;
2103 2082
2104 /* socket options */ 2083 /* socket options */
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index 411ee5664e98..178b88ae3d2f 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -414,7 +414,7 @@ static int show_html_page(const char *perf_cmd)
414int cmd_help(int argc, const char **argv, const char *prefix __maybe_unused) 414int cmd_help(int argc, const char **argv, const char *prefix __maybe_unused)
415{ 415{
416 bool show_all = false; 416 bool show_all = false;
417 enum help_format help_format = HELP_FORMAT_NONE; 417 enum help_format help_format = HELP_FORMAT_MAN;
418 struct option builtin_help_options[] = { 418 struct option builtin_help_options[] = {
419 OPT_BOOLEAN('a', "all", &show_all, "print all available commands"), 419 OPT_BOOLEAN('a', "all", &show_all, "print all available commands"),
420 OPT_SET_UINT('m', "man", &help_format, "show man page", HELP_FORMAT_MAN), 420 OPT_SET_UINT('m', "man", &help_format, "show man page", HELP_FORMAT_MAN),
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index dec8ced61fb0..7aaee39f6774 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -56,6 +56,10 @@ static int trace__read_syscall_info(struct trace *trace, int id)
56{ 56{
57 char tp_name[128]; 57 char tp_name[128];
58 struct syscall *sc; 58 struct syscall *sc;
59 const char *name = audit_syscall_to_name(id, trace->audit_machine);
60
61 if (name == NULL)
62 return -1;
59 63
60 if (id > trace->syscalls.max) { 64 if (id > trace->syscalls.max) {
61 struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc)); 65 struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));
@@ -75,11 +79,8 @@ static int trace__read_syscall_info(struct trace *trace, int id)
75 } 79 }
76 80
77 sc = trace->syscalls.table + id; 81 sc = trace->syscalls.table + id;
78 sc->name = audit_syscall_to_name(id, trace->audit_machine); 82 sc->name = name;
79 if (sc->name == NULL) 83 sc->fmt = syscall_fmt__find(sc->name);
80 return -1;
81
82 sc->fmt = syscall_fmt__find(sc->name);
83 84
84 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name); 85 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
85 sc->tp_format = event_format__new("syscalls", tp_name); 86 sc->tp_format = event_format__new("syscalls", tp_name);
@@ -267,6 +268,13 @@ again:
267 if (evlist->threads->map[0] == -1 || evlist->threads->nr > 1) 268 if (evlist->threads->map[0] == -1 || evlist->threads->nr > 1)
268 printf("%d ", sample.tid); 269 printf("%d ", sample.tid);
269 270
271 if (sample.raw_data == NULL) {
272 printf("%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
273 perf_evsel__name(evsel), sample.tid,
274 sample.cpu, sample.raw_size);
275 continue;
276 }
277
270 handler = evsel->handler.func; 278 handler = evsel->handler.func;
271 handler(trace, evsel, &sample); 279 handler(trace, evsel, &sample);
272 } 280 }
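
The first builtin-trace hunk moves the audit_syscall_to_name() lookup ahead of the table growth, so an unknown id fails before the realloc instead of leaving a grown table behind; the second skips samples with no payload rather than handing handlers a NULL raw_data. A hedged sketch of the validate-before-allocate shape, with invented helpers:

#include <stdlib.h>
#include <string.h>

struct entry { const char *name; };

static struct entry *table;
static int table_max = -1;

static const char *lookup_name(int id)          /* invented; NULL = unknown */
{
        return id == 0 ? "read" : NULL;
}

static int add_entry(int id)
{
        const char *name = lookup_name(id);     /* validate first... */

        if (name == NULL)
                return -1;                      /* ...so failure costs nothing */

        if (id > table_max) {                   /* ...and only then grow */
                struct entry *n = realloc(table, (id + 1) * sizeof(*n));

                if (n == NULL)
                        return -1;
                memset(n + table_max + 1, 0,
                       (id - table_max) * sizeof(*n));
                table = n;
                table_max = id;
        }
        table[id].name = name;
        return 0;
}

int main(void)
{
        return (add_entry(0) == 0 && add_entry(5) == -1) ? 0 : 1;
}
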
diff --git a/tools/perf/util/parse-events-test.c b/tools/perf/util/parse-events-test.c
index 28c18d1d52c3..516ecd9ddd6e 100644
--- a/tools/perf/util/parse-events-test.c
+++ b/tools/perf/util/parse-events-test.c
@@ -513,7 +513,8 @@ static int test__group1(struct perf_evlist *evlist)
513 TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); 513 TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
514 TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); 514 TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
515 TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); 515 TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
516 TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); 516 /* use of precise requires exclude_guest */
517 TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
517 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); 518 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
518 TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2); 519 TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2);
519 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); 520 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
@@ -599,7 +600,8 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused)
599 TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); 600 TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
600 TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); 601 TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
601 TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); 602 TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
602 TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); 603 /* use of precise requires exclude_guest */
604 TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
603 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); 605 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
604 TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 3); 606 TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 3);
605 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); 607 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
@@ -662,7 +664,8 @@ static int test__group4(struct perf_evlist *evlist __maybe_unused)
662 TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); 664 TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
663 TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); 665 TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
664 TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); 666 TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
665 TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); 667 /* use of precise requires exclude_guest */
668 TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
666 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); 669 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
667 TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 1); 670 TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 1);
668 TEST_ASSERT_VAL("wrong group name", !evsel->group_name); 671 TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
@@ -676,7 +679,8 @@ static int test__group4(struct perf_evlist *evlist __maybe_unused)
676 TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); 679 TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
677 TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); 680 TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
678 TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); 681 TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
679 TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); 682 /* use of precise requires exclude_guest */
683 TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
680 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); 684 TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
681 TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2); 685 TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2);
682 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); 686 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index fb4b7ea6752f..8b3e5939afb6 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -39,7 +39,6 @@ int thread__set_comm(struct thread *self, const char *comm)
39 err = self->comm == NULL ? -ENOMEM : 0; 39 err = self->comm == NULL ? -ENOMEM : 0;
40 if (!err) { 40 if (!err) {
41 self->comm_set = true; 41 self->comm_set = true;
42 map_groups__flush(&self->mg);
43 } 42 }
44 return err; 43 return err;
45} 44}