Diffstat (limited to 'arch')
 41 files changed, 1717 insertions(+), 338 deletions(-)
diff --git a/arch/arm/mach-omap1/board-palmz71.c b/arch/arm/mach-omap1/board-palmz71.c
index cc71a26723ef..355980321c2d 100644
--- a/arch/arm/mach-omap1/board-palmz71.c
+++ b/arch/arm/mach-omap1/board-palmz71.c
@@ -288,8 +288,7 @@ palmz71_gpio_setup(int early)
 	}
 	gpio_direction_input(PALMZ71_USBDETECT_GPIO);
 	if (request_irq(gpio_to_irq(PALMZ71_USBDETECT_GPIO),
-			palmz71_powercable, IRQF_SAMPLE_RANDOM,
-			"palmz71-cable", NULL))
+			palmz71_powercable, 0, "palmz71-cable", NULL))
 		printk(KERN_ERR
 			"IRQ request for power cable failed!\n");
 	palmz71_powercable(gpio_to_irq(PALMZ71_USBDETECT_GPIO), NULL);
diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c
index 6bb3f47b1f14..0ca0db787903 100644
--- a/arch/arm/mach-pxa/lubbock.c
+++ b/arch/arm/mach-pxa/lubbock.c
@@ -456,7 +456,7 @@ static int lubbock_mci_init(struct device *dev,
 	init_timer(&mmc_timer);
 	mmc_timer.data = (unsigned long) data;
 	return request_irq(LUBBOCK_SD_IRQ, lubbock_detect_int,
-			   IRQF_SAMPLE_RANDOM, "lubbock-sd-detect", data);
+			   0, "lubbock-sd-detect", data);
 }
 
 static int lubbock_mci_get_ro(struct device *dev)
diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c
index 2db697cd2b4e..39561dcf65f2 100644
--- a/arch/arm/mach-pxa/magician.c
+++ b/arch/arm/mach-pxa/magician.c
@@ -633,9 +633,8 @@ static struct platform_device bq24022 = {
 static int magician_mci_init(struct device *dev,
 			irq_handler_t detect_irq, void *data)
 {
-	return request_irq(IRQ_MAGICIAN_SD, detect_irq,
-			   IRQF_DISABLED | IRQF_SAMPLE_RANDOM,
-			   "mmc card detect", data);
+	return request_irq(IRQ_MAGICIAN_SD, detect_irq, IRQF_DISABLED,
+			   "mmc card detect", data);
 }
 
 static void magician_mci_exit(struct device *dev, void *data)
diff --git a/arch/arm/mach-pxa/trizeps4.c b/arch/arm/mach-pxa/trizeps4.c
index 2b6ac00b2cd9..166dd32cc1d3 100644
--- a/arch/arm/mach-pxa/trizeps4.c
+++ b/arch/arm/mach-pxa/trizeps4.c
@@ -332,8 +332,8 @@ static int trizeps4_mci_init(struct device *dev, irq_handler_t mci_detect_int,
 	int err;
 
 	err = request_irq(TRIZEPS4_MMC_IRQ, mci_detect_int,
-			  IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_SAMPLE_RANDOM,
+			  IRQF_DISABLED | IRQF_TRIGGER_RISING,
 			  "MMC card detect", data);
 	if (err) {
 		printk(KERN_ERR "trizeps4_mci_init: MMC/SD: can't request"
 		       "MMC card detect IRQ\n");
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 5c3e0888265a..1034884b77da 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -23,7 +23,6 @@
 #include <linux/ioport.h>
 #include <linux/kernel_stat.h>
 #include <linux/ptrace.h>
-#include <linux/random.h>	/* for rand_initialize_irq() */
 #include <linux/signal.h>
 #include <linux/smp.h>
 #include <linux/threads.h>
diff --git a/arch/powerpc/boot/dts/p2020rdb-pc_32b.dts b/arch/powerpc/boot/dts/p2020rdb-pc_32b.dts
index 852e5b27485d..57573bd52caa 100644
--- a/arch/powerpc/boot/dts/p2020rdb-pc_32b.dts
+++ b/arch/powerpc/boot/dts/p2020rdb-pc_32b.dts
@@ -56,7 +56,7 @@
 		ranges = <0x0 0x0 0xffe00000 0x100000>;
 	};
 
-	pci0: pcie@ffe08000 {
+	pci2: pcie@ffe08000 {
 		reg = <0 0xffe08000 0 0x1000>;
 		status = "disabled";
 	};
@@ -76,7 +76,7 @@
 		};
 	};
 
-	pci2: pcie@ffe0a000 {
+	pci0: pcie@ffe0a000 {
 		reg = <0 0xffe0a000 0 0x1000>;
 		ranges = <0x2000000 0x0 0xe0000000 0 0x80000000 0x0 0x20000000
 			  0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
diff --git a/arch/powerpc/boot/dts/p2020rdb-pc_36b.dts b/arch/powerpc/boot/dts/p2020rdb-pc_36b.dts
index b5a56ca51cf7..470247ea68b4 100644
--- a/arch/powerpc/boot/dts/p2020rdb-pc_36b.dts
+++ b/arch/powerpc/boot/dts/p2020rdb-pc_36b.dts
@@ -56,7 +56,7 @@
 		ranges = <0x0 0xf 0xffe00000 0x100000>;
 	};
 
-	pci0: pcie@fffe08000 {
+	pci2: pcie@fffe08000 {
 		reg = <0xf 0xffe08000 0 0x1000>;
 		status = "disabled";
 	};
@@ -76,7 +76,7 @@
 		};
 	};
 
-	pci2: pcie@fffe0a000 {
+	pci0: pcie@fffe0a000 {
 		reg = <0xf 0xffe0a000 0 0x1000>;
 		ranges = <0x2000000 0x0 0xe0000000 0xc 0x00000000 0x0 0x20000000
 			  0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>;
diff --git a/arch/powerpc/boot/dts/p3041ds.dts b/arch/powerpc/boot/dts/p3041ds.dts
index 22a215e94162..6cdcadc80c30 100644
--- a/arch/powerpc/boot/dts/p3041ds.dts
+++ b/arch/powerpc/boot/dts/p3041ds.dts
@@ -58,7 +58,7 @@
 			#size-cells = <1>;
 			compatible = "spansion,s25sl12801";
 			reg = <0>;
-			spi-max-frequency = <40000000>; /* input clock */
+			spi-max-frequency = <35000000>; /* input clock */
 			partition@u-boot {
 				label = "u-boot";
 				reg = <0x00000000 0x00100000>;
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index ab523f3c1731..9ecf6e35cd8d 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -67,7 +67,6 @@ kvmppc_skip_Hinterrupt:
 #elif defined(CONFIG_PPC_BOOK3S_32)
 
 #define FUNC(name)		name
-#define MTMSR_EERI(reg)	mtmsr	(reg)
 
 .macro INTERRUPT_TRAMPOLINE intno
 
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index 89ee02c54561..3c732acf331d 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -208,6 +208,7 @@ static void p1022ds_set_monitor_port(enum fsl_diu_monitor_port port)
 	u8 __iomem *lbc_lcs0_ba = NULL;
 	u8 __iomem *lbc_lcs1_ba = NULL;
 	phys_addr_t cs0_addr, cs1_addr;
+	u32 br0, or0, br1, or1;
 	const __be32 *iprop;
 	unsigned int num_laws;
 	u8 b;
@@ -256,11 +257,70 @@ static void p1022ds_set_monitor_port(enum fsl_diu_monitor_port port)
 	}
 	num_laws = be32_to_cpup(iprop);
 
-	cs0_addr = lbc_br_to_phys(ecm, num_laws, in_be32(&lbc->bank[0].br));
-	cs1_addr = lbc_br_to_phys(ecm, num_laws, in_be32(&lbc->bank[1].br));
+	/*
+	 * Indirect mode requires both BR0 and BR1 to be set to "GPCM",
+	 * otherwise writes to these addresses won't actually appear on the
+	 * local bus, and so the PIXIS won't see them.
+	 *
+	 * In FCM mode, writes go to the NAND controller, which does not pass
+	 * them to the localbus directly.  So we force BR0 and BR1 into GPCM
+	 * mode, since we don't care about what's behind the localbus any
+	 * more.
+	 */
+	br0 = in_be32(&lbc->bank[0].br);
+	br1 = in_be32(&lbc->bank[1].br);
+	or0 = in_be32(&lbc->bank[0].or);
+	or1 = in_be32(&lbc->bank[1].or);
+
+	/* Make sure CS0 and CS1 are programmed */
+	if (!(br0 & BR_V) || !(br1 & BR_V)) {
+		pr_err("p1022ds: CS0 and/or CS1 is not programmed\n");
+		goto exit;
+	}
+
+	/*
+	 * Use the existing BRx/ORx values if it's already GPCM. Otherwise,
+	 * force the values to simple 32KB GPCM windows with the most
+	 * conservative timing.
+	 */
+	if ((br0 & BR_MSEL) != BR_MS_GPCM) {
+		br0 = (br0 & BR_BA) | BR_V;
+		or0 = 0xFFFF8000 | 0xFF7;
+		out_be32(&lbc->bank[0].br, br0);
+		out_be32(&lbc->bank[0].or, or0);
+	}
+	if ((br1 & BR_MSEL) != BR_MS_GPCM) {
+		br1 = (br1 & BR_BA) | BR_V;
+		or1 = 0xFFFF8000 | 0xFF7;
+		out_be32(&lbc->bank[1].br, br1);
+		out_be32(&lbc->bank[1].or, or1);
+	}
+
+	cs0_addr = lbc_br_to_phys(ecm, num_laws, br0);
+	if (!cs0_addr) {
+		pr_err("p1022ds: could not determine physical address for CS0"
+		       " (BR0=%08x)\n", br0);
+		goto exit;
+	}
+	cs1_addr = lbc_br_to_phys(ecm, num_laws, br1);
+	if (!cs1_addr) {
+		pr_err("p1022ds: could not determine physical address for CS1"
+		       " (BR1=%08x)\n", br1);
+		goto exit;
+	}
 
 	lbc_lcs0_ba = ioremap(cs0_addr, 1);
+	if (!lbc_lcs0_ba) {
+		pr_err("p1022ds: could not ioremap CS0 address %llx\n",
+		       (unsigned long long)cs0_addr);
+		goto exit;
+	}
 	lbc_lcs1_ba = ioremap(cs1_addr, 1);
+	if (!lbc_lcs1_ba) {
+		pr_err("p1022ds: could not ioremap CS1 address %llx\n",
+		       (unsigned long long)cs1_addr);
+		goto exit;
+	}
 
 	/* Make sure we're in indirect mode first. */
 	if ((in_be32(&guts->pmuxcr) & PMUXCR_ELBCDIU_MASK) !=
@@ -419,18 +479,6 @@ void __init p1022_ds_pic_init(void)
 
 #if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
 
-/*
- * Disables a node in the device tree.
- *
- * This function is called before kmalloc() is available, so the 'new' object
- * should be allocated in the global area. The easiest way is to do that is
- * to allocate one static local variable for each call to this function.
- */
-static void __init disable_one_node(struct device_node *np, struct property *new)
-{
-	prom_update_property(np, new);
-}
-
 /* TRUE if there is a "video=fslfb" command-line parameter. */
 static bool fslfb;
 
@@ -493,28 +541,58 @@ static void __init p1022_ds_setup_arch(void)
 	diu_ops.valid_monitor_port = p1022ds_valid_monitor_port;
 
 	/*
-	 * Disable the NOR flash node if there is video=fslfb... command-line
-	 * parameter.  When the DIU is active, NOR flash is unavailable, so we
-	 * have to disable the node before the MTD driver loads.
+	 * Disable the NOR and NAND flash nodes if there is video=fslfb...
+	 * command-line parameter.  When the DIU is active, the localbus is
+	 * unavailable, so we have to disable these nodes before the MTD
+	 * driver loads.
 	 */
 	if (fslfb) {
 		struct device_node *np =
			of_find_compatible_node(NULL, NULL, "fsl,p1022-elbc");
 
 		if (np) {
-			np = of_find_compatible_node(np, NULL, "cfi-flash");
-			if (np) {
+			struct device_node *np2;
+
+			of_node_get(np);
+			np2 = of_find_compatible_node(np, NULL, "cfi-flash");
+			if (np2) {
 				static struct property nor_status = {
 					.name = "status",
 					.value = "disabled",
 					.length = sizeof("disabled"),
 				};
 
+				/*
+				 * prom_update_property() is called before
+				 * kmalloc() is available, so the 'new' object
+				 * should be allocated in the global area.
+				 * The easiest way to do that is to
+				 * allocate one static local variable for each
+				 * call to this function.
+				 */
 				pr_info("p1022ds: disabling %s node",
-					np->full_name);
-				disable_one_node(np, &nor_status);
-				of_node_put(np);
+					np2->full_name);
+				prom_update_property(np2, &nor_status);
+				of_node_put(np2);
 			}
+
+			of_node_get(np);
+			np2 = of_find_compatible_node(np, NULL,
+						      "fsl,elbc-fcm-nand");
+			if (np2) {
+				static struct property nand_status = {
+					.name = "status",
+					.value = "disabled",
+					.length = sizeof("disabled"),
+				};
+
+				pr_info("p1022ds: disabling %s node",
+					np2->full_name);
+				prom_update_property(np2, &nand_status);
+				of_node_put(np2);
+			}
+
+			of_node_put(np);
 		}
 
 	}
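The function-local "static struct property" idiom in the hunk above exists because this code runs before kmalloc() is available, so the replacement property must live in static storage. A minimal sketch of the same idiom, with a hypothetical caller supplying the node (not part of the patch):

/* Sketch only: disable a device-tree node early in boot, before
 * kmalloc() works, by handing prom_update_property() a static object. */
static void __init example_disable_node(struct device_node *np)
{
	static struct property disabled = {
		.name   = "status",
		.value  = "disabled",
		.length = sizeof("disabled"),
	};

	prom_update_property(np, &disabled);
}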
diff --git a/arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h b/arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h
index 60c9c0bd5ba2..2aa97ddb7b78 100644
--- a/arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h
+++ b/arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2009-2010 Freescale Semiconductor, Inc
+ * Copyright 2009-2010, 2012 Freescale Semiconductor, Inc
  *
  * QorIQ based Cache Controller Memory Mapped Registers
  *
@@ -91,7 +91,7 @@ struct mpc85xx_l2ctlr {
 
 struct sram_parameters {
 	unsigned int sram_size;
-	uint64_t sram_offset;
+	phys_addr_t sram_offset;
 };
 
 extern int instantiate_cache_sram(struct platform_device *dev,
diff --git a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
index cedabd0f4bfe..68ac3aacb191 100644
--- a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
+++ b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2009-2010 Freescale Semiconductor, Inc.
+ * Copyright 2009-2010, 2012 Freescale Semiconductor, Inc.
  *
  * QorIQ (P1/P2) L2 controller init for Cache-SRAM instantiation
  *
@@ -31,24 +31,21 @@ static char *sram_size;
 static char *sram_offset;
 struct mpc85xx_l2ctlr __iomem *l2ctlr;
 
-static long get_cache_sram_size(void)
+static int get_cache_sram_params(struct sram_parameters *sram_params)
 {
-	unsigned long val;
+	unsigned long long addr;
+	unsigned int size;
 
-	if (!sram_size || (strict_strtoul(sram_size, 0, &val) < 0))
+	if (!sram_size || (kstrtouint(sram_size, 0, &size) < 0))
 		return -EINVAL;
 
-	return val;
-}
-
-static long get_cache_sram_offset(void)
-{
-	unsigned long val;
-
-	if (!sram_offset || (strict_strtoul(sram_offset, 0, &val) < 0))
+	if (!sram_offset || (kstrtoull(sram_offset, 0, &addr) < 0))
 		return -EINVAL;
 
-	return val;
+	sram_params->sram_offset = addr;
+	sram_params->sram_size = size;
+
+	return 0;
 }
 
 static int __init get_size_from_cmdline(char *str)
@@ -93,17 +90,9 @@ static int __devinit mpc85xx_l2ctlr_of_probe(struct platform_device *dev)
 	}
 	l2cache_size = *prop;
 
-	sram_params.sram_size = get_cache_sram_size();
-	if ((int)sram_params.sram_size <= 0) {
-		dev_err(&dev->dev,
-			"Entire L2 as cache, Aborting Cache-SRAM stuff\n");
-		return -EINVAL;
-	}
-
-	sram_params.sram_offset = get_cache_sram_offset();
-	if ((int64_t)sram_params.sram_offset <= 0) {
+	if (get_cache_sram_params(&sram_params)) {
 		dev_err(&dev->dev,
-			"Entire L2 as cache, provide a valid sram offset\n");
+			"Entire L2 as cache, provide valid sram offset and size\n");
 		return -EINVAL;
 	}
 
@@ -125,14 +114,14 @@ static int __devinit mpc85xx_l2ctlr_of_probe(struct platform_device *dev)
 	 * Write bits[0-17] to srbar0
 	 */
 	out_be32(&l2ctlr->srbar0,
-		sram_params.sram_offset & L2SRAM_BAR_MSK_LO18);
+		lower_32_bits(sram_params.sram_offset) & L2SRAM_BAR_MSK_LO18);
 
 	/*
 	 * Write bits[18-21] to srbare0
 	 */
 #ifdef CONFIG_PHYS_64BIT
 	out_be32(&l2ctlr->srbarea0,
-		(sram_params.sram_offset >> 32) & L2SRAM_BARE_MSK_HI4);
+		upper_32_bits(sram_params.sram_offset) & L2SRAM_BARE_MSK_HI4);
 #endif
 
 	clrsetbits_be32(&l2ctlr->ctl, L2CR_L2E, L2CR_L2FI);
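The switch above from strict_strtoul() to the fixed-width kstrtouint()/kstrtoull() helpers parses each command-line string straight into a variable of the correct type, so a large physical offset no longer has to squeeze through an unsigned long. A minimal sketch of the helpers' use, with hypothetical input strings (not from the patch):

/* Sketch only: kstrtouint() rejects values that overflow unsigned int,
 * while kstrtoull() accepts full 64-bit physical addresses. */
static int example_parse(void)
{
	unsigned int size;
	unsigned long long offset;

	if (kstrtouint("0x40000", 0, &size) < 0)
		return -EINVAL;
	if (kstrtoull("0xfff00000", 0, &offset) < 0)
		return -EINVAL;
	return 0;
}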
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 296cd32466df..76de6b68487c 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -90,6 +90,7 @@ config S390
 	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_CMPXCHG_LOCAL
 	select ARCH_DISCARD_MEMBLOCK
+	select BUILDTIME_EXTABLE_SORT
 	select ARCH_INLINE_SPIN_TRYLOCK
 	select ARCH_INLINE_SPIN_TRYLOCK_BH
 	select ARCH_INLINE_SPIN_LOCK
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index de57702a3f44..f39cd710980b 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -7,6 +7,9 @@ CONFIG_TASK_DELAY_ACCT=y
 CONFIG_TASK_XACCT=y
 CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_RCU_FAST_NO_HZ=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_CGROUPS=y
@@ -35,8 +38,6 @@ CONFIG_MODVERSIONS=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_DEFAULT_DEADLINE=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
 CONFIG_PREEMPT=y
 CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 5c63615f1349..b749c5733657 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -11,7 +11,6 @@
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
 #include <asm/ctl_reg.h>
-#include <asm-generic/mm_hooks.h>
 
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
@@ -58,7 +57,7 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
 	pgd_t *pgd = mm->pgd;
 
 	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
-	if (user_mode != HOME_SPACE_MODE) {
+	if (addressing_mode != HOME_SPACE_MODE) {
 		/* Load primary space page table origin. */
 		asm volatile(LCTL_OPCODE" 1,1,%0\n"
 			     : : "m" (S390_lowcore.user_asce) );
@@ -91,4 +90,17 @@ static inline void activate_mm(struct mm_struct *prev,
 	switch_mm(prev, next, current);
 }
 
+static inline void arch_dup_mmap(struct mm_struct *oldmm,
+				 struct mm_struct *mm)
+{
+#ifdef CONFIG_64BIT
+	if (oldmm->context.asce_limit < mm->context.asce_limit)
+		crst_table_downgrade(mm, oldmm->context.asce_limit);
+#endif
+}
+
+static inline void arch_exit_mmap(struct mm_struct *mm)
+{
+}
+
 #endif /* __S390_MMU_CONTEXT_H */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index c40fa91e38a8..11e4e3236937 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -120,7 +120,9 @@ struct stack_frame {
 	regs->psw.mask = psw_user_bits | PSW_MASK_BA;		\
 	regs->psw.addr = new_psw | PSW_ADDR_AMODE;		\
 	regs->gprs[15] = new_stackp;				\
+	__tlb_flush_mm(current->mm);				\
 	crst_table_downgrade(current->mm, 1UL << 31);		\
+	update_mm(current->mm, current);			\
 } while (0)
 
 /* Forward declaration, a strange C thing */
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 57e80534375a..e6859d16ee2d 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -60,7 +60,7 @@ void create_mem_hole(struct mem_chunk memory_chunk[], unsigned long addr,
 #define SECONDARY_SPACE_MODE	2
 #define HOME_SPACE_MODE		3
 
-extern unsigned int user_mode;
+extern unsigned int addressing_mode;
 
 /*
  * Machine features detected in head.S
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 21be961e8a43..ba500d8dc392 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -110,6 +110,7 @@ struct debug_view debug_raw_view = {
 	NULL,
 	NULL
 };
+EXPORT_SYMBOL(debug_raw_view);
 
 struct debug_view debug_hex_ascii_view = {
 	"hex_ascii",
@@ -119,6 +120,7 @@ struct debug_view debug_hex_ascii_view = {
 	NULL,
 	NULL
 };
+EXPORT_SYMBOL(debug_hex_ascii_view);
 
 static struct debug_view debug_level_view = {
 	"level",
@@ -155,6 +157,7 @@ struct debug_view debug_sprintf_view = {
 	NULL,
 	NULL
 };
+EXPORT_SYMBOL(debug_sprintf_view);
 
 /* used by dump analysis tools to determine version of debug feature */
 static unsigned int __used debug_feature_version = __DEBUG_FEATURE_VERSION;
@@ -730,6 +733,7 @@ debug_info_t *debug_register(const char *name, int pages_per_area,
 	return debug_register_mode(name, pages_per_area, nr_areas, buf_size,
 				   S_IRUSR | S_IWUSR, 0, 0);
 }
+EXPORT_SYMBOL(debug_register);
 
 /*
  * debug_unregister:
@@ -748,6 +752,7 @@ debug_unregister(debug_info_t * id)
 out:
 	return;
 }
+EXPORT_SYMBOL(debug_unregister);
 
 /*
  * debug_set_size:
@@ -810,7 +815,7 @@ debug_set_level(debug_info_t* id, int new_level)
 	}
 	spin_unlock_irqrestore(&id->lock,flags);
 }
-
+EXPORT_SYMBOL(debug_set_level);
 
 /*
  * proceed_active_entry:
@@ -930,7 +935,7 @@ debug_stop_all(void)
 	if (debug_stoppable)
 		debug_active = 0;
 }
-
+EXPORT_SYMBOL(debug_stop_all);
 
 void debug_set_critical(void)
 {
@@ -963,6 +968,7 @@ debug_event_common(debug_info_t * id, int level, const void *buf, int len)
 
 	return active;
 }
+EXPORT_SYMBOL(debug_event_common);
 
 /*
  * debug_exception_common:
@@ -990,6 +996,7 @@ debug_entry_t
 
 	return active;
 }
+EXPORT_SYMBOL(debug_exception_common);
 
 /*
  * counts arguments in format string for sprintf view
@@ -1043,6 +1050,7 @@ debug_sprintf_event(debug_info_t* id, int level,char *string,...)
 
 	return active;
 }
+EXPORT_SYMBOL(debug_sprintf_event);
 
 /*
  * debug_sprintf_exception:
@@ -1081,25 +1089,7 @@ debug_sprintf_exception(debug_info_t* id, int level,char *string,...)
 
 	return active;
 }
-
-/*
- * debug_init:
- * - is called exactly once to initialize the debug feature
- */
-
-static int
-__init debug_init(void)
-{
-	int rc = 0;
-
-	s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table);
-	mutex_lock(&debug_mutex);
-	debug_debugfs_root_entry = debugfs_create_dir(DEBUG_DIR_ROOT,NULL);
-	initialized = 1;
-	mutex_unlock(&debug_mutex);
-
-	return rc;
-}
+EXPORT_SYMBOL(debug_sprintf_exception);
 
 /*
  * debug_register_view:
@@ -1147,6 +1137,7 @@ debug_register_view(debug_info_t * id, struct debug_view *view)
 out:
 	return rc;
 }
+EXPORT_SYMBOL(debug_register_view);
 
 /*
  * debug_unregister_view:
@@ -1176,6 +1167,7 @@ debug_unregister_view(debug_info_t * id, struct debug_view *view)
 out:
 	return rc;
 }
+EXPORT_SYMBOL(debug_unregister_view);
 
 static inline char *
 debug_get_user_string(const char __user *user_buf, size_t user_len)
@@ -1485,6 +1477,7 @@ debug_dflt_header_fn(debug_info_t * id, struct debug_view *view,
 			except_str, entry->id.fields.cpuid, (void *) caller);
 	return rc;
 }
+EXPORT_SYMBOL(debug_dflt_header_fn);
 
 /*
  * prints debug data sprintf-formated:
@@ -1533,33 +1526,16 @@ out:
 }
 
 /*
- * clean up module
+ * debug_init:
+ * - is called exactly once to initialize the debug feature
 */
-static void __exit debug_exit(void)
+static int __init debug_init(void)
 {
-	debugfs_remove(debug_debugfs_root_entry);
-	unregister_sysctl_table(s390dbf_sysctl_header);
-	return;
+	s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table);
+	mutex_lock(&debug_mutex);
+	debug_debugfs_root_entry = debugfs_create_dir(DEBUG_DIR_ROOT, NULL);
+	initialized = 1;
+	mutex_unlock(&debug_mutex);
+	return 0;
 }
-
-/*
- * module definitions
- */
 postcore_initcall(debug_init);
-module_exit(debug_exit);
-MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(debug_register);
-EXPORT_SYMBOL(debug_unregister);
-EXPORT_SYMBOL(debug_set_level);
-EXPORT_SYMBOL(debug_stop_all);
-EXPORT_SYMBOL(debug_register_view);
-EXPORT_SYMBOL(debug_unregister_view);
-EXPORT_SYMBOL(debug_event_common);
-EXPORT_SYMBOL(debug_exception_common);
-EXPORT_SYMBOL(debug_hex_ascii_view);
-EXPORT_SYMBOL(debug_raw_view);
-EXPORT_SYMBOL(debug_dflt_header_fn);
-EXPORT_SYMBOL(debug_sprintf_view);
-EXPORT_SYMBOL(debug_sprintf_exception);
-EXPORT_SYMBOL(debug_sprintf_event);
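Folding the trailing EXPORT_SYMBOL() block into the body of the file follows the usual kernel convention of keeping each export directly after the definition it exports, e.g. for a hypothetical function:

int my_api(void)
{
	return 0;
}
EXPORT_SYMBOL(my_api);	/* export sits right after the definition */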
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 1f6b428e2762..619c5d350726 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -1531,7 +1531,7 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
 
 void show_code(struct pt_regs *regs)
 {
-	char *mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
+	char *mode = user_mode(regs) ? "User" : "Krnl";
 	unsigned char code[64];
 	char buffer[64], *ptr;
 	mm_segment_t old_fs;
@@ -1540,7 +1540,7 @@ void show_code(struct pt_regs *regs)
 
 	/* Get a snapshot of the 64 bytes surrounding the fault address. */
 	old_fs = get_fs();
-	set_fs((regs->psw.mask & PSW_MASK_PSTATE) ? USER_DS : KERNEL_DS);
+	set_fs(user_mode(regs) ? USER_DS : KERNEL_DS);
 	for (start = 32; start && regs->psw.addr >= 34 - start; start -= 2) {
 		addr = regs->psw.addr - 34 + start;
 		if (__copy_from_user(code + start - 2,
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index bc95a8ebd9cc..83c3271c442b 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -455,7 +455,6 @@ void __init startup_init(void)
 	init_kernel_storage_key();
 	lockdep_init();
 	lockdep_off();
-	sort_main_extable();
 	setup_lowcore_early();
 	setup_facility_list();
 	detect_machine_type();
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index e64d141555ce..6ffcd3203215 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -1583,7 +1583,7 @@ static struct kset *vmcmd_kset;
 
 static void vmcmd_run(struct shutdown_trigger *trigger)
 {
-	char *cmd, *next_cmd;
+	char *cmd;
 
 	if (strcmp(trigger->name, ON_REIPL_STR) == 0)
 		cmd = vmcmd_on_reboot;
@@ -1600,15 +1600,7 @@ static void vmcmd_run(struct shutdown_trigger *trigger)
 
 	if (strlen(cmd) == 0)
 		return;
-	do {
-		next_cmd = strchr(cmd, '\n');
-		if (next_cmd) {
-			next_cmd[0] = 0;
-			next_cmd += 1;
-		}
-		__cpcmd(cmd, NULL, 0, NULL);
-		cmd = next_cmd;
-	} while (cmd != NULL);
+	__cpcmd(cmd, NULL, 0, NULL);
 }
 
 static int vmcmd_init(void)
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 743c0f32fe3b..f86c81e13c37 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -302,8 +302,8 @@ static int __init parse_vmalloc(char *arg)
 }
 early_param("vmalloc", parse_vmalloc);
 
-unsigned int user_mode = HOME_SPACE_MODE;
-EXPORT_SYMBOL_GPL(user_mode);
+unsigned int addressing_mode = HOME_SPACE_MODE;
+EXPORT_SYMBOL_GPL(addressing_mode);
 
 static int set_amode_primary(void)
 {
@@ -328,7 +328,7 @@
  */
 static int __init early_parse_switch_amode(char *p)
 {
-	user_mode = PRIMARY_SPACE_MODE;
+	addressing_mode = PRIMARY_SPACE_MODE;
 	return 0;
 }
 early_param("switch_amode", early_parse_switch_amode);
@@ -336,9 +336,9 @@ early_param("switch_amode", early_parse_switch_amode);
 static int __init early_parse_user_mode(char *p)
 {
 	if (p && strcmp(p, "primary") == 0)
-		user_mode = PRIMARY_SPACE_MODE;
+		addressing_mode = PRIMARY_SPACE_MODE;
 	else if (!p || strcmp(p, "home") == 0)
-		user_mode = HOME_SPACE_MODE;
+		addressing_mode = HOME_SPACE_MODE;
 	else
 		return 1;
 	return 0;
@@ -347,7 +347,7 @@ early_param("user_mode", early_parse_user_mode);
 
 static void setup_addressing_mode(void)
 {
-	if (user_mode == PRIMARY_SPACE_MODE) {
+	if (addressing_mode == PRIMARY_SPACE_MODE) {
 		if (set_amode_primary())
 			pr_info("Address spaces switched, "
 				"mvcos available\n");
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index af2421a0f315..01775c04a90e 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -185,7 +185,7 @@ void show_registers(struct pt_regs *regs)
 {
 	char *mode;
 
-	mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
+	mode = user_mode(regs) ? "User" : "Krnl";
 	printk("%s PSW : %p %p",
 	       mode, (void *) regs->psw.mask,
 	       (void *) regs->psw.addr);
@@ -225,7 +225,7 @@ void show_regs(struct pt_regs *regs)
 	       (void *) current->thread.ksp);
 	show_registers(regs);
 	/* Show stack backtrace if pt_regs is from kernel mode */
-	if (!(regs->psw.mask & PSW_MASK_PSTATE))
+	if (!user_mode(regs))
 		show_trace(NULL, (unsigned long *) regs->gprs[15]);
 	show_last_breaking_event(regs);
 }
@@ -300,7 +300,7 @@ static void __kprobes do_trap(struct pt_regs *regs,
 			regs->int_code, si_signo) == NOTIFY_STOP)
 		return;
 
-	if (regs->psw.mask & PSW_MASK_PSTATE) {
+	if (user_mode(regs)) {
 		info.si_signo = si_signo;
 		info.si_errno = 0;
 		info.si_code = si_code;
@@ -341,7 +341,7 @@ void __kprobes do_per_trap(struct pt_regs *regs)
 
 static void default_trap_handler(struct pt_regs *regs)
 {
-	if (regs->psw.mask & PSW_MASK_PSTATE) {
+	if (user_mode(regs)) {
 		report_user_fault(regs, SIGSEGV);
 		do_exit(SIGSEGV);
 	} else
@@ -410,7 +410,7 @@ static void __kprobes illegal_op(struct pt_regs *regs)
 
 	location = get_psw_address(regs);
 
-	if (regs->psw.mask & PSW_MASK_PSTATE) {
+	if (user_mode(regs)) {
 		if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
 			return;
 		if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
@@ -478,7 +478,7 @@ void specification_exception(struct pt_regs *regs)
 
 	location = (__u16 __user *) get_psw_address(regs);
 
-	if (regs->psw.mask & PSW_MASK_PSTATE) {
+	if (user_mode(regs)) {
 		get_user(*((__u16 *) opcode), location);
 		switch (opcode[0]) {
 		case 0x28:	/* LDR Rx,Ry */
@@ -531,7 +531,7 @@ static void data_exception(struct pt_regs *regs)
 		asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
 
 #ifdef CONFIG_MATHEMU
-	else if (regs->psw.mask & PSW_MASK_PSTATE) {
+	else if (user_mode(regs)) {
 		__u8 opcode[6];
 		get_user(*((__u16 *) opcode), location);
 		switch (opcode[0]) {
@@ -598,7 +598,7 @@
 static void space_switch_exception(struct pt_regs *regs)
 {
 	/* Set user psw back to home space mode. */
-	if (regs->psw.mask & PSW_MASK_PSTATE)
+	if (user_mode(regs))
 		regs->psw.mask |= PSW_ASC_HOME;
 	/* Send SIGILL. */
 	do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event");
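Each hunk above replaces the open-coded PSW problem-state test with the user_mode() predicate. On s390 the helper reduces to the same bit test; a sketch of the equivalence (the real definition lives in the s390 ptrace headers, not in this patch):

/* Sketch only: equivalent open-coded form of user_mode() as used above. */
static inline int example_user_mode(struct pt_regs *regs)
{
	/* Problem-state bit set => the interrupted context was user space. */
	return (regs->psw.mask & PSW_MASK_PSTATE) != 0;
}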
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index ea5590fdca3b..9a19ca367c17 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -84,7 +84,8 @@ struct vdso_data *vdso_data = &vdso_data_store.data;
  */
 static void vdso_init_data(struct vdso_data *vd)
 {
-	vd->ectg_available = user_mode != HOME_SPACE_MODE && test_facility(31);
+	vd->ectg_available =
+		addressing_mode != HOME_SPACE_MODE && test_facility(31);
 }
 
 #ifdef CONFIG_64BIT
@@ -101,7 +102,7 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)
 
 	lowcore->vdso_per_cpu_data = __LC_PASTE;
 
-	if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
+	if (addressing_mode == HOME_SPACE_MODE || !vdso_enabled)
 		return 0;
 
 	segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
@@ -146,7 +147,7 @@ void vdso_free_per_cpu(struct _lowcore *lowcore)
 	unsigned long segment_table, page_table, page_frame;
 	u32 *psal, *aste;
 
-	if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
+	if (addressing_mode == HOME_SPACE_MODE || !vdso_enabled)
 		return;
 
 	psal = (u32 *)(addr_t) lowcore->paste[4];
@@ -164,7 +165,7 @@ static void vdso_init_cr5(void)
 {
 	unsigned long cr5;
 
-	if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
+	if (addressing_mode == HOME_SPACE_MODE || !vdso_enabled)
 		return;
 	cr5 = offsetof(struct _lowcore, paste);
 	__ctl_load(cr5, 5, 5);
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 21109c63eb12..de8fa9bbd35e 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -45,7 +45,7 @@ SECTIONS
 
 	.dummy : { *(.dummy) } :data
 
-	RODATA
+	RO_DATA_SECTION(PAGE_SIZE)
 
 #ifdef CONFIG_SHARED_KERNEL
 	. = ALIGN(0x100000);	/* VM shared segments are 1MB aligned */
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 6a12d1bb6e09..6c013f544146 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -49,6 +49,7 @@
 #define VM_FAULT_BADCONTEXT	0x010000
 #define VM_FAULT_BADMAP		0x020000
 #define VM_FAULT_BADACCESS	0x040000
+#define VM_FAULT_SIGNAL		0x080000
 
 static unsigned long store_indication;
 
@@ -110,7 +111,7 @@ static inline int user_space_fault(unsigned long trans_exc_code)
 	if (trans_exc_code == 2)
 		/* Access via secondary space, set_fs setting decides */
 		return current->thread.mm_segment.ar4;
-	if (user_mode == HOME_SPACE_MODE)
+	if (addressing_mode == HOME_SPACE_MODE)
 		/* User space if the access has been done via home space. */
 		return trans_exc_code == 3;
 	/*
@@ -219,7 +220,7 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
 	case VM_FAULT_BADACCESS:
 	case VM_FAULT_BADMAP:
 		/* Bad memory access. Check if it is kernel or user space. */
-		if (regs->psw.mask & PSW_MASK_PSTATE) {
+		if (user_mode(regs)) {
 			/* User mode accesses just cause a SIGSEGV */
 			si_code = (fault == VM_FAULT_BADMAP) ?
 				SEGV_MAPERR : SEGV_ACCERR;
@@ -229,15 +230,19 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
 	case VM_FAULT_BADCONTEXT:
 		do_no_context(regs);
 		break;
+	case VM_FAULT_SIGNAL:
+		if (!user_mode(regs))
+			do_no_context(regs);
+		break;
 	default: /* fault & VM_FAULT_ERROR */
 		if (fault & VM_FAULT_OOM) {
-			if (!(regs->psw.mask & PSW_MASK_PSTATE))
+			if (!user_mode(regs))
 				do_no_context(regs);
 			else
 				pagefault_out_of_memory();
 		} else if (fault & VM_FAULT_SIGBUS) {
 			/* Kernel mode? Handle exceptions or die */
-			if (!(regs->psw.mask & PSW_MASK_PSTATE))
+			if (!user_mode(regs))
 				do_no_context(regs);
 			else
 				do_sigbus(regs);
@@ -286,7 +291,7 @@ static inline int do_exception(struct pt_regs *regs, int access)
 
 	address = trans_exc_code & __FAIL_ADDR_MASK;
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
-	flags = FAULT_FLAG_ALLOW_RETRY;
+	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
 		flags |= FAULT_FLAG_WRITE;
 	down_read(&mm->mmap_sem);
@@ -335,6 +340,11 @@ retry:
 	 * the fault.
 	 */
 	fault = handle_mm_fault(mm, vma, address, flags);
+	/* No reason to continue if interrupted by SIGKILL. */
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+		fault = VM_FAULT_SIGNAL;
+		goto out;
+	}
 	if (unlikely(fault & VM_FAULT_ERROR))
 		goto out_up;
 
@@ -426,7 +436,7 @@ void __kprobes do_asce_exception(struct pt_regs *regs)
 	}
 
 	/* User mode accesses just cause a SIGSEGV */
-	if (regs->psw.mask & PSW_MASK_PSTATE) {
+	if (user_mode(regs)) {
 		do_sigsegv(regs, SEGV_MAPERR);
 		return;
 	}
@@ -441,6 +451,7 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
 	struct pt_regs regs;
 	int access, fault;
 
+	/* Emulate a uaccess fault from kernel mode. */
 	regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK;
 	if (!irqs_disabled())
 		regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
@@ -450,12 +461,12 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
 	regs.int_parm_long = (uaddr & PAGE_MASK) | 2;
 	access = write ? VM_WRITE : VM_READ;
 	fault = do_exception(&regs, access);
-	if (unlikely(fault)) {
-		if (fault & VM_FAULT_OOM)
-			return -EFAULT;
-		else if (fault & VM_FAULT_SIGBUS)
-			do_sigbus(&regs);
-	}
+	/*
+	 * Since the fault happened in kernel mode while performing a uaccess
+	 * all we need to do now is to emulate a fixup in case "fault" is not
+	 * zero.
+	 * For the calling uaccess functions this always results in -EFAULT.
+	 */
 	return fault ? -EFAULT : 0;
 }
 
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 573384256c5c..c59a5efa58b1 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -103,9 +103,15 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 
 int s390_mmap_check(unsigned long addr, unsigned long len)
 {
+	int rc;
+
 	if (!is_compat_task() &&
-	    len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
-		return crst_table_upgrade(current->mm, 1UL << 53);
+	    len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) {
+		rc = crst_table_upgrade(current->mm, 1UL << 53);
+		if (rc)
+			return rc;
+		update_mm(current->mm, current);
+	}
 	return 0;
 }
 
@@ -125,6 +131,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
 		rc = crst_table_upgrade(mm, 1UL << 53);
 		if (rc)
 			return (unsigned long) rc;
+		update_mm(mm, current);
 		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
 	}
 	return area;
@@ -147,6 +154,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
 		rc = crst_table_upgrade(mm, 1UL << 53);
 		if (rc)
 			return (unsigned long) rc;
+		update_mm(mm, current);
 		area = arch_get_unmapped_area_topdown(filp, addr, len,
 						      pgoff, flags);
 	}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 1cab221077cc..18df31d1f2c9 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -85,7 +85,6 @@ repeat:
 	crst_table_free(mm, table);
 	if (mm->context.asce_limit < limit)
 		goto repeat;
-	update_mm(mm, current);
 	return 0;
 }
 
@@ -93,9 +92,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 {
 	pgd_t *pgd;
 
-	if (mm->context.asce_limit <= limit)
-		return;
-	__tlb_flush_mm(mm);
 	while (mm->context.asce_limit > limit) {
 		pgd = mm->pgd;
 		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
@@ -118,7 +114,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 		mm->task_size = mm->context.asce_limit;
 		crst_table_free(mm, (unsigned long *) pgd);
 	}
-	update_mm(mm, current);
 }
 #endif
 
@@ -801,7 +796,7 @@ int s390_enable_sie(void)
 	struct mm_struct *mm, *old_mm;
 
 	/* Do we have switched amode? If no, we cannot do sie */
-	if (user_mode == HOME_SPACE_MODE)
+	if (addressing_mode == HOME_SPACE_MODE)
 		return -EINVAL;
 
 	/* Do we have pgstes? if yes, we are done */
diff --git a/arch/s390/oprofile/backtrace.c b/arch/s390/oprofile/backtrace.c
index c82f62fb9c28..8a6811b2cdb9 100644
--- a/arch/s390/oprofile/backtrace.c
+++ b/arch/s390/oprofile/backtrace.c
@@ -58,7 +58,7 @@ void s390_backtrace(struct pt_regs * const regs, unsigned int depth)
 	unsigned long head;
 	struct stack_frame* head_sf;
 
-	if (user_mode (regs))
+	if (user_mode(regs))
 		return;
 
 	head = regs->gprs[15];
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c index 435e406fdec3..81d92fc9983b 100644 --- a/arch/sparc/kernel/ldc.c +++ b/arch/sparc/kernel/ldc.c | |||
@@ -1250,14 +1250,12 @@ int ldc_bind(struct ldc_channel *lp, const char *name) | |||
1250 | snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name); | 1250 | snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name); |
1251 | snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name); | 1251 | snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name); |
1252 | 1252 | ||
1253 | err = request_irq(lp->cfg.rx_irq, ldc_rx, | 1253 | err = request_irq(lp->cfg.rx_irq, ldc_rx, IRQF_DISABLED, |
1254 | IRQF_SAMPLE_RANDOM | IRQF_DISABLED, | ||
1255 | lp->rx_irq_name, lp); | 1254 | lp->rx_irq_name, lp); |
1256 | if (err) | 1255 | if (err) |
1257 | return err; | 1256 | return err; |
1258 | 1257 | ||
1259 | err = request_irq(lp->cfg.tx_irq, ldc_tx, | 1258 | err = request_irq(lp->cfg.tx_irq, ldc_tx, IRQF_DISABLED, |
1260 | IRQF_SAMPLE_RANDOM | IRQF_DISABLED, | ||
1261 | lp->tx_irq_name, lp); | 1259 | lp->tx_irq_name, lp); |
1262 | if (err) { | 1260 | if (err) { |
1263 | free_irq(lp->cfg.rx_irq, lp); | 1261 | free_irq(lp->cfg.rx_irq, lp); |
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c index acfd0e0fd0c9..ac9d25c8dc01 100644 --- a/arch/um/drivers/line.c +++ b/arch/um/drivers/line.c | |||
@@ -362,18 +362,18 @@ static irqreturn_t line_write_interrupt(int irq, void *data) | |||
362 | int line_setup_irq(int fd, int input, int output, struct line *line, void *data) | 362 | int line_setup_irq(int fd, int input, int output, struct line *line, void *data) |
363 | { | 363 | { |
364 | const struct line_driver *driver = line->driver; | 364 | const struct line_driver *driver = line->driver; |
365 | int err = 0, flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM; | 365 | int err = 0; |
366 | 366 | ||
367 | if (input) | 367 | if (input) |
368 | err = um_request_irq(driver->read_irq, fd, IRQ_READ, | 368 | err = um_request_irq(driver->read_irq, fd, IRQ_READ, |
369 | line_interrupt, flags, | 369 | line_interrupt, IRQF_SHARED, |
370 | driver->read_irq_name, data); | 370 | driver->read_irq_name, data); |
371 | if (err) | 371 | if (err) |
372 | return err; | 372 | return err; |
373 | if (output) | 373 | if (output) |
374 | err = um_request_irq(driver->write_irq, fd, IRQ_WRITE, | 374 | err = um_request_irq(driver->write_irq, fd, IRQ_WRITE, |
375 | line_write_interrupt, flags, | 375 | line_write_interrupt, IRQF_SHARED, |
376 | driver->write_irq_name, data); | 376 | driver->write_irq_name, data); |
377 | return err; | 377 | return err; |
378 | } | 378 | } |
379 | 379 | ||
@@ -779,8 +779,7 @@ void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty, | |||
779 | .stack = stack }); | 779 | .stack = stack }); |
780 | 780 | ||
781 | if (um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt, | 781 | if (um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt, |
782 | IRQF_SHARED | IRQF_SAMPLE_RANDOM, | 782 | IRQF_SHARED, "winch", winch) < 0) { |
783 | "winch", winch) < 0) { | ||
784 | printk(KERN_ERR "register_winch_irq - failed to register " | 783 | printk(KERN_ERR "register_winch_irq - failed to register " |
785 | "IRQ\n"); | 784 | "IRQ\n"); |
786 | goto out_free; | 785 | goto out_free; |
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c index 43b39d61b538..664a60e8dfb4 100644 --- a/arch/um/drivers/mconsole_kern.c +++ b/arch/um/drivers/mconsole_kern.c | |||
@@ -774,8 +774,7 @@ static int __init mconsole_init(void) | |||
774 | register_reboot_notifier(&reboot_notifier); | 774 | register_reboot_notifier(&reboot_notifier); |
775 | 775 | ||
776 | err = um_request_irq(MCONSOLE_IRQ, sock, IRQ_READ, mconsole_interrupt, | 776 | err = um_request_irq(MCONSOLE_IRQ, sock, IRQ_READ, mconsole_interrupt, |
777 | IRQF_SHARED | IRQF_SAMPLE_RANDOM, | 777 | IRQF_SHARED, "mconsole", (void *)sock); |
778 | "mconsole", (void *)sock); | ||
779 | if (err) { | 778 | if (err) { |
780 | printk(KERN_ERR "Failed to get IRQ for management console\n"); | 779 | printk(KERN_ERR "Failed to get IRQ for management console\n"); |
781 | goto out; | 780 | goto out; |
diff --git a/arch/um/drivers/port_kern.c b/arch/um/drivers/port_kern.c index 11866ffd45a9..1d83d50236e1 100644 --- a/arch/um/drivers/port_kern.c +++ b/arch/um/drivers/port_kern.c | |||
@@ -100,8 +100,7 @@ static int port_accept(struct port_list *port) | |||
100 | .port = port }); | 100 | .port = port }); |
101 | 101 | ||
102 | if (um_request_irq(TELNETD_IRQ, socket[0], IRQ_READ, pipe_interrupt, | 102 | if (um_request_irq(TELNETD_IRQ, socket[0], IRQ_READ, pipe_interrupt, |
103 | IRQF_SHARED | IRQF_SAMPLE_RANDOM, | 103 | IRQF_SHARED, "telnetd", conn)) { |
104 | "telnetd", conn)) { | ||
105 | printk(KERN_ERR "port_accept : failed to get IRQ for " | 104 | printk(KERN_ERR "port_accept : failed to get IRQ for " |
106 | "telnetd\n"); | 105 | "telnetd\n"); |
107 | goto out_free; | 106 | goto out_free; |
@@ -184,8 +183,7 @@ void *port_data(int port_num) | |||
184 | } | 183 | } |
185 | 184 | ||
186 | if (um_request_irq(ACCEPT_IRQ, fd, IRQ_READ, port_interrupt, | 185 | if (um_request_irq(ACCEPT_IRQ, fd, IRQ_READ, port_interrupt, |
187 | IRQF_SHARED | IRQF_SAMPLE_RANDOM, | 186 | IRQF_SHARED, "port", port)) { |
188 | "port", port)) { | ||
189 | printk(KERN_ERR "Failed to get IRQ for port %d\n", port_num); | 187 | printk(KERN_ERR "Failed to get IRQ for port %d\n", port_num); |
190 | goto out_close; | 188 | goto out_close; |
191 | } | 189 | } |
diff --git a/arch/um/drivers/random.c b/arch/um/drivers/random.c index b25296e6218a..e32c6aa6396f 100644 --- a/arch/um/drivers/random.c +++ b/arch/um/drivers/random.c | |||
@@ -131,8 +131,7 @@ static int __init rng_init (void) | |||
131 | random_fd = err; | 131 | random_fd = err; |
132 | 132 | ||
133 | err = um_request_irq(RANDOM_IRQ, random_fd, IRQ_READ, random_interrupt, | 133 | err = um_request_irq(RANDOM_IRQ, random_fd, IRQ_READ, random_interrupt, |
134 | IRQF_SAMPLE_RANDOM, "random", | 134 | 0, "random", NULL); |
135 | NULL); | ||
136 | if (err) | 135 | if (err) |
137 | goto err_out_cleanup_hw; | 136 | goto err_out_cleanup_hw; |
138 | 137 | ||
diff --git a/arch/um/drivers/xterm_kern.c b/arch/um/drivers/xterm_kern.c index b68bbe269e01..e3031e69445d 100644 --- a/arch/um/drivers/xterm_kern.c +++ b/arch/um/drivers/xterm_kern.c | |||
@@ -50,8 +50,7 @@ int xterm_fd(int socket, int *pid_out) | |||
50 | init_completion(&data->ready); | 50 | init_completion(&data->ready); |
51 | 51 | ||
52 | err = um_request_irq(XTERM_IRQ, socket, IRQ_READ, xterm_interrupt, | 52 | err = um_request_irq(XTERM_IRQ, socket, IRQ_READ, xterm_interrupt, |
53 | IRQF_SHARED | IRQF_SAMPLE_RANDOM, | 53 | IRQF_SHARED, "xterm", data); |
54 | "xterm", data); | ||
55 | if (err) { | 54 | if (err) { |
56 | printk(KERN_ERR "xterm_fd : failed to get IRQ for xterm, " | 55 | printk(KERN_ERR "xterm_fd : failed to get IRQ for xterm, " |
57 | "err = %d\n", err); | 56 | "err = %d\n", err); |
diff --git a/arch/um/kernel/sigio.c b/arch/um/kernel/sigio.c index 2a1639255763..c88211139a51 100644 --- a/arch/um/kernel/sigio.c +++ b/arch/um/kernel/sigio.c | |||
@@ -25,8 +25,7 @@ int write_sigio_irq(int fd) | |||
25 | int err; | 25 | int err; |
26 | 26 | ||
27 | err = um_request_irq(SIGIO_WRITE_IRQ, fd, IRQ_READ, sigio_interrupt, | 27 | err = um_request_irq(SIGIO_WRITE_IRQ, fd, IRQ_READ, sigio_interrupt, |
28 | IRQF_SAMPLE_RANDOM, "write sigio", | 28 | 0, "write sigio", NULL); |
29 | NULL); | ||
30 | if (err) { | 29 | if (err) { |
31 | printk(KERN_ERR "write_sigio_irq : um_request_irq failed, " | 30 | printk(KERN_ERR "write_sigio_irq : um_request_irq failed, " |
32 | "err = %d\n", err); | 31 | "err = %d\n", err); |
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index c78f14a0df00..dab39350e51e 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h | |||
@@ -234,7 +234,7 @@ extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr); | |||
234 | extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap); | 234 | extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap); |
235 | extern void perf_check_microcode(void); | 235 | extern void perf_check_microcode(void); |
236 | #else | 236 | #else |
237 | static inline perf_guest_switch_msr *perf_guest_get_msrs(int *nr) | 237 | static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr) |
238 | { | 238 | { |
239 | *nr = 0; | 239 | *nr = 0; |
240 | return NULL; | 240 | return NULL; |
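The stub previously declared its return type as perf_guest_switch_msr * without the struct keyword, which is not a valid type name in C unless a typedef exists, so the build with this stub broke. A self-contained illustration of why the keyword is required (names hypothetical):

```c
#include <stddef.h>

struct msr_pair { unsigned int msr; unsigned long long value; };

/*
 * Without the keyword the return type is an undeclared identifier
 * and the definition fails to compile, exactly like the stub did:
 *
 *	static inline msr_pair *get_msrs(int *nr);	// error in C
 */
static inline struct msr_pair *get_msrs(int *nr)
{
	*nr = 0;
	return NULL;
}

int main(void)
{
	int n = -1;

	return get_msrs(&n) == NULL && n == 0 ? 0 : 1;
}
```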
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index a15df4be151f..821d53b696d1 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h | |||
@@ -374,7 +374,7 @@ struct x86_pmu { | |||
374 | /* | 374 | /* |
375 | * Intel DebugStore bits | 375 | * Intel DebugStore bits |
376 | */ | 376 | */ |
377 | int bts :1, | 377 | unsigned int bts :1, |
378 | bts_active :1, | 378 | bts_active :1, |
379 | pebs :1, | 379 | pebs :1, |
380 | pebs_active :1, | 380 | pebs_active :1, |
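Turning these 1-bit fields from int into unsigned int is more than style: a signed 1-bit bitfield can represent only 0 and -1 on common ABIs, so bts = 1 reads back as -1 and a comparison against 1 silently fails. A runnable demonstration:

```c
#include <stdio.h>

struct flags_signed   { int          bts:1, pebs:1; };
struct flags_unsigned { unsigned int bts:1, pebs:1; };

int main(void)
{
	struct flags_signed   s = { 0 };
	struct flags_unsigned u = { 0 };

	s.bts = 1;	/* stored as -1 on two's-complement ABIs */
	u.bts = 1;

	printf("signed:   bts == 1? %d (reads back %d)\n", s.bts == 1, s.bts);
	printf("unsigned: bts == 1? %d (reads back %d)\n", u.bts == 1, u.bts);
	return 0;
}
```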
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 7a8b9d0abcaa..382366977d4c 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -138,6 +138,84 @@ static u64 intel_pmu_event_map(int hw_event) | |||
138 | return intel_perfmon_event_map[hw_event]; | 138 | return intel_perfmon_event_map[hw_event]; |
139 | } | 139 | } |
140 | 140 | ||
141 | #define SNB_DMND_DATA_RD (1ULL << 0) | ||
142 | #define SNB_DMND_RFO (1ULL << 1) | ||
143 | #define SNB_DMND_IFETCH (1ULL << 2) | ||
144 | #define SNB_DMND_WB (1ULL << 3) | ||
145 | #define SNB_PF_DATA_RD (1ULL << 4) | ||
146 | #define SNB_PF_RFO (1ULL << 5) | ||
147 | #define SNB_PF_IFETCH (1ULL << 6) | ||
148 | #define SNB_LLC_DATA_RD (1ULL << 7) | ||
149 | #define SNB_LLC_RFO (1ULL << 8) | ||
150 | #define SNB_LLC_IFETCH (1ULL << 9) | ||
151 | #define SNB_BUS_LOCKS (1ULL << 10) | ||
152 | #define SNB_STRM_ST (1ULL << 11) | ||
153 | #define SNB_OTHER (1ULL << 15) | ||
154 | #define SNB_RESP_ANY (1ULL << 16) | ||
155 | #define SNB_NO_SUPP (1ULL << 17) | ||
156 | #define SNB_LLC_HITM (1ULL << 18) | ||
157 | #define SNB_LLC_HITE (1ULL << 19) | ||
158 | #define SNB_LLC_HITS (1ULL << 20) | ||
159 | #define SNB_LLC_HITF (1ULL << 21) | ||
160 | #define SNB_LOCAL (1ULL << 22) | ||
161 | #define SNB_REMOTE (0xffULL << 23) | ||
162 | #define SNB_SNP_NONE (1ULL << 31) | ||
163 | #define SNB_SNP_NOT_NEEDED (1ULL << 32) | ||
164 | #define SNB_SNP_MISS (1ULL << 33) | ||
165 | #define SNB_NO_FWD (1ULL << 34) | ||
166 | #define SNB_SNP_FWD (1ULL << 35) | ||
167 | #define SNB_HITM (1ULL << 36) | ||
168 | #define SNB_NON_DRAM (1ULL << 37) | ||
169 | |||
170 | #define SNB_DMND_READ (SNB_DMND_DATA_RD|SNB_LLC_DATA_RD) | ||
171 | #define SNB_DMND_WRITE (SNB_DMND_RFO|SNB_LLC_RFO) | ||
172 | #define SNB_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO) | ||
173 | |||
174 | #define SNB_SNP_ANY (SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \ | ||
175 | SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \ | ||
176 | SNB_HITM) | ||
177 | |||
178 | #define SNB_DRAM_ANY (SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY) | ||
179 | #define SNB_DRAM_REMOTE (SNB_REMOTE|SNB_SNP_ANY) | ||
180 | |||
181 | #define SNB_L3_ACCESS SNB_RESP_ANY | ||
182 | #define SNB_L3_MISS (SNB_DRAM_ANY|SNB_NON_DRAM) | ||
183 | |||
184 | static __initconst const u64 snb_hw_cache_extra_regs | ||
185 | [PERF_COUNT_HW_CACHE_MAX] | ||
186 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
187 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | ||
188 | { | ||
189 | [ C(LL ) ] = { | ||
190 | [ C(OP_READ) ] = { | ||
191 | [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS, | ||
192 | [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_L3_MISS, | ||
193 | }, | ||
194 | [ C(OP_WRITE) ] = { | ||
195 | [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS, | ||
196 | [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_L3_MISS, | ||
197 | }, | ||
198 | [ C(OP_PREFETCH) ] = { | ||
199 | [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS, | ||
200 | [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_L3_MISS, | ||
201 | }, | ||
202 | }, | ||
203 | [ C(NODE) ] = { | ||
204 | [ C(OP_READ) ] = { | ||
205 | [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY, | ||
206 | [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_DRAM_REMOTE, | ||
207 | }, | ||
208 | [ C(OP_WRITE) ] = { | ||
209 | [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY, | ||
210 | [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE, | ||
211 | }, | ||
212 | [ C(OP_PREFETCH) ] = { | ||
213 | [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY, | ||
214 | [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE, | ||
215 | }, | ||
216 | }, | ||
217 | }; | ||
218 | |||
141 | static __initconst const u64 snb_hw_cache_event_ids | 219 | static __initconst const u64 snb_hw_cache_event_ids |
142 | [PERF_COUNT_HW_CACHE_MAX] | 220 | [PERF_COUNT_HW_CACHE_MAX] |
143 | [PERF_COUNT_HW_CACHE_OP_MAX] | 221 | [PERF_COUNT_HW_CACHE_OP_MAX] |
@@ -235,16 +313,16 @@ static __initconst const u64 snb_hw_cache_event_ids | |||
235 | }, | 313 | }, |
236 | [ C(NODE) ] = { | 314 | [ C(NODE) ] = { |
237 | [ C(OP_READ) ] = { | 315 | [ C(OP_READ) ] = { |
238 | [ C(RESULT_ACCESS) ] = -1, | 316 | [ C(RESULT_ACCESS) ] = 0x01b7, |
239 | [ C(RESULT_MISS) ] = -1, | 317 | [ C(RESULT_MISS) ] = 0x01b7, |
240 | }, | 318 | }, |
241 | [ C(OP_WRITE) ] = { | 319 | [ C(OP_WRITE) ] = { |
242 | [ C(RESULT_ACCESS) ] = -1, | 320 | [ C(RESULT_ACCESS) ] = 0x01b7, |
243 | [ C(RESULT_MISS) ] = -1, | 321 | [ C(RESULT_MISS) ] = 0x01b7, |
244 | }, | 322 | }, |
245 | [ C(OP_PREFETCH) ] = { | 323 | [ C(OP_PREFETCH) ] = { |
246 | [ C(RESULT_ACCESS) ] = -1, | 324 | [ C(RESULT_ACCESS) ] = 0x01b7, |
247 | [ C(RESULT_MISS) ] = -1, | 325 | [ C(RESULT_MISS) ] = 0x01b7, |
248 | }, | 326 | }, |
249 | }, | 327 | }, |
250 | 328 | ||
@@ -1964,6 +2042,8 @@ __init int intel_pmu_init(void) | |||
1964 | case 58: /* IvyBridge */ | 2042 | case 58: /* IvyBridge */ |
1965 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, | 2043 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, |
1966 | sizeof(hw_cache_event_ids)); | 2044 | sizeof(hw_cache_event_ids)); |
2045 | memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, | ||
2046 | sizeof(hw_cache_extra_regs)); | ||
1967 | 2047 | ||
1968 | intel_pmu_lbr_init_snb(); | 2048 | intel_pmu_lbr_init_snb(); |
1969 | 2049 | ||
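The -1 placeholders in the NODE rows become 0x01b7, the OFFCORE_RESPONSE_0 event; the actual request/response selection moves into the new snb_hw_cache_extra_regs table, whose masks are programmed into the matching offcore-response MSR via config1. A small runnable check, with values copied from the hunk, that each composed mask carries both a request part (low bits) and a response part (bit 16 and up):

```c
#include <stdio.h>

/* values copied from the snb_hw_cache_extra_regs hunk */
#define SNB_DMND_DATA_RD	(1ULL << 0)
#define SNB_LLC_DATA_RD		(1ULL << 7)
#define SNB_RESP_ANY		(1ULL << 16)

#define SNB_DMND_READ	(SNB_DMND_DATA_RD | SNB_LLC_DATA_RD)
#define SNB_L3_ACCESS	SNB_RESP_ANY

int main(void)
{
	/* LL read access: demand-read request bits + "any response" */
	unsigned long long config1 = SNB_DMND_READ | SNB_L3_ACCESS;

	printf("event 0x01b7, extra reg %#llx\n", config1);
	printf("  request bits:  %#llx\n", config1 & 0xffffULL);
	printf("  response bits: %#llx\n", config1 & ~0xffffULL);
	return 0;
}
```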
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index 19faffc60886..7563fda9f033 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c | |||
@@ -18,6 +18,7 @@ static struct event_constraint constraint_empty = | |||
18 | EVENT_CONSTRAINT(0, 0, 0); | 18 | EVENT_CONSTRAINT(0, 0, 0); |
19 | 19 | ||
20 | DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); | 20 | DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); |
21 | DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21"); | ||
21 | DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); | 22 | DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); |
22 | DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); | 23 | DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); |
23 | DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19"); | 24 | DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19"); |
@@ -33,10 +34,81 @@ DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4"); | |||
33 | DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17"); | 34 | DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17"); |
34 | DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22"); | 35 | DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22"); |
35 | DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31"); | 36 | DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31"); |
36 | DEFINE_UNCORE_FORMAT_ATTR(filter_brand0, filter_brand0, "config1:0-7"); | 37 | DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7"); |
37 | DEFINE_UNCORE_FORMAT_ATTR(filter_brand1, filter_brand1, "config1:8-15"); | 38 | DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15"); |
38 | DEFINE_UNCORE_FORMAT_ATTR(filter_brand2, filter_brand2, "config1:16-23"); | 39 | DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23"); |
39 | DEFINE_UNCORE_FORMAT_ATTR(filter_brand3, filter_brand3, "config1:24-31"); | 40 | DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31"); |
41 | |||
42 | static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event) | ||
43 | { | ||
44 | u64 count; | ||
45 | |||
46 | rdmsrl(event->hw.event_base, count); | ||
47 | |||
48 | return count; | ||
49 | } | ||
50 | |||
51 | /* | ||
52 | * generic get constraint function for shared match/mask registers. | ||
53 | */ | ||
54 | static struct event_constraint * | ||
55 | uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
56 | { | ||
57 | struct intel_uncore_extra_reg *er; | ||
58 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
59 | struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; | ||
60 | unsigned long flags; | ||
61 | bool ok = false; | ||
62 | |||
63 | /* | ||
64 | * reg->alloc can be set due to existing state, so for fake box we | ||
65 | * need to ignore this, otherwise we might fail to allocate proper | ||
66 | * fake state for this extra reg constraint. | ||
67 | */ | ||
68 | if (reg1->idx == EXTRA_REG_NONE || | ||
69 | (!uncore_box_is_fake(box) && reg1->alloc)) | ||
70 | return NULL; | ||
71 | |||
72 | er = &box->shared_regs[reg1->idx]; | ||
73 | raw_spin_lock_irqsave(&er->lock, flags); | ||
74 | if (!atomic_read(&er->ref) || | ||
75 | (er->config1 == reg1->config && er->config2 == reg2->config)) { | ||
76 | atomic_inc(&er->ref); | ||
77 | er->config1 = reg1->config; | ||
78 | er->config2 = reg2->config; | ||
79 | ok = true; | ||
80 | } | ||
81 | raw_spin_unlock_irqrestore(&er->lock, flags); | ||
82 | |||
83 | if (ok) { | ||
84 | if (!uncore_box_is_fake(box)) | ||
85 | reg1->alloc = 1; | ||
86 | return NULL; | ||
87 | } | ||
88 | |||
89 | return &constraint_empty; | ||
90 | } | ||
91 | |||
92 | static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
93 | { | ||
94 | struct intel_uncore_extra_reg *er; | ||
95 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
96 | |||
97 | /* | ||
98 | * Only put constraint if extra reg was actually allocated. Also | ||
99 | * takes care of events which do not use an extra shared reg. | ||
100 | * | ||
101 | * Also, if this is a fake box we shouldn't touch any event state | ||
102 | * (reg->alloc) and we don't care about leaving inconsistent box | ||
103 | * state either since it will be thrown out. | ||
104 | */ | ||
105 | if (uncore_box_is_fake(box) || !reg1->alloc) | ||
106 | return; | ||
107 | |||
108 | er = &box->shared_regs[reg1->idx]; | ||
109 | atomic_dec(&er->ref); | ||
110 | reg1->alloc = 0; | ||
111 | } | ||
40 | 112 | ||
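uncore_msr_read_counter(), uncore_get_constraint() and uncore_put_constraint() generalize the SNB-EP-only helpers removed further down: acquisition succeeds only if the shared extra register is free or already holds an identical config, bumping a reference count either way, and release just drops the reference. A compact userspace model of the try-acquire pattern (locking and the fake-box special case omitted):

```c
#include <stdbool.h>
#include <stdio.h>

struct shared_reg { int ref; unsigned long long config; };

/* succeed if the register is free or already holds our config */
static bool try_get(struct shared_reg *er, unsigned long long cfg)
{
	if (er->ref == 0 || er->config == cfg) {
		er->ref++;
		er->config = cfg;
		return true;
	}
	return false;	/* kernel returns &constraint_empty here */
}

static void put_ref(struct shared_reg *er)
{
	er->ref--;
}

int main(void)
{
	struct shared_reg er = { 0, 0 };

	printf("%d\n", try_get(&er, 0xabc));	/* 1: was free     */
	printf("%d\n", try_get(&er, 0xabc));	/* 1: same config  */
	printf("%d\n", try_get(&er, 0xdef));	/* 0: conflict     */
	put_ref(&er);
	put_ref(&er);
	return 0;
}
```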
41 | /* Sandy Bridge-EP uncore support */ | 113 | /* Sandy Bridge-EP uncore support */ |
42 | static struct intel_uncore_type snbep_uncore_cbox; | 114 | static struct intel_uncore_type snbep_uncore_cbox; |
@@ -64,18 +136,15 @@ static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box) | |||
64 | pci_write_config_dword(pdev, box_ctl, config); | 136 | pci_write_config_dword(pdev, box_ctl, config); |
65 | } | 137 | } |
66 | 138 | ||
67 | static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, | 139 | static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event) |
68 | struct perf_event *event) | ||
69 | { | 140 | { |
70 | struct pci_dev *pdev = box->pci_dev; | 141 | struct pci_dev *pdev = box->pci_dev; |
71 | struct hw_perf_event *hwc = &event->hw; | 142 | struct hw_perf_event *hwc = &event->hw; |
72 | 143 | ||
73 | pci_write_config_dword(pdev, hwc->config_base, hwc->config | | 144 | pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); |
74 | SNBEP_PMON_CTL_EN); | ||
75 | } | 145 | } |
76 | 146 | ||
77 | static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, | 147 | static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event) |
78 | struct perf_event *event) | ||
79 | { | 148 | { |
80 | struct pci_dev *pdev = box->pci_dev; | 149 | struct pci_dev *pdev = box->pci_dev; |
81 | struct hw_perf_event *hwc = &event->hw; | 150 | struct hw_perf_event *hwc = &event->hw; |
@@ -83,8 +152,7 @@ static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, | |||
83 | pci_write_config_dword(pdev, hwc->config_base, hwc->config); | 152 | pci_write_config_dword(pdev, hwc->config_base, hwc->config); |
84 | } | 153 | } |
85 | 154 | ||
86 | static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, | 155 | static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event) |
87 | struct perf_event *event) | ||
88 | { | 156 | { |
89 | struct pci_dev *pdev = box->pci_dev; | 157 | struct pci_dev *pdev = box->pci_dev; |
90 | struct hw_perf_event *hwc = &event->hw; | 158 | struct hw_perf_event *hwc = &event->hw; |
@@ -92,14 +160,15 @@ static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, | |||
92 | 160 | ||
93 | pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count); | 161 | pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count); |
94 | pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1); | 162 | pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1); |
163 | |||
95 | return count; | 164 | return count; |
96 | } | 165 | } |
97 | 166 | ||
98 | static void snbep_uncore_pci_init_box(struct intel_uncore_box *box) | 167 | static void snbep_uncore_pci_init_box(struct intel_uncore_box *box) |
99 | { | 168 | { |
100 | struct pci_dev *pdev = box->pci_dev; | 169 | struct pci_dev *pdev = box->pci_dev; |
101 | pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, | 170 | |
102 | SNBEP_PMON_BOX_CTL_INT); | 171 | pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT); |
103 | } | 172 | } |
104 | 173 | ||
105 | static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box) | 174 | static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box) |
@@ -112,7 +181,6 @@ static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box) | |||
112 | rdmsrl(msr, config); | 181 | rdmsrl(msr, config); |
113 | config |= SNBEP_PMON_BOX_CTL_FRZ; | 182 | config |= SNBEP_PMON_BOX_CTL_FRZ; |
114 | wrmsrl(msr, config); | 183 | wrmsrl(msr, config); |
115 | return; | ||
116 | } | 184 | } |
117 | } | 185 | } |
118 | 186 | ||
@@ -126,12 +194,10 @@ static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box) | |||
126 | rdmsrl(msr, config); | 194 | rdmsrl(msr, config); |
127 | config &= ~SNBEP_PMON_BOX_CTL_FRZ; | 195 | config &= ~SNBEP_PMON_BOX_CTL_FRZ; |
128 | wrmsrl(msr, config); | 196 | wrmsrl(msr, config); |
129 | return; | ||
130 | } | 197 | } |
131 | } | 198 | } |
132 | 199 | ||
133 | static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, | 200 | static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) |
134 | struct perf_event *event) | ||
135 | { | 201 | { |
136 | struct hw_perf_event *hwc = &event->hw; | 202 | struct hw_perf_event *hwc = &event->hw; |
137 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | 203 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; |
@@ -150,68 +216,15 @@ static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box, | |||
150 | wrmsrl(hwc->config_base, hwc->config); | 216 | wrmsrl(hwc->config_base, hwc->config); |
151 | } | 217 | } |
152 | 218 | ||
153 | static u64 snbep_uncore_msr_read_counter(struct intel_uncore_box *box, | ||
154 | struct perf_event *event) | ||
155 | { | ||
156 | struct hw_perf_event *hwc = &event->hw; | ||
157 | u64 count; | ||
158 | |||
159 | rdmsrl(hwc->event_base, count); | ||
160 | return count; | ||
161 | } | ||
162 | |||
163 | static void snbep_uncore_msr_init_box(struct intel_uncore_box *box) | 219 | static void snbep_uncore_msr_init_box(struct intel_uncore_box *box) |
164 | { | 220 | { |
165 | unsigned msr = uncore_msr_box_ctl(box); | 221 | unsigned msr = uncore_msr_box_ctl(box); |
222 | |||
166 | if (msr) | 223 | if (msr) |
167 | wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT); | 224 | wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT); |
168 | } | 225 | } |
169 | 226 | ||
170 | static struct event_constraint * | 227 | static int snbep_uncore_hw_config(struct intel_uncore_box *box, struct perf_event *event) |
171 | snbep_uncore_get_constraint(struct intel_uncore_box *box, | ||
172 | struct perf_event *event) | ||
173 | { | ||
174 | struct intel_uncore_extra_reg *er; | ||
175 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
176 | unsigned long flags; | ||
177 | bool ok = false; | ||
178 | |||
179 | if (reg1->idx == EXTRA_REG_NONE || (box->phys_id >= 0 && reg1->alloc)) | ||
180 | return NULL; | ||
181 | |||
182 | er = &box->shared_regs[reg1->idx]; | ||
183 | raw_spin_lock_irqsave(&er->lock, flags); | ||
184 | if (!atomic_read(&er->ref) || er->config1 == reg1->config) { | ||
185 | atomic_inc(&er->ref); | ||
186 | er->config1 = reg1->config; | ||
187 | ok = true; | ||
188 | } | ||
189 | raw_spin_unlock_irqrestore(&er->lock, flags); | ||
190 | |||
191 | if (ok) { | ||
192 | if (box->phys_id >= 0) | ||
193 | reg1->alloc = 1; | ||
194 | return NULL; | ||
195 | } | ||
196 | return &constraint_empty; | ||
197 | } | ||
198 | |||
199 | static void snbep_uncore_put_constraint(struct intel_uncore_box *box, | ||
200 | struct perf_event *event) | ||
201 | { | ||
202 | struct intel_uncore_extra_reg *er; | ||
203 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
204 | |||
205 | if (box->phys_id < 0 || !reg1->alloc) | ||
206 | return; | ||
207 | |||
208 | er = &box->shared_regs[reg1->idx]; | ||
209 | atomic_dec(&er->ref); | ||
210 | reg1->alloc = 0; | ||
211 | } | ||
212 | |||
213 | static int snbep_uncore_hw_config(struct intel_uncore_box *box, | ||
214 | struct perf_event *event) | ||
215 | { | 228 | { |
216 | struct hw_perf_event *hwc = &event->hw; | 229 | struct hw_perf_event *hwc = &event->hw; |
217 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | 230 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; |
@@ -221,14 +234,16 @@ static int snbep_uncore_hw_config(struct intel_uncore_box *box, | |||
221 | SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx; | 234 | SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx; |
222 | reg1->config = event->attr.config1 & | 235 | reg1->config = event->attr.config1 & |
223 | SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK; | 236 | SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK; |
224 | } else if (box->pmu->type == &snbep_uncore_pcu) { | ||
225 | reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER; | ||
226 | reg1->config = event->attr.config1 & | ||
227 | SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK; | ||
228 | } else { | 237 | } else { |
229 | return 0; | 238 | if (box->pmu->type == &snbep_uncore_pcu) { |
239 | reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER; | ||
240 | reg1->config = event->attr.config1 & SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK; | ||
241 | } else { | ||
242 | return 0; | ||
243 | } | ||
230 | } | 244 | } |
231 | reg1->idx = 0; | 245 | reg1->idx = 0; |
246 | |||
232 | return 0; | 247 | return 0; |
233 | } | 248 | } |
234 | 249 | ||
@@ -272,10 +287,19 @@ static struct attribute *snbep_uncore_pcu_formats_attr[] = { | |||
272 | &format_attr_thresh5.attr, | 287 | &format_attr_thresh5.attr, |
273 | &format_attr_occ_invert.attr, | 288 | &format_attr_occ_invert.attr, |
274 | &format_attr_occ_edge.attr, | 289 | &format_attr_occ_edge.attr, |
275 | &format_attr_filter_brand0.attr, | 290 | &format_attr_filter_band0.attr, |
276 | &format_attr_filter_brand1.attr, | 291 | &format_attr_filter_band1.attr, |
277 | &format_attr_filter_brand2.attr, | 292 | &format_attr_filter_band2.attr, |
278 | &format_attr_filter_brand3.attr, | 293 | &format_attr_filter_band3.attr, |
294 | NULL, | ||
295 | }; | ||
296 | |||
297 | static struct attribute *snbep_uncore_qpi_formats_attr[] = { | ||
298 | &format_attr_event_ext.attr, | ||
299 | &format_attr_umask.attr, | ||
300 | &format_attr_edge.attr, | ||
301 | &format_attr_inv.attr, | ||
302 | &format_attr_thresh8.attr, | ||
279 | NULL, | 303 | NULL, |
280 | }; | 304 | }; |
281 | 305 | ||
@@ -314,15 +338,20 @@ static struct attribute_group snbep_uncore_pcu_format_group = { | |||
314 | .attrs = snbep_uncore_pcu_formats_attr, | 338 | .attrs = snbep_uncore_pcu_formats_attr, |
315 | }; | 339 | }; |
316 | 340 | ||
341 | static struct attribute_group snbep_uncore_qpi_format_group = { | ||
342 | .name = "format", | ||
343 | .attrs = snbep_uncore_qpi_formats_attr, | ||
344 | }; | ||
345 | |||
317 | static struct intel_uncore_ops snbep_uncore_msr_ops = { | 346 | static struct intel_uncore_ops snbep_uncore_msr_ops = { |
318 | .init_box = snbep_uncore_msr_init_box, | 347 | .init_box = snbep_uncore_msr_init_box, |
319 | .disable_box = snbep_uncore_msr_disable_box, | 348 | .disable_box = snbep_uncore_msr_disable_box, |
320 | .enable_box = snbep_uncore_msr_enable_box, | 349 | .enable_box = snbep_uncore_msr_enable_box, |
321 | .disable_event = snbep_uncore_msr_disable_event, | 350 | .disable_event = snbep_uncore_msr_disable_event, |
322 | .enable_event = snbep_uncore_msr_enable_event, | 351 | .enable_event = snbep_uncore_msr_enable_event, |
323 | .read_counter = snbep_uncore_msr_read_counter, | 352 | .read_counter = uncore_msr_read_counter, |
324 | .get_constraint = snbep_uncore_get_constraint, | 353 | .get_constraint = uncore_get_constraint, |
325 | .put_constraint = snbep_uncore_put_constraint, | 354 | .put_constraint = uncore_put_constraint, |
326 | .hw_config = snbep_uncore_hw_config, | 355 | .hw_config = snbep_uncore_hw_config, |
327 | }; | 356 | }; |
328 | 357 | ||
@@ -485,8 +514,13 @@ static struct intel_uncore_type snbep_uncore_qpi = { | |||
485 | .num_counters = 4, | 514 | .num_counters = 4, |
486 | .num_boxes = 2, | 515 | .num_boxes = 2, |
487 | .perf_ctr_bits = 48, | 516 | .perf_ctr_bits = 48, |
517 | .perf_ctr = SNBEP_PCI_PMON_CTR0, | ||
518 | .event_ctl = SNBEP_PCI_PMON_CTL0, | ||
519 | .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK, | ||
520 | .box_ctl = SNBEP_PCI_PMON_BOX_CTL, | ||
521 | .ops = &snbep_uncore_pci_ops, | ||
488 | .event_descs = snbep_uncore_qpi_events, | 522 | .event_descs = snbep_uncore_qpi_events, |
489 | SNBEP_UNCORE_PCI_COMMON_INIT(), | 523 | .format_group = &snbep_uncore_qpi_format_group, |
490 | }; | 524 | }; |
491 | 525 | ||
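The new QPI type reuses the common PCI PMON registers but swaps in its own format group, whose event_ext attribute is declared as "config:0-7,21": sysfs format strings may name disjoint bit ranges, and bit 21 here serves as a ninth event-select bit. A runnable sketch of packing such a split field (helper name hypothetical):

```c
#include <stdio.h>

/* hypothetical helper: pack a 9-bit event code into config bits
 * 0-7 plus bit 21, mirroring the "config:0-7,21" format string */
static unsigned long long pack_event_ext(unsigned int ev9)
{
	unsigned long long config = ev9 & 0xff;		/* bits 0-7 */

	if (ev9 & 0x100)
		config |= 1ULL << 21;			/* 9th bit  */
	return config;
}

int main(void)
{
	printf("%#llx\n", pack_event_ext(0x38));	/* 0x38     */
	printf("%#llx\n", pack_event_ext(0x138));	/* 0x200038 */
	return 0;
}
```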
492 | 526 | ||
@@ -603,10 +637,8 @@ static void snbep_pci2phy_map_init(void) | |||
603 | } | 637 | } |
604 | /* end of Sandy Bridge-EP uncore support */ | 638 | /* end of Sandy Bridge-EP uncore support */ |
605 | 639 | ||
606 | |||
607 | /* Sandy Bridge uncore support */ | 640 | /* Sandy Bridge uncore support */ |
608 | static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, | 641 | static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) |
609 | struct perf_event *event) | ||
610 | { | 642 | { |
611 | struct hw_perf_event *hwc = &event->hw; | 643 | struct hw_perf_event *hwc = &event->hw; |
612 | 644 | ||
@@ -616,20 +648,11 @@ static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, | |||
616 | wrmsrl(hwc->config_base, SNB_UNC_CTL_EN); | 648 | wrmsrl(hwc->config_base, SNB_UNC_CTL_EN); |
617 | } | 649 | } |
618 | 650 | ||
619 | static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, | 651 | static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event) |
620 | struct perf_event *event) | ||
621 | { | 652 | { |
622 | wrmsrl(event->hw.config_base, 0); | 653 | wrmsrl(event->hw.config_base, 0); |
623 | } | 654 | } |
624 | 655 | ||
625 | static u64 snb_uncore_msr_read_counter(struct intel_uncore_box *box, | ||
626 | struct perf_event *event) | ||
627 | { | ||
628 | u64 count; | ||
629 | rdmsrl(event->hw.event_base, count); | ||
630 | return count; | ||
631 | } | ||
632 | |||
633 | static void snb_uncore_msr_init_box(struct intel_uncore_box *box) | 656 | static void snb_uncore_msr_init_box(struct intel_uncore_box *box) |
634 | { | 657 | { |
635 | if (box->pmu->pmu_idx == 0) { | 658 | if (box->pmu->pmu_idx == 0) { |
@@ -648,15 +671,15 @@ static struct attribute *snb_uncore_formats_attr[] = { | |||
648 | }; | 671 | }; |
649 | 672 | ||
650 | static struct attribute_group snb_uncore_format_group = { | 673 | static struct attribute_group snb_uncore_format_group = { |
651 | .name = "format", | 674 | .name = "format", |
652 | .attrs = snb_uncore_formats_attr, | 675 | .attrs = snb_uncore_formats_attr, |
653 | }; | 676 | }; |
654 | 677 | ||
655 | static struct intel_uncore_ops snb_uncore_msr_ops = { | 678 | static struct intel_uncore_ops snb_uncore_msr_ops = { |
656 | .init_box = snb_uncore_msr_init_box, | 679 | .init_box = snb_uncore_msr_init_box, |
657 | .disable_event = snb_uncore_msr_disable_event, | 680 | .disable_event = snb_uncore_msr_disable_event, |
658 | .enable_event = snb_uncore_msr_enable_event, | 681 | .enable_event = snb_uncore_msr_enable_event, |
659 | .read_counter = snb_uncore_msr_read_counter, | 682 | .read_counter = uncore_msr_read_counter, |
660 | }; | 683 | }; |
661 | 684 | ||
662 | static struct event_constraint snb_uncore_cbox_constraints[] = { | 685 | static struct event_constraint snb_uncore_cbox_constraints[] = { |
@@ -697,12 +720,10 @@ static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box) | |||
697 | 720 | ||
698 | static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box) | 721 | static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box) |
699 | { | 722 | { |
700 | wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, | 723 | wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC); |
701 | NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC); | ||
702 | } | 724 | } |
703 | 725 | ||
704 | static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, | 726 | static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) |
705 | struct perf_event *event) | ||
706 | { | 727 | { |
707 | struct hw_perf_event *hwc = &event->hw; | 728 | struct hw_perf_event *hwc = &event->hw; |
708 | 729 | ||
@@ -744,7 +765,7 @@ static struct intel_uncore_ops nhm_uncore_msr_ops = { | |||
744 | .enable_box = nhm_uncore_msr_enable_box, | 765 | .enable_box = nhm_uncore_msr_enable_box, |
745 | .disable_event = snb_uncore_msr_disable_event, | 766 | .disable_event = snb_uncore_msr_disable_event, |
746 | .enable_event = nhm_uncore_msr_enable_event, | 767 | .enable_event = nhm_uncore_msr_enable_event, |
747 | .read_counter = snb_uncore_msr_read_counter, | 768 | .read_counter = uncore_msr_read_counter, |
748 | }; | 769 | }; |
749 | 770 | ||
750 | static struct intel_uncore_type nhm_uncore = { | 771 | static struct intel_uncore_type nhm_uncore = { |
@@ -769,8 +790,1041 @@ static struct intel_uncore_type *nhm_msr_uncores[] = { | |||
769 | }; | 790 | }; |
770 | /* end of Nehalem uncore support */ | 791 | /* end of Nehalem uncore support */ |
771 | 792 | ||
772 | static void uncore_assign_hw_event(struct intel_uncore_box *box, | 793 | /* Nehalem-EX uncore support */ |
773 | struct perf_event *event, int idx) | 794 | #define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \ |
795 | ((1ULL << (n)) - 1))) | ||
796 | |||
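__BITS_VALUE(x, i, n) pulls out the i-th n-bit-wide field of x, cast back to x's type; the NHM-EX code below relies on it to keep several small indices and MSR numbers packed into single words. A runnable check:

```c
#include <stdio.h>

#define __BITS_VALUE(x, i, n)	((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))

int main(void)
{
	/* two 8-bit indices packed into one word, as reg1->idx is */
	int idx = (0x2a << 8) | 0x07;

	printf("field 0: %#x\n", __BITS_VALUE(idx, 0, 8));  /* 0x7  */
	printf("field 1: %#x\n", __BITS_VALUE(idx, 1, 8));  /* 0x2a */
	return 0;
}
```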
797 | DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5"); | ||
798 | DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7"); | ||
799 | DEFINE_UNCORE_FORMAT_ATTR(mm_cfg, mm_cfg, "config:63"); | ||
800 | DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63"); | ||
801 | DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63"); | ||
802 | |||
803 | static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box) | ||
804 | { | ||
805 | wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL); | ||
806 | } | ||
807 | |||
808 | static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box) | ||
809 | { | ||
810 | unsigned msr = uncore_msr_box_ctl(box); | ||
811 | u64 config; | ||
812 | |||
813 | if (msr) { | ||
814 | rdmsrl(msr, config); | ||
815 | config &= ~((1ULL << uncore_num_counters(box)) - 1); | ||
816 | /* WBox has a fixed counter */ | ||
817 | if (uncore_msr_fixed_ctl(box)) | ||
818 | config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN; | ||
819 | wrmsrl(msr, config); | ||
820 | } | ||
821 | } | ||
822 | |||
823 | static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box) | ||
824 | { | ||
825 | unsigned msr = uncore_msr_box_ctl(box); | ||
826 | u64 config; | ||
827 | |||
828 | if (msr) { | ||
829 | rdmsrl(msr, config); | ||
830 | config |= (1ULL << uncore_num_counters(box)) - 1; | ||
831 | /* WBox has a fixed counter */ | ||
832 | if (uncore_msr_fixed_ctl(box)) | ||
833 | config |= NHMEX_W_PMON_GLOBAL_FIXED_EN; | ||
834 | wrmsrl(msr, config); | ||
835 | } | ||
836 | } | ||
837 | |||
838 | static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
839 | { | ||
840 | wrmsrl(event->hw.config_base, 0); | ||
841 | } | ||
842 | |||
843 | static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
844 | { | ||
845 | struct hw_perf_event *hwc = &event->hw; | ||
846 | |||
847 | if (hwc->idx >= UNCORE_PMC_IDX_FIXED) | ||
848 | wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0); | ||
849 | else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0) | ||
850 | wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22); | ||
851 | else | ||
852 | wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0); | ||
853 | } | ||
854 | |||
855 | #define NHMEX_UNCORE_OPS_COMMON_INIT() \ | ||
856 | .init_box = nhmex_uncore_msr_init_box, \ | ||
857 | .disable_box = nhmex_uncore_msr_disable_box, \ | ||
858 | .enable_box = nhmex_uncore_msr_enable_box, \ | ||
859 | .disable_event = nhmex_uncore_msr_disable_event, \ | ||
860 | .read_counter = uncore_msr_read_counter | ||
861 | |||
862 | static struct intel_uncore_ops nhmex_uncore_ops = { | ||
863 | NHMEX_UNCORE_OPS_COMMON_INIT(), | ||
864 | .enable_event = nhmex_uncore_msr_enable_event, | ||
865 | }; | ||
866 | |||
867 | static struct attribute *nhmex_uncore_ubox_formats_attr[] = { | ||
868 | &format_attr_event.attr, | ||
869 | &format_attr_edge.attr, | ||
870 | NULL, | ||
871 | }; | ||
872 | |||
873 | static struct attribute_group nhmex_uncore_ubox_format_group = { | ||
874 | .name = "format", | ||
875 | .attrs = nhmex_uncore_ubox_formats_attr, | ||
876 | }; | ||
877 | |||
878 | static struct intel_uncore_type nhmex_uncore_ubox = { | ||
879 | .name = "ubox", | ||
880 | .num_counters = 1, | ||
881 | .num_boxes = 1, | ||
882 | .perf_ctr_bits = 48, | ||
883 | .event_ctl = NHMEX_U_MSR_PMON_EV_SEL, | ||
884 | .perf_ctr = NHMEX_U_MSR_PMON_CTR, | ||
885 | .event_mask = NHMEX_U_PMON_RAW_EVENT_MASK, | ||
886 | .box_ctl = NHMEX_U_MSR_PMON_GLOBAL_CTL, | ||
887 | .ops = &nhmex_uncore_ops, | ||
888 | .format_group = &nhmex_uncore_ubox_format_group | ||
889 | }; | ||
890 | |||
891 | static struct attribute *nhmex_uncore_cbox_formats_attr[] = { | ||
892 | &format_attr_event.attr, | ||
893 | &format_attr_umask.attr, | ||
894 | &format_attr_edge.attr, | ||
895 | &format_attr_inv.attr, | ||
896 | &format_attr_thresh8.attr, | ||
897 | NULL, | ||
898 | }; | ||
899 | |||
900 | static struct attribute_group nhmex_uncore_cbox_format_group = { | ||
901 | .name = "format", | ||
902 | .attrs = nhmex_uncore_cbox_formats_attr, | ||
903 | }; | ||
904 | |||
905 | static struct intel_uncore_type nhmex_uncore_cbox = { | ||
906 | .name = "cbox", | ||
907 | .num_counters = 6, | ||
908 | .num_boxes = 8, | ||
909 | .perf_ctr_bits = 48, | ||
910 | .event_ctl = NHMEX_C0_MSR_PMON_EV_SEL0, | ||
911 | .perf_ctr = NHMEX_C0_MSR_PMON_CTR0, | ||
912 | .event_mask = NHMEX_PMON_RAW_EVENT_MASK, | ||
913 | .box_ctl = NHMEX_C0_MSR_PMON_GLOBAL_CTL, | ||
914 | .msr_offset = NHMEX_C_MSR_OFFSET, | ||
915 | .pair_ctr_ctl = 1, | ||
916 | .ops = &nhmex_uncore_ops, | ||
917 | .format_group = &nhmex_uncore_cbox_format_group | ||
918 | }; | ||
919 | |||
920 | static struct uncore_event_desc nhmex_uncore_wbox_events[] = { | ||
921 | INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"), | ||
922 | { /* end: all zeroes */ }, | ||
923 | }; | ||
924 | |||
925 | static struct intel_uncore_type nhmex_uncore_wbox = { | ||
926 | .name = "wbox", | ||
927 | .num_counters = 4, | ||
928 | .num_boxes = 1, | ||
929 | .perf_ctr_bits = 48, | ||
930 | .event_ctl = NHMEX_W_MSR_PMON_CNT0, | ||
931 | .perf_ctr = NHMEX_W_MSR_PMON_EVT_SEL0, | ||
932 | .fixed_ctr = NHMEX_W_MSR_PMON_FIXED_CTR, | ||
933 | .fixed_ctl = NHMEX_W_MSR_PMON_FIXED_CTL, | ||
934 | .event_mask = NHMEX_PMON_RAW_EVENT_MASK, | ||
935 | .box_ctl = NHMEX_W_MSR_GLOBAL_CTL, | ||
936 | .pair_ctr_ctl = 1, | ||
937 | .event_descs = nhmex_uncore_wbox_events, | ||
938 | .ops = &nhmex_uncore_ops, | ||
939 | .format_group = &nhmex_uncore_cbox_format_group | ||
940 | }; | ||
941 | |||
942 | static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
943 | { | ||
944 | struct hw_perf_event *hwc = &event->hw; | ||
945 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
946 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
947 | int ctr, ev_sel; | ||
948 | |||
949 | ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >> | ||
950 | NHMEX_B_PMON_CTR_SHIFT; | ||
951 | ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >> | ||
952 | NHMEX_B_PMON_CTL_EV_SEL_SHIFT; | ||
953 | |||
954 | /* events that do not use the match/mask registers */ | ||
955 | if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) || | ||
956 | (ctr == 2 && ev_sel != 0x4) || ctr == 3) | ||
957 | return 0; | ||
958 | |||
959 | if (box->pmu->pmu_idx == 0) | ||
960 | reg1->reg = NHMEX_B0_MSR_MATCH; | ||
961 | else | ||
962 | reg1->reg = NHMEX_B1_MSR_MATCH; | ||
963 | reg1->idx = 0; | ||
964 | reg1->config = event->attr.config1; | ||
965 | reg2->config = event->attr.config2; | ||
966 | return 0; | ||
967 | } | ||
968 | |||
969 | static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
970 | { | ||
971 | struct hw_perf_event *hwc = &event->hw; | ||
972 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
973 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
974 | |||
975 | if (reg1->idx != EXTRA_REG_NONE) { | ||
976 | wrmsrl(reg1->reg, reg1->config); | ||
977 | wrmsrl(reg1->reg + 1, reg2->config); | ||
978 | } | ||
979 | wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 | | ||
980 | (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK)); | ||
981 | } | ||
982 | |||
983 | /* | ||
984 | * The Bbox has 4 counters, but each counter monitors different events. | ||
985 | * Use bits 6-7 in the event config to select the counter. | ||
986 | */ | ||
987 | static struct event_constraint nhmex_uncore_bbox_constraints[] = { | ||
988 | EVENT_CONSTRAINT(0 , 1, 0xc0), | ||
989 | EVENT_CONSTRAINT(0x40, 2, 0xc0), | ||
990 | EVENT_CONSTRAINT(0x80, 4, 0xc0), | ||
991 | EVENT_CONSTRAINT(0xc0, 8, 0xc0), | ||
992 | EVENT_CONSTRAINT_END, | ||
993 | }; | ||
994 | |||
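EVENT_CONSTRAINT(code, idxmsk, cmask) here matches events whose (config & 0xc0) equals code and confines them to the counters in idxmsk, so each of the four codes in bits 6-7 lands on exactly one of the Bbox's four counters. A runnable decode of the table:

```c
#include <stdio.h>

struct constraint { unsigned int code, idxmsk, cmask; };

/* the table above: the counter select lives in config bits 6-7 */
static const struct constraint bbox[] = {
	{ 0x00, 1, 0xc0 }, { 0x40, 2, 0xc0 },
	{ 0x80, 4, 0xc0 }, { 0xc0, 8, 0xc0 },
};

int main(void)
{
	unsigned int config = 0x85;	/* counter field = 0x80 */
	int i;

	for (i = 0; i < 4; i++)
		if ((config & bbox[i].cmask) == bbox[i].code)
			printf("allowed counter mask %#x\n",
			       bbox[i].idxmsk);	/* prints 0x4 */
	return 0;
}
```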
995 | static struct attribute *nhmex_uncore_bbox_formats_attr[] = { | ||
996 | &format_attr_event5.attr, | ||
997 | &format_attr_counter.attr, | ||
998 | &format_attr_match.attr, | ||
999 | &format_attr_mask.attr, | ||
1000 | NULL, | ||
1001 | }; | ||
1002 | |||
1003 | static struct attribute_group nhmex_uncore_bbox_format_group = { | ||
1004 | .name = "format", | ||
1005 | .attrs = nhmex_uncore_bbox_formats_attr, | ||
1006 | }; | ||
1007 | |||
1008 | static struct intel_uncore_ops nhmex_uncore_bbox_ops = { | ||
1009 | NHMEX_UNCORE_OPS_COMMON_INIT(), | ||
1010 | .enable_event = nhmex_bbox_msr_enable_event, | ||
1011 | .hw_config = nhmex_bbox_hw_config, | ||
1012 | .get_constraint = uncore_get_constraint, | ||
1013 | .put_constraint = uncore_put_constraint, | ||
1014 | }; | ||
1015 | |||
1016 | static struct intel_uncore_type nhmex_uncore_bbox = { | ||
1017 | .name = "bbox", | ||
1018 | .num_counters = 4, | ||
1019 | .num_boxes = 2, | ||
1020 | .perf_ctr_bits = 48, | ||
1021 | .event_ctl = NHMEX_B0_MSR_PMON_CTL0, | ||
1022 | .perf_ctr = NHMEX_B0_MSR_PMON_CTR0, | ||
1023 | .event_mask = NHMEX_B_PMON_RAW_EVENT_MASK, | ||
1024 | .box_ctl = NHMEX_B0_MSR_PMON_GLOBAL_CTL, | ||
1025 | .msr_offset = NHMEX_B_MSR_OFFSET, | ||
1026 | .pair_ctr_ctl = 1, | ||
1027 | .num_shared_regs = 1, | ||
1028 | .constraints = nhmex_uncore_bbox_constraints, | ||
1029 | .ops = &nhmex_uncore_bbox_ops, | ||
1030 | .format_group = &nhmex_uncore_bbox_format_group | ||
1031 | }; | ||
1032 | |||
1033 | static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
1034 | { | ||
1035 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
1036 | struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; | ||
1037 | |||
1038 | if (event->attr.config & NHMEX_S_PMON_MM_CFG_EN) { | ||
1039 | reg1->config = event->attr.config1; | ||
1040 | reg2->config = event->attr.config2; | ||
1041 | } else { | ||
1042 | reg1->config = ~0ULL; | ||
1043 | reg2->config = ~0ULL; | ||
1044 | } | ||
1045 | |||
1046 | if (box->pmu->pmu_idx == 0) | ||
1047 | reg1->reg = NHMEX_S0_MSR_MM_CFG; | ||
1048 | else | ||
1049 | reg1->reg = NHMEX_S1_MSR_MM_CFG; | ||
1050 | |||
1051 | reg1->idx = 0; | ||
1052 | |||
1053 | return 0; | ||
1054 | } | ||
1055 | |||
1056 | static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
1057 | { | ||
1058 | struct hw_perf_event *hwc = &event->hw; | ||
1059 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
1060 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
1061 | |||
1062 | wrmsrl(reg1->reg, 0); | ||
1063 | if (reg1->config != ~0ULL || reg2->config != ~0ULL) { | ||
1064 | wrmsrl(reg1->reg + 1, reg1->config); | ||
1065 | wrmsrl(reg1->reg + 2, reg2->config); | ||
1066 | wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN); | ||
1067 | } | ||
1068 | wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22); | ||
1069 | } | ||
1070 | |||
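The Sbox enable path is deliberately ordered: clear the MM_CFG enable bit, program the match and mask MSRs that sit at reg+1 and reg+2, and only then set the enable bit, skipping the whole block when hw_config left both configs at the ~0ULL "unused" marker. A hedged sketch of the same disable-program-enable sequence (MSR number made up, writes stubbed out):

```c
#include <stdio.h>

#define MM_CFG_EN (1ULL << 63)

/* stand-in for wrmsrl(); the real code writes hardware MSRs */
static void wrmsr_stub(unsigned int msr, unsigned long long val)
{
	printf("msr %#x <- %#llx\n", msr, val);
}

static void program_match_mask(unsigned int mm_cfg,
			       unsigned long long match,
			       unsigned long long mask)
{
	wrmsr_stub(mm_cfg, 0);			/* disable first  */
	if (match != ~0ULL || mask != ~0ULL) {
		wrmsr_stub(mm_cfg + 1, match);	/* MATCH register */
		wrmsr_stub(mm_cfg + 2, mask);	/* MASK register  */
		wrmsr_stub(mm_cfg, MM_CFG_EN);	/* then re-enable */
	}
}

int main(void)
{
	program_match_mask(0xe49, 0x1234, 0xffff);  /* MSR # made up */
	return 0;
}
```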
1071 | static struct attribute *nhmex_uncore_sbox_formats_attr[] = { | ||
1072 | &format_attr_event.attr, | ||
1073 | &format_attr_umask.attr, | ||
1074 | &format_attr_edge.attr, | ||
1075 | &format_attr_inv.attr, | ||
1076 | &format_attr_thresh8.attr, | ||
1077 | &format_attr_mm_cfg.attr, | ||
1078 | &format_attr_match.attr, | ||
1079 | &format_attr_mask.attr, | ||
1080 | NULL, | ||
1081 | }; | ||
1082 | |||
1083 | static struct attribute_group nhmex_uncore_sbox_format_group = { | ||
1084 | .name = "format", | ||
1085 | .attrs = nhmex_uncore_sbox_formats_attr, | ||
1086 | }; | ||
1087 | |||
1088 | static struct intel_uncore_ops nhmex_uncore_sbox_ops = { | ||
1089 | NHMEX_UNCORE_OPS_COMMON_INIT(), | ||
1090 | .enable_event = nhmex_sbox_msr_enable_event, | ||
1091 | .hw_config = nhmex_sbox_hw_config, | ||
1092 | .get_constraint = uncore_get_constraint, | ||
1093 | .put_constraint = uncore_put_constraint, | ||
1094 | }; | ||
1095 | |||
1096 | static struct intel_uncore_type nhmex_uncore_sbox = { | ||
1097 | .name = "sbox", | ||
1098 | .num_counters = 4, | ||
1099 | .num_boxes = 2, | ||
1100 | .perf_ctr_bits = 48, | ||
1101 | .event_ctl = NHMEX_S0_MSR_PMON_CTL0, | ||
1102 | .perf_ctr = NHMEX_S0_MSR_PMON_CTR0, | ||
1103 | .event_mask = NHMEX_PMON_RAW_EVENT_MASK, | ||
1104 | .box_ctl = NHMEX_S0_MSR_PMON_GLOBAL_CTL, | ||
1105 | .msr_offset = NHMEX_S_MSR_OFFSET, | ||
1106 | .pair_ctr_ctl = 1, | ||
1107 | .num_shared_regs = 1, | ||
1108 | .ops = &nhmex_uncore_sbox_ops, | ||
1109 | .format_group = &nhmex_uncore_sbox_format_group | ||
1110 | }; | ||
1111 | |||
1112 | enum { | ||
1113 | EXTRA_REG_NHMEX_M_FILTER, | ||
1114 | EXTRA_REG_NHMEX_M_DSP, | ||
1115 | EXTRA_REG_NHMEX_M_ISS, | ||
1116 | EXTRA_REG_NHMEX_M_MAP, | ||
1117 | EXTRA_REG_NHMEX_M_MSC_THR, | ||
1118 | EXTRA_REG_NHMEX_M_PGT, | ||
1119 | EXTRA_REG_NHMEX_M_PLD, | ||
1120 | EXTRA_REG_NHMEX_M_ZDP_CTL_FVC, | ||
1121 | }; | ||
1122 | |||
1123 | static struct extra_reg nhmex_uncore_mbox_extra_regs[] = { | ||
1124 | MBOX_INC_SEL_EXTAR_REG(0x0, DSP), | ||
1125 | MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR), | ||
1126 | MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR), | ||
1127 | MBOX_INC_SEL_EXTAR_REG(0x9, ISS), | ||
1128 | /* event 0xa uses two extra registers */ | ||
1129 | MBOX_INC_SEL_EXTAR_REG(0xa, ISS), | ||
1130 | MBOX_INC_SEL_EXTAR_REG(0xa, PLD), | ||
1131 | MBOX_INC_SEL_EXTAR_REG(0xb, PLD), | ||
1132 | /* events 0xd ~ 0x10 use the same extra register */ | ||
1133 | MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC), | ||
1134 | MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC), | ||
1135 | MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC), | ||
1136 | MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC), | ||
1137 | MBOX_INC_SEL_EXTAR_REG(0x16, PGT), | ||
1138 | MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP), | ||
1139 | MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS), | ||
1140 | MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT), | ||
1141 | MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP), | ||
1142 | EVENT_EXTRA_END | ||
1143 | }; | ||
1144 | |||
1145 | static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config) | ||
1146 | { | ||
1147 | struct intel_uncore_extra_reg *er; | ||
1148 | unsigned long flags; | ||
1149 | bool ret = false; | ||
1150 | u64 mask; | ||
1151 | |||
1152 | if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) { | ||
1153 | er = &box->shared_regs[idx]; | ||
1154 | raw_spin_lock_irqsave(&er->lock, flags); | ||
1155 | if (!atomic_read(&er->ref) || er->config == config) { | ||
1156 | atomic_inc(&er->ref); | ||
1157 | er->config = config; | ||
1158 | ret = true; | ||
1159 | } | ||
1160 | raw_spin_unlock_irqrestore(&er->lock, flags); | ||
1161 | |||
1162 | return ret; | ||
1163 | } | ||
1164 | /* | ||
1165 | * The ZDP_CTL_FVC MSR has 4 fields which are used to control | ||
1166 | * events 0xd ~ 0x10. Besides these 4 fields, there are additional | ||
1167 | * fields which are shared. | ||
1168 | */ | ||
1169 | idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; | ||
1170 | if (WARN_ON_ONCE(idx >= 4)) | ||
1171 | return false; | ||
1172 | |||
1173 | /* mask of the shared fields */ | ||
1174 | mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK; | ||
1175 | er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC]; | ||
1176 | |||
1177 | raw_spin_lock_irqsave(&er->lock, flags); | ||
1178 | /* add mask of the non-shared field if it's in use */ | ||
1179 | if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) | ||
1180 | mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); | ||
1181 | |||
1182 | if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) { | ||
1183 | atomic_add(1 << (idx * 8), &er->ref); | ||
1184 | mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK | | ||
1185 | NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); | ||
1186 | er->config &= ~mask; | ||
1187 | er->config |= (config & mask); | ||
1188 | ret = true; | ||
1189 | } | ||
1190 | raw_spin_unlock_irqrestore(&er->lock, flags); | ||
1191 | |||
1192 | return ret; | ||
1193 | } | ||
1194 | |||
1195 | static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx) | ||
1196 | { | ||
1197 | struct intel_uncore_extra_reg *er; | ||
1198 | |||
1199 | if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) { | ||
1200 | er = &box->shared_regs[idx]; | ||
1201 | atomic_dec(&er->ref); | ||
1202 | return; | ||
1203 | } | ||
1204 | |||
1205 | idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; | ||
1206 | er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC]; | ||
1207 | atomic_sub(1 << (idx * 8), &er->ref); | ||
1208 | } | ||
1209 | |||
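Since the four ZDP_CTL_FVC sub-fields share one MSR, their reference counts are packed four 8-bit counters to a single atomic_t: atomic_add(1 << (idx * 8)) bumps only the idx-th byte, and __BITS_VALUE(atomic_read(&er->ref), idx, 8) reads it back. A runnable model using a plain int in place of the atomic:

```c
#include <stdio.h>

#define BITS_VALUE(x, i, n)  (((x) >> ((i) * (n))) & ((1 << (n)) - 1))

int main(void)
{
	int ref = 0;	/* models the atomic_t; four 8-bit counters */

	ref += 1 << (2 * 8);	/* "get" on field 2, like atomic_add */
	ref += 1 << (2 * 8);	/* a second user of field 2          */
	ref += 1 << (0 * 8);	/* one user of field 0               */

	printf("field 0 refs: %d\n", BITS_VALUE(ref, 0, 8));	/* 1 */
	printf("field 2 refs: %d\n", BITS_VALUE(ref, 2, 8));	/* 2 */

	ref -= 1 << (2 * 8);	/* "put", like atomic_sub            */
	printf("field 2 refs: %d\n", BITS_VALUE(ref, 2, 8));	/* 1 */
	return 0;
}
```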
1210 | u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify) | ||
1211 | { | ||
1212 | struct hw_perf_event *hwc = &event->hw; | ||
1213 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
1214 | int idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8); | ||
1215 | u64 config = reg1->config; | ||
1216 | |||
1217 | /* get the non-shared control bits and shift them */ | ||
1218 | idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; | ||
1219 | config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); | ||
1220 | if (new_idx > orig_idx) { | ||
1221 | idx = new_idx - orig_idx; | ||
1222 | config <<= 3 * idx; | ||
1223 | } else { | ||
1224 | idx = orig_idx - new_idx; | ||
1225 | config >>= 3 * idx; | ||
1226 | } | ||
1227 | |||
1228 | /* add the shared control bits back */ | ||
1229 | config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config; | ||
1230 | if (modify) { | ||
1231 | /* adjust the main event selector */ | ||
1232 | if (new_idx > orig_idx) | ||
1233 | hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT; | ||
1234 | else | ||
1235 | hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT; | ||
1236 | reg1->config = config; | ||
1237 | reg1->idx = ~0xff | new_idx; | ||
1238 | } | ||
1239 | return config; | ||
1240 | } | ||
1241 | |||
1242 | static struct event_constraint * | ||
1243 | nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
1244 | { | ||
1245 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
1246 | struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; | ||
1247 | int i, idx[2], alloc = 0; | ||
1248 | u64 config1 = reg1->config; | ||
1249 | |||
1250 | idx[0] = __BITS_VALUE(reg1->idx, 0, 8); | ||
1251 | idx[1] = __BITS_VALUE(reg1->idx, 1, 8); | ||
1252 | again: | ||
1253 | for (i = 0; i < 2; i++) { | ||
1254 | if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i))) | ||
1255 | idx[i] = 0xff; | ||
1256 | |||
1257 | if (idx[i] == 0xff) | ||
1258 | continue; | ||
1259 | |||
1260 | if (!nhmex_mbox_get_shared_reg(box, idx[i], | ||
1261 | __BITS_VALUE(config1, i, 32))) | ||
1262 | goto fail; | ||
1263 | alloc |= (0x1 << i); | ||
1264 | } | ||
1265 | |||
1266 | /* for the match/mask registers */ | ||
1267 | if ((uncore_box_is_fake(box) || !reg2->alloc) && | ||
1268 | !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config)) | ||
1269 | goto fail; | ||
1270 | |||
1271 | /* | ||
1272 | * If it's a fake box -- as per validate_{group,event}() we | ||
1273 | * shouldn't touch event state and we can avoid doing so | ||
1274 | * since both will only call get_event_constraints() once | ||
1275 | * on each event, this avoids the need for reg->alloc. | ||
1276 | */ | ||
1277 | if (!uncore_box_is_fake(box)) { | ||
1278 | if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) | ||
1279 | nhmex_mbox_alter_er(event, idx[0], true); | ||
1280 | reg1->alloc |= alloc; | ||
1281 | reg2->alloc = 1; | ||
1282 | } | ||
1283 | return NULL; | ||
1284 | fail: | ||
1285 | if (idx[0] != 0xff && !(alloc & 0x1) && | ||
1286 | idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) { | ||
1287 | /* | ||
1288 | * events 0xd ~ 0x10 are functionally identical, but are | ||
1289 | * controlled by different fields in the ZDP_CTL_FVC | ||
1290 | * register. If we failed to take one field, try the | ||
1291 | * remaining 3 choices. | ||
1292 | */ | ||
1293 | BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff); | ||
1294 | idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; | ||
1295 | idx[0] = (idx[0] + 1) % 4; | ||
1296 | idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; | ||
1297 | if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) { | ||
1298 | config1 = nhmex_mbox_alter_er(event, idx[0], false); | ||
1299 | goto again; | ||
1300 | } | ||
1301 | } | ||
1302 | |||
1303 | if (alloc & 0x1) | ||
1304 | nhmex_mbox_put_shared_reg(box, idx[0]); | ||
1305 | if (alloc & 0x2) | ||
1306 | nhmex_mbox_put_shared_reg(box, idx[1]); | ||
1307 | return &constraint_empty; | ||
1308 | } | ||
1309 | |||
1310 | static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
1311 | { | ||
1312 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
1313 | struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; | ||
1314 | |||
1315 | if (uncore_box_is_fake(box)) | ||
1316 | return; | ||
1317 | |||
1318 | if (reg1->alloc & 0x1) | ||
1319 | nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8)); | ||
1320 | if (reg1->alloc & 0x2) | ||
1321 | nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8)); | ||
1322 | reg1->alloc = 0; | ||
1323 | |||
1324 | if (reg2->alloc) { | ||
1325 | nhmex_mbox_put_shared_reg(box, reg2->idx); | ||
1326 | reg2->alloc = 0; | ||
1327 | } | ||
1328 | } | ||
1329 | |||
1330 | static int nhmex_mbox_extra_reg_idx(struct extra_reg *er) | ||
1331 | { | ||
1332 | if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) | ||
1333 | return er->idx; | ||
1334 | return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd; | ||
1335 | } | ||
1336 | |||
1337 | static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
1338 | { | ||
1339 | struct intel_uncore_type *type = box->pmu->type; | ||
1340 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
1341 | struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; | ||
1342 | struct extra_reg *er; | ||
1343 | unsigned msr; | ||
1344 | int reg_idx = 0; | ||
1345 | |||
1346 | if (WARN_ON_ONCE(reg1->idx != -1)) | ||
1347 | return -EINVAL; | ||
1348 | /* | ||
1349 | * The mbox events may require at most 2 extra MSRs. But only | ||
1350 | * the lower 32 bits in these MSRs are significant, so we can use | ||
1351 | * config1 to pass two MSRs' config. | ||
1352 | */ | ||
1353 | for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) { | ||
1354 | if (er->event != (event->hw.config & er->config_mask)) | ||
1355 | continue; | ||
1356 | if (event->attr.config1 & ~er->valid_mask) | ||
1357 | return -EINVAL; | ||
1358 | if (er->idx == __BITS_VALUE(reg1->idx, 0, 8) || | ||
1359 | er->idx == __BITS_VALUE(reg1->idx, 1, 8)) | ||
1360 | continue; | ||
1361 | if (WARN_ON_ONCE(reg_idx >= 2)) | ||
1362 | return -EINVAL; | ||
1363 | |||
1364 | msr = er->msr + type->msr_offset * box->pmu->pmu_idx; | ||
1365 | if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff)) | ||
1366 | return -EINVAL; | ||
1367 | |||
1368 | /* always use bits 32~63 to pass the PLD config */ | ||
1369 | if (er->idx == EXTRA_REG_NHMEX_M_PLD) | ||
1370 | reg_idx = 1; | ||
1371 | |||
1372 | reg1->idx &= ~(0xff << (reg_idx * 8)); | ||
1373 | reg1->reg &= ~(0xffff << (reg_idx * 16)); | ||
1374 | reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8); | ||
1375 | reg1->reg |= msr << (reg_idx * 16); | ||
1376 | reg1->config = event->attr.config1; | ||
1377 | reg_idx++; | ||
1378 | } | ||
1379 | /* use config2 to pass the filter config */ | ||
1380 | reg2->idx = EXTRA_REG_NHMEX_M_FILTER; | ||
1381 | if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN) | ||
1382 | reg2->config = event->attr.config2; | ||
1383 | else | ||
1384 | reg2->config = ~0ULL; | ||
1385 | if (box->pmu->pmu_idx == 0) | ||
1386 | reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG; | ||
1387 | else | ||
1388 | reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG; | ||
1389 | |||
1390 | return 0; | ||
1391 | } | ||
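
nhmex_mbox_hw_config() packs up to two extra-register descriptions into a single reg1: two 8-bit indices in reg1->idx, two 16-bit MSR addresses in reg1->reg, and both 32-bit configs in config1. __BITS_VALUE() is presumably a field-extraction macro along the lines below (an assumption; its definition is not part of this hunk), and the hypothetical mbox_unpack() helper shows the layout:

	/* Assumed definition: extract the i-th n-bit field of x. */
	#define __BITS_VALUE(x, i, n) \
		((typeof(x))(((x) >> ((i) * (n))) & ((1ULL << (n)) - 1)))

	/* Hypothetical helper: unpack the two extra-register slots that
	 * nhmex_mbox_hw_config() packs into reg1; an idx of 0xff means
	 * the slot is unused. */
	static void mbox_unpack(struct hw_perf_event_extra *reg1,
				int idx[2], unsigned msr[2], u64 cfg[2])
	{
		int i;

		for (i = 0; i < 2; i++) {
			idx[i] = __BITS_VALUE(reg1->idx, i, 8);
			msr[i] = __BITS_VALUE(reg1->reg, i, 16);
			cfg[i] = __BITS_VALUE(reg1->config, i, 32);
		}
	}
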
1392 | |||
1393 | static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx) | ||
1394 | { | ||
1395 | struct intel_uncore_extra_reg *er; | ||
1396 | unsigned long flags; | ||
1397 | u64 config; | ||
1398 | |||
1399 | if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) | ||
1400 | return box->shared_regs[idx].config; | ||
1401 | |||
1402 | er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC]; | ||
1403 | raw_spin_lock_irqsave(&er->lock, flags); | ||
1404 | config = er->config; | ||
1405 | raw_spin_unlock_irqrestore(&er->lock, flags); | ||
1406 | return config; | ||
1407 | } | ||
1408 | |||
1409 | static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
1410 | { | ||
1411 | struct hw_perf_event *hwc = &event->hw; | ||
1412 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
1413 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
1414 | int idx; | ||
1415 | |||
1416 | idx = __BITS_VALUE(reg1->idx, 0, 8); | ||
1417 | if (idx != 0xff) | ||
1418 | wrmsrl(__BITS_VALUE(reg1->reg, 0, 16), | ||
1419 | nhmex_mbox_shared_reg_config(box, idx)); | ||
1420 | idx = __BITS_VALUE(reg1->idx, 1, 8); | ||
1421 | if (idx != 0xff) | ||
1422 | wrmsrl(__BITS_VALUE(reg1->reg, 1, 16), | ||
1423 | nhmex_mbox_shared_reg_config(box, idx)); | ||
1424 | |||
1425 | wrmsrl(reg2->reg, 0); | ||
1426 | if (reg2->config != ~0ULL) { | ||
1427 | wrmsrl(reg2->reg + 1, | ||
1428 | reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK); | ||
1429 | wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK & | ||
1430 | (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT)); | ||
1431 | wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN); | ||
1432 | } | ||
1433 | |||
1434 | wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0); | ||
1435 | } | ||
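
nhmex_mbox_msr_enable_event() programs the address match/mask filter in a safe order: MM_CFG is cleared first, the match and mask registers are loaded, and only then is the enable bit set, so a half-programmed filter is never live. A sketch of that sequence, assuming (as the code above does) that the MATCH and MASK registers sit at MM_CFG+1 and MM_CFG+2:

	/* Sketch of the filter programming order used above. */
	static void mbox_program_filter(unsigned mm_cfg_msr, u64 config2)
	{
		wrmsrl(mm_cfg_msr, 0);				/* disable first */
		wrmsrl(mm_cfg_msr + 1,				/* address match */
		       config2 & NHMEX_M_PMON_ADDR_MATCH_MASK);
		wrmsrl(mm_cfg_msr + 2,				/* address mask  */
		       NHMEX_M_PMON_ADDR_MASK_MASK &
		       (config2 >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
		wrmsrl(mm_cfg_msr, NHMEX_M_PMON_MM_CFG_EN);	/* enable last */
	}
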
1436 | |||
1437 | DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3"); | ||
1438 | DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5"); | ||
1439 | DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6"); | ||
1440 | DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7"); | ||
1441 | DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13"); | ||
1442 | DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21"); | ||
1443 | DEFINE_UNCORE_FORMAT_ATTR(filter_cfg, filter_cfg, "config2:63"); | ||
1444 | DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33"); | ||
1445 | DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61"); | ||
1446 | DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31"); | ||
1447 | DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31"); | ||
1448 | DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31"); | ||
1449 | DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31"); | ||
1450 | DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31"); | ||
1451 | DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31"); | ||
1452 | DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63"); | ||
1453 | |||
1454 | static struct attribute *nhmex_uncore_mbox_formats_attr[] = { | ||
1455 | &format_attr_count_mode.attr, | ||
1456 | &format_attr_storage_mode.attr, | ||
1457 | &format_attr_wrap_mode.attr, | ||
1458 | &format_attr_flag_mode.attr, | ||
1459 | &format_attr_inc_sel.attr, | ||
1460 | &format_attr_set_flag_sel.attr, | ||
1461 | &format_attr_filter_cfg.attr, | ||
1462 | &format_attr_filter_match.attr, | ||
1463 | &format_attr_filter_mask.attr, | ||
1464 | &format_attr_dsp.attr, | ||
1465 | &format_attr_thr.attr, | ||
1466 | &format_attr_fvc.attr, | ||
1467 | &format_attr_pgt.attr, | ||
1468 | &format_attr_map.attr, | ||
1469 | &format_attr_iss.attr, | ||
1470 | &format_attr_pld.attr, | ||
1471 | NULL, | ||
1472 | }; | ||
1473 | |||
1474 | static struct attribute_group nhmex_uncore_mbox_format_group = { | ||
1475 | .name = "format", | ||
1476 | .attrs = nhmex_uncore_mbox_formats_attr, | ||
1477 | }; | ||
1478 | |||
1479 | static struct uncore_event_desc nhmex_uncore_mbox_events[] = { | ||
1480 | INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"), | ||
1481 | INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"), | ||
1482 | { /* end: all zeroes */ }, | ||
1483 | }; | ||
1484 | |||
1485 | static struct intel_uncore_ops nhmex_uncore_mbox_ops = { | ||
1486 | NHMEX_UNCORE_OPS_COMMON_INIT(), | ||
1487 | .enable_event = nhmex_mbox_msr_enable_event, | ||
1488 | .hw_config = nhmex_mbox_hw_config, | ||
1489 | .get_constraint = nhmex_mbox_get_constraint, | ||
1490 | .put_constraint = nhmex_mbox_put_constraint, | ||
1491 | }; | ||
1492 | |||
1493 | static struct intel_uncore_type nhmex_uncore_mbox = { | ||
1494 | .name = "mbox", | ||
1495 | .num_counters = 6, | ||
1496 | .num_boxes = 2, | ||
1497 | .perf_ctr_bits = 48, | ||
1498 | .event_ctl = NHMEX_M0_MSR_PMU_CTL0, | ||
1499 | .perf_ctr = NHMEX_M0_MSR_PMU_CNT0, | ||
1500 | .event_mask = NHMEX_M_PMON_RAW_EVENT_MASK, | ||
1501 | .box_ctl = NHMEX_M0_MSR_GLOBAL_CTL, | ||
1502 | .msr_offset = NHMEX_M_MSR_OFFSET, | ||
1503 | .pair_ctr_ctl = 1, | ||
1504 | .num_shared_regs = 8, | ||
1505 | .event_descs = nhmex_uncore_mbox_events, | ||
1506 | .ops = &nhmex_uncore_mbox_ops, | ||
1507 | .format_group = &nhmex_uncore_mbox_format_group, | ||
1508 | }; | ||
1509 | |||
1510 | void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event) | ||
1511 | { | ||
1512 | struct hw_perf_event *hwc = &event->hw; | ||
1513 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
1514 | int port; | ||
1515 | |||
1516 | /* adjust the main event selector */ | ||
1517 | if (reg1->idx % 2) { | ||
1518 | reg1->idx--; | ||
1519 | hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT; | ||
1520 | } else { | ||
1521 | reg1->idx++; | ||
1522 | hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT; | ||
1523 | } | ||
1524 | |||
1525 | /* adjust address or config of extra register */ | ||
1526 | port = reg1->idx / 6 + box->pmu->pmu_idx * 4; | ||
1527 | switch (reg1->idx % 6) { | ||
1528 | case 0: | ||
1529 | reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG0(port); | ||
1530 | break; | ||
1531 | case 1: | ||
1532 | reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG1(port); | ||
1533 | break; | ||
1534 | case 2: | ||
1535 | /* move bits 8~15 down to bits 0~7 */ | ||
1536 | reg1->config >>= 8; | ||
1537 | break; | ||
1538 | case 3: | ||
1539 | /* move bits 0~7 up to bits 8~15 */ | ||
1540 | reg1->config <<= 8; | ||
1541 | break; | ||
1542 | case 4: | ||
1543 | reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port); | ||
1544 | break; | ||
1545 | case 5: | ||
1546 | reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port); | ||
1547 | break; | ||
1548 | } | ||
1549 | } | ||
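
nhmex_rbox_alter_er() relies on rbox events coming in (even, odd) pairs whose event selects differ by one. A hypothetical one-liner capturing the pairing rule it applies:

	/* Hypothetical helper: the partner of an rbox event index. */
	static int rbox_paired_idx(int idx)
	{
		return (idx % 2) ? idx - 1 : idx + 1;
	}
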
1550 | |||
1551 | /* | ||
1552 | * Each rbox has 4 event sets, which monitor PQI ports 0~3 or 4~7. | ||
1553 | * An event set consists of 6 events; the 3rd and 4th events in | ||
1554 | * an event set use the same extra register, so an event set uses | ||
1555 | * 5 extra registers. | ||
1556 | */ | ||
1557 | static struct event_constraint * | ||
1558 | nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
1559 | { | ||
1560 | struct hw_perf_event *hwc = &event->hw; | ||
1561 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
1562 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
1563 | struct intel_uncore_extra_reg *er; | ||
1564 | unsigned long flags; | ||
1565 | int idx, er_idx; | ||
1566 | u64 config1; | ||
1567 | bool ok = false; | ||
1568 | |||
1569 | if (!uncore_box_is_fake(box) && reg1->alloc) | ||
1570 | return NULL; | ||
1571 | |||
1572 | idx = reg1->idx % 6; | ||
1573 | config1 = reg1->config; | ||
1574 | again: | ||
1575 | er_idx = idx; | ||
1576 | /* the 3rd and 4th events use the same extra register */ | ||
1577 | if (er_idx > 2) | ||
1578 | er_idx--; | ||
1579 | er_idx += (reg1->idx / 6) * 5; | ||
1580 | |||
1581 | er = &box->shared_regs[er_idx]; | ||
1582 | raw_spin_lock_irqsave(&er->lock, flags); | ||
1583 | if (idx < 2) { | ||
1584 | if (!atomic_read(&er->ref) || er->config == reg1->config) { | ||
1585 | atomic_inc(&er->ref); | ||
1586 | er->config = reg1->config; | ||
1587 | ok = true; | ||
1588 | } | ||
1589 | } else if (idx == 2 || idx == 3) { | ||
1590 | /* | ||
1591 | * these two events use different fields in an extra register, | ||
1592 | * the 0~7 bits and the 8~15 bits respectively. | ||
1593 | */ | ||
1594 | u64 mask = 0xff << ((idx - 2) * 8); | ||
1595 | if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) || | ||
1596 | !((er->config ^ config1) & mask)) { | ||
1597 | atomic_add(1 << ((idx - 2) * 8), &er->ref); | ||
1598 | er->config &= ~mask; | ||
1599 | er->config |= config1 & mask; | ||
1600 | ok = true; | ||
1601 | } | ||
1602 | } else { | ||
1603 | if (!atomic_read(&er->ref) || | ||
1604 | (er->config == (hwc->config >> 32) && | ||
1605 | er->config1 == reg1->config && | ||
1606 | er->config2 == reg2->config)) { | ||
1607 | atomic_inc(&er->ref); | ||
1608 | er->config = (hwc->config >> 32); | ||
1609 | er->config1 = reg1->config; | ||
1610 | er->config2 = reg2->config; | ||
1611 | ok = true; | ||
1612 | } | ||
1613 | } | ||
1614 | raw_spin_unlock_irqrestore(&er->lock, flags); | ||
1615 | |||
1616 | if (!ok) { | ||
1617 | /* | ||
1618 | * The Rbox events always come in pairs. The paired | ||
1619 | * events are functionally identical, but use different | ||
1620 | * extra registers. If we fail to take an extra | ||
1621 | * register, try the alternative. | ||
1622 | */ | ||
1623 | if (idx % 2) | ||
1624 | idx--; | ||
1625 | else | ||
1626 | idx++; | ||
1627 | if (idx != reg1->idx % 6) { | ||
1628 | if (idx == 2) | ||
1629 | config1 >>= 8; | ||
1630 | else if (idx == 3) | ||
1631 | config1 <<= 8; | ||
1632 | goto again; | ||
1633 | } | ||
1634 | } else { | ||
1635 | if (!uncore_box_is_fake(box)) { | ||
1636 | if (idx != reg1->idx % 6) | ||
1637 | nhmex_rbox_alter_er(box, event); | ||
1638 | reg1->alloc = 1; | ||
1639 | } | ||
1640 | return NULL; | ||
1641 | } | ||
1642 | return &constraint_empty; | ||
1643 | } | ||
1644 | |||
1645 | static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event) | ||
1646 | { | ||
1647 | struct intel_uncore_extra_reg *er; | ||
1648 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
1649 | int idx, er_idx; | ||
1650 | |||
1651 | if (uncore_box_is_fake(box) || !reg1->alloc) | ||
1652 | return; | ||
1653 | |||
1654 | idx = reg1->idx % 6; | ||
1655 | er_idx = idx; | ||
1656 | if (er_idx > 2) | ||
1657 | er_idx--; | ||
1658 | er_idx += (reg1->idx / 6) * 5; | ||
1659 | |||
1660 | er = &box->shared_regs[er_idx]; | ||
1661 | if (idx == 2 || idx == 3) | ||
1662 | atomic_sub(1 << ((idx - 2) * 8), &er->ref); | ||
1663 | else | ||
1664 | atomic_dec(&er->ref); | ||
1665 | |||
1666 | reg1->alloc = 0; | ||
1667 | } | ||
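
Both constraint functions above (and the enable path below) share the same index arithmetic: within an event set the 3rd and 4th events share one extra register, so the 6 event slots of a set map onto 5 shared registers. A sketch of that mapping as a hypothetical helper:

	/* Hypothetical helper: shared-register index for an rbox event. */
	static int rbox_er_idx(int reg_idx)
	{
		int idx = reg_idx % 6;			/* slot within the event set */
		int er_idx = idx;

		if (er_idx > 2)				/* slots 3..5 shift down by one */
			er_idx--;
		return er_idx + (reg_idx / 6) * 5;	/* 5 shared regs per set */
	}
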
1668 | |||
1669 | static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
1670 | { | ||
1671 | struct hw_perf_event *hwc = &event->hw; | ||
1672 | struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; | ||
1673 | struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; | ||
1674 | int port, idx; | ||
1675 | |||
1676 | idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >> | ||
1677 | NHMEX_R_PMON_CTL_EV_SEL_SHIFT; | ||
1678 | if (idx >= 0x18) | ||
1679 | return -EINVAL; | ||
1680 | |||
1681 | reg1->idx = idx; | ||
1682 | reg1->config = event->attr.config1; | ||
1683 | |||
1684 | port = idx / 6 + box->pmu->pmu_idx * 4; | ||
1685 | idx %= 6; | ||
1686 | switch (idx) { | ||
1687 | case 0: | ||
1688 | reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG0(port); | ||
1689 | break; | ||
1690 | case 1: | ||
1691 | reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG1(port); | ||
1692 | break; | ||
1693 | case 2: | ||
1694 | case 3: | ||
1695 | reg1->reg = NHMEX_R_MSR_PORTN_QLX_CFG(port); | ||
1696 | break; | ||
1697 | case 4: | ||
1698 | case 5: | ||
1699 | if (idx == 4) | ||
1700 | reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port); | ||
1701 | else | ||
1702 | reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port); | ||
1703 | reg2->config = event->attr.config2; | ||
1704 | hwc->config |= event->attr.config & (~0ULL << 32); | ||
1705 | break; | ||
1706 | } | ||
1707 | return 0; | ||
1708 | } | ||
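
The port computation in nhmex_rbox_hw_config() (and in nhmex_rbox_alter_er() above) encodes the split of the 8 PQI ports across the two rbox PMU instances: the event set selects the port within a box, and pmu_idx selects ports 0~3 versus 4~7. As a hypothetical helper:

	/* Hypothetical helper: PQI port for an rbox event-select index. */
	static int rbox_port(int ev_sel_idx, int pmu_idx)
	{
		return ev_sel_idx / 6 + pmu_idx * 4;	/* 6 events/set, 4 ports/box */
	}
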
1709 | |||
1710 | static u64 nhmex_rbox_shared_reg_config(struct intel_uncore_box *box, int idx) | ||
1711 | { | ||
1712 | struct intel_uncore_extra_reg *er; | ||
1713 | unsigned long flags; | ||
1714 | u64 config; | ||
1715 | |||
1716 | er = &box->shared_regs[idx]; | ||
1717 | |||
1718 | raw_spin_lock_irqsave(&er->lock, flags); | ||
1719 | config = er->config; | ||
1720 | raw_spin_unlock_irqrestore(&er->lock, flags); | ||
1721 | |||
1722 | return config; | ||
1723 | } | ||
1724 | |||
1725 | static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
1726 | { | ||
1727 | struct hw_perf_event *hwc = &event->hw; | ||
1728 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
1729 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
1730 | int idx, er_idx; | ||
1731 | |||
1732 | idx = reg1->idx % 6; | ||
1733 | er_idx = idx; | ||
1734 | if (er_idx > 2) | ||
1735 | er_idx--; | ||
1736 | er_idx += (reg1->idx / 6) * 5; | ||
1737 | |||
1738 | switch (idx) { | ||
1739 | case 0: | ||
1740 | case 1: | ||
1741 | wrmsrl(reg1->reg, reg1->config); | ||
1742 | break; | ||
1743 | case 2: | ||
1744 | case 3: | ||
1745 | wrmsrl(reg1->reg, nhmex_rbox_shared_reg_config(box, er_idx)); | ||
1746 | break; | ||
1747 | case 4: | ||
1748 | case 5: | ||
1749 | wrmsrl(reg1->reg, reg1->config); | ||
1750 | wrmsrl(reg1->reg + 1, hwc->config >> 32); | ||
1751 | wrmsrl(reg1->reg + 2, reg2->config); | ||
1752 | break; | ||
1753 | } | ||
1754 | |||
1755 | wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 | | ||
1756 | (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK)); | ||
1757 | } | ||
1758 | |||
1759 | DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config:32-63"); | ||
1760 | DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config1:0-63"); | ||
1761 | DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63"); | ||
1762 | DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15"); | ||
1763 | DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31"); | ||
1764 | |||
1765 | static struct attribute *nhmex_uncore_rbox_formats_attr[] = { | ||
1766 | &format_attr_event5.attr, | ||
1767 | &format_attr_xbr_mm_cfg.attr, | ||
1768 | &format_attr_xbr_match.attr, | ||
1769 | &format_attr_xbr_mask.attr, | ||
1770 | &format_attr_qlx_cfg.attr, | ||
1771 | &format_attr_iperf_cfg.attr, | ||
1772 | NULL, | ||
1773 | }; | ||
1774 | |||
1775 | static struct attribute_group nhmex_uncore_rbox_format_group = { | ||
1776 | .name = "format", | ||
1777 | .attrs = nhmex_uncore_rbox_formats_attr, | ||
1778 | }; | ||
1779 | |||
1780 | static struct uncore_event_desc nhmex_uncore_rbox_events[] = { | ||
1781 | INTEL_UNCORE_EVENT_DESC(qpi0_flit_send, "event=0x0,iperf_cfg=0x80000000"), | ||
1782 | INTEL_UNCORE_EVENT_DESC(qpi1_flit_send, "event=0x6,iperf_cfg=0x80000000"), | ||
1783 | INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt, "event=0x0,iperf_cfg=0x40000000"), | ||
1784 | INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt, "event=0x6,iperf_cfg=0x40000000"), | ||
1785 | INTEL_UNCORE_EVENT_DESC(qpi0_data_response, "event=0x0,iperf_cfg=0xc4"), | ||
1786 | INTEL_UNCORE_EVENT_DESC(qpi1_data_response, "event=0x6,iperf_cfg=0xc4"), | ||
1787 | { /* end: all zeroes */ }, | ||
1788 | }; | ||
1789 | |||
1790 | static struct intel_uncore_ops nhmex_uncore_rbox_ops = { | ||
1791 | NHMEX_UNCORE_OPS_COMMON_INIT(), | ||
1792 | .enable_event = nhmex_rbox_msr_enable_event, | ||
1793 | .hw_config = nhmex_rbox_hw_config, | ||
1794 | .get_constraint = nhmex_rbox_get_constraint, | ||
1795 | .put_constraint = nhmex_rbox_put_constraint, | ||
1796 | }; | ||
1797 | |||
1798 | static struct intel_uncore_type nhmex_uncore_rbox = { | ||
1799 | .name = "rbox", | ||
1800 | .num_counters = 8, | ||
1801 | .num_boxes = 2, | ||
1802 | .perf_ctr_bits = 48, | ||
1803 | .event_ctl = NHMEX_R_MSR_PMON_CTL0, | ||
1804 | .perf_ctr = NHMEX_R_MSR_PMON_CNT0, | ||
1805 | .event_mask = NHMEX_R_PMON_RAW_EVENT_MASK, | ||
1806 | .box_ctl = NHMEX_R_MSR_GLOBAL_CTL, | ||
1807 | .msr_offset = NHMEX_R_MSR_OFFSET, | ||
1808 | .pair_ctr_ctl = 1, | ||
1809 | .num_shared_regs = 20, | ||
1810 | .event_descs = nhmex_uncore_rbox_events, | ||
1811 | .ops = &nhmex_uncore_rbox_ops, | ||
1812 | .format_group = &nhmex_uncore_rbox_format_group, | ||
1813 | }; | ||
1814 | |||
1815 | static struct intel_uncore_type *nhmex_msr_uncores[] = { | ||
1816 | &nhmex_uncore_ubox, | ||
1817 | &nhmex_uncore_cbox, | ||
1818 | &nhmex_uncore_bbox, | ||
1819 | &nhmex_uncore_sbox, | ||
1820 | &nhmex_uncore_mbox, | ||
1821 | &nhmex_uncore_rbox, | ||
1822 | &nhmex_uncore_wbox, | ||
1823 | NULL, | ||
1824 | }; | ||
1825 | /* end of Nehalem-EX uncore support */ | ||
1826 | |||
1827 | static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx) | ||
774 | { | 1828 | { |
775 | struct hw_perf_event *hwc = &event->hw; | 1829 | struct hw_perf_event *hwc = &event->hw; |
776 | 1830 | ||
@@ -787,8 +1841,7 @@ static void uncore_assign_hw_event(struct intel_uncore_box *box, | |||
787 | hwc->event_base = uncore_perf_ctr(box, hwc->idx); | 1841 | hwc->event_base = uncore_perf_ctr(box, hwc->idx); |
788 | } | 1842 | } |
789 | 1843 | ||
790 | static void uncore_perf_event_update(struct intel_uncore_box *box, | 1844 | static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event) |
791 | struct perf_event *event) | ||
792 | { | 1845 | { |
793 | u64 prev_count, new_count, delta; | 1846 | u64 prev_count, new_count, delta; |
794 | int shift; | 1847 | int shift; |
@@ -858,14 +1911,12 @@ static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box) | |||
858 | box->hrtimer.function = uncore_pmu_hrtimer; | 1911 | box->hrtimer.function = uncore_pmu_hrtimer; |
859 | } | 1912 | } |
860 | 1913 | ||
861 | struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, | 1914 | struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cpu) |
862 | int cpu) | ||
863 | { | 1915 | { |
864 | struct intel_uncore_box *box; | 1916 | struct intel_uncore_box *box; |
865 | int i, size; | 1917 | int i, size; |
866 | 1918 | ||
867 | size = sizeof(*box) + type->num_shared_regs * | 1919 | size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg); |
868 | sizeof(struct intel_uncore_extra_reg); | ||
869 | 1920 | ||
870 | box = kmalloc_node(size, GFP_KERNEL | __GFP_ZERO, cpu_to_node(cpu)); | 1921 | box = kmalloc_node(size, GFP_KERNEL | __GFP_ZERO, cpu_to_node(cpu)); |
871 | if (!box) | 1922 | if (!box) |
@@ -915,12 +1966,11 @@ static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event) | |||
915 | * perf core schedules events on the basis of cpu; uncore events are | 1966 | * perf core schedules events on the basis of cpu; uncore events are |
916 | * collected by one of the cpus inside a physical package. | 1967 | * collected by one of the cpus inside a physical package. |
917 | */ | 1968 | */ |
918 | return uncore_pmu_to_box(uncore_event_to_pmu(event), | 1969 | return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id()); |
919 | smp_processor_id()); | ||
920 | } | 1970 | } |
921 | 1971 | ||
922 | static int uncore_collect_events(struct intel_uncore_box *box, | 1972 | static int |
923 | struct perf_event *leader, bool dogrp) | 1973 | uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp) |
924 | { | 1974 | { |
925 | struct perf_event *event; | 1975 | struct perf_event *event; |
926 | int n, max_count; | 1976 | int n, max_count; |
@@ -952,8 +2002,7 @@ static int uncore_collect_events(struct intel_uncore_box *box, | |||
952 | } | 2002 | } |
953 | 2003 | ||
954 | static struct event_constraint * | 2004 | static struct event_constraint * |
955 | uncore_get_event_constraint(struct intel_uncore_box *box, | 2005 | uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event) |
956 | struct perf_event *event) | ||
957 | { | 2006 | { |
958 | struct intel_uncore_type *type = box->pmu->type; | 2007 | struct intel_uncore_type *type = box->pmu->type; |
959 | struct event_constraint *c; | 2008 | struct event_constraint *c; |
@@ -977,15 +2026,13 @@ uncore_get_event_constraint(struct intel_uncore_box *box, | |||
977 | return &type->unconstrainted; | 2026 | return &type->unconstrainted; |
978 | } | 2027 | } |
979 | 2028 | ||
980 | static void uncore_put_event_constraint(struct intel_uncore_box *box, | 2029 | static void uncore_put_event_constraint(struct intel_uncore_box *box, struct perf_event *event) |
981 | struct perf_event *event) | ||
982 | { | 2030 | { |
983 | if (box->pmu->type->ops->put_constraint) | 2031 | if (box->pmu->type->ops->put_constraint) |
984 | box->pmu->type->ops->put_constraint(box, event); | 2032 | box->pmu->type->ops->put_constraint(box, event); |
985 | } | 2033 | } |
986 | 2034 | ||
987 | static int uncore_assign_events(struct intel_uncore_box *box, | 2035 | static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n) |
988 | int assign[], int n) | ||
989 | { | 2036 | { |
990 | unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)]; | 2037 | unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)]; |
991 | struct event_constraint *c, *constraints[UNCORE_PMC_IDX_MAX]; | 2038 | struct event_constraint *c, *constraints[UNCORE_PMC_IDX_MAX]; |
@@ -1407,8 +2454,7 @@ static bool pcidrv_registered; | |||
1407 | /* | 2454 | /* |
1408 | * add a pci uncore device | 2455 | * add a pci uncore device |
1409 | */ | 2456 | */ |
1410 | static int __devinit uncore_pci_add(struct intel_uncore_type *type, | 2457 | static int __devinit uncore_pci_add(struct intel_uncore_type *type, struct pci_dev *pdev) |
1411 | struct pci_dev *pdev) | ||
1412 | { | 2458 | { |
1413 | struct intel_uncore_pmu *pmu; | 2459 | struct intel_uncore_pmu *pmu; |
1414 | struct intel_uncore_box *box; | 2460 | struct intel_uncore_box *box; |
@@ -1485,6 +2531,7 @@ static int __devinit uncore_pci_probe(struct pci_dev *pdev, | |||
1485 | struct intel_uncore_type *type; | 2531 | struct intel_uncore_type *type; |
1486 | 2532 | ||
1487 | type = (struct intel_uncore_type *)id->driver_data; | 2533 | type = (struct intel_uncore_type *)id->driver_data; |
2534 | |||
1488 | return uncore_pci_add(type, pdev); | 2535 | return uncore_pci_add(type, pdev); |
1489 | } | 2536 | } |
1490 | 2537 | ||
@@ -1612,8 +2659,8 @@ static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id) | |||
1612 | return 0; | 2659 | return 0; |
1613 | } | 2660 | } |
1614 | 2661 | ||
1615 | static void __cpuinit uncore_change_context(struct intel_uncore_type **uncores, | 2662 | static void __cpuinit |
1616 | int old_cpu, int new_cpu) | 2663 | uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu) |
1617 | { | 2664 | { |
1618 | struct intel_uncore_type *type; | 2665 | struct intel_uncore_type *type; |
1619 | struct intel_uncore_pmu *pmu; | 2666 | struct intel_uncore_pmu *pmu; |
@@ -1694,8 +2741,8 @@ static void __cpuinit uncore_event_init_cpu(int cpu) | |||
1694 | uncore_change_context(pci_uncores, -1, cpu); | 2741 | uncore_change_context(pci_uncores, -1, cpu); |
1695 | } | 2742 | } |
1696 | 2743 | ||
1697 | static int __cpuinit uncore_cpu_notifier(struct notifier_block *self, | 2744 | static int |
1698 | unsigned long action, void *hcpu) | 2745 | __cpuinit uncore_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) |
1699 | { | 2746 | { |
1700 | unsigned int cpu = (long)hcpu; | 2747 | unsigned int cpu = (long)hcpu; |
1701 | 2748 | ||
@@ -1732,12 +2779,12 @@ static int __cpuinit uncore_cpu_notifier(struct notifier_block *self, | |||
1732 | } | 2779 | } |
1733 | 2780 | ||
1734 | static struct notifier_block uncore_cpu_nb __cpuinitdata = { | 2781 | static struct notifier_block uncore_cpu_nb __cpuinitdata = { |
1735 | .notifier_call = uncore_cpu_notifier, | 2782 | .notifier_call = uncore_cpu_notifier, |
1736 | /* | 2783 | /* |
1737 | * to migrate uncore events, our notifier should be executed | 2784 | * to migrate uncore events, our notifier should be executed |
1738 | * before perf core's notifier. | 2785 | * before perf core's notifier. |
1739 | */ | 2786 | */ |
1740 | .priority = CPU_PRI_PERF + 1, | 2787 | .priority = CPU_PRI_PERF + 1, |
1741 | }; | 2788 | }; |
1742 | 2789 | ||
1743 | static void __init uncore_cpu_setup(void *dummy) | 2790 | static void __init uncore_cpu_setup(void *dummy) |
@@ -1767,6 +2814,9 @@ static int __init uncore_cpu_init(void) | |||
1767 | snbep_uncore_cbox.num_boxes = max_cores; | 2814 | snbep_uncore_cbox.num_boxes = max_cores; |
1768 | msr_uncores = snbep_msr_uncores; | 2815 | msr_uncores = snbep_msr_uncores; |
1769 | break; | 2816 | break; |
2817 | case 46: | ||
2818 | msr_uncores = nhmex_msr_uncores; | ||
2819 | break; | ||
1770 | default: | 2820 | default: |
1771 | return 0; | 2821 | return 0; |
1772 | } | 2822 | } |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h index b13e9ea81def..f3851892e077 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h | |||
@@ -5,8 +5,6 @@ | |||
5 | #include "perf_event.h" | 5 | #include "perf_event.h" |
6 | 6 | ||
7 | #define UNCORE_PMU_NAME_LEN 32 | 7 | #define UNCORE_PMU_NAME_LEN 32 |
8 | #define UNCORE_BOX_HASH_SIZE 8 | ||
9 | |||
10 | #define UNCORE_PMU_HRTIMER_INTERVAL (60 * NSEC_PER_SEC) | 8 | #define UNCORE_PMU_HRTIMER_INTERVAL (60 * NSEC_PER_SEC) |
11 | 9 | ||
12 | #define UNCORE_FIXED_EVENT 0xff | 10 | #define UNCORE_FIXED_EVENT 0xff |
@@ -115,6 +113,10 @@ | |||
115 | SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \ | 113 | SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \ |
116 | SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET) | 114 | SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET) |
117 | 115 | ||
116 | #define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK \ | ||
117 | (SNBEP_PMON_RAW_EVENT_MASK | \ | ||
118 | SNBEP_PMON_CTL_EV_SEL_EXT) | ||
119 | |||
118 | /* SNB-EP pci control register */ | 120 | /* SNB-EP pci control register */ |
119 | #define SNBEP_PCI_PMON_BOX_CTL 0xf4 | 121 | #define SNBEP_PCI_PMON_BOX_CTL 0xf4 |
120 | #define SNBEP_PCI_PMON_CTL0 0xd8 | 122 | #define SNBEP_PCI_PMON_CTL0 0xd8 |
@@ -158,6 +160,193 @@ | |||
158 | #define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc | 160 | #define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc |
159 | #define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd | 161 | #define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd |
160 | 162 | ||
163 | /* NHM-EX event control */ | ||
164 | #define NHMEX_PMON_CTL_EV_SEL_MASK 0x000000ff | ||
165 | #define NHMEX_PMON_CTL_UMASK_MASK 0x0000ff00 | ||
166 | #define NHMEX_PMON_CTL_EN_BIT0 (1 << 0) | ||
167 | #define NHMEX_PMON_CTL_EDGE_DET (1 << 18) | ||
168 | #define NHMEX_PMON_CTL_PMI_EN (1 << 20) | ||
169 | #define NHMEX_PMON_CTL_EN_BIT22 (1 << 22) | ||
170 | #define NHMEX_PMON_CTL_INVERT (1 << 23) | ||
171 | #define NHMEX_PMON_CTL_TRESH_MASK 0xff000000 | ||
172 | #define NHMEX_PMON_RAW_EVENT_MASK (NHMEX_PMON_CTL_EV_SEL_MASK | \ | ||
173 | NHMEX_PMON_CTL_UMASK_MASK | \ | ||
174 | NHMEX_PMON_CTL_EDGE_DET | \ | ||
175 | NHMEX_PMON_CTL_INVERT | \ | ||
176 | NHMEX_PMON_CTL_TRESH_MASK) | ||
177 | |||
178 | /* NHM-EX Ubox */ | ||
179 | #define NHMEX_U_MSR_PMON_GLOBAL_CTL 0xc00 | ||
180 | #define NHMEX_U_MSR_PMON_CTR 0xc11 | ||
181 | #define NHMEX_U_MSR_PMON_EV_SEL 0xc10 | ||
182 | |||
183 | #define NHMEX_U_PMON_GLOBAL_EN (1 << 0) | ||
184 | #define NHMEX_U_PMON_GLOBAL_PMI_CORE_SEL 0x0000001e | ||
185 | #define NHMEX_U_PMON_GLOBAL_EN_ALL (1 << 28) | ||
186 | #define NHMEX_U_PMON_GLOBAL_RST_ALL (1 << 29) | ||
187 | #define NHMEX_U_PMON_GLOBAL_FRZ_ALL (1 << 31) | ||
188 | |||
189 | #define NHMEX_U_PMON_RAW_EVENT_MASK \ | ||
190 | (NHMEX_PMON_CTL_EV_SEL_MASK | \ | ||
191 | NHMEX_PMON_CTL_EDGE_DET) | ||
192 | |||
193 | /* NHM-EX Cbox */ | ||
194 | #define NHMEX_C0_MSR_PMON_GLOBAL_CTL 0xd00 | ||
195 | #define NHMEX_C0_MSR_PMON_CTR0 0xd11 | ||
196 | #define NHMEX_C0_MSR_PMON_EV_SEL0 0xd10 | ||
197 | #define NHMEX_C_MSR_OFFSET 0x20 | ||
198 | |||
199 | /* NHM-EX Bbox */ | ||
200 | #define NHMEX_B0_MSR_PMON_GLOBAL_CTL 0xc20 | ||
201 | #define NHMEX_B0_MSR_PMON_CTR0 0xc31 | ||
202 | #define NHMEX_B0_MSR_PMON_CTL0 0xc30 | ||
203 | #define NHMEX_B_MSR_OFFSET 0x40 | ||
204 | #define NHMEX_B0_MSR_MATCH 0xe45 | ||
205 | #define NHMEX_B0_MSR_MASK 0xe46 | ||
206 | #define NHMEX_B1_MSR_MATCH 0xe4d | ||
207 | #define NHMEX_B1_MSR_MASK 0xe4e | ||
208 | |||
209 | #define NHMEX_B_PMON_CTL_EN (1 << 0) | ||
210 | #define NHMEX_B_PMON_CTL_EV_SEL_SHIFT 1 | ||
211 | #define NHMEX_B_PMON_CTL_EV_SEL_MASK \ | ||
212 | (0x1f << NHMEX_B_PMON_CTL_EV_SEL_SHIFT) | ||
213 | #define NHMEX_B_PMON_CTR_SHIFT 6 | ||
214 | #define NHMEX_B_PMON_CTR_MASK \ | ||
215 | (0x3 << NHMEX_B_PMON_CTR_SHIFT) | ||
216 | #define NHMEX_B_PMON_RAW_EVENT_MASK \ | ||
217 | (NHMEX_B_PMON_CTL_EV_SEL_MASK | \ | ||
218 | NHMEX_B_PMON_CTR_MASK) | ||
219 | |||
220 | /* NHM-EX Sbox */ | ||
221 | #define NHMEX_S0_MSR_PMON_GLOBAL_CTL 0xc40 | ||
222 | #define NHMEX_S0_MSR_PMON_CTR0 0xc51 | ||
223 | #define NHMEX_S0_MSR_PMON_CTL0 0xc50 | ||
224 | #define NHMEX_S_MSR_OFFSET 0x80 | ||
225 | #define NHMEX_S0_MSR_MM_CFG 0xe48 | ||
226 | #define NHMEX_S0_MSR_MATCH 0xe49 | ||
227 | #define NHMEX_S0_MSR_MASK 0xe4a | ||
228 | #define NHMEX_S1_MSR_MM_CFG 0xe58 | ||
229 | #define NHMEX_S1_MSR_MATCH 0xe59 | ||
230 | #define NHMEX_S1_MSR_MASK 0xe5a | ||
231 | |||
232 | #define NHMEX_S_PMON_MM_CFG_EN (0x1ULL << 63) | ||
233 | |||
234 | /* NHM-EX Mbox */ | ||
235 | #define NHMEX_M0_MSR_GLOBAL_CTL 0xca0 | ||
236 | #define NHMEX_M0_MSR_PMU_DSP 0xca5 | ||
237 | #define NHMEX_M0_MSR_PMU_ISS 0xca6 | ||
238 | #define NHMEX_M0_MSR_PMU_MAP 0xca7 | ||
239 | #define NHMEX_M0_MSR_PMU_MSC_THR 0xca8 | ||
240 | #define NHMEX_M0_MSR_PMU_PGT 0xca9 | ||
241 | #define NHMEX_M0_MSR_PMU_PLD 0xcaa | ||
242 | #define NHMEX_M0_MSR_PMU_ZDP_CTL_FVC 0xcab | ||
243 | #define NHMEX_M0_MSR_PMU_CTL0 0xcb0 | ||
244 | #define NHMEX_M0_MSR_PMU_CNT0 0xcb1 | ||
245 | #define NHMEX_M_MSR_OFFSET 0x40 | ||
246 | #define NHMEX_M0_MSR_PMU_MM_CFG 0xe54 | ||
247 | #define NHMEX_M1_MSR_PMU_MM_CFG 0xe5c | ||
248 | |||
249 | #define NHMEX_M_PMON_MM_CFG_EN (1ULL << 63) | ||
250 | #define NHMEX_M_PMON_ADDR_MATCH_MASK 0x3ffffffffULL | ||
251 | #define NHMEX_M_PMON_ADDR_MASK_MASK 0x7ffffffULL | ||
252 | #define NHMEX_M_PMON_ADDR_MASK_SHIFT 34 | ||
253 | |||
254 | #define NHMEX_M_PMON_CTL_EN (1 << 0) | ||
255 | #define NHMEX_M_PMON_CTL_PMI_EN (1 << 1) | ||
256 | #define NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT 2 | ||
257 | #define NHMEX_M_PMON_CTL_COUNT_MODE_MASK \ | ||
258 | (0x3 << NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT) | ||
259 | #define NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT 4 | ||
260 | #define NHMEX_M_PMON_CTL_STORAGE_MODE_MASK \ | ||
261 | (0x3 << NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT) | ||
262 | #define NHMEX_M_PMON_CTL_WRAP_MODE (1 << 6) | ||
263 | #define NHMEX_M_PMON_CTL_FLAG_MODE (1 << 7) | ||
264 | #define NHMEX_M_PMON_CTL_INC_SEL_SHIFT 9 | ||
265 | #define NHMEX_M_PMON_CTL_INC_SEL_MASK \ | ||
266 | (0x1f << NHMEX_M_PMON_CTL_INC_SEL_SHIFT) | ||
267 | #define NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT 19 | ||
268 | #define NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK \ | ||
269 | (0x7 << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT) | ||
270 | #define NHMEX_M_PMON_RAW_EVENT_MASK \ | ||
271 | (NHMEX_M_PMON_CTL_COUNT_MODE_MASK | \ | ||
272 | NHMEX_M_PMON_CTL_STORAGE_MODE_MASK | \ | ||
273 | NHMEX_M_PMON_CTL_WRAP_MODE | \ | ||
274 | NHMEX_M_PMON_CTL_FLAG_MODE | \ | ||
275 | NHMEX_M_PMON_CTL_INC_SEL_MASK | \ | ||
276 | NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK) | ||
277 | |||
278 | |||
279 | #define NHMEX_M_PMON_ZDP_CTL_FVC_FVID_MASK 0x1f | ||
280 | #define NHMEX_M_PMON_ZDP_CTL_FVC_BCMD_MASK (0x7 << 5) | ||
281 | #define NHMEX_M_PMON_ZDP_CTL_FVC_RSP_MASK (0x7 << 8) | ||
282 | #define NHMEX_M_PMON_ZDP_CTL_FVC_PBOX_INIT_ERR (1 << 23) | ||
283 | #define NHMEX_M_PMON_ZDP_CTL_FVC_MASK \ | ||
284 | (NHMEX_M_PMON_ZDP_CTL_FVC_FVID_MASK | \ | ||
285 | NHMEX_M_PMON_ZDP_CTL_FVC_BCMD_MASK | \ | ||
286 | NHMEX_M_PMON_ZDP_CTL_FVC_RSP_MASK | \ | ||
287 | NHMEX_M_PMON_ZDP_CTL_FVC_PBOX_INIT_ERR) | ||
288 | #define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7 << (11 + 3 * (n))) | ||
289 | |||
290 | /* | ||
291 | * Use bits 9~13 to select the event if the 7th bit is not set; | ||
292 | * otherwise use bits 19~21 to select the event. | ||
293 | */ | ||
294 | #define MBOX_INC_SEL(x) ((x) << NHMEX_M_PMON_CTL_INC_SEL_SHIFT) | ||
295 | #define MBOX_SET_FLAG_SEL(x) (((x) << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT) | \ | ||
296 | NHMEX_M_PMON_CTL_FLAG_MODE) | ||
297 | #define MBOX_INC_SEL_MASK (NHMEX_M_PMON_CTL_INC_SEL_MASK | \ | ||
298 | NHMEX_M_PMON_CTL_FLAG_MODE) | ||
299 | #define MBOX_SET_FLAG_SEL_MASK (NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK | \ | ||
300 | NHMEX_M_PMON_CTL_FLAG_MODE) | ||
301 | #define MBOX_INC_SEL_EXTAR_REG(c, r) \ | ||
302 | EVENT_EXTRA_REG(MBOX_INC_SEL(c), NHMEX_M0_MSR_PMU_##r, \ | ||
303 | MBOX_INC_SEL_MASK, (u64)-1, NHMEX_M_##r) | ||
304 | #define MBOX_SET_FLAG_SEL_EXTRA_REG(c, r) \ | ||
305 | EVENT_EXTRA_REG(MBOX_SET_FLAG_SEL(c), NHMEX_M0_MSR_PMU_##r, \ | ||
306 | MBOX_SET_FLAG_SEL_MASK, \ | ||
307 | (u64)-1, NHMEX_M_##r) | ||
308 | |||
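
Per the comment above, the flag-mode bit (bit 7) decides which field carries the mbox event select. A minimal sketch of that decode (mbox_event_sel() is hypothetical, for illustration only):

	/* Hypothetical helper: which selector names the mbox event. */
	static unsigned int mbox_event_sel(u64 config)
	{
		if (config & NHMEX_M_PMON_CTL_FLAG_MODE)	/* bit 7 set */
			return (config & NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK)
				>> NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT;
		return (config & NHMEX_M_PMON_CTL_INC_SEL_MASK)
			>> NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
	}
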
309 | /* NHM-EX Rbox */ | ||
310 | #define NHMEX_R_MSR_GLOBAL_CTL 0xe00 | ||
311 | #define NHMEX_R_MSR_PMON_CTL0 0xe10 | ||
312 | #define NHMEX_R_MSR_PMON_CNT0 0xe11 | ||
313 | #define NHMEX_R_MSR_OFFSET 0x20 | ||
314 | |||
315 | #define NHMEX_R_MSR_PORTN_QLX_CFG(n) \ | ||
316 | ((n) < 4 ? (0xe0c + (n)) : (0xe2c + (n) - 4)) | ||
317 | #define NHMEX_R_MSR_PORTN_IPERF_CFG0(n) (0xe04 + (n)) | ||
318 | #define NHMEX_R_MSR_PORTN_IPERF_CFG1(n) (0xe24 + (n)) | ||
319 | #define NHMEX_R_MSR_PORTN_XBR_OFFSET(n) \ | ||
320 | (((n) < 4 ? 0 : 0x10) + (n) * 4) | ||
321 | #define NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) \ | ||
322 | (0xe60 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n)) | ||
323 | #define NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(n) \ | ||
324 | (NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 1) | ||
325 | #define NHMEX_R_MSR_PORTN_XBR_SET1_MASK(n) \ | ||
326 | (NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 2) | ||
327 | #define NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) \ | ||
328 | (0xe70 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n)) | ||
329 | #define NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(n) \ | ||
330 | (NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 1) | ||
331 | #define NHMEX_R_MSR_PORTN_XBR_SET2_MASK(n) \ | ||
332 | (NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 2) | ||
333 | |||
334 | #define NHMEX_R_PMON_CTL_EN (1 << 0) | ||
335 | #define NHMEX_R_PMON_CTL_EV_SEL_SHIFT 1 | ||
336 | #define NHMEX_R_PMON_CTL_EV_SEL_MASK \ | ||
337 | (0x1f << NHMEX_R_PMON_CTL_EV_SEL_SHIFT) | ||
338 | #define NHMEX_R_PMON_CTL_PMI_EN (1 << 6) | ||
339 | #define NHMEX_R_PMON_RAW_EVENT_MASK NHMEX_R_PMON_CTL_EV_SEL_MASK | ||
340 | |||
341 | /* NHM-EX Wbox */ | ||
342 | #define NHMEX_W_MSR_GLOBAL_CTL 0xc80 | ||
343 | #define NHMEX_W_MSR_PMON_CNT0 0xc90 | ||
344 | #define NHMEX_W_MSR_PMON_EVT_SEL0 0xc91 | ||
345 | #define NHMEX_W_MSR_PMON_FIXED_CTR 0x394 | ||
346 | #define NHMEX_W_MSR_PMON_FIXED_CTL 0x395 | ||
347 | |||
348 | #define NHMEX_W_PMON_GLOBAL_FIXED_EN (1ULL << 31) | ||
349 | |||
161 | struct intel_uncore_ops; | 350 | struct intel_uncore_ops; |
162 | struct intel_uncore_pmu; | 351 | struct intel_uncore_pmu; |
163 | struct intel_uncore_box; | 352 | struct intel_uncore_box; |
@@ -178,6 +367,7 @@ struct intel_uncore_type { | |||
178 | unsigned msr_offset; | 367 | unsigned msr_offset; |
179 | unsigned num_shared_regs:8; | 368 | unsigned num_shared_regs:8; |
180 | unsigned single_fixed:1; | 369 | unsigned single_fixed:1; |
370 | unsigned pair_ctr_ctl:1; | ||
181 | struct event_constraint unconstrainted; | 371 | struct event_constraint unconstrainted; |
182 | struct event_constraint *constraints; | 372 | struct event_constraint *constraints; |
183 | struct intel_uncore_pmu *pmus; | 373 | struct intel_uncore_pmu *pmus; |
@@ -213,7 +403,7 @@ struct intel_uncore_pmu { | |||
213 | 403 | ||
214 | struct intel_uncore_extra_reg { | 404 | struct intel_uncore_extra_reg { |
215 | raw_spinlock_t lock; | 405 | raw_spinlock_t lock; |
216 | u64 config1; | 406 | u64 config, config1, config2; |
217 | atomic_t ref; | 407 | atomic_t ref; |
218 | }; | 408 | }; |
219 | 409 | ||
@@ -323,14 +513,16 @@ unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box) | |||
323 | static inline | 513 | static inline |
324 | unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx) | 514 | unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx) |
325 | { | 515 | { |
326 | return idx + box->pmu->type->event_ctl + | 516 | return box->pmu->type->event_ctl + |
517 | (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + | ||
327 | box->pmu->type->msr_offset * box->pmu->pmu_idx; | 518 | box->pmu->type->msr_offset * box->pmu->pmu_idx; |
328 | } | 519 | } |
329 | 520 | ||
330 | static inline | 521 | static inline |
331 | unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx) | 522 | unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx) |
332 | { | 523 | { |
333 | return idx + box->pmu->type->perf_ctr + | 524 | return box->pmu->type->perf_ctr + |
525 | (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + | ||
334 | box->pmu->type->msr_offset * box->pmu->pmu_idx; | 526 | box->pmu->type->msr_offset * box->pmu->pmu_idx; |
335 | } | 527 | } |
336 | 528 | ||
@@ -422,3 +614,8 @@ static inline void uncore_box_init(struct intel_uncore_box *box) | |||
422 | box->pmu->type->ops->init_box(box); | 614 | box->pmu->type->ops->init_box(box); |
423 | } | 615 | } |
424 | } | 616 | } |
617 | |||
618 | static inline bool uncore_box_is_fake(struct intel_uncore_box *box) | ||
619 | { | ||
620 | return (box->phys_id < 0); | ||
621 | } | ||