Diffstat:
-rw-r--r--  Documentation/00-INDEX                                               |    2
-rw-r--r--  Documentation/timers/00-INDEX                                        |   10
-rw-r--r--  Documentation/timers/hpet.txt (renamed from Documentation/hpet.txt)  |   43
-rw-r--r--  arch/Kconfig                                                         |   14
-rw-r--r--  arch/x86/kernel/apic_32.c                                            |    4
-rw-r--r--  arch/x86/kernel/apic_64.c                                            |    4
-rw-r--r--  arch/x86/kernel/hpet.c                                               |    6
-rw-r--r--  arch/x86/kernel/quirks.c                                             |   41
-rw-r--r--  arch/x86/oprofile/Makefile                                           |    2
-rw-r--r--  arch/x86/oprofile/nmi_int.c                                          |   27
-rw-r--r--  arch/x86/oprofile/op_model_amd.c                                     |  543
-rw-r--r--  arch/x86/oprofile/op_model_athlon.c                                  |  190
-rw-r--r--  arch/x86/oprofile/op_x86_model.h                                     |    4
-rw-r--r--  arch/x86/pci/fixup.c                                                 |   28
-rw-r--r--  drivers/char/hpet.c                                                  |  159
-rw-r--r--  drivers/oprofile/buffer_sync.c                                       |  209
-rw-r--r--  drivers/oprofile/cpu_buffer.c                                        |   74
-rw-r--r--  drivers/oprofile/cpu_buffer.h                                        |    2
-rw-r--r--  include/linux/hpet.h                                                 |   14
-rw-r--r--  include/linux/oprofile.h                                             |    2
20 files changed, 1004 insertions, 374 deletions
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 73060819ed99..438277800103 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -159,8 +159,6 @@ hayes-esp.txt
 	- info on using the Hayes ESP serial driver.
 highuid.txt
 	- notes on the change from 16 bit to 32 bit user/group IDs.
-hpet.txt
-	- High Precision Event Timer Driver for Linux.
 timers/
 	- info on the timer related topics
 hw_random.txt
diff --git a/Documentation/timers/00-INDEX b/Documentation/timers/00-INDEX
new file mode 100644
index 000000000000..397dc35e1323
--- /dev/null
+++ b/Documentation/timers/00-INDEX
@@ -0,0 +1,10 @@
+00-INDEX
+	- this file
+highres.txt
+	- High resolution timers and dynamic ticks design notes
+hpet.txt
+	- High Precision Event Timer Driver for Linux
+hrtimers.txt
+	- subsystem for high-resolution kernel timers
+timer_stats.txt
+	- timer usage statistics
diff --git a/Documentation/hpet.txt b/Documentation/timers/hpet.txt
index 6ad52d9dad6c..e7c09abcfab4 100644
--- a/Documentation/hpet.txt
+++ b/Documentation/timers/hpet.txt
@@ -1,21 +1,32 @@
 		High Precision Event Timer Driver for Linux
 
-The High Precision Event Timer (HPET) hardware is the future replacement
-for the 8254 and Real Time Clock (RTC) periodic timer functionality.
-Each HPET can have up to 32 timers. It is possible to configure the
-first two timers as legacy replacements for 8254 and RTC periodic timers.
-A specification done by Intel and Microsoft can be found at
-<http://www.intel.com/technology/architecture/hpetspec.htm>.
+The High Precision Event Timer (HPET) hardware follows a specification
+by Intel and Microsoft which can be found at
+
+	http://www.intel.com/technology/architecture/hpetspec.htm
+
+Each HPET has one fixed-rate counter (at 10+ MHz, hence "High Precision")
+and up to 32 comparators. Normally three or more comparators are provided,
+each of which can generate oneshot interrupts and at least one of which has
+additional hardware to support periodic interrupts. The comparators are
+also called "timers", which can be misleading since usually timers are
+independent of each other ... these share a counter, complicating resets.
+
+HPET devices can support two interrupt routing modes. In one mode, the
+comparators are additional interrupt sources with no particular system
+role. Many x86 BIOS writers don't route HPET interrupts at all, which
+prevents use of that mode. They support the other "legacy replacement"
+mode where the first two comparators block interrupts from 8254 timers
+and from the RTC.
 
 The driver supports detection of HPET driver allocation and initialization
 of the HPET before the driver module_init routine is called. This enables
 platform code which uses timer 0 or 1 as the main timer to intercept HPET
 initialization. An example of this initialization can be found in
-arch/i386/kernel/time_hpet.c.
+arch/x86/kernel/hpet.c.
 
-The driver provides two APIs which are very similar to the API found in
-the rtc.c driver. There is a user space API and a kernel space API.
-An example user space program is provided below.
+The driver provides a userspace API which resembles the API found in the
+RTC driver framework. An example user space program is provided below.
 
 #include <stdio.h>
 #include <stdlib.h>
@@ -286,15 +297,3 @@ out:
 
 	return;
 }
-
-The kernel API has three interfaces exported from the driver:
-
-	hpet_register(struct hpet_task *tp, int periodic)
-	hpet_unregister(struct hpet_task *tp)
-	hpet_control(struct hpet_task *tp, unsigned int cmd, unsigned long arg)
-
-The kernel module using this interface fills in the ht_func and ht_data
-members of the hpet_task structure before calling hpet_register.
-hpet_control simply vectors to the hpet_ioctl routine and has the same
-commands and respective arguments as the user API. hpet_unregister
-is used to terminate usage of the HPET timer reserved by hpet_register.
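For reference, the user-space API mentioned in the revised text boils down to open(), a few
ioctls declared in <linux/hpet.h>, and blocking read()s. The following is only a minimal
sketch, not the full example program shipped in hpet.txt; the frequency value and loop count
are arbitrary, error handling is trimmed, and it assumes a comparator with a routable
interrupt is free (HPET_IE_ON fails otherwise).

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/hpet.h>

int main(void)
{
	unsigned long freq = 32, data;	/* request 32 interrupts per second */
	int i, fd;

	fd = open("/dev/hpet", O_RDONLY);
	if (fd < 0)
		return 1;

	if (ioctl(fd, HPET_IRQFREQ, freq) < 0 ||	/* set interrupt rate first */
	    ioctl(fd, HPET_IE_ON, 0) < 0) {		/* then enable the interrupt */
		close(fd);
		return 1;
	}

	for (i = 0; i < 10; i++) {
		/* blocks until an interrupt; returns count since the last read */
		if (read(fd, &data, sizeof(data)) != sizeof(data))
			break;
		printf("hpet: %lu interrupt(s)\n", data);
	}

	ioctl(fd, HPET_IE_OFF, 0);
	close(fd);
	return 0;
}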
diff --git a/arch/Kconfig b/arch/Kconfig
index 364c6dadde0a..0267babe5eb9 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -13,6 +13,20 @@ config OPROFILE
13 13
14 If unsure, say N. 14 If unsure, say N.
15 15
16config OPROFILE_IBS
17 bool "OProfile AMD IBS support (EXPERIMENTAL)"
18 default n
19 depends on OPROFILE && SMP && X86
20 help
21 Instruction-Based Sampling (IBS) is a new profiling
22 technique that provides rich, precise program performance
23 information. IBS is introduced by AMD Family10h processors
24 (AMD Opteron Quad-Core processor “Barcelona”) to overcome
25 the limitations of conventional performance counter
26 sampling.
27
28 If unsure, say N.
29
16config HAVE_OPROFILE 30config HAVE_OPROFILE
17 def_bool n 31 def_bool n
18 32
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index a91c57cb666a..21c831d96af3 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -295,6 +295,9 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
295 * 295 *
296 * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and 296 * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
297 * MCE interrupts are supported. Thus MCE offset must be set to 0. 297 * MCE interrupts are supported. Thus MCE offset must be set to 0.
298 *
299 * If mask=1, the LVT entry does not generate interrupts while mask=0
300 * enables the vector. See also the BKDGs.
298 */ 301 */
299 302
300#define APIC_EILVT_LVTOFF_MCE 0 303#define APIC_EILVT_LVTOFF_MCE 0
@@ -319,6 +322,7 @@ u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
319 setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask); 322 setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
320 return APIC_EILVT_LVTOFF_IBS; 323 return APIC_EILVT_LVTOFF_IBS;
321} 324}
325EXPORT_SYMBOL_GPL(setup_APIC_eilvt_ibs);
322 326
323/* 327/*
324 * Program the next event, relative to now 328 * Program the next event, relative to now
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index 53898b65a6ae..94ddb69ae15e 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -307,6 +307,9 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
307 * 307 *
308 * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and 308 * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
309 * MCE interrupts are supported. Thus MCE offset must be set to 0. 309 * MCE interrupts are supported. Thus MCE offset must be set to 0.
310 *
311 * If mask=1, the LVT entry does not generate interrupts while mask=0
312 * enables the vector. See also the BKDGs.
310 */ 313 */
311 314
312#define APIC_EILVT_LVTOFF_MCE 0 315#define APIC_EILVT_LVTOFF_MCE 0
@@ -331,6 +334,7 @@ u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
331 setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask); 334 setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
332 return APIC_EILVT_LVTOFF_IBS; 335 return APIC_EILVT_LVTOFF_IBS;
333} 336}
337EXPORT_SYMBOL_GPL(setup_APIC_eilvt_ibs);
334 338
335/* 339/*
336 * Program the next event, relative to now 340 * Program the next event, relative to now
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 73deaffadd03..acf62fc233da 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -115,13 +115,17 @@ static void hpet_reserve_platform_timers(unsigned long id)
115 hd.hd_phys_address = hpet_address; 115 hd.hd_phys_address = hpet_address;
116 hd.hd_address = hpet; 116 hd.hd_address = hpet;
117 hd.hd_nirqs = nrtimers; 117 hd.hd_nirqs = nrtimers;
118 hd.hd_flags = HPET_DATA_PLATFORM;
119 hpet_reserve_timer(&hd, 0); 118 hpet_reserve_timer(&hd, 0);
120 119
121#ifdef CONFIG_HPET_EMULATE_RTC 120#ifdef CONFIG_HPET_EMULATE_RTC
122 hpet_reserve_timer(&hd, 1); 121 hpet_reserve_timer(&hd, 1);
123#endif 122#endif
124 123
124 /*
125 * NOTE that hd_irq[] reflects IOAPIC input pins (LEGACY_8254
126 * is wrong for i8259!) not the output IRQ. Many BIOS writers
127 * don't bother configuring *any* comparator interrupts.
128 */
125 hd.hd_irq[0] = HPET_LEGACY_8254; 129 hd.hd_irq[0] = HPET_LEGACY_8254;
126 hd.hd_irq[1] = HPET_LEGACY_RTC; 130 hd.hd_irq[1] = HPET_LEGACY_RTC;
127 131
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index d13858818100..f6a11b9b1f98 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -354,9 +354,27 @@ static void ati_force_hpet_resume(void)
354 printk(KERN_DEBUG "Force enabled HPET at resume\n"); 354 printk(KERN_DEBUG "Force enabled HPET at resume\n");
355} 355}
356 356
357static u32 ati_ixp4x0_rev(struct pci_dev *dev)
358{
359 u32 d;
360 u8 b;
361
362 pci_read_config_byte(dev, 0xac, &b);
363 b &= ~(1<<5);
364 pci_write_config_byte(dev, 0xac, b);
365 pci_read_config_dword(dev, 0x70, &d);
366 d |= 1<<8;
367 pci_write_config_dword(dev, 0x70, d);
368 pci_read_config_dword(dev, 0x8, &d);
369 d &= 0xff;
370 dev_printk(KERN_DEBUG, &dev->dev, "SB4X0 revision 0x%x\n", d);
371 return d;
372}
373
357static void ati_force_enable_hpet(struct pci_dev *dev) 374static void ati_force_enable_hpet(struct pci_dev *dev)
358{ 375{
359 u32 uninitialized_var(val); 376 u32 d, val;
377 u8 b;
360 378
361 if (hpet_address || force_hpet_address) 379 if (hpet_address || force_hpet_address)
362 return; 380 return;
@@ -366,14 +384,33 @@ static void ati_force_enable_hpet(struct pci_dev *dev)
366 return; 384 return;
367 } 385 }
368 386
387 d = ati_ixp4x0_rev(dev);
388 if (d < 0x82)
389 return;
390
391 /* base address */
369 pci_write_config_dword(dev, 0x14, 0xfed00000); 392 pci_write_config_dword(dev, 0x14, 0xfed00000);
370 pci_read_config_dword(dev, 0x14, &val); 393 pci_read_config_dword(dev, 0x14, &val);
394
395 /* enable interrupt */
396 outb(0x72, 0xcd6); b = inb(0xcd7);
397 b |= 0x1;
398 outb(0x72, 0xcd6); outb(b, 0xcd7);
399 outb(0x72, 0xcd6); b = inb(0xcd7);
400 if (!(b & 0x1))
401 return;
402 pci_read_config_dword(dev, 0x64, &d);
403 d |= (1<<10);
404 pci_write_config_dword(dev, 0x64, d);
405 pci_read_config_dword(dev, 0x64, &d);
406 if (!(d & (1<<10)))
407 return;
408
371 force_hpet_address = val; 409 force_hpet_address = val;
372 force_hpet_resume_type = ATI_FORCE_HPET_RESUME; 410 force_hpet_resume_type = ATI_FORCE_HPET_RESUME;
373 dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n", 411 dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
374 force_hpet_address); 412 force_hpet_address);
375 cached_dev = dev; 413 cached_dev = dev;
376 return;
377} 414}
378DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS, 415DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS,
379 ati_force_enable_hpet); 416 ati_force_enable_hpet);
diff --git a/arch/x86/oprofile/Makefile b/arch/x86/oprofile/Makefile
index 30f3eb366667..446902b2a6b6 100644
--- a/arch/x86/oprofile/Makefile
+++ b/arch/x86/oprofile/Makefile
@@ -7,6 +7,6 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
7 timer_int.o ) 7 timer_int.o )
8 8
9oprofile-y := $(DRIVER_OBJS) init.o backtrace.o 9oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
10oprofile-$(CONFIG_X86_LOCAL_APIC) += nmi_int.o op_model_athlon.o \ 10oprofile-$(CONFIG_X86_LOCAL_APIC) += nmi_int.o op_model_amd.o \
11 op_model_ppro.o op_model_p4.o 11 op_model_ppro.o op_model_p4.o
12oprofile-$(CONFIG_X86_IO_APIC) += nmi_timer_int.o 12oprofile-$(CONFIG_X86_IO_APIC) += nmi_timer_int.o
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 8a5f1614a3d5..57f6c9088081 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -1,10 +1,11 @@
1/** 1/**
2 * @file nmi_int.c 2 * @file nmi_int.c
3 * 3 *
4 * @remark Copyright 2002 OProfile authors 4 * @remark Copyright 2002-2008 OProfile authors
5 * @remark Read the file COPYING 5 * @remark Read the file COPYING
6 * 6 *
7 * @author John Levon <levon@movementarian.org> 7 * @author John Levon <levon@movementarian.org>
8 * @author Robert Richter <robert.richter@amd.com>
8 */ 9 */
9 10
10#include <linux/init.h> 11#include <linux/init.h>
@@ -439,6 +440,7 @@ int __init op_nmi_init(struct oprofile_operations *ops)
439 __u8 vendor = boot_cpu_data.x86_vendor; 440 __u8 vendor = boot_cpu_data.x86_vendor;
440 __u8 family = boot_cpu_data.x86; 441 __u8 family = boot_cpu_data.x86;
441 char *cpu_type; 442 char *cpu_type;
443 int ret = 0;
442 444
443 if (!cpu_has_apic) 445 if (!cpu_has_apic)
444 return -ENODEV; 446 return -ENODEV;
@@ -451,19 +453,23 @@ int __init op_nmi_init(struct oprofile_operations *ops)
451 default: 453 default:
452 return -ENODEV; 454 return -ENODEV;
453 case 6: 455 case 6:
454 model = &op_athlon_spec; 456 model = &op_amd_spec;
455 cpu_type = "i386/athlon"; 457 cpu_type = "i386/athlon";
456 break; 458 break;
457 case 0xf: 459 case 0xf:
458 model = &op_athlon_spec; 460 model = &op_amd_spec;
459 /* Actually it could be i386/hammer too, but give 461 /* Actually it could be i386/hammer too, but give
460 user space an consistent name. */ 462 user space an consistent name. */
461 cpu_type = "x86-64/hammer"; 463 cpu_type = "x86-64/hammer";
462 break; 464 break;
463 case 0x10: 465 case 0x10:
464 model = &op_athlon_spec; 466 model = &op_amd_spec;
465 cpu_type = "x86-64/family10"; 467 cpu_type = "x86-64/family10";
466 break; 468 break;
469 case 0x11:
470 model = &op_amd_spec;
471 cpu_type = "x86-64/family11h";
472 break;
467 } 473 }
468 break; 474 break;
469 475
@@ -490,17 +496,24 @@ int __init op_nmi_init(struct oprofile_operations *ops)
490 return -ENODEV; 496 return -ENODEV;
491 } 497 }
492 498
493 init_sysfs();
494#ifdef CONFIG_SMP 499#ifdef CONFIG_SMP
495 register_cpu_notifier(&oprofile_cpu_nb); 500 register_cpu_notifier(&oprofile_cpu_nb);
496#endif 501#endif
497 using_nmi = 1; 502 /* default values, can be overwritten by model */
498 ops->create_files = nmi_create_files; 503 ops->create_files = nmi_create_files;
499 ops->setup = nmi_setup; 504 ops->setup = nmi_setup;
500 ops->shutdown = nmi_shutdown; 505 ops->shutdown = nmi_shutdown;
501 ops->start = nmi_start; 506 ops->start = nmi_start;
502 ops->stop = nmi_stop; 507 ops->stop = nmi_stop;
503 ops->cpu_type = cpu_type; 508 ops->cpu_type = cpu_type;
509
510 if (model->init)
511 ret = model->init(ops);
512 if (ret)
513 return ret;
514
515 init_sysfs();
516 using_nmi = 1;
504 printk(KERN_INFO "oprofile: using NMI interrupt.\n"); 517 printk(KERN_INFO "oprofile: using NMI interrupt.\n");
505 return 0; 518 return 0;
506} 519}
@@ -513,4 +526,6 @@ void op_nmi_exit(void)
513 unregister_cpu_notifier(&oprofile_cpu_nb); 526 unregister_cpu_notifier(&oprofile_cpu_nb);
514#endif 527#endif
515 } 528 }
529 if (model->exit)
530 model->exit();
516} 531}
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
new file mode 100644
index 000000000000..d9faf607b3a6
--- /dev/null
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -0,0 +1,543 @@
1/*
2 * @file op_model_amd.c
3 * athlon / K7 / K8 / Family 10h model-specific MSR operations
4 *
5 * @remark Copyright 2002-2008 OProfile authors
6 * @remark Read the file COPYING
7 *
8 * @author John Levon
9 * @author Philippe Elie
10 * @author Graydon Hoare
11 * @author Robert Richter <robert.richter@amd.com>
12 * @author Barry Kasindorf
13*/
14
15#include <linux/oprofile.h>
16#include <linux/device.h>
17#include <linux/pci.h>
18
19#include <asm/ptrace.h>
20#include <asm/msr.h>
21#include <asm/nmi.h>
22
23#include "op_x86_model.h"
24#include "op_counter.h"
25
26#define NUM_COUNTERS 4
27#define NUM_CONTROLS 4
28
29#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
30#define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0)
31#define CTR_WRITE(l, msrs, c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1); } while (0)
32#define CTR_OVERFLOWED(n) (!((n) & (1U<<31)))
33
34#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
35#define CTRL_READ(l, h, msrs, c) do {rdmsr(msrs->controls[(c)].addr, (l), (h)); } while (0)
36#define CTRL_WRITE(l, h, msrs, c) do {wrmsr(msrs->controls[(c)].addr, (l), (h)); } while (0)
37#define CTRL_SET_ACTIVE(n) (n |= (1<<22))
38#define CTRL_SET_INACTIVE(n) (n &= ~(1<<22))
39#define CTRL_CLEAR_LO(x) (x &= (1<<21))
40#define CTRL_CLEAR_HI(x) (x &= 0xfffffcf0)
41#define CTRL_SET_ENABLE(val) (val |= 1<<20)
42#define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16))
43#define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17))
44#define CTRL_SET_UM(val, m) (val |= (m << 8))
45#define CTRL_SET_EVENT_LOW(val, e) (val |= (e & 0xff))
46#define CTRL_SET_EVENT_HIGH(val, e) (val |= ((e >> 8) & 0xf))
47#define CTRL_SET_HOST_ONLY(val, h) (val |= ((h & 1) << 9))
48#define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 1) << 8))
49
50static unsigned long reset_value[NUM_COUNTERS];
51
52#ifdef CONFIG_OPROFILE_IBS
53
54/* IbsFetchCtl bits/masks */
55#define IBS_FETCH_HIGH_VALID_BIT (1UL << 17) /* bit 49 */
56#define IBS_FETCH_HIGH_ENABLE (1UL << 16) /* bit 48 */
57#define IBS_FETCH_LOW_MAX_CNT_MASK 0x0000FFFFUL /* MaxCnt mask */
58
59/*IbsOpCtl bits */
60#define IBS_OP_LOW_VALID_BIT (1ULL<<18) /* bit 18 */
61#define IBS_OP_LOW_ENABLE (1ULL<<17) /* bit 17 */
62
63/* Codes used in cpu_buffer.c */
64/* This produces duplicate code, need to be fixed */
65#define IBS_FETCH_BEGIN 3
66#define IBS_OP_BEGIN 4
67
68/* The function interface needs to be fixed, something like add
69 data. Should then be added to linux/oprofile.h. */
70extern void oprofile_add_ibs_sample(struct pt_regs *const regs,
71 unsigned int * const ibs_sample, u8 code);
72
73struct ibs_fetch_sample {
74 /* MSRC001_1031 IBS Fetch Linear Address Register */
75 unsigned int ibs_fetch_lin_addr_low;
76 unsigned int ibs_fetch_lin_addr_high;
77 /* MSRC001_1030 IBS Fetch Control Register */
78 unsigned int ibs_fetch_ctl_low;
79 unsigned int ibs_fetch_ctl_high;
80 /* MSRC001_1032 IBS Fetch Physical Address Register */
81 unsigned int ibs_fetch_phys_addr_low;
82 unsigned int ibs_fetch_phys_addr_high;
83};
84
85struct ibs_op_sample {
86 /* MSRC001_1034 IBS Op Logical Address Register (IbsRIP) */
87 unsigned int ibs_op_rip_low;
88 unsigned int ibs_op_rip_high;
89 /* MSRC001_1035 IBS Op Data Register */
90 unsigned int ibs_op_data1_low;
91 unsigned int ibs_op_data1_high;
92 /* MSRC001_1036 IBS Op Data 2 Register */
93 unsigned int ibs_op_data2_low;
94 unsigned int ibs_op_data2_high;
95 /* MSRC001_1037 IBS Op Data 3 Register */
96 unsigned int ibs_op_data3_low;
97 unsigned int ibs_op_data3_high;
98 /* MSRC001_1038 IBS DC Linear Address Register (IbsDcLinAd) */
99 unsigned int ibs_dc_linear_low;
100 unsigned int ibs_dc_linear_high;
101 /* MSRC001_1039 IBS DC Physical Address Register (IbsDcPhysAd) */
102 unsigned int ibs_dc_phys_low;
103 unsigned int ibs_dc_phys_high;
104};
105
106/*
107 * uninitialize the APIC for the IBS interrupts if needed on AMD Family10h+
108*/
109static void clear_ibs_nmi(void);
110
111static int ibs_allowed; /* AMD Family10h and later */
112
113struct op_ibs_config {
114 unsigned long op_enabled;
115 unsigned long fetch_enabled;
116 unsigned long max_cnt_fetch;
117 unsigned long max_cnt_op;
118 unsigned long rand_en;
119 unsigned long dispatched_ops;
120};
121
122static struct op_ibs_config ibs_config;
123
124#endif
125
126/* functions for op_amd_spec */
127
128static void op_amd_fill_in_addresses(struct op_msrs * const msrs)
129{
130 int i;
131
132 for (i = 0; i < NUM_COUNTERS; i++) {
133 if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
134 msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
135 else
136 msrs->counters[i].addr = 0;
137 }
138
139 for (i = 0; i < NUM_CONTROLS; i++) {
140 if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i))
141 msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
142 else
143 msrs->controls[i].addr = 0;
144 }
145}
146
147
148static void op_amd_setup_ctrs(struct op_msrs const * const msrs)
149{
150 unsigned int low, high;
151 int i;
152
153 /* clear all counters */
154 for (i = 0 ; i < NUM_CONTROLS; ++i) {
155 if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
156 continue;
157 CTRL_READ(low, high, msrs, i);
158 CTRL_CLEAR_LO(low);
159 CTRL_CLEAR_HI(high);
160 CTRL_WRITE(low, high, msrs, i);
161 }
162
163 /* avoid a false detection of ctr overflows in NMI handler */
164 for (i = 0; i < NUM_COUNTERS; ++i) {
165 if (unlikely(!CTR_IS_RESERVED(msrs, i)))
166 continue;
167 CTR_WRITE(1, msrs, i);
168 }
169
170 /* enable active counters */
171 for (i = 0; i < NUM_COUNTERS; ++i) {
172 if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) {
173 reset_value[i] = counter_config[i].count;
174
175 CTR_WRITE(counter_config[i].count, msrs, i);
176
177 CTRL_READ(low, high, msrs, i);
178 CTRL_CLEAR_LO(low);
179 CTRL_CLEAR_HI(high);
180 CTRL_SET_ENABLE(low);
181 CTRL_SET_USR(low, counter_config[i].user);
182 CTRL_SET_KERN(low, counter_config[i].kernel);
183 CTRL_SET_UM(low, counter_config[i].unit_mask);
184 CTRL_SET_EVENT_LOW(low, counter_config[i].event);
185 CTRL_SET_EVENT_HIGH(high, counter_config[i].event);
186 CTRL_SET_HOST_ONLY(high, 0);
187 CTRL_SET_GUEST_ONLY(high, 0);
188
189 CTRL_WRITE(low, high, msrs, i);
190 } else {
191 reset_value[i] = 0;
192 }
193 }
194}
195
196#ifdef CONFIG_OPROFILE_IBS
197
198static inline int
199op_amd_handle_ibs(struct pt_regs * const regs,
200 struct op_msrs const * const msrs)
201{
202 unsigned int low, high;
203 struct ibs_fetch_sample ibs_fetch;
204 struct ibs_op_sample ibs_op;
205
206 if (!ibs_allowed)
207 return 1;
208
209 if (ibs_config.fetch_enabled) {
210 rdmsr(MSR_AMD64_IBSFETCHCTL, low, high);
211 if (high & IBS_FETCH_HIGH_VALID_BIT) {
212 ibs_fetch.ibs_fetch_ctl_high = high;
213 ibs_fetch.ibs_fetch_ctl_low = low;
214 rdmsr(MSR_AMD64_IBSFETCHLINAD, low, high);
215 ibs_fetch.ibs_fetch_lin_addr_high = high;
216 ibs_fetch.ibs_fetch_lin_addr_low = low;
217 rdmsr(MSR_AMD64_IBSFETCHPHYSAD, low, high);
218 ibs_fetch.ibs_fetch_phys_addr_high = high;
219 ibs_fetch.ibs_fetch_phys_addr_low = low;
220
221 oprofile_add_ibs_sample(regs,
222 (unsigned int *)&ibs_fetch,
223 IBS_FETCH_BEGIN);
224
225 /*reenable the IRQ */
226 rdmsr(MSR_AMD64_IBSFETCHCTL, low, high);
227 high &= ~IBS_FETCH_HIGH_VALID_BIT;
228 high |= IBS_FETCH_HIGH_ENABLE;
229 low &= IBS_FETCH_LOW_MAX_CNT_MASK;
230 wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
231 }
232 }
233
234 if (ibs_config.op_enabled) {
235 rdmsr(MSR_AMD64_IBSOPCTL, low, high);
236 if (low & IBS_OP_LOW_VALID_BIT) {
237 rdmsr(MSR_AMD64_IBSOPRIP, low, high);
238 ibs_op.ibs_op_rip_low = low;
239 ibs_op.ibs_op_rip_high = high;
240 rdmsr(MSR_AMD64_IBSOPDATA, low, high);
241 ibs_op.ibs_op_data1_low = low;
242 ibs_op.ibs_op_data1_high = high;
243 rdmsr(MSR_AMD64_IBSOPDATA2, low, high);
244 ibs_op.ibs_op_data2_low = low;
245 ibs_op.ibs_op_data2_high = high;
246 rdmsr(MSR_AMD64_IBSOPDATA3, low, high);
247 ibs_op.ibs_op_data3_low = low;
248 ibs_op.ibs_op_data3_high = high;
249 rdmsr(MSR_AMD64_IBSDCLINAD, low, high);
250 ibs_op.ibs_dc_linear_low = low;
251 ibs_op.ibs_dc_linear_high = high;
252 rdmsr(MSR_AMD64_IBSDCPHYSAD, low, high);
253 ibs_op.ibs_dc_phys_low = low;
254 ibs_op.ibs_dc_phys_high = high;
255
256 /* reenable the IRQ */
257 oprofile_add_ibs_sample(regs,
258 (unsigned int *)&ibs_op,
259 IBS_OP_BEGIN);
260 rdmsr(MSR_AMD64_IBSOPCTL, low, high);
261 high = 0;
262 low &= ~IBS_OP_LOW_VALID_BIT;
263 low |= IBS_OP_LOW_ENABLE;
264 wrmsr(MSR_AMD64_IBSOPCTL, low, high);
265 }
266 }
267
268 return 1;
269}
270
271#endif
272
273static int op_amd_check_ctrs(struct pt_regs * const regs,
274 struct op_msrs const * const msrs)
275{
276 unsigned int low, high;
277 int i;
278
279 for (i = 0 ; i < NUM_COUNTERS; ++i) {
280 if (!reset_value[i])
281 continue;
282 CTR_READ(low, high, msrs, i);
283 if (CTR_OVERFLOWED(low)) {
284 oprofile_add_sample(regs, i);
285 CTR_WRITE(reset_value[i], msrs, i);
286 }
287 }
288
289#ifdef CONFIG_OPROFILE_IBS
290 op_amd_handle_ibs(regs, msrs);
291#endif
292
293 /* See op_model_ppro.c */
294 return 1;
295}
296
297static void op_amd_start(struct op_msrs const * const msrs)
298{
299 unsigned int low, high;
300 int i;
301 for (i = 0 ; i < NUM_COUNTERS ; ++i) {
302 if (reset_value[i]) {
303 CTRL_READ(low, high, msrs, i);
304 CTRL_SET_ACTIVE(low);
305 CTRL_WRITE(low, high, msrs, i);
306 }
307 }
308
309#ifdef CONFIG_OPROFILE_IBS
310 if (ibs_allowed && ibs_config.fetch_enabled) {
311 low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF;
312 high = IBS_FETCH_HIGH_ENABLE;
313 wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
314 }
315
316 if (ibs_allowed && ibs_config.op_enabled) {
317 low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF) + IBS_OP_LOW_ENABLE;
318 high = 0;
319 wrmsr(MSR_AMD64_IBSOPCTL, low, high);
320 }
321#endif
322}
323
324
325static void op_amd_stop(struct op_msrs const * const msrs)
326{
327 unsigned int low, high;
328 int i;
329
330 /* Subtle: stop on all counters to avoid race with
331 * setting our pm callback */
332 for (i = 0 ; i < NUM_COUNTERS ; ++i) {
333 if (!reset_value[i])
334 continue;
335 CTRL_READ(low, high, msrs, i);
336 CTRL_SET_INACTIVE(low);
337 CTRL_WRITE(low, high, msrs, i);
338 }
339
340#ifdef CONFIG_OPROFILE_IBS
341 if (ibs_allowed && ibs_config.fetch_enabled) {
342 low = 0; /* clear max count and enable */
343 high = 0;
344 wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
345 }
346
347 if (ibs_allowed && ibs_config.op_enabled) {
348 low = 0; /* clear max count and enable */
349 high = 0;
350 wrmsr(MSR_AMD64_IBSOPCTL, low, high);
351 }
352#endif
353}
354
355static void op_amd_shutdown(struct op_msrs const * const msrs)
356{
357 int i;
358
359 for (i = 0 ; i < NUM_COUNTERS ; ++i) {
360 if (CTR_IS_RESERVED(msrs, i))
361 release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
362 }
363 for (i = 0 ; i < NUM_CONTROLS ; ++i) {
364 if (CTRL_IS_RESERVED(msrs, i))
365 release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
366 }
367}
368
369#ifndef CONFIG_OPROFILE_IBS
370
371/* no IBS support */
372
373static int op_amd_init(struct oprofile_operations *ops)
374{
375 return 0;
376}
377
378static void op_amd_exit(void) {}
379
380#else
381
382static u8 ibs_eilvt_off;
383
384static inline void apic_init_ibs_nmi_per_cpu(void *arg)
385{
386 ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0);
387}
388
389static inline void apic_clear_ibs_nmi_per_cpu(void *arg)
390{
391 setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
392}
393
394static int pfm_amd64_setup_eilvt(void)
395{
396#define IBSCTL_LVTOFFSETVAL (1 << 8)
397#define IBSCTL 0x1cc
398 struct pci_dev *cpu_cfg;
399 int nodes;
400 u32 value = 0;
401
402 /* per CPU setup */
403 on_each_cpu(apic_init_ibs_nmi_per_cpu, NULL, 1);
404
405 nodes = 0;
406 cpu_cfg = NULL;
407 do {
408 cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
409 PCI_DEVICE_ID_AMD_10H_NB_MISC,
410 cpu_cfg);
411 if (!cpu_cfg)
412 break;
413 ++nodes;
414 pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
415 | IBSCTL_LVTOFFSETVAL);
416 pci_read_config_dword(cpu_cfg, IBSCTL, &value);
417 if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) {
418 printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
419 "IBSCTL = 0x%08x", value);
420 return 1;
421 }
422 } while (1);
423
424 if (!nodes) {
425 printk(KERN_DEBUG "No CPU node configured for IBS");
426 return 1;
427 }
428
429#ifdef CONFIG_NUMA
430 /* Sanity check */
431 /* Works only for 64bit with proper numa implementation. */
432 if (nodes != num_possible_nodes()) {
433 printk(KERN_DEBUG "Failed to setup CPU node(s) for IBS, "
434 "found: %d, expected %d",
435 nodes, num_possible_nodes());
436 return 1;
437 }
438#endif
439 return 0;
440}
441
442/*
443 * initialize the APIC for the IBS interrupts
444 * if available (AMD Family10h rev B0 and later)
445 */
446static void setup_ibs(void)
447{
448 ibs_allowed = boot_cpu_has(X86_FEATURE_IBS);
449
450 if (!ibs_allowed)
451 return;
452
453 if (pfm_amd64_setup_eilvt()) {
454 ibs_allowed = 0;
455 return;
456 }
457
458 printk(KERN_INFO "oprofile: AMD IBS detected\n");
459}
460
461
462/*
463 * uninitialize the APIC for the IBS interrupts if needed on AMD Family10h
464 * rev B0 and later */
465static void clear_ibs_nmi(void)
466{
467 if (ibs_allowed)
468 on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1);
469}
470
471static int (*create_arch_files)(struct super_block * sb, struct dentry * root);
472
473static int setup_ibs_files(struct super_block * sb, struct dentry * root)
474{
475 char buf[12];
476 struct dentry *dir;
477 int ret = 0;
478
479 /* architecture specific files */
480 if (create_arch_files)
481 ret = create_arch_files(sb, root);
482
483 if (ret)
484 return ret;
485
486 if (!ibs_allowed)
487 return ret;
488
489 /* model specific files */
490
491 /* setup some reasonable defaults */
492 ibs_config.max_cnt_fetch = 250000;
493 ibs_config.fetch_enabled = 0;
494 ibs_config.max_cnt_op = 250000;
495 ibs_config.op_enabled = 0;
496 ibs_config.dispatched_ops = 1;
497 snprintf(buf, sizeof(buf), "ibs_fetch");
498 dir = oprofilefs_mkdir(sb, root, buf);
499 oprofilefs_create_ulong(sb, dir, "rand_enable",
500 &ibs_config.rand_en);
501 oprofilefs_create_ulong(sb, dir, "enable",
502 &ibs_config.fetch_enabled);
503 oprofilefs_create_ulong(sb, dir, "max_count",
504 &ibs_config.max_cnt_fetch);
505 snprintf(buf, sizeof(buf), "ibs_uops");
506 dir = oprofilefs_mkdir(sb, root, buf);
507 oprofilefs_create_ulong(sb, dir, "enable",
508 &ibs_config.op_enabled);
509 oprofilefs_create_ulong(sb, dir, "max_count",
510 &ibs_config.max_cnt_op);
511 oprofilefs_create_ulong(sb, dir, "dispatched_ops",
512 &ibs_config.dispatched_ops);
513
514 return 0;
515}
516
517static int op_amd_init(struct oprofile_operations *ops)
518{
519 setup_ibs();
520 create_arch_files = ops->create_files;
521 ops->create_files = setup_ibs_files;
522 return 0;
523}
524
525static void op_amd_exit(void)
526{
527 clear_ibs_nmi();
528}
529
530#endif
531
532struct op_x86_model_spec const op_amd_spec = {
533 .init = op_amd_init,
534 .exit = op_amd_exit,
535 .num_counters = NUM_COUNTERS,
536 .num_controls = NUM_CONTROLS,
537 .fill_in_addresses = &op_amd_fill_in_addresses,
538 .setup_ctrs = &op_amd_setup_ctrs,
539 .check_ctrs = &op_amd_check_ctrs,
540 .start = &op_amd_start,
541 .stop = &op_amd_stop,
542 .shutdown = &op_amd_shutdown
543};
diff --git a/arch/x86/oprofile/op_model_athlon.c b/arch/x86/oprofile/op_model_athlon.c
deleted file mode 100644
index 3d534879a9dc..000000000000
--- a/arch/x86/oprofile/op_model_athlon.c
+++ /dev/null
@@ -1,190 +0,0 @@
1/*
2 * @file op_model_athlon.h
3 * athlon / K7 / K8 / Family 10h model-specific MSR operations
4 *
5 * @remark Copyright 2002 OProfile authors
6 * @remark Read the file COPYING
7 *
8 * @author John Levon
9 * @author Philippe Elie
10 * @author Graydon Hoare
11 */
12
13#include <linux/oprofile.h>
14#include <asm/ptrace.h>
15#include <asm/msr.h>
16#include <asm/nmi.h>
17
18#include "op_x86_model.h"
19#include "op_counter.h"
20
21#define NUM_COUNTERS 4
22#define NUM_CONTROLS 4
23
24#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
25#define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0)
26#define CTR_WRITE(l, msrs, c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1); } while (0)
27#define CTR_OVERFLOWED(n) (!((n) & (1U<<31)))
28
29#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
30#define CTRL_READ(l, h, msrs, c) do {rdmsr(msrs->controls[(c)].addr, (l), (h)); } while (0)
31#define CTRL_WRITE(l, h, msrs, c) do {wrmsr(msrs->controls[(c)].addr, (l), (h)); } while (0)
32#define CTRL_SET_ACTIVE(n) (n |= (1<<22))
33#define CTRL_SET_INACTIVE(n) (n &= ~(1<<22))
34#define CTRL_CLEAR_LO(x) (x &= (1<<21))
35#define CTRL_CLEAR_HI(x) (x &= 0xfffffcf0)
36#define CTRL_SET_ENABLE(val) (val |= 1<<20)
37#define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16))
38#define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17))
39#define CTRL_SET_UM(val, m) (val |= (m << 8))
40#define CTRL_SET_EVENT_LOW(val, e) (val |= (e & 0xff))
41#define CTRL_SET_EVENT_HIGH(val, e) (val |= ((e >> 8) & 0xf))
42#define CTRL_SET_HOST_ONLY(val, h) (val |= ((h & 1) << 9))
43#define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 1) << 8))
44
45static unsigned long reset_value[NUM_COUNTERS];
46
47static void athlon_fill_in_addresses(struct op_msrs * const msrs)
48{
49 int i;
50
51 for (i = 0; i < NUM_COUNTERS; i++) {
52 if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
53 msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
54 else
55 msrs->counters[i].addr = 0;
56 }
57
58 for (i = 0; i < NUM_CONTROLS; i++) {
59 if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i))
60 msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
61 else
62 msrs->controls[i].addr = 0;
63 }
64}
65
66
67static void athlon_setup_ctrs(struct op_msrs const * const msrs)
68{
69 unsigned int low, high;
70 int i;
71
72 /* clear all counters */
73 for (i = 0 ; i < NUM_CONTROLS; ++i) {
74 if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
75 continue;
76 CTRL_READ(low, high, msrs, i);
77 CTRL_CLEAR_LO(low);
78 CTRL_CLEAR_HI(high);
79 CTRL_WRITE(low, high, msrs, i);
80 }
81
82 /* avoid a false detection of ctr overflows in NMI handler */
83 for (i = 0; i < NUM_COUNTERS; ++i) {
84 if (unlikely(!CTR_IS_RESERVED(msrs, i)))
85 continue;
86 CTR_WRITE(1, msrs, i);
87 }
88
89 /* enable active counters */
90 for (i = 0; i < NUM_COUNTERS; ++i) {
91 if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) {
92 reset_value[i] = counter_config[i].count;
93
94 CTR_WRITE(counter_config[i].count, msrs, i);
95
96 CTRL_READ(low, high, msrs, i);
97 CTRL_CLEAR_LO(low);
98 CTRL_CLEAR_HI(high);
99 CTRL_SET_ENABLE(low);
100 CTRL_SET_USR(low, counter_config[i].user);
101 CTRL_SET_KERN(low, counter_config[i].kernel);
102 CTRL_SET_UM(low, counter_config[i].unit_mask);
103 CTRL_SET_EVENT_LOW(low, counter_config[i].event);
104 CTRL_SET_EVENT_HIGH(high, counter_config[i].event);
105 CTRL_SET_HOST_ONLY(high, 0);
106 CTRL_SET_GUEST_ONLY(high, 0);
107
108 CTRL_WRITE(low, high, msrs, i);
109 } else {
110 reset_value[i] = 0;
111 }
112 }
113}
114
115
116static int athlon_check_ctrs(struct pt_regs * const regs,
117 struct op_msrs const * const msrs)
118{
119 unsigned int low, high;
120 int i;
121
122 for (i = 0 ; i < NUM_COUNTERS; ++i) {
123 if (!reset_value[i])
124 continue;
125 CTR_READ(low, high, msrs, i);
126 if (CTR_OVERFLOWED(low)) {
127 oprofile_add_sample(regs, i);
128 CTR_WRITE(reset_value[i], msrs, i);
129 }
130 }
131
132 /* See op_model_ppro.c */
133 return 1;
134}
135
136
137static void athlon_start(struct op_msrs const * const msrs)
138{
139 unsigned int low, high;
140 int i;
141 for (i = 0 ; i < NUM_COUNTERS ; ++i) {
142 if (reset_value[i]) {
143 CTRL_READ(low, high, msrs, i);
144 CTRL_SET_ACTIVE(low);
145 CTRL_WRITE(low, high, msrs, i);
146 }
147 }
148}
149
150
151static void athlon_stop(struct op_msrs const * const msrs)
152{
153 unsigned int low, high;
154 int i;
155
156 /* Subtle: stop on all counters to avoid race with
157 * setting our pm callback */
158 for (i = 0 ; i < NUM_COUNTERS ; ++i) {
159 if (!reset_value[i])
160 continue;
161 CTRL_READ(low, high, msrs, i);
162 CTRL_SET_INACTIVE(low);
163 CTRL_WRITE(low, high, msrs, i);
164 }
165}
166
167static void athlon_shutdown(struct op_msrs const * const msrs)
168{
169 int i;
170
171 for (i = 0 ; i < NUM_COUNTERS ; ++i) {
172 if (CTR_IS_RESERVED(msrs, i))
173 release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
174 }
175 for (i = 0 ; i < NUM_CONTROLS ; ++i) {
176 if (CTRL_IS_RESERVED(msrs, i))
177 release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
178 }
179}
180
181struct op_x86_model_spec const op_athlon_spec = {
182 .num_counters = NUM_COUNTERS,
183 .num_controls = NUM_CONTROLS,
184 .fill_in_addresses = &athlon_fill_in_addresses,
185 .setup_ctrs = &athlon_setup_ctrs,
186 .check_ctrs = &athlon_check_ctrs,
187 .start = &athlon_start,
188 .stop = &athlon_stop,
189 .shutdown = &athlon_shutdown
190};
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
index 45b605fa71d0..05a0261ba0c3 100644
--- a/arch/x86/oprofile/op_x86_model.h
+++ b/arch/x86/oprofile/op_x86_model.h
@@ -32,6 +32,8 @@ struct pt_regs;
32 * various x86 CPU models' perfctr support. 32 * various x86 CPU models' perfctr support.
33 */ 33 */
34struct op_x86_model_spec { 34struct op_x86_model_spec {
35 int (*init)(struct oprofile_operations *ops);
36 void (*exit)(void);
35 unsigned int const num_counters; 37 unsigned int const num_counters;
36 unsigned int const num_controls; 38 unsigned int const num_controls;
37 void (*fill_in_addresses)(struct op_msrs * const msrs); 39 void (*fill_in_addresses)(struct op_msrs * const msrs);
@@ -46,6 +48,6 @@ struct op_x86_model_spec {
46extern struct op_x86_model_spec const op_ppro_spec; 48extern struct op_x86_model_spec const op_ppro_spec;
47extern struct op_x86_model_spec const op_p4_spec; 49extern struct op_x86_model_spec const op_p4_spec;
48extern struct op_x86_model_spec const op_p4_ht2_spec; 50extern struct op_x86_model_spec const op_p4_ht2_spec;
49extern struct op_x86_model_spec const op_athlon_spec; 51extern struct op_x86_model_spec const op_amd_spec;
50 52
51#endif /* OP_X86_MODEL_H */ 53#endif /* OP_X86_MODEL_H */
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 4bdaa590375d..3c27a809393b 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -511,3 +511,31 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1201, fam10h_pci_cfg_space_size);
511DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1202, fam10h_pci_cfg_space_size); 511DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1202, fam10h_pci_cfg_space_size);
512DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1203, fam10h_pci_cfg_space_size); 512DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1203, fam10h_pci_cfg_space_size);
513DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1204, fam10h_pci_cfg_space_size); 513DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1204, fam10h_pci_cfg_space_size);
514
515/*
516 * SB600: Disable BAR1 on device 14.0 to avoid HPET resources from
517 * confusing the PCI engine:
518 */
519static void sb600_disable_hpet_bar(struct pci_dev *dev)
520{
521 u8 val;
522
523 /*
524 * The SB600 and SB700 both share the same device
525 * ID, but the PM register 0x55 does something different
526 * for the SB700, so make sure we are dealing with the
527 * SB600 before touching the bit:
528 */
529
530 pci_read_config_byte(dev, 0x08, &val);
531
532 if (val < 0x2F) {
533 outb(0x55, 0xCD6);
534 val = inb(0xCD7);
535
536 /* Set bit 7 in PM register 0x55 */
537 outb(0x55, 0xCD6);
538 outb(val | 0x80, 0xCD7);
539 }
540}
541DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, 0x4385, sb600_disable_hpet_bar);
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index b3f5dbc6d880..f3cfb4c76125 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -53,6 +53,11 @@
53 53
54#define HPET_RANGE_SIZE 1024 /* from HPET spec */ 54#define HPET_RANGE_SIZE 1024 /* from HPET spec */
55 55
56
57/* WARNING -- don't get confused. These macros are never used
58 * to write the (single) counter, and rarely to read it.
59 * They're badly named; to fix, someday.
60 */
56#if BITS_PER_LONG == 64 61#if BITS_PER_LONG == 64
57#define write_counter(V, MC) writeq(V, MC) 62#define write_counter(V, MC) writeq(V, MC)
58#define read_counter(MC) readq(MC) 63#define read_counter(MC) readq(MC)
@@ -77,7 +82,7 @@ static struct clocksource clocksource_hpet = {
77 .rating = 250, 82 .rating = 250,
78 .read = read_hpet, 83 .read = read_hpet,
79 .mask = CLOCKSOURCE_MASK(64), 84 .mask = CLOCKSOURCE_MASK(64),
80 .mult = 0, /*to be caluclated*/ 85 .mult = 0, /* to be calculated */
81 .shift = 10, 86 .shift = 10,
82 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 87 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
83}; 88};
@@ -86,8 +91,6 @@ static struct clocksource *hpet_clocksource;
86 91
87/* A lock for concurrent access by app and isr hpet activity. */ 92/* A lock for concurrent access by app and isr hpet activity. */
88static DEFINE_SPINLOCK(hpet_lock); 93static DEFINE_SPINLOCK(hpet_lock);
89/* A lock for concurrent intermodule access to hpet and isr hpet activity. */
90static DEFINE_SPINLOCK(hpet_task_lock);
91 94
92#define HPET_DEV_NAME (7) 95#define HPET_DEV_NAME (7)
93 96
@@ -99,7 +102,6 @@ struct hpet_dev {
99 unsigned long hd_irqdata; 102 unsigned long hd_irqdata;
100 wait_queue_head_t hd_waitqueue; 103 wait_queue_head_t hd_waitqueue;
101 struct fasync_struct *hd_async_queue; 104 struct fasync_struct *hd_async_queue;
102 struct hpet_task *hd_task;
103 unsigned int hd_flags; 105 unsigned int hd_flags;
104 unsigned int hd_irq; 106 unsigned int hd_irq;
105 unsigned int hd_hdwirq; 107 unsigned int hd_hdwirq;
@@ -173,11 +175,6 @@ static irqreturn_t hpet_interrupt(int irq, void *data)
173 writel(isr, &devp->hd_hpet->hpet_isr); 175 writel(isr, &devp->hd_hpet->hpet_isr);
174 spin_unlock(&hpet_lock); 176 spin_unlock(&hpet_lock);
175 177
176 spin_lock(&hpet_task_lock);
177 if (devp->hd_task)
178 devp->hd_task->ht_func(devp->hd_task->ht_data);
179 spin_unlock(&hpet_task_lock);
180
181 wake_up_interruptible(&devp->hd_waitqueue); 178 wake_up_interruptible(&devp->hd_waitqueue);
182 179
183 kill_fasync(&devp->hd_async_queue, SIGIO, POLL_IN); 180 kill_fasync(&devp->hd_async_queue, SIGIO, POLL_IN);
@@ -185,6 +182,67 @@ static irqreturn_t hpet_interrupt(int irq, void *data)
185 return IRQ_HANDLED; 182 return IRQ_HANDLED;
186} 183}
187 184
185static void hpet_timer_set_irq(struct hpet_dev *devp)
186{
187 unsigned long v;
188 int irq, gsi;
189 struct hpet_timer __iomem *timer;
190
191 spin_lock_irq(&hpet_lock);
192 if (devp->hd_hdwirq) {
193 spin_unlock_irq(&hpet_lock);
194 return;
195 }
196
197 timer = devp->hd_timer;
198
199 /* we prefer level triggered mode */
200 v = readl(&timer->hpet_config);
201 if (!(v & Tn_INT_TYPE_CNF_MASK)) {
202 v |= Tn_INT_TYPE_CNF_MASK;
203 writel(v, &timer->hpet_config);
204 }
205 spin_unlock_irq(&hpet_lock);
206
207 v = (readq(&timer->hpet_config) & Tn_INT_ROUTE_CAP_MASK) >>
208 Tn_INT_ROUTE_CAP_SHIFT;
209
210 /*
211 * In PIC mode, skip IRQ0-4, IRQ6-9, IRQ12-15 which is always used by
212 * legacy device. In IO APIC mode, we skip all the legacy IRQS.
213 */
214 if (acpi_irq_model == ACPI_IRQ_MODEL_PIC)
215 v &= ~0xf3df;
216 else
217 v &= ~0xffff;
218
219 for (irq = find_first_bit(&v, HPET_MAX_IRQ); irq < HPET_MAX_IRQ;
220 irq = find_next_bit(&v, HPET_MAX_IRQ, 1 + irq)) {
221
222 if (irq >= NR_IRQS) {
223 irq = HPET_MAX_IRQ;
224 break;
225 }
226
227 gsi = acpi_register_gsi(irq, ACPI_LEVEL_SENSITIVE,
228 ACPI_ACTIVE_LOW);
229 if (gsi > 0)
230 break;
231
232 /* FIXME: Setup interrupt source table */
233 }
234
235 if (irq < HPET_MAX_IRQ) {
236 spin_lock_irq(&hpet_lock);
237 v = readl(&timer->hpet_config);
238 v |= irq << Tn_INT_ROUTE_CNF_SHIFT;
239 writel(v, &timer->hpet_config);
240 devp->hd_hdwirq = gsi;
241 spin_unlock_irq(&hpet_lock);
242 }
243 return;
244}
245
188static int hpet_open(struct inode *inode, struct file *file) 246static int hpet_open(struct inode *inode, struct file *file)
189{ 247{
190 struct hpet_dev *devp; 248 struct hpet_dev *devp;
@@ -199,8 +257,7 @@ static int hpet_open(struct inode *inode, struct file *file)
199 257
200 for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next) 258 for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next)
201 for (i = 0; i < hpetp->hp_ntimer; i++) 259 for (i = 0; i < hpetp->hp_ntimer; i++)
202 if (hpetp->hp_dev[i].hd_flags & HPET_OPEN 260 if (hpetp->hp_dev[i].hd_flags & HPET_OPEN)
203 || hpetp->hp_dev[i].hd_task)
204 continue; 261 continue;
205 else { 262 else {
206 devp = &hpetp->hp_dev[i]; 263 devp = &hpetp->hp_dev[i];
@@ -219,6 +276,8 @@ static int hpet_open(struct inode *inode, struct file *file)
219 spin_unlock_irq(&hpet_lock); 276 spin_unlock_irq(&hpet_lock);
220 unlock_kernel(); 277 unlock_kernel();
221 278
279 hpet_timer_set_irq(devp);
280
222 return 0; 281 return 0;
223} 282}
224 283
@@ -441,7 +500,11 @@ static int hpet_ioctl_ieon(struct hpet_dev *devp)
441 devp->hd_irq = irq; 500 devp->hd_irq = irq;
442 t = devp->hd_ireqfreq; 501 t = devp->hd_ireqfreq;
443 v = readq(&timer->hpet_config); 502 v = readq(&timer->hpet_config);
444 g = v | Tn_INT_ENB_CNF_MASK; 503
504 /* 64-bit comparators are not yet supported through the ioctls,
505 * so force this into 32-bit mode if it supports both modes
506 */
507 g = v | Tn_32MODE_CNF_MASK | Tn_INT_ENB_CNF_MASK;
445 508
446 if (devp->hd_flags & HPET_PERIODIC) { 509 if (devp->hd_flags & HPET_PERIODIC) {
447 write_counter(t, &timer->hpet_compare); 510 write_counter(t, &timer->hpet_compare);
@@ -451,6 +514,12 @@ static int hpet_ioctl_ieon(struct hpet_dev *devp)
451 v |= Tn_VAL_SET_CNF_MASK; 514 v |= Tn_VAL_SET_CNF_MASK;
452 writeq(v, &timer->hpet_config); 515 writeq(v, &timer->hpet_config);
453 local_irq_save(flags); 516 local_irq_save(flags);
517
518 /* NOTE: what we modify here is a hidden accumulator
519 * register supported by periodic-capable comparators.
520 * We never want to modify the (single) counter; that
521 * would affect all the comparators.
522 */
454 m = read_counter(&hpet->hpet_mc); 523 m = read_counter(&hpet->hpet_mc);
455 write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare); 524 write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
456 } else { 525 } else {
@@ -604,57 +673,6 @@ static int hpet_is_known(struct hpet_data *hdp)
604 return 0; 673 return 0;
605} 674}
606 675
607static inline int hpet_tpcheck(struct hpet_task *tp)
608{
609 struct hpet_dev *devp;
610 struct hpets *hpetp;
611
612 devp = tp->ht_opaque;
613
614 if (!devp)
615 return -ENXIO;
616
617 for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next)
618 if (devp >= hpetp->hp_dev
619 && devp < (hpetp->hp_dev + hpetp->hp_ntimer)
620 && devp->hd_hpet == hpetp->hp_hpet)
621 return 0;
622
623 return -ENXIO;
624}
625
626#if 0
627int hpet_unregister(struct hpet_task *tp)
628{
629 struct hpet_dev *devp;
630 struct hpet_timer __iomem *timer;
631 int err;
632
633 if ((err = hpet_tpcheck(tp)))
634 return err;
635
636 spin_lock_irq(&hpet_task_lock);
637 spin_lock(&hpet_lock);
638
639 devp = tp->ht_opaque;
640 if (devp->hd_task != tp) {
641 spin_unlock(&hpet_lock);
642 spin_unlock_irq(&hpet_task_lock);
643 return -ENXIO;
644 }
645
646 timer = devp->hd_timer;
647 writeq((readq(&timer->hpet_config) & ~Tn_INT_ENB_CNF_MASK),
648 &timer->hpet_config);
649 devp->hd_flags &= ~(HPET_IE | HPET_PERIODIC);
650 devp->hd_task = NULL;
651 spin_unlock(&hpet_lock);
652 spin_unlock_irq(&hpet_task_lock);
653
654 return 0;
655}
656#endif /* 0 */
657
658static ctl_table hpet_table[] = { 676static ctl_table hpet_table[] = {
659 { 677 {
660 .ctl_name = CTL_UNNUMBERED, 678 .ctl_name = CTL_UNNUMBERED,
@@ -746,6 +764,7 @@ int hpet_alloc(struct hpet_data *hdp)
746 static struct hpets *last = NULL; 764 static struct hpets *last = NULL;
747 unsigned long period; 765 unsigned long period;
748 unsigned long long temp; 766 unsigned long long temp;
767 u32 remainder;
749 768
750 /* 769 /*
751 * hpet_alloc can be called by platform dependent code. 770 * hpet_alloc can be called by platform dependent code.
@@ -809,9 +828,13 @@ int hpet_alloc(struct hpet_data *hdp)
809 printk("%s %d", i > 0 ? "," : "", hdp->hd_irq[i]); 828 printk("%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
810 printk("\n"); 829 printk("\n");
811 830
812 printk(KERN_INFO "hpet%u: %u %d-bit timers, %Lu Hz\n", 831 temp = hpetp->hp_tick_freq;
813 hpetp->hp_which, hpetp->hp_ntimer, 832 remainder = do_div(temp, 1000000);
814 cap & HPET_COUNTER_SIZE_MASK ? 64 : 32, hpetp->hp_tick_freq); 833 printk(KERN_INFO
834 "hpet%u: %u comparators, %d-bit %u.%06u MHz counter\n",
835 hpetp->hp_which, hpetp->hp_ntimer,
836 cap & HPET_COUNTER_SIZE_MASK ? 64 : 32,
837 (unsigned) temp, remainder);
815 838
816 mcfg = readq(&hpet->hpet_config); 839 mcfg = readq(&hpet->hpet_config);
817 if ((mcfg & HPET_ENABLE_CNF_MASK) == 0) { 840 if ((mcfg & HPET_ENABLE_CNF_MASK) == 0) {
@@ -874,8 +897,6 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
874 hdp->hd_address = ioremap(addr.minimum, addr.address_length); 897 hdp->hd_address = ioremap(addr.minimum, addr.address_length);
875 898
876 if (hpet_is_known(hdp)) { 899 if (hpet_is_known(hdp)) {
877 printk(KERN_DEBUG "%s: 0x%lx is busy\n",
878 __func__, hdp->hd_phys_address);
879 iounmap(hdp->hd_address); 900 iounmap(hdp->hd_address);
880 return AE_ALREADY_EXISTS; 901 return AE_ALREADY_EXISTS;
881 } 902 }
@@ -891,8 +912,6 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
891 HPET_RANGE_SIZE); 912 HPET_RANGE_SIZE);
892 913
893 if (hpet_is_known(hdp)) { 914 if (hpet_is_known(hdp)) {
894 printk(KERN_DEBUG "%s: 0x%lx is busy\n",
895 __func__, hdp->hd_phys_address);
896 iounmap(hdp->hd_address); 915 iounmap(hdp->hd_address);
897 return AE_ALREADY_EXISTS; 916 return AE_ALREADY_EXISTS;
898 } 917 }
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index 9304c4555079..ed982273fb8b 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -5,6 +5,7 @@
5 * @remark Read the file COPYING 5 * @remark Read the file COPYING
6 * 6 *
7 * @author John Levon <levon@movementarian.org> 7 * @author John Levon <levon@movementarian.org>
8 * @author Barry Kasindorf
8 * 9 *
9 * This is the core of the buffer management. Each 10 * This is the core of the buffer management. Each
10 * CPU buffer is processed and entered into the 11 * CPU buffer is processed and entered into the
@@ -33,7 +34,7 @@
33#include "event_buffer.h" 34#include "event_buffer.h"
34#include "cpu_buffer.h" 35#include "cpu_buffer.h"
35#include "buffer_sync.h" 36#include "buffer_sync.h"
36 37
37static LIST_HEAD(dying_tasks); 38static LIST_HEAD(dying_tasks);
38static LIST_HEAD(dead_tasks); 39static LIST_HEAD(dead_tasks);
39static cpumask_t marked_cpus = CPU_MASK_NONE; 40static cpumask_t marked_cpus = CPU_MASK_NONE;
@@ -48,10 +49,11 @@ static void process_task_mortuary(void);
48 * Can be invoked from softirq via RCU callback due to 49 * Can be invoked from softirq via RCU callback due to
49 * call_rcu() of the task struct, hence the _irqsave. 50 * call_rcu() of the task struct, hence the _irqsave.
50 */ 51 */
51static int task_free_notify(struct notifier_block * self, unsigned long val, void * data) 52static int
53task_free_notify(struct notifier_block *self, unsigned long val, void *data)
52{ 54{
53 unsigned long flags; 55 unsigned long flags;
54 struct task_struct * task = data; 56 struct task_struct *task = data;
55 spin_lock_irqsave(&task_mortuary, flags); 57 spin_lock_irqsave(&task_mortuary, flags);
56 list_add(&task->tasks, &dying_tasks); 58 list_add(&task->tasks, &dying_tasks);
57 spin_unlock_irqrestore(&task_mortuary, flags); 59 spin_unlock_irqrestore(&task_mortuary, flags);
@@ -62,13 +64,14 @@ static int task_free_notify(struct notifier_block * self, unsigned long val, voi
62/* The task is on its way out. A sync of the buffer means we can catch 64/* The task is on its way out. A sync of the buffer means we can catch
63 * any remaining samples for this task. 65 * any remaining samples for this task.
64 */ 66 */
65static int task_exit_notify(struct notifier_block * self, unsigned long val, void * data) 67static int
68task_exit_notify(struct notifier_block *self, unsigned long val, void *data)
66{ 69{
67 /* To avoid latency problems, we only process the current CPU, 70 /* To avoid latency problems, we only process the current CPU,
68 * hoping that most samples for the task are on this CPU 71 * hoping that most samples for the task are on this CPU
69 */ 72 */
70 sync_buffer(raw_smp_processor_id()); 73 sync_buffer(raw_smp_processor_id());
71 return 0; 74 return 0;
72} 75}
73 76
74 77
@@ -77,11 +80,12 @@ static int task_exit_notify(struct notifier_block * self, unsigned long val, voi
77 * we don't lose any. This does not have to be exact, it's a QoI issue 80 * we don't lose any. This does not have to be exact, it's a QoI issue
78 * only. 81 * only.
79 */ 82 */
80static int munmap_notify(struct notifier_block * self, unsigned long val, void * data) 83static int
84munmap_notify(struct notifier_block *self, unsigned long val, void *data)
81{ 85{
82 unsigned long addr = (unsigned long)data; 86 unsigned long addr = (unsigned long)data;
83 struct mm_struct * mm = current->mm; 87 struct mm_struct *mm = current->mm;
84 struct vm_area_struct * mpnt; 88 struct vm_area_struct *mpnt;
85 89
86 down_read(&mm->mmap_sem); 90 down_read(&mm->mmap_sem);
87 91
@@ -99,11 +103,12 @@ static int munmap_notify(struct notifier_block * self, unsigned long val, void *
99 return 0; 103 return 0;
100} 104}
101 105
102 106
103/* We need to be told about new modules so we don't attribute to a previously 107/* We need to be told about new modules so we don't attribute to a previously
104 * loaded module, or drop the samples on the floor. 108 * loaded module, or drop the samples on the floor.
105 */ 109 */
106static int module_load_notify(struct notifier_block * self, unsigned long val, void * data) 110static int
111module_load_notify(struct notifier_block *self, unsigned long val, void *data)
107{ 112{
108#ifdef CONFIG_MODULES 113#ifdef CONFIG_MODULES
109 if (val != MODULE_STATE_COMING) 114 if (val != MODULE_STATE_COMING)
@@ -118,7 +123,7 @@ static int module_load_notify(struct notifier_block * self, unsigned long val, v
118 return 0; 123 return 0;
119} 124}
120 125
121 126
122static struct notifier_block task_free_nb = { 127static struct notifier_block task_free_nb = {
123 .notifier_call = task_free_notify, 128 .notifier_call = task_free_notify,
124}; 129};
@@ -135,7 +140,7 @@ static struct notifier_block module_load_nb = {
135 .notifier_call = module_load_notify, 140 .notifier_call = module_load_notify,
136}; 141};
137 142
138 143
139static void end_sync(void) 144static void end_sync(void)
140{ 145{
141 end_cpu_work(); 146 end_cpu_work();
@@ -208,14 +213,14 @@ static inline unsigned long fast_get_dcookie(struct path *path)
208 * not strictly necessary but allows oprofile to associate 213 * not strictly necessary but allows oprofile to associate
209 * shared-library samples with particular applications 214 * shared-library samples with particular applications
210 */ 215 */
211static unsigned long get_exec_dcookie(struct mm_struct * mm) 216static unsigned long get_exec_dcookie(struct mm_struct *mm)
212{ 217{
213 unsigned long cookie = NO_COOKIE; 218 unsigned long cookie = NO_COOKIE;
214 struct vm_area_struct * vma; 219 struct vm_area_struct *vma;
215 220
216 if (!mm) 221 if (!mm)
217 goto out; 222 goto out;
218 223
219 for (vma = mm->mmap; vma; vma = vma->vm_next) { 224 for (vma = mm->mmap; vma; vma = vma->vm_next) {
220 if (!vma->vm_file) 225 if (!vma->vm_file)
221 continue; 226 continue;
@@ -235,13 +240,14 @@ out:
235 * sure to do this lookup before a mm->mmap modification happens so 240 * sure to do this lookup before a mm->mmap modification happens so
236 * we don't lose track. 241 * we don't lose track.
237 */ 242 */
238static unsigned long lookup_dcookie(struct mm_struct * mm, unsigned long addr, off_t * offset) 243static unsigned long
244lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
239{ 245{
240 unsigned long cookie = NO_COOKIE; 246 unsigned long cookie = NO_COOKIE;
241 struct vm_area_struct * vma; 247 struct vm_area_struct *vma;
242 248
243 for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) { 249 for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
244 250
245 if (addr < vma->vm_start || addr >= vma->vm_end) 251 if (addr < vma->vm_start || addr >= vma->vm_end)
246 continue; 252 continue;
247 253
@@ -263,9 +269,20 @@ static unsigned long lookup_dcookie(struct mm_struct * mm, unsigned long addr, o
263 return cookie; 269 return cookie;
264} 270}
265 271
272static void increment_tail(struct oprofile_cpu_buffer *b)
273{
274 unsigned long new_tail = b->tail_pos + 1;
275
276	rmb(); /* be sure fifo pointers are synchronized */
277
278 if (new_tail < b->buffer_size)
279 b->tail_pos = new_tail;
280 else
281 b->tail_pos = 0;
282}
266 283
267static unsigned long last_cookie = INVALID_COOKIE; 284static unsigned long last_cookie = INVALID_COOKIE;
268 285
269static void add_cpu_switch(int i) 286static void add_cpu_switch(int i)
270{ 287{
271 add_event_entry(ESCAPE_CODE); 288 add_event_entry(ESCAPE_CODE);
@@ -278,16 +295,16 @@ static void add_kernel_ctx_switch(unsigned int in_kernel)
278{ 295{
279 add_event_entry(ESCAPE_CODE); 296 add_event_entry(ESCAPE_CODE);
280 if (in_kernel) 297 if (in_kernel)
281 add_event_entry(KERNEL_ENTER_SWITCH_CODE); 298 add_event_entry(KERNEL_ENTER_SWITCH_CODE);
282 else 299 else
283 add_event_entry(KERNEL_EXIT_SWITCH_CODE); 300 add_event_entry(KERNEL_EXIT_SWITCH_CODE);
284} 301}
285 302
286static void 303static void
287add_user_ctx_switch(struct task_struct const * task, unsigned long cookie) 304add_user_ctx_switch(struct task_struct const *task, unsigned long cookie)
288{ 305{
289 add_event_entry(ESCAPE_CODE); 306 add_event_entry(ESCAPE_CODE);
290 add_event_entry(CTX_SWITCH_CODE); 307 add_event_entry(CTX_SWITCH_CODE);
291 add_event_entry(task->pid); 308 add_event_entry(task->pid);
292 add_event_entry(cookie); 309 add_event_entry(cookie);
293 /* Another code for daemon back-compat */ 310 /* Another code for daemon back-compat */
@@ -296,7 +313,7 @@ add_user_ctx_switch(struct task_struct const * task, unsigned long cookie)
296 add_event_entry(task->tgid); 313 add_event_entry(task->tgid);
297} 314}
298 315
299 316
300static void add_cookie_switch(unsigned long cookie) 317static void add_cookie_switch(unsigned long cookie)
301{ 318{
302 add_event_entry(ESCAPE_CODE); 319 add_event_entry(ESCAPE_CODE);
@@ -304,13 +321,78 @@ static void add_cookie_switch(unsigned long cookie)
304 add_event_entry(cookie); 321 add_event_entry(cookie);
305} 322}
306 323
307 324
308static void add_trace_begin(void) 325static void add_trace_begin(void)
309{ 326{
310 add_event_entry(ESCAPE_CODE); 327 add_event_entry(ESCAPE_CODE);
311 add_event_entry(TRACE_BEGIN_CODE); 328 add_event_entry(TRACE_BEGIN_CODE);
312} 329}
313 330
331#ifdef CONFIG_OPROFILE_IBS
332
333#define IBS_FETCH_CODE_SIZE 2
334#define IBS_OP_CODE_SIZE 5
335#define IBS_EIP(offset) \
336 (((struct op_sample *)&cpu_buf->buffer[(offset)])->eip)
337#define IBS_EVENT(offset) \
338 (((struct op_sample *)&cpu_buf->buffer[(offset)])->event)
339
340/*
341 * Add IBS fetch and op entries to event buffer
342 */
343static void add_ibs_begin(struct oprofile_cpu_buffer *cpu_buf, int code,
344 int in_kernel, struct mm_struct *mm)
345{
346 unsigned long rip;
347 int i, count;
348 unsigned long ibs_cookie = 0;
349 off_t offset;
350
351 increment_tail(cpu_buf); /* move to RIP entry */
352
353 rip = IBS_EIP(cpu_buf->tail_pos);
354
355#ifdef __LP64__
356 rip += IBS_EVENT(cpu_buf->tail_pos) << 32;
357#endif
358
359 if (mm) {
360 ibs_cookie = lookup_dcookie(mm, rip, &offset);
361
362 if (ibs_cookie == NO_COOKIE)
363 offset = rip;
364 if (ibs_cookie == INVALID_COOKIE) {
365 atomic_inc(&oprofile_stats.sample_lost_no_mapping);
366 offset = rip;
367 }
368 if (ibs_cookie != last_cookie) {
369 add_cookie_switch(ibs_cookie);
370 last_cookie = ibs_cookie;
371 }
372 } else
373 offset = rip;
374
375 add_event_entry(ESCAPE_CODE);
376 add_event_entry(code);
377 add_event_entry(offset); /* Offset from Dcookie */
378
379	/* we send the Dcookie offset, but also send the raw linear address */
380 add_event_entry(IBS_EIP(cpu_buf->tail_pos));
381 add_event_entry(IBS_EVENT(cpu_buf->tail_pos));
382
383 if (code == IBS_FETCH_CODE)
384		count = IBS_FETCH_CODE_SIZE;	/* IBS fetch is 2 int64s */
385 else
386		count = IBS_OP_CODE_SIZE;	/* IBS op is 5 int64s */
387
388 for (i = 0; i < count; i++) {
389 increment_tail(cpu_buf);
390 add_event_entry(IBS_EIP(cpu_buf->tail_pos));
391 add_event_entry(IBS_EVENT(cpu_buf->tail_pos));
392 }
393}
394
395#endif
314 396
315static void add_sample_entry(unsigned long offset, unsigned long event) 397static void add_sample_entry(unsigned long offset, unsigned long event)
316{ 398{
@@ -319,13 +401,13 @@ static void add_sample_entry(unsigned long offset, unsigned long event)
319} 401}
320 402
321 403
322static int add_us_sample(struct mm_struct * mm, struct op_sample * s) 404static int add_us_sample(struct mm_struct *mm, struct op_sample *s)
323{ 405{
324 unsigned long cookie; 406 unsigned long cookie;
325 off_t offset; 407 off_t offset;
326 408
327 cookie = lookup_dcookie(mm, s->eip, &offset); 409 cookie = lookup_dcookie(mm, s->eip, &offset);
328 410
329 if (cookie == INVALID_COOKIE) { 411 if (cookie == INVALID_COOKIE) {
330 atomic_inc(&oprofile_stats.sample_lost_no_mapping); 412 atomic_inc(&oprofile_stats.sample_lost_no_mapping);
331 return 0; 413 return 0;
@@ -341,13 +423,13 @@ static int add_us_sample(struct mm_struct * mm, struct op_sample * s)
341 return 1; 423 return 1;
342} 424}
343 425
344 426
345/* Add a sample to the global event buffer. If possible the 427/* Add a sample to the global event buffer. If possible the
346 * sample is converted into a persistent dentry/offset pair 428 * sample is converted into a persistent dentry/offset pair
347 * for later lookup from userspace. 429 * for later lookup from userspace.
348 */ 430 */
349static int 431static int
350add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel) 432add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
351{ 433{
352 if (in_kernel) { 434 if (in_kernel) {
353 add_sample_entry(s->eip, s->event); 435 add_sample_entry(s->eip, s->event);
@@ -359,9 +441,9 @@ add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel)
359 } 441 }
360 return 0; 442 return 0;
361} 443}
362
363 444
364static void release_mm(struct mm_struct * mm) 445
446static void release_mm(struct mm_struct *mm)
365{ 447{
366 if (!mm) 448 if (!mm)
367 return; 449 return;
@@ -370,9 +452,9 @@ static void release_mm(struct mm_struct * mm)
370} 452}
371 453
372 454
373static struct mm_struct * take_tasks_mm(struct task_struct * task) 455static struct mm_struct *take_tasks_mm(struct task_struct *task)
374{ 456{
375 struct mm_struct * mm = get_task_mm(task); 457 struct mm_struct *mm = get_task_mm(task);
376 if (mm) 458 if (mm)
377 down_read(&mm->mmap_sem); 459 down_read(&mm->mmap_sem);
378 return mm; 460 return mm;
@@ -383,10 +465,10 @@ static inline int is_code(unsigned long val)
383{ 465{
384 return val == ESCAPE_CODE; 466 return val == ESCAPE_CODE;
385} 467}
386 468
387 469
388/* "acquire" as many cpu buffer slots as we can */ 470/* "acquire" as many cpu buffer slots as we can */
389static unsigned long get_slots(struct oprofile_cpu_buffer * b) 471static unsigned long get_slots(struct oprofile_cpu_buffer *b)
390{ 472{
391 unsigned long head = b->head_pos; 473 unsigned long head = b->head_pos;
392 unsigned long tail = b->tail_pos; 474 unsigned long tail = b->tail_pos;
@@ -412,19 +494,6 @@ static unsigned long get_slots(struct oprofile_cpu_buffer * b)
412} 494}
413 495
414 496
415static void increment_tail(struct oprofile_cpu_buffer * b)
416{
417 unsigned long new_tail = b->tail_pos + 1;
418
419 rmb();
420
421 if (new_tail < b->buffer_size)
422 b->tail_pos = new_tail;
423 else
424 b->tail_pos = 0;
425}
426
427
428/* Move tasks along towards death. Any tasks on dead_tasks 497/* Move tasks along towards death. Any tasks on dead_tasks
429 * will definitely have no remaining references in any 498 * will definitely have no remaining references in any
430 * CPU buffers at this point, because we use two lists, 499 * CPU buffers at this point, because we use two lists,
@@ -435,8 +504,8 @@ static void process_task_mortuary(void)
435{ 504{
436 unsigned long flags; 505 unsigned long flags;
437 LIST_HEAD(local_dead_tasks); 506 LIST_HEAD(local_dead_tasks);
438 struct task_struct * task; 507 struct task_struct *task;
439 struct task_struct * ttask; 508 struct task_struct *ttask;
440 509
441 spin_lock_irqsave(&task_mortuary, flags); 510 spin_lock_irqsave(&task_mortuary, flags);
442 511
@@ -493,7 +562,7 @@ void sync_buffer(int cpu)
493{ 562{
494 struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu); 563 struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
495 struct mm_struct *mm = NULL; 564 struct mm_struct *mm = NULL;
496 struct task_struct * new; 565 struct task_struct *new;
497 unsigned long cookie = 0; 566 unsigned long cookie = 0;
498 int in_kernel = 1; 567 int in_kernel = 1;
499 unsigned int i; 568 unsigned int i;
@@ -501,7 +570,7 @@ void sync_buffer(int cpu)
501 unsigned long available; 570 unsigned long available;
502 571
503 mutex_lock(&buffer_mutex); 572 mutex_lock(&buffer_mutex);
504 573
505 add_cpu_switch(cpu); 574 add_cpu_switch(cpu);
506 575
507 /* Remember, only we can modify tail_pos */ 576 /* Remember, only we can modify tail_pos */
@@ -509,8 +578,8 @@ void sync_buffer(int cpu)
509 available = get_slots(cpu_buf); 578 available = get_slots(cpu_buf);
510 579
511 for (i = 0; i < available; ++i) { 580 for (i = 0; i < available; ++i) {
512 struct op_sample * s = &cpu_buf->buffer[cpu_buf->tail_pos]; 581 struct op_sample *s = &cpu_buf->buffer[cpu_buf->tail_pos];
513 582
514 if (is_code(s->eip)) { 583 if (is_code(s->eip)) {
515 if (s->event <= CPU_IS_KERNEL) { 584 if (s->event <= CPU_IS_KERNEL) {
516 /* kernel/userspace switch */ 585 /* kernel/userspace switch */
@@ -521,8 +590,18 @@ void sync_buffer(int cpu)
521 } else if (s->event == CPU_TRACE_BEGIN) { 590 } else if (s->event == CPU_TRACE_BEGIN) {
522 state = sb_bt_start; 591 state = sb_bt_start;
523 add_trace_begin(); 592 add_trace_begin();
593#ifdef CONFIG_OPROFILE_IBS
594 } else if (s->event == IBS_FETCH_BEGIN) {
595 state = sb_bt_start;
596 add_ibs_begin(cpu_buf,
597 IBS_FETCH_CODE, in_kernel, mm);
598 } else if (s->event == IBS_OP_BEGIN) {
599 state = sb_bt_start;
600 add_ibs_begin(cpu_buf,
601 IBS_OP_CODE, in_kernel, mm);
602#endif
524 } else { 603 } else {
525 struct mm_struct * oldmm = mm; 604 struct mm_struct *oldmm = mm;
526 605
527 /* userspace context switch */ 606 /* userspace context switch */
528 new = (struct task_struct *)s->event; 607 new = (struct task_struct *)s->event;
@@ -533,13 +612,11 @@ void sync_buffer(int cpu)
533 cookie = get_exec_dcookie(mm); 612 cookie = get_exec_dcookie(mm);
534 add_user_ctx_switch(new, cookie); 613 add_user_ctx_switch(new, cookie);
535 } 614 }
536 } else { 615 } else if (state >= sb_bt_start &&
537 if (state >= sb_bt_start && 616 !add_sample(mm, s, in_kernel)) {
538 !add_sample(mm, s, in_kernel)) { 617 if (state == sb_bt_start) {
539 if (state == sb_bt_start) { 618 state = sb_bt_ignore;
540 state = sb_bt_ignore; 619 atomic_inc(&oprofile_stats.bt_lost_no_mapping);
541 atomic_inc(&oprofile_stats.bt_lost_no_mapping);
542 }
543 } 620 }
544 } 621 }
545 622
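
The add_ibs_begin() hunk above rebuilds the 64-bit instruction pointer from two CPU-buffer fields: eip carries the low half and event the high half on LP64 kernels. Below is a minimal user-space sketch of that reassembly, with a simplified stand-in for struct op_sample (the type, field names and example values are illustrative only, not the kernel's definitions):

#include <stdint.h>
#include <stdio.h>

struct sample {			/* stand-in for struct op_sample */
	unsigned long eip;	/* low 32 bits of the IBS RIP */
	unsigned long event;	/* high 32 bits of the IBS RIP */
};

static uint64_t ibs_rip(const struct sample *s)
{
	uint64_t rip = s->eip;
#ifdef __LP64__
	rip += (uint64_t)s->event << 32;	/* same shift as add_ibs_begin() */
#endif
	return rip;
}

int main(void)
{
	struct sample s = { .eip = 0x00401000UL, .event = 0x7fUL };

	printf("rip = %#llx\n", (unsigned long long)ibs_rip(&s));
	return 0;
}

On a 32-bit build the high half is simply ignored, matching the #ifdef __LP64__ guard in the patch.
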
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 7ba78e6d210e..e1bd5a937f6c 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -5,6 +5,7 @@
5 * @remark Read the file COPYING 5 * @remark Read the file COPYING
6 * 6 *
7 * @author John Levon <levon@movementarian.org> 7 * @author John Levon <levon@movementarian.org>
8 * @author Barry Kasindorf <barry.kasindorf@amd.com>
8 * 9 *
9 * Each CPU has a local buffer that stores PC value/event 10 * Each CPU has a local buffer that stores PC value/event
10 * pairs. We also log context switches when we notice them. 11 * pairs. We also log context switches when we notice them.
@@ -209,7 +210,7 @@ static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
209 return 1; 210 return 1;
210} 211}
211 212
212static int oprofile_begin_trace(struct oprofile_cpu_buffer * cpu_buf) 213static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
213{ 214{
214 if (nr_available_slots(cpu_buf) < 4) { 215 if (nr_available_slots(cpu_buf) < 4) {
215 cpu_buf->sample_lost_overflow++; 216 cpu_buf->sample_lost_overflow++;
@@ -254,6 +255,75 @@ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
254 oprofile_add_ext_sample(pc, regs, event, is_kernel); 255 oprofile_add_ext_sample(pc, regs, event, is_kernel);
255} 256}
256 257
258#ifdef CONFIG_OPROFILE_IBS
259
260#define MAX_IBS_SAMPLE_SIZE 14
261static int log_ibs_sample(struct oprofile_cpu_buffer *cpu_buf,
262 unsigned long pc, int is_kernel, unsigned int *ibs, int ibs_code)
263{
264 struct task_struct *task;
265
266 cpu_buf->sample_received++;
267
268 if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) {
269 cpu_buf->sample_lost_overflow++;
270 return 0;
271 }
272
273 is_kernel = !!is_kernel;
274
275 /* notice a switch from user->kernel or vice versa */
276 if (cpu_buf->last_is_kernel != is_kernel) {
277 cpu_buf->last_is_kernel = is_kernel;
278 add_code(cpu_buf, is_kernel);
279 }
280
281 /* notice a task switch */
282 if (!is_kernel) {
283 task = current;
284
285 if (cpu_buf->last_task != task) {
286 cpu_buf->last_task = task;
287 add_code(cpu_buf, (unsigned long)task);
288 }
289 }
290
291 add_code(cpu_buf, ibs_code);
292 add_sample(cpu_buf, ibs[0], ibs[1]);
293 add_sample(cpu_buf, ibs[2], ibs[3]);
294 add_sample(cpu_buf, ibs[4], ibs[5]);
295
296 if (ibs_code == IBS_OP_BEGIN) {
297 add_sample(cpu_buf, ibs[6], ibs[7]);
298 add_sample(cpu_buf, ibs[8], ibs[9]);
299 add_sample(cpu_buf, ibs[10], ibs[11]);
300 }
301
302 return 1;
303}
304
305void oprofile_add_ibs_sample(struct pt_regs *const regs,
306 unsigned int * const ibs_sample, u8 code)
307{
308 int is_kernel = !user_mode(regs);
309 unsigned long pc = profile_pc(regs);
310
311 struct oprofile_cpu_buffer *cpu_buf =
312 &per_cpu(cpu_buffer, smp_processor_id());
313
314 if (!backtrace_depth) {
315 log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code);
316 return;
317 }
318
319	/* if log_ibs_sample() fails we can't backtrace since we lost the source
320 * of this event */
321 if (log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code))
322 oprofile_ops.backtrace(regs, backtrace_depth);
323}
324
325#endif
326
257void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event) 327void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
258{ 328{
259 struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); 329 struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
@@ -296,7 +366,7 @@ static void wq_sync_buffer(struct work_struct *work)
296 struct oprofile_cpu_buffer * b = 366 struct oprofile_cpu_buffer * b =
297 container_of(work, struct oprofile_cpu_buffer, work.work); 367 container_of(work, struct oprofile_cpu_buffer, work.work);
298 if (b->cpu != smp_processor_id()) { 368 if (b->cpu != smp_processor_id()) {
299 printk("WQ on CPU%d, prefer CPU%d\n", 369 printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
300 smp_processor_id(), b->cpu); 370 smp_processor_id(), b->cpu);
301 } 371 }
302 sync_buffer(b->cpu); 372 sync_buffer(b->cpu);
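
After any kernel/user and task-switch escape codes, log_ibs_sample() above stores one code slot followed by three eip/event pairs for an IBS fetch record and six for an IBS op record. The following user-space sketch models that slot sequence; the IBS_*_BEGIN values mirror cpu_buffer.h, while the struct and the ESCAPE_CODE placeholder are stand-ins:

#include <stddef.h>

#define IBS_FETCH_BEGIN	3
#define IBS_OP_BEGIN	4

struct slot {			/* stand-in for struct op_sample */
	unsigned long eip;
	unsigned long event;
};

static size_t emit_ibs_record(struct slot *out, int code,
			      const unsigned int *ibs)
{
	size_t n = 0, i;
	size_t pairs = (code == IBS_OP_BEGIN) ? 6 : 3;

	out[n].eip = ~0UL;	/* stand-in for ESCAPE_CODE */
	out[n++].event = code;

	for (i = 0; i < pairs; i++) {
		out[n].eip = ibs[2 * i];
		out[n++].event = ibs[2 * i + 1];
	}
	return n;		/* 4 slots for fetch, 7 for op */
}

Even with the optional switch codes in front, this stays well inside the MAX_IBS_SAMPLE_SIZE (14) slots checked at the top of log_ibs_sample().
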
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index c3e366b52261..9c44d004da69 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -55,5 +55,7 @@ void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf);
55/* transient events for the CPU buffer -> event buffer */ 55/* transient events for the CPU buffer -> event buffer */
56#define CPU_IS_KERNEL 1 56#define CPU_IS_KERNEL 1
57#define CPU_TRACE_BEGIN 2 57#define CPU_TRACE_BEGIN 2
58#define IBS_FETCH_BEGIN 3
59#define IBS_OP_BEGIN 4
58 60
59#endif /* OPROFILE_CPU_BUFFER_H */ 61#endif /* OPROFILE_CPU_BUFFER_H */
diff --git a/include/linux/hpet.h b/include/linux/hpet.h
index 2dc29ce6c8e4..79f63a27bcef 100644
--- a/include/linux/hpet.h
+++ b/include/linux/hpet.h
@@ -37,6 +37,7 @@ struct hpet {
37#define hpet_compare _u1._hpet_compare 37#define hpet_compare _u1._hpet_compare
38 38
39#define HPET_MAX_TIMERS (32) 39#define HPET_MAX_TIMERS (32)
40#define HPET_MAX_IRQ (32)
40 41
41/* 42/*
42 * HPET general capabilities register 43 * HPET general capabilities register
@@ -64,7 +65,7 @@ struct hpet {
64 */ 65 */
65 66
66#define Tn_INT_ROUTE_CAP_MASK (0xffffffff00000000ULL) 67#define Tn_INT_ROUTE_CAP_MASK (0xffffffff00000000ULL)
67#define Tn_INI_ROUTE_CAP_SHIFT (32UL) 68#define Tn_INT_ROUTE_CAP_SHIFT (32UL)
68#define Tn_FSB_INT_DELCAP_MASK (0x8000UL) 69#define Tn_FSB_INT_DELCAP_MASK (0x8000UL)
69#define Tn_FSB_INT_DELCAP_SHIFT (15) 70#define Tn_FSB_INT_DELCAP_SHIFT (15)
70#define Tn_FSB_EN_CNF_MASK (0x4000UL) 71#define Tn_FSB_EN_CNF_MASK (0x4000UL)
@@ -91,23 +92,14 @@ struct hpet {
91 * exported interfaces 92 * exported interfaces
92 */ 93 */
93 94
94struct hpet_task {
95 void (*ht_func) (void *);
96 void *ht_data;
97 void *ht_opaque;
98};
99
100struct hpet_data { 95struct hpet_data {
101 unsigned long hd_phys_address; 96 unsigned long hd_phys_address;
102 void __iomem *hd_address; 97 void __iomem *hd_address;
103 unsigned short hd_nirqs; 98 unsigned short hd_nirqs;
104 unsigned short hd_flags;
105 unsigned int hd_state; /* timer allocated */ 99 unsigned int hd_state; /* timer allocated */
106 unsigned int hd_irq[HPET_MAX_TIMERS]; 100 unsigned int hd_irq[HPET_MAX_TIMERS];
107}; 101};
108 102
109#define HPET_DATA_PLATFORM 0x0001 /* platform call to hpet_alloc */
110
111static inline void hpet_reserve_timer(struct hpet_data *hd, int timer) 103static inline void hpet_reserve_timer(struct hpet_data *hd, int timer)
112{ 104{
113 hd->hd_state |= (1 << timer); 105 hd->hd_state |= (1 << timer);
@@ -125,7 +117,7 @@ struct hpet_info {
125 unsigned short hi_timer; 117 unsigned short hi_timer;
126}; 118};
127 119
128#define HPET_INFO_PERIODIC 0x0001 /* timer is periodic */ 120#define HPET_INFO_PERIODIC 0x0010 /* periodic-capable comparator */
129 121
130#define HPET_IE_ON _IO('h', 0x01) /* interrupt on */ 122#define HPET_IE_ON _IO('h', 0x01) /* interrupt on */
131#define HPET_IE_OFF _IO('h', 0x02) /* interrupt off */ 123#define HPET_IE_OFF _IO('h', 0x02) /* interrupt off */
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
index 041bb31100f4..bcb8f725427c 100644
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -36,6 +36,8 @@
36#define XEN_ENTER_SWITCH_CODE 10 36#define XEN_ENTER_SWITCH_CODE 10
37#define SPU_PROFILING_CODE 11 37#define SPU_PROFILING_CODE 11
38#define SPU_CTX_SWITCH_CODE 12 38#define SPU_CTX_SWITCH_CODE 12
39#define IBS_FETCH_CODE 13
40#define IBS_OP_CODE 14
39 41
40struct super_block; 42struct super_block;
41struct dentry; 43struct dentry;
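
The two escape codes added here let the event-buffer consumer recognise the records emitted by add_ibs_begin(). A small sketch of how a reader could size them, assuming it walks the buffer as unsigned-long words (the constants mirror the header above; the lengths follow IBS_FETCH_CODE_SIZE and IBS_OP_CODE_SIZE in buffer_sync.c):

#define IBS_FETCH_CODE	13
#define IBS_OP_CODE	14

static int ibs_payload_words(unsigned long code)
{
	switch (code) {
	case IBS_FETCH_CODE:
		return 1 + 2 * (1 + 2);	/* offset + 3 eip/event pairs = 7 */
	case IBS_OP_CODE:
		return 1 + 2 * (1 + 5);	/* offset + 6 eip/event pairs = 13 */
	default:
		return -1;		/* not an IBS record */
	}
}
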