Diffstat (limited to 'arch')
27 files changed, 666 insertions, 357 deletions
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 8f146a4b4752..bbea636ff687 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -32,6 +32,7 @@
 #include <asm/leds.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
+#include <asm/mach/time.h>
 
 extern const char *processor_modes[];
 extern void setup_mm_for_reboot(char mode);
@@ -85,8 +86,10 @@ EXPORT_SYMBOL(pm_power_off);
 void default_idle(void)
 {
     local_irq_disable();
-    if (!need_resched() && !hlt_counter)
+    if (!need_resched() && !hlt_counter) {
+        timer_dyn_reprogram();
         arch_idle();
+    }
     local_irq_enable();
 }
 
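The idle-path hunk above is the consumer side of the ARM dynamic-tick interface: with interrupts already disabled, the timer is reprogrammed for the next pending soft timer and only then does the CPU sleep, so a tick cannot slip in between the two steps. As a rough sketch of the provider side (hook names follow the dyn-tick patches of this series; the enable/disable callbacks and the match-register helper are hypothetical placeholders):

    /* Sketch of a platform-side dyn-tick provider. myplat_write_match()
     * is a hypothetical helper that programs the timer comparator;
     * LATCH is the usual ticks-per-jiffy constant. */
    static void myplat_timer_reprogram(unsigned long next_jiffies)
    {
        /* fire at least one jiffy from now, 'next_jiffies' at most */
        if (next_jiffies == 0)
            next_jiffies = 1;
        myplat_write_match(next_jiffies * LATCH);
    }

    static struct dyn_tick_timer myplat_dyn_tick = {
        .enable    = myplat_dyn_tick_enable,   /* hypothetical */
        .disable   = myplat_dyn_tick_disable,  /* hypothetical */
        .reprogram = myplat_timer_reprogram,
    };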
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 06054c9ba074..1b7fcd50c3e2 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -424,15 +424,19 @@ static int timer_dyn_tick_disable(void)
     return ret;
 }
 
+/*
+ * Reprogram the system timer for at least the calculated time interval.
+ * This function should be called from the idle thread with IRQs disabled,
+ * immediately before sleeping.
+ */
 void timer_dyn_reprogram(void)
 {
     struct dyn_tick_timer *dyn_tick = system_timer->dyn_tick;
-    unsigned long flags;
 
-    write_seqlock_irqsave(&xtime_lock, flags);
+    write_seqlock(&xtime_lock);
     if (dyn_tick->state & DYN_TICK_ENABLED)
         dyn_tick->reprogram(next_timer_interrupt() - jiffies);
-    write_sequnlock_irqrestore(&xtime_lock, flags);
+    write_sequnlock(&xtime_lock);
 }
 
 static ssize_t timer_show_dyn_tick(struct sys_device *dev, char *buf)
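Dropping the _irqsave variant is safe only because the sole caller, default_idle() above, now runs with interrupts already disabled; the seqlock itself is still needed to serialize against timer-tick updates on SMP. For contrast, the read side of xtime_lock never blocks, it retries. A minimal sketch of that standard pattern:

    /* Standard seqlock reader: loop until a consistent snapshot is read. */
    static unsigned long snapshot_jiffies(void)
    {
        unsigned long seq, j;

        do {
            seq = read_seqbegin(&xtime_lock);
            j = jiffies;
        } while (read_seqretry(&xtime_lock, seq));

        return j;
    }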
diff --git a/arch/arm/mach-aaec2000/Makefile.boot b/arch/arm/mach-aaec2000/Makefile.boot
new file mode 100644
index 000000000000..8f5a8b7c53c7
--- /dev/null
+++ b/arch/arm/mach-aaec2000/Makefile.boot
@@ -0,0 +1 @@
+zreladdr-y := 0xf0008000
diff --git a/arch/arm/mach-omap/usb.c b/arch/arm/mach-omap/usb.c
index 6e805d451d0e..7f37857b1a28 100644
--- a/arch/arm/mach-omap/usb.c
+++ b/arch/arm/mach-omap/usb.c
@@ -288,8 +288,8 @@ static void usb_release(struct device *dev)
 static struct resource udc_resources[] = {
     /* order is significant! */
     {        /* registers */
-        .start    = IO_ADDRESS(UDC_BASE),
-        .end      = IO_ADDRESS(UDC_BASE + 0xff),
+        .start    = UDC_BASE,
+        .end      = UDC_BASE + 0xff,
         .flags    = IORESOURCE_MEM,
     }, {        /* general IRQ */
         .start    = IH2_BASE + 20,
@@ -355,8 +355,8 @@ static struct platform_device ohci_device = {
 static struct resource otg_resources[] = {
     /* order is significant! */
     {
-        .start    = IO_ADDRESS(OTG_BASE),
-        .end      = IO_ADDRESS(OTG_BASE + 0xff),
+        .start    = OTG_BASE,
+        .end      = OTG_BASE + 0xff,
         .flags    = IORESOURCE_MEM,
     }, {
         .start    = IH2_BASE + 8,
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index c08710b1ff02..6dcb23d64bf5 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -522,6 +522,69 @@ static inline void free_area(unsigned long addr, unsigned long end, char *s)
     printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
 }
 
+static inline void
+free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
+{
+    struct page *start_pg, *end_pg;
+    unsigned long pg, pgend;
+
+    /*
+     * Convert start_pfn/end_pfn to a struct page pointer.
+     */
+    start_pg = pfn_to_page(start_pfn);
+    end_pg = pfn_to_page(end_pfn);
+
+    /*
+     * Convert to physical addresses, and
+     * round start upwards and end downwards.
+     */
+    pg = PAGE_ALIGN(__pa(start_pg));
+    pgend = __pa(end_pg) & PAGE_MASK;
+
+    /*
+     * If there are free pages between these,
+     * free the section of the memmap array.
+     */
+    if (pg < pgend)
+        free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
+}
+
+/*
+ * The mem_map array can get very big.  Free the unused area of the memory map.
+ */
+static void __init free_unused_memmap_node(int node, struct meminfo *mi)
+{
+    unsigned long bank_start, prev_bank_end = 0;
+    unsigned int i;
+
+    /*
+     * [FIXME] This relies on each bank being in address order.  This
+     * may not be the case, especially if the user has provided the
+     * information on the command line.
+     */
+    for (i = 0; i < mi->nr_banks; i++) {
+        if (mi->bank[i].size == 0 || mi->bank[i].node != node)
+            continue;
+
+        bank_start = mi->bank[i].start >> PAGE_SHIFT;
+        if (bank_start < prev_bank_end) {
+            printk(KERN_ERR "MEM: unordered memory banks.  "
+                "Not freeing memmap.\n");
+            break;
+        }
+
+        /*
+         * If we had a previous bank, and there is a space
+         * between the current bank and the previous, free it.
+         */
+        if (prev_bank_end && prev_bank_end != bank_start)
+            free_memmap(node, prev_bank_end, bank_start);
+
+        prev_bank_end = (mi->bank[i].start +
+                 mi->bank[i].size) >> PAGE_SHIFT;
+    }
+}
+
 /*
  * mem_init() marks the free areas in the mem_map and tells us how much
  * memory is free.  This is done after various parts of the system have
@@ -540,16 +603,12 @@ void __init mem_init(void)
     max_mapnr   = virt_to_page(high_memory) - mem_map;
 #endif
 
-    /*
-     * We may have non-contiguous memory.
-     */
-    if (meminfo.nr_banks != 1)
-        create_memmap_holes(&meminfo);
-
     /* this will put all unused low memory onto the freelists */
     for_each_online_node(node) {
         pg_data_t *pgdat = NODE_DATA(node);
 
+        free_unused_memmap_node(node, &meminfo);
+
         if (pgdat->node_spanned_pages != 0)
             totalram_pages += free_all_bootmem_node(pgdat);
     }
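The arithmetic in free_memmap() only returns whole pages of the mem_map array to the boot allocator: the start address is rounded up and the end rounded down, so memmap pages shared with a still-used bank are kept. A standalone demo of the same rounding (page size and struct page size here are illustrative; real values depend on configuration):

    #include <stdio.h>

    #define PAGE_SIZE     4096UL
    #define PAGE_MASK     (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(a) (((a) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
        unsigned long sizeof_page    = 32;      /* illustrative struct page size */
        unsigned long hole_start_pfn = 0x20000; /* end of bank 0 */
        unsigned long hole_end_pfn   = 0x80000; /* start of bank 1 */

        /* addresses of the mem_map entries that cover the hole */
        unsigned long pg    = PAGE_ALIGN(hole_start_pfn * sizeof_page);
        unsigned long pgend = (hole_end_pfn * sizeof_page) & PAGE_MASK;

        if (pg < pgend)
            printf("can free %lu KiB of mem_map\n", (pgend - pg) / 1024);
        return 0;
    }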
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 2c2b93d77d43..052ab443ec4e 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -169,7 +169,14 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 
     memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
 
+    /*
+     * Copy over the kernel and IO PGD entries
+     */
     init_pgd = pgd_offset_k(0);
+    memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
+           (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
+
+    clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
 
     if (!vectors_high()) {
         /*
@@ -198,14 +205,6 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
         spin_unlock(&mm->page_table_lock);
     }
 
-    /*
-     * Copy over the kernel and IO PGD entries
-     */
-    memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
-           (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
-
-    clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
-
     return new_pgd;
 
 no_pte:
@@ -698,75 +697,3 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
     for (i = 0; i < nr; i++)
         create_mapping(io_desc + i);
 }
-
-static inline void
-free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
-{
-    struct page *start_pg, *end_pg;
-    unsigned long pg, pgend;
-
-    /*
-     * Convert start_pfn/end_pfn to a struct page pointer.
-     */
-    start_pg = pfn_to_page(start_pfn);
-    end_pg = pfn_to_page(end_pfn);
-
-    /*
-     * Convert to physical addresses, and
-     * round start upwards and end downwards.
-     */
-    pg = PAGE_ALIGN(__pa(start_pg));
-    pgend = __pa(end_pg) & PAGE_MASK;
-
-    /*
-     * If there are free pages between these,
-     * free the section of the memmap array.
-     */
-    if (pg < pgend)
-        free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
-}
-
-static inline void free_unused_memmap_node(int node, struct meminfo *mi)
-{
-    unsigned long bank_start, prev_bank_end = 0;
-    unsigned int i;
-
-    /*
-     * [FIXME] This relies on each bank being in address order.  This
-     * may not be the case, especially if the user has provided the
-     * information on the command line.
-     */
-    for (i = 0; i < mi->nr_banks; i++) {
-        if (mi->bank[i].size == 0 || mi->bank[i].node != node)
-            continue;
-
-        bank_start = mi->bank[i].start >> PAGE_SHIFT;
-        if (bank_start < prev_bank_end) {
-            printk(KERN_ERR "MEM: unordered memory banks.  "
-                "Not freeing memmap.\n");
-            break;
-        }
-
-        /*
-         * If we had a previous bank, and there is a space
-         * between the current bank and the previous, free it.
-         */
-        if (prev_bank_end && prev_bank_end != bank_start)
-            free_memmap(node, prev_bank_end, bank_start);
-
-        prev_bank_end = PAGE_ALIGN(mi->bank[i].start +
-                       mi->bank[i].size) >> PAGE_SHIFT;
-    }
-}
-
-/*
- * The mem_map array can get very big.  Free
- * the unused area of the memory map.
- */
-void __init create_memmap_holes(struct meminfo *mi)
-{
-    int node;
-
-    for_each_online_node(node)
-        free_unused_memmap_node(node, mi);
-}
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 30c1dfbb052f..6d3a79e5fef8 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -6,7 +6,7 @@
 # To add an entry into this database, please see Documentation/arm/README,
 # or contact rmk@arm.linux.org.uk
 #
-# Last update: Thu Mar 24 14:34:50 2005
+# Last update: Thu Jun 23 20:19:33 2005
 #
 # machine_is_xxx        CONFIG_xxxx             MACH_TYPE_xxx           number
 #
@@ -243,7 +243,7 @@ yoho                    ARCH_YOHO               YOHO                    231
 jasper                  ARCH_JASPER             JASPER                  232
 dsc25                   ARCH_DSC25              DSC25                   233
 omap_innovator          MACH_OMAP_INNOVATOR     OMAP_INNOVATOR          234
-ramses                  ARCH_RAMSES             RAMSES                  235
+mnci                    ARCH_RAMSES             RAMSES                  235
 s28x                    ARCH_S28X               S28X                    236
 mport3                  ARCH_MPORT3             MPORT3                  237
 pxa_eagle250            ARCH_PXA_EAGLE250       PXA_EAGLE250            238
@@ -323,7 +323,7 @@ nimbra29x               ARCH_NIMBRA29X          NIMBRA29X               311
 nimbra210               ARCH_NIMBRA210          NIMBRA210               312
 hhp_d95xx               ARCH_HHP_D95XX          HHP_D95XX               313
 labarm                  ARCH_LABARM             LABARM                  314
-m825xx                  ARCH_M825XX             M825XX                  315
+comcerto                ARCH_M825XX             M825XX                  315
 m7100                   SA1100_M7100            M7100                   316
 nipc2                   ARCH_NIPC2              NIPC2                   317
 fu7202                  ARCH_FU7202             FU7202                  318
@@ -724,3 +724,66 @@ lpc22xx                 MACH_LPC22XX            LPC22XX                 715
 omap_comet3             MACH_COMET3             COMET3                  716
 omap_comet4             MACH_COMET4             COMET4                  717
 csb625                  MACH_CSB625             CSB625                  718
+fortunet2               MACH_FORTUNET2          FORTUNET2               719
+s5h2200                 MACH_S5H2200            S5H2200                 720
+optorm920               MACH_OPTORM920          OPTORM920               721
+adsbitsyxb              MACH_ADSBITSYXB         ADSBITSYXB              722
+adssphere               MACH_ADSSPHERE          ADSSPHERE               723
+adsportal               MACH_ADSPORTAL          ADSPORTAL               724
+ln2410sbc               MACH_LN2410SBC          LN2410SBC               725
+cb3rufc                 MACH_CB3RUFC            CB3RUFC                 726
+mp2usb                  MACH_MP2USB             MP2USB                  727
+ntnp425c                MACH_NTNP425C           NTNP425C                728
+colibri                 MACH_COLIBRI            COLIBRI                 729
+pcm7220                 MACH_PCM7220            PCM7220                 730
+gateway7001             MACH_GATEWAY7001        GATEWAY7001             731
+pcm027                  MACH_PCM027             PCM027                  732
+cmpxa                   MACH_CMPXA              CMPXA                   733
+anubis                  MACH_ANUBIS             ANUBIS                  734
+ite8152                 MACH_ITE8152            ITE8152                 735
+lpc3xxx                 MACH_LPC3XXX            LPC3XXX                 736
+puppeteer               MACH_PUPPETEER          PUPPETEER               737
+vt001                   MACH_MACH_VADATECH      MACH_VADATECH           738
+e570                    MACH_E570               E570                    739
+x50                     MACH_X50                X50                     740
+recon                   MACH_RECON              RECON                   741
+xboardgp8               MACH_XBOARDGP8          XBOARDGP8               742
+fpic2                   MACH_FPIC2              FPIC2                   743
+akita                   MACH_AKITA              AKITA                   744
+a81                     MACH_A81                A81                     745
+svm_sc25x               MACH_SVM_SC25X          SVM_SC25X               746
+vt020                   MACH_VADATECH020        VADATECH020             747
+tli                     MACH_TLI                TLI                     748
+edb9315lc               MACH_EDB9315LC          EDB9315LC               749
+passec                  MACH_PASSEC             PASSEC                  750
+ds_tiger                MACH_DS_TIGER           DS_TIGER                751
+e310                    MACH_E310               E310                    752
+e330                    MACH_E330               E330                    753
+rt3000                  MACH_RT3000             RT3000                  754
+nokia770                MACH_NOKIA770           NOKIA770                755
+pnx0106                 MACH_PNX0106            PNX0106                 756
+hx21xx                  MACH_HX21XX             HX21XX                  757
+faraday                 MACH_FARADAY            FARADAY                 758
+sbc9312                 MACH_SBC9312            SBC9312                 759
+batman                  MACH_BATMAN             BATMAN                  760
+jpd201                  MACH_JPD201             JPD201                  761
+mipsa                   MACH_MIPSA              MIPSA                   762
+kacom                   MACH_KACOM              KACOM                   763
+swarcocpu               MACH_SWARCOCPU          SWARCOCPU               764
+swarcodsl               MACH_SWARCODSL          SWARCODSL               765
+blueangel               MACH_BLUEANGEL          BLUEANGEL               766
+hairygrama              MACH_HAIRYGRAMA         HAIRYGRAMA              767
+banff                   MACH_BANFF              BANFF                   768
+carmeva                 MACH_CARMEVA            CARMEVA                 769
+sam255                  MACH_SAM255             SAM255                  770
+ppm10                   MACH_PPM10              PPM10                   771
+edb9315a                MACH_EDB9315A           EDB9315A                772
+sunset                  MACH_SUNSET             SUNSET                  773
+stargate2               MACH_STARGATE2          STARGATE2               774
+intelmote2              MACH_INTELMOTE2         INTELMOTE2              775
+trizeps4                MACH_TRIZEPS4           TRIZEPS4                776
+mainstone2              MACH_MAINSTONE2         MAINSTONE2              777
+ez_ixp42x               MACH_EZ_IXP42X          EZ_IXP42X               778
+tapwave_zodiac          MACH_TAPWAVE_ZODIAC     TAPWAVE_ZODIAC          779
+universalmeter          MACH_UNIVERSALMETER     UNIVERSALMETER          780
+hicoarm9                MACH_HICOARM9           HICOARM9                781
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 3762f6b35ab2..fc8b17521761 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -127,48 +127,23 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
     regs->eip = (unsigned long)&p->ainsn.insn;
 }
 
-struct task_struct *arch_get_kprobe_task(void *ptr)
-{
-    return ((struct thread_info *) (((unsigned long) ptr) &
-                    (~(THREAD_SIZE -1))))->task;
-}
-
 void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
 {
     unsigned long *sara = (unsigned long *)&regs->esp;
     struct kretprobe_instance *ri;
-    static void *orig_ret_addr;
+
+    if ((ri = get_free_rp_inst(rp)) != NULL) {
+        ri->rp = rp;
+        ri->task = current;
+        ri->ret_addr = (kprobe_opcode_t *) *sara;
 
-    /*
-     * Save the return address when the return probe hits
-     * the first time, and use it to populate the (krprobe
-     * instance)->ret_addr for subsequent return probes at
-     * the same addrress since stack address would have
-     * the kretprobe_trampoline by then.
-     */
-    if (((void*) *sara) != kretprobe_trampoline)
-        orig_ret_addr = (void*) *sara;
-
-    if ((ri = get_free_rp_inst(rp)) != NULL) {
-        ri->rp = rp;
-        ri->stack_addr = sara;
-        ri->ret_addr = orig_ret_addr;
-        add_rp_inst(ri);
         /* Replace the return addr with trampoline addr */
         *sara = (unsigned long) &kretprobe_trampoline;
-    } else {
-        rp->nmissed++;
-    }
-}
 
-void arch_kprobe_flush_task(struct task_struct *tk)
-{
-    struct kretprobe_instance *ri;
-    while ((ri = get_rp_inst_tsk(tk)) != NULL) {
-        *((unsigned long *)(ri->stack_addr)) =
-                    (unsigned long) ri->ret_addr;
-        recycle_rp_inst(ri);
-    }
+        add_rp_inst(ri);
+    } else {
+        rp->nmissed++;
+    }
 }
 
 /*
@@ -286,36 +261,59 @@ no_kprobe:
  */
 int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 {
-    struct task_struct *tsk;
-    struct kretprobe_instance *ri;
-    struct hlist_head *head;
-    struct hlist_node *node;
-    unsigned long *sara = ((unsigned long *) &regs->esp) - 1;
-
-    tsk = arch_get_kprobe_task(sara);
-    head = kretprobe_inst_table_head(tsk);
-
-    hlist_for_each_entry(ri, node, head, hlist) {
-        if (ri->stack_addr == sara && ri->rp) {
-            if (ri->rp->handler)
-                ri->rp->handler(ri, regs);
-        }
-    }
-    return 0;
-}
+    struct kretprobe_instance *ri = NULL;
+    struct hlist_head *head;
+    struct hlist_node *node, *tmp;
+    unsigned long orig_ret_address = 0;
+    unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
 
-void trampoline_post_handler(struct kprobe *p, struct pt_regs *regs,
-                        unsigned long flags)
-{
-    struct kretprobe_instance *ri;
-    /* RA already popped */
-    unsigned long *sara = ((unsigned long *)&regs->esp) - 1;
+    head = kretprobe_inst_table_head(current);
 
-    while ((ri = get_rp_inst(sara))) {
-        regs->eip = (unsigned long)ri->ret_addr;
+    /*
+     * It is possible to have multiple instances associated with a given
+     * task either because multiple functions in the call path have
+     * a return probe installed on them, and/or more than one return
+     * probe was registered for a target function.
+     *
+     * We can handle this because:
+     *     - instances are always inserted at the head of the list
+     *     - when multiple return probes are registered for the same
+     *       function, the first instance's ret_addr will point to the
+     *       real return address, and all the rest will point to
+     *       kretprobe_trampoline
+     */
+    hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+        if (ri->task != current)
+            /* another task is sharing our hash bucket */
+            continue;
+
+        if (ri->rp && ri->rp->handler)
+            ri->rp->handler(ri, regs);
+
+        orig_ret_address = (unsigned long)ri->ret_addr;
         recycle_rp_inst(ri);
+
+        if (orig_ret_address != trampoline_address)
+            /*
+             * This is the real return address. Any other
+             * instances associated with this task are for
+             * other calls deeper on the call stack
+             */
+            break;
     }
-    regs->eflags &= ~TF_MASK;
+
+    BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
+    regs->eip = orig_ret_address;
+
+    unlock_kprobes();
+    preempt_enable_no_resched();
+
+    /*
+     * By returning a non-zero value, we are telling
+     * kprobe_handler() that we have handled unlocking
+     * and re-enabling preemption.
+     */
+    return 1;
 }
 
 /*
@@ -403,8 +401,7 @@ static inline int post_kprobe_handler(struct pt_regs *regs)
         current_kprobe->post_handler(current_kprobe, regs, 0);
     }
 
-    if (current_kprobe->post_handler != trampoline_post_handler)
-        resume_execution(current_kprobe, regs);
+    resume_execution(current_kprobe, regs);
     regs->eflags |= kprobe_saved_eflags;
 
     /* Restore back the original saved kprobes variables and continue. */
@@ -534,3 +531,13 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
     }
     return 0;
 }
+
+static struct kprobe trampoline_p = {
+    .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
+    .pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init(void)
+{
+    return register_kprobe(&trampoline_p);
+}
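For orientation, the consumer-facing half of this interface is unchanged by the rework: a module registers a struct kretprobe and its handler fires on every return from the probed function. A hedged sketch (the probed address is an illustrative placeholder; in-tree examples of this era resolved it by hand):

    #include <linux/module.h>
    #include <linux/kprobes.h>

    static int ret_count;

    /* runs each time the probed function returns */
    static int my_ret_handler(struct kretprobe_instance *ri,
                  struct pt_regs *regs)
    {
        ret_count++;
        return 0;
    }

    static struct kretprobe my_kretprobe = {
        .handler   = my_ret_handler,
        .maxactive = 20,    /* instance pool for concurrent activations */
    };

    static int __init my_init(void)
    {
        /* illustrative: point kp.addr at the probed function's address */
        my_kretprobe.kp.addr = (kprobe_opcode_t *)0xc01a2b3c;
        return register_kretprobe(&my_kretprobe);
    }

    static void __exit my_exit(void)
    {
        unregister_kretprobe(&my_kretprobe);
        printk(KERN_INFO "saw %d returns\n", ret_count);
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");

The .maxactive pool is what get_free_rp_inst() draws from; when it runs dry, the miss is counted in rp->nmissed rather than failing silently.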
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 5f8cfa6b7940..ba243a4cc119 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -617,6 +617,33 @@ handle_io_bitmap(struct thread_struct *next, struct tss_struct *tss)
 }
 
 /*
+ * This function selects if the context switch from prev to next
+ * has to tweak the TSC disable bit in the cr4.
+ */
+static inline void disable_tsc(struct task_struct *prev_p,
+                   struct task_struct *next_p)
+{
+    struct thread_info *prev, *next;
+
+    /*
+     * gcc should eliminate the ->thread_info dereference if
+     * has_secure_computing returns 0 at compile time (SECCOMP=n).
+     */
+    prev = prev_p->thread_info;
+    next = next_p->thread_info;
+
+    if (has_secure_computing(prev) || has_secure_computing(next)) {
+        /* slow path here */
+        if (has_secure_computing(prev) &&
+            !has_secure_computing(next)) {
+            write_cr4(read_cr4() & ~X86_CR4_TSD);
+        } else if (!has_secure_computing(prev) &&
+               has_secure_computing(next))
+            write_cr4(read_cr4() | X86_CR4_TSD);
+    }
+}
+
+/*
  * switch_to(x,yn) should switch tasks from x to y.
  *
  * We fsave/fwait so that an exception goes off at the right time
@@ -695,6 +722,8 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
     if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr))
         handle_io_bitmap(next, tss);
 
+    disable_tsc(prev_p, next_p);
+
     return prev_p;
 }
 
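The effect of the CR4.TSD toggle is visible from userspace: once a task enters strict seccomp, rdtsc at CPL 3 raises #GP and the task receives SIGSEGV instead of a timestamp, closing a timing side channel. A minimal sketch (prctl value 1 is strict seccomp mode; the behavior assumes a seccomp-enabled kernel carrying this patch):

    #include <stdio.h>
    #include <sys/prctl.h>

    #define PR_SET_SECCOMP 22

    static unsigned long long rdtsc(void)
    {
        unsigned int lo, hi;
        __asm__ volatile("rdtsc" : "=a"(lo), "=d"(hi));
        return ((unsigned long long)hi << 32) | lo;
    }

    int main(void)
    {
        printf("tsc before seccomp: %llu\n", rdtsc());
        prctl(PR_SET_SECCOMP, 1);   /* strict mode */
        rdtsc();    /* expected to die with SIGSEGV here */
        return 0;
    }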
diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S
index 442a6e937b19..3db9a04aec6e 100644
--- a/arch/i386/kernel/syscall_table.S
+++ b/arch/i386/kernel/syscall_table.S
@@ -289,3 +289,5 @@ ENTRY(sys_call_table)
     .long sys_add_key
     .long sys_request_key
     .long sys_keyctl
+    .long sys_ioprio_set
+    .long sys_ioprio_get        /* 290 */
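These slots land at syscall numbers 289 and 290 on i386. Until libc grows wrappers, userspace reaches the new calls through syscall(2); a hedged sketch (constants per the ioprio patch set, treat them as illustrative):

    #include <unistd.h>
    #include <sys/syscall.h>

    #define __NR_ioprio_set      289    /* i386 numbering, per this table */
    #define IOPRIO_WHO_PROCESS   1
    #define IOPRIO_CLASS_BE      2
    #define IOPRIO_CLASS_SHIFT   13

    int main(void)
    {
        /* best-effort class, priority level 4, for the calling process */
        int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 4;
        return syscall(__NR_ioprio_set, IOPRIO_WHO_PROCESS, 0, prio);
    }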
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index b1d5d3d5276c..785a51b0ad8e 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1577,8 +1577,8 @@ sys_call_table:
     data8 sys_add_key
     data8 sys_request_key
     data8 sys_keyctl
-    data8 sys_ni_syscall
-    data8 sys_ni_syscall            // 1275
+    data8 sys_ioprio_set
+    data8 sys_ioprio_get            // 1275
     data8 sys_set_zone_reclaim
     data8 sys_ni_syscall
     data8 sys_ni_syscall
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 5978823d5c63..3aa3167edbec 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -34,6 +34,7 @@
 
 #include <asm/pgtable.h>
 #include <asm/kdebug.h>
+#include <asm/sections.h>
 
 extern void jprobe_inst_return(void);
 
@@ -263,13 +264,33 @@ static inline void get_kprobe_inst(bundle_t *bundle, uint slot,
     }
 }
 
+/* Returns non-zero if the addr is in the Interrupt Vector Table */
+static inline int in_ivt_functions(unsigned long addr)
+{
+    return (addr >= (unsigned long)__start_ivt_text
+        && addr < (unsigned long)__end_ivt_text);
+}
+
 static int valid_kprobe_addr(int template, int slot, unsigned long addr)
 {
     if ((slot > 2) || ((bundle_encoding[template][1] == L) && slot > 1)) {
-        printk(KERN_WARNING "Attempting to insert unaligned kprobe at 0x%lx\n",
-                addr);
+        printk(KERN_WARNING "Attempting to insert unaligned kprobe "
+                "at 0x%lx\n", addr);
         return -EINVAL;
     }
+
+    if (in_ivt_functions(addr)) {
+        printk(KERN_WARNING "Kprobes can't be inserted inside "
+                "IVT functions at 0x%lx\n", addr);
+        return -EINVAL;
+    }
+
+    if (slot == 1 && bundle_encoding[template][1] != L) {
+        printk(KERN_WARNING "Inserting kprobes on slot #1 "
+               "is not supported\n");
+        return -EINVAL;
+    }
+
     return 0;
 }
 
@@ -290,6 +311,94 @@ static inline void set_current_kprobe(struct kprobe *p)
     current_kprobe = p;
 }
 
+static void kretprobe_trampoline(void)
+{
+}
+
+/*
+ * At this point the target function has been tricked into
+ * returning into our trampoline.  Lookup the associated instance
+ * and then:
+ *    - call the handler function
+ *    - cleanup by marking the instance as unused
+ *    - long jump back to the original return address
+ */
+int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+{
+    struct kretprobe_instance *ri = NULL;
+    struct hlist_head *head;
+    struct hlist_node *node, *tmp;
+    unsigned long orig_ret_address = 0;
+    unsigned long trampoline_address =
+        ((struct fnptr *)kretprobe_trampoline)->ip;
+
+    head = kretprobe_inst_table_head(current);
+
+    /*
+     * It is possible to have multiple instances associated with a given
+     * task either because multiple functions in the call path have
+     * a return probe installed on them, and/or more than one return
+     * probe was registered for a target function.
+     *
+     * We can handle this because:
+     *     - instances are always inserted at the head of the list
+     *     - when multiple return probes are registered for the same
+     *       function, the first instance's ret_addr will point to the
+     *       real return address, and all the rest will point to
+     *       kretprobe_trampoline
+     */
+    hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+        if (ri->task != current)
+            /* another task is sharing our hash bucket */
+            continue;
+
+        if (ri->rp && ri->rp->handler)
+            ri->rp->handler(ri, regs);
+
+        orig_ret_address = (unsigned long)ri->ret_addr;
+        recycle_rp_inst(ri);
+
+        if (orig_ret_address != trampoline_address)
+            /*
+             * This is the real return address. Any other
+             * instances associated with this task are for
+             * other calls deeper on the call stack
+             */
+            break;
+    }
+
+    BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
+    regs->cr_iip = orig_ret_address;
+
+    unlock_kprobes();
+    preempt_enable_no_resched();
+
+    /*
+     * By returning a non-zero value, we are telling
+     * kprobe_handler() that we have handled unlocking
+     * and re-enabling preemption.
+     */
+    return 1;
+}
+
+void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
+{
+    struct kretprobe_instance *ri;
+
+    if ((ri = get_free_rp_inst(rp)) != NULL) {
+        ri->rp = rp;
+        ri->task = current;
+        ri->ret_addr = (kprobe_opcode_t *)regs->b0;
+
+        /* Replace the return addr with trampoline addr */
+        regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip;
+
+        add_rp_inst(ri);
+    } else {
+        rp->nmissed++;
+    }
+}
+
 int arch_prepare_kprobe(struct kprobe *p)
 {
     unsigned long addr = (unsigned long) p->addr;
@@ -492,8 +601,8 @@ static int pre_kprobes_handler(struct die_args *args)
     if (p->pre_handler && p->pre_handler(p, regs))
         /*
          * Our pre-handler is specifically requesting that we just
-         * do a return.  This is handling the case where the
-         * pre-handler is really our special jprobe pre-handler.
+         * do a return.  This is used for both the jprobe pre-handler
+         * and the kretprobe trampoline.
          */
         return 1;
 
@@ -599,3 +708,14 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
     *regs = jprobe_saved_regs;
     return 1;
 }
+
+static struct kprobe trampoline_p = {
+    .pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init(void)
+{
+    trampoline_p.addr =
+        (kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip;
+    return register_kprobe(&trampoline_p);
+}
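One ia64-specific wrinkle above: kretprobe_trampoline is cast through struct fnptr before its code address is taken. On ia64 a function "pointer" is a descriptor holding both the entry address and the gp value, so the raw symbol cannot be compared against a return address directly. Illustrative only; the layout mirrors the kernel's ia64 function-descriptor convention:

    /* An ia64 function descriptor: taking a function's code address
     * means reading the ->ip field, not casting the pointer itself. */
    struct fnptr {
        unsigned long ip;   /* entry point of the function */
        unsigned long gp;   /* global pointer for its module */
    };

    static unsigned long code_address_of(void *func_descriptor)
    {
        return ((struct fnptr *)func_descriptor)->ip;
    }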
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index ebb71f3d6d19..6e35bff05d59 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -27,6 +27,7 @@
 #include <linux/efi.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
+#include <linux/kprobes.h>
 
 #include <asm/cpu.h>
 #include <asm/delay.h>
@@ -707,6 +708,13 @@ kernel_thread_helper (int (*fn)(void *), void *arg)
 void
 flush_thread (void)
 {
+    /*
+     * Remove function-return probe instances associated with this task
+     * and put them back on the free list. Do not insert an exit probe for
+     * this function, it will be disabled by kprobe_flush_task if you do.
+     */
+    kprobe_flush_task(current);
+
     /* drop floating-point and debug-register state if it exists: */
     current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
     ia64_drop_fpu(current);
@@ -721,6 +729,14 @@ flush_thread (void)
 void
 exit_thread (void)
 {
+
+    /*
+     * Remove function-return probe instances associated with this task
+     * and put them back on the free list. Do not insert an exit probe for
+     * this function, it will be disabled by kprobe_flush_task if you do.
+     */
+    kprobe_flush_task(current);
+
     ia64_drop_fpu(current);
 #ifdef CONFIG_PERFMON
     /* if needed, stop monitoring and flush state to perfmon context */
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index b9f0db4c1b04..a676e79e0681 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -8,6 +8,11 @@
 #define LOAD_OFFSET    (KERNEL_START - KERNEL_TR_PAGE_SIZE)
 #include <asm-generic/vmlinux.lds.h>
 
+#define IVT_TEXT                            \
+        VMLINUX_SYMBOL(__start_ivt_text) = .;        \
+        *(.text.ivt)                        \
+        VMLINUX_SYMBOL(__end_ivt_text) = .;
+
 OUTPUT_FORMAT("elf64-ia64-little")
 OUTPUT_ARCH(ia64)
 ENTRY(phys_start)
@@ -39,7 +44,7 @@ SECTIONS
 
   .text : AT(ADDR(.text) - LOAD_OFFSET)
     {
-    *(.text.ivt)
+    IVT_TEXT
     *(.text)
     SCHED_TEXT
     LOCK_TEXT
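The two VMLINUX_SYMBOL markers emitted by IVT_TEXT are what the new in_ivt_functions() check in the ia64 kprobes change consumes. Linker-script symbols like these are conventionally declared on the C side as extern arrays; a sketch:

    /* declarations as they would appear in a header such as
     * asm/sections.h; the symbols have no storage of their own,
     * only the addresses the linker assigned to them */
    extern char __start_ivt_text[];
    extern char __end_ivt_text[];

    static inline int in_ivt_text(unsigned long addr)
    {
        return addr >= (unsigned long)__start_ivt_text &&
               addr <  (unsigned long)__end_ivt_text;
    }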
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 508026ae5842..65ee15396ffd 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -457,7 +457,7 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs)
     if (!user_mode(regs))
         return 1;
 
-    if (try_to_freeze(0))
+    if (try_to_freeze())
         goto no_signal;
 
     if (!oldset)
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
index b6a63a49a232..191a8def3bdb 100644
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -1449,3 +1449,5 @@ _GLOBAL(sys_call_table)
     .long sys_request_key        /* 270 */
     .long sys_keyctl
     .long sys_waitid
+    .long sys_ioprio_set
+    .long sys_ioprio_get
diff --git a/arch/ppc/mm/init.c b/arch/ppc/mm/init.c
index 334ef4150d92..6164a2b34733 100644
--- a/arch/ppc/mm/init.c
+++ b/arch/ppc/mm/init.c
@@ -606,9 +606,19 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
         struct page *page = pfn_to_page(pfn);
         if (!PageReserved(page)
             && !test_bit(PG_arch_1, &page->flags)) {
-            if (vma->vm_mm == current->active_mm)
+            if (vma->vm_mm == current->active_mm) {
+#ifdef CONFIG_8xx
+            /* On 8xx, cache control instructions (particularly
+             * "dcbst" from flush_dcache_icache) fault as write
+             * operations if there is an unpopulated TLB entry
+             * for the address in question. To work around that,
+             * we invalidate the TLB here, thus avoiding dcbst
+             * misbehaviour.
+             */
+                _tlbie(address);
+#endif
                 __flush_dcache_icache((void *) address);
-            else
+            } else
                 flush_dcache_icache_page(page);
             set_bit(PG_arch_1, &page->flags);
         }
diff --git a/arch/ppc/platforms/pmac_sleep.S b/arch/ppc/platforms/pmac_sleep.S
index f459ade1bd63..016a74649155 100644
--- a/arch/ppc/platforms/pmac_sleep.S
+++ b/arch/ppc/platforms/pmac_sleep.S
@@ -46,7 +46,7 @@
     .section .text
     .align    5
 
-#if defined(CONFIG_PMAC_PBOOK) || defined(CONFIG_CPU_FREQ_PMAC)
+#if defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ_PMAC)
 
 /* This gets called by via-pmu.c late during the sleep process.
  * The PMU was already sent the sleep command and will shut us down
@@ -382,7 +382,7 @@ turn_on_mmu:
     isync
     rfi
 
-#endif /* defined(CONFIG_PMAC_PBOOK) || defined(CONFIG_CPU_FREQ) */
+#endif /* defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ) */
 
     .section .data
     .balign    L1_CACHE_LINE_SIZE
diff --git a/arch/ppc/platforms/pmac_time.c b/arch/ppc/platforms/pmac_time.c
index de60ccc7db9f..778ce4fec368 100644
--- a/arch/ppc/platforms/pmac_time.c
+++ b/arch/ppc/platforms/pmac_time.c
@@ -206,7 +206,7 @@ via_calibrate_decr(void)
     return 1;
 }
 
-#ifdef CONFIG_PMAC_PBOOK
+#ifdef CONFIG_PM
 /*
  * Reset the time after a sleep.
  */
@@ -238,7 +238,7 @@ time_sleep_notify(struct pmu_sleep_notifier *self, int when)
 static struct pmu_sleep_notifier time_sleep_notifier __pmacdata = {
     time_sleep_notify, SLEEP_LEVEL_MISC,
 };
-#endif /* CONFIG_PMAC_PBOOK */
+#endif /* CONFIG_PM */
 
 /*
  * Query the OF and get the decr frequency.
@@ -251,9 +251,9 @@ pmac_calibrate_decr(void)
     struct device_node *cpu;
     unsigned int freq, *fp;
 
-#ifdef CONFIG_PMAC_PBOOK
+#ifdef CONFIG_PM
     pmu_register_sleep_notifier(&time_sleep_notifier);
-#endif /* CONFIG_PMAC_PBOOK */
+#endif /* CONFIG_PM */
 
     /* We assume MacRISC2 machines have correct device-tree
      * calibration. That's better since the VIA itself seems
diff --git a/arch/ppc/platforms/sandpoint.c b/arch/ppc/platforms/sandpoint.c
index 70e58f43f2b8..8b149c2fc54f 100644
--- a/arch/ppc/platforms/sandpoint.c
+++ b/arch/ppc/platforms/sandpoint.c
@@ -324,6 +324,7 @@ sandpoint_setup_arch(void)
             pdata[1].irq = 0;
             pdata[1].mapbase = 0;
         }
+    }
 
     printk(KERN_INFO "Motorola SPS Sandpoint Test Platform\n");
     printk(KERN_INFO "Port by MontaVista Software, Inc. (source@mvista.com)\n");
diff --git a/arch/ppc/syslib/open_pic.c b/arch/ppc/syslib/open_pic.c
index b45d8268bf93..ad39b86ca92c 100644
--- a/arch/ppc/syslib/open_pic.c
+++ b/arch/ppc/syslib/open_pic.c
@@ -370,8 +370,9 @@ void __init openpic_init(int offset)
     /* Initialize IPI interrupts */
     if ( ppc_md.progress ) ppc_md.progress("openpic: ipi",0x3bb);
     for (i = 0; i < OPENPIC_NUM_IPI; i++) {
-        /* Disabled, Priority 10..13 */
-        openpic_initipi(i, 10+i, OPENPIC_VEC_IPI+i+offset);
+        /* Disabled, increased priorities 10..13 */
+        openpic_initipi(i, OPENPIC_PRIORITY_IPI_BASE+i,
+                OPENPIC_VEC_IPI+i+offset);
         /* IPIs are per-CPU */
         irq_desc[OPENPIC_VEC_IPI+i+offset].status |= IRQ_PER_CPU;
         irq_desc[OPENPIC_VEC_IPI+i+offset].handler = &open_pic_ipi;
@@ -399,8 +400,9 @@ void __init openpic_init(int offset)
         if (sense & IRQ_SENSE_MASK)
             irq_desc[i+offset].status = IRQ_LEVEL;
 
-        /* Enabled, Priority 8 */
-        openpic_initirq(i, 8, i+offset, (sense & IRQ_POLARITY_MASK),
+        /* Enabled, Default priority */
+        openpic_initirq(i, OPENPIC_PRIORITY_DEFAULT, i+offset,
+                (sense & IRQ_POLARITY_MASK),
                 (sense & IRQ_SENSE_MASK));
         /* Processor 0 */
         openpic_mapirq(i, CPU_MASK_CPU0, CPU_MASK_NONE);
@@ -656,6 +658,18 @@ static void __init openpic_maptimer(u_int timer, cpumask_t cpumask)
 }
 
 /*
+ * Change the priority of an interrupt
+ */
+void __init
+openpic_set_irq_priority(u_int irq, u_int pri)
+{
+    check_arg_irq(irq);
+    openpic_safe_writefield(&ISR[irq - open_pic_irq_offset]->Vector_Priority,
+                OPENPIC_PRIORITY_MASK,
+                pri << OPENPIC_PRIORITY_SHIFT);
+}
+
+/*
  * Initialize the interrupt source which will generate an NMI.
  * This raises the interrupt's priority from 8 to 9.
  *
@@ -665,9 +679,7 @@ void __init
 openpic_init_nmi_irq(u_int irq)
 {
     check_arg_irq(irq);
-    openpic_safe_writefield(&ISR[irq - open_pic_irq_offset]->Vector_Priority,
-                OPENPIC_PRIORITY_MASK,
-                9 << OPENPIC_PRIORITY_SHIFT);
+    openpic_set_irq_priority(irq, OPENPIC_PRIORITY_NMI);
 }
 
 /*
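With the priority write factored out, the NMI setup becomes a one-liner and other init-time code can retune sources the same way. A hedged usage sketch (the IRQ macro is a hypothetical board constant; __init applies because the helper writes the PIC before interrupts are fully up):

    /* raise a latency-sensitive source one notch above the default */
    static void __init myboard_tune_irqs(void)
    {
        openpic_set_irq_priority(MYBOARD_UART_IRQ,    /* hypothetical */
                     OPENPIC_PRIORITY_DEFAULT + 1);
    }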
diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c index 782ce3efa2c1..1d2ff6d6b0b3 100644 --- a/arch/ppc64/kernel/kprobes.c +++ b/arch/ppc64/kernel/kprobes.c | |||
| @@ -36,6 +36,8 @@ | |||
| 36 | #include <asm/kdebug.h> | 36 | #include <asm/kdebug.h> |
| 37 | #include <asm/sstep.h> | 37 | #include <asm/sstep.h> |
| 38 | 38 | ||
| 39 | static DECLARE_MUTEX(kprobe_mutex); | ||
| 40 | |||
| 39 | static struct kprobe *current_kprobe; | 41 | static struct kprobe *current_kprobe; |
| 40 | static unsigned long kprobe_status, kprobe_saved_msr; | 42 | static unsigned long kprobe_status, kprobe_saved_msr; |
| 41 | static struct kprobe *kprobe_prev; | 43 | static struct kprobe *kprobe_prev; |
| @@ -54,6 +56,15 @@ int arch_prepare_kprobe(struct kprobe *p) | |||
| 54 | printk("Cannot register a kprobe on rfid or mtmsrd\n"); | 56 | printk("Cannot register a kprobe on rfid or mtmsrd\n"); |
| 55 | ret = -EINVAL; | 57 | ret = -EINVAL; |
| 56 | } | 58 | } |
| 59 | |||
| 60 | /* insn must be on a special executable page on ppc64 */ | ||
| 61 | if (!ret) { | ||
| 62 | up(&kprobe_mutex); | ||
| 63 | p->ainsn.insn = get_insn_slot(); | ||
| 64 | down(&kprobe_mutex); | ||
| 65 | if (!p->ainsn.insn) | ||
| 66 | ret = -ENOMEM; | ||
| 67 | } | ||
| 57 | return ret; | 68 | return ret; |
| 58 | } | 69 | } |
| 59 | 70 | ||
| @@ -79,16 +90,22 @@ void arch_disarm_kprobe(struct kprobe *p) | |||
| 79 | 90 | ||
| 80 | void arch_remove_kprobe(struct kprobe *p) | 91 | void arch_remove_kprobe(struct kprobe *p) |
| 81 | { | 92 | { |
| 93 | up(&kprobe_mutex); | ||
| 94 | free_insn_slot(p->ainsn.insn); | ||
| 95 | down(&kprobe_mutex); | ||
| 82 | } | 96 | } |
| 83 | 97 | ||
| 84 | static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) | 98 | static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) |
| 85 | { | 99 | { |
| 100 | kprobe_opcode_t insn = *p->ainsn.insn; | ||
| 101 | |||
| 86 | regs->msr |= MSR_SE; | 102 | regs->msr |= MSR_SE; |
| 87 | /*single step inline if it a breakpoint instruction*/ | 103 | |
| 88 | if (p->opcode == BREAKPOINT_INSTRUCTION) | 104 | /* single step inline if it is a trap variant */ |
| 105 | if (IS_TW(insn) || IS_TD(insn) || IS_TWI(insn) || IS_TDI(insn)) | ||
| 89 | regs->nip = (unsigned long)p->addr; | 106 | regs->nip = (unsigned long)p->addr; |
| 90 | else | 107 | else |
| 91 | regs->nip = (unsigned long)&p->ainsn.insn; | 108 | regs->nip = (unsigned long)p->ainsn.insn; |
| 92 | } | 109 | } |
| 93 | 110 | ||
| 94 | static inline void save_previous_kprobe(void) | 111 | static inline void save_previous_kprobe(void) |
| @@ -105,6 +122,23 @@ static inline void restore_previous_kprobe(void) | |||
| 105 | kprobe_saved_msr = kprobe_saved_msr_prev; | 122 | kprobe_saved_msr = kprobe_saved_msr_prev; |
| 106 | } | 123 | } |
| 107 | 124 | ||
| 125 | void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs) | ||
| 126 | { | ||
| 127 | struct kretprobe_instance *ri; | ||
| 128 | |||
| 129 | if ((ri = get_free_rp_inst(rp)) != NULL) { | ||
| 130 | ri->rp = rp; | ||
| 131 | ri->task = current; | ||
| 132 | ri->ret_addr = (kprobe_opcode_t *)regs->link; | ||
| 133 | |||
| 134 | /* Replace the return addr with trampoline addr */ | ||
| 135 | regs->link = (unsigned long)kretprobe_trampoline; | ||
| 136 | add_rp_inst(ri); | ||
| 137 | } else { | ||
| 138 | rp->nmissed++; | ||
| 139 | } | ||
| 140 | } | ||
| 141 | |||
| 108 | static inline int kprobe_handler(struct pt_regs *regs) | 142 | static inline int kprobe_handler(struct pt_regs *regs) |
| 109 | { | 143 | { |
| 110 | struct kprobe *p; | 144 | struct kprobe *p; |
| @@ -195,6 +229,78 @@ no_kprobe: | |||
| 195 | } | 229 | } |
| 196 | 230 | ||
| 197 | /* | 231 | /* |
| 232 | * Function return probe trampoline: | ||
| 233 | * - init_kprobes() establishes a probepoint here | ||
| 234 | * - When the probed function returns, this probe | ||
| 235 | * causes the handlers to fire | ||
| 236 | */ | ||
| 237 | void kretprobe_trampoline_holder(void) | ||
| 238 | { | ||
| 239 | asm volatile(".global kretprobe_trampoline\n" | ||
| 240 | "kretprobe_trampoline:\n" | ||
| 241 | "nop\n"); | ||
| 242 | } | ||
| 243 | |||
| 244 | /* | ||
| 245 | * Called when the probe at kretprobe trampoline is hit | ||
| 246 | */ | ||
| 247 | int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) | ||
| 248 | { | ||
| 249 | struct kretprobe_instance *ri = NULL; | ||
| 250 | struct hlist_head *head; | ||
| 251 | struct hlist_node *node, *tmp; | ||
| 252 | unsigned long orig_ret_address = 0; | ||
| 253 | unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline; | ||
| 254 | |||
| 255 | head = kretprobe_inst_table_head(current); | ||
| 256 | |||
| 257 | /* | ||
| 258 | * It is possible to have multiple instances associated with a given | ||
| 259 | * task either because an multiple functions in the call path | ||
| 260 | * have a return probe installed on them, and/or more then one return | ||
| 261 | * return probe was registered for a target function. | ||
| 262 | * | ||
| 263 | * We can handle this because: | ||
| 264 | * - instances are always inserted at the head of the list | ||
| 265 | * - when multiple return probes are registered for the same | ||
| 266 | * function, the first instance's ret_addr will point to the | ||
| 267 | * real return address, and all the rest will point to | ||
| 268 | * kretprobe_trampoline | ||
| 269 | */ | ||
| 270 | hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { | ||
| 271 | if (ri->task != current) | ||
| 272 | /* another task is sharing our hash bucket */ | ||
| 273 | continue; | ||
| 274 | |||
| 275 | if (ri->rp && ri->rp->handler) | ||
| 276 | ri->rp->handler(ri, regs); | ||
| 277 | |||
| 278 | orig_ret_address = (unsigned long)ri->ret_addr; | ||
| 279 | recycle_rp_inst(ri); | ||
| 280 | |||
| 281 | if (orig_ret_address != trampoline_address) | ||
| 282 | /* | ||
| 283 | * This is the real return address. Any other | ||
| 284 | * instances associated with this task are for | ||
| 285 | * other calls deeper on the call stack | ||
| 286 | */ | ||
| 287 | break; | ||
| 288 | } | ||
| 289 | |||
| 290 | BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address)); | ||
| 291 | regs->nip = orig_ret_address; | ||
| 292 | |||
| 293 | unlock_kprobes(); | ||
| 294 | |||
| 295 | /* | ||
| 296 | * By returning a non-zero value, we are telling | ||
| 297 | * kprobe_handler() that we have handled unlocking | ||
| 298 | * and re-enabling preemption. | ||
| 299 | */ | ||
| 300 | return 1; | ||
| 301 | } | ||
| 302 | |||
| 303 | /* | ||
| 198 | * Called after single-stepping. p->addr is the address of the | 304 | * Called after single-stepping. p->addr is the address of the |
| 199 | * instruction whose first byte has been replaced by the "breakpoint" | 305 | * instruction whose first byte has been replaced by the "breakpoint" |
| 200 | * instruction. To avoid the SMP problems that can occur when we | 306 | * instruction. To avoid the SMP problems that can occur when we |
| @@ -205,9 +311,10 @@ no_kprobe: | |||
| 205 | static void resume_execution(struct kprobe *p, struct pt_regs *regs) | 311 | static void resume_execution(struct kprobe *p, struct pt_regs *regs) |
| 206 | { | 312 | { |
| 207 | int ret; | 313 | int ret; |
| 314 | unsigned int insn = *p->ainsn.insn; | ||
| 208 | 315 | ||
| 209 | regs->nip = (unsigned long)p->addr; | 316 | regs->nip = (unsigned long)p->addr; |
| 210 | ret = emulate_step(regs, p->ainsn.insn[0]); | 317 | ret = emulate_step(regs, insn); |
| 211 | if (ret == 0) | 318 | if (ret == 0) |
| 212 | regs->nip = (unsigned long)p->addr + 4; | 319 | regs->nip = (unsigned long)p->addr + 4; |
| 213 | } | 320 | } |
| @@ -331,3 +438,13 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) | |||
| 331 | memcpy(regs, &jprobe_saved_regs, sizeof(struct pt_regs)); | 438 | memcpy(regs, &jprobe_saved_regs, sizeof(struct pt_regs)); |
| 332 | return 1; | 439 | return 1; |
| 333 | } | 440 | } |
| 441 | |||
| 442 | static struct kprobe trampoline_p = { | ||
| 443 | .addr = (kprobe_opcode_t *) &kretprobe_trampoline, | ||
| 444 | .pre_handler = trampoline_probe_handler | ||
| 445 | }; | ||
| 446 | |||
| 447 | int __init arch_init(void) | ||
| 448 | { | ||
| 449 | return register_kprobe(&trampoline_p); | ||
| 450 | } | ||
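
The arch_init() above registers a plain kprobe on kretprobe_trampoline itself; that is how trampoline_probe_handler() gains control when a probed function returns. As a hedged usage sketch, the module below exercises the generic kretprobe API this enables: register_kretprobe() and the handler signature are the generic interface, while the target function, module boilerplate, and the direct function-pointer cast are illustrative assumptions.

    #include <linux/module.h>
    #include <linux/kernel.h>
    #include <linux/kprobes.h>

    static noinline long my_target(long x)
    {
    	return x * 2;          /* something for the probe to catch returning */
    }

    static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
    {
    	printk(KERN_INFO "my_target returned\n");
    	return 0;
    }

    static struct kretprobe my_rp = {
    	.handler   = ret_handler,
    	.maxactive = 20,       /* concurrent instances; overflow bumps nmissed */
    };

    static int __init demo_init(void)
    {
    	int ret;

    	/*
    	 * NB: on ppc64 the probe address must be the function's entry
    	 * point, not its descriptor; the plain cast is a simplification.
    	 */
    	my_rp.kp.addr = (kprobe_opcode_t *) my_target;
    	ret = register_kretprobe(&my_rp);
    	if (ret == 0)
    		my_target(21); /* trigger one entry/return pair */
    	return ret;
    }

    static void __exit demo_exit(void)
    {
    	unregister_kretprobe(&my_rp);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
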
diff --git a/arch/ppc64/kernel/ppc_ksyms.c b/arch/ppc64/kernel/ppc_ksyms.c index b230a63fe4c8..705742f4eec6 100644 --- a/arch/ppc64/kernel/ppc_ksyms.c +++ b/arch/ppc64/kernel/ppc_ksyms.c | |||
| @@ -75,6 +75,7 @@ EXPORT_SYMBOL(giveup_fpu); | |||
| 75 | EXPORT_SYMBOL(giveup_altivec); | 75 | EXPORT_SYMBOL(giveup_altivec); |
| 76 | #endif | 76 | #endif |
| 77 | EXPORT_SYMBOL(__flush_icache_range); | 77 | EXPORT_SYMBOL(__flush_icache_range); |
| 78 | EXPORT_SYMBOL(flush_dcache_range); | ||
| 78 | 79 | ||
| 79 | #ifdef CONFIG_SMP | 80 | #ifdef CONFIG_SMP |
| 80 | #ifdef CONFIG_PPC_ISERIES | 81 | #ifdef CONFIG_PPC_ISERIES |
diff --git a/arch/ppc64/kernel/process.c b/arch/ppc64/kernel/process.c index aba89554d89d..f7cae05e40fb 100644 --- a/arch/ppc64/kernel/process.c +++ b/arch/ppc64/kernel/process.c | |||
| @@ -36,6 +36,7 @@ | |||
| 36 | #include <linux/kallsyms.h> | 36 | #include <linux/kallsyms.h> |
| 37 | #include <linux/interrupt.h> | 37 | #include <linux/interrupt.h> |
| 38 | #include <linux/utsname.h> | 38 | #include <linux/utsname.h> |
| 39 | #include <linux/kprobes.h> | ||
| 39 | 40 | ||
| 40 | #include <asm/pgtable.h> | 41 | #include <asm/pgtable.h> |
| 41 | #include <asm/uaccess.h> | 42 | #include <asm/uaccess.h> |
| @@ -307,6 +308,8 @@ void show_regs(struct pt_regs * regs) | |||
| 307 | 308 | ||
| 308 | void exit_thread(void) | 309 | void exit_thread(void) |
| 309 | { | 310 | { |
| 311 | kprobe_flush_task(current); | ||
| 312 | |||
| 310 | #ifndef CONFIG_SMP | 313 | #ifndef CONFIG_SMP |
| 311 | if (last_task_used_math == current) | 314 | if (last_task_used_math == current) |
| 312 | last_task_used_math = NULL; | 315 | last_task_used_math = NULL; |
| @@ -321,6 +324,7 @@ void flush_thread(void) | |||
| 321 | { | 324 | { |
| 322 | struct thread_info *t = current_thread_info(); | 325 | struct thread_info *t = current_thread_info(); |
| 323 | 326 | ||
| 327 | kprobe_flush_task(current); | ||
| 324 | if (t->flags & _TIF_ABI_PENDING) | 328 | if (t->flags & _TIF_ABI_PENDING) |
| 325 | t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT); | 329 | t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT); |
| 326 | 330 | ||
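
Both new kprobe_flush_task() calls matter because a task can exit, or exec a new image, while kretprobe instances still reference it: the flush reclaims those instances so a later hit on the trampoline can never be dispatched to a dead task. A userspace toy of what flushing does (the list and task ids stand in for the kretprobe instance table, not kernel code):

    #include <stdio.h>

    struct inst {
    	int task_id;
    	struct inst *next;
    };

    static struct inst *bucket;

    /* Unlink every instance owned by the exiting task ("recycle_rp_inst"). */
    static void flush_task(int task_id)
    {
    	struct inst **pp = &bucket;

    	while (*pp) {
    		if ((*pp)->task_id == task_id)
    			*pp = (*pp)->next;
    		else
    			pp = &(*pp)->next;
    	}
    }

    int main(void)
    {
    	struct inst a = { .task_id = 1 }, b = { .task_id = 2 };

    	a.next = &b;
    	bucket = &a;
    	flush_task(1);
    	printf("head now owned by task %d\n", bucket->task_id);  /* 2 */
    	return 0;
    }
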
diff --git a/arch/ppc64/kernel/time.c b/arch/ppc64/kernel/time.c index 2348a75e050d..2a532db9138a 100644 --- a/arch/ppc64/kernel/time.c +++ b/arch/ppc64/kernel/time.c | |||
| @@ -91,6 +91,7 @@ unsigned long tb_to_xs; | |||
| 91 | unsigned tb_to_us; | 91 | unsigned tb_to_us; |
| 92 | unsigned long processor_freq; | 92 | unsigned long processor_freq; |
| 93 | DEFINE_SPINLOCK(rtc_lock); | 93 | DEFINE_SPINLOCK(rtc_lock); |
| 94 | EXPORT_SYMBOL_GPL(rtc_lock); | ||
| 94 | 95 | ||
| 95 | unsigned long tb_to_ns_scale; | 96 | unsigned long tb_to_ns_scale; |
| 96 | unsigned long tb_to_ns_shift; | 97 | unsigned long tb_to_ns_shift; |
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c index 4e680f87a75f..acd2a778ebe6 100644 --- a/arch/x86_64/kernel/kprobes.c +++ b/arch/x86_64/kernel/kprobes.c | |||
| @@ -38,7 +38,7 @@ | |||
| 38 | #include <linux/string.h> | 38 | #include <linux/string.h> |
| 39 | #include <linux/slab.h> | 39 | #include <linux/slab.h> |
| 40 | #include <linux/preempt.h> | 40 | #include <linux/preempt.h> |
| 41 | #include <linux/moduleloader.h> | 41 | |
| 42 | #include <asm/cacheflush.h> | 42 | #include <asm/cacheflush.h> |
| 43 | #include <asm/pgtable.h> | 43 | #include <asm/pgtable.h> |
| 44 | #include <asm/kdebug.h> | 44 | #include <asm/kdebug.h> |
| @@ -51,8 +51,6 @@ static struct kprobe *kprobe_prev; | |||
| 51 | static unsigned long kprobe_status_prev, kprobe_old_rflags_prev, kprobe_saved_rflags_prev; | 51 | static unsigned long kprobe_status_prev, kprobe_old_rflags_prev, kprobe_saved_rflags_prev; |
| 52 | static struct pt_regs jprobe_saved_regs; | 52 | static struct pt_regs jprobe_saved_regs; |
| 53 | static long *jprobe_saved_rsp; | 53 | static long *jprobe_saved_rsp; |
| 54 | static kprobe_opcode_t *get_insn_slot(void); | ||
| 55 | static void free_insn_slot(kprobe_opcode_t *slot); | ||
| 56 | void jprobe_return_end(void); | 54 | void jprobe_return_end(void); |
| 57 | 55 | ||
| 58 | /* copy of the kernel stack at the probe fire time */ | 56 | /* copy of the kernel stack at the probe fire time */ |
| @@ -274,48 +272,23 @@ static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) | |||
| 274 | regs->rip = (unsigned long)p->ainsn.insn; | 272 | regs->rip = (unsigned long)p->ainsn.insn; |
| 275 | } | 273 | } |
| 276 | 274 | ||
| 277 | struct task_struct *arch_get_kprobe_task(void *ptr) | ||
| 278 | { | ||
| 279 | return ((struct thread_info *) (((unsigned long) ptr) & | ||
| 280 | (~(THREAD_SIZE -1))))->task; | ||
| 281 | } | ||
| 282 | |||
| 283 | void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs) | 275 | void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs) |
| 284 | { | 276 | { |
| 285 | unsigned long *sara = (unsigned long *)regs->rsp; | 277 | unsigned long *sara = (unsigned long *)regs->rsp; |
| 286 | struct kretprobe_instance *ri; | 278 | struct kretprobe_instance *ri; |
| 287 | static void *orig_ret_addr; | 279 | |
| 280 | if ((ri = get_free_rp_inst(rp)) != NULL) { | ||
| 281 | ri->rp = rp; | ||
| 282 | ri->task = current; | ||
| 283 | ri->ret_addr = (kprobe_opcode_t *) *sara; | ||
| 288 | 284 | ||
| 289 | /* | ||
| 290 | * Save the return address when the return probe hits | ||
| 291 | * the first time, and use it to populate the (kretprobe | ||
| 292 | * instance)->ret_addr for subsequent return probes at | ||
| 293 | * the same address since the stack address would have | ||
| 294 | * the kretprobe_trampoline by then. | ||
| 295 | */ | ||
| 296 | if (((void*) *sara) != kretprobe_trampoline) | ||
| 297 | orig_ret_addr = (void*) *sara; | ||
| 298 | |||
| 299 | if ((ri = get_free_rp_inst(rp)) != NULL) { | ||
| 300 | ri->rp = rp; | ||
| 301 | ri->stack_addr = sara; | ||
| 302 | ri->ret_addr = orig_ret_addr; | ||
| 303 | add_rp_inst(ri); | ||
| 304 | /* Replace the return addr with trampoline addr */ | 285 | /* Replace the return addr with trampoline addr */ |
| 305 | *sara = (unsigned long) &kretprobe_trampoline; | 286 | *sara = (unsigned long) &kretprobe_trampoline; |
| 306 | } else { | ||
| 307 | rp->nmissed++; | ||
| 308 | } | ||
| 309 | } | ||
| 310 | 287 | ||
| 311 | void arch_kprobe_flush_task(struct task_struct *tk) | 288 | add_rp_inst(ri); |
| 312 | { | 289 | } else { |
| 313 | struct kretprobe_instance *ri; | 290 | rp->nmissed++; |
| 314 | while ((ri = get_rp_inst_tsk(tk)) != NULL) { | 291 | } |
| 315 | *((unsigned long *)(ri->stack_addr)) = | ||
| 316 | (unsigned long) ri->ret_addr; | ||
| 317 | recycle_rp_inst(ri); | ||
| 318 | } | ||
| 319 | } | 292 | } |
| 320 | 293 | ||
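
The rewritten arch_prepare_kretprobe() is now straightforward: grab a free instance, record the owning task and the real return address from the top of the stack, then overwrite that stack slot so the probed function "returns" into the trampoline. The hijack itself is easy to model with a fake stack slot (a userspace sketch, not kernel code):

    #include <stdio.h>

    static void trampoline(void)   /* stand-in for kretprobe_trampoline */
    {
    }

    struct instance {
    	unsigned long ret_addr;    /* where the probed function really returns */
    };

    static void prepare(unsigned long *sara, struct instance *ri)
    {
    	ri->ret_addr = *sara;                  /* remember the caller */
    	*sara = (unsigned long) &trampoline;   /* divert the return */
    }

    int main(void)
    {
    	unsigned long stack_slot = 0x400a20;   /* pretend saved return address */
    	struct instance ri;

    	prepare(&stack_slot, &ri);
    	printf("returns into %#lx, real caller was %#lx\n",
    	       stack_slot, ri.ret_addr);
    	return 0;
    }
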
| 321 | /* | 294 | /* |
| @@ -428,36 +401,59 @@ no_kprobe: | |||
| 428 | */ | 401 | */ |
| 429 | int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) | 402 | int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) |
| 430 | { | 403 | { |
| 431 | struct task_struct *tsk; | 404 | struct kretprobe_instance *ri = NULL; |
| 432 | struct kretprobe_instance *ri; | 405 | struct hlist_head *head; |
| 433 | struct hlist_head *head; | 406 | struct hlist_node *node, *tmp; |
| 434 | struct hlist_node *node; | 407 | unsigned long orig_ret_address = 0; |
| 435 | unsigned long *sara = (unsigned long *)regs->rsp - 1; | 408 | unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; |
| 436 | |||
| 437 | tsk = arch_get_kprobe_task(sara); | ||
| 438 | head = kretprobe_inst_table_head(tsk); | ||
| 439 | |||
| 440 | hlist_for_each_entry(ri, node, head, hlist) { | ||
| 441 | if (ri->stack_addr == sara && ri->rp) { | ||
| 442 | if (ri->rp->handler) | ||
| 443 | ri->rp->handler(ri, regs); | ||
| 444 | } | ||
| 445 | } | ||
| 446 | return 0; | ||
| 447 | } | ||
| 448 | 409 | ||
| 449 | void trampoline_post_handler(struct kprobe *p, struct pt_regs *regs, | 410 | head = kretprobe_inst_table_head(current); |
| 450 | unsigned long flags) | ||
| 451 | { | ||
| 452 | struct kretprobe_instance *ri; | ||
| 453 | /* RA already popped */ | ||
| 454 | unsigned long *sara = ((unsigned long *)regs->rsp) - 1; | ||
| 455 | 411 | ||
| 456 | while ((ri = get_rp_inst(sara))) { | 412 | /* |
| 457 | regs->rip = (unsigned long)ri->ret_addr; | 413 | * It is possible to have multiple instances associated with a given |
| 414 | * task either because multiple functions in the call path | ||
| 415 | * have a return probe installed on them, and/or more than one | ||
| 416 | * return probe was registered for a target function. | ||
| 417 | * | ||
| 418 | * We can handle this because: | ||
| 419 | * - instances are always inserted at the head of the list | ||
| 420 | * - when multiple return probes are registered for the same | ||
| 421 | * function, the first instance's ret_addr will point to the | ||
| 422 | * real return address, and all the rest will point to | ||
| 423 | * kretprobe_trampoline | ||
| 424 | */ | ||
| 425 | hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { | ||
| 426 | if (ri->task != current) | ||
| 427 | /* another task is sharing our hash bucket */ | ||
| 428 | continue; | ||
| 429 | |||
| 430 | if (ri->rp && ri->rp->handler) | ||
| 431 | ri->rp->handler(ri, regs); | ||
| 432 | |||
| 433 | orig_ret_address = (unsigned long)ri->ret_addr; | ||
| 458 | recycle_rp_inst(ri); | 434 | recycle_rp_inst(ri); |
| 435 | |||
| 436 | if (orig_ret_address != trampoline_address) | ||
| 437 | /* | ||
| 438 | * This is the real return address. Any other | ||
| 439 | * instances associated with this task are for | ||
| 440 | * other calls deeper on the call stack | ||
| 441 | */ | ||
| 442 | break; | ||
| 459 | } | 443 | } |
| 460 | regs->eflags &= ~TF_MASK; | 444 | |
| 445 | BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address)); | ||
| 446 | regs->rip = orig_ret_address; | ||
| 447 | |||
| 448 | unlock_kprobes(); | ||
| 449 | preempt_enable_no_resched(); | ||
| 450 | |||
| 451 | /* | ||
| 452 | * By returning a non-zero value, we are telling | ||
| 453 | * kprobe_handler() that we have handled unlocking | ||
| 454 | * and re-enabling preemption. | ||
| 455 | */ | ||
| 456 | return 1; | ||
| 461 | } | 457 | } |
| 462 | 458 | ||
| 463 | /* | 459 | /* |
| @@ -550,8 +546,7 @@ int post_kprobe_handler(struct pt_regs *regs) | |||
| 550 | current_kprobe->post_handler(current_kprobe, regs, 0); | 546 | current_kprobe->post_handler(current_kprobe, regs, 0); |
| 551 | } | 547 | } |
| 552 | 548 | ||
| 553 | if (current_kprobe->post_handler != trampoline_post_handler) | 549 | resume_execution(current_kprobe, regs); |
| 554 | resume_execution(current_kprobe, regs); | ||
| 555 | regs->eflags |= kprobe_saved_rflags; | 550 | regs->eflags |= kprobe_saved_rflags; |
| 556 | 551 | ||
| 557 | /* Restore the original saved kprobes variables and continue. */ | 552 | /* Restore the original saved kprobes variables and continue. */ |
| @@ -682,111 +677,12 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) | |||
| 682 | return 0; | 677 | return 0; |
| 683 | } | 678 | } |
| 684 | 679 | ||
| 685 | /* | 680 | static struct kprobe trampoline_p = { |
| 686 | * kprobe->ainsn.insn points to the copy of the instruction to be single-stepped. | 681 | .addr = (kprobe_opcode_t *) &kretprobe_trampoline, |
| 687 | * By default on x86_64, pages we get from kmalloc or vmalloc are not | 682 | .pre_handler = trampoline_probe_handler |
| 688 | * executable. Single-stepping an instruction on such a page yields an | ||
| 689 | * oops. So instead of storing the instruction copies in their respective | ||
| 690 | * kprobe objects, we allocate a page, map it executable, and store all the | ||
| 691 | * instruction copies there. (We can allocate additional pages if somebody | ||
| 692 | * inserts a huge number of probes.) Each page can hold up to INSNS_PER_PAGE | ||
| 693 | * instruction slots, each of which is MAX_INSN_SIZE*sizeof(kprobe_opcode_t) | ||
| 694 | * bytes. | ||
| 695 | */ | ||
| 696 | #define INSNS_PER_PAGE (PAGE_SIZE/(MAX_INSN_SIZE*sizeof(kprobe_opcode_t))) | ||
| 697 | struct kprobe_insn_page { | ||
| 698 | struct hlist_node hlist; | ||
| 699 | kprobe_opcode_t *insns; /* page of instruction slots */ | ||
| 700 | char slot_used[INSNS_PER_PAGE]; | ||
| 701 | int nused; | ||
| 702 | }; | 683 | }; |
| 703 | 684 | ||
| 704 | static struct hlist_head kprobe_insn_pages; | 685 | int __init arch_init(void) |
| 705 | |||
| 706 | /** | ||
| 707 | * get_insn_slot() - Find a slot on an executable page for an instruction. | ||
| 708 | * We allocate an executable page if there's no room on existing ones. | ||
| 709 | */ | ||
| 710 | static kprobe_opcode_t *get_insn_slot(void) | ||
| 711 | { | ||
| 712 | struct kprobe_insn_page *kip; | ||
| 713 | struct hlist_node *pos; | ||
| 714 | |||
| 715 | hlist_for_each(pos, &kprobe_insn_pages) { | ||
| 716 | kip = hlist_entry(pos, struct kprobe_insn_page, hlist); | ||
| 717 | if (kip->nused < INSNS_PER_PAGE) { | ||
| 718 | int i; | ||
| 719 | for (i = 0; i < INSNS_PER_PAGE; i++) { | ||
| 720 | if (!kip->slot_used[i]) { | ||
| 721 | kip->slot_used[i] = 1; | ||
| 722 | kip->nused++; | ||
| 723 | return kip->insns + (i*MAX_INSN_SIZE); | ||
| 724 | } | ||
| 725 | } | ||
| 726 | /* Surprise! No unused slots. Fix kip->nused. */ | ||
| 727 | kip->nused = INSNS_PER_PAGE; | ||
| 728 | } | ||
| 729 | } | ||
| 730 | |||
| 731 | /* All out of space. Need to allocate a new page. Use slot 0.*/ | ||
| 732 | kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL); | ||
| 733 | if (!kip) { | ||
| 734 | return NULL; | ||
| 735 | } | ||
| 736 | |||
| 737 | /* | ||
| 738 | * For the %rip-relative displacement fixups to be doable, we | ||
| 739 | * need our instruction copy to be within +/- 2GB of any data it | ||
| 740 | * might access via %rip. That is, within 2GB of where the | ||
| 741 | * kernel image and loaded module images reside. So we allocate | ||
| 742 | * a page in the module loading area. | ||
| 743 | */ | ||
| 744 | kip->insns = module_alloc(PAGE_SIZE); | ||
| 745 | if (!kip->insns) { | ||
| 746 | kfree(kip); | ||
| 747 | return NULL; | ||
| 748 | } | ||
| 749 | INIT_HLIST_NODE(&kip->hlist); | ||
| 750 | hlist_add_head(&kip->hlist, &kprobe_insn_pages); | ||
| 751 | memset(kip->slot_used, 0, INSNS_PER_PAGE); | ||
| 752 | kip->slot_used[0] = 1; | ||
| 753 | kip->nused = 1; | ||
| 754 | return kip->insns; | ||
| 755 | } | ||
| 756 | |||
| 757 | /** | ||
| 758 | * free_insn_slot() - Free instruction slot obtained from get_insn_slot(). | ||
| 759 | */ | ||
| 760 | static void free_insn_slot(kprobe_opcode_t *slot) | ||
| 761 | { | 686 | { |
| 762 | struct kprobe_insn_page *kip; | 687 | return register_kprobe(&trampoline_p); |
| 763 | struct hlist_node *pos; | ||
| 764 | |||
| 765 | hlist_for_each(pos, &kprobe_insn_pages) { | ||
| 766 | kip = hlist_entry(pos, struct kprobe_insn_page, hlist); | ||
| 767 | if (kip->insns <= slot | ||
| 768 | && slot < kip->insns+(INSNS_PER_PAGE*MAX_INSN_SIZE)) { | ||
| 769 | int i = (slot - kip->insns) / MAX_INSN_SIZE; | ||
| 770 | kip->slot_used[i] = 0; | ||
| 771 | kip->nused--; | ||
| 772 | if (kip->nused == 0) { | ||
| 773 | /* | ||
| 774 | * Page is no longer in use. Free it unless | ||
| 775 | * it's the last one. We keep the last one | ||
| 776 | * so as not to have to set it up again the | ||
| 777 | * next time somebody inserts a probe. | ||
| 778 | */ | ||
| 779 | hlist_del(&kip->hlist); | ||
| 780 | if (hlist_empty(&kprobe_insn_pages)) { | ||
| 781 | INIT_HLIST_NODE(&kip->hlist); | ||
| 782 | hlist_add_head(&kip->hlist, | ||
| 783 | &kprobe_insn_pages); | ||
| 784 | } else { | ||
| 785 | module_free(NULL, kip->insns); | ||
| 786 | kfree(kip); | ||
| 787 | } | ||
| 788 | } | ||
| 789 | return; | ||
| 790 | } | ||
| 791 | } | ||
| 792 | } | 688 | } |
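
The get_insn_slot()/free_insn_slot() pair deleted above matches the forward declarations removed near the top of this file, so the slot allocator presumably now lives in generic kprobes code rather than per-arch. Its idea, one executable page carved into fixed-size instruction slots tracked by a use map and released when the last slot is freed, sketches easily in userspace (sizes and names here are illustrative, not the kernel's):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SZ 4096
    #define SLOT_SZ 16                   /* stand-in for MAX_INSN_SIZE bytes */
    #define NSLOTS  (PAGE_SZ / SLOT_SZ)

    struct insn_page {
    	unsigned char *insns;            /* would be an executable page */
    	char slot_used[NSLOTS];
    	int nused;
    };

    static unsigned char *get_slot(struct insn_page *p)
    {
    	int i;

    	for (i = 0; i < NSLOTS; i++) {
    		if (!p->slot_used[i]) {
    			p->slot_used[i] = 1;
    			p->nused++;
    			return p->insns + i * SLOT_SZ;
    		}
    	}
    	return NULL;     /* full: the kernel would allocate another page */
    }

    static void free_slot(struct insn_page *p, unsigned char *slot)
    {
    	int i = (slot - p->insns) / SLOT_SZ;

    	p->slot_used[i] = 0;
    	p->nused--;
    	/* nused == 0: the kernel would release the whole page here */
    }

    int main(void)
    {
    	struct insn_page page = { .insns = malloc(PAGE_SZ) };
    	unsigned char *a = get_slot(&page);
    	unsigned char *b = get_slot(&page);

    	printf("slots at offsets %td and %td\n",
    	       a - page.insns, b - page.insns);   /* 0 and 16 */
    	free_slot(&page, a);
    	free_slot(&page, b);
    	free(page.insns);
    	return 0;
    }
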
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c index 1d91271796e5..7577f9d7a75d 100644 --- a/arch/x86_64/kernel/process.c +++ b/arch/x86_64/kernel/process.c | |||
| @@ -482,6 +482,33 @@ out: | |||
| 482 | } | 482 | } |
| 483 | 483 | ||
| 484 | /* | 484 | /* |
| 485 | * This function decides whether the context switch from prev to next | ||
| 486 | * needs to toggle the TSC disable bit in CR4. | ||
| 487 | */ | ||
| 488 | static inline void disable_tsc(struct task_struct *prev_p, | ||
| 489 | struct task_struct *next_p) | ||
| 490 | { | ||
| 491 | struct thread_info *prev, *next; | ||
| 492 | |||
| 493 | /* | ||
| 494 | * gcc should eliminate the ->thread_info dereference if | ||
| 495 | * has_secure_computing returns 0 at compile time (SECCOMP=n). | ||
| 496 | */ | ||
| 497 | prev = prev_p->thread_info; | ||
| 498 | next = next_p->thread_info; | ||
| 499 | |||
| 500 | if (has_secure_computing(prev) || has_secure_computing(next)) { | ||
| 501 | /* slow path here */ | ||
| 502 | if (has_secure_computing(prev) && | ||
| 503 | !has_secure_computing(next)) { | ||
| 504 | write_cr4(read_cr4() & ~X86_CR4_TSD); | ||
| 505 | } else if (!has_secure_computing(prev) && | ||
| 506 | has_secure_computing(next)) | ||
| 507 | write_cr4(read_cr4() | X86_CR4_TSD); | ||
| 508 | } | ||
| 509 | } | ||
| 510 | |||
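
disable_tsc() writes CR4 only on transitions into or out of a seccomp task, keeping the common context switch off the slow path. The decision logic in isolation (cr4 is a plain variable in this userspace sketch; only the X86_CR4_TSD value matches the real architectural bit):

    #include <stdio.h>

    #define X86_CR4_TSD 0x0004      /* CR4 bit 2: time stamp disable */

    static unsigned long cr4;       /* stand-in for read_cr4()/write_cr4() */

    static void disable_tsc(int prev_seccomp, int next_seccomp)
    {
    	if (prev_seccomp || next_seccomp) {     /* slow path only if needed */
    		if (prev_seccomp && !next_seccomp)
    			cr4 &= ~X86_CR4_TSD;    /* leaving seccomp: rdtsc ok again */
    		else if (!prev_seccomp && next_seccomp)
    			cr4 |= X86_CR4_TSD;     /* entering seccomp: rdtsc faults */
    	}
    }

    int main(void)
    {
    	disable_tsc(0, 1);                      /* switch into a seccomp task */
    	printf("cr4 = %#lx\n", cr4);            /* X86_CR4_TSD set */
    	disable_tsc(1, 0);                      /* switch back out */
    	printf("cr4 = %#lx\n", cr4);            /* bit cleared again */
    	return 0;
    }
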
| 511 | /* | ||
| 485 | * This special macro can be used to load a debugging register | 512 | * This special macro can be used to load a debugging register |
| 486 | */ | 513 | */ |
| 487 | #define loaddebug(thread,r) set_debug(thread->debugreg ## r, r) | 514 | #define loaddebug(thread,r) set_debug(thread->debugreg ## r, r) |
| @@ -599,6 +626,8 @@ struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct * | |||
| 599 | } | 626 | } |
| 600 | } | 627 | } |
| 601 | 628 | ||
| 629 | disable_tsc(prev_p, next_p); | ||
| 630 | |||
| 602 | return prev_p; | 631 | return prev_p; |
| 603 | } | 632 | } |
| 604 | 633 | ||
