Diffstat (limited to 'arch')
 42 files changed, 867 insertions(+), 688 deletions(-)
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 8f146a4b4752..bbea636ff687 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -32,6 +32,7 @@
 #include <asm/leds.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
+#include <asm/mach/time.h>
 
 extern const char *processor_modes[];
 extern void setup_mm_for_reboot(char mode);
@@ -85,8 +86,10 @@ EXPORT_SYMBOL(pm_power_off);
 void default_idle(void)
 {
 	local_irq_disable();
-	if (!need_resched() && !hlt_counter)
+	if (!need_resched() && !hlt_counter) {
+		timer_dyn_reprogram();
 		arch_idle();
+	}
 	local_irq_enable();
 }
 
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 06054c9ba074..1b7fcd50c3e2 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -424,15 +424,19 @@ static int timer_dyn_tick_disable(void)
 	return ret;
 }
 
+/*
+ * Reprogram the system timer for at least the calculated time interval.
+ * This function should be called from the idle thread with IRQs disabled,
+ * immediately before sleeping.
+ */
 void timer_dyn_reprogram(void)
 {
 	struct dyn_tick_timer *dyn_tick = system_timer->dyn_tick;
-	unsigned long flags;
 
-	write_seqlock_irqsave(&xtime_lock, flags);
+	write_seqlock(&xtime_lock);
 	if (dyn_tick->state & DYN_TICK_ENABLED)
 		dyn_tick->reprogram(next_timer_interrupt() - jiffies);
-	write_sequnlock_irqrestore(&xtime_lock, flags);
+	write_sequnlock(&xtime_lock);
 }
 
 static ssize_t timer_show_dyn_tick(struct sys_device *dev, char *buf)
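
Note: these two ARM hunks cooperate. default_idle() now calls timer_dyn_reprogram() with interrupts already disabled, which is also why the reprogram path can take xtime_lock with plain write_seqlock() rather than the _irqsave variant. A minimal userspace model of the idle path (the stub bodies are invented for illustration; only the call ordering mirrors the kernel code above):

#include <stdio.h>

/* Illustrative stubs -- in the kernel these are real primitives. */
static int need_resched_flag;	/* stands in for need_resched() */
static int hlt_counter;		/* non-zero disables idle sleeping */

static void local_irq_disable(void)	{ puts("irqs off"); }
static void local_irq_enable(void)	{ puts("irqs on"); }
static void timer_dyn_reprogram(void)	{ puts("timer set for next pending event"); }
static void arch_idle(void)		{ puts("cpu sleeps until interrupt"); }

/* Mirrors the patched default_idle(): with IRQs off, push the next
 * timer event out as far as possible, then sleep. */
static void default_idle(void)
{
	local_irq_disable();
	if (!need_resched_flag && !hlt_counter) {
		timer_dyn_reprogram();	/* must run with IRQs disabled... */
		arch_idle();		/* ...immediately before sleeping */
	}
	local_irq_enable();
}

int main(void)
{
	default_idle();
	return 0;
}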
diff --git a/arch/arm/mach-aaec2000/Makefile.boot b/arch/arm/mach-aaec2000/Makefile.boot
new file mode 100644
index 000000000000..8f5a8b7c53c7
--- /dev/null
+++ b/arch/arm/mach-aaec2000/Makefile.boot
@@ -0,0 +1 @@
+zreladdr-y	:= 0xf0008000
diff --git a/arch/arm/mach-omap/usb.c b/arch/arm/mach-omap/usb.c
index 6e805d451d0e..7f37857b1a28 100644
--- a/arch/arm/mach-omap/usb.c
+++ b/arch/arm/mach-omap/usb.c
@@ -288,8 +288,8 @@ static void usb_release(struct device *dev)
 static struct resource udc_resources[] = {
 	/* order is significant! */
 	{		/* registers */
-		.start		= IO_ADDRESS(UDC_BASE),
-		.end		= IO_ADDRESS(UDC_BASE + 0xff),
+		.start		= UDC_BASE,
+		.end		= UDC_BASE + 0xff,
 		.flags		= IORESOURCE_MEM,
 	}, {		/* general IRQ */
 		.start		= IH2_BASE + 20,
@@ -355,8 +355,8 @@ static struct platform_device ohci_device = {
 static struct resource otg_resources[] = {
 	/* order is significant! */
 	{
-		.start		= IO_ADDRESS(OTG_BASE),
-		.end		= IO_ADDRESS(OTG_BASE + 0xff),
+		.start		= OTG_BASE,
+		.end		= OTG_BASE + 0xff,
 		.flags		= IORESOURCE_MEM,
 	}, {
 		.start		= IH2_BASE + 8,
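
Note: the point of this hunk is that struct resource entries should carry raw physical addresses, leaving it to the device driver to create the CPU mapping. A hedged sketch of the consuming side in the usual platform-driver idiom (driver and function names are hypothetical, not from this patch):

/* Hypothetical consumer of the physical-address resources above; only
 * the platform_get_resource()/ioremap() pattern is the point here. */
static int udc_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *regs;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* The resource now carries a physical address, so the driver,
	 * not the board file, creates the CPU mapping. */
	regs = ioremap(res->start, res->end - res->start + 1);
	if (!regs)
		return -ENOMEM;

	/* ... access the register block with readl(regs + offset) ... */
	return 0;
}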
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index c08710b1ff02..6dcb23d64bf5 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -522,6 +522,69 @@ static inline void free_area(unsigned long addr, unsigned long end, char *s)
 		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
 }
 
+static inline void
+free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
+{
+	struct page *start_pg, *end_pg;
+	unsigned long pg, pgend;
+
+	/*
+	 * Convert start_pfn/end_pfn to a struct page pointer.
+	 */
+	start_pg = pfn_to_page(start_pfn);
+	end_pg = pfn_to_page(end_pfn);
+
+	/*
+	 * Convert to physical addresses, and
+	 * round start upwards and end downwards.
+	 */
+	pg = PAGE_ALIGN(__pa(start_pg));
+	pgend = __pa(end_pg) & PAGE_MASK;
+
+	/*
+	 * If there are free pages between these,
+	 * free the section of the memmap array.
+	 */
+	if (pg < pgend)
+		free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
+}
+
+/*
+ * The mem_map array can get very big.  Free the unused area of the memory map.
+ */
+static void __init free_unused_memmap_node(int node, struct meminfo *mi)
+{
+	unsigned long bank_start, prev_bank_end = 0;
+	unsigned int i;
+
+	/*
+	 * [FIXME] This relies on each bank being in address order.  This
+	 * may not be the case, especially if the user has provided the
+	 * information on the command line.
+	 */
+	for (i = 0; i < mi->nr_banks; i++) {
+		if (mi->bank[i].size == 0 || mi->bank[i].node != node)
+			continue;
+
+		bank_start = mi->bank[i].start >> PAGE_SHIFT;
+		if (bank_start < prev_bank_end) {
+			printk(KERN_ERR "MEM: unordered memory banks.  "
+				"Not freeing memmap.\n");
+			break;
+		}
+
+		/*
+		 * If we had a previous bank, and there is a space
+		 * between the current bank and the previous, free it.
+		 */
+		if (prev_bank_end && prev_bank_end != bank_start)
+			free_memmap(node, prev_bank_end, bank_start);
+
+		prev_bank_end = (mi->bank[i].start +
+				 mi->bank[i].size) >> PAGE_SHIFT;
+	}
+}
+
 /*
  * mem_init() marks the free areas in the mem_map and tells us how much
  * memory is free.  This is done after various parts of the system have
@@ -540,16 +603,12 @@ void __init mem_init(void)
 	max_mapnr   = virt_to_page(high_memory) - mem_map;
 #endif
 
-	/*
-	 * We may have non-contiguous memory.
-	 */
-	if (meminfo.nr_banks != 1)
-		create_memmap_holes(&meminfo);
-
 	/* this will put all unused low memory onto the freelists */
 	for_each_online_node(node) {
 		pg_data_t *pgdat = NODE_DATA(node);
 
+		free_unused_memmap_node(node, &meminfo);
+
 		if (pgdat->node_spanned_pages != 0)
 			totalram_pages += free_all_bootmem_node(pgdat);
 	}
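
Note: free_unused_memmap_node() is plain interval arithmetic over page-frame numbers. A self-contained simulation (the bank layout is invented for illustration) of which memmap ranges it would release:

#include <stdio.h>

#define PAGE_SHIFT 12

struct bank { unsigned long start, size; };	/* physical base + bytes */

int main(void)
{
	/* Hypothetical sparse layout: two 64 MiB banks with a hole between. */
	struct bank banks[2] = {
		{ 0x00000000UL, 0x04000000UL },
		{ 0x08000000UL, 0x04000000UL },
	};
	unsigned long bank_start, prev_bank_end = 0;
	unsigned int i;

	for (i = 0; i < 2; i++) {
		bank_start = banks[i].start >> PAGE_SHIFT;
		/* A hole between banks means the struct pages covering PFNs
		 * [prev_bank_end, bank_start) are never used, so the memory
		 * backing that slice of mem_map can be given back. */
		if (prev_bank_end && prev_bank_end != bank_start)
			printf("free memmap for pfns %#lx..%#lx\n",
			       prev_bank_end, bank_start);
		prev_bank_end = (banks[i].start + banks[i].size) >> PAGE_SHIFT;
	}
	return 0;
}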
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 2c2b93d77d43..052ab443ec4e 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -169,7 +169,14 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 
 	memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
 
+	/*
+	 * Copy over the kernel and IO PGD entries
+	 */
 	init_pgd = pgd_offset_k(0);
+	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
+	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
+
+	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
 
 	if (!vectors_high()) {
 		/*
@@ -198,14 +205,6 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 		spin_unlock(&mm->page_table_lock);
 	}
 
-	/*
-	 * Copy over the kernel and IO PGD entries
-	 */
-	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
-	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
-
-	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
-
 	return new_pgd;
 
 no_pte:
@@ -698,75 +697,3 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 	for (i = 0; i < nr; i++)
 		create_mapping(io_desc + i);
 }
-
-static inline void
-free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
-{
-	struct page *start_pg, *end_pg;
-	unsigned long pg, pgend;
-
-	/*
-	 * Convert start_pfn/end_pfn to a struct page pointer.
-	 */
-	start_pg = pfn_to_page(start_pfn);
-	end_pg = pfn_to_page(end_pfn);
-
-	/*
-	 * Convert to physical addresses, and
-	 * round start upwards and end downwards.
-	 */
-	pg = PAGE_ALIGN(__pa(start_pg));
-	pgend = __pa(end_pg) & PAGE_MASK;
-
-	/*
-	 * If there are free pages between these,
-	 * free the section of the memmap array.
-	 */
-	if (pg < pgend)
-		free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
-}
-
-static inline void free_unused_memmap_node(int node, struct meminfo *mi)
-{
-	unsigned long bank_start, prev_bank_end = 0;
-	unsigned int i;
-
-	/*
-	 * [FIXME] This relies on each bank being in address order.  This
-	 * may not be the case, especially if the user has provided the
-	 * information on the command line.
-	 */
-	for (i = 0; i < mi->nr_banks; i++) {
-		if (mi->bank[i].size == 0 || mi->bank[i].node != node)
-			continue;
-
-		bank_start = mi->bank[i].start >> PAGE_SHIFT;
-		if (bank_start < prev_bank_end) {
-			printk(KERN_ERR "MEM: unordered memory banks.  "
-				"Not freeing memmap.\n");
-			break;
-		}
-
-		/*
-		 * If we had a previous bank, and there is a space
-		 * between the current bank and the previous, free it.
-		 */
-		if (prev_bank_end && prev_bank_end != bank_start)
-			free_memmap(node, prev_bank_end, bank_start);
-
-		prev_bank_end = PAGE_ALIGN(mi->bank[i].start +
-					   mi->bank[i].size) >> PAGE_SHIFT;
-	}
-}
-
-/*
- * The mem_map array can get very big.  Free
- * the unused area of the memory map.
- */
-void __init create_memmap_holes(struct meminfo *mi)
-{
-	int node;
-
-	for_each_online_node(node)
-		free_unused_memmap_node(node, mi);
-}
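
Note: for orientation, the PGD copy this file keeps (now earlier in get_pgd_slow(), per the first hunk) duplicates only the kernel portion of the page-table directory into each new process, and the dcache clean makes the whole table visible to the hardware walker before any user mapping is installed. A toy calculation of the user/kernel split (the constants are illustrative assumptions for a classic 3G/1G ARM layout, not values taken from this patch):

#include <stdio.h>

/* Illustrative assumptions: 3G/1G split with the 2.6-era ARM 2 MiB
 * PGD granularity. Real values depend on the platform configuration. */
#define PGDIR_SHIFT		21
#define PTRS_PER_PGD		2048UL
#define PAGE_OFFSET		0xc0000000UL
#define FIRST_KERNEL_PGD_NR	(PAGE_OFFSET >> PGDIR_SHIFT)

int main(void)
{
	printf("user entries:   0..%lu (cleared per process)\n",
	       FIRST_KERNEL_PGD_NR - 1);
	printf("kernel entries: %lu..%lu (memcpy'd from the init pgd)\n",
	       FIRST_KERNEL_PGD_NR, PTRS_PER_PGD - 1);
	return 0;
}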
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 30c1dfbb052f..6d3a79e5fef8 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -6,7 +6,7 @@
 # To add an entry into this database, please see Documentation/arm/README,
 # or contact rmk@arm.linux.org.uk
 #
-# Last update: Thu Mar 24 14:34:50 2005
+# Last update: Thu Jun 23 20:19:33 2005
 #
 # machine_is_xxx	CONFIG_xxxx		MACH_TYPE_xxx		number
 #
@@ -243,7 +243,7 @@ yoho			ARCH_YOHO		YOHO			231
 jasper			ARCH_JASPER		JASPER			232
 dsc25			ARCH_DSC25		DSC25			233
 omap_innovator		MACH_OMAP_INNOVATOR	OMAP_INNOVATOR		234
-ramses			ARCH_RAMSES		RAMSES			235
+mnci			ARCH_RAMSES		RAMSES			235
 s28x			ARCH_S28X		S28X			236
 mport3			ARCH_MPORT3		MPORT3			237
 pxa_eagle250		ARCH_PXA_EAGLE250	PXA_EAGLE250		238
@@ -323,7 +323,7 @@ nimbra29x		ARCH_NIMBRA29X		NIMBRA29X		311
 nimbra210		ARCH_NIMBRA210		NIMBRA210		312
 hhp_d95xx		ARCH_HHP_D95XX		HHP_D95XX		313
 labarm			ARCH_LABARM		LABARM			314
-m825xx			ARCH_M825XX		M825XX			315
+comcerto		ARCH_M825XX		M825XX			315
 m7100			SA1100_M7100		M7100			316
 nipc2			ARCH_NIPC2		NIPC2			317
 fu7202			ARCH_FU7202		FU7202			318
@@ -724,3 +724,66 @@ lpc22xx		MACH_LPC22XX		LPC22XX			715
 omap_comet3		MACH_COMET3		COMET3			716
 omap_comet4		MACH_COMET4		COMET4			717
 csb625			MACH_CSB625		CSB625			718
+fortunet2		MACH_FORTUNET2		FORTUNET2		719
+s5h2200			MACH_S5H2200		S5H2200			720
+optorm920		MACH_OPTORM920		OPTORM920		721
+adsbitsyxb		MACH_ADSBITSYXB		ADSBITSYXB		722
+adssphere		MACH_ADSSPHERE		ADSSPHERE		723
+adsportal		MACH_ADSPORTAL		ADSPORTAL		724
+ln2410sbc		MACH_LN2410SBC		LN2410SBC		725
+cb3rufc			MACH_CB3RUFC		CB3RUFC			726
+mp2usb			MACH_MP2USB		MP2USB			727
+ntnp425c		MACH_NTNP425C		NTNP425C		728
+colibri			MACH_COLIBRI		COLIBRI			729
+pcm7220			MACH_PCM7220		PCM7220			730
+gateway7001		MACH_GATEWAY7001	GATEWAY7001		731
+pcm027			MACH_PCM027		PCM027			732
+cmpxa			MACH_CMPXA		CMPXA			733
+anubis			MACH_ANUBIS		ANUBIS			734
+ite8152			MACH_ITE8152		ITE8152			735
+lpc3xxx			MACH_LPC3XXX		LPC3XXX			736
+puppeteer		MACH_PUPPETEER		PUPPETEER		737
+vt001			MACH_MACH_VADATECH	MACH_VADATECH		738
+e570			MACH_E570		E570			739
+x50			MACH_X50		X50			740
+recon			MACH_RECON		RECON			741
+xboardgp8		MACH_XBOARDGP8		XBOARDGP8		742
+fpic2			MACH_FPIC2		FPIC2			743
+akita			MACH_AKITA		AKITA			744
+a81			MACH_A81		A81			745
+svm_sc25x		MACH_SVM_SC25X		SVM_SC25X		746
+vt020			MACH_VADATECH020	VADATECH020		747
+tli			MACH_TLI		TLI			748
+edb9315lc		MACH_EDB9315LC		EDB9315LC		749
+passec			MACH_PASSEC		PASSEC			750
+ds_tiger		MACH_DS_TIGER		DS_TIGER		751
+e310			MACH_E310		E310			752
+e330			MACH_E330		E330			753
+rt3000			MACH_RT3000		RT3000			754
+nokia770		MACH_NOKIA770		NOKIA770		755
+pnx0106			MACH_PNX0106		PNX0106			756
+hx21xx			MACH_HX21XX		HX21XX			757
+faraday			MACH_FARADAY		FARADAY			758
+sbc9312			MACH_SBC9312		SBC9312			759
+batman			MACH_BATMAN		BATMAN			760
+jpd201			MACH_JPD201		JPD201			761
+mipsa			MACH_MIPSA		MIPSA			762
+kacom			MACH_KACOM		KACOM			763
+swarcocpu		MACH_SWARCOCPU		SWARCOCPU		764
+swarcodsl		MACH_SWARCODSL		SWARCODSL		765
+blueangel		MACH_BLUEANGEL		BLUEANGEL		766
+hairygrama		MACH_HAIRYGRAMA		HAIRYGRAMA		767
+banff			MACH_BANFF		BANFF			768
+carmeva			MACH_CARMEVA		CARMEVA			769
+sam255			MACH_SAM255		SAM255			770
+ppm10			MACH_PPM10		PPM10			771
+edb9315a		MACH_EDB9315A		EDB9315A		772
+sunset			MACH_SUNSET		SUNSET			773
+stargate2		MACH_STARGATE2		STARGATE2		774
+intelmote2		MACH_INTELMOTE2		INTELMOTE2		775
+trizeps4		MACH_TRIZEPS4		TRIZEPS4		776
+mainstone2		MACH_MAINSTONE2		MAINSTONE2		777
+ez_ixp42x		MACH_EZ_IXP42X		EZ_IXP42X		778
+tapwave_zodiac		MACH_TAPWAVE_ZODIAC	TAPWAVE_ZODIAC		779
+universalmeter		MACH_UNIVERSALMETER	UNIVERSALMETER		780
+hicoarm9		MACH_HICOARM9		HICOARM9		781
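
Note: each row of this table is consumed by arch/arm/tools/gen-mach-types to produce the mach-types header. Per row, the generated output has roughly this shape (a sketch inferred from the column meanings above, not copied from the real generated header; the colibri entry is one of the rows added here):

/* Sketch of the generated header for one table row. */
#define MACH_TYPE_COLIBRI	729

#ifdef CONFIG_MACH_COLIBRI
# define machine_is_colibri()	(machine_arch_type == MACH_TYPE_COLIBRI)
#else
# define machine_is_colibri()	(0)
#endif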
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 3762f6b35ab2..fc8b17521761 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -127,48 +127,23 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 	regs->eip = (unsigned long)&p->ainsn.insn;
 }
 
-struct task_struct *arch_get_kprobe_task(void *ptr)
-{
-	return ((struct thread_info *) (((unsigned long) ptr) &
-					(~(THREAD_SIZE -1))))->task;
-}
-
 void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
 {
 	unsigned long *sara = (unsigned long *)&regs->esp;
 	struct kretprobe_instance *ri;
-	static void *orig_ret_addr;
+
+	if ((ri = get_free_rp_inst(rp)) != NULL) {
+		ri->rp = rp;
+		ri->task = current;
+		ri->ret_addr = (kprobe_opcode_t *) *sara;
 
-	/*
-	 * Save the return address when the return probe hits
-	 * the first time, and use it to populate the (krprobe
-	 * instance)->ret_addr for subsequent return probes at
-	 * the same addrress since stack address would have
-	 * the kretprobe_trampoline by then.
-	 */
-	if (((void*) *sara) != kretprobe_trampoline)
-		orig_ret_addr = (void*) *sara;
-
-	if ((ri = get_free_rp_inst(rp)) != NULL) {
-		ri->rp = rp;
-		ri->stack_addr = sara;
-		ri->ret_addr = orig_ret_addr;
-		add_rp_inst(ri);
 		/* Replace the return addr with trampoline addr */
 		*sara = (unsigned long) &kretprobe_trampoline;
-	} else {
-		rp->nmissed++;
-	}
-}
 
-void arch_kprobe_flush_task(struct task_struct *tk)
-{
-	struct kretprobe_instance *ri;
-	while ((ri = get_rp_inst_tsk(tk)) != NULL) {
-		*((unsigned long *)(ri->stack_addr)) =
-					(unsigned long) ri->ret_addr;
-		recycle_rp_inst(ri);
-	}
+		add_rp_inst(ri);
+	} else {
+		rp->nmissed++;
+	}
 }
 
 /*
@@ -286,36 +261,59 @@ no_kprobe:
  */
 int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 {
-	struct task_struct *tsk;
-	struct kretprobe_instance *ri;
-	struct hlist_head *head;
-	struct hlist_node *node;
-	unsigned long *sara = ((unsigned long *) &regs->esp) - 1;
-
-	tsk = arch_get_kprobe_task(sara);
-	head = kretprobe_inst_table_head(tsk);
-
-	hlist_for_each_entry(ri, node, head, hlist) {
-		if (ri->stack_addr == sara && ri->rp) {
-			if (ri->rp->handler)
-				ri->rp->handler(ri, regs);
-		}
-	}
-	return 0;
-}
+	struct kretprobe_instance *ri = NULL;
+	struct hlist_head *head;
+	struct hlist_node *node, *tmp;
+	unsigned long orig_ret_address = 0;
+	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
 
-void trampoline_post_handler(struct kprobe *p, struct pt_regs *regs,
-						unsigned long flags)
-{
-	struct kretprobe_instance *ri;
-	/* RA already popped */
-	unsigned long *sara = ((unsigned long *)&regs->esp) - 1;
+	head = kretprobe_inst_table_head(current);
 
-	while ((ri = get_rp_inst(sara))) {
-		regs->eip = (unsigned long)ri->ret_addr;
+	/*
+	 * It is possible to have multiple instances associated with a given
+	 * task either because multiple functions in the call path have
+	 * a return probe installed on them, and/or more than one return
+	 * probe was registered for a target function.
+	 *
+	 * We can handle this because:
+	 *     - instances are always inserted at the head of the list
+	 *     - when multiple return probes are registered for the same
+	 *       function, the first instance's ret_addr will point to the
+	 *       real return address, and all the rest will point to
+	 *       kretprobe_trampoline
+	 */
+	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+		if (ri->task != current)
+			/* another task is sharing our hash bucket */
+			continue;
+
+		if (ri->rp && ri->rp->handler)
+			ri->rp->handler(ri, regs);
+
+		orig_ret_address = (unsigned long)ri->ret_addr;
 		recycle_rp_inst(ri);
+
+		if (orig_ret_address != trampoline_address)
+			/*
+			 * This is the real return address. Any other
+			 * instances associated with this task are for
+			 * other calls deeper on the call stack
+			 */
+			break;
 	}
-	regs->eflags &= ~TF_MASK;
+
+	BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
+	regs->eip = orig_ret_address;
+
+	unlock_kprobes();
+	preempt_enable_no_resched();
+
+	/*
+	 * By returning a non-zero value, we are telling
+	 * kprobe_handler() that we have handled unlocking
+	 * and re-enabling preemption.
+	 */
+	return 1;
 }
 
 /*
@@ -403,8 +401,7 @@ static inline int post_kprobe_handler(struct pt_regs *regs)
 		current_kprobe->post_handler(current_kprobe, regs, 0);
 	}
 
-	if (current_kprobe->post_handler != trampoline_post_handler)
-		resume_execution(current_kprobe, regs);
+	resume_execution(current_kprobe, regs);
 	regs->eflags |= kprobe_saved_eflags;
 
 	/*Restore back the original saved kprobes variables and continue. */
@@ -534,3 +531,13 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	}
 	return 0;
 }
+
+static struct kprobe trampoline_p = {
+	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
+	.pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init(void)
+{
+	return register_kprobe(&trampoline_p);
+}
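
Note: the list-walk comment added above is the heart of the new kretprobe design. A self-contained C simulation of just that walk (toy list types; TRAMPOLINE is a stand-in constant, not the kernel symbol):

#include <stdio.h>

#define TRAMPOLINE 0xdeadbeefUL	/* stand-in for &kretprobe_trampoline */

/* Toy stand-in for a kretprobe_instance on a per-task list. */
struct inst {
	unsigned long ret_addr;
	struct inst *next;
};

/* Instances are pushed at the head, ordering them from the most
 * recently hooked call outward. */
static struct inst *push(struct inst *head, struct inst *ri,
			 unsigned long ret_addr)
{
	ri->ret_addr = ret_addr;
	ri->next = head;
	return ri;
}

int main(void)
{
	struct inst a, b, c, *head = NULL, *ri;
	unsigned long orig_ret_address = 0;

	/* Two extra return probes on the same function saved the already-
	 * rewritten (trampoline) address; only the first-pushed instance
	 * still holds the real return address. */
	head = push(head, &a, 0x8048123UL);	/* real return address */
	head = push(head, &b, TRAMPOLINE);
	head = push(head, &c, TRAMPOLINE);

	for (ri = head; ri; ri = ri->next) {
		orig_ret_address = ri->ret_addr;	/* recycle_rp_inst() here */
		if (orig_ret_address != TRAMPOLINE)
			break;		/* found the real return address */
	}
	printf("resume execution at %#lx\n", orig_ret_address);
	return 0;
}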
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 5f8cfa6b7940..ba243a4cc119 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -617,6 +617,33 @@ handle_io_bitmap(struct thread_struct *next, struct tss_struct *tss)
 }
 
 /*
+ * This function selects if the context switch from prev to next
+ * has to tweak the TSC disable bit in the cr4.
+ */
+static inline void disable_tsc(struct task_struct *prev_p,
+			       struct task_struct *next_p)
+{
+	struct thread_info *prev, *next;
+
+	/*
+	 * gcc should eliminate the ->thread_info dereference if
+	 * has_secure_computing returns 0 at compile time (SECCOMP=n).
+	 */
+	prev = prev_p->thread_info;
+	next = next_p->thread_info;
+
+	if (has_secure_computing(prev) || has_secure_computing(next)) {
+		/* slow path here */
+		if (has_secure_computing(prev) &&
+		    !has_secure_computing(next)) {
+			write_cr4(read_cr4() & ~X86_CR4_TSD);
+		} else if (!has_secure_computing(prev) &&
+			   has_secure_computing(next))
+			write_cr4(read_cr4() | X86_CR4_TSD);
+	}
+}
+
+/*
  * switch_to(x,yn) should switch tasks from x to y.
  *
  * We fsave/fwait so that an exception goes off at the right time
@@ -695,6 +722,8 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 	if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr))
 		handle_io_bitmap(next, tss);
 
+	disable_tsc(prev_p, next_p);
+
 	return prev_p;
 }
 
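
Note: background for disable_tsc(): setting CR4.TSD makes the rdtsc instruction privileged, so a seccomp-restricted task traps instead of reading the cycle counter. A tiny model of the transition logic (the state variable and helper are invented; only the branch structure mirrors the code above):

#include <stdio.h>

static int cr4_tsd;	/* models the X86_CR4_TSD bit */

/* Only touches "cr4" when the seccomp status actually changes across
 * the switch -- the same shape as disable_tsc() above. */
static void switch_tsc(int prev_seccomp, int next_seccomp)
{
	if (prev_seccomp || next_seccomp) {
		if (prev_seccomp && !next_seccomp)
			cr4_tsd = 0;	/* write_cr4(read_cr4() & ~X86_CR4_TSD) */
		else if (!prev_seccomp && next_seccomp)
			cr4_tsd = 1;	/* write_cr4(read_cr4() |  X86_CR4_TSD) */
	}
}

int main(void)
{
	int prev, next;

	for (prev = 0; prev <= 1; prev++)
		for (next = 0; next <= 1; next++) {
			switch_tsc(prev, next);
			printf("prev=%d next=%d -> TSD=%d\n",
			       prev, next, cr4_tsd);
		}
	return 0;
}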
diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S
index 442a6e937b19..3db9a04aec6e 100644
--- a/arch/i386/kernel/syscall_table.S
+++ b/arch/i386/kernel/syscall_table.S
@@ -289,3 +289,5 @@ ENTRY(sys_call_table)
 	.long sys_add_key
 	.long sys_request_key
 	.long sys_keyctl
+	.long sys_ioprio_set
+	.long sys_ioprio_get		/* 290 */
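
Note: a usage sketch for the two syscalls wired up here, callable from userspace via syscall(2). The encoding constants follow the Linux ioprio scheme; 289/290 are the i386 slots from this table (other architectures differ, e.g. 1274/1275 on ia64 below):

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_PRIO_VALUE(cl, lvl)	(((cl) << IOPRIO_CLASS_SHIFT) | (lvl))

enum { IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE };
enum { IOPRIO_WHO_PROCESS = 1, IOPRIO_WHO_PGRP, IOPRIO_WHO_USER };

int main(void)
{
	/* 289/290 are the i386 numbers from the table above. */
	long set = syscall(289, IOPRIO_WHO_PROCESS, 0,
			   IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4));
	long get = syscall(290, IOPRIO_WHO_PROCESS, 0);

	printf("ioprio_set=%ld ioprio_get=%ld\n", set, get);
	return 0;
}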
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index b1d5d3d5276c..785a51b0ad8e 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1577,8 +1577,8 @@ sys_call_table:
 	data8 sys_add_key
 	data8 sys_request_key
 	data8 sys_keyctl
-	data8 sys_ni_syscall
-	data8 sys_ni_syscall			// 1275
+	data8 sys_ioprio_set
+	data8 sys_ioprio_get			// 1275
 	data8 sys_set_zone_reclaim
 	data8 sys_ni_syscall
 	data8 sys_ni_syscall
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 5978823d5c63..3aa3167edbec 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -34,6 +34,7 @@
 
 #include <asm/pgtable.h>
 #include <asm/kdebug.h>
+#include <asm/sections.h>
 
 extern void jprobe_inst_return(void);
 
@@ -263,13 +264,33 @@ static inline void get_kprobe_inst(bundle_t *bundle, uint slot,
 	}
 }
 
+/* Returns non-zero if the addr is in the Interrupt Vector Table */
+static inline int in_ivt_functions(unsigned long addr)
+{
+	return (addr >= (unsigned long)__start_ivt_text
+		&& addr < (unsigned long)__end_ivt_text);
+}
+
 static int valid_kprobe_addr(int template, int slot, unsigned long addr)
 {
 	if ((slot > 2) || ((bundle_encoding[template][1] == L) && slot > 1)) {
-		printk(KERN_WARNING "Attempting to insert unaligned kprobe at 0x%lx\n",
-				addr);
+		printk(KERN_WARNING "Attempting to insert unaligned kprobe "
+				"at 0x%lx\n", addr);
 		return -EINVAL;
 	}
+
+	if (in_ivt_functions(addr)) {
+		printk(KERN_WARNING "Kprobes can't be inserted inside "
+				"IVT functions at 0x%lx\n", addr);
+		return -EINVAL;
+	}
+
+	if (slot == 1 && bundle_encoding[template][1] != L) {
+		printk(KERN_WARNING "Inserting kprobes on slot #1 "
+				"is not supported\n");
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
@@ -290,6 +311,94 @@ static inline void set_current_kprobe(struct kprobe *p)
 	current_kprobe = p;
 }
 
+static void kretprobe_trampoline(void)
+{
+}
+
+/*
+ * At this point the target function has been tricked into
+ * returning into our trampoline.  Lookup the associated instance
+ * and then:
+ *    - call the handler function
+ *    - cleanup by marking the instance as unused
+ *    - long jump back to the original return address
+ */
+int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kretprobe_instance *ri = NULL;
+	struct hlist_head *head;
+	struct hlist_node *node, *tmp;
+	unsigned long orig_ret_address = 0;
+	unsigned long trampoline_address =
+		((struct fnptr *)kretprobe_trampoline)->ip;
+
+	head = kretprobe_inst_table_head(current);
+
+	/*
+	 * It is possible to have multiple instances associated with a given
+	 * task either because multiple functions in the call path have
+	 * a return probe installed on them, and/or more than one return
+	 * probe was registered for a target function.
+	 *
+	 * We can handle this because:
+	 *     - instances are always inserted at the head of the list
+	 *     - when multiple return probes are registered for the same
+	 *       function, the first instance's ret_addr will point to the
+	 *       real return address, and all the rest will point to
+	 *       kretprobe_trampoline
+	 */
+	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+		if (ri->task != current)
+			/* another task is sharing our hash bucket */
+			continue;
+
+		if (ri->rp && ri->rp->handler)
+			ri->rp->handler(ri, regs);
+
+		orig_ret_address = (unsigned long)ri->ret_addr;
+		recycle_rp_inst(ri);
+
+		if (orig_ret_address != trampoline_address)
+			/*
+			 * This is the real return address. Any other
+			 * instances associated with this task are for
+			 * other calls deeper on the call stack
+			 */
+			break;
+	}
+
+	BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
+	regs->cr_iip = orig_ret_address;
+
+	unlock_kprobes();
+	preempt_enable_no_resched();
+
+	/*
+	 * By returning a non-zero value, we are telling
+	 * kprobe_handler() that we have handled unlocking
+	 * and re-enabling preemption.
+	 */
+	return 1;
+}
+
+void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
+{
+	struct kretprobe_instance *ri;
+
+	if ((ri = get_free_rp_inst(rp)) != NULL) {
+		ri->rp = rp;
+		ri->task = current;
+		ri->ret_addr = (kprobe_opcode_t *)regs->b0;
+
+		/* Replace the return addr with trampoline addr */
+		regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip;
+
+		add_rp_inst(ri);
+	} else {
+		rp->nmissed++;
+	}
+}
+
 int arch_prepare_kprobe(struct kprobe *p)
 {
 	unsigned long addr = (unsigned long) p->addr;
@@ -492,8 +601,8 @@ static int pre_kprobes_handler(struct die_args *args)
 	if (p->pre_handler && p->pre_handler(p, regs))
 		/*
 		 * Our pre-handler is specifically requesting that we just
-		 * do a return.  This is handling the case where the
-		 * pre-handler is really our special jprobe pre-handler.
+		 * do a return.  This is used for both the jprobe pre-handler
+		 * and the kretprobe trampoline
 		 */
 		return 1;
 
@@ -599,3 +708,14 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	*regs = jprobe_saved_regs;
 	return 1;
 }
+
+static struct kprobe trampoline_p = {
+	.pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init(void)
+{
+	trampoline_p.addr =
+		(kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip;
+	return register_kprobe(&trampoline_p);
+}
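
Note: the double indirection through ->ip exists because on ia64 a function symbol denotes a function descriptor, not code, which is also why trampoline_p.addr can only be filled in at runtime in arch_init(). A sketch of the idea (field layout per the ia64 software conventions; the kernel's real struct fnptr definition lives in its own headers):

/* The ia64 convention assumed by the code above: a function "pointer"
 * really points at a two-word descriptor. */
struct fnptr {
	unsigned long ip;	/* entry address of the actual code */
	unsigned long gp;	/* global pointer the code expects */
};

/* So casting the symbol and reading ->ip yields the probeable code
 * address: */
#define FUNC_ENTRY_IP(fn)	(((struct fnptr *)(fn))->ip)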
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index ebb71f3d6d19..6e35bff05d59 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -27,6 +27,7 @@
 #include <linux/efi.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
+#include <linux/kprobes.h>
 
 #include <asm/cpu.h>
 #include <asm/delay.h>
@@ -707,6 +708,13 @@ kernel_thread_helper (int (*fn)(void *), void *arg)
 void
 flush_thread (void)
 {
+	/*
+	 * Remove function-return probe instances associated with this task
+	 * and put them back on the free list. Do not insert an exit probe for
+	 * this function, it will be disabled by kprobe_flush_task if you do.
+	 */
+	kprobe_flush_task(current);
+
 	/* drop floating-point and debug-register state if it exists: */
 	current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
 	ia64_drop_fpu(current);
@@ -721,6 +729,14 @@ flush_thread (void)
 void
 exit_thread (void)
 {
+
+	/*
+	 * Remove function-return probe instances associated with this task
+	 * and put them back on the free list. Do not insert an exit probe for
+	 * this function, it will be disabled by kprobe_flush_task if you do.
+	 */
+	kprobe_flush_task(current);
+
 	ia64_drop_fpu(current);
 #ifdef CONFIG_PERFMON
 	/* if needed, stop monitoring and flush state to perfmon context */
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index b9f0db4c1b04..a676e79e0681 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -8,6 +8,11 @@
 #define LOAD_OFFSET	(KERNEL_START - KERNEL_TR_PAGE_SIZE)
 #include <asm-generic/vmlinux.lds.h>
 
+#define IVT_TEXT							\
+		VMLINUX_SYMBOL(__start_ivt_text) = .;			\
+		*(.text.ivt)						\
+		VMLINUX_SYMBOL(__end_ivt_text) = .;
+
 OUTPUT_FORMAT("elf64-ia64-little")
 OUTPUT_ARCH(ia64)
 ENTRY(phys_start)
@@ -39,7 +44,7 @@ SECTIONS
 
   .text : AT(ADDR(.text) - LOAD_OFFSET)
 	{
-		*(.text.ivt)
+		IVT_TEXT
 		*(.text)
 		SCHED_TEXT
 		LOCK_TEXT
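
Note: the bracketing symbols added to the linker script are consumed from C as extern arrays, which is what the new #include <asm/sections.h> in the ia64 kprobes hunk above supplies. A sketch of the consuming side:

/* Linker-defined symbols have no storage of their own; declaring them
 * as arrays and taking their addresses is the standard idiom. */
extern char __start_ivt_text[], __end_ivt_text[];

static inline int addr_in_ivt(unsigned long addr)
{
	return addr >= (unsigned long)__start_ivt_text &&
	       addr <  (unsigned long)__end_ivt_text;
}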
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 508026ae5842..65ee15396ffd 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -457,7 +457,7 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs)
 	if (!user_mode(regs))
 		return 1;
 
-	if (try_to_freeze(0))
+	if (try_to_freeze())
 		goto no_signal;
 
 	if (!oldset)
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
index b6a63a49a232..191a8def3bdb 100644
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -1449,3 +1449,5 @@ _GLOBAL(sys_call_table)
 	.long sys_request_key		/* 270 */
 	.long sys_keyctl
 	.long sys_waitid
+	.long sys_ioprio_set
+	.long sys_ioprio_get
diff --git a/arch/ppc/mm/init.c b/arch/ppc/mm/init.c
index 334ef4150d92..6164a2b34733 100644
--- a/arch/ppc/mm/init.c
+++ b/arch/ppc/mm/init.c
@@ -606,9 +606,19 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 		struct page *page = pfn_to_page(pfn);
 		if (!PageReserved(page)
 		    && !test_bit(PG_arch_1, &page->flags)) {
-			if (vma->vm_mm == current->active_mm)
+			if (vma->vm_mm == current->active_mm) {
+#ifdef CONFIG_8xx
+			/* On 8xx, cache control instructions (particularly
+			 * "dcbst" from flush_dcache_icache) fault as write
+			 * operation if there is an unpopulated TLB entry
+			 * for the address in question. To workaround that,
+			 * we invalidate the TLB here, thus avoiding dcbst
+			 * misbehaviour.
+			 */
+				_tlbie(address);
+#endif
 				__flush_dcache_icache((void *) address);
-			else
+			} else
 				flush_dcache_icache_page(page);
 			set_bit(PG_arch_1, &page->flags);
 		}
diff --git a/arch/ppc/platforms/pmac_sleep.S b/arch/ppc/platforms/pmac_sleep.S
index f459ade1bd63..016a74649155 100644
--- a/arch/ppc/platforms/pmac_sleep.S
+++ b/arch/ppc/platforms/pmac_sleep.S
@@ -46,7 +46,7 @@
 	.section .text
 	.align	5
 
-#if defined(CONFIG_PMAC_PBOOK) || defined(CONFIG_CPU_FREQ_PMAC)
+#if defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ_PMAC)
 
 /* This gets called by via-pmu.c late during the sleep process.
  * The PMU was already send the sleep command and will shut us down
@@ -382,7 +382,7 @@ turn_on_mmu:
 	isync
 	rfi
 
-#endif /* defined(CONFIG_PMAC_PBOOK) || defined(CONFIG_CPU_FREQ) */
+#endif /* defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ) */
 
 	.section .data
 	.balign	L1_CACHE_LINE_SIZE
diff --git a/arch/ppc/platforms/pmac_time.c b/arch/ppc/platforms/pmac_time.c
index de60ccc7db9f..778ce4fec368 100644
--- a/arch/ppc/platforms/pmac_time.c
+++ b/arch/ppc/platforms/pmac_time.c
@@ -206,7 +206,7 @@ via_calibrate_decr(void)
 	return 1;
 }
 
-#ifdef CONFIG_PMAC_PBOOK
+#ifdef CONFIG_PM
 /*
  * Reset the time after a sleep.
  */
@@ -238,7 +238,7 @@ time_sleep_notify(struct pmu_sleep_notifier *self, int when)
 static struct pmu_sleep_notifier time_sleep_notifier __pmacdata = {
 	time_sleep_notify, SLEEP_LEVEL_MISC,
 };
-#endif /* CONFIG_PMAC_PBOOK */
+#endif /* CONFIG_PM */
 
 /*
  * Query the OF and get the decr frequency.
@@ -251,9 +251,9 @@ pmac_calibrate_decr(void)
 	struct device_node *cpu;
 	unsigned int freq, *fp;
 
-#ifdef CONFIG_PMAC_PBOOK
+#ifdef CONFIG_PM
 	pmu_register_sleep_notifier(&time_sleep_notifier);
-#endif /* CONFIG_PMAC_PBOOK */
+#endif /* CONFIG_PM */
 
 	/* We assume MacRISC2 machines have correct device-tree
 	 * calibration. That's better since the VIA itself seems
diff --git a/arch/ppc/platforms/sandpoint.c b/arch/ppc/platforms/sandpoint.c
index 70e58f43f2b8..8b149c2fc54f 100644
--- a/arch/ppc/platforms/sandpoint.c
+++ b/arch/ppc/platforms/sandpoint.c
@@ -324,6 +324,7 @@ sandpoint_setup_arch(void)
 			pdata[1].irq = 0;
 			pdata[1].mapbase = 0;
 		}
+	}
 
 	printk(KERN_INFO "Motorola SPS Sandpoint Test Platform\n");
 	printk(KERN_INFO "Port by MontaVista Software, Inc. (source@mvista.com)\n");
diff --git a/arch/ppc/syslib/open_pic.c b/arch/ppc/syslib/open_pic.c
index b45d8268bf93..ad39b86ca92c 100644
--- a/arch/ppc/syslib/open_pic.c
+++ b/arch/ppc/syslib/open_pic.c
@@ -370,8 +370,9 @@ void __init openpic_init(int offset)
 	/* Initialize IPI interrupts */
 	if ( ppc_md.progress ) ppc_md.progress("openpic: ipi",0x3bb);
 	for (i = 0; i < OPENPIC_NUM_IPI; i++) {
-		/* Disabled, Priority 10..13 */
-		openpic_initipi(i, 10+i, OPENPIC_VEC_IPI+i+offset);
+		/* Disabled, increased priorities 10..13 */
+		openpic_initipi(i, OPENPIC_PRIORITY_IPI_BASE+i,
+				OPENPIC_VEC_IPI+i+offset);
 		/* IPIs are per-CPU */
 		irq_desc[OPENPIC_VEC_IPI+i+offset].status |= IRQ_PER_CPU;
 		irq_desc[OPENPIC_VEC_IPI+i+offset].handler = &open_pic_ipi;
@@ -399,8 +400,9 @@ void __init openpic_init(int offset)
 		if (sense & IRQ_SENSE_MASK)
 			irq_desc[i+offset].status = IRQ_LEVEL;
 
-		/* Enabled, Priority 8 */
-		openpic_initirq(i, 8, i+offset, (sense & IRQ_POLARITY_MASK),
+		/* Enabled, Default priority */
+		openpic_initirq(i, OPENPIC_PRIORITY_DEFAULT, i+offset,
+				(sense & IRQ_POLARITY_MASK),
 				(sense & IRQ_SENSE_MASK));
 		/* Processor 0 */
 		openpic_mapirq(i, CPU_MASK_CPU0, CPU_MASK_NONE);
@@ -656,6 +658,18 @@ static void __init openpic_maptimer(u_int timer, cpumask_t cpumask)
 }
 
 /*
+ * Change the priority of an interrupt
+ */
+void __init
+openpic_set_irq_priority(u_int irq, u_int pri)
+{
+	check_arg_irq(irq);
+	openpic_safe_writefield(&ISR[irq - open_pic_irq_offset]->Vector_Priority,
+				OPENPIC_PRIORITY_MASK,
+				pri << OPENPIC_PRIORITY_SHIFT);
+}
+
+/*
  * Initalize the interrupt source which will generate an NMI.
  * This raises the interrupt's priority from 8 to 9.
  *
@@ -665,9 +679,7 @@ void __init
 openpic_init_nmi_irq(u_int irq)
 {
 	check_arg_irq(irq);
-	openpic_safe_writefield(&ISR[irq - open_pic_irq_offset]->Vector_Priority,
-				OPENPIC_PRIORITY_MASK,
-				9 << OPENPIC_PRIORITY_SHIFT);
+	openpic_set_irq_priority(irq, OPENPIC_PRIORITY_NMI);
 }
 
 /*
diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c
index 782ce3efa2c1..1d2ff6d6b0b3 100644
--- a/arch/ppc64/kernel/kprobes.c
+++ b/arch/ppc64/kernel/kprobes.c
@@ -36,6 +36,8 @@
 #include <asm/kdebug.h>
 #include <asm/sstep.h>
 
+static DECLARE_MUTEX(kprobe_mutex);
+
 static struct kprobe *current_kprobe;
 static unsigned long kprobe_status, kprobe_saved_msr;
 static struct kprobe *kprobe_prev;
@@ -54,6 +56,15 @@ int arch_prepare_kprobe(struct kprobe *p)
 		printk("Cannot register a kprobe on rfid or mtmsrd\n");
 		ret = -EINVAL;
 	}
+
+	/* insn must be on a special executable page on ppc64 */
+	if (!ret) {
+		up(&kprobe_mutex);
+		p->ainsn.insn = get_insn_slot();
+		down(&kprobe_mutex);
+		if (!p->ainsn.insn)
+			ret = -ENOMEM;
+	}
 	return ret;
 }
 
@@ -79,16 +90,22 @@ void arch_disarm_kprobe(struct kprobe *p)
 
 void arch_remove_kprobe(struct kprobe *p)
 {
+	up(&kprobe_mutex);
+	free_insn_slot(p->ainsn.insn);
+	down(&kprobe_mutex);
 }
 
 static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 {
+	kprobe_opcode_t insn = *p->ainsn.insn;
+
 	regs->msr |= MSR_SE;
-	/*single step inline if it a breakpoint instruction*/
-	if (p->opcode == BREAKPOINT_INSTRUCTION)
+
+	/* single step inline if it is a trap variant */
+	if (IS_TW(insn) || IS_TD(insn) || IS_TWI(insn) || IS_TDI(insn))
 		regs->nip = (unsigned long)p->addr;
 	else
-		regs->nip = (unsigned long)&p->ainsn.insn;
+		regs->nip = (unsigned long)p->ainsn.insn;
 }
 
 static inline void save_previous_kprobe(void)
@@ -105,6 +122,23 @@ static inline void restore_previous_kprobe(void)
 	kprobe_saved_msr = kprobe_saved_msr_prev;
 }
 
+void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
+{
+	struct kretprobe_instance *ri;
+
+	if ((ri = get_free_rp_inst(rp)) != NULL) {
+		ri->rp = rp;
+		ri->task = current;
+		ri->ret_addr = (kprobe_opcode_t *)regs->link;
+
+		/* Replace the return addr with trampoline addr */
+		regs->link = (unsigned long)kretprobe_trampoline;
+		add_rp_inst(ri);
+	} else {
+		rp->nmissed++;
+	}
+}
+
 static inline int kprobe_handler(struct pt_regs *regs)
 {
 	struct kprobe *p;
@@ -195,6 +229,78 @@ no_kprobe:
 }
 
 /*
+ * Function return probe trampoline:
+ * 	- init_kprobes() establishes a probepoint here
+ * 	- When the probed function returns, this probe
+ * 		causes the handlers to fire
+ */
+void kretprobe_trampoline_holder(void)
+{
+	asm volatile(".global kretprobe_trampoline\n"
+			"kretprobe_trampoline:\n"
+			"nop\n");
+}
+
+/*
+ * Called when the probe at kretprobe trampoline is hit
+ */
+int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kretprobe_instance *ri = NULL;
+	struct hlist_head *head;
+	struct hlist_node *node, *tmp;
+	unsigned long orig_ret_address = 0;
+	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+
+	head = kretprobe_inst_table_head(current);
+
+	/*
+	 * It is possible to have multiple instances associated with a given
+	 * task either because multiple functions in the call path have
+	 * a return probe installed on them, and/or more than one return
+	 * probe was registered for a target function.
+	 *
+	 * We can handle this because:
+	 *     - instances are always inserted at the head of the list
+	 *     - when multiple return probes are registered for the same
+	 *       function, the first instance's ret_addr will point to the
+	 *       real return address, and all the rest will point to
+	 *       kretprobe_trampoline
+	 */
+	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+		if (ri->task != current)
+			/* another task is sharing our hash bucket */
+			continue;
+
+		if (ri->rp && ri->rp->handler)
+			ri->rp->handler(ri, regs);
+
+		orig_ret_address = (unsigned long)ri->ret_addr;
+		recycle_rp_inst(ri);
+
+		if (orig_ret_address != trampoline_address)
+			/*
+			 * This is the real return address. Any other
+			 * instances associated with this task are for
+			 * other calls deeper on the call stack
+			 */
+			break;
+	}
+
+	BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
+	regs->nip = orig_ret_address;
+
+	unlock_kprobes();
+
+	/*
+	 * By returning a non-zero value, we are telling
+	 * kprobe_handler() that we have handled unlocking
+	 * and re-enabling preemption.
+	 */
+	return 1;
+}
+
+/*
  * Called after single-stepping. p->addr is the address of the
  * instruction whose first byte has been replaced by the "breakpoint"
  * instruction. To avoid the SMP problems that can occur when we
@@ -205,9 +311,10 @@ no_kprobe:
 static void resume_execution(struct kprobe *p, struct pt_regs *regs)
 {
 	int ret;
+	unsigned int insn = *p->ainsn.insn;
 
 	regs->nip = (unsigned long)p->addr;
-	ret = emulate_step(regs, p->ainsn.insn[0]);
+	ret = emulate_step(regs, insn);
 	if (ret == 0)
 		regs->nip = (unsigned long)p->addr + 4;
 }
@@ -331,3 +438,13 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	memcpy(regs, &jprobe_saved_regs, sizeof(struct pt_regs));
 	return 1;
 }
+
+static struct kprobe trampoline_p = {
+	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
+	.pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init(void)
+{
+	return register_kprobe(&trampoline_p);
+}
diff --git a/arch/ppc64/kernel/ppc_ksyms.c b/arch/ppc64/kernel/ppc_ksyms.c
index b230a63fe4c8..705742f4eec6 100644
--- a/arch/ppc64/kernel/ppc_ksyms.c
+++ b/arch/ppc64/kernel/ppc_ksyms.c
@@ -75,6 +75,7 @@ EXPORT_SYMBOL(giveup_fpu);
 EXPORT_SYMBOL(giveup_altivec);
 #endif
 EXPORT_SYMBOL(__flush_icache_range);
+EXPORT_SYMBOL(flush_dcache_range);
 
 #ifdef CONFIG_SMP
 #ifdef CONFIG_PPC_ISERIES
diff --git a/arch/ppc64/kernel/process.c b/arch/ppc64/kernel/process.c
index aba89554d89d..f7cae05e40fb 100644
--- a/arch/ppc64/kernel/process.c
+++ b/arch/ppc64/kernel/process.c
@@ -36,6 +36,7 @@
 #include <linux/kallsyms.h>
 #include <linux/interrupt.h>
 #include <linux/utsname.h>
+#include <linux/kprobes.h>
 
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
@@ -307,6 +308,8 @@ void show_regs(struct pt_regs * regs)
 
 void exit_thread(void)
 {
+	kprobe_flush_task(current);
+
 #ifndef CONFIG_SMP
 	if (last_task_used_math == current)
 		last_task_used_math = NULL;
@@ -321,6 +324,7 @@ void flush_thread(void)
 {
 	struct thread_info *t = current_thread_info();
 
+	kprobe_flush_task(current);
 	if (t->flags & _TIF_ABI_PENDING)
 		t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);
 
diff --git a/arch/ppc64/kernel/time.c b/arch/ppc64/kernel/time.c index 2348a75e050d..2a532db9138a 100644 --- a/arch/ppc64/kernel/time.c +++ b/arch/ppc64/kernel/time.c | |||
@@ -91,6 +91,7 @@ unsigned long tb_to_xs; | |||
91 | unsigned tb_to_us; | 91 | unsigned tb_to_us; |
92 | unsigned long processor_freq; | 92 | unsigned long processor_freq; |
93 | DEFINE_SPINLOCK(rtc_lock); | 93 | DEFINE_SPINLOCK(rtc_lock); |
94 | EXPORT_SYMBOL_GPL(rtc_lock); | ||
94 | 95 | ||
95 | unsigned long tb_to_ns_scale; | 96 | unsigned long tb_to_ns_scale; |
96 | unsigned long tb_to_ns_shift; | 97 | unsigned long tb_to_ns_shift; |
diff --git a/arch/sparc64/kernel/auxio.c b/arch/sparc64/kernel/auxio.c index a0716ccc2f4a..8852c20c8d99 100644 --- a/arch/sparc64/kernel/auxio.c +++ b/arch/sparc64/kernel/auxio.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <asm/ebus.h> | 16 | #include <asm/ebus.h> |
17 | #include <asm/auxio.h> | 17 | #include <asm/auxio.h> |
18 | 18 | ||
19 | /* This cannot be static, as it is referenced in entry.S */ | 19 | /* This cannot be static, as it is referenced in irq.c */ |
20 | void __iomem *auxio_register = NULL; | 20 | void __iomem *auxio_register = NULL; |
21 | 21 | ||
22 | enum auxio_type { | 22 | enum auxio_type { |
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S index a47f2d0b1a29..eee516a71c14 100644 --- a/arch/sparc64/kernel/entry.S +++ b/arch/sparc64/kernel/entry.S | |||
@@ -271,8 +271,9 @@ cplus_fptrap_insn_1: | |||
271 | fmuld %f0, %f2, %f26 | 271 | fmuld %f0, %f2, %f26 |
272 | faddd %f0, %f2, %f28 | 272 | faddd %f0, %f2, %f28 |
273 | fmuld %f0, %f2, %f30 | 273 | fmuld %f0, %f2, %f30 |
274 | membar #Sync | ||
274 | b,pt %xcc, fpdis_exit | 275 | b,pt %xcc, fpdis_exit |
275 | membar #Sync | 276 | nop |
276 | 2: andcc %g5, FPRS_DU, %g0 | 277 | 2: andcc %g5, FPRS_DU, %g0 |
277 | bne,pt %icc, 3f | 278 | bne,pt %icc, 3f |
278 | fzero %f32 | 279 | fzero %f32 |
@@ -301,8 +302,9 @@ cplus_fptrap_insn_2: | |||
301 | fmuld %f32, %f34, %f58 | 302 | fmuld %f32, %f34, %f58 |
302 | faddd %f32, %f34, %f60 | 303 | faddd %f32, %f34, %f60 |
303 | fmuld %f32, %f34, %f62 | 304 | fmuld %f32, %f34, %f62 |
305 | membar #Sync | ||
304 | ba,pt %xcc, fpdis_exit | 306 | ba,pt %xcc, fpdis_exit |
305 | membar #Sync | 307 | nop |
306 | 3: mov SECONDARY_CONTEXT, %g3 | 308 | 3: mov SECONDARY_CONTEXT, %g3 |
307 | add %g6, TI_FPREGS, %g1 | 309 | add %g6, TI_FPREGS, %g1 |
308 | ldxa [%g3] ASI_DMMU, %g5 | 310 | ldxa [%g3] ASI_DMMU, %g5 |
@@ -699,116 +701,6 @@ utrap_ill: | |||
699 | ba,pt %xcc, rtrap | 701 | ba,pt %xcc, rtrap |
700 | clr %l6 | 702 | clr %l6 |
701 | 703 | ||
702 | #ifdef CONFIG_BLK_DEV_FD | ||
703 | .globl floppy_hardint | ||
704 | floppy_hardint: | ||
705 | wr %g0, (1 << 11), %clear_softint | ||
706 | sethi %hi(doing_pdma), %g1 | ||
707 | ld [%g1 + %lo(doing_pdma)], %g2 | ||
708 | brz,pn %g2, floppy_dosoftint | ||
709 | sethi %hi(fdc_status), %g3 | ||
710 | ldx [%g3 + %lo(fdc_status)], %g3 | ||
711 | sethi %hi(pdma_vaddr), %g5 | ||
712 | ldx [%g5 + %lo(pdma_vaddr)], %g4 | ||
713 | sethi %hi(pdma_size), %g5 | ||
714 | ldx [%g5 + %lo(pdma_size)], %g5 | ||
715 | |||
716 | next_byte: | ||
717 | lduba [%g3] ASI_PHYS_BYPASS_EC_E, %g7 | ||
718 | andcc %g7, 0x80, %g0 | ||
719 | be,pn %icc, floppy_fifo_emptied | ||
720 | andcc %g7, 0x20, %g0 | ||
721 | be,pn %icc, floppy_overrun | ||
722 | andcc %g7, 0x40, %g0 | ||
723 | be,pn %icc, floppy_write | ||
724 | sub %g5, 1, %g5 | ||
725 | |||
726 | inc %g3 | ||
727 | lduba [%g3] ASI_PHYS_BYPASS_EC_E, %g7 | ||
728 | dec %g3 | ||
729 | orcc %g0, %g5, %g0 | ||
730 | stb %g7, [%g4] | ||
731 | bne,pn %xcc, next_byte | ||
732 | add %g4, 1, %g4 | ||
733 | |||
734 | b,pt %xcc, floppy_tdone | ||
735 | nop | ||
736 | |||
737 | floppy_write: | ||
738 | ldub [%g4], %g7 | ||
739 | orcc %g0, %g5, %g0 | ||
740 | inc %g3 | ||
741 | stba %g7, [%g3] ASI_PHYS_BYPASS_EC_E | ||
742 | dec %g3 | ||
743 | bne,pn %xcc, next_byte | ||
744 | add %g4, 1, %g4 | ||
745 | |||
746 | floppy_tdone: | ||
747 | sethi %hi(pdma_vaddr), %g1 | ||
748 | stx %g4, [%g1 + %lo(pdma_vaddr)] | ||
749 | sethi %hi(pdma_size), %g1 | ||
750 | stx %g5, [%g1 + %lo(pdma_size)] | ||
751 | sethi %hi(auxio_register), %g1 | ||
752 | ldx [%g1 + %lo(auxio_register)], %g7 | ||
753 | lduba [%g7] ASI_PHYS_BYPASS_EC_E, %g5 | ||
754 | or %g5, AUXIO_AUX1_FTCNT, %g5 | ||
755 | /* andn %g5, AUXIO_AUX1_MASK, %g5 */ | ||
756 | stba %g5, [%g7] ASI_PHYS_BYPASS_EC_E | ||
757 | andn %g5, AUXIO_AUX1_FTCNT, %g5 | ||
758 | /* andn %g5, AUXIO_AUX1_MASK, %g5 */ | ||
759 | |||
760 | nop; nop; nop; nop; nop; nop; | ||
761 | nop; nop; nop; nop; nop; nop; | ||
762 | |||
763 | stba %g5, [%g7] ASI_PHYS_BYPASS_EC_E | ||
764 | sethi %hi(doing_pdma), %g1 | ||
765 | b,pt %xcc, floppy_dosoftint | ||
766 | st %g0, [%g1 + %lo(doing_pdma)] | ||
767 | |||
768 | floppy_fifo_emptied: | ||
769 | sethi %hi(pdma_vaddr), %g1 | ||
770 | stx %g4, [%g1 + %lo(pdma_vaddr)] | ||
771 | sethi %hi(pdma_size), %g1 | ||
772 | stx %g5, [%g1 + %lo(pdma_size)] | ||
773 | sethi %hi(irq_action), %g1 | ||
774 | or %g1, %lo(irq_action), %g1 | ||
775 | ldx [%g1 + (11 << 3)], %g3 ! irqaction[floppy_irq] | ||
776 | ldx [%g3 + 0x08], %g4 ! action->flags>>48==ino | ||
777 | sethi %hi(ivector_table), %g3 | ||
778 | srlx %g4, 48, %g4 | ||
779 | or %g3, %lo(ivector_table), %g3 | ||
780 | sllx %g4, 5, %g4 | ||
781 | ldx [%g3 + %g4], %g4 ! &ivector_table[ino] | ||
782 | ldx [%g4 + 0x10], %g4 ! bucket->iclr | ||
783 | stwa %g0, [%g4] ASI_PHYS_BYPASS_EC_E ! ICLR_IDLE | ||
784 | membar #Sync ! probably not needed... | ||
785 | retry | ||
786 | |||
787 | floppy_overrun: | ||
788 | sethi %hi(pdma_vaddr), %g1 | ||
789 | stx %g4, [%g1 + %lo(pdma_vaddr)] | ||
790 | sethi %hi(pdma_size), %g1 | ||
791 | stx %g5, [%g1 + %lo(pdma_size)] | ||
792 | sethi %hi(doing_pdma), %g1 | ||
793 | st %g0, [%g1 + %lo(doing_pdma)] | ||
794 | |||
795 | floppy_dosoftint: | ||
796 | rdpr %pil, %g2 | ||
797 | wrpr %g0, 15, %pil | ||
798 | sethi %hi(109f), %g7 | ||
799 | b,pt %xcc, etrap_irq | ||
800 | 109: or %g7, %lo(109b), %g7 | ||
801 | |||
802 | mov 11, %o0 | ||
803 | mov 0, %o1 | ||
804 | call sparc_floppy_irq | ||
805 | add %sp, PTREGS_OFF, %o2 | ||
806 | |||
807 | b,pt %xcc, rtrap_irq | ||
808 | nop | ||
809 | |||
810 | #endif /* CONFIG_BLK_DEV_FD */ | ||
811 | |||
812 | /* XXX Here is stuff we still need to write... -DaveM XXX */ | 704 | /* XXX Here is stuff we still need to write... -DaveM XXX */ |
813 | .globl netbsd_syscall | 705 | .globl netbsd_syscall |
814 | netbsd_syscall: | 706 | netbsd_syscall: |
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index 4dcb8af94090..424712577307 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <asm/uaccess.h> | 37 | #include <asm/uaccess.h> |
38 | #include <asm/cache.h> | 38 | #include <asm/cache.h> |
39 | #include <asm/cpudata.h> | 39 | #include <asm/cpudata.h> |
40 | #include <asm/auxio.h> | ||
40 | 41 | ||
41 | #ifdef CONFIG_SMP | 42 | #ifdef CONFIG_SMP |
42 | static void distribute_irqs(void); | 43 | static void distribute_irqs(void); |
@@ -834,137 +835,65 @@ void handler_irq(int irq, struct pt_regs *regs) | |||
834 | } | 835 | } |
835 | 836 | ||
836 | #ifdef CONFIG_BLK_DEV_FD | 837 | #ifdef CONFIG_BLK_DEV_FD |
837 | extern void floppy_interrupt(int irq, void *dev_cookie, struct pt_regs *regs); | 838 | extern irqreturn_t floppy_interrupt(int, void *, struct pt_regs *); |
838 | 839 | ||
839 | void sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs) | 840 | /* XXX No easy way to include asm/floppy.h XXX */ |
840 | { | 841 | extern unsigned char *pdma_vaddr; |
841 | struct irqaction *action = *(irq + irq_action); | 842 | extern unsigned long pdma_size; |
842 | struct ino_bucket *bucket; | 843 | extern volatile int doing_pdma; |
843 | int cpu = smp_processor_id(); | 844 | extern unsigned long fdc_status; |
844 | |||
845 | irq_enter(); | ||
846 | kstat_this_cpu.irqs[irq]++; | ||
847 | |||
848 | *(irq_work(cpu, irq)) = 0; | ||
849 | bucket = get_ino_in_irqaction(action) + ivector_table; | ||
850 | |||
851 | bucket->flags |= IBF_INPROGRESS; | ||
852 | |||
853 | floppy_interrupt(irq, dev_cookie, regs); | ||
854 | upa_writel(ICLR_IDLE, bucket->iclr); | ||
855 | |||
856 | bucket->flags &= ~IBF_INPROGRESS; | ||
857 | |||
858 | irq_exit(); | ||
859 | } | ||
860 | #endif | ||
861 | |||
862 | /* The following assumes that the branch lies before the place we | ||
863 | * are branching to. This is the case for a trap vector... | ||
864 | * You have been warned. | ||
865 | */ | ||
866 | #define SPARC_BRANCH(dest_addr, inst_addr) \ | ||
867 | (0x10800000 | ((((dest_addr)-(inst_addr))>>2)&0x3fffff)) | ||
868 | |||
869 | #define SPARC_NOP (0x01000000) | ||
870 | 845 | ||
871 | static void install_fast_irq(unsigned int cpu_irq, | 846 | irqreturn_t sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs) |
872 | irqreturn_t (*handler)(int, void *, struct pt_regs *)) | ||
873 | { | 847 | { |
874 | extern unsigned long sparc64_ttable_tl0; | 848 | if (likely(doing_pdma)) { |
875 | unsigned long ttent = (unsigned long) &sparc64_ttable_tl0; | 849 | void __iomem *stat = (void __iomem *) fdc_status; |
876 | unsigned int *insns; | 850 | unsigned char *vaddr = pdma_vaddr; |
877 | 851 | unsigned long size = pdma_size; | |
878 | ttent += 0x820; | 852 | u8 val; |
879 | ttent += (cpu_irq - 1) << 5; | 853 | |
880 | insns = (unsigned int *) ttent; | 854 | while (size) { |
881 | insns[0] = SPARC_BRANCH(((unsigned long) handler), | 855 | val = readb(stat); |
882 | ((unsigned long)&insns[0])); | 856 | if (unlikely(!(val & 0x80))) { |
883 | insns[1] = SPARC_NOP; | 857 | pdma_vaddr = vaddr; |
884 | __asm__ __volatile__("membar #StoreStore; flush %0" : : "r" (ttent)); | 858 | pdma_size = size; |
885 | } | 859 | return IRQ_HANDLED; |
886 | 860 | } | |
887 | int request_fast_irq(unsigned int irq, | 861 | if (unlikely(!(val & 0x20))) { |
888 | irqreturn_t (*handler)(int, void *, struct pt_regs *), | 862 | pdma_vaddr = vaddr; |
889 | unsigned long irqflags, const char *name, void *dev_id) | 863 | pdma_size = size; |
890 | { | 864 | doing_pdma = 0; |
891 | struct irqaction *action; | 865 | goto main_interrupt; |
892 | struct ino_bucket *bucket = __bucket(irq); | 866 | } |
893 | unsigned long flags; | 867 | if (val & 0x40) { |
894 | 868 | /* read */ | |
895 | /* No pil0 dummy buckets allowed here. */ | 869 | *vaddr++ = readb(stat + 1); |
896 | if (bucket < &ivector_table[0] || | 870 | } else { |
897 | bucket >= &ivector_table[NUM_IVECS]) { | 871 | unsigned char data = *vaddr++; |
898 | unsigned int *caller; | ||
899 | |||
900 | __asm__ __volatile__("mov %%i7, %0" : "=r" (caller)); | ||
901 | printk(KERN_CRIT "request_fast_irq: Old style IRQ registry attempt " | ||
902 | "from %p, irq %08x.\n", caller, irq); | ||
903 | return -EINVAL; | ||
904 | } | ||
905 | |||
906 | if (!handler) | ||
907 | return -EINVAL; | ||
908 | 872 | ||
909 | if ((bucket->pil == 0) || (bucket->pil == 14)) { | 873 | /* write */ |
910 | printk("request_fast_irq: Trying to register shared IRQ 0 or 14.\n"); | 874 | writeb(data, stat + 1); |
911 | return -EBUSY; | 875 | } |
912 | } | 876 | size--; |
877 | } | ||
913 | 878 | ||
914 | spin_lock_irqsave(&irq_action_lock, flags); | 879 | pdma_vaddr = vaddr; |
880 | pdma_size = size; | ||
915 | 881 | ||
916 | action = *(bucket->pil + irq_action); | 882 | /* Send Terminal Count pulse to floppy controller. */ |
917 | if (action) { | 883 | val = readb(auxio_register); |
918 | if (action->flags & SA_SHIRQ) | 884 | val |= AUXIO_AUX1_FTCNT; |
919 | panic("Trying to register fast irq when already shared.\n"); | 885 | writeb(val, auxio_register); |
920 | if (irqflags & SA_SHIRQ) | 886 | val &= AUXIO_AUX1_FTCNT; |
921 | panic("Trying to register fast irq as shared.\n"); | 887 | writeb(val, auxio_register); |
922 | printk("request_fast_irq: Trying to register yet already owned.\n"); | ||
923 | spin_unlock_irqrestore(&irq_action_lock, flags); | ||
924 | return -EBUSY; | ||
925 | } | ||
926 | 888 | ||
927 | /* | 889 | doing_pdma = 0; |
928 | * We do not check for SA_SAMPLE_RANDOM in this path. Neither do we | ||
929 | * support smp intr affinity in this path. | ||
930 | */ | ||
931 | if (irqflags & SA_STATIC_ALLOC) { | ||
932 | if (static_irq_count < MAX_STATIC_ALLOC) | ||
933 | action = &static_irqaction[static_irq_count++]; | ||
934 | else | ||
935 | printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed " | ||
936 | "using kmalloc\n", bucket->pil, name); | ||
937 | } | ||
938 | if (action == NULL) | ||
939 | action = (struct irqaction *)kmalloc(sizeof(struct irqaction), | ||
940 | GFP_ATOMIC); | ||
941 | if (!action) { | ||
942 | spin_unlock_irqrestore(&irq_action_lock, flags); | ||
943 | return -ENOMEM; | ||
944 | } | 890 | } |
945 | install_fast_irq(bucket->pil, handler); | ||
946 | 891 | ||
947 | bucket->irq_info = action; | 892 | main_interrupt: |
948 | bucket->flags |= IBF_ACTIVE; | 893 | return floppy_interrupt(irq, dev_cookie, regs); |
949 | |||
950 | action->handler = handler; | ||
951 | action->flags = irqflags; | ||
952 | action->dev_id = NULL; | ||
953 | action->name = name; | ||
954 | action->next = NULL; | ||
955 | put_ino_in_irqaction(action, irq); | ||
956 | put_smpaff_in_irqaction(action, CPU_MASK_NONE); | ||
957 | |||
958 | *(bucket->pil + irq_action) = action; | ||
959 | enable_irq(irq); | ||
960 | |||
961 | spin_unlock_irqrestore(&irq_action_lock, flags); | ||
962 | |||
963 | #ifdef CONFIG_SMP | ||
964 | distribute_irqs(); | ||
965 | #endif | ||
966 | return 0; | ||
967 | } | 894 | } |
895 | EXPORT_SYMBOL(sparc_floppy_irq); | ||
896 | #endif | ||
968 | 897 | ||
969 | /* We really don't need these at all on the Sparc. We only have | 898 | /* We really don't need these at all on the Sparc. We only have |
970 | * stubs here because they are exported to modules. | 899 | * stubs here because they are exported to modules. |
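The assembly handler deleted from entry.S reappears above as ordinary C: the pseudo-DMA transfer is a polled loop over the floppy controller's status register. Stripped of driver plumbing, the core pattern is sketched below for illustration only; STAT_READY and STAT_READ are hypothetical names for the raw 0x80 and 0x40 masks that sparc_floppy_irq() tests directly.

    /* Hedged sketch of a polled pseudo-DMA byte loop over MMIO registers. */
    #define STAT_READY 0x80   /* controller has a byte (or wants one) */
    #define STAT_READ  0x40   /* direction: device -> memory          */

    static void pdma_copy(void __iomem *stat, unsigned char *buf, unsigned long size)
    {
            while (size--) {
                    u8 val = readb(stat);             /* poll the status register   */
                    if (!(val & STAT_READY))
                            break;                    /* FIFO drained; wait for IRQ */
                    if (val & STAT_READ)
                            *buf++ = readb(stat + 1); /* data reg follows status    */
                    else
                            writeb(*buf++, stat + 1);
            }
    }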
diff --git a/arch/sparc64/kernel/semaphore.c b/arch/sparc64/kernel/semaphore.c index 63496c43fe17..a809e63f03ef 100644 --- a/arch/sparc64/kernel/semaphore.c +++ b/arch/sparc64/kernel/semaphore.c | |||
@@ -32,8 +32,9 @@ static __inline__ int __sem_update_count(struct semaphore *sem, int incr) | |||
32 | " add %1, %4, %1\n" | 32 | " add %1, %4, %1\n" |
33 | " cas [%3], %0, %1\n" | 33 | " cas [%3], %0, %1\n" |
34 | " cmp %0, %1\n" | 34 | " cmp %0, %1\n" |
35 | " membar #StoreLoad | #StoreStore\n" | ||
35 | " bne,pn %%icc, 1b\n" | 36 | " bne,pn %%icc, 1b\n" |
36 | " membar #StoreLoad | #StoreStore\n" | 37 | " nop\n" |
37 | : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count) | 38 | : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count) |
38 | : "r" (&sem->count), "r" (incr), "m" (sem->count) | 39 | : "r" (&sem->count), "r" (incr), "m" (sem->count) |
39 | : "cc"); | 40 | : "cc"); |
@@ -71,8 +72,9 @@ void up(struct semaphore *sem) | |||
71 | " cmp %%g1, %%g7\n" | 72 | " cmp %%g1, %%g7\n" |
72 | " bne,pn %%icc, 1b\n" | 73 | " bne,pn %%icc, 1b\n" |
73 | " addcc %%g7, 1, %%g0\n" | 74 | " addcc %%g7, 1, %%g0\n" |
75 | " membar #StoreLoad | #StoreStore\n" | ||
74 | " ble,pn %%icc, 3f\n" | 76 | " ble,pn %%icc, 3f\n" |
75 | " membar #StoreLoad | #StoreStore\n" | 77 | " nop\n" |
76 | "2:\n" | 78 | "2:\n" |
77 | " .subsection 2\n" | 79 | " .subsection 2\n" |
78 | "3: mov %0, %%g1\n" | 80 | "3: mov %0, %%g1\n" |
@@ -128,8 +130,9 @@ void __sched down(struct semaphore *sem) | |||
128 | " cmp %%g1, %%g7\n" | 130 | " cmp %%g1, %%g7\n" |
129 | " bne,pn %%icc, 1b\n" | 131 | " bne,pn %%icc, 1b\n" |
130 | " cmp %%g7, 1\n" | 132 | " cmp %%g7, 1\n" |
133 | " membar #StoreLoad | #StoreStore\n" | ||
131 | " bl,pn %%icc, 3f\n" | 134 | " bl,pn %%icc, 3f\n" |
132 | " membar #StoreLoad | #StoreStore\n" | 135 | " nop\n" |
133 | "2:\n" | 136 | "2:\n" |
134 | " .subsection 2\n" | 137 | " .subsection 2\n" |
135 | "3: mov %0, %%g1\n" | 138 | "3: mov %0, %%g1\n" |
@@ -233,8 +236,9 @@ int __sched down_interruptible(struct semaphore *sem) | |||
233 | " cmp %%g1, %%g7\n" | 236 | " cmp %%g1, %%g7\n" |
234 | " bne,pn %%icc, 1b\n" | 237 | " bne,pn %%icc, 1b\n" |
235 | " cmp %%g7, 1\n" | 238 | " cmp %%g7, 1\n" |
239 | " membar #StoreLoad | #StoreStore\n" | ||
236 | " bl,pn %%icc, 3f\n" | 240 | " bl,pn %%icc, 3f\n" |
237 | " membar #StoreLoad | #StoreStore\n" | 241 | " nop\n" |
238 | "2:\n" | 242 | "2:\n" |
239 | " .subsection 2\n" | 243 | " .subsection 2\n" |
240 | "3: mov %2, %%g1\n" | 244 | "3: mov %2, %%g1\n" |
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c index e78cc53594fa..56cd96f4a5cd 100644 --- a/arch/sparc64/kernel/sparc64_ksyms.c +++ b/arch/sparc64/kernel/sparc64_ksyms.c | |||
@@ -227,7 +227,6 @@ EXPORT_SYMBOL(__flush_dcache_range); | |||
227 | 227 | ||
228 | EXPORT_SYMBOL(mostek_lock); | 228 | EXPORT_SYMBOL(mostek_lock); |
229 | EXPORT_SYMBOL(mstk48t02_regs); | 229 | EXPORT_SYMBOL(mstk48t02_regs); |
230 | EXPORT_SYMBOL(request_fast_irq); | ||
231 | #ifdef CONFIG_SUN_AUXIO | 230 | #ifdef CONFIG_SUN_AUXIO |
232 | EXPORT_SYMBOL(auxio_set_led); | 231 | EXPORT_SYMBOL(auxio_set_led); |
233 | EXPORT_SYMBOL(auxio_set_lte); | 232 | EXPORT_SYMBOL(auxio_set_lte); |
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S index 2c8f9344b4ee..3a145fc39cf2 100644 --- a/arch/sparc64/kernel/trampoline.S +++ b/arch/sparc64/kernel/trampoline.S | |||
@@ -98,8 +98,9 @@ startup_continue: | |||
98 | 98 | ||
99 | sethi %hi(prom_entry_lock), %g2 | 99 | sethi %hi(prom_entry_lock), %g2 |
100 | 1: ldstub [%g2 + %lo(prom_entry_lock)], %g1 | 100 | 1: ldstub [%g2 + %lo(prom_entry_lock)], %g1 |
101 | membar #StoreLoad | #StoreStore | ||
101 | brnz,pn %g1, 1b | 102 | brnz,pn %g1, 1b |
102 | membar #StoreLoad | #StoreStore | 103 | nop |
103 | 104 | ||
104 | sethi %hi(p1275buf), %g2 | 105 | sethi %hi(p1275buf), %g2 |
105 | or %g2, %lo(p1275buf), %g2 | 106 | or %g2, %lo(p1275buf), %g2 |
diff --git a/arch/sparc64/lib/U1memcpy.S b/arch/sparc64/lib/U1memcpy.S index da9b520c7189..bafd2fc07acb 100644 --- a/arch/sparc64/lib/U1memcpy.S +++ b/arch/sparc64/lib/U1memcpy.S | |||
@@ -87,14 +87,17 @@ | |||
87 | #define LOOP_CHUNK3(src, dest, len, branch_dest) \ | 87 | #define LOOP_CHUNK3(src, dest, len, branch_dest) \ |
88 | MAIN_LOOP_CHUNK(src, dest, f32, f48, len, branch_dest) | 88 | MAIN_LOOP_CHUNK(src, dest, f32, f48, len, branch_dest) |
89 | 89 | ||
90 | #define DO_SYNC membar #Sync; | ||
90 | #define STORE_SYNC(dest, fsrc) \ | 91 | #define STORE_SYNC(dest, fsrc) \ |
91 | EX_ST(STORE_BLK(%fsrc, %dest)); \ | 92 | EX_ST(STORE_BLK(%fsrc, %dest)); \ |
92 | add %dest, 0x40, %dest; | 93 | add %dest, 0x40, %dest; \ |
94 | DO_SYNC | ||
93 | 95 | ||
94 | #define STORE_JUMP(dest, fsrc, target) \ | 96 | #define STORE_JUMP(dest, fsrc, target) \ |
95 | EX_ST(STORE_BLK(%fsrc, %dest)); \ | 97 | EX_ST(STORE_BLK(%fsrc, %dest)); \ |
96 | add %dest, 0x40, %dest; \ | 98 | add %dest, 0x40, %dest; \ |
97 | ba,pt %xcc, target; | 99 | ba,pt %xcc, target; \ |
100 | nop; | ||
98 | 101 | ||
99 | #define FINISH_VISCHUNK(dest, f0, f1, left) \ | 102 | #define FINISH_VISCHUNK(dest, f0, f1, left) \ |
100 | subcc %left, 8, %left;\ | 103 | subcc %left, 8, %left;\ |
@@ -239,17 +242,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
239 | ba,pt %xcc, 1b+4 | 242 | ba,pt %xcc, 1b+4 |
240 | faligndata %f0, %f2, %f48 | 243 | faligndata %f0, %f2, %f48 |
241 | 1: FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32) | 244 | 1: FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32) |
242 | STORE_SYNC(o0, f48) membar #Sync | 245 | STORE_SYNC(o0, f48) |
243 | FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0) | 246 | FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0) |
244 | STORE_JUMP(o0, f48, 40f) membar #Sync | 247 | STORE_JUMP(o0, f48, 40f) |
245 | 2: FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0) | 248 | 2: FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0) |
246 | STORE_SYNC(o0, f48) membar #Sync | 249 | STORE_SYNC(o0, f48) |
247 | FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16) | 250 | FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16) |
248 | STORE_JUMP(o0, f48, 48f) membar #Sync | 251 | STORE_JUMP(o0, f48, 48f) |
249 | 3: FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16) | 252 | 3: FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16) |
250 | STORE_SYNC(o0, f48) membar #Sync | 253 | STORE_SYNC(o0, f48) |
251 | FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32) | 254 | FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32) |
252 | STORE_JUMP(o0, f48, 56f) membar #Sync | 255 | STORE_JUMP(o0, f48, 56f) |
253 | 256 | ||
254 | 1: FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18) | 257 | 1: FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18) |
255 | LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) | 258 | LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) |
@@ -260,17 +263,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
260 | ba,pt %xcc, 1b+4 | 263 | ba,pt %xcc, 1b+4 |
261 | faligndata %f2, %f4, %f48 | 264 | faligndata %f2, %f4, %f48 |
262 | 1: FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34) | 265 | 1: FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34) |
263 | STORE_SYNC(o0, f48) membar #Sync | 266 | STORE_SYNC(o0, f48) |
264 | FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2) | 267 | FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2) |
265 | STORE_JUMP(o0, f48, 41f) membar #Sync | 268 | STORE_JUMP(o0, f48, 41f) |
266 | 2: FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2) | 269 | 2: FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2) |
267 | STORE_SYNC(o0, f48) membar #Sync | 270 | STORE_SYNC(o0, f48) |
268 | FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18) | 271 | FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18) |
269 | STORE_JUMP(o0, f48, 49f) membar #Sync | 272 | STORE_JUMP(o0, f48, 49f) |
270 | 3: FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18) | 273 | 3: FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18) |
271 | STORE_SYNC(o0, f48) membar #Sync | 274 | STORE_SYNC(o0, f48) |
272 | FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34) | 275 | FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34) |
273 | STORE_JUMP(o0, f48, 57f) membar #Sync | 276 | STORE_JUMP(o0, f48, 57f) |
274 | 277 | ||
275 | 1: FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20) | 278 | 1: FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20) |
276 | LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) | 279 | LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) |
@@ -281,17 +284,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
281 | ba,pt %xcc, 1b+4 | 284 | ba,pt %xcc, 1b+4 |
282 | faligndata %f4, %f6, %f48 | 285 | faligndata %f4, %f6, %f48 |
283 | 1: FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36) | 286 | 1: FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36) |
284 | STORE_SYNC(o0, f48) membar #Sync | 287 | STORE_SYNC(o0, f48) |
285 | FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4) | 288 | FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4) |
286 | STORE_JUMP(o0, f48, 42f) membar #Sync | 289 | STORE_JUMP(o0, f48, 42f) |
287 | 2: FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4) | 290 | 2: FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4) |
288 | STORE_SYNC(o0, f48) membar #Sync | 291 | STORE_SYNC(o0, f48) |
289 | FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20) | 292 | FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20) |
290 | STORE_JUMP(o0, f48, 50f) membar #Sync | 293 | STORE_JUMP(o0, f48, 50f) |
291 | 3: FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20) | 294 | 3: FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20) |
292 | STORE_SYNC(o0, f48) membar #Sync | 295 | STORE_SYNC(o0, f48) |
293 | FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36) | 296 | FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36) |
294 | STORE_JUMP(o0, f48, 58f) membar #Sync | 297 | STORE_JUMP(o0, f48, 58f) |
295 | 298 | ||
296 | 1: FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22) | 299 | 1: FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22) |
297 | LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) | 300 | LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) |
@@ -302,17 +305,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
302 | ba,pt %xcc, 1b+4 | 305 | ba,pt %xcc, 1b+4 |
303 | faligndata %f6, %f8, %f48 | 306 | faligndata %f6, %f8, %f48 |
304 | 1: FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38) | 307 | 1: FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38) |
305 | STORE_SYNC(o0, f48) membar #Sync | 308 | STORE_SYNC(o0, f48) |
306 | FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6) | 309 | FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6) |
307 | STORE_JUMP(o0, f48, 43f) membar #Sync | 310 | STORE_JUMP(o0, f48, 43f) |
308 | 2: FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6) | 311 | 2: FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6) |
309 | STORE_SYNC(o0, f48) membar #Sync | 312 | STORE_SYNC(o0, f48) |
310 | FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22) | 313 | FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22) |
311 | STORE_JUMP(o0, f48, 51f) membar #Sync | 314 | STORE_JUMP(o0, f48, 51f) |
312 | 3: FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22) | 315 | 3: FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22) |
313 | STORE_SYNC(o0, f48) membar #Sync | 316 | STORE_SYNC(o0, f48) |
314 | FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38) | 317 | FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38) |
315 | STORE_JUMP(o0, f48, 59f) membar #Sync | 318 | STORE_JUMP(o0, f48, 59f) |
316 | 319 | ||
317 | 1: FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24) | 320 | 1: FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24) |
318 | LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) | 321 | LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) |
@@ -323,17 +326,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
323 | ba,pt %xcc, 1b+4 | 326 | ba,pt %xcc, 1b+4 |
324 | faligndata %f8, %f10, %f48 | 327 | faligndata %f8, %f10, %f48 |
325 | 1: FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40) | 328 | 1: FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40) |
326 | STORE_SYNC(o0, f48) membar #Sync | 329 | STORE_SYNC(o0, f48) |
327 | FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8) | 330 | FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8) |
328 | STORE_JUMP(o0, f48, 44f) membar #Sync | 331 | STORE_JUMP(o0, f48, 44f) |
329 | 2: FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8) | 332 | 2: FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8) |
330 | STORE_SYNC(o0, f48) membar #Sync | 333 | STORE_SYNC(o0, f48) |
331 | FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24) | 334 | FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24) |
332 | STORE_JUMP(o0, f48, 52f) membar #Sync | 335 | STORE_JUMP(o0, f48, 52f) |
333 | 3: FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24) | 336 | 3: FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24) |
334 | STORE_SYNC(o0, f48) membar #Sync | 337 | STORE_SYNC(o0, f48) |
335 | FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40) | 338 | FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40) |
336 | STORE_JUMP(o0, f48, 60f) membar #Sync | 339 | STORE_JUMP(o0, f48, 60f) |
337 | 340 | ||
338 | 1: FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26) | 341 | 1: FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26) |
339 | LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) | 342 | LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) |
@@ -344,17 +347,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
344 | ba,pt %xcc, 1b+4 | 347 | ba,pt %xcc, 1b+4 |
345 | faligndata %f10, %f12, %f48 | 348 | faligndata %f10, %f12, %f48 |
346 | 1: FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42) | 349 | 1: FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42) |
347 | STORE_SYNC(o0, f48) membar #Sync | 350 | STORE_SYNC(o0, f48) |
348 | FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10) | 351 | FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10) |
349 | STORE_JUMP(o0, f48, 45f) membar #Sync | 352 | STORE_JUMP(o0, f48, 45f) |
350 | 2: FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10) | 353 | 2: FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10) |
351 | STORE_SYNC(o0, f48) membar #Sync | 354 | STORE_SYNC(o0, f48) |
352 | FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26) | 355 | FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26) |
353 | STORE_JUMP(o0, f48, 53f) membar #Sync | 356 | STORE_JUMP(o0, f48, 53f) |
354 | 3: FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26) | 357 | 3: FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26) |
355 | STORE_SYNC(o0, f48) membar #Sync | 358 | STORE_SYNC(o0, f48) |
356 | FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42) | 359 | FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42) |
357 | STORE_JUMP(o0, f48, 61f) membar #Sync | 360 | STORE_JUMP(o0, f48, 61f) |
358 | 361 | ||
359 | 1: FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28) | 362 | 1: FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28) |
360 | LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) | 363 | LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) |
@@ -365,17 +368,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
365 | ba,pt %xcc, 1b+4 | 368 | ba,pt %xcc, 1b+4 |
366 | faligndata %f12, %f14, %f48 | 369 | faligndata %f12, %f14, %f48 |
367 | 1: FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44) | 370 | 1: FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44) |
368 | STORE_SYNC(o0, f48) membar #Sync | 371 | STORE_SYNC(o0, f48) |
369 | FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12) | 372 | FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12) |
370 | STORE_JUMP(o0, f48, 46f) membar #Sync | 373 | STORE_JUMP(o0, f48, 46f) |
371 | 2: FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12) | 374 | 2: FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12) |
372 | STORE_SYNC(o0, f48) membar #Sync | 375 | STORE_SYNC(o0, f48) |
373 | FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28) | 376 | FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28) |
374 | STORE_JUMP(o0, f48, 54f) membar #Sync | 377 | STORE_JUMP(o0, f48, 54f) |
375 | 3: FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28) | 378 | 3: FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28) |
376 | STORE_SYNC(o0, f48) membar #Sync | 379 | STORE_SYNC(o0, f48) |
377 | FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44) | 380 | FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44) |
378 | STORE_JUMP(o0, f48, 62f) membar #Sync | 381 | STORE_JUMP(o0, f48, 62f) |
379 | 382 | ||
380 | 1: FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30) | 383 | 1: FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30) |
381 | LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) | 384 | LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) |
@@ -386,17 +389,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
386 | ba,pt %xcc, 1b+4 | 389 | ba,pt %xcc, 1b+4 |
387 | faligndata %f14, %f16, %f48 | 390 | faligndata %f14, %f16, %f48 |
388 | 1: FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) | 391 | 1: FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) |
389 | STORE_SYNC(o0, f48) membar #Sync | 392 | STORE_SYNC(o0, f48) |
390 | FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14) | 393 | FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14) |
391 | STORE_JUMP(o0, f48, 47f) membar #Sync | 394 | STORE_JUMP(o0, f48, 47f) |
392 | 2: FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14) | 395 | 2: FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14) |
393 | STORE_SYNC(o0, f48) membar #Sync | 396 | STORE_SYNC(o0, f48) |
394 | FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30) | 397 | FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30) |
395 | STORE_JUMP(o0, f48, 55f) membar #Sync | 398 | STORE_JUMP(o0, f48, 55f) |
396 | 3: FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30) | 399 | 3: FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30) |
397 | STORE_SYNC(o0, f48) membar #Sync | 400 | STORE_SYNC(o0, f48) |
398 | FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) | 401 | FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) |
399 | STORE_JUMP(o0, f48, 63f) membar #Sync | 402 | STORE_JUMP(o0, f48, 63f) |
400 | 403 | ||
401 | 40: FINISH_VISCHUNK(o0, f0, f2, g3) | 404 | 40: FINISH_VISCHUNK(o0, f0, f2, g3) |
402 | 41: FINISH_VISCHUNK(o0, f2, f4, g3) | 405 | 41: FINISH_VISCHUNK(o0, f2, f4, g3) |
diff --git a/arch/sparc64/lib/VISsave.S b/arch/sparc64/lib/VISsave.S index 65e328d600a8..4e18989bd602 100644 --- a/arch/sparc64/lib/VISsave.S +++ b/arch/sparc64/lib/VISsave.S | |||
@@ -72,7 +72,11 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3 | |||
72 | 72 | ||
73 | stda %f48, [%g3 + %g1] ASI_BLK_P | 73 | stda %f48, [%g3 + %g1] ASI_BLK_P |
74 | 5: membar #Sync | 74 | 5: membar #Sync |
75 | jmpl %g7 + %g0, %g0 | 75 | ba,pt %xcc, 80f |
76 | nop | ||
77 | |||
78 | .align 32 | ||
79 | 80: jmpl %g7 + %g0, %g0 | ||
76 | nop | 80 | nop |
77 | 81 | ||
78 | 6: ldub [%g3 + TI_FPSAVED], %o5 | 82 | 6: ldub [%g3 + TI_FPSAVED], %o5 |
@@ -87,8 +91,11 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3 | |||
87 | stda %f32, [%g2 + %g1] ASI_BLK_P | 91 | stda %f32, [%g2 + %g1] ASI_BLK_P |
88 | stda %f48, [%g3 + %g1] ASI_BLK_P | 92 | stda %f48, [%g3 + %g1] ASI_BLK_P |
89 | membar #Sync | 93 | membar #Sync |
90 | jmpl %g7 + %g0, %g0 | 94 | ba,pt %xcc, 80f |
95 | nop | ||
91 | 96 | ||
97 | .align 32 | ||
98 | 80: jmpl %g7 + %g0, %g0 | ||
92 | nop | 99 | nop |
93 | 100 | ||
94 | .align 32 | 101 | .align 32 |
@@ -126,6 +133,10 @@ VISenterhalf: | |||
126 | stda %f0, [%g2 + %g1] ASI_BLK_P | 133 | stda %f0, [%g2 + %g1] ASI_BLK_P |
127 | stda %f16, [%g3 + %g1] ASI_BLK_P | 134 | stda %f16, [%g3 + %g1] ASI_BLK_P |
128 | membar #Sync | 135 | membar #Sync |
136 | ba,pt %xcc, 4f | ||
137 | nop | ||
138 | |||
139 | .align 32 | ||
129 | 4: and %o5, FPRS_DU, %o5 | 140 | 4: and %o5, FPRS_DU, %o5 |
130 | jmpl %g7 + %g0, %g0 | 141 | jmpl %g7 + %g0, %g0 |
131 | wr %o5, FPRS_FEF, %fprs | 142 | wr %o5, FPRS_FEF, %fprs |
diff --git a/arch/sparc64/lib/atomic.S b/arch/sparc64/lib/atomic.S index e528b8d1a3e6..faf87c31598b 100644 --- a/arch/sparc64/lib/atomic.S +++ b/arch/sparc64/lib/atomic.S | |||
@@ -7,18 +7,6 @@ | |||
7 | #include <linux/config.h> | 7 | #include <linux/config.h> |
8 | #include <asm/asi.h> | 8 | #include <asm/asi.h> |
9 | 9 | ||
10 | /* On SMP we need to use memory barriers to ensure | ||
11 | * correct memory operation ordering, nop these out | ||
12 | * for uniprocessor. | ||
13 | */ | ||
14 | #ifdef CONFIG_SMP | ||
15 | #define ATOMIC_PRE_BARRIER membar #StoreLoad | #LoadLoad | ||
16 | #define ATOMIC_POST_BARRIER membar #StoreLoad | #StoreStore | ||
17 | #else | ||
18 | #define ATOMIC_PRE_BARRIER nop | ||
19 | #define ATOMIC_POST_BARRIER nop | ||
20 | #endif | ||
21 | |||
22 | .text | 10 | .text |
23 | 11 | ||
24 | /* Two versions of the atomic routines, one that | 12 | /* Two versions of the atomic routines, one that |
@@ -52,6 +40,24 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */ | |||
52 | nop | 40 | nop |
53 | .size atomic_sub, .-atomic_sub | 41 | .size atomic_sub, .-atomic_sub |
54 | 42 | ||
43 | /* On SMP we need to use memory barriers to ensure | ||
44 | * correct memory operation ordering, nop these out | ||
45 | * for uniprocessor. | ||
46 | */ | ||
47 | #ifdef CONFIG_SMP | ||
48 | |||
49 | #define ATOMIC_PRE_BARRIER membar #StoreLoad | #LoadLoad; | ||
50 | #define ATOMIC_POST_BARRIER \ | ||
51 | ba,pt %xcc, 80b; \ | ||
52 | membar #StoreLoad | #StoreStore | ||
53 | |||
54 | 80: retl | ||
55 | nop | ||
56 | #else | ||
57 | #define ATOMIC_PRE_BARRIER | ||
58 | #define ATOMIC_POST_BARRIER | ||
59 | #endif | ||
60 | |||
55 | .globl atomic_add_ret | 61 | .globl atomic_add_ret |
56 | .type atomic_add_ret,#function | 62 | .type atomic_add_ret,#function |
57 | atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */ | 63 | atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */ |
@@ -62,9 +68,10 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */ | |||
62 | cmp %g1, %g7 | 68 | cmp %g1, %g7 |
63 | bne,pn %icc, 1b | 69 | bne,pn %icc, 1b |
64 | add %g7, %o0, %g7 | 70 | add %g7, %o0, %g7 |
71 | sra %g7, 0, %o0 | ||
65 | ATOMIC_POST_BARRIER | 72 | ATOMIC_POST_BARRIER |
66 | retl | 73 | retl |
67 | sra %g7, 0, %o0 | 74 | nop |
68 | .size atomic_add_ret, .-atomic_add_ret | 75 | .size atomic_add_ret, .-atomic_add_ret |
69 | 76 | ||
70 | .globl atomic_sub_ret | 77 | .globl atomic_sub_ret |
@@ -77,9 +84,10 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */ | |||
77 | cmp %g1, %g7 | 84 | cmp %g1, %g7 |
78 | bne,pn %icc, 1b | 85 | bne,pn %icc, 1b |
79 | sub %g7, %o0, %g7 | 86 | sub %g7, %o0, %g7 |
87 | sra %g7, 0, %o0 | ||
80 | ATOMIC_POST_BARRIER | 88 | ATOMIC_POST_BARRIER |
81 | retl | 89 | retl |
82 | sra %g7, 0, %o0 | 90 | nop |
83 | .size atomic_sub_ret, .-atomic_sub_ret | 91 | .size atomic_sub_ret, .-atomic_sub_ret |
84 | 92 | ||
85 | .globl atomic64_add | 93 | .globl atomic64_add |
@@ -118,9 +126,10 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */ | |||
118 | cmp %g1, %g7 | 126 | cmp %g1, %g7 |
119 | bne,pn %xcc, 1b | 127 | bne,pn %xcc, 1b |
120 | add %g7, %o0, %g7 | 128 | add %g7, %o0, %g7 |
129 | mov %g7, %o0 | ||
121 | ATOMIC_POST_BARRIER | 130 | ATOMIC_POST_BARRIER |
122 | retl | 131 | retl |
123 | mov %g7, %o0 | 132 | nop |
124 | .size atomic64_add_ret, .-atomic64_add_ret | 133 | .size atomic64_add_ret, .-atomic64_add_ret |
125 | 134 | ||
126 | .globl atomic64_sub_ret | 135 | .globl atomic64_sub_ret |
@@ -133,7 +142,8 @@ atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */ | |||
133 | cmp %g1, %g7 | 142 | cmp %g1, %g7 |
134 | bne,pn %xcc, 1b | 143 | bne,pn %xcc, 1b |
135 | sub %g7, %o0, %g7 | 144 | sub %g7, %o0, %g7 |
145 | mov %g7, %o0 | ||
136 | ATOMIC_POST_BARRIER | 146 | ATOMIC_POST_BARRIER |
137 | retl | 147 | retl |
138 | mov %g7, %o0 | 148 | nop |
139 | .size atomic64_sub_ret, .-atomic64_sub_ret | 149 | .size atomic64_sub_ret, .-atomic64_sub_ret |
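The same barrier relocation forces the return-value move (the sra/mov into %o0) out of the retl delay slot and ahead of ATOMIC_POST_BARRIER. Functionally, atomic_add_ret remains the familiar compare-and-swap retry loop; a rough C rendering — illustrative only, using the generic cmpxchg helper rather than the raw cas instruction, with barriers and the 32-bit sign extension elided — is:

    /* Hedged C equivalent of atomic_add_ret: cas-retry loop that
     * returns the new value.
     */
    static int example_atomic_add_ret(int incr, atomic_t *v)
    {
            int old, new;

            do {
                    old = v->counter;    /* snapshot current value  */
                    new = old + incr;    /* value we try to install */
            } while (cmpxchg(&v->counter, old, new) != old);  /* retry on race */

            return new;
    }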
diff --git a/arch/sparc64/lib/bitops.S b/arch/sparc64/lib/bitops.S index 886dcd2b376a..31afbfe6c1e8 100644 --- a/arch/sparc64/lib/bitops.S +++ b/arch/sparc64/lib/bitops.S | |||
@@ -7,20 +7,26 @@ | |||
7 | #include <linux/config.h> | 7 | #include <linux/config.h> |
8 | #include <asm/asi.h> | 8 | #include <asm/asi.h> |
9 | 9 | ||
10 | .text | ||
11 | |||
10 | /* On SMP we need to use memory barriers to ensure | 12 | /* On SMP we need to use memory barriers to ensure |
11 | * correct memory operation ordering, nop these out | 13 | * correct memory operation ordering, nop these out |
12 | * for uniprocessor. | 14 | * for uniprocessor. |
13 | */ | 15 | */ |
16 | |||
14 | #ifdef CONFIG_SMP | 17 | #ifdef CONFIG_SMP |
15 | #define BITOP_PRE_BARRIER membar #StoreLoad | #LoadLoad | 18 | #define BITOP_PRE_BARRIER membar #StoreLoad | #LoadLoad |
16 | #define BITOP_POST_BARRIER membar #StoreLoad | #StoreStore | 19 | #define BITOP_POST_BARRIER \ |
20 | ba,pt %xcc, 80b; \ | ||
21 | membar #StoreLoad | #StoreStore | ||
22 | |||
23 | 80: retl | ||
24 | nop | ||
17 | #else | 25 | #else |
18 | #define BITOP_PRE_BARRIER nop | 26 | #define BITOP_PRE_BARRIER |
19 | #define BITOP_POST_BARRIER nop | 27 | #define BITOP_POST_BARRIER |
20 | #endif | 28 | #endif |
21 | 29 | ||
22 | .text | ||
23 | |||
24 | .globl test_and_set_bit | 30 | .globl test_and_set_bit |
25 | .type test_and_set_bit,#function | 31 | .type test_and_set_bit,#function |
26 | test_and_set_bit: /* %o0=nr, %o1=addr */ | 32 | test_and_set_bit: /* %o0=nr, %o1=addr */ |
@@ -37,10 +43,11 @@ test_and_set_bit: /* %o0=nr, %o1=addr */ | |||
37 | cmp %g7, %g1 | 43 | cmp %g7, %g1 |
38 | bne,pn %xcc, 1b | 44 | bne,pn %xcc, 1b |
39 | and %g7, %o2, %g2 | 45 | and %g7, %o2, %g2 |
40 | BITOP_POST_BARRIER | ||
41 | clr %o0 | 46 | clr %o0 |
47 | movrne %g2, 1, %o0 | ||
48 | BITOP_POST_BARRIER | ||
42 | retl | 49 | retl |
43 | movrne %g2, 1, %o0 | 50 | nop |
44 | .size test_and_set_bit, .-test_and_set_bit | 51 | .size test_and_set_bit, .-test_and_set_bit |
45 | 52 | ||
46 | .globl test_and_clear_bit | 53 | .globl test_and_clear_bit |
@@ -59,10 +66,11 @@ test_and_clear_bit: /* %o0=nr, %o1=addr */ | |||
59 | cmp %g7, %g1 | 66 | cmp %g7, %g1 |
60 | bne,pn %xcc, 1b | 67 | bne,pn %xcc, 1b |
61 | and %g7, %o2, %g2 | 68 | and %g7, %o2, %g2 |
62 | BITOP_POST_BARRIER | ||
63 | clr %o0 | 69 | clr %o0 |
70 | movrne %g2, 1, %o0 | ||
71 | BITOP_POST_BARRIER | ||
64 | retl | 72 | retl |
65 | movrne %g2, 1, %o0 | 73 | nop |
66 | .size test_and_clear_bit, .-test_and_clear_bit | 74 | .size test_and_clear_bit, .-test_and_clear_bit |
67 | 75 | ||
68 | .globl test_and_change_bit | 76 | .globl test_and_change_bit |
@@ -81,10 +89,11 @@ test_and_change_bit: /* %o0=nr, %o1=addr */ | |||
81 | cmp %g7, %g1 | 89 | cmp %g7, %g1 |
82 | bne,pn %xcc, 1b | 90 | bne,pn %xcc, 1b |
83 | and %g7, %o2, %g2 | 91 | and %g7, %o2, %g2 |
84 | BITOP_POST_BARRIER | ||
85 | clr %o0 | 92 | clr %o0 |
93 | movrne %g2, 1, %o0 | ||
94 | BITOP_POST_BARRIER | ||
86 | retl | 95 | retl |
87 | movrne %g2, 1, %o0 | 96 | nop |
88 | .size test_and_change_bit, .-test_and_change_bit | 97 | .size test_and_change_bit, .-test_and_change_bit |
89 | 98 | ||
90 | .globl set_bit | 99 | .globl set_bit |
diff --git a/arch/sparc64/lib/debuglocks.c b/arch/sparc64/lib/debuglocks.c index c421e0c65325..f03344cf784e 100644 --- a/arch/sparc64/lib/debuglocks.c +++ b/arch/sparc64/lib/debuglocks.c | |||
@@ -252,8 +252,9 @@ wlock_again: | |||
252 | " andn %%g1, %%g3, %%g7\n" | 252 | " andn %%g1, %%g3, %%g7\n" |
253 | " casx [%0], %%g1, %%g7\n" | 253 | " casx [%0], %%g1, %%g7\n" |
254 | " cmp %%g1, %%g7\n" | 254 | " cmp %%g1, %%g7\n" |
255 | " membar #StoreLoad | #StoreStore\n" | ||
255 | " bne,pn %%xcc, 1b\n" | 256 | " bne,pn %%xcc, 1b\n" |
256 | " membar #StoreLoad | #StoreStore" | 257 | " nop" |
257 | : /* no outputs */ | 258 | : /* no outputs */ |
258 | : "r" (&(rw->lock)) | 259 | : "r" (&(rw->lock)) |
259 | : "g3", "g1", "g7", "cc", "memory"); | 260 | : "g3", "g1", "g7", "cc", "memory"); |
@@ -351,8 +352,9 @@ int _do_write_trylock (rwlock_t *rw, char *str) | |||
351 | " andn %%g1, %%g3, %%g7\n" | 352 | " andn %%g1, %%g3, %%g7\n" |
352 | " casx [%0], %%g1, %%g7\n" | 353 | " casx [%0], %%g1, %%g7\n" |
353 | " cmp %%g1, %%g7\n" | 354 | " cmp %%g1, %%g7\n" |
355 | " membar #StoreLoad | #StoreStore\n" | ||
354 | " bne,pn %%xcc, 1b\n" | 356 | " bne,pn %%xcc, 1b\n" |
355 | " membar #StoreLoad | #StoreStore" | 357 | " nop" |
356 | : /* no outputs */ | 358 | : /* no outputs */ |
357 | : "r" (&(rw->lock)) | 359 | : "r" (&(rw->lock)) |
358 | : "g3", "g1", "g7", "cc", "memory"); | 360 | : "g3", "g1", "g7", "cc", "memory"); |
diff --git a/arch/sparc64/lib/dec_and_lock.S b/arch/sparc64/lib/dec_and_lock.S index 7e6fdaebedba..8ee288dd0afc 100644 --- a/arch/sparc64/lib/dec_and_lock.S +++ b/arch/sparc64/lib/dec_and_lock.S | |||
@@ -48,8 +48,9 @@ start_to_zero: | |||
48 | #endif | 48 | #endif |
49 | to_zero: | 49 | to_zero: |
50 | ldstub [%o1], %g3 | 50 | ldstub [%o1], %g3 |
51 | membar #StoreLoad | #StoreStore | ||
51 | brnz,pn %g3, spin_on_lock | 52 | brnz,pn %g3, spin_on_lock |
52 | membar #StoreLoad | #StoreStore | 53 | nop |
53 | loop2: cas [%o0], %g2, %g7 /* ASSERT(g7 == 0) */ | 54 | loop2: cas [%o0], %g2, %g7 /* ASSERT(g7 == 0) */ |
54 | cmp %g2, %g7 | 55 | cmp %g2, %g7 |
55 | 56 | ||
@@ -71,8 +72,9 @@ loop2: cas [%o0], %g2, %g7 /* ASSERT(g7 == 0) */ | |||
71 | nop | 72 | nop |
72 | spin_on_lock: | 73 | spin_on_lock: |
73 | ldub [%o1], %g3 | 74 | ldub [%o1], %g3 |
75 | membar #LoadLoad | ||
74 | brnz,pt %g3, spin_on_lock | 76 | brnz,pt %g3, spin_on_lock |
75 | membar #LoadLoad | 77 | nop |
76 | ba,pt %xcc, to_zero | 78 | ba,pt %xcc, to_zero |
77 | nop | 79 | nop |
78 | nop | 80 | nop |
diff --git a/arch/sparc64/lib/rwsem.S b/arch/sparc64/lib/rwsem.S index 174ff7b9164c..75f0e6b951d6 100644 --- a/arch/sparc64/lib/rwsem.S +++ b/arch/sparc64/lib/rwsem.S | |||
@@ -17,8 +17,9 @@ __down_read: | |||
17 | bne,pn %icc, 1b | 17 | bne,pn %icc, 1b |
18 | add %g7, 1, %g7 | 18 | add %g7, 1, %g7 |
19 | cmp %g7, 0 | 19 | cmp %g7, 0 |
20 | membar #StoreLoad | #StoreStore | ||
20 | bl,pn %icc, 3f | 21 | bl,pn %icc, 3f |
21 | membar #StoreLoad | #StoreStore | 22 | nop |
22 | 2: | 23 | 2: |
23 | retl | 24 | retl |
24 | nop | 25 | nop |
@@ -57,8 +58,9 @@ __down_write: | |||
57 | cmp %g3, %g7 | 58 | cmp %g3, %g7 |
58 | bne,pn %icc, 1b | 59 | bne,pn %icc, 1b |
59 | cmp %g7, 0 | 60 | cmp %g7, 0 |
61 | membar #StoreLoad | #StoreStore | ||
60 | bne,pn %icc, 3f | 62 | bne,pn %icc, 3f |
61 | membar #StoreLoad | #StoreStore | 63 | nop |
62 | 2: retl | 64 | 2: retl |
63 | nop | 65 | nop |
64 | 3: | 66 | 3: |
@@ -97,8 +99,9 @@ __up_read: | |||
97 | cmp %g1, %g7 | 99 | cmp %g1, %g7 |
98 | bne,pn %icc, 1b | 100 | bne,pn %icc, 1b |
99 | cmp %g7, 0 | 101 | cmp %g7, 0 |
102 | membar #StoreLoad | #StoreStore | ||
100 | bl,pn %icc, 3f | 103 | bl,pn %icc, 3f |
101 | membar #StoreLoad | #StoreStore | 104 | nop |
102 | 2: retl | 105 | 2: retl |
103 | nop | 106 | nop |
104 | 3: sethi %hi(RWSEM_ACTIVE_MASK), %g1 | 107 | 3: sethi %hi(RWSEM_ACTIVE_MASK), %g1 |
@@ -126,8 +129,9 @@ __up_write: | |||
126 | bne,pn %icc, 1b | 129 | bne,pn %icc, 1b |
127 | sub %g7, %g1, %g7 | 130 | sub %g7, %g1, %g7 |
128 | cmp %g7, 0 | 131 | cmp %g7, 0 |
132 | membar #StoreLoad | #StoreStore | ||
129 | bl,pn %icc, 3f | 133 | bl,pn %icc, 3f |
130 | membar #StoreLoad | #StoreStore | 134 | nop |
131 | 2: | 135 | 2: |
132 | retl | 136 | retl |
133 | nop | 137 | nop |
@@ -151,8 +155,9 @@ __downgrade_write: | |||
151 | bne,pn %icc, 1b | 155 | bne,pn %icc, 1b |
152 | sub %g7, %g1, %g7 | 156 | sub %g7, %g1, %g7 |
153 | cmp %g7, 0 | 157 | cmp %g7, 0 |
158 | membar #StoreLoad | #StoreStore | ||
154 | bl,pn %icc, 3f | 159 | bl,pn %icc, 3f |
155 | membar #StoreLoad | #StoreStore | 160 | nop |
156 | 2: | 161 | 2: |
157 | retl | 162 | retl |
158 | nop | 163 | nop |
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 9c5222075da9..8fc413cb6acd 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c | |||
@@ -136,8 +136,9 @@ static __inline__ void set_dcache_dirty(struct page *page, int this_cpu) | |||
136 | "or %%g1, %0, %%g1\n\t" | 136 | "or %%g1, %0, %%g1\n\t" |
137 | "casx [%2], %%g7, %%g1\n\t" | 137 | "casx [%2], %%g7, %%g1\n\t" |
138 | "cmp %%g7, %%g1\n\t" | 138 | "cmp %%g7, %%g1\n\t" |
139 | "membar #StoreLoad | #StoreStore\n\t" | ||
139 | "bne,pn %%xcc, 1b\n\t" | 140 | "bne,pn %%xcc, 1b\n\t" |
140 | " membar #StoreLoad | #StoreStore" | 141 | " nop" |
141 | : /* no outputs */ | 142 | : /* no outputs */ |
142 | : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags) | 143 | : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags) |
143 | : "g1", "g7"); | 144 | : "g1", "g7"); |
@@ -157,8 +158,9 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c | |||
157 | " andn %%g7, %1, %%g1\n\t" | 158 | " andn %%g7, %1, %%g1\n\t" |
158 | "casx [%2], %%g7, %%g1\n\t" | 159 | "casx [%2], %%g7, %%g1\n\t" |
159 | "cmp %%g7, %%g1\n\t" | 160 | "cmp %%g7, %%g1\n\t" |
161 | "membar #StoreLoad | #StoreStore\n\t" | ||
160 | "bne,pn %%xcc, 1b\n\t" | 162 | "bne,pn %%xcc, 1b\n\t" |
161 | " membar #StoreLoad | #StoreStore\n" | 163 | " nop\n" |
162 | "2:" | 164 | "2:" |
163 | : /* no outputs */ | 165 | : /* no outputs */ |
164 | : "r" (cpu), "r" (mask), "r" (&page->flags), | 166 | : "r" (cpu), "r" (mask), "r" (&page->flags), |
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S index 7a0934321010..7a2431d3abc7 100644 --- a/arch/sparc64/mm/ultra.S +++ b/arch/sparc64/mm/ultra.S | |||
@@ -266,8 +266,9 @@ __cheetah_flush_tlb_pending: /* 22 insns */ | |||
266 | andn %o3, 1, %o3 | 266 | andn %o3, 1, %o3 |
267 | stxa %g0, [%o3] ASI_IMMU_DEMAP | 267 | stxa %g0, [%o3] ASI_IMMU_DEMAP |
268 | 2: stxa %g0, [%o3] ASI_DMMU_DEMAP | 268 | 2: stxa %g0, [%o3] ASI_DMMU_DEMAP |
269 | membar #Sync | ||
269 | brnz,pt %o1, 1b | 270 | brnz,pt %o1, 1b |
270 | membar #Sync | 271 | nop |
271 | stxa %g2, [%o4] ASI_DMMU | 272 | stxa %g2, [%o4] ASI_DMMU |
272 | flush %g6 | 273 | flush %g6 |
273 | wrpr %g0, 0, %tl | 274 | wrpr %g0, 0, %tl |
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c index 4e680f87a75f..acd2a778ebe6 100644 --- a/arch/x86_64/kernel/kprobes.c +++ b/arch/x86_64/kernel/kprobes.c | |||
@@ -38,7 +38,7 @@ | |||
38 | #include <linux/string.h> | 38 | #include <linux/string.h> |
39 | #include <linux/slab.h> | 39 | #include <linux/slab.h> |
40 | #include <linux/preempt.h> | 40 | #include <linux/preempt.h> |
41 | #include <linux/moduleloader.h> | 41 | |
42 | #include <asm/cacheflush.h> | 42 | #include <asm/cacheflush.h> |
43 | #include <asm/pgtable.h> | 43 | #include <asm/pgtable.h> |
44 | #include <asm/kdebug.h> | 44 | #include <asm/kdebug.h> |
@@ -51,8 +51,6 @@ static struct kprobe *kprobe_prev; | |||
51 | static unsigned long kprobe_status_prev, kprobe_old_rflags_prev, kprobe_saved_rflags_prev; | 51 | static unsigned long kprobe_status_prev, kprobe_old_rflags_prev, kprobe_saved_rflags_prev; |
52 | static struct pt_regs jprobe_saved_regs; | 52 | static struct pt_regs jprobe_saved_regs; |
53 | static long *jprobe_saved_rsp; | 53 | static long *jprobe_saved_rsp; |
54 | static kprobe_opcode_t *get_insn_slot(void); | ||
55 | static void free_insn_slot(kprobe_opcode_t *slot); | ||
56 | void jprobe_return_end(void); | 54 | void jprobe_return_end(void); |
57 | 55 | ||
58 | /* copy of the kernel stack at the probe fire time */ | 56 | /* copy of the kernel stack at the probe fire time */ |
@@ -274,48 +272,23 @@ static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) | |||
274 | regs->rip = (unsigned long)p->ainsn.insn; | 272 | regs->rip = (unsigned long)p->ainsn.insn; |
275 | } | 273 | } |
276 | 274 | ||
277 | struct task_struct *arch_get_kprobe_task(void *ptr) | ||
278 | { | ||
279 | return ((struct thread_info *) (((unsigned long) ptr) & | ||
280 | (~(THREAD_SIZE -1))))->task; | ||
281 | } | ||
282 | |||
283 | void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs) | 275 | void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs) |
284 | { | 276 | { |
285 | unsigned long *sara = (unsigned long *)regs->rsp; | 277 | unsigned long *sara = (unsigned long *)regs->rsp; |
286 | struct kretprobe_instance *ri; | 278 | struct kretprobe_instance *ri; |
287 | static void *orig_ret_addr; | 279 | |
280 | if ((ri = get_free_rp_inst(rp)) != NULL) { | ||
281 | ri->rp = rp; | ||
282 | ri->task = current; | ||
283 | ri->ret_addr = (kprobe_opcode_t *) *sara; | ||
288 | 284 | ||
289 | /* | ||
290 | * Save the return address when the return probe hits | ||
291 | * the first time, and use it to populate the (krprobe | ||
292 | * instance)->ret_addr for subsequent return probes at | ||
293 | * the same addrress since stack address would have | ||
294 | * the kretprobe_trampoline by then. | ||
295 | */ | ||
296 | if (((void*) *sara) != kretprobe_trampoline) | ||
297 | orig_ret_addr = (void*) *sara; | ||
298 | |||
299 | if ((ri = get_free_rp_inst(rp)) != NULL) { | ||
300 | ri->rp = rp; | ||
301 | ri->stack_addr = sara; | ||
302 | ri->ret_addr = orig_ret_addr; | ||
303 | add_rp_inst(ri); | ||
304 | /* Replace the return addr with trampoline addr */ | 285 | /* Replace the return addr with trampoline addr */ |
305 | *sara = (unsigned long) &kretprobe_trampoline; | 286 | *sara = (unsigned long) &kretprobe_trampoline; |
306 | } else { | ||
307 | rp->nmissed++; | ||
308 | } | ||
309 | } | ||
310 | 287 | ||
311 | void arch_kprobe_flush_task(struct task_struct *tk) | 288 | add_rp_inst(ri); |
312 | { | 289 | } else { |
313 | struct kretprobe_instance *ri; | 290 | rp->nmissed++; |
314 | while ((ri = get_rp_inst_tsk(tk)) != NULL) { | 291 | } |
315 | *((unsigned long *)(ri->stack_addr)) = | ||
316 | (unsigned long) ri->ret_addr; | ||
317 | recycle_rp_inst(ri); | ||
318 | } | ||
319 | } | 292 | } |
320 | 293 | ||
321 | /* | 294 | /* |
@@ -428,36 +401,59 @@ no_kprobe: | |||
428 | */ | 401 | */ |
429 | int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) | 402 | int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) |
430 | { | 403 | { |
431 | struct task_struct *tsk; | 404 | struct kretprobe_instance *ri = NULL; |
432 | struct kretprobe_instance *ri; | 405 | struct hlist_head *head; |
433 | struct hlist_head *head; | 406 | struct hlist_node *node, *tmp; |
434 | struct hlist_node *node; | 407 | unsigned long orig_ret_address = 0; |
435 | unsigned long *sara = (unsigned long *)regs->rsp - 1; | 408 | unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; |
436 | |||
437 | tsk = arch_get_kprobe_task(sara); | ||
438 | head = kretprobe_inst_table_head(tsk); | ||
439 | |||
440 | hlist_for_each_entry(ri, node, head, hlist) { | ||
441 | if (ri->stack_addr == sara && ri->rp) { | ||
442 | if (ri->rp->handler) | ||
443 | ri->rp->handler(ri, regs); | ||
444 | } | ||
445 | } | ||
446 | return 0; | ||
447 | } | ||
448 | 409 | ||
449 | void trampoline_post_handler(struct kprobe *p, struct pt_regs *regs, | 410 | head = kretprobe_inst_table_head(current); |
450 | unsigned long flags) | ||
451 | { | ||
452 | struct kretprobe_instance *ri; | ||
453 | /* RA already popped */ | ||
454 | unsigned long *sara = ((unsigned long *)regs->rsp) - 1; | ||
455 | 411 | ||
456 | while ((ri = get_rp_inst(sara))) { | 412 | /* |
457 | regs->rip = (unsigned long)ri->ret_addr; | 413 | * It is possible to have multiple instances associated with a given |
414 | * task either because an multiple functions in the call path | ||
415 | * have a return probe installed on them, and/or more then one return | ||
416 | * return probe was registered for a target function. | ||
417 | * | ||
418 | * We can handle this because: | ||
419 | * - instances are always inserted at the head of the list | ||
420 | * - when multiple return probes are registered for the same | ||
421 | * function, the first instance's ret_addr will point to the | ||
422 | * real return address, and all the rest will point to | ||
423 | * kretprobe_trampoline | ||
424 | */ | ||
425 | hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { | ||
426 | if (ri->task != current) | ||
427 | /* another task is sharing our hash bucket */ | ||
428 | continue; | ||
429 | |||
430 | if (ri->rp && ri->rp->handler) | ||
431 | ri->rp->handler(ri, regs); | ||
432 | |||
433 | orig_ret_address = (unsigned long)ri->ret_addr; | ||
458 | recycle_rp_inst(ri); | 434 | recycle_rp_inst(ri); |
435 | |||
436 | if (orig_ret_address != trampoline_address) | ||
437 | /* | ||
438 | * This is the real return address. Any other | ||
439 | * instances associated with this task are for | ||
440 | * other calls deeper on the call stack | ||
441 | */ | ||
442 | break; | ||
459 | } | 443 | } |
460 | regs->eflags &= ~TF_MASK; | 444 | |
445 | BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address)); | ||
446 | regs->rip = orig_ret_address; | ||
447 | |||
448 | unlock_kprobes(); | ||
449 | preempt_enable_no_resched(); | ||
450 | |||
451 | /* | ||
452 | * By returning a non-zero value, we are telling | ||
453 | * kprobe_handler() that we have handled unlocking | ||
454 | * and re-enabling preemption. | ||
455 | */ | ||
456 | return 1; | ||
461 | } | 457 | } |
462 | 458 | ||
463 | /* | 459 | /* |
@@ -550,8 +546,7 @@ int post_kprobe_handler(struct pt_regs *regs) | |||
550 | current_kprobe->post_handler(current_kprobe, regs, 0); | 546 | current_kprobe->post_handler(current_kprobe, regs, 0); |
551 | } | 547 | } |
552 | 548 | ||
553 | if (current_kprobe->post_handler != trampoline_post_handler) | 549 | resume_execution(current_kprobe, regs); |
554 | resume_execution(current_kprobe, regs); | ||
555 | regs->eflags |= kprobe_saved_rflags; | 550 | regs->eflags |= kprobe_saved_rflags; |
556 | 551 | ||
557 | /* Restore the original saved kprobes variables and continue. */ | 552 | /* Restore the original saved kprobes variables and continue. */ |
@@ -682,111 +677,12 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) | |||
682 | return 0; | 677 | return 0; |
683 | } | 678 | } |
684 | 679 | ||
685 | /* | 680 | static struct kprobe trampoline_p = { |
686 | * kprobe->ainsn.insn points to the copy of the instruction to be single-stepped. | 681 | .addr = (kprobe_opcode_t *) &kretprobe_trampoline, |
687 | * By default on x86_64, pages we get from kmalloc or vmalloc are not | 682 | .pre_handler = trampoline_probe_handler |
688 | * executable. Single-stepping an instruction on such a page yields an | ||
689 | * oops. So instead of storing the instruction copies in their respective | ||
690 | * kprobe objects, we allocate a page, map it executable, and store all the | ||
691 | * instruction copies there. (We can allocate additional pages if somebody | ||
692 | * inserts a huge number of probes.) Each page can hold up to INSNS_PER_PAGE | ||
693 | * instruction slots, each of which is MAX_INSN_SIZE*sizeof(kprobe_opcode_t) | ||
694 | * bytes. | ||
695 | */ | ||
696 | #define INSNS_PER_PAGE (PAGE_SIZE/(MAX_INSN_SIZE*sizeof(kprobe_opcode_t))) | ||
697 | struct kprobe_insn_page { | ||
698 | struct hlist_node hlist; | ||
699 | kprobe_opcode_t *insns; /* page of instruction slots */ | ||
700 | char slot_used[INSNS_PER_PAGE]; | ||
701 | int nused; | ||
702 | }; | 683 | }; |
703 | 684 | ||
704 | static struct hlist_head kprobe_insn_pages; | 685 | int __init arch_init(void) |
705 | |||
706 | /** | ||
707 | * get_insn_slot() - Find a slot on an executable page for an instruction. | ||
708 | * We allocate an executable page if there's no room on existing ones. | ||
709 | */ | ||
710 | static kprobe_opcode_t *get_insn_slot(void) | ||
711 | { | ||
712 | struct kprobe_insn_page *kip; | ||
713 | struct hlist_node *pos; | ||
714 | |||
715 | hlist_for_each(pos, &kprobe_insn_pages) { | ||
716 | kip = hlist_entry(pos, struct kprobe_insn_page, hlist); | ||
717 | if (kip->nused < INSNS_PER_PAGE) { | ||
718 | int i; | ||
719 | for (i = 0; i < INSNS_PER_PAGE; i++) { | ||
720 | if (!kip->slot_used[i]) { | ||
721 | kip->slot_used[i] = 1; | ||
722 | kip->nused++; | ||
723 | return kip->insns + (i*MAX_INSN_SIZE); | ||
724 | } | ||
725 | } | ||
726 | /* Surprise! No unused slots. Fix kip->nused. */ | ||
727 | kip->nused = INSNS_PER_PAGE; | ||
728 | } | ||
729 | } | ||
730 | |||
731 | /* All out of space. Need to allocate a new page. Use slot 0.*/ | ||
732 | kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL); | ||
733 | if (!kip) { | ||
734 | return NULL; | ||
735 | } | ||
736 | |||
737 | /* | ||
738 | * For the %rip-relative displacement fixups to be doable, we | ||
739 | * need our instruction copy to be within +/- 2GB of any data it | ||
740 | * might access via %rip. That is, within 2GB of where the | ||
741 | * kernel image and loaded module images reside. So we allocate | ||
742 | * a page in the module loading area. | ||
743 | */ | ||
744 | kip->insns = module_alloc(PAGE_SIZE); | ||
745 | if (!kip->insns) { | ||
746 | kfree(kip); | ||
747 | return NULL; | ||
748 | } | ||
749 | INIT_HLIST_NODE(&kip->hlist); | ||
750 | hlist_add_head(&kip->hlist, &kprobe_insn_pages); | ||
751 | memset(kip->slot_used, 0, INSNS_PER_PAGE); | ||
752 | kip->slot_used[0] = 1; | ||
753 | kip->nused = 1; | ||
754 | return kip->insns; | ||
755 | } | ||
756 | |||
757 | /** | ||
758 | * free_insn_slot() - Free instruction slot obtained from get_insn_slot(). | ||
759 | */ | ||
760 | static void free_insn_slot(kprobe_opcode_t *slot) | ||
761 | { | 686 | { |
762 | struct kprobe_insn_page *kip; | 687 | return register_kprobe(&trampoline_p); |
763 | struct hlist_node *pos; | ||
764 | |||
765 | hlist_for_each(pos, &kprobe_insn_pages) { | ||
766 | kip = hlist_entry(pos, struct kprobe_insn_page, hlist); | ||
767 | if (kip->insns <= slot | ||
768 | && slot < kip->insns+(INSNS_PER_PAGE*MAX_INSN_SIZE)) { | ||
769 | int i = (slot - kip->insns) / MAX_INSN_SIZE; | ||
770 | kip->slot_used[i] = 0; | ||
771 | kip->nused--; | ||
772 | if (kip->nused == 0) { | ||
773 | /* | ||
774 | * Page is no longer in use. Free it unless | ||
775 | * it's the last one. We keep the last one | ||
776 | * so as not to have to set it up again the | ||
777 | * next time somebody inserts a probe. | ||
778 | */ | ||
779 | hlist_del(&kip->hlist); | ||
780 | if (hlist_empty(&kprobe_insn_pages)) { | ||
781 | INIT_HLIST_NODE(&kip->hlist); | ||
782 | hlist_add_head(&kip->hlist, | ||
783 | &kprobe_insn_pages); | ||
784 | } else { | ||
785 | module_free(NULL, kip->insns); | ||
786 | kfree(kip); | ||
787 | } | ||
788 | } | ||
789 | return; | ||
790 | } | ||
791 | } | ||
792 | } | 688 | } |
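For illustration only, and not part of the patch: a minimal module sketch of what the kretprobe_trampoline machinery above is for. register_kretprobe()/unregister_kretprobe() come from the generic half of this series (kernel/kprobes.c); do_fork is purely a stand-in target, and the probed address must resolve to kernel text on the running system.

/*
 * Sketch of a return-probe client.  When the probed function returns,
 * control passes through kretprobe_trampoline, and the
 * trampoline_probe_handler() registered above invokes this handler
 * with the saved register state.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/sched.h>	/* do_fork() prototype; illustrative target */

static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	/* On x86_64 the probed function's return value is in %rax. */
	printk("do_fork returned %ld\n", (long) regs->rax);
	return 0;
}

static struct kretprobe my_rp = {
	.handler   = ret_handler,
	.maxactive = 20,	/* instances preallocated for concurrent calls */
};

static int __init rp_init(void)
{
	my_rp.kp.addr = (kprobe_opcode_t *) do_fork;
	return register_kretprobe(&my_rp);
}

static void __exit rp_exit(void)
{
	unregister_kretprobe(&my_rp);
}

module_init(rp_init);
module_exit(rp_exit);
MODULE_LICENSE("GPL");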
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c index 1d91271796e5..7577f9d7a75d 100644 --- a/arch/x86_64/kernel/process.c +++ b/arch/x86_64/kernel/process.c | |||
@@ -482,6 +482,33 @@ out: | |||
482 | } | 482 | } |
483 | 483 | ||
484 | /* | 484 | /* |
485 | * This function decides whether the context switch from prev to | ||
486 | * next needs to tweak the TSC-disable bit (TSD) in CR4. | ||
487 | */ | ||
488 | static inline void disable_tsc(struct task_struct *prev_p, | ||
489 | struct task_struct *next_p) | ||
490 | { | ||
491 | struct thread_info *prev, *next; | ||
492 | |||
493 | /* | ||
494 | * gcc should eliminate the ->thread_info dereference if | ||
495 | * has_secure_computing returns 0 at compile time (SECCOMP=n). | ||
496 | */ | ||
497 | prev = prev_p->thread_info; | ||
498 | next = next_p->thread_info; | ||
499 | |||
500 | if (has_secure_computing(prev) || has_secure_computing(next)) { | ||
501 | /* slow path here */ | ||
502 | if (has_secure_computing(prev) && | ||
503 | !has_secure_computing(next)) { | ||
504 | write_cr4(read_cr4() & ~X86_CR4_TSD); | ||
505 | } else if (!has_secure_computing(prev) && | ||
506 | has_secure_computing(next)) | ||
507 | write_cr4(read_cr4() | X86_CR4_TSD); | ||
508 | } | ||
509 | } | ||
510 | |||
511 | /* | ||
485 | * This special macro can be used to load a debugging register | 512 | * This special macro can be used to load a debugging register |
486 | */ | 513 | */ |
487 | #define loaddebug(thread,r) set_debug(thread->debugreg ## r, r) | 514 | #define loaddebug(thread,r) set_debug(thread->debugreg ## r, r) |
@@ -599,6 +626,8 @@ struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct * | |||
599 | } | 626 | } |
600 | } | 627 | } |
601 | 628 | ||
629 | disable_tsc(prev_p, next_p); | ||
630 | |||
602 | return prev_p; | 631 | return prev_p; |
603 | } | 632 | } |
604 | 633 | ||
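For illustration only, and not part of the patch: a small userspace program showing the effect of the CR4.TSD bit that disable_tsc() toggles above. It assumes CONFIG_SECCOMP=y and this tree's /proc/<pid>/seccomp interface for entering seccomp mode 1 (later kernels switched to a prctl() interface); the RDTSC behavior itself is architectural.

/*
 * Sketch: in seccomp mode 1 only read/write/exit/sigreturn are allowed,
 * and with this patch CR4.TSD is set whenever such a task is switched
 * in, so RDTSC at CPL 3 raises #GP and the task dies with SIGSEGV.
 */
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>

static unsigned long long rdtsc(void)
{
	unsigned int lo, hi;

	/* RDTSC returns the 64-bit time-stamp counter in EDX:EAX. */
	__asm__ __volatile__("rdtsc" : "=a" (lo), "=d" (hi));
	return ((unsigned long long) hi << 32) | lo;
}

int main(void)
{
	char msg[64];
	int fd, n;

	n = snprintf(msg, sizeof(msg), "TSC before seccomp: %llu\n", rdtsc());
	write(1, msg, n);

	fd = open("/proc/self/seccomp", O_WRONLY);
	if (fd < 0)
		return 1;
	write(fd, "1", 1);	/* enter seccomp mode 1 */

	/*
	 * disable_tsc() runs at context switch, so TSD takes effect once
	 * this task has been scheduled out and back in; spin until that
	 * happens.  The RDTSC then faults and the task is killed.
	 */
	for (;;)
		rdtsc();
}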