Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--   arch/powerpc/kernel/Makefile          |    1
-rw-r--r--   arch/powerpc/kernel/cputable.c        |   31
-rw-r--r--   arch/powerpc/kernel/entry_32.S        |    6
-rw-r--r--   arch/powerpc/kernel/head_fsl_booke.S  |  187
-rw-r--r--   arch/powerpc/kernel/idle.c            |    2
-rw-r--r--   arch/powerpc/kernel/iommu.c           |   35
-rw-r--r--   arch/powerpc/kernel/kgdb.c            |  410
-rw-r--r--   arch/powerpc/kernel/kprobes.c         |    6
-rw-r--r--   arch/powerpc/kernel/lparcfg.c         |  386
-rw-r--r--   arch/powerpc/kernel/machine_kexec.c   |    2
-rw-r--r--   arch/powerpc/kernel/pci-common.c      |    1
-rw-r--r--   arch/powerpc/kernel/process.c         |   46
-rw-r--r--   arch/powerpc/kernel/prom_init.c       |    9
-rw-r--r--   arch/powerpc/kernel/prom_parse.c      |   44
-rw-r--r--   arch/powerpc/kernel/ptrace.c          |   72
-rw-r--r--   arch/powerpc/kernel/rtas_flash.c      |    2
-rw-r--r--   arch/powerpc/kernel/setup_32.c        |   16
-rw-r--r--   arch/powerpc/kernel/signal.c          |    6
-rw-r--r--   arch/powerpc/kernel/stacktrace.c      |    2
-rw-r--r--   arch/powerpc/kernel/suspend.c         |    1
-rw-r--r--   arch/powerpc/kernel/sysfs.c           |   18
-rw-r--r--   arch/powerpc/kernel/traps.c           |   16
-rw-r--r--   arch/powerpc/kernel/vio.c             | 1033
-rw-r--r--   arch/powerpc/kernel/vmlinux.lds.S     |   31
24 files changed, 2022 insertions, 341 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index bf0b1fd0ec34..1a4094704b1f 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -74,6 +74,7 @@ obj-y += time.o prom.o traps.o setup-common.o \
 				   misc_$(CONFIG_WORD_SIZE).o
 obj-$(CONFIG_PPC32)		+= entry_32.o setup_32.o
 obj-$(CONFIG_PPC64)		+= dma_64.o iommu.o
+obj-$(CONFIG_KGDB)		+= kgdb.o
 obj-$(CONFIG_PPC_MULTIPLATFORM)	+= prom_init.o
 obj-$(CONFIG_MODULES)		+= ppc_ksyms.o
 obj-$(CONFIG_BOOTX_TEXT)	+= btext.o
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index f7f3c215d06f..25c273c761d1 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -23,6 +23,9 @@
 struct cpu_spec* cur_cpu_spec = NULL;
 EXPORT_SYMBOL(cur_cpu_spec);
 
+/* The platform string corresponding to the real PVR */
+const char *powerpc_base_platform;
+
 /* NOTE:
  * Unlike ppc32, ppc64 will only call this once for the boot CPU, it's
  * the responsibility of the appropriate CPU save/restore functions to
@@ -355,6 +358,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
 		.machine_check		= machine_check_generic,
+		.oprofile_cpu_type	= "ppc64/compat-power5+",
 		.platform		= "power5+",
 	},
 	{ /* Power6 */
@@ -386,6 +390,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
 		.machine_check		= machine_check_generic,
+		.oprofile_cpu_type	= "ppc64/compat-power6",
 		.platform		= "power6",
 	},
 	{ /* 2.06-compliant processor, i.e. Power7 "architected" mode */
@@ -397,6 +402,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
 		.machine_check		= machine_check_generic,
+		.oprofile_cpu_type	= "ppc64/compat-power7",
 		.platform		= "power7",
 	},
 	{ /* Power7 */
@@ -1629,9 +1635,34 @@ struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
 		t->cpu_setup = s->cpu_setup;
 		t->cpu_restore = s->cpu_restore;
 		t->platform = s->platform;
+		/*
+		 * If we have passed through this logic once
+		 * before and have pulled the default case
+		 * because the real PVR was not found inside
+		 * cpu_specs[], then we are possibly running in
+		 * compatibility mode. In that case, let the
+		 * oprofiler know which set of compatibility
+		 * counters to pull from by making sure the
+		 * oprofile_cpu_type string is set to that of
+		 * compatibility mode. If the oprofile_cpu_type
+		 * already has a value, then we are possibly
+		 * overriding a real PVR with a logical one, and,
+		 * in that case, keep the current value for
+		 * oprofile_cpu_type.
+		 */
+		if (t->oprofile_cpu_type == NULL)
+			t->oprofile_cpu_type = s->oprofile_cpu_type;
 	} else
 		*t = *s;
 	*PTRRELOC(&cur_cpu_spec) = &the_cpu_spec;
+
+	/*
+	 * Set the base platform string once; assumes
+	 * we're called with real pvr first.
+	 */
+	if (*PTRRELOC(&powerpc_base_platform) == NULL)
+		*PTRRELOC(&powerpc_base_platform) = t->platform;
+
 #if defined(CONFIG_PPC64) || defined(CONFIG_BOOKE)
 	/* ppc64 and booke expect identify_cpu to also call
 	 * setup_cpu for that processor.  I will consolidate
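
Note (illustrative, not part of the commit): with powerpc_base_platform capturing the real PVR's platform string once at boot, callers can tell when the CPU was later re-identified with a logical ("architected") PVR. A minimal sketch, assuming both symbols are visible through <asm/cputable.h> as exported here:

	/* Hypothetical helper: true when a logical PVR has overridden the
	 * real one, i.e. the live cpu_spec platform string differs from
	 * the boot-time base platform string.
	 */
	#include <linux/string.h>
	#include <asm/cputable.h>

	static inline int cpu_in_compat_mode(void)
	{
		return powerpc_base_platform &&
		       strcmp(powerpc_base_platform, cur_cpu_spec->platform) != 0;
	}
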
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index da52269aec1e..81c8324a4a3c 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -148,7 +148,7 @@ transfer_to_handler:
 	/* Check to see if the dbcr0 register is set up to debug.  Use the
 	   internal debug mode bit to do this. */
 	lwz	r12,THREAD_DBCR0(r12)
-	andis.	r12,r12,DBCR0_IDM@h
+	andis.	r12,r12,(DBCR0_IDM | DBSR_DAC1R | DBSR_DAC1W)@h
 	beq+	3f
 	/* From user and task is ptraced - load up global dbcr0 */
 	li	r12,-1			/* clear all pending debug events */
@@ -292,7 +292,7 @@ syscall_exit_cont:
 	/* If the process has its own DBCR0 value, load it up.  The internal
 	   debug mode bit tells us that dbcr0 should be loaded. */
 	lwz	r0,THREAD+THREAD_DBCR0(r2)
-	andis.	r10,r0,DBCR0_IDM@h
+	andis.	r10,r0,(DBCR0_IDM | DBSR_DAC1R | DBSR_DAC1W)@h
 	bnel-	load_dbcr0
 #endif
 #ifdef CONFIG_44x
@@ -720,7 +720,7 @@ restore_user:
 	/* Check whether this process has its own DBCR0 value.  The internal
 	   debug mode bit tells us that dbcr0 should be loaded. */
 	lwz	r0,THREAD+THREAD_DBCR0(r2)
-	andis.	r10,r0,DBCR0_IDM@h
+	andis.	r10,r0,(DBCR0_IDM | DBSR_DAC1R | DBSR_DAC1W)@h
 	bnel-	load_dbcr0
 #endif
 
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index c4268500e856..3cb52fa0eda3 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -151,16 +151,11 @@ skpinv: addi r6,r6,1 /* Increment */
 	/* Invalidate TLB0 */
 	li	r6,0x04
 	tlbivax	0,r6
-#ifdef CONFIG_SMP
-	tlbsync
-#endif
+	TLBSYNC
 	/* Invalidate TLB1 */
 	li	r6,0x0c
 	tlbivax	0,r6
-#ifdef CONFIG_SMP
-	tlbsync
-#endif
-	msync
+	TLBSYNC
 
 /* 3. Setup a temp mapping and jump to it */
 	andi.	r5, r3, 0x1	/* Find an entry not used and is non-zero */
@@ -238,10 +233,7 @@ skpinv: addi r6,r6,1 /* Increment */
 	/* Invalidate TLB1 */
 	li	r9,0x0c
 	tlbivax	0,r9
-#ifdef CONFIG_SMP
-	tlbsync
-#endif
-	msync
+	TLBSYNC
 
 /* 6. Setup KERNELBASE mapping in TLB1[0] */
 	lis	r6,0x1000	/* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
@@ -283,10 +275,7 @@ skpinv: addi r6,r6,1 /* Increment */
 	/* Invalidate TLB1 */
 	li	r9,0x0c
 	tlbivax	0,r9
-#ifdef CONFIG_SMP
-	tlbsync
-#endif
-	msync
+	TLBSYNC
 
 	/* Establish the interrupt vector offsets */
 	SET_IVOR(0,  CriticalInput);
@@ -483,90 +472,16 @@ interrupt_base:
 
 	/* Data Storage Interrupt */
 	START_EXCEPTION(DataStorage)
-	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
-	mtspr	SPRN_SPRG1, r11
-	mtspr	SPRN_SPRG4W, r12
-	mtspr	SPRN_SPRG5W, r13
-	mfcr	r11
-	mtspr	SPRN_SPRG7W, r11
-
-	/*
-	 * Check if it was a store fault, if not then bail
-	 * because a user tried to access a kernel or
-	 * read-protected page.  Otherwise, get the
-	 * offending address and handle it.
-	 */
-	mfspr	r10, SPRN_ESR
-	andis.	r10, r10, ESR_ST@h
-	beq	2f
-
-	mfspr	r10, SPRN_DEAR		/* Get faulting address */
-
-	/* If we are faulting a kernel address, we have to use the
-	 * kernel page tables.
-	 */
-	lis	r11, PAGE_OFFSET@h
-	cmplw	0, r10, r11
-	bge	2f
-
-	/* Get the PGD for the current thread */
-3:
-	mfspr	r11,SPRN_SPRG3
-	lwz	r11,PGDIR(r11)
-4:
-	FIND_PTE
-
-	/* Are _PAGE_USER & _PAGE_RW set & _PAGE_HWWRITE not? */
-	andi.	r13, r11, _PAGE_RW|_PAGE_USER|_PAGE_HWWRITE
-	cmpwi	0, r13, _PAGE_RW|_PAGE_USER
-	bne	2f			/* Bail if not */
-
-	/* Update 'changed'. */
-	ori	r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
-	stw	r11, PTE_FLAGS_OFFSET(r12) /* Update Linux page table */
-
-	/* MAS2 not updated as the entry does exist in the tlb, this
-	   fault taken to detect state transition (eg: COW -> DIRTY)
-	 */
-	andi.	r11, r11, _PAGE_HWEXEC
-	rlwimi	r11, r11, 31, 27, 27	/* SX <- _PAGE_HWEXEC */
-	ori	r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l /* set static perms */
-
-	/* update search PID in MAS6, AS = 0 */
-	mfspr	r12, SPRN_PID0
-	slwi	r12, r12, 16
-	mtspr	SPRN_MAS6, r12
-
-	/* find the TLB index that caused the fault.  It has to be here. */
-	tlbsx	0, r10
-
-	/* only update the perm bits, assume the RPN is fine */
-	mfspr	r12, SPRN_MAS3
-	rlwimi	r12, r11, 0, 20, 31
-	mtspr	SPRN_MAS3,r12
-	tlbwe
-
-	/* Done...restore registers and get out of here.  */
-	mfspr	r11, SPRN_SPRG7R
-	mtcr	r11
-	mfspr	r13, SPRN_SPRG5R
-	mfspr	r12, SPRN_SPRG4R
-	mfspr	r11, SPRN_SPRG1
-	mfspr	r10, SPRN_SPRG0
-	rfi				/* Force context change */
-
-2:
-	/*
-	 * The bailout.  Restore registers to pre-exception conditions
-	 * and call the heavyweights to help us out.
-	 */
-	mfspr	r11, SPRN_SPRG7R
-	mtcr	r11
-	mfspr	r13, SPRN_SPRG5R
-	mfspr	r12, SPRN_SPRG4R
-	mfspr	r11, SPRN_SPRG1
-	mfspr	r10, SPRN_SPRG0
-	b	data_access
+	NORMAL_EXCEPTION_PROLOG
+	mfspr	r5,SPRN_ESR		/* Grab the ESR, save it, pass arg3 */
+	stw	r5,_ESR(r11)
+	mfspr	r4,SPRN_DEAR		/* Grab the DEAR, save it, pass arg2 */
+	andis.	r10,r5,(ESR_ILK|ESR_DLK)@h
+	bne	1f
+	EXC_XFER_EE_LITE(0x0300, handle_page_fault)
+1:
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	EXC_XFER_EE_LITE(0x0300, CacheLockingException)
 
 	/* Instruction Storage Interrupt */
 	INSTRUCTION_STORAGE_EXCEPTION
@@ -645,15 +560,30 @@ interrupt_base:
 	lwz	r11,PGDIR(r11)
 
 4:
+	/* Mask of required permission bits. Note that while we
+	 * do copy ESR:ST to _PAGE_RW position as trying to write
+	 * to an RO page is pretty common, we don't do it with
+	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
+	 * event so I'd rather take the overhead when it happens
+	 * rather than adding an instruction here. We should measure
+	 * whether the whole thing is worth it in the first place
+	 * as we could avoid loading SPRN_ESR completely in the first
+	 * place...
+	 *
+	 * TODO: Is it worth doing that mfspr & rlwimi in the first
+	 * place or can we save a couple of instructions here ?
+	 */
+	mfspr	r12,SPRN_ESR
+	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
+	rlwimi	r13,r12,11,29,29
+
 	FIND_PTE
-	andi.	r13, r11, _PAGE_PRESENT	/* Is the page present? */
-	beq	2f			/* Bail if not present */
+	andc.	r13,r13,r11		/* Check permission */
+	bne	2f			/* Bail if permission mismatch */
 
 #ifdef CONFIG_PTE_64BIT
 	lwz	r13, 0(r12)
 #endif
-	ori	r11, r11, _PAGE_ACCESSED
-	stw	r11, PTE_FLAGS_OFFSET(r12)
 
 	/* Jump to common tlb load */
 	b	finish_tlb_load
@@ -667,7 +597,7 @@ interrupt_base:
 	mfspr	r12, SPRN_SPRG4R
 	mfspr	r11, SPRN_SPRG1
 	mfspr	r10, SPRN_SPRG0
-	b	data_access
+	b	DataStorage
 
 	/* Instruction TLB Error Interrupt */
 	/*
@@ -705,15 +635,16 @@ interrupt_base:
 	lwz	r11,PGDIR(r11)
 
 4:
+	/* Make up the required permissions */
+	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_HWEXEC
+
 	FIND_PTE
-	andi.	r13, r11, _PAGE_PRESENT	/* Is the page present? */
-	beq	2f			/* Bail if not present */
+	andc.	r13,r13,r11		/* Check permission */
+	bne	2f			/* Bail if permission mismatch */
 
 #ifdef CONFIG_PTE_64BIT
 	lwz	r13, 0(r12)
 #endif
-	ori	r11, r11, _PAGE_ACCESSED
-	stw	r11, PTE_FLAGS_OFFSET(r12)
 
 	/* Jump to common TLB load point */
 	b	finish_tlb_load
@@ -768,29 +699,13 @@ interrupt_base:
  * Local functions
  */
 
-/*
- * Data TLB exceptions will bail out to this point
- * if they can't resolve the lightweight TLB fault.
- */
-data_access:
-	NORMAL_EXCEPTION_PROLOG
-	mfspr	r5,SPRN_ESR		/* Grab the ESR, save it, pass arg3 */
-	stw	r5,_ESR(r11)
-	mfspr	r4,SPRN_DEAR		/* Grab the DEAR, save it, pass arg2 */
-	andis.	r10,r5,(ESR_ILK|ESR_DLK)@h
-	bne	1f
-	EXC_XFER_EE_LITE(0x0300, handle_page_fault)
-1:
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	EXC_XFER_EE_LITE(0x0300, CacheLockingException)
-
 /*
-
  * Both the instruction and data TLB miss get to this
  * point to load the TLB.
  *	r10 - EA of fault
  *	r11 - TLB (info from Linux PTE)
- *	r12, r13 - available to use
+ *	r12 - available to use
+ *	r13 - upper bits of PTE (if PTE_64BIT) or available to use
  *	CR5 - results of addr >= PAGE_OFFSET
  *	MAS0, MAS1 - loaded with proper value when we get here
  *	MAS2, MAS3 - will need additional info from Linux PTE
@@ -812,20 +727,14 @@ finish_tlb_load:
 #endif
 	mtspr	SPRN_MAS2, r12
 
-	bge	5, 1f
-
-	/* is user addr */
-	andi.	r12, r11, (_PAGE_USER | _PAGE_HWWRITE | _PAGE_HWEXEC)
+	li	r10, (_PAGE_HWEXEC | _PAGE_PRESENT)
+	rlwimi	r10, r11, 31, 29, 29	/* extract _PAGE_DIRTY into SW */
+	and	r12, r11, r10
 	andi.	r10, r11, _PAGE_USER	/* Test for _PAGE_USER */
-	srwi	r10, r12, 1
-	or	r12, r12, r10	/* Copy user perms into supervisor */
-	iseleq	r12, 0, r12
-	b	2f
-
-	/* is kernel addr */
-1:	rlwinm	r12, r11, 31, 29, 29	/* Extract _PAGE_HWWRITE into SW */
-	ori	r12, r12, (MAS3_SX | MAS3_SR)
+	slwi	r10, r12, 1
+	or	r10, r10, r12
+	iseleq	r12, r12, r10
 
 #ifdef CONFIG_PTE_64BIT
 2:	rlwimi	r12, r13, 24, 0, 7	/* grab RPN[32:39] */
 	rlwimi	r12, r11, 24, 8, 19	/* grab RPN[40:51] */
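
The rewritten DTLB/ITLB miss paths above replace the old present-bit test with a single required-permission mask check. A C-level sketch of the data-side logic (illustrative only; assumes the usual _PAGE_* and ESR_ST definitions, and relies on the fact that ESR[ST] rotated left by 11 lands on the _PAGE_RW bit, which is what the rlwimi above exploits):

	/* Not in the patch: what "li r13,...; rlwimi r13,r12,11,29,29;
	 * FIND_PTE; andc. r13,r13,r11; bne 2f" computes, expressed in C.
	 */
	static inline int dtlb_miss_permission_ok(unsigned long pte,
						  unsigned long esr)
	{
		unsigned long required = _PAGE_PRESENT | _PAGE_ACCESSED;

		if (esr & ESR_ST)	/* store faults also require write */
			required |= _PAGE_RW;

		/* any required bit missing -> bail to DataStorage */
		return (required & ~pte) == 0;
	}
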
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index c3cf0e8f3ac1..d308a9f70f1b 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -60,7 +60,7 @@ void cpu_idle(void)
 
 	set_thread_flag(TIF_POLLING_NRFLAG);
 	while (1) {
-		tick_nohz_stop_sched_tick();
+		tick_nohz_stop_sched_tick(1);
 		while (!need_resched() && !cpu_should_die()) {
 			ppc64_runlatch_off();
 
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 8c68ee9e5d1c..550a19399bfa 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -49,6 +49,8 @@ static int novmerge = 1;
 
 static int protect4gb = 1;
 
+static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
+
 static inline unsigned long iommu_num_pages(unsigned long vaddr,
 					    unsigned long slen)
 {
@@ -186,10 +188,12 @@ static unsigned long iommu_range_alloc(struct device *dev,
 static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 			      void *page, unsigned int npages,
 			      enum dma_data_direction direction,
-			      unsigned long mask, unsigned int align_order)
+			      unsigned long mask, unsigned int align_order,
+			      struct dma_attrs *attrs)
 {
 	unsigned long entry, flags;
 	dma_addr_t ret = DMA_ERROR_CODE;
+	int build_fail;
 
 	spin_lock_irqsave(&(tbl->it_lock), flags);
 
@@ -204,9 +208,21 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */
 
 	/* Put the TCEs in the HW table */
-	ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
-			 direction);
+	build_fail = ppc_md.tce_build(tbl, entry, npages,
+				      (unsigned long)page & IOMMU_PAGE_MASK,
+				      direction, attrs);
 
+	/* ppc_md.tce_build() only returns non-zero for transient errors.
+	 * Clean up the table bitmap in this case and return
+	 * DMA_ERROR_CODE. For all other errors the functionality is
+	 * not altered.
+	 */
+	if (unlikely(build_fail)) {
+		__iommu_free(tbl, ret, npages);
+
+		spin_unlock_irqrestore(&(tbl->it_lock), flags);
+		return DMA_ERROR_CODE;
+	}
 
 	/* Flush/invalidate TLB caches if necessary */
 	if (ppc_md.tce_flush)
@@ -275,7 +291,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 	dma_addr_t dma_next = 0, dma_addr;
 	unsigned long flags;
 	struct scatterlist *s, *outs, *segstart;
-	int outcount, incount, i;
+	int outcount, incount, i, build_fail = 0;
 	unsigned int align;
 	unsigned long handle;
 	unsigned int max_seg_size;
@@ -336,7 +352,11 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 			    npages, entry, dma_addr);
 
 		/* Insert into HW table */
-		ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK, direction);
+		build_fail = ppc_md.tce_build(tbl, entry, npages,
+					      vaddr & IOMMU_PAGE_MASK,
+					      direction, attrs);
+		if (unlikely(build_fail))
+			goto failure;
 
 		/* If we are in an open segment, try merging */
 		if (segstart != s) {
@@ -573,7 +593,8 @@ dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
 		align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
 
 	dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
-				 mask >> IOMMU_PAGE_SHIFT, align);
+				 mask >> IOMMU_PAGE_SHIFT, align,
+				 attrs);
 	if (dma_handle == DMA_ERROR_CODE) {
 		if (printk_ratelimit())  {
 			printk(KERN_INFO "iommu_alloc failed, "
@@ -642,7 +663,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
 	nio_pages = size >> IOMMU_PAGE_SHIFT;
 	io_order = get_iommu_order(size);
 	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
-			      mask >> IOMMU_PAGE_SHIFT, io_order);
+			      mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
 	if (mapping == DMA_ERROR_CODE) {
 		free_pages((unsigned long)ret, order);
 		return NULL;
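
The contract change worth noting here: ppc_md.tce_build() now returns an int and takes a struct dma_attrs *, and a non-zero return is treated as a transient failure that iommu_alloc() and iommu_map_sg() unwind. A sketch of what a conforming platform hook could look like (illustrative; hypervisor_map_page() is a made-up stand-in for a platform's real mapping primitive):

	static int example_tce_build(struct iommu_table *tbl, long index,
				     long npages, unsigned long uaddr,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
	{
		while (npages--) {
			/* Transient failure: return non-zero so iommu_alloc()
			 * frees the bitmap range and returns DMA_ERROR_CODE. */
			if (hypervisor_map_page(tbl, index++, uaddr, direction))
				return -EIO;
			uaddr += IOMMU_PAGE_SIZE;
		}
		return 0;
	}
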
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
new file mode 100644
index 000000000000..b4fdf2f2743c
--- /dev/null
+++ b/arch/powerpc/kernel/kgdb.c
@@ -0,0 +1,410 @@
+/*
+ * PowerPC backend to the KGDB stub.
+ *
+ * 1998 (c) Michael AK Tesch (tesch@cs.wisc.edu)
+ * Copyright (C) 2003 Timesys Corporation.
+ * Copyright (C) 2004-2006 MontaVista Software, Inc.
+ * PPC64 Mods (C) 2005 Frank Rowand (frowand@mvista.com)
+ * PPC32 support restored by Vitaly Wool <vwool@ru.mvista.com> and
+ * Sergei Shtylyov <sshtylyov@ru.mvista.com>
+ * Copyright (C) 2007-2008 Wind River Systems, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/kgdb.h>
+#include <linux/smp.h>
+#include <linux/signal.h>
+#include <linux/ptrace.h>
+#include <asm/current.h>
+#include <asm/processor.h>
+#include <asm/machdep.h>
+
+/*
+ * This table contains the mapping between PowerPC hardware trap types, and
+ * signals, which are primarily what GDB understands.  GDB and the kernel
+ * don't always agree on values, so we use constants taken from gdb-6.2.
+ */
+static struct hard_trap_info
+{
+	unsigned int tt;		/* Trap type code for powerpc */
+	unsigned char signo;		/* Signal that we map this trap into */
+} hard_trap_info[] = {
+	{ 0x0100, 0x02 /* SIGINT */  },		/* system reset */
+	{ 0x0200, 0x0b /* SIGSEGV */ },		/* machine check */
+	{ 0x0300, 0x0b /* SIGSEGV */ },		/* data access */
+	{ 0x0400, 0x0b /* SIGSEGV */ },		/* instruction access */
+	{ 0x0500, 0x02 /* SIGINT */  },		/* external interrupt */
+	{ 0x0600, 0x0a /* SIGBUS */  },		/* alignment */
+	{ 0x0700, 0x05 /* SIGTRAP */ },		/* program check */
+	{ 0x0800, 0x08 /* SIGFPE */  },		/* fp unavailable */
+	{ 0x0900, 0x0e /* SIGALRM */ },		/* decrementer */
+	{ 0x0c00, 0x14 /* SIGCHLD */ },		/* system call */
+#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+	{ 0x2002, 0x05 /* SIGTRAP */ },		/* debug */
+#if defined(CONFIG_FSL_BOOKE)
+	{ 0x2010, 0x08 /* SIGFPE */  },		/* spe unavailable */
+	{ 0x2020, 0x08 /* SIGFPE */  },		/* spe unavailable */
+	{ 0x2030, 0x08 /* SIGFPE */  },		/* spe fp data */
+	{ 0x2040, 0x08 /* SIGFPE */  },		/* spe fp data */
+	{ 0x2050, 0x08 /* SIGFPE */  },		/* spe fp round */
+	{ 0x2060, 0x0e /* SIGILL */  },		/* performance monitor */
+	{ 0x2900, 0x08 /* SIGFPE */  },		/* apu unavailable */
+	{ 0x3100, 0x0e /* SIGALRM */ },		/* fixed interval timer */
+	{ 0x3200, 0x02 /* SIGINT */  },		/* watchdog */
+#else /* ! CONFIG_FSL_BOOKE */
+	{ 0x1000, 0x0e /* SIGALRM */ },		/* prog interval timer */
+	{ 0x1010, 0x0e /* SIGALRM */ },		/* fixed interval timer */
+	{ 0x1020, 0x02 /* SIGINT */  },		/* watchdog */
+	{ 0x2010, 0x08 /* SIGFPE */  },		/* fp unavailable */
+	{ 0x2020, 0x08 /* SIGFPE */  },		/* ap unavailable */
+#endif
+#else /* ! (defined(CONFIG_40x) || defined(CONFIG_BOOKE)) */
+	{ 0x0d00, 0x05 /* SIGTRAP */ },		/* single-step */
+#if defined(CONFIG_8xx)
+	{ 0x1000, 0x04 /* SIGILL */  },		/* software emulation */
+#else /* ! CONFIG_8xx */
+	{ 0x0f00, 0x04 /* SIGILL */  },		/* performance monitor */
+	{ 0x0f20, 0x08 /* SIGFPE */  },		/* altivec unavailable */
+	{ 0x1300, 0x05 /* SIGTRAP */ },		/* instruction address break */
+#if defined(CONFIG_PPC64)
+	{ 0x1200, 0x05 /* SIGILL */  },		/* system error */
+	{ 0x1500, 0x04 /* SIGILL */  },		/* soft patch */
+	{ 0x1600, 0x04 /* SIGILL */  },		/* maintenance */
+	{ 0x1700, 0x08 /* SIGFPE */  },		/* altivec assist */
+	{ 0x1800, 0x04 /* SIGILL */  },		/* thermal */
+#else /* ! CONFIG_PPC64 */
+	{ 0x1400, 0x02 /* SIGINT */  },		/* SMI */
+	{ 0x1600, 0x08 /* SIGFPE */  },		/* altivec assist */
+	{ 0x1700, 0x04 /* SIGILL */  },		/* TAU */
+	{ 0x2000, 0x05 /* SIGTRAP */ },		/* run mode */
+#endif
+#endif
+#endif
+	{ 0x0000, 0x00 }			/* Must be last */
+};
+
+static int computeSignal(unsigned int tt)
+{
+	struct hard_trap_info *ht;
+
+	for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
+		if (ht->tt == tt)
+			return ht->signo;
+
+	return SIGHUP;		/* default for things we don't know about */
+}
+
+static int kgdb_call_nmi_hook(struct pt_regs *regs)
+{
+	kgdb_nmicallback(raw_smp_processor_id(), regs);
+	return 0;
+}
+
+#ifdef CONFIG_SMP
+void kgdb_roundup_cpus(unsigned long flags)
+{
+	smp_send_debugger_break(MSG_ALL_BUT_SELF);
+}
+#endif
+
+/* KGDB functions to use existing PowerPC64 hooks. */
+static int kgdb_debugger(struct pt_regs *regs)
+{
+	return kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs);
+}
+
+static int kgdb_handle_breakpoint(struct pt_regs *regs)
+{
+	if (user_mode(regs))
+		return 0;
+
+	if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
+		return 0;
+
+	if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
+		regs->nip += 4;
+
+	return 1;
+}
+
+static int kgdb_singlestep(struct pt_regs *regs)
+{
+	struct thread_info *thread_info, *exception_thread_info;
+
+	if (user_mode(regs))
+		return 0;
+
+	/*
+	 * On Book E and perhaps other processors, singlestep is handled on
+	 * the critical exception stack.  This causes current_thread_info()
+	 * to fail, since it locates the thread_info by masking off
+	 * the low bits of the current stack pointer.  We work around
+	 * this issue by copying the thread_info from the kernel stack
+	 * before calling kgdb_handle_exception, and copying it back
+	 * afterwards.  On most processors the copy is avoided since
+	 * exception_thread_info == thread_info.
+	 */
+	thread_info = (struct thread_info *)(regs->gpr[1] & ~(THREAD_SIZE-1));
+	exception_thread_info = current_thread_info();
+
+	if (thread_info != exception_thread_info)
+		memcpy(exception_thread_info, thread_info, sizeof *thread_info);
+
+	kgdb_handle_exception(0, SIGTRAP, 0, regs);
+
+	if (thread_info != exception_thread_info)
+		memcpy(thread_info, exception_thread_info, sizeof *thread_info);
+
+	return 1;
+}
+
+static int kgdb_iabr_match(struct pt_regs *regs)
+{
+	if (user_mode(regs))
+		return 0;
+
+	if (kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs) != 0)
+		return 0;
+	return 1;
+}
+
+static int kgdb_dabr_match(struct pt_regs *regs)
+{
+	if (user_mode(regs))
+		return 0;
+
+	if (kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs) != 0)
+		return 0;
+	return 1;
+}
+
+#define PACK64(ptr, src) do { *(ptr++) = (src); } while (0)
+
+#define PACK32(ptr, src) do {          \
+	u32 *ptr32;                    \
+	ptr32 = (u32 *)ptr;            \
+	*(ptr32++) = (src);            \
+	ptr = (unsigned long *)ptr32;  \
+	} while (0)
+
+
+void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
+{
+	unsigned long *ptr = gdb_regs;
+	int reg;
+
+	memset(gdb_regs, 0, NUMREGBYTES);
+
+	for (reg = 0; reg < 32; reg++)
+		PACK64(ptr, regs->gpr[reg]);
+
+#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_SPE
+	for (reg = 0; reg < 32; reg++)
+		PACK64(ptr, current->thread.evr[reg]);
+#else
+	ptr += 32;
+#endif
+#else
+	/* fp registers not used by kernel, leave zero */
+	ptr += 32 * 8 / sizeof(long);
+#endif
+
+	PACK64(ptr, regs->nip);
+	PACK64(ptr, regs->msr);
+	PACK32(ptr, regs->ccr);
+	PACK64(ptr, regs->link);
+	PACK64(ptr, regs->ctr);
+	PACK32(ptr, regs->xer);
+
+	BUG_ON((unsigned long)ptr >
+	       (unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
+}
+
+void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
+{
+	struct pt_regs *regs = (struct pt_regs *)(p->thread.ksp +
+						  STACK_FRAME_OVERHEAD);
+	unsigned long *ptr = gdb_regs;
+	int reg;
+
+	memset(gdb_regs, 0, NUMREGBYTES);
+
+	/* Regs GPR0-2 */
+	for (reg = 0; reg < 3; reg++)
+		PACK64(ptr, regs->gpr[reg]);
+
+	/* Regs GPR3-13 are caller saved, not in regs->gpr[] */
+	ptr += 11;
+
+	/* Regs GPR14-31 */
+	for (reg = 14; reg < 32; reg++)
+		PACK64(ptr, regs->gpr[reg]);
+
+#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_SPE
+	for (reg = 0; reg < 32; reg++)
+		PACK64(ptr, p->thread.evr[reg]);
+#else
+	ptr += 32;
+#endif
+#else
+	/* fp registers not used by kernel, leave zero */
+	ptr += 32 * 8 / sizeof(long);
+#endif
+
+	PACK64(ptr, regs->nip);
+	PACK64(ptr, regs->msr);
+	PACK32(ptr, regs->ccr);
+	PACK64(ptr, regs->link);
+	PACK64(ptr, regs->ctr);
+	PACK32(ptr, regs->xer);
+
+	BUG_ON((unsigned long)ptr >
+	       (unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
+}
+
+#define UNPACK64(dest, ptr) do { dest = *(ptr++); } while (0)
+
+#define UNPACK32(dest, ptr) do {       \
+	u32 *ptr32;                    \
+	ptr32 = (u32 *)ptr;            \
+	dest = *(ptr32++);             \
+	ptr = (unsigned long *)ptr32;  \
+	} while (0)
+
+void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
+{
+	unsigned long *ptr = gdb_regs;
+	int reg;
+#ifdef CONFIG_SPE
+	union {
+		u32 v32[2];
+		u64 v64;
+	} acc;
+#endif
+
+	for (reg = 0; reg < 32; reg++)
+		UNPACK64(regs->gpr[reg], ptr);
+
+#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_SPE
+	for (reg = 0; reg < 32; reg++)
+		UNPACK64(current->thread.evr[reg], ptr);
+#else
+	ptr += 32;
+#endif
+#else
+	/* fp registers not used by kernel, leave zero */
+	ptr += 32 * 8 / sizeof(int);
+#endif
+
+	UNPACK64(regs->nip, ptr);
+	UNPACK64(regs->msr, ptr);
+	UNPACK32(regs->ccr, ptr);
+	UNPACK64(regs->link, ptr);
+	UNPACK64(regs->ctr, ptr);
+	UNPACK32(regs->xer, ptr);
+
+	BUG_ON((unsigned long)ptr >
+	       (unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
+}
+
+/*
+ * This function does PowerPC specific processing for interfacing to gdb.
+ */
+int kgdb_arch_handle_exception(int vector, int signo, int err_code,
+			       char *remcom_in_buffer, char *remcom_out_buffer,
+			       struct pt_regs *linux_regs)
+{
+	char *ptr = &remcom_in_buffer[1];
+	unsigned long addr;
+
+	switch (remcom_in_buffer[0]) {
+		/*
+		 * sAA..AA   Step one instruction from AA..AA
+		 * This will return an error to gdb ..
+		 */
+	case 's':
+	case 'c':
+		/* handle the optional parameter */
+		if (kgdb_hex2long(&ptr, &addr))
+			linux_regs->nip = addr;
+
+		atomic_set(&kgdb_cpu_doing_single_step, -1);
+		/* set the trace bit if we're stepping */
+		if (remcom_in_buffer[0] == 's') {
+#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+			mtspr(SPRN_DBCR0,
+			      mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
+			linux_regs->msr |= MSR_DE;
+#else
+			linux_regs->msr |= MSR_SE;
+#endif
+			kgdb_single_step = 1;
+			if (kgdb_contthread)
+				atomic_set(&kgdb_cpu_doing_single_step,
+					   raw_smp_processor_id());
+		}
+		return 0;
+	}
+
+	return -1;
+}
+
+/*
+ * Global data
+ */
+struct kgdb_arch arch_kgdb_ops = {
+	.gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
+};
+
+static int kgdb_not_implemented(struct pt_regs *regs)
+{
+	return 0;
+}
+
+static void *old__debugger_ipi;
+static void *old__debugger;
+static void *old__debugger_bpt;
+static void *old__debugger_sstep;
+static void *old__debugger_iabr_match;
+static void *old__debugger_dabr_match;
+static void *old__debugger_fault_handler;
+
+int kgdb_arch_init(void)
+{
+	old__debugger_ipi = __debugger_ipi;
+	old__debugger = __debugger;
+	old__debugger_bpt = __debugger_bpt;
+	old__debugger_sstep = __debugger_sstep;
+	old__debugger_iabr_match = __debugger_iabr_match;
+	old__debugger_dabr_match = __debugger_dabr_match;
+	old__debugger_fault_handler = __debugger_fault_handler;
+
+	__debugger_ipi = kgdb_call_nmi_hook;
+	__debugger = kgdb_debugger;
+	__debugger_bpt = kgdb_handle_breakpoint;
+	__debugger_sstep = kgdb_singlestep;
+	__debugger_iabr_match = kgdb_iabr_match;
+	__debugger_dabr_match = kgdb_dabr_match;
+	__debugger_fault_handler = kgdb_not_implemented;
+
+	return 0;
+}
+
+void kgdb_arch_exit(void)
+{
+	__debugger_ipi = old__debugger_ipi;
+	__debugger = old__debugger;
+	__debugger_bpt = old__debugger_bpt;
+	__debugger_sstep = old__debugger_sstep;
+	__debugger_iabr_match = old__debugger_iabr_match;
+	__debugger_dabr_match = old__debugger_dabr_match;
+	__debugger_fault_handler = old__debugger_fault_handler;
+}
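
Usage note (typical setup, not part of this commit): the stub is built whenever CONFIG_KGDB is set (see the Makefile hunk above); to actually attach gdb you also need a KGDB I/O driver, commonly the serial one:

	CONFIG_KGDB=y
	CONFIG_KGDB_SERIAL_CONSOLE=y

	# illustrative kernel command line:
	kgdboc=ttyS0,115200 kgdbwait

With kgdbwait the kernel stops early and waits; gdb then attaches with "target remote /dev/ttyS0", after which the breakpoint, single-step and IABR/DABR hooks installed by kgdb_arch_init() do the rest.
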
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 4ba2af125450..de79915452c8 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -144,7 +144,6 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 	kcb->kprobe_saved_msr = regs->msr;
 }
 
-/* Called with kretprobe_lock held */
 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
 				      struct pt_regs *regs)
 {
@@ -312,8 +311,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
 
 	INIT_HLIST_HEAD(&empty_rp);
-	spin_lock_irqsave(&kretprobe_lock, flags);
-	head = kretprobe_inst_table_head(current);
+	kretprobe_hash_lock(current, &head, &flags);
 
 	/*
 	 * It is possible to have multiple instances associated with a given
@@ -352,7 +350,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	regs->nip = orig_ret_address;
 
 	reset_current_kprobe();
-	spin_unlock_irqrestore(&kretprobe_lock, flags);
+	kretprobe_hash_unlock(current, &flags);
 	preempt_enable_no_resched();
 
 	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 827a5726a035..9f856a0c3e38 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -34,8 +34,9 @@
 #include <asm/time.h>
 #include <asm/prom.h>
 #include <asm/vdso_datapage.h>
+#include <asm/vio.h>
 
-#define MODULE_VERS "1.7"
+#define MODULE_VERS "1.8"
 #define MODULE_NAME "lparcfg"
 
 /* #define LPARCFG_DEBUG */
@@ -129,32 +130,46 @@ static int iseries_lparcfg_data(struct seq_file *m, void *v)
 /*
  * Methods used to fetch LPAR data when running on a pSeries platform.
  */
-static void log_plpar_hcall_return(unsigned long rc, char *tag)
+/**
+ * h_get_mpp
+ * H_GET_MPP hcall returns info in 7 parms
+ */
+int h_get_mpp(struct hvcall_mpp_data *mpp_data)
 {
-	switch(rc) {
-	case 0:
-		return;
-	case H_HARDWARE:
-		printk(KERN_INFO "plpar-hcall (%s) "
-				"Hardware fault\n", tag);
-		return;
-	case H_FUNCTION:
-		printk(KERN_INFO "plpar-hcall (%s) "
-				"Function not allowed\n", tag);
-		return;
-	case H_AUTHORITY:
-		printk(KERN_INFO "plpar-hcall (%s) "
-				"Not authorized to this function\n", tag);
-		return;
-	case H_PARAMETER:
-		printk(KERN_INFO "plpar-hcall (%s) "
-				"Bad parameter(s)\n",tag);
-		return;
-	default:
-		printk(KERN_INFO "plpar-hcall (%s) "
-				"Unexpected rc(0x%lx)\n", tag, rc);
-	}
+	int rc;
+	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+
+	rc = plpar_hcall9(H_GET_MPP, retbuf);
+
+	mpp_data->entitled_mem = retbuf[0];
+	mpp_data->mapped_mem = retbuf[1];
+
+	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
+	mpp_data->pool_num = retbuf[2] & 0xffff;
+
+	mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
+	mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
+	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffff;
+
+	mpp_data->pool_size = retbuf[4];
+	mpp_data->loan_request = retbuf[5];
+	mpp_data->backing_mem = retbuf[6];
+
+	return rc;
 }
+EXPORT_SYMBOL(h_get_mpp);
+
+struct hvcall_ppp_data {
+	u64	entitlement;
+	u64	unallocated_entitlement;
+	u16	group_num;
+	u16	pool_num;
+	u8	capped;
+	u8	weight;
+	u8	unallocated_weight;
+	u16	active_procs_in_pool;
+	u16	active_system_procs;
+};
 
 /*
  * H_GET_PPP hcall returns info in 4 parms.
@@ -176,27 +191,30 @@ static void log_plpar_hcall_return(unsigned long rc, char *tag)
  * XXXX - Active processors in Physical Processor Pool.
  * XXXX - Processors active on platform.
  */
-static unsigned int h_get_ppp(unsigned long *entitled,
-			      unsigned long *unallocated,
-			      unsigned long *aggregation,
-			      unsigned long *resource)
+static unsigned int h_get_ppp(struct hvcall_ppp_data *ppp_data)
 {
 	unsigned long rc;
 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
 
 	rc = plpar_hcall(H_GET_PPP, retbuf);
 
-	*entitled = retbuf[0];
-	*unallocated = retbuf[1];
-	*aggregation = retbuf[2];
-	*resource = retbuf[3];
+	ppp_data->entitlement = retbuf[0];
+	ppp_data->unallocated_entitlement = retbuf[1];
+
+	ppp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
+	ppp_data->pool_num = retbuf[2] & 0xffff;
 
-	log_plpar_hcall_return(rc, "H_GET_PPP");
+	ppp_data->capped = (retbuf[3] >> 6 * 8) & 0x01;
+	ppp_data->weight = (retbuf[3] >> 5 * 8) & 0xff;
+	ppp_data->unallocated_weight = (retbuf[3] >> 4 * 8) & 0xff;
+	ppp_data->active_procs_in_pool = (retbuf[3] >> 2 * 8) & 0xffff;
+	ppp_data->active_system_procs = retbuf[3] & 0xffff;
 
 	return rc;
 }
 
-static void h_pic(unsigned long *pool_idle_time, unsigned long *num_procs)
+static unsigned h_pic(unsigned long *pool_idle_time,
+		      unsigned long *num_procs)
 {
 	unsigned long rc;
 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
@@ -206,8 +224,87 @@ static void h_pic(unsigned long *pool_idle_time, unsigned long *num_procs)
 	*pool_idle_time = retbuf[0];
 	*num_procs = retbuf[1];
 
-	if (rc != H_AUTHORITY)
-		log_plpar_hcall_return(rc, "H_PIC");
+	return rc;
+}
+
+/*
+ * parse_ppp_data
+ * Parse out the data returned from h_get_ppp and h_pic
+ */
+static void parse_ppp_data(struct seq_file *m)
+{
+	struct hvcall_ppp_data ppp_data;
+	int rc;
+
+	rc = h_get_ppp(&ppp_data);
+	if (rc)
+		return;
+
+	seq_printf(m, "partition_entitled_capacity=%ld\n",
+		   ppp_data.entitlement);
+	seq_printf(m, "group=%d\n", ppp_data.group_num);
+	seq_printf(m, "system_active_processors=%d\n",
+		   ppp_data.active_system_procs);
+
+	/* pool related entries are appropriate for shared configs */
+	if (lppaca[0].shared_proc) {
+		unsigned long pool_idle_time, pool_procs;
+
+		seq_printf(m, "pool=%d\n", ppp_data.pool_num);
+
+		/* report pool_capacity in percentage */
+		seq_printf(m, "pool_capacity=%d\n",
+			   ppp_data.active_procs_in_pool * 100);
+
+		h_pic(&pool_idle_time, &pool_procs);
+		seq_printf(m, "pool_idle_time=%ld\n", pool_idle_time);
+		seq_printf(m, "pool_num_procs=%ld\n", pool_procs);
+	}
+
+	seq_printf(m, "unallocated_capacity_weight=%d\n",
+		   ppp_data.unallocated_weight);
+	seq_printf(m, "capacity_weight=%d\n", ppp_data.weight);
+	seq_printf(m, "capped=%d\n", ppp_data.capped);
+	seq_printf(m, "unallocated_capacity=%ld\n",
+		   ppp_data.unallocated_entitlement);
+}
+
+/**
+ * parse_mpp_data
+ * Parse out data returned from h_get_mpp
+ */
+static void parse_mpp_data(struct seq_file *m)
+{
+	struct hvcall_mpp_data mpp_data;
+	int rc;
+
+	rc = h_get_mpp(&mpp_data);
+	if (rc)
+		return;
+
+	seq_printf(m, "entitled_memory=%ld\n", mpp_data.entitled_mem);
+
+	if (mpp_data.mapped_mem != -1)
+		seq_printf(m, "mapped_entitled_memory=%ld\n",
+			   mpp_data.mapped_mem);
+
+	seq_printf(m, "entitled_memory_group_number=%d\n", mpp_data.group_num);
+	seq_printf(m, "entitled_memory_pool_number=%d\n", mpp_data.pool_num);
+
+	seq_printf(m, "entitled_memory_weight=%d\n", mpp_data.mem_weight);
+	seq_printf(m, "unallocated_entitled_memory_weight=%d\n",
+		   mpp_data.unallocated_mem_weight);
+	seq_printf(m, "unallocated_io_mapping_entitlement=%ld\n",
+		   mpp_data.unallocated_entitlement);
+
+	if (mpp_data.pool_size != -1)
+		seq_printf(m, "entitled_memory_pool_size=%ld bytes\n",
+			   mpp_data.pool_size);
+
+	seq_printf(m, "entitled_memory_loan_request=%ld\n",
+		   mpp_data.loan_request);
+
+	seq_printf(m, "backing_memory=%ld bytes\n", mpp_data.backing_mem);
 }
 
 #define SPLPAR_CHARACTERISTICS_TOKEN 20
@@ -313,6 +410,25 @@ static int lparcfg_count_active_processors(void)
 	return count;
 }
 
+static void pseries_cmo_data(struct seq_file *m)
+{
+	int cpu;
+	unsigned long cmo_faults = 0;
+	unsigned long cmo_fault_time = 0;
+
+	if (!firmware_has_feature(FW_FEATURE_CMO))
+		return;
+
+	for_each_possible_cpu(cpu) {
+		cmo_faults += lppaca[cpu].cmo_faults;
+		cmo_fault_time += lppaca[cpu].cmo_fault_time;
+	}
+
+	seq_printf(m, "cmo_faults=%lu\n", cmo_faults);
+	seq_printf(m, "cmo_fault_time_usec=%lu\n",
+		   cmo_fault_time / tb_ticks_per_usec);
+}
+
 static int pseries_lparcfg_data(struct seq_file *m, void *v)
 {
 	int partition_potential_processors;
@@ -334,60 +450,13 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
 	partition_active_processors = lparcfg_count_active_processors();
 
 	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
-		unsigned long h_entitled, h_unallocated;
-		unsigned long h_aggregation, h_resource;
-		unsigned long pool_idle_time, pool_procs;
-		unsigned long purr;
-
-		h_get_ppp(&h_entitled, &h_unallocated, &h_aggregation,
-			  &h_resource);
-
-		seq_printf(m, "R4=0x%lx\n", h_entitled);
-		seq_printf(m, "R5=0x%lx\n", h_unallocated);
-		seq_printf(m, "R6=0x%lx\n", h_aggregation);
-		seq_printf(m, "R7=0x%lx\n", h_resource);
-
-		purr = get_purr();
-
 		/* this call handles the ibm,get-system-parameter contents */
 		parse_system_parameter_string(m);
+		parse_ppp_data(m);
+		parse_mpp_data(m);
+		pseries_cmo_data(m);
 
-		seq_printf(m, "partition_entitled_capacity=%ld\n", h_entitled);
-
-		seq_printf(m, "group=%ld\n", (h_aggregation >> 2 * 8) & 0xffff);
-
-		seq_printf(m, "system_active_processors=%ld\n",
-			   (h_resource >> 0 * 8) & 0xffff);
-
-		/* pool related entries are apropriate for shared configs */
-		if (lppaca[0].shared_proc) {
-
-			h_pic(&pool_idle_time, &pool_procs);
-
-			seq_printf(m, "pool=%ld\n",
-				   (h_aggregation >> 0 * 8) & 0xffff);
-
-			/* report pool_capacity in percentage */
-			seq_printf(m, "pool_capacity=%ld\n",
-				   ((h_resource >> 2 * 8) & 0xffff) * 100);
-
-			seq_printf(m, "pool_idle_time=%ld\n", pool_idle_time);
-
-			seq_printf(m, "pool_num_procs=%ld\n", pool_procs);
-		}
-
-		seq_printf(m, "unallocated_capacity_weight=%ld\n",
-			   (h_resource >> 4 * 8) & 0xFF);
-
-		seq_printf(m, "capacity_weight=%ld\n",
-			   (h_resource >> 5 * 8) & 0xFF);
-
-		seq_printf(m, "capped=%ld\n", (h_resource >> 6 * 8) & 0x01);
-
-		seq_printf(m, "unallocated_capacity=%ld\n", h_unallocated);
-
-		seq_printf(m, "purr=%ld\n", purr);
-
+		seq_printf(m, "purr=%ld\n", get_purr());
 	} else {		/* non SPLPAR case */
 
 		seq_printf(m, "system_active_processors=%d\n",
@@ -414,6 +483,83 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
 	return 0;
 }
 
+static ssize_t update_ppp(u64 *entitlement, u8 *weight)
+{
+	struct hvcall_ppp_data ppp_data;
+	u8 new_weight;
+	u64 new_entitled;
+	ssize_t retval;
+
+	/* Get our current parameters */
+	retval = h_get_ppp(&ppp_data);
+	if (retval)
+		return retval;
+
+	if (entitlement) {
+		new_weight = ppp_data.weight;
+		new_entitled = *entitlement;
+	} else if (weight) {
+		new_weight = *weight;
+		new_entitled = ppp_data.entitlement;
+	} else
+		return -EINVAL;
+
+	pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
+		 __func__, ppp_data.entitlement, ppp_data.weight);
+
+	pr_debug("%s: new_entitled = %lu, new_weight = %u\n",
+		 __func__, new_entitled, new_weight);
+
+	retval = plpar_hcall_norets(H_SET_PPP, new_entitled, new_weight);
+	return retval;
+}
+
+/**
+ * update_mpp
+ *
+ * Update the memory entitlement and weight for the partition.  Caller must
+ * specify either a new entitlement or weight, not both, to be updated
+ * since the h_set_mpp call takes both entitlement and weight as parameters.
+ */
+static ssize_t update_mpp(u64 *entitlement, u8 *weight)
+{
+	struct hvcall_mpp_data mpp_data;
+	u64 new_entitled;
+	u8 new_weight;
+	ssize_t rc;
+
+	if (entitlement) {
+		/* Check with vio to ensure the new memory entitlement
+		 * can be handled.
+		 */
+		rc = vio_cmo_entitlement_update(*entitlement);
+		if (rc)
+			return rc;
+	}
+
+	rc = h_get_mpp(&mpp_data);
+	if (rc)
+		return rc;
+
+	if (entitlement) {
+		new_weight = mpp_data.mem_weight;
+		new_entitled = *entitlement;
+	} else if (weight) {
+		new_weight = *weight;
+		new_entitled = mpp_data.entitled_mem;
+	} else
+		return -EINVAL;
+
+	pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
+		 __func__, mpp_data.entitled_mem, mpp_data.mem_weight);
+
+	pr_debug("%s: new_entitled = %lu, new_weight = %u\n",
+		 __func__, new_entitled, new_weight);
+
+	rc = plpar_hcall_norets(H_SET_MPP, new_entitled, new_weight);
+	return rc;
+}
+
 /*
  * Interface for changing system parameters (variable capacity weight
  * and entitled capacity).  Format of input is "param_name=value";
@@ -427,35 +573,27 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
 static ssize_t lparcfg_write(struct file *file, const char __user * buf,
 			     size_t count, loff_t * off)
 {
-	char *kbuf;
+	int kbuf_sz = 64;
+	char kbuf[kbuf_sz];
 	char *tmp;
 	u64 new_entitled, *new_entitled_ptr = &new_entitled;
 	u8 new_weight, *new_weight_ptr = &new_weight;
-
-	unsigned long current_entitled;	/* parameters for h_get_ppp */
-	unsigned long dummy;
-	unsigned long resource;
-	u8 current_weight;
-
-	ssize_t retval = -ENOMEM;
+	ssize_t retval;
 
 	if (!firmware_has_feature(FW_FEATURE_SPLPAR) ||
 			firmware_has_feature(FW_FEATURE_ISERIES))
 		return -EINVAL;
 
-	kbuf = kmalloc(count, GFP_KERNEL);
-	if (!kbuf)
-		goto out;
+	if (count > kbuf_sz)
+		return -EINVAL;
 
-	retval = -EFAULT;
 	if (copy_from_user(kbuf, buf, count))
-		goto out;
+		return -EFAULT;
 
-	retval = -EINVAL;
 	kbuf[count - 1] = '\0';
 	tmp = strchr(kbuf, '=');
 	if (!tmp)
-		goto out;
+		return -EINVAL;
 
 	*tmp++ = '\0';
 
@@ -463,34 +601,32 @@ static ssize_t lparcfg_write(struct file *file, const char __user * buf,
 		char *endp;
 		*new_entitled_ptr = (u64) simple_strtoul(tmp, &endp, 10);
 		if (endp == tmp)
-			goto out;
-		new_weight_ptr = &current_weight;
+			return -EINVAL;
+
+		retval = update_ppp(new_entitled_ptr, NULL);
 	} else if (!strcmp(kbuf, "capacity_weight")) {
 		char *endp;
 		*new_weight_ptr = (u8) simple_strtoul(tmp, &endp, 10);
 		if (endp == tmp)
-			goto out;
-		new_entitled_ptr = &current_entitled;
-	} else
-		goto out;
-
-	/* Get our current parameters */
-	retval = h_get_ppp(&current_entitled, &dummy, &dummy, &resource);
-	if (retval) {
-		retval = -EIO;
-		goto out;
-	}
-
-	current_weight = (resource >> 5 * 8) & 0xFF;
+			return -EINVAL;
 
-	pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
-		 __func__, current_entitled, current_weight);
+		retval = update_ppp(NULL, new_weight_ptr);
+	} else if (!strcmp(kbuf, "entitled_memory")) {
+		char *endp;
+		*new_entitled_ptr = (u64) simple_strtoul(tmp, &endp, 10);
+		if (endp == tmp)
+			return -EINVAL;
 
-	pr_debug("%s: new_entitled = %lu, new_weight = %u\n",
-		 __func__, *new_entitled_ptr, *new_weight_ptr);
+		retval = update_mpp(new_entitled_ptr, NULL);
+	} else if (!strcmp(kbuf, "entitled_memory_weight")) {
+		char *endp;
+		*new_weight_ptr = (u8) simple_strtoul(tmp, &endp, 10);
+		if (endp == tmp)
+			return -EINVAL;
 
-	retval = plpar_hcall_norets(H_SET_PPP, *new_entitled_ptr,
-				    *new_weight_ptr);
+		retval = update_mpp(NULL, new_weight_ptr);
+	} else
+		return -EINVAL;
 
 	if (retval == H_SUCCESS || retval == H_CONSTRAINED) {
 		retval = count;
@@ -506,8 +642,6 @@ static ssize_t lparcfg_write(struct file *file, const char __user * buf,
 		retval = -EIO;
 	}
 
-out:
-	kfree(kbuf);
 	return retval;
 }
 
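Usage note (illustrative, not part of the commit): the write handler keeps its "param_name=value" format, so the new memory-entitlement knobs are driven exactly like the existing CPU ones, assuming the usual /proc/ppc64/lparcfg location:

	# echo "entitled_memory_weight=128" > /proc/ppc64/lparcfg
	# grep entitled_memory_weight /proc/ppc64/lparcfg
	entitled_memory_weight=128

A successful H_SET_MPP returns H_SUCCESS (or H_CONSTRAINED), which the handler maps to a full write; other failures surface to the caller as an error such as -EIO.
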
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 29a0e039d436..aab76887a842 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -48,7 +48,7 @@ void machine_kexec_cleanup(struct kimage *image)
  * Do not allocate memory (or fail in any way) in machine_kexec().
  * We are past the point of no return, committed to rebooting now.
  */
-NORET_TYPE void machine_kexec(struct kimage *image)
+void machine_kexec(struct kimage *image)
 {
 	if (ppc_md.machine_kexec)
 		ppc_md.machine_kexec(image);
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 063cdd413049..224e9a11765c 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -598,6 +598,7 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
 			res->start = pci_addr;
 			break;
 		case 2:		/* PCI Memory space */
+		case 3:		/* PCI 64 bits Memory space */
 			printk(KERN_INFO
 			       " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
 			       cpu_addr, cpu_addr + size - 1, pci_addr,
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 219f3634115e..db2497ccc111 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -47,6 +47,8 @@
 #ifdef CONFIG_PPC64
 #include <asm/firmware.h>
 #endif
+#include <linux/kprobes.h>
+#include <linux/kdebug.h>
 
 extern unsigned long _get_SP(void);
 
@@ -239,6 +241,35 @@ void discard_lazy_cpu_state(void)
 }
 #endif /* CONFIG_SMP */
 
+void do_dabr(struct pt_regs *regs, unsigned long address,
+		    unsigned long error_code)
+{
+	siginfo_t info;
+
+	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
+			11, SIGSEGV) == NOTIFY_STOP)
+		return;
+
+	if (debugger_dabr_match(regs))
+		return;
+
+	/* Clear the DAC and struct entries.  One shot trigger */
+#if (defined(CONFIG_44x) || defined(CONFIG_BOOKE))
+	mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R | DBSR_DAC1W
+							| DBCR0_IDM));
+#endif
+
+	/* Clear the DABR */
+	set_dabr(0);
+
+	/* Deliver the signal to userspace */
+	info.si_signo = SIGTRAP;
+	info.si_errno = 0;
+	info.si_code = TRAP_HWBKPT;
+	info.si_addr = (void __user *)address;
+	force_sig_info(SIGTRAP, &info, current);
+}
+
 static DEFINE_PER_CPU(unsigned long, current_dabr);
 
 int set_dabr(unsigned long dabr)
@@ -254,6 +285,11 @@ int set_dabr(unsigned long dabr)
254#if defined(CONFIG_PPC64) || defined(CONFIG_6xx) 285#if defined(CONFIG_PPC64) || defined(CONFIG_6xx)
255 mtspr(SPRN_DABR, dabr); 286 mtspr(SPRN_DABR, dabr);
256#endif 287#endif
288
289#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
290 mtspr(SPRN_DAC1, dabr);
291#endif
292
257 return 0; 293 return 0;
258} 294}
259 295
@@ -337,6 +373,12 @@ struct task_struct *__switch_to(struct task_struct *prev,
337 if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) 373 if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
338 set_dabr(new->thread.dabr); 374 set_dabr(new->thread.dabr);
339 375
376#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
377	/* If the new thread has a DAC (HW breakpoint), re-arm it */
378 if (new->thread.dabr)
379 set_dabr(new->thread.dabr);
380#endif
381
340 new_thread = &new->thread; 382 new_thread = &new->thread;
341 old_thread = &current->thread; 383 old_thread = &current->thread;
342 384
@@ -525,6 +567,10 @@ void flush_thread(void)
525 if (current->thread.dabr) { 567 if (current->thread.dabr) {
526 current->thread.dabr = 0; 568 current->thread.dabr = 0;
527 set_dabr(0); 569 set_dabr(0);
570
571#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
572 current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W);
573#endif
528 } 574 }
529} 575}
530 576
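
The do_dabr() handler added above finishes by posting SIGTRAP with si_code TRAP_HWBKPT and si_addr set to the faulting data address. A minimal userspace sketch of observing that delivery (illustrative only, not part of this patch; the TRAP_HWBKPT fallback value is an assumption matching asm-generic/siginfo.h):

    #include <signal.h>
    #include <unistd.h>

    #ifndef TRAP_HWBKPT
    #define TRAP_HWBKPT 4   /* assumption: kernel's asm-generic value */
    #endif

    static void hwbkpt_handler(int sig, siginfo_t *info, void *ctx)
    {
        static const char msg[] = "hw breakpoint hit\n";

        /* do_dabr() sets si_code = TRAP_HWBKPT, si_addr = data address */
        if (info->si_code == TRAP_HWBKPT)
            write(STDERR_FILENO, msg, sizeof(msg) - 1); /* signal-safe */
    }

    int main(void)
    {
        struct sigaction sa;

        sa.sa_sigaction = hwbkpt_handler;
        sa.sa_flags = SA_SIGINFO;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGTRAP, &sa, NULL);
        for (;;)
            pause();    /* the watched address is touched elsewhere */
    }
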
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 1ea8c8d3ce89..c4ab2195b9cb 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -643,6 +643,11 @@ static void __init early_cmdline_parse(void)
643#else 643#else
644#define OV5_MSI 0x00 644#define OV5_MSI 0x00
645#endif /* CONFIG_PCI_MSI */ 645#endif /* CONFIG_PCI_MSI */
646#ifdef CONFIG_PPC_SMLPAR
647#define OV5_CMO 0x80 /* Cooperative Memory Overcommitment */
648#else
649#define OV5_CMO 0x00
650#endif
646 651
647/* 652/*
648 * The architecture vector has an array of PVR mask/value pairs, 653 * The architecture vector has an array of PVR mask/value pairs,
@@ -687,10 +692,12 @@ static unsigned char ibm_architecture_vec[] = {
687 0, /* don't halt */ 692 0, /* don't halt */
688 693
689 /* option vector 5: PAPR/OF options */ 694 /* option vector 5: PAPR/OF options */
690 3 - 2, /* length */ 695 5 - 2, /* length */
691 0, /* don't ignore, don't halt */ 696 0, /* don't ignore, don't halt */
692 OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY | 697 OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY |
693 OV5_DONATE_DEDICATE_CPU | OV5_MSI, 698 OV5_DONATE_DEDICATE_CPU | OV5_MSI,
699 0,
700 OV5_CMO,
694}; 701};
695 702
696/* Old method - ELF header with PT_NOTE sections */ 703/* Old method - ELF header with PT_NOTE sections */
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
index 90eb3a3e383e..bc1fb27368af 100644
--- a/arch/powerpc/kernel/prom_parse.c
+++ b/arch/powerpc/kernel/prom_parse.c
@@ -128,12 +128,35 @@ static void of_bus_pci_count_cells(struct device_node *np,
128 *sizec = 2; 128 *sizec = 2;
129} 129}
130 130
131static unsigned int of_bus_pci_get_flags(const u32 *addr)
132{
133 unsigned int flags = 0;
134 u32 w = addr[0];
135
136 switch((w >> 24) & 0x03) {
137 case 0x01:
138 flags |= IORESOURCE_IO;
139 break;
140 case 0x02: /* 32 bits */
141 case 0x03: /* 64 bits */
142 flags |= IORESOURCE_MEM;
143 break;
144 }
145 if (w & 0x40000000)
146 flags |= IORESOURCE_PREFETCH;
147 return flags;
148}
149
131static u64 of_bus_pci_map(u32 *addr, const u32 *range, int na, int ns, int pna) 150static u64 of_bus_pci_map(u32 *addr, const u32 *range, int na, int ns, int pna)
132{ 151{
133 u64 cp, s, da; 152 u64 cp, s, da;
153 unsigned int af, rf;
154
155 af = of_bus_pci_get_flags(addr);
156 rf = of_bus_pci_get_flags(range);
134 157
135 /* Check address type match */ 158 /* Check address type match */
136 if ((addr[0] ^ range[0]) & 0x03000000) 159 if ((af ^ rf) & (IORESOURCE_MEM | IORESOURCE_IO))
137 return OF_BAD_ADDR; 160 return OF_BAD_ADDR;
138 161
139 /* Read address values, skipping high cell */ 162 /* Read address values, skipping high cell */
@@ -153,25 +176,6 @@ static int of_bus_pci_translate(u32 *addr, u64 offset, int na)
153 return of_bus_default_translate(addr + 1, offset, na - 1); 176 return of_bus_default_translate(addr + 1, offset, na - 1);
154} 177}
155 178
156static unsigned int of_bus_pci_get_flags(const u32 *addr)
157{
158 unsigned int flags = 0;
159 u32 w = addr[0];
160
161 switch((w >> 24) & 0x03) {
162 case 0x01:
163 flags |= IORESOURCE_IO;
164 break;
165 case 0x02: /* 32 bits */
166 case 0x03: /* 64 bits */
167 flags |= IORESOURCE_MEM;
168 break;
169 }
170 if (w & 0x40000000)
171 flags |= IORESOURCE_PREFETCH;
172 return flags;
173}
174
175const u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size, 179const u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
176 unsigned int *flags) 180 unsigned int *flags)
177{ 181{
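
For reference, of_bus_pci_get_flags() (moved ahead of its new caller above) decodes the phys.hi cell of an OF PCI address: bits 24-25 carry the space code (01 I/O, 10 32-bit memory, 11 64-bit memory) and bit 30 marks prefetchable memory. A standalone sketch of the same decoding; the MY_* constants stand in for the IORESOURCE_* values in linux/ioport.h and are assumptions of this example:

    #include <stdio.h>

    #define MY_IORESOURCE_IO       0x00000100UL
    #define MY_IORESOURCE_MEM      0x00000200UL
    #define MY_IORESOURCE_PREFETCH 0x00002000UL

    static unsigned long decode_pci_flags(unsigned int w)
    {
        unsigned long flags = 0;

        switch ((w >> 24) & 0x03) {     /* space code */
        case 0x01:
            flags |= MY_IORESOURCE_IO;
            break;
        case 0x02:                      /* 32-bit memory */
        case 0x03:                      /* 64-bit memory */
            flags |= MY_IORESOURCE_MEM;
            break;
        }
        if (w & 0x40000000)             /* 'p' (prefetchable) bit */
            flags |= MY_IORESOURCE_PREFETCH;
        return flags;
    }

    int main(void)
    {
        /* 0xc2000000: prefetchable 32-bit memory space */
        printf("%#lx\n", decode_pci_flags(0xc2000000));
        return 0;
    }

Decoding both sides to flags is what lets the type-match test in of_bus_pci_map() treat 32-bit and 64-bit memory as equivalent instead of comparing raw bits 24-25.
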
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 8feb93e7890c..a5d0e78779c8 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -703,7 +703,7 @@ void user_enable_single_step(struct task_struct *task)
703 703
704 if (regs != NULL) { 704 if (regs != NULL) {
705#if defined(CONFIG_40x) || defined(CONFIG_BOOKE) 705#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
706 task->thread.dbcr0 = DBCR0_IDM | DBCR0_IC; 706 task->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
707 regs->msr |= MSR_DE; 707 regs->msr |= MSR_DE;
708#else 708#else
709 regs->msr |= MSR_SE; 709 regs->msr |= MSR_SE;
@@ -716,9 +716,16 @@ void user_disable_single_step(struct task_struct *task)
716{ 716{
717 struct pt_regs *regs = task->thread.regs; 717 struct pt_regs *regs = task->thread.regs;
718 718
719
720#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
721	/* If a DAC is set, skip: clearing MSR_DE would disarm it */
722 if (task->thread.dabr)
723 return;
724#endif
725
719 if (regs != NULL) { 726 if (regs != NULL) {
720#if defined(CONFIG_40x) || defined(CONFIG_BOOKE) 727#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
721 task->thread.dbcr0 = 0; 728 task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_IDM);
722 regs->msr &= ~MSR_DE; 729 regs->msr &= ~MSR_DE;
723#else 730#else
724 regs->msr &= ~MSR_SE; 731 regs->msr &= ~MSR_SE;
@@ -727,22 +734,75 @@ void user_disable_single_step(struct task_struct *task)
727 clear_tsk_thread_flag(task, TIF_SINGLESTEP); 734 clear_tsk_thread_flag(task, TIF_SINGLESTEP);
728} 735}
729 736
730static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, 737int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
731 unsigned long data) 738 unsigned long data)
732{ 739{
733	/* We only support one DABR and no IABRS at the moment */	 740	/* For ppc64 we support one DABR and no IABRs at the moment.
 741	 * For embedded processors we support one DAC and no IACs at the
742 * moment.
743 */
734 if (addr > 0) 744 if (addr > 0)
735 return -EINVAL; 745 return -EINVAL;
736 746
737 /* The bottom 3 bits are flags */
738 if ((data & ~0x7UL) >= TASK_SIZE) 747 if ((data & ~0x7UL) >= TASK_SIZE)
739 return -EIO; 748 return -EIO;
740 749
741 /* Ensure translation is on */ 750#ifdef CONFIG_PPC64
751
752	/* For processors using DABR (e.g. 970), the bottom 3 bits are flags.
 753	 * It was assumed, in previous implementations, that 3 bits were
754 * passed together with the data address, fitting the design of the
755 * DABR register, as follows:
756 *
757 * bit 0: Read flag
758 * bit 1: Write flag
759 * bit 2: Breakpoint translation
760 *
761	 * Thus, we use them here as such.
762 */
763
764 /* Ensure breakpoint translation bit is set */
742 if (data && !(data & DABR_TRANSLATION)) 765 if (data && !(data & DABR_TRANSLATION))
743 return -EIO; 766 return -EIO;
744 767
768 /* Move contents to the DABR register */
745 task->thread.dabr = data; 769 task->thread.dabr = data;
770
771#endif
772#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
773
774 /* As described above, it was assumed 3 bits were passed with the data
775	 * address, but here we assume only the mode bits are passed,
 776	 * so as not to impose alignment restrictions on DAC-based processors.
777 */
778
779 /* DAC's hold the whole address without any mode flags */
780 task->thread.dabr = data & ~0x3UL;
781
782 if (task->thread.dabr == 0) {
783 task->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W | DBCR0_IDM);
784 task->thread.regs->msr &= ~MSR_DE;
785 return 0;
786 }
787
788 /* Read or Write bits must be set */
789
790 if (!(data & 0x3UL))
791 return -EINVAL;
792
793 /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
794 register */
795 task->thread.dbcr0 = DBCR0_IDM;
796
797 /* Check for write and read flags and set DBCR0
798 accordingly */
799 if (data & 0x1UL)
800 task->thread.dbcr0 |= DBSR_DAC1R;
801 if (data & 0x2UL)
802 task->thread.dbcr0 |= DBSR_DAC1W;
803
804 task->thread.regs->msr |= MSR_DE;
805#endif
746 return 0; 806 return 0;
747} 807}
748 808
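
From the tracer's side, the reworked ptrace_set_debugreg() takes the watch address in the ptrace data argument, with the low bits used as flags: bit 0 read, bit 1 write and, on DABR-based CPUs, bit 2 translation. A hedged usage sketch; the request value mirrors the powerpc asm/ptrace.h definition and should be verified there:

    #include <sys/ptrace.h>
    #include <sys/types.h>

    #ifndef PTRACE_SET_DEBUGREG
    #define PTRACE_SET_DEBUGREG 26  /* powerpc-specific request */
    #endif

    /* Arm a read+write watchpoint on an already-traced, stopped child. */
    static long watch_rw(pid_t child, unsigned long addr)
    {
        /* bit 0: read, bit 1: write, bit 2: translation (DABR layout).
         * BookE/44x parts mask the mode bits off the address and ignore
         * the translation bit, as the hunk above shows. */
        unsigned long data = (addr & ~0x7UL) | 0x1 | 0x2 | 0x4;

        /* the ptrace addr argument must be 0; addr > 0 is -EINVAL */
        return ptrace(PTRACE_SET_DEBUGREG, child, 0, data);
    }
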
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 09ded5c424a9..149cb112cd1a 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -286,7 +286,7 @@ static ssize_t rtas_flash_read(struct file *file, char __user *buf,
286} 286}
287 287
288/* constructor for flash_block_cache */ 288/* constructor for flash_block_cache */
289void rtas_block_ctor(struct kmem_cache *cache, void *ptr) 289void rtas_block_ctor(void *ptr)
290{ 290{
291 memset(ptr, 0, RTAS_BLK_SIZE); 291 memset(ptr, 0, RTAS_BLK_SIZE);
292} 292}
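
The rtas_block_ctor() change tracks the tree-wide slab API cleanup in which constructors receive only the object pointer; the struct kmem_cache argument is gone. A generic sketch of a cache written against the new prototype (the demo names and object size are hypothetical):

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    #define DEMO_OBJ_SIZE 64        /* hypothetical object size */

    /* One-argument constructor, as the new API requires */
    static void demo_ctor(void *ptr)
    {
        memset(ptr, 0, DEMO_OBJ_SIZE);
    }

    static struct kmem_cache *demo_cache;

    static int __init demo_cache_init(void)
    {
        demo_cache = kmem_cache_create("demo_cache", DEMO_OBJ_SIZE,
                                       0, 0, demo_ctor);
        return demo_cache ? 0 : -ENOMEM;
    }
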
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 4efebe88e64a..066e65c59b58 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -43,10 +43,6 @@
43 43
44#define DBG(fmt...) 44#define DBG(fmt...)
45 45
46#if defined CONFIG_KGDB
47#include <asm/kgdb.h>
48#endif
49
50extern void bootx_init(unsigned long r4, unsigned long phys); 46extern void bootx_init(unsigned long r4, unsigned long phys);
51 47
52int boot_cpuid; 48int boot_cpuid;
@@ -302,18 +298,6 @@ void __init setup_arch(char **cmdline_p)
302 298
303 xmon_setup(); 299 xmon_setup();
304 300
305#if defined(CONFIG_KGDB)
306 if (ppc_md.kgdb_map_scc)
307 ppc_md.kgdb_map_scc();
308 set_debug_traps();
309 if (strstr(cmd_line, "gdb")) {
310 if (ppc_md.progress)
311 ppc_md.progress("setup_arch: kgdb breakpoint", 0x4000);
312 printk("kgdb breakpoint activated\n");
313 breakpoint();
314 }
315#endif
316
317 /* 301 /*
318 * Set cache line size based on type of cpu as a default. 302 * Set cache line size based on type of cpu as a default.
319 * Systems with OF can look in the properties on the cpu node(s) 303 * Systems with OF can look in the properties on the cpu node(s)
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index ad55488939c3..7aada783ec6a 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -145,8 +145,12 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
145 * user space. The DABR will have been cleared if it 145 * user space. The DABR will have been cleared if it
146 * triggered inside the kernel. 146 * triggered inside the kernel.
147 */ 147 */
148 if (current->thread.dabr) 148 if (current->thread.dabr) {
149 set_dabr(current->thread.dabr); 149 set_dabr(current->thread.dabr);
150#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
151 mtspr(SPRN_DBCR0, current->thread.dbcr0);
152#endif
153 }
150 154
151 if (is32) { 155 if (is32) {
152 if (ka.sa.sa_flags & SA_SIGINFO) 156 if (ka.sa.sa_flags & SA_SIGINFO)
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index 071bee3ec749..f2589645870a 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@ -59,6 +59,6 @@ EXPORT_SYMBOL_GPL(save_stack_trace);
59 59
60void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) 60void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
61{ 61{
62 save_context_stack(trace, tsk->thread.regs->gpr[1], tsk, 0); 62 save_context_stack(trace, tsk->thread.ksp, tsk, 0);
63} 63}
64EXPORT_SYMBOL_GPL(save_stack_trace_tsk); 64EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
diff --git a/arch/powerpc/kernel/suspend.c b/arch/powerpc/kernel/suspend.c
index 8cee57107541..6fc6328dc626 100644
--- a/arch/powerpc/kernel/suspend.c
+++ b/arch/powerpc/kernel/suspend.c
@@ -7,6 +7,7 @@
7 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org> 7 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
8 */ 8 */
9 9
10#include <linux/mm.h>
10#include <asm/page.h> 11#include <asm/page.h>
11 12
12/* References to section boundaries */ 13/* References to section boundaries */
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index c8127f832df0..800e5e9a087b 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -28,7 +28,9 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices);
28/* Time in microseconds we delay before sleeping in the idle loop */ 28/* Time in microseconds we delay before sleeping in the idle loop */
29DEFINE_PER_CPU(unsigned long, smt_snooze_delay) = { 100 }; 29DEFINE_PER_CPU(unsigned long, smt_snooze_delay) = { 100 };
30 30
31static ssize_t store_smt_snooze_delay(struct sys_device *dev, const char *buf, 31static ssize_t store_smt_snooze_delay(struct sys_device *dev,
32 struct sysdev_attribute *attr,
33 const char *buf,
32 size_t count) 34 size_t count)
33{ 35{
34 struct cpu *cpu = container_of(dev, struct cpu, sysdev); 36 struct cpu *cpu = container_of(dev, struct cpu, sysdev);
@@ -44,7 +46,9 @@ static ssize_t store_smt_snooze_delay(struct sys_device *dev, const char *buf,
44 return count; 46 return count;
45} 47}
46 48
47static ssize_t show_smt_snooze_delay(struct sys_device *dev, char *buf) 49static ssize_t show_smt_snooze_delay(struct sys_device *dev,
50 struct sysdev_attribute *attr,
51 char *buf)
48{ 52{
49 struct cpu *cpu = container_of(dev, struct cpu, sysdev); 53 struct cpu *cpu = container_of(dev, struct cpu, sysdev);
50 54
@@ -152,14 +156,17 @@ static unsigned long write_##NAME(unsigned long val) \
152 mtspr(ADDRESS, val); \ 156 mtspr(ADDRESS, val); \
153 return 0; \ 157 return 0; \
154} \ 158} \
155static ssize_t show_##NAME(struct sys_device *dev, char *buf) \ 159static ssize_t show_##NAME(struct sys_device *dev, \
160 struct sysdev_attribute *attr, \
161 char *buf) \
156{ \ 162{ \
157 struct cpu *cpu = container_of(dev, struct cpu, sysdev); \ 163 struct cpu *cpu = container_of(dev, struct cpu, sysdev); \
158 unsigned long val = run_on_cpu(cpu->sysdev.id, read_##NAME, 0); \ 164 unsigned long val = run_on_cpu(cpu->sysdev.id, read_##NAME, 0); \
159 return sprintf(buf, "%lx\n", val); \ 165 return sprintf(buf, "%lx\n", val); \
160} \ 166} \
161static ssize_t __used \ 167static ssize_t __used \
162 store_##NAME(struct sys_device *dev, const char *buf, size_t count) \ 168 store_##NAME(struct sys_device *dev, struct sysdev_attribute *attr, \
169 const char *buf, size_t count) \
163{ \ 170{ \
164 struct cpu *cpu = container_of(dev, struct cpu, sysdev); \ 171 struct cpu *cpu = container_of(dev, struct cpu, sysdev); \
165 unsigned long val; \ 172 unsigned long val; \
@@ -522,7 +529,8 @@ static void register_nodes(void)
522#endif 529#endif
523 530
524/* Only valid if CPU is present. */ 531/* Only valid if CPU is present. */
525static ssize_t show_physical_id(struct sys_device *dev, char *buf) 532static ssize_t show_physical_id(struct sys_device *dev,
533 struct sysdev_attribute *attr, char *buf)
526{ 534{
527 struct cpu *cpu = container_of(dev, struct cpu, sysdev); 535 struct cpu *cpu = container_of(dev, struct cpu, sysdev);
528 536
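
The sysfs.c hunks are mechanical fallout from the sysdev API change that threads a struct sysdev_attribute pointer through every show/store callback. A minimal sketch of an attribute written against the new prototypes (the "demo" attribute is hypothetical, not from this patch):

    #include <linux/kernel.h>
    #include <linux/sysdev.h>

    static unsigned long demo_value;

    static ssize_t demo_show(struct sys_device *dev,
                             struct sysdev_attribute *attr, char *buf)
    {
        return sprintf(buf, "%lu\n", demo_value);
    }

    static ssize_t demo_store(struct sys_device *dev,
                              struct sysdev_attribute *attr,
                              const char *buf, size_t count)
    {
        demo_value = simple_strtoul(buf, NULL, 10);
        return count;
    }

    static SYSDEV_ATTR(demo, 0644, demo_show, demo_store);

The attribute would be hooked up with sysdev_create_file(dev, &attr_demo); passing the attribute into the callbacks is what lets one show/store implementation serve several attributes.
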
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 878fbddb6ae1..81ccb8dd1a54 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1067,6 +1067,22 @@ void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
1067 } 1067 }
1068 1068
1069 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); 1069 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
1070 } else if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
1071 regs->msr &= ~MSR_DE;
1072
1073 if (user_mode(regs)) {
1074 current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W |
1075 DBCR0_IDM);
1076 } else {
1077			/* Disable DAC interrupts */
1078 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R |
1079 DBSR_DAC1W | DBCR0_IDM));
1080
1081 /* Clear the DAC event */
1082 mtspr(SPRN_DBSR, (DBSR_DAC1R | DBSR_DAC1W));
1083 }
1084		/* Set up and deliver the trap to the handler */
1085 do_dabr(regs, mfspr(SPRN_DAC1), debug_status);
1070 } 1086 }
1071} 1087}
1072#endif /* CONFIG_4xx || CONFIG_BOOKE */ 1088#endif /* CONFIG_4xx || CONFIG_BOOKE */
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index b77f8af7ddde..ade8aeaa2e70 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -1,11 +1,12 @@
1/* 1/*
2 * IBM PowerPC Virtual I/O Infrastructure Support. 2 * IBM PowerPC Virtual I/O Infrastructure Support.
3 * 3 *
4 * Copyright (c) 2003-2005 IBM Corp. 4 * Copyright (c) 2003,2008 IBM Corp.
5 * Dave Engebretsen engebret@us.ibm.com 5 * Dave Engebretsen engebret@us.ibm.com
6 * Santiago Leon santil@us.ibm.com 6 * Santiago Leon santil@us.ibm.com
7 * Hollis Blanchard <hollisb@us.ibm.com> 7 * Hollis Blanchard <hollisb@us.ibm.com>
8 * Stephen Rothwell 8 * Stephen Rothwell
9 * Robert Jennings <rcjenn@us.ibm.com>
9 * 10 *
10 * This program is free software; you can redistribute it and/or 11 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 12 * modify it under the terms of the GNU General Public License
@@ -46,6 +47,996 @@ static struct vio_dev vio_bus_device = { /* fake "parent" device */
46 .dev.bus = &vio_bus_type, 47 .dev.bus = &vio_bus_type,
47}; 48};
48 49
50#ifdef CONFIG_PPC_SMLPAR
51/**
52 * vio_cmo_pool - A pool of IO memory for CMO use
53 *
54 * @size: The size of the pool in bytes
55 * @free: The amount of free memory in the pool
56 */
57struct vio_cmo_pool {
58 size_t size;
59 size_t free;
60};
61
62/* How many jiffies to delay queued balance work */
63#define VIO_CMO_BALANCE_DELAY 100
64
65/* Portion out IO memory to CMO devices by this chunk size */
66#define VIO_CMO_BALANCE_CHUNK 131072
67
68/**
69 * vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement
70 *
 71 * @viodev: struct vio_dev pointer
72 * @list: pointer to other devices on bus that are being tracked
73 */
74struct vio_cmo_dev_entry {
75 struct vio_dev *viodev;
76 struct list_head list;
77};
78
79/**
80 * vio_cmo - VIO bus accounting structure for CMO entitlement
81 *
82 * @lock: spinlock for entire structure
83 * @balance_q: work queue for balancing system entitlement
84 * @device_list: list of CMO-enabled devices requiring entitlement
85 * @entitled: total system entitlement in bytes
86 * @reserve: pool of memory from which devices reserve entitlement, incl. spare
87 * @excess: pool of excess entitlement not needed for device reserves or spare
88 * @spare: IO memory for device hotplug functionality
89 * @min: minimum necessary for system operation
90 * @desired: desired memory for system operation
91 * @curr: bytes currently allocated
92 * @high: high water mark for IO data usage
93 */
94struct vio_cmo {
95 spinlock_t lock;
96 struct delayed_work balance_q;
97 struct list_head device_list;
98 size_t entitled;
99 struct vio_cmo_pool reserve;
100 struct vio_cmo_pool excess;
101 size_t spare;
102 size_t min;
103 size_t desired;
104 size_t curr;
105 size_t high;
106} vio_cmo;
107
108/**
 109 * vio_cmo_num_OF_devs - Count the number of OF devices that have DMA windows
110 */
111static int vio_cmo_num_OF_devs(void)
112{
113 struct device_node *node_vroot;
114 int count = 0;
115
116 /*
117 * Count the number of vdevice entries with an
118 * ibm,my-dma-window OF property
119 */
120 node_vroot = of_find_node_by_name(NULL, "vdevice");
121 if (node_vroot) {
122 struct device_node *of_node;
123 struct property *prop;
124
125 for_each_child_of_node(node_vroot, of_node) {
126 prop = of_find_property(of_node, "ibm,my-dma-window",
127 NULL);
128 if (prop)
129 count++;
130 }
131 }
132 of_node_put(node_vroot);
133 return count;
134}
135
136/**
 137 * vio_cmo_alloc - allocate IO memory for CMO-enabled devices
138 *
139 * @viodev: VIO device requesting IO memory
140 * @size: size of allocation requested
141 *
142 * Allocations come from memory reserved for the devices and any excess
143 * IO memory available to all devices. The spare pool used to service
144 * hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be
145 * made available.
146 *
147 * Return codes:
148 * 0 for successful allocation and -ENOMEM for a failure
149 */
150static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
151{
152 unsigned long flags;
153 size_t reserve_free = 0;
154 size_t excess_free = 0;
155 int ret = -ENOMEM;
156
157 spin_lock_irqsave(&vio_cmo.lock, flags);
158
159 /* Determine the amount of free entitlement available in reserve */
160 if (viodev->cmo.entitled > viodev->cmo.allocated)
161 reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;
162
 163	/* If the spare is not fulfilled, the excess pool cannot be used. */
164 if (vio_cmo.spare >= VIO_CMO_MIN_ENT)
165 excess_free = vio_cmo.excess.free;
166
167 /* The request can be satisfied */
168 if ((reserve_free + excess_free) >= size) {
169 vio_cmo.curr += size;
170 if (vio_cmo.curr > vio_cmo.high)
171 vio_cmo.high = vio_cmo.curr;
172 viodev->cmo.allocated += size;
173 size -= min(reserve_free, size);
174 vio_cmo.excess.free -= size;
175 ret = 0;
176 }
177
178 spin_unlock_irqrestore(&vio_cmo.lock, flags);
179 return ret;
180}
181
182/**
 183 * vio_cmo_dealloc - deallocate IO memory from CMO-enabled devices
184 * @viodev: VIO device freeing IO memory
185 * @size: size of deallocation
186 *
187 * IO memory is freed by the device back to the correct memory pools.
 188 * The spare pool is replenished first from either memory pool; then
 189 * the reserve pool is used to reduce device entitlement; the excess
 190 * pool is used to grow the reserve pool toward the desired entitlement
 191 * target; and any remaining memory is returned to the pools.
192 *
193 */
194static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
195{
196 unsigned long flags;
197 size_t spare_needed = 0;
198 size_t excess_freed = 0;
199 size_t reserve_freed = size;
200 size_t tmp;
201 int balance = 0;
202
203 spin_lock_irqsave(&vio_cmo.lock, flags);
204 vio_cmo.curr -= size;
205
206 /* Amount of memory freed from the excess pool */
207 if (viodev->cmo.allocated > viodev->cmo.entitled) {
208 excess_freed = min(reserve_freed, (viodev->cmo.allocated -
209 viodev->cmo.entitled));
210 reserve_freed -= excess_freed;
211 }
212
213 /* Remove allocation from device */
214 viodev->cmo.allocated -= (reserve_freed + excess_freed);
215
216 /* Spare is a subset of the reserve pool, replenish it first. */
217 spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare;
218
219 /*
220 * Replenish the spare in the reserve pool from the excess pool.
221 * This moves entitlement into the reserve pool.
222 */
223 if (spare_needed && excess_freed) {
224 tmp = min(excess_freed, spare_needed);
225 vio_cmo.excess.size -= tmp;
226 vio_cmo.reserve.size += tmp;
227 vio_cmo.spare += tmp;
228 excess_freed -= tmp;
229 spare_needed -= tmp;
230 balance = 1;
231 }
232
233 /*
234 * Replenish the spare in the reserve pool from the reserve pool.
235 * This removes entitlement from the device down to VIO_CMO_MIN_ENT,
236 * if needed, and gives it to the spare pool. The amount of used
237 * memory in this pool does not change.
238 */
239 if (spare_needed && reserve_freed) {
240 tmp = min(spare_needed, min(reserve_freed,
241 (viodev->cmo.entitled -
242 VIO_CMO_MIN_ENT)));
243
244 vio_cmo.spare += tmp;
245 viodev->cmo.entitled -= tmp;
246 reserve_freed -= tmp;
247 spare_needed -= tmp;
248 balance = 1;
249 }
250
251 /*
252 * Increase the reserve pool until the desired allocation is met.
253 * Move an allocation freed from the excess pool into the reserve
254 * pool and schedule a balance operation.
255 */
256 if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) {
257 tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size));
258
259 vio_cmo.excess.size -= tmp;
260 vio_cmo.reserve.size += tmp;
261 excess_freed -= tmp;
262 balance = 1;
263 }
264
265 /* Return memory from the excess pool to that pool */
266 if (excess_freed)
267 vio_cmo.excess.free += excess_freed;
268
269 if (balance)
270 schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY);
271 spin_unlock_irqrestore(&vio_cmo.lock, flags);
272}
273
274/**
275 * vio_cmo_entitlement_update - Manage system entitlement changes
276 *
277 * @new_entitlement: new system entitlement to attempt to accommodate
278 *
279 * Increases in entitlement will be used to fulfill the spare entitlement
280 * and the rest is given to the excess pool. Decreases, if they are
 281 * possible, come from the excess pool and from unused device entitlement.
282 *
 283 * Returns: 0 on success, -ENOMEM when the change cannot be made
284 */
285int vio_cmo_entitlement_update(size_t new_entitlement)
286{
287 struct vio_dev *viodev;
288 struct vio_cmo_dev_entry *dev_ent;
289 unsigned long flags;
290 size_t avail, delta, tmp;
291
292 spin_lock_irqsave(&vio_cmo.lock, flags);
293
294 /* Entitlement increases */
295 if (new_entitlement > vio_cmo.entitled) {
296 delta = new_entitlement - vio_cmo.entitled;
297
298 /* Fulfill spare allocation */
299 if (vio_cmo.spare < VIO_CMO_MIN_ENT) {
300 tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare));
301 vio_cmo.spare += tmp;
302 vio_cmo.reserve.size += tmp;
303 delta -= tmp;
304 }
305
306 /* Remaining new allocation goes to the excess pool */
307 vio_cmo.entitled += delta;
308 vio_cmo.excess.size += delta;
309 vio_cmo.excess.free += delta;
310
311 goto out;
312 }
313
314 /* Entitlement decreases */
315 delta = vio_cmo.entitled - new_entitlement;
316 avail = vio_cmo.excess.free;
317
318 /*
319 * Need to check how much unused entitlement each device can
320 * sacrifice to fulfill entitlement change.
321 */
322 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
323 if (avail >= delta)
324 break;
325
326 viodev = dev_ent->viodev;
327 if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
328 (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
329 avail += viodev->cmo.entitled -
330 max_t(size_t, viodev->cmo.allocated,
331 VIO_CMO_MIN_ENT);
332 }
333
334 if (delta <= avail) {
335 vio_cmo.entitled -= delta;
336
337 /* Take entitlement from the excess pool first */
338 tmp = min(vio_cmo.excess.free, delta);
339 vio_cmo.excess.size -= tmp;
340 vio_cmo.excess.free -= tmp;
341 delta -= tmp;
342
343 /*
344 * Remove all but VIO_CMO_MIN_ENT bytes from devices
345 * until entitlement change is served
346 */
347 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
348 if (!delta)
349 break;
350
351 viodev = dev_ent->viodev;
352 tmp = 0;
353 if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
354 (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
355 tmp = viodev->cmo.entitled -
356 max_t(size_t, viodev->cmo.allocated,
357 VIO_CMO_MIN_ENT);
358 viodev->cmo.entitled -= min(tmp, delta);
359 delta -= min(tmp, delta);
360 }
361 } else {
362 spin_unlock_irqrestore(&vio_cmo.lock, flags);
363 return -ENOMEM;
364 }
365
366out:
367 schedule_delayed_work(&vio_cmo.balance_q, 0);
368 spin_unlock_irqrestore(&vio_cmo.lock, flags);
369 return 0;
370}
371
372/**
373 * vio_cmo_balance - Balance entitlement among devices
374 *
375 * @work: work queue structure for this operation
376 *
377 * Any system entitlement above the minimum needed for devices, or
378 * already allocated to devices, can be distributed to the devices.
379 * The list of devices is iterated through to recalculate the desired
380 * entitlement level and to determine how much entitlement above the
381 * minimum entitlement is allocated to devices.
382 *
383 * Small chunks of the available entitlement are given to devices until
384 * their requirements are fulfilled or there is no entitlement left to give.
385 * Upon completion sizes of the reserve and excess pools are calculated.
386 *
387 * The system minimum entitlement level is also recalculated here.
388 * Entitlement will be reserved for devices even after vio_bus_remove to
389 * accommodate reloading the driver. The OF tree is walked to count the
390 * number of devices present and this will remove entitlement for devices
391 * that have actually left the system after having vio_bus_remove called.
392 */
393static void vio_cmo_balance(struct work_struct *work)
394{
395 struct vio_cmo *cmo;
396 struct vio_dev *viodev;
397 struct vio_cmo_dev_entry *dev_ent;
398 unsigned long flags;
399 size_t avail = 0, level, chunk, need;
400 int devcount = 0, fulfilled;
401
402 cmo = container_of(work, struct vio_cmo, balance_q.work);
403
404 spin_lock_irqsave(&vio_cmo.lock, flags);
405
406 /* Calculate minimum entitlement and fulfill spare */
407 cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT;
408 BUG_ON(cmo->min > cmo->entitled);
409 cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min));
410 cmo->min += cmo->spare;
411 cmo->desired = cmo->min;
412
413 /*
414 * Determine how much entitlement is available and reset device
415 * entitlements
416 */
417 avail = cmo->entitled - cmo->spare;
418 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
419 viodev = dev_ent->viodev;
420 devcount++;
421 viodev->cmo.entitled = VIO_CMO_MIN_ENT;
422 cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
423 avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
424 }
425
426 /*
427 * Having provided each device with the minimum entitlement, loop
428 * over the devices portioning out the remaining entitlement
429 * until there is nothing left.
430 */
431 level = VIO_CMO_MIN_ENT;
432 while (avail) {
433 fulfilled = 0;
434 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
435 viodev = dev_ent->viodev;
436
437 if (viodev->cmo.desired <= level) {
438 fulfilled++;
439 continue;
440 }
441
442 /*
443 * Give the device up to VIO_CMO_BALANCE_CHUNK
444 * bytes of entitlement, but do not exceed the
445 * desired level of entitlement for the device.
446 */
447 chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);
448 chunk = min(chunk, (viodev->cmo.desired -
449 viodev->cmo.entitled));
450 viodev->cmo.entitled += chunk;
451
452 /*
453 * If the memory for this entitlement increase was
454 * already allocated to the device it does not come
455 * from the available pool being portioned out.
456 */
 457			need = max(viodev->cmo.allocated, viodev->cmo.entitled) -
458 max(viodev->cmo.allocated, level);
459 avail -= need;
460
461 }
462 if (fulfilled == devcount)
463 break;
464 level += VIO_CMO_BALANCE_CHUNK;
465 }
466
467 /* Calculate new reserve and excess pool sizes */
468 cmo->reserve.size = cmo->min;
469 cmo->excess.free = 0;
470 cmo->excess.size = 0;
471 need = 0;
472 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
473 viodev = dev_ent->viodev;
474 /* Calculated reserve size above the minimum entitlement */
475 if (viodev->cmo.entitled)
476 cmo->reserve.size += (viodev->cmo.entitled -
477 VIO_CMO_MIN_ENT);
478 /* Calculated used excess entitlement */
479 if (viodev->cmo.allocated > viodev->cmo.entitled)
480 need += viodev->cmo.allocated - viodev->cmo.entitled;
481 }
482 cmo->excess.size = cmo->entitled - cmo->reserve.size;
483 cmo->excess.free = cmo->excess.size - need;
484
485 cancel_delayed_work(container_of(work, struct delayed_work, work));
486 spin_unlock_irqrestore(&vio_cmo.lock, flags);
487}
488
489static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
490 dma_addr_t *dma_handle, gfp_t flag)
491{
492 struct vio_dev *viodev = to_vio_dev(dev);
493 void *ret;
494
495 if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) {
496 atomic_inc(&viodev->cmo.allocs_failed);
497 return NULL;
498 }
499
500 ret = dma_iommu_ops.alloc_coherent(dev, size, dma_handle, flag);
501 if (unlikely(ret == NULL)) {
502 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
503 atomic_inc(&viodev->cmo.allocs_failed);
504 }
505
506 return ret;
507}
508
509static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
510 void *vaddr, dma_addr_t dma_handle)
511{
512 struct vio_dev *viodev = to_vio_dev(dev);
513
514 dma_iommu_ops.free_coherent(dev, size, vaddr, dma_handle);
515
516 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
517}
518
519static dma_addr_t vio_dma_iommu_map_single(struct device *dev, void *vaddr,
520 size_t size,
521 enum dma_data_direction direction,
522 struct dma_attrs *attrs)
523{
524 struct vio_dev *viodev = to_vio_dev(dev);
525 dma_addr_t ret = DMA_ERROR_CODE;
526
527 if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) {
528 atomic_inc(&viodev->cmo.allocs_failed);
529 return ret;
530 }
531
532 ret = dma_iommu_ops.map_single(dev, vaddr, size, direction, attrs);
533 if (unlikely(dma_mapping_error(ret))) {
534 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
535 atomic_inc(&viodev->cmo.allocs_failed);
536 }
537
538 return ret;
539}
540
541static void vio_dma_iommu_unmap_single(struct device *dev,
542 dma_addr_t dma_handle, size_t size,
543 enum dma_data_direction direction,
544 struct dma_attrs *attrs)
545{
546 struct vio_dev *viodev = to_vio_dev(dev);
547
548 dma_iommu_ops.unmap_single(dev, dma_handle, size, direction, attrs);
549
550 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
551}
552
553static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
554 int nelems, enum dma_data_direction direction,
555 struct dma_attrs *attrs)
556{
557 struct vio_dev *viodev = to_vio_dev(dev);
558 struct scatterlist *sgl;
559 int ret, count = 0;
560 size_t alloc_size = 0;
561
562 for (sgl = sglist; count < nelems; count++, sgl++)
563 alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE);
564
565 if (vio_cmo_alloc(viodev, alloc_size)) {
566 atomic_inc(&viodev->cmo.allocs_failed);
567 return 0;
568 }
569
570 ret = dma_iommu_ops.map_sg(dev, sglist, nelems, direction, attrs);
571
572 if (unlikely(!ret)) {
573 vio_cmo_dealloc(viodev, alloc_size);
574 atomic_inc(&viodev->cmo.allocs_failed);
575 }
576
577 for (sgl = sglist, count = 0; count < ret; count++, sgl++)
578 alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
579 if (alloc_size)
580 vio_cmo_dealloc(viodev, alloc_size);
581
582 return ret;
583}
584
585static void vio_dma_iommu_unmap_sg(struct device *dev,
586 struct scatterlist *sglist, int nelems,
587 enum dma_data_direction direction,
588 struct dma_attrs *attrs)
589{
590 struct vio_dev *viodev = to_vio_dev(dev);
591 struct scatterlist *sgl;
592 size_t alloc_size = 0;
593 int count = 0;
594
595 for (sgl = sglist; count < nelems; count++, sgl++)
596 alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
597
598 dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);
599
600 vio_cmo_dealloc(viodev, alloc_size);
601}
602
603struct dma_mapping_ops vio_dma_mapping_ops = {
604 .alloc_coherent = vio_dma_iommu_alloc_coherent,
605 .free_coherent = vio_dma_iommu_free_coherent,
606 .map_single = vio_dma_iommu_map_single,
607 .unmap_single = vio_dma_iommu_unmap_single,
608 .map_sg = vio_dma_iommu_map_sg,
609 .unmap_sg = vio_dma_iommu_unmap_sg,
610};
611
612/**
613 * vio_cmo_set_dev_desired - Set desired entitlement for a device
614 *
615 * @viodev: struct vio_dev for device to alter
616 * @new_desired: new desired entitlement level in bytes
617 *
618 * For use by devices to request a change to their entitlement at runtime or
619 * through sysfs. The desired entitlement level is changed and a balancing
620 * of system resources is scheduled to run in the future.
621 */
622void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
623{
624 unsigned long flags;
625 struct vio_cmo_dev_entry *dev_ent;
626 int found = 0;
627
628 if (!firmware_has_feature(FW_FEATURE_CMO))
629 return;
630
631 spin_lock_irqsave(&vio_cmo.lock, flags);
632 if (desired < VIO_CMO_MIN_ENT)
633 desired = VIO_CMO_MIN_ENT;
634
635 /*
 636	 * Changes will not be made for devices not in the device list:
 637	 * if a device is not in the list, no driver is loaded for it
 638	 * and it cannot receive entitlement.
639 */
640 list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
641 if (viodev == dev_ent->viodev) {
642 found = 1;
643 break;
644 }
 645	if (!found) {
 646		spin_unlock_irqrestore(&vio_cmo.lock, flags);
 647		return;
 648	}
648 /* Increase/decrease in desired device entitlement */
649 if (desired >= viodev->cmo.desired) {
650 /* Just bump the bus and device values prior to a balance*/
651 vio_cmo.desired += desired - viodev->cmo.desired;
652 viodev->cmo.desired = desired;
653 } else {
654 /* Decrease bus and device values for desired entitlement */
655 vio_cmo.desired -= viodev->cmo.desired - desired;
656 viodev->cmo.desired = desired;
657 /*
658 * If less entitlement is desired than current entitlement, move
659 * any reserve memory in the change region to the excess pool.
660 */
661 if (viodev->cmo.entitled > desired) {
662 vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
663 vio_cmo.excess.size += viodev->cmo.entitled - desired;
664 /*
665 * If entitlement moving from the reserve pool to the
666 * excess pool is currently unused, add to the excess
667 * free counter.
668 */
669 if (viodev->cmo.allocated < viodev->cmo.entitled)
670 vio_cmo.excess.free += viodev->cmo.entitled -
671 max(viodev->cmo.allocated, desired);
672 viodev->cmo.entitled = desired;
673 }
674 }
675 schedule_delayed_work(&vio_cmo.balance_q, 0);
676 spin_unlock_irqrestore(&vio_cmo.lock, flags);
677}
678
679/**
680 * vio_cmo_bus_probe - Handle CMO specific bus probe activities
681 *
 682 * @viodev: Pointer to struct vio_dev for device
 683 *
 684 * Determine the device's IO memory entitlement needs, attempting
685 * to satisfy the system minimum entitlement at first and scheduling
686 * a balance operation to take care of the rest at a later time.
687 *
688 * Returns: 0 on success, -EINVAL when device doesn't support CMO, and
689 * -ENOMEM when entitlement is not available for device or
690 * device entry.
691 *
692 */
693static int vio_cmo_bus_probe(struct vio_dev *viodev)
694{
695 struct vio_cmo_dev_entry *dev_ent;
696 struct device *dev = &viodev->dev;
697 struct vio_driver *viodrv = to_vio_driver(dev->driver);
698 unsigned long flags;
699 size_t size;
700
701 /*
702 * Check to see that device has a DMA window and configure
703 * entitlement for the device.
704 */
705 if (of_get_property(viodev->dev.archdata.of_node,
706 "ibm,my-dma-window", NULL)) {
707 /* Check that the driver is CMO enabled and get desired DMA */
708 if (!viodrv->get_desired_dma) {
709 dev_err(dev, "%s: device driver does not support CMO\n",
710 __func__);
711 return -EINVAL;
712 }
713
714 viodev->cmo.desired = IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev));
715 if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
716 viodev->cmo.desired = VIO_CMO_MIN_ENT;
717 size = VIO_CMO_MIN_ENT;
718
719 dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry),
720 GFP_KERNEL);
721 if (!dev_ent)
722 return -ENOMEM;
723
724 dev_ent->viodev = viodev;
725 spin_lock_irqsave(&vio_cmo.lock, flags);
726 list_add(&dev_ent->list, &vio_cmo.device_list);
727 } else {
728 viodev->cmo.desired = 0;
729 size = 0;
730 spin_lock_irqsave(&vio_cmo.lock, flags);
731 }
732
733 /*
734 * If the needs for vio_cmo.min have not changed since they
735 * were last set, the number of devices in the OF tree has
736 * been constant and the IO memory for this is already in
737 * the reserve pool.
738 */
739 if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) *
740 VIO_CMO_MIN_ENT)) {
 741		/* Update desired entitlement if device requires it */
742 if (size)
743 vio_cmo.desired += (viodev->cmo.desired -
744 VIO_CMO_MIN_ENT);
745 } else {
746 size_t tmp;
747
748 tmp = vio_cmo.spare + vio_cmo.excess.free;
749 if (tmp < size) {
750 dev_err(dev, "%s: insufficient free "
751 "entitlement to add device. "
752 "Need %lu, have %lu\n", __func__,
 753				size, tmp);
754 spin_unlock_irqrestore(&vio_cmo.lock, flags);
755 return -ENOMEM;
756 }
757
758 /* Use excess pool first to fulfill request */
759 tmp = min(size, vio_cmo.excess.free);
760 vio_cmo.excess.free -= tmp;
761 vio_cmo.excess.size -= tmp;
762 vio_cmo.reserve.size += tmp;
763
764 /* Use spare if excess pool was insufficient */
765 vio_cmo.spare -= size - tmp;
766
767 /* Update bus accounting */
768 vio_cmo.min += size;
769 vio_cmo.desired += viodev->cmo.desired;
770 }
771 spin_unlock_irqrestore(&vio_cmo.lock, flags);
772 return 0;
773}
774
775/**
776 * vio_cmo_bus_remove - Handle CMO specific bus removal activities
777 *
 778 * @viodev: Pointer to struct vio_dev for device
779 *
780 * Remove the device from the cmo device list. The minimum entitlement
781 * will be reserved for the device as long as it is in the system. The
782 * rest of the entitlement the device had been allocated will be returned
783 * to the system.
784 */
785static void vio_cmo_bus_remove(struct vio_dev *viodev)
786{
787 struct vio_cmo_dev_entry *dev_ent;
788 unsigned long flags;
789 size_t tmp;
790
791 spin_lock_irqsave(&vio_cmo.lock, flags);
792 if (viodev->cmo.allocated) {
793 dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
794 "allocated after remove operation.\n",
795 __func__, viodev->cmo.allocated);
796 BUG();
797 }
798
799 /*
800 * Remove the device from the device list being maintained for
801 * CMO enabled devices.
802 */
803 list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
804 if (viodev == dev_ent->viodev) {
805 list_del(&dev_ent->list);
806 kfree(dev_ent);
807 break;
808 }
809
810 /*
811 * Devices may not require any entitlement and they do not need
812 * to be processed. Otherwise, return the device's entitlement
813 * back to the pools.
814 */
815 if (viodev->cmo.entitled) {
816 /*
 817		 * This device has not yet left the OF tree; its
 818		 * minimum entitlement remains in vio_cmo.min and
 819		 * vio_cmo.desired.
820 */
821 vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);
822
823 /*
 824		 * Keep the device's minimum allocation in reserve for as
 825		 * long as it exists in the OF tree, as determined by a
 826		 * later balance operation.
827 */
828 viodev->cmo.entitled -= VIO_CMO_MIN_ENT;
829
830 /* Replenish spare from freed reserve pool */
831 if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
832 tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
833 vio_cmo.spare));
834 vio_cmo.spare += tmp;
835 viodev->cmo.entitled -= tmp;
836 }
837
838 /* Remaining reserve goes to excess pool */
839 vio_cmo.excess.size += viodev->cmo.entitled;
840 vio_cmo.excess.free += viodev->cmo.entitled;
841 vio_cmo.reserve.size -= viodev->cmo.entitled;
842
843 /*
844 * Until the device is removed it will keep a
 845		 * minimum entitlement; this guarantees that a
 846		 * module unload/reload will succeed.
847 */
848 viodev->cmo.entitled = VIO_CMO_MIN_ENT;
849 viodev->cmo.desired = VIO_CMO_MIN_ENT;
850 atomic_set(&viodev->cmo.allocs_failed, 0);
851 }
852
853 spin_unlock_irqrestore(&vio_cmo.lock, flags);
854}
855
856static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
857{
858 vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
859 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
860}
861
862/**
863 * vio_cmo_bus_init - CMO entitlement initialization at bus init time
864 *
865 * Set up the reserve and excess entitlement pools based on available
866 * system entitlement and the number of devices in the OF tree that
867 * require entitlement in the reserve pool.
868 */
869static void vio_cmo_bus_init(void)
870{
871 struct hvcall_mpp_data mpp_data;
872 int err;
873
874 memset(&vio_cmo, 0, sizeof(struct vio_cmo));
875 spin_lock_init(&vio_cmo.lock);
876 INIT_LIST_HEAD(&vio_cmo.device_list);
877 INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance);
878
879 /* Get current system entitlement */
880 err = h_get_mpp(&mpp_data);
881
882 /*
 883	 * On failure, continue with entitlement set to 0; we will panic()
 884	 * later when the spare cannot be reserved.
885 */
886 if (err != H_SUCCESS) {
 887		printk(KERN_ERR "%s: unable to determine system IO "
888 "entitlement. (%d)\n", __func__, err);
889 vio_cmo.entitled = 0;
890 } else {
891 vio_cmo.entitled = mpp_data.entitled_mem;
892 }
893
894 /* Set reservation and check against entitlement */
895 vio_cmo.spare = VIO_CMO_MIN_ENT;
896 vio_cmo.reserve.size = vio_cmo.spare;
897 vio_cmo.reserve.size += (vio_cmo_num_OF_devs() *
898 VIO_CMO_MIN_ENT);
899 if (vio_cmo.reserve.size > vio_cmo.entitled) {
900 printk(KERN_ERR "%s: insufficient system entitlement\n",
901 __func__);
902 panic("%s: Insufficient system entitlement", __func__);
903 }
904
905 /* Set the remaining accounting variables */
906 vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size;
907 vio_cmo.excess.free = vio_cmo.excess.size;
908 vio_cmo.min = vio_cmo.reserve.size;
909 vio_cmo.desired = vio_cmo.reserve.size;
910}
911
912/* sysfs device functions and data structures for CMO */
913
914#define viodev_cmo_rd_attr(name) \
915static ssize_t viodev_cmo_##name##_show(struct device *dev, \
916 struct device_attribute *attr, \
917 char *buf) \
918{ \
919 return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name); \
920}
921
922static ssize_t viodev_cmo_allocs_failed_show(struct device *dev,
923 struct device_attribute *attr, char *buf)
924{
925 struct vio_dev *viodev = to_vio_dev(dev);
926 return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
927}
928
929static ssize_t viodev_cmo_allocs_failed_reset(struct device *dev,
930 struct device_attribute *attr, const char *buf, size_t count)
931{
932 struct vio_dev *viodev = to_vio_dev(dev);
933 atomic_set(&viodev->cmo.allocs_failed, 0);
934 return count;
935}
936
937static ssize_t viodev_cmo_desired_set(struct device *dev,
938 struct device_attribute *attr, const char *buf, size_t count)
939{
940 struct vio_dev *viodev = to_vio_dev(dev);
941 size_t new_desired;
942 int ret;
943
944 ret = strict_strtoul(buf, 10, &new_desired);
945 if (ret)
946 return ret;
947
948 vio_cmo_set_dev_desired(viodev, new_desired);
949 return count;
950}
951
952viodev_cmo_rd_attr(desired);
953viodev_cmo_rd_attr(entitled);
954viodev_cmo_rd_attr(allocated);
955
956static ssize_t name_show(struct device *, struct device_attribute *, char *);
957static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
958static struct device_attribute vio_cmo_dev_attrs[] = {
959 __ATTR_RO(name),
960 __ATTR_RO(devspec),
961 __ATTR(cmo_desired, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
962 viodev_cmo_desired_show, viodev_cmo_desired_set),
963 __ATTR(cmo_entitled, S_IRUGO, viodev_cmo_entitled_show, NULL),
964 __ATTR(cmo_allocated, S_IRUGO, viodev_cmo_allocated_show, NULL),
965 __ATTR(cmo_allocs_failed, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
966 viodev_cmo_allocs_failed_show, viodev_cmo_allocs_failed_reset),
967 __ATTR_NULL
968};
969
970/* sysfs bus functions and data structures for CMO */
971
972#define viobus_cmo_rd_attr(name) \
973static ssize_t \
974viobus_cmo_##name##_show(struct bus_type *bt, char *buf) \
975{ \
976 return sprintf(buf, "%lu\n", vio_cmo.name); \
977}
978
979#define viobus_cmo_pool_rd_attr(name, var) \
980static ssize_t \
981viobus_cmo_##name##_pool_show_##var(struct bus_type *bt, char *buf) \
982{ \
983 return sprintf(buf, "%lu\n", vio_cmo.name.var); \
984}
985
986static ssize_t viobus_cmo_high_reset(struct bus_type *bt, const char *buf,
987 size_t count)
988{
989 unsigned long flags;
990
991 spin_lock_irqsave(&vio_cmo.lock, flags);
992 vio_cmo.high = vio_cmo.curr;
993 spin_unlock_irqrestore(&vio_cmo.lock, flags);
994
995 return count;
996}
997
998viobus_cmo_rd_attr(entitled);
999viobus_cmo_pool_rd_attr(reserve, size);
1000viobus_cmo_pool_rd_attr(excess, size);
1001viobus_cmo_pool_rd_attr(excess, free);
1002viobus_cmo_rd_attr(spare);
1003viobus_cmo_rd_attr(min);
1004viobus_cmo_rd_attr(desired);
1005viobus_cmo_rd_attr(curr);
1006viobus_cmo_rd_attr(high);
1007
1008static struct bus_attribute vio_cmo_bus_attrs[] = {
1009 __ATTR(cmo_entitled, S_IRUGO, viobus_cmo_entitled_show, NULL),
1010 __ATTR(cmo_reserve_size, S_IRUGO, viobus_cmo_reserve_pool_show_size, NULL),
1011 __ATTR(cmo_excess_size, S_IRUGO, viobus_cmo_excess_pool_show_size, NULL),
1012 __ATTR(cmo_excess_free, S_IRUGO, viobus_cmo_excess_pool_show_free, NULL),
1013 __ATTR(cmo_spare, S_IRUGO, viobus_cmo_spare_show, NULL),
1014 __ATTR(cmo_min, S_IRUGO, viobus_cmo_min_show, NULL),
1015 __ATTR(cmo_desired, S_IRUGO, viobus_cmo_desired_show, NULL),
1016 __ATTR(cmo_curr, S_IRUGO, viobus_cmo_curr_show, NULL),
1017 __ATTR(cmo_high, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
1018 viobus_cmo_high_show, viobus_cmo_high_reset),
1019 __ATTR_NULL
1020};
1021
1022static void vio_cmo_sysfs_init(void)
1023{
1024 vio_bus_type.dev_attrs = vio_cmo_dev_attrs;
1025 vio_bus_type.bus_attrs = vio_cmo_bus_attrs;
1026}
1027#else /* CONFIG_PPC_SMLPAR */
1028/* Dummy functions for platforms without CMO support (e.g. iSeries) */
1029int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
1030void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
1031static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
1032static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
1033static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
1034static void vio_cmo_bus_init(void) {}
1035static void vio_cmo_sysfs_init(void) {}
1036#endif /* CONFIG_PPC_SMLPAR */
1037EXPORT_SYMBOL(vio_cmo_entitlement_update);
1038EXPORT_SYMBOL(vio_cmo_set_dev_desired);
1039
49static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev) 1040static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
50{ 1041{
51 const unsigned char *dma_window; 1042 const unsigned char *dma_window;
@@ -114,8 +1105,17 @@ static int vio_bus_probe(struct device *dev)
114 return error; 1105 return error;
115 1106
116 id = vio_match_device(viodrv->id_table, viodev); 1107 id = vio_match_device(viodrv->id_table, viodev);
117 if (id) 1108 if (id) {
1109 memset(&viodev->cmo, 0, sizeof(viodev->cmo));
1110 if (firmware_has_feature(FW_FEATURE_CMO)) {
1111 error = vio_cmo_bus_probe(viodev);
1112 if (error)
1113 return error;
1114 }
118 error = viodrv->probe(viodev, id); 1115 error = viodrv->probe(viodev, id);
1116 if (error)
1117 vio_cmo_bus_remove(viodev);
1118 }
119 1119
120 return error; 1120 return error;
121} 1121}
@@ -125,12 +1125,23 @@ static int vio_bus_remove(struct device *dev)
125{ 1125{
126 struct vio_dev *viodev = to_vio_dev(dev); 1126 struct vio_dev *viodev = to_vio_dev(dev);
127 struct vio_driver *viodrv = to_vio_driver(dev->driver); 1127 struct vio_driver *viodrv = to_vio_driver(dev->driver);
1128 struct device *devptr;
1129 int ret = 1;
1130
1131 /*
1132 * Hold a reference to the device after the remove function is called
1133 * to allow for CMO accounting cleanup for the device.
1134 */
1135 devptr = get_device(dev);
128 1136
129 if (viodrv->remove) 1137 if (viodrv->remove)
130 return viodrv->remove(viodev); 1138 ret = viodrv->remove(viodev);
1139
1140 if (!ret && firmware_has_feature(FW_FEATURE_CMO))
1141 vio_cmo_bus_remove(viodev);
131 1142
132 /* driver can't remove */ 1143 put_device(devptr);
133 return 1; 1144 return ret;
134} 1145}
135 1146
136/** 1147/**
@@ -215,7 +1226,11 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
215 viodev->unit_address = *unit_address; 1226 viodev->unit_address = *unit_address;
216 } 1227 }
217 viodev->dev.archdata.of_node = of_node_get(of_node); 1228 viodev->dev.archdata.of_node = of_node_get(of_node);
218 viodev->dev.archdata.dma_ops = &dma_iommu_ops; 1229
1230 if (firmware_has_feature(FW_FEATURE_CMO))
1231 vio_cmo_set_dma_ops(viodev);
1232 else
1233 viodev->dev.archdata.dma_ops = &dma_iommu_ops;
219 viodev->dev.archdata.dma_data = vio_build_iommu_table(viodev); 1234 viodev->dev.archdata.dma_data = vio_build_iommu_table(viodev);
220 viodev->dev.archdata.numa_node = of_node_to_nid(of_node); 1235 viodev->dev.archdata.numa_node = of_node_to_nid(of_node);
221 1236
@@ -245,6 +1260,9 @@ static int __init vio_bus_init(void)
245 int err; 1260 int err;
246 struct device_node *node_vroot; 1261 struct device_node *node_vroot;
247 1262
1263 if (firmware_has_feature(FW_FEATURE_CMO))
1264 vio_cmo_sysfs_init();
1265
248 err = bus_register(&vio_bus_type); 1266 err = bus_register(&vio_bus_type);
249 if (err) { 1267 if (err) {
250 printk(KERN_ERR "failed to register VIO bus\n"); 1268 printk(KERN_ERR "failed to register VIO bus\n");
@@ -262,6 +1280,9 @@ static int __init vio_bus_init(void)
262 return err; 1280 return err;
263 } 1281 }
264 1282
1283 if (firmware_has_feature(FW_FEATURE_CMO))
1284 vio_cmo_bus_init();
1285
265 node_vroot = of_find_node_by_name(NULL, "vdevice"); 1286 node_vroot = of_find_node_by_name(NULL, "vdevice");
266 if (node_vroot) { 1287 if (node_vroot) {
267 struct device_node *of_node; 1288 struct device_node *of_node;
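
Because vio_bus_probe() now calls vio_cmo_bus_probe(), which rejects DMA-capable devices whose drivers lack a get_desired_dma() callback, a CMO-era VIO driver must advertise its IO memory appetite. A sketch of the shape such a driver takes; the names, device IDs and the 1MB figure are invented for illustration:

    #include <asm/vio.h>

    static unsigned long demo_get_desired_dma(struct vio_dev *vdev)
    {
        /* assumption: the device keeps about 1MB of IO memory mapped */
        return 1024 * 1024;
    }

    static int demo_probe(struct vio_dev *vdev, const struct vio_device_id *id)
    {
        return 0;       /* set up the device here */
    }

    static int demo_remove(struct vio_dev *vdev)
    {
        return 0;       /* returning 0 lets vio_cmo_bus_remove() run */
    }

    static struct vio_device_id demo_ids[] = {
        { "demo", "IBM,demo" },         /* hypothetical type/compat */
        { "", "" },
    };

    static struct vio_driver demo_driver = {
        .id_table        = demo_ids,
        .probe           = demo_probe,
        .remove          = demo_remove,
        .get_desired_dma = demo_get_desired_dma,
        .driver = {
            .name = "demo",
        },
    };

Once registered with vio_register_driver(), the value returned from get_desired_dma() is rounded up with IOMMU_PAGE_ALIGN() and clamped to at least VIO_CMO_MIN_ENT by vio_cmo_bus_probe() above.
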
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 87a72c66ce27..4a8ce62fe112 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -9,6 +9,25 @@
9 9
10ENTRY(_stext) 10ENTRY(_stext)
11 11
12PHDRS {
13 kernel PT_LOAD FLAGS(7); /* RWX */
14 notes PT_NOTE FLAGS(0);
15 dummy PT_NOTE FLAGS(0);
16
17 /* binutils < 2.18 has a bug that makes it misbehave when taking an
18 ELF file with all segments at load address 0 as input. This
19 happens when running "strip" on vmlinux, because of the AT() magic
20 in this linker script. People using GCC >= 4.2 won't run into
21 this problem, because the "build-id" support will put some data
22 into the "notes" segment (at a non-zero load address).
23
24 To work around this, we force some data into both the "dummy"
25 segment and the kernel segment, so the dummy segment will get a
26 non-zero load address. It's not enough to always create the
27 "notes" segment, since if nothing gets assigned to it, its load
28 address will be zero. */
29}
30
12#ifdef CONFIG_PPC64 31#ifdef CONFIG_PPC64
13OUTPUT_ARCH(powerpc:common64) 32OUTPUT_ARCH(powerpc:common64)
14jiffies = jiffies_64; 33jiffies = jiffies_64;
@@ -50,7 +69,7 @@ SECTIONS
50 . = ALIGN(PAGE_SIZE); 69 . = ALIGN(PAGE_SIZE);
51 _etext = .; 70 _etext = .;
52 PROVIDE32 (etext = .); 71 PROVIDE32 (etext = .);
53 } 72 } :kernel
54 73
55 /* Read-only data */ 74 /* Read-only data */
56 RODATA 75 RODATA
@@ -62,7 +81,13 @@ SECTIONS
62 __stop___ex_table = .; 81 __stop___ex_table = .;
63 } 82 }
64 83
65 NOTES 84 NOTES :kernel :notes
85
86 /* The dummy segment contents for the bug workaround mentioned above
87 near PHDRS. */
88 .dummy : AT(ADDR(.dummy) - LOAD_OFFSET) {
89 LONG(0xf177)
90 } :kernel :dummy
66 91
67/* 92/*
68 * Init sections discarded at runtime 93 * Init sections discarded at runtime
@@ -74,7 +99,7 @@ SECTIONS
74 _sinittext = .; 99 _sinittext = .;
75 INIT_TEXT 100 INIT_TEXT
76 _einittext = .; 101 _einittext = .;
77 } 102 } :kernel
78 103
79 /* .exit.text is discarded at runtime, not link time, 104 /* .exit.text is discarded at runtime, not link time,
80 * to deal with references from __bug_table 105 * to deal with references from __bug_table