Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c |   4
-rw-r--r--  arch/powerpc/kernel/crash_dump.c  |  10
-rw-r--r--  arch/powerpc/kernel/iommu.c       |  23
-rw-r--r--  arch/powerpc/kernel/pci-common.c  | 136
-rw-r--r--  arch/powerpc/kernel/sys_ppc32.c   |  99
5 files changed, 152 insertions, 120 deletions
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 09febc582584..75c5dd0138fd 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -359,8 +359,8 @@ int main(void)
 
 	DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
 	DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
-	DEFINE(VCPU_HOST_TLB, offsetof(struct kvm_vcpu, arch.host_tlb));
 	DEFINE(VCPU_SHADOW_TLB, offsetof(struct kvm_vcpu, arch.shadow_tlb));
+	DEFINE(VCPU_SHADOW_MOD, offsetof(struct kvm_vcpu, arch.shadow_tlb_mod));
 	DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
 	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
 	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
@@ -372,7 +372,7 @@ int main(void)
 	DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
 	DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
 	DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
-	DEFINE(VCPU_PID, offsetof(struct kvm_vcpu, arch.pid));
+	DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
 
 	DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
 	DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
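
For context on the mechanism behind these DEFINE() lines: asm-offsets.c is never linked into the kernel. It is compiled to assembly only, and each DEFINE() emits a marker line that the build scripts rewrite into a #define usable from .S files. A minimal sketch of the technique with an illustrative stand-in struct (the real macro lives in the kernel's kbuild header, and struct vcpu_demo below is not struct kvm_vcpu):

/* Compile with "gcc -S" and the generated assembly contains lines
 * like "->VCPU_HOST_PID 8 ...", which a sed script in the build
 * turns into "#define VCPU_HOST_PID 8" for assembly code to use.
 * This file is only ever compiled to .s, never assembled or run. */
#include <stddef.h>

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

struct vcpu_demo {			/* illustrative stand-in */
	unsigned long host_stack;
	unsigned long host_pid;
};

int main(void)
{
	DEFINE(VCPU_HOST_STACK, offsetof(struct vcpu_demo, host_stack));
	DEFINE(VCPU_HOST_PID, offsetof(struct vcpu_demo, host_pid));
	return 0;
}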
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index a323c9b32ee1..97e056379728 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -27,6 +27,9 @@
 #define DBG(fmt...)
 #endif
 
+/* Stores the physical address of elf header of crash image. */
+unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
+
 void __init reserve_kdump_trampoline(void)
 {
 	lmb_reserve(0, KDUMP_RESERVE_LIMIT);
@@ -66,7 +69,11 @@ void __init setup_kdump_trampoline(void)
 	DBG(" <- setup_kdump_trampoline()\n");
 }
 
-#ifdef CONFIG_PROC_VMCORE
+/*
+ * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
+ * is_kdump_kernel() to determine if we are booting after a panic. Hence
+ * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
+ */
 static int __init parse_elfcorehdr(char *p)
 {
 	if (p)
@@ -75,7 +82,6 @@ static int __init parse_elfcorehdr(char *p)
 	return 1;
 }
 __setup("elfcorehdr=", parse_elfcorehdr);
-#endif
 
 static int __init parse_savemaxmem(char *p)
 {
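
The body of parse_elfcorehdr() is elided by the hunk context above. For reference, a sketch of the conventional "elfcorehdr=" handling, assuming the standard memparse()-based parsing (kexec-tools appends elfcorehdr=<addr> to the capture kernel's command line); treat this as an assumption rather than a quote of the file:

/* Sketch only: the exact body is cut off by the hunk boundary. */
static int __init parse_elfcorehdr(char *p)
{
	if (p)
		elfcorehdr_addr = memparse(p, &p);

	return 1;
}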
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 550a19399bfa..ea1ba89f9c90 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -51,17 +51,6 @@ static int protect4gb = 1;
 
 static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
 
-static inline unsigned long iommu_num_pages(unsigned long vaddr,
-					    unsigned long slen)
-{
-	unsigned long npages;
-
-	npages = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK);
-	npages >>= IOMMU_PAGE_SHIFT;
-
-	return npages;
-}
-
 static int __init setup_protect4gb(char *str)
 {
 	if (strcmp(str, "on") == 0)
@@ -325,7 +314,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 		}
 		/* Allocate iommu entries for that segment */
 		vaddr = (unsigned long) sg_virt(s);
-		npages = iommu_num_pages(vaddr, slen);
+		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
 		align = 0;
 		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
 		    (vaddr & ~PAGE_MASK) == 0)
@@ -418,7 +407,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 			unsigned long vaddr, npages;
 
 			vaddr = s->dma_address & IOMMU_PAGE_MASK;
-			npages = iommu_num_pages(s->dma_address, s->dma_length);
+			npages = iommu_num_pages(s->dma_address, s->dma_length,
+						 IOMMU_PAGE_SIZE);
 			__iommu_free(tbl, vaddr, npages);
 			s->dma_address = DMA_ERROR_CODE;
 			s->dma_length = 0;
@@ -452,7 +442,8 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 
 		if (sg->dma_length == 0)
 			break;
-		npages = iommu_num_pages(dma_handle, sg->dma_length);
+		npages = iommu_num_pages(dma_handle, sg->dma_length,
+					 IOMMU_PAGE_SIZE);
 		__iommu_free(tbl, dma_handle, npages);
 		sg = sg_next(sg);
 	}
@@ -584,7 +575,7 @@ dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
 	BUG_ON(direction == DMA_NONE);
 
 	uaddr = (unsigned long)vaddr;
-	npages = iommu_num_pages(uaddr, size);
+	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);
 
 	if (tbl) {
 		align = 0;
@@ -617,7 +608,7 @@ void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
 	BUG_ON(direction == DMA_NONE);
 
 	if (tbl) {
-		npages = iommu_num_pages(dma_handle, size);
+		npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE);
 		iommu_free(tbl, dma_handle, npages);
 	}
 }
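
All six hunks above drop the file-local page counter in favor of a generic iommu_num_pages() that takes the IOMMU page size as a third argument. A sketch of what such a helper computes, equivalent to the removed IOMMU_PAGE_ALIGN arithmetic (a minimal reimplementation under that assumption, not necessarily the exact generic library text):

/* Number of io_page_size pages spanned by [addr, addr + len):
 * add the offset into the first page, then round up. */
static unsigned long demo_iommu_num_pages(unsigned long addr,
					  unsigned long len,
					  unsigned long io_page_size)
{
	unsigned long size = (addr & (io_page_size - 1)) + len;

	return (size + io_page_size - 1) / io_page_size; /* DIV_ROUND_UP */
}

For example, addr = 0x1ffc, len = 8 with io_page_size = 0x1000 straddles a page boundary, so the helper returns 2.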
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 01ce8c38bae6..3815d84a1ef4 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -451,7 +451,8 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
 		pci_dev_put(pdev);
 	}
 
-	DBG("non-PCI map for %lx, prot: %lx\n", offset, prot);
+	DBG("non-PCI map for %llx, prot: %lx\n",
+	    (unsigned long long)offset, prot);
 
 	return __pgprot(prot);
 }
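
The format-string change above is needed because offset is a resource_size_t, which is 32- or 64-bit depending on configuration; the portable idiom is to pin it to unsigned long long. An illustrative one-liner (value made up):

/* resource_size_t may be u32 or u64, so cast before printing. */
resource_size_t offset = 0xfe000000;
pr_debug("non-PCI map for %llx\n", (unsigned long long)offset);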
@@ -490,6 +491,131 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 	return ret;
 }
 
+/* This provides legacy IO read access on a bus */
+int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
+{
+	unsigned long offset;
+	struct pci_controller *hose = pci_bus_to_host(bus);
+	struct resource *rp = &hose->io_resource;
+	void __iomem *addr;
+
+	/* Check if port can be supported by that bus. We only check
+	 * the ranges of the PHB though, not the bus itself as the rules
+	 * for forwarding legacy cycles down bridges are not our problem
+	 * here. So if the host bridge supports it, we do it.
+	 */
+	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
+	offset += port;
+
+	if (!(rp->flags & IORESOURCE_IO))
+		return -ENXIO;
+	if (offset < rp->start || (offset + size) > rp->end)
+		return -ENXIO;
+	addr = hose->io_base_virt + port;
+
+	switch(size) {
+	case 1:
+		*((u8 *)val) = in_8(addr);
+		return 1;
+	case 2:
+		if (port & 1)
+			return -EINVAL;
+		*((u16 *)val) = in_le16(addr);
+		return 2;
+	case 4:
+		if (port & 3)
+			return -EINVAL;
+		*((u32 *)val) = in_le32(addr);
+		return 4;
+	}
+	return -EINVAL;
+}
+
+/* This provides legacy IO write access on a bus */
+int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
+{
+	unsigned long offset;
+	struct pci_controller *hose = pci_bus_to_host(bus);
+	struct resource *rp = &hose->io_resource;
+	void __iomem *addr;
+
+	/* Check if port can be supported by that bus. We only check
+	 * the ranges of the PHB though, not the bus itself as the rules
+	 * for forwarding legacy cycles down bridges are not our problem
+	 * here. So if the host bridge supports it, we do it.
+	 */
+	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
+	offset += port;
+
+	if (!(rp->flags & IORESOURCE_IO))
+		return -ENXIO;
+	if (offset < rp->start || (offset + size) > rp->end)
+		return -ENXIO;
+	addr = hose->io_base_virt + port;
+
+	/* WARNING: The generic code is idiotic. It gets passed a pointer
+	 * to what can be a 1, 2 or 4 byte quantity and always reads that
+	 * as a u32, which means that we have to correct the location of
+	 * the data read within those 32 bits for size 1 and 2
+	 */
+	switch(size) {
+	case 1:
+		out_8(addr, val >> 24);
+		return 1;
+	case 2:
+		if (port & 1)
+			return -EINVAL;
+		out_le16(addr, val >> 16);
+		return 2;
+	case 4:
+		if (port & 3)
+			return -EINVAL;
+		out_le32(addr, val);
+		return 4;
+	}
+	return -EINVAL;
+}
+
+/* This provides legacy IO or memory mmap access on a bus */
+int pci_mmap_legacy_page_range(struct pci_bus *bus,
+			       struct vm_area_struct *vma,
+			       enum pci_mmap_state mmap_state)
+{
+	struct pci_controller *hose = pci_bus_to_host(bus);
+	resource_size_t offset =
+		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
+	resource_size_t size = vma->vm_end - vma->vm_start;
+	struct resource *rp;
+
+	pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
+		 pci_domain_nr(bus), bus->number,
+		 mmap_state == pci_mmap_mem ? "MEM" : "IO",
+		 (unsigned long long)offset,
+		 (unsigned long long)(offset + size - 1));
+
+	if (mmap_state == pci_mmap_mem) {
+		if ((offset + size) > hose->isa_mem_size)
+			return -ENXIO;
+		offset += hose->isa_mem_phys;
+	} else {
+		unsigned long io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
+		unsigned long roffset = offset + io_offset;
+		rp = &hose->io_resource;
+		if (!(rp->flags & IORESOURCE_IO))
+			return -ENXIO;
+		if (roffset < rp->start || (roffset + size) > rp->end)
+			return -ENXIO;
+		offset += hose->io_base_phys;
+	}
+	pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);
+
+	vma->vm_pgoff = offset >> PAGE_SHIFT;
+	vma->vm_page_prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
+	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+			       vma->vm_end - vma->vm_start,
+			       vma->vm_page_prot);
+}
+
 void pci_resource_to_user(const struct pci_dev *dev, int bar,
 			  const struct resource *rsrc,
 			  resource_size_t *start, resource_size_t *end)
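
These two handlers back the per-bus legacy_io file that the generic PCI sysfs code creates, so user space reaches them through ordinary file I/O where the file offset selects the port. A hypothetical example (sysfs path and port are illustrative):

/* Read the VGA miscellaneous output register (port 0x3cc) through
 * the legacy_io file of bus 0000:00; path and port are examples. */
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

int read_vga_misc(void)
{
	uint8_t v;
	int fd = open("/sys/class/pci_bus/0000:00/legacy_io", O_RDONLY);

	if (fd < 0)
		return -1;
	if (pread(fd, &v, 1, 0x3cc) != 1)
		v = 0xff;	/* treat failure as all-ones, like real ISA */
	close(fd);
	return v;
}

The odd val >> 24 and val >> 16 shifts in the write path compensate for the generic code reading the user buffer as a u32: on a big-endian host a single byte lands in bits 31..24, a 16-bit value in bits 31..16.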
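
The mmap counterpart is exposed as the per-bus legacy_mem file, with the mmap offset naming the legacy address. A hypothetical user-space use (path, address, and size are illustrative):

/* Map the 64KB VGA window at legacy address 0xa0000 through the
 * legacy_mem file of bus 0000:00; values are examples only. */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

void *map_vga_window(void)
{
	void *p;
	int fd = open("/sys/class/pci_bus/0000:00/legacy_mem", O_RDWR);

	if (fd < 0)
		return MAP_FAILED;
	p = mmap(NULL, 0x10000, PROT_READ | PROT_WRITE,
		 MAP_SHARED, fd, 0xa0000);
	close(fd);	/* the mapping survives the close */
	return p;
}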
@@ -592,6 +718,12 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
 		cpu_addr = of_translate_address(dev, ranges + 3);
 		size = of_read_number(ranges + pna + 3, 2);
 		ranges += np;
+
+		/* If we failed translation or got a zero-sized region
+		 * (some FW try to feed us with non sensical zero sized regions
+		 * such as power3 which look like some kind of attempt at exposing
+		 * the VGA memory hole)
+		 */
 		if (cpu_addr == OF_BAD_ADDR || size == 0)
 			continue;
 
597 729
@@ -665,6 +797,8 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
 			isa_hole = memno;
 			if (primary || isa_mem_base == 0)
 				isa_mem_base = cpu_addr;
+			hose->isa_mem_phys = cpu_addr;
+			hose->isa_mem_size = size;
 		}
 
 		/* We get the PCI/Mem offset from the first range or
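
For readers decoding the pointer arithmetic in these hunks: each entry of a PCI host bridge's ranges property is laid out as three PCI address cells, then pna parent address cells, then two size cells, which is why the code translates at ranges + 3 and reads the size at ranges + pna + 3. An illustrative entry with pna == 2 assumed (all values made up):

/* One "ranges" entry as raw cells:
 *   0x02000000        32-bit MEM space code
 *   0x0 0x80000000    address on the PCI side
 *   0x0 0xc0000000    address on the CPU/parent side
 *   0x0 0x20000000    size (512MB)
 */
static const u32 example_range[] = {
	0x02000000, 0x0, 0x80000000,
	0x0, 0xc0000000,
	0x0, 0x20000000,
};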
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index ff7de7b0797e..bb1cfcfdbbbb 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -61,42 +61,6 @@ asmlinkage long ppc32_select(u32 n, compat_ulong_t __user *inp,
 	return compat_sys_select((int)n, inp, outp, exp, compat_ptr(tvp_x));
 }
 
-int cp_compat_stat(struct kstat *stat, struct compat_stat __user *statbuf)
-{
-	compat_ino_t ino;
-	long err;
-
-	if (stat->size > MAX_NON_LFS || !new_valid_dev(stat->dev) ||
-	    !new_valid_dev(stat->rdev))
-		return -EOVERFLOW;
-
-	ino = stat->ino;
-	if (sizeof(ino) < sizeof(stat->ino) && ino != stat->ino)
-		return -EOVERFLOW;
-
-	err = access_ok(VERIFY_WRITE, statbuf, sizeof(*statbuf)) ? 0 : -EFAULT;
-	err |= __put_user(new_encode_dev(stat->dev), &statbuf->st_dev);
-	err |= __put_user(ino, &statbuf->st_ino);
-	err |= __put_user(stat->mode, &statbuf->st_mode);
-	err |= __put_user(stat->nlink, &statbuf->st_nlink);
-	err |= __put_user(stat->uid, &statbuf->st_uid);
-	err |= __put_user(stat->gid, &statbuf->st_gid);
-	err |= __put_user(new_encode_dev(stat->rdev), &statbuf->st_rdev);
-	err |= __put_user(stat->size, &statbuf->st_size);
-	err |= __put_user(stat->atime.tv_sec, &statbuf->st_atime);
-	err |= __put_user(stat->atime.tv_nsec, &statbuf->st_atime_nsec);
-	err |= __put_user(stat->mtime.tv_sec, &statbuf->st_mtime);
-	err |= __put_user(stat->mtime.tv_nsec, &statbuf->st_mtime_nsec);
-	err |= __put_user(stat->ctime.tv_sec, &statbuf->st_ctime);
-	err |= __put_user(stat->ctime.tv_nsec, &statbuf->st_ctime_nsec);
-	err |= __put_user(stat->blksize, &statbuf->st_blksize);
-	err |= __put_user(stat->blocks, &statbuf->st_blocks);
-	err |= __put_user(0, &statbuf->__unused4[0]);
-	err |= __put_user(0, &statbuf->__unused4[1]);
-
-	return err;
-}
-
 /* Note: it is necessary to treat option as an unsigned int,
  * with the corresponding cast to a signed int to insure that the
  * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
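
A tiny illustration of the sign-extension point this comment makes, with hypothetical names: a 32-bit task passing -1 arrives in the 64-bit register as 0xffffffff, and only the cast through a signed int recovers the intended value.

#include <stdint.h>

/* Illustrative only: why (int)option matters on a 64-bit kernel. */
long widen_option(uint32_t option)
{
	return (int)option;	/* 0xffffffffu becomes -1, as intended */
}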
@@ -107,69 +71,6 @@ asmlinkage long compat_sys_sysfs(u32 option, u32 arg1, u32 arg2)
 	return sys_sysfs((int)option, arg1, arg2);
 }
 
-static inline long get_ts32(struct timespec *o, struct compat_timeval __user *i)
-{
-	long usec;
-
-	if (!access_ok(VERIFY_READ, i, sizeof(*i)))
-		return -EFAULT;
-	if (__get_user(o->tv_sec, &i->tv_sec))
-		return -EFAULT;
-	if (__get_user(usec, &i->tv_usec))
-		return -EFAULT;
-	o->tv_nsec = usec * 1000;
-	return 0;
-}
-
-static inline long put_tv32(struct compat_timeval __user *o, struct timeval *i)
-{
-	return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
-		(__put_user(i->tv_sec, &o->tv_sec) |
-		 __put_user(i->tv_usec, &o->tv_usec)));
-}
-
-
-
-
-/* Translations due to time_t size differences. Which affects all
-   sorts of things, like timeval and itimerval. */
-extern struct timezone sys_tz;
-
-asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
-{
-	if (tv) {
-		struct timeval ktv;
-		do_gettimeofday(&ktv);
-		if (put_tv32(tv, &ktv))
-			return -EFAULT;
-	}
-	if (tz) {
-		if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
-			return -EFAULT;
-	}
-
-	return 0;
-}
-
-
-
-asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
-{
-	struct timespec kts;
-	struct timezone ktz;
-
-	if (tv) {
-		if (get_ts32(&kts, tv))
-			return -EFAULT;
-	}
-	if (tz) {
-		if (copy_from_user(&ktz, tz, sizeof(ktz)))
-			return -EFAULT;
-	}
-
-	return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
-}
-
 #ifdef CONFIG_SYSVIPC
 long compat_sys_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr,
 		    u32 fifth)