diff options
Diffstat (limited to 'arch')
102 files changed, 3807 insertions, 913 deletions
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index 62619f25132f..53c213f70fcb 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c | |||
@@ -361,7 +361,7 @@ osf_procfs_mount(char *dirname, struct procfs_args __user *args, int flags) | |||
361 | SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, char __user *, path, | 361 | SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, char __user *, path, |
362 | int, flag, void __user *, data) | 362 | int, flag, void __user *, data) |
363 | { | 363 | { |
364 | int retval = -EINVAL; | 364 | int retval; |
365 | char *name; | 365 | char *name; |
366 | 366 | ||
367 | name = getname(path); | 367 | name = getname(path); |
@@ -379,6 +379,7 @@ SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, char __user *, path, | |||
379 | retval = osf_procfs_mount(name, data, flag); | 379 | retval = osf_procfs_mount(name, data, flag); |
380 | break; | 380 | break; |
381 | default: | 381 | default: |
382 | retval = -EINVAL; | ||
382 | printk("osf_mount(%ld, %x)\n", typenr, flag); | 383 | printk("osf_mount(%ld, %x)\n", typenr, flag); |
383 | } | 384 | } |
384 | putname(name); | 385 | putname(name); |
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig index 7adac388a771..059eac6abda1 100644 --- a/arch/cris/Kconfig +++ b/arch/cris/Kconfig | |||
@@ -20,6 +20,12 @@ config RWSEM_GENERIC_SPINLOCK | |||
20 | config RWSEM_XCHGADD_ALGORITHM | 20 | config RWSEM_XCHGADD_ALGORITHM |
21 | bool | 21 | bool |
22 | 22 | ||
23 | config GENERIC_TIME | ||
24 | def_bool y | ||
25 | |||
26 | config ARCH_USES_GETTIMEOFFSET | ||
27 | def_bool y | ||
28 | |||
23 | config GENERIC_IOMAP | 29 | config GENERIC_IOMAP |
24 | bool | 30 | bool |
25 | default y | 31 | default y |
diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c index fd529a0ec758..b70fb34939d9 100644 --- a/arch/cris/arch-v32/drivers/cryptocop.c +++ b/arch/cris/arch-v32/drivers/cryptocop.c | |||
@@ -628,9 +628,9 @@ static int create_output_descriptors(struct cryptocop_operation *operation, int | |||
628 | cdesc->dma_descr->buf = (char*)virt_to_phys(operation->tfrm_op.indata[*iniov_ix].iov_base + *iniov_offset); | 628 | cdesc->dma_descr->buf = (char*)virt_to_phys(operation->tfrm_op.indata[*iniov_ix].iov_base + *iniov_offset); |
629 | cdesc->dma_descr->after = cdesc->dma_descr->buf + dlength; | 629 | cdesc->dma_descr->after = cdesc->dma_descr->buf + dlength; |
630 | 630 | ||
631 | assert(desc_len >= dlength); | ||
631 | desc_len -= dlength; | 632 | desc_len -= dlength; |
632 | *iniov_offset += dlength; | 633 | *iniov_offset += dlength; |
633 | assert(desc_len >= 0); | ||
634 | if (*iniov_offset >= operation->tfrm_op.indata[*iniov_ix].iov_len) { | 634 | if (*iniov_offset >= operation->tfrm_op.indata[*iniov_ix].iov_len) { |
635 | *iniov_offset = 0; | 635 | *iniov_offset = 0; |
636 | ++(*iniov_ix); | 636 | ++(*iniov_ix); |
diff --git a/arch/cris/arch-v32/mach-fs/arbiter.c b/arch/cris/arch-v32/mach-fs/arbiter.c index 84d31bd7b692..82ef293c4c81 100644 --- a/arch/cris/arch-v32/mach-fs/arbiter.c +++ b/arch/cris/arch-v32/mach-fs/arbiter.c | |||
@@ -332,7 +332,7 @@ int crisv32_arbiter_unwatch(int id) | |||
332 | if (id == 0) | 332 | if (id == 0) |
333 | intr_mask.bp0 = regk_marb_no; | 333 | intr_mask.bp0 = regk_marb_no; |
334 | else if (id == 1) | 334 | else if (id == 1) |
335 | intr_mask.bp2 = regk_marb_no; | 335 | intr_mask.bp1 = regk_marb_no; |
336 | else if (id == 2) | 336 | else if (id == 2) |
337 | intr_mask.bp2 = regk_marb_no; | 337 | intr_mask.bp2 = regk_marb_no; |
338 | else if (id == 3) | 338 | else if (id == 3) |
diff --git a/arch/cris/kernel/time.c b/arch/cris/kernel/time.c index 074fe7dea96b..a05dd31f3efb 100644 --- a/arch/cris/kernel/time.c +++ b/arch/cris/kernel/time.c | |||
@@ -42,75 +42,11 @@ unsigned long loops_per_usec; | |||
42 | extern unsigned long do_slow_gettimeoffset(void); | 42 | extern unsigned long do_slow_gettimeoffset(void); |
43 | static unsigned long (*do_gettimeoffset)(void) = do_slow_gettimeoffset; | 43 | static unsigned long (*do_gettimeoffset)(void) = do_slow_gettimeoffset; |
44 | 44 | ||
45 | /* | 45 | u32 arch_gettimeoffset(void) |
46 | * This version of gettimeofday has near microsecond resolution. | ||
47 | * | ||
48 | * Note: Division is quite slow on CRIS and do_gettimeofday is called | ||
49 | * rather often. Maybe we should do some kind of approximation here | ||
50 | * (a naive approximation would be to divide by 1024). | ||
51 | */ | ||
52 | void do_gettimeofday(struct timeval *tv) | ||
53 | { | ||
54 | unsigned long flags; | ||
55 | signed long usec, sec; | ||
56 | local_irq_save(flags); | ||
57 | usec = do_gettimeoffset(); | ||
58 | |||
59 | /* | ||
60 | * If time_adjust is negative then NTP is slowing the clock | ||
61 | * so make sure not to go into next possible interval. | ||
62 | * Better to lose some accuracy than have time go backwards.. | ||
63 | */ | ||
64 | if (unlikely(time_adjust < 0) && usec > tickadj) | ||
65 | usec = tickadj; | ||
66 | |||
67 | sec = xtime.tv_sec; | ||
68 | usec += xtime.tv_nsec / 1000; | ||
69 | local_irq_restore(flags); | ||
70 | |||
71 | while (usec >= 1000000) { | ||
72 | usec -= 1000000; | ||
73 | sec++; | ||
74 | } | ||
75 | |||
76 | tv->tv_sec = sec; | ||
77 | tv->tv_usec = usec; | ||
78 | } | ||
79 | |||
80 | EXPORT_SYMBOL(do_gettimeofday); | ||
81 | |||
82 | int do_settimeofday(struct timespec *tv) | ||
83 | { | 46 | { |
84 | time_t wtm_sec, sec = tv->tv_sec; | 47 | return do_gettimeoffset() * 1000; |
85 | long wtm_nsec, nsec = tv->tv_nsec; | ||
86 | |||
87 | if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) | ||
88 | return -EINVAL; | ||
89 | |||
90 | write_seqlock_irq(&xtime_lock); | ||
91 | /* | ||
92 | * This is revolting. We need to set "xtime" correctly. However, the | ||
93 | * value in this location is the value at the most recent update of | ||
94 | * wall time. Discover what correction gettimeofday() would have | ||
95 | * made, and then undo it! | ||
96 | */ | ||
97 | nsec -= do_gettimeoffset() * NSEC_PER_USEC; | ||
98 | |||
99 | wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); | ||
100 | wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); | ||
101 | |||
102 | set_normalized_timespec(&xtime, sec, nsec); | ||
103 | set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); | ||
104 | |||
105 | ntp_clear(); | ||
106 | write_sequnlock_irq(&xtime_lock); | ||
107 | clock_was_set(); | ||
108 | return 0; | ||
109 | } | 48 | } |
110 | 49 | ||
111 | EXPORT_SYMBOL(do_settimeofday); | ||
112 | |||
113 | |||
114 | /* | 50 | /* |
115 | * BUG: This routine does not handle hour overflow properly; it just | 51 | * BUG: This routine does not handle hour overflow properly; it just |
116 | * sets the minutes. Usually you'll only notice that after reboot! | 52 | * sets the minutes. Usually you'll only notice that after reboot! |
diff --git a/arch/frv/include/asm/pci.h b/arch/frv/include/asm/pci.h index 492b5c4dfed6..8c7260a3cd41 100644 --- a/arch/frv/include/asm/pci.h +++ b/arch/frv/include/asm/pci.h | |||
@@ -68,41 +68,4 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev, | |||
68 | #define PCIBIOS_MIN_IO 0x100 | 68 | #define PCIBIOS_MIN_IO 0x100 |
69 | #define PCIBIOS_MIN_MEM 0x00010000 | 69 | #define PCIBIOS_MIN_MEM 0x00010000 |
70 | 70 | ||
71 | /* Make physical memory consistent for a single | ||
72 | * streaming mode DMA translation after a transfer. | ||
73 | * | ||
74 | * If you perform a pci_map_single() but wish to interrogate the | ||
75 | * buffer using the cpu, yet do not wish to teardown the PCI dma | ||
76 | * mapping, you must call this function before doing so. At the | ||
77 | * next point you give the PCI dma address back to the card, the | ||
78 | * device again owns the buffer. | ||
79 | */ | ||
80 | static inline void pci_dma_sync_single(struct pci_dev *hwdev, | ||
81 | dma_addr_t dma_handle, | ||
82 | size_t size, int direction) | ||
83 | { | ||
84 | BUG_ON(direction == PCI_DMA_NONE); | ||
85 | |||
86 | frv_cache_wback_inv((unsigned long)bus_to_virt(dma_handle), | ||
87 | (unsigned long)bus_to_virt(dma_handle) + size); | ||
88 | } | ||
89 | |||
90 | /* Make physical memory consistent for a set of streaming | ||
91 | * mode DMA translations after a transfer. | ||
92 | * | ||
93 | * The same as pci_dma_sync_single but for a scatter-gather list, | ||
94 | * same rules and usage. | ||
95 | */ | ||
96 | static inline void pci_dma_sync_sg(struct pci_dev *hwdev, | ||
97 | struct scatterlist *sg, | ||
98 | int nelems, int direction) | ||
99 | { | ||
100 | int i; | ||
101 | BUG_ON(direction == PCI_DMA_NONE); | ||
102 | |||
103 | for (i = 0; i < nelems; i++) | ||
104 | frv_cache_wback_inv(sg_dma_address(&sg[i]), | ||
105 | sg_dma_address(&sg[i])+sg_dma_len(&sg[i])); | ||
106 | } | ||
107 | |||
108 | #endif /* _ASM_FRV_PCI_H */ | 71 | #endif /* _ASM_FRV_PCI_H */ |
diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h index 4c41656ede87..b5298eb09adb 100644 --- a/arch/ia64/include/asm/elf.h +++ b/arch/ia64/include/asm/elf.h | |||
@@ -219,54 +219,6 @@ do { \ | |||
219 | NEW_AUX_ENT(AT_SYSINFO_EHDR, (unsigned long) GATE_EHDR); \ | 219 | NEW_AUX_ENT(AT_SYSINFO_EHDR, (unsigned long) GATE_EHDR); \ |
220 | } while (0) | 220 | } while (0) |
221 | 221 | ||
222 | |||
223 | /* | ||
224 | * These macros parameterize elf_core_dump in fs/binfmt_elf.c to write out | ||
225 | * extra segments containing the gate DSO contents. Dumping its | ||
226 | * contents makes post-mortem fully interpretable later without matching up | ||
227 | * the same kernel and hardware config to see what PC values meant. | ||
228 | * Dumping its extra ELF program headers includes all the other information | ||
229 | * a debugger needs to easily find how the gate DSO was being used. | ||
230 | */ | ||
231 | #define ELF_CORE_EXTRA_PHDRS (GATE_EHDR->e_phnum) | ||
232 | #define ELF_CORE_WRITE_EXTRA_PHDRS \ | ||
233 | do { \ | ||
234 | const struct elf_phdr *const gate_phdrs = \ | ||
235 | (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); \ | ||
236 | int i; \ | ||
237 | Elf64_Off ofs = 0; \ | ||
238 | for (i = 0; i < GATE_EHDR->e_phnum; ++i) { \ | ||
239 | struct elf_phdr phdr = gate_phdrs[i]; \ | ||
240 | if (phdr.p_type == PT_LOAD) { \ | ||
241 | phdr.p_memsz = PAGE_ALIGN(phdr.p_memsz); \ | ||
242 | phdr.p_filesz = phdr.p_memsz; \ | ||
243 | if (ofs == 0) { \ | ||
244 | ofs = phdr.p_offset = offset; \ | ||
245 | offset += phdr.p_filesz; \ | ||
246 | } \ | ||
247 | else \ | ||
248 | phdr.p_offset = ofs; \ | ||
249 | } \ | ||
250 | else \ | ||
251 | phdr.p_offset += ofs; \ | ||
252 | phdr.p_paddr = 0; /* match other core phdrs */ \ | ||
253 | DUMP_WRITE(&phdr, sizeof(phdr)); \ | ||
254 | } \ | ||
255 | } while (0) | ||
256 | #define ELF_CORE_WRITE_EXTRA_DATA \ | ||
257 | do { \ | ||
258 | const struct elf_phdr *const gate_phdrs = \ | ||
259 | (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); \ | ||
260 | int i; \ | ||
261 | for (i = 0; i < GATE_EHDR->e_phnum; ++i) { \ | ||
262 | if (gate_phdrs[i].p_type == PT_LOAD) { \ | ||
263 | DUMP_WRITE((void *) gate_phdrs[i].p_vaddr, \ | ||
264 | PAGE_ALIGN(gate_phdrs[i].p_memsz)); \ | ||
265 | break; \ | ||
266 | } \ | ||
267 | } \ | ||
268 | } while (0) | ||
269 | |||
270 | /* | 222 | /* |
271 | * format for entries in the Global Offset Table | 223 | * format for entries in the Global Offset Table |
272 | */ | 224 | */ |
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index 4138282aefa8..db10b1e378b0 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile | |||
@@ -45,6 +45,8 @@ endif | |||
45 | obj-$(CONFIG_DMAR) += pci-dma.o | 45 | obj-$(CONFIG_DMAR) += pci-dma.o |
46 | obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o | 46 | obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o |
47 | 47 | ||
48 | obj-$(CONFIG_BINFMT_ELF) += elfcore.o | ||
49 | |||
48 | # fp_emulate() expects f2-f5,f16-f31 to contain the user-level state. | 50 | # fp_emulate() expects f2-f5,f16-f31 to contain the user-level state. |
49 | CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31 | 51 | CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31 |
50 | 52 | ||
diff --git a/arch/ia64/kernel/elfcore.c b/arch/ia64/kernel/elfcore.c new file mode 100644 index 000000000000..bac1639bc320 --- /dev/null +++ b/arch/ia64/kernel/elfcore.c | |||
@@ -0,0 +1,80 @@ | |||
1 | #include <linux/elf.h> | ||
2 | #include <linux/coredump.h> | ||
3 | #include <linux/fs.h> | ||
4 | #include <linux/mm.h> | ||
5 | |||
6 | #include <asm/elf.h> | ||
7 | |||
8 | |||
9 | Elf64_Half elf_core_extra_phdrs(void) | ||
10 | { | ||
11 | return GATE_EHDR->e_phnum; | ||
12 | } | ||
13 | |||
14 | int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size, | ||
15 | unsigned long limit) | ||
16 | { | ||
17 | const struct elf_phdr *const gate_phdrs = | ||
18 | (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); | ||
19 | int i; | ||
20 | Elf64_Off ofs = 0; | ||
21 | |||
22 | for (i = 0; i < GATE_EHDR->e_phnum; ++i) { | ||
23 | struct elf_phdr phdr = gate_phdrs[i]; | ||
24 | |||
25 | if (phdr.p_type == PT_LOAD) { | ||
26 | phdr.p_memsz = PAGE_ALIGN(phdr.p_memsz); | ||
27 | phdr.p_filesz = phdr.p_memsz; | ||
28 | if (ofs == 0) { | ||
29 | ofs = phdr.p_offset = offset; | ||
30 | offset += phdr.p_filesz; | ||
31 | } else { | ||
32 | phdr.p_offset = ofs; | ||
33 | } | ||
34 | } else { | ||
35 | phdr.p_offset += ofs; | ||
36 | } | ||
37 | phdr.p_paddr = 0; /* match other core phdrs */ | ||
38 | *size += sizeof(phdr); | ||
39 | if (*size > limit || !dump_write(file, &phdr, sizeof(phdr))) | ||
40 | return 0; | ||
41 | } | ||
42 | return 1; | ||
43 | } | ||
44 | |||
45 | int elf_core_write_extra_data(struct file *file, size_t *size, | ||
46 | unsigned long limit) | ||
47 | { | ||
48 | const struct elf_phdr *const gate_phdrs = | ||
49 | (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); | ||
50 | int i; | ||
51 | |||
52 | for (i = 0; i < GATE_EHDR->e_phnum; ++i) { | ||
53 | if (gate_phdrs[i].p_type == PT_LOAD) { | ||
54 | void *addr = (void *)gate_phdrs[i].p_vaddr; | ||
55 | size_t memsz = PAGE_ALIGN(gate_phdrs[i].p_memsz); | ||
56 | |||
57 | *size += memsz; | ||
58 | if (*size > limit || !dump_write(file, addr, memsz)) | ||
59 | return 0; | ||
60 | break; | ||
61 | } | ||
62 | } | ||
63 | return 1; | ||
64 | } | ||
65 | |||
66 | size_t elf_core_extra_data_size(void) | ||
67 | { | ||
68 | const struct elf_phdr *const gate_phdrs = | ||
69 | (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); | ||
70 | int i; | ||
71 | size_t size = 0; | ||
72 | |||
73 | for (i = 0; i < GATE_EHDR->e_phnum; ++i) { | ||
74 | if (gate_phdrs[i].p_type == PT_LOAD) { | ||
75 | size += PAGE_ALIGN(gate_phdrs[i].p_memsz); | ||
76 | break; | ||
77 | } | ||
78 | } | ||
79 | return size; | ||
80 | } | ||
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index b81e46b1629b..703062c44fb9 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c | |||
@@ -2315,6 +2315,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t | |||
2315 | DPRINT(("Cannot allocate vma\n")); | 2315 | DPRINT(("Cannot allocate vma\n")); |
2316 | goto error_kmem; | 2316 | goto error_kmem; |
2317 | } | 2317 | } |
2318 | INIT_LIST_HEAD(&vma->anon_vma_chain); | ||
2318 | 2319 | ||
2319 | /* | 2320 | /* |
2320 | * partially initialize the vma for the sampling buffer | 2321 | * partially initialize the vma for the sampling buffer |
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index ca3335ea56cc..ed41759efcac 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c | |||
@@ -117,6 +117,7 @@ ia64_init_addr_space (void) | |||
117 | */ | 117 | */ |
118 | vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); | 118 | vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); |
119 | if (vma) { | 119 | if (vma) { |
120 | INIT_LIST_HEAD(&vma->anon_vma_chain); | ||
120 | vma->vm_mm = current->mm; | 121 | vma->vm_mm = current->mm; |
121 | vma->vm_start = current->thread.rbs_bot & PAGE_MASK; | 122 | vma->vm_start = current->thread.rbs_bot & PAGE_MASK; |
122 | vma->vm_end = vma->vm_start + PAGE_SIZE; | 123 | vma->vm_end = vma->vm_start + PAGE_SIZE; |
@@ -135,6 +136,7 @@ ia64_init_addr_space (void) | |||
135 | if (!(current->personality & MMAP_PAGE_ZERO)) { | 136 | if (!(current->personality & MMAP_PAGE_ZERO)) { |
136 | vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); | 137 | vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); |
137 | if (vma) { | 138 | if (vma) { |
139 | INIT_LIST_HEAD(&vma->anon_vma_chain); | ||
138 | vma->vm_mm = current->mm; | 140 | vma->vm_mm = current->mm; |
139 | vma->vm_end = PAGE_SIZE; | 141 | vma->vm_end = PAGE_SIZE; |
140 | vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT); | 142 | vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT); |
diff --git a/arch/parisc/Kconfig.debug b/arch/parisc/Kconfig.debug index bc989e522a04..7305ac8f7f5b 100644 --- a/arch/parisc/Kconfig.debug +++ b/arch/parisc/Kconfig.debug | |||
@@ -12,4 +12,18 @@ config DEBUG_RODATA | |||
12 | portion of the kernel code won't be covered by a TLB anymore. | 12 | portion of the kernel code won't be covered by a TLB anymore. |
13 | If in doubt, say "N". | 13 | If in doubt, say "N". |
14 | 14 | ||
15 | config DEBUG_STRICT_USER_COPY_CHECKS | ||
16 | bool "Strict copy size checks" | ||
17 | depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING | ||
18 | ---help--- | ||
19 | Enabling this option turns a certain set of sanity checks for user | ||
20 | copy operations into compile time failures. | ||
21 | |||
22 | The copy_from_user() etc checks are there to help test if there | ||
23 | are sufficient security checks on the length argument of | ||
24 | the copy operation, by having gcc prove that the argument is | ||
25 | within bounds. | ||
26 | |||
27 | If unsure, or if you run an older (pre 4.4) gcc, say N. | ||
28 | |||
15 | endmenu | 29 | endmenu |
diff --git a/arch/parisc/include/asm/param.h b/arch/parisc/include/asm/param.h index 32e03d877858..965d45427975 100644 --- a/arch/parisc/include/asm/param.h +++ b/arch/parisc/include/asm/param.h | |||
@@ -1,22 +1 @@ | |||
1 | #ifndef _ASMPARISC_PARAM_H | #include <asm-generic/param.h> | |
2 | #define _ASMPARISC_PARAM_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | #define HZ CONFIG_HZ | ||
6 | #define USER_HZ 100 /* some user API use "ticks" */ | ||
7 | #define CLOCKS_PER_SEC (USER_HZ) /* like times() */ | ||
8 | #endif | ||
9 | |||
10 | #ifndef HZ | ||
11 | #define HZ 100 | ||
12 | #endif | ||
13 | |||
14 | #define EXEC_PAGESIZE 4096 | ||
15 | |||
16 | #ifndef NOGROUP | ||
17 | #define NOGROUP (-1) | ||
18 | #endif | ||
19 | |||
20 | #define MAXHOSTNAMELEN 64 /* max length of hostname */ | ||
21 | |||
22 | #endif | ||
diff --git a/arch/parisc/include/asm/system.h b/arch/parisc/include/asm/system.h index d91357bca5b4..4653c77bf9d1 100644 --- a/arch/parisc/include/asm/system.h +++ b/arch/parisc/include/asm/system.h | |||
@@ -160,7 +160,7 @@ static inline void set_eiem(unsigned long val) | |||
160 | ldcd). */ | 160 | ldcd). */ |
161 | 161 | ||
162 | #define __PA_LDCW_ALIGNMENT 4 | 162 | #define __PA_LDCW_ALIGNMENT 4 |
163 | #define __ldcw_align(a) ((volatile unsigned int *)a) | 163 | #define __ldcw_align(a) (&(a)->slock) |
164 | #define __LDCW "ldcw,co" | 164 | #define __LDCW "ldcw,co" |
165 | 165 | ||
166 | #endif /*!CONFIG_PA20*/ | 166 | #endif /*!CONFIG_PA20*/ |
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index 7cf799d70b4c..ff4cf9dab8d2 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h | |||
@@ -7,6 +7,7 @@ | |||
7 | #include <asm/page.h> | 7 | #include <asm/page.h> |
8 | #include <asm/system.h> | 8 | #include <asm/system.h> |
9 | #include <asm/cache.h> | 9 | #include <asm/cache.h> |
10 | #include <asm/errno.h> | ||
10 | #include <asm-generic/uaccess-unaligned.h> | 11 | #include <asm-generic/uaccess-unaligned.h> |
11 | 12 | ||
12 | #define VERIFY_READ 0 | 13 | #define VERIFY_READ 0 |
@@ -234,13 +235,35 @@ extern long lstrnlen_user(const char __user *,long); | |||
234 | 235 | ||
235 | unsigned long copy_to_user(void __user *dst, const void *src, unsigned long len); | 236 | unsigned long copy_to_user(void __user *dst, const void *src, unsigned long len); |
236 | #define __copy_to_user copy_to_user | 237 | #define __copy_to_user copy_to_user |
237 | unsigned long copy_from_user(void *dst, const void __user *src, unsigned long len); | 238 | unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long len); |
238 | #define __copy_from_user copy_from_user | ||
239 | unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long len); | 239 | unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long len); |
240 | #define __copy_in_user copy_in_user | 240 | #define __copy_in_user copy_in_user |
241 | #define __copy_to_user_inatomic __copy_to_user | 241 | #define __copy_to_user_inatomic __copy_to_user |
242 | #define __copy_from_user_inatomic __copy_from_user | 242 | #define __copy_from_user_inatomic __copy_from_user |
243 | 243 | ||
244 | extern void copy_from_user_overflow(void) | ||
245 | #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS | ||
246 | __compiletime_error("copy_from_user() buffer size is not provably correct") | ||
247 | #else | ||
248 | __compiletime_warning("copy_from_user() buffer size is not provably correct") | ||
249 | #endif | ||
250 | ; | ||
251 | |||
252 | static inline unsigned long __must_check copy_from_user(void *to, | ||
253 | const void __user *from, | ||
254 | unsigned long n) | ||
255 | { | ||
256 | int sz = __compiletime_object_size(to); | ||
257 | int ret = -EFAULT; | ||
258 | |||
259 | if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n)) | ||
260 | ret = __copy_from_user(to, from, n); | ||
261 | else | ||
262 | copy_from_user_overflow(); | ||
263 | |||
264 | return ret; | ||
265 | } | ||
266 | |||
244 | struct pt_regs; | 267 | struct pt_regs; |
245 | int fixup_exception(struct pt_regs *regs); | 268 | int fixup_exception(struct pt_regs *regs); |
246 | 269 | ||
diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h index cda158318c62..1ce7d2851d90 100644 --- a/arch/parisc/include/asm/unistd.h +++ b/arch/parisc/include/asm/unistd.h | |||
@@ -811,8 +811,10 @@ | |||
811 | #define __NR_pwritev (__NR_Linux + 316) | 811 | #define __NR_pwritev (__NR_Linux + 316) |
812 | #define __NR_rt_tgsigqueueinfo (__NR_Linux + 317) | 812 | #define __NR_rt_tgsigqueueinfo (__NR_Linux + 317) |
813 | #define __NR_perf_event_open (__NR_Linux + 318) | 813 | #define __NR_perf_event_open (__NR_Linux + 318) |
814 | #define __NR_recvmmsg (__NR_Linux + 319) | ||
815 | #define __NR_accept4 (__NR_Linux + 320) | ||
814 | 816 | ||
815 | #define __NR_Linux_syscalls (__NR_perf_event_open + 1) | 817 | #define __NR_Linux_syscalls (__NR_accept4 + 1) |
816 | 818 | ||
817 | 819 | ||
818 | #define __IGNORE_select /* newselect */ | 820 | #define __IGNORE_select /* newselect */ |
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 1054baa2fc69..d054f3da3ff5 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c | |||
@@ -171,14 +171,14 @@ parisc_cache_init(void) | |||
171 | cache_info.ic_conf.cc_cst, | 171 | cache_info.ic_conf.cc_cst, |
172 | cache_info.ic_conf.cc_hv); | 172 | cache_info.ic_conf.cc_hv); |
173 | 173 | ||
174 | printk("D-TLB conf: sh %d page %d cst %d aid %d pad1 %d \n", | 174 | printk("D-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n", |
175 | cache_info.dt_conf.tc_sh, | 175 | cache_info.dt_conf.tc_sh, |
176 | cache_info.dt_conf.tc_page, | 176 | cache_info.dt_conf.tc_page, |
177 | cache_info.dt_conf.tc_cst, | 177 | cache_info.dt_conf.tc_cst, |
178 | cache_info.dt_conf.tc_aid, | 178 | cache_info.dt_conf.tc_aid, |
179 | cache_info.dt_conf.tc_pad1); | 179 | cache_info.dt_conf.tc_pad1); |
180 | 180 | ||
181 | printk("I-TLB conf: sh %d page %d cst %d aid %d pad1 %d \n", | 181 | printk("I-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n", |
182 | cache_info.it_conf.tc_sh, | 182 | cache_info.it_conf.tc_sh, |
183 | cache_info.it_conf.tc_page, | 183 | cache_info.it_conf.tc_page, |
184 | cache_info.it_conf.tc_cst, | 184 | cache_info.it_conf.tc_cst, |
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 01c4fcf8f481..de5f6dab48b7 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S | |||
@@ -417,6 +417,8 @@ | |||
417 | ENTRY_COMP(pwritev) | 417 | ENTRY_COMP(pwritev) |
418 | ENTRY_COMP(rt_tgsigqueueinfo) | 418 | ENTRY_COMP(rt_tgsigqueueinfo) |
419 | ENTRY_SAME(perf_event_open) | 419 | ENTRY_SAME(perf_event_open) |
420 | ENTRY_COMP(recvmmsg) | ||
421 | ENTRY_SAME(accept4) /* 320 */ | ||
420 | 422 | ||
421 | /* Nothing yet */ | 423 | /* Nothing yet */ |
422 | 424 | ||
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index a79c6f9e7e2c..05511ccb61d2 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c | |||
@@ -250,9 +250,21 @@ static int __init rtc_init(void) | |||
250 | } | 250 | } |
251 | module_init(rtc_init); | 251 | module_init(rtc_init); |
252 | 252 | ||
253 | void __init time_init(void) | 253 | void read_persistent_clock(struct timespec *ts) |
254 | { | 254 | { |
255 | static struct pdc_tod tod_data; | 255 | static struct pdc_tod tod_data; |
256 | if (pdc_tod_read(&tod_data) == 0) { | ||
257 | ts->tv_sec = tod_data.tod_sec; | ||
258 | ts->tv_nsec = tod_data.tod_usec * 1000; | ||
259 | } else { | ||
260 | printk(KERN_ERR "Error reading tod clock\n"); | ||
261 | ts->tv_sec = 0; | ||
262 | ts->tv_nsec = 0; | ||
263 | } | ||
264 | } | ||
265 | |||
266 | void __init time_init(void) | ||
267 | { | ||
256 | unsigned long current_cr16_khz; | 268 | unsigned long current_cr16_khz; |
257 | 269 | ||
258 | clocktick = (100 * PAGE0->mem_10msec) / HZ; | 270 | clocktick = (100 * PAGE0->mem_10msec) / HZ; |
@@ -264,19 +276,4 @@ void __init time_init(void) | |||
264 | clocksource_cr16.mult = clocksource_khz2mult(current_cr16_khz, | 276 | clocksource_cr16.mult = clocksource_khz2mult(current_cr16_khz, |
265 | clocksource_cr16.shift); | 277 | clocksource_cr16.shift); |
266 | clocksource_register(&clocksource_cr16); | 278 | clocksource_register(&clocksource_cr16); |
267 | |||
268 | if (pdc_tod_read(&tod_data) == 0) { | ||
269 | unsigned long flags; | ||
270 | |||
271 | write_seqlock_irqsave(&xtime_lock, flags); | ||
272 | xtime.tv_sec = tod_data.tod_sec; | ||
273 | xtime.tv_nsec = tod_data.tod_usec * 1000; | ||
274 | set_normalized_timespec(&wall_to_monotonic, | ||
275 | -xtime.tv_sec, -xtime.tv_nsec); | ||
276 | write_sequnlock_irqrestore(&xtime_lock, flags); | ||
277 | } else { | ||
278 | printk(KERN_ERR "Error reading tod clock\n"); | ||
279 | xtime.tv_sec = 0; | ||
280 | xtime.tv_nsec = 0; | ||
281 | } | ||
282 | } | 279 | } |
diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c index e6f4b7a4b7e3..92d977bb5ea8 100644 --- a/arch/parisc/kernel/unaligned.c +++ b/arch/parisc/kernel/unaligned.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | #include <linux/sched.h> | 26 | #include <linux/sched.h> |
27 | #include <linux/signal.h> | 27 | #include <linux/signal.h> |
28 | #include <linux/ratelimit.h> | ||
28 | #include <asm/uaccess.h> | 29 | #include <asm/uaccess.h> |
29 | 30 | ||
30 | /* #define DEBUG_UNALIGNED 1 */ | 31 | /* #define DEBUG_UNALIGNED 1 */ |
@@ -446,8 +447,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop) | |||
446 | 447 | ||
447 | void handle_unaligned(struct pt_regs *regs) | 448 | void handle_unaligned(struct pt_regs *regs) |
448 | { | 449 | { |
449 | static unsigned long unaligned_count = 0; | 450 | static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5); |
450 | static unsigned long last_time = 0; | ||
451 | unsigned long newbase = R1(regs->iir)?regs->gr[R1(regs->iir)]:0; | 451 | unsigned long newbase = R1(regs->iir)?regs->gr[R1(regs->iir)]:0; |
452 | int modify = 0; | 452 | int modify = 0; |
453 | int ret = ERR_NOTHANDLED; | 453 | int ret = ERR_NOTHANDLED; |
@@ -460,14 +460,8 @@ void handle_unaligned(struct pt_regs *regs) | |||
460 | goto force_sigbus; | 460 | goto force_sigbus; |
461 | } | 461 | } |
462 | 462 | ||
463 | if (unaligned_count > 5 && | 463 | if (!(current->thread.flags & PARISC_UAC_NOPRINT) && |
464 | time_after(jiffies, last_time + 5 * HZ)) { | 464 | __ratelimit(&ratelimit)) { |
465 | unaligned_count = 0; | ||
466 | last_time = jiffies; | ||
467 | } | ||
468 | |||
469 | if (!(current->thread.flags & PARISC_UAC_NOPRINT) | ||
470 | && ++unaligned_count < 5) { | ||
471 | char buf[256]; | 465 | char buf[256]; |
472 | sprintf(buf, "%s(%d): unaligned access to 0x" RFMT " at ip=0x" RFMT "\n", | 466 | sprintf(buf, "%s(%d): unaligned access to 0x" RFMT " at ip=0x" RFMT "\n", |
473 | current->comm, task_pid_nr(current), regs->ior, regs->iaoq[0]); | 467 | current->comm, task_pid_nr(current), regs->ior, regs->iaoq[0]); |
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c index abf41f4632a9..1dbca5c31b3c 100644 --- a/arch/parisc/lib/memcpy.c +++ b/arch/parisc/lib/memcpy.c | |||
@@ -475,7 +475,8 @@ unsigned long copy_to_user(void __user *dst, const void *src, unsigned long len) | |||
475 | return pa_memcpy((void __force *)dst, src, len); | 475 | return pa_memcpy((void __force *)dst, src, len); |
476 | } | 476 | } |
477 | 477 | ||
478 | unsigned long copy_from_user(void *dst, const void __user *src, unsigned long len) | 478 | EXPORT_SYMBOL(__copy_from_user); |
479 | unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long len) | ||
479 | { | 480 | { |
480 | mtsp(get_user_space(), 1); | 481 | mtsp(get_user_space(), 1); |
481 | mtsp(get_kernel_space(), 2); | 482 | mtsp(get_kernel_space(), 2); |
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index b037d95eeadc..64c00227b997 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c | |||
@@ -451,7 +451,7 @@ static int __cpuinit numa_setup_cpu(unsigned long lcpu) | |||
451 | nid = of_node_to_nid_single(cpu); | 451 | nid = of_node_to_nid_single(cpu); |
452 | 452 | ||
453 | if (nid < 0 || !node_online(nid)) | 453 | if (nid < 0 || !node_online(nid)) |
454 | nid = any_online_node(NODE_MASK_ALL); | 454 | nid = first_online_node; |
455 | out: | 455 | out: |
456 | map_cpu_to_node(lcpu, nid); | 456 | map_cpu_to_node(lcpu, nid); |
457 | 457 | ||
@@ -1114,7 +1114,7 @@ int hot_add_scn_to_nid(unsigned long scn_addr) | |||
1114 | int nid, found = 0; | 1114 | int nid, found = 0; |
1115 | 1115 | ||
1116 | if (!numa_enabled || (min_common_depth < 0)) | 1116 | if (!numa_enabled || (min_common_depth < 0)) |
1117 | return any_online_node(NODE_MASK_ALL); | 1117 | return first_online_node; |
1118 | 1118 | ||
1119 | memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); | 1119 | memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); |
1120 | if (memory) { | 1120 | if (memory) { |
@@ -1125,7 +1125,7 @@ int hot_add_scn_to_nid(unsigned long scn_addr) | |||
1125 | } | 1125 | } |
1126 | 1126 | ||
1127 | if (nid < 0 || !node_online(nid)) | 1127 | if (nid < 0 || !node_online(nid)) |
1128 | nid = any_online_node(NODE_MASK_ALL); | 1128 | nid = first_online_node; |
1129 | 1129 | ||
1130 | if (NODE_DATA(nid)->node_spanned_pages) | 1130 | if (NODE_DATA(nid)->node_spanned_pages) |
1131 | return nid; | 1131 | return nid; |
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h index c666bfe5e984..9b04b1102bbc 100644 --- a/arch/s390/include/asm/qdio.h +++ b/arch/s390/include/asm/qdio.h | |||
@@ -321,11 +321,6 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int, | |||
321 | #define QDIO_ERROR_ACTIVATE_CHECK_CONDITION 0x40 | 321 | #define QDIO_ERROR_ACTIVATE_CHECK_CONDITION 0x40 |
322 | #define QDIO_ERROR_SLSB_STATE 0x80 | 322 | #define QDIO_ERROR_SLSB_STATE 0x80 |
323 | 323 | ||
324 | /* for qdio_initialize */ | ||
325 | #define QDIO_INBOUND_0COPY_SBALS 0x01 | ||
326 | #define QDIO_OUTBOUND_0COPY_SBALS 0x02 | ||
327 | #define QDIO_USE_OUTBOUND_PCIS 0x04 | ||
328 | |||
329 | /* for qdio_cleanup */ | 324 | /* for qdio_cleanup */ |
330 | #define QDIO_FLAG_CLEANUP_USING_CLEAR 0x01 | 325 | #define QDIO_FLAG_CLEANUP_USING_CLEAR 0x01 |
331 | #define QDIO_FLAG_CLEANUP_USING_HALT 0x02 | 326 | #define QDIO_FLAG_CLEANUP_USING_HALT 0x02 |
@@ -344,7 +339,6 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int, | |||
344 | * @input_handler: handler to be called for input queues | 339 | * @input_handler: handler to be called for input queues |
345 | * @output_handler: handler to be called for output queues | 340 | * @output_handler: handler to be called for output queues |
346 | * @int_parm: interruption parameter | 341 | * @int_parm: interruption parameter |
347 | * @flags: initialization flags | ||
348 | * @input_sbal_addr_array: address of no_input_qs * 128 pointers | 342 | * @input_sbal_addr_array: address of no_input_qs * 128 pointers |
349 | * @output_sbal_addr_array: address of no_output_qs * 128 pointers | 343 | * @output_sbal_addr_array: address of no_output_qs * 128 pointers |
350 | */ | 344 | */ |
@@ -361,7 +355,6 @@ struct qdio_initialize { | |||
361 | qdio_handler_t *input_handler; | 355 | qdio_handler_t *input_handler; |
362 | qdio_handler_t *output_handler; | 356 | qdio_handler_t *output_handler; |
363 | unsigned long int_parm; | 357 | unsigned long int_parm; |
364 | unsigned long flags; | ||
365 | void **input_sbal_addr_array; | 358 | void **input_sbal_addr_array; |
366 | void **output_sbal_addr_array; | 359 | void **output_sbal_addr_array; |
367 | }; | 360 | }; |
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index a8f93f1705ad..8b22e7f316bb 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
@@ -73,15 +73,15 @@ unsigned long long monotonic_clock(void) | |||
73 | } | 73 | } |
74 | EXPORT_SYMBOL(monotonic_clock); | 74 | EXPORT_SYMBOL(monotonic_clock); |
75 | 75 | ||
76 | void tod_to_timeval(__u64 todval, struct timespec *xtime) | 76 | void tod_to_timeval(__u64 todval, struct timespec *xt) |
77 | { | 77 | { |
78 | unsigned long long sec; | 78 | unsigned long long sec; |
79 | 79 | ||
80 | sec = todval >> 12; | 80 | sec = todval >> 12; |
81 | do_div(sec, 1000000); | 81 | do_div(sec, 1000000); |
82 | xtime->tv_sec = sec; | 82 | xt->tv_sec = sec; |
83 | todval -= (sec * 1000000) << 12; | 83 | todval -= (sec * 1000000) << 12; |
84 | xtime->tv_nsec = ((todval * 1000) >> 12); | 84 | xt->tv_nsec = ((todval * 1000) >> 12); |
85 | } | 85 | } |
86 | EXPORT_SYMBOL(tod_to_timeval); | 86 | EXPORT_SYMBOL(tod_to_timeval); |
87 | 87 | ||
@@ -216,8 +216,8 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock, | |||
216 | ++vdso_data->tb_update_count; | 216 | ++vdso_data->tb_update_count; |
217 | smp_wmb(); | 217 | smp_wmb(); |
218 | vdso_data->xtime_tod_stamp = clock->cycle_last; | 218 | vdso_data->xtime_tod_stamp = clock->cycle_last; |
219 | vdso_data->xtime_clock_sec = xtime.tv_sec; | 219 | vdso_data->xtime_clock_sec = wall_time->tv_sec; |
220 | vdso_data->xtime_clock_nsec = xtime.tv_nsec; | 220 | vdso_data->xtime_clock_nsec = wall_time->tv_nsec; |
221 | vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec; | 221 | vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec; |
222 | vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec; | 222 | vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec; |
223 | smp_wmb(); | 223 | smp_wmb(); |
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile index cd54a1c352af..761ab8b56afc 100644 --- a/arch/s390/lib/Makefile +++ b/arch/s390/lib/Makefile | |||
@@ -2,7 +2,8 @@ | |||
2 | # Makefile for s390-specific library files.. | 2 | # Makefile for s390-specific library files.. |
3 | # | 3 | # |
4 | 4 | ||
5 | lib-y += delay.o string.o uaccess_std.o uaccess_pt.o usercopy.o | 5 | lib-y += delay.o string.o uaccess_std.o uaccess_pt.o |
6 | obj-y += usercopy.o | ||
6 | obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o | 7 | obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o |
7 | lib-$(CONFIG_64BIT) += uaccess_mvcos.o | 8 | lib-$(CONFIG_64BIT) += uaccess_mvcos.o |
8 | lib-$(CONFIG_SMP) += spinlock.o | 9 | lib-$(CONFIG_SMP) += spinlock.o |
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c index 76a3637b88e0..f16bd04e39e9 100644 --- a/arch/s390/mm/cmm.c +++ b/arch/s390/mm/cmm.c | |||
@@ -374,7 +374,7 @@ static struct ctl_table cmm_dir_table[] = { | |||
374 | #ifdef CONFIG_CMM_IUCV | 374 | #ifdef CONFIG_CMM_IUCV |
375 | #define SMSG_PREFIX "CMM" | 375 | #define SMSG_PREFIX "CMM" |
376 | static void | 376 | static void |
377 | cmm_smsg_target(char *from, char *msg) | 377 | cmm_smsg_target(const char *from, char *msg) |
378 | { | 378 | { |
379 | long nr, seconds; | 379 | long nr, seconds; |
380 | 380 | ||
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c index be300aaca6fe..7da0fc94a01e 100644 --- a/arch/sh/boards/mach-migor/setup.c +++ b/arch/sh/boards/mach-migor/setup.c | |||
@@ -419,6 +419,9 @@ static struct i2c_board_info migor_i2c_devices[] = { | |||
419 | I2C_BOARD_INFO("migor_ts", 0x51), | 419 | I2C_BOARD_INFO("migor_ts", 0x51), |
420 | .irq = 38, /* IRQ6 */ | 420 | .irq = 38, /* IRQ6 */ |
421 | }, | 421 | }, |
422 | { | ||
423 | I2C_BOARD_INFO("wm8978", 0x1a), | ||
424 | }, | ||
422 | }; | 425 | }; |
423 | 426 | ||
424 | static struct i2c_board_info migor_i2c_camera[] = { | 427 | static struct i2c_board_info migor_i2c_camera[] = { |
@@ -619,6 +622,19 @@ static int __init migor_devices_setup(void) | |||
619 | 622 | ||
620 | platform_resource_setup_memory(&migor_ceu_device, "ceu", 4 << 20); | 623 | platform_resource_setup_memory(&migor_ceu_device, "ceu", 4 << 20); |
621 | 624 | ||
625 | /* SIU: Port B */ | ||
626 | gpio_request(GPIO_FN_SIUBOLR, NULL); | ||
627 | gpio_request(GPIO_FN_SIUBOBT, NULL); | ||
628 | gpio_request(GPIO_FN_SIUBISLD, NULL); | ||
629 | gpio_request(GPIO_FN_SIUBOSLD, NULL); | ||
630 | gpio_request(GPIO_FN_SIUMCKB, NULL); | ||
631 | |||
632 | /* | ||
633 | * The original driver sets SIUB OLR/OBT, ILR/IBT, and SIUA OLR/OBT to | ||
634 | * output. Need only SIUB, set to output for master mode (table 34.2) | ||
635 | */ | ||
636 | __raw_writew(__raw_readw(PORT_MSELCRA) | 1, PORT_MSELCRA); | ||
637 | |||
622 | i2c_register_board_info(0, migor_i2c_devices, | 638 | i2c_register_board_info(0, migor_i2c_devices, |
623 | ARRAY_SIZE(migor_i2c_devices)); | 639 | ARRAY_SIZE(migor_i2c_devices)); |
624 | 640 | ||
diff --git a/arch/sh/boot/compressed/cache.c b/arch/sh/boot/compressed/cache.c index e27fc74f228c..d0b77b68a4d0 100644 --- a/arch/sh/boot/compressed/cache.c +++ b/arch/sh/boot/compressed/cache.c | |||
@@ -5,7 +5,7 @@ int cache_control(unsigned int command) | |||
5 | 5 | ||
6 | for (i = 0; i < (32 * 1024); i += 32) { | 6 | for (i = 0; i < (32 * 1024); i += 32) { |
7 | (void)*p; | 7 | (void)*p; |
8 | p += (32 / sizeof (int)); | 8 | p += (32 / sizeof(int)); |
9 | } | 9 | } |
10 | 10 | ||
11 | return 0; | 11 | return 0; |
diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h index da3ebec921a7..1f4e562c5e8c 100644 --- a/arch/sh/include/asm/cacheflush.h +++ b/arch/sh/include/asm/cacheflush.h | |||
@@ -86,8 +86,8 @@ extern void copy_from_user_page(struct vm_area_struct *vma, | |||
86 | struct page *page, unsigned long vaddr, void *dst, const void *src, | 86 | struct page *page, unsigned long vaddr, void *dst, const void *src, |
87 | unsigned long len); | 87 | unsigned long len); |
88 | 88 | ||
89 | #define flush_cache_vmap(start, end) flush_cache_all() | 89 | #define flush_cache_vmap(start, end) local_flush_cache_all(NULL) |
90 | #define flush_cache_vunmap(start, end) flush_cache_all() | 90 | #define flush_cache_vunmap(start, end) local_flush_cache_all(NULL) |
91 | 91 | ||
92 | #define flush_dcache_mmap_lock(mapping) do { } while (0) | 92 | #define flush_dcache_mmap_lock(mapping) do { } while (0) |
93 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) | 93 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) |
diff --git a/arch/sh/include/asm/dma-register.h b/arch/sh/include/asm/dma-register.h new file mode 100644 index 000000000000..51cd78feacff --- /dev/null +++ b/arch/sh/include/asm/dma-register.h | |||
@@ -0,0 +1,51 @@ | |||
1 | /* | ||
2 | * Common header for the legacy SH DMA driver and the new dmaengine driver | ||
3 | * | ||
4 | * extracted from arch/sh/include/asm/dma-sh.h: | ||
5 | * | ||
6 | * Copyright (C) 2000 Takashi YOSHII | ||
7 | * Copyright (C) 2003 Paul Mundt | ||
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | ||
13 | #ifndef DMA_REGISTER_H | ||
14 | #define DMA_REGISTER_H | ||
15 | |||
16 | /* DMA register */ | ||
17 | #define SAR 0x00 | ||
18 | #define DAR 0x04 | ||
19 | #define TCR 0x08 | ||
20 | #define CHCR 0x0C | ||
21 | #define DMAOR 0x40 | ||
22 | |||
23 | /* DMAOR definitions */ | ||
24 | #define DMAOR_AE 0x00000004 | ||
25 | #define DMAOR_NMIF 0x00000002 | ||
26 | #define DMAOR_DME 0x00000001 | ||
27 | |||
28 | /* Definitions for the SuperH DMAC */ | ||
29 | #define REQ_L 0x00000000 | ||
30 | #define REQ_E 0x00080000 | ||
31 | #define RACK_H 0x00000000 | ||
32 | #define RACK_L 0x00040000 | ||
33 | #define ACK_R 0x00000000 | ||
34 | #define ACK_W 0x00020000 | ||
35 | #define ACK_H 0x00000000 | ||
36 | #define ACK_L 0x00010000 | ||
37 | #define DM_INC 0x00004000 | ||
38 | #define DM_DEC 0x00008000 | ||
39 | #define DM_FIX 0x0000c000 | ||
40 | #define SM_INC 0x00001000 | ||
41 | #define SM_DEC 0x00002000 | ||
42 | #define SM_FIX 0x00003000 | ||
43 | #define RS_IN 0x00000200 | ||
44 | #define RS_OUT 0x00000300 | ||
45 | #define TS_BLK 0x00000040 | ||
46 | #define TM_BUR 0x00000020 | ||
47 | #define CHCR_DE 0x00000001 | ||
48 | #define CHCR_TE 0x00000002 | ||
49 | #define CHCR_IE 0x00000004 | ||
50 | |||
51 | #endif | ||
diff --git a/arch/sh/include/asm/dma-sh.h b/arch/sh/include/asm/dma-sh.h index e934a2e66651..f3acb8e34c6b 100644 --- a/arch/sh/include/asm/dma-sh.h +++ b/arch/sh/include/asm/dma-sh.h | |||
@@ -11,7 +11,8 @@ | |||
11 | #ifndef __DMA_SH_H | 11 | #ifndef __DMA_SH_H |
12 | #define __DMA_SH_H | 12 | #define __DMA_SH_H |
13 | 13 | ||
14 | #include <asm/dma.h> | 14 | #include <asm/dma-register.h> |
15 | #include <cpu/dma-register.h> | ||
15 | #include <cpu/dma.h> | 16 | #include <cpu/dma.h> |
16 | 17 | ||
17 | /* DMAOR contorl: The DMAOR access size is different by CPU.*/ | 18 | /* DMAOR contorl: The DMAOR access size is different by CPU.*/ |
@@ -53,34 +54,6 @@ static int dmte_irq_map[] __maybe_unused = { | |||
53 | #endif | 54 | #endif |
54 | }; | 55 | }; |
55 | 56 | ||
56 | /* Definitions for the SuperH DMAC */ | ||
57 | #define REQ_L 0x00000000 | ||
58 | #define REQ_E 0x00080000 | ||
59 | #define RACK_H 0x00000000 | ||
60 | #define RACK_L 0x00040000 | ||
61 | #define ACK_R 0x00000000 | ||
62 | #define ACK_W 0x00020000 | ||
63 | #define ACK_H 0x00000000 | ||
64 | #define ACK_L 0x00010000 | ||
65 | #define DM_INC 0x00004000 | ||
66 | #define DM_DEC 0x00008000 | ||
67 | #define DM_FIX 0x0000c000 | ||
68 | #define SM_INC 0x00001000 | ||
69 | #define SM_DEC 0x00002000 | ||
70 | #define SM_FIX 0x00003000 | ||
71 | #define RS_IN 0x00000200 | ||
72 | #define RS_OUT 0x00000300 | ||
73 | #define TS_BLK 0x00000040 | ||
74 | #define TM_BUR 0x00000020 | ||
75 | #define CHCR_DE 0x00000001 | ||
76 | #define CHCR_TE 0x00000002 | ||
77 | #define CHCR_IE 0x00000004 | ||
78 | |||
79 | /* DMAOR definitions */ | ||
80 | #define DMAOR_AE 0x00000004 | ||
81 | #define DMAOR_NMIF 0x00000002 | ||
82 | #define DMAOR_DME 0x00000001 | ||
83 | |||
84 | /* | 57 | /* |
85 | * Define the default configuration for dual address memory-memory transfer. | 58 | * Define the default configuration for dual address memory-memory transfer. |
86 | * The 0x400 value represents auto-request, external->external. | 59 | * The 0x400 value represents auto-request, external->external. |
@@ -111,61 +84,4 @@ static u32 dma_base_addr[] __maybe_unused = { | |||
111 | #endif | 84 | #endif |
112 | }; | 85 | }; |
113 | 86 | ||
114 | /* DMA register */ | ||
115 | #define SAR 0x00 | ||
116 | #define DAR 0x04 | ||
117 | #define TCR 0x08 | ||
118 | #define CHCR 0x0C | ||
119 | #define DMAOR 0x40 | ||
120 | |||
121 | /* | ||
122 | * for dma engine | ||
123 | * | ||
124 | * SuperH DMA mode | ||
125 | */ | ||
126 | #define SHDMA_MIX_IRQ (1 << 1) | ||
127 | #define SHDMA_DMAOR1 (1 << 2) | ||
128 | #define SHDMA_DMAE1 (1 << 3) | ||
129 | |||
130 | enum sh_dmae_slave_chan_id { | ||
131 | SHDMA_SLAVE_SCIF0_TX, | ||
132 | SHDMA_SLAVE_SCIF0_RX, | ||
133 | SHDMA_SLAVE_SCIF1_TX, | ||
134 | SHDMA_SLAVE_SCIF1_RX, | ||
135 | SHDMA_SLAVE_SCIF2_TX, | ||
136 | SHDMA_SLAVE_SCIF2_RX, | ||
137 | SHDMA_SLAVE_SCIF3_TX, | ||
138 | SHDMA_SLAVE_SCIF3_RX, | ||
139 | SHDMA_SLAVE_SCIF4_TX, | ||
140 | SHDMA_SLAVE_SCIF4_RX, | ||
141 | SHDMA_SLAVE_SCIF5_TX, | ||
142 | SHDMA_SLAVE_SCIF5_RX, | ||
143 | SHDMA_SLAVE_SIUA_TX, | ||
144 | SHDMA_SLAVE_SIUA_RX, | ||
145 | SHDMA_SLAVE_SIUB_TX, | ||
146 | SHDMA_SLAVE_SIUB_RX, | ||
147 | SHDMA_SLAVE_NUMBER, /* Must stay last */ | ||
148 | }; | ||
149 | |||
150 | struct sh_dmae_slave_config { | ||
151 | enum sh_dmae_slave_chan_id slave_id; | ||
152 | dma_addr_t addr; | ||
153 | u32 chcr; | ||
154 | char mid_rid; | ||
155 | }; | ||
156 | |||
157 | struct sh_dmae_pdata { | ||
158 | unsigned int mode; | ||
159 | struct sh_dmae_slave_config *config; | ||
160 | int config_num; | ||
161 | }; | ||
162 | |||
163 | struct device; | ||
164 | |||
165 | struct sh_dmae_slave { | ||
166 | enum sh_dmae_slave_chan_id slave_id; /* Set by the platform */ | ||
167 | struct device *dma_dev; /* Set by the platform */ | ||
168 | struct sh_dmae_slave_config *config; /* Set by the driver */ | ||
169 | }; | ||
170 | |||
171 | #endif /* __DMA_SH_H */ | 87 | #endif /* __DMA_SH_H */ |
diff --git a/arch/sh/include/asm/dmaengine.h b/arch/sh/include/asm/dmaengine.h new file mode 100644 index 000000000000..bf2f30cf0a27 --- /dev/null +++ b/arch/sh/include/asm/dmaengine.h | |||
@@ -0,0 +1,93 @@ | |||
1 | /* | ||
2 | * Header for the new SH dmaengine driver | ||
3 | * | ||
4 | * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #ifndef ASM_DMAENGINE_H | ||
11 | #define ASM_DMAENGINE_H | ||
12 | |||
13 | #include <linux/dmaengine.h> | ||
14 | #include <linux/list.h> | ||
15 | |||
16 | #include <asm/dma-register.h> | ||
17 | |||
18 | #define SH_DMAC_MAX_CHANNELS 6 | ||
19 | |||
20 | enum sh_dmae_slave_chan_id { | ||
21 | SHDMA_SLAVE_SCIF0_TX, | ||
22 | SHDMA_SLAVE_SCIF0_RX, | ||
23 | SHDMA_SLAVE_SCIF1_TX, | ||
24 | SHDMA_SLAVE_SCIF1_RX, | ||
25 | SHDMA_SLAVE_SCIF2_TX, | ||
26 | SHDMA_SLAVE_SCIF2_RX, | ||
27 | SHDMA_SLAVE_SCIF3_TX, | ||
28 | SHDMA_SLAVE_SCIF3_RX, | ||
29 | SHDMA_SLAVE_SCIF4_TX, | ||
30 | SHDMA_SLAVE_SCIF4_RX, | ||
31 | SHDMA_SLAVE_SCIF5_TX, | ||
32 | SHDMA_SLAVE_SCIF5_RX, | ||
33 | SHDMA_SLAVE_SIUA_TX, | ||
34 | SHDMA_SLAVE_SIUA_RX, | ||
35 | SHDMA_SLAVE_SIUB_TX, | ||
36 | SHDMA_SLAVE_SIUB_RX, | ||
37 | SHDMA_SLAVE_NUMBER, /* Must stay last */ | ||
38 | }; | ||
39 | |||
40 | struct sh_dmae_slave_config { | ||
41 | enum sh_dmae_slave_chan_id slave_id; | ||
42 | dma_addr_t addr; | ||
43 | u32 chcr; | ||
44 | char mid_rid; | ||
45 | }; | ||
46 | |||
47 | struct sh_dmae_channel { | ||
48 | unsigned int offset; | ||
49 | unsigned int dmars; | ||
50 | unsigned int dmars_bit; | ||
51 | }; | ||
52 | |||
53 | struct sh_dmae_pdata { | ||
54 | struct sh_dmae_slave_config *slave; | ||
55 | int slave_num; | ||
56 | struct sh_dmae_channel *channel; | ||
57 | int channel_num; | ||
58 | unsigned int ts_low_shift; | ||
59 | unsigned int ts_low_mask; | ||
60 | unsigned int ts_high_shift; | ||
61 | unsigned int ts_high_mask; | ||
62 | unsigned int *ts_shift; | ||
63 | int ts_shift_num; | ||
64 | u16 dmaor_init; | ||
65 | }; | ||
66 | |||
67 | struct device; | ||
68 | |||
69 | /* Used by slave DMA clients to request DMA to/from a specific peripheral */ | ||
70 | struct sh_dmae_slave { | ||
71 | enum sh_dmae_slave_chan_id slave_id; /* Set by the platform */ | ||
72 | struct device *dma_dev; /* Set by the platform */ | ||
73 | struct sh_dmae_slave_config *config; /* Set by the driver */ | ||
74 | }; | ||
75 | |||
76 | struct sh_dmae_regs { | ||
77 | u32 sar; /* SAR / source address */ | ||
78 | u32 dar; /* DAR / destination address */ | ||
79 | u32 tcr; /* TCR / transfer count */ | ||
80 | }; | ||
81 | |||
82 | struct sh_desc { | ||
83 | struct sh_dmae_regs hw; | ||
84 | struct list_head node; | ||
85 | struct dma_async_tx_descriptor async_tx; | ||
86 | enum dma_data_direction direction; | ||
87 | dma_cookie_t cookie; | ||
88 | size_t partial; | ||
89 | int chunks; | ||
90 | int mark; | ||
91 | }; | ||
92 | |||
93 | #endif | ||
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 7dab7b23a5ec..f689554e17c1 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h | |||
@@ -291,21 +291,21 @@ unsigned long long poke_real_address_q(unsigned long long addr, | |||
291 | * doesn't exist, so everything must go through page tables. | 291 | * doesn't exist, so everything must go through page tables. |
292 | */ | 292 | */ |
293 | #ifdef CONFIG_MMU | 293 | #ifdef CONFIG_MMU |
294 | void __iomem *__ioremap_caller(unsigned long offset, unsigned long size, | 294 | void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size, |
295 | pgprot_t prot, void *caller); | 295 | pgprot_t prot, void *caller); |
296 | void __iounmap(void __iomem *addr); | 296 | void __iounmap(void __iomem *addr); |
297 | 297 | ||
298 | static inline void __iomem * | 298 | static inline void __iomem * |
299 | __ioremap(unsigned long offset, unsigned long size, pgprot_t prot) | 299 | __ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot) |
300 | { | 300 | { |
301 | return __ioremap_caller(offset, size, prot, __builtin_return_address(0)); | 301 | return __ioremap_caller(offset, size, prot, __builtin_return_address(0)); |
302 | } | 302 | } |
303 | 303 | ||
304 | static inline void __iomem * | 304 | static inline void __iomem * |
305 | __ioremap_29bit(unsigned long offset, unsigned long size, pgprot_t prot) | 305 | __ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot) |
306 | { | 306 | { |
307 | #ifdef CONFIG_29BIT | 307 | #ifdef CONFIG_29BIT |
308 | unsigned long last_addr = offset + size - 1; | 308 | phys_addr_t last_addr = offset + size - 1; |
309 | 309 | ||
310 | /* | 310 | /* |
311 | * For P1 and P2 space this is trivial, as everything is already | 311 | * For P1 and P2 space this is trivial, as everything is already |
@@ -329,7 +329,7 @@ __ioremap_29bit(unsigned long offset, unsigned long size, pgprot_t prot) | |||
329 | } | 329 | } |
330 | 330 | ||
331 | static inline void __iomem * | 331 | static inline void __iomem * |
332 | __ioremap_mode(unsigned long offset, unsigned long size, pgprot_t prot) | 332 | __ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot) |
333 | { | 333 | { |
334 | void __iomem *ret; | 334 | void __iomem *ret; |
335 | 335 | ||
@@ -349,35 +349,32 @@ __ioremap_mode(unsigned long offset, unsigned long size, pgprot_t prot) | |||
349 | #define __iounmap(addr) do { } while (0) | 349 | #define __iounmap(addr) do { } while (0) |
350 | #endif /* CONFIG_MMU */ | 350 | #endif /* CONFIG_MMU */ |
351 | 351 | ||
352 | static inline void __iomem * | 352 | static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size) |
353 | ioremap(unsigned long offset, unsigned long size) | ||
354 | { | 353 | { |
355 | return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE); | 354 | return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE); |
356 | } | 355 | } |
357 | 356 | ||
358 | static inline void __iomem * | 357 | static inline void __iomem * |
359 | ioremap_cache(unsigned long offset, unsigned long size) | 358 | ioremap_cache(phys_addr_t offset, unsigned long size) |
360 | { | 359 | { |
361 | return __ioremap_mode(offset, size, PAGE_KERNEL); | 360 | return __ioremap_mode(offset, size, PAGE_KERNEL); |
362 | } | 361 | } |
363 | 362 | ||
364 | #ifdef CONFIG_HAVE_IOREMAP_PROT | 363 | #ifdef CONFIG_HAVE_IOREMAP_PROT |
365 | static inline void __iomem * | 364 | static inline void __iomem * |
366 | ioremap_prot(resource_size_t offset, unsigned long size, unsigned long flags) | 365 | ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags) |
367 | { | 366 | { |
368 | return __ioremap_mode(offset, size, __pgprot(flags)); | 367 | return __ioremap_mode(offset, size, __pgprot(flags)); |
369 | } | 368 | } |
370 | #endif | 369 | #endif |
371 | 370 | ||
372 | #ifdef CONFIG_IOREMAP_FIXED | 371 | #ifdef CONFIG_IOREMAP_FIXED |
373 | extern void __iomem *ioremap_fixed(resource_size_t, unsigned long, | 372 | extern void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t); |
374 | unsigned long, pgprot_t); | ||
375 | extern int iounmap_fixed(void __iomem *); | 373 | extern int iounmap_fixed(void __iomem *); |
376 | extern void ioremap_fixed_init(void); | 374 | extern void ioremap_fixed_init(void); |
377 | #else | 375 | #else |
378 | static inline void __iomem * | 376 | static inline void __iomem * |
379 | ioremap_fixed(resource_size_t phys_addr, unsigned long offset, | 377 | ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot) |
380 | unsigned long size, pgprot_t prot) | ||
381 | { | 378 | { |
382 | BUG(); | 379 | BUG(); |
383 | return NULL; | 380 | return NULL; |
diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h index 15a05b615ba7..19fe84550b49 100644 --- a/arch/sh/include/asm/mmu.h +++ b/arch/sh/include/asm/mmu.h | |||
@@ -55,19 +55,29 @@ typedef struct { | |||
55 | 55 | ||
56 | #ifdef CONFIG_PMB | 56 | #ifdef CONFIG_PMB |
57 | /* arch/sh/mm/pmb.c */ | 57 | /* arch/sh/mm/pmb.c */ |
58 | long pmb_remap(unsigned long virt, unsigned long phys, | ||
59 | unsigned long size, pgprot_t prot); | ||
60 | void pmb_unmap(unsigned long addr); | ||
61 | void pmb_init(void); | ||
62 | bool __in_29bit_mode(void); | 58 | bool __in_29bit_mode(void); |
59 | |||
60 | void pmb_init(void); | ||
61 | int pmb_bolt_mapping(unsigned long virt, phys_addr_t phys, | ||
62 | unsigned long size, pgprot_t prot); | ||
63 | void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size, | ||
64 | pgprot_t prot, void *caller); | ||
65 | int pmb_unmap(void __iomem *addr); | ||
66 | |||
63 | #else | 67 | #else |
64 | static inline long pmb_remap(unsigned long virt, unsigned long phys, | 68 | |
65 | unsigned long size, pgprot_t prot) | 69 | static inline void __iomem * |
70 | pmb_remap_caller(phys_addr_t phys, unsigned long size, | ||
71 | pgprot_t prot, void *caller) | ||
72 | { | ||
73 | return NULL; | ||
74 | } | ||
75 | |||
76 | static inline int pmb_unmap(void __iomem *addr) | ||
66 | { | 77 | { |
67 | return -EINVAL; | 78 | return -EINVAL; |
68 | } | 79 | } |
69 | 80 | ||
70 | #define pmb_unmap(addr) do { } while (0) | ||
71 | #define pmb_init(addr) do { } while (0) | 81 | #define pmb_init(addr) do { } while (0) |
72 | 82 | ||
73 | #ifdef CONFIG_29BIT | 83 | #ifdef CONFIG_29BIT |
@@ -77,6 +87,13 @@ static inline long pmb_remap(unsigned long virt, unsigned long phys, | |||
77 | #endif | 87 | #endif |
78 | 88 | ||
79 | #endif /* CONFIG_PMB */ | 89 | #endif /* CONFIG_PMB */ |
90 | |||
91 | static inline void __iomem * | ||
92 | pmb_remap(phys_addr_t phys, unsigned long size, pgprot_t prot) | ||
93 | { | ||
94 | return pmb_remap_caller(phys, size, prot, __builtin_return_address(0)); | ||
95 | } | ||
96 | |||
80 | #endif /* __ASSEMBLY__ */ | 97 | #endif /* __ASSEMBLY__ */ |
81 | 98 | ||
82 | #endif /* __MMU_H */ | 99 | #endif /* __MMU_H */ |
diff --git a/arch/sh/include/asm/siu.h b/arch/sh/include/asm/siu.h index 57565a3b551f..f1b1e6944a5f 100644 --- a/arch/sh/include/asm/siu.h +++ b/arch/sh/include/asm/siu.h | |||
@@ -11,7 +11,7 @@ | |||
11 | #ifndef ASM_SIU_H | 11 | #ifndef ASM_SIU_H |
12 | #define ASM_SIU_H | 12 | #define ASM_SIU_H |
13 | 13 | ||
14 | #include <asm/dma-sh.h> | 14 | #include <asm/dmaengine.h> |
15 | 15 | ||
16 | struct device; | 16 | struct device; |
17 | 17 | ||
diff --git a/arch/sh/include/asm/topology.h b/arch/sh/include/asm/topology.h index 37cdadd975ac..88e734069fa6 100644 --- a/arch/sh/include/asm/topology.h +++ b/arch/sh/include/asm/topology.h | |||
@@ -35,7 +35,7 @@ | |||
35 | 35 | ||
36 | #define pcibus_to_node(bus) ((void)(bus), -1) | 36 | #define pcibus_to_node(bus) ((void)(bus), -1) |
37 | #define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \ | 37 | #define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \ |
38 | CPU_MASK_ALL_PTR : \ | 38 | cpu_all_mask : \ |
39 | cpumask_of_node(pcibus_to_node(bus))) | 39 | cpumask_of_node(pcibus_to_node(bus))) |
40 | 40 | ||
41 | #endif | 41 | #endif |
diff --git a/arch/sh/include/cpu-sh3/cpu/dma-register.h b/arch/sh/include/cpu-sh3/cpu/dma-register.h new file mode 100644 index 000000000000..2349e488c9a6 --- /dev/null +++ b/arch/sh/include/cpu-sh3/cpu/dma-register.h | |||
@@ -0,0 +1,41 @@ | |||
1 | /* | ||
2 | * SH3 CPU-specific DMA definitions, used by both DMA drivers | ||
3 | * | ||
4 | * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #ifndef CPU_DMA_REGISTER_H | ||
11 | #define CPU_DMA_REGISTER_H | ||
12 | |||
13 | #define CHCR_TS_LOW_MASK 0x18 | ||
14 | #define CHCR_TS_LOW_SHIFT 3 | ||
15 | #define CHCR_TS_HIGH_MASK 0 | ||
16 | #define CHCR_TS_HIGH_SHIFT 0 | ||
17 | |||
18 | #define DMAOR_INIT DMAOR_DME | ||
19 | |||
20 | /* | ||
21 | * The SuperH DMAC supports a number of transmit sizes, we list them here, | ||
22 | * with their respective values as they appear in the CHCR registers. | ||
23 | */ | ||
24 | enum { | ||
25 | XMIT_SZ_8BIT, | ||
26 | XMIT_SZ_16BIT, | ||
27 | XMIT_SZ_32BIT, | ||
28 | XMIT_SZ_128BIT, | ||
29 | }; | ||
30 | |||
31 | /* log2(size / 8) - used to calculate number of transfers */ | ||
32 | #define TS_SHIFT { \ | ||
33 | [XMIT_SZ_8BIT] = 0, \ | ||
34 | [XMIT_SZ_16BIT] = 1, \ | ||
35 | [XMIT_SZ_32BIT] = 2, \ | ||
36 | [XMIT_SZ_128BIT] = 4, \ | ||
37 | } | ||
38 | |||
39 | #define TS_INDEX2VAL(i) (((i) & 3) << CHCR_TS_LOW_SHIFT) | ||
40 | |||
41 | #endif | ||
diff --git a/arch/sh/include/cpu-sh3/cpu/dma.h b/arch/sh/include/cpu-sh3/cpu/dma.h index 207811a7a650..24e28b91c9d5 100644 --- a/arch/sh/include/cpu-sh3/cpu/dma.h +++ b/arch/sh/include/cpu-sh3/cpu/dma.h | |||
@@ -20,31 +20,4 @@ | |||
20 | #define TS_32 0x00000010 | 20 | #define TS_32 0x00000010 |
21 | #define TS_128 0x00000018 | 21 | #define TS_128 0x00000018 |
22 | 22 | ||
23 | #define CHCR_TS_LOW_MASK 0x18 | ||
24 | #define CHCR_TS_LOW_SHIFT 3 | ||
25 | #define CHCR_TS_HIGH_MASK 0 | ||
26 | #define CHCR_TS_HIGH_SHIFT 0 | ||
27 | |||
28 | #define DMAOR_INIT DMAOR_DME | ||
29 | |||
30 | /* | ||
31 | * The SuperH DMAC supports a number of transmit sizes, we list them here, | ||
32 | * with their respective values as they appear in the CHCR registers. | ||
33 | */ | ||
34 | enum { | ||
35 | XMIT_SZ_8BIT, | ||
36 | XMIT_SZ_16BIT, | ||
37 | XMIT_SZ_32BIT, | ||
38 | XMIT_SZ_128BIT, | ||
39 | }; | ||
40 | |||
41 | #define TS_SHIFT { \ | ||
42 | [XMIT_SZ_8BIT] = 0, \ | ||
43 | [XMIT_SZ_16BIT] = 1, \ | ||
44 | [XMIT_SZ_32BIT] = 2, \ | ||
45 | [XMIT_SZ_128BIT] = 4, \ | ||
46 | } | ||
47 | |||
48 | #define TS_INDEX2VAL(i) (((i) & 3) << CHCR_TS_LOW_SHIFT) | ||
49 | |||
50 | #endif /* __ASM_CPU_SH3_DMA_H */ | 23 | #endif /* __ASM_CPU_SH3_DMA_H */ |
diff --git a/arch/sh/include/cpu-sh4/cpu/dma-register.h b/arch/sh/include/cpu-sh4/cpu/dma-register.h new file mode 100644 index 000000000000..55f9fec082d4 --- /dev/null +++ b/arch/sh/include/cpu-sh4/cpu/dma-register.h | |||
@@ -0,0 +1,112 @@ | |||
1 | /* | ||
2 | * SH4 CPU-specific DMA definitions, used by both DMA drivers | ||
3 | * | ||
4 | * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #ifndef CPU_DMA_REGISTER_H | ||
11 | #define CPU_DMA_REGISTER_H | ||
12 | |||
13 | /* SH7751/7760/7780 DMA IRQ sources */ | ||
14 | |||
15 | #ifdef CONFIG_CPU_SH4A | ||
16 | |||
17 | #define DMAOR_INIT DMAOR_DME | ||
18 | |||
19 | #if defined(CONFIG_CPU_SUBTYPE_SH7343) || \ | ||
20 | defined(CONFIG_CPU_SUBTYPE_SH7730) | ||
21 | #define CHCR_TS_LOW_MASK 0x00000018 | ||
22 | #define CHCR_TS_LOW_SHIFT 3 | ||
23 | #define CHCR_TS_HIGH_MASK 0 | ||
24 | #define CHCR_TS_HIGH_SHIFT 0 | ||
25 | #elif defined(CONFIG_CPU_SUBTYPE_SH7722) || \ | ||
26 | defined(CONFIG_CPU_SUBTYPE_SH7724) | ||
27 | #define CHCR_TS_LOW_MASK 0x00000018 | ||
28 | #define CHCR_TS_LOW_SHIFT 3 | ||
29 | #define CHCR_TS_HIGH_MASK 0x00300000 | ||
30 | #define CHCR_TS_HIGH_SHIFT (20 - 2) /* 2 bits for shifted low TS */ | ||
31 | #elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \ | ||
32 | defined(CONFIG_CPU_SUBTYPE_SH7764) | ||
33 | #define CHCR_TS_LOW_MASK 0x00000018 | ||
34 | #define CHCR_TS_LOW_SHIFT 3 | ||
35 | #define CHCR_TS_HIGH_MASK 0 | ||
36 | #define CHCR_TS_HIGH_SHIFT 0 | ||
37 | #elif defined(CONFIG_CPU_SUBTYPE_SH7723) | ||
38 | #define CHCR_TS_LOW_MASK 0x00000018 | ||
39 | #define CHCR_TS_LOW_SHIFT 3 | ||
40 | #define CHCR_TS_HIGH_MASK 0 | ||
41 | #define CHCR_TS_HIGH_SHIFT 0 | ||
42 | #elif defined(CONFIG_CPU_SUBTYPE_SH7780) | ||
43 | #define CHCR_TS_LOW_MASK 0x00000018 | ||
44 | #define CHCR_TS_LOW_SHIFT 3 | ||
45 | #define CHCR_TS_HIGH_MASK 0 | ||
46 | #define CHCR_TS_HIGH_SHIFT 0 | ||
47 | #else /* SH7785 */ | ||
48 | #define CHCR_TS_LOW_MASK 0x00000018 | ||
49 | #define CHCR_TS_LOW_SHIFT 3 | ||
50 | #define CHCR_TS_HIGH_MASK 0 | ||
51 | #define CHCR_TS_HIGH_SHIFT 0 | ||
52 | #endif | ||
53 | |||
54 | /* Transmit sizes and respective CHCR register values */ | ||
55 | enum { | ||
56 | XMIT_SZ_8BIT = 0, | ||
57 | XMIT_SZ_16BIT = 1, | ||
58 | XMIT_SZ_32BIT = 2, | ||
59 | XMIT_SZ_64BIT = 7, | ||
60 | XMIT_SZ_128BIT = 3, | ||
61 | XMIT_SZ_256BIT = 4, | ||
62 | XMIT_SZ_128BIT_BLK = 0xb, | ||
63 | XMIT_SZ_256BIT_BLK = 0xc, | ||
64 | }; | ||
65 | |||
66 | /* log2(size / 8) - used to calculate number of transfers */ | ||
67 | #define TS_SHIFT { \ | ||
68 | [XMIT_SZ_8BIT] = 0, \ | ||
69 | [XMIT_SZ_16BIT] = 1, \ | ||
70 | [XMIT_SZ_32BIT] = 2, \ | ||
71 | [XMIT_SZ_64BIT] = 3, \ | ||
72 | [XMIT_SZ_128BIT] = 4, \ | ||
73 | [XMIT_SZ_256BIT] = 5, \ | ||
74 | [XMIT_SZ_128BIT_BLK] = 4, \ | ||
75 | [XMIT_SZ_256BIT_BLK] = 5, \ | ||
76 | } | ||
77 | |||
78 | #define TS_INDEX2VAL(i) ((((i) & 3) << CHCR_TS_LOW_SHIFT) | \ | ||
79 | ((((i) >> 2) & 3) << CHCR_TS_HIGH_SHIFT)) | ||
80 | |||
81 | #else /* CONFIG_CPU_SH4A */ | ||
82 | |||
83 | #define DMAOR_INIT (0x8000 | DMAOR_DME) | ||
84 | |||
85 | #define CHCR_TS_LOW_MASK 0x70 | ||
86 | #define CHCR_TS_LOW_SHIFT 4 | ||
87 | #define CHCR_TS_HIGH_MASK 0 | ||
88 | #define CHCR_TS_HIGH_SHIFT 0 | ||
89 | |||
90 | /* Transmit sizes and respective CHCR register values */ | ||
91 | enum { | ||
92 | XMIT_SZ_8BIT = 1, | ||
93 | XMIT_SZ_16BIT = 2, | ||
94 | XMIT_SZ_32BIT = 3, | ||
95 | XMIT_SZ_64BIT = 0, | ||
96 | XMIT_SZ_256BIT = 4, | ||
97 | }; | ||
98 | |||
99 | /* log2(size / 8) - used to calculate number of transfers */ | ||
100 | #define TS_SHIFT { \ | ||
101 | [XMIT_SZ_8BIT] = 0, \ | ||
102 | [XMIT_SZ_16BIT] = 1, \ | ||
103 | [XMIT_SZ_32BIT] = 2, \ | ||
104 | [XMIT_SZ_64BIT] = 3, \ | ||
105 | [XMIT_SZ_256BIT] = 5, \ | ||
106 | } | ||
107 | |||
108 | #define TS_INDEX2VAL(i) (((i) & 7) << CHCR_TS_LOW_SHIFT) | ||
109 | |||
110 | #endif /* CONFIG_CPU_SH4A */ | ||
111 | |||
112 | #endif | ||
diff --git a/arch/sh/include/cpu-sh4/cpu/dma-sh4a.h b/arch/sh/include/cpu-sh4/cpu/dma-sh4a.h index e734ea47d8a0..9647e681fd27 100644 --- a/arch/sh/include/cpu-sh4/cpu/dma-sh4a.h +++ b/arch/sh/include/cpu-sh4/cpu/dma-sh4a.h | |||
@@ -8,20 +8,12 @@ | |||
8 | #define DMAE0_IRQ 78 /* DMA Error IRQ*/ | 8 | #define DMAE0_IRQ 78 /* DMA Error IRQ*/ |
9 | #define SH_DMAC_BASE0 0xFE008020 | 9 | #define SH_DMAC_BASE0 0xFE008020 |
10 | #define SH_DMARS_BASE0 0xFE009000 | 10 | #define SH_DMARS_BASE0 0xFE009000 |
11 | #define CHCR_TS_LOW_MASK 0x00000018 | ||
12 | #define CHCR_TS_LOW_SHIFT 3 | ||
13 | #define CHCR_TS_HIGH_MASK 0 | ||
14 | #define CHCR_TS_HIGH_SHIFT 0 | ||
15 | #elif defined(CONFIG_CPU_SUBTYPE_SH7722) | 11 | #elif defined(CONFIG_CPU_SUBTYPE_SH7722) |
16 | #define DMTE0_IRQ 48 | 12 | #define DMTE0_IRQ 48 |
17 | #define DMTE4_IRQ 76 | 13 | #define DMTE4_IRQ 76 |
18 | #define DMAE0_IRQ 78 /* DMA Error IRQ*/ | 14 | #define DMAE0_IRQ 78 /* DMA Error IRQ*/ |
19 | #define SH_DMAC_BASE0 0xFE008020 | 15 | #define SH_DMAC_BASE0 0xFE008020 |
20 | #define SH_DMARS_BASE0 0xFE009000 | 16 | #define SH_DMARS_BASE0 0xFE009000 |
21 | #define CHCR_TS_LOW_MASK 0x00000018 | ||
22 | #define CHCR_TS_LOW_SHIFT 3 | ||
23 | #define CHCR_TS_HIGH_MASK 0x00300000 | ||
24 | #define CHCR_TS_HIGH_SHIFT 20 | ||
25 | #elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \ | 17 | #elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \ |
26 | defined(CONFIG_CPU_SUBTYPE_SH7764) | 18 | defined(CONFIG_CPU_SUBTYPE_SH7764) |
27 | #define DMTE0_IRQ 34 | 19 | #define DMTE0_IRQ 34 |
@@ -29,10 +21,6 @@ | |||
29 | #define DMAE0_IRQ 38 | 21 | #define DMAE0_IRQ 38 |
30 | #define SH_DMAC_BASE0 0xFF608020 | 22 | #define SH_DMAC_BASE0 0xFF608020 |
31 | #define SH_DMARS_BASE0 0xFF609000 | 23 | #define SH_DMARS_BASE0 0xFF609000 |
32 | #define CHCR_TS_LOW_MASK 0x00000018 | ||
33 | #define CHCR_TS_LOW_SHIFT 3 | ||
34 | #define CHCR_TS_HIGH_MASK 0 | ||
35 | #define CHCR_TS_HIGH_SHIFT 0 | ||
36 | #elif defined(CONFIG_CPU_SUBTYPE_SH7723) | 24 | #elif defined(CONFIG_CPU_SUBTYPE_SH7723) |
37 | #define DMTE0_IRQ 48 /* DMAC0A*/ | 25 | #define DMTE0_IRQ 48 /* DMAC0A*/ |
38 | #define DMTE4_IRQ 76 /* DMAC0B */ | 26 | #define DMTE4_IRQ 76 /* DMAC0B */ |
@@ -46,10 +34,6 @@ | |||
46 | #define SH_DMAC_BASE0 0xFE008020 | 34 | #define SH_DMAC_BASE0 0xFE008020 |
47 | #define SH_DMAC_BASE1 0xFDC08020 | 35 | #define SH_DMAC_BASE1 0xFDC08020 |
48 | #define SH_DMARS_BASE0 0xFDC09000 | 36 | #define SH_DMARS_BASE0 0xFDC09000 |
49 | #define CHCR_TS_LOW_MASK 0x00000018 | ||
50 | #define CHCR_TS_LOW_SHIFT 3 | ||
51 | #define CHCR_TS_HIGH_MASK 0 | ||
52 | #define CHCR_TS_HIGH_SHIFT 0 | ||
53 | #elif defined(CONFIG_CPU_SUBTYPE_SH7724) | 37 | #elif defined(CONFIG_CPU_SUBTYPE_SH7724) |
54 | #define DMTE0_IRQ 48 /* DMAC0A*/ | 38 | #define DMTE0_IRQ 48 /* DMAC0A*/ |
55 | #define DMTE4_IRQ 76 /* DMAC0B */ | 39 | #define DMTE4_IRQ 76 /* DMAC0B */ |
@@ -64,10 +48,6 @@ | |||
64 | #define SH_DMAC_BASE1 0xFDC08020 | 48 | #define SH_DMAC_BASE1 0xFDC08020 |
65 | #define SH_DMARS_BASE0 0xFE009000 | 49 | #define SH_DMARS_BASE0 0xFE009000 |
66 | #define SH_DMARS_BASE1 0xFDC09000 | 50 | #define SH_DMARS_BASE1 0xFDC09000 |
67 | #define CHCR_TS_LOW_MASK 0x00000018 | ||
68 | #define CHCR_TS_LOW_SHIFT 3 | ||
69 | #define CHCR_TS_HIGH_MASK 0x00600000 | ||
70 | #define CHCR_TS_HIGH_SHIFT 21 | ||
71 | #elif defined(CONFIG_CPU_SUBTYPE_SH7780) | 51 | #elif defined(CONFIG_CPU_SUBTYPE_SH7780) |
72 | #define DMTE0_IRQ 34 | 52 | #define DMTE0_IRQ 34 |
73 | #define DMTE4_IRQ 44 | 53 | #define DMTE4_IRQ 44 |
@@ -80,10 +60,6 @@ | |||
80 | #define SH_DMAC_BASE0 0xFC808020 | 60 | #define SH_DMAC_BASE0 0xFC808020 |
81 | #define SH_DMAC_BASE1 0xFC818020 | 61 | #define SH_DMAC_BASE1 0xFC818020 |
82 | #define SH_DMARS_BASE0 0xFC809000 | 62 | #define SH_DMARS_BASE0 0xFC809000 |
83 | #define CHCR_TS_LOW_MASK 0x00000018 | ||
84 | #define CHCR_TS_LOW_SHIFT 3 | ||
85 | #define CHCR_TS_HIGH_MASK 0 | ||
86 | #define CHCR_TS_HIGH_SHIFT 0 | ||
87 | #else /* SH7785 */ | 63 | #else /* SH7785 */ |
88 | #define DMTE0_IRQ 33 | 64 | #define DMTE0_IRQ 33 |
89 | #define DMTE4_IRQ 37 | 65 | #define DMTE4_IRQ 37 |
@@ -97,10 +73,6 @@ | |||
97 | #define SH_DMAC_BASE0 0xFC808020 | 73 | #define SH_DMAC_BASE0 0xFC808020 |
98 | #define SH_DMAC_BASE1 0xFCC08020 | 74 | #define SH_DMAC_BASE1 0xFCC08020 |
99 | #define SH_DMARS_BASE0 0xFC809000 | 75 | #define SH_DMARS_BASE0 0xFC809000 |
100 | #define CHCR_TS_LOW_MASK 0x00000018 | ||
101 | #define CHCR_TS_LOW_SHIFT 3 | ||
102 | #define CHCR_TS_HIGH_MASK 0 | ||
103 | #define CHCR_TS_HIGH_SHIFT 0 | ||
104 | #endif | 76 | #endif |
105 | 77 | ||
106 | #define REQ_HE 0x000000C0 | 78 | #define REQ_HE 0x000000C0 |
@@ -108,38 +80,4 @@ | |||
108 | #define REQ_LE 0x00000040 | 80 | #define REQ_LE 0x00000040 |
109 | #define TM_BURST 0x00000020 | 81 | #define TM_BURST 0x00000020 |
110 | 82 | ||
111 | /* | ||
112 | * The SuperH DMAC supports a number of transmit sizes, we list them here, | ||
113 | * with their respective values as they appear in the CHCR registers. | ||
114 | * | ||
115 | * Defaults to a 64-bit transfer size. | ||
116 | */ | ||
117 | enum { | ||
118 | XMIT_SZ_8BIT = 0, | ||
119 | XMIT_SZ_16BIT = 1, | ||
120 | XMIT_SZ_32BIT = 2, | ||
121 | XMIT_SZ_64BIT = 7, | ||
122 | XMIT_SZ_128BIT = 3, | ||
123 | XMIT_SZ_256BIT = 4, | ||
124 | XMIT_SZ_128BIT_BLK = 0xb, | ||
125 | XMIT_SZ_256BIT_BLK = 0xc, | ||
126 | }; | ||
127 | |||
128 | /* | ||
129 | * The DMA count is defined as the number of bytes to transfer. | ||
130 | */ | ||
131 | #define TS_SHIFT { \ | ||
132 | [XMIT_SZ_8BIT] = 0, \ | ||
133 | [XMIT_SZ_16BIT] = 1, \ | ||
134 | [XMIT_SZ_32BIT] = 2, \ | ||
135 | [XMIT_SZ_64BIT] = 3, \ | ||
136 | [XMIT_SZ_128BIT] = 4, \ | ||
137 | [XMIT_SZ_256BIT] = 5, \ | ||
138 | [XMIT_SZ_128BIT_BLK] = 4, \ | ||
139 | [XMIT_SZ_256BIT_BLK] = 5, \ | ||
140 | } | ||
141 | |||
142 | #define TS_INDEX2VAL(i) ((((i) & 3) << CHCR_TS_LOW_SHIFT) | \ | ||
143 | ((((i) >> 2) & 3) << CHCR_TS_HIGH_SHIFT)) | ||
144 | |||
145 | #endif /* __ASM_SH_CPU_SH4_DMA_SH7780_H */ | 83 | #endif /* __ASM_SH_CPU_SH4_DMA_SH7780_H */ |
diff --git a/arch/sh/include/cpu-sh4/cpu/dma.h b/arch/sh/include/cpu-sh4/cpu/dma.h index 114a369705bc..ca747e93c2ed 100644 --- a/arch/sh/include/cpu-sh4/cpu/dma.h +++ b/arch/sh/include/cpu-sh4/cpu/dma.h | |||
@@ -5,9 +5,8 @@ | |||
5 | 5 | ||
6 | #ifdef CONFIG_CPU_SH4A | 6 | #ifdef CONFIG_CPU_SH4A |
7 | 7 | ||
8 | #define DMAOR_INIT (DMAOR_DME) | ||
9 | |||
10 | #include <cpu/dma-sh4a.h> | 8 | #include <cpu/dma-sh4a.h> |
9 | |||
11 | #else /* CONFIG_CPU_SH4A */ | 10 | #else /* CONFIG_CPU_SH4A */ |
12 | /* | 11 | /* |
13 | * SH7750/SH7751/SH7760 | 12 | * SH7750/SH7751/SH7760 |
@@ -17,7 +16,6 @@ | |||
17 | #define DMTE6_IRQ 46 | 16 | #define DMTE6_IRQ 46 |
18 | #define DMAE0_IRQ 38 | 17 | #define DMAE0_IRQ 38 |
19 | 18 | ||
20 | #define DMAOR_INIT (0x8000|DMAOR_DME) | ||
21 | #define SH_DMAC_BASE0 0xffa00000 | 19 | #define SH_DMAC_BASE0 0xffa00000 |
22 | #define SH_DMAC_BASE1 0xffa00070 | 20 | #define SH_DMAC_BASE1 0xffa00070 |
23 | /* Definitions for the SuperH DMAC */ | 21 | /* Definitions for the SuperH DMAC */ |
@@ -27,40 +25,8 @@ | |||
27 | #define TS_32 0x00000030 | 25 | #define TS_32 0x00000030 |
28 | #define TS_64 0x00000000 | 26 | #define TS_64 0x00000000 |
29 | 27 | ||
30 | #define CHCR_TS_LOW_MASK 0x70 | ||
31 | #define CHCR_TS_LOW_SHIFT 4 | ||
32 | #define CHCR_TS_HIGH_MASK 0 | ||
33 | #define CHCR_TS_HIGH_SHIFT 0 | ||
34 | |||
35 | #define DMAOR_COD 0x00000008 | 28 | #define DMAOR_COD 0x00000008 |
36 | 29 | ||
37 | /* | ||
38 | * The SuperH DMAC supports a number of transmit sizes, we list them here, | ||
39 | * with their respective values as they appear in the CHCR registers. | ||
40 | * | ||
41 | * Defaults to a 64-bit transfer size. | ||
42 | */ | ||
43 | enum { | ||
44 | XMIT_SZ_8BIT = 1, | ||
45 | XMIT_SZ_16BIT = 2, | ||
46 | XMIT_SZ_32BIT = 3, | ||
47 | XMIT_SZ_64BIT = 0, | ||
48 | XMIT_SZ_256BIT = 4, | ||
49 | }; | ||
50 | |||
51 | /* | ||
52 | * The DMA count is defined as the number of bytes to transfer. | ||
53 | */ | ||
54 | #define TS_SHIFT { \ | ||
55 | [XMIT_SZ_8BIT] = 0, \ | ||
56 | [XMIT_SZ_16BIT] = 1, \ | ||
57 | [XMIT_SZ_32BIT] = 2, \ | ||
58 | [XMIT_SZ_64BIT] = 3, \ | ||
59 | [XMIT_SZ_256BIT] = 5, \ | ||
60 | } | ||
61 | |||
62 | #define TS_INDEX2VAL(i) (((i) & 7) << CHCR_TS_LOW_SHIFT) | ||
63 | |||
64 | #endif | 30 | #endif |
65 | 31 | ||
66 | #endif /* __ASM_CPU_SH4_DMA_H */ | 32 | #endif /* __ASM_CPU_SH4_DMA_H */ |
diff --git a/arch/sh/include/mach-migor/mach/migor.h b/arch/sh/include/mach-migor/mach/migor.h index cee6cb88e020..42fccf93412e 100644 --- a/arch/sh/include/mach-migor/mach/migor.h +++ b/arch/sh/include/mach-migor/mach/migor.h | |||
@@ -1,6 +1,7 @@ | |||
1 | #ifndef __ASM_SH_MIGOR_H | 1 | #ifndef __ASM_SH_MIGOR_H |
2 | #define __ASM_SH_MIGOR_H | 2 | #define __ASM_SH_MIGOR_H |
3 | 3 | ||
4 | #define PORT_MSELCRA 0xa4050180 | ||
4 | #define PORT_MSELCRB 0xa4050182 | 5 | #define PORT_MSELCRB 0xa4050182 |
5 | #define BSC_CS4BCR 0xfec10010 | 6 | #define BSC_CS4BCR 0xfec10010 |
6 | #define BSC_CS6ABCR 0xfec1001c | 7 | #define BSC_CS6ABCR 0xfec1001c |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c index ef3f97827808..fd7e3639e845 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c | |||
@@ -7,19 +7,167 @@ | |||
7 | * License. See the file "COPYING" in the main directory of this archive | 7 | * License. See the file "COPYING" in the main directory of this archive |
8 | * for more details. | 8 | * for more details. |
9 | */ | 9 | */ |
10 | #include <linux/platform_device.h> | ||
11 | #include <linux/init.h> | 10 | #include <linux/init.h> |
11 | #include <linux/mm.h> | ||
12 | #include <linux/platform_device.h> | ||
12 | #include <linux/serial.h> | 13 | #include <linux/serial.h> |
13 | #include <linux/serial_sci.h> | 14 | #include <linux/serial_sci.h> |
14 | #include <linux/mm.h> | 15 | #include <linux/sh_timer.h> |
15 | #include <linux/uio_driver.h> | 16 | #include <linux/uio_driver.h> |
16 | #include <linux/usb/m66592.h> | 17 | #include <linux/usb/m66592.h> |
17 | #include <linux/sh_timer.h> | 18 | |
18 | #include <asm/clock.h> | 19 | #include <asm/clock.h> |
20 | #include <asm/dmaengine.h> | ||
19 | #include <asm/mmzone.h> | 21 | #include <asm/mmzone.h> |
20 | #include <asm/dma-sh.h> | 22 | #include <asm/siu.h> |
23 | |||
24 | #include <cpu/dma-register.h> | ||
21 | #include <cpu/sh7722.h> | 25 | #include <cpu/sh7722.h> |
22 | 26 | ||
27 | static struct sh_dmae_slave_config sh7722_dmae_slaves[] = { | ||
28 | { | ||
29 | .slave_id = SHDMA_SLAVE_SCIF0_TX, | ||
30 | .addr = 0xffe0000c, | ||
31 | .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), | ||
32 | .mid_rid = 0x21, | ||
33 | }, { | ||
34 | .slave_id = SHDMA_SLAVE_SCIF0_RX, | ||
35 | .addr = 0xffe00014, | ||
36 | .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), | ||
37 | .mid_rid = 0x22, | ||
38 | }, { | ||
39 | .slave_id = SHDMA_SLAVE_SCIF1_TX, | ||
40 | .addr = 0xffe1000c, | ||
41 | .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), | ||
42 | .mid_rid = 0x25, | ||
43 | }, { | ||
44 | .slave_id = SHDMA_SLAVE_SCIF1_RX, | ||
45 | .addr = 0xffe10014, | ||
46 | .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), | ||
47 | .mid_rid = 0x26, | ||
48 | }, { | ||
49 | .slave_id = SHDMA_SLAVE_SCIF2_TX, | ||
50 | .addr = 0xffe2000c, | ||
51 | .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), | ||
52 | .mid_rid = 0x29, | ||
53 | }, { | ||
54 | .slave_id = SHDMA_SLAVE_SCIF2_RX, | ||
55 | .addr = 0xffe20014, | ||
56 | .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), | ||
57 | .mid_rid = 0x2a, | ||
58 | }, { | ||
59 | .slave_id = SHDMA_SLAVE_SIUA_TX, | ||
60 | .addr = 0xa454c098, | ||
61 | .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), | ||
62 | .mid_rid = 0xb1, | ||
63 | }, { | ||
64 | .slave_id = SHDMA_SLAVE_SIUA_RX, | ||
65 | .addr = 0xa454c090, | ||
66 | .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), | ||
67 | .mid_rid = 0xb2, | ||
68 | }, { | ||
69 | .slave_id = SHDMA_SLAVE_SIUB_TX, | ||
70 | .addr = 0xa454c09c, | ||
71 | .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), | ||
72 | .mid_rid = 0xb5, | ||
73 | }, { | ||
74 | .slave_id = SHDMA_SLAVE_SIUB_RX, | ||
75 | .addr = 0xa454c094, | ||
76 | .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), | ||
77 | .mid_rid = 0xb6, | ||
78 | }, | ||
79 | }; | ||
80 | |||
81 | static struct sh_dmae_channel sh7722_dmae_channels[] = { | ||
82 | { | ||
83 | .offset = 0, | ||
84 | .dmars = 0, | ||
85 | .dmars_bit = 0, | ||
86 | }, { | ||
87 | .offset = 0x10, | ||
88 | .dmars = 0, | ||
89 | .dmars_bit = 8, | ||
90 | }, { | ||
91 | .offset = 0x20, | ||
92 | .dmars = 4, | ||
93 | .dmars_bit = 0, | ||
94 | }, { | ||
95 | .offset = 0x30, | ||
96 | .dmars = 4, | ||
97 | .dmars_bit = 8, | ||
98 | }, { | ||
99 | .offset = 0x50, | ||
100 | .dmars = 8, | ||
101 | .dmars_bit = 0, | ||
102 | }, { | ||
103 | .offset = 0x60, | ||
104 | .dmars = 8, | ||
105 | .dmars_bit = 8, | ||
106 | } | ||
107 | }; | ||
108 | |||
109 | static unsigned int ts_shift[] = TS_SHIFT; | ||
110 | |||
111 | static struct sh_dmae_pdata dma_platform_data = { | ||
112 | .slave = sh7722_dmae_slaves, | ||
113 | .slave_num = ARRAY_SIZE(sh7722_dmae_slaves), | ||
114 | .channel = sh7722_dmae_channels, | ||
115 | .channel_num = ARRAY_SIZE(sh7722_dmae_channels), | ||
116 | .ts_low_shift = CHCR_TS_LOW_SHIFT, | ||
117 | .ts_low_mask = CHCR_TS_LOW_MASK, | ||
118 | .ts_high_shift = CHCR_TS_HIGH_SHIFT, | ||
119 | .ts_high_mask = CHCR_TS_HIGH_MASK, | ||
120 | .ts_shift = ts_shift, | ||
121 | .ts_shift_num = ARRAY_SIZE(ts_shift), | ||
122 | .dmaor_init = DMAOR_INIT, | ||
123 | }; | ||
124 | |||
125 | static struct resource sh7722_dmae_resources[] = { | ||
126 | [0] = { | ||
127 | /* Channel registers and DMAOR */ | ||
128 | .start = 0xfe008020, | ||
129 | .end = 0xfe00808f, | ||
130 | .flags = IORESOURCE_MEM, | ||
131 | }, | ||
132 | [1] = { | ||
133 | /* DMARSx */ | ||
134 | .start = 0xfe009000, | ||
135 | .end = 0xfe00900b, | ||
136 | .flags = IORESOURCE_MEM, | ||
137 | }, | ||
138 | { | ||
139 | /* DMA error IRQ */ | ||
140 | .start = 78, | ||
141 | .end = 78, | ||
142 | .flags = IORESOURCE_IRQ, | ||
143 | }, | ||
144 | { | ||
145 | /* IRQ for channels 0-3 */ | ||
146 | .start = 48, | ||
147 | .end = 51, | ||
148 | .flags = IORESOURCE_IRQ, | ||
149 | }, | ||
150 | { | ||
151 | /* IRQ for channels 4-5 */ | ||
152 | .start = 76, | ||
153 | .end = 77, | ||
154 | .flags = IORESOURCE_IRQ, | ||
155 | }, | ||
156 | }; | ||
157 | |||
158 | struct platform_device dma_device = { | ||
159 | .name = "sh-dma-engine", | ||
160 | .id = -1, | ||
161 | .resource = sh7722_dmae_resources, | ||
162 | .num_resources = ARRAY_SIZE(sh7722_dmae_resources), | ||
163 | .dev = { | ||
164 | .platform_data = &dma_platform_data, | ||
165 | }, | ||
166 | .archdata = { | ||
167 | .hwblk_id = HWBLK_DMAC, | ||
168 | }, | ||
169 | }; | ||
170 | |||
23 | /* Serial */ | 171 | /* Serial */ |
24 | static struct plat_sci_port scif0_platform_data = { | 172 | static struct plat_sci_port scif0_platform_data = { |
25 | .mapbase = 0xffe00000, | 173 | .mapbase = 0xffe00000, |
@@ -388,15 +536,36 @@ static struct platform_device tmu2_device = { | |||
388 | }, | 536 | }, |
389 | }; | 537 | }; |
390 | 538 | ||
391 | static struct sh_dmae_pdata dma_platform_data = { | 539 | static struct siu_platform siu_platform_data = { |
392 | .mode = 0, | 540 | .dma_dev = &dma_device.dev, |
541 | .dma_slave_tx_a = SHDMA_SLAVE_SIUA_TX, | ||
542 | .dma_slave_rx_a = SHDMA_SLAVE_SIUA_RX, | ||
543 | .dma_slave_tx_b = SHDMA_SLAVE_SIUB_TX, | ||
544 | .dma_slave_rx_b = SHDMA_SLAVE_SIUB_RX, | ||
393 | }; | 545 | }; |
394 | 546 | ||
395 | static struct platform_device dma_device = { | 547 | static struct resource siu_resources[] = { |
396 | .name = "sh-dma-engine", | 548 | [0] = { |
549 | .start = 0xa4540000, | ||
550 | .end = 0xa454c10f, | ||
551 | .flags = IORESOURCE_MEM, | ||
552 | }, | ||
553 | [1] = { | ||
554 | .start = 108, | ||
555 | .flags = IORESOURCE_IRQ, | ||
556 | }, | ||
557 | }; | ||
558 | |||
559 | static struct platform_device siu_device = { | ||
560 | .name = "sh_siu", | ||
397 | .id = -1, | 561 | .id = -1, |
398 | .dev = { | 562 | .dev = { |
399 | .platform_data = &dma_platform_data, | 563 | .platform_data = &siu_platform_data, |
564 | }, | ||
565 | .resource = siu_resources, | ||
566 | .num_resources = ARRAY_SIZE(siu_resources), | ||
567 | .archdata = { | ||
568 | .hwblk_id = HWBLK_SIU, | ||
400 | }, | 569 | }, |
401 | }; | 570 | }; |
402 | 571 | ||
@@ -414,6 +583,7 @@ static struct platform_device *sh7722_devices[] __initdata = { | |||
414 | &vpu_device, | 583 | &vpu_device, |
415 | &veu_device, | 584 | &veu_device, |
416 | &jpu_device, | 585 | &jpu_device, |
586 | &siu_device, | ||
417 | &dma_device, | 587 | &dma_device, |
418 | }; | 588 | }; |
419 | 589 | ||
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c index 31e3451f7e3d..e7fa2a92fc1f 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c | |||
@@ -21,22 +21,189 @@ | |||
21 | #include <linux/sh_timer.h> | 21 | #include <linux/sh_timer.h> |
22 | #include <linux/io.h> | 22 | #include <linux/io.h> |
23 | #include <linux/notifier.h> | 23 | #include <linux/notifier.h> |
24 | |||
24 | #include <asm/suspend.h> | 25 | #include <asm/suspend.h> |
25 | #include <asm/clock.h> | 26 | #include <asm/clock.h> |
26 | #include <asm/dma-sh.h> | 27 | #include <asm/dmaengine.h> |
27 | #include <asm/mmzone.h> | 28 | #include <asm/mmzone.h> |
29 | |||
30 | #include <cpu/dma-register.h> | ||
28 | #include <cpu/sh7724.h> | 31 | #include <cpu/sh7724.h> |
29 | 32 | ||
30 | /* DMA */ | 33 | /* DMA */ |
31 | static struct sh_dmae_pdata dma_platform_data = { | 34 | static struct sh_dmae_channel sh7724_dmae0_channels[] = { |
32 | .mode = SHDMA_DMAOR1, | 35 | { |
36 | .offset = 0, | ||
37 | .dmars = 0, | ||
38 | .dmars_bit = 0, | ||
39 | }, { | ||
40 | .offset = 0x10, | ||
41 | .dmars = 0, | ||
42 | .dmars_bit = 8, | ||
43 | }, { | ||
44 | .offset = 0x20, | ||
45 | .dmars = 4, | ||
46 | .dmars_bit = 0, | ||
47 | }, { | ||
48 | .offset = 0x30, | ||
49 | .dmars = 4, | ||
50 | .dmars_bit = 8, | ||
51 | }, { | ||
52 | .offset = 0x50, | ||
53 | .dmars = 8, | ||
54 | .dmars_bit = 0, | ||
55 | }, { | ||
56 | .offset = 0x60, | ||
57 | .dmars = 8, | ||
58 | .dmars_bit = 8, | ||
59 | } | ||
60 | }; | ||
61 | |||
62 | static struct sh_dmae_channel sh7724_dmae1_channels[] = { | ||
63 | { | ||
64 | .offset = 0, | ||
65 | .dmars = 0, | ||
66 | .dmars_bit = 0, | ||
67 | }, { | ||
68 | .offset = 0x10, | ||
69 | .dmars = 0, | ||
70 | .dmars_bit = 8, | ||
71 | }, { | ||
72 | .offset = 0x20, | ||
73 | .dmars = 4, | ||
74 | .dmars_bit = 0, | ||
75 | }, { | ||
76 | .offset = 0x30, | ||
77 | .dmars = 4, | ||
78 | .dmars_bit = 8, | ||
79 | }, { | ||
80 | .offset = 0x50, | ||
81 | .dmars = 8, | ||
82 | .dmars_bit = 0, | ||
83 | }, { | ||
84 | .offset = 0x60, | ||
85 | .dmars = 8, | ||
86 | .dmars_bit = 8, | ||
87 | } | ||
88 | }; | ||
89 | |||
90 | static unsigned int ts_shift[] = TS_SHIFT; | ||
91 | |||
92 | static struct sh_dmae_pdata dma0_platform_data = { | ||
93 | .channel = sh7724_dmae0_channels, | ||
94 | .channel_num = ARRAY_SIZE(sh7724_dmae0_channels), | ||
95 | .ts_low_shift = CHCR_TS_LOW_SHIFT, | ||
96 | .ts_low_mask = CHCR_TS_LOW_MASK, | ||
97 | .ts_high_shift = CHCR_TS_HIGH_SHIFT, | ||
98 | .ts_high_mask = CHCR_TS_HIGH_MASK, | ||
99 | .ts_shift = ts_shift, | ||
100 | .ts_shift_num = ARRAY_SIZE(ts_shift), | ||
101 | .dmaor_init = DMAOR_INIT, | ||
102 | }; | ||
103 | |||
104 | static struct sh_dmae_pdata dma1_platform_data = { | ||
105 | .channel = sh7724_dmae1_channels, | ||
106 | .channel_num = ARRAY_SIZE(sh7724_dmae1_channels), | ||
107 | .ts_low_shift = CHCR_TS_LOW_SHIFT, | ||
108 | .ts_low_mask = CHCR_TS_LOW_MASK, | ||
109 | .ts_high_shift = CHCR_TS_HIGH_SHIFT, | ||
110 | .ts_high_mask = CHCR_TS_HIGH_MASK, | ||
111 | .ts_shift = ts_shift, | ||
112 | .ts_shift_num = ARRAY_SIZE(ts_shift), | ||
113 | .dmaor_init = DMAOR_INIT, | ||
114 | }; | ||
115 | |||
116 | /* Resource order important! */ | ||
117 | static struct resource sh7724_dmae0_resources[] = { | ||
118 | { | ||
119 | /* Channel registers and DMAOR */ | ||
120 | .start = 0xfe008020, | ||
121 | .end = 0xfe00808f, | ||
122 | .flags = IORESOURCE_MEM, | ||
123 | }, | ||
124 | { | ||
125 | /* DMARSx */ | ||
126 | .start = 0xfe009000, | ||
127 | .end = 0xfe00900b, | ||
128 | .flags = IORESOURCE_MEM, | ||
129 | }, | ||
130 | { | ||
131 | /* DMA error IRQ */ | ||
132 | .start = 78, | ||
133 | .end = 78, | ||
134 | .flags = IORESOURCE_IRQ, | ||
135 | }, | ||
136 | { | ||
137 | /* IRQ for channels 0-3 */ | ||
138 | .start = 48, | ||
139 | .end = 51, | ||
140 | .flags = IORESOURCE_IRQ, | ||
141 | }, | ||
142 | { | ||
143 | /* IRQ for channels 4-5 */ | ||
144 | .start = 76, | ||
145 | .end = 77, | ||
146 | .flags = IORESOURCE_IRQ, | ||
147 | }, | ||
33 | }; | 148 | }; |
34 | 149 | ||
35 | static struct platform_device dma_device = { | 150 | /* Resource order important! */ |
36 | .name = "sh-dma-engine", | 151 | static struct resource sh7724_dmae1_resources[] = { |
37 | .id = -1, | 152 | { |
38 | .dev = { | 153 | /* Channel registers and DMAOR */ |
39 | .platform_data = &dma_platform_data, | 154 | .start = 0xfdc08020, |
155 | .end = 0xfdc0808f, | ||
156 | .flags = IORESOURCE_MEM, | ||
157 | }, | ||
158 | { | ||
159 | /* DMARSx */ | ||
160 | .start = 0xfdc09000, | ||
161 | .end = 0xfdc0900b, | ||
162 | .flags = IORESOURCE_MEM, | ||
163 | }, | ||
164 | { | ||
165 | /* DMA error IRQ */ | ||
166 | .start = 74, | ||
167 | .end = 74, | ||
168 | .flags = IORESOURCE_IRQ, | ||
169 | }, | ||
170 | { | ||
171 | /* IRQ for channels 0-3 */ | ||
172 | .start = 40, | ||
173 | .end = 43, | ||
174 | .flags = IORESOURCE_IRQ, | ||
175 | }, | ||
176 | { | ||
177 | /* IRQ for channels 4-5 */ | ||
178 | .start = 72, | ||
179 | .end = 73, | ||
180 | .flags = IORESOURCE_IRQ, | ||
181 | }, | ||
182 | }; | ||
183 | |||
184 | static struct platform_device dma0_device = { | ||
185 | .name = "sh-dma-engine", | ||
186 | .id = 0, | ||
187 | .resource = sh7724_dmae0_resources, | ||
188 | .num_resources = ARRAY_SIZE(sh7724_dmae0_resources), | ||
189 | .dev = { | ||
190 | .platform_data = &dma0_platform_data, | ||
191 | }, | ||
192 | .archdata = { | ||
193 | .hwblk_id = HWBLK_DMAC0, | ||
194 | }, | ||
195 | }; | ||
196 | |||
197 | static struct platform_device dma1_device = { | ||
198 | .name = "sh-dma-engine", | ||
199 | .id = 1, | ||
200 | .resource = sh7724_dmae1_resources, | ||
201 | .num_resources = ARRAY_SIZE(sh7724_dmae1_resources), | ||
202 | .dev = { | ||
203 | .platform_data = &dma1_platform_data, | ||
204 | }, | ||
205 | .archdata = { | ||
206 | .hwblk_id = HWBLK_DMAC1, | ||
40 | }, | 207 | }, |
41 | }; | 208 | }; |
42 | 209 | ||
@@ -663,7 +830,8 @@ static struct platform_device *sh7724_devices[] __initdata = { | |||
663 | &tmu3_device, | 830 | &tmu3_device, |
664 | &tmu4_device, | 831 | &tmu4_device, |
665 | &tmu5_device, | 832 | &tmu5_device, |
666 | &dma_device, | 833 | &dma0_device, |
834 | &dma1_device, | ||
667 | &rtc_device, | 835 | &rtc_device, |
668 | &iic0_device, | 836 | &iic0_device, |
669 | &iic1_device, | 837 | &iic1_device, |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c index f8f21618d785..02e792c90de6 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c | |||
@@ -13,7 +13,10 @@ | |||
13 | #include <linux/io.h> | 13 | #include <linux/io.h> |
14 | #include <linux/serial_sci.h> | 14 | #include <linux/serial_sci.h> |
15 | #include <linux/sh_timer.h> | 15 | #include <linux/sh_timer.h> |
16 | #include <asm/dma-sh.h> | 16 | |
17 | #include <asm/dmaengine.h> | ||
18 | |||
19 | #include <cpu/dma-register.h> | ||
17 | 20 | ||
18 | static struct plat_sci_port scif0_platform_data = { | 21 | static struct plat_sci_port scif0_platform_data = { |
19 | .mapbase = 0xffe00000, | 22 | .mapbase = 0xffe00000, |
@@ -247,15 +250,131 @@ static struct platform_device rtc_device = { | |||
247 | .resource = rtc_resources, | 250 | .resource = rtc_resources, |
248 | }; | 251 | }; |
249 | 252 | ||
250 | static struct sh_dmae_pdata dma_platform_data = { | 253 | /* DMA */ |
251 | .mode = (SHDMA_MIX_IRQ | SHDMA_DMAOR1), | 254 | static struct sh_dmae_channel sh7780_dmae0_channels[] = { |
255 | { | ||
256 | .offset = 0, | ||
257 | .dmars = 0, | ||
258 | .dmars_bit = 0, | ||
259 | }, { | ||
260 | .offset = 0x10, | ||
261 | .dmars = 0, | ||
262 | .dmars_bit = 8, | ||
263 | }, { | ||
264 | .offset = 0x20, | ||
265 | .dmars = 4, | ||
266 | .dmars_bit = 0, | ||
267 | }, { | ||
268 | .offset = 0x30, | ||
269 | .dmars = 4, | ||
270 | .dmars_bit = 8, | ||
271 | }, { | ||
272 | .offset = 0x50, | ||
273 | .dmars = 8, | ||
274 | .dmars_bit = 0, | ||
275 | }, { | ||
276 | .offset = 0x60, | ||
277 | .dmars = 8, | ||
278 | .dmars_bit = 8, | ||
279 | } | ||
280 | }; | ||
281 | |||
282 | static struct sh_dmae_channel sh7780_dmae1_channels[] = { | ||
283 | { | ||
284 | .offset = 0, | ||
285 | }, { | ||
286 | .offset = 0x10, | ||
287 | }, { | ||
288 | .offset = 0x20, | ||
289 | }, { | ||
290 | .offset = 0x30, | ||
291 | }, { | ||
292 | .offset = 0x50, | ||
293 | }, { | ||
294 | .offset = 0x60, | ||
295 | } | ||
296 | }; | ||
297 | |||
298 | static unsigned int ts_shift[] = TS_SHIFT; | ||
299 | |||
300 | static struct sh_dmae_pdata dma0_platform_data = { | ||
301 | .channel = sh7780_dmae0_channels, | ||
302 | .channel_num = ARRAY_SIZE(sh7780_dmae0_channels), | ||
303 | .ts_low_shift = CHCR_TS_LOW_SHIFT, | ||
304 | .ts_low_mask = CHCR_TS_LOW_MASK, | ||
305 | .ts_high_shift = CHCR_TS_HIGH_SHIFT, | ||
306 | .ts_high_mask = CHCR_TS_HIGH_MASK, | ||
307 | .ts_shift = ts_shift, | ||
308 | .ts_shift_num = ARRAY_SIZE(ts_shift), | ||
309 | .dmaor_init = DMAOR_INIT, | ||
310 | }; | ||
311 | |||
312 | static struct sh_dmae_pdata dma1_platform_data = { | ||
313 | .channel = sh7780_dmae1_channels, | ||
314 | .channel_num = ARRAY_SIZE(sh7780_dmae1_channels), | ||
315 | .ts_low_shift = CHCR_TS_LOW_SHIFT, | ||
316 | .ts_low_mask = CHCR_TS_LOW_MASK, | ||
317 | .ts_high_shift = CHCR_TS_HIGH_SHIFT, | ||
318 | .ts_high_mask = CHCR_TS_HIGH_MASK, | ||
319 | .ts_shift = ts_shift, | ||
320 | .ts_shift_num = ARRAY_SIZE(ts_shift), | ||
321 | .dmaor_init = DMAOR_INIT, | ||
252 | }; | 322 | }; |
253 | 323 | ||
254 | static struct platform_device dma_device = { | 324 | static struct resource sh7780_dmae0_resources[] = { |
325 | [0] = { | ||
326 | /* Channel registers and DMAOR */ | ||
327 | .start = 0xfc808020, | ||
328 | .end = 0xfc80808f, | ||
329 | .flags = IORESOURCE_MEM, | ||
330 | }, | ||
331 | [1] = { | ||
332 | /* DMARSx */ | ||
333 | .start = 0xfc809000, | ||
334 | .end = 0xfc80900b, | ||
335 | .flags = IORESOURCE_MEM, | ||
336 | }, | ||
337 | { | ||
338 | /* Real DMA error IRQ is 38, and channel IRQs are 34-37, 44-45 */ | ||
339 | .start = 34, | ||
340 | .end = 34, | ||
341 | .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, | ||
342 | }, | ||
343 | }; | ||
344 | |||
345 | static struct resource sh7780_dmae1_resources[] = { | ||
346 | [0] = { | ||
347 | /* Channel registers and DMAOR */ | ||
348 | .start = 0xfc818020, | ||
349 | .end = 0xfc81808f, | ||
350 | .flags = IORESOURCE_MEM, | ||
351 | }, | ||
352 | /* DMAC1 has no DMARS */ | ||
353 | { | ||
354 | /* Real DMA error IRQ is 38, and channel IRQs are 46-47, 92-95 */ | ||
355 | .start = 46, | ||
356 | .end = 46, | ||
357 | .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, | ||
358 | }, | ||
359 | }; | ||
360 | |||
361 | static struct platform_device dma0_device = { | ||
255 | .name = "sh-dma-engine", | 362 | .name = "sh-dma-engine", |
256 | .id = -1, | 363 | .id = 0, |
364 | .resource = sh7780_dmae0_resources, | ||
365 | .num_resources = ARRAY_SIZE(sh7780_dmae0_resources), | ||
257 | .dev = { | 366 | .dev = { |
258 | .platform_data = &dma_platform_data, | 367 | .platform_data = &dma0_platform_data, |
368 | }, | ||
369 | }; | ||
370 | |||
371 | static struct platform_device dma1_device = { | ||
372 | .name = "sh-dma-engine", | ||
373 | .id = 1, | ||
374 | .resource = sh7780_dmae1_resources, | ||
375 | .num_resources = ARRAY_SIZE(sh7780_dmae1_resources), | ||
376 | .dev = { | ||
377 | .platform_data = &dma1_platform_data, | ||
259 | }, | 378 | }, |
260 | }; | 379 | }; |
261 | 380 | ||
@@ -269,7 +388,8 @@ static struct platform_device *sh7780_devices[] __initdata = { | |||
269 | &tmu4_device, | 388 | &tmu4_device, |
270 | &tmu5_device, | 389 | &tmu5_device, |
271 | &rtc_device, | 390 | &rtc_device, |
272 | &dma_device, | 391 | &dma0_device, |
392 | &dma1_device, | ||
273 | }; | 393 | }; |
274 | 394 | ||
275 | static int __init sh7780_devices_setup(void) | 395 | static int __init sh7780_devices_setup(void) |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c index 23448d8c6711..1fcd88b1671e 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c | |||
@@ -14,9 +14,12 @@ | |||
14 | #include <linux/io.h> | 14 | #include <linux/io.h> |
15 | #include <linux/mm.h> | 15 | #include <linux/mm.h> |
16 | #include <linux/sh_timer.h> | 16 | #include <linux/sh_timer.h> |
17 | #include <asm/dma-sh.h> | 17 | |
18 | #include <asm/dmaengine.h> | ||
18 | #include <asm/mmzone.h> | 19 | #include <asm/mmzone.h> |
19 | 20 | ||
21 | #include <cpu/dma-register.h> | ||
22 | |||
20 | static struct plat_sci_port scif0_platform_data = { | 23 | static struct plat_sci_port scif0_platform_data = { |
21 | .mapbase = 0xffea0000, | 24 | .mapbase = 0xffea0000, |
22 | .flags = UPF_BOOT_AUTOCONF, | 25 | .flags = UPF_BOOT_AUTOCONF, |
@@ -295,15 +298,131 @@ static struct platform_device tmu5_device = { | |||
295 | .num_resources = ARRAY_SIZE(tmu5_resources), | 298 | .num_resources = ARRAY_SIZE(tmu5_resources), |
296 | }; | 299 | }; |
297 | 300 | ||
298 | static struct sh_dmae_pdata dma_platform_data = { | 301 | /* DMA */ |
299 | .mode = (SHDMA_MIX_IRQ | SHDMA_DMAOR1), | 302 | static struct sh_dmae_channel sh7785_dmae0_channels[] = { |
303 | { | ||
304 | .offset = 0, | ||
305 | .dmars = 0, | ||
306 | .dmars_bit = 0, | ||
307 | }, { | ||
308 | .offset = 0x10, | ||
309 | .dmars = 0, | ||
310 | .dmars_bit = 8, | ||
311 | }, { | ||
312 | .offset = 0x20, | ||
313 | .dmars = 4, | ||
314 | .dmars_bit = 0, | ||
315 | }, { | ||
316 | .offset = 0x30, | ||
317 | .dmars = 4, | ||
318 | .dmars_bit = 8, | ||
319 | }, { | ||
320 | .offset = 0x50, | ||
321 | .dmars = 8, | ||
322 | .dmars_bit = 0, | ||
323 | }, { | ||
324 | .offset = 0x60, | ||
325 | .dmars = 8, | ||
326 | .dmars_bit = 8, | ||
327 | } | ||
328 | }; | ||
329 | |||
330 | static struct sh_dmae_channel sh7785_dmae1_channels[] = { | ||
331 | { | ||
332 | .offset = 0, | ||
333 | }, { | ||
334 | .offset = 0x10, | ||
335 | }, { | ||
336 | .offset = 0x20, | ||
337 | }, { | ||
338 | .offset = 0x30, | ||
339 | }, { | ||
340 | .offset = 0x50, | ||
341 | }, { | ||
342 | .offset = 0x60, | ||
343 | } | ||
344 | }; | ||
345 | |||
346 | static unsigned int ts_shift[] = TS_SHIFT; | ||
347 | |||
348 | static struct sh_dmae_pdata dma0_platform_data = { | ||
349 | .channel = sh7785_dmae0_channels, | ||
350 | .channel_num = ARRAY_SIZE(sh7785_dmae0_channels), | ||
351 | .ts_low_shift = CHCR_TS_LOW_SHIFT, | ||
352 | .ts_low_mask = CHCR_TS_LOW_MASK, | ||
353 | .ts_high_shift = CHCR_TS_HIGH_SHIFT, | ||
354 | .ts_high_mask = CHCR_TS_HIGH_MASK, | ||
355 | .ts_shift = ts_shift, | ||
356 | .ts_shift_num = ARRAY_SIZE(ts_shift), | ||
357 | .dmaor_init = DMAOR_INIT, | ||
358 | }; | ||
359 | |||
360 | static struct sh_dmae_pdata dma1_platform_data = { | ||
361 | .channel = sh7785_dmae1_channels, | ||
362 | .channel_num = ARRAY_SIZE(sh7785_dmae1_channels), | ||
363 | .ts_low_shift = CHCR_TS_LOW_SHIFT, | ||
364 | .ts_low_mask = CHCR_TS_LOW_MASK, | ||
365 | .ts_high_shift = CHCR_TS_HIGH_SHIFT, | ||
366 | .ts_high_mask = CHCR_TS_HIGH_MASK, | ||
367 | .ts_shift = ts_shift, | ||
368 | .ts_shift_num = ARRAY_SIZE(ts_shift), | ||
369 | .dmaor_init = DMAOR_INIT, | ||
300 | }; | 370 | }; |
301 | 371 | ||
302 | static struct platform_device dma_device = { | 372 | static struct resource sh7785_dmae0_resources[] = { |
373 | [0] = { | ||
374 | /* Channel registers and DMAOR */ | ||
375 | .start = 0xfc808020, | ||
376 | .end = 0xfc80808f, | ||
377 | .flags = IORESOURCE_MEM, | ||
378 | }, | ||
379 | [1] = { | ||
380 | /* DMARSx */ | ||
381 | .start = 0xfc809000, | ||
382 | .end = 0xfc80900b, | ||
383 | .flags = IORESOURCE_MEM, | ||
384 | }, | ||
385 | { | ||
386 | /* Real DMA error IRQ is 39, and channel IRQs are 33-38 */ | ||
387 | .start = 33, | ||
388 | .end = 33, | ||
389 | .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, | ||
390 | }, | ||
391 | }; | ||
392 | |||
393 | static struct resource sh7785_dmae1_resources[] = { | ||
394 | [0] = { | ||
395 | /* Channel registers and DMAOR */ | ||
396 | .start = 0xfcc08020, | ||
397 | .end = 0xfcc0808f, | ||
398 | .flags = IORESOURCE_MEM, | ||
399 | }, | ||
400 | /* DMAC1 has no DMARS */ | ||
401 | { | ||
402 | /* Real DMA error IRQ is 58, and channel IRQs are 52-57 */ | ||
403 | .start = 52, | ||
404 | .end = 52, | ||
405 | .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, | ||
406 | }, | ||
407 | }; | ||
408 | |||
409 | static struct platform_device dma0_device = { | ||
303 | .name = "sh-dma-engine", | 410 | .name = "sh-dma-engine", |
304 | .id = -1, | 411 | .id = 0, |
412 | .resource = sh7785_dmae0_resources, | ||
413 | .num_resources = ARRAY_SIZE(sh7785_dmae0_resources), | ||
305 | .dev = { | 414 | .dev = { |
306 | .platform_data = &dma_platform_data, | 415 | .platform_data = &dma0_platform_data, |
416 | }, | ||
417 | }; | ||
418 | |||
419 | static struct platform_device dma1_device = { | ||
420 | .name = "sh-dma-engine", | ||
421 | .id = 1, | ||
422 | .resource = sh7785_dmae1_resources, | ||
423 | .num_resources = ARRAY_SIZE(sh7785_dmae1_resources), | ||
424 | .dev = { | ||
425 | .platform_data = &dma1_platform_data, | ||
307 | }, | 426 | }, |
308 | }; | 427 | }; |
309 | 428 | ||
@@ -320,7 +439,8 @@ static struct platform_device *sh7785_devices[] __initdata = { | |||
320 | &tmu3_device, | 439 | &tmu3_device, |
321 | &tmu4_device, | 440 | &tmu4_device, |
322 | &tmu5_device, | 441 | &tmu5_device, |
323 | &dma_device, | 442 | &dma0_device, |
443 | &dma1_device, | ||
324 | }; | 444 | }; |
325 | 445 | ||
326 | static int __init sh7785_devices_setup(void) | 446 | static int __init sh7785_devices_setup(void) |
diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c index e2f1753d275c..675eea7785d9 100644 --- a/arch/sh/kernel/hw_breakpoint.c +++ b/arch/sh/kernel/hw_breakpoint.c | |||
@@ -143,26 +143,6 @@ static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len) | |||
143 | return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); | 143 | return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); |
144 | } | 144 | } |
145 | 145 | ||
146 | /* | ||
147 | * Store a breakpoint's encoded address, length, and type. | ||
148 | */ | ||
149 | static int arch_store_info(struct perf_event *bp) | ||
150 | { | ||
151 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); | ||
152 | |||
153 | /* | ||
154 | * User-space requests will always have the address field populated | ||
155 | * For kernel-addresses, either the address or symbol name can be | ||
156 | * specified. | ||
157 | */ | ||
158 | if (info->name) | ||
159 | info->address = (unsigned long)kallsyms_lookup_name(info->name); | ||
160 | if (info->address) | ||
161 | return 0; | ||
162 | |||
163 | return -EINVAL; | ||
164 | } | ||
165 | |||
166 | int arch_bp_generic_fields(int sh_len, int sh_type, | 146 | int arch_bp_generic_fields(int sh_len, int sh_type, |
167 | int *gen_len, int *gen_type) | 147 | int *gen_len, int *gen_type) |
168 | { | 148 | { |
@@ -276,10 +256,12 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp, | |||
276 | return ret; | 256 | return ret; |
277 | } | 257 | } |
278 | 258 | ||
279 | ret = arch_store_info(bp); | 259 | /* |
280 | 260 | * For kernel-addresses, either the address or symbol name can be | |
281 | if (ret < 0) | 261 | * specified. |
282 | return ret; | 262 | */ |
263 | if (info->name) | ||
264 | info->address = (unsigned long)kallsyms_lookup_name(info->name); | ||
283 | 265 | ||
284 | /* | 266 | /* |
285 | * Check that the low-order bits of the address are appropriate | 267 | * Check that the low-order bits of the address are appropriate |
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index 3459e70eed72..8870d6ba64bf 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c | |||
@@ -443,7 +443,7 @@ void __init setup_arch(char **cmdline_p) | |||
443 | 443 | ||
444 | nodes_clear(node_online_map); | 444 | nodes_clear(node_online_map); |
445 | 445 | ||
446 | /* Setup bootmem with available RAM */ | 446 | pmb_init(); |
447 | lmb_init(); | 447 | lmb_init(); |
448 | setup_memory(); | 448 | setup_memory(); |
449 | sparse_init(); | 449 | sparse_init(); |
@@ -452,7 +452,6 @@ void __init setup_arch(char **cmdline_p) | |||
452 | conswitchp = &dummy_con; | 452 | conswitchp = &dummy_con; |
453 | #endif | 453 | #endif |
454 | paging_init(); | 454 | paging_init(); |
455 | pmb_init(); | ||
456 | 455 | ||
457 | ioremap_fixed_init(); | 456 | ioremap_fixed_init(); |
458 | 457 | ||
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c index 953fa1613312..8a0072de2bcc 100644 --- a/arch/sh/kernel/time.c +++ b/arch/sh/kernel/time.c | |||
@@ -39,12 +39,12 @@ static int null_rtc_set_time(const time_t secs) | |||
39 | void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time; | 39 | void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time; |
40 | int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time; | 40 | int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time; |
41 | 41 | ||
42 | #ifdef CONFIG_GENERIC_CMOS_UPDATE | ||
43 | void read_persistent_clock(struct timespec *ts) | 42 | void read_persistent_clock(struct timespec *ts) |
44 | { | 43 | { |
45 | rtc_sh_get_time(ts); | 44 | rtc_sh_get_time(ts); |
46 | } | 45 | } |
47 | 46 | ||
47 | #ifdef CONFIG_GENERIC_CMOS_UPDATE | ||
48 | int update_persistent_clock(struct timespec now) | 48 | int update_persistent_clock(struct timespec now) |
49 | { | 49 | { |
50 | return rtc_sh_set_time(now.tv_sec); | 50 | return rtc_sh_set_time(now.tv_sec); |
@@ -113,9 +113,5 @@ void __init time_init(void) | |||
113 | hwblk_init(); | 113 | hwblk_init(); |
114 | clk_init(); | 114 | clk_init(); |
115 | 115 | ||
116 | rtc_sh_get_time(&xtime); | ||
117 | set_normalized_timespec(&wall_to_monotonic, | ||
118 | -xtime.tv_sec, -xtime.tv_nsec); | ||
119 | |||
120 | late_time_init = sh_late_time_init; | 116 | late_time_init = sh_late_time_init; |
121 | } | 117 | } |
diff --git a/arch/sh/lib/libgcc.h b/arch/sh/lib/libgcc.h index 3f19d1c5d942..05909d58e2fe 100644 --- a/arch/sh/lib/libgcc.h +++ b/arch/sh/lib/libgcc.h | |||
@@ -17,8 +17,7 @@ struct DWstruct { | |||
17 | #error I feel sick. | 17 | #error I feel sick. |
18 | #endif | 18 | #endif |
19 | 19 | ||
20 | typedef union | 20 | typedef union { |
21 | { | ||
22 | struct DWstruct s; | 21 | struct DWstruct s; |
23 | long long ll; | 22 | long long ll; |
24 | } DWunion; | 23 | } DWunion; |
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c index c68d2d7d00a9..1ab2385ecefe 100644 --- a/arch/sh/mm/ioremap.c +++ b/arch/sh/mm/ioremap.c | |||
@@ -34,11 +34,12 @@ | |||
34 | * caller shouldn't need to know that small detail. | 34 | * caller shouldn't need to know that small detail. |
35 | */ | 35 | */ |
36 | void __iomem * __init_refok | 36 | void __iomem * __init_refok |
37 | __ioremap_caller(unsigned long phys_addr, unsigned long size, | 37 | __ioremap_caller(phys_addr_t phys_addr, unsigned long size, |
38 | pgprot_t pgprot, void *caller) | 38 | pgprot_t pgprot, void *caller) |
39 | { | 39 | { |
40 | struct vm_struct *area; | 40 | struct vm_struct *area; |
41 | unsigned long offset, last_addr, addr, orig_addr; | 41 | unsigned long offset, last_addr, addr, orig_addr; |
42 | void __iomem *mapped; | ||
42 | 43 | ||
43 | /* Don't allow wraparound or zero size */ | 44 | /* Don't allow wraparound or zero size */ |
44 | last_addr = phys_addr + size - 1; | 45 | last_addr = phys_addr + size - 1; |
@@ -46,6 +47,20 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size, | |||
46 | return NULL; | 47 | return NULL; |
47 | 48 | ||
48 | /* | 49 | /* |
50 | * If we can't yet use the regular approach, go the fixmap route. | ||
51 | */ | ||
52 | if (!mem_init_done) | ||
53 | return ioremap_fixed(phys_addr, size, pgprot); | ||
54 | |||
55 | /* | ||
56 | * First try to remap through the PMB. | ||
57 | * PMB entries are all pre-faulted. | ||
58 | */ | ||
59 | mapped = pmb_remap_caller(phys_addr, size, pgprot, caller); | ||
60 | if (mapped && !IS_ERR(mapped)) | ||
61 | return mapped; | ||
62 | |||
63 | /* | ||
49 | * Mappings have to be page-aligned | 64 | * Mappings have to be page-aligned |
50 | */ | 65 | */ |
51 | offset = phys_addr & ~PAGE_MASK; | 66 | offset = phys_addr & ~PAGE_MASK; |
@@ -53,12 +68,6 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size, | |||
53 | size = PAGE_ALIGN(last_addr+1) - phys_addr; | 68 | size = PAGE_ALIGN(last_addr+1) - phys_addr; |
54 | 69 | ||
55 | /* | 70 | /* |
56 | * If we can't yet use the regular approach, go the fixmap route. | ||
57 | */ | ||
58 | if (!mem_init_done) | ||
59 | return ioremap_fixed(phys_addr, offset, size, pgprot); | ||
60 | |||
61 | /* | ||
62 | * Ok, go for it.. | 71 | * Ok, go for it.. |
63 | */ | 72 | */ |
64 | area = get_vm_area_caller(size, VM_IOREMAP, caller); | 73 | area = get_vm_area_caller(size, VM_IOREMAP, caller); |
@@ -67,33 +76,10 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size, | |||
67 | area->phys_addr = phys_addr; | 76 | area->phys_addr = phys_addr; |
68 | orig_addr = addr = (unsigned long)area->addr; | 77 | orig_addr = addr = (unsigned long)area->addr; |
69 | 78 | ||
70 | #ifdef CONFIG_PMB | 79 | if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) { |
71 | /* | 80 | vunmap((void *)orig_addr); |
72 | * First try to remap through the PMB once a valid VMA has been | 81 | return NULL; |
73 | * established. Smaller allocations (or the rest of the size | ||
74 | * remaining after a PMB mapping due to the size not being | ||
75 | * perfectly aligned on a PMB size boundary) are then mapped | ||
76 | * through the UTLB using conventional page tables. | ||
77 | * | ||
78 | * PMB entries are all pre-faulted. | ||
79 | */ | ||
80 | if (unlikely(phys_addr >= P1SEG)) { | ||
81 | unsigned long mapped; | ||
82 | |||
83 | mapped = pmb_remap(addr, phys_addr, size, pgprot); | ||
84 | if (likely(mapped)) { | ||
85 | addr += mapped; | ||
86 | phys_addr += mapped; | ||
87 | size -= mapped; | ||
88 | } | ||
89 | } | 82 | } |
90 | #endif | ||
91 | |||
92 | if (likely(size)) | ||
93 | if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) { | ||
94 | vunmap((void *)orig_addr); | ||
95 | return NULL; | ||
96 | } | ||
97 | 83 | ||
98 | return (void __iomem *)(offset + (char *)orig_addr); | 84 | return (void __iomem *)(offset + (char *)orig_addr); |
99 | } | 85 | } |
@@ -133,23 +119,11 @@ void __iounmap(void __iomem *addr) | |||
133 | if (iounmap_fixed(addr) == 0) | 119 | if (iounmap_fixed(addr) == 0) |
134 | return; | 120 | return; |
135 | 121 | ||
136 | #ifdef CONFIG_PMB | ||
137 | /* | 122 | /* |
138 | * Purge any PMB entries that may have been established for this | 123 | * If the PMB handled it, there's nothing else to do. |
139 | * mapping, then proceed with conventional VMA teardown. | ||
140 | * | ||
141 | * XXX: Note that due to the way that remove_vm_area() does | ||
142 | * matching of the resultant VMA, we aren't able to fast-forward | ||
143 | * the address past the PMB space until the end of the VMA where | ||
144 | * the page tables reside. As such, unmap_vm_area() will be | ||
145 | * forced to linearly scan over the area until it finds the page | ||
146 | * tables where PTEs that need to be unmapped actually reside, | ||
147 | * which is far from optimal. Perhaps we need to use a separate | ||
148 | * VMA for the PMB mappings? | ||
149 | * -- PFM. | ||
150 | */ | 124 | */ |
151 | pmb_unmap(vaddr); | 125 | if (pmb_unmap(addr) == 0) |
152 | #endif | 126 | return; |
153 | 127 | ||
154 | p = remove_vm_area((void *)(vaddr & PAGE_MASK)); | 128 | p = remove_vm_area((void *)(vaddr & PAGE_MASK)); |
155 | if (!p) { | 129 | if (!p) { |
diff --git a/arch/sh/mm/ioremap_fixed.c b/arch/sh/mm/ioremap_fixed.c index 0b78b1e20ef1..7f682e5dafcf 100644 --- a/arch/sh/mm/ioremap_fixed.c +++ b/arch/sh/mm/ioremap_fixed.c | |||
@@ -45,14 +45,21 @@ void __init ioremap_fixed_init(void) | |||
45 | } | 45 | } |
46 | 46 | ||
47 | void __init __iomem * | 47 | void __init __iomem * |
48 | ioremap_fixed(resource_size_t phys_addr, unsigned long offset, | 48 | ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot) |
49 | unsigned long size, pgprot_t prot) | ||
50 | { | 49 | { |
51 | enum fixed_addresses idx0, idx; | 50 | enum fixed_addresses idx0, idx; |
52 | struct ioremap_map *map; | 51 | struct ioremap_map *map; |
53 | unsigned int nrpages; | 52 | unsigned int nrpages; |
53 | unsigned long offset; | ||
54 | int i, slot; | 54 | int i, slot; |
55 | 55 | ||
56 | /* | ||
57 | * Mappings have to be page-aligned | ||
58 | */ | ||
59 | offset = phys_addr & ~PAGE_MASK; | ||
60 | phys_addr &= PAGE_MASK; | ||
61 | size = PAGE_ALIGN(phys_addr + size) - phys_addr; | ||
62 | |||
56 | slot = -1; | 63 | slot = -1; |
57 | for (i = 0; i < FIX_N_IOREMAPS; i++) { | 64 | for (i = 0; i < FIX_N_IOREMAPS; i++) { |
58 | map = &ioremap_maps[i]; | 65 | map = &ioremap_maps[i]; |
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c index 422e92721878..961b34085e3b 100644 --- a/arch/sh/mm/numa.c +++ b/arch/sh/mm/numa.c | |||
@@ -74,6 +74,9 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end) | |||
74 | start_pfn = start >> PAGE_SHIFT; | 74 | start_pfn = start >> PAGE_SHIFT; |
75 | end_pfn = end >> PAGE_SHIFT; | 75 | end_pfn = end >> PAGE_SHIFT; |
76 | 76 | ||
77 | pmb_bolt_mapping((unsigned long)__va(start), start, end - start, | ||
78 | PAGE_KERNEL); | ||
79 | |||
77 | lmb_add(start, end - start); | 80 | lmb_add(start, end - start); |
78 | 81 | ||
79 | __add_active_range(nid, start_pfn, end_pfn); | 82 | __add_active_range(nid, start_pfn, end_pfn); |
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index 198bcff5e96f..a4662e2782c3 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c | |||
@@ -23,7 +23,8 @@ | |||
23 | #include <linux/err.h> | 23 | #include <linux/err.h> |
24 | #include <linux/io.h> | 24 | #include <linux/io.h> |
25 | #include <linux/spinlock.h> | 25 | #include <linux/spinlock.h> |
26 | #include <linux/rwlock.h> | 26 | #include <linux/vmalloc.h> |
27 | #include <asm/cacheflush.h> | ||
27 | #include <asm/sizes.h> | 28 | #include <asm/sizes.h> |
28 | #include <asm/system.h> | 29 | #include <asm/system.h> |
29 | #include <asm/uaccess.h> | 30 | #include <asm/uaccess.h> |
@@ -52,12 +53,24 @@ struct pmb_entry { | |||
52 | struct pmb_entry *link; | 53 | struct pmb_entry *link; |
53 | }; | 54 | }; |
54 | 55 | ||
56 | static struct { | ||
57 | unsigned long size; | ||
58 | int flag; | ||
59 | } pmb_sizes[] = { | ||
60 | { .size = SZ_512M, .flag = PMB_SZ_512M, }, | ||
61 | { .size = SZ_128M, .flag = PMB_SZ_128M, }, | ||
62 | { .size = SZ_64M, .flag = PMB_SZ_64M, }, | ||
63 | { .size = SZ_16M, .flag = PMB_SZ_16M, }, | ||
64 | }; | ||
65 | |||
55 | static void pmb_unmap_entry(struct pmb_entry *, int depth); | 66 | static void pmb_unmap_entry(struct pmb_entry *, int depth); |
56 | 67 | ||
57 | static DEFINE_RWLOCK(pmb_rwlock); | 68 | static DEFINE_RWLOCK(pmb_rwlock); |
58 | static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES]; | 69 | static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES]; |
59 | static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES); | 70 | static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES); |
60 | 71 | ||
72 | static unsigned int pmb_iomapping_enabled; | ||
73 | |||
61 | static __always_inline unsigned long mk_pmb_entry(unsigned int entry) | 74 | static __always_inline unsigned long mk_pmb_entry(unsigned int entry) |
62 | { | 75 | { |
63 | return (entry & PMB_E_MASK) << PMB_E_SHIFT; | 76 | return (entry & PMB_E_MASK) << PMB_E_SHIFT; |
@@ -73,6 +86,142 @@ static __always_inline unsigned long mk_pmb_data(unsigned int entry) | |||
73 | return mk_pmb_entry(entry) | PMB_DATA; | 86 | return mk_pmb_entry(entry) | PMB_DATA; |
74 | } | 87 | } |
75 | 88 | ||
89 | static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn) | ||
90 | { | ||
91 | return ppn >= __pa(memory_start) && ppn < __pa(memory_end); | ||
92 | } | ||
93 | |||
94 | /* | ||
95 | * Ensure that the PMB entries match our cache configuration. | ||
96 | * | ||
97 | * When we are in 32-bit address extended mode, CCR.CB becomes | ||
98 | * invalid, so care must be taken to manually adjust cacheable | ||
99 | * translations. | ||
100 | */ | ||
101 | static __always_inline unsigned long pmb_cache_flags(void) | ||
102 | { | ||
103 | unsigned long flags = 0; | ||
104 | |||
105 | #if defined(CONFIG_CACHE_OFF) | ||
106 | flags |= PMB_WT | PMB_UB; | ||
107 | #elif defined(CONFIG_CACHE_WRITETHROUGH) | ||
108 | flags |= PMB_C | PMB_WT | PMB_UB; | ||
109 | #elif defined(CONFIG_CACHE_WRITEBACK) | ||
110 | flags |= PMB_C; | ||
111 | #endif | ||
112 | |||
113 | return flags; | ||
114 | } | ||
115 | |||
116 | /* | ||
117 | * Convert typical pgprot value to the PMB equivalent | ||
118 | */ | ||
119 | static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot) | ||
120 | { | ||
121 | unsigned long pmb_flags = 0; | ||
122 | u64 flags = pgprot_val(prot); | ||
123 | |||
124 | if (flags & _PAGE_CACHABLE) | ||
125 | pmb_flags |= PMB_C; | ||
126 | if (flags & _PAGE_WT) | ||
127 | pmb_flags |= PMB_WT | PMB_UB; | ||
128 | |||
129 | return pmb_flags; | ||
130 | } | ||
131 | |||
132 | static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b) | ||
133 | { | ||
134 | return (b->vpn == (a->vpn + a->size)) && | ||
135 | (b->ppn == (a->ppn + a->size)) && | ||
136 | (b->flags == a->flags); | ||
137 | } | ||
138 | |||
139 | static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys, | ||
140 | unsigned long size) | ||
141 | { | ||
142 | int i; | ||
143 | |||
144 | read_lock(&pmb_rwlock); | ||
145 | |||
146 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { | ||
147 | struct pmb_entry *pmbe, *iter; | ||
148 | unsigned long span; | ||
149 | |||
150 | if (!test_bit(i, pmb_map)) | ||
151 | continue; | ||
152 | |||
153 | pmbe = &pmb_entry_list[i]; | ||
154 | |||
155 | /* | ||
156 | * See if VPN and PPN are bounded by an existing mapping. | ||
157 | */ | ||
158 | if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size))) | ||
159 | continue; | ||
160 | if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size))) | ||
161 | continue; | ||
162 | |||
163 | /* | ||
164 | * Now see if we're in range of a simple mapping. | ||
165 | */ | ||
166 | if (size <= pmbe->size) { | ||
167 | read_unlock(&pmb_rwlock); | ||
168 | return true; | ||
169 | } | ||
170 | |||
171 | span = pmbe->size; | ||
172 | |||
173 | /* | ||
174 | * Finally for sizes that involve compound mappings, walk | ||
175 | * the chain. | ||
176 | */ | ||
177 | for (iter = pmbe->link; iter; iter = iter->link) | ||
178 | span += iter->size; | ||
179 | |||
180 | /* | ||
181 | * Nothing else to do if the range requirements are met. | ||
182 | */ | ||
183 | if (size <= span) { | ||
184 | read_unlock(&pmb_rwlock); | ||
185 | return true; | ||
186 | } | ||
187 | } | ||
188 | |||
189 | read_unlock(&pmb_rwlock); | ||
190 | return false; | ||
191 | } | ||
192 | |||
193 | static bool pmb_size_valid(unsigned long size) | ||
194 | { | ||
195 | int i; | ||
196 | |||
197 | for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) | ||
198 | if (pmb_sizes[i].size == size) | ||
199 | return true; | ||
200 | |||
201 | return false; | ||
202 | } | ||
203 | |||
204 | static inline bool pmb_addr_valid(unsigned long addr, unsigned long size) | ||
205 | { | ||
206 | return (addr >= P1SEG && (addr + size - 1) < P3SEG); | ||
207 | } | ||
208 | |||
209 | static inline bool pmb_prot_valid(pgprot_t prot) | ||
210 | { | ||
211 | return (pgprot_val(prot) & _PAGE_USER) == 0; | ||
212 | } | ||
213 | |||
214 | static int pmb_size_to_flags(unsigned long size) | ||
215 | { | ||
216 | int i; | ||
217 | |||
218 | for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) | ||
219 | if (pmb_sizes[i].size == size) | ||
220 | return pmb_sizes[i].flag; | ||
221 | |||
222 | return 0; | ||
223 | } | ||
224 | |||
76 | static int pmb_alloc_entry(void) | 225 | static int pmb_alloc_entry(void) |
77 | { | 226 | { |
78 | int pos; | 227 | int pos; |
@@ -140,33 +289,22 @@ static void pmb_free(struct pmb_entry *pmbe) | |||
140 | } | 289 | } |
141 | 290 | ||
142 | /* | 291 | /* |
143 | * Ensure that the PMB entries match our cache configuration. | 292 | * Must be run uncached. |
144 | * | ||
145 | * When we are in 32-bit address extended mode, CCR.CB becomes | ||
146 | * invalid, so care must be taken to manually adjust cacheable | ||
147 | * translations. | ||
148 | */ | 293 | */ |
149 | static __always_inline unsigned long pmb_cache_flags(void) | 294 | static void __set_pmb_entry(struct pmb_entry *pmbe) |
150 | { | 295 | { |
151 | unsigned long flags = 0; | 296 | unsigned long addr, data; |
152 | 297 | ||
153 | #if defined(CONFIG_CACHE_WRITETHROUGH) | 298 | addr = mk_pmb_addr(pmbe->entry); |
154 | flags |= PMB_C | PMB_WT | PMB_UB; | 299 | data = mk_pmb_data(pmbe->entry); |
155 | #elif defined(CONFIG_CACHE_WRITEBACK) | ||
156 | flags |= PMB_C; | ||
157 | #endif | ||
158 | 300 | ||
159 | return flags; | 301 | jump_to_uncached(); |
160 | } | ||
161 | 302 | ||
162 | /* | 303 | /* Set V-bit */ |
163 | * Must be run uncached. | 304 | __raw_writel(pmbe->vpn | PMB_V, addr); |
164 | */ | 305 | __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data); |
165 | static void __set_pmb_entry(struct pmb_entry *pmbe) | 306 | |
166 | { | 307 | back_to_cached(); |
167 | writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry)); | ||
168 | writel_uncached(pmbe->ppn | pmbe->flags | PMB_V, | ||
169 | mk_pmb_data(pmbe->entry)); | ||
170 | } | 308 | } |
171 | 309 | ||
172 | static void __clear_pmb_entry(struct pmb_entry *pmbe) | 310 | static void __clear_pmb_entry(struct pmb_entry *pmbe) |
@@ -194,144 +332,155 @@ static void set_pmb_entry(struct pmb_entry *pmbe) | |||
194 | spin_unlock_irqrestore(&pmbe->lock, flags); | 332 | spin_unlock_irqrestore(&pmbe->lock, flags); |
195 | } | 333 | } |
196 | 334 | ||
197 | static struct { | 335 | int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys, |
198 | unsigned long size; | 336 | unsigned long size, pgprot_t prot) |
199 | int flag; | ||
200 | } pmb_sizes[] = { | ||
201 | { .size = SZ_512M, .flag = PMB_SZ_512M, }, | ||
202 | { .size = SZ_128M, .flag = PMB_SZ_128M, }, | ||
203 | { .size = SZ_64M, .flag = PMB_SZ_64M, }, | ||
204 | { .size = SZ_16M, .flag = PMB_SZ_16M, }, | ||
205 | }; | ||
206 | |||
207 | long pmb_remap(unsigned long vaddr, unsigned long phys, | ||
208 | unsigned long size, pgprot_t prot) | ||
209 | { | 337 | { |
210 | struct pmb_entry *pmbp, *pmbe; | 338 | struct pmb_entry *pmbp, *pmbe; |
211 | unsigned long wanted; | 339 | unsigned long orig_addr, orig_size; |
212 | int pmb_flags, i; | 340 | unsigned long flags, pmb_flags; |
213 | long err; | 341 | int i, mapped; |
214 | u64 flags; | ||
215 | 342 | ||
216 | flags = pgprot_val(prot); | 343 | if (!pmb_addr_valid(vaddr, size)) |
344 | return -EFAULT; | ||
345 | if (pmb_mapping_exists(vaddr, phys, size)) | ||
346 | return 0; | ||
217 | 347 | ||
218 | pmb_flags = PMB_WT | PMB_UB; | 348 | orig_addr = vaddr; |
219 | 349 | orig_size = size; | |
220 | /* Convert typical pgprot value to the PMB equivalent */ | ||
221 | if (flags & _PAGE_CACHABLE) { | ||
222 | pmb_flags |= PMB_C; | ||
223 | 350 | ||
224 | if ((flags & _PAGE_WT) == 0) | 351 | flush_tlb_kernel_range(vaddr, vaddr + size); |
225 | pmb_flags &= ~(PMB_WT | PMB_UB); | ||
226 | } | ||
227 | 352 | ||
353 | pmb_flags = pgprot_to_pmb_flags(prot); | ||
228 | pmbp = NULL; | 354 | pmbp = NULL; |
229 | wanted = size; | ||
230 | 355 | ||
231 | again: | 356 | do { |
232 | for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) { | 357 | for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) { |
233 | unsigned long flags; | 358 | if (size < pmb_sizes[i].size) |
359 | continue; | ||
360 | |||
361 | pmbe = pmb_alloc(vaddr, phys, pmb_flags | | ||
362 | pmb_sizes[i].flag, PMB_NO_ENTRY); | ||
363 | if (IS_ERR(pmbe)) { | ||
364 | pmb_unmap_entry(pmbp, mapped); | ||
365 | return PTR_ERR(pmbe); | ||
366 | } | ||
234 | 367 | ||
235 | if (size < pmb_sizes[i].size) | 368 | spin_lock_irqsave(&pmbe->lock, flags); |
236 | continue; | ||
237 | 369 | ||
238 | pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag, | 370 | pmbe->size = pmb_sizes[i].size; |
239 | PMB_NO_ENTRY); | ||
240 | if (IS_ERR(pmbe)) { | ||
241 | err = PTR_ERR(pmbe); | ||
242 | goto out; | ||
243 | } | ||
244 | 371 | ||
245 | spin_lock_irqsave(&pmbe->lock, flags); | 372 | __set_pmb_entry(pmbe); |
246 | 373 | ||
247 | __set_pmb_entry(pmbe); | 374 | phys += pmbe->size; |
375 | vaddr += pmbe->size; | ||
376 | size -= pmbe->size; | ||
248 | 377 | ||
249 | phys += pmb_sizes[i].size; | 378 | /* |
250 | vaddr += pmb_sizes[i].size; | 379 | * Link adjacent entries that span multiple PMB |
251 | size -= pmb_sizes[i].size; | 380 | * entries for easier tear-down. |
381 | */ | ||
382 | if (likely(pmbp)) { | ||
383 | spin_lock(&pmbp->lock); | ||
384 | pmbp->link = pmbe; | ||
385 | spin_unlock(&pmbp->lock); | ||
386 | } | ||
252 | 387 | ||
253 | pmbe->size = pmb_sizes[i].size; | 388 | pmbp = pmbe; |
254 | 389 | ||
255 | /* | 390 | /* |
256 | * Link adjacent entries that span multiple PMB entries | 391 | * Instead of trying smaller sizes on every |
257 | * for easier tear-down. | 392 | * iteration (even if we succeed in allocating |
258 | */ | 393 | * space), try using pmb_sizes[i].size again. |
259 | if (likely(pmbp)) { | 394 | */ |
260 | spin_lock(&pmbp->lock); | 395 | i--; |
261 | pmbp->link = pmbe; | 396 | mapped++; |
262 | spin_unlock(&pmbp->lock); | 397 | |
398 | spin_unlock_irqrestore(&pmbe->lock, flags); | ||
263 | } | 399 | } |
400 | } while (size >= SZ_16M); | ||
264 | 401 | ||
265 | pmbp = pmbe; | 402 | flush_cache_vmap(orig_addr, orig_addr + orig_size); |
266 | 403 | ||
267 | /* | 404 | return 0; |
268 | * Instead of trying smaller sizes on every iteration | 405 | } |
269 | * (even if we succeed in allocating space), try using | ||
270 | * pmb_sizes[i].size again. | ||
271 | */ | ||
272 | i--; | ||
273 | 406 | ||
274 | spin_unlock_irqrestore(&pmbe->lock, flags); | 407 | void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size, |
275 | } | 408 | pgprot_t prot, void *caller) |
409 | { | ||
410 | unsigned long vaddr; | ||
411 | phys_addr_t offset, last_addr; | ||
412 | phys_addr_t align_mask; | ||
413 | unsigned long aligned; | ||
414 | struct vm_struct *area; | ||
415 | int i, ret; | ||
276 | 416 | ||
277 | if (size >= SZ_16M) | 417 | if (!pmb_iomapping_enabled) |
278 | goto again; | 418 | return NULL; |
279 | 419 | ||
280 | return wanted - size; | 420 | /* |
421 | * Small mappings need to go through the TLB. | ||
422 | */ | ||
423 | if (size < SZ_16M) | ||
424 | return ERR_PTR(-EINVAL); | ||
425 | if (!pmb_prot_valid(prot)) | ||
426 | return ERR_PTR(-EINVAL); | ||
281 | 427 | ||
282 | out: | 428 | for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) |
283 | pmb_unmap_entry(pmbp, NR_PMB_ENTRIES); | 429 | if (size >= pmb_sizes[i].size) |
430 | break; | ||
431 | |||
432 | last_addr = phys + size; | ||
433 | align_mask = ~(pmb_sizes[i].size - 1); | ||
434 | offset = phys & ~align_mask; | ||
435 | phys &= align_mask; | ||
436 | aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys; | ||
437 | |||
438 | /* | ||
439 | * XXX: This should really start from uncached_end, but this | ||
440 | * causes the MMU to reset, so for now we restrict it to the | ||
441 | * 0xb000...0xc000 range. | ||
442 | */ | ||
443 | area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000, | ||
444 | P3SEG, caller); | ||
445 | if (!area) | ||
446 | return NULL; | ||
447 | |||
448 | area->phys_addr = phys; | ||
449 | vaddr = (unsigned long)area->addr; | ||
450 | |||
451 | ret = pmb_bolt_mapping(vaddr, phys, size, prot); | ||
452 | if (unlikely(ret != 0)) | ||
453 | return ERR_PTR(ret); | ||
284 | 454 | ||
285 | return err; | 455 | return (void __iomem *)(offset + (char *)vaddr); |
286 | } | 456 | } |
287 | 457 | ||
288 | void pmb_unmap(unsigned long addr) | 458 | int pmb_unmap(void __iomem *addr) |
289 | { | 459 | { |
290 | struct pmb_entry *pmbe = NULL; | 460 | struct pmb_entry *pmbe = NULL; |
291 | int i; | 461 | unsigned long vaddr = (unsigned long __force)addr; |
462 | int i, found = 0; | ||
292 | 463 | ||
293 | read_lock(&pmb_rwlock); | 464 | read_lock(&pmb_rwlock); |
294 | 465 | ||
295 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { | 466 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { |
296 | if (test_bit(i, pmb_map)) { | 467 | if (test_bit(i, pmb_map)) { |
297 | pmbe = &pmb_entry_list[i]; | 468 | pmbe = &pmb_entry_list[i]; |
298 | if (pmbe->vpn == addr) | 469 | if (pmbe->vpn == vaddr) { |
470 | found = 1; | ||
299 | break; | 471 | break; |
472 | } | ||
300 | } | 473 | } |
301 | } | 474 | } |
302 | 475 | ||
303 | read_unlock(&pmb_rwlock); | 476 | read_unlock(&pmb_rwlock); |
304 | 477 | ||
305 | pmb_unmap_entry(pmbe, NR_PMB_ENTRIES); | 478 | if (found) { |
306 | } | 479 | pmb_unmap_entry(pmbe, NR_PMB_ENTRIES); |
307 | 480 | return 0; | |
308 | static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b) | 481 | } |
309 | { | ||
310 | return (b->vpn == (a->vpn + a->size)) && | ||
311 | (b->ppn == (a->ppn + a->size)) && | ||
312 | (b->flags == a->flags); | ||
313 | } | ||
314 | |||
315 | static bool pmb_size_valid(unsigned long size) | ||
316 | { | ||
317 | int i; | ||
318 | |||
319 | for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) | ||
320 | if (pmb_sizes[i].size == size) | ||
321 | return true; | ||
322 | |||
323 | return false; | ||
324 | } | ||
325 | |||
326 | static int pmb_size_to_flags(unsigned long size) | ||
327 | { | ||
328 | int i; | ||
329 | |||
330 | for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) | ||
331 | if (pmb_sizes[i].size == size) | ||
332 | return pmb_sizes[i].flag; | ||
333 | 482 | ||
334 | return 0; | 483 | return -EINVAL; |
335 | } | 484 | } |
336 | 485 | ||
337 | static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth) | 486 | static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth) |
@@ -351,6 +500,8 @@ static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth) | |||
351 | */ | 500 | */ |
352 | __clear_pmb_entry(pmbe); | 501 | __clear_pmb_entry(pmbe); |
353 | 502 | ||
503 | flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size); | ||
504 | |||
354 | pmbe = pmblink->link; | 505 | pmbe = pmblink->link; |
355 | 506 | ||
356 | pmb_free(pmblink); | 507 | pmb_free(pmblink); |
@@ -369,11 +520,6 @@ static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth) | |||
369 | write_unlock_irqrestore(&pmb_rwlock, flags); | 520 | write_unlock_irqrestore(&pmb_rwlock, flags); |
370 | } | 521 | } |
371 | 522 | ||
372 | static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn) | ||
373 | { | ||
374 | return ppn >= __pa(memory_start) && ppn < __pa(memory_end); | ||
375 | } | ||
376 | |||
377 | static void __init pmb_notify(void) | 523 | static void __init pmb_notify(void) |
378 | { | 524 | { |
379 | int i; | 525 | int i; |
@@ -625,6 +771,18 @@ static void __init pmb_resize(void) | |||
625 | } | 771 | } |
626 | #endif | 772 | #endif |
627 | 773 | ||
774 | static int __init early_pmb(char *p) | ||
775 | { | ||
776 | if (!p) | ||
777 | return 0; | ||
778 | |||
779 | if (strstr(p, "iomap")) | ||
780 | pmb_iomapping_enabled = 1; | ||
781 | |||
782 | return 0; | ||
783 | } | ||
784 | early_param("pmb", early_pmb); | ||
785 | |||
628 | void __init pmb_init(void) | 786 | void __init pmb_init(void) |
629 | { | 787 | { |
630 | /* Synchronize software state */ | 788 | /* Synchronize software state */ |
@@ -713,7 +871,7 @@ static int __init pmb_debugfs_init(void) | |||
713 | 871 | ||
714 | return 0; | 872 | return 0; |
715 | } | 873 | } |
716 | postcore_initcall(pmb_debugfs_init); | 874 | subsys_initcall(pmb_debugfs_init); |
717 | 875 | ||
718 | #ifdef CONFIG_PM | 876 | #ifdef CONFIG_PM |
719 | static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state) | 877 | static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state) |
diff --git a/arch/um/.gitignore b/arch/um/.gitignore new file mode 100644 index 000000000000..a73d3a1cc746 --- /dev/null +++ b/arch/um/.gitignore | |||
@@ -0,0 +1,3 @@ | |||
1 | kernel/config.c | ||
2 | kernel/config.tmp | ||
3 | kernel/vmlinux.lds | ||
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c index cf8a97f34518..64cda95f59ca 100644 --- a/arch/um/drivers/line.c +++ b/arch/um/drivers/line.c | |||
@@ -18,10 +18,10 @@ static irqreturn_t line_interrupt(int irq, void *data) | |||
18 | { | 18 | { |
19 | struct chan *chan = data; | 19 | struct chan *chan = data; |
20 | struct line *line = chan->line; | 20 | struct line *line = chan->line; |
21 | struct tty_struct *tty = line->tty; | 21 | struct tty_struct *tty; |
22 | 22 | ||
23 | if (line) | 23 | if (line) |
24 | chan_interrupt(&line->chan_list, &line->task, tty, irq); | 24 | chan_interrupt(&line->chan_list, &line->task, line->tty, irq); |
25 | return IRQ_HANDLED; | 25 | return IRQ_HANDLED; |
26 | } | 26 | } |
27 | 27 | ||
diff --git a/arch/um/sys-i386/Makefile b/arch/um/sys-i386/Makefile index 1b549bca4645..804b28dd0328 100644 --- a/arch/um/sys-i386/Makefile +++ b/arch/um/sys-i386/Makefile | |||
@@ -6,6 +6,8 @@ obj-y = bug.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \ | |||
6 | ptrace_user.o setjmp.o signal.o stub.o stub_segv.o syscalls.o sysrq.o \ | 6 | ptrace_user.o setjmp.o signal.o stub.o stub_segv.o syscalls.o sysrq.o \ |
7 | sys_call_table.o tls.o | 7 | sys_call_table.o tls.o |
8 | 8 | ||
9 | obj-$(CONFIG_BINFMT_ELF) += elfcore.o | ||
10 | |||
9 | subarch-obj-y = lib/semaphore_32.o lib/string_32.o | 11 | subarch-obj-y = lib/semaphore_32.o lib/string_32.o |
10 | subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem_32.o | 12 | subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem_32.o |
11 | subarch-obj-$(CONFIG_MODULES) += kernel/module.o | 13 | subarch-obj-$(CONFIG_MODULES) += kernel/module.o |
diff --git a/arch/um/sys-i386/asm/elf.h b/arch/um/sys-i386/asm/elf.h index 770885472ed4..e64cd41d7bab 100644 --- a/arch/um/sys-i386/asm/elf.h +++ b/arch/um/sys-i386/asm/elf.h | |||
@@ -116,47 +116,4 @@ do { \ | |||
116 | } \ | 116 | } \ |
117 | } while (0) | 117 | } while (0) |
118 | 118 | ||
119 | /* | ||
120 | * These macros parameterize elf_core_dump in fs/binfmt_elf.c to write out | ||
121 | * extra segments containing the vsyscall DSO contents. Dumping its | ||
122 | * contents makes post-mortem fully interpretable later without matching up | ||
123 | * the same kernel and hardware config to see what PC values meant. | ||
124 | * Dumping its extra ELF program headers includes all the other information | ||
125 | * a debugger needs to easily find how the vsyscall DSO was being used. | ||
126 | */ | ||
127 | #define ELF_CORE_EXTRA_PHDRS \ | ||
128 | (vsyscall_ehdr ? (((struct elfhdr *)vsyscall_ehdr)->e_phnum) : 0 ) | ||
129 | |||
130 | #define ELF_CORE_WRITE_EXTRA_PHDRS \ | ||
131 | if ( vsyscall_ehdr ) { \ | ||
132 | const struct elfhdr *const ehdrp = (struct elfhdr *)vsyscall_ehdr; \ | ||
133 | const struct elf_phdr *const phdrp = \ | ||
134 | (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff); \ | ||
135 | int i; \ | ||
136 | Elf32_Off ofs = 0; \ | ||
137 | for (i = 0; i < ehdrp->e_phnum; ++i) { \ | ||
138 | struct elf_phdr phdr = phdrp[i]; \ | ||
139 | if (phdr.p_type == PT_LOAD) { \ | ||
140 | ofs = phdr.p_offset = offset; \ | ||
141 | offset += phdr.p_filesz; \ | ||
142 | } \ | ||
143 | else \ | ||
144 | phdr.p_offset += ofs; \ | ||
145 | phdr.p_paddr = 0; /* match other core phdrs */ \ | ||
146 | DUMP_WRITE(&phdr, sizeof(phdr)); \ | ||
147 | } \ | ||
148 | } | ||
149 | #define ELF_CORE_WRITE_EXTRA_DATA \ | ||
150 | if ( vsyscall_ehdr ) { \ | ||
151 | const struct elfhdr *const ehdrp = (struct elfhdr *)vsyscall_ehdr; \ | ||
152 | const struct elf_phdr *const phdrp = \ | ||
153 | (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff); \ | ||
154 | int i; \ | ||
155 | for (i = 0; i < ehdrp->e_phnum; ++i) { \ | ||
156 | if (phdrp[i].p_type == PT_LOAD) \ | ||
157 | DUMP_WRITE((void *) phdrp[i].p_vaddr, \ | ||
158 | phdrp[i].p_filesz); \ | ||
159 | } \ | ||
160 | } | ||
161 | |||
162 | #endif | 119 | #endif |
diff --git a/arch/um/sys-i386/elfcore.c b/arch/um/sys-i386/elfcore.c new file mode 100644 index 000000000000..6bb49b687c97 --- /dev/null +++ b/arch/um/sys-i386/elfcore.c | |||
@@ -0,0 +1,83 @@ | |||
1 | #include <linux/elf.h> | ||
2 | #include <linux/coredump.h> | ||
3 | #include <linux/fs.h> | ||
4 | #include <linux/mm.h> | ||
5 | |||
6 | #include <asm/elf.h> | ||
7 | |||
8 | |||
9 | Elf32_Half elf_core_extra_phdrs(void) | ||
10 | { | ||
11 | return vsyscall_ehdr ? (((struct elfhdr *)vsyscall_ehdr)->e_phnum) : 0; | ||
12 | } | ||
13 | |||
14 | int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size, | ||
15 | unsigned long limit) | ||
16 | { | ||
17 | if ( vsyscall_ehdr ) { | ||
18 | const struct elfhdr *const ehdrp = | ||
19 | (struct elfhdr *) vsyscall_ehdr; | ||
20 | const struct elf_phdr *const phdrp = | ||
21 | (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff); | ||
22 | int i; | ||
23 | Elf32_Off ofs = 0; | ||
24 | |||
25 | for (i = 0; i < ehdrp->e_phnum; ++i) { | ||
26 | struct elf_phdr phdr = phdrp[i]; | ||
27 | |||
28 | if (phdr.p_type == PT_LOAD) { | ||
29 | ofs = phdr.p_offset = offset; | ||
30 | offset += phdr.p_filesz; | ||
31 | } else { | ||
32 | phdr.p_offset += ofs; | ||
33 | } | ||
34 | phdr.p_paddr = 0; /* match other core phdrs */ | ||
35 | *size += sizeof(phdr); | ||
36 | if (*size > limit | ||
37 | || !dump_write(file, &phdr, sizeof(phdr))) | ||
38 | return 0; | ||
39 | } | ||
40 | } | ||
41 | return 1; | ||
42 | } | ||
43 | |||
44 | int elf_core_write_extra_data(struct file *file, size_t *size, | ||
45 | unsigned long limit) | ||
46 | { | ||
47 | if ( vsyscall_ehdr ) { | ||
48 | const struct elfhdr *const ehdrp = | ||
49 | (struct elfhdr *) vsyscall_ehdr; | ||
50 | const struct elf_phdr *const phdrp = | ||
51 | (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff); | ||
52 | int i; | ||
53 | |||
54 | for (i = 0; i < ehdrp->e_phnum; ++i) { | ||
55 | if (phdrp[i].p_type == PT_LOAD) { | ||
56 | void *addr = (void *) phdrp[i].p_vaddr; | ||
57 | size_t filesz = phdrp[i].p_filesz; | ||
58 | |||
59 | *size += filesz; | ||
60 | if (*size > limit | ||
61 | || !dump_write(file, addr, filesz)) | ||
62 | return 0; | ||
63 | } | ||
64 | } | ||
65 | } | ||
66 | return 1; | ||
67 | } | ||
68 | |||
69 | size_t elf_core_extra_data_size(void) | ||
70 | { | ||
71 | if ( vsyscall_ehdr ) { | ||
72 | const struct elfhdr *const ehdrp = | ||
73 | (struct elfhdr *)vsyscall_ehdr; | ||
74 | const struct elf_phdr *const phdrp = | ||
75 | (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff); | ||
76 | int i; | ||
77 | |||
78 | for (i = 0; i < ehdrp->e_phnum; ++i) | ||
79 | if (phdrp[i].p_type == PT_LOAD) | ||
80 | return (size_t) phdrp[i].p_filesz; | ||
81 | } | ||
82 | return 0; | ||
83 | } | ||
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index f15f37bfbd62..e98440371525 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -393,8 +393,12 @@ config X86_ELAN | |||
393 | 393 | ||
394 | config X86_MRST | 394 | config X86_MRST |
395 | bool "Moorestown MID platform" | 395 | bool "Moorestown MID platform" |
396 | depends on PCI | ||
397 | depends on PCI_GOANY | ||
396 | depends on X86_32 | 398 | depends on X86_32 |
397 | depends on X86_EXTENDED_PLATFORM | 399 | depends on X86_EXTENDED_PLATFORM |
400 | depends on X86_IO_APIC | ||
401 | select APB_TIMER | ||
398 | ---help--- | 402 | ---help--- |
399 | Moorestown is Intel's Low Power Intel Architecture (LPIA) based Moblin | 403 | Moorestown is Intel's Low Power Intel Architecture (LPIA) based Moblin |
400 | Internet Device(MID) platform. Moorestown consists of two chips: | 404 | Internet Device(MID) platform. Moorestown consists of two chips: |
@@ -429,6 +433,7 @@ config X86_32_NON_STANDARD | |||
429 | config X86_NUMAQ | 433 | config X86_NUMAQ |
430 | bool "NUMAQ (IBM/Sequent)" | 434 | bool "NUMAQ (IBM/Sequent)" |
431 | depends on X86_32_NON_STANDARD | 435 | depends on X86_32_NON_STANDARD |
436 | depends on PCI | ||
432 | select NUMA | 437 | select NUMA |
433 | select X86_MPPARSE | 438 | select X86_MPPARSE |
434 | ---help--- | 439 | ---help--- |
@@ -629,6 +634,16 @@ config HPET_EMULATE_RTC | |||
629 | def_bool y | 634 | def_bool y |
630 | depends on HPET_TIMER && (RTC=y || RTC=m || RTC_DRV_CMOS=m || RTC_DRV_CMOS=y) | 635 | depends on HPET_TIMER && (RTC=y || RTC=m || RTC_DRV_CMOS=m || RTC_DRV_CMOS=y) |
631 | 636 | ||
637 | config APB_TIMER | ||
638 | def_bool y if MRST | ||
639 | prompt "Langwell APB Timer Support" if X86_MRST | ||
640 | help | ||
641 | APB timer is the replacement for 8254, HPET on X86 MID platforms. | ||
642 | The APBT provides a stable time base on SMP | ||
643 | systems, unlike the TSC, but it is more expensive to access, | ||
644 | as it is off-chip. APB timers are always running regardless of CPU | ||
645 | C states, they are used as per CPU clockevent device when possible. | ||
646 | |||
632 | # Mark as embedded because too many people got it wrong. | 647 | # Mark as embedded because too many people got it wrong. |
633 | # The code disables itself when not needed. | 648 | # The code disables itself when not needed. |
634 | config DMI | 649 | config DMI |
diff --git a/arch/x86/include/asm/apb_timer.h b/arch/x86/include/asm/apb_timer.h new file mode 100644 index 000000000000..c74a2eebe570 --- /dev/null +++ b/arch/x86/include/asm/apb_timer.h | |||
@@ -0,0 +1,70 @@ | |||
1 | /* | ||
2 | * apb_timer.h: Driver for Langwell APB timer based on Synopsis DesignWare | ||
3 | * | ||
4 | * (C) Copyright 2009 Intel Corporation | ||
5 | * Author: Jacob Pan (jacob.jun.pan@intel.com) | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; version 2 | ||
10 | * of the License. | ||
11 | * | ||
12 | * Note: | ||
13 | */ | ||
14 | |||
15 | #ifndef ASM_X86_APBT_H | ||
16 | #define ASM_X86_APBT_H | ||
17 | #include <linux/sfi.h> | ||
18 | |||
19 | #ifdef CONFIG_APB_TIMER | ||
20 | |||
21 | /* Langwell DW APB timer registers */ | ||
22 | #define APBTMR_N_LOAD_COUNT 0x00 | ||
23 | #define APBTMR_N_CURRENT_VALUE 0x04 | ||
24 | #define APBTMR_N_CONTROL 0x08 | ||
25 | #define APBTMR_N_EOI 0x0c | ||
26 | #define APBTMR_N_INT_STATUS 0x10 | ||
27 | |||
28 | #define APBTMRS_INT_STATUS 0xa0 | ||
29 | #define APBTMRS_EOI 0xa4 | ||
30 | #define APBTMRS_RAW_INT_STATUS 0xa8 | ||
31 | #define APBTMRS_COMP_VERSION 0xac | ||
32 | #define APBTMRS_REG_SIZE 0x14 | ||
33 | |||
34 | /* register bits */ | ||
35 | #define APBTMR_CONTROL_ENABLE (1<<0) | ||
36 | #define APBTMR_CONTROL_MODE_PERIODIC (1<<1) /*1: periodic 0:free running */ | ||
37 | #define APBTMR_CONTROL_INT (1<<2) | ||
38 | |||
39 | /* default memory mapped register base */ | ||
40 | #define LNW_SCU_ADDR 0xFF100000 | ||
41 | #define LNW_EXT_TIMER_OFFSET 0x1B800 | ||
42 | #define APBT_DEFAULT_BASE (LNW_SCU_ADDR+LNW_EXT_TIMER_OFFSET) | ||
43 | #define LNW_EXT_TIMER_PGOFFSET 0x800 | ||
44 | |||
45 | /* APBT clock speed range from PCLK to fabric base, 25-100MHz */ | ||
46 | #define APBT_MAX_FREQ 50 | ||
47 | #define APBT_MIN_FREQ 1 | ||
48 | #define APBT_MMAP_SIZE 1024 | ||
49 | |||
50 | #define APBT_DEV_USED 1 | ||
51 | |||
52 | extern void apbt_time_init(void); | ||
53 | extern struct clock_event_device *global_clock_event; | ||
54 | extern unsigned long apbt_quick_calibrate(void); | ||
55 | extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu); | ||
56 | extern void apbt_setup_secondary_clock(void); | ||
57 | extern unsigned int boot_cpu_id; | ||
58 | extern int disable_apbt_percpu; | ||
59 | |||
60 | extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint); | ||
61 | extern void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr); | ||
62 | extern int sfi_mtimer_num; | ||
63 | |||
64 | #else /* CONFIG_APB_TIMER */ | ||
65 | |||
66 | static inline unsigned long apbt_quick_calibrate(void) {return 0; } | ||
67 | static inline void apbt_time_init(void) {return 0; } | ||
68 | |||
69 | #endif | ||
70 | #endif /* ASM_X86_APBT_H */ | ||
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index eeac829a0f44..a929c9ede33d 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h | |||
@@ -53,13 +53,6 @@ extern void threshold_interrupt(void); | |||
53 | extern void call_function_interrupt(void); | 53 | extern void call_function_interrupt(void); |
54 | extern void call_function_single_interrupt(void); | 54 | extern void call_function_single_interrupt(void); |
55 | 55 | ||
56 | /* PIC specific functions */ | ||
57 | extern void disable_8259A_irq(unsigned int irq); | ||
58 | extern void enable_8259A_irq(unsigned int irq); | ||
59 | extern int i8259A_irq_pending(unsigned int irq); | ||
60 | extern void make_8259A_irq(unsigned int irq); | ||
61 | extern void init_8259A(int aeoi); | ||
62 | |||
63 | /* IOAPIC */ | 56 | /* IOAPIC */ |
64 | #define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs)) | 57 | #define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs)) |
65 | extern unsigned long io_apic_irqs; | 58 | extern unsigned long io_apic_irqs; |
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h index 7ec65b18085d..1655147646aa 100644 --- a/arch/x86/include/asm/i8259.h +++ b/arch/x86/include/asm/i8259.h | |||
@@ -26,11 +26,6 @@ extern unsigned int cached_irq_mask; | |||
26 | 26 | ||
27 | extern raw_spinlock_t i8259A_lock; | 27 | extern raw_spinlock_t i8259A_lock; |
28 | 28 | ||
29 | extern void init_8259A(int auto_eoi); | ||
30 | extern void enable_8259A_irq(unsigned int irq); | ||
31 | extern void disable_8259A_irq(unsigned int irq); | ||
32 | extern unsigned int startup_8259A_irq(unsigned int irq); | ||
33 | |||
34 | /* the PIC may need a careful delay on some platforms, hence specific calls */ | 29 | /* the PIC may need a careful delay on some platforms, hence specific calls */ |
35 | static inline unsigned char inb_pic(unsigned int port) | 30 | static inline unsigned char inb_pic(unsigned int port) |
36 | { | 31 | { |
@@ -57,7 +52,17 @@ static inline void outb_pic(unsigned char value, unsigned int port) | |||
57 | 52 | ||
58 | extern struct irq_chip i8259A_chip; | 53 | extern struct irq_chip i8259A_chip; |
59 | 54 | ||
60 | extern void mask_8259A(void); | 55 | struct legacy_pic { |
61 | extern void unmask_8259A(void); | 56 | int nr_legacy_irqs; |
57 | struct irq_chip *chip; | ||
58 | void (*mask_all)(void); | ||
59 | void (*restore_mask)(void); | ||
60 | void (*init)(int auto_eoi); | ||
61 | int (*irq_pending)(unsigned int irq); | ||
62 | void (*make_irq)(unsigned int irq); | ||
63 | }; | ||
64 | |||
65 | extern struct legacy_pic *legacy_pic; | ||
66 | extern struct legacy_pic null_legacy_pic; | ||
62 | 67 | ||
63 | #endif /* _ASM_X86_I8259_H */ | 68 | #endif /* _ASM_X86_I8259_H */ |
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h index 5f61f6e0ffdd..35832a03a515 100644 --- a/arch/x86/include/asm/io_apic.h +++ b/arch/x86/include/asm/io_apic.h | |||
@@ -143,8 +143,6 @@ extern int noioapicreroute; | |||
143 | /* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */ | 143 | /* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */ |
144 | extern int timer_through_8259; | 144 | extern int timer_through_8259; |
145 | 145 | ||
146 | extern void io_apic_disable_legacy(void); | ||
147 | |||
148 | /* | 146 | /* |
149 | * If we use the IO-APIC for IRQ routing, disable automatic | 147 | * If we use the IO-APIC for IRQ routing, disable automatic |
150 | * assignment of PCI IRQ's. | 148 | * assignment of PCI IRQ's. |
@@ -189,6 +187,7 @@ extern struct mp_ioapic_gsi mp_gsi_routing[]; | |||
189 | int mp_find_ioapic(int gsi); | 187 | int mp_find_ioapic(int gsi); |
190 | int mp_find_ioapic_pin(int ioapic, int gsi); | 188 | int mp_find_ioapic_pin(int ioapic, int gsi); |
191 | void __init mp_register_ioapic(int id, u32 address, u32 gsi_base); | 189 | void __init mp_register_ioapic(int id, u32 address, u32 gsi_base); |
190 | extern void __init pre_init_apic_IRQ0(void); | ||
192 | 191 | ||
193 | #else /* !CONFIG_X86_IO_APIC */ | 192 | #else /* !CONFIG_X86_IO_APIC */ |
194 | 193 | ||
@@ -198,7 +197,11 @@ static const int timer_through_8259 = 0; | |||
198 | static inline void ioapic_init_mappings(void) { } | 197 | static inline void ioapic_init_mappings(void) { } |
199 | static inline void ioapic_insert_resources(void) { } | 198 | static inline void ioapic_insert_resources(void) { } |
200 | static inline void probe_nr_irqs_gsi(void) { } | 199 | static inline void probe_nr_irqs_gsi(void) { } |
200 | static inline int mp_find_ioapic(int gsi) { return 0; } | ||
201 | 201 | ||
202 | struct io_apic_irq_attr; | ||
203 | static inline int io_apic_set_pci_routing(struct device *dev, int irq, | ||
204 | struct io_apic_irq_attr *irq_attr) { return 0; } | ||
202 | #endif | 205 | #endif |
203 | 206 | ||
204 | #endif /* _ASM_X86_IO_APIC_H */ | 207 | #endif /* _ASM_X86_IO_APIC_H */ |
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h index 262292729fc4..5458380b6ef8 100644 --- a/arch/x86/include/asm/irq.h +++ b/arch/x86/include/asm/irq.h | |||
@@ -48,6 +48,5 @@ extern DECLARE_BITMAP(used_vectors, NR_VECTORS); | |||
48 | extern int vector_used_by_percpu_irq(unsigned int vector); | 48 | extern int vector_used_by_percpu_irq(unsigned int vector); |
49 | 49 | ||
50 | extern void init_ISA_irqs(void); | 50 | extern void init_ISA_irqs(void); |
51 | extern int nr_legacy_irqs; | ||
52 | 51 | ||
53 | #endif /* _ASM_X86_IRQ_H */ | 52 | #endif /* _ASM_X86_IRQ_H */ |
diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h new file mode 100644 index 000000000000..451d30e7f62d --- /dev/null +++ b/arch/x86/include/asm/mrst.h | |||
@@ -0,0 +1,19 @@ | |||
1 | /* | ||
2 | * mrst.h: Intel Moorestown platform specific setup code | ||
3 | * | ||
4 | * (C) Copyright 2009 Intel Corporation | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; version 2 | ||
9 | * of the License. | ||
10 | */ | ||
11 | #ifndef _ASM_X86_MRST_H | ||
12 | #define _ASM_X86_MRST_H | ||
13 | extern int pci_mrst_init(void); | ||
14 | int __init sfi_parse_mrtc(struct sfi_table_header *table); | ||
15 | |||
16 | #define SFI_MTMR_MAX_NUM 8 | ||
17 | #define SFI_MRTC_MAX 8 | ||
18 | |||
19 | #endif /* _ASM_X86_MRST_H */ | ||
diff --git a/arch/x86/include/asm/numaq.h b/arch/x86/include/asm/numaq.h index 13370b95ea94..37c516545ec8 100644 --- a/arch/x86/include/asm/numaq.h +++ b/arch/x86/include/asm/numaq.h | |||
@@ -30,6 +30,7 @@ | |||
30 | 30 | ||
31 | extern int found_numaq; | 31 | extern int found_numaq; |
32 | extern int get_memcfg_numaq(void); | 32 | extern int get_memcfg_numaq(void); |
33 | extern int pci_numaq_init(void); | ||
33 | 34 | ||
34 | extern void *xquad_portio; | 35 | extern void *xquad_portio; |
35 | 36 | ||
diff --git a/arch/x86/include/asm/olpc.h b/arch/x86/include/asm/olpc.h index 3a57385d9fa7..101229b0d8ed 100644 --- a/arch/x86/include/asm/olpc.h +++ b/arch/x86/include/asm/olpc.h | |||
@@ -13,7 +13,6 @@ struct olpc_platform_t { | |||
13 | 13 | ||
14 | #define OLPC_F_PRESENT 0x01 | 14 | #define OLPC_F_PRESENT 0x01 |
15 | #define OLPC_F_DCON 0x02 | 15 | #define OLPC_F_DCON 0x02 |
16 | #define OLPC_F_VSA 0x04 | ||
17 | 16 | ||
18 | #ifdef CONFIG_OLPC | 17 | #ifdef CONFIG_OLPC |
19 | 18 | ||
@@ -51,18 +50,6 @@ static inline int olpc_has_dcon(void) | |||
51 | } | 50 | } |
52 | 51 | ||
53 | /* | 52 | /* |
54 | * The VSA is software from AMD that typical Geode bioses will include. | ||
55 | * It is used to emulate the PCI bus, VGA, etc. OLPC's Open Firmware does | ||
56 | * not include the VSA; instead, PCI is emulated by the kernel. | ||
57 | * | ||
58 | * The VSA is described further in arch/x86/pci/olpc.c. | ||
59 | */ | ||
60 | static inline int olpc_has_vsa(void) | ||
61 | { | ||
62 | return (olpc_platform_info.flags & OLPC_F_VSA) ? 1 : 0; | ||
63 | } | ||
64 | |||
65 | /* | ||
66 | * The "Mass Production" version of OLPC's XO is identified as being model | 53 | * The "Mass Production" version of OLPC's XO is identified as being model |
67 | * C2. During the prototype phase, the following models (in chronological | 54 | * C2. During the prototype phase, the following models (in chronological |
68 | * order) were created: A1, B1, B2, B3, B4, C1. The A1 through B2 models | 55 | * order) were created: A1, B1, B2, B3, B4, C1. The A1 through B2 models |
@@ -87,13 +74,10 @@ static inline int olpc_has_dcon(void) | |||
87 | return 0; | 74 | return 0; |
88 | } | 75 | } |
89 | 76 | ||
90 | static inline int olpc_has_vsa(void) | ||
91 | { | ||
92 | return 0; | ||
93 | } | ||
94 | |||
95 | #endif | 77 | #endif |
96 | 78 | ||
79 | extern int pci_olpc_init(void); | ||
80 | |||
97 | /* EC related functions */ | 81 | /* EC related functions */ |
98 | 82 | ||
99 | extern int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen, | 83 | extern int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen, |
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index b4a00dd4eed5..3e002ca5a287 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h | |||
@@ -45,8 +45,15 @@ static inline int pci_proc_domain(struct pci_bus *bus) | |||
45 | 45 | ||
46 | #ifdef CONFIG_PCI | 46 | #ifdef CONFIG_PCI |
47 | extern unsigned int pcibios_assign_all_busses(void); | 47 | extern unsigned int pcibios_assign_all_busses(void); |
48 | extern int pci_legacy_init(void); | ||
49 | # ifdef CONFIG_ACPI | ||
50 | # define x86_default_pci_init pci_acpi_init | ||
51 | # else | ||
52 | # define x86_default_pci_init pci_legacy_init | ||
53 | # endif | ||
48 | #else | 54 | #else |
49 | #define pcibios_assign_all_busses() 0 | 55 | # define pcibios_assign_all_busses() 0 |
56 | # define x86_default_pci_init NULL | ||
50 | #endif | 57 | #endif |
51 | 58 | ||
52 | extern unsigned long pci_mem_start; | 59 | extern unsigned long pci_mem_start; |
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h index 05b58ccb2e82..1a0422348d6d 100644 --- a/arch/x86/include/asm/pci_x86.h +++ b/arch/x86/include/asm/pci_x86.h | |||
@@ -83,7 +83,6 @@ struct irq_routing_table { | |||
83 | 83 | ||
84 | extern unsigned int pcibios_irq_mask; | 84 | extern unsigned int pcibios_irq_mask; |
85 | 85 | ||
86 | extern int pcibios_scanned; | ||
87 | extern spinlock_t pci_config_lock; | 86 | extern spinlock_t pci_config_lock; |
88 | 87 | ||
89 | extern int (*pcibios_enable_irq)(struct pci_dev *dev); | 88 | extern int (*pcibios_enable_irq)(struct pci_dev *dev); |
@@ -106,16 +105,15 @@ extern bool port_cf9_safe; | |||
106 | extern int pci_direct_probe(void); | 105 | extern int pci_direct_probe(void); |
107 | extern void pci_direct_init(int type); | 106 | extern void pci_direct_init(int type); |
108 | extern void pci_pcbios_init(void); | 107 | extern void pci_pcbios_init(void); |
109 | extern int pci_olpc_init(void); | ||
110 | extern void __init dmi_check_pciprobe(void); | 108 | extern void __init dmi_check_pciprobe(void); |
111 | extern void __init dmi_check_skip_isa_align(void); | 109 | extern void __init dmi_check_skip_isa_align(void); |
112 | 110 | ||
113 | /* some common used subsys_initcalls */ | 111 | /* some common used subsys_initcalls */ |
114 | extern int __init pci_acpi_init(void); | 112 | extern int __init pci_acpi_init(void); |
115 | extern int __init pcibios_irq_init(void); | 113 | extern void __init pcibios_irq_init(void); |
116 | extern int __init pci_visws_init(void); | ||
117 | extern int __init pci_numaq_init(void); | ||
118 | extern int __init pcibios_init(void); | 114 | extern int __init pcibios_init(void); |
115 | extern int pci_legacy_init(void); | ||
116 | extern void pcibios_fixup_irqs(void); | ||
119 | 117 | ||
120 | /* pci-mmconfig.c */ | 118 | /* pci-mmconfig.c */ |
121 | 119 | ||
@@ -183,3 +181,17 @@ static inline void mmio_config_writel(void __iomem *pos, u32 val) | |||
183 | { | 181 | { |
184 | asm volatile("movl %%eax,(%1)" : : "a" (val), "r" (pos) : "memory"); | 182 | asm volatile("movl %%eax,(%1)" : : "a" (val), "r" (pos) : "memory"); |
185 | } | 183 | } |
184 | |||
185 | #ifdef CONFIG_PCI | ||
186 | # ifdef CONFIG_ACPI | ||
187 | # define x86_default_pci_init pci_acpi_init | ||
188 | # else | ||
189 | # define x86_default_pci_init pci_legacy_init | ||
190 | # endif | ||
191 | # define x86_default_pci_init_irq pcibios_irq_init | ||
192 | # define x86_default_pci_fixup_irqs pcibios_fixup_irqs | ||
193 | #else | ||
194 | # define x86_default_pci_init NULL | ||
195 | # define x86_default_pci_init_irq NULL | ||
196 | # define x86_default_pci_fixup_irqs NULL | ||
197 | #endif | ||
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index 18e496c98ff0..86b1506f4179 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h | |||
@@ -37,10 +37,8 @@ void setup_bios_corruption_check(void); | |||
37 | 37 | ||
38 | #ifdef CONFIG_X86_VISWS | 38 | #ifdef CONFIG_X86_VISWS |
39 | extern void visws_early_detect(void); | 39 | extern void visws_early_detect(void); |
40 | extern int is_visws_box(void); | ||
41 | #else | 40 | #else |
42 | static inline void visws_early_detect(void) { } | 41 | static inline void visws_early_detect(void) { } |
43 | static inline int is_visws_box(void) { return 0; } | ||
44 | #endif | 42 | #endif |
45 | 43 | ||
46 | extern unsigned long saved_video_mode; | 44 | extern unsigned long saved_video_mode; |
diff --git a/arch/x86/include/asm/visws/cobalt.h b/arch/x86/include/asm/visws/cobalt.h index 166adf61e770..2edb37637ead 100644 --- a/arch/x86/include/asm/visws/cobalt.h +++ b/arch/x86/include/asm/visws/cobalt.h | |||
@@ -122,4 +122,6 @@ extern char visws_board_type; | |||
122 | 122 | ||
123 | extern char visws_board_rev; | 123 | extern char visws_board_rev; |
124 | 124 | ||
125 | extern int pci_visws_init(void); | ||
126 | |||
125 | #endif /* _ASM_X86_VISWS_COBALT_H */ | 127 | #endif /* _ASM_X86_VISWS_COBALT_H */ |
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index 60cc35269083..519b54327d75 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h | |||
@@ -99,6 +99,20 @@ struct x86_init_iommu { | |||
99 | }; | 99 | }; |
100 | 100 | ||
101 | /** | 101 | /** |
102 | * struct x86_init_pci - platform specific pci init functions | ||
103 | * @arch_init: platform specific pci arch init call | ||
104 | * @init: platform specific pci subsystem init | ||
105 | * @init_irq: platform specific pci irq init | ||
106 | * @fixup_irqs: platform specific pci irq fixup | ||
107 | */ | ||
108 | struct x86_init_pci { | ||
109 | int (*arch_init)(void); | ||
110 | int (*init)(void); | ||
111 | void (*init_irq)(void); | ||
112 | void (*fixup_irqs)(void); | ||
113 | }; | ||
114 | |||
115 | /** | ||
102 | * struct x86_init_ops - functions for platform specific setup | 116 | * struct x86_init_ops - functions for platform specific setup |
103 | * | 117 | * |
104 | */ | 118 | */ |
@@ -110,6 +124,7 @@ struct x86_init_ops { | |||
110 | struct x86_init_paging paging; | 124 | struct x86_init_paging paging; |
111 | struct x86_init_timers timers; | 125 | struct x86_init_timers timers; |
112 | struct x86_init_iommu iommu; | 126 | struct x86_init_iommu iommu; |
127 | struct x86_init_pci pci; | ||
113 | }; | 128 | }; |
114 | 129 | ||
115 | /** | 130 | /** |
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index d87f09bc5a52..4c58352209e0 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile | |||
@@ -87,6 +87,7 @@ obj-$(CONFIG_VM86) += vm86_32.o | |||
87 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o | 87 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o |
88 | 88 | ||
89 | obj-$(CONFIG_HPET_TIMER) += hpet.o | 89 | obj-$(CONFIG_HPET_TIMER) += hpet.o |
90 | obj-$(CONFIG_APB_TIMER) += apb_timer.o | ||
90 | 91 | ||
91 | obj-$(CONFIG_K8_NB) += k8.o | 92 | obj-$(CONFIG_K8_NB) += k8.o |
92 | obj-$(CONFIG_DEBUG_RODATA_TEST) += test_rodata.o | 93 | obj-$(CONFIG_DEBUG_RODATA_TEST) += test_rodata.o |
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 738fcb60e708..a54d714545ff 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/ioport.h> | 35 | #include <linux/ioport.h> |
36 | #include <linux/pci.h> | 36 | #include <linux/pci.h> |
37 | 37 | ||
38 | #include <asm/pci_x86.h> | ||
38 | #include <asm/pgtable.h> | 39 | #include <asm/pgtable.h> |
39 | #include <asm/io_apic.h> | 40 | #include <asm/io_apic.h> |
40 | #include <asm/apic.h> | 41 | #include <asm/apic.h> |
@@ -1624,6 +1625,9 @@ int __init acpi_boot_init(void) | |||
1624 | 1625 | ||
1625 | acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet); | 1626 | acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet); |
1626 | 1627 | ||
1628 | if (!acpi_noirq) | ||
1629 | x86_init.pci.init = pci_acpi_init; | ||
1630 | |||
1627 | return 0; | 1631 | return 0; |
1628 | } | 1632 | } |
1629 | 1633 | ||
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c new file mode 100644 index 000000000000..4b7099526d2c --- /dev/null +++ b/arch/x86/kernel/apb_timer.c | |||
@@ -0,0 +1,784 @@ | |||
1 | /* | ||
2 | * apb_timer.c: Driver for Langwell APB timers | ||
3 | * | ||
4 | * (C) Copyright 2009 Intel Corporation | ||
5 | * Author: Jacob Pan (jacob.jun.pan@intel.com) | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; version 2 | ||
10 | * of the License. | ||
11 | * | ||
12 | * Note: | ||
13 | * Langwell is the south complex of Intel Moorestown MID platform. There are | ||
14 | * eight external timers in total that can be used by the operating system. | ||
15 | * The timer information, such as frequency and addresses, is provided to the | ||
16 | * OS via SFI tables. | ||
17 | * Timer interrupts are routed via FW/HW emulated IOAPIC independently via | ||
18 | * individual redirection table entries (RTE). | ||
19 | * Unlike HPET, there is no master counter, therefore one of the timers are | ||
20 | * used as clocksource. The overall allocation looks like: | ||
21 | * - timer 0 - NR_CPUs for per cpu timer | ||
22 | * - one timer for clocksource | ||
23 | * - one timer for watchdog driver. | ||
24 | * It is also worth notice that APB timer does not support true one-shot mode, | ||
25 | * free-running mode will be used here to emulate one-shot mode. | ||
26 | * APB timer can also be used as broadcast timer along with per cpu local APIC | ||
27 | * timer, but by default APB timer has higher rating than local APIC timers. | ||
28 | */ | ||
29 | |||
30 | #include <linux/clocksource.h> | ||
31 | #include <linux/clockchips.h> | ||
32 | #include <linux/delay.h> | ||
33 | #include <linux/errno.h> | ||
34 | #include <linux/init.h> | ||
35 | #include <linux/sysdev.h> | ||
36 | #include <linux/pm.h> | ||
37 | #include <linux/pci.h> | ||
38 | #include <linux/sfi.h> | ||
39 | #include <linux/interrupt.h> | ||
40 | #include <linux/cpu.h> | ||
41 | #include <linux/irq.h> | ||
42 | |||
43 | #include <asm/fixmap.h> | ||
44 | #include <asm/apb_timer.h> | ||
45 | |||
46 | #define APBT_MASK CLOCKSOURCE_MASK(32) | ||
47 | #define APBT_SHIFT 22 | ||
48 | #define APBT_CLOCKEVENT_RATING 150 | ||
49 | #define APBT_CLOCKSOURCE_RATING 250 | ||
50 | #define APBT_MIN_DELTA_USEC 200 | ||
51 | |||
52 | #define EVT_TO_APBT_DEV(evt) container_of(evt, struct apbt_dev, evt) | ||
53 | #define APBT_CLOCKEVENT0_NUM (0) | ||
54 | #define APBT_CLOCKEVENT1_NUM (1) | ||
55 | #define APBT_CLOCKSOURCE_NUM (2) | ||
56 | |||
57 | static unsigned long apbt_address; | ||
58 | static int apb_timer_block_enabled; | ||
59 | static void __iomem *apbt_virt_address; | ||
60 | static int phy_cs_timer_id; | ||
61 | |||
62 | /* | ||
63 | * Common DW APB timer info | ||
64 | */ | ||
65 | static uint64_t apbt_freq; | ||
66 | |||
67 | static void apbt_set_mode(enum clock_event_mode mode, | ||
68 | struct clock_event_device *evt); | ||
69 | static int apbt_next_event(unsigned long delta, | ||
70 | struct clock_event_device *evt); | ||
71 | static cycle_t apbt_read_clocksource(struct clocksource *cs); | ||
72 | static void apbt_restart_clocksource(struct clocksource *cs); | ||
73 | |||
74 | struct apbt_dev { | ||
75 | struct clock_event_device evt; | ||
76 | unsigned int num; | ||
77 | int cpu; | ||
78 | unsigned int irq; | ||
79 | unsigned int tick; | ||
80 | unsigned int count; | ||
81 | unsigned int flags; | ||
82 | char name[10]; | ||
83 | }; | ||
84 | |||
85 | int disable_apbt_percpu __cpuinitdata; | ||
86 | |||
87 | static DEFINE_PER_CPU(struct apbt_dev, cpu_apbt_dev); | ||
88 | |||
89 | #ifdef CONFIG_SMP | ||
90 | static unsigned int apbt_num_timers_used; | ||
91 | static struct apbt_dev *apbt_devs; | ||
92 | #endif | ||
93 | |||
94 | static inline unsigned long apbt_readl_reg(unsigned long a) | ||
95 | { | ||
96 | return readl(apbt_virt_address + a); | ||
97 | } | ||
98 | |||
99 | static inline void apbt_writel_reg(unsigned long d, unsigned long a) | ||
100 | { | ||
101 | writel(d, apbt_virt_address + a); | ||
102 | } | ||
103 | |||
104 | static inline unsigned long apbt_readl(int n, unsigned long a) | ||
105 | { | ||
106 | return readl(apbt_virt_address + a + n * APBTMRS_REG_SIZE); | ||
107 | } | ||
108 | |||
109 | static inline void apbt_writel(int n, unsigned long d, unsigned long a) | ||
110 | { | ||
111 | writel(d, apbt_virt_address + a + n * APBTMRS_REG_SIZE); | ||
112 | } | ||
113 | |||
114 | static inline void apbt_set_mapping(void) | ||
115 | { | ||
116 | struct sfi_timer_table_entry *mtmr; | ||
117 | |||
118 | if (apbt_virt_address) { | ||
119 | pr_debug("APBT base already mapped\n"); | ||
120 | return; | ||
121 | } | ||
122 | mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM); | ||
123 | if (mtmr == NULL) { | ||
124 | printk(KERN_ERR "Failed to get MTMR %d from SFI\n", | ||
125 | APBT_CLOCKEVENT0_NUM); | ||
126 | return; | ||
127 | } | ||
128 | apbt_address = (unsigned long)mtmr->phys_addr; | ||
129 | if (!apbt_address) { | ||
130 | printk(KERN_WARNING "No timer base from SFI, use default\n"); | ||
131 | apbt_address = APBT_DEFAULT_BASE; | ||
132 | } | ||
133 | apbt_virt_address = ioremap_nocache(apbt_address, APBT_MMAP_SIZE); | ||
134 | if (apbt_virt_address) { | ||
135 | pr_debug("Mapped APBT physical addr %p at virtual addr %p\n",\ | ||
136 | (void *)apbt_address, (void *)apbt_virt_address); | ||
137 | } else { | ||
138 | pr_debug("Failed mapping APBT phy address at %p\n",\ | ||
139 | (void *)apbt_address); | ||
140 | goto panic_noapbt; | ||
141 | } | ||
142 | apbt_freq = mtmr->freq_hz / USEC_PER_SEC; | ||
143 | sfi_free_mtmr(mtmr); | ||
144 | |||
145 | /* Now figure out the physical timer id for clocksource device */ | ||
146 | mtmr = sfi_get_mtmr(APBT_CLOCKSOURCE_NUM); | ||
147 | if (mtmr == NULL) | ||
148 | goto panic_noapbt; | ||
149 | |||
150 | /* Now figure out the physical timer id */ | ||
151 | phy_cs_timer_id = (unsigned int)(mtmr->phys_addr & 0xff) | ||
152 | / APBTMRS_REG_SIZE; | ||
153 | pr_debug("Use timer %d for clocksource\n", phy_cs_timer_id); | ||
154 | return; | ||
155 | |||
156 | panic_noapbt: | ||
157 | panic("Failed to setup APB system timer\n"); | ||
158 | |||
159 | } | ||
160 | |||
161 | static inline void apbt_clear_mapping(void) | ||
162 | { | ||
163 | iounmap(apbt_virt_address); | ||
164 | apbt_virt_address = NULL; | ||
165 | } | ||
166 | |||
167 | /* | ||
168 | * APBT timer interrupt enable / disable | ||
169 | */ | ||
170 | static inline int is_apbt_capable(void) | ||
171 | { | ||
172 | return apbt_virt_address ? 1 : 0; | ||
173 | } | ||
174 | |||
175 | static struct clocksource clocksource_apbt = { | ||
176 | .name = "apbt", | ||
177 | .rating = APBT_CLOCKSOURCE_RATING, | ||
178 | .read = apbt_read_clocksource, | ||
179 | .mask = APBT_MASK, | ||
180 | .shift = APBT_SHIFT, | ||
181 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | ||
182 | .resume = apbt_restart_clocksource, | ||
183 | }; | ||
184 | |||
185 | /* boot APB clock event device */ | ||
186 | static struct clock_event_device apbt_clockevent = { | ||
187 | .name = "apbt0", | ||
188 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, | ||
189 | .set_mode = apbt_set_mode, | ||
190 | .set_next_event = apbt_next_event, | ||
191 | .shift = APBT_SHIFT, | ||
192 | .irq = 0, | ||
193 | .rating = APBT_CLOCKEVENT_RATING, | ||
194 | }; | ||
195 | |||
196 | /* | ||
197 | * if user does not want to use per CPU apb timer, just give it a lower rating | ||
198 | * than local apic timer and skip the late per cpu timer init. | ||
199 | */ | ||
200 | static inline int __init setup_x86_mrst_timer(char *arg) | ||
201 | { | ||
202 | if (!arg) | ||
203 | return -EINVAL; | ||
204 | |||
205 | if (strcmp("apbt_only", arg) == 0) | ||
206 | disable_apbt_percpu = 0; | ||
207 | else if (strcmp("lapic_and_apbt", arg) == 0) | ||
208 | disable_apbt_percpu = 1; | ||
209 | else { | ||
210 | pr_warning("X86 MRST timer option %s not recognised" | ||
211 | " use x86_mrst_timer=apbt_only or lapic_and_apbt\n", | ||
212 | arg); | ||
213 | return -EINVAL; | ||
214 | } | ||
215 | return 0; | ||
216 | } | ||
217 | __setup("x86_mrst_timer=", setup_x86_mrst_timer); | ||
218 | |||
219 | /* | ||
220 | * start count down from 0xffff_ffff. this is done by toggling the enable bit | ||
221 | * then load initial load count to ~0. | ||
222 | */ | ||
223 | static void apbt_start_counter(int n) | ||
224 | { | ||
225 | unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL); | ||
226 | |||
227 | ctrl &= ~APBTMR_CONTROL_ENABLE; | ||
228 | apbt_writel(n, ctrl, APBTMR_N_CONTROL); | ||
229 | apbt_writel(n, ~0, APBTMR_N_LOAD_COUNT); | ||
230 | /* enable, mask interrupt */ | ||
231 | ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC; | ||
232 | ctrl |= (APBTMR_CONTROL_ENABLE | APBTMR_CONTROL_INT); | ||
233 | apbt_writel(n, ctrl, APBTMR_N_CONTROL); | ||
234 | /* read it once to get cached counter value initialized */ | ||
235 | apbt_read_clocksource(&clocksource_apbt); | ||
236 | } | ||
237 | |||
238 | static irqreturn_t apbt_interrupt_handler(int irq, void *data) | ||
239 | { | ||
240 | struct apbt_dev *dev = (struct apbt_dev *)data; | ||
241 | struct clock_event_device *aevt = &dev->evt; | ||
242 | |||
243 | if (!aevt->event_handler) { | ||
244 | printk(KERN_INFO "Spurious APBT timer interrupt on %d\n", | ||
245 | dev->num); | ||
246 | return IRQ_NONE; | ||
247 | } | ||
248 | aevt->event_handler(aevt); | ||
249 | return IRQ_HANDLED; | ||
250 | } | ||
251 | |||
252 | static void apbt_restart_clocksource(struct clocksource *cs) | ||
253 | { | ||
254 | apbt_start_counter(phy_cs_timer_id); | ||
255 | } | ||
256 | |||
257 | /* Setup IRQ routing via IOAPIC */ | ||
258 | #ifdef CONFIG_SMP | ||
259 | static void apbt_setup_irq(struct apbt_dev *adev) | ||
260 | { | ||
261 | struct irq_chip *chip; | ||
262 | struct irq_desc *desc; | ||
263 | |||
264 | /* timer0 irq has been setup early */ | ||
265 | if (adev->irq == 0) | ||
266 | return; | ||
267 | desc = irq_to_desc(adev->irq); | ||
268 | chip = get_irq_chip(adev->irq); | ||
269 | disable_irq(adev->irq); | ||
270 | desc->status |= IRQ_MOVE_PCNTXT; | ||
271 | irq_set_affinity(adev->irq, cpumask_of(adev->cpu)); | ||
272 | /* APB timer irqs are set up as mp_irqs, timer is edge triggerred */ | ||
273 | set_irq_chip_and_handler_name(adev->irq, chip, handle_edge_irq, "edge"); | ||
274 | enable_irq(adev->irq); | ||
275 | if (system_state == SYSTEM_BOOTING) | ||
276 | if (request_irq(adev->irq, apbt_interrupt_handler, | ||
277 | IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING, | ||
278 | adev->name, adev)) { | ||
279 | printk(KERN_ERR "Failed request IRQ for APBT%d\n", | ||
280 | adev->num); | ||
281 | } | ||
282 | } | ||
283 | #endif | ||
284 | |||
285 | static void apbt_enable_int(int n) | ||
286 | { | ||
287 | unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL); | ||
288 | /* clear pending intr */ | ||
289 | apbt_readl(n, APBTMR_N_EOI); | ||
290 | ctrl &= ~APBTMR_CONTROL_INT; | ||
291 | apbt_writel(n, ctrl, APBTMR_N_CONTROL); | ||
292 | } | ||
293 | |||
294 | static void apbt_disable_int(int n) | ||
295 | { | ||
296 | unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL); | ||
297 | |||
298 | ctrl |= APBTMR_CONTROL_INT; | ||
299 | apbt_writel(n, ctrl, APBTMR_N_CONTROL); | ||
300 | } | ||
301 | |||
302 | |||
303 | static int __init apbt_clockevent_register(void) | ||
304 | { | ||
305 | struct sfi_timer_table_entry *mtmr; | ||
306 | struct apbt_dev *adev = &__get_cpu_var(cpu_apbt_dev); | ||
307 | |||
308 | mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM); | ||
309 | if (mtmr == NULL) { | ||
310 | printk(KERN_ERR "Failed to get MTMR %d from SFI\n", | ||
311 | APBT_CLOCKEVENT0_NUM); | ||
312 | return -ENODEV; | ||
313 | } | ||
314 | |||
315 | /* | ||
316 | * We need to calculate the scaled math multiplication factor for | ||
317 | * nanosecond to apbt tick conversion. | ||
318 | * mult = (nsec/cycle)*2^APBT_SHIFT | ||
319 | */ | ||
320 | apbt_clockevent.mult = div_sc((unsigned long) mtmr->freq_hz | ||
321 | , NSEC_PER_SEC, APBT_SHIFT); | ||
322 | |||
323 | /* Calculate the min / max delta */ | ||
324 | apbt_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, | ||
325 | &apbt_clockevent); | ||
326 | apbt_clockevent.min_delta_ns = clockevent_delta2ns( | ||
327 | APBT_MIN_DELTA_USEC*apbt_freq, | ||
328 | &apbt_clockevent); | ||
329 | /* | ||
330 | * Start apbt with the boot cpu mask and make it | ||
331 | * global if not used for per cpu timer. | ||
332 | */ | ||
333 | apbt_clockevent.cpumask = cpumask_of(smp_processor_id()); | ||
334 | adev->num = smp_processor_id(); | ||
335 | memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device)); | ||
336 | |||
337 | if (disable_apbt_percpu) { | ||
338 | apbt_clockevent.rating = APBT_CLOCKEVENT_RATING - 100; | ||
339 | global_clock_event = &adev->evt; | ||
340 | printk(KERN_DEBUG "%s clockevent registered as global\n", | ||
341 | global_clock_event->name); | ||
342 | } | ||
343 | |||
344 | if (request_irq(apbt_clockevent.irq, apbt_interrupt_handler, | ||
345 | IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING, | ||
346 | apbt_clockevent.name, adev)) { | ||
347 | printk(KERN_ERR "Failed request IRQ for APBT%d\n", | ||
348 | apbt_clockevent.irq); | ||
349 | } | ||
350 | |||
351 | clockevents_register_device(&adev->evt); | ||
352 | /* Start APBT 0 interrupts */ | ||
353 | apbt_enable_int(APBT_CLOCKEVENT0_NUM); | ||
354 | |||
355 | sfi_free_mtmr(mtmr); | ||
356 | return 0; | ||
357 | } | ||
358 | |||
359 | #ifdef CONFIG_SMP | ||
360 | /* Should be called with per cpu */ | ||
361 | void apbt_setup_secondary_clock(void) | ||
362 | { | ||
363 | struct apbt_dev *adev; | ||
364 | struct clock_event_device *aevt; | ||
365 | int cpu; | ||
366 | |||
367 | /* Don't register boot CPU clockevent */ | ||
368 | cpu = smp_processor_id(); | ||
369 | if (cpu == boot_cpu_id) | ||
370 | return; | ||
371 | /* | ||
372 | * We need to calculate the scaled math multiplication factor for | ||
373 | * nanosecond to apbt tick conversion. | ||
374 | * mult = (nsec/cycle)*2^APBT_SHIFT | ||
375 | */ | ||
376 | printk(KERN_INFO "Init per CPU clockevent %d\n", cpu); | ||
377 | adev = &per_cpu(cpu_apbt_dev, cpu); | ||
378 | aevt = &adev->evt; | ||
379 | |||
380 | memcpy(aevt, &apbt_clockevent, sizeof(*aevt)); | ||
381 | aevt->cpumask = cpumask_of(cpu); | ||
382 | aevt->name = adev->name; | ||
383 | aevt->mode = CLOCK_EVT_MODE_UNUSED; | ||
384 | |||
385 | printk(KERN_INFO "Registering CPU %d clockevent device %s, mask %08x\n", | ||
386 | cpu, aevt->name, *(u32 *)aevt->cpumask); | ||
387 | |||
388 | apbt_setup_irq(adev); | ||
389 | |||
390 | clockevents_register_device(aevt); | ||
391 | |||
392 | apbt_enable_int(cpu); | ||
393 | |||
394 | return; | ||
395 | } | ||
396 | |||
397 | /* | ||
398 | * this notify handler process CPU hotplug events. in case of S0i3, nonboot | ||
399 | * cpus are disabled/enabled frequently, for performance reasons, we keep the | ||
400 | * per cpu timer irq registered so that we do need to do free_irq/request_irq. | ||
401 | * | ||
402 | * TODO: it might be more reliable to directly disable percpu clockevent device | ||
403 | * without the notifier chain. currently, cpu 0 may get interrupts from other | ||
404 | * cpu timers during the offline process due to the ordering of notification. | ||
405 | * the extra interrupt is harmless. | ||
406 | */ | ||
407 | static int apbt_cpuhp_notify(struct notifier_block *n, | ||
408 | unsigned long action, void *hcpu) | ||
409 | { | ||
410 | unsigned long cpu = (unsigned long)hcpu; | ||
411 | struct apbt_dev *adev = &per_cpu(cpu_apbt_dev, cpu); | ||
412 | |||
413 | switch (action & 0xf) { | ||
414 | case CPU_DEAD: | ||
415 | apbt_disable_int(cpu); | ||
416 | if (system_state == SYSTEM_RUNNING) | ||
417 | pr_debug("skipping APBT CPU %lu offline\n", cpu); | ||
418 | else if (adev) { | ||
419 | pr_debug("APBT clockevent for cpu %lu offline\n", cpu); | ||
420 | free_irq(adev->irq, adev); | ||
421 | } | ||
422 | break; | ||
423 | default: | ||
424 | pr_debug(KERN_INFO "APBT notified %lu, no action\n", action); | ||
425 | } | ||
426 | return NOTIFY_OK; | ||
427 | } | ||
428 | |||
429 | static __init int apbt_late_init(void) | ||
430 | { | ||
431 | if (disable_apbt_percpu) | ||
432 | return 0; | ||
433 | /* This notifier should be called after workqueue is ready */ | ||
434 | hotcpu_notifier(apbt_cpuhp_notify, -20); | ||
435 | return 0; | ||
436 | } | ||
437 | fs_initcall(apbt_late_init); | ||
438 | #else | ||
439 | |||
440 | void apbt_setup_secondary_clock(void) {} | ||
441 | |||
442 | #endif /* CONFIG_SMP */ | ||
443 | |||
444 | static void apbt_set_mode(enum clock_event_mode mode, | ||
445 | struct clock_event_device *evt) | ||
446 | { | ||
447 | unsigned long ctrl; | ||
448 | uint64_t delta; | ||
449 | int timer_num; | ||
450 | struct apbt_dev *adev = EVT_TO_APBT_DEV(evt); | ||
451 | |||
452 | timer_num = adev->num; | ||
453 | pr_debug("%s CPU %d timer %d mode=%d\n", | ||
454 | __func__, first_cpu(*evt->cpumask), timer_num, mode); | ||
455 | |||
456 | switch (mode) { | ||
457 | case CLOCK_EVT_MODE_PERIODIC: | ||
458 | delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * apbt_clockevent.mult; | ||
459 | delta >>= apbt_clockevent.shift; | ||
460 | ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL); | ||
461 | ctrl |= APBTMR_CONTROL_MODE_PERIODIC; | ||
462 | apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL); | ||
463 | /* | ||
464 | * DW APB p. 46, have to disable timer before load counter, | ||
465 | * may cause sync problem. | ||
466 | */ | ||
467 | ctrl &= ~APBTMR_CONTROL_ENABLE; | ||
468 | apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL); | ||
469 | udelay(1); | ||
470 | pr_debug("Setting clock period %d for HZ %d\n", (int)delta, HZ); | ||
471 | apbt_writel(timer_num, delta, APBTMR_N_LOAD_COUNT); | ||
472 | ctrl |= APBTMR_CONTROL_ENABLE; | ||
473 | apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL); | ||
474 | break; | ||
475 | /* APB timer does not have one-shot mode, use free running mode */ | ||
476 | case CLOCK_EVT_MODE_ONESHOT: | ||
477 | ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL); | ||
478 | /* | ||
479 | * set free running mode, this mode will let timer reload max | ||
480 | * timeout which will give time (3min on 25MHz clock) to rearm | ||
481 | * the next event, therefore emulate the one-shot mode. | ||
482 | */ | ||
483 | ctrl &= ~APBTMR_CONTROL_ENABLE; | ||
484 | ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC; | ||
485 | |||
486 | apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL); | ||
487 | /* write again to set free running mode */ | ||
488 | apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL); | ||
489 | |||
490 | /* | ||
491 | * DW APB p. 46, load counter with all 1s before starting free | ||
492 | * running mode. | ||
493 | */ | ||
494 | apbt_writel(timer_num, ~0, APBTMR_N_LOAD_COUNT); | ||
495 | ctrl &= ~APBTMR_CONTROL_INT; | ||
496 | ctrl |= APBTMR_CONTROL_ENABLE; | ||
497 | apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL); | ||
498 | break; | ||
499 | |||
500 | case CLOCK_EVT_MODE_UNUSED: | ||
501 | case CLOCK_EVT_MODE_SHUTDOWN: | ||
502 | apbt_disable_int(timer_num); | ||
503 | ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL); | ||
504 | ctrl &= ~APBTMR_CONTROL_ENABLE; | ||
505 | apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL); | ||
506 | break; | ||
507 | |||
508 | case CLOCK_EVT_MODE_RESUME: | ||
509 | apbt_enable_int(timer_num); | ||
510 | break; | ||
511 | } | ||
512 | } | ||
513 | |||
514 | static int apbt_next_event(unsigned long delta, | ||
515 | struct clock_event_device *evt) | ||
516 | { | ||
517 | unsigned long ctrl; | ||
518 | int timer_num; | ||
519 | |||
520 | struct apbt_dev *adev = EVT_TO_APBT_DEV(evt); | ||
521 | |||
522 | timer_num = adev->num; | ||
523 | /* Disable timer */ | ||
524 | ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL); | ||
525 | ctrl &= ~APBTMR_CONTROL_ENABLE; | ||
526 | apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL); | ||
527 | /* write new count */ | ||
528 | apbt_writel(timer_num, delta, APBTMR_N_LOAD_COUNT); | ||
529 | ctrl |= APBTMR_CONTROL_ENABLE; | ||
530 | apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL); | ||
531 | return 0; | ||
532 | } | ||
533 | |||
534 | /* | ||
535 | * APB timer clock is not in sync with pclk on Langwell, which translates to | ||
536 | * unreliable read value caused by sampling error. the error does not add up | ||
537 | * overtime and only happens when sampling a 0 as a 1 by mistake. so the time | ||
538 | * would go backwards. the following code is trying to prevent time traveling | ||
539 | * backwards. little bit paranoid. | ||
540 | */ | ||
541 | static cycle_t apbt_read_clocksource(struct clocksource *cs) | ||
542 | { | ||
543 | unsigned long t0, t1, t2; | ||
544 | static unsigned long last_read; | ||
545 | |||
546 | bad_count: | ||
547 | t1 = apbt_readl(phy_cs_timer_id, | ||
548 | APBTMR_N_CURRENT_VALUE); | ||
549 | t2 = apbt_readl(phy_cs_timer_id, | ||
550 | APBTMR_N_CURRENT_VALUE); | ||
551 | if (unlikely(t1 < t2)) { | ||
552 | pr_debug("APBT: read current count error %lx:%lx:%lx\n", | ||
553 | t1, t2, t2 - t1); | ||
554 | goto bad_count; | ||
555 | } | ||
556 | /* | ||
557 | * check against cached last read, makes sure time does not go back. | ||
558 | * it could be a normal rollover but we will do tripple check anyway | ||
559 | */ | ||
560 | if (unlikely(t2 > last_read)) { | ||
561 | /* check if we have a normal rollover */ | ||
562 | unsigned long raw_intr_status = | ||
563 | apbt_readl_reg(APBTMRS_RAW_INT_STATUS); | ||
564 | /* | ||
565 | * cs timer interrupt is masked but raw intr bit is set if | ||
566 | * rollover occurs. then we read EOI reg to clear it. | ||
567 | */ | ||
568 | if (raw_intr_status & (1 << phy_cs_timer_id)) { | ||
569 | apbt_readl(phy_cs_timer_id, APBTMR_N_EOI); | ||
570 | goto out; | ||
571 | } | ||
572 | pr_debug("APB CS going back %lx:%lx:%lx ", | ||
573 | t2, last_read, t2 - last_read); | ||
574 | bad_count_x3: | ||
575 | pr_debug(KERN_INFO "tripple check enforced\n"); | ||
576 | t0 = apbt_readl(phy_cs_timer_id, | ||
577 | APBTMR_N_CURRENT_VALUE); | ||
578 | udelay(1); | ||
579 | t1 = apbt_readl(phy_cs_timer_id, | ||
580 | APBTMR_N_CURRENT_VALUE); | ||
581 | udelay(1); | ||
582 | t2 = apbt_readl(phy_cs_timer_id, | ||
583 | APBTMR_N_CURRENT_VALUE); | ||
584 | if ((t2 > t1) || (t1 > t0)) { | ||
585 | printk(KERN_ERR "Error: APB CS tripple check failed\n"); | ||
586 | goto bad_count_x3; | ||
587 | } | ||
588 | } | ||
589 | out: | ||
590 | last_read = t2; | ||
591 | return (cycle_t)~t2; | ||
592 | } | ||
593 | |||
594 | static int apbt_clocksource_register(void) | ||
595 | { | ||
596 | u64 start, now; | ||
597 | cycle_t t1; | ||
598 | |||
599 | /* Start the counter, use timer 2 as source, timer 0/1 for event */ | ||
600 | apbt_start_counter(phy_cs_timer_id); | ||
601 | |||
602 | /* Verify whether apbt counter works */ | ||
603 | t1 = apbt_read_clocksource(&clocksource_apbt); | ||
604 | rdtscll(start); | ||
605 | |||
606 | /* | ||
607 | * We don't know the TSC frequency yet, but waiting for | ||
608 | * 200000 TSC cycles is safe: | ||
609 | * 4 GHz == 50us | ||
610 | * 1 GHz == 200us | ||
611 | */ | ||
612 | do { | ||
613 | rep_nop(); | ||
614 | rdtscll(now); | ||
615 | } while ((now - start) < 200000UL); | ||
616 | |||
617 | /* APBT is the only always on clocksource, it has to work! */ | ||
618 | if (t1 == apbt_read_clocksource(&clocksource_apbt)) | ||
619 | panic("APBT counter not counting. APBT disabled\n"); | ||
620 | |||
621 | /* | ||
622 | * initialize and register APBT clocksource | ||
623 | * convert that to ns/clock cycle | ||
624 | * mult = (ns/c) * 2^APBT_SHIFT | ||
625 | */ | ||
626 | clocksource_apbt.mult = div_sc(MSEC_PER_SEC, | ||
627 | (unsigned long) apbt_freq, APBT_SHIFT); | ||
628 | clocksource_register(&clocksource_apbt); | ||
629 | |||
630 | return 0; | ||
631 | } | ||
632 | |||
633 | /* | ||
634 | * Early setup the APBT timer, only use timer 0 for booting then switch to | ||
635 | * per CPU timer if possible. | ||
636 | * returns 1 if per cpu apbt is setup | ||
637 | * returns 0 if no per cpu apbt is chosen | ||
638 | * panic if set up failed, this is the only platform timer on Moorestown. | ||
639 | */ | ||
640 | void __init apbt_time_init(void) | ||
641 | { | ||
642 | #ifdef CONFIG_SMP | ||
643 | int i; | ||
644 | struct sfi_timer_table_entry *p_mtmr; | ||
645 | unsigned int percpu_timer; | ||
646 | struct apbt_dev *adev; | ||
647 | #endif | ||
648 | |||
649 | if (apb_timer_block_enabled) | ||
650 | return; | ||
651 | apbt_set_mapping(); | ||
652 | if (apbt_virt_address) { | ||
653 | pr_debug("Found APBT version 0x%lx\n",\ | ||
654 | apbt_readl_reg(APBTMRS_COMP_VERSION)); | ||
655 | } else | ||
656 | goto out_noapbt; | ||
657 | /* | ||
658 | * Read the frequency and check for a sane value, for ESL model | ||
659 | * we extend the possible clock range to allow time scaling. | ||
660 | */ | ||
661 | |||
662 | if (apbt_freq < APBT_MIN_FREQ || apbt_freq > APBT_MAX_FREQ) { | ||
663 | pr_debug("APBT has invalid freq 0x%llx\n", apbt_freq); | ||
664 | goto out_noapbt; | ||
665 | } | ||
666 | if (apbt_clocksource_register()) { | ||
667 | pr_debug("APBT has failed to register clocksource\n"); | ||
668 | goto out_noapbt; | ||
669 | } | ||
670 | if (!apbt_clockevent_register()) | ||
671 | apb_timer_block_enabled = 1; | ||
672 | else { | ||
673 | pr_debug("APBT has failed to register clockevent\n"); | ||
674 | goto out_noapbt; | ||
675 | } | ||
676 | #ifdef CONFIG_SMP | ||
677 | /* kernel cmdline disable apb timer, so we will use lapic timers */ | ||
678 | if (disable_apbt_percpu) { | ||
679 | printk(KERN_INFO "apbt: disabled per cpu timer\n"); | ||
680 | return; | ||
681 | } | ||
682 | pr_debug("%s: %d CPUs online\n", __func__, num_online_cpus()); | ||
683 | if (num_possible_cpus() <= sfi_mtimer_num) { | ||
684 | percpu_timer = 1; | ||
685 | apbt_num_timers_used = num_possible_cpus(); | ||
686 | } else { | ||
687 | percpu_timer = 0; | ||
688 | apbt_num_timers_used = 1; | ||
689 | adev = &per_cpu(cpu_apbt_dev, 0); | ||
690 | adev->flags &= ~APBT_DEV_USED; | ||
691 | } | ||
692 | pr_debug("%s: %d APB timers used\n", __func__, apbt_num_timers_used); | ||
693 | |||
694 | /* here we set up per CPU timer data structure */ | ||
695 | apbt_devs = kzalloc(sizeof(struct apbt_dev) * apbt_num_timers_used, | ||
696 | GFP_KERNEL); | ||
697 | if (!apbt_devs) { | ||
698 | printk(KERN_ERR "Failed to allocate APB timer devices\n"); | ||
699 | return; | ||
700 | } | ||
701 | for (i = 0; i < apbt_num_timers_used; i++) { | ||
702 | adev = &per_cpu(cpu_apbt_dev, i); | ||
703 | adev->num = i; | ||
704 | adev->cpu = i; | ||
705 | p_mtmr = sfi_get_mtmr(i); | ||
706 | if (p_mtmr) { | ||
707 | adev->tick = p_mtmr->freq_hz; | ||
708 | adev->irq = p_mtmr->irq; | ||
709 | } else | ||
710 | printk(KERN_ERR "Failed to get timer for cpu %d\n", i); | ||
711 | adev->count = 0; | ||
712 | sprintf(adev->name, "apbt%d", i); | ||
713 | } | ||
714 | #endif | ||
715 | |||
716 | return; | ||
717 | |||
718 | out_noapbt: | ||
719 | apbt_clear_mapping(); | ||
720 | apb_timer_block_enabled = 0; | ||
721 | panic("failed to enable APB timer\n"); | ||
722 | } | ||
723 | |||
724 | static inline void apbt_disable(int n) | ||
725 | { | ||
726 | if (is_apbt_capable()) { | ||
727 | unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL); | ||
728 | ctrl &= ~APBTMR_CONTROL_ENABLE; | ||
729 | apbt_writel(n, ctrl, APBTMR_N_CONTROL); | ||
730 | } | ||
731 | } | ||
732 | |||
733 | /* called before apb_timer_enable, use early map */ | ||
734 | unsigned long apbt_quick_calibrate() | ||
735 | { | ||
736 | int i, scale; | ||
737 | u64 old, new; | ||
738 | cycle_t t1, t2; | ||
739 | unsigned long khz = 0; | ||
740 | u32 loop, shift; | ||
741 | |||
742 | apbt_set_mapping(); | ||
743 | apbt_start_counter(phy_cs_timer_id); | ||
744 | |||
745 | /* check if the timer can count down, otherwise return */ | ||
746 | old = apbt_read_clocksource(&clocksource_apbt); | ||
747 | i = 10000; | ||
748 | while (--i) { | ||
749 | if (old != apbt_read_clocksource(&clocksource_apbt)) | ||
750 | break; | ||
751 | } | ||
752 | if (!i) | ||
753 | goto failed; | ||
754 | |||
755 | /* count 16 ms */ | ||
756 | loop = (apbt_freq * 1000) << 4; | ||
757 | |||
758 | /* restart the timer to ensure it won't get to 0 in the calibration */ | ||
759 | apbt_start_counter(phy_cs_timer_id); | ||
760 | |||
761 | old = apbt_read_clocksource(&clocksource_apbt); | ||
762 | old += loop; | ||
763 | |||
764 | t1 = __native_read_tsc(); | ||
765 | |||
766 | do { | ||
767 | new = apbt_read_clocksource(&clocksource_apbt); | ||
768 | } while (new < old); | ||
769 | |||
770 | t2 = __native_read_tsc(); | ||
771 | |||
772 | shift = 5; | ||
773 | if (unlikely(loop >> shift == 0)) { | ||
774 | printk(KERN_INFO | ||
775 | "APBT TSC calibration failed, not enough resolution\n"); | ||
776 | return 0; | ||
777 | } | ||
778 | scale = (int)div_u64((t2 - t1), loop >> shift); | ||
779 | khz = (scale * apbt_freq * 1000) >> shift; | ||
780 | printk(KERN_INFO "TSC freq calculated by APB timer is %lu khz\n", khz); | ||
781 | return khz; | ||
782 | failed: | ||
783 | return 0; | ||
784 | } | ||
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 6e29b2a77aa8..00187f1fcfb7 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
@@ -1390,7 +1390,7 @@ void __init enable_IR_x2apic(void) | |||
1390 | } | 1390 | } |
1391 | 1391 | ||
1392 | local_irq_save(flags); | 1392 | local_irq_save(flags); |
1393 | mask_8259A(); | 1393 | legacy_pic->mask_all(); |
1394 | mask_IO_APIC_setup(ioapic_entries); | 1394 | mask_IO_APIC_setup(ioapic_entries); |
1395 | 1395 | ||
1396 | if (dmar_table_init_ret) | 1396 | if (dmar_table_init_ret) |
@@ -1422,7 +1422,7 @@ void __init enable_IR_x2apic(void) | |||
1422 | nox2apic: | 1422 | nox2apic: |
1423 | if (!ret) /* IR enabling failed */ | 1423 | if (!ret) /* IR enabling failed */ |
1424 | restore_IO_APIC_setup(ioapic_entries); | 1424 | restore_IO_APIC_setup(ioapic_entries); |
1425 | unmask_8259A(); | 1425 | legacy_pic->restore_mask(); |
1426 | local_irq_restore(flags); | 1426 | local_irq_restore(flags); |
1427 | 1427 | ||
1428 | out: | 1428 | out: |
@@ -2018,7 +2018,7 @@ static int lapic_resume(struct sys_device *dev) | |||
2018 | } | 2018 | } |
2019 | 2019 | ||
2020 | mask_IO_APIC_setup(ioapic_entries); | 2020 | mask_IO_APIC_setup(ioapic_entries); |
2021 | mask_8259A(); | 2021 | legacy_pic->mask_all(); |
2022 | } | 2022 | } |
2023 | 2023 | ||
2024 | if (x2apic_mode) | 2024 | if (x2apic_mode) |
@@ -2062,7 +2062,7 @@ static int lapic_resume(struct sys_device *dev) | |||
2062 | 2062 | ||
2063 | if (intr_remapping_enabled) { | 2063 | if (intr_remapping_enabled) { |
2064 | reenable_intr_remapping(x2apic_mode); | 2064 | reenable_intr_remapping(x2apic_mode); |
2065 | unmask_8259A(); | 2065 | legacy_pic->restore_mask(); |
2066 | restore_IO_APIC_setup(ioapic_entries); | 2066 | restore_IO_APIC_setup(ioapic_entries); |
2067 | free_ioapic_entries(ioapic_entries); | 2067 | free_ioapic_entries(ioapic_entries); |
2068 | } | 2068 | } |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 14862f11cc4a..e4e0ddcb1546 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -143,12 +143,6 @@ static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY]; | |||
143 | static struct irq_cfg irq_cfgx[NR_IRQS]; | 143 | static struct irq_cfg irq_cfgx[NR_IRQS]; |
144 | #endif | 144 | #endif |
145 | 145 | ||
146 | void __init io_apic_disable_legacy(void) | ||
147 | { | ||
148 | nr_legacy_irqs = 0; | ||
149 | nr_irqs_gsi = 0; | ||
150 | } | ||
151 | |||
152 | int __init arch_early_irq_init(void) | 146 | int __init arch_early_irq_init(void) |
153 | { | 147 | { |
154 | struct irq_cfg *cfg; | 148 | struct irq_cfg *cfg; |
@@ -157,6 +151,11 @@ int __init arch_early_irq_init(void) | |||
157 | int node; | 151 | int node; |
158 | int i; | 152 | int i; |
159 | 153 | ||
154 | if (!legacy_pic->nr_legacy_irqs) { | ||
155 | nr_irqs_gsi = 0; | ||
156 | io_apic_irqs = ~0UL; | ||
157 | } | ||
158 | |||
160 | cfg = irq_cfgx; | 159 | cfg = irq_cfgx; |
161 | count = ARRAY_SIZE(irq_cfgx); | 160 | count = ARRAY_SIZE(irq_cfgx); |
162 | node= cpu_to_node(boot_cpu_id); | 161 | node= cpu_to_node(boot_cpu_id); |
@@ -170,7 +169,7 @@ int __init arch_early_irq_init(void) | |||
170 | * For legacy IRQ's, start with assigning irq0 to irq15 to | 169 | * For legacy IRQ's, start with assigning irq0 to irq15 to |
171 | * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0. | 170 | * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0. |
172 | */ | 171 | */ |
173 | if (i < nr_legacy_irqs) { | 172 | if (i < legacy_pic->nr_legacy_irqs) { |
174 | cfg[i].vector = IRQ0_VECTOR + i; | 173 | cfg[i].vector = IRQ0_VECTOR + i; |
175 | cpumask_set_cpu(0, cfg[i].domain); | 174 | cpumask_set_cpu(0, cfg[i].domain); |
176 | } | 175 | } |
@@ -852,7 +851,7 @@ static int __init find_isa_irq_apic(int irq, int type) | |||
852 | */ | 851 | */ |
853 | static int EISA_ELCR(unsigned int irq) | 852 | static int EISA_ELCR(unsigned int irq) |
854 | { | 853 | { |
855 | if (irq < nr_legacy_irqs) { | 854 | if (irq < legacy_pic->nr_legacy_irqs) { |
856 | unsigned int port = 0x4d0 + (irq >> 3); | 855 | unsigned int port = 0x4d0 + (irq >> 3); |
857 | return (inb(port) >> (irq & 7)) & 1; | 856 | return (inb(port) >> (irq & 7)) & 1; |
858 | } | 857 | } |
@@ -1439,7 +1438,7 @@ static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq | |||
1439 | * controllers like 8259. Now that IO-APIC can handle this irq, update | 1438 | * controllers like 8259. Now that IO-APIC can handle this irq, update |
1440 | * the cfg->domain. | 1439 | * the cfg->domain. |
1441 | */ | 1440 | */ |
1442 | if (irq < nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain)) | 1441 | if (irq < legacy_pic->nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain)) |
1443 | apic->vector_allocation_domain(0, cfg->domain); | 1442 | apic->vector_allocation_domain(0, cfg->domain); |
1444 | 1443 | ||
1445 | if (assign_irq_vector(irq, cfg, apic->target_cpus())) | 1444 | if (assign_irq_vector(irq, cfg, apic->target_cpus())) |
@@ -1463,8 +1462,8 @@ static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq | |||
1463 | } | 1462 | } |
1464 | 1463 | ||
1465 | ioapic_register_intr(irq, desc, trigger); | 1464 | ioapic_register_intr(irq, desc, trigger); |
1466 | if (irq < nr_legacy_irqs) | 1465 | if (irq < legacy_pic->nr_legacy_irqs) |
1467 | disable_8259A_irq(irq); | 1466 | legacy_pic->chip->mask(irq); |
1468 | 1467 | ||
1469 | ioapic_write_entry(apic_id, pin, entry); | 1468 | ioapic_write_entry(apic_id, pin, entry); |
1470 | } | 1469 | } |
@@ -1873,7 +1872,7 @@ __apicdebuginit(void) print_PIC(void) | |||
1873 | unsigned int v; | 1872 | unsigned int v; |
1874 | unsigned long flags; | 1873 | unsigned long flags; |
1875 | 1874 | ||
1876 | if (!nr_legacy_irqs) | 1875 | if (!legacy_pic->nr_legacy_irqs) |
1877 | return; | 1876 | return; |
1878 | 1877 | ||
1879 | printk(KERN_DEBUG "\nprinting PIC contents\n"); | 1878 | printk(KERN_DEBUG "\nprinting PIC contents\n"); |
@@ -1957,7 +1956,7 @@ void __init enable_IO_APIC(void) | |||
1957 | nr_ioapic_registers[apic] = reg_01.bits.entries+1; | 1956 | nr_ioapic_registers[apic] = reg_01.bits.entries+1; |
1958 | } | 1957 | } |
1959 | 1958 | ||
1960 | if (!nr_legacy_irqs) | 1959 | if (!legacy_pic->nr_legacy_irqs) |
1961 | return; | 1960 | return; |
1962 | 1961 | ||
1963 | for(apic = 0; apic < nr_ioapics; apic++) { | 1962 | for(apic = 0; apic < nr_ioapics; apic++) { |
@@ -2014,7 +2013,7 @@ void disable_IO_APIC(void) | |||
2014 | */ | 2013 | */ |
2015 | clear_IO_APIC(); | 2014 | clear_IO_APIC(); |
2016 | 2015 | ||
2017 | if (!nr_legacy_irqs) | 2016 | if (!legacy_pic->nr_legacy_irqs) |
2018 | return; | 2017 | return; |
2019 | 2018 | ||
2020 | /* | 2019 | /* |
@@ -2247,9 +2246,9 @@ static unsigned int startup_ioapic_irq(unsigned int irq) | |||
2247 | struct irq_cfg *cfg; | 2246 | struct irq_cfg *cfg; |
2248 | 2247 | ||
2249 | raw_spin_lock_irqsave(&ioapic_lock, flags); | 2248 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
2250 | if (irq < nr_legacy_irqs) { | 2249 | if (irq < legacy_pic->nr_legacy_irqs) { |
2251 | disable_8259A_irq(irq); | 2250 | legacy_pic->chip->mask(irq); |
2252 | if (i8259A_irq_pending(irq)) | 2251 | if (legacy_pic->irq_pending(irq)) |
2253 | was_pending = 1; | 2252 | was_pending = 1; |
2254 | } | 2253 | } |
2255 | cfg = irq_cfg(irq); | 2254 | cfg = irq_cfg(irq); |
@@ -2782,8 +2781,8 @@ static inline void init_IO_APIC_traps(void) | |||
2782 | * so default to an old-fashioned 8259 | 2781 | * so default to an old-fashioned 8259 |
2783 | * interrupt if we can.. | 2782 | * interrupt if we can.. |
2784 | */ | 2783 | */ |
2785 | if (irq < nr_legacy_irqs) | 2784 | if (irq < legacy_pic->nr_legacy_irqs) |
2786 | make_8259A_irq(irq); | 2785 | legacy_pic->make_irq(irq); |
2787 | else | 2786 | else |
2788 | /* Strange. Oh, well.. */ | 2787 | /* Strange. Oh, well.. */ |
2789 | desc->chip = &no_irq_chip; | 2788 | desc->chip = &no_irq_chip; |
@@ -2940,7 +2939,7 @@ static inline void __init check_timer(void) | |||
2940 | /* | 2939 | /* |
2941 | * get/set the timer IRQ vector: | 2940 | * get/set the timer IRQ vector: |
2942 | */ | 2941 | */ |
2943 | disable_8259A_irq(0); | 2942 | legacy_pic->chip->mask(0); |
2944 | assign_irq_vector(0, cfg, apic->target_cpus()); | 2943 | assign_irq_vector(0, cfg, apic->target_cpus()); |
2945 | 2944 | ||
2946 | /* | 2945 | /* |
@@ -2953,7 +2952,7 @@ static inline void __init check_timer(void) | |||
2953 | * automatically. | 2952 | * automatically. |
2954 | */ | 2953 | */ |
2955 | apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); | 2954 | apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); |
2956 | init_8259A(1); | 2955 | legacy_pic->init(1); |
2957 | #ifdef CONFIG_X86_32 | 2956 | #ifdef CONFIG_X86_32 |
2958 | { | 2957 | { |
2959 | unsigned int ver; | 2958 | unsigned int ver; |
@@ -3012,7 +3011,7 @@ static inline void __init check_timer(void) | |||
3012 | if (timer_irq_works()) { | 3011 | if (timer_irq_works()) { |
3013 | if (nmi_watchdog == NMI_IO_APIC) { | 3012 | if (nmi_watchdog == NMI_IO_APIC) { |
3014 | setup_nmi(); | 3013 | setup_nmi(); |
3015 | enable_8259A_irq(0); | 3014 | legacy_pic->chip->unmask(0); |
3016 | } | 3015 | } |
3017 | if (disable_timer_pin_1 > 0) | 3016 | if (disable_timer_pin_1 > 0) |
3018 | clear_IO_APIC_pin(0, pin1); | 3017 | clear_IO_APIC_pin(0, pin1); |
@@ -3035,14 +3034,14 @@ static inline void __init check_timer(void) | |||
3035 | */ | 3034 | */ |
3036 | replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2); | 3035 | replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2); |
3037 | setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); | 3036 | setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); |
3038 | enable_8259A_irq(0); | 3037 | legacy_pic->chip->unmask(0); |
3039 | if (timer_irq_works()) { | 3038 | if (timer_irq_works()) { |
3040 | apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); | 3039 | apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); |
3041 | timer_through_8259 = 1; | 3040 | timer_through_8259 = 1; |
3042 | if (nmi_watchdog == NMI_IO_APIC) { | 3041 | if (nmi_watchdog == NMI_IO_APIC) { |
3043 | disable_8259A_irq(0); | 3042 | legacy_pic->chip->mask(0); |
3044 | setup_nmi(); | 3043 | setup_nmi(); |
3045 | enable_8259A_irq(0); | 3044 | legacy_pic->chip->unmask(0); |
3046 | } | 3045 | } |
3047 | goto out; | 3046 | goto out; |
3048 | } | 3047 | } |
@@ -3050,7 +3049,7 @@ static inline void __init check_timer(void) | |||
3050 | * Cleanup, just in case ... | 3049 | * Cleanup, just in case ... |
3051 | */ | 3050 | */ |
3052 | local_irq_disable(); | 3051 | local_irq_disable(); |
3053 | disable_8259A_irq(0); | 3052 | legacy_pic->chip->mask(0); |
3054 | clear_IO_APIC_pin(apic2, pin2); | 3053 | clear_IO_APIC_pin(apic2, pin2); |
3055 | apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n"); | 3054 | apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n"); |
3056 | } | 3055 | } |
@@ -3069,22 +3068,22 @@ static inline void __init check_timer(void) | |||
3069 | 3068 | ||
3070 | lapic_register_intr(0, desc); | 3069 | lapic_register_intr(0, desc); |
3071 | apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ | 3070 | apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ |
3072 | enable_8259A_irq(0); | 3071 | legacy_pic->chip->unmask(0); |
3073 | 3072 | ||
3074 | if (timer_irq_works()) { | 3073 | if (timer_irq_works()) { |
3075 | apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); | 3074 | apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); |
3076 | goto out; | 3075 | goto out; |
3077 | } | 3076 | } |
3078 | local_irq_disable(); | 3077 | local_irq_disable(); |
3079 | disable_8259A_irq(0); | 3078 | legacy_pic->chip->mask(0); |
3080 | apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); | 3079 | apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); |
3081 | apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); | 3080 | apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); |
3082 | 3081 | ||
3083 | apic_printk(APIC_QUIET, KERN_INFO | 3082 | apic_printk(APIC_QUIET, KERN_INFO |
3084 | "...trying to set up timer as ExtINT IRQ...\n"); | 3083 | "...trying to set up timer as ExtINT IRQ...\n"); |
3085 | 3084 | ||
3086 | init_8259A(0); | 3085 | legacy_pic->init(0); |
3087 | make_8259A_irq(0); | 3086 | legacy_pic->make_irq(0); |
3088 | apic_write(APIC_LVT0, APIC_DM_EXTINT); | 3087 | apic_write(APIC_LVT0, APIC_DM_EXTINT); |
3089 | 3088 | ||
3090 | unlock_ExtINT_logic(); | 3089 | unlock_ExtINT_logic(); |
@@ -3126,7 +3125,7 @@ void __init setup_IO_APIC(void) | |||
3126 | /* | 3125 | /* |
3127 | * calling enable_IO_APIC() is moved to setup_local_APIC for BP | 3126 | * calling enable_IO_APIC() is moved to setup_local_APIC for BP |
3128 | */ | 3127 | */ |
3129 | io_apic_irqs = nr_legacy_irqs ? ~PIC_IRQS : ~0UL; | 3128 | io_apic_irqs = legacy_pic->nr_legacy_irqs ? ~PIC_IRQS : ~0UL; |
3130 | 3129 | ||
3131 | apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n"); | 3130 | apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n"); |
3132 | /* | 3131 | /* |
@@ -3137,7 +3136,7 @@ void __init setup_IO_APIC(void) | |||
3137 | sync_Arb_IDs(); | 3136 | sync_Arb_IDs(); |
3138 | setup_IO_APIC_irqs(); | 3137 | setup_IO_APIC_irqs(); |
3139 | init_IO_APIC_traps(); | 3138 | init_IO_APIC_traps(); |
3140 | if (nr_legacy_irqs) | 3139 | if (legacy_pic->nr_legacy_irqs) |
3141 | check_timer(); | 3140 | check_timer(); |
3142 | } | 3141 | } |
3143 | 3142 | ||
@@ -3928,7 +3927,7 @@ static int __io_apic_set_pci_routing(struct device *dev, int irq, | |||
3928 | /* | 3927 | /* |
3929 | * IRQs < 16 are already in the irq_2_pin[] map | 3928 | * IRQs < 16 are already in the irq_2_pin[] map |
3930 | */ | 3929 | */ |
3931 | if (irq >= nr_legacy_irqs) { | 3930 | if (irq >= legacy_pic->nr_legacy_irqs) { |
3932 | cfg = desc->chip_data; | 3931 | cfg = desc->chip_data; |
3933 | if (add_pin_to_irq_node_nopanic(cfg, node, ioapic, pin)) { | 3932 | if (add_pin_to_irq_node_nopanic(cfg, node, ioapic, pin)) { |
3934 | printk(KERN_INFO "can not add pin %d for irq %d\n", | 3933 | printk(KERN_INFO "can not add pin %d for irq %d\n", |
@@ -4302,3 +4301,24 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) | |||
4302 | 4301 | ||
4303 | nr_ioapics++; | 4302 | nr_ioapics++; |
4304 | } | 4303 | } |
4304 | |||
4305 | /* Enable IOAPIC early just for system timer */ | ||
4306 | void __init pre_init_apic_IRQ0(void) | ||
4307 | { | ||
4308 | struct irq_cfg *cfg; | ||
4309 | struct irq_desc *desc; | ||
4310 | |||
4311 | printk(KERN_INFO "Early APIC setup for system timer0\n"); | ||
4312 | #ifndef CONFIG_SMP | ||
4313 | phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid); | ||
4314 | #endif | ||
4315 | desc = irq_to_desc_alloc_node(0, 0); | ||
4316 | |||
4317 | setup_local_APIC(); | ||
4318 | |||
4319 | cfg = irq_cfg(0); | ||
4320 | add_pin_to_irq_node(cfg, 0, 0, 0); | ||
4321 | set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge"); | ||
4322 | |||
4323 | setup_IO_APIC_irq(0, 0, 0, desc, 0, 0); | ||
4324 | } | ||
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c index bd7c96b5e8d8..8aa65adbd25d 100644 --- a/arch/x86/kernel/apic/nmi.c +++ b/arch/x86/kernel/apic/nmi.c | |||
@@ -177,7 +177,7 @@ int __init check_nmi_watchdog(void) | |||
177 | error: | 177 | error: |
178 | if (nmi_watchdog == NMI_IO_APIC) { | 178 | if (nmi_watchdog == NMI_IO_APIC) { |
179 | if (!timer_through_8259) | 179 | if (!timer_through_8259) |
180 | disable_8259A_irq(0); | 180 | legacy_pic->chip->mask(0); |
181 | on_each_cpu(__acpi_nmi_disable, NULL, 1); | 181 | on_each_cpu(__acpi_nmi_disable, NULL, 1); |
182 | } | 182 | } |
183 | 183 | ||
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c index 47dd856708e5..3e28401f161c 100644 --- a/arch/x86/kernel/apic/numaq_32.c +++ b/arch/x86/kernel/apic/numaq_32.c | |||
@@ -277,6 +277,7 @@ static __init void early_check_numaq(void) | |||
277 | x86_init.mpparse.mpc_oem_pci_bus = mpc_oem_pci_bus; | 277 | x86_init.mpparse.mpc_oem_pci_bus = mpc_oem_pci_bus; |
278 | x86_init.mpparse.mpc_oem_bus_info = mpc_oem_bus_info; | 278 | x86_init.mpparse.mpc_oem_bus_info = mpc_oem_bus_info; |
279 | x86_init.timers.tsc_pre_init = numaq_tsc_init; | 279 | x86_init.timers.tsc_pre_init = numaq_tsc_init; |
280 | x86_init.pci.init = pci_numaq_init; | ||
280 | } | 281 | } |
281 | } | 282 | } |
282 | 283 | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/Kconfig b/arch/x86/kernel/cpu/cpufreq/Kconfig index f138c6c389b9..870e6cc6ad28 100644 --- a/arch/x86/kernel/cpu/cpufreq/Kconfig +++ b/arch/x86/kernel/cpu/cpufreq/Kconfig | |||
@@ -10,6 +10,20 @@ if CPU_FREQ | |||
10 | 10 | ||
11 | comment "CPUFreq processor drivers" | 11 | comment "CPUFreq processor drivers" |
12 | 12 | ||
13 | config X86_PCC_CPUFREQ | ||
14 | tristate "Processor Clocking Control interface driver" | ||
15 | depends on ACPI && ACPI_PROCESSOR | ||
16 | help | ||
17 | This driver adds support for the PCC interface. | ||
18 | |||
19 | For details, take a look at: | ||
20 | <file:Documentation/cpu-freq/pcc-cpufreq.txt>. | ||
21 | |||
22 | To compile this driver as a module, choose M here: the | ||
23 | module will be called pcc-cpufreq. | ||
24 | |||
25 | If in doubt, say N. | ||
26 | |||
13 | config X86_ACPI_CPUFREQ | 27 | config X86_ACPI_CPUFREQ |
14 | tristate "ACPI Processor P-States driver" | 28 | tristate "ACPI Processor P-States driver" |
15 | select CPU_FREQ_TABLE | 29 | select CPU_FREQ_TABLE |
diff --git a/arch/x86/kernel/cpu/cpufreq/Makefile b/arch/x86/kernel/cpu/cpufreq/Makefile index 509296df294d..1840c0a5170b 100644 --- a/arch/x86/kernel/cpu/cpufreq/Makefile +++ b/arch/x86/kernel/cpu/cpufreq/Makefile | |||
@@ -4,6 +4,7 @@ | |||
4 | 4 | ||
5 | obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o | 5 | obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o |
6 | obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o | 6 | obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o |
7 | obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o | ||
7 | obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o | 8 | obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o |
8 | obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o | 9 | obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o |
9 | obj-$(CONFIG_X86_LONGHAUL) += longhaul.o | 10 | obj-$(CONFIG_X86_LONGHAUL) += longhaul.o |
diff --git a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c new file mode 100644 index 000000000000..ff36d2979a90 --- /dev/null +++ b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c | |||
@@ -0,0 +1,620 @@ | |||
1 | /* | ||
2 | * pcc-cpufreq.c - Processor Clocking Control firmware cpufreq interface | ||
3 | * | ||
4 | * Copyright (C) 2009 Red Hat, Matthew Garrett <mjg@redhat.com> | ||
5 | * Copyright (C) 2009 Hewlett-Packard Development Company, L.P. | ||
6 | * Nagananda Chumbalkar <nagananda.chumbalkar@hp.com> | ||
7 | * | ||
8 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; version 2 of the License. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but | ||
15 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or NON | ||
17 | * INFRINGEMENT. See the GNU General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License along | ||
20 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
21 | * 675 Mass Ave, Cambridge, MA 02139, USA. | ||
22 | * | ||
23 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
24 | */ | ||
25 | |||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/init.h> | ||
29 | #include <linux/smp.h> | ||
30 | #include <linux/sched.h> | ||
31 | #include <linux/cpufreq.h> | ||
32 | #include <linux/compiler.h> | ||
33 | |||
34 | #include <linux/acpi.h> | ||
35 | #include <linux/io.h> | ||
36 | #include <linux/spinlock.h> | ||
37 | #include <linux/uaccess.h> | ||
38 | |||
39 | #include <acpi/processor.h> | ||
40 | |||
41 | #define PCC_VERSION "1.00.00" | ||
42 | #define POLL_LOOPS 300 | ||
43 | |||
44 | #define CMD_COMPLETE 0x1 | ||
45 | #define CMD_GET_FREQ 0x0 | ||
46 | #define CMD_SET_FREQ 0x1 | ||
47 | |||
48 | #define BUF_SZ 4 | ||
49 | |||
50 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ | ||
51 | "pcc-cpufreq", msg) | ||
52 | |||
53 | struct pcc_register_resource { | ||
54 | u8 descriptor; | ||
55 | u16 length; | ||
56 | u8 space_id; | ||
57 | u8 bit_width; | ||
58 | u8 bit_offset; | ||
59 | u8 access_size; | ||
60 | u64 address; | ||
61 | } __attribute__ ((packed)); | ||
62 | |||
63 | struct pcc_memory_resource { | ||
64 | u8 descriptor; | ||
65 | u16 length; | ||
66 | u8 space_id; | ||
67 | u8 resource_usage; | ||
68 | u8 type_specific; | ||
69 | u64 granularity; | ||
70 | u64 minimum; | ||
71 | u64 maximum; | ||
72 | u64 translation_offset; | ||
73 | u64 address_length; | ||
74 | } __attribute__ ((packed)); | ||
75 | |||
76 | static struct cpufreq_driver pcc_cpufreq_driver; | ||
77 | |||
78 | struct pcc_header { | ||
79 | u32 signature; | ||
80 | u16 length; | ||
81 | u8 major; | ||
82 | u8 minor; | ||
83 | u32 features; | ||
84 | u16 command; | ||
85 | u16 status; | ||
86 | u32 latency; | ||
87 | u32 minimum_time; | ||
88 | u32 maximum_time; | ||
89 | u32 nominal; | ||
90 | u32 throttled_frequency; | ||
91 | u32 minimum_frequency; | ||
92 | }; | ||
93 | |||
94 | static void __iomem *pcch_virt_addr; | ||
95 | static struct pcc_header __iomem *pcch_hdr; | ||
96 | |||
97 | static DEFINE_SPINLOCK(pcc_lock); | ||
98 | |||
99 | static struct acpi_generic_address doorbell; | ||
100 | |||
101 | static u64 doorbell_preserve; | ||
102 | static u64 doorbell_write; | ||
103 | |||
104 | static u8 OSC_UUID[16] = {0x63, 0x9B, 0x2C, 0x9F, 0x70, 0x91, 0x49, 0x1f, | ||
105 | 0xBB, 0x4F, 0xA5, 0x98, 0x2F, 0xA1, 0xB5, 0x46}; | ||
106 | |||
107 | struct pcc_cpu { | ||
108 | u32 input_offset; | ||
109 | u32 output_offset; | ||
110 | }; | ||
111 | |||
112 | static struct pcc_cpu *pcc_cpu_info; | ||
113 | |||
114 | static int pcc_cpufreq_verify(struct cpufreq_policy *policy) | ||
115 | { | ||
116 | cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, | ||
117 | policy->cpuinfo.max_freq); | ||
118 | return 0; | ||
119 | } | ||
120 | |||
121 | static inline void pcc_cmd(void) | ||
122 | { | ||
123 | u64 doorbell_value; | ||
124 | int i; | ||
125 | |||
126 | acpi_read(&doorbell_value, &doorbell); | ||
127 | acpi_write((doorbell_value & doorbell_preserve) | doorbell_write, | ||
128 | &doorbell); | ||
129 | |||
130 | for (i = 0; i < POLL_LOOPS; i++) { | ||
131 | if (ioread16(&pcch_hdr->status) & CMD_COMPLETE) | ||
132 | break; | ||
133 | } | ||
134 | } | ||
135 | |||
136 | static inline void pcc_clear_mapping(void) | ||
137 | { | ||
138 | if (pcch_virt_addr) | ||
139 | iounmap(pcch_virt_addr); | ||
140 | pcch_virt_addr = NULL; | ||
141 | } | ||
142 | |||
143 | static unsigned int pcc_get_freq(unsigned int cpu) | ||
144 | { | ||
145 | struct pcc_cpu *pcc_cpu_data; | ||
146 | unsigned int curr_freq; | ||
147 | unsigned int freq_limit; | ||
148 | u16 status; | ||
149 | u32 input_buffer; | ||
150 | u32 output_buffer; | ||
151 | |||
152 | spin_lock(&pcc_lock); | ||
153 | |||
154 | dprintk("get: get_freq for CPU %d\n", cpu); | ||
155 | pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); | ||
156 | |||
157 | input_buffer = 0x1; | ||
158 | iowrite32(input_buffer, | ||
159 | (pcch_virt_addr + pcc_cpu_data->input_offset)); | ||
160 | iowrite16(CMD_GET_FREQ, &pcch_hdr->command); | ||
161 | |||
162 | pcc_cmd(); | ||
163 | |||
164 | output_buffer = | ||
165 | ioread32(pcch_virt_addr + pcc_cpu_data->output_offset); | ||
166 | |||
167 | /* Clear the input buffer - we are done with the current command */ | ||
168 | memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ); | ||
169 | |||
170 | status = ioread16(&pcch_hdr->status); | ||
171 | if (status != CMD_COMPLETE) { | ||
172 | dprintk("get: FAILED: for CPU %d, status is %d\n", | ||
173 | cpu, status); | ||
174 | goto cmd_incomplete; | ||
175 | } | ||
176 | iowrite16(0, &pcch_hdr->status); | ||
177 | curr_freq = (((ioread32(&pcch_hdr->nominal) * (output_buffer & 0xff)) | ||
178 | / 100) * 1000); | ||
179 | |||
180 | dprintk("get: SUCCESS: (virtual) output_offset for cpu %d is " | ||
181 | "0x%x, contains a value of: 0x%x. Speed is: %d MHz\n", | ||
182 | cpu, (pcch_virt_addr + pcc_cpu_data->output_offset), | ||
183 | output_buffer, curr_freq); | ||
184 | |||
185 | freq_limit = (output_buffer >> 8) & 0xff; | ||
186 | if (freq_limit != 0xff) { | ||
187 | dprintk("get: frequency for cpu %d is being temporarily" | ||
188 | " capped at %d\n", cpu, curr_freq); | ||
189 | } | ||
190 | |||
191 | spin_unlock(&pcc_lock); | ||
192 | return curr_freq; | ||
193 | |||
194 | cmd_incomplete: | ||
195 | iowrite16(0, &pcch_hdr->status); | ||
196 | spin_unlock(&pcc_lock); | ||
197 | return -EINVAL; | ||
198 | } | ||
199 | |||
200 | static int pcc_cpufreq_target(struct cpufreq_policy *policy, | ||
201 | unsigned int target_freq, | ||
202 | unsigned int relation) | ||
203 | { | ||
204 | struct pcc_cpu *pcc_cpu_data; | ||
205 | struct cpufreq_freqs freqs; | ||
206 | u16 status; | ||
207 | u32 input_buffer; | ||
208 | int cpu; | ||
209 | |||
210 | spin_lock(&pcc_lock); | ||
211 | cpu = policy->cpu; | ||
212 | pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); | ||
213 | |||
214 | dprintk("target: CPU %d should go to target freq: %d " | ||
215 | "(virtual) input_offset is 0x%x\n", | ||
216 | cpu, target_freq, | ||
217 | (pcch_virt_addr + pcc_cpu_data->input_offset)); | ||
218 | |||
219 | freqs.new = target_freq; | ||
220 | freqs.cpu = cpu; | ||
221 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
222 | |||
223 | input_buffer = 0x1 | (((target_freq * 100) | ||
224 | / (ioread32(&pcch_hdr->nominal) * 1000)) << 8); | ||
225 | iowrite32(input_buffer, | ||
226 | (pcch_virt_addr + pcc_cpu_data->input_offset)); | ||
227 | iowrite16(CMD_SET_FREQ, &pcch_hdr->command); | ||
228 | |||
229 | pcc_cmd(); | ||
230 | |||
231 | /* Clear the input buffer - we are done with the current command */ | ||
232 | memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ); | ||
233 | |||
234 | status = ioread16(&pcch_hdr->status); | ||
235 | if (status != CMD_COMPLETE) { | ||
236 | dprintk("target: FAILED for cpu %d, with status: 0x%x\n", | ||
237 | cpu, status); | ||
238 | goto cmd_incomplete; | ||
239 | } | ||
240 | iowrite16(0, &pcch_hdr->status); | ||
241 | |||
242 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
243 | dprintk("target: was SUCCESSFUL for cpu %d\n", cpu); | ||
244 | spin_unlock(&pcc_lock); | ||
245 | |||
246 | return 0; | ||
247 | |||
248 | cmd_incomplete: | ||
249 | iowrite16(0, &pcch_hdr->status); | ||
250 | spin_unlock(&pcc_lock); | ||
251 | return -EINVAL; | ||
252 | } | ||
253 | |||
254 | static int pcc_get_offset(int cpu) | ||
255 | { | ||
256 | acpi_status status; | ||
257 | struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; | ||
258 | union acpi_object *pccp, *offset; | ||
259 | struct pcc_cpu *pcc_cpu_data; | ||
260 | struct acpi_processor *pr; | ||
261 | int ret = 0; | ||
262 | |||
263 | pr = per_cpu(processors, cpu); | ||
264 | pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); | ||
265 | |||
266 | status = acpi_evaluate_object(pr->handle, "PCCP", NULL, &buffer); | ||
267 | if (ACPI_FAILURE(status)) | ||
268 | return -ENODEV; | ||
269 | |||
270 | pccp = buffer.pointer; | ||
271 | if (!pccp || pccp->type != ACPI_TYPE_PACKAGE) { | ||
272 | ret = -ENODEV; | ||
273 | goto out_free; | ||
274 | }; | ||
275 | |||
276 | offset = &(pccp->package.elements[0]); | ||
277 | if (!offset || offset->type != ACPI_TYPE_INTEGER) { | ||
278 | ret = -ENODEV; | ||
279 | goto out_free; | ||
280 | } | ||
281 | |||
282 | pcc_cpu_data->input_offset = offset->integer.value; | ||
283 | |||
284 | offset = &(pccp->package.elements[1]); | ||
285 | if (!offset || offset->type != ACPI_TYPE_INTEGER) { | ||
286 | ret = -ENODEV; | ||
287 | goto out_free; | ||
288 | } | ||
289 | |||
290 | pcc_cpu_data->output_offset = offset->integer.value; | ||
291 | |||
292 | memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ); | ||
293 | memset_io((pcch_virt_addr + pcc_cpu_data->output_offset), 0, BUF_SZ); | ||
294 | |||
295 | dprintk("pcc_get_offset: for CPU %d: pcc_cpu_data " | ||
296 | "input_offset: 0x%x, pcc_cpu_data output_offset: 0x%x\n", | ||
297 | cpu, pcc_cpu_data->input_offset, pcc_cpu_data->output_offset); | ||
298 | out_free: | ||
299 | kfree(buffer.pointer); | ||
300 | return ret; | ||
301 | } | ||
302 | |||
303 | static int __init pcc_cpufreq_do_osc(acpi_handle *handle) | ||
304 | { | ||
305 | acpi_status status; | ||
306 | struct acpi_object_list input; | ||
307 | struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; | ||
308 | union acpi_object in_params[4]; | ||
309 | union acpi_object *out_obj; | ||
310 | u32 capabilities[2]; | ||
311 | u32 errors; | ||
312 | u32 supported; | ||
313 | int ret = 0; | ||
314 | |||
315 | input.count = 4; | ||
316 | input.pointer = in_params; | ||
317 | input.count = 4; | ||
318 | input.pointer = in_params; | ||
319 | in_params[0].type = ACPI_TYPE_BUFFER; | ||
320 | in_params[0].buffer.length = 16; | ||
321 | in_params[0].buffer.pointer = OSC_UUID; | ||
322 | in_params[1].type = ACPI_TYPE_INTEGER; | ||
323 | in_params[1].integer.value = 1; | ||
324 | in_params[2].type = ACPI_TYPE_INTEGER; | ||
325 | in_params[2].integer.value = 2; | ||
326 | in_params[3].type = ACPI_TYPE_BUFFER; | ||
327 | in_params[3].buffer.length = 8; | ||
328 | in_params[3].buffer.pointer = (u8 *)&capabilities; | ||
329 | |||
330 | capabilities[0] = OSC_QUERY_ENABLE; | ||
331 | capabilities[1] = 0x1; | ||
332 | |||
333 | status = acpi_evaluate_object(*handle, "_OSC", &input, &output); | ||
334 | if (ACPI_FAILURE(status)) | ||
335 | return -ENODEV; | ||
336 | |||
337 | if (!output.length) | ||
338 | return -ENODEV; | ||
339 | |||
340 | out_obj = output.pointer; | ||
341 | if (out_obj->type != ACPI_TYPE_BUFFER) { | ||
342 | ret = -ENODEV; | ||
343 | goto out_free; | ||
344 | } | ||
345 | |||
346 | errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0); | ||
347 | if (errors) { | ||
348 | ret = -ENODEV; | ||
349 | goto out_free; | ||
350 | } | ||
351 | |||
352 | supported = *((u32 *)(out_obj->buffer.pointer + 4)); | ||
353 | if (!(supported & 0x1)) { | ||
354 | ret = -ENODEV; | ||
355 | goto out_free; | ||
356 | } | ||
357 | |||
358 | kfree(output.pointer); | ||
359 | capabilities[0] = 0x0; | ||
360 | capabilities[1] = 0x1; | ||
361 | |||
362 | status = acpi_evaluate_object(*handle, "_OSC", &input, &output); | ||
363 | if (ACPI_FAILURE(status)) | ||
364 | return -ENODEV; | ||
365 | |||
366 | if (!output.length) | ||
367 | return -ENODEV; | ||
368 | |||
369 | out_obj = output.pointer; | ||
370 | if (out_obj->type != ACPI_TYPE_BUFFER) { | ||
371 | ret = -ENODEV; | ||
372 | goto out_free; | ||
373 | } | ||
374 | |||
375 | errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0); | ||
376 | if (errors) { | ||
377 | ret = -ENODEV; | ||
378 | goto out_free; | ||
379 | } | ||
380 | |||
381 | supported = *((u32 *)(out_obj->buffer.pointer + 4)); | ||
382 | if (!(supported & 0x1)) { | ||
383 | ret = -ENODEV; | ||
384 | goto out_free; | ||
385 | } | ||
386 | |||
387 | out_free: | ||
388 | kfree(output.pointer); | ||
389 | return ret; | ||
390 | } | ||
391 | |||
392 | static int __init pcc_cpufreq_probe(void) | ||
393 | { | ||
394 | acpi_status status; | ||
395 | struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; | ||
396 | struct pcc_memory_resource *mem_resource; | ||
397 | struct pcc_register_resource *reg_resource; | ||
398 | union acpi_object *out_obj, *member; | ||
399 | acpi_handle handle, osc_handle; | ||
400 | int ret = 0; | ||
401 | |||
402 | status = acpi_get_handle(NULL, "\\_SB", &handle); | ||
403 | if (ACPI_FAILURE(status)) | ||
404 | return -ENODEV; | ||
405 | |||
406 | status = acpi_get_handle(handle, "_OSC", &osc_handle); | ||
407 | if (ACPI_SUCCESS(status)) { | ||
408 | ret = pcc_cpufreq_do_osc(&osc_handle); | ||
409 | if (ret) | ||
410 | dprintk("probe: _OSC evaluation did not succeed\n"); | ||
411 | /* Firmware's use of _OSC is optional */ | ||
412 | ret = 0; | ||
413 | } | ||
414 | |||
415 | status = acpi_evaluate_object(handle, "PCCH", NULL, &output); | ||
416 | if (ACPI_FAILURE(status)) | ||
417 | return -ENODEV; | ||
418 | |||
419 | out_obj = output.pointer; | ||
420 | if (out_obj->type != ACPI_TYPE_PACKAGE) { | ||
421 | ret = -ENODEV; | ||
422 | goto out_free; | ||
423 | } | ||
424 | |||
425 | member = &out_obj->package.elements[0]; | ||
426 | if (member->type != ACPI_TYPE_BUFFER) { | ||
427 | ret = -ENODEV; | ||
428 | goto out_free; | ||
429 | } | ||
430 | |||
431 | mem_resource = (struct pcc_memory_resource *)member->buffer.pointer; | ||
432 | |||
433 | dprintk("probe: mem_resource descriptor: 0x%x," | ||
434 | " length: %d, space_id: %d, resource_usage: %d," | ||
435 | " type_specific: %d, granularity: 0x%llx," | ||
436 | " minimum: 0x%llx, maximum: 0x%llx," | ||
437 | " translation_offset: 0x%llx, address_length: 0x%llx\n", | ||
438 | mem_resource->descriptor, mem_resource->length, | ||
439 | mem_resource->space_id, mem_resource->resource_usage, | ||
440 | mem_resource->type_specific, mem_resource->granularity, | ||
441 | mem_resource->minimum, mem_resource->maximum, | ||
442 | mem_resource->translation_offset, | ||
443 | mem_resource->address_length); | ||
444 | |||
445 | if (mem_resource->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) { | ||
446 | ret = -ENODEV; | ||
447 | goto out_free; | ||
448 | } | ||
449 | |||
450 | pcch_virt_addr = ioremap_nocache(mem_resource->minimum, | ||
451 | mem_resource->address_length); | ||
452 | if (pcch_virt_addr == NULL) { | ||
453 | dprintk("probe: could not map shared mem region\n"); | ||
454 | goto out_free; | ||
455 | } | ||
456 | pcch_hdr = pcch_virt_addr; | ||
457 | |||
458 | dprintk("probe: PCCH header (virtual) addr: 0x%p\n", pcch_hdr); | ||
459 | dprintk("probe: PCCH header is at physical address: 0x%llx," | ||
460 | " signature: 0x%x, length: %d bytes, major: %d, minor: %d," | ||
461 | " supported features: 0x%x, command field: 0x%x," | ||
462 | " status field: 0x%x, nominal latency: %d us\n", | ||
463 | mem_resource->minimum, ioread32(&pcch_hdr->signature), | ||
464 | ioread16(&pcch_hdr->length), ioread8(&pcch_hdr->major), | ||
465 | ioread8(&pcch_hdr->minor), ioread32(&pcch_hdr->features), | ||
466 | ioread16(&pcch_hdr->command), ioread16(&pcch_hdr->status), | ||
467 | ioread32(&pcch_hdr->latency)); | ||
468 | |||
469 | dprintk("probe: min time between commands: %d us," | ||
470 | " max time between commands: %d us," | ||
471 | " nominal CPU frequency: %d MHz," | ||
472 | " minimum CPU frequency: %d MHz," | ||
473 | " minimum CPU frequency without throttling: %d MHz\n", | ||
474 | ioread32(&pcch_hdr->minimum_time), | ||
475 | ioread32(&pcch_hdr->maximum_time), | ||
476 | ioread32(&pcch_hdr->nominal), | ||
477 | ioread32(&pcch_hdr->throttled_frequency), | ||
478 | ioread32(&pcch_hdr->minimum_frequency)); | ||
479 | |||
480 | member = &out_obj->package.elements[1]; | ||
481 | if (member->type != ACPI_TYPE_BUFFER) { | ||
482 | ret = -ENODEV; | ||
483 | goto pcch_free; | ||
484 | } | ||
485 | |||
486 | reg_resource = (struct pcc_register_resource *)member->buffer.pointer; | ||
487 | |||
488 | doorbell.space_id = reg_resource->space_id; | ||
489 | doorbell.bit_width = reg_resource->bit_width; | ||
490 | doorbell.bit_offset = reg_resource->bit_offset; | ||
491 | doorbell.access_width = 64; | ||
492 | doorbell.address = reg_resource->address; | ||
493 | |||
494 | dprintk("probe: doorbell: space_id is %d, bit_width is %d, " | ||
495 | "bit_offset is %d, access_width is %d, address is 0x%llx\n", | ||
496 | doorbell.space_id, doorbell.bit_width, doorbell.bit_offset, | ||
497 | doorbell.access_width, reg_resource->address); | ||
498 | |||
499 | member = &out_obj->package.elements[2]; | ||
500 | if (member->type != ACPI_TYPE_INTEGER) { | ||
501 | ret = -ENODEV; | ||
502 | goto pcch_free; | ||
503 | } | ||
504 | |||
505 | doorbell_preserve = member->integer.value; | ||
506 | |||
507 | member = &out_obj->package.elements[3]; | ||
508 | if (member->type != ACPI_TYPE_INTEGER) { | ||
509 | ret = -ENODEV; | ||
510 | goto pcch_free; | ||
511 | } | ||
512 | |||
513 | doorbell_write = member->integer.value; | ||
514 | |||
515 | dprintk("probe: doorbell_preserve: 0x%llx," | ||
516 | " doorbell_write: 0x%llx\n", | ||
517 | doorbell_preserve, doorbell_write); | ||
518 | |||
519 | pcc_cpu_info = alloc_percpu(struct pcc_cpu); | ||
520 | if (!pcc_cpu_info) { | ||
521 | ret = -ENOMEM; | ||
522 | goto pcch_free; | ||
523 | } | ||
524 | |||
525 | printk(KERN_DEBUG "pcc-cpufreq: (v%s) driver loaded with frequency" | ||
526 | " limits: %d MHz, %d MHz\n", PCC_VERSION, | ||
527 | ioread32(&pcch_hdr->minimum_frequency), | ||
528 | ioread32(&pcch_hdr->nominal)); | ||
529 | kfree(output.pointer); | ||
530 | return ret; | ||
531 | pcch_free: | ||
532 | pcc_clear_mapping(); | ||
533 | out_free: | ||
534 | kfree(output.pointer); | ||
535 | return ret; | ||
536 | } | ||
537 | |||
538 | static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy) | ||
539 | { | ||
540 | unsigned int cpu = policy->cpu; | ||
541 | unsigned int result = 0; | ||
542 | |||
543 | if (!pcch_virt_addr) { | ||
544 | result = -1; | ||
545 | goto pcch_null; | ||
546 | } | ||
547 | |||
548 | result = pcc_get_offset(cpu); | ||
549 | if (result) { | ||
550 | dprintk("init: PCCP evaluation failed\n"); | ||
551 | goto free; | ||
552 | } | ||
553 | |||
554 | policy->max = policy->cpuinfo.max_freq = | ||
555 | ioread32(&pcch_hdr->nominal) * 1000; | ||
556 | policy->min = policy->cpuinfo.min_freq = | ||
557 | ioread32(&pcch_hdr->minimum_frequency) * 1000; | ||
558 | policy->cur = pcc_get_freq(cpu); | ||
559 | |||
560 | dprintk("init: policy->max is %d, policy->min is %d\n", | ||
561 | policy->max, policy->min); | ||
562 | |||
563 | return 0; | ||
564 | free: | ||
565 | pcc_clear_mapping(); | ||
566 | free_percpu(pcc_cpu_info); | ||
567 | pcch_null: | ||
568 | return result; | ||
569 | } | ||
570 | |||
/*
 * Per-CPU cpufreq teardown hook.  All driver state is global (PCCH mapping,
 * per-cpu offsets), released at module exit, so there is nothing to do here.
 */
static int pcc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	return 0;
}
575 | |||
576 | static struct cpufreq_driver pcc_cpufreq_driver = { | ||
577 | .flags = CPUFREQ_CONST_LOOPS, | ||
578 | .get = pcc_get_freq, | ||
579 | .verify = pcc_cpufreq_verify, | ||
580 | .target = pcc_cpufreq_target, | ||
581 | .init = pcc_cpufreq_cpu_init, | ||
582 | .exit = pcc_cpufreq_cpu_exit, | ||
583 | .name = "pcc-cpufreq", | ||
584 | .owner = THIS_MODULE, | ||
585 | }; | ||
586 | |||
587 | static int __init pcc_cpufreq_init(void) | ||
588 | { | ||
589 | int ret; | ||
590 | |||
591 | if (acpi_disabled) | ||
592 | return 0; | ||
593 | |||
594 | ret = pcc_cpufreq_probe(); | ||
595 | if (ret) { | ||
596 | dprintk("pcc_cpufreq_init: PCCH evaluation failed\n"); | ||
597 | return ret; | ||
598 | } | ||
599 | |||
600 | ret = cpufreq_register_driver(&pcc_cpufreq_driver); | ||
601 | |||
602 | return ret; | ||
603 | } | ||
604 | |||
605 | static void __exit pcc_cpufreq_exit(void) | ||
606 | { | ||
607 | cpufreq_unregister_driver(&pcc_cpufreq_driver); | ||
608 | |||
609 | pcc_clear_mapping(); | ||
610 | |||
611 | free_percpu(pcc_cpu_info); | ||
612 | } | ||
613 | |||
614 | MODULE_AUTHOR("Matthew Garrett, Naga Chumbalkar"); | ||
615 | MODULE_VERSION(PCC_VERSION); | ||
616 | MODULE_DESCRIPTION("Processor Clocking Control interface driver"); | ||
617 | MODULE_LICENSE("GPL"); | ||
618 | |||
619 | late_initcall(pcc_cpufreq_init); | ||
620 | module_exit(pcc_cpufreq_exit); | ||
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 641ccb9dddbc..b1fbdeecf6c9 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -676,7 +676,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) | |||
676 | if (c->weight != w) | 676 | if (c->weight != w) |
677 | continue; | 677 | continue; |
678 | 678 | ||
679 | for_each_bit(j, c->idxmsk, X86_PMC_IDX_MAX) { | 679 | for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) { |
680 | if (!test_bit(j, used_mask)) | 680 | if (!test_bit(j, used_mask)) |
681 | break; | 681 | break; |
682 | } | 682 | } |
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index cf6590cf4a5f..977e7544738c 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -757,7 +757,7 @@ again: | |||
757 | 757 | ||
758 | inc_irq_stat(apic_perf_irqs); | 758 | inc_irq_stat(apic_perf_irqs); |
759 | ack = status; | 759 | ack = status; |
760 | for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { | 760 | for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { |
761 | struct perf_event *event = cpuc->events[bit]; | 761 | struct perf_event *event = cpuc->events[bit]; |
762 | 762 | ||
763 | clear_bit(bit, (unsigned long *) &status); | 763 | clear_bit(bit, (unsigned long *) &status); |
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index 8c93a84bb627..fb725ee15f55 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c | |||
@@ -34,6 +34,12 @@ | |||
34 | static int i8259A_auto_eoi; | 34 | static int i8259A_auto_eoi; |
35 | DEFINE_RAW_SPINLOCK(i8259A_lock); | 35 | DEFINE_RAW_SPINLOCK(i8259A_lock); |
36 | static void mask_and_ack_8259A(unsigned int); | 36 | static void mask_and_ack_8259A(unsigned int); |
37 | static void mask_8259A(void); | ||
38 | static void unmask_8259A(void); | ||
39 | static void disable_8259A_irq(unsigned int irq); | ||
40 | static void enable_8259A_irq(unsigned int irq); | ||
41 | static void init_8259A(int auto_eoi); | ||
42 | static int i8259A_irq_pending(unsigned int irq); | ||
37 | 43 | ||
38 | struct irq_chip i8259A_chip = { | 44 | struct irq_chip i8259A_chip = { |
39 | .name = "XT-PIC", | 45 | .name = "XT-PIC", |
@@ -63,7 +69,7 @@ unsigned int cached_irq_mask = 0xffff; | |||
63 | */ | 69 | */ |
64 | unsigned long io_apic_irqs; | 70 | unsigned long io_apic_irqs; |
65 | 71 | ||
66 | void disable_8259A_irq(unsigned int irq) | 72 | static void disable_8259A_irq(unsigned int irq) |
67 | { | 73 | { |
68 | unsigned int mask = 1 << irq; | 74 | unsigned int mask = 1 << irq; |
69 | unsigned long flags; | 75 | unsigned long flags; |
@@ -77,7 +83,7 @@ void disable_8259A_irq(unsigned int irq) | |||
77 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); | 83 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); |
78 | } | 84 | } |
79 | 85 | ||
80 | void enable_8259A_irq(unsigned int irq) | 86 | static void enable_8259A_irq(unsigned int irq) |
81 | { | 87 | { |
82 | unsigned int mask = ~(1 << irq); | 88 | unsigned int mask = ~(1 << irq); |
83 | unsigned long flags; | 89 | unsigned long flags; |
@@ -91,7 +97,7 @@ void enable_8259A_irq(unsigned int irq) | |||
91 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); | 97 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); |
92 | } | 98 | } |
93 | 99 | ||
94 | int i8259A_irq_pending(unsigned int irq) | 100 | static int i8259A_irq_pending(unsigned int irq) |
95 | { | 101 | { |
96 | unsigned int mask = 1<<irq; | 102 | unsigned int mask = 1<<irq; |
97 | unsigned long flags; | 103 | unsigned long flags; |
@@ -107,7 +113,7 @@ int i8259A_irq_pending(unsigned int irq) | |||
107 | return ret; | 113 | return ret; |
108 | } | 114 | } |
109 | 115 | ||
110 | void make_8259A_irq(unsigned int irq) | 116 | static void make_8259A_irq(unsigned int irq) |
111 | { | 117 | { |
112 | disable_irq_nosync(irq); | 118 | disable_irq_nosync(irq); |
113 | io_apic_irqs &= ~(1<<irq); | 119 | io_apic_irqs &= ~(1<<irq); |
@@ -281,7 +287,7 @@ static int __init i8259A_init_sysfs(void) | |||
281 | 287 | ||
282 | device_initcall(i8259A_init_sysfs); | 288 | device_initcall(i8259A_init_sysfs); |
283 | 289 | ||
284 | void mask_8259A(void) | 290 | static void mask_8259A(void) |
285 | { | 291 | { |
286 | unsigned long flags; | 292 | unsigned long flags; |
287 | 293 | ||
@@ -293,7 +299,7 @@ void mask_8259A(void) | |||
293 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); | 299 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); |
294 | } | 300 | } |
295 | 301 | ||
296 | void unmask_8259A(void) | 302 | static void unmask_8259A(void) |
297 | { | 303 | { |
298 | unsigned long flags; | 304 | unsigned long flags; |
299 | 305 | ||
@@ -305,7 +311,7 @@ void unmask_8259A(void) | |||
305 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); | 311 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); |
306 | } | 312 | } |
307 | 313 | ||
308 | void init_8259A(int auto_eoi) | 314 | static void init_8259A(int auto_eoi) |
309 | { | 315 | { |
310 | unsigned long flags; | 316 | unsigned long flags; |
311 | 317 | ||
@@ -358,3 +364,47 @@ void init_8259A(int auto_eoi) | |||
358 | 364 | ||
359 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); | 365 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); |
360 | } | 366 | } |
367 | |||
368 | /* | ||
369 | * make i8259 a driver so that we can select pic functions at run time. the goal | ||
370 | * is to make x86 binary compatible among pc compatible and non-pc compatible | ||
371 | * platforms, such as x86 MID. | ||
372 | */ | ||
373 | |||
/* Do-nothing callbacks for the null legacy PIC.  The stray ';' after each
 * body has been dropped: an empty declaration at file scope is not valid
 * ISO C (GNU extension only) and trips -pedantic. */
static void legacy_pic_noop(void) { }
static void legacy_pic_uint_noop(unsigned int unused) { }
static void legacy_pic_int_noop(int unused) { }
377 | |||
378 | static struct irq_chip dummy_pic_chip = { | ||
379 | .name = "dummy pic", | ||
380 | .mask = legacy_pic_uint_noop, | ||
381 | .unmask = legacy_pic_uint_noop, | ||
382 | .disable = legacy_pic_uint_noop, | ||
383 | .mask_ack = legacy_pic_uint_noop, | ||
384 | }; | ||
/* irq_pending callback for the null legacy PIC: no 8259A, so no legacy IRQ
 * can ever be pending. */
static int legacy_pic_irq_pending_noop(unsigned int irq)
{
	return 0;
}
389 | |||
390 | struct legacy_pic null_legacy_pic = { | ||
391 | .nr_legacy_irqs = 0, | ||
392 | .chip = &dummy_pic_chip, | ||
393 | .mask_all = legacy_pic_noop, | ||
394 | .restore_mask = legacy_pic_noop, | ||
395 | .init = legacy_pic_int_noop, | ||
396 | .irq_pending = legacy_pic_irq_pending_noop, | ||
397 | .make_irq = legacy_pic_uint_noop, | ||
398 | }; | ||
399 | |||
400 | struct legacy_pic default_legacy_pic = { | ||
401 | .nr_legacy_irqs = NR_IRQS_LEGACY, | ||
402 | .chip = &i8259A_chip, | ||
403 | .mask_all = mask_8259A, | ||
404 | .restore_mask = unmask_8259A, | ||
405 | .init = init_8259A, | ||
406 | .irq_pending = i8259A_irq_pending, | ||
407 | .make_irq = make_8259A_irq, | ||
408 | }; | ||
409 | |||
410 | struct legacy_pic *legacy_pic = &default_legacy_pic; | ||
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index fce55d532631..ef257fc2921b 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c | |||
@@ -99,9 +99,6 @@ int vector_used_by_percpu_irq(unsigned int vector) | |||
99 | return 0; | 99 | return 0; |
100 | } | 100 | } |
101 | 101 | ||
102 | /* Number of legacy interrupts */ | ||
103 | int nr_legacy_irqs __read_mostly = NR_IRQS_LEGACY; | ||
104 | |||
105 | void __init init_ISA_irqs(void) | 102 | void __init init_ISA_irqs(void) |
106 | { | 103 | { |
107 | int i; | 104 | int i; |
@@ -109,12 +106,12 @@ void __init init_ISA_irqs(void) | |||
109 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) | 106 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) |
110 | init_bsp_APIC(); | 107 | init_bsp_APIC(); |
111 | #endif | 108 | #endif |
112 | init_8259A(0); | 109 | legacy_pic->init(0); |
113 | 110 | ||
114 | /* | 111 | /* |
115 | * 16 old-style INTA-cycle interrupts: | 112 | * 16 old-style INTA-cycle interrupts: |
116 | */ | 113 | */ |
117 | for (i = 0; i < NR_IRQS_LEGACY; i++) { | 114 | for (i = 0; i < legacy_pic->nr_legacy_irqs; i++) { |
118 | struct irq_desc *desc = irq_to_desc(i); | 115 | struct irq_desc *desc = irq_to_desc(i); |
119 | 116 | ||
120 | desc->status = IRQ_DISABLED; | 117 | desc->status = IRQ_DISABLED; |
@@ -138,7 +135,7 @@ void __init init_IRQ(void) | |||
138 | * then this vector space can be freed and re-used dynamically as the | 135 | * then this vector space can be freed and re-used dynamically as the |
139 | * irq's migrate etc. | 136 | * irq's migrate etc. |
140 | */ | 137 | */ |
141 | for (i = 0; i < nr_legacy_irqs; i++) | 138 | for (i = 0; i < legacy_pic->nr_legacy_irqs; i++) |
142 | per_cpu(vector_irq, 0)[IRQ0_VECTOR + i] = i; | 139 | per_cpu(vector_irq, 0)[IRQ0_VECTOR + i] = i; |
143 | 140 | ||
144 | x86_init.irqs.intr_init(); | 141 | x86_init.irqs.intr_init(); |
diff --git a/arch/x86/kernel/mrst.c b/arch/x86/kernel/mrst.c index 3b7078abc871..0aad8670858e 100644 --- a/arch/x86/kernel/mrst.c +++ b/arch/x86/kernel/mrst.c | |||
@@ -10,8 +10,211 @@ | |||
10 | * of the License. | 10 | * of the License. |
11 | */ | 11 | */ |
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/kernel.h> | ||
14 | #include <linux/sfi.h> | ||
15 | #include <linux/irq.h> | ||
16 | #include <linux/module.h> | ||
13 | 17 | ||
14 | #include <asm/setup.h> | 18 | #include <asm/setup.h> |
19 | #include <asm/mpspec_def.h> | ||
20 | #include <asm/hw_irq.h> | ||
21 | #include <asm/apic.h> | ||
22 | #include <asm/io_apic.h> | ||
23 | #include <asm/mrst.h> | ||
24 | #include <asm/io.h> | ||
25 | #include <asm/i8259.h> | ||
26 | #include <asm/apb_timer.h> | ||
27 | |||
28 | static u32 sfi_mtimer_usage[SFI_MTMR_MAX_NUM]; | ||
29 | static struct sfi_timer_table_entry sfi_mtimer_array[SFI_MTMR_MAX_NUM]; | ||
30 | int sfi_mtimer_num; | ||
31 | |||
32 | struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX]; | ||
33 | EXPORT_SYMBOL_GPL(sfi_mrtc_array); | ||
34 | int sfi_mrtc_num; | ||
35 | |||
36 | static inline void assign_to_mp_irq(struct mpc_intsrc *m, | ||
37 | struct mpc_intsrc *mp_irq) | ||
38 | { | ||
39 | memcpy(mp_irq, m, sizeof(struct mpc_intsrc)); | ||
40 | } | ||
41 | |||
42 | static inline int mp_irq_cmp(struct mpc_intsrc *mp_irq, | ||
43 | struct mpc_intsrc *m) | ||
44 | { | ||
45 | return memcmp(mp_irq, m, sizeof(struct mpc_intsrc)); | ||
46 | } | ||
47 | |||
48 | static void save_mp_irq(struct mpc_intsrc *m) | ||
49 | { | ||
50 | int i; | ||
51 | |||
52 | for (i = 0; i < mp_irq_entries; i++) { | ||
53 | if (!mp_irq_cmp(&mp_irqs[i], m)) | ||
54 | return; | ||
55 | } | ||
56 | |||
57 | assign_to_mp_irq(m, &mp_irqs[mp_irq_entries]); | ||
58 | if (++mp_irq_entries == MAX_IRQ_SOURCES) | ||
59 | panic("Max # of irq sources exceeded!!\n"); | ||
60 | } | ||
61 | |||
62 | /* parse all the mtimer info to a static mtimer array */ | ||
63 | static int __init sfi_parse_mtmr(struct sfi_table_header *table) | ||
64 | { | ||
65 | struct sfi_table_simple *sb; | ||
66 | struct sfi_timer_table_entry *pentry; | ||
67 | struct mpc_intsrc mp_irq; | ||
68 | int totallen; | ||
69 | |||
70 | sb = (struct sfi_table_simple *)table; | ||
71 | if (!sfi_mtimer_num) { | ||
72 | sfi_mtimer_num = SFI_GET_NUM_ENTRIES(sb, | ||
73 | struct sfi_timer_table_entry); | ||
74 | pentry = (struct sfi_timer_table_entry *) sb->pentry; | ||
75 | totallen = sfi_mtimer_num * sizeof(*pentry); | ||
76 | memcpy(sfi_mtimer_array, pentry, totallen); | ||
77 | } | ||
78 | |||
79 | printk(KERN_INFO "SFI: MTIMER info (num = %d):\n", sfi_mtimer_num); | ||
80 | pentry = sfi_mtimer_array; | ||
81 | for (totallen = 0; totallen < sfi_mtimer_num; totallen++, pentry++) { | ||
82 | printk(KERN_INFO "timer[%d]: paddr = 0x%08x, freq = %dHz," | ||
83 | " irq = %d\n", totallen, (u32)pentry->phys_addr, | ||
84 | pentry->freq_hz, pentry->irq); | ||
85 | if (!pentry->irq) | ||
86 | continue; | ||
87 | mp_irq.type = MP_IOAPIC; | ||
88 | mp_irq.irqtype = mp_INT; | ||
89 | /* triggering mode edge bit 2-3, active high polarity bit 0-1 */ | ||
90 | mp_irq.irqflag = 5; | ||
91 | mp_irq.srcbus = 0; | ||
92 | mp_irq.srcbusirq = pentry->irq; /* IRQ */ | ||
93 | mp_irq.dstapic = MP_APIC_ALL; | ||
94 | mp_irq.dstirq = pentry->irq; | ||
95 | save_mp_irq(&mp_irq); | ||
96 | } | ||
97 | |||
98 | return 0; | ||
99 | } | ||
100 | |||
101 | struct sfi_timer_table_entry *sfi_get_mtmr(int hint) | ||
102 | { | ||
103 | int i; | ||
104 | if (hint < sfi_mtimer_num) { | ||
105 | if (!sfi_mtimer_usage[hint]) { | ||
106 | pr_debug("hint taken for timer %d irq %d\n",\ | ||
107 | hint, sfi_mtimer_array[hint].irq); | ||
108 | sfi_mtimer_usage[hint] = 1; | ||
109 | return &sfi_mtimer_array[hint]; | ||
110 | } | ||
111 | } | ||
112 | /* take the first timer available */ | ||
113 | for (i = 0; i < sfi_mtimer_num;) { | ||
114 | if (!sfi_mtimer_usage[i]) { | ||
115 | sfi_mtimer_usage[i] = 1; | ||
116 | return &sfi_mtimer_array[i]; | ||
117 | } | ||
118 | i++; | ||
119 | } | ||
120 | return NULL; | ||
121 | } | ||
122 | |||
123 | void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr) | ||
124 | { | ||
125 | int i; | ||
126 | for (i = 0; i < sfi_mtimer_num;) { | ||
127 | if (mtmr->irq == sfi_mtimer_array[i].irq) { | ||
128 | sfi_mtimer_usage[i] = 0; | ||
129 | return; | ||
130 | } | ||
131 | i++; | ||
132 | } | ||
133 | } | ||
134 | |||
135 | /* parse all the mrtc info to a global mrtc array */ | ||
136 | int __init sfi_parse_mrtc(struct sfi_table_header *table) | ||
137 | { | ||
138 | struct sfi_table_simple *sb; | ||
139 | struct sfi_rtc_table_entry *pentry; | ||
140 | struct mpc_intsrc mp_irq; | ||
141 | |||
142 | int totallen; | ||
143 | |||
144 | sb = (struct sfi_table_simple *)table; | ||
145 | if (!sfi_mrtc_num) { | ||
146 | sfi_mrtc_num = SFI_GET_NUM_ENTRIES(sb, | ||
147 | struct sfi_rtc_table_entry); | ||
148 | pentry = (struct sfi_rtc_table_entry *)sb->pentry; | ||
149 | totallen = sfi_mrtc_num * sizeof(*pentry); | ||
150 | memcpy(sfi_mrtc_array, pentry, totallen); | ||
151 | } | ||
152 | |||
153 | printk(KERN_INFO "SFI: RTC info (num = %d):\n", sfi_mrtc_num); | ||
154 | pentry = sfi_mrtc_array; | ||
155 | for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) { | ||
156 | printk(KERN_INFO "RTC[%d]: paddr = 0x%08x, irq = %d\n", | ||
157 | totallen, (u32)pentry->phys_addr, pentry->irq); | ||
158 | mp_irq.type = MP_IOAPIC; | ||
159 | mp_irq.irqtype = mp_INT; | ||
160 | mp_irq.irqflag = 0; | ||
161 | mp_irq.srcbus = 0; | ||
162 | mp_irq.srcbusirq = pentry->irq; /* IRQ */ | ||
163 | mp_irq.dstapic = MP_APIC_ALL; | ||
164 | mp_irq.dstirq = pentry->irq; | ||
165 | save_mp_irq(&mp_irq); | ||
166 | } | ||
167 | return 0; | ||
168 | } | ||
169 | |||
170 | /* | ||
171 | * the secondary clock in Moorestown can be APBT or LAPIC clock, default to | ||
172 | * APBT but cmdline option can also override it. | ||
173 | */ | ||
174 | static void __cpuinit mrst_setup_secondary_clock(void) | ||
175 | { | ||
176 | /* restore default lapic clock if disabled by cmdline */ | ||
177 | if (disable_apbt_percpu) | ||
178 | return setup_secondary_APIC_clock(); | ||
179 | apbt_setup_secondary_clock(); | ||
180 | } | ||
181 | |||
182 | static unsigned long __init mrst_calibrate_tsc(void) | ||
183 | { | ||
184 | unsigned long flags, fast_calibrate; | ||
185 | |||
186 | local_irq_save(flags); | ||
187 | fast_calibrate = apbt_quick_calibrate(); | ||
188 | local_irq_restore(flags); | ||
189 | |||
190 | if (fast_calibrate) | ||
191 | return fast_calibrate; | ||
192 | |||
193 | return 0; | ||
194 | } | ||
195 | |||
196 | void __init mrst_time_init(void) | ||
197 | { | ||
198 | sfi_table_parse(SFI_SIG_MTMR, NULL, NULL, sfi_parse_mtmr); | ||
199 | pre_init_apic_IRQ0(); | ||
200 | apbt_time_init(); | ||
201 | } | ||
202 | |||
203 | void __init mrst_rtc_init(void) | ||
204 | { | ||
205 | sfi_table_parse(SFI_SIG_MRTC, NULL, NULL, sfi_parse_mrtc); | ||
206 | } | ||
207 | |||
208 | /* | ||
209 | * if we use per cpu apb timer, the bootclock already setup. if we use lapic | ||
210 | * timer and one apbt timer for broadcast, we need to set up lapic boot clock. | ||
211 | */ | ||
212 | static void __init mrst_setup_boot_clock(void) | ||
213 | { | ||
214 | pr_info("%s: per cpu apbt flag %d \n", __func__, disable_apbt_percpu); | ||
215 | if (disable_apbt_percpu) | ||
216 | setup_boot_APIC_clock(); | ||
217 | }; | ||
15 | 218 | ||
16 | /* | 219 | /* |
17 | * Moorestown specific x86_init function overrides and early setup | 220 | * Moorestown specific x86_init function overrides and early setup |
@@ -21,4 +224,17 @@ void __init x86_mrst_early_setup(void) | |||
21 | { | 224 | { |
22 | x86_init.resources.probe_roms = x86_init_noop; | 225 | x86_init.resources.probe_roms = x86_init_noop; |
23 | x86_init.resources.reserve_resources = x86_init_noop; | 226 | x86_init.resources.reserve_resources = x86_init_noop; |
227 | |||
228 | x86_init.timers.timer_init = mrst_time_init; | ||
229 | x86_init.timers.setup_percpu_clockev = mrst_setup_boot_clock; | ||
230 | |||
231 | x86_init.irqs.pre_vector_init = x86_init_noop; | ||
232 | |||
233 | x86_cpuinit.setup_percpu_clockev = mrst_setup_secondary_clock; | ||
234 | |||
235 | x86_platform.calibrate_tsc = mrst_calibrate_tsc; | ||
236 | x86_init.pci.init = pci_mrst_init; | ||
237 | x86_init.pci.fixup_irqs = x86_init_noop; | ||
238 | |||
239 | legacy_pic = &null_legacy_pic; | ||
24 | } | 240 | } |
diff --git a/arch/x86/kernel/olpc.c b/arch/x86/kernel/olpc.c index 9d1d263f786f..8297160c41b3 100644 --- a/arch/x86/kernel/olpc.c +++ b/arch/x86/kernel/olpc.c | |||
@@ -17,7 +17,9 @@ | |||
17 | #include <linux/spinlock.h> | 17 | #include <linux/spinlock.h> |
18 | #include <linux/io.h> | 18 | #include <linux/io.h> |
19 | #include <linux/string.h> | 19 | #include <linux/string.h> |
20 | |||
20 | #include <asm/geode.h> | 21 | #include <asm/geode.h> |
22 | #include <asm/setup.h> | ||
21 | #include <asm/olpc.h> | 23 | #include <asm/olpc.h> |
22 | 24 | ||
23 | #ifdef CONFIG_OPEN_FIRMWARE | 25 | #ifdef CONFIG_OPEN_FIRMWARE |
@@ -243,9 +245,11 @@ static int __init olpc_init(void) | |||
243 | olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0, | 245 | olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0, |
244 | (unsigned char *) &olpc_platform_info.ecver, 1); | 246 | (unsigned char *) &olpc_platform_info.ecver, 1); |
245 | 247 | ||
246 | /* check to see if the VSA exists */ | 248 | #ifdef CONFIG_PCI_OLPC |
247 | if (cs5535_has_vsa2()) | 249 | /* If the VSA exists let it emulate PCI, if not emulate in kernel */ |
248 | olpc_platform_info.flags |= OLPC_F_VSA; | 250 | if (!cs5535_has_vsa2()) |
251 | x86_init.pci.arch_init = pci_olpc_init; | ||
252 | #endif | ||
249 | 253 | ||
250 | printk(KERN_INFO "OLPC board revision %s%X (EC=%x)\n", | 254 | printk(KERN_INFO "OLPC board revision %s%X (EC=%x)\n", |
251 | ((olpc_platform_info.boardrev & 0xf) < 8) ? "pre" : "", | 255 | ((olpc_platform_info.boardrev & 0xf) < 8) ? "pre" : "", |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index a435c76d714e..a02e80c3c54b 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -48,6 +48,7 @@ | |||
48 | #include <linux/err.h> | 48 | #include <linux/err.h> |
49 | #include <linux/nmi.h> | 49 | #include <linux/nmi.h> |
50 | #include <linux/tboot.h> | 50 | #include <linux/tboot.h> |
51 | #include <linux/stackprotector.h> | ||
51 | 52 | ||
52 | #include <asm/acpi.h> | 53 | #include <asm/acpi.h> |
53 | #include <asm/desc.h> | 54 | #include <asm/desc.h> |
@@ -67,6 +68,7 @@ | |||
67 | #include <linux/mc146818rtc.h> | 68 | #include <linux/mc146818rtc.h> |
68 | 69 | ||
69 | #include <asm/smpboot_hooks.h> | 70 | #include <asm/smpboot_hooks.h> |
71 | #include <asm/i8259.h> | ||
70 | 72 | ||
71 | #ifdef CONFIG_X86_32 | 73 | #ifdef CONFIG_X86_32 |
72 | u8 apicid_2_node[MAX_APICID]; | 74 | u8 apicid_2_node[MAX_APICID]; |
@@ -291,9 +293,9 @@ notrace static void __cpuinit start_secondary(void *unused) | |||
291 | check_tsc_sync_target(); | 293 | check_tsc_sync_target(); |
292 | 294 | ||
293 | if (nmi_watchdog == NMI_IO_APIC) { | 295 | if (nmi_watchdog == NMI_IO_APIC) { |
294 | disable_8259A_irq(0); | 296 | legacy_pic->chip->mask(0); |
295 | enable_NMI_through_LVT0(); | 297 | enable_NMI_through_LVT0(); |
296 | enable_8259A_irq(0); | 298 | legacy_pic->chip->unmask(0); |
297 | } | 299 | } |
298 | 300 | ||
299 | #ifdef CONFIG_X86_32 | 301 | #ifdef CONFIG_X86_32 |
@@ -329,6 +331,9 @@ notrace static void __cpuinit start_secondary(void *unused) | |||
329 | /* enable local interrupts */ | 331 | /* enable local interrupts */ |
330 | local_irq_enable(); | 332 | local_irq_enable(); |
331 | 333 | ||
334 | /* to prevent fake stack check failure in clock setup */ | ||
335 | boot_init_stack_canary(); | ||
336 | |||
332 | x86_cpuinit.setup_percpu_clockev(); | 337 | x86_cpuinit.setup_percpu_clockev(); |
333 | 338 | ||
334 | wmb(); | 339 | wmb(); |
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c index ab38ce0984fa..e680ea52db9b 100644 --- a/arch/x86/kernel/visws_quirks.c +++ b/arch/x86/kernel/visws_quirks.c | |||
@@ -49,11 +49,6 @@ extern int no_broadcast; | |||
49 | char visws_board_type = -1; | 49 | char visws_board_type = -1; |
50 | char visws_board_rev = -1; | 50 | char visws_board_rev = -1; |
51 | 51 | ||
52 | int is_visws_box(void) | ||
53 | { | ||
54 | return visws_board_type >= 0; | ||
55 | } | ||
56 | |||
57 | static void __init visws_time_init(void) | 52 | static void __init visws_time_init(void) |
58 | { | 53 | { |
59 | printk(KERN_INFO "Starting Cobalt Timer system clock\n"); | 54 | printk(KERN_INFO "Starting Cobalt Timer system clock\n"); |
@@ -242,6 +237,8 @@ void __init visws_early_detect(void) | |||
242 | x86_init.irqs.pre_vector_init = visws_pre_intr_init; | 237 | x86_init.irqs.pre_vector_init = visws_pre_intr_init; |
243 | x86_init.irqs.trap_init = visws_trap_init; | 238 | x86_init.irqs.trap_init = visws_trap_init; |
244 | x86_init.timers.timer_init = visws_time_init; | 239 | x86_init.timers.timer_init = visws_time_init; |
240 | x86_init.pci.init = pci_visws_init; | ||
241 | x86_init.pci.init_irq = x86_init_noop; | ||
245 | 242 | ||
246 | /* | 243 | /* |
247 | * Install reboot quirks: | 244 | * Install reboot quirks: |
@@ -508,7 +505,7 @@ static struct irq_chip cobalt_irq_type = { | |||
508 | */ | 505 | */ |
509 | static unsigned int startup_piix4_master_irq(unsigned int irq) | 506 | static unsigned int startup_piix4_master_irq(unsigned int irq) |
510 | { | 507 | { |
511 | init_8259A(0); | 508 | legacy_pic->init(0); |
512 | 509 | ||
513 | return startup_cobalt_irq(irq); | 510 | return startup_cobalt_irq(irq); |
514 | } | 511 | } |
@@ -532,9 +529,6 @@ static struct irq_chip piix4_master_irq_type = { | |||
532 | 529 | ||
533 | static struct irq_chip piix4_virtual_irq_type = { | 530 | static struct irq_chip piix4_virtual_irq_type = { |
534 | .name = "PIIX4-virtual", | 531 | .name = "PIIX4-virtual", |
535 | .shutdown = disable_8259A_irq, | ||
536 | .enable = enable_8259A_irq, | ||
537 | .disable = disable_8259A_irq, | ||
538 | }; | 532 | }; |
539 | 533 | ||
540 | 534 | ||
@@ -609,7 +603,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id) | |||
609 | handle_IRQ_event(realirq, desc->action); | 603 | handle_IRQ_event(realirq, desc->action); |
610 | 604 | ||
611 | if (!(desc->status & IRQ_DISABLED)) | 605 | if (!(desc->status & IRQ_DISABLED)) |
612 | enable_8259A_irq(realirq); | 606 | legacy_pic->chip->unmask(realirq); |
613 | 607 | ||
614 | return IRQ_HANDLED; | 608 | return IRQ_HANDLED; |
615 | 609 | ||
@@ -628,6 +622,12 @@ static struct irqaction cascade_action = { | |||
628 | .name = "cascade", | 622 | .name = "cascade", |
629 | }; | 623 | }; |
630 | 624 | ||
625 | static inline void set_piix4_virtual_irq_type(void) | ||
626 | { | ||
627 | piix4_virtual_irq_type.shutdown = i8259A_chip.mask; | ||
628 | piix4_virtual_irq_type.enable = i8259A_chip.unmask; | ||
629 | piix4_virtual_irq_type.disable = i8259A_chip.mask; | ||
630 | } | ||
631 | 631 | ||
632 | void init_VISWS_APIC_irqs(void) | 632 | void init_VISWS_APIC_irqs(void) |
633 | { | 633 | { |
@@ -653,6 +653,7 @@ void init_VISWS_APIC_irqs(void) | |||
653 | desc->chip = &piix4_master_irq_type; | 653 | desc->chip = &piix4_master_irq_type; |
654 | } | 654 | } |
655 | else if (i < CO_IRQ_APIC0) { | 655 | else if (i < CO_IRQ_APIC0) { |
656 | set_piix4_virtual_irq_type(); | ||
656 | desc->chip = &piix4_virtual_irq_type; | 657 | desc->chip = &piix4_virtual_irq_type; |
657 | } | 658 | } |
658 | else if (IS_CO_APIC(i)) { | 659 | else if (IS_CO_APIC(i)) { |
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index ee5746c94628..61a1e8c7e19f 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c | |||
@@ -4,9 +4,11 @@ | |||
4 | * For licencing details see kernel-base/COPYING | 4 | * For licencing details see kernel-base/COPYING |
5 | */ | 5 | */ |
6 | #include <linux/init.h> | 6 | #include <linux/init.h> |
7 | #include <linux/ioport.h> | ||
7 | 8 | ||
8 | #include <asm/bios_ebda.h> | 9 | #include <asm/bios_ebda.h> |
9 | #include <asm/paravirt.h> | 10 | #include <asm/paravirt.h> |
11 | #include <asm/pci_x86.h> | ||
10 | #include <asm/mpspec.h> | 12 | #include <asm/mpspec.h> |
11 | #include <asm/setup.h> | 13 | #include <asm/setup.h> |
12 | #include <asm/apic.h> | 14 | #include <asm/apic.h> |
@@ -70,6 +72,12 @@ struct x86_init_ops x86_init __initdata = { | |||
70 | .iommu = { | 72 | .iommu = { |
71 | .iommu_init = iommu_init_noop, | 73 | .iommu_init = iommu_init_noop, |
72 | }, | 74 | }, |
75 | |||
76 | .pci = { | ||
77 | .init = x86_default_pci_init, | ||
78 | .init_irq = x86_default_pci_init_irq, | ||
79 | .fixup_irqs = x86_default_pci_fixup_irqs, | ||
80 | }, | ||
73 | }; | 81 | }; |
74 | 82 | ||
75 | struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = { | 83 | struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = { |
diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile index 0b7d3e9593e1..b110d97fb925 100644 --- a/arch/x86/pci/Makefile +++ b/arch/x86/pci/Makefile | |||
@@ -13,6 +13,8 @@ obj-$(CONFIG_X86_VISWS) += visws.o | |||
13 | 13 | ||
14 | obj-$(CONFIG_X86_NUMAQ) += numaq_32.o | 14 | obj-$(CONFIG_X86_NUMAQ) += numaq_32.o |
15 | 15 | ||
16 | obj-$(CONFIG_X86_MRST) += mrst.o | ||
17 | |||
16 | obj-y += common.o early.o | 18 | obj-y += common.o early.o |
17 | obj-y += amd_bus.o bus_numa.o | 19 | obj-y += amd_bus.o bus_numa.o |
18 | 20 | ||
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 5f11ff6f5389..6e22454bfaa6 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c | |||
@@ -298,17 +298,14 @@ int __init pci_acpi_init(void) | |||
298 | { | 298 | { |
299 | struct pci_dev *dev = NULL; | 299 | struct pci_dev *dev = NULL; |
300 | 300 | ||
301 | if (pcibios_scanned) | ||
302 | return 0; | ||
303 | |||
304 | if (acpi_noirq) | 301 | if (acpi_noirq) |
305 | return 0; | 302 | return -ENODEV; |
306 | 303 | ||
307 | printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n"); | 304 | printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n"); |
308 | acpi_irq_penalty_init(); | 305 | acpi_irq_penalty_init(); |
309 | pcibios_scanned++; | ||
310 | pcibios_enable_irq = acpi_pci_irq_enable; | 306 | pcibios_enable_irq = acpi_pci_irq_enable; |
311 | pcibios_disable_irq = acpi_pci_irq_disable; | 307 | pcibios_disable_irq = acpi_pci_irq_disable; |
308 | x86_init.pci.init_irq = x86_init_noop; | ||
312 | 309 | ||
313 | if (pci_routeirq) { | 310 | if (pci_routeirq) { |
314 | /* | 311 | /* |
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index 3736176acaab..294e10cb11e1 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c | |||
@@ -72,12 +72,6 @@ struct pci_ops pci_root_ops = { | |||
72 | }; | 72 | }; |
73 | 73 | ||
74 | /* | 74 | /* |
75 | * legacy, numa, and acpi all want to call pcibios_scan_root | ||
76 | * from their initcalls. This flag prevents that. | ||
77 | */ | ||
78 | int pcibios_scanned; | ||
79 | |||
80 | /* | ||
81 | * This interrupt-safe spinlock protects all accesses to PCI | 75 | * This interrupt-safe spinlock protects all accesses to PCI |
82 | * configuration space. | 76 | * configuration space. |
83 | */ | 77 | */ |
diff --git a/arch/x86/pci/init.c b/arch/x86/pci/init.c index 25a1f8efed4a..adb62aaa7ecd 100644 --- a/arch/x86/pci/init.c +++ b/arch/x86/pci/init.c | |||
@@ -1,6 +1,7 @@ | |||
1 | #include <linux/pci.h> | 1 | #include <linux/pci.h> |
2 | #include <linux/init.h> | 2 | #include <linux/init.h> |
3 | #include <asm/pci_x86.h> | 3 | #include <asm/pci_x86.h> |
4 | #include <asm/x86_init.h> | ||
4 | 5 | ||
5 | /* arch_initcall has too random ordering, so call the initializers | 6 | /* arch_initcall has too random ordering, so call the initializers |
6 | in the right sequence from here. */ | 7 | in the right sequence from here. */ |
@@ -15,10 +16,9 @@ static __init int pci_arch_init(void) | |||
15 | if (!(pci_probe & PCI_PROBE_NOEARLY)) | 16 | if (!(pci_probe & PCI_PROBE_NOEARLY)) |
16 | pci_mmcfg_early_init(); | 17 | pci_mmcfg_early_init(); |
17 | 18 | ||
18 | #ifdef CONFIG_PCI_OLPC | 19 | if (x86_init.pci.arch_init && !x86_init.pci.arch_init()) |
19 | if (!pci_olpc_init()) | 20 | return 0; |
20 | return 0; /* skip additional checks if it's an XO */ | 21 | |
21 | #endif | ||
22 | #ifdef CONFIG_PCI_BIOS | 22 | #ifdef CONFIG_PCI_BIOS |
23 | pci_pcbios_init(); | 23 | pci_pcbios_init(); |
24 | #endif | 24 | #endif |
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index b02f6d8ac922..8b107521d24e 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c | |||
@@ -53,7 +53,7 @@ struct irq_router_handler { | |||
53 | int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device); | 53 | int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device); |
54 | }; | 54 | }; |
55 | 55 | ||
56 | int (*pcibios_enable_irq)(struct pci_dev *dev) = NULL; | 56 | int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq; |
57 | void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL; | 57 | void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL; |
58 | 58 | ||
59 | /* | 59 | /* |
@@ -1018,7 +1018,7 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign) | |||
1018 | return 1; | 1018 | return 1; |
1019 | } | 1019 | } |
1020 | 1020 | ||
1021 | static void __init pcibios_fixup_irqs(void) | 1021 | void __init pcibios_fixup_irqs(void) |
1022 | { | 1022 | { |
1023 | struct pci_dev *dev = NULL; | 1023 | struct pci_dev *dev = NULL; |
1024 | u8 pin; | 1024 | u8 pin; |
@@ -1112,12 +1112,12 @@ static struct dmi_system_id __initdata pciirq_dmi_table[] = { | |||
1112 | { } | 1112 | { } |
1113 | }; | 1113 | }; |
1114 | 1114 | ||
1115 | int __init pcibios_irq_init(void) | 1115 | void __init pcibios_irq_init(void) |
1116 | { | 1116 | { |
1117 | DBG(KERN_DEBUG "PCI: IRQ init\n"); | 1117 | DBG(KERN_DEBUG "PCI: IRQ init\n"); |
1118 | 1118 | ||
1119 | if (pcibios_enable_irq || raw_pci_ops == NULL) | 1119 | if (raw_pci_ops == NULL) |
1120 | return 0; | 1120 | return; |
1121 | 1121 | ||
1122 | dmi_check_system(pciirq_dmi_table); | 1122 | dmi_check_system(pciirq_dmi_table); |
1123 | 1123 | ||
@@ -1144,9 +1144,7 @@ int __init pcibios_irq_init(void) | |||
1144 | pirq_table = NULL; | 1144 | pirq_table = NULL; |
1145 | } | 1145 | } |
1146 | 1146 | ||
1147 | pcibios_enable_irq = pirq_enable_irq; | 1147 | x86_init.pci.fixup_irqs(); |
1148 | |||
1149 | pcibios_fixup_irqs(); | ||
1150 | 1148 | ||
1151 | if (io_apic_assign_pci_irqs && pci_routeirq) { | 1149 | if (io_apic_assign_pci_irqs && pci_routeirq) { |
1152 | struct pci_dev *dev = NULL; | 1150 | struct pci_dev *dev = NULL; |
@@ -1159,8 +1157,6 @@ int __init pcibios_irq_init(void) | |||
1159 | for_each_pci_dev(dev) | 1157 | for_each_pci_dev(dev) |
1160 | pirq_enable_irq(dev); | 1158 | pirq_enable_irq(dev); |
1161 | } | 1159 | } |
1162 | |||
1163 | return 0; | ||
1164 | } | 1160 | } |
1165 | 1161 | ||
1166 | static void pirq_penalize_isa_irq(int irq, int active) | 1162 | static void pirq_penalize_isa_irq(int irq, int active) |
diff --git a/arch/x86/pci/legacy.c b/arch/x86/pci/legacy.c index 4061bb0f267d..0db5eaf54560 100644 --- a/arch/x86/pci/legacy.c +++ b/arch/x86/pci/legacy.c | |||
@@ -35,16 +35,13 @@ static void __devinit pcibios_fixup_peer_bridges(void) | |||
35 | } | 35 | } |
36 | } | 36 | } |
37 | 37 | ||
38 | static int __init pci_legacy_init(void) | 38 | int __init pci_legacy_init(void) |
39 | { | 39 | { |
40 | if (!raw_pci_ops) { | 40 | if (!raw_pci_ops) { |
41 | printk("PCI: System does not support PCI\n"); | 41 | printk("PCI: System does not support PCI\n"); |
42 | return 0; | 42 | return 0; |
43 | } | 43 | } |
44 | 44 | ||
45 | if (pcibios_scanned++) | ||
46 | return 0; | ||
47 | |||
48 | printk("PCI: Probing PCI hardware\n"); | 45 | printk("PCI: Probing PCI hardware\n"); |
49 | pci_root_bus = pcibios_scan_root(0); | 46 | pci_root_bus = pcibios_scan_root(0); |
50 | if (pci_root_bus) | 47 | if (pci_root_bus) |
@@ -55,18 +52,15 @@ static int __init pci_legacy_init(void) | |||
55 | 52 | ||
56 | int __init pci_subsys_init(void) | 53 | int __init pci_subsys_init(void) |
57 | { | 54 | { |
58 | #ifdef CONFIG_X86_NUMAQ | 55 | /* |
59 | pci_numaq_init(); | 56 | * The init function returns an non zero value when |
60 | #endif | 57 | * pci_legacy_init should be invoked. |
61 | #ifdef CONFIG_ACPI | 58 | */ |
62 | pci_acpi_init(); | 59 | if (x86_init.pci.init()) |
63 | #endif | 60 | pci_legacy_init(); |
64 | #ifdef CONFIG_X86_VISWS | 61 | |
65 | pci_visws_init(); | ||
66 | #endif | ||
67 | pci_legacy_init(); | ||
68 | pcibios_fixup_peer_bridges(); | 62 | pcibios_fixup_peer_bridges(); |
69 | pcibios_irq_init(); | 63 | x86_init.pci.init_irq(); |
70 | pcibios_init(); | 64 | pcibios_init(); |
71 | 65 | ||
72 | return 0; | 66 | return 0; |
diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c new file mode 100644 index 000000000000..8bf2fcb88d04 --- /dev/null +++ b/arch/x86/pci/mrst.c | |||
@@ -0,0 +1,262 @@ | |||
1 | /* | ||
2 | * Moorestown PCI support | ||
3 | * Copyright (c) 2008 Intel Corporation | ||
4 | * Jesse Barnes <jesse.barnes@intel.com> | ||
5 | * | ||
6 | * Moorestown has an interesting PCI implementation: | ||
7 | * - configuration space is memory mapped (as defined by MCFG) | ||
8 | * - Lincroft devices also have a real, type 1 configuration space | ||
9 | * - Early Lincroft silicon has a type 1 access bug that will cause | ||
10 | * a hang if non-existent devices are accessed | ||
11 | * - some devices have the "fixed BAR" capability, which means | ||
12 | * they can't be relocated or modified; check for that during | ||
13 | * BAR sizing | ||
14 | * | ||
15 | * So, we use the MCFG space for all reads and writes, but also send | ||
16 | * Lincroft writes to type 1 space. But only read/write if the device | ||
17 | * actually exists, otherwise return all 1s for reads and bit bucket | ||
18 | * the writes. | ||
19 | */ | ||
20 | |||
21 | #include <linux/sched.h> | ||
22 | #include <linux/pci.h> | ||
23 | #include <linux/ioport.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/dmi.h> | ||
26 | |||
27 | #include <asm/acpi.h> | ||
28 | #include <asm/segment.h> | ||
29 | #include <asm/io.h> | ||
30 | #include <asm/smp.h> | ||
31 | #include <asm/pci_x86.h> | ||
32 | #include <asm/hw_irq.h> | ||
33 | #include <asm/io_apic.h> | ||
34 | |||
35 | #define PCIE_CAP_OFFSET 0x100 | ||
36 | |||
37 | /* Fixed BAR fields */ | ||
38 | #define PCIE_VNDR_CAP_ID_FIXED_BAR 0x00 /* Fixed BAR (TBD) */ | ||
39 | #define PCI_FIXED_BAR_0_SIZE 0x04 | ||
40 | #define PCI_FIXED_BAR_1_SIZE 0x08 | ||
41 | #define PCI_FIXED_BAR_2_SIZE 0x0c | ||
42 | #define PCI_FIXED_BAR_3_SIZE 0x10 | ||
43 | #define PCI_FIXED_BAR_4_SIZE 0x14 | ||
44 | #define PCI_FIXED_BAR_5_SIZE 0x1c | ||
45 | |||
46 | /** | ||
47 | * fixed_bar_cap - return the offset of the fixed BAR cap if found | ||
48 | * @bus: PCI bus | ||
49 | * @devfn: device in question | ||
50 | * | ||
51 | * Look for the fixed BAR cap on @bus and @devfn, returning its offset | ||
52 | * if found or 0 otherwise. | ||
53 | */ | ||
54 | static int fixed_bar_cap(struct pci_bus *bus, unsigned int devfn) | ||
55 | { | ||
56 | int pos; | ||
57 | u32 pcie_cap = 0, cap_data; | ||
58 | |||
59 | pos = PCIE_CAP_OFFSET; | ||
60 | |||
61 | if (!raw_pci_ext_ops) | ||
62 | return 0; | ||
63 | |||
64 | while (pos) { | ||
65 | if (raw_pci_ext_ops->read(pci_domain_nr(bus), bus->number, | ||
66 | devfn, pos, 4, &pcie_cap)) | ||
67 | return 0; | ||
68 | |||
69 | if (pcie_cap == 0xffffffff) | ||
70 | return 0; | ||
71 | |||
72 | if (PCI_EXT_CAP_ID(pcie_cap) == PCI_EXT_CAP_ID_VNDR) { | ||
73 | raw_pci_ext_ops->read(pci_domain_nr(bus), bus->number, | ||
74 | devfn, pos + 4, 4, &cap_data); | ||
75 | if ((cap_data & 0xffff) == PCIE_VNDR_CAP_ID_FIXED_BAR) | ||
76 | return pos; | ||
77 | } | ||
78 | |||
79 | pos = pcie_cap >> 20; | ||
80 | } | ||
81 | |||
82 | return 0; | ||
83 | } | ||
84 | |||
85 | static int pci_device_update_fixed(struct pci_bus *bus, unsigned int devfn, | ||
86 | int reg, int len, u32 val, int offset) | ||
87 | { | ||
88 | u32 size; | ||
89 | unsigned int domain, busnum; | ||
90 | int bar = (reg - PCI_BASE_ADDRESS_0) >> 2; | ||
91 | |||
92 | domain = pci_domain_nr(bus); | ||
93 | busnum = bus->number; | ||
94 | |||
95 | if (val == ~0 && len == 4) { | ||
96 | unsigned long decode; | ||
97 | |||
98 | raw_pci_ext_ops->read(domain, busnum, devfn, | ||
99 | offset + 8 + (bar * 4), 4, &size); | ||
100 | |||
101 | /* Turn the size into a decode pattern for the sizing code */ | ||
102 | if (size) { | ||
103 | decode = size - 1; | ||
104 | decode |= decode >> 1; | ||
105 | decode |= decode >> 2; | ||
106 | decode |= decode >> 4; | ||
107 | decode |= decode >> 8; | ||
108 | decode |= decode >> 16; | ||
109 | decode++; | ||
110 | decode = ~(decode - 1); | ||
111 | } else { | ||
112 | decode = ~0; | ||
113 | } | ||
114 | |||
115 | /* | ||
116 | * If val is all ones, the core code is trying to size the reg, | ||
117 | * so update the mmconfig space with the real size. | ||
118 | * | ||
119 | * Note: this assumes the fixed size we got is a power of two. | ||
120 | */ | ||
121 | return raw_pci_ext_ops->write(domain, busnum, devfn, reg, 4, | ||
122 | decode); | ||
123 | } | ||
124 | |||
125 | /* This is some other kind of BAR write, so just do it. */ | ||
126 | return raw_pci_ext_ops->write(domain, busnum, devfn, reg, len, val); | ||
127 | } | ||
128 | |||
129 | /** | ||
130 | * type1_access_ok - check whether to use type 1 | ||
131 | * @bus: bus number | ||
132 | * @devfn: device & function in question | ||
133 | * | ||
134 | * If the bus is on a Lincroft chip and it exists, or is not on a Lincroft at | ||
135 | * all, the we can go ahead with any reads & writes. If it's on a Lincroft, | ||
136 | * but doesn't exist, avoid the access altogether to keep the chip from | ||
137 | * hanging. | ||
138 | */ | ||
139 | static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg) | ||
140 | { | ||
141 | /* This is a workaround for A0 LNC bug where PCI status register does | ||
142 | * not have new CAP bit set. can not be written by SW either. | ||
143 | * | ||
144 | * PCI header type in real LNC indicates a single function device, this | ||
145 | * will prevent probing other devices under the same function in PCI | ||
146 | * shim. Therefore, use the header type in shim instead. | ||
147 | */ | ||
148 | if (reg >= 0x100 || reg == PCI_STATUS || reg == PCI_HEADER_TYPE) | ||
149 | return 0; | ||
150 | if (bus == 0 && (devfn == PCI_DEVFN(2, 0) || devfn == PCI_DEVFN(0, 0))) | ||
151 | return 1; | ||
152 | return 0; /* langwell on others */ | ||
153 | } | ||
154 | |||
155 | static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, | ||
156 | int size, u32 *value) | ||
157 | { | ||
158 | if (type1_access_ok(bus->number, devfn, where)) | ||
159 | return pci_direct_conf1.read(pci_domain_nr(bus), bus->number, | ||
160 | devfn, where, size, value); | ||
161 | return raw_pci_ext_ops->read(pci_domain_nr(bus), bus->number, | ||
162 | devfn, where, size, value); | ||
163 | } | ||
164 | |||
165 | static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, | ||
166 | int size, u32 value) | ||
167 | { | ||
168 | int offset; | ||
169 | |||
170 | /* On MRST, there is no PCI ROM BAR, this will cause a subsequent read | ||
171 | * to ROM BAR return 0 then being ignored. | ||
172 | */ | ||
173 | if (where == PCI_ROM_ADDRESS) | ||
174 | return 0; | ||
175 | |||
176 | /* | ||
177 | * Devices with fixed BARs need special handling: | ||
178 | * - BAR sizing code will save, write ~0, read size, restore | ||
179 | * - so writes to fixed BARs need special handling | ||
180 | * - other writes to fixed BAR devices should go through mmconfig | ||
181 | */ | ||
182 | offset = fixed_bar_cap(bus, devfn); | ||
183 | if (offset && | ||
184 | (where >= PCI_BASE_ADDRESS_0 && where <= PCI_BASE_ADDRESS_5)) { | ||
185 | return pci_device_update_fixed(bus, devfn, where, size, value, | ||
186 | offset); | ||
187 | } | ||
188 | |||
189 | /* | ||
190 | * On Moorestown update both real & mmconfig space | ||
191 | * Note: early Lincroft silicon can't handle type 1 accesses to | ||
192 | * non-existent devices, so just eat the write in that case. | ||
193 | */ | ||
194 | if (type1_access_ok(bus->number, devfn, where)) | ||
195 | return pci_direct_conf1.write(pci_domain_nr(bus), bus->number, | ||
196 | devfn, where, size, value); | ||
197 | return raw_pci_ext_ops->write(pci_domain_nr(bus), bus->number, devfn, | ||
198 | where, size, value); | ||
199 | } | ||
200 | |||
201 | static int mrst_pci_irq_enable(struct pci_dev *dev) | ||
202 | { | ||
203 | u8 pin; | ||
204 | struct io_apic_irq_attr irq_attr; | ||
205 | |||
206 | pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); | ||
207 | |||
208 | /* MRST only have IOAPIC, the PCI irq lines are 1:1 mapped to | ||
209 | * IOAPIC RTE entries, so we just enable RTE for the device. | ||
210 | */ | ||
211 | irq_attr.ioapic = mp_find_ioapic(dev->irq); | ||
212 | irq_attr.ioapic_pin = dev->irq; | ||
213 | irq_attr.trigger = 1; /* level */ | ||
214 | irq_attr.polarity = 1; /* active low */ | ||
215 | io_apic_set_pci_routing(&dev->dev, dev->irq, &irq_attr); | ||
216 | |||
217 | return 0; | ||
218 | } | ||
219 | |||
220 | struct pci_ops pci_mrst_ops = { | ||
221 | .read = pci_read, | ||
222 | .write = pci_write, | ||
223 | }; | ||
224 | |||
225 | /** | ||
226 | * pci_mrst_init - installs pci_mrst_ops | ||
227 | * | ||
228 | * Moorestown has an interesting PCI implementation (see above). | ||
229 | * Called when the early platform detection installs it. | ||
230 | */ | ||
231 | int __init pci_mrst_init(void) | ||
232 | { | ||
233 | printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n"); | ||
234 | pci_mmcfg_late_init(); | ||
235 | pcibios_enable_irq = mrst_pci_irq_enable; | ||
236 | pci_root_ops = pci_mrst_ops; | ||
237 | /* Continue with standard init */ | ||
238 | return 1; | ||
239 | } | ||
240 | |||
241 | /* | ||
242 | * Langwell devices reside at fixed offsets, don't try to move them. | ||
243 | */ | ||
244 | static void __devinit pci_fixed_bar_fixup(struct pci_dev *dev) | ||
245 | { | ||
246 | unsigned long offset; | ||
247 | u32 size; | ||
248 | int i; | ||
249 | |||
250 | /* Fixup the BAR sizes for fixed BAR devices and make them unmoveable */ | ||
251 | offset = fixed_bar_cap(dev->bus, dev->devfn); | ||
252 | if (!offset || PCI_DEVFN(2, 0) == dev->devfn || | ||
253 | PCI_DEVFN(2, 2) == dev->devfn) | ||
254 | return; | ||
255 | |||
256 | for (i = 0; i < PCI_ROM_RESOURCE; i++) { | ||
257 | pci_read_config_dword(dev, offset + 8 + (i * 4), &size); | ||
258 | dev->resource[i].end = dev->resource[i].start + size - 1; | ||
259 | dev->resource[i].flags |= IORESOURCE_PCI_FIXED; | ||
260 | } | ||
261 | } | ||
262 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_fixed_bar_fixup); | ||
diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c index 8884a1c1ada6..8223738ad806 100644 --- a/arch/x86/pci/numaq_32.c +++ b/arch/x86/pci/numaq_32.c | |||
@@ -148,14 +148,8 @@ int __init pci_numaq_init(void) | |||
148 | { | 148 | { |
149 | int quad; | 149 | int quad; |
150 | 150 | ||
151 | if (!found_numaq) | ||
152 | return 0; | ||
153 | |||
154 | raw_pci_ops = &pci_direct_conf1_mq; | 151 | raw_pci_ops = &pci_direct_conf1_mq; |
155 | 152 | ||
156 | if (pcibios_scanned++) | ||
157 | return 0; | ||
158 | |||
159 | pci_root_bus = pcibios_scan_root(0); | 153 | pci_root_bus = pcibios_scan_root(0); |
160 | if (pci_root_bus) | 154 | if (pci_root_bus) |
161 | pci_bus_add_devices(pci_root_bus); | 155 | pci_bus_add_devices(pci_root_bus); |
diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c index b889d824f7c6..b34815408f58 100644 --- a/arch/x86/pci/olpc.c +++ b/arch/x86/pci/olpc.c | |||
@@ -304,9 +304,6 @@ static struct pci_raw_ops pci_olpc_conf = { | |||
304 | 304 | ||
305 | int __init pci_olpc_init(void) | 305 | int __init pci_olpc_init(void) |
306 | { | 306 | { |
307 | if (!machine_is_olpc() || olpc_has_vsa()) | ||
308 | return -ENODEV; | ||
309 | |||
310 | printk(KERN_INFO "PCI: Using configuration type OLPC\n"); | 307 | printk(KERN_INFO "PCI: Using configuration type OLPC\n"); |
311 | raw_pci_ops = &pci_olpc_conf; | 308 | raw_pci_ops = &pci_olpc_conf; |
312 | is_lx = is_geode_lx(); | 309 | is_lx = is_geode_lx(); |
diff --git a/arch/x86/pci/visws.c b/arch/x86/pci/visws.c index bcead7a46871..03008f72eb04 100644 --- a/arch/x86/pci/visws.c +++ b/arch/x86/pci/visws.c | |||
@@ -69,9 +69,6 @@ void __init pcibios_update_irq(struct pci_dev *dev, int irq) | |||
69 | 69 | ||
70 | int __init pci_visws_init(void) | 70 | int __init pci_visws_init(void) |
71 | { | 71 | { |
72 | if (!is_visws_box()) | ||
73 | return -1; | ||
74 | |||
75 | pcibios_enable_irq = &pci_visws_enable_irq; | 72 | pcibios_enable_irq = &pci_visws_enable_irq; |
76 | pcibios_disable_irq = &pci_visws_disable_irq; | 73 | pcibios_disable_irq = &pci_visws_disable_irq; |
77 | 74 | ||
@@ -90,5 +87,6 @@ int __init pci_visws_init(void) | |||
90 | pci_scan_bus_with_sysdata(pci_bus1); | 87 | pci_scan_bus_with_sysdata(pci_bus1); |
91 | pci_fixup_irqs(pci_common_swizzle, visws_map_irq); | 88 | pci_fixup_irqs(pci_common_swizzle, visws_map_irq); |
92 | pcibios_resource_survey(); | 89 | pcibios_resource_survey(); |
93 | return 0; | 90 | /* Request bus scan */ |
91 | return 1; | ||
94 | } | 92 | } |