Diffstat (limited to 'arch/parisc/kernel')
-rw-r--r--  arch/parisc/kernel/cache.c          |  26
-rw-r--r--  arch/parisc/kernel/inventory.c      |  41
-rw-r--r--  arch/parisc/kernel/irq.c            |  10
-rw-r--r--  arch/parisc/kernel/pci-dma.c        |  12
-rw-r--r--  arch/parisc/kernel/pci.c            |   3
-rw-r--r--  arch/parisc/kernel/processor.c      |  19
-rw-r--r--  arch/parisc/kernel/setup.c          |   3
-rw-r--r--  arch/parisc/kernel/sys_parisc32.c   |  62
-rw-r--r--  arch/parisc/kernel/syscall_table.S  |   4
-rw-r--r--  arch/parisc/kernel/time.c           |  90
10 files changed, 125 insertions, 145 deletions
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 837530ea32e7..b6ed34de14e1 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -1,5 +1,4 @@
-/* $Id: cache.c,v 1.4 2000/01/25 00:11:38 prumpf Exp $
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -398,12 +397,13 @@ EXPORT_SYMBOL(flush_kernel_icache_range_asm);
 
 void clear_user_page_asm(void *page, unsigned long vaddr)
 {
+	unsigned long flags;
 	/* This function is implemented in assembly in pacache.S */
 	extern void __clear_user_page_asm(void *page, unsigned long vaddr);
 
-	purge_tlb_start();
+	purge_tlb_start(flags);
 	__clear_user_page_asm(page, vaddr);
-	purge_tlb_end();
+	purge_tlb_end(flags);
 }
 
 #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
@@ -444,20 +444,24 @@ extern void clear_user_page_asm(void *page, unsigned long vaddr);
 
 void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
 {
+	unsigned long flags;
+
 	purge_kernel_dcache_page((unsigned long)page);
-	purge_tlb_start();
+	purge_tlb_start(flags);
 	pdtlb_kernel(page);
-	purge_tlb_end();
+	purge_tlb_end(flags);
 	clear_user_page_asm(page, vaddr);
 }
 EXPORT_SYMBOL(clear_user_page);
 
 void flush_kernel_dcache_page_addr(void *addr)
 {
+	unsigned long flags;
+
 	flush_kernel_dcache_page_asm(addr);
-	purge_tlb_start();
+	purge_tlb_start(flags);
 	pdtlb_kernel(addr);
-	purge_tlb_end();
+	purge_tlb_end(flags);
 }
 EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
 
@@ -490,8 +494,10 @@ void __flush_tlb_range(unsigned long sid, unsigned long start,
 	if (npages >= 512)  /* 2MB of space: arbitrary, should be tuned */
 		flush_tlb_all();
 	else {
+		unsigned long flags;
+
 		mtsp(sid, 1);
-		purge_tlb_start();
+		purge_tlb_start(flags);
 		if (split_tlb) {
 			while (npages--) {
 				pdtlb(start);
@@ -504,7 +510,7 @@ void __flush_tlb_range(unsigned long sid, unsigned long start,
 				start += PAGE_SIZE;
 			}
 		}
-		purge_tlb_end();
+		purge_tlb_end(flags);
 	}
 }
 
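Every hunk above (and the pci-dma.c hunks below) follows the same conversion: purge_tlb_start()/purge_tlb_end() now take a flags argument, so each caller declares a local "unsigned long flags" that the macros use to save and restore the interrupt state around the TLB purge. A minimal sketch of what flag-taking macros of this kind look like, assuming they wrap an irqsave spinlock; the lock name below is an assumption, not copied from the parisc headers:

	/* Hypothetical sketch, not the actual arch/parisc tlbflush.h:
	 * purge_tlb_start(flags) takes the TLB purge lock with interrupts
	 * disabled and stashes the previous interrupt state in 'flags';
	 * purge_tlb_end(flags) releases the lock and restores that state.
	 */
	#include <linux/spinlock.h>

	extern spinlock_t pa_tlb_lock;		/* assumed lock name */

	#define purge_tlb_start(flags)	spin_lock_irqsave(&pa_tlb_lock, flags)
	#define purge_tlb_end(flags)	spin_unlock_irqrestore(&pa_tlb_lock, flags)

That pairing is why each converted call site gains its own "unsigned long flags" local rather than sharing a global.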
diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c
index bd1f7f1ff74e..d228d8237879 100644
--- a/arch/parisc/kernel/inventory.c
+++ b/arch/parisc/kernel/inventory.c
@@ -170,23 +170,27 @@ static void __init pagezero_memconfig(void)
 static int __init
 pat_query_module(ulong pcell_loc, ulong mod_index)
 {
-	pdc_pat_cell_mod_maddr_block_t pa_pdc_cell;
+	pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
 	unsigned long bytecnt;
 	unsigned long temp;	/* 64-bit scratch value */
 	long status;		/* PDC return value status */
 	struct parisc_device *dev;
 
+	pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
+	if (!pa_pdc_cell)
+		panic("couldn't allocate memory for PDC_PAT_CELL!");
+
 	/* return cell module (PA or Processor view) */
 	status = pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
-				     PA_VIEW, &pa_pdc_cell);
+				     PA_VIEW, pa_pdc_cell);
 
 	if (status != PDC_OK) {
 		/* no more cell modules or error */
 		return status;
 	}
 
-	temp = pa_pdc_cell.cba;
-	dev = alloc_pa_dev(PAT_GET_CBA(temp), &pa_pdc_cell.mod_path);
+	temp = pa_pdc_cell->cba;
+	dev = alloc_pa_dev(PAT_GET_CBA(temp), &(pa_pdc_cell->mod_path));
 	if (!dev) {
 		return PDC_OK;
 	}
@@ -203,8 +207,8 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
 
 	/* save generic info returned from the call */
 	/* REVISIT: who is the consumer of this? not sure yet... */
-	dev->mod_info = pa_pdc_cell.mod_info;	/* pass to PAT_GET_ENTITY() */
-	dev->pmod_loc = pa_pdc_cell.mod_location;
+	dev->mod_info = pa_pdc_cell->mod_info;	/* pass to PAT_GET_ENTITY() */
+	dev->pmod_loc = pa_pdc_cell->mod_location;
 
 	register_parisc_device(dev);	/* advertise device */
 
@@ -216,14 +220,14 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
 
 	case PAT_ENTITY_PROC:
 		printk(KERN_DEBUG "PAT_ENTITY_PROC: id_eid 0x%lx\n",
-			pa_pdc_cell.mod[0]);
+			pa_pdc_cell->mod[0]);
 		break;
 
 	case PAT_ENTITY_MEM:
 		printk(KERN_DEBUG
 			"PAT_ENTITY_MEM: amount 0x%lx min_gni_base 0x%lx min_gni_len 0x%lx\n",
-			pa_pdc_cell.mod[0], pa_pdc_cell.mod[1],
-			pa_pdc_cell.mod[2]);
+			pa_pdc_cell->mod[0], pa_pdc_cell->mod[1],
+			pa_pdc_cell->mod[2]);
 		break;
 	case PAT_ENTITY_CA:
 		printk(KERN_DEBUG "PAT_ENTITY_CA: %ld\n", pcell_loc);
@@ -243,23 +247,26 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
 print_ranges:
 		pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
 				    IO_VIEW, &io_pdc_cell);
-		printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell.mod[1]);
-		for (i = 0; i < pa_pdc_cell.mod[1]; i++) {
+		printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell->mod[1]);
+		for (i = 0; i < pa_pdc_cell->mod[1]; i++) {
 			printk(KERN_DEBUG
 				" PA_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
-				i, pa_pdc_cell.mod[2 + i * 3],	/* type */
-				pa_pdc_cell.mod[3 + i * 3],	/* start */
-				pa_pdc_cell.mod[4 + i * 3]);	/* finish (ie end) */
+				i, pa_pdc_cell->mod[2 + i * 3],	/* type */
+				pa_pdc_cell->mod[3 + i * 3],	/* start */
+				pa_pdc_cell->mod[4 + i * 3]);	/* finish (ie end) */
 			printk(KERN_DEBUG
 				" IO_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
-				i, io_pdc_cell.mod[2 + i * 3],	/* type */
-				io_pdc_cell.mod[3 + i * 3],	/* start */
-				io_pdc_cell.mod[4 + i * 3]);	/* finish (ie end) */
+				i, io_pdc_cell->mod[2 + i * 3],	/* type */
+				io_pdc_cell->mod[3 + i * 3],	/* start */
+				io_pdc_cell->mod[4 + i * 3]);	/* finish (ie end) */
 		}
 		printk(KERN_DEBUG "\n");
 		break;
 	}
 #endif /* DEBUG_PAT */
+
+	kfree(pa_pdc_cell);
+
 	return PDC_OK;
 }
 
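Both this file and processor.c below apply the same fix: pdc_pat_cell_mod_maddr_block_t is a large firmware return buffer, so instead of living on the kernel stack it is kmalloc'd for the duration of the PDC_PAT_CELL query and freed afterwards, with field accesses switching from '.' to '->'. A condensed sketch of that pattern, using only names visible in the hunks above (the helper name itself is illustrative):

	#include <linux/slab.h>

	/* Sketch of the heap-allocated PDC_PAT_CELL query pattern. */
	static int __init query_module_example(ulong pcell_loc, ulong mod_index)
	{
		pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;	/* too big for the stack */
		unsigned long bytecnt;
		long status;

		pa_pdc_cell = kmalloc(sizeof(*pa_pdc_cell), GFP_KERNEL);
		if (!pa_pdc_cell)
			panic("couldn't allocate memory for PDC_PAT_CELL!");

		status = pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
					     PA_VIEW, pa_pdc_cell);
		if (status == PDC_OK) {
			/* ... use pa_pdc_cell->cba, ->mod_path, ->mod[] ... */
		}

		kfree(pa_pdc_cell);
		return status;
	}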
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 8007f1e65729..330f536a9324 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -120,7 +120,7 @@ int cpu_check_affinity(unsigned int irq, const struct cpumask *dest)
 	if (CHECK_IRQ_PER_CPU(irq)) {
 		/* Bad linux design decision.  The mask has already
 		 * been set; we must reset it */
-		cpumask_setall(&irq_desc[irq].affinity);
+		cpumask_setall(irq_desc[irq].affinity);
 		return -EINVAL;
 	}
 
@@ -138,13 +138,13 @@ static int cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
 	if (cpu_dest < 0)
 		return -1;
 
-	cpumask_copy(&irq_desc[irq].affinity, dest);
+	cpumask_copy(irq_desc[irq].affinity, dest);
 
 	return 0;
 }
 #endif
 
-static struct hw_interrupt_type cpu_interrupt_type = {
+static struct irq_chip cpu_interrupt_type = {
 	.typename	= "CPU",
 	.startup	= cpu_startup_irq,
 	.shutdown	= cpu_disable_irq,
@@ -299,7 +299,7 @@ int txn_alloc_irq(unsigned int bits_wide)
 unsigned long txn_affinity_addr(unsigned int irq, int cpu)
 {
 #ifdef CONFIG_SMP
-	cpumask_copy(&irq_desc[irq].affinity, cpumask_of(cpu));
+	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 #endif
 
 	return per_cpu(cpu_data, cpu).txn_addr;
@@ -356,7 +356,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 	irq = eirr_to_irq(eirr_val);
 
 #ifdef CONFIG_SMP
-	cpumask_copy(&dest, &irq_desc[irq].affinity);
+	cpumask_copy(&dest, irq_desc[irq].affinity);
 	if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
 	    !cpu_isset(smp_processor_id(), dest)) {
 		int cpu = first_cpu(dest);
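The affinity hunks drop the leading '&' because irq_desc[irq].affinity is no longer an embedded struct cpumask: it is a cpumask_var_t, which behaves like a struct cpumask pointer (a real pointer with CONFIG_CPUMASK_OFFSTACK, a one-element array otherwise), so it is handed to the cpumask helpers directly. The hw_interrupt_type to irq_chip change is simply the newer name for the same structure. A schematic illustration of why the call sites lose the '&' (the struct below is a stand-in, not the real struct irq_desc):

	#include <linux/cpumask.h>

	/* Stand-in for the affinity field only; struct irq_desc has many more members. */
	struct example_desc {
		cpumask_var_t affinity;	/* pointer-like: no '&' needed at call sites */
	};

	static void copy_affinity(struct example_desc *desc, const struct cpumask *dest)
	{
		/* cpumask_var_t decays to struct cpumask *, with or without OFFSTACK */
		cpumask_copy(desc->affinity, dest);
	}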
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index 7d927eac932b..c07f618ff7da 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -90,12 +90,14 @@ static inline int map_pte_uncached(pte_t * pte,
 	if (end > PMD_SIZE)
 		end = PMD_SIZE;
 	do {
+		unsigned long flags;
+
 		if (!pte_none(*pte))
 			printk(KERN_ERR "map_pte_uncached: page already exists\n");
 		set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
-		purge_tlb_start();
+		purge_tlb_start(flags);
 		pdtlb_kernel(orig_vaddr);
-		purge_tlb_end();
+		purge_tlb_end(flags);
 		vaddr += PAGE_SIZE;
 		orig_vaddr += PAGE_SIZE;
 		(*paddr_ptr) += PAGE_SIZE;
@@ -168,11 +170,13 @@ static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
 	if (end > PMD_SIZE)
 		end = PMD_SIZE;
 	do {
+		unsigned long flags;
 		pte_t page = *pte;
+
 		pte_clear(&init_mm, vaddr, pte);
-		purge_tlb_start();
+		purge_tlb_start(flags);
 		pdtlb_kernel(orig_vaddr);
-		purge_tlb_end();
+		purge_tlb_end(flags);
 		vaddr += PAGE_SIZE;
 		orig_vaddr += PAGE_SIZE;
 		pte++;
diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c
index 6936386c9861..f7064abc3bb6 100644
--- a/arch/parisc/kernel/pci.c
+++ b/arch/parisc/kernel/pci.c
@@ -1,5 +1,4 @@
-/* $Id: pci.c,v 1.6 2000/01/29 00:12:05 grundler Exp $
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index e09d0f7fb6b0..c8fb61ed32f4 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -1,5 +1,4 @@
-/* $Id: processor.c,v 1.1 2002/07/20 16:27:06 rhirst Exp $
- *
+/*
  * Initial setup-routines for HP 9000 based hardware.
  *
  * Copyright (C) 1991, 1992, 1995 Linus Torvalds
@@ -121,22 +120,28 @@ static int __cpuinit processor_probe(struct parisc_device *dev)
 	if (is_pdc_pat()) {
 		ulong status;
 		unsigned long bytecnt;
-		pdc_pat_cell_mod_maddr_block_t pa_pdc_cell;
+		pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
 #undef USE_PAT_CPUID
 #ifdef USE_PAT_CPUID
 		struct pdc_pat_cpu_num cpu_info;
 #endif
 
+		pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
+		if (!pa_pdc_cell)
+			panic("couldn't allocate memory for PDC_PAT_CELL!");
+
 		status = pdc_pat_cell_module(&bytecnt, dev->pcell_loc,
-			dev->mod_index, PA_VIEW, &pa_pdc_cell);
+			dev->mod_index, PA_VIEW, pa_pdc_cell);
 
 		BUG_ON(PDC_OK != status);
 
 		/* verify it's the same as what do_pat_inventory() found */
-		BUG_ON(dev->mod_info != pa_pdc_cell.mod_info);
-		BUG_ON(dev->pmod_loc != pa_pdc_cell.mod_location);
+		BUG_ON(dev->mod_info != pa_pdc_cell->mod_info);
+		BUG_ON(dev->pmod_loc != pa_pdc_cell->mod_location);
+
+		txn_addr = pa_pdc_cell->mod[0];	/* id_eid for IO sapic */
 
-		txn_addr = pa_pdc_cell.mod[0];	/* id_eid for IO sapic */
+		kfree(pa_pdc_cell);
 
 #ifdef USE_PAT_CPUID
 		/* We need contiguous numbers for cpuid. Firmware's notion
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 82131ca8e05c..cb71f3dac995 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -1,5 +1,4 @@
-/* $Id: setup.c,v 1.8 2000/02/02 04:42:38 prumpf Exp $
- *
+/*
  * Initial setup-routines for HP 9000 based hardware.
  *
  * Copyright (C) 1991, 1992, 1995 Linus Torvalds
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index 1adb40c81669..92a0acaa0d12 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -174,68 +174,6 @@ asmlinkage long sys32_sched_rr_get_interval(pid_t pid,
 	return ret;
 }
 
-/*** copied from mips64 ***/
-/*
- * Ooo, nasty.  We need here to frob 32-bit unsigned longs to
- * 64-bit unsigned longs.
- */
-
-static inline int
-get_fd_set32(unsigned long n, u32 *ufdset, unsigned long *fdset)
-{
-	n = (n + 8*sizeof(u32) - 1) / (8*sizeof(u32));
-	if (ufdset) {
-		unsigned long odd;
-
-		if (!access_ok(VERIFY_WRITE, ufdset, n*sizeof(u32)))
-			return -EFAULT;
-
-		odd = n & 1UL;
-		n &= ~1UL;
-		while (n) {
-			unsigned long h, l;
-			__get_user(l, ufdset);
-			__get_user(h, ufdset+1);
-			ufdset += 2;
-			*fdset++ = h << 32 | l;
-			n -= 2;
-		}
-		if (odd)
-			__get_user(*fdset, ufdset);
-	} else {
-		/* Tricky, must clear full unsigned long in the
-		 * kernel fdset at the end, this makes sure that
-		 * actually happens.
-		 */
-		memset(fdset, 0, ((n + 1) & ~1)*sizeof(u32));
-	}
-	return 0;
-}
-
-static inline void
-set_fd_set32(unsigned long n, u32 *ufdset, unsigned long *fdset)
-{
-	unsigned long odd;
-	n = (n + 8*sizeof(u32) - 1) / (8*sizeof(u32));
-
-	if (!ufdset)
-		return;
-
-	odd = n & 1UL;
-	n &= ~1UL;
-	while (n) {
-		unsigned long h, l;
-		l = *fdset++;
-		h = l >> 32;
-		__put_user(l, ufdset);
-		__put_user(h, ufdset+1);
-		ufdset += 2;
-		n -= 2;
-	}
-	if (odd)
-		__put_user(*fdset, ufdset);
-}
-
 struct msgbuf32 {
 	int mtype;
 	char mtext[1];
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 03b9a01bc16c..cf145eb026b3 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -413,6 +413,10 @@
 	ENTRY_SAME(dup3)
 	ENTRY_SAME(pipe2)
 	ENTRY_SAME(inotify_init1)
+	ENTRY_COMP(preadv)		/* 315 */
+	ENTRY_COMP(pwritev)
+	ENTRY_COMP(rt_tgsigqueueinfo)
+	ENTRY_SAME(perf_counter_open)
 
 	/* Nothing yet */
 
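In this table, ENTRY_SAME points both the native and the 32-bit (compat) syscall tables at the same handler, while ENTRY_COMP routes 32-bit tasks through a compat_sys_* wrapper; preadv, pwritev and rt_tgsigqueueinfo need wrappers because their arguments (iovec arrays, 64-bit offsets, siginfo) have a different layout in the 32-bit ABI. As a hedged illustration of the kind of fix-up such a wrapper performs — reassembling a 64-bit file position passed as two 32-bit halves — with names that are illustrative rather than the real compat code:

	#include <linux/types.h>

	/* Illustrative only: a 32-bit caller passes the 64-bit position split
	 * across two registers; the compat entry point rebuilds it before
	 * calling the native implementation.
	 */
	static inline loff_t example_compat_pos(u32 pos_low, u32 pos_high)
	{
		return ((loff_t)pos_high << 32) | pos_low;
	}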
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index d4dd05674c62..a79c6f9e7e2c 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -56,9 +56,9 @@ static unsigned long clocktick __read_mostly;	/* timer cycles per tick */
  */
 irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
 {
-	unsigned long now;
+	unsigned long now, now2;
 	unsigned long next_tick;
-	unsigned long cycles_elapsed, ticks_elapsed;
+	unsigned long cycles_elapsed, ticks_elapsed = 1;
 	unsigned long cycles_remainder;
 	unsigned int cpu = smp_processor_id();
 	struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
@@ -71,44 +71,24 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
 	/* Initialize next_tick to the expected tick time. */
 	next_tick = cpuinfo->it_value;
 
-	/* Get current interval timer.
-	 * CR16 reads as 64 bits in CPU wide mode.
-	 * CR16 reads as 32 bits in CPU narrow mode.
-	 */
+	/* Get current cycle counter (Control Register 16). */
 	now = mfctl(16);
 
 	cycles_elapsed = now - next_tick;
 
-	if ((cycles_elapsed >> 5) < cpt) {
+	if ((cycles_elapsed >> 6) < cpt) {
 		/* use "cheap" math (add/subtract) instead
 		 * of the more expensive div/mul method
 		 */
 		cycles_remainder = cycles_elapsed;
-		ticks_elapsed = 1;
 		while (cycles_remainder > cpt) {
 			cycles_remainder -= cpt;
 			ticks_elapsed++;
 		}
 	} else {
+		/* TODO: Reduce this to one fdiv op */
 		cycles_remainder = cycles_elapsed % cpt;
-		ticks_elapsed = 1 + cycles_elapsed / cpt;
-	}
-
-	/* Can we differentiate between "early CR16" (aka Scenario 1) and
-	 * "long delay" (aka Scenario 3)? I don't think so.
-	 *
-	 * We expected timer_interrupt to be delivered at least a few hundred
-	 * cycles after the IT fires. But it's arbitrary how much time passes
-	 * before we call it "late". I've picked one second.
-	 */
-	if (unlikely(ticks_elapsed > HZ)) {
-		/* Scenario 3: very long delay?  bad in any case */
-		printk (KERN_CRIT "timer_interrupt(CPU %d): delayed!"
-			" cycles %lX rem %lX "
-			" next/now %lX/%lX\n",
-			cpu,
-			cycles_elapsed, cycles_remainder,
-			next_tick, now );
+		ticks_elapsed += cycles_elapsed / cpt;
 	}
 
 	/* convert from "division remainder" to "remainder of clock tick" */
@@ -122,18 +102,56 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
 
 	cpuinfo->it_value = next_tick;
 
-	/* Skip one clocktick on purpose if we are likely to miss next_tick.
-	 * We want to avoid the new next_tick being less than CR16.
-	 * If that happened, itimer wouldn't fire until CR16 wrapped.
-	 * We'll catch the tick we missed on the tick after that.
+	/* Program the IT when to deliver the next interrupt.
+	 * Only bottom 32-bits of next_tick are writable in CR16!
 	 */
-	if (!(cycles_remainder >> 13))
-		next_tick += cpt;
-
-	/* Program the IT when to deliver the next interrupt. */
-	/* Only bottom 32-bits of next_tick are written to cr16. */
 	mtctl(next_tick, 16);
 
+	/* Skip one clocktick on purpose if we missed next_tick.
+	 * The new CR16 must be "later" than current CR16 otherwise
+	 * itimer would not fire until CR16 wrapped - e.g 4 seconds
+	 * later on a 1Ghz processor. We'll account for the missed
+	 * tick on the next timer interrupt.
+	 *
+	 * "next_tick - now" will always give the difference regardless
+	 * if one or the other wrapped. If "now" is "bigger" we'll end up
+	 * with a very large unsigned number.
+	 */
+	now2 = mfctl(16);
+	if (next_tick - now2 > cpt)
+		mtctl(next_tick+cpt, 16);
+
+#if 1
+/*
+ * GGG: DEBUG code for how many cycles programming CR16 used.
+ */
+	if (unlikely(now2 - now > 0x3000))	/* 12K cycles */
+		printk (KERN_CRIT "timer_interrupt(CPU %d): SLOW! 0x%lx cycles!"
+			" cyc %lX rem %lX "
+			" next/now %lX/%lX\n",
+			cpu, now2 - now, cycles_elapsed, cycles_remainder,
+			next_tick, now );
+#endif
+
+	/* Can we differentiate between "early CR16" (aka Scenario 1) and
+	 * "long delay" (aka Scenario 3)? I don't think so.
+	 *
+	 * Timer_interrupt will be delivered at least a few hundred cycles
+	 * after the IT fires. But it's arbitrary how much time passes
+	 * before we call it "late". I've picked one second.
+	 *
+	 * It's important NO printk's are between reading CR16 and
+	 * setting up the next value. May introduce huge variance.
+	 */
+	if (unlikely(ticks_elapsed > HZ)) {
+		/* Scenario 3: very long delay?  bad in any case */
+		printk (KERN_CRIT "timer_interrupt(CPU %d): delayed!"
+			" cycles %lX rem %lX "
+			" next/now %lX/%lX\n",
+			cpu,
+			cycles_elapsed, cycles_remainder,
+			next_tick, now );
+	}
 
 	/* Done mucking with unreliable delivery of interrupts.
 	 * Go do system house keeping.
@@ -173,7 +191,7 @@ EXPORT_SYMBOL(profile_pc);
 
 /* clock source code */
 
-static cycle_t read_cr16(void)
+static cycle_t read_cr16(struct clocksource *cs)
 {
 	return get_cycles();
 }
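The reworked timer_interrupt writes next_tick into CR16 first and only afterwards re-reads the counter (now2) to decide whether the programmed value has already been missed; the comparison relies on unsigned wrap-around, so "next_tick - now2" is the forward distance modulo 2^32 and anything larger than one clocktick means next_tick is already in the past. A small standalone illustration of that wrap-safe test, with hypothetical cycle numbers rather than kernel values:

	#include <stdint.h>
	#include <stdio.h>

	/* Wrap-safe "did we already miss next_tick?" check, mirroring the hunk above:
	 * if the unsigned forward distance from now to next_tick exceeds one
	 * clocktick, next_tick is effectively in the past, so push it out by cpt.
	 */
	static uint32_t reprogram(uint32_t next_tick, uint32_t now, uint32_t cpt)
	{
		if (next_tick - now > cpt)
			next_tick += cpt;
		return next_tick;
	}

	int main(void)
	{
		uint32_t cpt = 250000;	/* hypothetical cycles per clocktick */

		/* next_tick slightly ahead of now across the 32-bit wrap: left alone */
		printf("%u\n", reprogram(100u, 4294967000u, cpt));
		/* next_tick already behind now: pushed out by one clocktick */
		printf("%u\n", reprogram(1000u, 600000u, cpt));
		return 0;
	}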