author     Dave Kleikamp <shaggy@austin.ibm.com>   2006-01-24 15:34:47 -0500
committer  Dave Kleikamp <shaggy@austin.ibm.com>   2006-01-24 15:34:47 -0500
commit     0a0fc0ddbe732779366ab6b1b879f62195e65967
tree       7b42490a676cf39ae0691b6859ecf7fd410f229b
parent     4d5dbd0945d9e0833dd7964a3d6ee33157f7cc7a
parent     3ee68c4af3fd7228c1be63254b9f884614f9ebb2
Merge with /home/shaggy/git/linus-clean/
Diffstat (limited to 'arch/parisc')
30 files changed, 301 insertions, 772 deletions
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 874a283edb95..e77a06e9621e 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -19,9 +19,6 @@ config MMU | |||
19 | config STACK_GROWSUP | 19 | config STACK_GROWSUP |
20 | def_bool y | 20 | def_bool y |
21 | 21 | ||
22 | config UID16 | ||
23 | bool | ||
24 | |||
25 | config RWSEM_GENERIC_SPINLOCK | 22 | config RWSEM_GENERIC_SPINLOCK |
26 | def_bool y | 23 | def_bool y |
27 | 24 | ||
diff --git a/arch/parisc/configs/a500_defconfig b/arch/parisc/configs/a500_defconfig
index 955ef5084f3e..959ad3c4e372 100644
--- a/arch/parisc/configs/a500_defconfig
+++ b/arch/parisc/configs/a500_defconfig
@@ -602,6 +602,7 @@ CONFIG_ACENIC_OMIT_TIGON_I=y | |||
602 | # CONFIG_DL2K is not set | 602 | # CONFIG_DL2K is not set |
603 | CONFIG_E1000=m | 603 | CONFIG_E1000=m |
604 | CONFIG_E1000_NAPI=y | 604 | CONFIG_E1000_NAPI=y |
605 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set | ||
605 | # CONFIG_NS83820 is not set | 606 | # CONFIG_NS83820 is not set |
606 | # CONFIG_HAMACHI is not set | 607 | # CONFIG_HAMACHI is not set |
607 | # CONFIG_YELLOWFIN is not set | 608 | # CONFIG_YELLOWFIN is not set |
diff --git a/arch/parisc/configs/c3000_defconfig b/arch/parisc/configs/c3000_defconfig
index 9d86b6b1ebd1..0b1c8c1fa8a3 100644
--- a/arch/parisc/configs/c3000_defconfig
+++ b/arch/parisc/configs/c3000_defconfig
@@ -626,6 +626,7 @@ CONFIG_ACENIC=m | |||
626 | # CONFIG_DL2K is not set | 626 | # CONFIG_DL2K is not set |
627 | CONFIG_E1000=m | 627 | CONFIG_E1000=m |
628 | # CONFIG_E1000_NAPI is not set | 628 | # CONFIG_E1000_NAPI is not set |
629 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set | ||
629 | # CONFIG_NS83820 is not set | 630 | # CONFIG_NS83820 is not set |
630 | # CONFIG_HAMACHI is not set | 631 | # CONFIG_HAMACHI is not set |
631 | # CONFIG_YELLOWFIN is not set | 632 | # CONFIG_YELLOWFIN is not set |
diff --git a/arch/parisc/hpux/sys_hpux.c b/arch/parisc/hpux/sys_hpux.c
index a64fd48fbfb5..29b4d61898f2 100644
--- a/arch/parisc/hpux/sys_hpux.c
+++ b/arch/parisc/hpux/sys_hpux.c
@@ -22,6 +22,7 @@ | |||
22 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 22 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <linux/capability.h> | ||
25 | #include <linux/file.h> | 26 | #include <linux/file.h> |
26 | #include <linux/fs.h> | 27 | #include <linux/fs.h> |
27 | #include <linux/namei.h> | 28 | #include <linux/namei.h> |
diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile
index 171f9c239f60..27827bc3717e 100644
--- a/arch/parisc/kernel/Makefile
+++ b/arch/parisc/kernel/Makefile
@@ -6,7 +6,6 @@ extra-y := init_task.o head.o vmlinux.lds | |||
6 | 6 | ||
7 | AFLAGS_entry.o := -traditional | 7 | AFLAGS_entry.o := -traditional |
8 | AFLAGS_pacache.o := -traditional | 8 | AFLAGS_pacache.o := -traditional |
9 | CFLAGS_ioctl32.o := -Ifs/ | ||
10 | 9 | ||
11 | obj-y := cache.o pacache.o setup.o traps.o time.o irq.o \ | 10 | obj-y := cache.o pacache.o setup.o traps.o time.o irq.o \ |
12 | pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \ | 11 | pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \ |
@@ -19,6 +18,6 @@ obj-$(CONFIG_SMP) += smp.o | |||
19 | obj-$(CONFIG_PA11) += pci-dma.o | 18 | obj-$(CONFIG_PA11) += pci-dma.o |
20 | obj-$(CONFIG_PCI) += pci.o | 19 | obj-$(CONFIG_PCI) += pci.o |
21 | obj-$(CONFIG_MODULES) += module.o | 20 | obj-$(CONFIG_MODULES) += module.o |
22 | obj-$(CONFIG_64BIT) += binfmt_elf32.o sys_parisc32.o ioctl32.o signal32.o | 21 | obj-$(CONFIG_64BIT) += binfmt_elf32.o sys_parisc32.o signal32.o |
23 | # only supported for PCX-W/U in 64-bit mode at the moment | 22 | # only supported for PCX-W/U in 64-bit mode at the moment |
24 | obj-$(CONFIG_64BIT) += perf.o perf_asm.o | 23 | obj-$(CONFIG_64BIT) += perf.o perf_asm.o |
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index a065349aee37..d8a4ca021aac 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -29,9 +29,9 @@ | |||
29 | #include <asm/processor.h> | 29 | #include <asm/processor.h> |
30 | #include <asm/sections.h> | 30 | #include <asm/sections.h> |
31 | 31 | ||
32 | int split_tlb; | 32 | int split_tlb __read_mostly; |
33 | int dcache_stride; | 33 | int dcache_stride __read_mostly; |
34 | int icache_stride; | 34 | int icache_stride __read_mostly; |
35 | EXPORT_SYMBOL(dcache_stride); | 35 | EXPORT_SYMBOL(dcache_stride); |
36 | 36 | ||
37 | 37 | ||
@@ -45,29 +45,29 @@ DEFINE_SPINLOCK(pa_tlb_lock); | |||
45 | EXPORT_SYMBOL(pa_tlb_lock); | 45 | EXPORT_SYMBOL(pa_tlb_lock); |
46 | #endif | 46 | #endif |
47 | 47 | ||
48 | struct pdc_cache_info cache_info; | 48 | struct pdc_cache_info cache_info __read_mostly; |
49 | #ifndef CONFIG_PA20 | 49 | #ifndef CONFIG_PA20 |
50 | static struct pdc_btlb_info btlb_info; | 50 | static struct pdc_btlb_info btlb_info __read_mostly; |
51 | #endif | 51 | #endif |
52 | 52 | ||
53 | #ifdef CONFIG_SMP | 53 | #ifdef CONFIG_SMP |
54 | void | 54 | void |
55 | flush_data_cache(void) | 55 | flush_data_cache(void) |
56 | { | 56 | { |
57 | on_each_cpu((void (*)(void *))flush_data_cache_local, NULL, 1, 1); | 57 | on_each_cpu(flush_data_cache_local, NULL, 1, 1); |
58 | } | 58 | } |
59 | void | 59 | void |
60 | flush_instruction_cache(void) | 60 | flush_instruction_cache(void) |
61 | { | 61 | { |
62 | on_each_cpu((void (*)(void *))flush_instruction_cache_local, NULL, 1, 1); | 62 | on_each_cpu(flush_instruction_cache_local, NULL, 1, 1); |
63 | } | 63 | } |
64 | #endif | 64 | #endif |
65 | 65 | ||
66 | void | 66 | void |
67 | flush_cache_all_local(void) | 67 | flush_cache_all_local(void) |
68 | { | 68 | { |
69 | flush_instruction_cache_local(); | 69 | flush_instruction_cache_local(NULL); |
70 | flush_data_cache_local(); | 70 | flush_data_cache_local(NULL); |
71 | } | 71 | } |
72 | EXPORT_SYMBOL(flush_cache_all_local); | 72 | EXPORT_SYMBOL(flush_cache_all_local); |
73 | 73 | ||
@@ -332,7 +332,7 @@ void clear_user_page_asm(void *page, unsigned long vaddr) | |||
332 | } | 332 | } |
333 | 333 | ||
334 | #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */ | 334 | #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */ |
335 | int parisc_cache_flush_threshold = FLUSH_THRESHOLD; | 335 | int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD; |
336 | 336 | ||
337 | void parisc_setup_cache_timing(void) | 337 | void parisc_setup_cache_timing(void) |
338 | { | 338 | { |
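The cache.c hunk above (and several of the hunks below) tags boot-time-initialized, frequently-read globals with `__read_mostly`, and drops the function-pointer casts on on_each_cpu() now that the local flush routines take the usual `void *` argument. A rough sketch of what the annotation amounts to — an illustration of the idea, not a quote of the kernel headers of this era:

```c
/* Illustrative only: __read_mostly places a variable in a dedicated
 * section so the linker groups read-mostly data together, keeping it
 * off cache lines that also hold frequently-written data and so
 * avoiding needless cache-line bouncing between CPUs. */
#define __read_mostly __attribute__((__section__(".data.read_mostly")))

int split_tlb __read_mostly;      /* written once during boot ...        */
int dcache_stride __read_mostly;  /* ... read on every cache flush after */
```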
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index 988844a169e6..2d804e2d16d1 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -39,7 +39,7 @@ | |||
39 | #include <asm/parisc-device.h> | 39 | #include <asm/parisc-device.h> |
40 | 40 | ||
41 | /* See comments in include/asm-parisc/pci.h */ | 41 | /* See comments in include/asm-parisc/pci.h */ |
42 | struct hppa_dma_ops *hppa_dma_ops; | 42 | struct hppa_dma_ops *hppa_dma_ops __read_mostly; |
43 | EXPORT_SYMBOL(hppa_dma_ops); | 43 | EXPORT_SYMBOL(hppa_dma_ops); |
44 | 44 | ||
45 | static struct device root = { | 45 | static struct device root = { |
@@ -173,8 +173,6 @@ int register_parisc_driver(struct parisc_driver *driver) | |||
173 | WARN_ON(driver->drv.probe != NULL); | 173 | WARN_ON(driver->drv.probe != NULL); |
174 | WARN_ON(driver->drv.remove != NULL); | 174 | WARN_ON(driver->drv.remove != NULL); |
175 | 175 | ||
176 | driver->drv.probe = parisc_driver_probe; | ||
177 | driver->drv.remove = parisc_driver_remove; | ||
178 | driver->drv.name = driver->name; | 176 | driver->drv.name = driver->name; |
179 | 177 | ||
180 | return driver_register(&driver->drv); | 178 | return driver_register(&driver->drv); |
@@ -499,8 +497,12 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path) | |||
499 | 497 | ||
500 | dev = create_parisc_device(mod_path); | 498 | dev = create_parisc_device(mod_path); |
501 | if (dev->id.hw_type != HPHW_FAULTY) { | 499 | if (dev->id.hw_type != HPHW_FAULTY) { |
502 | printk("Two devices have hardware path %s. Please file a bug with HP.\n" | 500 | printk(KERN_ERR "Two devices have hardware path [%s]. " |
503 | "In the meantime, you could try rearranging your cards.\n", parisc_pathname(dev)); | 501 | "IODC data for second device: " |
502 | "%02x%02x%02x%02x%02x%02x\n" | ||
503 | "Rearranging GSC cards sometimes helps\n", | ||
504 | parisc_pathname(dev), iodc_data[0], iodc_data[1], | ||
505 | iodc_data[3], iodc_data[4], iodc_data[5], iodc_data[6]); | ||
504 | return NULL; | 506 | return NULL; |
505 | } | 507 | } |
506 | 508 | ||
@@ -511,8 +513,13 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path) | |||
511 | (iodc_data[5] << 8) | iodc_data[6]; | 513 | (iodc_data[5] << 8) | iodc_data[6]; |
512 | dev->hpa.name = parisc_pathname(dev); | 514 | dev->hpa.name = parisc_pathname(dev); |
513 | dev->hpa.start = hpa; | 515 | dev->hpa.start = hpa; |
514 | if (hpa == 0xf4000000 || hpa == 0xf6000000 || | 516 | /* This is awkward. The STI spec says that gfx devices may occupy |
515 | hpa == 0xf8000000 || hpa == 0xfa000000) { | 517 | * 32MB or 64MB. Unfortunately, we don't know how to tell whether |
518 | * it's the former or the latter. Assumptions either way can hurt us. | ||
519 | */ | ||
520 | if (hpa == 0xf4000000 || hpa == 0xf8000000) { | ||
521 | dev->hpa.end = hpa + 0x03ffffff; | ||
522 | } else if (hpa == 0xf6000000 || hpa == 0xfa000000) { | ||
516 | dev->hpa.end = hpa + 0x01ffffff; | 523 | dev->hpa.end = hpa + 0x01ffffff; |
517 | } else { | 524 | } else { |
518 | dev->hpa.end = hpa + 0xfff; | 525 | dev->hpa.end = hpa + 0xfff; |
@@ -566,6 +573,8 @@ struct bus_type parisc_bus_type = { | |||
566 | .name = "parisc", | 573 | .name = "parisc", |
567 | .match = parisc_generic_match, | 574 | .match = parisc_generic_match, |
568 | .dev_attrs = parisc_device_attrs, | 575 | .dev_attrs = parisc_device_attrs, |
576 | .probe = parisc_driver_probe, | ||
577 | .remove = parisc_driver_remove, | ||
569 | }; | 578 | }; |
570 | 579 | ||
571 | /** | 580 | /** |
@@ -830,7 +839,7 @@ static void print_parisc_device(struct parisc_device *dev) | |||
830 | 839 | ||
831 | if (dev->num_addrs) { | 840 | if (dev->num_addrs) { |
832 | int k; | 841 | int k; |
833 | printk(", additional addresses: "); | 842 | printk(", additional addresses: "); |
834 | for (k = 0; k < dev->num_addrs; k++) | 843 | for (k = 0; k < dev->num_addrs; k++) |
835 | printk("0x%lx ", dev->addr[k]); | 844 | printk("0x%lx ", dev->addr[k]); |
836 | } | 845 | } |
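The drivers.c change also tracks a driver-core transition: register_parisc_driver() no longer copies probe/remove into each struct device_driver, because the callbacks now live on the bus itself. Condensed from the hunks above (a sketch against the driver model shown in this diff, not a complete file):

```c
/* The bus supplies the probe/remove hooks once, for every matched device. */
struct bus_type parisc_bus_type = {
	.name      = "parisc",
	.match     = parisc_generic_match,
	.dev_attrs = parisc_device_attrs,
	.probe     = parisc_driver_probe,
	.remove    = parisc_driver_remove,
};

/* ...so an individual driver only needs a name before registering. */
int register_parisc_driver(struct parisc_driver *driver)
{
	WARN_ON(driver->drv.probe != NULL);
	WARN_ON(driver->drv.remove != NULL);

	driver->drv.name = driver->name;
	return driver_register(&driver->drv);
}
```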
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index c7e66ee5b083..9af4b22a6d77 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -1846,6 +1846,7 @@ sys_clone_wrapper: | |||
1846 | ldo -16(%r30),%r29 /* Reference param save area */ | 1846 | ldo -16(%r30),%r29 /* Reference param save area */ |
1847 | #endif | 1847 | #endif |
1848 | 1848 | ||
1849 | /* WARNING - Clobbers r19 and r21, userspace must save these! */ | ||
1849 | STREG %r2,PT_GR19(%r1) /* save for child */ | 1850 | STREG %r2,PT_GR19(%r1) /* save for child */ |
1850 | STREG %r30,PT_GR21(%r1) | 1851 | STREG %r30,PT_GR21(%r1) |
1851 | BL sys_clone,%r2 | 1852 | BL sys_clone,%r2 |
diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
index 553f8fe03224..2dc06b8e1817 100644
--- a/arch/parisc/kernel/firmware.c
+++ b/arch/parisc/kernel/firmware.c
@@ -80,7 +80,7 @@ static unsigned long pdc_result2[32] __attribute__ ((aligned (8))); | |||
80 | 80 | ||
81 | /* Firmware needs to be initially set to narrow to determine the | 81 | /* Firmware needs to be initially set to narrow to determine the |
82 | * actual firmware width. */ | 82 | * actual firmware width. */ |
83 | int parisc_narrow_firmware = 1; | 83 | int parisc_narrow_firmware __read_mostly = 1; |
84 | #endif | 84 | #endif |
85 | 85 | ||
86 | /* On most currently-supported platforms, IODC I/O calls are 32-bit calls | 86 | /* On most currently-supported platforms, IODC I/O calls are 32-bit calls |
diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c
index 2071b5bba15c..3058bffd8a2c 100644
--- a/arch/parisc/kernel/hardware.c
+++ b/arch/parisc/kernel/hardware.c
@@ -551,6 +551,7 @@ static struct hp_hardware hp_hardware_list[] __initdata = { | |||
551 | {HPHW_BCPORT, 0x804, 0x0000C, 0x10, "REO I/O BC Merced Port"}, | 551 | {HPHW_BCPORT, 0x804, 0x0000C, 0x10, "REO I/O BC Merced Port"}, |
552 | {HPHW_BCPORT, 0x782, 0x0000C, 0x00, "REO I/O BC Ropes Port"}, | 552 | {HPHW_BCPORT, 0x782, 0x0000C, 0x00, "REO I/O BC Ropes Port"}, |
553 | {HPHW_BCPORT, 0x784, 0x0000C, 0x00, "Pluto I/O BC Ropes Port"}, | 553 | {HPHW_BCPORT, 0x784, 0x0000C, 0x00, "Pluto I/O BC Ropes Port"}, |
554 | {HPHW_BRIDGE, 0x05D, 0x0000A, 0x00, "SummitHawk Dino PCI Bridge"}, | ||
554 | {HPHW_BRIDGE, 0x680, 0x0000A, 0x00, "Dino PCI Bridge"}, | 555 | {HPHW_BRIDGE, 0x680, 0x0000A, 0x00, "Dino PCI Bridge"}, |
555 | {HPHW_BRIDGE, 0x682, 0x0000A, 0x00, "Cujo PCI Bridge"}, | 556 | {HPHW_BRIDGE, 0x682, 0x0000A, 0x00, "Cujo PCI Bridge"}, |
556 | {HPHW_BRIDGE, 0x782, 0x0000A, 0x00, "Elroy PCI Bridge"}, | 557 | {HPHW_BRIDGE, 0x782, 0x0000A, 0x00, "Elroy PCI Bridge"}, |
diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c
index 1a1c66422736..4e847ba53180 100644
--- a/arch/parisc/kernel/inventory.c
+++ b/arch/parisc/kernel/inventory.c
@@ -38,7 +38,7 @@ | |||
38 | */ | 38 | */ |
39 | #undef DEBUG_PAT | 39 | #undef DEBUG_PAT |
40 | 40 | ||
41 | int pdc_type = PDC_TYPE_ILLEGAL; | 41 | int pdc_type __read_mostly = PDC_TYPE_ILLEGAL; |
42 | 42 | ||
43 | void __init setup_pdc(void) | 43 | void __init setup_pdc(void) |
44 | { | 44 | { |
@@ -120,8 +120,8 @@ set_pmem_entry(physmem_range_t *pmem_ptr, unsigned long start, | |||
120 | * pdc info is bad in this case). | 120 | * pdc info is bad in this case). |
121 | */ | 121 | */ |
122 | 122 | ||
123 | if ( ((start & (PAGE_SIZE - 1)) != 0) | 123 | if (unlikely( ((start & (PAGE_SIZE - 1)) != 0) |
124 | || ((pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) ) { | 124 | || ((pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) )) { |
125 | 125 | ||
126 | panic("Memory range doesn't align with page size!\n"); | 126 | panic("Memory range doesn't align with page size!\n"); |
127 | } | 127 | } |
@@ -188,7 +188,7 @@ pat_query_module(ulong pcell_loc, ulong mod_index) | |||
188 | temp = pa_pdc_cell.cba; | 188 | temp = pa_pdc_cell.cba; |
189 | dev = alloc_pa_dev(PAT_GET_CBA(temp), &pa_pdc_cell.mod_path); | 189 | dev = alloc_pa_dev(PAT_GET_CBA(temp), &pa_pdc_cell.mod_path); |
190 | if (!dev) { | 190 | if (!dev) { |
191 | return PDC_NE_MOD; | 191 | return PDC_OK; |
192 | } | 192 | } |
193 | 193 | ||
194 | /* alloc_pa_dev sets dev->hpa */ | 194 | /* alloc_pa_dev sets dev->hpa */ |
diff --git a/arch/parisc/kernel/ioctl32.c b/arch/parisc/kernel/ioctl32.c
deleted file mode 100644
index 0a331104ad56..000000000000
--- a/arch/parisc/kernel/ioctl32.c
+++ /dev/null
@@ -1,606 +0,0 @@ | |||
1 | /* $Id: ioctl32.c,v 1.5 2002/10/18 00:21:43 varenet Exp $ | ||
2 | * ioctl32.c: Conversion between 32bit and 64bit native ioctls. | ||
3 | * | ||
4 | * Copyright (C) 1997-2000 Jakub Jelinek (jakub@redhat.com) | ||
5 | * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be) | ||
6 | * | ||
7 | * These routines maintain argument size conversion between 32bit and 64bit | ||
8 | * ioctls. | ||
9 | */ | ||
10 | |||
11 | #include <linux/syscalls.h> | ||
12 | |||
13 | #define INCLUDES | ||
14 | #include "compat_ioctl.c" | ||
15 | |||
16 | #include <asm/perf.h> | ||
17 | #include <asm/ioctls.h> | ||
18 | |||
19 | #define CODE | ||
20 | #include "compat_ioctl.c" | ||
21 | |||
22 | /* Use this to get at 32-bit user passed pointers. | ||
23 | See sys_sparc32.c for description about these. */ | ||
24 | #define A(__x) ((unsigned long)(__x)) | ||
25 | /* The same for use with copy_from_user() and copy_to_user(). */ | ||
26 | #define B(__x) ((void *)(unsigned long)(__x)) | ||
27 | |||
28 | #if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE) | ||
29 | /* This really belongs in include/linux/drm.h -DaveM */ | ||
30 | #include "../../../drivers/char/drm/drm.h" | ||
31 | |||
32 | typedef struct drm32_version { | ||
33 | int version_major; /* Major version */ | ||
34 | int version_minor; /* Minor version */ | ||
35 | int version_patchlevel;/* Patch level */ | ||
36 | int name_len; /* Length of name buffer */ | ||
37 | u32 name; /* Name of driver */ | ||
38 | int date_len; /* Length of date buffer */ | ||
39 | u32 date; /* User-space buffer to hold date */ | ||
40 | int desc_len; /* Length of desc buffer */ | ||
41 | u32 desc; /* User-space buffer to hold desc */ | ||
42 | } drm32_version_t; | ||
43 | #define DRM32_IOCTL_VERSION DRM_IOWR(0x00, drm32_version_t) | ||
44 | |||
45 | static int drm32_version(unsigned int fd, unsigned int cmd, unsigned long arg) | ||
46 | { | ||
47 | drm32_version_t *uversion = (drm32_version_t *)arg; | ||
48 | char *name_ptr, *date_ptr, *desc_ptr; | ||
49 | u32 tmp1, tmp2, tmp3; | ||
50 | drm_version_t kversion; | ||
51 | mm_segment_t old_fs; | ||
52 | int ret; | ||
53 | |||
54 | memset(&kversion, 0, sizeof(kversion)); | ||
55 | if (get_user(kversion.name_len, &uversion->name_len) || | ||
56 | get_user(kversion.date_len, &uversion->date_len) || | ||
57 | get_user(kversion.desc_len, &uversion->desc_len) || | ||
58 | get_user(tmp1, &uversion->name) || | ||
59 | get_user(tmp2, &uversion->date) || | ||
60 | get_user(tmp3, &uversion->desc)) | ||
61 | return -EFAULT; | ||
62 | |||
63 | name_ptr = (char *) A(tmp1); | ||
64 | date_ptr = (char *) A(tmp2); | ||
65 | desc_ptr = (char *) A(tmp3); | ||
66 | |||
67 | ret = -ENOMEM; | ||
68 | if (kversion.name_len && name_ptr) { | ||
69 | kversion.name = kmalloc(kversion.name_len, GFP_KERNEL); | ||
70 | if (!kversion.name) | ||
71 | goto out; | ||
72 | } | ||
73 | if (kversion.date_len && date_ptr) { | ||
74 | kversion.date = kmalloc(kversion.date_len, GFP_KERNEL); | ||
75 | if (!kversion.date) | ||
76 | goto out; | ||
77 | } | ||
78 | if (kversion.desc_len && desc_ptr) { | ||
79 | kversion.desc = kmalloc(kversion.desc_len, GFP_KERNEL); | ||
80 | if (!kversion.desc) | ||
81 | goto out; | ||
82 | } | ||
83 | |||
84 | old_fs = get_fs(); | ||
85 | set_fs(KERNEL_DS); | ||
86 | ret = sys_ioctl (fd, DRM_IOCTL_VERSION, (unsigned long)&kversion); | ||
87 | set_fs(old_fs); | ||
88 | |||
89 | if (!ret) { | ||
90 | if ((kversion.name && | ||
91 | copy_to_user(name_ptr, kversion.name, kversion.name_len)) || | ||
92 | (kversion.date && | ||
93 | copy_to_user(date_ptr, kversion.date, kversion.date_len)) || | ||
94 | (kversion.desc && | ||
95 | copy_to_user(desc_ptr, kversion.desc, kversion.desc_len))) | ||
96 | ret = -EFAULT; | ||
97 | if (put_user(kversion.version_major, &uversion->version_major) || | ||
98 | put_user(kversion.version_minor, &uversion->version_minor) || | ||
99 | put_user(kversion.version_patchlevel, &uversion->version_patchlevel) || | ||
100 | put_user(kversion.name_len, &uversion->name_len) || | ||
101 | put_user(kversion.date_len, &uversion->date_len) || | ||
102 | put_user(kversion.desc_len, &uversion->desc_len)) | ||
103 | ret = -EFAULT; | ||
104 | } | ||
105 | |||
106 | out: | ||
107 | kfree(kversion.name); | ||
108 | kfree(kversion.date); | ||
109 | kfree(kversion.desc); | ||
110 | return ret; | ||
111 | } | ||
112 | |||
113 | typedef struct drm32_unique { | ||
114 | int unique_len; /* Length of unique */ | ||
115 | u32 unique; /* Unique name for driver instantiation */ | ||
116 | } drm32_unique_t; | ||
117 | #define DRM32_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm32_unique_t) | ||
118 | #define DRM32_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm32_unique_t) | ||
119 | |||
120 | static int drm32_getsetunique(unsigned int fd, unsigned int cmd, unsigned long arg) | ||
121 | { | ||
122 | drm32_unique_t *uarg = (drm32_unique_t *)arg; | ||
123 | drm_unique_t karg; | ||
124 | mm_segment_t old_fs; | ||
125 | char *uptr; | ||
126 | u32 tmp; | ||
127 | int ret; | ||
128 | |||
129 | if (get_user(karg.unique_len, &uarg->unique_len)) | ||
130 | return -EFAULT; | ||
131 | karg.unique = NULL; | ||
132 | |||
133 | if (get_user(tmp, &uarg->unique)) | ||
134 | return -EFAULT; | ||
135 | |||
136 | uptr = (char *) A(tmp); | ||
137 | |||
138 | if (uptr) { | ||
139 | karg.unique = kmalloc(karg.unique_len, GFP_KERNEL); | ||
140 | if (!karg.unique) | ||
141 | return -ENOMEM; | ||
142 | if (cmd == DRM32_IOCTL_SET_UNIQUE && | ||
143 | copy_from_user(karg.unique, uptr, karg.unique_len)) { | ||
144 | kfree(karg.unique); | ||
145 | return -EFAULT; | ||
146 | } | ||
147 | } | ||
148 | |||
149 | old_fs = get_fs(); | ||
150 | set_fs(KERNEL_DS); | ||
151 | if (cmd == DRM32_IOCTL_GET_UNIQUE) | ||
152 | ret = sys_ioctl (fd, DRM_IOCTL_GET_UNIQUE, (unsigned long)&karg); | ||
153 | else | ||
154 | ret = sys_ioctl (fd, DRM_IOCTL_SET_UNIQUE, (unsigned long)&karg); | ||
155 | set_fs(old_fs); | ||
156 | |||
157 | if (!ret) { | ||
158 | if (cmd == DRM32_IOCTL_GET_UNIQUE && | ||
159 | uptr != NULL && | ||
160 | copy_to_user(uptr, karg.unique, karg.unique_len)) | ||
161 | ret = -EFAULT; | ||
162 | if (put_user(karg.unique_len, &uarg->unique_len)) | ||
163 | ret = -EFAULT; | ||
164 | } | ||
165 | |||
166 | kfree(karg.unique); | ||
167 | return ret; | ||
168 | } | ||
169 | |||
170 | typedef struct drm32_map { | ||
171 | u32 offset; /* Requested physical address (0 for SAREA)*/ | ||
172 | u32 size; /* Requested physical size (bytes) */ | ||
173 | drm_map_type_t type; /* Type of memory to map */ | ||
174 | drm_map_flags_t flags; /* Flags */ | ||
175 | u32 handle; /* User-space: "Handle" to pass to mmap */ | ||
176 | /* Kernel-space: kernel-virtual address */ | ||
177 | int mtrr; /* MTRR slot used */ | ||
178 | /* Private data */ | ||
179 | } drm32_map_t; | ||
180 | #define DRM32_IOCTL_ADD_MAP DRM_IOWR(0x15, drm32_map_t) | ||
181 | |||
182 | static int drm32_addmap(unsigned int fd, unsigned int cmd, unsigned long arg) | ||
183 | { | ||
184 | drm32_map_t *uarg = (drm32_map_t *) arg; | ||
185 | drm_map_t karg; | ||
186 | mm_segment_t old_fs; | ||
187 | u32 tmp; | ||
188 | int ret; | ||
189 | |||
190 | ret = get_user(karg.offset, &uarg->offset); | ||
191 | ret |= get_user(karg.size, &uarg->size); | ||
192 | ret |= get_user(karg.type, &uarg->type); | ||
193 | ret |= get_user(karg.flags, &uarg->flags); | ||
194 | ret |= get_user(tmp, &uarg->handle); | ||
195 | ret |= get_user(karg.mtrr, &uarg->mtrr); | ||
196 | if (ret) | ||
197 | return -EFAULT; | ||
198 | |||
199 | karg.handle = (void *) A(tmp); | ||
200 | |||
201 | old_fs = get_fs(); | ||
202 | set_fs(KERNEL_DS); | ||
203 | ret = sys_ioctl(fd, DRM_IOCTL_ADD_MAP, (unsigned long) &karg); | ||
204 | set_fs(old_fs); | ||
205 | |||
206 | if (!ret) { | ||
207 | ret = put_user(karg.offset, &uarg->offset); | ||
208 | ret |= put_user(karg.size, &uarg->size); | ||
209 | ret |= put_user(karg.type, &uarg->type); | ||
210 | ret |= put_user(karg.flags, &uarg->flags); | ||
211 | tmp = (u32) (long)karg.handle; | ||
212 | ret |= put_user(tmp, &uarg->handle); | ||
213 | ret |= put_user(karg.mtrr, &uarg->mtrr); | ||
214 | if (ret) | ||
215 | ret = -EFAULT; | ||
216 | } | ||
217 | |||
218 | return ret; | ||
219 | } | ||
220 | |||
221 | typedef struct drm32_buf_info { | ||
222 | int count; /* Entries in list */ | ||
223 | u32 list; /* (drm_buf_desc_t *) */ | ||
224 | } drm32_buf_info_t; | ||
225 | #define DRM32_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm32_buf_info_t) | ||
226 | |||
227 | static int drm32_info_bufs(unsigned int fd, unsigned int cmd, unsigned long arg) | ||
228 | { | ||
229 | drm32_buf_info_t *uarg = (drm32_buf_info_t *)arg; | ||
230 | drm_buf_desc_t *ulist; | ||
231 | drm_buf_info_t karg; | ||
232 | mm_segment_t old_fs; | ||
233 | int orig_count, ret; | ||
234 | u32 tmp; | ||
235 | |||
236 | if (get_user(karg.count, &uarg->count) || | ||
237 | get_user(tmp, &uarg->list)) | ||
238 | return -EFAULT; | ||
239 | |||
240 | ulist = (drm_buf_desc_t *) A(tmp); | ||
241 | |||
242 | orig_count = karg.count; | ||
243 | |||
244 | karg.list = kmalloc(karg.count * sizeof(drm_buf_desc_t), GFP_KERNEL); | ||
245 | if (!karg.list) | ||
246 | return -EFAULT; | ||
247 | |||
248 | old_fs = get_fs(); | ||
249 | set_fs(KERNEL_DS); | ||
250 | ret = sys_ioctl(fd, DRM_IOCTL_INFO_BUFS, (unsigned long) &karg); | ||
251 | set_fs(old_fs); | ||
252 | |||
253 | if (!ret) { | ||
254 | if (karg.count <= orig_count && | ||
255 | (copy_to_user(ulist, karg.list, | ||
256 | karg.count * sizeof(drm_buf_desc_t)))) | ||
257 | ret = -EFAULT; | ||
258 | if (put_user(karg.count, &uarg->count)) | ||
259 | ret = -EFAULT; | ||
260 | } | ||
261 | |||
262 | kfree(karg.list); | ||
263 | return ret; | ||
264 | } | ||
265 | |||
266 | typedef struct drm32_buf_free { | ||
267 | int count; | ||
268 | u32 list; /* (int *) */ | ||
269 | } drm32_buf_free_t; | ||
270 | #define DRM32_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm32_buf_free_t) | ||
271 | |||
272 | static int drm32_free_bufs(unsigned int fd, unsigned int cmd, unsigned long arg) | ||
273 | { | ||
274 | drm32_buf_free_t *uarg = (drm32_buf_free_t *)arg; | ||
275 | drm_buf_free_t karg; | ||
276 | mm_segment_t old_fs; | ||
277 | int *ulist; | ||
278 | int ret; | ||
279 | u32 tmp; | ||
280 | |||
281 | if (get_user(karg.count, &uarg->count) || | ||
282 | get_user(tmp, &uarg->list)) | ||
283 | return -EFAULT; | ||
284 | |||
285 | ulist = (int *) A(tmp); | ||
286 | |||
287 | karg.list = kmalloc(karg.count * sizeof(int), GFP_KERNEL); | ||
288 | if (!karg.list) | ||
289 | return -ENOMEM; | ||
290 | |||
291 | ret = -EFAULT; | ||
292 | if (copy_from_user(karg.list, ulist, (karg.count * sizeof(int)))) | ||
293 | goto out; | ||
294 | |||
295 | old_fs = get_fs(); | ||
296 | set_fs(KERNEL_DS); | ||
297 | ret = sys_ioctl(fd, DRM_IOCTL_FREE_BUFS, (unsigned long) &karg); | ||
298 | set_fs(old_fs); | ||
299 | |||
300 | out: | ||
301 | kfree(karg.list); | ||
302 | return ret; | ||
303 | } | ||
304 | |||
305 | typedef struct drm32_buf_pub { | ||
306 | int idx; /* Index into master buflist */ | ||
307 | int total; /* Buffer size */ | ||
308 | int used; /* Amount of buffer in use (for DMA) */ | ||
309 | u32 address; /* Address of buffer (void *) */ | ||
310 | } drm32_buf_pub_t; | ||
311 | |||
312 | typedef struct drm32_buf_map { | ||
313 | int count; /* Length of buflist */ | ||
314 | u32 virtual; /* Mmaped area in user-virtual (void *) */ | ||
315 | u32 list; /* Buffer information (drm_buf_pub_t *) */ | ||
316 | } drm32_buf_map_t; | ||
317 | #define DRM32_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm32_buf_map_t) | ||
318 | |||
319 | static int drm32_map_bufs(unsigned int fd, unsigned int cmd, unsigned long arg) | ||
320 | { | ||
321 | drm32_buf_map_t *uarg = (drm32_buf_map_t *)arg; | ||
322 | drm32_buf_pub_t *ulist; | ||
323 | drm_buf_map_t karg; | ||
324 | mm_segment_t old_fs; | ||
325 | int orig_count, ret, i; | ||
326 | u32 tmp1, tmp2; | ||
327 | |||
328 | if (get_user(karg.count, &uarg->count) || | ||
329 | get_user(tmp1, &uarg->virtual) || | ||
330 | get_user(tmp2, &uarg->list)) | ||
331 | return -EFAULT; | ||
332 | |||
333 | karg.virtual = (void *) A(tmp1); | ||
334 | ulist = (drm32_buf_pub_t *) A(tmp2); | ||
335 | |||
336 | orig_count = karg.count; | ||
337 | |||
338 | karg.list = kmalloc(karg.count * sizeof(drm_buf_pub_t), GFP_KERNEL); | ||
339 | if (!karg.list) | ||
340 | return -ENOMEM; | ||
341 | |||
342 | ret = -EFAULT; | ||
343 | for (i = 0; i < karg.count; i++) { | ||
344 | if (get_user(karg.list[i].idx, &ulist[i].idx) || | ||
345 | get_user(karg.list[i].total, &ulist[i].total) || | ||
346 | get_user(karg.list[i].used, &ulist[i].used) || | ||
347 | get_user(tmp1, &ulist[i].address)) | ||
348 | goto out; | ||
349 | |||
350 | karg.list[i].address = (void *) A(tmp1); | ||
351 | } | ||
352 | |||
353 | old_fs = get_fs(); | ||
354 | set_fs(KERNEL_DS); | ||
355 | ret = sys_ioctl(fd, DRM_IOCTL_MAP_BUFS, (unsigned long) &karg); | ||
356 | set_fs(old_fs); | ||
357 | |||
358 | if (!ret) { | ||
359 | for (i = 0; i < orig_count; i++) { | ||
360 | tmp1 = (u32) (long) karg.list[i].address; | ||
361 | if (put_user(karg.list[i].idx, &ulist[i].idx) || | ||
362 | put_user(karg.list[i].total, &ulist[i].total) || | ||
363 | put_user(karg.list[i].used, &ulist[i].used) || | ||
364 | put_user(tmp1, &ulist[i].address)) { | ||
365 | ret = -EFAULT; | ||
366 | goto out; | ||
367 | } | ||
368 | } | ||
369 | if (put_user(karg.count, &uarg->count)) | ||
370 | ret = -EFAULT; | ||
371 | } | ||
372 | |||
373 | out: | ||
374 | kfree(karg.list); | ||
375 | return ret; | ||
376 | } | ||
377 | |||
378 | typedef struct drm32_dma { | ||
379 | /* Indices here refer to the offset into | ||
380 | buflist in drm_buf_get_t. */ | ||
381 | int context; /* Context handle */ | ||
382 | int send_count; /* Number of buffers to send */ | ||
383 | u32 send_indices; /* List of handles to buffers (int *) */ | ||
384 | u32 send_sizes; /* Lengths of data to send (int *) */ | ||
385 | drm_dma_flags_t flags; /* Flags */ | ||
386 | int request_count; /* Number of buffers requested */ | ||
387 | int request_size; /* Desired size for buffers */ | ||
388 | u32 request_indices; /* Buffer information (int *) */ | ||
389 | u32 request_sizes; /* (int *) */ | ||
390 | int granted_count; /* Number of buffers granted */ | ||
391 | } drm32_dma_t; | ||
392 | #define DRM32_IOCTL_DMA DRM_IOWR(0x29, drm32_dma_t) | ||
393 | |||
394 | /* RED PEN The DRM layer blindly dereferences the send/request | ||
395 | * indice/size arrays even though they are userland | ||
396 | * pointers. -DaveM | ||
397 | */ | ||
398 | static int drm32_dma(unsigned int fd, unsigned int cmd, unsigned long arg) | ||
399 | { | ||
400 | drm32_dma_t *uarg = (drm32_dma_t *) arg; | ||
401 | int *u_si, *u_ss, *u_ri, *u_rs; | ||
402 | drm_dma_t karg; | ||
403 | mm_segment_t old_fs; | ||
404 | int ret; | ||
405 | u32 tmp1, tmp2, tmp3, tmp4; | ||
406 | |||
407 | karg.send_indices = karg.send_sizes = NULL; | ||
408 | karg.request_indices = karg.request_sizes = NULL; | ||
409 | |||
410 | if (get_user(karg.context, &uarg->context) || | ||
411 | get_user(karg.send_count, &uarg->send_count) || | ||
412 | get_user(tmp1, &uarg->send_indices) || | ||
413 | get_user(tmp2, &uarg->send_sizes) || | ||
414 | get_user(karg.flags, &uarg->flags) || | ||
415 | get_user(karg.request_count, &uarg->request_count) || | ||
416 | get_user(karg.request_size, &uarg->request_size) || | ||
417 | get_user(tmp3, &uarg->request_indices) || | ||
418 | get_user(tmp4, &uarg->request_sizes) || | ||
419 | get_user(karg.granted_count, &uarg->granted_count)) | ||
420 | return -EFAULT; | ||
421 | |||
422 | u_si = (int *) A(tmp1); | ||
423 | u_ss = (int *) A(tmp2); | ||
424 | u_ri = (int *) A(tmp3); | ||
425 | u_rs = (int *) A(tmp4); | ||
426 | |||
427 | if (karg.send_count) { | ||
428 | karg.send_indices = kmalloc(karg.send_count * sizeof(int), GFP_KERNEL); | ||
429 | karg.send_sizes = kmalloc(karg.send_count * sizeof(int), GFP_KERNEL); | ||
430 | |||
431 | ret = -ENOMEM; | ||
432 | if (!karg.send_indices || !karg.send_sizes) | ||
433 | goto out; | ||
434 | |||
435 | ret = -EFAULT; | ||
436 | if (copy_from_user(karg.send_indices, u_si, | ||
437 | (karg.send_count * sizeof(int))) || | ||
438 | copy_from_user(karg.send_sizes, u_ss, | ||
439 | (karg.send_count * sizeof(int)))) | ||
440 | goto out; | ||
441 | } | ||
442 | |||
443 | if (karg.request_count) { | ||
444 | karg.request_indices = kmalloc(karg.request_count * sizeof(int), GFP_KERNEL); | ||
445 | karg.request_sizes = kmalloc(karg.request_count * sizeof(int), GFP_KERNEL); | ||
446 | |||
447 | ret = -ENOMEM; | ||
448 | if (!karg.request_indices || !karg.request_sizes) | ||
449 | goto out; | ||
450 | |||
451 | ret = -EFAULT; | ||
452 | if (copy_from_user(karg.request_indices, u_ri, | ||
453 | (karg.request_count * sizeof(int))) || | ||
454 | copy_from_user(karg.request_sizes, u_rs, | ||
455 | (karg.request_count * sizeof(int)))) | ||
456 | goto out; | ||
457 | } | ||
458 | |||
459 | old_fs = get_fs(); | ||
460 | set_fs(KERNEL_DS); | ||
461 | ret = sys_ioctl(fd, DRM_IOCTL_DMA, (unsigned long) &karg); | ||
462 | set_fs(old_fs); | ||
463 | |||
464 | if (!ret) { | ||
465 | if (put_user(karg.context, &uarg->context) || | ||
466 | put_user(karg.send_count, &uarg->send_count) || | ||
467 | put_user(karg.flags, &uarg->flags) || | ||
468 | put_user(karg.request_count, &uarg->request_count) || | ||
469 | put_user(karg.request_size, &uarg->request_size) || | ||
470 | put_user(karg.granted_count, &uarg->granted_count)) | ||
471 | ret = -EFAULT; | ||
472 | |||
473 | if (karg.send_count) { | ||
474 | if (copy_to_user(u_si, karg.send_indices, | ||
475 | (karg.send_count * sizeof(int))) || | ||
476 | copy_to_user(u_ss, karg.send_sizes, | ||
477 | (karg.send_count * sizeof(int)))) | ||
478 | ret = -EFAULT; | ||
479 | } | ||
480 | if (karg.request_count) { | ||
481 | if (copy_to_user(u_ri, karg.request_indices, | ||
482 | (karg.request_count * sizeof(int))) || | ||
483 | copy_to_user(u_rs, karg.request_sizes, | ||
484 | (karg.request_count * sizeof(int)))) | ||
485 | ret = -EFAULT; | ||
486 | } | ||
487 | } | ||
488 | |||
489 | out: | ||
490 | kfree(karg.send_indices); | ||
491 | kfree(karg.send_sizes); | ||
492 | kfree(karg.request_indices); | ||
493 | kfree(karg.request_sizes); | ||
494 | return ret; | ||
495 | } | ||
496 | |||
497 | typedef struct drm32_ctx_res { | ||
498 | int count; | ||
499 | u32 contexts; /* (drm_ctx_t *) */ | ||
500 | } drm32_ctx_res_t; | ||
501 | #define DRM32_IOCTL_RES_CTX DRM_IOWR(0x26, drm32_ctx_res_t) | ||
502 | |||
503 | static int drm32_res_ctx(unsigned int fd, unsigned int cmd, unsigned long arg) | ||
504 | { | ||
505 | drm32_ctx_res_t *uarg = (drm32_ctx_res_t *) arg; | ||
506 | drm_ctx_t *ulist; | ||
507 | drm_ctx_res_t karg; | ||
508 | mm_segment_t old_fs; | ||
509 | int orig_count, ret; | ||
510 | u32 tmp; | ||
511 | |||
512 | karg.contexts = NULL; | ||
513 | if (get_user(karg.count, &uarg->count) || | ||
514 | get_user(tmp, &uarg->contexts)) | ||
515 | return -EFAULT; | ||
516 | |||
517 | ulist = (drm_ctx_t *) A(tmp); | ||
518 | |||
519 | orig_count = karg.count; | ||
520 | if (karg.count && ulist) { | ||
521 | karg.contexts = kmalloc((karg.count * sizeof(drm_ctx_t)), GFP_KERNEL); | ||
522 | if (!karg.contexts) | ||
523 | return -ENOMEM; | ||
524 | if (copy_from_user(karg.contexts, ulist, | ||
525 | (karg.count * sizeof(drm_ctx_t)))) { | ||
526 | kfree(karg.contexts); | ||
527 | return -EFAULT; | ||
528 | } | ||
529 | } | ||
530 | |||
531 | old_fs = get_fs(); | ||
532 | set_fs(KERNEL_DS); | ||
533 | ret = sys_ioctl(fd, DRM_IOCTL_RES_CTX, (unsigned long) &karg); | ||
534 | set_fs(old_fs); | ||
535 | |||
536 | if (!ret) { | ||
537 | if (orig_count) { | ||
538 | if (copy_to_user(ulist, karg.contexts, | ||
539 | (orig_count * sizeof(drm_ctx_t)))) | ||
540 | ret = -EFAULT; | ||
541 | } | ||
542 | if (put_user(karg.count, &uarg->count)) | ||
543 | ret = -EFAULT; | ||
544 | } | ||
545 | |||
546 | kfree(karg.contexts); | ||
547 | return ret; | ||
548 | } | ||
549 | |||
550 | #endif | ||
551 | |||
552 | #define HANDLE_IOCTL(cmd, handler) { cmd, (ioctl_trans_handler_t)handler, NULL }, | ||
553 | #define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL(cmd, sys_ioctl) | ||
554 | |||
555 | #define IOCTL_TABLE_START struct ioctl_trans ioctl_start[] = { | ||
556 | #define IOCTL_TABLE_END }; | ||
557 | |||
558 | IOCTL_TABLE_START | ||
559 | #include <linux/compat_ioctl.h> | ||
560 | |||
561 | #define DECLARES | ||
562 | #include "compat_ioctl.c" | ||
563 | |||
564 | /* PA-specific ioctls */ | ||
565 | COMPATIBLE_IOCTL(PA_PERF_ON) | ||
566 | COMPATIBLE_IOCTL(PA_PERF_OFF) | ||
567 | COMPATIBLE_IOCTL(PA_PERF_VERSION) | ||
568 | |||
569 | /* And these ioctls need translation */ | ||
570 | HANDLE_IOCTL(SIOCGPPPSTATS, dev_ifsioc) | ||
571 | HANDLE_IOCTL(SIOCGPPPCSTATS, dev_ifsioc) | ||
572 | HANDLE_IOCTL(SIOCGPPPVER, dev_ifsioc) | ||
573 | |||
574 | #if defined(CONFIG_GEN_RTC) | ||
575 | COMPATIBLE_IOCTL(RTC_AIE_ON) | ||
576 | COMPATIBLE_IOCTL(RTC_AIE_OFF) | ||
577 | COMPATIBLE_IOCTL(RTC_UIE_ON) | ||
578 | COMPATIBLE_IOCTL(RTC_UIE_OFF) | ||
579 | COMPATIBLE_IOCTL(RTC_PIE_ON) | ||
580 | COMPATIBLE_IOCTL(RTC_PIE_OFF) | ||
581 | COMPATIBLE_IOCTL(RTC_WIE_ON) | ||
582 | COMPATIBLE_IOCTL(RTC_WIE_OFF) | ||
583 | COMPATIBLE_IOCTL(RTC_ALM_SET) /* struct rtc_time only has ints */ | ||
584 | COMPATIBLE_IOCTL(RTC_ALM_READ) /* struct rtc_time only has ints */ | ||
585 | COMPATIBLE_IOCTL(RTC_RD_TIME) /* struct rtc_time only has ints */ | ||
586 | COMPATIBLE_IOCTL(RTC_SET_TIME) /* struct rtc_time only has ints */ | ||
587 | HANDLE_IOCTL(RTC_IRQP_READ, w_long) | ||
588 | COMPATIBLE_IOCTL(RTC_IRQP_SET) | ||
589 | HANDLE_IOCTL(RTC_EPOCH_READ, w_long) | ||
590 | COMPATIBLE_IOCTL(RTC_EPOCH_SET) | ||
591 | #endif | ||
592 | |||
593 | #if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE) | ||
594 | HANDLE_IOCTL(DRM32_IOCTL_VERSION, drm32_version); | ||
595 | HANDLE_IOCTL(DRM32_IOCTL_GET_UNIQUE, drm32_getsetunique); | ||
596 | HANDLE_IOCTL(DRM32_IOCTL_SET_UNIQUE, drm32_getsetunique); | ||
597 | HANDLE_IOCTL(DRM32_IOCTL_ADD_MAP, drm32_addmap); | ||
598 | HANDLE_IOCTL(DRM32_IOCTL_INFO_BUFS, drm32_info_bufs); | ||
599 | HANDLE_IOCTL(DRM32_IOCTL_FREE_BUFS, drm32_free_bufs); | ||
600 | HANDLE_IOCTL(DRM32_IOCTL_MAP_BUFS, drm32_map_bufs); | ||
601 | HANDLE_IOCTL(DRM32_IOCTL_DMA, drm32_dma); | ||
602 | HANDLE_IOCTL(DRM32_IOCTL_RES_CTX, drm32_res_ctx); | ||
603 | #endif /* DRM */ | ||
604 | IOCTL_TABLE_END | ||
605 | |||
606 | int ioctl_table_size = ARRAY_SIZE(ioctl_start); | ||
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 006385dbee66..197936d9359a 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -30,6 +30,9 @@ | |||
30 | #include <linux/seq_file.h> | 30 | #include <linux/seq_file.h> |
31 | #include <linux/spinlock.h> | 31 | #include <linux/spinlock.h> |
32 | #include <linux/types.h> | 32 | #include <linux/types.h> |
33 | #include <asm/io.h> | ||
34 | |||
35 | #include <asm/smp.h> | ||
33 | 36 | ||
34 | #undef PARISC_IRQ_CR16_COUNTS | 37 | #undef PARISC_IRQ_CR16_COUNTS |
35 | 38 | ||
@@ -43,26 +46,34 @@ extern irqreturn_t ipi_interrupt(int, void *, struct pt_regs *); | |||
43 | */ | 46 | */ |
44 | static volatile unsigned long cpu_eiem = 0; | 47 | static volatile unsigned long cpu_eiem = 0; |
45 | 48 | ||
46 | static void cpu_set_eiem(void *info) | 49 | static void cpu_disable_irq(unsigned int irq) |
47 | { | ||
48 | set_eiem((unsigned long) info); | ||
49 | } | ||
50 | |||
51 | static inline void cpu_disable_irq(unsigned int irq) | ||
52 | { | 50 | { |
53 | unsigned long eirr_bit = EIEM_MASK(irq); | 51 | unsigned long eirr_bit = EIEM_MASK(irq); |
54 | 52 | ||
55 | cpu_eiem &= ~eirr_bit; | 53 | cpu_eiem &= ~eirr_bit; |
56 | on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1); | 54 | /* Do nothing on the other CPUs. If they get this interrupt, |
55 | * The & cpu_eiem in the do_cpu_irq_mask() ensures they won't | ||
56 | * handle it, and the set_eiem() at the bottom will ensure it | ||
57 | * then gets disabled */ | ||
57 | } | 58 | } |
58 | 59 | ||
59 | static void cpu_enable_irq(unsigned int irq) | 60 | static void cpu_enable_irq(unsigned int irq) |
60 | { | 61 | { |
61 | unsigned long eirr_bit = EIEM_MASK(irq); | 62 | unsigned long eirr_bit = EIEM_MASK(irq); |
62 | 63 | ||
63 | mtctl(eirr_bit, 23); /* clear EIRR bit before unmasking */ | ||
64 | cpu_eiem |= eirr_bit; | 64 | cpu_eiem |= eirr_bit; |
65 | on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1); | 65 | |
66 | /* FIXME: while our interrupts aren't nested, we cannot reset | ||
67 | * the eiem mask if we're already in an interrupt. Once we | ||
68 | * implement nested interrupts, this can go away | ||
69 | */ | ||
70 | if (!in_interrupt()) | ||
71 | set_eiem(cpu_eiem); | ||
72 | |||
73 | /* This is just a simple NOP IPI. But what it does is cause | ||
74 | * all the other CPUs to do a set_eiem(cpu_eiem) at the end | ||
75 | * of the interrupt handler */ | ||
76 | smp_send_all_nop(); | ||
66 | } | 77 | } |
67 | 78 | ||
68 | static unsigned int cpu_startup_irq(unsigned int irq) | 79 | static unsigned int cpu_startup_irq(unsigned int irq) |
@@ -74,6 +85,35 @@ static unsigned int cpu_startup_irq(unsigned int irq) | |||
74 | void no_ack_irq(unsigned int irq) { } | 85 | void no_ack_irq(unsigned int irq) { } |
75 | void no_end_irq(unsigned int irq) { } | 86 | void no_end_irq(unsigned int irq) { } |
76 | 87 | ||
88 | #ifdef CONFIG_SMP | ||
89 | int cpu_check_affinity(unsigned int irq, cpumask_t *dest) | ||
90 | { | ||
91 | int cpu_dest; | ||
92 | |||
93 | /* timer and ipi have to always be received on all CPUs */ | ||
94 | if (irq == TIMER_IRQ || irq == IPI_IRQ) { | ||
95 | /* Bad linux design decision. The mask has already | ||
96 | * been set; we must reset it */ | ||
97 | irq_affinity[irq] = CPU_MASK_ALL; | ||
98 | return -EINVAL; | ||
99 | } | ||
100 | |||
101 | /* whatever mask they set, we just allow one CPU */ | ||
102 | cpu_dest = first_cpu(*dest); | ||
103 | *dest = cpumask_of_cpu(cpu_dest); | ||
104 | |||
105 | return 0; | ||
106 | } | ||
107 | |||
108 | static void cpu_set_affinity_irq(unsigned int irq, cpumask_t dest) | ||
109 | { | ||
110 | if (cpu_check_affinity(irq, &dest)) | ||
111 | return; | ||
112 | |||
113 | irq_affinity[irq] = dest; | ||
114 | } | ||
115 | #endif | ||
116 | |||
77 | static struct hw_interrupt_type cpu_interrupt_type = { | 117 | static struct hw_interrupt_type cpu_interrupt_type = { |
78 | .typename = "CPU", | 118 | .typename = "CPU", |
79 | .startup = cpu_startup_irq, | 119 | .startup = cpu_startup_irq, |
@@ -82,7 +122,9 @@ static struct hw_interrupt_type cpu_interrupt_type = { | |||
82 | .disable = cpu_disable_irq, | 122 | .disable = cpu_disable_irq, |
83 | .ack = no_ack_irq, | 123 | .ack = no_ack_irq, |
84 | .end = no_end_irq, | 124 | .end = no_end_irq, |
85 | // .set_affinity = cpu_set_affinity_irq, | 125 | #ifdef CONFIG_SMP |
126 | .set_affinity = cpu_set_affinity_irq, | ||
127 | #endif | ||
86 | }; | 128 | }; |
87 | 129 | ||
88 | int show_interrupts(struct seq_file *p, void *v) | 130 | int show_interrupts(struct seq_file *p, void *v) |
@@ -219,6 +261,17 @@ int txn_alloc_irq(unsigned int bits_wide) | |||
219 | return -1; | 261 | return -1; |
220 | } | 262 | } |
221 | 263 | ||
264 | |||
265 | unsigned long txn_affinity_addr(unsigned int irq, int cpu) | ||
266 | { | ||
267 | #ifdef CONFIG_SMP | ||
268 | irq_affinity[irq] = cpumask_of_cpu(cpu); | ||
269 | #endif | ||
270 | |||
271 | return cpu_data[cpu].txn_addr; | ||
272 | } | ||
273 | |||
274 | |||
222 | unsigned long txn_alloc_addr(unsigned int virt_irq) | 275 | unsigned long txn_alloc_addr(unsigned int virt_irq) |
223 | { | 276 | { |
224 | static int next_cpu = -1; | 277 | static int next_cpu = -1; |
@@ -233,7 +286,7 @@ unsigned long txn_alloc_addr(unsigned int virt_irq) | |||
233 | if (next_cpu >= NR_CPUS) | 286 | if (next_cpu >= NR_CPUS) |
234 | next_cpu = 0; /* nothing else, assign monarch */ | 287 | next_cpu = 0; /* nothing else, assign monarch */ |
235 | 288 | ||
236 | return cpu_data[next_cpu].txn_addr; | 289 | return txn_affinity_addr(virt_irq, next_cpu); |
237 | } | 290 | } |
238 | 291 | ||
239 | 292 | ||
@@ -250,10 +303,11 @@ void do_cpu_irq_mask(struct pt_regs *regs) | |||
250 | irq_enter(); | 303 | irq_enter(); |
251 | 304 | ||
252 | /* | 305 | /* |
253 | * Only allow interrupt processing to be interrupted by the | 306 | * Don't allow TIMER or IPI nested interrupts. |
254 | * timer tick | 307 | * Allowing any single interrupt to nest can lead to that CPU |
308 | * handling interrupts with all enabled interrupts unmasked. | ||
255 | */ | 309 | */ |
256 | set_eiem(EIEM_MASK(TIMER_IRQ)); | 310 | set_eiem(0UL); |
257 | 311 | ||
258 | /* 1) only process IRQs that are enabled/unmasked (cpu_eiem) | 312 | /* 1) only process IRQs that are enabled/unmasked (cpu_eiem) |
259 | * 2) We loop here on EIRR contents in order to avoid | 313 | * 2) We loop here on EIRR contents in order to avoid |
@@ -267,23 +321,41 @@ void do_cpu_irq_mask(struct pt_regs *regs) | |||
267 | if (!eirr_val) | 321 | if (!eirr_val) |
268 | break; | 322 | break; |
269 | 323 | ||
270 | if (eirr_val & EIEM_MASK(TIMER_IRQ)) | ||
271 | set_eiem(0); | ||
272 | |||
273 | mtctl(eirr_val, 23); /* reset bits we are going to process */ | 324 | mtctl(eirr_val, 23); /* reset bits we are going to process */ |
274 | 325 | ||
275 | /* Work our way from MSb to LSb...same order we alloc EIRs */ | 326 | /* Work our way from MSb to LSb...same order we alloc EIRs */ |
276 | for (irq = TIMER_IRQ; eirr_val && bit; bit>>=1, irq++) { | 327 | for (irq = TIMER_IRQ; eirr_val && bit; bit>>=1, irq++) { |
328 | #ifdef CONFIG_SMP | ||
329 | cpumask_t dest = irq_affinity[irq]; | ||
330 | #endif | ||
277 | if (!(bit & eirr_val)) | 331 | if (!(bit & eirr_val)) |
278 | continue; | 332 | continue; |
279 | 333 | ||
280 | /* clear bit in mask - can exit loop sooner */ | 334 | /* clear bit in mask - can exit loop sooner */ |
281 | eirr_val &= ~bit; | 335 | eirr_val &= ~bit; |
282 | 336 | ||
337 | #ifdef CONFIG_SMP | ||
338 | /* FIXME: because generic set affinity mucks | ||
339 | * with the affinity before sending it to us | ||
340 | * we can get the situation where the affinity is | ||
341 | * wrong for our CPU type interrupts */ | ||
342 | if (irq != TIMER_IRQ && irq != IPI_IRQ && | ||
343 | !cpu_isset(smp_processor_id(), dest)) { | ||
344 | int cpu = first_cpu(dest); | ||
345 | |||
346 | printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n", | ||
347 | irq, smp_processor_id(), cpu); | ||
348 | gsc_writel(irq + CPU_IRQ_BASE, | ||
349 | cpu_data[cpu].hpa); | ||
350 | continue; | ||
351 | } | ||
352 | #endif | ||
353 | |||
283 | __do_IRQ(irq, regs); | 354 | __do_IRQ(irq, regs); |
284 | } | 355 | } |
285 | } | 356 | } |
286 | set_eiem(cpu_eiem); | 357 | |
358 | set_eiem(cpu_eiem); /* restore original mask */ | ||
287 | irq_exit(); | 359 | irq_exit(); |
288 | } | 360 | } |
289 | 361 | ||
@@ -291,12 +363,14 @@ void do_cpu_irq_mask(struct pt_regs *regs) | |||
291 | static struct irqaction timer_action = { | 363 | static struct irqaction timer_action = { |
292 | .handler = timer_interrupt, | 364 | .handler = timer_interrupt, |
293 | .name = "timer", | 365 | .name = "timer", |
366 | .flags = SA_INTERRUPT, | ||
294 | }; | 367 | }; |
295 | 368 | ||
296 | #ifdef CONFIG_SMP | 369 | #ifdef CONFIG_SMP |
297 | static struct irqaction ipi_action = { | 370 | static struct irqaction ipi_action = { |
298 | .handler = ipi_interrupt, | 371 | .handler = ipi_interrupt, |
299 | .name = "IPI", | 372 | .name = "IPI", |
373 | .flags = SA_INTERRUPT, | ||
300 | }; | 374 | }; |
301 | #endif | 375 | #endif |
302 | 376 | ||
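The irq.c hunk is the substantive interrupt change in this merge: rather than broadcasting an IPI that immediately rewrites every CPU's EIEM register on each enable/disable, the code now keeps a software mask in cpu_eiem and lets each CPU converge on it lazily. Condensed from the hunks above (a sketch, with EIEM_MASK(), set_eiem() and smp_send_all_nop() taken as provided by the parisc code this merge touches elsewhere):

```c
static volatile unsigned long cpu_eiem;	/* software copy of the enable mask */

static void cpu_disable_irq(unsigned int irq)
{
	cpu_eiem &= ~EIEM_MASK(irq);
	/* No IPI needed: do_cpu_irq_mask() only dispatches EIRR bits that
	 * are still set in cpu_eiem, and every handler exits through
	 * set_eiem(cpu_eiem), which masks the source on that CPU too. */
}

static void cpu_enable_irq(unsigned int irq)
{
	cpu_eiem |= EIEM_MASK(irq);

	/* Interrupts are not nested yet, so EIEM may only be reloaded
	 * here when we are not already inside a handler. */
	if (!in_interrupt())
		set_eiem(cpu_eiem);

	/* A no-op IPI forces the other CPUs through their interrupt exit
	 * path, where set_eiem(cpu_eiem) picks up the new bit. */
	smp_send_all_nop();
}
```

The same hunk also wires a real .set_affinity hook into cpu_interrupt_type: any requested mask is cut down to a single CPU (except for TIMER_IRQ and IPI_IRQ, which must stay on all CPUs), and interrupts that still arrive on the wrong CPU are redirected to the chosen one with a gsc_writel() to that CPU's EIR address.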
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index f94a02ef3d95..a6caf1073085 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -33,10 +33,10 @@ | |||
33 | #include <asm/uaccess.h> | 33 | #include <asm/uaccess.h> |
34 | #include <asm/tlbflush.h> /* for purge_tlb_*() macros */ | 34 | #include <asm/tlbflush.h> /* for purge_tlb_*() macros */ |
35 | 35 | ||
36 | static struct proc_dir_entry * proc_gsc_root = NULL; | 36 | static struct proc_dir_entry * proc_gsc_root __read_mostly = NULL; |
37 | static int pcxl_proc_info(char *buffer, char **start, off_t offset, int length); | 37 | static int pcxl_proc_info(char *buffer, char **start, off_t offset, int length); |
38 | static unsigned long pcxl_used_bytes = 0; | 38 | static unsigned long pcxl_used_bytes __read_mostly = 0; |
39 | static unsigned long pcxl_used_pages = 0; | 39 | static unsigned long pcxl_used_pages __read_mostly = 0; |
40 | 40 | ||
41 | extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */ | 41 | extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */ |
42 | static spinlock_t pcxl_res_lock; | 42 | static spinlock_t pcxl_res_lock; |
diff --git a/arch/parisc/kernel/pdc_chassis.c b/arch/parisc/kernel/pdc_chassis.c
index 52004ae28d20..2a01fe1bdc98 100644
--- a/arch/parisc/kernel/pdc_chassis.c
+++ b/arch/parisc/kernel/pdc_chassis.c
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/kernel.h> | 30 | #include <linux/kernel.h> |
31 | #include <linux/reboot.h> | 31 | #include <linux/reboot.h> |
32 | #include <linux/notifier.h> | 32 | #include <linux/notifier.h> |
33 | #include <linux/cache.h> | ||
33 | 34 | ||
34 | #include <asm/pdc_chassis.h> | 35 | #include <asm/pdc_chassis.h> |
35 | #include <asm/processor.h> | 36 | #include <asm/processor.h> |
@@ -38,8 +39,8 @@ | |||
38 | 39 | ||
39 | 40 | ||
40 | #ifdef CONFIG_PDC_CHASSIS | 41 | #ifdef CONFIG_PDC_CHASSIS |
41 | static int pdc_chassis_old = 0; | 42 | static int pdc_chassis_old __read_mostly = 0; |
42 | static unsigned int pdc_chassis_enabled = 1; | 43 | static unsigned int pdc_chassis_enabled __read_mostly = 1; |
43 | 44 | ||
44 | 45 | ||
45 | /** | 46 | /** |
@@ -132,7 +133,7 @@ void __init parisc_pdc_chassis_init(void) | |||
132 | { | 133 | { |
133 | #ifdef CONFIG_PDC_CHASSIS | 134 | #ifdef CONFIG_PDC_CHASSIS |
134 | int handle = 0; | 135 | int handle = 0; |
135 | if (pdc_chassis_enabled) { | 136 | if (likely(pdc_chassis_enabled)) { |
136 | DPRINTK(KERN_DEBUG "%s: parisc_pdc_chassis_init()\n", __FILE__); | 137 | DPRINTK(KERN_DEBUG "%s: parisc_pdc_chassis_init()\n", __FILE__); |
137 | 138 | ||
138 | /* Let see if we have something to handle... */ | 139 | /* Let see if we have something to handle... */ |
@@ -142,7 +143,7 @@ void __init parisc_pdc_chassis_init(void) | |||
142 | printk(KERN_INFO "Enabling PDC_PAT chassis codes support.\n"); | 143 | printk(KERN_INFO "Enabling PDC_PAT chassis codes support.\n"); |
143 | handle = 1; | 144 | handle = 1; |
144 | } | 145 | } |
145 | else if (pdc_chassis_old) { | 146 | else if (unlikely(pdc_chassis_old)) { |
146 | printk(KERN_INFO "Enabling old style chassis LED panel support.\n"); | 147 | printk(KERN_INFO "Enabling old style chassis LED panel support.\n"); |
147 | handle = 1; | 148 | handle = 1; |
148 | } | 149 | } |
@@ -178,7 +179,7 @@ int pdc_chassis_send_status(int message) | |||
178 | /* Maybe we should do that in an other way ? */ | 179 | /* Maybe we should do that in an other way ? */ |
179 | int retval = 0; | 180 | int retval = 0; |
180 | #ifdef CONFIG_PDC_CHASSIS | 181 | #ifdef CONFIG_PDC_CHASSIS |
181 | if (pdc_chassis_enabled) { | 182 | if (likely(pdc_chassis_enabled)) { |
182 | 183 | ||
183 | DPRINTK(KERN_DEBUG "%s: pdc_chassis_send_status(%d)\n", __FILE__, message); | 184 | DPRINTK(KERN_DEBUG "%s: pdc_chassis_send_status(%d)\n", __FILE__, message); |
184 | 185 | ||
@@ -214,7 +215,7 @@ int pdc_chassis_send_status(int message) | |||
214 | } | 215 | } |
215 | } else retval = -1; | 216 | } else retval = -1; |
216 | #else | 217 | #else |
217 | if (pdc_chassis_old) { | 218 | if (unlikely(pdc_chassis_old)) { |
218 | switch (message) { | 219 | switch (message) { |
219 | case PDC_CHASSIS_DIRECT_BSTART: | 220 | case PDC_CHASSIS_DIRECT_BSTART: |
220 | case PDC_CHASSIS_DIRECT_BCOMPLETE: | 221 | case PDC_CHASSIS_DIRECT_BCOMPLETE: |
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c
index 44670d6e06f4..11d406cd0b3e 100644
--- a/arch/parisc/kernel/perf.c
+++ b/arch/parisc/kernel/perf.c
@@ -42,6 +42,7 @@ | |||
42 | * on every box. | 42 | * on every box. |
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <linux/capability.h> | ||
45 | #include <linux/init.h> | 46 | #include <linux/init.h> |
46 | #include <linux/proc_fs.h> | 47 | #include <linux/proc_fs.h> |
47 | #include <linux/miscdevice.h> | 48 | #include <linux/miscdevice.h> |
@@ -66,10 +67,10 @@ struct rdr_tbl_ent { | |||
66 | uint8_t write_control; | 67 | uint8_t write_control; |
67 | }; | 68 | }; |
68 | 69 | ||
69 | static int perf_processor_interface = UNKNOWN_INTF; | 70 | static int perf_processor_interface __read_mostly = UNKNOWN_INTF; |
70 | static int perf_enabled = 0; | 71 | static int perf_enabled __read_mostly = 0; |
71 | static spinlock_t perf_lock; | 72 | static spinlock_t perf_lock; |
72 | struct parisc_device *cpu_device = NULL; | 73 | struct parisc_device *cpu_device __read_mostly = NULL; |
73 | 74 | ||
74 | /* RDRs to write for PCX-W */ | 75 | /* RDRs to write for PCX-W */ |
75 | static int perf_rdrs_W[] = | 76 | static int perf_rdrs_W[] = |
@@ -196,8 +197,7 @@ static int perf_open(struct inode *inode, struct file *file); | |||
196 | static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos); | 197 | static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos); |
197 | static ssize_t perf_write(struct file *file, const char __user *buf, size_t count, | 198 | static ssize_t perf_write(struct file *file, const char __user *buf, size_t count, |
198 | loff_t *ppos); | 199 | loff_t *ppos); |
199 | static int perf_ioctl(struct inode *inode, struct file *file, unsigned int cmd, | 200 | static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg); |
200 | unsigned long arg); | ||
201 | static void perf_start_counters(void); | 201 | static void perf_start_counters(void); |
202 | static int perf_stop_counters(uint32_t *raddr); | 202 | static int perf_stop_counters(uint32_t *raddr); |
203 | static struct rdr_tbl_ent * perf_rdr_get_entry(uint32_t rdr_num); | 203 | static struct rdr_tbl_ent * perf_rdr_get_entry(uint32_t rdr_num); |
@@ -438,48 +438,56 @@ static void perf_patch_images(void) | |||
438 | * must be running on the processor that you wish to change. | 438 | * must be running on the processor that you wish to change. |
439 | */ | 439 | */ |
440 | 440 | ||
441 | static int perf_ioctl(struct inode *inode, struct file *file, unsigned int cmd, | 441 | static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
442 | unsigned long arg) | ||
443 | { | 442 | { |
444 | long error_start; | 443 | long error_start; |
445 | uint32_t raddr[4]; | 444 | uint32_t raddr[4]; |
445 | int error = 0; | ||
446 | 446 | ||
447 | lock_kernel(); | ||
447 | switch (cmd) { | 448 | switch (cmd) { |
448 | 449 | ||
449 | case PA_PERF_ON: | 450 | case PA_PERF_ON: |
450 | /* Start the counters */ | 451 | /* Start the counters */ |
451 | perf_start_counters(); | 452 | perf_start_counters(); |
452 | return 0; | 453 | break; |
453 | 454 | ||
454 | case PA_PERF_OFF: | 455 | case PA_PERF_OFF: |
455 | error_start = perf_stop_counters(raddr); | 456 | error_start = perf_stop_counters(raddr); |
456 | if (error_start != 0) { | 457 | if (error_start != 0) { |
457 | printk(KERN_ERR "perf_off: perf_stop_counters = %ld\n", error_start); | 458 | printk(KERN_ERR "perf_off: perf_stop_counters = %ld\n", error_start); |
458 | return -EFAULT; | 459 | error = -EFAULT; |
460 | break; | ||
459 | } | 461 | } |
460 | 462 | ||
461 | /* copy out the Counters */ | 463 | /* copy out the Counters */ |
462 | if (copy_to_user((void __user *)arg, raddr, | 464 | if (copy_to_user((void __user *)arg, raddr, |
463 | sizeof (raddr)) != 0) { | 465 | sizeof (raddr)) != 0) { |
464 | return -EFAULT; | 466 | error = -EFAULT; |
467 | break; | ||
465 | } | 468 | } |
466 | return 0; | 469 | break; |
467 | 470 | ||
468 | case PA_PERF_VERSION: | 471 | case PA_PERF_VERSION: |
469 | /* Return the version # */ | 472 | /* Return the version # */ |
470 | return put_user(PERF_VERSION, (int *)arg); | 473 | error = put_user(PERF_VERSION, (int *)arg); |
474 | break; | ||
471 | 475 | ||
472 | default: | 476 | default: |
473 | break; | 477 | error = -ENOTTY; |
474 | } | 478 | } |
475 | return -ENOTTY; | 479 | |
480 | unlock_kernel(); | ||
481 | |||
482 | return error; | ||
476 | } | 483 | } |
477 | 484 | ||
478 | static struct file_operations perf_fops = { | 485 | static struct file_operations perf_fops = { |
479 | .llseek = no_llseek, | 486 | .llseek = no_llseek, |
480 | .read = perf_read, | 487 | .read = perf_read, |
481 | .write = perf_write, | 488 | .write = perf_write, |
482 | .ioctl = perf_ioctl, | 489 | .unlocked_ioctl = perf_ioctl, |
490 | .compat_ioctl = perf_ioctl, | ||
483 | .open = perf_open, | 491 | .open = perf_open, |
484 | .release = perf_release | 492 | .release = perf_release |
485 | }; | 493 | }; |
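perf.c is converted from the old .ioctl file operation, which was called with an inode argument and the BKL held, to .unlocked_ioctl/.compat_ioctl: the handler now takes the big kernel lock explicitly and funnels every case through a single error return. The shape of the conversion, condensed from the hunk above (sketch only; the PA_PERF_OFF and PA_PERF_VERSION branches are elided):

```c
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int error = 0;

	lock_kernel();		/* the old ->ioctl() ran under the BKL implicitly */
	switch (cmd) {
	case PA_PERF_ON:
		perf_start_counters();
		break;
	/* ... PA_PERF_OFF and PA_PERF_VERSION handled as in the hunk above ... */
	default:
		error = -ENOTTY;
	}
	unlock_kernel();

	return error;
}

static struct file_operations perf_fops = {
	.unlocked_ioctl	= perf_ioctl,	/* no inode argument, no implicit BKL */
	.compat_ioctl	= perf_ioctl,	/* PA_PERF_* arguments need no translation */
	/* .llseek, .read, .write, .open, .release unchanged */
};
```

Having a native .compat_ioctl here is also part of why the hand-maintained translation table in ioctl32.c, deleted earlier in this diff, is no longer needed for the PA_PERF_* ioctls.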
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index fee4f1f09adc..5da41677e70b 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -54,7 +54,7 @@ | |||
54 | #include <asm/uaccess.h> | 54 | #include <asm/uaccess.h> |
55 | #include <asm/unwind.h> | 55 | #include <asm/unwind.h> |
56 | 56 | ||
57 | static int hlt_counter; | 57 | static int hlt_counter __read_mostly; |
58 | 58 | ||
59 | /* | 59 | /* |
60 | * Power off function, if any | 60 | * Power off function, if any |
@@ -295,7 +295,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp, | |||
295 | struct task_struct * p, struct pt_regs * pregs) | 295 | struct task_struct * p, struct pt_regs * pregs) |
296 | { | 296 | { |
297 | struct pt_regs * cregs = &(p->thread.regs); | 297 | struct pt_regs * cregs = &(p->thread.regs); |
298 | struct thread_info *ti = p->thread_info; | 298 | void *stack = task_stack_page(p); |
299 | 299 | ||
300 | /* We have to use void * instead of a function pointer, because | 300 | /* We have to use void * instead of a function pointer, because |
301 | * function pointers aren't a pointer to the function on 64-bit. | 301 | * function pointers aren't a pointer to the function on 64-bit. |
@@ -322,7 +322,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp, | |||
322 | */ | 322 | */ |
323 | if (usp == 1) { | 323 | if (usp == 1) { |
324 | /* kernel thread */ | 324 | /* kernel thread */ |
325 | cregs->ksp = (((unsigned long)(ti)) + THREAD_SZ_ALGN); | 325 | cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN; |
326 | /* Must exit via ret_from_kernel_thread in order | 326 | /* Must exit via ret_from_kernel_thread in order |
327 | * to call schedule_tail() | 327 | * to call schedule_tail() |
328 | */ | 328 | */ |
@@ -344,7 +344,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp, | |||
344 | */ | 344 | */ |
345 | 345 | ||
346 | /* Use same stack depth as parent */ | 346 | /* Use same stack depth as parent */ |
347 | cregs->ksp = ((unsigned long)(ti)) | 347 | cregs->ksp = (unsigned long)stack |
348 | + (pregs->gr[21] & (THREAD_SIZE - 1)); | 348 | + (pregs->gr[21] & (THREAD_SIZE - 1)); |
349 | cregs->gr[30] = usp; | 349 | cregs->gr[30] = usp; |
350 | if (p->personality == PER_HPUX) { | 350 | if (p->personality == PER_HPUX) { |
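The copy_thread() hunk replaces direct p->thread_info dereferences with the task_stack_page() accessor introduced by the thread_info cleanup, so the code no longer assumes how the stack and thread_info structure are laid out relative to each other. A small sketch of the accessor as used here; THREAD_SZ_ALGN is the parisc constant from the hunk above, declared in the arch headers:

    /* Sketch: compute a child's initial kernel stack pointer from the
     * stack base returned by task_stack_page(), as copy_thread() does
     * above. THREAD_SZ_ALGN is parisc's aligned kernel-stack size. */
    #include <linux/sched.h>

    static unsigned long initial_ksp(struct task_struct *p)
    {
            void *stack = task_stack_page(p);   /* base of the kernel stack */

            return (unsigned long)stack + THREAD_SZ_ALGN;
    }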
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c index 4f5bbcf1f5a4..6df9f62cecb5 100644 --- a/arch/parisc/kernel/processor.c +++ b/arch/parisc/kernel/processor.c | |||
@@ -44,10 +44,10 @@ | |||
44 | #include <asm/irq.h> /* for struct irq_region */ | 44 | #include <asm/irq.h> /* for struct irq_region */ |
45 | #include <asm/parisc-device.h> | 45 | #include <asm/parisc-device.h> |
46 | 46 | ||
47 | struct system_cpuinfo_parisc boot_cpu_data; | 47 | struct system_cpuinfo_parisc boot_cpu_data __read_mostly; |
48 | EXPORT_SYMBOL(boot_cpu_data); | 48 | EXPORT_SYMBOL(boot_cpu_data); |
49 | 49 | ||
50 | struct cpuinfo_parisc cpu_data[NR_CPUS]; | 50 | struct cpuinfo_parisc cpu_data[NR_CPUS] __read_mostly; |
51 | 51 | ||
52 | /* | 52 | /* |
53 | ** PARISC CPU driver - claim "device" and initialize CPU data structures. | 53 | ** PARISC CPU driver - claim "device" and initialize CPU data structures. |
@@ -378,12 +378,12 @@ show_cpuinfo (struct seq_file *m, void *v) | |||
378 | return 0; | 378 | return 0; |
379 | } | 379 | } |
380 | 380 | ||
381 | static struct parisc_device_id processor_tbl[] = { | 381 | static struct parisc_device_id processor_tbl[] __read_mostly = { |
382 | { HPHW_NPROC, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, SVERSION_ANY_ID }, | 382 | { HPHW_NPROC, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, SVERSION_ANY_ID }, |
383 | { 0, } | 383 | { 0, } |
384 | }; | 384 | }; |
385 | 385 | ||
386 | static struct parisc_driver cpu_driver = { | 386 | static struct parisc_driver cpu_driver __read_mostly = { |
387 | .name = "CPU", | 387 | .name = "CPU", |
388 | .id_table = processor_tbl, | 388 | .id_table = processor_tbl, |
389 | .probe = processor_probe | 389 | .probe = processor_probe |
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index b6fe202a620d..27160e8bf15b 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c | |||
@@ -264,6 +264,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
264 | * sigkill. perhaps it should be put in the status | 264 | * sigkill. perhaps it should be put in the status |
265 | * that it wants to exit. | 265 | * that it wants to exit. |
266 | */ | 266 | */ |
267 | ret = 0; | ||
267 | DBG("sys_ptrace(KILL)\n"); | 268 | DBG("sys_ptrace(KILL)\n"); |
268 | if (child->exit_state == EXIT_ZOMBIE) /* already dead */ | 269 | if (child->exit_state == EXIT_ZOMBIE) /* already dead */ |
269 | goto out_tsk; | 270 | goto out_tsk; |
@@ -344,11 +345,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
344 | 345 | ||
345 | case PTRACE_GETEVENTMSG: | 346 | case PTRACE_GETEVENTMSG: |
346 | ret = put_user(child->ptrace_message, (unsigned int __user *) data); | 347 | ret = put_user(child->ptrace_message, (unsigned int __user *) data); |
347 | goto out; | 348 | goto out_tsk; |
348 | 349 | ||
349 | default: | 350 | default: |
350 | ret = ptrace_request(child, request, addr, data); | 351 | ret = ptrace_request(child, request, addr, data); |
351 | goto out; | 352 | goto out_tsk; |
352 | } | 353 | } |
353 | 354 | ||
354 | out_wake_notrap: | 355 | out_wake_notrap: |
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index 73e9c34b0948..4a36ec3f6ac1 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c | |||
@@ -46,15 +46,15 @@ | |||
46 | #include <asm/io.h> | 46 | #include <asm/io.h> |
47 | #include <asm/setup.h> | 47 | #include <asm/setup.h> |
48 | 48 | ||
49 | char command_line[COMMAND_LINE_SIZE]; | 49 | char command_line[COMMAND_LINE_SIZE] __read_mostly; |
50 | 50 | ||
51 | /* Intended for ccio/sba/cpu statistics under /proc/bus/{runway|gsc} */ | 51 | /* Intended for ccio/sba/cpu statistics under /proc/bus/{runway|gsc} */ |
52 | struct proc_dir_entry * proc_runway_root = NULL; | 52 | struct proc_dir_entry * proc_runway_root __read_mostly = NULL; |
53 | struct proc_dir_entry * proc_gsc_root = NULL; | 53 | struct proc_dir_entry * proc_gsc_root __read_mostly = NULL; |
54 | struct proc_dir_entry * proc_mckinley_root = NULL; | 54 | struct proc_dir_entry * proc_mckinley_root __read_mostly = NULL; |
55 | 55 | ||
56 | #if !defined(CONFIG_PA20) && (defined(CONFIG_IOMMU_CCIO) || defined(CONFIG_IOMMU_SBA)) | 56 | #if !defined(CONFIG_PA20) && (defined(CONFIG_IOMMU_CCIO) || defined(CONFIG_IOMMU_SBA)) |
57 | int parisc_bus_is_phys = 1; /* Assume no IOMMU is present */ | 57 | int parisc_bus_is_phys __read_mostly = 1; /* Assume no IOMMU is present */ |
58 | EXPORT_SYMBOL(parisc_bus_is_phys); | 58 | EXPORT_SYMBOL(parisc_bus_is_phys); |
59 | #endif | 59 | #endif |
60 | 60 | ||
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index 82c24e62ab63..3a25a7bd673e 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c | |||
@@ -296,7 +296,6 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
296 | struct rt_sigframe __user *frame; | 296 | struct rt_sigframe __user *frame; |
297 | unsigned long rp, usp; | 297 | unsigned long rp, usp; |
298 | unsigned long haddr, sigframe_size; | 298 | unsigned long haddr, sigframe_size; |
299 | struct siginfo si; | ||
300 | int err = 0; | 299 | int err = 0; |
301 | #ifdef __LP64__ | 300 | #ifdef __LP64__ |
302 | compat_int_t compat_val; | 301 | compat_int_t compat_val; |
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index a9ecf6465784..25564b7ca6bb 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c | |||
@@ -39,7 +39,7 @@ | |||
39 | #include <asm/atomic.h> | 39 | #include <asm/atomic.h> |
40 | #include <asm/current.h> | 40 | #include <asm/current.h> |
41 | #include <asm/delay.h> | 41 | #include <asm/delay.h> |
42 | #include <asm/pgalloc.h> /* for flush_tlb_all() proto/macro */ | 42 | #include <asm/tlbflush.h> |
43 | 43 | ||
44 | #include <asm/io.h> | 44 | #include <asm/io.h> |
45 | #include <asm/irq.h> /* for CPU_IRQ_REGION and friends */ | 45 | #include <asm/irq.h> /* for CPU_IRQ_REGION and friends */ |
@@ -58,9 +58,9 @@ DEFINE_SPINLOCK(smp_lock); | |||
58 | 58 | ||
59 | volatile struct task_struct *smp_init_current_idle_task; | 59 | volatile struct task_struct *smp_init_current_idle_task; |
60 | 60 | ||
61 | static volatile int cpu_now_booting = 0; /* track which CPU is booting */ | 61 | static volatile int cpu_now_booting __read_mostly = 0; /* track which CPU is booting */ |
62 | 62 | ||
63 | static int parisc_max_cpus = 1; | 63 | static int parisc_max_cpus __read_mostly = 1; |
64 | 64 | ||
65 | /* online cpus are ones that we've managed to bring up completely | 65 | /* online cpus are ones that we've managed to bring up completely |
66 | * possible cpus are all valid cpu | 66 | * possible cpus are all valid cpu |
@@ -71,8 +71,8 @@ static int parisc_max_cpus = 1; | |||
71 | * empty in the beginning. | 71 | * empty in the beginning. |
72 | */ | 72 | */ |
73 | 73 | ||
74 | cpumask_t cpu_online_map = CPU_MASK_NONE; /* Bitmap of online CPUs */ | 74 | cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; /* Bitmap of online CPUs */ |
75 | cpumask_t cpu_possible_map = CPU_MASK_ALL; /* Bitmap of Present CPUs */ | 75 | cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL; /* Bitmap of Present CPUs */ |
76 | 76 | ||
77 | EXPORT_SYMBOL(cpu_online_map); | 77 | EXPORT_SYMBOL(cpu_online_map); |
78 | EXPORT_SYMBOL(cpu_possible_map); | 78 | EXPORT_SYMBOL(cpu_possible_map); |
@@ -181,12 +181,19 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs) | |||
181 | while (ops) { | 181 | while (ops) { |
182 | unsigned long which = ffz(~ops); | 182 | unsigned long which = ffz(~ops); |
183 | 183 | ||
184 | ops &= ~(1 << which); | ||
185 | |||
184 | switch (which) { | 186 | switch (which) { |
187 | case IPI_NOP: | ||
188 | #if (kDEBUG>=100) | ||
189 | printk(KERN_DEBUG "CPU%d IPI_NOP\n",this_cpu); | ||
190 | #endif /* kDEBUG */ | ||
191 | break; | ||
192 | |||
185 | case IPI_RESCHEDULE: | 193 | case IPI_RESCHEDULE: |
186 | #if (kDEBUG>=100) | 194 | #if (kDEBUG>=100) |
187 | printk(KERN_DEBUG "CPU%d IPI_RESCHEDULE\n",this_cpu); | 195 | printk(KERN_DEBUG "CPU%d IPI_RESCHEDULE\n",this_cpu); |
188 | #endif /* kDEBUG */ | 196 | #endif /* kDEBUG */ |
189 | ops &= ~(1 << IPI_RESCHEDULE); | ||
190 | /* | 197 | /* |
191 | * Reschedule callback. Everything to be | 198 | * Reschedule callback. Everything to be |
192 | * done is done by the interrupt return path. | 199 | * done is done by the interrupt return path. |
@@ -197,7 +204,6 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs) | |||
197 | #if (kDEBUG>=100) | 204 | #if (kDEBUG>=100) |
198 | printk(KERN_DEBUG "CPU%d IPI_CALL_FUNC\n",this_cpu); | 205 | printk(KERN_DEBUG "CPU%d IPI_CALL_FUNC\n",this_cpu); |
199 | #endif /* kDEBUG */ | 206 | #endif /* kDEBUG */ |
200 | ops &= ~(1 << IPI_CALL_FUNC); | ||
201 | { | 207 | { |
202 | volatile struct smp_call_struct *data; | 208 | volatile struct smp_call_struct *data; |
203 | void (*func)(void *info); | 209 | void (*func)(void *info); |
@@ -231,7 +237,6 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs) | |||
231 | #if (kDEBUG>=100) | 237 | #if (kDEBUG>=100) |
232 | printk(KERN_DEBUG "CPU%d IPI_CPU_START\n",this_cpu); | 238 | printk(KERN_DEBUG "CPU%d IPI_CPU_START\n",this_cpu); |
233 | #endif /* kDEBUG */ | 239 | #endif /* kDEBUG */ |
234 | ops &= ~(1 << IPI_CPU_START); | ||
235 | #ifdef ENTRY_SYS_CPUS | 240 | #ifdef ENTRY_SYS_CPUS |
236 | p->state = STATE_RUNNING; | 241 | p->state = STATE_RUNNING; |
237 | #endif | 242 | #endif |
@@ -241,7 +246,6 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs) | |||
241 | #if (kDEBUG>=100) | 246 | #if (kDEBUG>=100) |
242 | printk(KERN_DEBUG "CPU%d IPI_CPU_STOP\n",this_cpu); | 247 | printk(KERN_DEBUG "CPU%d IPI_CPU_STOP\n",this_cpu); |
243 | #endif /* kDEBUG */ | 248 | #endif /* kDEBUG */ |
244 | ops &= ~(1 << IPI_CPU_STOP); | ||
245 | #ifdef ENTRY_SYS_CPUS | 249 | #ifdef ENTRY_SYS_CPUS |
246 | #else | 250 | #else |
247 | halt_processor(); | 251 | halt_processor(); |
@@ -252,13 +256,11 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs) | |||
252 | #if (kDEBUG>=100) | 256 | #if (kDEBUG>=100) |
253 | printk(KERN_DEBUG "CPU%d is alive!\n",this_cpu); | 257 | printk(KERN_DEBUG "CPU%d is alive!\n",this_cpu); |
254 | #endif /* kDEBUG */ | 258 | #endif /* kDEBUG */ |
255 | ops &= ~(1 << IPI_CPU_TEST); | ||
256 | break; | 259 | break; |
257 | 260 | ||
258 | default: | 261 | default: |
259 | printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n", | 262 | printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n", |
260 | this_cpu, which); | 263 | this_cpu, which); |
261 | ops &= ~(1 << which); | ||
262 | return IRQ_NONE; | 264 | return IRQ_NONE; |
263 | } /* Switch */ | 265 | } /* Switch */ |
264 | } /* while (ops) */ | 266 | } /* while (ops) */ |
@@ -312,6 +314,12 @@ smp_send_start(void) { send_IPI_allbutself(IPI_CPU_START); } | |||
312 | void | 314 | void |
313 | smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); } | 315 | smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); } |
314 | 316 | ||
317 | void | ||
318 | smp_send_all_nop(void) | ||
319 | { | ||
320 | send_IPI_allbutself(IPI_NOP); | ||
321 | } | ||
322 | |||
315 | 323 | ||
316 | /** | 324 | /** |
317 | * Run a function on all other CPUs. | 325 | * Run a function on all other CPUs. |
@@ -338,6 +346,10 @@ smp_call_function (void (*func) (void *info), void *info, int retry, int wait) | |||
338 | 346 | ||
339 | /* Can deadlock when called with interrupts disabled */ | 347 | /* Can deadlock when called with interrupts disabled */ |
340 | WARN_ON(irqs_disabled()); | 348 | WARN_ON(irqs_disabled()); |
349 | |||
350 | /* can also deadlock if IPIs are disabled */ | ||
351 | WARN_ON((get_eiem() & (1UL<<(CPU_IRQ_MAX - IPI_IRQ))) == 0); | ||
352 | |||
341 | 353 | ||
342 | data.func = func; | 354 | data.func = func; |
343 | data.info = info; | 355 | data.info = info; |
@@ -394,12 +406,10 @@ EXPORT_SYMBOL(smp_call_function); | |||
394 | * as we want to ensure all TLB's flushed before proceeding. | 406 | * as we want to ensure all TLB's flushed before proceeding. |
395 | */ | 407 | */ |
396 | 408 | ||
397 | extern void flush_tlb_all_local(void); | ||
398 | |||
399 | void | 409 | void |
400 | smp_flush_tlb_all(void) | 410 | smp_flush_tlb_all(void) |
401 | { | 411 | { |
402 | on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1); | 412 | on_each_cpu(flush_tlb_all_local, NULL, 1, 1); |
403 | } | 413 | } |
404 | 414 | ||
405 | 415 | ||
@@ -475,7 +485,7 @@ void __init smp_callin(void) | |||
475 | #endif | 485 | #endif |
476 | 486 | ||
477 | flush_cache_all_local(); /* start with known state */ | 487 | flush_cache_all_local(); /* start with known state */ |
478 | flush_tlb_all_local(); | 488 | flush_tlb_all_local(NULL); |
479 | 489 | ||
480 | local_irq_enable(); /* Interrupts have been off until now */ | 490 | local_irq_enable(); /* Interrupts have been off until now */ |
481 | 491 | ||
@@ -507,7 +517,7 @@ int __init smp_boot_one_cpu(int cpuid) | |||
507 | if (IS_ERR(idle)) | 517 | if (IS_ERR(idle)) |
508 | panic("SMP: fork failed for CPU:%d", cpuid); | 518 | panic("SMP: fork failed for CPU:%d", cpuid); |
509 | 519 | ||
510 | idle->thread_info->cpu = cpuid; | 520 | task_thread_info(idle)->cpu = cpuid; |
511 | 521 | ||
512 | /* Let _start know what logical CPU we're booting | 522 | /* Let _start know what logical CPU we're booting |
513 | ** (offset into init_tasks[],cpu_data[]) | 523 | ** (offset into init_tasks[],cpu_data[]) |
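The smp.c changes hoist the per-case `ops &= ~(1 << which)` into one spot before the switch, add an IPI_NOP that smp_send_all_nop() uses purely to kick other CPUs, warn if IPIs are masked when smp_call_function() is entered, and pass flush_tlb_all_local straight to on_each_cpu() now that it takes a void pointer. A sketch of the clear-the-bit-once dispatch shape, with an illustrative subset of IPI types rather than the real parisc ipi_message_type:

    /* Sketch of the IPI dispatch loop after the cleanup: clear each
     * pending bit once, from the lowest set bit up, then act on it. */
    #include <linux/kernel.h>
    #include <asm/bitops.h>

    enum { IPI_NOP, IPI_RESCHEDULE, IPI_CALL_FUNC };    /* illustrative subset */

    static void dispatch_pending(unsigned long ops)
    {
            while (ops) {
                    unsigned long which = ffz(~ops);    /* index of lowest set bit */

                    ops &= ~(1UL << which);             /* cleared once, up front */

                    switch (which) {
                    case IPI_NOP:
                            break;      /* exists only to wake the target CPU */
                    case IPI_RESCHEDULE:
                            break;      /* interrupt return path reschedules */
                    default:
                            printk(KERN_CRIT "unknown IPI %lu\n", which);
                            return;
                    }
            }
    }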
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index b29b76b42bb7..d66163492890 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S | |||
@@ -164,7 +164,7 @@ linux_gateway_entry: | |||
164 | #endif | 164 | #endif |
165 | STREG %r2, TASK_PT_GR30(%r1) /* ... and save it */ | 165 | STREG %r2, TASK_PT_GR30(%r1) /* ... and save it */ |
166 | 166 | ||
167 | STREG %r20, TASK_PT_GR20(%r1) | 167 | STREG %r20, TASK_PT_GR20(%r1) /* Syscall number */ |
168 | STREG %r21, TASK_PT_GR21(%r1) | 168 | STREG %r21, TASK_PT_GR21(%r1) |
169 | STREG %r22, TASK_PT_GR22(%r1) | 169 | STREG %r22, TASK_PT_GR22(%r1) |
170 | STREG %r23, TASK_PT_GR23(%r1) /* 4th argument */ | 170 | STREG %r23, TASK_PT_GR23(%r1) /* 4th argument */ |
@@ -527,6 +527,7 @@ lws_compare_and_swap: | |||
527 | We *must* giveup this call and fail. | 527 | We *must* giveup this call and fail. |
528 | */ | 528 | */ |
529 | ldw 4(%sr2,%r20), %r28 /* Load thread register */ | 529 | ldw 4(%sr2,%r20), %r28 /* Load thread register */ |
530 | /* WARNING: If cr27 cycles to the same value we have problems */ | ||
530 | mfctl %cr27, %r21 /* Get current thread register */ | 531 | mfctl %cr27, %r21 /* Get current thread register */ |
531 | cmpb,<>,n %r21, %r28, cas_lock /* Called recursive? */ | 532 | cmpb,<>,n %r21, %r28, cas_lock /* Called recursive? */ |
532 | b lws_exit /* Return error! */ | 533 | b lws_exit /* Return error! */ |
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index cded25680787..594930bc4bcf 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c | |||
@@ -36,8 +36,8 @@ | |||
36 | /* xtime and wall_jiffies keep wall-clock time */ | 36 | /* xtime and wall_jiffies keep wall-clock time */ |
37 | extern unsigned long wall_jiffies; | 37 | extern unsigned long wall_jiffies; |
38 | 38 | ||
39 | static long clocktick; /* timer cycles per tick */ | 39 | static long clocktick __read_mostly; /* timer cycles per tick */ |
40 | static long halftick; | 40 | static long halftick __read_mostly; |
41 | 41 | ||
42 | #ifdef CONFIG_SMP | 42 | #ifdef CONFIG_SMP |
43 | extern void smp_do_timer(struct pt_regs *regs); | 43 | extern void smp_do_timer(struct pt_regs *regs); |
diff --git a/arch/parisc/kernel/topology.c b/arch/parisc/kernel/topology.c index ac2a40681414..3ba040050e4c 100644 --- a/arch/parisc/kernel/topology.c +++ b/arch/parisc/kernel/topology.c | |||
@@ -20,8 +20,9 @@ | |||
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/smp.h> | 21 | #include <linux/smp.h> |
22 | #include <linux/cpu.h> | 22 | #include <linux/cpu.h> |
23 | #include <linux/cache.h> | ||
23 | 24 | ||
24 | static struct cpu cpu_devices[NR_CPUS]; | 25 | static struct cpu cpu_devices[NR_CPUS] __read_mostly; |
25 | 26 | ||
26 | static int __init topology_init(void) | 27 | static int __init topology_init(void) |
27 | { | 28 | { |
diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c index eaae8a021f9f..de0a1b21cb40 100644 --- a/arch/parisc/kernel/unaligned.c +++ b/arch/parisc/kernel/unaligned.c | |||
@@ -122,7 +122,7 @@ | |||
122 | #define ERR_NOTHANDLED -1 | 122 | #define ERR_NOTHANDLED -1 |
123 | #define ERR_PAGEFAULT -2 | 123 | #define ERR_PAGEFAULT -2 |
124 | 124 | ||
125 | int unaligned_enabled = 1; | 125 | int unaligned_enabled __read_mostly = 1; |
126 | 126 | ||
127 | void die_if_kernel (char *str, struct pt_regs *regs, long err); | 127 | void die_if_kernel (char *str, struct pt_regs *regs, long err); |
128 | 128 | ||
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c index db141108412e..cc1c1afc3187 100644 --- a/arch/parisc/kernel/unwind.c +++ b/arch/parisc/kernel/unwind.c | |||
@@ -35,7 +35,7 @@ static spinlock_t unwind_lock; | |||
35 | * we can call unwind_init as early in the bootup process as | 35 | * we can call unwind_init as early in the bootup process as |
36 | * possible (before the slab allocator is initialized) | 36 | * possible (before the slab allocator is initialized) |
37 | */ | 37 | */ |
38 | static struct unwind_table kernel_unwind_table; | 38 | static struct unwind_table kernel_unwind_table __read_mostly; |
39 | static LIST_HEAD(unwind_tables); | 39 | static LIST_HEAD(unwind_tables); |
40 | 40 | ||
41 | static inline const struct unwind_table_entry * | 41 | static inline const struct unwind_table_entry * |
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index e5fac3e08c7a..6d6436a6b624 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S | |||
@@ -68,7 +68,7 @@ SECTIONS | |||
68 | RODATA | 68 | RODATA |
69 | 69 | ||
70 | /* writeable */ | 70 | /* writeable */ |
71 | . = ALIGN(4096); /* Make sure this is paged aligned so | 71 | . = ALIGN(4096); /* Make sure this is page aligned so |
72 | that we can properly leave these | 72 | that we can properly leave these |
73 | as writable */ | 73 | as writable */ |
74 | data_start = .; | 74 | data_start = .; |
@@ -105,6 +105,10 @@ SECTIONS | |||
105 | . = ALIGN(16); | 105 | . = ALIGN(16); |
106 | .data.lock_aligned : { *(.data.lock_aligned) } | 106 | .data.lock_aligned : { *(.data.lock_aligned) } |
107 | 107 | ||
108 | /* rarely changed data like cpu maps */ | ||
109 | . = ALIGN(16); | ||
110 | .data.read_mostly : { *(.data.read_mostly) } | ||
111 | |||
108 | _edata = .; /* End of data section */ | 112 | _edata = .; /* End of data section */ |
109 | 113 | ||
110 | . = ALIGN(16384); /* init_task */ | 114 | . = ALIGN(16384); /* init_task */ |
@@ -194,14 +198,7 @@ SECTIONS | |||
194 | #endif | 198 | #endif |
195 | } | 199 | } |
196 | 200 | ||
197 | /* Stabs debugging sections. */ | 201 | STABS_DEBUG |
198 | .stab 0 : { *(.stab) } | ||
199 | .stabstr 0 : { *(.stabstr) } | ||
200 | .stab.excl 0 : { *(.stab.excl) } | ||
201 | .stab.exclstr 0 : { *(.stab.exclstr) } | ||
202 | .stab.index 0 : { *(.stab.index) } | ||
203 | .stab.indexstr 0 : { *(.stab.indexstr) } | ||
204 | .comment 0 : { *(.comment) } | ||
205 | .note 0 : { *(.note) } | 202 | .note 0 : { *(.note) } |
206 | 203 | ||
207 | } | 204 | } |
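The linker script now collects a .data.read_mostly output section, which is where all of the __read_mostly annotations scattered through this patch end up; grouping rarely written globals keeps them off cache lines that also hold write-hot data. Roughly how the annotation feeds that section in this kernel generation (the exact define lives in <linux/cache.h> and is arch-gated, so treat this as a sketch):

    /* Sketch: __read_mostly places a variable in .data.read_mostly,
     * which the vmlinux.lds.S hunk above now gathers in one place.
     * The define in the comment is approximately the 2.6.16-era one;
     * see <linux/cache.h> for the real, per-arch version. */
    #include <linux/cache.h>

    /* roughly: #define __read_mostly __attribute__((__section__(".data.read_mostly"))) */

    static int example_tunable __read_mostly = 1;   /* read often, written rarely */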
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 29b998e430e6..720287d46e55 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c | |||
@@ -36,9 +36,9 @@ extern char _end; /* end of BSS, defined by linker */ | |||
36 | extern char __init_begin, __init_end; | 36 | extern char __init_begin, __init_end; |
37 | 37 | ||
38 | #ifdef CONFIG_DISCONTIGMEM | 38 | #ifdef CONFIG_DISCONTIGMEM |
39 | struct node_map_data node_data[MAX_NUMNODES]; | 39 | struct node_map_data node_data[MAX_NUMNODES] __read_mostly; |
40 | bootmem_data_t bmem_data[MAX_NUMNODES]; | 40 | bootmem_data_t bmem_data[MAX_NUMNODES] __read_mostly; |
41 | unsigned char pfnnid_map[PFNNID_MAP_MAX]; | 41 | unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly; |
42 | #endif | 42 | #endif |
43 | 43 | ||
44 | static struct resource data_resource = { | 44 | static struct resource data_resource = { |
@@ -58,14 +58,14 @@ static struct resource pdcdata_resource = { | |||
58 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM, | 58 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM, |
59 | }; | 59 | }; |
60 | 60 | ||
61 | static struct resource sysram_resources[MAX_PHYSMEM_RANGES]; | 61 | static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly; |
62 | 62 | ||
63 | /* The following array is initialized from the firmware specific | 63 | /* The following array is initialized from the firmware specific |
64 | * information retrieved in kernel/inventory.c. | 64 | * information retrieved in kernel/inventory.c. |
65 | */ | 65 | */ |
66 | 66 | ||
67 | physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES]; | 67 | physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly; |
68 | int npmem_ranges; | 68 | int npmem_ranges __read_mostly; |
69 | 69 | ||
70 | #ifdef __LP64__ | 70 | #ifdef __LP64__ |
71 | #define MAX_MEM (~0UL) | 71 | #define MAX_MEM (~0UL) |
@@ -73,7 +73,7 @@ int npmem_ranges; | |||
73 | #define MAX_MEM (3584U*1024U*1024U) | 73 | #define MAX_MEM (3584U*1024U*1024U) |
74 | #endif /* !__LP64__ */ | 74 | #endif /* !__LP64__ */ |
75 | 75 | ||
76 | static unsigned long mem_limit = MAX_MEM; | 76 | static unsigned long mem_limit __read_mostly = MAX_MEM; |
77 | 77 | ||
78 | static void __init mem_limit_func(void) | 78 | static void __init mem_limit_func(void) |
79 | { | 79 | { |
@@ -300,6 +300,13 @@ static void __init setup_bootmem(void) | |||
300 | max_pfn = start_pfn + npages; | 300 | max_pfn = start_pfn + npages; |
301 | } | 301 | } |
302 | 302 | ||
303 | /* IOMMU is always used to access "high mem" on those boxes | ||
304 | * that can support enough mem that a PCI device couldn't | ||
305 | * directly DMA to any physical addresses. | ||
306 | * ISA DMA support will need to revisit this. | ||
307 | */ | ||
308 | max_low_pfn = max_pfn; | ||
309 | |||
303 | if ((bootmap_pfn - bootmap_start_pfn) != bootmap_pages) { | 310 | if ((bootmap_pfn - bootmap_start_pfn) != bootmap_pages) { |
304 | printk(KERN_WARNING "WARNING! bootmap sizing is messed up!\n"); | 311 | printk(KERN_WARNING "WARNING! bootmap sizing is messed up!\n"); |
305 | BUG(); | 312 | BUG(); |
@@ -431,11 +438,11 @@ void free_initmem(void) | |||
431 | #define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \ | 438 | #define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \ |
432 | & ~(VM_MAP_OFFSET-1))) | 439 | & ~(VM_MAP_OFFSET-1))) |
433 | 440 | ||
434 | void *vmalloc_start; | 441 | void *vmalloc_start __read_mostly; |
435 | EXPORT_SYMBOL(vmalloc_start); | 442 | EXPORT_SYMBOL(vmalloc_start); |
436 | 443 | ||
437 | #ifdef CONFIG_PA11 | 444 | #ifdef CONFIG_PA11 |
438 | unsigned long pcxl_dma_start; | 445 | unsigned long pcxl_dma_start __read_mostly; |
439 | #endif | 446 | #endif |
440 | 447 | ||
441 | void __init mem_init(void) | 448 | void __init mem_init(void) |
@@ -475,7 +482,7 @@ int do_check_pgt_cache(int low, int high) | |||
475 | return 0; | 482 | return 0; |
476 | } | 483 | } |
477 | 484 | ||
478 | unsigned long *empty_zero_page; | 485 | unsigned long *empty_zero_page __read_mostly; |
479 | 486 | ||
480 | void show_mem(void) | 487 | void show_mem(void) |
481 | { | 488 | { |
@@ -998,7 +1005,7 @@ void flush_tlb_all(void) | |||
998 | void flush_tlb_all(void) | 1005 | void flush_tlb_all(void) |
999 | { | 1006 | { |
1000 | spin_lock(&sid_lock); | 1007 | spin_lock(&sid_lock); |
1001 | flush_tlb_all_local(); | 1008 | flush_tlb_all_local(NULL); |
1002 | recycle_sids(); | 1009 | recycle_sids(); |
1003 | spin_unlock(&sid_lock); | 1010 | spin_unlock(&sid_lock); |
1004 | } | 1011 | } |
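Both callers above now invoke flush_tlb_all_local(NULL): giving the routine a void-pointer parameter lets it match the void (*)(void *) callback type that on_each_cpu() expects, so smp_flush_tlb_all() can drop its function-pointer cast. A sketch of that call shape under the 2.6.16-era four-argument on_each_cpu(); the prototype shown is assumed from the callers in this patch:

    /* Sketch: flush on every CPU via on_each_cpu(), which in this
     * kernel generation takes (func, info, retry, wait). The
     * flush_tlb_all_local() prototype is assumed from its callers
     * in the patch above. */
    #include <linux/smp.h>

    void flush_tlb_all_local(void *unused);     /* parisc-internal helper */

    static void flush_everywhere(void)
    {
            on_each_cpu(flush_tlb_all_local, NULL, 1, 1);
    }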
diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c index 5c7a1b3b9326..edd9a9559cba 100644 --- a/arch/parisc/mm/ioremap.c +++ b/arch/parisc/mm/ioremap.c | |||
@@ -1,12 +1,9 @@ | |||
1 | /* | 1 | /* |
2 | * arch/parisc/mm/ioremap.c | 2 | * arch/parisc/mm/ioremap.c |
3 | * | 3 | * |
4 | * Re-map IO memory to kernel address space so that we can access it. | ||
5 | * This is needed for high PCI addresses that aren't mapped in the | ||
6 | * 640k-1MB IO memory area on PC's | ||
7 | * | ||
8 | * (C) Copyright 1995 1996 Linus Torvalds | 4 | * (C) Copyright 1995 1996 Linus Torvalds |
9 | * (C) Copyright 2001 Helge Deller <deller@gmx.de> | 5 | * (C) Copyright 2001 Helge Deller <deller@gmx.de> |
6 | * (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org> | ||
10 | */ | 7 | */ |
11 | 8 | ||
12 | #include <linux/vmalloc.h> | 9 | #include <linux/vmalloc.h> |
@@ -14,81 +11,107 @@ | |||
14 | #include <linux/module.h> | 11 | #include <linux/module.h> |
15 | #include <asm/io.h> | 12 | #include <asm/io.h> |
16 | #include <asm/pgalloc.h> | 13 | #include <asm/pgalloc.h> |
14 | #include <asm/tlbflush.h> | ||
15 | #include <asm/cacheflush.h> | ||
17 | 16 | ||
18 | static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size, | 17 | static inline void |
19 | unsigned long phys_addr, unsigned long flags) | 18 | remap_area_pte(pte_t *pte, unsigned long address, unsigned long size, |
19 | unsigned long phys_addr, unsigned long flags) | ||
20 | { | 20 | { |
21 | unsigned long end; | 21 | unsigned long end, pfn; |
22 | pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | | ||
23 | _PAGE_ACCESSED | flags); | ||
22 | 24 | ||
23 | address &= ~PMD_MASK; | 25 | address &= ~PMD_MASK; |
26 | |||
24 | end = address + size; | 27 | end = address + size; |
25 | if (end > PMD_SIZE) | 28 | if (end > PMD_SIZE) |
26 | end = PMD_SIZE; | 29 | end = PMD_SIZE; |
27 | if (address >= end) | 30 | |
28 | BUG(); | 31 | BUG_ON(address >= end); |
32 | |||
33 | pfn = phys_addr >> PAGE_SHIFT; | ||
29 | do { | 34 | do { |
30 | if (!pte_none(*pte)) { | 35 | BUG_ON(!pte_none(*pte)); |
31 | printk(KERN_ERR "remap_area_pte: page already exists\n"); | 36 | |
32 | BUG(); | 37 | set_pte(pte, pfn_pte(pfn, pgprot)); |
33 | } | 38 | |
34 | set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | _PAGE_RW | | ||
35 | _PAGE_DIRTY | _PAGE_ACCESSED | flags))); | ||
36 | address += PAGE_SIZE; | 39 | address += PAGE_SIZE; |
37 | phys_addr += PAGE_SIZE; | 40 | pfn++; |
38 | pte++; | 41 | pte++; |
39 | } while (address && (address < end)); | 42 | } while (address && (address < end)); |
40 | } | 43 | } |
41 | 44 | ||
42 | static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size, | 45 | static inline int |
43 | unsigned long phys_addr, unsigned long flags) | 46 | remap_area_pmd(pmd_t *pmd, unsigned long address, unsigned long size, |
47 | unsigned long phys_addr, unsigned long flags) | ||
44 | { | 48 | { |
45 | unsigned long end; | 49 | unsigned long end; |
46 | 50 | ||
47 | address &= ~PGDIR_MASK; | 51 | address &= ~PGDIR_MASK; |
52 | |||
48 | end = address + size; | 53 | end = address + size; |
49 | if (end > PGDIR_SIZE) | 54 | if (end > PGDIR_SIZE) |
50 | end = PGDIR_SIZE; | 55 | end = PGDIR_SIZE; |
56 | |||
57 | BUG_ON(address >= end); | ||
58 | |||
51 | phys_addr -= address; | 59 | phys_addr -= address; |
52 | if (address >= end) | ||
53 | BUG(); | ||
54 | do { | 60 | do { |
55 | pte_t * pte = pte_alloc_kernel(pmd, address); | 61 | pte_t *pte = pte_alloc_kernel(pmd, address); |
56 | if (!pte) | 62 | if (!pte) |
57 | return -ENOMEM; | 63 | return -ENOMEM; |
58 | remap_area_pte(pte, address, end - address, address + phys_addr, flags); | 64 | |
65 | remap_area_pte(pte, address, end - address, | ||
66 | address + phys_addr, flags); | ||
67 | |||
59 | address = (address + PMD_SIZE) & PMD_MASK; | 68 | address = (address + PMD_SIZE) & PMD_MASK; |
60 | pmd++; | 69 | pmd++; |
61 | } while (address && (address < end)); | 70 | } while (address && (address < end)); |
71 | |||
62 | return 0; | 72 | return 0; |
63 | } | 73 | } |
64 | 74 | ||
65 | #if (USE_HPPA_IOREMAP) | 75 | #if USE_HPPA_IOREMAP |
66 | static int remap_area_pages(unsigned long address, unsigned long phys_addr, | 76 | static int |
67 | unsigned long size, unsigned long flags) | 77 | remap_area_pages(unsigned long address, unsigned long phys_addr, |
78 | unsigned long size, unsigned long flags) | ||
68 | { | 79 | { |
69 | int error; | 80 | pgd_t *dir; |
70 | pgd_t * dir; | 81 | int error = 0; |
71 | unsigned long end = address + size; | 82 | unsigned long end = address + size; |
72 | 83 | ||
84 | BUG_ON(address >= end); | ||
85 | |||
73 | phys_addr -= address; | 86 | phys_addr -= address; |
74 | dir = pgd_offset(&init_mm, address); | 87 | dir = pgd_offset_k(address); |
88 | |||
75 | flush_cache_all(); | 89 | flush_cache_all(); |
76 | if (address >= end) | 90 | |
77 | BUG(); | ||
78 | do { | 91 | do { |
92 | pud_t *pud; | ||
79 | pmd_t *pmd; | 93 | pmd_t *pmd; |
80 | pmd = pmd_alloc(&init_mm, dir, address); | 94 | |
81 | error = -ENOMEM; | 95 | error = -ENOMEM; |
96 | pud = pud_alloc(&init_mm, dir, address); | ||
97 | if (!pud) | ||
98 | break; | ||
99 | |||
100 | pmd = pmd_alloc(&init_mm, pud, address); | ||
82 | if (!pmd) | 101 | if (!pmd) |
83 | break; | 102 | break; |
103 | |||
84 | if (remap_area_pmd(pmd, address, end - address, | 104 | if (remap_area_pmd(pmd, address, end - address, |
85 | phys_addr + address, flags)) | 105 | phys_addr + address, flags)) |
86 | break; | 106 | break; |
107 | |||
87 | error = 0; | 108 | error = 0; |
88 | address = (address + PGDIR_SIZE) & PGDIR_MASK; | 109 | address = (address + PGDIR_SIZE) & PGDIR_MASK; |
89 | dir++; | 110 | dir++; |
90 | } while (address && (address < end)); | 111 | } while (address && (address < end)); |
112 | |||
91 | flush_tlb_all(); | 113 | flush_tlb_all(); |
114 | |||
92 | return error; | 115 | return error; |
93 | } | 116 | } |
94 | #endif /* USE_HPPA_IOREMAP */ | 117 | #endif /* USE_HPPA_IOREMAP */ |
@@ -123,8 +146,7 @@ EXPORT_SYMBOL(__raw_bad_addr); | |||
123 | 146 | ||
124 | /* | 147 | /* |
125 | * Remap an arbitrary physical address space into the kernel virtual | 148 | * Remap an arbitrary physical address space into the kernel virtual |
126 | * address space. Needed when the kernel wants to access high addresses | 149 | * address space. |
127 | * directly. | ||
128 | * | 150 | * |
129 | * NOTE! We need to allow non-page-aligned mappings too: we will obviously | 151 | * NOTE! We need to allow non-page-aligned mappings too: we will obviously |
130 | * have to convert them into an offset in a page-aligned mapping, but the | 152 | * have to convert them into an offset in a page-aligned mapping, but the |
@@ -148,8 +170,8 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l | |||
148 | #endif | 170 | #endif |
149 | 171 | ||
150 | #else | 172 | #else |
151 | void * addr; | 173 | void *addr; |
152 | struct vm_struct * area; | 174 | struct vm_struct *area; |
153 | unsigned long offset, last_addr; | 175 | unsigned long offset, last_addr; |
154 | 176 | ||
155 | /* Don't allow wraparound or zero size */ | 177 | /* Don't allow wraparound or zero size */ |
@@ -167,9 +189,11 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l | |||
167 | t_addr = __va(phys_addr); | 189 | t_addr = __va(phys_addr); |
168 | t_end = t_addr + (size - 1); | 190 | t_end = t_addr + (size - 1); |
169 | 191 | ||
170 | for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++) | 192 | for (page = virt_to_page(t_addr); |
193 | page <= virt_to_page(t_end); page++) { | ||
171 | if(!PageReserved(page)) | 194 | if(!PageReserved(page)) |
172 | return NULL; | 195 | return NULL; |
196 | } | ||
173 | } | 197 | } |
174 | 198 | ||
175 | /* | 199 | /* |
@@ -185,11 +209,13 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l | |||
185 | area = get_vm_area(size, VM_IOREMAP); | 209 | area = get_vm_area(size, VM_IOREMAP); |
186 | if (!area) | 210 | if (!area) |
187 | return NULL; | 211 | return NULL; |
212 | |||
188 | addr = area->addr; | 213 | addr = area->addr; |
189 | if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) { | 214 | if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) { |
190 | vfree(addr); | 215 | vfree(addr); |
191 | return NULL; | 216 | return NULL; |
192 | } | 217 | } |
218 | |||
193 | return (void __iomem *) (offset + (char *)addr); | 219 | return (void __iomem *) (offset + (char *)addr); |
194 | #endif | 220 | #endif |
195 | } | 221 | } |
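The rewritten ioremap path builds PTEs from a page-frame number with pfn_pte() instead of the removed mk_pte_phys(), and remap_area_pages() now walks pgd, pud, pmd, allocating the new pud level on the way down. A condensed sketch of that walk for a single kernel page, assuming the generic pgtable API of this kernel generation; the real code loops over PMD- and PGDIR-sized chunks:

    /* Sketch: map one kernel page the way the rewritten
     * remap_area_pages()/remap_area_pte() pair does, allocating each
     * intermediate level (including the pud step the patch adds) and
     * filling the PTE from a pfn with pfn_pte(). */
    #include <linux/mm.h>
    #include <asm/pgalloc.h>
    #include <asm/pgtable.h>

    static int map_one_kernel_page(unsigned long vaddr, unsigned long phys_addr,
                                   pgprot_t prot)
    {
            pgd_t *pgd = pgd_offset_k(vaddr);
            pud_t *pud = pud_alloc(&init_mm, pgd, vaddr);
            pmd_t *pmd;
            pte_t *pte;

            if (!pud)
                    return -ENOMEM;
            pmd = pmd_alloc(&init_mm, pud, vaddr);
            if (!pmd)
                    return -ENOMEM;
            pte = pte_alloc_kernel(pmd, vaddr);
            if (!pte)
                    return -ENOMEM;

            set_pte(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot));
            return 0;
    }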