Diffstat (limited to 'arch')
202 files changed, 15462 insertions, 6254 deletions
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c
index 99a7f19da13a..a4555f497639 100644
--- a/arch/alpha/kernel/sys_sable.c
+++ b/arch/alpha/kernel/sys_sable.c
@@ -47,7 +47,7 @@ typedef struct irq_swizzle_struct
 
 static irq_swizzle_t *sable_lynx_irq_swizzle;
 
-static void sable_lynx_init_irq(int nr_irqs);
+static void sable_lynx_init_irq(int nr_of_irqs);
 
 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SABLE)
 
@@ -530,11 +530,11 @@ sable_lynx_srm_device_interrupt(unsigned long vector)
 }
 
 static void __init
-sable_lynx_init_irq(int nr_irqs)
+sable_lynx_init_irq(int nr_of_irqs)
 {
 	long i;
 
-	for (i = 0; i < nr_irqs; ++i) {
+	for (i = 0; i < nr_of_irqs; ++i) {
 		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
 		irq_desc[i].chip = &sable_lynx_irq_type;
 	}
diff --git a/arch/arm/mach-iop13xx/include/mach/time.h b/arch/arm/mach-iop13xx/include/mach/time.h
index 49213d9d7cad..d6d52527589d 100644
--- a/arch/arm/mach-iop13xx/include/mach/time.h
+++ b/arch/arm/mach-iop13xx/include/mach/time.h
@@ -41,7 +41,7 @@ static inline unsigned long iop13xx_core_freq(void)
 		return 1200000000;
 	default:
 		printk("%s: warning unknown frequency, defaulting to 800Mhz\n",
-			__FUNCTION__);
+			__func__);
 	}
 
 	return 800000000;
@@ -60,7 +60,7 @@ static inline unsigned long iop13xx_xsi_bus_ratio(void)
 		return 4;
 	default:
 		printk("%s: warning unknown ratio, defaulting to 2\n",
-			__FUNCTION__);
+			__func__);
 	}
 
 	return 2;
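The __FUNCTION__ to __func__ conversions here (and in zylonite.h further down) replace the GCC-specific identifier with the C99 standard one; both expand to the name of the enclosing function. A minimal standalone illustration, with a made-up function name:

#include <stdio.h>

static void probe_clock(void)
{
	/* __func__ is a C99 predefined identifier; __FUNCTION__ is the older
	 * GCC-only spelling of the same thing. This prints "probe_clock". */
	printf("%s: warning unknown frequency, defaulting to 800MHz\n", __func__);
}

int main(void)
{
	probe_clock();
	return 0;
}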
diff --git a/arch/arm/mach-ixp2000/ixdp2x00.c b/arch/arm/mach-ixp2000/ixdp2x00.c
index b0653a87159a..30451300751b 100644
--- a/arch/arm/mach-ixp2000/ixdp2x00.c
+++ b/arch/arm/mach-ixp2000/ixdp2x00.c
@@ -143,7 +143,7 @@ static struct irq_chip ixdp2x00_cpld_irq_chip = {
 	.unmask	= ixdp2x00_irq_unmask
 };
 
-void __init ixdp2x00_init_irq(volatile unsigned long *stat_reg, volatile unsigned long *mask_reg, unsigned long nr_irqs)
+void __init ixdp2x00_init_irq(volatile unsigned long *stat_reg, volatile unsigned long *mask_reg, unsigned long nr_of_irqs)
 {
 	unsigned int irq;
 
@@ -154,7 +154,7 @@ void __init ixdp2x00_init_irq(volatile unsigned long *stat_reg, volatile unsigne
 
 	board_irq_stat = stat_reg;
 	board_irq_mask = mask_reg;
-	board_irq_count = nr_irqs;
+	board_irq_count = nr_of_irqs;
 
 	*board_irq_mask = 0xffffffff;
 
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
index d354e0fe4477..c40fc378a251 100644
--- a/arch/arm/mach-omap2/irq.c
+++ b/arch/arm/mach-omap2/irq.c
@@ -119,7 +119,7 @@ static void __init omap_irq_bank_init_one(struct omap_irq_bank *bank)
 
 void __init omap_init_irq(void)
 {
-	unsigned long nr_irqs = 0;
+	unsigned long nr_of_irqs = 0;
 	unsigned int nr_banks = 0;
 	int i;
 
@@ -133,14 +133,14 @@ void __init omap_init_irq(void)
 
 		omap_irq_bank_init_one(bank);
 
-		nr_irqs += bank->nr_irqs;
+		nr_of_irqs += bank->nr_irqs;
 		nr_banks++;
 	}
 
 	printk(KERN_INFO "Total of %ld interrupts on %d active controller%s\n",
-	       nr_irqs, nr_banks, nr_banks > 1 ? "s" : "");
+	       nr_of_irqs, nr_banks, nr_banks > 1 ? "s" : "");
 
-	for (i = 0; i < nr_irqs; i++) {
+	for (i = 0; i < nr_of_irqs; i++) {
 		set_irq_chip(i, &omap_irq_chip);
 		set_irq_handler(i, handle_level_irq);
 		set_irq_flags(i, IRQF_VALID);
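Across these arch files the pattern is the same: function-local variables and parameters named nr_irqs become nr_of_irqs while per-bank fields such as bank->nr_irqs keep their name, most likely to keep the locals from shadowing the global nr_irqs being introduced by the generic sparse-IRQ work. A minimal, purely illustrative sketch of the shadowing problem the rename avoids (names are made up):

#include <stdio.h>

int nr_irqs = 16;				/* global, as in the genirq core */

static void init_board_irqs(int nr_irqs)	/* parameter shadows the global */
{
	/* Inside this function "nr_irqs" names the parameter; the global of
	 * the same name is unreachable, which is exactly the ambiguity the
	 * nr_of_irqs rename removes. */
	printf("initialising %d board IRQs\n", nr_irqs);
}

int main(void)
{
	init_board_irqs(8);
	return 0;
}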
diff --git a/arch/arm/mach-pxa/include/mach/zylonite.h b/arch/arm/mach-pxa/include/mach/zylonite.h
index 0d35ca04731e..bf6785adccf4 100644
--- a/arch/arm/mach-pxa/include/mach/zylonite.h
+++ b/arch/arm/mach-pxa/include/mach/zylonite.h
@@ -30,7 +30,7 @@ extern void zylonite_pxa300_init(void);
 static inline void zylonite_pxa300_init(void)
 {
 	if (cpu_is_pxa300() || cpu_is_pxa310())
-		panic("%s: PXA300/PXA310 not supported\n", __FUNCTION__);
+		panic("%s: PXA300/PXA310 not supported\n", __func__);
 }
 #endif
 
@@ -40,7 +40,7 @@ extern void zylonite_pxa320_init(void);
 static inline void zylonite_pxa320_init(void)
 {
 	if (cpu_is_pxa320())
-		panic("%s: PXA320 not supported\n", __FUNCTION__);
+		panic("%s: PXA320 not supported\n", __func__);
 }
 #endif
 
diff --git a/arch/arm/mach-sa1100/include/mach/ide.h b/arch/arm/mach-sa1100/include/mach/ide.h
deleted file mode 100644
index 4c99c8f5e617..000000000000
--- a/arch/arm/mach-sa1100/include/mach/ide.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * arch/arm/mach-sa1100/include/mach/ide.h
- *
- * Copyright (c) 1998 Hugo Fiennes & Nicolas Pitre
- *
- * 18-aug-2000: Cleanup by Erik Mouw (J.A.K.Mouw@its.tudelft.nl)
- *              Get rid of the special ide_init_hwif_ports() functions
- *              and make a generalised function that can be used by all
- *              architectures.
- */
-
-#include <asm/irq.h>
-#include <mach/hardware.h>
-#include <asm/mach-types.h>
-
-#error "This code is broken and needs update to match with current ide support"
-
-
-/*
- * Set up a hw structure for a specified data port, control port and IRQ.
- * This should follow whatever the default interface uses.
- */
-static inline void ide_init_hwif_ports(hw_regs_t *hw, unsigned long data_port,
-				       unsigned long ctrl_port, int *irq)
-{
-	unsigned long reg = data_port;
-	int i;
-	int regincr = 1;
-
-	/* The Empeg board has the first two address lines unused */
-	if (machine_is_empeg())
-		regincr = 1 << 2;
-
-	/* The LART doesn't use A0 for IDE */
-	if (machine_is_lart())
-		regincr = 1 << 1;
-
-	memset(hw, 0, sizeof(*hw));
-
-	for (i = 0; i <= 7; i++) {
-		hw->io_ports_array[i] = reg;
-		reg += regincr;
-	}
-
-	hw->io_ports.ctl_addr = ctrl_port;
-
-	if (irq)
-		*irq = 0;
-}
-
-/*
- * This registers the standard ports for this architecture with the IDE
- * driver.
- */
-static __inline__ void
-ide_init_default_hwifs(void)
-{
-	if (machine_is_lart()) {
-#ifdef CONFIG_SA1100_LART
-		hw_regs_t hw;
-
-		/* Enable GPIO as interrupt line */
-		GPDR &= ~LART_GPIO_IDE;
-		set_irq_type(LART_IRQ_IDE, IRQ_TYPE_EDGE_RISING);
-
-		/* set PCMCIA interface timing */
-		MECR = 0x00060006;
-
-		/* init the interface */
-		ide_init_hwif_ports(&hw, PCMCIA_IO_0_BASE + 0x0000, PCMCIA_IO_0_BASE + 0x1000, NULL);
-		hw.irq = LART_IRQ_IDE;
-		ide_register_hw(&hw);
-#endif
-	}
-}
diff --git a/arch/avr32/mach-at32ap/extint.c b/arch/avr32/mach-at32ap/extint.c
index c36a6d59d6f0..310477ba1bbf 100644
--- a/arch/avr32/mach-at32ap/extint.c
+++ b/arch/avr32/mach-at32ap/extint.c
@@ -191,7 +191,7 @@ static int __init eic_probe(struct platform_device *pdev)
 	struct eic *eic;
 	struct resource *regs;
 	unsigned int i;
-	unsigned int nr_irqs;
+	unsigned int nr_of_irqs;
 	unsigned int int_irq;
 	int ret;
 	u32 pattern;
@@ -224,7 +224,7 @@ static int __init eic_probe(struct platform_device *pdev)
 	eic_writel(eic, IDR, ~0UL);
 	eic_writel(eic, MODE, ~0UL);
 	pattern = eic_readl(eic, MODE);
-	nr_irqs = fls(pattern);
+	nr_of_irqs = fls(pattern);
 
 	/* Trigger on low level unless overridden by driver */
 	eic_writel(eic, EDGE, 0UL);
@@ -232,7 +232,7 @@ static int __init eic_probe(struct platform_device *pdev)
 
 	eic->chip = &eic_chip;
 
-	for (i = 0; i < nr_irqs; i++) {
+	for (i = 0; i < nr_of_irqs; i++) {
 		set_irq_chip_and_handler(eic->first_irq + i, &eic_chip,
 					 handle_level_irq);
 		set_irq_chip_data(eic->first_irq + i, eic);
@@ -256,7 +256,7 @@ static int __init eic_probe(struct platform_device *pdev)
 		eic->regs, int_irq);
 	dev_info(&pdev->dev,
 		 "Handling %u external IRQs, starting with IRQ %u\n",
-		 nr_irqs, eic->first_irq);
+		 nr_of_irqs, eic->first_irq);
 
 	return 0;
 
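The probe above sizes the controller by writing all-ones to the MODE register and counting the implemented bits with fls(), which returns the 1-based position of the most significant set bit (and 0 for 0). A small host-side sketch of that trick; the register value is a placeholder, and fls_emul() is a portable stand-in for the kernel helper:

#include <stdio.h>

/* Stand-in for the kernel's fls(): position of the highest set bit,
 * counting from 1, or 0 if no bit is set. */
static int fls_emul(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	/* Suppose the EIC implements 19 MODE bits: writing ~0 and reading
	 * the register back would return 0x0007ffff. */
	unsigned int pattern = 0x0007ffff;

	printf("controller handles %d external IRQs\n", fls_emul(pattern));
	return 0;
}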
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h
index 0149097b736d..ce342fb74246 100644
--- a/arch/ia64/include/asm/pci.h
+++ b/arch/ia64/include/asm/pci.h
@@ -95,16 +95,8 @@ extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
 		enum pci_mmap_state mmap_state, int write_combine);
 #define HAVE_PCI_LEGACY
 extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
-				      struct vm_area_struct *vma);
-extern ssize_t pci_read_legacy_io(struct kobject *kobj,
-				  struct bin_attribute *bin_attr,
-				  char *buf, loff_t off, size_t count);
-extern ssize_t pci_write_legacy_io(struct kobject *kobj,
-				   struct bin_attribute *bin_attr,
-				   char *buf, loff_t off, size_t count);
-extern int pci_mmap_legacy_mem(struct kobject *kobj,
-			       struct bin_attribute *attr,
-			       struct vm_area_struct *vma);
+				      struct vm_area_struct *vma,
+				      enum pci_mmap_state mmap_state);
 
 #define pci_get_legacy_mem	platform_pci_get_legacy_mem
 #define pci_legacy_read		platform_pci_legacy_read
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 7545037a8625..211fcfd115f9 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -614,12 +614,17 @@ char *ia64_pci_get_legacy_mem(struct pci_bus *bus)
  * vector to get the base address.
  */
 int
-pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma)
+pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma,
+	enum pci_mmap_state mmap_state)
 {
 	unsigned long size = vma->vm_end - vma->vm_start;
 	pgprot_t prot;
 	char *addr;
 
+	/* We only support mmap'ing of legacy memory space */
+	if (mmap_state != pci_mmap_mem)
+		return -ENOSYS;
+
 	/*
 	 * Avoid attribute aliasing.  See Documentation/ia64/aliasing.txt
 	 * for more details.
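With the extra mmap_state argument, the ia64 implementation above now refuses anything but legacy memory space. A hedged sketch of how a caller might dispatch through the new signature; the wrapper name is illustrative and not taken from this patch:

#include <linux/pci.h>

/* Illustrative wrapper: forward a legacy-memory mmap request to the arch
 * hook. On ia64 a pci_mmap_io request would come back as -ENOSYS. */
static int legacy_mem_mmap(struct pci_bus *bus, struct vm_area_struct *vma)
{
	return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem);
}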
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index fc2994811f15..39cb6da72dcb 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -40,6 +40,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/cpu.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 2bd1f6ef5db0..644a70b1b04e 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -9,6 +9,8 @@ config PARISC
 	def_bool y
 	select HAVE_IDE
 	select HAVE_OPROFILE
+	select RTC_CLASS
+	select RTC_DRV_PARISC
 	help
 	  The PA-RISC microprocessor is designed by Hewlett-Packard and used
 	  in many of their workstations & servers (HP9000 700 and 800 series,
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
new file mode 100644
index 000000000000..f88b252e419c
--- /dev/null
+++ b/arch/parisc/include/asm/Kbuild
@@ -0,0 +1,3 @@
+include include/asm-generic/Kbuild.asm
+
+unifdef-y += pdc.h
diff --git a/arch/parisc/include/asm/agp.h b/arch/parisc/include/asm/agp.h
new file mode 100644
index 000000000000..9651660da639
--- /dev/null
+++ b/arch/parisc/include/asm/agp.h
@@ -0,0 +1,24 @@
+#ifndef _ASM_PARISC_AGP_H
+#define _ASM_PARISC_AGP_H
+
+/*
+ * PARISC specific AGP definitions.
+ * Copyright (c) 2006 Kyle McMartin <kyle@parisc-linux.org>
+ *
+ */
+
+#define map_page_into_agp(page)		/* nothing */
+#define unmap_page_from_agp(page)	/* nothing */
+#define flush_agp_cache()		mb()
+
+/* Convert a physical address to an address suitable for the GART. */
+#define phys_to_gart(x)		(x)
+#define gart_to_phys(x)		(x)
+
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order)		\
+	((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order)	\
+	free_pages((unsigned long)(table), (order))
+
+#endif /* _ASM_PARISC_AGP_H */
diff --git a/arch/parisc/include/asm/asmregs.h b/arch/parisc/include/asm/asmregs.h
new file mode 100644
index 000000000000..d93c646e1887
--- /dev/null
+++ b/arch/parisc/include/asm/asmregs.h
@@ -0,0 +1,183 @@
1 | /* | ||
2 | * Copyright (C) 1999 Hewlett-Packard (Frank Rowand) | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2, or (at your option) | ||
7 | * any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
17 | */ | ||
18 | |||
19 | #ifndef _PARISC_ASMREGS_H | ||
20 | #define _PARISC_ASMREGS_H | ||
21 | |||
22 | ;! General Registers | ||
23 | |||
24 | rp: .reg %r2 | ||
25 | arg3: .reg %r23 | ||
26 | arg2: .reg %r24 | ||
27 | arg1: .reg %r25 | ||
28 | arg0: .reg %r26 | ||
29 | dp: .reg %r27 | ||
30 | ret0: .reg %r28 | ||
31 | ret1: .reg %r29 | ||
32 | sl: .reg %r29 | ||
33 | sp: .reg %r30 | ||
34 | |||
35 | #if 0 | ||
36 | /* PA20_REVISIT */ | ||
37 | arg7: .reg r19 | ||
38 | arg6: .reg r20 | ||
39 | arg5: .reg r21 | ||
40 | arg4: .reg r22 | ||
41 | gp: .reg r27 | ||
42 | ap: .reg r29 | ||
43 | #endif | ||
44 | |||
45 | |||
46 | r0: .reg %r0 | ||
47 | r1: .reg %r1 | ||
48 | r2: .reg %r2 | ||
49 | r3: .reg %r3 | ||
50 | r4: .reg %r4 | ||
51 | r5: .reg %r5 | ||
52 | r6: .reg %r6 | ||
53 | r7: .reg %r7 | ||
54 | r8: .reg %r8 | ||
55 | r9: .reg %r9 | ||
56 | r10: .reg %r10 | ||
57 | r11: .reg %r11 | ||
58 | r12: .reg %r12 | ||
59 | r13: .reg %r13 | ||
60 | r14: .reg %r14 | ||
61 | r15: .reg %r15 | ||
62 | r16: .reg %r16 | ||
63 | r17: .reg %r17 | ||
64 | r18: .reg %r18 | ||
65 | r19: .reg %r19 | ||
66 | r20: .reg %r20 | ||
67 | r21: .reg %r21 | ||
68 | r22: .reg %r22 | ||
69 | r23: .reg %r23 | ||
70 | r24: .reg %r24 | ||
71 | r25: .reg %r25 | ||
72 | r26: .reg %r26 | ||
73 | r27: .reg %r27 | ||
74 | r28: .reg %r28 | ||
75 | r29: .reg %r29 | ||
76 | r30: .reg %r30 | ||
77 | r31: .reg %r31 | ||
78 | |||
79 | |||
80 | ;! Space Registers | ||
81 | |||
82 | sr0: .reg %sr0 | ||
83 | sr1: .reg %sr1 | ||
84 | sr2: .reg %sr2 | ||
85 | sr3: .reg %sr3 | ||
86 | sr4: .reg %sr4 | ||
87 | sr5: .reg %sr5 | ||
88 | sr6: .reg %sr6 | ||
89 | sr7: .reg %sr7 | ||
90 | |||
91 | |||
92 | ;! Floating Point Registers | ||
93 | |||
94 | fr0: .reg %fr0 | ||
95 | fr1: .reg %fr1 | ||
96 | fr2: .reg %fr2 | ||
97 | fr3: .reg %fr3 | ||
98 | fr4: .reg %fr4 | ||
99 | fr5: .reg %fr5 | ||
100 | fr6: .reg %fr6 | ||
101 | fr7: .reg %fr7 | ||
102 | fr8: .reg %fr8 | ||
103 | fr9: .reg %fr9 | ||
104 | fr10: .reg %fr10 | ||
105 | fr11: .reg %fr11 | ||
106 | fr12: .reg %fr12 | ||
107 | fr13: .reg %fr13 | ||
108 | fr14: .reg %fr14 | ||
109 | fr15: .reg %fr15 | ||
110 | fr16: .reg %fr16 | ||
111 | fr17: .reg %fr17 | ||
112 | fr18: .reg %fr18 | ||
113 | fr19: .reg %fr19 | ||
114 | fr20: .reg %fr20 | ||
115 | fr21: .reg %fr21 | ||
116 | fr22: .reg %fr22 | ||
117 | fr23: .reg %fr23 | ||
118 | fr24: .reg %fr24 | ||
119 | fr25: .reg %fr25 | ||
120 | fr26: .reg %fr26 | ||
121 | fr27: .reg %fr27 | ||
122 | fr28: .reg %fr28 | ||
123 | fr29: .reg %fr29 | ||
124 | fr30: .reg %fr30 | ||
125 | fr31: .reg %fr31 | ||
126 | |||
127 | |||
128 | ;! Control Registers | ||
129 | |||
130 | rctr: .reg %cr0 | ||
131 | pidr1: .reg %cr8 | ||
132 | pidr2: .reg %cr9 | ||
133 | ccr: .reg %cr10 | ||
134 | sar: .reg %cr11 | ||
135 | pidr3: .reg %cr12 | ||
136 | pidr4: .reg %cr13 | ||
137 | iva: .reg %cr14 | ||
138 | eiem: .reg %cr15 | ||
139 | itmr: .reg %cr16 | ||
140 | pcsq: .reg %cr17 | ||
141 | pcoq: .reg %cr18 | ||
142 | iir: .reg %cr19 | ||
143 | isr: .reg %cr20 | ||
144 | ior: .reg %cr21 | ||
145 | ipsw: .reg %cr22 | ||
146 | eirr: .reg %cr23 | ||
147 | tr0: .reg %cr24 | ||
148 | tr1: .reg %cr25 | ||
149 | tr2: .reg %cr26 | ||
150 | tr3: .reg %cr27 | ||
151 | tr4: .reg %cr28 | ||
152 | tr5: .reg %cr29 | ||
153 | tr6: .reg %cr30 | ||
154 | tr7: .reg %cr31 | ||
155 | |||
156 | |||
157 | cr0: .reg %cr0 | ||
158 | cr8: .reg %cr8 | ||
159 | cr9: .reg %cr9 | ||
160 | cr10: .reg %cr10 | ||
161 | cr11: .reg %cr11 | ||
162 | cr12: .reg %cr12 | ||
163 | cr13: .reg %cr13 | ||
164 | cr14: .reg %cr14 | ||
165 | cr15: .reg %cr15 | ||
166 | cr16: .reg %cr16 | ||
167 | cr17: .reg %cr17 | ||
168 | cr18: .reg %cr18 | ||
169 | cr19: .reg %cr19 | ||
170 | cr20: .reg %cr20 | ||
171 | cr21: .reg %cr21 | ||
172 | cr22: .reg %cr22 | ||
173 | cr23: .reg %cr23 | ||
174 | cr24: .reg %cr24 | ||
175 | cr25: .reg %cr25 | ||
176 | cr26: .reg %cr26 | ||
177 | cr27: .reg %cr27 | ||
178 | cr28: .reg %cr28 | ||
179 | cr29: .reg %cr29 | ||
180 | cr30: .reg %cr30 | ||
181 | cr31: .reg %cr31 | ||
182 | |||
183 | #endif | ||
diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
new file mode 100644
index 000000000000..ffb208840ecc
--- /dev/null
+++ b/arch/parisc/include/asm/assembly.h
@@ -0,0 +1,519 @@
1 | /* | ||
2 | * Copyright (C) 1999 Hewlett-Packard (Frank Rowand) | ||
3 | * Copyright (C) 1999 Philipp Rumpf <prumpf@tux.org> | ||
4 | * Copyright (C) 1999 SuSE GmbH | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2, or (at your option) | ||
9 | * any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
19 | */ | ||
20 | |||
21 | #ifndef _PARISC_ASSEMBLY_H | ||
22 | #define _PARISC_ASSEMBLY_H | ||
23 | |||
24 | #define CALLEE_FLOAT_FRAME_SIZE 80 | ||
25 | |||
26 | #ifdef CONFIG_64BIT | ||
27 | #define LDREG ldd | ||
28 | #define STREG std | ||
29 | #define LDREGX ldd,s | ||
30 | #define LDREGM ldd,mb | ||
31 | #define STREGM std,ma | ||
32 | #define SHRREG shrd | ||
33 | #define SHLREG shld | ||
34 | #define ANDCM andcm,* | ||
35 | #define COND(x) * ## x | ||
36 | #define RP_OFFSET 16 | ||
37 | #define FRAME_SIZE 128 | ||
38 | #define CALLEE_REG_FRAME_SIZE 144 | ||
39 | #define ASM_ULONG_INSN .dword | ||
40 | #else /* CONFIG_64BIT */ | ||
41 | #define LDREG ldw | ||
42 | #define STREG stw | ||
43 | #define LDREGX ldwx,s | ||
44 | #define LDREGM ldwm | ||
45 | #define STREGM stwm | ||
46 | #define SHRREG shr | ||
47 | #define SHLREG shlw | ||
48 | #define ANDCM andcm | ||
49 | #define COND(x) x | ||
50 | #define RP_OFFSET 20 | ||
51 | #define FRAME_SIZE 64 | ||
52 | #define CALLEE_REG_FRAME_SIZE 128 | ||
53 | #define ASM_ULONG_INSN .word | ||
54 | #endif | ||
55 | |||
56 | #define CALLEE_SAVE_FRAME_SIZE (CALLEE_REG_FRAME_SIZE + CALLEE_FLOAT_FRAME_SIZE) | ||
57 | |||
58 | #ifdef CONFIG_PA20 | ||
59 | #define LDCW ldcw,co | ||
60 | #define BL b,l | ||
61 | # ifdef CONFIG_64BIT | ||
62 | # define LEVEL 2.0w | ||
63 | # else | ||
64 | # define LEVEL 2.0 | ||
65 | # endif | ||
66 | #else | ||
67 | #define LDCW ldcw | ||
68 | #define BL bl | ||
69 | #define LEVEL 1.1 | ||
70 | #endif | ||
71 | |||
72 | #ifdef __ASSEMBLY__ | ||
73 | |||
74 | #ifdef CONFIG_64BIT | ||
75 | /* the 64-bit pa gnu assembler unfortunately defaults to .level 1.1 or 2.0 so | ||
76 | * work around that for now... */ | ||
77 | .level 2.0w | ||
78 | #endif | ||
79 | |||
80 | #include <asm/asm-offsets.h> | ||
81 | #include <asm/page.h> | ||
82 | |||
83 | #include <asm/asmregs.h> | ||
84 | |||
85 | sp = 30 | ||
86 | gp = 27 | ||
87 | ipsw = 22 | ||
88 | |||
89 | /* | ||
90 | * We provide two versions of each macro to convert from physical | ||
91 | * to virtual and vice versa. The "_r1" versions take one argument | ||
92 | * register, but trashes r1 to do the conversion. The other | ||
93 | * version takes two arguments: a src and destination register. | ||
94 | * However, the source and destination registers can not be | ||
95 | * the same register. | ||
96 | */ | ||
97 | |||
98 | .macro tophys grvirt, grphys | ||
99 | ldil L%(__PAGE_OFFSET), \grphys | ||
100 | sub \grvirt, \grphys, \grphys | ||
101 | .endm | ||
102 | |||
103 | .macro tovirt grphys, grvirt | ||
104 | ldil L%(__PAGE_OFFSET), \grvirt | ||
105 | add \grphys, \grvirt, \grvirt | ||
106 | .endm | ||
107 | |||
108 | .macro tophys_r1 gr | ||
109 | ldil L%(__PAGE_OFFSET), %r1 | ||
110 | sub \gr, %r1, \gr | ||
111 | .endm | ||
112 | |||
113 | .macro tovirt_r1 gr | ||
114 | ldil L%(__PAGE_OFFSET), %r1 | ||
115 | add \gr, %r1, \gr | ||
116 | .endm | ||
117 | |||
118 | .macro delay value | ||
119 | ldil L%\value, 1 | ||
120 | ldo R%\value(1), 1 | ||
121 | addib,UV,n -1,1,. | ||
122 | addib,NUV,n -1,1,.+8 | ||
123 | nop | ||
124 | .endm | ||
125 | |||
126 | .macro debug value | ||
127 | .endm | ||
128 | |||
129 | |||
130 | /* Shift Left - note the r and t can NOT be the same! */ | ||
131 | .macro shl r, sa, t | ||
132 | dep,z \r, 31-\sa, 32-\sa, \t | ||
133 | .endm | ||
134 | |||
135 | /* The PA 2.0 shift left */ | ||
136 | .macro shlw r, sa, t | ||
137 | depw,z \r, 31-\sa, 32-\sa, \t | ||
138 | .endm | ||
139 | |||
140 | /* And the PA 2.0W shift left */ | ||
141 | .macro shld r, sa, t | ||
142 | depd,z \r, 63-\sa, 64-\sa, \t | ||
143 | .endm | ||
144 | |||
145 | /* Shift Right - note the r and t can NOT be the same! */ | ||
146 | .macro shr r, sa, t | ||
147 | extru \r, 31-\sa, 32-\sa, \t | ||
148 | .endm | ||
149 | |||
150 | /* pa20w version of shift right */ | ||
151 | .macro shrd r, sa, t | ||
152 | extrd,u \r, 63-\sa, 64-\sa, \t | ||
153 | .endm | ||
154 | |||
155 | /* load 32-bit 'value' into 'reg' compensating for the ldil | ||
156 | * sign-extension when running in wide mode. | ||
157 | * WARNING!! neither 'value' nor 'reg' can be expressions | ||
158 | * containing '.'!!!! */ | ||
159 | .macro load32 value, reg | ||
160 | ldil L%\value, \reg | ||
161 | ldo R%\value(\reg), \reg | ||
162 | .endm | ||
163 | |||
164 | .macro loadgp | ||
165 | #ifdef CONFIG_64BIT | ||
166 | ldil L%__gp, %r27 | ||
167 | ldo R%__gp(%r27), %r27 | ||
168 | #else | ||
169 | ldil L%$global$, %r27 | ||
170 | ldo R%$global$(%r27), %r27 | ||
171 | #endif | ||
172 | .endm | ||
173 | |||
174 | #define SAVE_SP(r, where) mfsp r, %r1 ! STREG %r1, where | ||
175 | #define REST_SP(r, where) LDREG where, %r1 ! mtsp %r1, r | ||
176 | #define SAVE_CR(r, where) mfctl r, %r1 ! STREG %r1, where | ||
177 | #define REST_CR(r, where) LDREG where, %r1 ! mtctl %r1, r | ||
178 | |||
179 | .macro save_general regs | ||
180 | STREG %r1, PT_GR1 (\regs) | ||
181 | STREG %r2, PT_GR2 (\regs) | ||
182 | STREG %r3, PT_GR3 (\regs) | ||
183 | STREG %r4, PT_GR4 (\regs) | ||
184 | STREG %r5, PT_GR5 (\regs) | ||
185 | STREG %r6, PT_GR6 (\regs) | ||
186 | STREG %r7, PT_GR7 (\regs) | ||
187 | STREG %r8, PT_GR8 (\regs) | ||
188 | STREG %r9, PT_GR9 (\regs) | ||
189 | STREG %r10, PT_GR10(\regs) | ||
190 | STREG %r11, PT_GR11(\regs) | ||
191 | STREG %r12, PT_GR12(\regs) | ||
192 | STREG %r13, PT_GR13(\regs) | ||
193 | STREG %r14, PT_GR14(\regs) | ||
194 | STREG %r15, PT_GR15(\regs) | ||
195 | STREG %r16, PT_GR16(\regs) | ||
196 | STREG %r17, PT_GR17(\regs) | ||
197 | STREG %r18, PT_GR18(\regs) | ||
198 | STREG %r19, PT_GR19(\regs) | ||
199 | STREG %r20, PT_GR20(\regs) | ||
200 | STREG %r21, PT_GR21(\regs) | ||
201 | STREG %r22, PT_GR22(\regs) | ||
202 | STREG %r23, PT_GR23(\regs) | ||
203 | STREG %r24, PT_GR24(\regs) | ||
204 | STREG %r25, PT_GR25(\regs) | ||
205 | /* r26 is saved in get_stack and used to preserve a value across virt_map */ | ||
206 | STREG %r27, PT_GR27(\regs) | ||
207 | STREG %r28, PT_GR28(\regs) | ||
208 | /* r29 is saved in get_stack and used to point to saved registers */ | ||
209 | /* r30 stack pointer saved in get_stack */ | ||
210 | STREG %r31, PT_GR31(\regs) | ||
211 | .endm | ||
212 | |||
213 | .macro rest_general regs | ||
214 | /* r1 used as a temp in rest_stack and is restored there */ | ||
215 | LDREG PT_GR2 (\regs), %r2 | ||
216 | LDREG PT_GR3 (\regs), %r3 | ||
217 | LDREG PT_GR4 (\regs), %r4 | ||
218 | LDREG PT_GR5 (\regs), %r5 | ||
219 | LDREG PT_GR6 (\regs), %r6 | ||
220 | LDREG PT_GR7 (\regs), %r7 | ||
221 | LDREG PT_GR8 (\regs), %r8 | ||
222 | LDREG PT_GR9 (\regs), %r9 | ||
223 | LDREG PT_GR10(\regs), %r10 | ||
224 | LDREG PT_GR11(\regs), %r11 | ||
225 | LDREG PT_GR12(\regs), %r12 | ||
226 | LDREG PT_GR13(\regs), %r13 | ||
227 | LDREG PT_GR14(\regs), %r14 | ||
228 | LDREG PT_GR15(\regs), %r15 | ||
229 | LDREG PT_GR16(\regs), %r16 | ||
230 | LDREG PT_GR17(\regs), %r17 | ||
231 | LDREG PT_GR18(\regs), %r18 | ||
232 | LDREG PT_GR19(\regs), %r19 | ||
233 | LDREG PT_GR20(\regs), %r20 | ||
234 | LDREG PT_GR21(\regs), %r21 | ||
235 | LDREG PT_GR22(\regs), %r22 | ||
236 | LDREG PT_GR23(\regs), %r23 | ||
237 | LDREG PT_GR24(\regs), %r24 | ||
238 | LDREG PT_GR25(\regs), %r25 | ||
239 | LDREG PT_GR26(\regs), %r26 | ||
240 | LDREG PT_GR27(\regs), %r27 | ||
241 | LDREG PT_GR28(\regs), %r28 | ||
242 | /* r29 points to register save area, and is restored in rest_stack */ | ||
243 | /* r30 stack pointer restored in rest_stack */ | ||
244 | LDREG PT_GR31(\regs), %r31 | ||
245 | .endm | ||
246 | |||
247 | .macro save_fp regs | ||
248 | fstd,ma %fr0, 8(\regs) | ||
249 | fstd,ma %fr1, 8(\regs) | ||
250 | fstd,ma %fr2, 8(\regs) | ||
251 | fstd,ma %fr3, 8(\regs) | ||
252 | fstd,ma %fr4, 8(\regs) | ||
253 | fstd,ma %fr5, 8(\regs) | ||
254 | fstd,ma %fr6, 8(\regs) | ||
255 | fstd,ma %fr7, 8(\regs) | ||
256 | fstd,ma %fr8, 8(\regs) | ||
257 | fstd,ma %fr9, 8(\regs) | ||
258 | fstd,ma %fr10, 8(\regs) | ||
259 | fstd,ma %fr11, 8(\regs) | ||
260 | fstd,ma %fr12, 8(\regs) | ||
261 | fstd,ma %fr13, 8(\regs) | ||
262 | fstd,ma %fr14, 8(\regs) | ||
263 | fstd,ma %fr15, 8(\regs) | ||
264 | fstd,ma %fr16, 8(\regs) | ||
265 | fstd,ma %fr17, 8(\regs) | ||
266 | fstd,ma %fr18, 8(\regs) | ||
267 | fstd,ma %fr19, 8(\regs) | ||
268 | fstd,ma %fr20, 8(\regs) | ||
269 | fstd,ma %fr21, 8(\regs) | ||
270 | fstd,ma %fr22, 8(\regs) | ||
271 | fstd,ma %fr23, 8(\regs) | ||
272 | fstd,ma %fr24, 8(\regs) | ||
273 | fstd,ma %fr25, 8(\regs) | ||
274 | fstd,ma %fr26, 8(\regs) | ||
275 | fstd,ma %fr27, 8(\regs) | ||
276 | fstd,ma %fr28, 8(\regs) | ||
277 | fstd,ma %fr29, 8(\regs) | ||
278 | fstd,ma %fr30, 8(\regs) | ||
279 | fstd %fr31, 0(\regs) | ||
280 | .endm | ||
281 | |||
282 | .macro rest_fp regs | ||
283 | fldd 0(\regs), %fr31 | ||
284 | fldd,mb -8(\regs), %fr30 | ||
285 | fldd,mb -8(\regs), %fr29 | ||
286 | fldd,mb -8(\regs), %fr28 | ||
287 | fldd,mb -8(\regs), %fr27 | ||
288 | fldd,mb -8(\regs), %fr26 | ||
289 | fldd,mb -8(\regs), %fr25 | ||
290 | fldd,mb -8(\regs), %fr24 | ||
291 | fldd,mb -8(\regs), %fr23 | ||
292 | fldd,mb -8(\regs), %fr22 | ||
293 | fldd,mb -8(\regs), %fr21 | ||
294 | fldd,mb -8(\regs), %fr20 | ||
295 | fldd,mb -8(\regs), %fr19 | ||
296 | fldd,mb -8(\regs), %fr18 | ||
297 | fldd,mb -8(\regs), %fr17 | ||
298 | fldd,mb -8(\regs), %fr16 | ||
299 | fldd,mb -8(\regs), %fr15 | ||
300 | fldd,mb -8(\regs), %fr14 | ||
301 | fldd,mb -8(\regs), %fr13 | ||
302 | fldd,mb -8(\regs), %fr12 | ||
303 | fldd,mb -8(\regs), %fr11 | ||
304 | fldd,mb -8(\regs), %fr10 | ||
305 | fldd,mb -8(\regs), %fr9 | ||
306 | fldd,mb -8(\regs), %fr8 | ||
307 | fldd,mb -8(\regs), %fr7 | ||
308 | fldd,mb -8(\regs), %fr6 | ||
309 | fldd,mb -8(\regs), %fr5 | ||
310 | fldd,mb -8(\regs), %fr4 | ||
311 | fldd,mb -8(\regs), %fr3 | ||
312 | fldd,mb -8(\regs), %fr2 | ||
313 | fldd,mb -8(\regs), %fr1 | ||
314 | fldd,mb -8(\regs), %fr0 | ||
315 | .endm | ||
316 | |||
317 | .macro callee_save_float | ||
318 | fstd,ma %fr12, 8(%r30) | ||
319 | fstd,ma %fr13, 8(%r30) | ||
320 | fstd,ma %fr14, 8(%r30) | ||
321 | fstd,ma %fr15, 8(%r30) | ||
322 | fstd,ma %fr16, 8(%r30) | ||
323 | fstd,ma %fr17, 8(%r30) | ||
324 | fstd,ma %fr18, 8(%r30) | ||
325 | fstd,ma %fr19, 8(%r30) | ||
326 | fstd,ma %fr20, 8(%r30) | ||
327 | fstd,ma %fr21, 8(%r30) | ||
328 | .endm | ||
329 | |||
330 | .macro callee_rest_float | ||
331 | fldd,mb -8(%r30), %fr21 | ||
332 | fldd,mb -8(%r30), %fr20 | ||
333 | fldd,mb -8(%r30), %fr19 | ||
334 | fldd,mb -8(%r30), %fr18 | ||
335 | fldd,mb -8(%r30), %fr17 | ||
336 | fldd,mb -8(%r30), %fr16 | ||
337 | fldd,mb -8(%r30), %fr15 | ||
338 | fldd,mb -8(%r30), %fr14 | ||
339 | fldd,mb -8(%r30), %fr13 | ||
340 | fldd,mb -8(%r30), %fr12 | ||
341 | .endm | ||
342 | |||
343 | #ifdef CONFIG_64BIT | ||
344 | .macro callee_save | ||
345 | std,ma %r3, CALLEE_REG_FRAME_SIZE(%r30) | ||
346 | mfctl %cr27, %r3 | ||
347 | std %r4, -136(%r30) | ||
348 | std %r5, -128(%r30) | ||
349 | std %r6, -120(%r30) | ||
350 | std %r7, -112(%r30) | ||
351 | std %r8, -104(%r30) | ||
352 | std %r9, -96(%r30) | ||
353 | std %r10, -88(%r30) | ||
354 | std %r11, -80(%r30) | ||
355 | std %r12, -72(%r30) | ||
356 | std %r13, -64(%r30) | ||
357 | std %r14, -56(%r30) | ||
358 | std %r15, -48(%r30) | ||
359 | std %r16, -40(%r30) | ||
360 | std %r17, -32(%r30) | ||
361 | std %r18, -24(%r30) | ||
362 | std %r3, -16(%r30) | ||
363 | .endm | ||
364 | |||
365 | .macro callee_rest | ||
366 | ldd -16(%r30), %r3 | ||
367 | ldd -24(%r30), %r18 | ||
368 | ldd -32(%r30), %r17 | ||
369 | ldd -40(%r30), %r16 | ||
370 | ldd -48(%r30), %r15 | ||
371 | ldd -56(%r30), %r14 | ||
372 | ldd -64(%r30), %r13 | ||
373 | ldd -72(%r30), %r12 | ||
374 | ldd -80(%r30), %r11 | ||
375 | ldd -88(%r30), %r10 | ||
376 | ldd -96(%r30), %r9 | ||
377 | ldd -104(%r30), %r8 | ||
378 | ldd -112(%r30), %r7 | ||
379 | ldd -120(%r30), %r6 | ||
380 | ldd -128(%r30), %r5 | ||
381 | ldd -136(%r30), %r4 | ||
382 | mtctl %r3, %cr27 | ||
383 | ldd,mb -CALLEE_REG_FRAME_SIZE(%r30), %r3 | ||
384 | .endm | ||
385 | |||
386 | #else /* ! CONFIG_64BIT */ | ||
387 | |||
388 | .macro callee_save | ||
389 | stw,ma %r3, CALLEE_REG_FRAME_SIZE(%r30) | ||
390 | mfctl %cr27, %r3 | ||
391 | stw %r4, -124(%r30) | ||
392 | stw %r5, -120(%r30) | ||
393 | stw %r6, -116(%r30) | ||
394 | stw %r7, -112(%r30) | ||
395 | stw %r8, -108(%r30) | ||
396 | stw %r9, -104(%r30) | ||
397 | stw %r10, -100(%r30) | ||
398 | stw %r11, -96(%r30) | ||
399 | stw %r12, -92(%r30) | ||
400 | stw %r13, -88(%r30) | ||
401 | stw %r14, -84(%r30) | ||
402 | stw %r15, -80(%r30) | ||
403 | stw %r16, -76(%r30) | ||
404 | stw %r17, -72(%r30) | ||
405 | stw %r18, -68(%r30) | ||
406 | stw %r3, -64(%r30) | ||
407 | .endm | ||
408 | |||
409 | .macro callee_rest | ||
410 | ldw -64(%r30), %r3 | ||
411 | ldw -68(%r30), %r18 | ||
412 | ldw -72(%r30), %r17 | ||
413 | ldw -76(%r30), %r16 | ||
414 | ldw -80(%r30), %r15 | ||
415 | ldw -84(%r30), %r14 | ||
416 | ldw -88(%r30), %r13 | ||
417 | ldw -92(%r30), %r12 | ||
418 | ldw -96(%r30), %r11 | ||
419 | ldw -100(%r30), %r10 | ||
420 | ldw -104(%r30), %r9 | ||
421 | ldw -108(%r30), %r8 | ||
422 | ldw -112(%r30), %r7 | ||
423 | ldw -116(%r30), %r6 | ||
424 | ldw -120(%r30), %r5 | ||
425 | ldw -124(%r30), %r4 | ||
426 | mtctl %r3, %cr27 | ||
427 | ldw,mb -CALLEE_REG_FRAME_SIZE(%r30), %r3 | ||
428 | .endm | ||
429 | #endif /* ! CONFIG_64BIT */ | ||
430 | |||
431 | .macro save_specials regs | ||
432 | |||
433 | SAVE_SP (%sr0, PT_SR0 (\regs)) | ||
434 | SAVE_SP (%sr1, PT_SR1 (\regs)) | ||
435 | SAVE_SP (%sr2, PT_SR2 (\regs)) | ||
436 | SAVE_SP (%sr3, PT_SR3 (\regs)) | ||
437 | SAVE_SP (%sr4, PT_SR4 (\regs)) | ||
438 | SAVE_SP (%sr5, PT_SR5 (\regs)) | ||
439 | SAVE_SP (%sr6, PT_SR6 (\regs)) | ||
440 | SAVE_SP (%sr7, PT_SR7 (\regs)) | ||
441 | |||
442 | SAVE_CR (%cr17, PT_IASQ0(\regs)) | ||
443 | mtctl %r0, %cr17 | ||
444 | SAVE_CR (%cr17, PT_IASQ1(\regs)) | ||
445 | |||
446 | SAVE_CR (%cr18, PT_IAOQ0(\regs)) | ||
447 | mtctl %r0, %cr18 | ||
448 | SAVE_CR (%cr18, PT_IAOQ1(\regs)) | ||
449 | |||
450 | #ifdef CONFIG_64BIT | ||
451 | /* cr11 (sar) is a funny one. 5 bits on PA1.1 and 6 bit on PA2.0 | ||
452 | * For PA2.0 mtsar or mtctl always write 6 bits, but mfctl only | ||
453 | * reads 5 bits. Use mfctl,w to read all six bits. Otherwise | ||
454 | * we lose the 6th bit on a save/restore over interrupt. | ||
455 | */ | ||
456 | mfctl,w %cr11, %r1 | ||
457 | STREG %r1, PT_SAR (\regs) | ||
458 | #else | ||
459 | SAVE_CR (%cr11, PT_SAR (\regs)) | ||
460 | #endif | ||
461 | SAVE_CR (%cr19, PT_IIR (\regs)) | ||
462 | |||
463 | /* | ||
464 | * Code immediately following this macro (in intr_save) relies | ||
465 | * on r8 containing ipsw. | ||
466 | */ | ||
467 | mfctl %cr22, %r8 | ||
468 | STREG %r8, PT_PSW(\regs) | ||
469 | .endm | ||
470 | |||
471 | .macro rest_specials regs | ||
472 | |||
473 | REST_SP (%sr0, PT_SR0 (\regs)) | ||
474 | REST_SP (%sr1, PT_SR1 (\regs)) | ||
475 | REST_SP (%sr2, PT_SR2 (\regs)) | ||
476 | REST_SP (%sr3, PT_SR3 (\regs)) | ||
477 | REST_SP (%sr4, PT_SR4 (\regs)) | ||
478 | REST_SP (%sr5, PT_SR5 (\regs)) | ||
479 | REST_SP (%sr6, PT_SR6 (\regs)) | ||
480 | REST_SP (%sr7, PT_SR7 (\regs)) | ||
481 | |||
482 | REST_CR (%cr17, PT_IASQ0(\regs)) | ||
483 | REST_CR (%cr17, PT_IASQ1(\regs)) | ||
484 | |||
485 | REST_CR (%cr18, PT_IAOQ0(\regs)) | ||
486 | REST_CR (%cr18, PT_IAOQ1(\regs)) | ||
487 | |||
488 | REST_CR (%cr11, PT_SAR (\regs)) | ||
489 | |||
490 | REST_CR (%cr22, PT_PSW (\regs)) | ||
491 | .endm | ||
492 | |||
493 | |||
494 | /* First step to create a "relied upon translation" | ||
495 | * See PA 2.0 Arch. page F-4 and F-5. | ||
496 | * | ||
497 | * The ssm was originally necessary due to a "PCxT bug". | ||
498 | * But someone decided it needed to be added to the architecture | ||
499 | * and this "feature" went into rev3 of PA-RISC 1.1 Arch Manual. | ||
500 | * It's been carried forward into PA 2.0 Arch as well. :^( | ||
501 | * | ||
502 | * "ssm 0,%r0" is a NOP with side effects (prefetch barrier). | ||
503 | * rsm/ssm prevents the ifetch unit from speculatively fetching | ||
504 | * instructions past this line in the code stream. | ||
505 | * PA 2.0 processor will single step all insn in the same QUAD (4 insn). | ||
506 | */ | ||
507 | .macro pcxt_ssm_bug | ||
508 | rsm PSW_SM_I,%r0 | ||
509 | nop /* 1 */ | ||
510 | nop /* 2 */ | ||
511 | nop /* 3 */ | ||
512 | nop /* 4 */ | ||
513 | nop /* 5 */ | ||
514 | nop /* 6 */ | ||
515 | nop /* 7 */ | ||
516 | .endm | ||
517 | |||
518 | #endif /* __ASSEMBLY__ */ | ||
519 | #endif | ||
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
new file mode 100644
index 000000000000..57fcc4a5ebb4
--- /dev/null
+++ b/arch/parisc/include/asm/atomic.h
@@ -0,0 +1,348 @@
1 | /* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org> | ||
2 | * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org> | ||
3 | */ | ||
4 | |||
5 | #ifndef _ASM_PARISC_ATOMIC_H_ | ||
6 | #define _ASM_PARISC_ATOMIC_H_ | ||
7 | |||
8 | #include <linux/types.h> | ||
9 | #include <asm/system.h> | ||
10 | |||
11 | /* | ||
12 | * Atomic operations that C can't guarantee us. Useful for | ||
13 | * resource counting etc.. | ||
14 | * | ||
15 | * And probably incredibly slow on parisc. OTOH, we don't | ||
16 | * have to write any serious assembly. prumpf | ||
17 | */ | ||
18 | |||
19 | #ifdef CONFIG_SMP | ||
20 | #include <asm/spinlock.h> | ||
21 | #include <asm/cache.h> /* we use L1_CACHE_BYTES */ | ||
22 | |||
23 | /* Use an array of spinlocks for our atomic_ts. | ||
24 | * Hash function to index into a different SPINLOCK. | ||
25 | * Since "a" is usually an address, use one spinlock per cacheline. | ||
26 | */ | ||
27 | # define ATOMIC_HASH_SIZE 4 | ||
28 | # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) | ||
29 | |||
30 | extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; | ||
31 | |||
32 | /* Can't use raw_spin_lock_irq because of #include problems, so | ||
33 | * this is the substitute */ | ||
34 | #define _atomic_spin_lock_irqsave(l,f) do { \ | ||
35 | raw_spinlock_t *s = ATOMIC_HASH(l); \ | ||
36 | local_irq_save(f); \ | ||
37 | __raw_spin_lock(s); \ | ||
38 | } while(0) | ||
39 | |||
40 | #define _atomic_spin_unlock_irqrestore(l,f) do { \ | ||
41 | raw_spinlock_t *s = ATOMIC_HASH(l); \ | ||
42 | __raw_spin_unlock(s); \ | ||
43 | local_irq_restore(f); \ | ||
44 | } while(0) | ||
45 | |||
46 | |||
47 | #else | ||
48 | # define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0) | ||
49 | # define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0) | ||
50 | #endif | ||
51 | |||
52 | /* This should get optimized out since it's never called. | ||
53 | ** Or get a link error if xchg is used "wrong". | ||
54 | */ | ||
55 | extern void __xchg_called_with_bad_pointer(void); | ||
56 | |||
57 | |||
58 | /* __xchg32/64 defined in arch/parisc/lib/bitops.c */ | ||
59 | extern unsigned long __xchg8(char, char *); | ||
60 | extern unsigned long __xchg32(int, int *); | ||
61 | #ifdef CONFIG_64BIT | ||
62 | extern unsigned long __xchg64(unsigned long, unsigned long *); | ||
63 | #endif | ||
64 | |||
65 | /* optimizer better get rid of switch since size is a constant */ | ||
66 | static __inline__ unsigned long | ||
67 | __xchg(unsigned long x, __volatile__ void * ptr, int size) | ||
68 | { | ||
69 | switch(size) { | ||
70 | #ifdef CONFIG_64BIT | ||
71 | case 8: return __xchg64(x,(unsigned long *) ptr); | ||
72 | #endif | ||
73 | case 4: return __xchg32((int) x, (int *) ptr); | ||
74 | case 1: return __xchg8((char) x, (char *) ptr); | ||
75 | } | ||
76 | __xchg_called_with_bad_pointer(); | ||
77 | return x; | ||
78 | } | ||
79 | |||
80 | |||
81 | /* | ||
82 | ** REVISIT - Abandoned use of LDCW in xchg() for now: | ||
83 | ** o need to test sizeof(*ptr) to avoid clearing adjacent bytes | ||
84 | ** o and while we are at it, could CONFIG_64BIT code use LDCD too? | ||
85 | ** | ||
86 | ** if (__builtin_constant_p(x) && (x == NULL)) | ||
87 | ** if (((unsigned long)p & 0xf) == 0) | ||
88 | ** return __ldcw(p); | ||
89 | */ | ||
90 | #define xchg(ptr,x) \ | ||
91 | ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) | ||
92 | |||
93 | |||
94 | #define __HAVE_ARCH_CMPXCHG 1 | ||
95 | |||
96 | /* bug catcher for when unsupported size is used - won't link */ | ||
97 | extern void __cmpxchg_called_with_bad_pointer(void); | ||
98 | |||
99 | /* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */ | ||
100 | extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_); | ||
101 | extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_); | ||
102 | |||
103 | /* don't worry...optimizer will get rid of most of this */ | ||
104 | static __inline__ unsigned long | ||
105 | __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size) | ||
106 | { | ||
107 | switch(size) { | ||
108 | #ifdef CONFIG_64BIT | ||
109 | case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_); | ||
110 | #endif | ||
111 | case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_); | ||
112 | } | ||
113 | __cmpxchg_called_with_bad_pointer(); | ||
114 | return old; | ||
115 | } | ||
116 | |||
117 | #define cmpxchg(ptr,o,n) \ | ||
118 | ({ \ | ||
119 | __typeof__(*(ptr)) _o_ = (o); \ | ||
120 | __typeof__(*(ptr)) _n_ = (n); \ | ||
121 | (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ | ||
122 | (unsigned long)_n_, sizeof(*(ptr))); \ | ||
123 | }) | ||
124 | |||
125 | #include <asm-generic/cmpxchg-local.h> | ||
126 | |||
127 | static inline unsigned long __cmpxchg_local(volatile void *ptr, | ||
128 | unsigned long old, | ||
129 | unsigned long new_, int size) | ||
130 | { | ||
131 | switch (size) { | ||
132 | #ifdef CONFIG_64BIT | ||
133 | case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_); | ||
134 | #endif | ||
135 | case 4: return __cmpxchg_u32(ptr, old, new_); | ||
136 | default: | ||
137 | return __cmpxchg_local_generic(ptr, old, new_, size); | ||
138 | } | ||
139 | } | ||
140 | |||
141 | /* | ||
142 | * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make | ||
143 | * them available. | ||
144 | */ | ||
145 | #define cmpxchg_local(ptr, o, n) \ | ||
146 | ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ | ||
147 | (unsigned long)(n), sizeof(*(ptr)))) | ||
148 | #ifdef CONFIG_64BIT | ||
149 | #define cmpxchg64_local(ptr, o, n) \ | ||
150 | ({ \ | ||
151 | BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ | ||
152 | cmpxchg_local((ptr), (o), (n)); \ | ||
153 | }) | ||
154 | #else | ||
155 | #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) | ||
156 | #endif | ||
157 | |||
158 | /* Note that we need not lock read accesses - aligned word writes/reads | ||
159 | * are atomic, so a reader never sees unconsistent values. | ||
160 | * | ||
161 | * Cache-line alignment would conflict with, for example, linux/module.h | ||
162 | */ | ||
163 | |||
164 | typedef struct { volatile int counter; } atomic_t; | ||
165 | |||
166 | /* It's possible to reduce all atomic operations to either | ||
167 | * __atomic_add_return, atomic_set and atomic_read (the latter | ||
168 | * is there only for consistency). | ||
169 | */ | ||
170 | |||
171 | static __inline__ int __atomic_add_return(int i, atomic_t *v) | ||
172 | { | ||
173 | int ret; | ||
174 | unsigned long flags; | ||
175 | _atomic_spin_lock_irqsave(v, flags); | ||
176 | |||
177 | ret = (v->counter += i); | ||
178 | |||
179 | _atomic_spin_unlock_irqrestore(v, flags); | ||
180 | return ret; | ||
181 | } | ||
182 | |||
183 | static __inline__ void atomic_set(atomic_t *v, int i) | ||
184 | { | ||
185 | unsigned long flags; | ||
186 | _atomic_spin_lock_irqsave(v, flags); | ||
187 | |||
188 | v->counter = i; | ||
189 | |||
190 | _atomic_spin_unlock_irqrestore(v, flags); | ||
191 | } | ||
192 | |||
193 | static __inline__ int atomic_read(const atomic_t *v) | ||
194 | { | ||
195 | return v->counter; | ||
196 | } | ||
197 | |||
198 | /* exported interface */ | ||
199 | #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) | ||
200 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) | ||
201 | |||
202 | /** | ||
203 | * atomic_add_unless - add unless the number is a given value | ||
204 | * @v: pointer of type atomic_t | ||
205 | * @a: the amount to add to v... | ||
206 | * @u: ...unless v is equal to u. | ||
207 | * | ||
208 | * Atomically adds @a to @v, so long as it was not @u. | ||
209 | * Returns non-zero if @v was not @u, and zero otherwise. | ||
210 | */ | ||
211 | static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | ||
212 | { | ||
213 | int c, old; | ||
214 | c = atomic_read(v); | ||
215 | for (;;) { | ||
216 | if (unlikely(c == (u))) | ||
217 | break; | ||
218 | old = atomic_cmpxchg((v), c, c + (a)); | ||
219 | if (likely(old == c)) | ||
220 | break; | ||
221 | c = old; | ||
222 | } | ||
223 | return c != (u); | ||
224 | } | ||
225 | |||
226 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) | ||
227 | |||
228 | #define atomic_add(i,v) ((void)(__atomic_add_return( ((int)i),(v)))) | ||
229 | #define atomic_sub(i,v) ((void)(__atomic_add_return(-((int)i),(v)))) | ||
230 | #define atomic_inc(v) ((void)(__atomic_add_return( 1,(v)))) | ||
231 | #define atomic_dec(v) ((void)(__atomic_add_return( -1,(v)))) | ||
232 | |||
233 | #define atomic_add_return(i,v) (__atomic_add_return( ((int)i),(v))) | ||
234 | #define atomic_sub_return(i,v) (__atomic_add_return(-((int)i),(v))) | ||
235 | #define atomic_inc_return(v) (__atomic_add_return( 1,(v))) | ||
236 | #define atomic_dec_return(v) (__atomic_add_return( -1,(v))) | ||
237 | |||
238 | #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) | ||
239 | |||
240 | /* | ||
241 | * atomic_inc_and_test - increment and test | ||
242 | * @v: pointer of type atomic_t | ||
243 | * | ||
244 | * Atomically increments @v by 1 | ||
245 | * and returns true if the result is zero, or false for all | ||
246 | * other cases. | ||
247 | */ | ||
248 | #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) | ||
249 | |||
250 | #define atomic_dec_and_test(v) (atomic_dec_return(v) == 0) | ||
251 | |||
252 | #define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0) | ||
253 | |||
254 | #define ATOMIC_INIT(i) ((atomic_t) { (i) }) | ||
255 | |||
256 | #define smp_mb__before_atomic_dec() smp_mb() | ||
257 | #define smp_mb__after_atomic_dec() smp_mb() | ||
258 | #define smp_mb__before_atomic_inc() smp_mb() | ||
259 | #define smp_mb__after_atomic_inc() smp_mb() | ||
260 | |||
261 | #ifdef CONFIG_64BIT | ||
262 | |||
263 | typedef struct { volatile s64 counter; } atomic64_t; | ||
264 | |||
265 | #define ATOMIC64_INIT(i) ((atomic64_t) { (i) }) | ||
266 | |||
267 | static __inline__ int | ||
268 | __atomic64_add_return(s64 i, atomic64_t *v) | ||
269 | { | ||
270 | int ret; | ||
271 | unsigned long flags; | ||
272 | _atomic_spin_lock_irqsave(v, flags); | ||
273 | |||
274 | ret = (v->counter += i); | ||
275 | |||
276 | _atomic_spin_unlock_irqrestore(v, flags); | ||
277 | return ret; | ||
278 | } | ||
279 | |||
280 | static __inline__ void | ||
281 | atomic64_set(atomic64_t *v, s64 i) | ||
282 | { | ||
283 | unsigned long flags; | ||
284 | _atomic_spin_lock_irqsave(v, flags); | ||
285 | |||
286 | v->counter = i; | ||
287 | |||
288 | _atomic_spin_unlock_irqrestore(v, flags); | ||
289 | } | ||
290 | |||
291 | static __inline__ s64 | ||
292 | atomic64_read(const atomic64_t *v) | ||
293 | { | ||
294 | return v->counter; | ||
295 | } | ||
296 | |||
297 | #define atomic64_add(i,v) ((void)(__atomic64_add_return( ((s64)i),(v)))) | ||
298 | #define atomic64_sub(i,v) ((void)(__atomic64_add_return(-((s64)i),(v)))) | ||
299 | #define atomic64_inc(v) ((void)(__atomic64_add_return( 1,(v)))) | ||
300 | #define atomic64_dec(v) ((void)(__atomic64_add_return( -1,(v)))) | ||
301 | |||
302 | #define atomic64_add_return(i,v) (__atomic64_add_return( ((s64)i),(v))) | ||
303 | #define atomic64_sub_return(i,v) (__atomic64_add_return(-((s64)i),(v))) | ||
304 | #define atomic64_inc_return(v) (__atomic64_add_return( 1,(v))) | ||
305 | #define atomic64_dec_return(v) (__atomic64_add_return( -1,(v))) | ||
306 | |||
307 | #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) | ||
308 | |||
309 | #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) | ||
310 | #define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0) | ||
311 | #define atomic64_sub_and_test(i,v) (atomic64_sub_return((i),(v)) == 0) | ||
312 | |||
313 | /* exported interface */ | ||
314 | #define atomic64_cmpxchg(v, o, n) \ | ||
315 | ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) | ||
316 | #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) | ||
317 | |||
318 | /** | ||
319 | * atomic64_add_unless - add unless the number is a given value | ||
320 | * @v: pointer of type atomic64_t | ||
321 | * @a: the amount to add to v... | ||
322 | * @u: ...unless v is equal to u. | ||
323 | * | ||
324 | * Atomically adds @a to @v, so long as it was not @u. | ||
325 | * Returns non-zero if @v was not @u, and zero otherwise. | ||
326 | */ | ||
327 | static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) | ||
328 | { | ||
329 | long c, old; | ||
330 | c = atomic64_read(v); | ||
331 | for (;;) { | ||
332 | if (unlikely(c == (u))) | ||
333 | break; | ||
334 | old = atomic64_cmpxchg((v), c, c + (a)); | ||
335 | if (likely(old == c)) | ||
336 | break; | ||
337 | c = old; | ||
338 | } | ||
339 | return c != (u); | ||
340 | } | ||
341 | |||
342 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) | ||
343 | |||
344 | #endif /* CONFIG_64BIT */ | ||
345 | |||
346 | #include <asm-generic/atomic.h> | ||
347 | |||
348 | #endif /* _ASM_PARISC_ATOMIC_H_ */ | ||
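The new parisc atomic.h emulates atomic operations with an array of hashed spinlocks: the atomic_t's address, divided by the cache-line size, selects one of ATOMIC_HASH_SIZE locks, so independent counters rarely contend on the same lock. A minimal user-space sketch of the same idea, assuming pthread mutexes in place of raw_spinlock_t; all names and constants below are illustrative:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define CACHE_LINE_BYTES	64	/* stand-in for L1_CACHE_BYTES */
#define HASH_SIZE		4	/* stand-in for ATOMIC_HASH_SIZE */

static pthread_mutex_t hash_lock[HASH_SIZE] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

/* Hash an object's address to one lock: one lock per cache line,
 * modulo the size of the lock array (cf. ATOMIC_HASH above). */
static pthread_mutex_t *lock_for(const void *addr)
{
	return &hash_lock[((uintptr_t)addr / CACHE_LINE_BYTES) % HASH_SIZE];
}

typedef struct { volatile int counter; } emu_atomic_t;

static int emu_atomic_add_return(int i, emu_atomic_t *v)
{
	pthread_mutex_t *lock = lock_for(v);
	int ret;

	pthread_mutex_lock(lock);
	ret = (v->counter += i);	/* same shape as __atomic_add_return */
	pthread_mutex_unlock(lock);
	return ret;
}

int main(void)
{
	emu_atomic_t v = { 0 };

	printf("%d\n", emu_atomic_add_return(5, &v));	/* prints 5 */
	return 0;
}

Build with -lpthread; the kernel version differs mainly in using raw spinlocks plus local_irq_save() so it is safe in interrupt context.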
diff --git a/arch/parisc/include/asm/auxvec.h b/arch/parisc/include/asm/auxvec.h
new file mode 100644
index 000000000000..9c3ac4b89dc9
--- /dev/null
+++ b/arch/parisc/include/asm/auxvec.h
@@ -0,0 +1,4 @@
+#ifndef __ASMPARISC_AUXVEC_H
+#define __ASMPARISC_AUXVEC_H
+
+#endif
diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h
new file mode 100644
index 000000000000..7a6ea10bd231
--- /dev/null
+++ b/arch/parisc/include/asm/bitops.h
@@ -0,0 +1,239 @@
1 | #ifndef _PARISC_BITOPS_H | ||
2 | #define _PARISC_BITOPS_H | ||
3 | |||
4 | #ifndef _LINUX_BITOPS_H | ||
5 | #error only <linux/bitops.h> can be included directly | ||
6 | #endif | ||
7 | |||
8 | #include <linux/compiler.h> | ||
9 | #include <asm/types.h> /* for BITS_PER_LONG/SHIFT_PER_LONG */ | ||
10 | #include <asm/byteorder.h> | ||
11 | #include <asm/atomic.h> | ||
12 | |||
13 | /* | ||
14 | * HP-PARISC specific bit operations | ||
15 | * for a detailed description of the functions please refer | ||
16 | * to include/asm-i386/bitops.h or kerneldoc | ||
17 | */ | ||
18 | |||
19 | #define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1)) | ||
20 | |||
21 | |||
22 | #define smp_mb__before_clear_bit() smp_mb() | ||
23 | #define smp_mb__after_clear_bit() smp_mb() | ||
24 | |||
25 | /* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion | ||
26 | * on use of volatile and __*_bit() (set/clear/change): | ||
27 | * *_bit() want use of volatile. | ||
28 | * __*_bit() are "relaxed" and don't use spinlock or volatile. | ||
29 | */ | ||
30 | |||
31 | static __inline__ void set_bit(int nr, volatile unsigned long * addr) | ||
32 | { | ||
33 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); | ||
34 | unsigned long flags; | ||
35 | |||
36 | addr += (nr >> SHIFT_PER_LONG); | ||
37 | _atomic_spin_lock_irqsave(addr, flags); | ||
38 | *addr |= mask; | ||
39 | _atomic_spin_unlock_irqrestore(addr, flags); | ||
40 | } | ||
41 | |||
42 | static __inline__ void clear_bit(int nr, volatile unsigned long * addr) | ||
43 | { | ||
44 | unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr)); | ||
45 | unsigned long flags; | ||
46 | |||
47 | addr += (nr >> SHIFT_PER_LONG); | ||
48 | _atomic_spin_lock_irqsave(addr, flags); | ||
49 | *addr &= mask; | ||
50 | _atomic_spin_unlock_irqrestore(addr, flags); | ||
51 | } | ||
52 | |||
53 | static __inline__ void change_bit(int nr, volatile unsigned long * addr) | ||
54 | { | ||
55 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); | ||
56 | unsigned long flags; | ||
57 | |||
58 | addr += (nr >> SHIFT_PER_LONG); | ||
59 | _atomic_spin_lock_irqsave(addr, flags); | ||
60 | *addr ^= mask; | ||
61 | _atomic_spin_unlock_irqrestore(addr, flags); | ||
62 | } | ||
63 | |||
64 | static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr) | ||
65 | { | ||
66 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); | ||
67 | unsigned long old; | ||
68 | unsigned long flags; | ||
69 | int set; | ||
70 | |||
71 | addr += (nr >> SHIFT_PER_LONG); | ||
72 | _atomic_spin_lock_irqsave(addr, flags); | ||
73 | old = *addr; | ||
74 | set = (old & mask) ? 1 : 0; | ||
75 | if (!set) | ||
76 | *addr = old | mask; | ||
77 | _atomic_spin_unlock_irqrestore(addr, flags); | ||
78 | |||
79 | return set; | ||
80 | } | ||
81 | |||
82 | static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr) | ||
83 | { | ||
84 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); | ||
85 | unsigned long old; | ||
86 | unsigned long flags; | ||
87 | int set; | ||
88 | |||
89 | addr += (nr >> SHIFT_PER_LONG); | ||
90 | _atomic_spin_lock_irqsave(addr, flags); | ||
91 | old = *addr; | ||
92 | set = (old & mask) ? 1 : 0; | ||
93 | if (set) | ||
94 | *addr = old & ~mask; | ||
95 | _atomic_spin_unlock_irqrestore(addr, flags); | ||
96 | |||
97 | return set; | ||
98 | } | ||
99 | |||
100 | static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr) | ||
101 | { | ||
102 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); | ||
103 | unsigned long oldbit; | ||
104 | unsigned long flags; | ||
105 | |||
106 | addr += (nr >> SHIFT_PER_LONG); | ||
107 | _atomic_spin_lock_irqsave(addr, flags); | ||
108 | oldbit = *addr; | ||
109 | *addr = oldbit ^ mask; | ||
110 | _atomic_spin_unlock_irqrestore(addr, flags); | ||
111 | |||
112 | return (oldbit & mask) ? 1 : 0; | ||
113 | } | ||
114 | |||
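All six helpers above share the same word/bit decomposition: the long that holds bit nr is addr + (nr >> SHIFT_PER_LONG), and the bit inside it is selected by CHOP_SHIFTCOUNT(nr). A minimal portable sketch of just that arithmetic (illustrative only; it omits the _atomic_spin_lock_irqsave()/_atomic_spin_unlock_irqrestore() pair the real helpers wrap around the read-modify-write):

        static inline void sketch_set_bit(int nr, volatile unsigned long *addr)
        {
                unsigned long mask = 1UL << (nr % BITS_PER_LONG);  /* CHOP_SHIFTCOUNT(nr) */

                addr += nr / BITS_PER_LONG;     /* nr >> SHIFT_PER_LONG */
                *addr |= mask;                  /* set_bit() holds the per-word spinlock here */
        }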
115 | #include <asm-generic/bitops/non-atomic.h> | ||
116 | |||
117 | #ifdef __KERNEL__ | ||
118 | |||
119 | /** | ||
120 | * __ffs - find first bit in word. returns 0 to "BITS_PER_LONG-1". | ||
121 | * @word: The word to search | ||
122 | * | ||
123 | * __ffs() return is undefined if no bit is set. | ||
124 | * | ||
125 | * 32-bit fast __ffs by LaMont Jones "lamont At hp com". | ||
126 | * 64-bit enhancement by Grant Grundler "grundler At parisc-linux org". | ||
127 | * (with help from willy/jejb to get the semantics right) | ||
128 | * | ||
129 | * This algorithm avoids branches by making use of nullification. | ||
130 | * One side effect of "extr" instructions is that they set the PSW[N] bit. | ||
131 | * How PSW[N] (nullify next insn) gets set is determined by the | ||
132 | * "condition" field (eg "<>" or "TR" below) in the extr* insn. | ||
133 | * Only the 1st and one of either the 2nd or 3rd insn will get executed. | ||
134 | * Each set of 3 insns will get executed in 2 cycles on PA8x00 vs 16 or so | ||
135 | * cycles for each mispredicted branch. | ||
136 | */ | ||
137 | |||
138 | static __inline__ unsigned long __ffs(unsigned long x) | ||
139 | { | ||
140 | unsigned long ret; | ||
141 | |||
142 | __asm__( | ||
143 | #ifdef CONFIG_64BIT | ||
144 | " ldi 63,%1\n" | ||
145 | " extrd,u,*<> %0,63,32,%%r0\n" | ||
146 | " extrd,u,*TR %0,31,32,%0\n" /* move top 32-bits down */ | ||
147 | " addi -32,%1,%1\n" | ||
148 | #else | ||
149 | " ldi 31,%1\n" | ||
150 | #endif | ||
151 | " extru,<> %0,31,16,%%r0\n" | ||
152 | " extru,TR %0,15,16,%0\n" /* xxxx0000 -> 0000xxxx */ | ||
153 | " addi -16,%1,%1\n" | ||
154 | " extru,<> %0,31,8,%%r0\n" | ||
155 | " extru,TR %0,23,8,%0\n" /* 0000xx00 -> 000000xx */ | ||
156 | " addi -8,%1,%1\n" | ||
157 | " extru,<> %0,31,4,%%r0\n" | ||
158 | " extru,TR %0,27,4,%0\n" /* 000000x0 -> 0000000x */ | ||
159 | " addi -4,%1,%1\n" | ||
160 | " extru,<> %0,31,2,%%r0\n" | ||
161 | " extru,TR %0,29,2,%0\n" /* 0000000y, 1100b -> 0011b */ | ||
162 | " addi -2,%1,%1\n" | ||
163 | " extru,= %0,31,1,%%r0\n" /* check last bit */ | ||
164 | " addi -1,%1,%1\n" | ||
165 | : "+r" (x), "=r" (ret) ); | ||
166 | return ret; | ||
167 | } | ||
168 | |||
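The same least-significant-bit search can be written as a portable binary chop that counts up from zero instead of down from 63/31; a 32-bit reference sketch (illustrative only, not part of the patch):

        static inline unsigned long sketch___ffs32(unsigned long x)
        {
                unsigned long ret = 0;

                /* like __ffs(), the result is undefined for x == 0 */
                if (!(x & 0xffff)) { x >>= 16; ret += 16; }
                if (!(x & 0x00ff)) { x >>=  8; ret +=  8; }
                if (!(x & 0x000f)) { x >>=  4; ret +=  4; }
                if (!(x & 0x0003)) { x >>=  2; ret +=  2; }
                if (!(x & 0x0001)) { ret += 1; }
                return ret;
        }

The CONFIG_64BIT asm adds one more halving step of the same shape, shifting the upper 32 bits down when the lower 32 are all zero.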
169 | #include <asm-generic/bitops/ffz.h> | ||
170 | |||
171 | /* | ||
172 | * ffs: find first bit set. returns 1 to 32, or 0 if no bit is set | ||
173 | * This is defined the same way as the libc and compiler builtin | ||
174 | * ffs routines, therefore differs in spirit from the above ffz (man ffs). | ||
175 | */ | ||
176 | static __inline__ int ffs(int x) | ||
177 | { | ||
178 | return x ? (__ffs((unsigned long)x) + 1) : 0; | ||
179 | } | ||
180 | |||
181 | /* | ||
182 | * fls: find last (most significant) bit set. | ||
183 | * fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. | ||
184 | */ | ||
185 | |||
186 | static __inline__ int fls(int x) | ||
187 | { | ||
188 | int ret; | ||
189 | if (!x) | ||
190 | return 0; | ||
191 | |||
192 | __asm__( | ||
193 | " ldi 1,%1\n" | ||
194 | " extru,<> %0,15,16,%%r0\n" | ||
195 | " zdep,TR %0,15,16,%0\n" /* xxxx0000 */ | ||
196 | " addi 16,%1,%1\n" | ||
197 | " extru,<> %0,7,8,%%r0\n" | ||
198 | " zdep,TR %0,23,24,%0\n" /* xx000000 */ | ||
199 | " addi 8,%1,%1\n" | ||
200 | " extru,<> %0,3,4,%%r0\n" | ||
201 | " zdep,TR %0,27,28,%0\n" /* x0000000 */ | ||
202 | " addi 4,%1,%1\n" | ||
203 | " extru,<> %0,1,2,%%r0\n" | ||
204 | " zdep,TR %0,29,30,%0\n" /* y0000000 (y&3 = 0) */ | ||
205 | " addi 2,%1,%1\n" | ||
206 | " extru,= %0,0,1,%%r0\n" | ||
207 | " addi 1,%1,%1\n" /* if y & 8, add 1 */ | ||
208 | : "+r" (x), "=r" (ret) ); | ||
209 | |||
210 | return ret; | ||
211 | } | ||
212 | |||
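A few concrete values tie the three helpers together (assuming a 32-bit int argument; listed for illustration only):

        __ffs(1) == 0      __ffs(0x80000000) == 31
          ffs(0) == 0        ffs(1) == 1        ffs(0x80000000) == 32
          fls(0) == 0        fls(1) == 1        fls(0x80000000) == 32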
213 | #include <asm-generic/bitops/__fls.h> | ||
214 | #include <asm-generic/bitops/fls64.h> | ||
215 | #include <asm-generic/bitops/hweight.h> | ||
216 | #include <asm-generic/bitops/lock.h> | ||
217 | #include <asm-generic/bitops/sched.h> | ||
218 | |||
219 | #endif /* __KERNEL__ */ | ||
220 | |||
221 | #include <asm-generic/bitops/find.h> | ||
222 | |||
223 | #ifdef __KERNEL__ | ||
224 | |||
225 | #include <asm-generic/bitops/ext2-non-atomic.h> | ||
226 | |||
227 | /* the '<< 3' converts a byte count to a bit count (8 bits per byte) */ | ||
228 | #define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3) | ||
229 | |||
230 | #define ext2_set_bit_atomic(l,nr,addr) \ | ||
231 | test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr) | ||
232 | #define ext2_clear_bit_atomic(l,nr,addr) \ | ||
233 | test_and_clear_bit( (nr) ^ LE_BYTE_ADDR, (unsigned long *)addr) | ||
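The XOR with LE_BYTE_ADDR turns a little-endian ext2 bit number into the index of the same memory bit within the big-endian long that test_and_set_bit()/test_and_clear_bit() operate on: it complements the byte-within-word bits of nr while leaving the bit-within-byte and word-index bits alone. For the 64-bit case (LE_BYTE_ADDR == 56), two illustrative values:

        nr = 0  ->  0 ^ 56 == 56   /* bit 0 of the lowest-addressed byte of word 0 */
        nr = 9  ->  9 ^ 56 == 49   /* bit 1 of the second byte, exactly where ext2 keeps it */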
234 | |||
235 | #endif /* __KERNEL__ */ | ||
236 | |||
237 | #include <asm-generic/bitops/minix-le.h> | ||
238 | |||
239 | #endif /* _PARISC_BITOPS_H */ | ||
diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h new file mode 100644 index 000000000000..8cfc553fc837 --- /dev/null +++ b/arch/parisc/include/asm/bug.h | |||
@@ -0,0 +1,92 @@ | |||
1 | #ifndef _PARISC_BUG_H | ||
2 | #define _PARISC_BUG_H | ||
3 | |||
4 | /* | ||
5 | * Tell the user there is some problem. | ||
6 | * The offending file and line are encoded in the __bug_table section. | ||
7 | */ | ||
8 | |||
9 | #ifdef CONFIG_BUG | ||
10 | #define HAVE_ARCH_BUG | ||
11 | #define HAVE_ARCH_WARN_ON | ||
12 | |||
13 | /* the break instruction is used as the BUG() marker. */ | ||
14 | #define PARISC_BUG_BREAK_ASM "break 0x1f, 0x1fff" | ||
15 | #define PARISC_BUG_BREAK_INSN 0x03ffe01f /* PARISC_BUG_BREAK_ASM */ | ||
16 | |||
17 | #if defined(CONFIG_64BIT) | ||
18 | #define ASM_WORD_INSN ".dword\t" | ||
19 | #else | ||
20 | #define ASM_WORD_INSN ".word\t" | ||
21 | #endif | ||
22 | |||
23 | #ifdef CONFIG_DEBUG_BUGVERBOSE | ||
24 | #define BUG() \ | ||
25 | do { \ | ||
26 | asm volatile("\n" \ | ||
27 | "1:\t" PARISC_BUG_BREAK_ASM "\n" \ | ||
28 | "\t.pushsection __bug_table,\"a\"\n" \ | ||
29 | "2:\t" ASM_WORD_INSN "1b, %c0\n" \ | ||
30 | "\t.short %c1, %c2\n" \ | ||
31 | "\t.org 2b+%c3\n" \ | ||
32 | "\t.popsection" \ | ||
33 | : : "i" (__FILE__), "i" (__LINE__), \ | ||
34 | "i" (0), "i" (sizeof(struct bug_entry)) ); \ | ||
35 | for(;;) ; \ | ||
36 | } while(0) | ||
37 | |||
38 | #else | ||
39 | #define BUG() \ | ||
40 | do { \ | ||
41 | asm volatile(PARISC_BUG_BREAK_ASM : : ); \ | ||
42 | for(;;) ; \ | ||
43 | } while(0) | ||
44 | #endif | ||
45 | |||
46 | #ifdef CONFIG_DEBUG_BUGVERBOSE | ||
47 | #define __WARN() \ | ||
48 | do { \ | ||
49 | asm volatile("\n" \ | ||
50 | "1:\t" PARISC_BUG_BREAK_ASM "\n" \ | ||
51 | "\t.pushsection __bug_table,\"a\"\n" \ | ||
52 | "2:\t" ASM_WORD_INSN "1b, %c0\n" \ | ||
53 | "\t.short %c1, %c2\n" \ | ||
54 | "\t.org 2b+%c3\n" \ | ||
55 | "\t.popsection" \ | ||
56 | : : "i" (__FILE__), "i" (__LINE__), \ | ||
57 | "i" (BUGFLAG_WARNING), \ | ||
58 | "i" (sizeof(struct bug_entry)) ); \ | ||
59 | } while(0) | ||
60 | #else | ||
61 | #define __WARN() \ | ||
62 | do { \ | ||
63 | asm volatile("\n" \ | ||
64 | "1:\t" PARISC_BUG_BREAK_ASM "\n" \ | ||
65 | "\t.pushsection __bug_table,\"a\"\n" \ | ||
66 | "2:\t" ASM_WORD_INSN "1b\n" \ | ||
67 | "\t.short %c0\n" \ | ||
68 | "\t.org 2b+%c1\n" \ | ||
69 | "\t.popsection" \ | ||
70 | : : "i" (BUGFLAG_WARNING), \ | ||
71 | "i" (sizeof(struct bug_entry)) ); \ | ||
72 | } while(0) | ||
73 | #endif | ||
74 | |||
75 | |||
76 | #define WARN_ON(x) ({ \ | ||
77 | int __ret_warn_on = !!(x); \ | ||
78 | if (__builtin_constant_p(__ret_warn_on)) { \ | ||
79 | if (__ret_warn_on) \ | ||
80 | __WARN(); \ | ||
81 | } else { \ | ||
82 | if (unlikely(__ret_warn_on)) \ | ||
83 | __WARN(); \ | ||
84 | } \ | ||
85 | unlikely(__ret_warn_on); \ | ||
86 | }) | ||
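Because the macro evaluates to the (unlikely-annotated) truth value of its argument, callers can report and branch on a condition in one step; a usage sketch with illustrative names:

        if (WARN_ON(len > MAX_LEN))     /* records file/line via the break insn, then continues */
                return -EINVAL;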
87 | |||
88 | #endif | ||
89 | |||
90 | #include <asm-generic/bug.h> | ||
91 | #endif | ||
92 | |||
diff --git a/arch/parisc/include/asm/bugs.h b/arch/parisc/include/asm/bugs.h new file mode 100644 index 000000000000..9e6284342a5f --- /dev/null +++ b/arch/parisc/include/asm/bugs.h | |||
@@ -0,0 +1,19 @@ | |||
1 | /* | ||
2 | * include/asm-parisc/bugs.h | ||
3 | * | ||
4 | * Copyright (C) 1999 Mike Shaver | ||
5 | */ | ||
6 | |||
7 | /* | ||
8 | * This is included by init/main.c to check for architecture-dependent bugs. | ||
9 | * | ||
10 | * Needs: | ||
11 | * void check_bugs(void); | ||
12 | */ | ||
13 | |||
14 | #include <asm/processor.h> | ||
15 | |||
16 | static inline void check_bugs(void) | ||
17 | { | ||
18 | // identify_cpu(&boot_cpu_data); | ||
19 | } | ||
diff --git a/arch/parisc/include/asm/byteorder.h b/arch/parisc/include/asm/byteorder.h new file mode 100644 index 000000000000..db148313de5d --- /dev/null +++ b/arch/parisc/include/asm/byteorder.h | |||
@@ -0,0 +1,82 @@ | |||
1 | #ifndef _PARISC_BYTEORDER_H | ||
2 | #define _PARISC_BYTEORDER_H | ||
3 | |||
4 | #include <asm/types.h> | ||
5 | #include <linux/compiler.h> | ||
6 | |||
7 | #ifdef __GNUC__ | ||
8 | |||
9 | static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 x) | ||
10 | { | ||
11 | __asm__("dep %0, 15, 8, %0\n\t" /* deposit 00ab -> 0bab */ | ||
12 | "shd %%r0, %0, 8, %0" /* shift 000000ab -> 00ba */ | ||
13 | : "=r" (x) | ||
14 | : "0" (x)); | ||
15 | return x; | ||
16 | } | ||
17 | |||
18 | static __inline__ __attribute_const__ __u32 ___arch__swab24(__u32 x) | ||
19 | { | ||
20 | __asm__("shd %0, %0, 8, %0\n\t" /* shift xabcxabc -> cxab */ | ||
21 | "dep %0, 15, 8, %0\n\t" /* deposit cxab -> cbab */ | ||
22 | "shd %%r0, %0, 8, %0" /* shift 0000cbab -> 0cba */ | ||
23 | : "=r" (x) | ||
24 | : "0" (x)); | ||
25 | return x; | ||
26 | } | ||
27 | |||
28 | static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x) | ||
29 | { | ||
30 | unsigned int temp; | ||
31 | __asm__("shd %0, %0, 16, %1\n\t" /* shift abcdabcd -> cdab */ | ||
32 | "dep %1, 15, 8, %1\n\t" /* deposit cdab -> cbab */ | ||
33 | "shd %0, %1, 8, %0" /* shift abcdcbab -> dcba */ | ||
34 | : "=r" (x), "=&r" (temp) | ||
35 | : "0" (x)); | ||
36 | return x; | ||
37 | } | ||
38 | |||
39 | |||
40 | #if BITS_PER_LONG > 32 | ||
41 | /* | ||
42 | ** From "PA-RISC 2.0 Architecture", HP Professional Books. | ||
43 | ** See Appendix I page 8 , "Endian Byte Swapping". | ||
44 | ** | ||
45 | ** Pretty cool algorithm: (* == zero'd bits) | ||
46 | ** PERMH 01234567 -> 67452301 into %0 | ||
47 | ** HSHL 67452301 -> 7*5*3*1* into %1 | ||
48 | ** HSHR 67452301 -> *6*4*2*0 into %0 | ||
49 | ** OR %0 | %1 -> 76543210 into %0 (all done!) | ||
50 | */ | ||
51 | static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x) { | ||
52 | __u64 temp; | ||
53 | __asm__("permh,3210 %0, %0\n\t" | ||
54 | "hshl %0, 8, %1\n\t" | ||
55 | "hshr,u %0, 8, %0\n\t" | ||
56 | "or %1, %0, %0" | ||
57 | : "=r" (x), "=&r" (temp) | ||
58 | : "0" (x)); | ||
59 | return x; | ||
60 | } | ||
61 | #define __arch__swab64(x) ___arch__swab64(x) | ||
62 | #define __BYTEORDER_HAS_U64__ | ||
63 | #elif !defined(__STRICT_ANSI__) | ||
64 | static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x) | ||
65 | { | ||
66 | __u32 t1 = ___arch__swab32((__u32) x); | ||
67 | __u32 t2 = ___arch__swab32((__u32) (x >> 32)); | ||
68 | return (((__u64) t1 << 32) | t2); | ||
69 | } | ||
70 | #define __arch__swab64(x) ___arch__swab64(x) | ||
71 | #define __BYTEORDER_HAS_U64__ | ||
72 | #endif | ||
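The asm above can be checked against a portable equivalent; this sketch (illustrative only, not part of the patch) performs the same two stages the comment describes -- reorder the four halfwords, then swap the bytes inside each halfword:

        static inline __u64 sketch_swab64(__u64 x)
        {
                /* permh,3210: halfwords 0123 -> 3210 (bytes 01234567 -> 67452301) */
                x = ((x & 0xffff000000000000ULL) >> 48) |
                    ((x & 0x0000ffff00000000ULL) >> 16) |
                    ((x & 0x00000000ffff0000ULL) << 16) |
                    ((x & 0x000000000000ffffULL) << 48);
                /* hshl + hshr,u + or: swap the two bytes inside every halfword */
                return ((x & 0xff00ff00ff00ff00ULL) >> 8) |
                       ((x & 0x00ff00ff00ff00ffULL) << 8);
        }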
73 | |||
74 | #define __arch__swab16(x) ___arch__swab16(x) | ||
75 | #define __arch__swab24(x) ___arch__swab24(x) | ||
76 | #define __arch__swab32(x) ___arch__swab32(x) | ||
77 | |||
78 | #endif /* __GNUC__ */ | ||
79 | |||
80 | #include <linux/byteorder/big_endian.h> | ||
81 | |||
82 | #endif /* _PARISC_BYTEORDER_H */ | ||
diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h new file mode 100644 index 000000000000..32c2cca74345 --- /dev/null +++ b/arch/parisc/include/asm/cache.h | |||
@@ -0,0 +1,60 @@ | |||
1 | /* | ||
2 | * include/asm-parisc/cache.h | ||
3 | */ | ||
4 | |||
5 | #ifndef __ARCH_PARISC_CACHE_H | ||
6 | #define __ARCH_PARISC_CACHE_H | ||
7 | |||
8 | |||
9 | /* | ||
10 | * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have | ||
11 | * 32-byte cachelines. The default configuration is not for SMP anyway, | ||
12 | * so if you're building for SMP, you should select the appropriate | ||
13 | * processor type. There is a potential livelock danger when running | ||
14 | * a machine with this value set too small, but it's more probable you'll | ||
15 | * just ruin performance. | ||
16 | */ | ||
17 | #ifdef CONFIG_PA20 | ||
18 | #define L1_CACHE_BYTES 64 | ||
19 | #define L1_CACHE_SHIFT 6 | ||
20 | #else | ||
21 | #define L1_CACHE_BYTES 32 | ||
22 | #define L1_CACHE_SHIFT 5 | ||
23 | #endif | ||
24 | |||
25 | #ifndef __ASSEMBLY__ | ||
26 | |||
27 | #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)) | ||
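L1_CACHE_ALIGN() rounds a size or address up to the next cacheline boundary, so with 64-byte lines (CONFIG_PA20) L1_CACHE_ALIGN(100) == 128 and L1_CACHE_ALIGN(128) == 128, while with 32-byte lines L1_CACHE_ALIGN(100) is also 128 (the next multiple of 32).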
28 | |||
29 | #define SMP_CACHE_BYTES L1_CACHE_BYTES | ||
30 | |||
31 | #define __read_mostly __attribute__((__section__(".data.read_mostly"))) | ||
32 | |||
33 | void parisc_cache_init(void); /* initializes cache-flushing */ | ||
34 | void disable_sr_hashing_asm(int); /* low level support for above */ | ||
35 | void disable_sr_hashing(void); /* turns off space register hashing */ | ||
36 | void free_sid(unsigned long); | ||
37 | unsigned long alloc_sid(void); | ||
38 | |||
39 | struct seq_file; | ||
40 | extern void show_cache_info(struct seq_file *m); | ||
41 | |||
42 | extern int split_tlb; | ||
43 | extern int dcache_stride; | ||
44 | extern int icache_stride; | ||
45 | extern struct pdc_cache_info cache_info; | ||
46 | void parisc_setup_cache_timing(void); | ||
47 | |||
48 | #define pdtlb(addr) asm volatile("pdtlb 0(%%sr1,%0)" : : "r" (addr)); | ||
49 | #define pitlb(addr) asm volatile("pitlb 0(%%sr1,%0)" : : "r" (addr)); | ||
50 | #define pdtlb_kernel(addr) asm volatile("pdtlb 0(%0)" : : "r" (addr)); | ||
51 | |||
52 | #endif /* ! __ASSEMBLY__ */ | ||
53 | |||
54 | /* Classes of processor wrt: disabling space register hashing */ | ||
55 | |||
56 | #define SRHASH_PCXST 0 /* pcxs, pcxt, pcxt_ */ | ||
57 | #define SRHASH_PCXL 1 /* pcxl */ | ||
58 | #define SRHASH_PA20 2 /* pcxu, pcxu_, pcxw, pcxw_ */ | ||
59 | |||
60 | #endif | ||
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h new file mode 100644 index 000000000000..b7ca6dc7fddc --- /dev/null +++ b/arch/parisc/include/asm/cacheflush.h | |||
@@ -0,0 +1,121 @@ | |||
1 | #ifndef _PARISC_CACHEFLUSH_H | ||
2 | #define _PARISC_CACHEFLUSH_H | ||
3 | |||
4 | #include <linux/mm.h> | ||
5 | |||
6 | /* The usual comment is "Caches aren't brain-dead on the <architecture>". | ||
7 | * Unfortunately, that doesn't apply to PA-RISC. */ | ||
8 | |||
9 | /* Internal implementation */ | ||
10 | void flush_data_cache_local(void *); /* flushes local data-cache only */ | ||
11 | void flush_instruction_cache_local(void *); /* flushes local code-cache only */ | ||
12 | #ifdef CONFIG_SMP | ||
13 | void flush_data_cache(void); /* flushes data-cache only (all processors) */ | ||
14 | void flush_instruction_cache(void); /* flushes i-cache only (all processors) */ | ||
15 | #else | ||
16 | #define flush_data_cache() flush_data_cache_local(NULL) | ||
17 | #define flush_instruction_cache() flush_instruction_cache_local(NULL) | ||
18 | #endif | ||
19 | |||
20 | #define flush_cache_dup_mm(mm) flush_cache_mm(mm) | ||
21 | |||
22 | void flush_user_icache_range_asm(unsigned long, unsigned long); | ||
23 | void flush_kernel_icache_range_asm(unsigned long, unsigned long); | ||
24 | void flush_user_dcache_range_asm(unsigned long, unsigned long); | ||
25 | void flush_kernel_dcache_range_asm(unsigned long, unsigned long); | ||
26 | void flush_kernel_dcache_page_asm(void *); | ||
27 | void flush_kernel_icache_page(void *); | ||
28 | void flush_user_dcache_page(unsigned long); | ||
29 | void flush_user_icache_page(unsigned long); | ||
30 | void flush_user_dcache_range(unsigned long, unsigned long); | ||
31 | void flush_user_icache_range(unsigned long, unsigned long); | ||
32 | |||
33 | /* Cache flush operations */ | ||
34 | |||
35 | void flush_cache_all_local(void); | ||
36 | void flush_cache_all(void); | ||
37 | void flush_cache_mm(struct mm_struct *mm); | ||
38 | |||
39 | #define flush_kernel_dcache_range(start,size) \ | ||
40 | flush_kernel_dcache_range_asm((start), (start)+(size)); | ||
41 | |||
42 | #define flush_cache_vmap(start, end) flush_cache_all() | ||
43 | #define flush_cache_vunmap(start, end) flush_cache_all() | ||
44 | |||
45 | extern void flush_dcache_page(struct page *page); | ||
46 | |||
47 | #define flush_dcache_mmap_lock(mapping) \ | ||
48 | spin_lock_irq(&(mapping)->tree_lock) | ||
49 | #define flush_dcache_mmap_unlock(mapping) \ | ||
50 | spin_unlock_irq(&(mapping)->tree_lock) | ||
51 | |||
52 | #define flush_icache_page(vma,page) do { \ | ||
53 | flush_kernel_dcache_page(page); \ | ||
54 | flush_kernel_icache_page(page_address(page)); \ | ||
55 | } while (0) | ||
56 | |||
57 | #define flush_icache_range(s,e) do { \ | ||
58 | flush_kernel_dcache_range_asm(s,e); \ | ||
59 | flush_kernel_icache_range_asm(s,e); \ | ||
60 | } while (0) | ||
61 | |||
62 | #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ | ||
63 | do { \ | ||
64 | flush_cache_page(vma, vaddr, page_to_pfn(page)); \ | ||
65 | memcpy(dst, src, len); \ | ||
66 | flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \ | ||
67 | } while (0) | ||
68 | |||
69 | #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ | ||
70 | do { \ | ||
71 | flush_cache_page(vma, vaddr, page_to_pfn(page)); \ | ||
72 | memcpy(dst, src, len); \ | ||
73 | } while (0) | ||
74 | |||
75 | void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn); | ||
76 | void flush_cache_range(struct vm_area_struct *vma, | ||
77 | unsigned long start, unsigned long end); | ||
78 | |||
79 | #define ARCH_HAS_FLUSH_ANON_PAGE | ||
80 | static inline void | ||
81 | flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) | ||
82 | { | ||
83 | if (PageAnon(page)) | ||
84 | flush_user_dcache_page(vmaddr); | ||
85 | } | ||
86 | |||
87 | #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE | ||
88 | void flush_kernel_dcache_page_addr(void *addr); | ||
89 | static inline void flush_kernel_dcache_page(struct page *page) | ||
90 | { | ||
91 | flush_kernel_dcache_page_addr(page_address(page)); | ||
92 | } | ||
93 | |||
94 | #ifdef CONFIG_DEBUG_RODATA | ||
95 | void mark_rodata_ro(void); | ||
96 | #endif | ||
97 | |||
98 | #ifdef CONFIG_PA8X00 | ||
99 | /* Only pa8800, pa8900 need this */ | ||
100 | #define ARCH_HAS_KMAP | ||
101 | |||
102 | void kunmap_parisc(void *addr); | ||
103 | |||
104 | static inline void *kmap(struct page *page) | ||
105 | { | ||
106 | might_sleep(); | ||
107 | return page_address(page); | ||
108 | } | ||
109 | |||
110 | #define kunmap(page) kunmap_parisc(page_address(page)) | ||
111 | |||
112 | #define kmap_atomic(page, idx) page_address(page) | ||
113 | |||
114 | #define kunmap_atomic(addr, idx) kunmap_parisc(addr) | ||
115 | |||
116 | #define kmap_atomic_pfn(pfn, idx) page_address(pfn_to_page(pfn)) | ||
117 | #define kmap_atomic_to_page(ptr) virt_to_page(ptr) | ||
118 | #endif | ||
119 | |||
120 | #endif /* _PARISC_CACHEFLUSH_H */ | ||
121 | |||
diff --git a/arch/parisc/include/asm/checksum.h b/arch/parisc/include/asm/checksum.h new file mode 100644 index 000000000000..e9639ccc3fce --- /dev/null +++ b/arch/parisc/include/asm/checksum.h | |||
@@ -0,0 +1,210 @@ | |||
1 | #ifndef _PARISC_CHECKSUM_H | ||
2 | #define _PARISC_CHECKSUM_H | ||
3 | |||
4 | #include <linux/in6.h> | ||
5 | |||
6 | /* | ||
7 | * computes the checksum of a memory block at buff, length len, | ||
8 | * and adds in "sum" (32-bit) | ||
9 | * | ||
10 | * returns a 32-bit number suitable for feeding into itself | ||
11 | * or csum_tcpudp_magic | ||
12 | * | ||
13 | * this function must be called with even lengths, except | ||
14 | * for the last fragment, which may be odd | ||
15 | * | ||
16 | * it's best to have buff aligned on a 32-bit boundary | ||
17 | */ | ||
18 | extern __wsum csum_partial(const void *, int, __wsum); | ||
19 | |||
20 | /* | ||
21 | * The same as csum_partial, but copies from src while it checksums. | ||
22 | * | ||
23 | * Here it is even more important to align src and dst on a 32-bit (or, | ||
24 | * even better, a 64-bit) boundary. | ||
25 | */ | ||
26 | extern __wsum csum_partial_copy_nocheck(const void *, void *, int, __wsum); | ||
27 | |||
28 | /* | ||
29 | * this is a new version of the above that records errors it finds in *errp, | ||
30 | * but continues and zeros the rest of the buffer. | ||
31 | */ | ||
32 | extern __wsum csum_partial_copy_from_user(const void __user *src, | ||
33 | void *dst, int len, __wsum sum, int *errp); | ||
34 | |||
35 | /* | ||
36 | * Optimized for IP headers, which always checksum on 4 octet boundaries. | ||
37 | * | ||
38 | * Written by Randolph Chung <tausq@debian.org>, and then mucked with by | ||
39 | * LaMont Jones <lamont@debian.org> | ||
40 | */ | ||
41 | static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) | ||
42 | { | ||
43 | unsigned int sum; | ||
44 | |||
45 | __asm__ __volatile__ ( | ||
46 | " ldws,ma 4(%1), %0\n" | ||
47 | " addib,<= -4, %2, 2f\n" | ||
48 | "\n" | ||
49 | " ldws 4(%1), %%r20\n" | ||
50 | " ldws 8(%1), %%r21\n" | ||
51 | " add %0, %%r20, %0\n" | ||
52 | " ldws,ma 12(%1), %%r19\n" | ||
53 | " addc %0, %%r21, %0\n" | ||
54 | " addc %0, %%r19, %0\n" | ||
55 | "1: ldws,ma 4(%1), %%r19\n" | ||
56 | " addib,< 0, %2, 1b\n" | ||
57 | " addc %0, %%r19, %0\n" | ||
58 | "\n" | ||
59 | " extru %0, 31, 16, %%r20\n" | ||
60 | " extru %0, 15, 16, %%r21\n" | ||
61 | " addc %%r20, %%r21, %0\n" | ||
62 | " extru %0, 15, 16, %%r21\n" | ||
63 | " add %0, %%r21, %0\n" | ||
64 | " subi -1, %0, %0\n" | ||
65 | "2:\n" | ||
66 | : "=r" (sum), "=r" (iph), "=r" (ihl) | ||
67 | : "1" (iph), "2" (ihl) | ||
68 | : "r19", "r20", "r21", "memory"); | ||
69 | |||
70 | return (__force __sum16)sum; | ||
71 | } | ||
72 | |||
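The asm computes the standard Internet checksum over ihl 32-bit words; a portable reference sketch (illustrative only, not part of the patch) makes the end-around-carry structure explicit:

        static inline __sum16 sketch_ip_fast_csum(const void *iph, unsigned int ihl)
        {
                const __u32 *word = iph;
                __u64 sum = 0;

                while (ihl--)           /* ihl <= 15, so a 64-bit accumulator cannot overflow */
                        sum += *word++;
                while (sum >> 16)       /* fold the carries back in until 16 bits remain */
                        sum = (sum & 0xffff) + (sum >> 16);
                return (__force __sum16)(~sum & 0xffff);
        }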
73 | /* | ||
74 | * Fold a partial checksum | ||
75 | */ | ||
76 | static inline __sum16 csum_fold(__wsum csum) | ||
77 | { | ||
78 | u32 sum = (__force u32)csum; | ||
79 | /* add the swapped two 16-bit halves of sum; | ||
80 | a possible carry from adding the two 16-bit halves | ||
81 | will propagate from the lower half into the upper half, | ||
82 | giving us the correct sum in the upper half. */ | ||
83 | sum += (sum << 16) + (sum >> 16); | ||
84 | return (__force __sum16)(~sum >> 16); | ||
85 | } | ||
86 | |||
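A worked example of the fold (illustrative): for csum = 0x1a2b3c4d, sum + (sum << 16) + (sum >> 16) wraps to 0x1a2b3c4d + 0x3c4d0000 + 0x00001a2b = 0x56785678, and ~0x56785678 >> 16 = 0xa987 -- the same result as the conventional fold 0x1a2b + 0x3c4d = 0x5678 followed by a one's complement.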
87 | static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, | ||
88 | unsigned short len, | ||
89 | unsigned short proto, | ||
90 | __wsum sum) | ||
91 | { | ||
92 | __asm__( | ||
93 | " add %1, %0, %0\n" | ||
94 | " addc %2, %0, %0\n" | ||
95 | " addc %3, %0, %0\n" | ||
96 | " addc %%r0, %0, %0\n" | ||
97 | : "=r" (sum) | ||
98 | : "r" (daddr), "r"(saddr), "r"(proto+len), "0"(sum)); | ||
99 | return sum; | ||
100 | } | ||
101 | |||
102 | /* | ||
103 | * computes the checksum of the TCP/UDP pseudo-header | ||
104 | * returns a 16-bit checksum, already complemented | ||
105 | */ | ||
106 | static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, | ||
107 | unsigned short len, | ||
108 | unsigned short proto, | ||
109 | __wsum sum) | ||
110 | { | ||
111 | return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); | ||
112 | } | ||
113 | |||
114 | /* | ||
115 | * this routine is used for miscellaneous IP-like checksums, mainly | ||
116 | * in icmp.c | ||
117 | */ | ||
118 | static inline __sum16 ip_compute_csum(const void *buf, int len) | ||
119 | { | ||
120 | return csum_fold (csum_partial(buf, len, 0)); | ||
121 | } | ||
122 | |||
123 | |||
124 | #define _HAVE_ARCH_IPV6_CSUM | ||
125 | static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, | ||
126 | const struct in6_addr *daddr, | ||
127 | __u32 len, unsigned short proto, | ||
128 | __wsum sum) | ||
129 | { | ||
130 | __asm__ __volatile__ ( | ||
131 | |||
132 | #if BITS_PER_LONG > 32 | ||
133 | |||
134 | /* | ||
135 | ** We can execute two loads and two adds per cycle on PA 8000. | ||
136 | ** But add insn's get serialized waiting for the carry bit. | ||
137 | ** Try to keep 4 registers with "live" values ahead of the ALU. | ||
138 | */ | ||
139 | |||
140 | " ldd,ma 8(%1), %%r19\n" /* get 1st saddr word */ | ||
141 | " ldd,ma 8(%2), %%r20\n" /* get 1st daddr word */ | ||
142 | " add %8, %3, %3\n"/* add 16-bit proto + len */ | ||
143 | " add %%r19, %0, %0\n" | ||
144 | "	ldd,ma 8(%1), %%r21\n"	/* 2nd saddr */ | ||
145 | "	ldd,ma 8(%2), %%r22\n"	/* 2nd daddr */ | ||
146 | " add,dc %%r20, %0, %0\n" | ||
147 | " add,dc %%r21, %0, %0\n" | ||
148 | " add,dc %%r22, %0, %0\n" | ||
149 | " add,dc %3, %0, %0\n" /* fold in proto+len | carry bit */ | ||
150 | " extrd,u %0, 31, 32, %%r19\n" /* copy upper half down */ | ||
151 | " depdi 0, 31, 32, %0\n" /* clear upper half */ | ||
152 | " add %%r19, %0, %0\n" /* fold into 32-bits */ | ||
153 | " addc 0, %0, %0\n" /* add carry */ | ||
154 | |||
155 | #else | ||
156 | |||
157 | /* | ||
158 | ** For PA 1.x, the insn order doesn't matter as much. | ||
159 | ** Insn stream is serialized on the carry bit here too. | ||
160 | ** result from the previous operation (eg r0 + x) | ||
161 | */ | ||
162 | |||
163 | " ldw,ma 4(%1), %%r19\n" /* get 1st saddr word */ | ||
164 | " ldw,ma 4(%2), %%r20\n" /* get 1st daddr word */ | ||
165 | " add %8, %3, %3\n" /* add 16-bit proto + len */ | ||
166 | " add %%r19, %0, %0\n" | ||
167 | "	ldw,ma 4(%1), %%r21\n"	/* 2nd saddr */ | ||
168 | "	addc %%r20, %0, %0\n" | ||
169 | "	ldw,ma 4(%2), %%r22\n"	/* 2nd daddr */ | ||
170 | " addc %%r21, %0, %0\n" | ||
171 | " ldw,ma 4(%1), %%r19\n" /* 3rd saddr */ | ||
172 | " addc %%r22, %0, %0\n" | ||
173 | " ldw,ma 4(%2), %%r20\n" /* 3rd daddr */ | ||
174 | " addc %%r19, %0, %0\n" | ||
175 | " ldw,ma 4(%1), %%r21\n" /* 4th saddr */ | ||
176 | " addc %%r20, %0, %0\n" | ||
177 | " ldw,ma 4(%2), %%r22\n" /* 4th daddr */ | ||
178 | " addc %%r21, %0, %0\n" | ||
179 | " addc %%r22, %0, %0\n" | ||
180 | " addc %3, %0, %0\n" /* fold in proto+len, catch carry */ | ||
181 | |||
182 | #endif | ||
183 | : "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len) | ||
184 | : "0" (sum), "1" (saddr), "2" (daddr), "3" (len), "r" (proto) | ||
185 | : "r19", "r20", "r21", "r22"); | ||
186 | return csum_fold(sum); | ||
187 | } | ||
188 | |||
189 | /* | ||
190 | * Copy and checksum to user | ||
191 | */ | ||
192 | #define HAVE_CSUM_COPY_USER | ||
193 | static __inline__ __wsum csum_and_copy_to_user(const void *src, | ||
194 | void __user *dst, | ||
195 | int len, __wsum sum, | ||
196 | int *err_ptr) | ||
197 | { | ||
198 | /* code stolen from include/asm-mips64 */ | ||
199 | sum = csum_partial(src, len, sum); | ||
200 | |||
201 | if (copy_to_user(dst, src, len)) { | ||
202 | *err_ptr = -EFAULT; | ||
203 | return (__force __wsum)-1; | ||
204 | } | ||
205 | |||
206 | return sum; | ||
207 | } | ||
208 | |||
209 | #endif | ||
210 | |||
diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h new file mode 100644 index 000000000000..7f32611a7a5e --- /dev/null +++ b/arch/parisc/include/asm/compat.h | |||
@@ -0,0 +1,165 @@ | |||
1 | #ifndef _ASM_PARISC_COMPAT_H | ||
2 | #define _ASM_PARISC_COMPAT_H | ||
3 | /* | ||
4 | * Architecture specific compatibility types | ||
5 | */ | ||
6 | #include <linux/types.h> | ||
7 | #include <linux/sched.h> | ||
8 | #include <linux/thread_info.h> | ||
9 | |||
10 | #define COMPAT_USER_HZ 100 | ||
11 | |||
12 | typedef u32 compat_size_t; | ||
13 | typedef s32 compat_ssize_t; | ||
14 | typedef s32 compat_time_t; | ||
15 | typedef s32 compat_clock_t; | ||
16 | typedef s32 compat_pid_t; | ||
17 | typedef u32 __compat_uid_t; | ||
18 | typedef u32 __compat_gid_t; | ||
19 | typedef u32 __compat_uid32_t; | ||
20 | typedef u32 __compat_gid32_t; | ||
21 | typedef u16 compat_mode_t; | ||
22 | typedef u32 compat_ino_t; | ||
23 | typedef u32 compat_dev_t; | ||
24 | typedef s32 compat_off_t; | ||
25 | typedef s64 compat_loff_t; | ||
26 | typedef u16 compat_nlink_t; | ||
27 | typedef u16 compat_ipc_pid_t; | ||
28 | typedef s32 compat_daddr_t; | ||
29 | typedef u32 compat_caddr_t; | ||
30 | typedef s32 compat_timer_t; | ||
31 | |||
32 | typedef s32 compat_int_t; | ||
33 | typedef s32 compat_long_t; | ||
34 | typedef s64 compat_s64; | ||
35 | typedef u32 compat_uint_t; | ||
36 | typedef u32 compat_ulong_t; | ||
37 | typedef u64 compat_u64; | ||
38 | |||
39 | struct compat_timespec { | ||
40 | compat_time_t tv_sec; | ||
41 | s32 tv_nsec; | ||
42 | }; | ||
43 | |||
44 | struct compat_timeval { | ||
45 | compat_time_t tv_sec; | ||
46 | s32 tv_usec; | ||
47 | }; | ||
48 | |||
49 | struct compat_stat { | ||
50 | compat_dev_t st_dev; /* dev_t is 32 bits on parisc */ | ||
51 | compat_ino_t st_ino; /* 32 bits */ | ||
52 | compat_mode_t st_mode; /* 16 bits */ | ||
53 | compat_nlink_t st_nlink; /* 16 bits */ | ||
54 | u16 st_reserved1; /* old st_uid */ | ||
55 | u16 st_reserved2; /* old st_gid */ | ||
56 | compat_dev_t st_rdev; | ||
57 | compat_off_t st_size; | ||
58 | compat_time_t st_atime; | ||
59 | u32 st_atime_nsec; | ||
60 | compat_time_t st_mtime; | ||
61 | u32 st_mtime_nsec; | ||
62 | compat_time_t st_ctime; | ||
63 | u32 st_ctime_nsec; | ||
64 | s32 st_blksize; | ||
65 | s32 st_blocks; | ||
66 | u32 __unused1; /* ACL stuff */ | ||
67 | compat_dev_t __unused2; /* network */ | ||
68 | compat_ino_t __unused3; /* network */ | ||
69 | u32 __unused4; /* cnodes */ | ||
70 | u16 __unused5; /* netsite */ | ||
71 | short st_fstype; | ||
72 | compat_dev_t st_realdev; | ||
73 | u16 st_basemode; | ||
74 | u16 st_spareshort; | ||
75 | __compat_uid32_t st_uid; | ||
76 | __compat_gid32_t st_gid; | ||
77 | u32 st_spare4[3]; | ||
78 | }; | ||
79 | |||
80 | struct compat_flock { | ||
81 | short l_type; | ||
82 | short l_whence; | ||
83 | compat_off_t l_start; | ||
84 | compat_off_t l_len; | ||
85 | compat_pid_t l_pid; | ||
86 | }; | ||
87 | |||
88 | struct compat_flock64 { | ||
89 | short l_type; | ||
90 | short l_whence; | ||
91 | compat_loff_t l_start; | ||
92 | compat_loff_t l_len; | ||
93 | compat_pid_t l_pid; | ||
94 | }; | ||
95 | |||
96 | struct compat_statfs { | ||
97 | s32 f_type; | ||
98 | s32 f_bsize; | ||
99 | s32 f_blocks; | ||
100 | s32 f_bfree; | ||
101 | s32 f_bavail; | ||
102 | s32 f_files; | ||
103 | s32 f_ffree; | ||
104 | __kernel_fsid_t f_fsid; | ||
105 | s32 f_namelen; | ||
106 | s32 f_frsize; | ||
107 | s32 f_spare[5]; | ||
108 | }; | ||
109 | |||
110 | struct compat_sigcontext { | ||
111 | compat_int_t sc_flags; | ||
112 | compat_int_t sc_gr[32]; /* PSW in sc_gr[0] */ | ||
113 | u64 sc_fr[32]; | ||
114 | compat_int_t sc_iasq[2]; | ||
115 | compat_int_t sc_iaoq[2]; | ||
116 | compat_int_t sc_sar; /* cr11 */ | ||
117 | }; | ||
118 | |||
119 | #define COMPAT_RLIM_INFINITY 0xffffffff | ||
120 | |||
121 | typedef u32 compat_old_sigset_t; /* at least 32 bits */ | ||
122 | |||
123 | #define _COMPAT_NSIG 64 | ||
124 | #define _COMPAT_NSIG_BPW 32 | ||
125 | |||
126 | typedef u32 compat_sigset_word; | ||
127 | |||
128 | #define COMPAT_OFF_T_MAX 0x7fffffff | ||
129 | #define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL | ||
130 | |||
131 | /* | ||
132 | * A pointer passed in from user mode. This should not | ||
133 | * be used for syscall parameters, just declare them | ||
134 | * as pointers because the syscall entry code will have | ||
135 | * appropriately converted them already. | ||
136 | */ | ||
137 | typedef u32 compat_uptr_t; | ||
138 | |||
139 | static inline void __user *compat_ptr(compat_uptr_t uptr) | ||
140 | { | ||
141 | return (void __user *)(unsigned long)uptr; | ||
142 | } | ||
143 | |||
144 | static inline compat_uptr_t ptr_to_compat(void __user *uptr) | ||
145 | { | ||
146 | return (u32)(unsigned long)uptr; | ||
147 | } | ||
148 | |||
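In practice a compat syscall or ioctl handler receives 32-bit user pointers as compat_uptr_t and widens them with compat_ptr() before use; a hedged sketch with hypothetical names:

        static int sketch_compat_handler(compat_uptr_t ubuf, compat_size_t ulen)
        {
                void __user *buf = compat_ptr(ubuf);    /* zero-extend to a full user pointer */

                /* ... e.g. copy_from_user(kbuf, buf, ulen) ... */
                return 0;
        }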
149 | static __inline__ void __user *compat_alloc_user_space(long len) | ||
150 | { | ||
151 | struct pt_regs *regs = &current->thread.regs; | ||
152 | return (void __user *)regs->gr[30]; | ||
153 | } | ||
154 | |||
155 | static inline int __is_compat_task(struct task_struct *t) | ||
156 | { | ||
157 | return test_ti_thread_flag(task_thread_info(t), TIF_32BIT); | ||
158 | } | ||
159 | |||
160 | static inline int is_compat_task(void) | ||
161 | { | ||
162 | return __is_compat_task(current); | ||
163 | } | ||
164 | |||
165 | #endif /* _ASM_PARISC_COMPAT_H */ | ||
diff --git a/arch/parisc/include/asm/compat_rt_sigframe.h b/arch/parisc/include/asm/compat_rt_sigframe.h new file mode 100644 index 000000000000..81bec28bdc48 --- /dev/null +++ b/arch/parisc/include/asm/compat_rt_sigframe.h | |||
@@ -0,0 +1,50 @@ | |||
1 | #include<linux/compat.h> | ||
2 | #include<linux/compat_siginfo.h> | ||
3 | #include<asm/compat_ucontext.h> | ||
4 | |||
5 | #ifndef _ASM_PARISC_COMPAT_RT_SIGFRAME_H | ||
6 | #define _ASM_PARISC_COMPAT_RT_SIGFRAME_H | ||
7 | |||
8 | /* In a deft move of uber-hackery, we decide to carry the top half of all | ||
9 | * 64-bit registers in a non-portable, non-ABI, hidden structure. | ||
10 | * Userspace can read the hidden structure if it *wants*, but it is never | ||
11 | * guaranteed to be in the same place. In fact the uc_sigmask from the | ||
12 | * ucontext_t structure may push the hidden register file downwards. | ||
13 | */ | ||
14 | struct compat_regfile { | ||
15 | /* Upper half of all the 64-bit registers that were truncated | ||
16 | on a copy to a 32-bit userspace */ | ||
17 | compat_int_t rf_gr[32]; | ||
18 | compat_int_t rf_iasq[2]; | ||
19 | compat_int_t rf_iaoq[2]; | ||
20 | compat_int_t rf_sar; | ||
21 | }; | ||
22 | |||
23 | #define COMPAT_SIGRETURN_TRAMP 4 | ||
24 | #define COMPAT_SIGRESTARTBLOCK_TRAMP 5 | ||
25 | #define COMPAT_TRAMP_SIZE (COMPAT_SIGRETURN_TRAMP + COMPAT_SIGRESTARTBLOCK_TRAMP) | ||
26 | |||
27 | struct compat_rt_sigframe { | ||
28 | /* XXX: Must match trampoline size in arch/parisc/kernel/signal.c | ||
29 | Secondary to that it must protect the ERESTART_RESTARTBLOCK | ||
30 | trampoline we left on the stack (we were bad and didn't | ||
31 | change sp so we could run really fast.) */ | ||
32 | compat_uint_t tramp[COMPAT_TRAMP_SIZE]; | ||
33 | compat_siginfo_t info; | ||
34 | struct compat_ucontext uc; | ||
35 | /* Hidden location of truncated registers, *must* be last. */ | ||
36 | struct compat_regfile regs; | ||
37 | }; | ||
38 | |||
39 | /* | ||
40 | * The 32-bit ABI wants at least 48 bytes for a function call frame: | ||
41 | * 16 bytes for arg0-arg3, and 32 bytes for magic (the only part of | ||
42 | * which Linux/parisc uses is sp-20 for the saved return pointer...) | ||
43 | * Then, the stack pointer must be rounded to a cache line (64 bytes). | ||
44 | */ | ||
45 | #define SIGFRAME32 64 | ||
46 | #define FUNCTIONCALLFRAME32 48 | ||
47 | #define PARISC_RT_SIGFRAME_SIZE32 \ | ||
48 | (((sizeof(struct compat_rt_sigframe) + FUNCTIONCALLFRAME32) + SIGFRAME32) & -SIGFRAME32) | ||
49 | |||
50 | #endif | ||
diff --git a/arch/parisc/include/asm/compat_signal.h b/arch/parisc/include/asm/compat_signal.h new file mode 100644 index 000000000000..6ad02c360b21 --- /dev/null +++ b/arch/parisc/include/asm/compat_signal.h | |||
@@ -0,0 +1,2 @@ | |||
1 | /* Use generic */ | ||
2 | #include <asm-generic/compat_signal.h> | ||
diff --git a/arch/parisc/include/asm/compat_ucontext.h b/arch/parisc/include/asm/compat_ucontext.h new file mode 100644 index 000000000000..2f7292afde3c --- /dev/null +++ b/arch/parisc/include/asm/compat_ucontext.h | |||
@@ -0,0 +1,17 @@ | |||
1 | #ifndef _ASM_PARISC_COMPAT_UCONTEXT_H | ||
2 | #define _ASM_PARISC_COMPAT_UCONTEXT_H | ||
3 | |||
4 | #include <linux/compat.h> | ||
5 | |||
6 | /* 32-bit ucontext as seen from a 64-bit kernel */ | ||
7 | struct compat_ucontext { | ||
8 | compat_uint_t uc_flags; | ||
9 | compat_uptr_t uc_link; | ||
10 | compat_stack_t uc_stack; /* struct compat_sigaltstack (12 bytes)*/ | ||
11 | /* FIXME: Pad out to get uc_mcontext to start at an 8-byte aligned boundary */ | ||
12 | compat_uint_t pad[1]; | ||
13 | struct compat_sigcontext uc_mcontext; | ||
14 | compat_sigset_t uc_sigmask; /* mask last for extensibility */ | ||
15 | }; | ||
16 | |||
17 | #endif /* !_ASM_PARISC_COMPAT_UCONTEXT_H */ | ||
diff --git a/arch/parisc/include/asm/cputime.h b/arch/parisc/include/asm/cputime.h new file mode 100644 index 000000000000..dcdf2fbd7e72 --- /dev/null +++ b/arch/parisc/include/asm/cputime.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef __PARISC_CPUTIME_H | ||
2 | #define __PARISC_CPUTIME_H | ||
3 | |||
4 | #include <asm-generic/cputime.h> | ||
5 | |||
6 | #endif /* __PARISC_CPUTIME_H */ | ||
diff --git a/arch/parisc/include/asm/current.h b/arch/parisc/include/asm/current.h new file mode 100644 index 000000000000..0fb9338e3bf2 --- /dev/null +++ b/arch/parisc/include/asm/current.h | |||
@@ -0,0 +1,15 @@ | |||
1 | #ifndef _PARISC_CURRENT_H | ||
2 | #define _PARISC_CURRENT_H | ||
3 | |||
4 | #include <linux/thread_info.h> | ||
5 | |||
6 | struct task_struct; | ||
7 | |||
8 | static inline struct task_struct * get_current(void) | ||
9 | { | ||
10 | return current_thread_info()->task; | ||
11 | } | ||
12 | |||
13 | #define current get_current() | ||
14 | |||
15 | #endif /* !(_PARISC_CURRENT_H) */ | ||
diff --git a/arch/parisc/include/asm/delay.h b/arch/parisc/include/asm/delay.h new file mode 100644 index 000000000000..7a75e984674b --- /dev/null +++ b/arch/parisc/include/asm/delay.h | |||
@@ -0,0 +1,43 @@ | |||
1 | #ifndef _PARISC_DELAY_H | ||
2 | #define _PARISC_DELAY_H | ||
3 | |||
4 | #include <asm/system.h> /* for mfctl() */ | ||
5 | #include <asm/processor.h> /* for boot_cpu_data */ | ||
6 | |||
7 | |||
8 | /* | ||
9 | * Copyright (C) 1993 Linus Torvalds | ||
10 | * | ||
11 | * Delay routines | ||
12 | */ | ||
13 | |||
14 | static __inline__ void __delay(unsigned long loops) { | ||
15 | asm volatile( | ||
16 | " .balignl 64,0x34000034\n" | ||
17 | " addib,UV -1,%0,.\n" | ||
18 | " nop\n" | ||
19 | : "=r" (loops) : "0" (loops)); | ||
20 | } | ||
21 | |||
22 | static __inline__ void __cr16_delay(unsigned long clocks) { | ||
23 | unsigned long start; | ||
24 | |||
25 | /* | ||
26 | * Note: Due to unsigned math, cr16 rollovers shouldn't be | ||
27 | * a problem here. However, on 32 bit, we need to make sure | ||
28 | * we don't pass in too big a value. The current default | ||
29 | * value of MAX_UDELAY_MS should help prevent this. | ||
30 | */ | ||
31 | |||
32 | start = mfctl(16); | ||
33 | while ((mfctl(16) - start) < clocks) | ||
34 | ; | ||
35 | } | ||
36 | |||
37 | static __inline__ void __udelay(unsigned long usecs) { | ||
38 | __cr16_delay(usecs * ((unsigned long)boot_cpu_data.cpu_hz / 1000000UL)); | ||
39 | } | ||
40 | |||
41 | #define udelay(n) __udelay(n) | ||
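The conversion in __udelay() is just microseconds multiplied by clock ticks per microsecond: on a hypothetical 800 MHz CPU, boot_cpu_data.cpu_hz / 1000000UL is 800, so udelay(5) spins until CR16 has advanced by 5 * 800 = 4000 ticks (this assumes, as the code does, that CR16 counts at the processor clock rate).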
42 | |||
43 | #endif /* defined(_PARISC_DELAY_H) */ | ||
diff --git a/arch/parisc/include/asm/device.h b/arch/parisc/include/asm/device.h new file mode 100644 index 000000000000..d8f9872b0e2d --- /dev/null +++ b/arch/parisc/include/asm/device.h | |||
@@ -0,0 +1,7 @@ | |||
1 | /* | ||
2 | * Arch specific extensions to struct device | ||
3 | * | ||
4 | * This file is released under the GPLv2 | ||
5 | */ | ||
6 | #include <asm-generic/device.h> | ||
7 | |||
diff --git a/arch/parisc/include/asm/div64.h b/arch/parisc/include/asm/div64.h new file mode 100644 index 000000000000..6cd978cefb28 --- /dev/null +++ b/arch/parisc/include/asm/div64.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/div64.h> | |||
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h new file mode 100644 index 000000000000..53af696f23d2 --- /dev/null +++ b/arch/parisc/include/asm/dma-mapping.h | |||
@@ -0,0 +1,253 @@ | |||
1 | #ifndef _PARISC_DMA_MAPPING_H | ||
2 | #define _PARISC_DMA_MAPPING_H | ||
3 | |||
4 | #include <linux/mm.h> | ||
5 | #include <asm/cacheflush.h> | ||
6 | #include <asm/scatterlist.h> | ||
7 | |||
8 | /* See Documentation/DMA-mapping.txt */ | ||
9 | struct hppa_dma_ops { | ||
10 | int (*dma_supported)(struct device *dev, u64 mask); | ||
11 | void *(*alloc_consistent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag); | ||
12 | void *(*alloc_noncoherent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag); | ||
13 | void (*free_consistent)(struct device *dev, size_t size, void *vaddr, dma_addr_t iova); | ||
14 | dma_addr_t (*map_single)(struct device *dev, void *addr, size_t size, enum dma_data_direction direction); | ||
15 | void (*unmap_single)(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction direction); | ||
16 | int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction); | ||
17 | void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nhwents, enum dma_data_direction direction); | ||
18 | void (*dma_sync_single_for_cpu)(struct device *dev, dma_addr_t iova, unsigned long offset, size_t size, enum dma_data_direction direction); | ||
19 | void (*dma_sync_single_for_device)(struct device *dev, dma_addr_t iova, unsigned long offset, size_t size, enum dma_data_direction direction); | ||
20 | void (*dma_sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction); | ||
21 | void (*dma_sync_sg_for_device)(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction); | ||
22 | }; | ||
23 | |||
24 | /* | ||
25 | ** We could live without the hppa_dma_ops indirection if we didn't want | ||
26 | ** to support 4 different coherent dma models with one binary (they will | ||
27 | ** someday be loadable modules): | ||
28 | ** I/O MMU consistent method dma_sync behavior | ||
29 | ** ============= ====================== ======================= | ||
30 | ** a) PA-7x00LC uncachable host memory flush/purge | ||
31 | ** b) U2/Uturn cachable host memory NOP | ||
32 | ** c) Ike/Astro cachable host memory NOP | ||
33 | ** d) EPIC/SAGA memory on EPIC/SAGA flush/reset DMA channel | ||
34 | ** | ||
35 | ** PA-7[13]00LC processors have a GSC bus interface and no I/O MMU. | ||
36 | ** | ||
37 | ** Systems (eg PCX-T workstations) that don't fall into the above | ||
38 | ** categories will need to modify the needed drivers to perform | ||
39 | ** flush/purge and allocate "regular" cacheable pages for everything. | ||
40 | */ | ||
41 | |||
42 | #ifdef CONFIG_PA11 | ||
43 | extern struct hppa_dma_ops pcxl_dma_ops; | ||
44 | extern struct hppa_dma_ops pcx_dma_ops; | ||
45 | #endif | ||
46 | |||
47 | extern struct hppa_dma_ops *hppa_dma_ops; | ||
48 | |||
49 | static inline void * | ||
50 | dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | ||
51 | gfp_t flag) | ||
52 | { | ||
53 | return hppa_dma_ops->alloc_consistent(dev, size, dma_handle, flag); | ||
54 | } | ||
55 | |||
56 | static inline void * | ||
57 | dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | ||
58 | gfp_t flag) | ||
59 | { | ||
60 | return hppa_dma_ops->alloc_noncoherent(dev, size, dma_handle, flag); | ||
61 | } | ||
62 | |||
63 | static inline void | ||
64 | dma_free_coherent(struct device *dev, size_t size, | ||
65 | void *vaddr, dma_addr_t dma_handle) | ||
66 | { | ||
67 | hppa_dma_ops->free_consistent(dev, size, vaddr, dma_handle); | ||
68 | } | ||
69 | |||
70 | static inline void | ||
71 | dma_free_noncoherent(struct device *dev, size_t size, | ||
72 | void *vaddr, dma_addr_t dma_handle) | ||
73 | { | ||
74 | hppa_dma_ops->free_consistent(dev, size, vaddr, dma_handle); | ||
75 | } | ||
76 | |||
77 | static inline dma_addr_t | ||
78 | dma_map_single(struct device *dev, void *ptr, size_t size, | ||
79 | enum dma_data_direction direction) | ||
80 | { | ||
81 | return hppa_dma_ops->map_single(dev, ptr, size, direction); | ||
82 | } | ||
83 | |||
84 | static inline void | ||
85 | dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | ||
86 | enum dma_data_direction direction) | ||
87 | { | ||
88 | hppa_dma_ops->unmap_single(dev, dma_addr, size, direction); | ||
89 | } | ||
90 | |||
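Every wrapper here simply dispatches through hppa_dma_ops, so drivers follow the usual streaming-DMA pattern; a minimal sketch with illustrative buffer names:

        dma_addr_t iova = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        /* ... program the device with 'iova' and wait for the transfer ... */
        dma_unmap_single(dev, iova, len, DMA_TO_DEVICE);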
91 | static inline int | ||
92 | dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | ||
93 | enum dma_data_direction direction) | ||
94 | { | ||
95 | return hppa_dma_ops->map_sg(dev, sg, nents, direction); | ||
96 | } | ||
97 | |||
98 | static inline void | ||
99 | dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | ||
100 | enum dma_data_direction direction) | ||
101 | { | ||
102 | hppa_dma_ops->unmap_sg(dev, sg, nhwentries, direction); | ||
103 | } | ||
104 | |||
105 | static inline dma_addr_t | ||
106 | dma_map_page(struct device *dev, struct page *page, unsigned long offset, | ||
107 | size_t size, enum dma_data_direction direction) | ||
108 | { | ||
109 | return dma_map_single(dev, (page_address(page) + (offset)), size, direction); | ||
110 | } | ||
111 | |||
112 | static inline void | ||
113 | dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, | ||
114 | enum dma_data_direction direction) | ||
115 | { | ||
116 | dma_unmap_single(dev, dma_address, size, direction); | ||
117 | } | ||
118 | |||
119 | |||
120 | static inline void | ||
121 | dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, | ||
122 | enum dma_data_direction direction) | ||
123 | { | ||
124 | if(hppa_dma_ops->dma_sync_single_for_cpu) | ||
125 | hppa_dma_ops->dma_sync_single_for_cpu(dev, dma_handle, 0, size, direction); | ||
126 | } | ||
127 | |||
128 | static inline void | ||
129 | dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, | ||
130 | enum dma_data_direction direction) | ||
131 | { | ||
132 | if(hppa_dma_ops->dma_sync_single_for_device) | ||
133 | hppa_dma_ops->dma_sync_single_for_device(dev, dma_handle, 0, size, direction); | ||
134 | } | ||
135 | |||
136 | static inline void | ||
137 | dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
138 | unsigned long offset, size_t size, | ||
139 | enum dma_data_direction direction) | ||
140 | { | ||
141 | if(hppa_dma_ops->dma_sync_single_for_cpu) | ||
142 | hppa_dma_ops->dma_sync_single_for_cpu(dev, dma_handle, offset, size, direction); | ||
143 | } | ||
144 | |||
145 | static inline void | ||
146 | dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, | ||
147 | unsigned long offset, size_t size, | ||
148 | enum dma_data_direction direction) | ||
149 | { | ||
150 | if(hppa_dma_ops->dma_sync_single_for_device) | ||
151 | hppa_dma_ops->dma_sync_single_for_device(dev, dma_handle, offset, size, direction); | ||
152 | } | ||
153 | |||
154 | static inline void | ||
155 | dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, | ||
156 | enum dma_data_direction direction) | ||
157 | { | ||
158 | if(hppa_dma_ops->dma_sync_sg_for_cpu) | ||
159 | hppa_dma_ops->dma_sync_sg_for_cpu(dev, sg, nelems, direction); | ||
160 | } | ||
161 | |||
162 | static inline void | ||
163 | dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, | ||
164 | enum dma_data_direction direction) | ||
165 | { | ||
166 | if(hppa_dma_ops->dma_sync_sg_for_device) | ||
167 | hppa_dma_ops->dma_sync_sg_for_device(dev, sg, nelems, direction); | ||
168 | } | ||
169 | |||
170 | static inline int | ||
171 | dma_supported(struct device *dev, u64 mask) | ||
172 | { | ||
173 | return hppa_dma_ops->dma_supported(dev, mask); | ||
174 | } | ||
175 | |||
176 | static inline int | ||
177 | dma_set_mask(struct device *dev, u64 mask) | ||
178 | { | ||
179 | if(!dev->dma_mask || !dma_supported(dev, mask)) | ||
180 | return -EIO; | ||
181 | |||
182 | *dev->dma_mask = mask; | ||
183 | |||
184 | return 0; | ||
185 | } | ||
186 | |||
187 | static inline int | ||
188 | dma_get_cache_alignment(void) | ||
189 | { | ||
190 | return dcache_stride; | ||
191 | } | ||
192 | |||
193 | static inline int | ||
194 | dma_is_consistent(struct device *dev, dma_addr_t dma_addr) | ||
195 | { | ||
196 | return (hppa_dma_ops->dma_sync_single_for_cpu == NULL); | ||
197 | } | ||
198 | |||
199 | static inline void | ||
200 | dma_cache_sync(struct device *dev, void *vaddr, size_t size, | ||
201 | enum dma_data_direction direction) | ||
202 | { | ||
203 | if(hppa_dma_ops->dma_sync_single_for_cpu) | ||
204 | flush_kernel_dcache_range((unsigned long)vaddr, size); | ||
205 | } | ||
206 | |||
207 | static inline void * | ||
208 | parisc_walk_tree(struct device *dev) | ||
209 | { | ||
210 | struct device *otherdev; | ||
211 | if(likely(dev->platform_data != NULL)) | ||
212 | return dev->platform_data; | ||
213 | /* OK, just traverse the bus to find it */ | ||
214 | for(otherdev = dev->parent; otherdev; | ||
215 | otherdev = otherdev->parent) { | ||
216 | if(otherdev->platform_data) { | ||
217 | dev->platform_data = otherdev->platform_data; | ||
218 | break; | ||
219 | } | ||
220 | } | ||
221 | BUG_ON(!dev->platform_data); | ||
222 | return dev->platform_data; | ||
223 | } | ||
224 | |||
225 | #define GET_IOC(dev) (HBA_DATA(parisc_walk_tree(dev))->iommu); | ||
226 | |||
227 | |||
228 | #ifdef CONFIG_IOMMU_CCIO | ||
229 | struct parisc_device; | ||
230 | struct ioc; | ||
231 | void * ccio_get_iommu(const struct parisc_device *dev); | ||
232 | int ccio_request_resource(const struct parisc_device *dev, | ||
233 | struct resource *res); | ||
234 | int ccio_allocate_resource(const struct parisc_device *dev, | ||
235 | struct resource *res, unsigned long size, | ||
236 | unsigned long min, unsigned long max, unsigned long align); | ||
237 | #else /* !CONFIG_IOMMU_CCIO */ | ||
238 | #define ccio_get_iommu(dev) NULL | ||
239 | #define ccio_request_resource(dev, res) insert_resource(&iomem_resource, res) | ||
240 | #define ccio_allocate_resource(dev, res, size, min, max, align) \ | ||
241 | allocate_resource(&iomem_resource, res, size, min, max, \ | ||
242 | align, NULL, NULL) | ||
243 | #endif /* !CONFIG_IOMMU_CCIO */ | ||
244 | |||
245 | #ifdef CONFIG_IOMMU_SBA | ||
246 | struct parisc_device; | ||
247 | void * sba_get_iommu(struct parisc_device *dev); | ||
248 | #endif | ||
249 | |||
250 | /* At the moment, we panic on error for IOMMU resource exhaustion */ | ||
251 | #define dma_mapping_error(dev, x) 0 | ||
252 | |||
253 | #endif | ||
diff --git a/arch/parisc/include/asm/dma.h b/arch/parisc/include/asm/dma.h new file mode 100644 index 000000000000..31ad0f05af3d --- /dev/null +++ b/arch/parisc/include/asm/dma.h | |||
@@ -0,0 +1,186 @@ | |||
1 | /* $Id: dma.h,v 1.2 1999/04/27 00:46:18 deller Exp $ | ||
2 | * linux/include/asm/dma.h: Defines for using and allocating dma channels. | ||
3 | * Written by Hennus Bergman, 1992. | ||
4 | * High DMA channel support & info by Hannu Savolainen | ||
5 | * and John Boyd, Nov. 1992. | ||
6 | * (c) Copyright 2000, Grant Grundler | ||
7 | */ | ||
8 | |||
9 | #ifndef _ASM_DMA_H | ||
10 | #define _ASM_DMA_H | ||
11 | |||
12 | #include <asm/io.h> /* need byte IO */ | ||
13 | #include <asm/system.h> | ||
14 | |||
15 | #define dma_outb outb | ||
16 | #define dma_inb inb | ||
17 | |||
18 | /* | ||
19 | ** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up | ||
20 | ** (or rather not merge) DMAs into manageable chunks. | ||
21 | ** On parisc, this is more of the software/tuning constraint | ||
22 | ** rather than the HW. I/O MMU allocation algorithms can be | ||
23 | ** faster with smaller sizes (to some degree). | ||
24 | */ | ||
25 | #define DMA_CHUNK_SIZE (BITS_PER_LONG*PAGE_SIZE) | ||
26 | |||
27 | /* The maximum address to which we can perform a DMA transfer on this platform. | ||
28 | ** New dynamic DMA interfaces should obsolete this.... | ||
29 | */ | ||
30 | #define MAX_DMA_ADDRESS (~0UL) | ||
31 | |||
32 | /* | ||
33 | ** We don't have DMA channels... well V-class does but the | ||
34 | ** Dynamic DMA Mapping interface will support them... right? :^) | ||
35 | ** Note: this is not relevant right now for PA-RISC, but we cannot | ||
36 | ** leave this as undefined because some things (e.g. sound) | ||
37 | ** won't compile :-( | ||
38 | */ | ||
39 | #define MAX_DMA_CHANNELS 8 | ||
40 | #define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */ | ||
41 | #define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */ | ||
42 | #define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */ | ||
43 | |||
44 | #define DMA_AUTOINIT 0x10 | ||
45 | |||
46 | /* 8237 DMA controllers */ | ||
47 | #define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */ | ||
48 | #define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */ | ||
49 | |||
50 | /* DMA controller registers */ | ||
51 | #define DMA1_CMD_REG 0x08 /* command register (w) */ | ||
52 | #define DMA1_STAT_REG 0x08 /* status register (r) */ | ||
53 | #define DMA1_REQ_REG 0x09 /* request register (w) */ | ||
54 | #define DMA1_MASK_REG 0x0A /* single-channel mask (w) */ | ||
55 | #define DMA1_MODE_REG 0x0B /* mode register (w) */ | ||
56 | #define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */ | ||
57 | #define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */ | ||
58 | #define DMA1_RESET_REG 0x0D /* Master Clear (w) */ | ||
59 | #define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */ | ||
60 | #define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */ | ||
61 | #define DMA1_EXT_MODE_REG (0x400 | DMA1_MODE_REG) | ||
62 | |||
63 | #define DMA2_CMD_REG 0xD0 /* command register (w) */ | ||
64 | #define DMA2_STAT_REG 0xD0 /* status register (r) */ | ||
65 | #define DMA2_REQ_REG 0xD2 /* request register (w) */ | ||
66 | #define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */ | ||
67 | #define DMA2_MODE_REG 0xD6 /* mode register (w) */ | ||
68 | #define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */ | ||
69 | #define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */ | ||
70 | #define DMA2_RESET_REG 0xDA /* Master Clear (w) */ | ||
71 | #define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */ | ||
72 | #define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */ | ||
73 | #define DMA2_EXT_MODE_REG (0x400 | DMA2_MODE_REG) | ||
74 | |||
75 | static __inline__ unsigned long claim_dma_lock(void) | ||
76 | { | ||
77 | return 0; | ||
78 | } | ||
79 | |||
80 | static __inline__ void release_dma_lock(unsigned long flags) | ||
81 | { | ||
82 | } | ||
83 | |||
84 | |||
85 | /* Get DMA residue count. After a DMA transfer, this | ||
86 | * should return zero. Reading this while a DMA transfer is | ||
87 | * still in progress will return unpredictable results. | ||
88 | * If called before the channel has been used, it may return 1. | ||
89 | * Otherwise, it returns the number of _bytes_ left to transfer. | ||
90 | * | ||
91 | * Assumes DMA flip-flop is clear. | ||
92 | */ | ||
93 | static __inline__ int get_dma_residue(unsigned int dmanr) | ||
94 | { | ||
95 | unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE | ||
96 | : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE; | ||
97 | |||
98 | /* using short to get 16-bit wrap around */ | ||
99 | unsigned short count; | ||
100 | |||
101 | count = 1 + dma_inb(io_port); | ||
102 | count += dma_inb(io_port) << 8; | ||
103 | |||
104 | return (dmanr<=3)? count : (count<<1); | ||
105 | } | ||
106 | |||
107 | /* enable/disable a specific DMA channel */ | ||
108 | static __inline__ void enable_dma(unsigned int dmanr) | ||
109 | { | ||
110 | #ifdef CONFIG_SUPERIO | ||
111 | if (dmanr<=3) | ||
112 | dma_outb(dmanr, DMA1_MASK_REG); | ||
113 | else | ||
114 | dma_outb(dmanr & 3, DMA2_MASK_REG); | ||
115 | #endif | ||
116 | } | ||
117 | |||
118 | static __inline__ void disable_dma(unsigned int dmanr) | ||
119 | { | ||
120 | #ifdef CONFIG_SUPERIO | ||
121 | if (dmanr<=3) | ||
122 | dma_outb(dmanr | 4, DMA1_MASK_REG); | ||
123 | else | ||
124 | dma_outb((dmanr & 3) | 4, DMA2_MASK_REG); | ||
125 | #endif | ||
126 | } | ||
127 | |||
128 | /* reserve a DMA channel */ | ||
129 | #define request_dma(dmanr, device_id) (0) | ||
130 | |||
131 | /* Clear the 'DMA Pointer Flip Flop'. | ||
132 | * Write 0 for LSB/MSB, 1 for MSB/LSB access. | ||
133 | * Use this once to initialize the FF to a known state. | ||
134 | * After that, keep track of it. :-) | ||
135 | * --- In order to do that, the DMA routines below should --- | ||
136 | * --- only be used while holding the DMA lock ! --- | ||
137 | */ | ||
138 | static __inline__ void clear_dma_ff(unsigned int dmanr) | ||
139 | { | ||
140 | } | ||
141 | |||
142 | /* set mode (above) for a specific DMA channel */ | ||
143 | static __inline__ void set_dma_mode(unsigned int dmanr, char mode) | ||
144 | { | ||
145 | } | ||
146 | |||
147 | /* Set only the page register bits of the transfer address. | ||
148 | * This is used for successive transfers when we know the contents of | ||
149 | * the lower 16 bits of the DMA current address register, but a 64k boundary | ||
150 | * may have been crossed. | ||
151 | */ | ||
152 | static __inline__ void set_dma_page(unsigned int dmanr, char pagenr) | ||
153 | { | ||
154 | } | ||
155 | |||
156 | |||
157 | /* Set transfer address & page bits for specific DMA channel. | ||
158 | * Assumes dma flipflop is clear. | ||
159 | */ | ||
160 | static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a) | ||
161 | { | ||
162 | } | ||
163 | |||
164 | |||
165 | /* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for | ||
166 | * a specific DMA channel. | ||
167 | * You must ensure the parameters are valid. | ||
168 | * NOTE: from a manual: "the number of transfers is one more | ||
169 | * than the initial word count"! This is taken into account. | ||
170 | * Assumes dma flip-flop is clear. | ||
171 | * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. | ||
172 | */ | ||
173 | static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) | ||
174 | { | ||
175 | } | ||
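On this port set_dma_count() is a stub, but the comment above still documents the generic semantics: the controller is programmed with one less than the number of transfers, and channels 5..7 transfer 16-bit words, so their byte count must be even and is halved. A sketch of that conversion as a non-stub implementation (for example the generic ISA one) performs it; the helper name is illustrative:

	/* count is in bytes; the value written to the count register is
	 * "transfers - 1", where a transfer is a byte on channels 0..3 and a
	 * 16-bit word on channels 5..7. */
	static unsigned short dma_count_to_reg(unsigned int dmanr, unsigned int count)
	{
		if (dmanr <= 3)
			return count - 1;	/* byte-wide channels */
		return (count >> 1) - 1;	/* word-wide channels: count must be even */
	}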
176 | |||
177 | |||
178 | #define free_dma(dmanr) | ||
179 | |||
180 | #ifdef CONFIG_PCI | ||
181 | extern int isa_dma_bridge_buggy; | ||
182 | #else | ||
183 | #define isa_dma_bridge_buggy (0) | ||
184 | #endif | ||
185 | |||
186 | #endif /* _ASM_DMA_H */ | ||
diff --git a/arch/parisc/include/asm/eisa_bus.h b/arch/parisc/include/asm/eisa_bus.h new file mode 100644 index 000000000000..201085f83dd5 --- /dev/null +++ b/arch/parisc/include/asm/eisa_bus.h | |||
@@ -0,0 +1,23 @@ | |||
1 | /* | ||
2 | * eisa_bus.h interface between the eisa BA driver and the bus enumerator | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | * | ||
9 | * Copyright (c) 2002 Daniel Engstrom <5116@telia.com> | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #ifndef ASM_EISA_H | ||
14 | #define ASM_EISA_H | ||
15 | |||
16 | extern void eisa_make_irq_level(int num); | ||
17 | extern void eisa_make_irq_edge(int num); | ||
18 | extern int eisa_enumerator(unsigned long eeprom_addr, | ||
19 | struct resource *io_parent, | ||
20 | struct resource *mem_parent); | ||
21 | extern int eisa_eeprom_init(unsigned long addr); | ||
22 | |||
23 | #endif | ||
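A hypothetical sketch of how a bus adapter driver might use this interface: initialise the EEPROM mapping, then hand its I/O and memory resource roots to the enumerator. The resource names and the probe function are illustrative, not taken from a real driver, and error handling is elided:

	#include <linux/ioport.h>
	#include <asm/eisa_bus.h>

	static struct resource eisa_io_root  = { .name = "EISA I/O ports" };
	static struct resource eisa_mem_root = { .name = "EISA memory" };

	/* eeprom_addr would come from firmware/bus discovery. */
	static int example_eisa_probe(unsigned long eeprom_addr)
	{
		eisa_eeprom_init(eeprom_addr);	/* map the configuration EEPROM */
		return eisa_enumerator(eeprom_addr, &eisa_io_root, &eisa_mem_root);
	}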
diff --git a/arch/parisc/include/asm/eisa_eeprom.h b/arch/parisc/include/asm/eisa_eeprom.h new file mode 100644 index 000000000000..9c9da980402a --- /dev/null +++ b/arch/parisc/include/asm/eisa_eeprom.h | |||
@@ -0,0 +1,153 @@ | |||
1 | /* | ||
2 | * eisa_eeprom.h - provide support for EISA adapters in PA-RISC machines | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | * | ||
9 | * Copyright (c) 2001, 2002 Daniel Engstrom <5116@telia.com> | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #ifndef ASM_EISA_EEPROM_H | ||
14 | #define ASM_EISA_EEPROM_H | ||
15 | |||
16 | extern void __iomem *eisa_eeprom_addr; | ||
17 | |||
18 | #define HPEE_MAX_LENGTH 0x2000 /* maximum eeprom length */ | ||
19 | |||
20 | #define HPEE_SLOT_INFO(slot) (20+(48*slot)) | ||
21 | |||
22 | struct eeprom_header | ||
23 | { | ||
24 | |||
25 | u_int32_t num_writes; /* number of writes */ | ||
26 | u_int8_t flags; /* flags, usage? */ | ||
27 | u_int8_t ver_maj; | ||
28 | u_int8_t ver_min; | ||
29 | u_int8_t num_slots; /* number of EISA slots in system */ | ||
30 | u_int16_t csum; /* checksum, I don't know how to calculate this */ | ||
31 | u_int8_t pad[10]; | ||
32 | } __attribute__ ((packed)); | ||
33 | |||
34 | |||
35 | struct eeprom_eisa_slot_info | ||
36 | { | ||
37 | u_int32_t eisa_slot_id; | ||
38 | u_int32_t config_data_offset; | ||
39 | u_int32_t num_writes; | ||
40 | u_int16_t csum; | ||
41 | u_int16_t num_functions; | ||
42 | u_int16_t config_data_length; | ||
43 | |||
44 | /* bits 0..3 are the duplicate slot id */ | ||
45 | #define HPEE_SLOT_INFO_EMBEDDED 0x10 | ||
46 | #define HPEE_SLOT_INFO_VIRTUAL 0x20 | ||
47 | #define HPEE_SLOT_INFO_NO_READID 0x40 | ||
48 | #define HPEE_SLOT_INFO_DUPLICATE 0x80 | ||
49 | u_int8_t slot_info; | ||
50 | |||
51 | #define HPEE_SLOT_FEATURES_ENABLE 0x01 | ||
52 | #define HPEE_SLOT_FEATURES_IOCHK 0x02 | ||
53 | #define HPEE_SLOT_FEATURES_CFG_INCOMPLETE 0x80 | ||
54 | u_int8_t slot_features; | ||
55 | |||
56 | u_int8_t ver_min; | ||
57 | u_int8_t ver_maj; | ||
58 | |||
59 | #define HPEE_FUNCTION_INFO_HAVE_TYPE 0x01 | ||
60 | #define HPEE_FUNCTION_INFO_HAVE_MEMORY 0x02 | ||
61 | #define HPEE_FUNCTION_INFO_HAVE_IRQ 0x04 | ||
62 | #define HPEE_FUNCTION_INFO_HAVE_DMA 0x08 | ||
63 | #define HPEE_FUNCTION_INFO_HAVE_PORT 0x10 | ||
64 | #define HPEE_FUNCTION_INFO_HAVE_PORT_INIT 0x20 | ||
65 | /* I think there are two slightly different | ||
66 | * versions of the function_info field, | ||
67 | * one in the fixed header and one optional | ||
68 | * in the parsed slot data area */ | ||
69 | #define HPEE_FUNCTION_INFO_HAVE_FUNCTION 0x01 | ||
70 | #define HPEE_FUNCTION_INFO_F_DISABLED 0x80 | ||
71 | #define HPEE_FUNCTION_INFO_CFG_FREE_FORM 0x40 | ||
72 | u_int8_t function_info; | ||
73 | |||
74 | #define HPEE_FLAG_BOARD_IS_ISA 0x01 /* flag and minor version for isa board */ | ||
75 | u_int8_t flags; | ||
76 | u_int8_t pad[24]; | ||
77 | } __attribute__ ((packed)); | ||
78 | |||
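The fixed header above is 20 bytes and each slot record is 48 bytes, which is exactly what the HPEE_SLOT_INFO() offset macro encodes. A small illustrative sketch of locating slot N's record; reading from a plain in-memory copy of the EEPROM is an assumption here, the real driver reads through the mapped eisa_eeprom_addr:

	#include <string.h>

	/* Copy the 48-byte record for "slot" out of a raw EEPROM image. */
	static void read_slot_info(const unsigned char *eeprom, int slot,
				   struct eeprom_eisa_slot_info *out)
	{
		memcpy(out, eeprom + HPEE_SLOT_INFO(slot), sizeof(*out));
	}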
79 | |||
80 | #define HPEE_MEMORY_MAX_ENT 9 | ||
81 | /* memory descriptor: byte 0 */ | ||
82 | #define HPEE_MEMORY_WRITABLE 0x01 | ||
83 | #define HPEE_MEMORY_CACHABLE 0x02 | ||
84 | #define HPEE_MEMORY_TYPE_MASK 0x18 | ||
85 | #define HPEE_MEMORY_TYPE_SYS 0x00 | ||
86 | #define HPEE_MEMORY_TYPE_EXP 0x08 | ||
87 | #define HPEE_MEMORY_TYPE_VIR 0x10 | ||
88 | #define HPEE_MEMORY_TYPE_OTH 0x18 | ||
89 | #define HPEE_MEMORY_SHARED 0x20 | ||
90 | #define HPEE_MEMORY_MORE 0x80 | ||
91 | |||
92 | /* memory descriptor: byte 1 */ | ||
93 | #define HPEE_MEMORY_WIDTH_MASK 0x03 | ||
94 | #define HPEE_MEMORY_WIDTH_BYTE 0x00 | ||
95 | #define HPEE_MEMORY_WIDTH_WORD 0x01 | ||
96 | #define HPEE_MEMORY_WIDTH_DWORD 0x02 | ||
97 | #define HPEE_MEMORY_DECODE_MASK 0x0c | ||
98 | #define HPEE_MEMORY_DECODE_20BITS 0x00 | ||
99 | #define HPEE_MEMORY_DECODE_24BITS 0x04 | ||
100 | #define HPEE_MEMORY_DECODE_32BITS 0x08 | ||
101 | /* bytes 2 and 3 are a 16-bit LE value | ||
102 | * containing the memory size in kilobytes */ | ||
103 | /* bytes 4, 5 and 6 are a 24-bit LE value | ||
104 | * containing the memory base address */ | ||
105 | |||
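Putting the memory-descriptor comments above together, one entry carries a flags byte, a width/decode byte, a 16-bit little-endian size in kilobytes and a 24-bit little-endian base address. A user-space style sketch of decoding such an entry; the 7-byte packed layout and the structure name are assumptions made for illustration:

	#include <stdint.h>

	struct hpee_mem_entry {
		uint8_t  flags;		/* HPEE_MEMORY_* bits (byte 0) */
		uint8_t  width;		/* HPEE_MEMORY_WIDTH_* (byte 1) */
		uint8_t  decode;	/* HPEE_MEMORY_DECODE_* (byte 1) */
		uint32_t size_bytes;	/* size, converted from kilobytes */
		uint32_t base;		/* 24-bit base address */
		int      more;		/* another descriptor follows */
	};

	static void hpee_decode_mem(const uint8_t *p, struct hpee_mem_entry *e)
	{
		e->flags      = p[0];
		e->more       = (p[0] & HPEE_MEMORY_MORE) != 0;
		e->width      = p[1] & HPEE_MEMORY_WIDTH_MASK;
		e->decode     = p[1] & HPEE_MEMORY_DECODE_MASK;
		e->size_bytes = ((uint32_t)p[2] | ((uint32_t)p[3] << 8)) * 1024;
		e->base       = (uint32_t)p[4] | ((uint32_t)p[5] << 8) |
				((uint32_t)p[6] << 16);
	}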
106 | |||
107 | #define HPEE_IRQ_MAX_ENT 7 | ||
108 | /* Interrupt entry: byte 0 */ | ||
109 | #define HPEE_IRQ_CHANNEL_MASK 0xf | ||
110 | #define HPEE_IRQ_TRIG_LEVEL 0x20 | ||
111 | #define HPEE_IRQ_MORE 0x80 | ||
112 | /* byte 1 seems to be unused */ | ||
113 | |||
114 | #define HPEE_DMA_MAX_ENT 4 | ||
115 | |||
116 | /* dma entry: byte 0 */ | ||
117 | #define HPEE_DMA_CHANNEL_MASK 7 | ||
118 | #define HPEE_DMA_SIZE_MASK 0xc | ||
119 | #define HPEE_DMA_SIZE_BYTE 0x0 | ||
120 | #define HPEE_DMA_SIZE_WORD 0x4 | ||
121 | #define HPEE_DMA_SIZE_DWORD 0x8 | ||
122 | #define HPEE_DMA_SHARED 0x40 | ||
123 | #define HPEE_DMA_MORE 0x80 | ||
124 | |||
125 | /* dma entry: byte 1 */ | ||
126 | #define HPEE_DMA_TIMING_MASK 0x30 | ||
127 | #define HPEE_DMA_TIMING_ISA 0x0 | ||
128 | #define HPEE_DMA_TIMING_TYPEA 0x10 | ||
129 | #define HPEE_DMA_TIMING_TYPEB 0x20 | ||
130 | #define HPEE_DMA_TIMING_TYPEC 0x30 | ||
131 | |||
132 | #define HPEE_PORT_MAX_ENT 20 | ||
133 | /* port entry byte 0 */ | ||
134 | #define HPEE_PORT_SIZE_MASK 0x1f | ||
135 | #define HPEE_PORT_SHARED 0x40 | ||
136 | #define HPEE_PORT_MORE 0x80 | ||
137 | /* bytes 1 and 2 are a 16-bit LE value | ||
138 | * containing the start port number */ | ||
139 | |||
140 | #define HPEE_PORT_INIT_MAX_LEN 60 /* in bytes here */ | ||
141 | /* port init entry byte 0 */ | ||
142 | #define HPEE_PORT_INIT_WIDTH_MASK 0x3 | ||
143 | #define HPEE_PORT_INIT_WIDTH_BYTE 0x0 | ||
144 | #define HPEE_PORT_INIT_WIDTH_WORD 0x1 | ||
145 | #define HPEE_PORT_INIT_WIDTH_DWORD 0x2 | ||
146 | #define HPEE_PORT_INIT_MASK 0x4 | ||
147 | #define HPEE_PORT_INIT_MORE 0x80 | ||
148 | |||
149 | #define HPEE_SELECTION_MAX_ENT 26 | ||
150 | |||
151 | #define HPEE_TYPE_MAX_LEN 80 | ||
152 | |||
153 | #endif | ||
diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h new file mode 100644 index 000000000000..7fa675799e6d --- /dev/null +++ b/arch/parisc/include/asm/elf.h | |||
@@ -0,0 +1,342 @@ | |||
1 | #ifndef __ASMPARISC_ELF_H | ||
2 | #define __ASMPARISC_ELF_H | ||
3 | |||
4 | /* | ||
5 | * ELF register definitions.. | ||
6 | */ | ||
7 | |||
8 | #include <asm/ptrace.h> | ||
9 | |||
10 | #define EM_PARISC 15 | ||
11 | |||
12 | /* HPPA specific definitions. */ | ||
13 | |||
14 | /* Legal values for e_flags field of Elf32_Ehdr. */ | ||
15 | |||
16 | #define EF_PARISC_TRAPNIL 0x00010000 /* Trap nil pointer dereference. */ | ||
17 | #define EF_PARISC_EXT 0x00020000 /* Program uses arch. extensions. */ | ||
18 | #define EF_PARISC_LSB 0x00040000 /* Program expects little endian. */ | ||
19 | #define EF_PARISC_WIDE 0x00080000 /* Program expects wide mode. */ | ||
20 | #define EF_PARISC_NO_KABP 0x00100000 /* No kernel assisted branch | ||
21 | prediction. */ | ||
22 | #define EF_PARISC_LAZYSWAP 0x00400000 /* Allow lazy swapping. */ | ||
23 | #define EF_PARISC_ARCH 0x0000ffff /* Architecture version. */ | ||
24 | |||
25 | /* Defined values for `e_flags & EF_PARISC_ARCH' are: */ | ||
26 | |||
27 | #define EFA_PARISC_1_0 0x020b /* PA-RISC 1.0 big-endian. */ | ||
28 | #define EFA_PARISC_1_1 0x0210 /* PA-RISC 1.1 big-endian. */ | ||
29 | #define EFA_PARISC_2_0 0x0214 /* PA-RISC 2.0 big-endian. */ | ||
30 | |||
31 | /* Additional section indices. */ | ||
32 | |||
33 | #define SHN_PARISC_ANSI_COMMON 0xff00 /* Section for tentatively declared | ||
34 | symbols in ANSI C. */ | ||
35 | #define SHN_PARISC_HUGE_COMMON 0xff01 /* Common blocks in huge model. */ | ||
36 | |||
37 | /* Legal values for sh_type field of Elf32_Shdr. */ | ||
38 | |||
39 | #define SHT_PARISC_EXT 0x70000000 /* Contains product specific ext. */ | ||
40 | #define SHT_PARISC_UNWIND 0x70000001 /* Unwind information. */ | ||
41 | #define SHT_PARISC_DOC 0x70000002 /* Debug info for optimized code. */ | ||
42 | |||
43 | /* Legal values for sh_flags field of Elf32_Shdr. */ | ||
44 | |||
45 | #define SHF_PARISC_SHORT 0x20000000 /* Section with short addressing. */ | ||
46 | #define SHF_PARISC_HUGE 0x40000000 /* Section far from gp. */ | ||
47 | #define SHF_PARISC_SBP 0x80000000 /* Static branch prediction code. */ | ||
48 | |||
49 | /* Legal values for ST_TYPE subfield of st_info (symbol type). */ | ||
50 | |||
51 | #define STT_PARISC_MILLICODE 13 /* Millicode function entry point. */ | ||
52 | |||
53 | #define STT_HP_OPAQUE (STT_LOOS + 0x1) | ||
54 | #define STT_HP_STUB (STT_LOOS + 0x2) | ||
55 | |||
56 | /* HPPA relocs. */ | ||
57 | |||
58 | #define R_PARISC_NONE 0 /* No reloc. */ | ||
59 | #define R_PARISC_DIR32 1 /* Direct 32-bit reference. */ | ||
60 | #define R_PARISC_DIR21L 2 /* Left 21 bits of eff. address. */ | ||
61 | #define R_PARISC_DIR17R 3 /* Right 17 bits of eff. address. */ | ||
62 | #define R_PARISC_DIR17F 4 /* 17 bits of eff. address. */ | ||
63 | #define R_PARISC_DIR14R 6 /* Right 14 bits of eff. address. */ | ||
64 | #define R_PARISC_PCREL32 9 /* 32-bit rel. address. */ | ||
65 | #define R_PARISC_PCREL21L 10 /* Left 21 bits of rel. address. */ | ||
66 | #define R_PARISC_PCREL17R 11 /* Right 17 bits of rel. address. */ | ||
67 | #define R_PARISC_PCREL17F 12 /* 17 bits of rel. address. */ | ||
68 | #define R_PARISC_PCREL14R 14 /* Right 14 bits of rel. address. */ | ||
69 | #define R_PARISC_DPREL21L 18 /* Left 21 bits of rel. address. */ | ||
70 | #define R_PARISC_DPREL14R 22 /* Right 14 bits of rel. address. */ | ||
71 | #define R_PARISC_GPREL21L 26 /* GP-relative, left 21 bits. */ | ||
72 | #define R_PARISC_GPREL14R 30 /* GP-relative, right 14 bits. */ | ||
73 | #define R_PARISC_LTOFF21L 34 /* LT-relative, left 21 bits. */ | ||
74 | #define R_PARISC_LTOFF14R 38 /* LT-relative, right 14 bits. */ | ||
75 | #define R_PARISC_SECREL32 41 /* 32 bits section rel. address. */ | ||
76 | #define R_PARISC_SEGBASE 48 /* No relocation, set segment base. */ | ||
77 | #define R_PARISC_SEGREL32 49 /* 32 bits segment rel. address. */ | ||
78 | #define R_PARISC_PLTOFF21L 50 /* PLT rel. address, left 21 bits. */ | ||
79 | #define R_PARISC_PLTOFF14R 54 /* PLT rel. address, right 14 bits. */ | ||
80 | #define R_PARISC_LTOFF_FPTR32 57 /* 32 bits LT-rel. function pointer. */ | ||
81 | #define R_PARISC_LTOFF_FPTR21L 58 /* LT-rel. fct ptr, left 21 bits. */ | ||
82 | #define R_PARISC_LTOFF_FPTR14R 62 /* LT-rel. fct ptr, right 14 bits. */ | ||
83 | #define R_PARISC_FPTR64 64 /* 64 bits function address. */ | ||
84 | #define R_PARISC_PLABEL32 65 /* 32 bits function address. */ | ||
85 | #define R_PARISC_PCREL64 72 /* 64 bits PC-rel. address. */ | ||
86 | #define R_PARISC_PCREL22F 74 /* 22 bits PC-rel. address. */ | ||
87 | #define R_PARISC_PCREL14WR 75 /* PC-rel. address, right 14 bits. */ | ||
88 | #define R_PARISC_PCREL14DR 76 /* PC rel. address, right 14 bits. */ | ||
89 | #define R_PARISC_PCREL16F 77 /* 16 bits PC-rel. address. */ | ||
90 | #define R_PARISC_PCREL16WF 78 /* 16 bits PC-rel. address. */ | ||
91 | #define R_PARISC_PCREL16DF 79 /* 16 bits PC-rel. address. */ | ||
92 | #define R_PARISC_DIR64 80 /* 64 bits of eff. address. */ | ||
93 | #define R_PARISC_DIR14WR 83 /* 14 bits of eff. address. */ | ||
94 | #define R_PARISC_DIR14DR 84 /* 14 bits of eff. address. */ | ||
95 | #define R_PARISC_DIR16F 85 /* 16 bits of eff. address. */ | ||
96 | #define R_PARISC_DIR16WF 86 /* 16 bits of eff. address. */ | ||
97 | #define R_PARISC_DIR16DF 87 /* 16 bits of eff. address. */ | ||
98 | #define R_PARISC_GPREL64 88 /* 64 bits of GP-rel. address. */ | ||
99 | #define R_PARISC_GPREL14WR 91 /* GP-rel. address, right 14 bits. */ | ||
100 | #define R_PARISC_GPREL14DR 92 /* GP-rel. address, right 14 bits. */ | ||
101 | #define R_PARISC_GPREL16F 93 /* 16 bits GP-rel. address. */ | ||
102 | #define R_PARISC_GPREL16WF 94 /* 16 bits GP-rel. address. */ | ||
103 | #define R_PARISC_GPREL16DF 95 /* 16 bits GP-rel. address. */ | ||
104 | #define R_PARISC_LTOFF64 96 /* 64 bits LT-rel. address. */ | ||
105 | #define R_PARISC_LTOFF14WR 99 /* LT-rel. address, right 14 bits. */ | ||
106 | #define R_PARISC_LTOFF14DR 100 /* LT-rel. address, right 14 bits. */ | ||
107 | #define R_PARISC_LTOFF16F 101 /* 16 bits LT-rel. address. */ | ||
108 | #define R_PARISC_LTOFF16WF 102 /* 16 bits LT-rel. address. */ | ||
109 | #define R_PARISC_LTOFF16DF 103 /* 16 bits LT-rel. address. */ | ||
110 | #define R_PARISC_SECREL64 104 /* 64 bits section rel. address. */ | ||
111 | #define R_PARISC_SEGREL64 112 /* 64 bits segment rel. address. */ | ||
112 | #define R_PARISC_PLTOFF14WR 115 /* PLT-rel. address, right 14 bits. */ | ||
113 | #define R_PARISC_PLTOFF14DR 116 /* PLT-rel. address, right 14 bits. */ | ||
114 | #define R_PARISC_PLTOFF16F 117 /* 16 bits LT-rel. address. */ | ||
115 | #define R_PARISC_PLTOFF16WF 118 /* 16 bits PLT-rel. address. */ | ||
116 | #define R_PARISC_PLTOFF16DF 119 /* 16 bits PLT-rel. address. */ | ||
117 | #define R_PARISC_LTOFF_FPTR64 120 /* 64 bits LT-rel. function ptr. */ | ||
118 | #define R_PARISC_LTOFF_FPTR14WR 123 /* LT-rel. fct. ptr., right 14 bits. */ | ||
119 | #define R_PARISC_LTOFF_FPTR14DR 124 /* LT-rel. fct. ptr., right 14 bits. */ | ||
120 | #define R_PARISC_LTOFF_FPTR16F 125 /* 16 bits LT-rel. function ptr. */ | ||
121 | #define R_PARISC_LTOFF_FPTR16WF 126 /* 16 bits LT-rel. function ptr. */ | ||
122 | #define R_PARISC_LTOFF_FPTR16DF 127 /* 16 bits LT-rel. function ptr. */ | ||
123 | #define R_PARISC_LORESERVE 128 | ||
124 | #define R_PARISC_COPY 128 /* Copy relocation. */ | ||
125 | #define R_PARISC_IPLT 129 /* Dynamic reloc, imported PLT */ | ||
126 | #define R_PARISC_EPLT 130 /* Dynamic reloc, exported PLT */ | ||
127 | #define R_PARISC_TPREL32 153 /* 32 bits TP-rel. address. */ | ||
128 | #define R_PARISC_TPREL21L 154 /* TP-rel. address, left 21 bits. */ | ||
129 | #define R_PARISC_TPREL14R 158 /* TP-rel. address, right 14 bits. */ | ||
130 | #define R_PARISC_LTOFF_TP21L 162 /* LT-TP-rel. address, left 21 bits. */ | ||
131 | #define R_PARISC_LTOFF_TP14R 166 /* LT-TP-rel. address, right 14 bits.*/ | ||
132 | #define R_PARISC_LTOFF_TP14F 167 /* 14 bits LT-TP-rel. address. */ | ||
133 | #define R_PARISC_TPREL64 216 /* 64 bits TP-rel. address. */ | ||
134 | #define R_PARISC_TPREL14WR 219 /* TP-rel. address, right 14 bits. */ | ||
135 | #define R_PARISC_TPREL14DR 220 /* TP-rel. address, right 14 bits. */ | ||
136 | #define R_PARISC_TPREL16F 221 /* 16 bits TP-rel. address. */ | ||
137 | #define R_PARISC_TPREL16WF 222 /* 16 bits TP-rel. address. */ | ||
138 | #define R_PARISC_TPREL16DF 223 /* 16 bits TP-rel. address. */ | ||
139 | #define R_PARISC_LTOFF_TP64 224 /* 64 bits LT-TP-rel. address. */ | ||
140 | #define R_PARISC_LTOFF_TP14WR 227 /* LT-TP-rel. address, right 14 bits.*/ | ||
141 | #define R_PARISC_LTOFF_TP14DR 228 /* LT-TP-rel. address, right 14 bits.*/ | ||
142 | #define R_PARISC_LTOFF_TP16F 229 /* 16 bits LT-TP-rel. address. */ | ||
143 | #define R_PARISC_LTOFF_TP16WF 230 /* 16 bits LT-TP-rel. address. */ | ||
144 | #define R_PARISC_LTOFF_TP16DF 231 /* 16 bits LT-TP-rel. address. */ | ||
145 | #define R_PARISC_HIRESERVE 255 | ||
146 | |||
147 | #define PA_PLABEL_FDESC 0x02 /* bit set if PLABEL points to | ||
148 | * a function descriptor, not | ||
149 | * an address */ | ||
150 | |||
151 | /* The following are PA function descriptors | ||
152 | * | ||
153 | * addr: the absolute address of the function | ||
154 | * gp: either the data pointer (r27) for non-PIC code or | ||
155 | * the PLT pointer (r19) for PIC code */ | ||
156 | |||
157 | /* Format for the Elf32 Function descriptor */ | ||
158 | typedef struct elf32_fdesc { | ||
159 | __u32 addr; | ||
160 | __u32 gp; | ||
161 | } Elf32_Fdesc; | ||
162 | |||
163 | /* Format for the Elf64 Function descriptor */ | ||
164 | typedef struct elf64_fdesc { | ||
165 | __u64 dummy[2]; /* FIXME: nothing uses these, why waste | ||
166 | * the space */ | ||
167 | __u64 addr; | ||
168 | __u64 gp; | ||
169 | } Elf64_Fdesc; | ||
170 | |||
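A plabel (procedure label) with the PA_PLABEL_FDESC bit set is not a code address but a pointer to one of the descriptors above. A minimal sketch of resolving a 32-bit plabel to its entry address; the exact mask used to strip the flag bits is an assumption made for illustration:

	static __u32 plabel_to_entry(__u32 plabel)
	{
		if (plabel & PA_PLABEL_FDESC) {
			/* flag bits stripped: the remainder points at the descriptor */
			Elf32_Fdesc *fd = (Elf32_Fdesc *)(unsigned long)(plabel & ~3U);
			return fd->addr;
		}
		return plabel;		/* already a plain code address */
	}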
171 | /* Legal values for p_type field of Elf32_Phdr/Elf64_Phdr. */ | ||
172 | |||
173 | #define PT_HP_TLS (PT_LOOS + 0x0) | ||
174 | #define PT_HP_CORE_NONE (PT_LOOS + 0x1) | ||
175 | #define PT_HP_CORE_VERSION (PT_LOOS + 0x2) | ||
176 | #define PT_HP_CORE_KERNEL (PT_LOOS + 0x3) | ||
177 | #define PT_HP_CORE_COMM (PT_LOOS + 0x4) | ||
178 | #define PT_HP_CORE_PROC (PT_LOOS + 0x5) | ||
179 | #define PT_HP_CORE_LOADABLE (PT_LOOS + 0x6) | ||
180 | #define PT_HP_CORE_STACK (PT_LOOS + 0x7) | ||
181 | #define PT_HP_CORE_SHM (PT_LOOS + 0x8) | ||
182 | #define PT_HP_CORE_MMF (PT_LOOS + 0x9) | ||
183 | #define PT_HP_PARALLEL (PT_LOOS + 0x10) | ||
184 | #define PT_HP_FASTBIND (PT_LOOS + 0x11) | ||
185 | #define PT_HP_OPT_ANNOT (PT_LOOS + 0x12) | ||
186 | #define PT_HP_HSL_ANNOT (PT_LOOS + 0x13) | ||
187 | #define PT_HP_STACK (PT_LOOS + 0x14) | ||
188 | |||
189 | #define PT_PARISC_ARCHEXT 0x70000000 | ||
190 | #define PT_PARISC_UNWIND 0x70000001 | ||
191 | |||
192 | /* Legal values for p_flags field of Elf32_Phdr/Elf64_Phdr. */ | ||
193 | |||
194 | #define PF_PARISC_SBP 0x08000000 | ||
195 | |||
196 | #define PF_HP_PAGE_SIZE 0x00100000 | ||
197 | #define PF_HP_FAR_SHARED 0x00200000 | ||
198 | #define PF_HP_NEAR_SHARED 0x00400000 | ||
199 | #define PF_HP_CODE 0x01000000 | ||
200 | #define PF_HP_MODIFY 0x02000000 | ||
201 | #define PF_HP_LAZYSWAP 0x04000000 | ||
202 | #define PF_HP_SBP 0x08000000 | ||
203 | |||
204 | /* | ||
205 | * The following definitions are those for 32-bit ELF binaries on a 32-bit | ||
206 | * kernel and for 64-bit binaries on a 64-bit kernel. To run 32-bit binaries | ||
207 | * on a 64-bit kernel, arch/parisc/kernel/binfmt_elf32.c defines these | ||
208 | * macros appropriately and then #includes binfmt_elf.c, which then includes | ||
209 | * this file. | ||
210 | */ | ||
211 | #ifndef ELF_CLASS | ||
212 | |||
213 | /* | ||
214 | * This is used to ensure we don't load something for the wrong architecture. | ||
215 | * | ||
216 | * Note that this header file is used by default in fs/binfmt_elf.c. So | ||
217 | * the following macros are for the default case. However, for the 64 | ||
218 | * bit kernel we also support 32 bit parisc binaries. To do that | ||
219 | * arch/parisc/kernel/binfmt_elf32.c defines its own set of these | ||
220 | * macros, and then it includes fs/binfmt_elf.c to provide an alternate | ||
221 | * elf binary handler for 32 bit binaries (on the 64 bit kernel). | ||
222 | */ | ||
223 | #ifdef CONFIG_64BIT | ||
224 | #define ELF_CLASS ELFCLASS64 | ||
225 | #else | ||
226 | #define ELF_CLASS ELFCLASS32 | ||
227 | #endif | ||
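For the 32-bit-on-64-bit case the comment above describes, the compat handler redefines the class-dependent macros before pulling in the generic ELF loader a second time. A heavily condensed sketch of that arrangement (the real arch/parisc/kernel/binfmt_elf32.c overrides considerably more than shown here):

	/* binfmt_elf32.c-style compat handler, condensed for illustration */
	#define ELF_CLASS	ELFCLASS32	/* seen by the #ifndef block above */
	/* ... 32-bit elf_greg_t, ELF_CORE_COPY_REGS and personality overrides ... */
	#include "../../../fs/binfmt_elf.c"	/* compile the generic loader again */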
228 | |||
229 | typedef unsigned long elf_greg_t; | ||
230 | |||
231 | /* | ||
232 | * This yields a string that ld.so will use to load implementation | ||
233 | * specific libraries for optimization. This is more specific in | ||
234 | * intent than poking at uname or /proc/cpuinfo. | ||
235 | */ | ||
236 | |||
237 | #define ELF_PLATFORM ("PARISC\0") | ||
238 | |||
239 | #define SET_PERSONALITY(ex) \ | ||
240 | current->personality = PER_LINUX; \ | ||
241 | current->thread.map_base = DEFAULT_MAP_BASE; \ | ||
242 | current->thread.task_size = DEFAULT_TASK_SIZE \ | ||
243 | |||
244 | /* | ||
245 | * Fill in general registers in a core dump. This saves pretty | ||
246 | * much the same registers as hp-ux, although in a different order. | ||
247 | * Registers marked # below are not currently saved in pt_regs, so | ||
248 | * we use their current values here. | ||
249 | * | ||
250 | * gr0..gr31 | ||
251 | * sr0..sr7 | ||
252 | * iaoq0..iaoq1 | ||
253 | * iasq0..iasq1 | ||
254 | * cr11 (sar) | ||
255 | * cr19 (iir) | ||
256 | * cr20 (isr) | ||
257 | * cr21 (ior) | ||
258 | * # cr22 (ipsw) | ||
259 | * # cr0 (recovery counter) | ||
260 | * # cr24..cr31 (temporary registers) | ||
261 | * # cr8,9,12,13 (protection IDs) | ||
262 | * # cr10 (scr/ccr) | ||
263 | * # cr15 (ext int enable mask) | ||
264 | * | ||
265 | */ | ||
266 | |||
267 | #define ELF_CORE_COPY_REGS(dst, pt) \ | ||
268 | memset(dst, 0, sizeof(dst)); /* don't leak any "random" bits */ \ | ||
269 | memcpy(dst + 0, pt->gr, 32 * sizeof(elf_greg_t)); \ | ||
270 | memcpy(dst + 32, pt->sr, 8 * sizeof(elf_greg_t)); \ | ||
271 | memcpy(dst + 40, pt->iaoq, 2 * sizeof(elf_greg_t)); \ | ||
272 | memcpy(dst + 42, pt->iasq, 2 * sizeof(elf_greg_t)); \ | ||
273 | dst[44] = pt->sar; dst[45] = pt->iir; \ | ||
274 | dst[46] = pt->isr; dst[47] = pt->ior; \ | ||
275 | dst[48] = mfctl(22); dst[49] = mfctl(0); \ | ||
276 | dst[50] = mfctl(24); dst[51] = mfctl(25); \ | ||
277 | dst[52] = mfctl(26); dst[53] = mfctl(27); \ | ||
278 | dst[54] = mfctl(28); dst[55] = mfctl(29); \ | ||
279 | dst[56] = mfctl(30); dst[57] = mfctl(31); \ | ||
280 | dst[58] = mfctl( 8); dst[59] = mfctl( 9); \ | ||
281 | dst[60] = mfctl(12); dst[61] = mfctl(13); \ | ||
282 | dst[62] = mfctl(10); dst[63] = mfctl(15); | ||
283 | |||
284 | #endif /* ! ELF_CLASS */ | ||
285 | |||
286 | #define ELF_NGREG 80 /* We only need 64 at present, but leave space | ||
287 | for expansion. */ | ||
288 | typedef elf_greg_t elf_gregset_t[ELF_NGREG]; | ||
289 | |||
290 | #define ELF_NFPREG 32 | ||
291 | typedef double elf_fpreg_t; | ||
292 | typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; | ||
293 | |||
294 | struct task_struct; | ||
295 | |||
296 | extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *); | ||
297 | #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs) | ||
298 | |||
299 | struct pt_regs; /* forward declaration... */ | ||
300 | |||
301 | |||
302 | #define elf_check_arch(x) ((x)->e_machine == EM_PARISC && (x)->e_ident[EI_CLASS] == ELF_CLASS) | ||
303 | |||
304 | /* | ||
305 | * These are used to set parameters in the core dumps. | ||
306 | */ | ||
307 | #define ELF_DATA ELFDATA2MSB | ||
308 | #define ELF_ARCH EM_PARISC | ||
309 | #define ELF_OSABI ELFOSABI_LINUX | ||
310 | |||
311 | /* %r23 is set by ld.so to a pointer to a function which might be | ||
312 | registered using atexit. This provides a means for the dynamic | ||
313 | linker to call DT_FINI functions for shared libraries that have | ||
314 | been loaded before the code runs. | ||
315 | |||
316 | So that we can use the same startup file with static executables, | ||
317 | we start programs with a value of 0 to indicate that there is no | ||
318 | such function. */ | ||
319 | #define ELF_PLAT_INIT(_r, load_addr) _r->gr[23] = 0 | ||
320 | |||
321 | #define USE_ELF_CORE_DUMP | ||
322 | #define ELF_EXEC_PAGESIZE 4096 | ||
323 | |||
324 | /* This is the location that an ET_DYN program is loaded if exec'ed. Typical | ||
325 | use of this is to invoke "./ld.so someprog" to test out a new version of | ||
326 | the loader. We need to make sure that it is out of the way of the program | ||
327 | that it will "exec", and that there is sufficient room for the brk. | ||
328 | |||
329 | (2 * TASK_SIZE / 3) turns into something undefined when run through a | ||
330 | 32 bit preprocessor and in some cases results in the kernel trying to map | ||
331 | ld.so to the kernel virtual base. Use a sane value instead. /Jes | ||
332 | */ | ||
333 | |||
334 | #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000) | ||
335 | |||
336 | /* This yields a mask that user programs can use to figure out what | ||
337 | instruction set this CPU supports. This could be done in user space, | ||
338 | but it's not easy, and we've already done it here. */ | ||
339 | |||
340 | #define ELF_HWCAP 0 | ||
341 | |||
342 | #endif | ||
diff --git a/arch/parisc/include/asm/emergency-restart.h b/arch/parisc/include/asm/emergency-restart.h new file mode 100644 index 000000000000..108d8c48e42e --- /dev/null +++ b/arch/parisc/include/asm/emergency-restart.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef _ASM_EMERGENCY_RESTART_H | ||
2 | #define _ASM_EMERGENCY_RESTART_H | ||
3 | |||
4 | #include <asm-generic/emergency-restart.h> | ||
5 | |||
6 | #endif /* _ASM_EMERGENCY_RESTART_H */ | ||
diff --git a/arch/parisc/include/asm/errno.h b/arch/parisc/include/asm/errno.h new file mode 100644 index 000000000000..e2f3ddc796be --- /dev/null +++ b/arch/parisc/include/asm/errno.h | |||
@@ -0,0 +1,124 @@ | |||
1 | #ifndef _PARISC_ERRNO_H | ||
2 | #define _PARISC_ERRNO_H | ||
3 | |||
4 | #include <asm-generic/errno-base.h> | ||
5 | |||
6 | #define ENOMSG 35 /* No message of desired type */ | ||
7 | #define EIDRM 36 /* Identifier removed */ | ||
8 | #define ECHRNG 37 /* Channel number out of range */ | ||
9 | #define EL2NSYNC 38 /* Level 2 not synchronized */ | ||
10 | #define EL3HLT 39 /* Level 3 halted */ | ||
11 | #define EL3RST 40 /* Level 3 reset */ | ||
12 | #define ELNRNG 41 /* Link number out of range */ | ||
13 | #define EUNATCH 42 /* Protocol driver not attached */ | ||
14 | #define ENOCSI 43 /* No CSI structure available */ | ||
15 | #define EL2HLT 44 /* Level 2 halted */ | ||
16 | #define EDEADLK 45 /* Resource deadlock would occur */ | ||
17 | #define EDEADLOCK EDEADLK | ||
18 | #define ENOLCK 46 /* No record locks available */ | ||
19 | #define EILSEQ 47 /* Illegal byte sequence */ | ||
20 | |||
21 | #define ENONET 50 /* Machine is not on the network */ | ||
22 | #define ENODATA 51 /* No data available */ | ||
23 | #define ETIME 52 /* Timer expired */ | ||
24 | #define ENOSR 53 /* Out of streams resources */ | ||
25 | #define ENOSTR 54 /* Device not a stream */ | ||
26 | #define ENOPKG 55 /* Package not installed */ | ||
27 | |||
28 | #define ENOLINK 57 /* Link has been severed */ | ||
29 | #define EADV 58 /* Advertise error */ | ||
30 | #define ESRMNT 59 /* Srmount error */ | ||
31 | #define ECOMM 60 /* Communication error on send */ | ||
32 | #define EPROTO 61 /* Protocol error */ | ||
33 | |||
34 | #define EMULTIHOP 64 /* Multihop attempted */ | ||
35 | |||
36 | #define EDOTDOT 66 /* RFS specific error */ | ||
37 | #define EBADMSG 67 /* Not a data message */ | ||
38 | #define EUSERS 68 /* Too many users */ | ||
39 | #define EDQUOT 69 /* Quota exceeded */ | ||
40 | #define ESTALE 70 /* Stale NFS file handle */ | ||
41 | #define EREMOTE 71 /* Object is remote */ | ||
42 | #define EOVERFLOW 72 /* Value too large for defined data type */ | ||
43 | |||
44 | /* these errnos are defined by Linux but not HPUX. */ | ||
45 | |||
46 | #define EBADE 160 /* Invalid exchange */ | ||
47 | #define EBADR 161 /* Invalid request descriptor */ | ||
48 | #define EXFULL 162 /* Exchange full */ | ||
49 | #define ENOANO 163 /* No anode */ | ||
50 | #define EBADRQC 164 /* Invalid request code */ | ||
51 | #define EBADSLT 165 /* Invalid slot */ | ||
52 | #define EBFONT 166 /* Bad font file format */ | ||
53 | #define ENOTUNIQ 167 /* Name not unique on network */ | ||
54 | #define EBADFD 168 /* File descriptor in bad state */ | ||
55 | #define EREMCHG 169 /* Remote address changed */ | ||
56 | #define ELIBACC 170 /* Can not access a needed shared library */ | ||
57 | #define ELIBBAD 171 /* Accessing a corrupted shared library */ | ||
58 | #define ELIBSCN 172 /* .lib section in a.out corrupted */ | ||
59 | #define ELIBMAX 173 /* Attempting to link in too many shared libraries */ | ||
60 | #define ELIBEXEC 174 /* Cannot exec a shared library directly */ | ||
61 | #define ERESTART 175 /* Interrupted system call should be restarted */ | ||
62 | #define ESTRPIPE 176 /* Streams pipe error */ | ||
63 | #define EUCLEAN 177 /* Structure needs cleaning */ | ||
64 | #define ENOTNAM 178 /* Not a XENIX named type file */ | ||
65 | #define ENAVAIL 179 /* No XENIX semaphores available */ | ||
66 | #define EISNAM 180 /* Is a named type file */ | ||
67 | #define EREMOTEIO 181 /* Remote I/O error */ | ||
68 | #define ENOMEDIUM 182 /* No medium found */ | ||
69 | #define EMEDIUMTYPE 183 /* Wrong medium type */ | ||
70 | #define ENOKEY 184 /* Required key not available */ | ||
71 | #define EKEYEXPIRED 185 /* Key has expired */ | ||
72 | #define EKEYREVOKED 186 /* Key has been revoked */ | ||
73 | #define EKEYREJECTED 187 /* Key was rejected by service */ | ||
74 | |||
75 | /* We now return you to your regularly scheduled HPUX. */ | ||
76 | |||
77 | #define ENOSYM 215 /* symbol does not exist in executable */ | ||
78 | #define ENOTSOCK 216 /* Socket operation on non-socket */ | ||
79 | #define EDESTADDRREQ 217 /* Destination address required */ | ||
80 | #define EMSGSIZE 218 /* Message too long */ | ||
81 | #define EPROTOTYPE 219 /* Protocol wrong type for socket */ | ||
82 | #define ENOPROTOOPT 220 /* Protocol not available */ | ||
83 | #define EPROTONOSUPPORT 221 /* Protocol not supported */ | ||
84 | #define ESOCKTNOSUPPORT 222 /* Socket type not supported */ | ||
85 | #define EOPNOTSUPP 223 /* Operation not supported on transport endpoint */ | ||
86 | #define EPFNOSUPPORT 224 /* Protocol family not supported */ | ||
87 | #define EAFNOSUPPORT 225 /* Address family not supported by protocol */ | ||
88 | #define EADDRINUSE 226 /* Address already in use */ | ||
89 | #define EADDRNOTAVAIL 227 /* Cannot assign requested address */ | ||
90 | #define ENETDOWN 228 /* Network is down */ | ||
91 | #define ENETUNREACH 229 /* Network is unreachable */ | ||
92 | #define ENETRESET 230 /* Network dropped connection because of reset */ | ||
93 | #define ECONNABORTED 231 /* Software caused connection abort */ | ||
94 | #define ECONNRESET 232 /* Connection reset by peer */ | ||
95 | #define ENOBUFS 233 /* No buffer space available */ | ||
96 | #define EISCONN 234 /* Transport endpoint is already connected */ | ||
97 | #define ENOTCONN 235 /* Transport endpoint is not connected */ | ||
98 | #define ESHUTDOWN 236 /* Cannot send after transport endpoint shutdown */ | ||
99 | #define ETOOMANYREFS 237 /* Too many references: cannot splice */ | ||
100 | #define EREFUSED ECONNREFUSED /* for HP's NFS apparently */ | ||
101 | #define ETIMEDOUT 238 /* Connection timed out */ | ||
102 | #define ECONNREFUSED 239 /* Connection refused */ | ||
103 | #define EREMOTERELEASE 240 /* Remote peer released connection */ | ||
104 | #define EHOSTDOWN 241 /* Host is down */ | ||
105 | #define EHOSTUNREACH 242 /* No route to host */ | ||
106 | |||
107 | #define EALREADY 244 /* Operation already in progress */ | ||
108 | #define EINPROGRESS 245 /* Operation now in progress */ | ||
109 | #define EWOULDBLOCK 246 /* Operation would block (Linux returns EAGAIN) */ | ||
110 | #define ENOTEMPTY 247 /* Directory not empty */ | ||
111 | #define ENAMETOOLONG 248 /* File name too long */ | ||
112 | #define ELOOP 249 /* Too many symbolic links encountered */ | ||
113 | #define ENOSYS 251 /* Function not implemented */ | ||
114 | |||
115 | #define ENOTSUP 252 /* Function not implemented (POSIX.4 / HPUX) */ | ||
116 | #define ECANCELLED 253 /* aio request was canceled before completion (POSIX.4 / HPUX) */ | ||
117 | #define ECANCELED ECANCELLED /* SUSv3 and Solaris want one 'L' */ | ||
118 | |||
119 | /* for robust mutexes */ | ||
120 | #define EOWNERDEAD 254 /* Owner died */ | ||
121 | #define ENOTRECOVERABLE 255 /* State not recoverable */ | ||
122 | |||
123 | |||
124 | #endif | ||
diff --git a/arch/parisc/include/asm/fb.h b/arch/parisc/include/asm/fb.h new file mode 100644 index 000000000000..4d503a023ab2 --- /dev/null +++ b/arch/parisc/include/asm/fb.h | |||
@@ -0,0 +1,19 @@ | |||
1 | #ifndef _ASM_FB_H_ | ||
2 | #define _ASM_FB_H_ | ||
3 | |||
4 | #include <linux/fb.h> | ||
5 | #include <linux/fs.h> | ||
6 | #include <asm/page.h> | ||
7 | |||
8 | static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, | ||
9 | unsigned long off) | ||
10 | { | ||
11 | pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; | ||
12 | } | ||
13 | |||
14 | static inline int fb_is_primary_device(struct fb_info *info) | ||
15 | { | ||
16 | return 0; | ||
17 | } | ||
18 | |||
19 | #endif /* _ASM_FB_H_ */ | ||
diff --git a/arch/parisc/include/asm/fcntl.h b/arch/parisc/include/asm/fcntl.h new file mode 100644 index 000000000000..1e1c824764ee --- /dev/null +++ b/arch/parisc/include/asm/fcntl.h | |||
@@ -0,0 +1,39 @@ | |||
1 | #ifndef _PARISC_FCNTL_H | ||
2 | #define _PARISC_FCNTL_H | ||
3 | |||
4 | /* open/fcntl - O_SYNC is only implemented on block devices and on files | ||
5 | located on an ext2 file system */ | ||
6 | #define O_APPEND 000000010 | ||
7 | #define O_BLKSEEK 000000100 /* HPUX only */ | ||
8 | #define O_CREAT 000000400 /* not fcntl */ | ||
9 | #define O_EXCL 000002000 /* not fcntl */ | ||
10 | #define O_LARGEFILE 000004000 | ||
11 | #define O_SYNC 000100000 | ||
12 | #define O_NONBLOCK 000200004 /* HPUX has separate NDELAY & NONBLOCK */ | ||
13 | #define O_NOCTTY 000400000 /* not fcntl */ | ||
14 | #define O_DSYNC 001000000 /* HPUX only */ | ||
15 | #define O_RSYNC 002000000 /* HPUX only */ | ||
16 | #define O_NOATIME 004000000 | ||
17 | #define O_CLOEXEC 010000000 /* set close_on_exec */ | ||
18 | |||
19 | #define O_DIRECTORY 000010000 /* must be a directory */ | ||
20 | #define O_NOFOLLOW 000000200 /* don't follow links */ | ||
21 | #define O_INVISIBLE 004000000 /* invisible I/O, for DMAPI/XDSM */ | ||
22 | |||
23 | #define F_GETLK64 8 | ||
24 | #define F_SETLK64 9 | ||
25 | #define F_SETLKW64 10 | ||
26 | |||
27 | #define F_GETOWN 11 /* for sockets. */ | ||
28 | #define F_SETOWN 12 /* for sockets. */ | ||
29 | #define F_SETSIG 13 /* for sockets. */ | ||
30 | #define F_GETSIG 14 /* for sockets. */ | ||
31 | |||
32 | /* for posix fcntl() and lockf() */ | ||
33 | #define F_RDLCK 01 | ||
34 | #define F_WRLCK 02 | ||
35 | #define F_UNLCK 03 | ||
36 | |||
37 | #include <asm-generic/fcntl.h> | ||
38 | |||
39 | #endif | ||
diff --git a/arch/parisc/include/asm/fixmap.h b/arch/parisc/include/asm/fixmap.h new file mode 100644 index 000000000000..de3fe3a18229 --- /dev/null +++ b/arch/parisc/include/asm/fixmap.h | |||
@@ -0,0 +1,30 @@ | |||
1 | #ifndef _ASM_FIXMAP_H | ||
2 | #define _ASM_FIXMAP_H | ||
3 | |||
4 | /* | ||
5 | * This file defines the locations of the fixed mappings on parisc. | ||
6 | * | ||
7 | * All of the values in this file are machine virtual addresses. | ||
8 | * | ||
9 | * All of the values in this file must be <4GB (because of assembly | ||
10 | * loading restrictions). If you place this region anywhere above | ||
11 | * __PAGE_OFFSET, you must adjust the memory map accordingly */ | ||
12 | |||
13 | /* The alias region is used in kernel space to do copy/clear to or | ||
14 | * from areas congruently mapped with user space. It is 8MB large | ||
15 | * and must be 16MB aligned */ | ||
16 | #define TMPALIAS_MAP_START ((__PAGE_OFFSET) - 16*1024*1024) | ||
17 | /* This is the kernel area for all maps (vmalloc, dma etc.) most | ||
18 | * usually, it extends up to TMPALIAS_MAP_START. Virtual addresses | ||
19 | * 0..GATEWAY_PAGE_SIZE are reserved for the gateway page */ | ||
20 | #define KERNEL_MAP_START (GATEWAY_PAGE_SIZE) | ||
21 | #define KERNEL_MAP_END (TMPALIAS_MAP_START) | ||
22 | |||
23 | #ifndef __ASSEMBLY__ | ||
24 | extern void *vmalloc_start; | ||
25 | #define PCXL_DMA_MAP_SIZE (8*1024*1024) | ||
26 | #define VMALLOC_START ((unsigned long)vmalloc_start) | ||
27 | #define VMALLOC_END (KERNEL_MAP_END) | ||
28 | #endif /*__ASSEMBLY__*/ | ||
29 | |||
30 | #endif /*_ASM_FIXMAP_H*/ | ||
diff --git a/arch/parisc/include/asm/floppy.h b/arch/parisc/include/asm/floppy.h new file mode 100644 index 000000000000..4ca69f558fae --- /dev/null +++ b/arch/parisc/include/asm/floppy.h | |||
@@ -0,0 +1,271 @@ | |||
1 | /* Architecture specific parts of the Floppy driver | ||
2 | * | ||
3 | * Linux/PA-RISC Project (http://www.parisc-linux.org/) | ||
4 | * Copyright (C) 2000 Matthew Wilcox (willy a debian . org) | ||
5 | * Copyright (C) 2000 Dave Kennedy | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | */ | ||
21 | #ifndef __ASM_PARISC_FLOPPY_H | ||
22 | #define __ASM_PARISC_FLOPPY_H | ||
23 | |||
24 | #include <linux/vmalloc.h> | ||
25 | |||
26 | |||
27 | /* | ||
28 | * The DMA channel used by the floppy controller cannot access data at | ||
29 | * addresses >= 16MB | ||
30 | * | ||
31 | * Went back to the 1MB limit, as some people had problems with the floppy | ||
32 | * driver otherwise. It doesn't matter much for performance anyway, as most | ||
33 | * floppy accesses go through the track buffer. | ||
34 | */ | ||
35 | #define _CROSS_64KB(a,s,vdma) \ | ||
36 | (!vdma && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64)) | ||
37 | |||
38 | #define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1) | ||
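The check above flags buffers whose first and last byte fall in different 64 KiB pages, since the ISA DMA controller cannot carry a single transfer across such a boundary. A standalone worked example of the same arithmetic, assuming K_64 is 0x10000 (64 KiB):

	#include <assert.h>

	static int crosses_64kb(unsigned long addr, unsigned long size)
	{
		return addr / 0x10000 != (addr + size - 1) / 0x10000;
	}

	static void cross_64kb_example(void)
	{
		assert(crosses_64kb(0x1fff0, 0x20));	/* 0x1fff0..0x2000f straddles 0x20000 */
		assert(!crosses_64kb(0x20000, 0x20));	/* 0x20000..0x2001f stays in one page */
	}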
39 | |||
40 | |||
41 | #define SW fd_routine[use_virtual_dma&1] | ||
42 | #define CSW fd_routine[can_use_virtual_dma & 1] | ||
43 | |||
44 | |||
45 | #define fd_inb(port) readb(port) | ||
46 | #define fd_outb(value, port) writeb(value, port) | ||
47 | |||
48 | #define fd_request_dma() CSW._request_dma(FLOPPY_DMA,"floppy") | ||
49 | #define fd_free_dma() CSW._free_dma(FLOPPY_DMA) | ||
50 | #define fd_enable_irq() enable_irq(FLOPPY_IRQ) | ||
51 | #define fd_disable_irq() disable_irq(FLOPPY_IRQ) | ||
52 | #define fd_free_irq() free_irq(FLOPPY_IRQ, NULL) | ||
53 | #define fd_get_dma_residue() SW._get_dma_residue(FLOPPY_DMA) | ||
54 | #define fd_dma_mem_alloc(size) SW._dma_mem_alloc(size) | ||
55 | #define fd_dma_setup(addr, size, mode, io) SW._dma_setup(addr, size, mode, io) | ||
56 | |||
57 | #define FLOPPY_CAN_FALLBACK_ON_NODMA | ||
58 | |||
59 | static int virtual_dma_count=0; | ||
60 | static int virtual_dma_residue=0; | ||
61 | static char *virtual_dma_addr=0; | ||
62 | static int virtual_dma_mode=0; | ||
63 | static int doing_pdma=0; | ||
64 | |||
65 | static void floppy_hardint(int irq, void *dev_id, struct pt_regs * regs) | ||
66 | { | ||
67 | register unsigned char st; | ||
68 | |||
69 | #undef TRACE_FLPY_INT | ||
70 | |||
71 | #ifdef TRACE_FLPY_INT | ||
72 | static int calls=0; | ||
73 | static int bytes=0; | ||
74 | static int dma_wait=0; | ||
75 | #endif | ||
76 | if (!doing_pdma) { | ||
77 | floppy_interrupt(irq, dev_id, regs); | ||
78 | return; | ||
79 | } | ||
80 | |||
81 | #ifdef TRACE_FLPY_INT | ||
82 | if(!calls) | ||
83 | bytes = virtual_dma_count; | ||
84 | #endif | ||
85 | |||
86 | { | ||
87 | register int lcount; | ||
88 | register char *lptr = virtual_dma_addr; | ||
89 | |||
90 | for (lcount = virtual_dma_count; lcount; lcount--) { | ||
91 | st = fd_inb(virtual_dma_port+4) & 0xa0 ; | ||
92 | if (st != 0xa0) | ||
93 | break; | ||
94 | if (virtual_dma_mode) { | ||
95 | fd_outb(*lptr, virtual_dma_port+5); | ||
96 | } else { | ||
97 | *lptr = fd_inb(virtual_dma_port+5); | ||
98 | } | ||
99 | lptr++; | ||
100 | } | ||
101 | virtual_dma_count = lcount; | ||
102 | virtual_dma_addr = lptr; | ||
103 | st = fd_inb(virtual_dma_port+4); | ||
104 | } | ||
105 | |||
106 | #ifdef TRACE_FLPY_INT | ||
107 | calls++; | ||
108 | #endif | ||
109 | if (st == 0x20) | ||
110 | return; | ||
111 | if (!(st & 0x20)) { | ||
112 | virtual_dma_residue += virtual_dma_count; | ||
113 | virtual_dma_count = 0; | ||
114 | #ifdef TRACE_FLPY_INT | ||
115 | printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n", | ||
116 | virtual_dma_count, virtual_dma_residue, calls, bytes, | ||
117 | dma_wait); | ||
118 | calls = 0; | ||
119 | dma_wait=0; | ||
120 | #endif | ||
121 | doing_pdma = 0; | ||
122 | floppy_interrupt(irq, dev_id, regs); | ||
123 | return; | ||
124 | } | ||
125 | #ifdef TRACE_FLPY_INT | ||
126 | if (!virtual_dma_count) | ||
127 | dma_wait++; | ||
128 | #endif | ||
129 | } | ||
130 | |||
131 | static void fd_disable_dma(void) | ||
132 | { | ||
133 | if(! (can_use_virtual_dma & 1)) | ||
134 | disable_dma(FLOPPY_DMA); | ||
135 | doing_pdma = 0; | ||
136 | virtual_dma_residue += virtual_dma_count; | ||
137 | virtual_dma_count=0; | ||
138 | } | ||
139 | |||
140 | static int vdma_request_dma(unsigned int dmanr, const char * device_id) | ||
141 | { | ||
142 | return 0; | ||
143 | } | ||
144 | |||
145 | static void vdma_nop(unsigned int dummy) | ||
146 | { | ||
147 | } | ||
148 | |||
149 | |||
150 | static int vdma_get_dma_residue(unsigned int dummy) | ||
151 | { | ||
152 | return virtual_dma_count + virtual_dma_residue; | ||
153 | } | ||
154 | |||
155 | |||
156 | static int fd_request_irq(void) | ||
157 | { | ||
158 | if(can_use_virtual_dma) | ||
159 | return request_irq(FLOPPY_IRQ, floppy_hardint, | ||
160 | IRQF_DISABLED, "floppy", NULL); | ||
161 | else | ||
162 | return request_irq(FLOPPY_IRQ, floppy_interrupt, | ||
163 | IRQF_DISABLED, "floppy", NULL); | ||
164 | } | ||
165 | |||
166 | static unsigned long dma_mem_alloc(unsigned long size) | ||
167 | { | ||
168 | return __get_dma_pages(GFP_KERNEL, get_order(size)); | ||
169 | } | ||
170 | |||
171 | |||
172 | static unsigned long vdma_mem_alloc(unsigned long size) | ||
173 | { | ||
174 | return (unsigned long) vmalloc(size); | ||
175 | |||
176 | } | ||
177 | |||
178 | #define nodma_mem_alloc(size) vdma_mem_alloc(size) | ||
179 | |||
180 | static void _fd_dma_mem_free(unsigned long addr, unsigned long size) | ||
181 | { | ||
182 | if((unsigned int) addr >= (unsigned int) high_memory) | ||
183 | return vfree((void *)addr); | ||
184 | else | ||
185 | free_pages(addr, get_order(size)); | ||
186 | } | ||
187 | |||
188 | #define fd_dma_mem_free(addr, size) _fd_dma_mem_free(addr, size) | ||
189 | |||
190 | static void _fd_chose_dma_mode(char *addr, unsigned long size) | ||
191 | { | ||
192 | if(can_use_virtual_dma == 2) { | ||
193 | if((unsigned int) addr >= (unsigned int) high_memory || | ||
194 | virt_to_bus(addr) >= 0x1000000 || | ||
195 | _CROSS_64KB(addr, size, 0)) | ||
196 | use_virtual_dma = 1; | ||
197 | else | ||
198 | use_virtual_dma = 0; | ||
199 | } else { | ||
200 | use_virtual_dma = can_use_virtual_dma & 1; | ||
201 | } | ||
202 | } | ||
203 | |||
204 | #define fd_chose_dma_mode(addr, size) _fd_chose_dma_mode(addr, size) | ||
205 | |||
206 | |||
207 | static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io) | ||
208 | { | ||
209 | doing_pdma = 1; | ||
210 | virtual_dma_port = io; | ||
211 | virtual_dma_mode = (mode == DMA_MODE_WRITE); | ||
212 | virtual_dma_addr = addr; | ||
213 | virtual_dma_count = size; | ||
214 | virtual_dma_residue = 0; | ||
215 | return 0; | ||
216 | } | ||
217 | |||
218 | static int hard_dma_setup(char *addr, unsigned long size, int mode, int io) | ||
219 | { | ||
220 | #ifdef FLOPPY_SANITY_CHECK | ||
221 | if (CROSS_64KB(addr, size)) { | ||
222 | printk("DMA crossing 64-K boundary %p-%p\n", addr, addr+size); | ||
223 | return -1; | ||
224 | } | ||
225 | #endif | ||
226 | /* actual, physical DMA */ | ||
227 | doing_pdma = 0; | ||
228 | clear_dma_ff(FLOPPY_DMA); | ||
229 | set_dma_mode(FLOPPY_DMA,mode); | ||
230 | set_dma_addr(FLOPPY_DMA,virt_to_bus(addr)); | ||
231 | set_dma_count(FLOPPY_DMA,size); | ||
232 | enable_dma(FLOPPY_DMA); | ||
233 | return 0; | ||
234 | } | ||
235 | |||
236 | static struct fd_routine_l { | ||
237 | int (*_request_dma)(unsigned int dmanr, const char * device_id); | ||
238 | void (*_free_dma)(unsigned int dmanr); | ||
239 | int (*_get_dma_residue)(unsigned int dummy); | ||
240 | unsigned long (*_dma_mem_alloc) (unsigned long size); | ||
241 | int (*_dma_setup)(char *addr, unsigned long size, int mode, int io); | ||
242 | } fd_routine[] = { | ||
243 | { | ||
244 | request_dma, | ||
245 | free_dma, | ||
246 | get_dma_residue, | ||
247 | dma_mem_alloc, | ||
248 | hard_dma_setup | ||
249 | }, | ||
250 | { | ||
251 | vdma_request_dma, | ||
252 | vdma_nop, | ||
253 | vdma_get_dma_residue, | ||
254 | vdma_mem_alloc, | ||
255 | vdma_dma_setup | ||
256 | } | ||
257 | }; | ||
258 | |||
259 | |||
260 | static int FDC1 = 0x3f0; /* Lies. Floppy controller is memory mapped, not io mapped */ | ||
261 | static int FDC2 = -1; | ||
262 | |||
263 | #define FLOPPY0_TYPE 0 | ||
264 | #define FLOPPY1_TYPE 0 | ||
265 | |||
266 | #define N_FDC 1 | ||
267 | #define N_DRIVE 8 | ||
268 | |||
269 | #define EXTRA_FLOPPY_PARAMS | ||
270 | |||
271 | #endif /* __ASM_PARISC_FLOPPY_H */ | ||
diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h new file mode 100644 index 000000000000..0c705c3a55ef --- /dev/null +++ b/arch/parisc/include/asm/futex.h | |||
@@ -0,0 +1,77 @@ | |||
1 | #ifndef _ASM_PARISC_FUTEX_H | ||
2 | #define _ASM_PARISC_FUTEX_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | |||
6 | #include <linux/futex.h> | ||
7 | #include <linux/uaccess.h> | ||
8 | #include <asm/errno.h> | ||
9 | |||
10 | static inline int | ||
11 | futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | ||
12 | { | ||
13 | int op = (encoded_op >> 28) & 7; | ||
14 | int cmp = (encoded_op >> 24) & 15; | ||
15 | int oparg = (encoded_op << 8) >> 20; | ||
16 | int cmparg = (encoded_op << 20) >> 20; | ||
17 | int oldval = 0, ret; | ||
18 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) | ||
19 | oparg = 1 << oparg; | ||
20 | |||
21 | if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) | ||
22 | return -EFAULT; | ||
23 | |||
24 | pagefault_disable(); | ||
25 | |||
26 | switch (op) { | ||
27 | case FUTEX_OP_SET: | ||
28 | case FUTEX_OP_ADD: | ||
29 | case FUTEX_OP_OR: | ||
30 | case FUTEX_OP_ANDN: | ||
31 | case FUTEX_OP_XOR: | ||
32 | default: | ||
33 | ret = -ENOSYS; | ||
34 | } | ||
35 | |||
36 | pagefault_enable(); | ||
37 | |||
38 | if (!ret) { | ||
39 | switch (cmp) { | ||
40 | case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; | ||
41 | case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; | ||
42 | case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; | ||
43 | case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; | ||
44 | case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; | ||
45 | case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; | ||
46 | default: ret = -ENOSYS; | ||
47 | } | ||
48 | } | ||
49 | return ret; | ||
50 | } | ||
51 | |||
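The decode at the top of futex_atomic_op_inuser() fixes the layout of encoded_op: the operation in bits 28..31 (bit 31 doubling as the FUTEX_OP_OPARG_SHIFT flag), the comparison in bits 24..27, and two sign-extended 12-bit arguments in bits 12..23 and 0..11. A standalone sketch that mirrors exactly those shifts; it is illustrative only, not a kernel helper:

	#include <stdint.h>

	struct futex_op_fields {
		int op, cmp, oparg, cmparg;
	};

	static struct futex_op_fields decode_futex_op(int32_t encoded_op)
	{
		struct futex_op_fields f;

		f.op     = (encoded_op >> 28) & 7;
		f.cmp    = (encoded_op >> 24) & 15;
		f.oparg  = (encoded_op << 8) >> 20;	/* arithmetic shift sign-extends */
		f.cmparg = (encoded_op << 20) >> 20;
		return f;
	}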
52 | /* Non-atomic version */ | ||
53 | static inline int | ||
54 | futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | ||
55 | { | ||
56 | int err = 0; | ||
57 | int uval; | ||
58 | |||
59 | /* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is | ||
60 | * our gateway page, and causes no end of trouble... | ||
61 | */ | ||
62 | if (segment_eq(KERNEL_DS, get_fs()) && !uaddr) | ||
63 | return -EFAULT; | ||
64 | |||
65 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | ||
66 | return -EFAULT; | ||
67 | |||
68 | err = get_user(uval, uaddr); | ||
69 | if (err) return -EFAULT; | ||
70 | if (uval == oldval) | ||
71 | err = put_user(newval, uaddr); | ||
72 | if (err) return -EFAULT; | ||
73 | return uval; | ||
74 | } | ||
75 | |||
76 | #endif /*__KERNEL__*/ | ||
77 | #endif /*_ASM_PARISC_FUTEX_H*/ | ||
diff --git a/arch/parisc/include/asm/grfioctl.h b/arch/parisc/include/asm/grfioctl.h new file mode 100644 index 000000000000..671e06042b40 --- /dev/null +++ b/arch/parisc/include/asm/grfioctl.h | |||
@@ -0,0 +1,113 @@ | |||
1 | /* Architecture specific parts of HP's STI (framebuffer) driver. | ||
2 | * Structures are HP-UX compatible for XFree86 usage. | ||
3 | * | ||
4 | * Linux/PA-RISC Project (http://www.parisc-linux.org/) | ||
5 | * Copyright (C) 2001 Helge Deller (deller a parisc-linux org) | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | */ | ||
21 | |||
22 | #ifndef __ASM_PARISC_GRFIOCTL_H | ||
23 | #define __ASM_PARISC_GRFIOCTL_H | ||
24 | |||
25 | /* upper 32 bits of graphics id (HP/UX identifier) */ | ||
26 | |||
27 | #define GRFGATOR 8 | ||
28 | #define S9000_ID_S300 9 | ||
29 | #define GRFBOBCAT 9 | ||
30 | #define GRFCATSEYE 9 | ||
31 | #define S9000_ID_98720 10 | ||
32 | #define GRFRBOX 10 | ||
33 | #define S9000_ID_98550 11 | ||
34 | #define GRFFIREEYE 11 | ||
35 | #define S9000_ID_A1096A 12 | ||
36 | #define GRFHYPERION 12 | ||
37 | #define S9000_ID_FRI 13 | ||
38 | #define S9000_ID_98730 14 | ||
39 | #define GRFDAVINCI 14 | ||
40 | #define S9000_ID_98705 0x26C08070 /* Tigershark */ | ||
41 | #define S9000_ID_98736 0x26D148AB | ||
42 | #define S9000_ID_A1659A 0x26D1482A /* CRX 8 plane color (=ELK) */ | ||
43 | #define S9000_ID_ELK S9000_ID_A1659A | ||
44 | #define S9000_ID_A1439A 0x26D148EE /* CRX24 = CRX+ (24-plane color) */ | ||
45 | #define S9000_ID_A1924A 0x26D1488C /* GRX gray-scale */ | ||
46 | #define S9000_ID_ELM S9000_ID_A1924A | ||
47 | #define S9000_ID_98765 0x27480DEF | ||
48 | #define S9000_ID_ELK_768 0x27482101 | ||
49 | #define S9000_ID_STINGER 0x27A4A402 | ||
50 | #define S9000_ID_TIMBER 0x27F12392 /* Bushmaster (710) Graphics */ | ||
51 | #define S9000_ID_TOMCAT 0x27FCCB6D /* dual-headed ELK (Dual CRX) */ | ||
52 | #define S9000_ID_ARTIST 0x2B4DED6D /* Artist (Gecko/712 & 715) onboard Graphics */ | ||
53 | #define S9000_ID_HCRX 0x2BCB015A /* Hyperdrive/Hyperbowl (A4071A) Graphics */ | ||
54 | #define CRX24_OVERLAY_PLANES 0x920825AA /* Overlay planes on CRX24 */ | ||
55 | |||
56 | #define CRT_ID_ELK_1024 S9000_ID_ELK_768 /* Elk 1024x768 CRX */ | ||
57 | #define CRT_ID_ELK_1280 S9000_ID_A1659A /* Elk 1280x1024 CRX */ | ||
58 | #define CRT_ID_ELK_1024DB 0x27849CA5 /* Elk 1024x768 double buffer */ | ||
59 | #define CRT_ID_ELK_GS S9000_ID_A1924A /* Elk 1280x1024 GreyScale */ | ||
60 | #define CRT_ID_CRX24 S9000_ID_A1439A /* Piranha */ | ||
61 | #define CRT_ID_VISUALIZE_EG 0x2D08C0A7 /* Graffiti, A4450A (built-in B132+/B160L) */ | ||
62 | #define CRT_ID_THUNDER 0x2F23E5FC /* Thunder 1 VISUALIZE 48*/ | ||
63 | #define CRT_ID_THUNDER2 0x2F8D570E /* Thunder 2 VISUALIZE 48 XP*/ | ||
64 | #define CRT_ID_HCRX S9000_ID_HCRX /* Hyperdrive HCRX */ | ||
65 | #define CRT_ID_CRX48Z S9000_ID_STINGER /* Stinger */ | ||
66 | #define CRT_ID_DUAL_CRX S9000_ID_TOMCAT /* Tomcat */ | ||
67 | #define CRT_ID_PVRX S9000_ID_98705 /* Tigershark */ | ||
68 | #define CRT_ID_TIMBER S9000_ID_TIMBER /* Timber (710 builtin) */ | ||
69 | #define CRT_ID_TVRX S9000_ID_98765 /* TVRX (gto/falcon) */ | ||
70 | #define CRT_ID_ARTIST S9000_ID_ARTIST /* Artist */ | ||
71 | #define CRT_ID_SUMMIT 0x2FC1066B /* Summit FX2, FX4, FX6 ... */ | ||
72 | #define CRT_ID_LEGO 0x35ACDA30 /* Lego FX5, FX10 ... */ | ||
73 | #define CRT_ID_PINNACLE 0x35ACDA16 /* Pinnacle FXe */ | ||
74 | |||
75 | /* structure for ioctl(GCDESCRIBE) */ | ||
76 | |||
77 | #define gaddr_t unsigned long /* FIXME: PA2.0 (64bit) portable ? */ | ||
78 | |||
79 | struct grf_fbinfo { | ||
80 | unsigned int id; /* upper 32 bits of graphics id */ | ||
81 | unsigned int mapsize; /* mapped size of framebuffer */ | ||
82 | unsigned int dwidth, dlength;/* x and y sizes */ | ||
83 | unsigned int width, length; /* total x and total y size */ | ||
84 | unsigned int xlen; /* x pitch size */ | ||
85 | unsigned int bpp, bppu; /* bits per pixel and used bpp */ | ||
86 | unsigned int npl, nplbytes; /* # of planes and bytes per plane */ | ||
87 | char name[32]; /* name of the device (from ROM) */ | ||
88 | unsigned int attr; /* attributes */ | ||
89 | gaddr_t fbbase, regbase;/* framebuffer and register base addr */ | ||
90 | gaddr_t regions[6]; /* region bases */ | ||
91 | }; | ||
92 | |||
93 | #define GCID _IOR('G', 0, int) | ||
94 | #define GCON _IO('G', 1) | ||
95 | #define GCOFF _IO('G', 2) | ||
96 | #define GCAON _IO('G', 3) | ||
97 | #define GCAOFF _IO('G', 4) | ||
98 | #define GCMAP _IOWR('G', 5, int) | ||
99 | #define GCUNMAP _IOWR('G', 6, int) | ||
100 | #define GCMAP_HPUX _IO('G', 5) | ||
101 | #define GCUNMAP_HPUX _IO('G', 6) | ||
102 | #define GCLOCK _IO('G', 7) | ||
103 | #define GCUNLOCK _IO('G', 8) | ||
104 | #define GCLOCK_MINIMUM _IO('G', 9) | ||
105 | #define GCUNLOCK_MINIMUM _IO('G', 10) | ||
106 | #define GCSTATIC_CMAP _IO('G', 11) | ||
107 | #define GCVARIABLE_CMAP _IO('G', 12) | ||
108 | #define GCTERM _IOWR('G',20,int) /* multi-headed Tomcat */ | ||
109 | #define GCDESCRIBE _IOR('G', 21, struct grf_fbinfo) | ||
110 | #define GCFASTLOCK _IO('G', 26) | ||
111 | |||
112 | #endif /* __ASM_PARISC_GRFIOCTL_H */ | ||
113 | |||
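A hypothetical user-space sketch of the GCDESCRIBE ioctl defined above: open the graphics device node and read back its grf_fbinfo. The device path argument is purely illustrative; the actual node name depends on the machine and driver:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>

	static int describe_grf(const char *dev)	/* e.g. the machine's grf device node */
	{
		struct grf_fbinfo info;
		int fd = open(dev, O_RDONLY);

		if (fd < 0)
			return -1;
		if (ioctl(fd, GCDESCRIBE, &info) == 0)
			printf("%s: %ux%u visible, %u bpp, fb at %#lx\n",
			       info.name, info.dwidth, info.dlength, info.bpp,
			       (unsigned long)info.fbbase);
		close(fd);
		return 0;
	}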
diff --git a/arch/parisc/include/asm/hardirq.h b/arch/parisc/include/asm/hardirq.h new file mode 100644 index 000000000000..ce93133d5112 --- /dev/null +++ b/arch/parisc/include/asm/hardirq.h | |||
@@ -0,0 +1,29 @@ | |||
1 | /* hardirq.h: PA-RISC hard IRQ support. | ||
2 | * | ||
3 | * Copyright (C) 2001 Matthew Wilcox <matthew@wil.cx> | ||
4 | * | ||
5 | * The locking is really quite interesting. There's a cpu-local | ||
6 | * count of how many interrupts are being handled, and a global | ||
7 | * lock. An interrupt can only be serviced if the global lock | ||
8 | * is free. You can't be sure no more interrupts are being | ||
9 | * serviced until you've acquired the lock and then checked | ||
10 | * all the per-cpu interrupt counts are all zero. It's a specialised | ||
11 | * br_lock, and that's exactly how Sparc does it. We don't do it that way | ||
12 | * because it would mean more locking for us. This way is lock-free in the interrupt path. | ||
13 | */ | ||
14 | |||
15 | #ifndef _PARISC_HARDIRQ_H | ||
16 | #define _PARISC_HARDIRQ_H | ||
17 | |||
18 | #include <linux/threads.h> | ||
19 | #include <linux/irq.h> | ||
20 | |||
21 | typedef struct { | ||
22 | unsigned long __softirq_pending; /* set_bit is used on this */ | ||
23 | } ____cacheline_aligned irq_cpustat_t; | ||
24 | |||
25 | #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ | ||
26 | |||
27 | void ack_bad_irq(unsigned int irq); | ||
28 | |||
29 | #endif /* _PARISC_HARDIRQ_H */ | ||
diff --git a/arch/parisc/include/asm/hardware.h b/arch/parisc/include/asm/hardware.h new file mode 100644 index 000000000000..4e9626836bab --- /dev/null +++ b/arch/parisc/include/asm/hardware.h | |||
@@ -0,0 +1,127 @@ | |||
1 | #ifndef _PARISC_HARDWARE_H | ||
2 | #define _PARISC_HARDWARE_H | ||
3 | |||
4 | #include <linux/mod_devicetable.h> | ||
5 | #include <asm/pdc.h> | ||
6 | |||
7 | #define HWTYPE_ANY_ID PA_HWTYPE_ANY_ID | ||
8 | #define HVERSION_ANY_ID PA_HVERSION_ANY_ID | ||
9 | #define HVERSION_REV_ANY_ID PA_HVERSION_REV_ANY_ID | ||
10 | #define SVERSION_ANY_ID PA_SVERSION_ANY_ID | ||
11 | |||
12 | struct hp_hardware { | ||
13 | unsigned short hw_type:5; /* HPHW_xxx */ | ||
14 | unsigned short hversion; | ||
15 | unsigned long sversion:28; | ||
16 | unsigned short opt; | ||
17 | const char name[80]; /* The hardware description */ | ||
18 | }; | ||
19 | |||
20 | struct parisc_device; | ||
21 | |||
22 | enum cpu_type { | ||
23 | pcx = 0, /* pa7000 pa 1.0 */ | ||
24 | pcxs = 1, /* pa7000 pa 1.1a */ | ||
25 | pcxt = 2, /* pa7100 pa 1.1b */ | ||
26 | pcxt_ = 3, /* pa7200 (t') pa 1.1c */ | ||
27 | pcxl = 4, /* pa7100lc pa 1.1d */ | ||
28 | pcxl2 = 5, /* pa7300lc pa 1.1e */ | ||
29 | pcxu = 6, /* pa8000 pa 2.0 */ | ||
30 | pcxu_ = 7, /* pa8200 (u+) pa 2.0 */ | ||
31 | pcxw = 8, /* pa8500 pa 2.0 */ | ||
32 | pcxw_ = 9, /* pa8600 (w+) pa 2.0 */ | ||
33 | pcxw2 = 10, /* pa8700 pa 2.0 */ | ||
34 | mako = 11, /* pa8800 pa 2.0 */ | ||
35 | mako2 = 12 /* pa8900 pa 2.0 */ | ||
36 | }; | ||
37 | |||
38 | extern const char * const cpu_name_version[][2]; /* mapping from enum cpu_type to strings */ | ||
39 | |||
40 | struct parisc_driver; | ||
41 | |||
42 | struct io_module { | ||
43 | volatile uint32_t nothing; /* reg 0 */ | ||
44 | volatile uint32_t io_eim; | ||
45 | volatile uint32_t io_dc_adata; | ||
46 | volatile uint32_t io_ii_cdata; | ||
47 | volatile uint32_t io_dma_link; /* reg 4 */ | ||
48 | volatile uint32_t io_dma_command; | ||
49 | volatile uint32_t io_dma_address; | ||
50 | volatile uint32_t io_dma_count; | ||
51 | volatile uint32_t io_flex; /* reg 8 */ | ||
52 | volatile uint32_t io_spa_address; | ||
53 | volatile uint32_t reserved1[2]; | ||
54 | volatile uint32_t io_command; /* reg 12 */ | ||
55 | volatile uint32_t io_status; | ||
56 | volatile uint32_t io_control; | ||
57 | volatile uint32_t io_data; | ||
58 | volatile uint32_t reserved2; /* reg 16 */ | ||
59 | volatile uint32_t chain_addr; | ||
60 | volatile uint32_t sub_mask_clr; | ||
61 | volatile uint32_t reserved3[13]; | ||
62 | volatile uint32_t undefined[480]; | ||
63 | volatile uint32_t unpriv[512]; | ||
64 | }; | ||
65 | |||
66 | struct bc_module { | ||
67 | volatile uint32_t unused1[12]; | ||
68 | volatile uint32_t io_command; | ||
69 | volatile uint32_t io_status; | ||
70 | volatile uint32_t io_control; | ||
71 | volatile uint32_t unused2[1]; | ||
72 | volatile uint32_t io_err_resp; | ||
73 | volatile uint32_t io_err_info; | ||
74 | volatile uint32_t io_err_req; | ||
75 | volatile uint32_t unused3[11]; | ||
76 | volatile uint32_t io_io_low; | ||
77 | volatile uint32_t io_io_high; | ||
78 | }; | ||
79 | |||
80 | #define HPHW_NPROC 0 | ||
81 | #define HPHW_MEMORY 1 | ||
82 | #define HPHW_B_DMA 2 | ||
83 | #define HPHW_OBSOLETE 3 | ||
84 | #define HPHW_A_DMA 4 | ||
85 | #define HPHW_A_DIRECT 5 | ||
86 | #define HPHW_OTHER 6 | ||
87 | #define HPHW_BCPORT 7 | ||
88 | #define HPHW_CIO 8 | ||
89 | #define HPHW_CONSOLE 9 | ||
90 | #define HPHW_FIO 10 | ||
91 | #define HPHW_BA 11 | ||
92 | #define HPHW_IOA 12 | ||
93 | #define HPHW_BRIDGE 13 | ||
94 | #define HPHW_FABRIC 14 | ||
95 | #define HPHW_MC 15 | ||
96 | #define HPHW_FAULTY 31 | ||
97 | |||
98 | |||
99 | /* hardware.c: */ | ||
100 | extern const char *parisc_hardware_description(struct parisc_device_id *id); | ||
101 | extern enum cpu_type parisc_get_cpu_type(unsigned long hversion); | ||
102 | |||
103 | struct pci_dev; | ||
104 | |||
105 | /* drivers.c: */ | ||
106 | extern struct parisc_device *alloc_pa_dev(unsigned long hpa, | ||
107 | struct hardware_path *path); | ||
108 | extern int register_parisc_device(struct parisc_device *dev); | ||
109 | extern int register_parisc_driver(struct parisc_driver *driver); | ||
110 | extern int count_parisc_driver(struct parisc_driver *driver); | ||
111 | extern int unregister_parisc_driver(struct parisc_driver *driver); | ||
112 | extern void walk_central_bus(void); | ||
113 | extern const struct parisc_device *find_pa_parent_type(const struct parisc_device *, int); | ||
114 | extern void print_parisc_devices(void); | ||
115 | extern char *print_pa_hwpath(struct parisc_device *dev, char *path); | ||
116 | extern char *print_pci_hwpath(struct pci_dev *dev, char *path); | ||
117 | extern void get_pci_node_path(struct pci_dev *dev, struct hardware_path *path); | ||
118 | extern void init_parisc_bus(void); | ||
119 | extern struct device *hwpath_to_device(struct hardware_path *modpath); | ||
120 | extern void device_to_hwpath(struct device *dev, struct hardware_path *path); | ||
121 | |||
122 | |||
123 | /* inventory.c: */ | ||
124 | extern void do_memory_inventory(void); | ||
125 | extern void do_device_inventory(void); | ||
126 | |||
127 | #endif /* _PARISC_HARDWARE_H */ | ||
diff --git a/arch/parisc/include/asm/hw_irq.h b/arch/parisc/include/asm/hw_irq.h new file mode 100644 index 000000000000..6707f7df3921 --- /dev/null +++ b/arch/parisc/include/asm/hw_irq.h | |||
@@ -0,0 +1,8 @@ | |||
1 | #ifndef _ASM_HW_IRQ_H | ||
2 | #define _ASM_HW_IRQ_H | ||
3 | |||
4 | /* | ||
5 | * linux/include/asm/hw_irq.h | ||
6 | */ | ||
7 | |||
8 | #endif | ||
diff --git a/arch/parisc/include/asm/ide.h b/arch/parisc/include/asm/ide.h new file mode 100644 index 000000000000..81700a2321cf --- /dev/null +++ b/arch/parisc/include/asm/ide.h | |||
@@ -0,0 +1,57 @@ | |||
1 | /* | ||
2 | * linux/include/asm-parisc/ide.h | ||
3 | * | ||
4 | * Copyright (C) 1994-1996 Linus Torvalds & authors | ||
5 | */ | ||
6 | |||
7 | /* | ||
8 | * This file contains the PARISC architecture specific IDE code. | ||
9 | */ | ||
10 | |||
11 | #ifndef __ASM_PARISC_IDE_H | ||
12 | #define __ASM_PARISC_IDE_H | ||
13 | |||
14 | #ifdef __KERNEL__ | ||
15 | |||
16 | /* Generic I/O and MEMIO string operations. */ | ||
17 | |||
18 | #define __ide_insw insw | ||
19 | #define __ide_insl insl | ||
20 | #define __ide_outsw outsw | ||
21 | #define __ide_outsl outsl | ||
22 | |||
23 | static __inline__ void __ide_mm_insw(void __iomem *port, void *addr, u32 count) | ||
24 | { | ||
25 | while (count--) { | ||
26 | *(u16 *)addr = __raw_readw(port); | ||
27 | addr += 2; | ||
28 | } | ||
29 | } | ||
30 | |||
31 | static __inline__ void __ide_mm_insl(void __iomem *port, void *addr, u32 count) | ||
32 | { | ||
33 | while (count--) { | ||
34 | *(u32 *)addr = __raw_readl(port); | ||
35 | addr += 4; | ||
36 | } | ||
37 | } | ||
38 | |||
39 | static __inline__ void __ide_mm_outsw(void __iomem *port, void *addr, u32 count) | ||
40 | { | ||
41 | while (count--) { | ||
42 | __raw_writew(*(u16 *)addr, port); | ||
43 | addr += 2; | ||
44 | } | ||
45 | } | ||
46 | |||
47 | static __inline__ void __ide_mm_outsl(void __iomem *port, void *addr, u32 count) | ||
48 | { | ||
49 | while (count--) { | ||
50 | __raw_writel(*(u32 *)addr, port); | ||
51 | addr += 4; | ||
52 | } | ||
53 | } | ||
54 | |||
55 | #endif /* __KERNEL__ */ | ||
56 | |||
57 | #endif /* __ASM_PARISC_IDE_H */ | ||
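Editorial sketch: these helpers deliberately use the non-byteswapping __raw_* accessors, since the IDE data register is treated as a raw byte stream. A usage sketch with a hypothetical register pointer:

	/* Sketch: read one 512-byte sector from a memory-mapped IDE data
	 * register, 16 bits at a time. data_reg is an invented ioremapped
	 * register pointer, used only for illustration. */
	static void example_read_sector(void __iomem *data_reg, u16 *buf)
	{
		__ide_mm_insw(data_reg, buf, 256);	/* 256 x 16-bit words = 512 bytes */
	}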
diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h new file mode 100644 index 000000000000..55ddb1842107 --- /dev/null +++ b/arch/parisc/include/asm/io.h | |||
@@ -0,0 +1,293 @@ | |||
1 | #ifndef _ASM_IO_H | ||
2 | #define _ASM_IO_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <asm/pgtable.h> | ||
6 | |||
7 | extern unsigned long parisc_vmerge_boundary; | ||
8 | extern unsigned long parisc_vmerge_max_size; | ||
9 | |||
10 | #define BIO_VMERGE_BOUNDARY parisc_vmerge_boundary | ||
11 | #define BIO_VMERGE_MAX_SIZE parisc_vmerge_max_size | ||
12 | |||
13 | #define virt_to_phys(a) ((unsigned long)__pa(a)) | ||
14 | #define phys_to_virt(a) __va(a) | ||
15 | #define virt_to_bus virt_to_phys | ||
16 | #define bus_to_virt phys_to_virt | ||
17 | |||
18 | static inline unsigned long isa_bus_to_virt(unsigned long addr) { | ||
19 | BUG(); | ||
20 | return 0; | ||
21 | } | ||
22 | |||
23 | static inline unsigned long isa_virt_to_bus(void *addr) { | ||
24 | BUG(); | ||
25 | return 0; | ||
26 | } | ||
27 | |||
28 | /* | ||
29 | * Memory mapped I/O | ||
30 | * | ||
31 | * readX()/writeX() do byteswapping and take an ioremapped address | ||
32 | * __raw_readX()/__raw_writeX() don't byteswap and take an ioremapped address. | ||
33 | * gsc_*() don't byteswap and operate on physical addresses; | ||
34 | * eg dev->hpa or 0xfee00000. | ||
35 | */ | ||
36 | |||
37 | static inline unsigned char gsc_readb(unsigned long addr) | ||
38 | { | ||
39 | long flags; | ||
40 | unsigned char ret; | ||
41 | |||
42 | __asm__ __volatile__( | ||
43 | " rsm 2,%0\n" | ||
44 | " ldbx 0(%2),%1\n" | ||
45 | " mtsm %0\n" | ||
46 | : "=&r" (flags), "=r" (ret) : "r" (addr) ); | ||
47 | |||
48 | return ret; | ||
49 | } | ||
50 | |||
51 | static inline unsigned short gsc_readw(unsigned long addr) | ||
52 | { | ||
53 | long flags; | ||
54 | unsigned short ret; | ||
55 | |||
56 | __asm__ __volatile__( | ||
57 | " rsm 2,%0\n" | ||
58 | " ldhx 0(%2),%1\n" | ||
59 | " mtsm %0\n" | ||
60 | : "=&r" (flags), "=r" (ret) : "r" (addr) ); | ||
61 | |||
62 | return ret; | ||
63 | } | ||
64 | |||
65 | static inline unsigned int gsc_readl(unsigned long addr) | ||
66 | { | ||
67 | u32 ret; | ||
68 | |||
69 | __asm__ __volatile__( | ||
70 | " ldwax 0(%1),%0\n" | ||
71 | : "=r" (ret) : "r" (addr) ); | ||
72 | |||
73 | return ret; | ||
74 | } | ||
75 | |||
76 | static inline unsigned long long gsc_readq(unsigned long addr) | ||
77 | { | ||
78 | unsigned long long ret; | ||
79 | |||
80 | #ifdef CONFIG_64BIT | ||
81 | __asm__ __volatile__( | ||
82 | " ldda 0(%1),%0\n" | ||
83 | : "=r" (ret) : "r" (addr) ); | ||
84 | #else | ||
85 | /* two reads may have side effects.. */ | ||
86 | ret = ((u64) gsc_readl(addr)) << 32; | ||
87 | ret |= gsc_readl(addr+4); | ||
88 | #endif | ||
89 | return ret; | ||
90 | } | ||
91 | |||
92 | static inline void gsc_writeb(unsigned char val, unsigned long addr) | ||
93 | { | ||
94 | long flags; | ||
95 | __asm__ __volatile__( | ||
96 | " rsm 2,%0\n" | ||
97 | " stbs %1,0(%2)\n" | ||
98 | " mtsm %0\n" | ||
99 | : "=&r" (flags) : "r" (val), "r" (addr) ); | ||
100 | } | ||
101 | |||
102 | static inline void gsc_writew(unsigned short val, unsigned long addr) | ||
103 | { | ||
104 | long flags; | ||
105 | __asm__ __volatile__( | ||
106 | " rsm 2,%0\n" | ||
107 | " sths %1,0(%2)\n" | ||
108 | " mtsm %0\n" | ||
109 | : "=&r" (flags) : "r" (val), "r" (addr) ); | ||
110 | } | ||
111 | |||
112 | static inline void gsc_writel(unsigned int val, unsigned long addr) | ||
113 | { | ||
114 | __asm__ __volatile__( | ||
115 | " stwas %0,0(%1)\n" | ||
116 | : : "r" (val), "r" (addr) ); | ||
117 | } | ||
118 | |||
119 | static inline void gsc_writeq(unsigned long long val, unsigned long addr) | ||
120 | { | ||
121 | #ifdef CONFIG_64BIT | ||
122 | __asm__ __volatile__( | ||
123 | " stda %0,0(%1)\n" | ||
124 | : : "r" (val), "r" (addr) ); | ||
125 | #else | ||
126 | /* two writes may have side effects.. */ | ||
127 | gsc_writel(val >> 32, addr); | ||
128 | gsc_writel(val, addr+4); | ||
129 | #endif | ||
130 | } | ||
131 | |||
132 | /* | ||
133 | * The standard PCI ioremap interfaces | ||
134 | */ | ||
135 | |||
136 | extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags); | ||
137 | |||
138 | /* Most machines react poorly to I/O-space being cacheable... Instead let's | ||
139 | * define ioremap() in terms of ioremap_nocache(). | ||
140 | */ | ||
141 | static inline void __iomem * ioremap(unsigned long offset, unsigned long size) | ||
142 | { | ||
143 | return __ioremap(offset, size, _PAGE_NO_CACHE); | ||
144 | } | ||
145 | #define ioremap_nocache(off, sz) ioremap((off), (sz)) | ||
146 | |||
147 | extern void iounmap(const volatile void __iomem *addr); | ||
148 | |||
149 | static inline unsigned char __raw_readb(const volatile void __iomem *addr) | ||
150 | { | ||
151 | return (*(volatile unsigned char __force *) (addr)); | ||
152 | } | ||
153 | static inline unsigned short __raw_readw(const volatile void __iomem *addr) | ||
154 | { | ||
155 | return *(volatile unsigned short __force *) addr; | ||
156 | } | ||
157 | static inline unsigned int __raw_readl(const volatile void __iomem *addr) | ||
158 | { | ||
159 | return *(volatile unsigned int __force *) addr; | ||
160 | } | ||
161 | static inline unsigned long long __raw_readq(const volatile void __iomem *addr) | ||
162 | { | ||
163 | return *(volatile unsigned long long __force *) addr; | ||
164 | } | ||
165 | |||
166 | static inline void __raw_writeb(unsigned char b, volatile void __iomem *addr) | ||
167 | { | ||
168 | *(volatile unsigned char __force *) addr = b; | ||
169 | } | ||
170 | static inline void __raw_writew(unsigned short b, volatile void __iomem *addr) | ||
171 | { | ||
172 | *(volatile unsigned short __force *) addr = b; | ||
173 | } | ||
174 | static inline void __raw_writel(unsigned int b, volatile void __iomem *addr) | ||
175 | { | ||
176 | *(volatile unsigned int __force *) addr = b; | ||
177 | } | ||
178 | static inline void __raw_writeq(unsigned long long b, volatile void __iomem *addr) | ||
179 | { | ||
180 | *(volatile unsigned long long __force *) addr = b; | ||
181 | } | ||
182 | |||
183 | /* readb can never be const, so use __fswab instead of le*_to_cpu */ | ||
184 | #define readb(addr) __raw_readb(addr) | ||
185 | #define readw(addr) __fswab16(__raw_readw(addr)) | ||
186 | #define readl(addr) __fswab32(__raw_readl(addr)) | ||
187 | #define readq(addr) __fswab64(__raw_readq(addr)) | ||
188 | #define writeb(b, addr) __raw_writeb(b, addr) | ||
189 | #define writew(b, addr) __raw_writew(cpu_to_le16(b), addr) | ||
190 | #define writel(b, addr) __raw_writel(cpu_to_le32(b), addr) | ||
191 | #define writeq(b, addr) __raw_writeq(cpu_to_le64(b), addr) | ||
192 | |||
193 | #define readb_relaxed(addr) readb(addr) | ||
194 | #define readw_relaxed(addr) readw(addr) | ||
195 | #define readl_relaxed(addr) readl(addr) | ||
196 | #define readq_relaxed(addr) readq(addr) | ||
197 | |||
198 | #define mmiowb() do { } while (0) | ||
199 | |||
200 | void memset_io(volatile void __iomem *addr, unsigned char val, int count); | ||
201 | void memcpy_fromio(void *dst, const volatile void __iomem *src, int count); | ||
202 | void memcpy_toio(volatile void __iomem *dst, const void *src, int count); | ||
203 | |||
204 | /* Port-space IO */ | ||
205 | |||
206 | #define inb_p inb | ||
207 | #define inw_p inw | ||
208 | #define inl_p inl | ||
209 | #define outb_p outb | ||
210 | #define outw_p outw | ||
211 | #define outl_p outl | ||
212 | |||
213 | extern unsigned char eisa_in8(unsigned short port); | ||
214 | extern unsigned short eisa_in16(unsigned short port); | ||
215 | extern unsigned int eisa_in32(unsigned short port); | ||
216 | extern void eisa_out8(unsigned char data, unsigned short port); | ||
217 | extern void eisa_out16(unsigned short data, unsigned short port); | ||
218 | extern void eisa_out32(unsigned int data, unsigned short port); | ||
219 | |||
220 | #if defined(CONFIG_PCI) | ||
221 | extern unsigned char inb(int addr); | ||
222 | extern unsigned short inw(int addr); | ||
223 | extern unsigned int inl(int addr); | ||
224 | |||
225 | extern void outb(unsigned char b, int addr); | ||
226 | extern void outw(unsigned short b, int addr); | ||
227 | extern void outl(unsigned int b, int addr); | ||
228 | #elif defined(CONFIG_EISA) | ||
229 | #define inb eisa_in8 | ||
230 | #define inw eisa_in16 | ||
231 | #define inl eisa_in32 | ||
232 | #define outb eisa_out8 | ||
233 | #define outw eisa_out16 | ||
234 | #define outl eisa_out32 | ||
235 | #else | ||
236 | static inline char inb(unsigned long addr) | ||
237 | { | ||
238 | BUG(); | ||
239 | return -1; | ||
240 | } | ||
241 | |||
242 | static inline short inw(unsigned long addr) | ||
243 | { | ||
244 | BUG(); | ||
245 | return -1; | ||
246 | } | ||
247 | |||
248 | static inline int inl(unsigned long addr) | ||
249 | { | ||
250 | BUG(); | ||
251 | return -1; | ||
252 | } | ||
253 | |||
254 | #define outb(x, y) BUG() | ||
255 | #define outw(x, y) BUG() | ||
256 | #define outl(x, y) BUG() | ||
257 | #endif | ||
258 | |||
259 | /* | ||
260 | * String versions of in/out ops: | ||
261 | */ | ||
262 | extern void insb (unsigned long port, void *dst, unsigned long count); | ||
263 | extern void insw (unsigned long port, void *dst, unsigned long count); | ||
264 | extern void insl (unsigned long port, void *dst, unsigned long count); | ||
265 | extern void outsb (unsigned long port, const void *src, unsigned long count); | ||
266 | extern void outsw (unsigned long port, const void *src, unsigned long count); | ||
267 | extern void outsl (unsigned long port, const void *src, unsigned long count); | ||
268 | |||
269 | |||
270 | /* IO Port space is : BBiiii where BB is HBA number. */ | ||
271 | #define IO_SPACE_LIMIT 0x00ffffff | ||
272 | |||
273 | /* PA machines have an MM I/O space from 0xf0000000-0xffffffff in 32 | ||
274 | * bit mode and from 0xfffffffff0000000-0xffffffffffffffff in 64 bit | ||
275 | * mode (essentially just sign extending). This macro takes in a 32 | ||
276 | * bit I/O address (still with the leading f) and outputs the correct | ||
277 | * value for either 32 or 64 bit mode. */ | ||
278 | #define F_EXTEND(x) ((unsigned long)((x) | (0xffffffff00000000ULL))) | ||
279 | |||
280 | #include <asm-generic/iomap.h> | ||
281 | |||
282 | /* | ||
283 | * Convert a physical pointer to a virtual kernel pointer for /dev/mem | ||
284 | * access | ||
285 | */ | ||
286 | #define xlate_dev_mem_ptr(p) __va(p) | ||
287 | |||
288 | /* | ||
289 | * Convert a virtual cached pointer to an uncached pointer | ||
290 | */ | ||
291 | #define xlate_dev_kmem_ptr(p) p | ||
292 | |||
293 | #endif | ||
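Editorial sketch: a quick illustration of the three MMIO access families described above. The base address 0xf4000000 and the offsets are invented for illustration only:

	static void example_mmio(void)
	{
		void __iomem *base = ioremap(0xf4000000UL, 0x1000);	/* uncached mapping */
		u32 val;

		if (!base)
			return;

		val = readl(base + 0x10);		/* byteswapped: little-endian device register */
		writel(val, base + 0x14);		/* byteswapped on the way out, too */

		val = __raw_readl(base + 0x10);		/* no byteswap: native/big-endian register */
		__raw_writel(val, base + 0x14);

		val = gsc_readl(F_EXTEND(0xf4000010UL));	/* physical address, no ioremap needed */
		gsc_writel(val, F_EXTEND(0xf4000014UL));

		iounmap(base);
	}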
diff --git a/arch/parisc/include/asm/ioctl.h b/arch/parisc/include/asm/ioctl.h new file mode 100644 index 000000000000..ec8efa02beda --- /dev/null +++ b/arch/parisc/include/asm/ioctl.h | |||
@@ -0,0 +1,44 @@ | |||
1 | /* | ||
2 | * Linux/PA-RISC Project (http://www.parisc-linux.org/) | ||
3 | * Copyright (C) 1999,2003 Matthew Wilcox < willy at debian . org > | ||
4 | * portions from "linux/ioctl.h for Linux" by H.H. Bergman. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | */ | ||
20 | |||
21 | |||
22 | #ifndef _ASM_PARISC_IOCTL_H | ||
23 | #define _ASM_PARISC_IOCTL_H | ||
24 | |||
25 | /* ioctl command encoding: 32 bits total, command in lower 16 bits, | ||
26 | * size of the parameter structure in the lower 14 bits of the | ||
27 | * upper 16 bits. | ||
28 | * Encoding the size of the parameter structure in the ioctl request | ||
29 | * is useful for catching programs compiled with old versions | ||
30 | * and to avoid overwriting user space outside the user buffer area. | ||
31 | * The highest 2 bits are reserved for indicating the ``access mode''. | ||
32 | * NOTE: This limits the max parameter size to 16kB - 1! | ||
33 | */ | ||
34 | |||
35 | /* | ||
36 | * Direction bits. | ||
37 | */ | ||
38 | #define _IOC_NONE 0U | ||
39 | #define _IOC_WRITE 2U | ||
40 | #define _IOC_READ 1U | ||
41 | |||
42 | #include <asm-generic/ioctl.h> | ||
43 | |||
44 | #endif /* _ASM_PARISC_IOCTL_H */ | ||
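Editorial note: the direction bits differ from the asm-generic defaults (which use _IOC_WRITE 1 and _IOC_READ 2), which is why this header defines them before including <asm-generic/ioctl.h>. A worked example of the resulting encoding, using a hypothetical command name:

	/* asm-generic/ioctl.h packs: dir(2 bits) | size(14) | type(8) | nr(8).
	 * With the parisc values above, _IOC_READ == 1, so for example: */
	#define EX_GETVAL	_IOR('E', 1, int)	/* hypothetical command */
	/*
	 * EX_GETVAL = (1 << 30) | (sizeof(int) << 16) | ('E' << 8) | 1
	 *           = 0x40000000 | 0x00040000 | 0x4500 | 0x01
	 *           = 0x40044501   (it would be 0x80044501 with the generic bits)
	 */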
diff --git a/arch/parisc/include/asm/ioctls.h b/arch/parisc/include/asm/ioctls.h new file mode 100644 index 000000000000..6747fad07a3e --- /dev/null +++ b/arch/parisc/include/asm/ioctls.h | |||
@@ -0,0 +1,90 @@ | |||
1 | #ifndef __ARCH_PARISC_IOCTLS_H__ | ||
2 | #define __ARCH_PARISC_IOCTLS_H__ | ||
3 | |||
4 | #include <asm/ioctl.h> | ||
5 | |||
6 | /* 0x54 is just a magic number to make these relatively unique ('T') */ | ||
7 | |||
8 | #define TCGETS _IOR('T', 16, struct termios) /* TCGETATTR */ | ||
9 | #define TCSETS _IOW('T', 17, struct termios) /* TCSETATTR */ | ||
10 | #define TCSETSW _IOW('T', 18, struct termios) /* TCSETATTRD */ | ||
11 | #define TCSETSF _IOW('T', 19, struct termios) /* TCSETATTRF */ | ||
12 | #define TCGETA _IOR('T', 1, struct termio) | ||
13 | #define TCSETA _IOW('T', 2, struct termio) | ||
14 | #define TCSETAW _IOW('T', 3, struct termio) | ||
15 | #define TCSETAF _IOW('T', 4, struct termio) | ||
16 | #define TCSBRK _IO('T', 5) | ||
17 | #define TCXONC _IO('T', 6) | ||
18 | #define TCFLSH _IO('T', 7) | ||
19 | #define TIOCEXCL 0x540C | ||
20 | #define TIOCNXCL 0x540D | ||
21 | #define TIOCSCTTY 0x540E | ||
22 | #define TIOCGPGRP _IOR('T', 30, int) | ||
23 | #define TIOCSPGRP _IOW('T', 29, int) | ||
24 | #define TIOCOUTQ 0x5411 | ||
25 | #define TIOCSTI 0x5412 | ||
26 | #define TIOCGWINSZ 0x5413 | ||
27 | #define TIOCSWINSZ 0x5414 | ||
28 | #define TIOCMGET 0x5415 | ||
29 | #define TIOCMBIS 0x5416 | ||
30 | #define TIOCMBIC 0x5417 | ||
31 | #define TIOCMSET 0x5418 | ||
32 | #define TIOCGSOFTCAR 0x5419 | ||
33 | #define TIOCSSOFTCAR 0x541A | ||
34 | #define FIONREAD 0x541B | ||
35 | #define TIOCINQ FIONREAD | ||
36 | #define TIOCLINUX 0x541C | ||
37 | #define TIOCCONS 0x541D | ||
38 | #define TIOCGSERIAL 0x541E | ||
39 | #define TIOCSSERIAL 0x541F | ||
40 | #define TIOCPKT 0x5420 | ||
41 | #define FIONBIO 0x5421 | ||
42 | #define TIOCNOTTY 0x5422 | ||
43 | #define TIOCSETD 0x5423 | ||
44 | #define TIOCGETD 0x5424 | ||
45 | #define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ | ||
46 | #define TIOCSBRK 0x5427 /* BSD compatibility */ | ||
47 | #define TIOCCBRK 0x5428 /* BSD compatibility */ | ||
48 | #define TIOCGSID _IOR('T', 20, int) /* Return the session ID of FD */ | ||
49 | #define TCGETS2 _IOR('T',0x2A, struct termios2) | ||
50 | #define TCSETS2 _IOW('T',0x2B, struct termios2) | ||
51 | #define TCSETSW2 _IOW('T',0x2C, struct termios2) | ||
52 | #define TCSETSF2 _IOW('T',0x2D, struct termios2) | ||
53 | #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ | ||
54 | #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ | ||
55 | |||
56 | #define FIONCLEX 0x5450 /* these numbers need to be adjusted. */ | ||
57 | #define FIOCLEX 0x5451 | ||
58 | #define FIOASYNC 0x5452 | ||
59 | #define TIOCSERCONFIG 0x5453 | ||
60 | #define TIOCSERGWILD 0x5454 | ||
61 | #define TIOCSERSWILD 0x5455 | ||
62 | #define TIOCGLCKTRMIOS 0x5456 | ||
63 | #define TIOCSLCKTRMIOS 0x5457 | ||
64 | #define TIOCSERGSTRUCT 0x5458 /* For debugging only */ | ||
65 | #define TIOCSERGETLSR 0x5459 /* Get line status register */ | ||
66 | #define TIOCSERGETMULTI 0x545A /* Get multiport config */ | ||
67 | #define TIOCSERSETMULTI 0x545B /* Set multiport config */ | ||
68 | |||
69 | #define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ | ||
70 | #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ | ||
71 | #define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ | ||
72 | #define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ | ||
73 | #define FIOQSIZE 0x5460 /* Get exact space used by quota */ | ||
74 | |||
75 | #define TIOCSTART 0x5461 | ||
76 | #define TIOCSTOP 0x5462 | ||
77 | #define TIOCSLTC 0x5462 | ||
78 | |||
79 | /* Used for packet mode */ | ||
80 | #define TIOCPKT_DATA 0 | ||
81 | #define TIOCPKT_FLUSHREAD 1 | ||
82 | #define TIOCPKT_FLUSHWRITE 2 | ||
83 | #define TIOCPKT_STOP 4 | ||
84 | #define TIOCPKT_START 8 | ||
85 | #define TIOCPKT_NOSTOP 16 | ||
86 | #define TIOCPKT_DOSTOP 32 | ||
87 | |||
88 | #define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ | ||
89 | |||
90 | #endif /* _ASM_PARISC_IOCTLS_H */ | ||
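Editorial sketch: most of these values reach user space unchanged; a small user-space example using TIOCGWINSZ (0x5413) from the list above:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>

	int main(void)
	{
		struct winsize ws;

		if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) == 0)
			printf("terminal is %u rows x %u cols\n", ws.ws_row, ws.ws_col);
		return 0;
	}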
diff --git a/arch/parisc/include/asm/ipcbuf.h b/arch/parisc/include/asm/ipcbuf.h new file mode 100644 index 000000000000..bd956c425785 --- /dev/null +++ b/arch/parisc/include/asm/ipcbuf.h | |||
@@ -0,0 +1,27 @@ | |||
1 | #ifndef __PARISC_IPCBUF_H__ | ||
2 | #define __PARISC_IPCBUF_H__ | ||
3 | |||
4 | /* | ||
5 | * The ipc64_perm structure for PA-RISC is almost identical to | ||
6 | * kern_ipc_perm as we have always had 32-bit UIDs and GIDs in the kernel. | ||
7 | * 'seq' has been changed from long to int so that it's the same size | ||
8 | * on 64-bit kernels as on 32-bit ones. | ||
9 | */ | ||
10 | |||
11 | struct ipc64_perm | ||
12 | { | ||
13 | key_t key; | ||
14 | uid_t uid; | ||
15 | gid_t gid; | ||
16 | uid_t cuid; | ||
17 | gid_t cgid; | ||
18 | unsigned short int __pad1; | ||
19 | mode_t mode; | ||
20 | unsigned short int __pad2; | ||
21 | unsigned short int seq; | ||
22 | unsigned int __pad3; | ||
23 | unsigned long long int __unused1; | ||
24 | unsigned long long int __unused2; | ||
25 | }; | ||
26 | |||
27 | #endif /* __PARISC_IPCBUF_H__ */ | ||
diff --git a/arch/parisc/include/asm/irq.h b/arch/parisc/include/asm/irq.h new file mode 100644 index 000000000000..399c81981ed5 --- /dev/null +++ b/arch/parisc/include/asm/irq.h | |||
@@ -0,0 +1,57 @@ | |||
1 | /* | ||
2 | * include/asm-parisc/irq.h | ||
3 | * | ||
4 | * Copyright 2005 Matthew Wilcox <matthew@wil.cx> | ||
5 | */ | ||
6 | |||
7 | #ifndef _ASM_PARISC_IRQ_H | ||
8 | #define _ASM_PARISC_IRQ_H | ||
9 | |||
10 | #include <linux/cpumask.h> | ||
11 | #include <asm/types.h> | ||
12 | |||
13 | #define NO_IRQ (-1) | ||
14 | |||
15 | #ifdef CONFIG_GSC | ||
16 | #define GSC_IRQ_BASE 16 | ||
17 | #define GSC_IRQ_MAX 63 | ||
18 | #define CPU_IRQ_BASE 64 | ||
19 | #else | ||
20 | #define CPU_IRQ_BASE 16 | ||
21 | #endif | ||
22 | |||
23 | #define TIMER_IRQ (CPU_IRQ_BASE + 0) | ||
24 | #define IPI_IRQ (CPU_IRQ_BASE + 1) | ||
25 | #define CPU_IRQ_MAX (CPU_IRQ_BASE + (BITS_PER_LONG - 1)) | ||
26 | |||
27 | #define NR_IRQS (CPU_IRQ_MAX + 1) | ||
28 | |||
29 | static __inline__ int irq_canonicalize(int irq) | ||
30 | { | ||
31 | return (irq == 2) ? 9 : irq; | ||
32 | } | ||
33 | |||
34 | struct irq_chip; | ||
35 | |||
36 | /* | ||
37 | * Some useful "we don't have to do anything here" handlers. Should | ||
38 | * probably be provided by the generic code. | ||
39 | */ | ||
40 | void no_ack_irq(unsigned int irq); | ||
41 | void no_end_irq(unsigned int irq); | ||
42 | void cpu_ack_irq(unsigned int irq); | ||
43 | void cpu_end_irq(unsigned int irq); | ||
44 | |||
45 | extern int txn_alloc_irq(unsigned int nbits); | ||
46 | extern int txn_claim_irq(int); | ||
47 | extern unsigned int txn_alloc_data(unsigned int); | ||
48 | extern unsigned long txn_alloc_addr(unsigned int); | ||
49 | extern unsigned long txn_affinity_addr(unsigned int irq, int cpu); | ||
50 | |||
51 | extern int cpu_claim_irq(unsigned int irq, struct irq_chip *, void *); | ||
52 | extern int cpu_check_affinity(unsigned int irq, cpumask_t *dest); | ||
53 | |||
54 | /* soft power switch support (power.c) */ | ||
55 | extern struct tasklet_struct power_tasklet; | ||
56 | |||
57 | #endif /* _ASM_PARISC_IRQ_H */ | ||
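Editorial note: a worked example of the numbering above on a 64-bit machine with CONFIG_GSC enabled (BITS_PER_LONG == 64):

	/*
	 * GSC IRQs:  16 .. 63        (GSC_IRQ_BASE .. GSC_IRQ_MAX)
	 * CPU IRQs:  64 .. 127       (CPU_IRQ_BASE .. CPU_IRQ_BASE + 63)
	 * TIMER_IRQ: 64,  IPI_IRQ: 65
	 * NR_IRQS:   128             (CPU_IRQ_MAX + 1)
	 *
	 * On a 32-bit kernel (BITS_PER_LONG == 32) the CPU range shrinks to
	 * 64 .. 95 and NR_IRQS becomes 96.
	 */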
diff --git a/arch/parisc/include/asm/irq_regs.h b/arch/parisc/include/asm/irq_regs.h new file mode 100644 index 000000000000..3dd9c0b70270 --- /dev/null +++ b/arch/parisc/include/asm/irq_regs.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/irq_regs.h> | |||
diff --git a/arch/parisc/include/asm/kdebug.h b/arch/parisc/include/asm/kdebug.h new file mode 100644 index 000000000000..6ece1b037665 --- /dev/null +++ b/arch/parisc/include/asm/kdebug.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/kdebug.h> | |||
diff --git a/arch/parisc/include/asm/kmap_types.h b/arch/parisc/include/asm/kmap_types.h new file mode 100644 index 000000000000..806aae3c5338 --- /dev/null +++ b/arch/parisc/include/asm/kmap_types.h | |||
@@ -0,0 +1,30 @@ | |||
1 | #ifndef _ASM_KMAP_TYPES_H | ||
2 | #define _ASM_KMAP_TYPES_H | ||
3 | |||
4 | |||
5 | #ifdef CONFIG_DEBUG_HIGHMEM | ||
6 | # define D(n) __KM_FENCE_##n , | ||
7 | #else | ||
8 | # define D(n) | ||
9 | #endif | ||
10 | |||
11 | enum km_type { | ||
12 | D(0) KM_BOUNCE_READ, | ||
13 | D(1) KM_SKB_SUNRPC_DATA, | ||
14 | D(2) KM_SKB_DATA_SOFTIRQ, | ||
15 | D(3) KM_USER0, | ||
16 | D(4) KM_USER1, | ||
17 | D(5) KM_BIO_SRC_IRQ, | ||
18 | D(6) KM_BIO_DST_IRQ, | ||
19 | D(7) KM_PTE0, | ||
20 | D(8) KM_PTE1, | ||
21 | D(9) KM_IRQ0, | ||
22 | D(10) KM_IRQ1, | ||
23 | D(11) KM_SOFTIRQ0, | ||
24 | D(12) KM_SOFTIRQ1, | ||
25 | D(13) KM_TYPE_NR | ||
26 | }; | ||
27 | |||
28 | #undef D | ||
29 | |||
30 | #endif | ||
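Editorial note: the D(n) macro only changes the enum's numbering. With CONFIG_DEBUG_HIGHMEM each KM_* constant gets a __KM_FENCE_n enumerator placed in front of it:

	/* With CONFIG_DEBUG_HIGHMEM the expansion starts as:
	 *	__KM_FENCE_0, KM_BOUNCE_READ,		-> KM_BOUNCE_READ == 1
	 *	__KM_FENCE_1, KM_SKB_SUNRPC_DATA,	-> KM_SKB_SUNRPC_DATA == 3
	 *	...
	 * so the KM_* values are spread out (1, 3, 5, ...) instead of the
	 * dense 0, 1, 2, ... numbering used when the option is off.
	 */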
diff --git a/arch/parisc/include/asm/led.h b/arch/parisc/include/asm/led.h new file mode 100644 index 000000000000..c3405ab9d60a --- /dev/null +++ b/arch/parisc/include/asm/led.h | |||
@@ -0,0 +1,42 @@ | |||
1 | #ifndef LED_H | ||
2 | #define LED_H | ||
3 | |||
4 | #define LED7 0x80 /* top (or furthest right) LED */ | ||
5 | #define LED6 0x40 | ||
6 | #define LED5 0x20 | ||
7 | #define LED4 0x10 | ||
8 | #define LED3 0x08 | ||
9 | #define LED2 0x04 | ||
10 | #define LED1 0x02 | ||
11 | #define LED0 0x01 /* bottom (or furthest left) LED */ | ||
12 | |||
13 | #define LED_LAN_TX LED0 /* for LAN transmit activity */ | ||
14 | #define LED_LAN_RCV LED1 /* for LAN receive activity */ | ||
15 | #define LED_DISK_IO LED2 /* for disk activity */ | ||
16 | #define LED_HEARTBEAT LED3 /* heartbeat */ | ||
17 | |||
18 | /* values for pdc_chassis_lcd_info_ret_block.model: */ | ||
19 | #define DISPLAY_MODEL_LCD 0 /* KittyHawk LED or LCD */ | ||
20 | #define DISPLAY_MODEL_NONE 1 /* no LED or LCD */ | ||
21 | #define DISPLAY_MODEL_LASI 2 /* LASI style 8 bit LED */ | ||
22 | #define DISPLAY_MODEL_OLD_ASP 0x7F /* faked: ASP style 8 x 1 bit LED (only very old ASP versions) */ | ||
23 | |||
24 | #define LED_CMD_REG_NONE 0 /* NULL == no addr for the cmd register */ | ||
25 | |||
26 | /* register_led_driver() */ | ||
27 | int __init register_led_driver(int model, unsigned long cmd_reg, unsigned long data_reg); | ||
28 | |||
29 | /* registers the LED regions for procfs */ | ||
30 | void __init register_led_regions(void); | ||
31 | |||
32 | #ifdef CONFIG_CHASSIS_LCD_LED | ||
33 | /* writes a string to the LCD display (if possible on this h/w) */ | ||
34 | int lcd_print(const char *str); | ||
35 | #else | ||
36 | #define lcd_print(str) | ||
37 | #endif | ||
38 | |||
39 | /* main LED initialization function (uses PDC) */ | ||
40 | int __init led_init(void); | ||
41 | |||
42 | #endif /* LED_H */ | ||
diff --git a/arch/parisc/include/asm/linkage.h b/arch/parisc/include/asm/linkage.h new file mode 100644 index 000000000000..0b19a7242d0c --- /dev/null +++ b/arch/parisc/include/asm/linkage.h | |||
@@ -0,0 +1,31 @@ | |||
1 | #ifndef __ASM_PARISC_LINKAGE_H | ||
2 | #define __ASM_PARISC_LINKAGE_H | ||
3 | |||
4 | #ifndef __ALIGN | ||
5 | #define __ALIGN .align 4 | ||
6 | #define __ALIGN_STR ".align 4" | ||
7 | #endif | ||
8 | |||
9 | /* | ||
10 | * In parisc assembly a semicolon marks a comment while an | ||
11 | * exclamation mark is used to separate independent lines. | ||
12 | */ | ||
13 | #ifdef __ASSEMBLY__ | ||
14 | |||
15 | #define ENTRY(name) \ | ||
16 | .export name !\ | ||
17 | ALIGN !\ | ||
18 | name: | ||
19 | |||
20 | #ifdef CONFIG_64BIT | ||
21 | #define ENDPROC(name) \ | ||
22 | END(name) | ||
23 | #else | ||
24 | #define ENDPROC(name) \ | ||
25 | .type name, @function !\ | ||
26 | END(name) | ||
27 | #endif | ||
28 | |||
29 | #endif /* __ASSEMBLY__ */ | ||
30 | |||
31 | #endif /* __ASM_PARISC_LINKAGE_H */ | ||
diff --git a/arch/parisc/include/asm/local.h b/arch/parisc/include/asm/local.h new file mode 100644 index 000000000000..c11c530f74d0 --- /dev/null +++ b/arch/parisc/include/asm/local.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/local.h> | |||
diff --git a/arch/parisc/include/asm/machdep.h b/arch/parisc/include/asm/machdep.h new file mode 100644 index 000000000000..a231c97d703e --- /dev/null +++ b/arch/parisc/include/asm/machdep.h | |||
@@ -0,0 +1,16 @@ | |||
1 | #ifndef _PARISC_MACHDEP_H | ||
2 | #define _PARISC_MACHDEP_H | ||
3 | |||
4 | #include <linux/notifier.h> | ||
5 | |||
6 | #define MACH_RESTART 1 | ||
7 | #define MACH_HALT 2 | ||
8 | #define MACH_POWER_ON 3 | ||
9 | #define MACH_POWER_OFF 4 | ||
10 | |||
11 | extern struct notifier_block *mach_notifier; | ||
12 | extern void pa7300lc_init(void); | ||
13 | |||
14 | extern void (*cpu_lpmc)(int, struct pt_regs *); | ||
15 | |||
16 | #endif | ||
diff --git a/arch/parisc/include/asm/mc146818rtc.h b/arch/parisc/include/asm/mc146818rtc.h new file mode 100644 index 000000000000..adf41631449f --- /dev/null +++ b/arch/parisc/include/asm/mc146818rtc.h | |||
@@ -0,0 +1,9 @@ | |||
1 | /* | ||
2 | * Machine dependent access functions for RTC registers. | ||
3 | */ | ||
4 | #ifndef _ASM_MC146818RTC_H | ||
5 | #define _ASM_MC146818RTC_H | ||
6 | |||
7 | /* empty include file to satisfy the include in genrtc.c */ | ||
8 | |||
9 | #endif /* _ASM_MC146818RTC_H */ | ||
diff --git a/arch/parisc/include/asm/mckinley.h b/arch/parisc/include/asm/mckinley.h new file mode 100644 index 000000000000..d1ea6f12915e --- /dev/null +++ b/arch/parisc/include/asm/mckinley.h | |||
@@ -0,0 +1,9 @@ | |||
1 | #ifndef ASM_PARISC_MCKINLEY_H | ||
2 | #define ASM_PARISC_MCKINLEY_H | ||
3 | #ifdef __KERNEL__ | ||
4 | |||
5 | /* declared in arch/parisc/kernel/setup.c */ | ||
6 | extern struct proc_dir_entry * proc_mckinley_root; | ||
7 | |||
8 | #endif /*__KERNEL__*/ | ||
9 | #endif /*ASM_PARISC_MCKINLEY_H*/ | ||
diff --git a/arch/parisc/include/asm/mman.h b/arch/parisc/include/asm/mman.h new file mode 100644 index 000000000000..defe752cc996 --- /dev/null +++ b/arch/parisc/include/asm/mman.h | |||
@@ -0,0 +1,61 @@ | |||
1 | #ifndef __PARISC_MMAN_H__ | ||
2 | #define __PARISC_MMAN_H__ | ||
3 | |||
4 | #define PROT_READ 0x1 /* page can be read */ | ||
5 | #define PROT_WRITE 0x2 /* page can be written */ | ||
6 | #define PROT_EXEC 0x4 /* page can be executed */ | ||
7 | #define PROT_SEM 0x8 /* page may be used for atomic ops */ | ||
8 | #define PROT_NONE 0x0 /* page can not be accessed */ | ||
9 | #define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ | ||
10 | #define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ | ||
11 | |||
12 | #define MAP_SHARED 0x01 /* Share changes */ | ||
13 | #define MAP_PRIVATE 0x02 /* Changes are private */ | ||
14 | #define MAP_TYPE 0x03 /* Mask for type of mapping */ | ||
15 | #define MAP_FIXED 0x04 /* Interpret addr exactly */ | ||
16 | #define MAP_ANONYMOUS 0x10 /* don't use a file */ | ||
17 | |||
18 | #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ | ||
19 | #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ | ||
20 | #define MAP_LOCKED 0x2000 /* pages are locked */ | ||
21 | #define MAP_NORESERVE 0x4000 /* don't check for reservations */ | ||
22 | #define MAP_GROWSDOWN 0x8000 /* stack-like segment */ | ||
23 | #define MAP_POPULATE 0x10000 /* populate (prefault) pagetables */ | ||
24 | #define MAP_NONBLOCK 0x20000 /* do not block on IO */ | ||
25 | |||
26 | #define MS_SYNC 1 /* synchronous memory sync */ | ||
27 | #define MS_ASYNC 2 /* sync memory asynchronously */ | ||
28 | #define MS_INVALIDATE 4 /* invalidate the caches */ | ||
29 | |||
30 | #define MCL_CURRENT 1 /* lock all current mappings */ | ||
31 | #define MCL_FUTURE 2 /* lock all future mappings */ | ||
32 | |||
33 | #define MADV_NORMAL 0 /* no further special treatment */ | ||
34 | #define MADV_RANDOM 1 /* expect random page references */ | ||
35 | #define MADV_SEQUENTIAL 2 /* expect sequential page references */ | ||
36 | #define MADV_WILLNEED 3 /* will need these pages */ | ||
37 | #define MADV_DONTNEED 4 /* don't need these pages */ | ||
38 | #define MADV_SPACEAVAIL 5 /* insure that resources are reserved */ | ||
39 | #define MADV_VPS_PURGE 6 /* Purge pages from VM page cache */ | ||
40 | #define MADV_VPS_INHERIT 7 /* Inherit parents page size */ | ||
41 | |||
42 | /* common/generic parameters */ | ||
43 | #define MADV_REMOVE 9 /* remove these pages & resources */ | ||
44 | #define MADV_DONTFORK 10 /* don't inherit across fork */ | ||
45 | #define MADV_DOFORK 11 /* do inherit across fork */ | ||
46 | |||
47 | /* The range 12-64 is reserved for page size specification. */ | ||
48 | #define MADV_4K_PAGES 12 /* Use 4K pages */ | ||
49 | #define MADV_16K_PAGES 14 /* Use 16K pages */ | ||
50 | #define MADV_64K_PAGES 16 /* Use 64K pages */ | ||
51 | #define MADV_256K_PAGES 18 /* Use 256K pages */ | ||
52 | #define MADV_1M_PAGES 20 /* Use 1 Megabyte pages */ | ||
53 | #define MADV_4M_PAGES 22 /* Use 4 Megabyte pages */ | ||
54 | #define MADV_16M_PAGES 24 /* Use 16 Megabyte pages */ | ||
55 | #define MADV_64M_PAGES 26 /* Use 64 Megabyte pages */ | ||
56 | |||
57 | /* compatibility flags */ | ||
58 | #define MAP_FILE 0 | ||
59 | #define MAP_VARIABLE 0 | ||
60 | |||
61 | #endif /* __PARISC_MMAN_H__ */ | ||
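Editorial sketch: these constants are exported to user space via <sys/mman.h>; a small user-space example exercising a few of them:

	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 65536;
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED)
			return 1;
		madvise(p, len, MADV_SEQUENTIAL);	/* hint: sequential access */
		munmap(p, len);
		return 0;
	}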
diff --git a/arch/parisc/include/asm/mmu.h b/arch/parisc/include/asm/mmu.h new file mode 100644 index 000000000000..6a310cf8b734 --- /dev/null +++ b/arch/parisc/include/asm/mmu.h | |||
@@ -0,0 +1,7 @@ | |||
1 | #ifndef _PARISC_MMU_H_ | ||
2 | #define _PARISC_MMU_H_ | ||
3 | |||
4 | /* On parisc, we store the space id here */ | ||
5 | typedef unsigned long mm_context_t; | ||
6 | |||
7 | #endif /* _PARISC_MMU_H_ */ | ||
diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h new file mode 100644 index 000000000000..85856c74ad1d --- /dev/null +++ b/arch/parisc/include/asm/mmu_context.h | |||
@@ -0,0 +1,75 @@ | |||
1 | #ifndef __PARISC_MMU_CONTEXT_H | ||
2 | #define __PARISC_MMU_CONTEXT_H | ||
3 | |||
4 | #include <linux/mm.h> | ||
5 | #include <linux/sched.h> | ||
6 | #include <asm/atomic.h> | ||
7 | #include <asm/pgalloc.h> | ||
8 | #include <asm/pgtable.h> | ||
9 | #include <asm-generic/mm_hooks.h> | ||
10 | |||
11 | static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | ||
12 | { | ||
13 | } | ||
14 | |||
15 | /* on PA-RISC, we actually have enough contexts to justify an allocator | ||
16 | * for them. prumpf */ | ||
17 | |||
18 | extern unsigned long alloc_sid(void); | ||
19 | extern void free_sid(unsigned long); | ||
20 | |||
21 | static inline int | ||
22 | init_new_context(struct task_struct *tsk, struct mm_struct *mm) | ||
23 | { | ||
24 | BUG_ON(atomic_read(&mm->mm_users) != 1); | ||
25 | |||
26 | mm->context = alloc_sid(); | ||
27 | return 0; | ||
28 | } | ||
29 | |||
30 | static inline void | ||
31 | destroy_context(struct mm_struct *mm) | ||
32 | { | ||
33 | free_sid(mm->context); | ||
34 | mm->context = 0; | ||
35 | } | ||
36 | |||
37 | static inline void load_context(mm_context_t context) | ||
38 | { | ||
39 | mtsp(context, 3); | ||
40 | #if SPACEID_SHIFT == 0 | ||
41 | mtctl(context << 1,8); | ||
42 | #else | ||
43 | mtctl(context >> (SPACEID_SHIFT - 1),8); | ||
44 | #endif | ||
45 | } | ||
46 | |||
47 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) | ||
48 | { | ||
49 | |||
50 | if (prev != next) { | ||
51 | mtctl(__pa(next->pgd), 25); | ||
52 | load_context(next->context); | ||
53 | } | ||
54 | } | ||
55 | |||
56 | #define deactivate_mm(tsk,mm) do { } while (0) | ||
57 | |||
58 | static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) | ||
59 | { | ||
60 | /* | ||
61 | * Activate_mm is our one chance to allocate a space id | ||
62 | * for a new mm created in the exec path. There's also | ||
63 | * some lazy tlb stuff, which is currently dead code, but | ||
64 | * we only allocate a space id if one hasn't been allocated | ||
65 | * already, so we should be OK. | ||
66 | */ | ||
67 | |||
68 | BUG_ON(next == &init_mm); /* Should never happen */ | ||
69 | |||
70 | if (next->context == 0) | ||
71 | next->context = alloc_sid(); | ||
72 | |||
73 | switch_mm(prev,next,current); | ||
74 | } | ||
75 | #endif | ||
diff --git a/arch/parisc/include/asm/mmzone.h b/arch/parisc/include/asm/mmzone.h new file mode 100644 index 000000000000..9608d2cf214a --- /dev/null +++ b/arch/parisc/include/asm/mmzone.h | |||
@@ -0,0 +1,73 @@ | |||
1 | #ifndef _PARISC_MMZONE_H | ||
2 | #define _PARISC_MMZONE_H | ||
3 | |||
4 | #ifdef CONFIG_DISCONTIGMEM | ||
5 | |||
6 | #define MAX_PHYSMEM_RANGES 8 /* Fix the size for now (current known max is 3) */ | ||
7 | extern int npmem_ranges; | ||
8 | |||
9 | struct node_map_data { | ||
10 | pg_data_t pg_data; | ||
11 | }; | ||
12 | |||
13 | extern struct node_map_data node_data[]; | ||
14 | |||
15 | #define NODE_DATA(nid) (&node_data[nid].pg_data) | ||
16 | |||
17 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) | ||
18 | #define node_end_pfn(nid) \ | ||
19 | ({ \ | ||
20 | pg_data_t *__pgdat = NODE_DATA(nid); \ | ||
21 | __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \ | ||
22 | }) | ||
23 | |||
24 | /* We have these possible memory map layouts: | ||
25 | * Astro: 0-3.75, 67.75-68, 4-64 | ||
26 | * zx1: 0-1, 257-260, 4-256 | ||
27 | * Stretch (N-class): 0-2, 4-32, 34-xxx | ||
28 | */ | ||
29 | |||
30 | /* Since each 1GB can only belong to one region (node), we can create | ||
31 | * an index table for pfn to nid lookup; each entry in pfnnid_map | ||
32 | * represents 1GB, and contains the node that the memory belongs to. */ | ||
33 | |||
34 | #define PFNNID_SHIFT (30 - PAGE_SHIFT) | ||
35 | #define PFNNID_MAP_MAX 512 /* support 512GB */ | ||
36 | extern unsigned char pfnnid_map[PFNNID_MAP_MAX]; | ||
37 | |||
38 | #ifndef CONFIG_64BIT | ||
39 | #define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT)) | ||
40 | #else | ||
41 | /* io can be 0xf0f0f0f0f0xxxxxx or 0xfffffffff0000000 */ | ||
42 | #define pfn_is_io(pfn) ((pfn & (0xf000000000000000UL >> PAGE_SHIFT)) == (0xf000000000000000UL >> PAGE_SHIFT)) | ||
43 | #endif | ||
44 | |||
45 | static inline int pfn_to_nid(unsigned long pfn) | ||
46 | { | ||
47 | unsigned int i; | ||
48 | unsigned char r; | ||
49 | |||
50 | if (unlikely(pfn_is_io(pfn))) | ||
51 | return 0; | ||
52 | |||
53 | i = pfn >> PFNNID_SHIFT; | ||
54 | BUG_ON(i >= sizeof(pfnnid_map) / sizeof(pfnnid_map[0])); | ||
55 | r = pfnnid_map[i]; | ||
56 | BUG_ON(r == 0xff); | ||
57 | |||
58 | return (int)r; | ||
59 | } | ||
60 | |||
61 | static inline int pfn_valid(int pfn) | ||
62 | { | ||
63 | int nid = pfn_to_nid(pfn); | ||
64 | |||
65 | if (nid >= 0) | ||
66 | return (pfn < node_end_pfn(nid)); | ||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | #else /* !CONFIG_DISCONTIGMEM */ | ||
71 | #define MAX_PHYSMEM_RANGES 1 | ||
72 | #endif | ||
73 | #endif /* _PARISC_MMZONE_H */ | ||
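Editorial note: a worked example of the pfnnid_map indexing above with 4 kB pages:

	/*
	 * PAGE_SHIFT == 12, so PFNNID_SHIFT == 30 - 12 == 18.
	 * pfn_to_nid() therefore indexes pfnnid_map[pfn >> 18]: each entry
	 * covers 2^18 pages * 4 kB == 1 GB of physical address space, and
	 * the 512-entry table covers the advertised 512 GB.
	 */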
diff --git a/arch/parisc/include/asm/module.h b/arch/parisc/include/asm/module.h new file mode 100644 index 000000000000..c2cb49e934c1 --- /dev/null +++ b/arch/parisc/include/asm/module.h | |||
@@ -0,0 +1,32 @@ | |||
1 | #ifndef _ASM_PARISC_MODULE_H | ||
2 | #define _ASM_PARISC_MODULE_H | ||
3 | /* | ||
4 | * This file contains the parisc architecture specific module code. | ||
5 | */ | ||
6 | #ifdef CONFIG_64BIT | ||
7 | #define Elf_Shdr Elf64_Shdr | ||
8 | #define Elf_Sym Elf64_Sym | ||
9 | #define Elf_Ehdr Elf64_Ehdr | ||
10 | #define Elf_Addr Elf64_Addr | ||
11 | #define Elf_Rela Elf64_Rela | ||
12 | #else | ||
13 | #define Elf_Shdr Elf32_Shdr | ||
14 | #define Elf_Sym Elf32_Sym | ||
15 | #define Elf_Ehdr Elf32_Ehdr | ||
16 | #define Elf_Addr Elf32_Addr | ||
17 | #define Elf_Rela Elf32_Rela | ||
18 | #endif | ||
19 | |||
20 | struct unwind_table; | ||
21 | |||
22 | struct mod_arch_specific | ||
23 | { | ||
24 | unsigned long got_offset, got_count, got_max; | ||
25 | unsigned long fdesc_offset, fdesc_count, fdesc_max; | ||
26 | unsigned long stub_offset, stub_count, stub_max; | ||
27 | unsigned long init_stub_offset, init_stub_count, init_stub_max; | ||
28 | int unwind_section; | ||
29 | struct unwind_table *unwind; | ||
30 | }; | ||
31 | |||
32 | #endif /* _ASM_PARISC_MODULE_H */ | ||
diff --git a/arch/parisc/include/asm/msgbuf.h b/arch/parisc/include/asm/msgbuf.h new file mode 100644 index 000000000000..fe88f2649418 --- /dev/null +++ b/arch/parisc/include/asm/msgbuf.h | |||
@@ -0,0 +1,37 @@ | |||
1 | #ifndef _PARISC_MSGBUF_H | ||
2 | #define _PARISC_MSGBUF_H | ||
3 | |||
4 | /* | ||
5 | * The msqid64_ds structure for parisc architecture, copied from sparc. | ||
6 | * Note extra padding because this structure is passed back and forth | ||
7 | * between kernel and user space. | ||
8 | * | ||
9 | * Pad space is left for: | ||
10 | * - 64-bit time_t to solve y2038 problem | ||
11 | * - 2 miscellaneous 32-bit values | ||
12 | */ | ||
13 | |||
14 | struct msqid64_ds { | ||
15 | struct ipc64_perm msg_perm; | ||
16 | #ifndef CONFIG_64BIT | ||
17 | unsigned int __pad1; | ||
18 | #endif | ||
19 | __kernel_time_t msg_stime; /* last msgsnd time */ | ||
20 | #ifndef CONFIG_64BIT | ||
21 | unsigned int __pad2; | ||
22 | #endif | ||
23 | __kernel_time_t msg_rtime; /* last msgrcv time */ | ||
24 | #ifndef CONFIG_64BIT | ||
25 | unsigned int __pad3; | ||
26 | #endif | ||
27 | __kernel_time_t msg_ctime; /* last change time */ | ||
28 | unsigned int msg_cbytes; /* current number of bytes on queue */ | ||
29 | unsigned int msg_qnum; /* number of messages in queue */ | ||
30 | unsigned int msg_qbytes; /* max number of bytes on queue */ | ||
31 | __kernel_pid_t msg_lspid; /* pid of last msgsnd */ | ||
32 | __kernel_pid_t msg_lrpid; /* last receive pid */ | ||
33 | unsigned int __unused1; | ||
34 | unsigned int __unused2; | ||
35 | }; | ||
36 | |||
37 | #endif /* _PARISC_MSGBUF_H */ | ||
diff --git a/arch/parisc/include/asm/mutex.h b/arch/parisc/include/asm/mutex.h new file mode 100644 index 000000000000..458c1f7fbc18 --- /dev/null +++ b/arch/parisc/include/asm/mutex.h | |||
@@ -0,0 +1,9 @@ | |||
1 | /* | ||
2 | * Pull in the generic implementation for the mutex fastpath. | ||
3 | * | ||
4 | * TODO: implement optimized primitives instead, or leave the generic | ||
5 | * implementation in place, or pick the atomic_xchg() based generic | ||
6 | * implementation. (see asm-generic/mutex-xchg.h for details) | ||
7 | */ | ||
8 | |||
9 | #include <asm-generic/mutex-dec.h> | ||
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h new file mode 100644 index 000000000000..c3941f09a878 --- /dev/null +++ b/arch/parisc/include/asm/page.h | |||
@@ -0,0 +1,173 @@ | |||
1 | #ifndef _PARISC_PAGE_H | ||
2 | #define _PARISC_PAGE_H | ||
3 | |||
4 | #include <linux/const.h> | ||
5 | |||
6 | #if defined(CONFIG_PARISC_PAGE_SIZE_4KB) | ||
7 | # define PAGE_SHIFT 12 | ||
8 | #elif defined(CONFIG_PARISC_PAGE_SIZE_16KB) | ||
9 | # define PAGE_SHIFT 14 | ||
10 | #elif defined(CONFIG_PARISC_PAGE_SIZE_64KB) | ||
11 | # define PAGE_SHIFT 16 | ||
12 | #else | ||
13 | # error "unknown default kernel page size" | ||
14 | #endif | ||
15 | #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) | ||
16 | #define PAGE_MASK (~(PAGE_SIZE-1)) | ||
17 | |||
18 | |||
19 | #ifndef __ASSEMBLY__ | ||
20 | |||
21 | #include <asm/types.h> | ||
22 | #include <asm/cache.h> | ||
23 | |||
24 | #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) | ||
25 | #define copy_page(to,from) copy_user_page_asm((void *)(to), (void *)(from)) | ||
26 | |||
27 | struct page; | ||
28 | |||
29 | void copy_user_page_asm(void *to, void *from); | ||
30 | void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, | ||
31 | struct page *pg); | ||
32 | void clear_user_page(void *page, unsigned long vaddr, struct page *pg); | ||
33 | |||
34 | /* | ||
35 | * These are used to make use of C type-checking.. | ||
36 | */ | ||
37 | #define STRICT_MM_TYPECHECKS | ||
38 | #ifdef STRICT_MM_TYPECHECKS | ||
39 | typedef struct { unsigned long pte; | ||
40 | #if !defined(CONFIG_64BIT) | ||
41 | unsigned long future_flags; | ||
42 | /* XXX: it's possible to remove future_flags and change BITS_PER_PTE_ENTRY | ||
43 | to 2, but then, strangely, the identical 32bit kernel boots on a | ||
44 | c3000 (pa20) yet no longer on a 715 (pa11). | ||
45 | Still investigating... HelgeD. | ||
46 | */ | ||
47 | #endif | ||
48 | } pte_t; /* either 32 or 64bit */ | ||
49 | |||
50 | /* NOTE: even on 64 bits, these entries are __u32 because we allocate | ||
51 | * the pmd and pgd in ZONE_DMA (i.e. under 4GB) */ | ||
52 | typedef struct { __u32 pmd; } pmd_t; | ||
53 | typedef struct { __u32 pgd; } pgd_t; | ||
54 | typedef struct { unsigned long pgprot; } pgprot_t; | ||
55 | |||
56 | #define pte_val(x) ((x).pte) | ||
57 | /* These do not work as lvalues, so make sure we don't use them as such. */ | ||
58 | #define pmd_val(x) ((x).pmd + 0) | ||
59 | #define pgd_val(x) ((x).pgd + 0) | ||
60 | #define pgprot_val(x) ((x).pgprot) | ||
61 | |||
62 | #define __pte(x) ((pte_t) { (x) } ) | ||
63 | #define __pmd(x) ((pmd_t) { (x) } ) | ||
64 | #define __pgd(x) ((pgd_t) { (x) } ) | ||
65 | #define __pgprot(x) ((pgprot_t) { (x) } ) | ||
66 | |||
67 | #define __pmd_val_set(x,n) (x).pmd = (n) | ||
68 | #define __pgd_val_set(x,n) (x).pgd = (n) | ||
69 | |||
70 | #else | ||
71 | /* | ||
72 | * .. while these make it easier on the compiler | ||
73 | */ | ||
74 | typedef unsigned long pte_t; | ||
75 | typedef __u32 pmd_t; | ||
76 | typedef __u32 pgd_t; | ||
77 | typedef unsigned long pgprot_t; | ||
78 | |||
79 | #define pte_val(x) (x) | ||
80 | #define pmd_val(x) (x) | ||
81 | #define pgd_val(x) (x) | ||
82 | #define pgprot_val(x) (x) | ||
83 | |||
84 | #define __pte(x) (x) | ||
85 | #define __pmd(x) (x) | ||
86 | #define __pgd(x) (x) | ||
87 | #define __pgprot(x) (x) | ||
88 | |||
89 | #define __pmd_val_set(x,n) (x) = (n) | ||
90 | #define __pgd_val_set(x,n) (x) = (n) | ||
91 | |||
92 | #endif /* STRICT_MM_TYPECHECKS */ | ||
93 | |||
94 | typedef struct page *pgtable_t; | ||
95 | |||
96 | typedef struct __physmem_range { | ||
97 | unsigned long start_pfn; | ||
98 | unsigned long pages; /* PAGE_SIZE pages */ | ||
99 | } physmem_range_t; | ||
100 | |||
101 | extern physmem_range_t pmem_ranges[]; | ||
102 | extern int npmem_ranges; | ||
103 | |||
104 | #endif /* !__ASSEMBLY__ */ | ||
105 | |||
106 | /* WARNING: The definitions below must exactly match sizeof(pte_t), | ||
107 | * etc. | ||
108 | */ | ||
109 | #ifdef CONFIG_64BIT | ||
110 | #define BITS_PER_PTE_ENTRY 3 | ||
111 | #define BITS_PER_PMD_ENTRY 2 | ||
112 | #define BITS_PER_PGD_ENTRY 2 | ||
113 | #else | ||
114 | #define BITS_PER_PTE_ENTRY 3 | ||
115 | #define BITS_PER_PMD_ENTRY 2 | ||
116 | #define BITS_PER_PGD_ENTRY BITS_PER_PMD_ENTRY | ||
117 | #endif | ||
118 | #define PGD_ENTRY_SIZE (1UL << BITS_PER_PGD_ENTRY) | ||
119 | #define PMD_ENTRY_SIZE (1UL << BITS_PER_PMD_ENTRY) | ||
120 | #define PTE_ENTRY_SIZE (1UL << BITS_PER_PTE_ENTRY) | ||
121 | |||
122 | #define LINUX_GATEWAY_SPACE 0 | ||
123 | |||
124 | /* This governs the relationship between virtual and physical addresses. | ||
125 | * If you alter it, make sure to take care of our various fixed mapping | ||
126 | * segments in fixmap.h */ | ||
127 | #ifdef CONFIG_64BIT | ||
128 | #define __PAGE_OFFSET (0x40000000) /* 1GB */ | ||
129 | #else | ||
130 | #define __PAGE_OFFSET (0x10000000) /* 256MB */ | ||
131 | #endif | ||
132 | |||
133 | #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) | ||
134 | |||
135 | /* The size of the gateway page (we leave lots of room for expansion) */ | ||
136 | #define GATEWAY_PAGE_SIZE 0x4000 | ||
137 | |||
138 | /* The start of the actual kernel binary---used in vmlinux.lds.S | ||
139 | * Leave some space after __PAGE_OFFSET for detecting kernel null | ||
140 | * ptr derefs */ | ||
141 | #define KERNEL_BINARY_TEXT_START (__PAGE_OFFSET + 0x100000) | ||
142 | |||
143 | /* These macros don't work for 64-bit C code -- don't allow in C at all */ | ||
144 | #ifdef __ASSEMBLY__ | ||
145 | # define PA(x) ((x)-__PAGE_OFFSET) | ||
146 | # define VA(x) ((x)+__PAGE_OFFSET) | ||
147 | #endif | ||
148 | #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET) | ||
149 | #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) | ||
150 | |||
151 | #ifndef CONFIG_DISCONTIGMEM | ||
152 | #define pfn_valid(pfn) ((pfn) < max_mapnr) | ||
153 | #endif /* CONFIG_DISCONTIGMEM */ | ||
154 | |||
155 | #ifdef CONFIG_HUGETLB_PAGE | ||
156 | #define HPAGE_SHIFT 22 /* 4MB (is this fixed?) */ | ||
157 | #define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) | ||
158 | #define HPAGE_MASK (~(HPAGE_SIZE - 1)) | ||
159 | #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) | ||
160 | #endif | ||
161 | |||
162 | #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) | ||
163 | |||
164 | #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) | ||
165 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) | ||
166 | |||
167 | #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ | ||
168 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) | ||
169 | |||
170 | #include <asm-generic/memory_model.h> | ||
171 | #include <asm-generic/page.h> | ||
172 | |||
173 | #endif /* _PARISC_PAGE_H */ | ||
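Editorial note: a worked example of the virtual/physical conversion above, using the 32-bit __PAGE_OFFSET of 0x10000000 and 4 kB pages:

	/*
	 * __pa(0x10400000)  == 0x00400000         kernel virtual -> physical
	 * __va(0x00400000)  == (void *)0x10400000 physical -> kernel virtual
	 * virt_to_page(0x10400000) == pfn_to_page(0x00400000 >> 12)
	 *                          == pfn_to_page(0x400)
	 */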
diff --git a/arch/parisc/include/asm/param.h b/arch/parisc/include/asm/param.h new file mode 100644 index 000000000000..32e03d877858 --- /dev/null +++ b/arch/parisc/include/asm/param.h | |||
@@ -0,0 +1,22 @@ | |||
1 | #ifndef _ASMPARISC_PARAM_H | ||
2 | #define _ASMPARISC_PARAM_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | #define HZ CONFIG_HZ | ||
6 | #define USER_HZ 100 /* some user API use "ticks" */ | ||
7 | #define CLOCKS_PER_SEC (USER_HZ) /* like times() */ | ||
8 | #endif | ||
9 | |||
10 | #ifndef HZ | ||
11 | #define HZ 100 | ||
12 | #endif | ||
13 | |||
14 | #define EXEC_PAGESIZE 4096 | ||
15 | |||
16 | #ifndef NOGROUP | ||
17 | #define NOGROUP (-1) | ||
18 | #endif | ||
19 | |||
20 | #define MAXHOSTNAMELEN 64 /* max length of hostname */ | ||
21 | |||
22 | #endif | ||
diff --git a/arch/parisc/include/asm/parisc-device.h b/arch/parisc/include/asm/parisc-device.h new file mode 100644 index 000000000000..7aa13f2add7a --- /dev/null +++ b/arch/parisc/include/asm/parisc-device.h | |||
@@ -0,0 +1,64 @@ | |||
1 | #ifndef _ASM_PARISC_PARISC_DEVICE_H_ | ||
2 | #define _ASM_PARISC_PARISC_DEVICE_H_ | ||
3 | |||
4 | #include <linux/device.h> | ||
5 | |||
6 | struct parisc_device { | ||
7 | struct resource hpa; /* Hard Physical Address */ | ||
8 | struct parisc_device_id id; | ||
9 | struct parisc_driver *driver; /* Driver for this device */ | ||
10 | char name[80]; /* The hardware description */ | ||
11 | int irq; | ||
12 | int aux_irq; /* Some devices have a second IRQ */ | ||
13 | |||
14 | char hw_path; /* The module number on this bus */ | ||
15 | unsigned int num_addrs; /* some devices have additional address ranges. */ | ||
16 | unsigned long *addr; /* which will be stored here */ | ||
17 | |||
18 | #ifdef CONFIG_64BIT | ||
19 | /* parms for pdc_pat_cell_module() call */ | ||
20 | unsigned long pcell_loc; /* Physical Cell location */ | ||
21 | unsigned long mod_index; /* PAT specific - Misc Module info */ | ||
22 | |||
23 | /* generic info returned from pdc_pat_cell_module() */ | ||
24 | unsigned long mod_info; /* PAT specific - Misc Module info */ | ||
25 | unsigned long pmod_loc; /* physical Module location */ | ||
26 | #endif | ||
27 | u64 dma_mask; /* DMA mask for I/O */ | ||
28 | struct device dev; | ||
29 | }; | ||
30 | |||
31 | struct parisc_driver { | ||
32 | struct parisc_driver *next; | ||
33 | char *name; | ||
34 | const struct parisc_device_id *id_table; | ||
35 | int (*probe) (struct parisc_device *dev); /* New device discovered */ | ||
36 | int (*remove) (struct parisc_device *dev); | ||
37 | struct device_driver drv; | ||
38 | }; | ||
39 | |||
40 | |||
41 | #define to_parisc_device(d) container_of(d, struct parisc_device, dev) | ||
42 | #define to_parisc_driver(d) container_of(d, struct parisc_driver, drv) | ||
43 | #define parisc_parent(d) to_parisc_device(d->dev.parent) | ||
44 | |||
45 | static inline char *parisc_pathname(struct parisc_device *d) | ||
46 | { | ||
47 | return d->dev.bus_id; | ||
48 | } | ||
49 | |||
50 | static inline void | ||
51 | parisc_set_drvdata(struct parisc_device *d, void *p) | ||
52 | { | ||
53 | dev_set_drvdata(&d->dev, p); | ||
54 | } | ||
55 | |||
56 | static inline void * | ||
57 | parisc_get_drvdata(struct parisc_device *d) | ||
58 | { | ||
59 | return dev_get_drvdata(&d->dev); | ||
60 | } | ||
61 | |||
62 | extern struct bus_type parisc_bus_type; | ||
63 | |||
64 | #endif /*_ASM_PARISC_PARISC_DEVICE_H_*/ | ||
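Editorial sketch: putting this together with the HPHW_* constants and register_parisc_driver() from hardware.h above, a minimal registration sketch (names are hypothetical, includes omitted, and the probe deliberately claims nothing):

	static struct parisc_device_id example_ids[] = {
		/* match any FIO device, any hardware/software version */
		{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, SVERSION_ANY_ID },
		{ 0, }
	};

	static int example_probe(struct parisc_device *dev)
	{
		/* a real driver would ioremap dev->hpa.start and claim dev->irq here */
		return -ENODEV;		/* sketch only: decline to claim the device */
	}

	static struct parisc_driver example_driver = {
		.name		= "example",
		.id_table	= example_ids,
		.probe		= example_probe,
	};

	static int __init example_init(void)
	{
		return register_parisc_driver(&example_driver);
	}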
diff --git a/arch/parisc/include/asm/parport.h b/arch/parisc/include/asm/parport.h new file mode 100644 index 000000000000..00d9cc3e7b97 --- /dev/null +++ b/arch/parisc/include/asm/parport.h | |||
@@ -0,0 +1,18 @@ | |||
1 | /* | ||
2 | * | ||
3 | * parport.h: ia32-compatible parport initialisation | ||
4 | * | ||
5 | * This file should only be included by drivers/parport/parport_pc.c. | ||
6 | */ | ||
7 | #ifndef _ASM_PARPORT_H | ||
8 | #define _ASM_PARPORT_H 1 | ||
9 | |||
10 | |||
11 | static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma) | ||
12 | { | ||
13 | /* nothing ! */ | ||
14 | return 0; | ||
15 | } | ||
16 | |||
17 | |||
18 | #endif /* !(_ASM_PARPORT_H) */ | ||
diff --git a/arch/parisc/include/asm/pci.h b/arch/parisc/include/asm/pci.h new file mode 100644 index 000000000000..4ba868f44a5e --- /dev/null +++ b/arch/parisc/include/asm/pci.h | |||
@@ -0,0 +1,294 @@ | |||
1 | #ifndef __ASM_PARISC_PCI_H | ||
2 | #define __ASM_PARISC_PCI_H | ||
3 | |||
4 | #include <asm/scatterlist.h> | ||
5 | |||
6 | |||
7 | |||
8 | /* | ||
9 | ** HP PCI platforms generally support multiple bus adapters. | ||
10 | ** (workstations 1-~4, servers 2-~32) | ||
11 | ** | ||
12 | ** Newer platforms number the busses across PCI bus adapters *sparsely*. | ||
13 | ** E.g. 0, 8, 16, ... | ||
14 | ** | ||
15 | ** Under a PCI bus, most HP platforms support PPBs up to two or three | ||
16 | ** levels deep. See "Bit3" product line. | ||
17 | */ | ||
18 | #define PCI_MAX_BUSSES 256 | ||
19 | |||
20 | |||
21 | /* To be used as: mdelay(pci_post_reset_delay); | ||
22 | * | ||
23 | * post_reset is the time the kernel should stall to prevent anyone from | ||
24 | * accessing the PCI bus once #RESET is de-asserted. | ||
25 | * The PCI spec somewhere says 1 second, but with multi-PCI bus systems, | ||
26 | * this makes the boot time much longer than necessary. | ||
27 | * 20ms seems to work for all the HP PCI implementations to date. | ||
28 | */ | ||
29 | #define pci_post_reset_delay 50 | ||
30 | |||
31 | |||
32 | /* | ||
33 | ** pci_hba_data (aka H2P_OBJECT in HP/UX) | ||
34 | ** | ||
35 | ** This is the "common" or "base" data structure which HBA drivers | ||
36 | ** (eg Dino or LBA) are required to place at the top of their own | ||
37 | ** platform_data structure. I've heard this called "C inheritance" too. | ||
38 | ** | ||
39 | ** Data needed by pcibios layer belongs here. | ||
40 | */ | ||
41 | struct pci_hba_data { | ||
42 | void __iomem *base_addr; /* aka Host Physical Address */ | ||
43 | const struct parisc_device *dev; /* device from PA bus walk */ | ||
44 | struct pci_bus *hba_bus; /* primary PCI bus below HBA */ | ||
45 | int hba_num; /* I/O port space access "key" */ | ||
46 | struct resource bus_num; /* PCI bus numbers */ | ||
47 | struct resource io_space; /* PIOP */ | ||
48 | struct resource lmmio_space; /* bus addresses < 4Gb */ | ||
49 | struct resource elmmio_space; /* additional bus addresses < 4Gb */ | ||
50 | struct resource gmmio_space; /* bus addresses > 4Gb */ | ||
51 | |||
52 | /* NOTE: Dino code assumes it can use *all* of the lmmio_space, | ||
53 | * elmmio_space and gmmio_space as a contiguous array of | ||
54 | * resources. This #define represents the array size */ | ||
55 | #define DINO_MAX_LMMIO_RESOURCES 3 | ||
56 | |||
57 | unsigned long lmmio_space_offset; /* CPU view - PCI view */ | ||
58 | void * iommu; /* IOMMU this device is under */ | ||
59 | /* REVISIT - spinlock to protect resources? */ | ||
60 | |||
61 | #define HBA_NAME_SIZE 16 | ||
62 | char io_name[HBA_NAME_SIZE]; | ||
63 | char lmmio_name[HBA_NAME_SIZE]; | ||
64 | char elmmio_name[HBA_NAME_SIZE]; | ||
65 | char gmmio_name[HBA_NAME_SIZE]; | ||
66 | }; | ||
67 | |||
68 | #define HBA_DATA(d) ((struct pci_hba_data *) (d)) | ||
69 | |||
70 | /* | ||
71 | ** We support 2^16 I/O ports per HBA. These are set up in the form | ||
72 | ** 0xbbxxxx, where bb is the bus number and xxxx is the I/O port | ||
73 | ** space address. | ||
74 | */ | ||
75 | #define HBA_PORT_SPACE_BITS 16 | ||
76 | |||
77 | #define HBA_PORT_BASE(h) ((h) << HBA_PORT_SPACE_BITS) | ||
78 | #define HBA_PORT_SPACE_SIZE (1UL << HBA_PORT_SPACE_BITS) | ||
79 | |||
80 | #define PCI_PORT_HBA(a) ((a) >> HBA_PORT_SPACE_BITS) | ||
81 | #define PCI_PORT_ADDR(a) ((a) & (HBA_PORT_SPACE_SIZE - 1)) | ||
82 | |||
83 | #ifdef CONFIG_64BIT | ||
84 | #define PCI_F_EXTEND 0xffffffff00000000UL | ||
85 | #define PCI_IS_LMMIO(hba,a) pci_is_lmmio(hba,a) | ||
86 | |||
87 | /* We need to know if an address is LMMIO or GMMIO. | ||
88 | * LMMIO requires mangling, while GMMIO must be used as-is. | ||
89 | */ | ||
90 | static __inline__ int pci_is_lmmio(struct pci_hba_data *hba, unsigned long a) | ||
91 | { | ||
92 | return(((a) & PCI_F_EXTEND) == PCI_F_EXTEND); | ||
93 | } | ||
94 | |||
95 | /* | ||
96 | ** Convert between PCI (IO_VIEW) addresses and processor (PA_VIEW) addresses. | ||
97 | ** See pci.c for more conversions used by Generic PCI code. | ||
98 | ** | ||
99 | ** Platform characteristics/firmware guarantee that | ||
100 | ** (1) PA_VIEW - IO_VIEW = lmmio_offset for both LMMIO and ELMMIO | ||
101 | ** (2) PA_VIEW == IO_VIEW for GMMIO | ||
102 | */ | ||
103 | #define PCI_BUS_ADDR(hba,a) (PCI_IS_LMMIO(hba,a) \ | ||
104 | ? ((a) - hba->lmmio_space_offset) /* mangle LMMIO */ \ | ||
105 | : (a)) /* GMMIO */ | ||
106 | #define PCI_HOST_ADDR(hba,a) (((a) & PCI_F_EXTEND) == 0 \ | ||
107 | ? (a) + hba->lmmio_space_offset \ | ||
108 | : (a)) | ||
109 | |||
110 | #else /* !CONFIG_64BIT */ | ||
111 | |||
112 | #define PCI_BUS_ADDR(hba,a) (a) | ||
113 | #define PCI_HOST_ADDR(hba,a) (a) | ||
114 | #define PCI_F_EXTEND 0UL | ||
115 | #define PCI_IS_LMMIO(hba,a) (1) /* 32-bit doesn't support GMMIO */ | ||
116 | |||
117 | #endif /* !CONFIG_64BIT */ | ||
118 | |||
119 | /* | ||
120 | ** KLUGE: linux/pci.h includes asm/pci.h BEFORE declaring struct pci_bus | ||
121 | ** (This eliminates some of the warnings). | ||
122 | */ | ||
123 | struct pci_bus; | ||
124 | struct pci_dev; | ||
125 | |||
126 | /* | ||
127 | * If the PCI device's view of memory is the same as the CPU's view of memory, | ||
128 | * PCI_DMA_BUS_IS_PHYS is true. The networking and block device layers use | ||
129 | * this boolean for bounce buffer decisions. | ||
130 | */ | ||
131 | #ifdef CONFIG_PA20 | ||
132 | /* All PA-2.0 machines have an IOMMU. */ | ||
133 | #define PCI_DMA_BUS_IS_PHYS 0 | ||
134 | #define parisc_has_iommu() do { } while (0) | ||
135 | #else | ||
136 | |||
137 | #if defined(CONFIG_IOMMU_CCIO) || defined(CONFIG_IOMMU_SBA) | ||
138 | extern int parisc_bus_is_phys; /* in arch/parisc/kernel/setup.c */ | ||
139 | #define PCI_DMA_BUS_IS_PHYS parisc_bus_is_phys | ||
140 | #define parisc_has_iommu() do { parisc_bus_is_phys = 0; } while (0) | ||
141 | #else | ||
142 | #define PCI_DMA_BUS_IS_PHYS 1 | ||
143 | #define parisc_has_iommu() do { } while (0) | ||
144 | #endif | ||
145 | |||
146 | #endif /* !CONFIG_PA20 */ | ||
147 | |||
148 | |||
149 | /* | ||
150 | ** Most PCI devices (eg Tulip, NCR720) also export the same registers | ||
151 | ** to both MMIO and I/O port space. Due to poor performance of I/O Port | ||
153 | ** access under HP PCI bus adapters, we strongly recommend using MMIO | ||
153 | ** address space. | ||
154 | ** | ||
156 | ** While I'm at it, here are more PA programming notes: | ||
156 | ** | ||
157 | ** 1) MMIO stores (writes) are posted operations. This means the processor | ||
158 | ** gets an "ACK" before the write actually gets to the device. A read | ||
159 | ** to the same device (or typically the bus adapter above it) will | ||
160 | ** force in-flight write transaction(s) out to the targeted device | ||
161 | ** before the read can complete. | ||
162 | ** | ||
163 | ** 2) The Programmed I/O (PIO) data may not always be strongly ordered with | ||
164 | ** respect to DMA on all platforms. That is, PIO data can reach the processor | ||
165 | ** before in-flight DMA reaches memory. Since most SMP PA platforms | ||
166 | ** are I/O coherent, it generally doesn't matter...but sometimes | ||
167 | ** it does. | ||
168 | ** | ||
169 | ** I've helped device driver writers debug both types of problems. | ||
170 | */ | ||
171 | struct pci_port_ops { | ||
172 | u8 (*inb) (struct pci_hba_data *hba, u16 port); | ||
173 | u16 (*inw) (struct pci_hba_data *hba, u16 port); | ||
174 | u32 (*inl) (struct pci_hba_data *hba, u16 port); | ||
175 | void (*outb) (struct pci_hba_data *hba, u16 port, u8 data); | ||
176 | void (*outw) (struct pci_hba_data *hba, u16 port, u16 data); | ||
177 | void (*outl) (struct pci_hba_data *hba, u16 port, u32 data); | ||
178 | }; | ||
179 | |||
180 | |||
181 | struct pci_bios_ops { | ||
182 | void (*init)(void); | ||
183 | void (*fixup_bus)(struct pci_bus *bus); | ||
184 | }; | ||
185 | |||
186 | /* pci_unmap_{single,page} is not a nop, thus... */ | ||
187 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ | ||
188 | dma_addr_t ADDR_NAME; | ||
189 | #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ | ||
190 | __u32 LEN_NAME; | ||
191 | #define pci_unmap_addr(PTR, ADDR_NAME) \ | ||
192 | ((PTR)->ADDR_NAME) | ||
193 | #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ | ||
194 | (((PTR)->ADDR_NAME) = (VAL)) | ||
195 | #define pci_unmap_len(PTR, LEN_NAME) \ | ||
196 | ((PTR)->LEN_NAME) | ||
197 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ | ||
198 | (((PTR)->LEN_NAME) = (VAL)) | ||
199 | |||
200 | /* | ||
201 | ** Stuff declared in arch/parisc/kernel/pci.c | ||
202 | */ | ||
203 | extern struct pci_port_ops *pci_port; | ||
204 | extern struct pci_bios_ops *pci_bios; | ||
205 | |||
206 | #ifdef CONFIG_PCI | ||
207 | extern void pcibios_register_hba(struct pci_hba_data *); | ||
208 | extern void pcibios_set_master(struct pci_dev *); | ||
209 | #else | ||
210 | static inline void pcibios_register_hba(struct pci_hba_data *x) | ||
211 | { | ||
212 | } | ||
213 | #endif | ||
214 | |||
215 | /* | ||
216 | * pcibios_assign_all_busses() is used in drivers/pci/pci.c:pci_do_scan_bus() | ||
217 | * 0 == check if bridge is numbered before re-numbering. | ||
218 | * 1 == pci_do_scan_bus() should automatically number all PCI-PCI bridges. | ||
219 | * | ||
220 | * We *should* set this to zero for "legacy" platforms and one | ||
221 | * for PAT platforms. | ||
222 | * | ||
223 | * But legacy platforms also need to renumber the busses below a Host | ||
224 | * Bus controller. Adding a 4-port Tulip card on the first PCI root | ||
225 | * bus of a C200 resulted in the secondary bus being numbered as 1. | ||
226 | * The second PCI host bus controller's root bus had already been | ||
227 | * assigned bus number 1 by firmware and sysfs complained. | ||
228 | * | ||
229 | * Firmware isn't doing anything wrong here since each controller | ||
230 | * is its own PCI domain. It's simpler and easier for us to renumber | ||
231 | * the busses rather than treat each Dino as a separate PCI domain. | ||
232 | * Eventually, we may want to introduce PCI domains for Superdome or | ||
233 | * rp7420/8420 boxes and then revisit this issue. | ||
234 | */ | ||
235 | #define pcibios_assign_all_busses() (1) | ||
236 | #define pcibios_scan_all_fns(a, b) (0) | ||
237 | |||
238 | #define PCIBIOS_MIN_IO 0x10 | ||
239 | #define PCIBIOS_MIN_MEM 0x1000 /* NBPG - but pci/setup-res.c dies */ | ||
240 | |||
241 | /* export the pci_ DMA API in terms of the dma_ one */ | ||
242 | #include <asm-generic/pci-dma-compat.h> | ||
243 | |||
244 | #ifdef CONFIG_PCI | ||
245 | static inline void pci_dma_burst_advice(struct pci_dev *pdev, | ||
246 | enum pci_dma_burst_strategy *strat, | ||
247 | unsigned long *strategy_parameter) | ||
248 | { | ||
249 | unsigned long cacheline_size; | ||
250 | u8 byte; | ||
251 | |||
252 | pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte); | ||
253 | if (byte == 0) | ||
254 | cacheline_size = 1024; | ||
255 | else | ||
256 | cacheline_size = (int) byte * 4; | ||
257 | |||
258 | *strat = PCI_DMA_BURST_MULTIPLE; | ||
259 | *strategy_parameter = cacheline_size; | ||
260 | } | ||
261 | #endif | ||
262 | |||
263 | extern void | ||
264 | pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, | ||
265 | struct resource *res); | ||
266 | |||
267 | extern void | ||
268 | pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, | ||
269 | struct pci_bus_region *region); | ||
270 | |||
271 | static inline struct resource * | ||
272 | pcibios_select_root(struct pci_dev *pdev, struct resource *res) | ||
273 | { | ||
274 | struct resource *root = NULL; | ||
275 | |||
276 | if (res->flags & IORESOURCE_IO) | ||
277 | root = &ioport_resource; | ||
278 | if (res->flags & IORESOURCE_MEM) | ||
279 | root = &iomem_resource; | ||
280 | |||
281 | return root; | ||
282 | } | ||
283 | |||
284 | static inline void pcibios_penalize_isa_irq(int irq, int active) | ||
285 | { | ||
286 | /* We don't need to penalize isa irq's */ | ||
287 | } | ||
288 | |||
289 | static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) | ||
290 | { | ||
291 | return channel ? 15 : 14; | ||
292 | } | ||
293 | |||
294 | #endif /* __ASM_PARISC_PCI_H */ | ||
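To make the 0xbbxxxx port encoding above concrete: PCI_PORT_HBA() recovers the bus-adapter index from the upper bits and PCI_PORT_ADDR() the 16-bit offset within that adapter's I/O port space, which is exactly the pair that the struct pci_port_ops hooks take. A sketch of a hypothetical dispatcher follows; the real routing lives in the PA-RISC PCI support code, and the HBA registry here is invented purely for illustration.

#include <asm/pci.h>

/* Hypothetical registry, indexed by the HBA "key" used in HBA_PORT_BASE(). */
static struct pci_hba_data *example_hba_list[PCI_MAX_BUSSES];

static u8 example_port_inb(unsigned long port)
{
	struct pci_hba_data *hba = example_hba_list[PCI_PORT_HBA(port)];
	u16 offset = PCI_PORT_ADDR(port);	/* low 16 bits: port within the HBA */

	/* pci_port is the ops table declared in arch/parisc/kernel/pci.c */
	return pci_port->inb(hba, offset);
}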
diff --git a/arch/parisc/include/asm/pdc.h b/arch/parisc/include/asm/pdc.h new file mode 100644 index 000000000000..c584b00c6074 --- /dev/null +++ b/arch/parisc/include/asm/pdc.h | |||
@@ -0,0 +1,762 @@ | |||
1 | #ifndef _PARISC_PDC_H | ||
2 | #define _PARISC_PDC_H | ||
3 | |||
4 | /* | ||
5 | * PDC return values ... | ||
6 | * All PDC calls return a subset of these errors. | ||
7 | */ | ||
8 | |||
9 | #define PDC_WARN 3 /* Call completed with a warning */ | ||
10 | #define PDC_REQ_ERR_1 2 /* See above */ | ||
11 | #define PDC_REQ_ERR_0 1 /* Call would generate a requestor error */ | ||
12 | #define PDC_OK 0 /* Call completed successfully */ | ||
13 | #define PDC_BAD_PROC -1 /* Called non-existent procedure*/ | ||
14 | #define PDC_BAD_OPTION -2 /* Called with non-existent option */ | ||
15 | #define PDC_ERROR -3 /* Call could not complete without an error */ | ||
16 | #define PDC_NE_MOD -5 /* Module not found */ | ||
17 | #define PDC_NE_CELL_MOD -7 /* Cell module not found */ | ||
18 | #define PDC_INVALID_ARG -10 /* Called with an invalid argument */ | ||
19 | #define PDC_BUS_POW_WARN -12 /* Call could not complete in allowed power budget */ | ||
20 | #define PDC_NOT_NARROW -17 /* Narrow mode not supported */ | ||
21 | |||
22 | /* | ||
23 | * PDC entry points... | ||
24 | */ | ||
25 | |||
26 | #define PDC_POW_FAIL 1 /* perform a power-fail */ | ||
27 | #define PDC_POW_FAIL_PREPARE 0 /* prepare for powerfail */ | ||
28 | |||
29 | #define PDC_CHASSIS 2 /* PDC-chassis functions */ | ||
30 | #define PDC_CHASSIS_DISP 0 /* update chassis display */ | ||
31 | #define PDC_CHASSIS_WARN 1 /* return chassis warnings */ | ||
32 | #define PDC_CHASSIS_DISPWARN 2 /* update&return chassis status */ | ||
33 | #define PDC_RETURN_CHASSIS_INFO 128 /* HVERSION dependent: return chassis LED/LCD info */ | ||
34 | |||
35 | #define PDC_PIM 3 /* Get PIM data */ | ||
36 | #define PDC_PIM_HPMC 0 /* Transfer HPMC data */ | ||
37 | #define PDC_PIM_RETURN_SIZE 1 /* Get Max buffer needed for PIM*/ | ||
38 | #define PDC_PIM_LPMC 2 /* Transfer LPMC data */ | ||
39 | #define PDC_PIM_SOFT_BOOT 3 /* Transfer Soft Boot data */ | ||
40 | #define PDC_PIM_TOC 4 /* Transfer TOC data */ | ||
41 | |||
42 | #define PDC_MODEL 4 /* PDC model information call */ | ||
43 | #define PDC_MODEL_INFO 0 /* returns information */ | ||
44 | #define PDC_MODEL_BOOTID 1 /* set the BOOT_ID */ | ||
45 | #define PDC_MODEL_VERSIONS 2 /* returns cpu-internal versions*/ | ||
46 | #define PDC_MODEL_SYSMODEL 3 /* return system model info */ | ||
47 | #define PDC_MODEL_ENSPEC 4 /* enable specific option */ | ||
48 | #define PDC_MODEL_DISPEC 5 /* disable specific option */ | ||
49 | #define PDC_MODEL_CPU_ID 6 /* returns cpu-id (only newer machines!) */ | ||
50 | #define PDC_MODEL_CAPABILITIES 7 /* returns OS32/OS64-flags */ | ||
51 | /* Values for PDC_MODEL_CAPABILITIES non-equivalent virtual aliasing support */ | ||
52 | #define PDC_MODEL_IOPDIR_FDC (1 << 2) | ||
53 | #define PDC_MODEL_NVA_MASK (3 << 4) | ||
54 | #define PDC_MODEL_NVA_SUPPORTED (0 << 4) | ||
55 | #define PDC_MODEL_NVA_SLOW (1 << 4) | ||
56 | #define PDC_MODEL_NVA_UNSUPPORTED (3 << 4) | ||
57 | #define PDC_MODEL_GET_BOOT__OP 8 /* returns boot test options */ | ||
58 | #define PDC_MODEL_SET_BOOT__OP 9 /* set boot test options */ | ||
59 | |||
60 | #define PA89_INSTRUCTION_SET 0x4 /* capabilities returned */ | ||
61 | #define PA90_INSTRUCTION_SET 0x8 | ||
62 | |||
63 | #define PDC_CACHE 5 /* return/set cache (& TLB) info*/ | ||
64 | #define PDC_CACHE_INFO 0 /* returns information */ | ||
65 | #define PDC_CACHE_SET_COH 1 /* set coherence state */ | ||
66 | #define PDC_CACHE_RET_SPID 2 /* returns space-ID bits */ | ||
67 | |||
68 | #define PDC_HPA 6 /* return HPA of processor */ | ||
69 | #define PDC_HPA_PROCESSOR 0 | ||
70 | #define PDC_HPA_MODULES 1 | ||
71 | |||
72 | #define PDC_COPROC 7 /* Co-Processor (usually FP unit(s)) */ | ||
73 | #define PDC_COPROC_CFG 0 /* Co-Processor Cfg (FP unit(s) enabled?) */ | ||
74 | |||
75 | #define PDC_IODC 8 /* talk to IODC */ | ||
76 | #define PDC_IODC_READ 0 /* read IODC entry point */ | ||
77 | /* PDC_IODC_RI_ * INDEX parameter of PDC_IODC_READ */ | ||
78 | #define PDC_IODC_RI_DATA_BYTES 0 /* IODC Data Bytes */ | ||
79 | /* 1, 2 obsolete - HVERSION dependent */ | ||
80 | #define PDC_IODC_RI_INIT 3 /* Initialize module */ | ||
81 | #define PDC_IODC_RI_IO 4 /* Module input/output */ | ||
82 | #define PDC_IODC_RI_SPA 5 /* Module input/output */ | ||
83 | #define PDC_IODC_RI_CONFIG 6 /* Module input/output */ | ||
84 | /* 7 obsolete - HVERSION dependent */ | ||
85 | #define PDC_IODC_RI_TEST 8 /* Module input/output */ | ||
86 | #define PDC_IODC_RI_TLB 9 /* Module input/output */ | ||
87 | #define PDC_IODC_NINIT 2 /* non-destructive init */ | ||
88 | #define PDC_IODC_DINIT 3 /* destructive init */ | ||
89 | #define PDC_IODC_MEMERR 4 /* check for memory errors */ | ||
90 | #define PDC_IODC_INDEX_DATA 0 /* get first 16 bytes from mod IODC */ | ||
91 | #define PDC_IODC_BUS_ERROR -4 /* bus error return value */ | ||
92 | #define PDC_IODC_INVALID_INDEX -5 /* invalid index return value */ | ||
93 | #define PDC_IODC_COUNT -6 /* count is too small */ | ||
94 | |||
95 | #define PDC_TOD 9 /* time-of-day clock (TOD) */ | ||
96 | #define PDC_TOD_READ 0 /* read TOD */ | ||
97 | #define PDC_TOD_WRITE 1 /* write TOD */ | ||
98 | |||
99 | |||
100 | #define PDC_STABLE 10 /* stable storage (sprockets) */ | ||
101 | #define PDC_STABLE_READ 0 | ||
102 | #define PDC_STABLE_WRITE 1 | ||
103 | #define PDC_STABLE_RETURN_SIZE 2 | ||
104 | #define PDC_STABLE_VERIFY_CONTENTS 3 | ||
105 | #define PDC_STABLE_INITIALIZE 4 | ||
106 | |||
107 | #define PDC_NVOLATILE 11 /* often not implemented */ | ||
108 | |||
109 | #define PDC_ADD_VALID 12 /* Memory validation PDC call */ | ||
110 | #define PDC_ADD_VALID_VERIFY 0 /* Make PDC_ADD_VALID verify region */ | ||
111 | |||
112 | #define PDC_INSTR 15 /* get instr to invoke PDCE_CHECK() */ | ||
113 | |||
114 | #define PDC_PROC 16 /* (sprockets) */ | ||
115 | |||
116 | #define PDC_CONFIG 16 /* (sprockets) */ | ||
117 | #define PDC_CONFIG_DECONFIG 0 | ||
118 | #define PDC_CONFIG_DRECONFIG 1 | ||
119 | #define PDC_CONFIG_DRETURN_CONFIG 2 | ||
120 | |||
121 | #define PDC_BLOCK_TLB 18 /* manage hardware block-TLB */ | ||
122 | #define PDC_BTLB_INFO 0 /* returns parameter */ | ||
123 | #define PDC_BTLB_INSERT 1 /* insert BTLB entry */ | ||
124 | #define PDC_BTLB_PURGE 2 /* purge BTLB entries */ | ||
125 | #define PDC_BTLB_PURGE_ALL 3 /* purge all BTLB entries */ | ||
126 | |||
127 | #define PDC_TLB 19 /* manage hardware TLB miss handling */ | ||
128 | #define PDC_TLB_INFO 0 /* returns parameter */ | ||
129 | #define PDC_TLB_SETUP 1 /* set up miss handling */ | ||
130 | |||
131 | #define PDC_MEM 20 /* Manage memory */ | ||
132 | #define PDC_MEM_MEMINFO 0 | ||
133 | #define PDC_MEM_ADD_PAGE 1 | ||
134 | #define PDC_MEM_CLEAR_PDT 2 | ||
135 | #define PDC_MEM_READ_PDT 3 | ||
136 | #define PDC_MEM_RESET_CLEAR 4 | ||
137 | #define PDC_MEM_GOODMEM 5 | ||
138 | #define PDC_MEM_TABLE 128 /* Non contig mem map (sprockets) */ | ||
139 | #define PDC_MEM_RETURN_ADDRESS_TABLE PDC_MEM_TABLE | ||
140 | #define PDC_MEM_GET_MEMORY_SYSTEM_TABLES_SIZE 131 | ||
141 | #define PDC_MEM_GET_MEMORY_SYSTEM_TABLES 132 | ||
142 | #define PDC_MEM_GET_PHYSICAL_LOCATION_FROM_MEMORY_ADDRESS 133 | ||
143 | |||
144 | #define PDC_MEM_RET_SBE_REPLACED 5 /* PDC_MEM return values */ | ||
145 | #define PDC_MEM_RET_DUPLICATE_ENTRY 4 | ||
146 | #define PDC_MEM_RET_BUF_SIZE_SMALL 1 | ||
147 | #define PDC_MEM_RET_PDT_FULL -11 | ||
148 | #define PDC_MEM_RET_INVALID_PHYSICAL_LOCATION ~0ULL | ||
149 | |||
150 | #define PDC_PSW 21 /* Get/Set default System Mask */ | ||
151 | #define PDC_PSW_MASK 0 /* Return mask */ | ||
152 | #define PDC_PSW_GET_DEFAULTS 1 /* Return defaults */ | ||
153 | #define PDC_PSW_SET_DEFAULTS 2 /* Set default */ | ||
154 | #define PDC_PSW_ENDIAN_BIT 1 /* set for big endian */ | ||
155 | #define PDC_PSW_WIDE_BIT 2 /* set for wide mode */ | ||
156 | |||
157 | #define PDC_SYSTEM_MAP 22 /* find system modules */ | ||
158 | #define PDC_FIND_MODULE 0 | ||
159 | #define PDC_FIND_ADDRESS 1 | ||
160 | #define PDC_TRANSLATE_PATH 2 | ||
161 | |||
162 | #define PDC_SOFT_POWER 23 /* soft power switch */ | ||
163 | #define PDC_SOFT_POWER_INFO 0 /* return info about the soft power switch */ | ||
164 | #define PDC_SOFT_POWER_ENABLE 1 /* enable/disable soft power switch */ | ||
165 | |||
166 | |||
167 | /* HVERSION dependent */ | ||
168 | |||
169 | /* The PDC_MEM_MAP calls */ | ||
170 | #define PDC_MEM_MAP 128 /* on s700: return page info */ | ||
171 | #define PDC_MEM_MAP_HPA 0 /* returns hpa of a module */ | ||
172 | |||
173 | #define PDC_EEPROM 129 /* EEPROM access */ | ||
174 | #define PDC_EEPROM_READ_WORD 0 | ||
175 | #define PDC_EEPROM_WRITE_WORD 1 | ||
176 | #define PDC_EEPROM_READ_BYTE 2 | ||
177 | #define PDC_EEPROM_WRITE_BYTE 3 | ||
178 | #define PDC_EEPROM_EEPROM_PASSWORD -1000 | ||
179 | |||
180 | #define PDC_NVM 130 /* NVM (non-volatile memory) access */ | ||
181 | #define PDC_NVM_READ_WORD 0 | ||
182 | #define PDC_NVM_WRITE_WORD 1 | ||
183 | #define PDC_NVM_READ_BYTE 2 | ||
184 | #define PDC_NVM_WRITE_BYTE 3 | ||
185 | |||
186 | #define PDC_SEED_ERROR 132 /* (sprockets) */ | ||
187 | |||
188 | #define PDC_IO 135 /* log error info, reset IO system */ | ||
189 | #define PDC_IO_READ_AND_CLEAR_ERRORS 0 | ||
190 | #define PDC_IO_RESET 1 | ||
191 | #define PDC_IO_RESET_DEVICES 2 | ||
192 | /* sets bits 6&7 (little endian) of the HcControl Register */ | ||
193 | #define PDC_IO_USB_SUSPEND 0xC000000000000000 | ||
194 | #define PDC_IO_EEPROM_IO_ERR_TABLE_FULL -5 /* return value */ | ||
195 | #define PDC_IO_NO_SUSPEND -6 /* return value */ | ||
196 | |||
197 | #define PDC_BROADCAST_RESET 136 /* reset all processors */ | ||
198 | #define PDC_DO_RESET 0 /* option: perform a broadcast reset */ | ||
199 | #define PDC_DO_FIRM_TEST_RESET 1 /* Do broadcast reset with bitmap */ | ||
200 | #define PDC_BR_RECONFIGURATION 2 /* reset w/reconfiguration */ | ||
201 | #define PDC_FIRM_TEST_MAGIC 0xab9ec36fUL /* for this reboot only */ | ||
202 | |||
203 | #define PDC_LAN_STATION_ID 138 /* Hversion dependent mechanism for */ | ||
204 | #define PDC_LAN_STATION_ID_READ 0 /* getting the lan station address */ | ||
205 | |||
206 | #define PDC_LAN_STATION_ID_SIZE 6 | ||
207 | |||
208 | #define PDC_CHECK_RANGES 139 /* (sprockets) */ | ||
209 | |||
210 | #define PDC_NV_SECTIONS 141 /* (sprockets) */ | ||
211 | |||
212 | #define PDC_PERFORMANCE 142 /* performance monitoring */ | ||
213 | |||
214 | #define PDC_SYSTEM_INFO 143 /* system information */ | ||
215 | #define PDC_SYSINFO_RETURN_INFO_SIZE 0 | ||
216 | #define PDC_SYSINFO_RRETURN_SYS_INFO 1 | ||
217 | #define PDC_SYSINFO_RRETURN_ERRORS 2 | ||
218 | #define PDC_SYSINFO_RRETURN_WARNINGS 3 | ||
219 | #define PDC_SYSINFO_RETURN_REVISIONS 4 | ||
220 | #define PDC_SYSINFO_RRETURN_DIAGNOSE 5 | ||
221 | #define PDC_SYSINFO_RRETURN_HV_DIAGNOSE 1005 | ||
222 | |||
223 | #define PDC_RDR 144 /* (sprockets) */ | ||
224 | #define PDC_RDR_READ_BUFFER 0 | ||
225 | #define PDC_RDR_READ_SINGLE 1 | ||
226 | #define PDC_RDR_WRITE_SINGLE 2 | ||
227 | |||
228 | #define PDC_INTRIGUE 145 /* (sprockets) */ | ||
229 | #define PDC_INTRIGUE_WRITE_BUFFER 0 | ||
230 | #define PDC_INTRIGUE_GET_SCRATCH_BUFSIZE 1 | ||
231 | #define PDC_INTRIGUE_START_CPU_COUNTERS 2 | ||
232 | #define PDC_INTRIGUE_STOP_CPU_COUNTERS 3 | ||
233 | |||
234 | #define PDC_STI 146 /* STI access */ | ||
235 | /* same as PDC_PCI_XXX values (see below) */ | ||
236 | |||
237 | /* Legacy PDC definitions for same stuff */ | ||
238 | #define PDC_PCI_INDEX 147 | ||
239 | #define PDC_PCI_INTERFACE_INFO 0 | ||
240 | #define PDC_PCI_SLOT_INFO 1 | ||
241 | #define PDC_PCI_INFLIGHT_BYTES 2 | ||
242 | #define PDC_PCI_READ_CONFIG 3 | ||
243 | #define PDC_PCI_WRITE_CONFIG 4 | ||
244 | #define PDC_PCI_READ_PCI_IO 5 | ||
245 | #define PDC_PCI_WRITE_PCI_IO 6 | ||
246 | #define PDC_PCI_READ_CONFIG_DELAY 7 | ||
247 | #define PDC_PCI_UPDATE_CONFIG_DELAY 8 | ||
248 | #define PDC_PCI_PCI_PATH_TO_PCI_HPA 9 | ||
249 | #define PDC_PCI_PCI_HPA_TO_PCI_PATH 10 | ||
250 | #define PDC_PCI_PCI_PATH_TO_PCI_BUS 11 | ||
251 | #define PDC_PCI_PCI_RESERVED 12 | ||
252 | #define PDC_PCI_PCI_INT_ROUTE_SIZE 13 | ||
253 | #define PDC_PCI_GET_INT_TBL_SIZE PDC_PCI_PCI_INT_ROUTE_SIZE | ||
254 | #define PDC_PCI_PCI_INT_ROUTE 14 | ||
255 | #define PDC_PCI_GET_INT_TBL PDC_PCI_PCI_INT_ROUTE | ||
256 | #define PDC_PCI_READ_MON_TYPE 15 | ||
257 | #define PDC_PCI_WRITE_MON_TYPE 16 | ||
258 | |||
259 | |||
260 | /* Get SCSI Interface Card info: SDTR, SCSI ID, mode (SE vs LVD) */ | ||
261 | #define PDC_INITIATOR 163 | ||
262 | #define PDC_GET_INITIATOR 0 | ||
263 | #define PDC_SET_INITIATOR 1 | ||
264 | #define PDC_DELETE_INITIATOR 2 | ||
265 | #define PDC_RETURN_TABLE_SIZE 3 | ||
266 | #define PDC_RETURN_TABLE 4 | ||
267 | |||
268 | #define PDC_LINK 165 /* (sprockets) */ | ||
269 | #define PDC_LINK_PCI_ENTRY_POINTS 0 /* list (Arg1) = 0 */ | ||
270 | #define PDC_LINK_USB_ENTRY_POINTS 1 /* list (Arg1) = 1 */ | ||
271 | |||
272 | /* cl_class | ||
273 | * page 3-33 of IO-Firmware ARS | ||
274 | * IODC ENTRY_INIT(Search first) RET[1] | ||
275 | */ | ||
276 | #define CL_NULL 0 /* invalid */ | ||
277 | #define CL_RANDOM 1 /* random access (as disk) */ | ||
278 | #define CL_SEQU 2 /* sequential access (as tape) */ | ||
279 | #define CL_DUPLEX 7 /* full-duplex point-to-point (RS-232, Net) */ | ||
280 | #define CL_KEYBD 8 /* half-duplex console (HIL Keyboard) */ | ||
281 | #define CL_DISPL 9 /* half-duplex console (display) */ | ||
282 | #define CL_FC 10 /* FiberChannel access media */ | ||
283 | |||
284 | /* IODC ENTRY_INIT() */ | ||
285 | #define ENTRY_INIT_SRCH_FRST 2 | ||
286 | #define ENTRY_INIT_SRCH_NEXT 3 | ||
287 | #define ENTRY_INIT_MOD_DEV 4 | ||
288 | #define ENTRY_INIT_DEV 5 | ||
289 | #define ENTRY_INIT_MOD 6 | ||
290 | #define ENTRY_INIT_MSG 9 | ||
291 | |||
292 | /* IODC ENTRY_IO() */ | ||
293 | #define ENTRY_IO_BOOTIN 0 | ||
294 | #define ENTRY_IO_BOOTOUT 1 | ||
295 | #define ENTRY_IO_CIN 2 | ||
296 | #define ENTRY_IO_COUT 3 | ||
297 | #define ENTRY_IO_CLOSE 4 | ||
298 | #define ENTRY_IO_GETMSG 9 | ||
299 | #define ENTRY_IO_BBLOCK_IN 16 | ||
300 | #define ENTRY_IO_BBLOCK_OUT 17 | ||
301 | |||
302 | /* IODC ENTRY_SPA() */ | ||
303 | |||
304 | /* IODC ENTRY_CONFIG() */ | ||
305 | |||
306 | /* IODC ENTRY_TEST() */ | ||
307 | |||
308 | /* IODC ENTRY_TLB() */ | ||
309 | |||
310 | /* constants for OS (NVM...) */ | ||
311 | #define OS_ID_NONE 0 /* Undefined OS ID */ | ||
312 | #define OS_ID_HPUX 1 /* HP-UX OS */ | ||
313 | #define OS_ID_MPEXL 2 /* MPE XL OS */ | ||
314 | #define OS_ID_OSF 3 /* OSF OS */ | ||
315 | #define OS_ID_HPRT 4 /* HP-RT OS */ | ||
316 | #define OS_ID_NOVEL 5 /* NOVELL OS */ | ||
317 | #define OS_ID_LINUX 6 /* Linux */ | ||
318 | |||
319 | |||
320 | /* constants for PDC_CHASSIS */ | ||
321 | #define OSTAT_OFF 0 | ||
322 | #define OSTAT_FLT 1 | ||
323 | #define OSTAT_TEST 2 | ||
324 | #define OSTAT_INIT 3 | ||
325 | #define OSTAT_SHUT 4 | ||
326 | #define OSTAT_WARN 5 | ||
327 | #define OSTAT_RUN 6 | ||
328 | #define OSTAT_ON 7 | ||
329 | |||
330 | /* Page Zero constant offsets used by the HPMC handler */ | ||
331 | #define BOOT_CONSOLE_HPA_OFFSET 0x3c0 | ||
332 | #define BOOT_CONSOLE_SPA_OFFSET 0x3c4 | ||
333 | #define BOOT_CONSOLE_PATH_OFFSET 0x3a8 | ||
334 | |||
335 | /* size of the pdc_result buffer for firmware.c */ | ||
336 | #define NUM_PDC_RESULT 32 | ||
337 | |||
338 | #if !defined(__ASSEMBLY__) | ||
339 | #ifdef __KERNEL__ | ||
340 | |||
341 | #include <linux/types.h> | ||
342 | |||
343 | extern int pdc_type; | ||
344 | |||
345 | /* Values for pdc_type */ | ||
346 | #define PDC_TYPE_ILLEGAL -1 | ||
347 | #define PDC_TYPE_PAT 0 /* 64-bit PAT-PDC */ | ||
348 | #define PDC_TYPE_SYSTEM_MAP 1 /* 32-bit, but supports PDC_SYSTEM_MAP */ | ||
349 | #define PDC_TYPE_SNAKE 2 /* Doesn't support SYSTEM_MAP */ | ||
350 | |||
351 | struct pdc_chassis_info { /* for PDC_CHASSIS_INFO */ | ||
352 | unsigned long actcnt; /* actual number of bytes returned */ | ||
353 | unsigned long maxcnt; /* maximum number of bytes that could be returned */ | ||
354 | }; | ||
355 | |||
356 | struct pdc_coproc_cfg { /* for PDC_COPROC_CFG */ | ||
357 | unsigned long ccr_functional; | ||
358 | unsigned long ccr_present; | ||
359 | unsigned long revision; | ||
360 | unsigned long model; | ||
361 | }; | ||
362 | |||
363 | struct pdc_model { /* for PDC_MODEL */ | ||
364 | unsigned long hversion; | ||
365 | unsigned long sversion; | ||
366 | unsigned long hw_id; | ||
367 | unsigned long boot_id; | ||
368 | unsigned long sw_id; | ||
369 | unsigned long sw_cap; | ||
370 | unsigned long arch_rev; | ||
371 | unsigned long pot_key; | ||
372 | unsigned long curr_key; | ||
373 | }; | ||
374 | |||
375 | struct pdc_cache_cf { /* for PDC_CACHE (I/D-caches) */ | ||
376 | unsigned long | ||
377 | #ifdef CONFIG_64BIT | ||
378 | cc_padW:32, | ||
379 | #endif | ||
380 | cc_alias: 4, /* alias boundaries for virtual addresses */ | ||
381 | cc_block: 4, /* to determine most efficient stride */ | ||
382 | cc_line : 3, /* maximum amount written back as a result of store (multiple of 16 bytes) */ | ||
383 | cc_shift: 2, /* how much to shift cc_block left */ | ||
384 | cc_wt : 1, /* 0 = WT-Dcache, 1 = WB-Dcache */ | ||
385 | cc_sh : 2, /* 0 = separate I/D-cache, else shared I/D-cache */ | ||
386 | cc_cst : 3, /* 0 = incoherent D-cache, 1=coherent D-cache */ | ||
387 | cc_pad1 : 10, /* reserved */ | ||
388 | cc_hv : 3; /* hversion dependent */ | ||
389 | }; | ||
390 | |||
391 | struct pdc_tlb_cf { /* for PDC_CACHE (I/D-TLB's) */ | ||
392 | unsigned long tc_pad0:12, /* reserved */ | ||
393 | #ifdef CONFIG_64BIT | ||
394 | tc_padW:32, | ||
395 | #endif | ||
396 | tc_sh : 2, /* 0 = separate I/D-TLB, else shared I/D-TLB */ | ||
397 | tc_hv : 1, /* HV */ | ||
398 | tc_page : 1, /* 0 = 2K page-size-machine, 1 = 4k page size */ | ||
399 | tc_cst : 3, /* 0 = incoherent operations, else coherent operations */ | ||
400 | tc_aid : 5, /* ITLB: width of access ids of processor (encoded!) */ | ||
401 | tc_pad1 : 8; /* ITLB: width of space-registers (encoded) */ | ||
402 | }; | ||
403 | |||
404 | struct pdc_cache_info { /* main-PDC_CACHE-structure (caches & TLB's) */ | ||
405 | /* I-cache */ | ||
406 | unsigned long ic_size; /* size in bytes */ | ||
407 | struct pdc_cache_cf ic_conf; /* configuration */ | ||
408 | unsigned long ic_base; /* base-addr */ | ||
409 | unsigned long ic_stride; | ||
410 | unsigned long ic_count; | ||
411 | unsigned long ic_loop; | ||
412 | /* D-cache */ | ||
413 | unsigned long dc_size; /* size in bytes */ | ||
414 | struct pdc_cache_cf dc_conf; /* configuration */ | ||
415 | unsigned long dc_base; /* base-addr */ | ||
416 | unsigned long dc_stride; | ||
417 | unsigned long dc_count; | ||
418 | unsigned long dc_loop; | ||
419 | /* Instruction-TLB */ | ||
420 | unsigned long it_size; /* number of entries in I-TLB */ | ||
421 | struct pdc_tlb_cf it_conf; /* I-TLB-configuration */ | ||
422 | unsigned long it_sp_base; | ||
423 | unsigned long it_sp_stride; | ||
424 | unsigned long it_sp_count; | ||
425 | unsigned long it_off_base; | ||
426 | unsigned long it_off_stride; | ||
427 | unsigned long it_off_count; | ||
428 | unsigned long it_loop; | ||
429 | /* data-TLB */ | ||
430 | unsigned long dt_size; /* number of entries in D-TLB */ | ||
431 | struct pdc_tlb_cf dt_conf; /* D-TLB-configuration */ | ||
432 | unsigned long dt_sp_base; | ||
433 | unsigned long dt_sp_stride; | ||
434 | unsigned long dt_sp_count; | ||
435 | unsigned long dt_off_base; | ||
436 | unsigned long dt_off_stride; | ||
437 | unsigned long dt_off_count; | ||
438 | unsigned long dt_loop; | ||
439 | }; | ||
440 | |||
441 | #if 0 | ||
442 | /* If you start using the next struct, you'll have to adjust it to | ||
443 | * work with 64-bit firmware I think -PB | ||
444 | */ | ||
445 | struct pdc_iodc { /* PDC_IODC */ | ||
446 | unsigned char hversion_model; | ||
447 | unsigned char hversion; | ||
448 | unsigned char spa; | ||
449 | unsigned char type; | ||
450 | unsigned int sversion_rev:4; | ||
451 | unsigned int sversion_model:19; | ||
452 | unsigned int sversion_opt:8; | ||
453 | unsigned char rev; | ||
454 | unsigned char dep; | ||
455 | unsigned char features; | ||
456 | unsigned char pad1; | ||
457 | unsigned int checksum:16; | ||
458 | unsigned int length:16; | ||
459 | unsigned int pad[15]; | ||
460 | } __attribute__((aligned(8))) ; | ||
461 | #endif | ||
462 | |||
463 | #ifndef CONFIG_PA20 | ||
464 | /* no BTLBs in PA2.0 processors */ | ||
465 | struct pdc_btlb_info_range { | ||
466 | __u8 res00; | ||
467 | __u8 num_i; | ||
468 | __u8 num_d; | ||
469 | __u8 num_comb; | ||
470 | }; | ||
471 | |||
472 | struct pdc_btlb_info { /* PDC_BLOCK_TLB, return of PDC_BTLB_INFO */ | ||
473 | unsigned int min_size; /* minimum size of BTLB in pages */ | ||
474 | unsigned int max_size; /* maximum size of BTLB in pages */ | ||
475 | struct pdc_btlb_info_range fixed_range_info; | ||
476 | struct pdc_btlb_info_range variable_range_info; | ||
477 | }; | ||
478 | |||
479 | #endif /* !CONFIG_PA20 */ | ||
480 | |||
481 | #ifdef CONFIG_64BIT | ||
482 | struct pdc_memory_table_raddr { /* PDC_MEM/PDC_MEM_TABLE (return info) */ | ||
483 | unsigned long entries_returned; | ||
484 | unsigned long entries_total; | ||
485 | }; | ||
486 | |||
487 | struct pdc_memory_table { /* PDC_MEM/PDC_MEM_TABLE (arguments) */ | ||
488 | unsigned long paddr; | ||
489 | unsigned int pages; | ||
490 | unsigned int reserved; | ||
491 | }; | ||
492 | #endif /* CONFIG_64BIT */ | ||
493 | |||
494 | struct pdc_system_map_mod_info { /* PDC_SYSTEM_MAP/FIND_MODULE */ | ||
495 | unsigned long mod_addr; | ||
496 | unsigned long mod_pgs; | ||
497 | unsigned long add_addrs; | ||
498 | }; | ||
499 | |||
500 | struct pdc_system_map_addr_info { /* PDC_SYSTEM_MAP/FIND_ADDRESS */ | ||
501 | unsigned long mod_addr; | ||
502 | unsigned long mod_pgs; | ||
503 | }; | ||
504 | |||
505 | struct pdc_initiator { /* PDC_INITIATOR */ | ||
506 | int host_id; | ||
507 | int factor; | ||
508 | int width; | ||
509 | int mode; | ||
510 | }; | ||
511 | |||
512 | struct hardware_path { | ||
513 | char flags; /* see bit definitions below */ | ||
514 | char bc[6]; /* Bus Converter routing info to a specific */ | ||
515 | /* I/O adaptor (< 0 means none, > 63 resvd) */ | ||
516 | char mod; /* fixed field of specified module */ | ||
517 | }; | ||
518 | |||
519 | /* | ||
520 | * Device path specifications used by PDC. | ||
521 | */ | ||
522 | struct pdc_module_path { | ||
523 | struct hardware_path path; | ||
524 | unsigned int layers[6]; /* device-specific info (ctlr #, unit # ...) */ | ||
525 | }; | ||
526 | |||
527 | #ifndef CONFIG_PA20 | ||
528 | /* Only used on some pre-PA2.0 boxes */ | ||
529 | struct pdc_memory_map { /* PDC_MEMORY_MAP */ | ||
530 | unsigned long hpa; /* mod's register set address */ | ||
531 | unsigned long more_pgs; /* number of additional I/O pgs */ | ||
532 | }; | ||
533 | #endif | ||
534 | |||
535 | struct pdc_tod { | ||
536 | unsigned long tod_sec; | ||
537 | unsigned long tod_usec; | ||
538 | }; | ||
539 | |||
540 | /* architected results from PDC_PIM/transfer hpmc on a PA1.1 machine */ | ||
541 | |||
542 | struct pdc_hpmc_pim_11 { /* PDC_PIM */ | ||
543 | __u32 gr[32]; | ||
544 | __u32 cr[32]; | ||
545 | __u32 sr[8]; | ||
546 | __u32 iasq_back; | ||
547 | __u32 iaoq_back; | ||
548 | __u32 check_type; | ||
549 | __u32 cpu_state; | ||
550 | __u32 rsvd1; | ||
551 | __u32 cache_check; | ||
552 | __u32 tlb_check; | ||
553 | __u32 bus_check; | ||
554 | __u32 assists_check; | ||
555 | __u32 rsvd2; | ||
556 | __u32 assist_state; | ||
557 | __u32 responder_addr; | ||
558 | __u32 requestor_addr; | ||
559 | __u32 path_info; | ||
560 | __u64 fr[32]; | ||
561 | }; | ||
562 | |||
563 | /* | ||
564 | * architected results from PDC_PIM/transfer hpmc on a PA2.0 machine | ||
565 | * | ||
566 | * Note that PDC_PIM doesn't care whether or not wide mode was enabled | ||
567 | * so the results are different on PA1.1 vs. PA2.0 when in narrow mode. | ||
568 | * | ||
569 | * Note also that there are unarchitected results available, which | ||
570 | * are hversion dependent. Do a "ser pim 0 hpmc" after rebooting, since | ||
571 | * the firmware is probably the best way of printing hversion dependent | ||
572 | * data. | ||
573 | */ | ||
574 | |||
575 | struct pdc_hpmc_pim_20 { /* PDC_PIM */ | ||
576 | __u64 gr[32]; | ||
577 | __u64 cr[32]; | ||
578 | __u64 sr[8]; | ||
579 | __u64 iasq_back; | ||
580 | __u64 iaoq_back; | ||
581 | __u32 check_type; | ||
582 | __u32 cpu_state; | ||
583 | __u32 cache_check; | ||
584 | __u32 tlb_check; | ||
585 | __u32 bus_check; | ||
586 | __u32 assists_check; | ||
587 | __u32 assist_state; | ||
588 | __u32 path_info; | ||
589 | __u64 responder_addr; | ||
590 | __u64 requestor_addr; | ||
591 | __u64 fr[32]; | ||
592 | }; | ||
593 | |||
594 | void pdc_console_init(void); /* in pdc_console.c */ | ||
595 | void pdc_console_restart(void); | ||
596 | |||
597 | void setup_pdc(void); /* in inventory.c */ | ||
598 | |||
599 | /* wrapper-functions from pdc.c */ | ||
600 | |||
601 | int pdc_add_valid(unsigned long address); | ||
602 | int pdc_chassis_info(struct pdc_chassis_info *chassis_info, void *led_info, unsigned long len); | ||
603 | int pdc_chassis_disp(unsigned long disp); | ||
604 | int pdc_chassis_warn(unsigned long *warn); | ||
605 | int pdc_coproc_cfg(struct pdc_coproc_cfg *pdc_coproc_info); | ||
606 | int pdc_coproc_cfg_unlocked(struct pdc_coproc_cfg *pdc_coproc_info); | ||
607 | int pdc_iodc_read(unsigned long *actcnt, unsigned long hpa, unsigned int index, | ||
608 | void *iodc_data, unsigned int iodc_data_size); | ||
609 | int pdc_system_map_find_mods(struct pdc_system_map_mod_info *pdc_mod_info, | ||
610 | struct pdc_module_path *mod_path, long mod_index); | ||
611 | int pdc_system_map_find_addrs(struct pdc_system_map_addr_info *pdc_addr_info, | ||
612 | long mod_index, long addr_index); | ||
613 | int pdc_model_info(struct pdc_model *model); | ||
614 | int pdc_model_sysmodel(char *name); | ||
615 | int pdc_model_cpuid(unsigned long *cpu_id); | ||
616 | int pdc_model_versions(unsigned long *versions, int id); | ||
617 | int pdc_model_capabilities(unsigned long *capabilities); | ||
618 | int pdc_cache_info(struct pdc_cache_info *cache); | ||
619 | int pdc_spaceid_bits(unsigned long *space_bits); | ||
620 | #ifndef CONFIG_PA20 | ||
621 | int pdc_btlb_info(struct pdc_btlb_info *btlb); | ||
622 | int pdc_mem_map_hpa(struct pdc_memory_map *r_addr, struct pdc_module_path *mod_path); | ||
623 | #endif /* !CONFIG_PA20 */ | ||
624 | int pdc_lan_station_id(char *lan_addr, unsigned long net_hpa); | ||
625 | |||
626 | int pdc_stable_read(unsigned long staddr, void *memaddr, unsigned long count); | ||
627 | int pdc_stable_write(unsigned long staddr, void *memaddr, unsigned long count); | ||
628 | int pdc_stable_get_size(unsigned long *size); | ||
629 | int pdc_stable_verify_contents(void); | ||
630 | int pdc_stable_initialize(void); | ||
631 | |||
632 | int pdc_pci_irt_size(unsigned long *num_entries, unsigned long hpa); | ||
633 | int pdc_pci_irt(unsigned long num_entries, unsigned long hpa, void *tbl); | ||
634 | |||
635 | int pdc_get_initiator(struct hardware_path *, struct pdc_initiator *); | ||
636 | int pdc_tod_read(struct pdc_tod *tod); | ||
637 | int pdc_tod_set(unsigned long sec, unsigned long usec); | ||
638 | |||
639 | #ifdef CONFIG_64BIT | ||
640 | int pdc_mem_mem_table(struct pdc_memory_table_raddr *r_addr, | ||
641 | struct pdc_memory_table *tbl, unsigned long entries); | ||
642 | #endif | ||
643 | |||
644 | void set_firmware_width(void); | ||
645 | void set_firmware_width_unlocked(void); | ||
646 | int pdc_do_firm_test_reset(unsigned long ftc_bitmap); | ||
647 | int pdc_do_reset(void); | ||
648 | int pdc_soft_power_info(unsigned long *power_reg); | ||
649 | int pdc_soft_power_button(int sw_control); | ||
650 | void pdc_io_reset(void); | ||
651 | void pdc_io_reset_devices(void); | ||
652 | int pdc_iodc_getc(void); | ||
653 | int pdc_iodc_print(const unsigned char *str, unsigned count); | ||
654 | |||
655 | void pdc_emergency_unlock(void); | ||
656 | int pdc_sti_call(unsigned long func, unsigned long flags, | ||
657 | unsigned long inptr, unsigned long outputr, | ||
658 | unsigned long glob_cfg); | ||
659 | |||
660 | static inline char * os_id_to_string(u16 os_id) { | ||
661 | switch(os_id) { | ||
662 | case OS_ID_NONE: return "No OS"; | ||
663 | case OS_ID_HPUX: return "HP-UX"; | ||
664 | case OS_ID_MPEXL: return "MPE-iX"; | ||
665 | case OS_ID_OSF: return "OSF"; | ||
666 | case OS_ID_HPRT: return "HP-RT"; | ||
667 | case OS_ID_NOVEL: return "Novell Netware"; | ||
668 | case OS_ID_LINUX: return "Linux"; | ||
669 | default: return "Unknown"; | ||
670 | } | ||
671 | } | ||
672 | |||
673 | #endif /* __KERNEL__ */ | ||
674 | |||
675 | #define PAGE0 ((struct zeropage *)__PAGE_OFFSET) | ||
676 | |||
677 | /* DEFINITION OF THE ZERO-PAGE (PAG0) */ | ||
678 | /* based on work by Jason Eckhardt (jason@equator.com) */ | ||
679 | |||
680 | /* flags of the device_path */ | ||
681 | #define PF_AUTOBOOT 0x80 | ||
682 | #define PF_AUTOSEARCH 0x40 | ||
683 | #define PF_TIMER 0x0F | ||
684 | |||
685 | struct device_path { /* page 1-69 */ | ||
686 | unsigned char flags; /* flags see above! */ | ||
687 | unsigned char bc[6]; /* bus converter routing info */ | ||
688 | unsigned char mod; | ||
689 | unsigned int layers[6];/* device-specific layer-info */ | ||
690 | } __attribute__((aligned(8))) ; | ||
691 | |||
692 | struct pz_device { | ||
693 | struct device_path dp; /* see above */ | ||
694 | /* struct iomod *hpa; */ | ||
695 | unsigned int hpa; /* HPA base address */ | ||
696 | /* char *spa; */ | ||
697 | unsigned int spa; /* SPA base address */ | ||
698 | /* int (*iodc_io)(struct iomod*, ...); */ | ||
699 | unsigned int iodc_io; /* device entry point */ | ||
700 | short pad; /* reserved */ | ||
701 | unsigned short cl_class;/* see below */ | ||
702 | } __attribute__((aligned(8))) ; | ||
703 | |||
704 | struct zeropage { | ||
705 | /* [0x000] initialize vectors (VEC) */ | ||
706 | unsigned int vec_special; /* must be zero */ | ||
707 | /* int (*vec_pow_fail)(void);*/ | ||
708 | unsigned int vec_pow_fail; /* power failure handler */ | ||
709 | /* int (*vec_toc)(void); */ | ||
710 | unsigned int vec_toc; | ||
711 | unsigned int vec_toclen; | ||
712 | /* int (*vec_rendz)(void); */ | ||
713 | unsigned int vec_rendz; | ||
714 | int vec_pow_fail_flen; | ||
715 | int vec_pad[10]; | ||
716 | |||
717 | /* [0x040] reserved processor dependent */ | ||
718 | int pad0[112]; | ||
719 | |||
720 | /* [0x200] reserved */ | ||
721 | int pad1[84]; | ||
722 | |||
723 | /* [0x350] memory configuration (MC) */ | ||
724 | int memc_cont; /* contiguous mem size (bytes) */ | ||
725 | int memc_phsize; /* physical memory size */ | ||
726 | int memc_adsize; /* additional mem size, bytes of SPA space used by PDC */ | ||
727 | unsigned int mem_pdc_hi; /* used for 64-bit */ | ||
728 | |||
729 | /* [0x360] various parameters for the boot-CPU */ | ||
730 | /* unsigned int *mem_booterr[8]; */ | ||
731 | unsigned int mem_booterr[8]; /* ptr to boot errors */ | ||
732 | unsigned int mem_free; /* first location, where OS can be loaded */ | ||
733 | /* struct iomod *mem_hpa; */ | ||
734 | unsigned int mem_hpa; /* HPA of the boot-CPU */ | ||
735 | /* int (*mem_pdc)(int, ...); */ | ||
736 | unsigned int mem_pdc; /* PDC entry point */ | ||
737 | unsigned int mem_10msec; /* number of clock ticks in 10msec */ | ||
738 | |||
739 | /* [0x390] initial memory module (IMM) */ | ||
740 | /* struct iomod *imm_hpa; */ | ||
741 | unsigned int imm_hpa; /* HPA of the IMM */ | ||
742 | int imm_soft_boot; /* 0 = was hard boot, 1 = was soft boot */ | ||
743 | unsigned int imm_spa_size; /* SPA size of the IMM in bytes */ | ||
744 | unsigned int imm_max_mem; /* bytes of mem in IMM */ | ||
745 | |||
746 | /* [0x3A0] boot console, display device and keyboard */ | ||
747 | struct pz_device mem_cons; /* description of console device */ | ||
748 | struct pz_device mem_boot; /* description of boot device */ | ||
749 | struct pz_device mem_kbd; /* description of keyboard device */ | ||
750 | |||
751 | /* [0x430] reserved */ | ||
752 | int pad430[116]; | ||
753 | |||
754 | /* [0x600] processor dependent */ | ||
755 | __u32 pad600[1]; | ||
756 | __u32 proc_sti; /* pointer to STI ROM */ | ||
757 | __u32 pad608[126]; | ||
758 | }; | ||
759 | |||
760 | #endif /* !defined(__ASSEMBLY__) */ | ||
761 | |||
762 | #endif /* _PARISC_PDC_H */ | ||
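As a usage note, each pdc_* wrapper above returns one of the PDC status codes from the top of this header, so callers compare against PDC_OK before trusting the data written through the output pointers. A minimal sketch with a hypothetical caller:

#include <linux/kernel.h>
#include <asm/pdc.h>

static void example_report_firmware_info(void)
{
	struct pdc_tod tod;
	char model[81];		/* generously sized for the sysmodel string */

	if (pdc_tod_read(&tod) == PDC_OK)
		printk(KERN_INFO "PDC TOD: %lu.%06lu\n",
		       tod.tod_sec, tod.tod_usec);

	if (pdc_model_sysmodel(model) == PDC_OK)
		printk(KERN_INFO "firmware model string: %s\n", model);
}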
diff --git a/arch/parisc/include/asm/pdc_chassis.h b/arch/parisc/include/asm/pdc_chassis.h new file mode 100644 index 000000000000..a609273dc6bf --- /dev/null +++ b/arch/parisc/include/asm/pdc_chassis.h | |||
@@ -0,0 +1,381 @@ | |||
1 | /* | ||
2 | * include/asm-parisc/pdc_chassis.h | ||
3 | * | ||
4 | * Copyright (C) 2002 Laurent Canet <canetl@esiee.fr> | ||
5 | * Copyright (C) 2002 Thibaut Varene <varenet@parisc-linux.org> | ||
6 | * | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License, version 2, as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
20 | * | ||
21 | * TODO: - handle processor number on SMP systems (Reporting Entity ID) | ||
22 | * - handle message ID | ||
23 | * - handle timestamps | ||
24 | */ | ||
25 | |||
26 | |||
27 | #ifndef _PARISC_PDC_CHASSIS_H | ||
28 | #define _PARISC_PDC_CHASSIS_H | ||
29 | |||
30 | /* | ||
31 | * ---------- | ||
32 | * Prototypes | ||
33 | * ---------- | ||
34 | */ | ||
35 | |||
36 | int pdc_chassis_send_status(int message); | ||
37 | void parisc_pdc_chassis_init(void); | ||
38 | |||
39 | |||
40 | /* | ||
41 | * ----------------- | ||
42 | * Direct call names | ||
43 | * ----------------- | ||
44 | * They set up everything for you: the log message and the corresponding LED state | ||
45 | */ | ||
46 | |||
47 | #define PDC_CHASSIS_DIRECT_BSTART 0 | ||
48 | #define PDC_CHASSIS_DIRECT_BCOMPLETE 1 | ||
49 | #define PDC_CHASSIS_DIRECT_SHUTDOWN 2 | ||
50 | #define PDC_CHASSIS_DIRECT_PANIC 3 | ||
51 | #define PDC_CHASSIS_DIRECT_HPMC 4 | ||
52 | #define PDC_CHASSIS_DIRECT_LPMC 5 | ||
53 | #define PDC_CHASSIS_DIRECT_DUMP 6 /* not yet implemented */ | ||
54 | #define PDC_CHASSIS_DIRECT_OOPS 7 /* not yet implemented */ | ||
55 | |||
56 | |||
57 | /* | ||
58 | * ------------ | ||
59 | * LEDs control | ||
60 | * ------------ | ||
61 | * Set the three LEDs -- Run, Attn, and Fault. | ||
62 | */ | ||
63 | |||
64 | /* Old PDC LED control */ | ||
65 | #define PDC_CHASSIS_DISP_DATA(v) ((unsigned long)(v) << 17) | ||
66 | |||
67 | /* | ||
68 | * Available PDC PAT LED states | ||
69 | */ | ||
70 | |||
71 | #define PDC_CHASSIS_LED_RUN_OFF (0ULL << 4) | ||
72 | #define PDC_CHASSIS_LED_RUN_FLASH (1ULL << 4) | ||
73 | #define PDC_CHASSIS_LED_RUN_ON (2ULL << 4) | ||
74 | #define PDC_CHASSIS_LED_RUN_NC (3ULL << 4) | ||
75 | #define PDC_CHASSIS_LED_ATTN_OFF (0ULL << 6) | ||
76 | #define PDC_CHASSIS_LED_ATTN_FLASH (1ULL << 6) | ||
77 | #define PDC_CHASSIS_LED_ATTN_NC (3ULL << 6) /* ATTN ON is invalid */ | ||
78 | #define PDC_CHASSIS_LED_FAULT_OFF (0ULL << 8) | ||
79 | #define PDC_CHASSIS_LED_FAULT_FLASH (1ULL << 8) | ||
80 | #define PDC_CHASSIS_LED_FAULT_ON (2ULL << 8) | ||
81 | #define PDC_CHASSIS_LED_FAULT_NC (3ULL << 8) | ||
82 | #define PDC_CHASSIS_LED_VALID (1ULL << 10) | ||
83 | |||
84 | /* | ||
85 | * Valid PDC PAT LED states combinations | ||
86 | */ | ||
87 | |||
88 | /* System running normally */ | ||
89 | #define PDC_CHASSIS_LSTATE_RUN_NORMAL (PDC_CHASSIS_LED_RUN_ON | \ | ||
90 | PDC_CHASSIS_LED_ATTN_OFF | \ | ||
91 | PDC_CHASSIS_LED_FAULT_OFF | \ | ||
92 | PDC_CHASSIS_LED_VALID ) | ||
93 | /* System crashed and rebooted itself successfully */ | ||
94 | #define PDC_CHASSIS_LSTATE_RUN_CRASHREC (PDC_CHASSIS_LED_RUN_ON | \ | ||
95 | PDC_CHASSIS_LED_ATTN_OFF | \ | ||
96 | PDC_CHASSIS_LED_FAULT_FLASH | \ | ||
97 | PDC_CHASSIS_LED_VALID ) | ||
98 | /* There was a system interruption that did not take the system down */ | ||
99 | #define PDC_CHASSIS_LSTATE_RUN_SYSINT (PDC_CHASSIS_LED_RUN_ON | \ | ||
100 | PDC_CHASSIS_LED_ATTN_FLASH | \ | ||
101 | PDC_CHASSIS_LED_FAULT_OFF | \ | ||
102 | PDC_CHASSIS_LED_VALID ) | ||
103 | /* System running and unexpected reboot or non-critical error detected */ | ||
104 | #define PDC_CHASSIS_LSTATE_RUN_NCRIT (PDC_CHASSIS_LED_RUN_ON | \ | ||
105 | PDC_CHASSIS_LED_ATTN_FLASH | \ | ||
106 | PDC_CHASSIS_LED_FAULT_FLASH | \ | ||
107 | PDC_CHASSIS_LED_VALID ) | ||
108 | /* Executing non-OS code */ | ||
109 | #define PDC_CHASSIS_LSTATE_NONOS (PDC_CHASSIS_LED_RUN_FLASH | \ | ||
110 | PDC_CHASSIS_LED_ATTN_OFF | \ | ||
111 | PDC_CHASSIS_LED_FAULT_OFF | \ | ||
112 | PDC_CHASSIS_LED_VALID ) | ||
113 | /* Boot failed - Executing non-OS code */ | ||
114 | #define PDC_CHASSIS_LSTATE_NONOS_BFAIL (PDC_CHASSIS_LED_RUN_FLASH | \ | ||
115 | PDC_CHASSIS_LED_ATTN_OFF | \ | ||
116 | PDC_CHASSIS_LED_FAULT_ON | \ | ||
117 | PDC_CHASSIS_LED_VALID ) | ||
118 | /* Unexpected reboot occurred - Executing non-OS code */ | ||
119 | #define PDC_CHASSIS_LSTATE_NONOS_UNEXP (PDC_CHASSIS_LED_RUN_FLASH | \ | ||
120 | PDC_CHASSIS_LED_ATTN_OFF | \ | ||
121 | PDC_CHASSIS_LED_FAULT_FLASH | \ | ||
122 | PDC_CHASSIS_LED_VALID ) | ||
123 | /* Executing non-OS code - Non-critical error detected */ | ||
124 | #define PDC_CHASSIS_LSTATE_NONOS_NCRIT (PDC_CHASSIS_LED_RUN_FLASH | \ | ||
125 | PDC_CHASSIS_LED_ATTN_FLASH | \ | ||
126 | PDC_CHASSIS_LED_FAULT_OFF | \ | ||
127 | PDC_CHASSIS_LED_VALID ) | ||
128 | /* Boot failed - Executing non-OS code - Non-critical error detected */ | ||
129 | #define PDC_CHASSIS_LSTATE_BFAIL_NCRIT (PDC_CHASSIS_LED_RUN_FLASH | \ | ||
130 | PDC_CHASSIS_LED_ATTN_FLASH | \ | ||
131 | PDC_CHASSIS_LED_FAULT_ON | \ | ||
132 | PDC_CHASSIS_LED_VALID ) | ||
133 | /* Unexpected reboot/recovering - Executing non-OS code - Non-critical error detected */ | ||
134 | #define PDC_CHASSIS_LSTATE_UNEXP_NCRIT (PDC_CHASSIS_LED_RUN_FLASH | \ | ||
135 | PDC_CHASSIS_LED_ATTN_FLASH | \ | ||
136 | PDC_CHASSIS_LED_FAULT_FLASH | \ | ||
137 | PDC_CHASSIS_LED_VALID ) | ||
138 | /* Cannot execute PDC */ | ||
139 | #define PDC_CHASSIS_LSTATE_CANNOT_PDC (PDC_CHASSIS_LED_RUN_OFF | \ | ||
140 | PDC_CHASSIS_LED_ATTN_OFF | \ | ||
141 | PDC_CHASSIS_LED_FAULT_OFF | \ | ||
142 | PDC_CHASSIS_LED_VALID ) | ||
143 | /* Boot failed - OS not up - PDC has detected a failure that prevents boot */ | ||
144 | #define PDC_CHASSIS_LSTATE_FATAL_BFAIL (PDC_CHASSIS_LED_RUN_OFF | \ | ||
145 | PDC_CHASSIS_LED_ATTN_OFF | \ | ||
146 | PDC_CHASSIS_LED_FAULT_ON | \ | ||
147 | PDC_CHASSIS_LED_VALID ) | ||
148 | /* No code running - Non-critical error detected (double fault situation) */ | ||
149 | #define PDC_CHASSIS_LSTATE_NOCODE_NCRIT (PDC_CHASSIS_LED_RUN_OFF | \ | ||
150 | PDC_CHASSIS_LED_ATTN_FLASH | \ | ||
151 | PDC_CHASSIS_LED_FAULT_OFF | \ | ||
152 | PDC_CHASSIS_LED_VALID ) | ||
153 | /* Boot failed - OS not up - Fatal failure detected - Non-critical error detected */ | ||
154 | #define PDC_CHASSIS_LSTATE_FATAL_NCRIT (PDC_CHASSIS_LED_RUN_OFF | \ | ||
155 | PDC_CHASSIS_LED_ATTN_FLASH | \ | ||
156 | PDC_CHASSIS_LED_FAULT_ON | \ | ||
157 | PDC_CHASSIS_LED_VALID ) | ||
158 | /* All other states are invalid */ | ||
159 | |||
160 | |||
161 | /* | ||
162 | * -------------- | ||
163 | * PDC Log events | ||
164 | * -------------- | ||
165 | * Here follows bits needed to fill up the log event sent to PDC_CHASSIS | ||
166 | * The log message contains: Alert level, Source, Source detail, | ||
167 | * Source ID, Problem detail, Caller activity, Activity status, | ||
168 | * Caller subactivity, Reporting entity type, Reporting entity ID, | ||
169 | * Data type, Unique message ID and EOM. | ||
170 | */ | ||
171 | |||
172 | /* Alert level */ | ||
173 | #define PDC_CHASSIS_ALERT_FORWARD (0ULL << 36) /* no failure detected */ | ||
174 | #define PDC_CHASSIS_ALERT_SERPROC (1ULL << 36) /* service proc - no failure */ | ||
175 | #define PDC_CHASSIS_ALERT_NURGENT (2ULL << 36) /* non-urgent operator attn */ | ||
176 | #define PDC_CHASSIS_ALERT_BLOCKED (3ULL << 36) /* system blocked */ | ||
177 | #define PDC_CHASSIS_ALERT_CONF_CHG (4ULL << 36) /* unexpected configuration change */ | ||
178 | #define PDC_CHASSIS_ALERT_ENV_PB (5ULL << 36) /* boot possible, environmental pb */ | ||
179 | #define PDC_CHASSIS_ALERT_PENDING (6ULL << 36) /* boot possible, pending failure */ | ||
180 | #define PDC_CHASSIS_ALERT_PERF_IMP (8ULL << 36) /* boot possible, performance impaired */ | ||
181 | #define PDC_CHASSIS_ALERT_FUNC_IMP (10ULL << 36) /* boot possible, functionality impaired */ | ||
182 | #define PDC_CHASSIS_ALERT_SOFT_FAIL (12ULL << 36) /* software failure */ | ||
183 | #define PDC_CHASSIS_ALERT_HANG (13ULL << 36) /* system hang */ | ||
184 | #define PDC_CHASSIS_ALERT_ENV_FATAL (14ULL << 36) /* fatal power or environmental pb */ | ||
185 | #define PDC_CHASSIS_ALERT_HW_FATAL (15ULL << 36) /* fatal hardware problem */ | ||
186 | |||
187 | /* Source */ | ||
188 | #define PDC_CHASSIS_SRC_NONE (0ULL << 28) /* unknown, no source stated */ | ||
189 | #define PDC_CHASSIS_SRC_PROC (1ULL << 28) /* processor */ | ||
190 | /* For later use ? */ | ||
191 | #define PDC_CHASSIS_SRC_PROC_CACHE (2ULL << 28) /* processor cache*/ | ||
192 | #define PDC_CHASSIS_SRC_PDH (3ULL << 28) /* processor dependent hardware */ | ||
193 | #define PDC_CHASSIS_SRC_PWR (4ULL << 28) /* power */ | ||
194 | #define PDC_CHASSIS_SRC_FAB (5ULL << 28) /* fabric connector */ | ||
195 | #define PDC_CHASSIS_SRC_PLATi (6ULL << 28) /* platform */ | ||
196 | #define PDC_CHASSIS_SRC_MEM (7ULL << 28) /* memory */ | ||
197 | #define PDC_CHASSIS_SRC_IO (8ULL << 28) /* I/O */ | ||
198 | #define PDC_CHASSIS_SRC_CELL (9ULL << 28) /* cell */ | ||
199 | #define PDC_CHASSIS_SRC_PD (10ULL << 28) /* protected domain */ | ||
200 | |||
201 | /* Source detail field */ | ||
202 | #define PDC_CHASSIS_SRC_D_PROC (1ULL << 24) /* processor general */ | ||
203 | |||
204 | /* Source ID - platform dependent */ | ||
205 | #define PDC_CHASSIS_SRC_ID_UNSPEC (0ULL << 16) | ||
206 | |||
207 | /* Problem detail - problem source dependent */ | ||
208 | #define PDC_CHASSIS_PB_D_PROC_NONE (0ULL << 32) /* no problem detail */ | ||
209 | #define PDC_CHASSIS_PB_D_PROC_TIMEOUT (4ULL << 32) /* timeout */ | ||
210 | |||
211 | /* Caller activity */ | ||
212 | #define PDC_CHASSIS_CALL_ACT_HPUX_BL (7ULL << 12) /* Boot Loader */ | ||
213 | #define PDC_CHASSIS_CALL_ACT_HPUX_PD (8ULL << 12) /* SAL_PD activities */ | ||
214 | #define PDC_CHASSIS_CALL_ACT_HPUX_EVENT (9ULL << 12) /* SAL_EVENTS activities */ | ||
215 | #define PDC_CHASSIS_CALL_ACT_HPUX_IO (10ULL << 12) /* SAL_IO activities */ | ||
216 | #define PDC_CHASSIS_CALL_ACT_HPUX_PANIC (11ULL << 12) /* System panic */ | ||
217 | #define PDC_CHASSIS_CALL_ACT_HPUX_INIT (12ULL << 12) /* System initialization */ | ||
218 | #define PDC_CHASSIS_CALL_ACT_HPUX_SHUT (13ULL << 12) /* System shutdown */ | ||
219 | #define PDC_CHASSIS_CALL_ACT_HPUX_WARN (14ULL << 12) /* System warning */ | ||
220 | #define PDC_CHASSIS_CALL_ACT_HPUX_DU (15ULL << 12) /* Display_Activity() update */ | ||
221 | |||
222 | /* Activity status - implementation dependent */ | ||
223 | #define PDC_CHASSIS_ACT_STATUS_UNSPEC (0ULL << 0) | ||
224 | |||
225 | /* Caller subactivity - implementation dependent */ | ||
226 | /* FIXME: other subactivities ? */ | ||
227 | #define PDC_CHASSIS_CALL_SACT_UNSPEC (0ULL << 4) /* implementation dependent */ | ||
228 | |||
229 | /* Reporting entity type */ | ||
230 | #define PDC_CHASSIS_RET_GENERICOS (12ULL << 52) /* generic OSes */ | ||
231 | #define PDC_CHASSIS_RET_IA64_NT (13ULL << 52) /* IA-64 NT */ | ||
232 | #define PDC_CHASSIS_RET_HPUX (14ULL << 52) /* HP-UX */ | ||
233 | #define PDC_CHASSIS_RET_DIAG (15ULL << 52) /* offline diagnostics & utilities */ | ||
234 | |||
235 | /* Reporting entity ID */ | ||
236 | #define PDC_CHASSIS_REID_UNSPEC (0ULL << 44) | ||
237 | |||
238 | /* Data type */ | ||
239 | #define PDC_CHASSIS_DT_NONE (0ULL << 59) /* data field unused */ | ||
240 | /* For later use ? Do we need these ? */ | ||
241 | #define PDC_CHASSIS_DT_PHYS_ADDR (1ULL << 59) /* physical address */ | ||
242 | #define PDC_CHASSIS_DT_DATA_EXPECT (2ULL << 59) /* expected data */ | ||
243 | #define PDC_CHASSIS_DT_ACTUAL (3ULL << 59) /* actual data */ | ||
244 | #define PDC_CHASSIS_DT_PHYS_LOC (4ULL << 59) /* physical location */ | ||
245 | #define PDC_CHASSIS_DT_PHYS_LOC_EXT (5ULL << 59) /* physical location extension */ | ||
246 | #define PDC_CHASSIS_DT_TAG (6ULL << 59) /* tag */ | ||
247 | #define PDC_CHASSIS_DT_SYNDROME (7ULL << 59) /* syndrome */ | ||
248 | #define PDC_CHASSIS_DT_CODE_ADDR (8ULL << 59) /* code address */ | ||
249 | #define PDC_CHASSIS_DT_ASCII_MSG (9ULL << 59) /* ascii message */ | ||
250 | #define PDC_CHASSIS_DT_POST (10ULL << 59) /* POST code */ | ||
251 | #define PDC_CHASSIS_DT_TIMESTAMP (11ULL << 59) /* timestamp */ | ||
252 | #define PDC_CHASSIS_DT_DEV_STAT (12ULL << 59) /* device status */ | ||
253 | #define PDC_CHASSIS_DT_DEV_TYPE (13ULL << 59) /* device type */ | ||
254 | #define PDC_CHASSIS_DT_PB_DET (14ULL << 59) /* problem detail */ | ||
255 | #define PDC_CHASSIS_DT_ACT_LEV (15ULL << 59) /* activity level/timeout */ | ||
256 | #define PDC_CHASSIS_DT_SER_NUM (16ULL << 59) /* serial number */ | ||
257 | #define PDC_CHASSIS_DT_REV_NUM (17ULL << 59) /* revision number */ | ||
258 | #define PDC_CHASSIS_DT_INTERRUPT (18ULL << 59) /* interruption information */ | ||
259 | #define PDC_CHASSIS_DT_TEST_NUM (19ULL << 59) /* test number */ | ||
260 | #define PDC_CHASSIS_DT_STATE_CHG (20ULL << 59) /* major changes in system state */ | ||
261 | #define PDC_CHASSIS_DT_PROC_DEALLOC (21ULL << 59) /* processor deallocate */ | ||
262 | #define PDC_CHASSIS_DT_RESET (30ULL << 59) /* reset type and cause */ | ||
263 | #define PDC_CHASSIS_DT_PA_LEGACY (31ULL << 59) /* legacy PA hex chassis code */ | ||
264 | |||
265 | /* System states - part of major changes in system state data field */ | ||
266 | #define PDC_CHASSIS_SYSTATE_BSTART (0ULL << 0) /* boot start */ | ||
267 | #define PDC_CHASSIS_SYSTATE_BCOMP (1ULL << 0) /* boot complete */ | ||
268 | #define PDC_CHASSIS_SYSTATE_CHANGE (2ULL << 0) /* major change */ | ||
269 | #define PDC_CHASSIS_SYSTATE_LED (3ULL << 0) /* LED change */ | ||
270 | #define PDC_CHASSIS_SYSTATE_PANIC (9ULL << 0) /* OS Panic */ | ||
271 | #define PDC_CHASSIS_SYSTATE_DUMP (10ULL << 0) /* memory dump */ | ||
272 | #define PDC_CHASSIS_SYSTATE_HPMC (11ULL << 0) /* processing HPMC */ | ||
273 | #define PDC_CHASSIS_SYSTATE_HALT (15ULL << 0) /* system halted */ | ||
274 | |||
275 | /* Message ID */ | ||
276 | #define PDC_CHASSIS_MSG_ID (0ULL << 40) /* we do not handle msg IDs atm */ | ||
277 | |||
278 | /* EOM - separates log entries */ | ||
279 | #define PDC_CHASSIS_EOM_CLEAR (0ULL << 43) | ||
280 | #define PDC_CHASSIS_EOM_SET (1ULL << 43) | ||
281 | |||
282 | /* | ||
283 | * Preformatted well-known messages | ||
284 | */ | ||
285 | |||
286 | /* Boot started */ | ||
287 | #define PDC_CHASSIS_PMSG_BSTART (PDC_CHASSIS_ALERT_SERPROC | \ | ||
288 | PDC_CHASSIS_SRC_PROC | \ | ||
289 | PDC_CHASSIS_SRC_D_PROC | \ | ||
290 | PDC_CHASSIS_SRC_ID_UNSPEC | \ | ||
291 | PDC_CHASSIS_PB_D_PROC_NONE | \ | ||
292 | PDC_CHASSIS_CALL_ACT_HPUX_INIT | \ | ||
293 | PDC_CHASSIS_ACT_STATUS_UNSPEC | \ | ||
294 | PDC_CHASSIS_CALL_SACT_UNSPEC | \ | ||
295 | PDC_CHASSIS_RET_HPUX | \ | ||
296 | PDC_CHASSIS_REID_UNSPEC | \ | ||
297 | PDC_CHASSIS_DT_STATE_CHG | \ | ||
298 | PDC_CHASSIS_SYSTATE_BSTART | \ | ||
299 | PDC_CHASSIS_MSG_ID | \ | ||
300 | PDC_CHASSIS_EOM_SET ) | ||
301 | |||
302 | /* Boot complete */ | ||
303 | #define PDC_CHASSIS_PMSG_BCOMPLETE (PDC_CHASSIS_ALERT_SERPROC | \ | ||
304 | PDC_CHASSIS_SRC_PROC | \ | ||
305 | PDC_CHASSIS_SRC_D_PROC | \ | ||
306 | PDC_CHASSIS_SRC_ID_UNSPEC | \ | ||
307 | PDC_CHASSIS_PB_D_PROC_NONE | \ | ||
308 | PDC_CHASSIS_CALL_ACT_HPUX_INIT | \ | ||
309 | PDC_CHASSIS_ACT_STATUS_UNSPEC | \ | ||
310 | PDC_CHASSIS_CALL_SACT_UNSPEC | \ | ||
311 | PDC_CHASSIS_RET_HPUX | \ | ||
312 | PDC_CHASSIS_REID_UNSPEC | \ | ||
313 | PDC_CHASSIS_DT_STATE_CHG | \ | ||
314 | PDC_CHASSIS_SYSTATE_BCOMP | \ | ||
315 | PDC_CHASSIS_MSG_ID | \ | ||
316 | PDC_CHASSIS_EOM_SET ) | ||
317 | |||
318 | /* Shutdown */ | ||
319 | #define PDC_CHASSIS_PMSG_SHUTDOWN (PDC_CHASSIS_ALERT_SERPROC | \ | ||
320 | PDC_CHASSIS_SRC_PROC | \ | ||
321 | PDC_CHASSIS_SRC_D_PROC | \ | ||
322 | PDC_CHASSIS_SRC_ID_UNSPEC | \ | ||
323 | PDC_CHASSIS_PB_D_PROC_NONE | \ | ||
324 | PDC_CHASSIS_CALL_ACT_HPUX_SHUT | \ | ||
325 | PDC_CHASSIS_ACT_STATUS_UNSPEC | \ | ||
326 | PDC_CHASSIS_CALL_SACT_UNSPEC | \ | ||
327 | PDC_CHASSIS_RET_HPUX | \ | ||
328 | PDC_CHASSIS_REID_UNSPEC | \ | ||
329 | PDC_CHASSIS_DT_STATE_CHG | \ | ||
330 | PDC_CHASSIS_SYSTATE_HALT | \ | ||
331 | PDC_CHASSIS_MSG_ID | \ | ||
332 | PDC_CHASSIS_EOM_SET ) | ||
333 | |||
334 | /* Panic */ | ||
335 | #define PDC_CHASSIS_PMSG_PANIC (PDC_CHASSIS_ALERT_SOFT_FAIL | \ | ||
336 | PDC_CHASSIS_SRC_PROC | \ | ||
337 | PDC_CHASSIS_SRC_D_PROC | \ | ||
338 | PDC_CHASSIS_SRC_ID_UNSPEC | \ | ||
339 | PDC_CHASSIS_PB_D_PROC_NONE | \ | ||
340 | PDC_CHASSIS_CALL_ACT_HPUX_PANIC| \ | ||
341 | PDC_CHASSIS_ACT_STATUS_UNSPEC | \ | ||
342 | PDC_CHASSIS_CALL_SACT_UNSPEC | \ | ||
343 | PDC_CHASSIS_RET_HPUX | \ | ||
344 | PDC_CHASSIS_REID_UNSPEC | \ | ||
345 | PDC_CHASSIS_DT_STATE_CHG | \ | ||
346 | PDC_CHASSIS_SYSTATE_PANIC | \ | ||
347 | PDC_CHASSIS_MSG_ID | \ | ||
348 | PDC_CHASSIS_EOM_SET ) | ||
349 | |||
350 | // FIXME: extrapolated data | ||
351 | /* HPMC */ | ||
352 | #define PDC_CHASSIS_PMSG_HPMC (PDC_CHASSIS_ALERT_CONF_CHG /*?*/ | \ | ||
353 | PDC_CHASSIS_SRC_PROC | \ | ||
354 | PDC_CHASSIS_SRC_D_PROC | \ | ||
355 | PDC_CHASSIS_SRC_ID_UNSPEC | \ | ||
356 | PDC_CHASSIS_PB_D_PROC_NONE | \ | ||
357 | PDC_CHASSIS_CALL_ACT_HPUX_WARN | \ | ||
358 | PDC_CHASSIS_RET_HPUX | \ | ||
359 | PDC_CHASSIS_DT_STATE_CHG | \ | ||
360 | PDC_CHASSIS_SYSTATE_HPMC | \ | ||
361 | PDC_CHASSIS_MSG_ID | \ | ||
362 | PDC_CHASSIS_EOM_SET ) | ||
363 | |||
364 | /* LPMC */ | ||
365 | #define PDC_CHASSIS_PMSG_LPMC (PDC_CHASSIS_ALERT_BLOCKED /*?*/| \ | ||
366 | PDC_CHASSIS_SRC_PROC | \ | ||
367 | PDC_CHASSIS_SRC_D_PROC | \ | ||
368 | PDC_CHASSIS_SRC_ID_UNSPEC | \ | ||
369 | PDC_CHASSIS_PB_D_PROC_NONE | \ | ||
370 | PDC_CHASSIS_CALL_ACT_HPUX_WARN | \ | ||
371 | PDC_CHASSIS_ACT_STATUS_UNSPEC | \ | ||
372 | PDC_CHASSIS_CALL_SACT_UNSPEC | \ | ||
373 | PDC_CHASSIS_RET_HPUX | \ | ||
374 | PDC_CHASSIS_REID_UNSPEC | \ | ||
375 | PDC_CHASSIS_DT_STATE_CHG | \ | ||
376 | PDC_CHASSIS_SYSTATE_CHANGE | \ | ||
377 | PDC_CHASSIS_MSG_ID | \ | ||
378 | PDC_CHASSIS_EOM_SET ) | ||
379 | |||
380 | #endif /* _PARISC_PDC_CHASSIS_H */ | ||
381 | /* vim: set ts=8 */ | ||
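As an illustration of how the preformatted words above are meant to be consumed, here is a minimal sketch (not part of this patch) that hands one of them to firmware on a PAT machine via pdc_pat_chassis_send_log(), declared in the pdcpat.h header added below; passing 0 as the data word is an assumption, since the patch itself does not pin down that argument.

/* Illustrative sketch only; error handling omitted. */
#include <asm/pdc_chassis.h>
#include <asm/pdcpat.h>

static void example_log_panic_state(void)
{
        /* The PMSG_* macros are just the OR of one value per field group. */
        unsigned long status = PDC_CHASSIS_PMSG_PANIC;

        if (is_pdc_pat())
                pdc_pat_chassis_send_log(status, 0 /* data word, assumed unused */);
}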
diff --git a/arch/parisc/include/asm/pdcpat.h b/arch/parisc/include/asm/pdcpat.h new file mode 100644 index 000000000000..47539f117958 --- /dev/null +++ b/arch/parisc/include/asm/pdcpat.h | |||
@@ -0,0 +1,308 @@ | |||
1 | #ifndef __PARISC_PATPDC_H | ||
2 | #define __PARISC_PATPDC_H | ||
3 | |||
4 | /* | ||
5 | * This file is subject to the terms and conditions of the GNU General Public | ||
6 | * License. See the file "COPYING" in the main directory of this archive | ||
7 | * for more details. | ||
8 | * | ||
9 | * Copyright 2000 (c) Hewlett Packard (Paul Bame <bame()spam.parisc-linux.org>) | ||
10 | * Copyright 2000,2004 (c) Grant Grundler <grundler()nahspam.parisc-linux.org> | ||
11 | */ | ||
12 | |||
13 | |||
14 | #define PDC_PAT_CELL 64L /* Interface for gaining and | ||
15 | * manipulating cell state within PD */ | ||
16 | #define PDC_PAT_CELL_GET_NUMBER 0L /* Return Cell number */ | ||
17 | #define PDC_PAT_CELL_GET_INFO 1L /* Returns info about Cell */ | ||
18 | #define PDC_PAT_CELL_MODULE 2L /* Returns info about Module */ | ||
19 | #define PDC_PAT_CELL_SET_ATTENTION 9L /* Set Cell Attention indicator */ | ||
20 | #define PDC_PAT_CELL_NUMBER_TO_LOC 10L /* Cell Number -> Location */ | ||
21 | #define PDC_PAT_CELL_WALK_FABRIC 11L /* Walk the Fabric */ | ||
22 | #define PDC_PAT_CELL_GET_RDT_SIZE 12L /* Return Route Distance Table Sizes */ | ||
23 | #define PDC_PAT_CELL_GET_RDT 13L /* Return Route Distance Tables */ | ||
24 | #define PDC_PAT_CELL_GET_LOCAL_PDH_SZ 14L /* Read Local PDH Buffer Size */ | ||
25 | #define PDC_PAT_CELL_SET_LOCAL_PDH 15L /* Write Local PDH Buffer */ | ||
26 | #define PDC_PAT_CELL_GET_REMOTE_PDH_SZ 16L /* Return Remote PDH Buffer Size */ | ||
27 | #define PDC_PAT_CELL_GET_REMOTE_PDH 17L /* Read Remote PDH Buffer */ | ||
28 | #define PDC_PAT_CELL_GET_DBG_INFO 128L /* Return DBG Buffer Info */ | ||
29 | #define PDC_PAT_CELL_CHANGE_ALIAS 129L /* Change Non-Equivalent Alias Checking */ | ||
30 | |||
31 | |||
32 | /* | ||
33 | ** Arg to PDC_PAT_CELL_MODULE memaddr[4] | ||
34 | ** | ||
35 | ** Addresses on the Merced Bus != all Runway Bus addresses. | ||
36 | ** This is intended for programming SBA/LBA chips range registers. | ||
37 | */ | ||
38 | #define IO_VIEW 0UL | ||
39 | #define PA_VIEW 1UL | ||
40 | |||
41 | /* PDC_PAT_CELL_MODULE entity type values */ | ||
42 | #define PAT_ENTITY_CA 0 /* central agent */ | ||
43 | #define PAT_ENTITY_PROC 1 /* processor */ | ||
44 | #define PAT_ENTITY_MEM 2 /* memory controller */ | ||
45 | #define PAT_ENTITY_SBA 3 /* system bus adapter */ | ||
46 | #define PAT_ENTITY_LBA 4 /* local bus adapter */ | ||
47 | #define PAT_ENTITY_PBC 5 /* processor bus converter */ | ||
48 | #define PAT_ENTITY_XBC 6 /* crossbar fabric connect */ | ||
49 | #define PAT_ENTITY_RC 7 /* fabric interconnect */ | ||
50 | |||
51 | /* PDC_PAT_CELL_MODULE address range type values */ | ||
52 | #define PAT_PBNUM 0 /* PCI Bus Number */ | ||
53 | #define PAT_LMMIO 1 /* < 4G MMIO Space */ | ||
54 | #define PAT_GMMIO 2 /* > 4G MMIO Space */ | ||
55 | #define PAT_NPIOP 3 /* Non Postable I/O Port Space */ | ||
56 | #define PAT_PIOP 4 /* Postable I/O Port Space */ | ||
57 | #define PAT_AHPA 5 /* Additional HPA Space */ | ||
58 | #define PAT_UFO 6 /* HPA Space (UFO for Mariposa) */ | ||
59 | #define PAT_GNIP 7 /* GNI Reserved Space */ | ||
60 | |||
61 | |||
62 | |||
63 | /* PDC PAT CHASSIS LOG -- Platform logging & forward progress functions */ | ||
64 | |||
65 | #define PDC_PAT_CHASSIS_LOG 65L | ||
66 | #define PDC_PAT_CHASSIS_WRITE_LOG 0L /* Write Log Entry */ | ||
67 | #define PDC_PAT_CHASSIS_READ_LOG 1L /* Read Log Entry */ | ||
68 | |||
69 | |||
70 | /* PDC PAT CPU -- CPU configuration within the protection domain */ | ||
71 | |||
72 | #define PDC_PAT_CPU 67L | ||
73 | #define PDC_PAT_CPU_INFO 0L /* Return CPU config info */ | ||
74 | #define PDC_PAT_CPU_DELETE 1L /* Delete CPU */ | ||
75 | #define PDC_PAT_CPU_ADD 2L /* Add CPU */ | ||
76 | #define PDC_PAT_CPU_GET_NUMBER 3L /* Return CPU Number */ | ||
77 | #define PDC_PAT_CPU_GET_HPA 4L /* Return CPU HPA */ | ||
78 | #define PDC_PAT_CPU_STOP 5L /* Stop CPU */ | ||
79 | #define PDC_PAT_CPU_RENDEZVOUS 6L /* Rendezvous CPU */ | ||
80 | #define PDC_PAT_CPU_GET_CLOCK_INFO 7L /* Return CPU Clock info */ | ||
81 | #define PDC_PAT_CPU_GET_RENDEZVOUS_STATE 8L /* Return Rendezvous State */ | ||
82 | #define PDC_PAT_CPU_PLUNGE_FABRIC 128L /* Plunge Fabric */ | ||
83 | #define PDC_PAT_CPU_UPDATE_CACHE_CLEANSING 129L /* Manipulate Cache | ||
84 | * Cleansing Mode */ | ||
85 | /* PDC PAT EVENT -- Platform Events */ | ||
86 | |||
87 | #define PDC_PAT_EVENT 68L | ||
88 | #define PDC_PAT_EVENT_GET_CAPS 0L /* Get Capabilities */ | ||
89 | #define PDC_PAT_EVENT_SET_MODE 1L /* Set Notification Mode */ | ||
90 | #define PDC_PAT_EVENT_SCAN 2L /* Scan Event */ | ||
91 | #define PDC_PAT_EVENT_HANDLE 3L /* Handle Event */ | ||
92 | #define PDC_PAT_EVENT_GET_NB_CALL 4L /* Get Non-Blocking call Args */ | ||
93 | |||
94 | /* PDC PAT HPMC -- Cause processor to go into spin loop, and wait | ||
95 | * for wake up from Monarch Processor. | ||
96 | */ | ||
97 | |||
98 | #define PDC_PAT_HPMC 70L | ||
99 | #define PDC_PAT_HPMC_RENDEZ_CPU 0L /* go into spin loop */ | ||
100 | #define PDC_PAT_HPMC_SET_PARAMS 1L /* Allows OS to specify intr which PDC | ||
101 | * will use to interrupt OS during | ||
102 | * machine check rendezvous */ | ||
103 | |||
104 | /* parameters for PDC_PAT_HPMC_SET_PARAMS: */ | ||
105 | #define HPMC_SET_PARAMS_INTR 1L /* Rendezvous Interrupt */ | ||
106 | #define HPMC_SET_PARAMS_WAKE 2L /* Wake up processor */ | ||
107 | |||
108 | |||
109 | /* PDC PAT IO -- On-line services for I/O modules */ | ||
110 | |||
111 | #define PDC_PAT_IO 71L | ||
112 | #define PDC_PAT_IO_GET_SLOT_STATUS 5L /* Get Slot Status Info*/ | ||
113 | #define PDC_PAT_IO_GET_LOC_FROM_HARDWARE 6L /* Get Physical Location from */ | ||
114 | /* Hardware Path */ | ||
115 | #define PDC_PAT_IO_GET_HARDWARE_FROM_LOC 7L /* Get Hardware Path from | ||
116 | * Physical Location */ | ||
117 | #define PDC_PAT_IO_GET_PCI_CONFIG_FROM_HW 11L /* Get PCI Configuration | ||
118 | * Address from Hardware Path */ | ||
119 | #define PDC_PAT_IO_GET_HW_FROM_PCI_CONFIG 12L /* Get Hardware Path | ||
120 | * from PCI Configuration Address */ | ||
121 | #define PDC_PAT_IO_READ_HOST_BRIDGE_INFO 13L /* Read Host Bridge State Info */ | ||
122 | #define PDC_PAT_IO_CLEAR_HOST_BRIDGE_INFO 14L /* Clear Host Bridge State Info*/ | ||
123 | #define PDC_PAT_IO_GET_PCI_ROUTING_TABLE_SIZE 15L /* Get PCI INT Routing Table | ||
124 | * Size */ | ||
125 | #define PDC_PAT_IO_GET_PCI_ROUTING_TABLE 16L /* Get PCI INT Routing Table */ | ||
126 | #define PDC_PAT_IO_GET_HINT_TABLE_SIZE 17L /* Get Hint Table Size */ | ||
127 | #define PDC_PAT_IO_GET_HINT_TABLE 18L /* Get Hint Table */ | ||
128 | #define PDC_PAT_IO_PCI_CONFIG_READ 19L /* PCI Config Read */ | ||
129 | #define PDC_PAT_IO_PCI_CONFIG_WRITE 20L /* PCI Config Write */ | ||
130 | #define PDC_PAT_IO_GET_NUM_IO_SLOTS 21L /* Get Number of I/O Bay Slots in | ||
131 | * Cabinet */ | ||
132 | #define PDC_PAT_IO_GET_LOC_IO_SLOTS 22L /* Get Physical Location of I/O */ | ||
133 | /* Bay Slots in Cabinet */ | ||
134 | #define PDC_PAT_IO_BAY_STATUS_INFO 28L /* Get I/O Bay Slot Status Info */ | ||
135 | #define PDC_PAT_IO_GET_PROC_VIEW 29L /* Get Processor view of IO address */ | ||
136 | #define PDC_PAT_IO_PROG_SBA_DIR_RANGE 30L /* Program directed range */ | ||
137 | |||
138 | |||
139 | /* PDC PAT MEM -- Manage memory page deallocation */ | ||
140 | |||
141 | #define PDC_PAT_MEM 72L | ||
142 | #define PDC_PAT_MEM_PD_INFO 0L /* Return PDT info for PD */ | ||
143 | #define PDC_PAT_MEM_PD_CLEAR 1L /* Clear PDT for PD */ | ||
144 | #define PDC_PAT_MEM_PD_READ 2L /* Read PDT entries for PD */ | ||
145 | #define PDC_PAT_MEM_PD_RESET 3L /* Reset clear bit for PD */ | ||
146 | #define PDC_PAT_MEM_CELL_INFO 5L /* Return PDT info For Cell */ | ||
147 | #define PDC_PAT_MEM_CELL_CLEAR 6L /* Clear PDT For Cell */ | ||
148 | #define PDC_PAT_MEM_CELL_READ 7L /* Read PDT entries For Cell */ | ||
149 | #define PDC_PAT_MEM_CELL_RESET 8L /* Reset clear bit For Cell */ | ||
150 | #define PDC_PAT_MEM_SETGM 9L /* Set Golden Memory value */ | ||
151 | #define PDC_PAT_MEM_ADD_PAGE 10L /* ADDs a page to the cell */ | ||
152 | #define PDC_PAT_MEM_ADDRESS 11L /* Get Physical Location From */ | ||
153 | /* Memory Address */ | ||
154 | #define PDC_PAT_MEM_GET_TXT_SIZE 12L /* Get Formatted Text Size */ | ||
155 | #define PDC_PAT_MEM_GET_PD_TXT 13L /* Get PD Formatted Text */ | ||
156 | #define PDC_PAT_MEM_GET_CELL_TXT 14L /* Get Cell Formatted Text */ | ||
157 | #define PDC_PAT_MEM_RD_STATE_INFO 15L /* Read Mem Module State Info*/ | ||
158 | #define PDC_PAT_MEM_CLR_STATE_INFO 16L /*Clear Mem Module State Info*/ | ||
159 | #define PDC_PAT_MEM_CLEAN_RANGE 128L /*Clean Mem in specific range*/ | ||
160 | #define PDC_PAT_MEM_GET_TBL_SIZE 131L /* Get Memory Table Size */ | ||
161 | #define PDC_PAT_MEM_GET_TBL 132L /* Get Memory Table */ | ||
162 | |||
163 | |||
164 | /* PDC PAT NVOLATILE -- Access Non-Volatile Memory */ | ||
165 | |||
166 | #define PDC_PAT_NVOLATILE 73L | ||
167 | #define PDC_PAT_NVOLATILE_READ 0L /* Read Non-Volatile Memory */ | ||
168 | #define PDC_PAT_NVOLATILE_WRITE 1L /* Write Non-Volatile Memory */ | ||
169 | #define PDC_PAT_NVOLATILE_GET_SIZE 2L /* Return size of NVM */ | ||
170 | #define PDC_PAT_NVOLATILE_VERIFY 3L /* Verify contents of NVM */ | ||
171 | #define PDC_PAT_NVOLATILE_INIT 4L /* Initialize NVM */ | ||
172 | |||
173 | /* PDC PAT PD */ | ||
174 | #define PDC_PAT_PD 74L /* Protection Domain Info */ | ||
175 | #define PDC_PAT_PD_GET_ADDR_MAP 0L /* Get Address Map */ | ||
176 | |||
177 | /* PDC_PAT_PD_GET_ADDR_MAP entry types */ | ||
178 | #define PAT_MEMORY_DESCRIPTOR 1 | ||
179 | |||
180 | /* PDC_PAT_PD_GET_ADDR_MAP memory types */ | ||
181 | #define PAT_MEMTYPE_MEMORY 0 | ||
182 | #define PAT_MEMTYPE_FIRMWARE 4 | ||
183 | |||
184 | /* PDC_PAT_PD_GET_ADDR_MAP memory usage */ | ||
185 | #define PAT_MEMUSE_GENERAL 0 | ||
186 | #define PAT_MEMUSE_GI 128 | ||
187 | #define PAT_MEMUSE_GNI 129 | ||
188 | |||
189 | |||
190 | #ifndef __ASSEMBLY__ | ||
191 | #include <linux/types.h> | ||
192 | |||
193 | #ifdef CONFIG_64BIT | ||
194 | #define is_pdc_pat() (PDC_TYPE_PAT == pdc_type) | ||
195 | extern int pdc_pat_get_irt_size(unsigned long *num_entries, unsigned long cell_num); | ||
196 | extern int pdc_pat_get_irt(void *r_addr, unsigned long cell_num); | ||
197 | #else /* ! CONFIG_64BIT */ | ||
198 | /* No PAT support for 32-bit kernels...sorry */ | ||
199 | #define is_pdc_pat() (0) | ||
200 | #define pdc_pat_get_irt_size(num_entries, cell_num) PDC_BAD_PROC | ||
201 | #define pdc_pat_get_irt(r_addr, cell_num) PDC_BAD_PROC | ||
202 | #endif /* ! CONFIG_64BIT */ | ||
203 | |||
204 | |||
205 | struct pdc_pat_cell_num { | ||
206 | unsigned long cell_num; | ||
207 | unsigned long cell_loc; | ||
208 | }; | ||
209 | |||
210 | struct pdc_pat_cpu_num { | ||
211 | unsigned long cpu_num; | ||
212 | unsigned long cpu_loc; | ||
213 | }; | ||
214 | |||
215 | struct pdc_pat_pd_addr_map_entry { | ||
216 | unsigned char entry_type; /* 1 = Memory Descriptor Entry Type */ | ||
217 | unsigned char reserve1[5]; | ||
218 | unsigned char memory_type; | ||
219 | unsigned char memory_usage; | ||
220 | unsigned long paddr; | ||
221 | unsigned int pages; /* Length in 4K pages */ | ||
222 | unsigned int reserve2; | ||
223 | unsigned long cell_map; | ||
224 | }; | ||
225 | |||
226 | /******************************************************************** | ||
227 | * PDC_PAT_CELL[Return Cell Module] memaddr[0] conf_base_addr | ||
228 | * ---------------------------------------------------------- | ||
229 | * Bit 0 to 51 - conf_base_addr | ||
230 | * Bit 52 to 62 - reserved | ||
231 | * Bit 63 - endianness bit | ||
232 | ********************************************************************/ | ||
233 | #define PAT_GET_CBA(value) ((value) & 0xfffffffffffff000UL) | ||
234 | |||
235 | /******************************************************************** | ||
236 | * PDC_PAT_CELL[Return Cell Module] memaddr[1] mod_info | ||
237 | * ---------------------------------------------------- | ||
238 | * Bit 0 to 7 - entity type | ||
239 | * 0 = central agent, 1 = processor, | ||
240 | * 2 = memory controller, 3 = system bus adapter, | ||
241 | * 4 = local bus adapter, 5 = processor bus converter, | ||
242 | * 6 = crossbar fabric connect, 7 = fabric interconnect, | ||
243 | * 8 to 254 reserved, 255 = unknown. | ||
244 | * Bit 8 to 15 - DVI | ||
245 | * Bit 16 to 23 - IOC functions | ||
246 | * Bit 24 to 39 - reserved | ||
247 | * Bit 40 to 63 - mod_pages | ||
248 | * number of 4K pages a module occupies starting at conf_base_addr | ||
249 | ********************************************************************/ | ||
250 | #define PAT_GET_ENTITY(value) (((value) >> 56) & 0xffUL) | ||
251 | #define PAT_GET_DVI(value) (((value) >> 48) & 0xffUL) | ||
252 | #define PAT_GET_IOC(value) (((value) >> 40) & 0xffUL) | ||
253 | #define PAT_GET_MOD_PAGES(value) ((value) & 0xffffffUL) | ||
254 | |||
255 | |||
256 | /* | ||
257 | ** PDC_PAT_CELL_GET_INFO return block | ||
258 | */ | ||
259 | typedef struct pdc_pat_cell_info_rtn_block { | ||
260 | unsigned long cpu_info; | ||
261 | unsigned long cell_info; | ||
262 | unsigned long cell_location; | ||
263 | unsigned long reo_location; | ||
264 | unsigned long mem_size; | ||
265 | unsigned long dimm_status; | ||
266 | unsigned long pdc_rev; | ||
267 | unsigned long fabric_info0; | ||
268 | unsigned long fabric_info1; | ||
269 | unsigned long fabric_info2; | ||
270 | unsigned long fabric_info3; | ||
271 | unsigned long reserved[21]; | ||
272 | } pdc_pat_cell_info_rtn_block_t; | ||
273 | |||
274 | |||
275 | /* FIXME: mod[508] should really be a union of the various mod components */ | ||
276 | struct pdc_pat_cell_mod_maddr_block { /* PDC_PAT_CELL_MODULE */ | ||
277 | unsigned long cba; /* func 0 cfg space address */ | ||
278 | unsigned long mod_info; /* module information */ | ||
279 | unsigned long mod_location; /* physical location of the module */ | ||
280 | struct hardware_path mod_path; /* module path (device path - layers) */ | ||
281 | unsigned long mod[508]; /* PAT cell module components */ | ||
282 | } __attribute__((aligned(8))) ; | ||
283 | |||
284 | typedef struct pdc_pat_cell_mod_maddr_block pdc_pat_cell_mod_maddr_block_t; | ||
285 | |||
286 | |||
287 | extern int pdc_pat_chassis_send_log(unsigned long status, unsigned long data); | ||
288 | extern int pdc_pat_cell_get_number(struct pdc_pat_cell_num *cell_info); | ||
289 | extern int pdc_pat_cell_module(unsigned long *actcnt, unsigned long ploc, unsigned long mod, unsigned long view_type, void *mem_addr); | ||
290 | extern int pdc_pat_cell_num_to_loc(void *, unsigned long); | ||
291 | |||
292 | extern int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, void *hpa); | ||
293 | |||
294 | extern int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr, unsigned long count, unsigned long offset); | ||
295 | |||
296 | |||
297 | extern int pdc_pat_io_pci_cfg_read(unsigned long pci_addr, int pci_size, u32 *val); | ||
298 | extern int pdc_pat_io_pci_cfg_write(unsigned long pci_addr, int pci_size, u32 val); | ||
299 | |||
300 | |||
301 | /* Flag to indicate this is a PAT box...don't use this unless you | ||
302 | ** really have to...it might go away some day. | ||
303 | */ | ||
304 | extern int pdc_pat; /* arch/parisc/kernel/inventory.c */ | ||
305 | |||
306 | #endif /* __ASSEMBLY__ */ | ||
307 | |||
308 | #endif /* ! __PARISC_PATPDC_H */ | ||
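To make the cell/module interface above more concrete, here is a hedged sketch (not part of this patch) of how the calls and the PAT_GET_* accessors could be combined to enumerate the modules of the local cell. It loosely mirrors what an inventory walk would do, treats any non-zero return as the end of the list (an assumption; real code would distinguish error codes), and takes PDC_OK (success, 0) from <asm/pdc.h>.

/* Illustrative sketch only. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>

static void __init example_walk_cell_modules(void)
{
        /* static: the return block is roughly 4 kB, too big for the stack */
        static struct pdc_pat_cell_mod_maddr_block mod_blk;
        struct pdc_pat_cell_num cell;
        unsigned long bytes, mod;

        if (!is_pdc_pat() || pdc_pat_cell_get_number(&cell) != PDC_OK)
                return;

        for (mod = 0; pdc_pat_cell_module(&bytes, cell.cell_loc, mod,
                                          PA_VIEW, &mod_blk) == PDC_OK; mod++)
                printk(KERN_INFO "module %lu: entity %lu, %lu pages at 0x%lx\n",
                       mod, PAT_GET_ENTITY(mod_blk.mod_info),
                       PAT_GET_MOD_PAGES(mod_blk.mod_info),
                       PAT_GET_CBA(mod_blk.cba));
}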
diff --git a/arch/parisc/include/asm/percpu.h b/arch/parisc/include/asm/percpu.h new file mode 100644 index 000000000000..a0dcd1970128 --- /dev/null +++ b/arch/parisc/include/asm/percpu.h | |||
@@ -0,0 +1,7 @@ | |||
1 | #ifndef _PARISC_PERCPU_H | ||
2 | #define _PARISC_PERCPU_H | ||
3 | |||
4 | #include <asm-generic/percpu.h> | ||
5 | |||
6 | #endif | ||
7 | |||
diff --git a/arch/parisc/include/asm/perf.h b/arch/parisc/include/asm/perf.h new file mode 100644 index 000000000000..a18e11972c09 --- /dev/null +++ b/arch/parisc/include/asm/perf.h | |||
@@ -0,0 +1,74 @@ | |||
1 | #ifndef _ASM_PERF_H_ | ||
2 | #define _ASM_PERF_H_ | ||
3 | |||
4 | /* ioctls */ | ||
5 | #define PA_PERF_ON _IO('p', 1) | ||
6 | #define PA_PERF_OFF _IOR('p', 2, unsigned int) | ||
7 | #define PA_PERF_VERSION _IOR('p', 3, int) | ||
8 | |||
9 | #define PA_PERF_DEV "perf" | ||
10 | #define PA_PERF_MINOR 146 | ||
11 | |||
12 | /* Interface types */ | ||
13 | #define UNKNOWN_INTF 255 | ||
14 | #define ONYX_INTF 0 | ||
15 | #define CUDA_INTF 1 | ||
16 | |||
17 | /* Common Onyx and Cuda images */ | ||
18 | #define CPI 0 | ||
19 | #define BUSUTIL 1 | ||
20 | #define TLBMISS 2 | ||
21 | #define TLBHANDMISS 3 | ||
22 | #define PTKN 4 | ||
23 | #define PNTKN 5 | ||
24 | #define IMISS 6 | ||
25 | #define DMISS 7 | ||
26 | #define DMISS_ACCESS 8 | ||
27 | #define BIG_CPI 9 | ||
28 | #define BIG_LS 10 | ||
29 | #define BR_ABORT 11 | ||
30 | #define ISNT 12 | ||
31 | #define QUADRANT 13 | ||
32 | #define RW_PDFET 14 | ||
33 | #define RW_WDFET 15 | ||
34 | #define SHLIB_CPI 16 | ||
35 | |||
36 | /* Cuda only Images */ | ||
37 | #define FLOPS 17 | ||
38 | #define CACHEMISS 18 | ||
39 | #define BRANCHES 19 | ||
40 | #define CRSTACK 20 | ||
41 | #define I_CACHE_SPEC 21 | ||
42 | #define MAX_CUDA_IMAGES 22 | ||
43 | |||
44 | /* Onyx only Images */ | ||
45 | #define ADDR_INV_ABORT_ALU 17 | ||
46 | #define BRAD_STALL 18 | ||
47 | #define CNTL_IN_PIPEL 19 | ||
48 | #define DSNT_XFH 20 | ||
49 | #define FET_SIG1 21 | ||
50 | #define FET_SIG2 22 | ||
51 | #define G7_1 23 | ||
52 | #define G7_2 24 | ||
53 | #define G7_3 25 | ||
54 | #define G7_4 26 | ||
55 | #define MPB_LABORT 27 | ||
56 | #define PANIC 28 | ||
57 | #define RARE_INST 29 | ||
58 | #define RW_DFET 30 | ||
59 | #define RW_IFET 31 | ||
60 | #define RW_SDFET 32 | ||
61 | #define SPEC_IFET 33 | ||
62 | #define ST_COND0 34 | ||
63 | #define ST_COND1 35 | ||
64 | #define ST_COND2 36 | ||
65 | #define ST_COND3 37 | ||
66 | #define ST_COND4 38 | ||
67 | #define ST_UNPRED0 39 | ||
68 | #define ST_UNPRED1 40 | ||
69 | #define UNPRED 41 | ||
70 | #define GO_STORE 42 | ||
71 | #define SHLIB_CALL 43 | ||
72 | #define MAX_ONYX_IMAGES 44 | ||
73 | |||
74 | #endif | ||
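Since the ioctl numbers above are user-visible, a small user-space sketch may help. The /dev/perf node name is an assumption (the header only fixes a minor number, 146), and what PA_PERF_OFF actually copies back is not specified here, so the snippet only shows the calling convention implied by the _IO/_IOR encodings.

/* Illustrative user-space sketch only. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/perf.h>

int main(void)
{
        unsigned int counters = 0;
        int version, fd = open("/dev/perf", O_RDWR);    /* node name assumed */

        if (fd < 0)
                return 1;
        if (ioctl(fd, PA_PERF_VERSION, &version) == 0)
                printf("perf interface version %d\n", version);
        ioctl(fd, PA_PERF_ON);                  /* start counting */
        /* ... run the workload of interest ... */
        ioctl(fd, PA_PERF_OFF, &counters);      /* stop; reads back an unsigned int */
        close(fd);
        return 0;
}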
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h new file mode 100644 index 000000000000..fc987a1c12a8 --- /dev/null +++ b/arch/parisc/include/asm/pgalloc.h | |||
@@ -0,0 +1,149 @@ | |||
1 | #ifndef _ASM_PGALLOC_H | ||
2 | #define _ASM_PGALLOC_H | ||
3 | |||
4 | #include <linux/gfp.h> | ||
5 | #include <linux/mm.h> | ||
6 | #include <linux/threads.h> | ||
7 | #include <asm/processor.h> | ||
8 | #include <asm/fixmap.h> | ||
9 | |||
10 | #include <asm/cache.h> | ||
11 | |||
12 | /* Allocate the top level pgd (page directory) | ||
13 | * | ||
14 | * Here (for 64 bit kernels) we implement a Hybrid L2/L3 scheme: we | ||
15 | * allocate the first pmd adjacent to the pgd. This means that we can | ||
16 | * subtract a constant offset to get to it. The pmd and pgd sizes are | ||
17 | * arranged so that a single pmd covers 4GB (giving a full 64-bit | ||
18 | * process access to 8TB) so our lookups are effectively L2 for the | ||
19 | * first 4GB of the kernel (i.e. for all ILP32 processes and all the | ||
20 | * kernel for machines with under 4GB of memory) */ | ||
21 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) | ||
22 | { | ||
23 | pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, | ||
24 | PGD_ALLOC_ORDER); | ||
25 | pgd_t *actual_pgd = pgd; | ||
26 | |||
27 | if (likely(pgd != NULL)) { | ||
28 | memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER); | ||
29 | #ifdef CONFIG_64BIT | ||
30 | actual_pgd += PTRS_PER_PGD; | ||
31 | /* Populate first pmd with allocated memory. We mark it | ||
32 | * with PxD_FLAG_ATTACHED as a signal to the system that this | ||
33 | * pmd entry may not be cleared. */ | ||
34 | __pgd_val_set(*actual_pgd, (PxD_FLAG_PRESENT | | ||
35 | PxD_FLAG_VALID | | ||
36 | PxD_FLAG_ATTACHED) | ||
37 | + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT)); | ||
38 | /* The first pmd entry is also marked with PxD_FLAG_ATTACHED as | ||
39 | * a signal that this pmd may not be freed */ | ||
40 | __pgd_val_set(*pgd, PxD_FLAG_ATTACHED); | ||
41 | #endif | ||
42 | } | ||
43 | return actual_pgd; | ||
44 | } | ||
45 | |||
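A quick sanity check of the figures quoted in the comment above pgd_alloc(), assuming the usual 64-bit/4 kB configuration with 8-byte PTEs and 4-byte pgd/pmd entries (those widths come from page.h, not from this file): one PTE page maps 2^9 * 4 kB = 2 MB, the two-page pmd holds 2^11 entries and so spans 2^11 * 2 MB = 4 GB, and the two-page pgd's 2^11 entries then cover 2^11 * 4 GB = 8 TB, matching the 4 GB-per-pmd and 8 TB figures in the comment.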
46 | static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) | ||
47 | { | ||
48 | #ifdef CONFIG_64BIT | ||
49 | pgd -= PTRS_PER_PGD; | ||
50 | #endif | ||
51 | free_pages((unsigned long)pgd, PGD_ALLOC_ORDER); | ||
52 | } | ||
53 | |||
54 | #if PT_NLEVELS == 3 | ||
55 | |||
56 | /* Three Level Page Table Support for pmd's */ | ||
57 | |||
58 | static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) | ||
59 | { | ||
60 | __pgd_val_set(*pgd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) + | ||
61 | (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT)); | ||
62 | } | ||
63 | |||
64 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) | ||
65 | { | ||
66 | pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, | ||
67 | PMD_ORDER); | ||
68 | if (pmd) | ||
69 | memset(pmd, 0, PAGE_SIZE<<PMD_ORDER); | ||
70 | return pmd; | ||
71 | } | ||
72 | |||
73 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) | ||
74 | { | ||
75 | #ifdef CONFIG_64BIT | ||
76 | if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) | ||
77 | /* This is the permanent pmd attached to the pgd; | ||
78 | * cannot free it */ | ||
79 | return; | ||
80 | #endif | ||
81 | free_pages((unsigned long)pmd, PMD_ORDER); | ||
82 | } | ||
83 | |||
84 | #else | ||
85 | |||
86 | /* Two Level Page Table Support for pmd's */ | ||
87 | |||
88 | /* | ||
89 | * allocating and freeing a pmd is trivial: the 1-entry pmd is | ||
90 | * inside the pgd, so has no extra memory associated with it. | ||
91 | */ | ||
92 | |||
93 | #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); }) | ||
94 | #define pmd_free(mm, x) do { } while (0) | ||
95 | #define pgd_populate(mm, pmd, pte) BUG() | ||
96 | |||
97 | #endif | ||
98 | |||
99 | static inline void | ||
100 | pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) | ||
101 | { | ||
102 | #ifdef CONFIG_64BIT | ||
103 | /* preserve the attached marker (PxD_FLAG_ATTACHED) if this is | ||
104 | * the beginning of the permanent pmd */ | ||
105 | if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) | ||
106 | __pmd_val_set(*pmd, (PxD_FLAG_PRESENT | | ||
107 | PxD_FLAG_VALID | | ||
108 | PxD_FLAG_ATTACHED) | ||
109 | + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)); | ||
110 | else | ||
111 | #endif | ||
112 | __pmd_val_set(*pmd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) | ||
113 | + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)); | ||
114 | } | ||
115 | |||
116 | #define pmd_populate(mm, pmd, pte_page) \ | ||
117 | pmd_populate_kernel(mm, pmd, page_address(pte_page)) | ||
118 | #define pmd_pgtable(pmd) pmd_page(pmd) | ||
119 | |||
120 | static inline pgtable_t | ||
121 | pte_alloc_one(struct mm_struct *mm, unsigned long address) | ||
122 | { | ||
123 | struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); | ||
124 | if (page) | ||
125 | pgtable_page_ctor(page); | ||
126 | return page; | ||
127 | } | ||
128 | |||
129 | static inline pte_t * | ||
130 | pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) | ||
131 | { | ||
132 | pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); | ||
133 | return pte; | ||
134 | } | ||
135 | |||
136 | static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) | ||
137 | { | ||
138 | free_page((unsigned long)pte); | ||
139 | } | ||
140 | |||
141 | static inline void pte_free(struct mm_struct *mm, struct page *pte) | ||
142 | { | ||
143 | pgtable_page_dtor(pte); | ||
144 | pte_free_kernel(mm, page_address(pte)); | ||
145 | } | ||
146 | |||
147 | #define check_pgt_cache() do { } while (0) | ||
148 | |||
149 | #endif | ||
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h new file mode 100644 index 000000000000..470a4b88124d --- /dev/null +++ b/arch/parisc/include/asm/pgtable.h | |||
@@ -0,0 +1,508 @@ | |||
1 | #ifndef _PARISC_PGTABLE_H | ||
2 | #define _PARISC_PGTABLE_H | ||
3 | |||
4 | #include <asm-generic/4level-fixup.h> | ||
5 | |||
6 | #include <asm/fixmap.h> | ||
7 | |||
8 | #ifndef __ASSEMBLY__ | ||
9 | /* | ||
10 | * we simulate an x86-style page table for the linux mm code | ||
11 | */ | ||
12 | |||
13 | #include <linux/mm.h> /* for vm_area_struct */ | ||
14 | #include <linux/bitops.h> | ||
15 | #include <asm/processor.h> | ||
16 | #include <asm/cache.h> | ||
17 | |||
18 | /* | ||
19 | * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel | ||
20 | * memory. For the return value to be meaningful, ADDR must be >= | ||
21 | * PAGE_OFFSET. This operation can be relatively expensive (e.g., | ||
22 | * require a hash-, or multi-level tree-lookup or something of that | ||
23 | * sort) but it guarantees to return TRUE only if accessing the page | ||
24 | * at that address does not cause an error. Note that there may be | ||
25 | * addresses for which kern_addr_valid() returns FALSE even though an | ||
26 | * access would not cause an error (e.g., this is typically true for | ||
27 | * memory mapped I/O regions). | ||
28 | * | ||
29 | * XXX Need to implement this for parisc. | ||
30 | */ | ||
31 | #define kern_addr_valid(addr) (1) | ||
32 | |||
33 | /* Certain architectures need to do special things when PTEs | ||
34 | * within a page table are directly modified. Thus, the following | ||
35 | * hook is made available. | ||
36 | */ | ||
37 | #define set_pte(pteptr, pteval) \ | ||
38 | do{ \ | ||
39 | *(pteptr) = (pteval); \ | ||
40 | } while(0) | ||
41 | #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) | ||
42 | |||
43 | #endif /* !__ASSEMBLY__ */ | ||
44 | |||
45 | #define pte_ERROR(e) \ | ||
46 | printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) | ||
47 | #define pmd_ERROR(e) \ | ||
48 | printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, (unsigned long)pmd_val(e)) | ||
49 | #define pgd_ERROR(e) \ | ||
50 | printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e)) | ||
51 | |||
52 | /* This is the size of the initially mapped kernel memory */ | ||
53 | #ifdef CONFIG_64BIT | ||
54 | #define KERNEL_INITIAL_ORDER 24 /* 0 to 1<<24 = 16MB */ | ||
55 | #else | ||
56 | #define KERNEL_INITIAL_ORDER 23 /* 0 to 1<<23 = 8MB */ | ||
57 | #endif | ||
58 | #define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER) | ||
59 | |||
60 | #if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB) | ||
61 | #define PT_NLEVELS 3 | ||
62 | #define PGD_ORDER 1 /* Number of pages per pgd */ | ||
63 | #define PMD_ORDER 1 /* Number of pages per pmd */ | ||
64 | #define PGD_ALLOC_ORDER 2 /* first pgd contains pmd */ | ||
65 | #else | ||
66 | #define PT_NLEVELS 2 | ||
67 | #define PGD_ORDER 1 /* Number of pages per pgd */ | ||
68 | #define PGD_ALLOC_ORDER PGD_ORDER | ||
69 | #endif | ||
70 | |||
71 | /* Definitions for 3rd level (we use PLD here for Page Lower directory | ||
72 | * because PTE_SHIFT is used lower down to mean the shift that has to be | ||
73 | * done to get usable bits out of the PTE) */ | ||
74 | #define PLD_SHIFT PAGE_SHIFT | ||
75 | #define PLD_SIZE PAGE_SIZE | ||
76 | #define BITS_PER_PTE (PAGE_SHIFT - BITS_PER_PTE_ENTRY) | ||
77 | #define PTRS_PER_PTE (1UL << BITS_PER_PTE) | ||
78 | |||
79 | /* Definitions for 2nd level */ | ||
80 | #define pgtable_cache_init() do { } while (0) | ||
81 | |||
82 | #define PMD_SHIFT (PLD_SHIFT + BITS_PER_PTE) | ||
83 | #define PMD_SIZE (1UL << PMD_SHIFT) | ||
84 | #define PMD_MASK (~(PMD_SIZE-1)) | ||
85 | #if PT_NLEVELS == 3 | ||
86 | #define BITS_PER_PMD (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY) | ||
87 | #else | ||
88 | #define BITS_PER_PMD 0 | ||
89 | #endif | ||
90 | #define PTRS_PER_PMD (1UL << BITS_PER_PMD) | ||
91 | |||
92 | /* Definitions for 1st level */ | ||
93 | #define PGDIR_SHIFT (PMD_SHIFT + BITS_PER_PMD) | ||
94 | #define BITS_PER_PGD (PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY) | ||
95 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | ||
96 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | ||
97 | #define PTRS_PER_PGD (1UL << BITS_PER_PGD) | ||
98 | #define USER_PTRS_PER_PGD PTRS_PER_PGD | ||
99 | |||
100 | #define MAX_ADDRBITS (PGDIR_SHIFT + BITS_PER_PGD) | ||
101 | #define MAX_ADDRESS (1UL << MAX_ADDRBITS) | ||
102 | |||
103 | #define SPACEID_SHIFT (MAX_ADDRBITS - 32) | ||
104 | |||
105 | /* This calculates the number of initial pages we need for the initial | ||
106 | * page tables */ | ||
107 | #if (KERNEL_INITIAL_ORDER) >= (PMD_SHIFT) | ||
108 | # define PT_INITIAL (1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT)) | ||
109 | #else | ||
110 | # define PT_INITIAL (1) /* all initial PTEs fit into one page */ | ||
111 | #endif | ||
112 | |||
113 | /* | ||
114 | * pgd entries used up by user/kernel: | ||
115 | */ | ||
116 | |||
117 | #define FIRST_USER_ADDRESS 0 | ||
118 | |||
119 | /* NB: The tlb miss handlers make certain assumptions about the order */ | ||
120 | /* of the following bits, so be careful (One example, bits 25-31 */ | ||
121 | /* are moved together in one instruction). */ | ||
122 | |||
123 | #define _PAGE_READ_BIT 31 /* (0x001) read access allowed */ | ||
124 | #define _PAGE_WRITE_BIT 30 /* (0x002) write access allowed */ | ||
125 | #define _PAGE_EXEC_BIT 29 /* (0x004) execute access allowed */ | ||
126 | #define _PAGE_GATEWAY_BIT 28 /* (0x008) privilege promotion allowed */ | ||
127 | #define _PAGE_DMB_BIT 27 /* (0x010) Data Memory Break enable (B bit) */ | ||
128 | #define _PAGE_DIRTY_BIT 26 /* (0x020) Page Dirty (D bit) */ | ||
129 | #define _PAGE_FILE_BIT _PAGE_DIRTY_BIT /* overload this bit */ | ||
130 | #define _PAGE_REFTRAP_BIT 25 /* (0x040) Page Ref. Trap enable (T bit) */ | ||
131 | #define _PAGE_NO_CACHE_BIT 24 /* (0x080) Uncached Page (U bit) */ | ||
132 | #define _PAGE_ACCESSED_BIT 23 /* (0x100) Software: Page Accessed */ | ||
133 | #define _PAGE_PRESENT_BIT 22 /* (0x200) Software: translation valid */ | ||
134 | #define _PAGE_FLUSH_BIT 21 /* (0x400) Software: translation valid */ | ||
135 | /* for cache flushing only */ | ||
136 | #define _PAGE_USER_BIT 20 /* (0x800) Software: User accessible page */ | ||
137 | |||
138 | /* N.B. The bits are defined in terms of a 32 bit word above, so the */ | ||
139 | /* following macro is ok for both 32 and 64 bit. */ | ||
140 | |||
141 | #define xlate_pabit(x) (31 - x) | ||
142 | |||
143 | /* this defines the shift to the usable bits in the PTE; it is set so | ||
144 | * that the valid bits _PAGE_PRESENT_BIT and _PAGE_USER_BIT are set | ||
145 | * to zero */ | ||
146 | #define PTE_SHIFT xlate_pabit(_PAGE_USER_BIT) | ||
147 | |||
148 | /* PFN_PTE_SHIFT defines the shift of a PTE value to access the PFN field */ | ||
149 | #define PFN_PTE_SHIFT 12 | ||
150 | |||
151 | |||
152 | /* this is how many bits may be used by the file functions */ | ||
153 | #define PTE_FILE_MAX_BITS (BITS_PER_LONG - PTE_SHIFT) | ||
154 | |||
155 | #define pte_to_pgoff(pte) (pte_val(pte) >> PTE_SHIFT) | ||
156 | #define pgoff_to_pte(off) ((pte_t) { ((off) << PTE_SHIFT) | _PAGE_FILE }) | ||
157 | |||
158 | #define _PAGE_READ (1 << xlate_pabit(_PAGE_READ_BIT)) | ||
159 | #define _PAGE_WRITE (1 << xlate_pabit(_PAGE_WRITE_BIT)) | ||
160 | #define _PAGE_RW (_PAGE_READ | _PAGE_WRITE) | ||
161 | #define _PAGE_EXEC (1 << xlate_pabit(_PAGE_EXEC_BIT)) | ||
162 | #define _PAGE_GATEWAY (1 << xlate_pabit(_PAGE_GATEWAY_BIT)) | ||
163 | #define _PAGE_DMB (1 << xlate_pabit(_PAGE_DMB_BIT)) | ||
164 | #define _PAGE_DIRTY (1 << xlate_pabit(_PAGE_DIRTY_BIT)) | ||
165 | #define _PAGE_REFTRAP (1 << xlate_pabit(_PAGE_REFTRAP_BIT)) | ||
166 | #define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT)) | ||
167 | #define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT)) | ||
168 | #define _PAGE_PRESENT (1 << xlate_pabit(_PAGE_PRESENT_BIT)) | ||
169 | #define _PAGE_FLUSH (1 << xlate_pabit(_PAGE_FLUSH_BIT)) | ||
170 | #define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT)) | ||
171 | #define _PAGE_FILE (1 << xlate_pabit(_PAGE_FILE_BIT)) | ||
172 | |||
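As a worked example of the xlate_pabit() convention used just above: the bit numbers are PA-RISC style (bit 0 is the most significant bit of the 32-bit word), so _PAGE_READ_BIT == 31 yields _PAGE_READ = 1 << (31 - 31) = 0x001, and _PAGE_USER_BIT == 20 yields _PAGE_USER = 1 << (31 - 20) = 0x800, which is exactly the hex value noted in parentheses next to each *_BIT definition.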
173 | #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED) | ||
174 | #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) | ||
175 | #define _PAGE_KERNEL (_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED) | ||
176 | |||
177 | /* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds | ||
178 | * are page-aligned, we don't care about the PAGE_OFFSET bits, except | ||
179 | * for a few meta-information bits, so we shift the address to be | ||
180 | * able to effectively address 40/42/44-bits of physical address space | ||
181 | * depending on 4k/16k/64k PAGE_SIZE */ | ||
182 | #define _PxD_PRESENT_BIT 31 | ||
183 | #define _PxD_ATTACHED_BIT 30 | ||
184 | #define _PxD_VALID_BIT 29 | ||
185 | |||
186 | #define PxD_FLAG_PRESENT (1 << xlate_pabit(_PxD_PRESENT_BIT)) | ||
187 | #define PxD_FLAG_ATTACHED (1 << xlate_pabit(_PxD_ATTACHED_BIT)) | ||
188 | #define PxD_FLAG_VALID (1 << xlate_pabit(_PxD_VALID_BIT)) | ||
189 | #define PxD_FLAG_MASK (0xf) | ||
190 | #define PxD_FLAG_SHIFT (4) | ||
191 | #define PxD_VALUE_SHIFT (8) /* (PAGE_SHIFT-PxD_FLAG_SHIFT) */ | ||
192 | |||
193 | #ifndef __ASSEMBLY__ | ||
194 | |||
195 | #define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) | ||
196 | #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED) | ||
197 | /* Others seem to make this executable, I don't know if that's correct | ||
198 | or not. The stack is mapped this way though so this is necessary | ||
199 | in the short term - dhd@linuxcare.com, 2000-08-08 */ | ||
200 | #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED) | ||
201 | #define PAGE_WRITEONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE | _PAGE_ACCESSED) | ||
202 | #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED) | ||
203 | #define PAGE_COPY PAGE_EXECREAD | ||
204 | #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED) | ||
205 | #define PAGE_KERNEL __pgprot(_PAGE_KERNEL) | ||
206 | #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE) | ||
207 | #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE) | ||
208 | #define PAGE_GATEWAY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_GATEWAY| _PAGE_READ) | ||
209 | #define PAGE_FLUSH __pgprot(_PAGE_FLUSH) | ||
210 | |||
211 | |||
212 | /* | ||
213 | * We could have an execute only page using "gateway - promote to priv | ||
214 | * level 3", but that is kind of silly. So, the way things are defined | ||
215 | * now, we must always have read permission for pages with execute | ||
216 | * permission. For the fun of it we'll go ahead and support write only | ||
217 | * pages. | ||
218 | */ | ||
219 | |||
220 | /*xwr*/ | ||
221 | #define __P000 PAGE_NONE | ||
222 | #define __P001 PAGE_READONLY | ||
223 | #define __P010 __P000 /* copy on write */ | ||
224 | #define __P011 __P001 /* copy on write */ | ||
225 | #define __P100 PAGE_EXECREAD | ||
226 | #define __P101 PAGE_EXECREAD | ||
227 | #define __P110 __P100 /* copy on write */ | ||
228 | #define __P111 __P101 /* copy on write */ | ||
229 | |||
230 | #define __S000 PAGE_NONE | ||
231 | #define __S001 PAGE_READONLY | ||
232 | #define __S010 PAGE_WRITEONLY | ||
233 | #define __S011 PAGE_SHARED | ||
234 | #define __S100 PAGE_EXECREAD | ||
235 | #define __S101 PAGE_EXECREAD | ||
236 | #define __S110 PAGE_RWX | ||
237 | #define __S111 PAGE_RWX | ||
238 | |||
239 | |||
240 | extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */ | ||
241 | |||
242 | /* initial page tables for 0-8MB for kernel */ | ||
243 | |||
244 | extern pte_t pg0[]; | ||
245 | |||
246 | /* zero page used for uninitialized stuff */ | ||
247 | |||
248 | extern unsigned long *empty_zero_page; | ||
249 | |||
250 | /* | ||
251 | * ZERO_PAGE is a global shared page that is always zero: used | ||
252 | * for zero-mapped memory areas etc.. | ||
253 | */ | ||
254 | |||
255 | #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) | ||
256 | |||
257 | #define pte_none(x) ((pte_val(x) == 0) || (pte_val(x) & _PAGE_FLUSH)) | ||
258 | #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) | ||
259 | #define pte_clear(mm,addr,xp) do { pte_val(*(xp)) = 0; } while (0) | ||
260 | |||
261 | #define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK) | ||
262 | #define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT) | ||
263 | #define pgd_flag(x) (pgd_val(x) & PxD_FLAG_MASK) | ||
264 | #define pgd_address(x) ((unsigned long)(pgd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT) | ||
265 | |||
266 | #if PT_NLEVELS == 3 | ||
267 | /* The first entry of the permanent pmd is not there if it contains | ||
268 | * the gateway marker */ | ||
269 | #define pmd_none(x) (!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED) | ||
270 | #else | ||
271 | #define pmd_none(x) (!pmd_val(x)) | ||
272 | #endif | ||
273 | #define pmd_bad(x) (!(pmd_flag(x) & PxD_FLAG_VALID)) | ||
274 | #define pmd_present(x) (pmd_flag(x) & PxD_FLAG_PRESENT) | ||
275 | static inline void pmd_clear(pmd_t *pmd) { | ||
276 | #if PT_NLEVELS == 3 | ||
277 | if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) | ||
278 | /* This is the entry pointing to the permanent pmd | ||
279 | * attached to the pgd; cannot clear it */ | ||
280 | __pmd_val_set(*pmd, PxD_FLAG_ATTACHED); | ||
281 | else | ||
282 | #endif | ||
283 | __pmd_val_set(*pmd, 0); | ||
284 | } | ||
285 | |||
286 | |||
287 | |||
288 | #if PT_NLEVELS == 3 | ||
289 | #define pgd_page_vaddr(pgd) ((unsigned long) __va(pgd_address(pgd))) | ||
290 | #define pgd_page(pgd) virt_to_page((void *)pgd_page_vaddr(pgd)) | ||
291 | |||
292 | /* For 64 bit we have three level tables */ | ||
293 | |||
294 | #define pgd_none(x) (!pgd_val(x)) | ||
295 | #define pgd_bad(x) (!(pgd_flag(x) & PxD_FLAG_VALID)) | ||
296 | #define pgd_present(x) (pgd_flag(x) & PxD_FLAG_PRESENT) | ||
297 | static inline void pgd_clear(pgd_t *pgd) { | ||
298 | #if PT_NLEVELS == 3 | ||
299 | if(pgd_flag(*pgd) & PxD_FLAG_ATTACHED) | ||
300 | /* This is the permanent pmd attached to the pgd; cannot | ||
301 | * free it */ | ||
302 | return; | ||
303 | #endif | ||
304 | __pgd_val_set(*pgd, 0); | ||
305 | } | ||
306 | #else | ||
307 | /* | ||
308 | * The "pgd_xxx()" functions here are trivial for a folded two-level | ||
309 | * setup: the pgd is never bad, and a pmd always exists (as it's folded | ||
310 | * into the pgd entry) | ||
311 | */ | ||
312 | static inline int pgd_none(pgd_t pgd) { return 0; } | ||
313 | static inline int pgd_bad(pgd_t pgd) { return 0; } | ||
314 | static inline int pgd_present(pgd_t pgd) { return 1; } | ||
315 | static inline void pgd_clear(pgd_t * pgdp) { } | ||
316 | #endif | ||
317 | |||
318 | /* | ||
319 | * The following only work if pte_present() is true. | ||
320 | * Undefined behaviour if not.. | ||
321 | */ | ||
322 | static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } | ||
323 | static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } | ||
324 | static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; } | ||
325 | static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } | ||
326 | static inline int pte_special(pte_t pte) { return 0; } | ||
327 | |||
328 | static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; } | ||
329 | static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } | ||
330 | static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_WRITE; return pte; } | ||
331 | static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; } | ||
332 | static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; } | ||
333 | static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return pte; } | ||
334 | static inline pte_t pte_mkspecial(pte_t pte) { return pte; } | ||
335 | |||
336 | /* | ||
337 | * Conversion functions: convert a page and protection to a page entry, | ||
338 | * and a page entry and page directory to the page they refer to. | ||
339 | */ | ||
340 | #define __mk_pte(addr,pgprot) \ | ||
341 | ({ \ | ||
342 | pte_t __pte; \ | ||
343 | \ | ||
344 | pte_val(__pte) = ((((addr)>>PAGE_SHIFT)<<PFN_PTE_SHIFT) + pgprot_val(pgprot)); \ | ||
345 | \ | ||
346 | __pte; \ | ||
347 | }) | ||
348 | |||
349 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) | ||
350 | |||
351 | static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) | ||
352 | { | ||
353 | pte_t pte; | ||
354 | pte_val(pte) = (pfn << PFN_PTE_SHIFT) | pgprot_val(pgprot); | ||
355 | return pte; | ||
356 | } | ||
357 | |||
358 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | ||
359 | { pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; } | ||
360 | |||
361 | /* Permanent address of a page. On parisc we don't have highmem. */ | ||
362 | |||
363 | #define pte_pfn(x) (pte_val(x) >> PFN_PTE_SHIFT) | ||
364 | |||
365 | #define pte_page(pte) (pfn_to_page(pte_pfn(pte))) | ||
366 | |||
367 | #define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_address(pmd))) | ||
368 | |||
369 | #define __pmd_page(pmd) ((unsigned long) __va(pmd_address(pmd))) | ||
370 | #define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd)) | ||
371 | |||
372 | #define pgd_index(address) ((address) >> PGDIR_SHIFT) | ||
373 | |||
374 | /* to find an entry in a page-table-directory */ | ||
375 | #define pgd_offset(mm, address) \ | ||
376 | ((mm)->pgd + ((address) >> PGDIR_SHIFT)) | ||
377 | |||
378 | /* to find an entry in a kernel page-table-directory */ | ||
379 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) | ||
380 | |||
381 | /* Find an entry in the second-level page table.. */ | ||
382 | |||
383 | #if PT_NLEVELS == 3 | ||
384 | #define pmd_offset(dir,address) \ | ||
385 | ((pmd_t *) pgd_page_vaddr(*(dir)) + (((address)>>PMD_SHIFT) & (PTRS_PER_PMD-1))) | ||
386 | #else | ||
387 | #define pmd_offset(dir,addr) ((pmd_t *) dir) | ||
388 | #endif | ||
389 | |||
390 | /* Find an entry in the third-level page table.. */ | ||
391 | #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1)) | ||
392 | #define pte_offset_kernel(pmd, address) \ | ||
393 | ((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address)) | ||
394 | #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address) | ||
395 | #define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address) | ||
396 | #define pte_unmap(pte) do { } while (0) | ||
397 | #define pte_unmap_nested(pte) do { } while (0) | ||
398 | |||
401 | |||
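Taken together, the macros above form the usual software page-table walk. Below is a minimal sketch (not part of this patch) of resolving a virtual address in a given mm to a page frame number; locking and huge or invalid entries are deliberately ignored, and there is no pud level because this header uses the 4level-fixup scheme.

/* Illustrative sketch only; callers would normally hold the pte lock. */
#include <linux/mm.h>
#include <asm/pgtable.h>

static unsigned long example_vaddr_to_pfn(struct mm_struct *mm, unsigned long addr)
{
        unsigned long pfn = 0;
        pgd_t *pgd = pgd_offset(mm, addr);
        pmd_t *pmd;
        pte_t *pte;

        if (pgd_none(*pgd) || pgd_bad(*pgd))
                return 0;
        pmd = pmd_offset(pgd, addr);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return 0;
        pte = pte_offset_map(pmd, addr);
        if (pte_present(*pte))
                pfn = pte_pfn(*pte);
        pte_unmap(pte);
        return pfn;
}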
402 | extern void paging_init (void); | ||
403 | |||
404 | /* Used for deferring calls to flush_dcache_page() */ | ||
405 | |||
406 | #define PG_dcache_dirty PG_arch_1 | ||
407 | |||
408 | extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t); | ||
409 | |||
410 | /* Encode and de-code a swap entry */ | ||
411 | |||
412 | #define __swp_type(x) ((x).val & 0x1f) | ||
413 | #define __swp_offset(x) ( (((x).val >> 6) & 0x7) | \ | ||
414 | (((x).val >> 8) & ~0x7) ) | ||
415 | #define __swp_entry(type, offset) ((swp_entry_t) { (type) | \ | ||
416 | ((offset & 0x7) << 6) | \ | ||
417 | ((offset & ~0x7) << 8) }) | ||
418 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) | ||
419 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) | ||
420 | |||
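A worked example of the swap encoding above: __swp_entry(3, 0x12) puts the type in bits 0-4, the low three offset bits at bits 6-8 and the remaining offset bits from bit 11 upward, giving 0x3 | (0x2 << 6) | (0x10 << 8) = 0x1083; decoding recovers type 0x1083 & 0x1f = 3 and offset ((0x1083 >> 6) & 0x7) | ((0x1083 >> 8) & ~0x7) = 0x12. The split appears to exist so that a swap pte never sets bit 5 (_PAGE_FILE/_PAGE_DIRTY), bit 9 (_PAGE_PRESENT) or bit 10 (_PAGE_FLUSH); that reading is inferred from the bit definitions earlier in this file rather than stated by the patch.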
421 | static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) | ||
422 | { | ||
423 | #ifdef CONFIG_SMP | ||
424 | if (!pte_young(*ptep)) | ||
425 | return 0; | ||
426 | return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), &pte_val(*ptep)); | ||
427 | #else | ||
428 | pte_t pte = *ptep; | ||
429 | if (!pte_young(pte)) | ||
430 | return 0; | ||
431 | set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte)); | ||
432 | return 1; | ||
433 | #endif | ||
434 | } | ||
435 | |||
436 | extern spinlock_t pa_dbit_lock; | ||
437 | |||
438 | struct mm_struct; | ||
439 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | ||
440 | { | ||
441 | pte_t old_pte; | ||
442 | pte_t pte; | ||
443 | |||
444 | spin_lock(&pa_dbit_lock); | ||
445 | pte = old_pte = *ptep; | ||
446 | pte_val(pte) &= ~_PAGE_PRESENT; | ||
447 | pte_val(pte) |= _PAGE_FLUSH; | ||
448 | set_pte_at(mm,addr,ptep,pte); | ||
449 | spin_unlock(&pa_dbit_lock); | ||
450 | |||
451 | return old_pte; | ||
452 | } | ||
453 | |||
454 | static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | ||
455 | { | ||
456 | #ifdef CONFIG_SMP | ||
457 | unsigned long new, old; | ||
458 | |||
459 | do { | ||
460 | old = pte_val(*ptep); | ||
461 | new = pte_val(pte_wrprotect(__pte (old))); | ||
462 | } while (cmpxchg((unsigned long *) ptep, old, new) != old); | ||
463 | #else | ||
464 | pte_t old_pte = *ptep; | ||
465 | set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); | ||
466 | #endif | ||
467 | } | ||
468 | |||
469 | #define pte_same(A,B) (pte_val(A) == pte_val(B)) | ||
470 | |||
471 | #endif /* !__ASSEMBLY__ */ | ||
472 | |||
473 | |||
474 | /* TLB page size encoding - see table 3-1 in parisc20.pdf */ | ||
475 | #define _PAGE_SIZE_ENCODING_4K 0 | ||
476 | #define _PAGE_SIZE_ENCODING_16K 1 | ||
477 | #define _PAGE_SIZE_ENCODING_64K 2 | ||
478 | #define _PAGE_SIZE_ENCODING_256K 3 | ||
479 | #define _PAGE_SIZE_ENCODING_1M 4 | ||
480 | #define _PAGE_SIZE_ENCODING_4M 5 | ||
481 | #define _PAGE_SIZE_ENCODING_16M 6 | ||
482 | #define _PAGE_SIZE_ENCODING_64M 7 | ||
483 | |||
484 | #if defined(CONFIG_PARISC_PAGE_SIZE_4KB) | ||
485 | # define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4K | ||
486 | #elif defined(CONFIG_PARISC_PAGE_SIZE_16KB) | ||
487 | # define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16K | ||
488 | #elif defined(CONFIG_PARISC_PAGE_SIZE_64KB) | ||
489 | # define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_64K | ||
490 | #endif | ||
491 | |||
492 | |||
493 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ | ||
494 | remap_pfn_range(vma, vaddr, pfn, size, prot) | ||
495 | |||
496 | #define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE) | ||
497 | |||
498 | /* We provide our own get_unmapped_area to provide cache coherency */ | ||
499 | |||
500 | #define HAVE_ARCH_UNMAPPED_AREA | ||
501 | |||
502 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG | ||
503 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | ||
504 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT | ||
505 | #define __HAVE_ARCH_PTE_SAME | ||
506 | #include <asm-generic/pgtable.h> | ||
507 | |||
508 | #endif /* _PARISC_PGTABLE_H */ | ||
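The pgprot_noncached() and io_remap_pfn_range() helpers defined at the end of this header are typically used together in a driver's mmap handler. A hedged sketch follows (not part of this patch); EXAMPLE_MMIO_BASE is a made-up placeholder, not a real device address, and all range checking is omitted.

/* Illustrative sketch only. */
#include <linux/fs.h>
#include <linux/mm.h>
#include <asm/pgtable.h>

#define EXAMPLE_MMIO_BASE 0xf4000000UL  /* placeholder physical base */

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long size = vma->vm_end - vma->vm_start;

        /* map the region uncached, as MMIO normally requires */
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        return io_remap_pfn_range(vma, vma->vm_start,
                                  EXAMPLE_MMIO_BASE >> PAGE_SHIFT,
                                  size, vma->vm_page_prot);
}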
diff --git a/arch/parisc/include/asm/poll.h b/arch/parisc/include/asm/poll.h new file mode 100644 index 000000000000..c98509d3149e --- /dev/null +++ b/arch/parisc/include/asm/poll.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/poll.h> | |||
diff --git a/arch/parisc/include/asm/posix_types.h b/arch/parisc/include/asm/posix_types.h new file mode 100644 index 000000000000..bb725a6630bb --- /dev/null +++ b/arch/parisc/include/asm/posix_types.h | |||
@@ -0,0 +1,129 @@ | |||
1 | #ifndef __ARCH_PARISC_POSIX_TYPES_H | ||
2 | #define __ARCH_PARISC_POSIX_TYPES_H | ||
3 | |||
4 | /* | ||
5 | * This file is generally used by user-level software, so you need to | ||
6 | * be a little careful about namespace pollution etc. Also, we cannot | ||
7 | * assume GCC is being used. | ||
8 | */ | ||
9 | typedef unsigned long __kernel_ino_t; | ||
10 | typedef unsigned short __kernel_mode_t; | ||
11 | typedef unsigned short __kernel_nlink_t; | ||
12 | typedef long __kernel_off_t; | ||
13 | typedef int __kernel_pid_t; | ||
14 | typedef unsigned short __kernel_ipc_pid_t; | ||
15 | typedef unsigned int __kernel_uid_t; | ||
16 | typedef unsigned int __kernel_gid_t; | ||
17 | typedef int __kernel_suseconds_t; | ||
18 | typedef long __kernel_clock_t; | ||
19 | typedef int __kernel_timer_t; | ||
20 | typedef int __kernel_clockid_t; | ||
21 | typedef int __kernel_daddr_t; | ||
22 | /* Note these change from narrow to wide kernels */ | ||
23 | #ifdef CONFIG_64BIT | ||
24 | typedef unsigned long __kernel_size_t; | ||
25 | typedef long __kernel_ssize_t; | ||
26 | typedef long __kernel_ptrdiff_t; | ||
27 | typedef long __kernel_time_t; | ||
28 | #else | ||
29 | typedef unsigned int __kernel_size_t; | ||
30 | typedef int __kernel_ssize_t; | ||
31 | typedef int __kernel_ptrdiff_t; | ||
32 | typedef long __kernel_time_t; | ||
33 | #endif | ||
34 | typedef char * __kernel_caddr_t; | ||
35 | |||
36 | typedef unsigned short __kernel_uid16_t; | ||
37 | typedef unsigned short __kernel_gid16_t; | ||
38 | typedef unsigned int __kernel_uid32_t; | ||
39 | typedef unsigned int __kernel_gid32_t; | ||
40 | |||
41 | #ifdef __GNUC__ | ||
42 | typedef long long __kernel_loff_t; | ||
43 | typedef long long __kernel_off64_t; | ||
44 | typedef unsigned long long __kernel_ino64_t; | ||
45 | #endif | ||
46 | |||
47 | typedef unsigned int __kernel_old_dev_t; | ||
48 | |||
49 | typedef struct { | ||
50 | int val[2]; | ||
51 | } __kernel_fsid_t; | ||
52 | |||
53 | /* compatibility stuff */ | ||
54 | typedef __kernel_uid_t __kernel_old_uid_t; | ||
55 | typedef __kernel_gid_t __kernel_old_gid_t; | ||
56 | |||
57 | #if defined(__KERNEL__) | ||
58 | |||
59 | #undef __FD_SET | ||
60 | static __inline__ void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp) | ||
61 | { | ||
62 | unsigned long __tmp = __fd / __NFDBITS; | ||
63 | unsigned long __rem = __fd % __NFDBITS; | ||
64 | __fdsetp->fds_bits[__tmp] |= (1UL<<__rem); | ||
65 | } | ||
66 | |||
67 | #undef __FD_CLR | ||
68 | static __inline__ void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp) | ||
69 | { | ||
70 | unsigned long __tmp = __fd / __NFDBITS; | ||
71 | unsigned long __rem = __fd % __NFDBITS; | ||
72 | __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem); | ||
73 | } | ||
74 | |||
75 | #undef __FD_ISSET | ||
76 | static __inline__ int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p) | ||
77 | { | ||
78 | unsigned long __tmp = __fd / __NFDBITS; | ||
79 | unsigned long __rem = __fd % __NFDBITS; | ||
80 | return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0; | ||
81 | } | ||
82 | |||
83 | /* | ||
84 | * This will unroll the loop for the normal constant case (8 ints, | ||
85 | * for a 256-bit fd_set) | ||
86 | */ | ||
87 | #undef __FD_ZERO | ||
88 | static __inline__ void __FD_ZERO(__kernel_fd_set *__p) | ||
89 | { | ||
90 | unsigned long *__tmp = __p->fds_bits; | ||
91 | int __i; | ||
92 | |||
93 | if (__builtin_constant_p(__FDSET_LONGS)) { | ||
94 | switch (__FDSET_LONGS) { | ||
95 | case 16: | ||
96 | __tmp[ 0] = 0; __tmp[ 1] = 0; | ||
97 | __tmp[ 2] = 0; __tmp[ 3] = 0; | ||
98 | __tmp[ 4] = 0; __tmp[ 5] = 0; | ||
99 | __tmp[ 6] = 0; __tmp[ 7] = 0; | ||
100 | __tmp[ 8] = 0; __tmp[ 9] = 0; | ||
101 | __tmp[10] = 0; __tmp[11] = 0; | ||
102 | __tmp[12] = 0; __tmp[13] = 0; | ||
103 | __tmp[14] = 0; __tmp[15] = 0; | ||
104 | return; | ||
105 | |||
106 | case 8: | ||
107 | __tmp[ 0] = 0; __tmp[ 1] = 0; | ||
108 | __tmp[ 2] = 0; __tmp[ 3] = 0; | ||
109 | __tmp[ 4] = 0; __tmp[ 5] = 0; | ||
110 | __tmp[ 6] = 0; __tmp[ 7] = 0; | ||
111 | return; | ||
112 | |||
113 | case 4: | ||
114 | __tmp[ 0] = 0; __tmp[ 1] = 0; | ||
115 | __tmp[ 2] = 0; __tmp[ 3] = 0; | ||
116 | return; | ||
117 | } | ||
118 | } | ||
119 | __i = __FDSET_LONGS; | ||
120 | while (__i) { | ||
121 | __i--; | ||
122 | *__tmp = 0; | ||
123 | __tmp++; | ||
124 | } | ||
125 | } | ||
126 | |||
127 | #endif /* defined(__KERNEL__) */ | ||
128 | |||
129 | #endif | ||
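The __FD_SET/__FD_CLR/__FD_ISSET helpers above all reduce to the same word-and-bit arithmetic: the descriptor number is split into a word index (fd / __NFDBITS) and a bit position (fd % __NFDBITS) inside fds_bits[]. A minimal user-space sketch of that arithmetic (the names and the word width are assumptions for illustration, not taken from this header):

#include <assert.h>
#include <stdio.h>

#define NFDBITS_DEMO (8 * sizeof(unsigned long))  /* assumed word width */

int main(void)
{
	unsigned long bits[4] = { 0 };           /* small stand-in for fds_bits[] */
	unsigned long fd = 70;
	unsigned long word = fd / NFDBITS_DEMO;  /* which long holds the bit */
	unsigned long bit  = fd % NFDBITS_DEMO;  /* which bit inside that long */

	bits[word] |= 1UL << bit;                /* what __FD_SET does */
	assert((bits[word] >> bit) & 1);         /* what __FD_ISSET tests */
	printf("fd %lu -> word %lu, bit %lu\n", fd, word, bit);

	bits[word] &= ~(1UL << bit);             /* what __FD_CLR does */
	assert(((bits[word] >> bit) & 1) == 0);
	return 0;
}

The __FD_ZERO unrolling above is just the constant-size special case of clearing those words one long at a time.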
diff --git a/arch/parisc/include/asm/prefetch.h b/arch/parisc/include/asm/prefetch.h new file mode 100644 index 000000000000..c5edc60c059f --- /dev/null +++ b/arch/parisc/include/asm/prefetch.h | |||
@@ -0,0 +1,39 @@ | |||
1 | /* | ||
2 | * include/asm-parisc/prefetch.h | ||
3 | * | ||
4 | * PA 2.0 defines data prefetch instructions on page 6-11 of the Kane book. | ||
5 | * In addition, many implementations do hardware prefetching of both | ||
6 | * instructions and data. | ||
7 | * | ||
8 | * PA7300LC (page 14-4 of the ERS) also implements prefetching by a load | ||
9 | * to gr0 but not in a way that Linux can use. If the load would cause an | ||
10 | * interruption (eg due to prefetching 0), it is suppressed on PA2.0 | ||
11 | * processors, but not on 7300LC. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #ifndef __ASM_PARISC_PREFETCH_H | ||
16 | #define __ASM_PARISC_PREFETCH_H | ||
17 | |||
18 | #ifndef __ASSEMBLY__ | ||
19 | #ifdef CONFIG_PREFETCH | ||
20 | |||
21 | #define ARCH_HAS_PREFETCH | ||
22 | static inline void prefetch(const void *addr) | ||
23 | { | ||
24 | __asm__("ldw 0(%0), %%r0" : : "r" (addr)); | ||
25 | } | ||
26 | |||
27 | /* LDD is a PA2.0 addition. */ | ||
28 | #ifdef CONFIG_PA20 | ||
29 | #define ARCH_HAS_PREFETCHW | ||
30 | static inline void prefetchw(const void *addr) | ||
31 | { | ||
32 | __asm__("ldd 0(%0), %%r0" : : "r" (addr)); | ||
33 | } | ||
34 | #endif /* CONFIG_PA20 */ | ||
35 | |||
36 | #endif /* CONFIG_PREFETCH */ | ||
37 | #endif /* __ASSEMBLY__ */ | ||
38 | |||
39 | #endif /* __ASM_PARISC_PREFETCH_H */ | ||
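The prefetch() above issues a load whose result is discarded into %r0, which pulls the cache line in without stalling for the data. A portable sketch of the usual calling pattern, using GCC's __builtin_prefetch as a stand-in (the PA inline asm only builds on parisc, and the list type here is invented for illustration):

#include <stddef.h>

struct node {                    /* hypothetical singly-linked list */
	struct node *next;
	long payload;
};

static long sum_list(struct node *n)
{
	long total = 0;

	while (n) {
		/* start fetching the next node while we work on this one */
		if (n->next)
			__builtin_prefetch(n->next);
		total += n->payload;
		n = n->next;
	}
	return total;
}

The point of the pattern is that the prefetch overlaps the miss latency of n->next with the arithmetic on the current node.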
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h new file mode 100644 index 000000000000..3c9d34844c83 --- /dev/null +++ b/arch/parisc/include/asm/processor.h | |||
@@ -0,0 +1,357 @@ | |||
1 | /* | ||
2 | * include/asm-parisc/processor.h | ||
3 | * | ||
4 | * Copyright (C) 1994 Linus Torvalds | ||
5 | * Copyright (C) 2001 Grant Grundler | ||
6 | */ | ||
7 | |||
8 | #ifndef __ASM_PARISC_PROCESSOR_H | ||
9 | #define __ASM_PARISC_PROCESSOR_H | ||
10 | |||
11 | #ifndef __ASSEMBLY__ | ||
12 | #include <linux/threads.h> | ||
13 | |||
14 | #include <asm/prefetch.h> | ||
15 | #include <asm/hardware.h> | ||
16 | #include <asm/pdc.h> | ||
17 | #include <asm/ptrace.h> | ||
18 | #include <asm/types.h> | ||
19 | #include <asm/system.h> | ||
20 | #endif /* __ASSEMBLY__ */ | ||
21 | |||
22 | #define KERNEL_STACK_SIZE (4*PAGE_SIZE) | ||
23 | |||
24 | /* | ||
25 | * Default implementation of macro that returns current | ||
26 | * instruction pointer ("program counter"). | ||
27 | */ | ||
28 | #ifdef CONFIG_PA20 | ||
29 | #define current_ia(x) __asm__("mfia %0" : "=r"(x)) | ||
30 | #else /* mfia added in pa2.0 */ | ||
31 | #define current_ia(x) __asm__("blr 0,%0\n\tnop" : "=r"(x)) | ||
32 | #endif | ||
33 | #define current_text_addr() ({ void *pc; current_ia(pc); pc; }) | ||
34 | |||
35 | #define TASK_SIZE_OF(tsk) ((tsk)->thread.task_size) | ||
36 | #define TASK_SIZE TASK_SIZE_OF(current) | ||
37 | #define TASK_UNMAPPED_BASE (current->thread.map_base) | ||
38 | |||
39 | #define DEFAULT_TASK_SIZE32 (0xFFF00000UL) | ||
40 | #define DEFAULT_MAP_BASE32 (0x40000000UL) | ||
41 | |||
42 | #ifdef CONFIG_64BIT | ||
43 | #define DEFAULT_TASK_SIZE (MAX_ADDRESS-0xf000000) | ||
44 | #define DEFAULT_MAP_BASE (0x200000000UL) | ||
45 | #else | ||
46 | #define DEFAULT_TASK_SIZE DEFAULT_TASK_SIZE32 | ||
47 | #define DEFAULT_MAP_BASE DEFAULT_MAP_BASE32 | ||
48 | #endif | ||
49 | |||
50 | #ifdef __KERNEL__ | ||
51 | |||
52 | /* XXX: STACK_TOP actually should be STACK_BOTTOM for parisc. | ||
53 | * prumpf */ | ||
54 | |||
55 | #define STACK_TOP TASK_SIZE | ||
56 | #define STACK_TOP_MAX DEFAULT_TASK_SIZE | ||
57 | |||
58 | #endif | ||
59 | |||
60 | #ifndef __ASSEMBLY__ | ||
61 | |||
62 | /* | ||
63 | * Data detected about CPUs at boot time which is the same for all CPU's. | ||
64 | * HP boxes are SMP - ie identical processors. | ||
65 | * | ||
66 | * FIXME: some CPU rev info may be processor specific... | ||
67 | */ | ||
68 | struct system_cpuinfo_parisc { | ||
69 | unsigned int cpu_count; | ||
70 | unsigned int cpu_hz; | ||
71 | unsigned int hversion; | ||
72 | unsigned int sversion; | ||
73 | enum cpu_type cpu_type; | ||
74 | |||
75 | struct { | ||
76 | struct pdc_model model; | ||
77 | unsigned long versions; | ||
78 | unsigned long cpuid; | ||
79 | unsigned long capabilities; | ||
80 | char sys_model_name[81]; /* PDC-ROM returns this model name */ | ||
81 | } pdc; | ||
82 | |||
83 | const char *cpu_name; /* e.g. "PA7300LC (PCX-L2)" */ | ||
84 | const char *family_name; /* e.g. "1.1e" */ | ||
85 | }; | ||
86 | |||
87 | |||
88 | /* Per CPU data structure - ie varies per CPU. */ | ||
89 | struct cpuinfo_parisc { | ||
90 | unsigned long it_value; /* Interval Timer at last timer Intr */ | ||
91 | unsigned long it_delta; /* Interval delta (tic_10ms / HZ * 100) */ | ||
92 | unsigned long irq_count; /* number of IRQ's since boot */ | ||
93 | unsigned long irq_max_cr16; /* longest time to handle a single IRQ */ | ||
94 | unsigned long cpuid; /* aka slot_number or set to NO_PROC_ID */ | ||
95 | unsigned long hpa; /* Host Physical address */ | ||
96 | unsigned long txn_addr; /* MMIO addr of EIR or id_eid */ | ||
97 | #ifdef CONFIG_SMP | ||
98 | unsigned long pending_ipi; /* bitmap of type ipi_message_type */ | ||
99 | unsigned long ipi_count; /* number ipi Interrupts */ | ||
100 | #endif | ||
101 | unsigned long bh_count; /* number of times bh was invoked */ | ||
102 | unsigned long prof_counter; /* per CPU profiling support */ | ||
103 | unsigned long prof_multiplier; /* per CPU profiling support */ | ||
104 | unsigned long fp_rev; | ||
105 | unsigned long fp_model; | ||
106 | unsigned int state; | ||
107 | struct parisc_device *dev; | ||
108 | unsigned long loops_per_jiffy; | ||
109 | }; | ||
110 | |||
111 | extern struct system_cpuinfo_parisc boot_cpu_data; | ||
112 | extern struct cpuinfo_parisc cpu_data[NR_CPUS]; | ||
113 | #define current_cpu_data cpu_data[smp_processor_id()] | ||
114 | |||
115 | #define CPU_HVERSION ((boot_cpu_data.hversion >> 4) & 0x0FFF) | ||
116 | |||
117 | typedef struct { | ||
118 | int seg; | ||
119 | } mm_segment_t; | ||
120 | |||
121 | #define ARCH_MIN_TASKALIGN 8 | ||
122 | |||
123 | struct thread_struct { | ||
124 | struct pt_regs regs; | ||
125 | unsigned long task_size; | ||
126 | unsigned long map_base; | ||
127 | unsigned long flags; | ||
128 | }; | ||
129 | |||
130 | /* Thread struct flags. */ | ||
131 | #define PARISC_UAC_NOPRINT (1UL << 0) /* see prctl and unaligned.c */ | ||
132 | #define PARISC_UAC_SIGBUS (1UL << 1) | ||
133 | #define PARISC_KERNEL_DEATH (1UL << 31) /* see die_if_kernel()... */ | ||
134 | |||
135 | #define PARISC_UAC_SHIFT 0 | ||
136 | #define PARISC_UAC_MASK (PARISC_UAC_NOPRINT|PARISC_UAC_SIGBUS) | ||
137 | |||
138 | #define SET_UNALIGN_CTL(task,value) \ | ||
139 | ({ \ | ||
140 | (task)->thread.flags = (((task)->thread.flags & ~PARISC_UAC_MASK) \ | ||
141 | | (((value) << PARISC_UAC_SHIFT) & \ | ||
142 | PARISC_UAC_MASK)); \ | ||
143 | 0; \ | ||
144 | }) | ||
145 | |||
146 | #define GET_UNALIGN_CTL(task,addr) \ | ||
147 | ({ \ | ||
148 | put_user(((task)->thread.flags & PARISC_UAC_MASK) \ | ||
149 | >> PARISC_UAC_SHIFT, (int __user *) (addr)); \ | ||
150 | }) | ||
151 | |||
152 | #define INIT_THREAD { \ | ||
153 | .regs = { .gr = { 0, }, \ | ||
154 | .fr = { 0, }, \ | ||
155 | .sr = { 0, }, \ | ||
156 | .iasq = { 0, }, \ | ||
157 | .iaoq = { 0, }, \ | ||
158 | .cr27 = 0, \ | ||
159 | }, \ | ||
160 | .task_size = DEFAULT_TASK_SIZE, \ | ||
161 | .map_base = DEFAULT_MAP_BASE, \ | ||
162 | .flags = 0 \ | ||
163 | } | ||
164 | |||
165 | /* | ||
166 | * Return saved PC of a blocked thread. This is used by ps mostly. | ||
167 | */ | ||
168 | |||
169 | unsigned long thread_saved_pc(struct task_struct *t); | ||
170 | void show_trace(struct task_struct *task, unsigned long *stack); | ||
171 | |||
172 | /* | ||
173 | * Start user thread in another space. | ||
174 | * | ||
175 | * Note that we set both the iaoq and r31 to the new pc. When | ||
176 | * the kernel initially calls execve it will return through an | ||
177 | * rfi path that will use the values in the iaoq. The execve | ||
178 | * syscall path will return through the gateway page, and | ||
179 | * that uses r31 to branch to. | ||
180 | * | ||
181 | * For ELF we clear r23, because the dynamic linker uses it to pass | ||
182 | * the address of the finalizer function. | ||
183 | * | ||
184 | * We also initialize sr3 to an illegal value (illegal for our | ||
185 | * implementation, not for the architecture). | ||
186 | */ | ||
187 | typedef unsigned int elf_caddr_t; | ||
188 | |||
189 | #define start_thread_som(regs, new_pc, new_sp) do { \ | ||
190 | unsigned long *sp = (unsigned long *)new_sp; \ | ||
191 | __u32 spaceid = (__u32)current->mm->context; \ | ||
192 | unsigned long pc = (unsigned long)new_pc; \ | ||
193 | /* offset pc for priv. level */ \ | ||
194 | pc |= 3; \ | ||
195 | \ | ||
196 | set_fs(USER_DS); \ | ||
197 | regs->iasq[0] = spaceid; \ | ||
198 | regs->iasq[1] = spaceid; \ | ||
199 | regs->iaoq[0] = pc; \ | ||
200 | regs->iaoq[1] = pc + 4; \ | ||
201 | regs->sr[2] = LINUX_GATEWAY_SPACE; \ | ||
202 | regs->sr[3] = 0xffff; \ | ||
203 | regs->sr[4] = spaceid; \ | ||
204 | regs->sr[5] = spaceid; \ | ||
205 | regs->sr[6] = spaceid; \ | ||
206 | regs->sr[7] = spaceid; \ | ||
207 | regs->gr[ 0] = USER_PSW; \ | ||
208 | regs->gr[30] = ((new_sp)+63)&~63; \ | ||
209 | regs->gr[31] = pc; \ | ||
210 | \ | ||
211 | get_user(regs->gr[26],&sp[0]); \ | ||
212 | get_user(regs->gr[25],&sp[-1]); \ | ||
213 | get_user(regs->gr[24],&sp[-2]); \ | ||
214 | get_user(regs->gr[23],&sp[-3]); \ | ||
215 | } while(0) | ||
216 | |||
217 | /* The ELF abi wants things done a "wee bit" differently than | ||
218 | * som does. Supporting this behavior here avoids | ||
219 | * having our own version of create_elf_tables. | ||
220 | * | ||
221 | * Oh, and yes, that is not a typo, we are really passing argc in r25 | ||
222 | * and argv in r24 (rather than r26 and r25). This is because that's | ||
223 | * where __libc_start_main wants them. | ||
224 | * | ||
225 | * Duplicated from dl-machine.h for the benefit of readers: | ||
226 | * | ||
227 | * Our initial stack layout is rather different from everyone else's | ||
228 | * due to the unique PA-RISC ABI. As far as I know it looks like | ||
229 | * this: | ||
230 | |||
231 | ----------------------------------- (user startup code creates this frame) | ||
232 | | 32 bytes of magic | | ||
233 | |---------------------------------| | ||
234 | | 32 bytes argument/sp save area | | ||
235 | |---------------------------------| (bprm->p) | ||
236 | | ELF auxiliary info | | ||
237 | | (up to 28 words) | | ||
238 | |---------------------------------| | ||
239 | | NULL | | ||
240 | |---------------------------------| | ||
241 | | Environment pointers | | ||
242 | |---------------------------------| | ||
243 | | NULL | | ||
244 | |---------------------------------| | ||
245 | | Argument pointers | | ||
246 | |---------------------------------| <- argv | ||
247 | | argc (1 word) | | ||
248 | |---------------------------------| <- bprm->exec (HACK!) | ||
249 | | N bytes of slack | | ||
250 | |---------------------------------| | ||
251 | | filename passed to execve | | ||
252 | |---------------------------------| (mm->env_end) | ||
253 | | env strings | | ||
254 | |---------------------------------| (mm->env_start, mm->arg_end) | ||
255 | | arg strings | | ||
256 | |---------------------------------| | ||
257 | | additional faked arg strings if | | ||
258 | | we're invoked via binfmt_script | | ||
259 | |---------------------------------| (mm->arg_start) | ||
260 | stack base is at TASK_SIZE - rlim_max. | ||
261 | |||
262 | on downward growing arches, it looks like this: | ||
263 | stack base at TASK_SIZE | ||
264 | | filename passed to execve | ||
265 | | env strings | ||
266 | | arg strings | ||
267 | | faked arg strings | ||
268 | | slack | ||
269 | | ELF | ||
270 | | envps | ||
271 | | argvs | ||
272 | | argc | ||
273 | |||
274 | * The pleasant part of this is that if we need to skip arguments we | ||
275 | * can just decrement argc and move argv, because the stack pointer | ||
276 | * is utterly unrelated to the location of the environment and | ||
277 | * argument vectors. | ||
278 | * | ||
279 | * Note that the S/390 people took the easy way out and hacked their | ||
280 | * GCC to make the stack grow downwards. | ||
281 | * | ||
282 | * Final Note: For entry from syscall, the W (wide) bit of the PSW | ||
283 | * is stuffed into the lowest bit of the user sp (%r30), so we fill | ||
284 | * it in here from the current->personality | ||
285 | */ | ||
286 | |||
287 | #ifdef CONFIG_64BIT | ||
288 | #define USER_WIDE_MODE (!test_thread_flag(TIF_32BIT)) | ||
289 | #else | ||
290 | #define USER_WIDE_MODE 0 | ||
291 | #endif | ||
292 | |||
293 | #define start_thread(regs, new_pc, new_sp) do { \ | ||
294 | elf_addr_t *sp = (elf_addr_t *)new_sp; \ | ||
295 | __u32 spaceid = (__u32)current->mm->context; \ | ||
296 | elf_addr_t pc = (elf_addr_t)new_pc | 3; \ | ||
297 | elf_caddr_t *argv = (elf_caddr_t *)bprm->exec + 1; \ | ||
298 | \ | ||
299 | set_fs(USER_DS); \ | ||
300 | regs->iasq[0] = spaceid; \ | ||
301 | regs->iasq[1] = spaceid; \ | ||
302 | regs->iaoq[0] = pc; \ | ||
303 | regs->iaoq[1] = pc + 4; \ | ||
304 | regs->sr[2] = LINUX_GATEWAY_SPACE; \ | ||
305 | regs->sr[3] = 0xffff; \ | ||
306 | regs->sr[4] = spaceid; \ | ||
307 | regs->sr[5] = spaceid; \ | ||
308 | regs->sr[6] = spaceid; \ | ||
309 | regs->sr[7] = spaceid; \ | ||
310 | regs->gr[ 0] = USER_PSW | (USER_WIDE_MODE ? PSW_W : 0); \ | ||
311 | regs->fr[ 0] = 0LL; \ | ||
312 | regs->fr[ 1] = 0LL; \ | ||
313 | regs->fr[ 2] = 0LL; \ | ||
314 | regs->fr[ 3] = 0LL; \ | ||
315 | regs->gr[30] = (((unsigned long)sp + 63) &~ 63) | (USER_WIDE_MODE ? 1 : 0); \ | ||
316 | regs->gr[31] = pc; \ | ||
317 | \ | ||
318 | get_user(regs->gr[25], (argv - 1)); \ | ||
319 | regs->gr[24] = (long) argv; \ | ||
320 | regs->gr[23] = 0; \ | ||
321 | } while(0) | ||
322 | |||
323 | struct task_struct; | ||
324 | struct mm_struct; | ||
325 | |||
326 | /* Free all resources held by a thread. */ | ||
327 | extern void release_thread(struct task_struct *); | ||
328 | extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); | ||
329 | |||
330 | /* Prepare to copy thread state - unlazy all lazy status */ | ||
331 | #define prepare_to_copy(tsk) do { } while (0) | ||
332 | |||
333 | extern void map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm); | ||
334 | |||
335 | extern unsigned long get_wchan(struct task_struct *p); | ||
336 | |||
337 | #define KSTK_EIP(tsk) ((tsk)->thread.regs.iaoq[0]) | ||
338 | #define KSTK_ESP(tsk) ((tsk)->thread.regs.gr[30]) | ||
339 | |||
340 | #define cpu_relax() barrier() | ||
341 | |||
342 | /* Used as a macro to identify the combined VIPT/PIPT cached | ||
343 | * CPUs which require a guarantee of coherency (no inequivalent | ||
344 | * aliases with different data, whether clean or not) to operate */ | ||
345 | static inline int parisc_requires_coherency(void) | ||
346 | { | ||
347 | #ifdef CONFIG_PA8X00 | ||
348 | return (boot_cpu_data.cpu_type == mako) || | ||
349 | (boot_cpu_data.cpu_type == mako2); | ||
350 | #else | ||
351 | return 0; | ||
352 | #endif | ||
353 | } | ||
354 | |||
355 | #endif /* __ASSEMBLY__ */ | ||
356 | |||
357 | #endif /* __ASM_PARISC_PROCESSOR_H */ | ||
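SET_UNALIGN_CTL/GET_UNALIGN_CTL above are the architecture hooks behind the generic prctl(PR_SET_UNALIGN, ...) interface, and PARISC_UAC_NOPRINT/PARISC_UAC_SIGBUS line up with the generic PR_UNALIGN_NOPRINT/PR_UNALIGN_SIGBUS values. A small user-space sketch of how a process would flip its own unaligned-access policy (assumes an architecture that implements these hooks; error handling is minimal):

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	unsigned int cur = 0;

	/* fault with SIGBUS instead of silently fixing up unaligned accesses */
	if (prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS, 0, 0, 0) != 0)
		perror("PR_SET_UNALIGN");

	if (prctl(PR_GET_UNALIGN, &cur, 0, 0, 0) == 0)
		printf("unaligned-access control: %#x\n", cur);
	return 0;
}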
diff --git a/arch/parisc/include/asm/psw.h b/arch/parisc/include/asm/psw.h new file mode 100644 index 000000000000..5a3e23c9ce63 --- /dev/null +++ b/arch/parisc/include/asm/psw.h | |||
@@ -0,0 +1,62 @@ | |||
1 | #ifndef _PARISC_PSW_H | ||
2 | |||
3 | |||
4 | #define PSW_I 0x00000001 | ||
5 | #define PSW_D 0x00000002 | ||
6 | #define PSW_P 0x00000004 | ||
7 | #define PSW_Q 0x00000008 | ||
8 | |||
9 | #define PSW_R 0x00000010 | ||
10 | #define PSW_F 0x00000020 | ||
11 | #define PSW_G 0x00000040 /* PA1.x only */ | ||
12 | #define PSW_O 0x00000080 /* PA2.0 only */ | ||
13 | |||
14 | /* ssm/rsm instructions number PSW_W and PSW_E differently */ | ||
15 | #define PSW_SM_I PSW_I /* Enable External Interrupts */ | ||
16 | #define PSW_SM_D PSW_D | ||
17 | #define PSW_SM_P PSW_P | ||
18 | #define PSW_SM_Q PSW_Q /* Enable Interrupt State Collection */ | ||
19 | #define PSW_SM_R PSW_R /* Enable Recover Counter Trap */ | ||
20 | #define PSW_SM_W 0x200 /* PA2.0 only : Enable Wide Mode */ | ||
21 | |||
22 | #define PSW_SM_QUIET PSW_SM_R+PSW_SM_Q+PSW_SM_P+PSW_SM_D+PSW_SM_I | ||
23 | |||
24 | #define PSW_CB 0x0000ff00 | ||
25 | |||
26 | #define PSW_M 0x00010000 | ||
27 | #define PSW_V 0x00020000 | ||
28 | #define PSW_C 0x00040000 | ||
29 | #define PSW_B 0x00080000 | ||
30 | |||
31 | #define PSW_X 0x00100000 | ||
32 | #define PSW_N 0x00200000 | ||
33 | #define PSW_L 0x00400000 | ||
34 | #define PSW_H 0x00800000 | ||
35 | |||
36 | #define PSW_T 0x01000000 | ||
37 | #define PSW_S 0x02000000 | ||
38 | #define PSW_E 0x04000000 | ||
39 | #define PSW_W 0x08000000 /* PA2.0 only */ | ||
40 | #define PSW_W_BIT 36 /* PA2.0 only */ | ||
41 | |||
42 | #define PSW_Z 0x40000000 /* PA1.x only */ | ||
43 | #define PSW_Y 0x80000000 /* PA1.x only */ | ||
44 | |||
45 | #ifdef CONFIG_64BIT | ||
46 | # define PSW_HI_CB 0x000000ff /* PA2.0 only */ | ||
47 | #endif | ||
48 | |||
49 | #ifdef CONFIG_64BIT | ||
50 | # define USER_PSW_HI_MASK PSW_HI_CB | ||
51 | # define WIDE_PSW PSW_W | ||
52 | #else | ||
53 | # define WIDE_PSW 0 | ||
54 | #endif | ||
55 | |||
56 | /* Used when setting up for rfi */ | ||
57 | #define KERNEL_PSW (WIDE_PSW | PSW_C | PSW_Q | PSW_P | PSW_D) | ||
58 | #define REAL_MODE_PSW (WIDE_PSW | PSW_Q) | ||
59 | #define USER_PSW_MASK (WIDE_PSW | PSW_T | PSW_N | PSW_X | PSW_B | PSW_V | PSW_CB) | ||
60 | #define USER_PSW (PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I) | ||
61 | |||
62 | #endif | ||
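As a quick sanity check of the composite masks above on a narrow (32-bit) kernel, where WIDE_PSW is 0: KERNEL_PSW = PSW_C | PSW_Q | PSW_P | PSW_D = 0x00040000 | 0x8 | 0x4 | 0x2 = 0x0004000e, USER_PSW adds PSW_I for 0x0004000f, and REAL_MODE_PSW keeps only PSW_Q = 0x8. A throwaway check, with the constants copied locally so it builds anywhere:

#include <assert.h>

/* values copied from the header above, 32-bit (WIDE_PSW == 0) case */
#define PSW_I 0x00000001
#define PSW_D 0x00000002
#define PSW_P 0x00000004
#define PSW_Q 0x00000008
#define PSW_C 0x00040000

int main(void)
{
	assert((PSW_C | PSW_Q | PSW_P | PSW_D) == 0x0004000e);         /* KERNEL_PSW   */
	assert((PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I) == 0x0004000f); /* USER_PSW     */
	assert(PSW_Q == 0x8);                                          /* REAL_MODE_PSW */
	return 0;
}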
diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h new file mode 100644 index 000000000000..afa5333187b4 --- /dev/null +++ b/arch/parisc/include/asm/ptrace.h | |||
@@ -0,0 +1,68 @@ | |||
1 | #ifndef _PARISC_PTRACE_H | ||
2 | #define _PARISC_PTRACE_H | ||
3 | |||
4 | /* written by Philipp Rumpf, Copyright (C) 1999 SuSE GmbH Nuernberg | ||
5 | ** Copyright (C) 2000 Grant Grundler, Hewlett-Packard | ||
6 | */ | ||
7 | |||
8 | #include <linux/types.h> | ||
9 | |||
10 | /* This struct defines the way the registers are stored on the | ||
11 | * stack during a system call. | ||
12 | * | ||
13 | * N.B. gdb/strace care about the size and offsets within this | ||
14 | * structure. If you change things, you may break object compatibility | ||
15 | * for those applications. | ||
16 | */ | ||
17 | |||
18 | struct pt_regs { | ||
19 | unsigned long gr[32]; /* PSW is in gr[0] */ | ||
20 | __u64 fr[32]; | ||
21 | unsigned long sr[ 8]; | ||
22 | unsigned long iasq[2]; | ||
23 | unsigned long iaoq[2]; | ||
24 | unsigned long cr27; | ||
25 | unsigned long pad0; /* available for other uses */ | ||
26 | unsigned long orig_r28; | ||
27 | unsigned long ksp; | ||
28 | unsigned long kpc; | ||
29 | unsigned long sar; /* CR11 */ | ||
30 | unsigned long iir; /* CR19 */ | ||
31 | unsigned long isr; /* CR20 */ | ||
32 | unsigned long ior; /* CR21 */ | ||
33 | unsigned long ipsw; /* CR22 */ | ||
34 | }; | ||
35 | |||
36 | /* | ||
37 | * The numbers chosen here are somewhat arbitrary but absolutely MUST | ||
38 | * not overlap with any of the number assigned in <linux/ptrace.h>. | ||
39 | * | ||
40 | * These ones are taken from IA-64 on the assumption that theirs are | ||
41 | * the most correct (and we also want to support PTRACE_SINGLEBLOCK | ||
42 | * since we have taken branch traps too) | ||
43 | */ | ||
44 | #define PTRACE_SINGLEBLOCK 12 /* resume execution until next branch */ | ||
45 | |||
46 | #ifdef __KERNEL__ | ||
47 | |||
48 | #define task_regs(task) ((struct pt_regs *) ((char *)(task) + TASK_REGS)) | ||
49 | |||
50 | #define __ARCH_WANT_COMPAT_SYS_PTRACE | ||
51 | |||
52 | struct task_struct; | ||
53 | #define arch_has_single_step() 1 | ||
54 | void user_disable_single_step(struct task_struct *task); | ||
55 | void user_enable_single_step(struct task_struct *task); | ||
56 | |||
57 | #define arch_has_block_step() 1 | ||
58 | void user_enable_block_step(struct task_struct *task); | ||
59 | |||
60 | /* XXX should we use iaoq[1] or iaoq[0] ? */ | ||
61 | #define user_mode(regs) (((regs)->iaoq[0] & 3) ? 1 : 0) | ||
62 | #define user_space(regs) (((regs)->iasq[1] != 0) ? 1 : 0) | ||
63 | #define instruction_pointer(regs) ((regs)->iaoq[0] & ~3) | ||
64 | unsigned long profile_pc(struct pt_regs *); | ||
65 | extern void show_regs(struct pt_regs *); | ||
66 | #endif | ||
67 | |||
68 | #endif | ||
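PTRACE_SINGLEBLOCK above resumes the tracee until the next taken branch, the block-step counterpart of PTRACE_SINGLESTEP. A hedged sketch of how a tracer might use it, with the request value taken from this header since generic ptrace headers may not define it, and with error handling kept minimal:

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

#ifndef PTRACE_SINGLEBLOCK
#define PTRACE_SINGLEBLOCK 12   /* same value as in the header above */
#endif

static void block_step_once(pid_t child)
{
	int status;

	/* run the stopped tracee until it takes a branch, then report */
	if (ptrace(PTRACE_SINGLEBLOCK, child, NULL, NULL) == -1) {
		perror("PTRACE_SINGLEBLOCK");
		return;
	}
	waitpid(child, &status, 0);
	if (WIFSTOPPED(status))
		printf("tracee stopped again, signal %d\n", WSTOPSIG(status));
}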
diff --git a/arch/parisc/include/asm/real.h b/arch/parisc/include/asm/real.h new file mode 100644 index 000000000000..82acb25db395 --- /dev/null +++ b/arch/parisc/include/asm/real.h | |||
@@ -0,0 +1,5 @@ | |||
1 | #ifndef _PARISC_REAL_H | ||
2 | #define _PARISC_REAL_H | ||
3 | |||
4 | |||
5 | #endif | ||
diff --git a/arch/parisc/include/asm/resource.h b/arch/parisc/include/asm/resource.h new file mode 100644 index 000000000000..8b06343b62ed --- /dev/null +++ b/arch/parisc/include/asm/resource.h | |||
@@ -0,0 +1,7 @@ | |||
1 | #ifndef _ASM_PARISC_RESOURCE_H | ||
2 | #define _ASM_PARISC_RESOURCE_H | ||
3 | |||
4 | #define _STK_LIM_MAX 10 * _STK_LIM | ||
5 | #include <asm-generic/resource.h> | ||
6 | |||
7 | #endif | ||
diff --git a/arch/parisc/include/asm/ropes.h b/arch/parisc/include/asm/ropes.h new file mode 100644 index 000000000000..09f51d5ab57c --- /dev/null +++ b/arch/parisc/include/asm/ropes.h | |||
@@ -0,0 +1,322 @@ | |||
1 | #ifndef _ASM_PARISC_ROPES_H_ | ||
2 | #define _ASM_PARISC_ROPES_H_ | ||
3 | |||
4 | #include <asm/parisc-device.h> | ||
5 | |||
6 | #ifdef CONFIG_64BIT | ||
7 | /* "low end" PA8800 machines use ZX1 chipset: PAT PDC and only run 64-bit */ | ||
8 | #define ZX1_SUPPORT | ||
9 | #endif | ||
10 | |||
11 | #ifdef CONFIG_PROC_FS | ||
12 | /* depends on proc fs support. But costs CPU performance */ | ||
13 | #undef SBA_COLLECT_STATS | ||
14 | #endif | ||
15 | |||
16 | /* | ||
17 | ** The number of pdir entries to "free" before issuing | ||
18 | ** a read to PCOM register to flush out PCOM writes. | ||
19 | ** Interacts with allocation granularity (ie 4 or 8 entries | ||
20 | ** allocated and free'd/purged at a time might make this | ||
21 | ** less interesting). | ||
22 | */ | ||
23 | #define DELAYED_RESOURCE_CNT 16 | ||
24 | |||
25 | #define MAX_IOC 2 /* per Ike. Pluto/Astro only have 1. */ | ||
26 | #define ROPES_PER_IOC 8 /* per Ike half or Pluto/Astro */ | ||
27 | |||
28 | struct ioc { | ||
29 | void __iomem *ioc_hpa; /* I/O MMU base address */ | ||
30 | char *res_map; /* resource map, bit == pdir entry */ | ||
31 | u64 *pdir_base; /* physical base address */ | ||
32 | unsigned long ibase; /* pdir IOV Space base - shared w/lba_pci */ | ||
33 | unsigned long imask; /* pdir IOV Space mask - shared w/lba_pci */ | ||
34 | #ifdef ZX1_SUPPORT | ||
35 | unsigned long iovp_mask; /* help convert IOVA to IOVP */ | ||
36 | #endif | ||
37 | unsigned long *res_hint; /* next avail IOVP - circular search */ | ||
38 | spinlock_t res_lock; | ||
39 | unsigned int res_bitshift; /* from the LEFT! */ | ||
40 | unsigned int res_size; /* size of resource map in bytes */ | ||
41 | #ifdef SBA_HINT_SUPPORT | ||
42 | /* FIXME : DMA HINTs not used */ | ||
43 | unsigned long hint_mask_pdir; /* bits used for DMA hints */ | ||
44 | unsigned int hint_shift_pdir; | ||
45 | #endif | ||
46 | #if DELAYED_RESOURCE_CNT > 0 | ||
47 | int saved_cnt; | ||
48 | struct sba_dma_pair { | ||
49 | dma_addr_t iova; | ||
50 | size_t size; | ||
51 | } saved[DELAYED_RESOURCE_CNT]; | ||
52 | #endif | ||
53 | |||
54 | #ifdef SBA_COLLECT_STATS | ||
55 | #define SBA_SEARCH_SAMPLE 0x100 | ||
56 | unsigned long avg_search[SBA_SEARCH_SAMPLE]; | ||
57 | unsigned long avg_idx; /* current index into avg_search */ | ||
58 | unsigned long used_pages; | ||
59 | unsigned long msingle_calls; | ||
60 | unsigned long msingle_pages; | ||
61 | unsigned long msg_calls; | ||
62 | unsigned long msg_pages; | ||
63 | unsigned long usingle_calls; | ||
64 | unsigned long usingle_pages; | ||
65 | unsigned long usg_calls; | ||
66 | unsigned long usg_pages; | ||
67 | #endif | ||
68 | /* STUFF We don't need in performance path */ | ||
69 | unsigned int pdir_size; /* in bytes, determined by IOV Space size */ | ||
70 | }; | ||
71 | |||
72 | struct sba_device { | ||
73 | struct sba_device *next; /* list of SBA's in system */ | ||
74 | struct parisc_device *dev; /* dev found in bus walk */ | ||
75 | const char *name; | ||
76 | void __iomem *sba_hpa; /* base address */ | ||
77 | spinlock_t sba_lock; | ||
78 | unsigned int flags; /* state/functionality enabled */ | ||
79 | unsigned int hw_rev; /* HW revision of chip */ | ||
80 | |||
81 | struct resource chip_resv; /* MMIO reserved for chip */ | ||
82 | struct resource iommu_resv; /* MMIO reserved for iommu */ | ||
83 | |||
84 | unsigned int num_ioc; /* number of on-board IOC's */ | ||
85 | struct ioc ioc[MAX_IOC]; | ||
86 | }; | ||
87 | |||
88 | #define ASTRO_RUNWAY_PORT 0x582 | ||
89 | #define IKE_MERCED_PORT 0x803 | ||
90 | #define REO_MERCED_PORT 0x804 | ||
91 | #define REOG_MERCED_PORT 0x805 | ||
92 | #define PLUTO_MCKINLEY_PORT 0x880 | ||
93 | |||
94 | static inline int IS_ASTRO(struct parisc_device *d) { | ||
95 | return d->id.hversion == ASTRO_RUNWAY_PORT; | ||
96 | } | ||
97 | |||
98 | static inline int IS_IKE(struct parisc_device *d) { | ||
99 | return d->id.hversion == IKE_MERCED_PORT; | ||
100 | } | ||
101 | |||
102 | static inline int IS_PLUTO(struct parisc_device *d) { | ||
103 | return d->id.hversion == PLUTO_MCKINLEY_PORT; | ||
104 | } | ||
105 | |||
106 | #define PLUTO_IOVA_BASE (1UL*1024*1024*1024) /* 1GB */ | ||
107 | #define PLUTO_IOVA_SIZE (1UL*1024*1024*1024) /* 1GB */ | ||
108 | #define PLUTO_GART_SIZE (PLUTO_IOVA_SIZE / 2) | ||
109 | |||
110 | #define SBA_PDIR_VALID_BIT 0x8000000000000000ULL | ||
111 | |||
112 | #define SBA_AGPGART_COOKIE 0x0000badbadc0ffeeULL | ||
113 | |||
114 | #define SBA_FUNC_ID 0x0000 /* function id */ | ||
115 | #define SBA_FCLASS 0x0008 /* function class, bist, header, rev... */ | ||
116 | |||
117 | #define SBA_FUNC_SIZE 4096 /* SBA configuration function reg set */ | ||
118 | |||
119 | #define ASTRO_IOC_OFFSET (32 * SBA_FUNC_SIZE) | ||
120 | #define PLUTO_IOC_OFFSET (1 * SBA_FUNC_SIZE) | ||
121 | /* Ike's IOC's occupy functions 2 and 3 */ | ||
122 | #define IKE_IOC_OFFSET(p) ((p+2) * SBA_FUNC_SIZE) | ||
123 | |||
124 | #define IOC_CTRL 0x8 /* IOC_CTRL offset */ | ||
125 | #define IOC_CTRL_TC (1 << 0) /* TOC Enable */ | ||
126 | #define IOC_CTRL_CE (1 << 1) /* Coalesce Enable */ | ||
127 | #define IOC_CTRL_DE (1 << 2) /* Dillon Enable */ | ||
128 | #define IOC_CTRL_RM (1 << 8) /* Real Mode */ | ||
129 | #define IOC_CTRL_NC (1 << 9) /* Non Coherent Mode */ | ||
130 | #define IOC_CTRL_D4 (1 << 11) /* Disable 4-byte coalescing */ | ||
131 | #define IOC_CTRL_DD (1 << 13) /* Disable distr. LMMIO range coalescing */ | ||
132 | |||
133 | /* | ||
134 | ** Offsets into MBIB (Function 0 on Ike and hopefully Astro) | ||
135 | ** Firmware programs this stuff. Don't touch it. | ||
136 | */ | ||
137 | #define LMMIO_DIRECT0_BASE 0x300 | ||
138 | #define LMMIO_DIRECT0_MASK 0x308 | ||
139 | #define LMMIO_DIRECT0_ROUTE 0x310 | ||
140 | |||
141 | #define LMMIO_DIST_BASE 0x360 | ||
142 | #define LMMIO_DIST_MASK 0x368 | ||
143 | #define LMMIO_DIST_ROUTE 0x370 | ||
144 | |||
145 | #define IOS_DIST_BASE 0x390 | ||
146 | #define IOS_DIST_MASK 0x398 | ||
147 | #define IOS_DIST_ROUTE 0x3A0 | ||
148 | |||
149 | #define IOS_DIRECT_BASE 0x3C0 | ||
150 | #define IOS_DIRECT_MASK 0x3C8 | ||
151 | #define IOS_DIRECT_ROUTE 0x3D0 | ||
152 | |||
153 | /* | ||
154 | ** Offsets into I/O TLB (Function 2 and 3 on Ike) | ||
155 | */ | ||
156 | #define ROPE0_CTL 0x200 /* "regbus pci0" */ | ||
157 | #define ROPE1_CTL 0x208 | ||
158 | #define ROPE2_CTL 0x210 | ||
159 | #define ROPE3_CTL 0x218 | ||
160 | #define ROPE4_CTL 0x220 | ||
161 | #define ROPE5_CTL 0x228 | ||
162 | #define ROPE6_CTL 0x230 | ||
163 | #define ROPE7_CTL 0x238 | ||
164 | |||
165 | #define IOC_ROPE0_CFG 0x500 /* pluto only */ | ||
166 | #define IOC_ROPE_AO 0x10 /* Allow "Relaxed Ordering" */ | ||
167 | |||
168 | #define HF_ENABLE 0x40 | ||
169 | |||
170 | #define IOC_IBASE 0x300 /* IO TLB */ | ||
171 | #define IOC_IMASK 0x308 | ||
172 | #define IOC_PCOM 0x310 | ||
173 | #define IOC_TCNFG 0x318 | ||
174 | #define IOC_PDIR_BASE 0x320 | ||
175 | |||
176 | /* | ||
177 | ** IOC supports 4/8/16/64KB page sizes (see TCNFG register) | ||
178 | ** It's safer (avoid memory corruption) to keep DMA page mappings | ||
179 | ** equivalently sized to VM PAGE_SIZE. | ||
180 | ** | ||
181 | ** We really can't avoid generating a new mapping for each | ||
182 | ** page since the Virtual Coherence Index has to be generated | ||
183 | ** and updated for each page. | ||
184 | ** | ||
185 | ** PAGE_SIZE could be greater than IOVP_SIZE. But not the inverse. | ||
186 | */ | ||
187 | #define IOVP_SIZE PAGE_SIZE | ||
188 | #define IOVP_SHIFT PAGE_SHIFT | ||
189 | #define IOVP_MASK PAGE_MASK | ||
190 | |||
191 | #define SBA_PERF_CFG 0x708 /* Performance Counter stuff */ | ||
192 | #define SBA_PERF_MASK1 0x718 | ||
193 | #define SBA_PERF_MASK2 0x730 | ||
194 | |||
195 | /* | ||
196 | ** Offsets into PCI Performance Counters (functions 12 and 13) | ||
197 | ** Controlled by PERF registers in function 2 & 3 respectively. | ||
198 | */ | ||
199 | #define SBA_PERF_CNT1 0x200 | ||
200 | #define SBA_PERF_CNT2 0x208 | ||
201 | #define SBA_PERF_CNT3 0x210 | ||
202 | |||
203 | /* | ||
204 | ** lba_device: Per instance Elroy data structure | ||
205 | */ | ||
206 | struct lba_device { | ||
207 | struct pci_hba_data hba; | ||
208 | |||
209 | spinlock_t lba_lock; | ||
210 | void *iosapic_obj; | ||
211 | |||
212 | #ifdef CONFIG_64BIT | ||
213 | void __iomem *iop_base; /* PA_VIEW - for IO port accessor funcs */ | ||
214 | #endif | ||
215 | |||
216 | int flags; /* state/functionality enabled */ | ||
217 | int hw_rev; /* HW revision of chip */ | ||
218 | }; | ||
219 | |||
220 | #define ELROY_HVERS 0x782 | ||
221 | #define MERCURY_HVERS 0x783 | ||
222 | #define QUICKSILVER_HVERS 0x784 | ||
223 | |||
224 | static inline int IS_ELROY(struct parisc_device *d) { | ||
225 | return (d->id.hversion == ELROY_HVERS); | ||
226 | } | ||
227 | |||
228 | static inline int IS_MERCURY(struct parisc_device *d) { | ||
229 | return (d->id.hversion == MERCURY_HVERS); | ||
230 | } | ||
231 | |||
232 | static inline int IS_QUICKSILVER(struct parisc_device *d) { | ||
233 | return (d->id.hversion == QUICKSILVER_HVERS); | ||
234 | } | ||
235 | |||
236 | static inline int agp_mode_mercury(void __iomem *hpa) { | ||
237 | u64 bus_mode; | ||
238 | |||
239 | bus_mode = readl(hpa + 0x0620); | ||
240 | if (bus_mode & 1) | ||
241 | return 1; | ||
242 | |||
243 | return 0; | ||
244 | } | ||
245 | |||
246 | /* | ||
247 | ** I/O SAPIC init function | ||
248 | ** Caller knows where an I/O SAPIC is. LBA has an integrated I/O SAPIC. | ||
249 | ** Call setup as part of per instance initialization. | ||
250 | ** (ie *not* init_module() function unless only one is present.) | ||
251 | ** fixup_irq is to initialize PCI IRQ line support and | ||
252 | ** virtualize pcidev->irq value. To be called by pci_fixup_bus(). | ||
253 | */ | ||
254 | extern void *iosapic_register(unsigned long hpa); | ||
255 | extern int iosapic_fixup_irq(void *obj, struct pci_dev *pcidev); | ||
256 | |||
257 | #define LBA_FUNC_ID 0x0000 /* function id */ | ||
258 | #define LBA_FCLASS 0x0008 /* function class, bist, header, rev... */ | ||
259 | #define LBA_CAPABLE 0x0030 /* capabilities register */ | ||
260 | |||
261 | #define LBA_PCI_CFG_ADDR 0x0040 /* poke CFG address here */ | ||
262 | #define LBA_PCI_CFG_DATA 0x0048 /* read or write data here */ | ||
263 | |||
264 | #define LBA_PMC_MTLT 0x0050 /* Firmware sets this - read only. */ | ||
265 | #define LBA_FW_SCRATCH 0x0058 /* Firmware writes the PCI bus number here. */ | ||
266 | #define LBA_ERROR_ADDR 0x0070 /* On error, address gets logged here */ | ||
267 | |||
268 | #define LBA_ARB_MASK 0x0080 /* bit 0 enable arbitration. PAT/PDC enables */ | ||
269 | #define LBA_ARB_PRI 0x0088 /* firmware sets this. */ | ||
270 | #define LBA_ARB_MODE 0x0090 /* firmware sets this. */ | ||
271 | #define LBA_ARB_MTLT 0x0098 /* firmware sets this. */ | ||
272 | |||
273 | #define LBA_MOD_ID 0x0100 /* Module ID. PDC_PAT_CELL reports 4 */ | ||
274 | |||
275 | #define LBA_STAT_CTL 0x0108 /* Status & Control */ | ||
276 | #define LBA_BUS_RESET 0x01 /* Deassert PCI Bus Reset Signal */ | ||
277 | #define CLEAR_ERRLOG 0x10 /* "Clear Error Log" cmd */ | ||
278 | #define CLEAR_ERRLOG_ENABLE 0x20 /* "Clear Error Log" Enable */ | ||
279 | #define HF_ENABLE 0x40 /* enable HF mode (default is -1 mode) */ | ||
280 | |||
281 | #define LBA_LMMIO_BASE 0x0200 /* < 4GB I/O address range */ | ||
282 | #define LBA_LMMIO_MASK 0x0208 | ||
283 | |||
284 | #define LBA_GMMIO_BASE 0x0210 /* > 4GB I/O address range */ | ||
285 | #define LBA_GMMIO_MASK 0x0218 | ||
286 | |||
287 | #define LBA_WLMMIO_BASE 0x0220 /* All < 4GB ranges under the same *SBA* */ | ||
288 | #define LBA_WLMMIO_MASK 0x0228 | ||
289 | |||
290 | #define LBA_WGMMIO_BASE 0x0230 /* All > 4GB ranges under the same *SBA* */ | ||
291 | #define LBA_WGMMIO_MASK 0x0238 | ||
292 | |||
293 | #define LBA_IOS_BASE 0x0240 /* I/O port space for this LBA */ | ||
294 | #define LBA_IOS_MASK 0x0248 | ||
295 | |||
296 | #define LBA_ELMMIO_BASE 0x0250 /* Extra LMMIO range */ | ||
297 | #define LBA_ELMMIO_MASK 0x0258 | ||
298 | |||
299 | #define LBA_EIOS_BASE 0x0260 /* Extra I/O port space */ | ||
300 | #define LBA_EIOS_MASK 0x0268 | ||
301 | |||
302 | #define LBA_GLOBAL_MASK 0x0270 /* Mercury only: Global Address Mask */ | ||
303 | #define LBA_DMA_CTL 0x0278 /* firmware sets this */ | ||
304 | |||
305 | #define LBA_IBASE 0x0300 /* SBA DMA support */ | ||
306 | #define LBA_IMASK 0x0308 | ||
307 | |||
308 | /* FIXME: ignore DMA Hint stuff until we can measure performance */ | ||
309 | #define LBA_HINT_CFG 0x0310 | ||
310 | #define LBA_HINT_BASE 0x0380 /* 14 registers at every 8 bytes. */ | ||
311 | |||
312 | #define LBA_BUS_MODE 0x0620 | ||
313 | |||
314 | /* ERROR regs are needed for config cycle kluges */ | ||
315 | #define LBA_ERROR_CONFIG 0x0680 | ||
316 | #define LBA_SMART_MODE 0x20 | ||
317 | #define LBA_ERROR_STATUS 0x0688 | ||
318 | #define LBA_ROPE_CTL 0x06A0 | ||
319 | |||
320 | #define LBA_IOSAPIC_BASE 0x800 /* Offset of IRQ logic */ | ||
321 | |||
322 | #endif /*_ASM_PARISC_ROPES_H_*/ | ||
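With SBA_FUNC_SIZE at 4096 bytes, the per-function register blocks above land at fixed offsets from the SBA base: Astro's single IOC at function 32 (0x20000), Pluto's at function 1 (0x1000), and Ike's two IOCs at functions 2 and 3 (0x2000 and 0x3000 via IKE_IOC_OFFSET(p)). A trivial check of that arithmetic, with the macros copied locally:

#include <assert.h>

#define SBA_FUNC_SIZE 4096                      /* from the header above */
#define ASTRO_IOC_OFFSET (32 * SBA_FUNC_SIZE)
#define PLUTO_IOC_OFFSET (1 * SBA_FUNC_SIZE)
#define IKE_IOC_OFFSET(p) ((p + 2) * SBA_FUNC_SIZE)

int main(void)
{
	assert(ASTRO_IOC_OFFSET == 0x20000);
	assert(PLUTO_IOC_OFFSET == 0x1000);
	assert(IKE_IOC_OFFSET(0) == 0x2000);    /* Ike IOC 0 -> function 2 */
	assert(IKE_IOC_OFFSET(1) == 0x3000);    /* Ike IOC 1 -> function 3 */
	return 0;
}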
diff --git a/arch/parisc/include/asm/rt_sigframe.h b/arch/parisc/include/asm/rt_sigframe.h new file mode 100644 index 000000000000..f0dd3b30f6c4 --- /dev/null +++ b/arch/parisc/include/asm/rt_sigframe.h | |||
@@ -0,0 +1,23 @@ | |||
1 | #ifndef _ASM_PARISC_RT_SIGFRAME_H | ||
2 | #define _ASM_PARISC_RT_SIGFRAME_H | ||
3 | |||
4 | #define SIGRETURN_TRAMP 4 | ||
5 | #define SIGRESTARTBLOCK_TRAMP 5 | ||
6 | #define TRAMP_SIZE (SIGRETURN_TRAMP + SIGRESTARTBLOCK_TRAMP) | ||
7 | |||
8 | struct rt_sigframe { | ||
9 | /* XXX: Must match trampoline size in arch/parisc/kernel/signal.c | ||
10 | Secondary to that it must protect the ERESTART_RESTARTBLOCK | ||
11 | trampoline we left on the stack (we were bad and didn't | ||
12 | change sp so we could run really fast.) */ | ||
13 | unsigned int tramp[TRAMP_SIZE]; | ||
14 | struct siginfo info; | ||
15 | struct ucontext uc; | ||
16 | }; | ||
17 | |||
18 | #define SIGFRAME 128 | ||
19 | #define FUNCTIONCALLFRAME 96 | ||
20 | #define PARISC_RT_SIGFRAME_SIZE \ | ||
21 | (((sizeof(struct rt_sigframe) + FUNCTIONCALLFRAME) + SIGFRAME) & -SIGFRAME) | ||
22 | |||
23 | #endif | ||
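PARISC_RT_SIGFRAME_SIZE rounds the frame up to the 128-byte SIGFRAME alignment after reserving a 96-byte call frame: adding FUNCTIONCALLFRAME and SIGFRAME and then masking with & -SIGFRAME (which clears the low 7 bits) yields a multiple of 128 that covers sizeof(struct rt_sigframe) + 96. A quick check with a hypothetical struct size of 600 bytes (600 + 96 = 696 needed, rounded up to 768):

#include <assert.h>

int main(void)
{
	unsigned long frame = 600;   /* hypothetical sizeof(struct rt_sigframe) */
	unsigned long sz = ((frame + 96) + 128) & ~127UL;   /* & -SIGFRAME */

	assert(sz == 768);                       /* 696 rounded up to 128 */
	assert(sz % 128 == 0 && sz >= frame + 96);
	return 0;
}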
diff --git a/arch/parisc/include/asm/rtc.h b/arch/parisc/include/asm/rtc.h new file mode 100644 index 000000000000..099d641a42c2 --- /dev/null +++ b/arch/parisc/include/asm/rtc.h | |||
@@ -0,0 +1,131 @@ | |||
1 | /* | ||
2 | * include/asm-parisc/rtc.h | ||
3 | * | ||
4 | * Copyright 2002 Randolph CHung <tausq@debian.org> | ||
5 | * | ||
6 | * Based on: include/asm-ppc/rtc.h and the genrtc driver in the | ||
7 | * 2.4 parisc linux tree | ||
8 | */ | ||
9 | |||
10 | #ifndef __ASM_RTC_H__ | ||
11 | #define __ASM_RTC_H__ | ||
12 | |||
13 | #ifdef __KERNEL__ | ||
14 | |||
15 | #include <linux/rtc.h> | ||
16 | |||
17 | #include <asm/pdc.h> | ||
18 | |||
19 | #define SECS_PER_HOUR (60 * 60) | ||
20 | #define SECS_PER_DAY (SECS_PER_HOUR * 24) | ||
21 | |||
22 | |||
23 | #define RTC_PIE 0x40 /* periodic interrupt enable */ | ||
24 | #define RTC_AIE 0x20 /* alarm interrupt enable */ | ||
25 | #define RTC_UIE 0x10 /* update-finished interrupt enable */ | ||
26 | |||
27 | #define RTC_BATT_BAD 0x100 /* battery bad */ | ||
28 | |||
29 | /* some dummy definitions */ | ||
30 | #define RTC_SQWE 0x08 /* enable square-wave output */ | ||
31 | #define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */ | ||
32 | #define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */ | ||
33 | #define RTC_DST_EN 0x01 /* auto switch DST - works f. USA only */ | ||
34 | |||
35 | # define __isleap(year) \ | ||
36 | ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0)) | ||
37 | |||
38 | /* How many days come before each month (0-12). */ | ||
39 | static const unsigned short int __mon_yday[2][13] = | ||
40 | { | ||
41 | /* Normal years. */ | ||
42 | { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 }, | ||
43 | /* Leap years. */ | ||
44 | { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 } | ||
45 | }; | ||
46 | |||
47 | static inline unsigned int get_rtc_time(struct rtc_time *wtime) | ||
48 | { | ||
49 | struct pdc_tod tod_data; | ||
50 | long int days, rem, y; | ||
51 | const unsigned short int *ip; | ||
52 | |||
53 | memset(wtime, 0, sizeof(*wtime)); | ||
54 | if (pdc_tod_read(&tod_data) < 0) | ||
55 | return RTC_24H | RTC_BATT_BAD; | ||
56 | |||
57 | // most of the remainder of this function is: | ||
58 | // Copyright (C) 1991, 1993, 1997, 1998 Free Software Foundation, Inc. | ||
59 | // This was originally a part of the GNU C Library. | ||
60 | // It is distributed under the GPL, and was swiped from offtime.c | ||
61 | |||
62 | |||
63 | days = tod_data.tod_sec / SECS_PER_DAY; | ||
64 | rem = tod_data.tod_sec % SECS_PER_DAY; | ||
65 | |||
66 | wtime->tm_hour = rem / SECS_PER_HOUR; | ||
67 | rem %= SECS_PER_HOUR; | ||
68 | wtime->tm_min = rem / 60; | ||
69 | wtime->tm_sec = rem % 60; | ||
70 | |||
71 | y = 1970; | ||
72 | |||
73 | #define DIV(a, b) ((a) / (b) - ((a) % (b) < 0)) | ||
74 | #define LEAPS_THRU_END_OF(y) (DIV (y, 4) - DIV (y, 100) + DIV (y, 400)) | ||
75 | |||
76 | while (days < 0 || days >= (__isleap (y) ? 366 : 365)) | ||
77 | { | ||
78 | /* Guess a corrected year, assuming 365 days per year. */ | ||
79 | long int yg = y + days / 365 - (days % 365 < 0); | ||
80 | |||
81 | /* Adjust DAYS and Y to match the guessed year. */ | ||
82 | days -= ((yg - y) * 365 | ||
83 | + LEAPS_THRU_END_OF (yg - 1) | ||
84 | - LEAPS_THRU_END_OF (y - 1)); | ||
85 | y = yg; | ||
86 | } | ||
87 | wtime->tm_year = y - 1900; | ||
88 | |||
89 | ip = __mon_yday[__isleap(y)]; | ||
90 | for (y = 11; days < (long int) ip[y]; --y) | ||
91 | continue; | ||
92 | days -= ip[y]; | ||
93 | wtime->tm_mon = y; | ||
94 | wtime->tm_mday = days + 1; | ||
95 | |||
96 | return RTC_24H; | ||
97 | } | ||
98 | |||
99 | static int set_rtc_time(struct rtc_time *wtime) | ||
100 | { | ||
101 | u_int32_t secs; | ||
102 | |||
103 | secs = mktime(wtime->tm_year + 1900, wtime->tm_mon + 1, wtime->tm_mday, | ||
104 | wtime->tm_hour, wtime->tm_min, wtime->tm_sec); | ||
105 | |||
106 | if(pdc_tod_set(secs, 0) < 0) | ||
107 | return -1; | ||
108 | else | ||
109 | return 0; | ||
110 | |||
111 | } | ||
112 | |||
113 | static inline unsigned int get_rtc_ss(void) | ||
114 | { | ||
115 | struct rtc_time h; | ||
116 | |||
117 | get_rtc_time(&h); | ||
118 | return h.tm_sec; | ||
119 | } | ||
120 | |||
121 | static inline int get_rtc_pll(struct rtc_pll_info *pll) | ||
122 | { | ||
123 | return -EINVAL; | ||
124 | } | ||
125 | static inline int set_rtc_pll(struct rtc_pll_info *pll) | ||
126 | { | ||
127 | return -EINVAL; | ||
128 | } | ||
129 | |||
130 | #endif /* __KERNEL__ */ | ||
131 | #endif /* __ASM_RTC_H__ */ | ||
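The year/month loop in get_rtc_time() is the classic offtime() conversion: strip whole days, then walk years and the __mon_yday table. A standalone sketch of the same conversion that can be checked against gmtime() on a sample value; it only reimplements the date math (the PDC TOD read obviously cannot be reproduced here), and the negative-day corrections are dropped since post-1970 values keep everything positive:

#include <stdio.h>
#include <time.h>

#define SECS_PER_HOUR (60 * 60)
#define SECS_PER_DAY  (SECS_PER_HOUR * 24)
#define isleap(y) ((y) % 4 == 0 && ((y) % 100 != 0 || (y) % 400 == 0))

static const unsigned short mon_yday[2][13] = {
	{ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 },
	{ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 },
};

int main(void)
{
	long secs = 1000000000L;     /* sample TOD value: 2001-09-09 01:46:40 UTC */
	long days = secs / SECS_PER_DAY;
	long rem  = secs % SECS_PER_DAY;
	long y = 1970, m;
	const unsigned short *ip;
	int hour = rem / SECS_PER_HOUR;
	int min  = (rem % SECS_PER_HOUR) / 60;
	int sec  = rem % 60;
	time_t t = secs;
	struct tm *g;

	while (days >= (isleap(y) ? 366 : 365)) {
		long yg = y + days / 365;
		days -= (yg - y) * 365
			+ ((yg - 1) / 4 - (yg - 1) / 100 + (yg - 1) / 400)
			- ((y - 1) / 4 - (y - 1) / 100 + (y - 1) / 400);
		y = yg;
	}

	ip = mon_yday[isleap(y)];
	for (m = 11; days < ip[m]; --m)
		;
	days -= ip[m];

	printf("offtime : %04ld-%02ld-%02ld %02d:%02d:%02d\n",
	       y, m + 1, days + 1, hour, min, sec);

	g = gmtime(&t);              /* should print the same date and time */
	printf("gmtime  : %04d-%02d-%02d %02d:%02d:%02d\n",
	       g->tm_year + 1900, g->tm_mon + 1, g->tm_mday,
	       g->tm_hour, g->tm_min, g->tm_sec);
	return 0;
}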
diff --git a/arch/parisc/include/asm/runway.h b/arch/parisc/include/asm/runway.h new file mode 100644 index 000000000000..5bea02da7e22 --- /dev/null +++ b/arch/parisc/include/asm/runway.h | |||
@@ -0,0 +1,12 @@ | |||
1 | #ifndef ASM_PARISC_RUNWAY_H | ||
2 | #define ASM_PARISC_RUNWAY_H | ||
3 | #ifdef __KERNEL__ | ||
4 | |||
5 | /* declared in arch/parisc/kernel/setup.c */ | ||
6 | extern struct proc_dir_entry * proc_runway_root; | ||
7 | |||
8 | #define RUNWAY_STATUS 0x10 | ||
9 | #define RUNWAY_DEBUG 0x40 | ||
10 | |||
11 | #endif /* __KERNEL__ */ | ||
12 | #endif /* ASM_PARISC_RUNWAY_H */ | ||
diff --git a/arch/parisc/include/asm/scatterlist.h b/arch/parisc/include/asm/scatterlist.h new file mode 100644 index 000000000000..62269b31ebf4 --- /dev/null +++ b/arch/parisc/include/asm/scatterlist.h | |||
@@ -0,0 +1,27 @@ | |||
1 | #ifndef _ASM_PARISC_SCATTERLIST_H | ||
2 | #define _ASM_PARISC_SCATTERLIST_H | ||
3 | |||
4 | #include <asm/page.h> | ||
5 | #include <asm/types.h> | ||
6 | |||
7 | struct scatterlist { | ||
8 | #ifdef CONFIG_DEBUG_SG | ||
9 | unsigned long sg_magic; | ||
10 | #endif | ||
11 | unsigned long page_link; | ||
12 | unsigned int offset; | ||
13 | |||
14 | unsigned int length; | ||
15 | |||
16 | /* an IOVA can be 64 bits on some PA-RISC platforms. */ | ||
17 | dma_addr_t iova; /* I/O Virtual Address */ | ||
18 | __u32 iova_length; /* bytes mapped */ | ||
19 | }; | ||
20 | |||
21 | #define sg_virt_addr(sg) ((unsigned long)sg_virt(sg)) | ||
22 | #define sg_dma_address(sg) ((sg)->iova) | ||
23 | #define sg_dma_len(sg) ((sg)->iova_length) | ||
24 | |||
25 | #define ISA_DMA_THRESHOLD (~0UL) | ||
26 | |||
27 | #endif /* _ASM_PARISC_SCATTERLIST_H */ | ||
diff --git a/arch/parisc/include/asm/sections.h b/arch/parisc/include/asm/sections.h new file mode 100644 index 000000000000..9d13c3507ad6 --- /dev/null +++ b/arch/parisc/include/asm/sections.h | |||
@@ -0,0 +1,12 @@ | |||
1 | #ifndef _PARISC_SECTIONS_H | ||
2 | #define _PARISC_SECTIONS_H | ||
3 | |||
4 | /* nothing to see, move along */ | ||
5 | #include <asm-generic/sections.h> | ||
6 | |||
7 | #ifdef CONFIG_64BIT | ||
8 | #undef dereference_function_descriptor | ||
9 | void *dereference_function_descriptor(void *); | ||
10 | #endif | ||
11 | |||
12 | #endif | ||
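On 64-bit parisc (as the comment in signal.h further below also notes) a C function pointer does not point at the first instruction but at a small descriptor, so sections.h overrides dereference_function_descriptor() to recover the real text address. A simplified, purely illustrative sketch of the idea; the two-word layout and field names are assumptions for demonstration, not the real parisc64 ABI definition:

#include <stdio.h>

/* hypothetical descriptor: code address plus global-pointer value */
struct fdesc {
	unsigned long addr;
	unsigned long gp;
};

static void *deref_fdesc(void *ptr)
{
	/* on descriptor-based ABIs, "ptr" is really a struct fdesc * */
	return (void *)((struct fdesc *)ptr)->addr;
}

int main(void)
{
	struct fdesc d = { 0x123456789abcUL, 0xdeadbeefUL };

	printf("descriptor %p -> text %p\n", (void *)&d, deref_fdesc(&d));
	return 0;
}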
diff --git a/arch/parisc/include/asm/segment.h b/arch/parisc/include/asm/segment.h new file mode 100644 index 000000000000..26794ddb6524 --- /dev/null +++ b/arch/parisc/include/asm/segment.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef __PARISC_SEGMENT_H | ||
2 | #define __PARISC_SEGMENT_H | ||
3 | |||
4 | /* Only here because we have some old header files that expect it.. */ | ||
5 | |||
6 | #endif | ||
diff --git a/arch/parisc/include/asm/sembuf.h b/arch/parisc/include/asm/sembuf.h new file mode 100644 index 000000000000..1e59ffd3bd1e --- /dev/null +++ b/arch/parisc/include/asm/sembuf.h | |||
@@ -0,0 +1,29 @@ | |||
1 | #ifndef _PARISC_SEMBUF_H | ||
2 | #define _PARISC_SEMBUF_H | ||
3 | |||
4 | /* | ||
5 | * The semid64_ds structure for parisc architecture. | ||
6 | * Note extra padding because this structure is passed back and forth | ||
7 | * between kernel and user space. | ||
8 | * | ||
9 | * Pad space is left for: | ||
10 | * - 64-bit time_t to solve y2038 problem | ||
11 | * - 2 miscellaneous 32-bit values | ||
12 | */ | ||
13 | |||
14 | struct semid64_ds { | ||
15 | struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ | ||
16 | #ifndef CONFIG_64BIT | ||
17 | unsigned int __pad1; | ||
18 | #endif | ||
19 | __kernel_time_t sem_otime; /* last semop time */ | ||
20 | #ifndef CONFIG_64BIT | ||
21 | unsigned int __pad2; | ||
22 | #endif | ||
23 | __kernel_time_t sem_ctime; /* last change time */ | ||
24 | unsigned int sem_nsems; /* no. of semaphores in array */ | ||
25 | unsigned int __unused1; | ||
26 | unsigned int __unused2; | ||
27 | }; | ||
28 | |||
29 | #endif /* _PARISC_SEMBUF_H */ | ||
diff --git a/arch/parisc/include/asm/serial.h b/arch/parisc/include/asm/serial.h new file mode 100644 index 000000000000..d7e3cc60dbc3 --- /dev/null +++ b/arch/parisc/include/asm/serial.h | |||
@@ -0,0 +1,10 @@ | |||
1 | /* | ||
2 | * include/asm-parisc/serial.h | ||
3 | */ | ||
4 | |||
5 | /* | ||
6 | * This is used for 16550-compatible UARTs | ||
7 | */ | ||
8 | #define BASE_BAUD ( 1843200 / 16 ) | ||
9 | |||
10 | #define SERIAL_PORT_DFNS | ||
diff --git a/arch/parisc/include/asm/setup.h b/arch/parisc/include/asm/setup.h new file mode 100644 index 000000000000..7da2e5b8747e --- /dev/null +++ b/arch/parisc/include/asm/setup.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef _PARISC_SETUP_H | ||
2 | #define _PARISC_SETUP_H | ||
3 | |||
4 | #define COMMAND_LINE_SIZE 1024 | ||
5 | |||
6 | #endif /* _PARISC_SETUP_H */ | ||
diff --git a/arch/parisc/include/asm/shmbuf.h b/arch/parisc/include/asm/shmbuf.h new file mode 100644 index 000000000000..0a3eada1863b --- /dev/null +++ b/arch/parisc/include/asm/shmbuf.h | |||
@@ -0,0 +1,58 @@ | |||
1 | #ifndef _PARISC_SHMBUF_H | ||
2 | #define _PARISC_SHMBUF_H | ||
3 | |||
4 | /* | ||
5 | * The shmid64_ds structure for parisc architecture. | ||
6 | * Note extra padding because this structure is passed back and forth | ||
7 | * between kernel and user space. | ||
8 | * | ||
9 | * Pad space is left for: | ||
10 | * - 64-bit time_t to solve y2038 problem | ||
11 | * - 2 miscellaneous 32-bit values | ||
12 | */ | ||
13 | |||
14 | struct shmid64_ds { | ||
15 | struct ipc64_perm shm_perm; /* operation perms */ | ||
16 | #ifndef CONFIG_64BIT | ||
17 | unsigned int __pad1; | ||
18 | #endif | ||
19 | __kernel_time_t shm_atime; /* last attach time */ | ||
20 | #ifndef CONFIG_64BIT | ||
21 | unsigned int __pad2; | ||
22 | #endif | ||
23 | __kernel_time_t shm_dtime; /* last detach time */ | ||
24 | #ifndef CONFIG_64BIT | ||
25 | unsigned int __pad3; | ||
26 | #endif | ||
27 | __kernel_time_t shm_ctime; /* last change time */ | ||
28 | #ifndef CONFIG_64BIT | ||
29 | unsigned int __pad4; | ||
30 | #endif | ||
31 | size_t shm_segsz; /* size of segment (bytes) */ | ||
32 | __kernel_pid_t shm_cpid; /* pid of creator */ | ||
33 | __kernel_pid_t shm_lpid; /* pid of last operator */ | ||
34 | unsigned int shm_nattch; /* no. of current attaches */ | ||
35 | unsigned int __unused1; | ||
36 | unsigned int __unused2; | ||
37 | }; | ||
38 | |||
39 | #ifdef CONFIG_64BIT | ||
40 | /* The 'unsigned int' (formerly 'unsigned long') data types below will | ||
41 | * ensure that a 32-bit app calling shmctl(*,IPC_INFO,*) will work on | ||
42 | * a wide kernel, but if some of these values are meant to contain pointers | ||
43 | * they may need to be 'long long' instead. -PB XXX FIXME | ||
44 | */ | ||
45 | #endif | ||
46 | struct shminfo64 { | ||
47 | unsigned int shmmax; | ||
48 | unsigned int shmmin; | ||
49 | unsigned int shmmni; | ||
50 | unsigned int shmseg; | ||
51 | unsigned int shmall; | ||
52 | unsigned int __unused1; | ||
53 | unsigned int __unused2; | ||
54 | unsigned int __unused3; | ||
55 | unsigned int __unused4; | ||
56 | }; | ||
57 | |||
58 | #endif /* _PARISC_SHMBUF_H */ | ||
diff --git a/arch/parisc/include/asm/shmparam.h b/arch/parisc/include/asm/shmparam.h new file mode 100644 index 000000000000..628ddc22faa8 --- /dev/null +++ b/arch/parisc/include/asm/shmparam.h | |||
@@ -0,0 +1,8 @@ | |||
1 | #ifndef _ASMPARISC_SHMPARAM_H | ||
2 | #define _ASMPARISC_SHMPARAM_H | ||
3 | |||
4 | #define __ARCH_FORCE_SHMLBA 1 | ||
5 | |||
6 | #define SHMLBA 0x00400000 /* attach addr needs to be 4 Mb aligned */ | ||
7 | |||
8 | #endif /* _ASMPARISC_SHMPARAM_H */ | ||
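Because SHMLBA is 4 MB here (a cache-aliasing constraint rather than the page size used on most architectures), user code that picks its own attach address has to align it accordingly, or pass SHM_RND and let the kernel round down. A hedged sketch of the rounding involved (the addresses are made up):

#include <assert.h>

#define SHMLBA_PARISC 0x00400000UL   /* 4 MB, from the header above */

int main(void)
{
	unsigned long hint    = 0x40123456UL;                 /* arbitrary hint */
	unsigned long aligned = hint & ~(SHMLBA_PARISC - 1);  /* what SHM_RND does */

	assert(aligned == 0x40000000UL);
	assert(aligned % SHMLBA_PARISC == 0);
	return 0;
}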
diff --git a/arch/parisc/include/asm/sigcontext.h b/arch/parisc/include/asm/sigcontext.h new file mode 100644 index 000000000000..27ef31bb3b6e --- /dev/null +++ b/arch/parisc/include/asm/sigcontext.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef _ASMPARISC_SIGCONTEXT_H | ||
2 | #define _ASMPARISC_SIGCONTEXT_H | ||
3 | |||
4 | #define PARISC_SC_FLAG_ONSTACK 1<<0 | ||
5 | #define PARISC_SC_FLAG_IN_SYSCALL 1<<1 | ||
6 | |||
7 | /* We will add more stuff here as it becomes necessary, until we know | ||
8 | it works. */ | ||
9 | struct sigcontext { | ||
10 | unsigned long sc_flags; | ||
11 | |||
12 | unsigned long sc_gr[32]; /* PSW in sc_gr[0] */ | ||
13 | unsigned long long sc_fr[32]; /* FIXME, do we need other state info? */ | ||
14 | unsigned long sc_iasq[2]; | ||
15 | unsigned long sc_iaoq[2]; | ||
16 | unsigned long sc_sar; /* cr11 */ | ||
17 | }; | ||
18 | |||
19 | |||
20 | #endif | ||
diff --git a/arch/parisc/include/asm/siginfo.h b/arch/parisc/include/asm/siginfo.h new file mode 100644 index 000000000000..d7034728f377 --- /dev/null +++ b/arch/parisc/include/asm/siginfo.h | |||
@@ -0,0 +1,9 @@ | |||
1 | #ifndef _PARISC_SIGINFO_H | ||
2 | #define _PARISC_SIGINFO_H | ||
3 | |||
4 | #include <asm-generic/siginfo.h> | ||
5 | |||
6 | #undef NSIGTRAP | ||
7 | #define NSIGTRAP 4 | ||
8 | |||
9 | #endif | ||
diff --git a/arch/parisc/include/asm/signal.h b/arch/parisc/include/asm/signal.h new file mode 100644 index 000000000000..c20356375d1d --- /dev/null +++ b/arch/parisc/include/asm/signal.h | |||
@@ -0,0 +1,153 @@ | |||
1 | #ifndef _ASM_PARISC_SIGNAL_H | ||
2 | #define _ASM_PARISC_SIGNAL_H | ||
3 | |||
4 | #define SIGHUP 1 | ||
5 | #define SIGINT 2 | ||
6 | #define SIGQUIT 3 | ||
7 | #define SIGILL 4 | ||
8 | #define SIGTRAP 5 | ||
9 | #define SIGABRT 6 | ||
10 | #define SIGIOT 6 | ||
11 | #define SIGEMT 7 | ||
12 | #define SIGFPE 8 | ||
13 | #define SIGKILL 9 | ||
14 | #define SIGBUS 10 | ||
15 | #define SIGSEGV 11 | ||
16 | #define SIGSYS 12 /* Linux doesn't use this */ | ||
17 | #define SIGPIPE 13 | ||
18 | #define SIGALRM 14 | ||
19 | #define SIGTERM 15 | ||
20 | #define SIGUSR1 16 | ||
21 | #define SIGUSR2 17 | ||
22 | #define SIGCHLD 18 | ||
23 | #define SIGPWR 19 | ||
24 | #define SIGVTALRM 20 | ||
25 | #define SIGPROF 21 | ||
26 | #define SIGIO 22 | ||
27 | #define SIGPOLL SIGIO | ||
28 | #define SIGWINCH 23 | ||
29 | #define SIGSTOP 24 | ||
30 | #define SIGTSTP 25 | ||
31 | #define SIGCONT 26 | ||
32 | #define SIGTTIN 27 | ||
33 | #define SIGTTOU 28 | ||
34 | #define SIGURG 29 | ||
35 | #define SIGLOST 30 /* Linux doesn't use this either */ | ||
36 | #define SIGUNUSED 31 | ||
37 | #define SIGRESERVE SIGUNUSED | ||
38 | |||
39 | #define SIGXCPU 33 | ||
40 | #define SIGXFSZ 34 | ||
41 | #define SIGSTKFLT 36 | ||
42 | |||
43 | /* These should not be considered constants from userland. */ | ||
44 | #define SIGRTMIN 37 | ||
45 | #define SIGRTMAX _NSIG /* it's 44 under HP/UX */ | ||
46 | |||
47 | /* | ||
48 | * SA_FLAGS values: | ||
49 | * | ||
50 | * SA_ONSTACK indicates that a registered stack_t will be used. | ||
51 | * SA_RESTART flag to get restarting signals (which were the default long ago) | ||
52 | * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. | ||
53 | * SA_RESETHAND clears the handler when the signal is delivered. | ||
54 | * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. | ||
55 | * SA_NODEFER prevents the current signal from being masked in the handler. | ||
56 | * | ||
57 | * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single | ||
58 | * Unix names RESETHAND and NODEFER respectively. | ||
59 | */ | ||
60 | #define SA_ONSTACK 0x00000001 | ||
61 | #define SA_RESETHAND 0x00000004 | ||
62 | #define SA_NOCLDSTOP 0x00000008 | ||
63 | #define SA_SIGINFO 0x00000010 | ||
64 | #define SA_NODEFER 0x00000020 | ||
65 | #define SA_RESTART 0x00000040 | ||
66 | #define SA_NOCLDWAIT 0x00000080 | ||
67 | #define _SA_SIGGFAULT 0x00000100 /* HPUX */ | ||
68 | |||
69 | #define SA_NOMASK SA_NODEFER | ||
70 | #define SA_ONESHOT SA_RESETHAND | ||
71 | |||
72 | #define SA_RESTORER 0x04000000 /* obsolete -- ignored */ | ||
73 | |||
74 | /* | ||
75 | * sigaltstack controls | ||
76 | */ | ||
77 | #define SS_ONSTACK 1 | ||
78 | #define SS_DISABLE 2 | ||
79 | |||
80 | #define MINSIGSTKSZ 2048 | ||
81 | #define SIGSTKSZ 8192 | ||
82 | |||
83 | #ifdef __KERNEL__ | ||
84 | |||
85 | #define _NSIG 64 | ||
86 | /* bits-per-word, where word apparently means 'long' not 'int' */ | ||
87 | #define _NSIG_BPW BITS_PER_LONG | ||
88 | #define _NSIG_WORDS (_NSIG / _NSIG_BPW) | ||
89 | |||
90 | #endif /* __KERNEL__ */ | ||
91 | |||
92 | #define SIG_BLOCK 0 /* for blocking signals */ | ||
93 | #define SIG_UNBLOCK 1 /* for unblocking signals */ | ||
94 | #define SIG_SETMASK 2 /* for setting the signal mask */ | ||
95 | |||
96 | #define SIG_DFL ((__sighandler_t)0) /* default signal handling */ | ||
97 | #define SIG_IGN ((__sighandler_t)1) /* ignore signal */ | ||
98 | #define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ | ||
99 | |||
100 | # ifndef __ASSEMBLY__ | ||
101 | |||
102 | # include <linux/types.h> | ||
103 | |||
104 | /* Avoid too many header ordering problems. */ | ||
105 | struct siginfo; | ||
106 | |||
107 | /* Type of a signal handler. */ | ||
108 | #ifdef CONFIG_64BIT | ||
109 | /* function pointers on 64-bit parisc are pointers to little structs and the | ||
110 | * compiler doesn't support code which changes or tests the address of | ||
111 | * the function in the little struct. This is really ugly -PB | ||
112 | */ | ||
113 | typedef char __user *__sighandler_t; | ||
114 | #else | ||
115 | typedef void __signalfn_t(int); | ||
116 | typedef __signalfn_t __user *__sighandler_t; | ||
117 | #endif | ||
118 | |||
119 | typedef struct sigaltstack { | ||
120 | void __user *ss_sp; | ||
121 | int ss_flags; | ||
122 | size_t ss_size; | ||
123 | } stack_t; | ||
124 | |||
125 | #ifdef __KERNEL__ | ||
126 | |||
127 | /* Most things should be clean enough to redefine this at will, if care | ||
128 | is taken to make libc match. */ | ||
129 | |||
130 | typedef unsigned long old_sigset_t; /* at least 32 bits */ | ||
131 | |||
132 | typedef struct { | ||
133 | /* next_signal() assumes this is a long - no choice */ | ||
134 | unsigned long sig[_NSIG_WORDS]; | ||
135 | } sigset_t; | ||
136 | |||
137 | struct sigaction { | ||
138 | __sighandler_t sa_handler; | ||
139 | unsigned long sa_flags; | ||
140 | sigset_t sa_mask; /* mask last for extensibility */ | ||
141 | }; | ||
142 | |||
143 | struct k_sigaction { | ||
144 | struct sigaction sa; | ||
145 | }; | ||
146 | |||
147 | #define ptrace_signal_deliver(regs, cookie) do { } while (0) | ||
148 | |||
149 | #include <asm/sigcontext.h> | ||
150 | |||
151 | #endif /* __KERNEL__ */ | ||
152 | #endif /* !__ASSEMBLY */ | ||
153 | #endif /* _ASM_PARISC_SIGNAL_H */ | ||
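The SA_* flags documented above carry the usual sigaction(2) semantics. As a hedged, generic userspace illustration (ordinary POSIX code, not part of this header; the handler and program are made up for the example), the snippet below installs a SIGINT handler with SA_SIGINFO and SA_RESTART:

    /* Generic POSIX illustration of the SA_* flags documented above.
     * Userspace example only, not parisc-specific. */
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static void on_sigint(int sig, siginfo_t *info, void *ctx)
    {
        (void)sig; (void)info; (void)ctx;
        /* write() is async-signal-safe, unlike printf() */
        write(STDOUT_FILENO, "SIGINT\n", 7);
    }

    int main(void)
    {
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_sigaction = on_sigint;
        sa.sa_flags = SA_SIGINFO | SA_RESTART;  /* siginfo handler; restart syscalls */
        sigemptyset(&sa.sa_mask);
        if (sigaction(SIGINT, &sa, NULL))
            perror("sigaction");
        pause();                                /* wait for the signal */
        return 0;
    }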
diff --git a/arch/parisc/include/asm/smp.h b/arch/parisc/include/asm/smp.h new file mode 100644 index 000000000000..398cdbaf4e54 --- /dev/null +++ b/arch/parisc/include/asm/smp.h | |||
@@ -0,0 +1,68 @@ | |||
1 | #ifndef __ASM_SMP_H | ||
2 | #define __ASM_SMP_H | ||
3 | |||
4 | |||
5 | #if defined(CONFIG_SMP) | ||
6 | |||
7 | /* Page Zero locations where PDC will look for the address to branch to when | ||
8 | ** we poke slave CPUs that are still in the "Icache loop". | ||
9 | */ | ||
10 | #define PDC_OS_BOOT_RENDEZVOUS 0x10 | ||
11 | #define PDC_OS_BOOT_RENDEZVOUS_HI 0x28 | ||
12 | |||
13 | #ifndef ASSEMBLY | ||
14 | #include <linux/bitops.h> | ||
15 | #include <linux/threads.h> /* for NR_CPUS */ | ||
16 | #include <linux/cpumask.h> | ||
17 | typedef unsigned long address_t; | ||
18 | |||
19 | extern cpumask_t cpu_online_map; | ||
20 | |||
21 | |||
22 | /* | ||
23 | * Private routines/data | ||
24 | * | ||
25 | * physical and logical are equivalent until we support CPU hotplug. | ||
26 | */ | ||
27 | #define cpu_number_map(cpu) (cpu) | ||
28 | #define cpu_logical_map(cpu) (cpu) | ||
29 | |||
30 | extern void smp_send_reschedule(int cpu); | ||
31 | extern void smp_send_all_nop(void); | ||
32 | |||
33 | extern void arch_send_call_function_single_ipi(int cpu); | ||
34 | extern void arch_send_call_function_ipi(cpumask_t mask); | ||
35 | |||
36 | #endif /* !ASSEMBLY */ | ||
37 | |||
38 | /* | ||
39 | * This magic constant controls our willingness to transfer | ||
40 | * a process across CPUs. Such a transfer incurs cache and tlb | ||
41 | * misses. The current value is inherited from i386. Still needs | ||
42 | * to be tuned for parisc. | ||
43 | */ | ||
44 | |||
45 | #define PROC_CHANGE_PENALTY 15 /* Schedule penalty */ | ||
46 | |||
47 | extern unsigned long cpu_present_mask; | ||
48 | |||
49 | #define raw_smp_processor_id() (current_thread_info()->cpu) | ||
50 | |||
51 | #else /* CONFIG_SMP */ | ||
52 | |||
53 | static inline void smp_send_all_nop(void) { return; } | ||
54 | |||
55 | #endif | ||
56 | |||
57 | #define NO_PROC_ID 0xFF /* No processor magic marker */ | ||
58 | #define ANY_PROC_ID 0xFF /* Any processor magic marker */ | ||
59 | static inline int __cpu_disable (void) { | ||
60 | return 0; | ||
61 | } | ||
62 | static inline void __cpu_die (unsigned int cpu) { | ||
63 | while(1) | ||
64 | ; | ||
65 | } | ||
66 | extern int __cpu_up (unsigned int cpu); | ||
67 | |||
68 | #endif /* __ASM_SMP_H */ | ||
diff --git a/arch/parisc/include/asm/socket.h b/arch/parisc/include/asm/socket.h new file mode 100644 index 000000000000..fba402c95ac2 --- /dev/null +++ b/arch/parisc/include/asm/socket.h | |||
@@ -0,0 +1,62 @@ | |||
1 | #ifndef _ASM_SOCKET_H | ||
2 | #define _ASM_SOCKET_H | ||
3 | |||
4 | #include <asm/sockios.h> | ||
5 | |||
6 | /* For setsockopt(2) */ | ||
7 | #define SOL_SOCKET 0xffff | ||
8 | |||
9 | #define SO_DEBUG 0x0001 | ||
10 | #define SO_REUSEADDR 0x0004 | ||
11 | #define SO_KEEPALIVE 0x0008 | ||
12 | #define SO_DONTROUTE 0x0010 | ||
13 | #define SO_BROADCAST 0x0020 | ||
14 | #define SO_LINGER 0x0080 | ||
15 | #define SO_OOBINLINE 0x0100 | ||
16 | /* To add: #define SO_REUSEPORT 0x0200 */ | ||
17 | #define SO_SNDBUF 0x1001 | ||
18 | #define SO_RCVBUF 0x1002 | ||
19 | #define SO_SNDBUFFORCE 0x100a | ||
20 | #define SO_RCVBUFFORCE 0x100b | ||
21 | #define SO_SNDLOWAT 0x1003 | ||
22 | #define SO_RCVLOWAT 0x1004 | ||
23 | #define SO_SNDTIMEO 0x1005 | ||
24 | #define SO_RCVTIMEO 0x1006 | ||
25 | #define SO_ERROR 0x1007 | ||
26 | #define SO_TYPE 0x1008 | ||
27 | #define SO_PEERNAME 0x2000 | ||
28 | |||
29 | #define SO_NO_CHECK 0x400b | ||
30 | #define SO_PRIORITY 0x400c | ||
31 | #define SO_BSDCOMPAT 0x400e | ||
32 | #define SO_PASSCRED 0x4010 | ||
33 | #define SO_PEERCRED 0x4011 | ||
34 | #define SO_TIMESTAMP 0x4012 | ||
35 | #define SCM_TIMESTAMP SO_TIMESTAMP | ||
36 | #define SO_TIMESTAMPNS 0x4013 | ||
37 | #define SCM_TIMESTAMPNS SO_TIMESTAMPNS | ||
38 | |||
39 | /* Security levels - as per NRL IPv6 - don't actually do anything */ | ||
40 | #define SO_SECURITY_AUTHENTICATION 0x4016 | ||
41 | #define SO_SECURITY_ENCRYPTION_TRANSPORT 0x4017 | ||
42 | #define SO_SECURITY_ENCRYPTION_NETWORK 0x4018 | ||
43 | |||
44 | #define SO_BINDTODEVICE 0x4019 | ||
45 | |||
46 | /* Socket filtering */ | ||
47 | #define SO_ATTACH_FILTER 0x401a | ||
48 | #define SO_DETACH_FILTER 0x401b | ||
49 | |||
50 | #define SO_ACCEPTCONN 0x401c | ||
51 | |||
52 | #define SO_PEERSEC 0x401d | ||
53 | #define SO_PASSSEC 0x401e | ||
54 | |||
55 | #define SO_MARK 0x401f | ||
56 | |||
57 | /* O_NONBLOCK clashes with the bits used for socket types. Therefore we | ||
58 | * have to define SOCK_NONBLOCK to a different value here. | ||
59 | */ | ||
60 | #define SOCK_NONBLOCK 0x40000000 | ||
61 | |||
62 | #endif /* _ASM_SOCKET_H */ | ||
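The SO_* values above are the parisc encodings of the standard SOL_SOCKET options, and SOCK_NONBLOCK is relocated to avoid the O_NONBLOCK clash noted in the comment. As a hedged, generic illustration of how these constants are consumed (plain POSIX userspace code, nothing parisc-specific assumed beyond the constants themselves):

    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        /* SOCK_NONBLOCK is OR'ed into the socket type, which is why it
         * must not collide with O_NONBLOCK on this architecture. */
        int fd = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);
        int one = 1;

        if (fd < 0)
            return 1;
        /* SO_REUSEADDR is one of the SOL_SOCKET options defined above. */
        if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)))
            perror("setsockopt");
        close(fd);
        return 0;
    }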
diff --git a/arch/parisc/include/asm/sockios.h b/arch/parisc/include/asm/sockios.h new file mode 100644 index 000000000000..dabfbc7483f6 --- /dev/null +++ b/arch/parisc/include/asm/sockios.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef __ARCH_PARISC_SOCKIOS__ | ||
2 | #define __ARCH_PARISC_SOCKIOS__ | ||
3 | |||
4 | /* Socket-level I/O control calls. */ | ||
5 | #define FIOSETOWN 0x8901 | ||
6 | #define SIOCSPGRP 0x8902 | ||
7 | #define FIOGETOWN 0x8903 | ||
8 | #define SIOCGPGRP 0x8904 | ||
9 | #define SIOCATMARK 0x8905 | ||
10 | #define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ | ||
11 | #define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ | ||
12 | |||
13 | #endif | ||
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h new file mode 100644 index 000000000000..f3d2090a18dc --- /dev/null +++ b/arch/parisc/include/asm/spinlock.h | |||
@@ -0,0 +1,194 @@ | |||
1 | #ifndef __ASM_SPINLOCK_H | ||
2 | #define __ASM_SPINLOCK_H | ||
3 | |||
4 | #include <asm/system.h> | ||
5 | #include <asm/processor.h> | ||
6 | #include <asm/spinlock_types.h> | ||
7 | |||
8 | static inline int __raw_spin_is_locked(raw_spinlock_t *x) | ||
9 | { | ||
10 | volatile unsigned int *a = __ldcw_align(x); | ||
11 | return *a == 0; | ||
12 | } | ||
13 | |||
14 | #define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0) | ||
15 | #define __raw_spin_unlock_wait(x) \ | ||
16 | do { cpu_relax(); } while (__raw_spin_is_locked(x)) | ||
17 | |||
18 | static inline void __raw_spin_lock_flags(raw_spinlock_t *x, | ||
19 | unsigned long flags) | ||
20 | { | ||
21 | volatile unsigned int *a; | ||
22 | |||
23 | mb(); | ||
24 | a = __ldcw_align(x); | ||
25 | while (__ldcw(a) == 0) | ||
26 | while (*a == 0) | ||
27 | if (flags & PSW_SM_I) { | ||
28 | local_irq_enable(); | ||
29 | cpu_relax(); | ||
30 | local_irq_disable(); | ||
31 | } else | ||
32 | cpu_relax(); | ||
33 | mb(); | ||
34 | } | ||
35 | |||
36 | static inline void __raw_spin_unlock(raw_spinlock_t *x) | ||
37 | { | ||
38 | volatile unsigned int *a; | ||
39 | mb(); | ||
40 | a = __ldcw_align(x); | ||
41 | *a = 1; | ||
42 | mb(); | ||
43 | } | ||
44 | |||
45 | static inline int __raw_spin_trylock(raw_spinlock_t *x) | ||
46 | { | ||
47 | volatile unsigned int *a; | ||
48 | int ret; | ||
49 | |||
50 | mb(); | ||
51 | a = __ldcw_align(x); | ||
52 | ret = __ldcw(a) != 0; | ||
53 | mb(); | ||
54 | |||
55 | return ret; | ||
56 | } | ||
57 | |||
58 | /* | ||
59 | * Read-write spinlocks, allowing multiple readers but only one writer. | ||
60 | * Linux rwlocks are unfair to writers; they can be starved for an indefinite | ||
61 | * time by readers. With care, they can also be taken in interrupt context. | ||
62 | * | ||
63 | * In the PA-RISC implementation, we have a spinlock and a counter. | ||
64 | * Readers use the lock to serialise their access to the counter (which | ||
65 | * records how many readers currently hold the lock). | ||
66 | * Writers hold the spinlock, preventing any readers or other writers from | ||
67 | * grabbing the rwlock. | ||
68 | */ | ||
69 | |||
70 | /* Note that we have to ensure interrupts are disabled in case we're | ||
71 | * interrupted by some other code that wants to grab the same read lock */ | ||
72 | static __inline__ void __raw_read_lock(raw_rwlock_t *rw) | ||
73 | { | ||
74 | unsigned long flags; | ||
75 | local_irq_save(flags); | ||
76 | __raw_spin_lock_flags(&rw->lock, flags); | ||
77 | rw->counter++; | ||
78 | __raw_spin_unlock(&rw->lock); | ||
79 | local_irq_restore(flags); | ||
80 | } | ||
81 | |||
82 | /* Note that we have to ensure interrupts are disabled in case we're | ||
83 | * interrupted by some other code that wants to grab the same read lock */ | ||
84 | static __inline__ void __raw_read_unlock(raw_rwlock_t *rw) | ||
85 | { | ||
86 | unsigned long flags; | ||
87 | local_irq_save(flags); | ||
88 | __raw_spin_lock_flags(&rw->lock, flags); | ||
89 | rw->counter--; | ||
90 | __raw_spin_unlock(&rw->lock); | ||
91 | local_irq_restore(flags); | ||
92 | } | ||
93 | |||
94 | /* Note that we have to ensure interrupts are disabled in case we're | ||
95 | * interrupted by some other code that wants to grab the same read lock */ | ||
96 | static __inline__ int __raw_read_trylock(raw_rwlock_t *rw) | ||
97 | { | ||
98 | unsigned long flags; | ||
99 | retry: | ||
100 | local_irq_save(flags); | ||
101 | if (__raw_spin_trylock(&rw->lock)) { | ||
102 | rw->counter++; | ||
103 | __raw_spin_unlock(&rw->lock); | ||
104 | local_irq_restore(flags); | ||
105 | return 1; | ||
106 | } | ||
107 | |||
108 | local_irq_restore(flags); | ||
109 | /* If write-locked, we fail to acquire the lock */ | ||
110 | if (rw->counter < 0) | ||
111 | return 0; | ||
112 | |||
113 | /* Wait until we have a realistic chance at the lock */ | ||
114 | while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0) | ||
115 | cpu_relax(); | ||
116 | |||
117 | goto retry; | ||
118 | } | ||
119 | |||
120 | /* Note that we have to ensure interrupts are disabled in case we're | ||
121 | * interrupted by some other code that wants to read_trylock() this lock */ | ||
122 | static __inline__ void __raw_write_lock(raw_rwlock_t *rw) | ||
123 | { | ||
124 | unsigned long flags; | ||
125 | retry: | ||
126 | local_irq_save(flags); | ||
127 | __raw_spin_lock_flags(&rw->lock, flags); | ||
128 | |||
129 | if (rw->counter != 0) { | ||
130 | __raw_spin_unlock(&rw->lock); | ||
131 | local_irq_restore(flags); | ||
132 | |||
133 | while (rw->counter != 0) | ||
134 | cpu_relax(); | ||
135 | |||
136 | goto retry; | ||
137 | } | ||
138 | |||
139 | rw->counter = -1; /* mark as write-locked */ | ||
140 | mb(); | ||
141 | local_irq_restore(flags); | ||
142 | } | ||
143 | |||
144 | static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) | ||
145 | { | ||
146 | rw->counter = 0; | ||
147 | __raw_spin_unlock(&rw->lock); | ||
148 | } | ||
149 | |||
150 | /* Note that we have to ensure interrupts are disabled in case we're | ||
151 | * interrupted by some other code that wants to read_trylock() this lock */ | ||
152 | static __inline__ int __raw_write_trylock(raw_rwlock_t *rw) | ||
153 | { | ||
154 | unsigned long flags; | ||
155 | int result = 0; | ||
156 | |||
157 | local_irq_save(flags); | ||
158 | if (__raw_spin_trylock(&rw->lock)) { | ||
159 | if (rw->counter == 0) { | ||
160 | rw->counter = -1; | ||
161 | result = 1; | ||
162 | } else { | ||
163 | /* Read-locked. Oh well. */ | ||
164 | __raw_spin_unlock(&rw->lock); | ||
165 | } | ||
166 | } | ||
167 | local_irq_restore(flags); | ||
168 | |||
169 | return result; | ||
170 | } | ||
171 | |||
172 | /* | ||
173 | * read_can_lock - would read_trylock() succeed? | ||
174 | * @lock: the rwlock in question. | ||
175 | */ | ||
176 | static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw) | ||
177 | { | ||
178 | return rw->counter >= 0; | ||
179 | } | ||
180 | |||
181 | /* | ||
182 | * write_can_lock - would write_trylock() succeed? | ||
183 | * @lock: the rwlock in question. | ||
184 | */ | ||
185 | static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw) | ||
186 | { | ||
187 | return !rw->counter; | ||
188 | } | ||
189 | |||
190 | #define _raw_spin_relax(lock) cpu_relax() | ||
191 | #define _raw_read_relax(lock) cpu_relax() | ||
192 | #define _raw_write_relax(lock) cpu_relax() | ||
193 | |||
194 | #endif /* __ASM_SPINLOCK_H */ | ||
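The rwlock comment above describes a counter protocol: a counter of zero or more records how many readers currently hold the lock, -1 marks it write-locked, and the embedded spinlock serialises updates. A minimal single-threaded model of just that state encoding is sketched below; the struct and function names are invented for illustration, and the spinlock and atomicity are deliberately omitted.

    /* Single-threaded model of the rwlock counter protocol described above:
     * counter >= 0 means "that many readers hold the lock",
     * counter == -1 means "write-locked".  Illustration only, not a lock. */
    #include <assert.h>

    struct model_rwlock { int counter; };   /* hypothetical stand-in for raw_rwlock_t */

    static int model_read_trylock(struct model_rwlock *rw)
    {
        if (rw->counter < 0)                /* write-locked: fail */
            return 0;
        rw->counter++;                      /* one more reader */
        return 1;
    }

    static int model_write_trylock(struct model_rwlock *rw)
    {
        if (rw->counter != 0)               /* readers or a writer present */
            return 0;
        rw->counter = -1;                   /* mark write-locked */
        return 1;
    }

    int main(void)
    {
        struct model_rwlock rw = { 0 };

        assert(model_read_trylock(&rw));    /* first reader */
        assert(model_read_trylock(&rw));    /* concurrent reader */
        assert(!model_write_trylock(&rw));  /* writer excluded while readers exist */
        rw.counter -= 2;                    /* both readers unlock */
        assert(model_write_trylock(&rw));   /* now the writer wins */
        assert(!model_read_trylock(&rw));   /* readers excluded */
        return 0;
    }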
diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h new file mode 100644 index 000000000000..3f72f47cf4b2 --- /dev/null +++ b/arch/parisc/include/asm/spinlock_types.h | |||
@@ -0,0 +1,21 @@ | |||
1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
2 | #define __ASM_SPINLOCK_TYPES_H | ||
3 | |||
4 | typedef struct { | ||
5 | #ifdef CONFIG_PA20 | ||
6 | volatile unsigned int slock; | ||
7 | # define __RAW_SPIN_LOCK_UNLOCKED { 1 } | ||
8 | #else | ||
9 | volatile unsigned int lock[4]; | ||
10 | # define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } } | ||
11 | #endif | ||
12 | } raw_spinlock_t; | ||
13 | |||
14 | typedef struct { | ||
15 | raw_spinlock_t lock; | ||
16 | volatile int counter; | ||
17 | } raw_rwlock_t; | ||
18 | |||
19 | #define __RAW_RW_LOCK_UNLOCKED { __RAW_SPIN_LOCK_UNLOCKED, 0 } | ||
20 | |||
21 | #endif | ||
diff --git a/arch/parisc/include/asm/stat.h b/arch/parisc/include/asm/stat.h new file mode 100644 index 000000000000..9d5fbbc5c31f --- /dev/null +++ b/arch/parisc/include/asm/stat.h | |||
@@ -0,0 +1,100 @@ | |||
1 | #ifndef _PARISC_STAT_H | ||
2 | #define _PARISC_STAT_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | |||
6 | struct stat { | ||
7 | unsigned int st_dev; /* dev_t is 32 bits on parisc */ | ||
8 | ino_t st_ino; /* 32 bits */ | ||
9 | mode_t st_mode; /* 16 bits */ | ||
10 | nlink_t st_nlink; /* 16 bits */ | ||
11 | unsigned short st_reserved1; /* old st_uid */ | ||
12 | unsigned short st_reserved2; /* old st_gid */ | ||
13 | unsigned int st_rdev; | ||
14 | off_t st_size; | ||
15 | time_t st_atime; | ||
16 | unsigned int st_atime_nsec; | ||
17 | time_t st_mtime; | ||
18 | unsigned int st_mtime_nsec; | ||
19 | time_t st_ctime; | ||
20 | unsigned int st_ctime_nsec; | ||
21 | int st_blksize; | ||
22 | int st_blocks; | ||
23 | unsigned int __unused1; /* ACL stuff */ | ||
24 | unsigned int __unused2; /* network */ | ||
25 | ino_t __unused3; /* network */ | ||
26 | unsigned int __unused4; /* cnodes */ | ||
27 | unsigned short __unused5; /* netsite */ | ||
28 | short st_fstype; | ||
29 | unsigned int st_realdev; | ||
30 | unsigned short st_basemode; | ||
31 | unsigned short st_spareshort; | ||
32 | uid_t st_uid; | ||
33 | gid_t st_gid; | ||
34 | unsigned int st_spare4[3]; | ||
35 | }; | ||
36 | |||
37 | #define STAT_HAVE_NSEC | ||
38 | |||
39 | typedef __kernel_off64_t off64_t; | ||
40 | |||
41 | struct hpux_stat64 { | ||
42 | unsigned int st_dev; /* dev_t is 32 bits on parisc */ | ||
43 | ino_t st_ino; /* 32 bits */ | ||
44 | mode_t st_mode; /* 16 bits */ | ||
45 | nlink_t st_nlink; /* 16 bits */ | ||
46 | unsigned short st_reserved1; /* old st_uid */ | ||
47 | unsigned short st_reserved2; /* old st_gid */ | ||
48 | unsigned int st_rdev; | ||
49 | off64_t st_size; | ||
50 | time_t st_atime; | ||
51 | unsigned int st_spare1; | ||
52 | time_t st_mtime; | ||
53 | unsigned int st_spare2; | ||
54 | time_t st_ctime; | ||
55 | unsigned int st_spare3; | ||
56 | int st_blksize; | ||
57 | __u64 st_blocks; | ||
58 | unsigned int __unused1; /* ACL stuff */ | ||
59 | unsigned int __unused2; /* network */ | ||
60 | ino_t __unused3; /* network */ | ||
61 | unsigned int __unused4; /* cnodes */ | ||
62 | unsigned short __unused5; /* netsite */ | ||
63 | short st_fstype; | ||
64 | unsigned int st_realdev; | ||
65 | unsigned short st_basemode; | ||
66 | unsigned short st_spareshort; | ||
67 | uid_t st_uid; | ||
68 | gid_t st_gid; | ||
69 | unsigned int st_spare4[3]; | ||
70 | }; | ||
71 | |||
72 | /* This is the struct that 32-bit userspace applications are expecting. | ||
73 | * How 64-bit apps are going to be compiled, I have no idea. But at least | ||
74 | * this way, we don't have a wrapper in the kernel. | ||
75 | */ | ||
76 | struct stat64 { | ||
77 | unsigned long long st_dev; | ||
78 | unsigned int __pad1; | ||
79 | |||
80 | unsigned int __st_ino; /* Not actually filled in */ | ||
81 | unsigned int st_mode; | ||
82 | unsigned int st_nlink; | ||
83 | unsigned int st_uid; | ||
84 | unsigned int st_gid; | ||
85 | unsigned long long st_rdev; | ||
86 | unsigned int __pad2; | ||
87 | signed long long st_size; | ||
88 | signed int st_blksize; | ||
89 | |||
90 | signed long long st_blocks; | ||
91 | signed int st_atime; | ||
92 | unsigned int st_atime_nsec; | ||
93 | signed int st_mtime; | ||
94 | unsigned int st_mtime_nsec; | ||
95 | signed int st_ctime; | ||
96 | unsigned int st_ctime_nsec; | ||
97 | unsigned long long st_ino; | ||
98 | }; | ||
99 | |||
100 | #endif | ||
diff --git a/arch/parisc/include/asm/statfs.h b/arch/parisc/include/asm/statfs.h new file mode 100644 index 000000000000..324bea905dc6 --- /dev/null +++ b/arch/parisc/include/asm/statfs.h | |||
@@ -0,0 +1,7 @@ | |||
1 | #ifndef _PARISC_STATFS_H | ||
2 | #define _PARISC_STATFS_H | ||
3 | |||
4 | #define __statfs_word long | ||
5 | #include <asm-generic/statfs.h> | ||
6 | |||
7 | #endif | ||
diff --git a/arch/parisc/include/asm/string.h b/arch/parisc/include/asm/string.h new file mode 100644 index 000000000000..eda01be65e35 --- /dev/null +++ b/arch/parisc/include/asm/string.h | |||
@@ -0,0 +1,10 @@ | |||
1 | #ifndef _PA_STRING_H_ | ||
2 | #define _PA_STRING_H_ | ||
3 | |||
4 | #define __HAVE_ARCH_MEMSET | ||
5 | extern void * memset(void *, int, size_t); | ||
6 | |||
7 | #define __HAVE_ARCH_MEMCPY | ||
8 | void * memcpy(void * dest,const void *src,size_t count); | ||
9 | |||
10 | #endif | ||
diff --git a/arch/parisc/include/asm/superio.h b/arch/parisc/include/asm/superio.h new file mode 100644 index 000000000000..6598acb4d46d --- /dev/null +++ b/arch/parisc/include/asm/superio.h | |||
@@ -0,0 +1,85 @@ | |||
1 | #ifndef _PARISC_SUPERIO_H | ||
2 | #define _PARISC_SUPERIO_H | ||
3 | |||
4 | #define IC_PIC1 0x20 /* PCI I/O address of master 8259 */ | ||
5 | #define IC_PIC2 0xA0 /* PCI I/O address of slave */ | ||
6 | |||
7 | /* Config Space Offsets to configuration and base address registers */ | ||
8 | #define SIO_CR 0x5A /* Configuration Register */ | ||
9 | #define SIO_ACPIBAR 0x88 /* ACPI BAR */ | ||
10 | #define SIO_FDCBAR 0x90 /* Floppy Disk Controller BAR */ | ||
11 | #define SIO_SP1BAR 0x94 /* Serial 1 BAR */ | ||
12 | #define SIO_SP2BAR 0x98 /* Serial 2 BAR */ | ||
13 | #define SIO_PPBAR 0x9C /* Parallel BAR */ | ||
14 | |||
15 | #define TRIGGER_1 0x67 /* Edge/level trigger register 1 */ | ||
16 | #define TRIGGER_2 0x68 /* Edge/level trigger register 2 */ | ||
17 | |||
18 | /* Interrupt Routing Control registers */ | ||
19 | #define CFG_IR_SER 0x69 /* Serial 1 [0:3] and Serial 2 [4:7] */ | ||
20 | #define CFG_IR_PFD 0x6a /* Parallel [0:3] and Floppy [4:7] */ | ||
21 | #define CFG_IR_IDE 0x6b /* IDE1 [0:3] and IDE2 [4:7] */ | ||
22 | #define CFG_IR_INTAB 0x6c /* PCI INTA [0:3] and INT B [4:7] */ | ||
23 | #define CFG_IR_INTCD 0x6d /* PCI INTC [0:3] and INT D [4:7] */ | ||
24 | #define CFG_IR_PS2 0x6e /* PS/2 KBINT [0:3] and Mouse [4:7] */ | ||
25 | #define CFG_IR_FXBUS 0x6f /* FXIRQ[0] [0:3] and FXIRQ[1] [4:7] */ | ||
26 | #define CFG_IR_USB 0x70 /* FXIRQ[2] [0:3] and USB [4:7] */ | ||
27 | #define CFG_IR_ACPI 0x71 /* ACPI SCI [0:3] and reserved [4:7] */ | ||
28 | |||
29 | #define CFG_IR_LOW CFG_IR_SER /* Lowest interrupt routing reg */ | ||
30 | #define CFG_IR_HIGH CFG_IR_ACPI /* Highest interrupt routing reg */ | ||
31 | |||
32 | /* 8259 operational control words */ | ||
33 | #define OCW2_EOI 0x20 /* Non-specific EOI */ | ||
34 | #define OCW2_SEOI 0x60 /* Specific EOI */ | ||
35 | #define OCW3_IIR 0x0A /* Read request register */ | ||
36 | #define OCW3_ISR 0x0B /* Read service register */ | ||
37 | #define OCW3_POLL 0x0C /* Poll the PIC for an interrupt vector */ | ||
38 | |||
39 | /* Interrupt lines. Only PIC1 is used */ | ||
40 | #define USB_IRQ 1 /* USB */ | ||
41 | #define SP1_IRQ 3 /* Serial port 1 */ | ||
42 | #define SP2_IRQ 4 /* Serial port 2 */ | ||
43 | #define PAR_IRQ 5 /* Parallel port */ | ||
44 | #define FDC_IRQ 6 /* Floppy controller */ | ||
45 | #define IDE_IRQ 7 /* IDE (pri+sec) */ | ||
46 | |||
47 | /* ACPI registers */ | ||
48 | #define USB_REG_CR 0x1f /* USB Regulator Control Register */ | ||
49 | |||
50 | #define SUPERIO_NIRQS 8 | ||
51 | |||
52 | struct superio_device { | ||
53 | u32 fdc_base; | ||
54 | u32 sp1_base; | ||
55 | u32 sp2_base; | ||
56 | u32 pp_base; | ||
57 | u32 acpi_base; | ||
58 | int suckyio_irq_enabled; | ||
59 | struct pci_dev *lio_pdev; /* pci device for legacy IO (fn 1) */ | ||
60 | struct pci_dev *usb_pdev; /* pci device for USB (fn 2) */ | ||
61 | }; | ||
62 | |||
63 | /* | ||
64 | * Does NS make an 87415-based plug-in PCI card? If so, because of this | ||
65 | * macro we currently don't support it being plugged into a machine | ||
66 | * that contains a SuperIO chip AND has CONFIG_SUPERIO enabled. | ||
67 | * | ||
68 | * This could be fixed by checking to see if function 1 exists, and | ||
69 | * if it is SuperIO Legacy IO; but really now, is this combination | ||
70 | * going to EVER happen? | ||
71 | */ | ||
72 | |||
73 | #define SUPERIO_IDE_FN 0 /* Function number of IDE controller */ | ||
74 | #define SUPERIO_LIO_FN 1 /* Function number of Legacy IO controller */ | ||
75 | #define SUPERIO_USB_FN 2 /* Function number of USB controller */ | ||
76 | |||
77 | #define is_superio_device(x) \ | ||
78 | (((x)->vendor == PCI_VENDOR_ID_NS) && \ | ||
79 | ( ((x)->device == PCI_DEVICE_ID_NS_87415) \ | ||
80 | || ((x)->device == PCI_DEVICE_ID_NS_87560_LIO) \ | ||
81 | || ((x)->device == PCI_DEVICE_ID_NS_87560_USB) ) ) | ||
82 | |||
83 | extern int superio_fixup_irq(struct pci_dev *pcidev); /* called by iosapic */ | ||
84 | |||
85 | #endif /* _PARISC_SUPERIO_H */ | ||
diff --git a/arch/parisc/include/asm/system.h b/arch/parisc/include/asm/system.h new file mode 100644 index 000000000000..ee80c920b464 --- /dev/null +++ b/arch/parisc/include/asm/system.h | |||
@@ -0,0 +1,182 @@ | |||
1 | #ifndef __PARISC_SYSTEM_H | ||
2 | #define __PARISC_SYSTEM_H | ||
3 | |||
4 | #include <asm/psw.h> | ||
5 | |||
6 | /* The program status word as bitfields. */ | ||
7 | struct pa_psw { | ||
8 | unsigned int y:1; | ||
9 | unsigned int z:1; | ||
10 | unsigned int rv:2; | ||
11 | unsigned int w:1; | ||
12 | unsigned int e:1; | ||
13 | unsigned int s:1; | ||
14 | unsigned int t:1; | ||
15 | |||
16 | unsigned int h:1; | ||
17 | unsigned int l:1; | ||
18 | unsigned int n:1; | ||
19 | unsigned int x:1; | ||
20 | unsigned int b:1; | ||
21 | unsigned int c:1; | ||
22 | unsigned int v:1; | ||
23 | unsigned int m:1; | ||
24 | |||
25 | unsigned int cb:8; | ||
26 | |||
27 | unsigned int o:1; | ||
28 | unsigned int g:1; | ||
29 | unsigned int f:1; | ||
30 | unsigned int r:1; | ||
31 | unsigned int q:1; | ||
32 | unsigned int p:1; | ||
33 | unsigned int d:1; | ||
34 | unsigned int i:1; | ||
35 | }; | ||
36 | |||
37 | #ifdef CONFIG_64BIT | ||
38 | #define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW + 4)) | ||
39 | #else | ||
40 | #define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW)) | ||
41 | #endif | ||
42 | |||
43 | struct task_struct; | ||
44 | |||
45 | extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *); | ||
46 | |||
47 | #define switch_to(prev, next, last) do { \ | ||
48 | (last) = _switch_to(prev, next); \ | ||
49 | } while(0) | ||
50 | |||
51 | /* interrupt control */ | ||
52 | #define local_save_flags(x) __asm__ __volatile__("ssm 0, %0" : "=r" (x) : : "memory") | ||
53 | #define local_irq_disable() __asm__ __volatile__("rsm %0,%%r0\n" : : "i" (PSW_I) : "memory" ) | ||
54 | #define local_irq_enable() __asm__ __volatile__("ssm %0,%%r0\n" : : "i" (PSW_I) : "memory" ) | ||
55 | |||
56 | #define local_irq_save(x) \ | ||
57 | __asm__ __volatile__("rsm %1,%0" : "=r" (x) :"i" (PSW_I) : "memory" ) | ||
58 | #define local_irq_restore(x) \ | ||
59 | __asm__ __volatile__("mtsm %0" : : "r" (x) : "memory" ) | ||
60 | |||
61 | #define irqs_disabled() \ | ||
62 | ({ \ | ||
63 | unsigned long flags; \ | ||
64 | local_save_flags(flags); \ | ||
65 | (flags & PSW_I) == 0; \ | ||
66 | }) | ||
67 | |||
68 | #define mfctl(reg) ({ \ | ||
69 | unsigned long cr; \ | ||
70 | __asm__ __volatile__( \ | ||
71 | "mfctl " #reg ",%0" : \ | ||
72 | "=r" (cr) \ | ||
73 | ); \ | ||
74 | cr; \ | ||
75 | }) | ||
76 | |||
77 | #define mtctl(gr, cr) \ | ||
78 | __asm__ __volatile__("mtctl %0,%1" \ | ||
79 | : /* no outputs */ \ | ||
80 | : "r" (gr), "i" (cr) : "memory") | ||
81 | |||
82 | /* these are here to demystify the calling code, and to provide hooks */ | ||
83 | /* which I needed for debugging EIEM problems -PB */ | ||
84 | #define get_eiem() mfctl(15) | ||
85 | static inline void set_eiem(unsigned long val) | ||
86 | { | ||
87 | mtctl(val, 15); | ||
88 | } | ||
89 | |||
90 | #define mfsp(reg) ({ \ | ||
91 | unsigned long cr; \ | ||
92 | __asm__ __volatile__( \ | ||
93 | "mfsp " #reg ",%0" : \ | ||
94 | "=r" (cr) \ | ||
95 | ); \ | ||
96 | cr; \ | ||
97 | }) | ||
98 | |||
99 | #define mtsp(gr, cr) \ | ||
100 | __asm__ __volatile__("mtsp %0,%1" \ | ||
101 | : /* no outputs */ \ | ||
102 | : "r" (gr), "i" (cr) : "memory") | ||
103 | |||
104 | |||
105 | /* | ||
106 | ** This is simply the barrier() macro from linux/kernel.h, but when serial.c | ||
107 | ** (via tqueue.h) uses smp_mb(), which is defined using barrier(), linux/kernel.h | ||
108 | ** hasn't been included yet, so it fails; hence the macro is repeated here. | ||
109 | ** | ||
110 | ** PA-RISC architecture allows for weakly ordered memory accesses although | ||
111 | ** none of the processors use it. There is a strong ordered bit that is | ||
112 | ** set in the O-bit of the page directory entry. Operating systems that | ||
113 | ** cannot tolerate out-of-order accesses should set this bit when mapping | ||
114 | ** pages. The O-bit of the PSW should also be set to 1 (I don't believe any | ||
115 | ** of the processors implemented the PSW O-bit). The PCX-W ERS states that | ||
116 | ** the TLB O-bit is not implemented so the page directory does not need to | ||
117 | ** have the O-bit set when mapping pages (section 3.1). This section also | ||
118 | ** states that the PSW Y, Z, G, and O bits are not implemented. | ||
119 | ** So it looks like nothing needs to be done for parisc-linux (yet). | ||
120 | ** (thanks to chada for the above comment -ggg) | ||
121 | ** | ||
122 | ** The __asm__ op below simply prevents gcc/ld from reordering | ||
123 | ** instructions across the mb() "call". | ||
124 | */ | ||
125 | #define mb() __asm__ __volatile__("":::"memory") /* barrier() */ | ||
126 | #define rmb() mb() | ||
127 | #define wmb() mb() | ||
128 | #define smp_mb() mb() | ||
129 | #define smp_rmb() mb() | ||
130 | #define smp_wmb() mb() | ||
131 | #define smp_read_barrier_depends() do { } while(0) | ||
132 | #define read_barrier_depends() do { } while(0) | ||
133 | |||
134 | #define set_mb(var, value) do { var = value; mb(); } while (0) | ||
135 | |||
136 | #ifndef CONFIG_PA20 | ||
137 | /* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data, | ||
138 | and GCC only guarantees 8-byte alignment for stack locals, we can't | ||
139 | be assured of 16-byte alignment for atomic lock data even if we | ||
140 | specify "__attribute ((aligned(16)))" in the type declaration. So, | ||
141 | we use a struct containing an array of four ints for the atomic lock | ||
142 | type and dynamically select the 16-byte aligned int from the array | ||
143 | for the semaphore. */ | ||
144 | |||
145 | #define __PA_LDCW_ALIGNMENT 16 | ||
146 | #define __ldcw_align(a) ({ \ | ||
147 | unsigned long __ret = (unsigned long) &(a)->lock[0]; \ | ||
148 | __ret = (__ret + __PA_LDCW_ALIGNMENT - 1) \ | ||
149 | & ~(__PA_LDCW_ALIGNMENT - 1); \ | ||
150 | (volatile unsigned int *) __ret; \ | ||
151 | }) | ||
152 | #define __LDCW "ldcw" | ||
153 | |||
154 | #else /*CONFIG_PA20*/ | ||
155 | /* From: "Jim Hull" <jim.hull of hp.com> | ||
156 | I've attached a summary of the change, but basically, for PA 2.0, as | ||
157 | long as the ",CO" (coherent operation) completer is specified, then the | ||
158 | 16-byte alignment requirement for ldcw and ldcd is relaxed, and instead | ||
159 | they only require "natural" alignment (4-byte for ldcw, 8-byte for | ||
160 | ldcd). */ | ||
161 | |||
162 | #define __PA_LDCW_ALIGNMENT 4 | ||
163 | #define __ldcw_align(a) ((volatile unsigned int *)a) | ||
164 | #define __LDCW "ldcw,co" | ||
165 | |||
166 | #endif /*!CONFIG_PA20*/ | ||
167 | |||
168 | /* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */ | ||
169 | #define __ldcw(a) ({ \ | ||
170 | unsigned __ret; \ | ||
171 | __asm__ __volatile__(__LDCW " 0(%1),%0" \ | ||
172 | : "=r" (__ret) : "r" (a)); \ | ||
173 | __ret; \ | ||
174 | }) | ||
175 | |||
176 | #ifdef CONFIG_SMP | ||
177 | # define __lock_aligned __attribute__((__section__(".data.lock_aligned"))) | ||
178 | #endif | ||
179 | |||
180 | #define arch_align_stack(x) (x) | ||
181 | |||
182 | #endif | ||
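For the pre-PA2.0 case above, __ldcw_align() relies on the fact that a 16-byte array of four ints must contain at least one 16-byte-aligned word, and selects it with a round-up-and-mask. A hedged userspace sketch of that arithmetic follows; the names are illustrative, not the kernel's.

    /* Sketch of the __ldcw_align() round-up, assuming the pre-PA2.0 layout of
     * four ints (16 bytes) so that at least one of them is 16-byte aligned. */
    #include <assert.h>
    #include <stdint.h>

    #define ALIGNMENT 16

    struct fake_lock { volatile unsigned int lock[4]; };   /* illustrative type */

    static volatile unsigned int *align_lock_word(struct fake_lock *l)
    {
        uintptr_t p = (uintptr_t)&l->lock[0];

        /* round up to the next 16-byte boundary, then mask */
        p = (p + ALIGNMENT - 1) & ~(uintptr_t)(ALIGNMENT - 1);
        return (volatile unsigned int *)p;
    }

    int main(void)
    {
        struct fake_lock l;
        volatile unsigned int *w = align_lock_word(&l);

        /* The selected word is 16-byte aligned and lies inside lock[0..3]. */
        assert(((uintptr_t)w & (ALIGNMENT - 1)) == 0);
        assert((char *)w >= (char *)&l.lock[0] &&
               (char *)w <  (char *)&l.lock[4]);
        return 0;
    }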
diff --git a/arch/parisc/include/asm/termbits.h b/arch/parisc/include/asm/termbits.h new file mode 100644 index 000000000000..d8bbc73b16b7 --- /dev/null +++ b/arch/parisc/include/asm/termbits.h | |||
@@ -0,0 +1,200 @@ | |||
1 | #ifndef __ARCH_PARISC_TERMBITS_H__ | ||
2 | #define __ARCH_PARISC_TERMBITS_H__ | ||
3 | |||
4 | #include <linux/posix_types.h> | ||
5 | |||
6 | typedef unsigned char cc_t; | ||
7 | typedef unsigned int speed_t; | ||
8 | typedef unsigned int tcflag_t; | ||
9 | |||
10 | #define NCCS 19 | ||
11 | struct termios { | ||
12 | tcflag_t c_iflag; /* input mode flags */ | ||
13 | tcflag_t c_oflag; /* output mode flags */ | ||
14 | tcflag_t c_cflag; /* control mode flags */ | ||
15 | tcflag_t c_lflag; /* local mode flags */ | ||
16 | cc_t c_line; /* line discipline */ | ||
17 | cc_t c_cc[NCCS]; /* control characters */ | ||
18 | }; | ||
19 | |||
20 | struct termios2 { | ||
21 | tcflag_t c_iflag; /* input mode flags */ | ||
22 | tcflag_t c_oflag; /* output mode flags */ | ||
23 | tcflag_t c_cflag; /* control mode flags */ | ||
24 | tcflag_t c_lflag; /* local mode flags */ | ||
25 | cc_t c_line; /* line discipline */ | ||
26 | cc_t c_cc[NCCS]; /* control characters */ | ||
27 | speed_t c_ispeed; /* input speed */ | ||
28 | speed_t c_ospeed; /* output speed */ | ||
29 | }; | ||
30 | |||
31 | struct ktermios { | ||
32 | tcflag_t c_iflag; /* input mode flags */ | ||
33 | tcflag_t c_oflag; /* output mode flags */ | ||
34 | tcflag_t c_cflag; /* control mode flags */ | ||
35 | tcflag_t c_lflag; /* local mode flags */ | ||
36 | cc_t c_line; /* line discipline */ | ||
37 | cc_t c_cc[NCCS]; /* control characters */ | ||
38 | speed_t c_ispeed; /* input speed */ | ||
39 | speed_t c_ospeed; /* output speed */ | ||
40 | }; | ||
41 | |||
42 | /* c_cc characters */ | ||
43 | #define VINTR 0 | ||
44 | #define VQUIT 1 | ||
45 | #define VERASE 2 | ||
46 | #define VKILL 3 | ||
47 | #define VEOF 4 | ||
48 | #define VTIME 5 | ||
49 | #define VMIN 6 | ||
50 | #define VSWTC 7 | ||
51 | #define VSTART 8 | ||
52 | #define VSTOP 9 | ||
53 | #define VSUSP 10 | ||
54 | #define VEOL 11 | ||
55 | #define VREPRINT 12 | ||
56 | #define VDISCARD 13 | ||
57 | #define VWERASE 14 | ||
58 | #define VLNEXT 15 | ||
59 | #define VEOL2 16 | ||
60 | |||
61 | |||
62 | /* c_iflag bits */ | ||
63 | #define IGNBRK 0000001 | ||
64 | #define BRKINT 0000002 | ||
65 | #define IGNPAR 0000004 | ||
66 | #define PARMRK 0000010 | ||
67 | #define INPCK 0000020 | ||
68 | #define ISTRIP 0000040 | ||
69 | #define INLCR 0000100 | ||
70 | #define IGNCR 0000200 | ||
71 | #define ICRNL 0000400 | ||
72 | #define IUCLC 0001000 | ||
73 | #define IXON 0002000 | ||
74 | #define IXANY 0004000 | ||
75 | #define IXOFF 0010000 | ||
76 | #define IMAXBEL 0040000 | ||
77 | #define IUTF8 0100000 | ||
78 | |||
79 | /* c_oflag bits */ | ||
80 | #define OPOST 0000001 | ||
81 | #define OLCUC 0000002 | ||
82 | #define ONLCR 0000004 | ||
83 | #define OCRNL 0000010 | ||
84 | #define ONOCR 0000020 | ||
85 | #define ONLRET 0000040 | ||
86 | #define OFILL 0000100 | ||
87 | #define OFDEL 0000200 | ||
88 | #define NLDLY 0000400 | ||
89 | #define NL0 0000000 | ||
90 | #define NL1 0000400 | ||
91 | #define CRDLY 0003000 | ||
92 | #define CR0 0000000 | ||
93 | #define CR1 0001000 | ||
94 | #define CR2 0002000 | ||
95 | #define CR3 0003000 | ||
96 | #define TABDLY 0014000 | ||
97 | #define TAB0 0000000 | ||
98 | #define TAB1 0004000 | ||
99 | #define TAB2 0010000 | ||
100 | #define TAB3 0014000 | ||
101 | #define XTABS 0014000 | ||
102 | #define BSDLY 0020000 | ||
103 | #define BS0 0000000 | ||
104 | #define BS1 0020000 | ||
105 | #define VTDLY 0040000 | ||
106 | #define VT0 0000000 | ||
107 | #define VT1 0040000 | ||
108 | #define FFDLY 0100000 | ||
109 | #define FF0 0000000 | ||
110 | #define FF1 0100000 | ||
111 | |||
112 | /* c_cflag bit meaning */ | ||
113 | #define CBAUD 0010017 | ||
114 | #define B0 0000000 /* hang up */ | ||
115 | #define B50 0000001 | ||
116 | #define B75 0000002 | ||
117 | #define B110 0000003 | ||
118 | #define B134 0000004 | ||
119 | #define B150 0000005 | ||
120 | #define B200 0000006 | ||
121 | #define B300 0000007 | ||
122 | #define B600 0000010 | ||
123 | #define B1200 0000011 | ||
124 | #define B1800 0000012 | ||
125 | #define B2400 0000013 | ||
126 | #define B4800 0000014 | ||
127 | #define B9600 0000015 | ||
128 | #define B19200 0000016 | ||
129 | #define B38400 0000017 | ||
130 | #define EXTA B19200 | ||
131 | #define EXTB B38400 | ||
132 | #define CSIZE 0000060 | ||
133 | #define CS5 0000000 | ||
134 | #define CS6 0000020 | ||
135 | #define CS7 0000040 | ||
136 | #define CS8 0000060 | ||
137 | #define CSTOPB 0000100 | ||
138 | #define CREAD 0000200 | ||
139 | #define PARENB 0000400 | ||
140 | #define PARODD 0001000 | ||
141 | #define HUPCL 0002000 | ||
142 | #define CLOCAL 0004000 | ||
143 | #define CBAUDEX 0010000 | ||
144 | #define BOTHER 0010000 | ||
145 | #define B57600 0010001 | ||
146 | #define B115200 0010002 | ||
147 | #define B230400 0010003 | ||
148 | #define B460800 0010004 | ||
149 | #define B500000 0010005 | ||
150 | #define B576000 0010006 | ||
151 | #define B921600 0010007 | ||
152 | #define B1000000 0010010 | ||
153 | #define B1152000 0010011 | ||
154 | #define B1500000 0010012 | ||
155 | #define B2000000 0010013 | ||
156 | #define B2500000 0010014 | ||
157 | #define B3000000 0010015 | ||
158 | #define B3500000 0010016 | ||
159 | #define B4000000 0010017 | ||
160 | #define CIBAUD 002003600000 /* input baud rate */ | ||
161 | #define CMSPAR 010000000000 /* mark or space (stick) parity */ | ||
162 | #define CRTSCTS 020000000000 /* flow control */ | ||
163 | |||
164 | #define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */ | ||
165 | |||
166 | |||
167 | /* c_lflag bits */ | ||
168 | #define ISIG 0000001 | ||
169 | #define ICANON 0000002 | ||
170 | #define XCASE 0000004 | ||
171 | #define ECHO 0000010 | ||
172 | #define ECHOE 0000020 | ||
173 | #define ECHOK 0000040 | ||
174 | #define ECHONL 0000100 | ||
175 | #define NOFLSH 0000200 | ||
176 | #define TOSTOP 0000400 | ||
177 | #define ECHOCTL 0001000 | ||
178 | #define ECHOPRT 0002000 | ||
179 | #define ECHOKE 0004000 | ||
180 | #define FLUSHO 0010000 | ||
181 | #define PENDIN 0040000 | ||
182 | #define IEXTEN 0100000 | ||
183 | |||
184 | /* tcflow() and TCXONC use these */ | ||
185 | #define TCOOFF 0 | ||
186 | #define TCOON 1 | ||
187 | #define TCIOFF 2 | ||
188 | #define TCION 3 | ||
189 | |||
190 | /* tcflush() and TCFLSH use these */ | ||
191 | #define TCIFLUSH 0 | ||
192 | #define TCOFLUSH 1 | ||
193 | #define TCIOFLUSH 2 | ||
194 | |||
195 | /* tcsetattr uses these */ | ||
196 | #define TCSANOW 0 | ||
197 | #define TCSADRAIN 1 | ||
198 | #define TCSAFLUSH 2 | ||
199 | |||
200 | #endif | ||
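The c_lflag and c_cc definitions above are the bits that tcgetattr()/tcsetattr() ultimately manipulate. As a hedged, generic userspace example (ordinary POSIX termios usage, nothing parisc-specific), the snippet below clears ICANON and ECHO and sets VMIN/VTIME; a real program would save and restore the original settings.

    #include <stdio.h>
    #include <termios.h>
    #include <unistd.h>

    int main(void)
    {
        struct termios t;

        if (tcgetattr(STDIN_FILENO, &t))
            return 1;
        t.c_lflag &= ~(ICANON | ECHO);  /* disable canonical mode and echo */
        t.c_cc[VMIN] = 1;               /* read() returns after 1 byte */
        t.c_cc[VTIME] = 0;              /* no inter-byte timeout */
        if (tcsetattr(STDIN_FILENO, TCSANOW, &t))
            perror("tcsetattr");
        return 0;
    }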
diff --git a/arch/parisc/include/asm/termios.h b/arch/parisc/include/asm/termios.h new file mode 100644 index 000000000000..a2a57a4548af --- /dev/null +++ b/arch/parisc/include/asm/termios.h | |||
@@ -0,0 +1,90 @@ | |||
1 | #ifndef _PARISC_TERMIOS_H | ||
2 | #define _PARISC_TERMIOS_H | ||
3 | |||
4 | #include <asm/termbits.h> | ||
5 | #include <asm/ioctls.h> | ||
6 | |||
7 | struct winsize { | ||
8 | unsigned short ws_row; | ||
9 | unsigned short ws_col; | ||
10 | unsigned short ws_xpixel; | ||
11 | unsigned short ws_ypixel; | ||
12 | }; | ||
13 | |||
14 | #define NCC 8 | ||
15 | struct termio { | ||
16 | unsigned short c_iflag; /* input mode flags */ | ||
17 | unsigned short c_oflag; /* output mode flags */ | ||
18 | unsigned short c_cflag; /* control mode flags */ | ||
19 | unsigned short c_lflag; /* local mode flags */ | ||
20 | unsigned char c_line; /* line discipline */ | ||
21 | unsigned char c_cc[NCC]; /* control characters */ | ||
22 | }; | ||
23 | |||
24 | /* modem lines */ | ||
25 | #define TIOCM_LE 0x001 | ||
26 | #define TIOCM_DTR 0x002 | ||
27 | #define TIOCM_RTS 0x004 | ||
28 | #define TIOCM_ST 0x008 | ||
29 | #define TIOCM_SR 0x010 | ||
30 | #define TIOCM_CTS 0x020 | ||
31 | #define TIOCM_CAR 0x040 | ||
32 | #define TIOCM_RNG 0x080 | ||
33 | #define TIOCM_DSR 0x100 | ||
34 | #define TIOCM_CD TIOCM_CAR | ||
35 | #define TIOCM_RI TIOCM_RNG | ||
36 | #define TIOCM_OUT1 0x2000 | ||
37 | #define TIOCM_OUT2 0x4000 | ||
38 | #define TIOCM_LOOP 0x8000 | ||
39 | |||
40 | /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ | ||
41 | |||
42 | #ifdef __KERNEL__ | ||
43 | |||
44 | /* intr=^C quit=^\ erase=del kill=^U | ||
45 | eof=^D vtime=\0 vmin=\1 sxtc=\0 | ||
46 | start=^Q stop=^S susp=^Z eol=\0 | ||
47 | reprint=^R discard=^U werase=^W lnext=^V | ||
48 | eol2=\0 | ||
49 | */ | ||
50 | #define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" | ||
51 | |||
52 | /* | ||
53 | * Translate a "termio" structure into a "termios". Ugh. | ||
54 | */ | ||
55 | #define SET_LOW_TERMIOS_BITS(termios, termio, x) { \ | ||
56 | unsigned short __tmp; \ | ||
57 | get_user(__tmp,&(termio)->x); \ | ||
58 | *(unsigned short *) &(termios)->x = __tmp; \ | ||
59 | } | ||
60 | |||
61 | #define user_termio_to_kernel_termios(termios, termio) \ | ||
62 | ({ \ | ||
63 | SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \ | ||
64 | SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \ | ||
65 | SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \ | ||
66 | SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \ | ||
67 | copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \ | ||
68 | }) | ||
69 | |||
70 | /* | ||
71 | * Translate a "termios" structure into a "termio". Ugh. | ||
72 | */ | ||
73 | #define kernel_termios_to_user_termio(termio, termios) \ | ||
74 | ({ \ | ||
75 | put_user((termios)->c_iflag, &(termio)->c_iflag); \ | ||
76 | put_user((termios)->c_oflag, &(termio)->c_oflag); \ | ||
77 | put_user((termios)->c_cflag, &(termio)->c_cflag); \ | ||
78 | put_user((termios)->c_lflag, &(termio)->c_lflag); \ | ||
79 | put_user((termios)->c_line, &(termio)->c_line); \ | ||
80 | copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ | ||
81 | }) | ||
82 | |||
83 | #define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2)) | ||
84 | #define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2)) | ||
85 | #define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios)) | ||
86 | #define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios)) | ||
87 | |||
88 | #endif /* __KERNEL__ */ | ||
89 | |||
90 | #endif /* _PARISC_TERMIOS_H */ | ||
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h new file mode 100644 index 000000000000..0407959da489 --- /dev/null +++ b/arch/parisc/include/asm/thread_info.h | |||
@@ -0,0 +1,76 @@ | |||
1 | #ifndef _ASM_PARISC_THREAD_INFO_H | ||
2 | #define _ASM_PARISC_THREAD_INFO_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | |||
6 | #ifndef __ASSEMBLY__ | ||
7 | #include <asm/processor.h> | ||
8 | |||
9 | struct thread_info { | ||
10 | struct task_struct *task; /* main task structure */ | ||
11 | struct exec_domain *exec_domain;/* execution domain */ | ||
12 | unsigned long flags; /* thread_info flags (see TIF_*) */ | ||
13 | mm_segment_t addr_limit; /* user-level address space limit */ | ||
14 | __u32 cpu; /* current CPU */ | ||
15 | int preempt_count; /* 0=preemptible, <0=BUG; will also serve as bh-counter */ | ||
16 | struct restart_block restart_block; | ||
17 | }; | ||
18 | |||
19 | #define INIT_THREAD_INFO(tsk) \ | ||
20 | { \ | ||
21 | .task = &tsk, \ | ||
22 | .exec_domain = &default_exec_domain, \ | ||
23 | .flags = 0, \ | ||
24 | .cpu = 0, \ | ||
25 | .addr_limit = KERNEL_DS, \ | ||
26 | .preempt_count = 1, \ | ||
27 | .restart_block = { \ | ||
28 | .fn = do_no_restart_syscall \ | ||
29 | } \ | ||
30 | } | ||
31 | |||
32 | #define init_thread_info (init_thread_union.thread_info) | ||
33 | #define init_stack (init_thread_union.stack) | ||
34 | |||
35 | /* thread information allocation */ | ||
36 | |||
37 | #define THREAD_SIZE_ORDER 2 | ||
38 | /* Be sure to hunt all references to this down when you change the size of | ||
39 | * the kernel stack */ | ||
40 | #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) | ||
41 | #define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER) | ||
42 | |||
43 | /* how to get the thread information struct from C */ | ||
44 | #define current_thread_info() ((struct thread_info *)mfctl(30)) | ||
45 | |||
46 | #endif /* !__ASSEMBLY */ | ||
47 | |||
48 | #define PREEMPT_ACTIVE_BIT 28 | ||
49 | #define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT) | ||
50 | |||
51 | /* | ||
52 | * thread information flags | ||
53 | */ | ||
54 | #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ | ||
55 | #define TIF_SIGPENDING 1 /* signal pending */ | ||
56 | #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ | ||
57 | #define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling TIF_NEED_RESCHED */ | ||
58 | #define TIF_32BIT 4 /* 32 bit binary */ | ||
59 | #define TIF_MEMDIE 5 | ||
60 | #define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */ | ||
61 | #define TIF_FREEZE 7 /* is freezing for suspend */ | ||
62 | |||
63 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) | ||
64 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) | ||
65 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) | ||
66 | #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) | ||
67 | #define _TIF_32BIT (1 << TIF_32BIT) | ||
68 | #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) | ||
69 | #define _TIF_FREEZE (1 << TIF_FREEZE) | ||
70 | |||
71 | #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | \ | ||
72 | _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK) | ||
73 | |||
74 | #endif /* __KERNEL__ */ | ||
75 | |||
76 | #endif /* _ASM_PARISC_THREAD_INFO_H */ | ||
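The stack-size arithmetic above is THREAD_SIZE = PAGE_SIZE << THREAD_SIZE_ORDER. A quick check of that computation is sketched below, assuming the usual 4 KiB base page size (the page size is an assumption here, not taken from this diff).

    #include <stdio.h>

    #define PAGE_SHIFT 12                           /* assumed 4 KiB pages */
    #define PAGE_SIZE (1UL << PAGE_SHIFT)
    #define THREAD_SIZE_ORDER 2
    #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
    #define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER)

    int main(void)
    {
        /* 4096 << 2 = 16384 bytes, i.e. a 16 KiB kernel stack */
        printf("THREAD_SIZE = %lu (1 << %d)\n", THREAD_SIZE, THREAD_SHIFT);
        return 0;
    }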
diff --git a/arch/parisc/include/asm/timex.h b/arch/parisc/include/asm/timex.h new file mode 100644 index 000000000000..3b68d77273d9 --- /dev/null +++ b/arch/parisc/include/asm/timex.h | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * linux/include/asm-parisc/timex.h | ||
3 | * | ||
4 | * PARISC architecture timex specifications | ||
5 | */ | ||
6 | #ifndef _ASMPARISC_TIMEX_H | ||
7 | #define _ASMPARISC_TIMEX_H | ||
8 | |||
9 | #include <asm/system.h> | ||
10 | |||
11 | #define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ | ||
12 | |||
13 | typedef unsigned long cycles_t; | ||
14 | |||
15 | static inline cycles_t get_cycles (void) | ||
16 | { | ||
17 | return mfctl(16); | ||
18 | } | ||
19 | |||
20 | #endif | ||
diff --git a/arch/parisc/include/asm/tlb.h b/arch/parisc/include/asm/tlb.h new file mode 100644 index 000000000000..383b1db310ee --- /dev/null +++ b/arch/parisc/include/asm/tlb.h | |||
@@ -0,0 +1,27 @@ | |||
1 | #ifndef _PARISC_TLB_H | ||
2 | #define _PARISC_TLB_H | ||
3 | |||
4 | #define tlb_flush(tlb) \ | ||
5 | do { if ((tlb)->fullmm) \ | ||
6 | flush_tlb_mm((tlb)->mm);\ | ||
7 | } while (0) | ||
8 | |||
9 | #define tlb_start_vma(tlb, vma) \ | ||
10 | do { if (!(tlb)->fullmm) \ | ||
11 | flush_cache_range(vma, vma->vm_start, vma->vm_end); \ | ||
12 | } while (0) | ||
13 | |||
14 | #define tlb_end_vma(tlb, vma) \ | ||
15 | do { if (!(tlb)->fullmm) \ | ||
16 | flush_tlb_range(vma, vma->vm_start, vma->vm_end); \ | ||
17 | } while (0) | ||
18 | |||
19 | #define __tlb_remove_tlb_entry(tlb, pte, address) \ | ||
20 | do { } while (0) | ||
21 | |||
22 | #include <asm-generic/tlb.h> | ||
23 | |||
24 | #define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd) | ||
25 | #define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte) | ||
26 | |||
27 | #endif | ||
diff --git a/arch/parisc/include/asm/tlbflush.h b/arch/parisc/include/asm/tlbflush.h new file mode 100644 index 000000000000..b72ec66db699 --- /dev/null +++ b/arch/parisc/include/asm/tlbflush.h | |||
@@ -0,0 +1,80 @@ | |||
1 | #ifndef _PARISC_TLBFLUSH_H | ||
2 | #define _PARISC_TLBFLUSH_H | ||
3 | |||
4 | /* TLB flushing routines.... */ | ||
5 | |||
6 | #include <linux/mm.h> | ||
7 | #include <linux/sched.h> | ||
8 | #include <asm/mmu_context.h> | ||
9 | |||
10 | |||
11 | /* This is for the serialisation of PxTLB broadcasts. At least on the | ||
12 | * N class systems, only one PxTLB inter-processor broadcast can be | ||
13 | * active at any one time on the Merced bus. This tlb purge | ||
14 | * synchronisation is fairly lightweight and harmless so we activate | ||
15 | * it on all SMP systems not just the N class. We also need to have | ||
16 | * preemption disabled on uniprocessor machines, and spin_lock does that | ||
17 | * nicely. | ||
18 | */ | ||
19 | extern spinlock_t pa_tlb_lock; | ||
20 | |||
21 | #define purge_tlb_start(x) spin_lock(&pa_tlb_lock) | ||
22 | #define purge_tlb_end(x) spin_unlock(&pa_tlb_lock) | ||
23 | |||
24 | extern void flush_tlb_all(void); | ||
25 | extern void flush_tlb_all_local(void *); | ||
26 | |||
27 | /* | ||
28 | * flush_tlb_mm() | ||
29 | * | ||
30 | * XXX This code is NOT valid for HP-UX compatibility processes, | ||
31 | * (although it will probably work 99% of the time). HP-UX | ||
32 | * processes are free to play with the space ids and save them | ||
33 | * over long periods of time, etc. so we have to preserve the | ||
34 | * space and just flush the entire tlb. We need to check the | ||
35 | * personality in order to do that, but the personality is not | ||
36 | * currently being set correctly. | ||
37 | * | ||
38 | * Of course, Linux processes could do the same thing, but | ||
39 | * we don't support that (and the compilers, dynamic linker, | ||
40 | * etc. do not do that). | ||
41 | */ | ||
42 | |||
43 | static inline void flush_tlb_mm(struct mm_struct *mm) | ||
44 | { | ||
45 | BUG_ON(mm == &init_mm); /* Should never happen */ | ||
46 | |||
47 | #ifdef CONFIG_SMP | ||
48 | flush_tlb_all(); | ||
49 | #else | ||
50 | if (mm) { | ||
51 | if (mm->context != 0) | ||
52 | free_sid(mm->context); | ||
53 | mm->context = alloc_sid(); | ||
54 | if (mm == current->active_mm) | ||
55 | load_context(mm->context); | ||
56 | } | ||
57 | #endif | ||
58 | } | ||
59 | |||
60 | static inline void flush_tlb_page(struct vm_area_struct *vma, | ||
61 | unsigned long addr) | ||
62 | { | ||
63 | /* For one page, it's not worth testing the split_tlb variable */ | ||
64 | |||
65 | mb(); | ||
66 | mtsp(vma->vm_mm->context,1); | ||
67 | purge_tlb_start(); | ||
68 | pdtlb(addr); | ||
69 | pitlb(addr); | ||
70 | purge_tlb_end(); | ||
71 | } | ||
72 | |||
73 | void __flush_tlb_range(unsigned long sid, | ||
74 | unsigned long start, unsigned long end); | ||
75 | |||
76 | #define flush_tlb_range(vma,start,end) __flush_tlb_range((vma)->vm_mm->context,start,end) | ||
77 | |||
78 | #define flush_tlb_kernel_range(start, end) __flush_tlb_range(0,start,end) | ||
79 | |||
80 | #endif | ||
diff --git a/arch/parisc/include/asm/topology.h b/arch/parisc/include/asm/topology.h new file mode 100644 index 000000000000..d8133eb0b1e7 --- /dev/null +++ b/arch/parisc/include/asm/topology.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef _ASM_PARISC_TOPOLOGY_H | ||
2 | #define _ASM_PARISC_TOPOLOGY_H | ||
3 | |||
4 | #include <asm-generic/topology.h> | ||
5 | |||
6 | #endif /* _ASM_PARISC_TOPOLOGY_H */ | ||
diff --git a/arch/parisc/include/asm/traps.h b/arch/parisc/include/asm/traps.h new file mode 100644 index 000000000000..1945f995f2df --- /dev/null +++ b/arch/parisc/include/asm/traps.h | |||
@@ -0,0 +1,16 @@ | |||
1 | #ifndef __ASM_TRAPS_H | ||
2 | #define __ASM_TRAPS_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | struct pt_regs; | ||
6 | |||
7 | /* traps.c */ | ||
8 | void parisc_terminate(char *msg, struct pt_regs *regs, | ||
9 | int code, unsigned long offset); | ||
10 | |||
11 | /* mm/fault.c */ | ||
12 | void do_page_fault(struct pt_regs *regs, unsigned long code, | ||
13 | unsigned long address); | ||
14 | #endif | ||
15 | |||
16 | #endif | ||
diff --git a/arch/parisc/include/asm/types.h b/arch/parisc/include/asm/types.h new file mode 100644 index 000000000000..7f5a39bfb4ce --- /dev/null +++ b/arch/parisc/include/asm/types.h | |||
@@ -0,0 +1,36 @@ | |||
1 | #ifndef _PARISC_TYPES_H | ||
2 | #define _PARISC_TYPES_H | ||
3 | |||
4 | #include <asm-generic/int-ll64.h> | ||
5 | |||
6 | #ifndef __ASSEMBLY__ | ||
7 | |||
8 | typedef unsigned short umode_t; | ||
9 | |||
10 | #endif /* __ASSEMBLY__ */ | ||
11 | |||
12 | /* | ||
13 | * These aren't exported outside the kernel to avoid name space clashes | ||
14 | */ | ||
15 | #ifdef __KERNEL__ | ||
16 | |||
17 | #ifdef CONFIG_64BIT | ||
18 | #define BITS_PER_LONG 64 | ||
19 | #define SHIFT_PER_LONG 6 | ||
20 | #else | ||
21 | #define BITS_PER_LONG 32 | ||
22 | #define SHIFT_PER_LONG 5 | ||
23 | #endif | ||
24 | |||
25 | #ifndef __ASSEMBLY__ | ||
26 | |||
27 | /* Dma addresses are 32-bits wide. */ | ||
28 | |||
29 | typedef u32 dma_addr_t; | ||
30 | typedef u64 dma64_addr_t; | ||
31 | |||
32 | #endif /* __ASSEMBLY__ */ | ||
33 | |||
34 | #endif /* __KERNEL__ */ | ||
35 | |||
36 | #endif | ||
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h new file mode 100644 index 000000000000..4878b9501f24 --- /dev/null +++ b/arch/parisc/include/asm/uaccess.h | |||
@@ -0,0 +1,244 @@ | |||
1 | #ifndef __PARISC_UACCESS_H | ||
2 | #define __PARISC_UACCESS_H | ||
3 | |||
4 | /* | ||
5 | * User space memory access functions | ||
6 | */ | ||
7 | #include <asm/page.h> | ||
8 | #include <asm/system.h> | ||
9 | #include <asm/cache.h> | ||
10 | #include <asm-generic/uaccess.h> | ||
11 | |||
12 | #define VERIFY_READ 0 | ||
13 | #define VERIFY_WRITE 1 | ||
14 | |||
15 | #define KERNEL_DS ((mm_segment_t){0}) | ||
16 | #define USER_DS ((mm_segment_t){1}) | ||
17 | |||
18 | #define segment_eq(a,b) ((a).seg == (b).seg) | ||
19 | |||
20 | #define get_ds() (KERNEL_DS) | ||
21 | #define get_fs() (current_thread_info()->addr_limit) | ||
22 | #define set_fs(x) (current_thread_info()->addr_limit = (x)) | ||
23 | |||
24 | /* | ||
25 | * Note that since kernel addresses are in a separate address space on | ||
26 | * parisc, we don't need to do anything for access_ok(). | ||
27 | * We just let the page fault handler do the right thing. This also means | ||
28 | * that put_user is the same as __put_user, etc. | ||
29 | */ | ||
30 | |||
31 | extern int __get_kernel_bad(void); | ||
32 | extern int __get_user_bad(void); | ||
33 | extern int __put_kernel_bad(void); | ||
34 | extern int __put_user_bad(void); | ||
35 | |||
36 | static inline long access_ok(int type, const void __user * addr, | ||
37 | unsigned long size) | ||
38 | { | ||
39 | return 1; | ||
40 | } | ||
41 | |||
42 | #define put_user __put_user | ||
43 | #define get_user __get_user | ||
44 | |||
45 | #if !defined(CONFIG_64BIT) | ||
46 | #define LDD_KERNEL(ptr) __get_kernel_bad(); | ||
47 | #define LDD_USER(ptr) __get_user_bad(); | ||
48 | #define STD_KERNEL(x, ptr) __put_kernel_asm64(x,ptr) | ||
49 | #define STD_USER(x, ptr) __put_user_asm64(x,ptr) | ||
50 | #define ASM_WORD_INSN ".word\t" | ||
51 | #else | ||
52 | #define LDD_KERNEL(ptr) __get_kernel_asm("ldd",ptr) | ||
53 | #define LDD_USER(ptr) __get_user_asm("ldd",ptr) | ||
54 | #define STD_KERNEL(x, ptr) __put_kernel_asm("std",x,ptr) | ||
55 | #define STD_USER(x, ptr) __put_user_asm("std",x,ptr) | ||
56 | #define ASM_WORD_INSN ".dword\t" | ||
57 | #endif | ||
58 | |||
59 | /* | ||
60 | * The exception table contains two values: the first is an address | ||
61 | * for an instruction that is allowed to fault, and the second is | ||
62 | * the address to the fixup routine. | ||
63 | */ | ||
64 | |||
65 | struct exception_table_entry { | ||
66 | unsigned long insn; /* address of insn that is allowed to fault. */ | ||
67 | long fixup; /* fixup routine */ | ||
68 | }; | ||
69 | |||
70 | #define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\ | ||
71 | ".section __ex_table,\"aw\"\n" \ | ||
72 | ASM_WORD_INSN #fault_addr ", " #except_addr "\n\t" \ | ||
73 | ".previous\n" | ||
74 | |||
75 | /* | ||
76 | * The page fault handler stores, in a per-cpu area, the following information | ||
77 | * if a fixup routine is available. | ||
78 | */ | ||
79 | struct exception_data { | ||
80 | unsigned long fault_ip; | ||
81 | unsigned long fault_space; | ||
82 | unsigned long fault_addr; | ||
83 | }; | ||
84 | |||
85 | #define __get_user(x,ptr) \ | ||
86 | ({ \ | ||
87 | register long __gu_err __asm__ ("r8") = 0; \ | ||
88 | register long __gu_val __asm__ ("r9") = 0; \ | ||
89 | \ | ||
90 | if (segment_eq(get_fs(),KERNEL_DS)) { \ | ||
91 | switch (sizeof(*(ptr))) { \ | ||
92 | case 1: __get_kernel_asm("ldb",ptr); break; \ | ||
93 | case 2: __get_kernel_asm("ldh",ptr); break; \ | ||
94 | case 4: __get_kernel_asm("ldw",ptr); break; \ | ||
95 | case 8: LDD_KERNEL(ptr); break; \ | ||
96 | default: __get_kernel_bad(); break; \ | ||
97 | } \ | ||
98 | } \ | ||
99 | else { \ | ||
100 | switch (sizeof(*(ptr))) { \ | ||
101 | case 1: __get_user_asm("ldb",ptr); break; \ | ||
102 | case 2: __get_user_asm("ldh",ptr); break; \ | ||
103 | case 4: __get_user_asm("ldw",ptr); break; \ | ||
104 | case 8: LDD_USER(ptr); break; \ | ||
105 | default: __get_user_bad(); break; \ | ||
106 | } \ | ||
107 | } \ | ||
108 | \ | ||
109 | (x) = (__typeof__(*(ptr))) __gu_val; \ | ||
110 | __gu_err; \ | ||
111 | }) | ||
112 | |||
113 | #define __get_kernel_asm(ldx,ptr) \ | ||
114 | __asm__("\n1:\t" ldx "\t0(%2),%0\n\t" \ | ||
115 | ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\ | ||
116 | : "=r"(__gu_val), "=r"(__gu_err) \ | ||
117 | : "r"(ptr), "1"(__gu_err) \ | ||
118 | : "r1"); | ||
119 | |||
120 | #define __get_user_asm(ldx,ptr) \ | ||
121 | __asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n\t" \ | ||
122 | ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_get_user_skip_1)\ | ||
123 | : "=r"(__gu_val), "=r"(__gu_err) \ | ||
124 | : "r"(ptr), "1"(__gu_err) \ | ||
125 | : "r1"); | ||
126 | |||
127 | #define __put_user(x,ptr) \ | ||
128 | ({ \ | ||
129 | register long __pu_err __asm__ ("r8") = 0; \ | ||
130 | __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \ | ||
131 | \ | ||
132 | if (segment_eq(get_fs(),KERNEL_DS)) { \ | ||
133 | switch (sizeof(*(ptr))) { \ | ||
134 | case 1: __put_kernel_asm("stb",__x,ptr); break; \ | ||
135 | case 2: __put_kernel_asm("sth",__x,ptr); break; \ | ||
136 | case 4: __put_kernel_asm("stw",__x,ptr); break; \ | ||
137 | case 8: STD_KERNEL(__x,ptr); break; \ | ||
138 | default: __put_kernel_bad(); break; \ | ||
139 | } \ | ||
140 | } \ | ||
141 | else { \ | ||
142 | switch (sizeof(*(ptr))) { \ | ||
143 | case 1: __put_user_asm("stb",__x,ptr); break; \ | ||
144 | case 2: __put_user_asm("sth",__x,ptr); break; \ | ||
145 | case 4: __put_user_asm("stw",__x,ptr); break; \ | ||
146 | case 8: STD_USER(__x,ptr); break; \ | ||
147 | default: __put_user_bad(); break; \ | ||
148 | } \ | ||
149 | } \ | ||
150 | \ | ||
151 | __pu_err; \ | ||
152 | }) | ||
153 | |||
154 | /* | ||
155 | * The "__put_user/kernel_asm()" macros tell gcc they read from memory | ||
156 | * instead of writing. This is because they do not write to any memory | ||
157 | * gcc knows about, so there are no aliasing issues. These macros must | ||
158 | * also be aware that "fixup_put_user_skip_[12]" are executed in the | ||
159 | * context of the fault, and any registers used there must be listed | ||
160 | * as clobbers. In this case only "r1" is used by the current routines. | ||
161 | * r8/r9 are already listed as err/val. | ||
162 | */ | ||
163 | |||
164 | #define __put_kernel_asm(stx,x,ptr) \ | ||
165 | __asm__ __volatile__ ( \ | ||
166 | "\n1:\t" stx "\t%2,0(%1)\n\t" \ | ||
167 | ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_1)\ | ||
168 | : "=r"(__pu_err) \ | ||
169 | : "r"(ptr), "r"(x), "0"(__pu_err) \ | ||
170 | : "r1") | ||
171 | |||
172 | #define __put_user_asm(stx,x,ptr) \ | ||
173 | __asm__ __volatile__ ( \ | ||
174 | "\n1:\t" stx "\t%2,0(%%sr3,%1)\n\t" \ | ||
175 | ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_1)\ | ||
176 | : "=r"(__pu_err) \ | ||
177 | : "r"(ptr), "r"(x), "0"(__pu_err) \ | ||
178 | : "r1") | ||
179 | |||
180 | |||
181 | #if !defined(CONFIG_64BIT) | ||
182 | |||
183 | #define __put_kernel_asm64(__val,ptr) do { \ | ||
184 | u64 __val64 = (u64)(__val); \ | ||
185 | u32 hi = (__val64) >> 32; \ | ||
186 | u32 lo = (__val64) & 0xffffffff; \ | ||
187 | __asm__ __volatile__ ( \ | ||
188 | "\n1:\tstw %2,0(%1)" \ | ||
189 | "\n2:\tstw %3,4(%1)\n\t" \ | ||
190 | ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\ | ||
191 | ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\ | ||
192 | : "=r"(__pu_err) \ | ||
193 | : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \ | ||
194 | : "r1"); \ | ||
195 | } while (0) | ||
196 | |||
197 | #define __put_user_asm64(__val,ptr) do { \ | ||
198 | u64 __val64 = (u64)(__val); \ | ||
199 | u32 hi = (__val64) >> 32; \ | ||
200 | u32 lo = (__val64) & 0xffffffff; \ | ||
201 | __asm__ __volatile__ ( \ | ||
202 | "\n1:\tstw %2,0(%%sr3,%1)" \ | ||
203 | "\n2:\tstw %3,4(%%sr3,%1)\n\t" \ | ||
204 | ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\ | ||
205 | ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\ | ||
206 | : "=r"(__pu_err) \ | ||
207 | : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \ | ||
208 | : "r1"); \ | ||
209 | } while (0) | ||
210 | |||
211 | #endif /* !defined(CONFIG_64BIT) */ | ||
212 | |||
213 | |||
214 | /* | ||
215 | * Complex access routines -- external declarations | ||
216 | */ | ||
217 | |||
218 | extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long); | ||
219 | extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long); | ||
220 | extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long); | ||
221 | extern long lstrncpy_from_user(char *, const char __user *, long); | ||
222 | extern unsigned lclear_user(void __user *,unsigned long); | ||
223 | extern long lstrnlen_user(const char __user *,long); | ||
224 | |||
225 | /* | ||
226 | * Complex access routines -- macros | ||
227 | */ | ||
228 | |||
229 | #define strncpy_from_user lstrncpy_from_user | ||
230 | #define strnlen_user lstrnlen_user | ||
231 | #define strlen_user(str) lstrnlen_user(str, 0x7fffffffL) | ||
232 | #define clear_user lclear_user | ||
233 | #define __clear_user lclear_user | ||
234 | |||
235 | unsigned long copy_to_user(void __user *dst, const void *src, unsigned long len); | ||
236 | #define __copy_to_user copy_to_user | ||
237 | unsigned long copy_from_user(void *dst, const void __user *src, unsigned long len); | ||
238 | #define __copy_from_user copy_from_user | ||
239 | unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long len); | ||
240 | #define __copy_in_user copy_in_user | ||
241 | #define __copy_to_user_inatomic __copy_to_user | ||
242 | #define __copy_from_user_inatomic __copy_from_user | ||
243 | |||
244 | #endif /* __PARISC_UACCESS_H */ | ||
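For the bulk routines declared above, the convention is the usual one: copy_to_user()/copy_from_user() return the number of bytes that could not be copied, so zero means complete success. A hedged sketch of the typical calling pattern (fetch_config() is an invented name):

static int fetch_config(void *dst, const void __user *src, unsigned long len)
{
        /* returns bytes left uncopied; any non-zero remainder is a fault */
        if (copy_from_user(dst, src, len))
                return -EFAULT;
        return 0;
}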
diff --git a/arch/parisc/include/asm/ucontext.h b/arch/parisc/include/asm/ucontext.h new file mode 100644 index 000000000000..6c8883e4b0bd --- /dev/null +++ b/arch/parisc/include/asm/ucontext.h | |||
@@ -0,0 +1,12 @@ | |||
1 | #ifndef _ASM_PARISC_UCONTEXT_H | ||
2 | #define _ASM_PARISC_UCONTEXT_H | ||
3 | |||
4 | struct ucontext { | ||
5 | unsigned int uc_flags; | ||
6 | struct ucontext *uc_link; | ||
7 | stack_t uc_stack; | ||
8 | struct sigcontext uc_mcontext; | ||
9 | sigset_t uc_sigmask; /* mask last for extensibility */ | ||
10 | }; | ||
11 | |||
12 | #endif /* !_ASM_PARISC_UCONTEXT_H */ | ||
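This kernel-side struct is what the rt-signal frame carries; the ucontext_t that glibc hands to an SA_SIGINFO handler mirrors the same layout. A small userspace sketch, assuming only standard <signal.h>/<ucontext.h> definitions (nothing here is part of the patch):

#include <signal.h>
#include <ucontext.h>

static void handler(int sig, siginfo_t *info, void *uc_void)
{
        ucontext_t *uc = uc_void;

        /* uc->uc_mcontext is the interrupted register state,
         * uc->uc_stack describes the stack the handler runs on. */
        (void)sig; (void)info; (void)uc;
}

int main(void)
{
        struct sigaction sa = { .sa_sigaction = handler, .sa_flags = SA_SIGINFO };

        sigemptyset(&sa.sa_mask);
        sigaction(SIGUSR1, &sa, NULL);
        raise(SIGUSR1);
        return 0;
}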
diff --git a/arch/parisc/include/asm/unaligned.h b/arch/parisc/include/asm/unaligned.h new file mode 100644 index 000000000000..dfc5d3321a54 --- /dev/null +++ b/arch/parisc/include/asm/unaligned.h | |||
@@ -0,0 +1,16 @@ | |||
1 | #ifndef _ASM_PARISC_UNALIGNED_H | ||
2 | #define _ASM_PARISC_UNALIGNED_H | ||
3 | |||
4 | #include <linux/unaligned/be_struct.h> | ||
5 | #include <linux/unaligned/le_byteshift.h> | ||
6 | #include <linux/unaligned/generic.h> | ||
7 | #define get_unaligned __get_unaligned_be | ||
8 | #define put_unaligned __put_unaligned_be | ||
9 | |||
10 | #ifdef __KERNEL__ | ||
11 | struct pt_regs; | ||
12 | void handle_unaligned(struct pt_regs *regs); | ||
13 | int check_unaligned(struct pt_regs *regs); | ||
14 | #endif | ||
15 | |||
16 | #endif /* _ASM_PARISC_UNALIGNED_H */ | ||
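Since parisc is big-endian, get_unaligned()/put_unaligned() resolve to the big-endian struct accessors pulled in above; the explicit *_be32/*_le16 helpers from the same framework are available as well. A hedged in-kernel sketch (parse_pkt_len() and the offsets are invented for illustration):

#include <linux/types.h>
#include <asm/unaligned.h>

static u32 parse_pkt_len(const u8 *buf)
{
        /* 32-bit big-endian length field at an odd offset in a byte buffer */
        return get_unaligned_be32(buf + 2);
}

static void set_pkt_len(u8 *buf, u32 len)
{
        put_unaligned_be32(len, buf + 2);
}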
diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h new file mode 100644 index 000000000000..ef26b009dc5d --- /dev/null +++ b/arch/parisc/include/asm/unistd.h | |||
@@ -0,0 +1,997 @@ | |||
1 | #ifndef _ASM_PARISC_UNISTD_H_ | ||
2 | #define _ASM_PARISC_UNISTD_H_ | ||
3 | |||
4 | /* | ||
5 | * This file contains the system call numbers. | ||
6 | */ | ||
7 | |||
8 | /* | ||
9 | * HP-UX system calls get their native numbers for binary compatibility. | ||
10 | */ | ||
11 | |||
12 | #define __NR_HPUX_exit 1 | ||
13 | #define __NR_HPUX_fork 2 | ||
14 | #define __NR_HPUX_read 3 | ||
15 | #define __NR_HPUX_write 4 | ||
16 | #define __NR_HPUX_open 5 | ||
17 | #define __NR_HPUX_close 6 | ||
18 | #define __NR_HPUX_wait 7 | ||
19 | #define __NR_HPUX_creat 8 | ||
20 | #define __NR_HPUX_link 9 | ||
21 | #define __NR_HPUX_unlink 10 | ||
22 | #define __NR_HPUX_execv 11 | ||
23 | #define __NR_HPUX_chdir 12 | ||
24 | #define __NR_HPUX_time 13 | ||
25 | #define __NR_HPUX_mknod 14 | ||
26 | #define __NR_HPUX_chmod 15 | ||
27 | #define __NR_HPUX_chown 16 | ||
28 | #define __NR_HPUX_break 17 | ||
29 | #define __NR_HPUX_lchmod 18 | ||
30 | #define __NR_HPUX_lseek 19 | ||
31 | #define __NR_HPUX_getpid 20 | ||
32 | #define __NR_HPUX_mount 21 | ||
33 | #define __NR_HPUX_umount 22 | ||
34 | #define __NR_HPUX_setuid 23 | ||
35 | #define __NR_HPUX_getuid 24 | ||
36 | #define __NR_HPUX_stime 25 | ||
37 | #define __NR_HPUX_ptrace 26 | ||
38 | #define __NR_HPUX_alarm 27 | ||
39 | #define __NR_HPUX_oldfstat 28 | ||
40 | #define __NR_HPUX_pause 29 | ||
41 | #define __NR_HPUX_utime 30 | ||
42 | #define __NR_HPUX_stty 31 | ||
43 | #define __NR_HPUX_gtty 32 | ||
44 | #define __NR_HPUX_access 33 | ||
45 | #define __NR_HPUX_nice 34 | ||
46 | #define __NR_HPUX_ftime 35 | ||
47 | #define __NR_HPUX_sync 36 | ||
48 | #define __NR_HPUX_kill 37 | ||
49 | #define __NR_HPUX_stat 38 | ||
50 | #define __NR_HPUX_setpgrp3 39 | ||
51 | #define __NR_HPUX_lstat 40 | ||
52 | #define __NR_HPUX_dup 41 | ||
53 | #define __NR_HPUX_pipe 42 | ||
54 | #define __NR_HPUX_times 43 | ||
55 | #define __NR_HPUX_profil 44 | ||
56 | #define __NR_HPUX_ki_call 45 | ||
57 | #define __NR_HPUX_setgid 46 | ||
58 | #define __NR_HPUX_getgid 47 | ||
59 | #define __NR_HPUX_sigsys 48 | ||
60 | #define __NR_HPUX_reserved1 49 | ||
61 | #define __NR_HPUX_reserved2 50 | ||
62 | #define __NR_HPUX_acct 51 | ||
63 | #define __NR_HPUX_set_userthreadid 52 | ||
64 | #define __NR_HPUX_oldlock 53 | ||
65 | #define __NR_HPUX_ioctl 54 | ||
66 | #define __NR_HPUX_reboot 55 | ||
67 | #define __NR_HPUX_symlink 56 | ||
68 | #define __NR_HPUX_utssys 57 | ||
69 | #define __NR_HPUX_readlink 58 | ||
70 | #define __NR_HPUX_execve 59 | ||
71 | #define __NR_HPUX_umask 60 | ||
72 | #define __NR_HPUX_chroot 61 | ||
73 | #define __NR_HPUX_fcntl 62 | ||
74 | #define __NR_HPUX_ulimit 63 | ||
75 | #define __NR_HPUX_getpagesize 64 | ||
76 | #define __NR_HPUX_mremap 65 | ||
77 | #define __NR_HPUX_vfork 66 | ||
78 | #define __NR_HPUX_vread 67 | ||
79 | #define __NR_HPUX_vwrite 68 | ||
80 | #define __NR_HPUX_sbrk 69 | ||
81 | #define __NR_HPUX_sstk 70 | ||
82 | #define __NR_HPUX_mmap 71 | ||
83 | #define __NR_HPUX_vadvise 72 | ||
84 | #define __NR_HPUX_munmap 73 | ||
85 | #define __NR_HPUX_mprotect 74 | ||
86 | #define __NR_HPUX_madvise 75 | ||
87 | #define __NR_HPUX_vhangup 76 | ||
88 | #define __NR_HPUX_swapoff 77 | ||
89 | #define __NR_HPUX_mincore 78 | ||
90 | #define __NR_HPUX_getgroups 79 | ||
91 | #define __NR_HPUX_setgroups 80 | ||
92 | #define __NR_HPUX_getpgrp2 81 | ||
93 | #define __NR_HPUX_setpgrp2 82 | ||
94 | #define __NR_HPUX_setitimer 83 | ||
95 | #define __NR_HPUX_wait3 84 | ||
96 | #define __NR_HPUX_swapon 85 | ||
97 | #define __NR_HPUX_getitimer 86 | ||
98 | #define __NR_HPUX_gethostname42 87 | ||
99 | #define __NR_HPUX_sethostname42 88 | ||
100 | #define __NR_HPUX_getdtablesize 89 | ||
101 | #define __NR_HPUX_dup2 90 | ||
102 | #define __NR_HPUX_getdopt 91 | ||
103 | #define __NR_HPUX_fstat 92 | ||
104 | #define __NR_HPUX_select 93 | ||
105 | #define __NR_HPUX_setdopt 94 | ||
106 | #define __NR_HPUX_fsync 95 | ||
107 | #define __NR_HPUX_setpriority 96 | ||
108 | #define __NR_HPUX_socket_old 97 | ||
109 | #define __NR_HPUX_connect_old 98 | ||
110 | #define __NR_HPUX_accept_old 99 | ||
111 | #define __NR_HPUX_getpriority 100 | ||
112 | #define __NR_HPUX_send_old 101 | ||
113 | #define __NR_HPUX_recv_old 102 | ||
114 | #define __NR_HPUX_socketaddr_old 103 | ||
115 | #define __NR_HPUX_bind_old 104 | ||
116 | #define __NR_HPUX_setsockopt_old 105 | ||
117 | #define __NR_HPUX_listen_old 106 | ||
118 | #define __NR_HPUX_vtimes_old 107 | ||
119 | #define __NR_HPUX_sigvector 108 | ||
120 | #define __NR_HPUX_sigblock 109 | ||
121 | #define __NR_HPUX_siggetmask 110 | ||
122 | #define __NR_HPUX_sigpause 111 | ||
123 | #define __NR_HPUX_sigstack 112 | ||
124 | #define __NR_HPUX_recvmsg_old 113 | ||
125 | #define __NR_HPUX_sendmsg_old 114 | ||
126 | #define __NR_HPUX_vtrace_old 115 | ||
127 | #define __NR_HPUX_gettimeofday 116 | ||
128 | #define __NR_HPUX_getrusage 117 | ||
129 | #define __NR_HPUX_getsockopt_old 118 | ||
130 | #define __NR_HPUX_resuba_old 119 | ||
131 | #define __NR_HPUX_readv 120 | ||
132 | #define __NR_HPUX_writev 121 | ||
133 | #define __NR_HPUX_settimeofday 122 | ||
134 | #define __NR_HPUX_fchown 123 | ||
135 | #define __NR_HPUX_fchmod 124 | ||
136 | #define __NR_HPUX_recvfrom_old 125 | ||
137 | #define __NR_HPUX_setresuid 126 | ||
138 | #define __NR_HPUX_setresgid 127 | ||
139 | #define __NR_HPUX_rename 128 | ||
140 | #define __NR_HPUX_truncate 129 | ||
141 | #define __NR_HPUX_ftruncate 130 | ||
142 | #define __NR_HPUX_flock_old 131 | ||
143 | #define __NR_HPUX_sysconf 132 | ||
144 | #define __NR_HPUX_sendto_old 133 | ||
145 | #define __NR_HPUX_shutdown_old 134 | ||
146 | #define __NR_HPUX_socketpair_old 135 | ||
147 | #define __NR_HPUX_mkdir 136 | ||
148 | #define __NR_HPUX_rmdir 137 | ||
149 | #define __NR_HPUX_utimes_old 138 | ||
150 | #define __NR_HPUX_sigcleanup_old 139 | ||
151 | #define __NR_HPUX_setcore 140 | ||
152 | #define __NR_HPUX_getpeername_old 141 | ||
153 | #define __NR_HPUX_gethostid 142 | ||
154 | #define __NR_HPUX_sethostid 143 | ||
155 | #define __NR_HPUX_getrlimit 144 | ||
156 | #define __NR_HPUX_setrlimit 145 | ||
157 | #define __NR_HPUX_killpg_old 146 | ||
158 | #define __NR_HPUX_cachectl 147 | ||
159 | #define __NR_HPUX_quotactl 148 | ||
160 | #define __NR_HPUX_get_sysinfo 149 | ||
161 | #define __NR_HPUX_getsockname_old 150 | ||
162 | #define __NR_HPUX_privgrp 151 | ||
163 | #define __NR_HPUX_rtprio 152 | ||
164 | #define __NR_HPUX_plock 153 | ||
165 | #define __NR_HPUX_reserved3 154 | ||
166 | #define __NR_HPUX_lockf 155 | ||
167 | #define __NR_HPUX_semget 156 | ||
168 | #define __NR_HPUX_osemctl 157 | ||
169 | #define __NR_HPUX_semop 158 | ||
170 | #define __NR_HPUX_msgget 159 | ||
171 | #define __NR_HPUX_omsgctl 160 | ||
172 | #define __NR_HPUX_msgsnd 161 | ||
173 | #define __NR_HPUX_msgrecv 162 | ||
174 | #define __NR_HPUX_shmget 163 | ||
175 | #define __NR_HPUX_oshmctl 164 | ||
176 | #define __NR_HPUX_shmat 165 | ||
177 | #define __NR_HPUX_shmdt 166 | ||
178 | #define __NR_HPUX_m68020_advise 167 | ||
179 | /* [168,189] are for Discless/DUX */ | ||
180 | #define __NR_HPUX_csp 168 | ||
181 | #define __NR_HPUX_cluster 169 | ||
182 | #define __NR_HPUX_mkrnod 170 | ||
183 | #define __NR_HPUX_test 171 | ||
184 | #define __NR_HPUX_unsp_open 172 | ||
185 | #define __NR_HPUX_reserved4 173 | ||
186 | #define __NR_HPUX_getcontext_old 174 | ||
187 | #define __NR_HPUX_osetcontext 175 | ||
188 | #define __NR_HPUX_bigio 176 | ||
189 | #define __NR_HPUX_pipenode 177 | ||
190 | #define __NR_HPUX_lsync 178 | ||
191 | #define __NR_HPUX_getmachineid 179 | ||
192 | #define __NR_HPUX_cnodeid 180 | ||
193 | #define __NR_HPUX_cnodes 181 | ||
194 | #define __NR_HPUX_swapclients 182 | ||
195 | #define __NR_HPUX_rmt_process 183 | ||
196 | #define __NR_HPUX_dskless_stats 184 | ||
197 | #define __NR_HPUX_sigprocmask 185 | ||
198 | #define __NR_HPUX_sigpending 186 | ||
199 | #define __NR_HPUX_sigsuspend 187 | ||
200 | #define __NR_HPUX_sigaction 188 | ||
201 | #define __NR_HPUX_reserved5 189 | ||
202 | #define __NR_HPUX_nfssvc 190 | ||
203 | #define __NR_HPUX_getfh 191 | ||
204 | #define __NR_HPUX_getdomainname 192 | ||
205 | #define __NR_HPUX_setdomainname 193 | ||
206 | #define __NR_HPUX_async_daemon 194 | ||
207 | #define __NR_HPUX_getdirentries 195 | ||
208 | #define __NR_HPUX_statfs 196 | ||
209 | #define __NR_HPUX_fstatfs 197 | ||
210 | #define __NR_HPUX_vfsmount 198 | ||
211 | #define __NR_HPUX_reserved6 199 | ||
212 | #define __NR_HPUX_waitpid 200 | ||
213 | /* 201 - 223 missing */ | ||
214 | #define __NR_HPUX_sigsetreturn 224 | ||
215 | #define __NR_HPUX_sigsetstatemask 225 | ||
216 | /* 226 missing */ | ||
217 | #define __NR_HPUX_cs 227 | ||
218 | #define __NR_HPUX_cds 228 | ||
219 | #define __NR_HPUX_set_no_trunc 229 | ||
220 | #define __NR_HPUX_pathconf 230 | ||
221 | #define __NR_HPUX_fpathconf 231 | ||
222 | /* 232, 233 missing */ | ||
223 | #define __NR_HPUX_nfs_fcntl 234 | ||
224 | #define __NR_HPUX_ogetacl 235 | ||
225 | #define __NR_HPUX_ofgetacl 236 | ||
226 | #define __NR_HPUX_osetacl 237 | ||
227 | #define __NR_HPUX_ofsetacl 238 | ||
228 | #define __NR_HPUX_pstat 239 | ||
229 | #define __NR_HPUX_getaudid 240 | ||
230 | #define __NR_HPUX_setaudid 241 | ||
231 | #define __NR_HPUX_getaudproc 242 | ||
232 | #define __NR_HPUX_setaudproc 243 | ||
233 | #define __NR_HPUX_getevent 244 | ||
234 | #define __NR_HPUX_setevent 245 | ||
235 | #define __NR_HPUX_audwrite 246 | ||
236 | #define __NR_HPUX_audswitch 247 | ||
237 | #define __NR_HPUX_audctl 248 | ||
238 | #define __NR_HPUX_ogetaccess 249 | ||
239 | #define __NR_HPUX_fsctl 250 | ||
240 | /* 251 - 258 missing */ | ||
241 | #define __NR_HPUX_swapfs 259 | ||
242 | #define __NR_HPUX_fss 260 | ||
243 | /* 261 - 266 missing */ | ||
244 | #define __NR_HPUX_tsync 267 | ||
245 | #define __NR_HPUX_getnumfds 268 | ||
246 | #define __NR_HPUX_poll 269 | ||
247 | #define __NR_HPUX_getmsg 270 | ||
248 | #define __NR_HPUX_putmsg 271 | ||
249 | #define __NR_HPUX_fchdir 272 | ||
250 | #define __NR_HPUX_getmount_cnt 273 | ||
251 | #define __NR_HPUX_getmount_entry 274 | ||
252 | #define __NR_HPUX_accept 275 | ||
253 | #define __NR_HPUX_bind 276 | ||
254 | #define __NR_HPUX_connect 277 | ||
255 | #define __NR_HPUX_getpeername 278 | ||
256 | #define __NR_HPUX_getsockname 279 | ||
257 | #define __NR_HPUX_getsockopt 280 | ||
258 | #define __NR_HPUX_listen 281 | ||
259 | #define __NR_HPUX_recv 282 | ||
260 | #define __NR_HPUX_recvfrom 283 | ||
261 | #define __NR_HPUX_recvmsg 284 | ||
262 | #define __NR_HPUX_send 285 | ||
263 | #define __NR_HPUX_sendmsg 286 | ||
264 | #define __NR_HPUX_sendto 287 | ||
265 | #define __NR_HPUX_setsockopt 288 | ||
266 | #define __NR_HPUX_shutdown 289 | ||
267 | #define __NR_HPUX_socket 290 | ||
268 | #define __NR_HPUX_socketpair 291 | ||
269 | #define __NR_HPUX_proc_open 292 | ||
270 | #define __NR_HPUX_proc_close 293 | ||
271 | #define __NR_HPUX_proc_send 294 | ||
272 | #define __NR_HPUX_proc_recv 295 | ||
273 | #define __NR_HPUX_proc_sendrecv 296 | ||
274 | #define __NR_HPUX_proc_syscall 297 | ||
275 | /* 298 - 311 missing */ | ||
276 | #define __NR_HPUX_semctl 312 | ||
277 | #define __NR_HPUX_msgctl 313 | ||
278 | #define __NR_HPUX_shmctl 314 | ||
279 | #define __NR_HPUX_mpctl 315 | ||
280 | #define __NR_HPUX_exportfs 316 | ||
281 | #define __NR_HPUX_getpmsg 317 | ||
282 | #define __NR_HPUX_putpmsg 318 | ||
283 | /* 319 missing */ | ||
284 | #define __NR_HPUX_msync 320 | ||
285 | #define __NR_HPUX_msleep 321 | ||
286 | #define __NR_HPUX_mwakeup 322 | ||
287 | #define __NR_HPUX_msem_init 323 | ||
288 | #define __NR_HPUX_msem_remove 324 | ||
289 | #define __NR_HPUX_adjtime 325 | ||
290 | #define __NR_HPUX_kload 326 | ||
291 | #define __NR_HPUX_fattach 327 | ||
292 | #define __NR_HPUX_fdetach 328 | ||
293 | #define __NR_HPUX_serialize 329 | ||
294 | #define __NR_HPUX_statvfs 330 | ||
295 | #define __NR_HPUX_fstatvfs 331 | ||
296 | #define __NR_HPUX_lchown 332 | ||
297 | #define __NR_HPUX_getsid 333 | ||
298 | #define __NR_HPUX_sysfs 334 | ||
299 | /* 335, 336 missing */ | ||
300 | #define __NR_HPUX_sched_setparam 337 | ||
301 | #define __NR_HPUX_sched_getparam 338 | ||
302 | #define __NR_HPUX_sched_setscheduler 339 | ||
303 | #define __NR_HPUX_sched_getscheduler 340 | ||
304 | #define __NR_HPUX_sched_yield 341 | ||
305 | #define __NR_HPUX_sched_get_priority_max 342 | ||
306 | #define __NR_HPUX_sched_get_priority_min 343 | ||
307 | #define __NR_HPUX_sched_rr_get_interval 344 | ||
308 | #define __NR_HPUX_clock_settime 345 | ||
309 | #define __NR_HPUX_clock_gettime 346 | ||
310 | #define __NR_HPUX_clock_getres 347 | ||
311 | #define __NR_HPUX_timer_create 348 | ||
312 | #define __NR_HPUX_timer_delete 349 | ||
313 | #define __NR_HPUX_timer_settime 350 | ||
314 | #define __NR_HPUX_timer_gettime 351 | ||
315 | #define __NR_HPUX_timer_getoverrun 352 | ||
316 | #define __NR_HPUX_nanosleep 353 | ||
317 | #define __NR_HPUX_toolbox 354 | ||
318 | /* 355 missing */ | ||
319 | #define __NR_HPUX_getdents 356 | ||
320 | #define __NR_HPUX_getcontext 357 | ||
321 | #define __NR_HPUX_sysinfo 358 | ||
322 | #define __NR_HPUX_fcntl64 359 | ||
323 | #define __NR_HPUX_ftruncate64 360 | ||
324 | #define __NR_HPUX_fstat64 361 | ||
325 | #define __NR_HPUX_getdirentries64 362 | ||
326 | #define __NR_HPUX_getrlimit64 363 | ||
327 | #define __NR_HPUX_lockf64 364 | ||
328 | #define __NR_HPUX_lseek64 365 | ||
329 | #define __NR_HPUX_lstat64 366 | ||
330 | #define __NR_HPUX_mmap64 367 | ||
331 | #define __NR_HPUX_setrlimit64 368 | ||
332 | #define __NR_HPUX_stat64 369 | ||
333 | #define __NR_HPUX_truncate64 370 | ||
334 | #define __NR_HPUX_ulimit64 371 | ||
335 | #define __NR_HPUX_pread 372 | ||
336 | #define __NR_HPUX_preadv 373 | ||
337 | #define __NR_HPUX_pwrite 374 | ||
338 | #define __NR_HPUX_pwritev 375 | ||
339 | #define __NR_HPUX_pread64 376 | ||
340 | #define __NR_HPUX_preadv64 377 | ||
341 | #define __NR_HPUX_pwrite64 378 | ||
342 | #define __NR_HPUX_pwritev64 379 | ||
343 | #define __NR_HPUX_setcontext 380 | ||
344 | #define __NR_HPUX_sigaltstack 381 | ||
345 | #define __NR_HPUX_waitid 382 | ||
346 | #define __NR_HPUX_setpgrp 383 | ||
347 | #define __NR_HPUX_recvmsg2 384 | ||
348 | #define __NR_HPUX_sendmsg2 385 | ||
349 | #define __NR_HPUX_socket2 386 | ||
350 | #define __NR_HPUX_socketpair2 387 | ||
351 | #define __NR_HPUX_setregid 388 | ||
352 | #define __NR_HPUX_lwp_create 389 | ||
353 | #define __NR_HPUX_lwp_terminate 390 | ||
354 | #define __NR_HPUX_lwp_wait 391 | ||
355 | #define __NR_HPUX_lwp_suspend 392 | ||
356 | #define __NR_HPUX_lwp_resume 393 | ||
357 | /* 394 missing */ | ||
358 | #define __NR_HPUX_lwp_abort_syscall 395 | ||
359 | #define __NR_HPUX_lwp_info 396 | ||
360 | #define __NR_HPUX_lwp_kill 397 | ||
361 | #define __NR_HPUX_ksleep 398 | ||
362 | #define __NR_HPUX_kwakeup 399 | ||
363 | /* 400 missing */ | ||
364 | #define __NR_HPUX_pstat_getlwp 401 | ||
365 | #define __NR_HPUX_lwp_exit 402 | ||
366 | #define __NR_HPUX_lwp_continue 403 | ||
367 | #define __NR_HPUX_getacl 404 | ||
368 | #define __NR_HPUX_fgetacl 405 | ||
369 | #define __NR_HPUX_setacl 406 | ||
370 | #define __NR_HPUX_fsetacl 407 | ||
371 | #define __NR_HPUX_getaccess 408 | ||
372 | #define __NR_HPUX_lwp_mutex_init 409 | ||
373 | #define __NR_HPUX_lwp_mutex_lock_sys 410 | ||
374 | #define __NR_HPUX_lwp_mutex_unlock 411 | ||
375 | #define __NR_HPUX_lwp_cond_init 412 | ||
376 | #define __NR_HPUX_lwp_cond_signal 413 | ||
377 | #define __NR_HPUX_lwp_cond_broadcast 414 | ||
378 | #define __NR_HPUX_lwp_cond_wait_sys 415 | ||
379 | #define __NR_HPUX_lwp_getscheduler 416 | ||
380 | #define __NR_HPUX_lwp_setscheduler 417 | ||
381 | #define __NR_HPUX_lwp_getstate 418 | ||
382 | #define __NR_HPUX_lwp_setstate 419 | ||
383 | #define __NR_HPUX_lwp_detach 420 | ||
384 | #define __NR_HPUX_mlock 421 | ||
385 | #define __NR_HPUX_munlock 422 | ||
386 | #define __NR_HPUX_mlockall 423 | ||
387 | #define __NR_HPUX_munlockall 424 | ||
388 | #define __NR_HPUX_shm_open 425 | ||
389 | #define __NR_HPUX_shm_unlink 426 | ||
390 | #define __NR_HPUX_sigqueue 427 | ||
391 | #define __NR_HPUX_sigwaitinfo 428 | ||
392 | #define __NR_HPUX_sigtimedwait 429 | ||
393 | #define __NR_HPUX_sigwait 430 | ||
394 | #define __NR_HPUX_aio_read 431 | ||
395 | #define __NR_HPUX_aio_write 432 | ||
396 | #define __NR_HPUX_lio_listio 433 | ||
397 | #define __NR_HPUX_aio_error 434 | ||
398 | #define __NR_HPUX_aio_return 435 | ||
399 | #define __NR_HPUX_aio_cancel 436 | ||
400 | #define __NR_HPUX_aio_suspend 437 | ||
401 | #define __NR_HPUX_aio_fsync 438 | ||
402 | #define __NR_HPUX_mq_open 439 | ||
403 | #define __NR_HPUX_mq_close 440 | ||
404 | #define __NR_HPUX_mq_unlink 441 | ||
405 | #define __NR_HPUX_mq_send 442 | ||
406 | #define __NR_HPUX_mq_receive 443 | ||
407 | #define __NR_HPUX_mq_notify 444 | ||
408 | #define __NR_HPUX_mq_setattr 445 | ||
409 | #define __NR_HPUX_mq_getattr 446 | ||
410 | #define __NR_HPUX_ksem_open 447 | ||
411 | #define __NR_HPUX_ksem_unlink 448 | ||
412 | #define __NR_HPUX_ksem_close 449 | ||
413 | #define __NR_HPUX_ksem_post 450 | ||
414 | #define __NR_HPUX_ksem_wait 451 | ||
415 | #define __NR_HPUX_ksem_read 452 | ||
416 | #define __NR_HPUX_ksem_trywait 453 | ||
417 | #define __NR_HPUX_lwp_rwlock_init 454 | ||
418 | #define __NR_HPUX_lwp_rwlock_destroy 455 | ||
419 | #define __NR_HPUX_lwp_rwlock_rdlock_sys 456 | ||
420 | #define __NR_HPUX_lwp_rwlock_wrlock_sys 457 | ||
421 | #define __NR_HPUX_lwp_rwlock_tryrdlock 458 | ||
422 | #define __NR_HPUX_lwp_rwlock_trywrlock 459 | ||
423 | #define __NR_HPUX_lwp_rwlock_unlock 460 | ||
424 | #define __NR_HPUX_ttrace 461 | ||
425 | #define __NR_HPUX_ttrace_wait 462 | ||
426 | #define __NR_HPUX_lf_wire_mem 463 | ||
427 | #define __NR_HPUX_lf_unwire_mem 464 | ||
428 | #define __NR_HPUX_lf_send_pin_map 465 | ||
429 | #define __NR_HPUX_lf_free_buf 466 | ||
430 | #define __NR_HPUX_lf_wait_nq 467 | ||
431 | #define __NR_HPUX_lf_wakeup_conn_q 468 | ||
432 | #define __NR_HPUX_lf_unused 469 | ||
433 | #define __NR_HPUX_lwp_sema_init 470 | ||
434 | #define __NR_HPUX_lwp_sema_post 471 | ||
435 | #define __NR_HPUX_lwp_sema_wait 472 | ||
436 | #define __NR_HPUX_lwp_sema_trywait 473 | ||
437 | #define __NR_HPUX_lwp_sema_destroy 474 | ||
438 | #define __NR_HPUX_statvfs64 475 | ||
439 | #define __NR_HPUX_fstatvfs64 476 | ||
440 | #define __NR_HPUX_msh_register 477 | ||
441 | #define __NR_HPUX_ptrace64 478 | ||
442 | #define __NR_HPUX_sendfile 479 | ||
443 | #define __NR_HPUX_sendpath 480 | ||
444 | #define __NR_HPUX_sendfile64 481 | ||
445 | #define __NR_HPUX_sendpath64 482 | ||
446 | #define __NR_HPUX_modload 483 | ||
447 | #define __NR_HPUX_moduload 484 | ||
448 | #define __NR_HPUX_modpath 485 | ||
449 | #define __NR_HPUX_getksym 486 | ||
450 | #define __NR_HPUX_modadm 487 | ||
451 | #define __NR_HPUX_modstat 488 | ||
452 | #define __NR_HPUX_lwp_detached_exit 489 | ||
453 | #define __NR_HPUX_crashconf 490 | ||
454 | #define __NR_HPUX_siginhibit 491 | ||
455 | #define __NR_HPUX_sigenable 492 | ||
456 | #define __NR_HPUX_spuctl 493 | ||
457 | #define __NR_HPUX_zerokernelsum 494 | ||
458 | #define __NR_HPUX_nfs_kstat 495 | ||
459 | #define __NR_HPUX_aio_read64 496 | ||
460 | #define __NR_HPUX_aio_write64 497 | ||
461 | #define __NR_HPUX_aio_error64 498 | ||
462 | #define __NR_HPUX_aio_return64 499 | ||
463 | #define __NR_HPUX_aio_cancel64 500 | ||
464 | #define __NR_HPUX_aio_suspend64 501 | ||
465 | #define __NR_HPUX_aio_fsync64 502 | ||
466 | #define __NR_HPUX_lio_listio64 503 | ||
467 | #define __NR_HPUX_recv2 504 | ||
468 | #define __NR_HPUX_recvfrom2 505 | ||
469 | #define __NR_HPUX_send2 506 | ||
470 | #define __NR_HPUX_sendto2 507 | ||
471 | #define __NR_HPUX_acl 508 | ||
472 | #define __NR_HPUX___cnx_p2p_ctl 509 | ||
473 | #define __NR_HPUX___cnx_gsched_ctl 510 | ||
474 | #define __NR_HPUX___cnx_pmon_ctl 511 | ||
475 | |||
476 | #define __NR_HPUX_syscalls 512 | ||
477 | |||
478 | /* | ||
479 | * Linux system call numbers. | ||
480 | * | ||
481 | * Cary Coutant says that we should just use another syscall gateway | ||
482 | * page to avoid clashing with the HPUX space, and I think he's right: | ||
483 | * it would keep a branch out of our syscall entry path, at the | ||
484 | * very least. If we decide to change it later, we can ``just'' tweak | ||
485 | * the LINUX_GATEWAY_ADDR define at the bottom and make __NR_Linux be | ||
486 | * 1024 or something. Oh, and recompile libc. =) | ||
487 | * | ||
488 | * 64-bit HPUX binaries get the syscall gateway address passed in a register | ||
489 | * from the kernel at startup, which seems a sane strategy. | ||
490 | */ | ||
491 | |||
492 | #define __NR_Linux 0 | ||
493 | #define __NR_restart_syscall (__NR_Linux + 0) | ||
494 | #define __NR_exit (__NR_Linux + 1) | ||
495 | #define __NR_fork (__NR_Linux + 2) | ||
496 | #define __NR_read (__NR_Linux + 3) | ||
497 | #define __NR_write (__NR_Linux + 4) | ||
498 | #define __NR_open (__NR_Linux + 5) | ||
499 | #define __NR_close (__NR_Linux + 6) | ||
500 | #define __NR_waitpid (__NR_Linux + 7) | ||
501 | #define __NR_creat (__NR_Linux + 8) | ||
502 | #define __NR_link (__NR_Linux + 9) | ||
503 | #define __NR_unlink (__NR_Linux + 10) | ||
504 | #define __NR_execve (__NR_Linux + 11) | ||
505 | #define __NR_chdir (__NR_Linux + 12) | ||
506 | #define __NR_time (__NR_Linux + 13) | ||
507 | #define __NR_mknod (__NR_Linux + 14) | ||
508 | #define __NR_chmod (__NR_Linux + 15) | ||
509 | #define __NR_lchown (__NR_Linux + 16) | ||
510 | #define __NR_socket (__NR_Linux + 17) | ||
511 | #define __NR_stat (__NR_Linux + 18) | ||
512 | #define __NR_lseek (__NR_Linux + 19) | ||
513 | #define __NR_getpid (__NR_Linux + 20) | ||
514 | #define __NR_mount (__NR_Linux + 21) | ||
515 | #define __NR_bind (__NR_Linux + 22) | ||
516 | #define __NR_setuid (__NR_Linux + 23) | ||
517 | #define __NR_getuid (__NR_Linux + 24) | ||
518 | #define __NR_stime (__NR_Linux + 25) | ||
519 | #define __NR_ptrace (__NR_Linux + 26) | ||
520 | #define __NR_alarm (__NR_Linux + 27) | ||
521 | #define __NR_fstat (__NR_Linux + 28) | ||
522 | #define __NR_pause (__NR_Linux + 29) | ||
523 | #define __NR_utime (__NR_Linux + 30) | ||
524 | #define __NR_connect (__NR_Linux + 31) | ||
525 | #define __NR_listen (__NR_Linux + 32) | ||
526 | #define __NR_access (__NR_Linux + 33) | ||
527 | #define __NR_nice (__NR_Linux + 34) | ||
528 | #define __NR_accept (__NR_Linux + 35) | ||
529 | #define __NR_sync (__NR_Linux + 36) | ||
530 | #define __NR_kill (__NR_Linux + 37) | ||
531 | #define __NR_rename (__NR_Linux + 38) | ||
532 | #define __NR_mkdir (__NR_Linux + 39) | ||
533 | #define __NR_rmdir (__NR_Linux + 40) | ||
534 | #define __NR_dup (__NR_Linux + 41) | ||
535 | #define __NR_pipe (__NR_Linux + 42) | ||
536 | #define __NR_times (__NR_Linux + 43) | ||
537 | #define __NR_getsockname (__NR_Linux + 44) | ||
538 | #define __NR_brk (__NR_Linux + 45) | ||
539 | #define __NR_setgid (__NR_Linux + 46) | ||
540 | #define __NR_getgid (__NR_Linux + 47) | ||
541 | #define __NR_signal (__NR_Linux + 48) | ||
542 | #define __NR_geteuid (__NR_Linux + 49) | ||
543 | #define __NR_getegid (__NR_Linux + 50) | ||
544 | #define __NR_acct (__NR_Linux + 51) | ||
545 | #define __NR_umount2 (__NR_Linux + 52) | ||
546 | #define __NR_getpeername (__NR_Linux + 53) | ||
547 | #define __NR_ioctl (__NR_Linux + 54) | ||
548 | #define __NR_fcntl (__NR_Linux + 55) | ||
549 | #define __NR_socketpair (__NR_Linux + 56) | ||
550 | #define __NR_setpgid (__NR_Linux + 57) | ||
551 | #define __NR_send (__NR_Linux + 58) | ||
552 | #define __NR_uname (__NR_Linux + 59) | ||
553 | #define __NR_umask (__NR_Linux + 60) | ||
554 | #define __NR_chroot (__NR_Linux + 61) | ||
555 | #define __NR_ustat (__NR_Linux + 62) | ||
556 | #define __NR_dup2 (__NR_Linux + 63) | ||
557 | #define __NR_getppid (__NR_Linux + 64) | ||
558 | #define __NR_getpgrp (__NR_Linux + 65) | ||
559 | #define __NR_setsid (__NR_Linux + 66) | ||
560 | #define __NR_pivot_root (__NR_Linux + 67) | ||
561 | #define __NR_sgetmask (__NR_Linux + 68) | ||
562 | #define __NR_ssetmask (__NR_Linux + 69) | ||
563 | #define __NR_setreuid (__NR_Linux + 70) | ||
564 | #define __NR_setregid (__NR_Linux + 71) | ||
565 | #define __NR_mincore (__NR_Linux + 72) | ||
566 | #define __NR_sigpending (__NR_Linux + 73) | ||
567 | #define __NR_sethostname (__NR_Linux + 74) | ||
568 | #define __NR_setrlimit (__NR_Linux + 75) | ||
569 | #define __NR_getrlimit (__NR_Linux + 76) | ||
570 | #define __NR_getrusage (__NR_Linux + 77) | ||
571 | #define __NR_gettimeofday (__NR_Linux + 78) | ||
572 | #define __NR_settimeofday (__NR_Linux + 79) | ||
573 | #define __NR_getgroups (__NR_Linux + 80) | ||
574 | #define __NR_setgroups (__NR_Linux + 81) | ||
575 | #define __NR_sendto (__NR_Linux + 82) | ||
576 | #define __NR_symlink (__NR_Linux + 83) | ||
577 | #define __NR_lstat (__NR_Linux + 84) | ||
578 | #define __NR_readlink (__NR_Linux + 85) | ||
579 | #define __NR_uselib (__NR_Linux + 86) | ||
580 | #define __NR_swapon (__NR_Linux + 87) | ||
581 | #define __NR_reboot (__NR_Linux + 88) | ||
582 | #define __NR_mmap2 (__NR_Linux + 89) | ||
583 | #define __NR_mmap (__NR_Linux + 90) | ||
584 | #define __NR_munmap (__NR_Linux + 91) | ||
585 | #define __NR_truncate (__NR_Linux + 92) | ||
586 | #define __NR_ftruncate (__NR_Linux + 93) | ||
587 | #define __NR_fchmod (__NR_Linux + 94) | ||
588 | #define __NR_fchown (__NR_Linux + 95) | ||
589 | #define __NR_getpriority (__NR_Linux + 96) | ||
590 | #define __NR_setpriority (__NR_Linux + 97) | ||
591 | #define __NR_recv (__NR_Linux + 98) | ||
592 | #define __NR_statfs (__NR_Linux + 99) | ||
593 | #define __NR_fstatfs (__NR_Linux + 100) | ||
594 | #define __NR_stat64 (__NR_Linux + 101) | ||
595 | /* #define __NR_socketcall (__NR_Linux + 102) */ | ||
596 | #define __NR_syslog (__NR_Linux + 103) | ||
597 | #define __NR_setitimer (__NR_Linux + 104) | ||
598 | #define __NR_getitimer (__NR_Linux + 105) | ||
599 | #define __NR_capget (__NR_Linux + 106) | ||
600 | #define __NR_capset (__NR_Linux + 107) | ||
601 | #define __NR_pread64 (__NR_Linux + 108) | ||
602 | #define __NR_pwrite64 (__NR_Linux + 109) | ||
603 | #define __NR_getcwd (__NR_Linux + 110) | ||
604 | #define __NR_vhangup (__NR_Linux + 111) | ||
605 | #define __NR_fstat64 (__NR_Linux + 112) | ||
606 | #define __NR_vfork (__NR_Linux + 113) | ||
607 | #define __NR_wait4 (__NR_Linux + 114) | ||
608 | #define __NR_swapoff (__NR_Linux + 115) | ||
609 | #define __NR_sysinfo (__NR_Linux + 116) | ||
610 | #define __NR_shutdown (__NR_Linux + 117) | ||
611 | #define __NR_fsync (__NR_Linux + 118) | ||
612 | #define __NR_madvise (__NR_Linux + 119) | ||
613 | #define __NR_clone (__NR_Linux + 120) | ||
614 | #define __NR_setdomainname (__NR_Linux + 121) | ||
615 | #define __NR_sendfile (__NR_Linux + 122) | ||
616 | #define __NR_recvfrom (__NR_Linux + 123) | ||
617 | #define __NR_adjtimex (__NR_Linux + 124) | ||
618 | #define __NR_mprotect (__NR_Linux + 125) | ||
619 | #define __NR_sigprocmask (__NR_Linux + 126) | ||
620 | #define __NR_create_module (__NR_Linux + 127) | ||
621 | #define __NR_init_module (__NR_Linux + 128) | ||
622 | #define __NR_delete_module (__NR_Linux + 129) | ||
623 | #define __NR_get_kernel_syms (__NR_Linux + 130) | ||
624 | #define __NR_quotactl (__NR_Linux + 131) | ||
625 | #define __NR_getpgid (__NR_Linux + 132) | ||
626 | #define __NR_fchdir (__NR_Linux + 133) | ||
627 | #define __NR_bdflush (__NR_Linux + 134) | ||
628 | #define __NR_sysfs (__NR_Linux + 135) | ||
629 | #define __NR_personality (__NR_Linux + 136) | ||
630 | #define __NR_afs_syscall (__NR_Linux + 137) /* Syscall for Andrew File System */ | ||
631 | #define __NR_setfsuid (__NR_Linux + 138) | ||
632 | #define __NR_setfsgid (__NR_Linux + 139) | ||
633 | #define __NR__llseek (__NR_Linux + 140) | ||
634 | #define __NR_getdents (__NR_Linux + 141) | ||
635 | #define __NR__newselect (__NR_Linux + 142) | ||
636 | #define __NR_flock (__NR_Linux + 143) | ||
637 | #define __NR_msync (__NR_Linux + 144) | ||
638 | #define __NR_readv (__NR_Linux + 145) | ||
639 | #define __NR_writev (__NR_Linux + 146) | ||
640 | #define __NR_getsid (__NR_Linux + 147) | ||
641 | #define __NR_fdatasync (__NR_Linux + 148) | ||
642 | #define __NR__sysctl (__NR_Linux + 149) | ||
643 | #define __NR_mlock (__NR_Linux + 150) | ||
644 | #define __NR_munlock (__NR_Linux + 151) | ||
645 | #define __NR_mlockall (__NR_Linux + 152) | ||
646 | #define __NR_munlockall (__NR_Linux + 153) | ||
647 | #define __NR_sched_setparam (__NR_Linux + 154) | ||
648 | #define __NR_sched_getparam (__NR_Linux + 155) | ||
649 | #define __NR_sched_setscheduler (__NR_Linux + 156) | ||
650 | #define __NR_sched_getscheduler (__NR_Linux + 157) | ||
651 | #define __NR_sched_yield (__NR_Linux + 158) | ||
652 | #define __NR_sched_get_priority_max (__NR_Linux + 159) | ||
653 | #define __NR_sched_get_priority_min (__NR_Linux + 160) | ||
654 | #define __NR_sched_rr_get_interval (__NR_Linux + 161) | ||
655 | #define __NR_nanosleep (__NR_Linux + 162) | ||
656 | #define __NR_mremap (__NR_Linux + 163) | ||
657 | #define __NR_setresuid (__NR_Linux + 164) | ||
658 | #define __NR_getresuid (__NR_Linux + 165) | ||
659 | #define __NR_sigaltstack (__NR_Linux + 166) | ||
660 | #define __NR_query_module (__NR_Linux + 167) | ||
661 | #define __NR_poll (__NR_Linux + 168) | ||
662 | #define __NR_nfsservctl (__NR_Linux + 169) | ||
663 | #define __NR_setresgid (__NR_Linux + 170) | ||
664 | #define __NR_getresgid (__NR_Linux + 171) | ||
665 | #define __NR_prctl (__NR_Linux + 172) | ||
666 | #define __NR_rt_sigreturn (__NR_Linux + 173) | ||
667 | #define __NR_rt_sigaction (__NR_Linux + 174) | ||
668 | #define __NR_rt_sigprocmask (__NR_Linux + 175) | ||
669 | #define __NR_rt_sigpending (__NR_Linux + 176) | ||
670 | #define __NR_rt_sigtimedwait (__NR_Linux + 177) | ||
671 | #define __NR_rt_sigqueueinfo (__NR_Linux + 178) | ||
672 | #define __NR_rt_sigsuspend (__NR_Linux + 179) | ||
673 | #define __NR_chown (__NR_Linux + 180) | ||
674 | #define __NR_setsockopt (__NR_Linux + 181) | ||
675 | #define __NR_getsockopt (__NR_Linux + 182) | ||
676 | #define __NR_sendmsg (__NR_Linux + 183) | ||
677 | #define __NR_recvmsg (__NR_Linux + 184) | ||
678 | #define __NR_semop (__NR_Linux + 185) | ||
679 | #define __NR_semget (__NR_Linux + 186) | ||
680 | #define __NR_semctl (__NR_Linux + 187) | ||
681 | #define __NR_msgsnd (__NR_Linux + 188) | ||
682 | #define __NR_msgrcv (__NR_Linux + 189) | ||
683 | #define __NR_msgget (__NR_Linux + 190) | ||
684 | #define __NR_msgctl (__NR_Linux + 191) | ||
685 | #define __NR_shmat (__NR_Linux + 192) | ||
686 | #define __NR_shmdt (__NR_Linux + 193) | ||
687 | #define __NR_shmget (__NR_Linux + 194) | ||
688 | #define __NR_shmctl (__NR_Linux + 195) | ||
689 | |||
690 | #define __NR_getpmsg (__NR_Linux + 196) /* Somebody *wants* streams? */ | ||
691 | #define __NR_putpmsg (__NR_Linux + 197) | ||
692 | |||
693 | #define __NR_lstat64 (__NR_Linux + 198) | ||
694 | #define __NR_truncate64 (__NR_Linux + 199) | ||
695 | #define __NR_ftruncate64 (__NR_Linux + 200) | ||
696 | #define __NR_getdents64 (__NR_Linux + 201) | ||
697 | #define __NR_fcntl64 (__NR_Linux + 202) | ||
698 | #define __NR_attrctl (__NR_Linux + 203) | ||
699 | #define __NR_acl_get (__NR_Linux + 204) | ||
700 | #define __NR_acl_set (__NR_Linux + 205) | ||
701 | #define __NR_gettid (__NR_Linux + 206) | ||
702 | #define __NR_readahead (__NR_Linux + 207) | ||
703 | #define __NR_tkill (__NR_Linux + 208) | ||
704 | #define __NR_sendfile64 (__NR_Linux + 209) | ||
705 | #define __NR_futex (__NR_Linux + 210) | ||
706 | #define __NR_sched_setaffinity (__NR_Linux + 211) | ||
707 | #define __NR_sched_getaffinity (__NR_Linux + 212) | ||
708 | #define __NR_set_thread_area (__NR_Linux + 213) | ||
709 | #define __NR_get_thread_area (__NR_Linux + 214) | ||
710 | #define __NR_io_setup (__NR_Linux + 215) | ||
711 | #define __NR_io_destroy (__NR_Linux + 216) | ||
712 | #define __NR_io_getevents (__NR_Linux + 217) | ||
713 | #define __NR_io_submit (__NR_Linux + 218) | ||
714 | #define __NR_io_cancel (__NR_Linux + 219) | ||
715 | #define __NR_alloc_hugepages (__NR_Linux + 220) | ||
716 | #define __NR_free_hugepages (__NR_Linux + 221) | ||
717 | #define __NR_exit_group (__NR_Linux + 222) | ||
718 | #define __NR_lookup_dcookie (__NR_Linux + 223) | ||
719 | #define __NR_epoll_create (__NR_Linux + 224) | ||
720 | #define __NR_epoll_ctl (__NR_Linux + 225) | ||
721 | #define __NR_epoll_wait (__NR_Linux + 226) | ||
722 | #define __NR_remap_file_pages (__NR_Linux + 227) | ||
723 | #define __NR_semtimedop (__NR_Linux + 228) | ||
724 | #define __NR_mq_open (__NR_Linux + 229) | ||
725 | #define __NR_mq_unlink (__NR_Linux + 230) | ||
726 | #define __NR_mq_timedsend (__NR_Linux + 231) | ||
727 | #define __NR_mq_timedreceive (__NR_Linux + 232) | ||
728 | #define __NR_mq_notify (__NR_Linux + 233) | ||
729 | #define __NR_mq_getsetattr (__NR_Linux + 234) | ||
730 | #define __NR_waitid (__NR_Linux + 235) | ||
731 | #define __NR_fadvise64_64 (__NR_Linux + 236) | ||
732 | #define __NR_set_tid_address (__NR_Linux + 237) | ||
733 | #define __NR_setxattr (__NR_Linux + 238) | ||
734 | #define __NR_lsetxattr (__NR_Linux + 239) | ||
735 | #define __NR_fsetxattr (__NR_Linux + 240) | ||
736 | #define __NR_getxattr (__NR_Linux + 241) | ||
737 | #define __NR_lgetxattr (__NR_Linux + 242) | ||
738 | #define __NR_fgetxattr (__NR_Linux + 243) | ||
739 | #define __NR_listxattr (__NR_Linux + 244) | ||
740 | #define __NR_llistxattr (__NR_Linux + 245) | ||
741 | #define __NR_flistxattr (__NR_Linux + 246) | ||
742 | #define __NR_removexattr (__NR_Linux + 247) | ||
743 | #define __NR_lremovexattr (__NR_Linux + 248) | ||
744 | #define __NR_fremovexattr (__NR_Linux + 249) | ||
745 | #define __NR_timer_create (__NR_Linux + 250) | ||
746 | #define __NR_timer_settime (__NR_Linux + 251) | ||
747 | #define __NR_timer_gettime (__NR_Linux + 252) | ||
748 | #define __NR_timer_getoverrun (__NR_Linux + 253) | ||
749 | #define __NR_timer_delete (__NR_Linux + 254) | ||
750 | #define __NR_clock_settime (__NR_Linux + 255) | ||
751 | #define __NR_clock_gettime (__NR_Linux + 256) | ||
752 | #define __NR_clock_getres (__NR_Linux + 257) | ||
753 | #define __NR_clock_nanosleep (__NR_Linux + 258) | ||
754 | #define __NR_tgkill (__NR_Linux + 259) | ||
755 | #define __NR_mbind (__NR_Linux + 260) | ||
756 | #define __NR_get_mempolicy (__NR_Linux + 261) | ||
757 | #define __NR_set_mempolicy (__NR_Linux + 262) | ||
758 | #define __NR_vserver (__NR_Linux + 263) | ||
759 | #define __NR_add_key (__NR_Linux + 264) | ||
760 | #define __NR_request_key (__NR_Linux + 265) | ||
761 | #define __NR_keyctl (__NR_Linux + 266) | ||
762 | #define __NR_ioprio_set (__NR_Linux + 267) | ||
763 | #define __NR_ioprio_get (__NR_Linux + 268) | ||
764 | #define __NR_inotify_init (__NR_Linux + 269) | ||
765 | #define __NR_inotify_add_watch (__NR_Linux + 270) | ||
766 | #define __NR_inotify_rm_watch (__NR_Linux + 271) | ||
767 | #define __NR_migrate_pages (__NR_Linux + 272) | ||
768 | #define __NR_pselect6 (__NR_Linux + 273) | ||
769 | #define __NR_ppoll (__NR_Linux + 274) | ||
770 | #define __NR_openat (__NR_Linux + 275) | ||
771 | #define __NR_mkdirat (__NR_Linux + 276) | ||
772 | #define __NR_mknodat (__NR_Linux + 277) | ||
773 | #define __NR_fchownat (__NR_Linux + 278) | ||
774 | #define __NR_futimesat (__NR_Linux + 279) | ||
775 | #define __NR_fstatat64 (__NR_Linux + 280) | ||
776 | #define __NR_unlinkat (__NR_Linux + 281) | ||
777 | #define __NR_renameat (__NR_Linux + 282) | ||
778 | #define __NR_linkat (__NR_Linux + 283) | ||
779 | #define __NR_symlinkat (__NR_Linux + 284) | ||
780 | #define __NR_readlinkat (__NR_Linux + 285) | ||
781 | #define __NR_fchmodat (__NR_Linux + 286) | ||
782 | #define __NR_faccessat (__NR_Linux + 287) | ||
783 | #define __NR_unshare (__NR_Linux + 288) | ||
784 | #define __NR_set_robust_list (__NR_Linux + 289) | ||
785 | #define __NR_get_robust_list (__NR_Linux + 290) | ||
786 | #define __NR_splice (__NR_Linux + 291) | ||
787 | #define __NR_sync_file_range (__NR_Linux + 292) | ||
788 | #define __NR_tee (__NR_Linux + 293) | ||
789 | #define __NR_vmsplice (__NR_Linux + 294) | ||
790 | #define __NR_move_pages (__NR_Linux + 295) | ||
791 | #define __NR_getcpu (__NR_Linux + 296) | ||
792 | #define __NR_epoll_pwait (__NR_Linux + 297) | ||
793 | #define __NR_statfs64 (__NR_Linux + 298) | ||
794 | #define __NR_fstatfs64 (__NR_Linux + 299) | ||
795 | #define __NR_kexec_load (__NR_Linux + 300) | ||
796 | #define __NR_utimensat (__NR_Linux + 301) | ||
797 | #define __NR_signalfd (__NR_Linux + 302) | ||
798 | #define __NR_timerfd (__NR_Linux + 303) | ||
799 | #define __NR_eventfd (__NR_Linux + 304) | ||
800 | #define __NR_fallocate (__NR_Linux + 305) | ||
801 | #define __NR_timerfd_create (__NR_Linux + 306) | ||
802 | #define __NR_timerfd_settime (__NR_Linux + 307) | ||
803 | #define __NR_timerfd_gettime (__NR_Linux + 308) | ||
804 | #define __NR_signalfd4 (__NR_Linux + 309) | ||
805 | #define __NR_eventfd2 (__NR_Linux + 310) | ||
806 | #define __NR_epoll_create1 (__NR_Linux + 311) | ||
807 | #define __NR_dup3 (__NR_Linux + 312) | ||
808 | #define __NR_pipe2 (__NR_Linux + 313) | ||
809 | #define __NR_inotify_init1 (__NR_Linux + 314) | ||
810 | |||
811 | #define __NR_Linux_syscalls (__NR_inotify_init1 + 1) | ||
812 | |||
813 | |||
814 | #define __IGNORE_select /* newselect */ | ||
815 | #define __IGNORE_fadvise64 /* fadvise64_64 */ | ||
816 | #define __IGNORE_utimes /* utime */ | ||
817 | |||
818 | |||
819 | #define HPUX_GATEWAY_ADDR 0xC0000004 | ||
820 | #define LINUX_GATEWAY_ADDR 0x100 | ||
821 | |||
822 | #ifdef __KERNEL__ | ||
823 | #ifndef __ASSEMBLY__ | ||
824 | |||
825 | #define SYS_ify(syscall_name) __NR_##syscall_name | ||
826 | |||
827 | #ifndef ASM_LINE_SEP | ||
828 | # define ASM_LINE_SEP ; | ||
829 | #endif | ||
830 | |||
831 | /* Definition taken from glibc 2.3.3 | ||
832 | * sysdeps/unix/sysv/linux/hppa/sysdep.h | ||
833 | */ | ||
834 | |||
835 | #ifdef PIC | ||
836 | /* WARNING: CANNOT BE USED IN A NOP! */ | ||
837 | # define K_STW_ASM_PIC " copy %%r19, %%r4\n" | ||
838 | # define K_LDW_ASM_PIC " copy %%r4, %%r19\n" | ||
839 | # define K_USING_GR4 "%r4", | ||
840 | #else | ||
841 | # define K_STW_ASM_PIC " \n" | ||
842 | # define K_LDW_ASM_PIC " \n" | ||
843 | # define K_USING_GR4 | ||
844 | #endif | ||
845 | |||
846 | /* GCC has to be warned that a syscall may clobber all the ABI | ||
847 | registers listed as "caller-saves", see page 8, Table 2 | ||
848 | in section 2.2.6 of the PA-RISC RUN-TIME architecture | ||
849 | document. However! r28 is the result and will conflict with | ||
850 | the clobber list so it is left out. Also the input arguments | ||
851 | registers r20 -> r26 will conflict with the list so they | ||
852 | are treated specially. Although r19 is clobbered by the syscall | ||
853 | we cannot say this because it would violate ABI, thus we say | ||
854 | r4 is clobbered and use that register to save/restore r19 | ||
855 | across the syscall. */ | ||
856 | |||
857 | #define K_CALL_CLOB_REGS "%r1", "%r2", K_USING_GR4 \ | ||
858 | "%r20", "%r29", "%r31" | ||
859 | |||
860 | #undef K_INLINE_SYSCALL | ||
861 | #define K_INLINE_SYSCALL(name, nr, args...) ({ \ | ||
862 | long __sys_res; \ | ||
863 | { \ | ||
864 | register unsigned long __res __asm__("r28"); \ | ||
865 | K_LOAD_ARGS_##nr(args) \ | ||
866 | /* FIXME: HACK stw/ldw r19 around syscall */ \ | ||
867 | __asm__ volatile( \ | ||
868 | K_STW_ASM_PIC \ | ||
869 | " ble 0x100(%%sr2, %%r0)\n" \ | ||
870 | " ldi %1, %%r20\n" \ | ||
871 | K_LDW_ASM_PIC \ | ||
872 | : "=r" (__res) \ | ||
873 | : "i" (SYS_ify(name)) K_ASM_ARGS_##nr \ | ||
874 | : "memory", K_CALL_CLOB_REGS K_CLOB_ARGS_##nr \ | ||
875 | ); \ | ||
876 | __sys_res = (long)__res; \ | ||
877 | } \ | ||
878 | if ( (unsigned long)__sys_res >= (unsigned long)-4095 ){ \ | ||
879 | errno = -__sys_res; \ | ||
880 | __sys_res = -1; \ | ||
881 | } \ | ||
882 | __sys_res; \ | ||
883 | }) | ||
884 | |||
885 | #define K_LOAD_ARGS_0() | ||
886 | #define K_LOAD_ARGS_1(r26) \ | ||
887 | register unsigned long __r26 __asm__("r26") = (unsigned long)(r26); \ | ||
888 | K_LOAD_ARGS_0() | ||
889 | #define K_LOAD_ARGS_2(r26,r25) \ | ||
890 | register unsigned long __r25 __asm__("r25") = (unsigned long)(r25); \ | ||
891 | K_LOAD_ARGS_1(r26) | ||
892 | #define K_LOAD_ARGS_3(r26,r25,r24) \ | ||
893 | register unsigned long __r24 __asm__("r24") = (unsigned long)(r24); \ | ||
894 | K_LOAD_ARGS_2(r26,r25) | ||
895 | #define K_LOAD_ARGS_4(r26,r25,r24,r23) \ | ||
896 | register unsigned long __r23 __asm__("r23") = (unsigned long)(r23); \ | ||
897 | K_LOAD_ARGS_3(r26,r25,r24) | ||
898 | #define K_LOAD_ARGS_5(r26,r25,r24,r23,r22) \ | ||
899 | register unsigned long __r22 __asm__("r22") = (unsigned long)(r22); \ | ||
900 | K_LOAD_ARGS_4(r26,r25,r24,r23) | ||
901 | #define K_LOAD_ARGS_6(r26,r25,r24,r23,r22,r21) \ | ||
902 | register unsigned long __r21 __asm__("r21") = (unsigned long)(r21); \ | ||
903 | K_LOAD_ARGS_5(r26,r25,r24,r23,r22) | ||
904 | |||
905 | /* Even with zero args we use r20 for the syscall number */ | ||
906 | #define K_ASM_ARGS_0 | ||
907 | #define K_ASM_ARGS_1 K_ASM_ARGS_0, "r" (__r26) | ||
908 | #define K_ASM_ARGS_2 K_ASM_ARGS_1, "r" (__r25) | ||
909 | #define K_ASM_ARGS_3 K_ASM_ARGS_2, "r" (__r24) | ||
910 | #define K_ASM_ARGS_4 K_ASM_ARGS_3, "r" (__r23) | ||
911 | #define K_ASM_ARGS_5 K_ASM_ARGS_4, "r" (__r22) | ||
912 | #define K_ASM_ARGS_6 K_ASM_ARGS_5, "r" (__r21) | ||
913 | |||
914 | /* The registers not listed as inputs but clobbered */ | ||
915 | #define K_CLOB_ARGS_6 | ||
916 | #define K_CLOB_ARGS_5 K_CLOB_ARGS_6, "%r21" | ||
917 | #define K_CLOB_ARGS_4 K_CLOB_ARGS_5, "%r22" | ||
918 | #define K_CLOB_ARGS_3 K_CLOB_ARGS_4, "%r23" | ||
919 | #define K_CLOB_ARGS_2 K_CLOB_ARGS_3, "%r24" | ||
920 | #define K_CLOB_ARGS_1 K_CLOB_ARGS_2, "%r25" | ||
921 | #define K_CLOB_ARGS_0 K_CLOB_ARGS_1, "%r26" | ||
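Worked out for a three-argument call (purely illustrative, not literal preprocessor output), these helpers pin the arguments to the PA-RISC argument registers and mark the unused ones as clobbered:

/*
 * K_LOAD_ARGS_3(fd, buf, count) declares:
 *      register unsigned long __r24 asm("r24") = (unsigned long)(count);
 *      register unsigned long __r25 asm("r25") = (unsigned long)(buf);
 *      register unsigned long __r26 asm("r26") = (unsigned long)(fd);
 *
 * K_ASM_ARGS_3 then appends  , "r" (__r26), "r" (__r25), "r" (__r24)
 * to the asm input list, and K_CLOB_ARGS_3 appends the unused
 * argument registers  , "%r21", "%r22", "%r23"  to the clobber list.
 */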
922 | |||
923 | #define _syscall0(type,name) \ | ||
924 | type name(void) \ | ||
925 | { \ | ||
926 | return K_INLINE_SYSCALL(name, 0); \ | ||
927 | } | ||
928 | |||
929 | #define _syscall1(type,name,type1,arg1) \ | ||
930 | type name(type1 arg1) \ | ||
931 | { \ | ||
932 | return K_INLINE_SYSCALL(name, 1, arg1); \ | ||
933 | } | ||
934 | |||
935 | #define _syscall2(type,name,type1,arg1,type2,arg2) \ | ||
936 | type name(type1 arg1, type2 arg2) \ | ||
937 | { \ | ||
938 | return K_INLINE_SYSCALL(name, 2, arg1, arg2); \ | ||
939 | } | ||
940 | |||
941 | #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ | ||
942 | type name(type1 arg1, type2 arg2, type3 arg3) \ | ||
943 | { \ | ||
944 | return K_INLINE_SYSCALL(name, 3, arg1, arg2, arg3); \ | ||
945 | } | ||
946 | |||
947 | #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ | ||
948 | type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \ | ||
949 | { \ | ||
950 | return K_INLINE_SYSCALL(name, 4, arg1, arg2, arg3, arg4); \ | ||
951 | } | ||
952 | |||
953 | /* select takes 5 arguments */ | ||
954 | #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \ | ||
955 | type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \ | ||
956 | { \ | ||
957 | return K_INLINE_SYSCALL(name, 5, arg1, arg2, arg3, arg4, arg5); \ | ||
958 | } | ||
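A hedged instantiation sketch of how a raw wrapper would be generated with these macros (the wrapper and the visible errno lvalue are assumptions of the example, not something this header provides; K_INLINE_SYSCALL assigns to errno on failure):

extern int errno;               /* assumed in scope: K_INLINE_SYSCALL writes the error code here */

/* Expands to a write() wrapper that loads fd/buf/count into r26/r25/r24,
 * puts __NR_write into r20 in the delay slot of "ble 0x100(%sr2, %r0)",
 * and converts a -errno return into errno plus a -1 result. */
_syscall3(ssize_t, write, int, fd, const void *, buf, size_t, count)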
959 | |||
960 | #define __ARCH_WANT_OLD_READDIR | ||
961 | #define __ARCH_WANT_STAT64 | ||
962 | #define __ARCH_WANT_SYS_ALARM | ||
963 | #define __ARCH_WANT_SYS_GETHOSTNAME | ||
964 | #define __ARCH_WANT_SYS_PAUSE | ||
965 | #define __ARCH_WANT_SYS_SGETMASK | ||
966 | #define __ARCH_WANT_SYS_SIGNAL | ||
967 | #define __ARCH_WANT_SYS_TIME | ||
968 | #define __ARCH_WANT_COMPAT_SYS_TIME | ||
969 | #define __ARCH_WANT_SYS_UTIME | ||
970 | #define __ARCH_WANT_SYS_WAITPID | ||
971 | #define __ARCH_WANT_SYS_SOCKETCALL | ||
972 | #define __ARCH_WANT_SYS_FADVISE64 | ||
973 | #define __ARCH_WANT_SYS_GETPGRP | ||
974 | #define __ARCH_WANT_SYS_LLSEEK | ||
975 | #define __ARCH_WANT_SYS_NICE | ||
976 | #define __ARCH_WANT_SYS_OLD_GETRLIMIT | ||
977 | #define __ARCH_WANT_SYS_OLDUMOUNT | ||
978 | #define __ARCH_WANT_SYS_SIGPENDING | ||
979 | #define __ARCH_WANT_SYS_SIGPROCMASK | ||
980 | #define __ARCH_WANT_SYS_RT_SIGACTION | ||
981 | #define __ARCH_WANT_SYS_RT_SIGSUSPEND | ||
982 | #define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND | ||
983 | |||
984 | #endif /* __ASSEMBLY__ */ | ||
985 | |||
986 | #undef STR | ||
987 | |||
988 | /* | ||
989 | * "Conditional" syscalls | ||
990 | * | ||
991 | * What we want is __attribute__((weak,alias("sys_ni_syscall"))), | ||
992 | * but it doesn't work on all toolchains, so we just do it by hand | ||
993 | */ | ||
994 | #define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") | ||
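For one optional entry point this produces, roughly (sys_quotactl is just a representative example of the sys_* symbols this is applied to in kernel/sys_ni.c):

/*
 * cond_syscall(sys_quotactl);   emits:
 *
 *      .weak   sys_quotactl
 *      .set    sys_quotactl, sys_ni_syscall
 *
 * so the syscall table can always reference the symbol, and the call
 * resolves to sys_ni_syscall() (-ENOSYS) when the real implementation
 * is not built in.
 */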
995 | |||
996 | #endif /* __KERNEL__ */ | ||
997 | #endif /* _ASM_PARISC_UNISTD_H_ */ | ||
diff --git a/arch/parisc/include/asm/unwind.h b/arch/parisc/include/asm/unwind.h new file mode 100644 index 000000000000..52482e4fc20d --- /dev/null +++ b/arch/parisc/include/asm/unwind.h | |||
@@ -0,0 +1,79 @@ | |||
1 | #ifndef _UNWIND_H_ | ||
2 | #define _UNWIND_H_ | ||
3 | |||
4 | #include <linux/list.h> | ||
5 | |||
6 | /* From ABI specifications */ | ||
7 | struct unwind_table_entry { | ||
8 | unsigned int region_start; | ||
9 | unsigned int region_end; | ||
10 | unsigned int Cannot_unwind:1; /* 0 */ | ||
11 | unsigned int Millicode:1; /* 1 */ | ||
12 | unsigned int Millicode_save_sr0:1; /* 2 */ | ||
13 | unsigned int Region_description:2; /* 3..4 */ | ||
14 | unsigned int reserved1:1; /* 5 */ | ||
15 | unsigned int Entry_SR:1; /* 6 */ | ||
16 | unsigned int Entry_FR:4; /* number saved *//* 7..10 */ | ||
17 | unsigned int Entry_GR:5; /* number saved *//* 11..15 */ | ||
18 | unsigned int Args_stored:1; /* 16 */ | ||
19 | unsigned int Variable_Frame:1; /* 17 */ | ||
20 | unsigned int Separate_Package_Body:1; /* 18 */ | ||
21 | unsigned int Frame_Extension_Millicode:1; /* 19 */ | ||
22 | unsigned int Stack_Overflow_Check:1; /* 20 */ | ||
23 | unsigned int Two_Instruction_SP_Increment:1; /* 21 */ | ||
24 | unsigned int Ada_Region:1; /* 22 */ | ||
25 | unsigned int cxx_info:1; /* 23 */ | ||
26 | unsigned int cxx_try_catch:1; /* 24 */ | ||
27 | unsigned int sched_entry_seq:1; /* 25 */ | ||
28 | unsigned int reserved2:1; /* 26 */ | ||
29 | unsigned int Save_SP:1; /* 27 */ | ||
30 | unsigned int Save_RP:1; /* 28 */ | ||
31 | unsigned int Save_MRP_in_frame:1; /* 29 */ | ||
32 | unsigned int extn_ptr_defined:1; /* 30 */ | ||
33 | unsigned int Cleanup_defined:1; /* 31 */ | ||
34 | |||
35 | unsigned int MPE_XL_interrupt_marker:1; /* 0 */ | ||
36 | unsigned int HP_UX_interrupt_marker:1; /* 1 */ | ||
37 | unsigned int Large_frame:1; /* 2 */ | ||
38 | unsigned int Pseudo_SP_Set:1; /* 3 */ | ||
39 | unsigned int reserved4:1; /* 4 */ | ||
40 | unsigned int Total_frame_size:27; /* 5..31 */ | ||
41 | }; | ||
42 | |||
43 | struct unwind_table { | ||
44 | struct list_head list; | ||
45 | const char *name; | ||
46 | unsigned long gp; | ||
47 | unsigned long base_addr; | ||
48 | unsigned long start; | ||
49 | unsigned long end; | ||
50 | const struct unwind_table_entry *table; | ||
51 | unsigned long length; | ||
52 | }; | ||
53 | |||
54 | struct unwind_frame_info { | ||
55 | struct task_struct *t; | ||
56 | /* Eventually we would like to be able to get at any of the registers | ||
57 | available, but for now we only try to get the sp and ip for each | ||
58 | frame */ | ||
59 | /* struct pt_regs regs; */ | ||
60 | unsigned long sp, ip, rp, r31; | ||
61 | unsigned long prev_sp, prev_ip; | ||
62 | }; | ||
63 | |||
64 | struct unwind_table * | ||
65 | unwind_table_add(const char *name, unsigned long base_addr, | ||
66 | unsigned long gp, void *start, void *end); | ||
67 | void | ||
68 | unwind_table_remove(struct unwind_table *table); | ||
69 | |||
70 | void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t, | ||
71 | struct pt_regs *regs); | ||
72 | void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t); | ||
73 | void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs); | ||
74 | int unwind_once(struct unwind_frame_info *info); | ||
75 | int unwind_to_user(struct unwind_frame_info *info); | ||
76 | |||
77 | int unwind_init(void); | ||
78 | |||
79 | #endif | ||
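A hedged sketch of how this interface is typically driven, mirroring what the parisc backtrace code does (dump_task_stack() and the print format are illustrative only):

#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/unwind.h>

static void dump_task_stack(struct task_struct *task)
{
        struct unwind_frame_info info;

        unwind_frame_init_from_blocked_task(&info, task);

        /* unwind_once() steps one frame; stop when it fails or ip hits 0 */
        while (unwind_once(&info) >= 0 && info.ip)
                printk(KERN_INFO " [<%08lx>] sp=%08lx\n", info.ip, info.sp);
}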
diff --git a/arch/parisc/include/asm/user.h b/arch/parisc/include/asm/user.h new file mode 100644 index 000000000000..80224753e508 --- /dev/null +++ b/arch/parisc/include/asm/user.h | |||
@@ -0,0 +1,5 @@ | |||
1 | /* This file should not exist, but lots of generic code still includes | ||
2 | it. It's a hangover from old a.out days and the traditional core | ||
3 | dump format. We are ELF-only, and so are our core dumps. If we | ||
4 | need to support HP/UX core format then we'll do it here | ||
5 | eventually. */ | ||
diff --git a/arch/parisc/include/asm/vga.h b/arch/parisc/include/asm/vga.h new file mode 100644 index 000000000000..171399a88ca6 --- /dev/null +++ b/arch/parisc/include/asm/vga.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef __ASM_PARISC_VGA_H__ | ||
2 | #define __ASM_PARISC_VGA_H__ | ||
3 | |||
4 | /* nothing */ | ||
5 | |||
6 | #endif /* __ASM_PARISC_VGA_H__ */ | ||
diff --git a/arch/parisc/include/asm/xor.h b/arch/parisc/include/asm/xor.h new file mode 100644 index 000000000000..c82eb12a5b18 --- /dev/null +++ b/arch/parisc/include/asm/xor.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/xor.h> | |||
diff --git a/arch/parisc/kernel/.gitignore b/arch/parisc/kernel/.gitignore new file mode 100644 index 000000000000..c5f676c3c224 --- /dev/null +++ b/arch/parisc/kernel/.gitignore | |||
@@ -0,0 +1 @@ | |||
vmlinux.lds | |||
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c index 3efc0b73e4ff..699cf8ef2118 100644 --- a/arch/parisc/kernel/asm-offsets.c +++ b/arch/parisc/kernel/asm-offsets.c | |||
@@ -290,5 +290,8 @@ int main(void) | |||
290 | DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip)); | 290 | DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip)); |
291 | DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space)); | 291 | DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space)); |
292 | DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr)); | 292 | DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr)); |
293 | BLANK(); | ||
294 | DEFINE(ASM_PDC_RESULT_SIZE, NUM_PDC_RESULT * sizeof(unsigned long)); | ||
295 | BLANK(); | ||
293 | return 0; | 296 | return 0; |
294 | } | 297 | } |
diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c index 7177a6cd1b7f..03f26bd75bd8 100644 --- a/arch/parisc/kernel/firmware.c +++ b/arch/parisc/kernel/firmware.c | |||
@@ -71,8 +71,8 @@ | |||
71 | #include <asm/processor.h> /* for boot_cpu_data */ | 71 | #include <asm/processor.h> /* for boot_cpu_data */ |
72 | 72 | ||
73 | static DEFINE_SPINLOCK(pdc_lock); | 73 | static DEFINE_SPINLOCK(pdc_lock); |
74 | static unsigned long pdc_result[32] __attribute__ ((aligned (8))); | 74 | extern unsigned long pdc_result[NUM_PDC_RESULT]; |
75 | static unsigned long pdc_result2[32] __attribute__ ((aligned (8))); | 75 | extern unsigned long pdc_result2[NUM_PDC_RESULT]; |
76 | 76 | ||
77 | #ifdef CONFIG_64BIT | 77 | #ifdef CONFIG_64BIT |
78 | #define WIDE_FIRMWARE 0x1 | 78 | #define WIDE_FIRMWARE 0x1 |
@@ -150,26 +150,40 @@ static void convert_to_wide(unsigned long *addr) | |||
150 | #endif | 150 | #endif |
151 | } | 151 | } |
152 | 152 | ||
153 | #ifdef CONFIG_64BIT | ||
154 | void __init set_firmware_width_unlocked(void) | ||
155 | { | ||
156 | int ret; | ||
157 | |||
158 | ret = mem_pdc_call(PDC_MODEL, PDC_MODEL_CAPABILITIES, | ||
159 | __pa(pdc_result), 0); | ||
160 | convert_to_wide(pdc_result); | ||
161 | if (pdc_result[0] != NARROW_FIRMWARE) | ||
162 | parisc_narrow_firmware = 0; | ||
163 | } | ||
164 | |||
153 | /** | 165 | /** |
154 | * set_firmware_width - Determine if the firmware is wide or narrow. | 166 | * set_firmware_width - Determine if the firmware is wide or narrow. |
155 | * | 167 | * |
156 | * This function must be called before any pdc_* function that uses the convert_to_wide | 168 | * This function must be called before any pdc_* function that uses the |
157 | * function. | 169 | * convert_to_wide function. |
158 | */ | 170 | */ |
159 | void __init set_firmware_width(void) | 171 | void __init set_firmware_width(void) |
160 | { | 172 | { |
161 | #ifdef CONFIG_64BIT | ||
162 | int retval; | ||
163 | unsigned long flags; | 173 | unsigned long flags; |
174 | spin_lock_irqsave(&pdc_lock, flags); | ||
175 | set_firmware_width_unlocked(); | ||
176 | spin_unlock_irqrestore(&pdc_lock, flags); | ||
177 | } | ||
178 | #else | ||
179 | void __init set_firmware_width_unlocked(void) { | ||
180 | return; | ||
181 | } | ||
164 | 182 | ||
165 | spin_lock_irqsave(&pdc_lock, flags); | 183 | void __init set_firmware_width(void) { |
166 | retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_CAPABILITIES, __pa(pdc_result), 0); | 184 | return; |
167 | convert_to_wide(pdc_result); | ||
168 | if(pdc_result[0] != NARROW_FIRMWARE) | ||
169 | parisc_narrow_firmware = 0; | ||
170 | spin_unlock_irqrestore(&pdc_lock, flags); | ||
171 | #endif | ||
172 | } | 185 | } |
186 | #endif /*CONFIG_64BIT*/ | ||
173 | 187 | ||
174 | /** | 188 | /** |
175 | * pdc_emergency_unlock - Unlock the linux pdc lock | 189 | * pdc_emergency_unlock - Unlock the linux pdc lock |
@@ -288,6 +302,20 @@ int pdc_chassis_warn(unsigned long *warn) | |||
288 | return retval; | 302 | return retval; |
289 | } | 303 | } |
290 | 304 | ||
305 | int __init pdc_coproc_cfg_unlocked(struct pdc_coproc_cfg *pdc_coproc_info) | ||
306 | { | ||
307 | int ret; | ||
308 | |||
309 | ret = mem_pdc_call(PDC_COPROC, PDC_COPROC_CFG, __pa(pdc_result)); | ||
310 | convert_to_wide(pdc_result); | ||
311 | pdc_coproc_info->ccr_functional = pdc_result[0]; | ||
312 | pdc_coproc_info->ccr_present = pdc_result[1]; | ||
313 | pdc_coproc_info->revision = pdc_result[17]; | ||
314 | pdc_coproc_info->model = pdc_result[18]; | ||
315 | |||
316 | return ret; | ||
317 | } | ||
318 | |||
291 | /** | 319 | /** |
292 | * pdc_coproc_cfg - To identify coprocessors attached to the processor. | 320 | * pdc_coproc_cfg - To identify coprocessors attached to the processor. |
293 | * @pdc_coproc_info: Return buffer address. | 321 | * @pdc_coproc_info: Return buffer address. |
@@ -297,19 +325,14 @@ int pdc_chassis_warn(unsigned long *warn) | |||
297 | */ | 325 | */ |
298 | int __init pdc_coproc_cfg(struct pdc_coproc_cfg *pdc_coproc_info) | 326 | int __init pdc_coproc_cfg(struct pdc_coproc_cfg *pdc_coproc_info) |
299 | { | 327 | { |
300 | int retval; | 328 | int ret; |
301 | unsigned long flags; | 329 | unsigned long flags; |
302 | 330 | ||
303 | spin_lock_irqsave(&pdc_lock, flags); | 331 | spin_lock_irqsave(&pdc_lock, flags); |
304 | retval = mem_pdc_call(PDC_COPROC, PDC_COPROC_CFG, __pa(pdc_result)); | 332 | ret = pdc_coproc_cfg_unlocked(pdc_coproc_info); |
305 | convert_to_wide(pdc_result); | 333 | spin_unlock_irqrestore(&pdc_lock, flags); |
306 | pdc_coproc_info->ccr_functional = pdc_result[0]; | ||
307 | pdc_coproc_info->ccr_present = pdc_result[1]; | ||
308 | pdc_coproc_info->revision = pdc_result[17]; | ||
309 | pdc_coproc_info->model = pdc_result[18]; | ||
310 | spin_unlock_irqrestore(&pdc_lock, flags); | ||
311 | 334 | ||
312 | return retval; | 335 | return ret; |
313 | } | 336 | } |
314 | 337 | ||
315 | /** | 338 | /** |
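Editor's note: the firmware.c change is the common locked-wrapper/unlocked-helper split, so that the same PDC queries can also be issued from very early boot (see start_parisc() later in this series) before taking pdc_lock is useful. A sketch of the general shape, with hypothetical names (the real helpers are set_firmware_width_unlocked() and pdc_coproc_cfg_unlocked()):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* Does the real work; callers either already hold example_lock or run so
 * early in boot that nothing can race with them yet. */
static int example_query_unlocked(unsigned long *out)
{
        *out = 42;              /* stand-in for the real firmware call */
        return 0;
}

/* Normal-path wrapper: take the lock, delegate, drop the lock. */
int example_query(unsigned long *out)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&example_lock, flags);
        ret = example_query_unlocked(out);
        spin_unlock_irqrestore(&example_lock, flags);

        return ret;
}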
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S index a84e31e82876..0e3d9f9b9e33 100644 --- a/arch/parisc/kernel/head.S +++ b/arch/parisc/kernel/head.S | |||
@@ -121,7 +121,7 @@ $pgt_fill_loop: | |||
121 | copy %r0,%r2 | 121 | copy %r0,%r2 |
122 | 122 | ||
123 | /* And the RFI Target address too */ | 123 | /* And the RFI Target address too */ |
124 | load32 start_kernel,%r11 | 124 | load32 start_parisc,%r11 |
125 | 125 | ||
126 | /* And the initial task pointer */ | 126 | /* And the initial task pointer */ |
127 | load32 init_thread_union,%r6 | 127 | load32 init_thread_union,%r6 |
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 49c637970789..90904f9dfc50 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c | |||
@@ -4,6 +4,7 @@ | |||
4 | * Copyright (C) 2000 Hewlett-Packard Co, Linuxcare Inc. | 4 | * Copyright (C) 2000 Hewlett-Packard Co, Linuxcare Inc. |
5 | * Copyright (C) 2000 Matthew Wilcox <matthew@wil.cx> | 5 | * Copyright (C) 2000 Matthew Wilcox <matthew@wil.cx> |
6 | * Copyright (C) 2000 David Huggins-Daines <dhd@debian.org> | 6 | * Copyright (C) 2000 David Huggins-Daines <dhd@debian.org> |
7 | * Copyright (C) 2008 Helge Deller <deller@gmx.de> | ||
7 | */ | 8 | */ |
8 | 9 | ||
9 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
@@ -27,15 +28,149 @@ | |||
27 | /* PSW bits we allow the debugger to modify */ | 28 | /* PSW bits we allow the debugger to modify */ |
28 | #define USER_PSW_BITS (PSW_N | PSW_V | PSW_CB) | 29 | #define USER_PSW_BITS (PSW_N | PSW_V | PSW_CB) |
29 | 30 | ||
30 | #undef DEBUG_PTRACE | 31 | /* |
32 | * Called by kernel/ptrace.c when detaching.. | ||
33 | * | ||
34 | * Make sure single step bits etc are not set. | ||
35 | */ | ||
36 | void ptrace_disable(struct task_struct *task) | ||
37 | { | ||
38 | task->ptrace &= ~(PT_SINGLESTEP|PT_BLOCKSTEP); | ||
31 | 39 | ||
32 | #ifdef DEBUG_PTRACE | 40 | /* make sure the trap bits are not set */ |
33 | #define DBG(x...) printk(x) | 41 | pa_psw(task)->r = 0; |
34 | #else | 42 | pa_psw(task)->t = 0; |
35 | #define DBG(x...) | 43 | pa_psw(task)->h = 0; |
36 | #endif | 44 | pa_psw(task)->l = 0; |
45 | } | ||
46 | |||
47 | /* | ||
48 | * The following functions are called by ptrace_resume() when | ||
49 | * enabling or disabling single/block tracing. | ||
50 | */ | ||
51 | void user_disable_single_step(struct task_struct *task) | ||
52 | { | ||
53 | ptrace_disable(task); | ||
54 | } | ||
55 | |||
56 | void user_enable_single_step(struct task_struct *task) | ||
57 | { | ||
58 | task->ptrace &= ~PT_BLOCKSTEP; | ||
59 | task->ptrace |= PT_SINGLESTEP; | ||
60 | |||
61 | if (pa_psw(task)->n) { | ||
62 | struct siginfo si; | ||
63 | |||
64 | /* Nullified, just crank over the queue. */ | ||
65 | task_regs(task)->iaoq[0] = task_regs(task)->iaoq[1]; | ||
66 | task_regs(task)->iasq[0] = task_regs(task)->iasq[1]; | ||
67 | task_regs(task)->iaoq[1] = task_regs(task)->iaoq[0] + 4; | ||
68 | pa_psw(task)->n = 0; | ||
69 | pa_psw(task)->x = 0; | ||
70 | pa_psw(task)->y = 0; | ||
71 | pa_psw(task)->z = 0; | ||
72 | pa_psw(task)->b = 0; | ||
73 | ptrace_disable(task); | ||
74 | /* Don't wake up the task, but let the | ||
75 | parent know something happened. */ | ||
76 | si.si_code = TRAP_TRACE; | ||
77 | si.si_addr = (void __user *) (task_regs(task)->iaoq[0] & ~3); | ||
78 | si.si_signo = SIGTRAP; | ||
79 | si.si_errno = 0; | ||
80 | force_sig_info(SIGTRAP, &si, task); | ||
81 | /* notify_parent(task, SIGCHLD); */ | ||
82 | return; | ||
83 | } | ||
84 | |||
85 | /* Enable recovery counter traps. The recovery counter | ||
86 | * itself will be set to zero on a task switch. If the | ||
87 | * task is suspended on a syscall then the syscall return | ||
88 | * path will overwrite the recovery counter with a suitable | ||
89 | * value such that it traps once back in user space. We | ||
90 | * disable interrupts in the task's PSW here also, to avoid | ||
90 | * disable interrupts in the task's PSW here also, to avoid | ||
91 | * interrupts while the recovery counter is decrementing. | ||
92 | */ | ||
93 | pa_psw(task)->r = 1; | ||
94 | pa_psw(task)->t = 0; | ||
95 | pa_psw(task)->h = 0; | ||
96 | pa_psw(task)->l = 0; | ||
97 | } | ||
98 | |||
99 | void user_enable_block_step(struct task_struct *task) | ||
100 | { | ||
101 | task->ptrace &= ~PT_SINGLESTEP; | ||
102 | task->ptrace |= PT_BLOCKSTEP; | ||
103 | |||
104 | /* Enable taken branch trap. */ | ||
105 | pa_psw(task)->r = 0; | ||
106 | pa_psw(task)->t = 1; | ||
107 | pa_psw(task)->h = 0; | ||
108 | pa_psw(task)->l = 0; | ||
109 | } | ||
110 | |||
111 | long arch_ptrace(struct task_struct *child, long request, long addr, long data) | ||
112 | { | ||
113 | unsigned long tmp; | ||
114 | long ret = -EIO; | ||
37 | 115 | ||
38 | #ifdef CONFIG_64BIT | 116 | switch (request) { |
117 | |||
118 | /* Read the word at location addr in the USER area. For ptraced | ||
119 | processes, the kernel saves all regs on a syscall. */ | ||
120 | case PTRACE_PEEKUSR: | ||
121 | if ((addr & (sizeof(long)-1)) || | ||
122 | (unsigned long) addr >= sizeof(struct pt_regs)) | ||
123 | break; | ||
124 | tmp = *(unsigned long *) ((char *) task_regs(child) + addr); | ||
125 | ret = put_user(tmp, (unsigned long *) data); | ||
126 | break; | ||
127 | |||
128 | /* Write the word at location addr in the USER area. This will need | ||
129 | to change when the kernel no longer saves all regs on a syscall. | ||
130 | FIXME. There is a problem at the moment in that r3-r18 are only | ||
131 | saved if the process is ptraced on syscall entry, and even then | ||
132 | those values are overwritten by actual register values on syscall | ||
133 | exit. */ | ||
134 | case PTRACE_POKEUSR: | ||
135 | /* Some register values written here may be ignored in | ||
136 | * entry.S:syscall_restore_rfi; e.g. iaoq is written with | ||
137 | * r31/r31+4, and not with the values in pt_regs. | ||
138 | */ | ||
139 | if (addr == PT_PSW) { | ||
140 | /* Allow writing to Nullify, Divide-step-correction, | ||
141 | * and carry/borrow bits. | ||
142 | * BEWARE, if you set N, and then single step, it won't | ||
143 | * stop on the nullified instruction. | ||
144 | */ | ||
145 | data &= USER_PSW_BITS; | ||
146 | task_regs(child)->gr[0] &= ~USER_PSW_BITS; | ||
147 | task_regs(child)->gr[0] |= data; | ||
148 | ret = 0; | ||
149 | break; | ||
150 | } | ||
151 | |||
152 | if ((addr & (sizeof(long)-1)) || | ||
153 | (unsigned long) addr >= sizeof(struct pt_regs)) | ||
154 | break; | ||
155 | if ((addr >= PT_GR1 && addr <= PT_GR31) || | ||
156 | addr == PT_IAOQ0 || addr == PT_IAOQ1 || | ||
157 | (addr >= PT_FR0 && addr <= PT_FR31 + 4) || | ||
158 | addr == PT_SAR) { | ||
159 | *(unsigned long *) ((char *) task_regs(child) + addr) = data; | ||
160 | ret = 0; | ||
161 | } | ||
162 | break; | ||
163 | |||
164 | default: | ||
165 | ret = ptrace_request(child, request, addr, data); | ||
166 | break; | ||
167 | } | ||
168 | |||
169 | return ret; | ||
170 | } | ||
171 | |||
172 | |||
173 | #ifdef CONFIG_COMPAT | ||
39 | 174 | ||
40 | /* This function is needed to translate 32 bit pt_regs offsets in to | 175 | /* This function is needed to translate 32 bit pt_regs offsets in to |
41 | * 64 bit pt_regs offsets. For example, a 32 bit gdb under a 64 bit kernel | 176 | * 64 bit pt_regs offsets. For example, a 32 bit gdb under a 64 bit kernel |
@@ -61,106 +196,25 @@ static long translate_usr_offset(long offset) | |||
61 | else | 196 | else |
62 | return -1; | 197 | return -1; |
63 | } | 198 | } |
64 | #endif | ||
65 | 199 | ||
66 | /* | 200 | long compat_arch_ptrace(struct task_struct *child, compat_long_t request, |
67 | * Called by kernel/ptrace.c when detaching.. | 201 | compat_ulong_t addr, compat_ulong_t data) |
68 | * | ||
69 | * Make sure single step bits etc are not set. | ||
70 | */ | ||
71 | void ptrace_disable(struct task_struct *child) | ||
72 | { | 202 | { |
73 | /* make sure the trap bits are not set */ | 203 | compat_uint_t tmp; |
74 | pa_psw(child)->r = 0; | 204 | long ret = -EIO; |
75 | pa_psw(child)->t = 0; | ||
76 | pa_psw(child)->h = 0; | ||
77 | pa_psw(child)->l = 0; | ||
78 | } | ||
79 | |||
80 | long arch_ptrace(struct task_struct *child, long request, long addr, long data) | ||
81 | { | ||
82 | long ret; | ||
83 | #ifdef DEBUG_PTRACE | ||
84 | long oaddr=addr, odata=data; | ||
85 | #endif | ||
86 | 205 | ||
87 | switch (request) { | 206 | switch (request) { |
88 | case PTRACE_PEEKTEXT: /* read word at location addr. */ | ||
89 | case PTRACE_PEEKDATA: { | ||
90 | #ifdef CONFIG_64BIT | ||
91 | if (__is_compat_task(child)) { | ||
92 | int copied; | ||
93 | unsigned int tmp; | ||
94 | |||
95 | addr &= 0xffffffffL; | ||
96 | copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); | ||
97 | ret = -EIO; | ||
98 | if (copied != sizeof(tmp)) | ||
99 | goto out_tsk; | ||
100 | ret = put_user(tmp,(unsigned int *) data); | ||
101 | DBG("sys_ptrace(PEEK%s, %d, %lx, %lx) returning %ld, data %x\n", | ||
102 | request == PTRACE_PEEKTEXT ? "TEXT" : "DATA", | ||
103 | pid, oaddr, odata, ret, tmp); | ||
104 | } | ||
105 | else | ||
106 | #endif | ||
107 | ret = generic_ptrace_peekdata(child, addr, data); | ||
108 | goto out_tsk; | ||
109 | } | ||
110 | 207 | ||
111 | /* when I and D space are separate, this will have to be fixed. */ | 208 | case PTRACE_PEEKUSR: |
112 | case PTRACE_POKETEXT: /* write the word at location addr. */ | 209 | if (addr & (sizeof(compat_uint_t)-1)) |
113 | case PTRACE_POKEDATA: | 210 | break; |
114 | ret = 0; | 211 | addr = translate_usr_offset(addr); |
115 | #ifdef CONFIG_64BIT | 212 | if (addr < 0) |
116 | if (__is_compat_task(child)) { | 213 | break; |
117 | unsigned int tmp = (unsigned int)data; | ||
118 | DBG("sys_ptrace(POKE%s, %d, %lx, %lx)\n", | ||
119 | request == PTRACE_POKETEXT ? "TEXT" : "DATA", | ||
120 | pid, oaddr, odata); | ||
121 | addr &= 0xffffffffL; | ||
122 | if (access_process_vm(child, addr, &tmp, sizeof(tmp), 1) == sizeof(tmp)) | ||
123 | goto out_tsk; | ||
124 | } | ||
125 | else | ||
126 | #endif | ||
127 | { | ||
128 | if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data)) | ||
129 | goto out_tsk; | ||
130 | } | ||
131 | ret = -EIO; | ||
132 | goto out_tsk; | ||
133 | |||
134 | /* Read the word at location addr in the USER area. For ptraced | ||
135 | processes, the kernel saves all regs on a syscall. */ | ||
136 | case PTRACE_PEEKUSR: { | ||
137 | ret = -EIO; | ||
138 | #ifdef CONFIG_64BIT | ||
139 | if (__is_compat_task(child)) { | ||
140 | unsigned int tmp; | ||
141 | |||
142 | if (addr & (sizeof(int)-1)) | ||
143 | goto out_tsk; | ||
144 | if ((addr = translate_usr_offset(addr)) < 0) | ||
145 | goto out_tsk; | ||
146 | |||
147 | tmp = *(unsigned int *) ((char *) task_regs(child) + addr); | ||
148 | ret = put_user(tmp, (unsigned int *) data); | ||
149 | DBG("sys_ptrace(PEEKUSR, %d, %lx, %lx) returning %ld, addr %lx, data %x\n", | ||
150 | pid, oaddr, odata, ret, addr, tmp); | ||
151 | } | ||
152 | else | ||
153 | #endif | ||
154 | { | ||
155 | unsigned long tmp; | ||
156 | 214 | ||
157 | if ((addr & (sizeof(long)-1)) || (unsigned long) addr >= sizeof(struct pt_regs)) | 215 | tmp = *(compat_uint_t *) ((char *) task_regs(child) + addr); |
158 | goto out_tsk; | 216 | ret = put_user(tmp, (compat_uint_t *) (unsigned long) data); |
159 | tmp = *(unsigned long *) ((char *) task_regs(child) + addr); | 217 | break; |
160 | ret = put_user(tmp, (unsigned long *) data); | ||
161 | } | ||
162 | goto out_tsk; | ||
163 | } | ||
164 | 218 | ||
165 | /* Write the word at location addr in the USER area. This will need | 219 | /* Write the word at location addr in the USER area. This will need |
166 | to change when the kernel no longer saves all regs on a syscall. | 220 | to change when the kernel no longer saves all regs on a syscall. |
@@ -169,185 +223,46 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
169 | those values are overwritten by actual register values on syscall | 223 | those values are overwritten by actual register values on syscall |
170 | exit. */ | 224 | exit. */ |
171 | case PTRACE_POKEUSR: | 225 | case PTRACE_POKEUSR: |
172 | ret = -EIO; | ||
173 | /* Some register values written here may be ignored in | 226 | /* Some register values written here may be ignored in |
174 | * entry.S:syscall_restore_rfi; e.g. iaoq is written with | 227 | * entry.S:syscall_restore_rfi; e.g. iaoq is written with |
175 | * r31/r31+4, and not with the values in pt_regs. | 228 | * r31/r31+4, and not with the values in pt_regs. |
176 | */ | 229 | */ |
177 | /* PT_PSW=0, so this is valid for 32 bit processes under 64 | ||
178 | * bit kernels. | ||
179 | */ | ||
180 | if (addr == PT_PSW) { | 230 | if (addr == PT_PSW) { |
181 | /* PT_PSW=0, so this is valid for 32 bit processes | 231 | /* Since PT_PSW==0, it is valid for 32 bit processes |
182 | * under 64 bit kernels. | 232 | * under 64 bit kernels as well. |
183 | * | ||
184 | * Allow writing to Nullify, Divide-step-correction, | ||
185 | * and carry/borrow bits. | ||
186 | * BEWARE, if you set N, and then single step, it won't | ||
187 | * stop on the nullified instruction. | ||
188 | */ | 233 | */ |
189 | DBG("sys_ptrace(POKEUSR, %d, %lx, %lx)\n", | 234 | ret = arch_ptrace(child, request, addr, data); |
190 | pid, oaddr, odata); | 235 | } else { |
191 | data &= USER_PSW_BITS; | 236 | if (addr & (sizeof(compat_uint_t)-1)) |
192 | task_regs(child)->gr[0] &= ~USER_PSW_BITS; | 237 | break; |
193 | task_regs(child)->gr[0] |= data; | 238 | addr = translate_usr_offset(addr); |
194 | ret = 0; | 239 | if (addr < 0) |
195 | goto out_tsk; | 240 | break; |
196 | } | ||
197 | #ifdef CONFIG_64BIT | ||
198 | if (__is_compat_task(child)) { | ||
199 | if (addr & (sizeof(int)-1)) | ||
200 | goto out_tsk; | ||
201 | if ((addr = translate_usr_offset(addr)) < 0) | ||
202 | goto out_tsk; | ||
203 | DBG("sys_ptrace(POKEUSR, %d, %lx, %lx) addr %lx\n", | ||
204 | pid, oaddr, odata, addr); | ||
205 | if (addr >= PT_FR0 && addr <= PT_FR31 + 4) { | 241 | if (addr >= PT_FR0 && addr <= PT_FR31 + 4) { |
206 | /* Special case, fp regs are 64 bits anyway */ | 242 | /* Special case, fp regs are 64 bits anyway */ |
207 | *(unsigned int *) ((char *) task_regs(child) + addr) = data; | 243 | *(__u64 *) ((char *) task_regs(child) + addr) = data; |
208 | ret = 0; | 244 | ret = 0; |
209 | } | 245 | } |
210 | else if ((addr >= PT_GR1+4 && addr <= PT_GR31+4) || | 246 | else if ((addr >= PT_GR1+4 && addr <= PT_GR31+4) || |
211 | addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4 || | 247 | addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4 || |
212 | addr == PT_SAR+4) { | 248 | addr == PT_SAR+4) { |
213 | /* Zero the top 32 bits */ | 249 | /* Zero the top 32 bits */ |
214 | *(unsigned int *) ((char *) task_regs(child) + addr - 4) = 0; | 250 | *(__u32 *) ((char *) task_regs(child) + addr - 4) = 0; |
215 | *(unsigned int *) ((char *) task_regs(child) + addr) = data; | 251 | *(__u32 *) ((char *) task_regs(child) + addr) = data; |
216 | ret = 0; | 252 | ret = 0; |
217 | } | 253 | } |
218 | goto out_tsk; | ||
219 | } | 254 | } |
220 | else | 255 | break; |
221 | #endif | ||
222 | { | ||
223 | if ((addr & (sizeof(long)-1)) || (unsigned long) addr >= sizeof(struct pt_regs)) | ||
224 | goto out_tsk; | ||
225 | if ((addr >= PT_GR1 && addr <= PT_GR31) || | ||
226 | addr == PT_IAOQ0 || addr == PT_IAOQ1 || | ||
227 | (addr >= PT_FR0 && addr <= PT_FR31 + 4) || | ||
228 | addr == PT_SAR) { | ||
229 | *(unsigned long *) ((char *) task_regs(child) + addr) = data; | ||
230 | ret = 0; | ||
231 | } | ||
232 | goto out_tsk; | ||
233 | } | ||
234 | |||
235 | case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ | ||
236 | case PTRACE_CONT: | ||
237 | ret = -EIO; | ||
238 | DBG("sys_ptrace(%s)\n", | ||
239 | request == PTRACE_SYSCALL ? "SYSCALL" : "CONT"); | ||
240 | if (!valid_signal(data)) | ||
241 | goto out_tsk; | ||
242 | child->ptrace &= ~(PT_SINGLESTEP|PT_BLOCKSTEP); | ||
243 | if (request == PTRACE_SYSCALL) { | ||
244 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | ||
245 | } else { | ||
246 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | ||
247 | } | ||
248 | child->exit_code = data; | ||
249 | goto out_wake_notrap; | ||
250 | |||
251 | case PTRACE_KILL: | ||
252 | /* | ||
253 | * make the child exit. Best I can do is send it a | ||
254 | * sigkill. perhaps it should be put in the status | ||
255 | * that it wants to exit. | ||
256 | */ | ||
257 | ret = 0; | ||
258 | DBG("sys_ptrace(KILL)\n"); | ||
259 | if (child->exit_state == EXIT_ZOMBIE) /* already dead */ | ||
260 | goto out_tsk; | ||
261 | child->exit_code = SIGKILL; | ||
262 | goto out_wake_notrap; | ||
263 | |||
264 | case PTRACE_SINGLEBLOCK: | ||
265 | DBG("sys_ptrace(SINGLEBLOCK)\n"); | ||
266 | ret = -EIO; | ||
267 | if (!valid_signal(data)) | ||
268 | goto out_tsk; | ||
269 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | ||
270 | child->ptrace &= ~PT_SINGLESTEP; | ||
271 | child->ptrace |= PT_BLOCKSTEP; | ||
272 | child->exit_code = data; | ||
273 | |||
274 | /* Enable taken branch trap. */ | ||
275 | pa_psw(child)->r = 0; | ||
276 | pa_psw(child)->t = 1; | ||
277 | pa_psw(child)->h = 0; | ||
278 | pa_psw(child)->l = 0; | ||
279 | goto out_wake; | ||
280 | |||
281 | case PTRACE_SINGLESTEP: | ||
282 | DBG("sys_ptrace(SINGLESTEP)\n"); | ||
283 | ret = -EIO; | ||
284 | if (!valid_signal(data)) | ||
285 | goto out_tsk; | ||
286 | |||
287 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | ||
288 | child->ptrace &= ~PT_BLOCKSTEP; | ||
289 | child->ptrace |= PT_SINGLESTEP; | ||
290 | child->exit_code = data; | ||
291 | |||
292 | if (pa_psw(child)->n) { | ||
293 | struct siginfo si; | ||
294 | |||
295 | /* Nullified, just crank over the queue. */ | ||
296 | task_regs(child)->iaoq[0] = task_regs(child)->iaoq[1]; | ||
297 | task_regs(child)->iasq[0] = task_regs(child)->iasq[1]; | ||
298 | task_regs(child)->iaoq[1] = task_regs(child)->iaoq[0] + 4; | ||
299 | pa_psw(child)->n = 0; | ||
300 | pa_psw(child)->x = 0; | ||
301 | pa_psw(child)->y = 0; | ||
302 | pa_psw(child)->z = 0; | ||
303 | pa_psw(child)->b = 0; | ||
304 | ptrace_disable(child); | ||
305 | /* Don't wake up the child, but let the | ||
306 | parent know something happened. */ | ||
307 | si.si_code = TRAP_TRACE; | ||
308 | si.si_addr = (void __user *) (task_regs(child)->iaoq[0] & ~3); | ||
309 | si.si_signo = SIGTRAP; | ||
310 | si.si_errno = 0; | ||
311 | force_sig_info(SIGTRAP, &si, child); | ||
312 | //notify_parent(child, SIGCHLD); | ||
313 | //ret = 0; | ||
314 | goto out_wake; | ||
315 | } | ||
316 | |||
317 | /* Enable recovery counter traps. The recovery counter | ||
318 | * itself will be set to zero on a task switch. If the | ||
319 | * task is suspended on a syscall then the syscall return | ||
320 | * path will overwrite the recovery counter with a suitable | ||
321 | * value such that it traps once back in user space. We | ||
322 | * disable interrupts in the childs PSW here also, to avoid | ||
323 | * interrupts while the recovery counter is decrementing. | ||
324 | */ | ||
325 | pa_psw(child)->r = 1; | ||
326 | pa_psw(child)->t = 0; | ||
327 | pa_psw(child)->h = 0; | ||
328 | pa_psw(child)->l = 0; | ||
329 | /* give it a chance to run. */ | ||
330 | goto out_wake; | ||
331 | |||
332 | case PTRACE_GETEVENTMSG: | ||
333 | ret = put_user(child->ptrace_message, (unsigned int __user *) data); | ||
334 | goto out_tsk; | ||
335 | 256 | ||
336 | default: | 257 | default: |
337 | ret = ptrace_request(child, request, addr, data); | 258 | ret = compat_ptrace_request(child, request, addr, data); |
338 | goto out_tsk; | 259 | break; |
339 | } | 260 | } |
340 | 261 | ||
341 | out_wake_notrap: | ||
342 | ptrace_disable(child); | ||
343 | out_wake: | ||
344 | wake_up_process(child); | ||
345 | ret = 0; | ||
346 | out_tsk: | ||
347 | DBG("arch_ptrace(%ld, %d, %lx, %lx) returning %ld\n", | ||
348 | request, pid, oaddr, odata, ret); | ||
349 | return ret; | 262 | return ret; |
350 | } | 263 | } |
264 | #endif | ||
265 | |||
351 | 266 | ||
352 | void syscall_trace(void) | 267 | void syscall_trace(void) |
353 | { | 268 | { |
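Editor's note: the ptrace rewrite above moves parisc onto the generic helpers (ptrace_request()/compat_ptrace_request() plus the user_enable_single_step()/user_enable_block_step() hooks), leaving only the PEEKUSR/POKEUSR register-area access arch-specific. A minimal user-space fragment showing how those paths are exercised by a debugger (a sketch; error handling is trimmed and the pt_regs offset is illustrative only, it merely has to be word aligned and inside struct pt_regs, exactly as the bounds check in arch_ptrace() requires):

#include <errno.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t child = fork();

        if (child == 0) {
                ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                execl("/bin/true", "true", (char *)NULL);
                _exit(1);
        }

        waitpid(child, NULL, 0);        /* child is stopped at exec */

        /* Read one saved register word from the USER area
         * (handled by the PTRACE_PEEKUSR case above). */
        errno = 0;
        long word = ptrace(PTRACE_PEEKUSER, child,
                           (void *)(4 * sizeof(long)), NULL);
        if (errno == 0)
                printf("pt_regs word: %#lx\n", word);

        ptrace(PTRACE_CONT, child, NULL, NULL);
        waitpid(child, NULL, 0);
        return 0;
}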
diff --git a/arch/parisc/kernel/real2.S b/arch/parisc/kernel/real2.S index 7a92695d95a6..5f3d3a1f9037 100644 --- a/arch/parisc/kernel/real2.S +++ b/arch/parisc/kernel/real2.S | |||
@@ -8,12 +8,24 @@ | |||
8 | * | 8 | * |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <asm/pdc.h> | ||
11 | #include <asm/psw.h> | 12 | #include <asm/psw.h> |
12 | #include <asm/assembly.h> | 13 | #include <asm/assembly.h> |
14 | #include <asm/asm-offsets.h> | ||
13 | 15 | ||
14 | #include <linux/linkage.h> | 16 | #include <linux/linkage.h> |
15 | 17 | ||
18 | |||
16 | .section .bss | 19 | .section .bss |
20 | |||
21 | .export pdc_result | ||
22 | .export pdc_result2 | ||
23 | .align 8 | ||
24 | pdc_result: | ||
25 | .block ASM_PDC_RESULT_SIZE | ||
26 | pdc_result2: | ||
27 | .block ASM_PDC_RESULT_SIZE | ||
28 | |||
17 | .export real_stack | 29 | .export real_stack |
18 | .export real32_stack | 30 | .export real32_stack |
19 | .export real64_stack | 31 | .export real64_stack |
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index 39e7c5a5946a..7d27853ff8c8 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c | |||
@@ -44,6 +44,7 @@ | |||
44 | #include <asm/pdc_chassis.h> | 44 | #include <asm/pdc_chassis.h> |
45 | #include <asm/io.h> | 45 | #include <asm/io.h> |
46 | #include <asm/setup.h> | 46 | #include <asm/setup.h> |
47 | #include <asm/unwind.h> | ||
47 | 48 | ||
48 | static char __initdata command_line[COMMAND_LINE_SIZE]; | 49 | static char __initdata command_line[COMMAND_LINE_SIZE]; |
49 | 50 | ||
@@ -123,6 +124,7 @@ void __init setup_arch(char **cmdline_p) | |||
123 | #ifdef CONFIG_64BIT | 124 | #ifdef CONFIG_64BIT |
124 | extern int parisc_narrow_firmware; | 125 | extern int parisc_narrow_firmware; |
125 | #endif | 126 | #endif |
127 | unwind_init(); | ||
126 | 128 | ||
127 | init_per_cpu(smp_processor_id()); /* Set Modes & Enable FP */ | 129 | init_per_cpu(smp_processor_id()); /* Set Modes & Enable FP */ |
128 | 130 | ||
@@ -368,6 +370,31 @@ static int __init parisc_init(void) | |||
368 | 370 | ||
369 | return 0; | 371 | return 0; |
370 | } | 372 | } |
371 | |||
372 | arch_initcall(parisc_init); | 373 | arch_initcall(parisc_init); |
373 | 374 | ||
375 | void start_parisc(void) | ||
376 | { | ||
377 | extern void start_kernel(void); | ||
378 | |||
379 | int ret, cpunum; | ||
380 | struct pdc_coproc_cfg coproc_cfg; | ||
381 | |||
382 | cpunum = smp_processor_id(); | ||
383 | |||
384 | set_firmware_width_unlocked(); | ||
385 | |||
386 | ret = pdc_coproc_cfg_unlocked(&coproc_cfg); | ||
387 | if (ret >= 0 && coproc_cfg.ccr_functional) { | ||
388 | mtctl(coproc_cfg.ccr_functional, 10); | ||
389 | |||
390 | cpu_data[cpunum].fp_rev = coproc_cfg.revision; | ||
391 | cpu_data[cpunum].fp_model = coproc_cfg.model; | ||
392 | |||
393 | asm volatile ("fstd %fr0,8(%sp)"); | ||
394 | } else { | ||
395 | panic("must have an fpu to boot linux"); | ||
396 | } | ||
397 | |||
398 | start_kernel(); | ||
399 | // not reached | ||
400 | } | ||
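Editor's note: start_parisc() is the new real entry point (matching the head.S change above). It probes firmware width and the coprocessor configuration with the new _unlocked helpers, writes the functional-coprocessor mask into control register 10 (the CCR) to enable the FPU, and only then enters the generic start_kernel(). For reference, mtctl() is a thin wrapper around the mtctl instruction; a sketch of the definition, assumed from the parisc headers of this era and not part of this patch:

/* Move a general register value into a control register; cr10 is the
 * coprocessor configuration register (CCR), so writing ccr_functional
 * there turns the FPU on. */
#define mtctl(gr, cr) \
        __asm__ __volatile__("mtctl %0,%1" \
                : /* no outputs */ \
                : "r" (gr), "i" (cr))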
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index c7e59f548817..303d2b647e41 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S | |||
@@ -87,7 +87,7 @@ | |||
87 | ENTRY_SAME(setuid) | 87 | ENTRY_SAME(setuid) |
88 | ENTRY_SAME(getuid) | 88 | ENTRY_SAME(getuid) |
89 | ENTRY_COMP(stime) /* 25 */ | 89 | ENTRY_COMP(stime) /* 25 */ |
90 | ENTRY_SAME(ptrace) | 90 | ENTRY_COMP(ptrace) |
91 | ENTRY_SAME(alarm) | 91 | ENTRY_SAME(alarm) |
92 | /* see stat comment */ | 92 | /* see stat comment */ |
93 | ENTRY_COMP(newfstat) | 93 | ENTRY_COMP(newfstat) |
@@ -407,6 +407,12 @@ | |||
407 | ENTRY_SAME(timerfd_create) | 407 | ENTRY_SAME(timerfd_create) |
408 | ENTRY_COMP(timerfd_settime) | 408 | ENTRY_COMP(timerfd_settime) |
409 | ENTRY_COMP(timerfd_gettime) | 409 | ENTRY_COMP(timerfd_gettime) |
410 | ENTRY_COMP(signalfd4) | ||
411 | ENTRY_SAME(eventfd2) /* 310 */ | ||
412 | ENTRY_SAME(epoll_create1) | ||
413 | ENTRY_SAME(dup3) | ||
414 | ENTRY_SAME(pipe2) | ||
415 | ENTRY_SAME(inotify_init1) | ||
410 | 416 | ||
411 | /* Nothing yet */ | 417 | /* Nothing yet */ |
412 | 418 | ||
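Editor's note: switching ptrace from ENTRY_SAME to ENTRY_COMP routes 32-bit tasks on a 64-bit kernel to the compat entry backed by the new compat_arch_ptrace(), and slots 309-315 wire up the recently added syscalls. Roughly, the table macros expand as sketched below (assumption: condensed from the real macro definitions at the top of syscall_table.S; on a 32-bit kernel both macros point at the native entry):

/* 64-bit kernel build: */
#define ENTRY_SAME(_name_)  .dword sys_##_name_          /* same entry for both ABIs  */
#define ENTRY_COMP(_name_)  .dword compat_sys_##_name_   /* 32-bit tasks use the
                                                            compat wrapper            */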
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index 24be86bba94d..4d09203bc693 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/smp.h> | 23 | #include <linux/smp.h> |
24 | #include <linux/profile.h> | 24 | #include <linux/profile.h> |
25 | #include <linux/clocksource.h> | 25 | #include <linux/clocksource.h> |
26 | #include <linux/platform_device.h> | ||
26 | 27 | ||
27 | #include <asm/uaccess.h> | 28 | #include <asm/uaccess.h> |
28 | #include <asm/io.h> | 29 | #include <asm/io.h> |
@@ -215,6 +216,24 @@ void __init start_cpu_itimer(void) | |||
215 | cpu_data[cpu].it_value = next_tick; | 216 | cpu_data[cpu].it_value = next_tick; |
216 | } | 217 | } |
217 | 218 | ||
219 | struct platform_device rtc_parisc_dev = { | ||
220 | .name = "rtc-parisc", | ||
221 | .id = -1, | ||
222 | }; | ||
223 | |||
224 | static int __init rtc_init(void) | ||
225 | { | ||
226 | int ret; | ||
227 | |||
228 | ret = platform_device_register(&rtc_parisc_dev); | ||
229 | if (ret < 0) | ||
230 | printk(KERN_ERR "unable to register rtc device...\n"); | ||
231 | |||
232 | /* not necessarily an error */ | ||
233 | return 0; | ||
234 | } | ||
235 | module_init(rtc_init); | ||
236 | |||
218 | void __init time_init(void) | 237 | void __init time_init(void) |
219 | { | 238 | { |
220 | static struct pdc_tod tod_data; | 239 | static struct pdc_tod tod_data; |
@@ -245,4 +264,3 @@ void __init time_init(void) | |||
245 | xtime.tv_nsec = 0; | 264 | xtime.tv_nsec = 0; |
246 | } | 265 | } |
247 | } | 266 | } |
248 | |||
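Editor's note: registering an "rtc-parisc" platform device here lets a matching platform driver in drivers/rtc claim the PDC time-of-day clock; binding is purely by name. A hypothetical sketch of the driver half (names and probe body are illustrative; the real driver is added in a separate patch):

#include <linux/module.h>
#include <linux/platform_device.h>

static int __devinit rtc_parisc_probe(struct platform_device *pdev)
{
        /* ... register an rtc_device backed by the PDC TOD calls ... */
        return 0;
}

static struct platform_driver rtc_parisc_driver = {
        .driver = {
                .name  = "rtc-parisc",
                .owner = THIS_MODULE,
        },
        .probe  = rtc_parisc_probe,
};

static int __init rtc_parisc_init(void)
{
        return platform_driver_register(&rtc_parisc_driver);
}
module_init(rtc_parisc_init);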
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c index 701b2d2d8882..6773c582e457 100644 --- a/arch/parisc/kernel/unwind.c +++ b/arch/parisc/kernel/unwind.c | |||
@@ -170,7 +170,7 @@ void unwind_table_remove(struct unwind_table *table) | |||
170 | } | 170 | } |
171 | 171 | ||
172 | /* Called from setup_arch to import the kernel unwind info */ | 172 | /* Called from setup_arch to import the kernel unwind info */ |
173 | static int unwind_init(void) | 173 | int unwind_init(void) |
174 | { | 174 | { |
175 | long start, stop; | 175 | long start, stop; |
176 | register unsigned long gp __asm__ ("r27"); | 176 | register unsigned long gp __asm__ ("r27"); |
@@ -417,5 +417,3 @@ int unwind_to_user(struct unwind_frame_info *info) | |||
417 | 417 | ||
418 | return ret; | 418 | return ret; |
419 | } | 419 | } |
420 | |||
421 | module_init(unwind_init); | ||
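Editor's note: unwind_init() used to run as a module_init() call, which is far too late now that setup_arch() wants unwind data available during early boot; making it non-static and calling it explicitly (see the setup.c hunk above) fixes the ordering. Presumably a declaration was added to the header alongside this change (assumption; the asm/unwind.h hunk is not shown here):

/* assumed companion declaration in arch/parisc/include/asm/unwind.h */
int unwind_init(void);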
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 64e144505f65..5ac51e6efc1d 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h | |||
@@ -10,9 +10,13 @@ | |||
10 | * 2 of the License, or (at your option) any later version. | 10 | * 2 of the License, or (at your option) any later version. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #ifndef __ASSEMBLY__ | ||
14 | #include <linux/types.h> | ||
15 | #else | ||
16 | #include <asm/types.h> | ||
17 | #endif | ||
13 | #include <asm/asm-compat.h> | 18 | #include <asm/asm-compat.h> |
14 | #include <asm/kdump.h> | 19 | #include <asm/kdump.h> |
15 | #include <asm/types.h> | ||
16 | 20 | ||
17 | /* | 21 | /* |
18 | * On PPC32 page size is 4K. For PPC64 we support either 4K or 64K software | 22 | * On PPC32 page size is 4K. For PPC64 we support either 4K or 64K software |
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index ae2ea803a0f2..9047af7baa69 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h | |||
@@ -74,6 +74,13 @@ struct pci_controller { | |||
74 | unsigned long pci_io_size; | 74 | unsigned long pci_io_size; |
75 | #endif | 75 | #endif |
76 | 76 | ||
77 | /* Some machines have a special region to forward the ISA | ||
78 | * "memory" cycles such as VGA memory regions. Left to 0 | ||
79 | * if unsupported | ||
80 | */ | ||
81 | resource_size_t isa_mem_phys; | ||
82 | resource_size_t isa_mem_size; | ||
83 | |||
77 | struct pci_ops *ops; | 84 | struct pci_ops *ops; |
78 | unsigned int __iomem *cfg_addr; | 85 | unsigned int __iomem *cfg_addr; |
79 | void __iomem *cfg_data; | 86 | void __iomem *cfg_data; |
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index 0e52c7828ea4..39d547fde956 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h | |||
@@ -123,6 +123,16 @@ int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma, | |||
123 | /* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */ | 123 | /* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */ |
124 | #define HAVE_PCI_MMAP 1 | 124 | #define HAVE_PCI_MMAP 1 |
125 | 125 | ||
126 | extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, | ||
127 | size_t count); | ||
128 | extern int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, | ||
129 | size_t count); | ||
130 | extern int pci_mmap_legacy_page_range(struct pci_bus *bus, | ||
131 | struct vm_area_struct *vma, | ||
132 | enum pci_mmap_state mmap_state); | ||
133 | |||
134 | #define HAVE_PCI_LEGACY 1 | ||
135 | |||
126 | #if defined(CONFIG_PPC64) || defined(CONFIG_NOT_COHERENT_CACHE) | 136 | #if defined(CONFIG_PPC64) || defined(CONFIG_NOT_COHERENT_CACHE) |
127 | /* | 137 | /* |
128 | * For 64-bit kernels, pci_unmap_{single,page} is not a nop. | 138 | * For 64-bit kernels, pci_unmap_{single,page} is not a nop. |
@@ -226,5 +236,6 @@ extern void pci_resource_to_user(const struct pci_dev *dev, int bar, | |||
226 | extern void pcibios_do_bus_setup(struct pci_bus *bus); | 236 | extern void pcibios_do_bus_setup(struct pci_bus *bus); |
227 | extern void pcibios_fixup_of_probed_bus(struct pci_bus *bus); | 237 | extern void pcibios_fixup_of_probed_bus(struct pci_bus *bus); |
228 | 238 | ||
239 | |||
229 | #endif /* __KERNEL__ */ | 240 | #endif /* __KERNEL__ */ |
230 | #endif /* __ASM_POWERPC_PCI_H */ | 241 | #endif /* __ASM_POWERPC_PCI_H */ |
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index 734e0754fb9b..280a90cc9894 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h | |||
@@ -129,7 +129,7 @@ extern int ptrace_put_reg(struct task_struct *task, int regno, | |||
129 | #define CHECK_FULL_REGS(regs) \ | 129 | #define CHECK_FULL_REGS(regs) \ |
130 | do { \ | 130 | do { \ |
131 | if ((regs)->trap & 1) \ | 131 | if ((regs)->trap & 1) \ |
132 | printk(KERN_CRIT "%s: partial register set\n", __FUNCTION__); \ | 132 | printk(KERN_CRIT "%s: partial register set\n", __func__); \ |
133 | } while (0) | 133 | } while (0) |
134 | #endif /* __powerpc64__ */ | 134 | #endif /* __powerpc64__ */ |
135 | 135 | ||
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 01ce8c38bae6..3815d84a1ef4 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c | |||
@@ -451,7 +451,8 @@ pgprot_t pci_phys_mem_access_prot(struct file *file, | |||
451 | pci_dev_put(pdev); | 451 | pci_dev_put(pdev); |
452 | } | 452 | } |
453 | 453 | ||
454 | DBG("non-PCI map for %lx, prot: %lx\n", offset, prot); | 454 | DBG("non-PCI map for %llx, prot: %lx\n", |
455 | (unsigned long long)offset, prot); | ||
455 | 456 | ||
456 | return __pgprot(prot); | 457 | return __pgprot(prot); |
457 | } | 458 | } |
@@ -490,6 +491,131 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | |||
490 | return ret; | 491 | return ret; |
491 | } | 492 | } |
492 | 493 | ||
494 | /* This provides legacy IO read access on a bus */ | ||
495 | int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size) | ||
496 | { | ||
497 | unsigned long offset; | ||
498 | struct pci_controller *hose = pci_bus_to_host(bus); | ||
499 | struct resource *rp = &hose->io_resource; | ||
500 | void __iomem *addr; | ||
501 | |||
502 | /* Check if port can be supported by that bus. We only check | ||
503 | * the ranges of the PHB though, not the bus itself as the rules | ||
504 | * for forwarding legacy cycles down bridges are not our problem | ||
505 | * here. So if the host bridge supports it, we do it. | ||
506 | */ | ||
507 | offset = (unsigned long)hose->io_base_virt - _IO_BASE; | ||
508 | offset += port; | ||
509 | |||
510 | if (!(rp->flags & IORESOURCE_IO)) | ||
511 | return -ENXIO; | ||
512 | if (offset < rp->start || (offset + size) > rp->end) | ||
513 | return -ENXIO; | ||
514 | addr = hose->io_base_virt + port; | ||
515 | |||
516 | switch(size) { | ||
517 | case 1: | ||
518 | *((u8 *)val) = in_8(addr); | ||
519 | return 1; | ||
520 | case 2: | ||
521 | if (port & 1) | ||
522 | return -EINVAL; | ||
523 | *((u16 *)val) = in_le16(addr); | ||
524 | return 2; | ||
525 | case 4: | ||
526 | if (port & 3) | ||
527 | return -EINVAL; | ||
528 | *((u32 *)val) = in_le32(addr); | ||
529 | return 4; | ||
530 | } | ||
531 | return -EINVAL; | ||
532 | } | ||
533 | |||
534 | /* This provides legacy IO write access on a bus */ | ||
535 | int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size) | ||
536 | { | ||
537 | unsigned long offset; | ||
538 | struct pci_controller *hose = pci_bus_to_host(bus); | ||
539 | struct resource *rp = &hose->io_resource; | ||
540 | void __iomem *addr; | ||
541 | |||
542 | /* Check if port can be supported by that bus. We only check | ||
543 | * the ranges of the PHB though, not the bus itself as the rules | ||
544 | * for forwarding legacy cycles down bridges are not our problem | ||
545 | * here. So if the host bridge supports it, we do it. | ||
546 | */ | ||
547 | offset = (unsigned long)hose->io_base_virt - _IO_BASE; | ||
548 | offset += port; | ||
549 | |||
550 | if (!(rp->flags & IORESOURCE_IO)) | ||
551 | return -ENXIO; | ||
552 | if (offset < rp->start || (offset + size) > rp->end) | ||
553 | return -ENXIO; | ||
554 | addr = hose->io_base_virt + port; | ||
555 | |||
556 | /* WARNING: The generic code is idiotic. It gets passed a pointer | ||
557 | * to what can be a 1, 2 or 4 byte quantity and always reads that | ||
558 | * as a u32, which means that we have to correct the location of | ||
559 | * the data read within those 32 bits for size 1 and 2 | ||
560 | */ | ||
561 | switch(size) { | ||
562 | case 1: | ||
563 | out_8(addr, val >> 24); | ||
564 | return 1; | ||
565 | case 2: | ||
566 | if (port & 1) | ||
567 | return -EINVAL; | ||
568 | out_le16(addr, val >> 16); | ||
569 | return 2; | ||
570 | case 4: | ||
571 | if (port & 3) | ||
572 | return -EINVAL; | ||
573 | out_le32(addr, val); | ||
574 | return 4; | ||
575 | } | ||
576 | return -EINVAL; | ||
577 | } | ||
578 | |||
579 | /* This provides legacy IO or memory mmap access on a bus */ | ||
580 | int pci_mmap_legacy_page_range(struct pci_bus *bus, | ||
581 | struct vm_area_struct *vma, | ||
582 | enum pci_mmap_state mmap_state) | ||
583 | { | ||
584 | struct pci_controller *hose = pci_bus_to_host(bus); | ||
585 | resource_size_t offset = | ||
586 | ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT; | ||
587 | resource_size_t size = vma->vm_end - vma->vm_start; | ||
588 | struct resource *rp; | ||
589 | |||
590 | pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n", | ||
591 | pci_domain_nr(bus), bus->number, | ||
592 | mmap_state == pci_mmap_mem ? "MEM" : "IO", | ||
593 | (unsigned long long)offset, | ||
594 | (unsigned long long)(offset + size - 1)); | ||
595 | |||
596 | if (mmap_state == pci_mmap_mem) { | ||
597 | if ((offset + size) > hose->isa_mem_size) | ||
598 | return -ENXIO; | ||
599 | offset += hose->isa_mem_phys; | ||
600 | } else { | ||
601 | unsigned long io_offset = (unsigned long)hose->io_base_virt - _IO_BASE; | ||
602 | unsigned long roffset = offset + io_offset; | ||
603 | rp = &hose->io_resource; | ||
604 | if (!(rp->flags & IORESOURCE_IO)) | ||
605 | return -ENXIO; | ||
606 | if (roffset < rp->start || (roffset + size) > rp->end) | ||
607 | return -ENXIO; | ||
608 | offset += hose->io_base_phys; | ||
609 | } | ||
610 | pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset); | ||
611 | |||
612 | vma->vm_pgoff = offset >> PAGE_SHIFT; | ||
613 | vma->vm_page_prot |= _PAGE_NO_CACHE | _PAGE_GUARDED; | ||
614 | return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, | ||
615 | vma->vm_end - vma->vm_start, | ||
616 | vma->vm_page_prot); | ||
617 | } | ||
618 | |||
493 | void pci_resource_to_user(const struct pci_dev *dev, int bar, | 619 | void pci_resource_to_user(const struct pci_dev *dev, int bar, |
494 | const struct resource *rsrc, | 620 | const struct resource *rsrc, |
495 | resource_size_t *start, resource_size_t *end) | 621 | resource_size_t *start, resource_size_t *end) |
@@ -592,6 +718,12 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose, | |||
592 | cpu_addr = of_translate_address(dev, ranges + 3); | 718 | cpu_addr = of_translate_address(dev, ranges + 3); |
593 | size = of_read_number(ranges + pna + 3, 2); | 719 | size = of_read_number(ranges + pna + 3, 2); |
594 | ranges += np; | 720 | ranges += np; |
721 | |||
722 | /* Skip the range if we failed translation or got a zero-sized region | ||
723 | * (some firmware tries to feed us nonsensical zero-sized regions, such | ||
724 | * as power3, which look like some kind of attempt at exposing the VGA | ||
725 | * memory hole). | ||
722 | /* Skip the range if we failed translation or got a zero-sized region | ||
726 | */ | ||
595 | if (cpu_addr == OF_BAD_ADDR || size == 0) | 727 | if (cpu_addr == OF_BAD_ADDR || size == 0) |
596 | continue; | 728 | continue; |
597 | 729 | ||
@@ -665,6 +797,8 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose, | |||
665 | isa_hole = memno; | 797 | isa_hole = memno; |
666 | if (primary || isa_mem_base == 0) | 798 | if (primary || isa_mem_base == 0) |
667 | isa_mem_base = cpu_addr; | 799 | isa_mem_base = cpu_addr; |
800 | hose->isa_mem_phys = cpu_addr; | ||
801 | hose->isa_mem_size = size; | ||
668 | } | 802 | } |
669 | 803 | ||
670 | /* We get the PCI/Mem offset from the first range or | 804 | /* We get the PCI/Mem offset from the first range or |
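Editor's note: with HAVE_PCI_LEGACY defined (see the asm/pci.h hunk above), the PCI core exposes per-bus legacy_io and legacy_mem files in sysfs, and accesses to them land in pci_legacy_read()/pci_legacy_write()/pci_mmap_legacy_page_range() added here. A user-space sketch of how that surfaces (the sysfs path, domain/bus number and port are examples only):

#define _XOPEN_SOURCE 500
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Per-bus legacy I/O file; the domain/bus number is an example. */
        int fd = open("/sys/class/pci_bus/0000:00/legacy_io", O_RDONLY);
        uint8_t val;

        if (fd < 0)
                return 1;

        /* One-byte read of legacy I/O port 0x3cc (VGA misc output register);
         * this reaches pci_legacy_read() above with size == 1. */
        if (pread(fd, &val, 1, 0x3cc) == 1)
                printf("port 0x3cc = 0x%02x\n", val);

        close(fd);
        return 0;
}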
diff --git a/arch/powerpc/platforms/cell/spufs/sputrace.c b/arch/powerpc/platforms/cell/spufs/sputrace.c index 92d20e993ede..2ece399f2862 100644 --- a/arch/powerpc/platforms/cell/spufs/sputrace.c +++ b/arch/powerpc/platforms/cell/spufs/sputrace.c | |||
@@ -232,6 +232,7 @@ static void __exit sputrace_exit(void) | |||
232 | 232 | ||
233 | remove_proc_entry("sputrace", NULL); | 233 | remove_proc_entry("sputrace", NULL); |
234 | kfree(sputrace_log); | 234 | kfree(sputrace_log); |
235 | marker_synchronize_unregister(); | ||
235 | } | 236 | } |
236 | 237 | ||
237 | module_init(sputrace_init); | 238 | module_init(sputrace_init); |
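Editor's note: the added marker_synchronize_unregister() closes a teardown race: after the probes are unregistered, in-flight probe callbacks may still be executing, so the exit path must wait for them before freeing the log buffer they write into. The general pattern, sketched with hypothetical names:

#include <linux/marker.h>
#include <linux/module.h>
#include <linux/slab.h>

static void *example_log;                       /* buffer the probe fills */
static marker_probe_func example_probe;         /* hypothetical probe     */

static void __exit example_trace_exit(void)
{
        marker_probe_unregister("example_event", example_probe, NULL);

        /* Wait until every probe invocation that may still be running
         * has returned... */
        marker_synchronize_unregister();

        /* ...only then is it safe to free the buffer they write to. */
        kfree(example_log);
}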
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 49349ba77d80..5b9b12321ad1 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -26,6 +26,7 @@ config X86 | |||
26 | select HAVE_KPROBES | 26 | select HAVE_KPROBES |
27 | select ARCH_WANT_OPTIONAL_GPIOLIB | 27 | select ARCH_WANT_OPTIONAL_GPIOLIB |
28 | select HAVE_KRETPROBES | 28 | select HAVE_KRETPROBES |
29 | select HAVE_FTRACE_MCOUNT_RECORD | ||
29 | select HAVE_DYNAMIC_FTRACE | 30 | select HAVE_DYNAMIC_FTRACE |
30 | select HAVE_FTRACE | 31 | select HAVE_FTRACE |
31 | select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64) | 32 | select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64) |
@@ -1242,14 +1243,6 @@ config EFI | |||
1242 | resultant kernel should continue to boot on existing non-EFI | 1243 | resultant kernel should continue to boot on existing non-EFI |
1243 | platforms. | 1244 | platforms. |
1244 | 1245 | ||
1245 | config IRQBALANCE | ||
1246 | def_bool y | ||
1247 | prompt "Enable kernel irq balancing" | ||
1248 | depends on X86_32 && SMP && X86_IO_APIC | ||
1249 | help | ||
1250 | The default yes will allow the kernel to do irq load balancing. | ||
1251 | Saying no will keep the kernel from doing irq load balancing. | ||
1252 | |||
1253 | config SECCOMP | 1246 | config SECCOMP |
1254 | def_bool y | 1247 | def_bool y |
1255 | prompt "Enable seccomp to safely compute untrusted bytecode" | 1248 | prompt "Enable seccomp to safely compute untrusted bytecode" |
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index 52d0359719d7..13b8c86ae985 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig | |||
@@ -287,7 +287,6 @@ CONFIG_MTRR=y | |||
287 | # CONFIG_MTRR_SANITIZER is not set | 287 | # CONFIG_MTRR_SANITIZER is not set |
288 | CONFIG_X86_PAT=y | 288 | CONFIG_X86_PAT=y |
289 | CONFIG_EFI=y | 289 | CONFIG_EFI=y |
290 | # CONFIG_IRQBALANCE is not set | ||
291 | CONFIG_SECCOMP=y | 290 | CONFIG_SECCOMP=y |
292 | # CONFIG_HZ_100 is not set | 291 | # CONFIG_HZ_100 is not set |
293 | # CONFIG_HZ_250 is not set | 292 | # CONFIG_HZ_250 is not set |
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 0d41f0343dc0..d7e5a58ee22f 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile | |||
@@ -23,7 +23,7 @@ CFLAGS_hpet.o := $(nostackp) | |||
23 | CFLAGS_tsc.o := $(nostackp) | 23 | CFLAGS_tsc.o := $(nostackp) |
24 | 24 | ||
25 | obj-y := process_$(BITS).o signal_$(BITS).o entry_$(BITS).o | 25 | obj-y := process_$(BITS).o signal_$(BITS).o entry_$(BITS).o |
26 | obj-y += traps.o irq_$(BITS).o dumpstack_$(BITS).o | 26 | obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o |
27 | obj-y += time_$(BITS).o ioport.o ldt.o | 27 | obj-y += time_$(BITS).o ioport.o ldt.o |
28 | obj-y += setup.o i8259.o irqinit_$(BITS).o setup_percpu.o | 28 | obj-y += setup.o i8259.o irqinit_$(BITS).o setup_percpu.o |
29 | obj-$(CONFIG_X86_VISWS) += visws_quirks.o | 29 | obj-$(CONFIG_X86_VISWS) += visws_quirks.o |
@@ -60,8 +60,8 @@ obj-$(CONFIG_X86_32_SMP) += smpcommon.o | |||
60 | obj-$(CONFIG_X86_64_SMP) += tsc_sync.o smpcommon.o | 60 | obj-$(CONFIG_X86_64_SMP) += tsc_sync.o smpcommon.o |
61 | obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o | 61 | obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o |
62 | obj-$(CONFIG_X86_MPPARSE) += mpparse.o | 62 | obj-$(CONFIG_X86_MPPARSE) += mpparse.o |
63 | obj-$(CONFIG_X86_LOCAL_APIC) += apic_$(BITS).o nmi.o | 63 | obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o |
64 | obj-$(CONFIG_X86_IO_APIC) += io_apic_$(BITS).o | 64 | obj-$(CONFIG_X86_IO_APIC) += io_apic.o |
65 | obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o | 65 | obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o |
66 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o | 66 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o |
67 | obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o | 67 | obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o |
@@ -108,7 +108,7 @@ obj-$(CONFIG_MICROCODE) += microcode.o | |||
108 | # 64 bit specific files | 108 | # 64 bit specific files |
109 | ifeq ($(CONFIG_X86_64),y) | 109 | ifeq ($(CONFIG_X86_64),y) |
110 | obj-y += genapic_64.o genapic_flat_64.o genx2apic_uv_x.o tlb_uv.o | 110 | obj-y += genapic_64.o genapic_flat_64.o genx2apic_uv_x.o tlb_uv.o |
111 | obj-y += bios_uv.o | 111 | obj-y += bios_uv.o uv_irq.o uv_sysfs.o |
112 | obj-y += genx2apic_cluster.o | 112 | obj-y += genx2apic_cluster.o |
113 | obj-y += genx2apic_phys.o | 113 | obj-y += genx2apic_phys.o |
114 | obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o | 114 | obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o |
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index eb875cdc7367..0d1c26a583c5 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c | |||
@@ -1256,7 +1256,7 @@ static int __init acpi_parse_madt_ioapic_entries(void) | |||
1256 | 1256 | ||
1257 | count = | 1257 | count = |
1258 | acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr, | 1258 | acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr, |
1259 | NR_IRQ_VECTORS); | 1259 | nr_irqs); |
1260 | if (count < 0) { | 1260 | if (count < 0) { |
1261 | printk(KERN_ERR PREFIX | 1261 | printk(KERN_ERR PREFIX |
1262 | "Error parsing interrupt source overrides entry\n"); | 1262 | "Error parsing interrupt source overrides entry\n"); |
@@ -1276,7 +1276,7 @@ static int __init acpi_parse_madt_ioapic_entries(void) | |||
1276 | 1276 | ||
1277 | count = | 1277 | count = |
1278 | acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src, | 1278 | acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src, |
1279 | NR_IRQ_VECTORS); | 1279 | nr_irqs); |
1280 | if (count < 0) { | 1280 | if (count < 0) { |
1281 | printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n"); | 1281 | printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n"); |
1282 | /* TBD: Cleanup to allow fallback to MPS */ | 1282 | /* TBD: Cleanup to allow fallback to MPS */ |
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index 426e5d91b63a..c44cd6dbfa14 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/dmi.h> | 10 | #include <linux/dmi.h> |
11 | #include <linux/cpumask.h> | 11 | #include <linux/cpumask.h> |
12 | #include <asm/segment.h> | 12 | #include <asm/segment.h> |
13 | #include <asm/desc.h> | ||
13 | 14 | ||
14 | #include "realmode/wakeup.h" | 15 | #include "realmode/wakeup.h" |
15 | #include "sleep.h" | 16 | #include "sleep.h" |
@@ -98,6 +99,8 @@ int acpi_save_state_mem(void) | |||
98 | header->trampoline_segment = setup_trampoline() >> 4; | 99 | header->trampoline_segment = setup_trampoline() >> 4; |
99 | #ifdef CONFIG_SMP | 100 | #ifdef CONFIG_SMP |
100 | stack_start.sp = temp_stack + 4096; | 101 | stack_start.sp = temp_stack + 4096; |
102 | early_gdt_descr.address = | ||
103 | (unsigned long)get_cpu_gdt_table(smp_processor_id()); | ||
101 | #endif | 104 | #endif |
102 | initial_code = (unsigned long)wakeup_long64; | 105 | initial_code = (unsigned long)wakeup_long64; |
103 | saved_magic = 0x123456789abcdef0; | 106 | saved_magic = 0x123456789abcdef0; |
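Editor's note: pointing early_gdt_descr at the GDT of the CPU performing the suspend makes the 64-bit wakeup path load that CPU's per-cpu GDT instead of the boot-time one. For reference, a sketch of the accessor used above (assumption: the asm/desc.h helper of this era, not part of this patch):

/* Each CPU's GDT lives in its per-cpu gdt_page, so the descriptor
 * address genuinely differs from CPU to CPU. */
static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
{
        return per_cpu(gdt_page, cpu).gdt;
}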
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic.c index 21c831d96af3..04a7f960bbc0 100644 --- a/arch/x86/kernel/apic_32.c +++ b/arch/x86/kernel/apic.c | |||
@@ -23,11 +23,13 @@ | |||
23 | #include <linux/mc146818rtc.h> | 23 | #include <linux/mc146818rtc.h> |
24 | #include <linux/kernel_stat.h> | 24 | #include <linux/kernel_stat.h> |
25 | #include <linux/sysdev.h> | 25 | #include <linux/sysdev.h> |
26 | #include <linux/ioport.h> | ||
26 | #include <linux/cpu.h> | 27 | #include <linux/cpu.h> |
27 | #include <linux/clockchips.h> | 28 | #include <linux/clockchips.h> |
28 | #include <linux/acpi_pmtmr.h> | 29 | #include <linux/acpi_pmtmr.h> |
29 | #include <linux/module.h> | 30 | #include <linux/module.h> |
30 | #include <linux/dmi.h> | 31 | #include <linux/dmi.h> |
32 | #include <linux/dmar.h> | ||
31 | 33 | ||
32 | #include <asm/atomic.h> | 34 | #include <asm/atomic.h> |
33 | #include <asm/smp.h> | 35 | #include <asm/smp.h> |
@@ -36,8 +38,14 @@ | |||
36 | #include <asm/desc.h> | 38 | #include <asm/desc.h> |
37 | #include <asm/arch_hooks.h> | 39 | #include <asm/arch_hooks.h> |
38 | #include <asm/hpet.h> | 40 | #include <asm/hpet.h> |
41 | #include <asm/pgalloc.h> | ||
39 | #include <asm/i8253.h> | 42 | #include <asm/i8253.h> |
40 | #include <asm/nmi.h> | 43 | #include <asm/nmi.h> |
44 | #include <asm/idle.h> | ||
45 | #include <asm/proto.h> | ||
46 | #include <asm/timex.h> | ||
47 | #include <asm/apic.h> | ||
48 | #include <asm/i8259.h> | ||
41 | 49 | ||
42 | #include <mach_apic.h> | 50 | #include <mach_apic.h> |
43 | #include <mach_apicdef.h> | 51 | #include <mach_apicdef.h> |
@@ -50,16 +58,58 @@ | |||
50 | # error SPURIOUS_APIC_VECTOR definition error | 58 | # error SPURIOUS_APIC_VECTOR definition error |
51 | #endif | 59 | #endif |
52 | 60 | ||
53 | unsigned long mp_lapic_addr; | 61 | #ifdef CONFIG_X86_32 |
54 | |||
55 | /* | 62 | /* |
56 | * Knob to control our willingness to enable the local APIC. | 63 | * Knob to control our willingness to enable the local APIC. |
57 | * | 64 | * |
58 | * +1=force-enable | 65 | * +1=force-enable |
59 | */ | 66 | */ |
60 | static int force_enable_local_apic; | 67 | static int force_enable_local_apic; |
61 | int disable_apic; | 68 | /* |
69 | * APIC command line parameters | ||
70 | */ | ||
71 | static int __init parse_lapic(char *arg) | ||
72 | { | ||
73 | force_enable_local_apic = 1; | ||
74 | return 0; | ||
75 | } | ||
76 | early_param("lapic", parse_lapic); | ||
77 | /* Local APIC was disabled by the BIOS and enabled by the kernel */ | ||
78 | static int enabled_via_apicbase; | ||
79 | |||
80 | #endif | ||
81 | |||
82 | #ifdef CONFIG_X86_64 | ||
83 | static int apic_calibrate_pmtmr __initdata; | ||
84 | static __init int setup_apicpmtimer(char *s) | ||
85 | { | ||
86 | apic_calibrate_pmtmr = 1; | ||
87 | notsc_setup(NULL); | ||
88 | return 0; | ||
89 | } | ||
90 | __setup("apicpmtimer", setup_apicpmtimer); | ||
91 | #endif | ||
92 | |||
93 | #ifdef CONFIG_X86_64 | ||
94 | #define HAVE_X2APIC | ||
95 | #endif | ||
96 | |||
97 | #ifdef HAVE_X2APIC | ||
98 | int x2apic; | ||
99 | /* x2apic enabled before OS handover */ | ||
100 | int x2apic_preenabled; | ||
101 | int disable_x2apic; | ||
102 | static __init int setup_nox2apic(char *str) | ||
103 | { | ||
104 | disable_x2apic = 1; | ||
105 | setup_clear_cpu_cap(X86_FEATURE_X2APIC); | ||
106 | return 0; | ||
107 | } | ||
108 | early_param("nox2apic", setup_nox2apic); | ||
109 | #endif | ||
62 | 110 | ||
111 | unsigned long mp_lapic_addr; | ||
112 | int disable_apic; | ||
63 | /* Disable local APIC timer from the kernel commandline or via dmi quirk */ | 113 | /* Disable local APIC timer from the kernel commandline or via dmi quirk */ |
64 | static int disable_apic_timer __cpuinitdata; | 114 | static int disable_apic_timer __cpuinitdata; |
65 | /* Local APIC timer works in C2 */ | 115 | /* Local APIC timer works in C2 */ |
@@ -110,9 +160,6 @@ static struct clock_event_device lapic_clockevent = { | |||
110 | }; | 160 | }; |
111 | static DEFINE_PER_CPU(struct clock_event_device, lapic_events); | 161 | static DEFINE_PER_CPU(struct clock_event_device, lapic_events); |
112 | 162 | ||
113 | /* Local APIC was disabled by the BIOS and enabled by the kernel */ | ||
114 | static int enabled_via_apicbase; | ||
115 | |||
116 | static unsigned long apic_phys; | 163 | static unsigned long apic_phys; |
117 | 164 | ||
118 | /* | 165 | /* |
@@ -202,6 +249,42 @@ static struct apic_ops xapic_ops = { | |||
202 | struct apic_ops __read_mostly *apic_ops = &xapic_ops; | 249 | struct apic_ops __read_mostly *apic_ops = &xapic_ops; |
203 | EXPORT_SYMBOL_GPL(apic_ops); | 250 | EXPORT_SYMBOL_GPL(apic_ops); |
204 | 251 | ||
252 | #ifdef HAVE_X2APIC | ||
253 | static void x2apic_wait_icr_idle(void) | ||
254 | { | ||
255 | /* no need to wait for icr idle in x2apic */ | ||
256 | return; | ||
257 | } | ||
258 | |||
259 | static u32 safe_x2apic_wait_icr_idle(void) | ||
260 | { | ||
261 | /* no need to wait for icr idle in x2apic */ | ||
262 | return 0; | ||
263 | } | ||
264 | |||
265 | void x2apic_icr_write(u32 low, u32 id) | ||
266 | { | ||
267 | wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low); | ||
268 | } | ||
269 | |||
270 | u64 x2apic_icr_read(void) | ||
271 | { | ||
272 | unsigned long val; | ||
273 | |||
274 | rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val); | ||
275 | return val; | ||
276 | } | ||
277 | |||
278 | static struct apic_ops x2apic_ops = { | ||
279 | .read = native_apic_msr_read, | ||
280 | .write = native_apic_msr_write, | ||
281 | .icr_read = x2apic_icr_read, | ||
282 | .icr_write = x2apic_icr_write, | ||
283 | .wait_icr_idle = x2apic_wait_icr_idle, | ||
284 | .safe_wait_icr_idle = safe_x2apic_wait_icr_idle, | ||
285 | }; | ||
286 | #endif | ||
287 | |||
205 | /** | 288 | /** |
206 | * enable_NMI_through_LVT0 - enable NMI through local vector table 0 | 289 | * enable_NMI_through_LVT0 - enable NMI through local vector table 0 |
207 | */ | 290 | */ |
@@ -219,6 +302,7 @@ void __cpuinit enable_NMI_through_LVT0(void) | |||
219 | apic_write(APIC_LVT0, v); | 302 | apic_write(APIC_LVT0, v); |
220 | } | 303 | } |
221 | 304 | ||
305 | #ifdef CONFIG_X86_32 | ||
222 | /** | 306 | /** |
223 | * get_physical_broadcast - Get number of physical broadcast IDs | 307 | * get_physical_broadcast - Get number of physical broadcast IDs |
224 | */ | 308 | */ |
@@ -226,6 +310,7 @@ int get_physical_broadcast(void) | |||
226 | { | 310 | { |
227 | return modern_apic() ? 0xff : 0xf; | 311 | return modern_apic() ? 0xff : 0xf; |
228 | } | 312 | } |
313 | #endif | ||
229 | 314 | ||
230 | /** | 315 | /** |
231 | * lapic_get_maxlvt - get the maximum number of local vector table entries | 316 | * lapic_get_maxlvt - get the maximum number of local vector table entries |
@@ -247,11 +332,7 @@ int lapic_get_maxlvt(void) | |||
247 | */ | 332 | */ |
248 | 333 | ||
249 | /* Clock divisor */ | 334 | /* Clock divisor */ |
250 | #ifdef CONFG_X86_64 | ||
251 | #define APIC_DIVISOR 1 | ||
252 | #else | ||
253 | #define APIC_DIVISOR 16 | 335 | #define APIC_DIVISOR 16 |
254 | #endif | ||
255 | 336 | ||
256 | /* | 337 | /* |
257 | * This function sets up the local APIC timer, with a timeout of | 338 | * This function sets up the local APIC timer, with a timeout of |
@@ -383,7 +464,7 @@ static void lapic_timer_broadcast(cpumask_t mask) | |||
383 | * Setup the local APIC timer for this CPU. Copy the initilized values | 464 | * Setup the local APIC timer for this CPU. Copy the initilized values |
384 | * of the boot CPU and register the clock event in the framework. | 465 | * of the boot CPU and register the clock event in the framework. |
385 | */ | 466 | */ |
386 | static void __devinit setup_APIC_timer(void) | 467 | static void __cpuinit setup_APIC_timer(void) |
387 | { | 468 | { |
388 | struct clock_event_device *levt = &__get_cpu_var(lapic_events); | 469 | struct clock_event_device *levt = &__get_cpu_var(lapic_events); |
389 | 470 | ||
@@ -453,14 +534,51 @@ static void __init lapic_cal_handler(struct clock_event_device *dev) | |||
453 | } | 534 | } |
454 | } | 535 | } |
455 | 536 | ||
537 | static int __init calibrate_by_pmtimer(long deltapm, long *delta) | ||
538 | { | ||
539 | const long pm_100ms = PMTMR_TICKS_PER_SEC / 10; | ||
540 | const long pm_thresh = pm_100ms / 100; | ||
541 | unsigned long mult; | ||
542 | u64 res; | ||
543 | |||
544 | #ifndef CONFIG_X86_PM_TIMER | ||
545 | return -1; | ||
546 | #endif | ||
547 | |||
548 | apic_printk(APIC_VERBOSE, "... PM timer delta = %ld\n", deltapm); | ||
549 | |||
550 | /* Check, if the PM timer is available */ | ||
551 | if (!deltapm) | ||
552 | return -1; | ||
553 | |||
554 | mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC, 22); | ||
555 | |||
556 | if (deltapm > (pm_100ms - pm_thresh) && | ||
557 | deltapm < (pm_100ms + pm_thresh)) { | ||
558 | apic_printk(APIC_VERBOSE, "... PM timer result ok\n"); | ||
559 | } else { | ||
560 | res = (((u64)deltapm) * mult) >> 22; | ||
561 | do_div(res, 1000000); | ||
562 | printk(KERN_WARNING "APIC calibration not consistent " | ||
563 | "with PM Timer: %ldms instead of 100ms\n", | ||
564 | (long)res); | ||
565 | /* Correct the lapic counter value */ | ||
566 | res = (((u64)(*delta)) * pm_100ms); | ||
567 | do_div(res, deltapm); | ||
568 | printk(KERN_INFO "APIC delta adjusted to PM-Timer: " | ||
569 | "%lu (%ld)\n", (unsigned long)res, *delta); | ||
570 | *delta = (long)res; | ||
571 | } | ||
572 | |||
573 | return 0; | ||
574 | } | ||
575 | |||
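The new calibrate_by_pmtimer() accepts the LAPIC measurement only when the PM-timer delta lands within 1% of the expected 100 ms (pm_100ms = PMTMR_TICKS_PER_SEC / 10, the ACPI timer running at 3.579545 MHz); otherwise it rescales the LAPIC delta by pm_100ms / deltapm. A stand-alone sketch of that correction with illustrative numbers (the helper name is ours):

#include <stdio.h>
#include <stdint.h>

#define PMTMR_TICKS_PER_SEC 3579545     /* ACPI PM timer runs at 3.579545 MHz */

/* Same correction as calibrate_by_pmtimer() above, minus the kernel plumbing. */
static long rescale_lapic_delta(long delta, long deltapm)
{
        const long pm_100ms = PMTMR_TICKS_PER_SEC / 10;
        const long pm_thresh = pm_100ms / 100;          /* +/- 1% window */

        if (deltapm > pm_100ms - pm_thresh && deltapm < pm_100ms + pm_thresh)
                return delta;                           /* within 1%: trust it */

        /* Interval was not 100 ms: scale the lapic count to what 100 ms would give. */
        return (long)(((uint64_t)delta * pm_100ms) / deltapm);
}

int main(void)
{
        /* e.g. a lapic delta of 625000 measured over only ~95 ms of PM-timer time */
        long deltapm = PMTMR_TICKS_PER_SEC / 10 * 95 / 100;

        printf("corrected delta = %ld\n", rescale_lapic_delta(625000, deltapm));
        return 0;
}
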
456 | static int __init calibrate_APIC_clock(void) | 576 | static int __init calibrate_APIC_clock(void) |
457 | { | 577 | { |
458 | struct clock_event_device *levt = &__get_cpu_var(lapic_events); | 578 | struct clock_event_device *levt = &__get_cpu_var(lapic_events); |
459 | const long pm_100ms = PMTMR_TICKS_PER_SEC/10; | ||
460 | const long pm_thresh = pm_100ms/100; | ||
461 | void (*real_handler)(struct clock_event_device *dev); | 579 | void (*real_handler)(struct clock_event_device *dev); |
462 | unsigned long deltaj; | 580 | unsigned long deltaj; |
463 | long delta, deltapm; | 581 | long delta; |
464 | int pm_referenced = 0; | 582 | int pm_referenced = 0; |
465 | 583 | ||
466 | local_irq_disable(); | 584 | local_irq_disable(); |
@@ -470,10 +588,10 @@ static int __init calibrate_APIC_clock(void) | |||
470 | global_clock_event->event_handler = lapic_cal_handler; | 588 | global_clock_event->event_handler = lapic_cal_handler; |
471 | 589 | ||
472 | /* | 590 | /* |
473 | * Setup the APIC counter to 1e9. There is no way the lapic | 591 | * Setup the APIC counter to maximum. There is no way the lapic |
474 | * can underflow in the 100ms detection time frame | 592 | * can underflow in the 100ms detection time frame |
475 | */ | 593 | */ |
476 | __setup_APIC_LVTT(1000000000, 0, 0); | 594 | __setup_APIC_LVTT(0xffffffff, 0, 0); |
477 | 595 | ||
478 | /* Let the interrupts run */ | 596 | /* Let the interrupts run */ |
479 | local_irq_enable(); | 597 | local_irq_enable(); |
@@ -490,34 +608,9 @@ static int __init calibrate_APIC_clock(void) | |||
490 | delta = lapic_cal_t1 - lapic_cal_t2; | 608 | delta = lapic_cal_t1 - lapic_cal_t2; |
491 | apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta); | 609 | apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta); |
492 | 610 | ||
493 | /* Check, if the PM timer is available */ | 611 | /* we trust the PM based calibration if possible */ |
494 | deltapm = lapic_cal_pm2 - lapic_cal_pm1; | 612 | pm_referenced = !calibrate_by_pmtimer(lapic_cal_pm2 - lapic_cal_pm1, |
495 | apic_printk(APIC_VERBOSE, "... PM timer delta = %ld\n", deltapm); | 613 | &delta); |
496 | |||
497 | if (deltapm) { | ||
498 | unsigned long mult; | ||
499 | u64 res; | ||
500 | |||
501 | mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC, 22); | ||
502 | |||
503 | if (deltapm > (pm_100ms - pm_thresh) && | ||
504 | deltapm < (pm_100ms + pm_thresh)) { | ||
505 | apic_printk(APIC_VERBOSE, "... PM timer result ok\n"); | ||
506 | } else { | ||
507 | res = (((u64) deltapm) * mult) >> 22; | ||
508 | do_div(res, 1000000); | ||
509 | printk(KERN_WARNING "APIC calibration not consistent " | ||
510 | "with PM Timer: %ldms instead of 100ms\n", | ||
511 | (long)res); | ||
512 | /* Correct the lapic counter value */ | ||
513 | res = (((u64) delta) * pm_100ms); | ||
514 | do_div(res, deltapm); | ||
515 | printk(KERN_INFO "APIC delta adjusted to PM-Timer: " | ||
516 | "%lu (%ld)\n", (unsigned long) res, delta); | ||
517 | delta = (long) res; | ||
518 | } | ||
519 | pm_referenced = 1; | ||
520 | } | ||
521 | 614 | ||
522 | /* Calculate the scaled math multiplication factor */ | 615 | /* Calculate the scaled math multiplication factor */ |
523 | lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS, | 616 | lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS, |
@@ -559,7 +652,10 @@ static int __init calibrate_APIC_clock(void) | |||
559 | 652 | ||
560 | levt->features &= ~CLOCK_EVT_FEAT_DUMMY; | 653 | levt->features &= ~CLOCK_EVT_FEAT_DUMMY; |
561 | 654 | ||
562 | /* We trust the pm timer based calibration */ | 655 | /* |
656 | * PM timer calibration failed or not turned on | ||
657 | * so lets try APIC timer based calibration | ||
658 | */ | ||
563 | if (!pm_referenced) { | 659 | if (!pm_referenced) { |
564 | apic_printk(APIC_VERBOSE, "... verify APIC timer\n"); | 660 | apic_printk(APIC_VERBOSE, "... verify APIC timer\n"); |
565 | 661 | ||
@@ -652,7 +748,7 @@ void __init setup_boot_APIC_clock(void) | |||
652 | setup_APIC_timer(); | 748 | setup_APIC_timer(); |
653 | } | 749 | } |
654 | 750 | ||
655 | void __devinit setup_secondary_APIC_clock(void) | 751 | void __cpuinit setup_secondary_APIC_clock(void) |
656 | { | 752 | { |
657 | setup_APIC_timer(); | 753 | setup_APIC_timer(); |
658 | } | 754 | } |
@@ -718,6 +814,9 @@ void smp_apic_timer_interrupt(struct pt_regs *regs) | |||
718 | * Besides, if we don't, timer interrupts ignore the global | 814 | * Besides, if we don't, timer interrupts ignore the global |
719 | * interrupt lock, which is the WrongThing (tm) to do. | 815 | * interrupt lock, which is the WrongThing (tm) to do. |
720 | */ | 816 | */ |
817 | #ifdef CONFIG_X86_64 | ||
818 | exit_idle(); | ||
819 | #endif | ||
721 | irq_enter(); | 820 | irq_enter(); |
722 | local_apic_timer_interrupt(); | 821 | local_apic_timer_interrupt(); |
723 | irq_exit(); | 822 | irq_exit(); |
@@ -991,40 +1090,43 @@ void __init init_bsp_APIC(void) | |||
991 | 1090 | ||
992 | static void __cpuinit lapic_setup_esr(void) | 1091 | static void __cpuinit lapic_setup_esr(void) |
993 | { | 1092 | { |
994 | unsigned long oldvalue, value, maxlvt; | 1093 | unsigned int oldvalue, value, maxlvt; |
995 | if (lapic_is_integrated() && !esr_disable) { | 1094 | |
996 | if (esr_disable) { | 1095 | if (!lapic_is_integrated()) { |
997 | /* | 1096 | printk(KERN_INFO "No ESR for 82489DX.\n"); |
998 | * Something untraceable is creating bad interrupts on | 1097 | return; |
999 | * secondary quads ... for the moment, just leave the | 1098 | } |
1000 | * ESR disabled - we can't do anything useful with the | ||
1001 | * errors anyway - mbligh | ||
1002 | */ | ||
1003 | printk(KERN_INFO "Leaving ESR disabled.\n"); | ||
1004 | return; | ||
1005 | } | ||
1006 | /* !82489DX */ | ||
1007 | maxlvt = lapic_get_maxlvt(); | ||
1008 | if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ | ||
1009 | apic_write(APIC_ESR, 0); | ||
1010 | oldvalue = apic_read(APIC_ESR); | ||
1011 | 1099 | ||
1012 | /* enables sending errors */ | 1100 | if (esr_disable) { |
1013 | value = ERROR_APIC_VECTOR; | ||
1014 | apic_write(APIC_LVTERR, value); | ||
1015 | /* | 1101 | /* |
1016 | * spec says clear errors after enabling vector. | 1102 | * Something untraceable is creating bad interrupts on |
1103 | * secondary quads ... for the moment, just leave the | ||
1104 | * ESR disabled - we can't do anything useful with the | ||
1105 | * errors anyway - mbligh | ||
1017 | */ | 1106 | */ |
1018 | if (maxlvt > 3) | 1107 | printk(KERN_INFO "Leaving ESR disabled.\n"); |
1019 | apic_write(APIC_ESR, 0); | 1108 | return; |
1020 | value = apic_read(APIC_ESR); | ||
1021 | if (value != oldvalue) | ||
1022 | apic_printk(APIC_VERBOSE, "ESR value before enabling " | ||
1023 | "vector: 0x%08lx after: 0x%08lx\n", | ||
1024 | oldvalue, value); | ||
1025 | } else { | ||
1026 | printk(KERN_INFO "No ESR for 82489DX.\n"); | ||
1027 | } | 1109 | } |
1110 | |||
1111 | maxlvt = lapic_get_maxlvt(); | ||
1112 | if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ | ||
1113 | apic_write(APIC_ESR, 0); | ||
1114 | oldvalue = apic_read(APIC_ESR); | ||
1115 | |||
1116 | /* enables sending errors */ | ||
1117 | value = ERROR_APIC_VECTOR; | ||
1118 | apic_write(APIC_LVTERR, value); | ||
1119 | |||
1120 | /* | ||
1121 | * spec says clear errors after enabling vector. | ||
1122 | */ | ||
1123 | if (maxlvt > 3) | ||
1124 | apic_write(APIC_ESR, 0); | ||
1125 | value = apic_read(APIC_ESR); | ||
1126 | if (value != oldvalue) | ||
1127 | apic_printk(APIC_VERBOSE, "ESR value before enabling " | ||
1128 | "vector: 0x%08x after: 0x%08x\n", | ||
1129 | oldvalue, value); | ||
1028 | } | 1130 | } |
1029 | 1131 | ||
1030 | 1132 | ||
@@ -1033,24 +1135,27 @@ static void __cpuinit lapic_setup_esr(void) | |||
1033 | */ | 1135 | */ |
1034 | void __cpuinit setup_local_APIC(void) | 1136 | void __cpuinit setup_local_APIC(void) |
1035 | { | 1137 | { |
1036 | unsigned long value, integrated; | 1138 | unsigned int value; |
1037 | int i, j; | 1139 | int i, j; |
1038 | 1140 | ||
1141 | #ifdef CONFIG_X86_32 | ||
1039 | /* Pound the ESR really hard over the head with a big hammer - mbligh */ | 1142 | /* Pound the ESR really hard over the head with a big hammer - mbligh */ |
1040 | if (esr_disable) { | 1143 | if (lapic_is_integrated() && esr_disable) { |
1041 | apic_write(APIC_ESR, 0); | 1144 | apic_write(APIC_ESR, 0); |
1042 | apic_write(APIC_ESR, 0); | 1145 | apic_write(APIC_ESR, 0); |
1043 | apic_write(APIC_ESR, 0); | 1146 | apic_write(APIC_ESR, 0); |
1044 | apic_write(APIC_ESR, 0); | 1147 | apic_write(APIC_ESR, 0); |
1045 | } | 1148 | } |
1149 | #endif | ||
1046 | 1150 | ||
1047 | integrated = lapic_is_integrated(); | 1151 | preempt_disable(); |
1048 | 1152 | ||
1049 | /* | 1153 | /* |
1050 | * Double-check whether this APIC is really registered. | 1154 | * Double-check whether this APIC is really registered. |
1155 | * This is meaningless in clustered apic mode, so we skip it. | ||
1051 | */ | 1156 | */ |
1052 | if (!apic_id_registered()) | 1157 | if (!apic_id_registered()) |
1053 | WARN_ON_ONCE(1); | 1158 | BUG(); |
1054 | 1159 | ||
1055 | /* | 1160 | /* |
1056 | * Intel recommends to set DFR, LDR and TPR before enabling | 1161 | * Intel recommends to set DFR, LDR and TPR before enabling |
@@ -1096,6 +1201,7 @@ void __cpuinit setup_local_APIC(void) | |||
1096 | */ | 1201 | */ |
1097 | value |= APIC_SPIV_APIC_ENABLED; | 1202 | value |= APIC_SPIV_APIC_ENABLED; |
1098 | 1203 | ||
1204 | #ifdef CONFIG_X86_32 | ||
1099 | /* | 1205 | /* |
1100 | * Some unknown Intel IO/APIC (or APIC) errata is biting us with | 1206 | * Some unknown Intel IO/APIC (or APIC) errata is biting us with |
1101 | * certain networking cards. If high frequency interrupts are | 1207 | * certain networking cards. If high frequency interrupts are |
@@ -1116,8 +1222,13 @@ void __cpuinit setup_local_APIC(void) | |||
1116 | * See also the comment in end_level_ioapic_irq(). --macro | 1222 | * See also the comment in end_level_ioapic_irq(). --macro |
1117 | */ | 1223 | */ |
1118 | 1224 | ||
1119 | /* Enable focus processor (bit==0) */ | 1225 | /* |
1226 | * - enable focus processor (bit==0) | ||
1227 | * - 64bit mode always use processor focus | ||
1228 | * so no need to set it | ||
1229 | */ | ||
1120 | value &= ~APIC_SPIV_FOCUS_DISABLED; | 1230 | value &= ~APIC_SPIV_FOCUS_DISABLED; |
1231 | #endif | ||
1121 | 1232 | ||
1122 | /* | 1233 | /* |
1123 | * Set spurious IRQ vector | 1234 | * Set spurious IRQ vector |
@@ -1154,9 +1265,11 @@ void __cpuinit setup_local_APIC(void) | |||
1154 | value = APIC_DM_NMI; | 1265 | value = APIC_DM_NMI; |
1155 | else | 1266 | else |
1156 | value = APIC_DM_NMI | APIC_LVT_MASKED; | 1267 | value = APIC_DM_NMI | APIC_LVT_MASKED; |
1157 | if (!integrated) /* 82489DX */ | 1268 | if (!lapic_is_integrated()) /* 82489DX */ |
1158 | value |= APIC_LVT_LEVEL_TRIGGER; | 1269 | value |= APIC_LVT_LEVEL_TRIGGER; |
1159 | apic_write(APIC_LVT1, value); | 1270 | apic_write(APIC_LVT1, value); |
1271 | |||
1272 | preempt_enable(); | ||
1160 | } | 1273 | } |
1161 | 1274 | ||
1162 | void __cpuinit end_local_APIC_setup(void) | 1275 | void __cpuinit end_local_APIC_setup(void) |
@@ -1177,6 +1290,153 @@ void __cpuinit end_local_APIC_setup(void) | |||
1177 | apic_pm_activate(); | 1290 | apic_pm_activate(); |
1178 | } | 1291 | } |
1179 | 1292 | ||
1293 | #ifdef HAVE_X2APIC | ||
1294 | void check_x2apic(void) | ||
1295 | { | ||
1296 | int msr, msr2; | ||
1297 | |||
1298 | rdmsr(MSR_IA32_APICBASE, msr, msr2); | ||
1299 | |||
1300 | if (msr & X2APIC_ENABLE) { | ||
1301 | printk("x2apic enabled by BIOS, switching to x2apic ops\n"); | ||
1302 | x2apic_preenabled = x2apic = 1; | ||
1303 | apic_ops = &x2apic_ops; | ||
1304 | } | ||
1305 | } | ||
1306 | |||
1307 | void enable_x2apic(void) | ||
1308 | { | ||
1309 | int msr, msr2; | ||
1310 | |||
1311 | rdmsr(MSR_IA32_APICBASE, msr, msr2); | ||
1312 | if (!(msr & X2APIC_ENABLE)) { | ||
1313 | printk("Enabling x2apic\n"); | ||
1314 | wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0); | ||
1315 | } | ||
1316 | } | ||
1317 | |||
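check_x2apic() and enable_x2apic() above key off the x2APIC-enable bit (bit 10) of IA32_APICBASE (MSR 0x1b). A hedged user-space sketch that decodes such an MSR value, e.g. one read through the msr device node; the macro names, the helper and the sample value are ours, the bit positions follow the architectural MSR layout:

#include <stdio.h>
#include <stdint.h>

#define APICBASE_BSP            (1ULL << 8)     /* this CPU is the bootstrap processor */
#define APICBASE_X2APIC_ENABLE  (1ULL << 10)    /* x2APIC mode (what check_x2apic() tests) */
#define APICBASE_GLOBAL_ENABLE  (1ULL << 11)    /* APIC globally enabled */

/* Decode a raw IA32_APICBASE value. */
static void decode_apicbase(uint64_t v)
{
        printf("base=%#llx bsp=%d x2apic=%d enabled=%d\n",
               (unsigned long long)(v & ~0xfffULL),
               !!(v & APICBASE_BSP),
               !!(v & APICBASE_X2APIC_ENABLE),
               !!(v & APICBASE_GLOBAL_ENABLE));
}

int main(void)
{
        decode_apicbase(0xfee00d00ULL); /* BSP, x2APIC and global enable all set */
        return 0;
}
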
1318 | void enable_IR_x2apic(void) | ||
1319 | { | ||
1320 | #ifdef CONFIG_INTR_REMAP | ||
1321 | int ret; | ||
1322 | unsigned long flags; | ||
1323 | |||
1324 | if (!cpu_has_x2apic) | ||
1325 | return; | ||
1326 | |||
1327 | if (!x2apic_preenabled && disable_x2apic) { | ||
1328 | printk(KERN_INFO | ||
1329 | "Skipped enabling x2apic and Interrupt-remapping " | ||
1330 | "because of nox2apic\n"); | ||
1331 | return; | ||
1332 | } | ||
1333 | |||
1334 | if (x2apic_preenabled && disable_x2apic) | ||
1335 | panic("Bios already enabled x2apic, can't enforce nox2apic"); | ||
1336 | |||
1337 | if (!x2apic_preenabled && skip_ioapic_setup) { | ||
1338 | printk(KERN_INFO | ||
1339 | "Skipped enabling x2apic and Interrupt-remapping " | ||
1340 | "because of skipping io-apic setup\n"); | ||
1341 | return; | ||
1342 | } | ||
1343 | |||
1344 | ret = dmar_table_init(); | ||
1345 | if (ret) { | ||
1346 | printk(KERN_INFO | ||
1347 | "dmar_table_init() failed with %d:\n", ret); | ||
1348 | |||
1349 | if (x2apic_preenabled) | ||
1350 | panic("x2apic enabled by bios. But IR enabling failed"); | ||
1351 | else | ||
1352 | printk(KERN_INFO | ||
1353 | "Not enabling x2apic,Intr-remapping\n"); | ||
1354 | return; | ||
1355 | } | ||
1356 | |||
1357 | local_irq_save(flags); | ||
1358 | mask_8259A(); | ||
1359 | |||
1360 | ret = save_mask_IO_APIC_setup(); | ||
1361 | if (ret) { | ||
1362 | printk(KERN_INFO "Saving IO-APIC state failed: %d\n", ret); | ||
1363 | goto end; | ||
1364 | } | ||
1365 | |||
1366 | ret = enable_intr_remapping(1); | ||
1367 | |||
1368 | if (ret && x2apic_preenabled) { | ||
1369 | local_irq_restore(flags); | ||
1370 | panic("x2apic enabled by bios. But IR enabling failed"); | ||
1371 | } | ||
1372 | |||
1373 | if (ret) | ||
1374 | goto end_restore; | ||
1375 | |||
1376 | if (!x2apic) { | ||
1377 | x2apic = 1; | ||
1378 | apic_ops = &x2apic_ops; | ||
1379 | enable_x2apic(); | ||
1380 | } | ||
1381 | |||
1382 | end_restore: | ||
1383 | if (ret) | ||
1384 | /* | ||
1385 | * IR enabling failed | ||
1386 | */ | ||
1387 | restore_IO_APIC_setup(); | ||
1388 | else | ||
1389 | reinit_intr_remapped_IO_APIC(x2apic_preenabled); | ||
1390 | |||
1391 | end: | ||
1392 | unmask_8259A(); | ||
1393 | local_irq_restore(flags); | ||
1394 | |||
1395 | if (!ret) { | ||
1396 | if (!x2apic_preenabled) | ||
1397 | printk(KERN_INFO | ||
1398 | "Enabled x2apic and interrupt-remapping\n"); | ||
1399 | else | ||
1400 | printk(KERN_INFO | ||
1401 | "Enabled Interrupt-remapping\n"); | ||
1402 | } else | ||
1403 | printk(KERN_ERR | ||
1404 | "Failed to enable Interrupt-remapping and x2apic\n"); | ||
1405 | #else | ||
1406 | if (!cpu_has_x2apic) | ||
1407 | return; | ||
1408 | |||
1409 | if (x2apic_preenabled) | ||
1410 | panic("x2apic enabled prior OS handover," | ||
1411 | " enable CONFIG_INTR_REMAP"); | ||
1412 | |||
1413 | printk(KERN_INFO "Enable CONFIG_INTR_REMAP for enabling intr-remapping " | ||
1414 | " and x2apic\n"); | ||
1415 | #endif | ||
1416 | |||
1417 | return; | ||
1418 | } | ||
1419 | #endif /* HAVE_X2APIC */ | ||
1420 | |||
1421 | #ifdef CONFIG_X86_64 | ||
1422 | /* | ||
1423 | * Detect and enable local APICs on non-SMP boards. | ||
1424 | * Original code written by Keir Fraser. | ||
1425 | * On AMD64 we trust the BIOS - if it says no APIC it is likely | ||
1426 | * not correctly set up (usually the APIC timer won't work etc.) | ||
1427 | */ | ||
1428 | static int __init detect_init_APIC(void) | ||
1429 | { | ||
1430 | if (!cpu_has_apic) { | ||
1431 | printk(KERN_INFO "No local APIC present\n"); | ||
1432 | return -1; | ||
1433 | } | ||
1434 | |||
1435 | mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; | ||
1436 | boot_cpu_physical_apicid = 0; | ||
1437 | return 0; | ||
1438 | } | ||
1439 | #else | ||
1180 | /* | 1440 | /* |
1181 | * Detect and initialize APIC | 1441 | * Detect and initialize APIC |
1182 | */ | 1442 | */ |
@@ -1255,12 +1515,46 @@ no_apic: | |||
1255 | printk(KERN_INFO "No local APIC present or hardware disabled\n"); | 1515 | printk(KERN_INFO "No local APIC present or hardware disabled\n"); |
1256 | return -1; | 1516 | return -1; |
1257 | } | 1517 | } |
1518 | #endif | ||
1519 | |||
1520 | #ifdef CONFIG_X86_64 | ||
1521 | void __init early_init_lapic_mapping(void) | ||
1522 | { | ||
1523 | unsigned long phys_addr; | ||
1524 | |||
1525 | /* | ||
1526 | * If no local APIC can be found then go out | ||
1527 | * : it means there is no mpatable and MADT | ||
1528 | */ | ||
1529 | if (!smp_found_config) | ||
1530 | return; | ||
1531 | |||
1532 | phys_addr = mp_lapic_addr; | ||
1533 | |||
1534 | set_fixmap_nocache(FIX_APIC_BASE, phys_addr); | ||
1535 | apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n", | ||
1536 | APIC_BASE, phys_addr); | ||
1537 | |||
1538 | /* | ||
1539 | * Fetch the APIC ID of the BSP in case we have a | ||
1540 | * default configuration (or the MP table is broken). | ||
1541 | */ | ||
1542 | boot_cpu_physical_apicid = read_apic_id(); | ||
1543 | } | ||
1544 | #endif | ||
1258 | 1545 | ||
1259 | /** | 1546 | /** |
1260 | * init_apic_mappings - initialize APIC mappings | 1547 | * init_apic_mappings - initialize APIC mappings |
1261 | */ | 1548 | */ |
1262 | void __init init_apic_mappings(void) | 1549 | void __init init_apic_mappings(void) |
1263 | { | 1550 | { |
1551 | #ifdef HAVE_X2APIC | ||
1552 | if (x2apic) { | ||
1553 | boot_cpu_physical_apicid = read_apic_id(); | ||
1554 | return; | ||
1555 | } | ||
1556 | #endif | ||
1557 | |||
1264 | /* | 1558 | /* |
1265 | * If no local APIC can be found then set up a fake all | 1559 | * If no local APIC can be found then set up a fake all |
1266 | * zeroes page to simulate the local APIC and another | 1560 | * zeroes page to simulate the local APIC and another |
@@ -1273,8 +1567,8 @@ void __init init_apic_mappings(void) | |||
1273 | apic_phys = mp_lapic_addr; | 1567 | apic_phys = mp_lapic_addr; |
1274 | 1568 | ||
1275 | set_fixmap_nocache(FIX_APIC_BASE, apic_phys); | 1569 | set_fixmap_nocache(FIX_APIC_BASE, apic_phys); |
1276 | printk(KERN_DEBUG "mapped APIC to %08lx (%08lx)\n", APIC_BASE, | 1570 | apic_printk(APIC_VERBOSE, "mapped APIC to %08lx (%08lx)\n", |
1277 | apic_phys); | 1571 | APIC_BASE, apic_phys); |
1278 | 1572 | ||
1279 | /* | 1573 | /* |
1280 | * Fetch the APIC ID of the BSP in case we have a | 1574 | * Fetch the APIC ID of the BSP in case we have a |
@@ -1282,18 +1576,27 @@ void __init init_apic_mappings(void) | |||
1282 | */ | 1576 | */ |
1283 | if (boot_cpu_physical_apicid == -1U) | 1577 | if (boot_cpu_physical_apicid == -1U) |
1284 | boot_cpu_physical_apicid = read_apic_id(); | 1578 | boot_cpu_physical_apicid = read_apic_id(); |
1285 | |||
1286 | } | 1579 | } |
1287 | 1580 | ||
1288 | /* | 1581 | /* |
1289 | * This initializes the IO-APIC and APIC hardware if this is | 1582 | * This initializes the IO-APIC and APIC hardware if this is |
1290 | * a UP kernel. | 1583 | * a UP kernel. |
1291 | */ | 1584 | */ |
1292 | |||
1293 | int apic_version[MAX_APICS]; | 1585 | int apic_version[MAX_APICS]; |
1294 | 1586 | ||
1295 | int __init APIC_init_uniprocessor(void) | 1587 | int __init APIC_init_uniprocessor(void) |
1296 | { | 1588 | { |
1589 | #ifdef CONFIG_X86_64 | ||
1590 | if (disable_apic) { | ||
1591 | printk(KERN_INFO "Apic disabled\n"); | ||
1592 | return -1; | ||
1593 | } | ||
1594 | if (!cpu_has_apic) { | ||
1595 | disable_apic = 1; | ||
1596 | printk(KERN_INFO "Apic disabled by BIOS\n"); | ||
1597 | return -1; | ||
1598 | } | ||
1599 | #else | ||
1297 | if (!smp_found_config && !cpu_has_apic) | 1600 | if (!smp_found_config && !cpu_has_apic) |
1298 | return -1; | 1601 | return -1; |
1299 | 1602 | ||
@@ -1302,39 +1605,68 @@ int __init APIC_init_uniprocessor(void) | |||
1302 | */ | 1605 | */ |
1303 | if (!cpu_has_apic && | 1606 | if (!cpu_has_apic && |
1304 | APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) { | 1607 | APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) { |
1305 | printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", | 1608 | printk(KERN_ERR "BIOS bug, local APIC 0x%x not detected!...\n", |
1306 | boot_cpu_physical_apicid); | 1609 | boot_cpu_physical_apicid); |
1307 | clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC); | 1610 | clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC); |
1308 | return -1; | 1611 | return -1; |
1309 | } | 1612 | } |
1613 | #endif | ||
1310 | 1614 | ||
1311 | verify_local_APIC(); | 1615 | #ifdef HAVE_X2APIC |
1616 | enable_IR_x2apic(); | ||
1617 | #endif | ||
1618 | #ifdef CONFIG_X86_64 | ||
1619 | setup_apic_routing(); | ||
1620 | #endif | ||
1312 | 1621 | ||
1622 | verify_local_APIC(); | ||
1313 | connect_bsp_APIC(); | 1623 | connect_bsp_APIC(); |
1314 | 1624 | ||
1625 | #ifdef CONFIG_X86_64 | ||
1626 | apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid)); | ||
1627 | #else | ||
1315 | /* | 1628 | /* |
1316 | * Hack: In case of kdump, after a crash, kernel might be booting | 1629 | * Hack: In case of kdump, after a crash, kernel might be booting |
1317 | * on a cpu with non-zero lapic id. But boot_cpu_physical_apicid | 1630 | * on a cpu with non-zero lapic id. But boot_cpu_physical_apicid |
1318 | * might be zero if read from MP tables. Get it from LAPIC. | 1631 | * might be zero if read from MP tables. Get it from LAPIC. |
1319 | */ | 1632 | */ |
1320 | #ifdef CONFIG_CRASH_DUMP | 1633 | # ifdef CONFIG_CRASH_DUMP |
1321 | boot_cpu_physical_apicid = read_apic_id(); | 1634 | boot_cpu_physical_apicid = read_apic_id(); |
1635 | # endif | ||
1322 | #endif | 1636 | #endif |
1323 | physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map); | 1637 | physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map); |
1324 | |||
1325 | setup_local_APIC(); | 1638 | setup_local_APIC(); |
1326 | 1639 | ||
1640 | #ifdef CONFIG_X86_64 | ||
1641 | /* | ||
1642 | * Now enable IO-APICs, actually call clear_IO_APIC | ||
1643 | * We need clear_IO_APIC before enabling vector on BP | ||
1644 | */ | ||
1645 | if (!skip_ioapic_setup && nr_ioapics) | ||
1646 | enable_IO_APIC(); | ||
1647 | #endif | ||
1648 | |||
1327 | #ifdef CONFIG_X86_IO_APIC | 1649 | #ifdef CONFIG_X86_IO_APIC |
1328 | if (!smp_found_config || skip_ioapic_setup || !nr_ioapics) | 1650 | if (!smp_found_config || skip_ioapic_setup || !nr_ioapics) |
1329 | #endif | 1651 | #endif |
1330 | localise_nmi_watchdog(); | 1652 | localise_nmi_watchdog(); |
1331 | end_local_APIC_setup(); | 1653 | end_local_APIC_setup(); |
1654 | |||
1332 | #ifdef CONFIG_X86_IO_APIC | 1655 | #ifdef CONFIG_X86_IO_APIC |
1333 | if (smp_found_config) | 1656 | if (smp_found_config && !skip_ioapic_setup && nr_ioapics) |
1334 | if (!skip_ioapic_setup && nr_ioapics) | 1657 | setup_IO_APIC(); |
1335 | setup_IO_APIC(); | 1658 | # ifdef CONFIG_X86_64 |
1659 | else | ||
1660 | nr_ioapics = 0; | ||
1661 | # endif | ||
1336 | #endif | 1662 | #endif |
1663 | |||
1664 | #ifdef CONFIG_X86_64 | ||
1665 | setup_boot_APIC_clock(); | ||
1666 | check_nmi_watchdog(); | ||
1667 | #else | ||
1337 | setup_boot_clock(); | 1668 | setup_boot_clock(); |
1669 | #endif | ||
1338 | 1670 | ||
1339 | return 0; | 1671 | return 0; |
1340 | } | 1672 | } |
@@ -1348,8 +1680,11 @@ int __init APIC_init_uniprocessor(void) | |||
1348 | */ | 1680 | */ |
1349 | void smp_spurious_interrupt(struct pt_regs *regs) | 1681 | void smp_spurious_interrupt(struct pt_regs *regs) |
1350 | { | 1682 | { |
1351 | unsigned long v; | 1683 | u32 v; |
1352 | 1684 | ||
1685 | #ifdef CONFIG_X86_64 | ||
1686 | exit_idle(); | ||
1687 | #endif | ||
1353 | irq_enter(); | 1688 | irq_enter(); |
1354 | /* | 1689 | /* |
1355 | * Check if this really is a spurious interrupt and ACK it | 1690 | * Check if this really is a spurious interrupt and ACK it |
@@ -1360,10 +1695,14 @@ void smp_spurious_interrupt(struct pt_regs *regs) | |||
1360 | if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f))) | 1695 | if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f))) |
1361 | ack_APIC_irq(); | 1696 | ack_APIC_irq(); |
1362 | 1697 | ||
1698 | #ifdef CONFIG_X86_64 | ||
1699 | add_pda(irq_spurious_count, 1); | ||
1700 | #else | ||
1363 | /* see sw-dev-man vol 3, chapter 7.4.13.5 */ | 1701 | /* see sw-dev-man vol 3, chapter 7.4.13.5 */ |
1364 | printk(KERN_INFO "spurious APIC interrupt on CPU#%d, " | 1702 | printk(KERN_INFO "spurious APIC interrupt on CPU#%d, " |
1365 | "should never happen.\n", smp_processor_id()); | 1703 | "should never happen.\n", smp_processor_id()); |
1366 | __get_cpu_var(irq_stat).irq_spurious_count++; | 1704 | __get_cpu_var(irq_stat).irq_spurious_count++; |
1705 | #endif | ||
1367 | irq_exit(); | 1706 | irq_exit(); |
1368 | } | 1707 | } |
1369 | 1708 | ||
@@ -1372,8 +1711,11 @@ void smp_spurious_interrupt(struct pt_regs *regs) | |||
1372 | */ | 1711 | */ |
1373 | void smp_error_interrupt(struct pt_regs *regs) | 1712 | void smp_error_interrupt(struct pt_regs *regs) |
1374 | { | 1713 | { |
1375 | unsigned long v, v1; | 1714 | u32 v, v1; |
1376 | 1715 | ||
1716 | #ifdef CONFIG_X86_64 | ||
1717 | exit_idle(); | ||
1718 | #endif | ||
1377 | irq_enter(); | 1719 | irq_enter(); |
1378 | /* First tickle the hardware, only then report what went on. -- REW */ | 1720 | /* First tickle the hardware, only then report what went on. -- REW */ |
1379 | v = apic_read(APIC_ESR); | 1721 | v = apic_read(APIC_ESR); |
@@ -1392,7 +1734,7 @@ void smp_error_interrupt(struct pt_regs *regs) | |||
1392 | 6: Received illegal vector | 1734 | 6: Received illegal vector |
1393 | 7: Illegal register address | 1735 | 7: Illegal register address |
1394 | */ | 1736 | */ |
1395 | printk(KERN_DEBUG "APIC error on CPU%d: %02lx(%02lx)\n", | 1737 | printk(KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n", |
1396 | smp_processor_id(), v , v1); | 1738 | smp_processor_id(), v , v1); |
1397 | irq_exit(); | 1739 | irq_exit(); |
1398 | } | 1740 | } |
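smp_error_interrupt() above logs the two ESR snapshots as "%02x(%02x)". A small decoder for such values; only bits 6 and 7 of the comment are visible in this hunk, the remaining names follow the standard local-APIC ESR layout, and the helper itself is ours:

#include <stdio.h>
#include <stdint.h>

/* Bit meanings per the comment in smp_error_interrupt() / the usual ESR layout. */
static const char *const esr_bits[8] = {
        "Send CS error",            "Receive CS error",
        "Send accept error",        "Receive accept error",
        "Reserved",                 "Send illegal vector",
        "Received illegal vector",  "Illegal register address",
};

/* Decode one ESR snapshot as printed by the handler above. */
static void decode_esr(uint32_t v)
{
        int i;

        for (i = 0; i < 8; i++)
                if (v & (1u << i))
                        printf("  bit %d: %s\n", i, esr_bits[i]);
}

int main(void)
{
        decode_esr(0x40);       /* "Received illegal vector" */
        return 0;
}
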
@@ -1565,6 +1907,13 @@ void __cpuinit generic_processor_info(int apicid, int version) | |||
1565 | cpu_set(cpu, cpu_present_map); | 1907 | cpu_set(cpu, cpu_present_map); |
1566 | } | 1908 | } |
1567 | 1909 | ||
1910 | #ifdef CONFIG_X86_64 | ||
1911 | int hard_smp_processor_id(void) | ||
1912 | { | ||
1913 | return read_apic_id(); | ||
1914 | } | ||
1915 | #endif | ||
1916 | |||
1568 | /* | 1917 | /* |
1569 | * Power management | 1918 | * Power management |
1570 | */ | 1919 | */ |
@@ -1640,7 +1989,7 @@ static int lapic_resume(struct sys_device *dev) | |||
1640 | 1989 | ||
1641 | local_irq_save(flags); | 1990 | local_irq_save(flags); |
1642 | 1991 | ||
1643 | #ifdef CONFIG_X86_64 | 1992 | #ifdef HAVE_X2APIC |
1644 | if (x2apic) | 1993 | if (x2apic) |
1645 | enable_x2apic(); | 1994 | enable_x2apic(); |
1646 | else | 1995 | else |
@@ -1702,7 +2051,7 @@ static struct sys_device device_lapic = { | |||
1702 | .cls = &lapic_sysclass, | 2051 | .cls = &lapic_sysclass, |
1703 | }; | 2052 | }; |
1704 | 2053 | ||
1705 | static void __devinit apic_pm_activate(void) | 2054 | static void __cpuinit apic_pm_activate(void) |
1706 | { | 2055 | { |
1707 | apic_pm_state.active = 1; | 2056 | apic_pm_state.active = 1; |
1708 | } | 2057 | } |
@@ -1728,16 +2077,87 @@ static void apic_pm_activate(void) { } | |||
1728 | 2077 | ||
1729 | #endif /* CONFIG_PM */ | 2078 | #endif /* CONFIG_PM */ |
1730 | 2079 | ||
2080 | #ifdef CONFIG_X86_64 | ||
1731 | /* | 2081 | /* |
1732 | * APIC command line parameters | 2082 | * apic_is_clustered_box() -- Check if we can expect good TSC |
2083 | * | ||
2084 | * Thus far, the major user of this is IBM's Summit2 series: | ||
2085 | * | ||
2086 | * Clustered boxes may have unsynced TSC problems if they are | ||
2087 | * multi-chassis. Use available data to take a good guess. | ||
2088 | * If in doubt, go HPET. | ||
1733 | */ | 2089 | */ |
1734 | static int __init parse_lapic(char *arg) | 2090 | __cpuinit int apic_is_clustered_box(void) |
1735 | { | 2091 | { |
1736 | force_enable_local_apic = 1; | 2092 | int i, clusters, zeros; |
1737 | return 0; | 2093 | unsigned id; |
2094 | u16 *bios_cpu_apicid; | ||
2095 | DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS); | ||
2096 | |||
2097 | /* | ||
2098 | * there is not this kind of box with AMD CPU yet. | ||
2099 | * Some AMD box with quadcore cpu and 8 sockets apicid | ||
2100 | * will be [4, 0x23] or [8, 0x27] could be thought to | ||
2101 | * vsmp box still need checking... | ||
2102 | */ | ||
2103 | if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !is_vsmp_box()) | ||
2104 | return 0; | ||
2105 | |||
2106 | bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid); | ||
2107 | bitmap_zero(clustermap, NUM_APIC_CLUSTERS); | ||
2108 | |||
2109 | for (i = 0; i < NR_CPUS; i++) { | ||
2110 | /* are we being called early in kernel startup? */ | ||
2111 | if (bios_cpu_apicid) { | ||
2112 | id = bios_cpu_apicid[i]; | ||
2113 | } | ||
2114 | else if (i < nr_cpu_ids) { | ||
2115 | if (cpu_present(i)) | ||
2116 | id = per_cpu(x86_bios_cpu_apicid, i); | ||
2117 | else | ||
2118 | continue; | ||
2119 | } | ||
2120 | else | ||
2121 | break; | ||
2122 | |||
2123 | if (id != BAD_APICID) | ||
2124 | __set_bit(APIC_CLUSTERID(id), clustermap); | ||
2125 | } | ||
2126 | |||
2127 | /* Problem: Partially populated chassis may not have CPUs in some of | ||
2128 | * the APIC clusters they have been allocated. Only present CPUs have | ||
2129 | * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap. | ||
2130 | * Since clusters are allocated sequentially, count zeros only if | ||
2131 | * they are bounded by ones. | ||
2132 | */ | ||
2133 | clusters = 0; | ||
2134 | zeros = 0; | ||
2135 | for (i = 0; i < NUM_APIC_CLUSTERS; i++) { | ||
2136 | if (test_bit(i, clustermap)) { | ||
2137 | clusters += 1 + zeros; | ||
2138 | zeros = 0; | ||
2139 | } else | ||
2140 | ++zeros; | ||
2141 | } | ||
2142 | |||
2143 | /* ScaleMP vSMPowered boxes have one cluster per board and TSCs are | ||
2144 | * not guaranteed to be synced between boards | ||
2145 | */ | ||
2146 | if (is_vsmp_box() && clusters > 1) | ||
2147 | return 1; | ||
2148 | |||
2149 | /* | ||
2150 | * If clusters > 2, then should be multi-chassis. | ||
2151 | * May have to revisit this when multi-core + hyperthreaded CPUs come | ||
2152 | * out, but AFAIK this will work even for them. | ||
2153 | */ | ||
2154 | return (clusters > 2); | ||
1738 | } | 2155 | } |
1739 | early_param("lapic", parse_lapic); | 2156 | #endif |
1740 | 2157 | ||
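apic_is_clustered_box() above sets one bit per APIC cluster (APIC_CLUSTERID(id), i.e. the APIC ID with its low four within-cluster bits stripped in the usual xAPIC layout) and then counts clusters, letting empty clusters count only when they are bounded by populated ones. A stand-alone sketch of that counting rule for 8-bit APIC IDs (the names and the sample topology are ours):

#include <stdio.h>
#include <stdbool.h>

#define XAPIC_DEST_CPUS_SHIFT 4                 /* low 4 bits: CPU within a cluster */
#define MAX_CLUSTERS          (256 >> XAPIC_DEST_CPUS_SHIFT)

/* Same counting idea as apic_is_clustered_box(): zeros count only when
 * they are bounded by populated clusters (partially populated chassis). */
static int count_clusters(const bool present[MAX_CLUSTERS])
{
        int i, clusters = 0, zeros = 0;

        for (i = 0; i < MAX_CLUSTERS; i++) {
                if (present[i]) {
                        clusters += 1 + zeros;
                        zeros = 0;
                } else {
                        zeros++;
                }
        }
        return clusters;
}

int main(void)
{
        bool present[MAX_CLUSTERS] = { false };

        /* APIC IDs 0x00-0x03, 0x10-0x13 and 0x30-0x33 -> clusters 0, 1 and 3 */
        present[0x00 >> 4] = present[0x10 >> 4] = present[0x30 >> 4] = true;

        /* cluster 2 is empty but bounded by 1 and 3, so it still counts: 4 > 2 */
        printf("clusters = %d -> %s the TSC across the box\n", count_clusters(present),
               count_clusters(present) > 2 ? "do not trust" : "can trust");
        return 0;
}
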
2158 | /* | ||
2159 | * APIC command line parameters | ||
2160 | */ | ||
1741 | static int __init setup_disableapic(char *arg) | 2161 | static int __init setup_disableapic(char *arg) |
1742 | { | 2162 | { |
1743 | disable_apic = 1; | 2163 | disable_apic = 1; |
@@ -1779,7 +2199,6 @@ static int __init apic_set_verbosity(char *arg) | |||
1779 | if (!arg) { | 2199 | if (!arg) { |
1780 | #ifdef CONFIG_X86_64 | 2200 | #ifdef CONFIG_X86_64 |
1781 | skip_ioapic_setup = 0; | 2201 | skip_ioapic_setup = 0; |
1782 | ioapic_force = 1; | ||
1783 | return 0; | 2202 | return 0; |
1784 | #endif | 2203 | #endif |
1785 | return -EINVAL; | 2204 | return -EINVAL; |
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c deleted file mode 100644 index 94ddb69ae15e..000000000000 --- a/arch/x86/kernel/apic_64.c +++ /dev/null | |||
@@ -1,1848 +0,0 @@ | |||
1 | /* | ||
2 | * Local APIC handling, local APIC timers | ||
3 | * | ||
4 | * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com> | ||
5 | * | ||
6 | * Fixes | ||
7 | * Maciej W. Rozycki : Bits for genuine 82489DX APICs; | ||
8 | * thanks to Eric Gilmore | ||
9 | * and Rolf G. Tews | ||
10 | * for testing these extensively. | ||
11 | * Maciej W. Rozycki : Various updates and fixes. | ||
12 | * Mikael Pettersson : Power Management for UP-APIC. | ||
13 | * Pavel Machek and | ||
14 | * Mikael Pettersson : PM converted to driver model. | ||
15 | */ | ||
16 | |||
17 | #include <linux/init.h> | ||
18 | |||
19 | #include <linux/mm.h> | ||
20 | #include <linux/delay.h> | ||
21 | #include <linux/bootmem.h> | ||
22 | #include <linux/interrupt.h> | ||
23 | #include <linux/mc146818rtc.h> | ||
24 | #include <linux/kernel_stat.h> | ||
25 | #include <linux/sysdev.h> | ||
26 | #include <linux/ioport.h> | ||
27 | #include <linux/clockchips.h> | ||
28 | #include <linux/acpi_pmtmr.h> | ||
29 | #include <linux/module.h> | ||
30 | #include <linux/dmar.h> | ||
31 | |||
32 | #include <asm/atomic.h> | ||
33 | #include <asm/smp.h> | ||
34 | #include <asm/mtrr.h> | ||
35 | #include <asm/mpspec.h> | ||
36 | #include <asm/hpet.h> | ||
37 | #include <asm/pgalloc.h> | ||
38 | #include <asm/nmi.h> | ||
39 | #include <asm/idle.h> | ||
40 | #include <asm/proto.h> | ||
41 | #include <asm/timex.h> | ||
42 | #include <asm/apic.h> | ||
43 | #include <asm/i8259.h> | ||
44 | |||
45 | #include <mach_ipi.h> | ||
46 | #include <mach_apic.h> | ||
47 | |||
48 | /* Disable local APIC timer from the kernel commandline or via dmi quirk */ | ||
49 | static int disable_apic_timer __cpuinitdata; | ||
50 | static int apic_calibrate_pmtmr __initdata; | ||
51 | int disable_apic; | ||
52 | int disable_x2apic; | ||
53 | int x2apic; | ||
54 | |||
55 | /* x2apic enabled before OS handover */ | ||
56 | int x2apic_preenabled; | ||
57 | |||
58 | /* Local APIC timer works in C2 */ | ||
59 | int local_apic_timer_c2_ok; | ||
60 | EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok); | ||
61 | |||
62 | /* | ||
63 | * Debug level, exported for io_apic.c | ||
64 | */ | ||
65 | unsigned int apic_verbosity; | ||
66 | |||
67 | /* Have we found an MP table */ | ||
68 | int smp_found_config; | ||
69 | |||
70 | static struct resource lapic_resource = { | ||
71 | .name = "Local APIC", | ||
72 | .flags = IORESOURCE_MEM | IORESOURCE_BUSY, | ||
73 | }; | ||
74 | |||
75 | static unsigned int calibration_result; | ||
76 | |||
77 | static int lapic_next_event(unsigned long delta, | ||
78 | struct clock_event_device *evt); | ||
79 | static void lapic_timer_setup(enum clock_event_mode mode, | ||
80 | struct clock_event_device *evt); | ||
81 | static void lapic_timer_broadcast(cpumask_t mask); | ||
82 | static void apic_pm_activate(void); | ||
83 | |||
84 | /* | ||
85 | * The local apic timer can be used for any function which is CPU local. | ||
86 | */ | ||
87 | static struct clock_event_device lapic_clockevent = { | ||
88 | .name = "lapic", | ||
89 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | ||
90 | | CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY, | ||
91 | .shift = 32, | ||
92 | .set_mode = lapic_timer_setup, | ||
93 | .set_next_event = lapic_next_event, | ||
94 | .broadcast = lapic_timer_broadcast, | ||
95 | .rating = 100, | ||
96 | .irq = -1, | ||
97 | }; | ||
98 | static DEFINE_PER_CPU(struct clock_event_device, lapic_events); | ||
99 | |||
100 | static unsigned long apic_phys; | ||
101 | |||
102 | unsigned long mp_lapic_addr; | ||
103 | |||
104 | /* | ||
105 | * Get the LAPIC version | ||
106 | */ | ||
107 | static inline int lapic_get_version(void) | ||
108 | { | ||
109 | return GET_APIC_VERSION(apic_read(APIC_LVR)); | ||
110 | } | ||
111 | |||
112 | /* | ||
113 | * Check, if the APIC is integrated or a separate chip | ||
114 | */ | ||
115 | static inline int lapic_is_integrated(void) | ||
116 | { | ||
117 | #ifdef CONFIG_X86_64 | ||
118 | return 1; | ||
119 | #else | ||
120 | return APIC_INTEGRATED(lapic_get_version()); | ||
121 | #endif | ||
122 | } | ||
123 | |||
124 | /* | ||
125 | * Check, whether this is a modern or a first generation APIC | ||
126 | */ | ||
127 | static int modern_apic(void) | ||
128 | { | ||
129 | /* AMD systems use old APIC versions, so check the CPU */ | ||
130 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && | ||
131 | boot_cpu_data.x86 >= 0xf) | ||
132 | return 1; | ||
133 | return lapic_get_version() >= 0x14; | ||
134 | } | ||
135 | |||
136 | /* | ||
137 | * Paravirt kernels also might be using these below ops. So we still | ||
138 | * use generic apic_read()/apic_write(), which might be pointing to different | ||
139 | * ops in PARAVIRT case. | ||
140 | */ | ||
141 | void xapic_wait_icr_idle(void) | ||
142 | { | ||
143 | while (apic_read(APIC_ICR) & APIC_ICR_BUSY) | ||
144 | cpu_relax(); | ||
145 | } | ||
146 | |||
147 | u32 safe_xapic_wait_icr_idle(void) | ||
148 | { | ||
149 | u32 send_status; | ||
150 | int timeout; | ||
151 | |||
152 | timeout = 0; | ||
153 | do { | ||
154 | send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY; | ||
155 | if (!send_status) | ||
156 | break; | ||
157 | udelay(100); | ||
158 | } while (timeout++ < 1000); | ||
159 | |||
160 | return send_status; | ||
161 | } | ||
162 | |||
163 | void xapic_icr_write(u32 low, u32 id) | ||
164 | { | ||
165 | apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id)); | ||
166 | apic_write(APIC_ICR, low); | ||
167 | } | ||
168 | |||
169 | u64 xapic_icr_read(void) | ||
170 | { | ||
171 | u32 icr1, icr2; | ||
172 | |||
173 | icr2 = apic_read(APIC_ICR2); | ||
174 | icr1 = apic_read(APIC_ICR); | ||
175 | |||
176 | return icr1 | ((u64)icr2 << 32); | ||
177 | } | ||
178 | |||
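xapic_icr_read()/xapic_icr_write() above show the xAPIC convention: the 64-bit ICR is split across two 32-bit registers and the destination APIC ID sits in ICR2[31:24] (SET_APIC_DEST_FIELD). A small sketch of that packing; the low-word value in the example is illustrative only:

#include <stdio.h>
#include <stdint.h>

#define SET_APIC_DEST_FIELD(x)  ((uint32_t)(x) << 24)   /* destination in ICR2[31:24] */
#define GET_APIC_DEST_FIELD(x)  (((x) >> 24) & 0xff)

/* Mirror of xapic_icr_read()/xapic_icr_write() above: high half carries the dest. */
static uint64_t pack_icr(uint32_t low, uint8_t dest_apicid)
{
        uint32_t icr2 = SET_APIC_DEST_FIELD(dest_apicid);

        return (uint64_t)icr2 << 32 | low;
}

int main(void)
{
        /* e.g. an IPI on vector 0xfd to APIC ID 3 (low word illustrative) */
        uint64_t icr = pack_icr(0xfd, 3);

        printf("icr=%#018llx dest=%u\n", (unsigned long long)icr,
               (unsigned)GET_APIC_DEST_FIELD((uint32_t)(icr >> 32)));
        return 0;
}
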
179 | static struct apic_ops xapic_ops = { | ||
180 | .read = native_apic_mem_read, | ||
181 | .write = native_apic_mem_write, | ||
182 | .icr_read = xapic_icr_read, | ||
183 | .icr_write = xapic_icr_write, | ||
184 | .wait_icr_idle = xapic_wait_icr_idle, | ||
185 | .safe_wait_icr_idle = safe_xapic_wait_icr_idle, | ||
186 | }; | ||
187 | |||
188 | struct apic_ops __read_mostly *apic_ops = &xapic_ops; | ||
189 | EXPORT_SYMBOL_GPL(apic_ops); | ||
190 | |||
191 | static void x2apic_wait_icr_idle(void) | ||
192 | { | ||
193 | /* no need to wait for icr idle in x2apic */ | ||
194 | return; | ||
195 | } | ||
196 | |||
197 | static u32 safe_x2apic_wait_icr_idle(void) | ||
198 | { | ||
199 | /* no need to wait for icr idle in x2apic */ | ||
200 | return 0; | ||
201 | } | ||
202 | |||
203 | void x2apic_icr_write(u32 low, u32 id) | ||
204 | { | ||
205 | wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low); | ||
206 | } | ||
207 | |||
208 | u64 x2apic_icr_read(void) | ||
209 | { | ||
210 | unsigned long val; | ||
211 | |||
212 | rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val); | ||
213 | return val; | ||
214 | } | ||
215 | |||
216 | static struct apic_ops x2apic_ops = { | ||
217 | .read = native_apic_msr_read, | ||
218 | .write = native_apic_msr_write, | ||
219 | .icr_read = x2apic_icr_read, | ||
220 | .icr_write = x2apic_icr_write, | ||
221 | .wait_icr_idle = x2apic_wait_icr_idle, | ||
222 | .safe_wait_icr_idle = safe_x2apic_wait_icr_idle, | ||
223 | }; | ||
224 | |||
225 | /** | ||
226 | * enable_NMI_through_LVT0 - enable NMI through local vector table 0 | ||
227 | */ | ||
228 | void __cpuinit enable_NMI_through_LVT0(void) | ||
229 | { | ||
230 | unsigned int v; | ||
231 | |||
232 | /* unmask and set to NMI */ | ||
233 | v = APIC_DM_NMI; | ||
234 | |||
235 | /* Level triggered for 82489DX (32bit mode) */ | ||
236 | if (!lapic_is_integrated()) | ||
237 | v |= APIC_LVT_LEVEL_TRIGGER; | ||
238 | |||
239 | apic_write(APIC_LVT0, v); | ||
240 | } | ||
241 | |||
242 | /** | ||
243 | * lapic_get_maxlvt - get the maximum number of local vector table entries | ||
244 | */ | ||
245 | int lapic_get_maxlvt(void) | ||
246 | { | ||
247 | unsigned int v; | ||
248 | |||
249 | v = apic_read(APIC_LVR); | ||
250 | /* | ||
251 | * - we always have APIC integrated on 64bit mode | ||
252 | * - 82489DXs do not report # of LVT entries | ||
253 | */ | ||
254 | return APIC_INTEGRATED(GET_APIC_VERSION(v)) ? GET_APIC_MAXLVT(v) : 2; | ||
255 | } | ||
256 | |||
257 | /* | ||
258 | * Local APIC timer | ||
259 | */ | ||
260 | |||
261 | /* Clock divisor */ | ||
262 | #ifdef CONFG_X86_64 | ||
263 | #define APIC_DIVISOR 1 | ||
264 | #else | ||
265 | #define APIC_DIVISOR 16 | ||
266 | #endif | ||
267 | |||
268 | /* | ||
269 | * This function sets up the local APIC timer, with a timeout of | ||
270 | * 'clocks' APIC bus clock. During calibration we actually call | ||
271 | * this function twice on the boot CPU, once with a bogus timeout | ||
272 | * value, second time for real. The other (noncalibrating) CPUs | ||
273 | * call this function only once, with the real, calibrated value. | ||
274 | * | ||
275 | * We do reads before writes even if unnecessary, to get around the | ||
276 | * P5 APIC double write bug. | ||
277 | */ | ||
278 | static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen) | ||
279 | { | ||
280 | unsigned int lvtt_value, tmp_value; | ||
281 | |||
282 | lvtt_value = LOCAL_TIMER_VECTOR; | ||
283 | if (!oneshot) | ||
284 | lvtt_value |= APIC_LVT_TIMER_PERIODIC; | ||
285 | if (!lapic_is_integrated()) | ||
286 | lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV); | ||
287 | |||
288 | if (!irqen) | ||
289 | lvtt_value |= APIC_LVT_MASKED; | ||
290 | |||
291 | apic_write(APIC_LVTT, lvtt_value); | ||
292 | |||
293 | /* | ||
294 | * Divide PICLK by 16 | ||
295 | */ | ||
296 | tmp_value = apic_read(APIC_TDCR); | ||
297 | apic_write(APIC_TDCR, | ||
298 | (tmp_value & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) | | ||
299 | APIC_TDR_DIV_16); | ||
300 | |||
301 | if (!oneshot) | ||
302 | apic_write(APIC_TMICT, clocks / APIC_DIVISOR); | ||
303 | } | ||
304 | |||
305 | /* | ||
306 | * Setup extended LVT, AMD specific (K8, family 10h) | ||
307 | * | ||
308 | * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and | ||
309 | * MCE interrupts are supported. Thus MCE offset must be set to 0. | ||
310 | * | ||
311 | * If mask=1, the LVT entry does not generate interrupts while mask=0 | ||
312 | * enables the vector. See also the BKDGs. | ||
313 | */ | ||
314 | |||
315 | #define APIC_EILVT_LVTOFF_MCE 0 | ||
316 | #define APIC_EILVT_LVTOFF_IBS 1 | ||
317 | |||
318 | static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask) | ||
319 | { | ||
320 | unsigned long reg = (lvt_off << 4) + APIC_EILVT0; | ||
321 | unsigned int v = (mask << 16) | (msg_type << 8) | vector; | ||
322 | |||
323 | apic_write(reg, v); | ||
324 | } | ||
325 | |||
326 | u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask) | ||
327 | { | ||
328 | setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask); | ||
329 | return APIC_EILVT_LVTOFF_MCE; | ||
330 | } | ||
331 | |||
332 | u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask) | ||
333 | { | ||
334 | setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask); | ||
335 | return APIC_EILVT_LVTOFF_IBS; | ||
336 | } | ||
337 | EXPORT_SYMBOL_GPL(setup_APIC_eilvt_ibs); | ||
338 | |||
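setup_APIC_eilvt() above packs mask, message type and vector into one 32-bit value and derives the register address from the extended-LVT offset. A stand-alone sketch of that encoding (the vector in the example is hypothetical):

#include <stdio.h>
#include <stdint.h>

#define APIC_EILVT0             0x500   /* first AMD extended-LVT register */
#define APIC_EILVT_LVTOFF_IBS   1       /* offset used by setup_APIC_eilvt_ibs() */

/* Same packing as setup_APIC_eilvt() above: mask, message type and vector. */
static void eilvt_encode(uint8_t lvt_off, uint8_t vector, uint8_t msg_type,
                         uint8_t mask, uint32_t *reg, uint32_t *val)
{
        *reg = ((uint32_t)lvt_off << 4) + APIC_EILVT0;
        *val = ((uint32_t)mask << 16) | ((uint32_t)msg_type << 8) | vector;
}

int main(void)
{
        uint32_t reg, val;

        /* unmasked NMI-type (4) IBS entry on a hypothetical vector 0xf8 */
        eilvt_encode(APIC_EILVT_LVTOFF_IBS, 0xf8, 4, 0, &reg, &val);
        printf("write %#x to APIC register %#x\n", (unsigned)val, (unsigned)reg);
        return 0;
}
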
339 | /* | ||
340 | * Program the next event, relative to now | ||
341 | */ | ||
342 | static int lapic_next_event(unsigned long delta, | ||
343 | struct clock_event_device *evt) | ||
344 | { | ||
345 | apic_write(APIC_TMICT, delta); | ||
346 | return 0; | ||
347 | } | ||
348 | |||
349 | /* | ||
350 | * Setup the lapic timer in periodic or oneshot mode | ||
351 | */ | ||
352 | static void lapic_timer_setup(enum clock_event_mode mode, | ||
353 | struct clock_event_device *evt) | ||
354 | { | ||
355 | unsigned long flags; | ||
356 | unsigned int v; | ||
357 | |||
358 | /* Lapic used as dummy for broadcast ? */ | ||
359 | if (evt->features & CLOCK_EVT_FEAT_DUMMY) | ||
360 | return; | ||
361 | |||
362 | local_irq_save(flags); | ||
363 | |||
364 | switch (mode) { | ||
365 | case CLOCK_EVT_MODE_PERIODIC: | ||
366 | case CLOCK_EVT_MODE_ONESHOT: | ||
367 | __setup_APIC_LVTT(calibration_result, | ||
368 | mode != CLOCK_EVT_MODE_PERIODIC, 1); | ||
369 | break; | ||
370 | case CLOCK_EVT_MODE_UNUSED: | ||
371 | case CLOCK_EVT_MODE_SHUTDOWN: | ||
372 | v = apic_read(APIC_LVTT); | ||
373 | v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR); | ||
374 | apic_write(APIC_LVTT, v); | ||
375 | break; | ||
376 | case CLOCK_EVT_MODE_RESUME: | ||
377 | /* Nothing to do here */ | ||
378 | break; | ||
379 | } | ||
380 | |||
381 | local_irq_restore(flags); | ||
382 | } | ||
383 | |||
384 | /* | ||
385 | * Local APIC timer broadcast function | ||
386 | */ | ||
387 | static void lapic_timer_broadcast(cpumask_t mask) | ||
388 | { | ||
389 | #ifdef CONFIG_SMP | ||
390 | send_IPI_mask(mask, LOCAL_TIMER_VECTOR); | ||
391 | #endif | ||
392 | } | ||
393 | |||
394 | /* | ||
395 | * Setup the local APIC timer for this CPU. Copy the initialized values | ||
396 | * of the boot CPU and register the clock event in the framework. | ||
397 | */ | ||
398 | static void setup_APIC_timer(void) | ||
399 | { | ||
400 | struct clock_event_device *levt = &__get_cpu_var(lapic_events); | ||
401 | |||
402 | memcpy(levt, &lapic_clockevent, sizeof(*levt)); | ||
403 | levt->cpumask = cpumask_of_cpu(smp_processor_id()); | ||
404 | |||
405 | clockevents_register_device(levt); | ||
406 | } | ||
407 | |||
408 | /* | ||
409 | * In this function we calibrate APIC bus clocks to the external | ||
410 | * timer. Unfortunately we cannot use jiffies and the timer irq | ||
411 | * to calibrate, since some later bootup code depends on getting | ||
412 | * the first irq? Ugh. | ||
413 | * | ||
414 | * We want to do the calibration only once since we | ||
415 | * want to have local timer irqs syncron. CPUs connected | ||
416 | * by the same APIC bus have the very same bus frequency. | ||
417 | * And we want to have irqs off anyways, no accidental | ||
418 | * APIC irq that way. | ||
419 | */ | ||
420 | |||
421 | #define TICK_COUNT 100000000 | ||
422 | |||
423 | static int __init calibrate_APIC_clock(void) | ||
424 | { | ||
425 | unsigned apic, apic_start; | ||
426 | unsigned long tsc, tsc_start; | ||
427 | int result; | ||
428 | |||
429 | local_irq_disable(); | ||
430 | |||
431 | /* | ||
432 | * Put whatever arbitrary (but long enough) timeout | ||
433 | * value into the APIC clock, we just want to get the | ||
434 | * counter running for calibration. | ||
435 | * | ||
436 | * No interrupt enable ! | ||
437 | */ | ||
438 | __setup_APIC_LVTT(250000000, 0, 0); | ||
439 | |||
440 | apic_start = apic_read(APIC_TMCCT); | ||
441 | #ifdef CONFIG_X86_PM_TIMER | ||
442 | if (apic_calibrate_pmtmr && pmtmr_ioport) { | ||
443 | pmtimer_wait(5000); /* 5ms wait */ | ||
444 | apic = apic_read(APIC_TMCCT); | ||
445 | result = (apic_start - apic) * 1000L / 5; | ||
446 | } else | ||
447 | #endif | ||
448 | { | ||
449 | rdtscll(tsc_start); | ||
450 | |||
451 | do { | ||
452 | apic = apic_read(APIC_TMCCT); | ||
453 | rdtscll(tsc); | ||
454 | } while ((tsc - tsc_start) < TICK_COUNT && | ||
455 | (apic_start - apic) < TICK_COUNT); | ||
456 | |||
457 | result = (apic_start - apic) * 1000L * tsc_khz / | ||
458 | (tsc - tsc_start); | ||
459 | } | ||
460 | |||
461 | local_irq_enable(); | ||
462 | |||
463 | printk(KERN_DEBUG "APIC timer calibration result %d\n", result); | ||
464 | |||
465 | printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n", | ||
466 | result / 1000 / 1000, result / 1000 % 1000); | ||
467 | |||
468 | /* Calculate the scaled math multiplication factor */ | ||
469 | lapic_clockevent.mult = div_sc(result, NSEC_PER_SEC, | ||
470 | lapic_clockevent.shift); | ||
471 | lapic_clockevent.max_delta_ns = | ||
472 | clockevent_delta2ns(0x7FFFFF, &lapic_clockevent); | ||
473 | lapic_clockevent.min_delta_ns = | ||
474 | clockevent_delta2ns(0xF, &lapic_clockevent); | ||
475 | |||
476 | calibration_result = (result * APIC_DIVISOR) / HZ; | ||
477 | |||
478 | /* | ||
479 | * Do a sanity check on the APIC calibration result | ||
480 | */ | ||
481 | if (calibration_result < (1000000 / HZ)) { | ||
482 | printk(KERN_WARNING | ||
483 | "APIC frequency too slow, disabling apic timer\n"); | ||
484 | return -1; | ||
485 | } | ||
486 | |||
487 | return 0; | ||
488 | } | ||
489 | |||
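The deleted 64-bit calibrate_APIC_clock() derives the APIC timer frequency from a TSC-timed window: result = apic_ticks * 1000 * tsc_khz / tsc_ticks, then calibration_result = result * APIC_DIVISOR / HZ. A worked stand-alone example with illustrative numbers:

#include <stdio.h>
#include <stdint.h>

#define APIC_DIVISOR    1       /* the (removed) 64-bit value; 32-bit used 16 */
#define HZ              250     /* illustrative kernel tick rate */

/* Same arithmetic as the deleted 64-bit calibrate_APIC_clock() above. */
int main(void)
{
        uint64_t tsc_khz    = 2400000;          /* 2.4 GHz TSC (illustrative) */
        uint64_t tsc_delta  = 240000000;        /* TSC ticks seen -> 100 ms elapsed */
        uint64_t apic_delta = 12500000;         /* APIC timer ticks in the same window */

        /* APIC timer frequency in Hz: ticks / elapsed-ms * 1000 */
        uint64_t result = apic_delta * 1000 * tsc_khz / tsc_delta;

        printf("APIC timer: %llu.%03llu MHz, %llu ticks per jiffy\n",
               (unsigned long long)(result / 1000000),
               (unsigned long long)(result / 1000 % 1000),
               (unsigned long long)(result * APIC_DIVISOR / HZ));
        return 0;
}
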
490 | /* | ||
491 | * Setup the boot APIC | ||
492 | * | ||
493 | * Calibrate and verify the result. | ||
494 | */ | ||
495 | void __init setup_boot_APIC_clock(void) | ||
496 | { | ||
497 | /* | ||
498 | * The local apic timer can be disabled via the kernel | ||
499 | * commandline or from the CPU detection code. Register the lapic | ||
500 | * timer as a dummy clock event source on SMP systems, so the | ||
501 | * broadcast mechanism is used. On UP systems simply ignore it. | ||
502 | */ | ||
503 | if (disable_apic_timer) { | ||
504 | printk(KERN_INFO "Disabling APIC timer\n"); | ||
505 | /* No broadcast on UP ! */ | ||
506 | if (num_possible_cpus() > 1) { | ||
507 | lapic_clockevent.mult = 1; | ||
508 | setup_APIC_timer(); | ||
509 | } | ||
510 | return; | ||
511 | } | ||
512 | |||
513 | apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n" | ||
514 | "calibrating APIC timer ...\n"); | ||
515 | |||
516 | if (calibrate_APIC_clock()) { | ||
517 | /* No broadcast on UP ! */ | ||
518 | if (num_possible_cpus() > 1) | ||
519 | setup_APIC_timer(); | ||
520 | return; | ||
521 | } | ||
522 | |||
523 | /* | ||
524 | * If nmi_watchdog is set to IO_APIC, we need the | ||
525 | * PIT/HPET going. Otherwise register lapic as a dummy | ||
526 | * device. | ||
527 | */ | ||
528 | if (nmi_watchdog != NMI_IO_APIC) | ||
529 | lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY; | ||
530 | else | ||
531 | printk(KERN_WARNING "APIC timer registered as dummy," | ||
532 | " due to nmi_watchdog=%d!\n", nmi_watchdog); | ||
533 | |||
534 | /* Setup the lapic or request the broadcast */ | ||
535 | setup_APIC_timer(); | ||
536 | } | ||
537 | |||
538 | void __cpuinit setup_secondary_APIC_clock(void) | ||
539 | { | ||
540 | setup_APIC_timer(); | ||
541 | } | ||
542 | |||
543 | /* | ||
544 | * The guts of the apic timer interrupt | ||
545 | */ | ||
546 | static void local_apic_timer_interrupt(void) | ||
547 | { | ||
548 | int cpu = smp_processor_id(); | ||
549 | struct clock_event_device *evt = &per_cpu(lapic_events, cpu); | ||
550 | |||
551 | /* | ||
552 | * Normally we should not be here till LAPIC has been initialized but | ||
553 | * in some cases like kdump, its possible that there is a pending LAPIC | ||
554 | * timer interrupt from previous kernel's context and is delivered in | ||
555 | * new kernel the moment interrupts are enabled. | ||
556 | * | ||
557 | * Interrupts are enabled early and LAPIC is setup much later, hence | ||
558 | * its possible that when we get here evt->event_handler is NULL. | ||
559 | * Check for event_handler being NULL and discard the interrupt as | ||
560 | * spurious. | ||
561 | */ | ||
562 | if (!evt->event_handler) { | ||
563 | printk(KERN_WARNING | ||
564 | "Spurious LAPIC timer interrupt on cpu %d\n", cpu); | ||
565 | /* Switch it off */ | ||
566 | lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt); | ||
567 | return; | ||
568 | } | ||
569 | |||
570 | /* | ||
571 | * the NMI deadlock-detector uses this. | ||
572 | */ | ||
573 | #ifdef CONFIG_X86_64 | ||
574 | add_pda(apic_timer_irqs, 1); | ||
575 | #else | ||
576 | per_cpu(irq_stat, cpu).apic_timer_irqs++; | ||
577 | #endif | ||
578 | |||
579 | evt->event_handler(evt); | ||
580 | } | ||
581 | |||
582 | /* | ||
583 | * Local APIC timer interrupt. This is the most natural way for doing | ||
584 | * local interrupts, but local timer interrupts can be emulated by | ||
585 | * broadcast interrupts too. [in case the hw doesn't support APIC timers] | ||
586 | * | ||
587 | * [ if a single-CPU system runs an SMP kernel then we call the local | ||
588 | * interrupt as well. Thus we cannot inline the local irq ... ] | ||
589 | */ | ||
590 | void smp_apic_timer_interrupt(struct pt_regs *regs) | ||
591 | { | ||
592 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
593 | |||
594 | /* | ||
595 | * NOTE! We'd better ACK the irq immediately, | ||
596 | * because timer handling can be slow. | ||
597 | */ | ||
598 | ack_APIC_irq(); | ||
599 | /* | ||
600 | * update_process_times() expects us to have done irq_enter(). | ||
601 | * Besides, if we don't, timer interrupts ignore the global | ||
602 | * interrupt lock, which is the WrongThing (tm) to do. | ||
603 | */ | ||
604 | exit_idle(); | ||
605 | irq_enter(); | ||
606 | local_apic_timer_interrupt(); | ||
607 | irq_exit(); | ||
608 | |||
609 | set_irq_regs(old_regs); | ||
610 | } | ||
611 | |||
612 | int setup_profiling_timer(unsigned int multiplier) | ||
613 | { | ||
614 | return -EINVAL; | ||
615 | } | ||
616 | |||
617 | |||
618 | /* | ||
619 | * Local APIC start and shutdown | ||
620 | */ | ||
621 | |||
622 | /** | ||
623 | * clear_local_APIC - shutdown the local APIC | ||
624 | * | ||
625 | * This is called, when a CPU is disabled and before rebooting, so the state of | ||
626 | * the local APIC has no dangling leftovers. Also used to cleanout any BIOS | ||
627 | * leftovers during boot. | ||
628 | */ | ||
629 | void clear_local_APIC(void) | ||
630 | { | ||
631 | int maxlvt; | ||
632 | u32 v; | ||
633 | |||
634 | /* APIC hasn't been mapped yet */ | ||
635 | if (!apic_phys) | ||
636 | return; | ||
637 | |||
638 | maxlvt = lapic_get_maxlvt(); | ||
639 | /* | ||
640 | * Masking an LVT entry can trigger a local APIC error | ||
641 | * if the vector is zero. Mask LVTERR first to prevent this. | ||
642 | */ | ||
643 | if (maxlvt >= 3) { | ||
644 | v = ERROR_APIC_VECTOR; /* any non-zero vector will do */ | ||
645 | apic_write(APIC_LVTERR, v | APIC_LVT_MASKED); | ||
646 | } | ||
647 | /* | ||
648 | * Careful: we have to set masks only first to deassert | ||
649 | * any level-triggered sources. | ||
650 | */ | ||
651 | v = apic_read(APIC_LVTT); | ||
652 | apic_write(APIC_LVTT, v | APIC_LVT_MASKED); | ||
653 | v = apic_read(APIC_LVT0); | ||
654 | apic_write(APIC_LVT0, v | APIC_LVT_MASKED); | ||
655 | v = apic_read(APIC_LVT1); | ||
656 | apic_write(APIC_LVT1, v | APIC_LVT_MASKED); | ||
657 | if (maxlvt >= 4) { | ||
658 | v = apic_read(APIC_LVTPC); | ||
659 | apic_write(APIC_LVTPC, v | APIC_LVT_MASKED); | ||
660 | } | ||
661 | |||
662 | /* lets not touch this if we didn't frob it */ | ||
663 | #if defined(CONFIG_X86_MCE_P4THERMAL) || defined(X86_MCE_INTEL) | ||
664 | if (maxlvt >= 5) { | ||
665 | v = apic_read(APIC_LVTTHMR); | ||
666 | apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED); | ||
667 | } | ||
668 | #endif | ||
669 | /* | ||
670 | * Clean APIC state for other OSs: | ||
671 | */ | ||
672 | apic_write(APIC_LVTT, APIC_LVT_MASKED); | ||
673 | apic_write(APIC_LVT0, APIC_LVT_MASKED); | ||
674 | apic_write(APIC_LVT1, APIC_LVT_MASKED); | ||
675 | if (maxlvt >= 3) | ||
676 | apic_write(APIC_LVTERR, APIC_LVT_MASKED); | ||
677 | if (maxlvt >= 4) | ||
678 | apic_write(APIC_LVTPC, APIC_LVT_MASKED); | ||
679 | |||
680 | /* Integrated APIC (!82489DX) ? */ | ||
681 | if (lapic_is_integrated()) { | ||
682 | if (maxlvt > 3) | ||
683 | /* Clear ESR due to Pentium errata 3AP and 11AP */ | ||
684 | apic_write(APIC_ESR, 0); | ||
685 | apic_read(APIC_ESR); | ||
686 | } | ||
687 | } | ||
688 | |||
689 | /** | ||
690 | * disable_local_APIC - clear and disable the local APIC | ||
691 | */ | ||
692 | void disable_local_APIC(void) | ||
693 | { | ||
694 | unsigned int value; | ||
695 | |||
696 | clear_local_APIC(); | ||
697 | |||
698 | /* | ||
699 | * Disable APIC (implies clearing of registers | ||
700 | * for 82489DX!). | ||
701 | */ | ||
702 | value = apic_read(APIC_SPIV); | ||
703 | value &= ~APIC_SPIV_APIC_ENABLED; | ||
704 | apic_write(APIC_SPIV, value); | ||
705 | |||
706 | #ifdef CONFIG_X86_32 | ||
707 | /* | ||
708 | * When LAPIC was disabled by the BIOS and enabled by the kernel, | ||
709 | * restore the disabled state. | ||
710 | */ | ||
711 | if (enabled_via_apicbase) { | ||
712 | unsigned int l, h; | ||
713 | |||
714 | rdmsr(MSR_IA32_APICBASE, l, h); | ||
715 | l &= ~MSR_IA32_APICBASE_ENABLE; | ||
716 | wrmsr(MSR_IA32_APICBASE, l, h); | ||
717 | } | ||
718 | #endif | ||
719 | } | ||
720 | |||
721 | /* | ||
722 | * If Linux enabled the LAPIC against the BIOS default, disable it again before | ||
723 | * re-entering the BIOS on shutdown. Otherwise the BIOS may get confused and | ||
724 | * not power off. Additionally, clear all LVT entries before disable_local_APIC | ||
725 | * for the case where Linux didn't enable the LAPIC. | ||
726 | */ | ||
727 | void lapic_shutdown(void) | ||
728 | { | ||
729 | unsigned long flags; | ||
730 | |||
731 | if (!cpu_has_apic) | ||
732 | return; | ||
733 | |||
734 | local_irq_save(flags); | ||
735 | |||
736 | #ifdef CONFIG_X86_32 | ||
737 | if (!enabled_via_apicbase) | ||
738 | clear_local_APIC(); | ||
739 | else | ||
740 | #endif | ||
741 | disable_local_APIC(); | ||
742 | |||
743 | |||
744 | local_irq_restore(flags); | ||
745 | } | ||
746 | |||
747 | /* | ||
748 | * This is to verify that we're looking at a real local APIC. | ||
749 | * Check these against your board if the CPUs aren't getting | ||
750 | * started, for no apparent reason. | ||
751 | */ | ||
752 | int __init verify_local_APIC(void) | ||
753 | { | ||
754 | unsigned int reg0, reg1; | ||
755 | |||
756 | /* | ||
757 | * The version register is read-only in a real APIC. | ||
758 | */ | ||
759 | reg0 = apic_read(APIC_LVR); | ||
760 | apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg0); | ||
761 | apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK); | ||
762 | reg1 = apic_read(APIC_LVR); | ||
763 | apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg1); | ||
764 | |||
765 | /* | ||
766 | * The two version reads above should print the same | ||
767 | * numbers. If the second one is different, then we | ||
768 | * poke at a non-APIC. | ||
769 | */ | ||
770 | if (reg1 != reg0) | ||
771 | return 0; | ||
772 | |||
773 | /* | ||
774 | * Check if the version looks reasonable. | ||
775 | */ | ||
776 | reg1 = GET_APIC_VERSION(reg0); | ||
777 | if (reg1 == 0x00 || reg1 == 0xff) | ||
778 | return 0; | ||
779 | reg1 = lapic_get_maxlvt(); | ||
780 | if (reg1 < 0x02 || reg1 == 0xff) | ||
781 | return 0; | ||
782 | |||
783 | /* | ||
784 | * The ID register is read/write in a real APIC. | ||
785 | */ | ||
786 | reg0 = apic_read(APIC_ID); | ||
787 | apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0); | ||
788 | apic_write(APIC_ID, reg0 ^ APIC_ID_MASK); | ||
789 | reg1 = apic_read(APIC_ID); | ||
790 | apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1); | ||
791 | apic_write(APIC_ID, reg0); | ||
792 | if (reg1 != (reg0 ^ APIC_ID_MASK)) | ||
793 | return 0; | ||
794 | |||
795 | /* | ||
796 | * The next two are just to see if we have sane values. | ||
797 | * They're only really relevant if we're in Virtual Wire | ||
798 | * compatibility mode, but most boxes aren't in that mode anymore. | ||
799 | */ | ||
800 | reg0 = apic_read(APIC_LVT0); | ||
801 | apic_printk(APIC_DEBUG, "Getting LVT0: %x\n", reg0); | ||
802 | reg1 = apic_read(APIC_LVT1); | ||
803 | apic_printk(APIC_DEBUG, "Getting LVT1: %x\n", reg1); | ||
804 | |||
805 | return 1; | ||
806 | } | ||
807 | |||
808 | /** | ||
809 | * sync_Arb_IDs - synchronize APIC bus arbitration IDs | ||
810 | */ | ||
811 | void __init sync_Arb_IDs(void) | ||
812 | { | ||
813 | /* | ||
814 | * Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1. Not | ||
815 | * needed on AMD. | ||
816 | */ | ||
817 | if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD) | ||
818 | return; | ||
819 | |||
820 | /* | ||
821 | * Wait for idle. | ||
822 | */ | ||
823 | apic_wait_icr_idle(); | ||
824 | |||
825 | apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n"); | ||
826 | apic_write(APIC_ICR, APIC_DEST_ALLINC | | ||
827 | APIC_INT_LEVELTRIG | APIC_DM_INIT); | ||
828 | } | ||
829 | |||
830 | /* | ||
831 | * An initial setup of the virtual wire mode. | ||
832 | */ | ||
833 | void __init init_bsp_APIC(void) | ||
834 | { | ||
835 | unsigned int value; | ||
836 | |||
837 | /* | ||
838 | * Don't do the setup now if we have an SMP BIOS as the | ||
839 | * through-I/O-APIC virtual wire mode might be active. | ||
840 | */ | ||
841 | if (smp_found_config || !cpu_has_apic) | ||
842 | return; | ||
843 | |||
844 | /* | ||
845 | * Do not trust the local APIC being empty at bootup. | ||
846 | */ | ||
847 | clear_local_APIC(); | ||
848 | |||
849 | /* | ||
850 | * Enable APIC. | ||
851 | */ | ||
852 | value = apic_read(APIC_SPIV); | ||
853 | value &= ~APIC_VECTOR_MASK; | ||
854 | value |= APIC_SPIV_APIC_ENABLED; | ||
855 | |||
856 | #ifdef CONFIG_X86_32 | ||
857 | /* This bit is reserved on P4/Xeon and should be cleared */ | ||
858 | if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && | ||
859 | (boot_cpu_data.x86 == 15)) | ||
860 | value &= ~APIC_SPIV_FOCUS_DISABLED; | ||
861 | else | ||
862 | #endif | ||
863 | value |= APIC_SPIV_FOCUS_DISABLED; | ||
864 | value |= SPURIOUS_APIC_VECTOR; | ||
865 | apic_write(APIC_SPIV, value); | ||
866 | |||
867 | /* | ||
868 | * Set up the virtual wire mode. | ||
869 | */ | ||
870 | apic_write(APIC_LVT0, APIC_DM_EXTINT); | ||
871 | value = APIC_DM_NMI; | ||
872 | if (!lapic_is_integrated()) /* 82489DX */ | ||
873 | value |= APIC_LVT_LEVEL_TRIGGER; | ||
874 | apic_write(APIC_LVT1, value); | ||
875 | } | ||
876 | |||
877 | static void __cpuinit lapic_setup_esr(void) | ||
878 | { | ||
879 | unsigned long oldvalue, value, maxlvt; | ||
880 | if (lapic_is_integrated()) { | ||
881 | if (esr_disable) { | ||
882 | /* | ||
883 | * Something untraceable is creating bad interrupts on | ||
884 | * secondary quads ... for the moment, just leave the | ||
885 | * ESR disabled - we can't do anything useful with the | ||
886 | * errors anyway - mbligh | ||
887 | */ | ||
888 | printk(KERN_INFO "Leaving ESR disabled.\n"); | ||
889 | return; | ||
890 | } | ||
891 | /* !82489DX */ | ||
892 | maxlvt = lapic_get_maxlvt(); | ||
893 | if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ | ||
894 | apic_write(APIC_ESR, 0); | ||
895 | oldvalue = apic_read(APIC_ESR); | ||
896 | |||
897 | /* enables sending errors */ | ||
898 | value = ERROR_APIC_VECTOR; | ||
899 | apic_write(APIC_LVTERR, value); | ||
900 | /* | ||
901 | * spec says clear errors after enabling vector. | ||
902 | */ | ||
903 | if (maxlvt > 3) | ||
904 | apic_write(APIC_ESR, 0); | ||
905 | value = apic_read(APIC_ESR); | ||
906 | if (value != oldvalue) | ||
907 | apic_printk(APIC_VERBOSE, "ESR value before enabling " | ||
908 | "vector: 0x%08lx after: 0x%08lx\n", | ||
909 | oldvalue, value); | ||
910 | } else { | ||
911 | printk(KERN_INFO "No ESR for 82489DX.\n"); | ||
912 | } | ||
913 | } | ||
914 | |||
915 | |||
916 | /** | ||
917 | * setup_local_APIC - setup the local APIC | ||
918 | */ | ||
919 | void __cpuinit setup_local_APIC(void) | ||
920 | { | ||
921 | unsigned int value; | ||
922 | int i, j; | ||
923 | |||
924 | preempt_disable(); | ||
925 | value = apic_read(APIC_LVR); | ||
926 | |||
927 | BUILD_BUG_ON((SPURIOUS_APIC_VECTOR & 0x0f) != 0x0f); | ||
928 | |||
929 | /* | ||
930 | * Double-check whether this APIC is really registered. | ||
931 | * This is meaningless in clustered apic mode, so we skip it. | ||
932 | */ | ||
933 | if (!apic_id_registered()) | ||
934 | BUG(); | ||
935 | |||
936 | /* | ||
937 | * Intel recommends to set DFR, LDR and TPR before enabling | ||
938 | * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel | ||
939 | * document number 292116). So here it goes... | ||
940 | */ | ||
941 | init_apic_ldr(); | ||
942 | |||
943 | /* | ||
944 | * Set Task Priority to 'accept all'. We never change this | ||
945 | * later on. | ||
946 | */ | ||
947 | value = apic_read(APIC_TASKPRI); | ||
948 | value &= ~APIC_TPRI_MASK; | ||
949 | apic_write(APIC_TASKPRI, value); | ||
950 | |||
951 | /* | ||
952 | * After a crash, we no longer service the interrupts and a pending | ||
953 | * interrupt from previous kernel might still have ISR bit set. | ||
954 | * | ||
955 | * Most probably the CPU has serviced that pending interrupt by now, but | ||
956 | * it might not have done the ack_APIC_irq() because it thought the | ||
957 | * interrupt came from the i8259 as an ExtInt. The LAPIC did not get an | ||
958 | * EOI, so it does not clear the ISR bit and the CPU thinks it has already | ||
959 | * serviced the interrupt. Hence a vector might get locked. This was noticed | ||
960 | * for the timer irq (vector 0x31). Issue an extra EOI to clear the ISR. | ||
961 | */ | ||
962 | for (i = APIC_ISR_NR - 1; i >= 0; i--) { | ||
963 | value = apic_read(APIC_ISR + i*0x10); | ||
964 | for (j = 31; j >= 0; j--) { | ||
965 | if (value & (1<<j)) | ||
966 | ack_APIC_irq(); | ||
967 | } | ||
968 | } | ||
969 | |||
970 | /* | ||
971 | * Now that we are all set up, enable the APIC | ||
972 | */ | ||
973 | value = apic_read(APIC_SPIV); | ||
974 | value &= ~APIC_VECTOR_MASK; | ||
975 | /* | ||
976 | * Enable APIC | ||
977 | */ | ||
978 | value |= APIC_SPIV_APIC_ENABLED; | ||
979 | |||
980 | /* We always use processor focus */ | ||
981 | |||
982 | /* | ||
983 | * Set spurious IRQ vector | ||
984 | */ | ||
985 | value |= SPURIOUS_APIC_VECTOR; | ||
986 | apic_write(APIC_SPIV, value); | ||
987 | |||
988 | /* | ||
989 | * Set up LVT0, LVT1: | ||
990 | * | ||
991 | * set up through-local-APIC on the BP's LINT0. This is not | ||
992 | * strictly necessary in pure symmetric-IO mode, but sometimes | ||
993 | * we delegate interrupts to the 8259A. | ||
994 | */ | ||
995 | /* | ||
996 | * TODO: set up through-local-APIC from through-I/O-APIC? --macro | ||
997 | */ | ||
998 | value = apic_read(APIC_LVT0) & APIC_LVT_MASKED; | ||
999 | if (!smp_processor_id() && !value) { | ||
1000 | value = APIC_DM_EXTINT; | ||
1001 | apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", | ||
1002 | smp_processor_id()); | ||
1003 | } else { | ||
1004 | value = APIC_DM_EXTINT | APIC_LVT_MASKED; | ||
1005 | apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n", | ||
1006 | smp_processor_id()); | ||
1007 | } | ||
1008 | apic_write(APIC_LVT0, value); | ||
1009 | |||
1010 | /* | ||
1011 | * only the BP should see the LINT1 NMI signal, obviously. | ||
1012 | */ | ||
1013 | if (!smp_processor_id()) | ||
1014 | value = APIC_DM_NMI; | ||
1015 | else | ||
1016 | value = APIC_DM_NMI | APIC_LVT_MASKED; | ||
1017 | apic_write(APIC_LVT1, value); | ||
1018 | preempt_enable(); | ||
1019 | } | ||
1020 | |||
1021 | void __cpuinit end_local_APIC_setup(void) | ||
1022 | { | ||
1023 | lapic_setup_esr(); | ||
1024 | |||
1025 | #ifdef CONFIG_X86_32 | ||
1026 | { | ||
1027 | unsigned int value; | ||
1028 | /* Disable the local apic timer */ | ||
1029 | value = apic_read(APIC_LVTT); | ||
1030 | value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR); | ||
1031 | apic_write(APIC_LVTT, value); | ||
1032 | } | ||
1033 | #endif | ||
1034 | |||
1035 | setup_apic_nmi_watchdog(NULL); | ||
1036 | apic_pm_activate(); | ||
1037 | } | ||
1038 | |||
1039 | void check_x2apic(void) | ||
1040 | { | ||
1041 | int msr, msr2; | ||
1042 | |||
1043 | rdmsr(MSR_IA32_APICBASE, msr, msr2); | ||
1044 | |||
1045 | if (msr & X2APIC_ENABLE) { | ||
1046 | printk("x2apic enabled by BIOS, switching to x2apic ops\n"); | ||
1047 | x2apic_preenabled = x2apic = 1; | ||
1048 | apic_ops = &x2apic_ops; | ||
1049 | } | ||
1050 | } | ||
1051 | |||
1052 | void enable_x2apic(void) | ||
1053 | { | ||
1054 | int msr, msr2; | ||
1055 | |||
1056 | rdmsr(MSR_IA32_APICBASE, msr, msr2); | ||
1057 | if (!(msr & X2APIC_ENABLE)) { | ||
1058 | printk("Enabling x2apic\n"); | ||
1059 | wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0); | ||
1060 | } | ||
1061 | } | ||
1062 | |||
1063 | void enable_IR_x2apic(void) | ||
1064 | { | ||
1065 | #ifdef CONFIG_INTR_REMAP | ||
1066 | int ret; | ||
1067 | unsigned long flags; | ||
1068 | |||
1069 | if (!cpu_has_x2apic) | ||
1070 | return; | ||
1071 | |||
1072 | if (!x2apic_preenabled && disable_x2apic) { | ||
1073 | printk(KERN_INFO | ||
1074 | "Skipped enabling x2apic and Interrupt-remapping " | ||
1075 | "because of nox2apic\n"); | ||
1076 | return; | ||
1077 | } | ||
1078 | |||
1079 | if (x2apic_preenabled && disable_x2apic) | ||
1080 | panic("Bios already enabled x2apic, can't enforce nox2apic"); | ||
1081 | |||
1082 | if (!x2apic_preenabled && skip_ioapic_setup) { | ||
1083 | printk(KERN_INFO | ||
1084 | "Skipped enabling x2apic and Interrupt-remapping " | ||
1085 | "because of skipping io-apic setup\n"); | ||
1086 | return; | ||
1087 | } | ||
1088 | |||
1089 | ret = dmar_table_init(); | ||
1090 | if (ret) { | ||
1091 | printk(KERN_INFO | ||
1092 | "dmar_table_init() failed with %d:\n", ret); | ||
1093 | |||
1094 | if (x2apic_preenabled) | ||
1095 | panic("x2apic enabled by bios. But IR enabling failed"); | ||
1096 | else | ||
1097 | printk(KERN_INFO | ||
1098 | "Not enabling x2apic,Intr-remapping\n"); | ||
1099 | return; | ||
1100 | } | ||
1101 | |||
1102 | local_irq_save(flags); | ||
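| /* Quiesce the legacy 8259A and the IO-APIC entries while interrupt remapping is switched on */ | ||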
1103 | mask_8259A(); | ||
1104 | save_mask_IO_APIC_setup(); | ||
1105 | |||
1106 | ret = enable_intr_remapping(1); | ||
1107 | |||
1108 | if (ret && x2apic_preenabled) { | ||
1109 | local_irq_restore(flags); | ||
1110 | panic("x2apic enabled by bios. But IR enabling failed"); | ||
1111 | } | ||
1112 | |||
1113 | if (ret) | ||
1114 | goto end; | ||
1115 | |||
1116 | if (!x2apic) { | ||
1117 | x2apic = 1; | ||
1118 | apic_ops = &x2apic_ops; | ||
1119 | enable_x2apic(); | ||
1120 | } | ||
1121 | end: | ||
1122 | if (ret) | ||
1123 | /* | ||
1124 | * IR enabling failed | ||
1125 | */ | ||
1126 | restore_IO_APIC_setup(); | ||
1127 | else | ||
1128 | reinit_intr_remapped_IO_APIC(x2apic_preenabled); | ||
1129 | |||
1130 | unmask_8259A(); | ||
1131 | local_irq_restore(flags); | ||
1132 | |||
1133 | if (!ret) { | ||
1134 | if (!x2apic_preenabled) | ||
1135 | printk(KERN_INFO | ||
1136 | "Enabled x2apic and interrupt-remapping\n"); | ||
1137 | else | ||
1138 | printk(KERN_INFO | ||
1139 | "Enabled Interrupt-remapping\n"); | ||
1140 | } else | ||
1141 | printk(KERN_ERR | ||
1142 | "Failed to enable Interrupt-remapping and x2apic\n"); | ||
1143 | #else | ||
1144 | if (!cpu_has_x2apic) | ||
1145 | return; | ||
1146 | |||
1147 | if (x2apic_preenabled) | ||
1148 | panic("x2apic enabled prior OS handover," | ||
1149 | " enable CONFIG_INTR_REMAP"); | ||
1150 | |||
1151 | printk(KERN_INFO "Enable CONFIG_INTR_REMAP for enabling intr-remapping " | ||
1152 | " and x2apic\n"); | ||
1153 | #endif | ||
1154 | |||
1155 | return; | ||
1156 | } | ||
1157 | |||
1158 | /* | ||
1159 | * Detect and enable local APICs on non-SMP boards. | ||
1160 | * Original code written by Keir Fraser. | ||
1161 | * On AMD64 we trust the BIOS - if it says no APIC it is likely | ||
1162 | * not correctly set up (usually the APIC timer won't work etc.) | ||
1163 | */ | ||
1164 | static int __init detect_init_APIC(void) | ||
1165 | { | ||
1166 | if (!cpu_has_apic) { | ||
1167 | printk(KERN_INFO "No local APIC present\n"); | ||
1168 | return -1; | ||
1169 | } | ||
1170 | |||
1171 | mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; | ||
1172 | boot_cpu_physical_apicid = 0; | ||
1173 | return 0; | ||
1174 | } | ||
1175 | |||
1176 | void __init early_init_lapic_mapping(void) | ||
1177 | { | ||
1178 | unsigned long phys_addr; | ||
1179 | |||
1180 | /* | ||
1181 | * If no local APIC can be found then bail out: | ||
1182 | * it means there is neither an MP table nor a MADT. | ||
1183 | */ | ||
1184 | if (!smp_found_config) | ||
1185 | return; | ||
1186 | |||
1187 | phys_addr = mp_lapic_addr; | ||
1188 | |||
1189 | set_fixmap_nocache(FIX_APIC_BASE, phys_addr); | ||
1190 | apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n", | ||
1191 | APIC_BASE, phys_addr); | ||
1192 | |||
1193 | /* | ||
1194 | * Fetch the APIC ID of the BSP in case we have a | ||
1195 | * default configuration (or the MP table is broken). | ||
1196 | */ | ||
1197 | boot_cpu_physical_apicid = read_apic_id(); | ||
1198 | } | ||
1199 | |||
1200 | /** | ||
1201 | * init_apic_mappings - initialize APIC mappings | ||
1202 | */ | ||
1203 | void __init init_apic_mappings(void) | ||
1204 | { | ||
1205 | if (x2apic) { | ||
1206 | boot_cpu_physical_apicid = read_apic_id(); | ||
1207 | return; | ||
1208 | } | ||
1209 | |||
1210 | /* | ||
1211 | * If no local APIC can be found then set up a fake all | ||
1212 | * zeroes page to simulate the local APIC and another | ||
1213 | * one for the IO-APIC. | ||
1214 | */ | ||
1215 | if (!smp_found_config && detect_init_APIC()) { | ||
1216 | apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE); | ||
1217 | apic_phys = __pa(apic_phys); | ||
1218 | } else | ||
1219 | apic_phys = mp_lapic_addr; | ||
1220 | |||
1221 | set_fixmap_nocache(FIX_APIC_BASE, apic_phys); | ||
1222 | apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n", | ||
1223 | APIC_BASE, apic_phys); | ||
1224 | |||
1225 | /* | ||
1226 | * Fetch the APIC ID of the BSP in case we have a | ||
1227 | * default configuration (or the MP table is broken). | ||
1228 | */ | ||
1229 | boot_cpu_physical_apicid = read_apic_id(); | ||
1230 | } | ||
1231 | |||
1232 | /* | ||
1233 | * This initializes the IO-APIC and APIC hardware if this is | ||
1234 | * a UP kernel. | ||
1235 | */ | ||
1236 | int apic_version[MAX_APICS]; | ||
1237 | |||
1238 | int __init APIC_init_uniprocessor(void) | ||
1239 | { | ||
1240 | if (disable_apic) { | ||
1241 | printk(KERN_INFO "Apic disabled\n"); | ||
1242 | return -1; | ||
1243 | } | ||
1244 | if (!cpu_has_apic) { | ||
1245 | disable_apic = 1; | ||
1246 | printk(KERN_INFO "Apic disabled by BIOS\n"); | ||
1247 | return -1; | ||
1248 | } | ||
1249 | |||
1250 | enable_IR_x2apic(); | ||
1251 | setup_apic_routing(); | ||
1252 | |||
1253 | verify_local_APIC(); | ||
1254 | |||
1255 | connect_bsp_APIC(); | ||
1256 | |||
1257 | physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map); | ||
1258 | apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid)); | ||
1259 | |||
1260 | setup_local_APIC(); | ||
1261 | |||
1262 | /* | ||
1263 | * Now enable IO-APICs, actually call clear_IO_APIC | ||
1264 | * We need clear_IO_APIC before enabling vector on BP | ||
1265 | */ | ||
1266 | if (!skip_ioapic_setup && nr_ioapics) | ||
1267 | enable_IO_APIC(); | ||
1268 | |||
1269 | if (!smp_found_config || skip_ioapic_setup || !nr_ioapics) | ||
1270 | localise_nmi_watchdog(); | ||
1271 | end_local_APIC_setup(); | ||
1272 | |||
1273 | if (smp_found_config && !skip_ioapic_setup && nr_ioapics) | ||
1274 | setup_IO_APIC(); | ||
1275 | else | ||
1276 | nr_ioapics = 0; | ||
1277 | setup_boot_APIC_clock(); | ||
1278 | check_nmi_watchdog(); | ||
1279 | return 0; | ||
1280 | } | ||
1281 | |||
1282 | /* | ||
1283 | * Local APIC interrupts | ||
1284 | */ | ||
1285 | |||
1286 | /* | ||
1287 | * This interrupt should _never_ happen with our APIC/SMP architecture | ||
1288 | */ | ||
1289 | asmlinkage void smp_spurious_interrupt(void) | ||
1290 | { | ||
1291 | unsigned int v; | ||
1292 | exit_idle(); | ||
1293 | irq_enter(); | ||
1294 | /* | ||
1295 | * Check if this really is a spurious interrupt and ACK it | ||
1296 | * if it is a vectored one. Just in case... | ||
1297 | * Spurious interrupts should not be ACKed. | ||
1298 | */ | ||
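| /* The ISR is 8 32-bit registers spaced 0x10 apart: (vec & ~0x1f) >> 1 == (vec >> 5) * 0x10 */ | ||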
1299 | v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1)); | ||
1300 | if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f))) | ||
1301 | ack_APIC_irq(); | ||
1302 | |||
1303 | add_pda(irq_spurious_count, 1); | ||
1304 | irq_exit(); | ||
1305 | } | ||
1306 | |||
1307 | /* | ||
1308 | * This interrupt should never happen with our APIC/SMP architecture | ||
1309 | */ | ||
1310 | asmlinkage void smp_error_interrupt(void) | ||
1311 | { | ||
1312 | unsigned int v, v1; | ||
1313 | |||
1314 | exit_idle(); | ||
1315 | irq_enter(); | ||
1316 | /* First tickle the hardware, only then report what went on. -- REW */ | ||
1317 | v = apic_read(APIC_ESR); | ||
1318 | apic_write(APIC_ESR, 0); | ||
1319 | v1 = apic_read(APIC_ESR); | ||
1320 | ack_APIC_irq(); | ||
1321 | atomic_inc(&irq_err_count); | ||
1322 | |||
1323 | /* Here is what the APIC error bits mean: | ||
1324 | 0: Send CS error | ||
1325 | 1: Receive CS error | ||
1326 | 2: Send accept error | ||
1327 | 3: Receive accept error | ||
1328 | 4: Reserved | ||
1329 | 5: Send illegal vector | ||
1330 | 6: Received illegal vector | ||
1331 | 7: Illegal register address | ||
1332 | */ | ||
1333 | printk(KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n", | ||
1334 | smp_processor_id(), v, v1); | ||
1335 | irq_exit(); | ||
1336 | } | ||
1337 | |||
1338 | /** | ||
1339 | * connect_bsp_APIC - attach the APIC to the interrupt system | ||
1340 | */ | ||
1341 | void __init connect_bsp_APIC(void) | ||
1342 | { | ||
1343 | #ifdef CONFIG_X86_32 | ||
1344 | if (pic_mode) { | ||
1345 | /* | ||
1346 | * Do not trust the local APIC being empty at bootup. | ||
1347 | */ | ||
1348 | clear_local_APIC(); | ||
1349 | /* | ||
1350 | * PIC mode, enable APIC mode in the IMCR, i.e. connect BSP's | ||
1351 | * local APIC to INT and NMI lines. | ||
1352 | */ | ||
1353 | apic_printk(APIC_VERBOSE, "leaving PIC mode, " | ||
1354 | "enabling APIC mode.\n"); | ||
1355 | outb(0x70, 0x22); | ||
1356 | outb(0x01, 0x23); | ||
1357 | } | ||
1358 | #endif | ||
1359 | enable_apic_mode(); | ||
1360 | } | ||
1361 | |||
1362 | /** | ||
1363 | * disconnect_bsp_APIC - detach the APIC from the interrupt system | ||
1364 | * @virt_wire_setup: indicates whether virtual wire mode is selected | ||
1365 | * | ||
1366 | * Virtual wire mode is necessary to deliver legacy interrupts even when the | ||
1367 | * APIC is disabled. | ||
1368 | */ | ||
1369 | void disconnect_bsp_APIC(int virt_wire_setup) | ||
1370 | { | ||
1371 | unsigned int value; | ||
1372 | |||
1373 | #ifdef CONFIG_X86_32 | ||
1374 | if (pic_mode) { | ||
1375 | /* | ||
1376 | * Put the board back into PIC mode (has an effect only on | ||
1377 | * certain older boards). Note that APIC interrupts, including | ||
1378 | * IPIs, won't work beyond this point! The only exceptions are | ||
1379 | * INIT IPIs. | ||
1380 | */ | ||
1381 | apic_printk(APIC_VERBOSE, "disabling APIC mode, " | ||
1382 | "entering PIC mode.\n"); | ||
1383 | outb(0x70, 0x22); | ||
1384 | outb(0x00, 0x23); | ||
1385 | return; | ||
1386 | } | ||
1387 | #endif | ||
1388 | |||
1389 | /* Go back to Virtual Wire compatibility mode */ | ||
1390 | |||
1391 | /* For the spurious interrupt use vector F, and enable it */ | ||
1392 | value = apic_read(APIC_SPIV); | ||
1393 | value &= ~APIC_VECTOR_MASK; | ||
1394 | value |= APIC_SPIV_APIC_ENABLED; | ||
1395 | value |= 0xf; | ||
1396 | apic_write(APIC_SPIV, value); | ||
1397 | |||
1398 | if (!virt_wire_setup) { | ||
1399 | /* | ||
1400 | * For LVT0 make it edge triggered, active high, | ||
1401 | * external and enabled | ||
1402 | */ | ||
1403 | value = apic_read(APIC_LVT0); | ||
1404 | value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING | | ||
1405 | APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR | | ||
1406 | APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED); | ||
1407 | value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING; | ||
1408 | value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT); | ||
1409 | apic_write(APIC_LVT0, value); | ||
1410 | } else { | ||
1411 | /* Disable LVT0 */ | ||
1412 | apic_write(APIC_LVT0, APIC_LVT_MASKED); | ||
1413 | } | ||
1414 | |||
1415 | /* | ||
1416 | * For LVT1 make it edge triggered, active high, | ||
1417 | * nmi and enabled | ||
1418 | */ | ||
1419 | value = apic_read(APIC_LVT1); | ||
1420 | value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING | | ||
1421 | APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR | | ||
1422 | APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED); | ||
1423 | value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING; | ||
1424 | value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI); | ||
1425 | apic_write(APIC_LVT1, value); | ||
1426 | } | ||
1427 | |||
1428 | void __cpuinit generic_processor_info(int apicid, int version) | ||
1429 | { | ||
1430 | int cpu; | ||
1431 | cpumask_t tmp_map; | ||
1432 | |||
1433 | /* | ||
1434 | * Validate version | ||
1435 | */ | ||
1436 | if (version == 0x0) { | ||
1437 | printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! " | ||
1438 | "fixing up to 0x10. (tell your hw vendor)\n", | ||
1439 | version); | ||
1440 | version = 0x10; | ||
1441 | } | ||
1442 | apic_version[apicid] = version; | ||
1443 | |||
1444 | if (num_processors >= NR_CPUS) { | ||
1445 | printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached." | ||
1446 | " Processor ignored.\n", NR_CPUS); | ||
1447 | return; | ||
1448 | } | ||
1449 | |||
1450 | num_processors++; | ||
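| /* pick the lowest logical CPU number that is not yet marked present */ | ||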
1451 | cpus_complement(tmp_map, cpu_present_map); | ||
1452 | cpu = first_cpu(tmp_map); | ||
1453 | |||
1454 | physid_set(apicid, phys_cpu_present_map); | ||
1455 | if (apicid == boot_cpu_physical_apicid) { | ||
1456 | /* | ||
1457 | * x86_bios_cpu_apicid is required to have processors listed | ||
1458 | * in same order as logical cpu numbers. Hence the first | ||
1459 | * entry is BSP, and so on. | ||
1460 | */ | ||
1461 | cpu = 0; | ||
1462 | } | ||
1463 | if (apicid > max_physical_apicid) | ||
1464 | max_physical_apicid = apicid; | ||
1465 | |||
1466 | #ifdef CONFIG_X86_32 | ||
1467 | /* | ||
1468 | * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y | ||
1469 | * but we need to work out other dependencies like SMP_SUSPEND etc. | ||
1470 | * before this can be done without some confusion. | ||
1471 | * if (CPU_HOTPLUG_ENABLED || num_processors > 8) | ||
1472 | * - Ashok Raj <ashok.raj@intel.com> | ||
1473 | */ | ||
1474 | if (max_physical_apicid >= 8) { | ||
1475 | switch (boot_cpu_data.x86_vendor) { | ||
1476 | case X86_VENDOR_INTEL: | ||
1477 | if (!APIC_XAPIC(version)) { | ||
1478 | def_to_bigsmp = 0; | ||
1479 | break; | ||
1480 | } | ||
1481 | /* If P4 and above, fall through */ | ||
1482 | case X86_VENDOR_AMD: | ||
1483 | def_to_bigsmp = 1; | ||
1484 | } | ||
1485 | } | ||
1486 | #endif | ||
1487 | |||
1488 | #if defined(CONFIG_X86_SMP) || defined(CONFIG_X86_64) | ||
1489 | /* are we being called early in kernel startup? */ | ||
1490 | if (early_per_cpu_ptr(x86_cpu_to_apicid)) { | ||
1491 | u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid); | ||
1492 | u16 *bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid); | ||
1493 | |||
1494 | cpu_to_apicid[cpu] = apicid; | ||
1495 | bios_cpu_apicid[cpu] = apicid; | ||
1496 | } else { | ||
1497 | per_cpu(x86_cpu_to_apicid, cpu) = apicid; | ||
1498 | per_cpu(x86_bios_cpu_apicid, cpu) = apicid; | ||
1499 | } | ||
1500 | #endif | ||
1501 | |||
1502 | cpu_set(cpu, cpu_possible_map); | ||
1503 | cpu_set(cpu, cpu_present_map); | ||
1504 | } | ||
1505 | |||
1506 | int hard_smp_processor_id(void) | ||
1507 | { | ||
1508 | return read_apic_id(); | ||
1509 | } | ||
1510 | |||
1511 | /* | ||
1512 | * Power management | ||
1513 | */ | ||
1514 | #ifdef CONFIG_PM | ||
1515 | |||
1516 | static struct { | ||
1517 | /* | ||
1518 | * 'active' is true if the local APIC was enabled by us and | ||
1519 | * not the BIOS; this signifies that we are also responsible | ||
1520 | * for disabling it before entering apm/acpi suspend | ||
1521 | */ | ||
1522 | int active; | ||
1523 | /* r/w apic fields */ | ||
1524 | unsigned int apic_id; | ||
1525 | unsigned int apic_taskpri; | ||
1526 | unsigned int apic_ldr; | ||
1527 | unsigned int apic_dfr; | ||
1528 | unsigned int apic_spiv; | ||
1529 | unsigned int apic_lvtt; | ||
1530 | unsigned int apic_lvtpc; | ||
1531 | unsigned int apic_lvt0; | ||
1532 | unsigned int apic_lvt1; | ||
1533 | unsigned int apic_lvterr; | ||
1534 | unsigned int apic_tmict; | ||
1535 | unsigned int apic_tdcr; | ||
1536 | unsigned int apic_thmr; | ||
1537 | } apic_pm_state; | ||
1538 | |||
1539 | static int lapic_suspend(struct sys_device *dev, pm_message_t state) | ||
1540 | { | ||
1541 | unsigned long flags; | ||
1542 | int maxlvt; | ||
1543 | |||
1544 | if (!apic_pm_state.active) | ||
1545 | return 0; | ||
1546 | |||
1547 | maxlvt = lapic_get_maxlvt(); | ||
1548 | |||
1549 | apic_pm_state.apic_id = apic_read(APIC_ID); | ||
1550 | apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI); | ||
1551 | apic_pm_state.apic_ldr = apic_read(APIC_LDR); | ||
1552 | apic_pm_state.apic_dfr = apic_read(APIC_DFR); | ||
1553 | apic_pm_state.apic_spiv = apic_read(APIC_SPIV); | ||
1554 | apic_pm_state.apic_lvtt = apic_read(APIC_LVTT); | ||
1555 | if (maxlvt >= 4) | ||
1556 | apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC); | ||
1557 | apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0); | ||
1558 | apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1); | ||
1559 | apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR); | ||
1560 | apic_pm_state.apic_tmict = apic_read(APIC_TMICT); | ||
1561 | apic_pm_state.apic_tdcr = apic_read(APIC_TDCR); | ||
1562 | #if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL) | ||
1563 | if (maxlvt >= 5) | ||
1564 | apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR); | ||
1565 | #endif | ||
1566 | |||
1567 | local_irq_save(flags); | ||
1568 | disable_local_APIC(); | ||
1569 | local_irq_restore(flags); | ||
1570 | return 0; | ||
1571 | } | ||
1572 | |||
1573 | static int lapic_resume(struct sys_device *dev) | ||
1574 | { | ||
1575 | unsigned int l, h; | ||
1576 | unsigned long flags; | ||
1577 | int maxlvt; | ||
1578 | |||
1579 | if (!apic_pm_state.active) | ||
1580 | return 0; | ||
1581 | |||
1582 | maxlvt = lapic_get_maxlvt(); | ||
1583 | |||
1584 | local_irq_save(flags); | ||
1585 | |||
1586 | #ifdef CONFIG_X86_64 | ||
1587 | if (x2apic) | ||
1588 | enable_x2apic(); | ||
1589 | else | ||
1590 | #endif | ||
1591 | { | ||
1592 | /* | ||
1593 | * Make sure the APICBASE points to the right address | ||
1594 | * | ||
1595 | * FIXME! This will be wrong if we ever support suspend on | ||
1596 | * SMP! We'll need to do this as part of the CPU restore! | ||
1597 | */ | ||
1598 | rdmsr(MSR_IA32_APICBASE, l, h); | ||
1599 | l &= ~MSR_IA32_APICBASE_BASE; | ||
1600 | l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr; | ||
1601 | wrmsr(MSR_IA32_APICBASE, l, h); | ||
1602 | } | ||
1603 | |||
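| /* mask LVTERR first so that restoring the other LVT entries cannot raise APIC error interrupts */ | ||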
1604 | apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED); | ||
1605 | apic_write(APIC_ID, apic_pm_state.apic_id); | ||
1606 | apic_write(APIC_DFR, apic_pm_state.apic_dfr); | ||
1607 | apic_write(APIC_LDR, apic_pm_state.apic_ldr); | ||
1608 | apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri); | ||
1609 | apic_write(APIC_SPIV, apic_pm_state.apic_spiv); | ||
1610 | apic_write(APIC_LVT0, apic_pm_state.apic_lvt0); | ||
1611 | apic_write(APIC_LVT1, apic_pm_state.apic_lvt1); | ||
1612 | #if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL) | ||
1613 | if (maxlvt >= 5) | ||
1614 | apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr); | ||
1615 | #endif | ||
1616 | if (maxlvt >= 4) | ||
1617 | apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc); | ||
1618 | apic_write(APIC_LVTT, apic_pm_state.apic_lvtt); | ||
1619 | apic_write(APIC_TDCR, apic_pm_state.apic_tdcr); | ||
1620 | apic_write(APIC_TMICT, apic_pm_state.apic_tmict); | ||
1621 | apic_write(APIC_ESR, 0); | ||
1622 | apic_read(APIC_ESR); | ||
1623 | apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr); | ||
1624 | apic_write(APIC_ESR, 0); | ||
1625 | apic_read(APIC_ESR); | ||
1626 | |||
1627 | local_irq_restore(flags); | ||
1628 | |||
1629 | return 0; | ||
1630 | } | ||
1631 | |||
1632 | /* | ||
1633 | * This device has no shutdown method - fully functioning local APICs | ||
1634 | * are needed on every CPU up until machine_halt/restart/poweroff. | ||
1635 | */ | ||
1636 | |||
1637 | static struct sysdev_class lapic_sysclass = { | ||
1638 | .name = "lapic", | ||
1639 | .resume = lapic_resume, | ||
1640 | .suspend = lapic_suspend, | ||
1641 | }; | ||
1642 | |||
1643 | static struct sys_device device_lapic = { | ||
1644 | .id = 0, | ||
1645 | .cls = &lapic_sysclass, | ||
1646 | }; | ||
1647 | |||
1648 | static void __cpuinit apic_pm_activate(void) | ||
1649 | { | ||
1650 | apic_pm_state.active = 1; | ||
1651 | } | ||
1652 | |||
1653 | static int __init init_lapic_sysfs(void) | ||
1654 | { | ||
1655 | int error; | ||
1656 | |||
1657 | if (!cpu_has_apic) | ||
1658 | return 0; | ||
1659 | /* XXX: remove suspend/resume procs if !apic_pm_state.active? */ | ||
1660 | |||
1661 | error = sysdev_class_register(&lapic_sysclass); | ||
1662 | if (!error) | ||
1663 | error = sysdev_register(&device_lapic); | ||
1664 | return error; | ||
1665 | } | ||
1666 | device_initcall(init_lapic_sysfs); | ||
1667 | |||
1668 | #else /* CONFIG_PM */ | ||
1669 | |||
1670 | static void apic_pm_activate(void) { } | ||
1671 | |||
1672 | #endif /* CONFIG_PM */ | ||
1673 | |||
1674 | /* | ||
1675 | * apic_is_clustered_box() -- Check if we can expect good TSC | ||
1676 | * | ||
1677 | * Thus far, the major user of this is IBM's Summit2 series: | ||
1678 | * | ||
1679 | * Clustered boxes may have unsynced TSC problems if they are | ||
1680 | * multi-chassis. Use available data to take a good guess. | ||
1681 | * If in doubt, go HPET. | ||
1682 | */ | ||
1683 | __cpuinit int apic_is_clustered_box(void) | ||
1684 | { | ||
1685 | int i, clusters, zeros; | ||
1686 | unsigned id; | ||
1687 | u16 *bios_cpu_apicid; | ||
1688 | DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS); | ||
1689 | |||
1690 | /* | ||
1691 | * There is no such kind of box with AMD CPUs yet. | ||
1692 | * Some AMD boxes with quad-core CPUs and 8 sockets have APIC IDs | ||
1693 | * in [4, 0x23] or [8, 0x27] and could be mistaken for vSMP boxes; | ||
1694 | * this still needs checking... | ||
1695 | */ | ||
1696 | if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !is_vsmp_box()) | ||
1697 | return 0; | ||
1698 | |||
1699 | bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid); | ||
1700 | bitmap_zero(clustermap, NUM_APIC_CLUSTERS); | ||
1701 | |||
1702 | for (i = 0; i < NR_CPUS; i++) { | ||
1703 | /* are we being called early in kernel startup? */ | ||
1704 | if (bios_cpu_apicid) { | ||
1705 | id = bios_cpu_apicid[i]; | ||
1706 | } | ||
1707 | else if (i < nr_cpu_ids) { | ||
1708 | if (cpu_present(i)) | ||
1709 | id = per_cpu(x86_bios_cpu_apicid, i); | ||
1710 | else | ||
1711 | continue; | ||
1712 | } | ||
1713 | else | ||
1714 | break; | ||
1715 | |||
1716 | if (id != BAD_APICID) | ||
1717 | __set_bit(APIC_CLUSTERID(id), clustermap); | ||
1718 | } | ||
1719 | |||
1720 | /* Problem: Partially populated chassis may not have CPUs in some of | ||
1721 | * the APIC clusters they have been allocated. Only present CPUs have | ||
1722 | * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap. | ||
1723 | * Since clusters are allocated sequentially, count zeros only if | ||
1724 | * they are bounded by ones. | ||
1725 | */ | ||
1726 | clusters = 0; | ||
1727 | zeros = 0; | ||
1728 | for (i = 0; i < NUM_APIC_CLUSTERS; i++) { | ||
1729 | if (test_bit(i, clustermap)) { | ||
1730 | clusters += 1 + zeros; | ||
1731 | zeros = 0; | ||
1732 | } else | ||
1733 | ++zeros; | ||
1734 | } | ||
1735 | |||
1736 | /* ScaleMP vSMPowered boxes have one cluster per board and TSCs are | ||
1737 | * not guaranteed to be synced between boards | ||
1738 | */ | ||
1739 | if (is_vsmp_box() && clusters > 1) | ||
1740 | return 1; | ||
1741 | |||
1742 | /* | ||
1743 | * If clusters > 2, then this should be a multi-chassis box. | ||
1744 | * May have to revisit this when multi-core + hyperthreaded CPUs come | ||
1745 | * out, but AFAIK this will work even for them. | ||
1746 | */ | ||
1747 | return (clusters > 2); | ||
1748 | } | ||
1749 | |||
1750 | static __init int setup_nox2apic(char *str) | ||
1751 | { | ||
1752 | disable_x2apic = 1; | ||
1753 | clear_cpu_cap(&boot_cpu_data, X86_FEATURE_X2APIC); | ||
1754 | return 0; | ||
1755 | } | ||
1756 | early_param("nox2apic", setup_nox2apic); | ||
1757 | |||
1758 | |||
1759 | /* | ||
1760 | * APIC command line parameters | ||
1761 | */ | ||
1762 | static int __init setup_disableapic(char *arg) | ||
1763 | { | ||
1764 | disable_apic = 1; | ||
1765 | setup_clear_cpu_cap(X86_FEATURE_APIC); | ||
1766 | return 0; | ||
1767 | } | ||
1768 | early_param("disableapic", setup_disableapic); | ||
1769 | |||
1770 | /* same as disableapic, for compatibility */ | ||
1771 | static int __init setup_nolapic(char *arg) | ||
1772 | { | ||
1773 | return setup_disableapic(arg); | ||
1774 | } | ||
1775 | early_param("nolapic", setup_nolapic); | ||
1776 | |||
1777 | static int __init parse_lapic_timer_c2_ok(char *arg) | ||
1778 | { | ||
1779 | local_apic_timer_c2_ok = 1; | ||
1780 | return 0; | ||
1781 | } | ||
1782 | early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok); | ||
1783 | |||
1784 | static int __init parse_disable_apic_timer(char *arg) | ||
1785 | { | ||
1786 | disable_apic_timer = 1; | ||
1787 | return 0; | ||
1788 | } | ||
1789 | early_param("noapictimer", parse_disable_apic_timer); | ||
1790 | |||
1791 | static int __init parse_nolapic_timer(char *arg) | ||
1792 | { | ||
1793 | disable_apic_timer = 1; | ||
1794 | return 0; | ||
1795 | } | ||
1796 | early_param("nolapic_timer", parse_nolapic_timer); | ||
1797 | |||
1798 | static __init int setup_apicpmtimer(char *s) | ||
1799 | { | ||
1800 | apic_calibrate_pmtmr = 1; | ||
1801 | notsc_setup(NULL); | ||
1802 | return 0; | ||
1803 | } | ||
1804 | __setup("apicpmtimer", setup_apicpmtimer); | ||
1805 | |||
1806 | static int __init apic_set_verbosity(char *arg) | ||
1807 | { | ||
1808 | if (!arg) { | ||
1809 | #ifdef CONFIG_X86_64 | ||
1810 | skip_ioapic_setup = 0; | ||
1811 | ioapic_force = 1; | ||
1812 | return 0; | ||
1813 | #endif | ||
1814 | return -EINVAL; | ||
1815 | } | ||
1816 | |||
1817 | if (strcmp("debug", arg) == 0) | ||
1818 | apic_verbosity = APIC_DEBUG; | ||
1819 | else if (strcmp("verbose", arg) == 0) | ||
1820 | apic_verbosity = APIC_VERBOSE; | ||
1821 | else { | ||
1822 | printk(KERN_WARNING "APIC Verbosity level %s not recognised" | ||
1823 | " use apic=verbose or apic=debug\n", arg); | ||
1824 | return -EINVAL; | ||
1825 | } | ||
1826 | |||
1827 | return 0; | ||
1828 | } | ||
1829 | early_param("apic", apic_set_verbosity); | ||
1830 | |||
1831 | static int __init lapic_insert_resource(void) | ||
1832 | { | ||
1833 | if (!apic_phys) | ||
1834 | return -1; | ||
1835 | |||
1836 | /* Put local APIC into the resource map. */ | ||
1837 | lapic_resource.start = apic_phys; | ||
1838 | lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1; | ||
1839 | insert_resource(&iomem_resource, &lapic_resource); | ||
1840 | |||
1841 | return 0; | ||
1842 | } | ||
1843 | |||
1844 | /* | ||
1845 | * We need to call insert_resource() after e820_reserve_resources(), | ||
1846 | * which uses request_resource(). | ||
1847 | */ | ||
1848 | late_initcall(lapic_insert_resource); | ||
diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/kernel/bios_uv.c index fdd585f9c53d..f0dfe6f17e7e 100644 --- a/arch/x86/kernel/bios_uv.c +++ b/arch/x86/kernel/bios_uv.c | |||
@@ -1,8 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * BIOS run time interface routines. | 2 | * BIOS run time interface routines. |
3 | * | 3 | * |
4 | * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 5 | * it under the terms of the GNU General Public License as published by |
8 | * the Free Software Foundation; either version 2 of the License, or | 6 | * the Free Software Foundation; either version 2 of the License, or |
@@ -16,33 +14,128 @@ | |||
16 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
17 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, write to the Free Software |
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
17 | * | ||
18 | * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. | ||
19 | * Copyright (c) Russ Anderson | ||
19 | */ | 20 | */ |
20 | 21 | ||
22 | #include <linux/efi.h> | ||
23 | #include <asm/efi.h> | ||
24 | #include <linux/io.h> | ||
21 | #include <asm/uv/bios.h> | 25 | #include <asm/uv/bios.h> |
26 | #include <asm/uv/uv_hub.h> | ||
27 | |||
28 | struct uv_systab uv_systab; | ||
22 | 29 | ||
23 | const char * | 30 | s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) |
24 | x86_bios_strerror(long status) | ||
25 | { | 31 | { |
26 | const char *str; | 32 | struct uv_systab *tab = &uv_systab; |
27 | switch (status) { | 33 | |
28 | case 0: str = "Call completed without error"; break; | 34 | if (!tab->function) |
29 | case -1: str = "Not implemented"; break; | 35 | /* |
30 | case -2: str = "Invalid argument"; break; | 36 | * BIOS does not support UV systab |
31 | case -3: str = "Call completed with error"; break; | 37 | */ |
32 | default: str = "Unknown BIOS status code"; break; | 38 | return BIOS_STATUS_UNIMPLEMENTED; |
33 | } | 39 | |
34 | return str; | 40 | return efi_call6((void *)__va(tab->function), |
41 | (u64)which, a1, a2, a3, a4, a5); | ||
35 | } | 42 | } |
36 | 43 | ||
37 | long | 44 | s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, |
38 | x86_bios_freq_base(unsigned long which, unsigned long *ticks_per_second, | 45 | u64 a4, u64 a5) |
39 | unsigned long *drift_info) | ||
40 | { | 46 | { |
41 | struct uv_bios_retval isrv; | 47 | unsigned long bios_flags; |
48 | s64 ret; | ||
42 | 49 | ||
43 | BIOS_CALL(isrv, BIOS_FREQ_BASE, which, 0, 0, 0, 0, 0, 0); | 50 | local_irq_save(bios_flags); |
44 | *ticks_per_second = isrv.v0; | 51 | ret = uv_bios_call(which, a1, a2, a3, a4, a5); |
45 | *drift_info = isrv.v1; | 52 | local_irq_restore(bios_flags); |
46 | return isrv.status; | 53 | |
54 | return ret; | ||
47 | } | 55 | } |
48 | EXPORT_SYMBOL_GPL(x86_bios_freq_base); | 56 | |
57 | s64 uv_bios_call_reentrant(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, | ||
58 | u64 a4, u64 a5) | ||
59 | { | ||
60 | s64 ret; | ||
61 | |||
62 | preempt_disable(); | ||
63 | ret = uv_bios_call(which, a1, a2, a3, a4, a5); | ||
64 | preempt_enable(); | ||
65 | |||
66 | return ret; | ||
67 | } | ||
68 | |||
69 | |||
70 | long sn_partition_id; | ||
71 | EXPORT_SYMBOL_GPL(sn_partition_id); | ||
72 | long uv_coherency_id; | ||
73 | EXPORT_SYMBOL_GPL(uv_coherency_id); | ||
74 | long uv_region_size; | ||
75 | EXPORT_SYMBOL_GPL(uv_region_size); | ||
76 | int uv_type; | ||
77 | |||
78 | |||
79 | s64 uv_bios_get_sn_info(int fc, int *uvtype, long *partid, long *coher, | ||
80 | long *region) | ||
81 | { | ||
82 | s64 ret; | ||
83 | u64 v0, v1; | ||
84 | union partition_info_u part; | ||
85 | |||
86 | ret = uv_bios_call_irqsave(UV_BIOS_GET_SN_INFO, fc, | ||
87 | (u64)(&v0), (u64)(&v1), 0, 0); | ||
88 | if (ret != BIOS_STATUS_SUCCESS) | ||
89 | return ret; | ||
90 | |||
91 | part.val = v0; | ||
92 | if (uvtype) | ||
93 | *uvtype = part.hub_version; | ||
94 | if (partid) | ||
95 | *partid = part.partition_id; | ||
96 | if (coher) | ||
97 | *coher = part.coherence_id; | ||
98 | if (region) | ||
99 | *region = part.region_size; | ||
100 | return ret; | ||
101 | } | ||
102 | |||
103 | |||
104 | s64 uv_bios_freq_base(u64 clock_type, u64 *ticks_per_second) | ||
105 | { | ||
106 | return uv_bios_call(UV_BIOS_FREQ_BASE, clock_type, | ||
107 | (u64)ticks_per_second, 0, 0, 0); | ||
108 | } | ||
109 | EXPORT_SYMBOL_GPL(uv_bios_freq_base); | ||
110 | |||
111 | |||
112 | #ifdef CONFIG_EFI | ||
113 | void uv_bios_init(void) | ||
114 | { | ||
115 | struct uv_systab *tab; | ||
116 | |||
117 | if ((efi.uv_systab == EFI_INVALID_TABLE_ADDR) || | ||
118 | (efi.uv_systab == (unsigned long)NULL)) { | ||
119 | printk(KERN_CRIT "No EFI UV System Table.\n"); | ||
120 | uv_systab.function = (unsigned long)NULL; | ||
121 | return; | ||
122 | } | ||
123 | |||
124 | tab = (struct uv_systab *)ioremap(efi.uv_systab, | ||
125 | sizeof(struct uv_systab)); | ||
126 | if (strncmp(tab->signature, "UVST", 4) != 0) | ||
127 | printk(KERN_ERR "bad signature in UV system table!"); | ||
128 | |||
129 | /* | ||
130 | * Copy table to permanent spot for later use. | ||
131 | */ | ||
132 | memcpy(&uv_systab, tab, sizeof(struct uv_systab)); | ||
133 | iounmap(tab); | ||
134 | |||
135 | printk(KERN_INFO "EFI UV System Table Revision %d\n", tab->revision); | ||
136 | } | ||
137 | #else /* !CONFIG_EFI */ | ||
138 | |||
139 | void uv_bios_init(void) { } | ||
140 | #endif | ||
141 | |||
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 32e73520adf7..8f1e31db2ad5 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -249,7 +249,7 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) | |||
249 | } | 249 | } |
250 | numa_set_node(cpu, node); | 250 | numa_set_node(cpu, node); |
251 | 251 | ||
252 | printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node); | 252 | printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node); |
253 | #endif | 253 | #endif |
254 | } | 254 | } |
255 | 255 | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c index 06fcce516d51..b0461856acfb 100644 --- a/arch/x86/kernel/cpu/cpufreq/longhaul.c +++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * (C) 2001-2004 Dave Jones. <davej@codemonkey.org.uk> | 2 | * (C) 2001-2004 Dave Jones. <davej@redhat.com> |
3 | * (C) 2002 Padraig Brady. <padraig@antefacto.com> | 3 | * (C) 2002 Padraig Brady. <padraig@antefacto.com> |
4 | * | 4 | * |
5 | * Licensed under the terms of the GNU GPL License version 2. | 5 | * Licensed under the terms of the GNU GPL License version 2. |
@@ -1019,7 +1019,7 @@ MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor"); | |||
1019 | module_param(revid_errata, int, 0644); | 1019 | module_param(revid_errata, int, 0644); |
1020 | MODULE_PARM_DESC(revid_errata, "Ignore CPU Revision ID"); | 1020 | MODULE_PARM_DESC(revid_errata, "Ignore CPU Revision ID"); |
1021 | 1021 | ||
1022 | MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>"); | 1022 | MODULE_AUTHOR ("Dave Jones <davej@redhat.com>"); |
1023 | MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors."); | 1023 | MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors."); |
1024 | MODULE_LICENSE ("GPL"); | 1024 | MODULE_LICENSE ("GPL"); |
1025 | 1025 | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c index b5ced806a316..c1ac5790c63e 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c | |||
@@ -246,7 +246,7 @@ static void __exit powernow_k6_exit(void) | |||
246 | } | 246 | } |
247 | 247 | ||
248 | 248 | ||
249 | MODULE_AUTHOR("Arjan van de Ven <arjanv@redhat.com>, Dave Jones <davej@codemonkey.org.uk>, Dominik Brodowski <linux@brodo.de>"); | 249 | MODULE_AUTHOR("Arjan van de Ven, Dave Jones <davej@redhat.com>, Dominik Brodowski <linux@brodo.de>"); |
250 | MODULE_DESCRIPTION("PowerNow! driver for AMD K6-2+ / K6-3+ processors."); | 250 | MODULE_DESCRIPTION("PowerNow! driver for AMD K6-2+ / K6-3+ processors."); |
251 | MODULE_LICENSE("GPL"); | 251 | MODULE_LICENSE("GPL"); |
252 | 252 | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c index 0a61159d7b71..7c7d56b43136 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * AMD K7 Powernow driver. | 2 | * AMD K7 Powernow driver. |
3 | * (C) 2003 Dave Jones <davej@codemonkey.org.uk> on behalf of SuSE Labs. | 3 | * (C) 2003 Dave Jones on behalf of SuSE Labs. |
4 | * (C) 2003-2004 Dave Jones <davej@redhat.com> | 4 | * (C) 2003-2004 Dave Jones <davej@redhat.com> |
5 | * | 5 | * |
6 | * Licensed under the terms of the GNU GPL License version 2. | 6 | * Licensed under the terms of the GNU GPL License version 2. |
@@ -692,7 +692,7 @@ static void __exit powernow_exit (void) | |||
692 | module_param(acpi_force, int, 0444); | 692 | module_param(acpi_force, int, 0444); |
693 | MODULE_PARM_DESC(acpi_force, "Force ACPI to be used."); | 693 | MODULE_PARM_DESC(acpi_force, "Force ACPI to be used."); |
694 | 694 | ||
695 | MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>"); | 695 | MODULE_AUTHOR ("Dave Jones <davej@redhat.com>"); |
696 | MODULE_DESCRIPTION ("Powernow driver for AMD K7 processors."); | 696 | MODULE_DESCRIPTION ("Powernow driver for AMD K7 processors."); |
697 | MODULE_LICENSE ("GPL"); | 697 | MODULE_LICENSE ("GPL"); |
698 | 698 | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index 84bb395038d8..008d23ba491b 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * Support : mark.langsdorf@amd.com | 7 | * Support : mark.langsdorf@amd.com |
8 | * | 8 | * |
9 | * Based on the powernow-k7.c module written by Dave Jones. | 9 | * Based on the powernow-k7.c module written by Dave Jones. |
10 | * (C) 2003 Dave Jones <davej@codemonkey.org.uk> on behalf of SuSE Labs | 10 | * (C) 2003 Dave Jones on behalf of SuSE Labs |
11 | * (C) 2004 Dominik Brodowski <linux@brodo.de> | 11 | * (C) 2004 Dominik Brodowski <linux@brodo.de> |
12 | * (C) 2004 Pavel Machek <pavel@suse.cz> | 12 | * (C) 2004 Pavel Machek <pavel@suse.cz> |
13 | * Licensed under the terms of the GNU GPL License version 2. | 13 | * Licensed under the terms of the GNU GPL License version 2. |
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c index 191f7263c61d..04d0376b64b0 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c | |||
@@ -431,7 +431,7 @@ static void __exit speedstep_exit(void) | |||
431 | } | 431 | } |
432 | 432 | ||
433 | 433 | ||
434 | MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>, Dominik Brodowski <linux@brodo.de>"); | 434 | MODULE_AUTHOR ("Dave Jones <davej@redhat.com>, Dominik Brodowski <linux@brodo.de>"); |
435 | MODULE_DESCRIPTION ("Speedstep driver for Intel mobile processors on chipsets with ICH-M southbridges."); | 435 | MODULE_DESCRIPTION ("Speedstep driver for Intel mobile processors on chipsets with ICH-M southbridges."); |
436 | MODULE_LICENSE ("GPL"); | 436 | MODULE_LICENSE ("GPL"); |
437 | 437 | ||
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 99468dbd08da..cce0b6118d55 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
@@ -174,7 +174,7 @@ static void __cpuinit srat_detect_node(void) | |||
174 | node = first_node(node_online_map); | 174 | node = first_node(node_online_map); |
175 | numa_set_node(cpu, node); | 175 | numa_set_node(cpu, node); |
176 | 176 | ||
177 | printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node); | 177 | printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node); |
178 | #endif | 178 | #endif |
179 | } | 179 | } |
180 | 180 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/k7.c b/arch/x86/kernel/cpu/mcheck/k7.c index f390c9f66351..dd3af6e7b39a 100644 --- a/arch/x86/kernel/cpu/mcheck/k7.c +++ b/arch/x86/kernel/cpu/mcheck/k7.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Athlon/Hammer specific Machine Check Exception Reporting | 2 | * Athlon specific Machine Check Exception Reporting |
3 | * (C) Copyright 2002 Dave Jones <davej@codemonkey.org.uk> | 3 | * (C) Copyright 2002 Dave Jones <davej@redhat.com> |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include <linux/init.h> | 6 | #include <linux/init.h> |
diff --git a/arch/x86/kernel/cpu/mcheck/mce_32.c b/arch/x86/kernel/cpu/mcheck/mce_32.c index 774d87cfd8cd..0ebf3fc6a610 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_32.c +++ b/arch/x86/kernel/cpu/mcheck/mce_32.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * mce.c - x86 Machine Check Exception Reporting | 2 | * mce.c - x86 Machine Check Exception Reporting |
3 | * (c) 2002 Alan Cox <alan@redhat.com>, Dave Jones <davej@codemonkey.org.uk> | 3 | * (c) 2002 Alan Cox <alan@redhat.com>, Dave Jones <davej@redhat.com> |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include <linux/init.h> | 6 | #include <linux/init.h> |
diff --git a/arch/x86/kernel/cpu/mcheck/non-fatal.c b/arch/x86/kernel/cpu/mcheck/non-fatal.c index cc1fccdd31e0..a74af128efc9 100644 --- a/arch/x86/kernel/cpu/mcheck/non-fatal.c +++ b/arch/x86/kernel/cpu/mcheck/non-fatal.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Non Fatal Machine Check Exception Reporting | 2 | * Non Fatal Machine Check Exception Reporting |
3 | * | 3 | * |
4 | * (C) Copyright 2002 Dave Jones. <davej@codemonkey.org.uk> | 4 | * (C) Copyright 2002 Dave Jones. <davej@redhat.com> |
5 | * | 5 | * |
6 | * This file contains routines to check for non-fatal MCEs every 15s | 6 | * This file contains routines to check for non-fatal MCEs every 15s |
7 | * | 7 | * |
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index 6bff382094f5..9abd48b22674 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c | |||
@@ -17,6 +17,8 @@ | |||
17 | #include <linux/bitops.h> | 17 | #include <linux/bitops.h> |
18 | #include <linux/smp.h> | 18 | #include <linux/smp.h> |
19 | #include <linux/nmi.h> | 19 | #include <linux/nmi.h> |
20 | #include <linux/kprobes.h> | ||
21 | |||
20 | #include <asm/apic.h> | 22 | #include <asm/apic.h> |
21 | #include <asm/intel_arch_perfmon.h> | 23 | #include <asm/intel_arch_perfmon.h> |
22 | 24 | ||
@@ -336,7 +338,8 @@ static void single_msr_unreserve(void) | |||
336 | release_perfctr_nmi(wd_ops->perfctr); | 338 | release_perfctr_nmi(wd_ops->perfctr); |
337 | } | 339 | } |
338 | 340 | ||
339 | static void single_msr_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) | 341 | static void __kprobes |
342 | single_msr_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) | ||
340 | { | 343 | { |
341 | /* start the cycle over again */ | 344 | /* start the cycle over again */ |
342 | write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz); | 345 | write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz); |
@@ -401,7 +404,7 @@ static int setup_p6_watchdog(unsigned nmi_hz) | |||
401 | return 1; | 404 | return 1; |
402 | } | 405 | } |
403 | 406 | ||
404 | static void p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) | 407 | static void __kprobes p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) |
405 | { | 408 | { |
406 | /* | 409 | /* |
407 | * P6 based Pentium M need to re-unmask | 410 | * P6 based Pentium M need to re-unmask |
@@ -605,7 +608,7 @@ static void p4_unreserve(void) | |||
605 | release_perfctr_nmi(MSR_P4_IQ_PERFCTR0); | 608 | release_perfctr_nmi(MSR_P4_IQ_PERFCTR0); |
606 | } | 609 | } |
607 | 610 | ||
608 | static void p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) | 611 | static void __kprobes p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) |
609 | { | 612 | { |
610 | unsigned dummy; | 613 | unsigned dummy; |
611 | /* | 614 | /* |
@@ -784,7 +787,7 @@ unsigned lapic_adjust_nmi_hz(unsigned hz) | |||
784 | return hz; | 787 | return hz; |
785 | } | 788 | } |
786 | 789 | ||
787 | int lapic_wd_event(unsigned nmi_hz) | 790 | int __kprobes lapic_wd_event(unsigned nmi_hz) |
788 | { | 791 | { |
789 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | 792 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); |
790 | u64 ctr; | 793 | u64 ctr; |
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c index 945a31cdd81f..1119d247fe11 100644 --- a/arch/x86/kernel/efi.c +++ b/arch/x86/kernel/efi.c | |||
@@ -367,6 +367,10 @@ void __init efi_init(void) | |||
367 | efi.smbios = config_tables[i].table; | 367 | efi.smbios = config_tables[i].table; |
368 | printk(" SMBIOS=0x%lx ", config_tables[i].table); | 368 | printk(" SMBIOS=0x%lx ", config_tables[i].table); |
369 | } else if (!efi_guidcmp(config_tables[i].guid, | 369 | } else if (!efi_guidcmp(config_tables[i].guid, |
370 | UV_SYSTEM_TABLE_GUID)) { | ||
371 | efi.uv_systab = config_tables[i].table; | ||
372 | printk(" UVsystab=0x%lx ", config_tables[i].table); | ||
373 | } else if (!efi_guidcmp(config_tables[i].guid, | ||
370 | HCDP_TABLE_GUID)) { | 374 | HCDP_TABLE_GUID)) { |
371 | efi.hcdp = config_tables[i].table; | 375 | efi.hcdp = config_tables[i].table; |
372 | printk(" HCDP=0x%lx ", config_tables[i].table); | 376 | printk(" HCDP=0x%lx ", config_tables[i].table); |
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index b21fbfaffe39..c356423a6026 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S | |||
@@ -629,7 +629,7 @@ ENTRY(interrupt) | |||
629 | ENTRY(irq_entries_start) | 629 | ENTRY(irq_entries_start) |
630 | RING0_INT_FRAME | 630 | RING0_INT_FRAME |
631 | vector=0 | 631 | vector=0 |
632 | .rept NR_IRQS | 632 | .rept NR_VECTORS |
633 | ALIGN | 633 | ALIGN |
634 | .if vector | 634 | .if vector |
635 | CFI_ADJUST_CFA_OFFSET -4 | 635 | CFI_ADJUST_CFA_OFFSET -4 |
@@ -1153,20 +1153,6 @@ ENDPROC(xen_failsafe_callback) | |||
1153 | #ifdef CONFIG_DYNAMIC_FTRACE | 1153 | #ifdef CONFIG_DYNAMIC_FTRACE |
1154 | 1154 | ||
1155 | ENTRY(mcount) | 1155 | ENTRY(mcount) |
1156 | pushl %eax | ||
1157 | pushl %ecx | ||
1158 | pushl %edx | ||
1159 | movl 0xc(%esp), %eax | ||
1160 | subl $MCOUNT_INSN_SIZE, %eax | ||
1161 | |||
1162 | .globl mcount_call | ||
1163 | mcount_call: | ||
1164 | call ftrace_stub | ||
1165 | |||
1166 | popl %edx | ||
1167 | popl %ecx | ||
1168 | popl %eax | ||
1169 | |||
1170 | ret | 1156 | ret |
1171 | END(mcount) | 1157 | END(mcount) |
1172 | 1158 | ||
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 1db6ce4314e1..09e7145484c5 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -64,32 +64,6 @@ | |||
64 | #ifdef CONFIG_FTRACE | 64 | #ifdef CONFIG_FTRACE |
65 | #ifdef CONFIG_DYNAMIC_FTRACE | 65 | #ifdef CONFIG_DYNAMIC_FTRACE |
66 | ENTRY(mcount) | 66 | ENTRY(mcount) |
67 | |||
68 | subq $0x38, %rsp | ||
69 | movq %rax, (%rsp) | ||
70 | movq %rcx, 8(%rsp) | ||
71 | movq %rdx, 16(%rsp) | ||
72 | movq %rsi, 24(%rsp) | ||
73 | movq %rdi, 32(%rsp) | ||
74 | movq %r8, 40(%rsp) | ||
75 | movq %r9, 48(%rsp) | ||
76 | |||
77 | movq 0x38(%rsp), %rdi | ||
78 | subq $MCOUNT_INSN_SIZE, %rdi | ||
79 | |||
80 | .globl mcount_call | ||
81 | mcount_call: | ||
82 | call ftrace_stub | ||
83 | |||
84 | movq 48(%rsp), %r9 | ||
85 | movq 40(%rsp), %r8 | ||
86 | movq 32(%rsp), %rdi | ||
87 | movq 24(%rsp), %rsi | ||
88 | movq 16(%rsp), %rdx | ||
89 | movq 8(%rsp), %rcx | ||
90 | movq (%rsp), %rax | ||
91 | addq $0x38, %rsp | ||
92 | |||
93 | retq | 67 | retq |
94 | END(mcount) | 68 | END(mcount) |
95 | 69 | ||
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index ab115cd15fdf..d073d981a730 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c | |||
@@ -11,17 +11,18 @@ | |||
11 | 11 | ||
12 | #include <linux/spinlock.h> | 12 | #include <linux/spinlock.h> |
13 | #include <linux/hardirq.h> | 13 | #include <linux/hardirq.h> |
14 | #include <linux/uaccess.h> | ||
14 | #include <linux/ftrace.h> | 15 | #include <linux/ftrace.h> |
15 | #include <linux/percpu.h> | 16 | #include <linux/percpu.h> |
16 | #include <linux/init.h> | 17 | #include <linux/init.h> |
17 | #include <linux/list.h> | 18 | #include <linux/list.h> |
18 | 19 | ||
19 | #include <asm/alternative.h> | ||
20 | #include <asm/ftrace.h> | 20 | #include <asm/ftrace.h> |
21 | #include <asm/nops.h> | ||
21 | 22 | ||
22 | 23 | ||
23 | /* Long is fine, even if it is only 4 bytes ;-) */ | 24 | /* Long is fine, even if it is only 4 bytes ;-) */ |
24 | static long *ftrace_nop; | 25 | static unsigned long *ftrace_nop; |
25 | 26 | ||
26 | union ftrace_code_union { | 27 | union ftrace_code_union { |
27 | char code[MCOUNT_INSN_SIZE]; | 28 | char code[MCOUNT_INSN_SIZE]; |
@@ -60,11 +61,7 @@ notrace int | |||
60 | ftrace_modify_code(unsigned long ip, unsigned char *old_code, | 61 | ftrace_modify_code(unsigned long ip, unsigned char *old_code, |
61 | unsigned char *new_code) | 62 | unsigned char *new_code) |
62 | { | 63 | { |
63 | unsigned replaced; | 64 | unsigned char replaced[MCOUNT_INSN_SIZE]; |
64 | unsigned old = *(unsigned *)old_code; /* 4 bytes */ | ||
65 | unsigned new = *(unsigned *)new_code; /* 4 bytes */ | ||
66 | unsigned char newch = new_code[4]; | ||
67 | int faulted = 0; | ||
68 | 65 | ||
69 | /* | 66 | /* |
70 | * Note: Due to modules and __init, code can | 67 | * Note: Due to modules and __init, code can |
@@ -72,29 +69,20 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code, | |||
72 | * as well as code changing. | 69 | * as well as code changing. |
73 | * | 70 | * |
74 | * No real locking needed, this code is run through | 71 | * No real locking needed, this code is run through |
75 | * kstop_machine. | 72 | * kstop_machine, or before SMP starts. |
76 | */ | 73 | */ |
77 | asm volatile ( | 74 | if (__copy_from_user_inatomic(replaced, (char __user *)ip, MCOUNT_INSN_SIZE)) |
78 | "1: lock\n" | 75 | return 1; |
79 | " cmpxchg %3, (%2)\n" | 76 | |
80 | " jnz 2f\n" | 77 | if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0) |
81 | " movb %b4, 4(%2)\n" | 78 | return 2; |
82 | "2:\n" | ||
83 | ".section .fixup, \"ax\"\n" | ||
84 | "3: movl $1, %0\n" | ||
85 | " jmp 2b\n" | ||
86 | ".previous\n" | ||
87 | _ASM_EXTABLE(1b, 3b) | ||
88 | : "=r"(faulted), "=a"(replaced) | ||
89 | : "r"(ip), "r"(new), "c"(newch), | ||
90 | "0"(faulted), "a"(old) | ||
91 | : "memory"); | ||
92 | sync_core(); | ||
93 | 79 | ||
94 | if (replaced != old && replaced != new) | 80 | WARN_ON_ONCE(__copy_to_user_inatomic((char __user *)ip, new_code, |
95 | faulted = 2; | 81 | MCOUNT_INSN_SIZE)); |
96 | 82 | ||
97 | return faulted; | 83 | sync_core(); |
84 | |||
85 | return 0; | ||
98 | } | 86 | } |
99 | 87 | ||
100 | notrace int ftrace_update_ftrace_func(ftrace_func_t func) | 88 | notrace int ftrace_update_ftrace_func(ftrace_func_t func) |
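The inline cmpxchg patcher is replaced by fault-tolerant copies: read the bytes currently at the call site, verify they match the expected old code, and only then write the new code, so a fault (freed __init text, an unloaded module) is reported instead of oopsing. A minimal sketch of that read-compare-write pattern under those assumptions; the function and constant names here are ours:

    #include <linux/uaccess.h>
    #include <linux/string.h>
    #include <linux/errno.h>
    #include <asm/processor.h>		/* sync_core() */

    #define PATCH_SIZE 5			/* size of a call/nop site, like MCOUNT_INSN_SIZE */

    static int patch_text(unsigned long ip, const unsigned char *old_code,
    		      const unsigned char *new_code)
    {
    	unsigned char cur[PATCH_SIZE];

    	/* Read what is there now; a fault means the text is gone. */
    	if (__copy_from_user_inatomic(cur, (const void __user *)ip, PATCH_SIZE))
    		return -EFAULT;

    	/* Refuse to patch if the site no longer holds the expected bytes. */
    	if (memcmp(cur, old_code, PATCH_SIZE) != 0)
    		return -EINVAL;

    	/* Write the replacement, then serialize after self-modifying code. */
    	if (__copy_to_user_inatomic((void __user *)ip, new_code, PATCH_SIZE))
    		return -EPERM;
    	sync_core();

    	return 0;
    }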
@@ -112,30 +100,76 @@ notrace int ftrace_update_ftrace_func(ftrace_func_t func) | |||
112 | 100 | ||
113 | notrace int ftrace_mcount_set(unsigned long *data) | 101 | notrace int ftrace_mcount_set(unsigned long *data) |
114 | { | 102 | { |
115 | unsigned long ip = (long)(&mcount_call); | 103 | /* mcount is initialized as a nop */ |
116 | unsigned long *addr = data; | 104 | *data = 0; |
117 | unsigned char old[MCOUNT_INSN_SIZE], *new; | ||
118 | |||
119 | /* | ||
120 | * Replace the mcount stub with a pointer to the | ||
121 | * ip recorder function. | ||
122 | */ | ||
123 | memcpy(old, &mcount_call, MCOUNT_INSN_SIZE); | ||
124 | new = ftrace_call_replace(ip, *addr); | ||
125 | *addr = ftrace_modify_code(ip, old, new); | ||
126 | |||
127 | return 0; | 105 | return 0; |
128 | } | 106 | } |
129 | 107 | ||
130 | int __init ftrace_dyn_arch_init(void *data) | 108 | int __init ftrace_dyn_arch_init(void *data) |
131 | { | 109 | { |
132 | const unsigned char *const *noptable = find_nop_table(); | 110 | extern const unsigned char ftrace_test_p6nop[]; |
133 | 111 | extern const unsigned char ftrace_test_nop5[]; | |
134 | /* This is running in kstop_machine */ | 112 | extern const unsigned char ftrace_test_jmp[]; |
135 | 113 | int faulted = 0; | |
136 | ftrace_mcount_set(data); | ||
137 | 114 | ||
138 | ftrace_nop = (unsigned long *)noptable[MCOUNT_INSN_SIZE]; | 115 | /* |
116 | * There is no good nop for all x86 archs. | ||
117 | * We will default to using the P6_NOP5, but first we | ||
118 | * will test to make sure that the nop will actually | ||
119 | * work on this CPU. If it faults, we will then | ||
120 | * go to a less efficient 5 byte nop. If that fails | ||
121 | * we then just use a jmp as our nop. This isn't the most | ||
122 | * efficient nop, but we cannot use a multi-part nop | ||
123 | * since we would then risk being preempted in the middle | ||
124 | * of that nop, and if we enabled tracing then, it might | ||
125 | * cause a system crash. | ||
126 | * | ||
127 | * TODO: check the cpuid to determine the best nop. | ||
128 | */ | ||
129 | asm volatile ( | ||
130 | "jmp ftrace_test_jmp\n" | ||
131 | /* This code needs to stay around */ | ||
132 | ".section .text, \"ax\"\n" | ||
133 | "ftrace_test_jmp:" | ||
134 | "jmp ftrace_test_p6nop\n" | ||
135 | "nop\n" | ||
136 | "nop\n" | ||
137 | "nop\n" /* 2 byte jmp + 3 bytes */ | ||
138 | "ftrace_test_p6nop:" | ||
139 | P6_NOP5 | ||
140 | "jmp 1f\n" | ||
141 | "ftrace_test_nop5:" | ||
142 | ".byte 0x66,0x66,0x66,0x66,0x90\n" | ||
143 | "jmp 1f\n" | ||
144 | ".previous\n" | ||
145 | "1:" | ||
146 | ".section .fixup, \"ax\"\n" | ||
147 | "2: movl $1, %0\n" | ||
148 | " jmp ftrace_test_nop5\n" | ||
149 | "3: movl $2, %0\n" | ||
150 | " jmp 1b\n" | ||
151 | ".previous\n" | ||
152 | _ASM_EXTABLE(ftrace_test_p6nop, 2b) | ||
153 | _ASM_EXTABLE(ftrace_test_nop5, 3b) | ||
154 | : "=r"(faulted) : "0" (faulted)); | ||
155 | |||
156 | switch (faulted) { | ||
157 | case 0: | ||
158 | pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n"); | ||
159 | ftrace_nop = (unsigned long *)ftrace_test_p6nop; | ||
160 | break; | ||
161 | case 1: | ||
162 | pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n"); | ||
163 | ftrace_nop = (unsigned long *)ftrace_test_nop5; | ||
164 | break; | ||
165 | case 2: | ||
166 | pr_info("ftrace: converting mcount calls to jmp . + 5\n"); | ||
167 | ftrace_nop = (unsigned long *)ftrace_test_jmp; | ||
168 | break; | ||
169 | } | ||
170 | |||
171 | /* The return code is returned via data */ | ||
172 | *(unsigned long *)data = 0; | ||
139 | 173 | ||
140 | return 0; | 174 | return 0; |
141 | } | 175 | } |
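For reference, the three 5-byte candidates probed above encode as follows; a small illustrative table (the array names are ours, not the kernel's):

    /* P6_NOP5: a single 5-byte instruction, preferred when the CPU accepts it. */
    static const unsigned char p6_nop5[]  = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };	/* nopl 0x0(%rax,%rax,1) */
    /* Fallback: one nop carrying four 0x66 operand-size prefixes. */
    static const unsigned char osp_nop5[] = { 0x66, 0x66, 0x66, 0x66, 0x90 };
    /* Last resort: a relative jump to the next instruction ("jmp . + 5"). */
    static const unsigned char jmp_nop5[] = { 0xe9, 0x00, 0x00, 0x00, 0x00 };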
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index 9eca5ba7a6b1..2ec2de8d8c46 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c | |||
@@ -179,8 +179,10 @@ static int __init physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
179 | * is an example). | 179 | * is an example). |
180 | */ | 180 | */ |
181 | if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID && | 181 | if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID && |
182 | (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) | 182 | (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) { |
183 | printk(KERN_DEBUG "system APIC only can use physical flat"); | ||
183 | return 1; | 184 | return 1; |
185 | } | ||
184 | #endif | 186 | #endif |
185 | 187 | ||
186 | return 0; | 188 | return 0; |
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index 33581d94a90e..bfd532843df6 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c | |||
@@ -341,12 +341,12 @@ static __init void map_mmioh_high(int max_pnode) | |||
341 | 341 | ||
342 | static __init void uv_rtc_init(void) | 342 | static __init void uv_rtc_init(void) |
343 | { | 343 | { |
344 | long status, ticks_per_sec, drift; | 344 | long status; |
345 | u64 ticks_per_sec; | ||
345 | 346 | ||
346 | status = | 347 | status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK, |
347 | x86_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec, | 348 | &ticks_per_sec); |
348 | &drift); | 349 | if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) { |
349 | if (status != 0 || ticks_per_sec < 100000) { | ||
350 | printk(KERN_WARNING | 350 | printk(KERN_WARNING |
351 | "unable to determine platform RTC clock frequency, " | 351 | "unable to determine platform RTC clock frequency, " |
352 | "guessing.\n"); | 352 | "guessing.\n"); |
@@ -356,7 +356,22 @@ static __init void uv_rtc_init(void) | |||
356 | sn_rtc_cycles_per_second = ticks_per_sec; | 356 | sn_rtc_cycles_per_second = ticks_per_sec; |
357 | } | 357 | } |
358 | 358 | ||
359 | static bool uv_system_inited; | 359 | /* |
360 | * Called on each cpu to initialize the per_cpu UV data area. | ||
361 | * ZZZ hotplug not supported yet | ||
362 | */ | ||
363 | void __cpuinit uv_cpu_init(void) | ||
364 | { | ||
365 | /* CPU 0 initialization will be done via uv_system_init. */ | ||
366 | if (!uv_blade_info) | ||
367 | return; | ||
368 | |||
369 | uv_blade_info[uv_numa_blade_id()].nr_online_cpus++; | ||
370 | |||
371 | if (get_uv_system_type() == UV_NON_UNIQUE_APIC) | ||
372 | set_x2apic_extra_bits(uv_hub_info->pnode); | ||
373 | } | ||
374 | |||
360 | 375 | ||
361 | void __init uv_system_init(void) | 376 | void __init uv_system_init(void) |
362 | { | 377 | { |
@@ -412,6 +427,9 @@ void __init uv_system_init(void) | |||
412 | gnode_upper = (((unsigned long)node_id.s.node_id) & | 427 | gnode_upper = (((unsigned long)node_id.s.node_id) & |
413 | ~((1 << n_val) - 1)) << m_val; | 428 | ~((1 << n_val) - 1)) << m_val; |
414 | 429 | ||
430 | uv_bios_init(); | ||
431 | uv_bios_get_sn_info(0, &uv_type, &sn_partition_id, | ||
432 | &uv_coherency_id, &uv_region_size); | ||
415 | uv_rtc_init(); | 433 | uv_rtc_init(); |
416 | 434 | ||
417 | for_each_present_cpu(cpu) { | 435 | for_each_present_cpu(cpu) { |
@@ -433,7 +451,7 @@ void __init uv_system_init(void) | |||
433 | uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1; | 451 | uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1; |
434 | uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper; | 452 | uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper; |
435 | uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base; | 453 | uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base; |
436 | uv_cpu_hub_info(cpu)->coherency_domain_number = 0;/* ZZZ */ | 454 | uv_cpu_hub_info(cpu)->coherency_domain_number = uv_coherency_id; |
437 | uv_node_to_blade[nid] = blade; | 455 | uv_node_to_blade[nid] = blade; |
438 | uv_cpu_to_blade[cpu] = blade; | 456 | uv_cpu_to_blade[cpu] = blade; |
439 | max_pnode = max(pnode, max_pnode); | 457 | max_pnode = max(pnode, max_pnode); |
@@ -448,21 +466,6 @@ void __init uv_system_init(void) | |||
448 | map_mmr_high(max_pnode); | 466 | map_mmr_high(max_pnode); |
449 | map_config_high(max_pnode); | 467 | map_config_high(max_pnode); |
450 | map_mmioh_high(max_pnode); | 468 | map_mmioh_high(max_pnode); |
451 | uv_system_inited = true; | ||
452 | } | ||
453 | 469 | ||
454 | /* | 470 | uv_cpu_init(); |
455 | * Called on each cpu to initialize the per_cpu UV data area. | ||
456 | * ZZZ hotplug not supported yet | ||
457 | */ | ||
458 | void __cpuinit uv_cpu_init(void) | ||
459 | { | ||
460 | BUG_ON(!uv_system_inited); | ||
461 | |||
462 | uv_blade_info[uv_numa_blade_id()].nr_online_cpus++; | ||
463 | |||
464 | if (get_uv_system_type() == UV_NON_UNIQUE_APIC) | ||
465 | set_x2apic_extra_bits(uv_hub_info->pnode); | ||
466 | } | 471 | } |
467 | |||
468 | |||
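uv_cpu_init() is moved ahead of uv_system_init() and now bails out when uv_blade_info has not been allocated yet, instead of BUG()ing; the boot CPU is then covered by the explicit uv_cpu_init() call at the end of uv_system_init(). A hedged sketch of that guard-then-account pattern, with illustrative names:

    struct example_blade {
    	int nr_online_cpus;
    };

    static struct example_blade *example_blade_info;	/* NULL until system-wide init runs */

    void example_cpu_init(int blade)
    {
    	/* Early boot CPU: system-wide init has not run yet, nothing to account. */
    	if (!example_blade_info)
    		return;

    	example_blade_info[blade].nr_online_cpus++;
    }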
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index acf62fc233da..77017e834cf7 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c | |||
@@ -1,29 +1,49 @@ | |||
1 | #include <linux/clocksource.h> | 1 | #include <linux/clocksource.h> |
2 | #include <linux/clockchips.h> | 2 | #include <linux/clockchips.h> |
3 | #include <linux/interrupt.h> | ||
4 | #include <linux/sysdev.h> | ||
3 | #include <linux/delay.h> | 5 | #include <linux/delay.h> |
4 | #include <linux/errno.h> | 6 | #include <linux/errno.h> |
5 | #include <linux/hpet.h> | 7 | #include <linux/hpet.h> |
6 | #include <linux/init.h> | 8 | #include <linux/init.h> |
7 | #include <linux/sysdev.h> | 9 | #include <linux/cpu.h> |
8 | #include <linux/pm.h> | 10 | #include <linux/pm.h> |
11 | #include <linux/io.h> | ||
9 | 12 | ||
10 | #include <asm/fixmap.h> | 13 | #include <asm/fixmap.h> |
11 | #include <asm/hpet.h> | ||
12 | #include <asm/i8253.h> | 14 | #include <asm/i8253.h> |
13 | #include <asm/io.h> | 15 | #include <asm/hpet.h> |
14 | 16 | ||
15 | #define HPET_MASK CLOCKSOURCE_MASK(32) | 17 | #define HPET_MASK CLOCKSOURCE_MASK(32) |
16 | #define HPET_SHIFT 22 | 18 | #define HPET_SHIFT 22 |
17 | 19 | ||
18 | /* FSEC = 10^-15 | 20 | /* FSEC = 10^-15 |
19 | NSEC = 10^-9 */ | 21 | NSEC = 10^-9 */ |
20 | #define FSEC_PER_NSEC 1000000L | 22 | #define FSEC_PER_NSEC 1000000L |
23 | |||
24 | #define HPET_DEV_USED_BIT 2 | ||
25 | #define HPET_DEV_USED (1 << HPET_DEV_USED_BIT) | ||
26 | #define HPET_DEV_VALID 0x8 | ||
27 | #define HPET_DEV_FSB_CAP 0x1000 | ||
28 | #define HPET_DEV_PERI_CAP 0x2000 | ||
29 | |||
30 | #define EVT_TO_HPET_DEV(evt) container_of(evt, struct hpet_dev, evt) | ||
21 | 31 | ||
22 | /* | 32 | /* |
23 | * HPET address is set in acpi/boot.c, when an ACPI entry exists | 33 | * HPET address is set in acpi/boot.c, when an ACPI entry exists |
24 | */ | 34 | */ |
25 | unsigned long hpet_address; | 35 | unsigned long hpet_address; |
26 | static void __iomem *hpet_virt_address; | 36 | unsigned long hpet_num_timers; |
37 | static void __iomem *hpet_virt_address; | ||
38 | |||
39 | struct hpet_dev { | ||
40 | struct clock_event_device evt; | ||
41 | unsigned int num; | ||
42 | int cpu; | ||
43 | unsigned int irq; | ||
44 | unsigned int flags; | ||
45 | char name[10]; | ||
46 | }; | ||
27 | 47 | ||
28 | unsigned long hpet_readl(unsigned long a) | 48 | unsigned long hpet_readl(unsigned long a) |
29 | { | 49 | { |
@@ -59,7 +79,7 @@ static inline void hpet_clear_mapping(void) | |||
59 | static int boot_hpet_disable; | 79 | static int boot_hpet_disable; |
60 | int hpet_force_user; | 80 | int hpet_force_user; |
61 | 81 | ||
62 | static int __init hpet_setup(char* str) | 82 | static int __init hpet_setup(char *str) |
63 | { | 83 | { |
64 | if (str) { | 84 | if (str) { |
65 | if (!strncmp("disable", str, 7)) | 85 | if (!strncmp("disable", str, 7)) |
@@ -80,7 +100,7 @@ __setup("nohpet", disable_hpet); | |||
80 | 100 | ||
81 | static inline int is_hpet_capable(void) | 101 | static inline int is_hpet_capable(void) |
82 | { | 102 | { |
83 | return (!boot_hpet_disable && hpet_address); | 103 | return !boot_hpet_disable && hpet_address; |
84 | } | 104 | } |
85 | 105 | ||
86 | /* | 106 | /* |
@@ -102,6 +122,9 @@ EXPORT_SYMBOL_GPL(is_hpet_enabled); | |||
102 | * timer 0 and timer 1 in case of RTC emulation. | 122 | * timer 0 and timer 1 in case of RTC emulation. |
103 | */ | 123 | */ |
104 | #ifdef CONFIG_HPET | 124 | #ifdef CONFIG_HPET |
125 | |||
126 | static void hpet_reserve_msi_timers(struct hpet_data *hd); | ||
127 | |||
105 | static void hpet_reserve_platform_timers(unsigned long id) | 128 | static void hpet_reserve_platform_timers(unsigned long id) |
106 | { | 129 | { |
107 | struct hpet __iomem *hpet = hpet_virt_address; | 130 | struct hpet __iomem *hpet = hpet_virt_address; |
@@ -111,10 +134,10 @@ static void hpet_reserve_platform_timers(unsigned long id) | |||
111 | 134 | ||
112 | nrtimers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1; | 135 | nrtimers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1; |
113 | 136 | ||
114 | memset(&hd, 0, sizeof (hd)); | 137 | memset(&hd, 0, sizeof(hd)); |
115 | hd.hd_phys_address = hpet_address; | 138 | hd.hd_phys_address = hpet_address; |
116 | hd.hd_address = hpet; | 139 | hd.hd_address = hpet; |
117 | hd.hd_nirqs = nrtimers; | 140 | hd.hd_nirqs = nrtimers; |
118 | hpet_reserve_timer(&hd, 0); | 141 | hpet_reserve_timer(&hd, 0); |
119 | 142 | ||
120 | #ifdef CONFIG_HPET_EMULATE_RTC | 143 | #ifdef CONFIG_HPET_EMULATE_RTC |
@@ -130,10 +153,12 @@ static void hpet_reserve_platform_timers(unsigned long id) | |||
130 | hd.hd_irq[1] = HPET_LEGACY_RTC; | 153 | hd.hd_irq[1] = HPET_LEGACY_RTC; |
131 | 154 | ||
132 | for (i = 2; i < nrtimers; timer++, i++) { | 155 | for (i = 2; i < nrtimers; timer++, i++) { |
133 | hd.hd_irq[i] = (readl(&timer->hpet_config) & Tn_INT_ROUTE_CNF_MASK) >> | 156 | hd.hd_irq[i] = (readl(&timer->hpet_config) & |
134 | Tn_INT_ROUTE_CNF_SHIFT; | 157 | Tn_INT_ROUTE_CNF_MASK) >> Tn_INT_ROUTE_CNF_SHIFT; |
135 | } | 158 | } |
136 | 159 | ||
160 | hpet_reserve_msi_timers(&hd); | ||
161 | |||
137 | hpet_alloc(&hd); | 162 | hpet_alloc(&hd); |
138 | 163 | ||
139 | } | 164 | } |
@@ -227,60 +252,70 @@ static void hpet_legacy_clockevent_register(void) | |||
227 | printk(KERN_DEBUG "hpet clockevent registered\n"); | 252 | printk(KERN_DEBUG "hpet clockevent registered\n"); |
228 | } | 253 | } |
229 | 254 | ||
230 | static void hpet_legacy_set_mode(enum clock_event_mode mode, | 255 | static int hpet_setup_msi_irq(unsigned int irq); |
231 | struct clock_event_device *evt) | 256 | |
257 | static void hpet_set_mode(enum clock_event_mode mode, | ||
258 | struct clock_event_device *evt, int timer) | ||
232 | { | 259 | { |
233 | unsigned long cfg, cmp, now; | 260 | unsigned long cfg, cmp, now; |
234 | uint64_t delta; | 261 | uint64_t delta; |
235 | 262 | ||
236 | switch(mode) { | 263 | switch (mode) { |
237 | case CLOCK_EVT_MODE_PERIODIC: | 264 | case CLOCK_EVT_MODE_PERIODIC: |
238 | delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * hpet_clockevent.mult; | 265 | delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult; |
239 | delta >>= hpet_clockevent.shift; | 266 | delta >>= evt->shift; |
240 | now = hpet_readl(HPET_COUNTER); | 267 | now = hpet_readl(HPET_COUNTER); |
241 | cmp = now + (unsigned long) delta; | 268 | cmp = now + (unsigned long) delta; |
242 | cfg = hpet_readl(HPET_T0_CFG); | 269 | cfg = hpet_readl(HPET_Tn_CFG(timer)); |
243 | cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | | 270 | cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | |
244 | HPET_TN_SETVAL | HPET_TN_32BIT; | 271 | HPET_TN_SETVAL | HPET_TN_32BIT; |
245 | hpet_writel(cfg, HPET_T0_CFG); | 272 | hpet_writel(cfg, HPET_Tn_CFG(timer)); |
246 | /* | 273 | /* |
247 | * The first write after writing TN_SETVAL to the | 274 | * The first write after writing TN_SETVAL to the |
248 | * config register sets the counter value, the second | 275 | * config register sets the counter value, the second |
249 | * write sets the period. | 276 | * write sets the period. |
250 | */ | 277 | */ |
251 | hpet_writel(cmp, HPET_T0_CMP); | 278 | hpet_writel(cmp, HPET_Tn_CMP(timer)); |
252 | udelay(1); | 279 | udelay(1); |
253 | hpet_writel((unsigned long) delta, HPET_T0_CMP); | 280 | hpet_writel((unsigned long) delta, HPET_Tn_CMP(timer)); |
254 | break; | 281 | break; |
255 | 282 | ||
256 | case CLOCK_EVT_MODE_ONESHOT: | 283 | case CLOCK_EVT_MODE_ONESHOT: |
257 | cfg = hpet_readl(HPET_T0_CFG); | 284 | cfg = hpet_readl(HPET_Tn_CFG(timer)); |
258 | cfg &= ~HPET_TN_PERIODIC; | 285 | cfg &= ~HPET_TN_PERIODIC; |
259 | cfg |= HPET_TN_ENABLE | HPET_TN_32BIT; | 286 | cfg |= HPET_TN_ENABLE | HPET_TN_32BIT; |
260 | hpet_writel(cfg, HPET_T0_CFG); | 287 | hpet_writel(cfg, HPET_Tn_CFG(timer)); |
261 | break; | 288 | break; |
262 | 289 | ||
263 | case CLOCK_EVT_MODE_UNUSED: | 290 | case CLOCK_EVT_MODE_UNUSED: |
264 | case CLOCK_EVT_MODE_SHUTDOWN: | 291 | case CLOCK_EVT_MODE_SHUTDOWN: |
265 | cfg = hpet_readl(HPET_T0_CFG); | 292 | cfg = hpet_readl(HPET_Tn_CFG(timer)); |
266 | cfg &= ~HPET_TN_ENABLE; | 293 | cfg &= ~HPET_TN_ENABLE; |
267 | hpet_writel(cfg, HPET_T0_CFG); | 294 | hpet_writel(cfg, HPET_Tn_CFG(timer)); |
268 | break; | 295 | break; |
269 | 296 | ||
270 | case CLOCK_EVT_MODE_RESUME: | 297 | case CLOCK_EVT_MODE_RESUME: |
271 | hpet_enable_legacy_int(); | 298 | if (timer == 0) { |
299 | hpet_enable_legacy_int(); | ||
300 | } else { | ||
301 | struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt); | ||
302 | hpet_setup_msi_irq(hdev->irq); | ||
303 | disable_irq(hdev->irq); | ||
304 | irq_set_affinity(hdev->irq, cpumask_of_cpu(hdev->cpu)); | ||
305 | enable_irq(hdev->irq); | ||
306 | } | ||
272 | break; | 307 | break; |
273 | } | 308 | } |
274 | } | 309 | } |
275 | 310 | ||
276 | static int hpet_legacy_next_event(unsigned long delta, | 311 | static int hpet_next_event(unsigned long delta, |
277 | struct clock_event_device *evt) | 312 | struct clock_event_device *evt, int timer) |
278 | { | 313 | { |
279 | u32 cnt; | 314 | u32 cnt; |
280 | 315 | ||
281 | cnt = hpet_readl(HPET_COUNTER); | 316 | cnt = hpet_readl(HPET_COUNTER); |
282 | cnt += (u32) delta; | 317 | cnt += (u32) delta; |
283 | hpet_writel(cnt, HPET_T0_CMP); | 318 | hpet_writel(cnt, HPET_Tn_CMP(timer)); |
284 | 319 | ||
285 | /* | 320 | /* |
286 | * We need to read back the CMP register to make sure that | 321 | * We need to read back the CMP register to make sure that |
@@ -292,6 +327,347 @@ static int hpet_legacy_next_event(unsigned long delta, | |||
292 | return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0; | 327 | return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0; |
293 | } | 328 | } |
294 | 329 | ||
330 | static void hpet_legacy_set_mode(enum clock_event_mode mode, | ||
331 | struct clock_event_device *evt) | ||
332 | { | ||
333 | hpet_set_mode(mode, evt, 0); | ||
334 | } | ||
335 | |||
336 | static int hpet_legacy_next_event(unsigned long delta, | ||
337 | struct clock_event_device *evt) | ||
338 | { | ||
339 | return hpet_next_event(delta, evt, 0); | ||
340 | } | ||
341 | |||
342 | /* | ||
343 | * HPET MSI Support | ||
344 | */ | ||
345 | #ifdef CONFIG_PCI_MSI | ||
346 | |||
347 | static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev); | ||
348 | static struct hpet_dev *hpet_devs; | ||
349 | |||
350 | void hpet_msi_unmask(unsigned int irq) | ||
351 | { | ||
352 | struct hpet_dev *hdev = get_irq_data(irq); | ||
353 | unsigned long cfg; | ||
354 | |||
355 | /* unmask it */ | ||
356 | cfg = hpet_readl(HPET_Tn_CFG(hdev->num)); | ||
357 | cfg |= HPET_TN_FSB; | ||
358 | hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); | ||
359 | } | ||
360 | |||
361 | void hpet_msi_mask(unsigned int irq) | ||
362 | { | ||
363 | unsigned long cfg; | ||
364 | struct hpet_dev *hdev = get_irq_data(irq); | ||
365 | |||
366 | /* mask it */ | ||
367 | cfg = hpet_readl(HPET_Tn_CFG(hdev->num)); | ||
368 | cfg &= ~HPET_TN_FSB; | ||
369 | hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); | ||
370 | } | ||
371 | |||
372 | void hpet_msi_write(unsigned int irq, struct msi_msg *msg) | ||
373 | { | ||
374 | struct hpet_dev *hdev = get_irq_data(irq); | ||
375 | |||
376 | hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num)); | ||
377 | hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4); | ||
378 | } | ||
379 | |||
380 | void hpet_msi_read(unsigned int irq, struct msi_msg *msg) | ||
381 | { | ||
382 | struct hpet_dev *hdev = get_irq_data(irq); | ||
383 | |||
384 | msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num)); | ||
385 | msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4); | ||
386 | msg->address_hi = 0; | ||
387 | } | ||
388 | |||
389 | static void hpet_msi_set_mode(enum clock_event_mode mode, | ||
390 | struct clock_event_device *evt) | ||
391 | { | ||
392 | struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt); | ||
393 | hpet_set_mode(mode, evt, hdev->num); | ||
394 | } | ||
395 | |||
396 | static int hpet_msi_next_event(unsigned long delta, | ||
397 | struct clock_event_device *evt) | ||
398 | { | ||
399 | struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt); | ||
400 | return hpet_next_event(delta, evt, hdev->num); | ||
401 | } | ||
402 | |||
403 | static int hpet_setup_msi_irq(unsigned int irq) | ||
404 | { | ||
405 | if (arch_setup_hpet_msi(irq)) { | ||
406 | destroy_irq(irq); | ||
407 | return -EINVAL; | ||
408 | } | ||
409 | return 0; | ||
410 | } | ||
411 | |||
412 | static int hpet_assign_irq(struct hpet_dev *dev) | ||
413 | { | ||
414 | unsigned int irq; | ||
415 | |||
416 | irq = create_irq(); | ||
417 | if (!irq) | ||
418 | return -EINVAL; | ||
419 | |||
420 | set_irq_data(irq, dev); | ||
421 | |||
422 | if (hpet_setup_msi_irq(irq)) | ||
423 | return -EINVAL; | ||
424 | |||
425 | dev->irq = irq; | ||
426 | return 0; | ||
427 | } | ||
428 | |||
429 | static irqreturn_t hpet_interrupt_handler(int irq, void *data) | ||
430 | { | ||
431 | struct hpet_dev *dev = (struct hpet_dev *)data; | ||
432 | struct clock_event_device *hevt = &dev->evt; | ||
433 | |||
434 | if (!hevt->event_handler) { | ||
435 | printk(KERN_INFO "Spurious HPET timer interrupt on HPET timer %d\n", | ||
436 | dev->num); | ||
437 | return IRQ_HANDLED; | ||
438 | } | ||
439 | |||
440 | hevt->event_handler(hevt); | ||
441 | return IRQ_HANDLED; | ||
442 | } | ||
443 | |||
444 | static int hpet_setup_irq(struct hpet_dev *dev) | ||
445 | { | ||
446 | |||
447 | if (request_irq(dev->irq, hpet_interrupt_handler, | ||
448 | IRQF_SHARED|IRQF_NOBALANCING, dev->name, dev)) | ||
449 | return -1; | ||
450 | |||
451 | disable_irq(dev->irq); | ||
452 | irq_set_affinity(dev->irq, cpumask_of_cpu(dev->cpu)); | ||
453 | enable_irq(dev->irq); | ||
454 | |||
455 | printk(KERN_DEBUG "hpet: %s irq %d for MSI\n", | ||
456 | dev->name, dev->irq); | ||
457 | |||
458 | return 0; | ||
459 | } | ||
460 | |||
461 | /* This should be called on the specified @cpu */ | ||
462 | static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu) | ||
463 | { | ||
464 | struct clock_event_device *evt = &hdev->evt; | ||
465 | uint64_t hpet_freq; | ||
466 | |||
467 | WARN_ON(cpu != smp_processor_id()); | ||
468 | if (!(hdev->flags & HPET_DEV_VALID)) | ||
469 | return; | ||
470 | |||
471 | if (hpet_setup_msi_irq(hdev->irq)) | ||
472 | return; | ||
473 | |||
474 | hdev->cpu = cpu; | ||
475 | per_cpu(cpu_hpet_dev, cpu) = hdev; | ||
476 | evt->name = hdev->name; | ||
477 | hpet_setup_irq(hdev); | ||
478 | evt->irq = hdev->irq; | ||
479 | |||
480 | evt->rating = 110; | ||
481 | evt->features = CLOCK_EVT_FEAT_ONESHOT; | ||
482 | if (hdev->flags & HPET_DEV_PERI_CAP) | ||
483 | evt->features |= CLOCK_EVT_FEAT_PERIODIC; | ||
484 | |||
485 | evt->set_mode = hpet_msi_set_mode; | ||
486 | evt->set_next_event = hpet_msi_next_event; | ||
487 | evt->shift = 32; | ||
488 | |||
489 | /* | ||
490 | * The period is a femtosecond value. We need to calculate the | ||
491 | * scaled math multiplication factor for nanosecond to hpet tick | ||
492 | * conversion. | ||
493 | */ | ||
494 | hpet_freq = 1000000000000000ULL; | ||
495 | do_div(hpet_freq, hpet_period); | ||
496 | evt->mult = div_sc((unsigned long) hpet_freq, | ||
497 | NSEC_PER_SEC, evt->shift); | ||
498 | /* Calculate the max delta */ | ||
499 | evt->max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, evt); | ||
500 | /* 5 usec minimum reprogramming delta. */ | ||
501 | evt->min_delta_ns = 5000; | ||
502 | |||
503 | evt->cpumask = cpumask_of_cpu(hdev->cpu); | ||
504 | clockevents_register_device(evt); | ||
505 | } | ||
506 | |||
507 | #ifdef CONFIG_HPET | ||
508 | /* Reserve at least one timer for userspace (/dev/hpet) */ | ||
509 | #define RESERVE_TIMERS 1 | ||
510 | #else | ||
511 | #define RESERVE_TIMERS 0 | ||
512 | #endif | ||
513 | |||
514 | static void hpet_msi_capability_lookup(unsigned int start_timer) | ||
515 | { | ||
516 | unsigned int id; | ||
517 | unsigned int num_timers; | ||
518 | unsigned int num_timers_used = 0; | ||
519 | int i; | ||
520 | |||
521 | id = hpet_readl(HPET_ID); | ||
522 | |||
523 | num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT); | ||
524 | num_timers++; /* Value read out starts from 0 */ | ||
525 | |||
526 | hpet_devs = kzalloc(sizeof(struct hpet_dev) * num_timers, GFP_KERNEL); | ||
527 | if (!hpet_devs) | ||
528 | return; | ||
529 | |||
530 | hpet_num_timers = num_timers; | ||
531 | |||
532 | for (i = start_timer; i < num_timers - RESERVE_TIMERS; i++) { | ||
533 | struct hpet_dev *hdev = &hpet_devs[num_timers_used]; | ||
534 | unsigned long cfg = hpet_readl(HPET_Tn_CFG(i)); | ||
535 | |||
536 | /* Only consider HPET timer with MSI support */ | ||
537 | if (!(cfg & HPET_TN_FSB_CAP)) | ||
538 | continue; | ||
539 | |||
540 | hdev->flags = 0; | ||
541 | if (cfg & HPET_TN_PERIODIC_CAP) | ||
542 | hdev->flags |= HPET_DEV_PERI_CAP; | ||
543 | hdev->num = i; | ||
544 | |||
545 | sprintf(hdev->name, "hpet%d", i); | ||
546 | if (hpet_assign_irq(hdev)) | ||
547 | continue; | ||
548 | |||
549 | hdev->flags |= HPET_DEV_FSB_CAP; | ||
550 | hdev->flags |= HPET_DEV_VALID; | ||
551 | num_timers_used++; | ||
552 | if (num_timers_used == num_possible_cpus()) | ||
553 | break; | ||
554 | } | ||
555 | |||
556 | printk(KERN_INFO "HPET: %d timers in total, %d timers will be used for per-cpu timer\n", | ||
557 | num_timers, num_timers_used); | ||
558 | } | ||
559 | |||
560 | #ifdef CONFIG_HPET | ||
561 | static void hpet_reserve_msi_timers(struct hpet_data *hd) | ||
562 | { | ||
563 | int i; | ||
564 | |||
565 | if (!hpet_devs) | ||
566 | return; | ||
567 | |||
568 | for (i = 0; i < hpet_num_timers; i++) { | ||
569 | struct hpet_dev *hdev = &hpet_devs[i]; | ||
570 | |||
571 | if (!(hdev->flags & HPET_DEV_VALID)) | ||
572 | continue; | ||
573 | |||
574 | hd->hd_irq[hdev->num] = hdev->irq; | ||
575 | hpet_reserve_timer(hd, hdev->num); | ||
576 | } | ||
577 | } | ||
578 | #endif | ||
579 | |||
580 | static struct hpet_dev *hpet_get_unused_timer(void) | ||
581 | { | ||
582 | int i; | ||
583 | |||
584 | if (!hpet_devs) | ||
585 | return NULL; | ||
586 | |||
587 | for (i = 0; i < hpet_num_timers; i++) { | ||
588 | struct hpet_dev *hdev = &hpet_devs[i]; | ||
589 | |||
590 | if (!(hdev->flags & HPET_DEV_VALID)) | ||
591 | continue; | ||
592 | if (test_and_set_bit(HPET_DEV_USED_BIT, | ||
593 | (unsigned long *)&hdev->flags)) | ||
594 | continue; | ||
595 | return hdev; | ||
596 | } | ||
597 | return NULL; | ||
598 | } | ||
599 | |||
600 | struct hpet_work_struct { | ||
601 | struct delayed_work work; | ||
602 | struct completion complete; | ||
603 | }; | ||
604 | |||
605 | static void hpet_work(struct work_struct *w) | ||
606 | { | ||
607 | struct hpet_dev *hdev; | ||
608 | int cpu = smp_processor_id(); | ||
609 | struct hpet_work_struct *hpet_work; | ||
610 | |||
611 | hpet_work = container_of(w, struct hpet_work_struct, work.work); | ||
612 | |||
613 | hdev = hpet_get_unused_timer(); | ||
614 | if (hdev) | ||
615 | init_one_hpet_msi_clockevent(hdev, cpu); | ||
616 | |||
617 | complete(&hpet_work->complete); | ||
618 | } | ||
619 | |||
620 | static int hpet_cpuhp_notify(struct notifier_block *n, | ||
621 | unsigned long action, void *hcpu) | ||
622 | { | ||
623 | unsigned long cpu = (unsigned long)hcpu; | ||
624 | struct hpet_work_struct work; | ||
625 | struct hpet_dev *hdev = per_cpu(cpu_hpet_dev, cpu); | ||
626 | |||
627 | switch (action & 0xf) { | ||
628 | case CPU_ONLINE: | ||
629 | INIT_DELAYED_WORK(&work.work, hpet_work); | ||
630 | init_completion(&work.complete); | ||
631 | /* FIXME: add schedule_work_on() */ | ||
632 | schedule_delayed_work_on(cpu, &work.work, 0); | ||
633 | wait_for_completion(&work.complete); | ||
634 | break; | ||
635 | case CPU_DEAD: | ||
636 | if (hdev) { | ||
637 | free_irq(hdev->irq, hdev); | ||
638 | hdev->flags &= ~HPET_DEV_USED; | ||
639 | per_cpu(cpu_hpet_dev, cpu) = NULL; | ||
640 | } | ||
641 | break; | ||
642 | } | ||
643 | return NOTIFY_OK; | ||
644 | } | ||
645 | #else | ||
646 | |||
647 | static int hpet_setup_msi_irq(unsigned int irq) | ||
648 | { | ||
649 | return 0; | ||
650 | } | ||
651 | static void hpet_msi_capability_lookup(unsigned int start_timer) | ||
652 | { | ||
653 | return; | ||
654 | } | ||
655 | |||
656 | #ifdef CONFIG_HPET | ||
657 | static void hpet_reserve_msi_timers(struct hpet_data *hd) | ||
658 | { | ||
659 | return; | ||
660 | } | ||
661 | #endif | ||
662 | |||
663 | static int hpet_cpuhp_notify(struct notifier_block *n, | ||
664 | unsigned long action, void *hcpu) | ||
665 | { | ||
666 | return NOTIFY_OK; | ||
667 | } | ||
668 | |||
669 | #endif | ||
670 | |||
295 | /* | 671 | /* |
296 | * Clock source related code | 672 | * Clock source related code |
297 | */ | 673 | */ |
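init_one_hpet_msi_clockevent() above turns the HPET period, reported in femtoseconds per tick, into a clockevents mult/shift pair: ticks per second is 10^15 divided by the period, and mult scales nanoseconds to ticks at the chosen shift. A minimal sketch of the same arithmetic, assuming a hypothetical setup helper:

    #include <linux/clockchips.h>
    #include <linux/time.h>		/* NSEC_PER_SEC */
    #include <asm/div64.h>		/* do_div() */

    static void example_setup_clockevent(struct clock_event_device *evt,
    				     unsigned long period_fsec)
    {
    	u64 freq = 1000000000000000ULL;		/* 10^15 femtoseconds per second */

    	do_div(freq, period_fsec);		/* -> timer ticks per second */

    	evt->shift = 32;
    	evt->mult = div_sc((unsigned long)freq, NSEC_PER_SEC, evt->shift);
    	/* Largest programmable delta for a 32-bit comparator. */
    	evt->max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, evt);
    	/* 5 usec floor so reprogramming cannot race the counter. */
    	evt->min_delta_ns = 5000;
    }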
@@ -427,8 +803,10 @@ int __init hpet_enable(void) | |||
427 | 803 | ||
428 | if (id & HPET_ID_LEGSUP) { | 804 | if (id & HPET_ID_LEGSUP) { |
429 | hpet_legacy_clockevent_register(); | 805 | hpet_legacy_clockevent_register(); |
806 | hpet_msi_capability_lookup(2); | ||
430 | return 1; | 807 | return 1; |
431 | } | 808 | } |
809 | hpet_msi_capability_lookup(0); | ||
432 | return 0; | 810 | return 0; |
433 | 811 | ||
434 | out_nohpet: | 812 | out_nohpet: |
@@ -445,6 +823,8 @@ out_nohpet: | |||
445 | */ | 823 | */ |
446 | static __init int hpet_late_init(void) | 824 | static __init int hpet_late_init(void) |
447 | { | 825 | { |
826 | int cpu; | ||
827 | |||
448 | if (boot_hpet_disable) | 828 | if (boot_hpet_disable) |
449 | return -ENODEV; | 829 | return -ENODEV; |
450 | 830 | ||
@@ -460,6 +840,13 @@ static __init int hpet_late_init(void) | |||
460 | 840 | ||
461 | hpet_reserve_platform_timers(hpet_readl(HPET_ID)); | 841 | hpet_reserve_platform_timers(hpet_readl(HPET_ID)); |
462 | 842 | ||
843 | for_each_online_cpu(cpu) { | ||
844 | hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu); | ||
845 | } | ||
846 | |||
847 | /* This notifier should be called after workqueue is ready */ | ||
848 | hotcpu_notifier(hpet_cpuhp_notify, -20); | ||
849 | |||
463 | return 0; | 850 | return 0; |
464 | } | 851 | } |
465 | fs_initcall(hpet_late_init); | 852 | fs_initcall(hpet_late_init); |
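hpet_late_init() replays CPU_ONLINE for the CPUs that are already up and only then registers the hotplug notifier, so the per-cpu MSI timers get initialized for both present and future CPUs. A hedged sketch of that bring-up pattern; the names are ours:

    #include <linux/init.h>
    #include <linux/cpu.h>
    #include <linux/notifier.h>

    static int example_cpuhp_notify(struct notifier_block *n,
    				unsigned long action, void *hcpu)
    {
    	/* Set up (CPU_ONLINE) or tear down (CPU_DEAD) the per-cpu resource here. */
    	return NOTIFY_OK;
    }

    static __init int example_late_init(void)
    {
    	int cpu;

    	/* CPUs that came online before this initcall never saw CPU_ONLINE: replay it. */
    	for_each_online_cpu(cpu)
    		example_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);

    	/* Low priority so the workqueue notifier has already run when this one fires. */
    	hotcpu_notifier(example_cpuhp_notify, -20);
    	return 0;
    }
    fs_initcall(example_late_init);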
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic.c index 02063ae042f7..b764d7429c61 100644 --- a/arch/x86/kernel/io_apic_64.c +++ b/arch/x86/kernel/io_apic.c | |||
@@ -27,17 +27,21 @@ | |||
27 | #include <linux/sched.h> | 27 | #include <linux/sched.h> |
28 | #include <linux/pci.h> | 28 | #include <linux/pci.h> |
29 | #include <linux/mc146818rtc.h> | 29 | #include <linux/mc146818rtc.h> |
30 | #include <linux/compiler.h> | ||
30 | #include <linux/acpi.h> | 31 | #include <linux/acpi.h> |
32 | #include <linux/module.h> | ||
31 | #include <linux/sysdev.h> | 33 | #include <linux/sysdev.h> |
32 | #include <linux/msi.h> | 34 | #include <linux/msi.h> |
33 | #include <linux/htirq.h> | 35 | #include <linux/htirq.h> |
34 | #include <linux/dmar.h> | 36 | #include <linux/freezer.h> |
35 | #include <linux/jiffies.h> | 37 | #include <linux/kthread.h> |
38 | #include <linux/jiffies.h> /* time_after() */ | ||
36 | #ifdef CONFIG_ACPI | 39 | #ifdef CONFIG_ACPI |
37 | #include <acpi/acpi_bus.h> | 40 | #include <acpi/acpi_bus.h> |
38 | #endif | 41 | #endif |
39 | #include <linux/bootmem.h> | 42 | #include <linux/bootmem.h> |
40 | #include <linux/dmar.h> | 43 | #include <linux/dmar.h> |
44 | #include <linux/hpet.h> | ||
41 | 45 | ||
42 | #include <asm/idle.h> | 46 | #include <asm/idle.h> |
43 | #include <asm/io.h> | 47 | #include <asm/io.h> |
@@ -46,61 +50,28 @@ | |||
46 | #include <asm/proto.h> | 50 | #include <asm/proto.h> |
47 | #include <asm/acpi.h> | 51 | #include <asm/acpi.h> |
48 | #include <asm/dma.h> | 52 | #include <asm/dma.h> |
53 | #include <asm/timer.h> | ||
49 | #include <asm/i8259.h> | 54 | #include <asm/i8259.h> |
50 | #include <asm/nmi.h> | 55 | #include <asm/nmi.h> |
51 | #include <asm/msidef.h> | 56 | #include <asm/msidef.h> |
52 | #include <asm/hypertransport.h> | 57 | #include <asm/hypertransport.h> |
58 | #include <asm/setup.h> | ||
53 | #include <asm/irq_remapping.h> | 59 | #include <asm/irq_remapping.h> |
60 | #include <asm/hpet.h> | ||
61 | #include <asm/uv/uv_hub.h> | ||
62 | #include <asm/uv/uv_irq.h> | ||
54 | 63 | ||
55 | #include <mach_ipi.h> | 64 | #include <mach_ipi.h> |
56 | #include <mach_apic.h> | 65 | #include <mach_apic.h> |
66 | #include <mach_apicdef.h> | ||
57 | 67 | ||
58 | #define __apicdebuginit(type) static type __init | 68 | #define __apicdebuginit(type) static type __init |
59 | 69 | ||
60 | struct irq_cfg { | 70 | /* |
61 | cpumask_t domain; | 71 | * Is the SiS APIC rmw bug present ? |
62 | cpumask_t old_domain; | 72 | * -1 = don't know, 0 = no, 1 = yes |
63 | unsigned move_cleanup_count; | 73 | */ |
64 | u8 vector; | 74 | int sis_apic_bug = -1; |
65 | u8 move_in_progress : 1; | ||
66 | }; | ||
67 | |||
68 | /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ | ||
69 | static struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = { | ||
70 | [0] = { .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, }, | ||
71 | [1] = { .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, }, | ||
72 | [2] = { .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, }, | ||
73 | [3] = { .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR, }, | ||
74 | [4] = { .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR, }, | ||
75 | [5] = { .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR, }, | ||
76 | [6] = { .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR, }, | ||
77 | [7] = { .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR, }, | ||
78 | [8] = { .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR, }, | ||
79 | [9] = { .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR, }, | ||
80 | [10] = { .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, }, | ||
81 | [11] = { .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, }, | ||
82 | [12] = { .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, }, | ||
83 | [13] = { .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, }, | ||
84 | [14] = { .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, }, | ||
85 | [15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, }, | ||
86 | }; | ||
87 | |||
88 | static int assign_irq_vector(int irq, cpumask_t mask); | ||
89 | |||
90 | int first_system_vector = 0xfe; | ||
91 | |||
92 | char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE}; | ||
93 | |||
94 | int sis_apic_bug; /* not actually supported, dummy for compile */ | ||
95 | |||
96 | static int no_timer_check; | ||
97 | |||
98 | static int disable_timer_pin_1 __initdata; | ||
99 | |||
100 | int timer_through_8259 __initdata; | ||
101 | |||
102 | /* Where if anywhere is the i8259 connect in external int mode */ | ||
103 | static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; | ||
104 | 75 | ||
105 | static DEFINE_SPINLOCK(ioapic_lock); | 76 | static DEFINE_SPINLOCK(ioapic_lock); |
106 | static DEFINE_SPINLOCK(vector_lock); | 77 | static DEFINE_SPINLOCK(vector_lock); |
@@ -110,9 +81,6 @@ static DEFINE_SPINLOCK(vector_lock); | |||
110 | */ | 81 | */ |
111 | int nr_ioapic_registers[MAX_IO_APICS]; | 82 | int nr_ioapic_registers[MAX_IO_APICS]; |
112 | 83 | ||
113 | /* I/O APIC RTE contents at the OS boot up */ | ||
114 | struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS]; | ||
115 | |||
116 | /* I/O APIC entries */ | 84 | /* I/O APIC entries */ |
117 | struct mp_config_ioapic mp_ioapics[MAX_IO_APICS]; | 85 | struct mp_config_ioapic mp_ioapics[MAX_IO_APICS]; |
118 | int nr_ioapics; | 86 | int nr_ioapics; |
@@ -123,11 +91,69 @@ struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; | |||
123 | /* # of MP IRQ source entries */ | 91 | /* # of MP IRQ source entries */ |
124 | int mp_irq_entries; | 92 | int mp_irq_entries; |
125 | 93 | ||
94 | #if defined (CONFIG_MCA) || defined (CONFIG_EISA) | ||
95 | int mp_bus_id_to_type[MAX_MP_BUSSES]; | ||
96 | #endif | ||
97 | |||
126 | DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); | 98 | DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); |
127 | 99 | ||
100 | int skip_ioapic_setup; | ||
101 | |||
102 | static int __init parse_noapic(char *str) | ||
103 | { | ||
104 | /* disable IO-APIC */ | ||
105 | disable_ioapic_setup(); | ||
106 | return 0; | ||
107 | } | ||
108 | early_param("noapic", parse_noapic); | ||
109 | |||
110 | struct irq_pin_list; | ||
111 | struct irq_cfg { | ||
112 | unsigned int irq; | ||
113 | struct irq_pin_list *irq_2_pin; | ||
114 | cpumask_t domain; | ||
115 | cpumask_t old_domain; | ||
116 | unsigned move_cleanup_count; | ||
117 | u8 vector; | ||
118 | u8 move_in_progress : 1; | ||
119 | }; | ||
120 | |||
121 | /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ | ||
122 | static struct irq_cfg irq_cfgx[NR_IRQS] = { | ||
123 | [0] = { .irq = 0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, }, | ||
124 | [1] = { .irq = 1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, }, | ||
125 | [2] = { .irq = 2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, }, | ||
126 | [3] = { .irq = 3, .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR, }, | ||
127 | [4] = { .irq = 4, .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR, }, | ||
128 | [5] = { .irq = 5, .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR, }, | ||
129 | [6] = { .irq = 6, .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR, }, | ||
130 | [7] = { .irq = 7, .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR, }, | ||
131 | [8] = { .irq = 8, .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR, }, | ||
132 | [9] = { .irq = 9, .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR, }, | ||
133 | [10] = { .irq = 10, .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, }, | ||
134 | [11] = { .irq = 11, .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, }, | ||
135 | [12] = { .irq = 12, .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, }, | ||
136 | [13] = { .irq = 13, .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, }, | ||
137 | [14] = { .irq = 14, .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, }, | ||
138 | [15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, }, | ||
139 | }; | ||
140 | |||
141 | #define for_each_irq_cfg(irq, cfg) \ | ||
142 | for (irq = 0, cfg = irq_cfgx; irq < nr_irqs; irq++, cfg++) | ||
143 | |||
144 | static struct irq_cfg *irq_cfg(unsigned int irq) | ||
145 | { | ||
146 | return irq < nr_irqs ? irq_cfgx + irq : NULL; | ||
147 | } | ||
148 | |||
149 | static struct irq_cfg *irq_cfg_alloc(unsigned int irq) | ||
150 | { | ||
151 | return irq_cfg(irq); | ||
152 | } | ||
153 | |||
128 | /* | 154 | /* |
129 | * Rough estimation of how many shared IRQs there are, can | 155 | * Rough estimation of how many shared IRQs there are, can be changed |
130 | * be changed anytime. | 156 | * anytime. |
131 | */ | 157 | */ |
132 | #define MAX_PLUS_SHARED_IRQS NR_IRQS | 158 | #define MAX_PLUS_SHARED_IRQS NR_IRQS |
133 | #define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS) | 159 | #define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS) |
@@ -139,9 +165,36 @@ DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); | |||
139 | * between pins and IRQs. | 165 | * between pins and IRQs. |
140 | */ | 166 | */ |
141 | 167 | ||
142 | static struct irq_pin_list { | 168 | struct irq_pin_list { |
143 | short apic, pin, next; | 169 | int apic, pin; |
144 | } irq_2_pin[PIN_MAP_SIZE]; | 170 | struct irq_pin_list *next; |
171 | }; | ||
172 | |||
173 | static struct irq_pin_list irq_2_pin_head[PIN_MAP_SIZE]; | ||
174 | static struct irq_pin_list *irq_2_pin_ptr; | ||
175 | |||
176 | static void __init irq_2_pin_init(void) | ||
177 | { | ||
178 | struct irq_pin_list *pin = irq_2_pin_head; | ||
179 | int i; | ||
180 | |||
181 | for (i = 1; i < PIN_MAP_SIZE; i++) | ||
182 | pin[i-1].next = &pin[i]; | ||
183 | |||
184 | irq_2_pin_ptr = &pin[0]; | ||
185 | } | ||
186 | |||
187 | static struct irq_pin_list *get_one_free_irq_2_pin(void) | ||
188 | { | ||
189 | struct irq_pin_list *pin = irq_2_pin_ptr; | ||
190 | |||
191 | if (!pin) | ||
192 | panic("can not get more irq_2_pin\n"); | ||
193 | |||
194 | irq_2_pin_ptr = pin->next; | ||
195 | pin->next = NULL; | ||
196 | return pin; | ||
197 | } | ||
145 | 198 | ||
146 | struct io_apic { | 199 | struct io_apic { |
147 | unsigned int index; | 200 | unsigned int index; |
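The fixed irq_2_pin[] array indexed by IRQ number becomes a chain of entries handed out from a boot-time free list, so each irq_cfg carries its own pin list instead of sharing one NR_IRQS-sized table. A self-contained sketch of that pre-allocated free-list idiom, with illustrative names and pool size:

    #define EXAMPLE_POOL_SIZE 128		/* stands in for PIN_MAP_SIZE */

    struct example_pin {
    	int apic, pin;
    	struct example_pin *next;
    };

    static struct example_pin example_pool[EXAMPLE_POOL_SIZE];
    static struct example_pin *example_free_list;

    static void example_pool_init(void)
    {
    	int i;

    	/* Thread every entry onto one list at boot; no runtime allocation needed. */
    	for (i = 1; i < EXAMPLE_POOL_SIZE; i++)
    		example_pool[i - 1].next = &example_pool[i];
    	example_free_list = &example_pool[0];
    }

    static struct example_pin *example_pool_get(void)
    {
    	struct example_pin *e = example_free_list;

    	if (!e)
    		return NULL;	/* pool exhausted; the kernel panics at this point */

    	example_free_list = e->next;
    	e->next = NULL;
    	return e;
    }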
@@ -172,10 +225,15 @@ static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned i | |||
172 | /* | 225 | /* |
173 | * Re-write a value: to be used for read-modify-write | 226 | * Re-write a value: to be used for read-modify-write |
174 | * cycles where the read already set up the index register. | 227 | * cycles where the read already set up the index register. |
228 | * | ||
229 | * Older SiS APIC requires we rewrite the index register | ||
175 | */ | 230 | */ |
176 | static inline void io_apic_modify(unsigned int apic, unsigned int value) | 231 | static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value) |
177 | { | 232 | { |
178 | struct io_apic __iomem *io_apic = io_apic_base(apic); | 233 | struct io_apic __iomem *io_apic = io_apic_base(apic); |
234 | |||
235 | if (sis_apic_bug) | ||
236 | writel(reg, &io_apic->index); | ||
179 | writel(value, &io_apic->data); | 237 | writel(value, &io_apic->data); |
180 | } | 238 | } |
181 | 239 | ||
@@ -183,16 +241,17 @@ static bool io_apic_level_ack_pending(unsigned int irq) | |||
183 | { | 241 | { |
184 | struct irq_pin_list *entry; | 242 | struct irq_pin_list *entry; |
185 | unsigned long flags; | 243 | unsigned long flags; |
244 | struct irq_cfg *cfg = irq_cfg(irq); | ||
186 | 245 | ||
187 | spin_lock_irqsave(&ioapic_lock, flags); | 246 | spin_lock_irqsave(&ioapic_lock, flags); |
188 | entry = irq_2_pin + irq; | 247 | entry = cfg->irq_2_pin; |
189 | for (;;) { | 248 | for (;;) { |
190 | unsigned int reg; | 249 | unsigned int reg; |
191 | int pin; | 250 | int pin; |
192 | 251 | ||
193 | pin = entry->pin; | 252 | if (!entry) |
194 | if (pin == -1) | ||
195 | break; | 253 | break; |
254 | pin = entry->pin; | ||
196 | reg = io_apic_read(entry->apic, 0x10 + pin*2); | 255 | reg = io_apic_read(entry->apic, 0x10 + pin*2); |
197 | /* Is the remote IRR bit set? */ | 256 | /* Is the remote IRR bit set? */ |
198 | if (reg & IO_APIC_REDIR_REMOTE_IRR) { | 257 | if (reg & IO_APIC_REDIR_REMOTE_IRR) { |
@@ -201,45 +260,13 @@ static bool io_apic_level_ack_pending(unsigned int irq) | |||
201 | } | 260 | } |
202 | if (!entry->next) | 261 | if (!entry->next) |
203 | break; | 262 | break; |
204 | entry = irq_2_pin + entry->next; | 263 | entry = entry->next; |
205 | } | 264 | } |
206 | spin_unlock_irqrestore(&ioapic_lock, flags); | 265 | spin_unlock_irqrestore(&ioapic_lock, flags); |
207 | 266 | ||
208 | return false; | 267 | return false; |
209 | } | 268 | } |
210 | 269 | ||
211 | /* | ||
212 | * Synchronize the IO-APIC and the CPU by doing | ||
213 | * a dummy read from the IO-APIC | ||
214 | */ | ||
215 | static inline void io_apic_sync(unsigned int apic) | ||
216 | { | ||
217 | struct io_apic __iomem *io_apic = io_apic_base(apic); | ||
218 | readl(&io_apic->data); | ||
219 | } | ||
220 | |||
221 | #define __DO_ACTION(R, ACTION, FINAL) \ | ||
222 | \ | ||
223 | { \ | ||
224 | int pin; \ | ||
225 | struct irq_pin_list *entry = irq_2_pin + irq; \ | ||
226 | \ | ||
227 | BUG_ON(irq >= NR_IRQS); \ | ||
228 | for (;;) { \ | ||
229 | unsigned int reg; \ | ||
230 | pin = entry->pin; \ | ||
231 | if (pin == -1) \ | ||
232 | break; \ | ||
233 | reg = io_apic_read(entry->apic, 0x10 + R + pin*2); \ | ||
234 | reg ACTION; \ | ||
235 | io_apic_modify(entry->apic, reg); \ | ||
236 | FINAL; \ | ||
237 | if (!entry->next) \ | ||
238 | break; \ | ||
239 | entry = irq_2_pin + entry->next; \ | ||
240 | } \ | ||
241 | } | ||
242 | |||
243 | union entry_union { | 270 | union entry_union { |
244 | struct { u32 w1, w2; }; | 271 | struct { u32 w1, w2; }; |
245 | struct IO_APIC_route_entry entry; | 272 | struct IO_APIC_route_entry entry; |
@@ -299,59 +326,71 @@ static void ioapic_mask_entry(int apic, int pin) | |||
299 | static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector) | 326 | static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector) |
300 | { | 327 | { |
301 | int apic, pin; | 328 | int apic, pin; |
302 | struct irq_pin_list *entry = irq_2_pin + irq; | 329 | struct irq_cfg *cfg; |
330 | struct irq_pin_list *entry; | ||
303 | 331 | ||
304 | BUG_ON(irq >= NR_IRQS); | 332 | cfg = irq_cfg(irq); |
333 | entry = cfg->irq_2_pin; | ||
305 | for (;;) { | 334 | for (;;) { |
306 | unsigned int reg; | 335 | unsigned int reg; |
336 | |||
337 | if (!entry) | ||
338 | break; | ||
339 | |||
307 | apic = entry->apic; | 340 | apic = entry->apic; |
308 | pin = entry->pin; | 341 | pin = entry->pin; |
309 | if (pin == -1) | 342 | #ifdef CONFIG_INTR_REMAP |
310 | break; | ||
311 | /* | 343 | /* |
312 | * With interrupt-remapping, destination information comes | 344 | * With interrupt-remapping, destination information comes |
313 | * from interrupt-remapping table entry. | 345 | * from interrupt-remapping table entry. |
314 | */ | 346 | */ |
315 | if (!irq_remapped(irq)) | 347 | if (!irq_remapped(irq)) |
316 | io_apic_write(apic, 0x11 + pin*2, dest); | 348 | io_apic_write(apic, 0x11 + pin*2, dest); |
349 | #else | ||
350 | io_apic_write(apic, 0x11 + pin*2, dest); | ||
351 | #endif | ||
317 | reg = io_apic_read(apic, 0x10 + pin*2); | 352 | reg = io_apic_read(apic, 0x10 + pin*2); |
318 | reg &= ~IO_APIC_REDIR_VECTOR_MASK; | 353 | reg &= ~IO_APIC_REDIR_VECTOR_MASK; |
319 | reg |= vector; | 354 | reg |= vector; |
320 | io_apic_modify(apic, reg); | 355 | io_apic_modify(apic, 0x10 + pin*2, reg); |
321 | if (!entry->next) | 356 | if (!entry->next) |
322 | break; | 357 | break; |
323 | entry = irq_2_pin + entry->next; | 358 | entry = entry->next; |
324 | } | 359 | } |
325 | } | 360 | } |
326 | 361 | ||
362 | static int assign_irq_vector(int irq, cpumask_t mask); | ||
363 | |||
327 | static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) | 364 | static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) |
328 | { | 365 | { |
329 | struct irq_cfg *cfg = irq_cfg + irq; | 366 | struct irq_cfg *cfg; |
330 | unsigned long flags; | 367 | unsigned long flags; |
331 | unsigned int dest; | 368 | unsigned int dest; |
332 | cpumask_t tmp; | 369 | cpumask_t tmp; |
370 | struct irq_desc *desc; | ||
333 | 371 | ||
334 | cpus_and(tmp, mask, cpu_online_map); | 372 | cpus_and(tmp, mask, cpu_online_map); |
335 | if (cpus_empty(tmp)) | 373 | if (cpus_empty(tmp)) |
336 | return; | 374 | return; |
337 | 375 | ||
376 | cfg = irq_cfg(irq); | ||
338 | if (assign_irq_vector(irq, mask)) | 377 | if (assign_irq_vector(irq, mask)) |
339 | return; | 378 | return; |
340 | 379 | ||
341 | cpus_and(tmp, cfg->domain, mask); | 380 | cpus_and(tmp, cfg->domain, mask); |
342 | dest = cpu_mask_to_apicid(tmp); | 381 | dest = cpu_mask_to_apicid(tmp); |
343 | |||
344 | /* | 382 | /* |
345 | * Only the high 8 bits are valid. | 383 | * Only the high 8 bits are valid. |
346 | */ | 384 | */ |
347 | dest = SET_APIC_LOGICAL_ID(dest); | 385 | dest = SET_APIC_LOGICAL_ID(dest); |
348 | 386 | ||
387 | desc = irq_to_desc(irq); | ||
349 | spin_lock_irqsave(&ioapic_lock, flags); | 388 | spin_lock_irqsave(&ioapic_lock, flags); |
350 | __target_IO_APIC_irq(irq, dest, cfg->vector); | 389 | __target_IO_APIC_irq(irq, dest, cfg->vector); |
351 | irq_desc[irq].affinity = mask; | 390 | desc->affinity = mask; |
352 | spin_unlock_irqrestore(&ioapic_lock, flags); | 391 | spin_unlock_irqrestore(&ioapic_lock, flags); |
353 | } | 392 | } |
354 | #endif | 393 | #endif /* CONFIG_SMP */ |
355 | 394 | ||
356 | /* | 395 | /* |
357 | * The common case is 1:1 IRQ<->pin mappings. Sometimes there are | 396 | * The common case is 1:1 IRQ<->pin mappings. Sometimes there are |
@@ -360,19 +399,30 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) | |||
360 | */ | 399 | */ |
361 | static void add_pin_to_irq(unsigned int irq, int apic, int pin) | 400 | static void add_pin_to_irq(unsigned int irq, int apic, int pin) |
362 | { | 401 | { |
363 | static int first_free_entry = NR_IRQS; | 402 | struct irq_cfg *cfg; |
364 | struct irq_pin_list *entry = irq_2_pin + irq; | 403 | struct irq_pin_list *entry; |
404 | |||
405 | /* first time this irq_cfg is referenced, so allocate it */ | ||
406 | cfg = irq_cfg_alloc(irq); | ||
407 | entry = cfg->irq_2_pin; | ||
408 | if (!entry) { | ||
409 | entry = get_one_free_irq_2_pin(); | ||
410 | cfg->irq_2_pin = entry; | ||
411 | entry->apic = apic; | ||
412 | entry->pin = pin; | ||
413 | return; | ||
414 | } | ||
365 | 415 | ||
366 | BUG_ON(irq >= NR_IRQS); | 416 | while (entry->next) { |
367 | while (entry->next) | 417 | /* not again, please */ |
368 | entry = irq_2_pin + entry->next; | 418 | if (entry->apic == apic && entry->pin == pin) |
419 | return; | ||
369 | 420 | ||
370 | if (entry->pin != -1) { | 421 | entry = entry->next; |
371 | entry->next = first_free_entry; | ||
372 | entry = irq_2_pin + entry->next; | ||
373 | if (++first_free_entry >= PIN_MAP_SIZE) | ||
374 | panic("io_apic.c: ran out of irq_2_pin entries!"); | ||
375 | } | 422 | } |
423 | |||
424 | entry->next = get_one_free_irq_2_pin(); | ||
425 | entry = entry->next; | ||
376 | entry->apic = apic; | 426 | entry->apic = apic; |
377 | entry->pin = pin; | 427 | entry->pin = pin; |
378 | } | 428 | } |
@@ -384,30 +434,86 @@ static void __init replace_pin_at_irq(unsigned int irq, | |||
384 | int oldapic, int oldpin, | 434 | int oldapic, int oldpin, |
385 | int newapic, int newpin) | 435 | int newapic, int newpin) |
386 | { | 436 | { |
387 | struct irq_pin_list *entry = irq_2_pin + irq; | 437 | struct irq_cfg *cfg = irq_cfg(irq); |
438 | struct irq_pin_list *entry = cfg->irq_2_pin; | ||
439 | int replaced = 0; | ||
388 | 440 | ||
389 | while (1) { | 441 | while (entry) { |
390 | if (entry->apic == oldapic && entry->pin == oldpin) { | 442 | if (entry->apic == oldapic && entry->pin == oldpin) { |
391 | entry->apic = newapic; | 443 | entry->apic = newapic; |
392 | entry->pin = newpin; | 444 | entry->pin = newpin; |
393 | } | 445 | replaced = 1; |
394 | if (!entry->next) | 446 | /* every one is different, right? */ |
395 | break; | 447 | break; |
396 | entry = irq_2_pin + entry->next; | 448 | } |
449 | entry = entry->next; | ||
450 | } | ||
451 | |||
452 | /* why? call replace before add? */ | ||
453 | if (!replaced) | ||
454 | add_pin_to_irq(irq, newapic, newpin); | ||
455 | } | ||
456 | |||
457 | static inline void io_apic_modify_irq(unsigned int irq, | ||
458 | int mask_and, int mask_or, | ||
459 | void (*final)(struct irq_pin_list *entry)) | ||
460 | { | ||
461 | int pin; | ||
462 | struct irq_cfg *cfg; | ||
463 | struct irq_pin_list *entry; | ||
464 | |||
465 | cfg = irq_cfg(irq); | ||
466 | for (entry = cfg->irq_2_pin; entry != NULL; entry = entry->next) { | ||
467 | unsigned int reg; | ||
468 | pin = entry->pin; | ||
469 | reg = io_apic_read(entry->apic, 0x10 + pin * 2); | ||
470 | reg &= mask_and; | ||
471 | reg |= mask_or; | ||
472 | io_apic_modify(entry->apic, 0x10 + pin * 2, reg); | ||
473 | if (final) | ||
474 | final(entry); | ||
397 | } | 475 | } |
398 | } | 476 | } |
399 | 477 | ||
478 | static void __unmask_IO_APIC_irq(unsigned int irq) | ||
479 | { | ||
480 | io_apic_modify_irq(irq, ~IO_APIC_REDIR_MASKED, 0, NULL); | ||
481 | } | ||
400 | 482 | ||
401 | #define DO_ACTION(name,R,ACTION, FINAL) \ | 483 | #ifdef CONFIG_X86_64 |
402 | \ | 484 | void io_apic_sync(struct irq_pin_list *entry) |
403 | static void name##_IO_APIC_irq (unsigned int irq) \ | 485 | { |
404 | __DO_ACTION(R, ACTION, FINAL) | 486 | /* |
487 | * Synchronize the IO-APIC and the CPU by doing | ||
488 | * a dummy read from the IO-APIC | ||
489 | */ | ||
490 | struct io_apic __iomem *io_apic; | ||
491 | io_apic = io_apic_base(entry->apic); | ||
492 | readl(&io_apic->data); | ||
493 | } | ||
405 | 494 | ||
406 | /* mask = 1 */ | 495 | static void __mask_IO_APIC_irq(unsigned int irq) |
407 | DO_ACTION(__mask, 0, |= IO_APIC_REDIR_MASKED, io_apic_sync(entry->apic)) | 496 | { |
497 | io_apic_modify_irq(irq, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync); | ||
498 | } | ||
499 | #else /* CONFIG_X86_32 */ | ||
500 | static void __mask_IO_APIC_irq(unsigned int irq) | ||
501 | { | ||
502 | io_apic_modify_irq(irq, ~0, IO_APIC_REDIR_MASKED, NULL); | ||
503 | } | ||
408 | 504 | ||
409 | /* mask = 0 */ | 505 | static void __mask_and_edge_IO_APIC_irq(unsigned int irq) |
410 | DO_ACTION(__unmask, 0, &= ~IO_APIC_REDIR_MASKED, ) | 506 | { |
507 | io_apic_modify_irq(irq, ~IO_APIC_REDIR_LEVEL_TRIGGER, | ||
508 | IO_APIC_REDIR_MASKED, NULL); | ||
509 | } | ||
510 | |||
511 | static void __unmask_and_level_IO_APIC_irq(unsigned int irq) | ||
512 | { | ||
513 | io_apic_modify_irq(irq, ~IO_APIC_REDIR_MASKED, | ||
514 | IO_APIC_REDIR_LEVEL_TRIGGER, NULL); | ||
515 | } | ||
516 | #endif /* CONFIG_X86_32 */ | ||
411 | 517 | ||
412 | static void mask_IO_APIC_irq (unsigned int irq) | 518 | static void mask_IO_APIC_irq (unsigned int irq) |
413 | { | 519 | { |
@@ -450,6 +556,68 @@ static void clear_IO_APIC (void) | |||
450 | clear_IO_APIC_pin(apic, pin); | 556 | clear_IO_APIC_pin(apic, pin); |
451 | } | 557 | } |
452 | 558 | ||
559 | #if !defined(CONFIG_SMP) && defined(CONFIG_X86_32) | ||
560 | void send_IPI_self(int vector) | ||
561 | { | ||
562 | unsigned int cfg; | ||
563 | |||
564 | /* | ||
565 | * Wait for idle. | ||
566 | */ | ||
567 | apic_wait_icr_idle(); | ||
568 | cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL; | ||
569 | /* | ||
570 | * Send the IPI. The write to APIC_ICR fires this off. | ||
571 | */ | ||
572 | apic_write(APIC_ICR, cfg); | ||
573 | } | ||
574 | #endif /* !CONFIG_SMP && CONFIG_X86_32*/ | ||
575 | |||
576 | #ifdef CONFIG_X86_32 | ||
577 | /* | ||
578 | * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to | ||
579 | * specific CPU-side IRQs. | ||
580 | */ | ||
581 | |||
582 | #define MAX_PIRQS 8 | ||
583 | static int pirq_entries [MAX_PIRQS]; | ||
584 | static int pirqs_enabled; | ||
585 | |||
586 | static int __init ioapic_pirq_setup(char *str) | ||
587 | { | ||
588 | int i, max; | ||
589 | int ints[MAX_PIRQS+1]; | ||
590 | |||
591 | get_options(str, ARRAY_SIZE(ints), ints); | ||
592 | |||
593 | for (i = 0; i < MAX_PIRQS; i++) | ||
594 | pirq_entries[i] = -1; | ||
595 | |||
596 | pirqs_enabled = 1; | ||
597 | apic_printk(APIC_VERBOSE, KERN_INFO | ||
598 | "PIRQ redirection, working around broken MP-BIOS.\n"); | ||
599 | max = MAX_PIRQS; | ||
600 | if (ints[0] < MAX_PIRQS) | ||
601 | max = ints[0]; | ||
602 | |||
603 | for (i = 0; i < max; i++) { | ||
604 | apic_printk(APIC_VERBOSE, KERN_DEBUG | ||
605 | "... PIRQ%d -> IRQ %d\n", i, ints[i+1]); | ||
606 | /* | ||
607 | * PIRQs are mapped upside down, usually. | ||
608 | */ | ||
609 | pirq_entries[MAX_PIRQS-i-1] = ints[i+1]; | ||
610 | } | ||
611 | return 1; | ||
612 | } | ||
613 | |||
614 | __setup("pirq=", ioapic_pirq_setup); | ||
615 | #endif /* CONFIG_X86_32 */ | ||
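
ioapic_pirq_setup() above pulls up to MAX_PIRQS integers out of the pirq= option with get_options(), whose ints[0] slot carries the count, and stores them reversed because PIRQs are mapped upside down. A stand-alone sketch of that parse, with a strtok() loop standing in for get_options():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define MAX_PIRQS 8

    static int pirq_entries[MAX_PIRQS];

    /* Parse a "pirq=9,11,10" style value the way the setup handler does:
     * ints[0] holds the count and PIRQ0 lands at the top of the table. */
    static void parse_pirq(const char *str)
    {
        int ints[MAX_PIRQS + 1] = { 0 };
        char buf[64], *tok;
        int i, n = 0, max;

        for (i = 0; i < MAX_PIRQS; i++)
            pirq_entries[i] = -1;

        snprintf(buf, sizeof(buf), "%s", str);
        for (tok = strtok(buf, ","); tok && n < MAX_PIRQS; tok = strtok(NULL, ","))
            ints[++n] = atoi(tok);
        ints[0] = n;

        max = ints[0] < MAX_PIRQS ? ints[0] : MAX_PIRQS;
        for (i = 0; i < max; i++)
            pirq_entries[MAX_PIRQS - i - 1] = ints[i + 1];   /* upside down */
    }

    int main(void)
    {
        int i;

        parse_pirq("9,11,10");
        for (i = 0; i < MAX_PIRQS; i++)
            printf("pirq_entries[%d] = %d\n", i, pirq_entries[i]);
        return 0;
    }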
616 | |||
617 | #ifdef CONFIG_INTR_REMAP | ||
618 | /* I/O APIC RTE contents at the OS boot up */ | ||
619 | static struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS]; | ||
620 | |||
453 | /* | 621 | /* |
454 | * Saves and masks all the unmasked IO-APIC RTE's | 622 | * Saves and masks all the unmasked IO-APIC RTE's |
455 | */ | 623 | */ |
@@ -474,7 +642,7 @@ int save_mask_IO_APIC_setup(void) | |||
474 | kzalloc(sizeof(struct IO_APIC_route_entry) * | 642 | kzalloc(sizeof(struct IO_APIC_route_entry) * |
475 | nr_ioapic_registers[apic], GFP_KERNEL); | 643 | nr_ioapic_registers[apic], GFP_KERNEL); |
476 | if (!early_ioapic_entries[apic]) | 644 | if (!early_ioapic_entries[apic]) |
477 | return -ENOMEM; | 645 | goto nomem; |
478 | } | 646 | } |
479 | 647 | ||
480 | for (apic = 0; apic < nr_ioapics; apic++) | 648 | for (apic = 0; apic < nr_ioapics; apic++) |
@@ -488,17 +656,31 @@ int save_mask_IO_APIC_setup(void) | |||
488 | ioapic_write_entry(apic, pin, entry); | 656 | ioapic_write_entry(apic, pin, entry); |
489 | } | 657 | } |
490 | } | 658 | } |
659 | |||
491 | return 0; | 660 | return 0; |
661 | |||
662 | nomem: | ||
663 | while (apic >= 0) | ||
664 | kfree(early_ioapic_entries[apic--]); | ||
665 | memset(early_ioapic_entries, 0, | ||
666 | sizeof(early_ioapic_entries)); | ||
667 | |||
668 | return -ENOMEM; | ||
492 | } | 669 | } |
493 | 670 | ||
494 | void restore_IO_APIC_setup(void) | 671 | void restore_IO_APIC_setup(void) |
495 | { | 672 | { |
496 | int apic, pin; | 673 | int apic, pin; |
497 | 674 | ||
498 | for (apic = 0; apic < nr_ioapics; apic++) | 675 | for (apic = 0; apic < nr_ioapics; apic++) { |
676 | if (!early_ioapic_entries[apic]) | ||
677 | break; | ||
499 | for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) | 678 | for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) |
500 | ioapic_write_entry(apic, pin, | 679 | ioapic_write_entry(apic, pin, |
501 | early_ioapic_entries[apic][pin]); | 680 | early_ioapic_entries[apic][pin]); |
681 | kfree(early_ioapic_entries[apic]); | ||
682 | early_ioapic_entries[apic] = NULL; | ||
683 | } | ||
502 | } | 684 | } |
503 | 685 | ||
504 | void reinit_intr_remapped_IO_APIC(int intr_remapping) | 686 | void reinit_intr_remapped_IO_APIC(int intr_remapping) |
@@ -512,25 +694,7 @@ void reinit_intr_remapped_IO_APIC(int intr_remapping) | |||
512 | */ | 694 | */ |
513 | restore_IO_APIC_setup(); | 695 | restore_IO_APIC_setup(); |
514 | } | 696 | } |
515 | 697 | #endif | |
516 | int skip_ioapic_setup; | ||
517 | int ioapic_force; | ||
518 | |||
519 | static int __init parse_noapic(char *str) | ||
520 | { | ||
521 | disable_ioapic_setup(); | ||
522 | return 0; | ||
523 | } | ||
524 | early_param("noapic", parse_noapic); | ||
525 | |||
526 | /* Actually the next is obsolete, but keep it for paranoid reasons -AK */ | ||
527 | static int __init disable_timer_pin_setup(char *arg) | ||
528 | { | ||
529 | disable_timer_pin_1 = 1; | ||
530 | return 1; | ||
531 | } | ||
532 | __setup("disable_timer_pin_1", disable_timer_pin_setup); | ||
533 | |||
534 | 698 | ||
535 | /* | 699 | /* |
536 | * Find the IRQ entry number of a certain pin. | 700 | * Find the IRQ entry number of a certain pin. |
@@ -634,22 +798,54 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin) | |||
634 | best_guess = irq; | 798 | best_guess = irq; |
635 | } | 799 | } |
636 | } | 800 | } |
637 | BUG_ON(best_guess >= NR_IRQS); | ||
638 | return best_guess; | 801 | return best_guess; |
639 | } | 802 | } |
640 | 803 | ||
804 | EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector); | ||
805 | |||
806 | #if defined(CONFIG_EISA) || defined(CONFIG_MCA) | ||
807 | /* | ||
808 | * EISA Edge/Level control register, ELCR | ||
809 | */ | ||
810 | static int EISA_ELCR(unsigned int irq) | ||
811 | { | ||
812 | if (irq < 16) { | ||
813 | unsigned int port = 0x4d0 + (irq >> 3); | ||
814 | return (inb(port) >> (irq & 7)) & 1; | ||
815 | } | ||
816 | apic_printk(APIC_VERBOSE, KERN_INFO | ||
817 | "Broken MPtable reports ISA irq %d\n", irq); | ||
818 | return 0; | ||
819 | } | ||
820 | |||
821 | #endif | ||
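
EISA_ELCR() above selects the edge/level bit for an ISA/EISA IRQ from the two ELCR ports, 0x4d0 for IRQs 0-7 and 0x4d1 for IRQs 8-15. A pure-function sketch of the same port/bit arithmetic that decodes from a caller-supplied snapshot of the two registers instead of inb(), so it runs anywhere:

    #include <stdio.h>

    /* elcr[0] mirrors port 0x4d0 (IRQ 0-7), elcr[1] mirrors port 0x4d1
     * (IRQ 8-15). Returns 1 for level trigger, 0 for edge, the same
     * convention as the ELCR itself. */
    static int elcr_trigger(const unsigned char elcr[2], unsigned int irq)
    {
        if (irq >= 16)
            return 0;                   /* broken MP table: treat as edge */
        return (elcr[irq >> 3] >> (irq & 7)) & 1;
    }

    int main(void)
    {
        /* Example snapshot: IRQ 9 and IRQ 11 level, everything else edge. */
        unsigned char elcr[2] = { 0x00, 0x0a };
        unsigned int irq;

        for (irq = 8; irq < 12; irq++)
            printf("IRQ %u: %s\n", irq,
                   elcr_trigger(elcr, irq) ? "level" : "edge");
        return 0;
    }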
822 | |||
641 | /* ISA interrupts are always polarity zero edge triggered, | 823 | /* ISA interrupts are always polarity zero edge triggered, |
642 | * when listed as conforming in the MP table. */ | 824 | * when listed as conforming in the MP table. */ |
643 | 825 | ||
644 | #define default_ISA_trigger(idx) (0) | 826 | #define default_ISA_trigger(idx) (0) |
645 | #define default_ISA_polarity(idx) (0) | 827 | #define default_ISA_polarity(idx) (0) |
646 | 828 | ||
829 | /* EISA interrupts are always polarity zero and can be edge or level | ||
830 | * trigger depending on the ELCR value. If an interrupt is listed as | ||
831 | * EISA conforming in the MP table, that means its trigger type must | ||
832 | * be read in from the ELCR */ | ||
833 | |||
834 | #define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mp_srcbusirq)) | ||
835 | #define default_EISA_polarity(idx) default_ISA_polarity(idx) | ||
836 | |||
647 | /* PCI interrupts are always polarity one level triggered, | 837 | /* PCI interrupts are always polarity one level triggered, |
648 | * when listed as conforming in the MP table. */ | 838 | * when listed as conforming in the MP table. */ |
649 | 839 | ||
650 | #define default_PCI_trigger(idx) (1) | 840 | #define default_PCI_trigger(idx) (1) |
651 | #define default_PCI_polarity(idx) (1) | 841 | #define default_PCI_polarity(idx) (1) |
652 | 842 | ||
843 | /* MCA interrupts are always polarity zero level triggered, | ||
844 | * when listed as conforming in the MP table. */ | ||
845 | |||
846 | #define default_MCA_trigger(idx) (1) | ||
847 | #define default_MCA_polarity(idx) default_ISA_polarity(idx) | ||
848 | |||
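
With EISA and MCA folded into this file, the default trigger and polarity now depend on the source bus type; the macros above encode the conventions. A small sketch tabulating them (bus names follow the MP-table types, and the EISA trigger would really come from the ELCR lookup shown earlier):

    #include <stdio.h>

    enum bus_type { BUS_ISA, BUS_EISA, BUS_PCI, BUS_MCA };

    struct rte_default {
        int trigger;                    /* 1 = level, 0 = edge          */
        int polarity;                   /* 1 = active low, 0 = active high */
    };

    /* elcr_bit stands in for the per-IRQ ELCR lookup used for EISA. */
    static struct rte_default bus_default(enum bus_type bus, int elcr_bit)
    {
        switch (bus) {
        case BUS_ISA:  return (struct rte_default){ 0, 0 };
        case BUS_EISA: return (struct rte_default){ elcr_bit, 0 };
        case BUS_PCI:  return (struct rte_default){ 1, 1 };
        case BUS_MCA:  return (struct rte_default){ 1, 0 };
        }
        return (struct rte_default){ 1, 0 };    /* unknown bus: assume level */
    }

    int main(void)
    {
        static const char *name[] = { "ISA", "EISA", "PCI", "MCA" };
        int b;

        for (b = BUS_ISA; b <= BUS_MCA; b++) {
            struct rte_default d = bus_default(b, 1);
            printf("%-4s trigger=%d polarity=%d\n", name[b], d.trigger, d.polarity);
        }
        return 0;
    }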
653 | static int MPBIOS_polarity(int idx) | 849 | static int MPBIOS_polarity(int idx) |
654 | { | 850 | { |
655 | int bus = mp_irqs[idx].mp_srcbus; | 851 | int bus = mp_irqs[idx].mp_srcbus; |
@@ -707,6 +903,36 @@ static int MPBIOS_trigger(int idx) | |||
707 | trigger = default_ISA_trigger(idx); | 903 | trigger = default_ISA_trigger(idx); |
708 | else | 904 | else |
709 | trigger = default_PCI_trigger(idx); | 905 | trigger = default_PCI_trigger(idx); |
906 | #if defined(CONFIG_EISA) || defined(CONFIG_MCA) | ||
907 | switch (mp_bus_id_to_type[bus]) { | ||
908 | case MP_BUS_ISA: /* ISA pin */ | ||
909 | { | ||
910 | /* set before the switch */ | ||
911 | break; | ||
912 | } | ||
913 | case MP_BUS_EISA: /* EISA pin */ | ||
914 | { | ||
915 | trigger = default_EISA_trigger(idx); | ||
916 | break; | ||
917 | } | ||
918 | case MP_BUS_PCI: /* PCI pin */ | ||
919 | { | ||
920 | /* set before the switch */ | ||
921 | break; | ||
922 | } | ||
923 | case MP_BUS_MCA: /* MCA pin */ | ||
924 | { | ||
925 | trigger = default_MCA_trigger(idx); | ||
926 | break; | ||
927 | } | ||
928 | default: | ||
929 | { | ||
930 | printk(KERN_WARNING "broken BIOS!!\n"); | ||
931 | trigger = 1; | ||
932 | break; | ||
933 | } | ||
934 | } | ||
935 | #endif | ||
710 | break; | 936 | break; |
711 | case 1: /* edge */ | 937 | case 1: /* edge */ |
712 | { | 938 | { |
@@ -744,6 +970,7 @@ static inline int irq_trigger(int idx) | |||
744 | return MPBIOS_trigger(idx); | 970 | return MPBIOS_trigger(idx); |
745 | } | 971 | } |
746 | 972 | ||
973 | int (*ioapic_renumber_irq)(int ioapic, int irq); | ||
747 | static int pin_2_irq(int idx, int apic, int pin) | 974 | static int pin_2_irq(int idx, int apic, int pin) |
748 | { | 975 | { |
749 | int irq, i; | 976 | int irq, i; |
@@ -765,8 +992,32 @@ static int pin_2_irq(int idx, int apic, int pin) | |||
765 | while (i < apic) | 992 | while (i < apic) |
766 | irq += nr_ioapic_registers[i++]; | 993 | irq += nr_ioapic_registers[i++]; |
767 | irq += pin; | 994 | irq += pin; |
995 | /* | ||
996 | * For MPS mode, so far only needed by ES7000 platform | ||
997 | */ | ||
998 | if (ioapic_renumber_irq) | ||
999 | irq = ioapic_renumber_irq(apic, irq); | ||
768 | } | 1000 | } |
769 | BUG_ON(irq >= NR_IRQS); | 1001 | |
1002 | #ifdef CONFIG_X86_32 | ||
1003 | /* | ||
1004 | * PCI IRQ command line redirection. Yes, limits are hardcoded. | ||
1005 | */ | ||
1006 | if ((pin >= 16) && (pin <= 23)) { | ||
1007 | if (pirq_entries[pin-16] != -1) { | ||
1008 | if (!pirq_entries[pin-16]) { | ||
1009 | apic_printk(APIC_VERBOSE, KERN_DEBUG | ||
1010 | "disabling PIRQ%d\n", pin-16); | ||
1011 | } else { | ||
1012 | irq = pirq_entries[pin-16]; | ||
1013 | apic_printk(APIC_VERBOSE, KERN_DEBUG | ||
1014 | "using PIRQ%d -> IRQ %d\n", | ||
1015 | pin-16, irq); | ||
1016 | } | ||
1017 | } | ||
1018 | } | ||
1019 | #endif | ||
1020 | |||
770 | return irq; | 1021 | return irq; |
771 | } | 1022 | } |
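
For non-ISA buses, pin_2_irq() above computes the global IRQ number as the pin plus the register counts of every earlier IO-APIC, optionally passed through the ES7000 renumber hook and, on 32-bit, the pirq= override table. A sketch of that arithmetic for the non-ISA path, with made-up register counts:

    #include <stdio.h>

    #define NR_APICS 3
    static const int nr_registers[NR_APICS] = { 24, 24, 16 };

    static int pirq_entries[8] = { -1, -1, -1, -1, -1, -1, -1, -1 };

    /* Global IRQ = pin + the register counts of every earlier IO-APIC,
     * then a pirq= override for pins 16..23 as on the 32-bit side. */
    static int pin_to_irq(int apic, int pin)
    {
        int irq = pin;
        int i;

        for (i = 0; i < apic; i++)
            irq += nr_registers[i];

        if (pin >= 16 && pin <= 23 && pirq_entries[pin - 16] > 0)
            irq = pirq_entries[pin - 16];

        return irq;
    }

    int main(void)
    {
        pirq_entries[7] = 9;            /* pretend the command line redirected pin 23 */

        printf("apic 0 pin  3 -> IRQ %d\n", pin_to_irq(0, 3));   /*  3 */
        printf("apic 1 pin  0 -> IRQ %d\n", pin_to_irq(1, 0));   /* 24 */
        printf("apic 2 pin  5 -> IRQ %d\n", pin_to_irq(2, 5));   /* 53 */
        printf("apic 0 pin 23 -> IRQ %d\n", pin_to_irq(0, 23));  /*  9 */
        return 0;
    }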
772 | 1023 | ||
@@ -801,8 +1052,7 @@ static int __assign_irq_vector(int irq, cpumask_t mask) | |||
801 | int cpu; | 1052 | int cpu; |
802 | struct irq_cfg *cfg; | 1053 | struct irq_cfg *cfg; |
803 | 1054 | ||
804 | BUG_ON((unsigned)irq >= NR_IRQS); | 1055 | cfg = irq_cfg(irq); |
805 | cfg = &irq_cfg[irq]; | ||
806 | 1056 | ||
807 | /* Only try and allocate irqs on cpus that are present */ | 1057 | /* Only try and allocate irqs on cpus that are present */ |
808 | cpus_and(mask, mask, cpu_online_map); | 1058 | cpus_and(mask, mask, cpu_online_map); |
@@ -837,8 +1087,13 @@ next: | |||
837 | } | 1087 | } |
838 | if (unlikely(current_vector == vector)) | 1088 | if (unlikely(current_vector == vector)) |
839 | continue; | 1089 | continue; |
1090 | #ifdef CONFIG_X86_64 | ||
840 | if (vector == IA32_SYSCALL_VECTOR) | 1091 | if (vector == IA32_SYSCALL_VECTOR) |
841 | goto next; | 1092 | goto next; |
1093 | #else | ||
1094 | if (vector == SYSCALL_VECTOR) | ||
1095 | goto next; | ||
1096 | #endif | ||
842 | for_each_cpu_mask_nr(new_cpu, new_mask) | 1097 | for_each_cpu_mask_nr(new_cpu, new_mask) |
843 | if (per_cpu(vector_irq, new_cpu)[vector] != -1) | 1098 | if (per_cpu(vector_irq, new_cpu)[vector] != -1) |
844 | goto next; | 1099 | goto next; |
@@ -875,8 +1130,7 @@ static void __clear_irq_vector(int irq) | |||
875 | cpumask_t mask; | 1130 | cpumask_t mask; |
876 | int cpu, vector; | 1131 | int cpu, vector; |
877 | 1132 | ||
878 | BUG_ON((unsigned)irq >= NR_IRQS); | 1133 | cfg = irq_cfg(irq); |
879 | cfg = &irq_cfg[irq]; | ||
880 | BUG_ON(!cfg->vector); | 1134 | BUG_ON(!cfg->vector); |
881 | 1135 | ||
882 | vector = cfg->vector; | 1136 | vector = cfg->vector; |
@@ -893,12 +1147,13 @@ void __setup_vector_irq(int cpu) | |||
893 | /* Initialize vector_irq on a new cpu */ | 1147 | /* Initialize vector_irq on a new cpu */ |
894 | /* This function must be called with vector_lock held */ | 1148 | /* This function must be called with vector_lock held */ |
895 | int irq, vector; | 1149 | int irq, vector; |
1150 | struct irq_cfg *cfg; | ||
896 | 1151 | ||
897 | /* Mark the inuse vectors */ | 1152 | /* Mark the inuse vectors */ |
898 | for (irq = 0; irq < NR_IRQS; ++irq) { | 1153 | for_each_irq_cfg(irq, cfg) { |
899 | if (!cpu_isset(cpu, irq_cfg[irq].domain)) | 1154 | if (!cpu_isset(cpu, cfg->domain)) |
900 | continue; | 1155 | continue; |
901 | vector = irq_cfg[irq].vector; | 1156 | vector = cfg->vector; |
902 | per_cpu(vector_irq, cpu)[vector] = irq; | 1157 | per_cpu(vector_irq, cpu)[vector] = irq; |
903 | } | 1158 | } |
904 | /* Mark the free vectors */ | 1159 | /* Mark the free vectors */ |
@@ -906,7 +1161,9 @@ void __setup_vector_irq(int cpu) | |||
906 | irq = per_cpu(vector_irq, cpu)[vector]; | 1161 | irq = per_cpu(vector_irq, cpu)[vector]; |
907 | if (irq < 0) | 1162 | if (irq < 0) |
908 | continue; | 1163 | continue; |
909 | if (!cpu_isset(cpu, irq_cfg[irq].domain)) | 1164 | |
1165 | cfg = irq_cfg(irq); | ||
1166 | if (!cpu_isset(cpu, cfg->domain)) | ||
910 | per_cpu(vector_irq, cpu)[vector] = -1; | 1167 | per_cpu(vector_irq, cpu)[vector] = -1; |
911 | } | 1168 | } |
912 | } | 1169 | } |
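
__setup_vector_irq() rebuilds a CPU's per-cpu vector_irq[] table in two passes: mark the vector of every IRQ whose domain contains the CPU, then sweep the table and drop entries whose IRQ no longer targets it. A sketch with a tiny fixed-size table; cfgs[], the bitmask domain and setup_vector_irq() are toy stand-ins for the kernel's irq_cfg and cpumask machinery:

    #include <stdio.h>

    #define NR_VECTORS 16
    #define NR_IRQS     4

    struct cfg {
        int vector;
        unsigned int domain;            /* bitmask of CPUs this IRQ targets */
    };

    static struct cfg cfgs[NR_IRQS] = {
        { 3, 0x1 }, { 5, 0x2 }, { 7, 0x3 }, { 9, 0x1 },
    };

    static int vector_irq[NR_VECTORS];

    static void setup_vector_irq(int cpu)
    {
        int irq, vector;

        /* Mark the in-use vectors of IRQs whose domain contains this CPU. */
        for (irq = 0; irq < NR_IRQS; irq++) {
            if (!(cfgs[irq].domain & (1u << cpu)))
                continue;
            vector_irq[cfgs[irq].vector] = irq;
        }
        /* Drop stale entries that no longer belong to this CPU. */
        for (vector = 0; vector < NR_VECTORS; vector++) {
            irq = vector_irq[vector];
            if (irq < 0)
                continue;
            if (!(cfgs[irq].domain & (1u << cpu)))
                vector_irq[vector] = -1;
        }
    }

    int main(void)
    {
        int v;

        for (v = 0; v < NR_VECTORS; v++)
            vector_irq[v] = -1;
        vector_irq[5] = 1;              /* stale leftover from an earlier domain */

        setup_vector_irq(0);
        for (v = 0; v < NR_VECTORS; v++)
            if (vector_irq[v] != -1)
                printf("vector %2d -> IRQ %d\n", v, vector_irq[v]);
        return 0;
    }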
@@ -916,16 +1173,49 @@ static struct irq_chip ioapic_chip; | |||
916 | static struct irq_chip ir_ioapic_chip; | 1173 | static struct irq_chip ir_ioapic_chip; |
917 | #endif | 1174 | #endif |
918 | 1175 | ||
1176 | #define IOAPIC_AUTO -1 | ||
1177 | #define IOAPIC_EDGE 0 | ||
1178 | #define IOAPIC_LEVEL 1 | ||
1179 | |||
1180 | #ifdef CONFIG_X86_32 | ||
1181 | static inline int IO_APIC_irq_trigger(int irq) | ||
1182 | { | ||
1183 | int apic, idx, pin; | ||
1184 | |||
1185 | for (apic = 0; apic < nr_ioapics; apic++) { | ||
1186 | for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { | ||
1187 | idx = find_irq_entry(apic, pin, mp_INT); | ||
1188 | if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin))) | ||
1189 | return irq_trigger(idx); | ||
1190 | } | ||
1191 | } | ||
1192 | /* | ||
1193 | * nonexistent IRQs are edge default | ||
1194 | */ | ||
1195 | return 0; | ||
1196 | } | ||
1197 | #else | ||
1198 | static inline int IO_APIC_irq_trigger(int irq) | ||
1199 | { | ||
1200 | return 1; | ||
1201 | } | ||
1202 | #endif | ||
1203 | |||
919 | static void ioapic_register_intr(int irq, unsigned long trigger) | 1204 | static void ioapic_register_intr(int irq, unsigned long trigger) |
920 | { | 1205 | { |
921 | if (trigger) | 1206 | struct irq_desc *desc; |
922 | irq_desc[irq].status |= IRQ_LEVEL; | 1207 | |
1208 | desc = irq_to_desc(irq); | ||
1209 | |||
1210 | if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || | ||
1211 | trigger == IOAPIC_LEVEL) | ||
1212 | desc->status |= IRQ_LEVEL; | ||
923 | else | 1213 | else |
924 | irq_desc[irq].status &= ~IRQ_LEVEL; | 1214 | desc->status &= ~IRQ_LEVEL; |
925 | 1215 | ||
926 | #ifdef CONFIG_INTR_REMAP | 1216 | #ifdef CONFIG_INTR_REMAP |
927 | if (irq_remapped(irq)) { | 1217 | if (irq_remapped(irq)) { |
928 | irq_desc[irq].status |= IRQ_MOVE_PCNTXT; | 1218 | desc->status |= IRQ_MOVE_PCNTXT; |
929 | if (trigger) | 1219 | if (trigger) |
930 | set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, | 1220 | set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, |
931 | handle_fasteoi_irq, | 1221 | handle_fasteoi_irq, |
@@ -936,7 +1226,8 @@ static void ioapic_register_intr(int irq, unsigned long trigger) | |||
936 | return; | 1226 | return; |
937 | } | 1227 | } |
938 | #endif | 1228 | #endif |
939 | if (trigger) | 1229 | if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || |
1230 | trigger == IOAPIC_LEVEL) | ||
940 | set_irq_chip_and_handler_name(irq, &ioapic_chip, | 1231 | set_irq_chip_and_handler_name(irq, &ioapic_chip, |
941 | handle_fasteoi_irq, | 1232 | handle_fasteoi_irq, |
942 | "fasteoi"); | 1233 | "fasteoi"); |
@@ -1009,13 +1300,15 @@ static int setup_ioapic_entry(int apic, int irq, | |||
1009 | static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, | 1300 | static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, |
1010 | int trigger, int polarity) | 1301 | int trigger, int polarity) |
1011 | { | 1302 | { |
1012 | struct irq_cfg *cfg = irq_cfg + irq; | 1303 | struct irq_cfg *cfg; |
1013 | struct IO_APIC_route_entry entry; | 1304 | struct IO_APIC_route_entry entry; |
1014 | cpumask_t mask; | 1305 | cpumask_t mask; |
1015 | 1306 | ||
1016 | if (!IO_APIC_IRQ(irq)) | 1307 | if (!IO_APIC_IRQ(irq)) |
1017 | return; | 1308 | return; |
1018 | 1309 | ||
1310 | cfg = irq_cfg(irq); | ||
1311 | |||
1019 | mask = TARGET_CPUS; | 1312 | mask = TARGET_CPUS; |
1020 | if (assign_irq_vector(irq, mask)) | 1313 | if (assign_irq_vector(irq, mask)) |
1021 | return; | 1314 | return; |
@@ -1047,37 +1340,49 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, | |||
1047 | 1340 | ||
1048 | static void __init setup_IO_APIC_irqs(void) | 1341 | static void __init setup_IO_APIC_irqs(void) |
1049 | { | 1342 | { |
1050 | int apic, pin, idx, irq, first_notcon = 1; | 1343 | int apic, pin, idx, irq; |
1344 | int notcon = 0; | ||
1051 | 1345 | ||
1052 | apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); | 1346 | apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); |
1053 | 1347 | ||
1054 | for (apic = 0; apic < nr_ioapics; apic++) { | 1348 | for (apic = 0; apic < nr_ioapics; apic++) { |
1055 | for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { | 1349 | for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { |
1056 | |||
1057 | idx = find_irq_entry(apic,pin,mp_INT); | ||
1058 | if (idx == -1) { | ||
1059 | if (first_notcon) { | ||
1060 | apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mp_apicid, pin); | ||
1061 | first_notcon = 0; | ||
1062 | } else | ||
1063 | apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mp_apicid, pin); | ||
1064 | continue; | ||
1065 | } | ||
1066 | if (!first_notcon) { | ||
1067 | apic_printk(APIC_VERBOSE, " not connected.\n"); | ||
1068 | first_notcon = 1; | ||
1069 | } | ||
1070 | 1350 | ||
1071 | irq = pin_2_irq(idx, apic, pin); | 1351 | idx = find_irq_entry(apic, pin, mp_INT); |
1072 | add_pin_to_irq(irq, apic, pin); | 1352 | if (idx == -1) { |
1353 | if (!notcon) { | ||
1354 | notcon = 1; | ||
1355 | apic_printk(APIC_VERBOSE, | ||
1356 | KERN_DEBUG " %d-%d", | ||
1357 | mp_ioapics[apic].mp_apicid, | ||
1358 | pin); | ||
1359 | } else | ||
1360 | apic_printk(APIC_VERBOSE, " %d-%d", | ||
1361 | mp_ioapics[apic].mp_apicid, | ||
1362 | pin); | ||
1363 | continue; | ||
1364 | } | ||
1365 | if (notcon) { | ||
1366 | apic_printk(APIC_VERBOSE, | ||
1367 | " (apicid-pin) not connected\n"); | ||
1368 | notcon = 0; | ||
1369 | } | ||
1073 | 1370 | ||
1074 | setup_IO_APIC_irq(apic, pin, irq, | 1371 | irq = pin_2_irq(idx, apic, pin); |
1075 | irq_trigger(idx), irq_polarity(idx)); | 1372 | #ifdef CONFIG_X86_32 |
1076 | } | 1373 | if (multi_timer_check(apic, irq)) |
1374 | continue; | ||
1375 | #endif | ||
1376 | add_pin_to_irq(irq, apic, pin); | ||
1377 | |||
1378 | setup_IO_APIC_irq(apic, pin, irq, | ||
1379 | irq_trigger(idx), irq_polarity(idx)); | ||
1380 | } | ||
1077 | } | 1381 | } |
1078 | 1382 | ||
1079 | if (!first_notcon) | 1383 | if (notcon) |
1080 | apic_printk(APIC_VERBOSE, " not connected.\n"); | 1384 | apic_printk(APIC_VERBOSE, |
1385 | " (apicid-pin) not connected\n"); | ||
1081 | } | 1386 | } |
1082 | 1387 | ||
1083 | /* | 1388 | /* |
@@ -1088,8 +1393,10 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin, | |||
1088 | { | 1393 | { |
1089 | struct IO_APIC_route_entry entry; | 1394 | struct IO_APIC_route_entry entry; |
1090 | 1395 | ||
1396 | #ifdef CONFIG_INTR_REMAP | ||
1091 | if (intr_remapping_enabled) | 1397 | if (intr_remapping_enabled) |
1092 | return; | 1398 | return; |
1399 | #endif | ||
1093 | 1400 | ||
1094 | memset(&entry, 0, sizeof(entry)); | 1401 | memset(&entry, 0, sizeof(entry)); |
1095 | 1402 | ||
@@ -1124,7 +1431,10 @@ __apicdebuginit(void) print_IO_APIC(void) | |||
1124 | union IO_APIC_reg_00 reg_00; | 1431 | union IO_APIC_reg_00 reg_00; |
1125 | union IO_APIC_reg_01 reg_01; | 1432 | union IO_APIC_reg_01 reg_01; |
1126 | union IO_APIC_reg_02 reg_02; | 1433 | union IO_APIC_reg_02 reg_02; |
1434 | union IO_APIC_reg_03 reg_03; | ||
1127 | unsigned long flags; | 1435 | unsigned long flags; |
1436 | struct irq_cfg *cfg; | ||
1437 | unsigned int irq; | ||
1128 | 1438 | ||
1129 | if (apic_verbosity == APIC_QUIET) | 1439 | if (apic_verbosity == APIC_QUIET) |
1130 | return; | 1440 | return; |
@@ -1147,12 +1457,16 @@ __apicdebuginit(void) print_IO_APIC(void) | |||
1147 | reg_01.raw = io_apic_read(apic, 1); | 1457 | reg_01.raw = io_apic_read(apic, 1); |
1148 | if (reg_01.bits.version >= 0x10) | 1458 | if (reg_01.bits.version >= 0x10) |
1149 | reg_02.raw = io_apic_read(apic, 2); | 1459 | reg_02.raw = io_apic_read(apic, 2); |
1460 | if (reg_01.bits.version >= 0x20) | ||
1461 | reg_03.raw = io_apic_read(apic, 3); | ||
1150 | spin_unlock_irqrestore(&ioapic_lock, flags); | 1462 | spin_unlock_irqrestore(&ioapic_lock, flags); |
1151 | 1463 | ||
1152 | printk("\n"); | 1464 | printk("\n"); |
1153 | printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid); | 1465 | printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid); |
1154 | printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw); | 1466 | printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw); |
1155 | printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID); | 1467 | printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID); |
1468 | printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type); | ||
1469 | printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS); | ||
1156 | 1470 | ||
1157 | printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)®_01); | 1471 | printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)®_01); |
1158 | printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries); | 1472 | printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries); |
@@ -1160,11 +1474,27 @@ __apicdebuginit(void) print_IO_APIC(void) | |||
1160 | printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ); | 1474 | printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ); |
1161 | printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version); | 1475 | printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version); |
1162 | 1476 | ||
1163 | if (reg_01.bits.version >= 0x10) { | 1477 | /* |
1478 | * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02, | ||
1479 | * but the value of reg_02 is read as the previous read register | ||
1480 | * value, so ignore it if reg_02 == reg_01. | ||
1481 | */ | ||
1482 | if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) { | ||
1164 | printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw); | 1483 | printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw); |
1165 | printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration); | 1484 | printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration); |
1166 | } | 1485 | } |
1167 | 1486 | ||
1487 | /* | ||
1488 | * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02 | ||
1489 | * or reg_03, but the value of reg_0[23] is read as the previous read | ||
1490 | * register value, so ignore it if reg_03 == reg_0[12]. | ||
1491 | */ | ||
1492 | if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw && | ||
1493 | reg_03.raw != reg_01.raw) { | ||
1494 | printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw); | ||
1495 | printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT); | ||
1496 | } | ||
1497 | |||
1168 | printk(KERN_DEBUG ".... IRQ redirection table:\n"); | 1498 | printk(KERN_DEBUG ".... IRQ redirection table:\n"); |
1169 | 1499 | ||
1170 | printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol" | 1500 | printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol" |
@@ -1193,16 +1523,16 @@ __apicdebuginit(void) print_IO_APIC(void) | |||
1193 | } | 1523 | } |
1194 | } | 1524 | } |
1195 | printk(KERN_DEBUG "IRQ to pin mappings:\n"); | 1525 | printk(KERN_DEBUG "IRQ to pin mappings:\n"); |
1196 | for (i = 0; i < NR_IRQS; i++) { | 1526 | for_each_irq_cfg(irq, cfg) { |
1197 | struct irq_pin_list *entry = irq_2_pin + i; | 1527 | struct irq_pin_list *entry = cfg->irq_2_pin; |
1198 | if (entry->pin < 0) | 1528 | if (!entry) |
1199 | continue; | 1529 | continue; |
1200 | printk(KERN_DEBUG "IRQ%d ", i); | 1530 | printk(KERN_DEBUG "IRQ%d ", irq); |
1201 | for (;;) { | 1531 | for (;;) { |
1202 | printk("-> %d:%d", entry->apic, entry->pin); | 1532 | printk("-> %d:%d", entry->apic, entry->pin); |
1203 | if (!entry->next) | 1533 | if (!entry->next) |
1204 | break; | 1534 | break; |
1205 | entry = irq_2_pin + entry->next; | 1535 | entry = entry->next; |
1206 | } | 1536 | } |
1207 | printk("\n"); | 1537 | printk("\n"); |
1208 | } | 1538 | } |
@@ -1236,7 +1566,7 @@ __apicdebuginit(void) print_APIC_bitfield(int base) | |||
1236 | __apicdebuginit(void) print_local_APIC(void *dummy) | 1566 | __apicdebuginit(void) print_local_APIC(void *dummy) |
1237 | { | 1567 | { |
1238 | unsigned int v, ver, maxlvt; | 1568 | unsigned int v, ver, maxlvt; |
1239 | unsigned long icr; | 1569 | u64 icr; |
1240 | 1570 | ||
1241 | if (apic_verbosity == APIC_QUIET) | 1571 | if (apic_verbosity == APIC_QUIET) |
1242 | return; | 1572 | return; |
@@ -1253,20 +1583,31 @@ __apicdebuginit(void) print_local_APIC(void *dummy) | |||
1253 | v = apic_read(APIC_TASKPRI); | 1583 | v = apic_read(APIC_TASKPRI); |
1254 | printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK); | 1584 | printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK); |
1255 | 1585 | ||
1256 | v = apic_read(APIC_ARBPRI); | 1586 | if (APIC_INTEGRATED(ver)) { /* !82489DX */ |
1257 | printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v, | 1587 | if (!APIC_XAPIC(ver)) { |
1258 | v & APIC_ARBPRI_MASK); | 1588 | v = apic_read(APIC_ARBPRI); |
1259 | v = apic_read(APIC_PROCPRI); | 1589 | printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v, |
1260 | printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v); | 1590 | v & APIC_ARBPRI_MASK); |
1591 | } | ||
1592 | v = apic_read(APIC_PROCPRI); | ||
1593 | printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v); | ||
1594 | } | ||
1595 | |||
1596 | /* | ||
1597 | * Remote read supported only in the 82489DX and local APIC for | ||
1598 | * Pentium processors. | ||
1599 | */ | ||
1600 | if (!APIC_INTEGRATED(ver) || maxlvt == 3) { | ||
1601 | v = apic_read(APIC_RRR); | ||
1602 | printk(KERN_DEBUG "... APIC RRR: %08x\n", v); | ||
1603 | } | ||
1261 | 1604 | ||
1262 | v = apic_read(APIC_EOI); | ||
1263 | printk(KERN_DEBUG "... APIC EOI: %08x\n", v); | ||
1264 | v = apic_read(APIC_RRR); | ||
1265 | printk(KERN_DEBUG "... APIC RRR: %08x\n", v); | ||
1266 | v = apic_read(APIC_LDR); | 1605 | v = apic_read(APIC_LDR); |
1267 | printk(KERN_DEBUG "... APIC LDR: %08x\n", v); | 1606 | printk(KERN_DEBUG "... APIC LDR: %08x\n", v); |
1268 | v = apic_read(APIC_DFR); | 1607 | if (!x2apic_enabled()) { |
1269 | printk(KERN_DEBUG "... APIC DFR: %08x\n", v); | 1608 | v = apic_read(APIC_DFR); |
1609 | printk(KERN_DEBUG "... APIC DFR: %08x\n", v); | ||
1610 | } | ||
1270 | v = apic_read(APIC_SPIV); | 1611 | v = apic_read(APIC_SPIV); |
1271 | printk(KERN_DEBUG "... APIC SPIV: %08x\n", v); | 1612 | printk(KERN_DEBUG "... APIC SPIV: %08x\n", v); |
1272 | 1613 | ||
@@ -1277,8 +1618,13 @@ __apicdebuginit(void) print_local_APIC(void *dummy) | |||
1277 | printk(KERN_DEBUG "... APIC IRR field:\n"); | 1618 | printk(KERN_DEBUG "... APIC IRR field:\n"); |
1278 | print_APIC_bitfield(APIC_IRR); | 1619 | print_APIC_bitfield(APIC_IRR); |
1279 | 1620 | ||
1280 | v = apic_read(APIC_ESR); | 1621 | if (APIC_INTEGRATED(ver)) { /* !82489DX */ |
1281 | printk(KERN_DEBUG "... APIC ESR: %08x\n", v); | 1622 | if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ |
1623 | apic_write(APIC_ESR, 0); | ||
1624 | |||
1625 | v = apic_read(APIC_ESR); | ||
1626 | printk(KERN_DEBUG "... APIC ESR: %08x\n", v); | ||
1627 | } | ||
1282 | 1628 | ||
1283 | icr = apic_icr_read(); | 1629 | icr = apic_icr_read(); |
1284 | printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr); | 1630 | printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr); |
@@ -1312,7 +1658,12 @@ __apicdebuginit(void) print_local_APIC(void *dummy) | |||
1312 | 1658 | ||
1313 | __apicdebuginit(void) print_all_local_APICs(void) | 1659 | __apicdebuginit(void) print_all_local_APICs(void) |
1314 | { | 1660 | { |
1315 | on_each_cpu(print_local_APIC, NULL, 1); | 1661 | int cpu; |
1662 | |||
1663 | preempt_disable(); | ||
1664 | for_each_online_cpu(cpu) | ||
1665 | smp_call_function_single(cpu, print_local_APIC, NULL, 1); | ||
1666 | preempt_enable(); | ||
1316 | } | 1667 | } |
1317 | 1668 | ||
1318 | __apicdebuginit(void) print_PIC(void) | 1669 | __apicdebuginit(void) print_PIC(void) |
@@ -1359,17 +1710,22 @@ __apicdebuginit(int) print_all_ICs(void) | |||
1359 | fs_initcall(print_all_ICs); | 1710 | fs_initcall(print_all_ICs); |
1360 | 1711 | ||
1361 | 1712 | ||
1713 | /* Where, if anywhere, the i8259 is connected in external int mode */ | ||
1714 | static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; | ||
1715 | |||
1362 | void __init enable_IO_APIC(void) | 1716 | void __init enable_IO_APIC(void) |
1363 | { | 1717 | { |
1364 | union IO_APIC_reg_01 reg_01; | 1718 | union IO_APIC_reg_01 reg_01; |
1365 | int i8259_apic, i8259_pin; | 1719 | int i8259_apic, i8259_pin; |
1366 | int i, apic; | 1720 | int apic; |
1367 | unsigned long flags; | 1721 | unsigned long flags; |
1368 | 1722 | ||
1369 | for (i = 0; i < PIN_MAP_SIZE; i++) { | 1723 | #ifdef CONFIG_X86_32 |
1370 | irq_2_pin[i].pin = -1; | 1724 | int i; |
1371 | irq_2_pin[i].next = 0; | 1725 | if (!pirqs_enabled) |
1372 | } | 1726 | for (i = 0; i < MAX_PIRQS; i++) |
1727 | pirq_entries[i] = -1; | ||
1728 | #endif | ||
1373 | 1729 | ||
1374 | /* | 1730 | /* |
1375 | * The number of IO-APIC IRQ registers (== #pins): | 1731 | * The number of IO-APIC IRQ registers (== #pins): |
@@ -1399,6 +1755,10 @@ void __init enable_IO_APIC(void) | |||
1399 | } | 1755 | } |
1400 | found_i8259: | 1756 | found_i8259: |
1401 | /* Look to see what if the MP table has reported the ExtINT */ | 1757 | /* Look to see what if the MP table has reported the ExtINT */ |
1758 | /* If we could not find the appropriate pin by looking at the ioapic | ||
1759 | * the i8259 probably is not connected to the ioapic, but give the | ||
1760 | * mptable a chance anyway. | ||
1761 | */ | ||
1402 | i8259_pin = find_isa_irq_pin(0, mp_ExtINT); | 1762 | i8259_pin = find_isa_irq_pin(0, mp_ExtINT); |
1403 | i8259_apic = find_isa_irq_apic(0, mp_ExtINT); | 1763 | i8259_apic = find_isa_irq_apic(0, mp_ExtINT); |
1404 | /* Trust the MP table if nothing is setup in the hardware */ | 1764 | /* Trust the MP table if nothing is setup in the hardware */ |
@@ -1458,6 +1818,133 @@ void disable_IO_APIC(void) | |||
1458 | disconnect_bsp_APIC(ioapic_i8259.pin != -1); | 1818 | disconnect_bsp_APIC(ioapic_i8259.pin != -1); |
1459 | } | 1819 | } |
1460 | 1820 | ||
1821 | #ifdef CONFIG_X86_32 | ||
1822 | /* | ||
1823 | * function to set the IO-APIC physical IDs based on the | ||
1824 | * values stored in the MPC table. | ||
1825 | * | ||
1826 | * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999 | ||
1827 | */ | ||
1828 | |||
1829 | static void __init setup_ioapic_ids_from_mpc(void) | ||
1830 | { | ||
1831 | union IO_APIC_reg_00 reg_00; | ||
1832 | physid_mask_t phys_id_present_map; | ||
1833 | int apic; | ||
1834 | int i; | ||
1835 | unsigned char old_id; | ||
1836 | unsigned long flags; | ||
1837 | |||
1838 | if (x86_quirks->setup_ioapic_ids && x86_quirks->setup_ioapic_ids()) | ||
1839 | return; | ||
1840 | |||
1841 | /* | ||
1842 | * Don't check I/O APIC IDs for xAPIC systems. They have | ||
1843 | * no meaning without the serial APIC bus. | ||
1844 | */ | ||
1845 | if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) | ||
1846 | || APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) | ||
1847 | return; | ||
1848 | /* | ||
1849 | * This is broken; anything with a real cpu count has to | ||
1850 | * circumvent this idiocy regardless. | ||
1851 | */ | ||
1852 | phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map); | ||
1853 | |||
1854 | /* | ||
1855 | * Set the IOAPIC ID to the value stored in the MPC table. | ||
1856 | */ | ||
1857 | for (apic = 0; apic < nr_ioapics; apic++) { | ||
1858 | |||
1859 | /* Read the register 0 value */ | ||
1860 | spin_lock_irqsave(&ioapic_lock, flags); | ||
1861 | reg_00.raw = io_apic_read(apic, 0); | ||
1862 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
1863 | |||
1864 | old_id = mp_ioapics[apic].mp_apicid; | ||
1865 | |||
1866 | if (mp_ioapics[apic].mp_apicid >= get_physical_broadcast()) { | ||
1867 | printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n", | ||
1868 | apic, mp_ioapics[apic].mp_apicid); | ||
1869 | printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", | ||
1870 | reg_00.bits.ID); | ||
1871 | mp_ioapics[apic].mp_apicid = reg_00.bits.ID; | ||
1872 | } | ||
1873 | |||
1874 | /* | ||
1875 | * Sanity check, is the ID really free? Every APIC in a | ||
1876 | * system must have a unique ID or we get lots of nice | ||
1877 | * 'stuck on smp_invalidate_needed IPI wait' messages. | ||
1878 | */ | ||
1879 | if (check_apicid_used(phys_id_present_map, | ||
1880 | mp_ioapics[apic].mp_apicid)) { | ||
1881 | printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n", | ||
1882 | apic, mp_ioapics[apic].mp_apicid); | ||
1883 | for (i = 0; i < get_physical_broadcast(); i++) | ||
1884 | if (!physid_isset(i, phys_id_present_map)) | ||
1885 | break; | ||
1886 | if (i >= get_physical_broadcast()) | ||
1887 | panic("Max APIC ID exceeded!\n"); | ||
1888 | printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", | ||
1889 | i); | ||
1890 | physid_set(i, phys_id_present_map); | ||
1891 | mp_ioapics[apic].mp_apicid = i; | ||
1892 | } else { | ||
1893 | physid_mask_t tmp; | ||
1894 | tmp = apicid_to_cpu_present(mp_ioapics[apic].mp_apicid); | ||
1895 | apic_printk(APIC_VERBOSE, "Setting %d in the " | ||
1896 | "phys_id_present_map\n", | ||
1897 | mp_ioapics[apic].mp_apicid); | ||
1898 | physids_or(phys_id_present_map, phys_id_present_map, tmp); | ||
1899 | } | ||
1900 | |||
1901 | |||
1902 | /* | ||
1903 | * We need to adjust the IRQ routing table | ||
1904 | * if the ID changed. | ||
1905 | */ | ||
1906 | if (old_id != mp_ioapics[apic].mp_apicid) | ||
1907 | for (i = 0; i < mp_irq_entries; i++) | ||
1908 | if (mp_irqs[i].mp_dstapic == old_id) | ||
1909 | mp_irqs[i].mp_dstapic | ||
1910 | = mp_ioapics[apic].mp_apicid; | ||
1911 | |||
1912 | /* | ||
1913 | * Read the right value from the MPC table and | ||
1914 | * write it into the ID register. | ||
1915 | */ | ||
1916 | apic_printk(APIC_VERBOSE, KERN_INFO | ||
1917 | "...changing IO-APIC physical APIC ID to %d ...", | ||
1918 | mp_ioapics[apic].mp_apicid); | ||
1919 | |||
1920 | reg_00.bits.ID = mp_ioapics[apic].mp_apicid; | ||
1921 | spin_lock_irqsave(&ioapic_lock, flags); | ||
1922 | io_apic_write(apic, 0, reg_00.raw); | ||
1923 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
1924 | |||
1925 | /* | ||
1926 | * Sanity check | ||
1927 | */ | ||
1928 | spin_lock_irqsave(&ioapic_lock, flags); | ||
1929 | reg_00.raw = io_apic_read(apic, 0); | ||
1930 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
1931 | if (reg_00.bits.ID != mp_ioapics[apic].mp_apicid) | ||
1932 | printk("could not set ID!\n"); | ||
1933 | else | ||
1934 | apic_printk(APIC_VERBOSE, " ok.\n"); | ||
1935 | } | ||
1936 | } | ||
1937 | #endif | ||
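
setup_ioapic_ids_from_mpc() checks each IO-APIC ID from the MP table against the map of physical IDs already in use; on a clash it grabs the first free ID below the broadcast limit and then patches every routing entry that still names the old ID. A sketch of just the pick-a-free-ID step over a plain bitmask (claim_apic_id and BROADCAST_ID are illustrative names):

    #include <stdio.h>

    #define BROADCAST_ID 15             /* IDs 0..14 usable, 15 is broadcast */

    /* Return the requested ID if free, else the first free ID below the
     * broadcast limit, or -1 if none is left. Marks the chosen ID used. */
    static int claim_apic_id(unsigned int *used, int wanted)
    {
        int id = wanted;

        if (*used & (1u << id)) {       /* clash: the BIOS reused an ID */
            for (id = 0; id < BROADCAST_ID; id++)
                if (!(*used & (1u << id)))
                    break;
            if (id >= BROADCAST_ID)
                return -1;              /* the kernel panics at this point */
        }
        *used |= 1u << id;
        return id;
    }

    int main(void)
    {
        unsigned int used = 0x0f;       /* CPUs already own IDs 0-3 */

        printf("IO-APIC wants 2 -> gets %d\n", claim_apic_id(&used, 2));
        printf("IO-APIC wants 8 -> gets %d\n", claim_apic_id(&used, 8));
        return 0;
    }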
1938 | |||
1939 | int no_timer_check __initdata; | ||
1940 | |||
1941 | static int __init notimercheck(char *s) | ||
1942 | { | ||
1943 | no_timer_check = 1; | ||
1944 | return 1; | ||
1945 | } | ||
1946 | __setup("no_timer_check", notimercheck); | ||
1947 | |||
1461 | /* | 1948 | /* |
1462 | * There is a nasty bug in some older SMP boards, their mptable lies | 1949 | * There is a nasty bug in some older SMP boards, their mptable lies |
1463 | * about the timer IRQ. We do the following to work around the situation: | 1950 | * about the timer IRQ. We do the following to work around the situation: |
@@ -1471,6 +1958,9 @@ static int __init timer_irq_works(void) | |||
1471 | unsigned long t1 = jiffies; | 1958 | unsigned long t1 = jiffies; |
1472 | unsigned long flags; | 1959 | unsigned long flags; |
1473 | 1960 | ||
1961 | if (no_timer_check) | ||
1962 | return 1; | ||
1963 | |||
1474 | local_save_flags(flags); | 1964 | local_save_flags(flags); |
1475 | local_irq_enable(); | 1965 | local_irq_enable(); |
1476 | /* Let ten ticks pass... */ | 1966 | /* Let ten ticks pass... */ |
@@ -1531,9 +2021,11 @@ static unsigned int startup_ioapic_irq(unsigned int irq) | |||
1531 | return was_pending; | 2021 | return was_pending; |
1532 | } | 2022 | } |
1533 | 2023 | ||
2024 | #ifdef CONFIG_X86_64 | ||
1534 | static int ioapic_retrigger_irq(unsigned int irq) | 2025 | static int ioapic_retrigger_irq(unsigned int irq) |
1535 | { | 2026 | { |
1536 | struct irq_cfg *cfg = &irq_cfg[irq]; | 2027 | |
2028 | struct irq_cfg *cfg = irq_cfg(irq); | ||
1537 | unsigned long flags; | 2029 | unsigned long flags; |
1538 | 2030 | ||
1539 | spin_lock_irqsave(&vector_lock, flags); | 2031 | spin_lock_irqsave(&vector_lock, flags); |
@@ -1542,6 +2034,14 @@ static int ioapic_retrigger_irq(unsigned int irq) | |||
1542 | 2034 | ||
1543 | return 1; | 2035 | return 1; |
1544 | } | 2036 | } |
2037 | #else | ||
2038 | static int ioapic_retrigger_irq(unsigned int irq) | ||
2039 | { | ||
2040 | send_IPI_self(irq_cfg(irq)->vector); | ||
2041 | |||
2042 | return 1; | ||
2043 | } | ||
2044 | #endif | ||
1545 | 2045 | ||
1546 | /* | 2046 | /* |
1547 | * Level and edge triggered IO-APIC interrupts need different handling, | 2047 | * Level and edge triggered IO-APIC interrupts need different handling, |
@@ -1580,11 +2080,11 @@ static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration); | |||
1580 | */ | 2080 | */ |
1581 | static void migrate_ioapic_irq(int irq, cpumask_t mask) | 2081 | static void migrate_ioapic_irq(int irq, cpumask_t mask) |
1582 | { | 2082 | { |
1583 | struct irq_cfg *cfg = irq_cfg + irq; | 2083 | struct irq_cfg *cfg; |
1584 | struct irq_desc *desc = irq_desc + irq; | 2084 | struct irq_desc *desc; |
1585 | cpumask_t tmp, cleanup_mask; | 2085 | cpumask_t tmp, cleanup_mask; |
1586 | struct irte irte; | 2086 | struct irte irte; |
1587 | int modify_ioapic_rte = desc->status & IRQ_LEVEL; | 2087 | int modify_ioapic_rte; |
1588 | unsigned int dest; | 2088 | unsigned int dest; |
1589 | unsigned long flags; | 2089 | unsigned long flags; |
1590 | 2090 | ||
@@ -1598,9 +2098,12 @@ static void migrate_ioapic_irq(int irq, cpumask_t mask) | |||
1598 | if (assign_irq_vector(irq, mask)) | 2098 | if (assign_irq_vector(irq, mask)) |
1599 | return; | 2099 | return; |
1600 | 2100 | ||
2101 | cfg = irq_cfg(irq); | ||
1601 | cpus_and(tmp, cfg->domain, mask); | 2102 | cpus_and(tmp, cfg->domain, mask); |
1602 | dest = cpu_mask_to_apicid(tmp); | 2103 | dest = cpu_mask_to_apicid(tmp); |
1603 | 2104 | ||
2105 | desc = irq_to_desc(irq); | ||
2106 | modify_ioapic_rte = desc->status & IRQ_LEVEL; | ||
1604 | if (modify_ioapic_rte) { | 2107 | if (modify_ioapic_rte) { |
1605 | spin_lock_irqsave(&ioapic_lock, flags); | 2108 | spin_lock_irqsave(&ioapic_lock, flags); |
1606 | __target_IO_APIC_irq(irq, dest, cfg->vector); | 2109 | __target_IO_APIC_irq(irq, dest, cfg->vector); |
@@ -1622,18 +2125,19 @@ static void migrate_ioapic_irq(int irq, cpumask_t mask) | |||
1622 | cfg->move_in_progress = 0; | 2125 | cfg->move_in_progress = 0; |
1623 | } | 2126 | } |
1624 | 2127 | ||
1625 | irq_desc[irq].affinity = mask; | 2128 | desc->affinity = mask; |
1626 | } | 2129 | } |
1627 | 2130 | ||
1628 | static int migrate_irq_remapped_level(int irq) | 2131 | static int migrate_irq_remapped_level(int irq) |
1629 | { | 2132 | { |
1630 | int ret = -1; | 2133 | int ret = -1; |
2134 | struct irq_desc *desc = irq_to_desc(irq); | ||
1631 | 2135 | ||
1632 | mask_IO_APIC_irq(irq); | 2136 | mask_IO_APIC_irq(irq); |
1633 | 2137 | ||
1634 | if (io_apic_level_ack_pending(irq)) { | 2138 | if (io_apic_level_ack_pending(irq)) { |
1635 | /* | 2139 | /* |
1636 | * Interrupt in progress. Migrating irq now will change the | 2140 | * Interrupt in progress. Migrating irq now will change the |
1637 | * vector information in the IO-APIC RTE and that will confuse | 2141 | * vector information in the IO-APIC RTE and that will confuse |
1638 | * the EOI broadcast performed by cpu. | 2142 | * the EOI broadcast performed by cpu. |
1639 | * So, delay the irq migration to the next instance. | 2143 | * So, delay the irq migration to the next instance. |
@@ -1643,11 +2147,11 @@ static int migrate_irq_remapped_level(int irq) | |||
1643 | } | 2147 | } |
1644 | 2148 | ||
1645 | /* everything is clear. we have the right of way */ | 2149 | /* everything is clear. we have the right of way */ |
1646 | migrate_ioapic_irq(irq, irq_desc[irq].pending_mask); | 2150 | migrate_ioapic_irq(irq, desc->pending_mask); |
1647 | 2151 | ||
1648 | ret = 0; | 2152 | ret = 0; |
1649 | irq_desc[irq].status &= ~IRQ_MOVE_PENDING; | 2153 | desc->status &= ~IRQ_MOVE_PENDING; |
1650 | cpus_clear(irq_desc[irq].pending_mask); | 2154 | cpus_clear(desc->pending_mask); |
1651 | 2155 | ||
1652 | unmask: | 2156 | unmask: |
1653 | unmask_IO_APIC_irq(irq); | 2157 | unmask_IO_APIC_irq(irq); |
@@ -1656,10 +2160,10 @@ unmask: | |||
1656 | 2160 | ||
1657 | static void ir_irq_migration(struct work_struct *work) | 2161 | static void ir_irq_migration(struct work_struct *work) |
1658 | { | 2162 | { |
1659 | int irq; | 2163 | unsigned int irq; |
2164 | struct irq_desc *desc; | ||
1660 | 2165 | ||
1661 | for (irq = 0; irq < NR_IRQS; irq++) { | 2166 | for_each_irq_desc(irq, desc) { |
1662 | struct irq_desc *desc = irq_desc + irq; | ||
1663 | if (desc->status & IRQ_MOVE_PENDING) { | 2167 | if (desc->status & IRQ_MOVE_PENDING) { |
1664 | unsigned long flags; | 2168 | unsigned long flags; |
1665 | 2169 | ||
@@ -1671,8 +2175,7 @@ static void ir_irq_migration(struct work_struct *work) | |||
1671 | continue; | 2175 | continue; |
1672 | } | 2176 | } |
1673 | 2177 | ||
1674 | desc->chip->set_affinity(irq, | 2178 | desc->chip->set_affinity(irq, desc->pending_mask); |
1675 | irq_desc[irq].pending_mask); | ||
1676 | spin_unlock_irqrestore(&desc->lock, flags); | 2179 | spin_unlock_irqrestore(&desc->lock, flags); |
1677 | } | 2180 | } |
1678 | } | 2181 | } |
@@ -1683,9 +2186,11 @@ static void ir_irq_migration(struct work_struct *work) | |||
1683 | */ | 2186 | */ |
1684 | static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) | 2187 | static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) |
1685 | { | 2188 | { |
1686 | if (irq_desc[irq].status & IRQ_LEVEL) { | 2189 | struct irq_desc *desc = irq_to_desc(irq); |
1687 | irq_desc[irq].status |= IRQ_MOVE_PENDING; | 2190 | |
1688 | irq_desc[irq].pending_mask = mask; | 2191 | if (desc->status & IRQ_LEVEL) { |
2192 | desc->status |= IRQ_MOVE_PENDING; | ||
2193 | desc->pending_mask = mask; | ||
1689 | migrate_irq_remapped_level(irq); | 2194 | migrate_irq_remapped_level(irq); |
1690 | return; | 2195 | return; |
1691 | } | 2196 | } |
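
For remapped level-triggered IRQs, set_ir_ioapic_affinity_irq() above cannot rewrite the RTE while an interrupt is still being acknowledged, so it records the target mask plus IRQ_MOVE_PENDING and lets migrate_irq_remapped_level() and the ir_irq_migration() worker retry later. A minimal sketch of that defer-and-retry pattern with toy flags instead of real descriptors and cpumasks:

    #include <stdio.h>

    #define MOVE_PENDING 0x1

    struct toy_desc {
        unsigned int status;
        unsigned int pending_mask;      /* requested target CPU mask */
        unsigned int affinity;          /* current CPU mask */
    };

    static int ack_pending;             /* stands in for io_apic_level_ack_pending() */

    /* Try to migrate now; if an ack is still outstanding, leave the flag
     * set so a later pass (the workqueue in the patch) can retry. */
    static void try_migrate(struct toy_desc *d)
    {
        if (!(d->status & MOVE_PENDING))
            return;
        if (ack_pending)
            return;                     /* delay to the next instance */
        d->affinity = d->pending_mask;
        d->status &= ~MOVE_PENDING;
        d->pending_mask = 0;
    }

    static void set_affinity(struct toy_desc *d, unsigned int mask)
    {
        d->status |= MOVE_PENDING;
        d->pending_mask = mask;
        try_migrate(d);
    }

    int main(void)
    {
        struct toy_desc d = { 0, 0, 0x1 };

        ack_pending = 1;
        set_affinity(&d, 0x4);          /* deferred: interrupt in progress */
        printf("after request: affinity=%#x pending=%d\n",
               d.affinity, !!(d.status & MOVE_PENDING));

        ack_pending = 0;
        try_migrate(&d);                /* the deferred work runs later */
        printf("after retry:   affinity=%#x pending=%d\n",
               d.affinity, !!(d.status & MOVE_PENDING));
        return 0;
    }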
@@ -1698,7 +2203,9 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) | |||
1698 | { | 2203 | { |
1699 | unsigned vector, me; | 2204 | unsigned vector, me; |
1700 | ack_APIC_irq(); | 2205 | ack_APIC_irq(); |
2206 | #ifdef CONFIG_X86_64 | ||
1701 | exit_idle(); | 2207 | exit_idle(); |
2208 | #endif | ||
1702 | irq_enter(); | 2209 | irq_enter(); |
1703 | 2210 | ||
1704 | me = smp_processor_id(); | 2211 | me = smp_processor_id(); |
@@ -1707,11 +2214,12 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) | |||
1707 | struct irq_desc *desc; | 2214 | struct irq_desc *desc; |
1708 | struct irq_cfg *cfg; | 2215 | struct irq_cfg *cfg; |
1709 | irq = __get_cpu_var(vector_irq)[vector]; | 2216 | irq = __get_cpu_var(vector_irq)[vector]; |
1710 | if (irq >= NR_IRQS) | 2217 | |
2218 | desc = irq_to_desc(irq); | ||
2219 | if (!desc) | ||
1711 | continue; | 2220 | continue; |
1712 | 2221 | ||
1713 | desc = irq_desc + irq; | 2222 | cfg = irq_cfg(irq); |
1714 | cfg = irq_cfg + irq; | ||
1715 | spin_lock(&desc->lock); | 2223 | spin_lock(&desc->lock); |
1716 | if (!cfg->move_cleanup_count) | 2224 | if (!cfg->move_cleanup_count) |
1717 | goto unlock; | 2225 | goto unlock; |
@@ -1730,7 +2238,7 @@ unlock: | |||
1730 | 2238 | ||
1731 | static void irq_complete_move(unsigned int irq) | 2239 | static void irq_complete_move(unsigned int irq) |
1732 | { | 2240 | { |
1733 | struct irq_cfg *cfg = irq_cfg + irq; | 2241 | struct irq_cfg *cfg = irq_cfg(irq); |
1734 | unsigned vector, me; | 2242 | unsigned vector, me; |
1735 | 2243 | ||
1736 | if (likely(!cfg->move_in_progress)) | 2244 | if (likely(!cfg->move_in_progress)) |
@@ -1769,19 +2277,50 @@ static void ack_apic_edge(unsigned int irq) | |||
1769 | ack_APIC_irq(); | 2277 | ack_APIC_irq(); |
1770 | } | 2278 | } |
1771 | 2279 | ||
2280 | atomic_t irq_mis_count; | ||
2281 | |||
1772 | static void ack_apic_level(unsigned int irq) | 2282 | static void ack_apic_level(unsigned int irq) |
1773 | { | 2283 | { |
2284 | #ifdef CONFIG_X86_32 | ||
2285 | unsigned long v; | ||
2286 | int i; | ||
2287 | #endif | ||
1774 | int do_unmask_irq = 0; | 2288 | int do_unmask_irq = 0; |
1775 | 2289 | ||
1776 | irq_complete_move(irq); | 2290 | irq_complete_move(irq); |
1777 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 2291 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
1778 | /* If we are moving the irq we need to mask it */ | 2292 | /* If we are moving the irq we need to mask it */ |
1779 | if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) { | 2293 | if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) { |
1780 | do_unmask_irq = 1; | 2294 | do_unmask_irq = 1; |
1781 | mask_IO_APIC_irq(irq); | 2295 | mask_IO_APIC_irq(irq); |
1782 | } | 2296 | } |
1783 | #endif | 2297 | #endif |
1784 | 2298 | ||
2299 | #ifdef CONFIG_X86_32 | ||
2300 | /* | ||
2301 | * It appears there is an erratum which affects at least version 0x11 | ||
2302 | * of I/O APIC (that's the 82093AA and cores integrated into various | ||
2303 | * chipsets). Under certain conditions a level-triggered interrupt is | ||
2304 | * erroneously delivered as edge-triggered one but the respective IRR | ||
2305 | * bit gets set nevertheless. As a result the I/O unit expects an EOI | ||
2306 | * message but it will never arrive and further interrupts are blocked | ||
2307 | * from the source. The exact reason is so far unknown, but the | ||
2308 | * phenomenon was observed when two consecutive interrupt requests | ||
2309 | * from a given source get delivered to the same CPU and the source is | ||
2310 | * temporarily disabled in between. | ||
2311 | * | ||
2312 | * A workaround is to simulate an EOI message manually. We achieve it | ||
2313 | * by setting the trigger mode to edge and then to level when the edge | ||
2314 | * trigger mode gets detected in the TMR of a local APIC for a | ||
2315 | * level-triggered interrupt. We mask the source for the time of the | ||
2316 | * operation to prevent an edge-triggered interrupt escaping meanwhile. | ||
2317 | * The idea is from Manfred Spraul. --macro | ||
2318 | */ | ||
2319 | i = irq_cfg(irq)->vector; | ||
2320 | |||
2321 | v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); | ||
2322 | #endif | ||
2323 | |||
1785 | /* | 2324 | /* |
1786 | * We must acknowledge the irq before we move it or the acknowledge will | 2325 | * We must acknowledge the irq before we move it or the acknowledge will |
1787 | * not propagate properly. | 2326 | * not propagate properly. |
@@ -1820,31 +2359,41 @@ static void ack_apic_level(unsigned int irq) | |||
1820 | move_masked_irq(irq); | 2359 | move_masked_irq(irq); |
1821 | unmask_IO_APIC_irq(irq); | 2360 | unmask_IO_APIC_irq(irq); |
1822 | } | 2361 | } |
2362 | |||
2363 | #ifdef CONFIG_X86_32 | ||
2364 | if (!(v & (1 << (i & 0x1f)))) { | ||
2365 | atomic_inc(&irq_mis_count); | ||
2366 | spin_lock(&ioapic_lock); | ||
2367 | __mask_and_edge_IO_APIC_irq(irq); | ||
2368 | __unmask_and_level_IO_APIC_irq(irq); | ||
2369 | spin_unlock(&ioapic_lock); | ||
2370 | } | ||
2371 | #endif | ||
1823 | } | 2372 | } |
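
The 32-bit tail of ack_apic_level() implements the I/O APIC erratum workaround described in the long comment above: before the EOI it samples the local APIC TMR bit for the IRQ's vector, and if the bit is clear for a level-triggered line it simulates the lost EOI by flipping the pin to edge and back to level under ioapic_lock. The register arithmetic is the fiddly part, so here is a pure sketch of it; the TMR is 256 bits spread over eight 32-bit words spaced 0x10 bytes apart starting at offset 0x180:

    #include <stdio.h>

    #define APIC_TMR 0x180              /* base offset of the TMR register block */

    /* For a vector 0..255, return the offset of the 32-bit TMR word that
     * holds its bit and the bit position inside that word, i.e. the
     * APIC_TMR + ((i & ~0x1f) >> 1) arithmetic from the hunk above. */
    static void tmr_locate(unsigned int vector,
                           unsigned int *reg_offset, unsigned int *bit)
    {
        *reg_offset = APIC_TMR + ((vector & ~0x1fu) >> 1);   /* (vector/32)*0x10 */
        *bit = vector & 0x1f;
    }

    int main(void)
    {
        unsigned int v, off, bit;

        for (v = 0; v < 256; v += 61) {
            tmr_locate(v, &off, &bit);
            printf("vector %3u -> TMR word at %#05x, bit %2u\n", v, off, bit);
        }
        return 0;
    }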
1824 | 2373 | ||
1825 | static struct irq_chip ioapic_chip __read_mostly = { | 2374 | static struct irq_chip ioapic_chip __read_mostly = { |
1826 | .name = "IO-APIC", | 2375 | .name = "IO-APIC", |
1827 | .startup = startup_ioapic_irq, | 2376 | .startup = startup_ioapic_irq, |
1828 | .mask = mask_IO_APIC_irq, | 2377 | .mask = mask_IO_APIC_irq, |
1829 | .unmask = unmask_IO_APIC_irq, | 2378 | .unmask = unmask_IO_APIC_irq, |
1830 | .ack = ack_apic_edge, | 2379 | .ack = ack_apic_edge, |
1831 | .eoi = ack_apic_level, | 2380 | .eoi = ack_apic_level, |
1832 | #ifdef CONFIG_SMP | 2381 | #ifdef CONFIG_SMP |
1833 | .set_affinity = set_ioapic_affinity_irq, | 2382 | .set_affinity = set_ioapic_affinity_irq, |
1834 | #endif | 2383 | #endif |
1835 | .retrigger = ioapic_retrigger_irq, | 2384 | .retrigger = ioapic_retrigger_irq, |
1836 | }; | 2385 | }; |
1837 | 2386 | ||
1838 | #ifdef CONFIG_INTR_REMAP | 2387 | #ifdef CONFIG_INTR_REMAP |
1839 | static struct irq_chip ir_ioapic_chip __read_mostly = { | 2388 | static struct irq_chip ir_ioapic_chip __read_mostly = { |
1840 | .name = "IR-IO-APIC", | 2389 | .name = "IR-IO-APIC", |
1841 | .startup = startup_ioapic_irq, | 2390 | .startup = startup_ioapic_irq, |
1842 | .mask = mask_IO_APIC_irq, | 2391 | .mask = mask_IO_APIC_irq, |
1843 | .unmask = unmask_IO_APIC_irq, | 2392 | .unmask = unmask_IO_APIC_irq, |
1844 | .ack = ack_x2apic_edge, | 2393 | .ack = ack_x2apic_edge, |
1845 | .eoi = ack_x2apic_level, | 2394 | .eoi = ack_x2apic_level, |
1846 | #ifdef CONFIG_SMP | 2395 | #ifdef CONFIG_SMP |
1847 | .set_affinity = set_ir_ioapic_affinity_irq, | 2396 | .set_affinity = set_ir_ioapic_affinity_irq, |
1848 | #endif | 2397 | #endif |
1849 | .retrigger = ioapic_retrigger_irq, | 2398 | .retrigger = ioapic_retrigger_irq, |
1850 | }; | 2399 | }; |
@@ -1853,6 +2402,8 @@ static struct irq_chip ir_ioapic_chip __read_mostly = { | |||
1853 | static inline void init_IO_APIC_traps(void) | 2402 | static inline void init_IO_APIC_traps(void) |
1854 | { | 2403 | { |
1855 | int irq; | 2404 | int irq; |
2405 | struct irq_desc *desc; | ||
2406 | struct irq_cfg *cfg; | ||
1856 | 2407 | ||
1857 | /* | 2408 | /* |
1858 | * NOTE! The local APIC isn't very good at handling | 2409 | * NOTE! The local APIC isn't very good at handling |
@@ -1865,8 +2416,8 @@ static inline void init_IO_APIC_traps(void) | |||
1865 | * Also, we've got to be careful not to trash gate | 2416 | * Also, we've got to be careful not to trash gate |
1866 | * 0x80, because int 0x80 is hm, kind of importantish. ;) | 2417 | * 0x80, because int 0x80 is hm, kind of importantish. ;) |
1867 | */ | 2418 | */ |
1868 | for (irq = 0; irq < NR_IRQS ; irq++) { | 2419 | for_each_irq_cfg(irq, cfg) { |
1869 | if (IO_APIC_IRQ(irq) && !irq_cfg[irq].vector) { | 2420 | if (IO_APIC_IRQ(irq) && !cfg->vector) { |
1870 | /* | 2421 | /* |
1871 | * Hmm.. We don't have an entry for this, | 2422 | * Hmm.. We don't have an entry for this, |
1872 | * so default to an old-fashioned 8259 | 2423 | * so default to an old-fashioned 8259 |
@@ -1874,27 +2425,33 @@ static inline void init_IO_APIC_traps(void) | |||
1874 | */ | 2425 | */ |
1875 | if (irq < 16) | 2426 | if (irq < 16) |
1876 | make_8259A_irq(irq); | 2427 | make_8259A_irq(irq); |
1877 | else | 2428 | else { |
2429 | desc = irq_to_desc(irq); | ||
1878 | /* Strange. Oh, well.. */ | 2430 | /* Strange. Oh, well.. */ |
1879 | irq_desc[irq].chip = &no_irq_chip; | 2431 | desc->chip = &no_irq_chip; |
2432 | } | ||
1880 | } | 2433 | } |
1881 | } | 2434 | } |
1882 | } | 2435 | } |
1883 | 2436 | ||
1884 | static void unmask_lapic_irq(unsigned int irq) | 2437 | /* |
2438 | * The local APIC irq-chip implementation: | ||
2439 | */ | ||
2440 | |||
2441 | static void mask_lapic_irq(unsigned int irq) | ||
1885 | { | 2442 | { |
1886 | unsigned long v; | 2443 | unsigned long v; |
1887 | 2444 | ||
1888 | v = apic_read(APIC_LVT0); | 2445 | v = apic_read(APIC_LVT0); |
1889 | apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED); | 2446 | apic_write(APIC_LVT0, v | APIC_LVT_MASKED); |
1890 | } | 2447 | } |
1891 | 2448 | ||
1892 | static void mask_lapic_irq(unsigned int irq) | 2449 | static void unmask_lapic_irq(unsigned int irq) |
1893 | { | 2450 | { |
1894 | unsigned long v; | 2451 | unsigned long v; |
1895 | 2452 | ||
1896 | v = apic_read(APIC_LVT0); | 2453 | v = apic_read(APIC_LVT0); |
1897 | apic_write(APIC_LVT0, v | APIC_LVT_MASKED); | 2454 | apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED); |
1898 | } | 2455 | } |
1899 | 2456 | ||
1900 | static void ack_lapic_irq (unsigned int irq) | 2457 | static void ack_lapic_irq (unsigned int irq) |
@@ -1911,7 +2468,10 @@ static struct irq_chip lapic_chip __read_mostly = { | |||
1911 | 2468 | ||
1912 | static void lapic_register_intr(int irq) | 2469 | static void lapic_register_intr(int irq) |
1913 | { | 2470 | { |
1914 | irq_desc[irq].status &= ~IRQ_LEVEL; | 2471 | struct irq_desc *desc; |
2472 | |||
2473 | desc = irq_to_desc(irq); | ||
2474 | desc->status &= ~IRQ_LEVEL; | ||
1915 | set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, | 2475 | set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, |
1916 | "edge"); | 2476 | "edge"); |
1917 | } | 2477 | } |
@@ -1919,19 +2479,19 @@ static void lapic_register_intr(int irq) | |||
1919 | static void __init setup_nmi(void) | 2479 | static void __init setup_nmi(void) |
1920 | { | 2480 | { |
1921 | /* | 2481 | /* |
1922 | * Dirty trick to enable the NMI watchdog ... | 2482 | * Dirty trick to enable the NMI watchdog ... |
1923 | * We put the 8259A master into AEOI mode and | 2483 | * We put the 8259A master into AEOI mode and |
1924 | * unmask on all local APICs LVT0 as NMI. | 2484 | * unmask on all local APICs LVT0 as NMI. |
1925 | * | 2485 | * |
1926 | * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire') | 2486 | * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire') |
1927 | * is from Maciej W. Rozycki - so we do not have to EOI from | 2487 | * is from Maciej W. Rozycki - so we do not have to EOI from |
1928 | * the NMI handler or the timer interrupt. | 2488 | * the NMI handler or the timer interrupt. |
1929 | */ | 2489 | */ |
1930 | printk(KERN_INFO "activating NMI Watchdog ..."); | 2490 | apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ..."); |
1931 | 2491 | ||
1932 | enable_NMI_through_LVT0(); | 2492 | enable_NMI_through_LVT0(); |
1933 | 2493 | ||
1934 | printk(" done.\n"); | 2494 | apic_printk(APIC_VERBOSE, " done.\n"); |
1935 | } | 2495 | } |
1936 | 2496 | ||
1937 | /* | 2497 | /* |
@@ -1948,12 +2508,17 @@ static inline void __init unlock_ExtINT_logic(void) | |||
1948 | unsigned char save_control, save_freq_select; | 2508 | unsigned char save_control, save_freq_select; |
1949 | 2509 | ||
1950 | pin = find_isa_irq_pin(8, mp_INT); | 2510 | pin = find_isa_irq_pin(8, mp_INT); |
2511 | if (pin == -1) { | ||
2512 | WARN_ON_ONCE(1); | ||
2513 | return; | ||
2514 | } | ||
1951 | apic = find_isa_irq_apic(8, mp_INT); | 2515 | apic = find_isa_irq_apic(8, mp_INT); |
1952 | if (pin == -1) | 2516 | if (apic == -1) { |
2517 | WARN_ON_ONCE(1); | ||
1953 | return; | 2518 | return; |
2519 | } | ||
1954 | 2520 | ||
1955 | entry0 = ioapic_read_entry(apic, pin); | 2521 | entry0 = ioapic_read_entry(apic, pin); |
1956 | |||
1957 | clear_IO_APIC_pin(apic, pin); | 2522 | clear_IO_APIC_pin(apic, pin); |
1958 | 2523 | ||
1959 | memset(&entry1, 0, sizeof(entry1)); | 2524 | memset(&entry1, 0, sizeof(entry1)); |
@@ -1988,23 +2553,38 @@ static inline void __init unlock_ExtINT_logic(void) | |||
1988 | ioapic_write_entry(apic, pin, entry0); | 2553 | ioapic_write_entry(apic, pin, entry0); |
1989 | } | 2554 | } |
1990 | 2555 | ||
2556 | static int disable_timer_pin_1 __initdata; | ||
2557 | /* Actually the next is obsolete, but keep it for paranoid reasons -AK */ | ||
2558 | static int __init disable_timer_pin_setup(char *arg) | ||
2559 | { | ||
2560 | disable_timer_pin_1 = 1; | ||
2561 | return 0; | ||
2562 | } | ||
2563 | early_param("disable_timer_pin_1", disable_timer_pin_setup); | ||
2564 | |||
2565 | int timer_through_8259 __initdata; | ||
2566 | |||
1991 | /* | 2567 | /* |
1992 | * This code may look a bit paranoid, but it's supposed to cooperate with | 2568 | * This code may look a bit paranoid, but it's supposed to cooperate with |
1993 | * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ | 2569 | * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ |
1994 | * is so screwy. Thanks to Brian Perkins for testing/hacking this beast | 2570 | * is so screwy. Thanks to Brian Perkins for testing/hacking this beast |
1995 | * fanatically on his truly buggy board. | 2571 | * fanatically on his truly buggy board. |
1996 | * | 2572 | * |
1997 | * FIXME: really need to revamp this for modern platforms only. | 2573 | * FIXME: really need to revamp this for all platforms. |
1998 | */ | 2574 | */ |
1999 | static inline void __init check_timer(void) | 2575 | static inline void __init check_timer(void) |
2000 | { | 2576 | { |
2001 | struct irq_cfg *cfg = irq_cfg + 0; | 2577 | struct irq_cfg *cfg = irq_cfg(0); |
2002 | int apic1, pin1, apic2, pin2; | 2578 | int apic1, pin1, apic2, pin2; |
2003 | unsigned long flags; | 2579 | unsigned long flags; |
2580 | unsigned int ver; | ||
2004 | int no_pin1 = 0; | 2581 | int no_pin1 = 0; |
2005 | 2582 | ||
2006 | local_irq_save(flags); | 2583 | local_irq_save(flags); |
2007 | 2584 | ||
2585 | ver = apic_read(APIC_LVR); | ||
2586 | ver = GET_APIC_VERSION(ver); | ||
2587 | |||
2008 | /* | 2588 | /* |
2009 | * get/set the timer IRQ vector: | 2589 | * get/set the timer IRQ vector: |
2010 | */ | 2590 | */ |
@@ -2013,10 +2593,18 @@ static inline void __init check_timer(void) | |||
2013 | 2593 | ||
2014 | /* | 2594 | /* |
2015 | * As IRQ0 is to be enabled in the 8259A, the virtual | 2595 | * As IRQ0 is to be enabled in the 8259A, the virtual |
2016 | * wire has to be disabled in the local APIC. | 2596 | * wire has to be disabled in the local APIC. Also |
2597 | * timer interrupts need to be acknowledged manually in | ||
2598 | * the 8259A for the i82489DX when using the NMI | ||
2599 | * watchdog as that APIC treats NMIs as level-triggered. | ||
2600 | * The AEOI mode will finish them in the 8259A | ||
2601 | * automatically. | ||
2017 | */ | 2602 | */ |
2018 | apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); | 2603 | apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); |
2019 | init_8259A(1); | 2604 | init_8259A(1); |
2605 | #ifdef CONFIG_X86_32 | ||
2606 | timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver)); | ||
2607 | #endif | ||
2020 | 2608 | ||
2021 | pin1 = find_isa_irq_pin(0, mp_INT); | 2609 | pin1 = find_isa_irq_pin(0, mp_INT); |
2022 | apic1 = find_isa_irq_apic(0, mp_INT); | 2610 | apic1 = find_isa_irq_apic(0, mp_INT); |
@@ -2035,8 +2623,10 @@ static inline void __init check_timer(void) | |||
2035 | * 8259A. | 2623 | * 8259A. |
2036 | */ | 2624 | */ |
2037 | if (pin1 == -1) { | 2625 | if (pin1 == -1) { |
2626 | #ifdef CONFIG_INTR_REMAP | ||
2038 | if (intr_remapping_enabled) | 2627 | if (intr_remapping_enabled) |
2039 | panic("BIOS bug: timer not connected to IO-APIC"); | 2628 | panic("BIOS bug: timer not connected to IO-APIC"); |
2629 | #endif | ||
2040 | pin1 = pin2; | 2630 | pin1 = pin2; |
2041 | apic1 = apic2; | 2631 | apic1 = apic2; |
2042 | no_pin1 = 1; | 2632 | no_pin1 = 1; |
@@ -2054,7 +2644,7 @@ static inline void __init check_timer(void) | |||
2054 | setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); | 2644 | setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); |
2055 | } | 2645 | } |
2056 | unmask_IO_APIC_irq(0); | 2646 | unmask_IO_APIC_irq(0); |
2057 | if (!no_timer_check && timer_irq_works()) { | 2647 | if (timer_irq_works()) { |
2058 | if (nmi_watchdog == NMI_IO_APIC) { | 2648 | if (nmi_watchdog == NMI_IO_APIC) { |
2059 | setup_nmi(); | 2649 | setup_nmi(); |
2060 | enable_8259A_irq(0); | 2650 | enable_8259A_irq(0); |
@@ -2063,8 +2653,10 @@ static inline void __init check_timer(void) | |||
2063 | clear_IO_APIC_pin(0, pin1); | 2653 | clear_IO_APIC_pin(0, pin1); |
2064 | goto out; | 2654 | goto out; |
2065 | } | 2655 | } |
2656 | #ifdef CONFIG_INTR_REMAP | ||
2066 | if (intr_remapping_enabled) | 2657 | if (intr_remapping_enabled) |
2067 | panic("timer doesn't work through Interrupt-remapped IO-APIC"); | 2658 | panic("timer doesn't work through Interrupt-remapped IO-APIC"); |
2659 | #endif | ||
2068 | clear_IO_APIC_pin(apic1, pin1); | 2660 | clear_IO_APIC_pin(apic1, pin1); |
2069 | if (!no_pin1) | 2661 | if (!no_pin1) |
2070 | apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: " | 2662 | apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: " |
@@ -2104,6 +2696,9 @@ static inline void __init check_timer(void) | |||
2104 | "through the IO-APIC - disabling NMI Watchdog!\n"); | 2696 | "through the IO-APIC - disabling NMI Watchdog!\n"); |
2105 | nmi_watchdog = NMI_NONE; | 2697 | nmi_watchdog = NMI_NONE; |
2106 | } | 2698 | } |
2699 | #ifdef CONFIG_X86_32 | ||
2700 | timer_ack = 0; | ||
2701 | #endif | ||
2107 | 2702 | ||
2108 | apic_printk(APIC_QUIET, KERN_INFO | 2703 | apic_printk(APIC_QUIET, KERN_INFO |
2109 | "...trying to set up timer as Virtual Wire IRQ...\n"); | 2704 | "...trying to set up timer as Virtual Wire IRQ...\n"); |
@@ -2140,13 +2735,6 @@ out: | |||
2140 | local_irq_restore(flags); | 2735 | local_irq_restore(flags); |
2141 | } | 2736 | } |
2142 | 2737 | ||
2143 | static int __init notimercheck(char *s) | ||
2144 | { | ||
2145 | no_timer_check = 1; | ||
2146 | return 1; | ||
2147 | } | ||
2148 | __setup("no_timer_check", notimercheck); | ||
2149 | |||
2150 | /* | 2738 | /* |
2151 | * Traditionally ISA IRQ2 is the cascade IRQ, and is not available | 2739 | * Traditionally ISA IRQ2 is the cascade IRQ, and is not available |
2152 | * to devices. However there may be an I/O APIC pin available for | 2740 | * to devices. However there may be an I/O APIC pin available for |
@@ -2164,25 +2752,49 @@ __setup("no_timer_check", notimercheck); | |||
2164 | * the I/O APIC in all cases now. No actual device should request | 2752 | * the I/O APIC in all cases now. No actual device should request |
2165 | * it anyway. --macro | 2753 | * it anyway. --macro |
2166 | */ | 2754 | */ |
2167 | #define PIC_IRQS (1<<2) | 2755 | #define PIC_IRQS (1 << PIC_CASCADE_IR) |
2168 | 2756 | ||
2169 | void __init setup_IO_APIC(void) | 2757 | void __init setup_IO_APIC(void) |
2170 | { | 2758 | { |
2171 | 2759 | ||
2760 | #ifdef CONFIG_X86_32 | ||
2761 | enable_IO_APIC(); | ||
2762 | #else | ||
2172 | /* | 2763 | /* |
2173 | * calling enable_IO_APIC() is moved to setup_local_APIC for BP | 2764 | * calling enable_IO_APIC() is moved to setup_local_APIC for BP |
2174 | */ | 2765 | */ |
2766 | #endif | ||
2175 | 2767 | ||
2176 | io_apic_irqs = ~PIC_IRQS; | 2768 | io_apic_irqs = ~PIC_IRQS; |
2177 | 2769 | ||
2178 | apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n"); | 2770 | apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n"); |
2179 | 2771 | /* | |
2772 | * Set up IO-APIC IRQ routing. | ||
2773 | */ | ||
2774 | #ifdef CONFIG_X86_32 | ||
2775 | if (!acpi_ioapic) | ||
2776 | setup_ioapic_ids_from_mpc(); | ||
2777 | #endif | ||
2180 | sync_Arb_IDs(); | 2778 | sync_Arb_IDs(); |
2181 | setup_IO_APIC_irqs(); | 2779 | setup_IO_APIC_irqs(); |
2182 | init_IO_APIC_traps(); | 2780 | init_IO_APIC_traps(); |
2183 | check_timer(); | 2781 | check_timer(); |
2184 | } | 2782 | } |
2185 | 2783 | ||
2784 | /* | ||
2785 | * Called after all the initialization is done. If we didn't find any | ||
2786 | * APIC bugs then we can allow the modify fast path | ||
2787 | */ | ||
2788 | |||
2789 | static int __init io_apic_bug_finalize(void) | ||
2790 | { | ||
2791 | if (sis_apic_bug == -1) | ||
2792 | sis_apic_bug = 0; | ||
2793 | return 0; | ||
2794 | } | ||
2795 | |||
2796 | late_initcall(io_apic_bug_finalize); | ||
2797 | |||
2186 | struct sysfs_ioapic_data { | 2798 | struct sysfs_ioapic_data { |
2187 | struct sys_device dev; | 2799 | struct sys_device dev; |
2188 | struct IO_APIC_route_entry entry[0]; | 2800 | struct IO_APIC_route_entry entry[0]; |
@@ -2270,32 +2882,51 @@ device_initcall(ioapic_init_sysfs); | |||
2270 | /* | 2882 | /* |
2271 | * Dynamic irq allocate and deallocation | 2883 | * Dynamic irq allocate and deallocation |
2272 | */ | 2884 | */ |
2273 | int create_irq(void) | 2885 | unsigned int create_irq_nr(unsigned int irq_want) |
2274 | { | 2886 | { |
2275 | /* Allocate an unused irq */ | 2887 | /* Allocate an unused irq */ |
2276 | int irq; | 2888 | unsigned int irq; |
2277 | int new; | 2889 | unsigned int new; |
2278 | unsigned long flags; | 2890 | unsigned long flags; |
2891 | struct irq_cfg *cfg_new; | ||
2892 | |||
2893 | irq_want = nr_irqs - 1; | ||
2279 | 2894 | ||
2280 | irq = -ENOSPC; | 2895 | irq = 0; |
2281 | spin_lock_irqsave(&vector_lock, flags); | 2896 | spin_lock_irqsave(&vector_lock, flags); |
2282 | for (new = (NR_IRQS - 1); new >= 0; new--) { | 2897 | for (new = irq_want; new > 0; new--) { |
2283 | if (platform_legacy_irq(new)) | 2898 | if (platform_legacy_irq(new)) |
2284 | continue; | 2899 | continue; |
2285 | if (irq_cfg[new].vector != 0) | 2900 | cfg_new = irq_cfg(new); |
2901 | if (cfg_new && cfg_new->vector != 0) | ||
2286 | continue; | 2902 | continue; |
2903 | /* check if need to create one */ | ||
2904 | if (!cfg_new) | ||
2905 | cfg_new = irq_cfg_alloc(new); | ||
2287 | if (__assign_irq_vector(new, TARGET_CPUS) == 0) | 2906 | if (__assign_irq_vector(new, TARGET_CPUS) == 0) |
2288 | irq = new; | 2907 | irq = new; |
2289 | break; | 2908 | break; |
2290 | } | 2909 | } |
2291 | spin_unlock_irqrestore(&vector_lock, flags); | 2910 | spin_unlock_irqrestore(&vector_lock, flags); |
2292 | 2911 | ||
2293 | if (irq >= 0) { | 2912 | if (irq > 0) { |
2294 | dynamic_irq_init(irq); | 2913 | dynamic_irq_init(irq); |
2295 | } | 2914 | } |
2296 | return irq; | 2915 | return irq; |
2297 | } | 2916 | } |
2298 | 2917 | ||
2918 | int create_irq(void) | ||
2919 | { | ||
2920 | int irq; | ||
2921 | |||
2922 | irq = create_irq_nr(nr_irqs - 1); | ||
2923 | |||
2924 | if (irq == 0) | ||
2925 | irq = -1; | ||
2926 | |||
2927 | return irq; | ||
2928 | } | ||
2929 | |||
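A minimal userspace sketch (not kernel code) of the downward search that create_irq_nr() performs above: walk from irq_want toward 0, skip legacy and already-occupied slots, claim the first free one, and return 0 on failure. The table size, the legacy cutoff and the fake vector assignment are all invented for illustration:

        #include <stdio.h>

        #define NR_IRQS_SKETCH   64     /* hypothetical table size */
        #define NR_LEGACY_SKETCH 16     /* pretend IRQs 0..15 are legacy 8259 lines */

        static unsigned int vector_of[NR_IRQS_SKETCH];  /* 0 == slot is free */

        static unsigned int sketch_create_irq_nr(unsigned int irq_want)
        {
                unsigned int new;

                for (new = irq_want; new > 0; new--) {
                        if (new < NR_LEGACY_SKETCH)     /* platform_legacy_irq() stand-in */
                                continue;
                        if (vector_of[new] != 0)        /* slot already carries a vector */
                                continue;
                        vector_of[new] = 0x30;          /* pretend vector assignment worked */
                        return new;
                }
                return 0;                               /* nothing free, same convention as above */
        }

        int main(void)
        {
                unsigned int a = sketch_create_irq_nr(NR_IRQS_SKETCH - 1);
                unsigned int b = sketch_create_irq_nr(NR_IRQS_SKETCH - 1);

                printf("%u %u\n", a, b);        /* prints "63 62" */
                return 0;
        }

Searching downward keeps the low, legacy-numbered IRQs untouched, which is why create_irq() above simply starts from nr_irqs - 1.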
2299 | void destroy_irq(unsigned int irq) | 2930 | void destroy_irq(unsigned int irq) |
2300 | { | 2931 | { |
2301 | unsigned long flags; | 2932 | unsigned long flags; |
@@ -2316,7 +2947,7 @@ void destroy_irq(unsigned int irq) | |||
2316 | #ifdef CONFIG_PCI_MSI | 2947 | #ifdef CONFIG_PCI_MSI |
2317 | static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg) | 2948 | static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg) |
2318 | { | 2949 | { |
2319 | struct irq_cfg *cfg = irq_cfg + irq; | 2950 | struct irq_cfg *cfg; |
2320 | int err; | 2951 | int err; |
2321 | unsigned dest; | 2952 | unsigned dest; |
2322 | cpumask_t tmp; | 2953 | cpumask_t tmp; |
@@ -2326,6 +2957,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms | |||
2326 | if (err) | 2957 | if (err) |
2327 | return err; | 2958 | return err; |
2328 | 2959 | ||
2960 | cfg = irq_cfg(irq); | ||
2329 | cpus_and(tmp, cfg->domain, tmp); | 2961 | cpus_and(tmp, cfg->domain, tmp); |
2330 | dest = cpu_mask_to_apicid(tmp); | 2962 | dest = cpu_mask_to_apicid(tmp); |
2331 | 2963 | ||
@@ -2383,10 +3015,11 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms | |||
2383 | #ifdef CONFIG_SMP | 3015 | #ifdef CONFIG_SMP |
2384 | static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | 3016 | static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask) |
2385 | { | 3017 | { |
2386 | struct irq_cfg *cfg = irq_cfg + irq; | 3018 | struct irq_cfg *cfg; |
2387 | struct msi_msg msg; | 3019 | struct msi_msg msg; |
2388 | unsigned int dest; | 3020 | unsigned int dest; |
2389 | cpumask_t tmp; | 3021 | cpumask_t tmp; |
3022 | struct irq_desc *desc; | ||
2390 | 3023 | ||
2391 | cpus_and(tmp, mask, cpu_online_map); | 3024 | cpus_and(tmp, mask, cpu_online_map); |
2392 | if (cpus_empty(tmp)) | 3025 | if (cpus_empty(tmp)) |
@@ -2395,6 +3028,7 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | |||
2395 | if (assign_irq_vector(irq, mask)) | 3028 | if (assign_irq_vector(irq, mask)) |
2396 | return; | 3029 | return; |
2397 | 3030 | ||
3031 | cfg = irq_cfg(irq); | ||
2398 | cpus_and(tmp, cfg->domain, mask); | 3032 | cpus_and(tmp, cfg->domain, mask); |
2399 | dest = cpu_mask_to_apicid(tmp); | 3033 | dest = cpu_mask_to_apicid(tmp); |
2400 | 3034 | ||
@@ -2406,7 +3040,8 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | |||
2406 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); | 3040 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); |
2407 | 3041 | ||
2408 | write_msi_msg(irq, &msg); | 3042 | write_msi_msg(irq, &msg); |
2409 | irq_desc[irq].affinity = mask; | 3043 | desc = irq_to_desc(irq); |
3044 | desc->affinity = mask; | ||
2410 | } | 3045 | } |
2411 | 3046 | ||
2412 | #ifdef CONFIG_INTR_REMAP | 3047 | #ifdef CONFIG_INTR_REMAP |
@@ -2416,10 +3051,11 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | |||
2416 | */ | 3051 | */ |
2417 | static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | 3052 | static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask) |
2418 | { | 3053 | { |
2419 | struct irq_cfg *cfg = irq_cfg + irq; | 3054 | struct irq_cfg *cfg; |
2420 | unsigned int dest; | 3055 | unsigned int dest; |
2421 | cpumask_t tmp, cleanup_mask; | 3056 | cpumask_t tmp, cleanup_mask; |
2422 | struct irte irte; | 3057 | struct irte irte; |
3058 | struct irq_desc *desc; | ||
2423 | 3059 | ||
2424 | cpus_and(tmp, mask, cpu_online_map); | 3060 | cpus_and(tmp, mask, cpu_online_map); |
2425 | if (cpus_empty(tmp)) | 3061 | if (cpus_empty(tmp)) |
@@ -2431,6 +3067,7 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | |||
2431 | if (assign_irq_vector(irq, mask)) | 3067 | if (assign_irq_vector(irq, mask)) |
2432 | return; | 3068 | return; |
2433 | 3069 | ||
3070 | cfg = irq_cfg(irq); | ||
2434 | cpus_and(tmp, cfg->domain, mask); | 3071 | cpus_and(tmp, cfg->domain, mask); |
2435 | dest = cpu_mask_to_apicid(tmp); | 3072 | dest = cpu_mask_to_apicid(tmp); |
2436 | 3073 | ||
@@ -2454,7 +3091,8 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | |||
2454 | cfg->move_in_progress = 0; | 3091 | cfg->move_in_progress = 0; |
2455 | } | 3092 | } |
2456 | 3093 | ||
2457 | irq_desc[irq].affinity = mask; | 3094 | desc = irq_to_desc(irq); |
3095 | desc->affinity = mask; | ||
2458 | } | 3096 | } |
2459 | #endif | 3097 | #endif |
2460 | #endif /* CONFIG_SMP */ | 3098 | #endif /* CONFIG_SMP */ |
@@ -2507,7 +3145,7 @@ static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec) | |||
2507 | if (index < 0) { | 3145 | if (index < 0) { |
2508 | printk(KERN_ERR | 3146 | printk(KERN_ERR |
2509 | "Unable to allocate %d IRTE for PCI %s\n", nvec, | 3147 | "Unable to allocate %d IRTE for PCI %s\n", nvec, |
2510 | pci_name(dev)); | 3148 | pci_name(dev)); |
2511 | return -ENOSPC; | 3149 | return -ENOSPC; |
2512 | } | 3150 | } |
2513 | return index; | 3151 | return index; |
@@ -2528,7 +3166,7 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq) | |||
2528 | 3166 | ||
2529 | #ifdef CONFIG_INTR_REMAP | 3167 | #ifdef CONFIG_INTR_REMAP |
2530 | if (irq_remapped(irq)) { | 3168 | if (irq_remapped(irq)) { |
2531 | struct irq_desc *desc = irq_desc + irq; | 3169 | struct irq_desc *desc = irq_to_desc(irq); |
2532 | /* | 3170 | /* |
2533 | * irq migration in process context | 3171 | * irq migration in process context |
2534 | */ | 3172 | */ |
@@ -2538,16 +3176,34 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq) | |||
2538 | #endif | 3176 | #endif |
2539 | set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge"); | 3177 | set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge"); |
2540 | 3178 | ||
3179 | dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq); | ||
3180 | |||
2541 | return 0; | 3181 | return 0; |
2542 | } | 3182 | } |
2543 | 3183 | ||
3184 | static unsigned int build_irq_for_pci_dev(struct pci_dev *dev) | ||
3185 | { | ||
3186 | unsigned int irq; | ||
3187 | |||
3188 | irq = dev->bus->number; | ||
3189 | irq <<= 8; | ||
3190 | irq |= dev->devfn; | ||
3191 | irq <<= 12; | ||
3192 | |||
3193 | return irq; | ||
3194 | } | ||
3195 | |||
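As a worked example of build_irq_for_pci_dev() above (device numbers invented): for a hypothetical device at 00:1f.3, devfn = (0x1f << 3) | 3 = 0xfb, so the function returns ((0 << 8) | 0xfb) << 12 = 0x000fb000; the callers then add 0x100 and allocate IRQ numbers downward from there, so each PCI function gets its own non-overlapping window. A compile-and-run version of the same arithmetic:

        #include <stdio.h>

        static unsigned int pack_irq_base(unsigned int bus, unsigned int devfn)
        {
                unsigned int irq = bus; /* same steps as build_irq_for_pci_dev() */

                irq <<= 8;
                irq |= devfn;
                irq <<= 12;
                return irq;
        }

        int main(void)
        {
                unsigned int base = pack_irq_base(0x00, 0xfb);  /* hypothetical 00:1f.3 */

                printf("base = 0x%08x, irq_want = 0x%08x\n", base, base + 0x100);
                return 0;
        }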
2544 | int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) | 3196 | int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) |
2545 | { | 3197 | { |
2546 | int irq, ret; | 3198 | unsigned int irq; |
3199 | int ret; | ||
3200 | unsigned int irq_want; | ||
2547 | 3201 | ||
2548 | irq = create_irq(); | 3202 | irq_want = build_irq_for_pci_dev(dev) + 0x100; |
2549 | if (irq < 0) | 3203 | |
2550 | return irq; | 3204 | irq = create_irq_nr(irq_want); |
3205 | if (irq == 0) | ||
3206 | return -1; | ||
2551 | 3207 | ||
2552 | #ifdef CONFIG_INTR_REMAP | 3208 | #ifdef CONFIG_INTR_REMAP |
2553 | if (!intr_remapping_enabled) | 3209 | if (!intr_remapping_enabled) |
@@ -2574,18 +3230,22 @@ error: | |||
2574 | 3230 | ||
2575 | int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | 3231 | int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) |
2576 | { | 3232 | { |
2577 | int irq, ret, sub_handle; | 3233 | unsigned int irq; |
3234 | int ret, sub_handle; | ||
2578 | struct msi_desc *desc; | 3235 | struct msi_desc *desc; |
3236 | unsigned int irq_want; | ||
3237 | |||
2579 | #ifdef CONFIG_INTR_REMAP | 3238 | #ifdef CONFIG_INTR_REMAP |
2580 | struct intel_iommu *iommu = 0; | 3239 | struct intel_iommu *iommu = 0; |
2581 | int index = 0; | 3240 | int index = 0; |
2582 | #endif | 3241 | #endif |
2583 | 3242 | ||
3243 | irq_want = build_irq_for_pci_dev(dev) + 0x100; | ||
2584 | sub_handle = 0; | 3244 | sub_handle = 0; |
2585 | list_for_each_entry(desc, &dev->msi_list, list) { | 3245 | list_for_each_entry(desc, &dev->msi_list, list) { |
2586 | irq = create_irq(); | 3246 | irq = create_irq_nr(irq_want--); |
2587 | if (irq < 0) | 3247 | if (irq == 0) |
2588 | return irq; | 3248 | return -1; |
2589 | #ifdef CONFIG_INTR_REMAP | 3249 | #ifdef CONFIG_INTR_REMAP |
2590 | if (!intr_remapping_enabled) | 3250 | if (!intr_remapping_enabled) |
2591 | goto no_ir; | 3251 | goto no_ir; |
@@ -2636,10 +3296,11 @@ void arch_teardown_msi_irq(unsigned int irq) | |||
2636 | #ifdef CONFIG_SMP | 3296 | #ifdef CONFIG_SMP |
2637 | static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) | 3297 | static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) |
2638 | { | 3298 | { |
2639 | struct irq_cfg *cfg = irq_cfg + irq; | 3299 | struct irq_cfg *cfg; |
2640 | struct msi_msg msg; | 3300 | struct msi_msg msg; |
2641 | unsigned int dest; | 3301 | unsigned int dest; |
2642 | cpumask_t tmp; | 3302 | cpumask_t tmp; |
3303 | struct irq_desc *desc; | ||
2643 | 3304 | ||
2644 | cpus_and(tmp, mask, cpu_online_map); | 3305 | cpus_and(tmp, mask, cpu_online_map); |
2645 | if (cpus_empty(tmp)) | 3306 | if (cpus_empty(tmp)) |
@@ -2648,6 +3309,7 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) | |||
2648 | if (assign_irq_vector(irq, mask)) | 3309 | if (assign_irq_vector(irq, mask)) |
2649 | return; | 3310 | return; |
2650 | 3311 | ||
3312 | cfg = irq_cfg(irq); | ||
2651 | cpus_and(tmp, cfg->domain, mask); | 3313 | cpus_and(tmp, cfg->domain, mask); |
2652 | dest = cpu_mask_to_apicid(tmp); | 3314 | dest = cpu_mask_to_apicid(tmp); |
2653 | 3315 | ||
@@ -2659,7 +3321,8 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) | |||
2659 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); | 3321 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); |
2660 | 3322 | ||
2661 | dmar_msi_write(irq, &msg); | 3323 | dmar_msi_write(irq, &msg); |
2662 | irq_desc[irq].affinity = mask; | 3324 | desc = irq_to_desc(irq); |
3325 | desc->affinity = mask; | ||
2663 | } | 3326 | } |
2664 | #endif /* CONFIG_SMP */ | 3327 | #endif /* CONFIG_SMP */ |
2665 | 3328 | ||
@@ -2689,6 +3352,69 @@ int arch_setup_dmar_msi(unsigned int irq) | |||
2689 | } | 3352 | } |
2690 | #endif | 3353 | #endif |
2691 | 3354 | ||
3355 | #ifdef CONFIG_HPET_TIMER | ||
3356 | |||
3357 | #ifdef CONFIG_SMP | ||
3358 | static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask) | ||
3359 | { | ||
3360 | struct irq_cfg *cfg; | ||
3361 | struct irq_desc *desc; | ||
3362 | struct msi_msg msg; | ||
3363 | unsigned int dest; | ||
3364 | cpumask_t tmp; | ||
3365 | |||
3366 | cpus_and(tmp, mask, cpu_online_map); | ||
3367 | if (cpus_empty(tmp)) | ||
3368 | return; | ||
3369 | |||
3370 | if (assign_irq_vector(irq, mask)) | ||
3371 | return; | ||
3372 | |||
3373 | cfg = irq_cfg(irq); | ||
3374 | cpus_and(tmp, cfg->domain, mask); | ||
3375 | dest = cpu_mask_to_apicid(tmp); | ||
3376 | |||
3377 | hpet_msi_read(irq, &msg); | ||
3378 | |||
3379 | msg.data &= ~MSI_DATA_VECTOR_MASK; | ||
3380 | msg.data |= MSI_DATA_VECTOR(cfg->vector); | ||
3381 | msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; | ||
3382 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); | ||
3383 | |||
3384 | hpet_msi_write(irq, &msg); | ||
3385 | desc = irq_to_desc(irq); | ||
3386 | desc->affinity = mask; | ||
3387 | } | ||
3388 | #endif /* CONFIG_SMP */ | ||
3389 | |||
3390 | struct irq_chip hpet_msi_type = { | ||
3391 | .name = "HPET_MSI", | ||
3392 | .unmask = hpet_msi_unmask, | ||
3393 | .mask = hpet_msi_mask, | ||
3394 | .ack = ack_apic_edge, | ||
3395 | #ifdef CONFIG_SMP | ||
3396 | .set_affinity = hpet_msi_set_affinity, | ||
3397 | #endif | ||
3398 | .retrigger = ioapic_retrigger_irq, | ||
3399 | }; | ||
3400 | |||
3401 | int arch_setup_hpet_msi(unsigned int irq) | ||
3402 | { | ||
3403 | int ret; | ||
3404 | struct msi_msg msg; | ||
3405 | |||
3406 | ret = msi_compose_msg(NULL, irq, &msg); | ||
3407 | if (ret < 0) | ||
3408 | return ret; | ||
3409 | |||
3410 | hpet_msi_write(irq, &msg); | ||
3411 | set_irq_chip_and_handler_name(irq, &hpet_msi_type, handle_edge_irq, | ||
3412 | "edge"); | ||
3413 | |||
3414 | return 0; | ||
3415 | } | ||
3416 | #endif | ||
3417 | |||
2692 | #endif /* CONFIG_PCI_MSI */ | 3418 | #endif /* CONFIG_PCI_MSI */ |
2693 | /* | 3419 | /* |
2694 | * Hypertransport interrupt support | 3420 | * Hypertransport interrupt support |
@@ -2713,9 +3439,10 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector) | |||
2713 | 3439 | ||
2714 | static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask) | 3440 | static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask) |
2715 | { | 3441 | { |
2716 | struct irq_cfg *cfg = irq_cfg + irq; | 3442 | struct irq_cfg *cfg; |
2717 | unsigned int dest; | 3443 | unsigned int dest; |
2718 | cpumask_t tmp; | 3444 | cpumask_t tmp; |
3445 | struct irq_desc *desc; | ||
2719 | 3446 | ||
2720 | cpus_and(tmp, mask, cpu_online_map); | 3447 | cpus_and(tmp, mask, cpu_online_map); |
2721 | if (cpus_empty(tmp)) | 3448 | if (cpus_empty(tmp)) |
@@ -2724,11 +3451,13 @@ static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask) | |||
2724 | if (assign_irq_vector(irq, mask)) | 3451 | if (assign_irq_vector(irq, mask)) |
2725 | return; | 3452 | return; |
2726 | 3453 | ||
3454 | cfg = irq_cfg(irq); | ||
2727 | cpus_and(tmp, cfg->domain, mask); | 3455 | cpus_and(tmp, cfg->domain, mask); |
2728 | dest = cpu_mask_to_apicid(tmp); | 3456 | dest = cpu_mask_to_apicid(tmp); |
2729 | 3457 | ||
2730 | target_ht_irq(irq, dest, cfg->vector); | 3458 | target_ht_irq(irq, dest, cfg->vector); |
2731 | irq_desc[irq].affinity = mask; | 3459 | desc = irq_to_desc(irq); |
3460 | desc->affinity = mask; | ||
2732 | } | 3461 | } |
2733 | #endif | 3462 | #endif |
2734 | 3463 | ||
@@ -2745,7 +3474,7 @@ static struct irq_chip ht_irq_chip = { | |||
2745 | 3474 | ||
2746 | int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) | 3475 | int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) |
2747 | { | 3476 | { |
2748 | struct irq_cfg *cfg = irq_cfg + irq; | 3477 | struct irq_cfg *cfg; |
2749 | int err; | 3478 | int err; |
2750 | cpumask_t tmp; | 3479 | cpumask_t tmp; |
2751 | 3480 | ||
@@ -2755,6 +3484,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) | |||
2755 | struct ht_irq_msg msg; | 3484 | struct ht_irq_msg msg; |
2756 | unsigned dest; | 3485 | unsigned dest; |
2757 | 3486 | ||
3487 | cfg = irq_cfg(irq); | ||
2758 | cpus_and(tmp, cfg->domain, tmp); | 3488 | cpus_and(tmp, cfg->domain, tmp); |
2759 | dest = cpu_mask_to_apicid(tmp); | 3489 | dest = cpu_mask_to_apicid(tmp); |
2760 | 3490 | ||
@@ -2777,20 +3507,196 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) | |||
2777 | 3507 | ||
2778 | set_irq_chip_and_handler_name(irq, &ht_irq_chip, | 3508 | set_irq_chip_and_handler_name(irq, &ht_irq_chip, |
2779 | handle_edge_irq, "edge"); | 3509 | handle_edge_irq, "edge"); |
3510 | |||
3511 | dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq); | ||
2780 | } | 3512 | } |
2781 | return err; | 3513 | return err; |
2782 | } | 3514 | } |
2783 | #endif /* CONFIG_HT_IRQ */ | 3515 | #endif /* CONFIG_HT_IRQ */ |
2784 | 3516 | ||
3517 | #ifdef CONFIG_X86_64 | ||
3518 | /* | ||
3519 | * Re-target the irq to the specified CPU and enable the specified MMR located | ||
3520 | * on the specified blade to allow the sending of MSIs to the specified CPU. | ||
3521 | */ | ||
3522 | int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, | ||
3523 | unsigned long mmr_offset) | ||
3524 | { | ||
3525 | const cpumask_t *eligible_cpu = get_cpu_mask(cpu); | ||
3526 | struct irq_cfg *cfg; | ||
3527 | int mmr_pnode; | ||
3528 | unsigned long mmr_value; | ||
3529 | struct uv_IO_APIC_route_entry *entry; | ||
3530 | unsigned long flags; | ||
3531 | int err; | ||
3532 | |||
3533 | err = assign_irq_vector(irq, *eligible_cpu); | ||
3534 | if (err != 0) | ||
3535 | return err; | ||
3536 | |||
3537 | spin_lock_irqsave(&vector_lock, flags); | ||
3538 | set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq, | ||
3539 | irq_name); | ||
3540 | spin_unlock_irqrestore(&vector_lock, flags); | ||
3541 | |||
3542 | cfg = irq_cfg(irq); | ||
3543 | |||
3544 | mmr_value = 0; | ||
3545 | entry = (struct uv_IO_APIC_route_entry *)&mmr_value; | ||
3546 | BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); | ||
3547 | |||
3548 | entry->vector = cfg->vector; | ||
3549 | entry->delivery_mode = INT_DELIVERY_MODE; | ||
3550 | entry->dest_mode = INT_DEST_MODE; | ||
3551 | entry->polarity = 0; | ||
3552 | entry->trigger = 0; | ||
3553 | entry->mask = 0; | ||
3554 | entry->dest = cpu_mask_to_apicid(*eligible_cpu); | ||
3555 | |||
3556 | mmr_pnode = uv_blade_to_pnode(mmr_blade); | ||
3557 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); | ||
3558 | |||
3559 | return irq; | ||
3560 | } | ||
3561 | |||
3562 | /* | ||
3563 | * Disable the specified MMR located on the specified blade so that MSIs are | ||
3564 | * no longer allowed to be sent. | ||
3565 | */ | ||
3566 | void arch_disable_uv_irq(int mmr_blade, unsigned long mmr_offset) | ||
3567 | { | ||
3568 | unsigned long mmr_value; | ||
3569 | struct uv_IO_APIC_route_entry *entry; | ||
3570 | int mmr_pnode; | ||
3571 | |||
3572 | mmr_value = 0; | ||
3573 | entry = (struct uv_IO_APIC_route_entry *)&mmr_value; | ||
3574 | BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); | ||
3575 | |||
3576 | entry->mask = 1; | ||
3577 | |||
3578 | mmr_pnode = uv_blade_to_pnode(mmr_blade); | ||
3579 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); | ||
3580 | } | ||
3581 | #endif /* CONFIG_X86_64 */ | ||
3582 | |||
3583 | int __init io_apic_get_redir_entries (int ioapic) | ||
3584 | { | ||
3585 | union IO_APIC_reg_01 reg_01; | ||
3586 | unsigned long flags; | ||
3587 | |||
3588 | spin_lock_irqsave(&ioapic_lock, flags); | ||
3589 | reg_01.raw = io_apic_read(ioapic, 1); | ||
3590 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
3591 | |||
3592 | return reg_01.bits.entries; | ||
3593 | } | ||
3594 | |||
3595 | int __init probe_nr_irqs(void) | ||
3596 | { | ||
3597 | int idx; | ||
3598 | int nr = 0; | ||
3599 | #ifndef CONFIG_XEN | ||
3600 | int nr_min = 32; | ||
3601 | #else | ||
3602 | int nr_min = NR_IRQS; | ||
3603 | #endif | ||
3604 | |||
3605 | for (idx = 0; idx < nr_ioapics; idx++) | ||
3606 | nr += io_apic_get_redir_entries(idx) + 1; | ||
3607 | |||
3608 | /* double it for hotplug and msi and nmi */ | ||
3609 | nr <<= 1; | ||
3610 | |||
3611 | /* something wrong ? */ | ||
3612 | if (nr < nr_min) | ||
3613 | nr = nr_min; | ||
3614 | |||
3615 | return nr; | ||
3616 | } | ||
3617 | |||
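A worked example of the sizing above, with invented hardware: two I/O APICs with 24 redirection entries each give nr = (24 + 1) + (24 + 1) = 50, doubled to 100 for hotplug/MSI/NMI headroom, which already clears the 32 floor used on non-Xen builds:

        #include <stdio.h>

        int main(void)
        {
                int nr = (24 + 1) + (24 + 1);   /* two hypothetical 24-entry IO-APICs */
                int nr_min = 32;                /* non-Xen floor, as above */

                nr <<= 1;                       /* double for hotplug, MSI and NMI */
                if (nr < nr_min)
                        nr = nr_min;

                printf("probe_nr_irqs() would report %d\n", nr);        /* 100 */
                return 0;
        }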
2785 | /* -------------------------------------------------------------------------- | 3618 | /* -------------------------------------------------------------------------- |
2786 | ACPI-based IOAPIC Configuration | 3619 | ACPI-based IOAPIC Configuration |
2787 | -------------------------------------------------------------------------- */ | 3620 | -------------------------------------------------------------------------- */ |
2788 | 3621 | ||
2789 | #ifdef CONFIG_ACPI | 3622 | #ifdef CONFIG_ACPI |
2790 | 3623 | ||
2791 | #define IO_APIC_MAX_ID 0xFE | 3624 | #ifdef CONFIG_X86_32 |
3625 | int __init io_apic_get_unique_id(int ioapic, int apic_id) | ||
3626 | { | ||
3627 | union IO_APIC_reg_00 reg_00; | ||
3628 | static physid_mask_t apic_id_map = PHYSID_MASK_NONE; | ||
3629 | physid_mask_t tmp; | ||
3630 | unsigned long flags; | ||
3631 | int i = 0; | ||
2792 | 3632 | ||
2793 | int __init io_apic_get_redir_entries (int ioapic) | 3633 | /* |
3634 | * The P4 platform supports up to 256 APIC IDs on two separate APIC | ||
3635 | * buses (one for LAPICs, one for IOAPICs), where predecessors only | ||
3636 | * support up to 16 on one shared APIC bus. | ||
3637 | * | ||
3638 | * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full | ||
3639 | * advantage of new APIC bus architecture. | ||
3640 | */ | ||
3641 | |||
3642 | if (physids_empty(apic_id_map)) | ||
3643 | apic_id_map = ioapic_phys_id_map(phys_cpu_present_map); | ||
3644 | |||
3645 | spin_lock_irqsave(&ioapic_lock, flags); | ||
3646 | reg_00.raw = io_apic_read(ioapic, 0); | ||
3647 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
3648 | |||
3649 | if (apic_id >= get_physical_broadcast()) { | ||
3650 | printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying " | ||
3651 | "%d\n", ioapic, apic_id, reg_00.bits.ID); | ||
3652 | apic_id = reg_00.bits.ID; | ||
3653 | } | ||
3654 | |||
3655 | /* | ||
3656 | * Every APIC in a system must have a unique ID or we get lots of nice | ||
3657 | * 'stuck on smp_invalidate_needed IPI wait' messages. | ||
3658 | */ | ||
3659 | if (check_apicid_used(apic_id_map, apic_id)) { | ||
3660 | |||
3661 | for (i = 0; i < get_physical_broadcast(); i++) { | ||
3662 | if (!check_apicid_used(apic_id_map, i)) | ||
3663 | break; | ||
3664 | } | ||
3665 | |||
3666 | if (i == get_physical_broadcast()) | ||
3667 | panic("Max apic_id exceeded!\n"); | ||
3668 | |||
3669 | printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, " | ||
3670 | "trying %d\n", ioapic, apic_id, i); | ||
3671 | |||
3672 | apic_id = i; | ||
3673 | } | ||
3674 | |||
3675 | tmp = apicid_to_cpu_present(apic_id); | ||
3676 | physids_or(apic_id_map, apic_id_map, tmp); | ||
3677 | |||
3678 | if (reg_00.bits.ID != apic_id) { | ||
3679 | reg_00.bits.ID = apic_id; | ||
3680 | |||
3681 | spin_lock_irqsave(&ioapic_lock, flags); | ||
3682 | io_apic_write(ioapic, 0, reg_00.raw); | ||
3683 | reg_00.raw = io_apic_read(ioapic, 0); | ||
3684 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
3685 | |||
3686 | /* Sanity check */ | ||
3687 | if (reg_00.bits.ID != apic_id) { | ||
3688 | printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic); | ||
3689 | return -1; | ||
3690 | } | ||
3691 | } | ||
3692 | |||
3693 | apic_printk(APIC_VERBOSE, KERN_INFO | ||
3694 | "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id); | ||
3695 | |||
3696 | return apic_id; | ||
3697 | } | ||
3698 | |||
3699 | int __init io_apic_get_version(int ioapic) | ||
2794 | { | 3700 | { |
2795 | union IO_APIC_reg_01 reg_01; | 3701 | union IO_APIC_reg_01 reg_01; |
2796 | unsigned long flags; | 3702 | unsigned long flags; |
@@ -2799,9 +3705,9 @@ int __init io_apic_get_redir_entries (int ioapic) | |||
2799 | reg_01.raw = io_apic_read(ioapic, 1); | 3705 | reg_01.raw = io_apic_read(ioapic, 1); |
2800 | spin_unlock_irqrestore(&ioapic_lock, flags); | 3706 | spin_unlock_irqrestore(&ioapic_lock, flags); |
2801 | 3707 | ||
2802 | return reg_01.bits.entries; | 3708 | return reg_01.bits.version; |
2803 | } | 3709 | } |
2804 | 3710 | #endif | |
2805 | 3711 | ||
2806 | int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity) | 3712 | int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity) |
2807 | { | 3713 | { |
@@ -2853,6 +3759,7 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity) | |||
2853 | void __init setup_ioapic_dest(void) | 3759 | void __init setup_ioapic_dest(void) |
2854 | { | 3760 | { |
2855 | int pin, ioapic, irq, irq_entry; | 3761 | int pin, ioapic, irq, irq_entry; |
3762 | struct irq_cfg *cfg; | ||
2856 | 3763 | ||
2857 | if (skip_ioapic_setup == 1) | 3764 | if (skip_ioapic_setup == 1) |
2858 | return; | 3765 | return; |
@@ -2868,7 +3775,8 @@ void __init setup_ioapic_dest(void) | |||
2868 | * when you have too many devices, because at that time only boot | 3775 | * when you have too many devices, because at that time only boot |
2869 | * cpu is online. | 3776 | * cpu is online. |
2870 | */ | 3777 | */ |
2871 | if (!irq_cfg[irq].vector) | 3778 | cfg = irq_cfg(irq); |
3779 | if (!cfg->vector) | ||
2872 | setup_IO_APIC_irq(ioapic, pin, irq, | 3780 | setup_IO_APIC_irq(ioapic, pin, irq, |
2873 | irq_trigger(irq_entry), | 3781 | irq_trigger(irq_entry), |
2874 | irq_polarity(irq_entry)); | 3782 | irq_polarity(irq_entry)); |
@@ -2926,18 +3834,33 @@ void __init ioapic_init_mappings(void) | |||
2926 | struct resource *ioapic_res; | 3834 | struct resource *ioapic_res; |
2927 | int i; | 3835 | int i; |
2928 | 3836 | ||
3837 | irq_2_pin_init(); | ||
2929 | ioapic_res = ioapic_setup_resources(); | 3838 | ioapic_res = ioapic_setup_resources(); |
2930 | for (i = 0; i < nr_ioapics; i++) { | 3839 | for (i = 0; i < nr_ioapics; i++) { |
2931 | if (smp_found_config) { | 3840 | if (smp_found_config) { |
2932 | ioapic_phys = mp_ioapics[i].mp_apicaddr; | 3841 | ioapic_phys = mp_ioapics[i].mp_apicaddr; |
3842 | #ifdef CONFIG_X86_32 | ||
3843 | if (!ioapic_phys) { | ||
3844 | printk(KERN_ERR | ||
3845 | "WARNING: bogus zero IO-APIC " | ||
3846 | "address found in MPTABLE, " | ||
3847 | "disabling IO/APIC support!\n"); | ||
3848 | smp_found_config = 0; | ||
3849 | skip_ioapic_setup = 1; | ||
3850 | goto fake_ioapic_page; | ||
3851 | } | ||
3852 | #endif | ||
2933 | } else { | 3853 | } else { |
3854 | #ifdef CONFIG_X86_32 | ||
3855 | fake_ioapic_page: | ||
3856 | #endif | ||
2934 | ioapic_phys = (unsigned long) | 3857 | ioapic_phys = (unsigned long) |
2935 | alloc_bootmem_pages(PAGE_SIZE); | 3858 | alloc_bootmem_pages(PAGE_SIZE); |
2936 | ioapic_phys = __pa(ioapic_phys); | 3859 | ioapic_phys = __pa(ioapic_phys); |
2937 | } | 3860 | } |
2938 | set_fixmap_nocache(idx, ioapic_phys); | 3861 | set_fixmap_nocache(idx, ioapic_phys); |
2939 | apic_printk(APIC_VERBOSE, | 3862 | apic_printk(APIC_VERBOSE, |
2940 | "mapped IOAPIC to %016lx (%016lx)\n", | 3863 | "mapped IOAPIC to %08lx (%08lx)\n", |
2941 | __fix_to_virt(idx), ioapic_phys); | 3864 | __fix_to_virt(idx), ioapic_phys); |
2942 | idx++; | 3865 | idx++; |
2943 | 3866 | ||
@@ -2971,4 +3894,3 @@ static int __init ioapic_insert_resources(void) | |||
2971 | /* Insert the IO APIC resources after PCI initialization has occurred to handle | 3894 | /* Insert the IO APIC resources after PCI initialization has occurred to handle |
2972 | * IO APICS that are mapped in on a BAR in PCI space. */ | 3895 | * IO APICS that are mapped in on a BAR in PCI space. */ |
2973 | late_initcall(ioapic_insert_resources); | 3896 | late_initcall(ioapic_insert_resources); |
2974 | |||
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c deleted file mode 100644 index e710289f673e..000000000000 --- a/arch/x86/kernel/io_apic_32.c +++ /dev/null | |||
@@ -1,2908 +0,0 @@ | |||
1 | /* | ||
2 | * Intel IO-APIC support for multi-Pentium hosts. | ||
3 | * | ||
4 | * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo | ||
5 | * | ||
6 | * Many thanks to Stig Venaas for trying out countless experimental | ||
7 | * patches and reporting/debugging problems patiently! | ||
8 | * | ||
9 | * (c) 1999, Multiple IO-APIC support, developed by | ||
10 | * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and | ||
11 | * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>, | ||
12 | * further tested and cleaned up by Zach Brown <zab@redhat.com> | ||
13 | * and Ingo Molnar <mingo@redhat.com> | ||
14 | * | ||
15 | * Fixes | ||
16 | * Maciej W. Rozycki : Bits for genuine 82489DX APICs; | ||
17 | * thanks to Eric Gilmore | ||
18 | * and Rolf G. Tews | ||
19 | * for testing these extensively | ||
20 | * Paul Diefenbaugh : Added full ACPI support | ||
21 | */ | ||
22 | |||
23 | #include <linux/mm.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/delay.h> | ||
27 | #include <linux/sched.h> | ||
28 | #include <linux/bootmem.h> | ||
29 | #include <linux/mc146818rtc.h> | ||
30 | #include <linux/compiler.h> | ||
31 | #include <linux/acpi.h> | ||
32 | #include <linux/module.h> | ||
33 | #include <linux/sysdev.h> | ||
34 | #include <linux/pci.h> | ||
35 | #include <linux/msi.h> | ||
36 | #include <linux/htirq.h> | ||
37 | #include <linux/freezer.h> | ||
38 | #include <linux/kthread.h> | ||
39 | #include <linux/jiffies.h> /* time_after() */ | ||
40 | |||
41 | #include <asm/io.h> | ||
42 | #include <asm/smp.h> | ||
43 | #include <asm/desc.h> | ||
44 | #include <asm/timer.h> | ||
45 | #include <asm/i8259.h> | ||
46 | #include <asm/nmi.h> | ||
47 | #include <asm/msidef.h> | ||
48 | #include <asm/hypertransport.h> | ||
49 | #include <asm/setup.h> | ||
50 | |||
51 | #include <mach_apic.h> | ||
52 | #include <mach_apicdef.h> | ||
53 | |||
54 | #define __apicdebuginit(type) static type __init | ||
55 | |||
56 | int (*ioapic_renumber_irq)(int ioapic, int irq); | ||
57 | atomic_t irq_mis_count; | ||
58 | |||
59 | /* Where, if anywhere, is the i8259 connected in external int mode */ | ||
60 | static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; | ||
61 | |||
62 | static DEFINE_SPINLOCK(ioapic_lock); | ||
63 | DEFINE_SPINLOCK(vector_lock); | ||
64 | |||
65 | int timer_through_8259 __initdata; | ||
66 | |||
67 | /* | ||
68 | * Is the SiS APIC rmw bug present ? | ||
69 | * -1 = don't know, 0 = no, 1 = yes | ||
70 | */ | ||
71 | int sis_apic_bug = -1; | ||
72 | |||
73 | /* | ||
74 | * # of IRQ routing registers | ||
75 | */ | ||
76 | int nr_ioapic_registers[MAX_IO_APICS]; | ||
77 | |||
78 | /* I/O APIC entries */ | ||
79 | struct mp_config_ioapic mp_ioapics[MAX_IO_APICS]; | ||
80 | int nr_ioapics; | ||
81 | |||
82 | /* MP IRQ source entries */ | ||
83 | struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; | ||
84 | |||
85 | /* # of MP IRQ source entries */ | ||
86 | int mp_irq_entries; | ||
87 | |||
88 | #if defined (CONFIG_MCA) || defined (CONFIG_EISA) | ||
89 | int mp_bus_id_to_type[MAX_MP_BUSSES]; | ||
90 | #endif | ||
91 | |||
92 | DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); | ||
93 | |||
94 | static int disable_timer_pin_1 __initdata; | ||
95 | |||
96 | /* | ||
97 | * Rough estimation of how many shared IRQs there are, can | ||
98 | * be changed anytime. | ||
99 | */ | ||
100 | #define MAX_PLUS_SHARED_IRQS NR_IRQS | ||
101 | #define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS) | ||
102 | |||
103 | /* | ||
104 | * This is performance-critical, we want to do it O(1) | ||
105 | * | ||
106 | * the indexing order of this array favors 1:1 mappings | ||
107 | * between pins and IRQs. | ||
108 | */ | ||
109 | |||
110 | static struct irq_pin_list { | ||
111 | int apic, pin, next; | ||
112 | } irq_2_pin[PIN_MAP_SIZE]; | ||
113 | |||
114 | struct io_apic { | ||
115 | unsigned int index; | ||
116 | unsigned int unused[3]; | ||
117 | unsigned int data; | ||
118 | }; | ||
119 | |||
120 | static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx) | ||
121 | { | ||
122 | return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx) | ||
123 | + (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK); | ||
124 | } | ||
125 | |||
126 | static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg) | ||
127 | { | ||
128 | struct io_apic __iomem *io_apic = io_apic_base(apic); | ||
129 | writel(reg, &io_apic->index); | ||
130 | return readl(&io_apic->data); | ||
131 | } | ||
132 | |||
133 | static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value) | ||
134 | { | ||
135 | struct io_apic __iomem *io_apic = io_apic_base(apic); | ||
136 | writel(reg, &io_apic->index); | ||
137 | writel(value, &io_apic->data); | ||
138 | } | ||
139 | |||
140 | /* | ||
141 | * Re-write a value: to be used for read-modify-write | ||
142 | * cycles where the read already set up the index register. | ||
143 | * | ||
144 | * Older SiS APIC requires we rewrite the index register | ||
145 | */ | ||
146 | static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value) | ||
147 | { | ||
148 | volatile struct io_apic __iomem *io_apic = io_apic_base(apic); | ||
149 | if (sis_apic_bug) | ||
150 | writel(reg, &io_apic->index); | ||
151 | writel(value, &io_apic->data); | ||
152 | } | ||
153 | |||
154 | union entry_union { | ||
155 | struct { u32 w1, w2; }; | ||
156 | struct IO_APIC_route_entry entry; | ||
157 | }; | ||
158 | |||
159 | static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin) | ||
160 | { | ||
161 | union entry_union eu; | ||
162 | unsigned long flags; | ||
163 | spin_lock_irqsave(&ioapic_lock, flags); | ||
164 | eu.w1 = io_apic_read(apic, 0x10 + 2 * pin); | ||
165 | eu.w2 = io_apic_read(apic, 0x11 + 2 * pin); | ||
166 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
167 | return eu.entry; | ||
168 | } | ||
169 | |||
170 | /* | ||
171 | * When we write a new IO APIC routing entry, we need to write the high | ||
172 | * word first! If the mask bit in the low word is clear, we will enable | ||
173 | * the interrupt, and we need to make sure the entry is fully populated | ||
174 | * before that happens. | ||
175 | */ | ||
176 | static void | ||
177 | __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) | ||
178 | { | ||
179 | union entry_union eu; | ||
180 | eu.entry = e; | ||
181 | io_apic_write(apic, 0x11 + 2*pin, eu.w2); | ||
182 | io_apic_write(apic, 0x10 + 2*pin, eu.w1); | ||
183 | } | ||
184 | |||
185 | static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) | ||
186 | { | ||
187 | unsigned long flags; | ||
188 | spin_lock_irqsave(&ioapic_lock, flags); | ||
189 | __ioapic_write_entry(apic, pin, e); | ||
190 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
191 | } | ||
192 | |||
193 | /* | ||
194 | * When we mask an IO APIC routing entry, we need to write the low | ||
195 | * word first, in order to set the mask bit before we change the | ||
196 | * high bits! | ||
197 | */ | ||
198 | static void ioapic_mask_entry(int apic, int pin) | ||
199 | { | ||
200 | unsigned long flags; | ||
201 | union entry_union eu = { .entry.mask = 1 }; | ||
202 | |||
203 | spin_lock_irqsave(&ioapic_lock, flags); | ||
204 | io_apic_write(apic, 0x10 + 2*pin, eu.w1); | ||
205 | io_apic_write(apic, 0x11 + 2*pin, eu.w2); | ||
206 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
207 | } | ||
208 | |||
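The two comments above encode an ordering rule worth spelling out: when programming an entry, the high word (destination) is written before the low word that may clear the mask bit; when masking, the low word is written first so the mask bit is set before anything else changes. A standalone sketch against a stand-in pair of registers (names and values are made up):

        #include <stdint.h>
        #include <stdio.h>

        static uint32_t redir_lo, redir_hi;     /* stand-ins for regs 0x10/0x11 + 2*pin */

        #define REDIR_MASKED (1u << 16)         /* mask bit lives in the low word */

        static void write_entry(uint32_t lo, uint32_t hi)
        {
                redir_hi = hi;                  /* high word first: destination etc. */
                redir_lo = lo;                  /* low word last: this may unmask    */
        }

        static void mask_entry(void)
        {
                redir_lo = REDIR_MASKED;        /* low word first: mask immediately */
                redir_hi = 0;                   /* only then touch the high word    */
        }

        int main(void)
        {
                write_entry(0x000000a1, 0x0f000000);    /* invented vector/destination */
                printf("lo=%08x hi=%08x\n", (unsigned)redir_lo, (unsigned)redir_hi);
                mask_entry();
                printf("lo=%08x hi=%08x\n", (unsigned)redir_lo, (unsigned)redir_hi);
                return 0;
        }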
209 | /* | ||
210 | * The common case is 1:1 IRQ<->pin mappings. Sometimes there are | ||
211 | * shared ISA-space IRQs, so we have to support them. We are super | ||
212 | * fast in the common case, and fast for shared ISA-space IRQs. | ||
213 | */ | ||
214 | static void add_pin_to_irq(unsigned int irq, int apic, int pin) | ||
215 | { | ||
216 | static int first_free_entry = NR_IRQS; | ||
217 | struct irq_pin_list *entry = irq_2_pin + irq; | ||
218 | |||
219 | while (entry->next) | ||
220 | entry = irq_2_pin + entry->next; | ||
221 | |||
222 | if (entry->pin != -1) { | ||
223 | entry->next = first_free_entry; | ||
224 | entry = irq_2_pin + entry->next; | ||
225 | if (++first_free_entry >= PIN_MAP_SIZE) | ||
226 | panic("io_apic.c: whoops"); | ||
227 | } | ||
228 | entry->apic = apic; | ||
229 | entry->pin = pin; | ||
230 | } | ||
231 | |||
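add_pin_to_irq() above keeps a linked list inside a fixed array: entry irq is the list head, 'next' is an index rather than a pointer, and 0 terminates. A minimal userspace sketch of that layout (sizes, the free-slot counter and the "head in use" test are simplifications; the real driver marks empty heads with pin == -1):

        #include <stdio.h>

        #define HEADS_SKETCH    8               /* pretend IRQs 0..7 have list heads */
        #define MAP_SIZE_SKETCH 16              /* hypothetical PIN_MAP_SIZE */

        static struct pin_node {
                int apic, pin, next;            /* next is an index, 0 == end of list */
        } pin_map[MAP_SIZE_SKETCH];

        static int first_free = HEADS_SKETCH;   /* overflow slots start after the heads */

        static void add_pin(int irq, int apic, int pin)
        {
                struct pin_node *entry = &pin_map[irq];

                while (entry->next)                     /* walk to the tail */
                        entry = &pin_map[entry->next];

                if (entry->pin || entry->apic) {        /* head already used: chain a slot */
                        entry->next = first_free++;
                        entry = &pin_map[entry->next];
                }
                entry->apic = apic;
                entry->pin = pin;
        }

        int main(void)
        {
                struct pin_node *e;

                add_pin(3, 0, 11);      /* IRQ 3 on IO-APIC 0, pin 11 */
                add_pin(3, 1, 2);       /* same IRQ shared with IO-APIC 1, pin 2 */

                for (e = &pin_map[3]; ; e = &pin_map[e->next]) {
                        printf("irq 3 -> apic %d pin %d\n", e->apic, e->pin);
                        if (!e->next)
                                break;
                }
                return 0;
        }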
232 | /* | ||
233 | * Reroute an IRQ to a different pin. | ||
234 | */ | ||
235 | static void __init replace_pin_at_irq(unsigned int irq, | ||
236 | int oldapic, int oldpin, | ||
237 | int newapic, int newpin) | ||
238 | { | ||
239 | struct irq_pin_list *entry = irq_2_pin + irq; | ||
240 | |||
241 | while (1) { | ||
242 | if (entry->apic == oldapic && entry->pin == oldpin) { | ||
243 | entry->apic = newapic; | ||
244 | entry->pin = newpin; | ||
245 | } | ||
246 | if (!entry->next) | ||
247 | break; | ||
248 | entry = irq_2_pin + entry->next; | ||
249 | } | ||
250 | } | ||
251 | |||
252 | static void __modify_IO_APIC_irq(unsigned int irq, unsigned long enable, unsigned long disable) | ||
253 | { | ||
254 | struct irq_pin_list *entry = irq_2_pin + irq; | ||
255 | unsigned int pin, reg; | ||
256 | |||
257 | for (;;) { | ||
258 | pin = entry->pin; | ||
259 | if (pin == -1) | ||
260 | break; | ||
261 | reg = io_apic_read(entry->apic, 0x10 + pin*2); | ||
262 | reg &= ~disable; | ||
263 | reg |= enable; | ||
264 | io_apic_modify(entry->apic, 0x10 + pin*2, reg); | ||
265 | if (!entry->next) | ||
266 | break; | ||
267 | entry = irq_2_pin + entry->next; | ||
268 | } | ||
269 | } | ||
270 | |||
271 | /* mask = 1 */ | ||
272 | static void __mask_IO_APIC_irq(unsigned int irq) | ||
273 | { | ||
274 | __modify_IO_APIC_irq(irq, IO_APIC_REDIR_MASKED, 0); | ||
275 | } | ||
276 | |||
277 | /* mask = 0 */ | ||
278 | static void __unmask_IO_APIC_irq(unsigned int irq) | ||
279 | { | ||
280 | __modify_IO_APIC_irq(irq, 0, IO_APIC_REDIR_MASKED); | ||
281 | } | ||
282 | |||
283 | /* mask = 1, trigger = 0 */ | ||
284 | static void __mask_and_edge_IO_APIC_irq(unsigned int irq) | ||
285 | { | ||
286 | __modify_IO_APIC_irq(irq, IO_APIC_REDIR_MASKED, | ||
287 | IO_APIC_REDIR_LEVEL_TRIGGER); | ||
288 | } | ||
289 | |||
290 | /* mask = 0, trigger = 1 */ | ||
291 | static void __unmask_and_level_IO_APIC_irq(unsigned int irq) | ||
292 | { | ||
293 | __modify_IO_APIC_irq(irq, IO_APIC_REDIR_LEVEL_TRIGGER, | ||
294 | IO_APIC_REDIR_MASKED); | ||
295 | } | ||
296 | |||
297 | static void mask_IO_APIC_irq(unsigned int irq) | ||
298 | { | ||
299 | unsigned long flags; | ||
300 | |||
301 | spin_lock_irqsave(&ioapic_lock, flags); | ||
302 | __mask_IO_APIC_irq(irq); | ||
303 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
304 | } | ||
305 | |||
306 | static void unmask_IO_APIC_irq(unsigned int irq) | ||
307 | { | ||
308 | unsigned long flags; | ||
309 | |||
310 | spin_lock_irqsave(&ioapic_lock, flags); | ||
311 | __unmask_IO_APIC_irq(irq); | ||
312 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
313 | } | ||
314 | |||
315 | static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) | ||
316 | { | ||
317 | struct IO_APIC_route_entry entry; | ||
318 | |||
319 | /* Check delivery_mode to be sure we're not clearing an SMI pin */ | ||
320 | entry = ioapic_read_entry(apic, pin); | ||
321 | if (entry.delivery_mode == dest_SMI) | ||
322 | return; | ||
323 | |||
324 | /* | ||
325 | * Disable it in the IO-APIC irq-routing table: | ||
326 | */ | ||
327 | ioapic_mask_entry(apic, pin); | ||
328 | } | ||
329 | |||
330 | static void clear_IO_APIC(void) | ||
331 | { | ||
332 | int apic, pin; | ||
333 | |||
334 | for (apic = 0; apic < nr_ioapics; apic++) | ||
335 | for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) | ||
336 | clear_IO_APIC_pin(apic, pin); | ||
337 | } | ||
338 | |||
339 | #ifdef CONFIG_SMP | ||
340 | static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask) | ||
341 | { | ||
342 | unsigned long flags; | ||
343 | int pin; | ||
344 | struct irq_pin_list *entry = irq_2_pin + irq; | ||
345 | unsigned int apicid_value; | ||
346 | cpumask_t tmp; | ||
347 | |||
348 | cpus_and(tmp, cpumask, cpu_online_map); | ||
349 | if (cpus_empty(tmp)) | ||
350 | tmp = TARGET_CPUS; | ||
351 | |||
352 | cpus_and(cpumask, tmp, CPU_MASK_ALL); | ||
353 | |||
354 | apicid_value = cpu_mask_to_apicid(cpumask); | ||
355 | /* Prepare to do the io_apic_write */ | ||
356 | apicid_value = apicid_value << 24; | ||
357 | spin_lock_irqsave(&ioapic_lock, flags); | ||
358 | for (;;) { | ||
359 | pin = entry->pin; | ||
360 | if (pin == -1) | ||
361 | break; | ||
362 | io_apic_write(entry->apic, 0x10 + 1 + pin*2, apicid_value); | ||
363 | if (!entry->next) | ||
364 | break; | ||
365 | entry = irq_2_pin + entry->next; | ||
366 | } | ||
367 | irq_desc[irq].affinity = cpumask; | ||
368 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
369 | } | ||
370 | |||
371 | #if defined(CONFIG_IRQBALANCE) | ||
372 | # include <asm/processor.h> /* kernel_thread() */ | ||
373 | # include <linux/kernel_stat.h> /* kstat */ | ||
374 | # include <linux/slab.h> /* kmalloc() */ | ||
375 | # include <linux/timer.h> | ||
376 | |||
377 | #define IRQBALANCE_CHECK_ARCH -999 | ||
378 | #define MAX_BALANCED_IRQ_INTERVAL (5*HZ) | ||
379 | #define MIN_BALANCED_IRQ_INTERVAL (HZ/2) | ||
380 | #define BALANCED_IRQ_MORE_DELTA (HZ/10) | ||
381 | #define BALANCED_IRQ_LESS_DELTA (HZ) | ||
382 | |||
383 | static int irqbalance_disabled __read_mostly = IRQBALANCE_CHECK_ARCH; | ||
384 | static int physical_balance __read_mostly; | ||
385 | static long balanced_irq_interval __read_mostly = MAX_BALANCED_IRQ_INTERVAL; | ||
386 | |||
387 | static struct irq_cpu_info { | ||
388 | unsigned long *last_irq; | ||
389 | unsigned long *irq_delta; | ||
390 | unsigned long irq; | ||
391 | } irq_cpu_data[NR_CPUS]; | ||
392 | |||
393 | #define CPU_IRQ(cpu) (irq_cpu_data[cpu].irq) | ||
394 | #define LAST_CPU_IRQ(cpu, irq) (irq_cpu_data[cpu].last_irq[irq]) | ||
395 | #define IRQ_DELTA(cpu, irq) (irq_cpu_data[cpu].irq_delta[irq]) | ||
396 | |||
397 | #define IDLE_ENOUGH(cpu,now) \ | ||
398 | (idle_cpu(cpu) && ((now) - per_cpu(irq_stat, (cpu)).idle_timestamp > 1)) | ||
399 | |||
400 | #define IRQ_ALLOWED(cpu, allowed_mask) cpu_isset(cpu, allowed_mask) | ||
401 | |||
402 | #define CPU_TO_PACKAGEINDEX(i) (first_cpu(per_cpu(cpu_sibling_map, i))) | ||
403 | |||
404 | static cpumask_t balance_irq_affinity[NR_IRQS] = { | ||
405 | [0 ... NR_IRQS-1] = CPU_MASK_ALL | ||
406 | }; | ||
407 | |||
408 | void set_balance_irq_affinity(unsigned int irq, cpumask_t mask) | ||
409 | { | ||
410 | balance_irq_affinity[irq] = mask; | ||
411 | } | ||
412 | |||
413 | static unsigned long move(int curr_cpu, cpumask_t allowed_mask, | ||
414 | unsigned long now, int direction) | ||
415 | { | ||
416 | int search_idle = 1; | ||
417 | int cpu = curr_cpu; | ||
418 | |||
419 | goto inside; | ||
420 | |||
421 | do { | ||
422 | if (unlikely(cpu == curr_cpu)) | ||
423 | search_idle = 0; | ||
424 | inside: | ||
425 | if (direction == 1) { | ||
426 | cpu++; | ||
427 | if (cpu >= NR_CPUS) | ||
428 | cpu = 0; | ||
429 | } else { | ||
430 | cpu--; | ||
431 | if (cpu == -1) | ||
432 | cpu = NR_CPUS-1; | ||
433 | } | ||
434 | } while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu, allowed_mask) || | ||
435 | (search_idle && !IDLE_ENOUGH(cpu, now))); | ||
436 | |||
437 | return cpu; | ||
438 | } | ||
439 | |||
440 | static inline void balance_irq(int cpu, int irq) | ||
441 | { | ||
442 | unsigned long now = jiffies; | ||
443 | cpumask_t allowed_mask; | ||
444 | unsigned int new_cpu; | ||
445 | |||
446 | if (irqbalance_disabled) | ||
447 | return; | ||
448 | |||
449 | cpus_and(allowed_mask, cpu_online_map, balance_irq_affinity[irq]); | ||
450 | new_cpu = move(cpu, allowed_mask, now, 1); | ||
451 | if (cpu != new_cpu) | ||
452 | set_pending_irq(irq, cpumask_of_cpu(new_cpu)); | ||
453 | } | ||
454 | |||
455 | static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold) | ||
456 | { | ||
457 | int i, j; | ||
458 | |||
459 | for_each_online_cpu(i) { | ||
460 | for (j = 0; j < NR_IRQS; j++) { | ||
461 | if (!irq_desc[j].action) | ||
462 | continue; | ||
463 | /* Is it a significant load ? */ | ||
464 | if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i), j) < | ||
465 | useful_load_threshold) | ||
466 | continue; | ||
467 | balance_irq(i, j); | ||
468 | } | ||
469 | } | ||
470 | balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL, | ||
471 | balanced_irq_interval - BALANCED_IRQ_LESS_DELTA); | ||
472 | return; | ||
473 | } | ||
474 | |||
475 | static void do_irq_balance(void) | ||
476 | { | ||
477 | int i, j; | ||
478 | unsigned long max_cpu_irq = 0, min_cpu_irq = (~0); | ||
479 | unsigned long move_this_load = 0; | ||
480 | int max_loaded = 0, min_loaded = 0; | ||
481 | int load; | ||
482 | unsigned long useful_load_threshold = balanced_irq_interval + 10; | ||
483 | int selected_irq; | ||
484 | int tmp_loaded, first_attempt = 1; | ||
485 | unsigned long tmp_cpu_irq; | ||
486 | unsigned long imbalance = 0; | ||
487 | cpumask_t allowed_mask, target_cpu_mask, tmp; | ||
488 | |||
489 | for_each_possible_cpu(i) { | ||
490 | int package_index; | ||
491 | CPU_IRQ(i) = 0; | ||
492 | if (!cpu_online(i)) | ||
493 | continue; | ||
494 | package_index = CPU_TO_PACKAGEINDEX(i); | ||
495 | for (j = 0; j < NR_IRQS; j++) { | ||
496 | unsigned long value_now, delta; | ||
497 | /* Is this an active IRQ or balancing disabled ? */ | ||
498 | if (!irq_desc[j].action || irq_balancing_disabled(j)) | ||
499 | continue; | ||
500 | if (package_index == i) | ||
501 | IRQ_DELTA(package_index, j) = 0; | ||
502 | /* Determine the total count per processor per IRQ */ | ||
503 | value_now = (unsigned long) kstat_cpu(i).irqs[j]; | ||
504 | |||
505 | /* Determine the activity per processor per IRQ */ | ||
506 | delta = value_now - LAST_CPU_IRQ(i, j); | ||
507 | |||
508 | /* Update last_cpu_irq[][] for the next time */ | ||
509 | LAST_CPU_IRQ(i, j) = value_now; | ||
510 | |||
511 | /* Ignore IRQs whose rate is less than the clock */ | ||
512 | if (delta < useful_load_threshold) | ||
513 | continue; | ||
514 | /* update the load for the processor or package total */ | ||
515 | IRQ_DELTA(package_index, j) += delta; | ||
516 | |||
517 | /* Keep track of the higher numbered sibling as well */ | ||
518 | if (i != package_index) | ||
519 | CPU_IRQ(i) += delta; | ||
520 | /* | ||
521 | * We have sibling A and sibling B in the package | ||
522 | * | ||
523 | * cpu_irq[A] = load for cpu A + load for cpu B | ||
524 | * cpu_irq[B] = load for cpu B | ||
525 | */ | ||
526 | CPU_IRQ(package_index) += delta; | ||
527 | } | ||
528 | } | ||
529 | /* Find the least loaded processor package */ | ||
530 | for_each_online_cpu(i) { | ||
531 | if (i != CPU_TO_PACKAGEINDEX(i)) | ||
532 | continue; | ||
533 | if (min_cpu_irq > CPU_IRQ(i)) { | ||
534 | min_cpu_irq = CPU_IRQ(i); | ||
535 | min_loaded = i; | ||
536 | } | ||
537 | } | ||
538 | max_cpu_irq = ULONG_MAX; | ||
539 | |||
540 | tryanothercpu: | ||
541 | /* | ||
542 | * Look for heaviest loaded processor. | ||
543 | * We may come back to get the next heaviest loaded processor. | ||
544 | * Skip processors with trivial loads. | ||
545 | */ | ||
546 | tmp_cpu_irq = 0; | ||
547 | tmp_loaded = -1; | ||
548 | for_each_online_cpu(i) { | ||
549 | if (i != CPU_TO_PACKAGEINDEX(i)) | ||
550 | continue; | ||
551 | if (max_cpu_irq <= CPU_IRQ(i)) | ||
552 | continue; | ||
553 | if (tmp_cpu_irq < CPU_IRQ(i)) { | ||
554 | tmp_cpu_irq = CPU_IRQ(i); | ||
555 | tmp_loaded = i; | ||
556 | } | ||
557 | } | ||
558 | |||
559 | if (tmp_loaded == -1) { | ||
560 | /* | ||
561 | * With a small number of heavy interrupt sources, some of the | ||
562 | * cpus end up loaded too much. Fall back to Ingo's original | ||
563 | * approach and rotate the IRQs among the cpus. | ||
564 | */ | ||
565 | if (!first_attempt && imbalance >= useful_load_threshold) { | ||
566 | rotate_irqs_among_cpus(useful_load_threshold); | ||
567 | return; | ||
568 | } | ||
569 | goto not_worth_the_effort; | ||
570 | } | ||
571 | |||
572 | first_attempt = 0; /* heaviest search */ | ||
573 | max_cpu_irq = tmp_cpu_irq; /* load */ | ||
574 | max_loaded = tmp_loaded; /* processor */ | ||
575 | imbalance = (max_cpu_irq - min_cpu_irq) / 2; | ||
576 | |||
577 | /* | ||
578 | * If the imbalance is less than roughly an eighth (~12%) of the | ||
579 | * max load, we are into diminishing returns - quit. | ||
580 | */ | ||
581 | if (imbalance < (max_cpu_irq >> 3)) | ||
582 | goto not_worth_the_effort; | ||
583 | |||
584 | tryanotherirq: | ||
585 | /* if we select an IRQ to move that can't go where we want, then | ||
586 | * see if there is another one to try. | ||
587 | */ | ||
588 | move_this_load = 0; | ||
589 | selected_irq = -1; | ||
590 | for (j = 0; j < NR_IRQS; j++) { | ||
591 | /* Is this an active IRQ? */ | ||
592 | if (!irq_desc[j].action) | ||
593 | continue; | ||
594 | if (imbalance <= IRQ_DELTA(max_loaded, j)) | ||
595 | continue; | ||
596 | /* Try to find the IRQ that is closest to the imbalance | ||
597 | * without going over. | ||
598 | */ | ||
599 | if (move_this_load < IRQ_DELTA(max_loaded, j)) { | ||
600 | move_this_load = IRQ_DELTA(max_loaded, j); | ||
601 | selected_irq = j; | ||
602 | } | ||
603 | } | ||
604 | if (selected_irq == -1) | ||
605 | goto tryanothercpu; | ||
606 | |||
607 | imbalance = move_this_load; | ||
608 | |||
609 | /* For the physical_balance case, we accumulate both load | ||
610 | * values in the cpu_irq[] slot of one of the siblings, so that | ||
611 | * the same code can be used for physical and logical processors | ||
612 | * as much as possible. | ||
613 | * | ||
614 | * NOTE: the cpu_irq[] array holds the sum of the load for | ||
615 | * sibling A and sibling B in the slot for the lowest numbered | ||
616 | * sibling (A), _AND_ the load for sibling B in the slot for | ||
617 | * the higher numbered sibling. | ||
618 | * | ||
619 | * We seek the least loaded sibling by making the comparison | ||
620 | * (A+B)/2 vs B | ||
621 | */ | ||
622 | load = CPU_IRQ(min_loaded) >> 1; | ||
623 | for_each_cpu_mask(j, per_cpu(cpu_sibling_map, min_loaded)) { | ||
624 | if (load > CPU_IRQ(j)) { | ||
625 | /* This won't change cpu_sibling_map[min_loaded] */ | ||
626 | load = CPU_IRQ(j); | ||
627 | min_loaded = j; | ||
628 | } | ||
629 | } | ||
630 | |||
631 | cpus_and(allowed_mask, | ||
632 | cpu_online_map, | ||
633 | balance_irq_affinity[selected_irq]); | ||
634 | target_cpu_mask = cpumask_of_cpu(min_loaded); | ||
635 | cpus_and(tmp, target_cpu_mask, allowed_mask); | ||
636 | |||
637 | if (!cpus_empty(tmp)) { | ||
638 | /* mark the IRQ for a destination change */ | ||
639 | set_pending_irq(selected_irq, cpumask_of_cpu(min_loaded)); | ||
640 | |||
641 | /* Since we made a change, come back sooner to | ||
642 | * check for more variation. | ||
643 | */ | ||
644 | balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL, | ||
645 | balanced_irq_interval - BALANCED_IRQ_LESS_DELTA); | ||
646 | return; | ||
647 | } | ||
648 | goto tryanotherirq; | ||
649 | |||
650 | not_worth_the_effort: | ||
651 | /* | ||
652 | * if we did not find an IRQ to move, then adjust the time interval | ||
653 | * upward | ||
654 | */ | ||
655 | balanced_irq_interval = min((long)MAX_BALANCED_IRQ_INTERVAL, | ||
656 | balanced_irq_interval + BALANCED_IRQ_MORE_DELTA); | ||
657 | return; | ||
658 | } | ||
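The sibling bookkeeping described in the NOTE above is easier to follow with concrete numbers. The standalone sketch below uses made-up loads for a hypothetical package with siblings 0 (A) and 1 (B): the lower-numbered slot holds A+B, the higher-numbered slot holds B alone, and the less loaded sibling falls out of the (A+B)/2 versus B comparison.

#include <stdio.h>

int main(void)
{
        /* hypothetical package with siblings 0 (A) and 1 (B) */
        unsigned long load_a = 700, load_b = 300;
        unsigned long cpu_irq[2];
        int min_loaded = 0;                 /* start with the package slot */
        unsigned long load;

        cpu_irq[0] = load_a + load_b;       /* A + B in the low sibling's slot */
        cpu_irq[1] = load_b;                /* B alone in the high sibling's slot */

        load = cpu_irq[min_loaded] >> 1;    /* (A + B) / 2 */
        if (load > cpu_irq[1]) {            /* compare against B */
                load = cpu_irq[1];
                min_loaded = 1;
        }

        printf("least loaded sibling: cpu %d (load %lu)\n", min_loaded, load);
        return 0;
}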
659 | |||
660 | static int balanced_irq(void *unused) | ||
661 | { | ||
662 | int i; | ||
663 | unsigned long prev_balance_time = jiffies; | ||
664 | long time_remaining = balanced_irq_interval; | ||
665 | |||
666 | /* push everything to CPU 0 to give us a starting point. */ | ||
667 | for (i = 0 ; i < NR_IRQS ; i++) { | ||
668 | irq_desc[i].pending_mask = cpumask_of_cpu(0); | ||
669 | set_pending_irq(i, cpumask_of_cpu(0)); | ||
670 | } | ||
671 | |||
672 | set_freezable(); | ||
673 | for ( ; ; ) { | ||
674 | time_remaining = schedule_timeout_interruptible(time_remaining); | ||
675 | try_to_freeze(); | ||
676 | if (time_after(jiffies, | ||
677 | prev_balance_time+balanced_irq_interval)) { | ||
678 | preempt_disable(); | ||
679 | do_irq_balance(); | ||
680 | prev_balance_time = jiffies; | ||
681 | time_remaining = balanced_irq_interval; | ||
682 | preempt_enable(); | ||
683 | } | ||
684 | } | ||
685 | return 0; | ||
686 | } | ||
687 | |||
688 | static int __init balanced_irq_init(void) | ||
689 | { | ||
690 | int i; | ||
691 | struct cpuinfo_x86 *c; | ||
692 | cpumask_t tmp; | ||
693 | |||
694 | cpus_shift_right(tmp, cpu_online_map, 2); | ||
695 | c = &boot_cpu_data; | ||
696 | /* When not overridden on the command line, ask the subarchitecture. */ | ||
697 | if (irqbalance_disabled == IRQBALANCE_CHECK_ARCH) | ||
698 | irqbalance_disabled = NO_BALANCE_IRQ; | ||
699 | if (irqbalance_disabled) | ||
700 | return 0; | ||
701 | |||
702 | /* disable irqbalance completely if there is only one processor online */ | ||
703 | if (num_online_cpus() < 2) { | ||
704 | irqbalance_disabled = 1; | ||
705 | return 0; | ||
706 | } | ||
707 | /* | ||
708 | * Enable physical balance only if more than 1 physical processor | ||
709 | * is present | ||
710 | */ | ||
711 | if (smp_num_siblings > 1 && !cpus_empty(tmp)) | ||
712 | physical_balance = 1; | ||
713 | |||
714 | for_each_online_cpu(i) { | ||
715 | irq_cpu_data[i].irq_delta = kzalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL); | ||
716 | irq_cpu_data[i].last_irq = kzalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL); | ||
717 | if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) { | ||
718 | printk(KERN_ERR "balanced_irq_init: out of memory\n"); | ||
719 | goto failed; | ||
720 | } | ||
721 | } | ||
722 | |||
723 | printk(KERN_INFO "Starting balanced_irq\n"); | ||
724 | if (!IS_ERR(kthread_run(balanced_irq, NULL, "kirqd"))) | ||
725 | return 0; | ||
726 | printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq\n"); | ||
727 | failed: | ||
728 | for_each_possible_cpu(i) { | ||
729 | kfree(irq_cpu_data[i].irq_delta); | ||
730 | irq_cpu_data[i].irq_delta = NULL; | ||
731 | kfree(irq_cpu_data[i].last_irq); | ||
732 | irq_cpu_data[i].last_irq = NULL; | ||
733 | } | ||
734 | return 0; | ||
735 | } | ||
736 | |||
737 | int __devinit irqbalance_disable(char *str) | ||
738 | { | ||
739 | irqbalance_disabled = 1; | ||
740 | return 1; | ||
741 | } | ||
742 | |||
743 | __setup("noirqbalance", irqbalance_disable); | ||
744 | |||
745 | late_initcall(balanced_irq_init); | ||
746 | #endif /* CONFIG_IRQBALANCE */ | ||
747 | #endif /* CONFIG_SMP */ | ||
748 | |||
749 | #ifndef CONFIG_SMP | ||
750 | void send_IPI_self(int vector) | ||
751 | { | ||
752 | unsigned int cfg; | ||
753 | |||
754 | /* | ||
755 | * Wait for idle. | ||
756 | */ | ||
757 | apic_wait_icr_idle(); | ||
758 | cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL; | ||
759 | /* | ||
760 | * Send the IPI. The write to APIC_ICR fires this off. | ||
761 | */ | ||
762 | apic_write(APIC_ICR, cfg); | ||
763 | } | ||
764 | #endif /* !CONFIG_SMP */ | ||
765 | |||
766 | |||
767 | /* | ||
768 | * Support for broken MP BIOSes: enables hand-redirection of PIRQ0-7 to | ||
769 | * specific CPU-side IRQs. | ||
770 | */ | ||
771 | |||
772 | #define MAX_PIRQS 8 | ||
773 | static int pirq_entries [MAX_PIRQS]; | ||
774 | static int pirqs_enabled; | ||
775 | int skip_ioapic_setup; | ||
776 | |||
777 | static int __init ioapic_pirq_setup(char *str) | ||
778 | { | ||
779 | int i, max; | ||
780 | int ints[MAX_PIRQS+1]; | ||
781 | |||
782 | get_options(str, ARRAY_SIZE(ints), ints); | ||
783 | |||
784 | for (i = 0; i < MAX_PIRQS; i++) | ||
785 | pirq_entries[i] = -1; | ||
786 | |||
787 | pirqs_enabled = 1; | ||
788 | apic_printk(APIC_VERBOSE, KERN_INFO | ||
789 | "PIRQ redirection, working around broken MP-BIOS.\n"); | ||
790 | max = MAX_PIRQS; | ||
791 | if (ints[0] < MAX_PIRQS) | ||
792 | max = ints[0]; | ||
793 | |||
794 | for (i = 0; i < max; i++) { | ||
795 | apic_printk(APIC_VERBOSE, KERN_DEBUG | ||
796 | "... PIRQ%d -> IRQ %d\n", i, ints[i+1]); | ||
797 | /* | ||
798 | * PIRQs are mapped upside down, usually. | ||
799 | */ | ||
800 | pirq_entries[MAX_PIRQS-i-1] = ints[i+1]; | ||
801 | } | ||
802 | return 1; | ||
803 | } | ||
804 | |||
805 | __setup("pirq=", ioapic_pirq_setup); | ||
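Because the PIRQ entries are filled in "upside down", a boot option such as pirq=5,11 ends up in the top slots of pirq_entries[]. The userspace sketch below replays that loop with a hand-built ints[] array in the shape get_options() would return (count first); the option values 5 and 11 are made up.

#include <stdio.h>

#define MAX_PIRQS 8

int main(void)
{
        int pirq_entries[MAX_PIRQS];
        int ints[] = { 2, 5, 11 };      /* ints[0] = count, then the values */
        int i, max = MAX_PIRQS;

        for (i = 0; i < MAX_PIRQS; i++)
                pirq_entries[i] = -1;

        if (ints[0] < MAX_PIRQS)
                max = ints[0];

        /* PIRQs are mapped upside down: entry i lands in slot MAX_PIRQS-i-1 */
        for (i = 0; i < max; i++)
                pirq_entries[MAX_PIRQS - i - 1] = ints[i + 1];

        for (i = 0; i < MAX_PIRQS; i++)
                printf("pirq_entries[%d] = %d\n", i, pirq_entries[i]);
        return 0;
}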
806 | |||
807 | /* | ||
808 | * Find the IRQ entry number of a certain pin. | ||
809 | */ | ||
810 | static int find_irq_entry(int apic, int pin, int type) | ||
811 | { | ||
812 | int i; | ||
813 | |||
814 | for (i = 0; i < mp_irq_entries; i++) | ||
815 | if (mp_irqs[i].mp_irqtype == type && | ||
816 | (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid || | ||
817 | mp_irqs[i].mp_dstapic == MP_APIC_ALL) && | ||
818 | mp_irqs[i].mp_dstirq == pin) | ||
819 | return i; | ||
820 | |||
821 | return -1; | ||
822 | } | ||
823 | |||
824 | /* | ||
825 | * Find the pin to which IRQ[irq] (ISA) is connected | ||
826 | */ | ||
827 | static int __init find_isa_irq_pin(int irq, int type) | ||
828 | { | ||
829 | int i; | ||
830 | |||
831 | for (i = 0; i < mp_irq_entries; i++) { | ||
832 | int lbus = mp_irqs[i].mp_srcbus; | ||
833 | |||
834 | if (test_bit(lbus, mp_bus_not_pci) && | ||
835 | (mp_irqs[i].mp_irqtype == type) && | ||
836 | (mp_irqs[i].mp_srcbusirq == irq)) | ||
837 | |||
838 | return mp_irqs[i].mp_dstirq; | ||
839 | } | ||
840 | return -1; | ||
841 | } | ||
842 | |||
843 | static int __init find_isa_irq_apic(int irq, int type) | ||
844 | { | ||
845 | int i; | ||
846 | |||
847 | for (i = 0; i < mp_irq_entries; i++) { | ||
848 | int lbus = mp_irqs[i].mp_srcbus; | ||
849 | |||
850 | if (test_bit(lbus, mp_bus_not_pci) && | ||
851 | (mp_irqs[i].mp_irqtype == type) && | ||
852 | (mp_irqs[i].mp_srcbusirq == irq)) | ||
853 | break; | ||
854 | } | ||
855 | if (i < mp_irq_entries) { | ||
856 | int apic; | ||
857 | for (apic = 0; apic < nr_ioapics; apic++) { | ||
858 | if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic) | ||
859 | return apic; | ||
860 | } | ||
861 | } | ||
862 | |||
863 | return -1; | ||
864 | } | ||
865 | |||
866 | /* | ||
867 | * Find a specific PCI IRQ entry. | ||
868 | * Not an __init, possibly needed by modules | ||
869 | */ | ||
870 | static int pin_2_irq(int idx, int apic, int pin); | ||
871 | |||
872 | int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin) | ||
873 | { | ||
874 | int apic, i, best_guess = -1; | ||
875 | |||
876 | apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, " | ||
877 | "slot:%d, pin:%d.\n", bus, slot, pin); | ||
878 | if (test_bit(bus, mp_bus_not_pci)) { | ||
879 | printk(KERN_WARNING "PCI BIOS passed nonexistent PCI bus %d!\n", bus); | ||
880 | return -1; | ||
881 | } | ||
882 | for (i = 0; i < mp_irq_entries; i++) { | ||
883 | int lbus = mp_irqs[i].mp_srcbus; | ||
884 | |||
885 | for (apic = 0; apic < nr_ioapics; apic++) | ||
886 | if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic || | ||
887 | mp_irqs[i].mp_dstapic == MP_APIC_ALL) | ||
888 | break; | ||
889 | |||
890 | if (!test_bit(lbus, mp_bus_not_pci) && | ||
891 | !mp_irqs[i].mp_irqtype && | ||
892 | (bus == lbus) && | ||
893 | (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) { | ||
894 | int irq = pin_2_irq(i, apic, mp_irqs[i].mp_dstirq); | ||
895 | |||
896 | if (!(apic || IO_APIC_IRQ(irq))) | ||
897 | continue; | ||
898 | |||
899 | if (pin == (mp_irqs[i].mp_srcbusirq & 3)) | ||
900 | return irq; | ||
901 | /* | ||
902 | * Use the first all-but-pin matching entry as a | ||
903 | * best-guess fuzzy result for broken mptables. | ||
904 | */ | ||
905 | if (best_guess < 0) | ||
906 | best_guess = irq; | ||
907 | } | ||
908 | } | ||
909 | return best_guess; | ||
910 | } | ||
911 | EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector); | ||
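The slot and pin comparisons above rely on the PCI encoding of mp_srcbusirq: bits 2..6 carry the device (slot) number and bits 0..1 the INTA..INTD pin. A small sketch of that decoding, using a made-up srcbusirq value:

#include <stdio.h>

int main(void)
{
        unsigned int srcbusirq = 0x4a;          /* hypothetical MP-table value */
        unsigned int slot = (srcbusirq >> 2) & 0x1f;
        unsigned int pin  = srcbusirq & 3;

        printf("slot %u, pin INT%c#\n", slot, 'A' + pin);
        return 0;
}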
912 | |||
913 | /* | ||
914 | * This function is currently only a helper for the i386 smp boot process, where | ||
915 | * we need to reprogram the ioredtbls to cater for the cpus which have come online, | ||
916 | * so the mask in all cases should simply be TARGET_CPUS. | ||
917 | */ | ||
918 | #ifdef CONFIG_SMP | ||
919 | void __init setup_ioapic_dest(void) | ||
920 | { | ||
921 | int pin, ioapic, irq, irq_entry; | ||
922 | |||
923 | if (skip_ioapic_setup == 1) | ||
924 | return; | ||
925 | |||
926 | for (ioapic = 0; ioapic < nr_ioapics; ioapic++) { | ||
927 | for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) { | ||
928 | irq_entry = find_irq_entry(ioapic, pin, mp_INT); | ||
929 | if (irq_entry == -1) | ||
930 | continue; | ||
931 | irq = pin_2_irq(irq_entry, ioapic, pin); | ||
932 | set_ioapic_affinity_irq(irq, TARGET_CPUS); | ||
933 | } | ||
934 | |||
935 | } | ||
936 | } | ||
937 | #endif | ||
938 | |||
939 | #if defined(CONFIG_EISA) || defined(CONFIG_MCA) | ||
940 | /* | ||
941 | * EISA Edge/Level control register, ELCR | ||
942 | */ | ||
943 | static int EISA_ELCR(unsigned int irq) | ||
944 | { | ||
945 | if (irq < 16) { | ||
946 | unsigned int port = 0x4d0 + (irq >> 3); | ||
947 | return (inb(port) >> (irq & 7)) & 1; | ||
948 | } | ||
949 | apic_printk(APIC_VERBOSE, KERN_INFO | ||
950 | "Broken MPtable reports ISA irq %d\n", irq); | ||
951 | return 0; | ||
952 | } | ||
953 | #endif | ||
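EISA_ELCR() folds the two ELCR registers into one view: IRQs 0-7 live in the register at port 0x4d0 and IRQs 8-15 in the one at 0x4d1, one bit per IRQ. The sketch below only reproduces the port/bit arithmetic and does not touch real hardware.

#include <stdio.h>

int main(void)
{
        unsigned int irq;

        /* same arithmetic as EISA_ELCR(): port = 0x4d0 + (irq >> 3), bit = irq & 7 */
        for (irq = 0; irq < 16; irq++)
                printf("IRQ %2u -> port 0x%x, bit %u\n",
                       irq, 0x4d0 + (irq >> 3), irq & 7);
        return 0;
}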
954 | |||
955 | /* ISA interrupts are always polarity zero edge triggered, | ||
956 | * when listed as conforming in the MP table. */ | ||
957 | |||
958 | #define default_ISA_trigger(idx) (0) | ||
959 | #define default_ISA_polarity(idx) (0) | ||
960 | |||
961 | /* EISA interrupts are always polarity zero and can be edge or level | ||
962 | * trigger depending on the ELCR value. If an interrupt is listed as | ||
963 | * EISA conforming in the MP table, that means its trigger type must | ||
964 | * be read in from the ELCR */ | ||
965 | |||
966 | #define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mp_srcbusirq)) | ||
967 | #define default_EISA_polarity(idx) default_ISA_polarity(idx) | ||
968 | |||
969 | /* PCI interrupts are always polarity one level triggered, | ||
970 | * when listed as conforming in the MP table. */ | ||
971 | |||
972 | #define default_PCI_trigger(idx) (1) | ||
973 | #define default_PCI_polarity(idx) (1) | ||
974 | |||
975 | /* MCA interrupts are always polarity zero level triggered, | ||
976 | * when listed as conforming in the MP table. */ | ||
977 | |||
978 | #define default_MCA_trigger(idx) (1) | ||
979 | #define default_MCA_polarity(idx) default_ISA_polarity(idx) | ||
980 | |||
981 | static int MPBIOS_polarity(int idx) | ||
982 | { | ||
983 | int bus = mp_irqs[idx].mp_srcbus; | ||
984 | int polarity; | ||
985 | |||
986 | /* | ||
987 | * Determine IRQ line polarity (high active or low active): | ||
988 | */ | ||
989 | switch (mp_irqs[idx].mp_irqflag & 3) { | ||
990 | case 0: /* conforms, ie. bus-type dependent polarity */ | ||
991 | { | ||
992 | polarity = test_bit(bus, mp_bus_not_pci)? | ||
993 | default_ISA_polarity(idx): | ||
994 | default_PCI_polarity(idx); | ||
995 | break; | ||
996 | } | ||
997 | case 1: /* high active */ | ||
998 | { | ||
999 | polarity = 0; | ||
1000 | break; | ||
1001 | } | ||
1002 | case 2: /* reserved */ | ||
1003 | { | ||
1004 | printk(KERN_WARNING "broken BIOS!!\n"); | ||
1005 | polarity = 1; | ||
1006 | break; | ||
1007 | } | ||
1008 | case 3: /* low active */ | ||
1009 | { | ||
1010 | polarity = 1; | ||
1011 | break; | ||
1012 | } | ||
1013 | default: /* invalid */ | ||
1014 | { | ||
1015 | printk(KERN_WARNING "broken BIOS!!\n"); | ||
1016 | polarity = 1; | ||
1017 | break; | ||
1018 | } | ||
1019 | } | ||
1020 | return polarity; | ||
1021 | } | ||
1022 | |||
1023 | static int MPBIOS_trigger(int idx) | ||
1024 | { | ||
1025 | int bus = mp_irqs[idx].mp_srcbus; | ||
1026 | int trigger; | ||
1027 | |||
1028 | /* | ||
1029 | * Determine IRQ trigger mode (edge or level sensitive): | ||
1030 | */ | ||
1031 | switch ((mp_irqs[idx].mp_irqflag>>2) & 3) { | ||
1032 | case 0: /* conforms, ie. bus-type dependent */ | ||
1033 | { | ||
1034 | trigger = test_bit(bus, mp_bus_not_pci)? | ||
1035 | default_ISA_trigger(idx): | ||
1036 | default_PCI_trigger(idx); | ||
1037 | #if defined(CONFIG_EISA) || defined(CONFIG_MCA) | ||
1038 | switch (mp_bus_id_to_type[bus]) { | ||
1039 | case MP_BUS_ISA: /* ISA pin */ | ||
1040 | { | ||
1041 | /* set before the switch */ | ||
1042 | break; | ||
1043 | } | ||
1044 | case MP_BUS_EISA: /* EISA pin */ | ||
1045 | { | ||
1046 | trigger = default_EISA_trigger(idx); | ||
1047 | break; | ||
1048 | } | ||
1049 | case MP_BUS_PCI: /* PCI pin */ | ||
1050 | { | ||
1051 | /* set before the switch */ | ||
1052 | break; | ||
1053 | } | ||
1054 | case MP_BUS_MCA: /* MCA pin */ | ||
1055 | { | ||
1056 | trigger = default_MCA_trigger(idx); | ||
1057 | break; | ||
1058 | } | ||
1059 | default: | ||
1060 | { | ||
1061 | printk(KERN_WARNING "broken BIOS!!\n"); | ||
1062 | trigger = 1; | ||
1063 | break; | ||
1064 | } | ||
1065 | } | ||
1066 | #endif | ||
1067 | break; | ||
1068 | } | ||
1069 | case 1: /* edge */ | ||
1070 | { | ||
1071 | trigger = 0; | ||
1072 | break; | ||
1073 | } | ||
1074 | case 2: /* reserved */ | ||
1075 | { | ||
1076 | printk(KERN_WARNING "broken BIOS!!\n"); | ||
1077 | trigger = 1; | ||
1078 | break; | ||
1079 | } | ||
1080 | case 3: /* level */ | ||
1081 | { | ||
1082 | trigger = 1; | ||
1083 | break; | ||
1084 | } | ||
1085 | default: /* invalid */ | ||
1086 | { | ||
1087 | printk(KERN_WARNING "broken BIOS!!\n"); | ||
1088 | trigger = 0; | ||
1089 | break; | ||
1090 | } | ||
1091 | } | ||
1092 | return trigger; | ||
1093 | } | ||
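Both MPBIOS_polarity() and MPBIOS_trigger() decode the same mp_irqflag word: bits 0-1 select the polarity and bits 2-3 the trigger mode, with 0 meaning "conforms to the bus". A minimal sketch of that decoding, with a made-up flag value:

#include <stdio.h>

int main(void)
{
        unsigned int mp_irqflag = 0x0d;                 /* hypothetical: 0b1101 */
        unsigned int polarity = mp_irqflag & 3;         /* 1 = high active */
        unsigned int trigger  = (mp_irqflag >> 2) & 3;  /* 3 = level */

        printf("polarity field %u, trigger field %u\n", polarity, trigger);
        return 0;
}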
1094 | |||
1095 | static inline int irq_polarity(int idx) | ||
1096 | { | ||
1097 | return MPBIOS_polarity(idx); | ||
1098 | } | ||
1099 | |||
1100 | static inline int irq_trigger(int idx) | ||
1101 | { | ||
1102 | return MPBIOS_trigger(idx); | ||
1103 | } | ||
1104 | |||
1105 | static int pin_2_irq(int idx, int apic, int pin) | ||
1106 | { | ||
1107 | int irq, i; | ||
1108 | int bus = mp_irqs[idx].mp_srcbus; | ||
1109 | |||
1110 | /* | ||
1111 | * Debugging check, we are in big trouble if this message pops up! | ||
1112 | */ | ||
1113 | if (mp_irqs[idx].mp_dstirq != pin) | ||
1114 | printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n"); | ||
1115 | |||
1116 | if (test_bit(bus, mp_bus_not_pci)) | ||
1117 | irq = mp_irqs[idx].mp_srcbusirq; | ||
1118 | else { | ||
1119 | /* | ||
1120 | * PCI IRQs are mapped in order | ||
1121 | */ | ||
1122 | i = irq = 0; | ||
1123 | while (i < apic) | ||
1124 | irq += nr_ioapic_registers[i++]; | ||
1125 | irq += pin; | ||
1126 | |||
1127 | /* | ||
1128 | * For MPS mode, so far only needed by ES7000 platform | ||
1129 | */ | ||
1130 | if (ioapic_renumber_irq) | ||
1131 | irq = ioapic_renumber_irq(apic, irq); | ||
1132 | } | ||
1133 | |||
1134 | /* | ||
1135 | * PCI IRQ command line redirection. Yes, limits are hardcoded. | ||
1136 | */ | ||
1137 | if ((pin >= 16) && (pin <= 23)) { | ||
1138 | if (pirq_entries[pin-16] != -1) { | ||
1139 | if (!pirq_entries[pin-16]) { | ||
1140 | apic_printk(APIC_VERBOSE, KERN_DEBUG | ||
1141 | "disabling PIRQ%d\n", pin-16); | ||
1142 | } else { | ||
1143 | irq = pirq_entries[pin-16]; | ||
1144 | apic_printk(APIC_VERBOSE, KERN_DEBUG | ||
1145 | "using PIRQ%d -> IRQ %d\n", | ||
1146 | pin-16, irq); | ||
1147 | } | ||
1148 | } | ||
1149 | } | ||
1150 | return irq; | ||
1151 | } | ||
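For PCI sources, the "mapped in order" rule above means a pin's IRQ number is the total number of redirection entries on all lower-numbered IO-APICs plus the pin index. The sketch below walks that sum for a hypothetical two-IO-APIC system; the register counts are assumed values.

#include <stdio.h>

int main(void)
{
        int nr_ioapic_registers[] = { 24, 16 };  /* assumed pin counts */
        int apic = 1, pin = 5;                   /* look up pin 5 of IO-APIC 1 */
        int i = 0, irq = 0;

        /* same walk as pin_2_irq(): sum the pins of all lower-numbered IO-APICs */
        while (i < apic)
                irq += nr_ioapic_registers[i++];
        irq += pin;

        printf("IO-APIC %d pin %d -> IRQ %d\n", apic, pin, irq);
        return 0;
}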
1152 | |||
1153 | static inline int IO_APIC_irq_trigger(int irq) | ||
1154 | { | ||
1155 | int apic, idx, pin; | ||
1156 | |||
1157 | for (apic = 0; apic < nr_ioapics; apic++) { | ||
1158 | for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { | ||
1159 | idx = find_irq_entry(apic, pin, mp_INT); | ||
1160 | if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin))) | ||
1161 | return irq_trigger(idx); | ||
1162 | } | ||
1163 | } | ||
1164 | /* | ||
1165 | * nonexistent IRQs are edge default | ||
1166 | */ | ||
1167 | return 0; | ||
1168 | } | ||
1169 | |||
1170 | /* irq_vector[] is indexed by IRQ number; NR_IRQ_VECTORS covers the sum of all RTEs in all I/O APICs. */ | ||
1171 | static u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR, 0 }; | ||
1172 | |||
1173 | static int __assign_irq_vector(int irq) | ||
1174 | { | ||
1175 | static int current_vector = FIRST_DEVICE_VECTOR, current_offset; | ||
1176 | int vector, offset; | ||
1177 | |||
1178 | BUG_ON((unsigned)irq >= NR_IRQ_VECTORS); | ||
1179 | |||
1180 | if (irq_vector[irq] > 0) | ||
1181 | return irq_vector[irq]; | ||
1182 | |||
1183 | vector = current_vector; | ||
1184 | offset = current_offset; | ||
1185 | next: | ||
1186 | vector += 8; | ||
1187 | if (vector >= first_system_vector) { | ||
1188 | offset = (offset + 1) % 8; | ||
1189 | vector = FIRST_DEVICE_VECTOR + offset; | ||
1190 | } | ||
1191 | if (vector == current_vector) | ||
1192 | return -ENOSPC; | ||
1193 | if (test_and_set_bit(vector, used_vectors)) | ||
1194 | goto next; | ||
1195 | |||
1196 | current_vector = vector; | ||
1197 | current_offset = offset; | ||
1198 | irq_vector[irq] = vector; | ||
1199 | |||
1200 | return vector; | ||
1201 | } | ||
1202 | |||
1203 | static int assign_irq_vector(int irq) | ||
1204 | { | ||
1205 | unsigned long flags; | ||
1206 | int vector; | ||
1207 | |||
1208 | spin_lock_irqsave(&vector_lock, flags); | ||
1209 | vector = __assign_irq_vector(irq); | ||
1210 | spin_unlock_irqrestore(&vector_lock, flags); | ||
1211 | |||
1212 | return vector; | ||
1213 | } | ||
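Vectors are handed out 8 apart so that consecutive allocations land on different interrupt priority levels (the level is the vector shifted right by 4); once the walk reaches the system vectors it restarts at the base with the offset bumped by one. The simplified userspace simulation below ignores the used_vectors bookkeeping and the -ENOSPC path, and assumes FIRST_DEVICE_VECTOR = 0x31 and first_system_vector = 0xef purely for illustration.

#include <stdio.h>

#define FIRST_DEVICE_VECTOR     0x31    /* assumed */
#define FIRST_SYSTEM_VECTOR     0xef    /* assumed */

int main(void)
{
        int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
        int n;

        for (n = 0; n < 5; n++) {
                int vector = current_vector + 8;        /* step one priority level */

                if (vector >= FIRST_SYSTEM_VECTOR) {
                        /* wrap around, shifted by one within the level */
                        current_offset = (current_offset + 1) % 8;
                        vector = FIRST_DEVICE_VECTOR + current_offset;
                }
                current_vector = vector;
                printf("allocation %d -> vector 0x%02x\n", n, vector);
        }
        return 0;
}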
1214 | |||
1215 | static struct irq_chip ioapic_chip; | ||
1216 | |||
1217 | #define IOAPIC_AUTO -1 | ||
1218 | #define IOAPIC_EDGE 0 | ||
1219 | #define IOAPIC_LEVEL 1 | ||
1220 | |||
1221 | static void ioapic_register_intr(int irq, int vector, unsigned long trigger) | ||
1222 | { | ||
1223 | if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || | ||
1224 | trigger == IOAPIC_LEVEL) { | ||
1225 | irq_desc[irq].status |= IRQ_LEVEL; | ||
1226 | set_irq_chip_and_handler_name(irq, &ioapic_chip, | ||
1227 | handle_fasteoi_irq, "fasteoi"); | ||
1228 | } else { | ||
1229 | irq_desc[irq].status &= ~IRQ_LEVEL; | ||
1230 | set_irq_chip_and_handler_name(irq, &ioapic_chip, | ||
1231 | handle_edge_irq, "edge"); | ||
1232 | } | ||
1233 | set_intr_gate(vector, interrupt[irq]); | ||
1234 | } | ||
1235 | |||
1236 | static void __init setup_IO_APIC_irqs(void) | ||
1237 | { | ||
1238 | struct IO_APIC_route_entry entry; | ||
1239 | int apic, pin, idx, irq, first_notcon = 1, vector; | ||
1240 | |||
1241 | apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); | ||
1242 | |||
1243 | for (apic = 0; apic < nr_ioapics; apic++) { | ||
1244 | for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { | ||
1245 | |||
1246 | /* | ||
1247 | * add it to the IO-APIC irq-routing table: | ||
1248 | */ | ||
1249 | memset(&entry, 0, sizeof(entry)); | ||
1250 | |||
1251 | entry.delivery_mode = INT_DELIVERY_MODE; | ||
1252 | entry.dest_mode = INT_DEST_MODE; | ||
1253 | entry.mask = 0; /* enable IRQ */ | ||
1254 | entry.dest.logical.logical_dest = | ||
1255 | cpu_mask_to_apicid(TARGET_CPUS); | ||
1256 | |||
1257 | idx = find_irq_entry(apic, pin, mp_INT); | ||
1258 | if (idx == -1) { | ||
1259 | if (first_notcon) { | ||
1260 | apic_printk(APIC_VERBOSE, KERN_DEBUG | ||
1261 | " IO-APIC (apicid-pin) %d-%d", | ||
1262 | mp_ioapics[apic].mp_apicid, | ||
1263 | pin); | ||
1264 | first_notcon = 0; | ||
1265 | } else | ||
1266 | apic_printk(APIC_VERBOSE, ", %d-%d", | ||
1267 | mp_ioapics[apic].mp_apicid, pin); | ||
1268 | continue; | ||
1269 | } | ||
1270 | |||
1271 | if (!first_notcon) { | ||
1272 | apic_printk(APIC_VERBOSE, " not connected.\n"); | ||
1273 | first_notcon = 1; | ||
1274 | } | ||
1275 | |||
1276 | entry.trigger = irq_trigger(idx); | ||
1277 | entry.polarity = irq_polarity(idx); | ||
1278 | |||
1279 | if (irq_trigger(idx)) { | ||
1280 | entry.trigger = 1; | ||
1281 | entry.mask = 1; | ||
1282 | } | ||
1283 | |||
1284 | irq = pin_2_irq(idx, apic, pin); | ||
1285 | /* | ||
1286 | * skip adding the timer int on secondary nodes, which causes | ||
1287 | * a small but painful rift in the time-space continuum | ||
1288 | */ | ||
1289 | if (multi_timer_check(apic, irq)) | ||
1290 | continue; | ||
1291 | else | ||
1292 | add_pin_to_irq(irq, apic, pin); | ||
1293 | |||
1294 | if (!apic && !IO_APIC_IRQ(irq)) | ||
1295 | continue; | ||
1296 | |||
1297 | if (IO_APIC_IRQ(irq)) { | ||
1298 | vector = assign_irq_vector(irq); | ||
1299 | entry.vector = vector; | ||
1300 | ioapic_register_intr(irq, vector, IOAPIC_AUTO); | ||
1301 | |||
1302 | if (!apic && (irq < 16)) | ||
1303 | disable_8259A_irq(irq); | ||
1304 | } | ||
1305 | ioapic_write_entry(apic, pin, entry); | ||
1306 | } | ||
1307 | } | ||
1308 | |||
1309 | if (!first_notcon) | ||
1310 | apic_printk(APIC_VERBOSE, " not connected.\n"); | ||
1311 | } | ||
1312 | |||
1313 | /* | ||
1314 | * Set up the timer pin, possibly with the 8259A master behind it. | ||
1315 | */ | ||
1316 | static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin, | ||
1317 | int vector) | ||
1318 | { | ||
1319 | struct IO_APIC_route_entry entry; | ||
1320 | |||
1321 | memset(&entry, 0, sizeof(entry)); | ||
1322 | |||
1323 | /* | ||
1324 | * We use logical delivery to get the timer IRQ | ||
1325 | * to the first CPU. | ||
1326 | */ | ||
1327 | entry.dest_mode = INT_DEST_MODE; | ||
1328 | entry.mask = 1; /* mask IRQ now */ | ||
1329 | entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS); | ||
1330 | entry.delivery_mode = INT_DELIVERY_MODE; | ||
1331 | entry.polarity = 0; | ||
1332 | entry.trigger = 0; | ||
1333 | entry.vector = vector; | ||
1334 | |||
1335 | /* | ||
1336 | * The timer IRQ doesn't have to know that behind the | ||
1337 | * scenes we may have an 8259A master in AEOI mode ... | ||
1338 | */ | ||
1339 | ioapic_register_intr(0, vector, IOAPIC_EDGE); | ||
1340 | |||
1341 | /* | ||
1342 | * Add it to the IO-APIC irq-routing table: | ||
1343 | */ | ||
1344 | ioapic_write_entry(apic, pin, entry); | ||
1345 | } | ||
1346 | |||
1347 | |||
1348 | __apicdebuginit(void) print_IO_APIC(void) | ||
1349 | { | ||
1350 | int apic, i; | ||
1351 | union IO_APIC_reg_00 reg_00; | ||
1352 | union IO_APIC_reg_01 reg_01; | ||
1353 | union IO_APIC_reg_02 reg_02; | ||
1354 | union IO_APIC_reg_03 reg_03; | ||
1355 | unsigned long flags; | ||
1356 | |||
1357 | if (apic_verbosity == APIC_QUIET) | ||
1358 | return; | ||
1359 | |||
1360 | printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); | ||
1361 | for (i = 0; i < nr_ioapics; i++) | ||
1362 | printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n", | ||
1363 | mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]); | ||
1364 | |||
1365 | /* | ||
1366 | * We are a bit conservative about what we expect. We have to | ||
1367 | * know about every hardware change ASAP. | ||
1368 | */ | ||
1369 | printk(KERN_INFO "testing the IO APIC.......................\n"); | ||
1370 | |||
1371 | for (apic = 0; apic < nr_ioapics; apic++) { | ||
1372 | |||
1373 | spin_lock_irqsave(&ioapic_lock, flags); | ||
1374 | reg_00.raw = io_apic_read(apic, 0); | ||
1375 | reg_01.raw = io_apic_read(apic, 1); | ||
1376 | if (reg_01.bits.version >= 0x10) | ||
1377 | reg_02.raw = io_apic_read(apic, 2); | ||
1378 | if (reg_01.bits.version >= 0x20) | ||
1379 | reg_03.raw = io_apic_read(apic, 3); | ||
1380 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
1381 | |||
1382 | printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid); | ||
1383 | printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw); | ||
1384 | printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID); | ||
1385 | printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type); | ||
1386 | printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS); | ||
1387 | |||
1388 | printk(KERN_DEBUG ".... register #01: %08X\n", reg_01.raw); | ||
1389 | printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries); | ||
1390 | |||
1391 | printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ); | ||
1392 | printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version); | ||
1393 | |||
1394 | /* | ||
1395 | * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02, | ||
1396 | * but the value of reg_02 is read as the previous read register | ||
1397 | * value, so ignore it if reg_02 == reg_01. | ||
1398 | */ | ||
1399 | if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) { | ||
1400 | printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw); | ||
1401 | printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration); | ||
1402 | } | ||
1403 | |||
1404 | /* | ||
1405 | * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02 | ||
1406 | * or reg_03, but the value of reg_0[23] is read as the previous read | ||
1407 | * register value, so ignore it if reg_03 == reg_0[12]. | ||
1408 | */ | ||
1409 | if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw && | ||
1410 | reg_03.raw != reg_01.raw) { | ||
1411 | printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw); | ||
1412 | printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT); | ||
1413 | } | ||
1414 | |||
1415 | printk(KERN_DEBUG ".... IRQ redirection table:\n"); | ||
1416 | |||
1417 | printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol" | ||
1418 | " Stat Dest Deli Vect: \n"); | ||
1419 | |||
1420 | for (i = 0; i <= reg_01.bits.entries; i++) { | ||
1421 | struct IO_APIC_route_entry entry; | ||
1422 | |||
1423 | entry = ioapic_read_entry(apic, i); | ||
1424 | |||
1425 | printk(KERN_DEBUG " %02x %03X %02X ", | ||
1426 | i, | ||
1427 | entry.dest.logical.logical_dest, | ||
1428 | entry.dest.physical.physical_dest | ||
1429 | ); | ||
1430 | |||
1431 | printk("%1d %1d %1d %1d %1d %1d %1d %02X\n", | ||
1432 | entry.mask, | ||
1433 | entry.trigger, | ||
1434 | entry.irr, | ||
1435 | entry.polarity, | ||
1436 | entry.delivery_status, | ||
1437 | entry.dest_mode, | ||
1438 | entry.delivery_mode, | ||
1439 | entry.vector | ||
1440 | ); | ||
1441 | } | ||
1442 | } | ||
1443 | printk(KERN_DEBUG "IRQ to pin mappings:\n"); | ||
1444 | for (i = 0; i < NR_IRQS; i++) { | ||
1445 | struct irq_pin_list *entry = irq_2_pin + i; | ||
1446 | if (entry->pin < 0) | ||
1447 | continue; | ||
1448 | printk(KERN_DEBUG "IRQ%d ", i); | ||
1449 | for (;;) { | ||
1450 | printk("-> %d:%d", entry->apic, entry->pin); | ||
1451 | if (!entry->next) | ||
1452 | break; | ||
1453 | entry = irq_2_pin + entry->next; | ||
1454 | } | ||
1455 | printk("\n"); | ||
1456 | } | ||
1457 | |||
1458 | printk(KERN_INFO ".................................... done.\n"); | ||
1459 | |||
1460 | return; | ||
1461 | } | ||
1462 | |||
1463 | __apicdebuginit(void) print_APIC_bitfield(int base) | ||
1464 | { | ||
1465 | unsigned int v; | ||
1466 | int i, j; | ||
1467 | |||
1468 | if (apic_verbosity == APIC_QUIET) | ||
1469 | return; | ||
1470 | |||
1471 | printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG); | ||
1472 | for (i = 0; i < 8; i++) { | ||
1473 | v = apic_read(base + i*0x10); | ||
1474 | for (j = 0; j < 32; j++) { | ||
1475 | if (v & (1<<j)) | ||
1476 | printk("1"); | ||
1477 | else | ||
1478 | printk("0"); | ||
1479 | } | ||
1480 | printk("\n"); | ||
1481 | } | ||
1482 | } | ||
1483 | |||
1484 | __apicdebuginit(void) print_local_APIC(void *dummy) | ||
1485 | { | ||
1486 | unsigned int v, ver, maxlvt; | ||
1487 | u64 icr; | ||
1488 | |||
1489 | if (apic_verbosity == APIC_QUIET) | ||
1490 | return; | ||
1491 | |||
1492 | printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n", | ||
1493 | smp_processor_id(), hard_smp_processor_id()); | ||
1494 | v = apic_read(APIC_ID); | ||
1495 | printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, | ||
1496 | GET_APIC_ID(v)); | ||
1497 | v = apic_read(APIC_LVR); | ||
1498 | printk(KERN_INFO "... APIC VERSION: %08x\n", v); | ||
1499 | ver = GET_APIC_VERSION(v); | ||
1500 | maxlvt = lapic_get_maxlvt(); | ||
1501 | |||
1502 | v = apic_read(APIC_TASKPRI); | ||
1503 | printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK); | ||
1504 | |||
1505 | if (APIC_INTEGRATED(ver)) { /* !82489DX */ | ||
1506 | v = apic_read(APIC_ARBPRI); | ||
1507 | printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v, | ||
1508 | v & APIC_ARBPRI_MASK); | ||
1509 | v = apic_read(APIC_PROCPRI); | ||
1510 | printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v); | ||
1511 | } | ||
1512 | |||
1513 | v = apic_read(APIC_EOI); | ||
1514 | printk(KERN_DEBUG "... APIC EOI: %08x\n", v); | ||
1515 | v = apic_read(APIC_RRR); | ||
1516 | printk(KERN_DEBUG "... APIC RRR: %08x\n", v); | ||
1517 | v = apic_read(APIC_LDR); | ||
1518 | printk(KERN_DEBUG "... APIC LDR: %08x\n", v); | ||
1519 | v = apic_read(APIC_DFR); | ||
1520 | printk(KERN_DEBUG "... APIC DFR: %08x\n", v); | ||
1521 | v = apic_read(APIC_SPIV); | ||
1522 | printk(KERN_DEBUG "... APIC SPIV: %08x\n", v); | ||
1523 | |||
1524 | printk(KERN_DEBUG "... APIC ISR field:\n"); | ||
1525 | print_APIC_bitfield(APIC_ISR); | ||
1526 | printk(KERN_DEBUG "... APIC TMR field:\n"); | ||
1527 | print_APIC_bitfield(APIC_TMR); | ||
1528 | printk(KERN_DEBUG "... APIC IRR field:\n"); | ||
1529 | print_APIC_bitfield(APIC_IRR); | ||
1530 | |||
1531 | if (APIC_INTEGRATED(ver)) { /* !82489DX */ | ||
1532 | if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ | ||
1533 | apic_write(APIC_ESR, 0); | ||
1534 | v = apic_read(APIC_ESR); | ||
1535 | printk(KERN_DEBUG "... APIC ESR: %08x\n", v); | ||
1536 | } | ||
1537 | |||
1538 | icr = apic_icr_read(); | ||
1539 | printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr); | ||
1540 | printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32)); | ||
1541 | |||
1542 | v = apic_read(APIC_LVTT); | ||
1543 | printk(KERN_DEBUG "... APIC LVTT: %08x\n", v); | ||
1544 | |||
1545 | if (maxlvt > 3) { /* PC is LVT#4. */ | ||
1546 | v = apic_read(APIC_LVTPC); | ||
1547 | printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v); | ||
1548 | } | ||
1549 | v = apic_read(APIC_LVT0); | ||
1550 | printk(KERN_DEBUG "... APIC LVT0: %08x\n", v); | ||
1551 | v = apic_read(APIC_LVT1); | ||
1552 | printk(KERN_DEBUG "... APIC LVT1: %08x\n", v); | ||
1553 | |||
1554 | if (maxlvt > 2) { /* ERR is LVT#3. */ | ||
1555 | v = apic_read(APIC_LVTERR); | ||
1556 | printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v); | ||
1557 | } | ||
1558 | |||
1559 | v = apic_read(APIC_TMICT); | ||
1560 | printk(KERN_DEBUG "... APIC TMICT: %08x\n", v); | ||
1561 | v = apic_read(APIC_TMCCT); | ||
1562 | printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v); | ||
1563 | v = apic_read(APIC_TDCR); | ||
1564 | printk(KERN_DEBUG "... APIC TDCR: %08x\n", v); | ||
1565 | printk("\n"); | ||
1566 | } | ||
1567 | |||
1568 | __apicdebuginit(void) print_all_local_APICs(void) | ||
1569 | { | ||
1570 | on_each_cpu(print_local_APIC, NULL, 1); | ||
1571 | } | ||
1572 | |||
1573 | __apicdebuginit(void) print_PIC(void) | ||
1574 | { | ||
1575 | unsigned int v; | ||
1576 | unsigned long flags; | ||
1577 | |||
1578 | if (apic_verbosity == APIC_QUIET) | ||
1579 | return; | ||
1580 | |||
1581 | printk(KERN_DEBUG "\nprinting PIC contents\n"); | ||
1582 | |||
1583 | spin_lock_irqsave(&i8259A_lock, flags); | ||
1584 | |||
1585 | v = inb(0xa1) << 8 | inb(0x21); | ||
1586 | printk(KERN_DEBUG "... PIC IMR: %04x\n", v); | ||
1587 | |||
1588 | v = inb(0xa0) << 8 | inb(0x20); | ||
1589 | printk(KERN_DEBUG "... PIC IRR: %04x\n", v); | ||
1590 | |||
1591 | outb(0x0b, 0xa0); | ||
1592 | outb(0x0b, 0x20); | ||
1593 | v = inb(0xa0) << 8 | inb(0x20); | ||
1594 | outb(0x0a, 0xa0); | ||
1595 | outb(0x0a, 0x20); | ||
1596 | |||
1597 | spin_unlock_irqrestore(&i8259A_lock, flags); | ||
1598 | |||
1599 | printk(KERN_DEBUG "... PIC ISR: %04x\n", v); | ||
1600 | |||
1601 | v = inb(0x4d1) << 8 | inb(0x4d0); | ||
1602 | printk(KERN_DEBUG "... PIC ELCR: %04x\n", v); | ||
1603 | } | ||
1604 | |||
1605 | __apicdebuginit(int) print_all_ICs(void) | ||
1606 | { | ||
1607 | print_PIC(); | ||
1608 | print_all_local_APICs(); | ||
1609 | print_IO_APIC(); | ||
1610 | |||
1611 | return 0; | ||
1612 | } | ||
1613 | |||
1614 | fs_initcall(print_all_ICs); | ||
1615 | |||
1616 | |||
1617 | static void __init enable_IO_APIC(void) | ||
1618 | { | ||
1619 | union IO_APIC_reg_01 reg_01; | ||
1620 | int i8259_apic, i8259_pin; | ||
1621 | int i, apic; | ||
1622 | unsigned long flags; | ||
1623 | |||
1624 | for (i = 0; i < PIN_MAP_SIZE; i++) { | ||
1625 | irq_2_pin[i].pin = -1; | ||
1626 | irq_2_pin[i].next = 0; | ||
1627 | } | ||
1628 | if (!pirqs_enabled) | ||
1629 | for (i = 0; i < MAX_PIRQS; i++) | ||
1630 | pirq_entries[i] = -1; | ||
1631 | |||
1632 | /* | ||
1633 | * The number of IO-APIC IRQ registers (== #pins): | ||
1634 | */ | ||
1635 | for (apic = 0; apic < nr_ioapics; apic++) { | ||
1636 | spin_lock_irqsave(&ioapic_lock, flags); | ||
1637 | reg_01.raw = io_apic_read(apic, 1); | ||
1638 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
1639 | nr_ioapic_registers[apic] = reg_01.bits.entries+1; | ||
1640 | } | ||
1641 | for (apic = 0; apic < nr_ioapics; apic++) { | ||
1642 | int pin; | ||
1643 | /* See if any of the pins is in ExtINT mode */ | ||
1644 | for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { | ||
1645 | struct IO_APIC_route_entry entry; | ||
1646 | entry = ioapic_read_entry(apic, pin); | ||
1647 | |||
1648 | |||
1649 | /* If the interrupt line is enabled and in ExtINT mode, | ||
1650 | * we have found the pin where the i8259 is connected. | ||
1651 | */ | ||
1652 | if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) { | ||
1653 | ioapic_i8259.apic = apic; | ||
1654 | ioapic_i8259.pin = pin; | ||
1655 | goto found_i8259; | ||
1656 | } | ||
1657 | } | ||
1658 | } | ||
1659 | found_i8259: | ||
1660 | /* Look to see whether the MP table has reported the ExtINT. */ | ||
1661 | /* If we could not find the appropriate pin by looking at the ioapic, | ||
1662 | * the i8259 is probably not connected to the ioapic, but give the | ||
1663 | * mptable a chance anyway. | ||
1664 | */ | ||
1665 | i8259_pin = find_isa_irq_pin(0, mp_ExtINT); | ||
1666 | i8259_apic = find_isa_irq_apic(0, mp_ExtINT); | ||
1667 | /* Trust the MP table if nothing is setup in the hardware */ | ||
1668 | if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) { | ||
1669 | printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n"); | ||
1670 | ioapic_i8259.pin = i8259_pin; | ||
1671 | ioapic_i8259.apic = i8259_apic; | ||
1672 | } | ||
1673 | /* Complain if the MP table and the hardware disagree */ | ||
1674 | if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) && | ||
1675 | (i8259_pin >= 0) && (ioapic_i8259.pin >= 0)) | ||
1676 | { | ||
1677 | printk(KERN_WARNING "ExtINT in hardware and MP table differ\n"); | ||
1678 | } | ||
1679 | |||
1680 | /* | ||
1681 | * Do not trust the IO-APIC being empty at bootup | ||
1682 | */ | ||
1683 | clear_IO_APIC(); | ||
1684 | } | ||
1685 | |||
1686 | /* | ||
1687 | * Not an __init, needed by the reboot code | ||
1688 | */ | ||
1689 | void disable_IO_APIC(void) | ||
1690 | { | ||
1691 | /* | ||
1692 | * Clear the IO-APIC before rebooting: | ||
1693 | */ | ||
1694 | clear_IO_APIC(); | ||
1695 | |||
1696 | /* | ||
1697 | * If the i8259 is routed through an IOAPIC | ||
1698 | * Put that IOAPIC in virtual wire mode | ||
1699 | * so legacy interrupts can be delivered. | ||
1700 | */ | ||
1701 | if (ioapic_i8259.pin != -1) { | ||
1702 | struct IO_APIC_route_entry entry; | ||
1703 | |||
1704 | memset(&entry, 0, sizeof(entry)); | ||
1705 | entry.mask = 0; /* Enabled */ | ||
1706 | entry.trigger = 0; /* Edge */ | ||
1707 | entry.irr = 0; | ||
1708 | entry.polarity = 0; /* High */ | ||
1709 | entry.delivery_status = 0; | ||
1710 | entry.dest_mode = 0; /* Physical */ | ||
1711 | entry.delivery_mode = dest_ExtINT; /* ExtInt */ | ||
1712 | entry.vector = 0; | ||
1713 | entry.dest.physical.physical_dest = read_apic_id(); | ||
1714 | |||
1715 | /* | ||
1716 | * Add it to the IO-APIC irq-routing table: | ||
1717 | */ | ||
1718 | ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry); | ||
1719 | } | ||
1720 | disconnect_bsp_APIC(ioapic_i8259.pin != -1); | ||
1721 | } | ||
1722 | |||
1723 | /* | ||
1724 | * function to set the IO-APIC physical IDs based on the | ||
1725 | * values stored in the MPC table. | ||
1726 | * | ||
1727 | * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999 | ||
1728 | */ | ||
1729 | |||
1730 | static void __init setup_ioapic_ids_from_mpc(void) | ||
1731 | { | ||
1732 | union IO_APIC_reg_00 reg_00; | ||
1733 | physid_mask_t phys_id_present_map; | ||
1734 | int apic; | ||
1735 | int i; | ||
1736 | unsigned char old_id; | ||
1737 | unsigned long flags; | ||
1738 | |||
1739 | if (x86_quirks->setup_ioapic_ids && x86_quirks->setup_ioapic_ids()) | ||
1740 | return; | ||
1741 | |||
1742 | /* | ||
1743 | * Don't check I/O APIC IDs for xAPIC systems. They have | ||
1744 | * no meaning without the serial APIC bus. | ||
1745 | */ | ||
1746 | if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) | ||
1747 | || APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) | ||
1748 | return; | ||
1749 | /* | ||
1750 | * This is broken; anything with a real cpu count has to | ||
1751 | * circumvent this idiocy regardless. | ||
1752 | */ | ||
1753 | phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map); | ||
1754 | |||
1755 | /* | ||
1756 | * Set the IOAPIC ID to the value stored in the MPC table. | ||
1757 | */ | ||
1758 | for (apic = 0; apic < nr_ioapics; apic++) { | ||
1759 | |||
1760 | /* Read the register 0 value */ | ||
1761 | spin_lock_irqsave(&ioapic_lock, flags); | ||
1762 | reg_00.raw = io_apic_read(apic, 0); | ||
1763 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
1764 | |||
1765 | old_id = mp_ioapics[apic].mp_apicid; | ||
1766 | |||
1767 | if (mp_ioapics[apic].mp_apicid >= get_physical_broadcast()) { | ||
1768 | printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n", | ||
1769 | apic, mp_ioapics[apic].mp_apicid); | ||
1770 | printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", | ||
1771 | reg_00.bits.ID); | ||
1772 | mp_ioapics[apic].mp_apicid = reg_00.bits.ID; | ||
1773 | } | ||
1774 | |||
1775 | /* | ||
1776 | * Sanity check, is the ID really free? Every APIC in a | ||
1777 | * system must have a unique ID or we get lots of nice | ||
1778 | * 'stuck on smp_invalidate_needed IPI wait' messages. | ||
1779 | */ | ||
1780 | if (check_apicid_used(phys_id_present_map, | ||
1781 | mp_ioapics[apic].mp_apicid)) { | ||
1782 | printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n", | ||
1783 | apic, mp_ioapics[apic].mp_apicid); | ||
1784 | for (i = 0; i < get_physical_broadcast(); i++) | ||
1785 | if (!physid_isset(i, phys_id_present_map)) | ||
1786 | break; | ||
1787 | if (i >= get_physical_broadcast()) | ||
1788 | panic("Max APIC ID exceeded!\n"); | ||
1789 | printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", | ||
1790 | i); | ||
1791 | physid_set(i, phys_id_present_map); | ||
1792 | mp_ioapics[apic].mp_apicid = i; | ||
1793 | } else { | ||
1794 | physid_mask_t tmp; | ||
1795 | tmp = apicid_to_cpu_present(mp_ioapics[apic].mp_apicid); | ||
1796 | apic_printk(APIC_VERBOSE, "Setting %d in the " | ||
1797 | "phys_id_present_map\n", | ||
1798 | mp_ioapics[apic].mp_apicid); | ||
1799 | physids_or(phys_id_present_map, phys_id_present_map, tmp); | ||
1800 | } | ||
1801 | |||
1802 | |||
1803 | /* | ||
1804 | * We need to adjust the IRQ routing table | ||
1805 | * if the ID changed. | ||
1806 | */ | ||
1807 | if (old_id != mp_ioapics[apic].mp_apicid) | ||
1808 | for (i = 0; i < mp_irq_entries; i++) | ||
1809 | if (mp_irqs[i].mp_dstapic == old_id) | ||
1810 | mp_irqs[i].mp_dstapic | ||
1811 | = mp_ioapics[apic].mp_apicid; | ||
1812 | |||
1813 | /* | ||
1814 | * Read the right value from the MPC table and | ||
1815 | * write it into the ID register. | ||
1816 | */ | ||
1817 | apic_printk(APIC_VERBOSE, KERN_INFO | ||
1818 | "...changing IO-APIC physical APIC ID to %d ...", | ||
1819 | mp_ioapics[apic].mp_apicid); | ||
1820 | |||
1821 | reg_00.bits.ID = mp_ioapics[apic].mp_apicid; | ||
1822 | spin_lock_irqsave(&ioapic_lock, flags); | ||
1823 | io_apic_write(apic, 0, reg_00.raw); | ||
1824 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
1825 | |||
1826 | /* | ||
1827 | * Sanity check | ||
1828 | */ | ||
1829 | spin_lock_irqsave(&ioapic_lock, flags); | ||
1830 | reg_00.raw = io_apic_read(apic, 0); | ||
1831 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
1832 | if (reg_00.bits.ID != mp_ioapics[apic].mp_apicid) | ||
1833 | printk("could not set ID!\n"); | ||
1834 | else | ||
1835 | apic_printk(APIC_VERBOSE, " ok.\n"); | ||
1836 | } | ||
1837 | } | ||
1838 | |||
1839 | int no_timer_check __initdata; | ||
1840 | |||
1841 | static int __init notimercheck(char *s) | ||
1842 | { | ||
1843 | no_timer_check = 1; | ||
1844 | return 1; | ||
1845 | } | ||
1846 | __setup("no_timer_check", notimercheck); | ||
1847 | |||
1848 | /* | ||
1849 | * There is a nasty bug in some older SMP boards, their mptable lies | ||
1850 | * about the timer IRQ. We do the following to work around the situation: | ||
1851 | * | ||
1852 | * - timer IRQ defaults to IO-APIC IRQ | ||
1853 | * - if this function detects that timer IRQs are defunct, then we fall | ||
1854 | * back to ISA timer IRQs | ||
1855 | */ | ||
1856 | static int __init timer_irq_works(void) | ||
1857 | { | ||
1858 | unsigned long t1 = jiffies; | ||
1859 | unsigned long flags; | ||
1860 | |||
1861 | if (no_timer_check) | ||
1862 | return 1; | ||
1863 | |||
1864 | local_save_flags(flags); | ||
1865 | local_irq_enable(); | ||
1866 | /* Let ten ticks pass... */ | ||
1867 | mdelay((10 * 1000) / HZ); | ||
1868 | local_irq_restore(flags); | ||
1869 | |||
1870 | /* | ||
1871 | * Expect a few ticks at least, to be sure some possible | ||
1872 | * glue logic does not lock up after the first one or two | ||
1873 | * ticks in a non-ExtINT mode. Also the local APIC | ||
1874 | * might have cached one ExtINT interrupt. Finally, at | ||
1875 | * least one tick may be lost due to delays. | ||
1876 | */ | ||
1877 | if (time_after(jiffies, t1 + 4)) | ||
1878 | return 1; | ||
1879 | |||
1880 | return 0; | ||
1881 | } | ||
1882 | |||
1883 | /* | ||
1884 | * In the SMP+IOAPIC case it might happen that there are an unspecified | ||
1885 | * number of pending IRQ events unhandled. These cases are very rare, | ||
1886 | * so we 'resend' these IRQs via IPIs, to the same CPU. It's much | ||
1887 | * better to do it this way because then we do not have to be aware of | ||
1888 | * 'pending' interrupts in the IRQ path, except at this point. | ||
1889 | */ | ||
1890 | /* | ||
1891 | * Edge triggered needs to resend any interrupt | ||
1892 | * that was delayed but this is now handled in the device | ||
1893 | * independent code. | ||
1894 | */ | ||
1895 | |||
1896 | /* | ||
1897 | * Startup quirk: | ||
1898 | * | ||
1899 | * Starting up an edge-triggered IO-APIC interrupt is | ||
1900 | * nasty - we need to make sure that we get the edge. | ||
1901 | * If it is already asserted for some reason, we need to | ||
1902 | * return 1 to indicate that it was pending. | ||
1903 | * | ||
1904 | * This is not complete - we should be able to fake | ||
1905 | * an edge even if it isn't on the 8259A... | ||
1906 | * | ||
1907 | * (We do this for level-triggered IRQs too - it cannot hurt.) | ||
1908 | */ | ||
1909 | static unsigned int startup_ioapic_irq(unsigned int irq) | ||
1910 | { | ||
1911 | int was_pending = 0; | ||
1912 | unsigned long flags; | ||
1913 | |||
1914 | spin_lock_irqsave(&ioapic_lock, flags); | ||
1915 | if (irq < 16) { | ||
1916 | disable_8259A_irq(irq); | ||
1917 | if (i8259A_irq_pending(irq)) | ||
1918 | was_pending = 1; | ||
1919 | } | ||
1920 | __unmask_IO_APIC_irq(irq); | ||
1921 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
1922 | |||
1923 | return was_pending; | ||
1924 | } | ||
1925 | |||
1926 | static void ack_ioapic_irq(unsigned int irq) | ||
1927 | { | ||
1928 | move_native_irq(irq); | ||
1929 | ack_APIC_irq(); | ||
1930 | } | ||
1931 | |||
1932 | static void ack_ioapic_quirk_irq(unsigned int irq) | ||
1933 | { | ||
1934 | unsigned long v; | ||
1935 | int i; | ||
1936 | |||
1937 | move_native_irq(irq); | ||
1938 | /* | ||
1939 | * It appears there is an erratum which affects at least version 0x11 | ||
1940 | * of I/O APIC (that's the 82093AA and cores integrated into various | ||
1941 | * chipsets). Under certain conditions a level-triggered interrupt is | ||
1942 | * erroneously delivered as edge-triggered one but the respective IRR | ||
1943 | * bit gets set nevertheless. As a result the I/O unit expects an EOI | ||
1944 | * message but it will never arrive and further interrupts are blocked | ||
1945 | * from the source. The exact reason is so far unknown, but the | ||
1946 | * phenomenon was observed when two consecutive interrupt requests | ||
1947 | * from a given source get delivered to the same CPU and the source is | ||
1948 | * temporarily disabled in between. | ||
1949 | * | ||
1950 | * A workaround is to simulate an EOI message manually. We achieve it | ||
1951 | * by setting the trigger mode to edge and then to level when the edge | ||
1952 | * trigger mode gets detected in the TMR of a local APIC for a | ||
1953 | * level-triggered interrupt. We mask the source for the time of the | ||
1954 | * operation to prevent an edge-triggered interrupt escaping meanwhile. | ||
1955 | * The idea is from Manfred Spraul. --macro | ||
1956 | */ | ||
1957 | i = irq_vector[irq]; | ||
1958 | |||
1959 | v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); | ||
1960 | |||
1961 | ack_APIC_irq(); | ||
1962 | |||
1963 | if (!(v & (1 << (i & 0x1f)))) { | ||
1964 | atomic_inc(&irq_mis_count); | ||
1965 | spin_lock(&ioapic_lock); | ||
1966 | __mask_and_edge_IO_APIC_irq(irq); | ||
1967 | __unmask_and_level_IO_APIC_irq(irq); | ||
1968 | spin_unlock(&ioapic_lock); | ||
1969 | } | ||
1970 | } | ||
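The TMR lookup in the workaround above relies on the local APIC register layout: the 256 possible vectors are spread over eight 32-bit ISR/TMR/IRR registers spaced 0x10 apart, so vector i is found at byte offset (i & ~0x1f) >> 1 from the register base, bit i & 0x1f. A sketch of that index arithmetic with a made-up vector:

#include <stdio.h>

int main(void)
{
        unsigned int vector = 0x59;             /* hypothetical device vector */
        unsigned int reg_offset = (vector & ~0x1f) >> 1;
        unsigned int bit = vector & 0x1f;

        printf("vector 0x%02x -> APIC_TMR + 0x%02x, bit %u\n",
               vector, reg_offset, bit);
        return 0;
}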
1971 | |||
1972 | static int ioapic_retrigger_irq(unsigned int irq) | ||
1973 | { | ||
1974 | send_IPI_self(irq_vector[irq]); | ||
1975 | |||
1976 | return 1; | ||
1977 | } | ||
1978 | |||
1979 | static struct irq_chip ioapic_chip __read_mostly = { | ||
1980 | .name = "IO-APIC", | ||
1981 | .startup = startup_ioapic_irq, | ||
1982 | .mask = mask_IO_APIC_irq, | ||
1983 | .unmask = unmask_IO_APIC_irq, | ||
1984 | .ack = ack_ioapic_irq, | ||
1985 | .eoi = ack_ioapic_quirk_irq, | ||
1986 | #ifdef CONFIG_SMP | ||
1987 | .set_affinity = set_ioapic_affinity_irq, | ||
1988 | #endif | ||
1989 | .retrigger = ioapic_retrigger_irq, | ||
1990 | }; | ||
1991 | |||
1992 | |||
1993 | static inline void init_IO_APIC_traps(void) | ||
1994 | { | ||
1995 | int irq; | ||
1996 | |||
1997 | /* | ||
1998 | * NOTE! The local APIC isn't very good at handling | ||
1999 | * multiple interrupts at the same interrupt level. | ||
2000 | * As the interrupt level is determined by taking the | ||
2001 | * vector number and shifting that right by 4, we | ||
2002 | * want to spread these out a bit so that they don't | ||
2003 | * all fall in the same interrupt level. | ||
2004 | * | ||
2005 | * Also, we've got to be careful not to trash gate | ||
2006 | * 0x80, because int 0x80 is hm, kind of importantish. ;) | ||
2007 | */ | ||
2008 | for (irq = 0; irq < NR_IRQS ; irq++) { | ||
2009 | if (IO_APIC_IRQ(irq) && !irq_vector[irq]) { | ||
2010 | /* | ||
2011 | * Hmm.. We don't have an entry for this, | ||
2012 | * so default to an old-fashioned 8259 | ||
2013 | * interrupt if we can.. | ||
2014 | */ | ||
2015 | if (irq < 16) | ||
2016 | make_8259A_irq(irq); | ||
2017 | else | ||
2018 | /* Strange. Oh, well.. */ | ||
2019 | irq_desc[irq].chip = &no_irq_chip; | ||
2020 | } | ||
2021 | } | ||
2022 | } | ||
2023 | |||
2024 | /* | ||
2025 | * The local APIC irq-chip implementation: | ||
2026 | */ | ||
2027 | |||
2028 | static void ack_lapic_irq(unsigned int irq) | ||
2029 | { | ||
2030 | ack_APIC_irq(); | ||
2031 | } | ||
2032 | |||
2033 | static void mask_lapic_irq(unsigned int irq) | ||
2034 | { | ||
2035 | unsigned long v; | ||
2036 | |||
2037 | v = apic_read(APIC_LVT0); | ||
2038 | apic_write(APIC_LVT0, v | APIC_LVT_MASKED); | ||
2039 | } | ||
2040 | |||
2041 | static void unmask_lapic_irq(unsigned int irq) | ||
2042 | { | ||
2043 | unsigned long v; | ||
2044 | |||
2045 | v = apic_read(APIC_LVT0); | ||
2046 | apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED); | ||
2047 | } | ||
2048 | |||
2049 | static struct irq_chip lapic_chip __read_mostly = { | ||
2050 | .name = "local-APIC", | ||
2051 | .mask = mask_lapic_irq, | ||
2052 | .unmask = unmask_lapic_irq, | ||
2053 | .ack = ack_lapic_irq, | ||
2054 | }; | ||
2055 | |||
2056 | static void lapic_register_intr(int irq, int vector) | ||
2057 | { | ||
2058 | irq_desc[irq].status &= ~IRQ_LEVEL; | ||
2059 | set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, | ||
2060 | "edge"); | ||
2061 | set_intr_gate(vector, interrupt[irq]); | ||
2062 | } | ||
2063 | |||
2064 | static void __init setup_nmi(void) | ||
2065 | { | ||
2066 | /* | ||
2067 | * Dirty trick to enable the NMI watchdog ... | ||
2068 | * We put the 8259A master into AEOI mode and | ||
2069 | * unmask on all local APICs LVT0 as NMI. | ||
2070 | * | ||
2071 | * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire') | ||
2072 | * is from Maciej W. Rozycki - so we do not have to EOI from | ||
2073 | * the NMI handler or the timer interrupt. | ||
2074 | */ | ||
2075 | apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ..."); | ||
2076 | |||
2077 | enable_NMI_through_LVT0(); | ||
2078 | |||
2079 | apic_printk(APIC_VERBOSE, " done.\n"); | ||
2080 | } | ||
2081 | |||
2082 | /* | ||
2083 | * This looks a bit hackish, but it's about the only way of sending | ||
2084 | * a few INTA cycles to 8259As and any associated glue logic. ICR does | ||
2085 | * not support the ExtINT mode, unfortunately. We need to send these | ||
2086 | * cycles as some i82489DX-based boards have glue logic that keeps the | ||
2087 | * 8259A interrupt line asserted until INTA. --macro | ||
2088 | */ | ||
2089 | static inline void __init unlock_ExtINT_logic(void) | ||
2090 | { | ||
2091 | int apic, pin, i; | ||
2092 | struct IO_APIC_route_entry entry0, entry1; | ||
2093 | unsigned char save_control, save_freq_select; | ||
2094 | |||
2095 | pin = find_isa_irq_pin(8, mp_INT); | ||
2096 | if (pin == -1) { | ||
2097 | WARN_ON_ONCE(1); | ||
2098 | return; | ||
2099 | } | ||
2100 | apic = find_isa_irq_apic(8, mp_INT); | ||
2101 | if (apic == -1) { | ||
2102 | WARN_ON_ONCE(1); | ||
2103 | return; | ||
2104 | } | ||
2105 | |||
2106 | entry0 = ioapic_read_entry(apic, pin); | ||
2107 | clear_IO_APIC_pin(apic, pin); | ||
2108 | |||
2109 | memset(&entry1, 0, sizeof(entry1)); | ||
2110 | |||
2111 | entry1.dest_mode = 0; /* physical delivery */ | ||
2112 | entry1.mask = 0; /* unmask IRQ now */ | ||
2113 | entry1.dest.physical.physical_dest = hard_smp_processor_id(); | ||
2114 | entry1.delivery_mode = dest_ExtINT; | ||
2115 | entry1.polarity = entry0.polarity; | ||
2116 | entry1.trigger = 0; | ||
2117 | entry1.vector = 0; | ||
2118 | |||
2119 | ioapic_write_entry(apic, pin, entry1); | ||
2120 | |||
2121 | save_control = CMOS_READ(RTC_CONTROL); | ||
2122 | save_freq_select = CMOS_READ(RTC_FREQ_SELECT); | ||
2123 | CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6, | ||
2124 | RTC_FREQ_SELECT); | ||
2125 | CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL); | ||
2126 | |||
2127 | i = 100; | ||
2128 | while (i-- > 0) { | ||
2129 | mdelay(10); | ||
2130 | if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF) | ||
2131 | i -= 10; | ||
2132 | } | ||
2133 | |||
2134 | CMOS_WRITE(save_control, RTC_CONTROL); | ||
2135 | CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); | ||
2136 | clear_IO_APIC_pin(apic, pin); | ||
2137 | |||
2138 | ioapic_write_entry(apic, pin, entry0); | ||
2139 | } | ||
2140 | |||
2141 | /* | ||
2142 | * This code may look a bit paranoid, but it's supposed to cooperate with | ||
2143 | * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ | ||
2144 | * is so screwy. Thanks to Brian Perkins for testing/hacking this beast | ||
2145 | * fanatically on his truly buggy board. | ||
2146 | */ | ||
2147 | static inline void __init check_timer(void) | ||
2148 | { | ||
2149 | int apic1, pin1, apic2, pin2; | ||
2150 | int no_pin1 = 0; | ||
2151 | int vector; | ||
2152 | unsigned int ver; | ||
2153 | unsigned long flags; | ||
2154 | |||
2155 | local_irq_save(flags); | ||
2156 | |||
2157 | ver = apic_read(APIC_LVR); | ||
2158 | ver = GET_APIC_VERSION(ver); | ||
2159 | |||
2160 | /* | ||
2161 | * get/set the timer IRQ vector: | ||
2162 | */ | ||
2163 | disable_8259A_irq(0); | ||
2164 | vector = assign_irq_vector(0); | ||
2165 | set_intr_gate(vector, interrupt[0]); | ||
2166 | |||
2167 | /* | ||
2168 | * As IRQ0 is to be enabled in the 8259A, the virtual | ||
2169 | * wire has to be disabled in the local APIC. Also | ||
2170 | * timer interrupts need to be acknowledged manually in | ||
2171 | * the 8259A for the i82489DX when using the NMI | ||
2172 | * watchdog as that APIC treats NMIs as level-triggered. | ||
2173 | * The AEOI mode will finish them in the 8259A | ||
2174 | * automatically. | ||
2175 | */ | ||
2176 | apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); | ||
2177 | init_8259A(1); | ||
2178 | timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver)); | ||
2179 | |||
2180 | pin1 = find_isa_irq_pin(0, mp_INT); | ||
2181 | apic1 = find_isa_irq_apic(0, mp_INT); | ||
2182 | pin2 = ioapic_i8259.pin; | ||
2183 | apic2 = ioapic_i8259.apic; | ||
2184 | |||
2185 | apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X " | ||
2186 | "apic1=%d pin1=%d apic2=%d pin2=%d\n", | ||
2187 | vector, apic1, pin1, apic2, pin2); | ||
2188 | |||
2189 | /* | ||
2190 | * Some BIOS writers are clueless and report the ExtINTA | ||
2191 | * I/O APIC input from the cascaded 8259A as the timer | ||
2192 | * interrupt input. So just in case, if only one pin | ||
2193 | * was found above, try it both directly and through the | ||
2194 | * 8259A. | ||
2195 | */ | ||
2196 | if (pin1 == -1) { | ||
2197 | pin1 = pin2; | ||
2198 | apic1 = apic2; | ||
2199 | no_pin1 = 1; | ||
2200 | } else if (pin2 == -1) { | ||
2201 | pin2 = pin1; | ||
2202 | apic2 = apic1; | ||
2203 | } | ||
2204 | |||
2205 | if (pin1 != -1) { | ||
2206 | /* | ||
2207 | * Ok, does IRQ0 through the IOAPIC work? | ||
2208 | */ | ||
2209 | if (no_pin1) { | ||
2210 | add_pin_to_irq(0, apic1, pin1); | ||
2211 | setup_timer_IRQ0_pin(apic1, pin1, vector); | ||
2212 | } | ||
2213 | unmask_IO_APIC_irq(0); | ||
2214 | if (timer_irq_works()) { | ||
2215 | if (nmi_watchdog == NMI_IO_APIC) { | ||
2216 | setup_nmi(); | ||
2217 | enable_8259A_irq(0); | ||
2218 | } | ||
2219 | if (disable_timer_pin_1 > 0) | ||
2220 | clear_IO_APIC_pin(0, pin1); | ||
2221 | goto out; | ||
2222 | } | ||
2223 | clear_IO_APIC_pin(apic1, pin1); | ||
2224 | if (!no_pin1) | ||
2225 | apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: " | ||
2226 | "8254 timer not connected to IO-APIC\n"); | ||
2227 | |||
2228 | apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer " | ||
2229 | "(IRQ0) through the 8259A ...\n"); | ||
2230 | apic_printk(APIC_QUIET, KERN_INFO | ||
2231 | "..... (found apic %d pin %d) ...\n", apic2, pin2); | ||
2232 | /* | ||
2233 | * legacy devices should be connected to IO APIC #0 | ||
2234 | */ | ||
2235 | replace_pin_at_irq(0, apic1, pin1, apic2, pin2); | ||
2236 | setup_timer_IRQ0_pin(apic2, pin2, vector); | ||
2237 | unmask_IO_APIC_irq(0); | ||
2238 | enable_8259A_irq(0); | ||
2239 | if (timer_irq_works()) { | ||
2240 | apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); | ||
2241 | timer_through_8259 = 1; | ||
2242 | if (nmi_watchdog == NMI_IO_APIC) { | ||
2243 | disable_8259A_irq(0); | ||
2244 | setup_nmi(); | ||
2245 | enable_8259A_irq(0); | ||
2246 | } | ||
2247 | goto out; | ||
2248 | } | ||
2249 | /* | ||
2250 | * Cleanup, just in case ... | ||
2251 | */ | ||
2252 | disable_8259A_irq(0); | ||
2253 | clear_IO_APIC_pin(apic2, pin2); | ||
2254 | apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n"); | ||
2255 | } | ||
2256 | |||
2257 | if (nmi_watchdog == NMI_IO_APIC) { | ||
2258 | apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work " | ||
2259 | "through the IO-APIC - disabling NMI Watchdog!\n"); | ||
2260 | nmi_watchdog = NMI_NONE; | ||
2261 | } | ||
2262 | timer_ack = 0; | ||
2263 | |||
2264 | apic_printk(APIC_QUIET, KERN_INFO | ||
2265 | "...trying to set up timer as Virtual Wire IRQ...\n"); | ||
2266 | |||
2267 | lapic_register_intr(0, vector); | ||
2268 | apic_write(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */ | ||
2269 | enable_8259A_irq(0); | ||
2270 | |||
2271 | if (timer_irq_works()) { | ||
2272 | apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); | ||
2273 | goto out; | ||
2274 | } | ||
2275 | disable_8259A_irq(0); | ||
2276 | apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector); | ||
2277 | apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); | ||
2278 | |||
2279 | apic_printk(APIC_QUIET, KERN_INFO | ||
2280 | "...trying to set up timer as ExtINT IRQ...\n"); | ||
2281 | |||
2282 | init_8259A(0); | ||
2283 | make_8259A_irq(0); | ||
2284 | apic_write(APIC_LVT0, APIC_DM_EXTINT); | ||
2285 | |||
2286 | unlock_ExtINT_logic(); | ||
2287 | |||
2288 | if (timer_irq_works()) { | ||
2289 | apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); | ||
2290 | goto out; | ||
2291 | } | ||
2292 | apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n"); | ||
2293 | panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a " | ||
2294 | "report. Then try booting with the 'noapic' option.\n"); | ||
2295 | out: | ||
2296 | local_irq_restore(flags); | ||
2297 | } | ||
2298 | |||
2299 | /* | ||
2300 | * Traditionally ISA IRQ2 is the cascade IRQ, and is not available | ||
2301 | * to devices. However there may be an I/O APIC pin available for | ||
2302 | * this interrupt regardless. The pin may be left unconnected, but | ||
2303 | * typically it will be reused as an ExtINT cascade interrupt for | ||
2304 | * the master 8259A. In the MPS case such a pin will normally be | ||
2305 | * reported as an ExtINT interrupt in the MP table. With ACPI | ||
2306 | * there is no provision for ExtINT interrupts, and in the absence | ||
2307 | * of an override it would be treated as an ordinary ISA I/O APIC | ||
2308 | * interrupt, that is edge-triggered and unmasked by default. We | ||
2309 | * used to do this, but it caused problems on some systems because | ||
2310 | * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using | ||
2311 | * the same ExtINT cascade interrupt to drive the local APIC of the | ||
2312 | * bootstrap processor. Therefore we refrain from routing IRQ2 to | ||
2313 | * the I/O APIC in all cases now. No actual device should request | ||
2314 | * it anyway. --macro | ||
2315 | */ | ||
2316 | #define PIC_IRQS (1 << PIC_CASCADE_IR) | ||
2317 | |||
2318 | void __init setup_IO_APIC(void) | ||
2319 | { | ||
2320 | int i; | ||
2321 | |||
2322 | /* Reserve all the system vectors. */ | ||
2323 | for (i = first_system_vector; i < NR_VECTORS; i++) | ||
2324 | set_bit(i, used_vectors); | ||
2325 | |||
2326 | enable_IO_APIC(); | ||
2327 | |||
2328 | io_apic_irqs = ~PIC_IRQS; | ||
2329 | |||
2330 | printk("ENABLING IO-APIC IRQs\n"); | ||
2331 | |||
2332 | /* | ||
2333 | * Set up IO-APIC IRQ routing. | ||
2334 | */ | ||
2335 | if (!acpi_ioapic) | ||
2336 | setup_ioapic_ids_from_mpc(); | ||
2337 | sync_Arb_IDs(); | ||
2338 | setup_IO_APIC_irqs(); | ||
2339 | init_IO_APIC_traps(); | ||
2340 | check_timer(); | ||
2341 | } | ||
2342 | |||
2343 | /* | ||
2344 | * Called after all the initialization is done. If we didn't find any | ||
2345 | * APIC bugs then we can allow the modify fast path | ||
2346 | */ | ||
2347 | |||
2348 | static int __init io_apic_bug_finalize(void) | ||
2349 | { | ||
2350 | if (sis_apic_bug == -1) | ||
2351 | sis_apic_bug = 0; | ||
2352 | return 0; | ||
2353 | } | ||
2354 | |||
2355 | late_initcall(io_apic_bug_finalize); | ||
2356 | |||
2357 | struct sysfs_ioapic_data { | ||
2358 | struct sys_device dev; | ||
2359 | struct IO_APIC_route_entry entry[0]; | ||
2360 | }; | ||
2361 | static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS]; | ||
2362 | |||
2363 | static int ioapic_suspend(struct sys_device *dev, pm_message_t state) | ||
2364 | { | ||
2365 | struct IO_APIC_route_entry *entry; | ||
2366 | struct sysfs_ioapic_data *data; | ||
2367 | int i; | ||
2368 | |||
2369 | data = container_of(dev, struct sysfs_ioapic_data, dev); | ||
2370 | entry = data->entry; | ||
2371 | for (i = 0; i < nr_ioapic_registers[dev->id]; i++) | ||
2372 | entry[i] = ioapic_read_entry(dev->id, i); | ||
2373 | |||
2374 | return 0; | ||
2375 | } | ||
2376 | |||
2377 | static int ioapic_resume(struct sys_device *dev) | ||
2378 | { | ||
2379 | struct IO_APIC_route_entry *entry; | ||
2380 | struct sysfs_ioapic_data *data; | ||
2381 | unsigned long flags; | ||
2382 | union IO_APIC_reg_00 reg_00; | ||
2383 | int i; | ||
2384 | |||
2385 | data = container_of(dev, struct sysfs_ioapic_data, dev); | ||
2386 | entry = data->entry; | ||
2387 | |||
2388 | spin_lock_irqsave(&ioapic_lock, flags); | ||
2389 | reg_00.raw = io_apic_read(dev->id, 0); | ||
2390 | if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) { | ||
2391 | reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid; | ||
2392 | io_apic_write(dev->id, 0, reg_00.raw); | ||
2393 | } | ||
2394 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
2395 | for (i = 0; i < nr_ioapic_registers[dev->id]; i++) | ||
2396 | ioapic_write_entry(dev->id, i, entry[i]); | ||
2397 | |||
2398 | return 0; | ||
2399 | } | ||
2400 | |||
2401 | static struct sysdev_class ioapic_sysdev_class = { | ||
2402 | .name = "ioapic", | ||
2403 | .suspend = ioapic_suspend, | ||
2404 | .resume = ioapic_resume, | ||
2405 | }; | ||
2406 | |||
2407 | static int __init ioapic_init_sysfs(void) | ||
2408 | { | ||
2409 | struct sys_device *dev; | ||
2410 | int i, size, error = 0; | ||
2411 | |||
2412 | error = sysdev_class_register(&ioapic_sysdev_class); | ||
2413 | if (error) | ||
2414 | return error; | ||
2415 | |||
2416 | for (i = 0; i < nr_ioapics; i++) { | ||
2417 | size = sizeof(struct sys_device) + nr_ioapic_registers[i] | ||
2418 | * sizeof(struct IO_APIC_route_entry); | ||
2419 | mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL); | ||
2420 | if (!mp_ioapic_data[i]) { | ||
2421 | printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i); | ||
2422 | continue; | ||
2423 | } | ||
2424 | dev = &mp_ioapic_data[i]->dev; | ||
2425 | dev->id = i; | ||
2426 | dev->cls = &ioapic_sysdev_class; | ||
2427 | error = sysdev_register(dev); | ||
2428 | if (error) { | ||
2429 | kfree(mp_ioapic_data[i]); | ||
2430 | mp_ioapic_data[i] = NULL; | ||
2431 | printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i); | ||
2432 | continue; | ||
2433 | } | ||
2434 | } | ||
2435 | |||
2436 | return 0; | ||
2437 | } | ||
2438 | |||
2439 | device_initcall(ioapic_init_sysfs); | ||
2440 | |||
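A note on the allocation pattern used by ioapic_init_sysfs() above: struct sysfs_ioapic_data ends in a zero-length entry[0] array, so a single kzalloc() of sizeof(struct sys_device) plus nr_ioapic_registers[i] route entries gives the suspend/resume handlers one directly indexable snapshot buffer per IOAPIC. The standalone userspace sketch below shows the same idiom; the names (ioapic_snapshot, snapshot_alloc) are illustrative only, and a C99 flexible array member stands in for the older entry[0] spelling.

#include <stdio.h>
#include <stdlib.h>

struct route_entry {                      /* stand-in for IO_APIC_route_entry */
        unsigned int raw[2];
};

struct ioapic_snapshot {                  /* stand-in for sysfs_ioapic_data */
        int id;
        struct route_entry entry[];       /* trailing flexible array */
};

static struct ioapic_snapshot *snapshot_alloc(int id, unsigned int nr_regs)
{
        size_t size = sizeof(struct ioapic_snapshot) +
                      nr_regs * sizeof(struct route_entry);
        struct ioapic_snapshot *s = calloc(1, size);   /* like kzalloc() */

        if (s)
                s->id = id;
        return s;
}

int main(void)
{
        unsigned int nr_regs = 24;        /* a typical redirection-table size */
        struct ioapic_snapshot *s = snapshot_alloc(0, nr_regs);

        if (!s)
                return 1;
        /* entries are directly indexable, as in ioapic_suspend()/resume() */
        s->entry[3].raw[0] = 0x10000;     /* e.g. store a masked entry */
        printf("snapshot for ioapic %d holds %u entries\n", s->id, nr_regs);
        free(s);
        return 0;
}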
2441 | /* | ||
2442 | * Dynamic irq allocate and deallocation | ||
2443 | */ | ||
2444 | int create_irq(void) | ||
2445 | { | ||
2446 | /* Allocate an unused irq */ | ||
2447 | int irq, new, vector = 0; | ||
2448 | unsigned long flags; | ||
2449 | |||
2450 | irq = -ENOSPC; | ||
2451 | spin_lock_irqsave(&vector_lock, flags); | ||
2452 | for (new = (NR_IRQS - 1); new >= 0; new--) { | ||
2453 | if (platform_legacy_irq(new)) | ||
2454 | continue; | ||
2455 | if (irq_vector[new] != 0) | ||
2456 | continue; | ||
2457 | vector = __assign_irq_vector(new); | ||
2458 | if (likely(vector > 0)) | ||
2459 | irq = new; | ||
2460 | break; | ||
2461 | } | ||
2462 | spin_unlock_irqrestore(&vector_lock, flags); | ||
2463 | |||
2464 | if (irq >= 0) { | ||
2465 | set_intr_gate(vector, interrupt[irq]); | ||
2466 | dynamic_irq_init(irq); | ||
2467 | } | ||
2468 | return irq; | ||
2469 | } | ||
2470 | |||
2471 | void destroy_irq(unsigned int irq) | ||
2472 | { | ||
2473 | unsigned long flags; | ||
2474 | |||
2475 | dynamic_irq_cleanup(irq); | ||
2476 | |||
2477 | spin_lock_irqsave(&vector_lock, flags); | ||
2478 | clear_bit(irq_vector[irq], used_vectors); | ||
2479 | irq_vector[irq] = 0; | ||
2480 | spin_unlock_irqrestore(&vector_lock, flags); | ||
2481 | } | ||
2482 | |||
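The create_irq()/destroy_irq() pair above implements dynamic IRQ allocation by walking irq_vector[] from the top of the IRQ space downward under vector_lock, skipping platform legacy IRQs and claiming the first slot whose vector is still zero. A minimal userspace model of that scan, with hypothetical names and no locking, is sketched below.

#include <stdio.h>

#define NR_IRQS_MODEL   32
#define NR_LEGACY       16                /* pretend IRQs 0..15 are legacy */

static int vector_of[NR_IRQS_MODEL];      /* 0 == unused, like irq_vector[] */

static int model_create_irq(void)
{
        int irq;

        for (irq = NR_IRQS_MODEL - 1; irq >= 0; irq--) {
                if (irq < NR_LEGACY)      /* platform_legacy_irq() */
                        continue;
                if (vector_of[irq] != 0)  /* already allocated */
                        continue;
                vector_of[irq] = 0x30 + irq;  /* stand-in for __assign_irq_vector() */
                return irq;
        }
        return -1;                        /* -ENOSPC in the kernel */
}

static void model_destroy_irq(int irq)
{
        vector_of[irq] = 0;               /* the slot becomes reusable */
}

int main(void)
{
        int a = model_create_irq();
        int b = model_create_irq();

        printf("allocated irq %d and irq %d\n", a, b);          /* 31 and 30 */
        model_destroy_irq(a);
        printf("next allocation after freeing: irq %d\n", model_create_irq());
        return 0;
}

In the kernel the claimed slot also has to get a vector from __assign_irq_vector(); if that fails, create_irq() still returns -ENOSPC even though a free descriptor was found.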
2483 | /* | ||
2484 | * MSI message composition | ||
2485 | */ | ||
2486 | #ifdef CONFIG_PCI_MSI | ||
2487 | static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg) | ||
2488 | { | ||
2489 | int vector; | ||
2490 | unsigned dest; | ||
2491 | |||
2492 | vector = assign_irq_vector(irq); | ||
2493 | if (vector >= 0) { | ||
2494 | dest = cpu_mask_to_apicid(TARGET_CPUS); | ||
2495 | |||
2496 | msg->address_hi = MSI_ADDR_BASE_HI; | ||
2497 | msg->address_lo = | ||
2498 | MSI_ADDR_BASE_LO | | ||
2499 | ((INT_DEST_MODE == 0) ? | ||
2500 | MSI_ADDR_DEST_MODE_PHYSICAL: | ||
2501 | MSI_ADDR_DEST_MODE_LOGICAL) | | ||
2502 | ((INT_DELIVERY_MODE != dest_LowestPrio) ? | ||
2503 | MSI_ADDR_REDIRECTION_CPU: | ||
2504 | MSI_ADDR_REDIRECTION_LOWPRI) | | ||
2505 | MSI_ADDR_DEST_ID(dest); | ||
2506 | |||
2507 | msg->data = | ||
2508 | MSI_DATA_TRIGGER_EDGE | | ||
2509 | MSI_DATA_LEVEL_ASSERT | | ||
2510 | ((INT_DELIVERY_MODE != dest_LowestPrio) ? | ||
2511 | MSI_DATA_DELIVERY_FIXED: | ||
2512 | MSI_DATA_DELIVERY_LOWPRI) | | ||
2513 | MSI_DATA_VECTOR(vector); | ||
2514 | } | ||
2515 | return vector; | ||
2516 | } | ||
2517 | |||
2518 | #ifdef CONFIG_SMP | ||
2519 | static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | ||
2520 | { | ||
2521 | struct msi_msg msg; | ||
2522 | unsigned int dest; | ||
2523 | cpumask_t tmp; | ||
2524 | int vector; | ||
2525 | |||
2526 | cpus_and(tmp, mask, cpu_online_map); | ||
2527 | if (cpus_empty(tmp)) | ||
2528 | tmp = TARGET_CPUS; | ||
2529 | |||
2530 | vector = assign_irq_vector(irq); | ||
2531 | if (vector < 0) | ||
2532 | return; | ||
2533 | |||
2534 | dest = cpu_mask_to_apicid(mask); | ||
2535 | |||
2536 | read_msi_msg(irq, &msg); | ||
2537 | |||
2538 | msg.data &= ~MSI_DATA_VECTOR_MASK; | ||
2539 | msg.data |= MSI_DATA_VECTOR(vector); | ||
2540 | msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; | ||
2541 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); | ||
2542 | |||
2543 | write_msi_msg(irq, &msg); | ||
2544 | irq_desc[irq].affinity = mask; | ||
2545 | } | ||
2546 | #endif /* CONFIG_SMP */ | ||
2547 | |||
2548 | /* | ||
2549 | * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices, | ||
2550 | * which implement the MSI or MSI-X Capability Structure. | ||
2551 | */ | ||
2552 | static struct irq_chip msi_chip = { | ||
2553 | .name = "PCI-MSI", | ||
2554 | .unmask = unmask_msi_irq, | ||
2555 | .mask = mask_msi_irq, | ||
2556 | .ack = ack_ioapic_irq, | ||
2557 | #ifdef CONFIG_SMP | ||
2558 | .set_affinity = set_msi_irq_affinity, | ||
2559 | #endif | ||
2560 | .retrigger = ioapic_retrigger_irq, | ||
2561 | }; | ||
2562 | |||
2563 | int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) | ||
2564 | { | ||
2565 | struct msi_msg msg; | ||
2566 | int irq, ret; | ||
2567 | irq = create_irq(); | ||
2568 | if (irq < 0) | ||
2569 | return irq; | ||
2570 | |||
2571 | ret = msi_compose_msg(dev, irq, &msg); | ||
2572 | if (ret < 0) { | ||
2573 | destroy_irq(irq); | ||
2574 | return ret; | ||
2575 | } | ||
2576 | |||
2577 | set_irq_msi(irq, desc); | ||
2578 | write_msi_msg(irq, &msg); | ||
2579 | |||
2580 | set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, | ||
2581 | "edge"); | ||
2582 | |||
2583 | return 0; | ||
2584 | } | ||
2585 | |||
2586 | void arch_teardown_msi_irq(unsigned int irq) | ||
2587 | { | ||
2588 | destroy_irq(irq); | ||
2589 | } | ||
2590 | |||
2591 | #endif /* CONFIG_PCI_MSI */ | ||
2592 | |||
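msi_compose_msg() above builds the MSI message by OR-ing field macros together: the destination APIC ID is packed into the low address word and the vector plus delivery mode into the data word. The sketch below repeats that packing as a standalone program; the shift positions follow the conventional xAPIC MSI layout and the macro names are local stand-ins, not the kernel's MSI_ADDR_ and MSI_DATA_ definitions.

#include <stdio.h>
#include <stdint.h>

#define MSG_ADDR_BASE        0xfee00000u  /* fixed MSI address window */
#define MSG_ADDR_DEST(id)    (((uint32_t)(id) & 0xff) << 12)
#define MSG_DATA_VECTOR(v)   ((uint32_t)(v) & 0xff)
#define MSG_DATA_DM_FIXED    (0u << 8)    /* fixed delivery */
#define MSG_DATA_DM_LOWPRI   (1u << 8)    /* lowest-priority delivery */

struct msg {
        uint32_t address_lo;
        uint32_t data;
};

static void compose(struct msg *m, unsigned int dest_apicid,
                    unsigned int vector, int lowest_prio)
{
        m->address_lo = MSG_ADDR_BASE | MSG_ADDR_DEST(dest_apicid);
        m->data = MSG_DATA_VECTOR(vector) |
                  (lowest_prio ? MSG_DATA_DM_LOWPRI : MSG_DATA_DM_FIXED);
}

int main(void)
{
        struct msg m;

        compose(&m, 0x02, 0x41, 0);       /* vector 0x41 to APIC ID 2, fixed */
        printf("address_lo=%#x data=%#x\n",
               (unsigned int)m.address_lo, (unsigned int)m.data);
        return 0;
}

In the patch the fixed versus lowest-priority choice comes from INT_DELIVERY_MODE, and set_msi_irq_affinity() later rewrites the same two words with a new destination ID and vector.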
2593 | /* | ||
2594 | * Hypertransport interrupt support | ||
2595 | */ | ||
2596 | #ifdef CONFIG_HT_IRQ | ||
2597 | |||
2598 | #ifdef CONFIG_SMP | ||
2599 | |||
2600 | static void target_ht_irq(unsigned int irq, unsigned int dest) | ||
2601 | { | ||
2602 | struct ht_irq_msg msg; | ||
2603 | fetch_ht_irq_msg(irq, &msg); | ||
2604 | |||
2605 | msg.address_lo &= ~(HT_IRQ_LOW_DEST_ID_MASK); | ||
2606 | msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK); | ||
2607 | |||
2608 | msg.address_lo |= HT_IRQ_LOW_DEST_ID(dest); | ||
2609 | msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest); | ||
2610 | |||
2611 | write_ht_irq_msg(irq, &msg); | ||
2612 | } | ||
2613 | |||
2614 | static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask) | ||
2615 | { | ||
2616 | unsigned int dest; | ||
2617 | cpumask_t tmp; | ||
2618 | |||
2619 | cpus_and(tmp, mask, cpu_online_map); | ||
2620 | if (cpus_empty(tmp)) | ||
2621 | tmp = TARGET_CPUS; | ||
2622 | |||
2623 | cpus_and(mask, tmp, CPU_MASK_ALL); | ||
2624 | |||
2625 | dest = cpu_mask_to_apicid(mask); | ||
2626 | |||
2627 | target_ht_irq(irq, dest); | ||
2628 | irq_desc[irq].affinity = mask; | ||
2629 | } | ||
2630 | #endif | ||
2631 | |||
2632 | static struct irq_chip ht_irq_chip = { | ||
2633 | .name = "PCI-HT", | ||
2634 | .mask = mask_ht_irq, | ||
2635 | .unmask = unmask_ht_irq, | ||
2636 | .ack = ack_ioapic_irq, | ||
2637 | #ifdef CONFIG_SMP | ||
2638 | .set_affinity = set_ht_irq_affinity, | ||
2639 | #endif | ||
2640 | .retrigger = ioapic_retrigger_irq, | ||
2641 | }; | ||
2642 | |||
2643 | int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) | ||
2644 | { | ||
2645 | int vector; | ||
2646 | |||
2647 | vector = assign_irq_vector(irq); | ||
2648 | if (vector >= 0) { | ||
2649 | struct ht_irq_msg msg; | ||
2650 | unsigned dest; | ||
2651 | cpumask_t tmp; | ||
2652 | |||
2653 | cpus_clear(tmp); | ||
2654 | cpu_set(vector >> 8, tmp); | ||
2655 | dest = cpu_mask_to_apicid(tmp); | ||
2656 | |||
2657 | msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); | ||
2658 | |||
2659 | msg.address_lo = | ||
2660 | HT_IRQ_LOW_BASE | | ||
2661 | HT_IRQ_LOW_DEST_ID(dest) | | ||
2662 | HT_IRQ_LOW_VECTOR(vector) | | ||
2663 | ((INT_DEST_MODE == 0) ? | ||
2664 | HT_IRQ_LOW_DM_PHYSICAL : | ||
2665 | HT_IRQ_LOW_DM_LOGICAL) | | ||
2666 | HT_IRQ_LOW_RQEOI_EDGE | | ||
2667 | ((INT_DELIVERY_MODE != dest_LowestPrio) ? | ||
2668 | HT_IRQ_LOW_MT_FIXED : | ||
2669 | HT_IRQ_LOW_MT_ARBITRATED) | | ||
2670 | HT_IRQ_LOW_IRQ_MASKED; | ||
2671 | |||
2672 | write_ht_irq_msg(irq, &msg); | ||
2673 | |||
2674 | set_irq_chip_and_handler_name(irq, &ht_irq_chip, | ||
2675 | handle_edge_irq, "edge"); | ||
2676 | } | ||
2677 | return vector; | ||
2678 | } | ||
2679 | #endif /* CONFIG_HT_IRQ */ | ||
2680 | |||
2681 | /* -------------------------------------------------------------------------- | ||
2682 | ACPI-based IOAPIC Configuration | ||
2683 | -------------------------------------------------------------------------- */ | ||
2684 | |||
2685 | #ifdef CONFIG_ACPI | ||
2686 | |||
2687 | int __init io_apic_get_unique_id(int ioapic, int apic_id) | ||
2688 | { | ||
2689 | union IO_APIC_reg_00 reg_00; | ||
2690 | static physid_mask_t apic_id_map = PHYSID_MASK_NONE; | ||
2691 | physid_mask_t tmp; | ||
2692 | unsigned long flags; | ||
2693 | int i = 0; | ||
2694 | |||
2695 | /* | ||
2696 | * The P4 platform supports up to 256 APIC IDs on two separate APIC | ||
2697 | * buses (one for LAPICs, one for IOAPICs), whereas predecessors only | ||
2698 | * support up to 16 on one shared APIC bus. | ||
2699 | * | ||
2700 | * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full | ||
2701 | * advantage of new APIC bus architecture. | ||
2702 | */ | ||
2703 | |||
2704 | if (physids_empty(apic_id_map)) | ||
2705 | apic_id_map = ioapic_phys_id_map(phys_cpu_present_map); | ||
2706 | |||
2707 | spin_lock_irqsave(&ioapic_lock, flags); | ||
2708 | reg_00.raw = io_apic_read(ioapic, 0); | ||
2709 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
2710 | |||
2711 | if (apic_id >= get_physical_broadcast()) { | ||
2712 | printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying " | ||
2713 | "%d\n", ioapic, apic_id, reg_00.bits.ID); | ||
2714 | apic_id = reg_00.bits.ID; | ||
2715 | } | ||
2716 | |||
2717 | /* | ||
2718 | * Every APIC in a system must have a unique ID or we get lots of nice | ||
2719 | * 'stuck on smp_invalidate_needed IPI wait' messages. | ||
2720 | */ | ||
2721 | if (check_apicid_used(apic_id_map, apic_id)) { | ||
2722 | |||
2723 | for (i = 0; i < get_physical_broadcast(); i++) { | ||
2724 | if (!check_apicid_used(apic_id_map, i)) | ||
2725 | break; | ||
2726 | } | ||
2727 | |||
2728 | if (i == get_physical_broadcast()) | ||
2729 | panic("Max apic_id exceeded!\n"); | ||
2730 | |||
2731 | printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, " | ||
2732 | "trying %d\n", ioapic, apic_id, i); | ||
2733 | |||
2734 | apic_id = i; | ||
2735 | } | ||
2736 | |||
2737 | tmp = apicid_to_cpu_present(apic_id); | ||
2738 | physids_or(apic_id_map, apic_id_map, tmp); | ||
2739 | |||
2740 | if (reg_00.bits.ID != apic_id) { | ||
2741 | reg_00.bits.ID = apic_id; | ||
2742 | |||
2743 | spin_lock_irqsave(&ioapic_lock, flags); | ||
2744 | io_apic_write(ioapic, 0, reg_00.raw); | ||
2745 | reg_00.raw = io_apic_read(ioapic, 0); | ||
2746 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
2747 | |||
2748 | /* Sanity check */ | ||
2749 | if (reg_00.bits.ID != apic_id) { | ||
2750 | printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic); | ||
2751 | return -1; | ||
2752 | } | ||
2753 | } | ||
2754 | |||
2755 | apic_printk(APIC_VERBOSE, KERN_INFO | ||
2756 | "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id); | ||
2757 | |||
2758 | return apic_id; | ||
2759 | } | ||
2760 | |||
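io_apic_get_unique_id() above resolves APIC ID clashes by scanning apic_id_map for the first ID below the physical broadcast value that is still free, and panics only when every ID is taken. The simplified model below (hypothetical names, a plain bitmap instead of physid_mask_t, and the out-of-range fallback folded into the same rescan) captures just that selection logic.

#include <stdio.h>

#define BROADCAST_ID  15                  /* like get_physical_broadcast() */

static unsigned int used_ids;             /* bitmap of claimed APIC IDs */

static int pick_unique_id(int wanted)
{
        int id = wanted;

        if (id >= BROADCAST_ID || (used_ids & (1u << id))) {
                for (id = 0; id < BROADCAST_ID; id++)
                        if (!(used_ids & (1u << id)))
                                break;
                if (id == BROADCAST_ID)
                        return -1;        /* all IDs taken: the kernel panics here */
        }
        used_ids |= 1u << id;
        return id;
}

int main(void)
{
        used_ids = 1u << 2;                               /* ID 2 already claimed */
        printf("want 2 -> get %d\n", pick_unique_id(2));  /* collides, falls back to 0 */
        printf("want 5 -> get %d\n", pick_unique_id(5));  /* free, kept as-is */
        return 0;
}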
2761 | |||
2762 | int __init io_apic_get_version(int ioapic) | ||
2763 | { | ||
2764 | union IO_APIC_reg_01 reg_01; | ||
2765 | unsigned long flags; | ||
2766 | |||
2767 | spin_lock_irqsave(&ioapic_lock, flags); | ||
2768 | reg_01.raw = io_apic_read(ioapic, 1); | ||
2769 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
2770 | |||
2771 | return reg_01.bits.version; | ||
2772 | } | ||
2773 | |||
2774 | |||
2775 | int __init io_apic_get_redir_entries(int ioapic) | ||
2776 | { | ||
2777 | union IO_APIC_reg_01 reg_01; | ||
2778 | unsigned long flags; | ||
2779 | |||
2780 | spin_lock_irqsave(&ioapic_lock, flags); | ||
2781 | reg_01.raw = io_apic_read(ioapic, 1); | ||
2782 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
2783 | |||
2784 | return reg_01.bits.entries; | ||
2785 | } | ||
2786 | |||
2787 | |||
2788 | int io_apic_set_pci_routing(int ioapic, int pin, int irq, int edge_level, int active_high_low) | ||
2789 | { | ||
2790 | struct IO_APIC_route_entry entry; | ||
2791 | |||
2792 | if (!IO_APIC_IRQ(irq)) { | ||
2793 | printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", | ||
2794 | ioapic); | ||
2795 | return -EINVAL; | ||
2796 | } | ||
2797 | |||
2798 | /* | ||
2799 | * Generate a PCI IRQ routing entry and program the IOAPIC accordingly. | ||
2800 | * Note that we mask (disable) IRQs now -- these get enabled when the | ||
2801 | * corresponding device driver registers for this IRQ. | ||
2802 | */ | ||
2803 | |||
2804 | memset(&entry, 0, sizeof(entry)); | ||
2805 | |||
2806 | entry.delivery_mode = INT_DELIVERY_MODE; | ||
2807 | entry.dest_mode = INT_DEST_MODE; | ||
2808 | entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS); | ||
2809 | entry.trigger = edge_level; | ||
2810 | entry.polarity = active_high_low; | ||
2811 | entry.mask = 1; | ||
2812 | |||
2813 | /* | ||
2814 | * IRQs < 16 are already in the irq_2_pin[] map | ||
2815 | */ | ||
2816 | if (irq >= 16) | ||
2817 | add_pin_to_irq(irq, ioapic, pin); | ||
2818 | |||
2819 | entry.vector = assign_irq_vector(irq); | ||
2820 | |||
2821 | apic_printk(APIC_DEBUG, KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry " | ||
2822 | "(%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i)\n", ioapic, | ||
2823 | mp_ioapics[ioapic].mp_apicid, pin, entry.vector, irq, | ||
2824 | edge_level, active_high_low); | ||
2825 | |||
2826 | ioapic_register_intr(irq, entry.vector, edge_level); | ||
2827 | |||
2828 | if (!ioapic && (irq < 16)) | ||
2829 | disable_8259A_irq(irq); | ||
2830 | |||
2831 | ioapic_write_entry(ioapic, pin, entry); | ||
2832 | |||
2833 | return 0; | ||
2834 | } | ||
2835 | |||
2836 | int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity) | ||
2837 | { | ||
2838 | int i; | ||
2839 | |||
2840 | if (skip_ioapic_setup) | ||
2841 | return -1; | ||
2842 | |||
2843 | for (i = 0; i < mp_irq_entries; i++) | ||
2844 | if (mp_irqs[i].mp_irqtype == mp_INT && | ||
2845 | mp_irqs[i].mp_srcbusirq == bus_irq) | ||
2846 | break; | ||
2847 | if (i >= mp_irq_entries) | ||
2848 | return -1; | ||
2849 | |||
2850 | *trigger = irq_trigger(i); | ||
2851 | *polarity = irq_polarity(i); | ||
2852 | return 0; | ||
2853 | } | ||
2854 | |||
2855 | #endif /* CONFIG_ACPI */ | ||
2856 | |||
2857 | static int __init parse_disable_timer_pin_1(char *arg) | ||
2858 | { | ||
2859 | disable_timer_pin_1 = 1; | ||
2860 | return 0; | ||
2861 | } | ||
2862 | early_param("disable_timer_pin_1", parse_disable_timer_pin_1); | ||
2863 | |||
2864 | static int __init parse_enable_timer_pin_1(char *arg) | ||
2865 | { | ||
2866 | disable_timer_pin_1 = -1; | ||
2867 | return 0; | ||
2868 | } | ||
2869 | early_param("enable_timer_pin_1", parse_enable_timer_pin_1); | ||
2870 | |||
2871 | static int __init parse_noapic(char *arg) | ||
2872 | { | ||
2873 | /* disable IO-APIC */ | ||
2874 | disable_ioapic_setup(); | ||
2875 | return 0; | ||
2876 | } | ||
2877 | early_param("noapic", parse_noapic); | ||
2878 | |||
2879 | void __init ioapic_init_mappings(void) | ||
2880 | { | ||
2881 | unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0; | ||
2882 | int i; | ||
2883 | |||
2884 | for (i = 0; i < nr_ioapics; i++) { | ||
2885 | if (smp_found_config) { | ||
2886 | ioapic_phys = mp_ioapics[i].mp_apicaddr; | ||
2887 | if (!ioapic_phys) { | ||
2888 | printk(KERN_ERR | ||
2889 | "WARNING: bogus zero IO-APIC " | ||
2890 | "address found in MPTABLE, " | ||
2891 | "disabling IO/APIC support!\n"); | ||
2892 | smp_found_config = 0; | ||
2893 | skip_ioapic_setup = 1; | ||
2894 | goto fake_ioapic_page; | ||
2895 | } | ||
2896 | } else { | ||
2897 | fake_ioapic_page: | ||
2898 | ioapic_phys = (unsigned long) | ||
2899 | alloc_bootmem_pages(PAGE_SIZE); | ||
2900 | ioapic_phys = __pa(ioapic_phys); | ||
2901 | } | ||
2902 | set_fixmap_nocache(idx, ioapic_phys); | ||
2903 | printk(KERN_DEBUG "mapped IOAPIC to %08lx (%08lx)\n", | ||
2904 | __fix_to_virt(idx), ioapic_phys); | ||
2905 | idx++; | ||
2906 | } | ||
2907 | } | ||
2908 | |||
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c new file mode 100644 index 000000000000..ccf6c503fc3b --- /dev/null +++ b/arch/x86/kernel/irq.c | |||
@@ -0,0 +1,189 @@ | |||
1 | /* | ||
2 | * Common interrupt code for 32 and 64 bit | ||
3 | */ | ||
4 | #include <linux/cpu.h> | ||
5 | #include <linux/interrupt.h> | ||
6 | #include <linux/kernel_stat.h> | ||
7 | #include <linux/seq_file.h> | ||
8 | |||
9 | #include <asm/apic.h> | ||
10 | #include <asm/io_apic.h> | ||
11 | #include <asm/smp.h> | ||
12 | |||
13 | atomic_t irq_err_count; | ||
14 | |||
15 | /* | ||
16 | * 'what should we do if we get a hw irq event on an illegal vector'. | ||
17 | * Each architecture has to answer this for itself. | ||
18 | */ | ||
19 | void ack_bad_irq(unsigned int irq) | ||
20 | { | ||
21 | printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq); | ||
22 | |||
23 | #ifdef CONFIG_X86_LOCAL_APIC | ||
24 | /* | ||
25 | * Currently unexpected vectors happen only on SMP and APIC. | ||
26 | * We _must_ ack these because every local APIC has only N | ||
27 | * irq slots per priority level, and a 'hanging, unacked' IRQ | ||
28 | * holds up an irq slot - in excessive cases (when multiple | ||
29 | * unexpected vectors occur) that might lock up the APIC | ||
30 | * completely. | ||
31 | * But only ack when the APIC is enabled -AK | ||
32 | */ | ||
33 | if (cpu_has_apic) | ||
34 | ack_APIC_irq(); | ||
35 | #endif | ||
36 | } | ||
37 | |||
38 | #ifdef CONFIG_X86_32 | ||
39 | # define irq_stats(x) (&per_cpu(irq_stat,x)) | ||
40 | #else | ||
41 | # define irq_stats(x) cpu_pda(x) | ||
42 | #endif | ||
43 | /* | ||
44 | * /proc/interrupts printing: | ||
45 | */ | ||
46 | static int show_other_interrupts(struct seq_file *p) | ||
47 | { | ||
48 | int j; | ||
49 | |||
50 | seq_printf(p, "NMI: "); | ||
51 | for_each_online_cpu(j) | ||
52 | seq_printf(p, "%10u ", irq_stats(j)->__nmi_count); | ||
53 | seq_printf(p, " Non-maskable interrupts\n"); | ||
54 | #ifdef CONFIG_X86_LOCAL_APIC | ||
55 | seq_printf(p, "LOC: "); | ||
56 | for_each_online_cpu(j) | ||
57 | seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs); | ||
58 | seq_printf(p, " Local timer interrupts\n"); | ||
59 | #endif | ||
60 | #ifdef CONFIG_SMP | ||
61 | seq_printf(p, "RES: "); | ||
62 | for_each_online_cpu(j) | ||
63 | seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count); | ||
64 | seq_printf(p, " Rescheduling interrupts\n"); | ||
65 | seq_printf(p, "CAL: "); | ||
66 | for_each_online_cpu(j) | ||
67 | seq_printf(p, "%10u ", irq_stats(j)->irq_call_count); | ||
68 | seq_printf(p, " Function call interrupts\n"); | ||
69 | seq_printf(p, "TLB: "); | ||
70 | for_each_online_cpu(j) | ||
71 | seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count); | ||
72 | seq_printf(p, " TLB shootdowns\n"); | ||
73 | #endif | ||
74 | #ifdef CONFIG_X86_MCE | ||
75 | seq_printf(p, "TRM: "); | ||
76 | for_each_online_cpu(j) | ||
77 | seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count); | ||
78 | seq_printf(p, " Thermal event interrupts\n"); | ||
79 | # ifdef CONFIG_X86_64 | ||
80 | seq_printf(p, "THR: "); | ||
81 | for_each_online_cpu(j) | ||
82 | seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count); | ||
83 | seq_printf(p, " Threshold APIC interrupts\n"); | ||
84 | # endif | ||
85 | #endif | ||
86 | #ifdef CONFIG_X86_LOCAL_APIC | ||
87 | seq_printf(p, "SPU: "); | ||
88 | for_each_online_cpu(j) | ||
89 | seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count); | ||
90 | seq_printf(p, " Spurious interrupts\n"); | ||
91 | #endif | ||
92 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); | ||
93 | #if defined(CONFIG_X86_IO_APIC) | ||
94 | seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count)); | ||
95 | #endif | ||
96 | return 0; | ||
97 | } | ||
98 | |||
99 | int show_interrupts(struct seq_file *p, void *v) | ||
100 | { | ||
101 | unsigned long flags, any_count = 0; | ||
102 | int i = *(loff_t *) v, j; | ||
103 | struct irqaction *action; | ||
104 | struct irq_desc *desc; | ||
105 | |||
106 | if (i > nr_irqs) | ||
107 | return 0; | ||
108 | |||
109 | if (i == nr_irqs) | ||
110 | return show_other_interrupts(p); | ||
111 | |||
112 | /* print header */ | ||
113 | if (i == 0) { | ||
114 | seq_printf(p, " "); | ||
115 | for_each_online_cpu(j) | ||
116 | seq_printf(p, "CPU%-8d",j); | ||
117 | seq_putc(p, '\n'); | ||
118 | } | ||
119 | |||
120 | desc = irq_to_desc(i); | ||
121 | spin_lock_irqsave(&desc->lock, flags); | ||
122 | #ifndef CONFIG_SMP | ||
123 | any_count = kstat_irqs(i); | ||
124 | #else | ||
125 | for_each_online_cpu(j) | ||
126 | any_count |= kstat_irqs_cpu(i, j); | ||
127 | #endif | ||
128 | action = desc->action; | ||
129 | if (!action && !any_count) | ||
130 | goto out; | ||
131 | |||
132 | seq_printf(p, "%3d: ", i); | ||
133 | #ifndef CONFIG_SMP | ||
134 | seq_printf(p, "%10u ", kstat_irqs(i)); | ||
135 | #else | ||
136 | for_each_online_cpu(j) | ||
137 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | ||
138 | #endif | ||
139 | seq_printf(p, " %8s", desc->chip->name); | ||
140 | seq_printf(p, "-%-8s", desc->name); | ||
141 | |||
142 | if (action) { | ||
143 | seq_printf(p, " %s", action->name); | ||
144 | while ((action = action->next) != NULL) | ||
145 | seq_printf(p, ", %s", action->name); | ||
146 | } | ||
147 | |||
148 | seq_putc(p, '\n'); | ||
149 | out: | ||
150 | spin_unlock_irqrestore(&desc->lock, flags); | ||
151 | return 0; | ||
152 | } | ||
153 | |||
154 | /* | ||
155 | * /proc/stat helpers | ||
156 | */ | ||
157 | u64 arch_irq_stat_cpu(unsigned int cpu) | ||
158 | { | ||
159 | u64 sum = irq_stats(cpu)->__nmi_count; | ||
160 | |||
161 | #ifdef CONFIG_X86_LOCAL_APIC | ||
162 | sum += irq_stats(cpu)->apic_timer_irqs; | ||
163 | #endif | ||
164 | #ifdef CONFIG_SMP | ||
165 | sum += irq_stats(cpu)->irq_resched_count; | ||
166 | sum += irq_stats(cpu)->irq_call_count; | ||
167 | sum += irq_stats(cpu)->irq_tlb_count; | ||
168 | #endif | ||
169 | #ifdef CONFIG_X86_MCE | ||
170 | sum += irq_stats(cpu)->irq_thermal_count; | ||
171 | # ifdef CONFIG_X86_64 | ||
172 | sum += irq_stats(cpu)->irq_threshold_count; | ||
173 | #endif | ||
174 | #endif | ||
175 | #ifdef CONFIG_X86_LOCAL_APIC | ||
176 | sum += irq_stats(cpu)->irq_spurious_count; | ||
177 | #endif | ||
178 | return sum; | ||
179 | } | ||
180 | |||
181 | u64 arch_irq_stat(void) | ||
182 | { | ||
183 | u64 sum = atomic_read(&irq_err_count); | ||
184 | |||
185 | #ifdef CONFIG_X86_IO_APIC | ||
186 | sum += atomic_read(&irq_mis_count); | ||
187 | #endif | ||
188 | return sum; | ||
189 | } | ||
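The new arch/x86/kernel/irq.c above funnels both 32-bit and 64-bit builds through one irq_stats() accessor and then simply sums whichever counters the configuration provides in arch_irq_stat_cpu(). The same shape is modelled below in plain userspace C, with illustrative field names and a fixed array standing in for per-CPU storage.

#include <stdio.h>
#include <stdint.h>

#define NR_CPUS_MODEL 4

struct cpu_irq_stat {
        unsigned int nmi_count;
        unsigned int apic_timer_irqs;
        unsigned int irq_resched_count;
        unsigned int irq_call_count;
        unsigned int irq_tlb_count;
};

static struct cpu_irq_stat stats[NR_CPUS_MODEL];

/* one accessor, as irq_stats() does for per_cpu() versus cpu_pda() */
static struct cpu_irq_stat *irq_stats_of(unsigned int cpu)
{
        return &stats[cpu];
}

/* counterpart of arch_irq_stat_cpu(): just add up what exists */
static uint64_t irq_stat_cpu(unsigned int cpu)
{
        const struct cpu_irq_stat *s = irq_stats_of(cpu);

        return (uint64_t)s->nmi_count + s->apic_timer_irqs +
               s->irq_resched_count + s->irq_call_count + s->irq_tlb_count;
}

int main(void)
{
        irq_stats_of(1)->apic_timer_irqs = 1000;
        irq_stats_of(1)->irq_resched_count = 42;
        printf("cpu1 interrupts so far: %llu\n",
               (unsigned long long)irq_stat_cpu(1));
        return 0;
}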
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index b71e02d42f4f..a51382672de0 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c | |||
@@ -25,29 +25,6 @@ EXPORT_PER_CPU_SYMBOL(irq_stat); | |||
25 | DEFINE_PER_CPU(struct pt_regs *, irq_regs); | 25 | DEFINE_PER_CPU(struct pt_regs *, irq_regs); |
26 | EXPORT_PER_CPU_SYMBOL(irq_regs); | 26 | EXPORT_PER_CPU_SYMBOL(irq_regs); |
27 | 27 | ||
28 | /* | ||
29 | * 'what should we do if we get a hw irq event on an illegal vector'. | ||
30 | * each architecture has to answer this themselves. | ||
31 | */ | ||
32 | void ack_bad_irq(unsigned int irq) | ||
33 | { | ||
34 | printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq); | ||
35 | |||
36 | #ifdef CONFIG_X86_LOCAL_APIC | ||
37 | /* | ||
38 | * Currently unexpected vectors happen only on SMP and APIC. | ||
39 | * We _must_ ack these because every local APIC has only N | ||
40 | * irq slots per priority level, and a 'hanging, unacked' IRQ | ||
41 | * holds up an irq slot - in excessive cases (when multiple | ||
42 | * unexpected vectors occur) that might lock up the APIC | ||
43 | * completely. | ||
44 | * But only ack when the APIC is enabled -AK | ||
45 | */ | ||
46 | if (cpu_has_apic) | ||
47 | ack_APIC_irq(); | ||
48 | #endif | ||
49 | } | ||
50 | |||
51 | #ifdef CONFIG_DEBUG_STACKOVERFLOW | 28 | #ifdef CONFIG_DEBUG_STACKOVERFLOW |
52 | /* Debugging check for stack overflow: is there less than 1KB free? */ | 29 | /* Debugging check for stack overflow: is there less than 1KB free? */ |
53 | static int check_stack_overflow(void) | 30 | static int check_stack_overflow(void) |
@@ -223,20 +200,25 @@ unsigned int do_IRQ(struct pt_regs *regs) | |||
223 | { | 200 | { |
224 | struct pt_regs *old_regs; | 201 | struct pt_regs *old_regs; |
225 | /* high bit used in ret_from_ code */ | 202 | /* high bit used in ret_from_ code */ |
226 | int overflow, irq = ~regs->orig_ax; | 203 | int overflow; |
227 | struct irq_desc *desc = irq_desc + irq; | 204 | unsigned vector = ~regs->orig_ax; |
205 | struct irq_desc *desc; | ||
206 | unsigned irq; | ||
228 | 207 | ||
229 | if (unlikely((unsigned)irq >= NR_IRQS)) { | ||
230 | printk(KERN_EMERG "%s: cannot handle IRQ %d\n", | ||
231 | __func__, irq); | ||
232 | BUG(); | ||
233 | } | ||
234 | 208 | ||
235 | old_regs = set_irq_regs(regs); | 209 | old_regs = set_irq_regs(regs); |
236 | irq_enter(); | 210 | irq_enter(); |
211 | irq = __get_cpu_var(vector_irq)[vector]; | ||
237 | 212 | ||
238 | overflow = check_stack_overflow(); | 213 | overflow = check_stack_overflow(); |
239 | 214 | ||
215 | desc = irq_to_desc(irq); | ||
216 | if (unlikely(!desc)) { | ||
217 | printk(KERN_EMERG "%s: cannot handle IRQ %d vector %#x cpu %d\n", | ||
218 | __func__, irq, vector, smp_processor_id()); | ||
219 | BUG(); | ||
220 | } | ||
221 | |||
240 | if (!execute_on_irq_stack(overflow, desc, irq)) { | 222 | if (!execute_on_irq_stack(overflow, desc, irq)) { |
241 | if (unlikely(overflow)) | 223 | if (unlikely(overflow)) |
242 | print_stack_overflow(); | 224 | print_stack_overflow(); |
@@ -248,146 +230,6 @@ unsigned int do_IRQ(struct pt_regs *regs) | |||
248 | return 1; | 230 | return 1; |
249 | } | 231 | } |
250 | 232 | ||
251 | /* | ||
252 | * Interrupt statistics: | ||
253 | */ | ||
254 | |||
255 | atomic_t irq_err_count; | ||
256 | |||
257 | /* | ||
258 | * /proc/interrupts printing: | ||
259 | */ | ||
260 | |||
261 | int show_interrupts(struct seq_file *p, void *v) | ||
262 | { | ||
263 | int i = *(loff_t *) v, j; | ||
264 | struct irqaction * action; | ||
265 | unsigned long flags; | ||
266 | |||
267 | if (i == 0) { | ||
268 | seq_printf(p, " "); | ||
269 | for_each_online_cpu(j) | ||
270 | seq_printf(p, "CPU%-8d",j); | ||
271 | seq_putc(p, '\n'); | ||
272 | } | ||
273 | |||
274 | if (i < NR_IRQS) { | ||
275 | unsigned any_count = 0; | ||
276 | |||
277 | spin_lock_irqsave(&irq_desc[i].lock, flags); | ||
278 | #ifndef CONFIG_SMP | ||
279 | any_count = kstat_irqs(i); | ||
280 | #else | ||
281 | for_each_online_cpu(j) | ||
282 | any_count |= kstat_cpu(j).irqs[i]; | ||
283 | #endif | ||
284 | action = irq_desc[i].action; | ||
285 | if (!action && !any_count) | ||
286 | goto skip; | ||
287 | seq_printf(p, "%3d: ",i); | ||
288 | #ifndef CONFIG_SMP | ||
289 | seq_printf(p, "%10u ", kstat_irqs(i)); | ||
290 | #else | ||
291 | for_each_online_cpu(j) | ||
292 | seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); | ||
293 | #endif | ||
294 | seq_printf(p, " %8s", irq_desc[i].chip->name); | ||
295 | seq_printf(p, "-%-8s", irq_desc[i].name); | ||
296 | |||
297 | if (action) { | ||
298 | seq_printf(p, " %s", action->name); | ||
299 | while ((action = action->next) != NULL) | ||
300 | seq_printf(p, ", %s", action->name); | ||
301 | } | ||
302 | |||
303 | seq_putc(p, '\n'); | ||
304 | skip: | ||
305 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); | ||
306 | } else if (i == NR_IRQS) { | ||
307 | seq_printf(p, "NMI: "); | ||
308 | for_each_online_cpu(j) | ||
309 | seq_printf(p, "%10u ", nmi_count(j)); | ||
310 | seq_printf(p, " Non-maskable interrupts\n"); | ||
311 | #ifdef CONFIG_X86_LOCAL_APIC | ||
312 | seq_printf(p, "LOC: "); | ||
313 | for_each_online_cpu(j) | ||
314 | seq_printf(p, "%10u ", | ||
315 | per_cpu(irq_stat,j).apic_timer_irqs); | ||
316 | seq_printf(p, " Local timer interrupts\n"); | ||
317 | #endif | ||
318 | #ifdef CONFIG_SMP | ||
319 | seq_printf(p, "RES: "); | ||
320 | for_each_online_cpu(j) | ||
321 | seq_printf(p, "%10u ", | ||
322 | per_cpu(irq_stat,j).irq_resched_count); | ||
323 | seq_printf(p, " Rescheduling interrupts\n"); | ||
324 | seq_printf(p, "CAL: "); | ||
325 | for_each_online_cpu(j) | ||
326 | seq_printf(p, "%10u ", | ||
327 | per_cpu(irq_stat,j).irq_call_count); | ||
328 | seq_printf(p, " Function call interrupts\n"); | ||
329 | seq_printf(p, "TLB: "); | ||
330 | for_each_online_cpu(j) | ||
331 | seq_printf(p, "%10u ", | ||
332 | per_cpu(irq_stat,j).irq_tlb_count); | ||
333 | seq_printf(p, " TLB shootdowns\n"); | ||
334 | #endif | ||
335 | #ifdef CONFIG_X86_MCE | ||
336 | seq_printf(p, "TRM: "); | ||
337 | for_each_online_cpu(j) | ||
338 | seq_printf(p, "%10u ", | ||
339 | per_cpu(irq_stat,j).irq_thermal_count); | ||
340 | seq_printf(p, " Thermal event interrupts\n"); | ||
341 | #endif | ||
342 | #ifdef CONFIG_X86_LOCAL_APIC | ||
343 | seq_printf(p, "SPU: "); | ||
344 | for_each_online_cpu(j) | ||
345 | seq_printf(p, "%10u ", | ||
346 | per_cpu(irq_stat,j).irq_spurious_count); | ||
347 | seq_printf(p, " Spurious interrupts\n"); | ||
348 | #endif | ||
349 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); | ||
350 | #if defined(CONFIG_X86_IO_APIC) | ||
351 | seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count)); | ||
352 | #endif | ||
353 | } | ||
354 | return 0; | ||
355 | } | ||
356 | |||
357 | /* | ||
358 | * /proc/stat helpers | ||
359 | */ | ||
360 | u64 arch_irq_stat_cpu(unsigned int cpu) | ||
361 | { | ||
362 | u64 sum = nmi_count(cpu); | ||
363 | |||
364 | #ifdef CONFIG_X86_LOCAL_APIC | ||
365 | sum += per_cpu(irq_stat, cpu).apic_timer_irqs; | ||
366 | #endif | ||
367 | #ifdef CONFIG_SMP | ||
368 | sum += per_cpu(irq_stat, cpu).irq_resched_count; | ||
369 | sum += per_cpu(irq_stat, cpu).irq_call_count; | ||
370 | sum += per_cpu(irq_stat, cpu).irq_tlb_count; | ||
371 | #endif | ||
372 | #ifdef CONFIG_X86_MCE | ||
373 | sum += per_cpu(irq_stat, cpu).irq_thermal_count; | ||
374 | #endif | ||
375 | #ifdef CONFIG_X86_LOCAL_APIC | ||
376 | sum += per_cpu(irq_stat, cpu).irq_spurious_count; | ||
377 | #endif | ||
378 | return sum; | ||
379 | } | ||
380 | |||
381 | u64 arch_irq_stat(void) | ||
382 | { | ||
383 | u64 sum = atomic_read(&irq_err_count); | ||
384 | |||
385 | #ifdef CONFIG_X86_IO_APIC | ||
386 | sum += atomic_read(&irq_mis_count); | ||
387 | #endif | ||
388 | return sum; | ||
389 | } | ||
390 | |||
391 | #ifdef CONFIG_HOTPLUG_CPU | 233 | #ifdef CONFIG_HOTPLUG_CPU |
392 | #include <mach_apic.h> | 234 | #include <mach_apic.h> |
393 | 235 | ||
@@ -395,20 +237,22 @@ void fixup_irqs(cpumask_t map) | |||
395 | { | 237 | { |
396 | unsigned int irq; | 238 | unsigned int irq; |
397 | static int warned; | 239 | static int warned; |
240 | struct irq_desc *desc; | ||
398 | 241 | ||
399 | for (irq = 0; irq < NR_IRQS; irq++) { | 242 | for_each_irq_desc(irq, desc) { |
400 | cpumask_t mask; | 243 | cpumask_t mask; |
244 | |||
401 | if (irq == 2) | 245 | if (irq == 2) |
402 | continue; | 246 | continue; |
403 | 247 | ||
404 | cpus_and(mask, irq_desc[irq].affinity, map); | 248 | cpus_and(mask, desc->affinity, map); |
405 | if (any_online_cpu(mask) == NR_CPUS) { | 249 | if (any_online_cpu(mask) == NR_CPUS) { |
406 | printk("Breaking affinity for irq %i\n", irq); | 250 | printk("Breaking affinity for irq %i\n", irq); |
407 | mask = map; | 251 | mask = map; |
408 | } | 252 | } |
409 | if (irq_desc[irq].chip->set_affinity) | 253 | if (desc->chip->set_affinity) |
410 | irq_desc[irq].chip->set_affinity(irq, mask); | 254 | desc->chip->set_affinity(irq, mask); |
411 | else if (irq_desc[irq].action && !(warned++)) | 255 | else if (desc->action && !(warned++)) |
412 | printk("Cannot set affinity for irq %i\n", irq); | 256 | printk("Cannot set affinity for irq %i\n", irq); |
413 | } | 257 | } |
414 | 258 | ||
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index f065fe9071b9..60eb84eb77a0 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c | |||
@@ -18,28 +18,6 @@ | |||
18 | #include <asm/idle.h> | 18 | #include <asm/idle.h> |
19 | #include <asm/smp.h> | 19 | #include <asm/smp.h> |
20 | 20 | ||
21 | atomic_t irq_err_count; | ||
22 | |||
23 | /* | ||
24 | * 'what should we do if we get a hw irq event on an illegal vector'. | ||
25 | * each architecture has to answer this themselves. | ||
26 | */ | ||
27 | void ack_bad_irq(unsigned int irq) | ||
28 | { | ||
29 | printk(KERN_WARNING "unexpected IRQ trap at vector %02x\n", irq); | ||
30 | /* | ||
31 | * Currently unexpected vectors happen only on SMP and APIC. | ||
32 | * We _must_ ack these because every local APIC has only N | ||
33 | * irq slots per priority level, and a 'hanging, unacked' IRQ | ||
34 | * holds up an irq slot - in excessive cases (when multiple | ||
35 | * unexpected vectors occur) that might lock up the APIC | ||
36 | * completely. | ||
37 | * But don't ack when the APIC is disabled. -AK | ||
38 | */ | ||
39 | if (!disable_apic) | ||
40 | ack_APIC_irq(); | ||
41 | } | ||
42 | |||
43 | #ifdef CONFIG_DEBUG_STACKOVERFLOW | 21 | #ifdef CONFIG_DEBUG_STACKOVERFLOW |
44 | /* | 22 | /* |
45 | * Probabilistic stack overflow check: | 23 | * Probabilistic stack overflow check: |
@@ -65,122 +43,6 @@ static inline void stack_overflow_check(struct pt_regs *regs) | |||
65 | #endif | 43 | #endif |
66 | 44 | ||
67 | /* | 45 | /* |
68 | * Generic, controller-independent functions: | ||
69 | */ | ||
70 | |||
71 | int show_interrupts(struct seq_file *p, void *v) | ||
72 | { | ||
73 | int i = *(loff_t *) v, j; | ||
74 | struct irqaction * action; | ||
75 | unsigned long flags; | ||
76 | |||
77 | if (i == 0) { | ||
78 | seq_printf(p, " "); | ||
79 | for_each_online_cpu(j) | ||
80 | seq_printf(p, "CPU%-8d",j); | ||
81 | seq_putc(p, '\n'); | ||
82 | } | ||
83 | |||
84 | if (i < NR_IRQS) { | ||
85 | unsigned any_count = 0; | ||
86 | |||
87 | spin_lock_irqsave(&irq_desc[i].lock, flags); | ||
88 | #ifndef CONFIG_SMP | ||
89 | any_count = kstat_irqs(i); | ||
90 | #else | ||
91 | for_each_online_cpu(j) | ||
92 | any_count |= kstat_cpu(j).irqs[i]; | ||
93 | #endif | ||
94 | action = irq_desc[i].action; | ||
95 | if (!action && !any_count) | ||
96 | goto skip; | ||
97 | seq_printf(p, "%3d: ",i); | ||
98 | #ifndef CONFIG_SMP | ||
99 | seq_printf(p, "%10u ", kstat_irqs(i)); | ||
100 | #else | ||
101 | for_each_online_cpu(j) | ||
102 | seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); | ||
103 | #endif | ||
104 | seq_printf(p, " %8s", irq_desc[i].chip->name); | ||
105 | seq_printf(p, "-%-8s", irq_desc[i].name); | ||
106 | |||
107 | if (action) { | ||
108 | seq_printf(p, " %s", action->name); | ||
109 | while ((action = action->next) != NULL) | ||
110 | seq_printf(p, ", %s", action->name); | ||
111 | } | ||
112 | seq_putc(p, '\n'); | ||
113 | skip: | ||
114 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); | ||
115 | } else if (i == NR_IRQS) { | ||
116 | seq_printf(p, "NMI: "); | ||
117 | for_each_online_cpu(j) | ||
118 | seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count); | ||
119 | seq_printf(p, " Non-maskable interrupts\n"); | ||
120 | seq_printf(p, "LOC: "); | ||
121 | for_each_online_cpu(j) | ||
122 | seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs); | ||
123 | seq_printf(p, " Local timer interrupts\n"); | ||
124 | #ifdef CONFIG_SMP | ||
125 | seq_printf(p, "RES: "); | ||
126 | for_each_online_cpu(j) | ||
127 | seq_printf(p, "%10u ", cpu_pda(j)->irq_resched_count); | ||
128 | seq_printf(p, " Rescheduling interrupts\n"); | ||
129 | seq_printf(p, "CAL: "); | ||
130 | for_each_online_cpu(j) | ||
131 | seq_printf(p, "%10u ", cpu_pda(j)->irq_call_count); | ||
132 | seq_printf(p, " Function call interrupts\n"); | ||
133 | seq_printf(p, "TLB: "); | ||
134 | for_each_online_cpu(j) | ||
135 | seq_printf(p, "%10u ", cpu_pda(j)->irq_tlb_count); | ||
136 | seq_printf(p, " TLB shootdowns\n"); | ||
137 | #endif | ||
138 | #ifdef CONFIG_X86_MCE | ||
139 | seq_printf(p, "TRM: "); | ||
140 | for_each_online_cpu(j) | ||
141 | seq_printf(p, "%10u ", cpu_pda(j)->irq_thermal_count); | ||
142 | seq_printf(p, " Thermal event interrupts\n"); | ||
143 | seq_printf(p, "THR: "); | ||
144 | for_each_online_cpu(j) | ||
145 | seq_printf(p, "%10u ", cpu_pda(j)->irq_threshold_count); | ||
146 | seq_printf(p, " Threshold APIC interrupts\n"); | ||
147 | #endif | ||
148 | seq_printf(p, "SPU: "); | ||
149 | for_each_online_cpu(j) | ||
150 | seq_printf(p, "%10u ", cpu_pda(j)->irq_spurious_count); | ||
151 | seq_printf(p, " Spurious interrupts\n"); | ||
152 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); | ||
153 | } | ||
154 | return 0; | ||
155 | } | ||
156 | |||
157 | /* | ||
158 | * /proc/stat helpers | ||
159 | */ | ||
160 | u64 arch_irq_stat_cpu(unsigned int cpu) | ||
161 | { | ||
162 | u64 sum = cpu_pda(cpu)->__nmi_count; | ||
163 | |||
164 | sum += cpu_pda(cpu)->apic_timer_irqs; | ||
165 | #ifdef CONFIG_SMP | ||
166 | sum += cpu_pda(cpu)->irq_resched_count; | ||
167 | sum += cpu_pda(cpu)->irq_call_count; | ||
168 | sum += cpu_pda(cpu)->irq_tlb_count; | ||
169 | #endif | ||
170 | #ifdef CONFIG_X86_MCE | ||
171 | sum += cpu_pda(cpu)->irq_thermal_count; | ||
172 | sum += cpu_pda(cpu)->irq_threshold_count; | ||
173 | #endif | ||
174 | sum += cpu_pda(cpu)->irq_spurious_count; | ||
175 | return sum; | ||
176 | } | ||
177 | |||
178 | u64 arch_irq_stat(void) | ||
179 | { | ||
180 | return atomic_read(&irq_err_count); | ||
181 | } | ||
182 | |||
183 | /* | ||
184 | * do_IRQ handles all normal device IRQ's (the special | 46 | * do_IRQ handles all normal device IRQ's (the special |
185 | * SMP cross-CPU interrupts have their own specific | 47 | * SMP cross-CPU interrupts have their own specific |
186 | * handlers). | 48 | * handlers). |
@@ -188,6 +50,7 @@ u64 arch_irq_stat(void) | |||
188 | asmlinkage unsigned int do_IRQ(struct pt_regs *regs) | 50 | asmlinkage unsigned int do_IRQ(struct pt_regs *regs) |
189 | { | 51 | { |
190 | struct pt_regs *old_regs = set_irq_regs(regs); | 52 | struct pt_regs *old_regs = set_irq_regs(regs); |
53 | struct irq_desc *desc; | ||
191 | 54 | ||
192 | /* high bit used in ret_from_ code */ | 55 | /* high bit used in ret_from_ code */ |
193 | unsigned vector = ~regs->orig_ax; | 56 | unsigned vector = ~regs->orig_ax; |
@@ -201,8 +64,9 @@ asmlinkage unsigned int do_IRQ(struct pt_regs *regs) | |||
201 | stack_overflow_check(regs); | 64 | stack_overflow_check(regs); |
202 | #endif | 65 | #endif |
203 | 66 | ||
204 | if (likely(irq < NR_IRQS)) | 67 | desc = irq_to_desc(irq); |
205 | generic_handle_irq(irq); | 68 | if (likely(desc)) |
69 | generic_handle_irq_desc(irq, desc); | ||
206 | else { | 70 | else { |
207 | if (!disable_apic) | 71 | if (!disable_apic) |
208 | ack_APIC_irq(); | 72 | ack_APIC_irq(); |
@@ -223,8 +87,9 @@ void fixup_irqs(cpumask_t map) | |||
223 | { | 87 | { |
224 | unsigned int irq; | 88 | unsigned int irq; |
225 | static int warned; | 89 | static int warned; |
90 | struct irq_desc *desc; | ||
226 | 91 | ||
227 | for (irq = 0; irq < NR_IRQS; irq++) { | 92 | for_each_irq_desc(irq, desc) { |
228 | cpumask_t mask; | 93 | cpumask_t mask; |
229 | int break_affinity = 0; | 94 | int break_affinity = 0; |
230 | int set_affinity = 1; | 95 | int set_affinity = 1; |
@@ -233,32 +98,32 @@ void fixup_irqs(cpumask_t map) | |||
233 | continue; | 98 | continue; |
234 | 99 | ||
235 | /* interrupts are disabled at this point */ | 100 | /* interrupts are disabled at this point */
236 | spin_lock(&irq_desc[irq].lock); | 101 | spin_lock(&desc->lock); |
237 | 102 | ||
238 | if (!irq_has_action(irq) || | 103 | if (!irq_has_action(irq) || |
239 | cpus_equal(irq_desc[irq].affinity, map)) { | 104 | cpus_equal(desc->affinity, map)) { |
240 | spin_unlock(&irq_desc[irq].lock); | 105 | spin_unlock(&desc->lock); |
241 | continue; | 106 | continue; |
242 | } | 107 | } |
243 | 108 | ||
244 | cpus_and(mask, irq_desc[irq].affinity, map); | 109 | cpus_and(mask, desc->affinity, map); |
245 | if (cpus_empty(mask)) { | 110 | if (cpus_empty(mask)) { |
246 | break_affinity = 1; | 111 | break_affinity = 1; |
247 | mask = map; | 112 | mask = map; |
248 | } | 113 | } |
249 | 114 | ||
250 | if (irq_desc[irq].chip->mask) | 115 | if (desc->chip->mask) |
251 | irq_desc[irq].chip->mask(irq); | 116 | desc->chip->mask(irq); |
252 | 117 | ||
253 | if (irq_desc[irq].chip->set_affinity) | 118 | if (desc->chip->set_affinity) |
254 | irq_desc[irq].chip->set_affinity(irq, mask); | 119 | desc->chip->set_affinity(irq, mask); |
255 | else if (!(warned++)) | 120 | else if (!(warned++)) |
256 | set_affinity = 0; | 121 | set_affinity = 0; |
257 | 122 | ||
258 | if (irq_desc[irq].chip->unmask) | 123 | if (desc->chip->unmask) |
259 | irq_desc[irq].chip->unmask(irq); | 124 | desc->chip->unmask(irq); |
260 | 125 | ||
261 | spin_unlock(&irq_desc[irq].lock); | 126 | spin_unlock(&desc->lock); |
262 | 127 | ||
263 | if (break_affinity && set_affinity) | 128 | if (break_affinity && set_affinity) |
264 | printk("Broke affinity for irq %i\n", irq); | 129 | printk("Broke affinity for irq %i\n", irq); |
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c index 9200a1e2752d..845aa9803e80 100644 --- a/arch/x86/kernel/irqinit_32.c +++ b/arch/x86/kernel/irqinit_32.c | |||
@@ -69,6 +69,13 @@ void __init init_ISA_irqs (void) | |||
69 | * 16 old-style INTA-cycle interrupts: | 69 | * 16 old-style INTA-cycle interrupts: |
70 | */ | 70 | */ |
71 | for (i = 0; i < 16; i++) { | 71 | for (i = 0; i < 16; i++) { |
72 | /* first-time initialization of this irq_desc */ | ||
73 | struct irq_desc *desc = irq_to_desc(i); | ||
74 | |||
75 | desc->status = IRQ_DISABLED; | ||
76 | desc->action = NULL; | ||
77 | desc->depth = 1; | ||
78 | |||
72 | set_irq_chip_and_handler_name(i, &i8259A_chip, | 79 | set_irq_chip_and_handler_name(i, &i8259A_chip, |
73 | handle_level_irq, "XT"); | 80 | handle_level_irq, "XT"); |
74 | } | 81 | } |
@@ -83,6 +90,27 @@ static struct irqaction irq2 = { | |||
83 | .name = "cascade", | 90 | .name = "cascade", |
84 | }; | 91 | }; |
85 | 92 | ||
93 | DEFINE_PER_CPU(vector_irq_t, vector_irq) = { | ||
94 | [0 ... IRQ0_VECTOR - 1] = -1, | ||
95 | [IRQ0_VECTOR] = 0, | ||
96 | [IRQ1_VECTOR] = 1, | ||
97 | [IRQ2_VECTOR] = 2, | ||
98 | [IRQ3_VECTOR] = 3, | ||
99 | [IRQ4_VECTOR] = 4, | ||
100 | [IRQ5_VECTOR] = 5, | ||
101 | [IRQ6_VECTOR] = 6, | ||
102 | [IRQ7_VECTOR] = 7, | ||
103 | [IRQ8_VECTOR] = 8, | ||
104 | [IRQ9_VECTOR] = 9, | ||
105 | [IRQ10_VECTOR] = 10, | ||
106 | [IRQ11_VECTOR] = 11, | ||
107 | [IRQ12_VECTOR] = 12, | ||
108 | [IRQ13_VECTOR] = 13, | ||
109 | [IRQ14_VECTOR] = 14, | ||
110 | [IRQ15_VECTOR] = 15, | ||
111 | [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1 | ||
112 | }; | ||
113 | |||
86 | /* Overridden in paravirt.c */ | 114 | /* Overridden in paravirt.c */ |
87 | void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ"))); | 115 | void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ"))); |
88 | 116 | ||
@@ -98,22 +126,14 @@ void __init native_init_IRQ(void) | |||
98 | * us. (some of these will be overridden and become | 126 | * us. (some of these will be overridden and become |
99 | * 'special' SMP interrupts) | 127 | * 'special' SMP interrupts) |
100 | */ | 128 | */ |
101 | for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) { | 129 | for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) { |
102 | int vector = FIRST_EXTERNAL_VECTOR + i; | ||
103 | if (i >= NR_IRQS) | ||
104 | break; | ||
105 | /* SYSCALL_VECTOR was reserved in trap_init. */ | 130 | /* SYSCALL_VECTOR was reserved in trap_init. */ |
106 | if (!test_bit(vector, used_vectors)) | 131 | if (i != SYSCALL_VECTOR) |
107 | set_intr_gate(vector, interrupt[i]); | 132 | set_intr_gate(i, interrupt[i]); |
108 | } | 133 | } |
109 | 134 | ||
110 | #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_SMP) | ||
111 | /* | ||
112 | * IRQ0 must be given a fixed assignment and initialized, | ||
113 | * because it's used before the IO-APIC is set up. | ||
114 | */ | ||
115 | set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]); | ||
116 | 135 | ||
136 | #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_SMP) | ||
117 | /* | 137 | /* |
118 | * The reschedule interrupt is a CPU-to-CPU reschedule-helper | 138 | * The reschedule interrupt is a CPU-to-CPU reschedule-helper |
119 | * IPI, driven by wakeup. | 139 | * IPI, driven by wakeup. |
@@ -128,6 +148,9 @@ void __init native_init_IRQ(void) | |||
128 | 148 | ||
129 | /* IPI for single call function */ | 149 | /* IPI for single call function */ |
130 | set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, call_function_single_interrupt); | 150 | set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, call_function_single_interrupt); |
151 | |||
152 | /* Low-priority IPI to clean up after moving an irq */ | ||
153 | set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); | ||
131 | #endif | 154 | #endif |
132 | 155 | ||
133 | #ifdef CONFIG_X86_LOCAL_APIC | 156 | #ifdef CONFIG_X86_LOCAL_APIC |
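The per-CPU vector_irq[] table added to irqinit_32.c above is what lets the reworked do_IRQ() translate a raw CPU vector back into an IRQ number instead of trusting the vector itself: the 16 legacy ISA vectors map to IRQs 0-15 and every other slot starts out as -1. The standalone sketch below mirrors that table, including the GNU '[a ... b] =' range initializer the patch relies on (so it needs gcc); the 0x30 base vector is an illustrative value, not the kernel's actual IRQ0_VECTOR.

#include <stdio.h>

#define NR_VECTORS_MODEL  256
#define IRQ0_VECTOR_MODEL 0x30            /* illustrative, not the real IRQ0_VECTOR */

static int vector_to_irq[NR_VECTORS_MODEL] = {
        [0 ... IRQ0_VECTOR_MODEL - 1]  = -1,
        [IRQ0_VECTOR_MODEL +  0] =  0,    /* timer */
        [IRQ0_VECTOR_MODEL +  1] =  1,
        [IRQ0_VECTOR_MODEL +  2] =  2,    /* cascade */
        [IRQ0_VECTOR_MODEL +  3] =  3,
        [IRQ0_VECTOR_MODEL +  4] =  4,
        [IRQ0_VECTOR_MODEL +  5] =  5,
        [IRQ0_VECTOR_MODEL +  6] =  6,
        [IRQ0_VECTOR_MODEL +  7] =  7,
        [IRQ0_VECTOR_MODEL +  8] =  8,
        [IRQ0_VECTOR_MODEL +  9] =  9,
        [IRQ0_VECTOR_MODEL + 10] = 10,
        [IRQ0_VECTOR_MODEL + 11] = 11,
        [IRQ0_VECTOR_MODEL + 12] = 12,
        [IRQ0_VECTOR_MODEL + 13] = 13,
        [IRQ0_VECTOR_MODEL + 14] = 14,
        [IRQ0_VECTOR_MODEL + 15] = 15,
        [IRQ0_VECTOR_MODEL + 16 ... NR_VECTORS_MODEL - 1] = -1,
};

/* what the reworked do_IRQ() does instead of trusting the raw vector */
static int irq_of_vector(unsigned int vector)
{
        return vector < NR_VECTORS_MODEL ? vector_to_irq[vector] : -1;
}

int main(void)
{
        printf("vector %#x -> irq %d\n", IRQ0_VECTOR_MODEL,
               irq_of_vector(IRQ0_VECTOR_MODEL));               /* irq 0 */
        printf("vector 0x80 -> irq %d\n", irq_of_vector(0x80)); /* unassigned: -1 */
        return 0;
}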
diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c index 5b5be9d43c2a..ff0235391285 100644 --- a/arch/x86/kernel/irqinit_64.c +++ b/arch/x86/kernel/irqinit_64.c | |||
@@ -142,23 +142,19 @@ void __init init_ISA_irqs(void) | |||
142 | init_bsp_APIC(); | 142 | init_bsp_APIC(); |
143 | init_8259A(0); | 143 | init_8259A(0); |
144 | 144 | ||
145 | for (i = 0; i < NR_IRQS; i++) { | 145 | for (i = 0; i < 16; i++) { |
146 | irq_desc[i].status = IRQ_DISABLED; | 146 | /* first-time initialization of this irq_desc */
147 | irq_desc[i].action = NULL; | 147 | struct irq_desc *desc = irq_to_desc(i); |
148 | irq_desc[i].depth = 1; | 148 | |
149 | 149 | desc->status = IRQ_DISABLED; | |
150 | if (i < 16) { | 150 | desc->action = NULL; |
151 | /* | 151 | desc->depth = 1; |
152 | * 16 old-style INTA-cycle interrupts: | 152 | |
153 | */ | 153 | /* |
154 | set_irq_chip_and_handler_name(i, &i8259A_chip, | 154 | * 16 old-style INTA-cycle interrupts: |
155 | */ | ||
156 | set_irq_chip_and_handler_name(i, &i8259A_chip, | ||
155 | handle_level_irq, "XT"); | 157 | handle_level_irq, "XT"); |
156 | } else { | ||
157 | /* | ||
158 | * 'high' PCI IRQs filled in on demand | ||
159 | */ | ||
160 | irq_desc[i].chip = &no_irq_chip; | ||
161 | } | ||
162 | } | 158 | } |
163 | } | 159 | } |
164 | 160 | ||
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index f6a11b9b1f98..67465ed89310 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c | |||
@@ -35,9 +35,6 @@ static void __devinit quirk_intel_irqbalance(struct pci_dev *dev) | |||
35 | if (!(word & (1 << 13))) { | 35 | if (!(word & (1 << 13))) { |
36 | dev_info(&dev->dev, "Intel E7520/7320/7525 detected; " | 36 | dev_info(&dev->dev, "Intel E7520/7320/7525 detected; " |
37 | "disabling irq balancing and affinity\n"); | 37 | "disabling irq balancing and affinity\n"); |
38 | #ifdef CONFIG_IRQBALANCE | ||
39 | irqbalance_disable(""); | ||
40 | #endif | ||
41 | noirqdebug_setup(""); | 38 | noirqdebug_setup(""); |
42 | #ifdef CONFIG_PROC_FS | 39 | #ifdef CONFIG_PROC_FS |
43 | no_irq_affinity = 1; | 40 | no_irq_affinity = 1; |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index b2c97874ec0f..0fa6790c1dd3 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -1073,6 +1073,7 @@ void __init setup_arch(char **cmdline_p) | |||
1073 | #endif | 1073 | #endif |
1074 | 1074 | ||
1075 | prefill_possible_map(); | 1075 | prefill_possible_map(); |
1076 | |||
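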
1076 | #ifdef CONFIG_X86_64 | 1077 | #ifdef CONFIG_X86_64 |
1077 | init_cpu_to_node(); | 1078 | init_cpu_to_node(); |
1078 | #endif | 1079 | #endif |
@@ -1080,6 +1081,9 @@ void __init setup_arch(char **cmdline_p) | |||
1080 | init_apic_mappings(); | 1081 | init_apic_mappings(); |
1081 | ioapic_init_mappings(); | 1082 | ioapic_init_mappings(); |
1082 | 1083 | ||
1084 | /* need to wait until the io_apic is mapped */ | ||
1085 | nr_irqs = probe_nr_irqs(); | ||
1086 | |||
1083 | kvm_guest_init(); | 1087 | kvm_guest_init(); |
1084 | 1088 | ||
1085 | e820_reserve_resources(); | 1089 | e820_reserve_resources(); |
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 0e67f72d9316..410c88f0bfeb 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c | |||
@@ -140,25 +140,30 @@ static void __init setup_cpu_pda_map(void) | |||
140 | */ | 140 | */ |
141 | void __init setup_per_cpu_areas(void) | 141 | void __init setup_per_cpu_areas(void) |
142 | { | 142 | { |
143 | ssize_t size = PERCPU_ENOUGH_ROOM; | 143 | ssize_t size, old_size; |
144 | char *ptr; | 144 | char *ptr; |
145 | int cpu; | 145 | int cpu; |
146 | unsigned long align = 1; | ||
146 | 147 | ||
147 | /* Setup cpu_pda map */ | 148 | /* Setup cpu_pda map */ |
148 | setup_cpu_pda_map(); | 149 | setup_cpu_pda_map(); |
149 | 150 | ||
150 | /* Copy section for each CPU (we discard the original) */ | 151 | /* Copy section for each CPU (we discard the original) */ |
151 | size = PERCPU_ENOUGH_ROOM; | 152 | old_size = PERCPU_ENOUGH_ROOM; |
153 | align = max_t(unsigned long, PAGE_SIZE, align); | ||
154 | size = roundup(old_size, align); | ||
152 | printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n", | 155 | printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n", |
153 | size); | 156 | size); |
154 | 157 | ||
155 | for_each_possible_cpu(cpu) { | 158 | for_each_possible_cpu(cpu) { |
156 | #ifndef CONFIG_NEED_MULTIPLE_NODES | 159 | #ifndef CONFIG_NEED_MULTIPLE_NODES |
157 | ptr = alloc_bootmem_pages(size); | 160 | ptr = __alloc_bootmem(size, align, |
161 | __pa(MAX_DMA_ADDRESS)); | ||
158 | #else | 162 | #else |
159 | int node = early_cpu_to_node(cpu); | 163 | int node = early_cpu_to_node(cpu); |
160 | if (!node_online(node) || !NODE_DATA(node)) { | 164 | if (!node_online(node) || !NODE_DATA(node)) { |
161 | ptr = alloc_bootmem_pages(size); | 165 | ptr = __alloc_bootmem(size, align, |
166 | __pa(MAX_DMA_ADDRESS)); | ||
162 | printk(KERN_INFO | 167 | printk(KERN_INFO |
163 | "cpu %d has no node %d or node-local memory\n", | 168 | "cpu %d has no node %d or node-local memory\n", |
164 | cpu, node); | 169 | cpu, node); |
@@ -167,7 +172,8 @@ void __init setup_per_cpu_areas(void) | |||
167 | cpu, __pa(ptr)); | 172 | cpu, __pa(ptr)); |
168 | } | 173 | } |
169 | else { | 174 | else { |
170 | ptr = alloc_bootmem_pages_node(NODE_DATA(node), size); | 175 | ptr = __alloc_bootmem_node(NODE_DATA(node), size, align, |
176 | __pa(MAX_DMA_ADDRESS)); | ||
171 | if (ptr) | 177 | if (ptr) |
172 | printk(KERN_DEBUG "per cpu data for cpu%d on node%d at %016lx\n", | 178 | printk(KERN_DEBUG "per cpu data for cpu%d on node%d at %016lx\n", |
173 | cpu, node, __pa(ptr)); | 179 | cpu, node, __pa(ptr)); |
@@ -175,7 +181,6 @@ void __init setup_per_cpu_areas(void) | |||
175 | #endif | 181 | #endif |
176 | per_cpu_offset(cpu) = ptr - __per_cpu_start; | 182 | per_cpu_offset(cpu) = ptr - __per_cpu_start; |
177 | memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); | 183 | memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); |
178 | |||
179 | } | 184 | } |
180 | 185 | ||
181 | printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n", | 186 | printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n", |
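The setup_per_cpu_areas() change rounds the per-cpu image up to at least page alignment and switches to the bootmem variants that take an explicit alignment and a lower bound of __pa(MAX_DMA_ADDRESS). A condensed sketch of that allocation choice, assuming the node-aware path is compiled in (the wrapper name is invented):

#include <linux/bootmem.h>
#include <linux/kernel.h>

/* Hypothetical wrapper: one page-aligned per-cpu copy, kept above the
 * DMA zone, node-local only when the node is actually usable. */
static void * __init alloc_percpu_copy(int cpu)
{
	unsigned long align = max_t(unsigned long, PAGE_SIZE, 1);
	size_t size = roundup(PERCPU_ENOUGH_ROOM, align);
	int node = early_cpu_to_node(cpu);

	if (!node_online(node) || !NODE_DATA(node))
		return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));

	return __alloc_bootmem_node(NODE_DATA(node), size, align,
				    __pa(MAX_DMA_ADDRESS));
}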
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 7ed9e070a6e9..7ece815ea637 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -543,10 +543,10 @@ static inline void __inquire_remote_apic(int apicid) | |||
543 | int timeout; | 543 | int timeout; |
544 | u32 status; | 544 | u32 status; |
545 | 545 | ||
546 | printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid); | 546 | printk(KERN_INFO "Inquiring remote APIC 0x%x...\n", apicid); |
547 | 547 | ||
548 | for (i = 0; i < ARRAY_SIZE(regs); i++) { | 548 | for (i = 0; i < ARRAY_SIZE(regs); i++) { |
549 | printk(KERN_INFO "... APIC #%d %s: ", apicid, names[i]); | 549 | printk(KERN_INFO "... APIC 0x%x %s: ", apicid, names[i]); |
550 | 550 | ||
551 | /* | 551 | /* |
552 | * Wait for idle. | 552 | * Wait for idle. |
@@ -874,7 +874,7 @@ do_rest: | |||
874 | start_ip = setup_trampoline(); | 874 | start_ip = setup_trampoline(); |
875 | 875 | ||
876 | /* So we see what's up */ | 876 | /* So we see what's up */ |
877 | printk(KERN_INFO "Booting processor %d/%d ip %lx\n", | 877 | printk(KERN_INFO "Booting processor %d APIC 0x%x ip 0x%lx\n", |
878 | cpu, apicid, start_ip); | 878 | cpu, apicid, start_ip); |
879 | 879 | ||
880 | /* | 880 | /* |
diff --git a/arch/x86/kernel/uv_irq.c b/arch/x86/kernel/uv_irq.c new file mode 100644 index 000000000000..aeef529917e4 --- /dev/null +++ b/arch/x86/kernel/uv_irq.c | |||
@@ -0,0 +1,79 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * SGI UV IRQ functions | ||
7 | * | ||
8 | * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved. | ||
9 | */ | ||
10 | |||
11 | #include <linux/module.h> | ||
12 | #include <linux/irq.h> | ||
13 | |||
14 | #include <asm/apic.h> | ||
15 | #include <asm/uv/uv_irq.h> | ||
16 | |||
17 | static void uv_noop(unsigned int irq) | ||
18 | { | ||
19 | } | ||
20 | |||
21 | static unsigned int uv_noop_ret(unsigned int irq) | ||
22 | { | ||
23 | return 0; | ||
24 | } | ||
25 | |||
26 | static void uv_ack_apic(unsigned int irq) | ||
27 | { | ||
28 | ack_APIC_irq(); | ||
29 | } | ||
30 | |||
31 | struct irq_chip uv_irq_chip = { | ||
32 | .name = "UV-CORE", | ||
33 | .startup = uv_noop_ret, | ||
34 | .shutdown = uv_noop, | ||
35 | .enable = uv_noop, | ||
36 | .disable = uv_noop, | ||
37 | .ack = uv_noop, | ||
38 | .mask = uv_noop, | ||
39 | .unmask = uv_noop, | ||
40 | .eoi = uv_ack_apic, | ||
41 | .end = uv_noop, | ||
42 | }; | ||
43 | |||
44 | /* | ||
45 | * Set up a mapping of an available irq and vector, and enable the specified | ||
46 | * MMR that defines the MSI that is to be sent to the specified CPU when an | ||
47 | * interrupt is raised. | ||
48 | */ | ||
49 | int uv_setup_irq(char *irq_name, int cpu, int mmr_blade, | ||
50 | unsigned long mmr_offset) | ||
51 | { | ||
52 | int irq; | ||
53 | int ret; | ||
54 | |||
55 | irq = create_irq(); | ||
56 | if (irq <= 0) | ||
57 | return -EBUSY; | ||
58 | |||
59 | ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset); | ||
60 | if (ret != irq) | ||
61 | destroy_irq(irq); | ||
62 | |||
63 | return ret; | ||
64 | } | ||
65 | EXPORT_SYMBOL_GPL(uv_setup_irq); | ||
66 | |||
67 | /* | ||
68 | * Tear down a mapping of an irq and vector, and disable the specified MMR that | ||
69 | * defined the MSI that was to be sent to the specified CPU when an interrupt | ||
70 | * was raised. | ||
71 | * | ||
72 | * Set mmr_blade and mmr_offset to what was passed in on uv_setup_irq(). | ||
73 | */ | ||
74 | void uv_teardown_irq(unsigned int irq, int mmr_blade, unsigned long mmr_offset) | ||
75 | { | ||
76 | arch_disable_uv_irq(mmr_blade, mmr_offset); | ||
77 | destroy_irq(irq); | ||
78 | } | ||
79 | EXPORT_SYMBOL_GPL(uv_teardown_irq); | ||
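For context, a caller of the new UV interface allocates an IRQ for a blade/MMR pair and releases it again on teardown. The sketch below shows the intended call pattern; the handler and the cpu, blade and offset values are made up, only uv_setup_irq()/uv_teardown_irq() come from the file above:

#include <linux/interrupt.h>
#include <asm/uv/uv_irq.h>

static irqreturn_t my_uv_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int my_uv_attach(void)
{
	int irq = uv_setup_irq("my-uv-dev", 0 /* cpu */, 1 /* mmr_blade */,
			       0x1234UL /* mmr_offset */);
	if (irq < 0)
		return irq;

	if (request_irq(irq, my_uv_handler, 0, "my-uv-dev", NULL)) {
		uv_teardown_irq(irq, 1, 0x1234UL);
		return -EBUSY;
	}
	return irq;
}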
diff --git a/arch/x86/kernel/uv_sysfs.c b/arch/x86/kernel/uv_sysfs.c new file mode 100644 index 000000000000..67f9b9dbf800 --- /dev/null +++ b/arch/x86/kernel/uv_sysfs.c | |||
@@ -0,0 +1,72 @@ | |||
1 | /* | ||
2 | * This file supports the /sys/firmware/sgi_uv interfaces for SGI UV. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | * | ||
18 | * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. | ||
19 | * Copyright (c) Russ Anderson | ||
20 | */ | ||
21 | |||
22 | #include <linux/sysdev.h> | ||
23 | #include <asm/uv/bios.h> | ||
24 | |||
25 | struct kobject *sgi_uv_kobj; | ||
26 | |||
27 | static ssize_t partition_id_show(struct kobject *kobj, | ||
28 | struct kobj_attribute *attr, char *buf) | ||
29 | { | ||
30 | return snprintf(buf, PAGE_SIZE, "%ld\n", sn_partition_id); | ||
31 | } | ||
32 | |||
33 | static ssize_t coherence_id_show(struct kobject *kobj, | ||
34 | struct kobj_attribute *attr, char *buf) | ||
35 | { | ||
36 | return snprintf(buf, PAGE_SIZE, "%ld\n", partition_coherence_id()); | ||
37 | } | ||
38 | |||
39 | static struct kobj_attribute partition_id_attr = | ||
40 | __ATTR(partition_id, S_IRUGO, partition_id_show, NULL); | ||
41 | |||
42 | static struct kobj_attribute coherence_id_attr = | ||
43 | __ATTR(coherence_id, S_IRUGO, coherence_id_show, NULL); | ||
44 | |||
45 | |||
46 | static int __init sgi_uv_sysfs_init(void) | ||
47 | { | ||
48 | unsigned long ret; | ||
49 | |||
50 | if (!sgi_uv_kobj) | ||
51 | sgi_uv_kobj = kobject_create_and_add("sgi_uv", firmware_kobj); | ||
52 | if (!sgi_uv_kobj) { | ||
53 | printk(KERN_WARNING "kobject_create_and_add sgi_uv failed \n"); | ||
54 | return -EINVAL; | ||
55 | } | ||
56 | |||
57 | ret = sysfs_create_file(sgi_uv_kobj, &partition_id_attr.attr); | ||
58 | if (ret) { | ||
59 | printk(KERN_WARNING "sysfs_create_file partition_id failed \n"); | ||
60 | return ret; | ||
61 | } | ||
62 | |||
63 | ret = sysfs_create_file(sgi_uv_kobj, &coherence_id_attr.attr); | ||
64 | if (ret) { | ||
65 | printk(KERN_WARNING "sysfs_create_file coherence_id failed \n"); | ||
66 | return ret; | ||
67 | } | ||
68 | |||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | device_initcall(sgi_uv_sysfs_init); | ||
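Once this initcall has run, the two attributes appear under /sys/firmware/sgi_uv/. A small userspace sketch, hypothetical and not part of the patch, that reads them back:

#include <stdio.h>

int main(void)
{
	const char *files[] = {
		"/sys/firmware/sgi_uv/partition_id",
		"/sys/firmware/sgi_uv/coherence_id",
	};
	char buf[64];

	for (int i = 0; i < 2; i++) {
		FILE *f = fopen(files[i], "r");
		if (!f)
			continue;
		if (fgets(buf, sizeof(buf), f))
			printf("%s: %s", files[i], buf);
		fclose(f);
	}
	return 0;
}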
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c index 61a97e616f70..0c9667f0752a 100644 --- a/arch/x86/kernel/visws_quirks.c +++ b/arch/x86/kernel/visws_quirks.c | |||
@@ -484,10 +484,11 @@ static void disable_cobalt_irq(unsigned int irq) | |||
484 | static unsigned int startup_cobalt_irq(unsigned int irq) | 484 | static unsigned int startup_cobalt_irq(unsigned int irq) |
485 | { | 485 | { |
486 | unsigned long flags; | 486 | unsigned long flags; |
487 | struct irq_desc *desc = irq_to_desc(irq); | ||
487 | 488 | ||
488 | spin_lock_irqsave(&cobalt_lock, flags); | 489 | spin_lock_irqsave(&cobalt_lock, flags); |
489 | if ((irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING))) | 490 | if ((desc->status & (IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING))) |
490 | irq_desc[irq].status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING); | 491 | desc->status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING); |
491 | enable_cobalt_irq(irq); | 492 | enable_cobalt_irq(irq); |
492 | spin_unlock_irqrestore(&cobalt_lock, flags); | 493 | spin_unlock_irqrestore(&cobalt_lock, flags); |
493 | return 0; | 494 | return 0; |
@@ -506,9 +507,10 @@ static void ack_cobalt_irq(unsigned int irq) | |||
506 | static void end_cobalt_irq(unsigned int irq) | 507 | static void end_cobalt_irq(unsigned int irq) |
507 | { | 508 | { |
508 | unsigned long flags; | 509 | unsigned long flags; |
510 | struct irq_desc *desc = irq_to_desc(irq); | ||
509 | 511 | ||
510 | spin_lock_irqsave(&cobalt_lock, flags); | 512 | spin_lock_irqsave(&cobalt_lock, flags); |
511 | if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) | 513 | if (!(desc->status & (IRQ_DISABLED | IRQ_INPROGRESS))) |
512 | enable_cobalt_irq(irq); | 514 | enable_cobalt_irq(irq); |
513 | spin_unlock_irqrestore(&cobalt_lock, flags); | 515 | spin_unlock_irqrestore(&cobalt_lock, flags); |
514 | } | 516 | } |
@@ -626,12 +628,12 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id) | |||
626 | 628 | ||
627 | spin_unlock_irqrestore(&i8259A_lock, flags); | 629 | spin_unlock_irqrestore(&i8259A_lock, flags); |
628 | 630 | ||
629 | desc = irq_desc + realirq; | 631 | desc = irq_to_desc(realirq); |
630 | 632 | ||
631 | /* | 633 | /* |
632 | * handle this 'virtual interrupt' as a Cobalt one now. | 634 | * handle this 'virtual interrupt' as a Cobalt one now. |
633 | */ | 635 | */ |
634 | kstat_cpu(smp_processor_id()).irqs[realirq]++; | 636 | kstat_incr_irqs_this_cpu(realirq, desc); |
635 | 637 | ||
636 | if (likely(desc->action != NULL)) | 638 | if (likely(desc->action != NULL)) |
637 | handle_IRQ_event(realirq, desc->action); | 639 | handle_IRQ_event(realirq, desc->action); |
@@ -662,27 +664,29 @@ void init_VISWS_APIC_irqs(void) | |||
662 | int i; | 664 | int i; |
663 | 665 | ||
664 | for (i = 0; i < CO_IRQ_APIC0 + CO_APIC_LAST + 1; i++) { | 666 | for (i = 0; i < CO_IRQ_APIC0 + CO_APIC_LAST + 1; i++) { |
665 | irq_desc[i].status = IRQ_DISABLED; | 667 | struct irq_desc *desc = irq_to_desc(i); |
666 | irq_desc[i].action = 0; | 668 | |
667 | irq_desc[i].depth = 1; | 669 | desc->status = IRQ_DISABLED; |
670 | desc->action = 0; | ||
671 | desc->depth = 1; | ||
668 | 672 | ||
669 | if (i == 0) { | 673 | if (i == 0) { |
670 | irq_desc[i].chip = &cobalt_irq_type; | 674 | desc->chip = &cobalt_irq_type; |
671 | } | 675 | } |
672 | else if (i == CO_IRQ_IDE0) { | 676 | else if (i == CO_IRQ_IDE0) { |
673 | irq_desc[i].chip = &cobalt_irq_type; | 677 | desc->chip = &cobalt_irq_type; |
674 | } | 678 | } |
675 | else if (i == CO_IRQ_IDE1) { | 679 | else if (i == CO_IRQ_IDE1) { |
676 | irq_desc[i].chip = &cobalt_irq_type; | 680 | desc->chip = &cobalt_irq_type; |
677 | } | 681 | } |
678 | else if (i == CO_IRQ_8259) { | 682 | else if (i == CO_IRQ_8259) { |
679 | irq_desc[i].chip = &piix4_master_irq_type; | 683 | desc->chip = &piix4_master_irq_type; |
680 | } | 684 | } |
681 | else if (i < CO_IRQ_APIC0) { | 685 | else if (i < CO_IRQ_APIC0) { |
682 | irq_desc[i].chip = &piix4_virtual_irq_type; | 686 | desc->chip = &piix4_virtual_irq_type; |
683 | } | 687 | } |
684 | else if (IS_CO_APIC(i)) { | 688 | else if (IS_CO_APIC(i)) { |
685 | irq_desc[i].chip = &cobalt_irq_type; | 689 | desc->chip = &cobalt_irq_type; |
686 | } | 690 | } |
687 | } | 691 | } |
688 | 692 | ||
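Besides the irq_to_desc() conversion, the piix4 handler above moves interrupt accounting from the flat kstat_cpu().irqs[] array to the per-descriptor helper. A minimal sketch of a cascaded-interrupt demux after that conversion (the function name is illustrative; the called helpers are the ones used in the hunk):

#include <linux/irq.h>
#include <linux/kernel_stat.h>

/* Sketch: demultiplex a cascaded interrupt onto its real descriptor. */
static void demux_one_irq(unsigned int realirq)
{
	struct irq_desc *desc = irq_to_desc(realirq);

	if (!desc)
		return;

	/* account the interrupt against this CPU for /proc/interrupts */
	kstat_incr_irqs_this_cpu(realirq, desc);

	if (desc->action)
		handle_IRQ_event(realirq, desc->action);
}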
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c index 6953859fe289..254ee07f8635 100644 --- a/arch/x86/kernel/vmiclock_32.c +++ b/arch/x86/kernel/vmiclock_32.c | |||
@@ -235,11 +235,14 @@ static void __devinit vmi_time_init_clockevent(void) | |||
235 | 235 | ||
236 | void __init vmi_time_init(void) | 236 | void __init vmi_time_init(void) |
237 | { | 237 | { |
238 | unsigned int cpu; | ||
238 | /* Disable PIT: BIOSes start PIT CH0 with 18.2Hz periodic. */ | 239 | /* Disable PIT: BIOSes start PIT CH0 with 18.2Hz periodic. */ |
239 | outb_pit(0x3a, PIT_MODE); /* binary, mode 5, LSB/MSB, ch 0 */ | 240 | outb_pit(0x3a, PIT_MODE); /* binary, mode 5, LSB/MSB, ch 0 */ |
240 | 241 | ||
241 | vmi_time_init_clockevent(); | 242 | vmi_time_init_clockevent(); |
242 | setup_irq(0, &vmi_clock_action); | 243 | setup_irq(0, &vmi_clock_action); |
244 | for_each_possible_cpu(cpu) | ||
245 | per_cpu(vector_irq, cpu)[vmi_get_timer_vector()] = 0; | ||
243 | } | 246 | } |
244 | 247 | ||
245 | #ifdef CONFIG_X86_LOCAL_APIC | 248 | #ifdef CONFIG_X86_LOCAL_APIC |
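The added loop publishes the VMI timer vector in every CPU's vector_irq[] table so the incoming vector can be translated back to IRQ 0 in do_IRQ(). In isolation the idea is just this (a sketch; vector_irq is the per-cpu array the series introduces on 32-bit, the function name is invented):

#include <linux/percpu.h>

/* Sketch: make a given vector resolve to IRQ 0 on every possible CPU. */
static void __init map_vector_to_irq0(int vector)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(vector_irq, cpu)[vector] = 0;
}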
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 65f0b8a47bed..48ee4f9435f4 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c | |||
@@ -582,7 +582,7 @@ static void __init lguest_init_IRQ(void) | |||
582 | for (i = 0; i < LGUEST_IRQS; i++) { | 582 | for (i = 0; i < LGUEST_IRQS; i++) { |
583 | int vector = FIRST_EXTERNAL_VECTOR + i; | 583 | int vector = FIRST_EXTERNAL_VECTOR + i; |
584 | if (vector != SYSCALL_VECTOR) { | 584 | if (vector != SYSCALL_VECTOR) { |
585 | set_intr_gate(vector, interrupt[i]); | 585 | set_intr_gate(vector, interrupt[vector]); |
586 | set_irq_chip_and_handler_name(i, &lguest_irq_controller, | 586 | set_irq_chip_and_handler_name(i, &lguest_irq_controller, |
587 | handle_level_irq, | 587 | handle_level_irq, |
588 | "level"); | 588 | "level"); |
diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index df37fc9d6a26..3c3b471ea496 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c | |||
@@ -41,6 +41,10 @@ static const struct dmi_system_id bigsmp_dmi_table[] = { | |||
41 | { } | 41 | { } |
42 | }; | 42 | }; |
43 | 43 | ||
44 | static cpumask_t vector_allocation_domain(int cpu) | ||
45 | { | ||
46 | return cpumask_of_cpu(cpu); | ||
47 | } | ||
44 | 48 | ||
45 | static int probe_bigsmp(void) | 49 | static int probe_bigsmp(void) |
46 | { | 50 | { |
diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 6513d41ea21e..28459cab3ddb 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c | |||
@@ -75,4 +75,18 @@ static int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
75 | } | 75 | } |
76 | #endif | 76 | #endif |
77 | 77 | ||
78 | static cpumask_t vector_allocation_domain(int cpu) | ||
79 | { | ||
80 | /* Careful. Some cpus do not strictly honor the set of cpus | ||
81 | * specified in the interrupt destination when using lowest | ||
82 | * priority interrupt delivery mode. | ||
83 | * | ||
84 | * In particular there was a hyperthreading cpu observed to | ||
85 | * deliver interrupts to the wrong hyperthread when only one | ||
86 | * hyperthread was specified in the interrupt destination. | ||
87 | */ | ||
88 | cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; | ||
89 | return domain; | ||
90 | } | ||
91 | |||
78 | struct genapic __initdata_refok apic_es7000 = APIC_INIT("es7000", probe_es7000); | 92 | struct genapic __initdata_refok apic_es7000 = APIC_INIT("es7000", probe_es7000); |
diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index 8cf58394975e..71a309b122e6 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c | |||
@@ -38,4 +38,18 @@ static int acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
38 | return 0; | 38 | return 0; |
39 | } | 39 | } |
40 | 40 | ||
41 | static cpumask_t vector_allocation_domain(int cpu) | ||
42 | { | ||
43 | /* Careful. Some cpus do not strictly honor the set of cpus | ||
44 | * specified in the interrupt destination when using lowest | ||
45 | * priority interrupt delivery mode. | ||
46 | * | ||
47 | * In particular there was a hyperthreading cpu observed to | ||
48 | * deliver interrupts to the wrong hyperthread when only one | ||
49 | * hyperthread was specified in the interrupt destination. | ||
50 | */ | ||
51 | cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; | ||
52 | return domain; | ||
53 | } | ||
54 | |||
41 | struct genapic apic_numaq = APIC_INIT("NUMAQ", probe_numaq); | 55 | struct genapic apic_numaq = APIC_INIT("NUMAQ", probe_numaq); |
diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index 6ad6b67a723d..6272b5e69da6 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c | |||
@@ -23,4 +23,18 @@ static int probe_summit(void) | |||
23 | return 0; | 23 | return 0; |
24 | } | 24 | } |
25 | 25 | ||
26 | static cpumask_t vector_allocation_domain(int cpu) | ||
27 | { | ||
28 | /* Careful. Some cpus do not strictly honor the set of cpus | ||
29 | * specified in the interrupt destination when using lowest | ||
30 | * priority interrupt delivery mode. | ||
31 | * | ||
32 | * In particular there was a hyperthreading cpu observed to | ||
33 | * deliver interrupts to the wrong hyperthread when only one | ||
34 | * hyperthread was specified in the interrupt destination. | ||
35 | */ | ||
36 | cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; | ||
37 | return domain; | ||
38 | } | ||
39 | |||
26 | struct genapic apic_summit = APIC_INIT("summit", probe_summit); | 40 | struct genapic apic_summit = APIC_INIT("summit", probe_summit); |
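The same vector_allocation_domain() helper is added for es7000, NUMAQ and summit, while bigsmp (further up) restricts the domain to a single CPU because it uses physical destination mode rather than lowest-priority delivery. Side by side, the two policies reduce to roughly this sketch (APIC_ALL_CPUS and cpumask_of_cpu() are the symbols the patch itself uses; the function names are illustrative):

/* Lowest-priority delivery: offer the vector to the whole cluster,
 * since some hyperthreaded parts ignore a single-CPU destination. */
static cpumask_t lowprio_vector_domain(int cpu)
{
	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
	return domain;
}

/* Physical/bigsmp delivery: the vector belongs to exactly one CPU. */
static cpumask_t percpu_vector_domain(int cpu)
{
	return cpumask_of_cpu(cpu);
}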
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c index 199a5f4a873c..0f6e8a6523ae 100644 --- a/arch/x86/mach-voyager/voyager_smp.c +++ b/arch/x86/mach-voyager/voyager_smp.c | |||
@@ -1483,7 +1483,7 @@ static void disable_local_vic_irq(unsigned int irq) | |||
1483 | * the interrupt off to another CPU */ | 1483 | * the interrupt off to another CPU */ |
1484 | static void before_handle_vic_irq(unsigned int irq) | 1484 | static void before_handle_vic_irq(unsigned int irq) |
1485 | { | 1485 | { |
1486 | irq_desc_t *desc = irq_desc + irq; | 1486 | irq_desc_t *desc = irq_to_desc(irq); |
1487 | __u8 cpu = smp_processor_id(); | 1487 | __u8 cpu = smp_processor_id(); |
1488 | 1488 | ||
1489 | _raw_spin_lock(&vic_irq_lock); | 1489 | _raw_spin_lock(&vic_irq_lock); |
@@ -1518,7 +1518,7 @@ static void before_handle_vic_irq(unsigned int irq) | |||
1518 | /* Finish the VIC interrupt: basically mask */ | 1518 | /* Finish the VIC interrupt: basically mask */ |
1519 | static void after_handle_vic_irq(unsigned int irq) | 1519 | static void after_handle_vic_irq(unsigned int irq) |
1520 | { | 1520 | { |
1521 | irq_desc_t *desc = irq_desc + irq; | 1521 | irq_desc_t *desc = irq_to_desc(irq); |
1522 | 1522 | ||
1523 | _raw_spin_lock(&vic_irq_lock); | 1523 | _raw_spin_lock(&vic_irq_lock); |
1524 | { | 1524 | { |
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c index 635b50e85581..2c4baa88f2cb 100644 --- a/arch/x86/mm/mmio-mod.c +++ b/arch/x86/mm/mmio-mod.c | |||
@@ -56,13 +56,6 @@ struct remap_trace { | |||
56 | static DEFINE_PER_CPU(struct trap_reason, pf_reason); | 56 | static DEFINE_PER_CPU(struct trap_reason, pf_reason); |
57 | static DEFINE_PER_CPU(struct mmiotrace_rw, cpu_trace); | 57 | static DEFINE_PER_CPU(struct mmiotrace_rw, cpu_trace); |
58 | 58 | ||
59 | #if 0 /* XXX: no way gather this info anymore */ | ||
60 | /* Access to this is not per-cpu. */ | ||
61 | static DEFINE_PER_CPU(atomic_t, dropped); | ||
62 | #endif | ||
63 | |||
64 | static struct dentry *marker_file; | ||
65 | |||
66 | static DEFINE_MUTEX(mmiotrace_mutex); | 59 | static DEFINE_MUTEX(mmiotrace_mutex); |
67 | static DEFINE_SPINLOCK(trace_lock); | 60 | static DEFINE_SPINLOCK(trace_lock); |
68 | static atomic_t mmiotrace_enabled; | 61 | static atomic_t mmiotrace_enabled; |
@@ -75,7 +68,7 @@ static LIST_HEAD(trace_list); /* struct remap_trace */ | |||
75 | * and trace_lock. | 68 | * and trace_lock. |
76 | * - Routines depending on is_enabled() must take trace_lock. | 69 | * - Routines depending on is_enabled() must take trace_lock. |
77 | * - trace_list users must hold trace_lock. | 70 | * - trace_list users must hold trace_lock. |
78 | * - is_enabled() guarantees that mmio_trace_record is allowed. | 71 | * - is_enabled() guarantees that mmio_trace_{rw,mapping} are allowed. |
79 | * - pre/post callbacks assume the effect of is_enabled() being true. | 72 | * - pre/post callbacks assume the effect of is_enabled() being true. |
80 | */ | 73 | */ |
81 | 74 | ||
@@ -97,44 +90,6 @@ static bool is_enabled(void) | |||
97 | return atomic_read(&mmiotrace_enabled); | 90 | return atomic_read(&mmiotrace_enabled); |
98 | } | 91 | } |
99 | 92 | ||
100 | #if 0 /* XXX: needs rewrite */ | ||
101 | /* | ||
102 | * Write callback for the debugfs entry: | ||
103 | * Read a marker and write it to the mmio trace log | ||
104 | */ | ||
105 | static ssize_t write_marker(struct file *file, const char __user *buffer, | ||
106 | size_t count, loff_t *ppos) | ||
107 | { | ||
108 | char *event = NULL; | ||
109 | struct mm_io_header *headp; | ||
110 | ssize_t len = (count > 65535) ? 65535 : count; | ||
111 | |||
112 | event = kzalloc(sizeof(*headp) + len, GFP_KERNEL); | ||
113 | if (!event) | ||
114 | return -ENOMEM; | ||
115 | |||
116 | headp = (struct mm_io_header *)event; | ||
117 | headp->type = MMIO_MAGIC | (MMIO_MARKER << MMIO_OPCODE_SHIFT); | ||
118 | headp->data_len = len; | ||
119 | |||
120 | if (copy_from_user(event + sizeof(*headp), buffer, len)) { | ||
121 | kfree(event); | ||
122 | return -EFAULT; | ||
123 | } | ||
124 | |||
125 | spin_lock_irq(&trace_lock); | ||
126 | #if 0 /* XXX: convert this to use tracing */ | ||
127 | if (is_enabled()) | ||
128 | relay_write(chan, event, sizeof(*headp) + len); | ||
129 | else | ||
130 | #endif | ||
131 | len = -EINVAL; | ||
132 | spin_unlock_irq(&trace_lock); | ||
133 | kfree(event); | ||
134 | return len; | ||
135 | } | ||
136 | #endif | ||
137 | |||
138 | static void print_pte(unsigned long address) | 93 | static void print_pte(unsigned long address) |
139 | { | 94 | { |
140 | unsigned int level; | 95 | unsigned int level; |
@@ -307,8 +262,10 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size, | |||
307 | map.map_id = trace->id; | 262 | map.map_id = trace->id; |
308 | 263 | ||
309 | spin_lock_irq(&trace_lock); | 264 | spin_lock_irq(&trace_lock); |
310 | if (!is_enabled()) | 265 | if (!is_enabled()) { |
266 | kfree(trace); | ||
311 | goto not_enabled; | 267 | goto not_enabled; |
268 | } | ||
312 | 269 | ||
313 | mmio_trace_mapping(&map); | 270 | mmio_trace_mapping(&map); |
314 | list_add_tail(&trace->list, &trace_list); | 271 | list_add_tail(&trace->list, &trace_list); |
@@ -377,6 +334,23 @@ void mmiotrace_iounmap(volatile void __iomem *addr) | |||
377 | iounmap_trace_core(addr); | 334 | iounmap_trace_core(addr); |
378 | } | 335 | } |
379 | 336 | ||
337 | int mmiotrace_printk(const char *fmt, ...) | ||
338 | { | ||
339 | int ret = 0; | ||
340 | va_list args; | ||
341 | unsigned long flags; | ||
342 | va_start(args, fmt); | ||
343 | |||
344 | spin_lock_irqsave(&trace_lock, flags); | ||
345 | if (is_enabled()) | ||
346 | ret = mmio_trace_printk(fmt, args); | ||
347 | spin_unlock_irqrestore(&trace_lock, flags); | ||
348 | |||
349 | va_end(args); | ||
350 | return ret; | ||
351 | } | ||
352 | EXPORT_SYMBOL(mmiotrace_printk); | ||
353 | |||
380 | static void clear_trace_list(void) | 354 | static void clear_trace_list(void) |
381 | { | 355 | { |
382 | struct remap_trace *trace; | 356 | struct remap_trace *trace; |
@@ -462,26 +436,12 @@ static void leave_uniprocessor(void) | |||
462 | } | 436 | } |
463 | #endif | 437 | #endif |
464 | 438 | ||
465 | #if 0 /* XXX: out of order */ | ||
466 | static struct file_operations fops_marker = { | ||
467 | .owner = THIS_MODULE, | ||
468 | .write = write_marker | ||
469 | }; | ||
470 | #endif | ||
471 | |||
472 | void enable_mmiotrace(void) | 439 | void enable_mmiotrace(void) |
473 | { | 440 | { |
474 | mutex_lock(&mmiotrace_mutex); | 441 | mutex_lock(&mmiotrace_mutex); |
475 | if (is_enabled()) | 442 | if (is_enabled()) |
476 | goto out; | 443 | goto out; |
477 | 444 | ||
478 | #if 0 /* XXX: tracing does not support text entries */ | ||
479 | marker_file = debugfs_create_file("marker", 0660, dir, NULL, | ||
480 | &fops_marker); | ||
481 | if (!marker_file) | ||
482 | pr_err(NAME "marker file creation failed.\n"); | ||
483 | #endif | ||
484 | |||
485 | if (nommiotrace) | 445 | if (nommiotrace) |
486 | pr_info(NAME "MMIO tracing disabled.\n"); | 446 | pr_info(NAME "MMIO tracing disabled.\n"); |
487 | enter_uniprocessor(); | 447 | enter_uniprocessor(); |
@@ -506,11 +466,6 @@ void disable_mmiotrace(void) | |||
506 | 466 | ||
507 | clear_trace_list(); /* guarantees: no more kmmio callbacks */ | 467 | clear_trace_list(); /* guarantees: no more kmmio callbacks */ |
508 | leave_uniprocessor(); | 468 | leave_uniprocessor(); |
509 | if (marker_file) { | ||
510 | debugfs_remove(marker_file); | ||
511 | marker_file = NULL; | ||
512 | } | ||
513 | |||
514 | pr_info(NAME "disabled.\n"); | 469 | pr_info(NAME "disabled.\n"); |
515 | out: | 470 | out: |
516 | mutex_unlock(&mmiotrace_mutex); | 471 | mutex_unlock(&mmiotrace_mutex); |
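The new mmiotrace_printk() checks is_enabled() under trace_lock, so markers written while tracing is off are dropped instead of racing with enable/disable. A hypothetical caller (only mmiotrace_printk() itself comes from the hunk above):

#include <linux/mmiotrace.h>

/* Sketch: drop a marker into the MMIO trace around an access burst. */
static void traced_probe(void __iomem *base, unsigned int reg)
{
	mmiotrace_printk("probing reg 0x%x at %p\n", reg, base);
	/* ... the actual ioread/iowrite accesses would go here ... */
}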
diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c index efa1911e20ca..df3d5c861cda 100644 --- a/arch/x86/mm/pf_in.c +++ b/arch/x86/mm/pf_in.c | |||
@@ -79,25 +79,34 @@ static unsigned int mw32[] = { 0xC7 }; | |||
79 | static unsigned int mw64[] = { 0x89, 0x8B }; | 79 | static unsigned int mw64[] = { 0x89, 0x8B }; |
80 | #endif /* not __i386__ */ | 80 | #endif /* not __i386__ */ |
81 | 81 | ||
82 | static int skip_prefix(unsigned char *addr, int *shorted, int *enlarged, | 82 | struct prefix_bits { |
83 | int *rexr) | 83 | unsigned shorted:1; |
84 | unsigned enlarged:1; | ||
85 | unsigned rexr:1; | ||
86 | unsigned rex:1; | ||
87 | }; | ||
88 | |||
89 | static int skip_prefix(unsigned char *addr, struct prefix_bits *prf) | ||
84 | { | 90 | { |
85 | int i; | 91 | int i; |
86 | unsigned char *p = addr; | 92 | unsigned char *p = addr; |
87 | *shorted = 0; | 93 | prf->shorted = 0; |
88 | *enlarged = 0; | 94 | prf->enlarged = 0; |
89 | *rexr = 0; | 95 | prf->rexr = 0; |
96 | prf->rex = 0; | ||
90 | 97 | ||
91 | restart: | 98 | restart: |
92 | for (i = 0; i < ARRAY_SIZE(prefix_codes); i++) { | 99 | for (i = 0; i < ARRAY_SIZE(prefix_codes); i++) { |
93 | if (*p == prefix_codes[i]) { | 100 | if (*p == prefix_codes[i]) { |
94 | if (*p == 0x66) | 101 | if (*p == 0x66) |
95 | *shorted = 1; | 102 | prf->shorted = 1; |
96 | #ifdef __amd64__ | 103 | #ifdef __amd64__ |
97 | if ((*p & 0xf8) == 0x48) | 104 | if ((*p & 0xf8) == 0x48) |
98 | *enlarged = 1; | 105 | prf->enlarged = 1; |
99 | if ((*p & 0xf4) == 0x44) | 106 | if ((*p & 0xf4) == 0x44) |
100 | *rexr = 1; | 107 | prf->rexr = 1; |
108 | if ((*p & 0xf0) == 0x40) | ||
109 | prf->rex = 1; | ||
101 | #endif | 110 | #endif |
102 | p++; | 111 | p++; |
103 | goto restart; | 112 | goto restart; |
@@ -135,12 +144,12 @@ enum reason_type get_ins_type(unsigned long ins_addr) | |||
135 | { | 144 | { |
136 | unsigned int opcode; | 145 | unsigned int opcode; |
137 | unsigned char *p; | 146 | unsigned char *p; |
138 | int shorted, enlarged, rexr; | 147 | struct prefix_bits prf; |
139 | int i; | 148 | int i; |
140 | enum reason_type rv = OTHERS; | 149 | enum reason_type rv = OTHERS; |
141 | 150 | ||
142 | p = (unsigned char *)ins_addr; | 151 | p = (unsigned char *)ins_addr; |
143 | p += skip_prefix(p, &shorted, &enlarged, &rexr); | 152 | p += skip_prefix(p, &prf); |
144 | p += get_opcode(p, &opcode); | 153 | p += get_opcode(p, &opcode); |
145 | 154 | ||
146 | CHECK_OP_TYPE(opcode, reg_rop, REG_READ); | 155 | CHECK_OP_TYPE(opcode, reg_rop, REG_READ); |
@@ -156,10 +165,11 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr) | |||
156 | { | 165 | { |
157 | unsigned int opcode; | 166 | unsigned int opcode; |
158 | unsigned char *p; | 167 | unsigned char *p; |
159 | int i, shorted, enlarged, rexr; | 168 | struct prefix_bits prf; |
169 | int i; | ||
160 | 170 | ||
161 | p = (unsigned char *)ins_addr; | 171 | p = (unsigned char *)ins_addr; |
162 | p += skip_prefix(p, &shorted, &enlarged, &rexr); | 172 | p += skip_prefix(p, &prf); |
163 | p += get_opcode(p, &opcode); | 173 | p += get_opcode(p, &opcode); |
164 | 174 | ||
165 | for (i = 0; i < ARRAY_SIZE(rw8); i++) | 175 | for (i = 0; i < ARRAY_SIZE(rw8); i++) |
@@ -168,7 +178,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr) | |||
168 | 178 | ||
169 | for (i = 0; i < ARRAY_SIZE(rw32); i++) | 179 | for (i = 0; i < ARRAY_SIZE(rw32); i++) |
170 | if (rw32[i] == opcode) | 180 | if (rw32[i] == opcode) |
171 | return (shorted ? 2 : (enlarged ? 8 : 4)); | 181 | return prf.shorted ? 2 : (prf.enlarged ? 8 : 4); |
172 | 182 | ||
173 | printk(KERN_ERR "mmiotrace: Unknown opcode 0x%02x\n", opcode); | 183 | printk(KERN_ERR "mmiotrace: Unknown opcode 0x%02x\n", opcode); |
174 | return 0; | 184 | return 0; |
@@ -178,10 +188,11 @@ unsigned int get_ins_mem_width(unsigned long ins_addr) | |||
178 | { | 188 | { |
179 | unsigned int opcode; | 189 | unsigned int opcode; |
180 | unsigned char *p; | 190 | unsigned char *p; |
181 | int i, shorted, enlarged, rexr; | 191 | struct prefix_bits prf; |
192 | int i; | ||
182 | 193 | ||
183 | p = (unsigned char *)ins_addr; | 194 | p = (unsigned char *)ins_addr; |
184 | p += skip_prefix(p, &shorted, &enlarged, &rexr); | 195 | p += skip_prefix(p, &prf); |
185 | p += get_opcode(p, &opcode); | 196 | p += get_opcode(p, &opcode); |
186 | 197 | ||
187 | for (i = 0; i < ARRAY_SIZE(mw8); i++) | 198 | for (i = 0; i < ARRAY_SIZE(mw8); i++) |
@@ -194,11 +205,11 @@ unsigned int get_ins_mem_width(unsigned long ins_addr) | |||
194 | 205 | ||
195 | for (i = 0; i < ARRAY_SIZE(mw32); i++) | 206 | for (i = 0; i < ARRAY_SIZE(mw32); i++) |
196 | if (mw32[i] == opcode) | 207 | if (mw32[i] == opcode) |
197 | return shorted ? 2 : 4; | 208 | return prf.shorted ? 2 : 4; |
198 | 209 | ||
199 | for (i = 0; i < ARRAY_SIZE(mw64); i++) | 210 | for (i = 0; i < ARRAY_SIZE(mw64); i++) |
200 | if (mw64[i] == opcode) | 211 | if (mw64[i] == opcode) |
201 | return shorted ? 2 : (enlarged ? 8 : 4); | 212 | return prf.shorted ? 2 : (prf.enlarged ? 8 : 4); |
202 | 213 | ||
203 | printk(KERN_ERR "mmiotrace: Unknown opcode 0x%02x\n", opcode); | 214 | printk(KERN_ERR "mmiotrace: Unknown opcode 0x%02x\n", opcode); |
204 | return 0; | 215 | return 0; |
@@ -238,7 +249,7 @@ enum { | |||
238 | #endif | 249 | #endif |
239 | }; | 250 | }; |
240 | 251 | ||
241 | static unsigned char *get_reg_w8(int no, struct pt_regs *regs) | 252 | static unsigned char *get_reg_w8(int no, int rex, struct pt_regs *regs) |
242 | { | 253 | { |
243 | unsigned char *rv = NULL; | 254 | unsigned char *rv = NULL; |
244 | 255 | ||
@@ -255,18 +266,6 @@ static unsigned char *get_reg_w8(int no, struct pt_regs *regs) | |||
255 | case arg_DL: | 266 | case arg_DL: |
256 | rv = (unsigned char *)®s->dx; | 267 | rv = (unsigned char *)®s->dx; |
257 | break; | 268 | break; |
258 | case arg_AH: | ||
259 | rv = 1 + (unsigned char *)®s->ax; | ||
260 | break; | ||
261 | case arg_BH: | ||
262 | rv = 1 + (unsigned char *)®s->bx; | ||
263 | break; | ||
264 | case arg_CH: | ||
265 | rv = 1 + (unsigned char *)®s->cx; | ||
266 | break; | ||
267 | case arg_DH: | ||
268 | rv = 1 + (unsigned char *)®s->dx; | ||
269 | break; | ||
270 | #ifdef __amd64__ | 269 | #ifdef __amd64__ |
271 | case arg_R8: | 270 | case arg_R8: |
272 | rv = (unsigned char *)®s->r8; | 271 | rv = (unsigned char *)®s->r8; |
@@ -294,9 +293,55 @@ static unsigned char *get_reg_w8(int no, struct pt_regs *regs) | |||
294 | break; | 293 | break; |
295 | #endif | 294 | #endif |
296 | default: | 295 | default: |
297 | printk(KERN_ERR "mmiotrace: Error reg no# %d\n", no); | ||
298 | break; | 296 | break; |
299 | } | 297 | } |
298 | |||
299 | if (rv) | ||
300 | return rv; | ||
301 | |||
302 | if (rex) { | ||
303 | /* | ||
304 | * If REX prefix exists, access low bytes of SI etc. | ||
305 | * instead of AH etc. | ||
306 | */ | ||
307 | switch (no) { | ||
308 | case arg_SI: | ||
309 | rv = (unsigned char *)®s->si; | ||
310 | break; | ||
311 | case arg_DI: | ||
312 | rv = (unsigned char *)®s->di; | ||
313 | break; | ||
314 | case arg_BP: | ||
315 | rv = (unsigned char *)®s->bp; | ||
316 | break; | ||
317 | case arg_SP: | ||
318 | rv = (unsigned char *)®s->sp; | ||
319 | break; | ||
320 | default: | ||
321 | break; | ||
322 | } | ||
323 | } else { | ||
324 | switch (no) { | ||
325 | case arg_AH: | ||
326 | rv = 1 + (unsigned char *)®s->ax; | ||
327 | break; | ||
328 | case arg_BH: | ||
329 | rv = 1 + (unsigned char *)®s->bx; | ||
330 | break; | ||
331 | case arg_CH: | ||
332 | rv = 1 + (unsigned char *)®s->cx; | ||
333 | break; | ||
334 | case arg_DH: | ||
335 | rv = 1 + (unsigned char *)®s->dx; | ||
336 | break; | ||
337 | default: | ||
338 | break; | ||
339 | } | ||
340 | } | ||
341 | |||
342 | if (!rv) | ||
343 | printk(KERN_ERR "mmiotrace: Error reg no# %d\n", no); | ||
344 | |||
300 | return rv; | 345 | return rv; |
301 | } | 346 | } |
302 | 347 | ||
@@ -368,11 +413,12 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs) | |||
368 | unsigned char mod_rm; | 413 | unsigned char mod_rm; |
369 | int reg; | 414 | int reg; |
370 | unsigned char *p; | 415 | unsigned char *p; |
371 | int i, shorted, enlarged, rexr; | 416 | struct prefix_bits prf; |
417 | int i; | ||
372 | unsigned long rv; | 418 | unsigned long rv; |
373 | 419 | ||
374 | p = (unsigned char *)ins_addr; | 420 | p = (unsigned char *)ins_addr; |
375 | p += skip_prefix(p, &shorted, &enlarged, &rexr); | 421 | p += skip_prefix(p, &prf); |
376 | p += get_opcode(p, &opcode); | 422 | p += get_opcode(p, &opcode); |
377 | for (i = 0; i < ARRAY_SIZE(reg_rop); i++) | 423 | for (i = 0; i < ARRAY_SIZE(reg_rop); i++) |
378 | if (reg_rop[i] == opcode) { | 424 | if (reg_rop[i] == opcode) { |
@@ -392,10 +438,10 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs) | |||
392 | 438 | ||
393 | do_work: | 439 | do_work: |
394 | mod_rm = *p; | 440 | mod_rm = *p; |
395 | reg = ((mod_rm >> 3) & 0x7) | (rexr << 3); | 441 | reg = ((mod_rm >> 3) & 0x7) | (prf.rexr << 3); |
396 | switch (get_ins_reg_width(ins_addr)) { | 442 | switch (get_ins_reg_width(ins_addr)) { |
397 | case 1: | 443 | case 1: |
398 | return *get_reg_w8(reg, regs); | 444 | return *get_reg_w8(reg, prf.rex, regs); |
399 | 445 | ||
400 | case 2: | 446 | case 2: |
401 | return *(unsigned short *)get_reg_w32(reg, regs); | 447 | return *(unsigned short *)get_reg_w32(reg, regs); |
@@ -422,11 +468,12 @@ unsigned long get_ins_imm_val(unsigned long ins_addr) | |||
422 | unsigned char mod_rm; | 468 | unsigned char mod_rm; |
423 | unsigned char mod; | 469 | unsigned char mod; |
424 | unsigned char *p; | 470 | unsigned char *p; |
425 | int i, shorted, enlarged, rexr; | 471 | struct prefix_bits prf; |
472 | int i; | ||
426 | unsigned long rv; | 473 | unsigned long rv; |
427 | 474 | ||
428 | p = (unsigned char *)ins_addr; | 475 | p = (unsigned char *)ins_addr; |
429 | p += skip_prefix(p, &shorted, &enlarged, &rexr); | 476 | p += skip_prefix(p, &prf); |
430 | p += get_opcode(p, &opcode); | 477 | p += get_opcode(p, &opcode); |
431 | for (i = 0; i < ARRAY_SIZE(imm_wop); i++) | 478 | for (i = 0; i < ARRAY_SIZE(imm_wop); i++) |
432 | if (imm_wop[i] == opcode) { | 479 | if (imm_wop[i] == opcode) { |
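The pf_in.c rework bundles the prefix flags into struct prefix_bits and additionally records whether any REX prefix (0x40-0x4f) was seen, because with a REX prefix present the byte-register encodings 4-7 select SPL/BPL/SIL/DIL instead of AH/CH/DH/BH, which is what the reworked get_reg_w8() distinguishes. A reduced sketch of the per-byte classification (struct and function names here are illustrative; the bit tests match the ones above):

/* Sketch: classify one x86 prefix byte into the flags gathered above. */
struct prefix_flags {
	int shorted;	/* 0x66 operand-size override: 16-bit operand */
	int enlarged;	/* REX.W (0x48-0x4f): 64-bit operand */
	int rexr;	/* REX.R: high bit of the ModRM reg field */
	int rex;	/* any REX prefix (0x40-0x4f) seen */
};

static void classify_prefix(unsigned char p, struct prefix_flags *prf)
{
	if (p == 0x66)
		prf->shorted = 1;
	if ((p & 0xf8) == 0x48)		/* REX with W set */
		prf->enlarged = 1;
	if ((p & 0xf4) == 0x44)		/* REX with R set */
		prf->rexr = 1;
	if ((p & 0xf0) == 0x40)		/* any REX prefix */
		prf->rex = 1;
}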
diff --git a/arch/x86/mm/testmmiotrace.c b/arch/x86/mm/testmmiotrace.c index d877c5b423ef..ab50a8d7402c 100644 --- a/arch/x86/mm/testmmiotrace.c +++ b/arch/x86/mm/testmmiotrace.c | |||
@@ -3,6 +3,7 @@ | |||
3 | */ | 3 | */ |
4 | #include <linux/module.h> | 4 | #include <linux/module.h> |
5 | #include <linux/io.h> | 5 | #include <linux/io.h> |
6 | #include <linux/mmiotrace.h> | ||
6 | 7 | ||
7 | #define MODULE_NAME "testmmiotrace" | 8 | #define MODULE_NAME "testmmiotrace" |
8 | 9 | ||
@@ -13,6 +14,7 @@ MODULE_PARM_DESC(mmio_address, "Start address of the mapping of 16 kB."); | |||
13 | static void do_write_test(void __iomem *p) | 14 | static void do_write_test(void __iomem *p) |
14 | { | 15 | { |
15 | unsigned int i; | 16 | unsigned int i; |
17 | mmiotrace_printk("Write test.\n"); | ||
16 | for (i = 0; i < 256; i++) | 18 | for (i = 0; i < 256; i++) |
17 | iowrite8(i, p + i); | 19 | iowrite8(i, p + i); |
18 | for (i = 1024; i < (5 * 1024); i += 2) | 20 | for (i = 1024; i < (5 * 1024); i += 2) |
@@ -24,6 +26,7 @@ static void do_write_test(void __iomem *p) | |||
24 | static void do_read_test(void __iomem *p) | 26 | static void do_read_test(void __iomem *p) |
25 | { | 27 | { |
26 | unsigned int i; | 28 | unsigned int i; |
29 | mmiotrace_printk("Read test.\n"); | ||
27 | for (i = 0; i < 256; i++) | 30 | for (i = 0; i < 256; i++) |
28 | ioread8(p + i); | 31 | ioread8(p + i); |
29 | for (i = 1024; i < (5 * 1024); i += 2) | 32 | for (i = 1024; i < (5 * 1024); i += 2) |
@@ -39,6 +42,7 @@ static void do_test(void) | |||
39 | pr_err(MODULE_NAME ": could not ioremap, aborting.\n"); | 42 | pr_err(MODULE_NAME ": could not ioremap, aborting.\n"); |
40 | return; | 43 | return; |
41 | } | 44 | } |
45 | mmiotrace_printk("ioremap returned %p.\n", p); | ||
42 | do_write_test(p); | 46 | do_write_test(p); |
43 | do_read_test(p); | 47 | do_read_test(p); |
44 | iounmap(p); | 48 | iounmap(p); |
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index 006599db0dc7..bf69dbe08bff 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c | |||
@@ -493,7 +493,7 @@ static int pirq_amd756_get(struct pci_dev *router, struct pci_dev *dev, int pirq | |||
493 | if (pirq <= 4) | 493 | if (pirq <= 4) |
494 | irq = read_config_nybble(router, 0x56, pirq - 1); | 494 | irq = read_config_nybble(router, 0x56, pirq - 1); |
495 | dev_info(&dev->dev, | 495 | dev_info(&dev->dev, |
496 | "AMD756: dev [%04x/%04x], router PIRQ %d get IRQ %d\n", | 496 | "AMD756: dev [%04x:%04x], router PIRQ %d get IRQ %d\n", |
497 | dev->vendor, dev->device, pirq, irq); | 497 | dev->vendor, dev->device, pirq, irq); |
498 | return irq; | 498 | return irq; |
499 | } | 499 | } |
@@ -501,7 +501,7 @@ static int pirq_amd756_get(struct pci_dev *router, struct pci_dev *dev, int pirq | |||
501 | static int pirq_amd756_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) | 501 | static int pirq_amd756_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) |
502 | { | 502 | { |
503 | dev_info(&dev->dev, | 503 | dev_info(&dev->dev, |
504 | "AMD756: dev [%04x/%04x], router PIRQ %d set IRQ %d\n", | 504 | "AMD756: dev [%04x:%04x], router PIRQ %d set IRQ %d\n", |
505 | dev->vendor, dev->device, pirq, irq); | 505 | dev->vendor, dev->device, pirq, irq); |
506 | if (pirq <= 4) | 506 | if (pirq <= 4) |
507 | write_config_nybble(router, 0x56, pirq - 1, irq); | 507 | write_config_nybble(router, 0x56, pirq - 1, irq); |
@@ -590,13 +590,20 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route | |||
590 | case PCI_DEVICE_ID_INTEL_ICH10_1: | 590 | case PCI_DEVICE_ID_INTEL_ICH10_1: |
591 | case PCI_DEVICE_ID_INTEL_ICH10_2: | 591 | case PCI_DEVICE_ID_INTEL_ICH10_2: |
592 | case PCI_DEVICE_ID_INTEL_ICH10_3: | 592 | case PCI_DEVICE_ID_INTEL_ICH10_3: |
593 | case PCI_DEVICE_ID_INTEL_PCH_0: | ||
594 | case PCI_DEVICE_ID_INTEL_PCH_1: | ||
595 | r->name = "PIIX/ICH"; | 593 | r->name = "PIIX/ICH"; |
596 | r->get = pirq_piix_get; | 594 | r->get = pirq_piix_get; |
597 | r->set = pirq_piix_set; | 595 | r->set = pirq_piix_set; |
598 | return 1; | 596 | return 1; |
599 | } | 597 | } |
598 | |||
599 | if ((device >= PCI_DEVICE_ID_INTEL_PCH_LPC_MIN) && | ||
600 | (device <= PCI_DEVICE_ID_INTEL_PCH_LPC_MAX)) { | ||
601 | r->name = "PIIX/ICH"; | ||
602 | r->get = pirq_piix_get; | ||
603 | r->set = pirq_piix_set; | ||
604 | return 1; | ||
605 | } | ||
606 | |||
600 | return 0; | 607 | return 0; |
601 | } | 608 | } |
602 | 609 | ||
@@ -823,7 +830,7 @@ static void __init pirq_find_router(struct irq_router *r) | |||
823 | r->get = NULL; | 830 | r->get = NULL; |
824 | r->set = NULL; | 831 | r->set = NULL; |
825 | 832 | ||
826 | DBG(KERN_DEBUG "PCI: Attempting to find IRQ router for %04x:%04x\n", | 833 | DBG(KERN_DEBUG "PCI: Attempting to find IRQ router for [%04x:%04x]\n", |
827 | rt->rtr_vendor, rt->rtr_device); | 834 | rt->rtr_vendor, rt->rtr_device); |
828 | 835 | ||
829 | pirq_router_dev = pci_get_bus_and_slot(rt->rtr_bus, rt->rtr_devfn); | 836 | pirq_router_dev = pci_get_bus_and_slot(rt->rtr_bus, rt->rtr_devfn); |
@@ -843,7 +850,7 @@ static void __init pirq_find_router(struct irq_router *r) | |||
843 | h->probe(r, pirq_router_dev, pirq_router_dev->device)) | 850 | h->probe(r, pirq_router_dev, pirq_router_dev->device)) |
844 | break; | 851 | break; |
845 | } | 852 | } |
846 | dev_info(&pirq_router_dev->dev, "%s IRQ router [%04x/%04x]\n", | 853 | dev_info(&pirq_router_dev->dev, "%s IRQ router [%04x:%04x]\n", |
847 | pirq_router.name, | 854 | pirq_router.name, |
848 | pirq_router_dev->vendor, pirq_router_dev->device); | 855 | pirq_router_dev->vendor, pirq_router_dev->device); |
849 | 856 | ||
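Rather than keep adding every new PCH LPC device ID to the switch statement, the router probe now matches the whole ID window. The shape of that check in isolation (a sketch; the *_MIN/*_MAX constants are the ones referenced above, the function name is invented):

/* Sketch: treat any LPC bridge in the PCH ID window as a PIIX/ICH-style
 * IRQ router instead of enumerating each ID individually. */
static int is_pch_lpc(u16 device)
{
	return device >= PCI_DEVICE_ID_INTEL_PCH_LPC_MIN &&
	       device <= PCI_DEVICE_ID_INTEL_PCH_LPC_MAX;
}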
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c index 28b85ab8422e..bb042608c602 100644 --- a/arch/x86/xen/irq.c +++ b/arch/x86/xen/irq.c | |||
@@ -21,7 +21,6 @@ void xen_force_evtchn_callback(void) | |||
21 | 21 | ||
22 | static void __init __xen_init_IRQ(void) | 22 | static void __init __xen_init_IRQ(void) |
23 | { | 23 | { |
24 | #ifdef CONFIG_X86_64 | ||
25 | int i; | 24 | int i; |
26 | 25 | ||
27 | /* Create identity vector->irq map */ | 26 | /* Create identity vector->irq map */ |
@@ -31,7 +30,6 @@ static void __init __xen_init_IRQ(void) | |||
31 | for_each_possible_cpu(cpu) | 30 | for_each_possible_cpu(cpu) |
32 | per_cpu(vector_irq, cpu)[i] = i; | 31 | per_cpu(vector_irq, cpu)[i] = i; |
33 | } | 32 | } |
34 | #endif /* CONFIG_X86_64 */ | ||
35 | 33 | ||
36 | xen_init_IRQ(); | 34 | xen_init_IRQ(); |
37 | } | 35 | } |
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index dd71e3a021cd..5601506f2dd9 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c | |||
@@ -241,7 +241,7 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enabl | |||
241 | ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq)); | 241 | ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq)); |
242 | } while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */ | 242 | } while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */ |
243 | 243 | ||
244 | kstat_this_cpu.irqs[irq]++; | 244 | kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)); |
245 | 245 | ||
246 | out: | 246 | out: |
247 | raw_local_irq_restore(flags); | 247 | raw_local_irq_restore(flags); |