author     Glenn Elliott <gelliott@cs.unc.edu>   2012-03-04 19:47:13 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>   2012-03-04 19:47:13 -0500
commit     c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree       ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /arch/alpha
parent     ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent     6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'arch/alpha')
71 files changed, 667 insertions, 989 deletions
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index b9647bb66d13..60219bf94198 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig | |||
@@ -1,7 +1,3 @@ | |||
1 | # | ||
2 | # For a description of the syntax of this configuration file, | ||
3 | # see Documentation/kbuild/kconfig-language.txt. | ||
4 | # | ||
5 | config ALPHA | 1 | config ALPHA |
6 | bool | 2 | bool |
7 | default y | 3 | default y |
@@ -9,8 +5,14 @@ config ALPHA | |||
9 | select HAVE_IDE | 5 | select HAVE_IDE |
10 | select HAVE_OPROFILE | 6 | select HAVE_OPROFILE |
11 | select HAVE_SYSCALL_WRAPPERS | 7 | select HAVE_SYSCALL_WRAPPERS |
8 | select HAVE_IRQ_WORK | ||
12 | select HAVE_PERF_EVENTS | 9 | select HAVE_PERF_EVENTS |
13 | select HAVE_DMA_ATTRS | 10 | select HAVE_DMA_ATTRS |
11 | select HAVE_GENERIC_HARDIRQS | ||
12 | select GENERIC_IRQ_PROBE | ||
13 | select AUTO_IRQ_AFFINITY if SMP | ||
14 | select GENERIC_IRQ_SHOW | ||
15 | select ARCH_WANT_OPTIONAL_GPIOLIB | ||
14 | help | 16 | help |
15 | The Alpha is a 64-bit general-purpose processor designed and | 17 | The Alpha is a 64-bit general-purpose processor designed and |
16 | marketed by the Digital Equipment Corporation of blessed memory, | 18 | marketed by the Digital Equipment Corporation of blessed memory, |
@@ -39,10 +41,6 @@ config ARCH_HAS_ILOG2_U64 | |||
39 | bool | 41 | bool |
40 | default n | 42 | default n |
41 | 43 | ||
42 | config GENERIC_FIND_NEXT_BIT | ||
43 | bool | ||
44 | default y | ||
45 | |||
46 | config GENERIC_CALIBRATE_DELAY | 44 | config GENERIC_CALIBRATE_DELAY |
47 | bool | 45 | bool |
48 | default y | 46 | default y |
@@ -50,10 +48,16 @@ config GENERIC_CALIBRATE_DELAY | |||
50 | config GENERIC_CMOS_UPDATE | 48 | config GENERIC_CMOS_UPDATE |
51 | def_bool y | 49 | def_bool y |
52 | 50 | ||
51 | config GENERIC_GPIO | ||
52 | def_bool y | ||
53 | |||
53 | config ZONE_DMA | 54 | config ZONE_DMA |
54 | bool | 55 | bool |
55 | default y | 56 | default y |
56 | 57 | ||
58 | config ARCH_DMA_ADDR_T_64BIT | ||
59 | def_bool y | ||
60 | |||
57 | config NEED_DMA_MAP_STATE | 61 | config NEED_DMA_MAP_STATE |
58 | def_bool y | 62 | def_bool y |
59 | 63 | ||
@@ -68,19 +72,6 @@ config GENERIC_IOMAP | |||
68 | bool | 72 | bool |
69 | default n | 73 | default n |
70 | 74 | ||
71 | config GENERIC_HARDIRQS | ||
72 | bool | ||
73 | default y | ||
74 | |||
75 | config GENERIC_IRQ_PROBE | ||
76 | bool | ||
77 | default y | ||
78 | |||
79 | config AUTO_IRQ_AFFINITY | ||
80 | bool | ||
81 | depends on SMP | ||
82 | default y | ||
83 | |||
84 | source "init/Kconfig" | 75 | source "init/Kconfig" |
85 | source "kernel/Kconfig.freezer" | 76 | source "kernel/Kconfig.freezer" |
86 | 77 | ||
diff --git a/arch/alpha/include/asm/bitops.h b/arch/alpha/include/asm/bitops.h index adfab8a21dfe..85b815215776 100644 --- a/arch/alpha/include/asm/bitops.h +++ b/arch/alpha/include/asm/bitops.h | |||
@@ -454,13 +454,11 @@ sched_find_first_bit(const unsigned long b[2]) | |||
454 | return __ffs(tmp) + ofs; | 454 | return __ffs(tmp) + ofs; |
455 | } | 455 | } |
456 | 456 | ||
457 | #include <asm-generic/bitops/ext2-non-atomic.h> | 457 | #include <asm-generic/bitops/le.h> |
458 | 458 | ||
459 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) | 459 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) |
460 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) | 460 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) |
461 | 461 | ||
462 | #include <asm-generic/bitops/minix.h> | ||
463 | |||
464 | #endif /* __KERNEL__ */ | 462 | #endif /* __KERNEL__ */ |
465 | 463 | ||
466 | #endif /* _ALPHA_BITOPS_H */ | 464 | #endif /* _ALPHA_BITOPS_H */ |
diff --git a/arch/alpha/include/asm/cacheflush.h b/arch/alpha/include/asm/cacheflush.h index 012f1243b1c1..a9cb6aa447aa 100644 --- a/arch/alpha/include/asm/cacheflush.h +++ b/arch/alpha/include/asm/cacheflush.h | |||
@@ -63,7 +63,7 @@ extern void flush_icache_user_range(struct vm_area_struct *vma, | |||
63 | struct page *page, unsigned long addr, int len); | 63 | struct page *page, unsigned long addr, int len); |
64 | #endif | 64 | #endif |
65 | 65 | ||
66 | /* This is used only in do_no_page and do_swap_page. */ | 66 | /* This is used only in __do_fault and do_swap_page. */ |
67 | #define flush_icache_page(vma, page) \ | 67 | #define flush_icache_page(vma, page) \ |
68 | flush_icache_user_range((vma), (page), 0, 0) | 68 | flush_icache_user_range((vma), (page), 0, 0) |
69 | 69 | ||
diff --git a/arch/alpha/include/asm/core_mcpcia.h b/arch/alpha/include/asm/core_mcpcia.h index 21ac53383b37..9f67a056b461 100644 --- a/arch/alpha/include/asm/core_mcpcia.h +++ b/arch/alpha/include/asm/core_mcpcia.h | |||
@@ -247,7 +247,7 @@ struct el_MCPCIA_uncorrected_frame_mcheck { | |||
247 | #define vip volatile int __force * | 247 | #define vip volatile int __force * |
248 | #define vuip volatile unsigned int __force * | 248 | #define vuip volatile unsigned int __force * |
249 | 249 | ||
250 | #ifdef MCPCIA_ONE_HAE_WINDOW | 250 | #ifndef MCPCIA_ONE_HAE_WINDOW |
251 | #define MCPCIA_FROB_MMIO \ | 251 | #define MCPCIA_FROB_MMIO \ |
252 | if (__mcpcia_is_mmio(hose)) { \ | 252 | if (__mcpcia_is_mmio(hose)) { \ |
253 | set_hae(hose & 0xffffffff); \ | 253 | set_hae(hose & 0xffffffff); \ |
diff --git a/arch/alpha/include/asm/core_t2.h b/arch/alpha/include/asm/core_t2.h index 471c07292e0b..91b46801b290 100644 --- a/arch/alpha/include/asm/core_t2.h +++ b/arch/alpha/include/asm/core_t2.h | |||
@@ -1,6 +1,9 @@ | |||
1 | #ifndef __ALPHA_T2__H__ | 1 | #ifndef __ALPHA_T2__H__ |
2 | #define __ALPHA_T2__H__ | 2 | #define __ALPHA_T2__H__ |
3 | 3 | ||
4 | /* Fit everything into one 128MB HAE window. */ | ||
5 | #define T2_ONE_HAE_WINDOW 1 | ||
6 | |||
4 | #include <linux/types.h> | 7 | #include <linux/types.h> |
5 | #include <linux/spinlock.h> | 8 | #include <linux/spinlock.h> |
6 | #include <asm/compiler.h> | 9 | #include <asm/compiler.h> |
@@ -19,7 +22,7 @@ | |||
19 | * | 22 | * |
20 | */ | 23 | */ |
21 | 24 | ||
22 | #define T2_MEM_R1_MASK 0x07ffffff /* Mem sparse region 1 mask is 26 bits */ | 25 | #define T2_MEM_R1_MASK 0x07ffffff /* Mem sparse region 1 mask is 27 bits */ |
23 | 26 | ||
24 | /* GAMMA-SABLE is a SABLE with EV5-based CPUs */ | 27 | /* GAMMA-SABLE is a SABLE with EV5-based CPUs */ |
25 | /* All LYNX machines, EV4 or EV5, use the GAMMA bias also */ | 28 | /* All LYNX machines, EV4 or EV5, use the GAMMA bias also */ |
@@ -85,7 +88,9 @@ | |||
85 | #define T2_DIR (IDENT_ADDR + GAMMA_BIAS + 0x38e0004a0UL) | 88 | #define T2_DIR (IDENT_ADDR + GAMMA_BIAS + 0x38e0004a0UL) |
86 | #define T2_ICE (IDENT_ADDR + GAMMA_BIAS + 0x38e0004c0UL) | 89 | #define T2_ICE (IDENT_ADDR + GAMMA_BIAS + 0x38e0004c0UL) |
87 | 90 | ||
91 | #ifndef T2_ONE_HAE_WINDOW | ||
88 | #define T2_HAE_ADDRESS T2_HAE_1 | 92 | #define T2_HAE_ADDRESS T2_HAE_1 |
93 | #endif | ||
89 | 94 | ||
90 | /* T2 CSRs are in the non-cachable primary IO space from 3.8000.0000 to | 95 | /* T2 CSRs are in the non-cachable primary IO space from 3.8000.0000 to |
91 | 3.8fff.ffff | 96 | 3.8fff.ffff |
@@ -429,13 +434,15 @@ extern inline void t2_outl(u32 b, unsigned long addr) | |||
429 | * | 434 | * |
430 | */ | 435 | */ |
431 | 436 | ||
437 | #ifdef T2_ONE_HAE_WINDOW | ||
438 | #define t2_set_hae | ||
439 | #else | ||
432 | #define t2_set_hae { \ | 440 | #define t2_set_hae { \ |
433 | msb = addr >> 27; \ | 441 | unsigned long msb = addr >> 27; \ |
434 | addr &= T2_MEM_R1_MASK; \ | 442 | addr &= T2_MEM_R1_MASK; \ |
435 | set_hae(msb); \ | 443 | set_hae(msb); \ |
436 | } | 444 | } |
437 | 445 | #endif | |
438 | extern raw_spinlock_t t2_hae_lock; | ||
439 | 446 | ||
440 | /* | 447 | /* |
441 | * NOTE: take T2_DENSE_MEM off in each readX/writeX routine, since | 448 | * NOTE: take T2_DENSE_MEM off in each readX/writeX routine, since |
@@ -446,28 +453,22 @@ extern raw_spinlock_t t2_hae_lock; | |||
446 | __EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr) | 453 | __EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr) |
447 | { | 454 | { |
448 | unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; | 455 | unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; |
449 | unsigned long result, msb; | 456 | unsigned long result; |
450 | unsigned long flags; | ||
451 | raw_spin_lock_irqsave(&t2_hae_lock, flags); | ||
452 | 457 | ||
453 | t2_set_hae; | 458 | t2_set_hae; |
454 | 459 | ||
455 | result = *(vip) ((addr << 5) + T2_SPARSE_MEM + 0x00); | 460 | result = *(vip) ((addr << 5) + T2_SPARSE_MEM + 0x00); |
456 | raw_spin_unlock_irqrestore(&t2_hae_lock, flags); | ||
457 | return __kernel_extbl(result, addr & 3); | 461 | return __kernel_extbl(result, addr & 3); |
458 | } | 462 | } |
459 | 463 | ||
460 | __EXTERN_INLINE u16 t2_readw(const volatile void __iomem *xaddr) | 464 | __EXTERN_INLINE u16 t2_readw(const volatile void __iomem *xaddr) |
461 | { | 465 | { |
462 | unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; | 466 | unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; |
463 | unsigned long result, msb; | 467 | unsigned long result; |
464 | unsigned long flags; | ||
465 | raw_spin_lock_irqsave(&t2_hae_lock, flags); | ||
466 | 468 | ||
467 | t2_set_hae; | 469 | t2_set_hae; |
468 | 470 | ||
469 | result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08); | 471 | result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08); |
470 | raw_spin_unlock_irqrestore(&t2_hae_lock, flags); | ||
471 | return __kernel_extwl(result, addr & 3); | 472 | return __kernel_extwl(result, addr & 3); |
472 | } | 473 | } |
473 | 474 | ||
@@ -478,59 +479,47 @@ __EXTERN_INLINE u16 t2_readw(const volatile void __iomem *xaddr) | |||
478 | __EXTERN_INLINE u32 t2_readl(const volatile void __iomem *xaddr) | 479 | __EXTERN_INLINE u32 t2_readl(const volatile void __iomem *xaddr) |
479 | { | 480 | { |
480 | unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; | 481 | unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; |
481 | unsigned long result, msb; | 482 | unsigned long result; |
482 | unsigned long flags; | ||
483 | raw_spin_lock_irqsave(&t2_hae_lock, flags); | ||
484 | 483 | ||
485 | t2_set_hae; | 484 | t2_set_hae; |
486 | 485 | ||
487 | result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18); | 486 | result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18); |
488 | raw_spin_unlock_irqrestore(&t2_hae_lock, flags); | ||
489 | return result & 0xffffffffUL; | 487 | return result & 0xffffffffUL; |
490 | } | 488 | } |
491 | 489 | ||
492 | __EXTERN_INLINE u64 t2_readq(const volatile void __iomem *xaddr) | 490 | __EXTERN_INLINE u64 t2_readq(const volatile void __iomem *xaddr) |
493 | { | 491 | { |
494 | unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; | 492 | unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; |
495 | unsigned long r0, r1, work, msb; | 493 | unsigned long r0, r1, work; |
496 | unsigned long flags; | ||
497 | raw_spin_lock_irqsave(&t2_hae_lock, flags); | ||
498 | 494 | ||
499 | t2_set_hae; | 495 | t2_set_hae; |
500 | 496 | ||
501 | work = (addr << 5) + T2_SPARSE_MEM + 0x18; | 497 | work = (addr << 5) + T2_SPARSE_MEM + 0x18; |
502 | r0 = *(vuip)(work); | 498 | r0 = *(vuip)(work); |
503 | r1 = *(vuip)(work + (4 << 5)); | 499 | r1 = *(vuip)(work + (4 << 5)); |
504 | raw_spin_unlock_irqrestore(&t2_hae_lock, flags); | ||
505 | return r1 << 32 | r0; | 500 | return r1 << 32 | r0; |
506 | } | 501 | } |
507 | 502 | ||
508 | __EXTERN_INLINE void t2_writeb(u8 b, volatile void __iomem *xaddr) | 503 | __EXTERN_INLINE void t2_writeb(u8 b, volatile void __iomem *xaddr) |
509 | { | 504 | { |
510 | unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; | 505 | unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; |
511 | unsigned long msb, w; | 506 | unsigned long w; |
512 | unsigned long flags; | ||
513 | raw_spin_lock_irqsave(&t2_hae_lock, flags); | ||
514 | 507 | ||
515 | t2_set_hae; | 508 | t2_set_hae; |
516 | 509 | ||
517 | w = __kernel_insbl(b, addr & 3); | 510 | w = __kernel_insbl(b, addr & 3); |
518 | *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) = w; | 511 | *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) = w; |
519 | raw_spin_unlock_irqrestore(&t2_hae_lock, flags); | ||
520 | } | 512 | } |
521 | 513 | ||
522 | __EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr) | 514 | __EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr) |
523 | { | 515 | { |
524 | unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; | 516 | unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; |
525 | unsigned long msb, w; | 517 | unsigned long w; |
526 | unsigned long flags; | ||
527 | raw_spin_lock_irqsave(&t2_hae_lock, flags); | ||
528 | 518 | ||
529 | t2_set_hae; | 519 | t2_set_hae; |
530 | 520 | ||
531 | w = __kernel_inswl(b, addr & 3); | 521 | w = __kernel_inswl(b, addr & 3); |
532 | *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08) = w; | 522 | *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08) = w; |
533 | raw_spin_unlock_irqrestore(&t2_hae_lock, flags); | ||
534 | } | 523 | } |
535 | 524 | ||
536 | /* | 525 | /* |
@@ -540,29 +529,22 @@ __EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr) | |||
540 | __EXTERN_INLINE void t2_writel(u32 b, volatile void __iomem *xaddr) | 529 | __EXTERN_INLINE void t2_writel(u32 b, volatile void __iomem *xaddr) |
541 | { | 530 | { |
542 | unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; | 531 | unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; |
543 | unsigned long msb; | ||
544 | unsigned long flags; | ||
545 | raw_spin_lock_irqsave(&t2_hae_lock, flags); | ||
546 | 532 | ||
547 | t2_set_hae; | 533 | t2_set_hae; |
548 | 534 | ||
549 | *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b; | 535 | *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b; |
550 | raw_spin_unlock_irqrestore(&t2_hae_lock, flags); | ||
551 | } | 536 | } |
552 | 537 | ||
553 | __EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr) | 538 | __EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr) |
554 | { | 539 | { |
555 | unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; | 540 | unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; |
556 | unsigned long msb, work; | 541 | unsigned long work; |
557 | unsigned long flags; | ||
558 | raw_spin_lock_irqsave(&t2_hae_lock, flags); | ||
559 | 542 | ||
560 | t2_set_hae; | 543 | t2_set_hae; |
561 | 544 | ||
562 | work = (addr << 5) + T2_SPARSE_MEM + 0x18; | 545 | work = (addr << 5) + T2_SPARSE_MEM + 0x18; |
563 | *(vuip)work = b; | 546 | *(vuip)work = b; |
564 | *(vuip)(work + (4 << 5)) = b >> 32; | 547 | *(vuip)(work + (4 << 5)) = b >> 32; |
565 | raw_spin_unlock_irqrestore(&t2_hae_lock, flags); | ||
566 | } | 548 | } |
567 | 549 | ||
568 | __EXTERN_INLINE void __iomem *t2_ioportmap(unsigned long addr) | 550 | __EXTERN_INLINE void __iomem *t2_ioportmap(unsigned long addr) |
diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h index 9baae8afe8a3..da5449e22175 100644 --- a/arch/alpha/include/asm/elf.h +++ b/arch/alpha/include/asm/elf.h | |||
@@ -101,7 +101,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; | |||
101 | 101 | ||
102 | #define ELF_PLAT_INIT(_r, load_addr) _r->r0 = 0 | 102 | #define ELF_PLAT_INIT(_r, load_addr) _r->r0 = 0 |
103 | 103 | ||
104 | /* The registers are layed out in pt_regs for PAL and syscall | 104 | /* The registers are laid out in pt_regs for PAL and syscall |
105 | convenience. Re-order them for the linear elf_gregset_t. */ | 105 | convenience. Re-order them for the linear elf_gregset_t. */ |
106 | 106 | ||
107 | struct pt_regs; | 107 | struct pt_regs; |
diff --git a/arch/alpha/include/asm/errno.h b/arch/alpha/include/asm/errno.h index 98099bda9370..e5f29ca28180 100644 --- a/arch/alpha/include/asm/errno.h +++ b/arch/alpha/include/asm/errno.h | |||
@@ -122,4 +122,6 @@ | |||
122 | 122 | ||
123 | #define ERFKILL 138 /* Operation not possible due to RF-kill */ | 123 | #define ERFKILL 138 /* Operation not possible due to RF-kill */ |
124 | 124 | ||
125 | #define EHWPOISON 139 /* Memory page has hardware error */ | ||
126 | |||
125 | #endif | 127 | #endif |
diff --git a/arch/alpha/include/asm/fcntl.h b/arch/alpha/include/asm/fcntl.h index 70145cbb21cb..1b71ca70c9f6 100644 --- a/arch/alpha/include/asm/fcntl.h +++ b/arch/alpha/include/asm/fcntl.h | |||
@@ -31,6 +31,8 @@ | |||
31 | #define __O_SYNC 020000000 | 31 | #define __O_SYNC 020000000 |
32 | #define O_SYNC (__O_SYNC|O_DSYNC) | 32 | #define O_SYNC (__O_SYNC|O_DSYNC) |
33 | 33 | ||
34 | #define O_PATH 040000000 | ||
35 | |||
34 | #define F_GETLK 7 | 36 | #define F_GETLK 7 |
35 | #define F_SETLK 8 | 37 | #define F_SETLK 8 |
36 | #define F_SETLKW 9 | 38 | #define F_SETLKW 9 |
diff --git a/arch/alpha/include/asm/futex.h b/arch/alpha/include/asm/futex.h index 945de222ab91..e8a761aee088 100644 --- a/arch/alpha/include/asm/futex.h +++ b/arch/alpha/include/asm/futex.h | |||
@@ -29,7 +29,7 @@ | |||
29 | : "r" (uaddr), "r"(oparg) \ | 29 | : "r" (uaddr), "r"(oparg) \ |
30 | : "memory") | 30 | : "memory") |
31 | 31 | ||
32 | static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | 32 | static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) |
33 | { | 33 | { |
34 | int op = (encoded_op >> 28) & 7; | 34 | int op = (encoded_op >> 28) & 7; |
35 | int cmp = (encoded_op >> 24) & 15; | 35 | int cmp = (encoded_op >> 24) & 15; |
@@ -39,7 +39,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | |||
39 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) | 39 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) |
40 | oparg = 1 << oparg; | 40 | oparg = 1 << oparg; |
41 | 41 | ||
42 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | 42 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
43 | return -EFAULT; | 43 | return -EFAULT; |
44 | 44 | ||
45 | pagefault_disable(); | 45 | pagefault_disable(); |
@@ -81,21 +81,23 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | |||
81 | } | 81 | } |
82 | 82 | ||
83 | static inline int | 83 | static inline int |
84 | futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | 84 | futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, |
85 | u32 oldval, u32 newval) | ||
85 | { | 86 | { |
86 | int prev, cmp; | 87 | int ret = 0, cmp; |
88 | u32 prev; | ||
87 | 89 | ||
88 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | 90 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
89 | return -EFAULT; | 91 | return -EFAULT; |
90 | 92 | ||
91 | __asm__ __volatile__ ( | 93 | __asm__ __volatile__ ( |
92 | __ASM_SMP_MB | 94 | __ASM_SMP_MB |
93 | "1: ldl_l %0,0(%2)\n" | 95 | "1: ldl_l %1,0(%3)\n" |
94 | " cmpeq %0,%3,%1\n" | 96 | " cmpeq %1,%4,%2\n" |
95 | " beq %1,3f\n" | 97 | " beq %2,3f\n" |
96 | " mov %4,%1\n" | 98 | " mov %5,%2\n" |
97 | "2: stl_c %1,0(%2)\n" | 99 | "2: stl_c %2,0(%3)\n" |
98 | " beq %1,4f\n" | 100 | " beq %2,4f\n" |
99 | "3: .subsection 2\n" | 101 | "3: .subsection 2\n" |
100 | "4: br 1b\n" | 102 | "4: br 1b\n" |
101 | " .previous\n" | 103 | " .previous\n" |
@@ -105,11 +107,12 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | |||
105 | " .long 2b-.\n" | 107 | " .long 2b-.\n" |
106 | " lda $31,3b-2b(%0)\n" | 108 | " lda $31,3b-2b(%0)\n" |
107 | " .previous\n" | 109 | " .previous\n" |
108 | : "=&r"(prev), "=&r"(cmp) | 110 | : "+r"(ret), "=&r"(prev), "=&r"(cmp) |
109 | : "r"(uaddr), "r"((long)oldval), "r"(newval) | 111 | : "r"(uaddr), "r"((long)oldval), "r"(newval) |
110 | : "memory"); | 112 | : "memory"); |
111 | 113 | ||
112 | return prev; | 114 | *uval = prev; |
115 | return ret; | ||
113 | } | 116 | } |
114 | 117 | ||
115 | #endif /* __KERNEL__ */ | 118 | #endif /* __KERNEL__ */ |
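The futex hunk above follows a cross-architecture API change: futex_atomic_cmpxchg_inatomic() now takes the user word as a u32 __user *, returns 0 or -EFAULT, and hands the previously stored value back through a separate *uval argument instead of returning it. Below is a minimal user-space sketch of that calling convention — an illustration only, which stands in for the Alpha ldl_l/stl_c loop with a GCC __atomic builtin and uses a NULL check in place of access_ok():

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Models the new convention: 0/-EFAULT return, old value passed via *uval. */
static int cmpxchg_inatomic_sketch(uint32_t *uval, uint32_t *uaddr,
				   uint32_t oldval, uint32_t newval)
{
	uint32_t seen = oldval;

	if (uaddr == NULL)		/* stand-in for the access_ok() test */
		return -EFAULT;

	/* Atomically: if (*uaddr == oldval) *uaddr = newval;
	 * 'seen' ends up holding whatever value was actually observed. */
	__atomic_compare_exchange_n(uaddr, &seen, newval, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	*uval = seen;
	return 0;
}

int main(void)
{
	uint32_t word = 1, prev;
	int ret = cmpxchg_inatomic_sketch(&prev, &word, 1, 2);

	printf("ret=%d prev=%u word=%u\n", ret, prev, word); /* ret=0 prev=1 word=2 */
	return 0;
}
```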
diff --git a/arch/alpha/include/asm/gpio.h b/arch/alpha/include/asm/gpio.h new file mode 100644 index 000000000000..7dc6a6343c06 --- /dev/null +++ b/arch/alpha/include/asm/gpio.h | |||
@@ -0,0 +1,55 @@ | |||
1 | /* | ||
2 | * Generic GPIO API implementation for Alpha. | ||
3 | * | ||
4 | * A stright copy of that for PowerPC which was: | ||
5 | * | ||
6 | * Copyright (c) 2007-2008 MontaVista Software, Inc. | ||
7 | * | ||
8 | * Author: Anton Vorontsov <avorontsov@ru.mvista.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2 of the License, or | ||
13 | * (at your option) any later version. | ||
14 | */ | ||
15 | |||
16 | #ifndef _ASM_ALPHA_GPIO_H | ||
17 | #define _ASM_ALPHA_GPIO_H | ||
18 | |||
19 | #include <linux/errno.h> | ||
20 | #include <asm-generic/gpio.h> | ||
21 | |||
22 | #ifdef CONFIG_GPIOLIB | ||
23 | |||
24 | /* | ||
25 | * We don't (yet) implement inlined/rapid versions for on-chip gpios. | ||
26 | * Just call gpiolib. | ||
27 | */ | ||
28 | static inline int gpio_get_value(unsigned int gpio) | ||
29 | { | ||
30 | return __gpio_get_value(gpio); | ||
31 | } | ||
32 | |||
33 | static inline void gpio_set_value(unsigned int gpio, int value) | ||
34 | { | ||
35 | __gpio_set_value(gpio, value); | ||
36 | } | ||
37 | |||
38 | static inline int gpio_cansleep(unsigned int gpio) | ||
39 | { | ||
40 | return __gpio_cansleep(gpio); | ||
41 | } | ||
42 | |||
43 | static inline int gpio_to_irq(unsigned int gpio) | ||
44 | { | ||
45 | return __gpio_to_irq(gpio); | ||
46 | } | ||
47 | |||
48 | static inline int irq_to_gpio(unsigned int irq) | ||
49 | { | ||
50 | return -EINVAL; | ||
51 | } | ||
52 | |||
53 | #endif /* CONFIG_GPIOLIB */ | ||
54 | |||
55 | #endif /* _ASM_ALPHA_GPIO_H */ | ||
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h index eda9b909aa05..56ff96501350 100644 --- a/arch/alpha/include/asm/io.h +++ b/arch/alpha/include/asm/io.h | |||
@@ -37,8 +37,9 @@ | |||
37 | */ | 37 | */ |
38 | extern inline void __set_hae(unsigned long new_hae) | 38 | extern inline void __set_hae(unsigned long new_hae) |
39 | { | 39 | { |
40 | unsigned long flags; | 40 | unsigned long flags = swpipl(IPL_MAX); |
41 | local_irq_save(flags); | 41 | |
42 | barrier(); | ||
42 | 43 | ||
43 | alpha_mv.hae_cache = new_hae; | 44 | alpha_mv.hae_cache = new_hae; |
44 | *alpha_mv.hae_register = new_hae; | 45 | *alpha_mv.hae_register = new_hae; |
@@ -46,7 +47,8 @@ extern inline void __set_hae(unsigned long new_hae) | |||
46 | /* Re-read to make sure it was written. */ | 47 | /* Re-read to make sure it was written. */ |
47 | new_hae = *alpha_mv.hae_register; | 48 | new_hae = *alpha_mv.hae_register; |
48 | 49 | ||
49 | local_irq_restore(flags); | 50 | setipl(flags); |
51 | barrier(); | ||
50 | } | 52 | } |
51 | 53 | ||
52 | extern inline void set_hae(unsigned long new_hae) | 54 | extern inline void set_hae(unsigned long new_hae) |
diff --git a/arch/alpha/include/asm/ioctls.h b/arch/alpha/include/asm/ioctls.h index 59617c3c2be6..80e1cee90f1f 100644 --- a/arch/alpha/include/asm/ioctls.h +++ b/arch/alpha/include/asm/ioctls.h | |||
@@ -92,7 +92,9 @@ | |||
92 | #define TIOCGSID 0x5429 /* Return the session ID of FD */ | 92 | #define TIOCGSID 0x5429 /* Return the session ID of FD */ |
93 | #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ | 93 | #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ |
94 | #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ | 94 | #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ |
95 | #define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */ | ||
95 | #define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */ | 96 | #define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */ |
97 | #define TIOCVHANGUP 0x5437 | ||
96 | 98 | ||
97 | #define TIOCSERCONFIG 0x5453 | 99 | #define TIOCSERCONFIG 0x5453 |
98 | #define TIOCSERGWILD 0x5454 | 100 | #define TIOCSERGWILD 0x5454 |
diff --git a/arch/alpha/include/asm/irqflags.h b/arch/alpha/include/asm/irqflags.h new file mode 100644 index 000000000000..299bbc7e9d71 --- /dev/null +++ b/arch/alpha/include/asm/irqflags.h | |||
@@ -0,0 +1,67 @@ | |||
1 | #ifndef __ALPHA_IRQFLAGS_H | ||
2 | #define __ALPHA_IRQFLAGS_H | ||
3 | |||
4 | #include <asm/system.h> | ||
5 | |||
6 | #define IPL_MIN 0 | ||
7 | #define IPL_SW0 1 | ||
8 | #define IPL_SW1 2 | ||
9 | #define IPL_DEV0 3 | ||
10 | #define IPL_DEV1 4 | ||
11 | #define IPL_TIMER 5 | ||
12 | #define IPL_PERF 6 | ||
13 | #define IPL_POWERFAIL 6 | ||
14 | #define IPL_MCHECK 7 | ||
15 | #define IPL_MAX 7 | ||
16 | |||
17 | #ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK | ||
18 | #undef IPL_MIN | ||
19 | #define IPL_MIN __min_ipl | ||
20 | extern int __min_ipl; | ||
21 | #endif | ||
22 | |||
23 | #define getipl() (rdps() & 7) | ||
24 | #define setipl(ipl) ((void) swpipl(ipl)) | ||
25 | |||
26 | static inline unsigned long arch_local_save_flags(void) | ||
27 | { | ||
28 | return rdps(); | ||
29 | } | ||
30 | |||
31 | static inline void arch_local_irq_disable(void) | ||
32 | { | ||
33 | setipl(IPL_MAX); | ||
34 | barrier(); | ||
35 | } | ||
36 | |||
37 | static inline unsigned long arch_local_irq_save(void) | ||
38 | { | ||
39 | unsigned long flags = swpipl(IPL_MAX); | ||
40 | barrier(); | ||
41 | return flags; | ||
42 | } | ||
43 | |||
44 | static inline void arch_local_irq_enable(void) | ||
45 | { | ||
46 | barrier(); | ||
47 | setipl(IPL_MIN); | ||
48 | } | ||
49 | |||
50 | static inline void arch_local_irq_restore(unsigned long flags) | ||
51 | { | ||
52 | barrier(); | ||
53 | setipl(flags); | ||
54 | barrier(); | ||
55 | } | ||
56 | |||
57 | static inline bool arch_irqs_disabled_flags(unsigned long flags) | ||
58 | { | ||
59 | return flags == IPL_MAX; | ||
60 | } | ||
61 | |||
62 | static inline bool arch_irqs_disabled(void) | ||
63 | { | ||
64 | return arch_irqs_disabled_flags(getipl()); | ||
65 | } | ||
66 | |||
67 | #endif /* __ALPHA_IRQFLAGS_H */ | ||
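The new asm/irqflags.h above carries the IPL-based interrupt-masking helpers that the asm/system.h hunk further down removes: interrupts count as disabled when the saved processor-status IPL equals IPL_MAX. The following hypothetical user-space model mirrors only that flag logic — rdps()/swpipl() are replaced by a plain variable, not the PALcode calls:

```c
#include <stdbool.h>
#include <stdio.h>

#define IPL_MIN 0	/* all interrupt levels enabled */
#define IPL_MAX 7	/* everything masked */

static unsigned long current_ipl = IPL_MIN;	/* stand-in for the PS register */

/* Swap in a new IPL and return the previous one, like the swpipl PAL call. */
static unsigned long swpipl(unsigned long ipl)
{
	unsigned long old = current_ipl;

	current_ipl = ipl;
	return old;
}

static unsigned long model_irq_save(void)      { return swpipl(IPL_MAX); }
static void model_irq_restore(unsigned long f) { (void) swpipl(f); }
static bool model_irqs_disabled(void)          { return current_ipl == IPL_MAX; }

int main(void)
{
	unsigned long flags = model_irq_save();

	printf("disabled=%d\n", model_irqs_disabled());	/* prints 1 */
	model_irq_restore(flags);
	printf("disabled=%d\n", model_irqs_disabled());	/* prints 0 */
	return 0;
}
```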
diff --git a/arch/alpha/include/asm/mman.h b/arch/alpha/include/asm/mman.h index 99c56d47879d..72db984f8781 100644 --- a/arch/alpha/include/asm/mman.h +++ b/arch/alpha/include/asm/mman.h | |||
@@ -53,6 +53,9 @@ | |||
53 | #define MADV_MERGEABLE 12 /* KSM may merge identical pages */ | 53 | #define MADV_MERGEABLE 12 /* KSM may merge identical pages */ |
54 | #define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */ | 54 | #define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */ |
55 | 55 | ||
56 | #define MADV_HUGEPAGE 14 /* Worth backing with hugepages */ | ||
57 | #define MADV_NOHUGEPAGE 15 /* Not worth backing with hugepages */ | ||
58 | |||
56 | /* compatibility flags */ | 59 | /* compatibility flags */ |
57 | #define MAP_FILE 0 | 60 | #define MAP_FILE 0 |
58 | 61 | ||
diff --git a/arch/alpha/include/asm/mmzone.h b/arch/alpha/include/asm/mmzone.h index 8af56ce346ad..445dc42e0334 100644 --- a/arch/alpha/include/asm/mmzone.h +++ b/arch/alpha/include/asm/mmzone.h | |||
@@ -56,7 +56,6 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n) | |||
56 | * Given a kernel address, find the home node of the underlying memory. | 56 | * Given a kernel address, find the home node of the underlying memory. |
57 | */ | 57 | */ |
58 | #define kvaddr_to_nid(kaddr) pa_to_nid(__pa(kaddr)) | 58 | #define kvaddr_to_nid(kaddr) pa_to_nid(__pa(kaddr)) |
59 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) | ||
60 | 59 | ||
61 | /* | 60 | /* |
62 | * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory | 61 | * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory |
diff --git a/arch/alpha/include/asm/perf_event.h b/arch/alpha/include/asm/perf_event.h index 4157cd3c44a9..5996e7a6757e 100644 --- a/arch/alpha/include/asm/perf_event.h +++ b/arch/alpha/include/asm/perf_event.h | |||
@@ -1,15 +1,4 @@ | |||
1 | #ifndef __ASM_ALPHA_PERF_EVENT_H | 1 | #ifndef __ASM_ALPHA_PERF_EVENT_H |
2 | #define __ASM_ALPHA_PERF_EVENT_H | 2 | #define __ASM_ALPHA_PERF_EVENT_H |
3 | 3 | ||
4 | /* Alpha only supports software events through this interface. */ | ||
5 | extern void set_perf_event_pending(void); | ||
6 | |||
7 | #define PERF_EVENT_INDEX_OFFSET 0 | ||
8 | |||
9 | #ifdef CONFIG_PERF_EVENTS | ||
10 | extern void init_hw_perf_events(void); | ||
11 | #else | ||
12 | static inline void init_hw_perf_events(void) { } | ||
13 | #endif | ||
14 | |||
15 | #endif /* __ASM_ALPHA_PERF_EVENT_H */ | 4 | #endif /* __ASM_ALPHA_PERF_EVENT_H */ |
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h index 71a243294142..de98a732683d 100644 --- a/arch/alpha/include/asm/pgtable.h +++ b/arch/alpha/include/asm/pgtable.h | |||
@@ -318,9 +318,7 @@ extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address) | |||
318 | } | 318 | } |
319 | 319 | ||
320 | #define pte_offset_map(dir,addr) pte_offset_kernel((dir),(addr)) | 320 | #define pte_offset_map(dir,addr) pte_offset_kernel((dir),(addr)) |
321 | #define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir),(addr)) | ||
322 | #define pte_unmap(pte) do { } while (0) | 321 | #define pte_unmap(pte) do { } while (0) |
323 | #define pte_unmap_nested(pte) do { } while (0) | ||
324 | 322 | ||
325 | extern pgd_t swapper_pg_dir[1024]; | 323 | extern pgd_t swapper_pg_dir[1024]; |
326 | 324 | ||
diff --git a/arch/alpha/include/asm/rwsem.h b/arch/alpha/include/asm/rwsem.h index 1570c0b54336..a83bbea62c67 100644 --- a/arch/alpha/include/asm/rwsem.h +++ b/arch/alpha/include/asm/rwsem.h | |||
@@ -13,44 +13,13 @@ | |||
13 | #ifdef __KERNEL__ | 13 | #ifdef __KERNEL__ |
14 | 14 | ||
15 | #include <linux/compiler.h> | 15 | #include <linux/compiler.h> |
16 | #include <linux/list.h> | ||
17 | #include <linux/spinlock.h> | ||
18 | 16 | ||
19 | struct rwsem_waiter; | ||
20 | |||
21 | extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); | ||
22 | extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); | ||
23 | extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *); | ||
24 | extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); | ||
25 | |||
26 | /* | ||
27 | * the semaphore definition | ||
28 | */ | ||
29 | struct rw_semaphore { | ||
30 | long count; | ||
31 | #define RWSEM_UNLOCKED_VALUE 0x0000000000000000L | 17 | #define RWSEM_UNLOCKED_VALUE 0x0000000000000000L |
32 | #define RWSEM_ACTIVE_BIAS 0x0000000000000001L | 18 | #define RWSEM_ACTIVE_BIAS 0x0000000000000001L |
33 | #define RWSEM_ACTIVE_MASK 0x00000000ffffffffL | 19 | #define RWSEM_ACTIVE_MASK 0x00000000ffffffffL |
34 | #define RWSEM_WAITING_BIAS (-0x0000000100000000L) | 20 | #define RWSEM_WAITING_BIAS (-0x0000000100000000L) |
35 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS | 21 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS |
36 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) | 22 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) |
37 | spinlock_t wait_lock; | ||
38 | struct list_head wait_list; | ||
39 | }; | ||
40 | |||
41 | #define __RWSEM_INITIALIZER(name) \ | ||
42 | { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ | ||
43 | LIST_HEAD_INIT((name).wait_list) } | ||
44 | |||
45 | #define DECLARE_RWSEM(name) \ | ||
46 | struct rw_semaphore name = __RWSEM_INITIALIZER(name) | ||
47 | |||
48 | static inline void init_rwsem(struct rw_semaphore *sem) | ||
49 | { | ||
50 | sem->count = RWSEM_UNLOCKED_VALUE; | ||
51 | spin_lock_init(&sem->wait_lock); | ||
52 | INIT_LIST_HEAD(&sem->wait_list); | ||
53 | } | ||
54 | 23 | ||
55 | static inline void __down_read(struct rw_semaphore *sem) | 24 | static inline void __down_read(struct rw_semaphore *sem) |
56 | { | 25 | { |
@@ -250,10 +219,5 @@ static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem) | |||
250 | #endif | 219 | #endif |
251 | } | 220 | } |
252 | 221 | ||
253 | static inline int rwsem_is_locked(struct rw_semaphore *sem) | ||
254 | { | ||
255 | return (sem->count != 0); | ||
256 | } | ||
257 | |||
258 | #endif /* __KERNEL__ */ | 222 | #endif /* __KERNEL__ */ |
259 | #endif /* _ALPHA_RWSEM_H */ | 223 | #endif /* _ALPHA_RWSEM_H */ |
diff --git a/arch/alpha/include/asm/smp.h b/arch/alpha/include/asm/smp.h index 3f390e8cc0b3..c46e714aa3e0 100644 --- a/arch/alpha/include/asm/smp.h +++ b/arch/alpha/include/asm/smp.h | |||
@@ -39,8 +39,6 @@ struct cpuinfo_alpha { | |||
39 | 39 | ||
40 | extern struct cpuinfo_alpha cpu_data[NR_CPUS]; | 40 | extern struct cpuinfo_alpha cpu_data[NR_CPUS]; |
41 | 41 | ||
42 | #define PROC_CHANGE_PENALTY 20 | ||
43 | |||
44 | #define hard_smp_processor_id() __hard_smp_processor_id() | 42 | #define hard_smp_processor_id() __hard_smp_processor_id() |
45 | #define raw_smp_processor_id() (current_thread_info()->cpu) | 43 | #define raw_smp_processor_id() (current_thread_info()->cpu) |
46 | 44 | ||
diff --git a/arch/alpha/include/asm/system.h b/arch/alpha/include/asm/system.h index 5aa40cca4f23..9f78e6934637 100644 --- a/arch/alpha/include/asm/system.h +++ b/arch/alpha/include/asm/system.h | |||
@@ -259,34 +259,6 @@ __CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long); | |||
259 | __CALL_PAL_W1(wrusp, unsigned long); | 259 | __CALL_PAL_W1(wrusp, unsigned long); |
260 | __CALL_PAL_W1(wrvptptr, unsigned long); | 260 | __CALL_PAL_W1(wrvptptr, unsigned long); |
261 | 261 | ||
262 | #define IPL_MIN 0 | ||
263 | #define IPL_SW0 1 | ||
264 | #define IPL_SW1 2 | ||
265 | #define IPL_DEV0 3 | ||
266 | #define IPL_DEV1 4 | ||
267 | #define IPL_TIMER 5 | ||
268 | #define IPL_PERF 6 | ||
269 | #define IPL_POWERFAIL 6 | ||
270 | #define IPL_MCHECK 7 | ||
271 | #define IPL_MAX 7 | ||
272 | |||
273 | #ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK | ||
274 | #undef IPL_MIN | ||
275 | #define IPL_MIN __min_ipl | ||
276 | extern int __min_ipl; | ||
277 | #endif | ||
278 | |||
279 | #define getipl() (rdps() & 7) | ||
280 | #define setipl(ipl) ((void) swpipl(ipl)) | ||
281 | |||
282 | #define local_irq_disable() do { setipl(IPL_MAX); barrier(); } while(0) | ||
283 | #define local_irq_enable() do { barrier(); setipl(IPL_MIN); } while(0) | ||
284 | #define local_save_flags(flags) ((flags) = rdps()) | ||
285 | #define local_irq_save(flags) do { (flags) = swpipl(IPL_MAX); barrier(); } while(0) | ||
286 | #define local_irq_restore(flags) do { barrier(); setipl(flags); barrier(); } while(0) | ||
287 | |||
288 | #define irqs_disabled() (getipl() == IPL_MAX) | ||
289 | |||
290 | /* | 262 | /* |
291 | * TB routines.. | 263 | * TB routines.. |
292 | */ | 264 | */ |
diff --git a/arch/alpha/include/asm/types.h b/arch/alpha/include/asm/types.h index bd621ecd1eb3..881544339c21 100644 --- a/arch/alpha/include/asm/types.h +++ b/arch/alpha/include/asm/types.h | |||
@@ -20,16 +20,4 @@ | |||
20 | typedef unsigned int umode_t; | 20 | typedef unsigned int umode_t; |
21 | 21 | ||
22 | #endif /* __ASSEMBLY__ */ | 22 | #endif /* __ASSEMBLY__ */ |
23 | |||
24 | /* | ||
25 | * These aren't exported outside the kernel to avoid name space clashes | ||
26 | */ | ||
27 | #ifdef __KERNEL__ | ||
28 | #ifndef __ASSEMBLY__ | ||
29 | |||
30 | typedef u64 dma_addr_t; | ||
31 | typedef u64 dma64_addr_t; | ||
32 | |||
33 | #endif /* __ASSEMBLY__ */ | ||
34 | #endif /* __KERNEL__ */ | ||
35 | #endif /* _ALPHA_TYPES_H */ | 23 | #endif /* _ALPHA_TYPES_H */ |
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h index 058937bf5a77..4ac48a095f3a 100644 --- a/arch/alpha/include/asm/unistd.h +++ b/arch/alpha/include/asm/unistd.h | |||
@@ -452,10 +452,15 @@ | |||
452 | #define __NR_fanotify_init 494 | 452 | #define __NR_fanotify_init 494 |
453 | #define __NR_fanotify_mark 495 | 453 | #define __NR_fanotify_mark 495 |
454 | #define __NR_prlimit64 496 | 454 | #define __NR_prlimit64 496 |
455 | #define __NR_name_to_handle_at 497 | ||
456 | #define __NR_open_by_handle_at 498 | ||
457 | #define __NR_clock_adjtime 499 | ||
458 | #define __NR_syncfs 500 | ||
459 | #define __NR_setns 501 | ||
455 | 460 | ||
456 | #ifdef __KERNEL__ | 461 | #ifdef __KERNEL__ |
457 | 462 | ||
458 | #define NR_SYSCALLS 497 | 463 | #define NR_SYSCALLS 502 |
459 | 464 | ||
460 | #define __ARCH_WANT_IPC_PARSE_VERSION | 465 | #define __ARCH_WANT_IPC_PARSE_VERSION |
461 | #define __ARCH_WANT_OLD_READDIR | 466 | #define __ARCH_WANT_OLD_READDIR |
diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile index 1ee9b5b629b8..7a6d908bb865 100644 --- a/arch/alpha/kernel/Makefile +++ b/arch/alpha/kernel/Makefile | |||
@@ -3,8 +3,8 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | extra-y := head.o vmlinux.lds | 5 | extra-y := head.o vmlinux.lds |
6 | EXTRA_AFLAGS := $(KBUILD_CFLAGS) | 6 | asflags-y := $(KBUILD_CFLAGS) |
7 | EXTRA_CFLAGS := -Werror -Wno-sign-compare | 7 | ccflags-y := -Wno-sign-compare |
8 | 8 | ||
9 | obj-y := entry.o traps.o process.o init_task.o osf_sys.o irq.o \ | 9 | obj-y := entry.o traps.o process.o init_task.o osf_sys.o irq.o \ |
10 | irq_alpha.o signal.o setup.o ptrace.o time.o \ | 10 | irq_alpha.o signal.o setup.o ptrace.o time.o \ |
diff --git a/arch/alpha/kernel/core_lca.c b/arch/alpha/kernel/core_lca.c index 4843f6ec9f3a..cb2801cfd3df 100644 --- a/arch/alpha/kernel/core_lca.c +++ b/arch/alpha/kernel/core_lca.c | |||
@@ -133,7 +133,7 @@ conf_read(unsigned long addr) | |||
133 | 133 | ||
134 | local_irq_save(flags); | 134 | local_irq_save(flags); |
135 | 135 | ||
136 | /* Reset status register to avoid loosing errors. */ | 136 | /* Reset status register to avoid losing errors. */ |
137 | stat0 = *(vulp)LCA_IOC_STAT0; | 137 | stat0 = *(vulp)LCA_IOC_STAT0; |
138 | *(vulp)LCA_IOC_STAT0 = stat0; | 138 | *(vulp)LCA_IOC_STAT0 = stat0; |
139 | mb(); | 139 | mb(); |
@@ -170,7 +170,7 @@ conf_write(unsigned long addr, unsigned int value) | |||
170 | 170 | ||
171 | local_irq_save(flags); /* avoid getting hit by machine check */ | 171 | local_irq_save(flags); /* avoid getting hit by machine check */ |
172 | 172 | ||
173 | /* Reset status register to avoid loosing errors. */ | 173 | /* Reset status register to avoid losing errors. */ |
174 | stat0 = *(vulp)LCA_IOC_STAT0; | 174 | stat0 = *(vulp)LCA_IOC_STAT0; |
175 | *(vulp)LCA_IOC_STAT0 = stat0; | 175 | *(vulp)LCA_IOC_STAT0 = stat0; |
176 | mb(); | 176 | mb(); |
diff --git a/arch/alpha/kernel/core_mcpcia.c b/arch/alpha/kernel/core_mcpcia.c index 381fec0af52e..da7bcc372f16 100644 --- a/arch/alpha/kernel/core_mcpcia.c +++ b/arch/alpha/kernel/core_mcpcia.c | |||
@@ -88,7 +88,7 @@ conf_read(unsigned long addr, unsigned char type1, | |||
88 | { | 88 | { |
89 | unsigned long flags; | 89 | unsigned long flags; |
90 | unsigned long mid = MCPCIA_HOSE2MID(hose->index); | 90 | unsigned long mid = MCPCIA_HOSE2MID(hose->index); |
91 | unsigned int stat0, value, temp, cpu; | 91 | unsigned int stat0, value, cpu; |
92 | 92 | ||
93 | cpu = smp_processor_id(); | 93 | cpu = smp_processor_id(); |
94 | 94 | ||
@@ -101,7 +101,7 @@ conf_read(unsigned long addr, unsigned char type1, | |||
101 | stat0 = *(vuip)MCPCIA_CAP_ERR(mid); | 101 | stat0 = *(vuip)MCPCIA_CAP_ERR(mid); |
102 | *(vuip)MCPCIA_CAP_ERR(mid) = stat0; | 102 | *(vuip)MCPCIA_CAP_ERR(mid) = stat0; |
103 | mb(); | 103 | mb(); |
104 | temp = *(vuip)MCPCIA_CAP_ERR(mid); | 104 | *(vuip)MCPCIA_CAP_ERR(mid); |
105 | DBG_CFG(("conf_read: MCPCIA_CAP_ERR(%d) was 0x%x\n", mid, stat0)); | 105 | DBG_CFG(("conf_read: MCPCIA_CAP_ERR(%d) was 0x%x\n", mid, stat0)); |
106 | 106 | ||
107 | mb(); | 107 | mb(); |
@@ -136,7 +136,7 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1, | |||
136 | { | 136 | { |
137 | unsigned long flags; | 137 | unsigned long flags; |
138 | unsigned long mid = MCPCIA_HOSE2MID(hose->index); | 138 | unsigned long mid = MCPCIA_HOSE2MID(hose->index); |
139 | unsigned int stat0, temp, cpu; | 139 | unsigned int stat0, cpu; |
140 | 140 | ||
141 | cpu = smp_processor_id(); | 141 | cpu = smp_processor_id(); |
142 | 142 | ||
@@ -145,7 +145,7 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1, | |||
145 | /* Reset status register to avoid losing errors. */ | 145 | /* Reset status register to avoid losing errors. */ |
146 | stat0 = *(vuip)MCPCIA_CAP_ERR(mid); | 146 | stat0 = *(vuip)MCPCIA_CAP_ERR(mid); |
147 | *(vuip)MCPCIA_CAP_ERR(mid) = stat0; mb(); | 147 | *(vuip)MCPCIA_CAP_ERR(mid) = stat0; mb(); |
148 | temp = *(vuip)MCPCIA_CAP_ERR(mid); | 148 | *(vuip)MCPCIA_CAP_ERR(mid); |
149 | DBG_CFG(("conf_write: MCPCIA CAP_ERR(%d) was 0x%x\n", mid, stat0)); | 149 | DBG_CFG(("conf_write: MCPCIA CAP_ERR(%d) was 0x%x\n", mid, stat0)); |
150 | 150 | ||
151 | draina(); | 151 | draina(); |
@@ -157,7 +157,7 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1, | |||
157 | *((vuip)addr) = value; | 157 | *((vuip)addr) = value; |
158 | mb(); | 158 | mb(); |
159 | mb(); /* magic */ | 159 | mb(); /* magic */ |
160 | temp = *(vuip)MCPCIA_CAP_ERR(mid); /* read to force the write */ | 160 | *(vuip)MCPCIA_CAP_ERR(mid); /* read to force the write */ |
161 | mcheck_expected(cpu) = 0; | 161 | mcheck_expected(cpu) = 0; |
162 | mb(); | 162 | mb(); |
163 | 163 | ||
@@ -572,12 +572,10 @@ mcpcia_print_system_area(unsigned long la_ptr) | |||
572 | void | 572 | void |
573 | mcpcia_machine_check(unsigned long vector, unsigned long la_ptr) | 573 | mcpcia_machine_check(unsigned long vector, unsigned long la_ptr) |
574 | { | 574 | { |
575 | struct el_common *mchk_header; | ||
576 | struct el_MCPCIA_uncorrected_frame_mcheck *mchk_logout; | 575 | struct el_MCPCIA_uncorrected_frame_mcheck *mchk_logout; |
577 | unsigned int cpu = smp_processor_id(); | 576 | unsigned int cpu = smp_processor_id(); |
578 | int expected; | 577 | int expected; |
579 | 578 | ||
580 | mchk_header = (struct el_common *)la_ptr; | ||
581 | mchk_logout = (struct el_MCPCIA_uncorrected_frame_mcheck *)la_ptr; | 579 | mchk_logout = (struct el_MCPCIA_uncorrected_frame_mcheck *)la_ptr; |
582 | expected = mcheck_expected(cpu); | 580 | expected = mcheck_expected(cpu); |
583 | 581 | ||
diff --git a/arch/alpha/kernel/core_t2.c b/arch/alpha/kernel/core_t2.c index e6d90568b65d..2f770e994289 100644 --- a/arch/alpha/kernel/core_t2.c +++ b/arch/alpha/kernel/core_t2.c | |||
@@ -74,8 +74,6 @@ | |||
74 | # define DBG(args) | 74 | # define DBG(args) |
75 | #endif | 75 | #endif |
76 | 76 | ||
77 | DEFINE_RAW_SPINLOCK(t2_hae_lock); | ||
78 | |||
79 | static volatile unsigned int t2_mcheck_any_expected; | 77 | static volatile unsigned int t2_mcheck_any_expected; |
80 | static volatile unsigned int t2_mcheck_last_taken; | 78 | static volatile unsigned int t2_mcheck_last_taken; |
81 | 79 | ||
@@ -406,6 +404,7 @@ void __init | |||
406 | t2_init_arch(void) | 404 | t2_init_arch(void) |
407 | { | 405 | { |
408 | struct pci_controller *hose; | 406 | struct pci_controller *hose; |
407 | struct resource *hae_mem; | ||
409 | unsigned long temp; | 408 | unsigned long temp; |
410 | unsigned int i; | 409 | unsigned int i; |
411 | 410 | ||
@@ -433,7 +432,13 @@ t2_init_arch(void) | |||
433 | */ | 432 | */ |
434 | pci_isa_hose = hose = alloc_pci_controller(); | 433 | pci_isa_hose = hose = alloc_pci_controller(); |
435 | hose->io_space = &ioport_resource; | 434 | hose->io_space = &ioport_resource; |
436 | hose->mem_space = &iomem_resource; | 435 | hae_mem = alloc_resource(); |
436 | hae_mem->start = 0; | ||
437 | hae_mem->end = T2_MEM_R1_MASK; | ||
438 | hae_mem->name = pci_hae0_name; | ||
439 | if (request_resource(&iomem_resource, hae_mem) < 0) | ||
440 | printk(KERN_ERR "Failed to request HAE_MEM\n"); | ||
441 | hose->mem_space = hae_mem; | ||
437 | hose->index = 0; | 442 | hose->index = 0; |
438 | 443 | ||
439 | hose->sparse_mem_base = T2_SPARSE_MEM - IDENT_ADDR; | 444 | hose->sparse_mem_base = T2_SPARSE_MEM - IDENT_ADDR; |
diff --git a/arch/alpha/kernel/err_marvel.c b/arch/alpha/kernel/err_marvel.c index 648ae88aeb8a..ae54ad91e18f 100644 --- a/arch/alpha/kernel/err_marvel.c +++ b/arch/alpha/kernel/err_marvel.c | |||
@@ -1027,7 +1027,7 @@ marvel_process_logout_frame(struct ev7_lf_subpackets *lf_subpackets, int print) | |||
1027 | * normal operation, dismiss them. | 1027 | * normal operation, dismiss them. |
1028 | * | 1028 | * |
1029 | * Dismiss if: | 1029 | * Dismiss if: |
1030 | * C_STAT = 0x14 (Error Reponse) | 1030 | * C_STAT = 0x14 (Error Response) |
1031 | * C_STS<3> = 0 (C_ADDR valid) | 1031 | * C_STS<3> = 0 (C_ADDR valid) |
1032 | * C_ADDR<42> = 1 (I/O) | 1032 | * C_ADDR<42> = 1 (I/O) |
1033 | * C_ADDR<31:22> = 111110xxb (PCI Config space) | 1033 | * C_ADDR<31:22> = 111110xxb (PCI Config space) |
diff --git a/arch/alpha/kernel/err_titan.c b/arch/alpha/kernel/err_titan.c index c3b3781a03de..14b26c466c89 100644 --- a/arch/alpha/kernel/err_titan.c +++ b/arch/alpha/kernel/err_titan.c | |||
@@ -533,8 +533,6 @@ static struct el_subpacket_annotation el_titan_annotations[] = { | |||
533 | static struct el_subpacket * | 533 | static struct el_subpacket * |
534 | el_process_regatta_subpacket(struct el_subpacket *header) | 534 | el_process_regatta_subpacket(struct el_subpacket *header) |
535 | { | 535 | { |
536 | int status; | ||
537 | |||
538 | if (header->class != EL_CLASS__REGATTA_FAMILY) { | 536 | if (header->class != EL_CLASS__REGATTA_FAMILY) { |
539 | printk("%s ** Unexpected header CLASS %d TYPE %d, aborting\n", | 537 | printk("%s ** Unexpected header CLASS %d TYPE %d, aborting\n", |
540 | err_print_prefix, | 538 | err_print_prefix, |
@@ -551,7 +549,7 @@ el_process_regatta_subpacket(struct el_subpacket *header) | |||
551 | printk("%s ** Occurred on CPU %d:\n", | 549 | printk("%s ** Occurred on CPU %d:\n", |
552 | err_print_prefix, | 550 | err_print_prefix, |
553 | (int)header->by_type.regatta_frame.cpuid); | 551 | (int)header->by_type.regatta_frame.cpuid); |
554 | status = privateer_process_logout_frame((struct el_common *) | 552 | privateer_process_logout_frame((struct el_common *) |
555 | header->by_type.regatta_frame.data_start, 1); | 553 | header->by_type.regatta_frame.data_start, 1); |
556 | break; | 554 | break; |
557 | default: | 555 | default: |
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c index fe912984d9b1..381431a2d6d9 100644 --- a/arch/alpha/kernel/irq.c +++ b/arch/alpha/kernel/irq.c | |||
@@ -44,10 +44,16 @@ static char irq_user_affinity[NR_IRQS]; | |||
44 | 44 | ||
45 | int irq_select_affinity(unsigned int irq) | 45 | int irq_select_affinity(unsigned int irq) |
46 | { | 46 | { |
47 | struct irq_data *data = irq_get_irq_data(irq); | ||
48 | struct irq_chip *chip; | ||
47 | static int last_cpu; | 49 | static int last_cpu; |
48 | int cpu = last_cpu + 1; | 50 | int cpu = last_cpu + 1; |
49 | 51 | ||
50 | if (!irq_desc[irq].chip->set_affinity || irq_user_affinity[irq]) | 52 | if (!data) |
53 | return 1; | ||
54 | chip = irq_data_get_irq_chip(data); | ||
55 | |||
56 | if (!chip->irq_set_affinity || irq_user_affinity[irq]) | ||
51 | return 1; | 57 | return 1; |
52 | 58 | ||
53 | while (!cpu_possible(cpu) || | 59 | while (!cpu_possible(cpu) || |
@@ -55,68 +61,27 @@ int irq_select_affinity(unsigned int irq) | |||
55 | cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0); | 61 | cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0); |
56 | last_cpu = cpu; | 62 | last_cpu = cpu; |
57 | 63 | ||
58 | cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu)); | 64 | cpumask_copy(data->affinity, cpumask_of(cpu)); |
59 | irq_desc[irq].chip->set_affinity(irq, cpumask_of(cpu)); | 65 | chip->irq_set_affinity(data, cpumask_of(cpu), false); |
60 | return 0; | 66 | return 0; |
61 | } | 67 | } |
62 | #endif /* CONFIG_SMP */ | 68 | #endif /* CONFIG_SMP */ |
63 | 69 | ||
64 | int | 70 | int arch_show_interrupts(struct seq_file *p, int prec) |
65 | show_interrupts(struct seq_file *p, void *v) | ||
66 | { | 71 | { |
67 | int j; | 72 | int j; |
68 | int irq = *(loff_t *) v; | ||
69 | struct irqaction * action; | ||
70 | unsigned long flags; | ||
71 | |||
72 | #ifdef CONFIG_SMP | ||
73 | if (irq == 0) { | ||
74 | seq_puts(p, " "); | ||
75 | for_each_online_cpu(j) | ||
76 | seq_printf(p, "CPU%d ", j); | ||
77 | seq_putc(p, '\n'); | ||
78 | } | ||
79 | #endif | ||
80 | 73 | ||
81 | if (irq < ACTUAL_NR_IRQS) { | ||
82 | raw_spin_lock_irqsave(&irq_desc[irq].lock, flags); | ||
83 | action = irq_desc[irq].action; | ||
84 | if (!action) | ||
85 | goto unlock; | ||
86 | seq_printf(p, "%3d: ", irq); | ||
87 | #ifndef CONFIG_SMP | ||
88 | seq_printf(p, "%10u ", kstat_irqs(irq)); | ||
89 | #else | ||
90 | for_each_online_cpu(j) | ||
91 | seq_printf(p, "%10u ", kstat_irqs_cpu(irq, j)); | ||
92 | #endif | ||
93 | seq_printf(p, " %14s", irq_desc[irq].chip->name); | ||
94 | seq_printf(p, " %c%s", | ||
95 | (action->flags & IRQF_DISABLED)?'+':' ', | ||
96 | action->name); | ||
97 | |||
98 | for (action=action->next; action; action = action->next) { | ||
99 | seq_printf(p, ", %c%s", | ||
100 | (action->flags & IRQF_DISABLED)?'+':' ', | ||
101 | action->name); | ||
102 | } | ||
103 | |||
104 | seq_putc(p, '\n'); | ||
105 | unlock: | ||
106 | raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags); | ||
107 | } else if (irq == ACTUAL_NR_IRQS) { | ||
108 | #ifdef CONFIG_SMP | 74 | #ifdef CONFIG_SMP |
109 | seq_puts(p, "IPI: "); | 75 | seq_puts(p, "IPI: "); |
110 | for_each_online_cpu(j) | 76 | for_each_online_cpu(j) |
111 | seq_printf(p, "%10lu ", cpu_data[j].ipi_count); | 77 | seq_printf(p, "%10lu ", cpu_data[j].ipi_count); |
112 | seq_putc(p, '\n'); | 78 | seq_putc(p, '\n'); |
113 | #endif | 79 | #endif |
114 | seq_puts(p, "PMI: "); | 80 | seq_puts(p, "PMI: "); |
115 | for_each_online_cpu(j) | 81 | for_each_online_cpu(j) |
116 | seq_printf(p, "%10lu ", per_cpu(irq_pmi_count, j)); | 82 | seq_printf(p, "%10lu ", per_cpu(irq_pmi_count, j)); |
117 | seq_puts(p, " Performance Monitoring\n"); | 83 | seq_puts(p, " Performance Monitoring\n"); |
118 | seq_printf(p, "ERR: %10lu\n", irq_err_count); | 84 | seq_printf(p, "ERR: %10lu\n", irq_err_count); |
119 | } | ||
120 | return 0; | 85 | return 0; |
121 | } | 86 | } |
122 | 87 | ||
@@ -142,8 +107,10 @@ handle_irq(int irq) | |||
142 | * handled by some other CPU. (or is disabled) | 107 | * handled by some other CPU. (or is disabled) |
143 | */ | 108 | */ |
144 | static unsigned int illegal_count=0; | 109 | static unsigned int illegal_count=0; |
110 | struct irq_desc *desc = irq_to_desc(irq); | ||
145 | 111 | ||
146 | if ((unsigned) irq > ACTUAL_NR_IRQS && illegal_count < MAX_ILLEGAL_IRQS ) { | 112 | if (!desc || ((unsigned) irq > ACTUAL_NR_IRQS && |
113 | illegal_count < MAX_ILLEGAL_IRQS)) { | ||
147 | irq_err_count++; | 114 | irq_err_count++; |
148 | illegal_count++; | 115 | illegal_count++; |
149 | printk(KERN_CRIT "device_interrupt: invalid interrupt %d\n", | 116 | printk(KERN_CRIT "device_interrupt: invalid interrupt %d\n", |
@@ -151,14 +118,14 @@ handle_irq(int irq) | |||
151 | return; | 118 | return; |
152 | } | 119 | } |
153 | 120 | ||
154 | irq_enter(); | ||
155 | /* | 121 | /* |
156 | * __do_IRQ() must be called with IPL_MAX. Note that we do not | 122 | * From here we must proceed with IPL_MAX. Note that we do not |
157 | * explicitly enable interrupts afterwards - some MILO PALcode | 123 | * explicitly enable interrupts afterwards - some MILO PALcode |
158 | * (namely LX164 one) seems to have severe problems with RTI | 124 | * (namely LX164 one) seems to have severe problems with RTI |
159 | * at IPL 0. | 125 | * at IPL 0. |
160 | */ | 126 | */ |
161 | local_irq_disable(); | 127 | local_irq_disable(); |
162 | __do_IRQ(irq); | 128 | irq_enter(); |
129 | generic_handle_irq_desc(irq, desc); | ||
163 | irq_exit(); | 130 | irq_exit(); |
164 | } | 131 | } |
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c index 5f77afb88e89..51b7fbd9e4c1 100644 --- a/arch/alpha/kernel/irq_alpha.c +++ b/arch/alpha/kernel/irq_alpha.c | |||
@@ -112,8 +112,6 @@ init_IRQ(void) | |||
112 | wrent(entInt, 0); | 112 | wrent(entInt, 0); |
113 | 113 | ||
114 | alpha_mv.init_irq(); | 114 | alpha_mv.init_irq(); |
115 | |||
116 | init_hw_perf_events(); | ||
117 | } | 115 | } |
118 | 116 | ||
119 | /* | 117 | /* |
@@ -221,30 +219,17 @@ process_mcheck_info(unsigned long vector, unsigned long la_ptr, | |||
221 | * processed by PALcode, and comes in via entInt vector 1. | 219 | * processed by PALcode, and comes in via entInt vector 1. |
222 | */ | 220 | */ |
223 | 221 | ||
224 | static void rtc_enable_disable(unsigned int irq) { } | ||
225 | static unsigned int rtc_startup(unsigned int irq) { return 0; } | ||
226 | |||
227 | struct irqaction timer_irqaction = { | 222 | struct irqaction timer_irqaction = { |
228 | .handler = timer_interrupt, | 223 | .handler = timer_interrupt, |
229 | .flags = IRQF_DISABLED, | 224 | .flags = IRQF_DISABLED, |
230 | .name = "timer", | 225 | .name = "timer", |
231 | }; | 226 | }; |
232 | 227 | ||
233 | static struct irq_chip rtc_irq_type = { | ||
234 | .name = "RTC", | ||
235 | .startup = rtc_startup, | ||
236 | .shutdown = rtc_enable_disable, | ||
237 | .enable = rtc_enable_disable, | ||
238 | .disable = rtc_enable_disable, | ||
239 | .ack = rtc_enable_disable, | ||
240 | .end = rtc_enable_disable, | ||
241 | }; | ||
242 | |||
243 | void __init | 228 | void __init |
244 | init_rtc_irq(void) | 229 | init_rtc_irq(void) |
245 | { | 230 | { |
246 | irq_desc[RTC_IRQ].status = IRQ_DISABLED; | 231 | irq_set_chip_and_handler_name(RTC_IRQ, &dummy_irq_chip, |
247 | irq_desc[RTC_IRQ].chip = &rtc_irq_type; | 232 | handle_simple_irq, "RTC"); |
248 | setup_irq(RTC_IRQ, &timer_irqaction); | 233 | setup_irq(RTC_IRQ, &timer_irqaction); |
249 | } | 234 | } |
250 | 235 | ||
diff --git a/arch/alpha/kernel/irq_i8259.c b/arch/alpha/kernel/irq_i8259.c index 83a9ac280890..e1861c77dabc 100644 --- a/arch/alpha/kernel/irq_i8259.c +++ b/arch/alpha/kernel/irq_i8259.c | |||
@@ -33,10 +33,10 @@ i8259_update_irq_hw(unsigned int irq, unsigned long mask) | |||
33 | } | 33 | } |
34 | 34 | ||
35 | inline void | 35 | inline void |
36 | i8259a_enable_irq(unsigned int irq) | 36 | i8259a_enable_irq(struct irq_data *d) |
37 | { | 37 | { |
38 | spin_lock(&i8259_irq_lock); | 38 | spin_lock(&i8259_irq_lock); |
39 | i8259_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq)); | 39 | i8259_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq)); |
40 | spin_unlock(&i8259_irq_lock); | 40 | spin_unlock(&i8259_irq_lock); |
41 | } | 41 | } |
42 | 42 | ||
@@ -47,16 +47,18 @@ __i8259a_disable_irq(unsigned int irq) | |||
47 | } | 47 | } |
48 | 48 | ||
49 | void | 49 | void |
50 | i8259a_disable_irq(unsigned int irq) | 50 | i8259a_disable_irq(struct irq_data *d) |
51 | { | 51 | { |
52 | spin_lock(&i8259_irq_lock); | 52 | spin_lock(&i8259_irq_lock); |
53 | __i8259a_disable_irq(irq); | 53 | __i8259a_disable_irq(d->irq); |
54 | spin_unlock(&i8259_irq_lock); | 54 | spin_unlock(&i8259_irq_lock); |
55 | } | 55 | } |
56 | 56 | ||
57 | void | 57 | void |
58 | i8259a_mask_and_ack_irq(unsigned int irq) | 58 | i8259a_mask_and_ack_irq(struct irq_data *d) |
59 | { | 59 | { |
60 | unsigned int irq = d->irq; | ||
61 | |||
60 | spin_lock(&i8259_irq_lock); | 62 | spin_lock(&i8259_irq_lock); |
61 | __i8259a_disable_irq(irq); | 63 | __i8259a_disable_irq(irq); |
62 | 64 | ||
@@ -69,28 +71,11 @@ i8259a_mask_and_ack_irq(unsigned int irq) | |||
69 | spin_unlock(&i8259_irq_lock); | 71 | spin_unlock(&i8259_irq_lock); |
70 | } | 72 | } |
71 | 73 | ||
72 | unsigned int | ||
73 | i8259a_startup_irq(unsigned int irq) | ||
74 | { | ||
75 | i8259a_enable_irq(irq); | ||
76 | return 0; /* never anything pending */ | ||
77 | } | ||
78 | |||
79 | void | ||
80 | i8259a_end_irq(unsigned int irq) | ||
81 | { | ||
82 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
83 | i8259a_enable_irq(irq); | ||
84 | } | ||
85 | |||
86 | struct irq_chip i8259a_irq_type = { | 74 | struct irq_chip i8259a_irq_type = { |
87 | .name = "XT-PIC", | 75 | .name = "XT-PIC", |
88 | .startup = i8259a_startup_irq, | 76 | .irq_unmask = i8259a_enable_irq, |
89 | .shutdown = i8259a_disable_irq, | 77 | .irq_mask = i8259a_disable_irq, |
90 | .enable = i8259a_enable_irq, | 78 | .irq_mask_ack = i8259a_mask_and_ack_irq, |
91 | .disable = i8259a_disable_irq, | ||
92 | .ack = i8259a_mask_and_ack_irq, | ||
93 | .end = i8259a_end_irq, | ||
94 | }; | 79 | }; |
95 | 80 | ||
96 | void __init | 81 | void __init |
@@ -107,8 +92,7 @@ init_i8259a_irqs(void) | |||
107 | outb(0xff, 0xA1); /* mask all of 8259A-2 */ | 92 | outb(0xff, 0xA1); /* mask all of 8259A-2 */ |
108 | 93 | ||
109 | for (i = 0; i < 16; i++) { | 94 | for (i = 0; i < 16; i++) { |
110 | irq_desc[i].status = IRQ_DISABLED; | 95 | irq_set_chip_and_handler(i, &i8259a_irq_type, handle_level_irq); |
111 | irq_desc[i].chip = &i8259a_irq_type; | ||
112 | } | 96 | } |
113 | 97 | ||
114 | setup_irq(2, &cascade); | 98 | setup_irq(2, &cascade); |
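The i8259 conversion above shows the irq_chip side of the same migration: the old .enable/.disable/.ack/.end callbacks that took a bare irq number become .irq_unmask/.irq_mask/.irq_mask_ack callbacks that take a struct irq_data pointer, with the number recovered as d->irq, and the per-chip startup/end helpers go away because the core now derives that behaviour from mask/unmask. A minimal sketch of a chip written against the new callbacks; the controller, its cached mask and the register write are placeholders, not code from this tree.

    #include <linux/irq.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(my_pic_lock);
    static unsigned long my_pic_cached_mask = ~0UL;    /* all sources masked */

    static void my_pic_write_mask(unsigned long mask)
    {
            /* hypothetical hardware access; a real chip pokes its mask register */
    }

    static void my_pic_mask(struct irq_data *d)
    {
            spin_lock(&my_pic_lock);
            my_pic_cached_mask |= 1UL << d->irq;       /* irq number comes from irq_data */
            my_pic_write_mask(my_pic_cached_mask);
            spin_unlock(&my_pic_lock);
    }

    static void my_pic_unmask(struct irq_data *d)
    {
            spin_lock(&my_pic_lock);
            my_pic_cached_mask &= ~(1UL << d->irq);
            my_pic_write_mask(my_pic_cached_mask);
            spin_unlock(&my_pic_lock);
    }

    static struct irq_chip my_pic_irq_type = {
            .name         = "MY-PIC",
            .irq_unmask   = my_pic_unmask,
            .irq_mask     = my_pic_mask,
            .irq_mask_ack = my_pic_mask,    /* masking doubles as the ack here */
    };

Each interrupt is then wired to such a chip with irq_set_chip_and_handler(i, &my_pic_irq_type, handle_level_irq), which is exactly what the init loops in these hunks now do.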
diff --git a/arch/alpha/kernel/irq_impl.h b/arch/alpha/kernel/irq_impl.h index b63ccd7386f1..d507a234b05d 100644 --- a/arch/alpha/kernel/irq_impl.h +++ b/arch/alpha/kernel/irq_impl.h | |||
@@ -31,11 +31,9 @@ extern void init_rtc_irq(void); | |||
31 | 31 | ||
32 | extern void common_init_isa_dma(void); | 32 | extern void common_init_isa_dma(void); |
33 | 33 | ||
34 | extern void i8259a_enable_irq(unsigned int); | 34 | extern void i8259a_enable_irq(struct irq_data *d); |
35 | extern void i8259a_disable_irq(unsigned int); | 35 | extern void i8259a_disable_irq(struct irq_data *d); |
36 | extern void i8259a_mask_and_ack_irq(unsigned int); | 36 | extern void i8259a_mask_and_ack_irq(struct irq_data *d); |
37 | extern unsigned int i8259a_startup_irq(unsigned int); | ||
38 | extern void i8259a_end_irq(unsigned int); | ||
39 | extern struct irq_chip i8259a_irq_type; | 37 | extern struct irq_chip i8259a_irq_type; |
40 | extern void init_i8259a_irqs(void); | 38 | extern void init_i8259a_irqs(void); |
41 | 39 | ||
diff --git a/arch/alpha/kernel/irq_pyxis.c b/arch/alpha/kernel/irq_pyxis.c index 989ce46a0cf3..13c97a5b31e8 100644 --- a/arch/alpha/kernel/irq_pyxis.c +++ b/arch/alpha/kernel/irq_pyxis.c | |||
@@ -29,35 +29,21 @@ pyxis_update_irq_hw(unsigned long mask) | |||
29 | } | 29 | } |
30 | 30 | ||
31 | static inline void | 31 | static inline void |
32 | pyxis_enable_irq(unsigned int irq) | 32 | pyxis_enable_irq(struct irq_data *d) |
33 | { | 33 | { |
34 | pyxis_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16)); | 34 | pyxis_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16)); |
35 | } | 35 | } |
36 | 36 | ||
37 | static void | 37 | static void |
38 | pyxis_disable_irq(unsigned int irq) | 38 | pyxis_disable_irq(struct irq_data *d) |
39 | { | 39 | { |
40 | pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16))); | 40 | pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16))); |
41 | } | ||
42 | |||
43 | static unsigned int | ||
44 | pyxis_startup_irq(unsigned int irq) | ||
45 | { | ||
46 | pyxis_enable_irq(irq); | ||
47 | return 0; | ||
48 | } | ||
49 | |||
50 | static void | ||
51 | pyxis_end_irq(unsigned int irq) | ||
52 | { | ||
53 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
54 | pyxis_enable_irq(irq); | ||
55 | } | 41 | } |
56 | 42 | ||
57 | static void | 43 | static void |
58 | pyxis_mask_and_ack_irq(unsigned int irq) | 44 | pyxis_mask_and_ack_irq(struct irq_data *d) |
59 | { | 45 | { |
60 | unsigned long bit = 1UL << (irq - 16); | 46 | unsigned long bit = 1UL << (d->irq - 16); |
61 | unsigned long mask = cached_irq_mask &= ~bit; | 47 | unsigned long mask = cached_irq_mask &= ~bit; |
62 | 48 | ||
63 | /* Disable the interrupt. */ | 49 | /* Disable the interrupt. */ |
@@ -72,12 +58,9 @@ pyxis_mask_and_ack_irq(unsigned int irq) | |||
72 | 58 | ||
73 | static struct irq_chip pyxis_irq_type = { | 59 | static struct irq_chip pyxis_irq_type = { |
74 | .name = "PYXIS", | 60 | .name = "PYXIS", |
75 | .startup = pyxis_startup_irq, | 61 | .irq_mask_ack = pyxis_mask_and_ack_irq, |
76 | .shutdown = pyxis_disable_irq, | 62 | .irq_mask = pyxis_disable_irq, |
77 | .enable = pyxis_enable_irq, | 63 | .irq_unmask = pyxis_enable_irq, |
78 | .disable = pyxis_disable_irq, | ||
79 | .ack = pyxis_mask_and_ack_irq, | ||
80 | .end = pyxis_end_irq, | ||
81 | }; | 64 | }; |
82 | 65 | ||
83 | void | 66 | void |
@@ -119,8 +102,8 @@ init_pyxis_irqs(unsigned long ignore_mask) | |||
119 | for (i = 16; i < 48; ++i) { | 102 | for (i = 16; i < 48; ++i) { |
120 | if ((ignore_mask >> i) & 1) | 103 | if ((ignore_mask >> i) & 1) |
121 | continue; | 104 | continue; |
122 | irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; | 105 | irq_set_chip_and_handler(i, &pyxis_irq_type, handle_level_irq); |
123 | irq_desc[i].chip = &pyxis_irq_type; | 106 | irq_set_status_flags(i, IRQ_LEVEL); |
124 | } | 107 | } |
125 | 108 | ||
126 | setup_irq(16+7, &isa_cascade_irqaction); | 109 | setup_irq(16+7, &isa_cascade_irqaction); |
diff --git a/arch/alpha/kernel/irq_srm.c b/arch/alpha/kernel/irq_srm.c index d63e93e1e8bf..a79fa30e7552 100644 --- a/arch/alpha/kernel/irq_srm.c +++ b/arch/alpha/kernel/irq_srm.c | |||
@@ -18,44 +18,27 @@ | |||
18 | DEFINE_SPINLOCK(srm_irq_lock); | 18 | DEFINE_SPINLOCK(srm_irq_lock); |
19 | 19 | ||
20 | static inline void | 20 | static inline void |
21 | srm_enable_irq(unsigned int irq) | 21 | srm_enable_irq(struct irq_data *d) |
22 | { | 22 | { |
23 | spin_lock(&srm_irq_lock); | 23 | spin_lock(&srm_irq_lock); |
24 | cserve_ena(irq - 16); | 24 | cserve_ena(d->irq - 16); |
25 | spin_unlock(&srm_irq_lock); | 25 | spin_unlock(&srm_irq_lock); |
26 | } | 26 | } |
27 | 27 | ||
28 | static void | 28 | static void |
29 | srm_disable_irq(unsigned int irq) | 29 | srm_disable_irq(struct irq_data *d) |
30 | { | 30 | { |
31 | spin_lock(&srm_irq_lock); | 31 | spin_lock(&srm_irq_lock); |
32 | cserve_dis(irq - 16); | 32 | cserve_dis(d->irq - 16); |
33 | spin_unlock(&srm_irq_lock); | 33 | spin_unlock(&srm_irq_lock); |
34 | } | 34 | } |
35 | 35 | ||
36 | static unsigned int | ||
37 | srm_startup_irq(unsigned int irq) | ||
38 | { | ||
39 | srm_enable_irq(irq); | ||
40 | return 0; | ||
41 | } | ||
42 | |||
43 | static void | ||
44 | srm_end_irq(unsigned int irq) | ||
45 | { | ||
46 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
47 | srm_enable_irq(irq); | ||
48 | } | ||
49 | |||
50 | /* Handle interrupts from the SRM, assuming no additional weirdness. */ | 36 | /* Handle interrupts from the SRM, assuming no additional weirdness. */ |
51 | static struct irq_chip srm_irq_type = { | 37 | static struct irq_chip srm_irq_type = { |
52 | .name = "SRM", | 38 | .name = "SRM", |
53 | .startup = srm_startup_irq, | 39 | .irq_unmask = srm_enable_irq, |
54 | .shutdown = srm_disable_irq, | 40 | .irq_mask = srm_disable_irq, |
55 | .enable = srm_enable_irq, | 41 | .irq_mask_ack = srm_disable_irq, |
56 | .disable = srm_disable_irq, | ||
57 | .ack = srm_disable_irq, | ||
58 | .end = srm_end_irq, | ||
59 | }; | 42 | }; |
60 | 43 | ||
61 | void __init | 44 | void __init |
@@ -68,8 +51,8 @@ init_srm_irqs(long max, unsigned long ignore_mask) | |||
68 | for (i = 16; i < max; ++i) { | 51 | for (i = 16; i < max; ++i) { |
69 | if (i < 64 && ((ignore_mask >> i) & 1)) | 52 | if (i < 64 && ((ignore_mask >> i) & 1)) |
70 | continue; | 53 | continue; |
71 | irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; | 54 | irq_set_chip_and_handler(i, &srm_irq_type, handle_level_irq); |
72 | irq_desc[i].chip = &srm_irq_type; | 55 | irq_set_status_flags(i, IRQ_LEVEL); |
73 | } | 56 | } |
74 | } | 57 | } |
75 | 58 | ||
diff --git a/arch/alpha/kernel/machvec_impl.h b/arch/alpha/kernel/machvec_impl.h index 512685f78097..7fa62488bd16 100644 --- a/arch/alpha/kernel/machvec_impl.h +++ b/arch/alpha/kernel/machvec_impl.h | |||
@@ -25,6 +25,9 @@ | |||
25 | #ifdef MCPCIA_ONE_HAE_WINDOW | 25 | #ifdef MCPCIA_ONE_HAE_WINDOW |
26 | #define MCPCIA_HAE_ADDRESS (&alpha_mv.hae_cache) | 26 | #define MCPCIA_HAE_ADDRESS (&alpha_mv.hae_cache) |
27 | #endif | 27 | #endif |
28 | #ifdef T2_ONE_HAE_WINDOW | ||
29 | #define T2_HAE_ADDRESS (&alpha_mv.hae_cache) | ||
30 | #endif | ||
28 | 31 | ||
29 | /* Only a few systems don't define IACK_SC, handling all interrupts through | 32 | /* Only a few systems don't define IACK_SC, handling all interrupts through |
30 | the SRM console. But splitting out that one case from IO() below | 33 | the SRM console. But splitting out that one case from IO() below |
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index 547e8b84b2f7..326f0a2d56e5 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c | |||
@@ -230,44 +230,24 @@ linux_to_osf_statfs(struct kstatfs *linux_stat, struct osf_statfs __user *osf_st | |||
230 | return copy_to_user(osf_stat, &tmp_stat, bufsiz) ? -EFAULT : 0; | 230 | return copy_to_user(osf_stat, &tmp_stat, bufsiz) ? -EFAULT : 0; |
231 | } | 231 | } |
232 | 232 | ||
233 | static int | 233 | SYSCALL_DEFINE3(osf_statfs, const char __user *, pathname, |
234 | do_osf_statfs(struct path *path, struct osf_statfs __user *buffer, | 234 | struct osf_statfs __user *, buffer, unsigned long, bufsiz) |
235 | unsigned long bufsiz) | ||
236 | { | 235 | { |
237 | struct kstatfs linux_stat; | 236 | struct kstatfs linux_stat; |
238 | int error = vfs_statfs(path, &linux_stat); | 237 | int error = user_statfs(pathname, &linux_stat); |
239 | if (!error) | 238 | if (!error) |
240 | error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz); | 239 | error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz); |
241 | return error; | 240 | return error; |
242 | } | 241 | } |
243 | 242 | ||
244 | SYSCALL_DEFINE3(osf_statfs, const char __user *, pathname, | ||
245 | struct osf_statfs __user *, buffer, unsigned long, bufsiz) | ||
246 | { | ||
247 | struct path path; | ||
248 | int retval; | ||
249 | |||
250 | retval = user_path(pathname, &path); | ||
251 | if (!retval) { | ||
252 | retval = do_osf_statfs(&path, buffer, bufsiz); | ||
253 | path_put(&path); | ||
254 | } | ||
255 | return retval; | ||
256 | } | ||
257 | |||
258 | SYSCALL_DEFINE3(osf_fstatfs, unsigned long, fd, | 243 | SYSCALL_DEFINE3(osf_fstatfs, unsigned long, fd, |
259 | struct osf_statfs __user *, buffer, unsigned long, bufsiz) | 244 | struct osf_statfs __user *, buffer, unsigned long, bufsiz) |
260 | { | 245 | { |
261 | struct file *file; | 246 | struct kstatfs linux_stat; |
262 | int retval; | 247 | int error = fd_statfs(fd, &linux_stat); |
263 | 248 | if (!error) | |
264 | retval = -EBADF; | 249 | error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz); |
265 | file = fget(fd); | 250 | return error; |
266 | if (file) { | ||
267 | retval = do_osf_statfs(&file->f_path, buffer, bufsiz); | ||
268 | fput(file); | ||
269 | } | ||
270 | return retval; | ||
271 | } | 251 | } |
272 | 252 | ||
273 | /* | 253 | /* |
@@ -429,7 +409,7 @@ SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen) | |||
429 | return -EFAULT; | 409 | return -EFAULT; |
430 | 410 | ||
431 | len = namelen; | 411 | len = namelen; |
432 | if (namelen > 32) | 412 | if (len > 32) |
433 | len = 32; | 413 | len = 32; |
434 | 414 | ||
435 | down_read(&uts_sem); | 415 | down_read(&uts_sem); |
@@ -614,7 +594,7 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count) | |||
614 | down_read(&uts_sem); | 594 | down_read(&uts_sem); |
615 | res = sysinfo_table[offset]; | 595 | res = sysinfo_table[offset]; |
616 | len = strlen(res)+1; | 596 | len = strlen(res)+1; |
617 | if (len > count) | 597 | if ((unsigned long)len > (unsigned long)count) |
618 | len = count; | 598 | len = count; |
619 | if (copy_to_user(buf, res, len)) | 599 | if (copy_to_user(buf, res, len)) |
620 | err = -EFAULT; | 600 | err = -EFAULT; |
@@ -669,7 +649,7 @@ SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer, | |||
669 | return 1; | 649 | return 1; |
670 | 650 | ||
671 | case GSI_GET_HWRPB: | 651 | case GSI_GET_HWRPB: |
672 | if (nbytes < sizeof(*hwrpb)) | 652 | if (nbytes > sizeof(*hwrpb)) |
673 | return -EINVAL; | 653 | return -EINVAL; |
674 | if (copy_to_user(buffer, hwrpb, nbytes) != 0) | 654 | if (copy_to_user(buffer, hwrpb, nbytes) != 0) |
675 | return -EFAULT; | 655 | return -EFAULT; |
@@ -951,9 +931,6 @@ SYSCALL_DEFINE2(osf_utimes, const char __user *, filename, | |||
951 | return do_utimes(AT_FDCWD, filename, tvs ? tv : NULL, 0); | 931 | return do_utimes(AT_FDCWD, filename, tvs ? tv : NULL, 0); |
952 | } | 932 | } |
953 | 933 | ||
954 | #define MAX_SELECT_SECONDS \ | ||
955 | ((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1) | ||
956 | |||
957 | SYSCALL_DEFINE5(osf_select, int, n, fd_set __user *, inp, fd_set __user *, outp, | 934 | SYSCALL_DEFINE5(osf_select, int, n, fd_set __user *, inp, fd_set __user *, outp, |
958 | fd_set __user *, exp, struct timeval32 __user *, tvp) | 935 | fd_set __user *, exp, struct timeval32 __user *, tvp) |
959 | { | 936 | { |
@@ -1031,6 +1008,7 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options, | |||
1031 | { | 1008 | { |
1032 | struct rusage r; | 1009 | struct rusage r; |
1033 | long ret, err; | 1010 | long ret, err; |
1011 | unsigned int status = 0; | ||
1034 | mm_segment_t old_fs; | 1012 | mm_segment_t old_fs; |
1035 | 1013 | ||
1036 | if (!ur) | 1014 | if (!ur) |
@@ -1039,13 +1017,15 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options, | |||
1039 | old_fs = get_fs(); | 1017 | old_fs = get_fs(); |
1040 | 1018 | ||
1041 | set_fs (KERNEL_DS); | 1019 | set_fs (KERNEL_DS); |
1042 | ret = sys_wait4(pid, ustatus, options, (struct rusage __user *) &r); | 1020 | ret = sys_wait4(pid, (unsigned int __user *) &status, options, |
1021 | (struct rusage __user *) &r); | ||
1043 | set_fs (old_fs); | 1022 | set_fs (old_fs); |
1044 | 1023 | ||
1045 | if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur))) | 1024 | if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur))) |
1046 | return -EFAULT; | 1025 | return -EFAULT; |
1047 | 1026 | ||
1048 | err = 0; | 1027 | err = 0; |
1028 | err |= put_user(status, ustatus); | ||
1049 | err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec); | 1029 | err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec); |
1050 | err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec); | 1030 | err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec); |
1051 | err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec); | 1031 | err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec); |
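The osf_statfs/osf_fstatfs rewrite above drops the open-coded user_path()/fget() lookups in favour of the user_statfs() and fd_statfs() helpers, which perform the lookup, call vfs_statfs() and release the reference internally, leaving only the OSF conversion in the syscall body. A small sketch of the resulting shape; the wrapper name and the field it copies are invented for illustration.

    #include <linux/statfs.h>
    #include <linux/errno.h>

    /* Hypothetical helper: fetch only the block count for a user-supplied path. */
    static int my_path_block_count(const char __user *pathname, u64 *blocks)
    {
            struct kstatfs st;
            int error = user_statfs(pathname, &st);   /* lookup + vfs_statfs + cleanup */

            if (!error)
                    *blocks = st.f_blocks;
            return error;
    }

fd_statfs(fd, &st) is the descriptor-based twin used by osf_fstatfs in the hunk above.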
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c index d1dbd9acd1df..022c2748fa41 100644 --- a/arch/alpha/kernel/pci_iommu.c +++ b/arch/alpha/kernel/pci_iommu.c | |||
@@ -223,7 +223,7 @@ iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n) | |||
223 | */ | 223 | */ |
224 | static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask) | 224 | static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask) |
225 | { | 225 | { |
226 | dma64_addr_t dac_offset = alpha_mv.pci_dac_offset; | 226 | dma_addr_t dac_offset = alpha_mv.pci_dac_offset; |
227 | int ok = 1; | 227 | int ok = 1; |
228 | 228 | ||
229 | /* If this is not set, the machine doesn't support DAC at all. */ | 229 | /* If this is not set, the machine doesn't support DAC at all. */ |
@@ -756,7 +756,7 @@ static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg, | |||
756 | spin_lock_irqsave(&arena->lock, flags); | 756 | spin_lock_irqsave(&arena->lock, flags); |
757 | 757 | ||
758 | for (end = sg + nents; sg < end; ++sg) { | 758 | for (end = sg + nents; sg < end; ++sg) { |
759 | dma64_addr_t addr; | 759 | dma_addr_t addr; |
760 | size_t size; | 760 | size_t size; |
761 | long npages, ofs; | 761 | long npages, ofs; |
762 | dma_addr_t tend; | 762 | dma_addr_t tend; |
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c index 85d8e4f58c83..90561c45e7d8 100644 --- a/arch/alpha/kernel/perf_event.c +++ b/arch/alpha/kernel/perf_event.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
15 | #include <linux/kdebug.h> | 15 | #include <linux/kdebug.h> |
16 | #include <linux/mutex.h> | 16 | #include <linux/mutex.h> |
17 | #include <linux/init.h> | ||
17 | 18 | ||
18 | #include <asm/hwrpb.h> | 19 | #include <asm/hwrpb.h> |
19 | #include <asm/atomic.h> | 20 | #include <asm/atomic.h> |
@@ -307,7 +308,7 @@ again: | |||
307 | new_raw_count) != prev_raw_count) | 308 | new_raw_count) != prev_raw_count) |
308 | goto again; | 309 | goto again; |
309 | 310 | ||
310 | delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf; | 311 | delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf; |
311 | 312 | ||
312 | /* It is possible on very rare occasions that the PMC has overflowed | 313 | /* It is possible on very rare occasions that the PMC has overflowed |
313 | * but the interrupt is yet to come. Detect and fix this situation. | 314 | * but the interrupt is yet to come. Detect and fix this situation. |
@@ -402,14 +403,13 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc) | |||
402 | struct hw_perf_event *hwc = &pe->hw; | 403 | struct hw_perf_event *hwc = &pe->hw; |
403 | int idx = hwc->idx; | 404 | int idx = hwc->idx; |
404 | 405 | ||
405 | if (cpuc->current_idx[j] != PMC_NO_INDEX) { | 406 | if (cpuc->current_idx[j] == PMC_NO_INDEX) { |
406 | cpuc->idx_mask |= (1<<cpuc->current_idx[j]); | 407 | alpha_perf_event_set_period(pe, hwc, idx); |
407 | continue; | 408 | cpuc->current_idx[j] = idx; |
408 | } | 409 | } |
409 | 410 | ||
410 | alpha_perf_event_set_period(pe, hwc, idx); | 411 | if (!(hwc->state & PERF_HES_STOPPED)) |
411 | cpuc->current_idx[j] = idx; | 412 | cpuc->idx_mask |= (1<<cpuc->current_idx[j]); |
412 | cpuc->idx_mask |= (1<<cpuc->current_idx[j]); | ||
413 | } | 413 | } |
414 | cpuc->config = cpuc->event[0]->hw.config_base; | 414 | cpuc->config = cpuc->event[0]->hw.config_base; |
415 | } | 415 | } |
@@ -420,12 +420,13 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc) | |||
420 | * - this function is called from outside this module via the pmu struct | 420 | * - this function is called from outside this module via the pmu struct |
421 | * returned from perf event initialisation. | 421 | * returned from perf event initialisation. |
422 | */ | 422 | */ |
423 | static int alpha_pmu_enable(struct perf_event *event) | 423 | static int alpha_pmu_add(struct perf_event *event, int flags) |
424 | { | 424 | { |
425 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 425 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
426 | struct hw_perf_event *hwc = &event->hw; | ||
426 | int n0; | 427 | int n0; |
427 | int ret; | 428 | int ret; |
428 | unsigned long flags; | 429 | unsigned long irq_flags; |
429 | 430 | ||
430 | /* | 431 | /* |
431 | * The Sparc code has the IRQ disable first followed by the perf | 432 | * The Sparc code has the IRQ disable first followed by the perf |
@@ -435,8 +436,8 @@ static int alpha_pmu_enable(struct perf_event *event) | |||
435 | * nevertheless we disable the PMCs first to enable a potential | 436 | * nevertheless we disable the PMCs first to enable a potential |
436 | * final PMI to occur before we disable interrupts. | 437 | * final PMI to occur before we disable interrupts. |
437 | */ | 438 | */ |
438 | perf_disable(); | 439 | perf_pmu_disable(event->pmu); |
439 | local_irq_save(flags); | 440 | local_irq_save(irq_flags); |
440 | 441 | ||
441 | /* Default to error to be returned */ | 442 | /* Default to error to be returned */ |
442 | ret = -EAGAIN; | 443 | ret = -EAGAIN; |
@@ -455,8 +456,12 @@ static int alpha_pmu_enable(struct perf_event *event) | |||
455 | } | 456 | } |
456 | } | 457 | } |
457 | 458 | ||
458 | local_irq_restore(flags); | 459 | hwc->state = PERF_HES_UPTODATE; |
459 | perf_enable(); | 460 | if (!(flags & PERF_EF_START)) |
461 | hwc->state |= PERF_HES_STOPPED; | ||
462 | |||
463 | local_irq_restore(irq_flags); | ||
464 | perf_pmu_enable(event->pmu); | ||
460 | 465 | ||
461 | return ret; | 466 | return ret; |
462 | } | 467 | } |
@@ -467,15 +472,15 @@ static int alpha_pmu_enable(struct perf_event *event) | |||
467 | * - this function is called from outside this module via the pmu struct | 472 | * - this function is called from outside this module via the pmu struct |
468 | * returned from perf event initialisation. | 473 | * returned from perf event initialisation. |
469 | */ | 474 | */ |
470 | static void alpha_pmu_disable(struct perf_event *event) | 475 | static void alpha_pmu_del(struct perf_event *event, int flags) |
471 | { | 476 | { |
472 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 477 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
473 | struct hw_perf_event *hwc = &event->hw; | 478 | struct hw_perf_event *hwc = &event->hw; |
474 | unsigned long flags; | 479 | unsigned long irq_flags; |
475 | int j; | 480 | int j; |
476 | 481 | ||
477 | perf_disable(); | 482 | perf_pmu_disable(event->pmu); |
478 | local_irq_save(flags); | 483 | local_irq_save(irq_flags); |
479 | 484 | ||
480 | for (j = 0; j < cpuc->n_events; j++) { | 485 | for (j = 0; j < cpuc->n_events; j++) { |
481 | if (event == cpuc->event[j]) { | 486 | if (event == cpuc->event[j]) { |
@@ -501,8 +506,8 @@ static void alpha_pmu_disable(struct perf_event *event) | |||
501 | } | 506 | } |
502 | } | 507 | } |
503 | 508 | ||
504 | local_irq_restore(flags); | 509 | local_irq_restore(irq_flags); |
505 | perf_enable(); | 510 | perf_pmu_enable(event->pmu); |
506 | } | 511 | } |
507 | 512 | ||
508 | 513 | ||
@@ -514,13 +519,44 @@ static void alpha_pmu_read(struct perf_event *event) | |||
514 | } | 519 | } |
515 | 520 | ||
516 | 521 | ||
517 | static void alpha_pmu_unthrottle(struct perf_event *event) | 522 | static void alpha_pmu_stop(struct perf_event *event, int flags) |
523 | { | ||
524 | struct hw_perf_event *hwc = &event->hw; | ||
525 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
526 | |||
527 | if (!(hwc->state & PERF_HES_STOPPED)) { | ||
528 | cpuc->idx_mask &= ~(1UL<<hwc->idx); | ||
529 | hwc->state |= PERF_HES_STOPPED; | ||
530 | } | ||
531 | |||
532 | if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { | ||
533 | alpha_perf_event_update(event, hwc, hwc->idx, 0); | ||
534 | hwc->state |= PERF_HES_UPTODATE; | ||
535 | } | ||
536 | |||
537 | if (cpuc->enabled) | ||
538 | wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx)); | ||
539 | } | ||
540 | |||
541 | |||
542 | static void alpha_pmu_start(struct perf_event *event, int flags) | ||
518 | { | 543 | { |
519 | struct hw_perf_event *hwc = &event->hw; | 544 | struct hw_perf_event *hwc = &event->hw; |
520 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 545 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
521 | 546 | ||
547 | if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) | ||
548 | return; | ||
549 | |||
550 | if (flags & PERF_EF_RELOAD) { | ||
551 | WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); | ||
552 | alpha_perf_event_set_period(event, hwc, hwc->idx); | ||
553 | } | ||
554 | |||
555 | hwc->state = 0; | ||
556 | |||
522 | cpuc->idx_mask |= 1UL<<hwc->idx; | 557 | cpuc->idx_mask |= 1UL<<hwc->idx; |
523 | wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx)); | 558 | if (cpuc->enabled) |
559 | wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx)); | ||
524 | } | 560 | } |
525 | 561 | ||
526 | 562 | ||
@@ -642,39 +678,36 @@ static int __hw_perf_event_init(struct perf_event *event) | |||
642 | return 0; | 678 | return 0; |
643 | } | 679 | } |
644 | 680 | ||
645 | static const struct pmu pmu = { | ||
646 | .enable = alpha_pmu_enable, | ||
647 | .disable = alpha_pmu_disable, | ||
648 | .read = alpha_pmu_read, | ||
649 | .unthrottle = alpha_pmu_unthrottle, | ||
650 | }; | ||
651 | |||
652 | |||
653 | /* | 681 | /* |
654 | * Main entry point to initialise a HW performance event. | 682 | * Main entry point to initialise a HW performance event. |
655 | */ | 683 | */ |
656 | const struct pmu *hw_perf_event_init(struct perf_event *event) | 684 | static int alpha_pmu_event_init(struct perf_event *event) |
657 | { | 685 | { |
658 | int err; | 686 | int err; |
659 | 687 | ||
688 | switch (event->attr.type) { | ||
689 | case PERF_TYPE_RAW: | ||
690 | case PERF_TYPE_HARDWARE: | ||
691 | case PERF_TYPE_HW_CACHE: | ||
692 | break; | ||
693 | |||
694 | default: | ||
695 | return -ENOENT; | ||
696 | } | ||
697 | |||
660 | if (!alpha_pmu) | 698 | if (!alpha_pmu) |
661 | return ERR_PTR(-ENODEV); | 699 | return -ENODEV; |
662 | 700 | ||
663 | /* Do the real initialisation work. */ | 701 | /* Do the real initialisation work. */ |
664 | err = __hw_perf_event_init(event); | 702 | err = __hw_perf_event_init(event); |
665 | 703 | ||
666 | if (err) | 704 | return err; |
667 | return ERR_PTR(err); | ||
668 | |||
669 | return &pmu; | ||
670 | } | 705 | } |
671 | 706 | ||
672 | |||
673 | |||
674 | /* | 707 | /* |
675 | * Main entry point - enable HW performance counters. | 708 | * Main entry point - enable HW performance counters. |
676 | */ | 709 | */ |
677 | void hw_perf_enable(void) | 710 | static void alpha_pmu_enable(struct pmu *pmu) |
678 | { | 711 | { |
679 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 712 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
680 | 713 | ||
@@ -700,7 +733,7 @@ void hw_perf_enable(void) | |||
700 | * Main entry point - disable HW performance counters. | 733 | * Main entry point - disable HW performance counters. |
701 | */ | 734 | */ |
702 | 735 | ||
703 | void hw_perf_disable(void) | 736 | static void alpha_pmu_disable(struct pmu *pmu) |
704 | { | 737 | { |
705 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 738 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
706 | 739 | ||
@@ -713,6 +746,17 @@ void hw_perf_disable(void) | |||
713 | wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask); | 746 | wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask); |
714 | } | 747 | } |
715 | 748 | ||
749 | static struct pmu pmu = { | ||
750 | .pmu_enable = alpha_pmu_enable, | ||
751 | .pmu_disable = alpha_pmu_disable, | ||
752 | .event_init = alpha_pmu_event_init, | ||
753 | .add = alpha_pmu_add, | ||
754 | .del = alpha_pmu_del, | ||
755 | .start = alpha_pmu_start, | ||
756 | .stop = alpha_pmu_stop, | ||
757 | .read = alpha_pmu_read, | ||
758 | }; | ||
759 | |||
716 | 760 | ||
717 | /* | 761 | /* |
718 | * Main entry point - don't know when this is called but it | 762 | * Main entry point - don't know when this is called but it |
@@ -766,7 +810,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr, | |||
766 | wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask); | 810 | wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask); |
767 | 811 | ||
768 | /* la_ptr is the counter that overflowed. */ | 812 | /* la_ptr is the counter that overflowed. */ |
769 | if (unlikely(la_ptr >= perf_max_events)) { | 813 | if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) { |
770 | /* This should never occur! */ | 814 | /* This should never occur! */ |
771 | irq_err_count++; | 815 | irq_err_count++; |
772 | pr_warning("PMI: silly index %ld\n", la_ptr); | 816 | pr_warning("PMI: silly index %ld\n", la_ptr); |
@@ -807,7 +851,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr, | |||
807 | /* Interrupts coming too quickly; "throttle" the | 851 | /* Interrupts coming too quickly; "throttle" the |
808 | * counter, i.e., disable it for a little while. | 852 | * counter, i.e., disable it for a little while. |
809 | */ | 853 | */ |
810 | cpuc->idx_mask &= ~(1UL<<idx); | 854 | alpha_pmu_stop(event, 0); |
811 | } | 855 | } |
812 | } | 856 | } |
813 | wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask); | 857 | wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask); |
@@ -820,13 +864,13 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr, | |||
820 | /* | 864 | /* |
821 | * Init call to initialise performance events at kernel startup. | 865 | * Init call to initialise performance events at kernel startup. |
822 | */ | 866 | */ |
823 | void __init init_hw_perf_events(void) | 867 | int __init init_hw_perf_events(void) |
824 | { | 868 | { |
825 | pr_info("Performance events: "); | 869 | pr_info("Performance events: "); |
826 | 870 | ||
827 | if (!supported_cpu()) { | 871 | if (!supported_cpu()) { |
828 | pr_cont("No support for your CPU.\n"); | 872 | pr_cont("No support for your CPU.\n"); |
829 | return; | 873 | return 0; |
830 | } | 874 | } |
831 | 875 | ||
832 | pr_cont("Supported CPU type!\n"); | 876 | pr_cont("Supported CPU type!\n"); |
@@ -837,6 +881,9 @@ void __init init_hw_perf_events(void) | |||
837 | 881 | ||
838 | /* And set up PMU specification */ | 882 | /* And set up PMU specification */ |
839 | alpha_pmu = &ev67_pmu; | 883 | alpha_pmu = &ev67_pmu; |
840 | perf_max_events = alpha_pmu->num_pmcs; | ||
841 | } | ||
842 | 884 | ||
885 | perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); | ||
886 | |||
887 | return 0; | ||
888 | } | ||
889 | early_initcall(init_hw_perf_events); | ||
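The perf_event.c changes above migrate the Alpha PMU from the old per-event enable/disable/unthrottle hooks and the global hw_perf_enable()/hw_perf_disable() entry points to the struct pmu interface: event_init claims supported event types, add/del attach and detach events, start/stop drive the PERF_HES_STOPPED and PERF_HES_UPTODATE state bits, and the whole PMU is registered once with perf_pmu_register(). A skeletal sketch of that interface with trivial placeholder bodies; the my_pmu_* names are invented and this is not the Alpha implementation.

    #include <linux/perf_event.h>
    #include <linux/init.h>
    #include <linux/errno.h>

    static void my_pmu_enable(struct pmu *pmu)  { /* start all counters on this CPU */ }
    static void my_pmu_disable(struct pmu *pmu) { /* stop all counters on this CPU  */ }

    static int my_pmu_event_init(struct perf_event *event)
    {
            /* Claim only the event types this PMU understands. */
            switch (event->attr.type) {
            case PERF_TYPE_RAW:
            case PERF_TYPE_HARDWARE:
            case PERF_TYPE_HW_CACHE:
                    return 0;
            default:
                    return -ENOENT;
            }
    }

    static int my_pmu_add(struct perf_event *event, int flags)
    {
            event->hw.state = PERF_HES_UPTODATE;
            if (!(flags & PERF_EF_START))
                    event->hw.state |= PERF_HES_STOPPED;  /* armed later via ->start() */
            return 0;
    }

    static void my_pmu_del(struct perf_event *event, int flags)   { }
    static void my_pmu_start(struct perf_event *event, int flags) { event->hw.state = 0; }
    static void my_pmu_stop(struct perf_event *event, int flags)  { event->hw.state |= PERF_HES_STOPPED; }
    static void my_pmu_read(struct perf_event *event)             { }

    static struct pmu my_pmu = {
            .pmu_enable  = my_pmu_enable,
            .pmu_disable = my_pmu_disable,
            .event_init  = my_pmu_event_init,
            .add         = my_pmu_add,
            .del         = my_pmu_del,
            .start       = my_pmu_start,
            .stop        = my_pmu_stop,
            .read        = my_pmu_read,
    };

    static int __init my_pmu_init(void)
    {
            /* Registering under "cpu" with PERF_TYPE_RAW makes this the default
             * hardware PMU, as the Alpha code now does at early_initcall time. */
            return perf_pmu_register(&my_pmu, "cpu", PERF_TYPE_RAW);
    }
    early_initcall(my_pmu_init);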
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index 3ec35066f1dc..838eac128409 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c | |||
@@ -121,7 +121,7 @@ common_shutdown_1(void *generic_ptr) | |||
121 | /* Wait for the secondaries to halt. */ | 121 | /* Wait for the secondaries to halt. */ |
122 | set_cpu_present(boot_cpuid, false); | 122 | set_cpu_present(boot_cpuid, false); |
123 | set_cpu_possible(boot_cpuid, false); | 123 | set_cpu_possible(boot_cpuid, false); |
124 | while (cpus_weight(cpu_present_map)) | 124 | while (cpumask_weight(cpu_present_mask)) |
125 | barrier(); | 125 | barrier(); |
126 | #endif | 126 | #endif |
127 | 127 | ||
diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c index baa903602f6a..e2af5eb59bb4 100644 --- a/arch/alpha/kernel/ptrace.c +++ b/arch/alpha/kernel/ptrace.c | |||
@@ -269,7 +269,8 @@ void ptrace_disable(struct task_struct *child) | |||
269 | user_disable_single_step(child); | 269 | user_disable_single_step(child); |
270 | } | 270 | } |
271 | 271 | ||
272 | long arch_ptrace(struct task_struct *child, long request, long addr, long data) | 272 | long arch_ptrace(struct task_struct *child, long request, |
273 | unsigned long addr, unsigned long data) | ||
273 | { | 274 | { |
274 | unsigned long tmp; | 275 | unsigned long tmp; |
275 | size_t copied; | 276 | size_t copied; |
@@ -292,7 +293,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
292 | case PTRACE_PEEKUSR: | 293 | case PTRACE_PEEKUSR: |
293 | force_successful_syscall_return(); | 294 | force_successful_syscall_return(); |
294 | ret = get_reg(child, addr); | 295 | ret = get_reg(child, addr); |
295 | DBG(DBG_MEM, ("peek $%ld->%#lx\n", addr, ret)); | 296 | DBG(DBG_MEM, ("peek $%lu->%#lx\n", addr, ret)); |
296 | break; | 297 | break; |
297 | 298 | ||
298 | /* When I and D space are separate, this will have to be fixed. */ | 299 | /* When I and D space are separate, this will have to be fixed. */ |
@@ -302,7 +303,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
302 | break; | 303 | break; |
303 | 304 | ||
304 | case PTRACE_POKEUSR: /* write the specified register */ | 305 | case PTRACE_POKEUSR: /* write the specified register */ |
305 | DBG(DBG_MEM, ("poke $%ld<-%#lx\n", addr, data)); | 306 | DBG(DBG_MEM, ("poke $%lu<-%#lx\n", addr, data)); |
306 | ret = put_reg(child, addr, data); | 307 | ret = put_reg(child, addr, data); |
307 | break; | 308 | break; |
308 | default: | 309 | default: |
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c index d2634e4476b4..cc0fd862cf26 100644 --- a/arch/alpha/kernel/setup.c +++ b/arch/alpha/kernel/setup.c | |||
@@ -1257,7 +1257,7 @@ show_cpuinfo(struct seq_file *f, void *slot) | |||
1257 | #ifdef CONFIG_SMP | 1257 | #ifdef CONFIG_SMP |
1258 | seq_printf(f, "cpus active\t\t: %u\n" | 1258 | seq_printf(f, "cpus active\t\t: %u\n" |
1259 | "cpu active mask\t\t: %016lx\n", | 1259 | "cpu active mask\t\t: %016lx\n", |
1260 | num_online_cpus(), cpus_addr(cpu_possible_map)[0]); | 1260 | num_online_cpus(), cpumask_bits(cpu_possible_mask)[0]); |
1261 | #endif | 1261 | #endif |
1262 | 1262 | ||
1263 | show_cache_size (f, "L1 Icache", alpha_l1i_cacheshape); | 1263 | show_cache_size (f, "L1 Icache", alpha_l1i_cacheshape); |
@@ -1404,8 +1404,6 @@ determine_cpu_caches (unsigned int cpu_type) | |||
1404 | case PCA56_CPU: | 1404 | case PCA56_CPU: |
1405 | case PCA57_CPU: | 1405 | case PCA57_CPU: |
1406 | { | 1406 | { |
1407 | unsigned long cbox_config, size; | ||
1408 | |||
1409 | if (cpu_type == PCA56_CPU) { | 1407 | if (cpu_type == PCA56_CPU) { |
1410 | L1I = CSHAPE(16*1024, 6, 1); | 1408 | L1I = CSHAPE(16*1024, 6, 1); |
1411 | L1D = CSHAPE(8*1024, 5, 1); | 1409 | L1D = CSHAPE(8*1024, 5, 1); |
@@ -1415,10 +1413,12 @@ determine_cpu_caches (unsigned int cpu_type) | |||
1415 | } | 1413 | } |
1416 | L3 = -1; | 1414 | L3 = -1; |
1417 | 1415 | ||
1416 | #if 0 | ||
1417 | unsigned long cbox_config, size; | ||
1418 | |||
1418 | cbox_config = *(vulp) phys_to_virt (0xfffff00008UL); | 1419 | cbox_config = *(vulp) phys_to_virt (0xfffff00008UL); |
1419 | size = 512*1024 * (1 << ((cbox_config >> 12) & 3)); | 1420 | size = 512*1024 * (1 << ((cbox_config >> 12) & 3)); |
1420 | 1421 | ||
1421 | #if 0 | ||
1422 | L2 = ((cbox_config >> 31) & 1 ? CSHAPE (size, 6, 1) : -1); | 1422 | L2 = ((cbox_config >> 31) & 1 ? CSHAPE (size, 6, 1) : -1); |
1423 | #else | 1423 | #else |
1424 | L2 = external_cache_probe(512*1024, 6); | 1424 | L2 = external_cache_probe(512*1024, 6); |
diff --git a/arch/alpha/kernel/smc37c93x.c b/arch/alpha/kernel/smc37c93x.c index 3e6a2893af9f..6886b834f487 100644 --- a/arch/alpha/kernel/smc37c93x.c +++ b/arch/alpha/kernel/smc37c93x.c | |||
@@ -79,7 +79,6 @@ | |||
79 | static unsigned long __init SMCConfigState(unsigned long baseAddr) | 79 | static unsigned long __init SMCConfigState(unsigned long baseAddr) |
80 | { | 80 | { |
81 | unsigned char devId; | 81 | unsigned char devId; |
82 | unsigned char devRev; | ||
83 | 82 | ||
84 | unsigned long configPort; | 83 | unsigned long configPort; |
85 | unsigned long indexPort; | 84 | unsigned long indexPort; |
@@ -100,7 +99,7 @@ static unsigned long __init SMCConfigState(unsigned long baseAddr) | |||
100 | devId = inb(dataPort); | 99 | devId = inb(dataPort); |
101 | if (devId == VALID_DEVICE_ID) { | 100 | if (devId == VALID_DEVICE_ID) { |
102 | outb(DEVICE_REV, indexPort); | 101 | outb(DEVICE_REV, indexPort); |
103 | devRev = inb(dataPort); | 102 | /* unsigned char devRev = */ inb(dataPort); |
104 | break; | 103 | break; |
105 | } | 104 | } |
106 | else | 105 | else |
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c index 42aa078a5e4d..d739703608fc 100644 --- a/arch/alpha/kernel/smp.c +++ b/arch/alpha/kernel/smp.c | |||
@@ -451,7 +451,7 @@ setup_smp(void) | |||
451 | } | 451 | } |
452 | 452 | ||
453 | printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n", | 453 | printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n", |
454 | smp_num_probed, cpu_present_map.bits[0]); | 454 | smp_num_probed, cpumask_bits(cpu_present_mask)[0]); |
455 | } | 455 | } |
456 | 456 | ||
457 | /* | 457 | /* |
@@ -585,8 +585,7 @@ handle_ipi(struct pt_regs *regs) | |||
585 | 585 | ||
586 | switch (which) { | 586 | switch (which) { |
587 | case IPI_RESCHEDULE: | 587 | case IPI_RESCHEDULE: |
588 | /* Reschedule callback. Everything to be done | 588 | scheduler_ipi(); |
589 | is done by the interrupt return path. */ | ||
590 | break; | 589 | break; |
591 | 590 | ||
592 | case IPI_CALL_FUNC: | 591 | case IPI_CALL_FUNC: |
@@ -630,8 +629,9 @@ smp_send_reschedule(int cpu) | |||
630 | void | 629 | void |
631 | smp_send_stop(void) | 630 | smp_send_stop(void) |
632 | { | 631 | { |
633 | cpumask_t to_whom = cpu_possible_map; | 632 | cpumask_t to_whom; |
634 | cpu_clear(smp_processor_id(), to_whom); | 633 | cpumask_copy(&to_whom, cpu_possible_mask); |
634 | cpumask_clear_cpu(smp_processor_id(), &to_whom); | ||
635 | #ifdef DEBUG_IPI_MSG | 635 | #ifdef DEBUG_IPI_MSG |
636 | if (hard_smp_processor_id() != boot_cpu_id) | 636 | if (hard_smp_processor_id() != boot_cpu_id) |
637 | printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n"); | 637 | printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n"); |
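The process.c and smp.c hunks above are part of the cpumask cleanup: cpumask_t values are no longer copied and mutated directly (cpu_possible_map, cpu_clear(), cpus_weight()); the code goes through the pointer-based accessors instead (cpu_possible_mask, cpumask_copy(), cpumask_clear_cpu(), cpumask_weight(), cpumask_bits()), which keeps working when the mask no longer fits in a word. A small sketch of the new idiom; the function name is invented.

    #include <linux/cpumask.h>
    #include <linux/smp.h>

    /* Build "every possible CPU except the caller" with the accessor API. */
    static void my_build_ipi_targets(cpumask_t *to_whom)
    {
            cpumask_copy(to_whom, cpu_possible_mask);        /* was: *to_whom = cpu_possible_map */
            cpumask_clear_cpu(smp_processor_id(), to_whom);  /* was: cpu_clear(cpu, *to_whom)    */
    }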
diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c index 20a30b8b9655..0e1439904cdb 100644 --- a/arch/alpha/kernel/sys_alcor.c +++ b/arch/alpha/kernel/sys_alcor.c | |||
@@ -44,59 +44,42 @@ alcor_update_irq_hw(unsigned long mask) | |||
44 | } | 44 | } |
45 | 45 | ||
46 | static inline void | 46 | static inline void |
47 | alcor_enable_irq(unsigned int irq) | 47 | alcor_enable_irq(struct irq_data *d) |
48 | { | 48 | { |
49 | alcor_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16)); | 49 | alcor_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16)); |
50 | } | 50 | } |
51 | 51 | ||
52 | static void | 52 | static void |
53 | alcor_disable_irq(unsigned int irq) | 53 | alcor_disable_irq(struct irq_data *d) |
54 | { | 54 | { |
55 | alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16))); | 55 | alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16))); |
56 | } | 56 | } |
57 | 57 | ||
58 | static void | 58 | static void |
59 | alcor_mask_and_ack_irq(unsigned int irq) | 59 | alcor_mask_and_ack_irq(struct irq_data *d) |
60 | { | 60 | { |
61 | alcor_disable_irq(irq); | 61 | alcor_disable_irq(d); |
62 | 62 | ||
63 | /* On ALCOR/XLT, need to dismiss interrupt via GRU. */ | 63 | /* On ALCOR/XLT, need to dismiss interrupt via GRU. */ |
64 | *(vuip)GRU_INT_CLEAR = 1 << (irq - 16); mb(); | 64 | *(vuip)GRU_INT_CLEAR = 1 << (d->irq - 16); mb(); |
65 | *(vuip)GRU_INT_CLEAR = 0; mb(); | 65 | *(vuip)GRU_INT_CLEAR = 0; mb(); |
66 | } | 66 | } |
67 | 67 | ||
68 | static unsigned int | ||
69 | alcor_startup_irq(unsigned int irq) | ||
70 | { | ||
71 | alcor_enable_irq(irq); | ||
72 | return 0; | ||
73 | } | ||
74 | |||
75 | static void | 68 | static void |
76 | alcor_isa_mask_and_ack_irq(unsigned int irq) | 69 | alcor_isa_mask_and_ack_irq(struct irq_data *d) |
77 | { | 70 | { |
78 | i8259a_mask_and_ack_irq(irq); | 71 | i8259a_mask_and_ack_irq(d); |
79 | 72 | ||
80 | /* On ALCOR/XLT, need to dismiss interrupt via GRU. */ | 73 | /* On ALCOR/XLT, need to dismiss interrupt via GRU. */ |
81 | *(vuip)GRU_INT_CLEAR = 0x80000000; mb(); | 74 | *(vuip)GRU_INT_CLEAR = 0x80000000; mb(); |
82 | *(vuip)GRU_INT_CLEAR = 0; mb(); | 75 | *(vuip)GRU_INT_CLEAR = 0; mb(); |
83 | } | 76 | } |
84 | 77 | ||
85 | static void | ||
86 | alcor_end_irq(unsigned int irq) | ||
87 | { | ||
88 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
89 | alcor_enable_irq(irq); | ||
90 | } | ||
91 | |||
92 | static struct irq_chip alcor_irq_type = { | 78 | static struct irq_chip alcor_irq_type = { |
93 | .name = "ALCOR", | 79 | .name = "ALCOR", |
94 | .startup = alcor_startup_irq, | 80 | .irq_unmask = alcor_enable_irq, |
95 | .shutdown = alcor_disable_irq, | 81 | .irq_mask = alcor_disable_irq, |
96 | .enable = alcor_enable_irq, | 82 | .irq_mask_ack = alcor_mask_and_ack_irq, |
97 | .disable = alcor_disable_irq, | ||
98 | .ack = alcor_mask_and_ack_irq, | ||
99 | .end = alcor_end_irq, | ||
100 | }; | 83 | }; |
101 | 84 | ||
102 | static void | 85 | static void |
@@ -142,10 +125,10 @@ alcor_init_irq(void) | |||
142 | on while IRQ probing. */ | 125 | on while IRQ probing. */ |
143 | if (i >= 16+20 && i <= 16+30) | 126 | if (i >= 16+20 && i <= 16+30) |
144 | continue; | 127 | continue; |
145 | irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; | 128 | irq_set_chip_and_handler(i, &alcor_irq_type, handle_level_irq); |
146 | irq_desc[i].chip = &alcor_irq_type; | 129 | irq_set_status_flags(i, IRQ_LEVEL); |
147 | } | 130 | } |
148 | i8259a_irq_type.ack = alcor_isa_mask_and_ack_irq; | 131 | i8259a_irq_type.irq_ack = alcor_isa_mask_and_ack_irq; |
149 | 132 | ||
150 | init_i8259a_irqs(); | 133 | init_i8259a_irqs(); |
151 | common_init_isa_dma(); | 134 | common_init_isa_dma(); |
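One wrinkle in the ALCOR conversion above: the platform still overrides a single callback on the shared i8259 chip at init time so the GRU gets dismissed as well; only the field name changes from .ack to .irq_ack, and the callback now receives the irq_data. A minimal sketch of that override; the platform callback and init function are invented.

    #include <linux/irq.h>

    extern struct irq_chip i8259a_irq_type;                     /* shared ISA chip   */
    extern void i8259a_mask_and_ack_irq(struct irq_data *d);    /* from irq_impl.h   */

    static void my_isa_mask_and_ack(struct irq_data *d)
    {
            i8259a_mask_and_ack_irq(d);
            /* platform-specific dismiss of the interrupt would follow here */
    }

    static void __init my_platform_init_irq(void)
    {
            i8259a_irq_type.irq_ack = my_isa_mask_and_ack;
    }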
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c index 14c8898d19ec..c8c112d51584 100644 --- a/arch/alpha/kernel/sys_cabriolet.c +++ b/arch/alpha/kernel/sys_cabriolet.c | |||
@@ -46,39 +46,22 @@ cabriolet_update_irq_hw(unsigned int irq, unsigned long mask) | |||
46 | } | 46 | } |
47 | 47 | ||
48 | static inline void | 48 | static inline void |
49 | cabriolet_enable_irq(unsigned int irq) | 49 | cabriolet_enable_irq(struct irq_data *d) |
50 | { | 50 | { |
51 | cabriolet_update_irq_hw(irq, cached_irq_mask &= ~(1UL << irq)); | 51 | cabriolet_update_irq_hw(d->irq, cached_irq_mask &= ~(1UL << d->irq)); |
52 | } | 52 | } |
53 | 53 | ||
54 | static void | 54 | static void |
55 | cabriolet_disable_irq(unsigned int irq) | 55 | cabriolet_disable_irq(struct irq_data *d) |
56 | { | 56 | { |
57 | cabriolet_update_irq_hw(irq, cached_irq_mask |= 1UL << irq); | 57 | cabriolet_update_irq_hw(d->irq, cached_irq_mask |= 1UL << d->irq); |
58 | } | ||
59 | |||
60 | static unsigned int | ||
61 | cabriolet_startup_irq(unsigned int irq) | ||
62 | { | ||
63 | cabriolet_enable_irq(irq); | ||
64 | return 0; /* never anything pending */ | ||
65 | } | ||
66 | |||
67 | static void | ||
68 | cabriolet_end_irq(unsigned int irq) | ||
69 | { | ||
70 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
71 | cabriolet_enable_irq(irq); | ||
72 | } | 58 | } |
73 | 59 | ||
74 | static struct irq_chip cabriolet_irq_type = { | 60 | static struct irq_chip cabriolet_irq_type = { |
75 | .name = "CABRIOLET", | 61 | .name = "CABRIOLET", |
76 | .startup = cabriolet_startup_irq, | 62 | .irq_unmask = cabriolet_enable_irq, |
77 | .shutdown = cabriolet_disable_irq, | 63 | .irq_mask = cabriolet_disable_irq, |
78 | .enable = cabriolet_enable_irq, | 64 | .irq_mask_ack = cabriolet_disable_irq, |
79 | .disable = cabriolet_disable_irq, | ||
80 | .ack = cabriolet_disable_irq, | ||
81 | .end = cabriolet_end_irq, | ||
82 | }; | 65 | }; |
83 | 66 | ||
84 | static void | 67 | static void |
@@ -122,8 +105,9 @@ common_init_irq(void (*srm_dev_int)(unsigned long v)) | |||
122 | outb(0xff, 0x806); | 105 | outb(0xff, 0x806); |
123 | 106 | ||
124 | for (i = 16; i < 35; ++i) { | 107 | for (i = 16; i < 35; ++i) { |
125 | irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; | 108 | irq_set_chip_and_handler(i, &cabriolet_irq_type, |
126 | irq_desc[i].chip = &cabriolet_irq_type; | 109 | handle_level_irq); |
110 | irq_set_status_flags(i, IRQ_LEVEL); | ||
127 | } | 111 | } |
128 | } | 112 | } |
129 | 113 | ||
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c index 4026502ab707..f8856829c22a 100644 --- a/arch/alpha/kernel/sys_dp264.c +++ b/arch/alpha/kernel/sys_dp264.c | |||
@@ -98,69 +98,41 @@ tsunami_update_irq_hw(unsigned long mask) | |||
98 | } | 98 | } |
99 | 99 | ||
100 | static void | 100 | static void |
101 | dp264_enable_irq(unsigned int irq) | 101 | dp264_enable_irq(struct irq_data *d) |
102 | { | 102 | { |
103 | spin_lock(&dp264_irq_lock); | 103 | spin_lock(&dp264_irq_lock); |
104 | cached_irq_mask |= 1UL << irq; | 104 | cached_irq_mask |= 1UL << d->irq; |
105 | tsunami_update_irq_hw(cached_irq_mask); | 105 | tsunami_update_irq_hw(cached_irq_mask); |
106 | spin_unlock(&dp264_irq_lock); | 106 | spin_unlock(&dp264_irq_lock); |
107 | } | 107 | } |
108 | 108 | ||
109 | static void | 109 | static void |
110 | dp264_disable_irq(unsigned int irq) | 110 | dp264_disable_irq(struct irq_data *d) |
111 | { | 111 | { |
112 | spin_lock(&dp264_irq_lock); | 112 | spin_lock(&dp264_irq_lock); |
113 | cached_irq_mask &= ~(1UL << irq); | 113 | cached_irq_mask &= ~(1UL << d->irq); |
114 | tsunami_update_irq_hw(cached_irq_mask); | 114 | tsunami_update_irq_hw(cached_irq_mask); |
115 | spin_unlock(&dp264_irq_lock); | 115 | spin_unlock(&dp264_irq_lock); |
116 | } | 116 | } |
117 | 117 | ||
118 | static unsigned int | ||
119 | dp264_startup_irq(unsigned int irq) | ||
120 | { | ||
121 | dp264_enable_irq(irq); | ||
122 | return 0; /* never anything pending */ | ||
123 | } | ||
124 | |||
125 | static void | ||
126 | dp264_end_irq(unsigned int irq) | ||
127 | { | ||
128 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
129 | dp264_enable_irq(irq); | ||
130 | } | ||
131 | |||
132 | static void | 118 | static void |
133 | clipper_enable_irq(unsigned int irq) | 119 | clipper_enable_irq(struct irq_data *d) |
134 | { | 120 | { |
135 | spin_lock(&dp264_irq_lock); | 121 | spin_lock(&dp264_irq_lock); |
136 | cached_irq_mask |= 1UL << (irq - 16); | 122 | cached_irq_mask |= 1UL << (d->irq - 16); |
137 | tsunami_update_irq_hw(cached_irq_mask); | 123 | tsunami_update_irq_hw(cached_irq_mask); |
138 | spin_unlock(&dp264_irq_lock); | 124 | spin_unlock(&dp264_irq_lock); |
139 | } | 125 | } |
140 | 126 | ||
141 | static void | 127 | static void |
142 | clipper_disable_irq(unsigned int irq) | 128 | clipper_disable_irq(struct irq_data *d) |
143 | { | 129 | { |
144 | spin_lock(&dp264_irq_lock); | 130 | spin_lock(&dp264_irq_lock); |
145 | cached_irq_mask &= ~(1UL << (irq - 16)); | 131 | cached_irq_mask &= ~(1UL << (d->irq - 16)); |
146 | tsunami_update_irq_hw(cached_irq_mask); | 132 | tsunami_update_irq_hw(cached_irq_mask); |
147 | spin_unlock(&dp264_irq_lock); | 133 | spin_unlock(&dp264_irq_lock); |
148 | } | 134 | } |
149 | 135 | ||
150 | static unsigned int | ||
151 | clipper_startup_irq(unsigned int irq) | ||
152 | { | ||
153 | clipper_enable_irq(irq); | ||
154 | return 0; /* never anything pending */ | ||
155 | } | ||
156 | |||
157 | static void | ||
158 | clipper_end_irq(unsigned int irq) | ||
159 | { | ||
160 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
161 | clipper_enable_irq(irq); | ||
162 | } | ||
163 | |||
164 | static void | 136 | static void |
165 | cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity) | 137 | cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity) |
166 | { | 138 | { |
@@ -168,7 +140,7 @@ cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity) | |||
168 | 140 | ||
169 | for (cpu = 0; cpu < 4; cpu++) { | 141 | for (cpu = 0; cpu < 4; cpu++) { |
170 | unsigned long aff = cpu_irq_affinity[cpu]; | 142 | unsigned long aff = cpu_irq_affinity[cpu]; |
171 | if (cpu_isset(cpu, affinity)) | 143 | if (cpumask_test_cpu(cpu, &affinity)) |
172 | aff |= 1UL << irq; | 144 | aff |= 1UL << irq; |
173 | else | 145 | else |
174 | aff &= ~(1UL << irq); | 146 | aff &= ~(1UL << irq); |
@@ -177,10 +149,11 @@ cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity) | |||
177 | } | 149 | } |
178 | 150 | ||
179 | static int | 151 | static int |
180 | dp264_set_affinity(unsigned int irq, const struct cpumask *affinity) | 152 | dp264_set_affinity(struct irq_data *d, const struct cpumask *affinity, |
181 | { | 153 | bool force) |
154 | { | ||
182 | spin_lock(&dp264_irq_lock); | 155 | spin_lock(&dp264_irq_lock); |
183 | cpu_set_irq_affinity(irq, *affinity); | 156 | cpu_set_irq_affinity(d->irq, *affinity); |
184 | tsunami_update_irq_hw(cached_irq_mask); | 157 | tsunami_update_irq_hw(cached_irq_mask); |
185 | spin_unlock(&dp264_irq_lock); | 158 | spin_unlock(&dp264_irq_lock); |
186 | 159 | ||
@@ -188,10 +161,11 @@ dp264_set_affinity(unsigned int irq, const struct cpumask *affinity) | |||
188 | } | 161 | } |
189 | 162 | ||
190 | static int | 163 | static int |
191 | clipper_set_affinity(unsigned int irq, const struct cpumask *affinity) | 164 | clipper_set_affinity(struct irq_data *d, const struct cpumask *affinity, |
192 | { | 165 | bool force) |
166 | { | ||
193 | spin_lock(&dp264_irq_lock); | 167 | spin_lock(&dp264_irq_lock); |
194 | cpu_set_irq_affinity(irq - 16, *affinity); | 168 | cpu_set_irq_affinity(d->irq - 16, *affinity); |
195 | tsunami_update_irq_hw(cached_irq_mask); | 169 | tsunami_update_irq_hw(cached_irq_mask); |
196 | spin_unlock(&dp264_irq_lock); | 170 | spin_unlock(&dp264_irq_lock); |
197 | 171 | ||
@@ -199,25 +173,19 @@ clipper_set_affinity(unsigned int irq, const struct cpumask *affinity) | |||
199 | } | 173 | } |
200 | 174 | ||
201 | static struct irq_chip dp264_irq_type = { | 175 | static struct irq_chip dp264_irq_type = { |
202 | .name = "DP264", | 176 | .name = "DP264", |
203 | .startup = dp264_startup_irq, | 177 | .irq_unmask = dp264_enable_irq, |
204 | .shutdown = dp264_disable_irq, | 178 | .irq_mask = dp264_disable_irq, |
205 | .enable = dp264_enable_irq, | 179 | .irq_mask_ack = dp264_disable_irq, |
206 | .disable = dp264_disable_irq, | 180 | .irq_set_affinity = dp264_set_affinity, |
207 | .ack = dp264_disable_irq, | ||
208 | .end = dp264_end_irq, | ||
209 | .set_affinity = dp264_set_affinity, | ||
210 | }; | 181 | }; |
211 | 182 | ||
212 | static struct irq_chip clipper_irq_type = { | 183 | static struct irq_chip clipper_irq_type = { |
213 | .name = "CLIPPER", | 184 | .name = "CLIPPER", |
214 | .startup = clipper_startup_irq, | 185 | .irq_unmask = clipper_enable_irq, |
215 | .shutdown = clipper_disable_irq, | 186 | .irq_mask = clipper_disable_irq, |
216 | .enable = clipper_enable_irq, | 187 | .irq_mask_ack = clipper_disable_irq, |
217 | .disable = clipper_disable_irq, | 188 | .irq_set_affinity = clipper_set_affinity, |
218 | .ack = clipper_disable_irq, | ||
219 | .end = clipper_end_irq, | ||
220 | .set_affinity = clipper_set_affinity, | ||
221 | }; | 189 | }; |
222 | 190 | ||
223 | static void | 191 | static void |
@@ -302,8 +270,8 @@ init_tsunami_irqs(struct irq_chip * ops, int imin, int imax) | |||
302 | { | 270 | { |
303 | long i; | 271 | long i; |
304 | for (i = imin; i <= imax; ++i) { | 272 | for (i = imin; i <= imax; ++i) { |
305 | irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; | 273 | irq_set_chip_and_handler(i, ops, handle_level_irq); |
306 | irq_desc[i].chip = ops; | 274 | irq_set_status_flags(i, IRQ_LEVEL); |
307 | } | 275 | } |
308 | } | 276 | } |
309 | 277 | ||
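The DP264/Clipper hunk also carries the affinity hook into the chip as .irq_set_affinity, which now receives the irq_data, a cpumask pointer and a force flag instead of a bare irq number. A minimal sketch with an invented routing helper; a real chip would also fill in its mask/unmask callbacks.

    #include <linux/irq.h>
    #include <linux/cpumask.h>

    static void my_hw_route_irq(unsigned int irq, const struct cpumask *mask)
    {
            /* hypothetical hardware access: program the routing registers */
    }

    static int my_set_affinity(struct irq_data *d, const struct cpumask *affinity,
                               bool force)
    {
            my_hw_route_irq(d->irq, affinity);
            return 0;
    }

    static struct irq_chip my_routed_irq_type = {
            .name             = "MY-ROUTED",
            .irq_set_affinity = my_set_affinity,
            /* .irq_mask / .irq_unmask / .irq_mask_ack as in the earlier sketch */
    };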
diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c index df2090ce5e7f..a7a23b40eec5 100644 --- a/arch/alpha/kernel/sys_eb64p.c +++ b/arch/alpha/kernel/sys_eb64p.c | |||
@@ -44,39 +44,22 @@ eb64p_update_irq_hw(unsigned int irq, unsigned long mask) | |||
44 | } | 44 | } |
45 | 45 | ||
46 | static inline void | 46 | static inline void |
47 | eb64p_enable_irq(unsigned int irq) | 47 | eb64p_enable_irq(struct irq_data *d) |
48 | { | 48 | { |
49 | eb64p_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq)); | 49 | eb64p_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq)); |
50 | } | 50 | } |
51 | 51 | ||
52 | static void | 52 | static void |
53 | eb64p_disable_irq(unsigned int irq) | 53 | eb64p_disable_irq(struct irq_data *d) |
54 | { | 54 | { |
55 | eb64p_update_irq_hw(irq, cached_irq_mask |= 1 << irq); | 55 | eb64p_update_irq_hw(d->irq, cached_irq_mask |= 1 << d->irq); |
56 | } | ||
57 | |||
58 | static unsigned int | ||
59 | eb64p_startup_irq(unsigned int irq) | ||
60 | { | ||
61 | eb64p_enable_irq(irq); | ||
62 | return 0; /* never anything pending */ | ||
63 | } | ||
64 | |||
65 | static void | ||
66 | eb64p_end_irq(unsigned int irq) | ||
67 | { | ||
68 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
69 | eb64p_enable_irq(irq); | ||
70 | } | 56 | } |
71 | 57 | ||
72 | static struct irq_chip eb64p_irq_type = { | 58 | static struct irq_chip eb64p_irq_type = { |
73 | .name = "EB64P", | 59 | .name = "EB64P", |
74 | .startup = eb64p_startup_irq, | 60 | .irq_unmask = eb64p_enable_irq, |
75 | .shutdown = eb64p_disable_irq, | 61 | .irq_mask = eb64p_disable_irq, |
76 | .enable = eb64p_enable_irq, | 62 | .irq_mask_ack = eb64p_disable_irq, |
77 | .disable = eb64p_disable_irq, | ||
78 | .ack = eb64p_disable_irq, | ||
79 | .end = eb64p_end_irq, | ||
80 | }; | 63 | }; |
81 | 64 | ||
82 | static void | 65 | static void |
@@ -135,9 +118,9 @@ eb64p_init_irq(void) | |||
135 | init_i8259a_irqs(); | 118 | init_i8259a_irqs(); |
136 | 119 | ||
137 | for (i = 16; i < 32; ++i) { | 120 | for (i = 16; i < 32; ++i) { |
138 | irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; | 121 | irq_set_chip_and_handler(i, &eb64p_irq_type, handle_level_irq); |
139 | irq_desc[i].chip = &eb64p_irq_type; | 122 | irq_set_status_flags(i, IRQ_LEVEL); |
140 | } | 123 | } |
141 | 124 | ||
142 | common_init_isa_dma(); | 125 | common_init_isa_dma(); |
143 | setup_irq(16+5, &isa_cascade_irqaction); | 126 | setup_irq(16+5, &isa_cascade_irqaction); |
diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c index 3ca1dbcf4044..a60cd5b2621e 100644 --- a/arch/alpha/kernel/sys_eiger.c +++ b/arch/alpha/kernel/sys_eiger.c | |||
@@ -51,43 +51,28 @@ eiger_update_irq_hw(unsigned long irq, unsigned long mask) | |||
51 | } | 51 | } |
52 | 52 | ||
53 | static inline void | 53 | static inline void |
54 | eiger_enable_irq(unsigned int irq) | 54 | eiger_enable_irq(struct irq_data *d) |
55 | { | 55 | { |
56 | unsigned int irq = d->irq; | ||
56 | unsigned long mask; | 57 | unsigned long mask; |
57 | mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63))); | 58 | mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63))); |
58 | eiger_update_irq_hw(irq, mask); | 59 | eiger_update_irq_hw(irq, mask); |
59 | } | 60 | } |
60 | 61 | ||
61 | static void | 62 | static void |
62 | eiger_disable_irq(unsigned int irq) | 63 | eiger_disable_irq(struct irq_data *d) |
63 | { | 64 | { |
65 | unsigned int irq = d->irq; | ||
64 | unsigned long mask; | 66 | unsigned long mask; |
65 | mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63)); | 67 | mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63)); |
66 | eiger_update_irq_hw(irq, mask); | 68 | eiger_update_irq_hw(irq, mask); |
67 | } | 69 | } |
68 | 70 | ||
69 | static unsigned int | ||
70 | eiger_startup_irq(unsigned int irq) | ||
71 | { | ||
72 | eiger_enable_irq(irq); | ||
73 | return 0; /* never anything pending */ | ||
74 | } | ||
75 | |||
76 | static void | ||
77 | eiger_end_irq(unsigned int irq) | ||
78 | { | ||
79 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
80 | eiger_enable_irq(irq); | ||
81 | } | ||
82 | |||
83 | static struct irq_chip eiger_irq_type = { | 71 | static struct irq_chip eiger_irq_type = { |
84 | .name = "EIGER", | 72 | .name = "EIGER", |
85 | .startup = eiger_startup_irq, | 73 | .irq_unmask = eiger_enable_irq, |
86 | .shutdown = eiger_disable_irq, | 74 | .irq_mask = eiger_disable_irq, |
87 | .enable = eiger_enable_irq, | 75 | .irq_mask_ack = eiger_disable_irq, |
88 | .disable = eiger_disable_irq, | ||
89 | .ack = eiger_disable_irq, | ||
90 | .end = eiger_end_irq, | ||
91 | }; | 76 | }; |
92 | 77 | ||
93 | static void | 78 | static void |
@@ -153,8 +138,8 @@ eiger_init_irq(void) | |||
153 | init_i8259a_irqs(); | 138 | init_i8259a_irqs(); |
154 | 139 | ||
155 | for (i = 16; i < 128; ++i) { | 140 | for (i = 16; i < 128; ++i) { |
156 | irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; | 141 | irq_set_chip_and_handler(i, &eiger_irq_type, handle_level_irq); |
157 | irq_desc[i].chip = &eiger_irq_type; | 142 | irq_set_status_flags(i, IRQ_LEVEL); |
158 | } | 143 | } |
159 | } | 144 | } |
160 | 145 | ||
diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c index 7a7ae36fff91..7f1a87f176e2 100644 --- a/arch/alpha/kernel/sys_jensen.c +++ b/arch/alpha/kernel/sys_jensen.c | |||
@@ -62,70 +62,35 @@ | |||
62 | * world. | 62 | * world. |
63 | */ | 63 | */ |
64 | 64 | ||
65 | static unsigned int | ||
66 | jensen_local_startup(unsigned int irq) | ||
67 | { | ||
68 | /* the parport is really hw IRQ 1, silly Jensen. */ | ||
69 | if (irq == 7) | ||
70 | i8259a_startup_irq(1); | ||
71 | else | ||
72 | /* | ||
73 | * For all true local interrupts, set the flag that prevents | ||
74 | * the IPL from being dropped during handler processing. | ||
75 | */ | ||
76 | if (irq_desc[irq].action) | ||
77 | irq_desc[irq].action->flags |= IRQF_DISABLED; | ||
78 | return 0; | ||
79 | } | ||
80 | |||
81 | static void | ||
82 | jensen_local_shutdown(unsigned int irq) | ||
83 | { | ||
84 | /* the parport is really hw IRQ 1, silly Jensen. */ | ||
85 | if (irq == 7) | ||
86 | i8259a_disable_irq(1); | ||
87 | } | ||
88 | |||
89 | static void | ||
90 | jensen_local_enable(unsigned int irq) | ||
91 | { | ||
92 | /* the parport is really hw IRQ 1, silly Jensen. */ | ||
93 | if (irq == 7) | ||
94 | i8259a_enable_irq(1); | ||
95 | } | ||
96 | |||
97 | static void | 65 | static void |
98 | jensen_local_disable(unsigned int irq) | 66 | jensen_local_enable(struct irq_data *d) |
99 | { | 67 | { |
100 | /* the parport is really hw IRQ 1, silly Jensen. */ | 68 | /* the parport is really hw IRQ 1, silly Jensen. */ |
101 | if (irq == 7) | 69 | if (d->irq == 7) |
102 | i8259a_disable_irq(1); | 70 | i8259a_enable_irq(d); |
103 | } | 71 | } |
104 | 72 | ||
105 | static void | 73 | static void |
106 | jensen_local_ack(unsigned int irq) | 74 | jensen_local_disable(struct irq_data *d) |
107 | { | 75 | { |
108 | /* the parport is really hw IRQ 1, silly Jensen. */ | 76 | /* the parport is really hw IRQ 1, silly Jensen. */ |
109 | if (irq == 7) | 77 | if (d->irq == 7) |
110 | i8259a_mask_and_ack_irq(1); | 78 | i8259a_disable_irq(d); |
111 | } | 79 | } |
112 | 80 | ||
113 | static void | 81 | static void |
114 | jensen_local_end(unsigned int irq) | 82 | jensen_local_mask_ack(struct irq_data *d) |
115 | { | 83 | { |
116 | /* the parport is really hw IRQ 1, silly Jensen. */ | 84 | /* the parport is really hw IRQ 1, silly Jensen. */ |
117 | if (irq == 7) | 85 | if (d->irq == 7) |
118 | i8259a_end_irq(1); | 86 | i8259a_mask_and_ack_irq(d); |
119 | } | 87 | } |
120 | 88 | ||
121 | static struct irq_chip jensen_local_irq_type = { | 89 | static struct irq_chip jensen_local_irq_type = { |
122 | .name = "LOCAL", | 90 | .name = "LOCAL", |
123 | .startup = jensen_local_startup, | 91 | .irq_unmask = jensen_local_enable, |
124 | .shutdown = jensen_local_shutdown, | 92 | .irq_mask = jensen_local_disable, |
125 | .enable = jensen_local_enable, | 93 | .irq_mask_ack = jensen_local_mask_ack, |
126 | .disable = jensen_local_disable, | ||
127 | .ack = jensen_local_ack, | ||
128 | .end = jensen_local_end, | ||
129 | }; | 94 | }; |
130 | 95 | ||
131 | static void | 96 | static void |
@@ -158,7 +123,7 @@ jensen_device_interrupt(unsigned long vector) | |||
158 | } | 123 | } |
159 | 124 | ||
160 | /* If there is no handler yet... */ | 125 | /* If there is no handler yet... */ |
161 | if (irq_desc[irq].action == NULL) { | 126 | if (!irq_has_action(irq)) { |
162 | /* If it is a local interrupt that cannot be masked... */ | 127 | /* If it is a local interrupt that cannot be masked... */ |
163 | if (vector >= 0x900) | 128 | if (vector >= 0x900) |
164 | { | 129 | { |
@@ -206,11 +171,11 @@ jensen_init_irq(void) | |||
206 | { | 171 | { |
207 | init_i8259a_irqs(); | 172 | init_i8259a_irqs(); |
208 | 173 | ||
209 | irq_desc[1].chip = &jensen_local_irq_type; | 174 | irq_set_chip_and_handler(1, &jensen_local_irq_type, handle_level_irq); |
210 | irq_desc[4].chip = &jensen_local_irq_type; | 175 | irq_set_chip_and_handler(4, &jensen_local_irq_type, handle_level_irq); |
211 | irq_desc[3].chip = &jensen_local_irq_type; | 176 | irq_set_chip_and_handler(3, &jensen_local_irq_type, handle_level_irq); |
212 | irq_desc[7].chip = &jensen_local_irq_type; | 177 | irq_set_chip_and_handler(7, &jensen_local_irq_type, handle_level_irq); |
213 | irq_desc[9].chip = &jensen_local_irq_type; | 178 | irq_set_chip_and_handler(9, &jensen_local_irq_type, handle_level_irq); |
214 | 179 | ||
215 | common_init_isa_dma(); | 180 | common_init_isa_dma(); |
216 | } | 181 | } |
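One behavioural detail in jensen_device_interrupt(): the test for an unclaimed vector now goes through irq_has_action() instead of reading irq_desc[irq].action directly. A small hedged sketch of that check (the function name and message text are illustrative only):

#include <linux/irqdesc.h>
#include <linux/kernel.h>

/* Sketch: drop a vector that no driver has requested yet. */
static int foo_vector_is_claimed(unsigned int irq)
{
	if (!irq_has_action(irq)) {
		printk(KERN_DEBUG "no handler installed for irq %u yet\n", irq);
		return 0;
	}
	return 1;
}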
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c index 0bb3b5c4f693..388b99d1779d 100644 --- a/arch/alpha/kernel/sys_marvel.c +++ b/arch/alpha/kernel/sys_marvel.c | |||
@@ -104,9 +104,10 @@ io7_get_irq_ctl(unsigned int irq, struct io7 **pio7) | |||
104 | } | 104 | } |
105 | 105 | ||
106 | static void | 106 | static void |
107 | io7_enable_irq(unsigned int irq) | 107 | io7_enable_irq(struct irq_data *d) |
108 | { | 108 | { |
109 | volatile unsigned long *ctl; | 109 | volatile unsigned long *ctl; |
110 | unsigned int irq = d->irq; | ||
110 | struct io7 *io7; | 111 | struct io7 *io7; |
111 | 112 | ||
112 | ctl = io7_get_irq_ctl(irq, &io7); | 113 | ctl = io7_get_irq_ctl(irq, &io7); |
@@ -115,7 +116,7 @@ io7_enable_irq(unsigned int irq) | |||
115 | __func__, irq); | 116 | __func__, irq); |
116 | return; | 117 | return; |
117 | } | 118 | } |
118 | 119 | ||
119 | spin_lock(&io7->irq_lock); | 120 | spin_lock(&io7->irq_lock); |
120 | *ctl |= 1UL << 24; | 121 | *ctl |= 1UL << 24; |
121 | mb(); | 122 | mb(); |
@@ -124,9 +125,10 @@ io7_enable_irq(unsigned int irq) | |||
124 | } | 125 | } |
125 | 126 | ||
126 | static void | 127 | static void |
127 | io7_disable_irq(unsigned int irq) | 128 | io7_disable_irq(struct irq_data *d) |
128 | { | 129 | { |
129 | volatile unsigned long *ctl; | 130 | volatile unsigned long *ctl; |
131 | unsigned int irq = d->irq; | ||
130 | struct io7 *io7; | 132 | struct io7 *io7; |
131 | 133 | ||
132 | ctl = io7_get_irq_ctl(irq, &io7); | 134 | ctl = io7_get_irq_ctl(irq, &io7); |
@@ -135,7 +137,7 @@ io7_disable_irq(unsigned int irq) | |||
135 | __func__, irq); | 137 | __func__, irq); |
136 | return; | 138 | return; |
137 | } | 139 | } |
138 | 140 | ||
139 | spin_lock(&io7->irq_lock); | 141 | spin_lock(&io7->irq_lock); |
140 | *ctl &= ~(1UL << 24); | 142 | *ctl &= ~(1UL << 24); |
141 | mb(); | 143 | mb(); |
@@ -143,60 +145,30 @@ io7_disable_irq(unsigned int irq) | |||
143 | spin_unlock(&io7->irq_lock); | 145 | spin_unlock(&io7->irq_lock); |
144 | } | 146 | } |
145 | 147 | ||
146 | static unsigned int | ||
147 | io7_startup_irq(unsigned int irq) | ||
148 | { | ||
149 | io7_enable_irq(irq); | ||
150 | return 0; /* never anything pending */ | ||
151 | } | ||
152 | |||
153 | static void | 148 | static void |
154 | io7_end_irq(unsigned int irq) | 149 | marvel_irq_noop(struct irq_data *d) |
155 | { | 150 | { |
156 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | 151 | return; |
157 | io7_enable_irq(irq); | ||
158 | } | ||
159 | |||
160 | static void | ||
161 | marvel_irq_noop(unsigned int irq) | ||
162 | { | ||
163 | return; | ||
164 | } | ||
165 | |||
166 | static unsigned int | ||
167 | marvel_irq_noop_return(unsigned int irq) | ||
168 | { | ||
169 | return 0; | ||
170 | } | 152 | } |
171 | 153 | ||
172 | static struct irq_chip marvel_legacy_irq_type = { | 154 | static struct irq_chip marvel_legacy_irq_type = { |
173 | .name = "LEGACY", | 155 | .name = "LEGACY", |
174 | .startup = marvel_irq_noop_return, | 156 | .irq_mask = marvel_irq_noop, |
175 | .shutdown = marvel_irq_noop, | 157 | .irq_unmask = marvel_irq_noop, |
176 | .enable = marvel_irq_noop, | ||
177 | .disable = marvel_irq_noop, | ||
178 | .ack = marvel_irq_noop, | ||
179 | .end = marvel_irq_noop, | ||
180 | }; | 158 | }; |
181 | 159 | ||
182 | static struct irq_chip io7_lsi_irq_type = { | 160 | static struct irq_chip io7_lsi_irq_type = { |
183 | .name = "LSI", | 161 | .name = "LSI", |
184 | .startup = io7_startup_irq, | 162 | .irq_unmask = io7_enable_irq, |
185 | .shutdown = io7_disable_irq, | 163 | .irq_mask = io7_disable_irq, |
186 | .enable = io7_enable_irq, | 164 | .irq_mask_ack = io7_disable_irq, |
187 | .disable = io7_disable_irq, | ||
188 | .ack = io7_disable_irq, | ||
189 | .end = io7_end_irq, | ||
190 | }; | 165 | }; |
191 | 166 | ||
192 | static struct irq_chip io7_msi_irq_type = { | 167 | static struct irq_chip io7_msi_irq_type = { |
193 | .name = "MSI", | 168 | .name = "MSI", |
194 | .startup = io7_startup_irq, | 169 | .irq_unmask = io7_enable_irq, |
195 | .shutdown = io7_disable_irq, | 170 | .irq_mask = io7_disable_irq, |
196 | .enable = io7_enable_irq, | 171 | .irq_ack = marvel_irq_noop, |
197 | .disable = io7_disable_irq, | ||
198 | .ack = marvel_irq_noop, | ||
199 | .end = io7_end_irq, | ||
200 | }; | 172 | }; |
201 | 173 | ||
202 | static void | 174 | static void |
@@ -304,8 +276,8 @@ init_io7_irqs(struct io7 *io7, | |||
304 | 276 | ||
305 | /* Set up the lsi irqs. */ | 277 | /* Set up the lsi irqs. */ |
306 | for (i = 0; i < 128; ++i) { | 278 | for (i = 0; i < 128; ++i) { |
307 | irq_desc[base + i].status = IRQ_DISABLED | IRQ_LEVEL; | 279 | irq_set_chip_and_handler(base + i, lsi_ops, handle_level_irq); |
308 | irq_desc[base + i].chip = lsi_ops; | 280 | irq_set_status_flags(i, IRQ_LEVEL); |
309 | } | 281 | } |
310 | 282 | ||
311 | /* Disable the implemented irqs in hardware. */ | 283 | /* Disable the implemented irqs in hardware. */ |
@@ -318,8 +290,8 @@ init_io7_irqs(struct io7 *io7, | |||
318 | 290 | ||
319 | /* Set up the msi irqs. */ | 291 | /* Set up the msi irqs. */ |
320 | for (i = 128; i < (128 + 512); ++i) { | 292 | for (i = 128; i < (128 + 512); ++i) { |
321 | irq_desc[base + i].status = IRQ_DISABLED | IRQ_LEVEL; | 293 | irq_set_chip_and_handler(base + i, msi_ops, handle_level_irq); |
322 | irq_desc[base + i].chip = msi_ops; | 294 | irq_set_status_flags(i, IRQ_LEVEL); |
323 | } | 295 | } |
324 | 296 | ||
325 | for (i = 0; i < 16; ++i) | 297 | for (i = 0; i < 16; ++i) |
@@ -336,8 +308,8 @@ marvel_init_irq(void) | |||
336 | 308 | ||
337 | /* Reserve the legacy irqs. */ | 309 | /* Reserve the legacy irqs. */ |
338 | for (i = 0; i < 16; ++i) { | 310 | for (i = 0; i < 16; ++i) { |
339 | irq_desc[i].status = IRQ_DISABLED; | 311 | irq_set_chip_and_handler(i, &marvel_legacy_irq_type, |
340 | irq_desc[i].chip = &marvel_legacy_irq_type; | 312 | handle_level_irq); |
341 | } | 313 | } |
342 | 314 | ||
343 | /* Init the io7 irqs. */ | 315 | /* Init the io7 irqs. */ |
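Marvel still reserves the 16 legacy lines, but rather than marking them IRQ_DISABLED in irq_desc[] it now gives them a chip whose mask/unmask callbacks do nothing, so the genirq core always has valid callbacks to invoke. A sketch of that shape, with placeholder names:

#include <linux/irq.h>

static void foo_irq_noop(struct irq_data *d)
{
}

/* Sketch: reserved lines get a do-nothing chip rather than no chip at all. */
static struct irq_chip foo_reserved_irq_chip = {
	.name		= "RESERVED",
	.irq_mask	= foo_irq_noop,
	.irq_unmask	= foo_irq_noop,
};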
diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c index ee8865169811..0e6e4697a025 100644 --- a/arch/alpha/kernel/sys_mikasa.c +++ b/arch/alpha/kernel/sys_mikasa.c | |||
@@ -43,39 +43,22 @@ mikasa_update_irq_hw(int mask) | |||
43 | } | 43 | } |
44 | 44 | ||
45 | static inline void | 45 | static inline void |
46 | mikasa_enable_irq(unsigned int irq) | 46 | mikasa_enable_irq(struct irq_data *d) |
47 | { | 47 | { |
48 | mikasa_update_irq_hw(cached_irq_mask |= 1 << (irq - 16)); | 48 | mikasa_update_irq_hw(cached_irq_mask |= 1 << (d->irq - 16)); |
49 | } | 49 | } |
50 | 50 | ||
51 | static void | 51 | static void |
52 | mikasa_disable_irq(unsigned int irq) | 52 | mikasa_disable_irq(struct irq_data *d) |
53 | { | 53 | { |
54 | mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (irq - 16))); | 54 | mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (d->irq - 16))); |
55 | } | ||
56 | |||
57 | static unsigned int | ||
58 | mikasa_startup_irq(unsigned int irq) | ||
59 | { | ||
60 | mikasa_enable_irq(irq); | ||
61 | return 0; | ||
62 | } | ||
63 | |||
64 | static void | ||
65 | mikasa_end_irq(unsigned int irq) | ||
66 | { | ||
67 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
68 | mikasa_enable_irq(irq); | ||
69 | } | 55 | } |
70 | 56 | ||
71 | static struct irq_chip mikasa_irq_type = { | 57 | static struct irq_chip mikasa_irq_type = { |
72 | .name = "MIKASA", | 58 | .name = "MIKASA", |
73 | .startup = mikasa_startup_irq, | 59 | .irq_unmask = mikasa_enable_irq, |
74 | .shutdown = mikasa_disable_irq, | 60 | .irq_mask = mikasa_disable_irq, |
75 | .enable = mikasa_enable_irq, | 61 | .irq_mask_ack = mikasa_disable_irq, |
76 | .disable = mikasa_disable_irq, | ||
77 | .ack = mikasa_disable_irq, | ||
78 | .end = mikasa_end_irq, | ||
79 | }; | 62 | }; |
80 | 63 | ||
81 | static void | 64 | static void |
@@ -115,8 +98,9 @@ mikasa_init_irq(void) | |||
115 | mikasa_update_irq_hw(0); | 98 | mikasa_update_irq_hw(0); |
116 | 99 | ||
117 | for (i = 16; i < 32; ++i) { | 100 | for (i = 16; i < 32; ++i) { |
118 | irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; | 101 | irq_set_chip_and_handler(i, &mikasa_irq_type, |
119 | irq_desc[i].chip = &mikasa_irq_type; | 102 | handle_level_irq); |
103 | irq_set_status_flags(i, IRQ_LEVEL); | ||
120 | } | 104 | } |
121 | 105 | ||
122 | init_i8259a_irqs(); | 106 | init_i8259a_irqs(); |
diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c index 86503fe73a88..a00ac7087167 100644 --- a/arch/alpha/kernel/sys_noritake.c +++ b/arch/alpha/kernel/sys_noritake.c | |||
@@ -48,39 +48,22 @@ noritake_update_irq_hw(int irq, int mask) | |||
48 | } | 48 | } |
49 | 49 | ||
50 | static void | 50 | static void |
51 | noritake_enable_irq(unsigned int irq) | 51 | noritake_enable_irq(struct irq_data *d) |
52 | { | 52 | { |
53 | noritake_update_irq_hw(irq, cached_irq_mask |= 1 << (irq - 16)); | 53 | noritake_update_irq_hw(d->irq, cached_irq_mask |= 1 << (d->irq - 16)); |
54 | } | 54 | } |
55 | 55 | ||
56 | static void | 56 | static void |
57 | noritake_disable_irq(unsigned int irq) | 57 | noritake_disable_irq(struct irq_data *d) |
58 | { | 58 | { |
59 | noritake_update_irq_hw(irq, cached_irq_mask &= ~(1 << (irq - 16))); | 59 | noritake_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << (d->irq - 16))); |
60 | } | ||
61 | |||
62 | static unsigned int | ||
63 | noritake_startup_irq(unsigned int irq) | ||
64 | { | ||
65 | noritake_enable_irq(irq); | ||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | static void | ||
70 | noritake_end_irq(unsigned int irq) | ||
71 | { | ||
72 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
73 | noritake_enable_irq(irq); | ||
74 | } | 60 | } |
75 | 61 | ||
76 | static struct irq_chip noritake_irq_type = { | 62 | static struct irq_chip noritake_irq_type = { |
77 | .name = "NORITAKE", | 63 | .name = "NORITAKE", |
78 | .startup = noritake_startup_irq, | 64 | .irq_unmask = noritake_enable_irq, |
79 | .shutdown = noritake_disable_irq, | 65 | .irq_mask = noritake_disable_irq, |
80 | .enable = noritake_enable_irq, | 66 | .irq_mask_ack = noritake_disable_irq, |
81 | .disable = noritake_disable_irq, | ||
82 | .ack = noritake_disable_irq, | ||
83 | .end = noritake_end_irq, | ||
84 | }; | 67 | }; |
85 | 68 | ||
86 | static void | 69 | static void |
@@ -144,8 +127,9 @@ noritake_init_irq(void) | |||
144 | outw(0, 0x54c); | 127 | outw(0, 0x54c); |
145 | 128 | ||
146 | for (i = 16; i < 48; ++i) { | 129 | for (i = 16; i < 48; ++i) { |
147 | irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; | 130 | irq_set_chip_and_handler(i, &noritake_irq_type, |
148 | irq_desc[i].chip = &noritake_irq_type; | 131 | handle_level_irq); |
132 | irq_set_status_flags(i, IRQ_LEVEL); | ||
149 | } | 133 | } |
150 | 134 | ||
151 | init_i8259a_irqs(); | 135 | init_i8259a_irqs(); |
diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c index 26c322bf89ee..7f52161f3d88 100644 --- a/arch/alpha/kernel/sys_rawhide.c +++ b/arch/alpha/kernel/sys_rawhide.c | |||
@@ -56,9 +56,10 @@ rawhide_update_irq_hw(int hose, int mask) | |||
56 | (((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0)) | 56 | (((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0)) |
57 | 57 | ||
58 | static inline void | 58 | static inline void |
59 | rawhide_enable_irq(unsigned int irq) | 59 | rawhide_enable_irq(struct irq_data *d) |
60 | { | 60 | { |
61 | unsigned int mask, hose; | 61 | unsigned int mask, hose; |
62 | unsigned int irq = d->irq; | ||
62 | 63 | ||
63 | irq -= 16; | 64 | irq -= 16; |
64 | hose = irq / 24; | 65 | hose = irq / 24; |
@@ -76,9 +77,10 @@ rawhide_enable_irq(unsigned int irq) | |||
76 | } | 77 | } |
77 | 78 | ||
78 | static void | 79 | static void |
79 | rawhide_disable_irq(unsigned int irq) | 80 | rawhide_disable_irq(struct irq_data *d) |
80 | { | 81 | { |
81 | unsigned int mask, hose; | 82 | unsigned int mask, hose; |
83 | unsigned int irq = d->irq; | ||
82 | 84 | ||
83 | irq -= 16; | 85 | irq -= 16; |
84 | hose = irq / 24; | 86 | hose = irq / 24; |
@@ -96,9 +98,10 @@ rawhide_disable_irq(unsigned int irq) | |||
96 | } | 98 | } |
97 | 99 | ||
98 | static void | 100 | static void |
99 | rawhide_mask_and_ack_irq(unsigned int irq) | 101 | rawhide_mask_and_ack_irq(struct irq_data *d) |
100 | { | 102 | { |
101 | unsigned int mask, mask1, hose; | 103 | unsigned int mask, mask1, hose; |
104 | unsigned int irq = d->irq; | ||
102 | 105 | ||
103 | irq -= 16; | 106 | irq -= 16; |
104 | hose = irq / 24; | 107 | hose = irq / 24; |
@@ -121,28 +124,11 @@ rawhide_mask_and_ack_irq(unsigned int irq) | |||
121 | spin_unlock(&rawhide_irq_lock); | 124 | spin_unlock(&rawhide_irq_lock); |
122 | } | 125 | } |
123 | 126 | ||
124 | static unsigned int | ||
125 | rawhide_startup_irq(unsigned int irq) | ||
126 | { | ||
127 | rawhide_enable_irq(irq); | ||
128 | return 0; | ||
129 | } | ||
130 | |||
131 | static void | ||
132 | rawhide_end_irq(unsigned int irq) | ||
133 | { | ||
134 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
135 | rawhide_enable_irq(irq); | ||
136 | } | ||
137 | |||
138 | static struct irq_chip rawhide_irq_type = { | 127 | static struct irq_chip rawhide_irq_type = { |
139 | .name = "RAWHIDE", | 128 | .name = "RAWHIDE", |
140 | .startup = rawhide_startup_irq, | 129 | .irq_unmask = rawhide_enable_irq, |
141 | .shutdown = rawhide_disable_irq, | 130 | .irq_mask = rawhide_disable_irq, |
142 | .enable = rawhide_enable_irq, | 131 | .irq_mask_ack = rawhide_mask_and_ack_irq, |
143 | .disable = rawhide_disable_irq, | ||
144 | .ack = rawhide_mask_and_ack_irq, | ||
145 | .end = rawhide_end_irq, | ||
146 | }; | 132 | }; |
147 | 133 | ||
148 | static void | 134 | static void |
@@ -194,8 +180,9 @@ rawhide_init_irq(void) | |||
194 | } | 180 | } |
195 | 181 | ||
196 | for (i = 16; i < 128; ++i) { | 182 | for (i = 16; i < 128; ++i) { |
197 | irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; | 183 | irq_set_chip_and_handler(i, &rawhide_irq_type, |
198 | irq_desc[i].chip = &rawhide_irq_type; | 184 | handle_level_irq); |
185 | irq_set_status_flags(i, IRQ_LEVEL); | ||
199 | } | 186 | } |
200 | 187 | ||
201 | init_i8259a_irqs(); | 188 | init_i8259a_irqs(); |
diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c index be161129eab9..216d94d9c0c1 100644 --- a/arch/alpha/kernel/sys_rx164.c +++ b/arch/alpha/kernel/sys_rx164.c | |||
@@ -47,39 +47,22 @@ rx164_update_irq_hw(unsigned long mask) | |||
47 | } | 47 | } |
48 | 48 | ||
49 | static inline void | 49 | static inline void |
50 | rx164_enable_irq(unsigned int irq) | 50 | rx164_enable_irq(struct irq_data *d) |
51 | { | 51 | { |
52 | rx164_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16)); | 52 | rx164_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16)); |
53 | } | 53 | } |
54 | 54 | ||
55 | static void | 55 | static void |
56 | rx164_disable_irq(unsigned int irq) | 56 | rx164_disable_irq(struct irq_data *d) |
57 | { | 57 | { |
58 | rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16))); | 58 | rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16))); |
59 | } | ||
60 | |||
61 | static unsigned int | ||
62 | rx164_startup_irq(unsigned int irq) | ||
63 | { | ||
64 | rx164_enable_irq(irq); | ||
65 | return 0; | ||
66 | } | ||
67 | |||
68 | static void | ||
69 | rx164_end_irq(unsigned int irq) | ||
70 | { | ||
71 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
72 | rx164_enable_irq(irq); | ||
73 | } | 59 | } |
74 | 60 | ||
75 | static struct irq_chip rx164_irq_type = { | 61 | static struct irq_chip rx164_irq_type = { |
76 | .name = "RX164", | 62 | .name = "RX164", |
77 | .startup = rx164_startup_irq, | 63 | .irq_unmask = rx164_enable_irq, |
78 | .shutdown = rx164_disable_irq, | 64 | .irq_mask = rx164_disable_irq, |
79 | .enable = rx164_enable_irq, | 65 | .irq_mask_ack = rx164_disable_irq, |
80 | .disable = rx164_disable_irq, | ||
81 | .ack = rx164_disable_irq, | ||
82 | .end = rx164_end_irq, | ||
83 | }; | 66 | }; |
84 | 67 | ||
85 | static void | 68 | static void |
@@ -116,8 +99,8 @@ rx164_init_irq(void) | |||
116 | 99 | ||
117 | rx164_update_irq_hw(0); | 100 | rx164_update_irq_hw(0); |
118 | for (i = 16; i < 40; ++i) { | 101 | for (i = 16; i < 40; ++i) { |
119 | irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; | 102 | irq_set_chip_and_handler(i, &rx164_irq_type, handle_level_irq); |
120 | irq_desc[i].chip = &rx164_irq_type; | 103 | irq_set_status_flags(i, IRQ_LEVEL); |
121 | } | 104 | } |
122 | 105 | ||
123 | init_i8259a_irqs(); | 106 | init_i8259a_irqs(); |
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c index b2abe27a23cf..da714e427c5f 100644 --- a/arch/alpha/kernel/sys_sable.c +++ b/arch/alpha/kernel/sys_sable.c | |||
@@ -443,11 +443,11 @@ lynx_swizzle(struct pci_dev *dev, u8 *pinp) | |||
443 | /* GENERIC irq routines */ | 443 | /* GENERIC irq routines */ |
444 | 444 | ||
445 | static inline void | 445 | static inline void |
446 | sable_lynx_enable_irq(unsigned int irq) | 446 | sable_lynx_enable_irq(struct irq_data *d) |
447 | { | 447 | { |
448 | unsigned long bit, mask; | 448 | unsigned long bit, mask; |
449 | 449 | ||
450 | bit = sable_lynx_irq_swizzle->irq_to_mask[irq]; | 450 | bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq]; |
451 | spin_lock(&sable_lynx_irq_lock); | 451 | spin_lock(&sable_lynx_irq_lock); |
452 | mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit); | 452 | mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit); |
453 | sable_lynx_irq_swizzle->update_irq_hw(bit, mask); | 453 | sable_lynx_irq_swizzle->update_irq_hw(bit, mask); |
@@ -459,11 +459,11 @@ sable_lynx_enable_irq(unsigned int irq) | |||
459 | } | 459 | } |
460 | 460 | ||
461 | static void | 461 | static void |
462 | sable_lynx_disable_irq(unsigned int irq) | 462 | sable_lynx_disable_irq(struct irq_data *d) |
463 | { | 463 | { |
464 | unsigned long bit, mask; | 464 | unsigned long bit, mask; |
465 | 465 | ||
466 | bit = sable_lynx_irq_swizzle->irq_to_mask[irq]; | 466 | bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq]; |
467 | spin_lock(&sable_lynx_irq_lock); | 467 | spin_lock(&sable_lynx_irq_lock); |
468 | mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit; | 468 | mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit; |
469 | sable_lynx_irq_swizzle->update_irq_hw(bit, mask); | 469 | sable_lynx_irq_swizzle->update_irq_hw(bit, mask); |
@@ -474,26 +474,12 @@ sable_lynx_disable_irq(unsigned int irq) | |||
474 | #endif | 474 | #endif |
475 | } | 475 | } |
476 | 476 | ||
477 | static unsigned int | ||
478 | sable_lynx_startup_irq(unsigned int irq) | ||
479 | { | ||
480 | sable_lynx_enable_irq(irq); | ||
481 | return 0; | ||
482 | } | ||
483 | |||
484 | static void | ||
485 | sable_lynx_end_irq(unsigned int irq) | ||
486 | { | ||
487 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
488 | sable_lynx_enable_irq(irq); | ||
489 | } | ||
490 | |||
491 | static void | 477 | static void |
492 | sable_lynx_mask_and_ack_irq(unsigned int irq) | 478 | sable_lynx_mask_and_ack_irq(struct irq_data *d) |
493 | { | 479 | { |
494 | unsigned long bit, mask; | 480 | unsigned long bit, mask; |
495 | 481 | ||
496 | bit = sable_lynx_irq_swizzle->irq_to_mask[irq]; | 482 | bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq]; |
497 | spin_lock(&sable_lynx_irq_lock); | 483 | spin_lock(&sable_lynx_irq_lock); |
498 | mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit; | 484 | mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit; |
499 | sable_lynx_irq_swizzle->update_irq_hw(bit, mask); | 485 | sable_lynx_irq_swizzle->update_irq_hw(bit, mask); |
@@ -503,12 +489,9 @@ sable_lynx_mask_and_ack_irq(unsigned int irq) | |||
503 | 489 | ||
504 | static struct irq_chip sable_lynx_irq_type = { | 490 | static struct irq_chip sable_lynx_irq_type = { |
505 | .name = "SABLE/LYNX", | 491 | .name = "SABLE/LYNX", |
506 | .startup = sable_lynx_startup_irq, | 492 | .irq_unmask = sable_lynx_enable_irq, |
507 | .shutdown = sable_lynx_disable_irq, | 493 | .irq_mask = sable_lynx_disable_irq, |
508 | .enable = sable_lynx_enable_irq, | 494 | .irq_mask_ack = sable_lynx_mask_and_ack_irq, |
509 | .disable = sable_lynx_disable_irq, | ||
510 | .ack = sable_lynx_mask_and_ack_irq, | ||
511 | .end = sable_lynx_end_irq, | ||
512 | }; | 495 | }; |
513 | 496 | ||
514 | static void | 497 | static void |
@@ -535,8 +518,9 @@ sable_lynx_init_irq(int nr_of_irqs) | |||
535 | long i; | 518 | long i; |
536 | 519 | ||
537 | for (i = 0; i < nr_of_irqs; ++i) { | 520 | for (i = 0; i < nr_of_irqs; ++i) { |
538 | irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; | 521 | irq_set_chip_and_handler(i, &sable_lynx_irq_type, |
539 | irq_desc[i].chip = &sable_lynx_irq_type; | 522 | handle_level_irq); |
523 | irq_set_status_flags(i, IRQ_LEVEL); | ||
540 | } | 524 | } |
541 | 525 | ||
542 | common_init_isa_dma(); | 526 | common_init_isa_dma(); |
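Sable/Lynx differs from the other platforms only in that the Linux irq number first goes through an irq_to_mask[] swizzle and the shadow mask is inverted (a set bit means the line is masked), which is why the enable path clears the bit here. A sketch of that lookup, with hypothetical foo_ names standing in for the platform structure:

#include <linux/irq.h>
#include <linux/spinlock.h>

/* Sketch: Linux irq -> hardware mask bit via a per-platform table. */
struct foo_irq_swizzle {
	char irq_to_mask[64];
	unsigned long shadow_mask;	/* 1 = masked */
};

static struct foo_irq_swizzle foo_swizzle_data;
static struct foo_irq_swizzle *foo_swizzle = &foo_swizzle_data;
static DEFINE_SPINLOCK(foo_irq_lock);

static void foo_unmask_irq(struct irq_data *d)
{
	unsigned long bit;

	bit = foo_swizzle->irq_to_mask[d->irq];
	spin_lock(&foo_irq_lock);
	foo_swizzle->shadow_mask &= ~(1UL << bit);
	/* foo_update_irq_hw(bit, foo_swizzle->shadow_mask) would go here. */
	spin_unlock(&foo_irq_lock);
}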
diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c index 4da596b6adbb..a31f8cd9bd6b 100644 --- a/arch/alpha/kernel/sys_takara.c +++ b/arch/alpha/kernel/sys_takara.c | |||
@@ -45,43 +45,28 @@ takara_update_irq_hw(unsigned long irq, unsigned long mask) | |||
45 | } | 45 | } |
46 | 46 | ||
47 | static inline void | 47 | static inline void |
48 | takara_enable_irq(unsigned int irq) | 48 | takara_enable_irq(struct irq_data *d) |
49 | { | 49 | { |
50 | unsigned int irq = d->irq; | ||
50 | unsigned long mask; | 51 | unsigned long mask; |
51 | mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63))); | 52 | mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63))); |
52 | takara_update_irq_hw(irq, mask); | 53 | takara_update_irq_hw(irq, mask); |
53 | } | 54 | } |
54 | 55 | ||
55 | static void | 56 | static void |
56 | takara_disable_irq(unsigned int irq) | 57 | takara_disable_irq(struct irq_data *d) |
57 | { | 58 | { |
59 | unsigned int irq = d->irq; | ||
58 | unsigned long mask; | 60 | unsigned long mask; |
59 | mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63)); | 61 | mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63)); |
60 | takara_update_irq_hw(irq, mask); | 62 | takara_update_irq_hw(irq, mask); |
61 | } | 63 | } |
62 | 64 | ||
63 | static unsigned int | ||
64 | takara_startup_irq(unsigned int irq) | ||
65 | { | ||
66 | takara_enable_irq(irq); | ||
67 | return 0; /* never anything pending */ | ||
68 | } | ||
69 | |||
70 | static void | ||
71 | takara_end_irq(unsigned int irq) | ||
72 | { | ||
73 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
74 | takara_enable_irq(irq); | ||
75 | } | ||
76 | |||
77 | static struct irq_chip takara_irq_type = { | 65 | static struct irq_chip takara_irq_type = { |
78 | .name = "TAKARA", | 66 | .name = "TAKARA", |
79 | .startup = takara_startup_irq, | 67 | .irq_unmask = takara_enable_irq, |
80 | .shutdown = takara_disable_irq, | 68 | .irq_mask = takara_disable_irq, |
81 | .enable = takara_enable_irq, | 69 | .irq_mask_ack = takara_disable_irq, |
82 | .disable = takara_disable_irq, | ||
83 | .ack = takara_disable_irq, | ||
84 | .end = takara_end_irq, | ||
85 | }; | 70 | }; |
86 | 71 | ||
87 | static void | 72 | static void |
@@ -153,8 +138,9 @@ takara_init_irq(void) | |||
153 | takara_update_irq_hw(i, -1); | 138 | takara_update_irq_hw(i, -1); |
154 | 139 | ||
155 | for (i = 16; i < 128; ++i) { | 140 | for (i = 16; i < 128; ++i) { |
156 | irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; | 141 | irq_set_chip_and_handler(i, &takara_irq_type, |
157 | irq_desc[i].chip = &takara_irq_type; | 142 | handle_level_irq); |
143 | irq_set_status_flags(i, IRQ_LEVEL); | ||
158 | } | 144 | } |
159 | 145 | ||
160 | common_init_isa_dma(); | 146 | common_init_isa_dma(); |
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c index 9008d0f20c53..6994407e242a 100644 --- a/arch/alpha/kernel/sys_titan.c +++ b/arch/alpha/kernel/sys_titan.c | |||
@@ -65,10 +65,11 @@ titan_update_irq_hw(unsigned long mask) | |||
65 | register int bcpu = boot_cpuid; | 65 | register int bcpu = boot_cpuid; |
66 | 66 | ||
67 | #ifdef CONFIG_SMP | 67 | #ifdef CONFIG_SMP |
68 | cpumask_t cpm = cpu_present_map; | 68 | cpumask_t cpm; |
69 | volatile unsigned long *dim0, *dim1, *dim2, *dim3; | 69 | volatile unsigned long *dim0, *dim1, *dim2, *dim3; |
70 | unsigned long mask0, mask1, mask2, mask3, dummy; | 70 | unsigned long mask0, mask1, mask2, mask3, dummy; |
71 | 71 | ||
72 | cpumask_copy(&cpm, cpu_present_mask); | ||
72 | mask &= ~isa_enable; | 73 | mask &= ~isa_enable; |
73 | mask0 = mask & titan_cpu_irq_affinity[0]; | 74 | mask0 = mask & titan_cpu_irq_affinity[0]; |
74 | mask1 = mask & titan_cpu_irq_affinity[1]; | 75 | mask1 = mask & titan_cpu_irq_affinity[1]; |
@@ -84,10 +85,10 @@ titan_update_irq_hw(unsigned long mask) | |||
84 | dim1 = &cchip->dim1.csr; | 85 | dim1 = &cchip->dim1.csr; |
85 | dim2 = &cchip->dim2.csr; | 86 | dim2 = &cchip->dim2.csr; |
86 | dim3 = &cchip->dim3.csr; | 87 | dim3 = &cchip->dim3.csr; |
87 | if (!cpu_isset(0, cpm)) dim0 = &dummy; | 88 | if (!cpumask_test_cpu(0, &cpm)) dim0 = &dummy; |
88 | if (!cpu_isset(1, cpm)) dim1 = &dummy; | 89 | if (!cpumask_test_cpu(1, &cpm)) dim1 = &dummy; |
89 | if (!cpu_isset(2, cpm)) dim2 = &dummy; | 90 | if (!cpumask_test_cpu(2, &cpm)) dim2 = &dummy; |
90 | if (!cpu_isset(3, cpm)) dim3 = &dummy; | 91 | if (!cpumask_test_cpu(3, &cpm)) dim3 = &dummy; |
91 | 92 | ||
92 | *dim0 = mask0; | 93 | *dim0 = mask0; |
93 | *dim1 = mask1; | 94 | *dim1 = mask1; |
@@ -112,8 +113,9 @@ titan_update_irq_hw(unsigned long mask) | |||
112 | } | 113 | } |
113 | 114 | ||
114 | static inline void | 115 | static inline void |
115 | titan_enable_irq(unsigned int irq) | 116 | titan_enable_irq(struct irq_data *d) |
116 | { | 117 | { |
118 | unsigned int irq = d->irq; | ||
117 | spin_lock(&titan_irq_lock); | 119 | spin_lock(&titan_irq_lock); |
118 | titan_cached_irq_mask |= 1UL << (irq - 16); | 120 | titan_cached_irq_mask |= 1UL << (irq - 16); |
119 | titan_update_irq_hw(titan_cached_irq_mask); | 121 | titan_update_irq_hw(titan_cached_irq_mask); |
@@ -121,35 +123,22 @@ titan_enable_irq(unsigned int irq) | |||
121 | } | 123 | } |
122 | 124 | ||
123 | static inline void | 125 | static inline void |
124 | titan_disable_irq(unsigned int irq) | 126 | titan_disable_irq(struct irq_data *d) |
125 | { | 127 | { |
128 | unsigned int irq = d->irq; | ||
126 | spin_lock(&titan_irq_lock); | 129 | spin_lock(&titan_irq_lock); |
127 | titan_cached_irq_mask &= ~(1UL << (irq - 16)); | 130 | titan_cached_irq_mask &= ~(1UL << (irq - 16)); |
128 | titan_update_irq_hw(titan_cached_irq_mask); | 131 | titan_update_irq_hw(titan_cached_irq_mask); |
129 | spin_unlock(&titan_irq_lock); | 132 | spin_unlock(&titan_irq_lock); |
130 | } | 133 | } |
131 | 134 | ||
132 | static unsigned int | ||
133 | titan_startup_irq(unsigned int irq) | ||
134 | { | ||
135 | titan_enable_irq(irq); | ||
136 | return 0; /* never anything pending */ | ||
137 | } | ||
138 | |||
139 | static void | ||
140 | titan_end_irq(unsigned int irq) | ||
141 | { | ||
142 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
143 | titan_enable_irq(irq); | ||
144 | } | ||
145 | |||
146 | static void | 135 | static void |
147 | titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity) | 136 | titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity) |
148 | { | 137 | { |
149 | int cpu; | 138 | int cpu; |
150 | 139 | ||
151 | for (cpu = 0; cpu < 4; cpu++) { | 140 | for (cpu = 0; cpu < 4; cpu++) { |
152 | if (cpu_isset(cpu, affinity)) | 141 | if (cpumask_test_cpu(cpu, &affinity)) |
153 | titan_cpu_irq_affinity[cpu] |= 1UL << irq; | 142 | titan_cpu_irq_affinity[cpu] |= 1UL << irq; |
154 | else | 143 | else |
155 | titan_cpu_irq_affinity[cpu] &= ~(1UL << irq); | 144 | titan_cpu_irq_affinity[cpu] &= ~(1UL << irq); |
@@ -158,8 +147,10 @@ titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity) | |||
158 | } | 147 | } |
159 | 148 | ||
160 | static int | 149 | static int |
161 | titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity) | 150 | titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, |
151 | bool force) | ||
162 | { | 152 | { |
153 | unsigned int irq = d->irq; | ||
163 | spin_lock(&titan_irq_lock); | 154 | spin_lock(&titan_irq_lock); |
164 | titan_cpu_set_irq_affinity(irq - 16, *affinity); | 155 | titan_cpu_set_irq_affinity(irq - 16, *affinity); |
165 | titan_update_irq_hw(titan_cached_irq_mask); | 156 | titan_update_irq_hw(titan_cached_irq_mask); |
@@ -189,20 +180,17 @@ init_titan_irqs(struct irq_chip * ops, int imin, int imax) | |||
189 | { | 180 | { |
190 | long i; | 181 | long i; |
191 | for (i = imin; i <= imax; ++i) { | 182 | for (i = imin; i <= imax; ++i) { |
192 | irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; | 183 | irq_set_chip_and_handler(i, ops, handle_level_irq); |
193 | irq_desc[i].chip = ops; | 184 | irq_set_status_flags(i, IRQ_LEVEL); |
194 | } | 185 | } |
195 | } | 186 | } |
196 | 187 | ||
197 | static struct irq_chip titan_irq_type = { | 188 | static struct irq_chip titan_irq_type = { |
198 | .name = "TITAN", | 189 | .name = "TITAN", |
199 | .startup = titan_startup_irq, | 190 | .irq_unmask = titan_enable_irq, |
200 | .shutdown = titan_disable_irq, | 191 | .irq_mask = titan_disable_irq, |
201 | .enable = titan_enable_irq, | 192 | .irq_mask_ack = titan_disable_irq, |
202 | .disable = titan_disable_irq, | 193 | .irq_set_affinity = titan_set_irq_affinity, |
203 | .ack = titan_disable_irq, | ||
204 | .end = titan_end_irq, | ||
205 | .set_affinity = titan_set_irq_affinity, | ||
206 | }; | 194 | }; |
207 | 195 | ||
208 | static irqreturn_t | 196 | static irqreturn_t |
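Two independent API moves land in sys_titan.c: the open-coded cpumask copies and cpu_isset() tests become cpumask_copy()/cpumask_test_cpu(), and .set_affinity becomes .irq_set_affinity with the newer three-argument signature that also takes a bool force flag. A sketch of the new-style affinity callback, with foo_ names standing in for the platform specifics:

#include <linux/irq.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(foo_irq_lock);
static unsigned long foo_cpu_irq_affinity[4];

/* Sketch: rebuild the per-CPU routing masks from the requested cpumask. */
static int foo_set_irq_affinity(struct irq_data *d,
				const struct cpumask *affinity, bool force)
{
	unsigned int bit = d->irq - 16;		/* hw bit, as in the Titan code */
	int cpu;

	spin_lock(&foo_irq_lock);
	for (cpu = 0; cpu < 4; cpu++) {
		if (cpumask_test_cpu(cpu, affinity))
			foo_cpu_irq_affinity[cpu] |= 1UL << bit;
		else
			foo_cpu_irq_affinity[cpu] &= ~(1UL << bit);
	}
	/* foo_update_irq_hw() would push the four masks to the DIM registers. */
	spin_unlock(&foo_irq_lock);

	return 0;
}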
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c index 62fd972e18ef..d92cdc715c65 100644 --- a/arch/alpha/kernel/sys_wildfire.c +++ b/arch/alpha/kernel/sys_wildfire.c | |||
@@ -104,10 +104,12 @@ wildfire_init_irq_hw(void) | |||
104 | } | 104 | } |
105 | 105 | ||
106 | static void | 106 | static void |
107 | wildfire_enable_irq(unsigned int irq) | 107 | wildfire_enable_irq(struct irq_data *d) |
108 | { | 108 | { |
109 | unsigned int irq = d->irq; | ||
110 | |||
109 | if (irq < 16) | 111 | if (irq < 16) |
110 | i8259a_enable_irq(irq); | 112 | i8259a_enable_irq(d); |
111 | 113 | ||
112 | spin_lock(&wildfire_irq_lock); | 114 | spin_lock(&wildfire_irq_lock); |
113 | set_bit(irq, &cached_irq_mask); | 115 | set_bit(irq, &cached_irq_mask); |
@@ -116,10 +118,12 @@ wildfire_enable_irq(unsigned int irq) | |||
116 | } | 118 | } |
117 | 119 | ||
118 | static void | 120 | static void |
119 | wildfire_disable_irq(unsigned int irq) | 121 | wildfire_disable_irq(struct irq_data *d) |
120 | { | 122 | { |
123 | unsigned int irq = d->irq; | ||
124 | |||
121 | if (irq < 16) | 125 | if (irq < 16) |
122 | i8259a_disable_irq(irq); | 126 | i8259a_disable_irq(d); |
123 | 127 | ||
124 | spin_lock(&wildfire_irq_lock); | 128 | spin_lock(&wildfire_irq_lock); |
125 | clear_bit(irq, &cached_irq_mask); | 129 | clear_bit(irq, &cached_irq_mask); |
@@ -128,10 +132,12 @@ wildfire_disable_irq(unsigned int irq) | |||
128 | } | 132 | } |
129 | 133 | ||
130 | static void | 134 | static void |
131 | wildfire_mask_and_ack_irq(unsigned int irq) | 135 | wildfire_mask_and_ack_irq(struct irq_data *d) |
132 | { | 136 | { |
137 | unsigned int irq = d->irq; | ||
138 | |||
133 | if (irq < 16) | 139 | if (irq < 16) |
134 | i8259a_mask_and_ack_irq(irq); | 140 | i8259a_mask_and_ack_irq(d); |
135 | 141 | ||
136 | spin_lock(&wildfire_irq_lock); | 142 | spin_lock(&wildfire_irq_lock); |
137 | clear_bit(irq, &cached_irq_mask); | 143 | clear_bit(irq, &cached_irq_mask); |
@@ -139,39 +145,17 @@ wildfire_mask_and_ack_irq(unsigned int irq) | |||
139 | spin_unlock(&wildfire_irq_lock); | 145 | spin_unlock(&wildfire_irq_lock); |
140 | } | 146 | } |
141 | 147 | ||
142 | static unsigned int | ||
143 | wildfire_startup_irq(unsigned int irq) | ||
144 | { | ||
145 | wildfire_enable_irq(irq); | ||
146 | return 0; /* never anything pending */ | ||
147 | } | ||
148 | |||
149 | static void | ||
150 | wildfire_end_irq(unsigned int irq) | ||
151 | { | ||
152 | #if 0 | ||
153 | if (!irq_desc[irq].action) | ||
154 | printk("got irq %d\n", irq); | ||
155 | #endif | ||
156 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
157 | wildfire_enable_irq(irq); | ||
158 | } | ||
159 | |||
160 | static struct irq_chip wildfire_irq_type = { | 148 | static struct irq_chip wildfire_irq_type = { |
161 | .name = "WILDFIRE", | 149 | .name = "WILDFIRE", |
162 | .startup = wildfire_startup_irq, | 150 | .irq_unmask = wildfire_enable_irq, |
163 | .shutdown = wildfire_disable_irq, | 151 | .irq_mask = wildfire_disable_irq, |
164 | .enable = wildfire_enable_irq, | 152 | .irq_mask_ack = wildfire_mask_and_ack_irq, |
165 | .disable = wildfire_disable_irq, | ||
166 | .ack = wildfire_mask_and_ack_irq, | ||
167 | .end = wildfire_end_irq, | ||
168 | }; | 153 | }; |
169 | 154 | ||
170 | static void __init | 155 | static void __init |
171 | wildfire_init_irq_per_pca(int qbbno, int pcano) | 156 | wildfire_init_irq_per_pca(int qbbno, int pcano) |
172 | { | 157 | { |
173 | int i, irq_bias; | 158 | int i, irq_bias; |
174 | unsigned long io_bias; | ||
175 | static struct irqaction isa_enable = { | 159 | static struct irqaction isa_enable = { |
176 | .handler = no_action, | 160 | .handler = no_action, |
177 | .name = "isa_enable", | 161 | .name = "isa_enable", |
@@ -180,10 +164,12 @@ wildfire_init_irq_per_pca(int qbbno, int pcano) | |||
180 | irq_bias = qbbno * (WILDFIRE_PCA_PER_QBB * WILDFIRE_IRQ_PER_PCA) | 164 | irq_bias = qbbno * (WILDFIRE_PCA_PER_QBB * WILDFIRE_IRQ_PER_PCA) |
181 | + pcano * WILDFIRE_IRQ_PER_PCA; | 165 | + pcano * WILDFIRE_IRQ_PER_PCA; |
182 | 166 | ||
167 | #if 0 | ||
168 | unsigned long io_bias; | ||
169 | |||
183 | /* Only need the following for first PCI bus per PCA. */ | 170 | /* Only need the following for first PCI bus per PCA. */ |
184 | io_bias = WILDFIRE_IO(qbbno, pcano<<1) - WILDFIRE_IO_BIAS; | 171 | io_bias = WILDFIRE_IO(qbbno, pcano<<1) - WILDFIRE_IO_BIAS; |
185 | 172 | ||
186 | #if 0 | ||
187 | outb(0, DMA1_RESET_REG + io_bias); | 173 | outb(0, DMA1_RESET_REG + io_bias); |
188 | outb(0, DMA2_RESET_REG + io_bias); | 174 | outb(0, DMA2_RESET_REG + io_bias); |
189 | outb(DMA_MODE_CASCADE, DMA2_MODE_REG + io_bias); | 175 | outb(DMA_MODE_CASCADE, DMA2_MODE_REG + io_bias); |
@@ -198,18 +184,21 @@ wildfire_init_irq_per_pca(int qbbno, int pcano) | |||
198 | for (i = 0; i < 16; ++i) { | 184 | for (i = 0; i < 16; ++i) { |
199 | if (i == 2) | 185 | if (i == 2) |
200 | continue; | 186 | continue; |
201 | irq_desc[i+irq_bias].status = IRQ_DISABLED | IRQ_LEVEL; | 187 | irq_set_chip_and_handler(i + irq_bias, &wildfire_irq_type, |
202 | irq_desc[i+irq_bias].chip = &wildfire_irq_type; | 188 | handle_level_irq); |
189 | irq_set_status_flags(i + irq_bias, IRQ_LEVEL); | ||
203 | } | 190 | } |
204 | 191 | ||
205 | irq_desc[36+irq_bias].status = IRQ_DISABLED | IRQ_LEVEL; | 192 | irq_set_chip_and_handler(36 + irq_bias, &wildfire_irq_type, |
206 | irq_desc[36+irq_bias].chip = &wildfire_irq_type; | 193 | handle_level_irq); |
194 | irq_set_status_flags(36 + irq_bias, IRQ_LEVEL); | ||
207 | for (i = 40; i < 64; ++i) { | 195 | for (i = 40; i < 64; ++i) { |
208 | irq_desc[i+irq_bias].status = IRQ_DISABLED | IRQ_LEVEL; | 196 | irq_set_chip_and_handler(i + irq_bias, &wildfire_irq_type, |
209 | irq_desc[i+irq_bias].chip = &wildfire_irq_type; | 197 | handle_level_irq); |
198 | irq_set_status_flags(i + irq_bias, IRQ_LEVEL); | ||
210 | } | 199 | } |
211 | 200 | ||
212 | setup_irq(32+irq_bias, &isa_enable); | 201 | setup_irq(32+irq_bias, &isa_enable); |
213 | } | 202 | } |
214 | 203 | ||
215 | static void __init | 204 | static void __init |
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S index a6a1de9db16f..b9c28f3f1956 100644 --- a/arch/alpha/kernel/systbls.S +++ b/arch/alpha/kernel/systbls.S | |||
@@ -498,23 +498,28 @@ sys_call_table: | |||
498 | .quad sys_ni_syscall /* sys_timerfd */ | 498 | .quad sys_ni_syscall /* sys_timerfd */ |
499 | .quad sys_eventfd | 499 | .quad sys_eventfd |
500 | .quad sys_recvmmsg | 500 | .quad sys_recvmmsg |
501 | .quad sys_fallocate /* 480 */ | 501 | .quad sys_fallocate /* 480 */ |
502 | .quad sys_timerfd_create | 502 | .quad sys_timerfd_create |
503 | .quad sys_timerfd_settime | 503 | .quad sys_timerfd_settime |
504 | .quad sys_timerfd_gettime | 504 | .quad sys_timerfd_gettime |
505 | .quad sys_signalfd4 | 505 | .quad sys_signalfd4 |
506 | .quad sys_eventfd2 /* 485 */ | 506 | .quad sys_eventfd2 /* 485 */ |
507 | .quad sys_epoll_create1 | 507 | .quad sys_epoll_create1 |
508 | .quad sys_dup3 | 508 | .quad sys_dup3 |
509 | .quad sys_pipe2 | 509 | .quad sys_pipe2 |
510 | .quad sys_inotify_init1 | 510 | .quad sys_inotify_init1 |
511 | .quad sys_preadv /* 490 */ | 511 | .quad sys_preadv /* 490 */ |
512 | .quad sys_pwritev | 512 | .quad sys_pwritev |
513 | .quad sys_rt_tgsigqueueinfo | 513 | .quad sys_rt_tgsigqueueinfo |
514 | .quad sys_perf_event_open | 514 | .quad sys_perf_event_open |
515 | .quad sys_fanotify_init | 515 | .quad sys_fanotify_init |
516 | .quad sys_fanotify_mark /* 495 */ | 516 | .quad sys_fanotify_mark /* 495 */ |
517 | .quad sys_prlimit64 | 517 | .quad sys_prlimit64 |
518 | .quad sys_name_to_handle_at | ||
519 | .quad sys_open_by_handle_at | ||
520 | .quad sys_clock_adjtime | ||
521 | .quad sys_syncfs /* 500 */ | ||
522 | .quad sys_setns | ||
518 | 523 | ||
519 | .size sys_call_table, . - sys_call_table | 524 | .size sys_call_table, . - sys_call_table |
520 | .type sys_call_table, @object | 525 | .type sys_call_table, @object |
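Five entries are appended to the table, which on Alpha puts sys_name_to_handle_at at 497, sys_open_by_handle_at at 498, sys_clock_adjtime at 499, sys_syncfs at 500 (matching the /* 500 */ marker above) and sys_setns at 501. A hedged userspace sketch, assuming a libc that does not yet wrap these calls, so the raw number from the table is passed to syscall() directly:

/* Sketch: invoke the freshly wired sys_syncfs by number on Alpha. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define FOO_NR_syncfs 500	/* taken from the table above, not from a header */

int main(void)
{
	int fd = open("/", O_RDONLY);
	long ret;

	if (fd < 0)
		return 1;
	ret = syscall(FOO_NR_syncfs, fd);
	printf("syncfs(%d) = %ld\n", fd, ret);
	return ret != 0;
}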
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c index 396af1799ea4..818e74ed45dc 100644 --- a/arch/alpha/kernel/time.c +++ b/arch/alpha/kernel/time.c | |||
@@ -41,7 +41,7 @@ | |||
41 | #include <linux/init.h> | 41 | #include <linux/init.h> |
42 | #include <linux/bcd.h> | 42 | #include <linux/bcd.h> |
43 | #include <linux/profile.h> | 43 | #include <linux/profile.h> |
44 | #include <linux/perf_event.h> | 44 | #include <linux/irq_work.h> |
45 | 45 | ||
46 | #include <asm/uaccess.h> | 46 | #include <asm/uaccess.h> |
47 | #include <asm/io.h> | 47 | #include <asm/io.h> |
@@ -83,25 +83,25 @@ static struct { | |||
83 | 83 | ||
84 | unsigned long est_cycle_freq; | 84 | unsigned long est_cycle_freq; |
85 | 85 | ||
86 | #ifdef CONFIG_PERF_EVENTS | 86 | #ifdef CONFIG_IRQ_WORK |
87 | 87 | ||
88 | DEFINE_PER_CPU(u8, perf_event_pending); | 88 | DEFINE_PER_CPU(u8, irq_work_pending); |
89 | 89 | ||
90 | #define set_perf_event_pending_flag() __get_cpu_var(perf_event_pending) = 1 | 90 | #define set_irq_work_pending_flag() __get_cpu_var(irq_work_pending) = 1 |
91 | #define test_perf_event_pending() __get_cpu_var(perf_event_pending) | 91 | #define test_irq_work_pending() __get_cpu_var(irq_work_pending) |
92 | #define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0 | 92 | #define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0 |
93 | 93 | ||
94 | void set_perf_event_pending(void) | 94 | void set_irq_work_pending(void) |
95 | { | 95 | { |
96 | set_perf_event_pending_flag(); | 96 | set_irq_work_pending_flag(); |
97 | } | 97 | } |
98 | 98 | ||
99 | #else /* CONFIG_PERF_EVENTS */ | 99 | #else /* CONFIG_IRQ_WORK */ |
100 | 100 | ||
101 | #define test_perf_event_pending() 0 | 101 | #define test_irq_work_pending() 0 |
102 | #define clear_perf_event_pending() | 102 | #define clear_irq_work_pending() |
103 | 103 | ||
104 | #endif /* CONFIG_PERF_EVENTS */ | 104 | #endif /* CONFIG_IRQ_WORK */ |
105 | 105 | ||
106 | 106 | ||
107 | static inline __u32 rpcc(void) | 107 | static inline __u32 rpcc(void) |
@@ -153,13 +153,14 @@ void read_persistent_clock(struct timespec *ts) | |||
153 | year += 100; | 153 | year += 100; |
154 | 154 | ||
155 | ts->tv_sec = mktime(year, mon, day, hour, min, sec); | 155 | ts->tv_sec = mktime(year, mon, day, hour, min, sec); |
156 | ts->tv_nsec = 0; | ||
156 | } | 157 | } |
157 | 158 | ||
158 | 159 | ||
159 | 160 | ||
160 | /* | 161 | /* |
161 | * timer_interrupt() needs to keep up the real-time clock, | 162 | * timer_interrupt() needs to keep up the real-time clock, |
162 | * as well as call the "do_timer()" routine every clocktick | 163 | * as well as call the "xtime_update()" routine every clocktick |
163 | */ | 164 | */ |
164 | irqreturn_t timer_interrupt(int irq, void *dev) | 165 | irqreturn_t timer_interrupt(int irq, void *dev) |
165 | { | 166 | { |
@@ -172,8 +173,6 @@ irqreturn_t timer_interrupt(int irq, void *dev) | |||
172 | profile_tick(CPU_PROFILING); | 173 | profile_tick(CPU_PROFILING); |
173 | #endif | 174 | #endif |
174 | 175 | ||
175 | write_seqlock(&xtime_lock); | ||
176 | |||
177 | /* | 176 | /* |
178 | * Calculate how many ticks have passed since the last update, | 177 | * Calculate how many ticks have passed since the last update, |
179 | * including any previous partial leftover. Save any resulting | 178 | * including any previous partial leftover. Save any resulting |
@@ -187,13 +186,11 @@ irqreturn_t timer_interrupt(int irq, void *dev) | |||
187 | nticks = delta >> FIX_SHIFT; | 186 | nticks = delta >> FIX_SHIFT; |
188 | 187 | ||
189 | if (nticks) | 188 | if (nticks) |
190 | do_timer(nticks); | 189 | xtime_update(nticks); |
191 | |||
192 | write_sequnlock(&xtime_lock); | ||
193 | 190 | ||
194 | if (test_perf_event_pending()) { | 191 | if (test_irq_work_pending()) { |
195 | clear_perf_event_pending(); | 192 | clear_irq_work_pending(); |
196 | perf_event_do_pending(); | 193 | irq_work_run(); |
197 | } | 194 | } |
198 | 195 | ||
199 | #ifndef CONFIG_SMP | 196 | #ifndef CONFIG_SMP |
@@ -378,8 +375,7 @@ static struct clocksource clocksource_rpcc = { | |||
378 | 375 | ||
379 | static inline void register_rpcc_clocksource(long cycle_freq) | 376 | static inline void register_rpcc_clocksource(long cycle_freq) |
380 | { | 377 | { |
381 | clocksource_calc_mult_shift(&clocksource_rpcc, cycle_freq, 4); | 378 | clocksource_register_hz(&clocksource_rpcc, cycle_freq); |
382 | clocksource_register(&clocksource_rpcc); | ||
383 | } | 379 | } |
384 | #else /* !CONFIG_SMP */ | 380 | #else /* !CONFIG_SMP */ |
385 | static inline void register_rpcc_clocksource(long cycle_freq) | 381 | static inline void register_rpcc_clocksource(long cycle_freq) |
@@ -506,7 +502,7 @@ set_rtc_mmss(unsigned long nowtime) | |||
506 | CMOS_WRITE(real_seconds,RTC_SECONDS); | 502 | CMOS_WRITE(real_seconds,RTC_SECONDS); |
507 | CMOS_WRITE(real_minutes,RTC_MINUTES); | 503 | CMOS_WRITE(real_minutes,RTC_MINUTES); |
508 | } else { | 504 | } else { |
509 | printk(KERN_WARNING | 505 | printk_once(KERN_NOTICE |
510 | "set_rtc_mmss: can't update from %d to %d\n", | 506 | "set_rtc_mmss: can't update from %d to %d\n", |
511 | cmos_minutes, real_minutes); | 507 | cmos_minutes, real_minutes); |
512 | retval = -1; | 508 | retval = -1; |
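Three separate cleanups land in time.c: the per-CPU pending flag now drives irq_work rather than perf events, timer_interrupt() calls xtime_update() (which takes the timekeeping locks itself, so the explicit write_seqlock(&xtime_lock) pair goes away), and the RPCC clocksource is registered with clocksource_register_hz() instead of a hand-computed mult/shift. A condensed sketch of the resulting tick path; foo_ticks_elapsed() is a hypothetical stand-in for the RPCC delta arithmetic that the real function keeps:

#include <linux/interrupt.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/time.h>

static DEFINE_PER_CPU(u8, foo_irq_work_pending);
#define foo_test_irq_work_pending()	__get_cpu_var(foo_irq_work_pending)
#define foo_clear_irq_work_pending()	__get_cpu_var(foo_irq_work_pending) = 0

static long foo_ticks_elapsed(void)
{
	return 1;	/* hypothetical: the real code derives this from RPCC */
}

static irqreturn_t foo_timer_interrupt(int irq, void *dev)
{
	long nticks = foo_ticks_elapsed();

	if (nticks)
		xtime_update(nticks);	/* no explicit xtime_lock any more */

	if (foo_test_irq_work_pending()) {
		foo_clear_irq_work_pending();
		irq_work_run();
	}

	return IRQ_HANDLED;
}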
diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S index 003ef4c02585..f937ad123852 100644 --- a/arch/alpha/kernel/vmlinux.lds.S +++ b/arch/alpha/kernel/vmlinux.lds.S | |||
@@ -1,5 +1,6 @@ | |||
1 | #include <asm-generic/vmlinux.lds.h> | 1 | #include <asm-generic/vmlinux.lds.h> |
2 | #include <asm/thread_info.h> | 2 | #include <asm/thread_info.h> |
3 | #include <asm/cache.h> | ||
3 | #include <asm/page.h> | 4 | #include <asm/page.h> |
4 | 5 | ||
5 | OUTPUT_FORMAT("elf64-alpha") | 6 | OUTPUT_FORMAT("elf64-alpha") |
@@ -38,15 +39,16 @@ SECTIONS | |||
38 | __init_begin = ALIGN(PAGE_SIZE); | 39 | __init_begin = ALIGN(PAGE_SIZE); |
39 | INIT_TEXT_SECTION(PAGE_SIZE) | 40 | INIT_TEXT_SECTION(PAGE_SIZE) |
40 | INIT_DATA_SECTION(16) | 41 | INIT_DATA_SECTION(16) |
41 | PERCPU(PAGE_SIZE) | 42 | PERCPU_SECTION(L1_CACHE_BYTES) |
42 | /* Align to THREAD_SIZE rather than PAGE_SIZE here so any padding page | 43 | /* Align to THREAD_SIZE rather than PAGE_SIZE here so any padding page |
43 | needed for the THREAD_SIZE aligned init_task gets freed after init */ | 44 | needed for the THREAD_SIZE aligned init_task gets freed after init */ |
44 | . = ALIGN(THREAD_SIZE); | 45 | . = ALIGN(THREAD_SIZE); |
45 | __init_end = .; | 46 | __init_end = .; |
46 | /* Freed after init ends here */ | 47 | /* Freed after init ends here */ |
47 | 48 | ||
49 | _sdata = .; /* Start of rw data section */ | ||
48 | _data = .; | 50 | _data = .; |
49 | RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE) | 51 | RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) |
50 | 52 | ||
51 | .got : { | 53 | .got : { |
52 | *(.got) | 54 | *(.got) |
diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile index 9b72c59c95be..c0a83ab62b78 100644 --- a/arch/alpha/lib/Makefile +++ b/arch/alpha/lib/Makefile | |||
@@ -2,8 +2,8 @@ | |||
2 | # Makefile for alpha-specific library files.. | 2 | # Makefile for alpha-specific library files.. |
3 | # | 3 | # |
4 | 4 | ||
5 | EXTRA_AFLAGS := $(KBUILD_CFLAGS) | 5 | asflags-y := $(KBUILD_CFLAGS) |
6 | EXTRA_CFLAGS := -Werror | 6 | ccflags-y := -Werror |
7 | 7 | ||
8 | # Many of these routines have implementations tuned for ev6. | 8 | # Many of these routines have implementations tuned for ev6. |
9 | # Choose them iff we're targeting ev6 specifically. | 9 | # Choose them iff we're targeting ev6 specifically. |
diff --git a/arch/alpha/lib/ev67-strrchr.S b/arch/alpha/lib/ev67-strrchr.S index 3fd8bf414c7b..dd0d8c6b9f59 100644 --- a/arch/alpha/lib/ev67-strrchr.S +++ b/arch/alpha/lib/ev67-strrchr.S | |||
@@ -82,7 +82,7 @@ $loop: | |||
82 | $eos: | 82 | $eos: |
83 | negq t1, t4 # E : isolate first null byte match | 83 | negq t1, t4 # E : isolate first null byte match |
84 | and t1, t4, t4 # E : | 84 | and t1, t4, t4 # E : |
85 | subq t4, 1, t5 # E : build a mask of the bytes upto... | 85 | subq t4, 1, t5 # E : build a mask of the bytes up to... |
86 | or t4, t5, t4 # E : ... and including the null | 86 | or t4, t5, t4 # E : ... and including the null |
87 | 87 | ||
88 | and t3, t4, t3 # E : mask out char matches after null | 88 | and t3, t4, t3 # E : mask out char matches after null |
diff --git a/arch/alpha/lib/fls.c b/arch/alpha/lib/fls.c index 32afaa3fa686..ddd048c0d825 100644 --- a/arch/alpha/lib/fls.c +++ b/arch/alpha/lib/fls.c | |||
@@ -6,7 +6,7 @@ | |||
6 | #include <linux/bitops.h> | 6 | #include <linux/bitops.h> |
7 | 7 | ||
8 | /* This is fls(x)-1, except zero is held to zero. This allows most | 8 | /* This is fls(x)-1, except zero is held to zero. This allows most |
9 | efficent input into extbl, plus it allows easy handling of fls(0)=0. */ | 9 | efficient input into extbl, plus it allows easy handling of fls(0)=0. */ |
10 | 10 | ||
11 | const unsigned char __flsm1_tab[256] = | 11 | const unsigned char __flsm1_tab[256] = |
12 | { | 12 | { |
diff --git a/arch/alpha/lib/strrchr.S b/arch/alpha/lib/strrchr.S index 82cfd0ac907b..1970dc07cfd1 100644 --- a/arch/alpha/lib/strrchr.S +++ b/arch/alpha/lib/strrchr.S | |||
@@ -54,7 +54,7 @@ $loop: | |||
54 | $eos: | 54 | $eos: |
55 | negq t1, t4 # e0 : isolate first null byte match | 55 | negq t1, t4 # e0 : isolate first null byte match |
56 | and t1, t4, t4 # e1 : | 56 | and t1, t4, t4 # e1 : |
57 | subq t4, 1, t5 # e0 : build a mask of the bytes upto... | 57 | subq t4, 1, t5 # e0 : build a mask of the bytes up to... |
58 | or t4, t5, t4 # e1 : ... and including the null | 58 | or t4, t5, t4 # e1 : ... and including the null |
59 | 59 | ||
60 | and t3, t4, t3 # e0 : mask out char matches after null | 60 | and t3, t4, t3 # e0 : mask out char matches after null |
diff --git a/arch/alpha/math-emu/Makefile b/arch/alpha/math-emu/Makefile index 359ef087e69e..7f4671995245 100644 --- a/arch/alpha/math-emu/Makefile +++ b/arch/alpha/math-emu/Makefile | |||
@@ -2,7 +2,7 @@ | |||
2 | # Makefile for the FPU instruction emulation. | 2 | # Makefile for the FPU instruction emulation. |
3 | # | 3 | # |
4 | 4 | ||
5 | EXTRA_CFLAGS := -w | 5 | ccflags-y := -w |
6 | 6 | ||
7 | obj-$(CONFIG_MATHEMU) += math-emu.o | 7 | obj-$(CONFIG_MATHEMU) += math-emu.o |
8 | 8 | ||
diff --git a/arch/alpha/mm/Makefile b/arch/alpha/mm/Makefile index 09399c5386cb..c993d3f93cf6 100644 --- a/arch/alpha/mm/Makefile +++ b/arch/alpha/mm/Makefile | |||
@@ -2,7 +2,7 @@ | |||
2 | # Makefile for the linux alpha-specific parts of the memory manager. | 2 | # Makefile for the linux alpha-specific parts of the memory manager. |
3 | # | 3 | # |
4 | 4 | ||
5 | EXTRA_CFLAGS := -Werror | 5 | ccflags-y := -Werror |
6 | 6 | ||
7 | obj-y := init.o fault.o extable.o | 7 | obj-y := init.o fault.o extable.o |
8 | 8 | ||
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c index 86425ab53bf5..69d0c5761e2f 100644 --- a/arch/alpha/mm/init.c +++ b/arch/alpha/mm/init.c | |||
@@ -32,8 +32,6 @@ | |||
32 | #include <asm/console.h> | 32 | #include <asm/console.h> |
33 | #include <asm/tlb.h> | 33 | #include <asm/tlb.h> |
34 | 34 | ||
35 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | ||
36 | |||
37 | extern void die_if_kernel(char *,struct pt_regs *,long); | 35 | extern void die_if_kernel(char *,struct pt_regs *,long); |
38 | 36 | ||
39 | static struct pcb_struct original_pcb; | 37 | static struct pcb_struct original_pcb; |
diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c index 7b2c56d8f930..3973ae395772 100644 --- a/arch/alpha/mm/numa.c +++ b/arch/alpha/mm/numa.c | |||
@@ -313,6 +313,7 @@ void __init paging_init(void) | |||
313 | zones_size[ZONE_DMA] = dma_local_pfn; | 313 | zones_size[ZONE_DMA] = dma_local_pfn; |
314 | zones_size[ZONE_NORMAL] = (end_pfn - start_pfn) - dma_local_pfn; | 314 | zones_size[ZONE_NORMAL] = (end_pfn - start_pfn) - dma_local_pfn; |
315 | } | 315 | } |
316 | node_set_state(nid, N_NORMAL_MEMORY); | ||
316 | free_area_init_node(nid, zones_size, start_pfn, NULL); | 317 | free_area_init_node(nid, zones_size, start_pfn, NULL); |
317 | } | 318 | } |
318 | 319 | ||
diff --git a/arch/alpha/oprofile/Makefile b/arch/alpha/oprofile/Makefile index 4aa56247bdc6..3473de751b03 100644 --- a/arch/alpha/oprofile/Makefile +++ b/arch/alpha/oprofile/Makefile | |||
@@ -1,4 +1,4 @@ | |||
1 | EXTRA_CFLAGS := -Werror -Wno-sign-compare | 1 | ccflags-y := -Werror -Wno-sign-compare |
2 | 2 | ||
3 | obj-$(CONFIG_OPROFILE) += oprofile.o | 3 | obj-$(CONFIG_OPROFILE) += oprofile.o |
4 | 4 | ||
diff --git a/arch/alpha/oprofile/op_model_ev67.c b/arch/alpha/oprofile/op_model_ev67.c index 70302086283c..5b9d178e0228 100644 --- a/arch/alpha/oprofile/op_model_ev67.c +++ b/arch/alpha/oprofile/op_model_ev67.c | |||
@@ -192,7 +192,7 @@ ev67_handle_interrupt(unsigned long which, struct pt_regs *regs, | |||
192 | case TRAP_INVALID1: | 192 | case TRAP_INVALID1: |
193 | case TRAP_INVALID2: | 193 | case TRAP_INVALID2: |
194 | case TRAP_INVALID3: | 194 | case TRAP_INVALID3: |
195 | /* Pipeline redirection ocurred. PMPC points | 195 | /* Pipeline redirection occurred. PMPC points |
196 | to PALcode. Recognize ITB miss by PALcode | 196 | to PALcode. Recognize ITB miss by PALcode |
197 | offset address, and get actual PC from | 197 | offset address, and get actual PC from |
198 | EXC_ADDR. */ | 198 | EXC_ADDR. */ |