Diffstat (limited to 'arch/ia64')
-rw-r--r-- | arch/ia64/include/asm/spinlock.h       | 62
-rw-r--r-- | arch/ia64/include/asm/spinlock_types.h | 2
-rw-r--r-- | arch/ia64/include/asm/swiotlb.h        | 2
-rw-r--r-- | arch/ia64/kernel/mca.c                 | 104
-rw-r--r-- | arch/ia64/kernel/pci-swiotlb.c         | 4
-rw-r--r-- | arch/ia64/kernel/unaligned.c           | 6
-rw-r--r-- | arch/ia64/mm/tlb.c                     | 24
-rw-r--r-- | arch/ia64/pci/pci.c                    | 9
-rw-r--r-- | arch/ia64/sn/kernel/io_common.c        | 8
-rw-r--r-- | arch/ia64/sn/kernel/sn2/sn_hwperf.c    | 7
10 files changed, 134 insertions, 94 deletions
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 30bb930e1111..239ecdc9516d 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -25,61 +25,82 @@
25 | * by atomically noting the tail and incrementing it by one (thus adding | 25 | * by atomically noting the tail and incrementing it by one (thus adding |
26 | * ourself to the queue and noting our position), then waiting until the head | 26 | * ourself to the queue and noting our position), then waiting until the head |
27 | * becomes equal to the the initial value of the tail. | 27 | * becomes equal to the the initial value of the tail. |
28 | * The pad bits in the middle are used to prevent the next_ticket number | ||
29 | * overflowing into the now_serving number. | ||
28 | * | 30 | * |
29 | * 63 32 31 0 | 31 | * 31 17 16 15 14 0 |
30 | * +----------------------------------------------------+ | 32 | * +----------------------------------------------------+ |
31 | * | next_ticket_number | now_serving | | 33 | * | now_serving | padding | next_ticket | |
32 | * +----------------------------------------------------+ | 34 | * +----------------------------------------------------+ |
33 | */ | 35 | */ |
34 | 36 | ||
35 | #define TICKET_SHIFT 32 | 37 | #define TICKET_SHIFT 17 |
38 | #define TICKET_BITS 15 | ||
39 | #define TICKET_MASK ((1 << TICKET_BITS) - 1) | ||
36 | 40 | ||
37 | static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) | 41 | static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) |
38 | { | 42 | { |
39 | int *p = (int *)&lock->lock, turn, now_serving; | 43 | int *p = (int *)&lock->lock, ticket, serve; |
40 | 44 | ||
41 | now_serving = *p; | 45 | ticket = ia64_fetchadd(1, p, acq); |
42 | turn = ia64_fetchadd(1, p+1, acq); | ||
43 | 46 | ||
44 | if (turn == now_serving) | 47 | if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK)) |
45 | return; | 48 | return; |
46 | 49 | ||
47 | do { | 50 | ia64_invala(); |
51 | |||
52 | for (;;) { | ||
53 | asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(p) : "memory"); | ||
54 | |||
55 | if (!(((serve >> TICKET_SHIFT) ^ ticket) & TICKET_MASK)) | ||
56 | return; | ||
48 | cpu_relax(); | 57 | cpu_relax(); |
49 | } while (ACCESS_ONCE(*p) != turn); | 58 | } |
50 | } | 59 | } |
51 | 60 | ||
52 | static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) | 61 | static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) |
53 | { | 62 | { |
54 | long tmp = ACCESS_ONCE(lock->lock), try; | 63 | int tmp = ACCESS_ONCE(lock->lock); |
55 | |||
56 | if (!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1))) { | ||
57 | try = tmp + (1L << TICKET_SHIFT); | ||
58 | 64 | ||
59 | return ia64_cmpxchg(acq, &lock->lock, tmp, try, sizeof (tmp)) == tmp; | 65 | if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK)) |
60 | } | 66 | return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp; |
61 | return 0; | 67 | return 0; |
62 | } | 68 | } |
63 | 69 | ||
64 | static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) | 70 | static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) |
65 | { | 71 | { |
66 | int *p = (int *)&lock->lock; | 72 | unsigned short *p = (unsigned short *)&lock->lock + 1, tmp; |
67 | 73 | ||
68 | (void)ia64_fetchadd(1, p, rel); | 74 | asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p)); |
75 | ACCESS_ONCE(*p) = (tmp + 2) & ~1; | ||
76 | } | ||
77 | |||
78 | static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock) | ||
79 | { | ||
80 | int *p = (int *)&lock->lock, ticket; | ||
81 | |||
82 | ia64_invala(); | ||
83 | |||
84 | for (;;) { | ||
85 | asm volatile ("ld4.c.nc %0=[%1]" : "=r"(ticket) : "r"(p) : "memory"); | ||
86 | if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK)) | ||
87 | return; | ||
88 | cpu_relax(); | ||
89 | } | ||
69 | } | 90 | } |
70 | 91 | ||
71 | static inline int __ticket_spin_is_locked(raw_spinlock_t *lock) | 92 | static inline int __ticket_spin_is_locked(raw_spinlock_t *lock) |
72 | { | 93 | { |
73 | long tmp = ACCESS_ONCE(lock->lock); | 94 | long tmp = ACCESS_ONCE(lock->lock); |
74 | 95 | ||
75 | return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1)); | 96 | return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK); |
76 | } | 97 | } |
77 | 98 | ||
78 | static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) | 99 | static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) |
79 | { | 100 | { |
80 | long tmp = ACCESS_ONCE(lock->lock); | 101 | long tmp = ACCESS_ONCE(lock->lock); |
81 | 102 | ||
82 | return (((tmp >> TICKET_SHIFT) - tmp) & ((1L << TICKET_SHIFT) - 1)) > 1; | 103 | return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1; |
83 | } | 104 | } |
84 | 105 | ||
85 | static inline int __raw_spin_is_locked(raw_spinlock_t *lock) | 106 | static inline int __raw_spin_is_locked(raw_spinlock_t *lock) |
@@ -116,8 +137,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
116 | 137 | ||
117 | static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) | 138 | static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) |
118 | { | 139 | { |
119 | while (__raw_spin_is_locked(lock)) | 140 | __ticket_spin_unlock_wait(lock); |
120 | cpu_relax(); | ||
121 | } | 141 | } |
122 | 142 | ||
123 | #define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0) | 143 | #define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0) |
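For readers who want the locking discipline without the ia64 assembly: the rewrite packs next_ticket (bits 0-14) and now_serving (bits 17-31) into one 32-bit word so a single fetchadd both joins the queue and reads whose turn it is. Below is a minimal user-space sketch of the same ticket-lock idea, assuming C11 atomics and two separate counters instead of the packed word; the names are illustrative, not the kernel's.

	#include <stdatomic.h>

	struct ticket_lock_model {
		atomic_uint next_ticket;	/* place in line handed to each arriver */
		atomic_uint now_serving;	/* ticket currently allowed to hold the lock */
	};

	static void model_lock(struct ticket_lock_model *l)
	{
		/* Atomically join the queue and note our position. */
		unsigned int me = atomic_fetch_add_explicit(&l->next_ticket, 1,
							    memory_order_relaxed);

		/* Spin until the previous owner hands the lock to our ticket. */
		while (atomic_load_explicit(&l->now_serving,
					    memory_order_acquire) != me)
			;	/* the kernel would cpu_relax() here */
	}

	static void model_unlock(struct ticket_lock_model *l)
	{
		/* Pass the lock to the next ticket in line. */
		atomic_fetch_add_explicit(&l->now_serving, 1, memory_order_release);
	}

The ia64 code folds both counters into lock->lock: ia64_fetchadd(1, p, acq) takes a ticket, the ((serve >> TICKET_SHIFT) ^ ticket) & TICKET_MASK test checks whether now_serving has reached that ticket, and unlock stores (tmp + 2) & ~1 into the upper halfword, advancing now_serving while clearing the pad bit so a next_ticket overflow never carries into it.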
diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h
index b61d136d9bc2..474e46f1ab4a 100644
--- a/arch/ia64/include/asm/spinlock_types.h
+++ b/arch/ia64/include/asm/spinlock_types.h
@@ -6,7 +6,7 @@
6 | #endif | 6 | #endif |
7 | 7 | ||
8 | typedef struct { | 8 | typedef struct { |
9 | volatile unsigned long lock; | 9 | volatile unsigned int lock; |
10 | } raw_spinlock_t; | 10 | } raw_spinlock_t; |
11 | 11 | ||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } |
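The lock word shrinks from unsigned long to unsigned int because the new ticket layout needs only 32 bits. Purely as a picture (a sketch; the real code uses the TICKET_SHIFT/TICKET_MASK arithmetic above, not bitfields), the word is carved up like this:

	/* Illustration only: bitfield order assumes a little-endian,
	 * low-bits-first layout and is not how the kernel accesses the word. */
	union ticket_word_model {
		unsigned int raw;
		struct {
			unsigned int next_ticket : 15;	/* bits  0..14 */
			unsigned int pad         : 2;	/* bits 15..16 */
			unsigned int now_serving : 15;	/* bits 17..31 */
		};
	};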
diff --git a/arch/ia64/include/asm/swiotlb.h b/arch/ia64/include/asm/swiotlb.h
index dcbaea7ce128..f0acde68aaea 100644
--- a/arch/ia64/include/asm/swiotlb.h
+++ b/arch/ia64/include/asm/swiotlb.h
@@ -4,8 +4,6 @@
4 | #include <linux/dma-mapping.h> | 4 | #include <linux/dma-mapping.h> |
5 | #include <linux/swiotlb.h> | 5 | #include <linux/swiotlb.h> |
6 | 6 | ||
7 | extern int swiotlb_force; | ||
8 | |||
9 | #ifdef CONFIG_SWIOTLB | 7 | #ifdef CONFIG_SWIOTLB |
10 | extern int swiotlb; | 8 | extern int swiotlb; |
11 | extern void pci_swiotlb_init(void); | 9 | extern void pci_swiotlb_init(void); |
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index d2877a7bfe2e..496ac7a99488 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -887,6 +887,60 @@ ia64_mca_modify_comm(const struct task_struct *previous_current)
887 | memcpy(current->comm, comm, sizeof(current->comm)); | 887 | memcpy(current->comm, comm, sizeof(current->comm)); |
888 | } | 888 | } |
889 | 889 | ||
890 | static void | ||
891 | finish_pt_regs(struct pt_regs *regs, const pal_min_state_area_t *ms, | ||
892 | unsigned long *nat) | ||
893 | { | ||
894 | const u64 *bank; | ||
895 | |||
896 | /* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use | ||
897 | * pmsa_{xip,xpsr,xfs} | ||
898 | */ | ||
899 | if (ia64_psr(regs)->ic) { | ||
900 | regs->cr_iip = ms->pmsa_iip; | ||
901 | regs->cr_ipsr = ms->pmsa_ipsr; | ||
902 | regs->cr_ifs = ms->pmsa_ifs; | ||
903 | } else { | ||
904 | regs->cr_iip = ms->pmsa_xip; | ||
905 | regs->cr_ipsr = ms->pmsa_xpsr; | ||
906 | regs->cr_ifs = ms->pmsa_xfs; | ||
907 | } | ||
908 | regs->pr = ms->pmsa_pr; | ||
909 | regs->b0 = ms->pmsa_br0; | ||
910 | regs->ar_rsc = ms->pmsa_rsc; | ||
911 | copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, ®s->r1, nat); | ||
912 | copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, ®s->r2, nat); | ||
913 | copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, ®s->r3, nat); | ||
914 | copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, ®s->r8, nat); | ||
915 | copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, ®s->r9, nat); | ||
916 | copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, ®s->r10, nat); | ||
917 | copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, ®s->r11, nat); | ||
918 | copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, ®s->r12, nat); | ||
919 | copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, ®s->r13, nat); | ||
920 | copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, ®s->r14, nat); | ||
921 | copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, ®s->r15, nat); | ||
922 | if (ia64_psr(regs)->bn) | ||
923 | bank = ms->pmsa_bank1_gr; | ||
924 | else | ||
925 | bank = ms->pmsa_bank0_gr; | ||
926 | copy_reg(&bank[16-16], ms->pmsa_nat_bits, ®s->r16, nat); | ||
927 | copy_reg(&bank[17-16], ms->pmsa_nat_bits, ®s->r17, nat); | ||
928 | copy_reg(&bank[18-16], ms->pmsa_nat_bits, ®s->r18, nat); | ||
929 | copy_reg(&bank[19-16], ms->pmsa_nat_bits, ®s->r19, nat); | ||
930 | copy_reg(&bank[20-16], ms->pmsa_nat_bits, ®s->r20, nat); | ||
931 | copy_reg(&bank[21-16], ms->pmsa_nat_bits, ®s->r21, nat); | ||
932 | copy_reg(&bank[22-16], ms->pmsa_nat_bits, ®s->r22, nat); | ||
933 | copy_reg(&bank[23-16], ms->pmsa_nat_bits, ®s->r23, nat); | ||
934 | copy_reg(&bank[24-16], ms->pmsa_nat_bits, ®s->r24, nat); | ||
935 | copy_reg(&bank[25-16], ms->pmsa_nat_bits, ®s->r25, nat); | ||
936 | copy_reg(&bank[26-16], ms->pmsa_nat_bits, ®s->r26, nat); | ||
937 | copy_reg(&bank[27-16], ms->pmsa_nat_bits, ®s->r27, nat); | ||
938 | copy_reg(&bank[28-16], ms->pmsa_nat_bits, ®s->r28, nat); | ||
939 | copy_reg(&bank[29-16], ms->pmsa_nat_bits, ®s->r29, nat); | ||
940 | copy_reg(&bank[30-16], ms->pmsa_nat_bits, ®s->r30, nat); | ||
941 | copy_reg(&bank[31-16], ms->pmsa_nat_bits, ®s->r31, nat); | ||
942 | } | ||
943 | |||
890 | /* On entry to this routine, we are running on the per cpu stack, see | 944 | /* On entry to this routine, we are running on the per cpu stack, see |
891 | * mca_asm.h. The original stack has not been touched by this event. Some of | 945 | * mca_asm.h. The original stack has not been touched by this event. Some of |
892 | * the original stack's registers will be in the RBS on this stack. This stack | 946 | * the original stack's registers will be in the RBS on this stack. This stack |
@@ -921,7 +975,6 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
921 | u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1]; | 975 | u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1]; |
922 | u64 ar_bspstore = regs->ar_bspstore; | 976 | u64 ar_bspstore = regs->ar_bspstore; |
923 | u64 ar_bsp = regs->ar_bspstore + (loadrs >> 16); | 977 | u64 ar_bsp = regs->ar_bspstore + (loadrs >> 16); |
924 | const u64 *bank; | ||
925 | const char *msg; | 978 | const char *msg; |
926 | int cpu = smp_processor_id(); | 979 | int cpu = smp_processor_id(); |
927 | 980 | ||
@@ -1024,54 +1077,9 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
1024 | p = (char *)r12 - sizeof(*regs); | 1077 | p = (char *)r12 - sizeof(*regs); |
1025 | old_regs = (struct pt_regs *)p; | 1078 | old_regs = (struct pt_regs *)p; |
1026 | memcpy(old_regs, regs, sizeof(*regs)); | 1079 | memcpy(old_regs, regs, sizeof(*regs)); |
1027 | /* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use | ||
1028 | * pmsa_{xip,xpsr,xfs} | ||
1029 | */ | ||
1030 | if (ia64_psr(regs)->ic) { | ||
1031 | old_regs->cr_iip = ms->pmsa_iip; | ||
1032 | old_regs->cr_ipsr = ms->pmsa_ipsr; | ||
1033 | old_regs->cr_ifs = ms->pmsa_ifs; | ||
1034 | } else { | ||
1035 | old_regs->cr_iip = ms->pmsa_xip; | ||
1036 | old_regs->cr_ipsr = ms->pmsa_xpsr; | ||
1037 | old_regs->cr_ifs = ms->pmsa_xfs; | ||
1038 | } | ||
1039 | old_regs->pr = ms->pmsa_pr; | ||
1040 | old_regs->b0 = ms->pmsa_br0; | ||
1041 | old_regs->loadrs = loadrs; | 1080 | old_regs->loadrs = loadrs; |
1042 | old_regs->ar_rsc = ms->pmsa_rsc; | ||
1043 | old_unat = old_regs->ar_unat; | 1081 | old_unat = old_regs->ar_unat; |
1044 | copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &old_regs->r1, &old_unat); | 1082 | finish_pt_regs(old_regs, ms, &old_unat); |
1045 | copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &old_regs->r2, &old_unat); | ||
1046 | copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &old_regs->r3, &old_unat); | ||
1047 | copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &old_regs->r8, &old_unat); | ||
1048 | copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &old_regs->r9, &old_unat); | ||
1049 | copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &old_regs->r10, &old_unat); | ||
1050 | copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &old_regs->r11, &old_unat); | ||
1051 | copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &old_regs->r12, &old_unat); | ||
1052 | copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &old_regs->r13, &old_unat); | ||
1053 | copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &old_regs->r14, &old_unat); | ||
1054 | copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &old_regs->r15, &old_unat); | ||
1055 | if (ia64_psr(old_regs)->bn) | ||
1056 | bank = ms->pmsa_bank1_gr; | ||
1057 | else | ||
1058 | bank = ms->pmsa_bank0_gr; | ||
1059 | copy_reg(&bank[16-16], ms->pmsa_nat_bits, &old_regs->r16, &old_unat); | ||
1060 | copy_reg(&bank[17-16], ms->pmsa_nat_bits, &old_regs->r17, &old_unat); | ||
1061 | copy_reg(&bank[18-16], ms->pmsa_nat_bits, &old_regs->r18, &old_unat); | ||
1062 | copy_reg(&bank[19-16], ms->pmsa_nat_bits, &old_regs->r19, &old_unat); | ||
1063 | copy_reg(&bank[20-16], ms->pmsa_nat_bits, &old_regs->r20, &old_unat); | ||
1064 | copy_reg(&bank[21-16], ms->pmsa_nat_bits, &old_regs->r21, &old_unat); | ||
1065 | copy_reg(&bank[22-16], ms->pmsa_nat_bits, &old_regs->r22, &old_unat); | ||
1066 | copy_reg(&bank[23-16], ms->pmsa_nat_bits, &old_regs->r23, &old_unat); | ||
1067 | copy_reg(&bank[24-16], ms->pmsa_nat_bits, &old_regs->r24, &old_unat); | ||
1068 | copy_reg(&bank[25-16], ms->pmsa_nat_bits, &old_regs->r25, &old_unat); | ||
1069 | copy_reg(&bank[26-16], ms->pmsa_nat_bits, &old_regs->r26, &old_unat); | ||
1070 | copy_reg(&bank[27-16], ms->pmsa_nat_bits, &old_regs->r27, &old_unat); | ||
1071 | copy_reg(&bank[28-16], ms->pmsa_nat_bits, &old_regs->r28, &old_unat); | ||
1072 | copy_reg(&bank[29-16], ms->pmsa_nat_bits, &old_regs->r29, &old_unat); | ||
1073 | copy_reg(&bank[30-16], ms->pmsa_nat_bits, &old_regs->r30, &old_unat); | ||
1074 | copy_reg(&bank[31-16], ms->pmsa_nat_bits, &old_regs->r31, &old_unat); | ||
1075 | 1083 | ||
1076 | /* Next stack a struct switch_stack. mca_asm.S built a partial | 1084 | /* Next stack a struct switch_stack. mca_asm.S built a partial |
1077 | * switch_stack, copy it and fill in the blanks using pt_regs and | 1085 | * switch_stack, copy it and fill in the blanks using pt_regs and |
@@ -1141,6 +1149,8 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
1141 | no_mod: | 1149 | no_mod: |
1142 | mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n", | 1150 | mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n", |
1143 | smp_processor_id(), type, msg); | 1151 | smp_processor_id(), type, msg); |
1152 | old_unat = regs->ar_unat; | ||
1153 | finish_pt_regs(regs, ms, &old_unat); | ||
1144 | return previous_current; | 1154 | return previous_current; |
1145 | } | 1155 | } |
1146 | 1156 | ||
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
index 285aae8431c6..53292abf846c 100644
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -41,7 +41,7 @@ struct dma_map_ops swiotlb_dma_ops = {
41 | void __init swiotlb_dma_init(void) | 41 | void __init swiotlb_dma_init(void) |
42 | { | 42 | { |
43 | dma_ops = &swiotlb_dma_ops; | 43 | dma_ops = &swiotlb_dma_ops; |
44 | swiotlb_init(); | 44 | swiotlb_init(1); |
45 | } | 45 | } |
46 | 46 | ||
47 | void __init pci_swiotlb_init(void) | 47 | void __init pci_swiotlb_init(void) |
@@ -51,7 +51,7 @@ void __init pci_swiotlb_init(void)
51 | swiotlb = 1; | 51 | swiotlb = 1; |
52 | printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n"); | 52 | printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n"); |
53 | machvec_init("dig"); | 53 | machvec_init("dig"); |
54 | swiotlb_init(); | 54 | swiotlb_init(1); |
55 | dma_ops = &swiotlb_dma_ops; | 55 | dma_ops = &swiotlb_dma_ops; |
56 | #else | 56 | #else |
57 | panic("Unable to find Intel IOMMU"); | 57 | panic("Unable to find Intel IOMMU"); |
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index 6db08599ebbc..776dd40397e2 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -60,7 +60,6 @@ dump (const char *str, void *vp, size_t len)
60 | */ | 60 | */ |
61 | int no_unaligned_warning; | 61 | int no_unaligned_warning; |
62 | int unaligned_dump_stack; | 62 | int unaligned_dump_stack; |
63 | static int noprint_warning; | ||
64 | 63 | ||
65 | /* | 64 | /* |
66 | * For M-unit: | 65 | * For M-unit: |
@@ -1357,9 +1356,8 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
1357 | /* watch for command names containing %s */ | 1356 | /* watch for command names containing %s */ |
1358 | printk(KERN_WARNING "%s", buf); | 1357 | printk(KERN_WARNING "%s", buf); |
1359 | } else { | 1358 | } else { |
1360 | if (no_unaligned_warning && !noprint_warning) { | 1359 | if (no_unaligned_warning) { |
1361 | noprint_warning = 1; | 1360 | printk_once(KERN_WARNING "%s(%d) encountered an " |
1362 | printk(KERN_WARNING "%s(%d) encountered an " | ||
1363 | "unaligned exception which required\n" | 1361 | "unaligned exception which required\n" |
1364 | "kernel assistance, which degrades " | 1362 | "kernel assistance, which degrades " |
1365 | "the performance of the application.\n" | 1363 | "the performance of the application.\n" |
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index f426dc78d959..ee09d261f2e6 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -100,24 +100,36 @@ wrap_mmu_context (struct mm_struct *mm)
100 | * this primitive it can be moved up to a spinaphore.h header. | 100 | * this primitive it can be moved up to a spinaphore.h header. |
101 | */ | 101 | */ |
102 | struct spinaphore { | 102 | struct spinaphore { |
103 | atomic_t cur; | 103 | unsigned long ticket; |
104 | unsigned long serve; | ||
104 | }; | 105 | }; |
105 | 106 | ||
106 | static inline void spinaphore_init(struct spinaphore *ss, int val) | 107 | static inline void spinaphore_init(struct spinaphore *ss, int val) |
107 | { | 108 | { |
108 | atomic_set(&ss->cur, val); | 109 | ss->ticket = 0; |
110 | ss->serve = val; | ||
109 | } | 111 | } |
110 | 112 | ||
111 | static inline void down_spin(struct spinaphore *ss) | 113 | static inline void down_spin(struct spinaphore *ss) |
112 | { | 114 | { |
113 | while (unlikely(!atomic_add_unless(&ss->cur, -1, 0))) | 115 | unsigned long t = ia64_fetchadd(1, &ss->ticket, acq), serve; |
114 | while (atomic_read(&ss->cur) == 0) | 116 | |
115 | cpu_relax(); | 117 | if (time_before(t, ss->serve)) |
118 | return; | ||
119 | |||
120 | ia64_invala(); | ||
121 | |||
122 | for (;;) { | ||
123 | asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory"); | ||
124 | if (time_before(t, serve)) | ||
125 | return; | ||
126 | cpu_relax(); | ||
127 | } | ||
116 | } | 128 | } |
117 | 129 | ||
118 | static inline void up_spin(struct spinaphore *ss) | 130 | static inline void up_spin(struct spinaphore *ss) |
119 | { | 131 | { |
120 | atomic_add(1, &ss->cur); | 132 | ia64_fetchadd(1, &ss->serve, rel); |
121 | } | 133 | } |
122 | 134 | ||
123 | static struct spinaphore ptcg_sem; | 135 | static struct spinaphore ptcg_sem; |
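The spinaphore is rebuilt on the same ticket idea as the spinlock above: down_spin() grabs a ticket, serve starts at the semaphore's capacity, and a taker may proceed once its ticket is before serve in wraparound arithmetic; up_spin() simply advances serve. A portable sketch, assuming C11 atomics and illustrative names rather than the ia64 fetchadd/ld4.c.nc primitives:

	#include <stdatomic.h>

	struct spinaphore_model {
		atomic_ulong ticket;	/* next ticket to hand out */
		atomic_ulong serve;	/* tickets numerically before this may proceed */
	};

	static void model_init(struct spinaphore_model *ss, unsigned long capacity)
	{
		atomic_init(&ss->ticket, 0);
		atomic_init(&ss->serve, capacity);	/* first 'capacity' takers pass immediately */
	}

	static void model_down(struct spinaphore_model *ss)
	{
		unsigned long t = atomic_fetch_add_explicit(&ss->ticket, 1,
							    memory_order_acquire);

		/* Signed difference mirrors the kernel's time_before(): the
		 * comparison stays correct when the counters wrap around. */
		while ((long)(t - atomic_load_explicit(&ss->serve,
						       memory_order_acquire)) >= 0)
			;	/* cpu_relax() in the kernel */
	}

	static void model_up(struct spinaphore_model *ss)
	{
		atomic_fetch_add_explicit(&ss->serve, 1, memory_order_release);
	}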
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 7de76dd352fe..c0fca2c1c858 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -56,10 +56,13 @@ int raw_pci_read(unsigned int seg, unsigned int bus, unsigned int devfn,
56 | if ((seg | reg) <= 255) { | 56 | if ((seg | reg) <= 255) { |
57 | addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg); | 57 | addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg); |
58 | mode = 0; | 58 | mode = 0; |
59 | } else { | 59 | } else if (sal_revision >= SAL_VERSION_CODE(3,2)) { |
60 | addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg); | 60 | addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg); |
61 | mode = 1; | 61 | mode = 1; |
62 | } else { | ||
63 | return -EINVAL; | ||
62 | } | 64 | } |
65 | |||
63 | result = ia64_sal_pci_config_read(addr, mode, len, &data); | 66 | result = ia64_sal_pci_config_read(addr, mode, len, &data); |
64 | if (result != 0) | 67 | if (result != 0) |
65 | return -EINVAL; | 68 | return -EINVAL; |
@@ -80,9 +83,11 @@ int raw_pci_write(unsigned int seg, unsigned int bus, unsigned int devfn,
80 | if ((seg | reg) <= 255) { | 83 | if ((seg | reg) <= 255) { |
81 | addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg); | 84 | addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg); |
82 | mode = 0; | 85 | mode = 0; |
83 | } else { | 86 | } else if (sal_revision >= SAL_VERSION_CODE(3,2)) { |
84 | addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg); | 87 | addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg); |
85 | mode = 1; | 88 | mode = 1; |
89 | } else { | ||
90 | return -EINVAL; | ||
86 | } | 91 | } |
87 | result = ia64_sal_pci_config_write(addr, mode, len, value); | 92 | result = ia64_sal_pci_config_write(addr, mode, len, value); |
88 | if (result != 0) | 93 | if (result != 0) |
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
index 25831c47c579..308e6595110e 100644
--- a/arch/ia64/sn/kernel/io_common.c
+++ b/arch/ia64/sn/kernel/io_common.c
@@ -119,7 +119,6 @@ sn_pcidev_info_get(struct pci_dev *dev)
119 | * Additionally note that the struct sn_flush_device_war also has to be | 119 | * Additionally note that the struct sn_flush_device_war also has to be |
120 | * removed from arch/ia64/sn/include/xtalk/hubdev.h | 120 | * removed from arch/ia64/sn/include/xtalk/hubdev.h |
121 | */ | 121 | */ |
122 | static u8 war_implemented = 0; | ||
123 | 122 | ||
124 | static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device, | 123 | static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device, |
125 | struct sn_flush_device_common *common) | 124 | struct sn_flush_device_common *common) |
@@ -128,11 +127,8 @@ static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device,
128 | struct sn_flush_device_war *dev_entry; | 127 | struct sn_flush_device_war *dev_entry; |
129 | struct ia64_sal_retval isrv = {0,0,0,0}; | 128 | struct ia64_sal_retval isrv = {0,0,0,0}; |
130 | 129 | ||
131 | if (!war_implemented) { | 130 | printk_once(KERN_WARNING |
132 | printk(KERN_WARNING "PROM version < 4.50 -- implementing old " | 131 | "PROM version < 4.50 -- implementing old PROM flush WAR\n"); |
133 | "PROM flush WAR\n"); | ||
134 | war_implemented = 1; | ||
135 | } | ||
136 | 132 | ||
137 | war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL); | 133 | war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL); |
138 | BUG_ON(!war_list); | 134 | BUG_ON(!war_list); |
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 4c7e74790958..55ac3c4e11d2 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -786,17 +786,18 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, unsigned long arg)
786 | break; | 786 | break; |
787 | 787 | ||
788 | case SN_HWPERF_GET_OBJ_NODE: | 788 | case SN_HWPERF_GET_OBJ_NODE: |
789 | if (a.sz != sizeof(u64) || a.arg < 0) { | 789 | i = a.arg; |
790 | if (a.sz != sizeof(u64) || i < 0) { | ||
790 | r = -EINVAL; | 791 | r = -EINVAL; |
791 | goto error; | 792 | goto error; |
792 | } | 793 | } |
793 | if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) { | 794 | if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) { |
794 | if (a.arg >= nobj) { | 795 | if (i >= nobj) { |
795 | r = -EINVAL; | 796 | r = -EINVAL; |
796 | vfree(objs); | 797 | vfree(objs); |
797 | goto error; | 798 | goto error; |
798 | } | 799 | } |
799 | if (objs[(i = a.arg)].id != a.arg) { | 800 | if (objs[i].id != a.arg) { |
800 | for (i = 0; i < nobj; i++) { | 801 | for (i = 0; i < nobj; i++) { |
801 | if (objs[i].id == a.arg) | 802 | if (objs[i].id == a.arg) |
802 | break; | 803 | break; |