author	Linus Torvalds <torvalds@g5.osdl.org>	2006-01-20 01:16:58 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-01-20 01:16:58 -0500
commit	497992917edf03994088ec8757b8262e1d9f568a (patch)
tree	7e82975f98d3d3c9f80e10c981601db23eef5bd1 /arch
parent	7e732bfc5570b8f9bb5f155cf36e94b2e7d6bf6a (diff)
parent	386d1d50c8eef254653b1015fde06622ef38ba76 (diff)
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
Diffstat (limited to 'arch')
-rw-r--r--	arch/ia64/ia32/sys_ia32.c	28
-rw-r--r--	arch/ia64/kernel/perfmon.c	11
-rw-r--r--	arch/ia64/kernel/uncached.c	1
-rw-r--r--	arch/ia64/sn/include/xtalk/hubdev.h	9
-rw-r--r--	arch/ia64/sn/kernel/io_init.c	54
-rw-r--r--	arch/ia64/sn/kernel/mca.c	7
-rw-r--r--	arch/ia64/sn/kernel/xp_main.c	17
-rw-r--r--	arch/ia64/sn/kernel/xpc_channel.c	34
-rw-r--r--	arch/ia64/sn/kernel/xpc_main.c	17
-rw-r--r--	arch/ia64/sn/pci/pcibr/pcibr_provider.c	12
10 files changed, 123 insertions, 67 deletions
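Editor's note: most of what follows is a mechanical conversion of ia64 code away from legacy semaphores. Semaphores used purely for mutual exclusion become struct mutex, and semaphores used as one-shot event waits become completions. A minimal sketch of the mutex half of the pattern, using invented identifiers rather than any taken from the diff below:

#include <linux/mutex.h>

/* Hypothetical example of the conversion: a binary semaphore used only
 * for mutual exclusion is replaced by a struct mutex. */
static DEFINE_MUTEX(example_mutex);		/* was: static DECLARE_MUTEX(example_sem); */

static void example_critical_section(void)
{
	mutex_lock(&example_mutex);		/* was: down(&example_sem); */
	/* ... touch the shared state ... */
	mutex_unlock(&example_mutex);		/* was: up(&example_sem); */
}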
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 3945d378bd7e..70dba1f0e2ee 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -52,9 +52,9 @@
 #include <linux/compat.h>
 #include <linux/vfs.h>
 #include <linux/mman.h>
+#include <linux/mutex.h>
 
 #include <asm/intrinsics.h>
-#include <asm/semaphore.h>
 #include <asm/types.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -86,7 +86,7 @@
  * while doing so.
  */
 /* XXX make per-mm: */
-static DECLARE_MUTEX(ia32_mmap_sem);
+static DEFINE_MUTEX(ia32_mmap_mutex);
 
 asmlinkage long
 sys32_execve (char __user *name, compat_uptr_t __user *argv, compat_uptr_t __user *envp,
@@ -895,11 +895,11 @@ ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot
 	prot = get_prot32(prot);
 
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
-	down(&ia32_mmap_sem);
+	mutex_lock(&ia32_mmap_mutex);
 	{
 		addr = emulate_mmap(file, addr, len, prot, flags, offset);
 	}
-	up(&ia32_mmap_sem);
+	mutex_unlock(&ia32_mmap_mutex);
 #else
 	down_write(&current->mm->mmap_sem);
 	{
@@ -1000,11 +1000,9 @@ sys32_munmap (unsigned int start, unsigned int len)
 	if (start >= end)
 		return 0;
 
-	down(&ia32_mmap_sem);
-	{
-		ret = sys_munmap(start, end - start);
-	}
-	up(&ia32_mmap_sem);
+	mutex_lock(&ia32_mmap_mutex);
+	ret = sys_munmap(start, end - start);
+	mutex_unlock(&ia32_mmap_mutex);
 #endif
 	return ret;
 }
@@ -1056,7 +1054,7 @@ sys32_mprotect (unsigned int start, unsigned int len, int prot)
 	if (retval < 0)
 		return retval;
 
-	down(&ia32_mmap_sem);
+	mutex_lock(&ia32_mmap_mutex);
 	{
 		if (offset_in_page(start)) {
 			/* start address is 4KB aligned but not page aligned. */
@@ -1080,7 +1078,7 @@ sys32_mprotect (unsigned int start, unsigned int len, int prot)
 		retval = sys_mprotect(start, end - start, prot);
 	}
 out:
-	up(&ia32_mmap_sem);
+	mutex_unlock(&ia32_mmap_mutex);
 	return retval;
 #endif
 }
@@ -1124,11 +1122,9 @@ sys32_mremap (unsigned int addr, unsigned int old_len, unsigned int new_len,
 	old_len = PAGE_ALIGN(old_end) - addr;
 	new_len = PAGE_ALIGN(new_end) - addr;
 
-	down(&ia32_mmap_sem);
-	{
-		ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
-	}
-	up(&ia32_mmap_sem);
+	mutex_lock(&ia32_mmap_mutex);
+	ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
+	mutex_unlock(&ia32_mmap_mutex);
 
 	if ((ret >= 0) && (old_len < new_len)) {
 		/* mremap expanded successfully */
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 2ea4b39efffa..9c5194b385da 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -40,6 +40,7 @@
 #include <linux/bitops.h>
 #include <linux/capability.h>
 #include <linux/rcupdate.h>
+#include <linux/completion.h>
 
 #include <asm/errno.h>
 #include <asm/intrinsics.h>
@@ -286,7 +287,7 @@ typedef struct pfm_context {
 
 	unsigned long		ctx_ovfl_regs[4];	/* which registers overflowed (notification) */
 
-	struct semaphore	ctx_restart_sem;	/* use for blocking notification mode */
+	struct completion	ctx_restart_done;	/* use for blocking notification mode */
 
 	unsigned long		ctx_used_pmds[4];	/* bitmask of PMD used */
 	unsigned long		ctx_all_pmds[4];	/* bitmask of all accessible PMDs */
@@ -1991,7 +1992,7 @@ pfm_close(struct inode *inode, struct file *filp)
 		/*
 		 * force task to wake up from MASKED state
 		 */
-		up(&ctx->ctx_restart_sem);
+		complete(&ctx->ctx_restart_done);
 
 		DPRINT(("waking up ctx_state=%d\n", state));
 
@@ -2706,7 +2707,7 @@ pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
 	/*
 	 * init restart semaphore to locked
 	 */
-	sema_init(&ctx->ctx_restart_sem, 0);
+	init_completion(&ctx->ctx_restart_done);
 
 	/*
 	 * activation is used in SMP only
@@ -3687,7 +3688,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	 */
 	if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
 		DPRINT(("unblocking [%d] \n", task->pid));
-		up(&ctx->ctx_restart_sem);
+		complete(&ctx->ctx_restart_done);
 	} else {
 		DPRINT(("[%d] armed exit trap\n", task->pid));
 
@@ -5089,7 +5090,7 @@ pfm_handle_work(void)
 	 * may go through without blocking on SMP systems
 	 * if restart has been received already by the time we call down()
 	 */
-	ret = down_interruptible(&ctx->ctx_restart_sem);
+	ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);
 
 	DPRINT(("after block sleeping ret=%d\n", ret));
 
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index b631cf86ed44..fcd2bad0286f 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -210,6 +210,7 @@ uncached_build_memmap(unsigned long start, unsigned long end, void *arg)
 
 	dprintk(KERN_ERR "uncached_build_memmap(%lx %lx)\n", start, end);
 
+	touch_softlockup_watchdog();
 	memset((char *)start, 0, length);
 
 	node = paddr_to_nid(start - __IA64_UNCACHED_OFFSET);
diff --git a/arch/ia64/sn/include/xtalk/hubdev.h b/arch/ia64/sn/include/xtalk/hubdev.h
index 7c88e9a58516..8182583c762c 100644
--- a/arch/ia64/sn/include/xtalk/hubdev.h
+++ b/arch/ia64/sn/include/xtalk/hubdev.h
@@ -51,6 +51,15 @@ struct sn_flush_device_kernel {
 	struct sn_flush_device_common *common;
 };
 
+/* 01/16/06 This struct is the old PROM/kernel struct and needs to be included
+ * for older official PROMs to function on the new kernel base. This struct
+ * will be removed when the next official PROM release occurs. */
+
+struct sn_flush_device_war {
+	struct sn_flush_device_common common;
+	u32 filler; /* older PROMs expect the default size of a spinlock_t */
+};
+
 /*
  * **widget_p - Used as an array[wid_num][device] of sn_flush_device_kernel.
  */
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 233d55115d33..00700f7e6837 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -165,8 +165,45 @@ sn_pcidev_info_get(struct pci_dev *dev)
 	return NULL;
 }
 
+/* Older PROM flush WAR
+ *
+ * 01/16/06 -- This war will be in place until a new official PROM is released.
+ * Additionally note that the struct sn_flush_device_war also has to be
+ * removed from arch/ia64/sn/include/xtalk/hubdev.h
+ */
+static u8 war_implemented = 0;
+
+static void sn_device_fixup_war(u64 nasid, u64 widget, int device,
+				struct sn_flush_device_common *common)
+{
+	struct sn_flush_device_war *war_list;
+	struct sn_flush_device_war *dev_entry;
+	struct ia64_sal_retval isrv = {0,0,0,0};
+
+	if (!war_implemented) {
+		printk(KERN_WARNING "PROM version < 4.50 -- implementing old "
+		       "PROM flush WAR\n");
+		war_implemented = 1;
+	}
+
+	war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL);
+	if (!war_list)
+		BUG();
+
+	SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
+			nasid, widget, __pa(war_list), 0, 0, 0 ,0);
+	if (isrv.status)
+		panic("sn_device_fixup_war failed: %s\n",
+		      ia64_sal_strerror(isrv.status));
+
+	dev_entry = war_list + device;
+	memcpy(common, dev_entry, sizeof(*common));
+
+	kfree(war_list);
+}
+
 /*
  * sn_fixup_ionodes() - This routine initializes the HUB data strcuture for
  *	each node in the system.
  */
 static void sn_fixup_ionodes(void)
@@ -246,8 +283,19 @@ static void sn_fixup_ionodes(void)
							    widget,
							    device,
							    (u64)(dev_entry->common));
-			if (status)
-				BUG();
+			if (status) {
+				if (sn_sal_rev() < 0x0450) {
+					/* shortlived WAR for older
+					 * PROM images
+					 */
+					sn_device_fixup_war(nasid,
+							    widget,
+							    device,
+							    dev_entry->common);
+				}
+				else
+					BUG();
+			}
 
 			spin_lock_init(&dev_entry->sfdl_flush_lock);
 		}
diff --git a/arch/ia64/sn/kernel/mca.c b/arch/ia64/sn/kernel/mca.c
index 6546db6abdba..9ab684d1bb55 100644
--- a/arch/ia64/sn/kernel/mca.c
+++ b/arch/ia64/sn/kernel/mca.c
@@ -10,6 +10,7 @@
 #include <linux/kernel.h>
 #include <linux/timer.h>
 #include <linux/vmalloc.h>
+#include <linux/mutex.h>
 #include <asm/mca.h>
 #include <asm/sal.h>
 #include <asm/sn/sn_sal.h>
@@ -27,7 +28,7 @@ void sn_init_cpei_timer(void);
 /* Printing oemdata from mca uses data that is not passed through SAL, it is
  * global. Only one user at a time.
  */
-static DECLARE_MUTEX(sn_oemdata_mutex);
+static DEFINE_MUTEX(sn_oemdata_mutex);
 static u8 **sn_oemdata;
 static u64 *sn_oemdata_size, sn_oemdata_bufsize;
 
@@ -89,7 +90,7 @@ static int
 sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
				    u64 * oemdata_size)
 {
-	down(&sn_oemdata_mutex);
+	mutex_lock(&sn_oemdata_mutex);
 	sn_oemdata = oemdata;
 	sn_oemdata_size = oemdata_size;
 	sn_oemdata_bufsize = 0;
@@ -107,7 +108,7 @@ sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
 		*sn_oemdata_size = 0;
 		ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header);
 	}
-	up(&sn_oemdata_mutex);
+	mutex_unlock(&sn_oemdata_mutex);
 	return 0;
 }
 
diff --git a/arch/ia64/sn/kernel/xp_main.c b/arch/ia64/sn/kernel/xp_main.c
index 3be52a34c80f..b7ea46645e12 100644
--- a/arch/ia64/sn/kernel/xp_main.c
+++ b/arch/ia64/sn/kernel/xp_main.c
@@ -19,6 +19,7 @@
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/xp.h>
@@ -136,13 +137,13 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
 
 	registration = &xpc_registrations[ch_number];
 
-	if (down_interruptible(&registration->sema) != 0) {
+	if (mutex_lock_interruptible(&registration->mutex) != 0) {
 		return xpcInterrupted;
 	}
 
 	/* if XPC_CHANNEL_REGISTERED(ch_number) */
 	if (registration->func != NULL) {
-		up(&registration->sema);
+		mutex_unlock(&registration->mutex);
 		return xpcAlreadyRegistered;
 	}
 
@@ -154,7 +155,7 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
 	registration->key = key;
 	registration->func = func;
 
-	up(&registration->sema);
+	mutex_unlock(&registration->mutex);
 
 	xpc_interface.connect(ch_number);
 
@@ -190,11 +191,11 @@ xpc_disconnect(int ch_number)
 	 * figured XPC's users will just turn around and call xpc_disconnect()
 	 * again anyways, so we might as well wait, if need be.
 	 */
-	down(&registration->sema);
+	mutex_lock(&registration->mutex);
 
 	/* if !XPC_CHANNEL_REGISTERED(ch_number) */
 	if (registration->func == NULL) {
-		up(&registration->sema);
+		mutex_unlock(&registration->mutex);
 		return;
 	}
 
@@ -208,7 +209,7 @@ xpc_disconnect(int ch_number)
 
 	xpc_interface.disconnect(ch_number);
 
-	up(&registration->sema);
+	mutex_unlock(&registration->mutex);
 
 	return;
 }
@@ -250,9 +251,9 @@ xp_init(void)
 		xp_nofault_PIOR_target = SH1_IPI_ACCESS;
 	}
 
-	/* initialize the connection registration semaphores */
+	/* initialize the connection registration mutex */
 	for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) {
-		sema_init(&xpc_registrations[ch_number].sema, 1); /* mutex */
+		mutex_init(&xpc_registrations[ch_number].mutex);
 	}
 
 	return 0;
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index 0c0a68902409..8d950c778bb6 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -22,6 +22,8 @@
 #include <linux/cache.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
 #include <asm/sn/bte.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/xpc.h>
@@ -56,8 +58,8 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
 		atomic_set(&ch->n_to_notify, 0);
 
 		spin_lock_init(&ch->lock);
-		sema_init(&ch->msg_to_pull_sema, 1);	/* mutex */
-		sema_init(&ch->wdisconnect_sema, 0);	/* event wait */
+		mutex_init(&ch->msg_to_pull_mutex);
+		init_completion(&ch->wdisconnect_wait);
 
 		atomic_set(&ch->n_on_msg_allocate_wq, 0);
 		init_waitqueue_head(&ch->msg_allocate_wq);
@@ -534,7 +536,6 @@ static enum xpc_retval
 xpc_allocate_msgqueues(struct xpc_channel *ch)
 {
 	unsigned long irq_flags;
-	int i;
 	enum xpc_retval ret;
 
 
@@ -552,11 +553,6 @@ xpc_allocate_msgqueues(struct xpc_channel *ch)
 		return ret;
 	}
 
-	for (i = 0; i < ch->local_nentries; i++) {
-		/* use a semaphore as an event wait queue */
-		sema_init(&ch->notify_queue[i].sema, 0);
-	}
-
 	spin_lock_irqsave(&ch->lock, irq_flags);
 	ch->flags |= XPC_C_SETUP;
 	spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -799,10 +795,8 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
 	}
 
 	if (ch->flags & XPC_C_WDISCONNECT) {
-		spin_unlock_irqrestore(&ch->lock, *irq_flags);
-		up(&ch->wdisconnect_sema);
-		spin_lock_irqsave(&ch->lock, *irq_flags);
-
+		/* we won't lose the CPU since we're holding ch->lock */
+		complete(&ch->wdisconnect_wait);
 	} else if (ch->delayed_IPI_flags) {
 		if (part->act_state != XPC_P_DEACTIVATING) {
 			/* time to take action on any delayed IPI flags */
@@ -1092,12 +1086,12 @@ xpc_connect_channel(struct xpc_channel *ch)
 	struct xpc_registration *registration = &xpc_registrations[ch->number];
 
 
-	if (down_trylock(&registration->sema) != 0) {
+	if (mutex_trylock(&registration->mutex) == 0) {
 		return xpcRetry;
 	}
 
 	if (!XPC_CHANNEL_REGISTERED(ch->number)) {
-		up(&registration->sema);
+		mutex_unlock(&registration->mutex);
 		return xpcUnregistered;
 	}
 
@@ -1108,7 +1102,7 @@ xpc_connect_channel(struct xpc_channel *ch)
 
 	if (ch->flags & XPC_C_DISCONNECTING) {
 		spin_unlock_irqrestore(&ch->lock, irq_flags);
-		up(&registration->sema);
+		mutex_unlock(&registration->mutex);
 		return ch->reason;
 	}
 
@@ -1140,7 +1134,7 @@ xpc_connect_channel(struct xpc_channel *ch)
 			 * channel lock be locked and will unlock and relock
 			 * the channel lock as needed.
 			 */
-			up(&registration->sema);
+			mutex_unlock(&registration->mutex);
 			XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
					       &irq_flags);
 			spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -1155,7 +1149,7 @@ xpc_connect_channel(struct xpc_channel *ch)
 		atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
 	}
 
-	up(&registration->sema);
+	mutex_unlock(&registration->mutex);
 
 
 	/* initiate the connection */
@@ -2089,7 +2083,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
 	enum xpc_retval ret;
 
 
-	if (down_interruptible(&ch->msg_to_pull_sema) != 0) {
+	if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) {
 		/* we were interrupted by a signal */
 		return NULL;
 	}
@@ -2125,7 +2119,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
 
 		XPC_DEACTIVATE_PARTITION(part, ret);
 
-		up(&ch->msg_to_pull_sema);
+		mutex_unlock(&ch->msg_to_pull_mutex);
 		return NULL;
 	}
 
@@ -2134,7 +2128,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
 		ch->next_msg_to_pull += nmsgs;
 	}
 
-	up(&ch->msg_to_pull_sema);
+	mutex_unlock(&ch->msg_to_pull_mutex);
 
 	/* return the message we were looking for */
 	msg_offset = (get % ch->remote_nentries) * ch->msg_size;
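Editor's note: one subtle point in the xpc_connect_channel() hunk above is the inverted return-value convention: down_trylock() returns nonzero when it fails to take the semaphore, while mutex_trylock() returns 1 on success and 0 on failure, so the test flips from != 0 to == 0. A schematic of the two idioms with a made-up lock name:

#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);	/* invented name, not from the diff */

static int example_try_enter(void)
{
	/* Old idiom: if (down_trylock(&example_sem) != 0) return -EBUSY;
	 * (nonzero from down_trylock() means the lock was NOT taken). */
	if (mutex_trylock(&example_lock) == 0)	/* 0 means the lock was NOT taken */
		return -EBUSY;

	/* ... critical section ... */
	mutex_unlock(&example_lock);
	return 0;
}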
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index 8930586e0eb4..c75f8aeefc2b 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -55,6 +55,7 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/reboot.h>
+#include <linux/completion.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/kdebug.h>
@@ -177,10 +178,10 @@ static DECLARE_WAIT_QUEUE_HEAD(xpc_act_IRQ_wq);
 static unsigned long xpc_hb_check_timeout;
 
 /* notification that the xpc_hb_checker thread has exited */
-static DECLARE_MUTEX_LOCKED(xpc_hb_checker_exited);
+static DECLARE_COMPLETION(xpc_hb_checker_exited);
 
 /* notification that the xpc_discovery thread has exited */
-static DECLARE_MUTEX_LOCKED(xpc_discovery_exited);
+static DECLARE_COMPLETION(xpc_discovery_exited);
 
 
 static struct timer_list xpc_hb_timer;
@@ -321,7 +322,7 @@ xpc_hb_checker(void *ignore)
 
 
 	/* mark this thread as having exited */
-	up(&xpc_hb_checker_exited);
+	complete(&xpc_hb_checker_exited);
 	return 0;
 }
 
@@ -341,7 +342,7 @@ xpc_initiate_discovery(void *ignore)
 	dev_dbg(xpc_part, "discovery thread is exiting\n");
 
 	/* mark this thread as having exited */
-	up(&xpc_discovery_exited);
+	complete(&xpc_discovery_exited);
 	return 0;
 }
 
@@ -893,7 +894,7 @@ xpc_disconnect_wait(int ch_number)
 			continue;
 		}
 
-		(void) down(&ch->wdisconnect_sema);
+		wait_for_completion(&ch->wdisconnect_wait);
 
 		spin_lock_irqsave(&ch->lock, irq_flags);
 		DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
@@ -946,10 +947,10 @@ xpc_do_exit(enum xpc_retval reason)
 	free_irq(SGI_XPC_ACTIVATE, NULL);
 
 	/* wait for the discovery thread to exit */
-	down(&xpc_discovery_exited);
+	wait_for_completion(&xpc_discovery_exited);
 
 	/* wait for the heartbeat checker thread to exit */
-	down(&xpc_hb_checker_exited);
+	wait_for_completion(&xpc_hb_checker_exited);
 
 
 	/* sleep for a 1/3 of a second or so */
@@ -1367,7 +1368,7 @@ xpc_init(void)
 		dev_err(xpc_part, "failed while forking discovery thread\n");
 
 		/* mark this new thread as a non-starter */
-		up(&xpc_discovery_exited);
+		complete(&xpc_discovery_exited);
 
 		xpc_do_exit(xpcUnloading);
 		return -EBUSY;
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
index 77a1262751d3..2fac27049bf6 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -24,13 +24,15 @@ sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp)
 {
 	struct ia64_sal_retval ret_stuff;
 	u64 busnum;
+	u64 segment;
 
 	ret_stuff.status = 0;
 	ret_stuff.v0 = 0;
 
+	segment = soft->pbi_buscommon.bs_persist_segment;
 	busnum = soft->pbi_buscommon.bs_persist_busnum;
-	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, (u64) busnum,
-			(u64) device, (u64) resp, 0, 0, 0, 0);
+	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, segment,
+			busnum, (u64) device, (u64) resp, 0, 0, 0);
 
 	return (int)ret_stuff.v0;
 }
@@ -41,14 +43,16 @@ sal_pcibr_slot_disable(struct pcibus_info *soft, int device, int action,
 {
 	struct ia64_sal_retval ret_stuff;
 	u64 busnum;
+	u64 segment;
 
 	ret_stuff.status = 0;
 	ret_stuff.v0 = 0;
 
+	segment = soft->pbi_buscommon.bs_persist_segment;
 	busnum = soft->pbi_buscommon.bs_persist_busnum;
 	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_DISABLE,
-			(u64) busnum, (u64) device, (u64) action,
-			(u64) resp, 0, 0, 0);
+			segment, busnum, (u64) device, (u64) action,
+			(u64) resp, 0, 0);
 
 	return (int)ret_stuff.v0;
 }