author     Linus Torvalds <torvalds@g5.osdl.org>  2006-02-06 18:46:39 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-02-06 18:46:39 -0500
commit     c03296a868ae7c91aa2d8b372184763b18f16d7a (patch)
tree       57ff0b44a412b78fe598a49cc28b3cebf51f1154
parent     e3f749c4af69c4344d89f11e2293e3790eb4eaca (diff)
parent     913e4a75572354995b330f57082d9a86250cd75f (diff)
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
-rw-r--r--  arch/ia64/Kconfig                  |   4
-rw-r--r--  arch/ia64/kernel/sal.c             |  75
-rw-r--r--  arch/ia64/sn/Makefile              |   2
-rw-r--r--  arch/ia64/sn/kernel/Makefile       |   2
-rw-r--r--  arch/ia64/sn/kernel/bte.c          |  17
-rw-r--r--  arch/ia64/sn/kernel/io_init.c      |   9
-rw-r--r--  arch/ia64/sn/kernel/irq.c          |  21
-rw-r--r--  arch/ia64/sn/kernel/klconflib.c    |  29
-rw-r--r--  arch/ia64/sn/kernel/setup.c        |  19
-rw-r--r--  arch/ia64/sn/kernel/sn2/Makefile   |   2
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn2_smp.c  | 196
-rw-r--r--  arch/ia64/sn/kernel/xpc_main.c     |  19
-rw-r--r--  arch/ia64/sn/pci/Makefile          |   2
-rw-r--r--  arch/ia64/sn/pci/pcibr/Makefile    |   2
-rw-r--r--  drivers/Makefile                   |   2
-rw-r--r--  include/asm-ia64/processor.h       |  17
-rw-r--r--  include/asm-ia64/sal.h             |  10
-rw-r--r--  include/asm-ia64/sn/bte.h          |  23
-rw-r--r--  include/asm-ia64/sn/intr.h         |  38
-rw-r--r--  include/asm-ia64/system.h          |  25
20 files changed, 278 insertions(+), 236 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 199eeaf0f4e3..845cd0902a50 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -194,7 +194,6 @@ config IA64_L1_CACHE_SHIFT
         default "7" if MCKINLEY
         default "6" if ITANIUM
 
-# align cache-sensitive data to 64 bytes
 config IA64_CYCLONE
         bool "Cyclone (EXA) Time Source support"
         help
@@ -374,6 +373,9 @@ config IA64_PALINFO
           To use this option, you have to ensure that the "/proc file system
           support" (CONFIG_PROC_FS) is enabled, too.
 
+config SGI_SN
+        def_bool y if (IA64_SGI_SN2 || IA64_GENERIC)
+
 source "drivers/firmware/Kconfig"
 
 source "fs/Kconfig.binfmt"
diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c
index acc0f132f86c..056f7a6eedc7 100644
--- a/arch/ia64/kernel/sal.c
+++ b/arch/ia64/kernel/sal.c
@@ -14,6 +14,7 @@
 #include <linux/spinlock.h>
 #include <linux/string.h>
 
+#include <asm/delay.h>
 #include <asm/page.h>
 #include <asm/sal.h>
 #include <asm/pal.h>
@@ -214,6 +215,78 @@ chk_nointroute_opt(void)
 static void __init sal_desc_ap_wakeup(void *p) { }
 #endif
 
+/*
+ * HP rx5670 firmware polls for interrupts during SAL_CACHE_FLUSH by reading
+ * cr.ivr, but it never writes cr.eoi.  This leaves any interrupt marked as
+ * "in-service" and masks other interrupts of equal or lower priority.
+ *
+ * HP internal defect reports: F1859, F2775, F3031.
+ */
+static int sal_cache_flush_drops_interrupts;
+
+static void __init
+check_sal_cache_flush (void)
+{
+        unsigned long flags, itv;
+        int cpu;
+        u64 vector;
+
+        cpu = get_cpu();
+        local_irq_save(flags);
+
+        /*
+         * Schedule a timer interrupt, wait until it's reported, and see if
+         * SAL_CACHE_FLUSH drops it.
+         */
+        itv = ia64_get_itv();
+        BUG_ON((itv & (1 << 16)) == 0);
+
+        ia64_set_itv(IA64_TIMER_VECTOR);
+        ia64_set_itm(ia64_get_itc() + 1000);
+
+        while (!ia64_get_irr(IA64_TIMER_VECTOR))
+                cpu_relax();
+
+        ia64_sal_cache_flush(3);
+
+        if (ia64_get_irr(IA64_TIMER_VECTOR)) {
+                vector = ia64_get_ivr();
+                ia64_eoi();
+                WARN_ON(vector != IA64_TIMER_VECTOR);
+        } else {
+                sal_cache_flush_drops_interrupts = 1;
+                printk(KERN_ERR "SAL: SAL_CACHE_FLUSH drops interrupts; "
+                        "PAL_CACHE_FLUSH will be used instead\n");
+                ia64_eoi();
+        }
+
+        ia64_set_itv(itv);
+        local_irq_restore(flags);
+        put_cpu();
+}
+
+s64
+ia64_sal_cache_flush (u64 cache_type)
+{
+        struct ia64_sal_retval isrv;
+
+        if (sal_cache_flush_drops_interrupts) {
+                unsigned long flags;
+                u64 progress;
+                s64 rc;
+
+                progress = 0;
+                local_irq_save(flags);
+                rc = ia64_pal_cache_flush(cache_type,
+                        PAL_CACHE_FLUSH_INVALIDATE, &progress, NULL);
+                local_irq_restore(flags);
+                return rc;
+        }
+
+        SAL_CALL(isrv, SAL_CACHE_FLUSH, cache_type, 0, 0, 0, 0, 0, 0);
+        return isrv.status;
+}
+
 void __init
 ia64_sal_init (struct ia64_sal_systab *systab)
 {
@@ -262,6 +335,8 @@ ia64_sal_init (struct ia64_sal_systab *systab)
                 }
                 p += SAL_DESC_SIZE(*p);
         }
+
+        check_sal_cache_flush();
 }
 
 int
diff --git a/arch/ia64/sn/Makefile b/arch/ia64/sn/Makefile
index a269f6d84c29..79a7df02e812 100644
--- a/arch/ia64/sn/Makefile
+++ b/arch/ia64/sn/Makefile
@@ -9,6 +9,4 @@
 # Makefile for the sn ia64 subplatform
 #
 
-CPPFLAGS += -I$(srctree)/arch/ia64/sn/include
-
 obj-y += kernel/ pci/
diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile
index 4351c4ff9845..3e9b4eea7418 100644
--- a/arch/ia64/sn/kernel/Makefile
+++ b/arch/ia64/sn/kernel/Makefile
@@ -7,6 +7,8 @@
 # Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All Rights Reserved.
 #
 
+CPPFLAGS += -I$(srctree)/arch/ia64/sn/include
+
 obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \
          huberror.o io_init.o iomv.o klconflib.o sn2/
 obj-$(CONFIG_IA64_GENERIC) += machvec.o
diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c
index dd73c0cb754b..1f11db470d90 100644
--- a/arch/ia64/sn/kernel/bte.c
+++ b/arch/ia64/sn/kernel/bte.c
@@ -3,7 +3,7 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc. All Rights Reserved.
  */
 
 #include <linux/config.h>
@@ -186,18 +186,13 @@ retry_bteop:
 
         /* Initialize the notification to a known value. */
         *bte->most_rcnt_na = BTE_WORD_BUSY;
-        notif_phys_addr = TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na));
+        notif_phys_addr = (u64)bte->most_rcnt_na;
 
-        if (is_shub2()) {
-                src = SH2_TIO_PHYS_TO_DMA(src);
-                dest = SH2_TIO_PHYS_TO_DMA(dest);
-                notif_phys_addr = SH2_TIO_PHYS_TO_DMA(notif_phys_addr);
-        }
         /* Set the source and destination registers */
-        BTE_PRINTKV(("IBSA = 0x%lx)\n", (TO_PHYS(src))));
-        BTE_SRC_STORE(bte, TO_PHYS(src));
-        BTE_PRINTKV(("IBDA = 0x%lx)\n", (TO_PHYS(dest))));
-        BTE_DEST_STORE(bte, TO_PHYS(dest));
+        BTE_PRINTKV(("IBSA = 0x%lx)\n", src));
+        BTE_SRC_STORE(bte, src);
+        BTE_PRINTKV(("IBDA = 0x%lx)\n", dest));
+        BTE_DEST_STORE(bte, dest);
 
         /* Set the notification register */
         BTE_PRINTKV(("IBNA = 0x%lx)\n", notif_phys_addr));
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index a4c78152b336..d7e4d79e16a8 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -208,7 +208,7 @@ static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device,
  * sn_fixup_ionodes() - This routine initializes the HUB data strcuture for
  * each node in the system.
  */
-static void sn_fixup_ionodes(void)
+static void __init sn_fixup_ionodes(void)
 {
         struct sn_flush_device_kernel *sn_flush_device_kernel;
         struct sn_flush_device_kernel *dev_entry;
@@ -467,6 +467,13 @@ void sn_pci_fixup_slot(struct pci_dev *dev)
                 pcidev_info->pdi_sn_irq_info = NULL;
                 kfree(sn_irq_info);
         }
+
+        /*
+         * MSI currently not supported on altix. Remove this when
+         * the MSI abstraction patches are integrated into the kernel
+         * (sometime after 2.6.16 releases)
+         */
+        dev->no_msi = 1;
 }
 
 /*
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index ec37084bdc17..74d87d903d5d 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -5,11 +5,12 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc. All Rights Reserved.
  */
 
 #include <linux/irq.h>
 #include <linux/spinlock.h>
+#include <linux/init.h>
 #include <asm/sn/addrs.h>
 #include <asm/sn/arch.h>
 #include <asm/sn/intr.h>
@@ -76,17 +77,15 @@ static void sn_enable_irq(unsigned int irq)
 
 static void sn_ack_irq(unsigned int irq)
 {
-        u64 event_occurred, mask = 0;
+        u64 event_occurred, mask;
 
         irq = irq & 0xff;
-        event_occurred =
-            HUB_L((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED));
+        event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED));
         mask = event_occurred & SH_ALL_INT_MASK;
-        HUB_S((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS),
-              mask);
+        HUB_S((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mask);
         __set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
 
-        move_irq(irq);
+        move_native_irq(irq);
 }
 
 static void sn_end_irq(unsigned int irq)
@@ -219,9 +218,8 @@ static void register_intr_pda(struct sn_irq_info *sn_irq_info)
                 pdacpu(cpu)->sn_last_irq = irq;
         }
 
-        if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq) {
+        if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq)
                 pdacpu(cpu)->sn_first_irq = irq;
-        }
 }
 
 static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
@@ -289,7 +287,7 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
         list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
         spin_unlock(&sn_irq_info_lock);
 
-        (void)register_intr_pda(sn_irq_info);
+        register_intr_pda(sn_irq_info);
 }
 
 void sn_irq_unfixup(struct pci_dev *pci_dev)
@@ -419,7 +417,7 @@ void sn_lb_int_war_check(void)
         rcu_read_unlock();
 }
 
-void sn_irq_lh_init(void)
+void __init sn_irq_lh_init(void)
 {
         int i;
 
@@ -434,5 +432,4 @@ void sn_irq_lh_init(void)
 
                 INIT_LIST_HEAD(sn_irq_lh[i]);
         }
-
 }
diff --git a/arch/ia64/sn/kernel/klconflib.c b/arch/ia64/sn/kernel/klconflib.c
index 0f11a3299cd2..87682b48ef83 100644
--- a/arch/ia64/sn/kernel/klconflib.c
+++ b/arch/ia64/sn/kernel/klconflib.c
@@ -78,31 +78,30 @@ format_module_id(char *buffer, moduleid_t m, int fmt)
         position = MODULE_GET_BPOS(m);
 
         if ((fmt == MODULE_FORMAT_BRIEF) || (fmt == MODULE_FORMAT_LCD)) {
                 /* Brief module number format, eg. 002c15 */
 
                 /* Decompress the rack number */
                 *buffer++ = '0' + RACK_GET_CLASS(rack);
                 *buffer++ = '0' + RACK_GET_GROUP(rack);
                 *buffer++ = '0' + RACK_GET_NUM(rack);
 
                 /* Add the brick type */
                 *buffer++ = brickchar;
         }
         else if (fmt == MODULE_FORMAT_LONG) {
                 /* Fuller hwgraph format, eg. rack/002/bay/15 */
 
                 strcpy(buffer, "rack" "/"); buffer += strlen(buffer);
 
                 *buffer++ = '0' + RACK_GET_CLASS(rack);
                 *buffer++ = '0' + RACK_GET_GROUP(rack);
                 *buffer++ = '0' + RACK_GET_NUM(rack);
 
                 strcpy(buffer, "/" "bay" "/"); buffer += strlen(buffer);
         }
 
         /* Add the bay position, using at least two digits */
         if (position < 10)
                 *buffer++ = '0';
         sprintf(buffer, "%d", position);
-
 }
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index e510dce9971f..ee36bff93c30 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -209,7 +209,7 @@ void __init early_sn_setup(void)
 }
 
 extern int platform_intr_list[];
-static int __initdata shub_1_1_found = 0;
+static int __initdata shub_1_1_found;
 
 /*
  * sn_check_for_wars
@@ -578,13 +578,17 @@ void __init sn_cpu_init(void)
                         sn_prom_type = 2;
                 else
                         sn_prom_type = 1;
-                printk("Running on medusa with %s PROM\n", (sn_prom_type == 1) ? "real" : "fake");
+                printk(KERN_INFO "Running on medusa with %s PROM\n",
+                       (sn_prom_type == 1) ? "real" : "fake");
         }
 
         memset(pda, 0, sizeof(pda));
-        if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2, &sn_hub_info->nasid_bitmask, &sn_hub_info->nasid_shift,
-                &sn_system_size, &sn_sharing_domain_size, &sn_partition_id,
-                &sn_coherency_id, &sn_region_size))
+        if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2,
+                                &sn_hub_info->nasid_bitmask,
+                                &sn_hub_info->nasid_shift,
+                                &sn_system_size, &sn_sharing_domain_size,
+                                &sn_partition_id, &sn_coherency_id,
+                                &sn_region_size))
                 BUG();
         sn_hub_info->as_shift = sn_hub_info->nasid_shift - 2;
 
@@ -716,7 +720,8 @@ void __init build_cnode_tables(void)
         for_each_online_node(node) {
                 kl_config_hdr_t *klgraph_header;
                 nasid = cnodeid_to_nasid(node);
-                if ((klgraph_header = ia64_sn_get_klconfig_addr(nasid)) == NULL)
+                klgraph_header = ia64_sn_get_klconfig_addr(nasid);
+                if (klgraph_header == NULL)
                         BUG();
                 brd = NODE_OFFSET_TO_LBOARD(nasid, klgraph_header->ch_board_info);
                 while (brd) {
@@ -734,7 +739,7 @@ nasid_slice_to_cpuid(int nasid, int slice)
 {
         long cpu;
 
-        for (cpu=0; cpu < NR_CPUS; cpu++)
+        for (cpu = 0; cpu < NR_CPUS; cpu++)
                 if (cpuid_to_nasid(cpu) == nasid &&
                     cpuid_to_slice(cpu) == slice)
                         return cpu;
diff --git a/arch/ia64/sn/kernel/sn2/Makefile b/arch/ia64/sn/kernel/sn2/Makefile
index 170bde4549da..99e177693234 100644
--- a/arch/ia64/sn/kernel/sn2/Makefile
+++ b/arch/ia64/sn/kernel/sn2/Makefile
@@ -9,5 +9,7 @@
 # sn2 specific kernel files
 #
 
+CPPFLAGS += -I$(srctree)/arch/ia64/sn/include
+
 obj-y += cache.o io.o ptc_deadlock.o sn2_smp.o sn_proc_fs.o \
          prominfo_proc.o timer.o timer_interrupt.o sn_hwperf.o
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index 471bbaa65d1b..f153a4c35c70 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -5,7 +5,7 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2000-2006 Silicon Graphics, Inc. All rights reserved.
  */
 
 #include <linux/init.h>
@@ -46,104 +46,28 @@ DECLARE_PER_CPU(struct ptc_stats, ptcstats);
 
 static __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock);
 
-void sn2_ptc_deadlock_recovery(short *, short, int, volatile unsigned long *, unsigned long data0,
-        volatile unsigned long *, unsigned long data1);
+void sn2_ptc_deadlock_recovery(short *, short, short, int, volatile unsigned long *, unsigned long,
+        volatile unsigned long *, unsigned long);
 
-#ifdef DEBUG_PTC
 /*
- * ptctest:
- *
- *      xyz - 3 digit hex number:
- *              x - Force PTC purges to use shub:
- *                      0 - no force
- *                      1 - force
- *              y - interupt enable
- *                      0 - disable interrupts
- *                      1 - leave interuupts enabled
- *              z - type of lock:
- *                      0 - global lock
- *                      1 - node local lock
- *                      2 - no lock
- *
- *      Note: on shub1, only ptctest == 0 is supported. Don't try other values!
+ * Note: some is the following is captured here to make degugging easier
+ * (the macros make more sense if you see the debug patch - not posted)
  */
-
-static unsigned int sn2_ptctest = 0;
-
-static int __init ptc_test(char *str)
-{
-        get_option(&str, &sn2_ptctest);
-        return 1;
-}
-__setup("ptctest=", ptc_test);
-
-static inline int ptc_lock(unsigned long *flagp)
-{
-        unsigned long opt = sn2_ptctest & 255;
-
-        switch (opt) {
-        case 0x00:
-                spin_lock_irqsave(&sn2_global_ptc_lock, *flagp);
-                break;
-        case 0x01:
-                spin_lock_irqsave(&sn_nodepda->ptc_lock, *flagp);
-                break;
-        case 0x02:
-                local_irq_save(*flagp);
-                break;
-        case 0x10:
-                spin_lock(&sn2_global_ptc_lock);
-                break;
-        case 0x11:
-                spin_lock(&sn_nodepda->ptc_lock);
-                break;
-        case 0x12:
-                break;
-        default:
-                BUG();
-        }
-        return opt;
-}
-
-static inline void ptc_unlock(unsigned long flags, int opt)
-{
-        switch (opt) {
-        case 0x00:
-                spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
-                break;
-        case 0x01:
-                spin_unlock_irqrestore(&sn_nodepda->ptc_lock, flags);
-                break;
-        case 0x02:
-                local_irq_restore(flags);
-                break;
-        case 0x10:
-                spin_unlock(&sn2_global_ptc_lock);
-                break;
-        case 0x11:
-                spin_unlock(&sn_nodepda->ptc_lock);
-                break;
-        case 0x12:
-                break;
-        default:
-                BUG();
-        }
-}
-#else
-
 #define sn2_ptctest 0
+#define local_node_uses_ptc_ga(sh1) ((sh1) ? 1 : 0)
+#define max_active_pio(sh1) ((sh1) ? 32 : 7)
+#define reset_max_active_on_deadlock() 1
+#define PTC_LOCK(sh1) ((sh1) ? &sn2_global_ptc_lock : &sn_nodepda->ptc_lock)
 
-static inline int ptc_lock(unsigned long *flagp)
+static inline void ptc_lock(int sh1, unsigned long *flagp)
 {
-        spin_lock_irqsave(&sn2_global_ptc_lock, *flagp);
-        return 0;
+        spin_lock_irqsave(PTC_LOCK(sh1), *flagp);
 }
 
-static inline void ptc_unlock(unsigned long flags, int opt)
+static inline void ptc_unlock(int sh1, unsigned long flags)
 {
-        spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
+        spin_unlock_irqrestore(PTC_LOCK(sh1), flags);
 }
-#endif
 
 struct ptc_stats {
         unsigned long ptc_l;
@@ -151,27 +75,30 @@ struct ptc_stats {
         unsigned long shub_ptc_flushes;
         unsigned long nodes_flushed;
         unsigned long deadlocks;
+        unsigned long deadlocks2;
         unsigned long lock_itc_clocks;
         unsigned long shub_itc_clocks;
         unsigned long shub_itc_clocks_max;
+        unsigned long shub_ptc_flushes_not_my_mm;
 };
 
 static inline unsigned long wait_piowc(void)
 {
-        volatile unsigned long *piows, zeroval;
-        unsigned long ws;
+        volatile unsigned long *piows;
+        unsigned long zeroval, ws;
 
         piows = pda->pio_write_status_addr;
         zeroval = pda->pio_write_status_val;
         do {
                 cpu_relax();
         } while (((ws = *piows) & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != zeroval);
-        return ws;
+        return (ws & SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK) != 0;
 }
 
 void sn_tlb_migrate_finish(struct mm_struct *mm)
 {
-        if (mm == current->mm)
+        /* flush_tlb_mm is inefficient if more than 1 users of mm */
+        if (mm == current->mm && mm && atomic_read(&mm->mm_users) == 1)
                 flush_tlb_mm(mm);
 }
 
@@ -201,12 +128,14 @@ void
 sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
                      unsigned long end, unsigned long nbits)
 {
-        int i, opt, shub1, cnode, mynasid, cpu, lcpu = 0, nasid, flushed = 0;
-        int mymm = (mm == current->active_mm && current->mm);
+        int i, ibegin, shub1, cnode, mynasid, cpu, lcpu = 0, nasid;
+        int mymm = (mm == current->active_mm && mm == current->mm);
+        int use_cpu_ptcga;
         volatile unsigned long *ptc0, *ptc1;
-        unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value;
+        unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value, old_rr = 0;
         short nasids[MAX_NUMNODES], nix;
         nodemask_t nodes_flushed;
+        int active, max_active, deadlock;
 
         nodes_clear(nodes_flushed);
         i = 0;
@@ -267,41 +196,56 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
 
 
         mynasid = get_nasid();
+        use_cpu_ptcga = local_node_uses_ptc_ga(shub1);
+        max_active = max_active_pio(shub1);
 
         itc = ia64_get_itc();
-        opt = ptc_lock(&flags);
+        ptc_lock(shub1, &flags);
         itc2 = ia64_get_itc();
+
         __get_cpu_var(ptcstats).lock_itc_clocks += itc2 - itc;
         __get_cpu_var(ptcstats).shub_ptc_flushes++;
         __get_cpu_var(ptcstats).nodes_flushed += nix;
+        if (!mymm)
+                __get_cpu_var(ptcstats).shub_ptc_flushes_not_my_mm++;
 
+        if (use_cpu_ptcga && !mymm) {
+                old_rr = ia64_get_rr(start);
+                ia64_set_rr(start, (old_rr & 0xff) | (rr_value << 8));
+                ia64_srlz_d();
+        }
+
+        wait_piowc();
         do {
                 if (shub1)
                         data1 = start | (1UL << SH1_PTC_1_START_SHFT);
                 else
                         data0 = (data0 & ~SH2_PTC_ADDR_MASK) | (start & SH2_PTC_ADDR_MASK);
-                for (i = 0; i < nix; i++) {
+                deadlock = 0;
+                active = 0;
+                for (ibegin = 0, i = 0; i < nix; i++) {
                         nasid = nasids[i];
-                        if ((!(sn2_ptctest & 3)) && unlikely(nasid == mynasid && mymm)) {
+                        if (use_cpu_ptcga && unlikely(nasid == mynasid)) {
                                 ia64_ptcga(start, nbits << 2);
                                 ia64_srlz_i();
                         } else {
                                 ptc0 = CHANGE_NASID(nasid, ptc0);
                                 if (ptc1)
                                         ptc1 = CHANGE_NASID(nasid, ptc1);
-                                pio_atomic_phys_write_mmrs(ptc0, data0, ptc1,
-                                                           data1);
-                                flushed = 1;
+                                pio_atomic_phys_write_mmrs(ptc0, data0, ptc1, data1);
+                                active++;
+                        }
+                        if (active >= max_active || i == (nix - 1)) {
+                                if ((deadlock = wait_piowc())) {
+                                        sn2_ptc_deadlock_recovery(nasids, ibegin, i, mynasid, ptc0, data0, ptc1, data1);
+                                        if (reset_max_active_on_deadlock())
+                                                max_active = 1;
+                                }
+                                active = 0;
+                                ibegin = i + 1;
                         }
                 }
-                if (flushed
-                    && (wait_piowc() &
-                        (SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK))) {
-                        sn2_ptc_deadlock_recovery(nasids, nix, mynasid, ptc0, data0, ptc1, data1);
-                }
-
                 start += (1UL << nbits);
-
         } while (start < end);
 
         itc2 = ia64_get_itc() - itc2;
@@ -309,7 +253,12 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
         if (itc2 > __get_cpu_var(ptcstats).shub_itc_clocks_max)
                 __get_cpu_var(ptcstats).shub_itc_clocks_max = itc2;
 
-        ptc_unlock(flags, opt);
+        if (old_rr) {
+                ia64_set_rr(start, old_rr);
+                ia64_srlz_d();
+        }
+
+        ptc_unlock(shub1, flags);
 
         preempt_enable();
 }
@@ -321,27 +270,30 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
  * TLB flush transaction. The recovery sequence is somewhat tricky & is
  * coded in assembly language.
  */
-void sn2_ptc_deadlock_recovery(short *nasids, short nix, int mynasid, volatile unsigned long *ptc0, unsigned long data0,
+void sn2_ptc_deadlock_recovery(short *nasids, short ib, short ie, int mynasid, volatile unsigned long *ptc0, unsigned long data0,
         volatile unsigned long *ptc1, unsigned long data1)
 {
-        extern void sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long,
+        extern unsigned long sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long,
                 volatile unsigned long *, unsigned long, volatile unsigned long *, unsigned long);
         short nasid, i;
-        unsigned long *piows, zeroval;
+        unsigned long *piows, zeroval, n;
 
         __get_cpu_var(ptcstats).deadlocks++;
 
         piows = (unsigned long *) pda->pio_write_status_addr;
         zeroval = pda->pio_write_status_val;
 
-        for (i=0; i < nix; i++) {
+
+        for (i=ib; i <= ie; i++) {
                 nasid = nasids[i];
-                if (!(sn2_ptctest & 3) && nasid == mynasid)
+                if (local_node_uses_ptc_ga(is_shub1()) && nasid == mynasid)
                         continue;
                 ptc0 = CHANGE_NASID(nasid, ptc0);
                 if (ptc1)
                         ptc1 = CHANGE_NASID(nasid, ptc1);
-                sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval);
+
+                n = sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval);
+                __get_cpu_var(ptcstats).deadlocks2 += n;
         }
 
 }
@@ -452,20 +404,22 @@ static int sn2_ptc_seq_show(struct seq_file *file, void *data)
         cpu = *(loff_t *) data;
 
         if (!cpu) {
-                seq_printf(file, "# ptc_l change_rid shub_ptc_flushes shub_nodes_flushed deadlocks lock_nsec shub_nsec shub_nsec_max\n");
+                seq_printf(file,
+                           "# cpu ptc_l newrid ptc_flushes nodes_flushed deadlocks lock_nsec shub_nsec shub_nsec_max not_my_mm deadlock2\n");
                 seq_printf(file, "# ptctest %d\n", sn2_ptctest);
         }
 
         if (cpu < NR_CPUS && cpu_online(cpu)) {
                 stat = &per_cpu(ptcstats, cpu);
-                seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
+                seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
                         stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
                         stat->deadlocks,
                         1000 * stat->lock_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
                         1000 * stat->shub_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
-                        1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec);
+                        1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec,
+                        stat->shub_ptc_flushes_not_my_mm,
+                        stat->deadlocks2);
         }
-
         return 0;
 }
 
@@ -476,7 +430,7 @@ static struct seq_operations sn2_ptc_seq_ops = {
         .show = sn2_ptc_seq_show
 };
 
-int sn2_ptc_proc_open(struct inode *inode, struct file *file)
+static int sn2_ptc_proc_open(struct inode *inode, struct file *file)
 {
         return seq_open(file, &sn2_ptc_seq_ops);
 }
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index c75f8aeefc2b..9cd460dfe27e 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -575,18 +575,21 @@ xpc_activate_partition(struct xpc_partition *part)
 
         spin_lock_irqsave(&part->act_lock, irq_flags);
 
-        pid = kernel_thread(xpc_activating, (void *) ((u64) partid), 0);
-
         DBUG_ON(part->act_state != XPC_P_INACTIVE);
 
-        if (pid > 0) {
-                part->act_state = XPC_P_ACTIVATION_REQ;
-                XPC_SET_REASON(part, xpcCloneKThread, __LINE__);
-        } else {
-                XPC_SET_REASON(part, xpcCloneKThreadFailed, __LINE__);
-        }
+        part->act_state = XPC_P_ACTIVATION_REQ;
+        XPC_SET_REASON(part, xpcCloneKThread, __LINE__);
 
         spin_unlock_irqrestore(&part->act_lock, irq_flags);
+
+        pid = kernel_thread(xpc_activating, (void *) ((u64) partid), 0);
+
+        if (unlikely(pid <= 0)) {
+                spin_lock_irqsave(&part->act_lock, irq_flags);
+                part->act_state = XPC_P_INACTIVE;
+                XPC_SET_REASON(part, xpcCloneKThreadFailed, __LINE__);
+                spin_unlock_irqrestore(&part->act_lock, irq_flags);
+        }
 }
 
 
diff --git a/arch/ia64/sn/pci/Makefile b/arch/ia64/sn/pci/Makefile
index 321576b1b425..c6946784a6a8 100644
--- a/arch/ia64/sn/pci/Makefile
+++ b/arch/ia64/sn/pci/Makefile
@@ -7,4 +7,6 @@
 #
 # Makefile for the sn pci general routines.
 
+CPPFLAGS += -I$(srctree)/arch/ia64/sn/include
+
 obj-y := pci_dma.o tioca_provider.o tioce_provider.o pcibr/
diff --git a/arch/ia64/sn/pci/pcibr/Makefile b/arch/ia64/sn/pci/pcibr/Makefile
index 1850c4a94c41..3b403ea456f9 100644
--- a/arch/ia64/sn/pci/pcibr/Makefile
+++ b/arch/ia64/sn/pci/pcibr/Makefile
@@ -7,5 +7,7 @@
 #
 # Makefile for the sn2 io routines.
 
+CPPFLAGS += -I$(srctree)/arch/ia64/sn/include
+
 obj-y += pcibr_dma.o pcibr_reg.o \
         pcibr_ate.o pcibr_provider.o
diff --git a/drivers/Makefile b/drivers/Makefile
index 619dd964c51c..5c69b86db624 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -69,7 +69,7 @@ obj-$(CONFIG_EISA) += eisa/
 obj-$(CONFIG_CPU_FREQ) += cpufreq/
 obj-$(CONFIG_MMC) += mmc/
 obj-$(CONFIG_INFINIBAND) += infiniband/
-obj-$(CONFIG_SGI_IOC4) += sn/
+obj-$(CONFIG_SGI_SN) += sn/
 obj-y += firmware/
 obj-$(CONFIG_CRYPTO) += crypto/
 obj-$(CONFIG_SUPERH) += sh/
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 09b99029ac1a..23c8e1be1911 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -559,6 +559,23 @@ ia64_eoi (void)
 
 #define cpu_relax() ia64_hint(ia64_hint_pause)
 
+static inline int
+ia64_get_irr(unsigned int vector)
+{
+        unsigned int reg = vector / 64;
+        unsigned int bit = vector % 64;
+        u64 irr;
+
+        switch (reg) {
+        case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
+        case 1: irr = ia64_getreg(_IA64_REG_CR_IRR1); break;
+        case 2: irr = ia64_getreg(_IA64_REG_CR_IRR2); break;
+        case 3: irr = ia64_getreg(_IA64_REG_CR_IRR3); break;
+        }
+
+        return test_bit(bit, &irr);
+}
+
 static inline void
 ia64_set_lrr0 (unsigned long val)
 {
diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h
index 313cad0628d0..0b210abbe003 100644
--- a/include/asm-ia64/sal.h
+++ b/include/asm-ia64/sal.h
@@ -658,15 +658,7 @@ ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second,
         return isrv.status;
 }
 
-/* Flush all the processor and platform level instruction and/or data caches */
-static inline s64
-ia64_sal_cache_flush (u64 cache_type)
-{
-        struct ia64_sal_retval isrv;
-        SAL_CALL(isrv, SAL_CACHE_FLUSH, cache_type, 0, 0, 0, 0, 0, 0);
-        return isrv.status;
-}
-
+extern s64 ia64_sal_cache_flush (u64 cache_type);
 
 /* Initialize all the processor and platform level instruction and data caches */
 static inline s64
diff --git a/include/asm-ia64/sn/bte.h b/include/asm-ia64/sn/bte.h
index f50da3d91d07..01e5b4103235 100644
--- a/include/asm-ia64/sn/bte.h
+++ b/include/asm-ia64/sn/bte.h
@@ -3,7 +3,7 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc. All Rights Reserved.
  */
 
 
@@ -100,13 +100,28 @@
 #define BTE_LNSTAT_STORE(_bte, _x) \
         HUB_S(_bte->bte_base_addr, (_x))
 #define BTE_SRC_STORE(_bte, _x) \
-        HUB_S(_bte->bte_source_addr, (_x))
+({ \
+        u64 __addr = ((_x) & ~AS_MASK); \
+        if (is_shub2()) \
+                __addr = SH2_TIO_PHYS_TO_DMA(__addr); \
+        HUB_S(_bte->bte_source_addr, __addr); \
+})
 #define BTE_DEST_STORE(_bte, _x) \
-        HUB_S(_bte->bte_destination_addr, (_x))
+({ \
+        u64 __addr = ((_x) & ~AS_MASK); \
+        if (is_shub2()) \
+                __addr = SH2_TIO_PHYS_TO_DMA(__addr); \
+        HUB_S(_bte->bte_destination_addr, __addr); \
+})
 #define BTE_CTRL_STORE(_bte, _x) \
         HUB_S(_bte->bte_control_addr, (_x))
 #define BTE_NOTIF_STORE(_bte, _x) \
-        HUB_S(_bte->bte_notify_addr, (_x))
+({ \
+        u64 __addr = ia64_tpa((_x) & ~AS_MASK); \
+        if (is_shub2()) \
+                __addr = SH2_TIO_PHYS_TO_DMA(__addr); \
+        HUB_S(_bte->bte_notify_addr, __addr); \
+})
 
 #define BTE_START_TRANSFER(_bte, _len, _mode) \
         is_shub2() ? BTE_CTRL_STORE(_bte, IBLS_BUSY | (_mode << 24) | _len) \
diff --git a/include/asm-ia64/sn/intr.h b/include/asm-ia64/sn/intr.h
index a3431372c6e7..60a51a406eec 100644
--- a/include/asm-ia64/sn/intr.h
+++ b/include/asm-ia64/sn/intr.h
@@ -3,7 +3,7 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2006 Silicon Graphics, Inc. All rights reserved.
  */
 
 #ifndef _ASM_IA64_SN_INTR_H
@@ -11,26 +11,26 @@
 
 #include <linux/rcupdate.h>
 
-#define SGI_UART_VECTOR (0xe9)
+#define SGI_UART_VECTOR 0xe9
 
 /* Reserved IRQs : Note, not to exceed IA64_SN2_FIRST_DEVICE_VECTOR */
-#define SGI_XPC_ACTIVATE (0x30)
-#define SGI_II_ERROR (0x31)
-#define SGI_XBOW_ERROR (0x32)
-#define SGI_PCIASIC_ERROR (0x33)
-#define SGI_ACPI_SCI_INT (0x34)
-#define SGI_TIOCA_ERROR (0x35)
-#define SGI_TIO_ERROR (0x36)
-#define SGI_TIOCX_ERROR (0x37)
-#define SGI_MMTIMER_VECTOR (0x38)
-#define SGI_XPC_NOTIFY (0xe7)
+#define SGI_XPC_ACTIVATE 0x30
+#define SGI_II_ERROR 0x31
+#define SGI_XBOW_ERROR 0x32
+#define SGI_PCIASIC_ERROR 0x33
+#define SGI_ACPI_SCI_INT 0x34
+#define SGI_TIOCA_ERROR 0x35
+#define SGI_TIO_ERROR 0x36
+#define SGI_TIOCX_ERROR 0x37
+#define SGI_MMTIMER_VECTOR 0x38
+#define SGI_XPC_NOTIFY 0xe7
 
-#define IA64_SN2_FIRST_DEVICE_VECTOR (0x3c)
-#define IA64_SN2_LAST_DEVICE_VECTOR (0xe6)
+#define IA64_SN2_FIRST_DEVICE_VECTOR 0x3c
+#define IA64_SN2_LAST_DEVICE_VECTOR 0xe6
 
-#define SN2_IRQ_RESERVED (0x1)
-#define SN2_IRQ_CONNECTED (0x2)
-#define SN2_IRQ_SHARED (0x4)
+#define SN2_IRQ_RESERVED 0x1
+#define SN2_IRQ_CONNECTED 0x2
+#define SN2_IRQ_SHARED 0x4
 
 // The SN PROM irq struct
 struct sn_irq_info {
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 80c5a234e259..062538715623 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -249,32 +249,7 @@ extern void ia64_load_extra (struct task_struct *task);
 # define switch_to(prev,next,last) __switch_to(prev, next, last)
 #endif
 
-/*
- * On IA-64, we don't want to hold the runqueue's lock during the low-level context-switch,
- * because that could cause a deadlock. Here is an example by Erich Focht:
- *
- * Example:
- * CPU#0:
- * schedule()
- *    -> spin_lock_irq(&rq->lock)
- *    -> context_switch()
- *       -> wrap_mmu_context()
- *          -> read_lock(&tasklist_lock)
- *
- * CPU#1:
- * sys_wait4() or release_task() or forget_original_parent()
- *    -> write_lock(&tasklist_lock)
- *    -> do_notify_parent()
- *       -> wake_up_parent()
- *          -> try_to_wake_up()
- *             -> spin_lock_irq(&parent_rq->lock)
- *
- * If the parent's rq happens to be on CPU#0, we'll wait for the rq->lock
- * of that CPU which will not be released, because there we wait for the
- * tasklist_lock to become available.
- */
 #define __ARCH_WANT_UNLOCKED_CTXSW
-
 #define ARCH_HAS_PREFETCH_SWITCH_STACK
 #define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
 