Diffstat (limited to 'arch/ia64/sn')

 arch/ia64/sn/kernel/Makefile         |   2
 arch/ia64/sn/kernel/irq.c            | 101
 arch/ia64/sn/kernel/msi_sn.c         |  32
 arch/ia64/sn/kernel/setup.c          |   2
 arch/ia64/sn/kernel/sn2/Makefile     |   2
 arch/ia64/sn/kernel/sn2/sn_hwperf.c  |   1
 arch/ia64/sn/kernel/sn2/sn_proc_fs.c |  42
 arch/ia64/sn/kernel/sn2/timer.c      |   6
 arch/ia64/sn/pci/Makefile            |   2
 arch/ia64/sn/pci/pcibr/Makefile      |   2
 arch/ia64/sn/pci/pcibr/pcibr_dma.c   |   2
 arch/ia64/sn/pci/tioca_provider.c    |   2
 12 files changed, 55 insertions(+), 141 deletions(-)
diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile
index 0591038735af..d27df1d45da7 100644
--- a/arch/ia64/sn/kernel/Makefile
+++ b/arch/ia64/sn/kernel/Makefile
@@ -7,7 +7,7 @@
 # Copyright (C) 1999,2001-2006,2008 Silicon Graphics, Inc. All Rights Reserved.
 #
 
-EXTRA_CFLAGS += -Iarch/ia64/sn/include
+ccflags-y := -Iarch/ia64/sn/include
 
 obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \
 	 huberror.o io_acpi_init.o io_common.o \
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 13c15d968098..81a1f4e6bcd8 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -23,11 +23,9 @@
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/sn_feature_sets.h>
 
-static void force_interrupt(int irq);
 static void register_intr_pda(struct sn_irq_info *sn_irq_info);
 static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
 
-int sn_force_interrupt_flag = 1;
 extern int sn_ioif_inited;
 struct list_head **sn_irq_lh;
 static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */
@@ -78,62 +76,40 @@ u64 sn_intr_redirect(nasid_t local_nasid, int local_widget,
 	return ret_stuff.status;
 }
 
-static unsigned int sn_startup_irq(unsigned int irq)
+static unsigned int sn_startup_irq(struct irq_data *data)
 {
 	return 0;
 }
 
-static void sn_shutdown_irq(unsigned int irq)
+static void sn_shutdown_irq(struct irq_data *data)
 {
 }
 
 extern void ia64_mca_register_cpev(int);
 
-static void sn_disable_irq(unsigned int irq)
+static void sn_disable_irq(struct irq_data *data)
 {
-	if (irq == local_vector_to_irq(IA64_CPE_VECTOR))
+	if (data->irq == local_vector_to_irq(IA64_CPE_VECTOR))
 		ia64_mca_register_cpev(0);
 }
 
-static void sn_enable_irq(unsigned int irq)
+static void sn_enable_irq(struct irq_data *data)
 {
-	if (irq == local_vector_to_irq(IA64_CPE_VECTOR))
-		ia64_mca_register_cpev(irq);
+	if (data->irq == local_vector_to_irq(IA64_CPE_VECTOR))
+		ia64_mca_register_cpev(data->irq);
 }
 
-static void sn_ack_irq(unsigned int irq)
+static void sn_ack_irq(struct irq_data *data)
 {
 	u64 event_occurred, mask;
+	unsigned int irq = data->irq & 0xff;
 
-	irq = irq & 0xff;
 	event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED));
 	mask = event_occurred & SH_ALL_INT_MASK;
 	HUB_S((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mask);
 	__set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
 
-	move_native_irq(irq);
-}
-
-static void sn_end_irq(unsigned int irq)
-{
-	int ivec;
-	u64 event_occurred;
-
-	ivec = irq & 0xff;
-	if (ivec == SGI_UART_VECTOR) {
-		event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR (SH_EVENT_OCCURRED));
-		/* If the UART bit is set here, we may have received an
-		 * interrupt from the UART that the driver missed. To
-		 * make sure, we IPI ourselves to force us to look again.
-		 */
-		if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
-			platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR,
-				IA64_IPI_DM_INT, 0);
-		}
-	}
-	__clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs);
-	if (sn_force_interrupt_flag)
-		force_interrupt(irq);
+	irq_move_irq(data);
 }
 
 static void sn_irq_info_free(struct rcu_head *head);
@@ -228,9 +204,11 @@ finish_up:
 	return new_irq_info;
 }
 
-static int sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask)
+static int sn_set_affinity_irq(struct irq_data *data,
+			       const struct cpumask *mask, bool force)
 {
 	struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
+	unsigned int irq = data->irq;
 	nasid_t nasid;
 	int slice;
 
@@ -249,7 +227,7 @@ void sn_set_err_irq_affinity(unsigned int irq)
 {
 	/*
 	 * On systems which support CPU disabling (SHub2), all error interrupts
-	 * are targetted at the boot CPU.
+	 * are targeted at the boot CPU.
 	 */
 	if (is_shub2() && sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT))
 		set_irq_affinity_info(irq, cpu_physical_id(0), 0);
@@ -259,26 +237,25 @@ void sn_set_err_irq_affinity(unsigned int irq) { }
 #endif
 
 static void
-sn_mask_irq(unsigned int irq)
+sn_mask_irq(struct irq_data *data)
 {
 }
 
 static void
-sn_unmask_irq(unsigned int irq)
+sn_unmask_irq(struct irq_data *data)
 {
 }
 
 struct irq_chip irq_type_sn = {
 	.name = "SN hub",
-	.startup = sn_startup_irq,
-	.shutdown = sn_shutdown_irq,
-	.enable = sn_enable_irq,
-	.disable = sn_disable_irq,
-	.ack = sn_ack_irq,
-	.end = sn_end_irq,
-	.mask = sn_mask_irq,
-	.unmask = sn_unmask_irq,
-	.set_affinity = sn_set_affinity_irq
+	.irq_startup = sn_startup_irq,
+	.irq_shutdown = sn_shutdown_irq,
+	.irq_enable = sn_enable_irq,
+	.irq_disable = sn_disable_irq,
+	.irq_ack = sn_ack_irq,
+	.irq_mask = sn_mask_irq,
+	.irq_unmask = sn_unmask_irq,
+	.irq_set_affinity = sn_set_affinity_irq
 };
 
 ia64_vector sn_irq_to_vector(int irq)
@@ -296,15 +273,13 @@ unsigned int sn_local_vector_to_irq(u8 vector)
 void sn_irq_init(void)
 {
 	int i;
-	struct irq_desc *base_desc = irq_desc;
 
 	ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR;
 	ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;
 
 	for (i = 0; i < NR_IRQS; i++) {
-		if (base_desc[i].chip == &no_irq_chip) {
-			base_desc[i].chip = &irq_type_sn;
-		}
+		if (irq_get_chip(i) == &no_irq_chip)
+			irq_set_chip(i, &irq_type_sn);
 	}
 }
 
@@ -378,7 +353,6 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
 	int cpu = nasid_slice_to_cpuid(nasid, slice);
 #ifdef CONFIG_SMP
 	int cpuphys;
-	struct irq_desc *desc;
 #endif
 
 	pci_dev_get(pci_dev);
@@ -395,12 +369,11 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
 #ifdef CONFIG_SMP
 	cpuphys = cpu_physical_id(cpu);
 	set_irq_affinity_info(sn_irq_info->irq_irq, cpuphys, 0);
-	desc = irq_to_desc(sn_irq_info->irq_irq);
 	/*
 	 * Affinity was set by the PROM, prevent it from
 	 * being reset by the request_irq() path.
 	 */
-	desc->status |= IRQ_AFFINITY_SET;
+	irqd_mark_affinity_was_set(irq_get_irq_data(sn_irq_info->irq_irq));
 #endif
 }
 
@@ -439,25 +412,11 @@ sn_call_force_intr_provider(struct sn_irq_info *sn_irq_info)
 	pci_provider = sn_pci_provider[sn_irq_info->irq_bridge_type];
 
 	/* Don't force an interrupt if the irq has been disabled */
-	if (!(irq_desc[sn_irq_info->irq_irq].status & IRQ_DISABLED) &&
+	if (!irqd_irq_disabled(irq_get_irq_data(sn_irq_info->irq_irq)) &&
 	    pci_provider && pci_provider->force_interrupt)
 		(*pci_provider->force_interrupt)(sn_irq_info);
 }
 
-static void force_interrupt(int irq)
-{
-	struct sn_irq_info *sn_irq_info;
-
-	if (!sn_ioif_inited)
-		return;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list)
-		sn_call_force_intr_provider(sn_irq_info);
-
-	rcu_read_unlock();
-}
-
 /*
  * Check for lost interrupts. If the PIC int_status reg. says that
  * an interrupt has been sent, but not handled, and the interrupt
@@ -476,7 +435,7 @@ static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
 	/*
 	 * Bridge types attached to TIO (anything but PIC) do not need this WAR
 	 * since they do not target Shub II interrupt registers. If that
-	 * ever changes, this check needs to accomodate.
+	 * ever changes, this check needs to accommodate.
 	 */
 	if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_PIC)
 		return;
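Aside (not part of the patch): the irq.c hunks above follow the generic genirq conversion in which every irq_chip callback receives a struct irq_data * instead of a bare irq number. A minimal sketch of that callback shape, using illustrative names (example_ack, example_chip) that do not exist in this tree:

#include <linux/irq.h>

/* Sketch only: the callback recovers the linux irq number from irq_data. */
static void example_ack(struct irq_data *data)
{
	unsigned int irq = data->irq;	/* previously passed in directly */

	pr_debug("acking irq %u\n", irq);
	/* hardware-specific acknowledge would go here */
	irq_move_irq(data);		/* replaces move_native_irq(irq) */
}

static struct irq_chip example_chip = {
	.name    = "example",
	.irq_ack = example_ack,
};

Registration then goes through irq_set_chip(irq, &example_chip), the same accessor sn_irq_init() switches to above.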
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
index 0c72dd463831..2b98b9e088de 100644
--- a/arch/ia64/sn/kernel/msi_sn.c
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -144,16 +144,16 @@ int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry)
 	 */
 	msg.data = 0x100 + irq;
 
-	set_irq_msi(irq, entry);
+	irq_set_msi_desc(irq, entry);
 	write_msi_msg(irq, &msg);
-	set_irq_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq);
+	irq_set_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq);
 
 	return 0;
 }
 
 #ifdef CONFIG_SMP
-static int sn_set_msi_irq_affinity(unsigned int irq,
-				   const struct cpumask *cpu_mask)
+static int sn_set_msi_irq_affinity(struct irq_data *data,
+				   const struct cpumask *cpu_mask, bool force)
 {
 	struct msi_msg msg;
 	int slice;
@@ -164,7 +164,7 @@ static int sn_set_msi_irq_affinity(unsigned int irq,
 	struct sn_irq_info *sn_irq_info;
 	struct sn_irq_info *new_irq_info;
 	struct sn_pcibus_provider *provider;
-	unsigned int cpu;
+	unsigned int cpu, irq = data->irq;
 
 	cpu = cpumask_first(cpu_mask);
 	sn_irq_info = sn_msi_info[irq].sn_irq_info;
@@ -206,33 +206,33 @@ static int sn_set_msi_irq_affinity(unsigned int irq,
 	msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
 
 	write_msi_msg(irq, &msg);
-	cpumask_copy(irq_desc[irq].affinity, cpu_mask);
+	cpumask_copy(data->affinity, cpu_mask);
 
 	return 0;
 }
 #endif /* CONFIG_SMP */
 
-static void sn_ack_msi_irq(unsigned int irq)
+static void sn_ack_msi_irq(struct irq_data *data)
 {
-	move_native_irq(irq);
+	irq_move_irq(data);
 	ia64_eoi();
 }
 
-static int sn_msi_retrigger_irq(unsigned int irq)
+static int sn_msi_retrigger_irq(struct irq_data *data)
 {
-	unsigned int vector = irq;
+	unsigned int vector = data->irq;
 	ia64_resend_irq(vector);
 
 	return 1;
 }
 
 static struct irq_chip sn_msi_chip = {
 	.name = "PCI-MSI",
-	.mask = mask_msi_irq,
-	.unmask = unmask_msi_irq,
-	.ack = sn_ack_msi_irq,
+	.irq_mask = mask_msi_irq,
+	.irq_unmask = unmask_msi_irq,
+	.irq_ack = sn_ack_msi_irq,
 #ifdef CONFIG_SMP
-	.set_affinity = sn_set_msi_irq_affinity,
+	.irq_set_affinity = sn_set_msi_irq_affinity,
 #endif
-	.retrigger = sn_msi_retrigger_irq,
+	.irq_retrigger = sn_msi_retrigger_irq,
 };
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index dbc4cbecb5ed..77db0b514fa4 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -592,7 +592,7 @@ void __cpuinit sn_cpu_init(void)
 	/*
 	 * Don't check status. The SAL call is not supported on all PROMs
 	 * but a failure is harmless.
-	 * Architechtuallly, cpu_init is always called twice on cpu 0. We
+	 * Architecturally, cpu_init is always called twice on cpu 0. We
 	 * should set cpu_number on cpu 0 once.
 	 */
 	if (cpuid == 0) {
diff --git a/arch/ia64/sn/kernel/sn2/Makefile b/arch/ia64/sn/kernel/sn2/Makefile
index 08e6565dc908..3d09108d4277 100644
--- a/arch/ia64/sn/kernel/sn2/Makefile
+++ b/arch/ia64/sn/kernel/sn2/Makefile
@@ -9,7 +9,7 @@
 # sn2 specific kernel files
 #
 
-EXTRA_CFLAGS += -Iarch/ia64/sn/include
+ccflags-y := -Iarch/ia64/sn/include
 
 obj-y += cache.o io.o ptc_deadlock.o sn2_smp.o sn_proc_fs.o \
 	 prominfo_proc.o timer.o timer_interrupt.o sn_hwperf.o
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index fa1eceed0d23..30862c0358cd 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -860,6 +860,7 @@ error:
 
 static const struct file_operations sn_hwperf_fops = {
 	.unlocked_ioctl = sn_hwperf_ioctl,
+	.llseek = noop_llseek,
 };
 
 static struct miscdevice sn_hwperf_dev = {
diff --git a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
index c76d8dc3aea3..7aab87f48060 100644
--- a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
+++ b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
@@ -45,38 +45,6 @@ static int licenseID_open(struct inode *inode, struct file *file)
 	return single_open(file, licenseID_show, NULL);
 }
 
-/*
- * Enable forced interrupt by default.
- * When set, the sn interrupt handler writes the force interrupt register on
- * the bridge chip. The hardware will then send an interrupt message if the
- * interrupt line is active. This mimics a level sensitive interrupt.
- */
-extern int sn_force_interrupt_flag;
-
-static int sn_force_interrupt_show(struct seq_file *s, void *p)
-{
-	seq_printf(s, "Force interrupt is %s\n",
-		sn_force_interrupt_flag ? "enabled" : "disabled");
-	return 0;
-}
-
-static ssize_t sn_force_interrupt_write_proc(struct file *file,
-		const char __user *buffer, size_t count, loff_t *data)
-{
-	char val;
-
-	if (copy_from_user(&val, buffer, 1))
-		return -EFAULT;
-
-	sn_force_interrupt_flag = (val == '0') ? 0 : 1;
-	return count;
-}
-
-static int sn_force_interrupt_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, sn_force_interrupt_show, NULL);
-}
-
 static int coherence_id_show(struct seq_file *s, void *p)
 {
 	seq_printf(s, "%d\n", partition_coherence_id());
@@ -114,14 +82,6 @@ static const struct file_operations proc_license_id_fops = {
 	.release = single_release,
 };
 
-static const struct file_operations proc_sn_force_intr_fops = {
-	.open = sn_force_interrupt_open,
-	.read = seq_read,
-	.write = sn_force_interrupt_write_proc,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
 static const struct file_operations proc_coherence_id_fops = {
 	.open = coherence_id_open,
 	.read = seq_read,
@@ -149,8 +109,6 @@ void register_sn_procfs(void)
 	proc_create("system_serial_number", 0444, sgi_proc_dir,
 		    &proc_system_sn_fops);
 	proc_create("licenseID", 0444, sgi_proc_dir, &proc_license_id_fops);
-	proc_create("sn_force_interrupt", 0644, sgi_proc_dir,
-		    &proc_sn_force_intr_fops);
 	proc_create("coherence_id", 0444, sgi_proc_dir,
 		    &proc_coherence_id_fops);
 	proc_create("sn_topology", 0444, sgi_proc_dir, &proc_sn_topo_fops);
diff --git a/arch/ia64/sn/kernel/sn2/timer.c b/arch/ia64/sn/kernel/sn2/timer.c
index 21d6f09e3447..c34efda122e1 100644
--- a/arch/ia64/sn/kernel/sn2/timer.c
+++ b/arch/ia64/sn/kernel/sn2/timer.c
@@ -33,8 +33,6 @@ static struct clocksource clocksource_sn2 = {
 	.rating = 450,
 	.read = read_sn2,
 	.mask = (1LL << 55) - 1,
-	.mult = 0,
-	.shift = 10,
 	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
@@ -57,9 +55,7 @@ ia64_sn_udelay (unsigned long usecs)
 void __init sn_timer_init(void)
 {
 	clocksource_sn2.fsys_mmio = RTC_COUNTER_ADDR;
-	clocksource_sn2.mult = clocksource_hz2mult(sn_rtc_cycles_per_second,
-			clocksource_sn2.shift);
-	clocksource_register(&clocksource_sn2);
+	clocksource_register_hz(&clocksource_sn2, sn_rtc_cycles_per_second);
 
 	ia64_udelay = &ia64_sn_udelay;
 }
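Aside (not part of the patch): clocksource_register_hz() lets the timekeeping core derive .mult/.shift from the counter frequency, which is why the hard-coded values are dropped from clocksource_sn2 above. A sketch with assumed names (example_read, example_cs) and an assumed 1 MHz counter:

#include <linux/clocksource.h>
#include <linux/init.h>

static cycle_t example_read(struct clocksource *cs)
{
	return 0;	/* a real driver reads its free-running counter here */
}

static struct clocksource example_cs = {
	.name   = "example",
	.rating = 200,
	.read   = example_read,
	.mask   = CLOCKSOURCE_MASK(55),
	.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init example_timer_init(void)
{
	/* mult/shift are computed internally from the supplied rate */
	return clocksource_register_hz(&example_cs, 1000000);
}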
diff --git a/arch/ia64/sn/pci/Makefile b/arch/ia64/sn/pci/Makefile
index ad4ef34dfe26..df2a90145426 100644
--- a/arch/ia64/sn/pci/Makefile
+++ b/arch/ia64/sn/pci/Makefile
@@ -7,6 +7,6 @@
 #
 # Makefile for the sn pci general routines.
 
-EXTRA_CFLAGS += -Iarch/ia64/sn/include
+ccflags-y := -Iarch/ia64/sn/include
 
 obj-y := pci_dma.o tioca_provider.o tioce_provider.o pcibr/
diff --git a/arch/ia64/sn/pci/pcibr/Makefile b/arch/ia64/sn/pci/pcibr/Makefile
index 01192d3247dd..396bcae36309 100644
--- a/arch/ia64/sn/pci/pcibr/Makefile
+++ b/arch/ia64/sn/pci/pcibr/Makefile
@@ -7,7 +7,7 @@
 #
 # Makefile for the sn2 io routines.
 
-EXTRA_CFLAGS += -Iarch/ia64/sn/include
+ccflags-y := -Iarch/ia64/sn/include
 
 obj-y += pcibr_dma.o pcibr_reg.o \
 	 pcibr_ate.o pcibr_provider.o
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
index c659ad5613a0..33def666a664 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
@@ -227,7 +227,7 @@ pcibr_dma_unmap(struct pci_dev *hwdev, dma_addr_t dma_handle, int direction)
  * after doing the read. For PIC this routine then forces a fake interrupt
  * on another line, which is logically associated with the slot that the PIO
  * is addressed to. It then spins while watching the memory location that
- * the interrupt is targetted to. When the interrupt response arrives, we
+ * the interrupt is targeted to. When the interrupt response arrives, we
  * are sure that the DMA has landed in memory and it is safe for the driver
  * to proceed. For TIOCP use the Device(x) Write Request Buffer Flush
  * Bridge register since it ensures the data has entered the coherence domain,
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index 4d4536e3b6f3..9c271be9919a 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -509,7 +509,7 @@ tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
  * use the GART mapped mode.
  */
 static u64
-tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
+tioca_dma_map(struct pci_dev *pdev, unsigned long paddr, size_t byte_count, int dma_flags)
 {
 	u64 mapaddr;
 
