author     Tony Luck <tony.luck@intel.com>    2006-06-23 16:46:23 -0400
committer  Tony Luck <tony.luck@intel.com>    2006-06-23 16:46:23 -0400
commit     8cf60e04a131310199d5776e2f9e915f0c468899 (patch)
tree       373a68e88e6737713a0a5723d552cdeefffff929 /arch/ia64
parent     1323523f505606cfd24af6122369afddefc3b09d (diff)
parent     95eaa5fa8eb2c345244acd5f65b200b115ae8c65 (diff)
Auto-update from upstream
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig                   |    2
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c     |    4
-rw-r--r--  arch/ia64/kernel/acpi.c             |   28
-rw-r--r--  arch/ia64/kernel/entry.S            |    2
-rw-r--r--  arch/ia64/kernel/irq_ia64.c         |   19
-rw-r--r--  arch/ia64/kernel/perfmon.c          |   10
-rw-r--r--  arch/ia64/kernel/uncached.c         |  200
-rw-r--r--  arch/ia64/mm/init.c                 |    2
-rw-r--r--  arch/ia64/pci/pci.c                 |    2
-rw-r--r--  arch/ia64/sn/kernel/io_init.c       |    9
-rw-r--r--  arch/ia64/sn/kernel/irq.c           |  142
-rw-r--r--  arch/ia64/sn/kernel/setup.c         |    4
-rw-r--r--  arch/ia64/sn/kernel/sn2/cache.c     |   15
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c          |   10
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_dma.c  |   62
-rw-r--r--  arch/ia64/sn/pci/tioca_provider.c   |    8
-rw-r--r--  arch/ia64/sn/pci/tioce_provider.c   |   65
17 files changed, 343 insertions(+), 241 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index cd2051f5c9ce..18318749884b 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -77,6 +77,7 @@ choice
 config IA64_GENERIC
 	bool "generic"
 	select ACPI
+	select PCI
 	select NUMA
 	select ACPI_NUMA
 	help
@@ -273,7 +274,6 @@ config HOTPLUG_CPU
 config SCHED_SMT
 	bool "SMT scheduler support"
 	depends on SMP
-	default off
 	help
 	  Improves the CPU scheduler's decision making when dealing with
 	  Intel IA64 chips with MultiThreading at a cost of slightly increased
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index bdccd0b1eb60..5825ddee58d6 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -1958,7 +1958,7 @@ sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
 	if (pxm < 0)
 		return;
 
-	node = pxm_to_nid_map[pxm];
+	node = pxm_to_node(pxm);
 
 	if (node >= MAX_NUMNODES || !node_online(node))
 		return;
@@ -1999,7 +1999,7 @@ acpi_sba_ioc_add(struct acpi_device *device)
 		if (!iovp_shift)
 			iovp_shift = min(PAGE_SHIFT, 16);
 	}
-	ACPI_MEM_FREE(dev_info);
+	kfree(dev_info);
 
 	/*
 	 * default anything not caught above or specified on cmdline to 4k
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 58c93a30348c..6ea642beaaee 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -68,8 +68,6 @@ EXPORT_SYMBOL(pm_power_off);
 unsigned char acpi_kbd_controller_present = 1;
 unsigned char acpi_legacy_devices;
 
-static unsigned int __initdata acpi_madt_rev;
-
 unsigned int acpi_cpei_override;
 unsigned int acpi_cpei_phys_cpuid;
 
@@ -243,6 +241,8 @@ acpi_parse_iosapic(acpi_table_entry_header * header, const unsigned long end)
 	return iosapic_init(iosapic->address, iosapic->global_irq_base);
 }
 
+static unsigned int __initdata acpi_madt_rev;
+
 static int __init
 acpi_parse_plat_int_src(acpi_table_entry_header * header,
 			const unsigned long end)
@@ -415,9 +415,6 @@ static int __initdata srat_num_cpus;	/* number of cpus */
 static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
 #define pxm_bit_set(bit)	(set_bit(bit,(void *)pxm_flag))
 #define pxm_bit_test(bit)	(test_bit(bit,(void *)pxm_flag))
-/* maps to convert between proximity domain and logical node ID */
-int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
-int __initdata nid_to_pxm_map[MAX_NUMNODES];
 static struct acpi_table_slit __initdata *slit_table;
 
 static int get_processor_proximity_domain(struct acpi_table_processor_affinity *pa)
@@ -533,22 +530,17 @@ void __init acpi_numa_arch_fixup(void)
 	 * MCD - This can probably be dropped now.  No need for pxm ID to node ID
 	 * mapping with sparse node numbering iff MAX_PXM_DOMAINS <= MAX_NUMNODES.
 	 */
-	/* calculate total number of nodes in system from PXM bitmap */
-	memset(pxm_to_nid_map, -1, sizeof(pxm_to_nid_map));
-	memset(nid_to_pxm_map, -1, sizeof(nid_to_pxm_map));
 	nodes_clear(node_online_map);
 	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
 		if (pxm_bit_test(i)) {
-			int nid = num_online_nodes();
-			pxm_to_nid_map[i] = nid;
-			nid_to_pxm_map[nid] = i;
+			int nid = acpi_map_pxm_to_node(i);
 			node_set_online(nid);
 		}
 	}
 
 	/* set logical node id in memory chunk structure */
 	for (i = 0; i < num_node_memblks; i++)
-		node_memblk[i].nid = pxm_to_nid_map[node_memblk[i].nid];
+		node_memblk[i].nid = pxm_to_node(node_memblk[i].nid);
 
 	/* assign memory bank numbers for each chunk on each node */
 	for_each_online_node(i) {
@@ -562,7 +554,7 @@ void __init acpi_numa_arch_fixup(void)
 
 	/* set logical node id in cpu structure */
 	for (i = 0; i < srat_num_cpus; i++)
-		node_cpuid[i].nid = pxm_to_nid_map[node_cpuid[i].nid];
+		node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid);
 
 	printk(KERN_INFO "Number of logical nodes in system = %d\n",
 	       num_online_nodes());
@@ -575,11 +567,11 @@ void __init acpi_numa_arch_fixup(void)
 	for (i = 0; i < slit_table->localities; i++) {
 		if (!pxm_bit_test(i))
 			continue;
-		node_from = pxm_to_nid_map[i];
+		node_from = pxm_to_node(i);
 		for (j = 0; j < slit_table->localities; j++) {
 			if (!pxm_bit_test(j))
 				continue;
-			node_to = pxm_to_nid_map[j];
+			node_to = pxm_to_node(j);
 			node_distance(node_from, node_to) =
 			    slit_table->entry[i * slit_table->localities + j];
 		}
@@ -785,9 +777,9 @@ int acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
 
 	/*
 	 * Assuming that the container driver would have set the proximity
-	 * domain and would have initialized pxm_to_nid_map[pxm_id] && pxm_flag
+	 * domain and would have initialized pxm_to_node(pxm_id) && pxm_flag
 	 */
-	node_cpuid[cpu].nid = (pxm_id < 0) ? 0 : pxm_to_nid_map[pxm_id];
+	node_cpuid[cpu].nid = (pxm_id < 0) ? 0 : pxm_to_node(pxm_id);
 
 	node_cpuid[cpu].phys_id = physid;
 #endif
@@ -966,7 +958,7 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
 	if (pxm < 0)
 		return AE_OK;
 
-	node = pxm_to_nid_map[pxm];
+	node = pxm_to_node(pxm);
 
 	if (node >= MAX_NUMNODES || !node_online(node) ||
 	    cpus_empty(node_to_cpumask(node)))
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index bcb80ca5cf40..32c999f58d12 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1584,7 +1584,7 @@ sys_call_table:
 	data8 sys_keyctl
 	data8 sys_ioprio_set
 	data8 sys_ioprio_get			// 1275
-	data8 sys_ni_syscall
+	data8 sys_move_pages
 	data8 sys_inotify_init
 	data8 sys_inotify_add_watch
 	data8 sys_inotify_rm_watch
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 6c4d59fd0364..ef9a2b49307a 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -46,6 +46,10 @@
 
 #define IRQ_DEBUG	0
 
+/* These can be overridden in platform_irq_init */
+int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
+int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;
+
 /* default base addr of IPI table */
 void __iomem *ipi_base_addr = ((void __iomem *)
 			       (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));
@@ -60,7 +64,7 @@ __u8 isa_irq_to_vector_map[16] = {
 };
 EXPORT_SYMBOL(isa_irq_to_vector_map);
 
-static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_NUM_DEVICE_VECTORS)];
+static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_MAX_DEVICE_VECTORS)];
 
 int
 assign_irq_vector (int irq)
@@ -89,6 +93,19 @@ free_irq_vector (int vector)
 		printk(KERN_WARNING "%s: double free!\n", __FUNCTION__);
 }
 
+int
+reserve_irq_vector (int vector)
+{
+	int pos;
+
+	if (vector < IA64_FIRST_DEVICE_VECTOR ||
+	    vector > IA64_LAST_DEVICE_VECTOR)
+		return -EINVAL;
+
+	pos = vector - IA64_FIRST_DEVICE_VECTOR;
+	return test_and_set_bit(pos, ia64_vector_mask);
+}
+
 #ifdef CONFIG_SMP
 #	define IS_RESCHEDULE(vec)	(vec == IA64_IPI_RESCHEDULE)
 #else
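A note on the new reserve_irq_vector() above: since test_and_set_bit() returns the previous value of the bit, the function returns 0 when the vector was free and has just been claimed, a nonzero value when it was already reserved, and -EINVAL when the vector lies outside the device-vector range. A caller therefore treats any nonzero return as failure; a minimal sketch (the surrounding error handling is hypothetical, not part of this patch):

	/* hypothetical caller, sketch only */
	if (reserve_irq_vector(vector))
		return -EBUSY;	/* vector already claimed or out of range */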
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 077f21216b65..6d7bc8ff7b3a 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -532,7 +532,6 @@ static ctl_table pfm_sysctl_root[] = {
 static struct ctl_table_header *pfm_sysctl_header;
 
 static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
-static int pfm_flush(struct file *filp);
 
 #define pfm_get_cpu_var(v)		__ia64_per_cpu_var(v)
 #define pfm_get_cpu_data(a,b)		per_cpu(a, b)
@@ -595,10 +594,11 @@ pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 }
 
 
-static struct super_block *
-pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data)
+static int
+pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data,
+	     struct vfsmount *mnt)
 {
-	return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC);
+	return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC, mnt);
 }
 
 static struct file_system_type pfm_fs_type = {
@@ -1773,7 +1773,7 @@ pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
 * When caller is self-monitoring, the context is unloaded.
 */
 static int
-pfm_flush(struct file *filp)
+pfm_flush(struct file *filp, fl_owner_t id)
 {
 	pfm_context_t *ctx;
 	struct task_struct *task;
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index fcd2bad0286f..5f03b9e524dd 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2001-2005 Silicon Graphics, Inc.  All rights reserved.
+ * Copyright (C) 2001-2006 Silicon Graphics, Inc.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License
@@ -29,15 +29,8 @@
 #include <asm/tlbflush.h>
 #include <asm/sn/arch.h>
 
-#define DEBUG	0
 
-#if DEBUG
-#define dprintk printk
-#else
-#define dprintk(x...) do { } while (0)
-#endif
-
-void __init efi_memmap_walk_uc (efi_freemem_callback_t callback);
+extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);
 
 #define MAX_UNCACHED_GRANULES	5
 static int allocated_granules;
@@ -60,6 +53,7 @@ static void uncached_ipi_visibility(void *data)
 static void uncached_ipi_mc_drain(void *data)
 {
 	int status;
+
 	status = ia64_pal_mc_drain();
 	if (status)
 		printk(KERN_WARNING "ia64_pal_mc_drain() failed with %i on "
@@ -67,30 +61,35 @@ static void uncached_ipi_mc_drain(void *data)
 }
 
 
-static unsigned long
-uncached_get_new_chunk(struct gen_pool *poolp)
+/*
+ * Add a new chunk of uncached memory pages to the specified pool.
+ *
+ * @pool: pool to add new chunk of uncached memory to
+ * @nid: node id of node to allocate memory from, or -1
+ *
+ * This is accomplished by first allocating a granule of cached memory pages
+ * and then converting them to uncached memory pages.
+ */
+static int uncached_add_chunk(struct gen_pool *pool, int nid)
 {
 	struct page *page;
-	void *tmp;
 	int status, i;
-	unsigned long addr, node;
+	unsigned long c_addr, uc_addr;
 
 	if (allocated_granules >= MAX_UNCACHED_GRANULES)
-		return 0;
+		return -1;
+
+	/* attempt to allocate a granule's worth of cached memory pages */
 
-	node = poolp->private;
-	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO,
+	page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO,
 				IA64_GRANULE_SHIFT-PAGE_SHIFT);
+	if (!page)
+		return -1;
 
-	dprintk(KERN_INFO "get_new_chunk page %p, addr %lx\n",
-		page, (unsigned long)(page-vmem_map) << PAGE_SHIFT);
+	/* convert the memory pages from cached to uncached */
 
-	/*
-	 * Do magic if no mem on local node! XXX
-	 */
-	if (!page)
-		return 0;
-	tmp = page_address(page);
+	c_addr = (unsigned long)page_address(page);
+	uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;
 
 	/*
 	 * There's a small race here where it's possible for someone to
@@ -100,76 +99,90 @@ uncached_get_new_chunk(struct gen_pool *poolp)
 	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
 		SetPageUncached(&page[i]);
 
-	flush_tlb_kernel_range(tmp, tmp + IA64_GRANULE_SIZE);
+	flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);
 
 	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
-
-	dprintk(KERN_INFO "pal_prefetch_visibility() returns %i on cpu %i\n",
-		status, raw_smp_processor_id());
-
 	if (!status) {
 		status = smp_call_function(uncached_ipi_visibility, NULL, 0, 1);
 		if (status)
-			printk(KERN_WARNING "smp_call_function failed for "
-			       "uncached_ipi_visibility! (%i)\n", status);
+			goto failed;
 	}
 
+	preempt_disable();
+
 	if (ia64_platform_is("sn2"))
-		sn_flush_all_caches((unsigned long)tmp, IA64_GRANULE_SIZE);
+		sn_flush_all_caches(uc_addr, IA64_GRANULE_SIZE);
 	else
-		flush_icache_range((unsigned long)tmp,
-				   (unsigned long)tmp+IA64_GRANULE_SIZE);
+		flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);
+
+	/* flush the just introduced uncached translation from the TLB */
+	local_flush_tlb_all();
+
+	preempt_enable();
 
 	ia64_pal_mc_drain();
 	status = smp_call_function(uncached_ipi_mc_drain, NULL, 0, 1);
 	if (status)
-		printk(KERN_WARNING "smp_call_function failed for "
-		       "uncached_ipi_mc_drain! (%i)\n", status);
+		goto failed;
 
-	addr = (unsigned long)tmp - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;
+	/*
+	 * The chunk of memory pages has been converted to uncached so now we
+	 * can add it to the pool.
+	 */
+	status = gen_pool_add(pool, uc_addr, IA64_GRANULE_SIZE, nid);
+	if (status)
+		goto failed;
 
 	allocated_granules++;
-	return addr;
+	return 0;
+
+	/* failed to convert or add the chunk so give it back to the kernel */
+failed:
+	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
+		ClearPageUncached(&page[i]);
+
+	free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
+	return -1;
 }
 
 
 /*
  * uncached_alloc_page
  *
+ * @starting_nid: node id of node to start with, or -1
+ *
  * Allocate 1 uncached page.  Allocates on the requested node.  If no
  * uncached pages are available on the requested node, roundrobin starting
- * with higher nodes.
+ * with the next higher node.
  */
-unsigned long
-uncached_alloc_page(int nid)
+unsigned long uncached_alloc_page(int starting_nid)
 {
-	unsigned long maddr;
+	unsigned long uc_addr;
+	struct gen_pool *pool;
+	int nid;
 
-	maddr = gen_pool_alloc(uncached_pool[nid], PAGE_SIZE);
+	if (unlikely(starting_nid >= MAX_NUMNODES))
+		return 0;
 
-	dprintk(KERN_DEBUG "uncached_alloc_page returns %lx on node %i\n",
-		maddr, nid);
+	if (starting_nid < 0)
+		starting_nid = numa_node_id();
+	nid = starting_nid;
 
-	/*
-	 * If no memory is availble on our local node, try the
-	 * remaining nodes in the system.
-	 */
-	if (!maddr) {
-		int i;
-
-		for (i = MAX_NUMNODES - 1; i >= 0; i--) {
-			if (i == nid || !node_online(i))
-				continue;
-			maddr = gen_pool_alloc(uncached_pool[i], PAGE_SIZE);
-			dprintk(KERN_DEBUG "uncached_alloc_page alternate search "
-				"returns %lx on node %i\n", maddr, i);
-			if (maddr) {
-				break;
-			}
-		}
-	}
+	do {
+		if (!node_online(nid))
+			continue;
+		pool = uncached_pool[nid];
+		if (pool == NULL)
+			continue;
+		do {
+			uc_addr = gen_pool_alloc(pool, PAGE_SIZE);
+			if (uc_addr != 0)
+				return uc_addr;
+		} while (uncached_add_chunk(pool, nid) == 0);
+
+	} while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);
 
-	return maddr;
+	return 0;
 }
 EXPORT_SYMBOL(uncached_alloc_page);
 
@@ -177,21 +190,22 @@ EXPORT_SYMBOL(uncached_alloc_page);
 /*
  * uncached_free_page
  *
+ * @uc_addr: uncached address of page to free
+ *
  * Free a single uncached page.
  */
-void
-uncached_free_page(unsigned long maddr)
+void uncached_free_page(unsigned long uc_addr)
 {
-	int node;
-
-	node = paddr_to_nid(maddr - __IA64_UNCACHED_OFFSET);
+	int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
+	struct gen_pool *pool = uncached_pool[nid];
 
-	dprintk(KERN_DEBUG "uncached_free_page(%lx) on node %i\n", maddr, node);
+	if (unlikely(pool == NULL))
+		return;
 
-	if ((maddr & (0XFUL << 60)) != __IA64_UNCACHED_OFFSET)
-		panic("uncached_free_page invalid address %lx\n", maddr);
+	if ((uc_addr & (0XFUL << 60)) != __IA64_UNCACHED_OFFSET)
+		panic("uncached_free_page invalid address %lx\n", uc_addr);
 
-	gen_pool_free(uncached_pool[node], maddr, PAGE_SIZE);
+	gen_pool_free(pool, uc_addr, PAGE_SIZE);
 }
 EXPORT_SYMBOL(uncached_free_page);
 
@@ -199,43 +213,39 @@ EXPORT_SYMBOL(uncached_free_page);
 /*
  * uncached_build_memmap,
  *
+ * @uc_start: uncached starting address of a chunk of uncached memory
+ * @uc_end: uncached ending address of a chunk of uncached memory
+ * @arg: ignored, (NULL argument passed in on call to efi_memmap_walk_uc())
+ *
  * Called at boot time to build a map of pages that can be used for
  * memory special operations.
  */
-static int __init
-uncached_build_memmap(unsigned long start, unsigned long end, void *arg)
+static int __init uncached_build_memmap(unsigned long uc_start,
+					unsigned long uc_end, void *arg)
 {
-	long length = end - start;
-	int node;
-
-	dprintk(KERN_ERR "uncached_build_memmap(%lx %lx)\n", start, end);
+	int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
+	struct gen_pool *pool = uncached_pool[nid];
+	size_t size = uc_end - uc_start;
 
 	touch_softlockup_watchdog();
-	memset((char *)start, 0, length);
 
-	node = paddr_to_nid(start - __IA64_UNCACHED_OFFSET);
-
-	for (; start < end ; start += PAGE_SIZE) {
-		dprintk(KERN_INFO "sticking %lx into the pool!\n", start);
-		gen_pool_free(uncached_pool[node], start, PAGE_SIZE);
+	if (pool != NULL) {
+		memset((char *)uc_start, 0, size);
+		(void) gen_pool_add(pool, uc_start, size, nid);
 	}
-
 	return 0;
 }
 
 
-static int __init uncached_init(void) {
-	int i;
+static int __init uncached_init(void)
+{
+	int nid;
 
-	for (i = 0; i < MAX_NUMNODES; i++) {
-		if (!node_online(i))
-			continue;
-		uncached_pool[i] = gen_pool_create(0, IA64_GRANULE_SHIFT,
-						   &uncached_get_new_chunk, i);
+	for_each_online_node(nid) {
+		uncached_pool[nid] = gen_pool_create(PAGE_SHIFT, nid);
 	}
 
-	efi_memmap_walk_uc(uncached_build_memmap);
-
+	efi_memmap_walk_uc(uncached_build_memmap, NULL);
 	return 0;
 }
 
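The reworked allocator keeps the same two exported entry points, so callers only see the relaxed node argument (a starting_nid of -1 now means "start at the local node", per the numa_node_id() fallback above). A minimal usage sketch under that reading; the surrounding code is hypothetical, not from this patch:

	/* sketch: get one uncached page near the calling cpu, then free it */
	unsigned long uc_addr = uncached_alloc_page(-1);
	if (uc_addr != 0) {
		/* ... access memory through the uncached mapping ... */
		uncached_free_page(uc_addr);
	}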
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index cafa8776a53d..11f08001f8c2 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -671,9 +671,11 @@ int add_memory(u64 start, u64 size)
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(add_memory);
 
 int remove_memory(u64 start, u64 size)
 {
 	return -EINVAL;
 }
+EXPORT_SYMBOL_GPL(remove_memory);
 #endif
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 30d148f34042..61dd8608da4f 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -352,7 +352,7 @@ pci_acpi_scan_root(struct acpi_device *device, int domain, int bus)
 	pxm = acpi_get_pxm(controller->acpi_handle);
 #ifdef CONFIG_NUMA
 	if (pxm >= 0)
-		controller->node = pxm_to_nid_map[pxm];
+		controller->node = pxm_to_node(pxm);
 #endif
 
 	acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_window,
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 5101ac462643..dc09a6a28a37 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -58,7 +58,7 @@ static int max_pcibus_number = 255;	/* Default highest pci bus number */
  */
 
 static dma_addr_t
-sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size)
+sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size, int type)
 {
 	return 0;
 }
@@ -457,13 +457,6 @@ void sn_pci_fixup_slot(struct pci_dev *dev)
 		pcidev_info->pdi_sn_irq_info = NULL;
 		kfree(sn_irq_info);
 	}
-
-	/*
-	 * MSI currently not supported on altix.  Remove this when
-	 * the MSI abstraction patches are integrated into the kernel
-	 * (sometime after 2.6.16 releases)
-	 */
-	dev->no_msi = 1;
 }
 
 /*
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index c265e02f5036..dc8e2b696713 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -26,11 +26,11 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
 
 int sn_force_interrupt_flag = 1;
 extern int sn_ioif_inited;
-static struct list_head **sn_irq_lh;
+struct list_head **sn_irq_lh;
 static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */
 
-static inline u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
-				u64 sn_irq_info,
+u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
+				struct sn_irq_info *sn_irq_info,
 				int req_irq, nasid_t req_nasid,
 				int req_slice)
 {
@@ -40,12 +40,13 @@ static inline u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
 
 	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
 			(u64) SAL_INTR_ALLOC, (u64) local_nasid,
-			(u64) local_widget, (u64) sn_irq_info, (u64) req_irq,
+			(u64) local_widget, __pa(sn_irq_info), (u64) req_irq,
 			(u64) req_nasid, (u64) req_slice);
+
 	return ret_stuff.status;
 }
 
-static inline void sn_intr_free(nasid_t local_nasid, int local_widget,
+void sn_intr_free(nasid_t local_nasid, int local_widget,
 				struct sn_irq_info *sn_irq_info)
 {
 	struct ia64_sal_retval ret_stuff;
@@ -112,73 +113,91 @@ static void sn_end_irq(unsigned int irq)
 
 static void sn_irq_info_free(struct rcu_head *head);
 
-static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
+struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
+				       nasid_t nasid, int slice)
 {
-	struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
-	int cpuid, cpuphys;
+	int vector;
+	int cpuphys;
+	int64_t bridge;
+	int local_widget, status;
+	nasid_t local_nasid;
+	struct sn_irq_info *new_irq_info;
+	struct sn_pcibus_provider *pci_provider;
 
-	cpuid = first_cpu(mask);
-	cpuphys = cpu_physical_id(cpuid);
+	new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
+	if (new_irq_info == NULL)
+		return NULL;
 
-	list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
-				 sn_irq_lh[irq], list) {
-		u64 bridge;
-		int local_widget, status;
-		nasid_t local_nasid;
-		struct sn_irq_info *new_irq_info;
-		struct sn_pcibus_provider *pci_provider;
-
-		new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
-		if (new_irq_info == NULL)
-			break;
-		memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));
-
-		bridge = (u64) new_irq_info->irq_bridge;
-		if (!bridge) {
-			kfree(new_irq_info);
-			break; /* irq is not a device interrupt */
-		}
+	memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));
+
+	bridge = (u64) new_irq_info->irq_bridge;
+	if (!bridge) {
+		kfree(new_irq_info);
+		return NULL; /* irq is not a device interrupt */
+	}
 
-		local_nasid = NASID_GET(bridge);
+	local_nasid = NASID_GET(bridge);
 
-		if (local_nasid & 1)
-			local_widget = TIO_SWIN_WIDGETNUM(bridge);
-		else
-			local_widget = SWIN_WIDGETNUM(bridge);
+	if (local_nasid & 1)
+		local_widget = TIO_SWIN_WIDGETNUM(bridge);
+	else
+		local_widget = SWIN_WIDGETNUM(bridge);
 
-		/* Free the old PROM new_irq_info structure */
-		sn_intr_free(local_nasid, local_widget, new_irq_info);
-		/* Update kernels new_irq_info with new target info */
-		unregister_intr_pda(new_irq_info);
+	vector = sn_irq_info->irq_irq;
+	/* Free the old PROM new_irq_info structure */
+	sn_intr_free(local_nasid, local_widget, new_irq_info);
+	/* Update kernels new_irq_info with new target info */
+	unregister_intr_pda(new_irq_info);
 
-		/* allocate a new PROM new_irq_info struct */
-		status = sn_intr_alloc(local_nasid, local_widget,
-				       __pa(new_irq_info), irq,
-				       cpuid_to_nasid(cpuid),
-				       cpuid_to_slice(cpuid));
+	/* allocate a new PROM new_irq_info struct */
+	status = sn_intr_alloc(local_nasid, local_widget,
+			       new_irq_info, vector,
+			       nasid, slice);
 
-		/* SAL call failed */
-		if (status) {
-			kfree(new_irq_info);
-			break;
-		}
+	/* SAL call failed */
+	if (status) {
+		kfree(new_irq_info);
+		return NULL;
+	}
 
-		new_irq_info->irq_cpuid = cpuid;
-		register_intr_pda(new_irq_info);
+	cpuphys = nasid_slice_to_cpuid(nasid, slice);
+	new_irq_info->irq_cpuid = cpuphys;
+	register_intr_pda(new_irq_info);
 
-		pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
-		if (pci_provider && pci_provider->target_interrupt)
-			(pci_provider->target_interrupt)(new_irq_info);
+	pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
 
-		spin_lock(&sn_irq_info_lock);
-		list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
-		spin_unlock(&sn_irq_info_lock);
-		call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
+	/*
+	 * If this represents a line interrupt, target it.  If it's
+	 * an msi (irq_int_bit < 0), it's already targeted.
+	 */
+	if (new_irq_info->irq_int_bit >= 0 &&
+	    pci_provider && pci_provider->target_interrupt)
+		(pci_provider->target_interrupt)(new_irq_info);
+
+	spin_lock(&sn_irq_info_lock);
+	list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
+	spin_unlock(&sn_irq_info_lock);
+	call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
 
 #ifdef CONFIG_SMP
-		set_irq_affinity_info((irq & 0xff), cpuphys, 0);
+	set_irq_affinity_info((vector & 0xff), cpuphys, 0);
 #endif
-	}
+
+	return new_irq_info;
+}
+
+static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
+{
+	struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
+	nasid_t nasid;
+	int slice;
+
+	nasid = cpuid_to_nasid(first_cpu(mask));
+	slice = cpuid_to_slice(first_cpu(mask));
+
+	list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
+				 sn_irq_lh[irq], list)
+		(void)sn_retarget_vector(sn_irq_info, nasid, slice);
 }
 
 struct hw_interrupt_type irq_type_sn = {
@@ -202,6 +221,9 @@ void sn_irq_init(void)
 	int i;
 	irq_desc_t *base_desc = irq_desc;
 
+	ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR;
+	ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;
+
 	for (i = 0; i < NR_IRQS; i++) {
 		if (base_desc[i].handler == &no_irq_type) {
 			base_desc[i].handler = &irq_type_sn;
@@ -285,6 +307,7 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
 	/* link it into the sn_irq[irq] list */
 	spin_lock(&sn_irq_info_lock);
 	list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
+	reserve_irq_vector(sn_irq_info->irq_irq);
 	spin_unlock(&sn_irq_info_lock);
 
 	register_intr_pda(sn_irq_info);
@@ -310,8 +333,11 @@ void sn_irq_unfixup(struct pci_dev *pci_dev)
 	spin_lock(&sn_irq_info_lock);
 	list_del_rcu(&sn_irq_info->list);
 	spin_unlock(&sn_irq_info_lock);
+	if (list_empty(sn_irq_lh[sn_irq_info->irq_irq]))
+		free_irq_vector(sn_irq_info->irq_irq);
 	call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
 	pci_dev_put(pci_dev);
+
 }
 
 static inline void
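With this refactor, sn_set_affinity_irq() is a thin wrapper: it derives the target nasid and slice from the first cpu in the mask and calls sn_retarget_vector() for each sn_irq_info on the irq's list, so the retargeting path can also be driven directly by other code such as the MSI support this merge prepares for. A hedged sketch of such a caller (the variable setup is hypothetical; the functions are those used in the diff):

	/* sketch: retarget one vector at a given cpu; NULL means the irq is
	 * not a device interrupt or the SAL call failed */
	nasid_t nasid = cpuid_to_nasid(cpu);
	int slice = cpuid_to_slice(cpu);
	sn_irq_info = sn_retarget_vector(sn_irq_info, nasid, slice);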
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index 30988dfbddff..93577abae36d 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -139,7 +139,7 @@ static int __init pxm_to_nasid(int pxm)
 	int i;
 	int nid;
 
-	nid = pxm_to_nid_map[pxm];
+	nid = pxm_to_node(pxm);
 	for (i = 0; i < num_node_memblks; i++) {
 		if (node_memblk[i].nid == nid) {
 			return NASID_GET(node_memblk[i].start_paddr);
@@ -704,7 +704,7 @@ void __init build_cnode_tables(void)
 	 * cnode == node for all C & M bricks.
 	 */
 	for_each_online_node(node) {
-		nasid = pxm_to_nasid(nid_to_pxm_map[node]);
+		nasid = pxm_to_nasid(node_to_pxm(node));
 		sn_cnodeid_to_nasid[node] = nasid;
 		physical_node_map[nasid] = node;
 	}
diff --git a/arch/ia64/sn/kernel/sn2/cache.c b/arch/ia64/sn/kernel/sn2/cache.c
index bc3cfa17cd0f..2862cb33026d 100644
--- a/arch/ia64/sn/kernel/sn2/cache.c
+++ b/arch/ia64/sn/kernel/sn2/cache.c
@@ -3,11 +3,12 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2003, 2006 Silicon Graphics, Inc. All rights reserved.
  *
  */
 #include <linux/module.h>
 #include <asm/pgalloc.h>
+#include <asm/sn/arch.h>
 
 /**
  * sn_flush_all_caches - flush a range of address from all caches (incl. L4)
@@ -17,18 +18,24 @@
  * Flush a range of addresses from all caches including L4.
  * All addresses fully or partially contained within
  * @flush_addr to @flush_addr + @bytes are flushed
- * from the all caches.
+ * from all caches.
  */
 void
 sn_flush_all_caches(long flush_addr, long bytes)
 {
-	flush_icache_range(flush_addr, flush_addr+bytes);
+	unsigned long addr = flush_addr;
+
+	/* SHub1 requires a cached address */
+	if (is_shub1() && (addr & RGN_BITS) == RGN_BASE(RGN_UNCACHED))
+		addr = (addr - RGN_BASE(RGN_UNCACHED)) + RGN_BASE(RGN_KERNEL);
+
+	flush_icache_range(addr, addr + bytes);
 	/*
 	 * The last call may have returned before the caches
 	 * were actually flushed, so we call it again to make
 	 * sure.
 	 */
-	flush_icache_range(flush_addr, flush_addr+bytes);
+	flush_icache_range(addr, addr + bytes);
 	mb();
 }
 EXPORT_SYMBOL(sn_flush_all_caches);
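Since ia64 selects the address region by the upper address bits, the SHub1 workaround above simply re-bases an uncached (RGN_UNCACHED) address onto the cached kernel region alias of the same physical memory before flushing. A sketch of that arithmetic in isolation, using the RGN_* macros as they appear in the diff (the helper function itself is hypothetical):

	/* sketch: return the cached kernel-region alias of an uncached address */
	static unsigned long sn2_cached_alias(unsigned long addr)
	{
		if ((addr & RGN_BITS) == RGN_BASE(RGN_UNCACHED))
			addr = (addr - RGN_BASE(RGN_UNCACHED)) + RGN_BASE(RGN_KERNEL);
		return addr;
	}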
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index b4b84c269210..7a291a271511 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -11,7 +11,7 @@
 
 #include <linux/module.h>
 #include <asm/dma.h>
-#include <asm/sn/pcibr_provider.h>
+#include <asm/sn/intr.h>
 #include <asm/sn/pcibus_provider_defs.h>
 #include <asm/sn/pcidev.h>
 #include <asm/sn/sn_sal.h>
@@ -113,7 +113,8 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size,
 	 * resources.
 	 */
 
-	*dma_handle = provider->dma_map_consistent(pdev, phys_addr, size);
+	*dma_handle = provider->dma_map_consistent(pdev, phys_addr, size,
+						   SN_DMA_ADDR_PHYS);
 	if (!*dma_handle) {
 		printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
 		free_pages((unsigned long)cpuaddr, get_order(size));
@@ -176,7 +177,7 @@ dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
 	BUG_ON(dev->bus != &pci_bus_type);
 
 	phys_addr = __pa(cpu_addr);
-	dma_addr = provider->dma_map(pdev, phys_addr, size);
+	dma_addr = provider->dma_map(pdev, phys_addr, size, SN_DMA_ADDR_PHYS);
 	if (!dma_addr) {
 		printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
 		return 0;
@@ -260,7 +261,8 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
 	for (i = 0; i < nhwentries; i++, sg++) {
 		phys_addr = SG_ENT_PHYS_ADDRESS(sg);
 		sg->dma_address = provider->dma_map(pdev,
-						    phys_addr, sg->length);
+						    phys_addr, sg->length,
+						    SN_DMA_ADDR_PHYS);
 
 		if (!sg->dma_address) {
 			printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
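All three changed call sites pass SN_DMA_ADDR_PHYS, so the regular DMA API paths keep their old treat-the-address-as-physical behavior; the new flag argument only becomes meaningful for the MSI mappings handled by the provider changes below. A hedged sketch of the calling convention (the wrapper function is hypothetical; SN_DMA_ADDR_PHYS and SN_DMA_MSI are taken from these diffs, and SN_PCIDEV_BUSPROVIDER() is assumed from the sn pcidev headers):

	/* sketch only: map a physical address, declining MSI requests */
	static dma_addr_t example_map(struct pci_dev *pdev, unsigned long paddr,
				      size_t size, int dma_flags)
	{
		struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

		if (dma_flags & SN_DMA_MSI)
			return 0;	/* this example does not support MSI */

		return provider->dma_map(pdev, paddr, size, SN_DMA_ADDR_PHYS);
	}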
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
index 9f86bb6519aa..a86c7b945962 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
@@ -41,7 +41,7 @@ extern int sn_ioif_inited;
 
 static dma_addr_t
 pcibr_dmamap_ate32(struct pcidev_info *info,
-		   u64 paddr, size_t req_size, u64 flags)
+		   u64 paddr, size_t req_size, u64 flags, int dma_flags)
 {
 
 	struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
@@ -81,9 +81,12 @@ pcibr_dmamap_ate32(struct pcidev_info *info,
 	if (IS_PCIX(pcibus_info))
 		ate_flags &= ~(PCI32_ATE_PREF);
 
-	xio_addr =
-	    IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
-	    PHYS_TO_TIODMA(paddr);
+	if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
+		xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
+						      PHYS_TO_TIODMA(paddr);
+	else
+		xio_addr = paddr;
+
 	offset = IOPGOFF(xio_addr);
 	ate = ate_flags | (xio_addr - offset);
 
@@ -91,6 +94,13 @@ pcibr_dmamap_ate32(struct pcidev_info *info,
 	if (IS_PIC_SOFT(pcibus_info)) {
 		ate |= (pcibus_info->pbi_hub_xid << PIC_ATE_TARGETID_SHFT);
 	}
+
+	/*
+	 * If we're mapping for MSI, set the MSI bit in the ATE
+	 */
+	if (dma_flags & SN_DMA_MSI)
+		ate |= PCI32_ATE_MSI;
+
 	ate_write(pcibus_info, ate_index, ate_count, ate);
 
 	/*
@@ -105,20 +115,27 @@ pcibr_dmamap_ate32(struct pcidev_info *info,
 	if (pcibus_info->pbi_devreg[internal_device] & PCIBR_DEV_SWAP_DIR)
 		ATE_SWAP_ON(pci_addr);
 
+
 	return pci_addr;
 }
 
 static dma_addr_t
 pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
-			u64 dma_attributes)
+			u64 dma_attributes, int dma_flags)
 {
 	struct pcibus_info *pcibus_info = (struct pcibus_info *)
 	    ((info->pdi_host_pcidev_info)->pdi_pcibus_info);
 	u64 pci_addr;
 
 	/* Translate to Crosstalk View of Physical Address */
-	pci_addr = (IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
-		    PHYS_TO_TIODMA(paddr)) | dma_attributes;
+	if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
+		pci_addr = IS_PIC_SOFT(pcibus_info) ?
+				PHYS_TO_DMA(paddr) :
+				PHYS_TO_TIODMA(paddr) | dma_attributes;
+	else
+		pci_addr = IS_PIC_SOFT(pcibus_info) ?
+				paddr :
+				paddr | dma_attributes;
 
 	/* Handle Bus mode */
 	if (IS_PCIX(pcibus_info))
@@ -130,7 +147,9 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
 			    ((u64) pcibus_info->
 			     pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT);
 	} else
-		pci_addr |= TIOCP_PCI64_CMDTYPE_MEM;
+		pci_addr |= (dma_flags & SN_DMA_MSI) ?
+				TIOCP_PCI64_CMDTYPE_MSI :
+				TIOCP_PCI64_CMDTYPE_MEM;
 
 	/* If PCI mode, func zero uses VCHAN0, every other func uses VCHAN1 */
 	if (!IS_PCIX(pcibus_info) && PCI_FUNC(info->pdi_linux_pcidev->devfn))
@@ -141,7 +160,7 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
 
 static dma_addr_t
 pcibr_dmatrans_direct32(struct pcidev_info * info,
-			u64 paddr, size_t req_size, u64 flags)
+			u64 paddr, size_t req_size, u64 flags, int dma_flags)
 {
 	struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
 	struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
@@ -156,8 +175,14 @@ pcibr_dmatrans_direct32(struct pcidev_info * info,
 		return 0;
 	}
 
-	xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
-	    PHYS_TO_TIODMA(paddr);
+	if (dma_flags & SN_DMA_MSI)
+		return 0;
+
+	if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
+		xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
+		    PHYS_TO_TIODMA(paddr);
+	else
+		xio_addr = paddr;
 
 	xio_base = pcibus_info->pbi_dir_xbase;
 	offset = xio_addr - xio_base;
@@ -327,7 +352,7 @@ void sn_dma_flush(u64 addr)
  */
 
 dma_addr_t
-pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
+pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size, int dma_flags)
 {
 	dma_addr_t dma_handle;
 	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
@@ -344,11 +369,11 @@ pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
 		 */
 
 		dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
-						     PCI64_ATTR_PREF);
+						     PCI64_ATTR_PREF, dma_flags);
 	} else {
 		/* Handle 32-63 bit cards via direct mapping */
 		dma_handle = pcibr_dmatrans_direct32(pcidev_info, phys_addr,
-						     size, 0);
+						     size, 0, dma_flags);
 		if (!dma_handle) {
 			/*
 			 * It is a 32 bit card and we cannot do direct mapping,
@@ -356,7 +381,8 @@ pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
 			 */
 
 			dma_handle = pcibr_dmamap_ate32(pcidev_info, phys_addr,
-							size, PCI32_ATE_PREF);
+							size, PCI32_ATE_PREF,
+							dma_flags);
 		}
 	}
 
@@ -365,18 +391,18 @@ pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
 
 dma_addr_t
 pcibr_dma_map_consistent(struct pci_dev * hwdev, unsigned long phys_addr,
-			 size_t size)
+			 size_t size, int dma_flags)
 {
 	dma_addr_t dma_handle;
 	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
 
 	if (hwdev->dev.coherent_dma_mask == ~0UL) {
 		dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
-						     PCI64_ATTR_BAR);
+						     PCI64_ATTR_BAR, dma_flags);
 	} else {
 		dma_handle = (dma_addr_t) pcibr_dmamap_ate32(pcidev_info,
 							     phys_addr, size,
-							     PCI32_ATE_BAR);
+							     PCI32_ATE_BAR, dma_flags);
 	}
 
 	return dma_handle;
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index be0176912968..20de72791b97 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -515,11 +515,17 @@ tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
  * use the GART mapped mode.
  */
 static u64
-tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count)
+tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
 {
 	u64 mapaddr;
 
 	/*
+	 * Not supported for now ...
+	 */
+	if (dma_flags & SN_DMA_MSI)
+		return 0;
+
+	/*
 	 * If card is 64 or 48 bit addresable, use a direct mapping.  32
 	 * bit direct is so restrictive w.r.t. where the memory resides that
 	 * we don't use it even though CA has some support.
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c index 85f3b3d4c606..2d7948567ebc 100644 --- a/arch/ia64/sn/pci/tioce_provider.c +++ b/arch/ia64/sn/pci/tioce_provider.c | |||
@@ -170,7 +170,8 @@ tioce_mmr_war_post(struct tioce_kernel *kern, void *mmr_addr) | |||
170 | (ATE_PAGE((start)+(len)-1, pagesize) - ATE_PAGE(start, pagesize) + 1) | 170 | (ATE_PAGE((start)+(len)-1, pagesize) - ATE_PAGE(start, pagesize) + 1) |
171 | 171 | ||
172 | #define ATE_VALID(ate) ((ate) & (1UL << 63)) | 172 | #define ATE_VALID(ate) ((ate) & (1UL << 63)) |
173 | #define ATE_MAKE(addr, ps) (((addr) & ~ATE_PAGEMASK(ps)) | (1UL << 63)) | 173 | #define ATE_MAKE(addr, ps, msi) \ |
174 | (((addr) & ~ATE_PAGEMASK(ps)) | (1UL << 63) | ((msi)?(1UL << 62):0)) | ||
174 | 175 | ||
175 | /* | 176 | /* |
176 | * Flavors of ate-based mapping supported by tioce_alloc_map() | 177 | * Flavors of ate-based mapping supported by tioce_alloc_map() |
@@ -196,15 +197,17 @@ tioce_mmr_war_post(struct tioce_kernel *kern, void *mmr_addr) | |||
196 | * | 197 | * |
197 | * 63 - must be 1 to indicate d64 mode to CE hardware | 198 | * 63 - must be 1 to indicate d64 mode to CE hardware |
198 | * 62 - barrier bit ... controlled with tioce_dma_barrier() | 199 | * 62 - barrier bit ... controlled with tioce_dma_barrier() |
199 | * 61 - 0 since this is not an MSI transaction | 200 | * 61 - msi bit ... specified through dma_flags |
200 | * 60:54 - reserved, MBZ | 201 | * 60:54 - reserved, MBZ |
201 | */ | 202 | */ |
202 | static u64 | 203 | static u64 |
203 | tioce_dma_d64(unsigned long ct_addr) | 204 | tioce_dma_d64(unsigned long ct_addr, int dma_flags) |
204 | { | 205 | { |
205 | u64 bus_addr; | 206 | u64 bus_addr; |
206 | 207 | ||
207 | bus_addr = ct_addr | (1UL << 63); | 208 | bus_addr = ct_addr | (1UL << 63); |
209 | if (dma_flags & SN_DMA_MSI) | ||
210 | bus_addr |= (1UL << 61); | ||
208 | 211 | ||
209 | return bus_addr; | 212 | return bus_addr; |
210 | } | 213 | } |
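Editor's note: the bit layout documented above (63 = d64 mode, 62 = barrier, 61 = MSI) is easy to verify in isolation. A self-contained sketch of the d64 address construction follows; the SN_DMA_MSI value is assumed for the demo, and d64_addr() folds in the barrier bit that tioce_dma_barrier() controls in the real driver.

#include <stdint.h>
#include <assert.h>

#define SN_DMA_MSI 0x2                  /* assumed value, illustration only */

static uint64_t d64_addr(uint64_t ct_addr, int barrier, int dma_flags)
{
        uint64_t bus = ct_addr | (1ULL << 63);  /* bit 63: d64 mode */

        if (barrier)
                bus |= (1ULL << 62);            /* bit 62: barrier */
        if (dma_flags & SN_DMA_MSI)
                bus |= (1ULL << 61);            /* bit 61: msi */
        return bus;
}

int main(void)
{
        uint64_t a = d64_addr(0x1000, 0, SN_DMA_MSI);

        assert(a & (1ULL << 63));       /* d64 mode flagged */
        assert(a & (1ULL << 61));       /* msi bit set */
        assert(!(a & (1ULL << 62)));    /* no barrier requested */
        return 0;
}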
@@ -261,7 +264,7 @@ pcidev_to_tioce(struct pci_dev *pdev, struct tioce **base, | |||
261 | */ | 264 | */ |
262 | static u64 | 265 | static u64 |
263 | tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, | 266 | tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, |
264 | u64 ct_addr, int len) | 267 | u64 ct_addr, int len, int dma_flags) |
265 | { | 268 | { |
266 | int i; | 269 | int i; |
267 | int j; | 270 | int j; |
@@ -270,6 +273,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, | |||
270 | int entries; | 273 | int entries; |
271 | int nates; | 274 | int nates; |
272 | u64 pagesize; | 275 | u64 pagesize; |
276 | int msi_capable, msi_wanted; | ||
273 | u64 *ate_shadow; | 277 | u64 *ate_shadow; |
274 | u64 *ate_reg; | 278 | u64 *ate_reg; |
275 | u64 addr; | 279 | u64 addr; |
@@ -291,6 +295,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, | |||
291 | ate_reg = ce_mmr->ce_ure_ate3240; | 295 | ate_reg = ce_mmr->ce_ure_ate3240; |
292 | pagesize = ce_kern->ce_ate3240_pagesize; | 296 | pagesize = ce_kern->ce_ate3240_pagesize; |
293 | bus_base = TIOCE_M32_MIN; | 297 | bus_base = TIOCE_M32_MIN; |
298 | msi_capable = 1; | ||
294 | break; | 299 | break; |
295 | case TIOCE_ATE_M40: | 300 | case TIOCE_ATE_M40: |
296 | first = 0; | 301 | first = 0; |
@@ -299,6 +304,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, | |||
299 | ate_reg = ce_mmr->ce_ure_ate40; | 304 | ate_reg = ce_mmr->ce_ure_ate40; |
300 | pagesize = MB(64); | 305 | pagesize = MB(64); |
301 | bus_base = TIOCE_M40_MIN; | 306 | bus_base = TIOCE_M40_MIN; |
307 | msi_capable = 0; | ||
302 | break; | 308 | break; |
303 | case TIOCE_ATE_M40S: | 309 | case TIOCE_ATE_M40S: |
304 | /* | 310 | /* |
@@ -311,11 +317,16 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, | |||
311 | ate_reg = ce_mmr->ce_ure_ate3240; | 317 | ate_reg = ce_mmr->ce_ure_ate3240; |
312 | pagesize = GB(16); | 318 | pagesize = GB(16); |
313 | bus_base = TIOCE_M40S_MIN; | 319 | bus_base = TIOCE_M40S_MIN; |
320 | msi_capable = 0; | ||
314 | break; | 321 | break; |
315 | default: | 322 | default: |
316 | return 0; | 323 | return 0; |
317 | } | 324 | } |
318 | 325 | ||
326 | msi_wanted = dma_flags & SN_DMA_MSI; | ||
327 | if (msi_wanted && !msi_capable) | ||
328 | return 0; | ||
329 | |||
319 | nates = ATE_NPAGES(ct_addr, len, pagesize); | 330 | nates = ATE_NPAGES(ct_addr, len, pagesize); |
320 | if (nates > entries) | 331 | if (nates > entries) |
321 | return 0; | 332 | return 0; |
@@ -344,7 +355,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, | |||
344 | for (j = 0; j < nates; j++) { | 355 | for (j = 0; j < nates; j++) { |
345 | u64 ate; | 356 | u64 ate; |
346 | 357 | ||
347 | ate = ATE_MAKE(addr, pagesize); | 358 | ate = ATE_MAKE(addr, pagesize, msi_wanted); |
348 | ate_shadow[i + j] = ate; | 359 | ate_shadow[i + j] = ate; |
349 | tioce_mmr_storei(ce_kern, &ate_reg[i + j], ate); | 360 | tioce_mmr_storei(ce_kern, &ate_reg[i + j], ate); |
350 | addr += pagesize; | 361 | addr += pagesize; |
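Editor's note: only the M32 flavor's ATEs can carry the MSI attribute, so tioce_alloc_map() now refuses an MSI request against M40/M40S before touching the map. The per-flavor switch above can be read as a small capability table; the table-driven form below is an illustrative restructuring, not the patch's code, and SN_DMA_MSI's value is assumed.

#include <stdint.h>

#define SN_DMA_MSI 0x2                  /* assumed value, illustration only */

enum tioce_ate { TIOCE_ATE_M32, TIOCE_ATE_M40, TIOCE_ATE_M40S };

/* 1 where the flavor's ATE format has an MSI attribute bit. */
static const int msi_capable[] = {
        [TIOCE_ATE_M32]  = 1,
        [TIOCE_ATE_M40]  = 0,
        [TIOCE_ATE_M40S] = 0,
};

/* Mirrors the gate added in the hunks above: an MSI request against a
 * non-MSI-capable flavor must fail fast. */
static int alloc_map_allowed(enum tioce_ate type, int dma_flags)
{
        int msi_wanted = dma_flags & SN_DMA_MSI;

        return !(msi_wanted && !msi_capable[type]);
}

int main(void)
{
        /* MSI against M40 must be refused. */
        return alloc_map_allowed(TIOCE_ATE_M40, SN_DMA_MSI) ? 1 : 0;
}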
@@ -371,7 +382,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, | |||
371 | * Map @paddr into 32-bit bus space of the CE associated with @pcidev_info. | 382 | * Map @paddr into 32-bit bus space of the CE associated with @pcidev_info. |
372 | */ | 383 | */ |
373 | static u64 | 384 | static u64 |
374 | tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr) | 385 | tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr, int dma_flags) |
375 | { | 386 | { |
376 | int dma_ok; | 387 | int dma_ok; |
377 | int port; | 388 | int port; |
@@ -381,6 +392,9 @@ tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr) | |||
381 | u64 ct_lower; | 392 | u64 ct_lower; |
382 | dma_addr_t bus_addr; | 393 | dma_addr_t bus_addr; |
383 | 394 | ||
395 | if (dma_flags & SN_DMA_MSI) | ||
396 | return 0; | ||
397 | |||
384 | ct_upper = ct_addr & ~0x3fffffffUL; | 398 | ct_upper = ct_addr & ~0x3fffffffUL; |
385 | ct_lower = ct_addr & 0x3fffffffUL; | 399 | ct_lower = ct_addr & 0x3fffffffUL; |
386 | 400 | ||
@@ -507,7 +521,7 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir) | |||
507 | */ | 521 | */ |
508 | static u64 | 522 | static u64 |
509 | tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, | 523 | tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, |
510 | int barrier) | 524 | int barrier, int dma_flags) |
511 | { | 525 | { |
512 | unsigned long flags; | 526 | unsigned long flags; |
513 | u64 ct_addr; | 527 | u64 ct_addr; |
@@ -523,15 +537,18 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, | |||
523 | if (dma_mask < 0x7fffffffUL) | 537 | if (dma_mask < 0x7fffffffUL) |
524 | return 0; | 538 | return 0; |
525 | 539 | ||
526 | ct_addr = PHYS_TO_TIODMA(paddr); | 540 | if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS) |
541 | ct_addr = PHYS_TO_TIODMA(paddr); | ||
542 | else | ||
543 | ct_addr = paddr; | ||
527 | 544 | ||
528 | /* | 545 | /* |
529 | * If the device can generate 64 bit addresses, create a D64 map. | 546 | * If the device can generate 64 bit addresses, create a D64 map. |
530 | * Since this should never fail, bypass the rest of the checks. | ||
531 | */ | 547 | */ |
532 | if (dma_mask == ~0UL) { | 548 | if (dma_mask == ~0UL) { |
533 | mapaddr = tioce_dma_d64(ct_addr); | 549 | mapaddr = tioce_dma_d64(ct_addr, dma_flags); |
534 | goto dma_map_done; | 550 | if (mapaddr) |
551 | goto dma_map_done; | ||
535 | } | 552 | } |
536 | 553 | ||
537 | pcidev_to_tioce(pdev, NULL, &ce_kern, &port); | 554 | pcidev_to_tioce(pdev, NULL, &ce_kern, &port); |
@@ -574,18 +591,22 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, | |||
574 | 591 | ||
575 | if (byte_count > MB(64)) { | 592 | if (byte_count > MB(64)) { |
576 | mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40S, | 593 | mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40S, |
577 | port, ct_addr, byte_count); | 594 | port, ct_addr, byte_count, |
595 | dma_flags); | ||
578 | if (!mapaddr) | 596 | if (!mapaddr) |
579 | mapaddr = | 597 | mapaddr = |
580 | tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1, | 598 | tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1, |
581 | ct_addr, byte_count); | 599 | ct_addr, byte_count, |
600 | dma_flags); | ||
582 | } else { | 601 | } else { |
583 | mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1, | 602 | mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1, |
584 | ct_addr, byte_count); | 603 | ct_addr, byte_count, |
604 | dma_flags); | ||
585 | if (!mapaddr) | 605 | if (!mapaddr) |
586 | mapaddr = | 606 | mapaddr = |
587 | tioce_alloc_map(ce_kern, TIOCE_ATE_M40S, | 607 | tioce_alloc_map(ce_kern, TIOCE_ATE_M40S, |
588 | port, ct_addr, byte_count); | 608 | port, ct_addr, byte_count, |
609 | dma_flags); | ||
589 | } | 610 | } |
590 | } | 611 | } |
591 | 612 | ||
@@ -593,7 +614,7 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, | |||
593 | * 32-bit direct is the next mode to try | 614 | * 32-bit direct is the next mode to try |
594 | */ | 615 | */ |
595 | if (!mapaddr && dma_mask >= 0xffffffffUL) | 616 | if (!mapaddr && dma_mask >= 0xffffffffUL) |
596 | mapaddr = tioce_dma_d32(pdev, ct_addr); | 617 | mapaddr = tioce_dma_d32(pdev, ct_addr, dma_flags); |
597 | 618 | ||
598 | /* | 619 | /* |
599 | * Last resort, try 32-bit ATE-based map. | 620 | * Last resort, try 32-bit ATE-based map. |
@@ -601,7 +622,7 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, | |||
601 | if (!mapaddr) | 622 | if (!mapaddr) |
602 | mapaddr = | 623 | mapaddr = |
603 | tioce_alloc_map(ce_kern, TIOCE_ATE_M32, -1, ct_addr, | 624 | tioce_alloc_map(ce_kern, TIOCE_ATE_M32, -1, ct_addr, |
604 | byte_count); | 625 | byte_count, dma_flags); |
605 | 626 | ||
606 | spin_unlock_irqrestore(&ce_kern->ce_lock, flags); | 627 | spin_unlock_irqrestore(&ce_kern->ce_lock, flags); |
607 | 628 | ||
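Editor's note: the hunks above change tioce_do_dma_map()'s structure as well as its signature. Before the patch, a d64 map was assumed to never fail and jumped straight to done; now its result is checked so a failed d64 attempt falls through the same cascade as everything else: the 40-bit ATE flavors (tried in an order chosen by transfer size), then 32-bit direct, then 32-bit ATE as the last resort. A condensed sketch of the cascade follows; every try_* helper is a hypothetical stub for the corresponding tioce_* call, returning 0 on failure, and the two size-ordered M40/M40S attempts are collapsed into one stub.

#include <stddef.h>
#include <stdint.h>

typedef uint64_t u64;

static u64 try_d64(u64 ct, int f)                { (void)f; return ct | (1ULL << 63); }
static u64 try_m40_pair(u64 ct, size_t n, int f) { (void)ct; (void)n; (void)f; return 0; }
static u64 try_d32(u64 ct, int f)                { (void)ct; (void)f; return 0; }
static u64 try_m32(u64 ct, size_t n, int f)      { (void)n; (void)f; return 0x80000000ULL | (ct & 0xfffULL); }

static u64 cascade(u64 dma_mask, u64 ct_addr, size_t len, int flags)
{
        u64 map = 0;

        if (dma_mask == ~0ULL)
                map = try_d64(ct_addr, flags);          /* may now fail ... */
        if (!map)                                       /* ... so fall through */
                map = try_m40_pair(ct_addr, len, flags);
        if (!map && dma_mask >= 0xffffffffULL)
                map = try_d32(ct_addr, flags);
        if (!map)
                map = try_m32(ct_addr, len, flags);     /* last resort */
        return map;
}

int main(void)
{
        return cascade(~0ULL, 0x3000, 4096, 0) ? 0 : 1;
}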
@@ -622,9 +643,9 @@ dma_map_done: | |||
622 | * in the address. | 643 | * in the address. |
623 | */ | 644 | */ |
624 | static u64 | 645 | static u64 |
625 | tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count) | 646 | tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags) |
626 | { | 647 | { |
627 | return tioce_do_dma_map(pdev, paddr, byte_count, 0); | 648 | return tioce_do_dma_map(pdev, paddr, byte_count, 0, dma_flags); |
628 | } | 649 | } |
629 | 650 | ||
630 | /** | 651 | /** |
@@ -636,9 +657,9 @@ tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count) | |||
636 | * Simply call tioce_do_dma_map() to create a map with the barrier bit set | 657 | * Simply call tioce_do_dma_map() to create a map with the barrier bit set |
637 | * in the address. | 658 | * in the address. |
638 | */ static u64 | 659 | */ static u64 |
639 | tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count) | 660 | tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags) |
640 | { | 661 | { |
641 | return tioce_do_dma_map(pdev, paddr, byte_count, 1); | 662 | return tioce_do_dma_map(pdev, paddr, byte_count, 1, dma_flags); |
642 | } | 663 | } |
643 | 664 | ||
644 | /** | 665 | /** |
@@ -696,7 +717,7 @@ tioce_reserve_m32(struct tioce_kernel *ce_kern, u64 base, u64 limit) | |||
696 | while (ate_index <= last_ate) { | 717 | while (ate_index <= last_ate) { |
697 | u64 ate; | 718 | u64 ate; |
698 | 719 | ||
699 | ate = ATE_MAKE(0xdeadbeef, ps); | 720 | ate = ATE_MAKE(0xdeadbeef, ps, 0); |
700 | ce_kern->ce_ate3240_shadow[ate_index] = ate; | 721 | ce_kern->ce_ate3240_shadow[ate_index] = ate; |
701 | tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_ate3240[ate_index], | 722 | tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_ate3240[ate_index], |
702 | ate); | 723 | ate); |
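Editor's note: the reserve path above stamps valid ATEs at a poison address (0xdeadbeef) with the new MSI argument cleared. The extended ATE_MAKE() semantics are simple to check stand-alone: bit 63 marks the entry valid, bit 62 carries the MSI attribute, and the page-offset bits are masked off the address. In the sketch below the page mask is simplified to a fixed 4 KB page, and the _SKETCH names are illustrative.

#include <stdint.h>
#include <assert.h>

#define ATE_PAGEMASK_SKETCH     ((1ULL << 12) - 1)      /* assumed 4KB page */
#define ATE_MAKE_SKETCH(addr, msi) \
        (((addr) & ~ATE_PAGEMASK_SKETCH) | (1ULL << 63) | \
         ((msi) ? (1ULL << 62) : 0))

int main(void)
{
        uint64_t ate = ATE_MAKE_SKETCH(0xdeadbeefULL, 0);

        assert(ate & (1ULL << 63));             /* valid bit set */
        assert(!(ate & (1ULL << 62)));          /* msi attribute clear */
        assert(!(ate & ATE_PAGEMASK_SKETCH));   /* page offset masked off */
        return 0;
}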