Diffstat (limited to 'arch/ia64')
 arch/ia64/hp/common/hwsw_iommu.c   |  13
 arch/ia64/hp/common/sba_iommu.c    |  45
 arch/ia64/hp/sim/simscsi.c         |  29
 arch/ia64/kernel/acpi.c            |   2
 arch/ia64/kernel/entry.S           |   2
 arch/ia64/kernel/mca.c             |   5
 arch/ia64/kernel/mca_asm.S         |  96
 arch/ia64/kernel/mca_drv.c         | 135
 arch/ia64/kernel/mca_drv.h         |   2
 arch/ia64/kernel/mca_drv_asm.S     |  48
 arch/ia64/kernel/perfmon.c         |   5
 arch/ia64/lib/swiotlb.c            | 106
 arch/ia64/sn/kernel/tiocx.c        |  62
 arch/ia64/sn/pci/pcibr/pcibr_reg.c |  59
 arch/ia64/sn/pci/tioca_provider.c  |  32
 arch/ia64/sn/pci/tioce_provider.c  |  30
 16 files changed, 470 insertions, 201 deletions
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 80f8ef013939..317c334c5a18 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -17,7 +17,7 @@
 #include <asm/machvec.h>
 
 /* swiotlb declarations & definitions: */
-extern void swiotlb_init_with_default_size (size_t size);
+extern int swiotlb_late_init_with_default_size (size_t size);
 extern ia64_mv_dma_alloc_coherent swiotlb_alloc_coherent;
 extern ia64_mv_dma_free_coherent swiotlb_free_coherent;
 extern ia64_mv_dma_map_single swiotlb_map_single;
@@ -67,7 +67,16 @@ void
 hwsw_init (void)
 {
         /* default to a smallish 2MB sw I/O TLB */
-        swiotlb_init_with_default_size (2 * (1<<20));
+        if (swiotlb_late_init_with_default_size (2 * (1<<20)) != 0) {
+#ifdef CONFIG_IA64_GENERIC
+                /* Better to have normal DMA than panic */
+                printk(KERN_WARNING "%s: Failed to initialize software I/O TLB,"
+                       " reverting to hpzx1 platform vector\n", __FUNCTION__);
+                machvec_init("hpzx1");
+#else
+                panic("Unable to initialize software I/O TLB services");
+#endif
+        }
 }
 
 void *
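Note: the swiotlb init used by hwsw_init() changes here from a void function to one returning 0 or -ENOMEM, so the caller can choose a fallback instead of panicking. A caller-side sketch of that convention (illustrative only, mirroring the hunk above):

    extern int swiotlb_late_init_with_default_size(size_t size); /* 0 or -ENOMEM */

    if (swiotlb_late_init_with_default_size(2 * (1 << 20)) != 0)
            machvec_init("hpzx1");  /* generic kernel: fall back rather than panic */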
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 11957598a8b9..e64ca04ace89 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -2028,9 +2028,40 @@ static struct acpi_driver acpi_sba_ioc_driver = {
 static int __init
 sba_init(void)
 {
+        if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb"))
+                return 0;
+
         acpi_bus_register_driver(&acpi_sba_ioc_driver);
-        if (!ioc_list)
+        if (!ioc_list) {
+#ifdef CONFIG_IA64_GENERIC
+                extern int swiotlb_late_init_with_default_size (size_t size);
+
+                /*
+                 * If we didn't find something sba_iommu can claim, we
+                 * need to setup the swiotlb and switch to the dig machvec.
+                 */
+                if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
+                        panic("Unable to find SBA IOMMU or initialize "
+                              "software I/O TLB: Try machvec=dig boot option");
+                machvec_init("dig");
+#else
+                panic("Unable to find SBA IOMMU: Try a generic or DIG kernel");
+#endif
                 return 0;
+        }
+
+#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_HP_ZX1_SWIOTLB)
+        /*
+         * hpzx1_swiotlb needs to have a fairly small swiotlb bounce
+         * buffer setup to support devices with smaller DMA masks than
+         * sba_iommu can handle.
+         */
+        if (ia64_platform_is("hpzx1_swiotlb")) {
+                extern void hwsw_init(void);
+
+                hwsw_init();
+        }
+#endif
 
 #ifdef CONFIG_PCI
         {
@@ -2048,18 +2079,6 @@ sba_init(void)
 
 subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */
 
-extern void dig_setup(char**);
-/*
- * MAX_DMA_ADDRESS needs to be setup prior to paging_init to do any good,
- * so we use the platform_setup hook to fix it up.
- */
-void __init
-sba_setup(char **cmdline_p)
-{
-        MAX_DMA_ADDRESS = ~0UL;
-        dig_setup(cmdline_p);
-}
-
 static int __init
 nosbagart(char *str)
 {
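Note: on a CONFIG_IA64_GENERIC kernel sba_init() now implements a fallback chain rather than assuming an SBA IOMMU is present. A condensed sketch of that chain (names and messages abbreviated from the hunks above; use_sba_iommu() is a hypothetical stand-in):

    if (ioc_list)                           /* SBA IOMMU found */
            use_sba_iommu();
    else if (swiotlb_late_init_with_default_size(64 * (1 << 20)) == 0)
            machvec_init("dig");            /* bounce buffers + DIG platform vector */
    else
            panic("no usable DMA mapping layer");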
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index 56405dbfd739..a18983a3c934 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -233,6 +233,23 @@ simscsi_readwrite10 (struct scsi_cmnd *sc, int mode)
         simscsi_readwrite(sc, mode, offset, ((sc->cmnd[7] << 8) | sc->cmnd[8])*512);
 }
 
+static void simscsi_fillresult(struct scsi_cmnd *sc, char *buf, unsigned len)
+{
+
+        int scatterlen = sc->use_sg;
+        struct scatterlist *slp;
+
+        if (scatterlen == 0)
+                memcpy(sc->request_buffer, buf, len);
+        else for (slp = (struct scatterlist *)sc->buffer; scatterlen-- > 0 && len > 0; slp++) {
+                unsigned thislen = min(len, slp->length);
+
+                memcpy(page_address(slp->page) + slp->offset, buf, thislen);
+                slp++;
+                len -= thislen;
+        }
+}
+
 static int
 simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 {
@@ -240,6 +257,7 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
         char fname[MAX_ROOT_LEN+16];
         size_t disk_size;
         char *buf;
+        char localbuf[36];
 #if DEBUG_SIMSCSI
         register long sp asm ("sp");
 
@@ -263,7 +281,7 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
                         /* disk doesn't exist... */
                         break;
                 }
-                buf = sc->request_buffer;
+                buf = localbuf;
                 buf[0] = 0;     /* magnetic disk */
                 buf[1] = 0;     /* not a removable medium */
                 buf[2] = 2;     /* SCSI-2 compliant device */
@@ -273,6 +291,7 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
                 buf[6] = 0;     /* reserved */
                 buf[7] = 0;     /* various flags */
                 memcpy(buf + 8, "HP      SIMULATED DISK  0.00",  28);
+                simscsi_fillresult(sc, buf, 36);
                 sc->result = GOOD;
                 break;
 
@@ -304,16 +323,13 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
                         simscsi_readwrite10(sc, SSC_WRITE);
                 break;
 
-
               case READ_CAPACITY:
                 if (desc[target_id] < 0 || sc->request_bufflen < 8) {
                         break;
                 }
-                buf = sc->request_buffer;
-
+                buf = localbuf;
                 disk_size = simscsi_get_disk_size(desc[target_id]);
 
-                /* pretend to be a 1GB disk (partition table contains real stuff): */
                 buf[0] = (disk_size >> 24) & 0xff;
                 buf[1] = (disk_size >> 16) & 0xff;
                 buf[2] = (disk_size >>  8) & 0xff;
@@ -323,13 +339,14 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
                 buf[5] = 0;
                 buf[6] = 2;
                 buf[7] = 0;
+                simscsi_fillresult(sc, buf, 8);
                 sc->result = GOOD;
                 break;
 
               case MODE_SENSE:
               case MODE_SENSE_10:
                 /* sd.c uses this to determine whether disk does write-caching. */
-                memset(sc->request_buffer, 0, 128);
+                simscsi_fillresult(sc, (char *)empty_zero_page, sc->request_bufflen);
                 sc->result = GOOD;
                 break;
 
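Note: when a scsi_cmnd arrives with use_sg != 0, sc->request_buffer no longer points at a flat buffer but at a scatterlist, so INQUIRY/READ_CAPACITY/MODE_SENSE results must be copied chunk by chunk; that is what the new simscsi_fillresult() does. A standalone sketch of the same walk, using a stand-in for the 2.6-era struct scatterlist (note this sketch also advances the source pointer between chunks):

    #include <string.h>

    struct sg_stub {        /* stand-in: real code uses struct scatterlist and
                             * computes page_address(sg->page) + sg->offset */
            char *addr;
            unsigned length;
    };

    static void copy_to_sglist(struct sg_stub *sg, int nents,
                               const char *buf, unsigned len)
    {
            for (; nents-- > 0 && len > 0; sg++) {
                    unsigned chunk = len < sg->length ? len : sg->length;

                    memcpy(sg->addr, buf, chunk);
                    buf += chunk;   /* advance the source between chunks */
                    len -= chunk;
            }
    }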
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 28a4529fdd60..7e926471e4ec 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -899,7 +899,7 @@ int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
         if ((err = iosapic_init(phys_addr, gsi_base)))
                 return err;
 
-#if CONFIG_ACPI_NUMA
+#ifdef CONFIG_ACPI_NUMA
         acpi_map_iosapic(handle, 0, NULL, NULL);
 #endif /* CONFIG_ACPI_NUMA */
 
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index ba0b6a1f429f..0741b066b98f 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -491,7 +491,7 @@ GLOBAL_ENTRY(prefetch_stack)
         ;;
         lfetch.fault [r16], 128
         br.ret.sptk.many rp
-END(prefetch_switch_stack)
+END(prefetch_stack)
 
 GLOBAL_ENTRY(execve)
         mov r15=__NR_execve                     // put syscall number in place
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 6dc726ad7137..d0a5106fba24 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1016,6 +1016,11 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
 
                         cmc_polling_enabled = 1;
                         spin_unlock(&cmc_history_lock);
+                        /* If we're being hit with CMC interrupts, we won't
+                         * ever execute the schedule_work() below.  Need to
+                         * disable CMC interrupts on this processor now.
+                         */
+                        ia64_mca_cmc_vector_disable(NULL);
                         schedule_work(&cmc_disable_work);
 
                         /*
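Note: the added call closes a livelock: a flood of corrected-error interrupts can keep this CPU in interrupt context, so the keventd work queued by schedule_work() (which disables the vector on every CPU) might never run. Disabling the vector on the interrupted CPU inline stops the flood first. The shape of the pattern, with hypothetical helper names (handler signature per 2.6-era request_irq):

    static irqreturn_t storm_aware_handler(int irq, void *arg, struct pt_regs *regs)
    {
            if (rate_exceeded()) {                   /* hypothetical check */
                    disable_vector_on_this_cpu();    /* must happen inline */
                    schedule_work(&disable_on_all_cpus); /* may not run while
                                                          * the storm lasts */
            }
            return IRQ_HANDLED;
    }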
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 499a065f4e60..db32fc1d3935 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -489,24 +489,27 @@ ia64_state_save:
         ;;
         st8 [temp1]=r17,16      // pal_min_state
         st8 [temp2]=r6,16       // prev_IA64_KR_CURRENT
+        mov r6=IA64_KR(CURRENT_STACK)
+        ;;
+        st8 [temp1]=r6,16       // prev_IA64_KR_CURRENT_STACK
+        st8 [temp2]=r0,16       // prev_task, starts off as NULL
         mov r6=cr.ifa
         ;;
-        st8 [temp1]=r0,16       // prev_task, starts off as NULL
-        st8 [temp2]=r12,16      // cr.isr
+        st8 [temp1]=r12,16      // cr.isr
+        st8 [temp2]=r6,16       // cr.ifa
         mov r12=cr.itir
         ;;
-        st8 [temp1]=r6,16       // cr.ifa
-        st8 [temp2]=r12,16      // cr.itir
+        st8 [temp1]=r12,16      // cr.itir
+        st8 [temp2]=r11,16      // cr.iipa
         mov r12=cr.iim
         ;;
-        st8 [temp1]=r11,16      // cr.iipa
-        st8 [temp2]=r12,16      // cr.iim
-        mov r6=cr.iha
+        st8 [temp1]=r12,16      // cr.iim
 (p1)    mov r12=IA64_MCA_COLD_BOOT
 (p2)    mov r12=IA64_INIT_WARM_BOOT
+        mov r6=cr.iha
         ;;
-        st8 [temp1]=r6,16       // cr.iha
-        st8 [temp2]=r12         // os_status, default is cold boot
+        st8 [temp2]=r6,16       // cr.iha
+        st8 [temp1]=r12         // os_status, default is cold boot
         mov r6=IA64_MCA_SAME_CONTEXT
         ;;
         st8 [temp1]=r6          // context, default is same context
@@ -823,9 +826,12 @@ ia64_state_restore:
         ld8 r12=[temp1],16      // sal_ra
         ld8 r9=[temp2],16       // sal_gp
         ;;
-        ld8 r22=[temp1],24      // pal_min_state, virtual.  skip prev_task
+        ld8 r22=[temp1],16      // pal_min_state, virtual
         ld8 r21=[temp2],16      // prev_IA64_KR_CURRENT
         ;;
+        ld8 r16=[temp1],16      // prev_IA64_KR_CURRENT_STACK
+        ld8 r20=[temp2],16      // prev_task
+        ;;
         ld8 temp3=[temp1],16    // cr.isr
         ld8 temp4=[temp2],16    // cr.ifa
         ;;
@@ -846,6 +852,45 @@ ia64_state_restore:
         ld8 r8=[temp1]          // os_status
         ld8 r10=[temp2]         // context
 
+        /* Wire IA64_TR_CURRENT_STACK to the stack that we are resuming to.  To
+         * avoid any dependencies on the algorithm in ia64_switch_to(), just
+         * purge any existing CURRENT_STACK mapping and insert the new one.
+         *
+         * r16 contains prev_IA64_KR_CURRENT_STACK, r21 contains
+         * prev_IA64_KR_CURRENT, these values may have been changed by the C
+         * code.  Do not use r8, r9, r10, r22, they contain values ready for
+         * the return to SAL.
+         */
+
+        mov r15=IA64_KR(CURRENT_STACK)          // physical granule mapped by IA64_TR_CURRENT_STACK
+        ;;
+        shl r15=r15,IA64_GRANULE_SHIFT
+        ;;
+        dep r15=-1,r15,61,3                     // virtual granule
+        mov r18=IA64_GRANULE_SHIFT<<2           // for cr.itir.ps
+        ;;
+        ptr.d r15,r18
+        ;;
+        srlz.d
+
+        extr.u r19=r21,61,3                     // r21 = prev_IA64_KR_CURRENT
+        shl r20=r16,IA64_GRANULE_SHIFT          // r16 = prev_IA64_KR_CURRENT_STACK
+        movl r21=PAGE_KERNEL                    // page properties
+        ;;
+        mov IA64_KR(CURRENT_STACK)=r16
+        cmp.ne p6,p0=RGN_KERNEL,r19             // new stack is in the kernel region?
+        or r21=r20,r21                          // construct PA | page properties
+(p6)    br.spnt 1f                              // the dreaded cpu 0 idle task in region 5:(
+        ;;
+        mov cr.itir=r18
+        mov cr.ifa=r21
+        mov r20=IA64_TR_CURRENT_STACK
+        ;;
+        itr.d dtr[r20]=r21
+        ;;
+        srlz.d
+1:
+
         br.sptk b0
 
 //EndStub//////////////////////////////////////////////////////////////////////
@@ -982,6 +1027,7 @@ ia64_set_kernel_registers:
         add temp4=temp4, temp1  // &struct ia64_sal_os_state.os_gp
         add r12=temp1, temp3    // kernel stack pointer on MCA/INIT stack
         add r13=temp1, r3       // set current to start of MCA/INIT stack
+        add r20=temp1, r3       // physical start of MCA/INIT stack
         ;;
         ld8 r1=[temp4]          // OS GP from SAL OS state
         ;;
@@ -991,7 +1037,35 @@ ia64_set_kernel_registers:
         ;;
         mov IA64_KR(CURRENT)=r13
 
-        // FIXME: do I need to wire IA64_KR_CURRENT_STACK and IA64_TR_CURRENT_STACK?
+        /* Wire IA64_TR_CURRENT_STACK to the MCA/INIT handler stack.  To avoid
+         * any dependencies on the algorithm in ia64_switch_to(), just purge
+         * any existing CURRENT_STACK mapping and insert the new one.
+         */
+
+        mov r16=IA64_KR(CURRENT_STACK)          // physical granule mapped by IA64_TR_CURRENT_STACK
+        ;;
+        shl r16=r16,IA64_GRANULE_SHIFT
+        ;;
+        dep r16=-1,r16,61,3                     // virtual granule
+        mov r18=IA64_GRANULE_SHIFT<<2           // for cr.itir.ps
+        ;;
+        ptr.d r16,r18
+        ;;
+        srlz.d
+
+        shr.u r16=r20,IA64_GRANULE_SHIFT        // r20 = physical start of MCA/INIT stack
+        movl r21=PAGE_KERNEL                    // page properties
+        ;;
+        mov IA64_KR(CURRENT_STACK)=r16
+        or r21=r20,r21                          // construct PA | page properties
+        ;;
+        mov cr.itir=r18
+        mov cr.ifa=r13
+        mov r20=IA64_TR_CURRENT_STACK
+        ;;
+        itr.d dtr[r20]=r21
+        ;;
+        srlz.d
 
         br.sptk b0
 
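Note: the reordered save/restore code tracks the field layout of struct ia64_sal_os_state, which gains a prev_IA64_KR_CURRENT_STACK slot and moves prev_task ahead of the cr.* values. A paraphrased excerpt reconstructed from the register comments above (the authoritative declaration lives in include/asm-ia64/mca.h and may differ in detail):

    struct task_struct;

    struct ia64_sal_os_state_excerpt {      /* paraphrase, not the real name */
            unsigned long pal_min_state;
            unsigned long prev_IA64_KR_CURRENT;
            unsigned long prev_IA64_KR_CURRENT_STACK;   /* new in this patch */
            struct task_struct *prev_task;
            unsigned long cr_isr, cr_ifa, cr_itir, cr_iipa, cr_iim, cr_iha;
            unsigned long os_status;        /* handed back to SAL */
            unsigned long context;
    };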
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index 6e683745af49..f081c60ab206 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -56,8 +56,9 @@ static struct page *page_isolate[MAX_PAGE_ISOLATE];
 static int num_page_isolate = 0;
 
 typedef enum {
-        ISOLATE_NG = 0,
-        ISOLATE_OK = 1
+        ISOLATE_NG,
+        ISOLATE_OK,
+        ISOLATE_NONE
 } isolate_status_t;
 
 /*
@@ -74,7 +75,7 @@ static struct {
  * @paddr: poisoned memory location
  *
  * Return value:
- *      ISOLATE_OK / ISOLATE_NG
+ *      one of isolate_status_t, ISOLATE_OK/NG/NONE.
  */
 
 static isolate_status_t
@@ -84,23 +85,26 @@ mca_page_isolate(unsigned long paddr)
         struct page *p;
 
         /* whether physical address is valid or not */
-        if ( !ia64_phys_addr_valid(paddr) )
-                return ISOLATE_NG;
+        if (!ia64_phys_addr_valid(paddr))
+                return ISOLATE_NONE;
+
+        if (!pfn_valid(paddr))
+                return ISOLATE_NONE;
 
         /* convert physical address to physical page number */
         p = pfn_to_page(paddr>>PAGE_SHIFT);
 
         /* check whether a page number have been already registered or not */
-        for( i = 0; i < num_page_isolate; i++ )
-                if( page_isolate[i] == p )
+        for (i = 0; i < num_page_isolate; i++)
+                if (page_isolate[i] == p)
                         return ISOLATE_OK; /* already listed */
 
         /* limitation check */
-        if( num_page_isolate == MAX_PAGE_ISOLATE )
+        if (num_page_isolate == MAX_PAGE_ISOLATE)
                 return ISOLATE_NG;
 
         /* kick pages having attribute 'SLAB' or 'Reserved' */
-        if( PageSlab(p) || PageReserved(p) )
+        if (PageSlab(p) || PageReserved(p))
                 return ISOLATE_NG;
 
         /* add attribute 'Reserved' and register the page */
@@ -122,10 +126,15 @@ mca_handler_bh(unsigned long paddr)
                current->pid, current->comm);
 
         spin_lock(&mca_bh_lock);
-        if (mca_page_isolate(paddr) == ISOLATE_OK) {
+        switch (mca_page_isolate(paddr)) {
+        case ISOLATE_OK:
                 printk(KERN_DEBUG "Page isolation: ( %lx ) success.\n", paddr);
-        } else {
+                break;
+        case ISOLATE_NG:
                 printk(KERN_DEBUG "Page isolation: ( %lx ) failure.\n", paddr);
+                break;
+        default:
+                break;
         }
         spin_unlock(&mca_bh_lock);
 
@@ -139,10 +148,10 @@ mca_handler_bh(unsigned long paddr)
  * @peidx: pointer to index of processor error section
  */
 
-static void 
+static void
 mca_make_peidx(sal_log_processor_info_t *slpi, peidx_table_t *peidx)
 {
-        /* 
+        /*
          * calculate the start address of
          *   "struct cpuid_info" and "sal_processor_static_info_t".
          */
@@ -164,7 +173,7 @@ mca_make_peidx(sal_log_processor_info_t *slpi, peidx_table_t *peidx)
 }
 
 /**
- * mca_make_slidx - Make index of SAL error record 
+ * mca_make_slidx - Make index of SAL error record
  * @buffer: pointer to SAL error record
  * @slidx: pointer to index of SAL error record
  *
@@ -172,12 +181,12 @@ mca_make_peidx(sal_log_processor_info_t *slpi, peidx_table_t *peidx)
  *      1 if record has platform error / 0 if not
  */
 #define LOG_INDEX_ADD_SECT_PTR(sect, ptr) \
-        { slidx_list_t *hl = &slidx_pool.buffer[slidx_pool.cur_idx]; \
+        {slidx_list_t *hl = &slidx_pool.buffer[slidx_pool.cur_idx]; \
         hl->hdr = ptr; \
         list_add(&hl->list, &(sect)); \
         slidx_pool.cur_idx = (slidx_pool.cur_idx + 1)%slidx_pool.max_idx; }
 
-static int 
+static int
 mca_make_slidx(void *buffer, slidx_table_t *slidx)
 {
         int platform_err = 0;
@@ -214,28 +223,36 @@ mca_make_slidx(void *buffer, slidx_table_t *slidx)
                 sp = (sal_log_section_hdr_t *)((char*)buffer + ercd_pos);
                 if (!efi_guidcmp(sp->guid, SAL_PROC_DEV_ERR_SECT_GUID)) {
                         LOG_INDEX_ADD_SECT_PTR(slidx->proc_err, sp);
-                } else if (!efi_guidcmp(sp->guid, SAL_PLAT_MEM_DEV_ERR_SECT_GUID)) {
+                } else if (!efi_guidcmp(sp->guid,
+                                SAL_PLAT_MEM_DEV_ERR_SECT_GUID)) {
                         platform_err = 1;
                         LOG_INDEX_ADD_SECT_PTR(slidx->mem_dev_err, sp);
-                } else if (!efi_guidcmp(sp->guid, SAL_PLAT_SEL_DEV_ERR_SECT_GUID)) {
+                } else if (!efi_guidcmp(sp->guid,
+                                SAL_PLAT_SEL_DEV_ERR_SECT_GUID)) {
                         platform_err = 1;
                         LOG_INDEX_ADD_SECT_PTR(slidx->sel_dev_err, sp);
-                } else if (!efi_guidcmp(sp->guid, SAL_PLAT_PCI_BUS_ERR_SECT_GUID)) {
+                } else if (!efi_guidcmp(sp->guid,
+                                SAL_PLAT_PCI_BUS_ERR_SECT_GUID)) {
                         platform_err = 1;
                         LOG_INDEX_ADD_SECT_PTR(slidx->pci_bus_err, sp);
-                } else if (!efi_guidcmp(sp->guid, SAL_PLAT_SMBIOS_DEV_ERR_SECT_GUID)) {
+                } else if (!efi_guidcmp(sp->guid,
+                                SAL_PLAT_SMBIOS_DEV_ERR_SECT_GUID)) {
                         platform_err = 1;
                         LOG_INDEX_ADD_SECT_PTR(slidx->smbios_dev_err, sp);
-                } else if (!efi_guidcmp(sp->guid, SAL_PLAT_PCI_COMP_ERR_SECT_GUID)) {
+                } else if (!efi_guidcmp(sp->guid,
+                                SAL_PLAT_PCI_COMP_ERR_SECT_GUID)) {
                         platform_err = 1;
                         LOG_INDEX_ADD_SECT_PTR(slidx->pci_comp_err, sp);
-                } else if (!efi_guidcmp(sp->guid, SAL_PLAT_SPECIFIC_ERR_SECT_GUID)) {
+                } else if (!efi_guidcmp(sp->guid,
+                                SAL_PLAT_SPECIFIC_ERR_SECT_GUID)) {
                         platform_err = 1;
                         LOG_INDEX_ADD_SECT_PTR(slidx->plat_specific_err, sp);
-                } else if (!efi_guidcmp(sp->guid, SAL_PLAT_HOST_CTLR_ERR_SECT_GUID)) {
+                } else if (!efi_guidcmp(sp->guid,
+                                SAL_PLAT_HOST_CTLR_ERR_SECT_GUID)) {
                         platform_err = 1;
                         LOG_INDEX_ADD_SECT_PTR(slidx->host_ctlr_err, sp);
-                } else if (!efi_guidcmp(sp->guid, SAL_PLAT_BUS_ERR_SECT_GUID)) {
+                } else if (!efi_guidcmp(sp->guid,
+                                SAL_PLAT_BUS_ERR_SECT_GUID)) {
                         platform_err = 1;
                         LOG_INDEX_ADD_SECT_PTR(slidx->plat_bus_err, sp);
                 } else {
@@ -253,15 +270,16 @@ mca_make_slidx(void *buffer, slidx_table_t *slidx)
  * Return value:
  *      0 on Success / -ENOMEM on Failure
  */
-static int 
+static int
 init_record_index_pools(void)
 {
         int i;
         int rec_max_size;  /* Maximum size of SAL error records */
         int sect_min_size; /* Minimum size of SAL error sections */
         /* minimum size table of each section */
         static int sal_log_sect_min_sizes[] = {
-                sizeof(sal_log_processor_info_t) + sizeof(sal_processor_static_info_t),
+                sizeof(sal_log_processor_info_t)
+                + sizeof(sal_processor_static_info_t),
                 sizeof(sal_log_mem_dev_err_info_t),
                 sizeof(sal_log_sel_dev_err_info_t),
                 sizeof(sal_log_pci_bus_err_info_t),
@@ -294,7 +312,8 @@ init_record_index_pools(void)
 
         /* - 3 - */
         slidx_pool.max_idx = (rec_max_size/sect_min_size) * 2 + 1;
-        slidx_pool.buffer = (slidx_list_t *) kmalloc(slidx_pool.max_idx * sizeof(slidx_list_t), GFP_KERNEL);
+        slidx_pool.buffer = (slidx_list_t *)
+                kmalloc(slidx_pool.max_idx * sizeof(slidx_list_t), GFP_KERNEL);
 
         return slidx_pool.buffer ? 0 : -ENOMEM;
 }
@@ -308,6 +327,7 @@ init_record_index_pools(void)
 * is_mca_global - Check whether this MCA is global or not
 * @peidx: pointer of index of processor error section
 * @pbci: pointer to pal_bus_check_info_t
+ * @sos: pointer to hand off struct between SAL and OS
 *
 * Return value:
 *      MCA_IS_LOCAL / MCA_IS_GLOBAL
@@ -317,11 +337,12 @@ static mca_type_t
 is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci,
               struct ia64_sal_os_state *sos)
 {
-        pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx);
+        pal_processor_state_info_t *psp =
+                (pal_processor_state_info_t*)peidx_psp(peidx);
 
-        /* 
+        /*
          * PAL can request a rendezvous, if the MCA has a global scope.
          * If "rz_always" flag is set, SAL requests MCA rendezvous
          * in spite of global MCA.
         * Therefore it is local MCA when rendezvous has not been requested.
         * Failed to rendezvous, the system must be down.
@@ -381,13 +402,15 @@ is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci,
 * @slidx: pointer of index of SAL error record
 * @peidx: pointer of index of processor error section
 * @pbci: pointer of pal_bus_check_info
+ * @sos: pointer to hand off struct between SAL and OS
 *
 * Return value:
 *      1 on Success / 0 on Failure
 */
 
-static int 
-recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci,
+static int
+recover_from_read_error(slidx_table_t *slidx,
+                        peidx_table_t *peidx, pal_bus_check_info_t *pbci,
                         struct ia64_sal_os_state *sos)
 {
         sal_log_mod_error_info_t *smei;
@@ -453,24 +476,28 @@ recover_from_read_error(slidx_table_t *slidx,
 * @slidx: pointer of index of SAL error record
 * @peidx: pointer of index of processor error section
 * @pbci: pointer of pal_bus_check_info
+ * @sos: pointer to hand off struct between SAL and OS
 *
 * Return value:
 *      1 on Success / 0 on Failure
 */
 
-static int 
-recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci,
+static int
+recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx,
+                            pal_bus_check_info_t *pbci,
                             struct ia64_sal_os_state *sos)
 {
         int status = 0;
-        pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx);
+        pal_processor_state_info_t *psp =
+                (pal_processor_state_info_t*)peidx_psp(peidx);
 
         if (psp->bc && pbci->eb && pbci->bsi == 0) {
                 switch(pbci->type) {
                 case 1: /* partial read */
                 case 3: /* full line(cpu) read */
                 case 9: /* I/O space read */
-                        status = recover_from_read_error(slidx, peidx, pbci, sos);
+                        status = recover_from_read_error(slidx, peidx, pbci,
+                                                         sos);
                         break;
                 case 0: /* unknown */
                 case 2: /* partial write */
@@ -481,7 +508,8 @@ recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx,
                 case 8: /* write coalescing transactions */
                 case 10: /* I/O space write */
                 case 11: /* inter-processor interrupt message(IPI) */
-                case 12: /* interrupt acknowledge or external task priority cycle */
+                case 12: /* interrupt acknowledge or
+                                external task priority cycle */
                 default:
                         break;
                 }
@@ -496,6 +524,7 @@ recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_
 * @slidx: pointer of index of SAL error record
 * @peidx: pointer of index of processor error section
 * @pbci: pointer of pal_bus_check_info
+ * @sos: pointer to hand off struct between SAL and OS
 *
 * Return value:
 *      1 on Success / 0 on Failure
@@ -509,15 +538,17 @@ recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_
 */
 
-static int 
-recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci,
+static int
+recover_from_processor_error(int platform, slidx_table_t *slidx,
+                             peidx_table_t *peidx, pal_bus_check_info_t *pbci,
                              struct ia64_sal_os_state *sos)
 {
-        pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx);
+        pal_processor_state_info_t *psp =
+                (pal_processor_state_info_t*)peidx_psp(peidx);
 
         /*
         * We cannot recover errors with other than bus_check.
         */
-        if (psp->cc || psp->rc || psp->uc) 
+        if (psp->cc || psp->rc || psp->uc)
                 return 0;
 
         /*
@@ -546,10 +577,10 @@ recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *
         *  (e.g. a load from poisoned memory)
         * This means "there are some platform errors".
         */
-        if (platform) 
+        if (platform)
                 return recover_from_platform_error(slidx, peidx, pbci, sos);
-        /* 
+        /*
         * On account of strange SAL error record, we cannot recover.
         */
         return 0;
 }
@@ -557,14 +588,14 @@ recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *
 /**
 * mca_try_to_recover - Try to recover from MCA
 * @rec: pointer to a SAL error record
+ * @sos: pointer to hand off struct between SAL and OS
 *
 * Return value:
 *      1 on Success / 0 on Failure
 */
 
-static int 
-mca_try_to_recover(void *rec,
-                   struct ia64_sal_os_state *sos)
+static int
+mca_try_to_recover(void *rec, struct ia64_sal_os_state *sos)
 {
         int platform_err;
         int n_proc_err;
@@ -588,7 +619,8 @@ mca_try_to_recover(void *rec, struct ia64_sal_os_state *sos)
         }
 
         /* Make index of processor error section */
-        mca_make_peidx((sal_log_processor_info_t*)slidx_first_entry(&slidx.proc_err)->hdr, &peidx);
+        mca_make_peidx((sal_log_processor_info_t*)
+                slidx_first_entry(&slidx.proc_err)->hdr, &peidx);
 
         /* Extract Processor BUS_CHECK[0] */
         *((u64*)&pbci) = peidx_check_info(&peidx, bus_check, 0);
@@ -598,7 +630,8 @@ mca_try_to_recover(void *rec, struct ia64_sal_os_state *sos)
                 return 0;
 
         /* Try to recover a processor error */
-        return recover_from_processor_error(platform_err, &slidx, &peidx, &pbci, sos);
+        return recover_from_processor_error(platform_err, &slidx, &peidx,
+                                            &pbci, sos);
 }
 
 /*
@@ -611,7 +644,7 @@ int __init mca_external_handler_init(void)
                 return -ENOMEM;
 
         /* register external mca handlers */
-        if (ia64_reg_MCA_extension(mca_try_to_recover)){
+        if (ia64_reg_MCA_extension(mca_try_to_recover)) {
                 printk(KERN_ERR "ia64_reg_MCA_extension failed.\n");
                 kfree(slidx_pool.buffer);
                 return -EFAULT;
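Note: ISOLATE_NONE lets mca_handler_bh() distinguish "nothing to isolate" (invalid or unmapped address) from a genuine isolation failure, and the pfn_valid() test guards pfn_to_page() against holes in a sparse physical map. The validate-before-translate shape, as a sketch (the patch passes paddr to pfn_valid(); pfn_valid() conventionally takes paddr >> PAGE_SHIFT):

    if (!ia64_phys_addr_valid(paddr))
            return ISOLATE_NONE;                /* not a physical address at all */
    if (!pfn_valid(paddr >> PAGE_SHIFT))        /* conventional form; see note above */
            return ISOLATE_NONE;                /* no struct page behind it */
    p = pfn_to_page(paddr >> PAGE_SHIFT);       /* now safe to translate */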
diff --git a/arch/ia64/kernel/mca_drv.h b/arch/ia64/kernel/mca_drv.h
index 0227b761f2c4..e2f6fa1e0ef6 100644
--- a/arch/ia64/kernel/mca_drv.h
+++ b/arch/ia64/kernel/mca_drv.h
@@ -6,7 +6,7 @@
  * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com)
  */
 /*
- * Processor error section: 
+ * Processor error section:
  *
  *  +-sal_log_processor_info_t *info-------------+
  *  | sal_log_section_hdr_t header;              |
diff --git a/arch/ia64/kernel/mca_drv_asm.S b/arch/ia64/kernel/mca_drv_asm.S
index 2d7e0217638d..3f298ee4d00c 100644
--- a/arch/ia64/kernel/mca_drv_asm.S
+++ b/arch/ia64/kernel/mca_drv_asm.S
@@ -13,45 +13,45 @@
 #include <asm/ptrace.h>
 
 GLOBAL_ENTRY(mca_handler_bhhook)
         invala                          // clear RSE ?
-        ;;                              //
-        cover                           //
-        ;;                              //
-        clrrrb                          //
+        ;;
+        cover
+        ;;
+        clrrrb
         ;;
         alloc r16=ar.pfs,0,2,1,0        // make a new frame
         ;;
         mov ar.rsc=0
         ;;
         mov r13=IA64_KR(CURRENT)        // current task pointer
         ;;
         mov r2=r13
         ;;
         addl r22=IA64_RBS_OFFSET,r2
         ;;
         mov ar.bspstore=r22
         ;;
         addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2
         ;;
         adds r2=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
         ;;
         st1 [r2]=r0                     // clear current->thread.on_ustack flag
         mov loc0=r16
         movl loc1=mca_handler_bh        // recovery C function
         ;;
         mov out0=r8                     // poisoned address
         mov b6=loc1
         ;;
         mov loc1=rp
         ;;
         ssm psr.i
         ;;
         br.call.sptk.many rp=b6         // does not return ...
         ;;
         mov ar.pfs=loc0
         mov rp=loc1
         ;;
         mov r8=r0
         br.ret.sptk.many rp
         ;;
 END(mca_handler_bhhook)
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 1650353e3f77..d71731ee5b61 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -574,7 +574,7 @@ pfm_protect_ctx_ctxsw(pfm_context_t *x)
         return 0UL;
 }
 
-static inline unsigned long
+static inline void
 pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
 {
         spin_unlock(&(x)->ctx_lock);
@@ -2218,12 +2218,13 @@ static void
 pfm_free_fd(int fd, struct file *file)
 {
         struct files_struct *files = current->files;
-        struct fdtable *fdt = files_fdtable(files);
+        struct fdtable *fdt;
 
         /*
          * there ie no fd_uninstall(), so we do it here
          */
         spin_lock(&files->file_lock);
+        fdt = files_fdtable(files);
         rcu_assign_pointer(fdt->fd[fd], NULL);
         spin_unlock(&files->file_lock);
 
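Note: the perfmon hunk closes a race with RCU-managed fdtables: a table fetched by files_fdtable() before taking file_lock can be freed by a concurrent fdtable expansion while still in use. Fetching it after the lock pins it:

    /* broken ordering: fdt may be reallocated before the lock is taken
     *      fdt = files_fdtable(files);
     *      spin_lock(&files->file_lock);
     */
    spin_lock(&files->file_lock);           /* fixed ordering */
    fdt = files_fdtable(files);             /* stable while the lock is held */
    rcu_assign_pointer(fdt->fd[fd], NULL);
    spin_unlock(&files->file_lock);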
diff --git a/arch/ia64/lib/swiotlb.c b/arch/ia64/lib/swiotlb.c
index dbc0b3e449c5..48e5ff26eb1d 100644
--- a/arch/ia64/lib/swiotlb.c
+++ b/arch/ia64/lib/swiotlb.c
@@ -49,6 +49,15 @@
  */
 #define IO_TLB_SHIFT 11
 
+#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
+
+/*
+ * Minimum IO TLB size to bother booting with.  Systems with mainly
+ * 64bit capable cards will only lightly use the swiotlb.  If we can't
+ * allocate a contiguous 1MB, we're probably in trouble anyway.
+ */
+#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
+
 int swiotlb_force;
 
 /*
@@ -123,8 +132,8 @@ swiotlb_init_with_default_size (size_t default_size)
         /*
          * Get IO TLB memory from the low pages
          */
-        io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs *
-                                               (1 << IO_TLB_SHIFT));
+        io_tlb_start = alloc_bootmem_low_pages_limit(io_tlb_nslabs *
+                                        (1 << IO_TLB_SHIFT), 0x100000000);
         if (!io_tlb_start)
                 panic("Cannot allocate SWIOTLB buffer");
         io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
@@ -154,6 +163,99 @@ swiotlb_init (void)
         swiotlb_init_with_default_size(64 * (1<<20));   /* default to 64MB */
 }
 
+/*
+ * Systems with larger DMA zones (those that don't support ISA) can
+ * initialize the swiotlb later using the slab allocator if needed.
+ * This should be just like above, but with some error catching.
+ */
+int
+swiotlb_late_init_with_default_size (size_t default_size)
+{
+        unsigned long i, req_nslabs = io_tlb_nslabs;
+        unsigned int order;
+
+        if (!io_tlb_nslabs) {
+                io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
+                io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+        }
+
+        /*
+         * Get IO TLB memory from the low pages
+         */
+        order = get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+        io_tlb_nslabs = SLABS_PER_PAGE << order;
+
+        while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
+                io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
+                                                        order);
+                if (io_tlb_start)
+                        break;
+                order--;
+        }
+
+        if (!io_tlb_start)
+                goto cleanup1;
+
+        if (order != get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT))) {
+                printk(KERN_WARNING "Warning: only able to allocate %ld MB "
+                       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
+                io_tlb_nslabs = SLABS_PER_PAGE << order;
+        }
+        io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
+        memset(io_tlb_start, 0, io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+
+        /*
+         * Allocate and initialize the free list array.  This array is used
+         * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
+         * between io_tlb_start and io_tlb_end.
+         */
+        io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
+                                      get_order(io_tlb_nslabs * sizeof(int)));
+        if (!io_tlb_list)
+                goto cleanup2;
+
+        for (i = 0; i < io_tlb_nslabs; i++)
+                io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+        io_tlb_index = 0;
+
+        io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL,
+                           get_order(io_tlb_nslabs * sizeof(char *)));
+        if (!io_tlb_orig_addr)
+                goto cleanup3;
+
+        memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *));
+
+        /*
+         * Get the overflow emergency buffer
+         */
+        io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
+                                                  get_order(io_tlb_overflow));
+        if (!io_tlb_overflow_buffer)
+                goto cleanup4;
+
+        printk(KERN_INFO "Placing %ldMB software IO TLB between 0x%lx - "
+               "0x%lx\n", (io_tlb_nslabs * (1 << IO_TLB_SHIFT)) >> 20,
+               virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
+
+        return 0;
+
+cleanup4:
+        free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs *
+                                                              sizeof(char *)));
+        io_tlb_orig_addr = NULL;
+cleanup3:
+        free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
+                                                         sizeof(int)));
+        io_tlb_list = NULL;
+        io_tlb_end = NULL;
+cleanup2:
+        free_pages((unsigned long)io_tlb_start, order);
+        io_tlb_start = NULL;
+cleanup1:
+        io_tlb_nslabs = req_nslabs;
+        return -ENOMEM;
+}
+
 static inline int
 address_needs_mapping(struct device *hwdev, dma_addr_t addr)
 {
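Note: unlike the boot-time path, the late init must carve a large physically contiguous buffer out of a live page allocator, so it retries at ever smaller orders until it reaches IO_TLB_MIN_SLABS (1MB). The shrink-until-it-fits idiom in isolation (kernel context assumed; mirrors the loop above, including not attempting min_order itself):

    static char *alloc_contig_shrinking(unsigned long bytes, unsigned int min_order,
                                        unsigned int *got_order)
    {
            unsigned int order;
            char *p = NULL;

            for (order = get_order(bytes); order > min_order; order--) {
                    /* __GFP_NOWARN: failure here is expected and handled */
                    p = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
                    if (p)
                            break;
            }
            *got_order = order;
            return p;       /* NULL if nothing above min_order could be found */
    }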
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c
index b45db5133f55..e0819ec53116 100644
--- a/arch/ia64/sn/kernel/tiocx.c
+++ b/arch/ia64/sn/kernel/tiocx.c
@@ -183,11 +183,12 @@ int cx_driver_unregister(struct cx_drv *cx_driver)
 * @part_num: device's part number
 * @mfg_num: device's manufacturer number
 * @hubdev: hub info associated with this device
+ * @bt: board type of the device
 *
 */
 int
 cx_device_register(nasid_t nasid, int part_num, int mfg_num,
-                   struct hubdev_info *hubdev)
+                   struct hubdev_info *hubdev, int bt)
 {
         struct cx_dev *cx_dev;
 
@@ -200,6 +201,7 @@ cx_device_register(nasid_t nasid, int part_num, int mfg_num,
         cx_dev->cx_id.mfg_num = mfg_num;
         cx_dev->cx_id.nasid = nasid;
         cx_dev->hubdev = hubdev;
+        cx_dev->bt = bt;
 
         cx_dev->dev.parent = NULL;
         cx_dev->dev.bus = &tiocx_bus_type;
@@ -238,7 +240,8 @@ static int cx_device_reload(struct cx_dev *cx_dev)
 {
         cx_device_unregister(cx_dev);
         return cx_device_register(cx_dev->cx_id.nasid, cx_dev->cx_id.part_num,
-                                  cx_dev->cx_id.mfg_num, cx_dev->hubdev);
+                                  cx_dev->cx_id.mfg_num, cx_dev->hubdev,
+                                  cx_dev->bt);
 }
 
 static inline uint64_t tiocx_intr_alloc(nasid_t nasid, int widget,
@@ -365,26 +368,20 @@ static void tio_corelet_reset(nasid_t nasid, int corelet)
         udelay(2000);
 }
 
-static int tiocx_btchar_get(int nasid)
+static int is_fpga_tio(int nasid, int *bt)
 {
-        moduleid_t module_id;
-        geoid_t geoid;
-        int cnodeid;
-
-        cnodeid = nasid_to_cnodeid(nasid);
-        geoid = cnodeid_get_geoid(cnodeid);
-        module_id = geo_module(geoid);
-        return MODULE_GET_BTCHAR(module_id);
-}
+        int ioboard_type;
 
-static int is_fpga_brick(int nasid)
-{
-        switch (tiocx_btchar_get(nasid)) {
+        ioboard_type = ia64_sn_sysctl_ioboard_get(nasid);
+
+        switch (ioboard_type) {
         case L1_BRICKTYPE_SA:
         case L1_BRICKTYPE_ATHENA:
-        case L1_BRICKTYPE_DAYTONA:
+        case L1_BOARDTYPE_DAYTONA:
+                *bt = ioboard_type;
                 return 1;
         }
+
         return 0;
 }
 
@@ -407,16 +404,22 @@ static int tiocx_reload(struct cx_dev *cx_dev)
 
         if (bitstream_loaded(nasid)) {
                 uint64_t cx_id;
-
-                cx_id =
-                    *(volatile uint64_t *)(TIO_SWIN_BASE(nasid, TIOCX_CORELET) +
-                                           WIDGET_ID);
-                part_num = XWIDGET_PART_NUM(cx_id);
-                mfg_num = XWIDGET_MFG_NUM(cx_id);
-                DBG("part= 0x%x, mfg= 0x%x\n", part_num, mfg_num);
-                /* just ignore it if it's a CE */
-                if (part_num == TIO_CE_ASIC_PARTNUM)
-                        return 0;
+                int rv;
+
+                rv = ia64_sn_sysctl_tio_clock_reset(nasid);
+                if (rv) {
+                        printk(KERN_ALERT "CX port JTAG reset failed.\n");
+                } else {
+                        cx_id = *(volatile uint64_t *)
+                                (TIO_SWIN_BASE(nasid, TIOCX_CORELET) +
+                                 WIDGET_ID);
+                        part_num = XWIDGET_PART_NUM(cx_id);
+                        mfg_num = XWIDGET_MFG_NUM(cx_id);
+                        DBG("part= 0x%x, mfg= 0x%x\n", part_num, mfg_num);
+                        /* just ignore it if it's a CE */
+                        if (part_num == TIO_CE_ASIC_PARTNUM)
+                                return 0;
+                }
         }
 
         cx_dev->cx_id.part_num = part_num;
@@ -436,10 +439,10 @@ static ssize_t show_cxdev_control(struct device *dev, struct device_attribute *a
 {
         struct cx_dev *cx_dev = to_cx_dev(dev);
 
-        return sprintf(buf, "0x%x 0x%x 0x%x %d\n",
+        return sprintf(buf, "0x%x 0x%x 0x%x 0x%x\n",
                        cx_dev->cx_id.nasid,
                        cx_dev->cx_id.part_num, cx_dev->cx_id.mfg_num,
-                       tiocx_btchar_get(cx_dev->cx_id.nasid));
+                       cx_dev->bt);
 }
 
 static ssize_t store_cxdev_control(struct device *dev, struct device_attribute *attr, const char *buf,
@@ -488,11 +491,12 @@ static int __init tiocx_init(void)
 
         for (cnodeid = 0; cnodeid < MAX_COMPACT_NODES; cnodeid++) {
                 nasid_t nasid;
+                int bt;
 
                 if ((nasid = cnodeid_to_nasid(cnodeid)) < 0)
                         break;  /* No more nasids .. bail out of loop */
 
-                if ((nasid & 0x1) && is_fpga_brick(nasid)) {
+                if ((nasid & 0x1) && is_fpga_tio(nasid, &bt)) {
                         struct hubdev_info *hubdev;
                         struct xwidget_info *widgetp;
 
@@ -512,7 +516,7 @@ static int __init tiocx_init(void)
 
                         if (cx_device_register
                             (nasid, widgetp->xwi_hwid.part_num,
-                             widgetp->xwi_hwid.mfg_num, hubdev) < 0)
+                             widgetp->xwi_hwid.mfg_num, hubdev, bt) < 0)
                                 return -ENXIO;
                         else
                                 found_tiocx_device++;
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_reg.c b/arch/ia64/sn/pci/pcibr/pcibr_reg.c
index 21426d02fbe6..4f718c3e93d3 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_reg.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_reg.c
@@ -8,6 +8,7 @@
 
 #include <linux/interrupt.h>
 #include <linux/types.h>
+#include <asm/sn/io.h>
 #include <asm/sn/pcibr_provider.h>
 #include <asm/sn/pcibus_provider_defs.h>
 #include <asm/sn/pcidev.h>
@@ -29,10 +30,10 @@ void pcireg_control_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits)
         if (pcibus_info) {
                 switch (pcibus_info->pbi_bridge_type) {
                 case PCIBR_BRIDGETYPE_TIOCP:
-                        ptr->tio.cp_control &= ~bits;
+                        __sn_clrq_relaxed(&ptr->tio.cp_control, bits);
                         break;
                 case PCIBR_BRIDGETYPE_PIC:
-                        ptr->pic.p_wid_control &= ~bits;
+                        __sn_clrq_relaxed(&ptr->pic.p_wid_control, bits);
                         break;
                 default:
                         panic
@@ -49,10 +50,10 @@ void pcireg_control_bit_set(struct pcibus_info *pcibus_info, uint64_t bits)
         if (pcibus_info) {
                 switch (pcibus_info->pbi_bridge_type) {
                 case PCIBR_BRIDGETYPE_TIOCP:
-                        ptr->tio.cp_control |= bits;
+                        __sn_setq_relaxed(&ptr->tio.cp_control, bits);
                         break;
                 case PCIBR_BRIDGETYPE_PIC:
-                        ptr->pic.p_wid_control |= bits;
+                        __sn_setq_relaxed(&ptr->pic.p_wid_control, bits);
                         break;
                 default:
                         panic
@@ -73,10 +74,10 @@ uint64_t pcireg_tflush_get(struct pcibus_info *pcibus_info)
         if (pcibus_info) {
                 switch (pcibus_info->pbi_bridge_type) {
                 case PCIBR_BRIDGETYPE_TIOCP:
-                        ret = ptr->tio.cp_tflush;
+                        ret = __sn_readq_relaxed(&ptr->tio.cp_tflush);
                         break;
                 case PCIBR_BRIDGETYPE_PIC:
-                        ret = ptr->pic.p_wid_tflush;
+                        ret = __sn_readq_relaxed(&ptr->pic.p_wid_tflush);
                         break;
                 default:
                         panic
@@ -103,10 +104,10 @@ uint64_t pcireg_intr_status_get(struct pcibus_info * pcibus_info)
         if (pcibus_info) {
                 switch (pcibus_info->pbi_bridge_type) {
                 case PCIBR_BRIDGETYPE_TIOCP:
-                        ret = ptr->tio.cp_int_status;
+                        ret = __sn_readq_relaxed(&ptr->tio.cp_int_status);
                         break;
                 case PCIBR_BRIDGETYPE_PIC:
-                        ret = ptr->pic.p_int_status;
+                        ret = __sn_readq_relaxed(&ptr->pic.p_int_status);
                         break;
                 default:
                         panic
@@ -127,10 +128,10 @@ void pcireg_intr_enable_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits)
         if (pcibus_info) {
                 switch (pcibus_info->pbi_bridge_type) {
                 case PCIBR_BRIDGETYPE_TIOCP:
-                        ptr->tio.cp_int_enable &= ~bits;
+                        __sn_clrq_relaxed(&ptr->tio.cp_int_enable, bits);
131 | break; | 132 | break; |
132 | case PCIBR_BRIDGETYPE_PIC: | 133 | case PCIBR_BRIDGETYPE_PIC: |
133 | ptr->pic.p_int_enable &= ~bits; | 134 | __sn_clrq_relaxed(&ptr->pic.p_int_enable, bits); |
134 | break; | 135 | break; |
135 | default: | 136 | default: |
136 | panic | 137 | panic |
@@ -147,10 +148,10 @@ void pcireg_intr_enable_bit_set(struct pcibus_info *pcibus_info, uint64_t bits) | |||
147 | if (pcibus_info) { | 148 | if (pcibus_info) { |
148 | switch (pcibus_info->pbi_bridge_type) { | 149 | switch (pcibus_info->pbi_bridge_type) { |
149 | case PCIBR_BRIDGETYPE_TIOCP: | 150 | case PCIBR_BRIDGETYPE_TIOCP: |
150 | ptr->tio.cp_int_enable |= bits; | 151 | __sn_setq_relaxed(&ptr->tio.cp_int_enable, bits); |
151 | break; | 152 | break; |
152 | case PCIBR_BRIDGETYPE_PIC: | 153 | case PCIBR_BRIDGETYPE_PIC: |
153 | ptr->pic.p_int_enable |= bits; | 154 | __sn_setq_relaxed(&ptr->pic.p_int_enable, bits); |
154 | break; | 155 | break; |
155 | default: | 156 | default: |
156 | panic | 157 | panic |
@@ -171,14 +172,16 @@ void pcireg_intr_addr_addr_set(struct pcibus_info *pcibus_info, int int_n, | |||
171 | if (pcibus_info) { | 172 | if (pcibus_info) { |
172 | switch (pcibus_info->pbi_bridge_type) { | 173 | switch (pcibus_info->pbi_bridge_type) { |
173 | case PCIBR_BRIDGETYPE_TIOCP: | 174 | case PCIBR_BRIDGETYPE_TIOCP: |
174 | ptr->tio.cp_int_addr[int_n] &= ~TIOCP_HOST_INTR_ADDR; | 175 | __sn_clrq_relaxed(&ptr->tio.cp_int_addr[int_n], |
175 | ptr->tio.cp_int_addr[int_n] |= | 176 | TIOCP_HOST_INTR_ADDR); |
176 | (addr & TIOCP_HOST_INTR_ADDR); | 177 | __sn_setq_relaxed(&ptr->tio.cp_int_addr[int_n], |
178 | (addr & TIOCP_HOST_INTR_ADDR)); | ||
177 | break; | 179 | break; |
178 | case PCIBR_BRIDGETYPE_PIC: | 180 | case PCIBR_BRIDGETYPE_PIC: |
179 | ptr->pic.p_int_addr[int_n] &= ~PIC_HOST_INTR_ADDR; | 181 | __sn_clrq_relaxed(&ptr->pic.p_int_addr[int_n], |
180 | ptr->pic.p_int_addr[int_n] |= | 182 | PIC_HOST_INTR_ADDR); |
181 | (addr & PIC_HOST_INTR_ADDR); | 183 | __sn_setq_relaxed(&ptr->pic.p_int_addr[int_n], |
184 | (addr & PIC_HOST_INTR_ADDR)); | ||
182 | break; | 185 | break; |
183 | default: | 186 | default: |
184 | panic | 187 | panic |
@@ -198,10 +201,10 @@ void pcireg_force_intr_set(struct pcibus_info *pcibus_info, int int_n) | |||
198 | if (pcibus_info) { | 201 | if (pcibus_info) { |
199 | switch (pcibus_info->pbi_bridge_type) { | 202 | switch (pcibus_info->pbi_bridge_type) { |
200 | case PCIBR_BRIDGETYPE_TIOCP: | 203 | case PCIBR_BRIDGETYPE_TIOCP: |
201 | ptr->tio.cp_force_pin[int_n] = 1; | 204 | writeq(1, &ptr->tio.cp_force_pin[int_n]); |
202 | break; | 205 | break; |
203 | case PCIBR_BRIDGETYPE_PIC: | 206 | case PCIBR_BRIDGETYPE_PIC: |
204 | ptr->pic.p_force_pin[int_n] = 1; | 207 | writeq(1, &ptr->pic.p_force_pin[int_n]); |
205 | break; | 208 | break; |
206 | default: | 209 | default: |
207 | panic | 210 | panic |
@@ -222,10 +225,12 @@ uint64_t pcireg_wrb_flush_get(struct pcibus_info *pcibus_info, int device) | |||
222 | if (pcibus_info) { | 225 | if (pcibus_info) { |
223 | switch (pcibus_info->pbi_bridge_type) { | 226 | switch (pcibus_info->pbi_bridge_type) { |
224 | case PCIBR_BRIDGETYPE_TIOCP: | 227 | case PCIBR_BRIDGETYPE_TIOCP: |
225 | ret = ptr->tio.cp_wr_req_buf[device]; | 228 | ret = |
229 | __sn_readq_relaxed(&ptr->tio.cp_wr_req_buf[device]); | ||
226 | break; | 230 | break; |
227 | case PCIBR_BRIDGETYPE_PIC: | 231 | case PCIBR_BRIDGETYPE_PIC: |
228 | ret = ptr->pic.p_wr_req_buf[device]; | 232 | ret = |
233 | __sn_readq_relaxed(&ptr->pic.p_wr_req_buf[device]); | ||
229 | break; | 234 | break; |
230 | default: | 235 | default: |
231 | panic("pcireg_wrb_flush_get: unknown bridgetype bridge 0x%p", (void *)ptr); | 236 | panic("pcireg_wrb_flush_get: unknown bridgetype bridge 0x%p", (void *)ptr); |
@@ -244,10 +249,10 @@ void pcireg_int_ate_set(struct pcibus_info *pcibus_info, int ate_index, | |||
244 | if (pcibus_info) { | 249 | if (pcibus_info) { |
245 | switch (pcibus_info->pbi_bridge_type) { | 250 | switch (pcibus_info->pbi_bridge_type) { |
246 | case PCIBR_BRIDGETYPE_TIOCP: | 251 | case PCIBR_BRIDGETYPE_TIOCP: |
247 | ptr->tio.cp_int_ate_ram[ate_index] = (uint64_t) val; | 252 | writeq(val, &ptr->tio.cp_int_ate_ram[ate_index]); |
248 | break; | 253 | break; |
249 | case PCIBR_BRIDGETYPE_PIC: | 254 | case PCIBR_BRIDGETYPE_PIC: |
250 | ptr->pic.p_int_ate_ram[ate_index] = (uint64_t) val; | 255 | writeq(val, &ptr->pic.p_int_ate_ram[ate_index]); |
251 | break; | 256 | break; |
252 | default: | 257 | default: |
253 | panic | 258 | panic |
@@ -265,12 +270,10 @@ uint64_t *pcireg_int_ate_addr(struct pcibus_info *pcibus_info, int ate_index) | |||
265 | if (pcibus_info) { | 270 | if (pcibus_info) { |
266 | switch (pcibus_info->pbi_bridge_type) { | 271 | switch (pcibus_info->pbi_bridge_type) { |
267 | case PCIBR_BRIDGETYPE_TIOCP: | 272 | case PCIBR_BRIDGETYPE_TIOCP: |
268 | ret = | 273 | ret = &ptr->tio.cp_int_ate_ram[ate_index]; |
269 | (uint64_t *) & (ptr->tio.cp_int_ate_ram[ate_index]); | ||
270 | break; | 274 | break; |
271 | case PCIBR_BRIDGETYPE_PIC: | 275 | case PCIBR_BRIDGETYPE_PIC: |
272 | ret = | 276 | ret = &ptr->pic.p_int_ate_ram[ate_index]; |
273 | (uint64_t *) & (ptr->pic.p_int_ate_ram[ate_index]); | ||
274 | break; | 277 | break; |
275 | default: | 278 | default: |
276 | panic | 279 | panic |
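The bulk of pcibr_reg.c trades direct dereferences of volatile bridge registers for explicit accessors. Judging by how every `|=` becomes __sn_setq_relaxed(addr, bits) and every `&= ~` becomes __sn_clrq_relaxed(addr, bits), the helpers plausibly expand to an uncached 64-bit read-modify-write; the sketch below models that assumption in userspace (my_readq/my_writeq stand in for the kernel's readq()/writeq(), and these are not the actual <asm/sn/io.h> definitions). It also demonstrates why the clear helper must be handed the bits themselves, never their complement:

#include <assert.h>
#include <stdint.h>

static uint64_t my_readq(volatile uint64_t *addr)		{ return *addr; }
static void my_writeq(uint64_t val, volatile uint64_t *addr)	{ *addr = val; }

/* Assumed shape of the sn2 helpers: read, modify, write back. */
#define my_setq_relaxed(addr, val) \
	my_writeq(my_readq(addr) | (val), (addr))
#define my_clrq_relaxed(addr, val) \
	my_writeq(my_readq(addr) & ~(val), (addr))

int main(void)
{
	volatile uint64_t reg;
	uint64_t bits = 0x30;

	reg = 0xff;
	my_clrq_relaxed(&reg, bits);	/* clears only the named bits */
	assert(reg == 0xcf);

	/* Passing ~bits double-negates: reg & ~(~bits) == reg & bits,
	 * i.e. every bit *except* the requested ones gets cleared --
	 * which is why both bridge branches pass plain bits. */
	reg = 0xff;
	my_clrq_relaxed(&reg, ~bits);
	assert(reg == 0x30);

	reg = 0x0f;
	my_setq_relaxed(&reg, bits);	/* sets the named bits */
	assert(reg == 0x3f);
	return 0;
}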
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c index 19bced34d5f1..46b646a6d345 100644 --- a/arch/ia64/sn/pci/tioca_provider.c +++ b/arch/ia64/sn/pci/tioca_provider.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/pci.h> | 11 | #include <linux/pci.h> |
12 | #include <asm/sn/sn_sal.h> | 12 | #include <asm/sn/sn_sal.h> |
13 | #include <asm/sn/addrs.h> | 13 | #include <asm/sn/addrs.h> |
14 | #include <asm/sn/io.h> | ||
14 | #include <asm/sn/pcidev.h> | 15 | #include <asm/sn/pcidev.h> |
15 | #include <asm/sn/pcibus_provider_defs.h> | 16 | #include <asm/sn/pcibus_provider_defs.h> |
16 | #include <asm/sn/tioca_provider.h> | 17 | #include <asm/sn/tioca_provider.h> |
@@ -37,7 +38,7 @@ tioca_gart_init(struct tioca_kernel *tioca_kern) | |||
37 | uint64_t offset; | 38 | uint64_t offset; |
38 | struct page *tmp; | 39 | struct page *tmp; |
39 | struct tioca_common *tioca_common; | 40 | struct tioca_common *tioca_common; |
40 | volatile struct tioca *ca_base; | 41 | struct tioca *ca_base; |
41 | 42 | ||
42 | tioca_common = tioca_kern->ca_common; | 43 | tioca_common = tioca_kern->ca_common; |
43 | ca_base = (struct tioca *)tioca_common->ca_common.bs_base; | 44 | ca_base = (struct tioca *)tioca_common->ca_common.bs_base; |
@@ -174,27 +175,29 @@ tioca_gart_init(struct tioca_kernel *tioca_kern) | |||
174 | * DISABLE GART PREFETCHING due to hw bug tracked in SGI PV930029 | 175 | * DISABLE GART PREFETCHING due to hw bug tracked in SGI PV930029 |
175 | */ | 176 | */ |
176 | 177 | ||
177 | ca_base->ca_control1 |= CA_AGPDMA_OP_ENB_COMBDELAY; /* PV895469 ? */ | 178 | __sn_setq_relaxed(&ca_base->ca_control1, |
178 | ca_base->ca_control2 &= ~(CA_GART_MEM_PARAM); | 179 | CA_AGPDMA_OP_ENB_COMBDELAY); /* PV895469 ? */ |
179 | ca_base->ca_control2 |= (0x2ull << CA_GART_MEM_PARAM_SHFT); | 180 | __sn_clrq_relaxed(&ca_base->ca_control2, CA_GART_MEM_PARAM); |
181 | __sn_setq_relaxed(&ca_base->ca_control2, | ||
182 | (0x2ull << CA_GART_MEM_PARAM_SHFT)); | ||
180 | tioca_kern->ca_gart_iscoherent = 1; | 183 | tioca_kern->ca_gart_iscoherent = 1; |
181 | ca_base->ca_control2 &= | 184 | __sn_clrq_relaxed(&ca_base->ca_control2, |
182 | ~(CA_GART_WR_PREFETCH_ENB | CA_GART_RD_PREFETCH_ENB); | 185 | (CA_GART_WR_PREFETCH_ENB | CA_GART_RD_PREFETCH_ENB)); |
183 | 186 | ||
184 | /* | 187 | /* |
185 | * Unmask GART fetch error interrupts. Clear residual errors first. | 188 | * Unmask GART fetch error interrupts. Clear residual errors first. |
186 | */ | 189 | */ |
187 | 190 | ||
188 | ca_base->ca_int_status_alias = CA_GART_FETCH_ERR; | 191 | writeq(CA_GART_FETCH_ERR, &ca_base->ca_int_status_alias); |
189 | ca_base->ca_mult_error_alias = CA_GART_FETCH_ERR; | 192 | writeq(CA_GART_FETCH_ERR, &ca_base->ca_mult_error_alias); |
190 | ca_base->ca_int_mask &= ~CA_GART_FETCH_ERR; | 193 | __sn_clrq_relaxed(&ca_base->ca_int_mask, CA_GART_FETCH_ERR); |
191 | 194 | ||
192 | /* | 195 | /* |
193 | * Program the aperature and gart registers in TIOCA | 196 | * Program the aperature and gart registers in TIOCA |
194 | */ | 197 | */ |
195 | 198 | ||
196 | ca_base->ca_gart_aperature = ap_reg; | 199 | writeq(ap_reg, &ca_base->ca_gart_aperature); |
197 | ca_base->ca_gart_ptr_table = tioca_kern->ca_gart_coretalk_addr | 1; | 200 | writeq(tioca_kern->ca_gart_coretalk_addr|1, &ca_base->ca_gart_ptr_table); |
198 | 201 | ||
199 | return 0; | 202 | return 0; |
200 | } | 203 | } |
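tioca_gart_init() keeps plain writeq() for the write-only alias and pointer registers while the maskable control registers go through the set/clr helpers. The ordering around the error plumbing matters: residual latched errors are cleared through the alias registers before the interrupt is unmasked. A toy model of that ordering; the write-1-to-clear semantics and the bit position are modeled by hand, not taken from the CA headers:

#include <assert.h>
#include <stdint.h>

#define GART_FETCH_ERR	(1ULL << 7)	/* illustrative bit position only */

/* Toy model of the CA error plumbing: a latched status register with a
 * write-1-to-clear alias, and a mask register where 1 = masked. */
struct ca_model {
	uint64_t int_status;
	uint64_t int_mask;
};

static void w1c_alias_write(struct ca_model *ca, uint64_t bits)
{
	ca->int_status &= ~bits;	/* writing 1s clears latched bits */
}

int main(void)
{
	struct ca_model ca = { .int_status = GART_FETCH_ERR, .int_mask = ~0ULL };

	/* Same ordering as tioca_gart_init(): clear residual errors via
	 * the alias registers first, then unmask.  Unmasking first could
	 * deliver an interrupt for an error latched long before now. */
	w1c_alias_write(&ca, GART_FETCH_ERR);
	ca.int_mask &= ~GART_FETCH_ERR;

	assert((ca.int_status & GART_FETCH_ERR) == 0);
	return 0;
}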
@@ -211,7 +214,6 @@ void | |||
211 | tioca_fastwrite_enable(struct tioca_kernel *tioca_kern) | 214 | tioca_fastwrite_enable(struct tioca_kernel *tioca_kern) |
212 | { | 215 | { |
213 | int cap_ptr; | 216 | int cap_ptr; |
214 | uint64_t ca_control1; | ||
215 | uint32_t reg; | 217 | uint32_t reg; |
216 | struct tioca *tioca_base; | 218 | struct tioca *tioca_base; |
217 | struct pci_dev *pdev; | 219 | struct pci_dev *pdev; |
@@ -256,9 +258,7 @@ tioca_fastwrite_enable(struct tioca_kernel *tioca_kern) | |||
256 | */ | 258 | */ |
257 | 259 | ||
258 | tioca_base = (struct tioca *)common->ca_common.bs_base; | 260 | tioca_base = (struct tioca *)common->ca_common.bs_base; |
259 | ca_control1 = tioca_base->ca_control1; | 261 | __sn_setq_relaxed(&tioca_base->ca_control1, CA_AGP_FW_ENABLE); |
260 | ca_control1 |= CA_AGP_FW_ENABLE; | ||
261 | tioca_base->ca_control1 = ca_control1; | ||
262 | } | 262 | } |
263 | 263 | ||
264 | EXPORT_SYMBOL(tioca_fastwrite_enable); /* used by agp-sgi */ | 264 | EXPORT_SYMBOL(tioca_fastwrite_enable); /* used by agp-sgi */ |
@@ -345,7 +345,7 @@ tioca_dma_d48(struct pci_dev *pdev, uint64_t paddr) | |||
345 | return 0; | 345 | return 0; |
346 | } | 346 | } |
347 | 347 | ||
348 | agp_dma_extn = ca_base->ca_agp_dma_addr_extn; | 348 | agp_dma_extn = __sn_readq_relaxed(&ca_base->ca_agp_dma_addr_extn); |
349 | if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) { | 349 | if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) { |
350 | printk(KERN_ERR "%s: coretalk upper node (%u) " | 350 | printk(KERN_ERR "%s: coretalk upper node (%u) " |
351 | "mismatch with ca_agp_dma_addr_extn (%lu)\n", | 351 | "mismatch with ca_agp_dma_addr_extn (%lu)\n", |
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c index 8e75db2b825d..9f03d4e5121c 100644 --- a/arch/ia64/sn/pci/tioce_provider.c +++ b/arch/ia64/sn/pci/tioce_provider.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/pci.h> | 11 | #include <linux/pci.h> |
12 | #include <asm/sn/sn_sal.h> | 12 | #include <asm/sn/sn_sal.h> |
13 | #include <asm/sn/addrs.h> | 13 | #include <asm/sn/addrs.h> |
14 | #include <asm/sn/io.h> | ||
14 | #include <asm/sn/pcidev.h> | 15 | #include <asm/sn/pcidev.h> |
15 | #include <asm/sn/pcibus_provider_defs.h> | 16 | #include <asm/sn/pcibus_provider_defs.h> |
16 | #include <asm/sn/tioce_provider.h> | 17 | #include <asm/sn/tioce_provider.h> |
@@ -227,7 +228,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, | |||
227 | 228 | ||
228 | ate = ATE_MAKE(addr, pagesize); | 229 | ate = ATE_MAKE(addr, pagesize); |
229 | ate_shadow[i + j] = ate; | 230 | ate_shadow[i + j] = ate; |
230 | ate_reg[i + j] = ate; | 231 | writeq(ate, &ate_reg[i + j]); |
231 | addr += pagesize; | 232 | addr += pagesize; |
232 | } | 233 | } |
233 | 234 | ||
@@ -268,10 +269,10 @@ tioce_dma_d32(struct pci_dev *pdev, uint64_t ct_addr) | |||
268 | pcidev_to_tioce(pdev, &ce_mmr, &ce_kern, &port); | 269 | pcidev_to_tioce(pdev, &ce_mmr, &ce_kern, &port); |
269 | 270 | ||
270 | if (ce_kern->ce_port[port].dirmap_refcnt == 0) { | 271 | if (ce_kern->ce_port[port].dirmap_refcnt == 0) { |
271 | volatile uint64_t tmp; | 272 | uint64_t tmp; |
272 | 273 | ||
273 | ce_kern->ce_port[port].dirmap_shadow = ct_upper; | 274 | ce_kern->ce_port[port].dirmap_shadow = ct_upper; |
274 | ce_mmr->ce_ure_dir_map[port] = ct_upper; | 275 | writeq(ct_upper, &ce_mmr->ce_ure_dir_map[port]); |
275 | tmp = ce_mmr->ce_ure_dir_map[port]; | 276 | tmp = ce_mmr->ce_ure_dir_map[port]; |
276 | dma_ok = 1; | 277 | dma_ok = 1; |
277 | } else | 278 | } else |
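tioce_dma_d32() pairs the writeq() with an immediate readback of ce_ure_dir_map[port]: MMIO writes can be posted, so the read forces the write out to the bridge before dma_ok is declared. A sketch of the idiom, with plain memory standing in for the uncached register:

#include <stdint.h>

/* Stand-in for the CE's ce_ure_dir_map registers.  In the kernel the
 * pointer targets uncached MMIO space, which is why the patch can drop
 * `volatile` from the local tmp without losing the readback: the
 * volatile access lives on the register, not on the temporary. */
static volatile uint64_t ure_dir_map[4];

static void dirmap_program(int port, uint64_t ct_upper)
{
	uint64_t tmp;

	ure_dir_map[port] = ct_upper;	/* writeq() in the driver: may be posted */
	tmp = ure_dir_map[port];	/* PIO readback flushes the posted write */
	(void)tmp;
}

int main(void)
{
	dirmap_program(0, 0xc0000000ULL);	/* hypothetical ct_upper value */
	return 0;
}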
@@ -343,7 +344,7 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir) | |||
343 | if (TIOCE_D32_ADDR(bus_addr)) { | 344 | if (TIOCE_D32_ADDR(bus_addr)) { |
344 | if (--ce_kern->ce_port[port].dirmap_refcnt == 0) { | 345 | if (--ce_kern->ce_port[port].dirmap_refcnt == 0) { |
345 | ce_kern->ce_port[port].dirmap_shadow = 0; | 346 | ce_kern->ce_port[port].dirmap_shadow = 0; |
346 | ce_mmr->ce_ure_dir_map[port] = 0; | 347 | writeq(0, &ce_mmr->ce_ure_dir_map[port]); |
347 | } | 348 | } |
348 | } else { | 349 | } else { |
349 | struct tioce_dmamap *map; | 350 | struct tioce_dmamap *map; |
@@ -582,18 +583,18 @@ tioce_kern_init(struct tioce_common *tioce_common) | |||
582 | */ | 583 | */ |
583 | 584 | ||
584 | tioce_mmr = (struct tioce *)tioce_common->ce_pcibus.bs_base; | 585 | tioce_mmr = (struct tioce *)tioce_common->ce_pcibus.bs_base; |
585 | tioce_mmr->ce_ure_page_map &= ~CE_URE_PAGESIZE_MASK; | 586 | __sn_clrq_relaxed(&tioce_mmr->ce_ure_page_map, CE_URE_PAGESIZE_MASK); |
586 | tioce_mmr->ce_ure_page_map |= CE_URE_256K_PAGESIZE; | 587 | __sn_setq_relaxed(&tioce_mmr->ce_ure_page_map, CE_URE_256K_PAGESIZE); |
587 | tioce_kern->ce_ate3240_pagesize = KB(256); | 588 | tioce_kern->ce_ate3240_pagesize = KB(256); |
588 | 589 | ||
589 | for (i = 0; i < TIOCE_NUM_M40_ATES; i++) { | 590 | for (i = 0; i < TIOCE_NUM_M40_ATES; i++) { |
590 | tioce_kern->ce_ate40_shadow[i] = 0; | 591 | tioce_kern->ce_ate40_shadow[i] = 0; |
591 | tioce_mmr->ce_ure_ate40[i] = 0; | 592 | writeq(0, &tioce_mmr->ce_ure_ate40[i]); |
592 | } | 593 | } |
593 | 594 | ||
594 | for (i = 0; i < TIOCE_NUM_M3240_ATES; i++) { | 595 | for (i = 0; i < TIOCE_NUM_M3240_ATES; i++) { |
595 | tioce_kern->ce_ate3240_shadow[i] = 0; | 596 | tioce_kern->ce_ate3240_shadow[i] = 0; |
596 | tioce_mmr->ce_ure_ate3240[i] = 0; | 597 | writeq(0, &tioce_mmr->ce_ure_ate3240[i]); |
597 | } | 598 | } |
598 | 599 | ||
599 | return tioce_kern; | 600 | return tioce_kern; |
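tioce_kern_init() zeroes each ATE through writeq() and mirrors every value in a kernel-RAM shadow array, the same pairing tioce_alloc_map() uses when it programs an entry, so later lookups never issue a PIO read. A sketch of the shadow pattern under those assumptions:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define NUM_ATES 16	/* illustrative; the CE has far more 3240 ATEs */

static uint64_t ate_shadow[NUM_ATES];		/* cheap kernel-RAM mirror */
static volatile uint64_t ate_ram[NUM_ATES];	/* stand-in for the MMIO ATE ram */

/* Every update writes both copies; every lookup reads only the shadow,
 * so the slow (or effectively write-only) register ram is never read. */
static void ate_set(size_t i, uint64_t ate)
{
	ate_shadow[i] = ate;
	ate_ram[i] = ate;	/* writeq(ate, &ate_reg[i]) in the driver */
}

int main(void)
{
	for (size_t i = 0; i < NUM_ATES; i++)
		ate_set(i, 0);		/* init: zero both, as in tioce_kern_init() */

	ate_set(3, 0xdeadbeefULL);
	assert(ate_shadow[3] == 0xdeadbeefULL);	/* lookup hits the shadow only */
	return 0;
}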
@@ -665,7 +666,7 @@ tioce_force_interrupt(struct sn_irq_info *sn_irq_info) | |||
665 | default: | 666 | default: |
666 | return; | 667 | return; |
667 | } | 668 | } |
668 | ce_mmr->ce_adm_force_int = force_int_val; | 669 | writeq(force_int_val, &ce_mmr->ce_adm_force_int); |
669 | } | 670 | } |
670 | 671 | ||
671 | /** | 672 | /** |
@@ -686,6 +687,7 @@ tioce_target_interrupt(struct sn_irq_info *sn_irq_info) | |||
686 | struct tioce_common *ce_common; | 687 | struct tioce_common *ce_common; |
687 | struct tioce *ce_mmr; | 688 | struct tioce *ce_mmr; |
688 | int bit; | 689 | int bit; |
690 | uint64_t vector; | ||
689 | 691 | ||
690 | pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; | 692 | pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; |
691 | if (!pcidev_info) | 693 | if (!pcidev_info) |
@@ -696,11 +698,11 @@ tioce_target_interrupt(struct sn_irq_info *sn_irq_info) | |||
696 | 698 | ||
697 | bit = sn_irq_info->irq_int_bit; | 699 | bit = sn_irq_info->irq_int_bit; |
698 | 700 | ||
699 | ce_mmr->ce_adm_int_mask |= (1UL << bit); | 701 | __sn_setq_relaxed(&ce_mmr->ce_adm_int_mask, (1UL << bit)); |
700 | ce_mmr->ce_adm_int_dest[bit] = | 702 | vector = (uint64_t)sn_irq_info->irq_irq << INTR_VECTOR_SHFT; |
701 | ((uint64_t)sn_irq_info->irq_irq << INTR_VECTOR_SHFT) | | 703 | vector |= sn_irq_info->irq_xtalkaddr; |
702 | sn_irq_info->irq_xtalkaddr; | 704 | writeq(vector, &ce_mmr->ce_adm_int_dest[bit]); |
703 | ce_mmr->ce_adm_int_mask &= ~(1UL << bit); | 705 | __sn_clrq_relaxed(&ce_mmr->ce_adm_int_mask, (1UL << bit)); |
704 | 706 | ||
705 | tioce_force_interrupt(sn_irq_info); | 707 | tioce_force_interrupt(sn_irq_info); |
706 | } | 708 | } |
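tioce_target_interrupt() masks the interrupt bit, composes the destination word in a local, writes it with a single writeq(), and unmasks. A sketch of that sequence; INTR_VECTOR_SHFT and the values in main() are illustrative, and plain volatile stores stand in for the set/clr helpers and writeq():

#include <stdint.h>

#define INTR_VECTOR_SHFT 56	/* illustrative; the real shift is in the CE headers */

static volatile uint64_t adm_int_mask;
static volatile uint64_t adm_int_dest[64];

/* Composing the destination word in a local first means the masked
 * window contains exactly one register write, rather than the in-place
 * OR-then-OR the old code performed on the live register. */
static void retarget_int(int bit, uint64_t irq, uint64_t xtalkaddr)
{
	uint64_t vector;

	adm_int_mask |= (1UL << bit);		/* mask while we reprogram */
	vector = irq << INTR_VECTOR_SHFT;
	vector |= xtalkaddr;
	adm_int_dest[bit] = vector;		/* single writeq() in the driver */
	adm_int_mask &= ~(1UL << bit);		/* unmask */
}

int main(void)
{
	retarget_int(5, 0x4e, 0x800000000ULL);	/* hypothetical vector/address */
	return 0;
}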