author    Tony Luck <tony.luck@intel.com>  2005-10-20 13:41:44 -0400
committer Tony Luck <tony.luck@intel.com>  2005-10-20 13:41:44 -0400
commit    9cec58dc138d6fcad9f447a19c8ff69f6540e667 (patch)
tree      4fe1cca94fdba8b705c87615bee06d3346f687ce /arch/ia64
parent    17e5ad6c0ce5a970e2830d0de8bdd60a2f077d38 (diff)
parent    ac9b9c667c2e1194e22ebe0a441ae1c37aaa9b90 (diff)
Update from upstream, with a manual merge of Yasunori Goto's changes to
swiotlb.c made in commit 281dd25cdc0d6903929b79183816d151ea626341, since that
file has moved from arch/ia64/lib/swiotlb.c to lib/swiotlb.c.

Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig                 5
-rw-r--r--  arch/ia64/hp/sim/simscsi.c       29
-rw-r--r--  arch/ia64/kernel/acpi.c           2
-rw-r--r--  arch/ia64/kernel/entry.S          2
-rw-r--r--  arch/ia64/kernel/mca.c            5
-rw-r--r--  arch/ia64/kernel/mca_asm.S       96
-rw-r--r--  arch/ia64/kernel/mca_drv.c      135
-rw-r--r--  arch/ia64/kernel/mca_drv.h        2
-rw-r--r--  arch/ia64/kernel/mca_drv_asm.S   48
-rw-r--r--  arch/ia64/kernel/perfmon.c        5
-rw-r--r--  arch/ia64/lib/Makefile            1
-rw-r--r--  arch/ia64/lib/dec_and_lock.c     42
12 files changed, 227 insertions, 145 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index ea4a889d8196..8f699a2e7981 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -302,11 +302,6 @@ config PREEMPT
 
 source "mm/Kconfig"
 
-config HAVE_DEC_LOCK
-        bool
-        depends on (SMP || PREEMPT)
-        default y
-
 config IA32_SUPPORT
         bool "Support for Linux/x86 binaries"
         help
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index 56405dbfd739..a18983a3c934 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -233,6 +233,23 @@ simscsi_readwrite10 (struct scsi_cmnd *sc, int mode)
         simscsi_readwrite(sc, mode, offset, ((sc->cmnd[7] << 8) | sc->cmnd[8])*512);
 }
 
+static void simscsi_fillresult(struct scsi_cmnd *sc, char *buf, unsigned len)
+{
+
+        int scatterlen = sc->use_sg;
+        struct scatterlist *slp;
+
+        if (scatterlen == 0)
+                memcpy(sc->request_buffer, buf, len);
+        else for (slp = (struct scatterlist *)sc->buffer; scatterlen-- > 0 && len > 0; slp++) {
+                unsigned thislen = min(len, slp->length);
+
+                memcpy(page_address(slp->page) + slp->offset, buf, thislen);
+                slp++;
+                len -= thislen;
+        }
+}
+
 static int
 simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 {
@@ -240,6 +257,7 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
         char fname[MAX_ROOT_LEN+16];
         size_t disk_size;
         char *buf;
+        char localbuf[36];
 #if DEBUG_SIMSCSI
         register long sp asm ("sp");
 
@@ -263,7 +281,7 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
                         /* disk doesn't exist... */
                         break;
                 }
-                buf = sc->request_buffer;
+                buf = localbuf;
                 buf[0] = 0;     /* magnetic disk */
                 buf[1] = 0;     /* not a removable medium */
                 buf[2] = 2;     /* SCSI-2 compliant device */
@@ -273,6 +291,7 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
                 buf[6] = 0;     /* reserved */
                 buf[7] = 0;     /* various flags */
                 memcpy(buf + 8, "HP      SIMULATED DISK  0.00",  28);
+                simscsi_fillresult(sc, buf, 36);
                 sc->result = GOOD;
                 break;
 
@@ -304,16 +323,13 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
                 simscsi_readwrite10(sc, SSC_WRITE);
                 break;
 
-
               case READ_CAPACITY:
                 if (desc[target_id] < 0 || sc->request_bufflen < 8) {
                         break;
                 }
-                buf = sc->request_buffer;
-
+                buf = localbuf;
                 disk_size = simscsi_get_disk_size(desc[target_id]);
 
-                /* pretend to be a 1GB disk (partition table contains real stuff): */
                 buf[0] = (disk_size >> 24) & 0xff;
                 buf[1] = (disk_size >> 16) & 0xff;
                 buf[2] = (disk_size >>  8) & 0xff;
@@ -323,13 +339,14 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
                 buf[5] = 0;
                 buf[6] = 2;
                 buf[7] = 0;
+                simscsi_fillresult(sc, buf, 8);
                 sc->result = GOOD;
                 break;
 
               case MODE_SENSE:
               case MODE_SENSE_10:
                 /* sd.c uses this to determine whether disk does write-caching. */
-                memset(sc->request_buffer, 0, 128);
+                simscsi_fillresult(sc, (char *)empty_zero_page, sc->request_bufflen);
                 sc->result = GOOD;
                 break;
 
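
A note on the new simscsi_fillresult(): as committed, it advances slp both in the for-loop increment and again inside the body, and never advances buf, so only the first scatterlist entry receives correct data. The results passed by this patch (36-byte INQUIRY data, 8-byte READ_CAPACITY data, zeroed MODE_SENSE data) fit in a single entry, which is why the driver works in practice. For reference, a minimal userspace sketch of the general scatter-gather fill pattern, with a hypothetical sg_entry type standing in for the kernel's struct scatterlist:

    /* Hedged sketch; "struct sg_entry" is a stand-in for struct scatterlist,
     * with addr playing the role of page_address(slp->page) + slp->offset. */
    #include <stdio.h>
    #include <string.h>

    struct sg_entry {
            char *addr;
            unsigned length;
    };

    static void fillresult(struct sg_entry *sg, int nents, const char *buf, unsigned len)
    {
            for (; nents-- > 0 && len > 0; sg++) {  /* advance sg once per entry */
                    unsigned thislen = len < sg->length ? len : sg->length;

                    memcpy(sg->addr, buf, thislen);
                    buf += thislen;                 /* advance the source too */
                    len -= thislen;
            }
    }

    int main(void)
    {
            char a[4] = "", b[4] = "";
            struct sg_entry sg[2] = { { a, 3 }, { b, 4 } };

            fillresult(sg, 2, "HELLO", 5);
            printf("%.3s|%.2s\n", a, b);            /* prints HEL|LO */
            return 0;
    }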
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 28a4529fdd60..7e926471e4ec 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -899,7 +899,7 @@ int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
         if ((err = iosapic_init(phys_addr, gsi_base)))
                 return err;
 
-#if CONFIG_ACPI_NUMA
+#ifdef CONFIG_ACPI_NUMA
         acpi_map_iosapic(handle, 0, NULL, NULL);
 #endif                          /* CONFIG_ACPI_NUMA */
 
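
The one-character fix above replaces a value test with a definedness test, which is what CONFIG_* symbols call for. A stand-alone illustration with a generic macro name (not the kernel's config machinery): a flag defined with no value breaks "#if FLAG" outright, and "#if" on an undefined symbol also draws -Wundef warnings, while "#ifdef" handles both cases.

    #include <stdio.h>

    #define MY_FEATURE              /* defined, but with no value */

    int main(void)
    {
    #ifdef MY_FEATURE               /* correct: tests whether the macro exists */
            puts("feature compiled in");
    #endif
            /* "#if MY_FEATURE" would not compile here: the macro expands to
             * nothing, leaving "#if" with no expression to evaluate. */
            return 0;
    }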
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index ba0b6a1f429f..0741b066b98f 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -491,7 +491,7 @@ GLOBAL_ENTRY(prefetch_stack)
         ;;
         lfetch.fault [r16], 128
         br.ret.sptk.many rp
-END(prefetch_switch_stack)
+END(prefetch_stack)
 
 GLOBAL_ENTRY(execve)
         mov r15=__NR_execve                     // put syscall number in place
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 6dc726ad7137..d0a5106fba24 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1016,6 +1016,11 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
 
                 cmc_polling_enabled = 1;
                 spin_unlock(&cmc_history_lock);
+                /* If we're being hit with CMC interrupts, we won't
+                 * ever execute the schedule_work() below.  Need to
+                 * disable CMC interrupts on this processor now.
+                 */
+                ia64_mca_cmc_vector_disable(NULL);
                 schedule_work(&cmc_disable_work);
 
                 /*
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 499a065f4e60..db32fc1d3935 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -489,24 +489,27 @@ ia64_state_save:
         ;;
         st8 [temp1]=r17,16      // pal_min_state
         st8 [temp2]=r6,16       // prev_IA64_KR_CURRENT
+        mov r6=IA64_KR(CURRENT_STACK)
+        ;;
+        st8 [temp1]=r6,16       // prev_IA64_KR_CURRENT_STACK
+        st8 [temp2]=r0,16       // prev_task, starts off as NULL
         mov r6=cr.ifa
         ;;
-        st8 [temp1]=r0,16       // prev_task, starts off as NULL
-        st8 [temp2]=r12,16      // cr.isr
+        st8 [temp1]=r12,16      // cr.isr
+        st8 [temp2]=r6,16       // cr.ifa
         mov r12=cr.itir
         ;;
-        st8 [temp1]=r6,16       // cr.ifa
-        st8 [temp2]=r12,16      // cr.itir
+        st8 [temp1]=r12,16      // cr.itir
+        st8 [temp2]=r11,16      // cr.iipa
         mov r12=cr.iim
         ;;
-        st8 [temp1]=r11,16      // cr.iipa
-        st8 [temp2]=r12,16      // cr.iim
-        mov r6=cr.iha
+        st8 [temp1]=r12,16      // cr.iim
 (p1)    mov r12=IA64_MCA_COLD_BOOT
 (p2)    mov r12=IA64_INIT_WARM_BOOT
+        mov r6=cr.iha
         ;;
-        st8 [temp1]=r6,16       // cr.iha
-        st8 [temp2]=r12         // os_status, default is cold boot
+        st8 [temp2]=r6,16       // cr.iha
+        st8 [temp1]=r12         // os_status, default is cold boot
         mov r6=IA64_MCA_SAME_CONTEXT
         ;;
         st8 [temp1]=r6          // context, default is same context
@@ -823,9 +826,12 @@ ia64_state_restore:
         ld8 r12=[temp1],16      // sal_ra
         ld8 r9=[temp2],16       // sal_gp
         ;;
-        ld8 r22=[temp1],24      // pal_min_state, virtual.  skip prev_task
+        ld8 r22=[temp1],16      // pal_min_state, virtual
         ld8 r21=[temp2],16      // prev_IA64_KR_CURRENT
         ;;
+        ld8 r16=[temp1],16      // prev_IA64_KR_CURRENT_STACK
+        ld8 r20=[temp2],16      // prev_task
+        ;;
         ld8 temp3=[temp1],16    // cr.isr
         ld8 temp4=[temp2],16    // cr.ifa
         ;;
@@ -846,6 +852,45 @@ ia64_state_restore:
         ld8 r8=[temp1]          // os_status
         ld8 r10=[temp2]         // context
 
+        /* Wire IA64_TR_CURRENT_STACK to the stack that we are resuming to.  To
+         * avoid any dependencies on the algorithm in ia64_switch_to(), just
+         * purge any existing CURRENT_STACK mapping and insert the new one.
+         *
+         * r16 contains prev_IA64_KR_CURRENT_STACK, r21 contains
+         * prev_IA64_KR_CURRENT, these values may have been changed by the C
+         * code.  Do not use r8, r9, r10, r22, they contain values ready for
+         * the return to SAL.
+         */
+
+        mov r15=IA64_KR(CURRENT_STACK)          // physical granule mapped by IA64_TR_CURRENT_STACK
+        ;;
+        shl r15=r15,IA64_GRANULE_SHIFT
+        ;;
+        dep r15=-1,r15,61,3                     // virtual granule
+        mov r18=IA64_GRANULE_SHIFT<<2           // for cr.itir.ps
+        ;;
+        ptr.d r15,r18
+        ;;
+        srlz.d
+
+        extr.u r19=r21,61,3                     // r21 = prev_IA64_KR_CURRENT
+        shl r20=r16,IA64_GRANULE_SHIFT          // r16 = prev_IA64_KR_CURRENT_STACK
+        movl r21=PAGE_KERNEL                    // page properties
+        ;;
+        mov IA64_KR(CURRENT_STACK)=r16
+        cmp.ne p6,p0=RGN_KERNEL,r19             // new stack is in the kernel region?
+        or r21=r20,r21                          // construct PA | page properties
+(p6)    br.spnt 1f                              // the dreaded cpu 0 idle task in region 5:(
+        ;;
+        mov cr.itir=r18
+        mov cr.ifa=r21
+        mov r20=IA64_TR_CURRENT_STACK
+        ;;
+        itr.d dtr[r20]=r21
+        ;;
+        srlz.d
+1:
+
         br.sptk b0
 
 //EndStub//////////////////////////////////////////////////////////////////////
@@ -982,6 +1027,7 @@ ia64_set_kernel_registers:
         add temp4=temp4, temp1  // &struct ia64_sal_os_state.os_gp
         add r12=temp1, temp3    // kernel stack pointer on MCA/INIT stack
         add r13=temp1, r3       // set current to start of MCA/INIT stack
+        add r20=temp1, r3       // physical start of MCA/INIT stack
         ;;
         ld8 r1=[temp4]          // OS GP from SAL OS state
         ;;
@@ -991,7 +1037,35 @@ ia64_set_kernel_registers:
         ;;
         mov IA64_KR(CURRENT)=r13
 
-        // FIXME: do I need to wire IA64_KR_CURRENT_STACK and IA64_TR_CURRENT_STACK?
+        /* Wire IA64_TR_CURRENT_STACK to the MCA/INIT handler stack.  To avoid
+         * any dependencies on the algorithm in ia64_switch_to(), just purge
+         * any existing CURRENT_STACK mapping and insert the new one.
+         */
+
+        mov r16=IA64_KR(CURRENT_STACK)          // physical granule mapped by IA64_TR_CURRENT_STACK
+        ;;
+        shl r16=r16,IA64_GRANULE_SHIFT
+        ;;
+        dep r16=-1,r16,61,3                     // virtual granule
+        mov r18=IA64_GRANULE_SHIFT<<2           // for cr.itir.ps
+        ;;
+        ptr.d r16,r18
+        ;;
+        srlz.d
+
+        shr.u r16=r20,IA64_GRANULE_SHIFT        // r20 = physical start of MCA/INIT stack
+        movl r21=PAGE_KERNEL                    // page properties
+        ;;
+        mov IA64_KR(CURRENT_STACK)=r16
+        or r21=r20,r21                          // construct PA | page properties
+        ;;
+        mov cr.itir=r18
+        mov cr.ifa=r13
+        mov r20=IA64_TR_CURRENT_STACK
+        ;;
+        itr.d dtr[r20]=r21
+        ;;
+        srlz.d
 
         br.sptk b0
 
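
The address arithmetic shared by the two stubs above can be modeled in C: the kernel register holds a granule number, shifting it left by IA64_GRANULE_SHIFT yields the physical address, and dep r16=-1,r16,61,3 deposits all-ones into bits 61-63 to form the region-7 identity-mapped virtual address (extr.u recovers the region bits). A sketch assuming the usual 16MB granule (IA64_GRANULE_SHIFT of 24):

    #include <stdint.h>
    #include <stdio.h>

    #define GRANULE_SHIFT 24        /* 16MB granules, the common ia64 default */

    int main(void)
    {
            uint64_t granule = 0x42;                        /* as stored in IA64_KR(CURRENT_STACK) */
            uint64_t phys = granule << GRANULE_SHIFT;       /* shl r16=r16,IA64_GRANULE_SHIFT */
            uint64_t virt = phys | (7ULL << 61);            /* dep r16=-1,r16,61,3 */
            uint64_t region = virt >> 61;                   /* extr.u r19=r21,61,3 */

            printf("phys %#llx -> virt %#llx (region %llu)\n",
                   (unsigned long long)phys, (unsigned long long)virt,
                   (unsigned long long)region);
            return 0;
    }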
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index 6e683745af49..f081c60ab206 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -56,8 +56,9 @@ static struct page *page_isolate[MAX_PAGE_ISOLATE];
 static int num_page_isolate = 0;
 
 typedef enum {
-        ISOLATE_NG = 0,
-        ISOLATE_OK = 1
+        ISOLATE_NG,
+        ISOLATE_OK,
+        ISOLATE_NONE
 } isolate_status_t;
 
 /*
@@ -74,7 +75,7 @@ static struct {
  * @paddr:      poisoned memory location
  *
  * Return value:
- *      ISOLATE_OK / ISOLATE_NG
+ *      one of isolate_status_t, ISOLATE_OK/NG/NONE.
  */
 
 static isolate_status_t
@@ -84,23 +85,26 @@ mca_page_isolate(unsigned long paddr)
         struct page *p;
 
         /* whether physical address is valid or not */
-        if ( !ia64_phys_addr_valid(paddr) )
-                return ISOLATE_NG;
+        if (!ia64_phys_addr_valid(paddr))
+                return ISOLATE_NONE;
+
+        if (!pfn_valid(paddr))
+                return ISOLATE_NONE;
 
         /* convert physical address to physical page number */
         p = pfn_to_page(paddr>>PAGE_SHIFT);
 
         /* check whether a page number have been already registered or not */
-        for( i = 0; i < num_page_isolate; i++ )
-                if( page_isolate[i] == p )
+        for (i = 0; i < num_page_isolate; i++)
+                if (page_isolate[i] == p)
                         return ISOLATE_OK; /* already listed */
 
         /* limitation check */
-        if( num_page_isolate == MAX_PAGE_ISOLATE )
+        if (num_page_isolate == MAX_PAGE_ISOLATE)
                 return ISOLATE_NG;
 
         /* kick pages having attribute 'SLAB' or 'Reserved' */
-        if( PageSlab(p) || PageReserved(p) )
+        if (PageSlab(p) || PageReserved(p))
                 return ISOLATE_NG;
 
         /* add attribute 'Reserved' and register the page */
@@ -122,10 +126,15 @@ mca_handler_bh(unsigned long paddr)
                         current->pid, current->comm);
 
         spin_lock(&mca_bh_lock);
-        if (mca_page_isolate(paddr) == ISOLATE_OK) {
+        switch (mca_page_isolate(paddr)) {
+        case ISOLATE_OK:
                 printk(KERN_DEBUG "Page isolation: ( %lx ) success.\n", paddr);
-        } else {
+                break;
+        case ISOLATE_NG:
                 printk(KERN_DEBUG "Page isolation: ( %lx ) failure.\n", paddr);
+                break;
+        default:
+                break;
         }
         spin_unlock(&mca_bh_lock);
 
@@ -139,10 +148,10 @@ mca_handler_bh(unsigned long paddr)
  * @peidx: pointer to index of processor error section
  */
 
-static void 
+static void
 mca_make_peidx(sal_log_processor_info_t *slpi, peidx_table_t *peidx)
 {
-        /* 
+        /*
          * calculate the start address of
          *   "struct cpuid_info" and "sal_processor_static_info_t".
          */
@@ -164,7 +173,7 @@ mca_make_peidx(sal_log_processor_info_t *slpi, peidx_table_t *peidx)
 }
 
 /**
- * mca_make_slidx - Make index of SAL error record 
+ * mca_make_slidx - Make index of SAL error record
  * @buffer: pointer to SAL error record
  * @slidx: pointer to index of SAL error record
  *
@@ -172,12 +181,12 @@ mca_make_peidx(sal_log_processor_info_t *slpi, peidx_table_t *peidx)
  *      1 if record has platform error / 0 if not
  */
 #define LOG_INDEX_ADD_SECT_PTR(sect, ptr) \
-        { slidx_list_t *hl = &slidx_pool.buffer[slidx_pool.cur_idx]; \
+        {slidx_list_t *hl = &slidx_pool.buffer[slidx_pool.cur_idx]; \
         hl->hdr = ptr; \
         list_add(&hl->list, &(sect)); \
         slidx_pool.cur_idx = (slidx_pool.cur_idx + 1)%slidx_pool.max_idx; }
 
 static int
 mca_make_slidx(void *buffer, slidx_table_t *slidx)
 {
         int platform_err = 0;
@@ -214,28 +223,36 @@ mca_make_slidx(void *buffer, slidx_table_t *slidx)
                 sp = (sal_log_section_hdr_t *)((char*)buffer + ercd_pos);
                 if (!efi_guidcmp(sp->guid, SAL_PROC_DEV_ERR_SECT_GUID)) {
                         LOG_INDEX_ADD_SECT_PTR(slidx->proc_err, sp);
-                } else if (!efi_guidcmp(sp->guid, SAL_PLAT_MEM_DEV_ERR_SECT_GUID)) {
+                } else if (!efi_guidcmp(sp->guid,
+                                SAL_PLAT_MEM_DEV_ERR_SECT_GUID)) {
                         platform_err = 1;
                         LOG_INDEX_ADD_SECT_PTR(slidx->mem_dev_err, sp);
-                } else if (!efi_guidcmp(sp->guid, SAL_PLAT_SEL_DEV_ERR_SECT_GUID)) {
+                } else if (!efi_guidcmp(sp->guid,
+                                SAL_PLAT_SEL_DEV_ERR_SECT_GUID)) {
                         platform_err = 1;
                         LOG_INDEX_ADD_SECT_PTR(slidx->sel_dev_err, sp);
-                } else if (!efi_guidcmp(sp->guid, SAL_PLAT_PCI_BUS_ERR_SECT_GUID)) {
+                } else if (!efi_guidcmp(sp->guid,
+                                SAL_PLAT_PCI_BUS_ERR_SECT_GUID)) {
                         platform_err = 1;
                         LOG_INDEX_ADD_SECT_PTR(slidx->pci_bus_err, sp);
-                } else if (!efi_guidcmp(sp->guid, SAL_PLAT_SMBIOS_DEV_ERR_SECT_GUID)) {
+                } else if (!efi_guidcmp(sp->guid,
+                                SAL_PLAT_SMBIOS_DEV_ERR_SECT_GUID)) {
                         platform_err = 1;
                         LOG_INDEX_ADD_SECT_PTR(slidx->smbios_dev_err, sp);
-                } else if (!efi_guidcmp(sp->guid, SAL_PLAT_PCI_COMP_ERR_SECT_GUID)) {
+                } else if (!efi_guidcmp(sp->guid,
+                                SAL_PLAT_PCI_COMP_ERR_SECT_GUID)) {
                         platform_err = 1;
                         LOG_INDEX_ADD_SECT_PTR(slidx->pci_comp_err, sp);
-                } else if (!efi_guidcmp(sp->guid, SAL_PLAT_SPECIFIC_ERR_SECT_GUID)) {
+                } else if (!efi_guidcmp(sp->guid,
+                                SAL_PLAT_SPECIFIC_ERR_SECT_GUID)) {
                         platform_err = 1;
                         LOG_INDEX_ADD_SECT_PTR(slidx->plat_specific_err, sp);
-                } else if (!efi_guidcmp(sp->guid, SAL_PLAT_HOST_CTLR_ERR_SECT_GUID)) {
+                } else if (!efi_guidcmp(sp->guid,
+                                SAL_PLAT_HOST_CTLR_ERR_SECT_GUID)) {
                         platform_err = 1;
                         LOG_INDEX_ADD_SECT_PTR(slidx->host_ctlr_err, sp);
-                } else if (!efi_guidcmp(sp->guid, SAL_PLAT_BUS_ERR_SECT_GUID)) {
+                } else if (!efi_guidcmp(sp->guid,
+                                SAL_PLAT_BUS_ERR_SECT_GUID)) {
                         platform_err = 1;
                         LOG_INDEX_ADD_SECT_PTR(slidx->plat_bus_err, sp);
                 } else {
@@ -253,15 +270,16 @@ mca_make_slidx(void *buffer, slidx_table_t *slidx)
  * Return value:
  *      0 on Success / -ENOMEM on Failure
  */
-static int 
+static int
 init_record_index_pools(void)
 {
         int i;
         int rec_max_size;  /* Maximum size of SAL error records */
         int sect_min_size; /* Minimum size of SAL error sections */
         /* minimum size table of each section */
         static int sal_log_sect_min_sizes[] = {
-                sizeof(sal_log_processor_info_t) + sizeof(sal_processor_static_info_t),
+                sizeof(sal_log_processor_info_t)
+                + sizeof(sal_processor_static_info_t),
                 sizeof(sal_log_mem_dev_err_info_t),
                 sizeof(sal_log_sel_dev_err_info_t),
                 sizeof(sal_log_pci_bus_err_info_t),
@@ -294,7 +312,8 @@ init_record_index_pools(void)
 
         /* - 3 - */
         slidx_pool.max_idx = (rec_max_size/sect_min_size) * 2 + 1;
-        slidx_pool.buffer = (slidx_list_t *) kmalloc(slidx_pool.max_idx * sizeof(slidx_list_t), GFP_KERNEL);
+        slidx_pool.buffer = (slidx_list_t *)
+                kmalloc(slidx_pool.max_idx * sizeof(slidx_list_t), GFP_KERNEL);
 
         return slidx_pool.buffer ? 0 : -ENOMEM;
 }
@@ -308,6 +327,7 @@ init_record_index_pools(void)
  * is_mca_global - Check whether this MCA is global or not
  * @peidx: pointer of index of processor error section
  * @pbci: pointer to pal_bus_check_info_t
+ * @sos: pointer to hand off struct between SAL and OS
  *
  * Return value:
  *      MCA_IS_LOCAL / MCA_IS_GLOBAL
@@ -317,11 +337,12 @@ static mca_type_t
 is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci,
               struct ia64_sal_os_state *sos)
 {
-        pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx);
+        pal_processor_state_info_t *psp =
+                (pal_processor_state_info_t*)peidx_psp(peidx);
 
         /*
          * PAL can request a rendezvous, if the MCA has a global scope.
          * If "rz_always" flag is set, SAL requests MCA rendezvous
          * in spite of global MCA.
          * Therefore it is local MCA when rendezvous has not been requested.
          * Failed to rendezvous, the system must be down.
@@ -381,13 +402,15 @@ is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci,
  * @slidx: pointer of index of SAL error record
  * @peidx: pointer of index of processor error section
  * @pbci: pointer of pal_bus_check_info
+ * @sos: pointer to hand off struct between SAL and OS
  *
  * Return value:
  *      1 on Success / 0 on Failure
  */
 
 static int
-recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci,
+recover_from_read_error(slidx_table_t *slidx,
+                        peidx_table_t *peidx, pal_bus_check_info_t *pbci,
                         struct ia64_sal_os_state *sos)
 {
         sal_log_mod_error_info_t *smei;
@@ -453,24 +476,28 @@ recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_chec
  * @slidx: pointer of index of SAL error record
  * @peidx: pointer of index of processor error section
  * @pbci: pointer of pal_bus_check_info
+ * @sos: pointer to hand off struct between SAL and OS
  *
  * Return value:
  *      1 on Success / 0 on Failure
  */
 
 static int
-recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci,
+recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx,
+                            pal_bus_check_info_t *pbci,
                             struct ia64_sal_os_state *sos)
 {
         int status = 0;
-        pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx);
+        pal_processor_state_info_t *psp =
+                (pal_processor_state_info_t*)peidx_psp(peidx);
 
         if (psp->bc && pbci->eb && pbci->bsi == 0) {
                 switch(pbci->type) {
                 case 1: /* partial read */
                 case 3: /* full line(cpu) read */
                 case 9: /* I/O space read */
-                        status = recover_from_read_error(slidx, peidx, pbci, sos);
+                        status = recover_from_read_error(slidx, peidx, pbci,
+                                                         sos);
                         break;
                 case 0: /* unknown */
                 case 2: /* partial write */
@@ -481,7 +508,8 @@ recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_
                 case 8: /* write coalescing transactions */
                 case 10: /* I/O space write */
                 case 11: /* inter-processor interrupt message(IPI) */
-                case 12: /* interrupt acknowledge or external task priority cycle */
+                case 12: /* interrupt acknowledge or
+                                external task priority cycle */
                 default:
                         break;
                 }
@@ -496,6 +524,7 @@ recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_
  * @slidx: pointer of index of SAL error record
  * @peidx: pointer of index of processor error section
  * @pbci: pointer of pal_bus_check_info
+ * @sos: pointer to hand off struct between SAL and OS
  *
  * Return value:
  *      1 on Success / 0 on Failure
@@ -509,15 +538,17 @@ recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_
  */
 
 static int
-recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci,
+recover_from_processor_error(int platform, slidx_table_t *slidx,
+                             peidx_table_t *peidx, pal_bus_check_info_t *pbci,
                              struct ia64_sal_os_state *sos)
 {
-        pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx);
+        pal_processor_state_info_t *psp =
+                (pal_processor_state_info_t*)peidx_psp(peidx);
 
         /*
          * We cannot recover errors with other than bus_check.
          */
         if (psp->cc || psp->rc || psp->uc)
                 return 0;
 
         /*
@@ -546,10 +577,10 @@ recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *
          * (e.g. a load from poisoned memory)
          * This means "there are some platform errors".
          */
-        if (platform) 
+        if (platform)
                 return recover_from_platform_error(slidx, peidx, pbci, sos);
         /*
          * On account of strange SAL error record, we cannot recover.
          */
         return 0;
 }
@@ -557,14 +588,14 @@ recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *
 /**
  * mca_try_to_recover - Try to recover from MCA
  * @rec: pointer to a SAL error record
+ * @sos: pointer to hand off struct between SAL and OS
  *
  * Return value:
  *      1 on Success / 0 on Failure
  */
 
 static int
-mca_try_to_recover(void *rec,
-                   struct ia64_sal_os_state *sos)
+mca_try_to_recover(void *rec, struct ia64_sal_os_state *sos)
 {
         int platform_err;
         int n_proc_err;
@@ -588,7 +619,8 @@ mca_try_to_recover(void *rec,
         }
 
         /* Make index of processor error section */
-        mca_make_peidx((sal_log_processor_info_t*)slidx_first_entry(&slidx.proc_err)->hdr, &peidx);
+        mca_make_peidx((sal_log_processor_info_t*)
+                slidx_first_entry(&slidx.proc_err)->hdr, &peidx);
 
         /* Extract Processor BUS_CHECK[0] */
         *((u64*)&pbci) = peidx_check_info(&peidx, bus_check, 0);
@@ -598,7 +630,8 @@ mca_try_to_recover(void *rec,
                 return 0;
 
         /* Try to recover a processor error */
-        return recover_from_processor_error(platform_err, &slidx, &peidx, &pbci, sos);
+        return recover_from_processor_error(platform_err, &slidx, &peidx,
+                                            &pbci, sos);
 }
 
 /*
@@ -611,7 +644,7 @@ int __init mca_external_handler_init(void)
                 return -ENOMEM;
 
         /* register external mca handlers */
-        if (ia64_reg_MCA_extension(mca_try_to_recover)){
+        if (ia64_reg_MCA_extension(mca_try_to_recover)) {
                 printk(KERN_ERR "ia64_reg_MCA_extension failed.\n");
                 kfree(slidx_pool.buffer);
                 return -EFAULT;
diff --git a/arch/ia64/kernel/mca_drv.h b/arch/ia64/kernel/mca_drv.h
index 0227b761f2c4..e2f6fa1e0ef6 100644
--- a/arch/ia64/kernel/mca_drv.h
+++ b/arch/ia64/kernel/mca_drv.h
@@ -6,7 +6,7 @@
  * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com)
  */
 /*
- * Processor error section: 
+ * Processor error section:
  *
  *  +-sal_log_processor_info_t *info-------------+
  *  | sal_log_section_hdr_t header;              |
diff --git a/arch/ia64/kernel/mca_drv_asm.S b/arch/ia64/kernel/mca_drv_asm.S
index 2d7e0217638d..3f298ee4d00c 100644
--- a/arch/ia64/kernel/mca_drv_asm.S
+++ b/arch/ia64/kernel/mca_drv_asm.S
@@ -13,45 +13,45 @@
 #include <asm/ptrace.h>
 
 GLOBAL_ENTRY(mca_handler_bhhook)
         invala                          // clear RSE ?
-        ;;                              //
-        cover                           //
-        ;;                              //
-        clrrrb                          //
+        ;;
+        cover
+        ;;
+        clrrrb
         ;;
         alloc r16=ar.pfs,0,2,1,0        // make a new frame
         ;;
         mov ar.rsc=0
         ;;
         mov r13=IA64_KR(CURRENT)        // current task pointer
         ;;
         mov r2=r13
         ;;
         addl r22=IA64_RBS_OFFSET,r2
         ;;
         mov ar.bspstore=r22
         ;;
         addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2
         ;;
         adds r2=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
         ;;
         st1 [r2]=r0                     // clear current->thread.on_ustack flag
         mov loc0=r16
         movl loc1=mca_handler_bh        // recovery C function
         ;;
         mov out0=r8                     // poisoned address
         mov b6=loc1
         ;;
         mov loc1=rp
         ;;
         ssm psr.i
         ;;
         br.call.sptk.many rp=b6         // does not return ...
         ;;
         mov ar.pfs=loc0
         mov rp=loc1
         ;;
         mov r8=r0
         br.ret.sptk.many rp
         ;;
 END(mca_handler_bhhook)
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 1650353e3f77..d71731ee5b61 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -574,7 +574,7 @@ pfm_protect_ctx_ctxsw(pfm_context_t *x)
         return 0UL;
 }
 
-static inline unsigned long
+static inline void
 pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
 {
         spin_unlock(&(x)->ctx_lock);
@@ -2218,12 +2218,13 @@ static void
 pfm_free_fd(int fd, struct file *file)
 {
         struct files_struct *files = current->files;
-        struct fdtable *fdt = files_fdtable(files);
+        struct fdtable *fdt;
 
         /*
          * there ie no fd_uninstall(), so we do it here
          */
         spin_lock(&files->file_lock);
+        fdt = files_fdtable(files);
         rcu_assign_pointer(fdt->fd[fd], NULL);
         spin_unlock(&files->file_lock);
 
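
The pfm_free_fd() change closes a narrow race: with RCU-managed descriptor tables, a concurrent resize can replace the fdtable, so the pointer must be fetched while holding files->file_lock rather than captured at declaration time. A userspace analogue of the bug pattern and the fix (hypothetical names):

    #include <pthread.h>
    #include <stdio.h>

    struct table { void *slots[16]; };
    struct files {
            pthread_mutex_t lock;
            struct table *tbl;      /* may be swapped for a bigger table, under lock */
    };

    /* Bug pattern (the old code): the snapshot taken before the lock may point
     * at a table that a concurrent resize has already replaced and freed. */
    void clear_slot_racy(struct files *f, int fd)
    {
            struct table *t = f->tbl;               /* stale snapshot */
            pthread_mutex_lock(&f->lock);
            t->slots[fd] = NULL;                    /* may write into freed memory */
            pthread_mutex_unlock(&f->lock);
    }

    /* The fix: read the pointer only while holding the lock that serializes
     * reallocation, as pfm_free_fd() now does with files_fdtable(). */
    void clear_slot(struct files *f, int fd)
    {
            pthread_mutex_lock(&f->lock);
            f->tbl->slots[fd] = NULL;
            pthread_mutex_unlock(&f->lock);
    }

    int main(void)
    {
            struct table t = { { 0 } };
            struct files f = { PTHREAD_MUTEX_INITIALIZER, &t };

            clear_slot(&f, 3);
            puts("slot cleared under lock");
            return 0;
    }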
diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile
index 4be1546e1726..ac64664a1807 100644
--- a/arch/ia64/lib/Makefile
+++ b/arch/ia64/lib/Makefile
@@ -15,7 +15,6 @@ lib-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o
 lib-$(CONFIG_MCKINLEY)  += copy_page_mck.o memcpy_mck.o
 lib-$(CONFIG_PERFMON)   += carta_random.o
 lib-$(CONFIG_MD_RAID5)  += xor.o
-lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
 
 AFLAGS___divdi3.o       =
 AFLAGS___udivdi3.o      = -DUNSIGNED
diff --git a/arch/ia64/lib/dec_and_lock.c b/arch/ia64/lib/dec_and_lock.c
deleted file mode 100644
index c7ce92f968f1..000000000000
--- a/arch/ia64/lib/dec_and_lock.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2003 Jerome Marchand, Bull S.A.
- *      Cleaned up by David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * This file is released under the GPLv2, or at your option any later version.
- *
- * ia64 version of "atomic_dec_and_lock()" using the atomic "cmpxchg" instruction.  This
- * code is an adaptation of the x86 version of "atomic_dec_and_lock()".
- */
-
-#include <linux/compiler.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-
-/*
- * Decrement REFCOUNT and if the count reaches zero, acquire the spinlock.  Both of these
- * operations have to be done atomically, so that the count doesn't drop to zero without
- * acquiring the spinlock first.
- */
-int
-_atomic_dec_and_lock (atomic_t *refcount, spinlock_t *lock)
-{
-        int old, new;
-
-        do {
-                old = atomic_read(refcount);
-                new = old - 1;
-
-                if (unlikely (old == 1)) {
-                        /* oops, we may be decrementing to zero, do it the slow way... */
-                        spin_lock(lock);
-                        if (atomic_dec_and_test(refcount))
-                                return 1;
-                        spin_unlock(lock);
-                        return 0;
-                }
-        } while (cmpxchg(&refcount->counter, old, new) != old);
-        return 0;
-}
-
-EXPORT_SYMBOL(_atomic_dec_and_lock);
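
The removed ia64 version is superseded by the generic implementation in lib/dec_and_lock.c, and HAVE_DEC_LOCK disappears with it (see the Kconfig and Makefile hunks above). The protocol itself is worth seeing in isolation: decrement lock-free while the count stays above one, and take the lock only for a possible 1 -> 0 transition, so the count never reaches zero without the lock held. A userspace analogue using GCC's __atomic builtins in place of ia64 cmpxchg and a pthread mutex in place of the spinlock (a sketch, not the kernel code):

    #include <pthread.h>
    #include <stdio.h>

    /* Returns 1 with the lock held if the count dropped to zero, else 0. */
    static int dec_and_lock(int *refcount, pthread_mutex_t *lock)
    {
            int old, new;

            do {
                    old = __atomic_load_n(refcount, __ATOMIC_RELAXED);
                    new = old - 1;

                    if (old == 1) {         /* may hit zero: take the slow path */
                            pthread_mutex_lock(lock);
                            if (__atomic_sub_fetch(refcount, 1, __ATOMIC_ACQ_REL) == 0)
                                    return 1;       /* caller frees, then unlocks */
                            pthread_mutex_unlock(lock);
                            return 0;
                    }
            } while (!__atomic_compare_exchange_n(refcount, &old, new, 0,
                                                  __ATOMIC_ACQ_REL, __ATOMIC_RELAXED));
            return 0;
    }

    int main(void)
    {
            pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
            int ref = 2;

            printf("first put -> %d (fast path)\n", dec_and_lock(&ref, &lock));
            if (dec_and_lock(&ref, &lock)) {
                    puts("last reference dropped: free the object, then unlock");
                    pthread_mutex_unlock(&lock);
            }
            return 0;
    }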