author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2013-05-26 21:57:53 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2013-05-26 21:57:53 -0400
commit     8095e4e81b4bc38eef7e0be99f9ecc744eaa1683 (patch)
tree       d3b5100db11784093e78513f3429022569b4bf7b /arch/parisc/kernel
parent     6b5f146535fe6969aeded9f00b0bc42b3783f7fd (diff)
parent     e4aa937ec75df0eea0bee03bffa3303ad36c986b (diff)

Merge 3.10-rc3 into tty-next

We want these fixes.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'arch/parisc/kernel')
-rw-r--r--  arch/parisc/kernel/entry.S     | 174
-rw-r--r--  arch/parisc/kernel/hardware.c  |   1
-rw-r--r--  arch/parisc/kernel/irq.c       | 122
-rw-r--r--  arch/parisc/kernel/pacache.S   |  12
-rw-r--r--  arch/parisc/kernel/traps.c     |   1
-rw-r--r--  arch/parisc/kernel/unaligned.c |   3
6 files changed, 211 insertions(+), 102 deletions(-)
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 4bb96ad9b0b1..e8f07dd28401 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -65,15 +65,11 @@
 	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation */
 	mtsp	%r0, %sr4
 	mtsp	%r0, %sr5
-	mfsp	%sr7, %r1
-	or,=	%r0,%r1,%r0	/* Only save sr7 in sr3 if sr7 != 0 */
-	mtsp	%r1, %sr3
+	mtsp	%r0, %sr6
 	tovirt_r1 %r29
 	load32	KERNEL_PSW, %r1

 	rsm	PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
-	mtsp	%r0, %sr6
-	mtsp	%r0, %sr7
 	mtctl	%r0, %cr17	/* Clear IIASQ tail */
 	mtctl	%r0, %cr17	/* Clear IIASQ head */
 	mtctl	%r1, %ipsw
@@ -119,17 +115,20 @@

 	/* we save the registers in the task struct */

+	copy	%r30, %r17
 	mfctl	%cr30, %r1
+	ldo	THREAD_SZ_ALGN(%r1), %r30
+	mtsp	%r0,%sr7
+	mtsp	%r16,%sr3
 	tophys	%r1,%r9
 	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
 	tophys	%r1,%r9
 	ldo	TASK_REGS(%r9),%r9
-	STREG	%r30, PT_GR30(%r9)
+	STREG	%r17,PT_GR30(%r9)
 	STREG	%r29,PT_GR29(%r9)
 	STREG	%r26,PT_GR26(%r9)
+	STREG	%r16,PT_SR7(%r9)
 	copy	%r9,%r29
-	mfctl	%cr30, %r1
-	ldo	THREAD_SZ_ALGN(%r1), %r30
 	.endm

 	.macro	get_stack_use_r30
@@ -137,10 +136,12 @@
 	/* we put a struct pt_regs on the stack and save the registers there */

 	tophys	%r30,%r9
-	STREG	%r30,PT_GR30(%r9)
+	copy	%r30,%r1
 	ldo	PT_SZ_ALGN(%r30),%r30
+	STREG	%r1,PT_GR30(%r9)
 	STREG	%r29,PT_GR29(%r9)
 	STREG	%r26,PT_GR26(%r9)
+	STREG	%r16,PT_SR7(%r9)
 	copy	%r9,%r29
 	.endm

@@ -452,9 +453,41 @@
 	L2_ptep		\pgd,\pte,\index,\va,\fault
 	.endm

+	/* Acquire pa_dbit_lock lock. */
+	.macro	dbit_lock	spc,tmp,tmp1
+#ifdef CONFIG_SMP
+	cmpib,COND(=),n	0,\spc,2f
+	load32	PA(pa_dbit_lock),\tmp
+1:	LDCW	0(\tmp),\tmp1
+	cmpib,COND(=)	0,\tmp1,1b
+	nop
+2:
+#endif
+	.endm
+
+	/* Release pa_dbit_lock lock without reloading lock address. */
+	.macro	dbit_unlock0	spc,tmp
+#ifdef CONFIG_SMP
+	or,COND(=)	%r0,\spc,%r0
+	stw	\spc,0(\tmp)
+#endif
+	.endm
+
+	/* Release pa_dbit_lock lock. */
+	.macro	dbit_unlock1	spc,tmp
+#ifdef CONFIG_SMP
+	load32	PA(pa_dbit_lock),\tmp
+	dbit_unlock0	\spc,\tmp
+#endif
+	.endm
+
 	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
 	 * don't needlessly dirty the cache line if it was already set */
-	.macro	update_ptep	ptep,pte,tmp,tmp1
+	.macro	update_ptep	spc,ptep,pte,tmp,tmp1
+#ifdef CONFIG_SMP
+	or,COND(=)	%r0,\spc,%r0
+	LDREG	0(\ptep),\pte
+#endif
 	ldi	_PAGE_ACCESSED,\tmp1
 	or	\tmp1,\pte,\tmp
 	and,COND(<>)	\tmp1,\pte,%r0
@@ -463,7 +496,11 @@

 	/* Set the dirty bit (and accessed bit).  No need to be
 	 * clever, this is only used from the dirty fault */
-	.macro	update_dirty	ptep,pte,tmp
+	.macro	update_dirty	spc,ptep,pte,tmp
+#ifdef CONFIG_SMP
+	or,COND(=)	%r0,\spc,%r0
+	LDREG	0(\ptep),\pte
+#endif
 	ldi	_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
 	or	\tmp,\pte,\pte
 	STREG	\pte,0(\ptep)
@@ -1111,11 +1148,13 @@ dtlb_miss_20w:

 	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

-	update_ptep	ptp,pte,t0,t1
+	dbit_lock	spc,t0,t1
+	update_ptep	spc,ptp,pte,t0,t1

 	make_insert_tlb	spc,pte,prot

 	idtlbt		pte,prot
+	dbit_unlock1	spc,t0

 	rfir
 	nop
@@ -1135,11 +1174,13 @@ nadtlb_miss_20w:

 	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w

-	update_ptep	ptp,pte,t0,t1
+	dbit_lock	spc,t0,t1
+	update_ptep	spc,ptp,pte,t0,t1

 	make_insert_tlb	spc,pte,prot

 	idtlbt		pte,prot
+	dbit_unlock1	spc,t0

 	rfir
 	nop
@@ -1161,7 +1202,8 @@ dtlb_miss_11:

 	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

-	update_ptep	ptp,pte,t0,t1
+	dbit_lock	spc,t0,t1
+	update_ptep	spc,ptp,pte,t0,t1

 	make_insert_tlb_11	spc,pte,prot

@@ -1172,6 +1214,7 @@ dtlb_miss_11:
 	idtlbp		prot,(%sr1,va)

 	mtsp		t0, %sr1	/* Restore sr1 */
+	dbit_unlock1	spc,t0

 	rfir
 	nop
@@ -1192,7 +1235,8 @@ nadtlb_miss_11:

 	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11

-	update_ptep	ptp,pte,t0,t1
+	dbit_lock	spc,t0,t1
+	update_ptep	spc,ptp,pte,t0,t1

 	make_insert_tlb_11	spc,pte,prot

@@ -1204,6 +1248,7 @@ nadtlb_miss_11:
 	idtlbp		prot,(%sr1,va)

 	mtsp		t0, %sr1	/* Restore sr1 */
+	dbit_unlock1	spc,t0

 	rfir
 	nop
@@ -1224,13 +1269,15 @@ dtlb_miss_20:

 	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

-	update_ptep	ptp,pte,t0,t1
+	dbit_lock	spc,t0,t1
+	update_ptep	spc,ptp,pte,t0,t1

 	make_insert_tlb	spc,pte,prot

 	f_extend	pte,t0

 	idtlbt		pte,prot
+	dbit_unlock1	spc,t0

 	rfir
 	nop
@@ -1250,13 +1297,15 @@ nadtlb_miss_20:

 	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20

-	update_ptep	ptp,pte,t0,t1
+	dbit_lock	spc,t0,t1
+	update_ptep	spc,ptp,pte,t0,t1

 	make_insert_tlb	spc,pte,prot

 	f_extend	pte,t0

 	idtlbt		pte,prot
+	dbit_unlock1	spc,t0

 	rfir
 	nop
@@ -1357,11 +1406,13 @@ itlb_miss_20w:

 	L3_ptep		ptp,pte,t0,va,itlb_fault

-	update_ptep	ptp,pte,t0,t1
+	dbit_lock	spc,t0,t1
+	update_ptep	spc,ptp,pte,t0,t1

 	make_insert_tlb	spc,pte,prot

 	iitlbt		pte,prot
+	dbit_unlock1	spc,t0

 	rfir
 	nop
@@ -1379,11 +1430,13 @@ naitlb_miss_20w:

 	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w

-	update_ptep	ptp,pte,t0,t1
+	dbit_lock	spc,t0,t1
+	update_ptep	spc,ptp,pte,t0,t1

 	make_insert_tlb	spc,pte,prot

 	iitlbt		pte,prot
+	dbit_unlock1	spc,t0

 	rfir
 	nop
@@ -1405,7 +1458,8 @@ itlb_miss_11:

 	L2_ptep		ptp,pte,t0,va,itlb_fault

-	update_ptep	ptp,pte,t0,t1
+	dbit_lock	spc,t0,t1
+	update_ptep	spc,ptp,pte,t0,t1

 	make_insert_tlb_11	spc,pte,prot

@@ -1416,6 +1470,7 @@ itlb_miss_11:
 	iitlbp		prot,(%sr1,va)

 	mtsp		t0, %sr1	/* Restore sr1 */
+	dbit_unlock1	spc,t0

 	rfir
 	nop
@@ -1427,7 +1482,8 @@ naitlb_miss_11:

 	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11

-	update_ptep	ptp,pte,t0,t1
+	dbit_lock	spc,t0,t1
+	update_ptep	spc,ptp,pte,t0,t1

 	make_insert_tlb_11	spc,pte,prot

@@ -1438,6 +1494,7 @@ naitlb_miss_11:
 	iitlbp		prot,(%sr1,va)

 	mtsp		t0, %sr1	/* Restore sr1 */
+	dbit_unlock1	spc,t0

 	rfir
 	nop
@@ -1459,13 +1516,15 @@ itlb_miss_20:

 	L2_ptep		ptp,pte,t0,va,itlb_fault

-	update_ptep	ptp,pte,t0,t1
+	dbit_lock	spc,t0,t1
+	update_ptep	spc,ptp,pte,t0,t1

 	make_insert_tlb	spc,pte,prot

 	f_extend	pte,t0

 	iitlbt		pte,prot
+	dbit_unlock1	spc,t0

 	rfir
 	nop
@@ -1477,13 +1536,15 @@ naitlb_miss_20:

 	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20

-	update_ptep	ptp,pte,t0,t1
+	dbit_lock	spc,t0,t1
+	update_ptep	spc,ptp,pte,t0,t1

 	make_insert_tlb	spc,pte,prot

 	f_extend	pte,t0

 	iitlbt		pte,prot
+	dbit_unlock1	spc,t0

 	rfir
 	nop
@@ -1507,29 +1568,13 @@ dbit_trap_20w:

 	L3_ptep		ptp,pte,t0,va,dbit_fault

-#ifdef CONFIG_SMP
-	cmpib,COND(=),n	0,spc,dbit_nolock_20w
-	load32		PA(pa_dbit_lock),t0
-
-dbit_spin_20w:
-	LDCW		0(t0),t1
-	cmpib,COND(=)	0,t1,dbit_spin_20w
-	nop
-
-dbit_nolock_20w:
-#endif
-	update_dirty	ptp,pte,t1
+	dbit_lock	spc,t0,t1
+	update_dirty	spc,ptp,pte,t1

 	make_insert_tlb	spc,pte,prot

 	idtlbt		pte,prot
-#ifdef CONFIG_SMP
-	cmpib,COND(=),n	0,spc,dbit_nounlock_20w
-	ldi		1,t1
-	stw		t1,0(t0)
-
-dbit_nounlock_20w:
-#endif
+	dbit_unlock0	spc,t0

 	rfir
 	nop
@@ -1543,18 +1588,8 @@ dbit_trap_11:

 	L2_ptep		ptp,pte,t0,va,dbit_fault

-#ifdef CONFIG_SMP
-	cmpib,COND(=),n	0,spc,dbit_nolock_11
-	load32		PA(pa_dbit_lock),t0
-
-dbit_spin_11:
-	LDCW		0(t0),t1
-	cmpib,=		0,t1,dbit_spin_11
-	nop
-
-dbit_nolock_11:
-#endif
-	update_dirty	ptp,pte,t1
+	dbit_lock	spc,t0,t1
+	update_dirty	spc,ptp,pte,t1

 	make_insert_tlb_11	spc,pte,prot

@@ -1565,13 +1600,7 @@ dbit_nolock_11:
 	idtlbp		prot,(%sr1,va)

 	mtsp		t1, %sr1	/* Restore sr1 */
-#ifdef CONFIG_SMP
-	cmpib,COND(=),n	0,spc,dbit_nounlock_11
-	ldi		1,t1
-	stw		t1,0(t0)
-
-dbit_nounlock_11:
-#endif
+	dbit_unlock0	spc,t0

 	rfir
 	nop
@@ -1583,32 +1612,15 @@ dbit_trap_20:

 	L2_ptep		ptp,pte,t0,va,dbit_fault

-#ifdef CONFIG_SMP
-	cmpib,COND(=),n	0,spc,dbit_nolock_20
-	load32		PA(pa_dbit_lock),t0
-
-dbit_spin_20:
-	LDCW		0(t0),t1
-	cmpib,=		0,t1,dbit_spin_20
-	nop
-
-dbit_nolock_20:
-#endif
-	update_dirty	ptp,pte,t1
+	dbit_lock	spc,t0,t1
+	update_dirty	spc,ptp,pte,t1

 	make_insert_tlb	spc,pte,prot

 	f_extend	pte,t1

 	idtlbt		pte,prot
-
-#ifdef CONFIG_SMP
-	cmpib,COND(=),n	0,spc,dbit_nounlock_20
-	ldi		1,t1
-	stw		t1,0(t0)
-
-dbit_nounlock_20:
-#endif
+	dbit_unlock0	spc,t0

 	rfir
 	nop
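
The entry.S changes above replace the open-coded pa_dbit_lock spinning in the dbit traps with the shared dbit_lock/dbit_unlock0/dbit_unlock1 macros and extend the same locking to the TLB-miss paths. They all rest on PA-RISC's LDCW (load word and clear) primitive: the load returns the old value of the lock word and atomically leaves zero behind, so a non-zero word means "free" and zero means "held", and both lock and unlock are skipped entirely when spc is zero (a kernel-space fault). As a rough illustration only -- plain C11 with atomic_exchange() standing in for LDCW, and "_sketch" names that exist nowhere in the kernel -- the pattern is:

    #include <stdatomic.h>

    /* Stand-in for the LDCW-style lock word: non-zero = free, zero = held. */
    static atomic_uint pa_dbit_lock_sketch = 1;

    static void dbit_lock_sketch(unsigned long spc)
    {
            if (spc == 0)
                    return;   /* kernel-space fault: cmpib,COND(=),n skips the lock */
            /* Spin until the load-and-clear returns non-zero, i.e. we took a free lock. */
            while (atomic_exchange(&pa_dbit_lock_sketch, 0) == 0)
                    ;
    }

    static void dbit_unlock_sketch(unsigned long spc)
    {
            if (spc == 0)
                    return;   /* or,COND(=) nullifies the store when spc == 0 */
            /* dbit_unlock0/1 release by storing a non-zero value back (the macros store spc). */
            atomic_store(&pa_dbit_lock_sketch, 1);
    }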
diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c
index f7752f6af29e..9e2d2e408529 100644
--- a/arch/parisc/kernel/hardware.c
+++ b/arch/parisc/kernel/hardware.c
@@ -222,6 +222,7 @@ static struct hp_hardware hp_hardware_list[] = {
 	{HPHW_NPROC,0x5DD,0x4,0x81,"Duet W2"},
 	{HPHW_NPROC,0x5DE,0x4,0x81,"Piccolo W+"},
 	{HPHW_NPROC,0x5DF,0x4,0x81,"Cantata W2"},
+	{HPHW_NPROC,0x5DF,0x0,0x00,"Marcato W+? (rp5470)"},
 	{HPHW_NPROC,0x5E0,0x4,0x91,"Cantata DC- W2"},
 	{HPHW_NPROC,0x5E1,0x4,0x91,"Crescendo DC- W2"},
 	{HPHW_NPROC,0x5E2,0x4,0x91,"Crescendo 650 W2"},
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index e255db0bb761..2e6443b1e922 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -27,11 +27,11 @@
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
-#include <linux/spinlock.h>
 #include <linux/types.h>
 #include <asm/io.h>

 #include <asm/smp.h>
+#include <asm/ldcw.h>

 #undef PARISC_IRQ_CR16_COUNTS

@@ -166,22 +166,36 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 	seq_printf(p, "%*s: ", prec, "STK");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
-	seq_printf(p, " Kernel stack usage\n");
+	seq_puts(p, " Kernel stack usage\n");
+# ifdef CONFIG_IRQSTACKS
+	seq_printf(p, "%*s: ", prec, "IST");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage);
+	seq_puts(p, " Interrupt stack usage\n");
+# endif
 #endif
 #ifdef CONFIG_SMP
 	seq_printf(p, "%*s: ", prec, "RES");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
-	seq_printf(p, " Rescheduling interrupts\n");
+	seq_puts(p, " Rescheduling interrupts\n");
 	seq_printf(p, "%*s: ", prec, "CAL");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
-	seq_printf(p, " Function call interrupts\n");
+	seq_puts(p, " Function call interrupts\n");
 #endif
+	seq_printf(p, "%*s: ", prec, "UAH");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_unaligned_count);
+	seq_puts(p, " Unaligned access handler traps\n");
+	seq_printf(p, "%*s: ", prec, "FPA");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_fpassist_count);
+	seq_puts(p, " Floating point assist traps\n");
 	seq_printf(p, "%*s: ", prec, "TLB");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
-	seq_printf(p, " TLB shootdowns\n");
+	seq_puts(p, " TLB shootdowns\n");
 	return 0;
 }

@@ -366,6 +380,24 @@ static inline int eirr_to_irq(unsigned long eirr)
 	return (BITS_PER_LONG - bit) + TIMER_IRQ;
 }

+#ifdef CONFIG_IRQSTACKS
+/*
+ * IRQ STACK - used for irq handler
+ */
+#define IRQ_STACK_SIZE      (4096 << 2) /* 16k irq stack size */
+
+union irq_stack_union {
+	unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
+	volatile unsigned int slock[4];
+	volatile unsigned int lock[1];
+};
+
+DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
+		.slock = { 1,1,1,1 },
+	};
+#endif
+
+
 int sysctl_panic_on_stackoverflow = 1;

 static inline void stack_overflow_check(struct pt_regs *regs)
@@ -378,6 +410,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 	unsigned long sp = regs->gr[30];
 	unsigned long stack_usage;
 	unsigned int *last_usage;
+	int cpu = smp_processor_id();

 	/* if sr7 != 0, we interrupted a userspace process which we do not want
 	 * to check for stack overflow. We will only check the kernel stack. */
@@ -386,7 +419,31 @@ static inline void stack_overflow_check(struct pt_regs *regs)

 	/* calculate kernel stack usage */
 	stack_usage = sp - stack_start;
-	last_usage = &per_cpu(irq_stat.kernel_stack_usage, smp_processor_id());
+#ifdef CONFIG_IRQSTACKS
+	if (likely(stack_usage <= THREAD_SIZE))
+		goto check_kernel_stack; /* found kernel stack */
+
+	/* check irq stack usage */
+	stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
+	stack_usage = sp - stack_start;
+
+	last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
+	if (unlikely(stack_usage > *last_usage))
+		*last_usage = stack_usage;
+
+	if (likely(stack_usage < (IRQ_STACK_SIZE - STACK_MARGIN)))
+		return;
+
+	pr_emerg("stackcheck: %s will most likely overflow irq stack "
+		 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
+		current->comm, sp, stack_start, stack_start + IRQ_STACK_SIZE);
+	goto panic_check;
+
+check_kernel_stack:
+#endif
+
+	/* check kernel stack usage */
+	last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);

 	if (unlikely(stack_usage > *last_usage))
 		*last_usage = stack_usage;
@@ -398,31 +455,66 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 		"(sp:%lx, stk bottom-top:%lx-%lx)\n",
 		current->comm, sp, stack_start, stack_start + THREAD_SIZE);

+#ifdef CONFIG_IRQSTACKS
+panic_check:
+#endif
 	if (sysctl_panic_on_stackoverflow)
 		panic("low stack detected by irq handler - check messages\n");
 #endif
 }

 #ifdef CONFIG_IRQSTACKS
-DEFINE_PER_CPU(union irq_stack_union, irq_stack_union);
+/* in entry.S: */
+void call_on_stack(unsigned long p1, void *func, unsigned long new_stack);

 static void execute_on_irq_stack(void *func, unsigned long param1)
 {
-	unsigned long *irq_stack_start;
+	union irq_stack_union *union_ptr;
 	unsigned long irq_stack;
-	int cpu = smp_processor_id();
+	volatile unsigned int *irq_stack_in_use;
+
+	union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
+	irq_stack = (unsigned long) &union_ptr->stack;
+	irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.slock),
+			 64); /* align for stack frame usage */

-	irq_stack_start = &per_cpu(irq_stack_union, cpu).stack[0];
-	irq_stack = (unsigned long) irq_stack_start;
-	irq_stack = ALIGN(irq_stack, 16); /* align for stack frame usage */
+	/* We may be called recursive. If we are already using the irq stack,
+	 * just continue to use it. Use spinlocks to serialize
+	 * the irq stack usage.
+	 */
+	irq_stack_in_use = (volatile unsigned int *)__ldcw_align(union_ptr);
+	if (!__ldcw(irq_stack_in_use)) {
+		void (*direct_call)(unsigned long p1) = func;

-	BUG_ON(*irq_stack_start); /* report bug if we were called recursive. */
-	*irq_stack_start = 1;
+		/* We are using the IRQ stack already.
+		 * Do direct call on current stack. */
+		direct_call(param1);
+		return;
+	}

 	/* This is where we switch to the IRQ stack. */
 	call_on_stack(param1, func, irq_stack);

-	*irq_stack_start = 0;
+	/* free up irq stack usage. */
+	*irq_stack_in_use = 1;
+}
+
+asmlinkage void do_softirq(void)
+{
+	__u32 pending;
+	unsigned long flags;
+
+	if (in_interrupt())
+		return;
+
+	local_irq_save(flags);
+
+	pending = local_softirq_pending();
+
+	if (pending)
+		execute_on_irq_stack(__do_softirq, 0);
+
+	local_irq_restore(flags);
 }
 #endif /* CONFIG_IRQSTACKS */

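
One detail worth noting in the execute_on_irq_stack() rework above: the per-CPU irq_stack_union doubles as both the IRQ stack and its own "in use" flag. The slock[4] words are initialised to 1 (free) so that LDCW has a suitably aligned, non-zero lock word to clear; __ldcw_align()/__ldcw() pick and clear that word, and a return value of 0 means the stack is already in use, in which case the handler simply runs on the current stack instead of switching. That is how recursive entry (for example do_softirq() while already on the IRQ stack) stays safe. A hedged user-space sketch of just that decision, with atomic_exchange() standing in for __ldcw() and no real stack switch:

    #include <stdatomic.h>

    /* 1 = IRQ stack free, 0 = already in use (same convention as slock[]). */
    static atomic_uint irq_stack_in_use_sketch = 1;

    static void run_on_irq_stack_sketch(void (*func)(unsigned long), unsigned long arg)
    {
            if (atomic_exchange(&irq_stack_in_use_sketch, 0) == 0) {
                    /* Recursive entry: IRQ stack busy, call directly on the current stack. */
                    func(arg);
                    return;
            }

            /* First entry: the kernel would switch stacks here via call_on_stack(). */
            func(arg);

            /* Release: store non-zero so the next interrupt may switch again. */
            atomic_store(&irq_stack_in_use_sketch, 1);
    }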
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 5e1de6072be5..36d7f402e48e 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -605,14 +605,14 @@ ENTRY(copy_user_page_asm)
 	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
 	convert_phys_for_tlb_insert20 %r23	/* convert phys addr to tlb insert format */
 	depd		%r24,63,22, %r28	/* Form aliased virtual address 'to' */
 	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
 	copy		%r28, %r29
 	depdi		1, 41,1, %r29		/* Form aliased virtual address 'from' */
 #else
 	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
 	extrw,u		%r23, 24,25, %r23	/* convert phys addr to tlb insert format */
 	depw		%r24, 31,22, %r28	/* Form aliased virtual address 'to' */
-	depwi		0, 31,12, %r28		/* Clear any offset bits */
+	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
 	copy		%r28, %r29
 	depwi		1, 9,1, %r29		/* Form aliased virtual address 'from' */
 #endif
@@ -762,7 +762,7 @@ ENTRY(clear_user_page_asm)
 #else
 	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
 	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
-	depwi		0, 31,12, %r28		/* Clear any offset bits */
+	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
 #endif

 	/* Purge any old translation */
@@ -846,7 +846,7 @@ ENTRY(flush_dcache_page_asm)
 #else
 	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
 	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
-	depwi		0, 31,12, %r28		/* Clear any offset bits */
+	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
 #endif

 	/* Purge any old translation */
@@ -918,11 +918,11 @@ ENTRY(flush_icache_page_asm)
 #endif
 	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
 	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
 	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
 #else
 	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
 	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
-	depwi		0, 31,12, %r28		/* Clear any offset bits */
+	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
 #endif

 	/* Purge any old translation */
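
The pacache.S hunks replace a hard-coded 12 with PAGE_SHIFT in the depwi instructions that clear the page-offset bits of the temporary alias address, matching what the 64-bit depdi lines already do. Clearing the low PAGE_SHIFT bits simply rounds the address down to a page boundary; an equivalent C one-liner, shown purely for illustration (assuming the usual PAGE_SIZE = 1 << PAGE_SHIFT relationship, with 4 KB pages as the default):

    /* depwi 0, 31, PAGE_SHIFT, %r28 clears the low PAGE_SHIFT bits of %r28. */
    #define PAGE_SHIFT_SKETCH 12
    #define PAGE_SIZE_SKETCH  (1UL << PAGE_SHIFT_SKETCH)

    static inline unsigned long page_align_down(unsigned long addr)
    {
            return addr & ~(PAGE_SIZE_SKETCH - 1);
    }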
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index fe41a98043bb..04e47c6a4562 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -646,6 +646,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
 	case 14:
 		/* Assist Exception Trap, i.e. floating point exception. */
 		die_if_kernel("Floating point exception", regs, 0); /* quiet */
+		__inc_irq_stat(irq_fpassist_count);
 		handle_fpe(regs);
 		return;

diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
index 234e3682cf09..d7c0acb35ec2 100644
--- a/arch/parisc/kernel/unaligned.c
+++ b/arch/parisc/kernel/unaligned.c
@@ -27,6 +27,7 @@
 #include <linux/signal.h>
 #include <linux/ratelimit.h>
 #include <asm/uaccess.h>
+#include <asm/hardirq.h>

 /* #define DEBUG_UNALIGNED 1 */

@@ -454,6 +455,8 @@ void handle_unaligned(struct pt_regs *regs)
 	struct siginfo si;
 	register int flop=0;	/* true if this is a flop */

+	__inc_irq_stat(irq_unaligned_count);
+
 	/* log a message with pacing */
 	if (user_mode(regs)) {
 		if (current->thread.flags & PARISC_UAC_SIGBUS) {
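
The traps.c and unaligned.c hunks feed the two new rows (FPA and UAH) that arch_show_interrupts() prints above. __inc_irq_stat(member) is the generic helper that bumps the named field of the per-CPU irq_stat structure, so for these hunks to build, the parisc irq_cpustat_t presumably carries matching counters. A hedged sketch of that assumed layout (field names taken from the counters referenced in this diff; the declaration itself is not part of this patch):

    /* Assumed per-CPU stat layout; not copied from the tree. */
    typedef struct {
            unsigned int __softirq_pending;
            unsigned int kernel_stack_usage;
    #ifdef CONFIG_IRQSTACKS
            unsigned int irq_stack_usage;
    #endif
    #ifdef CONFIG_SMP
            unsigned int irq_resched_count;
            unsigned int irq_call_count;
    #endif
            unsigned int irq_unaligned_count;       /* UAH row */
            unsigned int irq_fpassist_count;        /* FPA row */
            unsigned int irq_tlb_count;
    } irq_cpustat_t;

    /* __inc_irq_stat(irq_unaligned_count) then expands to a this-CPU increment
     * of that field, as used in handle_unaligned() above. */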