Diffstat (limited to 'arch'): 236 files changed, 2736 insertions, 2025 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index 4b0669cbb3b0..2505740b81d2 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -30,6 +30,10 @@ config OPROFILE_EVENT_MULTIPLEX
 config HAVE_OPROFILE
         bool
 
+config OPROFILE_NMI_TIMER
+        def_bool y
+        depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI
+
 config KPROBES
         bool "Kprobes"
         depends on MODULES
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index e084b7e981e8..b259c7c644e3 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -220,8 +220,9 @@ config NEED_MACH_MEMORY_H
           be avoided when possible.
 
 config PHYS_OFFSET
-        hex "Physical address of main memory"
+        hex "Physical address of main memory" if MMU
         depends on !ARM_PATCH_PHYS_VIRT && !NEED_MACH_MEMORY_H
+        default DRAM_BASE if !MMU
         help
           Please provide the physical address corresponding to the
           location of main memory in your system.
@@ -1245,7 +1246,7 @@ config PL310_ERRATA_588369
 
 config ARM_ERRATA_720789
         bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID"
-        depends on CPU_V7 && SMP
+        depends on CPU_V7
         help
           This option enables the workaround for the 720789 Cortex-A9 (prior to
           r2p0) erratum. A faulty ASID can be sent to the other CPUs for the
@@ -1281,7 +1282,7 @@ config ARM_ERRATA_743622
 
 config ARM_ERRATA_751472
         bool "ARM errata: Interrupted ICIALLUIS may prevent completion of broadcasted operation"
-        depends on CPU_V7 && SMP
+        depends on CPU_V7
         help
           This option enables the workaround for the 751472 Cortex-A9 (prior
           to r3p0) erratum. An interrupted ICIALLUIS operation may prevent the
diff --git a/arch/arm/common/pl330.c b/arch/arm/common/pl330.c
index f407a6b35d3d..8d8df744f7a5 100644
--- a/arch/arm/common/pl330.c
+++ b/arch/arm/common/pl330.c
@@ -221,17 +221,6 @@
  */
 #define MCODE_BUFF_PER_REQ 256
 
-/*
- * Mark a _pl330_req as free.
- * We do it by writing DMAEND as the first instruction
- * because no valid request is going to have DMAEND as
- * its first instruction to execute.
- */
-#define MARK_FREE(req) do { \
-                _emit_END(0, (req)->mc_cpu); \
-                (req)->mc_len = 0; \
-        } while (0)
-
 /* If the _pl330_req is available to the client */
 #define IS_FREE(req) (*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)
 
@@ -301,8 +290,10 @@ struct pl330_thread {
         struct pl330_dmac *dmac;
         /* Only two at a time */
         struct _pl330_req req[2];
-        /* Index of the last submitted request */
+        /* Index of the last enqueued request */
         unsigned lstenq;
+        /* Index of the last submitted request or -1 if the DMA is stopped */
+        int req_running;
 };
 
 enum pl330_dmac_state {
@@ -778,6 +769,22 @@ static inline void _execute_DBGINSN(struct pl330_thread *thrd,
         writel(0, regs + DBGCMD);
 }
 
+/*
+ * Mark a _pl330_req as free.
+ * We do it by writing DMAEND as the first instruction
+ * because no valid request is going to have DMAEND as
+ * its first instruction to execute.
+ */
+static void mark_free(struct pl330_thread *thrd, int idx)
+{
+        struct _pl330_req *req = &thrd->req[idx];
+
+        _emit_END(0, req->mc_cpu);
+        req->mc_len = 0;
+
+        thrd->req_running = -1;
+}
+
 static inline u32 _state(struct pl330_thread *thrd)
 {
         void __iomem *regs = thrd->dmac->pinfo->base;
@@ -836,31 +843,6 @@ static inline u32 _state(struct pl330_thread *thrd)
         }
 }
 
-/* If the request 'req' of thread 'thrd' is currently active */
-static inline bool _req_active(struct pl330_thread *thrd,
-                struct _pl330_req *req)
-{
-        void __iomem *regs = thrd->dmac->pinfo->base;
-        u32 buf = req->mc_bus, pc = readl(regs + CPC(thrd->id));
-
-        if (IS_FREE(req))
-                return false;
-
-        return (pc >= buf && pc <= buf + req->mc_len) ? true : false;
-}
-
-/* Returns 0 if the thread is inactive, ID of active req + 1 otherwise */
-static inline unsigned _thrd_active(struct pl330_thread *thrd)
-{
-        if (_req_active(thrd, &thrd->req[0]))
-                return 1; /* First req active */
-
-        if (_req_active(thrd, &thrd->req[1]))
-                return 2; /* Second req active */
-
-        return 0;
-}
-
 static void _stop(struct pl330_thread *thrd)
 {
         void __iomem *regs = thrd->dmac->pinfo->base;
@@ -892,17 +874,22 @@ static bool _trigger(struct pl330_thread *thrd)
         struct _arg_GO go;
         unsigned ns;
         u8 insn[6] = {0, 0, 0, 0, 0, 0};
+        int idx;
 
         /* Return if already ACTIVE */
         if (_state(thrd) != PL330_STATE_STOPPED)
                 return true;
 
-        if (!IS_FREE(&thrd->req[1 - thrd->lstenq]))
-                req = &thrd->req[1 - thrd->lstenq];
-        else if (!IS_FREE(&thrd->req[thrd->lstenq]))
-                req = &thrd->req[thrd->lstenq];
-        else
-                req = NULL;
+        idx = 1 - thrd->lstenq;
+        if (!IS_FREE(&thrd->req[idx]))
+                req = &thrd->req[idx];
+        else {
+                idx = thrd->lstenq;
+                if (!IS_FREE(&thrd->req[idx]))
+                        req = &thrd->req[idx];
+                else
+                        req = NULL;
+        }
 
         /* Return if no request */
         if (!req || !req->r)
@@ -933,6 +920,8 @@ static bool _trigger(struct pl330_thread *thrd)
         /* Only manager can execute GO */
         _execute_DBGINSN(thrd, insn, true);
 
+        thrd->req_running = idx;
+
         return true;
 }
 
@@ -1382,8 +1371,8 @@ static void pl330_dotask(unsigned long data)
 
                         thrd->req[0].r = NULL;
                         thrd->req[1].r = NULL;
-                        MARK_FREE(&thrd->req[0]);
-                        MARK_FREE(&thrd->req[1]);
+                        mark_free(thrd, 0);
+                        mark_free(thrd, 1);
 
                         /* Clear the reset flag */
                         pl330->dmac_tbd.reset_chan &= ~(1 << i);
@@ -1461,14 +1450,12 @@ int pl330_update(const struct pl330_info *pi)
 
                 thrd = &pl330->channels[id];
 
-                active = _thrd_active(thrd);
-                if (!active) /* Aborted */
+                active = thrd->req_running;
+                if (active == -1) /* Aborted */
                         continue;
 
-                active -= 1;
-
                 rqdone = &thrd->req[active];
-                MARK_FREE(rqdone);
+                mark_free(thrd, active);
 
                 /* Get going again ASAP */
                 _start(thrd);
@@ -1509,7 +1496,7 @@ int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
         struct pl330_thread *thrd = ch_id;
         struct pl330_dmac *pl330;
         unsigned long flags;
-        int ret = 0, active;
+        int ret = 0, active = thrd->req_running;
 
         if (!thrd || thrd->free || thrd->dmac->state == DYING)
                 return -EINVAL;
@@ -1525,28 +1512,24 @@ int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
 
                 thrd->req[0].r = NULL;
                 thrd->req[1].r = NULL;
-                MARK_FREE(&thrd->req[0]);
-                MARK_FREE(&thrd->req[1]);
+                mark_free(thrd, 0);
+                mark_free(thrd, 1);
                 break;
 
         case PL330_OP_ABORT:
-                active = _thrd_active(thrd);
-
                 /* Make sure the channel is stopped */
                 _stop(thrd);
 
                 /* ABORT is only for the active req */
-                if (!active)
+                if (active == -1)
                         break;
 
-                active--;
-
                 thrd->req[active].r = NULL;
-                MARK_FREE(&thrd->req[active]);
+                mark_free(thrd, active);
 
                 /* Start the next */
         case PL330_OP_START:
-                if (!_thrd_active(thrd) && !_start(thrd))
+                if ((active == -1) && !_start(thrd))
                         ret = -EIO;
                 break;
 
@@ -1587,14 +1570,13 @@ int pl330_chan_status(void *ch_id, struct pl330_chanstatus *pstatus)
         else
                 pstatus->faulting = false;
 
-        active = _thrd_active(thrd);
+        active = thrd->req_running;
 
-        if (!active) {
+        if (active == -1) {
                 /* Indicate that the thread is not running */
                 pstatus->top_req = NULL;
                 pstatus->wait_req = NULL;
         } else {
-                active--;
                 pstatus->top_req = thrd->req[active].r;
                 pstatus->wait_req = !IS_FREE(&thrd->req[1 - active])
                                         ? thrd->req[1 - active].r : NULL;
@@ -1659,9 +1641,9 @@ void *pl330_request_channel(const struct pl330_info *pi)
                         thrd->free = false;
                         thrd->lstenq = 1;
                         thrd->req[0].r = NULL;
-                        MARK_FREE(&thrd->req[0]);
+                        mark_free(thrd, 0);
                         thrd->req[1].r = NULL;
-                        MARK_FREE(&thrd->req[1]);
+                        mark_free(thrd, 1);
                         break;
                 }
         }
@@ -1767,14 +1749,14 @@ static inline void _reset_thread(struct pl330_thread *thrd)
         thrd->req[0].mc_bus = pl330->mcode_bus
                                 + (thrd->id * pi->mcbufsz);
         thrd->req[0].r = NULL;
-        MARK_FREE(&thrd->req[0]);
+        mark_free(thrd, 0);
 
         thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
                                 + pi->mcbufsz / 2;
         thrd->req[1].mc_bus = thrd->req[0].mc_bus
                                 + pi->mcbufsz / 2;
         thrd->req[1].r = NULL;
-        MARK_FREE(&thrd->req[1]);
+        mark_free(thrd, 1);
 }
 
 static int dmac_alloc_threads(struct pl330_dmac *pl330)
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
index 11a4192197c8..cf497ce41dfe 100644
--- a/arch/arm/configs/imx_v4_v5_defconfig
+++ b/arch/arm/configs/imx_v4_v5_defconfig
@@ -18,9 +18,10 @@ CONFIG_ARCH_MXC=y
 CONFIG_ARCH_IMX_V4_V5=y
 CONFIG_ARCH_MX1ADS=y
 CONFIG_MACH_SCB9328=y
+CONFIG_MACH_APF9328=y
 CONFIG_MACH_MX21ADS=y
 CONFIG_MACH_MX25_3DS=y
-CONFIG_MACH_EUKREA_CPUIMX25=y
+CONFIG_MACH_EUKREA_CPUIMX25SD=y
 CONFIG_MACH_MX27ADS=y
 CONFIG_MACH_PCM038=y
 CONFIG_MACH_CPUIMX27=y
@@ -72,17 +73,16 @@ CONFIG_MTD_CFI_GEOMETRY=y
 CONFIG_MTD_CFI_INTELEXT=y
 CONFIG_MTD_PHYSMAP=y
 CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_MXC=y
 CONFIG_MTD_UBI=y
 CONFIG_MISC_DEVICES=y
 CONFIG_EEPROM_AT24=y
 CONFIG_EEPROM_AT25=y
 CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_SMC91X=y
 CONFIG_DM9000=y
+CONFIG_SMC91X=y
 CONFIG_SMC911X=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_SMSC_PHY=y
 # CONFIG_INPUT_MOUSEDEV is not set
 CONFIG_INPUT_EVDEV=y
 # CONFIG_INPUT_KEYBOARD is not set
@@ -100,6 +100,7 @@ CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_IMX=y
 CONFIG_SPI=y
 CONFIG_SPI_IMX=y
+CONFIG_SPI_SPIDEV=y
 CONFIG_W1=y
 CONFIG_W1_MASTER_MXC=y
 CONFIG_W1_SLAVE_THERM=y
@@ -139,6 +140,7 @@ CONFIG_MMC=y
 CONFIG_MMC_MXC=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
 CONFIG_LEDS_MC13783=y
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=y
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
index a7e777581378..945a34f2a34d 100644
--- a/arch/arm/configs/omap1_defconfig
+++ b/arch/arm/configs/omap1_defconfig
@@ -48,12 +48,7 @@ CONFIG_MACH_SX1=y
 CONFIG_MACH_NOKIA770=y
 CONFIG_MACH_AMS_DELTA=y
 CONFIG_MACH_OMAP_GENERIC=y
-CONFIG_OMAP_ARM_216MHZ=y
-CONFIG_OMAP_ARM_195MHZ=y
-CONFIG_OMAP_ARM_192MHZ=y
 CONFIG_OMAP_ARM_182MHZ=y
-CONFIG_OMAP_ARM_168MHZ=y
-# CONFIG_OMAP_ARM_60MHZ is not set
 # CONFIG_ARM_THUMB is not set
 CONFIG_PCCARD=y
 CONFIG_OMAP_CF=y
diff --git a/arch/arm/include/asm/unwind.h b/arch/arm/include/asm/unwind.h
index a5edf421005c..d1c3f3a71c94 100644
--- a/arch/arm/include/asm/unwind.h
+++ b/arch/arm/include/asm/unwind.h
@@ -30,14 +30,15 @@ enum unwind_reason_code {
 };
 
 struct unwind_idx {
-        unsigned long addr;
+        unsigned long addr_offset;
         unsigned long insn;
 };
 
 struct unwind_table {
         struct list_head list;
-        struct unwind_idx *start;
-        struct unwind_idx *stop;
+        const struct unwind_idx *start;
+        const struct unwind_idx *origin;
+        const struct unwind_idx *stop;
         unsigned long begin_addr;
         unsigned long end_addr;
 };
@@ -49,15 +50,6 @@ extern struct unwind_table *unwind_table_add(unsigned long start,
 extern void unwind_table_del(struct unwind_table *tab);
 extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk);
 
-#ifdef CONFIG_ARM_UNWIND
-extern int __init unwind_init(void);
-#else
-static inline int __init unwind_init(void)
-{
-        return 0;
-}
-#endif
-
 #endif /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_ARM_UNWIND
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index c475379199b1..88b0941ce51e 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -353,15 +353,15 @@ validate_group(struct perf_event *event)
         fake_pmu.used_mask = fake_used_mask;
 
         if (!validate_event(&fake_pmu, leader))
-                return -ENOSPC;
+                return -EINVAL;
 
         list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
                 if (!validate_event(&fake_pmu, sibling))
-                        return -ENOSPC;
+                        return -EINVAL;
         }
 
         if (!validate_event(&fake_pmu, event))
-                return -ENOSPC;
+                return -EINVAL;
 
         return 0;
 }
@@ -640,6 +640,9 @@ static struct platform_device_id armpmu_plat_device_ids[] = {
 
 static int __devinit armpmu_device_probe(struct platform_device *pdev)
 {
+        if (!cpu_pmu)
+                return -ENODEV;
+
         cpu_pmu->plat_device = pdev;
         return 0;
 }
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 3d0c6fb74ae4..e8e8fe505df1 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -183,7 +183,8 @@ void cpu_idle(void)
 
         /* endless idle loop with no priority at all */
         while (1) {
-                tick_nohz_stop_sched_tick(1);
+                tick_nohz_idle_enter();
+                rcu_idle_enter();
                 leds_event(led_idle_start);
                 while (!need_resched()) {
 #ifdef CONFIG_HOTPLUG_CPU
@@ -213,7 +214,8 @@ void cpu_idle(void)
                         }
                 }
                 leds_event(led_idle_end);
-                tick_nohz_restart_sched_tick();
+                rcu_idle_exit();
+                tick_nohz_idle_exit();
                 preempt_enable_no_resched();
                 schedule();
                 preempt_disable();
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 3448a3f9cc8c..c0b59bff6be6 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -52,6 +52,7 @@
 #include <asm/mach/time.h>
 #include <asm/traps.h>
 #include <asm/unwind.h>
+#include <asm/memblock.h>
 
 #if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
 #include "compat.h"
@@ -895,8 +896,6 @@ void __init setup_arch(char **cmdline_p)
 {
         struct machine_desc *mdesc;
 
-        unwind_init();
-
         setup_processor();
         mdesc = setup_machine_fdt(__atags_pointer);
         if (!mdesc)
@@ -904,6 +903,12 @@ void __init setup_arch(char **cmdline_p)
         machine_desc = mdesc;
         machine_name = mdesc->name;
 
+#ifdef CONFIG_ZONE_DMA
+        if (mdesc->dma_zone_size) {
+                extern unsigned long arm_dma_zone_size;
+                arm_dma_zone_size = mdesc->dma_zone_size;
+        }
+#endif
         if (mdesc->soft_reboot)
                 reboot_setup("s");
 
@@ -934,12 +939,6 @@ void __init setup_arch(char **cmdline_p)
 
         tcm_init();
 
-#ifdef CONFIG_ZONE_DMA
-        if (mdesc->dma_zone_size) {
-                extern unsigned long arm_dma_zone_size;
-                arm_dma_zone_size = mdesc->dma_zone_size;
-        }
-#endif
 #ifdef CONFIG_MULTI_IRQ_HANDLER
         handle_arch_irq = mdesc->handle_irq;
 #endif
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index e7e8365795c3..00df012c4678 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__aeabi_unwind_cpp_pr2);
 
 struct unwind_ctrl_block {
         unsigned long vrs[16];     /* virtual register set */
-        unsigned long *insn;       /* pointer to the current instructions word */
+        const unsigned long *insn; /* pointer to the current instructions word */
         int entries;               /* number of entries left to interpret */
         int byte;                  /* current byte number in the instructions word */
 };
@@ -83,8 +83,9 @@ enum regs {
         PC = 15
 };
 
-extern struct unwind_idx __start_unwind_idx[];
-extern struct unwind_idx __stop_unwind_idx[];
+extern const struct unwind_idx __start_unwind_idx[];
+static const struct unwind_idx *__origin_unwind_idx;
+extern const struct unwind_idx __stop_unwind_idx[];
 
 static DEFINE_SPINLOCK(unwind_lock);
 static LIST_HEAD(unwind_tables);
@@ -98,45 +99,99 @@ static LIST_HEAD(unwind_tables);
 })
 
 /*
- * Binary search in the unwind index. The entries entries are
+ * Binary search in the unwind index. The entries are
  * guaranteed to be sorted in ascending order by the linker.
+ *
+ * start = first entry
+ * origin = first entry with positive offset (or stop if there is no such entry)
+ * stop - 1 = last entry
  */
-static struct unwind_idx *search_index(unsigned long addr,
-                                       struct unwind_idx *first,
-                                       struct unwind_idx *last)
+static const struct unwind_idx *search_index(unsigned long addr,
+                                             const struct unwind_idx *start,
+                                             const struct unwind_idx *origin,
+                                             const struct unwind_idx *stop)
 {
-        pr_debug("%s(%08lx, %p, %p)\n", __func__, addr, first, last);
+        unsigned long addr_prel31;
+
+        pr_debug("%s(%08lx, %p, %p, %p)\n",
+                        __func__, addr, start, origin, stop);
+
+        /*
+         * only search in the section with the matching sign. This way the
+         * prel31 numbers can be compared as unsigned longs.
+         */
+        if (addr < (unsigned long)start)
+                /* negative offsets: [start; origin) */
+                stop = origin;
+        else
+                /* positive offsets: [origin; stop) */
+                start = origin;
+
+        /* prel31 for address relavive to start */
+        addr_prel31 = (addr - (unsigned long)start) & 0x7fffffff;
 
-        if (addr < first->addr) {
+        while (start < stop - 1) {
+                const struct unwind_idx *mid = start + ((stop - start) >> 1);
+
+                /*
+                 * As addr_prel31 is relative to start an offset is needed to
+                 * make it relative to mid.
+                 */
+                if (addr_prel31 - ((unsigned long)mid - (unsigned long)start) <
+                                mid->addr_offset)
+                        stop = mid;
+                else {
+                        /* keep addr_prel31 relative to start */
+                        addr_prel31 -= ((unsigned long)mid -
+                                        (unsigned long)start);
+                        start = mid;
+                }
+        }
+
+        if (likely(start->addr_offset <= addr_prel31))
+                return start;
+        else {
                 pr_warning("unwind: Unknown symbol address %08lx\n", addr);
                 return NULL;
-        } else if (addr >= last->addr)
-                return last;
+        }
+}
 
-        while (first < last - 1) {
-                struct unwind_idx *mid = first + ((last - first + 1) >> 1);
+static const struct unwind_idx *unwind_find_origin(
+                const struct unwind_idx *start, const struct unwind_idx *stop)
+{
+        pr_debug("%s(%p, %p)\n", __func__, start, stop);
+        while (start < stop) {
+                const struct unwind_idx *mid = start + ((stop - start) >> 1);
 
-                if (addr < mid->addr)
-                        last = mid;
+                if (mid->addr_offset >= 0x40000000)
+                        /* negative offset */
+                        start = mid + 1;
                 else
-                        first = mid;
+                        /* positive offset */
+                        stop = mid;
         }
-
-        return first;
+        pr_debug("%s -> %p\n", __func__, stop);
+        return stop;
 }
 
-static struct unwind_idx *unwind_find_idx(unsigned long addr)
+static const struct unwind_idx *unwind_find_idx(unsigned long addr)
 {
-        struct unwind_idx *idx = NULL;
+        const struct unwind_idx *idx = NULL;
         unsigned long flags;
 
         pr_debug("%s(%08lx)\n", __func__, addr);
 
-        if (core_kernel_text(addr))
+        if (core_kernel_text(addr)) {
+                if (unlikely(!__origin_unwind_idx))
+                        __origin_unwind_idx =
                                unwind_find_origin(__start_unwind_idx,
+                                                __stop_unwind_idx);
+
                 /* main unwind table */
                 idx = search_index(addr, __start_unwind_idx,
-                                __stop_unwind_idx - 1);
-        else {
+                                __origin_unwind_idx,
+                                __stop_unwind_idx);
+        } else {
                 /* module unwind tables */
                 struct unwind_table *table;
 
@@ -145,7 +200,8 @@ static struct unwind_idx *unwind_find_idx(unsigned long addr)
                         if (addr >= table->begin_addr &&
                             addr < table->end_addr) {
                                 idx = search_index(addr, table->start,
-                                                   table->stop - 1);
+                                                   table->origin,
+                                                   table->stop);
                                 /* Move-to-front to exploit common traces */
                                 list_move(&table->list, &unwind_tables);
                                 break;
@@ -274,7 +330,7 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
 int unwind_frame(struct stackframe *frame)
 {
         unsigned long high, low;
-        struct unwind_idx *idx;
+        const struct unwind_idx *idx;
         struct unwind_ctrl_block ctrl;
 
         /* only go to a higher address on the stack */
@@ -399,7 +455,6 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
                                       unsigned long text_size)
 {
         unsigned long flags;
-        struct unwind_idx *idx;
         struct unwind_table *tab = kmalloc(sizeof(*tab), GFP_KERNEL);
 
         pr_debug("%s(%08lx, %08lx, %08lx, %08lx)\n", __func__, start, size,
@@ -408,15 +463,12 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
         if (!tab)
                 return tab;
 
-        tab->start = (struct unwind_idx *)start;
-        tab->stop = (struct unwind_idx *)(start + size);
+        tab->start = (const struct unwind_idx *)start;
+        tab->stop = (const struct unwind_idx *)(start + size);
+        tab->origin = unwind_find_origin(tab->start, tab->stop);
         tab->begin_addr = text_addr;
         tab->end_addr = text_addr + text_size;
 
-        /* Convert the symbol addresses to absolute values */
-        for (idx = tab->start; idx < tab->stop; idx++)
-                idx->addr = prel31_to_addr(&idx->addr);
-
         spin_lock_irqsave(&unwind_lock, flags);
         list_add_tail(&tab->list, &unwind_tables);
         spin_unlock_irqrestore(&unwind_lock, flags);
@@ -437,16 +489,3 @@ void unwind_table_del(struct unwind_table *tab)
 
         kfree(tab);
 }
-
-int __init unwind_init(void)
-{
-        struct unwind_idx *idx;
-
-        /* Convert the symbol addresses to absolute values */
-        for (idx = __start_unwind_idx; idx < __stop_unwind_idx; idx++)
-                idx->addr = prel31_to_addr(&idx->addr);
-
-        pr_debug("unwind: ARM stack unwinding initialised\n");
-
-        return 0;
-}
diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c
index 66591fa53e05..ad930688358c 100644
--- a/arch/arm/mach-at91/at91rm9200_devices.c
+++ b/arch/arm/mach-at91/at91rm9200_devices.c
@@ -83,7 +83,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
  *  USB Device (Gadget)
  * -------------------------------------------------------------------- */
 
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
 static struct at91_udc_data udc_data;
 
 static struct resource udc_resources[] = {
diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c
index b84a9f642f59..0d20677fbef0 100644
--- a/arch/arm/mach-at91/at91sam9260.c
+++ b/arch/arm/mach-at91/at91sam9260.c
@@ -195,9 +195,9 @@ static struct clk_lookup periph_clocks_lookups[] = {
         CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
         CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
         CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
-        CLKDEV_CON_DEV_ID("t3_clk", "atmel_tcb.1", &tc3_clk),
-        CLKDEV_CON_DEV_ID("t4_clk", "atmel_tcb.1", &tc4_clk),
-        CLKDEV_CON_DEV_ID("t5_clk", "atmel_tcb.1", &tc5_clk),
+        CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tc3_clk),
+        CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk),
+        CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk),
         CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc_clk),
         /* more usart lookup table for DT entries */
         CLKDEV_CON_DEV_ID("usart", "fffff200.serial", &mck),
diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c
index 25e3464fb07f..629fa9774972 100644
--- a/arch/arm/mach-at91/at91sam9260_devices.c
+++ b/arch/arm/mach-at91/at91sam9260_devices.c
@@ -84,7 +84,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
  *  USB Device (Gadget)
  * -------------------------------------------------------------------- */
 
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
 static struct at91_udc_data udc_data;
 
 static struct resource udc_resources[] = {
diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c
index ae78f4d03b73..a178b58b0b9c 100644
--- a/arch/arm/mach-at91/at91sam9261_devices.c
+++ b/arch/arm/mach-at91/at91sam9261_devices.c
@@ -87,7 +87,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
  *  USB Device (Gadget)
  * -------------------------------------------------------------------- */
 
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
 static struct at91_udc_data udc_data;
 
 static struct resource udc_resources[] = {
diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c
index ad017eb1f8df..d5fbac9ff4fa 100644
--- a/arch/arm/mach-at91/at91sam9263_devices.c
+++ b/arch/arm/mach-at91/at91sam9263_devices.c
@@ -92,7 +92,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
  *  USB Device (Gadget)
  * -------------------------------------------------------------------- */
 
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
 static struct at91_udc_data udc_data;
 
 static struct resource udc_resources[] = {
diff --git a/arch/arm/mach-at91/include/mach/system_rev.h b/arch/arm/mach-at91/include/mach/system_rev.h
index 8f4866045b41..ec164a4124c9 100644
--- a/arch/arm/mach-at91/include/mach/system_rev.h
+++ b/arch/arm/mach-at91/include/mach/system_rev.h
@@ -19,7 +19,7 @@
 #define BOARD_HAVE_NAND_16BIT (1 << 31)
 static inline int board_have_nand_16bit(void)
 {
-        return system_rev & BOARD_HAVE_NAND_16BIT;
+        return (system_rev & BOARD_HAVE_NAND_16BIT) ? 1 : 0;
 }
 
 #endif /* __ARCH_SYSTEM_REV_H__ */
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index 1d7d24995226..6659a90dbcad 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -753,7 +753,7 @@ static struct snd_platform_data da850_evm_snd_data = {
         .num_serializer = ARRAY_SIZE(da850_iis_serializer_direction),
         .tdm_slots = 2,
         .serial_dir = da850_iis_serializer_direction,
-        .asp_chan_q = EVENTQ_1,
+        .asp_chan_q = EVENTQ_0,
         .version = MCASP_VERSION_2,
         .txnumevt = 1,
         .rxnumevt = 1,
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index 1918ae711428..46e1f4173b97 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -107,7 +107,7 @@ static struct mtd_partition davinci_nand_partitions[] = {
                 /* UBL (a few copies) plus U-Boot */
                 .name = "bootloader",
                 .offset = 0,
-                .size = 28 * NAND_BLOCK_SIZE,
+                .size = 30 * NAND_BLOCK_SIZE,
                 .mask_flags = MTD_WRITEABLE, /* force read-only */
         }, {
                 /* U-Boot environment */
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index e574d7f837a8..635bf7740157 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -564,7 +564,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
         int val;
         u32 value;
 
-        if (!vpif_vsclkdis_reg || !cpld_client)
+        if (!vpif_vidclkctl_reg || !cpld_client)
                 return -ENXIO;
 
         val = i2c_smbus_read_byte(cpld_client);
@@ -572,7 +572,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
                 return val;
 
         spin_lock_irqsave(&vpif_reg_lock, flags);
-        value = __raw_readl(vpif_vsclkdis_reg);
+        value = __raw_readl(vpif_vidclkctl_reg);
         if (mux_mode) {
                 val &= VPIF_INPUT_TWO_CHANNEL;
                 value |= VIDCH1CLK;
@@ -580,7 +580,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
                 val |= VPIF_INPUT_ONE_CHANNEL;
                 value &= ~VIDCH1CLK;
         }
-        __raw_writel(value, vpif_vsclkdis_reg);
+        __raw_writel(value, vpif_vidclkctl_reg);
         spin_unlock_irqrestore(&vpif_reg_lock, flags);
 
         err = i2c_smbus_write_byte(cpld_client, val);
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index 0b68ed534f8e..af27c130595f 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -161,7 +161,6 @@ static struct clk dsp_clk = {
         .name = "dsp",
         .parent = &pll1_sysclk1,
         .lpsc = DM646X_LPSC_C64X_CPU,
-        .flags = PSC_DSP,
         .usecount = 1, /* REVISIT how to disable? */
 };
 
diff --git a/arch/arm/mach-davinci/include/mach/psc.h b/arch/arm/mach-davinci/include/mach/psc.h
index fa59c097223d..8bc3fc256171 100644
--- a/arch/arm/mach-davinci/include/mach/psc.h
+++ b/arch/arm/mach-davinci/include/mach/psc.h
@@ -233,7 +233,7 @@
 #define PTCMD 0x120
 #define PTSTAT 0x128
 #define PDSTAT 0x200
-#define PDCTL1 0x304
+#define PDCTL 0x300
 #define MDSTAT 0x800
 #define MDCTL 0xA00
 
@@ -244,7 +244,10 @@
 #define PSC_STATE_ENABLE 3
 
 #define MDSTAT_STATE_MASK 0x3f
+#define PDSTAT_STATE_MASK 0x1f
 #define MDCTL_FORCE BIT(31)
+#define PDCTL_NEXT BIT(1)
+#define PDCTL_EPCGOOD BIT(8)
 
 #ifndef __ASSEMBLER__
 
diff --git a/arch/arm/mach-davinci/psc.c b/arch/arm/mach-davinci/psc.c
index 1fb6bdff38c1..d7e210f4b55c 100644
--- a/arch/arm/mach-davinci/psc.c
+++ b/arch/arm/mach-davinci/psc.c
@@ -52,7 +52,7 @@ int __init davinci_psc_is_clk_active(unsigned int ctlr, unsigned int id)
 void davinci_psc_config(unsigned int domain, unsigned int ctlr,
                 unsigned int id, bool enable, u32 flags)
 {
-        u32 epcpr, ptcmd, ptstat, pdstat, pdctl1, mdstat, mdctl;
+        u32 epcpr, ptcmd, ptstat, pdstat, pdctl, mdstat, mdctl;
         void __iomem *psc_base;
         struct davinci_soc_info *soc_info = &davinci_soc_info;
         u32 next_state = PSC_STATE_ENABLE;
@@ -79,11 +79,11 @@ void davinci_psc_config(unsigned int domain, unsigned int ctlr,
         mdctl |= MDCTL_FORCE;
         __raw_writel(mdctl, psc_base + MDCTL + 4 * id);
 
-        pdstat = __raw_readl(psc_base + PDSTAT);
-        if ((pdstat & 0x00000001) == 0) {
-                pdctl1 = __raw_readl(psc_base + PDCTL1);
-                pdctl1 |= 0x1;
-                __raw_writel(pdctl1, psc_base + PDCTL1);
+        pdstat = __raw_readl(psc_base + PDSTAT + 4 * domain);
+        if ((pdstat & PDSTAT_STATE_MASK) == 0) {
+                pdctl = __raw_readl(psc_base + PDCTL + 4 * domain);
+                pdctl |= PDCTL_NEXT;
+                __raw_writel(pdctl, psc_base + PDCTL + 4 * domain);
 
                 ptcmd = 1 << domain;
                 __raw_writel(ptcmd, psc_base + PTCMD);
@@ -92,9 +92,9 @@ void davinci_psc_config(unsigned int domain, unsigned int ctlr,
                         epcpr = __raw_readl(psc_base + EPCPR);
                 } while ((((epcpr >> domain) & 1) == 0));
 
-                pdctl1 = __raw_readl(psc_base + PDCTL1);
-                pdctl1 |= 0x100;
-                __raw_writel(pdctl1, psc_base + PDCTL1);
+                pdctl = __raw_readl(psc_base + PDCTL + 4 * domain);
+                pdctl |= PDCTL_EPCGOOD;
+                __raw_writel(pdctl, psc_base + PDCTL + 4 * domain);
         } else {
                 ptcmd = 1 << domain;
                 __raw_writel(ptcmd, psc_base + PTCMD);
diff --git a/arch/arm/mach-exynos/cpu.c b/arch/arm/mach-exynos/cpu.c
index 90ec247f3b37..cc8d4bd6d0f7 100644
--- a/arch/arm/mach-exynos/cpu.c
+++ b/arch/arm/mach-exynos/cpu.c
@@ -111,11 +111,6 @@ static struct map_desc exynos4_iodesc[] __initdata = {
                 .length = SZ_4K,
                 .type = MT_DEVICE,
         }, {
-                .virtual = (unsigned long)S5P_VA_SROMC,
-                .pfn = __phys_to_pfn(EXYNOS4_PA_SROMC),
-                .length = SZ_4K,
-                .type = MT_DEVICE,
-        }, {
                 .virtual = (unsigned long)S3C_VA_USB_HSPHY,
                 .pfn = __phys_to_pfn(EXYNOS4_PA_HSPHY),
                 .length = SZ_4K,
diff --git a/arch/arm/mach-exynos/mct.c b/arch/arm/mach-exynos/mct.c
index 97343df8f132..85b5527d0918 100644
--- a/arch/arm/mach-exynos/mct.c
+++ b/arch/arm/mach-exynos/mct.c
@@ -44,8 +44,6 @@ struct mct_clock_event_device {
         char name[10];
 };
 
-static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
-
 static void exynos4_mct_write(unsigned int value, void *addr)
 {
         void __iomem *stat_addr;
@@ -264,6 +262,9 @@ static void exynos4_clockevent_init(void)
 }
 
 #ifdef CONFIG_LOCAL_TIMERS
+
+static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
+
 /* Clock event handling */
 static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
 {
@@ -428,9 +429,13 @@ int __cpuinit local_timer_setup(struct clock_event_device *evt)
 
 void local_timer_stop(struct clock_event_device *evt)
 {
+        unsigned int cpu = smp_processor_id();
         evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
         if (mct_int_type == MCT_INT_SPI)
-                disable_irq(evt->irq);
+                if (cpu == 0)
+                        remove_irq(evt->irq, &mct_tick0_event_irq);
+                else
+                        remove_irq(evt->irq, &mct_tick1_event_irq);
         else
                 disable_percpu_irq(IRQ_MCT_LOCALTIMER);
 }
@@ -443,6 +448,7 @@ static void __init exynos4_timer_resources(void)
 
         clk_rate = clk_get_rate(mct_clk);
 
+#ifdef CONFIG_LOCAL_TIMERS
         if (mct_int_type == MCT_INT_PPI) {
                 int err;
 
@@ -452,6 +458,7 @@ static void __init exynos4_timer_resources(void)
                 WARN(err, "MCT: can't request IRQ %d (%d)\n",
                      IRQ_MCT_LOCALTIMER, err);
         }
+#endif /* CONFIG_LOCAL_TIMERS */
 }
 
 static void __init exynos4_timer_init(void)
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index c44aa974e79c..0e6f1af260b6 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -132,7 +132,7 @@ config MACH_MX25_3DS
         select IMX_HAVE_PLATFORM_MXC_NAND
         select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
 
-config MACH_EUKREA_CPUIMX25
+config MACH_EUKREA_CPUIMX25SD
         bool "Support Eukrea CPUIMX25 Platform"
         select SOC_IMX25
         select IMX_HAVE_PLATFORM_FLEXCAN
@@ -148,7 +148,7 @@ config MACH_EUKREA_CPUIMX25
 
 choice
         prompt "Baseboard"
-        depends on MACH_EUKREA_CPUIMX25
+        depends on MACH_EUKREA_CPUIMX25SD
         default MACH_EUKREA_MBIMXSD25_BASEBOARD
 
 config MACH_EUKREA_MBIMXSD25_BASEBOARD
@@ -542,7 +542,7 @@ config MACH_MX35_3DS
           Include support for MX35PDK platform. This includes specific
           configurations for the board and its peripherals.
 
-config MACH_EUKREA_CPUIMX35
+config MACH_EUKREA_CPUIMX35SD
         bool "Support Eukrea CPUIMX35 Platform"
         select SOC_IMX35
         select IMX_HAVE_PLATFORM_FLEXCAN
@@ -560,7 +560,7 @@ config MACH_EUKREA_CPUIMX35
 
 choice
         prompt "Baseboard"
-        depends on MACH_EUKREA_CPUIMX35
+        depends on MACH_EUKREA_CPUIMX35SD
         default MACH_EUKREA_MBIMXSD35_BASEBOARD
 
 config MACH_EUKREA_MBIMXSD35_BASEBOARD
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index aba73214c2a8..d97f409ce98b 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -24,7 +24,7 @@ obj-$(CONFIG_MACH_MX21ADS) += mach-mx21ads.o
 
 # i.MX25 based machines
 obj-$(CONFIG_MACH_MX25_3DS) += mach-mx25_3ds.o
-obj-$(CONFIG_MACH_EUKREA_CPUIMX25) += mach-eukrea_cpuimx25.o
+obj-$(CONFIG_MACH_EUKREA_CPUIMX25SD) += mach-eukrea_cpuimx25.o
 obj-$(CONFIG_MACH_EUKREA_MBIMXSD25_BASEBOARD) += eukrea_mbimxsd25-baseboard.o
 
 # i.MX27 based machines
@@ -57,7 +57,7 @@ obj-$(CONFIG_MACH_BUG) += mach-bug.o
 # i.MX35 based machines
 obj-$(CONFIG_MACH_PCM043) += mach-pcm043.o
 obj-$(CONFIG_MACH_MX35_3DS) += mach-mx35_3ds.o
-obj-$(CONFIG_MACH_EUKREA_CPUIMX35) += mach-cpuimx35.o
+obj-$(CONFIG_MACH_EUKREA_CPUIMX35SD) += mach-cpuimx35.o
 obj-$(CONFIG_MACH_EUKREA_MBIMXSD35_BASEBOARD) += eukrea_mbimxsd35-baseboard.o
 obj-$(CONFIG_MACH_VPR200) += mach-vpr200.o
 
diff --git a/arch/arm/mach-imx/clock-imx35.c b/arch/arm/mach-imx/clock-imx35.c
index 8116f119517d..ac8238caecb9 100644
--- a/arch/arm/mach-imx/clock-imx35.c
+++ b/arch/arm/mach-imx/clock-imx35.c
@@ -507,7 +507,7 @@ static struct clk_lookup lookups[] = {
 
 int __init mx35_clocks_init()
 {
-        unsigned int cgr2 = 3 << 26, cgr3 = 0;
+        unsigned int cgr2 = 3 << 26;
 
 #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
         cgr2 |= 3 << 16;
@@ -521,6 +521,12 @@ int __init mx35_clocks_init()
         __raw_writel((3 << 18), CCM_BASE + CCM_CGR0);
         __raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16),
                         CCM_BASE + CCM_CGR1);
+        __raw_writel(cgr2, CCM_BASE + CCM_CGR2);
+        __raw_writel(0, CCM_BASE + CCM_CGR3);
+
+        clk_enable(&iim_clk);
+        imx_print_silicon_rev("i.MX35", mx35_revision());
+        clk_disable(&iim_clk);
 
         /*
          * Check if we came up in internal boot mode. If yes, we need some
@@ -529,17 +535,11 @@ int __init mx35_clocks_init()
          */
         if (!(__raw_readl(CCM_BASE + CCM_RCSR) & (3 << 10))) {
                 /* Additionally turn on UART1, SCC, and IIM clocks */
-                cgr2 |= 3 << 16 | 3 << 4;
-                cgr3 |= 3 << 2;
+                clk_enable(&iim_clk);
+                clk_enable(&uart1_clk);
+                clk_enable(&scc_clk);
         }
 
-        __raw_writel(cgr2, CCM_BASE + CCM_CGR2);
-        __raw_writel(cgr3, CCM_BASE + CCM_CGR3);
-
-        clk_enable(&iim_clk);
-        imx_print_silicon_rev("i.MX35", mx35_revision());
-        clk_disable(&iim_clk);
-
 #ifdef CONFIG_MXC_USE_EPIT
         epit_timer_init(&epit1_clk,
                         MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1);
diff --git a/arch/arm/mach-imx/mach-cpuimx35.c b/arch/arm/mach-imx/mach-cpuimx35.c index 66af2e8f7e57..362aae780601 100644 --- a/arch/arm/mach-imx/mach-cpuimx35.c +++ b/arch/arm/mach-imx/mach-cpuimx35.c | |||
@@ -53,12 +53,18 @@ static const struct imxi2c_platform_data | |||
53 | .bitrate = 100000, | 53 | .bitrate = 100000, |
54 | }; | 54 | }; |
55 | 55 | ||
56 | #define TSC2007_IRQGPIO IMX_GPIO_NR(3, 2) | ||
57 | static int tsc2007_get_pendown_state(void) | ||
58 | { | ||
59 | return !gpio_get_value(TSC2007_IRQGPIO); | ||
60 | } | ||
61 | |||
56 | static struct tsc2007_platform_data tsc2007_info = { | 62 | static struct tsc2007_platform_data tsc2007_info = { |
57 | .model = 2007, | 63 | .model = 2007, |
58 | .x_plate_ohms = 180, | 64 | .x_plate_ohms = 180, |
65 | .get_pendown_state = tsc2007_get_pendown_state, | ||
59 | }; | 66 | }; |
60 | 67 | ||
61 | #define TSC2007_IRQGPIO IMX_GPIO_NR(3, 2) | ||
62 | static struct i2c_board_info eukrea_cpuimx35_i2c_devices[] = { | 68 | static struct i2c_board_info eukrea_cpuimx35_i2c_devices[] = { |
63 | { | 69 | { |
64 | I2C_BOARD_INFO("pcf8563", 0x51), | 70 | I2C_BOARD_INFO("pcf8563", 0x51), |
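Note on the cpuimx35 hunk above: the TSC2007_IRQGPIO define moves ahead of the platform data and a get_pendown_state callback is added that reports the pen as down while the active-low PENIRQ GPIO reads 0. A minimal userspace sketch of that inversion; the helper below is a stand-in for the kernel's gpio_get_value(), not the real API:

#include <stdio.h>

/* Stand-in for gpio_get_value(): returns the raw level of the PENIRQ line. */
static int fake_gpio_get_value(int level) { return level; }

/* Pen is down while the active-low PENIRQ line is pulled to 0. */
static int tsc2007_get_pendown_state(int raw_level)
{
	return !fake_gpio_get_value(raw_level);
}

int main(void)
{
	printf("line high -> pen down? %d\n", tsc2007_get_pendown_state(1)); /* 0: pen up */
	printf("line low  -> pen down? %d\n", tsc2007_get_pendown_state(0)); /* 1: pen down */
	return 0;
}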
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c index 9cd860a27af5..8deb012189b5 100644 --- a/arch/arm/mach-imx/mach-imx6q.c +++ b/arch/arm/mach-imx/mach-imx6q.c | |||
@@ -37,14 +37,15 @@ static void __init imx6q_map_io(void) | |||
37 | imx6q_clock_map_io(); | 37 | imx6q_clock_map_io(); |
38 | } | 38 | } |
39 | 39 | ||
40 | static void __init imx6q_gpio_add_irq_domain(struct device_node *np, | 40 | static int __init imx6q_gpio_add_irq_domain(struct device_node *np, |
41 | struct device_node *interrupt_parent) | 41 | struct device_node *interrupt_parent) |
42 | { | 42 | { |
43 | static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS - | 43 | static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS; |
44 | 32 * 7; /* imx6q gets 7 gpio ports */ | ||
45 | 44 | ||
45 | gpio_irq_base -= 32; | ||
46 | irq_domain_add_simple(np, gpio_irq_base); | 46 | irq_domain_add_simple(np, gpio_irq_base); |
47 | gpio_irq_base += 32; | 47 | |
48 | return 0; | ||
48 | } | 49 | } |
49 | 50 | ||
50 | static const struct of_device_id imx6q_irq_match[] __initconst = { | 51 | static const struct of_device_id imx6q_irq_match[] __initconst = { |
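Note on the imx6q hunk above (the imx51/imx53 hunks further down make the same change): instead of hard-coding "32 * <number of ports>", the DT callback now starts at MXC_GPIO_IRQ_START + ARCH_NR_GPIOS and subtracts 32 each time it runs, so the per-port IRQ base no longer depends on knowing the port count up front. A small sketch of the resulting allocation; the two constants are placeholders, the real values come from mach/irqs.h:

#include <stdio.h>

/* Placeholder values; the real constants come from mach/irqs.h. */
#define MXC_GPIO_IRQ_START 160
#define ARCH_NR_GPIOS      256

/* Mimics imx6q_gpio_add_irq_domain(): each GPIO port claims the next
 * 32-IRQ block below the previous one. */
static int gpio_add_irq_domain(void)
{
	static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;

	gpio_irq_base -= 32;
	return gpio_irq_base;
}

int main(void)
{
	for (int port = 0; port < 7; port++)	/* i.MX6Q has 7 GPIO ports */
		printf("port %d -> irq base %d\n", port, gpio_add_irq_domain());
	return 0;
}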
diff --git a/arch/arm/mach-msm/devices-iommu.c b/arch/arm/mach-msm/devices-iommu.c index 24030d0da6e3..0fb7a17df398 100644 --- a/arch/arm/mach-msm/devices-iommu.c +++ b/arch/arm/mach-msm/devices-iommu.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
19 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
20 | #include <linux/bootmem.h> | 20 | #include <linux/bootmem.h> |
21 | #include <linux/module.h> | ||
21 | #include <mach/irqs.h> | 22 | #include <mach/irqs.h> |
22 | #include <mach/iommu.h> | 23 | #include <mach/iommu.h> |
23 | 24 | ||
diff --git a/arch/arm/mach-mx5/board-mx51_babbage.c b/arch/arm/mach-mx5/board-mx51_babbage.c index 5c837603ff0f..24994bb52147 100644 --- a/arch/arm/mach-mx5/board-mx51_babbage.c +++ b/arch/arm/mach-mx5/board-mx51_babbage.c | |||
@@ -362,7 +362,7 @@ static void __init mx51_babbage_init(void) | |||
362 | { | 362 | { |
363 | iomux_v3_cfg_t usbh1stp = MX51_PAD_USBH1_STP__USBH1_STP; | 363 | iomux_v3_cfg_t usbh1stp = MX51_PAD_USBH1_STP__USBH1_STP; |
364 | iomux_v3_cfg_t power_key = NEW_PAD_CTRL(MX51_PAD_EIM_A27__GPIO2_21, | 364 | iomux_v3_cfg_t power_key = NEW_PAD_CTRL(MX51_PAD_EIM_A27__GPIO2_21, |
365 | PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH | PAD_CTL_PUS_100K_UP); | 365 | PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH); |
366 | 366 | ||
367 | imx51_soc_init(); | 367 | imx51_soc_init(); |
368 | 368 | ||
diff --git a/arch/arm/mach-mx5/board-mx53_evk.c b/arch/arm/mach-mx5/board-mx53_evk.c index 6bea31ab8f85..64bbfcea6f35 100644 --- a/arch/arm/mach-mx5/board-mx53_evk.c +++ b/arch/arm/mach-mx5/board-mx53_evk.c | |||
@@ -106,7 +106,7 @@ static inline void mx53_evk_fec_reset(void) | |||
106 | gpio_set_value(MX53_EVK_FEC_PHY_RST, 1); | 106 | gpio_set_value(MX53_EVK_FEC_PHY_RST, 1); |
107 | } | 107 | } |
108 | 108 | ||
109 | static struct fec_platform_data mx53_evk_fec_pdata = { | 109 | static const struct fec_platform_data mx53_evk_fec_pdata __initconst = { |
110 | .phy = PHY_INTERFACE_MODE_RMII, | 110 | .phy = PHY_INTERFACE_MODE_RMII, |
111 | }; | 111 | }; |
112 | 112 | ||
diff --git a/arch/arm/mach-mx5/board-mx53_loco.c b/arch/arm/mach-mx5/board-mx53_loco.c index 7678f7734db6..237bdecd9331 100644 --- a/arch/arm/mach-mx5/board-mx53_loco.c +++ b/arch/arm/mach-mx5/board-mx53_loco.c | |||
@@ -242,7 +242,7 @@ static inline void mx53_loco_fec_reset(void) | |||
242 | gpio_set_value(LOCO_FEC_PHY_RST, 1); | 242 | gpio_set_value(LOCO_FEC_PHY_RST, 1); |
243 | } | 243 | } |
244 | 244 | ||
245 | static struct fec_platform_data mx53_loco_fec_data = { | 245 | static const struct fec_platform_data mx53_loco_fec_data __initconst = { |
246 | .phy = PHY_INTERFACE_MODE_RMII, | 246 | .phy = PHY_INTERFACE_MODE_RMII, |
247 | }; | 247 | }; |
248 | 248 | ||
diff --git a/arch/arm/mach-mx5/board-mx53_smd.c b/arch/arm/mach-mx5/board-mx53_smd.c index 59c0845eb4a6..d42132a80e8f 100644 --- a/arch/arm/mach-mx5/board-mx53_smd.c +++ b/arch/arm/mach-mx5/board-mx53_smd.c | |||
@@ -104,7 +104,7 @@ static inline void mx53_smd_fec_reset(void) | |||
104 | gpio_set_value(SMD_FEC_PHY_RST, 1); | 104 | gpio_set_value(SMD_FEC_PHY_RST, 1); |
105 | } | 105 | } |
106 | 106 | ||
107 | static struct fec_platform_data mx53_smd_fec_data = { | 107 | static const struct fec_platform_data mx53_smd_fec_data __initconst = { |
108 | .phy = PHY_INTERFACE_MODE_RMII, | 108 | .phy = PHY_INTERFACE_MODE_RMII, |
109 | }; | 109 | }; |
110 | 110 | ||
diff --git a/arch/arm/mach-mx5/imx51-dt.c b/arch/arm/mach-mx5/imx51-dt.c index ccc61585659b..596edd967dbf 100644 --- a/arch/arm/mach-mx5/imx51-dt.c +++ b/arch/arm/mach-mx5/imx51-dt.c | |||
@@ -44,20 +44,22 @@ static const struct of_dev_auxdata imx51_auxdata_lookup[] __initconst = { | |||
44 | { /* sentinel */ } | 44 | { /* sentinel */ } |
45 | }; | 45 | }; |
46 | 46 | ||
47 | static void __init imx51_tzic_add_irq_domain(struct device_node *np, | 47 | static int __init imx51_tzic_add_irq_domain(struct device_node *np, |
48 | struct device_node *interrupt_parent) | 48 | struct device_node *interrupt_parent) |
49 | { | 49 | { |
50 | irq_domain_add_simple(np, 0); | 50 | irq_domain_add_simple(np, 0); |
51 | return 0; | ||
51 | } | 52 | } |
52 | 53 | ||
53 | static void __init imx51_gpio_add_irq_domain(struct device_node *np, | 54 | static int __init imx51_gpio_add_irq_domain(struct device_node *np, |
54 | struct device_node *interrupt_parent) | 55 | struct device_node *interrupt_parent) |
55 | { | 56 | { |
56 | static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS - | 57 | static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS; |
57 | 32 * 4; /* imx51 gets 4 gpio ports */ | ||
58 | 58 | ||
59 | gpio_irq_base -= 32; | ||
59 | irq_domain_add_simple(np, gpio_irq_base); | 60 | irq_domain_add_simple(np, gpio_irq_base); |
60 | gpio_irq_base += 32; | 61 | |
62 | return 0; | ||
61 | } | 63 | } |
62 | 64 | ||
63 | static const struct of_device_id imx51_irq_match[] __initconst = { | 65 | static const struct of_device_id imx51_irq_match[] __initconst = { |
diff --git a/arch/arm/mach-mx5/imx53-dt.c b/arch/arm/mach-mx5/imx53-dt.c index ccaa0b81b768..85bfd5ff21b0 100644 --- a/arch/arm/mach-mx5/imx53-dt.c +++ b/arch/arm/mach-mx5/imx53-dt.c | |||
@@ -48,20 +48,22 @@ static const struct of_dev_auxdata imx53_auxdata_lookup[] __initconst = { | |||
48 | { /* sentinel */ } | 48 | { /* sentinel */ } |
49 | }; | 49 | }; |
50 | 50 | ||
51 | static void __init imx53_tzic_add_irq_domain(struct device_node *np, | 51 | static int __init imx53_tzic_add_irq_domain(struct device_node *np, |
52 | struct device_node *interrupt_parent) | 52 | struct device_node *interrupt_parent) |
53 | { | 53 | { |
54 | irq_domain_add_simple(np, 0); | 54 | irq_domain_add_simple(np, 0); |
55 | return 0; | ||
55 | } | 56 | } |
56 | 57 | ||
57 | static void __init imx53_gpio_add_irq_domain(struct device_node *np, | 58 | static int __init imx53_gpio_add_irq_domain(struct device_node *np, |
58 | struct device_node *interrupt_parent) | 59 | struct device_node *interrupt_parent) |
59 | { | 60 | { |
60 | static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS - | 61 | static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS; |
61 | 32 * 7; /* imx53 gets 7 gpio ports */ | ||
62 | 62 | ||
63 | gpio_irq_base -= 32; | ||
63 | irq_domain_add_simple(np, gpio_irq_base); | 64 | irq_domain_add_simple(np, gpio_irq_base); |
64 | gpio_irq_base += 32; | 65 | |
66 | return 0; | ||
65 | } | 67 | } |
66 | 68 | ||
67 | static const struct of_device_id imx53_irq_match[] __initconst = { | 69 | static const struct of_device_id imx53_irq_match[] __initconst = { |
diff --git a/arch/arm/mach-mxs/include/mach/mx28.h b/arch/arm/mach-mxs/include/mach/mx28.h index 75d86118b76a..30c7990f3c01 100644 --- a/arch/arm/mach-mxs/include/mach/mx28.h +++ b/arch/arm/mach-mxs/include/mach/mx28.h | |||
@@ -104,8 +104,8 @@ | |||
104 | #define MX28_INT_CAN1 9 | 104 | #define MX28_INT_CAN1 9 |
105 | #define MX28_INT_LRADC_TOUCH 10 | 105 | #define MX28_INT_LRADC_TOUCH 10 |
106 | #define MX28_INT_HSADC 13 | 106 | #define MX28_INT_HSADC 13 |
107 | #define MX28_INT_IRADC_THRESH0 14 | 107 | #define MX28_INT_LRADC_THRESH0 14 |
108 | #define MX28_INT_IRADC_THRESH1 15 | 108 | #define MX28_INT_LRADC_THRESH1 15 |
109 | #define MX28_INT_LRADC_CH0 16 | 109 | #define MX28_INT_LRADC_CH0 16 |
110 | #define MX28_INT_LRADC_CH1 17 | 110 | #define MX28_INT_LRADC_CH1 17 |
111 | #define MX28_INT_LRADC_CH2 18 | 111 | #define MX28_INT_LRADC_CH2 18 |
diff --git a/arch/arm/mach-mxs/include/mach/mxs.h b/arch/arm/mach-mxs/include/mach/mxs.h index 0d2d2b470998..bde5f6634747 100644 --- a/arch/arm/mach-mxs/include/mach/mxs.h +++ b/arch/arm/mach-mxs/include/mach/mxs.h | |||
@@ -30,6 +30,7 @@ | |||
30 | */ | 30 | */ |
31 | #define cpu_is_mx23() ( \ | 31 | #define cpu_is_mx23() ( \ |
32 | machine_is_mx23evk() || \ | 32 | machine_is_mx23evk() || \ |
33 | machine_is_stmp378x() || \ | ||
33 | 0) | 34 | 0) |
34 | #define cpu_is_mx28() ( \ | 35 | #define cpu_is_mx28() ( \ |
35 | machine_is_mx28evk() || \ | 36 | machine_is_mx28evk() || \ |
diff --git a/arch/arm/mach-mxs/mach-m28evk.c b/arch/arm/mach-mxs/mach-m28evk.c index 3b1681e4f49a..6b00577b7025 100644 --- a/arch/arm/mach-mxs/mach-m28evk.c +++ b/arch/arm/mach-mxs/mach-m28evk.c | |||
@@ -361,6 +361,6 @@ static struct sys_timer m28evk_timer = { | |||
361 | MACHINE_START(M28EVK, "DENX M28 EVK") | 361 | MACHINE_START(M28EVK, "DENX M28 EVK") |
362 | .map_io = mx28_map_io, | 362 | .map_io = mx28_map_io, |
363 | .init_irq = mx28_init_irq, | 363 | .init_irq = mx28_init_irq, |
364 | .init_machine = m28evk_init, | ||
365 | .timer = &m28evk_timer, | 364 | .timer = &m28evk_timer, |
365 | .init_machine = m28evk_init, | ||
366 | MACHINE_END | 366 | MACHINE_END |
diff --git a/arch/arm/mach-mxs/mach-stmp378x_devb.c b/arch/arm/mach-mxs/mach-stmp378x_devb.c index 177e53123a02..6834dea38c04 100644 --- a/arch/arm/mach-mxs/mach-stmp378x_devb.c +++ b/arch/arm/mach-mxs/mach-stmp378x_devb.c | |||
@@ -115,6 +115,6 @@ static struct sys_timer stmp378x_dvb_timer = { | |||
115 | MACHINE_START(STMP378X, "STMP378X") | 115 | MACHINE_START(STMP378X, "STMP378X") |
116 | .map_io = mx23_map_io, | 116 | .map_io = mx23_map_io, |
117 | .init_irq = mx23_init_irq, | 117 | .init_irq = mx23_init_irq, |
118 | .init_machine = stmp378x_dvb_init, | ||
119 | .timer = &stmp378x_dvb_timer, | 118 | .timer = &stmp378x_dvb_timer, |
119 | .init_machine = stmp378x_dvb_init, | ||
120 | MACHINE_END | 120 | MACHINE_END |
diff --git a/arch/arm/mach-mxs/module-tx28.c b/arch/arm/mach-mxs/module-tx28.c index 0fcff47009cf..9a7b08b2a925 100644 --- a/arch/arm/mach-mxs/module-tx28.c +++ b/arch/arm/mach-mxs/module-tx28.c | |||
@@ -66,11 +66,11 @@ static const iomux_cfg_t tx28_fec1_pads[] __initconst = { | |||
66 | MX28_PAD_ENET0_CRS__ENET1_RX_EN, | 66 | MX28_PAD_ENET0_CRS__ENET1_RX_EN, |
67 | }; | 67 | }; |
68 | 68 | ||
69 | static struct fec_platform_data tx28_fec0_data = { | 69 | static const struct fec_platform_data tx28_fec0_data __initconst = { |
70 | .phy = PHY_INTERFACE_MODE_RMII, | 70 | .phy = PHY_INTERFACE_MODE_RMII, |
71 | }; | 71 | }; |
72 | 72 | ||
73 | static struct fec_platform_data tx28_fec1_data = { | 73 | static const struct fec_platform_data tx28_fec1_data __initconst = { |
74 | .phy = PHY_INTERFACE_MODE_RMII, | 74 | .phy = PHY_INTERFACE_MODE_RMII, |
75 | }; | 75 | }; |
76 | 76 | ||
diff --git a/arch/arm/mach-omap1/clock_data.c b/arch/arm/mach-omap1/clock_data.c index 1297bb58869c..9ff90a744a21 100644 --- a/arch/arm/mach-omap1/clock_data.c +++ b/arch/arm/mach-omap1/clock_data.c | |||
@@ -16,6 +16,8 @@ | |||
16 | 16 | ||
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/clk.h> | 18 | #include <linux/clk.h> |
19 | #include <linux/cpufreq.h> | ||
20 | #include <linux/delay.h> | ||
19 | #include <linux/io.h> | 21 | #include <linux/io.h> |
20 | 22 | ||
21 | #include <asm/mach-types.h> /* for machine_is_* */ | 23 | #include <asm/mach-types.h> /* for machine_is_* */ |
@@ -927,16 +929,22 @@ int __init omap1_clk_init(void) | |||
927 | 929 | ||
928 | void __init omap1_clk_late_init(void) | 930 | void __init omap1_clk_late_init(void) |
929 | { | 931 | { |
930 | if (ck_dpll1.rate >= OMAP1_DPLL1_SANE_VALUE) | 932 | unsigned long rate = ck_dpll1.rate; |
933 | |||
934 | if (rate >= OMAP1_DPLL1_SANE_VALUE) | ||
931 | return; | 935 | return; |
932 | 936 | ||
937 | /* System booting at unusable rate, force reprogramming of DPLL1 */ | ||
938 | ck_dpll1_p->rate = 0; | ||
939 | |||
933 | /* Find the highest supported frequency and enable it */ | 940 | /* Find the highest supported frequency and enable it */ |
934 | if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) { | 941 | if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) { |
935 | pr_err("System frequencies not set, using default. Check your config.\n"); | 942 | pr_err("System frequencies not set, using default. Check your config.\n"); |
936 | omap_writew(0x2290, DPLL_CTL); | 943 | omap_writew(0x2290, DPLL_CTL); |
937 | omap_writew(cpu_is_omap7xx() ? 0x3005 : 0x1005, ARM_CKCTL); | 944 | omap_writew(cpu_is_omap7xx() ? 0x2005 : 0x0005, ARM_CKCTL); |
938 | ck_dpll1.rate = OMAP1_DPLL1_SANE_VALUE; | 945 | ck_dpll1.rate = OMAP1_DPLL1_SANE_VALUE; |
939 | } | 946 | } |
940 | propagate_rate(&ck_dpll1); | 947 | propagate_rate(&ck_dpll1); |
941 | omap1_show_rates(); | 948 | omap1_show_rates(); |
949 | loops_per_jiffy = cpufreq_scale(loops_per_jiffy, rate, ck_dpll1.rate); | ||
942 | } | 950 | } |
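Note on the omap1 clock_data.c hunk above: omap1_clk_late_init() now remembers the DPLL1 rate it booted with and, after reprogramming the PLL, rescales loops_per_jiffy with cpufreq_scale() so udelay() stays calibrated. The rescale is a proportional adjustment; a worked sketch of essentially the same arithmetic (ignoring cpufreq_scale()'s fixed-point rounding), with made-up rates:

#include <stdio.h>
#include <stdint.h>

/* new_lpj = old_lpj * new_rate / old_rate */
static unsigned long scale_lpj(unsigned long lpj, uint64_t old_rate, uint64_t new_rate)
{
	return (unsigned long)((uint64_t)lpj * new_rate / old_rate);
}

int main(void)
{
	unsigned long lpj = 997376;		/* calibrated at the boot rate */
	uint64_t old_rate = 60000000;		/* DPLL1 came up at 60 MHz */
	uint64_t new_rate = 216000000;		/* reprogrammed to 216 MHz */

	printf("loops_per_jiffy: %lu -> %lu\n", lpj, scale_lpj(lpj, old_rate, new_rate));
	return 0;
}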
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c index ba1aa07bdb29..c15c5c9c9085 100644 --- a/arch/arm/mach-omap2/board-rx51-peripherals.c +++ b/arch/arm/mach-omap2/board-rx51-peripherals.c | |||
@@ -193,7 +193,7 @@ static struct platform_device rx51_charger_device = { | |||
193 | static void __init rx51_charger_init(void) | 193 | static void __init rx51_charger_init(void) |
194 | { | 194 | { |
195 | WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO, | 195 | WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO, |
196 | GPIOF_OUT_INIT_LOW, "isp1704_reset")); | 196 | GPIOF_OUT_INIT_HIGH, "isp1704_reset")); |
197 | 197 | ||
198 | platform_device_register(&rx51_charger_device); | 198 | platform_device_register(&rx51_charger_device); |
199 | } | 199 | } |
diff --git a/arch/arm/mach-omap2/mcbsp.c b/arch/arm/mach-omap2/mcbsp.c index 292eee3be15f..28fcb27005d2 100644 --- a/arch/arm/mach-omap2/mcbsp.c +++ b/arch/arm/mach-omap2/mcbsp.c | |||
@@ -145,6 +145,9 @@ static int omap_init_mcbsp(struct omap_hwmod *oh, void *unused) | |||
145 | pdata->reg_size = 4; | 145 | pdata->reg_size = 4; |
146 | pdata->has_ccr = true; | 146 | pdata->has_ccr = true; |
147 | } | 147 | } |
148 | pdata->set_clk_src = omap2_mcbsp_set_clk_src; | ||
149 | if (id == 1) | ||
150 | pdata->mux_signal = omap2_mcbsp1_mux_rx_clk; | ||
148 | 151 | ||
149 | if (oh->class->rev == MCBSP_CONFIG_TYPE3) { | 152 | if (oh->class->rev == MCBSP_CONFIG_TYPE3) { |
150 | if (id == 2) | 153 | if (id == 2) |
@@ -174,9 +177,6 @@ static int omap_init_mcbsp(struct omap_hwmod *oh, void *unused) | |||
174 | name, oh->name); | 177 | name, oh->name); |
175 | return PTR_ERR(pdev); | 178 | return PTR_ERR(pdev); |
176 | } | 179 | } |
177 | pdata->set_clk_src = omap2_mcbsp_set_clk_src; | ||
178 | if (id == 1) | ||
179 | pdata->mux_signal = omap2_mcbsp1_mux_rx_clk; | ||
180 | omap_mcbsp_count++; | 180 | omap_mcbsp_count++; |
181 | return 0; | 181 | return 0; |
182 | } | 182 | } |
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index 7f8915ad5099..eef43e2e163e 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | |||
@@ -3247,18 +3247,14 @@ static __initdata struct omap_hwmod *omap3xxx_hwmods[] = { | |||
3247 | 3247 | ||
3248 | /* 3430ES1-only hwmods */ | 3248 | /* 3430ES1-only hwmods */ |
3249 | static __initdata struct omap_hwmod *omap3430es1_hwmods[] = { | 3249 | static __initdata struct omap_hwmod *omap3430es1_hwmods[] = { |
3250 | &omap3xxx_iva_hwmod, | ||
3251 | &omap3430es1_dss_core_hwmod, | 3250 | &omap3430es1_dss_core_hwmod, |
3252 | &omap3xxx_mailbox_hwmod, | ||
3253 | NULL | 3251 | NULL |
3254 | }; | 3252 | }; |
3255 | 3253 | ||
3256 | /* 3430ES2+-only hwmods */ | 3254 | /* 3430ES2+-only hwmods */ |
3257 | static __initdata struct omap_hwmod *omap3430es2plus_hwmods[] = { | 3255 | static __initdata struct omap_hwmod *omap3430es2plus_hwmods[] = { |
3258 | &omap3xxx_iva_hwmod, | ||
3259 | &omap3xxx_dss_core_hwmod, | 3256 | &omap3xxx_dss_core_hwmod, |
3260 | &omap3xxx_usbhsotg_hwmod, | 3257 | &omap3xxx_usbhsotg_hwmod, |
3261 | &omap3xxx_mailbox_hwmod, | ||
3262 | NULL | 3258 | NULL |
3263 | }; | 3259 | }; |
3264 | 3260 | ||
diff --git a/arch/arm/mach-prima2/pm.c b/arch/arm/mach-prima2/pm.c index cb53160f6c5d..26ebb57719df 100644 --- a/arch/arm/mach-prima2/pm.c +++ b/arch/arm/mach-prima2/pm.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
10 | #include <linux/suspend.h> | 10 | #include <linux/suspend.h> |
11 | #include <linux/slab.h> | 11 | #include <linux/slab.h> |
12 | #include <linux/module.h> | ||
12 | #include <linux/of.h> | 13 | #include <linux/of.h> |
13 | #include <linux/of_address.h> | 14 | #include <linux/of_address.h> |
14 | #include <linux/of_device.h> | 15 | #include <linux/of_device.h> |
diff --git a/arch/arm/mach-prima2/prima2.c b/arch/arm/mach-prima2/prima2.c index ef555c041962..a12b689a8702 100644 --- a/arch/arm/mach-prima2/prima2.c +++ b/arch/arm/mach-prima2/prima2.c | |||
@@ -8,6 +8,7 @@ | |||
8 | 8 | ||
9 | #include <linux/init.h> | 9 | #include <linux/init.h> |
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <asm/sizes.h> | ||
11 | #include <asm/mach-types.h> | 12 | #include <asm/mach-types.h> |
12 | #include <asm/mach/arch.h> | 13 | #include <asm/mach/arch.h> |
13 | #include <linux/of.h> | 14 | #include <linux/of.h> |
diff --git a/arch/arm/mach-s3c64xx/dev-spi.c b/arch/arm/mach-s3c64xx/dev-spi.c index 5e6b42089eb4..3341fd118723 100644 --- a/arch/arm/mach-s3c64xx/dev-spi.c +++ b/arch/arm/mach-s3c64xx/dev-spi.c | |||
@@ -10,6 +10,7 @@ | |||
10 | 10 | ||
11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
12 | #include <linux/string.h> | 12 | #include <linux/string.h> |
13 | #include <linux/export.h> | ||
13 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
14 | #include <linux/dma-mapping.h> | 15 | #include <linux/dma-mapping.h> |
15 | #include <linux/gpio.h> | 16 | #include <linux/gpio.h> |
diff --git a/arch/arm/mach-s3c64xx/s3c6400.c b/arch/arm/mach-s3c64xx/s3c6400.c index 7a3bc32df425..51c00f2453c6 100644 --- a/arch/arm/mach-s3c64xx/s3c6400.c +++ b/arch/arm/mach-s3c64xx/s3c6400.c | |||
@@ -70,7 +70,7 @@ void __init s3c6400_init_irq(void) | |||
70 | s3c64xx_init_irq(~0 & ~(0xf << 5), ~0); | 70 | s3c64xx_init_irq(~0 & ~(0xf << 5), ~0); |
71 | } | 71 | } |
72 | 72 | ||
73 | struct sysdev_class s3c6400_sysclass = { | 73 | static struct sysdev_class s3c6400_sysclass = { |
74 | .name = "s3c6400-core", | 74 | .name = "s3c6400-core", |
75 | }; | 75 | }; |
76 | 76 | ||
diff --git a/arch/arm/mach-s3c64xx/setup-fb-24bpp.c b/arch/arm/mach-s3c64xx/setup-fb-24bpp.c index 83d2afb79e9f..2cf80026c58d 100644 --- a/arch/arm/mach-s3c64xx/setup-fb-24bpp.c +++ b/arch/arm/mach-s3c64xx/setup-fb-24bpp.c | |||
@@ -20,7 +20,7 @@ | |||
20 | #include <plat/fb.h> | 20 | #include <plat/fb.h> |
21 | #include <plat/gpio-cfg.h> | 21 | #include <plat/gpio-cfg.h> |
22 | 22 | ||
23 | extern void s3c64xx_fb_gpio_setup_24bpp(void) | 23 | void s3c64xx_fb_gpio_setup_24bpp(void) |
24 | { | 24 | { |
25 | s3c_gpio_cfgrange_nopull(S3C64XX_GPI(0), 16, S3C_GPIO_SFN(2)); | 25 | s3c_gpio_cfgrange_nopull(S3C64XX_GPI(0), 16, S3C_GPIO_SFN(2)); |
26 | s3c_gpio_cfgrange_nopull(S3C64XX_GPJ(0), 12, S3C_GPIO_SFN(2)); | 26 | s3c_gpio_cfgrange_nopull(S3C64XX_GPJ(0), 12, S3C_GPIO_SFN(2)); |
diff --git a/arch/arm/mach-s5pv210/mach-smdkv210.c b/arch/arm/mach-s5pv210/mach-smdkv210.c index a9106c392398..8662ef6e5681 100644 --- a/arch/arm/mach-s5pv210/mach-smdkv210.c +++ b/arch/arm/mach-s5pv210/mach-smdkv210.c | |||
@@ -273,6 +273,7 @@ static struct samsung_bl_gpio_info smdkv210_bl_gpio_info = { | |||
273 | 273 | ||
274 | static struct platform_pwm_backlight_data smdkv210_bl_data = { | 274 | static struct platform_pwm_backlight_data smdkv210_bl_data = { |
275 | .pwm_id = 3, | 275 | .pwm_id = 3, |
276 | .pwm_period_ns = 1000, | ||
276 | }; | 277 | }; |
277 | 278 | ||
278 | static void __init smdkv210_map_io(void) | 279 | static void __init smdkv210_map_io(void) |
diff --git a/arch/arm/mach-sa1100/Makefile.boot b/arch/arm/mach-sa1100/Makefile.boot index 5a616f6e5612..f7951aa04562 100644 --- a/arch/arm/mach-sa1100/Makefile.boot +++ b/arch/arm/mach-sa1100/Makefile.boot | |||
@@ -1,5 +1,5 @@ | |||
1 | ifeq ($(CONFIG_ARCH_SA1100),y) | 1 | ifeq ($(CONFIG_SA1111),y) |
2 | zreladdr-$(CONFIG_SA1111) += 0xc0208000 | 2 | zreladdr-y += 0xc0208000 |
3 | else | 3 | else |
4 | zreladdr-y += 0xc0008000 | 4 | zreladdr-y += 0xc0008000 |
5 | endif | 5 | endif |
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c index b862e9f81e3e..7119b87cbfa0 100644 --- a/arch/arm/mach-shmobile/board-ag5evm.c +++ b/arch/arm/mach-shmobile/board-ag5evm.c | |||
@@ -607,6 +607,7 @@ struct sys_timer ag5evm_timer = { | |||
607 | 607 | ||
608 | MACHINE_START(AG5EVM, "ag5evm") | 608 | MACHINE_START(AG5EVM, "ag5evm") |
609 | .map_io = ag5evm_map_io, | 609 | .map_io = ag5evm_map_io, |
610 | .nr_irqs = NR_IRQS_LEGACY, | ||
610 | .init_irq = sh73a0_init_irq, | 611 | .init_irq = sh73a0_init_irq, |
611 | .handle_irq = shmobile_handle_irq_gic, | 612 | .handle_irq = shmobile_handle_irq_gic, |
612 | .init_machine = ag5evm_init, | 613 | .init_machine = ag5evm_init, |
diff --git a/arch/arm/mach-shmobile/board-kota2.c b/arch/arm/mach-shmobile/board-kota2.c index bd9a78424d6b..f44150b5ae46 100644 --- a/arch/arm/mach-shmobile/board-kota2.c +++ b/arch/arm/mach-shmobile/board-kota2.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/input/sh_keysc.h> | 33 | #include <linux/input/sh_keysc.h> |
34 | #include <linux/gpio_keys.h> | 34 | #include <linux/gpio_keys.h> |
35 | #include <linux/leds.h> | 35 | #include <linux/leds.h> |
36 | #include <linux/platform_data/leds-renesas-tpu.h> | ||
36 | #include <linux/mmc/host.h> | 37 | #include <linux/mmc/host.h> |
37 | #include <linux/mmc/sh_mmcif.h> | 38 | #include <linux/mmc/sh_mmcif.h> |
38 | #include <linux/mfd/tmio.h> | 39 | #include <linux/mfd/tmio.h> |
@@ -56,7 +57,7 @@ static struct resource smsc9220_resources[] = { | |||
56 | .flags = IORESOURCE_MEM, | 57 | .flags = IORESOURCE_MEM, |
57 | }, | 58 | }, |
58 | [1] = { | 59 | [1] = { |
59 | .start = gic_spi(33), /* PINTA2 @ PORT144 */ | 60 | .start = SH73A0_PINT0_IRQ(2), /* PINTA2 */ |
60 | .flags = IORESOURCE_IRQ, | 61 | .flags = IORESOURCE_IRQ, |
61 | }, | 62 | }, |
62 | }; | 63 | }; |
@@ -157,10 +158,6 @@ static struct platform_device gpio_keys_device = { | |||
157 | #define GPIO_LED(n, g) { .name = n, .gpio = g } | 158 | #define GPIO_LED(n, g) { .name = n, .gpio = g } |
158 | 159 | ||
159 | static struct gpio_led gpio_leds[] = { | 160 | static struct gpio_led gpio_leds[] = { |
160 | GPIO_LED("V2513", GPIO_PORT153), /* PORT153 [TPU1T02] -> V2513 */ | ||
161 | GPIO_LED("V2514", GPIO_PORT199), /* PORT199 [TPU4TO1] -> V2514 */ | ||
162 | GPIO_LED("V2515", GPIO_PORT197), /* PORT197 [TPU2TO1] -> V2515 */ | ||
163 | GPIO_LED("KEYLED", GPIO_PORT163), /* PORT163 [TPU3TO0] -> KEYLED */ | ||
164 | GPIO_LED("G", GPIO_PORT20), /* PORT20 [GPO0] -> LED7 -> "G" */ | 161 | GPIO_LED("G", GPIO_PORT20), /* PORT20 [GPO0] -> LED7 -> "G" */ |
165 | GPIO_LED("H", GPIO_PORT21), /* PORT21 [GPO1] -> LED8 -> "H" */ | 162 | GPIO_LED("H", GPIO_PORT21), /* PORT21 [GPO1] -> LED8 -> "H" */ |
166 | GPIO_LED("J", GPIO_PORT22), /* PORT22 [GPO2] -> LED9 -> "J" */ | 163 | GPIO_LED("J", GPIO_PORT22), /* PORT22 [GPO2] -> LED9 -> "J" */ |
@@ -179,6 +176,119 @@ static struct platform_device gpio_leds_device = { | |||
179 | }, | 176 | }, |
180 | }; | 177 | }; |
181 | 178 | ||
179 | /* TPU LED */ | ||
180 | static struct led_renesas_tpu_config led_renesas_tpu12_pdata = { | ||
181 | .name = "V2513", | ||
182 | .pin_gpio_fn = GPIO_FN_TPU1TO2, | ||
183 | .pin_gpio = GPIO_PORT153, | ||
184 | .channel_offset = 0x90, | ||
185 | .timer_bit = 2, | ||
186 | .max_brightness = 1000, | ||
187 | }; | ||
188 | |||
189 | static struct resource tpu12_resources[] = { | ||
190 | [0] = { | ||
191 | .name = "TPU12", | ||
192 | .start = 0xe6610090, | ||
193 | .end = 0xe66100b5, | ||
194 | .flags = IORESOURCE_MEM, | ||
195 | }, | ||
196 | }; | ||
197 | |||
198 | static struct platform_device leds_tpu12_device = { | ||
199 | .name = "leds-renesas-tpu", | ||
200 | .id = 12, | ||
201 | .dev = { | ||
202 | .platform_data = &led_renesas_tpu12_pdata, | ||
203 | }, | ||
204 | .num_resources = ARRAY_SIZE(tpu12_resources), | ||
205 | .resource = tpu12_resources, | ||
206 | }; | ||
207 | |||
208 | static struct led_renesas_tpu_config led_renesas_tpu41_pdata = { | ||
209 | .name = "V2514", | ||
210 | .pin_gpio_fn = GPIO_FN_TPU4TO1, | ||
211 | .pin_gpio = GPIO_PORT199, | ||
212 | .channel_offset = 0x50, | ||
213 | .timer_bit = 1, | ||
214 | .max_brightness = 1000, | ||
215 | }; | ||
216 | |||
217 | static struct resource tpu41_resources[] = { | ||
218 | [0] = { | ||
219 | .name = "TPU41", | ||
220 | .start = 0xe6640050, | ||
221 | .end = 0xe6640075, | ||
222 | .flags = IORESOURCE_MEM, | ||
223 | }, | ||
224 | }; | ||
225 | |||
226 | static struct platform_device leds_tpu41_device = { | ||
227 | .name = "leds-renesas-tpu", | ||
228 | .id = 41, | ||
229 | .dev = { | ||
230 | .platform_data = &led_renesas_tpu41_pdata, | ||
231 | }, | ||
232 | .num_resources = ARRAY_SIZE(tpu41_resources), | ||
233 | .resource = tpu41_resources, | ||
234 | }; | ||
235 | |||
236 | static struct led_renesas_tpu_config led_renesas_tpu21_pdata = { | ||
237 | .name = "V2515", | ||
238 | .pin_gpio_fn = GPIO_FN_TPU2TO1, | ||
239 | .pin_gpio = GPIO_PORT197, | ||
240 | .channel_offset = 0x50, | ||
241 | .timer_bit = 1, | ||
242 | .max_brightness = 1000, | ||
243 | }; | ||
244 | |||
245 | static struct resource tpu21_resources[] = { | ||
246 | [0] = { | ||
247 | .name = "TPU21", | ||
248 | .start = 0xe6620050, | ||
249 | .end = 0xe6620075, | ||
250 | .flags = IORESOURCE_MEM, | ||
251 | }, | ||
252 | }; | ||
253 | |||
254 | static struct platform_device leds_tpu21_device = { | ||
255 | .name = "leds-renesas-tpu", | ||
256 | .id = 21, | ||
257 | .dev = { | ||
258 | .platform_data = &led_renesas_tpu21_pdata, | ||
259 | }, | ||
260 | .num_resources = ARRAY_SIZE(tpu21_resources), | ||
261 | .resource = tpu21_resources, | ||
262 | }; | ||
263 | |||
264 | static struct led_renesas_tpu_config led_renesas_tpu30_pdata = { | ||
265 | .name = "KEYLED", | ||
266 | .pin_gpio_fn = GPIO_FN_TPU3TO0, | ||
267 | .pin_gpio = GPIO_PORT163, | ||
268 | .channel_offset = 0x10, | ||
269 | .timer_bit = 0, | ||
270 | .max_brightness = 1000, | ||
271 | }; | ||
272 | |||
273 | static struct resource tpu30_resources[] = { | ||
274 | [0] = { | ||
275 | .name = "TPU30", | ||
276 | .start = 0xe6630010, | ||
277 | .end = 0xe6630035, | ||
278 | .flags = IORESOURCE_MEM, | ||
279 | }, | ||
280 | }; | ||
281 | |||
282 | static struct platform_device leds_tpu30_device = { | ||
283 | .name = "leds-renesas-tpu", | ||
284 | .id = 30, | ||
285 | .dev = { | ||
286 | .platform_data = &led_renesas_tpu30_pdata, | ||
287 | }, | ||
288 | .num_resources = ARRAY_SIZE(tpu30_resources), | ||
289 | .resource = tpu30_resources, | ||
290 | }; | ||
291 | |||
182 | /* MMCIF */ | 292 | /* MMCIF */ |
183 | static struct resource mmcif_resources[] = { | 293 | static struct resource mmcif_resources[] = { |
184 | [0] = { | 294 | [0] = { |
@@ -291,6 +401,10 @@ static struct platform_device *kota2_devices[] __initdata = { | |||
291 | &keysc_device, | 401 | &keysc_device, |
292 | &gpio_keys_device, | 402 | &gpio_keys_device, |
293 | &gpio_leds_device, | 403 | &gpio_leds_device, |
404 | &leds_tpu12_device, | ||
405 | &leds_tpu41_device, | ||
406 | &leds_tpu21_device, | ||
407 | &leds_tpu30_device, | ||
294 | &mmcif_device, | 408 | &mmcif_device, |
295 | &sdhi0_device, | 409 | &sdhi0_device, |
296 | &sdhi1_device, | 410 | &sdhi1_device, |
@@ -317,18 +431,6 @@ static void __init kota2_map_io(void) | |||
317 | shmobile_setup_console(); | 431 | shmobile_setup_console(); |
318 | } | 432 | } |
319 | 433 | ||
320 | #define PINTER0A 0xe69000a0 | ||
321 | #define PINTCR0A 0xe69000b0 | ||
322 | |||
323 | void __init kota2_init_irq(void) | ||
324 | { | ||
325 | sh73a0_init_irq(); | ||
326 | |||
327 | /* setup PINT: enable PINTA2 as active low */ | ||
328 | __raw_writel(1 << 29, PINTER0A); | ||
329 | __raw_writew(2 << 10, PINTCR0A); | ||
330 | } | ||
331 | |||
332 | static void __init kota2_init(void) | 434 | static void __init kota2_init(void) |
333 | { | 435 | { |
334 | sh73a0_pinmux_init(); | 436 | sh73a0_pinmux_init(); |
@@ -447,7 +549,8 @@ struct sys_timer kota2_timer = { | |||
447 | 549 | ||
448 | MACHINE_START(KOTA2, "kota2") | 550 | MACHINE_START(KOTA2, "kota2") |
449 | .map_io = kota2_map_io, | 551 | .map_io = kota2_map_io, |
450 | .init_irq = kota2_init_irq, | 552 | .nr_irqs = NR_IRQS_LEGACY, |
553 | .init_irq = sh73a0_init_irq, | ||
451 | .handle_irq = shmobile_handle_irq_gic, | 554 | .handle_irq = shmobile_handle_irq_gic, |
452 | .init_machine = kota2_init, | 555 | .init_machine = kota2_init, |
453 | .timer = &kota2_timer, | 556 | .timer = &kota2_timer, |
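Note on the kota2 hunk above: four GPIO-driven LEDs become leds-renesas-tpu devices whose MMIO resource is the TPU channel window, start = unit base + channel_offset, each window spanning 0x26 bytes. A quick cross-check of the four resources against that rule; the unit base addresses are inferred from the patch, everything else is illustrative:

#include <stdio.h>
#include <stdint.h>

struct tpu_ch {
	const char *name;
	uint32_t unit_base;	/* TPU unit MMIO base, as implied by the patch */
	uint32_t channel_offset;	/* .channel_offset from the platform data */
};

int main(void)
{
	/* TPU1..TPU4 windows as wired up for the V2513/V2514/V2515/KEYLED LEDs. */
	static const struct tpu_ch ch[] = {
		{ "TPU12", 0xe6610000, 0x90 },
		{ "TPU41", 0xe6640000, 0x50 },
		{ "TPU21", 0xe6620000, 0x50 },
		{ "TPU30", 0xe6630000, 0x10 },
	};

	for (unsigned int i = 0; i < sizeof(ch) / sizeof(ch[0]); i++) {
		unsigned int start = ch[i].unit_base + ch[i].channel_offset;
		printf("%s: 0x%08x-0x%08x\n", ch[i].name, start, start + 0x25);
	}
	return 0;
}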
diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c index 61a846bb30f2..1370a89ca358 100644 --- a/arch/arm/mach-shmobile/clock-sh73a0.c +++ b/arch/arm/mach-shmobile/clock-sh73a0.c | |||
@@ -113,6 +113,12 @@ static struct clk main_clk = { | |||
113 | .ops = &main_clk_ops, | 113 | .ops = &main_clk_ops, |
114 | }; | 114 | }; |
115 | 115 | ||
116 | /* Divide Main clock by two */ | ||
117 | static struct clk main_div2_clk = { | ||
118 | .ops = &div2_clk_ops, | ||
119 | .parent = &main_clk, | ||
120 | }; | ||
121 | |||
116 | /* PLL0, PLL1, PLL2, PLL3 */ | 122 | /* PLL0, PLL1, PLL2, PLL3 */ |
117 | static unsigned long pll_recalc(struct clk *clk) | 123 | static unsigned long pll_recalc(struct clk *clk) |
118 | { | 124 | { |
@@ -181,6 +187,7 @@ static struct clk *main_clks[] = { | |||
181 | &extal1_div2_clk, | 187 | &extal1_div2_clk, |
182 | &extal2_div2_clk, | 188 | &extal2_div2_clk, |
183 | &main_clk, | 189 | &main_clk, |
190 | &main_div2_clk, | ||
184 | &pll0_clk, | 191 | &pll0_clk, |
185 | &pll1_clk, | 192 | &pll1_clk, |
186 | &pll2_clk, | 193 | &pll2_clk, |
@@ -243,7 +250,7 @@ static struct clk div6_clks[DIV6_NR] = { | |||
243 | [DIV6_VCK1] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR1, 0), | 250 | [DIV6_VCK1] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR1, 0), |
244 | [DIV6_VCK2] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR2, 0), | 251 | [DIV6_VCK2] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR2, 0), |
245 | [DIV6_VCK3] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR3, 0), | 252 | [DIV6_VCK3] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR3, 0), |
246 | [DIV6_ZB1] = SH_CLK_DIV6(&pll1_div2_clk, ZBCKCR, 0), | 253 | [DIV6_ZB1] = SH_CLK_DIV6(&pll1_div2_clk, ZBCKCR, CLK_ENABLE_ON_INIT), |
247 | [DIV6_FLCTL] = SH_CLK_DIV6(&pll1_div2_clk, FLCKCR, 0), | 254 | [DIV6_FLCTL] = SH_CLK_DIV6(&pll1_div2_clk, FLCKCR, 0), |
248 | [DIV6_SDHI0] = SH_CLK_DIV6(&pll1_div2_clk, SD0CKCR, 0), | 255 | [DIV6_SDHI0] = SH_CLK_DIV6(&pll1_div2_clk, SD0CKCR, 0), |
249 | [DIV6_SDHI1] = SH_CLK_DIV6(&pll1_div2_clk, SD1CKCR, 0), | 256 | [DIV6_SDHI1] = SH_CLK_DIV6(&pll1_div2_clk, SD1CKCR, 0), |
@@ -268,6 +275,7 @@ enum { MSTP001, | |||
268 | MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, | 275 | MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, |
269 | MSTP331, MSTP329, MSTP325, MSTP323, MSTP318, | 276 | MSTP331, MSTP329, MSTP325, MSTP323, MSTP318, |
270 | MSTP314, MSTP313, MSTP312, MSTP311, | 277 | MSTP314, MSTP313, MSTP312, MSTP311, |
278 | MSTP303, MSTP302, MSTP301, MSTP300, | ||
271 | MSTP411, MSTP410, MSTP403, | 279 | MSTP411, MSTP410, MSTP403, |
272 | MSTP_NR }; | 280 | MSTP_NR }; |
273 | 281 | ||
@@ -301,6 +309,10 @@ static struct clk mstp_clks[MSTP_NR] = { | |||
301 | [MSTP313] = MSTP(&div6_clks[DIV6_SDHI1], SMSTPCR3, 13, 0), /* SDHI1 */ | 309 | [MSTP313] = MSTP(&div6_clks[DIV6_SDHI1], SMSTPCR3, 13, 0), /* SDHI1 */ |
302 | [MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */ | 310 | [MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */ |
303 | [MSTP311] = MSTP(&div6_clks[DIV6_SDHI2], SMSTPCR3, 11, 0), /* SDHI2 */ | 311 | [MSTP311] = MSTP(&div6_clks[DIV6_SDHI2], SMSTPCR3, 11, 0), /* SDHI2 */ |
312 | [MSTP303] = MSTP(&main_div2_clk, SMSTPCR3, 3, 0), /* TPU1 */ | ||
313 | [MSTP302] = MSTP(&main_div2_clk, SMSTPCR3, 2, 0), /* TPU2 */ | ||
314 | [MSTP301] = MSTP(&main_div2_clk, SMSTPCR3, 1, 0), /* TPU3 */ | ||
315 | [MSTP300] = MSTP(&main_div2_clk, SMSTPCR3, 0, 0), /* TPU4 */ | ||
304 | [MSTP411] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 11, 0), /* IIC3 */ | 316 | [MSTP411] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 11, 0), /* IIC3 */ |
305 | [MSTP410] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 10, 0), /* IIC4 */ | 317 | [MSTP410] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 10, 0), /* IIC4 */ |
306 | [MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */ | 318 | [MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */ |
@@ -350,6 +362,10 @@ static struct clk_lookup lookups[] = { | |||
350 | CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */ | 362 | CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */ |
351 | CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */ | 363 | CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */ |
352 | CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP311]), /* SDHI2 */ | 364 | CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP311]), /* SDHI2 */ |
365 | CLKDEV_DEV_ID("leds-renesas-tpu.12", &mstp_clks[MSTP303]), /* TPU1 */ | ||
366 | CLKDEV_DEV_ID("leds-renesas-tpu.21", &mstp_clks[MSTP302]), /* TPU2 */ | ||
367 | CLKDEV_DEV_ID("leds-renesas-tpu.30", &mstp_clks[MSTP301]), /* TPU3 */ | ||
368 | CLKDEV_DEV_ID("leds-renesas-tpu.41", &mstp_clks[MSTP300]), /* TPU4 */ | ||
353 | CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* I2C3 */ | 369 | CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* I2C3 */ |
354 | CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* I2C4 */ | 370 | CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* I2C4 */ |
355 | CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */ | 371 | CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */ |
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index fbdd12ea3a58..7c38474e533a 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c | |||
@@ -32,6 +32,7 @@ | |||
32 | 32 | ||
33 | #include <asm/mach/arch.h> | 33 | #include <asm/mach/arch.h> |
34 | #include <asm/mach/map.h> | 34 | #include <asm/mach/map.h> |
35 | #include <asm/memblock.h> | ||
35 | 36 | ||
36 | #include "mm.h" | 37 | #include "mm.h" |
37 | 38 | ||
@@ -332,7 +333,6 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc) | |||
332 | 333 | ||
333 | sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL); | 334 | sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL); |
334 | 335 | ||
335 | memblock_init(); | ||
336 | for (i = 0; i < mi->nr_banks; i++) | 336 | for (i = 0; i < mi->nr_banks; i++) |
337 | memblock_add(mi->bank[i].start, mi->bank[i].size); | 337 | memblock_add(mi->bank[i].start, mi->bank[i].size); |
338 | 338 | ||
@@ -371,7 +371,7 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc) | |||
371 | if (mdesc->reserve) | 371 | if (mdesc->reserve) |
372 | mdesc->reserve(); | 372 | mdesc->reserve(); |
373 | 373 | ||
374 | memblock_analyze(); | 374 | memblock_allow_resize(); |
375 | memblock_dump_all(); | 375 | memblock_dump_all(); |
376 | } | 376 | } |
377 | 377 | ||
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 2c559ac38142..e70a73731eaa 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
@@ -363,11 +363,13 @@ __v7_setup: | |||
363 | orreq r10, r10, #1 << 6 @ set bit #6 | 363 | orreq r10, r10, #1 << 6 @ set bit #6 |
364 | mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register | 364 | mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register |
365 | #endif | 365 | #endif |
366 | #ifdef CONFIG_ARM_ERRATA_751472 | 366 | #if defined(CONFIG_ARM_ERRATA_751472) && defined(CONFIG_SMP) |
367 | cmp r6, #0x30 @ present prior to r3p0 | 367 | ALT_SMP(cmp r6, #0x30) @ present prior to r3p0 |
368 | ALT_UP_B(1f) | ||
368 | mrclt p15, 0, r10, c15, c0, 1 @ read diagnostic register | 369 | mrclt p15, 0, r10, c15, c0, 1 @ read diagnostic register |
369 | orrlt r10, r10, #1 << 11 @ set bit #11 | 370 | orrlt r10, r10, #1 << 11 @ set bit #11 |
370 | mcrlt p15, 0, r10, c15, c0, 1 @ write diagnostic register | 371 | mcrlt p15, 0, r10, c15, c0, 1 @ write diagnostic register |
372 | 1: | ||
371 | #endif | 373 | #endif |
372 | 374 | ||
373 | 3: mov r10, #0 | 375 | 3: mov r10, #0 |
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c index c074e66ad224..4e0a371630b3 100644 --- a/arch/arm/oprofile/common.c +++ b/arch/arm/oprofile/common.c | |||
@@ -116,7 +116,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) | |||
116 | return oprofile_perf_init(ops); | 116 | return oprofile_perf_init(ops); |
117 | } | 117 | } |
118 | 118 | ||
119 | void __exit oprofile_arch_exit(void) | 119 | void oprofile_arch_exit(void) |
120 | { | 120 | { |
121 | oprofile_perf_exit(); | 121 | oprofile_perf_exit(); |
122 | } | 122 | } |
diff --git a/arch/arm/plat-mxc/cpufreq.c b/arch/arm/plat-mxc/cpufreq.c index 74aac96cda20..73db34bf588a 100644 --- a/arch/arm/plat-mxc/cpufreq.c +++ b/arch/arm/plat-mxc/cpufreq.c | |||
@@ -17,6 +17,7 @@ | |||
17 | * the CPU clock speed on the fly. | 17 | * the CPU clock speed on the fly. |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/module.h> | ||
20 | #include <linux/cpufreq.h> | 21 | #include <linux/cpufreq.h> |
21 | #include <linux/clk.h> | 22 | #include <linux/clk.h> |
22 | #include <linux/err.h> | 23 | #include <linux/err.h> |
@@ -97,7 +98,7 @@ static int mxc_set_target(struct cpufreq_policy *policy, | |||
97 | return ret; | 98 | return ret; |
98 | } | 99 | } |
99 | 100 | ||
100 | static int __init mxc_cpufreq_init(struct cpufreq_policy *policy) | 101 | static int mxc_cpufreq_init(struct cpufreq_policy *policy) |
101 | { | 102 | { |
102 | int ret; | 103 | int ret; |
103 | int i; | 104 | int i; |
diff --git a/arch/arm/plat-mxc/include/mach/uncompress.h b/arch/arm/plat-mxc/include/mach/uncompress.h index 88fd40452567..477971b00930 100644 --- a/arch/arm/plat-mxc/include/mach/uncompress.h +++ b/arch/arm/plat-mxc/include/mach/uncompress.h | |||
@@ -98,6 +98,7 @@ static __inline__ void __arch_decomp_setup(unsigned long arch_id) | |||
98 | case MACH_TYPE_PCM043: | 98 | case MACH_TYPE_PCM043: |
99 | case MACH_TYPE_LILLY1131: | 99 | case MACH_TYPE_LILLY1131: |
100 | case MACH_TYPE_VPR200: | 100 | case MACH_TYPE_VPR200: |
101 | case MACH_TYPE_EUKREA_CPUIMX35SD: | ||
101 | uart_base = MX3X_UART1_BASE_ADDR; | 102 | uart_base = MX3X_UART1_BASE_ADDR; |
102 | break; | 103 | break; |
103 | case MACH_TYPE_MAGX_ZN5: | 104 | case MACH_TYPE_MAGX_ZN5: |
diff --git a/arch/arm/plat-mxc/pwm.c b/arch/arm/plat-mxc/pwm.c index 42d74ea59084..e032717f7d02 100644 --- a/arch/arm/plat-mxc/pwm.c +++ b/arch/arm/plat-mxc/pwm.c | |||
@@ -32,6 +32,9 @@ | |||
32 | #define MX3_PWMSAR 0x0C /* PWM Sample Register */ | 32 | #define MX3_PWMSAR 0x0C /* PWM Sample Register */ |
33 | #define MX3_PWMPR 0x10 /* PWM Period Register */ | 33 | #define MX3_PWMPR 0x10 /* PWM Period Register */ |
34 | #define MX3_PWMCR_PRESCALER(x) (((x - 1) & 0xFFF) << 4) | 34 | #define MX3_PWMCR_PRESCALER(x) (((x - 1) & 0xFFF) << 4) |
35 | #define MX3_PWMCR_DOZEEN (1 << 24) | ||
36 | #define MX3_PWMCR_WAITEN (1 << 23) | ||
37 | #define MX3_PWMCR_DBGEN (1 << 22) | ||
35 | #define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16) | 38 | #define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16) |
36 | #define MX3_PWMCR_CLKSRC_IPG (1 << 16) | 39 | #define MX3_PWMCR_CLKSRC_IPG (1 << 16) |
37 | #define MX3_PWMCR_EN (1 << 0) | 40 | #define MX3_PWMCR_EN (1 << 0) |
@@ -74,10 +77,21 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns) | |||
74 | do_div(c, period_ns); | 77 | do_div(c, period_ns); |
75 | duty_cycles = c; | 78 | duty_cycles = c; |
76 | 79 | ||
80 | /* | ||
81 | * according to imx pwm RM, the real period value should be | ||
82 | * PERIOD value in PWMPR plus 2. | ||
83 | */ | ||
84 | if (period_cycles > 2) | ||
85 | period_cycles -= 2; | ||
86 | else | ||
87 | period_cycles = 0; | ||
88 | |||
77 | writel(duty_cycles, pwm->mmio_base + MX3_PWMSAR); | 89 | writel(duty_cycles, pwm->mmio_base + MX3_PWMSAR); |
78 | writel(period_cycles, pwm->mmio_base + MX3_PWMPR); | 90 | writel(period_cycles, pwm->mmio_base + MX3_PWMPR); |
79 | 91 | ||
80 | cr = MX3_PWMCR_PRESCALER(prescale) | MX3_PWMCR_EN; | 92 | cr = MX3_PWMCR_PRESCALER(prescale) | |
93 | MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN | | ||
94 | MX3_PWMCR_DBGEN | MX3_PWMCR_EN; | ||
81 | 95 | ||
82 | if (cpu_is_mx25()) | 96 | if (cpu_is_mx25()) |
83 | cr |= MX3_PWMCR_CLKSRC_IPG; | 97 | cr |= MX3_PWMCR_CLKSRC_IPG; |
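Note on the plat-mxc/pwm.c hunk above: it encodes two details of the i.MX PWM block — the value written to PWMPR is the desired period in counter ticks minus 2 (the hardware adds 2), and the control register now keeps the counter running in doze/wait/debug modes. A hedged sketch of the period/duty arithmetic; the clock rate and requested period are made-up inputs and prescaler handling is simplified:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t clk_rate  = 66500000;	/* e.g. IPG clock after prescaling, Hz */
	uint64_t period_ns = 1000000;	/* requested 1 kHz period */
	uint64_t duty_ns   = 250000;	/* requested 25% duty */

	/* Ticks for the requested period and duty at this clock rate. */
	uint64_t period_cycles = clk_rate * period_ns / 1000000000ULL;
	uint64_t duty_cycles   = period_cycles * duty_ns / period_ns;

	/* Per the i.MX PWM reference manual note in the patch, the counter
	 * period is PWMPR + 2, so subtract 2 before writing the register. */
	uint64_t pwmpr = period_cycles > 2 ? period_cycles - 2 : 0;

	printf("period_cycles=%llu -> PWMPR=%llu, PWMSAR=%llu\n",
	       (unsigned long long)period_cycles,
	       (unsigned long long)pwmpr,
	       (unsigned long long)duty_cycles);
	return 0;
}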
diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c index 41ab97ebe4cf..10d160888133 100644 --- a/arch/arm/plat-orion/gpio.c +++ b/arch/arm/plat-orion/gpio.c | |||
@@ -384,12 +384,16 @@ void __init orion_gpio_init(int gpio_base, int ngpio, | |||
384 | struct orion_gpio_chip *ochip; | 384 | struct orion_gpio_chip *ochip; |
385 | struct irq_chip_generic *gc; | 385 | struct irq_chip_generic *gc; |
386 | struct irq_chip_type *ct; | 386 | struct irq_chip_type *ct; |
387 | char gc_label[16]; | ||
387 | 388 | ||
388 | if (orion_gpio_chip_count == ARRAY_SIZE(orion_gpio_chips)) | 389 | if (orion_gpio_chip_count == ARRAY_SIZE(orion_gpio_chips)) |
389 | return; | 390 | return; |
390 | 391 | ||
392 | snprintf(gc_label, sizeof(gc_label), "orion_gpio%d", | ||
393 | orion_gpio_chip_count); | ||
394 | |||
391 | ochip = orion_gpio_chips + orion_gpio_chip_count; | 395 | ochip = orion_gpio_chips + orion_gpio_chip_count; |
392 | ochip->chip.label = "orion_gpio"; | 396 | ochip->chip.label = kstrdup(gc_label, GFP_KERNEL); |
393 | ochip->chip.request = orion_gpio_request; | 397 | ochip->chip.request = orion_gpio_request; |
394 | ochip->chip.direction_input = orion_gpio_direction_input; | 398 | ochip->chip.direction_input = orion_gpio_direction_input; |
395 | ochip->chip.get = orion_gpio_get; | 399 | ochip->chip.get = orion_gpio_get; |
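Note on the plat-orion/gpio.c hunk above: every GPIO chip now gets a distinct label ("orion_gpio0", "orion_gpio1", ...), and the label is kstrdup()ed because gc_label lives on the stack and would vanish once orion_gpio_init() returns. The same pattern in plain C, with strdup() standing in for kstrdup():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Build a unique, heap-allocated label per chip; the caller keeps the
 * pointer for the lifetime of the chip, as the kernel does after kstrdup(). */
static char *make_chip_label(int index)
{
	char buf[16];

	snprintf(buf, sizeof(buf), "orion_gpio%d", index);
	return strdup(buf);	/* stand-in for kstrdup(buf, GFP_KERNEL) */
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		char *label = make_chip_label(i);
		printf("%s\n", label);
		free(label);	/* the kernel keeps these for the chip lifetime */
	}
	return 0;
}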
diff --git a/arch/arm/plat-samsung/dev-backlight.c b/arch/arm/plat-samsung/dev-backlight.c index e657305644cc..a976c023b286 100644 --- a/arch/arm/plat-samsung/dev-backlight.c +++ b/arch/arm/plat-samsung/dev-backlight.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/io.h> | 16 | #include <linux/io.h> |
17 | #include <linux/pwm_backlight.h> | 17 | #include <linux/pwm_backlight.h> |
18 | #include <linux/slab.h> | ||
19 | 18 | ||
20 | #include <plat/devs.h> | 19 | #include <plat/devs.h> |
21 | #include <plat/gpio-cfg.h> | 20 | #include <plat/gpio-cfg.h> |
diff --git a/arch/arm/plat-samsung/include/plat/cpu-freq-core.h b/arch/arm/plat-samsung/include/plat/cpu-freq-core.h index dac4760c0f0a..95509d8eb140 100644 --- a/arch/arm/plat-samsung/include/plat/cpu-freq-core.h +++ b/arch/arm/plat-samsung/include/plat/cpu-freq-core.h | |||
@@ -202,14 +202,6 @@ extern int s3c_plltab_register(struct cpufreq_frequency_table *plls, | |||
202 | extern struct s3c_cpufreq_config *s3c_cpufreq_getconfig(void); | 202 | extern struct s3c_cpufreq_config *s3c_cpufreq_getconfig(void); |
203 | extern struct s3c_iotimings *s3c_cpufreq_getiotimings(void); | 203 | extern struct s3c_iotimings *s3c_cpufreq_getiotimings(void); |
204 | 204 | ||
205 | extern void s3c2410_iotiming_debugfs(struct seq_file *seq, | ||
206 | struct s3c_cpufreq_config *cfg, | ||
207 | union s3c_iobank *iob); | ||
208 | |||
209 | extern void s3c2412_iotiming_debugfs(struct seq_file *seq, | ||
210 | struct s3c_cpufreq_config *cfg, | ||
211 | union s3c_iobank *iob); | ||
212 | |||
213 | #ifdef CONFIG_CPU_FREQ_S3C24XX_DEBUGFS | 205 | #ifdef CONFIG_CPU_FREQ_S3C24XX_DEBUGFS |
214 | #define s3c_cpufreq_debugfs_call(x) x | 206 | #define s3c_cpufreq_debugfs_call(x) x |
215 | #else | 207 | #else |
@@ -226,6 +218,10 @@ extern void s3c2410_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg); | |||
226 | extern void s3c2410_set_fvco(struct s3c_cpufreq_config *cfg); | 218 | extern void s3c2410_set_fvco(struct s3c_cpufreq_config *cfg); |
227 | 219 | ||
228 | #ifdef CONFIG_S3C2410_IOTIMING | 220 | #ifdef CONFIG_S3C2410_IOTIMING |
221 | extern void s3c2410_iotiming_debugfs(struct seq_file *seq, | ||
222 | struct s3c_cpufreq_config *cfg, | ||
223 | union s3c_iobank *iob); | ||
224 | |||
229 | extern int s3c2410_iotiming_calc(struct s3c_cpufreq_config *cfg, | 225 | extern int s3c2410_iotiming_calc(struct s3c_cpufreq_config *cfg, |
230 | struct s3c_iotimings *iot); | 226 | struct s3c_iotimings *iot); |
231 | 227 | ||
@@ -235,6 +231,7 @@ extern int s3c2410_iotiming_get(struct s3c_cpufreq_config *cfg, | |||
235 | extern void s3c2410_iotiming_set(struct s3c_cpufreq_config *cfg, | 231 | extern void s3c2410_iotiming_set(struct s3c_cpufreq_config *cfg, |
236 | struct s3c_iotimings *iot); | 232 | struct s3c_iotimings *iot); |
237 | #else | 233 | #else |
234 | #define s3c2410_iotiming_debugfs NULL | ||
238 | #define s3c2410_iotiming_calc NULL | 235 | #define s3c2410_iotiming_calc NULL |
239 | #define s3c2410_iotiming_get NULL | 236 | #define s3c2410_iotiming_get NULL |
240 | #define s3c2410_iotiming_set NULL | 237 | #define s3c2410_iotiming_set NULL |
@@ -242,8 +239,10 @@ extern void s3c2410_iotiming_set(struct s3c_cpufreq_config *cfg, | |||
242 | 239 | ||
243 | /* S3C2412 compatible routines */ | 240 | /* S3C2412 compatible routines */ |
244 | 241 | ||
245 | extern int s3c2412_iotiming_get(struct s3c_cpufreq_config *cfg, | 242 | #ifdef CONFIG_S3C2412_IOTIMING |
246 | struct s3c_iotimings *timings); | 243 | extern void s3c2412_iotiming_debugfs(struct seq_file *seq, |
244 | struct s3c_cpufreq_config *cfg, | ||
245 | union s3c_iobank *iob); | ||
247 | 246 | ||
248 | extern int s3c2412_iotiming_get(struct s3c_cpufreq_config *cfg, | 247 | extern int s3c2412_iotiming_get(struct s3c_cpufreq_config *cfg, |
249 | struct s3c_iotimings *timings); | 248 | struct s3c_iotimings *timings); |
@@ -253,6 +252,12 @@ extern int s3c2412_iotiming_calc(struct s3c_cpufreq_config *cfg, | |||
253 | 252 | ||
254 | extern void s3c2412_iotiming_set(struct s3c_cpufreq_config *cfg, | 253 | extern void s3c2412_iotiming_set(struct s3c_cpufreq_config *cfg, |
255 | struct s3c_iotimings *iot); | 254 | struct s3c_iotimings *iot); |
255 | #else | ||
256 | #define s3c2412_iotiming_debugfs NULL | ||
257 | #define s3c2412_iotiming_calc NULL | ||
258 | #define s3c2412_iotiming_get NULL | ||
259 | #define s3c2412_iotiming_set NULL | ||
260 | #endif /* CONFIG_S3C2412_IOTIMING */ | ||
256 | 261 | ||
257 | #ifdef CONFIG_CPU_FREQ_S3C24XX_DEBUG | 262 | #ifdef CONFIG_CPU_FREQ_S3C24XX_DEBUG |
258 | #define s3c_freq_dbg(x...) printk(KERN_INFO x) | 263 | #define s3c_freq_dbg(x...) printk(KERN_INFO x) |
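Note on the cpu-freq-core.h hunk above: the s3c2410/s3c2412 iotiming declarations move under their CONFIG_S3C24{10,12}_IOTIMING guards, and when a guard is off the symbols are defined to NULL so callers can simply test the pointer instead of sprinkling #ifdefs. A compact userspace illustration of that pattern; all names here are illustrative, not the real kernel symbols:

#include <stdio.h>

#define CONFIG_FOO_IOTIMING	/* comment out to simulate the option being off */

#ifdef CONFIG_FOO_IOTIMING
static int foo_iotiming_calc(int freq) { return freq / 7; }
#else
#define foo_iotiming_calc NULL
#endif

struct cpufreq_driver_ops {
	int (*calc)(int freq);
};

int main(void)
{
	struct cpufreq_driver_ops ops = { .calc = foo_iotiming_calc };

	if (ops.calc)		/* callers just test the pointer */
		printf("calc(2100) = %d\n", ops.calc(2100));
	else
		printf("iotiming support compiled out\n");
	return 0;
}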
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c index ef5a2a08fcca..ea3395750324 100644 --- a/arch/avr32/kernel/process.c +++ b/arch/avr32/kernel/process.c | |||
@@ -34,10 +34,12 @@ void cpu_idle(void) | |||
34 | { | 34 | { |
35 | /* endless idle loop with no priority at all */ | 35 | /* endless idle loop with no priority at all */ |
36 | while (1) { | 36 | while (1) { |
37 | tick_nohz_stop_sched_tick(1); | 37 | tick_nohz_idle_enter(); |
38 | rcu_idle_enter(); | ||
38 | while (!need_resched()) | 39 | while (!need_resched()) |
39 | cpu_idle_sleep(); | 40 | cpu_idle_sleep(); |
40 | tick_nohz_restart_sched_tick(); | 41 | rcu_idle_exit(); |
42 | tick_nohz_idle_exit(); | ||
41 | preempt_enable_no_resched(); | 43 | preempt_enable_no_resched(); |
42 | schedule(); | 44 | schedule(); |
43 | preempt_disable(); | 45 | preempt_disable(); |
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c index 6a80a9e9fc4a..8dd0416673cb 100644 --- a/arch/blackfin/kernel/process.c +++ b/arch/blackfin/kernel/process.c | |||
@@ -88,10 +88,12 @@ void cpu_idle(void) | |||
88 | #endif | 88 | #endif |
89 | if (!idle) | 89 | if (!idle) |
90 | idle = default_idle; | 90 | idle = default_idle; |
91 | tick_nohz_stop_sched_tick(1); | 91 | tick_nohz_idle_enter(); |
92 | rcu_idle_enter(); | ||
92 | while (!need_resched()) | 93 | while (!need_resched()) |
93 | idle(); | 94 | idle(); |
94 | tick_nohz_restart_sched_tick(); | 95 | rcu_idle_exit(); |
96 | tick_nohz_idle_exit(); | ||
95 | preempt_enable_no_resched(); | 97 | preempt_enable_no_resched(); |
96 | schedule(); | 98 | schedule(); |
97 | preempt_disable(); | 99 | preempt_disable(); |
diff --git a/arch/cris/arch-v32/kernel/time.c b/arch/cris/arch-v32/kernel/time.c index bb978ede8985..6773fc83a670 100644 --- a/arch/cris/arch-v32/kernel/time.c +++ b/arch/cris/arch-v32/kernel/time.c | |||
@@ -47,14 +47,12 @@ static struct clocksource cont_rotime = { | |||
47 | .rating = 300, | 47 | .rating = 300, |
48 | .read = read_cont_rotime, | 48 | .read = read_cont_rotime, |
49 | .mask = CLOCKSOURCE_MASK(32), | 49 | .mask = CLOCKSOURCE_MASK(32), |
50 | .shift = 10, | ||
51 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 50 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
52 | }; | 51 | }; |
53 | 52 | ||
54 | static int __init etrax_init_cont_rotime(void) | 53 | static int __init etrax_init_cont_rotime(void) |
55 | { | 54 | { |
56 | cont_rotime.mult = clocksource_khz2mult(100000, cont_rotime.shift); | 55 | clocksource_register_khz(&cont_rotime, 100000); |
57 | clocksource_register(&cont_rotime); | ||
58 | return 0; | 56 | return 0; |
59 | } | 57 | } |
60 | arch_initcall(etrax_init_cont_rotime); | 58 | arch_initcall(etrax_init_cont_rotime); |
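Note on the cris time.c hunk above: the hand-computed .shift/.mult pair is dropped in favour of clocksource_register_khz(), which picks them internally. The mult/shift pair just encodes "nanoseconds per cycle" in fixed point, ns = (cycles * mult) >> shift. A worked example for the 100 MHz (100000 kHz) counter in the patch, keeping the old shift of 10 for comparison (rounding details of the real helper are ignored):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t khz   = 100000;	/* 100 MHz free-running counter */
	uint32_t shift = 10;		/* the shift the old code hard-coded */

	/* mult such that ns = (cycles * mult) >> shift.
	 * One cycle at 100 MHz is 10 ns, so mult comes out as 10 << 10 = 10240. */
	uint64_t mult = ((uint64_t)1000000 << shift) / khz;

	uint64_t cycles = 123456;
	printf("mult=%llu, %llu cycles = %llu ns\n",
	       (unsigned long long)mult,
	       (unsigned long long)cycles,
	       (unsigned long long)((cycles * mult) >> shift));
	return 0;
}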
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 27489b6dd533..3b7a7c483785 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig | |||
@@ -23,6 +23,9 @@ config IA64 | |||
23 | select HAVE_ARCH_TRACEHOOK | 23 | select HAVE_ARCH_TRACEHOOK |
24 | select HAVE_DMA_API_DEBUG | 24 | select HAVE_DMA_API_DEBUG |
25 | select HAVE_GENERIC_HARDIRQS | 25 | select HAVE_GENERIC_HARDIRQS |
26 | select HAVE_MEMBLOCK | ||
27 | select HAVE_MEMBLOCK_NODE_MAP | ||
28 | select ARCH_DISCARD_MEMBLOCK | ||
26 | select GENERIC_IRQ_PROBE | 29 | select GENERIC_IRQ_PROBE |
27 | select GENERIC_PENDING_IRQ if SMP | 30 | select GENERIC_PENDING_IRQ if SMP |
28 | select IRQ_PER_CPU | 31 | select IRQ_PER_CPU |
@@ -474,9 +477,6 @@ config NODES_SHIFT | |||
474 | MAX_NUMNODES will be 2^(This value). | 477 | MAX_NUMNODES will be 2^(This value). |
475 | If in doubt, use the default. | 478 | If in doubt, use the default. |
476 | 479 | ||
477 | config ARCH_POPULATES_NODE_MAP | ||
478 | def_bool y | ||
479 | |||
480 | # VIRTUAL_MEM_MAP and FLAT_NODE_MEM_MAP are functionally equivalent. | 480 | # VIRTUAL_MEM_MAP and FLAT_NODE_MEM_MAP are functionally equivalent. |
481 | # VIRTUAL_MEM_MAP has been retained for historical reasons. | 481 | # VIRTUAL_MEM_MAP has been retained for historical reasons. |
482 | config VIRTUAL_MEM_MAP | 482 | config VIRTUAL_MEM_MAP |
diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h index 6073b187528a..3deac956d325 100644 --- a/arch/ia64/include/asm/cputime.h +++ b/arch/ia64/include/asm/cputime.h | |||
@@ -26,59 +26,53 @@ | |||
26 | #include <linux/jiffies.h> | 26 | #include <linux/jiffies.h> |
27 | #include <asm/processor.h> | 27 | #include <asm/processor.h> |
28 | 28 | ||
29 | typedef u64 cputime_t; | 29 | typedef u64 __nocast cputime_t; |
30 | typedef u64 cputime64_t; | 30 | typedef u64 __nocast cputime64_t; |
31 | 31 | ||
32 | #define cputime_zero ((cputime_t)0) | ||
33 | #define cputime_one_jiffy jiffies_to_cputime(1) | 32 | #define cputime_one_jiffy jiffies_to_cputime(1) |
34 | #define cputime_max ((~((cputime_t)0) >> 1) - 1) | ||
35 | #define cputime_add(__a, __b) ((__a) + (__b)) | ||
36 | #define cputime_sub(__a, __b) ((__a) - (__b)) | ||
37 | #define cputime_div(__a, __n) ((__a) / (__n)) | ||
38 | #define cputime_halve(__a) ((__a) >> 1) | ||
39 | #define cputime_eq(__a, __b) ((__a) == (__b)) | ||
40 | #define cputime_gt(__a, __b) ((__a) > (__b)) | ||
41 | #define cputime_ge(__a, __b) ((__a) >= (__b)) | ||
42 | #define cputime_lt(__a, __b) ((__a) < (__b)) | ||
43 | #define cputime_le(__a, __b) ((__a) <= (__b)) | ||
44 | |||
45 | #define cputime64_zero ((cputime64_t)0) | ||
46 | #define cputime64_add(__a, __b) ((__a) + (__b)) | ||
47 | #define cputime64_sub(__a, __b) ((__a) - (__b)) | ||
48 | #define cputime_to_cputime64(__ct) (__ct) | ||
49 | 33 | ||
50 | /* | 34 | /* |
51 | * Convert cputime <-> jiffies (HZ) | 35 | * Convert cputime <-> jiffies (HZ) |
52 | */ | 36 | */ |
53 | #define cputime_to_jiffies(__ct) ((__ct) / (NSEC_PER_SEC / HZ)) | 37 | #define cputime_to_jiffies(__ct) \ |
54 | #define jiffies_to_cputime(__jif) ((__jif) * (NSEC_PER_SEC / HZ)) | 38 | ((__force u64)(__ct) / (NSEC_PER_SEC / HZ)) |
55 | #define cputime64_to_jiffies64(__ct) ((__ct) / (NSEC_PER_SEC / HZ)) | 39 | #define jiffies_to_cputime(__jif) \ |
56 | #define jiffies64_to_cputime64(__jif) ((__jif) * (NSEC_PER_SEC / HZ)) | 40 | (__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ)) |
41 | #define cputime64_to_jiffies64(__ct) \ | ||
42 | ((__force u64)(__ct) / (NSEC_PER_SEC / HZ)) | ||
43 | #define jiffies64_to_cputime64(__jif) \ | ||
44 | (__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ)) | ||
57 | 45 | ||
58 | /* | 46 | /* |
59 | * Convert cputime <-> microseconds | 47 | * Convert cputime <-> microseconds |
60 | */ | 48 | */ |
61 | #define cputime_to_usecs(__ct) ((__ct) / NSEC_PER_USEC) | 49 | #define cputime_to_usecs(__ct) \ |
62 | #define usecs_to_cputime(__usecs) ((__usecs) * NSEC_PER_USEC) | 50 | ((__force u64)(__ct) / NSEC_PER_USEC) |
51 | #define usecs_to_cputime(__usecs) \ | ||
52 | (__force cputime_t)((__usecs) * NSEC_PER_USEC) | ||
53 | #define usecs_to_cputime64(__usecs) \ | ||
54 | (__force cputime64_t)((__usecs) * NSEC_PER_USEC) | ||
63 | 55 | ||
64 | /* | 56 | /* |
65 | * Convert cputime <-> seconds | 57 | * Convert cputime <-> seconds |
66 | */ | 58 | */ |
67 | #define cputime_to_secs(__ct) ((__ct) / NSEC_PER_SEC) | 59 | #define cputime_to_secs(__ct) \ |
68 | #define secs_to_cputime(__secs) ((__secs) * NSEC_PER_SEC) | 60 | ((__force u64)(__ct) / NSEC_PER_SEC) |
61 | #define secs_to_cputime(__secs) \ | ||
62 | (__force cputime_t)((__secs) * NSEC_PER_SEC) | ||
69 | 63 | ||
70 | /* | 64 | /* |
71 | * Convert cputime <-> timespec (nsec) | 65 | * Convert cputime <-> timespec (nsec) |
72 | */ | 66 | */ |
73 | static inline cputime_t timespec_to_cputime(const struct timespec *val) | 67 | static inline cputime_t timespec_to_cputime(const struct timespec *val) |
74 | { | 68 | { |
75 | cputime_t ret = val->tv_sec * NSEC_PER_SEC; | 69 | u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec; |
76 | return (ret + val->tv_nsec); | 70 | return (__force cputime_t) ret; |
77 | } | 71 | } |
78 | static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val) | 72 | static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val) |
79 | { | 73 | { |
80 | val->tv_sec = ct / NSEC_PER_SEC; | 74 | val->tv_sec = (__force u64) ct / NSEC_PER_SEC; |
81 | val->tv_nsec = ct % NSEC_PER_SEC; | 75 | val->tv_nsec = (__force u64) ct % NSEC_PER_SEC; |
82 | } | 76 | } |
83 | 77 | ||
84 | /* | 78 | /* |
@@ -86,25 +80,28 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val) | |||
86 | */ | 80 | */ |
87 | static inline cputime_t timeval_to_cputime(struct timeval *val) | 81 | static inline cputime_t timeval_to_cputime(struct timeval *val) |
88 | { | 82 | { |
89 | cputime_t ret = val->tv_sec * NSEC_PER_SEC; | 83 | u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC; |
90 | return (ret + val->tv_usec * NSEC_PER_USEC); | 84 | return (__force cputime_t) ret; |
91 | } | 85 | } |
92 | static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val) | 86 | static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val) |
93 | { | 87 | { |
94 | val->tv_sec = ct / NSEC_PER_SEC; | 88 | val->tv_sec = (__force u64) ct / NSEC_PER_SEC; |
95 | val->tv_usec = (ct % NSEC_PER_SEC) / NSEC_PER_USEC; | 89 | val->tv_usec = ((__force u64) ct % NSEC_PER_SEC) / NSEC_PER_USEC; |
96 | } | 90 | } |
97 | 91 | ||
98 | /* | 92 | /* |
99 | * Convert cputime <-> clock (USER_HZ) | 93 | * Convert cputime <-> clock (USER_HZ) |
100 | */ | 94 | */ |
101 | #define cputime_to_clock_t(__ct) ((__ct) / (NSEC_PER_SEC / USER_HZ)) | 95 | #define cputime_to_clock_t(__ct) \ |
102 | #define clock_t_to_cputime(__x) ((__x) * (NSEC_PER_SEC / USER_HZ)) | 96 | ((__force u64)(__ct) / (NSEC_PER_SEC / USER_HZ)) |
97 | #define clock_t_to_cputime(__x) \ | ||
98 | (__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ)) | ||
103 | 99 | ||
104 | /* | 100 | /* |
105 | * Convert cputime64 to clock. | 101 | * Convert cputime64 to clock. |
106 | */ | 102 | */ |
107 | #define cputime64_to_clock_t(__ct) cputime_to_clock_t((cputime_t)__ct) | 103 | #define cputime64_to_clock_t(__ct) \ |
104 | cputime_to_clock_t((__force cputime_t)__ct) | ||
108 | 105 | ||
109 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ | 106 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ |
110 | #endif /* __IA64_CPUTIME_H */ | 107 | #endif /* __IA64_CPUTIME_H */ |
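The ia64 hunk above is part of the tree-wide cputime cleanup in this series: the per-arch cputime_add()/cputime_sub()/cputime64_* macro layer is deleted in favour of plain operators, and every conversion gains a __force cast because cputime_t is being turned into a __nocast type that sparse can check. A minimal sketch of the intended usage pattern follows; the type and helper names are illustrative only, and the annotations come from the kernel's <linux/types.h>/<linux/compiler.h> machinery.

    /* sketch: how the __nocast-annotated type is meant to be used after this series */
    typedef u64 __nocast cputime_demo_t;

    static inline cputime_demo_t nsecs_to_cputime_demo(u64 nsecs)
    {
        /* conversions in and out of the type are explicit, via __force */
        return (__force cputime_demo_t) nsecs;
    }

    static inline u64 cputime_demo_to_nsecs(cputime_demo_t ct)
    {
        return (__force u64) ct;
    }

    static inline cputime_demo_t cputime_demo_accumulate(cputime_demo_t a, cputime_demo_t b)
    {
        return a + b;   /* plain '+' replaces the old cputime_add() macro */
    }
    /* a sparse run ("make C=1") is what catches implicit mixing with bare integers */
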
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c index f114a3b14c6a..1516d1dc11fd 100644 --- a/arch/ia64/mm/contig.c +++ b/arch/ia64/mm/contig.c | |||
@@ -16,6 +16,7 @@ | |||
16 | */ | 16 | */ |
17 | #include <linux/bootmem.h> | 17 | #include <linux/bootmem.h> |
18 | #include <linux/efi.h> | 18 | #include <linux/efi.h> |
19 | #include <linux/memblock.h> | ||
19 | #include <linux/mm.h> | 20 | #include <linux/mm.h> |
20 | #include <linux/nmi.h> | 21 | #include <linux/nmi.h> |
21 | #include <linux/swap.h> | 22 | #include <linux/swap.h> |
@@ -348,7 +349,7 @@ paging_init (void) | |||
348 | printk("Virtual mem_map starts at 0x%p\n", mem_map); | 349 | printk("Virtual mem_map starts at 0x%p\n", mem_map); |
349 | } | 350 | } |
350 | #else /* !CONFIG_VIRTUAL_MEM_MAP */ | 351 | #else /* !CONFIG_VIRTUAL_MEM_MAP */ |
351 | add_active_range(0, 0, max_low_pfn); | 352 | memblock_add_node(0, PFN_PHYS(max_low_pfn), 0); |
352 | free_area_init_nodes(max_zone_pfns); | 353 | free_area_init_nodes(max_zone_pfns); |
353 | #endif /* !CONFIG_VIRTUAL_MEM_MAP */ | 354 | #endif /* !CONFIG_VIRTUAL_MEM_MAP */ |
354 | zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page)); | 355 | zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page)); |
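The contig.c change above, and the matching ia64/MIPS/powerpc hunks further down, replace the PFN-based add_active_range(nid, start_pfn, end_pfn) calls with memblock_add_node(), which registers the range with memblock directly and takes a physical base and a size in bytes plus the node id; that is why PFN_PHYS() (or __pa()) conversions appear at every call site. A sketch of the conversion pattern, assuming <linux/memblock.h> and <linux/pfn.h>:

    /* sketch: old add_active_range(nid, start_pfn, end_pfn) becomes ... */
    static void __init register_range_demo(int nid, unsigned long start_pfn,
                                           unsigned long end_pfn)
    {
        memblock_add_node(PFN_PHYS(start_pfn),
                          PFN_PHYS(end_pfn - start_pfn), nid);
    }
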
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 00cb0e26c64e..13df239dbed1 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/bootmem.h> | 10 | #include <linux/bootmem.h> |
11 | #include <linux/efi.h> | 11 | #include <linux/efi.h> |
12 | #include <linux/elf.h> | 12 | #include <linux/elf.h> |
13 | #include <linux/memblock.h> | ||
13 | #include <linux/mm.h> | 14 | #include <linux/mm.h> |
14 | #include <linux/mmzone.h> | 15 | #include <linux/mmzone.h> |
15 | #include <linux/module.h> | 16 | #include <linux/module.h> |
@@ -557,8 +558,7 @@ int __init register_active_ranges(u64 start, u64 len, int nid) | |||
557 | #endif | 558 | #endif |
558 | 559 | ||
559 | if (start < end) | 560 | if (start < end) |
560 | add_active_range(nid, __pa(start) >> PAGE_SHIFT, | 561 | memblock_add_node(__pa(start), end - start, nid); |
561 | __pa(end) >> PAGE_SHIFT); | ||
562 | return 0; | 562 | return 0; |
563 | } | 563 | } |
564 | 564 | ||
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 43f984e93970..303192fc9260 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h | |||
@@ -350,10 +350,12 @@ | |||
350 | #define __NR_clock_adjtime 342 | 350 | #define __NR_clock_adjtime 342 |
351 | #define __NR_syncfs 343 | 351 | #define __NR_syncfs 343 |
352 | #define __NR_setns 344 | 352 | #define __NR_setns 344 |
353 | #define __NR_process_vm_readv 345 | ||
354 | #define __NR_process_vm_writev 346 | ||
353 | 355 | ||
354 | #ifdef __KERNEL__ | 356 | #ifdef __KERNEL__ |
355 | 357 | ||
356 | #define NR_syscalls 345 | 358 | #define NR_syscalls 347 |
357 | 359 | ||
358 | #define __ARCH_WANT_IPC_PARSE_VERSION | 360 | #define __ARCH_WANT_IPC_PARSE_VERSION |
359 | #define __ARCH_WANT_OLD_READDIR | 361 | #define __ARCH_WANT_OLD_READDIR |
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index c468f2edaa85..ce827b376110 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S | |||
@@ -365,4 +365,6 @@ ENTRY(sys_call_table) | |||
365 | .long sys_clock_adjtime | 365 | .long sys_clock_adjtime |
366 | .long sys_syncfs | 366 | .long sys_syncfs |
367 | .long sys_setns | 367 | .long sys_setns |
368 | .long sys_process_vm_readv /* 345 */ | ||
369 | .long sys_process_vm_writev | ||
368 | 370 | ||
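The two syscall table entries above wire up the new cross-memory-attach calls on m68k, with NR_syscalls bumped from 345 to 347 to match. From userspace they are normally reached through the glibc wrappers; a small usage sketch, where the pid and remote address are placeholders you would obtain elsewhere (for example from /proc/<pid>/maps):

    /* userspace sketch: copy 64 bytes out of another process's address space */
    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    int read_remote(pid_t pid, void *remote_addr)
    {
        char buf[64];
        struct iovec local  = { .iov_base = buf,         .iov_len = sizeof(buf) };
        struct iovec remote = { .iov_base = remote_addr, .iov_len = sizeof(buf) };
        ssize_t n = process_vm_readv(pid, &local, 1, &remote, 1, 0);

        if (n < 0) {
            perror("process_vm_readv");
            return -1;
        }
        printf("copied %zd bytes\n", n);
        return 0;
    }
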
diff --git a/arch/m68k/platform/68328/timers.c b/arch/m68k/platform/68328/timers.c index 309f725995bf..f2678866067b 100644 --- a/arch/m68k/platform/68328/timers.c +++ b/arch/m68k/platform/68328/timers.c | |||
@@ -93,7 +93,6 @@ static struct clocksource m68328_clk = { | |||
93 | .name = "timer", | 93 | .name = "timer", |
94 | .rating = 250, | 94 | .rating = 250, |
95 | .read = m68328_read_clk, | 95 | .read = m68328_read_clk, |
96 | .shift = 20, | ||
97 | .mask = CLOCKSOURCE_MASK(32), | 96 | .mask = CLOCKSOURCE_MASK(32), |
98 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 97 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
99 | }; | 98 | }; |
@@ -115,8 +114,7 @@ void hw_timer_init(void) | |||
115 | 114 | ||
116 | /* Enable timer 1 */ | 115 | /* Enable timer 1 */ |
117 | TCTL |= TCTL_TEN; | 116 | TCTL |= TCTL_TEN; |
118 | m68328_clk.mult = clocksource_hz2mult(TICKS_PER_JIFFY*HZ, m68328_clk.shift); | 117 | clocksource_register_hz(&m68328_clk, TICKS_PER_JIFFY*HZ); |
119 | clocksource_register(&m68328_clk); | ||
120 | } | 118 | } |
121 | 119 | ||
122 | /***************************************************************************/ | 120 | /***************************************************************************/ |
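This timers.c hunk, and the ColdFire and parisc clocksource hunks that follow, all drop the hand-computed .shift/.mult pair and call clocksource_register_hz() (or _khz) instead, letting the clocksource core pick a suitable mult/shift for the given frequency; the static .shift = 20 initializers become dead and are removed. A before/after sketch, with illustrative structure and frequency names:

    static cycle_t demo_read_clk(struct clocksource *cs);  /* assumed hardware read hook */

    static struct clocksource demo_clk = {
        .name   = "demo",
        .rating = 250,
        .read   = demo_read_clk,
        .mask   = CLOCKSOURCE_MASK(32),
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
    };

    static void __init demo_timer_init(void)
    {
        /* old style:
         *      demo_clk.shift = 20;
         *      demo_clk.mult  = clocksource_hz2mult(DEMO_TIMER_HZ, demo_clk.shift);
         *      clocksource_register(&demo_clk);
         * new style: the core derives mult/shift from the frequency.
         */
        clocksource_register_hz(&demo_clk, DEMO_TIMER_HZ);
    }
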
diff --git a/arch/m68k/platform/coldfire/dma_timer.c b/arch/m68k/platform/coldfire/dma_timer.c index a5f562823d7a..235ad57c4707 100644 --- a/arch/m68k/platform/coldfire/dma_timer.c +++ b/arch/m68k/platform/coldfire/dma_timer.c | |||
@@ -44,7 +44,6 @@ static struct clocksource clocksource_cf_dt = { | |||
44 | .rating = 200, | 44 | .rating = 200, |
45 | .read = cf_dt_get_cycles, | 45 | .read = cf_dt_get_cycles, |
46 | .mask = CLOCKSOURCE_MASK(32), | 46 | .mask = CLOCKSOURCE_MASK(32), |
47 | .shift = 20, | ||
48 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 47 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
49 | }; | 48 | }; |
50 | 49 | ||
@@ -60,9 +59,7 @@ static int __init init_cf_dt_clocksource(void) | |||
60 | __raw_writeb(0x00, DTER0); | 59 | __raw_writeb(0x00, DTER0); |
61 | __raw_writel(0x00000000, DTRR0); | 60 | __raw_writel(0x00000000, DTRR0); |
62 | __raw_writew(DMA_DTMR_CLK_DIV_16 | DMA_DTMR_ENABLE, DTMR0); | 61 | __raw_writew(DMA_DTMR_CLK_DIV_16 | DMA_DTMR_ENABLE, DTMR0); |
63 | clocksource_cf_dt.mult = clocksource_hz2mult(DMA_FREQ, | 62 | return clocksource_register_hz(&clocksource_cf_dt, DMA_FREQ); |
64 | clocksource_cf_dt.shift); | ||
65 | return clocksource_register(&clocksource_cf_dt); | ||
66 | } | 63 | } |
67 | 64 | ||
68 | arch_initcall(init_cf_dt_clocksource); | 65 | arch_initcall(init_cf_dt_clocksource); |
diff --git a/arch/m68k/platform/coldfire/pit.c b/arch/m68k/platform/coldfire/pit.c index c2b980926bec..02663d25822d 100644 --- a/arch/m68k/platform/coldfire/pit.c +++ b/arch/m68k/platform/coldfire/pit.c | |||
@@ -144,7 +144,6 @@ static struct clocksource pit_clk = { | |||
144 | .name = "pit", | 144 | .name = "pit", |
145 | .rating = 100, | 145 | .rating = 100, |
146 | .read = pit_read_clk, | 146 | .read = pit_read_clk, |
147 | .shift = 20, | ||
148 | .mask = CLOCKSOURCE_MASK(32), | 147 | .mask = CLOCKSOURCE_MASK(32), |
149 | }; | 148 | }; |
150 | 149 | ||
@@ -162,8 +161,7 @@ void hw_timer_init(void) | |||
162 | 161 | ||
163 | setup_irq(MCFINT_VECBASE + MCFINT_PIT1, &pit_irq); | 162 | setup_irq(MCFINT_VECBASE + MCFINT_PIT1, &pit_irq); |
164 | 163 | ||
165 | pit_clk.mult = clocksource_hz2mult(FREQ, pit_clk.shift); | 164 | clocksource_register_hz(&pit_clk, FREQ); |
166 | clocksource_register(&pit_clk); | ||
167 | } | 165 | } |
168 | 166 | ||
169 | /***************************************************************************/ | 167 | /***************************************************************************/ |
diff --git a/arch/m68k/platform/coldfire/sltimers.c b/arch/m68k/platform/coldfire/sltimers.c index 6a85daf9a7fd..b7f822b552bb 100644 --- a/arch/m68k/platform/coldfire/sltimers.c +++ b/arch/m68k/platform/coldfire/sltimers.c | |||
@@ -114,7 +114,6 @@ static struct clocksource mcfslt_clk = { | |||
114 | .name = "slt", | 114 | .name = "slt", |
115 | .rating = 250, | 115 | .rating = 250, |
116 | .read = mcfslt_read_clk, | 116 | .read = mcfslt_read_clk, |
117 | .shift = 20, | ||
118 | .mask = CLOCKSOURCE_MASK(32), | 117 | .mask = CLOCKSOURCE_MASK(32), |
119 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 118 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
120 | }; | 119 | }; |
@@ -136,8 +135,7 @@ void hw_timer_init(void) | |||
136 | 135 | ||
137 | setup_irq(MCF_IRQ_TIMER, &mcfslt_timer_irq); | 136 | setup_irq(MCF_IRQ_TIMER, &mcfslt_timer_irq); |
138 | 137 | ||
139 | mcfslt_clk.mult = clocksource_hz2mult(MCF_BUSCLK, mcfslt_clk.shift); | 138 | clocksource_register_hz(&mcfslt_clk, MCF_BUSCLK); |
140 | clocksource_register(&mcfslt_clk); | ||
141 | 139 | ||
142 | #ifdef CONFIG_HIGHPROFILE | 140 | #ifdef CONFIG_HIGHPROFILE |
143 | mcfslt_profile_init(); | 141 | mcfslt_profile_init(); |
diff --git a/arch/m68k/platform/coldfire/timers.c b/arch/m68k/platform/coldfire/timers.c index 60242f65fea9..0d90da32fcdb 100644 --- a/arch/m68k/platform/coldfire/timers.c +++ b/arch/m68k/platform/coldfire/timers.c | |||
@@ -88,7 +88,6 @@ static struct clocksource mcftmr_clk = { | |||
88 | .name = "tmr", | 88 | .name = "tmr", |
89 | .rating = 250, | 89 | .rating = 250, |
90 | .read = mcftmr_read_clk, | 90 | .read = mcftmr_read_clk, |
91 | .shift = 20, | ||
92 | .mask = CLOCKSOURCE_MASK(32), | 91 | .mask = CLOCKSOURCE_MASK(32), |
93 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 92 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
94 | }; | 93 | }; |
@@ -109,8 +108,7 @@ void hw_timer_init(void) | |||
109 | __raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 | | 108 | __raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 | |
110 | MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, TA(MCFTIMER_TMR)); | 109 | MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, TA(MCFTIMER_TMR)); |
111 | 110 | ||
112 | mcftmr_clk.mult = clocksource_hz2mult(FREQ, mcftmr_clk.shift); | 111 | clocksource_register_hz(&mcftmr_clk, FREQ); |
113 | clocksource_register(&mcftmr_clk); | ||
114 | 112 | ||
115 | setup_irq(MCF_IRQ_TIMER, &mcftmr_timer_irq); | 113 | setup_irq(MCF_IRQ_TIMER, &mcftmr_timer_irq); |
116 | 114 | ||
diff --git a/arch/microblaze/include/asm/memblock.h b/arch/microblaze/include/asm/memblock.h deleted file mode 100644 index 20a8e257c77f..000000000000 --- a/arch/microblaze/include/asm/memblock.h +++ /dev/null | |||
@@ -1,14 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008 Michal Simek <monstr@monstr.eu> | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | */ | ||
8 | |||
9 | #ifndef _ASM_MICROBLAZE_MEMBLOCK_H | ||
10 | #define _ASM_MICROBLAZE_MEMBLOCK_H | ||
11 | |||
12 | #endif /* _ASM_MICROBLAZE_MEMBLOCK_H */ | ||
13 | |||
14 | |||
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c index 95cc295976a7..7dcb5bfffb75 100644 --- a/arch/microblaze/kernel/process.c +++ b/arch/microblaze/kernel/process.c | |||
@@ -103,10 +103,12 @@ void cpu_idle(void) | |||
103 | if (!idle) | 103 | if (!idle) |
104 | idle = default_idle; | 104 | idle = default_idle; |
105 | 105 | ||
106 | tick_nohz_stop_sched_tick(1); | 106 | tick_nohz_idle_enter(); |
107 | rcu_idle_enter(); | ||
107 | while (!need_resched()) | 108 | while (!need_resched()) |
108 | idle(); | 109 | idle(); |
109 | tick_nohz_restart_sched_tick(); | 110 | rcu_idle_exit(); |
111 | tick_nohz_idle_exit(); | ||
110 | 112 | ||
111 | preempt_enable_no_resched(); | 113 | preempt_enable_no_resched(); |
112 | schedule(); | 114 | schedule(); |
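The microblaze idle loop above follows the same recipe applied to MIPS, OpenRISC, powerpc and iSeries later in this series: tick_nohz_stop_sched_tick(1)/tick_nohz_restart_sched_tick() become tick_nohz_idle_enter()/tick_nohz_idle_exit(), and the architecture now tells RCU explicitly when the CPU goes idle via rcu_idle_enter()/rcu_idle_exit(), nested inside the tick calls. A skeleton of the converted loop; the inner wait hook is a placeholder for whatever the architecture does to sleep:

    /* skeleton of the converted idle loop; details vary per architecture */
    void cpu_idle_demo(void)
    {
        while (1) {
            tick_nohz_idle_enter();
            rcu_idle_enter();          /* RCU may now ignore this CPU */

            while (!need_resched())
                arch_idle_wait();      /* placeholder: wfi/hlt/pause, etc. */

            rcu_idle_exit();           /* back under RCU's watch before scheduling */
            tick_nohz_idle_exit();

            preempt_enable_no_resched();
            schedule();
            preempt_disable();
        }
    }
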
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c index 977484add216..80d314e81901 100644 --- a/arch/microblaze/kernel/prom.c +++ b/arch/microblaze/kernel/prom.c | |||
@@ -122,7 +122,6 @@ void __init early_init_devtree(void *params) | |||
122 | of_scan_flat_dt(early_init_dt_scan_chosen, cmd_line); | 122 | of_scan_flat_dt(early_init_dt_scan_chosen, cmd_line); |
123 | 123 | ||
124 | /* Scan memory nodes and rebuild MEMBLOCKs */ | 124 | /* Scan memory nodes and rebuild MEMBLOCKs */ |
125 | memblock_init(); | ||
126 | of_scan_flat_dt(early_init_dt_scan_root, NULL); | 125 | of_scan_flat_dt(early_init_dt_scan_root, NULL); |
127 | of_scan_flat_dt(early_init_dt_scan_memory, NULL); | 126 | of_scan_flat_dt(early_init_dt_scan_memory, NULL); |
128 | 127 | ||
@@ -130,7 +129,7 @@ void __init early_init_devtree(void *params) | |||
130 | strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE); | 129 | strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE); |
131 | parse_early_param(); | 130 | parse_early_param(); |
132 | 131 | ||
133 | memblock_analyze(); | 132 | memblock_allow_resize(); |
134 | 133 | ||
135 | pr_debug("Phys. mem: %lx\n", (unsigned long) memblock_phys_mem_size()); | 134 | pr_debug("Phys. mem: %lx\n", (unsigned long) memblock_phys_mem_size()); |
136 | 135 | ||
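The prom.c hunk above (and the openrisc and powerpc equivalents below) reflects two memblock API changes in this series: memblock_init() is gone because the memblock arrays are now statically initialized, and memblock_analyze() is replaced by memblock_allow_resize(), which merely permits the region arrays to grow once memory has been registered; the total size is computed on demand. The early boot sequence reduces to roughly the following sketch, not tied to one architecture:

    /* rough ordering after this series (sketch) */
    void __init early_memory_setup_demo(void)
    {
        /* no memblock_init(): the region arrays start out statically initialized */
        of_scan_flat_dt(early_init_dt_scan_root, NULL);
        of_scan_flat_dt(early_init_dt_scan_memory, NULL);  /* ends up calling memblock_add() */

        memblock_allow_resize();       /* was memblock_analyze() */
        pr_debug("Phys. mem: %llx\n",
                 (unsigned long long) memblock_phys_mem_size());
    }
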
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index d46f1da18a3c..9c652eb68aaa 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig | |||
@@ -25,6 +25,9 @@ config MIPS | |||
25 | select GENERIC_IRQ_SHOW | 25 | select GENERIC_IRQ_SHOW |
26 | select HAVE_ARCH_JUMP_LABEL | 26 | select HAVE_ARCH_JUMP_LABEL |
27 | select IRQ_FORCED_THREADING | 27 | select IRQ_FORCED_THREADING |
28 | select HAVE_MEMBLOCK | ||
29 | select HAVE_MEMBLOCK_NODE_MAP | ||
30 | select ARCH_DISCARD_MEMBLOCK | ||
28 | 31 | ||
29 | menu "Machine selection" | 32 | menu "Machine selection" |
30 | 33 | ||
@@ -2064,9 +2067,6 @@ config ARCH_DISCONTIGMEM_ENABLE | |||
2064 | or have huge holes in the physical address space for other reasons. | 2067 | or have huge holes in the physical address space for other reasons. |
2065 | See <file:Documentation/vm/numa> for more. | 2068 | See <file:Documentation/vm/numa> for more. |
2066 | 2069 | ||
2067 | config ARCH_POPULATES_NODE_MAP | ||
2068 | def_bool y | ||
2069 | |||
2070 | config ARCH_SPARSEMEM_ENABLE | 2070 | config ARCH_SPARSEMEM_ENABLE |
2071 | bool | 2071 | bool |
2072 | select SPARSEMEM_STATIC | 2072 | select SPARSEMEM_STATIC |
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index 4f2971bcf8e5..315fc0b250f8 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c | |||
@@ -623,7 +623,7 @@ static int mipspmu_event_init(struct perf_event *event) | |||
623 | if (!atomic_inc_not_zero(&active_events)) { | 623 | if (!atomic_inc_not_zero(&active_events)) { |
624 | if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) { | 624 | if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) { |
625 | atomic_dec(&active_events); | 625 | atomic_dec(&active_events); |
626 | return -ENOSPC; | 626 | return -EINVAL; |
627 | } | 627 | } |
628 | 628 | ||
629 | mutex_lock(&pmu_reserve_mutex); | 629 | mutex_lock(&pmu_reserve_mutex); |
@@ -732,15 +732,15 @@ static int validate_group(struct perf_event *event) | |||
732 | memset(&fake_cpuc, 0, sizeof(fake_cpuc)); | 732 | memset(&fake_cpuc, 0, sizeof(fake_cpuc)); |
733 | 733 | ||
734 | if (!validate_event(&fake_cpuc, leader)) | 734 | if (!validate_event(&fake_cpuc, leader)) |
735 | return -ENOSPC; | 735 | return -EINVAL; |
736 | 736 | ||
737 | list_for_each_entry(sibling, &leader->sibling_list, group_entry) { | 737 | list_for_each_entry(sibling, &leader->sibling_list, group_entry) { |
738 | if (!validate_event(&fake_cpuc, sibling)) | 738 | if (!validate_event(&fake_cpuc, sibling)) |
739 | return -ENOSPC; | 739 | return -EINVAL; |
740 | } | 740 | } |
741 | 741 | ||
742 | if (!validate_event(&fake_cpuc, event)) | 742 | if (!validate_event(&fake_cpuc, event)) |
743 | return -ENOSPC; | 743 | return -EINVAL; |
744 | 744 | ||
745 | return 0; | 745 | return 0; |
746 | } | 746 | } |
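Returning -ENOSPC from the MIPS PMU paths above was misleading: the perf core only treats -ENOENT specially (it moves on and lets another PMU claim the event), while any other error is passed straight back to the perf_event_open() caller, where ENOSPC reads as "No space left on device". -EINVAL matches what other architectures return when an event or group simply cannot be scheduled. A hedged sketch of the convention, with an assumed stand-in for the driver's own checks:

    static bool demo_config_supported(struct perf_event_attr *attr);  /* assumed helper */

    /* sketch: error convention for a PMU's event_init callback */
    static int demo_pmu_event_init(struct perf_event *event)
    {
        switch (event->attr.type) {
        case PERF_TYPE_RAW:
        case PERF_TYPE_HARDWARE:
        case PERF_TYPE_HW_CACHE:
            break;
        default:
            return -ENOENT;    /* not this PMU's event: let the core try elsewhere */
        }

        if (!demo_config_supported(&event->attr))
            return -EINVAL;    /* ours, but unschedulable: EINVAL, not ENOSPC */

        return 0;
    }
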
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index c47f96e453c0..7955409051c4 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c | |||
@@ -56,7 +56,8 @@ void __noreturn cpu_idle(void) | |||
56 | 56 | ||
57 | /* endless idle loop with no priority at all */ | 57 | /* endless idle loop with no priority at all */ |
58 | while (1) { | 58 | while (1) { |
59 | tick_nohz_stop_sched_tick(1); | 59 | tick_nohz_idle_enter(); |
60 | rcu_idle_enter(); | ||
60 | while (!need_resched() && cpu_online(cpu)) { | 61 | while (!need_resched() && cpu_online(cpu)) { |
61 | #ifdef CONFIG_MIPS_MT_SMTC | 62 | #ifdef CONFIG_MIPS_MT_SMTC |
62 | extern void smtc_idle_loop_hook(void); | 63 | extern void smtc_idle_loop_hook(void); |
@@ -77,7 +78,8 @@ void __noreturn cpu_idle(void) | |||
77 | system_state == SYSTEM_BOOTING)) | 78 | system_state == SYSTEM_BOOTING)) |
78 | play_dead(); | 79 | play_dead(); |
79 | #endif | 80 | #endif |
80 | tick_nohz_restart_sched_tick(); | 81 | rcu_idle_exit(); |
82 | tick_nohz_idle_exit(); | ||
81 | preempt_enable_no_resched(); | 83 | preempt_enable_no_resched(); |
82 | schedule(); | 84 | schedule(); |
83 | preempt_disable(); | 85 | preempt_disable(); |
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 84af26ab2212..b1cb8f87d7b4 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/ioport.h> | 14 | #include <linux/ioport.h> |
15 | #include <linux/export.h> | 15 | #include <linux/export.h> |
16 | #include <linux/screen_info.h> | 16 | #include <linux/screen_info.h> |
17 | #include <linux/memblock.h> | ||
17 | #include <linux/bootmem.h> | 18 | #include <linux/bootmem.h> |
18 | #include <linux/initrd.h> | 19 | #include <linux/initrd.h> |
19 | #include <linux/root_dev.h> | 20 | #include <linux/root_dev.h> |
@@ -352,7 +353,7 @@ static void __init bootmem_init(void) | |||
352 | continue; | 353 | continue; |
353 | #endif | 354 | #endif |
354 | 355 | ||
355 | add_active_range(0, start, end); | 356 | memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0); |
356 | } | 357 | } |
357 | 358 | ||
358 | /* | 359 | /* |
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index bc1297109cc5..b105eca3c020 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c +++ b/arch/mips/sgi-ip27/ip27-memory.c | |||
@@ -12,6 +12,7 @@ | |||
12 | */ | 12 | */ |
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
15 | #include <linux/memblock.h> | ||
15 | #include <linux/mm.h> | 16 | #include <linux/mm.h> |
16 | #include <linux/mmzone.h> | 17 | #include <linux/mmzone.h> |
17 | #include <linux/module.h> | 18 | #include <linux/module.h> |
@@ -381,8 +382,8 @@ static void __init szmem(void) | |||
381 | continue; | 382 | continue; |
382 | } | 383 | } |
383 | num_physpages += slot_psize; | 384 | num_physpages += slot_psize; |
384 | add_active_range(node, slot_getbasepfn(node, slot), | 385 | memblock_add_node(PFN_PHYS(slot_getbasepfn(node, slot)), |
385 | slot_getbasepfn(node, slot) + slot_psize); | 386 | PFN_PHYS(slot_psize), node); |
386 | } | 387 | } |
387 | } | 388 | } |
388 | } | 389 | } |
diff --git a/arch/openrisc/include/asm/memblock.h b/arch/openrisc/include/asm/memblock.h deleted file mode 100644 index bbe5a1c788cb..000000000000 --- a/arch/openrisc/include/asm/memblock.h +++ /dev/null | |||
@@ -1,24 +0,0 @@ | |||
1 | /* | ||
2 | * OpenRISC Linux | ||
3 | * | ||
4 | * Linux architectural port borrowing liberally from similar works of | ||
5 | * others. All original copyrights apply as per the original source | ||
6 | * declaration. | ||
7 | * | ||
8 | * OpenRISC implementation: | ||
9 | * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> | ||
10 | * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> | ||
11 | * et al. | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify | ||
14 | * it under the terms of the GNU General Public License as published by | ||
15 | * the Free Software Foundation; either version 2 of the License, or | ||
16 | * (at your option) any later version. | ||
17 | */ | ||
18 | |||
19 | #ifndef __ASM_OPENRISC_MEMBLOCK_H | ||
20 | #define __ASM_OPENRISC_MEMBLOCK_H | ||
21 | |||
22 | /* empty */ | ||
23 | |||
24 | #endif /* __ASM_OPENRISC_MEMBLOCK_H */ | ||
diff --git a/arch/openrisc/kernel/idle.c b/arch/openrisc/kernel/idle.c index d5bc5f813e89..e5fc78877830 100644 --- a/arch/openrisc/kernel/idle.c +++ b/arch/openrisc/kernel/idle.c | |||
@@ -51,7 +51,8 @@ void cpu_idle(void) | |||
51 | 51 | ||
52 | /* endless idle loop with no priority at all */ | 52 | /* endless idle loop with no priority at all */ |
53 | while (1) { | 53 | while (1) { |
54 | tick_nohz_stop_sched_tick(1); | 54 | tick_nohz_idle_enter(); |
55 | rcu_idle_enter(); | ||
55 | 56 | ||
56 | while (!need_resched()) { | 57 | while (!need_resched()) { |
57 | check_pgt_cache(); | 58 | check_pgt_cache(); |
@@ -69,7 +70,8 @@ void cpu_idle(void) | |||
69 | set_thread_flag(TIF_POLLING_NRFLAG); | 70 | set_thread_flag(TIF_POLLING_NRFLAG); |
70 | } | 71 | } |
71 | 72 | ||
72 | tick_nohz_restart_sched_tick(); | 73 | rcu_idle_exit(); |
74 | tick_nohz_idle_exit(); | ||
73 | preempt_enable_no_resched(); | 75 | preempt_enable_no_resched(); |
74 | schedule(); | 76 | schedule(); |
75 | preempt_disable(); | 77 | preempt_disable(); |
diff --git a/arch/openrisc/kernel/prom.c b/arch/openrisc/kernel/prom.c index 1bb58ba89afa..3d4478f6c942 100644 --- a/arch/openrisc/kernel/prom.c +++ b/arch/openrisc/kernel/prom.c | |||
@@ -76,14 +76,13 @@ void __init early_init_devtree(void *params) | |||
76 | of_scan_flat_dt(early_init_dt_scan_chosen, cmd_line); | 76 | of_scan_flat_dt(early_init_dt_scan_chosen, cmd_line); |
77 | 77 | ||
78 | /* Scan memory nodes and rebuild MEMBLOCKs */ | 78 | /* Scan memory nodes and rebuild MEMBLOCKs */ |
79 | memblock_init(); | ||
80 | of_scan_flat_dt(early_init_dt_scan_root, NULL); | 79 | of_scan_flat_dt(early_init_dt_scan_root, NULL); |
81 | of_scan_flat_dt(early_init_dt_scan_memory, NULL); | 80 | of_scan_flat_dt(early_init_dt_scan_memory, NULL); |
82 | 81 | ||
83 | /* Save command line for /proc/cmdline and then parse parameters */ | 82 | /* Save command line for /proc/cmdline and then parse parameters */ |
84 | strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE); | 83 | strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE); |
85 | 84 | ||
86 | memblock_analyze(); | 85 | memblock_allow_resize(); |
87 | 86 | ||
88 | /* We must copy the flattend device tree from init memory to regular | 87 | /* We must copy the flattend device tree from init memory to regular |
89 | * memory because the device tree references the strings in it | 88 | * memory because the device tree references the strings in it |
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index 45b7389d77aa..7c0774397b89 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c | |||
@@ -198,8 +198,6 @@ static struct clocksource clocksource_cr16 = { | |||
198 | .rating = 300, | 198 | .rating = 300, |
199 | .read = read_cr16, | 199 | .read = read_cr16, |
200 | .mask = CLOCKSOURCE_MASK(BITS_PER_LONG), | 200 | .mask = CLOCKSOURCE_MASK(BITS_PER_LONG), |
201 | .mult = 0, /* to be set */ | ||
202 | .shift = 22, | ||
203 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 201 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
204 | }; | 202 | }; |
205 | 203 | ||
@@ -270,7 +268,5 @@ void __init time_init(void) | |||
270 | 268 | ||
271 | /* register at clocksource framework */ | 269 | /* register at clocksource framework */ |
272 | current_cr16_khz = PAGE0->mem_10msec/10; /* kHz */ | 270 | current_cr16_khz = PAGE0->mem_10msec/10; /* kHz */ |
273 | clocksource_cr16.mult = clocksource_khz2mult(current_cr16_khz, | 271 | clocksource_register_khz(&clocksource_cr16, current_cr16_khz); |
274 | clocksource_cr16.shift); | ||
275 | clocksource_register(&clocksource_cr16); | ||
276 | } | 272 | } |
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 951e18f5335b..ead0bc68439d 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
@@ -117,6 +117,7 @@ config PPC | |||
117 | select HAVE_KRETPROBES | 117 | select HAVE_KRETPROBES |
118 | select HAVE_ARCH_TRACEHOOK | 118 | select HAVE_ARCH_TRACEHOOK |
119 | select HAVE_MEMBLOCK | 119 | select HAVE_MEMBLOCK |
120 | select HAVE_MEMBLOCK_NODE_MAP | ||
120 | select HAVE_DMA_ATTRS | 121 | select HAVE_DMA_ATTRS |
121 | select HAVE_DMA_API_DEBUG | 122 | select HAVE_DMA_API_DEBUG |
122 | select USE_GENERIC_SMP_HELPERS if SMP | 123 | select USE_GENERIC_SMP_HELPERS if SMP |
@@ -421,9 +422,6 @@ config ARCH_SPARSEMEM_DEFAULT | |||
421 | def_bool y | 422 | def_bool y |
422 | depends on (SMP && PPC_PSERIES) || PPC_PS3 | 423 | depends on (SMP && PPC_PSERIES) || PPC_PS3 |
423 | 424 | ||
424 | config ARCH_POPULATES_NODE_MAP | ||
425 | def_bool y | ||
426 | |||
427 | config SYS_SUPPORTS_HUGETLBFS | 425 | config SYS_SUPPORTS_HUGETLBFS |
428 | bool | 426 | bool |
429 | 427 | ||
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h index 1cf20bdfbeca..6ec1c380a4d6 100644 --- a/arch/powerpc/include/asm/cputime.h +++ b/arch/powerpc/include/asm/cputime.h | |||
@@ -29,25 +29,8 @@ static inline void setup_cputime_one_jiffy(void) { } | |||
29 | #include <asm/time.h> | 29 | #include <asm/time.h> |
30 | #include <asm/param.h> | 30 | #include <asm/param.h> |
31 | 31 | ||
32 | typedef u64 cputime_t; | 32 | typedef u64 __nocast cputime_t; |
33 | typedef u64 cputime64_t; | 33 | typedef u64 __nocast cputime64_t; |
34 | |||
35 | #define cputime_zero ((cputime_t)0) | ||
36 | #define cputime_max ((~((cputime_t)0) >> 1) - 1) | ||
37 | #define cputime_add(__a, __b) ((__a) + (__b)) | ||
38 | #define cputime_sub(__a, __b) ((__a) - (__b)) | ||
39 | #define cputime_div(__a, __n) ((__a) / (__n)) | ||
40 | #define cputime_halve(__a) ((__a) >> 1) | ||
41 | #define cputime_eq(__a, __b) ((__a) == (__b)) | ||
42 | #define cputime_gt(__a, __b) ((__a) > (__b)) | ||
43 | #define cputime_ge(__a, __b) ((__a) >= (__b)) | ||
44 | #define cputime_lt(__a, __b) ((__a) < (__b)) | ||
45 | #define cputime_le(__a, __b) ((__a) <= (__b)) | ||
46 | |||
47 | #define cputime64_zero ((cputime64_t)0) | ||
48 | #define cputime64_add(__a, __b) ((__a) + (__b)) | ||
49 | #define cputime64_sub(__a, __b) ((__a) - (__b)) | ||
50 | #define cputime_to_cputime64(__ct) (__ct) | ||
51 | 34 | ||
52 | #ifdef __KERNEL__ | 35 | #ifdef __KERNEL__ |
53 | 36 | ||
@@ -65,7 +48,7 @@ DECLARE_PER_CPU(unsigned long, cputime_scaled_last_delta); | |||
65 | 48 | ||
66 | static inline unsigned long cputime_to_jiffies(const cputime_t ct) | 49 | static inline unsigned long cputime_to_jiffies(const cputime_t ct) |
67 | { | 50 | { |
68 | return mulhdu(ct, __cputime_jiffies_factor); | 51 | return mulhdu((__force u64) ct, __cputime_jiffies_factor); |
69 | } | 52 | } |
70 | 53 | ||
71 | /* Estimate the scaled cputime by scaling the real cputime based on | 54 | /* Estimate the scaled cputime by scaling the real cputime based on |
@@ -74,14 +57,15 @@ static inline cputime_t cputime_to_scaled(const cputime_t ct) | |||
74 | { | 57 | { |
75 | if (cpu_has_feature(CPU_FTR_SPURR) && | 58 | if (cpu_has_feature(CPU_FTR_SPURR) && |
76 | __get_cpu_var(cputime_last_delta)) | 59 | __get_cpu_var(cputime_last_delta)) |
77 | return ct * __get_cpu_var(cputime_scaled_last_delta) / | 60 | return (__force u64) ct * |
78 | __get_cpu_var(cputime_last_delta); | 61 | __get_cpu_var(cputime_scaled_last_delta) / |
62 | __get_cpu_var(cputime_last_delta); | ||
79 | return ct; | 63 | return ct; |
80 | } | 64 | } |
81 | 65 | ||
82 | static inline cputime_t jiffies_to_cputime(const unsigned long jif) | 66 | static inline cputime_t jiffies_to_cputime(const unsigned long jif) |
83 | { | 67 | { |
84 | cputime_t ct; | 68 | u64 ct; |
85 | unsigned long sec; | 69 | unsigned long sec; |
86 | 70 | ||
87 | /* have to be a little careful about overflow */ | 71 | /* have to be a little careful about overflow */ |
@@ -93,7 +77,7 @@ static inline cputime_t jiffies_to_cputime(const unsigned long jif) | |||
93 | } | 77 | } |
94 | if (sec) | 78 | if (sec) |
95 | ct += (cputime_t) sec * tb_ticks_per_sec; | 79 | ct += (cputime_t) sec * tb_ticks_per_sec; |
96 | return ct; | 80 | return (__force cputime_t) ct; |
97 | } | 81 | } |
98 | 82 | ||
99 | static inline void setup_cputime_one_jiffy(void) | 83 | static inline void setup_cputime_one_jiffy(void) |
@@ -103,7 +87,7 @@ static inline void setup_cputime_one_jiffy(void) | |||
103 | 87 | ||
104 | static inline cputime64_t jiffies64_to_cputime64(const u64 jif) | 88 | static inline cputime64_t jiffies64_to_cputime64(const u64 jif) |
105 | { | 89 | { |
106 | cputime_t ct; | 90 | u64 ct; |
107 | u64 sec; | 91 | u64 sec; |
108 | 92 | ||
109 | /* have to be a little careful about overflow */ | 93 | /* have to be a little careful about overflow */ |
@@ -114,13 +98,13 @@ static inline cputime64_t jiffies64_to_cputime64(const u64 jif) | |||
114 | do_div(ct, HZ); | 98 | do_div(ct, HZ); |
115 | } | 99 | } |
116 | if (sec) | 100 | if (sec) |
117 | ct += (cputime_t) sec * tb_ticks_per_sec; | 101 | ct += (u64) sec * tb_ticks_per_sec; |
118 | return ct; | 102 | return (__force cputime64_t) ct; |
119 | } | 103 | } |
120 | 104 | ||
121 | static inline u64 cputime64_to_jiffies64(const cputime_t ct) | 105 | static inline u64 cputime64_to_jiffies64(const cputime_t ct) |
122 | { | 106 | { |
123 | return mulhdu(ct, __cputime_jiffies_factor); | 107 | return mulhdu((__force u64) ct, __cputime_jiffies_factor); |
124 | } | 108 | } |
125 | 109 | ||
126 | /* | 110 | /* |
@@ -130,12 +114,12 @@ extern u64 __cputime_msec_factor; | |||
130 | 114 | ||
131 | static inline unsigned long cputime_to_usecs(const cputime_t ct) | 115 | static inline unsigned long cputime_to_usecs(const cputime_t ct) |
132 | { | 116 | { |
133 | return mulhdu(ct, __cputime_msec_factor) * USEC_PER_MSEC; | 117 | return mulhdu((__force u64) ct, __cputime_msec_factor) * USEC_PER_MSEC; |
134 | } | 118 | } |
135 | 119 | ||
136 | static inline cputime_t usecs_to_cputime(const unsigned long us) | 120 | static inline cputime_t usecs_to_cputime(const unsigned long us) |
137 | { | 121 | { |
138 | cputime_t ct; | 122 | u64 ct; |
139 | unsigned long sec; | 123 | unsigned long sec; |
140 | 124 | ||
141 | /* have to be a little careful about overflow */ | 125 | /* have to be a little careful about overflow */ |
@@ -147,9 +131,11 @@ static inline cputime_t usecs_to_cputime(const unsigned long us) | |||
147 | } | 131 | } |
148 | if (sec) | 132 | if (sec) |
149 | ct += (cputime_t) sec * tb_ticks_per_sec; | 133 | ct += (cputime_t) sec * tb_ticks_per_sec; |
150 | return ct; | 134 | return (__force cputime_t) ct; |
151 | } | 135 | } |
152 | 136 | ||
137 | #define usecs_to_cputime64(us) usecs_to_cputime(us) | ||
138 | |||
153 | /* | 139 | /* |
154 | * Convert cputime <-> seconds | 140 | * Convert cputime <-> seconds |
155 | */ | 141 | */ |
@@ -157,12 +143,12 @@ extern u64 __cputime_sec_factor; | |||
157 | 143 | ||
158 | static inline unsigned long cputime_to_secs(const cputime_t ct) | 144 | static inline unsigned long cputime_to_secs(const cputime_t ct) |
159 | { | 145 | { |
160 | return mulhdu(ct, __cputime_sec_factor); | 146 | return mulhdu((__force u64) ct, __cputime_sec_factor); |
161 | } | 147 | } |
162 | 148 | ||
163 | static inline cputime_t secs_to_cputime(const unsigned long sec) | 149 | static inline cputime_t secs_to_cputime(const unsigned long sec) |
164 | { | 150 | { |
165 | return (cputime_t) sec * tb_ticks_per_sec; | 151 | return (__force cputime_t)((u64) sec * tb_ticks_per_sec); |
166 | } | 152 | } |
167 | 153 | ||
168 | /* | 154 | /* |
@@ -170,7 +156,7 @@ static inline cputime_t secs_to_cputime(const unsigned long sec) | |||
170 | */ | 156 | */ |
171 | static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p) | 157 | static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p) |
172 | { | 158 | { |
173 | u64 x = ct; | 159 | u64 x = (__force u64) ct; |
174 | unsigned int frac; | 160 | unsigned int frac; |
175 | 161 | ||
176 | frac = do_div(x, tb_ticks_per_sec); | 162 | frac = do_div(x, tb_ticks_per_sec); |
@@ -182,11 +168,11 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p) | |||
182 | 168 | ||
183 | static inline cputime_t timespec_to_cputime(const struct timespec *p) | 169 | static inline cputime_t timespec_to_cputime(const struct timespec *p) |
184 | { | 170 | { |
185 | cputime_t ct; | 171 | u64 ct; |
186 | 172 | ||
187 | ct = (u64) p->tv_nsec * tb_ticks_per_sec; | 173 | ct = (u64) p->tv_nsec * tb_ticks_per_sec; |
188 | do_div(ct, 1000000000); | 174 | do_div(ct, 1000000000); |
189 | return ct + (u64) p->tv_sec * tb_ticks_per_sec; | 175 | return (__force cputime_t)(ct + (u64) p->tv_sec * tb_ticks_per_sec); |
190 | } | 176 | } |
191 | 177 | ||
192 | /* | 178 | /* |
@@ -194,7 +180,7 @@ static inline cputime_t timespec_to_cputime(const struct timespec *p) | |||
194 | */ | 180 | */ |
195 | static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p) | 181 | static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p) |
196 | { | 182 | { |
197 | u64 x = ct; | 183 | u64 x = (__force u64) ct; |
198 | unsigned int frac; | 184 | unsigned int frac; |
199 | 185 | ||
200 | frac = do_div(x, tb_ticks_per_sec); | 186 | frac = do_div(x, tb_ticks_per_sec); |
@@ -206,11 +192,11 @@ static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p) | |||
206 | 192 | ||
207 | static inline cputime_t timeval_to_cputime(const struct timeval *p) | 193 | static inline cputime_t timeval_to_cputime(const struct timeval *p) |
208 | { | 194 | { |
209 | cputime_t ct; | 195 | u64 ct; |
210 | 196 | ||
211 | ct = (u64) p->tv_usec * tb_ticks_per_sec; | 197 | ct = (u64) p->tv_usec * tb_ticks_per_sec; |
212 | do_div(ct, 1000000); | 198 | do_div(ct, 1000000); |
213 | return ct + (u64) p->tv_sec * tb_ticks_per_sec; | 199 | return (__force cputime_t)(ct + (u64) p->tv_sec * tb_ticks_per_sec); |
214 | } | 200 | } |
215 | 201 | ||
216 | /* | 202 | /* |
@@ -220,12 +206,12 @@ extern u64 __cputime_clockt_factor; | |||
220 | 206 | ||
221 | static inline unsigned long cputime_to_clock_t(const cputime_t ct) | 207 | static inline unsigned long cputime_to_clock_t(const cputime_t ct) |
222 | { | 208 | { |
223 | return mulhdu(ct, __cputime_clockt_factor); | 209 | return mulhdu((__force u64) ct, __cputime_clockt_factor); |
224 | } | 210 | } |
225 | 211 | ||
226 | static inline cputime_t clock_t_to_cputime(const unsigned long clk) | 212 | static inline cputime_t clock_t_to_cputime(const unsigned long clk) |
227 | { | 213 | { |
228 | cputime_t ct; | 214 | u64 ct; |
229 | unsigned long sec; | 215 | unsigned long sec; |
230 | 216 | ||
231 | /* have to be a little careful about overflow */ | 217 | /* have to be a little careful about overflow */ |
@@ -236,8 +222,8 @@ static inline cputime_t clock_t_to_cputime(const unsigned long clk) | |||
236 | do_div(ct, USER_HZ); | 222 | do_div(ct, USER_HZ); |
237 | } | 223 | } |
238 | if (sec) | 224 | if (sec) |
239 | ct += (cputime_t) sec * tb_ticks_per_sec; | 225 | ct += (u64) sec * tb_ticks_per_sec; |
240 | return ct; | 226 | return (__force cputime_t) ct; |
241 | } | 227 | } |
242 | 228 | ||
243 | #define cputime64_to_clock_t(ct) cputime_to_clock_t((cputime_t)(ct)) | 229 | #define cputime64_to_clock_t(ct) cputime_to_clock_t((cputime_t)(ct)) |
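The powerpc cputime.h changes above mirror the ia64 ones: cputime_t/cputime64_t become __nocast, the old arithmetic macros go away, and each conversion does its math on a plain u64 behind explicit __force casts. The new usecs_to_cputime64() define is added alongside for callers that need a 64-bit variant; since both powerpc types share the timebase representation it simply reuses usecs_to_cputime(). The header's recurring overflow-careful idiom (scale the sub-second remainder first, then add whole seconds separately) can be sketched generically as follows; the function and parameter names are illustrative and do_div() comes from <asm/div64.h>:

    /* sketch: convert "count" units of 1/hz seconds into timebase ticks without
     * letting the 64-bit intermediate product overflow */
    static inline u64 units_to_ticks_demo(unsigned long count, unsigned long hz,
                                          u64 ticks_per_sec)
    {
        unsigned long sec = count / hz;          /* whole seconds handled separately ... */
        u64 ct = (u64)(count % hz) * ticks_per_sec;

        do_div(ct, hz);                          /* ... so this product stays in range */
        if (sec)
            ct += (u64) sec * ticks_per_sec;
        return ct;
    }
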
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index d4df013ad779..69c7377d2071 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h | |||
@@ -381,39 +381,6 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu) | |||
381 | } | 381 | } |
382 | #endif | 382 | #endif |
383 | 383 | ||
384 | static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r, | ||
385 | unsigned long pte_index) | ||
386 | { | ||
387 | unsigned long rb, va_low; | ||
388 | |||
389 | rb = (v & ~0x7fUL) << 16; /* AVA field */ | ||
390 | va_low = pte_index >> 3; | ||
391 | if (v & HPTE_V_SECONDARY) | ||
392 | va_low = ~va_low; | ||
393 | /* xor vsid from AVA */ | ||
394 | if (!(v & HPTE_V_1TB_SEG)) | ||
395 | va_low ^= v >> 12; | ||
396 | else | ||
397 | va_low ^= v >> 24; | ||
398 | va_low &= 0x7ff; | ||
399 | if (v & HPTE_V_LARGE) { | ||
400 | rb |= 1; /* L field */ | ||
401 | if (cpu_has_feature(CPU_FTR_ARCH_206) && | ||
402 | (r & 0xff000)) { | ||
403 | /* non-16MB large page, must be 64k */ | ||
404 | /* (masks depend on page size) */ | ||
405 | rb |= 0x1000; /* page encoding in LP field */ | ||
406 | rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */ | ||
407 | rb |= (va_low & 0xfe); /* AVAL field (P7 doesn't seem to care) */ | ||
408 | } | ||
409 | } else { | ||
410 | /* 4kB page */ | ||
411 | rb |= (va_low & 0x7ff) << 12; /* remaining 11b of VA */ | ||
412 | } | ||
413 | rb |= (v >> 54) & 0x300; /* B field */ | ||
414 | return rb; | ||
415 | } | ||
416 | |||
417 | /* Magic register values loaded into r3 and r4 before the 'sc' assembly | 384 | /* Magic register values loaded into r3 and r4 before the 'sc' assembly |
418 | * instruction for the OSI hypercalls */ | 385 | * instruction for the OSI hypercalls */ |
419 | #define OSI_SC_MAGIC_R3 0x113724FA | 386 | #define OSI_SC_MAGIC_R3 0x113724FA |
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index e43fe42b9875..d0ac94f98f9e 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h | |||
@@ -29,4 +29,37 @@ static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu) | |||
29 | 29 | ||
30 | #define SPAPR_TCE_SHIFT 12 | 30 | #define SPAPR_TCE_SHIFT 12 |
31 | 31 | ||
32 | static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r, | ||
33 | unsigned long pte_index) | ||
34 | { | ||
35 | unsigned long rb, va_low; | ||
36 | |||
37 | rb = (v & ~0x7fUL) << 16; /* AVA field */ | ||
38 | va_low = pte_index >> 3; | ||
39 | if (v & HPTE_V_SECONDARY) | ||
40 | va_low = ~va_low; | ||
41 | /* xor vsid from AVA */ | ||
42 | if (!(v & HPTE_V_1TB_SEG)) | ||
43 | va_low ^= v >> 12; | ||
44 | else | ||
45 | va_low ^= v >> 24; | ||
46 | va_low &= 0x7ff; | ||
47 | if (v & HPTE_V_LARGE) { | ||
48 | rb |= 1; /* L field */ | ||
49 | if (cpu_has_feature(CPU_FTR_ARCH_206) && | ||
50 | (r & 0xff000)) { | ||
51 | /* non-16MB large page, must be 64k */ | ||
52 | /* (masks depend on page size) */ | ||
53 | rb |= 0x1000; /* page encoding in LP field */ | ||
54 | rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */ | ||
55 | rb |= (va_low & 0xfe); /* AVAL field (P7 doesn't seem to care) */ | ||
56 | } | ||
57 | } else { | ||
58 | /* 4kB page */ | ||
59 | rb |= (va_low & 0x7ff) << 12; /* remaining 11b of VA */ | ||
60 | } | ||
61 | rb |= (v >> 54) & 0x300; /* B field */ | ||
62 | return rb; | ||
63 | } | ||
64 | |||
32 | #endif /* __ASM_KVM_BOOK3S_64_H__ */ | 65 | #endif /* __ASM_KVM_BOOK3S_64_H__ */ |
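compute_tlbie_rb() is moved verbatim out of kvm_book3s.h and into kvm_book3s_64.h: it manipulates 64-bit HPTE fields (HPTE_V_SECONDARY, HPTE_V_1TB_SEG, HPTE_V_LARGE) that only make sense on Book3S-64, so keeping it in the shared header got in the way of 32-bit Book3S builds. Existing callers in the 64-bit KVM MMU code keep using it unchanged, along these lines (the HPTE array and index are assumed context from the caller):

    /* hedged caller sketch: build the RB operand for tlbie from a guest HPTE */
    static unsigned long demo_tlbie_rb(unsigned long *hpte, unsigned long pte_index)
    {
        return compute_tlbie_rb(hpte[0], hpte[1], pte_index);
    }
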
diff --git a/arch/powerpc/include/asm/memblock.h b/arch/powerpc/include/asm/memblock.h deleted file mode 100644 index 43efc345065e..000000000000 --- a/arch/powerpc/include/asm/memblock.h +++ /dev/null | |||
@@ -1,8 +0,0 @@ | |||
1 | #ifndef _ASM_POWERPC_MEMBLOCK_H | ||
2 | #define _ASM_POWERPC_MEMBLOCK_H | ||
3 | |||
4 | #include <asm/udbg.h> | ||
5 | |||
6 | #define MEMBLOCK_DBG(fmt...) udbg_printf(fmt) | ||
7 | |||
8 | #endif /* _ASM_POWERPC_MEMBLOCK_H */ | ||
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index 39a2baa6ad58..9c3cd490b1bd 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c | |||
@@ -46,6 +46,12 @@ static int __init powersave_off(char *arg) | |||
46 | } | 46 | } |
47 | __setup("powersave=off", powersave_off); | 47 | __setup("powersave=off", powersave_off); |
48 | 48 | ||
49 | #if defined(CONFIG_PPC_PSERIES) && defined(CONFIG_TRACEPOINTS) | ||
50 | static const bool idle_uses_rcu = 1; | ||
51 | #else | ||
52 | static const bool idle_uses_rcu; | ||
53 | #endif | ||
54 | |||
49 | /* | 55 | /* |
50 | * The body of the idle task. | 56 | * The body of the idle task. |
51 | */ | 57 | */ |
@@ -56,7 +62,10 @@ void cpu_idle(void) | |||
56 | 62 | ||
57 | set_thread_flag(TIF_POLLING_NRFLAG); | 63 | set_thread_flag(TIF_POLLING_NRFLAG); |
58 | while (1) { | 64 | while (1) { |
59 | tick_nohz_stop_sched_tick(1); | 65 | tick_nohz_idle_enter(); |
66 | if (!idle_uses_rcu) | ||
67 | rcu_idle_enter(); | ||
68 | |||
60 | while (!need_resched() && !cpu_should_die()) { | 69 | while (!need_resched() && !cpu_should_die()) { |
61 | ppc64_runlatch_off(); | 70 | ppc64_runlatch_off(); |
62 | 71 | ||
@@ -93,7 +102,9 @@ void cpu_idle(void) | |||
93 | 102 | ||
94 | HMT_medium(); | 103 | HMT_medium(); |
95 | ppc64_runlatch_on(); | 104 | ppc64_runlatch_on(); |
96 | tick_nohz_restart_sched_tick(); | 105 | if (!idle_uses_rcu) |
106 | rcu_idle_exit(); | ||
107 | tick_nohz_idle_exit(); | ||
97 | preempt_enable_no_resched(); | 108 | preempt_enable_no_resched(); |
98 | if (cpu_should_die()) | 109 | if (cpu_should_die()) |
99 | cpu_die(); | 110 | cpu_die(); |
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index 9ce1672afb59..a2158a395d96 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c | |||
@@ -107,9 +107,6 @@ void __init reserve_crashkernel(void) | |||
107 | unsigned long long crash_size, crash_base; | 107 | unsigned long long crash_size, crash_base; |
108 | int ret; | 108 | int ret; |
109 | 109 | ||
110 | /* this is necessary because of memblock_phys_mem_size() */ | ||
111 | memblock_analyze(); | ||
112 | |||
113 | /* use common parsing */ | 110 | /* use common parsing */ |
114 | ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), | 111 | ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), |
115 | &crash_size, &crash_base); | 112 | &crash_size, &crash_base); |
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index fa1235b0503b..abe405dab34d 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c | |||
@@ -733,8 +733,6 @@ void __init early_init_devtree(void *params) | |||
733 | of_scan_flat_dt(early_init_dt_scan_chosen_ppc, cmd_line); | 733 | of_scan_flat_dt(early_init_dt_scan_chosen_ppc, cmd_line); |
734 | 734 | ||
735 | /* Scan memory nodes and rebuild MEMBLOCKs */ | 735 | /* Scan memory nodes and rebuild MEMBLOCKs */ |
736 | memblock_init(); | ||
737 | |||
738 | of_scan_flat_dt(early_init_dt_scan_root, NULL); | 736 | of_scan_flat_dt(early_init_dt_scan_root, NULL); |
739 | of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL); | 737 | of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL); |
740 | 738 | ||
@@ -756,20 +754,14 @@ void __init early_init_devtree(void *params) | |||
756 | early_reserve_mem(); | 754 | early_reserve_mem(); |
757 | phyp_dump_reserve_mem(); | 755 | phyp_dump_reserve_mem(); |
758 | 756 | ||
759 | limit = memory_limit; | 757 | /* |
760 | if (! limit) { | 758 | * Ensure that total memory size is page-aligned, because otherwise |
761 | phys_addr_t memsize; | 759 | * mark_bootmem() gets upset. |
762 | 760 | */ | |
763 | /* Ensure that total memory size is page-aligned, because | 761 | limit = ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE); |
764 | * otherwise mark_bootmem() gets upset. */ | ||
765 | memblock_analyze(); | ||
766 | memsize = memblock_phys_mem_size(); | ||
767 | if ((memsize & PAGE_MASK) != memsize) | ||
768 | limit = memsize & PAGE_MASK; | ||
769 | } | ||
770 | memblock_enforce_memory_limit(limit); | 762 | memblock_enforce_memory_limit(limit); |
771 | 763 | ||
772 | memblock_analyze(); | 764 | memblock_allow_resize(); |
773 | memblock_dump_all(); | 765 | memblock_dump_all(); |
774 | 766 | ||
775 | DBG("Phys. mem: %llx\n", memblock_phys_mem_size()); | 767 | DBG("Phys. mem: %llx\n", memblock_phys_mem_size()); |
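The early_init_devtree() change above collapses the old "compute a limit only when none was given" block into a single expression. Note that the new code rounds the value up to a page boundary with ALIGN() where the old code masked it down, and it applies that alignment to an explicitly requested memory_limit as well. Expanded without the GNU ?: shorthand, it reads:

    /* expansion of the new one-liner (sketch); "a ?: b" is GCC's "a ? a : b" */
    phys_addr_t limit = memory_limit;

    if (!limit)
        limit = memblock_phys_mem_size();
    limit = ALIGN(limit, PAGE_SIZE);    /* keep mark_bootmem() happy */
    memblock_enforce_memory_limit(limit);
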
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 0cb137a9b038..336983da9e72 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c | |||
@@ -538,7 +538,7 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu) | |||
538 | tpaca->kvm_hstate.napping = 0; | 538 | tpaca->kvm_hstate.napping = 0; |
539 | vcpu->cpu = vc->pcpu; | 539 | vcpu->cpu = vc->pcpu; |
540 | smp_wmb(); | 540 | smp_wmb(); |
541 | #ifdef CONFIG_PPC_ICP_NATIVE | 541 | #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP) |
542 | if (vcpu->arch.ptid) { | 542 | if (vcpu->arch.ptid) { |
543 | tpaca->cpu_start = 0x80; | 543 | tpaca->cpu_start = 0x80; |
544 | wmb(); | 544 | wmb(); |
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 3c791e1eb675..e2cfb9e1e20e 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c | |||
@@ -658,10 +658,12 @@ program_interrupt: | |||
658 | ulong cmd = kvmppc_get_gpr(vcpu, 3); | 658 | ulong cmd = kvmppc_get_gpr(vcpu, 3); |
659 | int i; | 659 | int i; |
660 | 660 | ||
661 | #ifdef CONFIG_KVM_BOOK3S_64_PR | ||
661 | if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) { | 662 | if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) { |
662 | r = RESUME_GUEST; | 663 | r = RESUME_GUEST; |
663 | break; | 664 | break; |
664 | } | 665 | } |
666 | #endif | ||
665 | 667 | ||
666 | run->papr_hcall.nr = cmd; | 668 | run->papr_hcall.nr = cmd; |
667 | for (i = 0; i < 9; ++i) { | 669 | for (i = 0; i < 9; ++i) { |
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c index 26d20903f2bc..8c0d45a6faf7 100644 --- a/arch/powerpc/kvm/e500.c +++ b/arch/powerpc/kvm/e500.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/kvm_host.h> | 15 | #include <linux/kvm_host.h> |
16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
17 | #include <linux/err.h> | 17 | #include <linux/err.h> |
18 | #include <linux/export.h> | ||
18 | 19 | ||
19 | #include <asm/reg.h> | 20 | #include <asm/reg.h> |
20 | #include <asm/cputable.h> | 21 | #include <asm/cputable.h> |
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 161cefde5c15..58861fa1220e 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c | |||
@@ -134,8 +134,7 @@ void __init MMU_init(void) | |||
134 | 134 | ||
135 | if (memblock.memory.cnt > 1) { | 135 | if (memblock.memory.cnt > 1) { |
136 | #ifndef CONFIG_WII | 136 | #ifndef CONFIG_WII |
137 | memblock.memory.cnt = 1; | 137 | memblock_enforce_memory_limit(memblock.memory.regions[0].size); |
138 | memblock_analyze(); | ||
139 | printk(KERN_WARNING "Only using first contiguous memory region"); | 138 | printk(KERN_WARNING "Only using first contiguous memory region"); |
140 | #else | 139 | #else |
141 | wii_memory_fixups(); | 140 | wii_memory_fixups(); |
@@ -158,7 +157,6 @@ void __init MMU_init(void) | |||
158 | #ifndef CONFIG_HIGHMEM | 157 | #ifndef CONFIG_HIGHMEM |
159 | total_memory = total_lowmem; | 158 | total_memory = total_lowmem; |
160 | memblock_enforce_memory_limit(total_lowmem); | 159 | memblock_enforce_memory_limit(total_lowmem); |
161 | memblock_analyze(); | ||
162 | #endif /* CONFIG_HIGHMEM */ | 160 | #endif /* CONFIG_HIGHMEM */ |
163 | } | 161 | } |
164 | 162 | ||
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 2dd6bdd31fe1..8e2eb6611b0b 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c | |||
@@ -199,7 +199,7 @@ void __init do_init_bootmem(void) | |||
199 | unsigned long start_pfn, end_pfn; | 199 | unsigned long start_pfn, end_pfn; |
200 | start_pfn = memblock_region_memory_base_pfn(reg); | 200 | start_pfn = memblock_region_memory_base_pfn(reg); |
201 | end_pfn = memblock_region_memory_end_pfn(reg); | 201 | end_pfn = memblock_region_memory_end_pfn(reg); |
202 | add_active_range(0, start_pfn, end_pfn); | 202 | memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0); |
203 | } | 203 | } |
204 | 204 | ||
205 | /* Add all physical memory to the bootmem map, mark each area | 205 | /* Add all physical memory to the bootmem map, mark each area |
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index b22a83a91cb8..e6eea0ac80c8 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c | |||
@@ -127,45 +127,25 @@ static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn, | |||
127 | } | 127 | } |
128 | 128 | ||
129 | /* | 129 | /* |
130 | * get_active_region_work_fn - A helper function for get_node_active_region | 130 | * get_node_active_region - Return active region containing pfn |
131 | * Returns datax set to the start_pfn and end_pfn if they contain | ||
132 | * the initial value of datax->start_pfn between them | ||
133 | * @start_pfn: start page(inclusive) of region to check | ||
134 | * @end_pfn: end page(exclusive) of region to check | ||
135 | * @datax: comes in with ->start_pfn set to value to search for and | ||
136 | * goes out with active range if it contains it | ||
137 | * Returns 1 if search value is in range else 0 | ||
138 | */ | ||
139 | static int __init get_active_region_work_fn(unsigned long start_pfn, | ||
140 | unsigned long end_pfn, void *datax) | ||
141 | { | ||
142 | struct node_active_region *data; | ||
143 | data = (struct node_active_region *)datax; | ||
144 | |||
145 | if (start_pfn <= data->start_pfn && end_pfn > data->start_pfn) { | ||
146 | data->start_pfn = start_pfn; | ||
147 | data->end_pfn = end_pfn; | ||
148 | return 1; | ||
149 | } | ||
150 | return 0; | ||
151 | |||
152 | } | ||
153 | |||
154 | /* | ||
155 | * get_node_active_region - Return active region containing start_pfn | ||
156 | * Active range returned is empty if none found. | 131 | * Active range returned is empty if none found. |
157 | * @start_pfn: The page to return the region for. | 132 | * @pfn: The page to return the region for |
158 | * @node_ar: Returned set to the active region containing start_pfn | 133 | * @node_ar: Returned set to the active region containing @pfn |
159 | */ | 134 | */ |
160 | static void __init get_node_active_region(unsigned long start_pfn, | 135 | static void __init get_node_active_region(unsigned long pfn, |
161 | struct node_active_region *node_ar) | 136 | struct node_active_region *node_ar) |
162 | { | 137 | { |
163 | int nid = early_pfn_to_nid(start_pfn); | 138 | unsigned long start_pfn, end_pfn; |
139 | int i, nid; | ||
164 | 140 | ||
165 | node_ar->nid = nid; | 141 | for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { |
166 | node_ar->start_pfn = start_pfn; | 142 | if (pfn >= start_pfn && pfn < end_pfn) { |
167 | node_ar->end_pfn = start_pfn; | 143 | node_ar->nid = nid; |
168 | work_with_active_regions(nid, get_active_region_work_fn, node_ar); | 144 | node_ar->start_pfn = start_pfn; |
145 | node_ar->end_pfn = end_pfn; | ||
146 | break; | ||
147 | } | ||
148 | } | ||
169 | } | 149 | } |
170 | 150 | ||
171 | static void map_cpu_to_node(int cpu, int node) | 151 | static void map_cpu_to_node(int cpu, int node) |
@@ -710,9 +690,7 @@ static void __init parse_drconf_memory(struct device_node *memory) | |||
710 | node_set_online(nid); | 690 | node_set_online(nid); |
711 | sz = numa_enforce_memory_limit(base, size); | 691 | sz = numa_enforce_memory_limit(base, size); |
712 | if (sz) | 692 | if (sz) |
713 | add_active_range(nid, base >> PAGE_SHIFT, | 693 | memblock_set_node(base, sz, nid); |
714 | (base >> PAGE_SHIFT) | ||
715 | + (sz >> PAGE_SHIFT)); | ||
716 | } while (--ranges); | 694 | } while (--ranges); |
717 | } | 695 | } |
718 | } | 696 | } |
@@ -802,8 +780,7 @@ new_range: | |||
802 | continue; | 780 | continue; |
803 | } | 781 | } |
804 | 782 | ||
805 | add_active_range(nid, start >> PAGE_SHIFT, | 783 | memblock_set_node(start, size, nid); |
806 | (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT)); | ||
807 | 784 | ||
808 | if (--ranges) | 785 | if (--ranges) |
809 | goto new_range; | 786 | goto new_range; |
@@ -839,7 +816,8 @@ static void __init setup_nonnuma(void) | |||
839 | end_pfn = memblock_region_memory_end_pfn(reg); | 816 | end_pfn = memblock_region_memory_end_pfn(reg); |
840 | 817 | ||
841 | fake_numa_create_new_node(end_pfn, &nid); | 818 | fake_numa_create_new_node(end_pfn, &nid); |
842 | add_active_range(nid, start_pfn, end_pfn); | 819 | memblock_set_node(PFN_PHYS(start_pfn), |
820 | PFN_PHYS(end_pfn - start_pfn), nid); | ||
843 | node_set_online(nid); | 821 | node_set_online(nid); |
844 | } | 822 | } |
845 | } | 823 | } |
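With HAVE_MEMBLOCK_NODE_MAP selected (see the powerpc Kconfig hunk above), the node map is built by tagging memblock regions with a node id via memblock_set_node() instead of calling add_active_range(); free_area_init_nodes() then reads the node information straight out of memblock. The NUMA paths in numa.c pass real node ids per range, while on non-NUMA configurations everything belongs to node 0, so a single call can cover the whole address space, which is why the mem.c loop above passes the full 0..ULLONG_MAX span. The generic shape of the pattern:

    /* sketch: populating the node map under HAVE_MEMBLOCK_NODE_MAP */
    static void __init node_map_demo(unsigned long *max_zone_pfns)
    {
        /* tag all memory as node 0; NUMA code passes real nids per range instead */
        memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
        free_area_init_nodes(max_zone_pfns);
    }
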
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c index 4e13d6f9023e..573ba3b69d1f 100644 --- a/arch/powerpc/mm/tlb_nohash.c +++ b/arch/powerpc/mm/tlb_nohash.c | |||
@@ -615,7 +615,6 @@ static void __early_init_mmu(int boot_cpu) | |||
615 | 615 | ||
616 | /* limit memory so we dont have linear faults */ | 616 | /* limit memory so we dont have linear faults */ |
617 | memblock_enforce_memory_limit(linear_map_top); | 617 | memblock_enforce_memory_limit(linear_map_top); |
618 | memblock_analyze(); | ||
619 | 618 | ||
620 | patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e); | 619 | patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e); |
621 | patch_exception(0x1e0, exc_instruction_tlb_miss_bolted_book3e); | 620 | patch_exception(0x1e0, exc_instruction_tlb_miss_bolted_book3e); |
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c index 1b5dc1a2e145..6d8dadf19f0b 100644 --- a/arch/powerpc/platforms/embedded6xx/wii.c +++ b/arch/powerpc/platforms/embedded6xx/wii.c | |||
@@ -79,24 +79,19 @@ void __init wii_memory_fixups(void) | |||
79 | BUG_ON(memblock.memory.cnt != 2); | 79 | BUG_ON(memblock.memory.cnt != 2); |
80 | BUG_ON(!page_aligned(p[0].base) || !page_aligned(p[1].base)); | 80 | BUG_ON(!page_aligned(p[0].base) || !page_aligned(p[1].base)); |
81 | 81 | ||
82 | p[0].size = _ALIGN_DOWN(p[0].size, PAGE_SIZE); | 82 | /* trim unaligned tail */ |
83 | p[1].size = _ALIGN_DOWN(p[1].size, PAGE_SIZE); | 83 | memblock_remove(ALIGN(p[1].base + p[1].size, PAGE_SIZE), |
84 | (phys_addr_t)ULLONG_MAX); | ||
84 | 85 | ||
85 | wii_hole_start = p[0].base + p[0].size; | 86 | /* determine hole, add & reserve them */ |
87 | wii_hole_start = ALIGN(p[0].base + p[0].size, PAGE_SIZE); | ||
86 | wii_hole_size = p[1].base - wii_hole_start; | 88 | wii_hole_size = p[1].base - wii_hole_start; |
87 | 89 | memblock_add(wii_hole_start, wii_hole_size); | |
88 | pr_info("MEM1: <%08llx %08llx>\n", p[0].base, p[0].size); | ||
89 | pr_info("HOLE: <%08lx %08lx>\n", wii_hole_start, wii_hole_size); | ||
90 | pr_info("MEM2: <%08llx %08llx>\n", p[1].base, p[1].size); | ||
91 | |||
92 | p[0].size += wii_hole_size + p[1].size; | ||
93 | |||
94 | memblock.memory.cnt = 1; | ||
95 | memblock_analyze(); | ||
96 | |||
97 | /* reserve the hole */ | ||
98 | memblock_reserve(wii_hole_start, wii_hole_size); | 90 | memblock_reserve(wii_hole_start, wii_hole_size); |
99 | 91 | ||
92 | BUG_ON(memblock.memory.cnt != 1); | ||
93 | __memblock_dump_all(); | ||
94 | |||
100 | /* allow ioremapping the address space in the hole */ | 95 | /* allow ioremapping the address space in the hole */ |
101 | __allow_ioremap_reserved = 1; | 96 | __allow_ioremap_reserved = 1; |
102 | } | 97 | } |
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c index ea0acbd8966d..8fc62586a973 100644 --- a/arch/powerpc/platforms/iseries/setup.c +++ b/arch/powerpc/platforms/iseries/setup.c | |||
@@ -563,7 +563,8 @@ static void yield_shared_processor(void) | |||
563 | static void iseries_shared_idle(void) | 563 | static void iseries_shared_idle(void) |
564 | { | 564 | { |
565 | while (1) { | 565 | while (1) { |
566 | tick_nohz_stop_sched_tick(1); | 566 | tick_nohz_idle_enter(); |
567 | rcu_idle_enter(); | ||
567 | while (!need_resched() && !hvlpevent_is_pending()) { | 568 | while (!need_resched() && !hvlpevent_is_pending()) { |
568 | local_irq_disable(); | 569 | local_irq_disable(); |
569 | ppc64_runlatch_off(); | 570 | ppc64_runlatch_off(); |
@@ -577,7 +578,8 @@ static void iseries_shared_idle(void) | |||
577 | } | 578 | } |
578 | 579 | ||
579 | ppc64_runlatch_on(); | 580 | ppc64_runlatch_on(); |
580 | tick_nohz_restart_sched_tick(); | 581 | rcu_idle_exit(); |
582 | tick_nohz_idle_exit(); | ||
581 | 583 | ||
582 | if (hvlpevent_is_pending()) | 584 | if (hvlpevent_is_pending()) |
583 | process_iSeries_events(); | 585 | process_iSeries_events(); |
@@ -593,7 +595,8 @@ static void iseries_dedicated_idle(void) | |||
593 | set_thread_flag(TIF_POLLING_NRFLAG); | 595 | set_thread_flag(TIF_POLLING_NRFLAG); |
594 | 596 | ||
595 | while (1) { | 597 | while (1) { |
596 | tick_nohz_stop_sched_tick(1); | 598 | tick_nohz_idle_enter(); |
599 | rcu_idle_enter(); | ||
597 | if (!need_resched()) { | 600 | if (!need_resched()) { |
598 | while (!need_resched()) { | 601 | while (!need_resched()) { |
599 | ppc64_runlatch_off(); | 602 | ppc64_runlatch_off(); |
@@ -610,7 +613,8 @@ static void iseries_dedicated_idle(void) | |||
610 | } | 613 | } |
611 | 614 | ||
612 | ppc64_runlatch_on(); | 615 | ppc64_runlatch_on(); |
613 | tick_nohz_restart_sched_tick(); | 616 | rcu_idle_exit(); |
617 | tick_nohz_idle_exit(); | ||
614 | preempt_enable_no_resched(); | 618 | preempt_enable_no_resched(); |
615 | schedule(); | 619 | schedule(); |
616 | preempt_disable(); | 620 | preempt_disable(); |
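All of the idle-loop hunks in this series use the same bracket: tick_nohz_idle_enter() before rcu_idle_enter(), and the mirror image rcu_idle_exit() before tick_nohz_idle_exit(), so that the tick code, which may still contain RCU read-side sections, always runs outside the RCU-idle window. A condensed sketch of the shape, not any one architecture's loop; arch_idle() is a placeholder for the platform's wait primitive:

	/* Generic shape of the reworked idle loops in this series. */
	while (1) {
		tick_nohz_idle_enter();		/* stop the periodic tick      */
		rcu_idle_enter();		/* CPU is quiescent for RCU    */

		while (!need_resched())
			arch_idle();		/* placeholder: wait for work  */

		rcu_idle_exit();		/* RCU read-side legal again   */
		tick_nohz_idle_exit();		/* restart tick, account time  */

		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}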
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c index 72714ad27842..8bd6ba542691 100644 --- a/arch/powerpc/platforms/ps3/mm.c +++ b/arch/powerpc/platforms/ps3/mm.c | |||
@@ -319,7 +319,6 @@ static int __init ps3_mm_add_memory(void) | |||
319 | } | 319 | } |
320 | 320 | ||
321 | memblock_add(start_addr, map.r1.size); | 321 | memblock_add(start_addr, map.r1.size); |
322 | memblock_analyze(); | ||
323 | 322 | ||
324 | result = online_pages(start_pfn, nr_pages); | 323 | result = online_pages(start_pfn, nr_pages); |
325 | 324 | ||
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 27a49508b410..52d429be6c76 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c | |||
@@ -555,6 +555,8 @@ void __trace_hcall_entry(unsigned long opcode, unsigned long *args) | |||
555 | 555 | ||
556 | (*depth)++; | 556 | (*depth)++; |
557 | trace_hcall_entry(opcode, args); | 557 | trace_hcall_entry(opcode, args); |
558 | if (opcode == H_CEDE) | ||
559 | rcu_idle_enter(); | ||
558 | (*depth)--; | 560 | (*depth)--; |
559 | 561 | ||
560 | out: | 562 | out: |
@@ -575,6 +577,8 @@ void __trace_hcall_exit(long opcode, unsigned long retval, | |||
575 | goto out; | 577 | goto out; |
576 | 578 | ||
577 | (*depth)++; | 579 | (*depth)++; |
580 | if (opcode == H_CEDE) | ||
581 | rcu_idle_exit(); | ||
578 | trace_hcall_exit(opcode, retval, retbuf); | 582 | trace_hcall_exit(opcode, retval, retbuf); |
579 | (*depth)--; | 583 | (*depth)--; |
580 | 584 | ||
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 373679b3744a..d48ede334434 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -92,6 +92,9 @@ config S390 | |||
92 | select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 | 92 | select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 |
93 | select HAVE_RCU_TABLE_FREE if SMP | 93 | select HAVE_RCU_TABLE_FREE if SMP |
94 | select ARCH_SAVE_PAGE_KEYS if HIBERNATION | 94 | select ARCH_SAVE_PAGE_KEYS if HIBERNATION |
95 | select HAVE_MEMBLOCK | ||
96 | select HAVE_MEMBLOCK_NODE_MAP | ||
97 | select ARCH_DISCARD_MEMBLOCK | ||
95 | select ARCH_INLINE_SPIN_TRYLOCK | 98 | select ARCH_INLINE_SPIN_TRYLOCK |
96 | select ARCH_INLINE_SPIN_TRYLOCK_BH | 99 | select ARCH_INLINE_SPIN_TRYLOCK_BH |
97 | select ARCH_INLINE_SPIN_LOCK | 100 | select ARCH_INLINE_SPIN_LOCK |
@@ -345,9 +348,6 @@ config WARN_DYNAMIC_STACK | |||
345 | 348 | ||
346 | Say N if you are unsure. | 349 | Say N if you are unsure. |
347 | 350 | ||
348 | config ARCH_POPULATES_NODE_MAP | ||
349 | def_bool y | ||
350 | |||
351 | comment "Kernel preemption" | 351 | comment "Kernel preemption" |
352 | 352 | ||
353 | source "kernel/Kconfig.preempt" | 353 | source "kernel/Kconfig.preempt" |
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c index 92f1cb745d69..4de031d6b76c 100644 --- a/arch/s390/appldata/appldata_os.c +++ b/arch/s390/appldata/appldata_os.c | |||
@@ -115,21 +115,21 @@ static void appldata_get_os_data(void *data) | |||
115 | j = 0; | 115 | j = 0; |
116 | for_each_online_cpu(i) { | 116 | for_each_online_cpu(i) { |
117 | os_data->os_cpu[j].per_cpu_user = | 117 | os_data->os_cpu[j].per_cpu_user = |
118 | cputime_to_jiffies(kstat_cpu(i).cpustat.user); | 118 | cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_USER]); |
119 | os_data->os_cpu[j].per_cpu_nice = | 119 | os_data->os_cpu[j].per_cpu_nice = |
120 | cputime_to_jiffies(kstat_cpu(i).cpustat.nice); | 120 | cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_NICE]); |
121 | os_data->os_cpu[j].per_cpu_system = | 121 | os_data->os_cpu[j].per_cpu_system = |
122 | cputime_to_jiffies(kstat_cpu(i).cpustat.system); | 122 | cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]); |
123 | os_data->os_cpu[j].per_cpu_idle = | 123 | os_data->os_cpu[j].per_cpu_idle = |
124 | cputime_to_jiffies(kstat_cpu(i).cpustat.idle); | 124 | cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IDLE]); |
125 | os_data->os_cpu[j].per_cpu_irq = | 125 | os_data->os_cpu[j].per_cpu_irq = |
126 | cputime_to_jiffies(kstat_cpu(i).cpustat.irq); | 126 | cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IRQ]); |
127 | os_data->os_cpu[j].per_cpu_softirq = | 127 | os_data->os_cpu[j].per_cpu_softirq = |
128 | cputime_to_jiffies(kstat_cpu(i).cpustat.softirq); | 128 | cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]); |
129 | os_data->os_cpu[j].per_cpu_iowait = | 129 | os_data->os_cpu[j].per_cpu_iowait = |
130 | cputime_to_jiffies(kstat_cpu(i).cpustat.iowait); | 130 | cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT]); |
131 | os_data->os_cpu[j].per_cpu_steal = | 131 | os_data->os_cpu[j].per_cpu_steal = |
132 | cputime_to_jiffies(kstat_cpu(i).cpustat.steal); | 132 | cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_STEAL]); |
133 | os_data->os_cpu[j].cpu_id = i; | 133 | os_data->os_cpu[j].cpu_id = i; |
134 | j++; | 134 | j++; |
135 | } | 135 | } |
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h index 081434878296..c23c3900c304 100644 --- a/arch/s390/include/asm/cputime.h +++ b/arch/s390/include/asm/cputime.h | |||
@@ -16,114 +16,100 @@ | |||
16 | 16 | ||
17 | /* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */ | 17 | /* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */ |
18 | 18 | ||
19 | typedef unsigned long long cputime_t; | 19 | typedef unsigned long long __nocast cputime_t; |
20 | typedef unsigned long long cputime64_t; | 20 | typedef unsigned long long __nocast cputime64_t; |
21 | 21 | ||
22 | #ifndef __s390x__ | 22 | static inline unsigned long __div(unsigned long long n, unsigned long base) |
23 | |||
24 | static inline unsigned int | ||
25 | __div(unsigned long long n, unsigned int base) | ||
26 | { | 23 | { |
24 | #ifndef __s390x__ | ||
27 | register_pair rp; | 25 | register_pair rp; |
28 | 26 | ||
29 | rp.pair = n >> 1; | 27 | rp.pair = n >> 1; |
30 | asm ("dr %0,%1" : "+d" (rp) : "d" (base >> 1)); | 28 | asm ("dr %0,%1" : "+d" (rp) : "d" (base >> 1)); |
31 | return rp.subreg.odd; | 29 | return rp.subreg.odd; |
30 | #else /* __s390x__ */ | ||
31 | return n / base; | ||
32 | #endif /* __s390x__ */ | ||
32 | } | 33 | } |
33 | 34 | ||
34 | #else /* __s390x__ */ | 35 | #define cputime_one_jiffy jiffies_to_cputime(1) |
36 | |||
37 | /* | ||
38 | * Convert cputime to jiffies and back. | ||
39 | */ | ||
40 | static inline unsigned long cputime_to_jiffies(const cputime_t cputime) | ||
41 | { | ||
42 | return __div((__force unsigned long long) cputime, 4096000000ULL / HZ); | ||
43 | } | ||
35 | 44 | ||
36 | static inline unsigned int | 45 | static inline cputime_t jiffies_to_cputime(const unsigned int jif) |
37 | __div(unsigned long long n, unsigned int base) | ||
38 | { | 46 | { |
39 | return n / base; | 47 | return (__force cputime_t)(jif * (4096000000ULL / HZ)); |
40 | } | 48 | } |
41 | 49 | ||
42 | #endif /* __s390x__ */ | 50 | static inline u64 cputime64_to_jiffies64(cputime64_t cputime) |
51 | { | ||
52 | unsigned long long jif = (__force unsigned long long) cputime; | ||
53 | do_div(jif, 4096000000ULL / HZ); | ||
54 | return jif; | ||
55 | } | ||
43 | 56 | ||
44 | #define cputime_zero (0ULL) | 57 | static inline cputime64_t jiffies64_to_cputime64(const u64 jif) |
45 | #define cputime_one_jiffy jiffies_to_cputime(1) | 58 | { |
46 | #define cputime_max ((~0UL >> 1) - 1) | 59 | return (__force cputime64_t)(jif * (4096000000ULL / HZ)); |
47 | #define cputime_add(__a, __b) ((__a) + (__b)) | ||
48 | #define cputime_sub(__a, __b) ((__a) - (__b)) | ||
49 | #define cputime_div(__a, __n) ({ \ | ||
50 | unsigned long long __div = (__a); \ | ||
51 | do_div(__div,__n); \ | ||
52 | __div; \ | ||
53 | }) | ||
54 | #define cputime_halve(__a) ((__a) >> 1) | ||
55 | #define cputime_eq(__a, __b) ((__a) == (__b)) | ||
56 | #define cputime_gt(__a, __b) ((__a) > (__b)) | ||
57 | #define cputime_ge(__a, __b) ((__a) >= (__b)) | ||
58 | #define cputime_lt(__a, __b) ((__a) < (__b)) | ||
59 | #define cputime_le(__a, __b) ((__a) <= (__b)) | ||
60 | #define cputime_to_jiffies(__ct) (__div((__ct), 4096000000ULL / HZ)) | ||
61 | #define cputime_to_scaled(__ct) (__ct) | ||
62 | #define jiffies_to_cputime(__hz) ((cputime_t)(__hz) * (4096000000ULL / HZ)) | ||
63 | |||
64 | #define cputime64_zero (0ULL) | ||
65 | #define cputime64_add(__a, __b) ((__a) + (__b)) | ||
66 | #define cputime_to_cputime64(__ct) (__ct) | ||
67 | |||
68 | static inline u64 | ||
69 | cputime64_to_jiffies64(cputime64_t cputime) | ||
70 | { | ||
71 | do_div(cputime, 4096000000ULL / HZ); | ||
72 | return cputime; | ||
73 | } | 60 | } |
74 | 61 | ||
75 | /* | 62 | /* |
76 | * Convert cputime to microseconds and back. | 63 | * Convert cputime to microseconds and back. |
77 | */ | 64 | */ |
78 | static inline unsigned int | 65 | static inline unsigned int cputime_to_usecs(const cputime_t cputime) |
79 | cputime_to_usecs(const cputime_t cputime) | ||
80 | { | 66 | { |
81 | return cputime_div(cputime, 4096); | 67 | return (__force unsigned long long) cputime >> 12; |
82 | } | 68 | } |
83 | 69 | ||
84 | static inline cputime_t | 70 | static inline cputime_t usecs_to_cputime(const unsigned int m) |
85 | usecs_to_cputime(const unsigned int m) | ||
86 | { | 71 | { |
87 | return (cputime_t) m * 4096; | 72 | return (__force cputime_t)(m * 4096ULL); |
88 | } | 73 | } |
89 | 74 | ||
75 | #define usecs_to_cputime64(m) usecs_to_cputime(m) | ||
76 | |||
90 | /* | 77 | /* |
91 | * Convert cputime to milliseconds and back. | 78 | * Convert cputime to milliseconds and back. |
92 | */ | 79 | */ |
93 | static inline unsigned int | 80 | static inline unsigned int cputime_to_secs(const cputime_t cputime) |
94 | cputime_to_secs(const cputime_t cputime) | ||
95 | { | 81 | { |
96 | return __div(cputime, 2048000000) >> 1; | 82 | return __div((__force unsigned long long) cputime, 2048000000) >> 1; |
97 | } | 83 | } |
98 | 84 | ||
99 | static inline cputime_t | 85 | static inline cputime_t secs_to_cputime(const unsigned int s) |
100 | secs_to_cputime(const unsigned int s) | ||
101 | { | 86 | { |
102 | return (cputime_t) s * 4096000000ULL; | 87 | return (__force cputime_t)(s * 4096000000ULL); |
103 | } | 88 | } |
104 | 89 | ||
105 | /* | 90 | /* |
106 | * Convert cputime to timespec and back. | 91 | * Convert cputime to timespec and back. |
107 | */ | 92 | */ |
108 | static inline cputime_t | 93 | static inline cputime_t timespec_to_cputime(const struct timespec *value) |
109 | timespec_to_cputime(const struct timespec *value) | ||
110 | { | 94 | { |
111 | return value->tv_nsec * 4096 / 1000 + (u64) value->tv_sec * 4096000000ULL; | 95 | unsigned long long ret = value->tv_sec * 4096000000ULL; |
96 | return (__force cputime_t)(ret + value->tv_nsec * 4096 / 1000); | ||
112 | } | 97 | } |
113 | 98 | ||
114 | static inline void | 99 | static inline void cputime_to_timespec(const cputime_t cputime, |
115 | cputime_to_timespec(const cputime_t cputime, struct timespec *value) | 100 | struct timespec *value) |
116 | { | 101 | { |
102 | unsigned long long __cputime = (__force unsigned long long) cputime; | ||
117 | #ifndef __s390x__ | 103 | #ifndef __s390x__ |
118 | register_pair rp; | 104 | register_pair rp; |
119 | 105 | ||
120 | rp.pair = cputime >> 1; | 106 | rp.pair = __cputime >> 1; |
121 | asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL)); | 107 | asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL)); |
122 | value->tv_nsec = rp.subreg.even * 1000 / 4096; | 108 | value->tv_nsec = rp.subreg.even * 1000 / 4096; |
123 | value->tv_sec = rp.subreg.odd; | 109 | value->tv_sec = rp.subreg.odd; |
124 | #else | 110 | #else |
125 | value->tv_nsec = (cputime % 4096000000ULL) * 1000 / 4096; | 111 | value->tv_nsec = (__cputime % 4096000000ULL) * 1000 / 4096; |
126 | value->tv_sec = cputime / 4096000000ULL; | 112 | value->tv_sec = __cputime / 4096000000ULL; |
127 | #endif | 113 | #endif |
128 | } | 114 | } |
129 | 115 | ||
@@ -132,50 +118,52 @@ cputime_to_timespec(const cputime_t cputime, struct timespec *value) | |||
132 | * Since cputime and timeval have the same resolution (microseconds) | 118 | * Since cputime and timeval have the same resolution (microseconds) |
133 | * this is easy. | 119 | * this is easy. |
134 | */ | 120 | */ |
135 | static inline cputime_t | 121 | static inline cputime_t timeval_to_cputime(const struct timeval *value) |
136 | timeval_to_cputime(const struct timeval *value) | ||
137 | { | 122 | { |
138 | return value->tv_usec * 4096 + (u64) value->tv_sec * 4096000000ULL; | 123 | unsigned long long ret = value->tv_sec * 4096000000ULL; |
124 | return (__force cputime_t)(ret + value->tv_usec * 4096ULL); | ||
139 | } | 125 | } |
140 | 126 | ||
141 | static inline void | 127 | static inline void cputime_to_timeval(const cputime_t cputime, |
142 | cputime_to_timeval(const cputime_t cputime, struct timeval *value) | 128 | struct timeval *value) |
143 | { | 129 | { |
130 | unsigned long long __cputime = (__force unsigned long long) cputime; | ||
144 | #ifndef __s390x__ | 131 | #ifndef __s390x__ |
145 | register_pair rp; | 132 | register_pair rp; |
146 | 133 | ||
147 | rp.pair = cputime >> 1; | 134 | rp.pair = __cputime >> 1; |
148 | asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL)); | 135 | asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL)); |
149 | value->tv_usec = rp.subreg.even / 4096; | 136 | value->tv_usec = rp.subreg.even / 4096; |
150 | value->tv_sec = rp.subreg.odd; | 137 | value->tv_sec = rp.subreg.odd; |
151 | #else | 138 | #else |
152 | value->tv_usec = (cputime % 4096000000ULL) / 4096; | 139 | value->tv_usec = (__cputime % 4096000000ULL) / 4096; |
153 | value->tv_sec = cputime / 4096000000ULL; | 140 | value->tv_sec = __cputime / 4096000000ULL; |
154 | #endif | 141 | #endif |
155 | } | 142 | } |
156 | 143 | ||
157 | /* | 144 | /* |
158 | * Convert cputime to clock and back. | 145 | * Convert cputime to clock and back. |
159 | */ | 146 | */ |
160 | static inline clock_t | 147 | static inline clock_t cputime_to_clock_t(cputime_t cputime) |
161 | cputime_to_clock_t(cputime_t cputime) | ||
162 | { | 148 | { |
163 | return cputime_div(cputime, 4096000000ULL / USER_HZ); | 149 | unsigned long long clock = (__force unsigned long long) cputime; |
150 | do_div(clock, 4096000000ULL / USER_HZ); | ||
151 | return clock; | ||
164 | } | 152 | } |
165 | 153 | ||
166 | static inline cputime_t | 154 | static inline cputime_t clock_t_to_cputime(unsigned long x) |
167 | clock_t_to_cputime(unsigned long x) | ||
168 | { | 155 | { |
169 | return (cputime_t) x * (4096000000ULL / USER_HZ); | 156 | return (__force cputime_t)(x * (4096000000ULL / USER_HZ)); |
170 | } | 157 | } |
171 | 158 | ||
172 | /* | 159 | /* |
173 | * Convert cputime64 to clock. | 160 | * Convert cputime64 to clock. |
174 | */ | 161 | */ |
175 | static inline clock_t | 162 | static inline clock_t cputime64_to_clock_t(cputime64_t cputime) |
176 | cputime64_to_clock_t(cputime64_t cputime) | ||
177 | { | 163 | { |
178 | return cputime_div(cputime, 4096000000ULL / USER_HZ); | 164 | unsigned long long clock = (__force unsigned long long) cputime; |
165 | do_div(clock, 4096000000ULL / USER_HZ); | ||
166 | return clock; | ||
179 | } | 167 | } |
180 | 168 | ||
181 | struct s390_idle_data { | 169 | struct s390_idle_data { |
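The rewritten s390 cputime_t keeps the CPU timer's native resolution of 2**-12 microseconds, so every conversion above is just a multiply or divide by 4096 (per microsecond) or 4096000000ULL (per second); the __nocast/__force annotations only let sparse catch code that mixes cputime_t with plain integers and generate no extra instructions. A standalone illustration of the scale (the sample values are arbitrary):

	/* Userspace check of the 2**-12 microsecond cputime scale used in the
	 * header above; builds with any C compiler.
	 */
	#include <stdio.h>

	typedef unsigned long long cputime_t;

	static unsigned long long cputime_to_usecs(cputime_t ct) { return ct >> 12; }
	static cputime_t usecs_to_cputime(unsigned long long us) { return us * 4096ULL; }
	static cputime_t secs_to_cputime(unsigned long long s)   { return s * 4096000000ULL; }

	int main(void)
	{
		cputime_t one_sec = secs_to_cputime(1);

		printf("1s as cputime     : %llu\n", one_sec);                   /* 4096000000 */
		printf("back to usecs     : %llu\n", cputime_to_usecs(one_sec)); /* 1000000    */
		printf("2500us round trip : %llu\n",
		       cputime_to_usecs(usecs_to_cputime(2500)));                /* 2500       */
		return 0;
	}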
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 524d23b8610c..4f289ff0b7fe 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h | |||
@@ -599,10 +599,10 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste) | |||
599 | skey = page_get_storage_key(address); | 599 | skey = page_get_storage_key(address); |
600 | bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED); | 600 | bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED); |
601 | /* Clear page changed & referenced bit in the storage key */ | 601 | /* Clear page changed & referenced bit in the storage key */ |
602 | if (bits) { | 602 | if (bits & _PAGE_CHANGED) |
603 | skey ^= bits; | 603 | page_set_storage_key(address, skey ^ bits, 1); |
604 | page_set_storage_key(address, skey, 1); | 604 | else if (bits) |
605 | } | 605 | page_reset_referenced(address); |
606 | /* Transfer page changed & referenced bit to guest bits in pgste */ | 606 | /* Transfer page changed & referenced bit to guest bits in pgste */ |
607 | pgste_val(pgste) |= bits << 48; /* RCP_GR_BIT & RCP_GC_BIT */ | 607 | pgste_val(pgste) |= bits << 48; /* RCP_GR_BIT & RCP_GC_BIT */ |
608 | /* Get host changed & referenced bits from pgste */ | 608 | /* Get host changed & referenced bits from pgste */ |
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 9451b210a1b4..3201ae447990 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c | |||
@@ -91,10 +91,12 @@ static void default_idle(void) | |||
91 | void cpu_idle(void) | 91 | void cpu_idle(void) |
92 | { | 92 | { |
93 | for (;;) { | 93 | for (;;) { |
94 | tick_nohz_stop_sched_tick(1); | 94 | tick_nohz_idle_enter(); |
95 | rcu_idle_enter(); | ||
95 | while (!need_resched()) | 96 | while (!need_resched()) |
96 | default_idle(); | 97 | default_idle(); |
97 | tick_nohz_restart_sched_tick(); | 98 | rcu_idle_exit(); |
99 | tick_nohz_idle_exit(); | ||
98 | preempt_enable_no_resched(); | 100 | preempt_enable_no_resched(); |
99 | schedule(); | 101 | schedule(); |
100 | preempt_disable(); | 102 | preempt_disable(); |
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 450931a45b68..573bc29551ef 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c | |||
@@ -296,13 +296,6 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) | |||
296 | ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA)))) | 296 | ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA)))) |
297 | /* Invalid psw mask. */ | 297 | /* Invalid psw mask. */ |
298 | return -EINVAL; | 298 | return -EINVAL; |
299 | if (addr == (addr_t) &dummy->regs.psw.addr) | ||
300 | /* | ||
301 | * The debugger changed the instruction address, | ||
302 | * reset system call restart, see signal.c:do_signal | ||
303 | */ | ||
304 | task_thread_info(child)->system_call = 0; | ||
305 | |||
306 | *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; | 299 | *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; |
307 | 300 | ||
308 | } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { | 301 | } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { |
@@ -614,11 +607,6 @@ static int __poke_user_compat(struct task_struct *child, | |||
614 | /* Transfer 31 bit amode bit to psw mask. */ | 607 | /* Transfer 31 bit amode bit to psw mask. */ |
615 | regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) | | 608 | regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) | |
616 | (__u64)(tmp & PSW32_ADDR_AMODE); | 609 | (__u64)(tmp & PSW32_ADDR_AMODE); |
617 | /* | ||
618 | * The debugger changed the instruction address, | ||
619 | * reset system call restart, see signal.c:do_signal | ||
620 | */ | ||
621 | task_thread_info(child)->system_call = 0; | ||
622 | } else { | 610 | } else { |
623 | /* gpr 0-15 */ | 611 | /* gpr 0-15 */ |
624 | *(__u32*)((addr_t) ®s->psw + addr*2 + 4) = tmp; | 612 | *(__u32*)((addr_t) ®s->psw + addr*2 + 4) = tmp; |
@@ -905,6 +893,14 @@ static int s390_last_break_get(struct task_struct *target, | |||
905 | return 0; | 893 | return 0; |
906 | } | 894 | } |
907 | 895 | ||
896 | static int s390_last_break_set(struct task_struct *target, | ||
897 | const struct user_regset *regset, | ||
898 | unsigned int pos, unsigned int count, | ||
899 | const void *kbuf, const void __user *ubuf) | ||
900 | { | ||
901 | return 0; | ||
902 | } | ||
903 | |||
908 | #endif | 904 | #endif |
909 | 905 | ||
910 | static int s390_system_call_get(struct task_struct *target, | 906 | static int s390_system_call_get(struct task_struct *target, |
@@ -951,6 +947,7 @@ static const struct user_regset s390_regsets[] = { | |||
951 | .size = sizeof(long), | 947 | .size = sizeof(long), |
952 | .align = sizeof(long), | 948 | .align = sizeof(long), |
953 | .get = s390_last_break_get, | 949 | .get = s390_last_break_get, |
950 | .set = s390_last_break_set, | ||
954 | }, | 951 | }, |
955 | #endif | 952 | #endif |
956 | [REGSET_SYSTEM_CALL] = { | 953 | [REGSET_SYSTEM_CALL] = { |
@@ -1116,6 +1113,14 @@ static int s390_compat_last_break_get(struct task_struct *target, | |||
1116 | return 0; | 1113 | return 0; |
1117 | } | 1114 | } |
1118 | 1115 | ||
1116 | static int s390_compat_last_break_set(struct task_struct *target, | ||
1117 | const struct user_regset *regset, | ||
1118 | unsigned int pos, unsigned int count, | ||
1119 | const void *kbuf, const void __user *ubuf) | ||
1120 | { | ||
1121 | return 0; | ||
1122 | } | ||
1123 | |||
1119 | static const struct user_regset s390_compat_regsets[] = { | 1124 | static const struct user_regset s390_compat_regsets[] = { |
1120 | [REGSET_GENERAL] = { | 1125 | [REGSET_GENERAL] = { |
1121 | .core_note_type = NT_PRSTATUS, | 1126 | .core_note_type = NT_PRSTATUS, |
@@ -1139,6 +1144,7 @@ static const struct user_regset s390_compat_regsets[] = { | |||
1139 | .size = sizeof(long), | 1144 | .size = sizeof(long), |
1140 | .align = sizeof(long), | 1145 | .align = sizeof(long), |
1141 | .get = s390_compat_last_break_get, | 1146 | .get = s390_compat_last_break_get, |
1147 | .set = s390_compat_last_break_set, | ||
1142 | }, | 1148 | }, |
1143 | [REGSET_SYSTEM_CALL] = { | 1149 | [REGSET_SYSTEM_CALL] = { |
1144 | .core_note_type = NT_S390_SYSTEM_CALL, | 1150 | .core_note_type = NT_S390_SYSTEM_CALL, |
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index e58a462949b1..f11d1b037c50 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/sched.h> | 22 | #include <linux/sched.h> |
23 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
24 | #include <linux/memblock.h> | ||
24 | #include <linux/mm.h> | 25 | #include <linux/mm.h> |
25 | #include <linux/stddef.h> | 26 | #include <linux/stddef.h> |
26 | #include <linux/unistd.h> | 27 | #include <linux/unistd.h> |
@@ -579,7 +580,7 @@ static unsigned long __init find_crash_base(unsigned long crash_size, | |||
579 | *msg = "first memory chunk must be at least crashkernel size"; | 580 | *msg = "first memory chunk must be at least crashkernel size"; |
580 | return 0; | 581 | return 0; |
581 | } | 582 | } |
582 | if (is_kdump_kernel() && (crash_size == OLDMEM_SIZE)) | 583 | if (OLDMEM_BASE && crash_size == OLDMEM_SIZE) |
583 | return OLDMEM_BASE; | 584 | return OLDMEM_BASE; |
584 | 585 | ||
585 | for (i = MEMORY_CHUNKS - 1; i >= 0; i--) { | 586 | for (i = MEMORY_CHUNKS - 1; i >= 0; i--) { |
@@ -820,7 +821,8 @@ setup_memory(void) | |||
820 | end_chunk = min(end_chunk, end_pfn); | 821 | end_chunk = min(end_chunk, end_pfn); |
821 | if (start_chunk >= end_chunk) | 822 | if (start_chunk >= end_chunk) |
822 | continue; | 823 | continue; |
823 | add_active_range(0, start_chunk, end_chunk); | 824 | memblock_add_node(PFN_PHYS(start_chunk), |
825 | PFN_PHYS(end_chunk - start_chunk), 0); | ||
824 | pfn = max(start_chunk, start_pfn); | 826 | pfn = max(start_chunk, start_pfn); |
825 | for (; pfn < end_chunk; pfn++) | 827 | for (; pfn < end_chunk; pfn++) |
826 | page_set_storage_key(PFN_PHYS(pfn), | 828 | page_set_storage_key(PFN_PHYS(pfn), |
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 05a85bc14c98..7f6f9f354545 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c | |||
@@ -460,9 +460,9 @@ void do_signal(struct pt_regs *regs) | |||
460 | regs->svc_code >> 16); | 460 | regs->svc_code >> 16); |
461 | break; | 461 | break; |
462 | } | 462 | } |
463 | /* No longer in a system call */ | ||
464 | clear_thread_flag(TIF_SYSCALL); | ||
465 | } | 463 | } |
464 | /* No longer in a system call */ | ||
465 | clear_thread_flag(TIF_SYSCALL); | ||
466 | 466 | ||
467 | if ((is_compat_task() ? | 467 | if ((is_compat_task() ? |
468 | handle_signal32(signr, &ka, &info, oldset, regs) : | 468 | handle_signal32(signr, &ka, &info, oldset, regs) : |
@@ -486,6 +486,7 @@ void do_signal(struct pt_regs *regs) | |||
486 | } | 486 | } |
487 | 487 | ||
488 | /* No handlers present - check for system call restart */ | 488 | /* No handlers present - check for system call restart */ |
489 | clear_thread_flag(TIF_SYSCALL); | ||
489 | if (current_thread_info()->system_call) { | 490 | if (current_thread_info()->system_call) { |
490 | regs->svc_code = current_thread_info()->system_call; | 491 | regs->svc_code = current_thread_info()->system_call; |
491 | switch (regs->gprs[2]) { | 492 | switch (regs->gprs[2]) { |
@@ -500,9 +501,6 @@ void do_signal(struct pt_regs *regs) | |||
500 | regs->gprs[2] = regs->orig_gpr2; | 501 | regs->gprs[2] = regs->orig_gpr2; |
501 | set_thread_flag(TIF_SYSCALL); | 502 | set_thread_flag(TIF_SYSCALL); |
502 | break; | 503 | break; |
503 | default: | ||
504 | clear_thread_flag(TIF_SYSCALL); | ||
505 | break; | ||
506 | } | 504 | } |
507 | } | 505 | } |
508 | 506 | ||
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c index f43c0e4282af..9daee91e6c3f 100644 --- a/arch/s390/oprofile/hwsampler.c +++ b/arch/s390/oprofile/hwsampler.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <asm/irq.h> | 22 | #include <asm/irq.h> |
23 | 23 | ||
24 | #include "hwsampler.h" | 24 | #include "hwsampler.h" |
25 | #include "op_counter.h" | ||
25 | 26 | ||
26 | #define MAX_NUM_SDB 511 | 27 | #define MAX_NUM_SDB 511 |
27 | #define MIN_NUM_SDB 1 | 28 | #define MIN_NUM_SDB 1 |
@@ -896,6 +897,8 @@ static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt, | |||
896 | if (sample_data_ptr->P == 1) { | 897 | if (sample_data_ptr->P == 1) { |
897 | /* userspace sample */ | 898 | /* userspace sample */ |
898 | unsigned int pid = sample_data_ptr->prim_asn; | 899 | unsigned int pid = sample_data_ptr->prim_asn; |
900 | if (!counter_config.user) | ||
901 | goto skip_sample; | ||
899 | rcu_read_lock(); | 902 | rcu_read_lock(); |
900 | tsk = pid_task(find_vpid(pid), PIDTYPE_PID); | 903 | tsk = pid_task(find_vpid(pid), PIDTYPE_PID); |
901 | if (tsk) | 904 | if (tsk) |
@@ -903,6 +906,8 @@ static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt, | |||
903 | rcu_read_unlock(); | 906 | rcu_read_unlock(); |
904 | } else { | 907 | } else { |
905 | /* kernelspace sample */ | 908 | /* kernelspace sample */ |
909 | if (!counter_config.kernel) | ||
910 | goto skip_sample; | ||
906 | regs = task_pt_regs(current); | 911 | regs = task_pt_regs(current); |
907 | } | 912 | } |
908 | 913 | ||
@@ -910,7 +915,7 @@ static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt, | |||
910 | oprofile_add_ext_hw_sample(sample_data_ptr->ia, regs, 0, | 915 | oprofile_add_ext_hw_sample(sample_data_ptr->ia, regs, 0, |
911 | !sample_data_ptr->P, tsk); | 916 | !sample_data_ptr->P, tsk); |
912 | mutex_unlock(&hws_sem); | 917 | mutex_unlock(&hws_sem); |
913 | 918 | skip_sample: | |
914 | sample_data_ptr++; | 919 | sample_data_ptr++; |
915 | } | 920 | } |
916 | } | 921 | } |
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c index 6efc18b5e60a..2297be406c61 100644 --- a/arch/s390/oprofile/init.c +++ b/arch/s390/oprofile/init.c | |||
@@ -2,10 +2,11 @@ | |||
2 | * arch/s390/oprofile/init.c | 2 | * arch/s390/oprofile/init.c |
3 | * | 3 | * |
4 | * S390 Version | 4 | * S390 Version |
5 | * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation | 5 | * Copyright (C) 2002-2011 IBM Deutschland Entwicklung GmbH, IBM Corporation |
6 | * Author(s): Thomas Spatzier (tspat@de.ibm.com) | 6 | * Author(s): Thomas Spatzier (tspat@de.ibm.com) |
7 | * Author(s): Mahesh Salgaonkar (mahesh@linux.vnet.ibm.com) | 7 | * Author(s): Mahesh Salgaonkar (mahesh@linux.vnet.ibm.com) |
8 | * Author(s): Heinz Graalfs (graalfs@linux.vnet.ibm.com) | 8 | * Author(s): Heinz Graalfs (graalfs@linux.vnet.ibm.com) |
9 | * Author(s): Andreas Krebbel (krebbel@linux.vnet.ibm.com) | ||
9 | * | 10 | * |
10 | * @remark Copyright 2002-2011 OProfile authors | 11 | * @remark Copyright 2002-2011 OProfile authors |
11 | */ | 12 | */ |
@@ -14,6 +15,8 @@ | |||
14 | #include <linux/init.h> | 15 | #include <linux/init.h> |
15 | #include <linux/errno.h> | 16 | #include <linux/errno.h> |
16 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
18 | #include <linux/module.h> | ||
19 | #include <asm/processor.h> | ||
17 | 20 | ||
18 | #include "../../../drivers/oprofile/oprof.h" | 21 | #include "../../../drivers/oprofile/oprof.h" |
19 | 22 | ||
@@ -22,6 +25,7 @@ extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth); | |||
22 | #ifdef CONFIG_64BIT | 25 | #ifdef CONFIG_64BIT |
23 | 26 | ||
24 | #include "hwsampler.h" | 27 | #include "hwsampler.h" |
28 | #include "op_counter.h" | ||
25 | 29 | ||
26 | #define DEFAULT_INTERVAL 4127518 | 30 | #define DEFAULT_INTERVAL 4127518 |
27 | 31 | ||
@@ -35,16 +39,41 @@ static unsigned long oprofile_max_interval; | |||
35 | static unsigned long oprofile_sdbt_blocks = DEFAULT_SDBT_BLOCKS; | 39 | static unsigned long oprofile_sdbt_blocks = DEFAULT_SDBT_BLOCKS; |
36 | static unsigned long oprofile_sdb_blocks = DEFAULT_SDB_BLOCKS; | 40 | static unsigned long oprofile_sdb_blocks = DEFAULT_SDB_BLOCKS; |
37 | 41 | ||
38 | static int hwsampler_file; | 42 | static int hwsampler_enabled; |
39 | static int hwsampler_running; /* start_mutex must be held to change */ | 43 | static int hwsampler_running; /* start_mutex must be held to change */ |
44 | static int hwsampler_available; | ||
40 | 45 | ||
41 | static struct oprofile_operations timer_ops; | 46 | static struct oprofile_operations timer_ops; |
42 | 47 | ||
48 | struct op_counter_config counter_config; | ||
49 | |||
50 | enum __force_cpu_type { | ||
51 | reserved = 0, /* do not force */ | ||
52 | timer, | ||
53 | }; | ||
54 | static int force_cpu_type; | ||
55 | |||
56 | static int set_cpu_type(const char *str, struct kernel_param *kp) | ||
57 | { | ||
58 | if (!strcmp(str, "timer")) { | ||
59 | force_cpu_type = timer; | ||
60 | printk(KERN_INFO "oprofile: forcing timer to be returned " | ||
61 | "as cpu type\n"); | ||
62 | } else { | ||
63 | force_cpu_type = 0; | ||
64 | } | ||
65 | |||
66 | return 0; | ||
67 | } | ||
68 | module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0); | ||
69 | MODULE_PARM_DESC(cpu_type, "Force legacy basic mode sampling " ||
70 | "(report cpu_type \"timer\")"); ||
71 | |||
43 | static int oprofile_hwsampler_start(void) | 72 | static int oprofile_hwsampler_start(void) |
44 | { | 73 | { |
45 | int retval; | 74 | int retval; |
46 | 75 | ||
47 | hwsampler_running = hwsampler_file; | 76 | hwsampler_running = hwsampler_enabled; |
48 | 77 | ||
49 | if (!hwsampler_running) | 78 | if (!hwsampler_running) |
50 | return timer_ops.start(); | 79 | return timer_ops.start(); |
@@ -72,10 +101,16 @@ static void oprofile_hwsampler_stop(void) | |||
72 | return; | 101 | return; |
73 | } | 102 | } |
74 | 103 | ||
104 | /* | ||
105 | * File ops used for: | ||
106 | * /dev/oprofile/0/enabled | ||
107 | * /dev/oprofile/hwsampling/hwsampler (cpu_type = timer) | ||
108 | */ | ||
109 | |||
75 | static ssize_t hwsampler_read(struct file *file, char __user *buf, | 110 | static ssize_t hwsampler_read(struct file *file, char __user *buf, |
76 | size_t count, loff_t *offset) | 111 | size_t count, loff_t *offset) |
77 | { | 112 | { |
78 | return oprofilefs_ulong_to_user(hwsampler_file, buf, count, offset); | 113 | return oprofilefs_ulong_to_user(hwsampler_enabled, buf, count, offset); |
79 | } | 114 | } |
80 | 115 | ||
81 | static ssize_t hwsampler_write(struct file *file, char const __user *buf, | 116 | static ssize_t hwsampler_write(struct file *file, char const __user *buf, |
@@ -88,9 +123,12 @@ static ssize_t hwsampler_write(struct file *file, char const __user *buf, | |||
88 | return -EINVAL; | 123 | return -EINVAL; |
89 | 124 | ||
90 | retval = oprofilefs_ulong_from_user(&val, buf, count); | 125 | retval = oprofilefs_ulong_from_user(&val, buf, count); |
91 | if (retval) | 126 | if (retval <= 0) |
92 | return retval; | 127 | return retval; |
93 | 128 | ||
129 | if (val != 0 && val != 1) | ||
130 | return -EINVAL; | ||
131 | |||
94 | if (oprofile_started) | 132 | if (oprofile_started) |
95 | /* | 133 | /* |
96 | * save to do without locking as we set | 134 | * save to do without locking as we set |
@@ -99,7 +137,7 @@ static ssize_t hwsampler_write(struct file *file, char const __user *buf, | |||
99 | */ | 137 | */ |
100 | return -EBUSY; | 138 | return -EBUSY; |
101 | 139 | ||
102 | hwsampler_file = val; | 140 | hwsampler_enabled = val; |
103 | 141 | ||
104 | return count; | 142 | return count; |
105 | } | 143 | } |
@@ -109,38 +147,311 @@ static const struct file_operations hwsampler_fops = { | |||
109 | .write = hwsampler_write, | 147 | .write = hwsampler_write, |
110 | }; | 148 | }; |
111 | 149 | ||
150 | /* | ||
151 | * File ops used for: | ||
152 | * /dev/oprofile/0/count | ||
153 | * /dev/oprofile/hwsampling/hw_interval (cpu_type = timer) | ||
154 | * | ||
155 | * Make sure that the value is within the hardware range. | ||
156 | */ | ||
157 | |||
158 | static ssize_t hw_interval_read(struct file *file, char __user *buf, | ||
159 | size_t count, loff_t *offset) | ||
160 | { | ||
161 | return oprofilefs_ulong_to_user(oprofile_hw_interval, buf, | ||
162 | count, offset); | ||
163 | } | ||
164 | |||
165 | static ssize_t hw_interval_write(struct file *file, char const __user *buf, | ||
166 | size_t count, loff_t *offset) | ||
167 | { | ||
168 | unsigned long val; | ||
169 | int retval; | ||
170 | |||
171 | if (*offset) | ||
172 | return -EINVAL; | ||
173 | retval = oprofilefs_ulong_from_user(&val, buf, count); | ||
174 | if (retval) | ||
175 | return retval; | ||
176 | if (val < oprofile_min_interval) | ||
177 | oprofile_hw_interval = oprofile_min_interval; | ||
178 | else if (val > oprofile_max_interval) | ||
179 | oprofile_hw_interval = oprofile_max_interval; | ||
180 | else | ||
181 | oprofile_hw_interval = val; | ||
182 | |||
183 | return count; | ||
184 | } | ||
185 | |||
186 | static const struct file_operations hw_interval_fops = { | ||
187 | .read = hw_interval_read, | ||
188 | .write = hw_interval_write, | ||
189 | }; | ||
190 | |||
191 | /* | ||
192 | * File ops used for: | ||
193 | * /dev/oprofile/0/event | ||
194 | * Only a single event with number 0 is supported with this counter. | ||
195 | * | ||
196 | * /dev/oprofile/0/unit_mask | ||
197 | * This is a dummy file needed by the user space tools. | ||
198 | * No value other than 0 is accepted or returned. | ||
199 | */ | ||
200 | |||
201 | static ssize_t hwsampler_zero_read(struct file *file, char __user *buf, | ||
202 | size_t count, loff_t *offset) | ||
203 | { | ||
204 | return oprofilefs_ulong_to_user(0, buf, count, offset); | ||
205 | } | ||
206 | |||
207 | static ssize_t hwsampler_zero_write(struct file *file, char const __user *buf, | ||
208 | size_t count, loff_t *offset) | ||
209 | { | ||
210 | unsigned long val; | ||
211 | int retval; | ||
212 | |||
213 | if (*offset) | ||
214 | return -EINVAL; | ||
215 | |||
216 | retval = oprofilefs_ulong_from_user(&val, buf, count); | ||
217 | if (retval) | ||
218 | return retval; | ||
219 | if (val != 0) | ||
220 | return -EINVAL; | ||
221 | return count; | ||
222 | } | ||
223 | |||
224 | static const struct file_operations zero_fops = { | ||
225 | .read = hwsampler_zero_read, | ||
226 | .write = hwsampler_zero_write, | ||
227 | }; | ||
228 | |||
229 | /* /dev/oprofile/0/kernel file ops. */ | ||
230 | |||
231 | static ssize_t hwsampler_kernel_read(struct file *file, char __user *buf, | ||
232 | size_t count, loff_t *offset) | ||
233 | { | ||
234 | return oprofilefs_ulong_to_user(counter_config.kernel, | ||
235 | buf, count, offset); | ||
236 | } | ||
237 | |||
238 | static ssize_t hwsampler_kernel_write(struct file *file, char const __user *buf, | ||
239 | size_t count, loff_t *offset) | ||
240 | { | ||
241 | unsigned long val; | ||
242 | int retval; | ||
243 | |||
244 | if (*offset) | ||
245 | return -EINVAL; | ||
246 | |||
247 | retval = oprofilefs_ulong_from_user(&val, buf, count); | ||
248 | if (retval) | ||
249 | return retval; | ||
250 | |||
251 | if (val != 0 && val != 1) | ||
252 | return -EINVAL; | ||
253 | |||
254 | counter_config.kernel = val; | ||
255 | |||
256 | return count; | ||
257 | } | ||
258 | |||
259 | static const struct file_operations kernel_fops = { | ||
260 | .read = hwsampler_kernel_read, | ||
261 | .write = hwsampler_kernel_write, | ||
262 | }; | ||
263 | |||
264 | /* /dev/oprofile/0/user file ops. */ | ||
265 | |||
266 | static ssize_t hwsampler_user_read(struct file *file, char __user *buf, | ||
267 | size_t count, loff_t *offset) | ||
268 | { | ||
269 | return oprofilefs_ulong_to_user(counter_config.user, | ||
270 | buf, count, offset); | ||
271 | } | ||
272 | |||
273 | static ssize_t hwsampler_user_write(struct file *file, char const __user *buf, | ||
274 | size_t count, loff_t *offset) | ||
275 | { | ||
276 | unsigned long val; | ||
277 | int retval; | ||
278 | |||
279 | if (*offset) | ||
280 | return -EINVAL; | ||
281 | |||
282 | retval = oprofilefs_ulong_from_user(&val, buf, count); | ||
283 | if (retval) | ||
284 | return retval; | ||
285 | |||
286 | if (val != 0 && val != 1) | ||
287 | return -EINVAL; | ||
288 | |||
289 | counter_config.user = val; | ||
290 | |||
291 | return count; | ||
292 | } | ||
293 | |||
294 | static const struct file_operations user_fops = { | ||
295 | .read = hwsampler_user_read, | ||
296 | .write = hwsampler_user_write, | ||
297 | }; | ||
298 | |||
299 | |||
300 | /* | ||
301 | * File ops used for: /dev/oprofile/timer/enabled | ||
302 | * The value always has to be the inverted value of hwsampler_enabled. So | ||
303 | * no separate variable is created. That way we do not need locking. | ||
304 | */ | ||
305 | |||
306 | static ssize_t timer_enabled_read(struct file *file, char __user *buf, | ||
307 | size_t count, loff_t *offset) | ||
308 | { | ||
309 | return oprofilefs_ulong_to_user(!hwsampler_enabled, buf, count, offset); | ||
310 | } | ||
311 | |||
312 | static ssize_t timer_enabled_write(struct file *file, char const __user *buf, | ||
313 | size_t count, loff_t *offset) | ||
314 | { | ||
315 | unsigned long val; | ||
316 | int retval; | ||
317 | |||
318 | if (*offset) | ||
319 | return -EINVAL; | ||
320 | |||
321 | retval = oprofilefs_ulong_from_user(&val, buf, count); | ||
322 | if (retval) | ||
323 | return retval; | ||
324 | |||
325 | if (val != 0 && val != 1) | ||
326 | return -EINVAL; | ||
327 | |||
328 | /* Timer cannot be disabled without having hardware sampling. */ | ||
329 | if (val == 0 && !hwsampler_available) | ||
330 | return -EINVAL; | ||
331 | |||
332 | if (oprofile_started) | ||
333 | /* | ||
334 | * save to do without locking as we set | ||
335 | * hwsampler_running in start() when start_mutex is | ||
336 | * held | ||
337 | */ | ||
338 | return -EBUSY; | ||
339 | |||
340 | hwsampler_enabled = !val; | ||
341 | |||
342 | return count; | ||
343 | } | ||
344 | |||
345 | static const struct file_operations timer_enabled_fops = { | ||
346 | .read = timer_enabled_read, | ||
347 | .write = timer_enabled_write, | ||
348 | }; | ||
349 | |||
350 | |||
112 | static int oprofile_create_hwsampling_files(struct super_block *sb, | 351 | static int oprofile_create_hwsampling_files(struct super_block *sb, |
113 | struct dentry *root) | 352 | struct dentry *root) |
114 | { | 353 | { |
115 | struct dentry *hw_dir; | 354 | struct dentry *dir; |
355 | |||
356 | dir = oprofilefs_mkdir(sb, root, "timer"); | ||
357 | if (!dir) | ||
358 | return -EINVAL; | ||
359 | |||
360 | oprofilefs_create_file(sb, dir, "enabled", &timer_enabled_fops); | ||
361 | |||
362 | if (!hwsampler_available) | ||
363 | return 0; | ||
116 | 364 | ||
117 | /* reinitialize default values */ | 365 | /* reinitialize default values */ |
118 | hwsampler_file = 1; | 366 | hwsampler_enabled = 1; |
367 | counter_config.kernel = 1; | ||
368 | counter_config.user = 1; | ||
119 | 369 | ||
120 | hw_dir = oprofilefs_mkdir(sb, root, "hwsampling"); | 370 | if (!force_cpu_type) { |
121 | if (!hw_dir) | 371 | /* |
122 | return -EINVAL; | 372 | * Create the counter file system. A single virtual |
373 | * counter is created which can be used to | ||
374 | * enable/disable hardware sampling dynamically from | ||
375 | * user space. The user space will configure a single | ||
376 | * counter with a single event. The value of 'event' | ||
377 | * and 'unit_mask' are not evaluated by the kernel code | ||
378 | * and can only be set to 0. | ||
379 | */ | ||
380 | |||
381 | dir = oprofilefs_mkdir(sb, root, "0"); | ||
382 | if (!dir) | ||
383 | return -EINVAL; | ||
123 | 384 | ||
124 | oprofilefs_create_file(sb, hw_dir, "hwsampler", &hwsampler_fops); | 385 | oprofilefs_create_file(sb, dir, "enabled", &hwsampler_fops); |
125 | oprofilefs_create_ulong(sb, hw_dir, "hw_interval", | 386 | oprofilefs_create_file(sb, dir, "event", &zero_fops); |
126 | &oprofile_hw_interval); | 387 | oprofilefs_create_file(sb, dir, "count", &hw_interval_fops); |
127 | oprofilefs_create_ro_ulong(sb, hw_dir, "hw_min_interval", | 388 | oprofilefs_create_file(sb, dir, "unit_mask", &zero_fops); |
128 | &oprofile_min_interval); | 389 | oprofilefs_create_file(sb, dir, "kernel", &kernel_fops); |
129 | oprofilefs_create_ro_ulong(sb, hw_dir, "hw_max_interval", | 390 | oprofilefs_create_file(sb, dir, "user", &user_fops); |
130 | &oprofile_max_interval); | 391 | oprofilefs_create_ulong(sb, dir, "hw_sdbt_blocks", |
131 | oprofilefs_create_ulong(sb, hw_dir, "hw_sdbt_blocks", | 392 | &oprofile_sdbt_blocks); |
132 | &oprofile_sdbt_blocks); | ||
133 | 393 | ||
394 | } else { | ||
395 | /* | ||
396 | * Hardware sampling can be used but the cpu_type is | ||
397 | * forced to timer in order to deal with legacy user | ||
398 | * space tools. The /dev/oprofile/hwsampling fs is | ||
399 | * provided in that case. | ||
400 | */ | ||
401 | dir = oprofilefs_mkdir(sb, root, "hwsampling"); | ||
402 | if (!dir) | ||
403 | return -EINVAL; | ||
404 | |||
405 | oprofilefs_create_file(sb, dir, "hwsampler", | ||
406 | &hwsampler_fops); | ||
407 | oprofilefs_create_file(sb, dir, "hw_interval", | ||
408 | &hw_interval_fops); | ||
409 | oprofilefs_create_ro_ulong(sb, dir, "hw_min_interval", | ||
410 | &oprofile_min_interval); | ||
411 | oprofilefs_create_ro_ulong(sb, dir, "hw_max_interval", | ||
412 | &oprofile_max_interval); | ||
413 | oprofilefs_create_ulong(sb, dir, "hw_sdbt_blocks", | ||
414 | &oprofile_sdbt_blocks); | ||
415 | } | ||
134 | return 0; | 416 | return 0; |
135 | } | 417 | } |
136 | 418 | ||
137 | static int oprofile_hwsampler_init(struct oprofile_operations *ops) | 419 | static int oprofile_hwsampler_init(struct oprofile_operations *ops) |
138 | { | 420 | { |
421 | /* | ||
422 | * Initialize the timer mode infrastructure as well in order | ||
423 | * to be able to switch back dynamically. oprofile_timer_init | ||
424 | * is not supposed to fail. | ||
425 | */ | ||
426 | if (oprofile_timer_init(ops)) | ||
427 | BUG(); | ||
428 | |||
429 | memcpy(&timer_ops, ops, sizeof(timer_ops)); | ||
430 | ops->create_files = oprofile_create_hwsampling_files; | ||
431 | |||
432 | /* | ||
433 | * If the user space tools do not support newer cpu types, | ||
434 | * the force_cpu_type module parameter | ||
435 | * can be used to always return \"timer\" as cpu type. | ||
436 | */ | ||
437 | if (force_cpu_type != timer) { | ||
438 | struct cpuid id; | ||
439 | |||
440 | get_cpu_id (&id); | ||
441 | |||
442 | switch (id.machine) { | ||
443 | case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break; | ||
444 | case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break; | ||
445 | default: return -ENODEV; | ||
446 | } | ||
447 | } | ||
448 | |||
139 | if (hwsampler_setup()) | 449 | if (hwsampler_setup()) |
140 | return -ENODEV; | 450 | return -ENODEV; |
141 | 451 | ||
142 | /* | 452 | /* |
143 | * create hwsampler files only if hwsampler_setup() succeeds. | 453 | * Query the range for the sampling interval from the |
454 | * hardware. | ||
144 | */ | 455 | */ |
145 | oprofile_min_interval = hwsampler_query_min_interval(); | 456 | oprofile_min_interval = hwsampler_query_min_interval(); |
146 | if (oprofile_min_interval == 0) | 457 | if (oprofile_min_interval == 0) |
@@ -155,23 +466,17 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops) | |||
155 | if (oprofile_hw_interval > oprofile_max_interval) | 466 | if (oprofile_hw_interval > oprofile_max_interval) |
156 | oprofile_hw_interval = oprofile_max_interval; | 467 | oprofile_hw_interval = oprofile_max_interval; |
157 | 468 | ||
158 | if (oprofile_timer_init(ops)) | 469 | printk(KERN_INFO "oprofile: System z hardware sampling " |
159 | return -ENODEV; | 470 | "facility found.\n"); |
160 | |||
161 | printk(KERN_INFO "oprofile: using hardware sampling\n"); | ||
162 | |||
163 | memcpy(&timer_ops, ops, sizeof(timer_ops)); | ||
164 | 471 | ||
165 | ops->start = oprofile_hwsampler_start; | 472 | ops->start = oprofile_hwsampler_start; |
166 | ops->stop = oprofile_hwsampler_stop; | 473 | ops->stop = oprofile_hwsampler_stop; |
167 | ops->create_files = oprofile_create_hwsampling_files; | ||
168 | 474 | ||
169 | return 0; | 475 | return 0; |
170 | } | 476 | } |
171 | 477 | ||
172 | static void oprofile_hwsampler_exit(void) | 478 | static void oprofile_hwsampler_exit(void) |
173 | { | 479 | { |
174 | oprofile_timer_exit(); | ||
175 | hwsampler_shutdown(); | 480 | hwsampler_shutdown(); |
176 | } | 481 | } |
177 | 482 | ||
@@ -182,7 +487,15 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) | |||
182 | ops->backtrace = s390_backtrace; | 487 | ops->backtrace = s390_backtrace; |
183 | 488 | ||
184 | #ifdef CONFIG_64BIT | 489 | #ifdef CONFIG_64BIT |
185 | return oprofile_hwsampler_init(ops); | 490 | |
491 | /* | ||
492 | * -ENODEV is not reported to the caller. The module itself | ||
493 | * will use the timer mode sampling as fallback and this is | ||
494 | * always available. | ||
495 | */ | ||
496 | hwsampler_available = oprofile_hwsampler_init(ops) == 0; | ||
497 | |||
498 | return 0; | ||
186 | #else | 499 | #else |
187 | return -ENODEV; | 500 | return -ENODEV; |
188 | #endif | 501 | #endif |
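The reworked init path always brings up timer-mode sampling first and only layers the hardware sampler on top when the facility is present, exposing either a single virtual counter under 0/ or, with the cpu_type=timer module parameter, the legacy hwsampling/ files. The enabled/kernel/user handlers above accept nothing but 0 or 1. A hedged userspace sketch of flipping the virtual counter on; the path assumes oprofilefs is mounted at /dev/oprofile and that the non-forced layout from this patch is in effect:

	/* Enable the s390 hardware-sampling virtual counter from userspace. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/oprofile/0/enabled", O_WRONLY);

		if (fd < 0) {
			perror("open /dev/oprofile/0/enabled");
			return 1;
		}
		if (write(fd, "1", 1) != 1)	/* handler rejects anything but 0/1 */
			perror("write");
		close(fd);
		return 0;
	}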
diff --git a/arch/s390/oprofile/op_counter.h b/arch/s390/oprofile/op_counter.h new file mode 100644 index 000000000000..1a8d3ca09014 --- /dev/null +++ b/arch/s390/oprofile/op_counter.h | |||
@@ -0,0 +1,23 @@ | |||
1 | /** | ||
2 | * arch/s390/oprofile/op_counter.h | ||
3 | * | ||
4 | * Copyright (C) 2011 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
5 | * Author(s): Andreas Krebbel (krebbel@linux.vnet.ibm.com) | ||
6 | * | ||
7 | * @remark Copyright 2011 OProfile authors | ||
8 | */ | ||
9 | |||
10 | #ifndef OP_COUNTER_H | ||
11 | #define OP_COUNTER_H | ||
12 | |||
13 | struct op_counter_config { | ||
14 | /* `enabled' maps to the hwsampler_file variable. */ | ||
15 | /* `count' maps to the oprofile_hw_interval variable. */ | ||
16 | /* `event' and `unit_mask' are unused. */ | ||
17 | unsigned long kernel; | ||
18 | unsigned long user; | ||
19 | }; | ||
20 | |||
21 | extern struct op_counter_config counter_config; | ||
22 | |||
23 | #endif /* OP_COUNTER_H */ | ||
diff --git a/arch/score/Kconfig b/arch/score/Kconfig index df169e84db4e..8b0c9464aa9d 100644 --- a/arch/score/Kconfig +++ b/arch/score/Kconfig | |||
@@ -4,6 +4,9 @@ config SCORE | |||
4 | def_bool y | 4 | def_bool y |
5 | select HAVE_GENERIC_HARDIRQS | 5 | select HAVE_GENERIC_HARDIRQS |
6 | select GENERIC_IRQ_SHOW | 6 | select GENERIC_IRQ_SHOW |
7 | select HAVE_MEMBLOCK | ||
8 | select HAVE_MEMBLOCK_NODE_MAP | ||
9 | select ARCH_DISCARD_MEMBLOCK | ||
7 | 10 | ||
8 | choice | 11 | choice |
9 | prompt "System type" | 12 | prompt "System type" |
@@ -60,9 +63,6 @@ config 32BIT | |||
60 | config ARCH_FLATMEM_ENABLE | 63 | config ARCH_FLATMEM_ENABLE |
61 | def_bool y | 64 | def_bool y |
62 | 65 | ||
63 | config ARCH_POPULATES_NODE_MAP | ||
64 | def_bool y | ||
65 | |||
66 | source "mm/Kconfig" | 66 | source "mm/Kconfig" |
67 | 67 | ||
68 | config MEMORY_START | 68 | config MEMORY_START |
diff --git a/arch/score/kernel/setup.c b/arch/score/kernel/setup.c index 6f898c057878..b48459afefdd 100644 --- a/arch/score/kernel/setup.c +++ b/arch/score/kernel/setup.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/bootmem.h> | 26 | #include <linux/bootmem.h> |
27 | #include <linux/initrd.h> | 27 | #include <linux/initrd.h> |
28 | #include <linux/ioport.h> | 28 | #include <linux/ioport.h> |
29 | #include <linux/memblock.h> | ||
29 | #include <linux/mm.h> | 30 | #include <linux/mm.h> |
30 | #include <linux/seq_file.h> | 31 | #include <linux/seq_file.h> |
31 | #include <linux/screen_info.h> | 32 | #include <linux/screen_info.h> |
@@ -54,7 +55,8 @@ static void __init bootmem_init(void) | |||
54 | /* Initialize the boot-time allocator with low memory only. */ | 55 | /* Initialize the boot-time allocator with low memory only. */ |
55 | bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn, | 56 | bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn, |
56 | min_low_pfn, max_low_pfn); | 57 | min_low_pfn, max_low_pfn); |
57 | add_active_range(0, min_low_pfn, max_low_pfn); | 58 | memblock_add_node(PFN_PHYS(min_low_pfn), |
59 | PFN_PHYS(max_low_pfn - min_low_pfn), 0); | ||
58 | 60 | ||
59 | free_bootmem(PFN_PHYS(start_pfn), | 61 | free_bootmem(PFN_PHYS(start_pfn), |
60 | (max_low_pfn - start_pfn) << PAGE_SHIFT); | 62 | (max_low_pfn - start_pfn) << PAGE_SHIFT); |
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 5629e2099130..47a2f1c2cb0d 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig | |||
@@ -4,6 +4,7 @@ config SUPERH | |||
4 | select CLKDEV_LOOKUP | 4 | select CLKDEV_LOOKUP |
5 | select HAVE_IDE if HAS_IOPORT | 5 | select HAVE_IDE if HAS_IOPORT |
6 | select HAVE_MEMBLOCK | 6 | select HAVE_MEMBLOCK |
7 | select HAVE_MEMBLOCK_NODE_MAP | ||
7 | select HAVE_OPROFILE | 8 | select HAVE_OPROFILE |
8 | select HAVE_GENERIC_DMA_COHERENT | 9 | select HAVE_GENERIC_DMA_COHERENT |
9 | select HAVE_ARCH_TRACEHOOK | 10 | select HAVE_ARCH_TRACEHOOK |
diff --git a/arch/sh/boards/board-sh7757lcr.c b/arch/sh/boards/board-sh7757lcr.c index ec8c84c14b17..895e337c79b6 100644 --- a/arch/sh/boards/board-sh7757lcr.c +++ b/arch/sh/boards/board-sh7757lcr.c | |||
@@ -50,9 +50,9 @@ static struct platform_device heartbeat_device = { | |||
50 | #define GBECONT 0xffc10100 | 50 | #define GBECONT 0xffc10100 |
51 | #define GBECONT_RMII1 BIT(17) | 51 | #define GBECONT_RMII1 BIT(17) |
52 | #define GBECONT_RMII0 BIT(16) | 52 | #define GBECONT_RMII0 BIT(16) |
53 | static void sh7757_eth_set_mdio_gate(unsigned long addr) | 53 | static void sh7757_eth_set_mdio_gate(void *addr) |
54 | { | 54 | { |
55 | if ((addr & 0x00000fff) < 0x0800) | 55 | if (((unsigned long)addr & 0x00000fff) < 0x0800) |
56 | writel(readl(GBECONT) | GBECONT_RMII0, GBECONT); | 56 | writel(readl(GBECONT) | GBECONT_RMII0, GBECONT); |
57 | else | 57 | else |
58 | writel(readl(GBECONT) | GBECONT_RMII1, GBECONT); | 58 | writel(readl(GBECONT) | GBECONT_RMII1, GBECONT); |
@@ -116,9 +116,9 @@ static struct platform_device sh7757_eth1_device = { | |||
116 | }, | 116 | }, |
117 | }; | 117 | }; |
118 | 118 | ||
119 | static void sh7757_eth_giga_set_mdio_gate(unsigned long addr) | 119 | static void sh7757_eth_giga_set_mdio_gate(void *addr) |
120 | { | 120 | { |
121 | if ((addr & 0x00000fff) < 0x0800) { | 121 | if (((unsigned long)addr & 0x00000fff) < 0x0800) { |
122 | gpio_set_value(GPIO_PTT4, 1); | 122 | gpio_set_value(GPIO_PTT4, 1); |
123 | writel(readl(GBECONT) & ~GBECONT_RMII0, GBECONT); | 123 | writel(readl(GBECONT) & ~GBECONT_RMII0, GBECONT); |
124 | } else { | 124 | } else { |
@@ -210,8 +210,12 @@ static struct resource sh_mmcif_resources[] = { | |||
210 | }; | 210 | }; |
211 | 211 | ||
212 | static struct sh_mmcif_dma sh7757lcr_mmcif_dma = { | 212 | static struct sh_mmcif_dma sh7757lcr_mmcif_dma = { |
213 | .chan_priv_tx = SHDMA_SLAVE_MMCIF_TX, | 213 | .chan_priv_tx = { |
214 | .chan_priv_rx = SHDMA_SLAVE_MMCIF_RX, | 214 | .slave_id = SHDMA_SLAVE_MMCIF_TX, |
215 | }, | ||
216 | .chan_priv_rx = { | ||
217 | .slave_id = SHDMA_SLAVE_MMCIF_RX, | ||
218 | } | ||
215 | }; | 219 | }; |
216 | 220 | ||
217 | static struct sh_mmcif_plat_data sh_mmcif_plat = { | 221 | static struct sh_mmcif_plat_data sh_mmcif_plat = { |
diff --git a/arch/sh/include/asm/memblock.h b/arch/sh/include/asm/memblock.h deleted file mode 100644 index e87063fad2ea..000000000000 --- a/arch/sh/include/asm/memblock.h +++ /dev/null | |||
@@ -1,4 +0,0 @@ | |||
1 | #ifndef __ASM_SH_MEMBLOCK_H | ||
2 | #define __ASM_SH_MEMBLOCK_H | ||
3 | |||
4 | #endif /* __ASM_SH_MEMBLOCK_H */ | ||
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c index db4ecd731a00..406508d4ce74 100644 --- a/arch/sh/kernel/idle.c +++ b/arch/sh/kernel/idle.c | |||
@@ -89,7 +89,8 @@ void cpu_idle(void) | |||
89 | 89 | ||
90 | /* endless idle loop with no priority at all */ | 90 | /* endless idle loop with no priority at all */ |
91 | while (1) { | 91 | while (1) { |
92 | tick_nohz_stop_sched_tick(1); | 92 | tick_nohz_idle_enter(); |
93 | rcu_idle_enter(); | ||
93 | 94 | ||
94 | while (!need_resched()) { | 95 | while (!need_resched()) { |
95 | check_pgt_cache(); | 96 | check_pgt_cache(); |
@@ -111,7 +112,8 @@ void cpu_idle(void) | |||
111 | start_critical_timings(); | 112 | start_critical_timings(); |
112 | } | 113 | } |
113 | 114 | ||
114 | tick_nohz_restart_sched_tick(); | 115 | rcu_idle_exit(); |
116 | tick_nohz_idle_exit(); | ||
115 | preempt_enable_no_resched(); | 117 | preempt_enable_no_resched(); |
116 | schedule(); | 118 | schedule(); |
117 | preempt_disable(); | 119 | preempt_disable(); |
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c index c5a33f007f88..9fea49f6e667 100644 --- a/arch/sh/kernel/machine_kexec.c +++ b/arch/sh/kernel/machine_kexec.c | |||
@@ -157,9 +157,6 @@ void __init reserve_crashkernel(void) | |||
157 | unsigned long long crash_size, crash_base; | 157 | unsigned long long crash_size, crash_base; |
158 | int ret; | 158 | int ret; |
159 | 159 | ||
160 | /* this is necessary because of memblock_phys_mem_size() */ | ||
161 | memblock_analyze(); | ||
162 | |||
163 | ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), | 160 | ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), |
164 | &crash_size, &crash_base); | 161 | &crash_size, &crash_base); |
165 | if (ret == 0 && crash_size > 0) { | 162 | if (ret == 0 && crash_size > 0) { |
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index 1a0e946679a4..7b57bf1dc855 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c | |||
@@ -230,7 +230,8 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn, | |||
230 | pmb_bolt_mapping((unsigned long)__va(start), start, end - start, | 230 | pmb_bolt_mapping((unsigned long)__va(start), start, end - start, |
231 | PAGE_KERNEL); | 231 | PAGE_KERNEL); |
232 | 232 | ||
233 | add_active_range(nid, start_pfn, end_pfn); | 233 | memblock_set_node(PFN_PHYS(start_pfn), |
234 | PFN_PHYS(end_pfn - start_pfn), nid); | ||
234 | } | 235 | } |
235 | 236 | ||
236 | void __init __weak plat_early_device_setup(void) | 237 | void __init __weak plat_early_device_setup(void) |
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig index c3e61b366493..cb8f9920f4dd 100644 --- a/arch/sh/mm/Kconfig +++ b/arch/sh/mm/Kconfig | |||
@@ -143,9 +143,6 @@ config MAX_ACTIVE_REGIONS | |||
143 | CPU_SUBTYPE_SH7785) | 143 | CPU_SUBTYPE_SH7785) |
144 | default "1" | 144 | default "1" |
145 | 145 | ||
146 | config ARCH_POPULATES_NODE_MAP | ||
147 | def_bool y | ||
148 | |||
149 | config ARCH_SELECT_MEMORY_MODEL | 146 | config ARCH_SELECT_MEMORY_MODEL |
150 | def_bool y | 147 | def_bool y |
151 | 148 | ||
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 939ca0f356f6..82cc576fab15 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c | |||
@@ -324,7 +324,6 @@ void __init paging_init(void) | |||
324 | unsigned long vaddr, end; | 324 | unsigned long vaddr, end; |
325 | int nid; | 325 | int nid; |
326 | 326 | ||
327 | memblock_init(); | ||
328 | sh_mv.mv_mem_init(); | 327 | sh_mv.mv_mem_init(); |
329 | 328 | ||
330 | early_reserve_mem(); | 329 | early_reserve_mem(); |
@@ -337,7 +336,7 @@ void __init paging_init(void) | |||
337 | sh_mv.mv_mem_reserve(); | 336 | sh_mv.mv_mem_reserve(); |
338 | 337 | ||
339 | memblock_enforce_memory_limit(memory_limit); | 338 | memblock_enforce_memory_limit(memory_limit); |
340 | memblock_analyze(); | 339 | memblock_allow_resize(); |
341 | 340 | ||
342 | memblock_dump_all(); | 341 | memblock_dump_all(); |
343 | 342 | ||
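With memblock_init() and memblock_analyze() dropped, memblock is assumed to be statically initialized, and memblock_allow_resize() only grants permission for its region arrays to grow later; the rest of paging_init() is unchanged. A condensed sketch of the new early-memory bring-up order, assuming a hypothetical platform_register_ram() hook that issues the memblock_add() calls:

	/* Sketch, not the actual sh code; platform_register_ram() is made up. */
	platform_register_ram();			/* memblock_add() each RAM bank       */
	memblock_reserve(__pa(_text), _end - _text);	/* keep the kernel image reserved     */
	memblock_enforce_memory_limit(memory_limit);	/* honour a mem= style limit, if any  */
	memblock_allow_resize();			/* region arrays may now be resized   */
	memblock_dump_all();				/* verbose dump with memblock=debug   */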
diff --git a/arch/sh/oprofile/common.c b/arch/sh/oprofile/common.c index b4c2d2b946dd..e4dd5d5a1115 100644 --- a/arch/sh/oprofile/common.c +++ b/arch/sh/oprofile/common.c | |||
@@ -49,7 +49,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) | |||
49 | return oprofile_perf_init(ops); | 49 | return oprofile_perf_init(ops); |
50 | } | 50 | } |
51 | 51 | ||
52 | void __exit oprofile_arch_exit(void) | 52 | void oprofile_arch_exit(void) |
53 | { | 53 | { |
54 | oprofile_perf_exit(); | 54 | oprofile_perf_exit(); |
55 | kfree(sh_pmu_op_name); | 55 | kfree(sh_pmu_op_name); |
@@ -60,5 +60,5 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) | |||
60 | ops->backtrace = sh_backtrace; | 60 | ops->backtrace = sh_backtrace; |
61 | return -ENODEV; | 61 | return -ENODEV; |
62 | } | 62 | } |
63 | void __exit oprofile_arch_exit(void) {} | 63 | void oprofile_arch_exit(void) {} |
64 | #endif /* CONFIG_HW_PERF_EVENTS */ | 64 | #endif /* CONFIG_HW_PERF_EVENTS */ |
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index f92602e86607..70ae9d81870e 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
@@ -43,6 +43,7 @@ config SPARC64 | |||
43 | select HAVE_KPROBES | 43 | select HAVE_KPROBES |
44 | select HAVE_RCU_TABLE_FREE if SMP | 44 | select HAVE_RCU_TABLE_FREE if SMP |
45 | select HAVE_MEMBLOCK | 45 | select HAVE_MEMBLOCK |
46 | select HAVE_MEMBLOCK_NODE_MAP | ||
46 | select HAVE_SYSCALL_WRAPPERS | 47 | select HAVE_SYSCALL_WRAPPERS |
47 | select HAVE_DYNAMIC_FTRACE | 48 | select HAVE_DYNAMIC_FTRACE |
48 | select HAVE_FTRACE_MCOUNT_RECORD | 49 | select HAVE_FTRACE_MCOUNT_RECORD |
@@ -352,9 +353,6 @@ config NODES_SPAN_OTHER_NODES | |||
352 | def_bool y | 353 | def_bool y |
353 | depends on NEED_MULTIPLE_NODES | 354 | depends on NEED_MULTIPLE_NODES |
354 | 355 | ||
355 | config ARCH_POPULATES_NODE_MAP | ||
356 | def_bool y if SPARC64 | ||
357 | |||
358 | config ARCH_SELECT_MEMORY_MODEL | 356 | config ARCH_SELECT_MEMORY_MODEL |
359 | def_bool y if SPARC64 | 357 | def_bool y if SPARC64 |
360 | 358 | ||
diff --git a/arch/sparc/include/asm/memblock.h b/arch/sparc/include/asm/memblock.h deleted file mode 100644 index c67b047ef85e..000000000000 --- a/arch/sparc/include/asm/memblock.h +++ /dev/null | |||
@@ -1,8 +0,0 @@ | |||
1 | #ifndef _SPARC64_MEMBLOCK_H | ||
2 | #define _SPARC64_MEMBLOCK_H | ||
3 | |||
4 | #include <asm/oplib.h> | ||
5 | |||
6 | #define MEMBLOCK_DBG(fmt...) prom_printf(fmt) | ||
7 | |||
8 | #endif /* !(_SPARC64_MEMBLOCK_H) */ | ||
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c index 7429b47c3aca..381edcd5bc29 100644 --- a/arch/sparc/kernel/ds.c +++ b/arch/sparc/kernel/ds.c | |||
@@ -1181,13 +1181,11 @@ static int __devinit ds_probe(struct vio_dev *vdev, | |||
1181 | 1181 | ||
1182 | dp->rcv_buf_len = 4096; | 1182 | dp->rcv_buf_len = 4096; |
1183 | 1183 | ||
1184 | dp->ds_states = kzalloc(sizeof(ds_states_template), | 1184 | dp->ds_states = kmemdup(ds_states_template, |
1185 | GFP_KERNEL); | 1185 | sizeof(ds_states_template), GFP_KERNEL); |
1186 | if (!dp->ds_states) | 1186 | if (!dp->ds_states) |
1187 | goto out_free_rcv_buf; | 1187 | goto out_free_rcv_buf; |
1188 | 1188 | ||
1189 | memcpy(dp->ds_states, ds_states_template, | ||
1190 | sizeof(ds_states_template)); | ||
1191 | dp->num_ds_states = ARRAY_SIZE(ds_states_template); | 1189 | dp->num_ds_states = ARRAY_SIZE(ds_states_template); |
1192 | 1190 | ||
1193 | for (i = 0; i < dp->num_ds_states; i++) | 1191 | for (i = 0; i < dp->num_ds_states; i++) |
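This hunk (and the prom_common.c one further down) replaces an open-coded allocate-then-memcpy() pair with kmemdup(), which allocates len bytes with the given GFP flags, copies the source into them, and returns NULL on allocation failure (declared in <linux/string.h>). A minimal usage sketch with illustrative names:

	/* Illustrative only; "template" and "copy" are made-up names. */
	static const int template[4] = { 1, 2, 3, 4 };
	int *copy;

	copy = kmemdup(template, sizeof(template), GFP_KERNEL);
	if (!copy)
		return -ENOMEM;
	/* ... use copy ..., then kfree(copy) when finished */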
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index b272cda35a01..af5755d20fbe 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c | |||
@@ -849,10 +849,10 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm, | |||
849 | if (!irq) | 849 | if (!irq) |
850 | return -ENOMEM; | 850 | return -ENOMEM; |
851 | 851 | ||
852 | if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE)) | ||
853 | return -EINVAL; | ||
854 | if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID)) | 852 | if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID)) |
855 | return -EINVAL; | 853 | return -EINVAL; |
854 | if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE)) | ||
855 | return -EINVAL; | ||
856 | 856 | ||
857 | return irq; | 857 | return irq; |
858 | } | 858 | } |
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index 3739a06a76cb..39d8b05201a2 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c | |||
@@ -95,12 +95,14 @@ void cpu_idle(void) | |||
95 | set_thread_flag(TIF_POLLING_NRFLAG); | 95 | set_thread_flag(TIF_POLLING_NRFLAG); |
96 | 96 | ||
97 | while(1) { | 97 | while(1) { |
98 | tick_nohz_stop_sched_tick(1); | 98 | tick_nohz_idle_enter(); |
99 | rcu_idle_enter(); | ||
99 | 100 | ||
100 | while (!need_resched() && !cpu_is_offline(cpu)) | 101 | while (!need_resched() && !cpu_is_offline(cpu)) |
101 | sparc64_yield(cpu); | 102 | sparc64_yield(cpu); |
102 | 103 | ||
103 | tick_nohz_restart_sched_tick(); | 104 | rcu_idle_exit(); |
105 | tick_nohz_idle_exit(); | ||
104 | 106 | ||
105 | preempt_enable_no_resched(); | 107 | preempt_enable_no_resched(); |
106 | 108 | ||
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c index 46614807a57f..741df916c124 100644 --- a/arch/sparc/kernel/prom_common.c +++ b/arch/sparc/kernel/prom_common.c | |||
@@ -58,12 +58,10 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len | |||
58 | void *new_val; | 58 | void *new_val; |
59 | int err; | 59 | int err; |
60 | 60 | ||
61 | new_val = kmalloc(len, GFP_KERNEL); | 61 | new_val = kmemdup(val, len, GFP_KERNEL); |
62 | if (!new_val) | 62 | if (!new_val) |
63 | return -ENOMEM; | 63 | return -ENOMEM; |
64 | 64 | ||
65 | memcpy(new_val, val, len); | ||
66 | |||
67 | err = -ENODEV; | 65 | err = -ENODEV; |
68 | 66 | ||
69 | mutex_lock(&of_set_property_mutex); | 67 | mutex_lock(&of_set_property_mutex); |
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c index fe1e3fc31bc5..ffb883ddd0f0 100644 --- a/arch/sparc/kernel/setup_32.c +++ b/arch/sparc/kernel/setup_32.c | |||
@@ -84,7 +84,7 @@ static void prom_sync_me(void) | |||
84 | 84 | ||
85 | prom_printf("PROM SYNC COMMAND...\n"); | 85 | prom_printf("PROM SYNC COMMAND...\n"); |
86 | show_free_areas(0); | 86 | show_free_areas(0); |
87 | if(current->pid != 0) { | 87 | if (!is_idle_task(current)) { |
88 | local_irq_enable(); | 88 | local_irq_enable(); |
89 | sys_sync(); | 89 | sys_sync(); |
90 | local_irq_disable(); | 90 | local_irq_disable(); |
diff --git a/arch/sparc/mm/btfixup.c b/arch/sparc/mm/btfixup.c index 5175ac2f4820..8a7f81743c12 100644 --- a/arch/sparc/mm/btfixup.c +++ b/arch/sparc/mm/btfixup.c | |||
@@ -302,8 +302,7 @@ void __init btfixup(void) | |||
302 | case 'i': /* INT */ | 302 | case 'i': /* INT */ |
303 | if ((insn & 0xc1c00000) == 0x01000000) /* %HI */ | 303 | if ((insn & 0xc1c00000) == 0x01000000) /* %HI */ |
304 | set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10)); | 304 | set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10)); |
305 | else if ((insn & 0x80002000) == 0x80002000 && | 305 | else if ((insn & 0x80002000) == 0x80002000) /* %LO */ |
306 | (insn & 0x01800000) != 0x01800000) /* %LO */ | ||
307 | set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff)); | 306 | set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff)); |
308 | else { | 307 | else { |
309 | prom_printf(insn_i, p, addr, insn); | 308 | prom_printf(insn_i, p, addr, insn); |
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 8e073d802139..b3f5e7dfea51 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c | |||
@@ -790,7 +790,7 @@ static int find_node(unsigned long addr) | |||
790 | return -1; | 790 | return -1; |
791 | } | 791 | } |
792 | 792 | ||
793 | u64 memblock_nid_range(u64 start, u64 end, int *nid) | 793 | static u64 memblock_nid_range(u64 start, u64 end, int *nid) |
794 | { | 794 | { |
795 | *nid = find_node(start); | 795 | *nid = find_node(start); |
796 | start += PAGE_SIZE; | 796 | start += PAGE_SIZE; |
@@ -808,7 +808,7 @@ u64 memblock_nid_range(u64 start, u64 end, int *nid) | |||
808 | return start; | 808 | return start; |
809 | } | 809 | } |
810 | #else | 810 | #else |
811 | u64 memblock_nid_range(u64 start, u64 end, int *nid) | 811 | static u64 memblock_nid_range(u64 start, u64 end, int *nid) |
812 | { | 812 | { |
813 | *nid = 0; | 813 | *nid = 0; |
814 | return end; | 814 | return end; |
@@ -816,7 +816,7 @@ u64 memblock_nid_range(u64 start, u64 end, int *nid) | |||
816 | #endif | 816 | #endif |
817 | 817 | ||
818 | /* This must be invoked after performing all of the necessary | 818 | /* This must be invoked after performing all of the necessary |
819 | * add_active_range() calls for 'nid'. We need to be able to get | 819 | * memblock_set_node() calls for 'nid'. We need to be able to get |
820 | * correct data from get_pfn_range_for_nid(). | 820 | * correct data from get_pfn_range_for_nid(). |
821 | */ | 821 | */ |
822 | static void __init allocate_node_data(int nid) | 822 | static void __init allocate_node_data(int nid) |
@@ -987,14 +987,11 @@ static void __init add_node_ranges(void) | |||
987 | 987 | ||
988 | this_end = memblock_nid_range(start, end, &nid); | 988 | this_end = memblock_nid_range(start, end, &nid); |
989 | 989 | ||
990 | numadbg("Adding active range nid[%d] " | 990 | numadbg("Setting memblock NUMA node nid[%d] " |
991 | "start[%lx] end[%lx]\n", | 991 | "start[%lx] end[%lx]\n", |
992 | nid, start, this_end); | 992 | nid, start, this_end); |
993 | 993 | ||
994 | add_active_range(nid, | 994 | memblock_set_node(start, this_end - start, nid); |
995 | start >> PAGE_SHIFT, | ||
996 | this_end >> PAGE_SHIFT); | ||
997 | |||
998 | start = this_end; | 995 | start = this_end; |
999 | } | 996 | } |
1000 | } | 997 | } |
@@ -1282,7 +1279,6 @@ static void __init bootmem_init_nonnuma(void) | |||
1282 | { | 1279 | { |
1283 | unsigned long top_of_ram = memblock_end_of_DRAM(); | 1280 | unsigned long top_of_ram = memblock_end_of_DRAM(); |
1284 | unsigned long total_ram = memblock_phys_mem_size(); | 1281 | unsigned long total_ram = memblock_phys_mem_size(); |
1285 | struct memblock_region *reg; | ||
1286 | 1282 | ||
1287 | numadbg("bootmem_init_nonnuma()\n"); | 1283 | numadbg("bootmem_init_nonnuma()\n"); |
1288 | 1284 | ||
@@ -1292,20 +1288,8 @@ static void __init bootmem_init_nonnuma(void) | |||
1292 | (top_of_ram - total_ram) >> 20); | 1288 | (top_of_ram - total_ram) >> 20); |
1293 | 1289 | ||
1294 | init_node_masks_nonnuma(); | 1290 | init_node_masks_nonnuma(); |
1295 | 1291 | memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0); | |
1296 | for_each_memblock(memory, reg) { | ||
1297 | unsigned long start_pfn, end_pfn; | ||
1298 | |||
1299 | if (!reg->size) | ||
1300 | continue; | ||
1301 | |||
1302 | start_pfn = memblock_region_memory_base_pfn(reg); | ||
1303 | end_pfn = memblock_region_memory_end_pfn(reg); | ||
1304 | add_active_range(0, start_pfn, end_pfn); | ||
1305 | } | ||
1306 | |||
1307 | allocate_node_data(0); | 1292 | allocate_node_data(0); |
1308 | |||
1309 | node_set_online(0); | 1293 | node_set_online(0); |
1310 | } | 1294 | } |
1311 | 1295 | ||
@@ -1769,8 +1753,6 @@ void __init paging_init(void) | |||
1769 | sun4v_ktsb_init(); | 1753 | sun4v_ktsb_init(); |
1770 | } | 1754 | } |
1771 | 1755 | ||
1772 | memblock_init(); | ||
1773 | |||
1774 | /* Find available physical memory... | 1756 | /* Find available physical memory... |
1775 | * | 1757 | * |
1776 | * Read it twice in order to work around a bug in openfirmware. | 1758 | * Read it twice in order to work around a bug in openfirmware. |
@@ -1796,7 +1778,7 @@ void __init paging_init(void) | |||
1796 | 1778 | ||
1797 | memblock_enforce_memory_limit(cmdline_memory_size); | 1779 | memblock_enforce_memory_limit(cmdline_memory_size); |
1798 | 1780 | ||
1799 | memblock_analyze(); | 1781 | memblock_allow_resize(); |
1800 | memblock_dump_all(); | 1782 | memblock_dump_all(); |
1801 | 1783 | ||
1802 | set_bit(0, mmu_context_bmap); | 1784 | set_bit(0, mmu_context_bmap); |
diff --git a/arch/tile/include/asm/irq.h b/arch/tile/include/asm/irq.h index 94e9a511de84..f80f8ceabc67 100644 --- a/arch/tile/include/asm/irq.h +++ b/arch/tile/include/asm/irq.h | |||
@@ -74,16 +74,6 @@ enum { | |||
74 | */ | 74 | */ |
75 | void tile_irq_activate(unsigned int irq, int tile_irq_type); | 75 | void tile_irq_activate(unsigned int irq, int tile_irq_type); |
76 | 76 | ||
77 | /* | ||
78 | * For onboard, non-PCI (e.g. TILE_IRQ_PERCPU) devices, drivers know | ||
79 | * how to use enable/disable_percpu_irq() to manage interrupts on each | ||
80 | * core. We can't use the generic enable/disable_irq() because they | ||
81 | * use a single reference count per irq, rather than per cpu per irq. | ||
82 | */ | ||
83 | void enable_percpu_irq(unsigned int irq); | ||
84 | void disable_percpu_irq(unsigned int irq); | ||
85 | |||
86 | |||
87 | void setup_irq_regs(void); | 77 | void setup_irq_regs(void); |
88 | 78 | ||
89 | #endif /* _ASM_TILE_IRQ_H */ | 79 | #endif /* _ASM_TILE_IRQ_H */ |
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c index aa0134db2dd6..02e628065012 100644 --- a/arch/tile/kernel/irq.c +++ b/arch/tile/kernel/irq.c | |||
@@ -152,14 +152,13 @@ void tile_dev_intr(struct pt_regs *regs, int intnum) | |||
152 | * Remove an irq from the disabled mask. If we're in an interrupt | 152 | * Remove an irq from the disabled mask. If we're in an interrupt |
153 | * context, defer enabling the HW interrupt until we leave. | 153 | * context, defer enabling the HW interrupt until we leave. |
154 | */ | 154 | */ |
155 | void enable_percpu_irq(unsigned int irq) | 155 | static void tile_irq_chip_enable(struct irq_data *d) |
156 | { | 156 | { |
157 | get_cpu_var(irq_disable_mask) &= ~(1UL << irq); | 157 | get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq); |
158 | if (__get_cpu_var(irq_depth) == 0) | 158 | if (__get_cpu_var(irq_depth) == 0) |
159 | unmask_irqs(1UL << irq); | 159 | unmask_irqs(1UL << d->irq); |
160 | put_cpu_var(irq_disable_mask); | 160 | put_cpu_var(irq_disable_mask); |
161 | } | 161 | } |
162 | EXPORT_SYMBOL(enable_percpu_irq); | ||
163 | 162 | ||
164 | /* | 163 | /* |
165 | * Add an irq to the disabled mask. We disable the HW interrupt | 164 | * Add an irq to the disabled mask. We disable the HW interrupt |
@@ -167,13 +166,12 @@ EXPORT_SYMBOL(enable_percpu_irq); | |||
167 | * in an interrupt context, the return path is careful to avoid | 166 | * in an interrupt context, the return path is careful to avoid |
168 | * unmasking a newly disabled interrupt. | 167 | * unmasking a newly disabled interrupt. |
169 | */ | 168 | */ |
170 | void disable_percpu_irq(unsigned int irq) | 169 | static void tile_irq_chip_disable(struct irq_data *d) |
171 | { | 170 | { |
172 | get_cpu_var(irq_disable_mask) |= (1UL << irq); | 171 | get_cpu_var(irq_disable_mask) |= (1UL << d->irq); |
173 | mask_irqs(1UL << irq); | 172 | mask_irqs(1UL << d->irq); |
174 | put_cpu_var(irq_disable_mask); | 173 | put_cpu_var(irq_disable_mask); |
175 | } | 174 | } |
176 | EXPORT_SYMBOL(disable_percpu_irq); | ||
177 | 175 | ||
178 | /* Mask an interrupt. */ | 176 | /* Mask an interrupt. */ |
179 | static void tile_irq_chip_mask(struct irq_data *d) | 177 | static void tile_irq_chip_mask(struct irq_data *d) |
@@ -209,6 +207,8 @@ static void tile_irq_chip_eoi(struct irq_data *d) | |||
209 | 207 | ||
210 | static struct irq_chip tile_irq_chip = { | 208 | static struct irq_chip tile_irq_chip = { |
211 | .name = "tile_irq_chip", | 209 | .name = "tile_irq_chip", |
210 | .irq_enable = tile_irq_chip_enable, | ||
211 | .irq_disable = tile_irq_chip_disable, | ||
212 | .irq_ack = tile_irq_chip_ack, | 212 | .irq_ack = tile_irq_chip_ack, |
213 | .irq_eoi = tile_irq_chip_eoi, | 213 | .irq_eoi = tile_irq_chip_eoi, |
214 | .irq_mask = tile_irq_chip_mask, | 214 | .irq_mask = tile_irq_chip_mask, |
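The tile-private enable_percpu_irq()/disable_percpu_irq() exports are gone; the same per-cpu mask bookkeeping now sits behind the irq_chip's .irq_enable/.irq_disable callbacks, so the generic IRQ layer drives it through struct irq_data and drivers use the normal genirq entry points rather than tile-specific symbols. A reduced sketch of the callback shape (the hardware helper name is hypothetical):

	/* Sketch of an irq_chip enable callback keyed off irq_data. */
	static void example_irq_enable(struct irq_data *d)
	{
		unsigned int irq = d->irq;	/* Linux irq number            */
		example_unmask_hw(irq);		/* hypothetical hardware poke  */
	}

	static struct irq_chip example_chip = {
		.name		= "example",
		.irq_enable	= example_irq_enable,
	};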
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c index 658f2ce426a4..b3ed19f8779c 100644 --- a/arch/tile/kernel/pci-dma.c +++ b/arch/tile/kernel/pci-dma.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/mm.h> | 15 | #include <linux/mm.h> |
16 | #include <linux/dma-mapping.h> | 16 | #include <linux/dma-mapping.h> |
17 | #include <linux/vmalloc.h> | 17 | #include <linux/vmalloc.h> |
18 | #include <linux/export.h> | ||
18 | #include <asm/tlbflush.h> | 19 | #include <asm/tlbflush.h> |
19 | #include <asm/homecache.h> | 20 | #include <asm/homecache.h> |
20 | 21 | ||
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c index 2a8014cb1ff5..9d610d3fb11e 100644 --- a/arch/tile/kernel/pci.c +++ b/arch/tile/kernel/pci.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/irq.h> | 24 | #include <linux/irq.h> |
25 | #include <linux/io.h> | 25 | #include <linux/io.h> |
26 | #include <linux/uaccess.h> | 26 | #include <linux/uaccess.h> |
27 | #include <linux/export.h> | ||
27 | 28 | ||
28 | #include <asm/processor.h> | 29 | #include <asm/processor.h> |
29 | #include <asm/sections.h> | 30 | #include <asm/sections.h> |
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c index 9c45d8bbdf57..4c1ac6e5347a 100644 --- a/arch/tile/kernel/process.c +++ b/arch/tile/kernel/process.c | |||
@@ -85,7 +85,8 @@ void cpu_idle(void) | |||
85 | 85 | ||
86 | /* endless idle loop with no priority at all */ | 86 | /* endless idle loop with no priority at all */ |
87 | while (1) { | 87 | while (1) { |
88 | tick_nohz_stop_sched_tick(1); | 88 | tick_nohz_idle_enter(); |
89 | rcu_idle_enter(); | ||
89 | while (!need_resched()) { | 90 | while (!need_resched()) { |
90 | if (cpu_is_offline(cpu)) | 91 | if (cpu_is_offline(cpu)) |
91 | BUG(); /* no HOTPLUG_CPU */ | 92 | BUG(); /* no HOTPLUG_CPU */ |
@@ -105,7 +106,8 @@ void cpu_idle(void) | |||
105 | local_irq_enable(); | 106 | local_irq_enable(); |
106 | current_thread_info()->status |= TS_POLLING; | 107 | current_thread_info()->status |= TS_POLLING; |
107 | } | 108 | } |
108 | tick_nohz_restart_sched_tick(); | 109 | rcu_idle_exit(); |
110 | tick_nohz_idle_exit(); | ||
109 | preempt_enable_no_resched(); | 111 | preempt_enable_no_resched(); |
110 | schedule(); | 112 | schedule(); |
111 | preempt_disable(); | 113 | preempt_disable(); |
diff --git a/arch/tile/kernel/sysfs.c b/arch/tile/kernel/sysfs.c index b671a86f4515..602908268093 100644 --- a/arch/tile/kernel/sysfs.c +++ b/arch/tile/kernel/sysfs.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/cpu.h> | 18 | #include <linux/cpu.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include <linux/smp.h> | 20 | #include <linux/smp.h> |
21 | #include <linux/stat.h> | ||
21 | #include <hv/hypervisor.h> | 22 | #include <hv/hypervisor.h> |
22 | 23 | ||
23 | /* Return a string queried from the hypervisor, truncated to page size. */ | 24 | /* Return a string queried from the hypervisor, truncated to page size. */ |
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c index a87d2a859ba9..2a81d32de0da 100644 --- a/arch/tile/lib/exports.c +++ b/arch/tile/lib/exports.c | |||
@@ -39,6 +39,9 @@ EXPORT_SYMBOL(finv_user_asm); | |||
39 | EXPORT_SYMBOL(current_text_addr); | 39 | EXPORT_SYMBOL(current_text_addr); |
40 | EXPORT_SYMBOL(dump_stack); | 40 | EXPORT_SYMBOL(dump_stack); |
41 | 41 | ||
42 | /* arch/tile/kernel/head.S */ | ||
43 | EXPORT_SYMBOL(empty_zero_page); | ||
44 | |||
42 | /* arch/tile/lib/, various memcpy files */ | 45 | /* arch/tile/lib/, various memcpy files */ |
43 | EXPORT_SYMBOL(memcpy); | 46 | EXPORT_SYMBOL(memcpy); |
44 | EXPORT_SYMBOL(__copy_to_user_inatomic); | 47 | EXPORT_SYMBOL(__copy_to_user_inatomic); |
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c index 25b7b90fd620..c1eaaa1fcc20 100644 --- a/arch/tile/mm/fault.c +++ b/arch/tile/mm/fault.c | |||
@@ -54,7 +54,7 @@ static noinline void force_sig_info_fault(const char *type, int si_signo, | |||
54 | if (unlikely(tsk->pid < 2)) { | 54 | if (unlikely(tsk->pid < 2)) { |
55 | panic("Signal %d (code %d) at %#lx sent to %s!", | 55 | panic("Signal %d (code %d) at %#lx sent to %s!", |
56 | si_signo, si_code & 0xffff, address, | 56 | si_signo, si_code & 0xffff, address, |
57 | tsk->pid ? "init" : "the idle task"); | 57 | is_idle_task(tsk) ? "the idle task" : "init"); |
58 | } | 58 | } |
59 | 59 | ||
60 | info.si_signo = si_signo; | 60 | info.si_signo = si_signo; |
@@ -515,7 +515,7 @@ no_context: | |||
515 | 515 | ||
516 | if (unlikely(tsk->pid < 2)) { | 516 | if (unlikely(tsk->pid < 2)) { |
517 | panic("Kernel page fault running %s!", | 517 | panic("Kernel page fault running %s!", |
518 | tsk->pid ? "init" : "the idle task"); | 518 | is_idle_task(tsk) ? "the idle task" : "init"); |
519 | } | 519 | } |
520 | 520 | ||
521 | /* | 521 | /* |
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c index cbe6f4f9eca3..1cc6ae477c98 100644 --- a/arch/tile/mm/homecache.c +++ b/arch/tile/mm/homecache.c | |||
@@ -449,9 +449,12 @@ void homecache_free_pages(unsigned long addr, unsigned int order) | |||
449 | VM_BUG_ON(!virt_addr_valid((void *)addr)); | 449 | VM_BUG_ON(!virt_addr_valid((void *)addr)); |
450 | page = virt_to_page((void *)addr); | 450 | page = virt_to_page((void *)addr); |
451 | if (put_page_testzero(page)) { | 451 | if (put_page_testzero(page)) { |
452 | int pages = (1 << order); | ||
453 | homecache_change_page_home(page, order, initial_page_home()); | 452 | homecache_change_page_home(page, order, initial_page_home()); |
454 | while (pages--) | 453 | if (order == 0) { |
455 | __free_page(page++); | 454 | free_hot_cold_page(page, 0); |
455 | } else { | ||
456 | init_page_count(page); | ||
457 | __free_pages(page, order); | ||
458 | } | ||
456 | } | 459 | } |
457 | } | 460 | } |
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index c5338351aecd..69f24905abdc 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c | |||
@@ -246,10 +246,12 @@ void default_idle(void) | |||
246 | if (need_resched()) | 246 | if (need_resched()) |
247 | schedule(); | 247 | schedule(); |
248 | 248 | ||
249 | tick_nohz_stop_sched_tick(1); | 249 | tick_nohz_idle_enter(); |
250 | rcu_idle_enter(); | ||
250 | nsecs = disable_timer(); | 251 | nsecs = disable_timer(); |
251 | idle_sleep(nsecs); | 252 | idle_sleep(nsecs); |
252 | tick_nohz_restart_sched_tick(); | 253 | rcu_idle_exit(); |
254 | tick_nohz_idle_exit(); | ||
253 | } | 255 | } |
254 | } | 256 | } |
255 | 257 | ||
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c index a08d9fab81f2..82a6e22f1f35 100644 --- a/arch/um/kernel/time.c +++ b/arch/um/kernel/time.c | |||
@@ -75,8 +75,6 @@ static struct clocksource itimer_clocksource = { | |||
75 | .rating = 300, | 75 | .rating = 300, |
76 | .read = itimer_read, | 76 | .read = itimer_read, |
77 | .mask = CLOCKSOURCE_MASK(64), | 77 | .mask = CLOCKSOURCE_MASK(64), |
78 | .mult = 1000, | ||
79 | .shift = 0, | ||
80 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 78 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
81 | }; | 79 | }; |
82 | 80 | ||
@@ -94,9 +92,9 @@ static void __init setup_itimer(void) | |||
94 | clockevent_delta2ns(60 * HZ, &itimer_clockevent); | 92 | clockevent_delta2ns(60 * HZ, &itimer_clockevent); |
95 | itimer_clockevent.min_delta_ns = | 93 | itimer_clockevent.min_delta_ns = |
96 | clockevent_delta2ns(1, &itimer_clockevent); | 94 | clockevent_delta2ns(1, &itimer_clockevent); |
97 | err = clocksource_register(&itimer_clocksource); | 95 | err = clocksource_register_hz(&itimer_clocksource, USEC_PER_SEC); |
98 | if (err) { | 96 | if (err) { |
99 | printk(KERN_ERR "clocksource_register returned %d\n", err); | 97 | printk(KERN_ERR "clocksource_register_hz returned %d\n", err); |
100 | return; | 98 | return; |
101 | } | 99 | } |
102 | clockevents_register_device(&itimer_clockevent); | 100 | clockevents_register_device(&itimer_clockevent); |
diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c index ba401df971ed..52edc2b62873 100644 --- a/arch/unicore32/kernel/process.c +++ b/arch/unicore32/kernel/process.c | |||
@@ -55,7 +55,8 @@ void cpu_idle(void) | |||
55 | { | 55 | { |
56 | /* endless idle loop with no priority at all */ | 56 | /* endless idle loop with no priority at all */ |
57 | while (1) { | 57 | while (1) { |
58 | tick_nohz_stop_sched_tick(1); | 58 | tick_nohz_idle_enter(); |
59 | rcu_idle_enter(); | ||
59 | while (!need_resched()) { | 60 | while (!need_resched()) { |
60 | local_irq_disable(); | 61 | local_irq_disable(); |
61 | stop_critical_timings(); | 62 | stop_critical_timings(); |
@@ -63,7 +64,8 @@ void cpu_idle(void) | |||
63 | local_irq_enable(); | 64 | local_irq_enable(); |
64 | start_critical_timings(); | 65 | start_critical_timings(); |
65 | } | 66 | } |
66 | tick_nohz_restart_sched_tick(); | 67 | rcu_idle_exit(); |
68 | tick_nohz_idle_exit(); | ||
67 | preempt_enable_no_resched(); | 69 | preempt_enable_no_resched(); |
68 | schedule(); | 70 | schedule(); |
69 | preempt_disable(); | 71 | preempt_disable(); |
diff --git a/arch/unicore32/kernel/setup.c b/arch/unicore32/kernel/setup.c index 471b6bca8da4..673d7a89d8ff 100644 --- a/arch/unicore32/kernel/setup.c +++ b/arch/unicore32/kernel/setup.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <asm/cacheflush.h> | 37 | #include <asm/cacheflush.h> |
38 | #include <asm/tlbflush.h> | 38 | #include <asm/tlbflush.h> |
39 | #include <asm/traps.h> | 39 | #include <asm/traps.h> |
40 | #include <asm/memblock.h> | ||
40 | 41 | ||
41 | #include "setup.h" | 42 | #include "setup.h" |
42 | 43 | ||
diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c index 3b379cddbc64..de186bde8975 100644 --- a/arch/unicore32/mm/init.c +++ b/arch/unicore32/mm/init.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <asm/setup.h> | 26 | #include <asm/setup.h> |
27 | #include <asm/sizes.h> | 27 | #include <asm/sizes.h> |
28 | #include <asm/tlb.h> | 28 | #include <asm/tlb.h> |
29 | #include <asm/memblock.h> | ||
29 | #include <mach/map.h> | 30 | #include <mach/map.h> |
30 | 31 | ||
31 | #include "mm.h" | 32 | #include "mm.h" |
@@ -245,7 +246,6 @@ void __init uc32_memblock_init(struct meminfo *mi) | |||
245 | sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), | 246 | sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), |
246 | meminfo_cmp, NULL); | 247 | meminfo_cmp, NULL); |
247 | 248 | ||
248 | memblock_init(); | ||
249 | for (i = 0; i < mi->nr_banks; i++) | 249 | for (i = 0; i < mi->nr_banks; i++) |
250 | memblock_add(mi->bank[i].start, mi->bank[i].size); | 250 | memblock_add(mi->bank[i].start, mi->bank[i].size); |
251 | 251 | ||
@@ -264,7 +264,7 @@ void __init uc32_memblock_init(struct meminfo *mi) | |||
264 | 264 | ||
265 | uc32_mm_memblock_reserve(); | 265 | uc32_mm_memblock_reserve(); |
266 | 266 | ||
267 | memblock_analyze(); | 267 | memblock_allow_resize(); |
268 | memblock_dump_all(); | 268 | memblock_dump_all(); |
269 | } | 269 | } |
270 | 270 | ||
diff --git a/arch/unicore32/mm/mmu.c b/arch/unicore32/mm/mmu.c index 3e5c3e5a0b45..43c20b40e444 100644 --- a/arch/unicore32/mm/mmu.c +++ b/arch/unicore32/mm/mmu.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <asm/setup.h> | 25 | #include <asm/setup.h> |
26 | #include <asm/sizes.h> | 26 | #include <asm/sizes.h> |
27 | #include <asm/tlb.h> | 27 | #include <asm/tlb.h> |
28 | #include <asm/memblock.h> | ||
28 | 29 | ||
29 | #include <mach/map.h> | 30 | #include <mach/map.h> |
30 | 31 | ||
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 7b9eaa1ae10b..5731eb70e0a0 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -26,6 +26,8 @@ config X86 | |||
26 | select HAVE_IOREMAP_PROT | 26 | select HAVE_IOREMAP_PROT |
27 | select HAVE_KPROBES | 27 | select HAVE_KPROBES |
28 | select HAVE_MEMBLOCK | 28 | select HAVE_MEMBLOCK |
29 | select HAVE_MEMBLOCK_NODE_MAP | ||
30 | select ARCH_DISCARD_MEMBLOCK | ||
29 | select ARCH_WANT_OPTIONAL_GPIOLIB | 31 | select ARCH_WANT_OPTIONAL_GPIOLIB |
30 | select ARCH_WANT_FRAME_POINTERS | 32 | select ARCH_WANT_FRAME_POINTERS |
31 | select HAVE_DMA_ATTRS | 33 | select HAVE_DMA_ATTRS |
@@ -204,9 +206,6 @@ config ZONE_DMA32 | |||
204 | bool | 206 | bool |
205 | default X86_64 | 207 | default X86_64 |
206 | 208 | ||
207 | config ARCH_POPULATES_NODE_MAP | ||
208 | def_bool y | ||
209 | |||
210 | config AUDIT_ARCH | 209 | config AUDIT_ARCH |
211 | bool | 210 | bool |
212 | default X86_64 | 211 | default X86_64 |
@@ -403,7 +402,7 @@ config X86_INTEL_CE | |||
403 | This option compiles in support for the CE4100 SOC for settop | 402 | This option compiles in support for the CE4100 SOC for settop |
404 | boxes and media devices. | 403 | boxes and media devices. |
405 | 404 | ||
406 | config X86_INTEL_MID | 405 | config X86_WANT_INTEL_MID |
407 | bool "Intel MID platform support" | 406 | bool "Intel MID platform support" |
408 | depends on X86_32 | 407 | depends on X86_32 |
409 | depends on X86_EXTENDED_PLATFORM | 408 | depends on X86_EXTENDED_PLATFORM |
@@ -412,7 +411,10 @@ config X86_INTEL_MID | |||
412 | systems which do not have the PCI legacy interfaces (Moorestown, | 411 | systems which do not have the PCI legacy interfaces (Moorestown, |
413 | Medfield). If you are building for a PC class system say N here. | 412 | Medfield). If you are building for a PC class system say N here. |
414 | 413 | ||
415 | if X86_INTEL_MID | 414 | if X86_WANT_INTEL_MID |
415 | |||
416 | config X86_INTEL_MID | ||
417 | bool | ||
416 | 418 | ||
417 | config X86_MRST | 419 | config X86_MRST |
418 | bool "Moorestown MID platform" | 420 | bool "Moorestown MID platform" |
@@ -424,6 +426,7 @@ config X86_MRST | |||
424 | select SPI | 426 | select SPI |
425 | select INTEL_SCU_IPC | 427 | select INTEL_SCU_IPC |
426 | select X86_PLATFORM_DEVICES | 428 | select X86_PLATFORM_DEVICES |
429 | select X86_INTEL_MID | ||
427 | ---help--- | 430 | ---help--- |
428 | Moorestown is Intel's Low Power Intel Architecture (LPIA) based Moblin | 431 | Moorestown is Intel's Low Power Intel Architecture (LPIA) based Moblin |
429 | Internet Device(MID) platform. Moorestown consists of two chips: | 432 | Internet Device(MID) platform. Moorestown consists of two chips: |
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h index 908b96957d88..37782566af24 100644 --- a/arch/x86/include/asm/e820.h +++ b/arch/x86/include/asm/e820.h | |||
@@ -117,7 +117,7 @@ static inline void early_memtest(unsigned long start, unsigned long end) | |||
117 | 117 | ||
118 | extern unsigned long e820_end_of_ram_pfn(void); | 118 | extern unsigned long e820_end_of_ram_pfn(void); |
119 | extern unsigned long e820_end_of_low_ram_pfn(void); | 119 | extern unsigned long e820_end_of_low_ram_pfn(void); |
120 | extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align); | 120 | extern u64 early_reserve_e820(u64 sizet, u64 align); |
121 | 121 | ||
122 | void memblock_x86_fill(void); | 122 | void memblock_x86_fill(void); |
123 | void memblock_find_dma_reserve(void); | 123 | void memblock_find_dma_reserve(void); |
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h index c9e09ea05644..6919e936345b 100644 --- a/arch/x86/include/asm/i387.h +++ b/arch/x86/include/asm/i387.h | |||
@@ -218,7 +218,7 @@ static inline void fpu_fxsave(struct fpu *fpu) | |||
218 | #ifdef CONFIG_SMP | 218 | #ifdef CONFIG_SMP |
219 | #define safe_address (__per_cpu_offset[0]) | 219 | #define safe_address (__per_cpu_offset[0]) |
220 | #else | 220 | #else |
221 | #define safe_address (kstat_cpu(0).cpustat.user) | 221 | #define safe_address (__get_cpu_var(kernel_cpustat).cpustat[CPUTIME_USER]) |
222 | #endif | 222 | #endif |
223 | 223 | ||
224 | /* | 224 | /* |
diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h index 88c765e16410..74df3f1eddfd 100644 --- a/arch/x86/include/asm/insn.h +++ b/arch/x86/include/asm/insn.h | |||
@@ -137,6 +137,13 @@ static inline int insn_is_avx(struct insn *insn) | |||
137 | return (insn->vex_prefix.value != 0); | 137 | return (insn->vex_prefix.value != 0); |
138 | } | 138 | } |
139 | 139 | ||
140 | /* Ensure this instruction is decoded completely */ | ||
141 | static inline int insn_complete(struct insn *insn) | ||
142 | { | ||
143 | return insn->opcode.got && insn->modrm.got && insn->sib.got && | ||
144 | insn->displacement.got && insn->immediate.got; | ||
145 | } | ||
146 | |||
140 | static inline insn_byte_t insn_vex_m_bits(struct insn *insn) | 147 | static inline insn_byte_t insn_vex_m_bits(struct insn *insn) |
141 | { | 148 | { |
142 | if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */ | 149 | if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */ |
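insn_complete() simply reports whether every field of a struct insn has been decoded, which lets callers detect a truncated or partially decoded buffer. A hedged sketch of its use with the in-kernel decoder, assuming the insn_init()/insn_get_length() entry points of this era (kaddr is a placeholder pointer to the code bytes):

	struct insn insn;

	insn_init(&insn, kaddr, 1);	/* assumed 3-argument form; 1 = 64-bit code  */
	insn_get_length(&insn);		/* full decode: opcode, modrm, sib, imm ...  */
	if (!insn_complete(&insn))
		return -EILSEQ;		/* buffer ended in the middle of an insn     */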
diff --git a/arch/x86/include/asm/intel_scu_ipc.h b/arch/x86/include/asm/intel_scu_ipc.h index 4420993acc47..925b605eb5c6 100644 --- a/arch/x86/include/asm/intel_scu_ipc.h +++ b/arch/x86/include/asm/intel_scu_ipc.h | |||
@@ -3,11 +3,15 @@ | |||
3 | 3 | ||
4 | #include <linux/notifier.h> | 4 | #include <linux/notifier.h> |
5 | 5 | ||
6 | #define IPCMSG_VRTC 0xFA /* Set vRTC device */ | 6 | #define IPCMSG_WARM_RESET 0xF0 |
7 | 7 | #define IPCMSG_COLD_RESET 0xF1 | |
8 | /* Command id associated with message IPCMSG_VRTC */ | 8 | #define IPCMSG_SOFT_RESET 0xF2 |
9 | #define IPC_CMD_VRTC_SETTIME 1 /* Set time */ | 9 | #define IPCMSG_COLD_BOOT 0xF3 |
10 | #define IPC_CMD_VRTC_SETALARM 2 /* Set alarm */ | 10 | |
11 | #define IPCMSG_VRTC 0xFA /* Set vRTC device */ | ||
12 | /* Command id associated with message IPCMSG_VRTC */ | ||
13 | #define IPC_CMD_VRTC_SETTIME 1 /* Set time */ | ||
14 | #define IPC_CMD_VRTC_SETALARM 2 /* Set alarm */ | ||
11 | 15 | ||
12 | /* Read single register */ | 16 | /* Read single register */ |
13 | int intel_scu_ipc_ioread8(u16 addr, u8 *data); | 17 | int intel_scu_ipc_ioread8(u16 addr, u8 *data); |
diff --git a/arch/x86/include/asm/mach_timer.h b/arch/x86/include/asm/mach_timer.h index 853728519ae9..88d0c3c74c13 100644 --- a/arch/x86/include/asm/mach_timer.h +++ b/arch/x86/include/asm/mach_timer.h | |||
@@ -15,7 +15,7 @@ | |||
15 | 15 | ||
16 | #define CALIBRATE_TIME_MSEC 30 /* 30 msecs */ | 16 | #define CALIBRATE_TIME_MSEC 30 /* 30 msecs */ |
17 | #define CALIBRATE_LATCH \ | 17 | #define CALIBRATE_LATCH \ |
18 | ((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000) | 18 | ((PIT_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000) |
19 | 19 | ||
20 | static inline void mach_prepare_counter(void) | 20 | static inline void mach_prepare_counter(void) |
21 | { | 21 | { |
diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h deleted file mode 100644 index 0cd3800f33b9..000000000000 --- a/arch/x86/include/asm/memblock.h +++ /dev/null | |||
@@ -1,23 +0,0 @@ | |||
1 | #ifndef _X86_MEMBLOCK_H | ||
2 | #define _X86_MEMBLOCK_H | ||
3 | |||
4 | #define ARCH_DISCARD_MEMBLOCK | ||
5 | |||
6 | u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align); | ||
7 | |||
8 | void memblock_x86_reserve_range(u64 start, u64 end, char *name); | ||
9 | void memblock_x86_free_range(u64 start, u64 end); | ||
10 | struct range; | ||
11 | int __get_free_all_memory_range(struct range **range, int nodeid, | ||
12 | unsigned long start_pfn, unsigned long end_pfn); | ||
13 | int get_free_all_memory_range(struct range **rangep, int nodeid); | ||
14 | |||
15 | void memblock_x86_register_active_regions(int nid, unsigned long start_pfn, | ||
16 | unsigned long last_pfn); | ||
17 | u64 memblock_x86_hole_size(u64 start, u64 end); | ||
18 | u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align); | ||
19 | u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit); | ||
20 | u64 memblock_x86_memory_in_range(u64 addr, u64 limit); | ||
21 | bool memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align); | ||
22 | |||
23 | #endif | ||
diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h index e6283129c821..93f79094c224 100644 --- a/arch/x86/include/asm/mrst.h +++ b/arch/x86/include/asm/mrst.h | |||
@@ -31,11 +31,20 @@ enum mrst_cpu_type { | |||
31 | }; | 31 | }; |
32 | 32 | ||
33 | extern enum mrst_cpu_type __mrst_cpu_chip; | 33 | extern enum mrst_cpu_type __mrst_cpu_chip; |
34 | |||
35 | #ifdef CONFIG_X86_INTEL_MID | ||
36 | |||
34 | static inline enum mrst_cpu_type mrst_identify_cpu(void) | 37 | static inline enum mrst_cpu_type mrst_identify_cpu(void) |
35 | { | 38 | { |
36 | return __mrst_cpu_chip; | 39 | return __mrst_cpu_chip; |
37 | } | 40 | } |
38 | 41 | ||
42 | #else /* !CONFIG_X86_INTEL_MID */ | ||
43 | |||
44 | #define mrst_identify_cpu() (0) | ||
45 | |||
46 | #endif /* !CONFIG_X86_INTEL_MID */ | ||
47 | |||
39 | enum mrst_timer_options { | 48 | enum mrst_timer_options { |
40 | MRST_TIMER_DEFAULT, | 49 | MRST_TIMER_DEFAULT, |
41 | MRST_TIMER_APBT_ONLY, | 50 | MRST_TIMER_APBT_ONLY, |
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h index 084ef95274cd..95203d40ffdd 100644 --- a/arch/x86/include/asm/msr.h +++ b/arch/x86/include/asm/msr.h | |||
@@ -169,7 +169,14 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high) | |||
169 | return native_write_msr_safe(msr, low, high); | 169 | return native_write_msr_safe(msr, low, high); |
170 | } | 170 | } |
171 | 171 | ||
172 | /* rdmsr with exception handling */ | 172 | /* |
173 | * rdmsr with exception handling. | ||
174 | * | ||
175 | * Please note that the exception handling works only after we've | ||
176 | * switched to the "smart" #GP handler in trap_init() which knows about | ||
177 | * exception tables - using this macro earlier than that causes machine | ||
178 | * hangs on boxes which do not implement the @msr in the first argument. | ||
179 | */ | ||
173 | #define rdmsr_safe(msr, p1, p2) \ | 180 | #define rdmsr_safe(msr, p1, p2) \ |
174 | ({ \ | 181 | ({ \ |
175 | int __err; \ | 182 | int __err; \ |
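Given the warning added to the comment, a hedged usage sketch of rdmsr_safe(): it returns non-zero when the #GP fixup fired, so (after trap_init()) callers can probe MSRs that may not exist. The MSR chosen below is only an example:

	u32 lo, hi;

	/* Only safe after trap_init(); a non-zero return means the read faulted. */
	if (rdmsr_safe(MSR_IA32_PLATFORM_ID, &lo, &hi))
		pr_warn("MSR_IA32_PLATFORM_ID is not implemented on this CPU\n");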
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index f61c62f7d5d8..096c975e099f 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h | |||
@@ -57,6 +57,7 @@ | |||
57 | (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX)) | 57 | (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX)) |
58 | 58 | ||
59 | #define ARCH_PERFMON_BRANCH_MISSES_RETIRED 6 | 59 | #define ARCH_PERFMON_BRANCH_MISSES_RETIRED 6 |
60 | #define ARCH_PERFMON_EVENTS_COUNT 7 | ||
60 | 61 | ||
61 | /* | 62 | /* |
62 | * Intel "Architectural Performance Monitoring" CPUID | 63 | * Intel "Architectural Performance Monitoring" CPUID |
@@ -72,6 +73,19 @@ union cpuid10_eax { | |||
72 | unsigned int full; | 73 | unsigned int full; |
73 | }; | 74 | }; |
74 | 75 | ||
76 | union cpuid10_ebx { | ||
77 | struct { | ||
78 | unsigned int no_unhalted_core_cycles:1; | ||
79 | unsigned int no_instructions_retired:1; | ||
80 | unsigned int no_unhalted_reference_cycles:1; | ||
81 | unsigned int no_llc_reference:1; | ||
82 | unsigned int no_llc_misses:1; | ||
83 | unsigned int no_branch_instruction_retired:1; | ||
84 | unsigned int no_branch_misses_retired:1; | ||
85 | } split; | ||
86 | unsigned int full; | ||
87 | }; | ||
88 | |||
75 | union cpuid10_edx { | 89 | union cpuid10_edx { |
76 | struct { | 90 | struct { |
77 | unsigned int num_counters_fixed:5; | 91 | unsigned int num_counters_fixed:5; |
@@ -81,6 +95,15 @@ union cpuid10_edx { | |||
81 | unsigned int full; | 95 | unsigned int full; |
82 | }; | 96 | }; |
83 | 97 | ||
98 | struct x86_pmu_capability { | ||
99 | int version; | ||
100 | int num_counters_gp; | ||
101 | int num_counters_fixed; | ||
102 | int bit_width_gp; | ||
103 | int bit_width_fixed; | ||
104 | unsigned int events_mask; | ||
105 | int events_mask_len; | ||
106 | }; | ||
84 | 107 | ||
85 | /* | 108 | /* |
86 | * Fixed-purpose performance events: | 109 | * Fixed-purpose performance events: |
@@ -89,23 +112,24 @@ union cpuid10_edx { | |||
89 | /* | 112 | /* |
90 | * All 3 fixed-mode PMCs are configured via this single MSR: | 113 | * All 3 fixed-mode PMCs are configured via this single MSR: |
91 | */ | 114 | */ |
92 | #define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d | 115 | #define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d |
93 | 116 | ||
94 | /* | 117 | /* |
95 | * The counts are available in three separate MSRs: | 118 | * The counts are available in three separate MSRs: |
96 | */ | 119 | */ |
97 | 120 | ||
98 | /* Instr_Retired.Any: */ | 121 | /* Instr_Retired.Any: */ |
99 | #define MSR_ARCH_PERFMON_FIXED_CTR0 0x309 | 122 | #define MSR_ARCH_PERFMON_FIXED_CTR0 0x309 |
100 | #define X86_PMC_IDX_FIXED_INSTRUCTIONS (X86_PMC_IDX_FIXED + 0) | 123 | #define X86_PMC_IDX_FIXED_INSTRUCTIONS (X86_PMC_IDX_FIXED + 0) |
101 | 124 | ||
102 | /* CPU_CLK_Unhalted.Core: */ | 125 | /* CPU_CLK_Unhalted.Core: */ |
103 | #define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a | 126 | #define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a |
104 | #define X86_PMC_IDX_FIXED_CPU_CYCLES (X86_PMC_IDX_FIXED + 1) | 127 | #define X86_PMC_IDX_FIXED_CPU_CYCLES (X86_PMC_IDX_FIXED + 1) |
105 | 128 | ||
106 | /* CPU_CLK_Unhalted.Ref: */ | 129 | /* CPU_CLK_Unhalted.Ref: */ |
107 | #define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b | 130 | #define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b |
108 | #define X86_PMC_IDX_FIXED_BUS_CYCLES (X86_PMC_IDX_FIXED + 2) | 131 | #define X86_PMC_IDX_FIXED_REF_CYCLES (X86_PMC_IDX_FIXED + 2) |
132 | #define X86_PMC_MSK_FIXED_REF_CYCLES (1ULL << X86_PMC_IDX_FIXED_REF_CYCLES) | ||
109 | 133 | ||
110 | /* | 134 | /* |
111 | * We model BTS tracing as another fixed-mode PMC. | 135 | * We model BTS tracing as another fixed-mode PMC. |
@@ -202,6 +226,7 @@ struct perf_guest_switch_msr { | |||
202 | }; | 226 | }; |
203 | 227 | ||
204 | extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr); | 228 | extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr); |
229 | extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap); | ||
205 | #else | 230 | #else |
206 | static inline perf_guest_switch_msr *perf_guest_get_msrs(int *nr) | 231 | static inline perf_guest_switch_msr *perf_guest_get_msrs(int *nr) |
207 | { | 232 | { |
@@ -209,6 +234,11 @@ static inline perf_guest_switch_msr *perf_guest_get_msrs(int *nr) | |||
209 | return NULL; | 234 | return NULL; |
210 | } | 235 | } |
211 | 236 | ||
237 | static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap) | ||
238 | { | ||
239 | memset(cap, 0, sizeof(*cap)); | ||
240 | } | ||
241 | |||
212 | static inline void perf_events_lapic_init(void) { } | 242 | static inline void perf_events_lapic_init(void) { } |
213 | #endif | 243 | #endif |
214 | 244 | ||
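perf_get_x86_pmu_capability() fills the new struct x86_pmu_capability (and the stub variant zeroes it), so in-kernel users such as a hypervisor can query the host PMU without parsing CPUID themselves. A hedged consumer sketch using only the fields defined above:

	struct x86_pmu_capability cap;

	perf_get_x86_pmu_capability(&cap);
	if (!cap.version)
		pr_info("no architectural PMU detected\n");
	else
		pr_info("PMU v%d: %d GP counters (%d bits), %d fixed counters\n",
			cap.version, cap.num_counters_gp,
			cap.bit_width_gp, cap.num_counters_fixed);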
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h index fa7b9176b76c..431793e5d484 100644 --- a/arch/x86/include/asm/timer.h +++ b/arch/x86/include/asm/timer.h | |||
@@ -32,6 +32,22 @@ extern int no_timer_check; | |||
32 | * (mathieu.desnoyers@polymtl.ca) | 32 | * (mathieu.desnoyers@polymtl.ca) |
33 | * | 33 | * |
34 | * -johnstul@us.ibm.com "math is hard, lets go shopping!" | 34 | * -johnstul@us.ibm.com "math is hard, lets go shopping!" |
35 | * | ||
36 | * In: | ||
37 | * | ||
38 | * ns = cycles * cyc2ns_scale / SC | ||
39 | * | ||
40 | * Although we may still have enough bits to store the value of ns, | ||
41 | * in some cases, we may not have enough bits to store cycles * cyc2ns_scale, | ||
42 | * leading to an incorrect result. | ||
43 | * | ||
44 | * To avoid this, we can decompose 'cycles' into quotient and remainder | ||
45 | * of division by SC. Then, | ||
46 | * | ||
47 | * ns = (quot * SC + rem) * cyc2ns_scale / SC | ||
48 | * = quot * cyc2ns_scale + (rem * cyc2ns_scale) / SC | ||
49 | * | ||
50 | * - sqazi@google.com | ||
35 | */ | 51 | */ |
36 | 52 | ||
37 | DECLARE_PER_CPU(unsigned long, cyc2ns); | 53 | DECLARE_PER_CPU(unsigned long, cyc2ns); |
@@ -41,9 +57,14 @@ DECLARE_PER_CPU(unsigned long long, cyc2ns_offset); | |||
41 | 57 | ||
42 | static inline unsigned long long __cycles_2_ns(unsigned long long cyc) | 58 | static inline unsigned long long __cycles_2_ns(unsigned long long cyc) |
43 | { | 59 | { |
60 | unsigned long long quot; | ||
61 | unsigned long long rem; | ||
44 | int cpu = smp_processor_id(); | 62 | int cpu = smp_processor_id(); |
45 | unsigned long long ns = per_cpu(cyc2ns_offset, cpu); | 63 | unsigned long long ns = per_cpu(cyc2ns_offset, cpu); |
46 | ns += cyc * per_cpu(cyc2ns, cpu) >> CYC2NS_SCALE_FACTOR; | 64 | quot = (cyc >> CYC2NS_SCALE_FACTOR); |
65 | rem = cyc & ((1ULL << CYC2NS_SCALE_FACTOR) - 1); | ||
66 | ns += quot * per_cpu(cyc2ns, cpu) + | ||
67 | ((rem * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR); | ||
47 | return ns; | 68 | return ns; |
48 | } | 69 | } |
49 | 70 | ||
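The new comment's point is that cyc * cyc2ns_scale can overflow 64 bits even when the final nanosecond value fits, so cyc is split into a quotient and remainder of 2^CYC2NS_SCALE_FACTOR before multiplying. A worked sketch with made-up numbers (CYC2NS_SCALE_FACTOR is taken as 10 here; the cycle count and scale are purely illustrative):

	u64 cyc   = 0x0123456789abcdefULL;	/* ~8.2e16 cycles                      */
	u64 scale = 3000;			/* illustrative per-cpu cyc2ns value   */
	u64 quot  = cyc >> 10;			/* cyc / 1024                          */
	u64 rem   = cyc & 1023;			/* cyc % 1024                          */
	u64 ns    = quot * scale + ((rem * scale) >> 10);
	/*
	 * quot * scale (~2.4e17) and rem * scale (< 3.1e6) both fit in 64 bits,
	 * whereas cyc * scale (~2.5e20) would have wrapped before the shift.
	 */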
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h index 83e2efd181e2..15d99153a96d 100644 --- a/arch/x86/include/asm/tsc.h +++ b/arch/x86/include/asm/tsc.h | |||
@@ -51,6 +51,8 @@ extern int unsynchronized_tsc(void); | |||
51 | extern int check_tsc_unstable(void); | 51 | extern int check_tsc_unstable(void); |
52 | extern unsigned long native_calibrate_tsc(void); | 52 | extern unsigned long native_calibrate_tsc(void); |
53 | 53 | ||
54 | extern int tsc_clocksource_reliable; | ||
55 | |||
54 | /* | 56 | /* |
55 | * Boot-time check whether the TSCs are synchronized across | 57 | * Boot-time check whether the TSCs are synchronized across |
56 | * all CPUs/cores: | 58 | * all CPUs/cores: |
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h index 10474fb1185d..cf1d73643f60 100644 --- a/arch/x86/include/asm/uv/uv_mmrs.h +++ b/arch/x86/include/asm/uv/uv_mmrs.h | |||
@@ -57,6 +57,7 @@ | |||
57 | 57 | ||
58 | #define UV1_HUB_PART_NUMBER 0x88a5 | 58 | #define UV1_HUB_PART_NUMBER 0x88a5 |
59 | #define UV2_HUB_PART_NUMBER 0x8eb8 | 59 | #define UV2_HUB_PART_NUMBER 0x8eb8 |
60 | #define UV2_HUB_PART_NUMBER_X 0x1111 | ||
60 | 61 | ||
61 | /* Compat: if this #define is present, UV headers support UV2 */ | 62 | /* Compat: if this #define is present, UV headers support UV2 */ |
62 | #define UV2_HUB_IS_SUPPORTED 1 | 63 | #define UV2_HUB_IS_SUPPORTED 1 |
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index 3d2661ca6542..6e76c191a835 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c | |||
@@ -88,13 +88,13 @@ static u32 __init allocate_aperture(void) | |||
88 | */ | 88 | */ |
89 | addr = memblock_find_in_range(GART_MIN_ADDR, GART_MAX_ADDR, | 89 | addr = memblock_find_in_range(GART_MIN_ADDR, GART_MAX_ADDR, |
90 | aper_size, aper_size); | 90 | aper_size, aper_size); |
91 | if (addr == MEMBLOCK_ERROR || addr + aper_size > GART_MAX_ADDR) { | 91 | if (!addr || addr + aper_size > GART_MAX_ADDR) { |
92 | printk(KERN_ERR | 92 | printk(KERN_ERR |
93 | "Cannot allocate aperture memory hole (%lx,%uK)\n", | 93 | "Cannot allocate aperture memory hole (%lx,%uK)\n", |
94 | addr, aper_size>>10); | 94 | addr, aper_size>>10); |
95 | return 0; | 95 | return 0; |
96 | } | 96 | } |
97 | memblock_x86_reserve_range(addr, addr + aper_size, "aperture64"); | 97 | memblock_reserve(addr, aper_size); |
98 | /* | 98 | /* |
99 | * Kmemleak should not scan this block as it may not be mapped via the | 99 | * Kmemleak should not scan this block as it may not be mapped via the |
100 | * kernel direct mapping. | 100 | * kernel direct mapping. |
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index ff69d5d79ca7..2eec05b6d1b8 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
@@ -887,8 +887,8 @@ void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs) | |||
887 | * Besides, if we don't, timer interrupts ignore the global | 887 | * Besides, if we don't, timer interrupts ignore the global |
888 | * interrupt lock, which is the WrongThing (tm) to do. | 888 | * interrupt lock, which is the WrongThing (tm) to do. |
889 | */ | 889 | */ |
890 | exit_idle(); | ||
891 | irq_enter(); | 890 | irq_enter(); |
891 | exit_idle(); | ||
892 | local_apic_timer_interrupt(); | 892 | local_apic_timer_interrupt(); |
893 | irq_exit(); | 893 | irq_exit(); |
894 | 894 | ||
@@ -1864,8 +1864,8 @@ void smp_spurious_interrupt(struct pt_regs *regs) | |||
1864 | { | 1864 | { |
1865 | u32 v; | 1865 | u32 v; |
1866 | 1866 | ||
1867 | exit_idle(); | ||
1868 | irq_enter(); | 1867 | irq_enter(); |
1868 | exit_idle(); | ||
1869 | /* | 1869 | /* |
1870 | * Check if this really is a spurious interrupt and ACK it | 1870 | * Check if this really is a spurious interrupt and ACK it |
1871 | * if it is a vectored one. Just in case... | 1871 | * if it is a vectored one. Just in case... |
@@ -1901,8 +1901,8 @@ void smp_error_interrupt(struct pt_regs *regs) | |||
1901 | "Illegal register address", /* APIC Error Bit 7 */ | 1901 | "Illegal register address", /* APIC Error Bit 7 */ |
1902 | }; | 1902 | }; |
1903 | 1903 | ||
1904 | exit_idle(); | ||
1905 | irq_enter(); | 1904 | irq_enter(); |
1905 | exit_idle(); | ||
1906 | /* First tickle the hardware, only then report what went on. -- REW */ | 1906 | /* First tickle the hardware, only then report what went on. -- REW */ |
1907 | v0 = apic_read(APIC_ESR); | 1907 | v0 = apic_read(APIC_ESR); |
1908 | apic_write(APIC_ESR, 0); | 1908 | apic_write(APIC_ESR, 0); |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 45b461fdb344..fb072754bc1d 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -2421,8 +2421,8 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) | |||
2421 | unsigned vector, me; | 2421 | unsigned vector, me; |
2422 | 2422 | ||
2423 | ack_APIC_irq(); | 2423 | ack_APIC_irq(); |
2424 | exit_idle(); | ||
2425 | irq_enter(); | 2424 | irq_enter(); |
2425 | exit_idle(); | ||
2426 | 2426 | ||
2427 | me = smp_processor_id(); | 2427 | me = smp_processor_id(); |
2428 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { | 2428 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { |
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 62ae3001ae02..9d59bbacd4e3 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c | |||
@@ -93,6 +93,8 @@ static int __init early_get_pnodeid(void) | |||
93 | 93 | ||
94 | if (node_id.s.part_number == UV2_HUB_PART_NUMBER) | 94 | if (node_id.s.part_number == UV2_HUB_PART_NUMBER) |
95 | uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1; | 95 | uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1; |
96 | if (node_id.s.part_number == UV2_HUB_PART_NUMBER_X) | ||
97 | uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1; | ||
96 | 98 | ||
97 | uv_hub_info->hub_revision = uv_min_hub_revision_id; | 99 | uv_hub_info->hub_revision = uv_min_hub_revision_id; |
98 | pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1); | 100 | pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1); |
diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c index 452932d34730..5da1269e8ddc 100644 --- a/arch/x86/kernel/check.c +++ b/arch/x86/kernel/check.c | |||
@@ -62,7 +62,8 @@ early_param("memory_corruption_check_size", set_corruption_check_size); | |||
62 | 62 | ||
63 | void __init setup_bios_corruption_check(void) | 63 | void __init setup_bios_corruption_check(void) |
64 | { | 64 | { |
65 | u64 addr = PAGE_SIZE; /* assume first page is reserved anyway */ | 65 | phys_addr_t start, end; |
66 | u64 i; | ||
66 | 67 | ||
67 | if (memory_corruption_check == -1) { | 68 | if (memory_corruption_check == -1) { |
68 | memory_corruption_check = | 69 | memory_corruption_check = |
@@ -82,28 +83,23 @@ void __init setup_bios_corruption_check(void) | |||
82 | 83 | ||
83 | corruption_check_size = round_up(corruption_check_size, PAGE_SIZE); | 84 | corruption_check_size = round_up(corruption_check_size, PAGE_SIZE); |
84 | 85 | ||
85 | while (addr < corruption_check_size && num_scan_areas < MAX_SCAN_AREAS) { | 86 | for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) { |
86 | u64 size; | 87 | start = clamp_t(phys_addr_t, round_up(start, PAGE_SIZE), |
87 | addr = memblock_x86_find_in_range_size(addr, &size, PAGE_SIZE); | 88 | PAGE_SIZE, corruption_check_size); |
89 | end = clamp_t(phys_addr_t, round_down(end, PAGE_SIZE), | ||
90 | PAGE_SIZE, corruption_check_size); | ||
91 | if (start >= end) | ||
92 | continue; | ||
88 | 93 | ||
89 | if (addr == MEMBLOCK_ERROR) | 94 | memblock_reserve(start, end - start); |
90 | break; | 95 | scan_areas[num_scan_areas].addr = start; |
91 | 96 | scan_areas[num_scan_areas].size = end - start; | |
92 | if (addr >= corruption_check_size) | ||
93 | break; | ||
94 | |||
95 | if ((addr + size) > corruption_check_size) | ||
96 | size = corruption_check_size - addr; | ||
97 | |||
98 | memblock_x86_reserve_range(addr, addr + size, "SCAN RAM"); | ||
99 | scan_areas[num_scan_areas].addr = addr; | ||
100 | scan_areas[num_scan_areas].size = size; | ||
101 | num_scan_areas++; | ||
102 | 97 | ||
103 | /* Assume we've already mapped this early memory */ | 98 | /* Assume we've already mapped this early memory */ |
104 | memset(__va(addr), 0, size); | 99 | memset(__va(start), 0, end - start); |
105 | 100 | ||
106 | addr += size; | 101 | if (++num_scan_areas >= MAX_SCAN_AREAS) |
102 | break; | ||
107 | } | 103 | } |
108 | 104 | ||
109 | if (num_scan_areas) | 105 | if (num_scan_areas) |
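The rewritten scan walks every free range memblock knows about and clips each one to [PAGE_SIZE, corruption_check_size) before reserving it; for_each_free_mem_range() yields the (start, end) pairs and clamp_t() does the clipping. A reduced sketch of that pattern, with 'limit' standing in for corruption_check_size:

	phys_addr_t start, end;
	u64 i;

	for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) {
		start = clamp_t(phys_addr_t, round_up(start, PAGE_SIZE),
				PAGE_SIZE, limit);
		end = clamp_t(phys_addr_t, round_down(end, PAGE_SIZE),
			      PAGE_SIZE, limit);
		if (start >= end)
			continue;
		memblock_reserve(start, end - start);	/* keep later allocations out */
	}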
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index 787e06c84ea6..ce215616d5b9 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c | |||
@@ -397,8 +397,8 @@ static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt; | |||
397 | 397 | ||
398 | asmlinkage void smp_thermal_interrupt(struct pt_regs *regs) | 398 | asmlinkage void smp_thermal_interrupt(struct pt_regs *regs) |
399 | { | 399 | { |
400 | exit_idle(); | ||
401 | irq_enter(); | 400 | irq_enter(); |
401 | exit_idle(); | ||
402 | inc_irq_stat(irq_thermal_count); | 402 | inc_irq_stat(irq_thermal_count); |
403 | smp_thermal_vector(); | 403 | smp_thermal_vector(); |
404 | irq_exit(); | 404 | irq_exit(); |
diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c index d746df2909c9..aa578cadb940 100644 --- a/arch/x86/kernel/cpu/mcheck/threshold.c +++ b/arch/x86/kernel/cpu/mcheck/threshold.c | |||
@@ -19,8 +19,8 @@ void (*mce_threshold_vector)(void) = default_threshold_interrupt; | |||
19 | 19 | ||
20 | asmlinkage void smp_threshold_interrupt(void) | 20 | asmlinkage void smp_threshold_interrupt(void) |
21 | { | 21 | { |
22 | exit_idle(); | ||
23 | irq_enter(); | 22 | irq_enter(); |
23 | exit_idle(); | ||
24 | inc_irq_stat(irq_threshold_count); | 24 | inc_irq_stat(irq_threshold_count); |
25 | mce_threshold_vector(); | 25 | mce_threshold_vector(); |
26 | irq_exit(); | 26 | irq_exit(); |
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index a71efcdbb092..97b26356e9ee 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c | |||
@@ -547,6 +547,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base, | |||
547 | 547 | ||
548 | if (tmp != mask_lo) { | 548 | if (tmp != mask_lo) { |
549 | printk(KERN_WARNING "mtrr: your BIOS has configured an incorrect mask, fixing it.\n"); | 549 | printk(KERN_WARNING "mtrr: your BIOS has configured an incorrect mask, fixing it.\n"); |
550 | add_taint(TAINT_FIRMWARE_WORKAROUND); | ||
550 | mask_lo = tmp; | 551 | mask_lo = tmp; |
551 | } | 552 | } |
552 | } | 553 | } |
@@ -693,6 +694,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock) | |||
693 | 694 | ||
694 | /* Disable MTRRs, and set the default type to uncached */ | 695 | /* Disable MTRRs, and set the default type to uncached */ |
695 | mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi); | 696 | mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi); |
697 | wbinvd(); | ||
696 | } | 698 | } |
697 | 699 | ||
698 | static void post_set(void) __releases(set_atomicity_lock) | 700 | static void post_set(void) __releases(set_atomicity_lock) |
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 640891014b2a..5adce1040b11 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -312,12 +312,8 @@ int x86_setup_perfctr(struct perf_event *event) | |||
312 | return -EOPNOTSUPP; | 312 | return -EOPNOTSUPP; |
313 | } | 313 | } |
314 | 314 | ||
315 | /* | ||
316 | * Do not allow config1 (extended registers) to propagate, | ||
317 | * there's no sane user-space generalization yet: | ||
318 | */ | ||
319 | if (attr->type == PERF_TYPE_RAW) | 315 | if (attr->type == PERF_TYPE_RAW) |
320 | return 0; | 316 | return x86_pmu_extra_regs(event->attr.config, event); |
321 | 317 | ||
322 | if (attr->type == PERF_TYPE_HW_CACHE) | 318 | if (attr->type == PERF_TYPE_HW_CACHE) |
323 | return set_ext_hw_attr(hwc, event); | 319 | return set_ext_hw_attr(hwc, event); |
@@ -488,18 +484,195 @@ static inline int is_x86_event(struct perf_event *event) | |||
488 | return event->pmu == &pmu; | 484 | return event->pmu == &pmu; |
489 | } | 485 | } |
490 | 486 | ||
487 | /* | ||
488 | * Event scheduler state: | ||
489 | * | ||
490 | * Assign events iterating over all events and counters, starting | ||
491 | * with the events of least weight. Keep the current iterator | ||
492 | * state in struct sched_state. | ||
493 | */ | ||
494 | struct sched_state { | ||
495 | int weight; | ||
496 | int event; /* event index */ | ||
497 | int counter; /* counter index */ | ||
498 | int unassigned; /* number of events to be assigned left */ | ||
499 | unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | ||
500 | }; | ||
501 | |||
502 | /* Total max is X86_PMC_IDX_MAX, but we are O(n!) limited */ | ||
503 | #define SCHED_STATES_MAX 2 | ||
504 | |||
505 | struct perf_sched { | ||
506 | int max_weight; | ||
507 | int max_events; | ||
508 | struct event_constraint **constraints; | ||
509 | struct sched_state state; | ||
510 | int saved_states; | ||
511 | struct sched_state saved[SCHED_STATES_MAX]; | ||
512 | }; | ||
513 | |||
514 | /* | ||
515 | * Initialize the iterator that runs through all events and counters. | ||
516 | */ | ||
517 | static void perf_sched_init(struct perf_sched *sched, struct event_constraint **c, | ||
518 | int num, int wmin, int wmax) | ||
519 | { | ||
520 | int idx; | ||
521 | |||
522 | memset(sched, 0, sizeof(*sched)); | ||
523 | sched->max_events = num; | ||
524 | sched->max_weight = wmax; | ||
525 | sched->constraints = c; | ||
526 | |||
527 | for (idx = 0; idx < num; idx++) { | ||
528 | if (c[idx]->weight == wmin) | ||
529 | break; | ||
530 | } | ||
531 | |||
532 | sched->state.event = idx; /* start with min weight */ | ||
533 | sched->state.weight = wmin; | ||
534 | sched->state.unassigned = num; | ||
535 | } | ||
536 | |||
537 | static void perf_sched_save_state(struct perf_sched *sched) | ||
538 | { | ||
539 | if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX)) | ||
540 | return; | ||
541 | |||
542 | sched->saved[sched->saved_states] = sched->state; | ||
543 | sched->saved_states++; | ||
544 | } | ||
545 | |||
546 | static bool perf_sched_restore_state(struct perf_sched *sched) | ||
547 | { | ||
548 | if (!sched->saved_states) | ||
549 | return false; | ||
550 | |||
551 | sched->saved_states--; | ||
552 | sched->state = sched->saved[sched->saved_states]; | ||
553 | |||
554 | /* continue with next counter: */ | ||
555 | clear_bit(sched->state.counter++, sched->state.used); | ||
556 | |||
557 | return true; | ||
558 | } | ||
559 | |||
560 | /* | ||
561 | * Select a counter for the current event to schedule. Return true on | ||
562 | * success. | ||
563 | */ | ||
564 | static bool __perf_sched_find_counter(struct perf_sched *sched) | ||
565 | { | ||
566 | struct event_constraint *c; | ||
567 | int idx; | ||
568 | |||
569 | if (!sched->state.unassigned) | ||
570 | return false; | ||
571 | |||
572 | if (sched->state.event >= sched->max_events) | ||
573 | return false; | ||
574 | |||
575 | c = sched->constraints[sched->state.event]; | ||
576 | |||
577 | /* Prefer fixed purpose counters */ | ||
578 | if (x86_pmu.num_counters_fixed) { | ||
579 | idx = X86_PMC_IDX_FIXED; | ||
580 | for_each_set_bit_cont(idx, c->idxmsk, X86_PMC_IDX_MAX) { | ||
581 | if (!__test_and_set_bit(idx, sched->state.used)) | ||
582 | goto done; | ||
583 | } | ||
584 | } | ||
585 | /* Grab the first unused counter starting with idx */ | ||
586 | idx = sched->state.counter; | ||
587 | for_each_set_bit_cont(idx, c->idxmsk, X86_PMC_IDX_FIXED) { | ||
588 | if (!__test_and_set_bit(idx, sched->state.used)) | ||
589 | goto done; | ||
590 | } | ||
591 | |||
592 | return false; | ||
593 | |||
594 | done: | ||
595 | sched->state.counter = idx; | ||
596 | |||
597 | if (c->overlap) | ||
598 | perf_sched_save_state(sched); | ||
599 | |||
600 | return true; | ||
601 | } | ||
602 | |||
603 | static bool perf_sched_find_counter(struct perf_sched *sched) | ||
604 | { | ||
605 | while (!__perf_sched_find_counter(sched)) { | ||
606 | if (!perf_sched_restore_state(sched)) | ||
607 | return false; | ||
608 | } | ||
609 | |||
610 | return true; | ||
611 | } | ||
612 | |||
613 | /* | ||
614 | * Go through all unassigned events and find the next one to schedule. | ||
615 | * Take events with the least weight first. Return true on success. | ||
616 | */ | ||
617 | static bool perf_sched_next_event(struct perf_sched *sched) | ||
618 | { | ||
619 | struct event_constraint *c; | ||
620 | |||
621 | if (!sched->state.unassigned || !--sched->state.unassigned) | ||
622 | return false; | ||
623 | |||
624 | do { | ||
625 | /* next event */ | ||
626 | sched->state.event++; | ||
627 | if (sched->state.event >= sched->max_events) { | ||
628 | /* next weight */ | ||
629 | sched->state.event = 0; | ||
630 | sched->state.weight++; | ||
631 | if (sched->state.weight > sched->max_weight) | ||
632 | return false; | ||
633 | } | ||
634 | c = sched->constraints[sched->state.event]; | ||
635 | } while (c->weight != sched->state.weight); | ||
636 | |||
637 | sched->state.counter = 0; /* start with first counter */ | ||
638 | |||
639 | return true; | ||
640 | } | ||
641 | |||
642 | /* | ||
643 | * Assign a counter for each event. | ||
644 | */ | ||
645 | static int perf_assign_events(struct event_constraint **constraints, int n, | ||
646 | int wmin, int wmax, int *assign) | ||
647 | { | ||
648 | struct perf_sched sched; | ||
649 | |||
650 | perf_sched_init(&sched, constraints, n, wmin, wmax); | ||
651 | |||
652 | do { | ||
653 | if (!perf_sched_find_counter(&sched)) | ||
654 | break; /* failed */ | ||
655 | if (assign) | ||
656 | assign[sched.state.event] = sched.state.counter; | ||
657 | } while (perf_sched_next_event(&sched)); | ||
658 | |||
659 | return sched.state.unassigned; | ||
660 | } | ||
661 | |||
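The scheduler introduced above replaces the old single-pass loop with an iterator plus limited backtracking: events are visited in order of increasing constraint weight, each one takes the first free counter allowed by its mask, and a constraint flagged as overlapping snapshots the iterator state so a later dead end can resume from the next candidate counter. The following standalone C sketch illustrates the same assignment idea with plain arrays and recursion instead of the kernel's struct perf_sched and bitmap helpers; all names and masks here are illustrative, not kernel API.

/*
 * Standalone sketch of constrained counter assignment (not kernel code).
 * Each event carries a bitmask of counters it may use; events are tried
 * in order of increasing weight (fewest allowed counters first) and a
 * failed branch backtracks to retry earlier events on other counters.
 */
#include <stdio.h>
#include <stdlib.h>

struct ev {
	unsigned long idxmsk;	/* allowed counters, one bit per counter */
	int weight;		/* number of set bits in idxmsk */
	int counter;		/* assigned counter, -1 if none */
};

static int cmp_weight(const void *a, const void *b)
{
	return ((const struct ev *)a)->weight - ((const struct ev *)b)->weight;
}

static int assign(struct ev *e, int n, int i, unsigned long used)
{
	int c;

	if (i == n)
		return 1;			/* all events placed */

	for (c = 0; c < 16; c++) {		/* plenty of counters for the example */
		if (!(e[i].idxmsk & (1UL << c)) || (used & (1UL << c)))
			continue;
		e[i].counter = c;
		if (assign(e, n, i + 1, used | (1UL << c)))
			return 1;		/* the rest fits, keep this choice */
	}
	e[i].counter = -1;
	return 0;				/* dead end, caller retries */
}

int main(void)
{
	/* masks echo the AMD Fam15h example: 0x09 overlaps 0x07 and 0x38 */
	struct ev e[] = {
		{ 0x09, 2, -1 },
		{ 0x07, 3, -1 },
		{ 0x38, 3, -1 },
	};
	int i, n = 3;

	qsort(e, n, sizeof(e[0]), cmp_weight);	/* most constrained first */
	if (assign(e, n, 0, 0))
		for (i = 0; i < n; i++)
			printf("event with mask 0x%02lx -> counter %d\n",
			       e[i].idxmsk, e[i].counter);
	else
		printf("schedule failed\n");
	return 0;
}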
491 | int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) | 662 | int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) |
492 | { | 663 | { |
493 | struct event_constraint *c, *constraints[X86_PMC_IDX_MAX]; | 664 | struct event_constraint *c, *constraints[X86_PMC_IDX_MAX]; |
494 | unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | 665 | unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; |
495 | int i, j, w, wmax, num = 0; | 666 | int i, wmin, wmax, num = 0; |
496 | struct hw_perf_event *hwc; | 667 | struct hw_perf_event *hwc; |
497 | 668 | ||
498 | bitmap_zero(used_mask, X86_PMC_IDX_MAX); | 669 | bitmap_zero(used_mask, X86_PMC_IDX_MAX); |
499 | 670 | ||
500 | for (i = 0; i < n; i++) { | 671 | for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) { |
501 | c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]); | 672 | c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]); |
502 | constraints[i] = c; | 673 | constraints[i] = c; |
674 | wmin = min(wmin, c->weight); | ||
675 | wmax = max(wmax, c->weight); | ||
503 | } | 676 | } |
504 | 677 | ||
505 | /* | 678 | /* |
@@ -525,59 +698,11 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) | |||
525 | if (assign) | 698 | if (assign) |
526 | assign[i] = hwc->idx; | 699 | assign[i] = hwc->idx; |
527 | } | 700 | } |
528 | if (i == n) | ||
529 | goto done; | ||
530 | |||
531 | /* | ||
532 | * begin slow path | ||
533 | */ | ||
534 | |||
535 | bitmap_zero(used_mask, X86_PMC_IDX_MAX); | ||
536 | |||
537 | /* | ||
538 | * weight = number of possible counters | ||
539 | * | ||
540 | * 1 = most constrained, only works on one counter | ||
541 | * wmax = least constrained, works on any counter | ||
542 | * | ||
543 | * assign events to counters starting with most | ||
544 | * constrained events. | ||
545 | */ | ||
546 | wmax = x86_pmu.num_counters; | ||
547 | |||
548 | /* | ||
549 | * when fixed event counters are present, | ||
550 | * wmax is incremented by 1 to account | ||
551 | * for one more choice | ||
552 | */ | ||
553 | if (x86_pmu.num_counters_fixed) | ||
554 | wmax++; | ||
555 | |||
556 | for (w = 1, num = n; num && w <= wmax; w++) { | ||
557 | /* for each event */ | ||
558 | for (i = 0; num && i < n; i++) { | ||
559 | c = constraints[i]; | ||
560 | hwc = &cpuc->event_list[i]->hw; | ||
561 | |||
562 | if (c->weight != w) | ||
563 | continue; | ||
564 | 701 | ||
565 | for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) { | 702 | /* slow path */ |
566 | if (!test_bit(j, used_mask)) | 703 | if (i != n) |
567 | break; | 704 | num = perf_assign_events(constraints, n, wmin, wmax, assign); |
568 | } | ||
569 | |||
570 | if (j == X86_PMC_IDX_MAX) | ||
571 | break; | ||
572 | 705 | ||
573 | __set_bit(j, used_mask); | ||
574 | |||
575 | if (assign) | ||
576 | assign[i] = j; | ||
577 | num--; | ||
578 | } | ||
579 | } | ||
580 | done: | ||
581 | /* | 706 | /* |
582 | * scheduling failed or is just a simulation, | 707 | * scheduling failed or is just a simulation, |
583 | * free resources if necessary | 708 | * free resources if necessary |
@@ -588,7 +713,7 @@ done: | |||
588 | x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]); | 713 | x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]); |
589 | } | 714 | } |
590 | } | 715 | } |
591 | return num ? -ENOSPC : 0; | 716 | return num ? -EINVAL : 0; |
592 | } | 717 | } |
593 | 718 | ||
594 | /* | 719 | /* |
@@ -607,7 +732,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, | |||
607 | 732 | ||
608 | if (is_x86_event(leader)) { | 733 | if (is_x86_event(leader)) { |
609 | if (n >= max_count) | 734 | if (n >= max_count) |
610 | return -ENOSPC; | 735 | return -EINVAL; |
611 | cpuc->event_list[n] = leader; | 736 | cpuc->event_list[n] = leader; |
612 | n++; | 737 | n++; |
613 | } | 738 | } |
@@ -620,7 +745,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, | |||
620 | continue; | 745 | continue; |
621 | 746 | ||
622 | if (n >= max_count) | 747 | if (n >= max_count) |
623 | return -ENOSPC; | 748 | return -EINVAL; |
624 | 749 | ||
625 | cpuc->event_list[n] = event; | 750 | cpuc->event_list[n] = event; |
626 | n++; | 751 | n++; |
@@ -1123,6 +1248,7 @@ static void __init pmu_check_apic(void) | |||
1123 | 1248 | ||
1124 | static int __init init_hw_perf_events(void) | 1249 | static int __init init_hw_perf_events(void) |
1125 | { | 1250 | { |
1251 | struct x86_pmu_quirk *quirk; | ||
1126 | struct event_constraint *c; | 1252 | struct event_constraint *c; |
1127 | int err; | 1253 | int err; |
1128 | 1254 | ||
@@ -1151,8 +1277,8 @@ static int __init init_hw_perf_events(void) | |||
1151 | 1277 | ||
1152 | pr_cont("%s PMU driver.\n", x86_pmu.name); | 1278 | pr_cont("%s PMU driver.\n", x86_pmu.name); |
1153 | 1279 | ||
1154 | if (x86_pmu.quirks) | 1280 | for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next) |
1155 | x86_pmu.quirks(); | 1281 | quirk->func(); |
1156 | 1282 | ||
1157 | if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) { | 1283 | if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) { |
1158 | WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!", | 1284 | WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!", |
@@ -1175,12 +1301,18 @@ static int __init init_hw_perf_events(void) | |||
1175 | 1301 | ||
1176 | unconstrained = (struct event_constraint) | 1302 | unconstrained = (struct event_constraint) |
1177 | __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1, | 1303 | __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1, |
1178 | 0, x86_pmu.num_counters); | 1304 | 0, x86_pmu.num_counters, 0); |
1179 | 1305 | ||
1180 | if (x86_pmu.event_constraints) { | 1306 | if (x86_pmu.event_constraints) { |
1307 | /* | ||
1308 | * the event on fixed counter 2 (REF_CYCLES) only works on this | ||
1309 | * counter, so do not extend the mask to generic counters | ||
1310 | */ | ||
1181 | for_each_event_constraint(c, x86_pmu.event_constraints) { | 1311 | for_each_event_constraint(c, x86_pmu.event_constraints) { |
1182 | if (c->cmask != X86_RAW_EVENT_MASK) | 1312 | if (c->cmask != X86_RAW_EVENT_MASK |
1313 | || c->idxmsk64 == X86_PMC_MSK_FIXED_REF_CYCLES) { | ||
1183 | continue; | 1314 | continue; |
1315 | } | ||
1184 | 1316 | ||
1185 | c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1; | 1317 | c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1; |
1186 | c->weight += x86_pmu.num_counters; | 1318 | c->weight += x86_pmu.num_counters; |
@@ -1316,7 +1448,7 @@ static int validate_event(struct perf_event *event) | |||
1316 | c = x86_pmu.get_event_constraints(fake_cpuc, event); | 1448 | c = x86_pmu.get_event_constraints(fake_cpuc, event); |
1317 | 1449 | ||
1318 | if (!c || !c->weight) | 1450 | if (!c || !c->weight) |
1319 | ret = -ENOSPC; | 1451 | ret = -EINVAL; |
1320 | 1452 | ||
1321 | if (x86_pmu.put_event_constraints) | 1453 | if (x86_pmu.put_event_constraints) |
1322 | x86_pmu.put_event_constraints(fake_cpuc, event); | 1454 | x86_pmu.put_event_constraints(fake_cpuc, event); |
@@ -1341,7 +1473,7 @@ static int validate_group(struct perf_event *event) | |||
1341 | { | 1473 | { |
1342 | struct perf_event *leader = event->group_leader; | 1474 | struct perf_event *leader = event->group_leader; |
1343 | struct cpu_hw_events *fake_cpuc; | 1475 | struct cpu_hw_events *fake_cpuc; |
1344 | int ret = -ENOSPC, n; | 1476 | int ret = -EINVAL, n; |
1345 | 1477 | ||
1346 | fake_cpuc = allocate_fake_cpuc(); | 1478 | fake_cpuc = allocate_fake_cpuc(); |
1347 | if (IS_ERR(fake_cpuc)) | 1479 | if (IS_ERR(fake_cpuc)) |
@@ -1570,3 +1702,15 @@ unsigned long perf_misc_flags(struct pt_regs *regs) | |||
1570 | 1702 | ||
1571 | return misc; | 1703 | return misc; |
1572 | } | 1704 | } |
1705 | |||
1706 | void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap) | ||
1707 | { | ||
1708 | cap->version = x86_pmu.version; | ||
1709 | cap->num_counters_gp = x86_pmu.num_counters; | ||
1710 | cap->num_counters_fixed = x86_pmu.num_counters_fixed; | ||
1711 | cap->bit_width_gp = x86_pmu.cntval_bits; | ||
1712 | cap->bit_width_fixed = x86_pmu.cntval_bits; | ||
1713 | cap->events_mask = (unsigned int)x86_pmu.events_maskl; | ||
1714 | cap->events_mask_len = x86_pmu.events_mask_len; | ||
1715 | } | ||
1716 | EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability); | ||
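perf_get_x86_pmu_capability() simply snapshots the PMU geometry into a caller-supplied structure. A hypothetical consumer of that snapshot might look like the sketch below; the field names follow the assignments above, but the mirror struct, the report() helper, and the sample values are invented for illustration and compile standalone rather than against kernel headers.

/* Illustration only: a userspace mirror of the capability snapshot. */
#include <stdio.h>

struct x86_pmu_capability {
	int version;
	int num_counters_gp;
	int num_counters_fixed;
	int bit_width_gp;
	int bit_width_fixed;
	unsigned int events_mask;
	int events_mask_len;
};

static void report(const struct x86_pmu_capability *cap)
{
	printf("perfmon v%d: %d generic x %d bits, %d fixed x %d bits\n",
	       cap->version, cap->num_counters_gp, cap->bit_width_gp,
	       cap->num_counters_fixed, cap->bit_width_fixed);
	printf("arch events advertised: mask=0x%x len=%d\n",
	       cap->events_mask, cap->events_mask_len);
}

int main(void)
{
	/* sample values typical of a Nehalem-class PMU, for illustration */
	struct x86_pmu_capability cap = {
		.version = 3, .num_counters_gp = 4, .num_counters_fixed = 3,
		.bit_width_gp = 48, .bit_width_fixed = 48,
		.events_mask = 0x0, .events_mask_len = 7,
	};

	report(&cap);
	return 0;
}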
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index b9698d40ac4b..8944062f46e2 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h | |||
@@ -45,6 +45,7 @@ struct event_constraint { | |||
45 | u64 code; | 45 | u64 code; |
46 | u64 cmask; | 46 | u64 cmask; |
47 | int weight; | 47 | int weight; |
48 | int overlap; | ||
48 | }; | 49 | }; |
49 | 50 | ||
50 | struct amd_nb { | 51 | struct amd_nb { |
@@ -151,15 +152,40 @@ struct cpu_hw_events { | |||
151 | void *kfree_on_online; | 152 | void *kfree_on_online; |
152 | }; | 153 | }; |
153 | 154 | ||
154 | #define __EVENT_CONSTRAINT(c, n, m, w) {\ | 155 | #define __EVENT_CONSTRAINT(c, n, m, w, o) {\ |
155 | { .idxmsk64 = (n) }, \ | 156 | { .idxmsk64 = (n) }, \ |
156 | .code = (c), \ | 157 | .code = (c), \ |
157 | .cmask = (m), \ | 158 | .cmask = (m), \ |
158 | .weight = (w), \ | 159 | .weight = (w), \ |
160 | .overlap = (o), \ | ||
159 | } | 161 | } |
160 | 162 | ||
161 | #define EVENT_CONSTRAINT(c, n, m) \ | 163 | #define EVENT_CONSTRAINT(c, n, m) \ |
162 | __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n)) | 164 | __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0) |
165 | |||
166 | /* | ||
167 | * The overlap flag marks event constraints with overlapping counter | ||
168 | * masks. This is the case if the counter mask of such an event is not | ||
169 | * a subset of any other counter mask of a constraint with an equal or | ||
170 | * higher weight, e.g.: | ||
171 | * | ||
172 | * c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0); | ||
173 | * c_another1 = EVENT_CONSTRAINT(0, 0x07, 0); | ||
174 | * c_another2 = EVENT_CONSTRAINT(0, 0x38, 0); | ||
175 | * | ||
176 | * The event scheduler may not select the correct counter in the first | ||
177 | * cycle because it needs to know which subsequent events will be | ||
178 | * scheduled. It may then fail to schedule the events. So we set the | ||
179 | * overlap flag for such constraints to give the scheduler a hint which | ||
180 | * events to select for counter rescheduling. | ||
181 | * | ||
182 | * Care must be taken as the rescheduling algorithm is O(n!) which | ||
183 | * will increase scheduling cycles for an over-committed system | ||
184 | * dramatically. The number of such EVENT_CONSTRAINT_OVERLAP() macros | ||
185 | * and their counter masks must be kept to a minimum. | ||
186 | */ | ||
187 | #define EVENT_CONSTRAINT_OVERLAP(c, n, m) \ | ||
188 | __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1) | ||
163 | 189 | ||
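The subset relation used in the comment above reduces to a single bitwise test: mask A is a subset of mask B exactly when A sets no bit that B does not. A tiny standalone check against the masks from the example (illustrative code, not part of the kernel):

/* Standalone illustration of the "overlapping counter mask" test. */
#include <stdio.h>

/* a is a subset of b iff a sets no bit that b does not set */
static int is_subset(unsigned long a, unsigned long b)
{
	return (a & ~b) == 0;
}

int main(void)
{
	printf("0x09 subset of 0x07? %d\n", is_subset(0x09, 0x07)); /* 0 */
	printf("0x09 subset of 0x38? %d\n", is_subset(0x09, 0x38)); /* 0 */
	printf("0x07 subset of 0x3f? %d\n", is_subset(0x07, 0x3f)); /* 1 */
	return 0;
}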
164 | /* | 190 | /* |
165 | * Constraint on the Event code. | 191 | * Constraint on the Event code. |
@@ -235,6 +261,11 @@ union perf_capabilities { | |||
235 | u64 capabilities; | 261 | u64 capabilities; |
236 | }; | 262 | }; |
237 | 263 | ||
264 | struct x86_pmu_quirk { | ||
265 | struct x86_pmu_quirk *next; | ||
266 | void (*func)(void); | ||
267 | }; | ||
268 | |||
238 | /* | 269 | /* |
239 | * struct x86_pmu - generic x86 pmu | 270 | * struct x86_pmu - generic x86 pmu |
240 | */ | 271 | */ |
@@ -259,6 +290,11 @@ struct x86_pmu { | |||
259 | int num_counters_fixed; | 290 | int num_counters_fixed; |
260 | int cntval_bits; | 291 | int cntval_bits; |
261 | u64 cntval_mask; | 292 | u64 cntval_mask; |
293 | union { | ||
294 | unsigned long events_maskl; | ||
295 | unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)]; | ||
296 | }; | ||
297 | int events_mask_len; | ||
262 | int apic; | 298 | int apic; |
263 | u64 max_period; | 299 | u64 max_period; |
264 | struct event_constraint * | 300 | struct event_constraint * |
@@ -268,7 +304,7 @@ struct x86_pmu { | |||
268 | void (*put_event_constraints)(struct cpu_hw_events *cpuc, | 304 | void (*put_event_constraints)(struct cpu_hw_events *cpuc, |
269 | struct perf_event *event); | 305 | struct perf_event *event); |
270 | struct event_constraint *event_constraints; | 306 | struct event_constraint *event_constraints; |
271 | void (*quirks)(void); | 307 | struct x86_pmu_quirk *quirks; |
272 | int perfctr_second_write; | 308 | int perfctr_second_write; |
273 | 309 | ||
274 | int (*cpu_prepare)(int cpu); | 310 | int (*cpu_prepare)(int cpu); |
@@ -309,6 +345,15 @@ struct x86_pmu { | |||
309 | struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr); | 345 | struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr); |
310 | }; | 346 | }; |
311 | 347 | ||
348 | #define x86_add_quirk(func_) \ | ||
349 | do { \ | ||
350 | static struct x86_pmu_quirk __quirk __initdata = { \ | ||
351 | .func = func_, \ | ||
352 | }; \ | ||
353 | __quirk.next = x86_pmu.quirks; \ | ||
354 | x86_pmu.quirks = &__quirk; \ | ||
355 | } while (0) | ||
356 | |||
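x86_add_quirk() pushes a statically allocated node onto the head of a singly linked list, so the loop in init_hw_perf_events() runs quirks in the reverse order of registration; that is why the arch-events quirk is installed first so it runs last. A standalone sketch of the same push-to-head pattern, with invented names:

/* Standalone sketch of the head-insert quirk list (illustrative names). */
#include <stdio.h>

struct quirk {
	struct quirk *next;
	void (*func)(void);
};

static struct quirk *quirks;		/* list head, like x86_pmu.quirks */

#define add_quirk(q) do { (q)->next = quirks; quirks = (q); } while (0)

static void first_registered(void)  { puts("first registered, runs last"); }
static void second_registered(void) { puts("second registered, runs first"); }

int main(void)
{
	static struct quirk q1 = { .func = first_registered };
	static struct quirk q2 = { .func = second_registered };
	struct quirk *q;

	add_quirk(&q1);
	add_quirk(&q2);

	/* mirrors: for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next) */
	for (q = quirks; q; q = q->next)
		q->func();
	return 0;
}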
312 | #define ERF_NO_HT_SHARING 1 | 357 | #define ERF_NO_HT_SHARING 1 |
313 | #define ERF_HAS_RSP_1 2 | 358 | #define ERF_HAS_RSP_1 2 |
314 | 359 | ||
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index aeefd45697a2..0397b23be8e9 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c | |||
@@ -492,7 +492,7 @@ static __initconst const struct x86_pmu amd_pmu = { | |||
492 | static struct event_constraint amd_f15_PMC0 = EVENT_CONSTRAINT(0, 0x01, 0); | 492 | static struct event_constraint amd_f15_PMC0 = EVENT_CONSTRAINT(0, 0x01, 0); |
493 | static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0); | 493 | static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0); |
494 | static struct event_constraint amd_f15_PMC3 = EVENT_CONSTRAINT(0, 0x08, 0); | 494 | static struct event_constraint amd_f15_PMC3 = EVENT_CONSTRAINT(0, 0x08, 0); |
495 | static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT(0, 0x09, 0); | 495 | static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0); |
496 | static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0); | 496 | static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0); |
497 | static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0); | 497 | static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0); |
498 | 498 | ||
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c index ab6343d21825..3b8a2d30d14e 100644 --- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c +++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c | |||
@@ -199,8 +199,7 @@ static int force_ibs_eilvt_setup(void) | |||
199 | goto out; | 199 | goto out; |
200 | } | 200 | } |
201 | 201 | ||
202 | pr_err(FW_BUG "using offset %d for IBS interrupts\n", offset); | 202 | pr_info("IBS: LVT offset %d assigned\n", offset); |
203 | pr_err(FW_BUG "workaround enabled for IBS LVT offset\n"); | ||
204 | 203 | ||
205 | return 0; | 204 | return 0; |
206 | out: | 205 | out: |
@@ -265,19 +264,23 @@ perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *h | |||
265 | static __init int amd_ibs_init(void) | 264 | static __init int amd_ibs_init(void) |
266 | { | 265 | { |
267 | u32 caps; | 266 | u32 caps; |
268 | int ret; | 267 | int ret = -EINVAL; |
269 | 268 | ||
270 | caps = __get_ibs_caps(); | 269 | caps = __get_ibs_caps(); |
271 | if (!caps) | 270 | if (!caps) |
272 | return -ENODEV; /* ibs not supported by the cpu */ | 271 | return -ENODEV; /* ibs not supported by the cpu */ |
273 | 272 | ||
274 | if (!ibs_eilvt_valid()) { | 273 | /* |
275 | ret = force_ibs_eilvt_setup(); | 274 | * Force LVT offset assignment for family 10h: The offsets are |
276 | if (ret) { | 275 | * not assigned by the BIOS for this family, so the OS is |
277 | pr_err("Failed to setup IBS, %d\n", ret); | 276 | * responsible for doing it. If the OS assignment fails, fall |
278 | return ret; | 277 | * back to the BIOS settings and try to set up IBS with those. |
279 | } | 278 | */ |
280 | } | 279 | if (boot_cpu_data.x86 == 0x10) |
280 | force_ibs_eilvt_setup(); | ||
281 | |||
282 | if (!ibs_eilvt_valid()) | ||
283 | goto out; | ||
281 | 284 | ||
282 | get_online_cpus(); | 285 | get_online_cpus(); |
283 | ibs_caps = caps; | 286 | ibs_caps = caps; |
@@ -287,7 +290,11 @@ static __init int amd_ibs_init(void) | |||
287 | smp_call_function(setup_APIC_ibs, NULL, 1); | 290 | smp_call_function(setup_APIC_ibs, NULL, 1); |
288 | put_online_cpus(); | 291 | put_online_cpus(); |
289 | 292 | ||
290 | return perf_event_ibs_init(); | 293 | ret = perf_event_ibs_init(); |
294 | out: | ||
295 | if (ret) | ||
296 | pr_err("Failed to setup IBS, %d\n", ret); | ||
297 | return ret; | ||
291 | } | 298 | } |
292 | 299 | ||
293 | /* Since we need the pci subsystem to init ibs we can't do this earlier: */ | 300 | /* Since we need the pci subsystem to init ibs we can't do this earlier: */ |
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 2be5ebe99872..3bd37bdf1b8e 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -28,6 +28,7 @@ static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly = | |||
28 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, | 28 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, |
29 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, | 29 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, |
30 | [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, | 30 | [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, |
31 | [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */ | ||
31 | }; | 32 | }; |
32 | 33 | ||
33 | static struct event_constraint intel_core_event_constraints[] __read_mostly = | 34 | static struct event_constraint intel_core_event_constraints[] __read_mostly = |
@@ -45,12 +46,7 @@ static struct event_constraint intel_core2_event_constraints[] __read_mostly = | |||
45 | { | 46 | { |
46 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | 47 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ |
47 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | 48 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ |
48 | /* | 49 | FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ |
49 | * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event | ||
50 | * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed | ||
51 | * ratio between these counters. | ||
52 | */ | ||
53 | /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */ | ||
54 | INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */ | 50 | INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */ |
55 | INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */ | 51 | INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */ |
56 | INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ | 52 | INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ |
@@ -68,7 +64,7 @@ static struct event_constraint intel_nehalem_event_constraints[] __read_mostly = | |||
68 | { | 64 | { |
69 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | 65 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ |
70 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | 66 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ |
71 | /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */ | 67 | FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ |
72 | INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */ | 68 | INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */ |
73 | INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */ | 69 | INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */ |
74 | INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */ | 70 | INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */ |
@@ -90,7 +86,7 @@ static struct event_constraint intel_westmere_event_constraints[] __read_mostly | |||
90 | { | 86 | { |
91 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | 87 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ |
92 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | 88 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ |
93 | /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */ | 89 | FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ |
94 | INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */ | 90 | INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */ |
95 | INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */ | 91 | INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */ |
96 | INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */ | 92 | INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */ |
@@ -102,7 +98,7 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly = | |||
102 | { | 98 | { |
103 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | 99 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ |
104 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | 100 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ |
105 | /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */ | 101 | FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ |
106 | INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */ | 102 | INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */ |
107 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ | 103 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ |
108 | INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ | 104 | INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ |
@@ -125,7 +121,7 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly = | |||
125 | { | 121 | { |
126 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | 122 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ |
127 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | 123 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ |
128 | /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */ | 124 | FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ |
129 | EVENT_CONSTRAINT_END | 125 | EVENT_CONSTRAINT_END |
130 | }; | 126 | }; |
131 | 127 | ||
@@ -1169,7 +1165,7 @@ again: | |||
1169 | */ | 1165 | */ |
1170 | c = &unconstrained; | 1166 | c = &unconstrained; |
1171 | } else if (intel_try_alt_er(event, orig_idx)) { | 1167 | } else if (intel_try_alt_er(event, orig_idx)) { |
1172 | raw_spin_unlock(&era->lock); | 1168 | raw_spin_unlock_irqrestore(&era->lock, flags); |
1173 | goto again; | 1169 | goto again; |
1174 | } | 1170 | } |
1175 | raw_spin_unlock_irqrestore(&era->lock, flags); | 1171 | raw_spin_unlock_irqrestore(&era->lock, flags); |
@@ -1519,7 +1515,7 @@ static __initconst const struct x86_pmu intel_pmu = { | |||
1519 | .guest_get_msrs = intel_guest_get_msrs, | 1515 | .guest_get_msrs = intel_guest_get_msrs, |
1520 | }; | 1516 | }; |
1521 | 1517 | ||
1522 | static void intel_clovertown_quirks(void) | 1518 | static __init void intel_clovertown_quirk(void) |
1523 | { | 1519 | { |
1524 | /* | 1520 | /* |
1525 | * PEBS is unreliable due to: | 1521 | * PEBS is unreliable due to: |
@@ -1545,12 +1541,60 @@ static void intel_clovertown_quirks(void) | |||
1545 | x86_pmu.pebs_constraints = NULL; | 1541 | x86_pmu.pebs_constraints = NULL; |
1546 | } | 1542 | } |
1547 | 1543 | ||
1544 | static __init void intel_sandybridge_quirk(void) | ||
1545 | { | ||
1546 | printk(KERN_WARNING "PEBS disabled due to CPU errata.\n"); | ||
1547 | x86_pmu.pebs = 0; | ||
1548 | x86_pmu.pebs_constraints = NULL; | ||
1549 | } | ||
1550 | |||
1551 | static const struct { int id; char *name; } intel_arch_events_map[] __initconst = { | ||
1552 | { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" }, | ||
1553 | { PERF_COUNT_HW_INSTRUCTIONS, "instructions" }, | ||
1554 | { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" }, | ||
1555 | { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" }, | ||
1556 | { PERF_COUNT_HW_CACHE_MISSES, "cache misses" }, | ||
1557 | { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" }, | ||
1558 | { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" }, | ||
1559 | }; | ||
1560 | |||
1561 | static __init void intel_arch_events_quirk(void) | ||
1562 | { | ||
1563 | int bit; | ||
1564 | |||
1565 | /* disable events reported as not present by cpuid */ | ||
1566 | for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) { | ||
1567 | intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0; | ||
1568 | printk(KERN_WARNING "CPUID marked event: \'%s\' unavailable\n", | ||
1569 | intel_arch_events_map[bit].name); | ||
1570 | } | ||
1571 | } | ||
1572 | |||
1573 | static __init void intel_nehalem_quirk(void) | ||
1574 | { | ||
1575 | union cpuid10_ebx ebx; | ||
1576 | |||
1577 | ebx.full = x86_pmu.events_maskl; | ||
1578 | if (ebx.split.no_branch_misses_retired) { | ||
1579 | /* | ||
1580 | * Erratum AAJ80 detected, we work it around by using | ||
1581 | * the BR_MISP_EXEC.ANY event. This will over-count | ||
1582 | * branch-misses, but it's still much better than the | ||
1583 | * architectural event which is often completely bogus: | ||
1584 | */ | ||
1585 | intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89; | ||
1586 | ebx.split.no_branch_misses_retired = 0; | ||
1587 | x86_pmu.events_maskl = ebx.full; | ||
1588 | printk(KERN_INFO "CPU erratum AAJ80 worked around\n"); | ||
1589 | } | ||
1590 | } | ||
1591 | |||
1548 | __init int intel_pmu_init(void) | 1592 | __init int intel_pmu_init(void) |
1549 | { | 1593 | { |
1550 | union cpuid10_edx edx; | 1594 | union cpuid10_edx edx; |
1551 | union cpuid10_eax eax; | 1595 | union cpuid10_eax eax; |
1596 | union cpuid10_ebx ebx; | ||
1552 | unsigned int unused; | 1597 | unsigned int unused; |
1553 | unsigned int ebx; | ||
1554 | int version; | 1598 | int version; |
1555 | 1599 | ||
1556 | if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { | 1600 | if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { |
@@ -1567,8 +1611,8 @@ __init int intel_pmu_init(void) | |||
1567 | * Check whether the Architectural PerfMon supports | 1611 | * Check whether the Architectural PerfMon supports |
1568 | * Branch Misses Retired hw_event or not. | 1612 | * Branch Misses Retired hw_event or not. |
1569 | */ | 1613 | */ |
1570 | cpuid(10, &eax.full, &ebx, &unused, &edx.full); | 1614 | cpuid(10, &eax.full, &ebx.full, &unused, &edx.full); |
1571 | if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED) | 1615 | if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT) |
1572 | return -ENODEV; | 1616 | return -ENODEV; |
1573 | 1617 | ||
1574 | version = eax.split.version_id; | 1618 | version = eax.split.version_id; |
@@ -1582,6 +1626,9 @@ __init int intel_pmu_init(void) | |||
1582 | x86_pmu.cntval_bits = eax.split.bit_width; | 1626 | x86_pmu.cntval_bits = eax.split.bit_width; |
1583 | x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1; | 1627 | x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1; |
1584 | 1628 | ||
1629 | x86_pmu.events_maskl = ebx.full; | ||
1630 | x86_pmu.events_mask_len = eax.split.mask_length; | ||
1631 | |||
1585 | /* | 1632 | /* |
1586 | * Quirk: v2 perfmon does not report fixed-purpose events, so | 1633 | * Quirk: v2 perfmon does not report fixed-purpose events, so |
1587 | * assume at least 3 events: | 1634 | * assume at least 3 events: |
@@ -1601,6 +1648,8 @@ __init int intel_pmu_init(void) | |||
1601 | 1648 | ||
1602 | intel_ds_init(); | 1649 | intel_ds_init(); |
1603 | 1650 | ||
1651 | x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */ | ||
1652 | |||
1604 | /* | 1653 | /* |
1605 | * Install the hw-cache-events table: | 1654 | * Install the hw-cache-events table: |
1606 | */ | 1655 | */ |
@@ -1610,7 +1659,7 @@ __init int intel_pmu_init(void) | |||
1610 | break; | 1659 | break; |
1611 | 1660 | ||
1612 | case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */ | 1661 | case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */ |
1613 | x86_pmu.quirks = intel_clovertown_quirks; | 1662 | x86_add_quirk(intel_clovertown_quirk); |
1614 | case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */ | 1663 | case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */ |
1615 | case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */ | 1664 | case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */ |
1616 | case 29: /* six-core 45 nm xeon "Dunnington" */ | 1665 | case 29: /* six-core 45 nm xeon "Dunnington" */ |
@@ -1644,17 +1693,8 @@ __init int intel_pmu_init(void) | |||
1644 | /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ | 1693 | /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ |
1645 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1; | 1694 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1; |
1646 | 1695 | ||
1647 | if (ebx & 0x40) { | 1696 | x86_add_quirk(intel_nehalem_quirk); |
1648 | /* | ||
1649 | * Erratum AAJ80 detected, we work it around by using | ||
1650 | * the BR_MISP_EXEC.ANY event. This will over-count | ||
1651 | * branch-misses, but it's still much better than the | ||
1652 | * architectural event which is often completely bogus: | ||
1653 | */ | ||
1654 | intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89; | ||
1655 | 1697 | ||
1656 | pr_cont("erratum AAJ80 worked around, "); | ||
1657 | } | ||
1658 | pr_cont("Nehalem events, "); | 1698 | pr_cont("Nehalem events, "); |
1659 | break; | 1699 | break; |
1660 | 1700 | ||
@@ -1694,6 +1734,7 @@ __init int intel_pmu_init(void) | |||
1694 | break; | 1734 | break; |
1695 | 1735 | ||
1696 | case 42: /* SandyBridge */ | 1736 | case 42: /* SandyBridge */ |
1737 | x86_add_quirk(intel_sandybridge_quirk); | ||
1697 | case 45: /* SandyBridge, "Romley-EP" */ | 1738 | case 45: /* SandyBridge, "Romley-EP" */ |
1698 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, | 1739 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, |
1699 | sizeof(hw_cache_event_ids)); | 1740 | sizeof(hw_cache_event_ids)); |
@@ -1730,5 +1771,6 @@ __init int intel_pmu_init(void) | |||
1730 | break; | 1771 | break; |
1731 | } | 1772 | } |
1732 | } | 1773 | } |
1774 | |||
1733 | return 0; | 1775 | return 0; |
1734 | } | 1776 | } |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index c0d238f49db8..73da6b64f5b7 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c | |||
@@ -493,6 +493,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) | |||
493 | unsigned long from = cpuc->lbr_entries[0].from; | 493 | unsigned long from = cpuc->lbr_entries[0].from; |
494 | unsigned long old_to, to = cpuc->lbr_entries[0].to; | 494 | unsigned long old_to, to = cpuc->lbr_entries[0].to; |
495 | unsigned long ip = regs->ip; | 495 | unsigned long ip = regs->ip; |
496 | int is_64bit = 0; | ||
496 | 497 | ||
497 | /* | 498 | /* |
498 | * We don't need to fixup if the PEBS assist is fault like | 499 | * We don't need to fixup if the PEBS assist is fault like |
@@ -544,7 +545,10 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) | |||
544 | } else | 545 | } else |
545 | kaddr = (void *)to; | 546 | kaddr = (void *)to; |
546 | 547 | ||
547 | kernel_insn_init(&insn, kaddr); | 548 | #ifdef CONFIG_X86_64 |
549 | is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32); | ||
550 | #endif | ||
551 | insn_init(&insn, kaddr, is_64bit); | ||
548 | insn_get_length(&insn); | 552 | insn_get_length(&insn); |
549 | to += insn.length; | 553 | to += insn.length; |
550 | } while (to < ip); | 554 | } while (to < ip); |
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c index 492bf1358a7c..ef484d9d0a25 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c | |||
@@ -1268,7 +1268,7 @@ reserve: | |||
1268 | } | 1268 | } |
1269 | 1269 | ||
1270 | done: | 1270 | done: |
1271 | return num ? -ENOSPC : 0; | 1271 | return num ? -EINVAL : 0; |
1272 | } | 1272 | } |
1273 | 1273 | ||
1274 | static __initconst const struct x86_pmu p4_pmu = { | 1274 | static __initconst const struct x86_pmu p4_pmu = { |
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c index 3b97a80ce329..c99f9ed013d5 100644 --- a/arch/x86/kernel/dumpstack_32.c +++ b/arch/x86/kernel/dumpstack_32.c | |||
@@ -116,16 +116,16 @@ void show_registers(struct pt_regs *regs) | |||
116 | for (i = 0; i < code_len; i++, ip++) { | 116 | for (i = 0; i < code_len; i++, ip++) { |
117 | if (ip < (u8 *)PAGE_OFFSET || | 117 | if (ip < (u8 *)PAGE_OFFSET || |
118 | probe_kernel_address(ip, c)) { | 118 | probe_kernel_address(ip, c)) { |
119 | printk(" Bad EIP value."); | 119 | printk(KERN_CONT " Bad EIP value."); |
120 | break; | 120 | break; |
121 | } | 121 | } |
122 | if (ip == (u8 *)regs->ip) | 122 | if (ip == (u8 *)regs->ip) |
123 | printk("<%02x> ", c); | 123 | printk(KERN_CONT "<%02x> ", c); |
124 | else | 124 | else |
125 | printk("%02x ", c); | 125 | printk(KERN_CONT "%02x ", c); |
126 | } | 126 | } |
127 | } | 127 | } |
128 | printk("\n"); | 128 | printk(KERN_CONT "\n"); |
129 | } | 129 | } |
130 | 130 | ||
131 | int is_valid_bugaddr(unsigned long ip) | 131 | int is_valid_bugaddr(unsigned long ip) |
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index 19853ad8afc5..6d728d9284bd 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c | |||
@@ -284,16 +284,16 @@ void show_registers(struct pt_regs *regs) | |||
284 | for (i = 0; i < code_len; i++, ip++) { | 284 | for (i = 0; i < code_len; i++, ip++) { |
285 | if (ip < (u8 *)PAGE_OFFSET || | 285 | if (ip < (u8 *)PAGE_OFFSET || |
286 | probe_kernel_address(ip, c)) { | 286 | probe_kernel_address(ip, c)) { |
287 | printk(" Bad RIP value."); | 287 | printk(KERN_CONT " Bad RIP value."); |
288 | break; | 288 | break; |
289 | } | 289 | } |
290 | if (ip == (u8 *)regs->ip) | 290 | if (ip == (u8 *)regs->ip) |
291 | printk("<%02x> ", c); | 291 | printk(KERN_CONT "<%02x> ", c); |
292 | else | 292 | else |
293 | printk("%02x ", c); | 293 | printk(KERN_CONT "%02x ", c); |
294 | } | 294 | } |
295 | } | 295 | } |
296 | printk("\n"); | 296 | printk(KERN_CONT "\n"); |
297 | } | 297 | } |
298 | 298 | ||
299 | int is_valid_bugaddr(unsigned long ip) | 299 | int is_valid_bugaddr(unsigned long ip) |
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 303a0e48f076..8071e2f3d6eb 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c | |||
@@ -738,35 +738,17 @@ core_initcall(e820_mark_nvs_memory); | |||
738 | /* | 738 | /* |
739 | * pre-allocate 4k and reserve it in memblock and e820_saved | 739 | * pre-allocate 4k and reserve it in memblock and e820_saved |
740 | */ | 740 | */ |
741 | u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align) | 741 | u64 __init early_reserve_e820(u64 size, u64 align) |
742 | { | 742 | { |
743 | u64 size = 0; | ||
744 | u64 addr; | 743 | u64 addr; |
745 | u64 start; | ||
746 | 744 | ||
747 | for (start = startt; ; start += size) { | 745 | addr = __memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); |
748 | start = memblock_x86_find_in_range_size(start, &size, align); | 746 | if (addr) { |
749 | if (start == MEMBLOCK_ERROR) | 747 | e820_update_range_saved(addr, size, E820_RAM, E820_RESERVED); |
750 | return 0; | 748 | printk(KERN_INFO "update e820_saved for early_reserve_e820\n"); |
751 | if (size >= sizet) | 749 | update_e820_saved(); |
752 | break; | ||
753 | } | 750 | } |
754 | 751 | ||
755 | #ifdef CONFIG_X86_32 | ||
756 | if (start >= MAXMEM) | ||
757 | return 0; | ||
758 | if (start + size > MAXMEM) | ||
759 | size = MAXMEM - start; | ||
760 | #endif | ||
761 | |||
762 | addr = round_down(start + size - sizet, align); | ||
763 | if (addr < start) | ||
764 | return 0; | ||
765 | memblock_x86_reserve_range(addr, addr + sizet, "new next"); | ||
766 | e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED); | ||
767 | printk(KERN_INFO "update e820_saved for early_reserve_e820\n"); | ||
768 | update_e820_saved(); | ||
769 | |||
770 | return addr; | 752 | return addr; |
771 | } | 753 | } |
772 | 754 | ||
@@ -1090,7 +1072,7 @@ void __init memblock_x86_fill(void) | |||
1090 | * We are safe to enable resizing, because memblock_x86_fill() | 1072 | * We are safe to enable resizing, because memblock_x86_fill() |
1091 | * is called rather late for x86 | 1073 | * is called rather late for x86 |
1092 | */ | 1074 | */ |
1093 | memblock_can_resize = 1; | 1075 | memblock_allow_resize(); |
1094 | 1076 | ||
1095 | for (i = 0; i < e820.nr_map; i++) { | 1077 | for (i = 0; i < e820.nr_map; i++) { |
1096 | struct e820entry *ei = &e820.map[i]; | 1078 | struct e820entry *ei = &e820.map[i]; |
@@ -1105,22 +1087,36 @@ void __init memblock_x86_fill(void) | |||
1105 | memblock_add(ei->addr, ei->size); | 1087 | memblock_add(ei->addr, ei->size); |
1106 | } | 1088 | } |
1107 | 1089 | ||
1108 | memblock_analyze(); | ||
1109 | memblock_dump_all(); | 1090 | memblock_dump_all(); |
1110 | } | 1091 | } |
1111 | 1092 | ||
1112 | void __init memblock_find_dma_reserve(void) | 1093 | void __init memblock_find_dma_reserve(void) |
1113 | { | 1094 | { |
1114 | #ifdef CONFIG_X86_64 | 1095 | #ifdef CONFIG_X86_64 |
1115 | u64 free_size_pfn; | 1096 | u64 nr_pages = 0, nr_free_pages = 0; |
1116 | u64 mem_size_pfn; | 1097 | unsigned long start_pfn, end_pfn; |
1098 | phys_addr_t start, end; | ||
1099 | int i; | ||
1100 | u64 u; | ||
1101 | |||
1117 | /* | 1102 | /* |
1118 | * need to find out the used area below MAX_DMA_PFN: | 1103 | * need to find out the used area below MAX_DMA_PFN: |
1119 | * use memblock to get the free size in [0, MAX_DMA_PFN] | 1104 | * use memblock to get the free size in [0, MAX_DMA_PFN] |
1120 | * first, and assume boot_mem will not take memory below MAX_DMA_PFN | 1105 | * first, and assume boot_mem will not take memory below MAX_DMA_PFN |
1121 | */ | 1106 | */ |
1122 | mem_size_pfn = memblock_x86_memory_in_range(0, MAX_DMA_PFN << PAGE_SHIFT) >> PAGE_SHIFT; | 1107 | for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) { |
1123 | free_size_pfn = memblock_x86_free_memory_in_range(0, MAX_DMA_PFN << PAGE_SHIFT) >> PAGE_SHIFT; | 1108 | start_pfn = min_t(unsigned long, start_pfn, MAX_DMA_PFN); |
1124 | set_dma_reserve(mem_size_pfn - free_size_pfn); | 1109 | end_pfn = min_t(unsigned long, end_pfn, MAX_DMA_PFN); |
1110 | nr_pages += end_pfn - start_pfn; | ||
1111 | } | ||
1112 | |||
1113 | for_each_free_mem_range(u, MAX_NUMNODES, &start, &end, NULL) { | ||
1114 | start_pfn = min_t(unsigned long, PFN_UP(start), MAX_DMA_PFN); | ||
1115 | end_pfn = min_t(unsigned long, PFN_DOWN(end), MAX_DMA_PFN); | ||
1116 | if (start_pfn < end_pfn) | ||
1117 | nr_free_pages += end_pfn - start_pfn; | ||
1118 | } | ||
1119 | |||
1120 | set_dma_reserve(nr_pages - nr_free_pages); | ||
1125 | #endif | 1121 | #endif |
1126 | } | 1122 | } |
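The rewritten memblock_find_dma_reserve() computes the same quantity as before: the number of page frames below the DMA limit minus the free page frames below it, i.e. the pages already spoken for in the DMA zone. A standalone sketch of the clamp-and-accumulate arithmetic over made-up ranges (the 16 MiB limit and the ranges are illustrative only):

/*
 * Standalone sketch of the "reserved pages below the DMA limit" math:
 * clamp each range to [0, limit_pfn) and accumulate, then subtract the
 * free pages from the total. Ranges and the limit are made up.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define LIMIT_PFN	(16UL << (20 - PAGE_SHIFT))	/* 16 MiB in pages */

struct range { unsigned long start_pfn, end_pfn; };

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

static unsigned long pages_below(const struct range *r, int n,
				 unsigned long limit)
{
	unsigned long sum = 0, s, e;
	int i;

	for (i = 0; i < n; i++) {
		s = min_ul(r[i].start_pfn, limit);
		e = min_ul(r[i].end_pfn, limit);
		if (s < e)
			sum += e - s;
	}
	return sum;
}

int main(void)
{
	struct range memory[]   = { { 0x10, 0x9f }, { 0x100, 0x8000 } };
	struct range free_mem[] = { { 0x300, 0x4000 }, { 0x5000, 0x8000 } };
	unsigned long total = pages_below(memory, 2, LIMIT_PFN);
	unsigned long avail = pages_below(free_mem, 2, LIMIT_PFN);

	printf("dma reserve = %lu pages\n", total - avail);
	return 0;
}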
diff --git a/arch/x86/kernel/head.c b/arch/x86/kernel/head.c index af0699ba48cf..48d9d4ea1020 100644 --- a/arch/x86/kernel/head.c +++ b/arch/x86/kernel/head.c | |||
@@ -52,5 +52,5 @@ void __init reserve_ebda_region(void) | |||
52 | lowmem = 0x9f000; | 52 | lowmem = 0x9f000; |
53 | 53 | ||
54 | /* reserve all memory between lowmem and the 1MB mark */ | 54 | /* reserve all memory between lowmem and the 1MB mark */ |
55 | memblock_x86_reserve_range(lowmem, 0x100000, "* BIOS reserved"); | 55 | memblock_reserve(lowmem, 0x100000 - lowmem); |
56 | } | 56 | } |
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index 3bb08509a7a1..51ff18616d50 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c | |||
@@ -31,9 +31,8 @@ static void __init i386_default_early_setup(void) | |||
31 | 31 | ||
32 | void __init i386_start_kernel(void) | 32 | void __init i386_start_kernel(void) |
33 | { | 33 | { |
34 | memblock_init(); | 34 | memblock_reserve(__pa_symbol(&_text), |
35 | 35 | __pa_symbol(&__bss_stop) - __pa_symbol(&_text)); | |
36 | memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); | ||
37 | 36 | ||
38 | #ifdef CONFIG_BLK_DEV_INITRD | 37 | #ifdef CONFIG_BLK_DEV_INITRD |
39 | /* Reserve INITRD */ | 38 | /* Reserve INITRD */ |
@@ -42,7 +41,7 @@ void __init i386_start_kernel(void) | |||
42 | u64 ramdisk_image = boot_params.hdr.ramdisk_image; | 41 | u64 ramdisk_image = boot_params.hdr.ramdisk_image; |
43 | u64 ramdisk_size = boot_params.hdr.ramdisk_size; | 42 | u64 ramdisk_size = boot_params.hdr.ramdisk_size; |
44 | u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size); | 43 | u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size); |
45 | memblock_x86_reserve_range(ramdisk_image, ramdisk_end, "RAMDISK"); | 44 | memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image); |
46 | } | 45 | } |
47 | #endif | 46 | #endif |
48 | 47 | ||
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 5655c2272adb..3a3b779f41d3 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c | |||
@@ -98,9 +98,8 @@ void __init x86_64_start_reservations(char *real_mode_data) | |||
98 | { | 98 | { |
99 | copy_bootdata(__va(real_mode_data)); | 99 | copy_bootdata(__va(real_mode_data)); |
100 | 100 | ||
101 | memblock_init(); | 101 | memblock_reserve(__pa_symbol(&_text), |
102 | 102 | __pa_symbol(&__bss_stop) - __pa_symbol(&_text)); | |
103 | memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); | ||
104 | 103 | ||
105 | #ifdef CONFIG_BLK_DEV_INITRD | 104 | #ifdef CONFIG_BLK_DEV_INITRD |
106 | /* Reserve INITRD */ | 105 | /* Reserve INITRD */ |
@@ -109,7 +108,7 @@ void __init x86_64_start_reservations(char *real_mode_data) | |||
109 | unsigned long ramdisk_image = boot_params.hdr.ramdisk_image; | 108 | unsigned long ramdisk_image = boot_params.hdr.ramdisk_image; |
110 | unsigned long ramdisk_size = boot_params.hdr.ramdisk_size; | 109 | unsigned long ramdisk_size = boot_params.hdr.ramdisk_size; |
111 | unsigned long ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size); | 110 | unsigned long ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size); |
112 | memblock_x86_reserve_range(ramdisk_image, ramdisk_end, "RAMDISK"); | 111 | memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image); |
113 | } | 112 | } |
114 | #endif | 113 | #endif |
115 | 114 | ||
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index b946a9eac7d9..1bb0bf4d92cd 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c | |||
@@ -1049,6 +1049,14 @@ int hpet_rtc_timer_init(void) | |||
1049 | } | 1049 | } |
1050 | EXPORT_SYMBOL_GPL(hpet_rtc_timer_init); | 1050 | EXPORT_SYMBOL_GPL(hpet_rtc_timer_init); |
1051 | 1051 | ||
1052 | static void hpet_disable_rtc_channel(void) | ||
1053 | { | ||
1054 | unsigned long cfg; | ||
1055 | cfg = hpet_readl(HPET_T1_CFG); | ||
1056 | cfg &= ~HPET_TN_ENABLE; | ||
1057 | hpet_writel(cfg, HPET_T1_CFG); | ||
1058 | } | ||
1059 | |||
1052 | /* | 1060 | /* |
1053 | * The functions below are called from rtc driver. | 1061 | * The functions below are called from rtc driver. |
1054 | * Return 0 if HPET is not being used. | 1062 | * Return 0 if HPET is not being used. |
@@ -1060,6 +1068,9 @@ int hpet_mask_rtc_irq_bit(unsigned long bit_mask) | |||
1060 | return 0; | 1068 | return 0; |
1061 | 1069 | ||
1062 | hpet_rtc_flags &= ~bit_mask; | 1070 | hpet_rtc_flags &= ~bit_mask; |
1071 | if (unlikely(!hpet_rtc_flags)) | ||
1072 | hpet_disable_rtc_channel(); | ||
1073 | |||
1063 | return 1; | 1074 | return 1; |
1064 | } | 1075 | } |
1065 | EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit); | 1076 | EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit); |
@@ -1125,15 +1136,11 @@ EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq); | |||
1125 | 1136 | ||
1126 | static void hpet_rtc_timer_reinit(void) | 1137 | static void hpet_rtc_timer_reinit(void) |
1127 | { | 1138 | { |
1128 | unsigned int cfg, delta; | 1139 | unsigned int delta; |
1129 | int lost_ints = -1; | 1140 | int lost_ints = -1; |
1130 | 1141 | ||
1131 | if (unlikely(!hpet_rtc_flags)) { | 1142 | if (unlikely(!hpet_rtc_flags)) |
1132 | cfg = hpet_readl(HPET_T1_CFG); | 1143 | hpet_disable_rtc_channel(); |
1133 | cfg &= ~HPET_TN_ENABLE; | ||
1134 | hpet_writel(cfg, HPET_T1_CFG); | ||
1135 | return; | ||
1136 | } | ||
1137 | 1144 | ||
1138 | if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit) | 1145 | if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit) |
1139 | delta = hpet_default_delta; | 1146 | delta = hpet_default_delta; |
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index ef54ed4e307d..7943e0c21bde 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c | |||
@@ -186,8 +186,8 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs) | |||
186 | unsigned vector = ~regs->orig_ax; | 186 | unsigned vector = ~regs->orig_ax; |
187 | unsigned irq; | 187 | unsigned irq; |
188 | 188 | ||
189 | exit_idle(); | ||
190 | irq_enter(); | 189 | irq_enter(); |
190 | exit_idle(); | ||
191 | 191 | ||
192 | irq = __this_cpu_read(vector_irq[vector]); | 192 | irq = __this_cpu_read(vector_irq[vector]); |
193 | 193 | ||
@@ -214,10 +214,10 @@ void smp_x86_platform_ipi(struct pt_regs *regs) | |||
214 | 214 | ||
215 | ack_APIC_irq(); | 215 | ack_APIC_irq(); |
216 | 216 | ||
217 | exit_idle(); | ||
218 | |||
219 | irq_enter(); | 217 | irq_enter(); |
220 | 218 | ||
219 | exit_idle(); | ||
220 | |||
221 | inc_irq_stat(x86_platform_ipis); | 221 | inc_irq_stat(x86_platform_ipis); |
222 | 222 | ||
223 | if (x86_platform_ipi_callback) | 223 | if (x86_platform_ipi_callback) |
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index acf8fbf8fbda..69bca468c47a 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c | |||
@@ -38,6 +38,9 @@ static inline void stack_overflow_check(struct pt_regs *regs) | |||
38 | #ifdef CONFIG_DEBUG_STACKOVERFLOW | 38 | #ifdef CONFIG_DEBUG_STACKOVERFLOW |
39 | u64 curbase = (u64)task_stack_page(current); | 39 | u64 curbase = (u64)task_stack_page(current); |
40 | 40 | ||
41 | if (user_mode_vm(regs)) | ||
42 | return; | ||
43 | |||
41 | WARN_ONCE(regs->sp >= curbase && | 44 | WARN_ONCE(regs->sp >= curbase && |
42 | regs->sp <= curbase + THREAD_SIZE && | 45 | regs->sp <= curbase + THREAD_SIZE && |
43 | regs->sp < curbase + sizeof(struct thread_info) + | 46 | regs->sp < curbase + sizeof(struct thread_info) + |
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c index ea9d5f2f13ef..2889b3d43882 100644 --- a/arch/x86/kernel/jump_label.c +++ b/arch/x86/kernel/jump_label.c | |||
@@ -50,7 +50,7 @@ void arch_jump_label_transform(struct jump_entry *entry, | |||
50 | put_online_cpus(); | 50 | put_online_cpus(); |
51 | } | 51 | } |
52 | 52 | ||
53 | void arch_jump_label_transform_static(struct jump_entry *entry, | 53 | __init_or_module void arch_jump_label_transform_static(struct jump_entry *entry, |
54 | enum jump_label_type type) | 54 | enum jump_label_type type) |
55 | { | 55 | { |
56 | __jump_label_transform(entry, type, text_poke_early); | 56 | __jump_label_transform(entry, type, text_poke_early); |
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c index f2d2a664e797..9d46f5e43b51 100644 --- a/arch/x86/kernel/microcode_core.c +++ b/arch/x86/kernel/microcode_core.c | |||
@@ -256,7 +256,7 @@ static int __init microcode_dev_init(void) | |||
256 | return 0; | 256 | return 0; |
257 | } | 257 | } |
258 | 258 | ||
259 | static void microcode_dev_exit(void) | 259 | static void __exit microcode_dev_exit(void) |
260 | { | 260 | { |
261 | misc_deregister(&microcode_dev); | 261 | misc_deregister(&microcode_dev); |
262 | } | 262 | } |
@@ -519,10 +519,8 @@ static int __init microcode_init(void) | |||
519 | 519 | ||
520 | microcode_pdev = platform_device_register_simple("microcode", -1, | 520 | microcode_pdev = platform_device_register_simple("microcode", -1, |
521 | NULL, 0); | 521 | NULL, 0); |
522 | if (IS_ERR(microcode_pdev)) { | 522 | if (IS_ERR(microcode_pdev)) |
523 | microcode_dev_exit(); | ||
524 | return PTR_ERR(microcode_pdev); | 523 | return PTR_ERR(microcode_pdev); |
525 | } | ||
526 | 524 | ||
527 | get_online_cpus(); | 525 | get_online_cpus(); |
528 | mutex_lock(µcode_mutex); | 526 | mutex_lock(µcode_mutex); |
@@ -532,14 +530,12 @@ static int __init microcode_init(void) | |||
532 | mutex_unlock(µcode_mutex); | 530 | mutex_unlock(µcode_mutex); |
533 | put_online_cpus(); | 531 | put_online_cpus(); |
534 | 532 | ||
535 | if (error) { | 533 | if (error) |
536 | platform_device_unregister(microcode_pdev); | 534 | goto out_pdev; |
537 | return error; | ||
538 | } | ||
539 | 535 | ||
540 | error = microcode_dev_init(); | 536 | error = microcode_dev_init(); |
541 | if (error) | 537 | if (error) |
542 | return error; | 538 | goto out_sysdev_driver; |
543 | 539 | ||
544 | register_syscore_ops(&mc_syscore_ops); | 540 | register_syscore_ops(&mc_syscore_ops); |
545 | register_hotcpu_notifier(&mc_cpu_notifier); | 541 | register_hotcpu_notifier(&mc_cpu_notifier); |
@@ -548,6 +544,20 @@ static int __init microcode_init(void) | |||
548 | " <tigran@aivazian.fsnet.co.uk>, Peter Oruba\n"); | 544 | " <tigran@aivazian.fsnet.co.uk>, Peter Oruba\n"); |
549 | 545 | ||
550 | return 0; | 546 | return 0; |
547 | |||
548 | out_sysdev_driver: | ||
549 | get_online_cpus(); | ||
550 | mutex_lock(&microcode_mutex); | ||
551 | |||
552 | sysdev_driver_unregister(&cpu_sysdev_class, &mc_sysdev_driver); | ||
553 | |||
554 | mutex_unlock(&microcode_mutex); | ||
555 | put_online_cpus(); | ||
556 | |||
557 | out_pdev: | ||
558 | platform_device_unregister(microcode_pdev); | ||
559 | return error; | ||
560 | |||
551 | } | 561 | } |
552 | module_init(microcode_init); | 562 | module_init(microcode_init); |
553 | 563 | ||
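The reworked microcode_init() above replaces inline cleanup-and-return with a single unwind path: each failure jumps to a label that tears down everything set up so far, in reverse order. A minimal standalone sketch of that goto-unwind pattern follows; the setup_*/undo_* helpers are illustrative stand-ins, not kernel functions, only the label structure mirrors the hunk.

#include <stdio.h>

/* Illustrative setup steps; each returns 0 on success, negative on failure. */
static int setup_platform_device(void) { return 0; }
static int setup_sysdev_driver(void)   { return 0; }
static int setup_chardev(void)         { return -1; }  /* force a failure to show unwinding */

static void undo_sysdev_driver(void)   { puts("undo sysdev driver"); }
static void undo_platform_device(void) { puts("undo platform device"); }

static int init_all(void)
{
        int error;

        error = setup_platform_device();
        if (error)
                return error;                  /* nothing to unwind yet */

        error = setup_sysdev_driver();
        if (error)
                goto out_pdev;                 /* undo only the platform device */

        error = setup_chardev();
        if (error)
                goto out_sysdev_driver;        /* undo both, in reverse order */

        return 0;

out_sysdev_driver:
        undo_sysdev_driver();
out_pdev:
        undo_platform_device();
        return error;
}

int main(void)
{
        return init_all() ? 1 : 0;
}

The payoff visible in the hunk is that adding a later setup step adds one label instead of repeating every earlier cleanup call at each failure site.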
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 9103b89c145a..ca470e4c92dc 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c | |||
@@ -95,8 +95,8 @@ static void __init MP_bus_info(struct mpc_bus *m) | |||
95 | } | 95 | } |
96 | #endif | 96 | #endif |
97 | 97 | ||
98 | set_bit(m->busid, mp_bus_not_pci); | ||
98 | if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) { | 99 | if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) { |
99 | set_bit(m->busid, mp_bus_not_pci); | ||
100 | #if defined(CONFIG_EISA) || defined(CONFIG_MCA) | 100 | #if defined(CONFIG_EISA) || defined(CONFIG_MCA) |
101 | mp_bus_id_to_type[m->busid] = MP_BUS_ISA; | 101 | mp_bus_id_to_type[m->busid] = MP_BUS_ISA; |
102 | #endif | 102 | #endif |
@@ -564,9 +564,7 @@ void __init default_get_smp_config(unsigned int early) | |||
564 | 564 | ||
565 | static void __init smp_reserve_memory(struct mpf_intel *mpf) | 565 | static void __init smp_reserve_memory(struct mpf_intel *mpf) |
566 | { | 566 | { |
567 | unsigned long size = get_mpc_size(mpf->physptr); | 567 | memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr)); |
568 | |||
569 | memblock_x86_reserve_range(mpf->physptr, mpf->physptr+size, "* MP-table mpc"); | ||
570 | } | 568 | } |
571 | 569 | ||
572 | static int __init smp_scan_config(unsigned long base, unsigned long length) | 570 | static int __init smp_scan_config(unsigned long base, unsigned long length) |
@@ -595,7 +593,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length) | |||
595 | mpf, (u64)virt_to_phys(mpf)); | 593 | mpf, (u64)virt_to_phys(mpf)); |
596 | 594 | ||
597 | mem = virt_to_phys(mpf); | 595 | mem = virt_to_phys(mpf); |
598 | memblock_x86_reserve_range(mem, mem + sizeof(*mpf), "* MP-table mpf"); | 596 | memblock_reserve(mem, sizeof(*mpf)); |
599 | if (mpf->physptr) | 597 | if (mpf->physptr) |
600 | smp_reserve_memory(mpf); | 598 | smp_reserve_memory(mpf); |
601 | 599 | ||
@@ -836,10 +834,8 @@ early_param("alloc_mptable", parse_alloc_mptable_opt); | |||
836 | 834 | ||
837 | void __init early_reserve_e820_mpc_new(void) | 835 | void __init early_reserve_e820_mpc_new(void) |
838 | { | 836 | { |
839 | if (enable_update_mptable && alloc_mptable) { | 837 | if (enable_update_mptable && alloc_mptable) |
840 | u64 startt = 0; | 838 | mpc_new_phys = early_reserve_e820(mpc_new_length, 4); |
841 | mpc_new_phys = early_reserve_e820(startt, mpc_new_length, 4); | ||
842 | } | ||
843 | } | 839 | } |
844 | 840 | ||
845 | static int __init update_mp_table(void) | 841 | static int __init update_mp_table(void) |
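The mpparse.c conversions above move from memblock_x86_reserve_range(start, end, name) to memblock_reserve(base, size): callers now pass a length instead of an end address, and the debug string is gone. A small sketch of the half-open-interval-to-length conversion, assuming only the (base, size) convention shown in the hunks; the logging stub below is not the real allocator.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;

/* Stand-in for memblock_reserve(base, size); only logs the request here. */
static void memblock_reserve(phys_addr_t base, phys_addr_t size)
{
        printf("reserve base=%#llx size=%#llx\n",
               (unsigned long long)base, (unsigned long long)size);
}

/* Old-style call site held [start, end); the new API wants base and length. */
static void reserve_range(phys_addr_t start, phys_addr_t end)
{
        memblock_reserve(start, end - start);
}

int main(void)
{
        reserve_range(0x9f000, 0xa0000);       /* illustrative 4 KiB region */
        return 0;
}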
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 795b79f984c2..485204f58cda 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
@@ -99,7 +99,8 @@ void cpu_idle(void) | |||
99 | 99 | ||
100 | /* endless idle loop with no priority at all */ | 100 | /* endless idle loop with no priority at all */ |
101 | while (1) { | 101 | while (1) { |
102 | tick_nohz_stop_sched_tick(1); | 102 | tick_nohz_idle_enter(); |
103 | rcu_idle_enter(); | ||
103 | while (!need_resched()) { | 104 | while (!need_resched()) { |
104 | 105 | ||
105 | check_pgt_cache(); | 106 | check_pgt_cache(); |
@@ -116,7 +117,8 @@ void cpu_idle(void) | |||
116 | pm_idle(); | 117 | pm_idle(); |
117 | start_critical_timings(); | 118 | start_critical_timings(); |
118 | } | 119 | } |
119 | tick_nohz_restart_sched_tick(); | 120 | rcu_idle_exit(); |
121 | tick_nohz_idle_exit(); | ||
120 | preempt_enable_no_resched(); | 122 | preempt_enable_no_resched(); |
121 | schedule(); | 123 | schedule(); |
122 | preempt_disable(); | 124 | preempt_disable(); |
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 3bd7e6eebf31..64e926c89a6f 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
@@ -122,7 +122,7 @@ void cpu_idle(void) | |||
122 | 122 | ||
123 | /* endless idle loop with no priority at all */ | 123 | /* endless idle loop with no priority at all */ |
124 | while (1) { | 124 | while (1) { |
125 | tick_nohz_stop_sched_tick(1); | 125 | tick_nohz_idle_enter(); |
126 | while (!need_resched()) { | 126 | while (!need_resched()) { |
127 | 127 | ||
128 | rmb(); | 128 | rmb(); |
@@ -139,8 +139,14 @@ void cpu_idle(void) | |||
139 | enter_idle(); | 139 | enter_idle(); |
140 | /* Don't trace irqs off for idle */ | 140 | /* Don't trace irqs off for idle */ |
141 | stop_critical_timings(); | 141 | stop_critical_timings(); |
142 | |||
143 | /* enter_idle() needs rcu for notifiers */ | ||
144 | rcu_idle_enter(); | ||
145 | |||
142 | if (cpuidle_idle_call()) | 146 | if (cpuidle_idle_call()) |
143 | pm_idle(); | 147 | pm_idle(); |
148 | |||
149 | rcu_idle_exit(); | ||
144 | start_critical_timings(); | 150 | start_critical_timings(); |
145 | 151 | ||
146 | /* In many cases the interrupt that ended idle | 152 | /* In many cases the interrupt that ended idle |
@@ -149,7 +155,7 @@ void cpu_idle(void) | |||
149 | __exit_idle(); | 155 | __exit_idle(); |
150 | } | 156 | } |
151 | 157 | ||
152 | tick_nohz_restart_sched_tick(); | 158 | tick_nohz_idle_exit(); |
153 | preempt_enable_no_resched(); | 159 | preempt_enable_no_resched(); |
154 | schedule(); | 160 | schedule(); |
155 | preempt_disable(); | 161 | preempt_disable(); |
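Both idle loops above now bracket the idle phase with tick_nohz_idle_enter()/tick_nohz_idle_exit() on the outside and rcu_idle_enter()/rcu_idle_exit() around the part that must not use RCU; in the 64-bit loop the RCU pair sits inside the loop because enter_idle() still needs RCU for its notifiers. A stub-based sketch of that nesting, following the 64-bit placement; the functions below only print and are not the kernel primitives.

#include <stdio.h>
#include <stdbool.h>

static void tick_nohz_idle_enter(void) { puts("tick: stopped"); }
static void tick_nohz_idle_exit(void)  { puts("tick: restarted"); }
static void rcu_idle_enter(void)       { puts("rcu: idle"); }
static void rcu_idle_exit(void)        { puts("rcu: back"); }
static void idle_body(void)            { puts("halt"); }

/* Stub: pretend a reschedule becomes pending after two idle passes. */
static bool need_resched(void)
{
        static int calls;
        return ++calls > 2;
}

int main(void)
{
        tick_nohz_idle_enter();            /* outermost: nohz tick accounting */
        while (!need_resched()) {
                rcu_idle_enter();          /* innermost: no RCU use beyond this point */
                idle_body();
                rcu_idle_exit();
        }
        tick_nohz_idle_exit();
        return 0;
}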
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index b78643d0f9a5..03920a15a632 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c | |||
@@ -553,4 +553,17 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC, | |||
553 | quirk_amd_nb_node); | 553 | quirk_amd_nb_node); |
554 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK, | 554 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK, |
555 | quirk_amd_nb_node); | 555 | quirk_amd_nb_node); |
556 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F0, | ||
557 | quirk_amd_nb_node); | ||
558 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F1, | ||
559 | quirk_amd_nb_node); | ||
560 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F2, | ||
561 | quirk_amd_nb_node); | ||
562 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3, | ||
563 | quirk_amd_nb_node); | ||
564 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4, | ||
565 | quirk_amd_nb_node); | ||
566 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5, | ||
567 | quirk_amd_nb_node); | ||
568 | |||
556 | #endif | 569 | #endif |
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index e334be1182b9..37a458b521a6 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -124,7 +124,7 @@ __setup("reboot=", reboot_setup); | |||
124 | */ | 124 | */ |
125 | 125 | ||
126 | /* | 126 | /* |
127 | * Some machines require the "reboot=b" commandline option, | 127 | * Some machines require the "reboot=b" or "reboot=k" commandline options, |
128 | * this quirk makes that automatic. | 128 | * this quirk makes that automatic. |
129 | */ | 129 | */ |
130 | static int __init set_bios_reboot(const struct dmi_system_id *d) | 130 | static int __init set_bios_reboot(const struct dmi_system_id *d) |
@@ -136,6 +136,15 @@ static int __init set_bios_reboot(const struct dmi_system_id *d) | |||
136 | return 0; | 136 | return 0; |
137 | } | 137 | } |
138 | 138 | ||
139 | static int __init set_kbd_reboot(const struct dmi_system_id *d) | ||
140 | { | ||
141 | if (reboot_type != BOOT_KBD) { | ||
142 | reboot_type = BOOT_KBD; | ||
143 | printk(KERN_INFO "%s series board detected. Selecting KBD-method for reboot.\n", d->ident); | ||
144 | } | ||
145 | return 0; | ||
146 | } | ||
147 | |||
139 | static struct dmi_system_id __initdata reboot_dmi_table[] = { | 148 | static struct dmi_system_id __initdata reboot_dmi_table[] = { |
140 | { /* Handle problems with rebooting on Dell E520's */ | 149 | { /* Handle problems with rebooting on Dell E520's */ |
141 | .callback = set_bios_reboot, | 150 | .callback = set_bios_reboot, |
@@ -295,7 +304,7 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { | |||
295 | }, | 304 | }, |
296 | }, | 305 | }, |
297 | { /* Handle reboot issue on Acer Aspire one */ | 306 | { /* Handle reboot issue on Acer Aspire one */ |
298 | .callback = set_bios_reboot, | 307 | .callback = set_kbd_reboot, |
299 | .ident = "Acer Aspire One A110", | 308 | .ident = "Acer Aspire One A110", |
300 | .matches = { | 309 | .matches = { |
301 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | 310 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), |
@@ -443,6 +452,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = { | |||
443 | DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"), | 452 | DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"), |
444 | }, | 453 | }, |
445 | }, | 454 | }, |
455 | { /* Handle problems with rebooting on the OptiPlex 990. */ | ||
456 | .callback = set_pci_reboot, | ||
457 | .ident = "Dell OptiPlex 990", | ||
458 | .matches = { | ||
459 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
460 | DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"), | ||
461 | }, | ||
462 | }, | ||
446 | { } | 463 | { } |
447 | }; | 464 | }; |
448 | 465 | ||
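The new set_kbd_reboot() callback and the OptiPlex 990 entry above follow the same shape: a DMI match table whose entries pair vendor/product strings with a callback that switches the global reboot method. A standalone sketch of that table-driven quirk pattern; the struct layout and match strings here are illustrative, not the kernel's dmi_system_id.

#include <stdio.h>
#include <string.h>

enum reboot_method { BOOT_ACPI, BOOT_BIOS, BOOT_KBD };
static enum reboot_method reboot_type = BOOT_ACPI;

struct reboot_quirk {
        const char *ident;
        const char *vendor;
        const char *product;
        void (*callback)(const struct reboot_quirk *q);
};

static void set_kbd_reboot(const struct reboot_quirk *q)
{
        if (reboot_type != BOOT_KBD) {
                reboot_type = BOOT_KBD;
                printf("%s detected. Selecting KBD-method for reboot.\n", q->ident);
        }
}

static const struct reboot_quirk quirks[] = {
        { "Acer Aspire One A110", "Acer", "AOA110", set_kbd_reboot },
        { 0 },                                 /* table terminator */
};

static void apply_quirks(const char *vendor, const char *product)
{
        const struct reboot_quirk *q;

        for (q = quirks; q->ident; q++)
                if (!strcmp(vendor, q->vendor) && !strcmp(product, q->product))
                        q->callback(q);
}

int main(void)
{
        apply_quirks("Acer", "AOA110");        /* selects the KBD reboot method */
        return 0;
}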
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c index 348ce016a835..af6db6ec5b2a 100644 --- a/arch/x86/kernel/rtc.c +++ b/arch/x86/kernel/rtc.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <asm/vsyscall.h> | 12 | #include <asm/vsyscall.h> |
13 | #include <asm/x86_init.h> | 13 | #include <asm/x86_init.h> |
14 | #include <asm/time.h> | 14 | #include <asm/time.h> |
15 | #include <asm/mrst.h> | ||
15 | 16 | ||
16 | #ifdef CONFIG_X86_32 | 17 | #ifdef CONFIG_X86_32 |
17 | /* | 18 | /* |
@@ -242,6 +243,10 @@ static __init int add_rtc_cmos(void) | |||
242 | if (of_have_populated_dt()) | 243 | if (of_have_populated_dt()) |
243 | return 0; | 244 | return 0; |
244 | 245 | ||
246 | /* Intel MID platforms don't have ioport rtc */ | ||
247 | if (mrst_identify_cpu()) | ||
248 | return -ENODEV; | ||
249 | |||
245 | platform_device_register(&rtc_device); | 250 | platform_device_register(&rtc_device); |
246 | dev_info(&rtc_device.dev, | 251 | dev_info(&rtc_device.dev, |
247 | "registered platform RTC device (no PNP device found)\n"); | 252 | "registered platform RTC device (no PNP device found)\n"); |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index cf0ef986cb6d..d05444ac2aea 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -306,7 +306,8 @@ static void __init cleanup_highmap(void) | |||
306 | static void __init reserve_brk(void) | 306 | static void __init reserve_brk(void) |
307 | { | 307 | { |
308 | if (_brk_end > _brk_start) | 308 | if (_brk_end > _brk_start) |
309 | memblock_x86_reserve_range(__pa(_brk_start), __pa(_brk_end), "BRK"); | 309 | memblock_reserve(__pa(_brk_start), |
310 | __pa(_brk_end) - __pa(_brk_start)); | ||
310 | 311 | ||
311 | /* Mark brk area as locked down and no longer taking any | 312 | /* Mark brk area as locked down and no longer taking any |
312 | new allocations */ | 313 | new allocations */ |
@@ -331,13 +332,13 @@ static void __init relocate_initrd(void) | |||
331 | ramdisk_here = memblock_find_in_range(0, end_of_lowmem, area_size, | 332 | ramdisk_here = memblock_find_in_range(0, end_of_lowmem, area_size, |
332 | PAGE_SIZE); | 333 | PAGE_SIZE); |
333 | 334 | ||
334 | if (ramdisk_here == MEMBLOCK_ERROR) | 335 | if (!ramdisk_here) |
335 | panic("Cannot find place for new RAMDISK of size %lld\n", | 336 | panic("Cannot find place for new RAMDISK of size %lld\n", |
336 | ramdisk_size); | 337 | ramdisk_size); |
337 | 338 | ||
338 | /* Note: this includes all the lowmem currently occupied by | 339 | /* Note: this includes all the lowmem currently occupied by |
339 | the initrd, we rely on that fact to keep the data intact. */ | 340 | the initrd, we rely on that fact to keep the data intact. */ |
340 | memblock_x86_reserve_range(ramdisk_here, ramdisk_here + area_size, "NEW RAMDISK"); | 341 | memblock_reserve(ramdisk_here, area_size); |
341 | initrd_start = ramdisk_here + PAGE_OFFSET; | 342 | initrd_start = ramdisk_here + PAGE_OFFSET; |
342 | initrd_end = initrd_start + ramdisk_size; | 343 | initrd_end = initrd_start + ramdisk_size; |
343 | printk(KERN_INFO "Allocated new RAMDISK: %08llx - %08llx\n", | 344 | printk(KERN_INFO "Allocated new RAMDISK: %08llx - %08llx\n", |
@@ -393,7 +394,7 @@ static void __init reserve_initrd(void) | |||
393 | initrd_start = 0; | 394 | initrd_start = 0; |
394 | 395 | ||
395 | if (ramdisk_size >= (end_of_lowmem>>1)) { | 396 | if (ramdisk_size >= (end_of_lowmem>>1)) { |
396 | memblock_x86_free_range(ramdisk_image, ramdisk_end); | 397 | memblock_free(ramdisk_image, ramdisk_end - ramdisk_image); |
397 | printk(KERN_ERR "initrd too large to handle, " | 398 | printk(KERN_ERR "initrd too large to handle, " |
398 | "disabling initrd\n"); | 399 | "disabling initrd\n"); |
399 | return; | 400 | return; |
@@ -416,7 +417,7 @@ static void __init reserve_initrd(void) | |||
416 | 417 | ||
417 | relocate_initrd(); | 418 | relocate_initrd(); |
418 | 419 | ||
419 | memblock_x86_free_range(ramdisk_image, ramdisk_end); | 420 | memblock_free(ramdisk_image, ramdisk_end - ramdisk_image); |
420 | } | 421 | } |
421 | #else | 422 | #else |
422 | static void __init reserve_initrd(void) | 423 | static void __init reserve_initrd(void) |
@@ -490,15 +491,13 @@ static void __init memblock_x86_reserve_range_setup_data(void) | |||
490 | { | 491 | { |
491 | struct setup_data *data; | 492 | struct setup_data *data; |
492 | u64 pa_data; | 493 | u64 pa_data; |
493 | char buf[32]; | ||
494 | 494 | ||
495 | if (boot_params.hdr.version < 0x0209) | 495 | if (boot_params.hdr.version < 0x0209) |
496 | return; | 496 | return; |
497 | pa_data = boot_params.hdr.setup_data; | 497 | pa_data = boot_params.hdr.setup_data; |
498 | while (pa_data) { | 498 | while (pa_data) { |
499 | data = early_memremap(pa_data, sizeof(*data)); | 499 | data = early_memremap(pa_data, sizeof(*data)); |
500 | sprintf(buf, "setup data %x", data->type); | 500 | memblock_reserve(pa_data, sizeof(*data) + data->len); |
501 | memblock_x86_reserve_range(pa_data, pa_data+sizeof(*data)+data->len, buf); | ||
502 | pa_data = data->next; | 501 | pa_data = data->next; |
503 | early_iounmap(data, sizeof(*data)); | 502 | early_iounmap(data, sizeof(*data)); |
504 | } | 503 | } |
@@ -554,7 +553,7 @@ static void __init reserve_crashkernel(void) | |||
554 | crash_base = memblock_find_in_range(alignment, | 553 | crash_base = memblock_find_in_range(alignment, |
555 | CRASH_KERNEL_ADDR_MAX, crash_size, alignment); | 554 | CRASH_KERNEL_ADDR_MAX, crash_size, alignment); |
556 | 555 | ||
557 | if (crash_base == MEMBLOCK_ERROR) { | 556 | if (!crash_base) { |
558 | pr_info("crashkernel reservation failed - No suitable area found.\n"); | 557 | pr_info("crashkernel reservation failed - No suitable area found.\n"); |
559 | return; | 558 | return; |
560 | } | 559 | } |
@@ -568,7 +567,7 @@ static void __init reserve_crashkernel(void) | |||
568 | return; | 567 | return; |
569 | } | 568 | } |
570 | } | 569 | } |
571 | memblock_x86_reserve_range(crash_base, crash_base + crash_size, "CRASH KERNEL"); | 570 | memblock_reserve(crash_base, crash_size); |
572 | 571 | ||
573 | printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " | 572 | printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " |
574 | "for crashkernel (System RAM: %ldMB)\n", | 573 | "for crashkernel (System RAM: %ldMB)\n", |
@@ -626,7 +625,7 @@ static __init void reserve_ibft_region(void) | |||
626 | addr = find_ibft_region(&size); | 625 | addr = find_ibft_region(&size); |
627 | 626 | ||
628 | if (size) | 627 | if (size) |
629 | memblock_x86_reserve_range(addr, addr + size, "* ibft"); | 628 | memblock_reserve(addr, size); |
630 | } | 629 | } |
631 | 630 | ||
632 | static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10; | 631 | static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10; |
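Besides the reserve/free conversions, the setup.c hunks above change the failure test for memblock_find_in_range() from a comparison with MEMBLOCK_ERROR to a simple zero check: the allocator is now expected to return 0 when no suitable range exists. A sketch of the resulting call pattern, with an illustrative stub standing in for the real search.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;

/* Stand-in for memblock_find_in_range(): returns 0 when nothing suitable fits. */
static phys_addr_t find_in_range(phys_addr_t start, phys_addr_t end,
                                 phys_addr_t size, phys_addr_t align)
{
        (void)align;
        return (end > start && end - start >= size) ? start : 0;
}

int main(void)
{
        phys_addr_t crash_base = find_in_range(0x1000000, 0x2000000,
                                               0x800000, 0x1000000);

        if (!crash_base) {                 /* 0 now means "no suitable area found" */
                fprintf(stderr, "crashkernel reservation failed\n");
                return 1;
        }
        printf("crashkernel at %#llx\n", (unsigned long long)crash_base);
        return 0;
}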
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c index a91ae7709b49..a73b61055ad6 100644 --- a/arch/x86/kernel/trampoline.c +++ b/arch/x86/kernel/trampoline.c | |||
@@ -14,11 +14,11 @@ void __init setup_trampolines(void) | |||
14 | 14 | ||
15 | /* Has to be in very low memory so we can execute real-mode AP code. */ | 15 | /* Has to be in very low memory so we can execute real-mode AP code. */ |
16 | mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE); | 16 | mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE); |
17 | if (mem == MEMBLOCK_ERROR) | 17 | if (!mem) |
18 | panic("Cannot allocate trampoline\n"); | 18 | panic("Cannot allocate trampoline\n"); |
19 | 19 | ||
20 | x86_trampoline_base = __va(mem); | 20 | x86_trampoline_base = __va(mem); |
21 | memblock_x86_reserve_range(mem, mem + size, "TRAMPOLINE"); | 21 | memblock_reserve(mem, size); |
22 | 22 | ||
23 | printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n", | 23 | printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n", |
24 | x86_trampoline_base, (unsigned long long)mem, size); | 24 | x86_trampoline_base, (unsigned long long)mem, size); |
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index db483369f10b..2c9cf0fd78f5 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c | |||
@@ -35,7 +35,7 @@ static int __read_mostly tsc_unstable; | |||
35 | erroneous rdtsc usage on !cpu_has_tsc processors */ | 35 | erroneous rdtsc usage on !cpu_has_tsc processors */ |
36 | static int __read_mostly tsc_disabled = -1; | 36 | static int __read_mostly tsc_disabled = -1; |
37 | 37 | ||
38 | static int tsc_clocksource_reliable; | 38 | int tsc_clocksource_reliable; |
39 | /* | 39 | /* |
40 | * Scheduler clock - returns current time in nanosec units. | 40 | * Scheduler clock - returns current time in nanosec units. |
41 | */ | 41 | */ |
@@ -178,11 +178,11 @@ static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2) | |||
178 | } | 178 | } |
179 | 179 | ||
180 | #define CAL_MS 10 | 180 | #define CAL_MS 10 |
181 | #define CAL_LATCH (CLOCK_TICK_RATE / (1000 / CAL_MS)) | 181 | #define CAL_LATCH (PIT_TICK_RATE / (1000 / CAL_MS)) |
182 | #define CAL_PIT_LOOPS 1000 | 182 | #define CAL_PIT_LOOPS 1000 |
183 | 183 | ||
184 | #define CAL2_MS 50 | 184 | #define CAL2_MS 50 |
185 | #define CAL2_LATCH (CLOCK_TICK_RATE / (1000 / CAL2_MS)) | 185 | #define CAL2_LATCH (PIT_TICK_RATE / (1000 / CAL2_MS)) |
186 | #define CAL2_PIT_LOOPS 5000 | 186 | #define CAL2_PIT_LOOPS 5000 |
187 | 187 | ||
188 | 188 | ||
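The latch macros above now derive from PIT_TICK_RATE, the raw 1.193182 MHz i8254 input clock, rather than CLOCK_TICK_RATE, which is not the same quantity on every configuration. Assuming the usual PIT_TICK_RATE of 1193182 Hz, the latch values work out as below.

#include <stdio.h>

#define PIT_TICK_RATE 1193182            /* i8253/i8254 input clock, in Hz */

#define CAL_MS        10
#define CAL_LATCH     (PIT_TICK_RATE / (1000 / CAL_MS))    /* ticks counted in 10 ms */
#define CAL2_MS       50
#define CAL2_LATCH    (PIT_TICK_RATE / (1000 / CAL2_MS))   /* ticks counted in 50 ms */

int main(void)
{
        /* 1193182 / 100 = 11931 ticks for 10 ms; 1193182 / 20 = 59659 ticks for 50 ms */
        printf("CAL_LATCH  = %d\n", CAL_LATCH);
        printf("CAL2_LATCH = %d\n", CAL2_LATCH);
        return 0;
}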
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index 0aa5fed8b9e6..9eba29b46cb7 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c | |||
@@ -113,7 +113,7 @@ void __cpuinit check_tsc_sync_source(int cpu) | |||
113 | if (unsynchronized_tsc()) | 113 | if (unsynchronized_tsc()) |
114 | return; | 114 | return; |
115 | 115 | ||
116 | if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) { | 116 | if (tsc_clocksource_reliable) { |
117 | if (cpu == (nr_cpu_ids-1) || system_state != SYSTEM_BOOTING) | 117 | if (cpu == (nr_cpu_ids-1) || system_state != SYSTEM_BOOTING) |
118 | pr_info( | 118 | pr_info( |
119 | "Skipped synchronization checks as TSC is reliable.\n"); | 119 | "Skipped synchronization checks as TSC is reliable.\n"); |
@@ -172,7 +172,7 @@ void __cpuinit check_tsc_sync_target(void) | |||
172 | { | 172 | { |
173 | int cpus = 2; | 173 | int cpus = 2; |
174 | 174 | ||
175 | if (unsynchronized_tsc() || boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) | 175 | if (unsynchronized_tsc() || tsc_clocksource_reliable) |
176 | return; | 176 | return; |
177 | 177 | ||
178 | /* | 178 | /* |
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 76e3f1cd0369..405f2620392f 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c | |||
@@ -338,11 +338,15 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data) | |||
338 | return HRTIMER_NORESTART; | 338 | return HRTIMER_NORESTART; |
339 | } | 339 | } |
340 | 340 | ||
341 | static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period) | 341 | static void create_pit_timer(struct kvm *kvm, u32 val, int is_period) |
342 | { | 342 | { |
343 | struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state; | ||
343 | struct kvm_timer *pt = &ps->pit_timer; | 344 | struct kvm_timer *pt = &ps->pit_timer; |
344 | s64 interval; | 345 | s64 interval; |
345 | 346 | ||
347 | if (!irqchip_in_kernel(kvm)) | ||
348 | return; | ||
349 | |||
346 | interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ); | 350 | interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ); |
347 | 351 | ||
348 | pr_debug("create pit timer, interval is %llu nsec\n", interval); | 352 | pr_debug("create pit timer, interval is %llu nsec\n", interval); |
@@ -394,13 +398,13 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val) | |||
394 | /* FIXME: enhance mode 4 precision */ | 398 | /* FIXME: enhance mode 4 precision */ |
395 | case 4: | 399 | case 4: |
396 | if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)) { | 400 | if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)) { |
397 | create_pit_timer(ps, val, 0); | 401 | create_pit_timer(kvm, val, 0); |
398 | } | 402 | } |
399 | break; | 403 | break; |
400 | case 2: | 404 | case 2: |
401 | case 3: | 405 | case 3: |
402 | if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)){ | 406 | if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)){ |
403 | create_pit_timer(ps, val, 1); | 407 | create_pit_timer(kvm, val, 1); |
404 | } | 408 | } |
405 | break; | 409 | break; |
406 | default: | 410 | default: |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c38efd7b792e..4c938da2ba00 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -602,7 +602,6 @@ static void update_cpuid(struct kvm_vcpu *vcpu) | |||
602 | { | 602 | { |
603 | struct kvm_cpuid_entry2 *best; | 603 | struct kvm_cpuid_entry2 *best; |
604 | struct kvm_lapic *apic = vcpu->arch.apic; | 604 | struct kvm_lapic *apic = vcpu->arch.apic; |
605 | u32 timer_mode_mask; | ||
606 | 605 | ||
607 | best = kvm_find_cpuid_entry(vcpu, 1, 0); | 606 | best = kvm_find_cpuid_entry(vcpu, 1, 0); |
608 | if (!best) | 607 | if (!best) |
@@ -615,15 +614,12 @@ static void update_cpuid(struct kvm_vcpu *vcpu) | |||
615 | best->ecx |= bit(X86_FEATURE_OSXSAVE); | 614 | best->ecx |= bit(X86_FEATURE_OSXSAVE); |
616 | } | 615 | } |
617 | 616 | ||
618 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && | 617 | if (apic) { |
619 | best->function == 0x1) { | 618 | if (best->ecx & bit(X86_FEATURE_TSC_DEADLINE_TIMER)) |
620 | best->ecx |= bit(X86_FEATURE_TSC_DEADLINE_TIMER); | 619 | apic->lapic_timer.timer_mode_mask = 3 << 17; |
621 | timer_mode_mask = 3 << 17; | 620 | else |
622 | } else | 621 | apic->lapic_timer.timer_mode_mask = 1 << 17; |
623 | timer_mode_mask = 1 << 17; | 622 | } |
624 | |||
625 | if (apic) | ||
626 | apic->lapic_timer.timer_mode_mask = timer_mode_mask; | ||
627 | } | 623 | } |
628 | 624 | ||
629 | int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) | 625 | int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) |
@@ -2135,6 +2131,9 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
2135 | case KVM_CAP_TSC_CONTROL: | 2131 | case KVM_CAP_TSC_CONTROL: |
2136 | r = kvm_has_tsc_control; | 2132 | r = kvm_has_tsc_control; |
2137 | break; | 2133 | break; |
2134 | case KVM_CAP_TSC_DEADLINE_TIMER: | ||
2135 | r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER); | ||
2136 | break; | ||
2138 | default: | 2137 | default: |
2139 | r = 0; | 2138 | r = 0; |
2140 | break; | 2139 | break; |
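In the update_cpuid() hunk above the LVT timer mode mask is now chosen from the guest's own CPUID bit rather than the host vendor: bits 17-18 (3 << 17) when the TSC-deadline timer is advertised, bit 17 alone otherwise. The two masks in the diff evaluate as follows.

#include <stdio.h>

int main(void)
{
        unsigned int oneshot_periodic_only = 1u << 17;   /* 0x00020000: bit 17 */
        unsigned int with_tsc_deadline     = 3u << 17;   /* 0x00060000: bits 17-18 */

        printf("timer_mode_mask without TSC-deadline: %#x\n", oneshot_periodic_only);
        printf("timer_mode_mask with    TSC-deadline: %#x\n", with_tsc_deadline);
        return 0;
}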
diff --git a/arch/x86/lib/inat.c b/arch/x86/lib/inat.c index 46fc4ee09fc4..88ad5fbda6e1 100644 --- a/arch/x86/lib/inat.c +++ b/arch/x86/lib/inat.c | |||
@@ -82,9 +82,16 @@ insn_attr_t inat_get_avx_attribute(insn_byte_t opcode, insn_byte_t vex_m, | |||
82 | const insn_attr_t *table; | 82 | const insn_attr_t *table; |
83 | if (vex_m > X86_VEX_M_MAX || vex_p > INAT_LSTPFX_MAX) | 83 | if (vex_m > X86_VEX_M_MAX || vex_p > INAT_LSTPFX_MAX) |
84 | return 0; | 84 | return 0; |
85 | table = inat_avx_tables[vex_m][vex_p]; | 85 | /* At first, this checks the master table */ |
86 | table = inat_avx_tables[vex_m][0]; | ||
86 | if (!table) | 87 | if (!table) |
87 | return 0; | 88 | return 0; |
89 | if (!inat_is_group(table[opcode]) && vex_p) { | ||
90 | /* If this is not a group, get attribute directly */ | ||
91 | table = inat_avx_tables[vex_m][vex_p]; | ||
92 | if (!table) | ||
93 | return 0; | ||
94 | } | ||
88 | return table[opcode]; | 95 | return table[opcode]; |
89 | } | 96 | } |
90 | 97 | ||
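The inat.c change above looks up the AVX attribute in two steps: consult the last-prefix-0 "master" table first, and only fall through to the prefix-specific table when the master entry is not a group; group entries are resolved later from the ModRM byte, as the insn.c hunk below shows. A standalone sketch of that lookup order, with a toy table layout and group flag in place of the generated inat tables.

#include <stdio.h>
#include <stdint.h>

typedef uint32_t attr_t;

#define ATTR_GROUP 0x80000000u                   /* toy "this entry is a group" flag */
static int is_group(attr_t a) { return (a & ATTR_GROUP) != 0; }

/* tables[m][p]: m = VEX escape byte index, p = last-prefix index (0 = none) */
static const attr_t *tables[2][4];

static attr_t get_avx_attribute(uint8_t opcode, int m, int p)
{
        const attr_t *table = tables[m][0];      /* check the master table first */

        if (!table)
                return 0;
        if (!is_group(table[opcode]) && p) {
                table = tables[m][p];            /* not a group: use the prefixed table */
                if (!table)
                        return 0;
        }
        return table[opcode];
}

int main(void)
{
        static const attr_t master[256] = { [0x71] = ATTR_GROUP, [0x58] = 1 };
        static const attr_t pfx66[256]  = { [0x58] = 2 };

        tables[1][0] = master;
        tables[1][1] = pfx66;

        printf("%u\n", (unsigned)get_avx_attribute(0x58, 1, 1));  /* 2: from the 66 table */
        printf("%u\n", (unsigned)get_avx_attribute(0x71, 1, 1));  /* group flag from master */
        return 0;
}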
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c index 374562ed6704..5a1f9f3e3fbb 100644 --- a/arch/x86/lib/insn.c +++ b/arch/x86/lib/insn.c | |||
@@ -202,7 +202,7 @@ void insn_get_opcode(struct insn *insn) | |||
202 | m = insn_vex_m_bits(insn); | 202 | m = insn_vex_m_bits(insn); |
203 | p = insn_vex_p_bits(insn); | 203 | p = insn_vex_p_bits(insn); |
204 | insn->attr = inat_get_avx_attribute(op, m, p); | 204 | insn->attr = inat_get_avx_attribute(op, m, p); |
205 | if (!inat_accept_vex(insn->attr)) | 205 | if (!inat_accept_vex(insn->attr) && !inat_is_group(insn->attr)) |
206 | insn->attr = 0; /* This instruction is bad */ | 206 | insn->attr = 0; /* This instruction is bad */ |
207 | goto end; /* VEX has only 1 byte for opcode */ | 207 | goto end; /* VEX has only 1 byte for opcode */ |
208 | } | 208 | } |
@@ -249,6 +249,8 @@ void insn_get_modrm(struct insn *insn) | |||
249 | pfx = insn_last_prefix(insn); | 249 | pfx = insn_last_prefix(insn); |
250 | insn->attr = inat_get_group_attribute(mod, pfx, | 250 | insn->attr = inat_get_group_attribute(mod, pfx, |
251 | insn->attr); | 251 | insn->attr); |
252 | if (insn_is_avx(insn) && !inat_accept_vex(insn->attr)) | ||
253 | insn->attr = 0; /* This is bad */ | ||
252 | } | 254 | } |
253 | } | 255 | } |
254 | 256 | ||
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt index a793da5e560e..5b83c51c12e0 100644 --- a/arch/x86/lib/x86-opcode-map.txt +++ b/arch/x86/lib/x86-opcode-map.txt | |||
@@ -1,5 +1,11 @@ | |||
1 | # x86 Opcode Maps | 1 | # x86 Opcode Maps |
2 | # | 2 | # |
3 | # This is (mostly) based on following documentations. | ||
4 | # - Intel(R) 64 and IA-32 Architectures Software Developer's Manual Vol.2 | ||
5 | # (#325383-040US, October 2011) | ||
6 | # - Intel(R) Advanced Vector Extensions Programming Reference | ||
7 | # (#319433-011,JUNE 2011). | ||
8 | # | ||
3 | #<Opcode maps> | 9 | #<Opcode maps> |
4 | # Table: table-name | 10 | # Table: table-name |
5 | # Referrer: escaped-name | 11 | # Referrer: escaped-name |
@@ -15,10 +21,13 @@ | |||
15 | # EndTable | 21 | # EndTable |
16 | # | 22 | # |
17 | # AVX Superscripts | 23 | # AVX Superscripts |
18 | # (VEX): this opcode can accept VEX prefix. | 24 | # (v): this opcode requires VEX prefix. |
19 | # (oVEX): this opcode requires VEX prefix. | 25 | # (v1): this opcode only supports 128bit VEX. |
20 | # (o128): this opcode only supports 128bit VEX. | 26 | # |
21 | # (o256): this opcode only supports 256bit VEX. | 27 | # Last Prefix Superscripts |
28 | # - (66): the last prefix is 0x66 | ||
29 | # - (F3): the last prefix is 0xF3 | ||
30 | # - (F2): the last prefix is 0xF2 | ||
22 | # | 31 | # |
23 | 32 | ||
24 | Table: one byte opcode | 33 | Table: one byte opcode |
@@ -199,8 +208,8 @@ a0: MOV AL,Ob | |||
199 | a1: MOV rAX,Ov | 208 | a1: MOV rAX,Ov |
200 | a2: MOV Ob,AL | 209 | a2: MOV Ob,AL |
201 | a3: MOV Ov,rAX | 210 | a3: MOV Ov,rAX |
202 | a4: MOVS/B Xb,Yb | 211 | a4: MOVS/B Yb,Xb |
203 | a5: MOVS/W/D/Q Xv,Yv | 212 | a5: MOVS/W/D/Q Yv,Xv |
204 | a6: CMPS/B Xb,Yb | 213 | a6: CMPS/B Xb,Yb |
205 | a7: CMPS/W/D Xv,Yv | 214 | a7: CMPS/W/D Xv,Yv |
206 | a8: TEST AL,Ib | 215 | a8: TEST AL,Ib |
@@ -233,8 +242,8 @@ c0: Grp2 Eb,Ib (1A) | |||
233 | c1: Grp2 Ev,Ib (1A) | 242 | c1: Grp2 Ev,Ib (1A) |
234 | c2: RETN Iw (f64) | 243 | c2: RETN Iw (f64) |
235 | c3: RETN | 244 | c3: RETN |
236 | c4: LES Gz,Mp (i64) | 3bytes-VEX (Prefix) | 245 | c4: LES Gz,Mp (i64) | VEX+2byte (Prefix) |
237 | c5: LDS Gz,Mp (i64) | 2bytes-VEX (Prefix) | 246 | c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix) |
238 | c6: Grp11 Eb,Ib (1A) | 247 | c6: Grp11 Eb,Ib (1A) |
239 | c7: Grp11 Ev,Iz (1A) | 248 | c7: Grp11 Ev,Iz (1A) |
240 | c8: ENTER Iw,Ib | 249 | c8: ENTER Iw,Ib |
@@ -320,14 +329,19 @@ AVXcode: 1 | |||
320 | # 3DNow! uses the last imm byte as opcode extension. | 329 | # 3DNow! uses the last imm byte as opcode extension. |
321 | 0f: 3DNow! Pq,Qq,Ib | 330 | 0f: 3DNow! Pq,Qq,Ib |
322 | # 0x0f 0x10-0x1f | 331 | # 0x0f 0x10-0x1f |
323 | 10: movups Vps,Wps (VEX) | movss Vss,Wss (F3),(VEX),(o128) | movupd Vpd,Wpd (66),(VEX) | movsd Vsd,Wsd (F2),(VEX),(o128) | 332 | # NOTE: According to Intel SDM opcode map, vmovups and vmovupd has no operands |
324 | 11: movups Wps,Vps (VEX) | movss Wss,Vss (F3),(VEX),(o128) | movupd Wpd,Vpd (66),(VEX) | movsd Wsd,Vsd (F2),(VEX),(o128) | 333 | # but it actually has operands. And also, vmovss and vmovsd only accept 128bit. |
325 | 12: movlps Vq,Mq (VEX),(o128) | movlpd Vq,Mq (66),(VEX),(o128) | movhlps Vq,Uq (VEX),(o128) | movddup Vq,Wq (F2),(VEX) | movsldup Vq,Wq (F3),(VEX) | 334 | # MOVSS/MOVSD has too many forms(3) on SDM. This map just shows a typical form. |
326 | 13: mpvlps Mq,Vq (VEX),(o128) | movlpd Mq,Vq (66),(VEX),(o128) | 335 | # Many AVX instructions lack v1 superscript, according to Intel AVX-Prgramming |
327 | 14: unpcklps Vps,Wq (VEX) | unpcklpd Vpd,Wq (66),(VEX) | 336 | # Reference A.1 |
328 | 15: unpckhps Vps,Wq (VEX) | unpckhpd Vpd,Wq (66),(VEX) | 337 | 10: vmovups Vps,Wps | vmovupd Vpd,Wpd (66) | vmovss Vx,Hx,Wss (F3),(v1) | vmovsd Vx,Hx,Wsd (F2),(v1) |
329 | 16: movhps Vq,Mq (VEX),(o128) | movhpd Vq,Mq (66),(VEX),(o128) | movlsps Vq,Uq (VEX),(o128) | movshdup Vq,Wq (F3),(VEX) | 338 | 11: vmovups Wps,Vps | vmovupd Wpd,Vpd (66) | vmovss Wss,Hx,Vss (F3),(v1) | vmovsd Wsd,Hx,Vsd (F2),(v1) |
330 | 17: movhps Mq,Vq (VEX),(o128) | movhpd Mq,Vq (66),(VEX),(o128) | 339 | 12: vmovlps Vq,Hq,Mq (v1) | vmovhlps Vq,Hq,Uq (v1) | vmovlpd Vq,Hq,Mq (66),(v1) | vmovsldup Vx,Wx (F3) | vmovddup Vx,Wx (F2) |
340 | 13: vmovlps Mq,Vq (v1) | vmovlpd Mq,Vq (66),(v1) | ||
341 | 14: vunpcklps Vx,Hx,Wx | vunpcklpd Vx,Hx,Wx (66) | ||
342 | 15: vunpckhps Vx,Hx,Wx | vunpckhpd Vx,Hx,Wx (66) | ||
343 | 16: vmovhps Vdq,Hq,Mq (v1) | vmovlhps Vdq,Hq,Uq (v1) | vmovhpd Vdq,Hq,Mq (66),(v1) | vmovshdup Vx,Wx (F3) | ||
344 | 17: vmovhps Mq,Vq (v1) | vmovhpd Mq,Vq (66),(v1) | ||
331 | 18: Grp16 (1A) | 345 | 18: Grp16 (1A) |
332 | 19: | 346 | 19: |
333 | 1a: | 347 | 1a: |
@@ -345,14 +359,14 @@ AVXcode: 1 | |||
345 | 25: | 359 | 25: |
346 | 26: | 360 | 26: |
347 | 27: | 361 | 27: |
348 | 28: movaps Vps,Wps (VEX) | movapd Vpd,Wpd (66),(VEX) | 362 | 28: vmovaps Vps,Wps | vmovapd Vpd,Wpd (66) |
349 | 29: movaps Wps,Vps (VEX) | movapd Wpd,Vpd (66),(VEX) | 363 | 29: vmovaps Wps,Vps | vmovapd Wpd,Vpd (66) |
350 | 2a: cvtpi2ps Vps,Qpi | cvtsi2ss Vss,Ed/q (F3),(VEX),(o128) | cvtpi2pd Vpd,Qpi (66) | cvtsi2sd Vsd,Ed/q (F2),(VEX),(o128) | 364 | 2a: cvtpi2ps Vps,Qpi | cvtpi2pd Vpd,Qpi (66) | vcvtsi2ss Vss,Hss,Ey (F3),(v1) | vcvtsi2sd Vsd,Hsd,Ey (F2),(v1) |
351 | 2b: movntps Mps,Vps (VEX) | movntpd Mpd,Vpd (66),(VEX) | 365 | 2b: vmovntps Mps,Vps | vmovntpd Mpd,Vpd (66) |
352 | 2c: cvttps2pi Ppi,Wps | cvttss2si Gd/q,Wss (F3),(VEX),(o128) | cvttpd2pi Ppi,Wpd (66) | cvttsd2si Gd/q,Wsd (F2),(VEX),(o128) | 366 | 2c: cvttps2pi Ppi,Wps | cvttpd2pi Ppi,Wpd (66) | vcvttss2si Gy,Wss (F3),(v1) | vcvttsd2si Gy,Wsd (F2),(v1) |
353 | 2d: cvtps2pi Ppi,Wps | cvtss2si Gd/q,Wss (F3),(VEX),(o128) | cvtpd2pi Qpi,Wpd (66) | cvtsd2si Gd/q,Wsd (F2),(VEX),(o128) | 367 | 2d: cvtps2pi Ppi,Wps | cvtpd2pi Qpi,Wpd (66) | vcvtss2si Gy,Wss (F3),(v1) | vcvtsd2si Gy,Wsd (F2),(v1) |
354 | 2e: ucomiss Vss,Wss (VEX),(o128) | ucomisd Vsd,Wsd (66),(VEX),(o128) | 368 | 2e: vucomiss Vss,Wss (v1) | vucomisd Vsd,Wsd (66),(v1) |
355 | 2f: comiss Vss,Wss (VEX),(o128) | comisd Vsd,Wsd (66),(VEX),(o128) | 369 | 2f: vcomiss Vss,Wss (v1) | vcomisd Vsd,Wsd (66),(v1) |
356 | # 0x0f 0x30-0x3f | 370 | # 0x0f 0x30-0x3f |
357 | 30: WRMSR | 371 | 30: WRMSR |
358 | 31: RDTSC | 372 | 31: RDTSC |
@@ -388,65 +402,66 @@ AVXcode: 1 | |||
388 | 4e: CMOVLE/NG Gv,Ev | 402 | 4e: CMOVLE/NG Gv,Ev |
389 | 4f: CMOVNLE/G Gv,Ev | 403 | 4f: CMOVNLE/G Gv,Ev |
390 | # 0x0f 0x50-0x5f | 404 | # 0x0f 0x50-0x5f |
391 | 50: movmskps Gd/q,Ups (VEX) | movmskpd Gd/q,Upd (66),(VEX) | 405 | 50: vmovmskps Gy,Ups | vmovmskpd Gy,Upd (66) |
392 | 51: sqrtps Vps,Wps (VEX) | sqrtss Vss,Wss (F3),(VEX),(o128) | sqrtpd Vpd,Wpd (66),(VEX) | sqrtsd Vsd,Wsd (F2),(VEX),(o128) | 406 | 51: vsqrtps Vps,Wps | vsqrtpd Vpd,Wpd (66) | vsqrtss Vss,Hss,Wss (F3),(v1) | vsqrtsd Vsd,Hsd,Wsd (F2),(v1) |
393 | 52: rsqrtps Vps,Wps (VEX) | rsqrtss Vss,Wss (F3),(VEX),(o128) | 407 | 52: vrsqrtps Vps,Wps | vrsqrtss Vss,Hss,Wss (F3),(v1) |
394 | 53: rcpps Vps,Wps (VEX) | rcpss Vss,Wss (F3),(VEX),(o128) | 408 | 53: vrcpps Vps,Wps | vrcpss Vss,Hss,Wss (F3),(v1) |
395 | 54: andps Vps,Wps (VEX) | andpd Vpd,Wpd (66),(VEX) | 409 | 54: vandps Vps,Hps,Wps | vandpd Vpd,Hpd,Wpd (66) |
396 | 55: andnps Vps,Wps (VEX) | andnpd Vpd,Wpd (66),(VEX) | 410 | 55: vandnps Vps,Hps,Wps | vandnpd Vpd,Hpd,Wpd (66) |
397 | 56: orps Vps,Wps (VEX) | orpd Vpd,Wpd (66),(VEX) | 411 | 56: vorps Vps,Hps,Wps | vorpd Vpd,Hpd,Wpd (66) |
398 | 57: xorps Vps,Wps (VEX) | xorpd Vpd,Wpd (66),(VEX) | 412 | 57: vxorps Vps,Hps,Wps | vxorpd Vpd,Hpd,Wpd (66) |
399 | 58: addps Vps,Wps (VEX) | addss Vss,Wss (F3),(VEX),(o128) | addpd Vpd,Wpd (66),(VEX) | addsd Vsd,Wsd (F2),(VEX),(o128) | 413 | 58: vaddps Vps,Hps,Wps | vaddpd Vpd,Hpd,Wpd (66) | vaddss Vss,Hss,Wss (F3),(v1) | vaddsd Vsd,Hsd,Wsd (F2),(v1) |
400 | 59: mulps Vps,Wps (VEX) | mulss Vss,Wss (F3),(VEX),(o128) | mulpd Vpd,Wpd (66),(VEX) | mulsd Vsd,Wsd (F2),(VEX),(o128) | 414 | 59: vmulps Vps,Hps,Wps | vmulpd Vpd,Hpd,Wpd (66) | vmulss Vss,Hss,Wss (F3),(v1) | vmulsd Vsd,Hsd,Wsd (F2),(v1) |
401 | 5a: cvtps2pd Vpd,Wps (VEX) | cvtss2sd Vsd,Wss (F3),(VEX),(o128) | cvtpd2ps Vps,Wpd (66),(VEX) | cvtsd2ss Vsd,Wsd (F2),(VEX),(o128) | 415 | 5a: vcvtps2pd Vpd,Wps | vcvtpd2ps Vps,Wpd (66) | vcvtss2sd Vsd,Hx,Wss (F3),(v1) | vcvtsd2ss Vss,Hx,Wsd (F2),(v1) |
402 | 5b: cvtdq2ps Vps,Wdq (VEX) | cvtps2dq Vdq,Wps (66),(VEX) | cvttps2dq Vdq,Wps (F3),(VEX) | 416 | 5b: vcvtdq2ps Vps,Wdq | vcvtps2dq Vdq,Wps (66) | vcvttps2dq Vdq,Wps (F3) |
403 | 5c: subps Vps,Wps (VEX) | subss Vss,Wss (F3),(VEX),(o128) | subpd Vpd,Wpd (66),(VEX) | subsd Vsd,Wsd (F2),(VEX),(o128) | 417 | 5c: vsubps Vps,Hps,Wps | vsubpd Vpd,Hpd,Wpd (66) | vsubss Vss,Hss,Wss (F3),(v1) | vsubsd Vsd,Hsd,Wsd (F2),(v1) |
404 | 5d: minps Vps,Wps (VEX) | minss Vss,Wss (F3),(VEX),(o128) | minpd Vpd,Wpd (66),(VEX) | minsd Vsd,Wsd (F2),(VEX),(o128) | 418 | 5d: vminps Vps,Hps,Wps | vminpd Vpd,Hpd,Wpd (66) | vminss Vss,Hss,Wss (F3),(v1) | vminsd Vsd,Hsd,Wsd (F2),(v1) |
405 | 5e: divps Vps,Wps (VEX) | divss Vss,Wss (F3),(VEX),(o128) | divpd Vpd,Wpd (66),(VEX) | divsd Vsd,Wsd (F2),(VEX),(o128) | 419 | 5e: vdivps Vps,Hps,Wps | vdivpd Vpd,Hpd,Wpd (66) | vdivss Vss,Hss,Wss (F3),(v1) | vdivsd Vsd,Hsd,Wsd (F2),(v1) |
406 | 5f: maxps Vps,Wps (VEX) | maxss Vss,Wss (F3),(VEX),(o128) | maxpd Vpd,Wpd (66),(VEX) | maxsd Vsd,Wsd (F2),(VEX),(o128) | 420 | 5f: vmaxps Vps,Hps,Wps | vmaxpd Vpd,Hpd,Wpd (66) | vmaxss Vss,Hss,Wss (F3),(v1) | vmaxsd Vsd,Hsd,Wsd (F2),(v1) |
407 | # 0x0f 0x60-0x6f | 421 | # 0x0f 0x60-0x6f |
408 | 60: punpcklbw Pq,Qd | punpcklbw Vdq,Wdq (66),(VEX),(o128) | 422 | 60: punpcklbw Pq,Qd | vpunpcklbw Vx,Hx,Wx (66),(v1) |
409 | 61: punpcklwd Pq,Qd | punpcklwd Vdq,Wdq (66),(VEX),(o128) | 423 | 61: punpcklwd Pq,Qd | vpunpcklwd Vx,Hx,Wx (66),(v1) |
410 | 62: punpckldq Pq,Qd | punpckldq Vdq,Wdq (66),(VEX),(o128) | 424 | 62: punpckldq Pq,Qd | vpunpckldq Vx,Hx,Wx (66),(v1) |
411 | 63: packsswb Pq,Qq | packsswb Vdq,Wdq (66),(VEX),(o128) | 425 | 63: packsswb Pq,Qq | vpacksswb Vx,Hx,Wx (66),(v1) |
412 | 64: pcmpgtb Pq,Qq | pcmpgtb Vdq,Wdq (66),(VEX),(o128) | 426 | 64: pcmpgtb Pq,Qq | vpcmpgtb Vx,Hx,Wx (66),(v1) |
413 | 65: pcmpgtw Pq,Qq | pcmpgtw Vdq,Wdq (66),(VEX),(o128) | 427 | 65: pcmpgtw Pq,Qq | vpcmpgtw Vx,Hx,Wx (66),(v1) |
414 | 66: pcmpgtd Pq,Qq | pcmpgtd Vdq,Wdq (66),(VEX),(o128) | 428 | 66: pcmpgtd Pq,Qq | vpcmpgtd Vx,Hx,Wx (66),(v1) |
415 | 67: packuswb Pq,Qq | packuswb Vdq,Wdq (66),(VEX),(o128) | 429 | 67: packuswb Pq,Qq | vpackuswb Vx,Hx,Wx (66),(v1) |
416 | 68: punpckhbw Pq,Qd | punpckhbw Vdq,Wdq (66),(VEX),(o128) | 430 | 68: punpckhbw Pq,Qd | vpunpckhbw Vx,Hx,Wx (66),(v1) |
417 | 69: punpckhwd Pq,Qd | punpckhwd Vdq,Wdq (66),(VEX),(o128) | 431 | 69: punpckhwd Pq,Qd | vpunpckhwd Vx,Hx,Wx (66),(v1) |
418 | 6a: punpckhdq Pq,Qd | punpckhdq Vdq,Wdq (66),(VEX),(o128) | 432 | 6a: punpckhdq Pq,Qd | vpunpckhdq Vx,Hx,Wx (66),(v1) |
419 | 6b: packssdw Pq,Qd | packssdw Vdq,Wdq (66),(VEX),(o128) | 433 | 6b: packssdw Pq,Qd | vpackssdw Vx,Hx,Wx (66),(v1) |
420 | 6c: punpcklqdq Vdq,Wdq (66),(VEX),(o128) | 434 | 6c: vpunpcklqdq Vx,Hx,Wx (66),(v1) |
421 | 6d: punpckhqdq Vdq,Wdq (66),(VEX),(o128) | 435 | 6d: vpunpckhqdq Vx,Hx,Wx (66),(v1) |
422 | 6e: movd/q/ Pd,Ed/q | movd/q Vdq,Ed/q (66),(VEX),(o128) | 436 | 6e: movd/q Pd,Ey | vmovd/q Vy,Ey (66),(v1) |
423 | 6f: movq Pq,Qq | movdqa Vdq,Wdq (66),(VEX) | movdqu Vdq,Wdq (F3),(VEX) | 437 | 6f: movq Pq,Qq | vmovdqa Vx,Wx (66) | vmovdqu Vx,Wx (F3) |
424 | # 0x0f 0x70-0x7f | 438 | # 0x0f 0x70-0x7f |
425 | 70: pshufw Pq,Qq,Ib | pshufd Vdq,Wdq,Ib (66),(VEX),(o128) | pshufhw Vdq,Wdq,Ib (F3),(VEX),(o128) | pshuflw VdqWdq,Ib (F2),(VEX),(o128) | 439 | 70: pshufw Pq,Qq,Ib | vpshufd Vx,Wx,Ib (66),(v1) | vpshufhw Vx,Wx,Ib (F3),(v1) | vpshuflw Vx,Wx,Ib (F2),(v1) |
426 | 71: Grp12 (1A) | 440 | 71: Grp12 (1A) |
427 | 72: Grp13 (1A) | 441 | 72: Grp13 (1A) |
428 | 73: Grp14 (1A) | 442 | 73: Grp14 (1A) |
429 | 74: pcmpeqb Pq,Qq | pcmpeqb Vdq,Wdq (66),(VEX),(o128) | 443 | 74: pcmpeqb Pq,Qq | vpcmpeqb Vx,Hx,Wx (66),(v1) |
430 | 75: pcmpeqw Pq,Qq | pcmpeqw Vdq,Wdq (66),(VEX),(o128) | 444 | 75: pcmpeqw Pq,Qq | vpcmpeqw Vx,Hx,Wx (66),(v1) |
431 | 76: pcmpeqd Pq,Qq | pcmpeqd Vdq,Wdq (66),(VEX),(o128) | 445 | 76: pcmpeqd Pq,Qq | vpcmpeqd Vx,Hx,Wx (66),(v1) |
432 | 77: emms/vzeroupper/vzeroall (VEX) | 446 | # Note: Remove (v), because vzeroall and vzeroupper becomes emms without VEX. |
433 | 78: VMREAD Ed/q,Gd/q | 447 | 77: emms | vzeroupper | vzeroall |
434 | 79: VMWRITE Gd/q,Ed/q | 448 | 78: VMREAD Ey,Gy |
449 | 79: VMWRITE Gy,Ey | ||
435 | 7a: | 450 | 7a: |
436 | 7b: | 451 | 7b: |
437 | 7c: haddps Vps,Wps (F2),(VEX) | haddpd Vpd,Wpd (66),(VEX) | 452 | 7c: vhaddpd Vpd,Hpd,Wpd (66) | vhaddps Vps,Hps,Wps (F2) |
438 | 7d: hsubps Vps,Wps (F2),(VEX) | hsubpd Vpd,Wpd (66),(VEX) | 453 | 7d: vhsubpd Vpd,Hpd,Wpd (66) | vhsubps Vps,Hps,Wps (F2) |
439 | 7e: movd/q Ed/q,Pd | movd/q Ed/q,Vdq (66),(VEX),(o128) | movq Vq,Wq (F3),(VEX),(o128) | 454 | 7e: movd/q Ey,Pd | vmovd/q Ey,Vy (66),(v1) | vmovq Vq,Wq (F3),(v1) |
440 | 7f: movq Qq,Pq | movdqa Wdq,Vdq (66),(VEX) | movdqu Wdq,Vdq (F3),(VEX) | 455 | 7f: movq Qq,Pq | vmovdqa Wx,Vx (66) | vmovdqu Wx,Vx (F3) |
441 | # 0x0f 0x80-0x8f | 456 | # 0x0f 0x80-0x8f |
442 | 80: JO Jz (f64) | 457 | 80: JO Jz (f64) |
443 | 81: JNO Jz (f64) | 458 | 81: JNO Jz (f64) |
444 | 82: JB/JNAE/JC Jz (f64) | 459 | 82: JB/JC/JNAE Jz (f64) |
445 | 83: JNB/JAE/JNC Jz (f64) | 460 | 83: JAE/JNB/JNC Jz (f64) |
446 | 84: JZ/JE Jz (f64) | 461 | 84: JE/JZ Jz (f64) |
447 | 85: JNZ/JNE Jz (f64) | 462 | 85: JNE/JNZ Jz (f64) |
448 | 86: JBE/JNA Jz (f64) | 463 | 86: JBE/JNA Jz (f64) |
449 | 87: JNBE/JA Jz (f64) | 464 | 87: JA/JNBE Jz (f64) |
450 | 88: JS Jz (f64) | 465 | 88: JS Jz (f64) |
451 | 89: JNS Jz (f64) | 466 | 89: JNS Jz (f64) |
452 | 8a: JP/JPE Jz (f64) | 467 | 8a: JP/JPE Jz (f64) |
@@ -502,18 +517,18 @@ b8: JMPE | POPCNT Gv,Ev (F3) | |||
502 | b9: Grp10 (1A) | 517 | b9: Grp10 (1A) |
503 | ba: Grp8 Ev,Ib (1A) | 518 | ba: Grp8 Ev,Ib (1A) |
504 | bb: BTC Ev,Gv | 519 | bb: BTC Ev,Gv |
505 | bc: BSF Gv,Ev | 520 | bc: BSF Gv,Ev | TZCNT Gv,Ev (F3) |
506 | bd: BSR Gv,Ev | 521 | bd: BSR Gv,Ev | LZCNT Gv,Ev (F3) |
507 | be: MOVSX Gv,Eb | 522 | be: MOVSX Gv,Eb |
508 | bf: MOVSX Gv,Ew | 523 | bf: MOVSX Gv,Ew |
509 | # 0x0f 0xc0-0xcf | 524 | # 0x0f 0xc0-0xcf |
510 | c0: XADD Eb,Gb | 525 | c0: XADD Eb,Gb |
511 | c1: XADD Ev,Gv | 526 | c1: XADD Ev,Gv |
512 | c2: cmpps Vps,Wps,Ib (VEX) | cmpss Vss,Wss,Ib (F3),(VEX),(o128) | cmppd Vpd,Wpd,Ib (66),(VEX) | cmpsd Vsd,Wsd,Ib (F2),(VEX) | 527 | c2: vcmpps Vps,Hps,Wps,Ib | vcmppd Vpd,Hpd,Wpd,Ib (66) | vcmpss Vss,Hss,Wss,Ib (F3),(v1) | vcmpsd Vsd,Hsd,Wsd,Ib (F2),(v1) |
513 | c3: movnti Md/q,Gd/q | 528 | c3: movnti My,Gy |
514 | c4: pinsrw Pq,Rd/q/Mw,Ib | pinsrw Vdq,Rd/q/Mw,Ib (66),(VEX),(o128) | 529 | c4: pinsrw Pq,Ry/Mw,Ib | vpinsrw Vdq,Hdq,Ry/Mw,Ib (66),(v1) |
515 | c5: pextrw Gd,Nq,Ib | pextrw Gd,Udq,Ib (66),(VEX),(o128) | 530 | c5: pextrw Gd,Nq,Ib | vpextrw Gd,Udq,Ib (66),(v1) |
516 | c6: shufps Vps,Wps,Ib (VEX) | shufpd Vpd,Wpd,Ib (66),(VEX) | 531 | c6: vshufps Vps,Hps,Wps,Ib | vshufpd Vpd,Hpd,Wpd,Ib (66) |
517 | c7: Grp9 (1A) | 532 | c7: Grp9 (1A) |
518 | c8: BSWAP RAX/EAX/R8/R8D | 533 | c8: BSWAP RAX/EAX/R8/R8D |
519 | c9: BSWAP RCX/ECX/R9/R9D | 534 | c9: BSWAP RCX/ECX/R9/R9D |
@@ -524,55 +539,55 @@ cd: BSWAP RBP/EBP/R13/R13D | |||
524 | ce: BSWAP RSI/ESI/R14/R14D | 539 | ce: BSWAP RSI/ESI/R14/R14D |
525 | cf: BSWAP RDI/EDI/R15/R15D | 540 | cf: BSWAP RDI/EDI/R15/R15D |
526 | # 0x0f 0xd0-0xdf | 541 | # 0x0f 0xd0-0xdf |
527 | d0: addsubps Vps,Wps (F2),(VEX) | addsubpd Vpd,Wpd (66),(VEX) | 542 | d0: vaddsubpd Vpd,Hpd,Wpd (66) | vaddsubps Vps,Hps,Wps (F2) |
528 | d1: psrlw Pq,Qq | psrlw Vdq,Wdq (66),(VEX),(o128) | 543 | d1: psrlw Pq,Qq | vpsrlw Vx,Hx,Wx (66),(v1) |
529 | d2: psrld Pq,Qq | psrld Vdq,Wdq (66),(VEX),(o128) | 544 | d2: psrld Pq,Qq | vpsrld Vx,Hx,Wx (66),(v1) |
530 | d3: psrlq Pq,Qq | psrlq Vdq,Wdq (66),(VEX),(o128) | 545 | d3: psrlq Pq,Qq | vpsrlq Vx,Hx,Wx (66),(v1) |
531 | d4: paddq Pq,Qq | paddq Vdq,Wdq (66),(VEX),(o128) | 546 | d4: paddq Pq,Qq | vpaddq Vx,Hx,Wx (66),(v1) |
532 | d5: pmullw Pq,Qq | pmullw Vdq,Wdq (66),(VEX),(o128) | 547 | d5: pmullw Pq,Qq | vpmullw Vx,Hx,Wx (66),(v1) |
533 | d6: movq Wq,Vq (66),(VEX),(o128) | movq2dq Vdq,Nq (F3) | movdq2q Pq,Uq (F2) | 548 | d6: vmovq Wq,Vq (66),(v1) | movq2dq Vdq,Nq (F3) | movdq2q Pq,Uq (F2) |
534 | d7: pmovmskb Gd,Nq | pmovmskb Gd,Udq (66),(VEX),(o128) | 549 | d7: pmovmskb Gd,Nq | vpmovmskb Gd,Ux (66),(v1) |
535 | d8: psubusb Pq,Qq | psubusb Vdq,Wdq (66),(VEX),(o128) | 550 | d8: psubusb Pq,Qq | vpsubusb Vx,Hx,Wx (66),(v1) |
536 | d9: psubusw Pq,Qq | psubusw Vdq,Wdq (66),(VEX),(o128) | 551 | d9: psubusw Pq,Qq | vpsubusw Vx,Hx,Wx (66),(v1) |
537 | da: pminub Pq,Qq | pminub Vdq,Wdq (66),(VEX),(o128) | 552 | da: pminub Pq,Qq | vpminub Vx,Hx,Wx (66),(v1) |
538 | db: pand Pq,Qq | pand Vdq,Wdq (66),(VEX),(o128) | 553 | db: pand Pq,Qq | vpand Vx,Hx,Wx (66),(v1) |
539 | dc: paddusb Pq,Qq | paddusb Vdq,Wdq (66),(VEX),(o128) | 554 | dc: paddusb Pq,Qq | vpaddusb Vx,Hx,Wx (66),(v1) |
540 | dd: paddusw Pq,Qq | paddusw Vdq,Wdq (66),(VEX),(o128) | 555 | dd: paddusw Pq,Qq | vpaddusw Vx,Hx,Wx (66),(v1) |
541 | de: pmaxub Pq,Qq | pmaxub Vdq,Wdq (66),(VEX),(o128) | 556 | de: pmaxub Pq,Qq | vpmaxub Vx,Hx,Wx (66),(v1) |
542 | df: pandn Pq,Qq | pandn Vdq,Wdq (66),(VEX),(o128) | 557 | df: pandn Pq,Qq | vpandn Vx,Hx,Wx (66),(v1) |
543 | # 0x0f 0xe0-0xef | 558 | # 0x0f 0xe0-0xef |
544 | e0: pavgb Pq,Qq | pavgb Vdq,Wdq (66),(VEX),(o128) | 559 | e0: pavgb Pq,Qq | vpavgb Vx,Hx,Wx (66),(v1) |
545 | e1: psraw Pq,Qq | psraw Vdq,Wdq (66),(VEX),(o128) | 560 | e1: psraw Pq,Qq | vpsraw Vx,Hx,Wx (66),(v1) |
546 | e2: psrad Pq,Qq | psrad Vdq,Wdq (66),(VEX),(o128) | 561 | e2: psrad Pq,Qq | vpsrad Vx,Hx,Wx (66),(v1) |
547 | e3: pavgw Pq,Qq | pavgw Vdq,Wdq (66),(VEX),(o128) | 562 | e3: pavgw Pq,Qq | vpavgw Vx,Hx,Wx (66),(v1) |
548 | e4: pmulhuw Pq,Qq | pmulhuw Vdq,Wdq (66),(VEX),(o128) | 563 | e4: pmulhuw Pq,Qq | vpmulhuw Vx,Hx,Wx (66),(v1) |
549 | e5: pmulhw Pq,Qq | pmulhw Vdq,Wdq (66),(VEX),(o128) | 564 | e5: pmulhw Pq,Qq | vpmulhw Vx,Hx,Wx (66),(v1) |
550 | e6: cvtpd2dq Vdq,Wpd (F2),(VEX) | cvttpd2dq Vdq,Wpd (66),(VEX) | cvtdq2pd Vpd,Wdq (F3),(VEX) | 565 | e6: vcvttpd2dq Vx,Wpd (66) | vcvtdq2pd Vx,Wdq (F3) | vcvtpd2dq Vx,Wpd (F2) |
551 | e7: movntq Mq,Pq | movntdq Mdq,Vdq (66),(VEX) | 566 | e7: movntq Mq,Pq | vmovntdq Mx,Vx (66) |
552 | e8: psubsb Pq,Qq | psubsb Vdq,Wdq (66),(VEX),(o128) | 567 | e8: psubsb Pq,Qq | vpsubsb Vx,Hx,Wx (66),(v1) |
553 | e9: psubsw Pq,Qq | psubsw Vdq,Wdq (66),(VEX),(o128) | 568 | e9: psubsw Pq,Qq | vpsubsw Vx,Hx,Wx (66),(v1) |
554 | ea: pminsw Pq,Qq | pminsw Vdq,Wdq (66),(VEX),(o128) | 569 | ea: pminsw Pq,Qq | vpminsw Vx,Hx,Wx (66),(v1) |
555 | eb: por Pq,Qq | por Vdq,Wdq (66),(VEX),(o128) | 570 | eb: por Pq,Qq | vpor Vx,Hx,Wx (66),(v1) |
556 | ec: paddsb Pq,Qq | paddsb Vdq,Wdq (66),(VEX),(o128) | 571 | ec: paddsb Pq,Qq | vpaddsb Vx,Hx,Wx (66),(v1) |
557 | ed: paddsw Pq,Qq | paddsw Vdq,Wdq (66),(VEX),(o128) | 572 | ed: paddsw Pq,Qq | vpaddsw Vx,Hx,Wx (66),(v1) |
558 | ee: pmaxsw Pq,Qq | pmaxsw Vdq,Wdq (66),(VEX),(o128) | 573 | ee: pmaxsw Pq,Qq | vpmaxsw Vx,Hx,Wx (66),(v1) |
559 | ef: pxor Pq,Qq | pxor Vdq,Wdq (66),(VEX),(o128) | 574 | ef: pxor Pq,Qq | vpxor Vx,Hx,Wx (66),(v1) |
560 | # 0x0f 0xf0-0xff | 575 | # 0x0f 0xf0-0xff |
561 | f0: lddqu Vdq,Mdq (F2),(VEX) | 576 | f0: vlddqu Vx,Mx (F2) |
562 | f1: psllw Pq,Qq | psllw Vdq,Wdq (66),(VEX),(o128) | 577 | f1: psllw Pq,Qq | vpsllw Vx,Hx,Wx (66),(v1) |
563 | f2: pslld Pq,Qq | pslld Vdq,Wdq (66),(VEX),(o128) | 578 | f2: pslld Pq,Qq | vpslld Vx,Hx,Wx (66),(v1) |
564 | f3: psllq Pq,Qq | psllq Vdq,Wdq (66),(VEX),(o128) | 579 | f3: psllq Pq,Qq | vpsllq Vx,Hx,Wx (66),(v1) |
565 | f4: pmuludq Pq,Qq | pmuludq Vdq,Wdq (66),(VEX),(o128) | 580 | f4: pmuludq Pq,Qq | vpmuludq Vx,Hx,Wx (66),(v1) |
566 | f5: pmaddwd Pq,Qq | pmaddwd Vdq,Wdq (66),(VEX),(o128) | 581 | f5: pmaddwd Pq,Qq | vpmaddwd Vx,Hx,Wx (66),(v1) |
567 | f6: psadbw Pq,Qq | psadbw Vdq,Wdq (66),(VEX),(o128) | 582 | f6: psadbw Pq,Qq | vpsadbw Vx,Hx,Wx (66),(v1) |
568 | f7: maskmovq Pq,Nq | maskmovdqu Vdq,Udq (66),(VEX),(o128) | 583 | f7: maskmovq Pq,Nq | vmaskmovdqu Vx,Ux (66),(v1) |
569 | f8: psubb Pq,Qq | psubb Vdq,Wdq (66),(VEX),(o128) | 584 | f8: psubb Pq,Qq | vpsubb Vx,Hx,Wx (66),(v1) |
570 | f9: psubw Pq,Qq | psubw Vdq,Wdq (66),(VEX),(o128) | 585 | f9: psubw Pq,Qq | vpsubw Vx,Hx,Wx (66),(v1) |
571 | fa: psubd Pq,Qq | psubd Vdq,Wdq (66),(VEX),(o128) | 586 | fa: psubd Pq,Qq | vpsubd Vx,Hx,Wx (66),(v1) |
572 | fb: psubq Pq,Qq | psubq Vdq,Wdq (66),(VEX),(o128) | 587 | fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1) |
573 | fc: paddb Pq,Qq | paddb Vdq,Wdq (66),(VEX),(o128) | 588 | fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1) |
574 | fd: paddw Pq,Qq | paddw Vdq,Wdq (66),(VEX),(o128) | 589 | fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1) |
575 | fe: paddd Pq,Qq | paddd Vdq,Wdq (66),(VEX),(o128) | 590 | fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1) |
576 | ff: | 591 | ff: |
577 | EndTable | 592 | EndTable |
578 | 593 | ||
@@ -580,155 +595,193 @@ Table: 3-byte opcode 1 (0x0f 0x38) | |||
580 | Referrer: 3-byte escape 1 | 595 | Referrer: 3-byte escape 1 |
581 | AVXcode: 2 | 596 | AVXcode: 2 |
582 | # 0x0f 0x38 0x00-0x0f | 597 | # 0x0f 0x38 0x00-0x0f |
583 | 00: pshufb Pq,Qq | pshufb Vdq,Wdq (66),(VEX),(o128) | 598 | 00: pshufb Pq,Qq | vpshufb Vx,Hx,Wx (66),(v1) |
584 | 01: phaddw Pq,Qq | phaddw Vdq,Wdq (66),(VEX),(o128) | 599 | 01: phaddw Pq,Qq | vphaddw Vx,Hx,Wx (66),(v1) |
585 | 02: phaddd Pq,Qq | phaddd Vdq,Wdq (66),(VEX),(o128) | 600 | 02: phaddd Pq,Qq | vphaddd Vx,Hx,Wx (66),(v1) |
586 | 03: phaddsw Pq,Qq | phaddsw Vdq,Wdq (66),(VEX),(o128) | 601 | 03: phaddsw Pq,Qq | vphaddsw Vx,Hx,Wx (66),(v1) |
587 | 04: pmaddubsw Pq,Qq | pmaddubsw Vdq,Wdq (66),(VEX),(o128) | 602 | 04: pmaddubsw Pq,Qq | vpmaddubsw Vx,Hx,Wx (66),(v1) |
588 | 05: phsubw Pq,Qq | phsubw Vdq,Wdq (66),(VEX),(o128) | 603 | 05: phsubw Pq,Qq | vphsubw Vx,Hx,Wx (66),(v1) |
589 | 06: phsubd Pq,Qq | phsubd Vdq,Wdq (66),(VEX),(o128) | 604 | 06: phsubd Pq,Qq | vphsubd Vx,Hx,Wx (66),(v1) |
590 | 07: phsubsw Pq,Qq | phsubsw Vdq,Wdq (66),(VEX),(o128) | 605 | 07: phsubsw Pq,Qq | vphsubsw Vx,Hx,Wx (66),(v1) |
591 | 08: psignb Pq,Qq | psignb Vdq,Wdq (66),(VEX),(o128) | 606 | 08: psignb Pq,Qq | vpsignb Vx,Hx,Wx (66),(v1) |
592 | 09: psignw Pq,Qq | psignw Vdq,Wdq (66),(VEX),(o128) | 607 | 09: psignw Pq,Qq | vpsignw Vx,Hx,Wx (66),(v1) |
593 | 0a: psignd Pq,Qq | psignd Vdq,Wdq (66),(VEX),(o128) | 608 | 0a: psignd Pq,Qq | vpsignd Vx,Hx,Wx (66),(v1) |
594 | 0b: pmulhrsw Pq,Qq | pmulhrsw Vdq,Wdq (66),(VEX),(o128) | 609 | 0b: pmulhrsw Pq,Qq | vpmulhrsw Vx,Hx,Wx (66),(v1) |
595 | 0c: Vpermilps /r (66),(oVEX) | 610 | 0c: vpermilps Vx,Hx,Wx (66),(v) |
596 | 0d: Vpermilpd /r (66),(oVEX) | 611 | 0d: vpermilpd Vx,Hx,Wx (66),(v) |
597 | 0e: vtestps /r (66),(oVEX) | 612 | 0e: vtestps Vx,Wx (66),(v) |
598 | 0f: vtestpd /r (66),(oVEX) | 613 | 0f: vtestpd Vx,Wx (66),(v) |
599 | # 0x0f 0x38 0x10-0x1f | 614 | # 0x0f 0x38 0x10-0x1f |
600 | 10: pblendvb Vdq,Wdq (66) | 615 | 10: pblendvb Vdq,Wdq (66) |
601 | 11: | 616 | 11: |
602 | 12: | 617 | 12: |
603 | 13: | 618 | 13: vcvtph2ps Vx,Wx,Ib (66),(v) |
604 | 14: blendvps Vdq,Wdq (66) | 619 | 14: blendvps Vdq,Wdq (66) |
605 | 15: blendvpd Vdq,Wdq (66) | 620 | 15: blendvpd Vdq,Wdq (66) |
606 | 16: | 621 | 16: vpermps Vqq,Hqq,Wqq (66),(v) |
607 | 17: ptest Vdq,Wdq (66),(VEX) | 622 | 17: vptest Vx,Wx (66) |
608 | 18: vbroadcastss /r (66),(oVEX) | 623 | 18: vbroadcastss Vx,Wd (66),(v) |
609 | 19: vbroadcastsd /r (66),(oVEX),(o256) | 624 | 19: vbroadcastsd Vqq,Wq (66),(v) |
610 | 1a: vbroadcastf128 /r (66),(oVEX),(o256) | 625 | 1a: vbroadcastf128 Vqq,Mdq (66),(v) |
611 | 1b: | 626 | 1b: |
612 | 1c: pabsb Pq,Qq | pabsb Vdq,Wdq (66),(VEX),(o128) | 627 | 1c: pabsb Pq,Qq | vpabsb Vx,Wx (66),(v1) |
613 | 1d: pabsw Pq,Qq | pabsw Vdq,Wdq (66),(VEX),(o128) | 628 | 1d: pabsw Pq,Qq | vpabsw Vx,Wx (66),(v1) |
614 | 1e: pabsd Pq,Qq | pabsd Vdq,Wdq (66),(VEX),(o128) | 629 | 1e: pabsd Pq,Qq | vpabsd Vx,Wx (66),(v1) |
615 | 1f: | 630 | 1f: |
616 | # 0x0f 0x38 0x20-0x2f | 631 | # 0x0f 0x38 0x20-0x2f |
617 | 20: pmovsxbw Vdq,Udq/Mq (66),(VEX),(o128) | 632 | 20: vpmovsxbw Vx,Ux/Mq (66),(v1) |
618 | 21: pmovsxbd Vdq,Udq/Md (66),(VEX),(o128) | 633 | 21: vpmovsxbd Vx,Ux/Md (66),(v1) |
619 | 22: pmovsxbq Vdq,Udq/Mw (66),(VEX),(o128) | 634 | 22: vpmovsxbq Vx,Ux/Mw (66),(v1) |
620 | 23: pmovsxwd Vdq,Udq/Mq (66),(VEX),(o128) | 635 | 23: vpmovsxwd Vx,Ux/Mq (66),(v1) |
621 | 24: pmovsxwq Vdq,Udq/Md (66),(VEX),(o128) | 636 | 24: vpmovsxwq Vx,Ux/Md (66),(v1) |
622 | 25: pmovsxdq Vdq,Udq/Mq (66),(VEX),(o128) | 637 | 25: vpmovsxdq Vx,Ux/Mq (66),(v1) |
623 | 26: | 638 | 26: |
624 | 27: | 639 | 27: |
625 | 28: pmuldq Vdq,Wdq (66),(VEX),(o128) | 640 | 28: vpmuldq Vx,Hx,Wx (66),(v1) |
626 | 29: pcmpeqq Vdq,Wdq (66),(VEX),(o128) | 641 | 29: vpcmpeqq Vx,Hx,Wx (66),(v1) |
627 | 2a: movntdqa Vdq,Mdq (66),(VEX),(o128) | 642 | 2a: vmovntdqa Vx,Mx (66),(v1) |
628 | 2b: packusdw Vdq,Wdq (66),(VEX),(o128) | 643 | 2b: vpackusdw Vx,Hx,Wx (66),(v1) |
629 | 2c: vmaskmovps(ld) /r (66),(oVEX) | 644 | 2c: vmaskmovps Vx,Hx,Mx (66),(v) |
630 | 2d: vmaskmovpd(ld) /r (66),(oVEX) | 645 | 2d: vmaskmovpd Vx,Hx,Mx (66),(v) |
631 | 2e: vmaskmovps(st) /r (66),(oVEX) | 646 | 2e: vmaskmovps Mx,Hx,Vx (66),(v) |
632 | 2f: vmaskmovpd(st) /r (66),(oVEX) | 647 | 2f: vmaskmovpd Mx,Hx,Vx (66),(v) |
633 | # 0x0f 0x38 0x30-0x3f | 648 | # 0x0f 0x38 0x30-0x3f |
634 | 30: pmovzxbw Vdq,Udq/Mq (66),(VEX),(o128) | 649 | 30: vpmovzxbw Vx,Ux/Mq (66),(v1) |
635 | 31: pmovzxbd Vdq,Udq/Md (66),(VEX),(o128) | 650 | 31: vpmovzxbd Vx,Ux/Md (66),(v1) |
636 | 32: pmovzxbq Vdq,Udq/Mw (66),(VEX),(o128) | 651 | 32: vpmovzxbq Vx,Ux/Mw (66),(v1) |
637 | 33: pmovzxwd Vdq,Udq/Mq (66),(VEX),(o128) | 652 | 33: vpmovzxwd Vx,Ux/Mq (66),(v1) |
638 | 34: pmovzxwq Vdq,Udq/Md (66),(VEX),(o128) | 653 | 34: vpmovzxwq Vx,Ux/Md (66),(v1) |
639 | 35: pmovzxdq Vdq,Udq/Mq (66),(VEX),(o128) | 654 | 35: vpmovzxdq Vx,Ux/Mq (66),(v1) |
640 | 36: | 655 | 36: vpermd Vqq,Hqq,Wqq (66),(v) |
641 | 37: pcmpgtq Vdq,Wdq (66),(VEX),(o128) | 656 | 37: vpcmpgtq Vx,Hx,Wx (66),(v1) |
642 | 38: pminsb Vdq,Wdq (66),(VEX),(o128) | 657 | 38: vpminsb Vx,Hx,Wx (66),(v1) |
643 | 39: pminsd Vdq,Wdq (66),(VEX),(o128) | 658 | 39: vpminsd Vx,Hx,Wx (66),(v1) |
644 | 3a: pminuw Vdq,Wdq (66),(VEX),(o128) | 659 | 3a: vpminuw Vx,Hx,Wx (66),(v1) |
645 | 3b: pminud Vdq,Wdq (66),(VEX),(o128) | 660 | 3b: vpminud Vx,Hx,Wx (66),(v1) |
646 | 3c: pmaxsb Vdq,Wdq (66),(VEX),(o128) | 661 | 3c: vpmaxsb Vx,Hx,Wx (66),(v1) |
647 | 3d: pmaxsd Vdq,Wdq (66),(VEX),(o128) | 662 | 3d: vpmaxsd Vx,Hx,Wx (66),(v1) |
648 | 3e: pmaxuw Vdq,Wdq (66),(VEX),(o128) | 663 | 3e: vpmaxuw Vx,Hx,Wx (66),(v1) |
649 | 3f: pmaxud Vdq,Wdq (66),(VEX),(o128) | 664 | 3f: vpmaxud Vx,Hx,Wx (66),(v1) |
650 | # 0x0f 0x38 0x40-0x8f | 665 | # 0x0f 0x38 0x40-0x8f |
651 | 40: pmulld Vdq,Wdq (66),(VEX),(o128) | 666 | 40: vpmulld Vx,Hx,Wx (66),(v1) |
652 | 41: phminposuw Vdq,Wdq (66),(VEX),(o128) | 667 | 41: vphminposuw Vdq,Wdq (66),(v1) |
653 | 80: INVEPT Gd/q,Mdq (66) | 668 | 42: |
654 | 81: INVPID Gd/q,Mdq (66) | 669 | 43: |
670 | 44: | ||
671 | 45: vpsrlvd/q Vx,Hx,Wx (66),(v) | ||
672 | 46: vpsravd Vx,Hx,Wx (66),(v) | ||
673 | 47: vpsllvd/q Vx,Hx,Wx (66),(v) | ||
674 | # Skip 0x48-0x57 | ||
675 | 58: vpbroadcastd Vx,Wx (66),(v) | ||
676 | 59: vpbroadcastq Vx,Wx (66),(v) | ||
677 | 5a: vbroadcasti128 Vqq,Mdq (66),(v) | ||
678 | # Skip 0x5b-0x77 | ||
679 | 78: vpbroadcastb Vx,Wx (66),(v) | ||
680 | 79: vpbroadcastw Vx,Wx (66),(v) | ||
681 | # Skip 0x7a-0x7f | ||
682 | 80: INVEPT Gy,Mdq (66) | ||
683 | 81: INVPID Gy,Mdq (66) | ||
684 | 82: INVPCID Gy,Mdq (66) | ||
685 | 8c: vpmaskmovd/q Vx,Hx,Mx (66),(v) | ||
686 | 8e: vpmaskmovd/q Mx,Vx,Hx (66),(v) | ||
655 | # 0x0f 0x38 0x90-0xbf (FMA) | 687 | # 0x0f 0x38 0x90-0xbf (FMA) |
656 | 96: vfmaddsub132pd/ps /r (66),(VEX) | 688 | 90: vgatherdd/q Vx,Hx,Wx (66),(v) |
657 | 97: vfmsubadd132pd/ps /r (66),(VEX) | 689 | 91: vgatherqd/q Vx,Hx,Wx (66),(v) |
658 | 98: vfmadd132pd/ps /r (66),(VEX) | 690 | 92: vgatherdps/d Vx,Hx,Wx (66),(v) |
659 | 99: vfmadd132sd/ss /r (66),(VEX),(o128) | 691 | 93: vgatherqps/d Vx,Hx,Wx (66),(v) |
660 | 9a: vfmsub132pd/ps /r (66),(VEX) | 692 | 94: |
661 | 9b: vfmsub132sd/ss /r (66),(VEX),(o128) | 693 | 95: |
662 | 9c: vfnmadd132pd/ps /r (66),(VEX) | 694 | 96: vfmaddsub132ps/d Vx,Hx,Wx (66),(v) |
663 | 9d: vfnmadd132sd/ss /r (66),(VEX),(o128) | 695 | 97: vfmsubadd132ps/d Vx,Hx,Wx (66),(v) |
664 | 9e: vfnmsub132pd/ps /r (66),(VEX) | 696 | 98: vfmadd132ps/d Vx,Hx,Wx (66),(v) |
665 | 9f: vfnmsub132sd/ss /r (66),(VEX),(o128) | 697 | 99: vfmadd132ss/d Vx,Hx,Wx (66),(v),(v1) |
666 | a6: vfmaddsub213pd/ps /r (66),(VEX) | 698 | 9a: vfmsub132ps/d Vx,Hx,Wx (66),(v) |
667 | a7: vfmsubadd213pd/ps /r (66),(VEX) | 699 | 9b: vfmsub132ss/d Vx,Hx,Wx (66),(v),(v1) |
668 | a8: vfmadd213pd/ps /r (66),(VEX) | 700 | 9c: vfnmadd132ps/d Vx,Hx,Wx (66),(v) |
669 | a9: vfmadd213sd/ss /r (66),(VEX),(o128) | 701 | 9d: vfnmadd132ss/d Vx,Hx,Wx (66),(v),(v1) |
670 | aa: vfmsub213pd/ps /r (66),(VEX) | 702 | 9e: vfnmsub132ps/d Vx,Hx,Wx (66),(v) |
671 | ab: vfmsub213sd/ss /r (66),(VEX),(o128) | 703 | 9f: vfnmsub132ss/d Vx,Hx,Wx (66),(v),(v1) |
672 | ac: vfnmadd213pd/ps /r (66),(VEX) | 704 | a6: vfmaddsub213ps/d Vx,Hx,Wx (66),(v) |
673 | ad: vfnmadd213sd/ss /r (66),(VEX),(o128) | 705 | a7: vfmsubadd213ps/d Vx,Hx,Wx (66),(v) |
674 | ae: vfnmsub213pd/ps /r (66),(VEX) | 706 | a8: vfmadd213ps/d Vx,Hx,Wx (66),(v) |
675 | af: vfnmsub213sd/ss /r (66),(VEX),(o128) | 707 | a9: vfmadd213ss/d Vx,Hx,Wx (66),(v),(v1) |
676 | b6: vfmaddsub231pd/ps /r (66),(VEX) | 708 | aa: vfmsub213ps/d Vx,Hx,Wx (66),(v) |
677 | b7: vfmsubadd231pd/ps /r (66),(VEX) | 709 | ab: vfmsub213ss/d Vx,Hx,Wx (66),(v),(v1) |
678 | b8: vfmadd231pd/ps /r (66),(VEX) | 710 | ac: vfnmadd213ps/d Vx,Hx,Wx (66),(v) |
679 | b9: vfmadd231sd/ss /r (66),(VEX),(o128) | 711 | ad: vfnmadd213ss/d Vx,Hx,Wx (66),(v),(v1) |
680 | ba: vfmsub231pd/ps /r (66),(VEX) | 712 | ae: vfnmsub213ps/d Vx,Hx,Wx (66),(v) |
681 | bb: vfmsub231sd/ss /r (66),(VEX),(o128) | 713 | af: vfnmsub213ss/d Vx,Hx,Wx (66),(v),(v1) |
682 | bc: vfnmadd231pd/ps /r (66),(VEX) | 714 | b6: vfmaddsub231ps/d Vx,Hx,Wx (66),(v) |
683 | bd: vfnmadd231sd/ss /r (66),(VEX),(o128) | 715 | b7: vfmsubadd231ps/d Vx,Hx,Wx (66),(v) |
684 | be: vfnmsub231pd/ps /r (66),(VEX) | 716 | b8: vfmadd231ps/d Vx,Hx,Wx (66),(v) |
685 | bf: vfnmsub231sd/ss /r (66),(VEX),(o128) | 717 | b9: vfmadd231ss/d Vx,Hx,Wx (66),(v),(v1) |
718 | ba: vfmsub231ps/d Vx,Hx,Wx (66),(v) | ||
719 | bb: vfmsub231ss/d Vx,Hx,Wx (66),(v),(v1) | ||
720 | bc: vfnmadd231ps/d Vx,Hx,Wx (66),(v) | ||
721 | bd: vfnmadd231ss/d Vx,Hx,Wx (66),(v),(v1) | ||
722 | be: vfnmsub231ps/d Vx,Hx,Wx (66),(v) | ||
723 | bf: vfnmsub231ss/d Vx,Hx,Wx (66),(v),(v1) | ||
686 | # 0x0f 0x38 0xc0-0xff | 724 | # 0x0f 0x38 0xc0-0xff |
687 | db: aesimc Vdq,Wdq (66),(VEX),(o128) | 725 | db: VAESIMC Vdq,Wdq (66),(v1) |
688 | dc: aesenc Vdq,Wdq (66),(VEX),(o128) | 726 | dc: VAESENC Vdq,Hdq,Wdq (66),(v1) |
689 | dd: aesenclast Vdq,Wdq (66),(VEX),(o128) | 727 | dd: VAESENCLAST Vdq,Hdq,Wdq (66),(v1) |
690 | de: aesdec Vdq,Wdq (66),(VEX),(o128) | 728 | de: VAESDEC Vdq,Hdq,Wdq (66),(v1) |
691 | df: aesdeclast Vdq,Wdq (66),(VEX),(o128) | 729 | df: VAESDECLAST Vdq,Hdq,Wdq (66),(v1) |
692 | f0: MOVBE Gv,Mv | CRC32 Gd,Eb (F2) | 730 | f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2) |
693 | f1: MOVBE Mv,Gv | CRC32 Gd,Ev (F2) | 731 | f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2) |
732 | f3: ANDN Gy,By,Ey (v) | ||
733 | f4: Grp17 (1A) | ||
734 | f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v) | ||
735 | f6: MULX By,Gy,rDX,Ey (F2),(v) | ||
736 | f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v) | ||
694 | EndTable | 737 | EndTable |
695 | 738 | ||
696 | Table: 3-byte opcode 2 (0x0f 0x3a) | 739 | Table: 3-byte opcode 2 (0x0f 0x3a) |
697 | Referrer: 3-byte escape 2 | 740 | Referrer: 3-byte escape 2 |
698 | AVXcode: 3 | 741 | AVXcode: 3 |
699 | # 0x0f 0x3a 0x00-0xff | 742 | # 0x0f 0x3a 0x00-0xff |
700 | 04: vpermilps /r,Ib (66),(oVEX) | 743 | 00: vpermq Vqq,Wqq,Ib (66),(v) |
701 | 05: vpermilpd /r,Ib (66),(oVEX) | 744 | 01: vpermpd Vqq,Wqq,Ib (66),(v) |
702 | 06: vperm2f128 /r,Ib (66),(oVEX),(o256) | 745 | 02: vpblendd Vx,Hx,Wx,Ib (66),(v) |
703 | 08: roundps Vdq,Wdq,Ib (66),(VEX) | 746 | 03: |
704 | 09: roundpd Vdq,Wdq,Ib (66),(VEX) | 747 | 04: vpermilps Vx,Wx,Ib (66),(v) |
705 | 0a: roundss Vss,Wss,Ib (66),(VEX),(o128) | 748 | 05: vpermilpd Vx,Wx,Ib (66),(v) |
706 | 0b: roundsd Vsd,Wsd,Ib (66),(VEX),(o128) | 749 | 06: vperm2f128 Vqq,Hqq,Wqq,Ib (66),(v) |
707 | 0c: blendps Vdq,Wdq,Ib (66),(VEX) | 750 | 07: |
708 | 0d: blendpd Vdq,Wdq,Ib (66),(VEX) | 751 | 08: vroundps Vx,Wx,Ib (66) |
709 | 0e: pblendw Vdq,Wdq,Ib (66),(VEX),(o128) | 752 | 09: vroundpd Vx,Wx,Ib (66) |
710 | 0f: palignr Pq,Qq,Ib | palignr Vdq,Wdq,Ib (66),(VEX),(o128) | 753 | 0a: vroundss Vss,Wss,Ib (66),(v1) |
711 | 14: pextrb Rd/Mb,Vdq,Ib (66),(VEX),(o128) | 754 | 0b: vroundsd Vsd,Wsd,Ib (66),(v1) |
712 | 15: pextrw Rd/Mw,Vdq,Ib (66),(VEX),(o128) | 755 | 0c: vblendps Vx,Hx,Wx,Ib (66) |
713 | 16: pextrd/pextrq Ed/q,Vdq,Ib (66),(VEX),(o128) | 756 | 0d: vblendpd Vx,Hx,Wx,Ib (66) |
714 | 17: extractps Ed,Vdq,Ib (66),(VEX),(o128) | 757 | 0e: vpblendw Vx,Hx,Wx,Ib (66),(v1) |
715 | 18: vinsertf128 /r,Ib (66),(oVEX),(o256) | 758 | 0f: palignr Pq,Qq,Ib | vpalignr Vx,Hx,Wx,Ib (66),(v1) |
716 | 19: vextractf128 /r,Ib (66),(oVEX),(o256) | 759 | 14: vpextrb Rd/Mb,Vdq,Ib (66),(v1) |
717 | 20: pinsrb Vdq,Rd/q/Mb,Ib (66),(VEX),(o128) | 760 | 15: vpextrw Rd/Mw,Vdq,Ib (66),(v1) |
718 | 21: insertps Vdq,Udq/Md,Ib (66),(VEX),(o128) | 761 | 16: vpextrd/q Ey,Vdq,Ib (66),(v1) |
719 | 22: pinsrd/pinsrq Vdq,Ed/q,Ib (66),(VEX),(o128) | 762 | 17: vextractps Ed,Vdq,Ib (66),(v1) |
720 | 40: dpps Vdq,Wdq,Ib (66),(VEX) | 763 | 18: vinsertf128 Vqq,Hqq,Wqq,Ib (66),(v) |
721 | 41: dppd Vdq,Wdq,Ib (66),(VEX),(o128) | 764 | 19: vextractf128 Wdq,Vqq,Ib (66),(v) |
722 | 42: mpsadbw Vdq,Wdq,Ib (66),(VEX),(o128) | 765 | 1d: vcvtps2ph Wx,Vx,Ib (66),(v) |
723 | 44: pclmulq Vdq,Wdq,Ib (66),(VEX),(o128) | 766 | 20: vpinsrb Vdq,Hdq,Ry/Mb,Ib (66),(v1) |
724 | 4a: vblendvps /r,Ib (66),(oVEX) | 767 | 21: vinsertps Vdq,Hdq,Udq/Md,Ib (66),(v1) |
725 | 4b: vblendvpd /r,Ib (66),(oVEX) | 768 | 22: vpinsrd/q Vdq,Hdq,Ey,Ib (66),(v1) |
726 | 4c: vpblendvb /r,Ib (66),(oVEX),(o128) | 769 | 38: vinserti128 Vqq,Hqq,Wqq,Ib (66),(v) |
727 | 60: pcmpestrm Vdq,Wdq,Ib (66),(VEX),(o128) | 770 | 39: vextracti128 Wdq,Vqq,Ib (66),(v) |
728 | 61: pcmpestri Vdq,Wdq,Ib (66),(VEX),(o128) | 771 | 40: vdpps Vx,Hx,Wx,Ib (66) |
729 | 62: pcmpistrm Vdq,Wdq,Ib (66),(VEX),(o128) | 772 | 41: vdppd Vdq,Hdq,Wdq,Ib (66),(v1) |
730 | 63: pcmpistri Vdq,Wdq,Ib (66),(VEX),(o128) | 773 | 42: vmpsadbw Vx,Hx,Wx,Ib (66),(v1) |
731 | df: aeskeygenassist Vdq,Wdq,Ib (66),(VEX),(o128) | 774 | 44: vpclmulqdq Vdq,Hdq,Wdq,Ib (66),(v1) |
775 | 46: vperm2i128 Vqq,Hqq,Wqq,Ib (66),(v) | ||
776 | 4a: vblendvps Vx,Hx,Wx,Lx (66),(v) | ||
777 | 4b: vblendvpd Vx,Hx,Wx,Lx (66),(v) | ||
778 | 4c: vpblendvb Vx,Hx,Wx,Lx (66),(v1) | ||
779 | 60: vpcmpestrm Vdq,Wdq,Ib (66),(v1) | ||
780 | 61: vpcmpestri Vdq,Wdq,Ib (66),(v1) | ||
781 | 62: vpcmpistrm Vdq,Wdq,Ib (66),(v1) | ||
782 | 63: vpcmpistri Vdq,Wdq,Ib (66),(v1) | ||
783 | df: VAESKEYGEN Vdq,Wdq,Ib (66),(v1) | ||
784 | f0: RORX Gy,Ey,Ib (F2),(v) | ||
732 | EndTable | 785 | EndTable |
733 | 786 | ||
734 | GrpTable: Grp1 | 787 | GrpTable: Grp1 |
@@ -790,7 +843,7 @@ GrpTable: Grp5 | |||
790 | 2: CALLN Ev (f64) | 843 | 2: CALLN Ev (f64) |
791 | 3: CALLF Ep | 844 | 3: CALLF Ep |
792 | 4: JMPN Ev (f64) | 845 | 4: JMPN Ev (f64) |
793 | 5: JMPF Ep | 846 | 5: JMPF Mp |
794 | 6: PUSH Ev (d64) | 847 | 6: PUSH Ev (d64) |
795 | 7: | 848 | 7: |
796 | EndTable | 849 | EndTable |
@@ -807,7 +860,7 @@ EndTable | |||
807 | GrpTable: Grp7 | 860 | GrpTable: Grp7 |
808 | 0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) | 861 | 0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) |
809 | 1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001) | 862 | 1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001) |
810 | 2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | 863 | 2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) |
811 | 3: LIDT Ms | 864 | 3: LIDT Ms |
812 | 4: SMSW Mw/Rv | 865 | 4: SMSW Mw/Rv |
813 | 5: | 866 | 5: |
@@ -824,44 +877,45 @@ EndTable | |||
824 | 877 | ||
825 | GrpTable: Grp9 | 878 | GrpTable: Grp9 |
826 | 1: CMPXCHG8B/16B Mq/Mdq | 879 | 1: CMPXCHG8B/16B Mq/Mdq |
827 | 6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) | 880 | 6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) | RDRAND Rv (11B) |
828 | 7: VMPTRST Mq | 881 | 7: VMPTRST Mq | VMPTRST Mq (F3) |
829 | EndTable | 882 | EndTable |
830 | 883 | ||
831 | GrpTable: Grp10 | 884 | GrpTable: Grp10 |
832 | EndTable | 885 | EndTable |
833 | 886 | ||
834 | GrpTable: Grp11 | 887 | GrpTable: Grp11 |
888 | # Note: the operands are given by group opcode | ||
835 | 0: MOV | 889 | 0: MOV |
836 | EndTable | 890 | EndTable |
837 | 891 | ||
838 | GrpTable: Grp12 | 892 | GrpTable: Grp12 |
839 | 2: psrlw Nq,Ib (11B) | psrlw Udq,Ib (66),(11B),(VEX),(o128) | 893 | 2: psrlw Nq,Ib (11B) | vpsrlw Hx,Ux,Ib (66),(11B),(v1) |
840 | 4: psraw Nq,Ib (11B) | psraw Udq,Ib (66),(11B),(VEX),(o128) | 894 | 4: psraw Nq,Ib (11B) | vpsraw Hx,Ux,Ib (66),(11B),(v1) |
841 | 6: psllw Nq,Ib (11B) | psllw Udq,Ib (66),(11B),(VEX),(o128) | 895 | 6: psllw Nq,Ib (11B) | vpsllw Hx,Ux,Ib (66),(11B),(v1) |
842 | EndTable | 896 | EndTable |
843 | 897 | ||
844 | GrpTable: Grp13 | 898 | GrpTable: Grp13 |
845 | 2: psrld Nq,Ib (11B) | psrld Udq,Ib (66),(11B),(VEX),(o128) | 899 | 2: psrld Nq,Ib (11B) | vpsrld Hx,Ux,Ib (66),(11B),(v1) |
846 | 4: psrad Nq,Ib (11B) | psrad Udq,Ib (66),(11B),(VEX),(o128) | 900 | 4: psrad Nq,Ib (11B) | vpsrad Hx,Ux,Ib (66),(11B),(v1) |
847 | 6: pslld Nq,Ib (11B) | pslld Udq,Ib (66),(11B),(VEX),(o128) | 901 | 6: pslld Nq,Ib (11B) | vpslld Hx,Ux,Ib (66),(11B),(v1) |
848 | EndTable | 902 | EndTable |
849 | 903 | ||
850 | GrpTable: Grp14 | 904 | GrpTable: Grp14 |
851 | 2: psrlq Nq,Ib (11B) | psrlq Udq,Ib (66),(11B),(VEX),(o128) | 905 | 2: psrlq Nq,Ib (11B) | vpsrlq Hx,Ux,Ib (66),(11B),(v1) |
852 | 3: psrldq Udq,Ib (66),(11B),(VEX),(o128) | 906 | 3: vpsrldq Hx,Ux,Ib (66),(11B),(v1) |
853 | 6: psllq Nq,Ib (11B) | psllq Udq,Ib (66),(11B),(VEX),(o128) | 907 | 6: psllq Nq,Ib (11B) | vpsllq Hx,Ux,Ib (66),(11B),(v1) |
854 | 7: pslldq Udq,Ib (66),(11B),(VEX),(o128) | 908 | 7: vpslldq Hx,Ux,Ib (66),(11B),(v1) |
855 | EndTable | 909 | EndTable |
856 | 910 | ||
857 | GrpTable: Grp15 | 911 | GrpTable: Grp15 |
858 | 0: fxsave | 912 | 0: fxsave | RDFSBASE Ry (F3),(11B) |
859 | 1: fxrstor | 913 | 1: fxrstor | RDGSBASE Ry (F3),(11B) |
860 | 2: ldmxcsr (VEX) | 914 | 2: vldmxcsr Md (v1) | WRFSBASE Ry (F3),(11B) |
861 | 3: stmxcsr (VEX) | 915 | 3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B) |
862 | 4: XSAVE | 916 | 4: XSAVE |
863 | 5: XRSTOR | lfence (11B) | 917 | 5: XRSTOR | lfence (11B) |
864 | 6: mfence (11B) | 918 | 6: XSAVEOPT | mfence (11B) |
865 | 7: clflush | sfence (11B) | 919 | 7: clflush | sfence (11B) |
866 | EndTable | 920 | EndTable |
867 | 921 | ||
@@ -872,6 +926,12 @@ GrpTable: Grp16 | |||
872 | 3: prefetch T2 | 926 | 3: prefetch T2 |
873 | EndTable | 927 | EndTable |
874 | 928 | ||
929 | GrpTable: Grp17 | ||
930 | 1: BLSR By,Ey (v) | ||
931 | 2: BLSMSK By,Ey (v) | ||
932 | 3: BLSI By,Ey (v) | ||
933 | EndTable | ||
934 | |||
875 | # AMD's Prefetch Group | 935 | # AMD's Prefetch Group |
876 | GrpTable: GrpP | 936 | GrpTable: GrpP |
877 | 0: PREFETCH | 937 | 0: PREFETCH |
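A note on the annotation change running through the opcode tables above: as I read the new markers, the old (VEX)/(o128)/(o256) tags are replaced by (v) for opcodes that take a VEX prefix with either vector length (selected by VEX.L) and (v1) for opcodes whose VEX form is 128-bit only, while the new Hx operand column names the extra source register carried in VEX.vvvv. A minimal sketch of pulling VEX.L out of a raw byte stream, assuming the buffer really starts at a VEX prefix; this is illustrative only, not the kernel decoder's code:

        /*
         * Return the VEX.L bit (0 = 128-bit, 1 = 256-bit) of a VEX-prefixed
         * instruction, or -1 if the first byte is not a VEX prefix.  In 32-bit
         * code 0xC4/0xC5 can also encode LDS/LES; this sketch ignores that.
         */
        static int vex_l(const unsigned char *insn)
        {
                if (insn[0] == 0xc5)            /* 2-byte VEX: byte 1 is R vvvv L pp */
                        return (insn[1] >> 2) & 1;
                if (insn[0] == 0xc4)            /* 3-byte VEX: byte 2 is W vvvv L pp */
                        return (insn[2] >> 2) & 1;
                return -1;
        }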
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 3d11327c9ab4..23d8e5fecf76 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile | |||
@@ -27,6 +27,4 @@ obj-$(CONFIG_AMD_NUMA) += amdtopology.o | |||
27 | obj-$(CONFIG_ACPI_NUMA) += srat.o | 27 | obj-$(CONFIG_ACPI_NUMA) += srat.o |
28 | obj-$(CONFIG_NUMA_EMU) += numa_emulation.o | 28 | obj-$(CONFIG_NUMA_EMU) += numa_emulation.o |
29 | 29 | ||
30 | obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o | ||
31 | |||
32 | obj-$(CONFIG_MEMTEST) += memtest.o | 30 | obj-$(CONFIG_MEMTEST) += memtest.o |
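The memblock.o removal above is the hinge for most of the mm/ hunks that follow: the x86-only wrappers go away and callers switch to the generic memblock interface, which takes (base, size) rather than (start, end) and signals failure with a zero return instead of MEMBLOCK_ERROR. A rough compatibility sketch of the mapping, not kernel code, just to make the conversions below easier to read:

        #include <linux/memblock.h>

        /* old: memblock_x86_reserve_range(start, end, "NAME") */
        static inline void x86_reserve_range(phys_addr_t start, phys_addr_t end)
        {
                memblock_reserve(start, end - start);
        }

        /* old: memblock_x86_free_range(start, end) */
        static inline void x86_free_range(phys_addr_t start, phys_addr_t end)
        {
                memblock_free(start, end - start);
        }

        /* old: if (base == MEMBLOCK_ERROR) ...     new: if (!base) ... */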
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index ea305856151c..dd74e46828c0 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c | |||
@@ -201,6 +201,8 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr, | |||
201 | do { | 201 | do { |
202 | VM_BUG_ON(compound_head(page) != head); | 202 | VM_BUG_ON(compound_head(page) != head); |
203 | pages[*nr] = page; | 203 | pages[*nr] = page; |
204 | if (PageTail(page)) | ||
205 | get_huge_page_tail(page); | ||
204 | (*nr)++; | 206 | (*nr)++; |
205 | page++; | 207 | page++; |
206 | refs++; | 208 | refs++; |
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c index b49962662101..f4f29b19fac5 100644 --- a/arch/x86/mm/highmem_32.c +++ b/arch/x86/mm/highmem_32.c | |||
@@ -45,6 +45,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot) | |||
45 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | 45 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); |
46 | BUG_ON(!pte_none(*(kmap_pte-idx))); | 46 | BUG_ON(!pte_none(*(kmap_pte-idx))); |
47 | set_pte(kmap_pte-idx, mk_pte(page, prot)); | 47 | set_pte(kmap_pte-idx, mk_pte(page, prot)); |
48 | arch_flush_lazy_mmu_mode(); | ||
48 | 49 | ||
49 | return (void *)vaddr; | 50 | return (void *)vaddr; |
50 | } | 51 | } |
@@ -88,6 +89,7 @@ void __kunmap_atomic(void *kvaddr) | |||
88 | */ | 89 | */ |
89 | kpte_clear_flush(kmap_pte-idx, vaddr); | 90 | kpte_clear_flush(kmap_pte-idx, vaddr); |
90 | kmap_atomic_idx_pop(); | 91 | kmap_atomic_idx_pop(); |
92 | arch_flush_lazy_mmu_mode(); | ||
91 | } | 93 | } |
92 | #ifdef CONFIG_DEBUG_HIGHMEM | 94 | #ifdef CONFIG_DEBUG_HIGHMEM |
93 | else { | 95 | else { |
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 87488b93a65c..a298914058f9 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c | |||
@@ -67,7 +67,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse, | |||
67 | good_end = max_pfn_mapped << PAGE_SHIFT; | 67 | good_end = max_pfn_mapped << PAGE_SHIFT; |
68 | 68 | ||
69 | base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE); | 69 | base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE); |
70 | if (base == MEMBLOCK_ERROR) | 70 | if (!base) |
71 | panic("Cannot find space for the kernel page tables"); | 71 | panic("Cannot find space for the kernel page tables"); |
72 | 72 | ||
73 | pgt_buf_start = base >> PAGE_SHIFT; | 73 | pgt_buf_start = base >> PAGE_SHIFT; |
@@ -80,7 +80,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse, | |||
80 | 80 | ||
81 | void __init native_pagetable_reserve(u64 start, u64 end) | 81 | void __init native_pagetable_reserve(u64 start, u64 end) |
82 | { | 82 | { |
83 | memblock_x86_reserve_range(start, end, "PGTABLE"); | 83 | memblock_reserve(start, end - start); |
84 | } | 84 | } |
85 | 85 | ||
86 | struct map_range { | 86 | struct map_range { |
@@ -279,8 +279,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, | |||
279 | * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top) | 279 | * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top) |
280 | * so that they can be reused for other purposes. | 280 | * so that they can be reused for other purposes. |
281 | * | 281 | * |
282 | * On native it just means calling memblock_x86_reserve_range, on Xen it | 282 | * On native it just means calling memblock_reserve, on Xen it also |
283 | * also means marking RW the pagetable pages that we allocated before | 283 | * means marking RW the pagetable pages that we allocated before |
284 | * but that haven't been used. | 284 | * but that haven't been used. |
285 | * | 285 | * |
286 | * In fact on xen we mark RO the whole range pgt_buf_start - | 286 | * In fact on xen we mark RO the whole range pgt_buf_start - |
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 29f7c6d98179..0c1da394a634 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c | |||
@@ -427,23 +427,17 @@ static void __init add_one_highpage_init(struct page *page) | |||
427 | void __init add_highpages_with_active_regions(int nid, | 427 | void __init add_highpages_with_active_regions(int nid, |
428 | unsigned long start_pfn, unsigned long end_pfn) | 428 | unsigned long start_pfn, unsigned long end_pfn) |
429 | { | 429 | { |
430 | struct range *range; | 430 | phys_addr_t start, end; |
431 | int nr_range; | 431 | u64 i; |
432 | int i; | 432 | |
433 | 433 | for_each_free_mem_range(i, nid, &start, &end, NULL) { | |
434 | nr_range = __get_free_all_memory_range(&range, nid, start_pfn, end_pfn); | 434 | unsigned long pfn = clamp_t(unsigned long, PFN_UP(start), |
435 | 435 | start_pfn, end_pfn); | |
436 | for (i = 0; i < nr_range; i++) { | 436 | unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end), |
437 | struct page *page; | 437 | start_pfn, end_pfn); |
438 | int node_pfn; | 438 | for ( ; pfn < e_pfn; pfn++) |
439 | 439 | if (pfn_valid(pfn)) | |
440 | for (node_pfn = range[i].start; node_pfn < range[i].end; | 440 | add_one_highpage_init(pfn_to_page(pfn)); |
441 | node_pfn++) { | ||
442 | if (!pfn_valid(node_pfn)) | ||
443 | continue; | ||
444 | page = pfn_to_page(node_pfn); | ||
445 | add_one_highpage_init(page); | ||
446 | } | ||
447 | } | 441 | } |
448 | } | 442 | } |
449 | #else | 443 | #else |
@@ -650,18 +644,18 @@ void __init initmem_init(void) | |||
650 | highstart_pfn = highend_pfn = max_pfn; | 644 | highstart_pfn = highend_pfn = max_pfn; |
651 | if (max_pfn > max_low_pfn) | 645 | if (max_pfn > max_low_pfn) |
652 | highstart_pfn = max_low_pfn; | 646 | highstart_pfn = max_low_pfn; |
653 | memblock_x86_register_active_regions(0, 0, highend_pfn); | ||
654 | sparse_memory_present_with_active_regions(0); | ||
655 | printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", | 647 | printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", |
656 | pages_to_mb(highend_pfn - highstart_pfn)); | 648 | pages_to_mb(highend_pfn - highstart_pfn)); |
657 | num_physpages = highend_pfn; | 649 | num_physpages = highend_pfn; |
658 | high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; | 650 | high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; |
659 | #else | 651 | #else |
660 | memblock_x86_register_active_regions(0, 0, max_low_pfn); | ||
661 | sparse_memory_present_with_active_regions(0); | ||
662 | num_physpages = max_low_pfn; | 652 | num_physpages = max_low_pfn; |
663 | high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; | 653 | high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; |
664 | #endif | 654 | #endif |
655 | |||
656 | memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0); | ||
657 | sparse_memory_present_with_active_regions(0); | ||
658 | |||
665 | #ifdef CONFIG_FLATMEM | 659 | #ifdef CONFIG_FLATMEM |
666 | max_mapnr = num_physpages; | 660 | max_mapnr = num_physpages; |
667 | #endif | 661 | #endif |
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index bbaaa005bf0e..a8a56ce3a962 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -608,7 +608,7 @@ kernel_physical_mapping_init(unsigned long start, | |||
608 | #ifndef CONFIG_NUMA | 608 | #ifndef CONFIG_NUMA |
609 | void __init initmem_init(void) | 609 | void __init initmem_init(void) |
610 | { | 610 | { |
611 | memblock_x86_register_active_regions(0, 0, max_pfn); | 611 | memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0); |
612 | } | 612 | } |
613 | #endif | 613 | #endif |
614 | 614 | ||
diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c deleted file mode 100644 index 992da5ec5a64..000000000000 --- a/arch/x86/mm/memblock.c +++ /dev/null | |||
@@ -1,348 +0,0 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | #include <linux/types.h> | ||
3 | #include <linux/init.h> | ||
4 | #include <linux/bitops.h> | ||
5 | #include <linux/memblock.h> | ||
6 | #include <linux/bootmem.h> | ||
7 | #include <linux/mm.h> | ||
8 | #include <linux/range.h> | ||
9 | |||
10 | /* Check for already reserved areas */ | ||
11 | bool __init memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align) | ||
12 | { | ||
13 | struct memblock_region *r; | ||
14 | u64 addr = *addrp, last; | ||
15 | u64 size = *sizep; | ||
16 | bool changed = false; | ||
17 | |||
18 | again: | ||
19 | last = addr + size; | ||
20 | for_each_memblock(reserved, r) { | ||
21 | if (last > r->base && addr < r->base) { | ||
22 | size = r->base - addr; | ||
23 | changed = true; | ||
24 | goto again; | ||
25 | } | ||
26 | if (last > (r->base + r->size) && addr < (r->base + r->size)) { | ||
27 | addr = round_up(r->base + r->size, align); | ||
28 | size = last - addr; | ||
29 | changed = true; | ||
30 | goto again; | ||
31 | } | ||
32 | if (last <= (r->base + r->size) && addr >= r->base) { | ||
33 | *sizep = 0; | ||
34 | return false; | ||
35 | } | ||
36 | } | ||
37 | if (changed) { | ||
38 | *addrp = addr; | ||
39 | *sizep = size; | ||
40 | } | ||
41 | return changed; | ||
42 | } | ||
43 | |||
44 | /* | ||
45 | * Find next free range after start, and size is returned in *sizep | ||
46 | */ | ||
47 | u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align) | ||
48 | { | ||
49 | struct memblock_region *r; | ||
50 | |||
51 | for_each_memblock(memory, r) { | ||
52 | u64 ei_start = r->base; | ||
53 | u64 ei_last = ei_start + r->size; | ||
54 | u64 addr; | ||
55 | |||
56 | addr = round_up(ei_start, align); | ||
57 | if (addr < start) | ||
58 | addr = round_up(start, align); | ||
59 | if (addr >= ei_last) | ||
60 | continue; | ||
61 | *sizep = ei_last - addr; | ||
62 | while (memblock_x86_check_reserved_size(&addr, sizep, align)) | ||
63 | ; | ||
64 | |||
65 | if (*sizep) | ||
66 | return addr; | ||
67 | } | ||
68 | |||
69 | return MEMBLOCK_ERROR; | ||
70 | } | ||
71 | |||
72 | static __init struct range *find_range_array(int count) | ||
73 | { | ||
74 | u64 end, size, mem; | ||
75 | struct range *range; | ||
76 | |||
77 | size = sizeof(struct range) * count; | ||
78 | end = memblock.current_limit; | ||
79 | |||
80 | mem = memblock_find_in_range(0, end, size, sizeof(struct range)); | ||
81 | if (mem == MEMBLOCK_ERROR) | ||
82 | panic("can not find more space for range array"); | ||
83 | |||
84 | /* | ||
85 | * This range is temporary, so don't reserve it; it will not be | ||
86 | * overlapped because we will not allocate a new buffer before | ||
87 | * we discard this one. | ||
88 | */ | ||
89 | range = __va(mem); | ||
90 | memset(range, 0, size); | ||
91 | |||
92 | return range; | ||
93 | } | ||
94 | |||
95 | static void __init memblock_x86_subtract_reserved(struct range *range, int az) | ||
96 | { | ||
97 | u64 final_start, final_end; | ||
98 | struct memblock_region *r; | ||
99 | |||
100 | /* Take out the region array itself first */ | ||
101 | memblock_free_reserved_regions(); | ||
102 | |||
103 | memblock_dbg("Subtract (%ld early reservations)\n", memblock.reserved.cnt); | ||
104 | |||
105 | for_each_memblock(reserved, r) { | ||
106 | memblock_dbg(" [%010llx-%010llx]\n", (u64)r->base, (u64)r->base + r->size - 1); | ||
107 | final_start = PFN_DOWN(r->base); | ||
108 | final_end = PFN_UP(r->base + r->size); | ||
109 | if (final_start >= final_end) | ||
110 | continue; | ||
111 | subtract_range(range, az, final_start, final_end); | ||
112 | } | ||
113 | |||
114 | /* Put region array back ? */ | ||
115 | memblock_reserve_reserved_regions(); | ||
116 | } | ||
117 | |||
118 | struct count_data { | ||
119 | int nr; | ||
120 | }; | ||
121 | |||
122 | static int __init count_work_fn(unsigned long start_pfn, | ||
123 | unsigned long end_pfn, void *datax) | ||
124 | { | ||
125 | struct count_data *data = datax; | ||
126 | |||
127 | data->nr++; | ||
128 | |||
129 | return 0; | ||
130 | } | ||
131 | |||
132 | static int __init count_early_node_map(int nodeid) | ||
133 | { | ||
134 | struct count_data data; | ||
135 | |||
136 | data.nr = 0; | ||
137 | work_with_active_regions(nodeid, count_work_fn, &data); | ||
138 | |||
139 | return data.nr; | ||
140 | } | ||
141 | |||
142 | int __init __get_free_all_memory_range(struct range **rangep, int nodeid, | ||
143 | unsigned long start_pfn, unsigned long end_pfn) | ||
144 | { | ||
145 | int count; | ||
146 | struct range *range; | ||
147 | int nr_range; | ||
148 | |||
149 | count = (memblock.reserved.cnt + count_early_node_map(nodeid)) * 2; | ||
150 | |||
151 | range = find_range_array(count); | ||
152 | nr_range = 0; | ||
153 | |||
154 | /* | ||
155 | * Build the range array from early_node_map[] and | ||
156 | * memblock.reserved.region first | ||
157 | */ | ||
158 | nr_range = add_from_early_node_map(range, count, nr_range, nodeid); | ||
159 | subtract_range(range, count, 0, start_pfn); | ||
160 | subtract_range(range, count, end_pfn, -1ULL); | ||
161 | |||
162 | memblock_x86_subtract_reserved(range, count); | ||
163 | nr_range = clean_sort_range(range, count); | ||
164 | |||
165 | *rangep = range; | ||
166 | return nr_range; | ||
167 | } | ||
168 | |||
169 | int __init get_free_all_memory_range(struct range **rangep, int nodeid) | ||
170 | { | ||
171 | unsigned long end_pfn = -1UL; | ||
172 | |||
173 | #ifdef CONFIG_X86_32 | ||
174 | end_pfn = max_low_pfn; | ||
175 | #endif | ||
176 | return __get_free_all_memory_range(rangep, nodeid, 0, end_pfn); | ||
177 | } | ||
178 | |||
179 | static u64 __init __memblock_x86_memory_in_range(u64 addr, u64 limit, bool get_free) | ||
180 | { | ||
181 | int i, count; | ||
182 | struct range *range; | ||
183 | int nr_range; | ||
184 | u64 final_start, final_end; | ||
185 | u64 free_size; | ||
186 | struct memblock_region *r; | ||
187 | |||
188 | count = (memblock.reserved.cnt + memblock.memory.cnt) * 2; | ||
189 | |||
190 | range = find_range_array(count); | ||
191 | nr_range = 0; | ||
192 | |||
193 | addr = PFN_UP(addr); | ||
194 | limit = PFN_DOWN(limit); | ||
195 | |||
196 | for_each_memblock(memory, r) { | ||
197 | final_start = PFN_UP(r->base); | ||
198 | final_end = PFN_DOWN(r->base + r->size); | ||
199 | if (final_start >= final_end) | ||
200 | continue; | ||
201 | if (final_start >= limit || final_end <= addr) | ||
202 | continue; | ||
203 | |||
204 | nr_range = add_range(range, count, nr_range, final_start, final_end); | ||
205 | } | ||
206 | subtract_range(range, count, 0, addr); | ||
207 | subtract_range(range, count, limit, -1ULL); | ||
208 | |||
209 | /* Subtract memblock.reserved.region in range ? */ | ||
210 | if (!get_free) | ||
211 | goto sort_and_count_them; | ||
212 | for_each_memblock(reserved, r) { | ||
213 | final_start = PFN_DOWN(r->base); | ||
214 | final_end = PFN_UP(r->base + r->size); | ||
215 | if (final_start >= final_end) | ||
216 | continue; | ||
217 | if (final_start >= limit || final_end <= addr) | ||
218 | continue; | ||
219 | |||
220 | subtract_range(range, count, final_start, final_end); | ||
221 | } | ||
222 | |||
223 | sort_and_count_them: | ||
224 | nr_range = clean_sort_range(range, count); | ||
225 | |||
226 | free_size = 0; | ||
227 | for (i = 0; i < nr_range; i++) | ||
228 | free_size += range[i].end - range[i].start; | ||
229 | |||
230 | return free_size << PAGE_SHIFT; | ||
231 | } | ||
232 | |||
233 | u64 __init memblock_x86_free_memory_in_range(u64 addr, u64 limit) | ||
234 | { | ||
235 | return __memblock_x86_memory_in_range(addr, limit, true); | ||
236 | } | ||
237 | |||
238 | u64 __init memblock_x86_memory_in_range(u64 addr, u64 limit) | ||
239 | { | ||
240 | return __memblock_x86_memory_in_range(addr, limit, false); | ||
241 | } | ||
242 | |||
243 | void __init memblock_x86_reserve_range(u64 start, u64 end, char *name) | ||
244 | { | ||
245 | if (start == end) | ||
246 | return; | ||
247 | |||
248 | if (WARN_ONCE(start > end, "memblock_x86_reserve_range: wrong range [%#llx, %#llx)\n", start, end)) | ||
249 | return; | ||
250 | |||
251 | memblock_dbg(" memblock_x86_reserve_range: [%#010llx-%#010llx] %16s\n", start, end - 1, name); | ||
252 | |||
253 | memblock_reserve(start, end - start); | ||
254 | } | ||
255 | |||
256 | void __init memblock_x86_free_range(u64 start, u64 end) | ||
257 | { | ||
258 | if (start == end) | ||
259 | return; | ||
260 | |||
261 | if (WARN_ONCE(start > end, "memblock_x86_free_range: wrong range [%#llx, %#llx)\n", start, end)) | ||
262 | return; | ||
263 | |||
264 | memblock_dbg(" memblock_x86_free_range: [%#010llx-%#010llx]\n", start, end - 1); | ||
265 | |||
266 | memblock_free(start, end - start); | ||
267 | } | ||
268 | |||
269 | /* | ||
270 | * Need to call this function after memblock_x86_register_active_regions, | ||
271 | * so early_node_map[] is filled already. | ||
272 | */ | ||
273 | u64 __init memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align) | ||
274 | { | ||
275 | u64 addr; | ||
276 | addr = find_memory_core_early(nid, size, align, start, end); | ||
277 | if (addr != MEMBLOCK_ERROR) | ||
278 | return addr; | ||
279 | |||
280 | /* Fallback; start/end should already be within the node range */ | ||
281 | return memblock_find_in_range(start, end, size, align); | ||
282 | } | ||
283 | |||
284 | /* | ||
285 | * Finds an active region in the address range from start_pfn to last_pfn and | ||
286 | * returns its range in ei_startpfn and ei_endpfn for the memblock entry. | ||
287 | */ | ||
288 | static int __init memblock_x86_find_active_region(const struct memblock_region *ei, | ||
289 | unsigned long start_pfn, | ||
290 | unsigned long last_pfn, | ||
291 | unsigned long *ei_startpfn, | ||
292 | unsigned long *ei_endpfn) | ||
293 | { | ||
294 | u64 align = PAGE_SIZE; | ||
295 | |||
296 | *ei_startpfn = round_up(ei->base, align) >> PAGE_SHIFT; | ||
297 | *ei_endpfn = round_down(ei->base + ei->size, align) >> PAGE_SHIFT; | ||
298 | |||
299 | /* Skip map entries smaller than a page */ | ||
300 | if (*ei_startpfn >= *ei_endpfn) | ||
301 | return 0; | ||
302 | |||
303 | /* Skip if map is outside the node */ | ||
304 | if (*ei_endpfn <= start_pfn || *ei_startpfn >= last_pfn) | ||
305 | return 0; | ||
306 | |||
307 | /* Check for overlaps */ | ||
308 | if (*ei_startpfn < start_pfn) | ||
309 | *ei_startpfn = start_pfn; | ||
310 | if (*ei_endpfn > last_pfn) | ||
311 | *ei_endpfn = last_pfn; | ||
312 | |||
313 | return 1; | ||
314 | } | ||
315 | |||
316 | /* Walk the memblock.memory map and register active regions within a node */ | ||
317 | void __init memblock_x86_register_active_regions(int nid, unsigned long start_pfn, | ||
318 | unsigned long last_pfn) | ||
319 | { | ||
320 | unsigned long ei_startpfn; | ||
321 | unsigned long ei_endpfn; | ||
322 | struct memblock_region *r; | ||
323 | |||
324 | for_each_memblock(memory, r) | ||
325 | if (memblock_x86_find_active_region(r, start_pfn, last_pfn, | ||
326 | &ei_startpfn, &ei_endpfn)) | ||
327 | add_active_range(nid, ei_startpfn, ei_endpfn); | ||
328 | } | ||
329 | |||
330 | /* | ||
331 | * Find the hole size (in bytes) in the memory range. | ||
332 | * @start: starting address of the memory range to scan | ||
333 | * @end: ending address of the memory range to scan | ||
334 | */ | ||
335 | u64 __init memblock_x86_hole_size(u64 start, u64 end) | ||
336 | { | ||
337 | unsigned long start_pfn = start >> PAGE_SHIFT; | ||
338 | unsigned long last_pfn = end >> PAGE_SHIFT; | ||
339 | unsigned long ei_startpfn, ei_endpfn, ram = 0; | ||
340 | struct memblock_region *r; | ||
341 | |||
342 | for_each_memblock(memory, r) | ||
343 | if (memblock_x86_find_active_region(r, start_pfn, last_pfn, | ||
344 | &ei_startpfn, &ei_endpfn)) | ||
345 | ram += ei_endpfn - ei_startpfn; | ||
346 | |||
347 | return end - start - ((u64)ram << PAGE_SHIFT); | ||
348 | } | ||
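The hand-rolled "find the next free range" walk that memblock_x86_find_in_range_size() and its callers implemented above is exactly what the generic for_each_free_mem_range() iterator now provides; the memtest.c hunk just below is a straight conversion. A minimal sketch of the replacement pattern, with process() standing in for whatever the caller does per range (that helper is hypothetical, everything else is as used in the hunks):

        #include <linux/kernel.h>
        #include <linux/memblock.h>

        static void __init process(phys_addr_t base, phys_addr_t size)
        {
                /* caller-specific work on one free range */
        }

        static void __init walk_free_ranges(phys_addr_t area_start, phys_addr_t area_end)
        {
                phys_addr_t this_start, this_end;
                u64 i;

                /* iterates memory minus reserved regions, on any node (MAX_NUMNODES) */
                for_each_free_mem_range(i, MAX_NUMNODES, &this_start, &this_end, NULL) {
                        this_start = clamp_t(phys_addr_t, this_start, area_start, area_end);
                        this_end   = clamp_t(phys_addr_t, this_end, area_start, area_end);
                        if (this_start < this_end)
                                process(this_start, this_end - this_start);
                }
        }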
diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c index 92faf3a1c53e..c80b9fb95734 100644 --- a/arch/x86/mm/memtest.c +++ b/arch/x86/mm/memtest.c | |||
@@ -34,7 +34,7 @@ static void __init reserve_bad_mem(u64 pattern, u64 start_bad, u64 end_bad) | |||
34 | (unsigned long long) pattern, | 34 | (unsigned long long) pattern, |
35 | (unsigned long long) start_bad, | 35 | (unsigned long long) start_bad, |
36 | (unsigned long long) end_bad); | 36 | (unsigned long long) end_bad); |
37 | memblock_x86_reserve_range(start_bad, end_bad, "BAD RAM"); | 37 | memblock_reserve(start_bad, end_bad - start_bad); |
38 | } | 38 | } |
39 | 39 | ||
40 | static void __init memtest(u64 pattern, u64 start_phys, u64 size) | 40 | static void __init memtest(u64 pattern, u64 start_phys, u64 size) |
@@ -70,24 +70,19 @@ static void __init memtest(u64 pattern, u64 start_phys, u64 size) | |||
70 | 70 | ||
71 | static void __init do_one_pass(u64 pattern, u64 start, u64 end) | 71 | static void __init do_one_pass(u64 pattern, u64 start, u64 end) |
72 | { | 72 | { |
73 | u64 size = 0; | 73 | u64 i; |
74 | 74 | phys_addr_t this_start, this_end; | |
75 | while (start < end) { | 75 | |
76 | start = memblock_x86_find_in_range_size(start, &size, 1); | 76 | for_each_free_mem_range(i, MAX_NUMNODES, &this_start, &this_end, NULL) { |
77 | 77 | this_start = clamp_t(phys_addr_t, this_start, start, end); | |
78 | /* done ? */ | 78 | this_end = clamp_t(phys_addr_t, this_end, start, end); |
79 | if (start >= end) | 79 | if (this_start < this_end) { |
80 | break; | 80 | printk(KERN_INFO " %010llx - %010llx pattern %016llx\n", |
81 | if (start + size > end) | 81 | (unsigned long long)this_start, |
82 | size = end - start; | 82 | (unsigned long long)this_end, |
83 | 83 | (unsigned long long)cpu_to_be64(pattern)); | |
84 | printk(KERN_INFO " %010llx - %010llx pattern %016llx\n", | 84 | memtest(pattern, this_start, this_end - this_start); |
85 | (unsigned long long) start, | 85 | } |
86 | (unsigned long long) start + size, | ||
87 | (unsigned long long) cpu_to_be64(pattern)); | ||
88 | memtest(pattern, start, size); | ||
89 | |||
90 | start += size; | ||
91 | } | 86 | } |
92 | } | 87 | } |
93 | 88 | ||
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index fbeaaf416610..496f494593bf 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c | |||
@@ -192,8 +192,6 @@ int __init numa_add_memblk(int nid, u64 start, u64 end) | |||
192 | /* Initialize NODE_DATA for a node on the local memory */ | 192 | /* Initialize NODE_DATA for a node on the local memory */ |
193 | static void __init setup_node_data(int nid, u64 start, u64 end) | 193 | static void __init setup_node_data(int nid, u64 start, u64 end) |
194 | { | 194 | { |
195 | const u64 nd_low = PFN_PHYS(MAX_DMA_PFN); | ||
196 | const u64 nd_high = PFN_PHYS(max_pfn_mapped); | ||
197 | const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE); | 195 | const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE); |
198 | bool remapped = false; | 196 | bool remapped = false; |
199 | u64 nd_pa; | 197 | u64 nd_pa; |
@@ -224,17 +222,12 @@ static void __init setup_node_data(int nid, u64 start, u64 end) | |||
224 | nd_pa = __pa(nd); | 222 | nd_pa = __pa(nd); |
225 | remapped = true; | 223 | remapped = true; |
226 | } else { | 224 | } else { |
227 | nd_pa = memblock_x86_find_in_range_node(nid, nd_low, nd_high, | 225 | nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid); |
228 | nd_size, SMP_CACHE_BYTES); | 226 | if (!nd_pa) { |
229 | if (nd_pa == MEMBLOCK_ERROR) | ||
230 | nd_pa = memblock_find_in_range(nd_low, nd_high, | ||
231 | nd_size, SMP_CACHE_BYTES); | ||
232 | if (nd_pa == MEMBLOCK_ERROR) { | ||
233 | pr_err("Cannot find %zu bytes in node %d\n", | 227 | pr_err("Cannot find %zu bytes in node %d\n", |
234 | nd_size, nid); | 228 | nd_size, nid); |
235 | return; | 229 | return; |
236 | } | 230 | } |
237 | memblock_x86_reserve_range(nd_pa, nd_pa + nd_size, "NODE_DATA"); | ||
238 | nd = __va(nd_pa); | 231 | nd = __va(nd_pa); |
239 | } | 232 | } |
240 | 233 | ||
@@ -371,8 +364,7 @@ void __init numa_reset_distance(void) | |||
371 | 364 | ||
372 | /* numa_distance could be 1LU marking allocation failure, test cnt */ | 365 | /* numa_distance could be 1LU marking allocation failure, test cnt */ |
373 | if (numa_distance_cnt) | 366 | if (numa_distance_cnt) |
374 | memblock_x86_free_range(__pa(numa_distance), | 367 | memblock_free(__pa(numa_distance), size); |
375 | __pa(numa_distance) + size); | ||
376 | numa_distance_cnt = 0; | 368 | numa_distance_cnt = 0; |
377 | numa_distance = NULL; /* enable table creation */ | 369 | numa_distance = NULL; /* enable table creation */ |
378 | } | 370 | } |
@@ -395,13 +387,13 @@ static int __init numa_alloc_distance(void) | |||
395 | 387 | ||
396 | phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped), | 388 | phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped), |
397 | size, PAGE_SIZE); | 389 | size, PAGE_SIZE); |
398 | if (phys == MEMBLOCK_ERROR) { | 390 | if (!phys) { |
399 | pr_warning("NUMA: Warning: can't allocate distance table!\n"); | 391 | pr_warning("NUMA: Warning: can't allocate distance table!\n"); |
400 | /* don't retry until explicitly reset */ | 392 | /* don't retry until explicitly reset */ |
401 | numa_distance = (void *)1LU; | 393 | numa_distance = (void *)1LU; |
402 | return -ENOMEM; | 394 | return -ENOMEM; |
403 | } | 395 | } |
404 | memblock_x86_reserve_range(phys, phys + size, "NUMA DIST"); | 396 | memblock_reserve(phys, size); |
405 | 397 | ||
406 | numa_distance = __va(phys); | 398 | numa_distance = __va(phys); |
407 | numa_distance_cnt = cnt; | 399 | numa_distance_cnt = cnt; |
@@ -482,8 +474,8 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi) | |||
482 | numaram = 0; | 474 | numaram = 0; |
483 | } | 475 | } |
484 | 476 | ||
485 | e820ram = max_pfn - (memblock_x86_hole_size(0, | 477 | e820ram = max_pfn - absent_pages_in_range(0, max_pfn); |
486 | PFN_PHYS(max_pfn)) >> PAGE_SHIFT); | 478 | |
487 | /* We seem to lose 3 pages somewhere. Allow 1M of slack. */ | 479 | /* We seem to lose 3 pages somewhere. Allow 1M of slack. */ |
488 | if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) { | 480 | if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) { |
489 | printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n", | 481 | printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n", |
@@ -505,13 +497,10 @@ static int __init numa_register_memblks(struct numa_meminfo *mi) | |||
505 | if (WARN_ON(nodes_empty(node_possible_map))) | 497 | if (WARN_ON(nodes_empty(node_possible_map))) |
506 | return -EINVAL; | 498 | return -EINVAL; |
507 | 499 | ||
508 | for (i = 0; i < mi->nr_blks; i++) | 500 | for (i = 0; i < mi->nr_blks; i++) { |
509 | memblock_x86_register_active_regions(mi->blk[i].nid, | 501 | struct numa_memblk *mb = &mi->blk[i]; |
510 | mi->blk[i].start >> PAGE_SHIFT, | 502 | memblock_set_node(mb->start, mb->end - mb->start, mb->nid); |
511 | mi->blk[i].end >> PAGE_SHIFT); | 503 | } |
512 | |||
513 | /* for out of order entries */ | ||
514 | sort_node_map(); | ||
515 | 504 | ||
516 | /* | 505 | /* |
517 | * If sections array is gonna be used for pfn -> nid mapping, check | 506 | * If sections array is gonna be used for pfn -> nid mapping, check |
@@ -545,6 +534,8 @@ static int __init numa_register_memblks(struct numa_meminfo *mi) | |||
545 | setup_node_data(nid, start, end); | 534 | setup_node_data(nid, start, end); |
546 | } | 535 | } |
547 | 536 | ||
537 | /* Dump memblock with node info and return. */ | ||
538 | memblock_dump_all(); | ||
548 | return 0; | 539 | return 0; |
549 | } | 540 | } |
550 | 541 | ||
@@ -582,7 +573,7 @@ static int __init numa_init(int (*init_func)(void)) | |||
582 | nodes_clear(node_possible_map); | 573 | nodes_clear(node_possible_map); |
583 | nodes_clear(node_online_map); | 574 | nodes_clear(node_online_map); |
584 | memset(&numa_meminfo, 0, sizeof(numa_meminfo)); | 575 | memset(&numa_meminfo, 0, sizeof(numa_meminfo)); |
585 | remove_all_active_ranges(); | 576 | WARN_ON(memblock_set_node(0, ULLONG_MAX, MAX_NUMNODES)); |
586 | numa_reset_distance(); | 577 | numa_reset_distance(); |
587 | 578 | ||
588 | ret = init_func(); | 579 | ret = init_func(); |
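Taken together, the numa.c hunks drop the early_node_map plumbing and keep node information in memblock itself: numa_init() wipes any stale assignment with memblock_set_node(0, ULLONG_MAX, MAX_NUMNODES), each meminfo block is tagged with its node id, and per-node boot memory comes from memblock_alloc_nid(). A sketch of that sequence for one node, using only the calls and signatures visible in the hunks above (the wrapper function itself is hypothetical):

        #include <linux/errno.h>
        #include <linux/cache.h>
        #include <linux/memblock.h>

        static int __init node_memblock_sketch(u64 start, u64 end, int nid, size_t nd_size)
        {
                u64 nd_pa;

                /* tag the physical range [start, end) with its node id */
                memblock_set_node(start, end - start, nid);

                /* node-local boot allocation; a zero return now signals failure */
                nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
                if (!nd_pa)
                        return -ENOMEM;

                /* no explicit memblock_reserve() needed: the allocator reserves it */
                return 0;
        }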
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index 3adebe7e536a..534255a36b6b 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c | |||
@@ -199,23 +199,23 @@ void __init init_alloc_remap(int nid, u64 start, u64 end) | |||
199 | 199 | ||
200 | /* allocate node memory and the lowmem remap area */ | 200 | /* allocate node memory and the lowmem remap area */ |
201 | node_pa = memblock_find_in_range(start, end, size, LARGE_PAGE_BYTES); | 201 | node_pa = memblock_find_in_range(start, end, size, LARGE_PAGE_BYTES); |
202 | if (node_pa == MEMBLOCK_ERROR) { | 202 | if (!node_pa) { |
203 | pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n", | 203 | pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n", |
204 | size, nid); | 204 | size, nid); |
205 | return; | 205 | return; |
206 | } | 206 | } |
207 | memblock_x86_reserve_range(node_pa, node_pa + size, "KVA RAM"); | 207 | memblock_reserve(node_pa, size); |
208 | 208 | ||
209 | remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT, | 209 | remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT, |
210 | max_low_pfn << PAGE_SHIFT, | 210 | max_low_pfn << PAGE_SHIFT, |
211 | size, LARGE_PAGE_BYTES); | 211 | size, LARGE_PAGE_BYTES); |
212 | if (remap_pa == MEMBLOCK_ERROR) { | 212 | if (!remap_pa) { |
213 | pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n", | 213 | pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n", |
214 | size, nid); | 214 | size, nid); |
215 | memblock_x86_free_range(node_pa, node_pa + size); | 215 | memblock_free(node_pa, size); |
216 | return; | 216 | return; |
217 | } | 217 | } |
218 | memblock_x86_reserve_range(remap_pa, remap_pa + size, "KVA PG"); | 218 | memblock_reserve(remap_pa, size); |
219 | remap_va = phys_to_virt(remap_pa); | 219 | remap_va = phys_to_virt(remap_pa); |
220 | 220 | ||
221 | /* perform actual remap */ | 221 | /* perform actual remap */ |
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index dd27f401f0a0..92e27119ee1a 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c | |||
@@ -19,7 +19,7 @@ unsigned long __init numa_free_all_bootmem(void) | |||
19 | for_each_online_node(i) | 19 | for_each_online_node(i) |
20 | pages += free_all_bootmem_node(NODE_DATA(i)); | 20 | pages += free_all_bootmem_node(NODE_DATA(i)); |
21 | 21 | ||
22 | pages += free_all_memory_core_early(MAX_NUMNODES); | 22 | pages += free_low_memory_core_early(MAX_NUMNODES); |
23 | 23 | ||
24 | return pages; | 24 | return pages; |
25 | } | 25 | } |
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c index d0ed086b6247..46db56845f18 100644 --- a/arch/x86/mm/numa_emulation.c +++ b/arch/x86/mm/numa_emulation.c | |||
@@ -28,6 +28,16 @@ static int __init emu_find_memblk_by_nid(int nid, const struct numa_meminfo *mi) | |||
28 | return -ENOENT; | 28 | return -ENOENT; |
29 | } | 29 | } |
30 | 30 | ||
31 | static u64 mem_hole_size(u64 start, u64 end) | ||
32 | { | ||
33 | unsigned long start_pfn = PFN_UP(start); | ||
34 | unsigned long end_pfn = PFN_DOWN(end); | ||
35 | |||
36 | if (start_pfn < end_pfn) | ||
37 | return PFN_PHYS(absent_pages_in_range(start_pfn, end_pfn)); | ||
38 | return 0; | ||
39 | } | ||
40 | |||
31 | /* | 41 | /* |
32 | * Sets up nid to range from @start to @end. The return value is -errno if | 42 | * Sets up nid to range from @start to @end. The return value is -errno if |
33 | * something went wrong, 0 otherwise. | 43 | * something went wrong, 0 otherwise. |
@@ -89,7 +99,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei, | |||
89 | * Calculate target node size. x86_32 freaks on __udivdi3() so do | 99 | * Calculate target node size. x86_32 freaks on __udivdi3() so do |
90 | * the division in ulong number of pages and convert back. | 100 | * the division in ulong number of pages and convert back. |
91 | */ | 101 | */ |
92 | size = max_addr - addr - memblock_x86_hole_size(addr, max_addr); | 102 | size = max_addr - addr - mem_hole_size(addr, max_addr); |
93 | size = PFN_PHYS((unsigned long)(size >> PAGE_SHIFT) / nr_nodes); | 103 | size = PFN_PHYS((unsigned long)(size >> PAGE_SHIFT) / nr_nodes); |
94 | 104 | ||
95 | /* | 105 | /* |
@@ -135,8 +145,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei, | |||
135 | * Continue to add memory to this fake node if its | 145 | * Continue to add memory to this fake node if its |
136 | * non-reserved memory is less than the per-node size. | 146 | * non-reserved memory is less than the per-node size. |
137 | */ | 147 | */ |
138 | while (end - start - | 148 | while (end - start - mem_hole_size(start, end) < size) { |
139 | memblock_x86_hole_size(start, end) < size) { | ||
140 | end += FAKE_NODE_MIN_SIZE; | 149 | end += FAKE_NODE_MIN_SIZE; |
141 | if (end > limit) { | 150 | if (end > limit) { |
142 | end = limit; | 151 | end = limit; |
@@ -150,7 +159,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei, | |||
150 | * this one must extend to the boundary. | 159 | * this one must extend to the boundary. |
151 | */ | 160 | */ |
152 | if (end < dma32_end && dma32_end - end - | 161 | if (end < dma32_end && dma32_end - end - |
153 | memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) | 162 | mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) |
154 | end = dma32_end; | 163 | end = dma32_end; |
155 | 164 | ||
156 | /* | 165 | /* |
@@ -158,8 +167,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei, | |||
158 | * next node, this one must extend to the end of the | 167 | * next node, this one must extend to the end of the |
159 | * physical node. | 168 | * physical node. |
160 | */ | 169 | */ |
161 | if (limit - end - | 170 | if (limit - end - mem_hole_size(end, limit) < size) |
162 | memblock_x86_hole_size(end, limit) < size) | ||
163 | end = limit; | 171 | end = limit; |
164 | 172 | ||
165 | ret = emu_setup_memblk(ei, pi, nid++ % nr_nodes, | 173 | ret = emu_setup_memblk(ei, pi, nid++ % nr_nodes, |
@@ -180,7 +188,7 @@ static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size) | |||
180 | { | 188 | { |
181 | u64 end = start + size; | 189 | u64 end = start + size; |
182 | 190 | ||
183 | while (end - start - memblock_x86_hole_size(start, end) < size) { | 191 | while (end - start - mem_hole_size(start, end) < size) { |
184 | end += FAKE_NODE_MIN_SIZE; | 192 | end += FAKE_NODE_MIN_SIZE; |
185 | if (end > max_addr) { | 193 | if (end > max_addr) { |
186 | end = max_addr; | 194 | end = max_addr; |
@@ -211,8 +219,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei, | |||
211 | * creates a uniform distribution of node sizes across the entire | 219 | * creates a uniform distribution of node sizes across the entire |
212 | * machine (but not necessarily over physical nodes). | 220 | * machine (but not necessarily over physical nodes). |
213 | */ | 221 | */ |
214 | min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / | 222 | min_size = (max_addr - addr - mem_hole_size(addr, max_addr)) / MAX_NUMNODES; |
215 | MAX_NUMNODES; | ||
216 | min_size = max(min_size, FAKE_NODE_MIN_SIZE); | 223 | min_size = max(min_size, FAKE_NODE_MIN_SIZE); |
217 | if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size) | 224 | if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size) |
218 | min_size = (min_size + FAKE_NODE_MIN_SIZE) & | 225 | min_size = (min_size + FAKE_NODE_MIN_SIZE) & |
@@ -252,7 +259,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei, | |||
252 | * this one must extend to the boundary. | 259 | * this one must extend to the boundary. |
253 | */ | 260 | */ |
254 | if (end < dma32_end && dma32_end - end - | 261 | if (end < dma32_end && dma32_end - end - |
255 | memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) | 262 | mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) |
256 | end = dma32_end; | 263 | end = dma32_end; |
257 | 264 | ||
258 | /* | 265 | /* |
@@ -260,8 +267,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei, | |||
260 | * next node, this one must extend to the end of the | 267 | * next node, this one must extend to the end of the |
261 | * physical node. | 268 | * physical node. |
262 | */ | 269 | */ |
263 | if (limit - end - | 270 | if (limit - end - mem_hole_size(end, limit) < size) |
264 | memblock_x86_hole_size(end, limit) < size) | ||
265 | end = limit; | 271 | end = limit; |
266 | 272 | ||
267 | ret = emu_setup_memblk(ei, pi, nid++ % MAX_NUMNODES, | 273 | ret = emu_setup_memblk(ei, pi, nid++ % MAX_NUMNODES, |
@@ -351,11 +357,11 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) | |||
351 | 357 | ||
352 | phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped), | 358 | phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped), |
353 | phys_size, PAGE_SIZE); | 359 | phys_size, PAGE_SIZE); |
354 | if (phys == MEMBLOCK_ERROR) { | 360 | if (!phys) { |
355 | pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n"); | 361 | pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n"); |
356 | goto no_emu; | 362 | goto no_emu; |
357 | } | 363 | } |
358 | memblock_x86_reserve_range(phys, phys + phys_size, "TMP NUMA DIST"); | 364 | memblock_reserve(phys, phys_size); |
359 | phys_dist = __va(phys); | 365 | phys_dist = __va(phys); |
360 | 366 | ||
361 | for (i = 0; i < numa_dist_cnt; i++) | 367 | for (i = 0; i < numa_dist_cnt; i++) |
@@ -424,7 +430,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) | |||
424 | 430 | ||
425 | /* free the copied physical distance table */ | 431 | /* free the copied physical distance table */ |
426 | if (phys_dist) | 432 | if (phys_dist) |
427 | memblock_x86_free_range(__pa(phys_dist), __pa(phys_dist) + phys_size); | 433 | memblock_free(__pa(phys_dist), phys_size); |
428 | return; | 434 | return; |
429 | 435 | ||
430 | no_emu: | 436 | no_emu: |
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index bfab3fa10edc..7b65f752c5f8 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c | |||
@@ -568,8 +568,8 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i]; | |||
568 | break; | 568 | break; |
569 | } | 569 | } |
570 | if (filter[i].jt != 0) { | 570 | if (filter[i].jt != 0) { |
571 | if (filter[i].jf) | 571 | if (filter[i].jf && f_offset) |
572 | t_offset += is_near(f_offset) ? 2 : 6; | 572 | t_offset += is_near(f_offset) ? 2 : 5; |
573 | EMIT_COND_JMP(t_op, t_offset); | 573 | EMIT_COND_JMP(t_op, t_offset); |
574 | if (filter[i].jf) | 574 | if (filter[i].jf) |
575 | EMIT_JMP(f_offset); | 575 | EMIT_JMP(f_offset); |
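The bpf_jit_comp.c change is branch-size arithmetic: when a conditional jump (t_offset) has to hop over the unconditional jump emitted for the false branch, that second jump occupies 2 bytes in short form (EB rel8) and 5 bytes in near form (E9 plus a 32-bit displacement), not 6; and when f_offset is zero the false branch is the fall-through and no jump needs to be accounted for at all, hence the extra check. A sketch of the size rule this amounts to, assuming the file's is_near() macro:

        /* Size in bytes of the unconditional jump laid down for the false branch. */
        static int emitted_jmp_size(int offset)
        {
                if (offset == 0)
                        return 0;               /* fall through, nothing emitted */
                return is_near(offset) ? 2      /* EB rel8 */
                                       : 5;     /* E9 rel32 */
        }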
diff --git a/arch/x86/oprofile/Makefile b/arch/x86/oprofile/Makefile index 446902b2a6b6..1599f568f0e2 100644 --- a/arch/x86/oprofile/Makefile +++ b/arch/x86/oprofile/Makefile | |||
@@ -4,9 +4,8 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \ | |||
4 | oprof.o cpu_buffer.o buffer_sync.o \ | 4 | oprof.o cpu_buffer.o buffer_sync.o \ |
5 | event_buffer.o oprofile_files.o \ | 5 | event_buffer.o oprofile_files.o \ |
6 | oprofilefs.o oprofile_stats.o \ | 6 | oprofilefs.o oprofile_stats.o \ |
7 | timer_int.o ) | 7 | timer_int.o nmi_timer_int.o ) |
8 | 8 | ||
9 | oprofile-y := $(DRIVER_OBJS) init.o backtrace.o | 9 | oprofile-y := $(DRIVER_OBJS) init.o backtrace.o |
10 | oprofile-$(CONFIG_X86_LOCAL_APIC) += nmi_int.o op_model_amd.o \ | 10 | oprofile-$(CONFIG_X86_LOCAL_APIC) += nmi_int.o op_model_amd.o \ |
11 | op_model_ppro.o op_model_p4.o | 11 | op_model_ppro.o op_model_p4.o |
12 | oprofile-$(CONFIG_X86_IO_APIC) += nmi_timer_int.o | ||
diff --git a/arch/x86/oprofile/init.c b/arch/x86/oprofile/init.c index cdfe4c54deca..9e138d00ad36 100644 --- a/arch/x86/oprofile/init.c +++ b/arch/x86/oprofile/init.c | |||
@@ -16,34 +16,23 @@ | |||
16 | * with the NMI mode driver. | 16 | * with the NMI mode driver. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #ifdef CONFIG_X86_LOCAL_APIC | ||
19 | extern int op_nmi_init(struct oprofile_operations *ops); | 20 | extern int op_nmi_init(struct oprofile_operations *ops); |
20 | extern int op_nmi_timer_init(struct oprofile_operations *ops); | ||
21 | extern void op_nmi_exit(void); | 21 | extern void op_nmi_exit(void); |
22 | extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth); | 22 | #else |
23 | static int op_nmi_init(struct oprofile_operations *ops) { return -ENODEV; } | ||
24 | static void op_nmi_exit(void) { } | ||
25 | #endif | ||
23 | 26 | ||
27 | extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth); | ||
24 | 28 | ||
25 | int __init oprofile_arch_init(struct oprofile_operations *ops) | 29 | int __init oprofile_arch_init(struct oprofile_operations *ops) |
26 | { | 30 | { |
27 | int ret; | ||
28 | |||
29 | ret = -ENODEV; | ||
30 | |||
31 | #ifdef CONFIG_X86_LOCAL_APIC | ||
32 | ret = op_nmi_init(ops); | ||
33 | #endif | ||
34 | #ifdef CONFIG_X86_IO_APIC | ||
35 | if (ret < 0) | ||
36 | ret = op_nmi_timer_init(ops); | ||
37 | #endif | ||
38 | ops->backtrace = x86_backtrace; | 31 | ops->backtrace = x86_backtrace; |
39 | 32 | return op_nmi_init(ops); | |
40 | return ret; | ||
41 | } | 33 | } |
42 | 34 | ||
43 | |||
44 | void oprofile_arch_exit(void) | 35 | void oprofile_arch_exit(void) |
45 | { | 36 | { |
46 | #ifdef CONFIG_X86_LOCAL_APIC | ||
47 | op_nmi_exit(); | 37 | op_nmi_exit(); |
48 | #endif | ||
49 | } | 38 | } |
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 75f9528e0372..26b8a8514ee5 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c | |||
@@ -595,24 +595,36 @@ static int __init p4_init(char **cpu_type) | |||
595 | return 0; | 595 | return 0; |
596 | } | 596 | } |
597 | 597 | ||
598 | static int force_arch_perfmon; | 598 | enum __force_cpu_type { |
599 | static int force_cpu_type(const char *str, struct kernel_param *kp) | 599 | reserved = 0, /* do not force */ |
600 | timer, | ||
601 | arch_perfmon, | ||
602 | }; | ||
603 | |||
604 | static int force_cpu_type; | ||
605 | |||
606 | static int set_cpu_type(const char *str, struct kernel_param *kp) | ||
600 | { | 607 | { |
601 | if (!strcmp(str, "arch_perfmon")) { | 608 | if (!strcmp(str, "timer")) { |
602 | force_arch_perfmon = 1; | 609 | force_cpu_type = timer; |
610 | printk(KERN_INFO "oprofile: forcing NMI timer mode\n"); | ||
611 | } else if (!strcmp(str, "arch_perfmon")) { | ||
612 | force_cpu_type = arch_perfmon; | ||
603 | printk(KERN_INFO "oprofile: forcing architectural perfmon\n"); | 613 | printk(KERN_INFO "oprofile: forcing architectural perfmon\n"); |
614 | } else { | ||
615 | force_cpu_type = 0; | ||
604 | } | 616 | } |
605 | 617 | ||
606 | return 0; | 618 | return 0; |
607 | } | 619 | } |
608 | module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0); | 620 | module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0); |
609 | 621 | ||
610 | static int __init ppro_init(char **cpu_type) | 622 | static int __init ppro_init(char **cpu_type) |
611 | { | 623 | { |
612 | __u8 cpu_model = boot_cpu_data.x86_model; | 624 | __u8 cpu_model = boot_cpu_data.x86_model; |
613 | struct op_x86_model_spec *spec = &op_ppro_spec; /* default */ | 625 | struct op_x86_model_spec *spec = &op_ppro_spec; /* default */ |
614 | 626 | ||
615 | if (force_arch_perfmon && cpu_has_arch_perfmon) | 627 | if (force_cpu_type == arch_perfmon && cpu_has_arch_perfmon) |
616 | return 0; | 628 | return 0; |
617 | 629 | ||
618 | /* | 630 | /* |
@@ -679,6 +691,9 @@ int __init op_nmi_init(struct oprofile_operations *ops) | |||
679 | if (!cpu_has_apic) | 691 | if (!cpu_has_apic) |
680 | return -ENODEV; | 692 | return -ENODEV; |
681 | 693 | ||
694 | if (force_cpu_type == timer) | ||
695 | return -ENODEV; | ||
696 | |||
682 | switch (vendor) { | 697 | switch (vendor) { |
683 | case X86_VENDOR_AMD: | 698 | case X86_VENDOR_AMD: |
684 | /* Needs to be at least an Athlon (or hammer in 32bit mode) */ | 699 | /* Needs to be at least an Athlon (or hammer in 32bit mode) */ |
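With the nmi_int.c changes above, the oprofile cpu_type parameter gains a third value: besides forcing architectural perfmon, "timer" makes op_nmi_init() return -ENODEV so the profiler falls back to the timer-based mode instead of using the performance counters. With oprofile built in, that would presumably be spelled on the kernel command line as:

        oprofile.cpu_type=timer

(or cpu_type=timer as a modprobe option when oprofile is modular; the exact spelling is an educated guess from the module_param_call() in the hunk above).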
diff --git a/arch/x86/oprofile/nmi_timer_int.c b/arch/x86/oprofile/nmi_timer_int.c deleted file mode 100644 index 7f8052cd6620..000000000000 --- a/arch/x86/oprofile/nmi_timer_int.c +++ /dev/null | |||
@@ -1,50 +0,0 @@ | |||
1 | /** | ||
2 | * @file nmi_timer_int.c | ||
3 | * | ||
4 | * @remark Copyright 2003 OProfile authors | ||
5 | * @remark Read the file COPYING | ||
6 | * | ||
7 | * @author Zwane Mwaikambo <zwane@linuxpower.ca> | ||
8 | */ | ||
9 | |||
10 | #include <linux/init.h> | ||
11 | #include <linux/smp.h> | ||
12 | #include <linux/errno.h> | ||
13 | #include <linux/oprofile.h> | ||
14 | #include <linux/rcupdate.h> | ||
15 | #include <linux/kdebug.h> | ||
16 | |||
17 | #include <asm/nmi.h> | ||
18 | #include <asm/apic.h> | ||
19 | #include <asm/ptrace.h> | ||
20 | |||
21 | static int profile_timer_exceptions_notify(unsigned int val, struct pt_regs *regs) | ||
22 | { | ||
23 | oprofile_add_sample(regs, 0); | ||
24 | return NMI_HANDLED; | ||
25 | } | ||
26 | |||
27 | static int timer_start(void) | ||
28 | { | ||
29 | if (register_nmi_handler(NMI_LOCAL, profile_timer_exceptions_notify, | ||
30 | 0, "oprofile-timer")) | ||
31 | return 1; | ||
32 | return 0; | ||
33 | } | ||
34 | |||
35 | |||
36 | static void timer_stop(void) | ||
37 | { | ||
38 | unregister_nmi_handler(NMI_LOCAL, "oprofile-timer"); | ||
39 | synchronize_sched(); /* Allow already-started NMIs to complete. */ | ||
40 | } | ||
41 | |||
42 | |||
43 | int __init op_nmi_timer_init(struct oprofile_operations *ops) | ||
44 | { | ||
45 | ops->start = timer_start; | ||
46 | ops->stop = timer_stop; | ||
47 | ops->cpu_type = "timer"; | ||
48 | printk(KERN_INFO "oprofile: using NMI timer interrupt.\n"); | ||
49 | return 0; | ||
50 | } | ||
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 37718f0f053d..4a01967f02e7 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c | |||
@@ -352,8 +352,7 @@ void __init efi_memblock_x86_reserve_range(void) | |||
352 | boot_params.efi_info.efi_memdesc_size; | 352 | boot_params.efi_info.efi_memdesc_size; |
353 | memmap.desc_version = boot_params.efi_info.efi_memdesc_version; | 353 | memmap.desc_version = boot_params.efi_info.efi_memdesc_version; |
354 | memmap.desc_size = boot_params.efi_info.efi_memdesc_size; | 354 | memmap.desc_size = boot_params.efi_info.efi_memdesc_size; |
355 | memblock_x86_reserve_range(pmap, pmap + memmap.nr_map * memmap.desc_size, | 355 | memblock_reserve(pmap, memmap.nr_map * memmap.desc_size); |
356 | "EFI memmap"); | ||
357 | } | 356 | } |
358 | 357 | ||
359 | #if EFI_DEBUG | 358 | #if EFI_DEBUG |
@@ -397,16 +396,14 @@ void __init efi_reserve_boot_services(void) | |||
397 | if ((start+size >= virt_to_phys(_text) | 396 | if ((start+size >= virt_to_phys(_text) |
398 | && start <= virt_to_phys(_end)) || | 397 | && start <= virt_to_phys(_end)) || |
399 | !e820_all_mapped(start, start+size, E820_RAM) || | 398 | !e820_all_mapped(start, start+size, E820_RAM) || |
400 | memblock_x86_check_reserved_size(&start, &size, | 399 | memblock_is_region_reserved(start, size)) { |
401 | 1<<EFI_PAGE_SHIFT)) { | ||
402 | /* Could not reserve, skip it */ | 400 | /* Could not reserve, skip it */ |
403 | md->num_pages = 0; | 401 | md->num_pages = 0; |
404 | memblock_dbg(PFX "Could not reserve boot range " | 402 | memblock_dbg(PFX "Could not reserve boot range " |
405 | "[0x%010llx-0x%010llx]\n", | 403 | "[0x%010llx-0x%010llx]\n", |
406 | start, start+size-1); | 404 | start, start+size-1); |
407 | } else | 405 | } else |
408 | memblock_x86_reserve_range(start, start+size, | 406 | memblock_reserve(start, size); |
409 | "EFI Boot"); | ||
410 | } | 407 | } |
411 | } | 408 | } |
412 | 409 | ||
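Aside (illustrative, not part of the diff above): the EFI hunks switch from the x86-only memblock_x86_reserve_range(start, end, "label") helper to the generic memblock interface, which takes a base and a size and drops the debug label. A minimal sketch of the new calling convention, with a made-up wrapper name (reserve_firmware_range):

    #include <linux/init.h>
    #include <linux/memblock.h>

    /* Hypothetical wrapper, for illustration only. */
    static void __init reserve_firmware_range(phys_addr_t start, phys_addr_t size)
    {
            /* Skip the range if it overlaps something already reserved. */
            if (memblock_is_region_reserved(start, size))
                    return;

            /* Old: memblock_x86_reserve_range(start, start + size, "label");
             * New: base + size, no label argument.
             */
            memblock_reserve(start, size);
    }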
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c index e36bf714cb77..40e446941dd7 100644 --- a/arch/x86/platform/efi/efi_32.c +++ b/arch/x86/platform/efi/efi_32.c | |||
@@ -39,43 +39,14 @@ | |||
39 | */ | 39 | */ |
40 | 40 | ||
41 | static unsigned long efi_rt_eflags; | 41 | static unsigned long efi_rt_eflags; |
42 | static pgd_t efi_bak_pg_dir_pointer[2]; | ||
43 | 42 | ||
44 | void efi_call_phys_prelog(void) | 43 | void efi_call_phys_prelog(void) |
45 | { | 44 | { |
46 | unsigned long cr4; | ||
47 | unsigned long temp; | ||
48 | struct desc_ptr gdt_descr; | 45 | struct desc_ptr gdt_descr; |
49 | 46 | ||
50 | local_irq_save(efi_rt_eflags); | 47 | local_irq_save(efi_rt_eflags); |
51 | 48 | ||
52 | /* | 49 | load_cr3(initial_page_table); |
53 | * If I don't have PAE, I should just duplicate two entries in page | ||
54 | * directory. If I have PAE, I just need to duplicate one entry in | ||
55 | * page directory. | ||
56 | */ | ||
57 | cr4 = read_cr4_safe(); | ||
58 | |||
59 | if (cr4 & X86_CR4_PAE) { | ||
60 | efi_bak_pg_dir_pointer[0].pgd = | ||
61 | swapper_pg_dir[pgd_index(0)].pgd; | ||
62 | swapper_pg_dir[0].pgd = | ||
63 | swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd; | ||
64 | } else { | ||
65 | efi_bak_pg_dir_pointer[0].pgd = | ||
66 | swapper_pg_dir[pgd_index(0)].pgd; | ||
67 | efi_bak_pg_dir_pointer[1].pgd = | ||
68 | swapper_pg_dir[pgd_index(0x400000)].pgd; | ||
69 | swapper_pg_dir[pgd_index(0)].pgd = | ||
70 | swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd; | ||
71 | temp = PAGE_OFFSET + 0x400000; | ||
72 | swapper_pg_dir[pgd_index(0x400000)].pgd = | ||
73 | swapper_pg_dir[pgd_index(temp)].pgd; | ||
74 | } | ||
75 | |||
76 | /* | ||
77 | * After the lock is released, the original page table is restored. | ||
78 | */ | ||
79 | __flush_tlb_all(); | 50 | __flush_tlb_all(); |
80 | 51 | ||
81 | gdt_descr.address = __pa(get_cpu_gdt_table(0)); | 52 | gdt_descr.address = __pa(get_cpu_gdt_table(0)); |
@@ -85,28 +56,13 @@ void efi_call_phys_prelog(void) | |||
85 | 56 | ||
86 | void efi_call_phys_epilog(void) | 57 | void efi_call_phys_epilog(void) |
87 | { | 58 | { |
88 | unsigned long cr4; | ||
89 | struct desc_ptr gdt_descr; | 59 | struct desc_ptr gdt_descr; |
90 | 60 | ||
91 | gdt_descr.address = (unsigned long)get_cpu_gdt_table(0); | 61 | gdt_descr.address = (unsigned long)get_cpu_gdt_table(0); |
92 | gdt_descr.size = GDT_SIZE - 1; | 62 | gdt_descr.size = GDT_SIZE - 1; |
93 | load_gdt(&gdt_descr); | 63 | load_gdt(&gdt_descr); |
94 | 64 | ||
95 | cr4 = read_cr4_safe(); | 65 | load_cr3(swapper_pg_dir); |
96 | |||
97 | if (cr4 & X86_CR4_PAE) { | ||
98 | swapper_pg_dir[pgd_index(0)].pgd = | ||
99 | efi_bak_pg_dir_pointer[0].pgd; | ||
100 | } else { | ||
101 | swapper_pg_dir[pgd_index(0)].pgd = | ||
102 | efi_bak_pg_dir_pointer[0].pgd; | ||
103 | swapper_pg_dir[pgd_index(0x400000)].pgd = | ||
104 | efi_bak_pg_dir_pointer[1].pgd; | ||
105 | } | ||
106 | |||
107 | /* | ||
108 | * After the lock is released, the original page table is restored. | ||
109 | */ | ||
110 | __flush_tlb_all(); | 66 | __flush_tlb_all(); |
111 | 67 | ||
112 | local_irq_restore(efi_rt_eflags); | 68 | local_irq_restore(efi_rt_eflags); |
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c index b1489a06a49d..ad4ec1cb097e 100644 --- a/arch/x86/platform/mrst/mrst.c +++ b/arch/x86/platform/mrst/mrst.c | |||
@@ -76,6 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX]; | |||
76 | EXPORT_SYMBOL_GPL(sfi_mrtc_array); | 76 | EXPORT_SYMBOL_GPL(sfi_mrtc_array); |
77 | int sfi_mrtc_num; | 77 | int sfi_mrtc_num; |
78 | 78 | ||
79 | static void mrst_power_off(void) | ||
80 | { | ||
81 | if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT) | ||
82 | intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1); | ||
83 | } | ||
84 | |||
85 | static void mrst_reboot(void) | ||
86 | { | ||
87 | if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT) | ||
88 | intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0); | ||
89 | else | ||
90 | intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0); | ||
91 | } | ||
92 | |||
79 | /* parse all the mtimer info to a static mtimer array */ | 93 | /* parse all the mtimer info to a static mtimer array */ |
80 | static int __init sfi_parse_mtmr(struct sfi_table_header *table) | 94 | static int __init sfi_parse_mtmr(struct sfi_table_header *table) |
81 | { | 95 | { |
@@ -265,17 +279,6 @@ static int mrst_i8042_detect(void) | |||
265 | return 0; | 279 | return 0; |
266 | } | 280 | } |
267 | 281 | ||
268 | /* Reboot and power off are handled by the SCU on a MID device */ | ||
269 | static void mrst_power_off(void) | ||
270 | { | ||
271 | intel_scu_ipc_simple_command(0xf1, 1); | ||
272 | } | ||
273 | |||
274 | static void mrst_reboot(void) | ||
275 | { | ||
276 | intel_scu_ipc_simple_command(0xf1, 0); | ||
277 | } | ||
278 | |||
279 | /* | 282 | /* |
280 | * Moorestown does not have external NMI source nor port 0x61 to report | 283 | * Moorestown does not have external NMI source nor port 0x61 to report |
281 | * NMI status. The possible NMI sources are from pmu as a result of NMI | 284 | * NMI status. The possible NMI sources are from pmu as a result of NMI |
@@ -484,6 +487,46 @@ static void __init *max7315_platform_data(void *info) | |||
484 | return max7315; | 487 | return max7315; |
485 | } | 488 | } |
486 | 489 | ||
490 | static void *tca6416_platform_data(void *info) | ||
491 | { | ||
492 | static struct pca953x_platform_data tca6416; | ||
493 | struct i2c_board_info *i2c_info = info; | ||
494 | int gpio_base, intr; | ||
495 | char base_pin_name[SFI_NAME_LEN + 1]; | ||
496 | char intr_pin_name[SFI_NAME_LEN + 1]; | ||
497 | |||
498 | strcpy(i2c_info->type, "tca6416"); | ||
499 | strcpy(base_pin_name, "tca6416_base"); | ||
500 | strcpy(intr_pin_name, "tca6416_int"); | ||
501 | |||
502 | gpio_base = get_gpio_by_name(base_pin_name); | ||
503 | intr = get_gpio_by_name(intr_pin_name); | ||
504 | |||
505 | if (gpio_base == -1) | ||
506 | return NULL; | ||
507 | tca6416.gpio_base = gpio_base; | ||
508 | if (intr != -1) { | ||
509 | i2c_info->irq = intr + MRST_IRQ_OFFSET; | ||
510 | tca6416.irq_base = gpio_base + MRST_IRQ_OFFSET; | ||
511 | } else { | ||
512 | i2c_info->irq = -1; | ||
513 | tca6416.irq_base = -1; | ||
514 | } | ||
515 | return &tca6416; | ||
516 | } | ||
517 | |||
518 | static void *mpu3050_platform_data(void *info) | ||
519 | { | ||
520 | struct i2c_board_info *i2c_info = info; | ||
521 | int intr = get_gpio_by_name("mpu3050_int"); | ||
522 | |||
523 | if (intr == -1) | ||
524 | return NULL; | ||
525 | |||
526 | i2c_info->irq = intr + MRST_IRQ_OFFSET; | ||
527 | return NULL; | ||
528 | } | ||
529 | |||
487 | static void __init *emc1403_platform_data(void *info) | 530 | static void __init *emc1403_platform_data(void *info) |
488 | { | 531 | { |
489 | static short intr2nd_pdata; | 532 | static short intr2nd_pdata; |
@@ -646,12 +689,15 @@ static void *msic_ocd_platform_data(void *info) | |||
646 | static const struct devs_id __initconst device_ids[] = { | 689 | static const struct devs_id __initconst device_ids[] = { |
647 | {"bma023", SFI_DEV_TYPE_I2C, 1, &no_platform_data}, | 690 | {"bma023", SFI_DEV_TYPE_I2C, 1, &no_platform_data}, |
648 | {"pmic_gpio", SFI_DEV_TYPE_SPI, 1, &pmic_gpio_platform_data}, | 691 | {"pmic_gpio", SFI_DEV_TYPE_SPI, 1, &pmic_gpio_platform_data}, |
692 | {"pmic_gpio", SFI_DEV_TYPE_IPC, 1, &pmic_gpio_platform_data}, | ||
649 | {"spi_max3111", SFI_DEV_TYPE_SPI, 0, &max3111_platform_data}, | 693 | {"spi_max3111", SFI_DEV_TYPE_SPI, 0, &max3111_platform_data}, |
650 | {"i2c_max7315", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data}, | 694 | {"i2c_max7315", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data}, |
651 | {"i2c_max7315_2", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data}, | 695 | {"i2c_max7315_2", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data}, |
696 | {"tca6416", SFI_DEV_TYPE_I2C, 1, &tca6416_platform_data}, | ||
652 | {"emc1403", SFI_DEV_TYPE_I2C, 1, &emc1403_platform_data}, | 697 | {"emc1403", SFI_DEV_TYPE_I2C, 1, &emc1403_platform_data}, |
653 | {"i2c_accel", SFI_DEV_TYPE_I2C, 0, &lis331dl_platform_data}, | 698 | {"i2c_accel", SFI_DEV_TYPE_I2C, 0, &lis331dl_platform_data}, |
654 | {"pmic_audio", SFI_DEV_TYPE_IPC, 1, &no_platform_data}, | 699 | {"pmic_audio", SFI_DEV_TYPE_IPC, 1, &no_platform_data}, |
700 | {"mpu3050", SFI_DEV_TYPE_I2C, 1, &mpu3050_platform_data}, | ||
655 | 701 | ||
656 | /* MSIC subdevices */ | 702 | /* MSIC subdevices */ |
657 | {"msic_battery", SFI_DEV_TYPE_IPC, 1, &msic_battery_platform_data}, | 703 | {"msic_battery", SFI_DEV_TYPE_IPC, 1, &msic_battery_platform_data}, |
diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile index f82082677337..d511aa97533a 100644 --- a/arch/x86/tools/Makefile +++ b/arch/x86/tools/Makefile | |||
@@ -18,14 +18,21 @@ chkobjdump = $(srctree)/arch/x86/tools/chkobjdump.awk | |||
18 | quiet_cmd_posttest = TEST $@ | 18 | quiet_cmd_posttest = TEST $@ |
19 | cmd_posttest = ($(OBJDUMP) -v | $(AWK) -f $(chkobjdump)) || $(OBJDUMP) -d -j .text $(objtree)/vmlinux | $(AWK) -f $(distill_awk) | $(obj)/test_get_len $(posttest_64bit) $(posttest_verbose) | 19 | cmd_posttest = ($(OBJDUMP) -v | $(AWK) -f $(chkobjdump)) || $(OBJDUMP) -d -j .text $(objtree)/vmlinux | $(AWK) -f $(distill_awk) | $(obj)/test_get_len $(posttest_64bit) $(posttest_verbose) |
20 | 20 | ||
21 | posttest: $(obj)/test_get_len vmlinux | 21 | quiet_cmd_sanitytest = TEST $@ |
22 | cmd_sanitytest = $(obj)/insn_sanity $(posttest_64bit) -m 1000000 | ||
23 | |||
24 | posttest: $(obj)/test_get_len vmlinux $(obj)/insn_sanity | ||
22 | $(call cmd,posttest) | 25 | $(call cmd,posttest) |
26 | $(call cmd,sanitytest) | ||
23 | 27 | ||
24 | hostprogs-y := test_get_len | 28 | hostprogs-y += test_get_len insn_sanity |
25 | 29 | ||
26 | # -I needed for generated C source and C source which in the kernel tree. | 30 | # -I needed for generated C source and C source which in the kernel tree. |
27 | HOSTCFLAGS_test_get_len.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/ | 31 | HOSTCFLAGS_test_get_len.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/ |
28 | 32 | ||
33 | HOSTCFLAGS_insn_sanity.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/ | ||
34 | |||
29 | # Dependencies are also needed. | 35 | # Dependencies are also needed. |
30 | $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c | 36 | $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c |
31 | 37 | ||
38 | $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c | ||
diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk index eaf11f52fc0b..5f6a5b6c3a15 100644 --- a/arch/x86/tools/gen-insn-attr-x86.awk +++ b/arch/x86/tools/gen-insn-attr-x86.awk | |||
@@ -47,7 +47,7 @@ BEGIN { | |||
47 | sep_expr = "^\\|$" | 47 | sep_expr = "^\\|$" |
48 | group_expr = "^Grp[0-9A-Za-z]+" | 48 | group_expr = "^Grp[0-9A-Za-z]+" |
49 | 49 | ||
50 | imm_expr = "^[IJAO][a-z]" | 50 | imm_expr = "^[IJAOL][a-z]" |
51 | imm_flag["Ib"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)" | 51 | imm_flag["Ib"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)" |
52 | imm_flag["Jb"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)" | 52 | imm_flag["Jb"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)" |
53 | imm_flag["Iw"] = "INAT_MAKE_IMM(INAT_IMM_WORD)" | 53 | imm_flag["Iw"] = "INAT_MAKE_IMM(INAT_IMM_WORD)" |
@@ -59,6 +59,7 @@ BEGIN { | |||
59 | imm_flag["Iv"] = "INAT_MAKE_IMM(INAT_IMM_VWORD)" | 59 | imm_flag["Iv"] = "INAT_MAKE_IMM(INAT_IMM_VWORD)" |
60 | imm_flag["Ob"] = "INAT_MOFFSET" | 60 | imm_flag["Ob"] = "INAT_MOFFSET" |
61 | imm_flag["Ov"] = "INAT_MOFFSET" | 61 | imm_flag["Ov"] = "INAT_MOFFSET" |
62 | imm_flag["Lx"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)" | ||
62 | 63 | ||
63 | modrm_expr = "^([CDEGMNPQRSUVW/][a-z]+|NTA|T[012])" | 64 | modrm_expr = "^([CDEGMNPQRSUVW/][a-z]+|NTA|T[012])" |
64 | force64_expr = "\\([df]64\\)" | 65 | force64_expr = "\\([df]64\\)" |
@@ -70,8 +71,12 @@ BEGIN { | |||
70 | lprefix3_expr = "\\(F2\\)" | 71 | lprefix3_expr = "\\(F2\\)" |
71 | max_lprefix = 4 | 72 | max_lprefix = 4 |
72 | 73 | ||
73 | vexok_expr = "\\(VEX\\)" | 74 | # All opcodes starting with lower-case 'v' or with (v1) superscript |
74 | vexonly_expr = "\\(oVEX\\)" | 75 | # accepts VEX prefix |
76 | vexok_opcode_expr = "^v.*" | ||
77 | vexok_expr = "\\(v1\\)" | ||
78 | # All opcodes with (v) superscript supports *only* VEX prefix | ||
79 | vexonly_expr = "\\(v\\)" | ||
75 | 80 | ||
76 | prefix_expr = "\\(Prefix\\)" | 81 | prefix_expr = "\\(Prefix\\)" |
77 | prefix_num["Operand-Size"] = "INAT_PFX_OPNDSZ" | 82 | prefix_num["Operand-Size"] = "INAT_PFX_OPNDSZ" |
@@ -85,8 +90,8 @@ BEGIN { | |||
85 | prefix_num["SEG=GS"] = "INAT_PFX_GS" | 90 | prefix_num["SEG=GS"] = "INAT_PFX_GS" |
86 | prefix_num["SEG=SS"] = "INAT_PFX_SS" | 91 | prefix_num["SEG=SS"] = "INAT_PFX_SS" |
87 | prefix_num["Address-Size"] = "INAT_PFX_ADDRSZ" | 92 | prefix_num["Address-Size"] = "INAT_PFX_ADDRSZ" |
88 | prefix_num["2bytes-VEX"] = "INAT_PFX_VEX2" | 93 | prefix_num["VEX+1byte"] = "INAT_PFX_VEX2" |
89 | prefix_num["3bytes-VEX"] = "INAT_PFX_VEX3" | 94 | prefix_num["VEX+2byte"] = "INAT_PFX_VEX3" |
90 | 95 | ||
91 | clear_vars() | 96 | clear_vars() |
92 | } | 97 | } |
@@ -310,12 +315,10 @@ function convert_operands(count,opnd, i,j,imm,mod) | |||
310 | if (match(opcode, fpu_expr)) | 315 | if (match(opcode, fpu_expr)) |
311 | flags = add_flags(flags, "INAT_MODRM") | 316 | flags = add_flags(flags, "INAT_MODRM") |
312 | 317 | ||
313 | # check VEX only code | 318 | # check VEX codes |
314 | if (match(ext, vexonly_expr)) | 319 | if (match(ext, vexonly_expr)) |
315 | flags = add_flags(flags, "INAT_VEXOK | INAT_VEXONLY") | 320 | flags = add_flags(flags, "INAT_VEXOK | INAT_VEXONLY") |
316 | 321 | else if (match(ext, vexok_expr) || match(opcode, vexok_opcode_expr)) | |
317 | # check VEX only code | ||
318 | if (match(ext, vexok_expr)) | ||
319 | flags = add_flags(flags, "INAT_VEXOK") | 322 | flags = add_flags(flags, "INAT_VEXOK") |
320 | 323 | ||
321 | # check prefixes | 324 | # check prefixes |
diff --git a/arch/x86/tools/insn_sanity.c b/arch/x86/tools/insn_sanity.c new file mode 100644 index 000000000000..cc2f8c131286 --- /dev/null +++ b/arch/x86/tools/insn_sanity.c | |||
@@ -0,0 +1,275 @@ | |||
1 | /* | ||
2 | * x86 decoder sanity test - based on test_get_insn.c | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * Copyright (C) IBM Corporation, 2009 | ||
19 | * Copyright (C) Hitachi, Ltd., 2011 | ||
20 | */ | ||
21 | |||
22 | #include <stdlib.h> | ||
23 | #include <stdio.h> | ||
24 | #include <string.h> | ||
25 | #include <assert.h> | ||
26 | #include <unistd.h> | ||
27 | #include <sys/types.h> | ||
28 | #include <sys/stat.h> | ||
29 | #include <fcntl.h> | ||
30 | |||
31 | #define unlikely(cond) (cond) | ||
32 | #define ARRAY_SIZE(a) (sizeof(a)/sizeof(a[0])) | ||
33 | |||
34 | #include <asm/insn.h> | ||
35 | #include <inat.c> | ||
36 | #include <insn.c> | ||
37 | |||
38 | /* | ||
39 | * Test of instruction analysis against tampering. | ||
40 | * Feed random binary to instruction decoder and ensure not to | ||
41 | * access out-of-instruction-buffer. | ||
42 | */ | ||
43 | |||
44 | #define DEFAULT_MAX_ITER 10000 | ||
45 | #define INSN_NOP 0x90 | ||
46 | |||
47 | static const char *prog; /* Program name */ | ||
48 | static int verbose; /* Verbosity */ | ||
49 | static int x86_64; /* x86-64 bit mode flag */ | ||
50 | static unsigned int seed; /* Random seed */ | ||
51 | static unsigned long iter_start; /* Start of iteration number */ | ||
52 | static unsigned long iter_end = DEFAULT_MAX_ITER; /* End of iteration number */ | ||
53 | static FILE *input_file; /* Input file name */ | ||
54 | |||
55 | static void usage(const char *err) | ||
56 | { | ||
57 | if (err) | ||
58 | fprintf(stderr, "Error: %s\n\n", err); | ||
59 | fprintf(stderr, "Usage: %s [-y|-n|-v] [-s seed[,no]] [-m max] [-i input]\n", prog); | ||
60 | fprintf(stderr, "\t-y 64bit mode\n"); | ||
61 | fprintf(stderr, "\t-n 32bit mode\n"); | ||
62 | fprintf(stderr, "\t-v Verbosity(-vv dumps any decoded result)\n"); | ||
63 | fprintf(stderr, "\t-s Give a random seed (and iteration number)\n"); | ||
64 | fprintf(stderr, "\t-m Give a maximum iteration number\n"); | ||
65 | fprintf(stderr, "\t-i Give an input file with decoded binary\n"); | ||
66 | exit(1); | ||
67 | } | ||
68 | |||
69 | static void dump_field(FILE *fp, const char *name, const char *indent, | ||
70 | struct insn_field *field) | ||
71 | { | ||
72 | fprintf(fp, "%s.%s = {\n", indent, name); | ||
73 | fprintf(fp, "%s\t.value = %d, bytes[] = {%x, %x, %x, %x},\n", | ||
74 | indent, field->value, field->bytes[0], field->bytes[1], | ||
75 | field->bytes[2], field->bytes[3]); | ||
76 | fprintf(fp, "%s\t.got = %d, .nbytes = %d},\n", indent, | ||
77 | field->got, field->nbytes); | ||
78 | } | ||
79 | |||
80 | static void dump_insn(FILE *fp, struct insn *insn) | ||
81 | { | ||
82 | fprintf(fp, "Instruction = {\n"); | ||
83 | dump_field(fp, "prefixes", "\t", &insn->prefixes); | ||
84 | dump_field(fp, "rex_prefix", "\t", &insn->rex_prefix); | ||
85 | dump_field(fp, "vex_prefix", "\t", &insn->vex_prefix); | ||
86 | dump_field(fp, "opcode", "\t", &insn->opcode); | ||
87 | dump_field(fp, "modrm", "\t", &insn->modrm); | ||
88 | dump_field(fp, "sib", "\t", &insn->sib); | ||
89 | dump_field(fp, "displacement", "\t", &insn->displacement); | ||
90 | dump_field(fp, "immediate1", "\t", &insn->immediate1); | ||
91 | dump_field(fp, "immediate2", "\t", &insn->immediate2); | ||
92 | fprintf(fp, "\t.attr = %x, .opnd_bytes = %d, .addr_bytes = %d,\n", | ||
93 | insn->attr, insn->opnd_bytes, insn->addr_bytes); | ||
94 | fprintf(fp, "\t.length = %d, .x86_64 = %d, .kaddr = %p}\n", | ||
95 | insn->length, insn->x86_64, insn->kaddr); | ||
96 | } | ||
97 | |||
98 | static void dump_stream(FILE *fp, const char *msg, unsigned long nr_iter, | ||
99 | unsigned char *insn_buf, struct insn *insn) | ||
100 | { | ||
101 | int i; | ||
102 | |||
103 | fprintf(fp, "%s:\n", msg); | ||
104 | |||
105 | dump_insn(fp, insn); | ||
106 | |||
107 | fprintf(fp, "You can reproduce this with below command(s);\n"); | ||
108 | |||
109 | /* Input a decoded instruction sequence directly */ | ||
110 | fprintf(fp, " $ echo "); | ||
111 | for (i = 0; i < MAX_INSN_SIZE; i++) | ||
112 | fprintf(fp, " %02x", insn_buf[i]); | ||
113 | fprintf(fp, " | %s -i -\n", prog); | ||
114 | |||
115 | if (!input_file) { | ||
116 | fprintf(fp, "Or \n"); | ||
117 | /* Give a seed and iteration number */ | ||
118 | fprintf(fp, " $ %s -s 0x%x,%lu\n", prog, seed, nr_iter); | ||
119 | } | ||
120 | } | ||
121 | |||
122 | static void init_random_seed(void) | ||
123 | { | ||
124 | int fd; | ||
125 | |||
126 | fd = open("/dev/urandom", O_RDONLY); | ||
127 | if (fd < 0) | ||
128 | goto fail; | ||
129 | |||
130 | if (read(fd, &seed, sizeof(seed)) != sizeof(seed)) | ||
131 | goto fail; | ||
132 | |||
133 | close(fd); | ||
134 | return; | ||
135 | fail: | ||
136 | usage("Failed to open /dev/urandom"); | ||
137 | } | ||
138 | |||
139 | /* Read given instruction sequence from the input file */ | ||
140 | static int read_next_insn(unsigned char *insn_buf) | ||
141 | { | ||
142 | char buf[256] = "", *tmp; | ||
143 | int i; | ||
144 | |||
145 | tmp = fgets(buf, ARRAY_SIZE(buf), input_file); | ||
146 | if (tmp == NULL || feof(input_file)) | ||
147 | return 0; | ||
148 | |||
149 | for (i = 0; i < MAX_INSN_SIZE; i++) { | ||
150 | insn_buf[i] = (unsigned char)strtoul(tmp, &tmp, 16); | ||
151 | if (*tmp != ' ') | ||
152 | break; | ||
153 | } | ||
154 | |||
155 | return i; | ||
156 | } | ||
157 | |||
158 | static int generate_insn(unsigned char *insn_buf) | ||
159 | { | ||
160 | int i; | ||
161 | |||
162 | if (input_file) | ||
163 | return read_next_insn(insn_buf); | ||
164 | |||
165 | /* Fills buffer with random binary up to MAX_INSN_SIZE */ | ||
166 | for (i = 0; i < MAX_INSN_SIZE - 1; i += 2) | ||
167 | *(unsigned short *)(&insn_buf[i]) = random() & 0xffff; | ||
168 | |||
169 | while (i < MAX_INSN_SIZE) | ||
170 | insn_buf[i++] = random() & 0xff; | ||
171 | |||
172 | return i; | ||
173 | } | ||
174 | |||
175 | static void parse_args(int argc, char **argv) | ||
176 | { | ||
177 | int c; | ||
178 | char *tmp = NULL; | ||
179 | int set_seed = 0; | ||
180 | |||
181 | prog = argv[0]; | ||
182 | while ((c = getopt(argc, argv, "ynvs:m:i:")) != -1) { | ||
183 | switch (c) { | ||
184 | case 'y': | ||
185 | x86_64 = 1; | ||
186 | break; | ||
187 | case 'n': | ||
188 | x86_64 = 0; | ||
189 | break; | ||
190 | case 'v': | ||
191 | verbose++; | ||
192 | break; | ||
193 | case 'i': | ||
194 | if (strcmp("-", optarg) == 0) | ||
195 | input_file = stdin; | ||
196 | else | ||
197 | input_file = fopen(optarg, "r"); | ||
198 | if (!input_file) | ||
199 | usage("Failed to open input file"); | ||
200 | break; | ||
201 | case 's': | ||
202 | seed = (unsigned int)strtoul(optarg, &tmp, 0); | ||
203 | if (*tmp == ',') { | ||
204 | optarg = tmp + 1; | ||
205 | iter_start = strtoul(optarg, &tmp, 0); | ||
206 | } | ||
207 | if (*tmp != '\0' || tmp == optarg) | ||
208 | usage("Failed to parse seed"); | ||
209 | set_seed = 1; | ||
210 | break; | ||
211 | case 'm': | ||
212 | iter_end = strtoul(optarg, &tmp, 0); | ||
213 | if (*tmp != '\0' || tmp == optarg) | ||
214 | usage("Failed to parse max_iter"); | ||
215 | break; | ||
216 | default: | ||
217 | usage(NULL); | ||
218 | } | ||
219 | } | ||
220 | |||
221 | /* Check errors */ | ||
222 | if (iter_end < iter_start) | ||
223 | usage("Max iteration number must be bigger than iter-num"); | ||
224 | |||
225 | if (set_seed && input_file) | ||
226 | usage("Don't use input file (-i) with random seed (-s)"); | ||
227 | |||
228 | /* Initialize random seed */ | ||
229 | if (!input_file) { | ||
230 | if (!set_seed) /* No seed is given */ | ||
231 | init_random_seed(); | ||
232 | srand(seed); | ||
233 | } | ||
234 | } | ||
235 | |||
236 | int main(int argc, char **argv) | ||
237 | { | ||
238 | struct insn insn; | ||
239 | int insns = 0; | ||
240 | int errors = 0; | ||
241 | unsigned long i; | ||
242 | unsigned char insn_buf[MAX_INSN_SIZE * 2]; | ||
243 | |||
244 | parse_args(argc, argv); | ||
245 | |||
246 | /* Prepare stop bytes with NOPs */ | ||
247 | memset(insn_buf + MAX_INSN_SIZE, INSN_NOP, MAX_INSN_SIZE); | ||
248 | |||
249 | for (i = 0; i < iter_end; i++) { | ||
250 | if (generate_insn(insn_buf) <= 0) | ||
251 | break; | ||
252 | |||
253 | if (i < iter_start) /* Skip to given iteration number */ | ||
254 | continue; | ||
255 | |||
256 | /* Decode an instruction */ | ||
257 | insn_init(&insn, insn_buf, x86_64); | ||
258 | insn_get_length(&insn); | ||
259 | |||
260 | if (insn.next_byte <= insn.kaddr || | ||
261 | insn.kaddr + MAX_INSN_SIZE < insn.next_byte) { | ||
262 | /* Access out-of-range memory */ | ||
263 | dump_stream(stderr, "Error: Found an access violation", i, insn_buf, &insn); | ||
264 | errors++; | ||
265 | } else if (verbose && !insn_complete(&insn)) | ||
266 | dump_stream(stdout, "Info: Found an undecodable input", i, insn_buf, &insn); | ||
267 | else if (verbose >= 2) | ||
268 | dump_insn(stdout, &insn); | ||
269 | insns++; | ||
270 | } | ||
271 | |||
272 | fprintf(stdout, "%s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n", (errors) ? "Failure" : "Success", insns, (input_file) ? "given" : "random", errors, seed); | ||
273 | |||
274 | return errors ? 1 : 0; | ||
275 | } | ||
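Aside (illustrative, not part of the diff above): insn_sanity.c drives the same decoder entry points a normal caller would use. A minimal host-side sketch that decodes one known instruction; it borrows the include trick and the -I paths used for insn_sanity in the Makefile above, and the byte pattern (48 89 e5, mov %rsp,%rbp) is chosen only for illustration:

    #include <stdio.h>
    #include <string.h>

    #define unlikely(cond) (cond)

    #include <asm/insn.h>
    #include <inat.c>
    #include <insn.c>

    int main(void)
    {
            struct insn insn;
            unsigned char buf[MAX_INSN_SIZE];

            memset(buf, 0x90, sizeof(buf));                 /* pad with NOPs */
            buf[0] = 0x48; buf[1] = 0x89; buf[2] = 0xe5;    /* mov %rsp,%rbp */

            insn_init(&insn, buf, 1);                       /* 1 = decode as x86-64 */
            insn_get_length(&insn);

            if (insn_complete(&insn))
                    printf("decoded %d byte(s)\n", insn.length);    /* expect 3 */
            return 0;
    }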
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 1f928659c338..12eb07bfb267 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -1215,8 +1215,6 @@ asmlinkage void __init xen_start_kernel(void) | |||
1215 | local_irq_disable(); | 1215 | local_irq_disable(); |
1216 | early_boot_irqs_disabled = true; | 1216 | early_boot_irqs_disabled = true; |
1217 | 1217 | ||
1218 | memblock_init(); | ||
1219 | |||
1220 | xen_raw_console_write("mapping kernel into physical memory\n"); | 1218 | xen_raw_console_write("mapping kernel into physical memory\n"); |
1221 | pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages); | 1219 | pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages); |
1222 | xen_ident_map_ISA(); | 1220 | xen_ident_map_ISA(); |
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 87f6673b1207..f4bf8aa574f4 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -1774,10 +1774,8 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd, | |||
1774 | __xen_write_cr3(true, __pa(pgd)); | 1774 | __xen_write_cr3(true, __pa(pgd)); |
1775 | xen_mc_issue(PARAVIRT_LAZY_CPU); | 1775 | xen_mc_issue(PARAVIRT_LAZY_CPU); |
1776 | 1776 | ||
1777 | memblock_x86_reserve_range(__pa(xen_start_info->pt_base), | 1777 | memblock_reserve(__pa(xen_start_info->pt_base), |
1778 | __pa(xen_start_info->pt_base + | 1778 | xen_start_info->nr_pt_frames * PAGE_SIZE); |
1779 | xen_start_info->nr_pt_frames * PAGE_SIZE), | ||
1780 | "XEN PAGETABLES"); | ||
1781 | 1779 | ||
1782 | return pgd; | 1780 | return pgd; |
1783 | } | 1781 | } |
@@ -1853,10 +1851,8 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd, | |||
1853 | PFN_DOWN(__pa(initial_page_table))); | 1851 | PFN_DOWN(__pa(initial_page_table))); |
1854 | xen_write_cr3(__pa(initial_page_table)); | 1852 | xen_write_cr3(__pa(initial_page_table)); |
1855 | 1853 | ||
1856 | memblock_x86_reserve_range(__pa(xen_start_info->pt_base), | 1854 | memblock_reserve(__pa(xen_start_info->pt_base), |
1857 | __pa(xen_start_info->pt_base + | 1855 | xen_start_info->nr_pt_frames * PAGE_SIZE); |
1858 | xen_start_info->nr_pt_frames * PAGE_SIZE), | ||
1859 | "XEN PAGETABLES"); | ||
1860 | 1856 | ||
1861 | return initial_page_table; | 1857 | return initial_page_table; |
1862 | } | 1858 | } |
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 1093f80c162d..e03c63692176 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c | |||
@@ -75,7 +75,7 @@ static void __init xen_add_extra_mem(u64 start, u64 size) | |||
75 | if (i == XEN_EXTRA_MEM_MAX_REGIONS) | 75 | if (i == XEN_EXTRA_MEM_MAX_REGIONS) |
76 | printk(KERN_WARNING "Warning: not enough extra memory regions\n"); | 76 | printk(KERN_WARNING "Warning: not enough extra memory regions\n"); |
77 | 77 | ||
78 | memblock_x86_reserve_range(start, start + size, "XEN EXTRA"); | 78 | memblock_reserve(start, size); |
79 | 79 | ||
80 | xen_max_p2m_pfn = PFN_DOWN(start + size); | 80 | xen_max_p2m_pfn = PFN_DOWN(start + size); |
81 | 81 | ||
@@ -173,9 +173,21 @@ static unsigned long __init xen_get_max_pages(void) | |||
173 | domid_t domid = DOMID_SELF; | 173 | domid_t domid = DOMID_SELF; |
174 | int ret; | 174 | int ret; |
175 | 175 | ||
176 | ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid); | 176 | /* |
177 | if (ret > 0) | 177 | * For the initial domain we use the maximum reservation as |
178 | max_pages = ret; | 178 | * the maximum page. |
179 | * | ||
180 | * For guest domains the current maximum reservation reflects | ||
181 | * the current maximum rather than the static maximum. In this | ||
182 | * case the e820 map provided to us will cover the static | ||
183 | * maximum region. | ||
184 | */ | ||
185 | if (xen_initial_domain()) { | ||
186 | ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid); | ||
187 | if (ret > 0) | ||
188 | max_pages = ret; | ||
189 | } | ||
190 | |||
179 | return min(max_pages, MAX_DOMAIN_PAGES); | 191 | return min(max_pages, MAX_DOMAIN_PAGES); |
180 | } | 192 | } |
181 | 193 | ||
@@ -299,9 +311,8 @@ char * __init xen_memory_setup(void) | |||
299 | * - xen_start_info | 311 | * - xen_start_info |
300 | * See comment above "struct start_info" in <xen/interface/xen.h> | 312 | * See comment above "struct start_info" in <xen/interface/xen.h> |
301 | */ | 313 | */ |
302 | memblock_x86_reserve_range(__pa(xen_start_info->mfn_list), | 314 | memblock_reserve(__pa(xen_start_info->mfn_list), |
303 | __pa(xen_start_info->pt_base), | 315 | xen_start_info->pt_base - xen_start_info->mfn_list); |
304 | "XEN START INFO"); | ||
305 | 316 | ||
306 | sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); | 317 | sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); |
307 | 318 | ||
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c index f3e5eb43f71c..ac62f9cf1e10 100644 --- a/arch/xtensa/kernel/time.c +++ b/arch/xtensa/kernel/time.c | |||
@@ -41,14 +41,6 @@ static struct clocksource ccount_clocksource = { | |||
41 | .rating = 200, | 41 | .rating = 200, |
42 | .read = ccount_read, | 42 | .read = ccount_read, |
43 | .mask = CLOCKSOURCE_MASK(32), | 43 | .mask = CLOCKSOURCE_MASK(32), |
44 | /* | ||
45 | * With a shift of 22 the lower limit of the cpu clock is | ||
46 | * 1MHz, where NSEC_PER_CCOUNT is 1000 or a bit less than | ||
47 | * 2^10: Since we have 32 bits and the multiplicator can | ||
48 | * already take up as much as 10 bits, this leaves us with | ||
49 | * remaining upper 22 bits. | ||
50 | */ | ||
51 | .shift = 22, | ||
52 | }; | 44 | }; |
53 | 45 | ||
54 | static irqreturn_t timer_interrupt(int irq, void *dev_id); | 46 | static irqreturn_t timer_interrupt(int irq, void *dev_id); |
@@ -66,10 +58,7 @@ void __init time_init(void) | |||
66 | printk("%d.%02d MHz\n", (int)ccount_per_jiffy/(1000000/HZ), | 58 | printk("%d.%02d MHz\n", (int)ccount_per_jiffy/(1000000/HZ), |
67 | (int)(ccount_per_jiffy/(10000/HZ))%100); | 59 | (int)(ccount_per_jiffy/(10000/HZ))%100); |
68 | #endif | 60 | #endif |
69 | ccount_clocksource.mult = | 61 | clocksource_register_hz(&ccount_clocksource, CCOUNT_PER_JIFFY * HZ); |
70 | clocksource_hz2mult(CCOUNT_PER_JIFFY * HZ, | ||
71 | ccount_clocksource.shift); | ||
72 | clocksource_register(&ccount_clocksource); | ||
73 | 62 | ||
74 | /* Initialize the linux timer interrupt. */ | 63 | /* Initialize the linux timer interrupt. */ |
75 | 64 | ||